# Startup banner for the interactive RKNN-Yolov7 deployment tool.
print('--------------------------------------------')
print('-> RKNN-Yolov7交互式部署程序 by PurpleSky <-')
print('--------------------------------------------')
import re
import os
import requests
import torch
import yaml
import subprocess as sp
import sys
import zipfile
import tqdm
import tempfile
import time
import pathlib
import yaml
from contextlib import contextmanager
from copy import deepcopy
from models.yolo import Model
from utils.torch_utils import select_device, is_parallel
from glob import glob
from random import shuffle


# Model-hub category id used for object-detection models.
CATEGORY_ID = 1
# The operator's model-hub user id, injected via environment.
# Raises KeyError if USER_ID is unset — fails fast before any interaction.
USER_ID = int(os.environ['USER_ID'])

# Cluster-internal service endpoints plus the public Gitee host.
MODEL_HUB_BASE_URL = 'http://model-hub-proxy.model-hub:6103'
MODEL_COMPILER_BASE_URL = 'http://model-compiler-spawner-proxy.model-compiler:5000'
GITEE_BASE_URL = 'https://gitee.com'

# Visual separator printed between interactive sections.
DIVIDER = '*' * 100

# Field-validation helpers: each constant is a (check_fn, error_tip) pair
# consumed by input_with_check(); the trailing lambdas are factories that
# build parameterized (check_fn, error_tip) pairs.
NOT_EMPTY_CHECK = (lambda data: bool(data), '输入不能为空！')
FILE_EXIST_CHECK = (lambda path: os.path.exists(path), '文件不存在，请输入正确的路径！')
DIR_CHECK = (lambda path: os.path.isdir(path), '该位置不是一个目录！')
DIGIT_CHECK = (lambda data: data.isdigit(), '请输入数字！')
MAX_LENGTH_CHECK = lambda length: (lambda data: len(data) <= length, f'长度过长，最长{length}个字符！')
MIN_LENGTH_CHECK = lambda length: (lambda data: len(data) >= length, f'长度过短，最短{length}个字符！')
FILE_EXT_CHECK = lambda ext: (lambda path: path.endswith(f'.{ext}'), f'该文件不是一个.{ext}类型的文件！')

def is_ok(ok):
    """Map a free-form yes/no answer to True/False, or None if unrecognized."""
    answer = ok.lower()
    if answer in ('y', 'yes', '可以', '好', '是', '是的'):
        return True
    if answer in ('n', 'no', '不可以', '不好', '不是', '不是的'):
        return False
    return None

def confirm(process_fn):
    """Decorator: re-run process_fn until the user confirms its result.

    After each run the user is asked "这样可以吗？"; an affirmative answer
    returns the last result, a negative one re-runs process_fn, anything
    unrecognized re-prompts.
    """
    def wrapper(*args, **kwargs):
        while True:
            res = process_fn(*args, **kwargs)
            answer = None
            while answer is None:
                answer = is_ok(input('# 这样可以吗？（y/n）：'))
            if answer:
                return res
            print('# 请重新录入...')
    return wrapper

def input_with_check(promp, *check_and_tips, default=None):
    """Prompt until the input passes every (check_fn, tip) validator.

    An empty answer returns `default` when one was provided; otherwise the
    first failing validator's tip is printed and the prompt repeats.
    """
    while True:
        data = input(promp)
        if data == '' and default is not None:
            return default
        for check_fn, tip in check_and_tips:
            if not check_fn(data):
                print('@', tip)
                break
        else:
            return data

def model_hub_api_r_check(r, error_msg):
    """Abort the program when a model-hub API response indicates failure.

    Checks the HTTP status first, then the service-level `code` field of the
    JSON body (0 means success).
    """
    if not r.ok:
        print(f'{error_msg}！HTTP状态码为:{r.status_code}，返回内容为{r.text}')
        exit(1)
    body = r.json()
    if body['code'] != 0:
        print(f'{error_msg}！错误码为：{body["code"]}，错误原因为：{body["msg"]}')
        exit(1)

def model_compiler_api_r_check(r, error_msg):
    """Abort on model-compiler-service errors.

    HTTP 503 is tolerated (the compiler pod may still be spawning). On other
    errors, prefer the JSON `message` field as the detail; fall back to the
    raw body when the response is not JSON or lacks that field.
    """
    if not r.ok and r.status_code != 503:
        try:
            detail = r.json()["message"]
        except (ValueError, KeyError):
            # BUG FIX: was a bare `except:` — narrowed to the two failure
            # modes of the JSON access (non-JSON body / missing key).
            detail = r.text
        print(f'{error_msg}！HTTP状态码为:{r.status_code}，返回内容为{detail}')
        exit(1)

def gitee_api_r_check(r, error_msg):
    """Abort when a Gitee API response is an HTTP error or an OAuth error.

    Gitee's OAuth endpoints report failures with an `error` key in an
    otherwise-200 JSON body, so both layers are checked.
    """
    if not r.ok:
        print(f'{error_msg}！HTTP状态码为:{r.status_code}，返回内容为{r.text}')
        exit(1)
    body = r.json()
    if 'error' in body:
        print(f'{error_msg}！原因为:{body["error"]}，描述为{body["error_description"]}')
        exit(1)

def choose(promp, *choices):
    """Print a numbered menu and return the user's 1-based selection."""
    print(promp)
    for index, choice in enumerate(choices, start=1):
        print(f'    {index})', choice)
    selection = input_with_check(
        '# 请选择：',
        NOT_EMPTY_CHECK,
        DIGIT_CHECK,
        (lambda sel: 1 <= int(sel) <= len(choices), '请在正确的数字范围中输入！'),
    )
    return int(selection)

def query(promp):
    """Ask a yes/no question and return True/False; re-ask until understood."""
    answer = None
    while answer is None:
        answer = is_ok(input(f'{promp}（y/n）：'))
    return answer

# Locate the experiment id from this script's own path; the tool must live in
# an RKNN-Yolov7 experiment directory named exp_<id>.
# BUG FIX: the id was extracted inside a bare try/except that relied on a
# TypeError from subscripting a failed re.search(); use an explicit None
# check and a raw regex string (non-raw '\d' is deprecated in Python 3.12+).
match = re.search(r'exp_(\d+)', __file__)
if match is None:
    print('$ 请在RKNN-Yolov7的实验根目录中进行操作！')
    exit(1)
exp_id = match[1]
if not os.path.basename(os.path.dirname(__file__)).startswith('exp_'):
    print('$ 请在RKNN-Yolov7的实验根目录中进行操作！')
    exit(1)

def reparameterization(model_path, deploy_cfg_path, nc, out_layer, output_path):
    """Convert a YOLOv7 checkpoint trained with a cfg/training/*.yaml structure
    into the equivalent cfg/deploy/*.yaml structure by folding the YOLOR
    implicit layers (ia/im) into the detect head's convolutions.

    Args:
        model_path: path of the trained .pt checkpoint.
        deploy_cfg_path: cfg/deploy/*.yaml describing the deploy structure.
        nc: number of detection classes.
        out_layer: index of the detect-head layer (77 for tiny, 105 for full).
        output_path: where the reparameterized .pt checkpoint is saved.
    """
    device = select_device('cpu', batch_size=1)
    # model trained by cfg/training/*.yaml
    # (nc below must match the checkpoint's class count)
    ckpt = torch.load(model_path, map_location=device)
    # reparameterized model in cfg/deploy/*.yaml
    model = Model(deploy_cfg_path, ch=3, nc=nc).to(device)

    # Anchors are stored as flat (w, h) pairs per scale, so half the list
    # length is the anchor count per detection scale.
    with open(deploy_cfg_path) as f:
        yml = yaml.load(f, Loader=yaml.SafeLoader)
    anchors = len(yml['anchors'][0]) // 2

    # copy intersect weights: only keys present in both models with matching
    # shapes are transferred (exclude list is currently empty).
    state_dict = ckpt['model'].float().state_dict()
    exclude = []
    intersect_state_dict = {k: v for k, v in state_dict.items() if k in model.state_dict() and not any(x in k for x in exclude) and v.shape == model.state_dict()[k].shape}
    model.load_state_dict(intersect_state_dict, strict=False)
    model.names = ckpt['model'].names
    model.nc = ckpt['model'].nc

    # reparametrized YOLOR: fold the implicit multiply (im) and add (ia)
    # layers into the three detect-head convs model.<out_layer>.m.{0,1,2}.
    # NOTE(review): `[:, i, : :]` parses as `[:, i, :]` (empty slice step) —
    # presumably `[:, i, :, :]` was intended; confirm against the upstream
    # YOLOv7 reparameterization script before changing.
    for i in range((model.nc+5)*anchors):
        model.state_dict()[f'model.{out_layer}.m.0.weight'].data[i, :, :, :] *= state_dict[f'model.{out_layer}.im.0.implicit'].data[:, i, : :].squeeze()
        model.state_dict()[f'model.{out_layer}.m.1.weight'].data[i, :, :, :] *= state_dict[f'model.{out_layer}.im.1.implicit'].data[:, i, : :].squeeze()
        model.state_dict()[f'model.{out_layer}.m.2.weight'].data[i, :, :, :] *= state_dict[f'model.{out_layer}.im.2.implicit'].data[:, i, : :].squeeze()
    model.state_dict()[f'model.{out_layer}.m.0.bias'].data += state_dict[f'model.{out_layer}.m.0.weight'].mul(state_dict[f'model.{out_layer}.ia.0.implicit']).sum(1).squeeze()
    model.state_dict()[f'model.{out_layer}.m.1.bias'].data += state_dict[f'model.{out_layer}.m.1.weight'].mul(state_dict[f'model.{out_layer}.ia.1.implicit']).sum(1).squeeze()
    model.state_dict()[f'model.{out_layer}.m.2.bias'].data += state_dict[f'model.{out_layer}.m.2.weight'].mul(state_dict[f'model.{out_layer}.ia.2.implicit']).sum(1).squeeze()
    model.state_dict()[f'model.{out_layer}.m.0.bias'].data *= state_dict[f'model.{out_layer}.im.0.implicit'].data.squeeze()
    model.state_dict()[f'model.{out_layer}.m.1.bias'].data *= state_dict[f'model.{out_layer}.im.1.implicit'].data.squeeze()
    model.state_dict()[f'model.{out_layer}.m.2.bias'].data *= state_dict[f'model.{out_layer}.im.2.implicit'].data.squeeze()

    # model to be saved: strip optimizer/training state, store fp16 weights.
    ckpt = {'model': deepcopy(model.module if is_parallel(model) else model).half(),
            'optimizer': None,
            'training_results': None,
            'epoch': -1}

    # save reparameterized model
    torch.save(ckpt, output_path)

def export_model_to_onnx():
    """Interactively reparameterize a trained .pt model and export it to ONNX.

    Returns:
        (onnx_path, anchors_path): path of the exported ONNX model and of the
        RK_anchors.txt file produced by export.py. Exits on any failure.
    """
    print('$ 下面将对你训练结构的模型进行重参数化，转换为部署结构的模型...')
    model_path = input_with_check('# 你的模型(.pt)存放在什么位置【模型路径】：',
        NOT_EMPTY_CHECK,
        FILE_EXIST_CHECK,
        FILE_EXT_CHECK('pt')
    )
    # The reparameterized checkpoint is written next to the original model.
    deploy_model_path = os.path.join(os.path.dirname(model_path), os.path.basename(model_path).replace('.pt', '.deploy.pt'))
    ch = choose('$ 你的模型使用哪种结构进行训练？',
        'Yolov7-tiny[cfg/training/yolov7-tiny*.yaml]',
        'Yolov7[cfg/training/yolov7.yaml]',
        '其他'
    )
    if ch == 3:
        print('$ 其他的结构暂不支持，请参考RK_DEPLOY_README.md自行进行重参数化并导出为ONNX')
        exit(1)
    elif ch == 1:
        # Detect-head layer index passed to reparameterization() to address
        # the model.<out_layer>.* weights; differs between tiny and full.
        out_layer = 77
    elif ch == 2:
        out_layer = 105
    
    cfg_path = input_with_check('# 训练时模型结构配置文件存放在什么位置【训练模型结构路径】：',
        NOT_EMPTY_CHECK,
        FILE_EXIST_CHECK,
        FILE_EXT_CHECK('yaml')
    )
    # Assumes the deploy config mirrors the training config path with
    # 'training' replaced by 'deploy' (standard yolov7 repo layout).
    deploy_cfg_path = cfg_path.replace('training', 'deploy')

    nc = int(input_with_check('# 这个模型所能检测的分类数量是多少：',
        NOT_EMPTY_CHECK,
        DIGIT_CHECK,
    ))

    print()
    print(f'-> 好的，即将进行重参数化，将训练结构[{cfg_path}]转换为部署结构[{deploy_cfg_path}]...')
    reparameterization(model_path, deploy_cfg_path, nc, out_layer, deploy_model_path)
    print(f'$ 重参数化完成，部署结构的模型已保存在[{deploy_model_path}]')

    print(DIVIDER)
    print('$ 下面会将你的模型导出成ONNX模型')
    print(f'-> 正在执行导出命令[{sys.executable} export.py --rknpu RK3588 --simplify --weights {deploy_model_path}]...')
    print()
    # Run the repo's export.py in a subprocess with the same interpreter.
    if sp.run([sys.executable, 'export.py', '--rknpu', 'RK3588', '--simplify', '--weights', deploy_model_path]).returncode != 0:
        print('$ 模型导出失败！')
        exit(1)
    onnx_path = deploy_model_path.replace('.pt', '.onnx')
    if not os.path.exists(onnx_path):
        print('$ 模型导出失败！')
        exit(1)
    # export.py with --rknpu is expected to emit the anchors file in CWD.
    anchors_path = './RK_anchors.txt'
    if not os.path.exists(anchors_path):
        print(f'$ 模型导出失败，未在根目录下检测到[{anchors_path}]文件，说明未能按照正确的方式导出ONNX！')
        exit(1)
    print()
    print(f'$ 导出ONNX模型成功，导出模型保存在[{onnx_path}]，anchor文件生成在[{anchors_path}]')
    return onnx_path, anchors_path

def get_anchors_path():
    """Locate (or interactively ask for) the RK anchors file.

    Prefers ./RK_anchors.txt when present and confirmed by the user;
    otherwise prompts for a path. Returns the chosen path.
    """
    anchors_path = './RK_anchors.txt'

    def get_anchors(anchors_file_path):
        # One float per non-empty line.
        with open(anchors_file_path, 'r') as anchors_file:
            return [float(line.strip()) for line in anchors_file if line.strip() != '']

    if os.path.exists(anchors_path):
        print(f'$ 已找到anchor文件[{anchors_path}]，获取到的anchors为：{get_anchors(anchors_path)}')
        if not query(f'# 是否使用该文件？'):
            anchors_path = None
    if not anchors_path:
        @confirm
        def input_anchors_path():
            anchors_path = input_with_check('# 请输入anchors文件的位置：',
                NOT_EMPTY_CHECK,
                FILE_EXIST_CHECK,
                FILE_EXT_CHECK('txt')
            )
            print(f'$ 获取到的anchors为：{get_anchors(anchors_path)}')
            # BUG FIX: this return was missing, so the confirmed path was
            # discarded and the outer function returned None.
            return anchors_path
        anchors_path = input_anchors_path()
    return anchors_path

def get_onnx_and_anchors():
    """Return (onnx_path, anchors_path), exporting from .pt interactively
    when the user does not already have an ONNX model."""
    if query('# 你的模型是否进行重参数化并转为ONNX格式了？'):
        print()
        onnx_path = input_with_check('# 请输入onnx模型位置：',
            NOT_EMPTY_CHECK,
            FILE_EXIST_CHECK,
            FILE_EXT_CHECK('onnx')
        )
        return onnx_path, get_anchors_path()
    print('$ 好的，这里提供一个交互式模型导出工具，请跟随指引~')
    print()
    return export_model_to_onnx()

def compile_rknn(onnx_path):
    """Quantize and compile an ONNX model into an RK3588 RKNN model.

    Flow: pick quantization-calibration images -> zip model + images ->
    upload to the remote model-compiler service -> poll until it finishes
    (mirroring its logs locally) -> download and unpack the resulting .rknn
    next to the ONNX file.

    Returns the path of the compiled .rknn; exits the program on failure.
    """
    print(DIVIDER)
    print('$ 下面将进行模型编译，你的模型会被量化并被编译为RKNN格式，以能够部署到RK3588开发板中')
    print('$ 当模型量化时，需要一些图像的数据进行量化矫正，提供的数据量越多，量化之后的模型误差越小，但同时量化所需的时间越长。一般只有当量化后的误差非常大的时候才建议使用更多数据进行量化矫正！')
    print()

    @confirm
    def get_qta_images(qta_selected_images_path):
        # Ask for an image directory and pick a random subset for calibration;
        # when more than 6 images are selected, their paths are dumped to
        # qta_selected_images_path for the user to inspect.
        images_root = input_with_check('# 请输入图像数据集目录的位置，程序会递归搜寻该目录下的所有图像：',
            NOT_EMPTY_CHECK,
            FILE_EXIST_CHECK,
            DIR_CHECK
        )
        img_list = [img for ext in ('jpg', 'jpeg', 'png', 'bmp') for img in glob(os.path.join(images_root, f'**/*.{ext}'), recursive=True)]
        if not img_list:
            print('$ 未查找到任何图像，无法进行量化矫正，编译失败！')
            # BUG FIX: previously fell through with an empty list; abort, as
            # the message already states compilation failed.
            exit(1)
        print(f'$ 共查找到{len(img_list)}张图像')
        img_num = int(input_with_check('# 你想用多少张图像进行量化矫正？（默认20张，不足时选取尽量多的图像）：',
            (lambda num: not num or DIGIT_CHECK[0](num), '请输入数字或者留空！'),
            (lambda num: not num or int(num) <= len(img_list), '输入的数字必须小于总图像数量！'),
            default=20
        ))
        print(f'-> 正在打乱顺序并截取{img_num}张图像...')
        print()
        shuffle(img_list)
        img_list = img_list[:img_num]
        print('$ 将选取如下图像进行量化矫正：')
        if img_num > 6:
            with open(qta_selected_images_path, 'w') as qdf:
                for img_path in img_list:
                    qdf.write(f'{img_path}\n')
            [print(f'    {img_path}') for img_path in img_list[:3]]
            print('    ...')
            [print(f'    {img_path}') for img_path in img_list[-3:]]
            print(f'$ 所有被选取图像的路径可以在[{qta_selected_images_path}]中查看')
        else:
            [print(f'    {img_path}') for img_path in img_list]
        return img_list

    qta_selected_images_path = './qta-selected-images.txt'
    img_list = get_qta_images(qta_selected_images_path)
    # BUG FIX: the selection file only exists when more than 6 images were
    # picked; removing it unconditionally raised FileNotFoundError.
    if os.path.exists(qta_selected_images_path):
        os.remove(qta_selected_images_path)
    print(DIVIDER)
    print('-> 正在将模型和量化矫正集打包...')
    zip_file_path = '/tmp/onnx-compiler-archive.zip'
    # Context manager guarantees the archive is closed even if zipping fails.
    with zipfile.ZipFile(zip_file_path, 'w', zipfile.ZIP_DEFLATED) as zip_file:
        with tqdm.trange(2 + len(img_list)) as achive_pb:
            achive_pb.set_description_str('压缩模型')
            zip_file.write(onnx_path, os.path.basename(onnx_path))
            achive_pb.update()
            with tempfile.NamedTemporaryFile('w') as temp_txt:
                for i, img_path in enumerate(img_list):
                    # Images are renamed 0..N-1 inside the archive;
                    # dataset.txt lists these archive-relative paths for the
                    # quantizer.
                    rel_path = f'image/{i}{os.path.splitext(img_path)[-1]}'
                    temp_txt.write(rel_path + '\n')
                    achive_pb.set_description_str(f'压缩图像[{img_path}]')
                    zip_file.write(img_path, rel_path)
                    achive_pb.update()
                achive_pb.set_description_str(f'压缩图像索引文件')
                # Flush explicitly before re-reading the temp file by name.
                temp_txt.flush()
                zip_file.write(temp_txt.name, 'dataset.txt')
                achive_pb.update()
    print()
    print('-> 正在上传压缩包并创建模型编译器...')
    with open(zip_file_path, 'rb') as zf:
        r = requests.post(MODEL_COMPILER_BASE_URL + '/compiler', data={
            'image': 'registry.cn-hangzhou.aliyuncs.com/purplens/rknn-toolkit2-compiler:v1.0',
            'args': '--target_platform="rk3588" --std_values="[[255,255,255]]" --mean_values="[[0, 0, 0]]" --do_quantization="True" --dataset="dataset.txt"'
        }, files={
            'zip_file': zf
        })
    os.remove(zip_file_path)
    model_compiler_api_r_check(r, '模型编译器创建失败')
    compiler_id = r.json()['id']
    print(f'-> 模型编译器创建成功，其ID为[{compiler_id}]')
    compiler_log_path = './compiler.log'
    print(f'-> 正在编译ONNX模型到RKNN平台，编译日志实时同步到文件[{compiler_log_path}]中，可查看进度...')
    status = 'Pending'
    # Poll every 2s until the compiler leaves Pending/Running, mirroring its
    # log output to a local file.
    while status == 'Pending' or status == 'Running':
        r = requests.get(MODEL_COMPILER_BASE_URL + f'/compiler/{compiler_id}?get_logs=true')
        model_compiler_api_r_check(r, '获取编译日志失败，无法连接到模型编译器')
        resp = r.json()
        status = resp['status']
        with open(compiler_log_path, 'w') as compiler_log:
            compiler_log.write(resp['logs'])
        time.sleep(2)
    if status != 'Succeeded':
        print('$ 模型编译失败！')
        exit(1)
    print('$ 模型编译成功')
    print()
    os.remove(compiler_log_path)

    print('-> 正在下载编译后的RKNN模型...')
    with tempfile.TemporaryDirectory() as temp_dir_path:
        rknn_zip = os.path.join(temp_dir_path, 'rknn-model-archive.zip')
        r = requests.get(MODEL_COMPILER_BASE_URL + f'/compiler/{compiler_id}/artifact', stream=True)
        with open(rknn_zip, 'wb') as rzf:
            for data in r.iter_content(1024 * 1024):
                rzf.write(data)
        print('-> 正在解压RKNN模型...')
        # The artifact archive contains the .rknn named after the ONNX model.
        zipfile.ZipFile(rknn_zip, 'r').extractall(os.path.dirname(onnx_path))
        rknn_path = onnx_path.replace('.onnx', '.rknn')
        print(f'$ 解压完成，RKNN模型已保存到[{rknn_path}]中')
    # Best-effort cleanup of the remote compiler instance.
    requests.delete(MODEL_COMPILER_BASE_URL + f'/compiler/{compiler_id}')
    print(DIVIDER)
    return rknn_path

def get_rknn_and_anchors():
    """Return (rknn_path, anchors_path), compiling from ONNX when the user
    does not already have a compiled RKNN model."""
    if query('# 你的模型是否已经编译为RKNN模型了？'):
        print()
        rknn_path = input_with_check('# 请输入RKNN模型位置：',
            NOT_EMPTY_CHECK,
            FILE_EXIST_CHECK,
            FILE_EXT_CHECK('rknn')
        )
        return rknn_path, get_anchors_path()
    print()
    onnx_path, anchors_path = get_onnx_and_anchors()
    return compile_rknn(onnx_path), anchors_path

@contextmanager
def workspace(new_dir):
    """Context manager: chdir into new_dir, restoring the old CWD on exit.

    BUG FIX: the restore now runs in a finally block, so an exception inside
    the with-body no longer leaves the process stranded in new_dir.
    """
    origin_dir = os.getcwd()
    os.chdir(new_dir)
    try:
        yield None
    finally:
        os.chdir(origin_dir)

def login_gitee():
    """Set up SSH and git identity so later git operations against Gitee work.

    SECURITY NOTE(review): this embeds an SSH private key and a shared git
    identity directly in source and writes them into ~/.ssh. The key is
    effectively published with this file and should be rotated and injected
    via secrets management instead of being committed.
    """
    print(DIVIDER)
    print('-> 正在登录Gitee...')
    ssh_cert_dir = os.path.join(str(pathlib.Path.home()), '.ssh')
    os.system(f'mkdir -p {ssh_cert_dir}')
    ssh_cert_path = os.path.join(ssh_cert_dir, 'id_ed25519')
    # Overwrites any pre-existing id_ed25519 key for the current user.
    with open(ssh_cert_path, 'w') as ssh_cert:
        ssh_cert.write('-----BEGIN OPENSSH PRIVATE KEY-----\nb3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW\nQyNTUxOQAAACDyHqLDfKk2CJ+Qc6f9Zk1+JCTnfg6cfSU197y6PMVvewAAAJBWyEjnVshI\n5wAAAAtzc2gtZWQyNTUxOQAAACDyHqLDfKk2CJ+Qc6f9Zk1+JCTnfg6cfSU197y6PMVvew\nAAAECkLt1bUGFeRiqGVEcRABrtiw7sOWDkBM2151jJkbD5SPIeosN8qTYIn5Bzp/1mTX4k\nJOd+Dpx9JTX3vLo8xW97AAAACnp6dS1udnJwcm8BAgM=\n-----END OPENSSH PRIVATE KEY-----\n')
    os.system(f'chmod 600 {ssh_cert_path}')
    # First connection accepts gitee.com's host key (and verifies the key).
    os.system('ssh -o "StrictHostKeyChecking no" -T git@gitee.com')
    os.system('git config --global user.name "agricultureiot"')
    os.system('git config --global user.email "1745822665@qq.com"')

def pull_all_gitee_project(gitee_url, template_pro_name):
    """Clone the model repo and the project template from Gitee into CWD."""
    print('-> 正在从Gitee拉取该模型仓库...')
    ssh_url = http_to_ssh_git_url(gitee_url)
    os.system(f'git clone {ssh_url}')
    print('-> 正在从Gitee拉取NVRPRO-Yolov7-RKNN项目模板...')
    template_url = f'git@gitee.com:research-group-2022/{template_pro_name}.git'
    os.system(f'git clone {template_url}')

def update_project_files_and_push(project_path, rknn_path, anchors_path, classes_path, nvrpro_yaml_path, readme_path):
    """Copy the generated artifacts into the project checkout, then commit
    and push the result to Gitee.

    Pauses before committing so the user can hand-edit the checkout.
    """
    import shutil  # local import keeps this fix self-contained

    print('$ 即将替换Yolov7-rknn模板中的文件')
    # (label for the progress message, source file, destination inside repo)
    replacements = (
        ('模型', rknn_path, 'src/detect/model.rknn'),
        ('Anchors', anchors_path, 'src/detect/anchors.txt'),
        ('标签', classes_path, 'src/detect/classes.txt'),
        ('nvrpro配置', nvrpro_yaml_path, 'nvrpro.yaml'),
        ('README', readme_path, 'README.md'),
    )
    for label, src, dst in replacements:
        print(f'-> 正在替换{label}...')
        # BUG FIX: was os.system(f'cp {src} {dst}'), which breaks on paths
        # containing spaces/shell metacharacters; shutil.copyfile is safe.
        shutil.copyfile(src, os.path.join(project_path, dst))
    print()
    while not query(f'$ AI项目已修改完成，即将提交更改，如有需要，你可以在提交更改前手动在[{project_path}]中修改项目，继续吗？'):
        pass
    with workspace(project_path):
        print('-> 正在提交更改...')
        os.system(f'git add .')
        os.system(f'git commit -m 更新')
        print('-> 正在上传Gitee项目...')
        os.system(f'git push')
    print('$ Gitee项目提交完成')

def get_nvrpro_name(en_name):
    """Prefix the English algorithm name with the nvrpro repo naming scheme."""
    return 'nvrpro-' + en_name

def get_nvrpro_from_git_url(git_url):
    """Extract the nvrpro-… repository name from a git clone URL."""
    start = git_url.rfind('nvrpro')
    return git_url[start:].replace('.git', '')

def get_http_git_url(en_name):
    """Build the HTTPS clone URL for an algorithm's Gitee repository."""
    return 'https://gitee.com/research-group-2022/nvrpro-{}.git'.format(en_name)

def get_ssh_git_url(en_name):
    """Build the SSH clone URL for an algorithm's Gitee repository."""
    return 'git@gitee.com:research-group-2022/nvrpro-{}.git'.format(en_name)

def http_to_ssh_git_url(url):
    """Convert an HTTPS Gitee URL into its SSH form; SSH URLs pass through."""
    if url.startswith('git@gitee.com'):
        return url
    repo = url[url.rfind('nvrpro-'):]
    return 'git@gitee.com:research-group-2022/' + repo

def filter_git_model(models):
    """Keep only models whose giteeAddress looks like a git clone URL
    (HTTP(S) or SSH form)."""
    def is_git_url(url):
        # BUG FIX: the SSH pattern was r'git@.*:\.git', requiring ".git"
        # immediately after the ":" — it could never match a real URL such
        # as git@gitee.com:org/repo.git.
        return re.match(r'https?://.*\.git', url) is not None or re.match(r'git@.*:.*\.git', url) is not None
    return [model for model in models if is_git_url(model["giteeAddress"])]

def get_model_hub_models(user_id=None):
    """Fetch git-backed models from the model hub, optionally restricted to
    one owner's user id."""
    r = requests.get(MODEL_HUB_BASE_URL + '/model/openapi/selectAll?giteeIsNotNull=True')
    model_hub_api_r_check(r, '获取模型库模型算法列表失败')

    models = filter_git_model(r.json()['content'])
    if user_id is None:
        return models
    return [model for model in models if model['userId'] == user_id]

def increase_version(version):
    """Bump a 'vN' version string to 'v(N+1)'."""
    number = int(version[1:])
    return 'v{}'.format(number + 1)

@confirm
def input_classes():
    """Ask for the ordered, space-separated Chinese class labels and echo
    them back (indexed) for the user to confirm."""
    classes = input_with_check(
        '# 模型的中文标签分类，按顺序填写，不同标签中间用空格隔开[如"戴安全帽的人 没戴安全帽的人"]（请一定如实填写，模型能检测的分类需要按顺序全部写上）：',
        NOT_EMPTY_CHECK
    ).split(' ')

    print('$ 模型检测的分类为：')
    for index, label in enumerate(classes):
        print(f'[{index}] {label}')
    return classes

@confirm
def input_base_info(ignore_en_name=False, ignore_model=None):
    """Interactively collect the algorithm's basic information.

    Args:
        ignore_en_name: skip the English-name / git-URL questions (used when
            updating an existing project whose repo already exists).
        ignore_model: a model dict excluded from the duplicate-name checks
            (the entry currently being updated).

    Returns:
        (zh_name, en_name, des, gitee_url) when ignore_en_name is False,
        otherwise (zh_name, des).
    """
    print(DIVIDER)
    
    exist_models = get_model_hub_models()
    if ignore_model:
        exist_models = [model for model in exist_models if model['id'] != ignore_model['id']]

    print('$ 目前已存在的算法为：')
    if exist_models:
        for i, model in enumerate(exist_models):
            print(f'{i + 1}.', model['name'], f'[{model["des"]}]', f'({model["giteeAddress"]})')
    else:
        print('$ 没有任何模型信息...')
    print()
    print('$ 现在请输入你算法的相关信息：')
    # The Chinese display name must be unique across existing models.
    zh_name = input_with_check(
        '# 中文名称（会展示给用户看，请不要与上述模型的算法名称重复）[如"安全帽检测"]：', 
        NOT_EMPTY_CHECK,  
        MAX_LENGTH_CHECK(20),
        (lambda zh_name: zh_name not in map(lambda model: model['name'], exist_models), '中文名称重复，请重新命名!')
    )
    if not ignore_en_name:
        # The English name determines the repo URL, which must also be unique.
        en_name = input_with_check(
            '# 英文名称（生成Git地址使用）[如helmet-detect]：', 
            NOT_EMPTY_CHECK, 
            MAX_LENGTH_CHECK(50),
            MIN_LENGTH_CHECK(2),
            (lambda en_name: get_http_git_url(en_name) not in map(lambda model: model['giteeAddress'], exist_models), '根据英文名称生成的Git地址重复，请重新命名！')
        )
        gitee_url = get_http_git_url(en_name)
    des = input_with_check(
        '# 算法描述（会展示给用户看，简单介绍你的算法是为了干啥）[如"可以检测戴安全帽的人以及没有戴安全帽的人"]：',
        NOT_EMPTY_CHECK, 
        MAX_LENGTH_CHECK(50),
    )

    print(DIVIDER)
    print('$ 你的算法信息为：')
    print(f'* 中文名称：{zh_name}')
    print(f'* 描述：{des}')
    if not ignore_en_name:
        print(f'* 英文名称：{en_name}')
        print(f'* Gitee地址为：{gitee_url}')
    print(DIVIDER)
    if not ignore_en_name:
        return zh_name, en_name, des, gitee_url
    return zh_name, des

def choose_update_model():
    """Let the user pick one of their own model-hub entries.

    Exits when the current user has no models to update.
    """
    models = get_model_hub_models(USER_ID)
    if not models:
        print('$ 你在模型库还没有任何AI项目，请先重新运行本程序进行创建！')
        exit(1)
    print()
    descriptions = (f'{model["name"]} [{model["des"]}] ({model["giteeAddress"]})' for model in models)
    model_choice = choose('$ 下面是你在模型库中的模型：', *descriptions)
    return models[model_choice - 1]

def generate_nvrpro_conf(temp_conf_path, save_to_path, name, des, version, model_version, git_url, classes):
    """Load the template nvrpro.yaml, patch its fields, and write the result
    to save_to_path."""
    with open(save_to_path, 'w') as out_file:
        with open(temp_conf_path, 'r') as template_file:
            conf = yaml.safe_load(template_file)
        updates = (
            ('name', name),
            ('description', des),
            ('version', version),
            ('model_version', model_version),
            ('git_url', git_url),
        )
        for key, value in updates:
            conf[key] = value
        conf['alert']['objects'] = classes
        yaml.safe_dump(conf, out_file, allow_unicode=True)

def main():
    """Interactive driver: create or update an NVRPRO AI project.

    Create: collect the model + metadata, create the Gitee repo and the
    model-hub entry, populate the repo from the template, push.
    Update: pick one of the user's existing models, optionally replace the
    model/metadata, bump the version, push.
    """
    print(DIVIDER)
    print('$ 提醒：请不要随意中断程序')
    template_pro_name = 'nvrpro-airockchip-yolov7-rknn'
    nvrpro_conf_path = 'nvrpro.yaml'
    classes_path = 'classes.txt'
    readme_path = 'README.md'

    ch = choose('$ 你想新创建还是更新你现有的AI项目？', '创建', '更新')
    # Flow: create/pull repo -> modify -> push
    # |- obtain the model
    with tempfile.TemporaryDirectory() as temp_dir:
        if ch == 1:
            rknn_path, anchors_path = get_rknn_and_anchors()
            print()
            classes = input_classes()
            print()
            zh_name, en_name, des, gitee_url = input_base_info()
            nvrpro_name = get_nvrpro_name(en_name)
            print()

            login_gitee()
            print('-> 正在创建Gitee仓库...')
            # SECURITY NOTE(review): hard-coded account credentials and OAuth
            # client secret below — these should come from configuration or a
            # secrets manager rather than being committed in source.
            r = requests.post(GITEE_BASE_URL + '/oauth/token', params={
                'grant_type': 'password',
                'username': '1745822665@qq.com',
                'password': '875079028',
                'client_id': 'ffa696287e45d9a31597c51f749714ed2e66782a1cb2dece1a61dfd25ab3578c',
                'client_secret': '8a47849939928c81138c41cc85b11c50fefeb7bdbd3fa4dd76cefc9faff3fbb8',
                'scope': 'projects groups'
            })
            gitee_api_r_check(r, '获取Gitee认证失败')
            token = r.json()['access_token']

            # Create the repo under the research-group-2022 organization.
            gitee_api_r_check(requests.post(GITEE_BASE_URL + '/api/v5/orgs/research-group-2022/repos', data={
                'access_token': token,
                'name': nvrpro_name,
                'description': des,
                'homepage': 'https://gitee.com/research-group-2022/nvrpro-airockchip-yolov7-rknn',
                'public': 1,
                'path': nvrpro_name
            }), '创建Gitee仓库失败')

            print('-> 正在模型库创建相应的模型...')
            model_hub_api_r_check(requests.post(MODEL_HUB_BASE_URL + '/model/manage/add', json={
                'userId': USER_ID,
                'categoryId': CATEGORY_ID,  # category id for object detection
                'name': zh_name,
                'giteeAddress': gitee_url,
                'des': des,
                'img': '/model/imgupload/2023/6/08dbdf7c9f084a6f81c888cf3d4ca7f4.jpg',
                'versionId': 0
            }), '创建模型失败')
        
            with workspace(temp_dir):
                pull_all_gitee_project(http_to_ssh_git_url(gitee_url), template_pro_name)
                print('-> 正在复制模板文件...')
                os.system(f'cp -r {template_pro_name}/* {nvrpro_name}/')
                print('-> 正在生成nvrpro配置文件...')
                generate_nvrpro_conf(
                    os.path.join(template_pro_name, nvrpro_conf_path), 
                    nvrpro_conf_path,
                    name=zh_name,
                    des=des,
                    version='v1',
                    model_version='v1',
                    git_url=get_ssh_git_url(en_name),
                    classes=classes,
                )
                
                print('-> 正在生成标签文件...')
                with open(classes_path, 'w') as classes_file:
                    classes_file.writelines(map(lambda line: line + '\n', classes))
                
                print('-> 正在生成README...')
                with open(readme_path, 'w') as readme_file:
                    readme_file.write('# 说明\n')
                    readme_file.write('该项目来自AI项目模板：[nvrpro-airockchip-yolov7-rknn](https://gitee.com/research-group-2022/nvrpro-airockchip-yolov7-rknn.git)\n\n')
                    readme_file.write('由交互式部署工具deploy_to_RK3588创建\n\n')
                    readme_file.write('...by PurpleSky')

            update_project_files_and_push(
                os.path.join(temp_dir, nvrpro_name),
                rknn_path,
                anchors_path, 
                os.path.join(temp_dir, classes_path),
                os.path.join(temp_dir, nvrpro_conf_path),
                os.path.join(temp_dir, readme_path),
            )
        elif ch == 2:
            if not query('$ 请注意，这里只能更新通过这个脚本所创建的AI项目，如果你的不是，请联系管理员将之前的AI项目删除再通过该脚本重新创建，否则请手动更新AI项目，是否继续？'):
                exit(0)
            with workspace(temp_dir):
                model = choose_update_model()
                git_url = http_to_ssh_git_url(model['giteeAddress'])
                nvrpro_name = get_nvrpro_from_git_url(git_url)

                login_gitee()
                pull_all_gitee_project(git_url, template_pro_name)

            print(DIVIDER)
            if query('$ 是否要更新模型？'):
                rknn_path, anchors_path = get_rknn_and_anchors()
                print()
                classes = input_classes()
                print('-> 正在生成标签文件...')
                with workspace(temp_dir):
                    with open(classes_path, 'w') as classes_file:
                        classes_file.writelines(map(lambda line: line + '\n', classes))
            else:
                # Keep the existing model: stash the current artifacts out of
                # the checkout before it is wiped and re-seeded below.
                with workspace(temp_dir):
                    print('-> 正在暂存原模型...')
                    rknn_path = os.path.realpath('model.rknn')
                    anchors_path = os.path.realpath('anchors.txt')
                    os.system(f'mv {os.path.join(nvrpro_name, "src/detect/model.rknn")} {rknn_path}')
                    os.system(f'mv {os.path.join(nvrpro_name, "src/detect/anchors.txt")} {anchors_path}')
                    os.system(f'mv {os.path.join(nvrpro_name, "src/detect/classes.txt")} {classes_path}')
            
            with workspace(temp_dir):
                with open(os.path.join(nvrpro_name, nvrpro_conf_path), 'r') as original_conf_file:
                    conf = yaml.safe_load(original_conf_file)
                
                print()
                zh_name, des = conf['name'], conf['description']
                if query('$ 是否要更新基本信息（中文名称、描述）？'):
                    zh_name, des = input_base_info(ignore_en_name=True, ignore_model=model)
                
                with open(classes_path, 'r') as classes_file:
                    classes = [cl.strip() for cl in classes_file if cl.strip()]

                print('-> 正在修改nvrpro配置...')
                new_version = increase_version(conf['version'])
                generate_nvrpro_conf(
                    os.path.join(template_pro_name, nvrpro_conf_path), 
                    nvrpro_conf_path,
                    name=zh_name,
                    des=des,
                    version=new_version,
                    model_version=conf['model_version'], 
                    git_url=conf['git_url'],
                    classes=classes
                )
                print('-> 正在修改模型库信息...')
                model_hub_api_r_check(requests.put(MODEL_HUB_BASE_URL + '/model/manage/update', json={
                    'id': model['id'],
                    'name': zh_name,
                    'des': des,
                    'categoryId': CATEGORY_ID,
                    'userId': USER_ID,
                }), '修改模型库中的模型信息失败')

                print('-> 正在暂存README...')
                os.system(f'mv {os.path.join(nvrpro_name, readme_path)} {readme_path}')
                
                print(DIVIDER)
                print(f'$ 新版本为[{new_version}]')
                
                print('-> 正在复制模板文件...')
                # Wipe the checkout and re-seed it from the template; the
                # stashed/new artifacts are copied back in by the push step.
                os.system(f'rm -rf {nvrpro_name}/*')
                os.system(f'cp -r {template_pro_name}/* {nvrpro_name}/')

            update_project_files_and_push(
                os.path.join(temp_dir, nvrpro_name),
                rknn_path,
                anchors_path, 
                os.path.join(temp_dir, classes_path),
                os.path.join(temp_dir, nvrpro_conf_path),
                os.path.join(temp_dir, readme_path)
            )
    
    print()
    print('$ 部署/更新完成，可联系管理员到RK3588开发板的智能监控管理系统中使用该模型并查看在真实环境中的效果:D')

# Script entry point.
if __name__ == '__main__':
    main()