import os
import shlex
import shutil
import subprocess
from subprocess import DEVNULL

import pynvml
from InquirerPy import inquirer
from InquirerPy.base.control import Choice
from InquirerPy.separator import Separator
from prompt_toolkit.document import Document
from prompt_toolkit.validation import Validator, ValidationError

# Expected local path of the pinned NVIDIA driver installer (version 535.183.01).
NVIDIA_DRIVER_INSTALLER_FILE = '/usr/local/bin/NVIDIA-Linux-x86_64-535.183.01.run'


def ensure_nvidia_driver_install():
    """
    Ensure the NVIDIA driver installer exists locally; download it when missing.

    Keeping the installer around allows the driver to be reinstalled later if
    GPU queries start failing.
    :raises subprocess.CalledProcessError: if the download command fails.
    """
    if not os.path.exists(NVIDIA_DRIVER_INSTALLER_FILE):
        # NOTE(review): --no-check-certificate disables TLS verification; kept
        # for environments with broken CA bundles, but it weakens integrity.
        subprocess.run(['wget', '-P', '/usr/local/bin', '--no-check-certificate',
                        'https://us.download.nvidia.com/tesla/535.183.01/NVIDIA-Linux-x86_64-535.183.01.run'],
                       stdout=subprocess.DEVNULL,  # wget progress goes to stderr; stdout was captured but never read
                       check=True)  # fail loudly: later steps run this installer file


def do_install_nvidia_driver():
    """Run the local NVIDIA driver installer and block until it finishes."""
    installer = subprocess.Popen(f'bash {NVIDIA_DRIVER_INSTALLER_FILE}', shell=True)
    installer.wait()


def get_gpu_free_mb():
    """Refresh the module-level ``device_count`` and ``gpu_free_mb`` globals.

    ``gpu_free_mb`` is the total free VRAM (in MB) summed across all GPUs.
    """
    global device_count, gpu_free_mb
    pynvml.nvmlInit()
    try:
        device_count = pynvml.nvmlDeviceGetCount()
        free_bytes = sum(
            pynvml.nvmlDeviceGetMemoryInfo(pynvml.nvmlDeviceGetHandleByIndex(idx)).free
            for idx in range(device_count))
        gpu_free_mb = free_bytes / (1024 ** 2)  # bytes -> MB
    finally:
        # Release NVML resources even if a query raises (original leaked the handle).
        pynvml.nvmlShutdown()


def check_process_on_gpu():
    """List processes currently holding GPU memory and let the user kill selected ones."""
    run = subprocess.run(['nvidia-smi', '--query-compute-apps=pid,process_name', '--format=csv,noheader'],
                         capture_output=True,  # capture stdout and stderr
                         text=True)
    choices = []
    for line in run.stdout.splitlines():
        # nvidia-smi CSV uses ", " as separator — strip each field so process
        # names don't carry a leading space; skip blank/malformed rows.
        parts = [p.strip() for p in line.split(',')]
        if len(parts) < 2 or not parts[0]:
            continue
        choices.append(Choice(name=parts[1], value=','.join(parts)))
    if not choices:
        return
    selects = inquirer.select(message='如下程序正在占用GPU， 请选择需要关闭的进程', choices=choices,
                              multiselect=True).execute()
    for select in selects or []:
        pid, _, proc_name = select.partition(',')
        print(f'正在关闭{proc_name}')
        subprocess.run(['kill', '-9', pid])


def install_nvidia_driver():
    """Ask for confirmation, then install the NVIDIA driver and refresh GPU stats; exit otherwise."""
    confirmed = inquirer.confirm('无法获取显卡信息, 是否安装驱动?', default=True).execute()
    if not confirmed:
        print('终止程序')
        exit(1)
    do_install_nvidia_driver()
    get_gpu_free_mb()


def check_gpu_driver():
    """Verify the NVIDIA driver works, offering (re)installation until `nvidia-smi` succeeds.

    Skipped entirely when a ``debug.yaml`` marker file exists next to this script.
    """
    current_dir = os.path.dirname(os.path.abspath(__file__))
    yaml_path = os.path.join(current_dir, 'debug.yaml')
    if os.path.exists(yaml_path):
        return  # debug mode: skip the driver check
    # shutil.which avoids spawning a `which` subprocess for a simple PATH lookup
    if shutil.which('nvidia-smi') is None:
        install_nvidia_driver()
    while True:
        run = subprocess.run(['nvidia-smi'], stdout=subprocess.DEVNULL)
        if run.returncode == 0:
            check_process_on_gpu()
            get_gpu_free_mb()
            break
        # nvidia-smi present but failing: offer reinstall and retry
        install_nvidia_driver()


# Total free GPU memory in MB across all devices; refreshed by get_gpu_free_mb().
gpu_free_mb = 0

# Number of NVIDIA GPUs detected; refreshed by get_gpu_free_mb().
device_count = 0


def reload_and_restart(service_name: str = 'ollama') -> None:
    """Reload the systemd configuration, then restart the given service.

    :raises subprocess.CalledProcessError: if either systemctl command fails.
    """
    commands = (
        ["systemctl", "daemon-reload"],
        ["systemctl", "restart", f"{service_name}.service"],
    )
    for cmd in commands:
        subprocess.run(cmd, check=True)


def get_service_file_path(service_name: str = 'ollama') -> str:
    """Return the unit-file path for *service_name* by parsing `systemctl status` output.

    :raises FileNotFoundError: when the path cannot be located in the output.
    :raises subprocess.CalledProcessError: when systemctl itself fails.
    """
    result = subprocess.run(
        ["systemctl", "status", f"{service_name}.service"],
        capture_output=True,
        text=True,
        check=True,
    )
    # The status header looks like:
    #   Loaded: loaded (/etc/systemd/system/ollama.service; enabled; ...)
    # BUG FIX: the original returned the token right after "Loaded:" — the
    # word "loaded" — never the path. Extract the parenthesized path instead.
    for line in result.stdout.splitlines():
        if "Loaded:" in line and "(" in line:
            inside = line.split("(", 1)[1]
            path = inside.split(";", 1)[0].rstrip(")").strip()
            if path:
                return path
    raise FileNotFoundError(f"服务 {service_name} 的路径未找到")


def get_service_environment(file_path: str = '/etc/systemd/system/ollama.service') -> dict:
    """Parse a systemd unit file and return its [Service] Environment variables as a dict.

    Handles quoted assignments (``Environment="K=V"`` — the form this project's
    own unit file uses) as well as multiple assignments on one line
    (``Environment=A=1 B=2``).

    :param file_path: path of the .service file to read.
    :return: mapping of environment variable name to value.
    """
    environment = {}
    in_service_section = False
    with open(file_path, "r") as f:
        for raw_line in f:
            line = raw_line.strip()
            if line.startswith("[Service]"):
                in_service_section = True
            elif line.startswith("["):
                in_service_section = False
            elif in_service_section and line.startswith("Environment="):
                value_part = line[len("Environment="):]
                try:
                    # BUG FIX: plain .split() left the opening quote attached to
                    # the key for Environment="K=V"; shlex honors the quoting.
                    parts = shlex.split(value_part)
                except ValueError:
                    # Unbalanced quotes: fall back to the original whitespace split.
                    parts = value_part.split()
                for part in parts:
                    key, sep, val = part.partition("=")
                    if sep:
                        environment[key] = val
    return environment


def vllm_download_model(model: str, cache_dir: str = '/root', revision='master') -> str:
    """Download *model* from ModelScope into *cache_dir* and return the local model directory.

    :param model: ModelScope model id, e.g. ``deepseek-ai/DeepSeek-R1-Distill-Qwen-7B``.
    :param cache_dir: directory to cache the downloaded model under.
    :param revision: model revision to fetch.
    :return: filesystem path of the downloaded model.
    """
    # exist_ok avoids the racy exists()+makedirs() pair; the redundant
    # os.path.join(cache_dir) wrappers were no-ops and are dropped.
    os.makedirs(cache_dir, exist_ok=True)
    from modelscope import snapshot_download
    model_dir = snapshot_download(model, cache_dir=cache_dir, revision=revision)
    print(f"模型下载完成, 存储目录:{model_dir}")
    return model_dir


# Required VRAM (MB) of the most recently validated model choice; set by GpuValidator.
require_gpu = None


class GpuValidator(Validator):
    """Validate a model choice of the form ``"<model_id>|<min_vram_gb>"`` against free VRAM.

    Side effect: stores the requirement (in MB) in the module-level ``require_gpu``.
    """

    def validate(self, document: Document) -> None:
        if not document or not document.text:
            # NOTE(review): looks like leftover debugging output; kept to preserve behavior.
            print(document)
        else:
            global require_gpu
            parts = document.text.split('|')
            if len(parts) < 2:
                # BUG FIX: a value without '|' used to raise IndexError inside
                # the prompt; treat it as "no VRAM requirement" instead.
                return
            required_gb = parts[1].strip()
            require_gpu = float(required_gb) * 1024  # GB -> MB, same unit as gpu_free_mb
            invalid_message = f"可用显存不足, 最少需要{required_gb}G, 剩余{gpu_free_mb / 1024:0.1f} GB"
            if require_gpu > gpu_free_mb:
                raise ValidationError(message=invalid_message)


class ModelCacheDirValidator(Validator):
    """Validate (and create, when absent) the model cache directory the user typed in."""

    def __init__(self, install_by):
        # 'ollama' additionally chowns the directory to the ollama service user.
        self.type = install_by

    def validate(self, document):
        path = document.text
        if not path:
            raise ValidationError(message='模型存储文件夹不能为空')
        if not os.path.exists(path):
            os.makedirs(path)
        if self.type == 'ollama':
            subprocess.run(['chown', '-R', 'ollama:ollama', path])
        if not os.path.isdir(path):
            raise ValidationError(message='输入路径不是一个文件夹')


def main():
    """Entry point: verify the GPU driver, then dispatch to the chosen install flow."""
    ensure_nvidia_driver_install()
    check_gpu_driver()
    install_by = inquirer.select(
        message="选择安装方式:",
        choices=[
            "Ollama",
            "vLLM",
            Choice(value=None, name="退出"),
        ],
        default="Ollama",
    ).execute()
    if not install_by:
        exit(0)
    handlers = {"Ollama": install_by_ollama, "vLLM": install_by_vllm}
    handler = handlers.get(install_by)
    if handler is not None:
        handler()


# NOTE(review): '~' is NOT expanded by open()/os.makedirs()/configparser —
# every use site must pass this through os.path.expanduser() first.
VLLM_INSTALL_CONF_FILE = '~/.vllm/install.conf'
# Default on-disk cache directory for vLLM models.
VLLM_MODEL_CACHE_DIR = '/data/models/vllm'


def install_by_vllm():
    """Interactive vLLM flow: pick a model, download it, then launch the OpenAI-compatible server."""
    # BUG FIX: os.makedirs("~/.vllm/") and `touch ~/.vllm/install.conf` did NOT
    # expand '~' (no shell involved), so the original created a literal "./~"
    # directory and failed to read/write the config file. Expand explicitly.
    conf_path = os.path.expanduser(VLLM_INSTALL_CONF_FILE)
    os.makedirs(os.path.dirname(conf_path), exist_ok=True)
    if not os.path.exists(conf_path):
        open(conf_path, 'a').close()  # replaces the broken `touch` subprocess
    import configparser
    config = configparser.ConfigParser()
    config.read(conf_path)
    default_config = config['DEFAULT']
    if not default_config:
        config['DEFAULT'] = {
            'VLLM_MODEL_CACHE_DIR': VLLM_MODEL_CACHE_DIR,
        }
    # .get() also covers an existing config file that lacks this key (was a KeyError).
    vllm_model_cache_dir = config['DEFAULT'].get('VLLM_MODEL_CACHE_DIR') or VLLM_MODEL_CACHE_DIR

    # Choice values encode "model_id|min_vram_gb"; GpuValidator parses the GB part.
    vllm_model_selected = inquirer.select(
        message="选择下载的模型:",
        choices=[
            Separator(line="【DeepSeek-R1】:括号内为最低显存要求"),
            Choice(name="DeepSeek-R1-Distill-Qwen-32B(80G)", value="deepseek-ai/DeepSeek-R1-Distill-Qwen-32B|80"),
            Choice(name="DeepSeek-R1-Distill-Qwen-14B(36G)", value="deepseek-ai/DeepSeek-R1-Distill-Qwen-14B|36"),
            Choice(name="DeepSeek-R1-Distill-Llama-70B(172G)",
                   value="deepseek-ai/DeepSeek-R1-Distill-Llama-70B|172"),
            Choice(name="DeepSeek-R1-Distill-Qwen-7B(18.8G)", value="deepseek-ai/DeepSeek-R1-Distill-Qwen-7B|18.8"),
            Choice(name="DeepSeek-R1-Distill-Llama-8B(19.6G)",
                   value="deepseek-ai/DeepSeek-R1-Distill-Llama-8B|19.6"),
            Choice(name="DeepSeek-R1-Distill-Qwen-1.5B(4.4G)",
                   value="deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B|4.4"),
            Separator(),
            Separator(line="【QwQ】:括号内为最低显存要求"),
            Choice(name="QwQ-32b(80GB)", value="Qwen/QwQ-32B|80"),
        ],
        default="deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
        validate=GpuValidator(),
    ).execute()
    if not vllm_model_selected:
        exit(0)
    model_id = vllm_model_selected.split('|')[0].strip()
    model_name = model_id.split('/')[-1].strip()
    cache_dir = inquirer.text(message='请输入模型缓存的文件夹路径:', default=vllm_model_cache_dir,
                              validate=ModelCacheDirValidator('vllm')).execute()
    config['DEFAULT']['VLLM_MODEL_CACHE_DIR'] = cache_dir
    update_install_conf(config)
    model = vllm_download_model(model_id, cache_dir=cache_dir)

    run_now = inquirer.confirm('模型下载完成，是否立即运行？', default=True).execute()
    if not run_now:
        exit(0)

    # vLLM has no terminal chat; offer a ready-made curl command for testing.
    messages = '{"role": "user", "content": "你好，请介绍下自己\\n"}'
    test_cmd = f"""
    curl http://localhost:8000/v1/chat/completions \\
      -H "Content-Type:application/json" \\
      -d '{{"model": "{model_name}","messages": [{messages}]}}'
    """

    copy_curl_cmd = inquirer.confirm(default=True,
                                     message=f'【提示】vllm没有终端交互功能，启动完成后需要采用http请求方式测试，比如：\n{test_cmd}\n, 是否复制以上命令').execute()
    if copy_curl_cmd:
        import pyperclip
        pyperclip.copy(test_cmd)
        print('测试命令已复制~')

    max_model_token = inquirer.select(message="请选择最大token数：", choices=[
        1024, 2048, 4096, 8172, 16384
    ], default=1024).execute()
    quantization_cmd = ''  # reserved: no quantization flag is offered yet
    dtype = inquirer.select('选择数据类型',
                            choices=[
                                Choice(
                                    name="半精度浮点(half/float16): 适用于大多数 NVIDIA GPU（如 A100、H100、V100等），平衡显存占用与计算效率",
                                    value="float16"),
                                Choice(
                                    name="【不推荐】单精度浮点vfloat/float32v，保留最高精度，但显存占用和计算开销较大，通常用于调试或特殊场景",
                                    value="float32"),
                            ], default="float16").execute()
    # Tensor-parallel across all detected GPUs (device_count set by get_gpu_free_mb).
    tp_cmd = f' -tp {device_count} ' if device_count else ''
    dtype_cmd = f' --dtype {dtype} ' if dtype else ''

    daemon = inquirer.confirm('是否后台运行？', default=True).execute()
    daemon_cmd = ' &' if daemon else ''  # trailing '&' backgrounds the server via the shell
    vllm_run_cmd = f"python -m vllm.entrypoints.openai.api_server --model {model} \
    {dtype_cmd}  {tp_cmd}  {quantization_cmd} \
    --served-model-name {model_name} --max-model-len={max_model_token} {daemon_cmd}"
    print(f'开始启动:\n{vllm_run_cmd}')
    subprocess.Popen(vllm_run_cmd, shell=True).wait()


def update_install_conf(config):
    """Persist the vLLM install config to VLLM_INSTALL_CONF_FILE.

    :param config: a ``configparser.ConfigParser`` instance to serialize.
    """
    # BUG FIX: open() does not expand '~'; the original wrote to a literal
    # "~/.vllm/..." relative path. Expand and ensure the parent dir exists.
    conf_path = os.path.expanduser(VLLM_INSTALL_CONF_FILE)
    os.makedirs(os.path.dirname(conf_path), exist_ok=True)
    with open(conf_path, 'w') as vllm_install_conf:
        config.write(vllm_install_conf)


def install_by_ollama():
    """Interactive Ollama flow: relocate the model cache if requested, pick a model, pull and run it."""
    ollama_is_running = subprocess.run(['ollama', 'list'], stdout=DEVNULL, stderr=DEVNULL).returncode == 0

    result = subprocess.run(['systemctl', 'show', 'ollama', '-p', 'Environment'],
                            capture_output=True,
                            text=True, check=True)
    envs = result.stdout.strip().replace("Environment=", "").split()
    for env in envs:
        if env.startswith("OLLAMA_MODELS="):
            # maxsplit=1 keeps paths that themselves contain '=' intact
            ollama_model_dir = env.split("=", 1)[1]
            cache_dir = inquirer.text(message='请输入模型缓存的文件夹路径:', default=ollama_model_dir,
                                      validate=ModelCacheDirValidator('ollama')).execute()
            if (not os.path.exists(cache_dir)) or ollama_model_dir != cache_dir:
                # BUG FIX: the original wrapped the sed script in literal single
                # quotes (invalid when exec'd without a shell) and its replacement
                # dropped the "OLLAMA_MODELS=" prefix, corrupting the unit file.
                subprocess.run(['sed', '-i',
                                f's#Environment="OLLAMA_MODELS=.*"#Environment="OLLAMA_MODELS={cache_dir}"#g',
                                '/etc/systemd/system/ollama.service'])
                reload_and_restart()
            break
    if not ollama_is_running:
        print('ollama服务异常')
        exit(1)

    # Choice values encode "model_tag|min_vram_gb"; GpuValidator parses the GB part.
    ollama_model_selected = inquirer.select(
        message="选择下载的模型:",
        choices=[
            Separator(line="【DeepSeek-R1】:括号内为最低显存要求"),
            Choice(name="DeepSeek-R1-Distill-Qwen-32B(20GB)", value="deepseek-r1:32b|20"),
            Choice(name="DeepSeek-R1-Distill-Qwen-32B-fp16(66GB)", value="deepseek-r1:32b-qwen-distill-fp16|66"),
            Choice(name="DeepSeek-R1-Distill-Qwen-32B-q8_0(35GB)", value="deepseek-r1:32b-qwen-distill-q8_0|35"),

            Choice(name="DeepSeek-R1-Distill-Qwen-14B(9.0GB)", value="deepseek-r1:14b|9.0"),
            Choice(name="DeepSeek-R1-Distill-Qwen-14B-fp16(30GB)", value="deepseek-r1:14b-qwen-distill-fp16|30"),
            Choice(name="DeepSeek-R1-Distill-Qwen-14B-q8_0(16GB)", value="deepseek-r1:14b-qwen-distill-q8_0|16"),

            Choice(name="DeepSeek-R1-Distill-Llama-70B(43GB)", value="deepseek-r1:70b|43"),
            Choice(name="DeepSeek-R1-Distill-Llama-70B-fp16(141GB)", value="deepseek-r1:70b-llama-distill-fp16|141"),
            Choice(name="DeepSeek-R1-Distill-Llama-70B-q8_0(75GB)", value="deepseek-r1:70b-llama-distill-q8_0|75"),

            Choice(name="DeepSeek-R1-Distill-Qwen-7B(4.7GB)", value="deepseek-r1:7b|4.7"),
            Choice(name="DeepSeek-R1-Distill-Qwen-7B-fp16(15GB)", value="deepseek-r1:7b-qwen-distill-fp16|15"),
            Choice(name="DeepSeek-R1-Distill-Qwen-7B-q8_0(8.1GB)", value="deepseek-r1:7b-qwen-distill-q8_0|8.1"),

            Choice(name="DeepSeek-R1-Distill-Llama-8B(4.9GB)", value="deepseek-r1:8b|4.9"),
            Choice(name="DeepSeek-R1-Distill-Llama-8B-fp16(16GB)", value="deepseek-r1:8b-llama-distill-fp16|16"),
            Choice(name="DeepSeek-R1-Distill-Llama-8B-q8_0(8.5GB)", value="deepseek-r1:8b-llama-distill-q8_0|8.5"),

            Choice(name="DeepSeek-R1-671B(404GB)", value="deepseek-r1:671b|404"),
            Choice(name="DeepSeek-R1-671B(1.3TB)", value="deepseek-r1:671b-fp16|1331.2"),
            Choice(name="DeepSeek-R1-671B(713GB)", value="deepseek-r1:671b-q8_0|713"),

            Choice(name="DeepSeek-R1-Distill-Qwen-1.5B(1.1GB)", value="deepseek-r1:1.5b|1.1"),
            Choice(name="DeepSeek-R1-Distill-Qwen-1.5B-fp16(3.6GB)", value="deepseek-r1:1.5b-qwen-distill-fp16|3.6"),
            Choice(name="DeepSeek-R1-Distill-Qwen-1.5B-q8_0(1.9GB)", value="deepseek-r1:1.5b-qwen-distill-q8_0|1.9"),

            Separator(),
            Separator(line='【QwQ】:括号内为最低显存要求'),
            Choice(name="32b(20GB)", value="qwq:32b|20"),
            Choice(name="32b-q8_0(35GB)", value="qwq:32b-q8_0|35"),
            Choice(name="32b-fp16(66GB)", value="qwq:32b-fp16|66"),
        ],
        default="deepseek-r1:32b|20",
        validate=GpuValidator(),
    ).execute()
    if not ollama_model_selected:
        exit(0)

    model_name = ollama_model_selected.split('|')[0].strip()
    subprocess.Popen(f"ollama pull {model_name}", shell=True).wait()
    run_now = inquirer.confirm('模型下载完成，是否立即运行？', default=True).execute()
    if run_now:
        subprocess.Popen(f"ollama run {model_name}", shell=True).wait()


if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Exit quietly on Ctrl-C instead of dumping a traceback.
        exit(0)
