import pynvml
import time
import numpy as np
from collections import deque
import argparse
import requests

# Send a notification through a Feishu (Lark) bot webhook.
def send_message(token, message):
    """Post *message* as a plain-text notification to a Feishu bot webhook.

    Parameters:
        token: webhook token appended to the Feishu bot hook URL.
        message: text body of the notification.

    All network errors are caught and printed; this function never raises,
    so a webhook failure cannot kill the monitoring loop.
    """
    headers = {
        'Content-Type': 'application/json'
    }
    data = {
        'msg_type': 'text',
        'content': {
            'text': message
        }
    }
    try:
        # A timeout is essential here: without one, a stalled request would
        # block the monitor loop indefinitely.
        response = requests.post(
            f"https://open.feishu.cn/open-apis/bot/v2/hook/{token}",
            json=data,
            headers=headers,
            timeout=10,
        )
        if response.status_code == 200:
            print("消息发送成功")
        else:
            print("消息发送失败")
    except Exception as e:
        print(f"发送消息时出错: {e}")


def monitor_gpu(token, util_threshold=10, mem_threshold=10, window_size=12, interval=5):
    """
    Monitor all NVIDIA GPUs and send a Feishu notification on idle/busy transitions.

    A GPU is considered idle when both its sliding-window average utilization
    and average memory usage fall below the given thresholds.

    Parameters:
        token: Feishu webhook token used for notifications.
        util_threshold: GPU utilization threshold (%).
        mem_threshold: GPU memory usage threshold (%).
        window_size: sliding window size (number of samples).
        interval: polling interval (seconds).
    """
    pynvml.nvmlInit()
    device_count = pynvml.nvmlDeviceGetCount()

    print(f"发现 {device_count} 个GPU设备，开始监控...")
    print(f"空闲判定条件: GPU利用率 < {util_threshold}% 且 显存使用率 < {mem_threshold}%")
    print(f"使用滑动窗口平均(window_size={window_size}, interval={interval}s)\n")

    # Device handles are stable for the life of the process; fetch them once
    # instead of on every polling iteration.
    handles = [pynvml.nvmlDeviceGetHandleByIndex(i) for i in range(device_count)]

    # One sliding window of samples per GPU.
    util_queues = [deque(maxlen=window_size) for _ in range(device_count)]
    mem_queues = [deque(maxlen=window_size) for _ in range(device_count)]

    # Per-GPU busy flag; start as busy so the first sustained idle period
    # produces exactly one notification.
    busy_states = [True] * device_count

    try:
        while True:
            for i, handle in enumerate(handles):
                # Sample current utilization and memory usage.
                util = pynvml.nvmlDeviceGetUtilizationRates(handle).gpu
                mem_info = pynvml.nvmlDeviceGetMemoryInfo(handle)
                mem_used_percent = (mem_info.used / mem_info.total) * 100

                util_queues[i].append(util)
                mem_queues[i].append(mem_used_percent)

                # Do not judge idleness until the window is full: averaging
                # over the first one or two samples could fire a false
                # "idle" alert right after startup.
                if len(util_queues[i]) < window_size:
                    continue

                # Sliding-window averages.
                avg_util = np.mean(util_queues[i])
                avg_mem = np.mean(mem_queues[i])

                is_idle = avg_util < util_threshold and avg_mem < mem_threshold

                # Notify only on state transitions (busy -> idle, idle -> busy).
                if is_idle and busy_states[i]:
                    msg_idle = f"[{time.ctime()}: GPU {i}] 进入空闲状态! \n平均利用率: {avg_util:.1f}%,平均显存: {avg_mem:.1f}%"
                    print(msg_idle)
                    send_message(token, msg_idle)
                    busy_states[i] = False
                elif not is_idle and not busy_states[i]:
                    msg_busy = f"[{time.ctime()}: GPU {i}] 恢复工作中... \n当前利用率: {util}%, 显存: {mem_used_percent:.1f}%"
                    print(msg_busy)
                    send_message(token, msg_busy)
                    busy_states[i] = True

            time.sleep(interval)

    except KeyboardInterrupt:
        print("\n监控已停止")
    finally:
        # Always release NVML, even on Ctrl-C or an unexpected error.
        pynvml.nvmlShutdown()

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='NVIDIA GPU空闲监控工具')
    parser.add_argument('--util', type=float, default=10.0,
                        help='GPU利用率阈值(百分比)')
    parser.add_argument('--mem', type=float, default=10.0,
                        help='显存使用率阈值(百分比)')
    parser.add_argument('--window', type=int, default=12,
                        help='滑动窗口大小(数据点数量)')
    parser.add_argument('--interval', type=int, default=5,
                        help='检查间隔(秒)')
    parser.add_argument('--token',type=str, required=True, default=None,
                        help='webhook token')
    
    args = parser.parse_args()
    
    monitor_gpu(
        token=args.token,
        util_threshold=args.util,
        mem_threshold=args.mem,
        window_size=args.window,
        interval=args.interval
    )