#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
大规模GPU服务器部署脚本
专门用于一次性部署大量GPU实例
"""

import requests
import json
import time
import sys
import argparse
import threading
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import List, Dict, Any
import asyncio
import aiohttp

class MassiveDeploymentManager:
    """Deploy large numbers of GPU server instances via the HAI HTTP API.

    Work is split into batches; each batch fans out POST requests through a
    thread pool, and the manager sleeps between batches to stay within the
    cloud provider's API rate limits.
    """

    def __init__(self, base_url: str = "http://localhost:8080"):
        # Base URL of the HAI optimization system's HTTP API.
        self.base_url = base_url
        self.max_concurrent = 20  # tuned against Tencent Cloud API limits
        self.batch_size = 50      # servers per batch - within API rate limits
        self.rate_limit_per_second = 15  # 15 req/s, ~75% of the API quota

    def create_server_batch(self, batch_count: int, batch_id: int, count=None) -> int:
        """Create one batch of servers concurrently.

        Args:
            batch_count: Total number of batches (used only in progress output).
            batch_id: Zero-based index of this batch.
            count: Number of servers to create in this batch. Defaults to
                ``self.batch_size``; the final batch of a deployment may be
                smaller than a full batch.

        Returns:
            The number of servers successfully created in this batch.
        """
        if count is None:
            count = self.batch_size
        created_count = 0

        def create_single_server(index: int) -> str:
            """Create one server; return its instance id, or "" on failure."""
            try:
                server_index = batch_id * self.batch_size + index + 1
                print(f"🚀 批次{batch_id+1} 创建服务器 {server_index}/{batch_count * self.batch_size}")

                response = requests.post(f"{self.base_url}/api/v1/servers",
                                       json={'region': 'ap-beijing', 'gpu_type': 'basic'},
                                       timeout=120)  # generous timeout: instance creation is slow

                if response.status_code == 200:
                    data = response.json()
                    if data.get('success'):
                        instance_id = data.get('instance_id')
                        print(f"✅ 服务器创建成功: {instance_id}")
                        return instance_id
                    print(f"❌ 服务器创建失败: {data.get('error')}")
                    return ""
                print(f"❌ 创建请求失败: {response.status_code}")
                return ""
            except Exception as e:
                # Best-effort: log and count as a failure; one bad request
                # must not abort the whole batch.
                print(f"❌ 创建异常: {e}")
                return ""

        # Fan the batch out over a thread pool (pure I/O-bound work).
        with ThreadPoolExecutor(max_workers=self.max_concurrent) as executor:
            # BUGFIX: submit exactly `count` tasks (was always self.batch_size,
            # which made the final, partial batch overshoot the target total).
            future_to_index = {
                executor.submit(create_single_server, i): i
                for i in range(count)
            }

            # Collect results as they finish; any non-empty id is a success.
            for future in as_completed(future_to_index):
                index = future_to_index[future]
                try:
                    if future.result():
                        created_count += 1
                except Exception as e:
                    print(f"❌ 创建任务 {index+1} 异常: {e}")

        return created_count

    def deploy_massive_servers(self, total_count: int) -> bool:
        """Deploy ``total_count`` servers in rate-limited batches.

        Returns:
            True if every requested server was created, False otherwise.
        """
        print(f"🎯 开始大规模部署 {total_count} 个服务器...")
        print(f"📊 配置信息:")
        print(f"  - 批处理大小: {self.batch_size}")
        print(f"  - 最大并发数: {self.max_concurrent}")
        print(f"  - 总批次数: {(total_count + self.batch_size - 1) // self.batch_size}")
        print("=" * 60)

        total_created = 0
        # Ceiling division: number of (possibly partial) batches needed.
        batch_count = (total_count + self.batch_size - 1) // self.batch_size

        for batch_id in range(batch_count):
            print(f"\n🔄 开始批次 {batch_id + 1}/{batch_count}")

            # The final batch may be smaller than a full batch.
            current_batch_size = min(self.batch_size, total_count - batch_id * self.batch_size)

            # BUGFIX: pass the actual batch size so we never create more
            # servers than requested (previously current_batch_size was unused).
            created = self.create_server_batch(batch_count, batch_id, count=current_batch_size)
            total_created += created

            print(f"✅ 批次 {batch_id + 1} 完成，创建了 {created} 个服务器")
            print(f"📊 累计创建: {total_created}/{total_count}")

            # Pause between batches to respect the API rate limit.
            if batch_id < batch_count - 1:
                wait_time = max(3, self.batch_size / self.rate_limit_per_second)
                print(f"⏳ 批次间等待 {wait_time:.1f} 秒（基于API频率限制）...")
                time.sleep(wait_time)

        print(f"\n🎉 大规模部署完成！")
        print(f"📊 总计创建: {total_created}/{total_count} 个服务器")

        if total_created == total_count:
            print("✅ 部署成功率: 100%")
            return True
        else:
            success_rate = (total_created / total_count) * 100
            print(f"⚠️ 部署成功率: {success_rate:.1f}%")
            return False

    def get_system_status(self) -> Dict[str, Any]:
        """Fetch the system status endpoint; return {} on any failure."""
        try:
            response = requests.get(f"{self.base_url}/api/v1/system/status", timeout=10)
            if response.status_code == 200:
                return response.json()
            print(f"❌ 获取系统状态失败: {response.status_code}")
            return {}
        except Exception as e:
            print(f"❌ 连接失败: {e}")
            return {}

    def get_servers(self) -> List[Dict[str, Any]]:
        """Fetch the full server list; return [] on any failure."""
        try:
            response = requests.get(f"{self.base_url}/api/v1/servers", timeout=10)
            if response.status_code == 200:
                data = response.json()
                if data.get('success'):
                    return data.get('data', [])
                # BUGFIX: API answered 200 but reported failure - previously
                # this path fell through and returned None instead of a list.
                return []
            print(f"❌ 获取服务器列表失败: {response.status_code}")
            return []
        except Exception as e:
            print(f"❌ 连接失败: {e}")
            return []

    def show_status(self):
        """Print the system status and the first 10 known servers."""
        print("📊 大规模部署系统状态:")
        print("=" * 50)

        # System-level status. NOTE(review): the nested key layout below is
        # assumed from the server's response schema - confirm against the API.
        status = self.get_system_status()
        if status and status.get('success'):
            data = status['data']
            print(f"系统运行: {data['system']['running']}")
            print(f"扩缩容运行: {data['scaling']['running']}")
            print(f"总服务器: {data['servers']['servers']['total']}")
            print(f"活跃服务器: {data['servers']['servers']['available']}")
            print(f"队列长度: {data['queue']['total_tasks_in_queue']}")
            print()

        # Server list, truncated to the first 10 entries.
        servers = self.get_servers()
        if servers:
            print(f"📋 服务器列表 (显示前10个，共{len(servers)}个):")
            for i, server in enumerate(servers[:10], 1):
                instance_id = server.get('instance_id', 'Unknown')
                status = server.get('status', 'Unknown')
                region = server.get('region', 'Unknown')
                gpu_type = server.get('gpu_type', 'Unknown')
                print(f"  {i:2d}. {instance_id} | {status} | {region} | {gpu_type}")

            if len(servers) > 10:
                print(f"  ... 还有 {len(servers) - 10} 个服务器")
        else:
            print("❌ 无法获取服务器列表")

def main():
    """CLI entry point: parse arguments and dispatch 'deploy' or 'status'."""
    parser = argparse.ArgumentParser(description='大规模GPU服务器部署脚本')
    parser.add_argument('action', choices=['deploy', 'status'], 
                       help='操作类型')
    parser.add_argument('--count', type=int, default=200, 
                       help='要部署的服务器数量')
    parser.add_argument('--url', default='http://localhost:8080', 
                       help='HAI优化系统URL')
    parser.add_argument('--batch-size', type=int, default=100,
                       help='批处理大小')
    parser.add_argument('--max-concurrent', type=int, default=50,
                       help='最大并发数')
    
    args = parser.parse_args()
    
    # Reject nonsensical numeric options up front instead of failing mid-run
    # (e.g. batch_size <= 0 would make the batch math meaningless).
    if args.count <= 0 or args.batch_size <= 0 or args.max_concurrent <= 0:
        parser.error('--count / --batch-size / --max-concurrent 必须为正整数')
    
    manager = MassiveDeploymentManager(args.url)
    # CLI flags override the manager's conservative built-in defaults.
    manager.batch_size = args.batch_size
    manager.max_concurrent = args.max_concurrent
    
    if args.action == 'deploy':
        print("🚀 大规模GPU服务器部署工具")
        print("=" * 60)
        print(f"目标数量: {args.count} 个服务器")
        print(f"批处理大小: {args.batch_size}")
        print(f"最大并发数: {args.max_concurrent}")
        print("=" * 60)
        
        # Interactive confirmation gate before spending money on instances.
        confirm = input("确认开始大规模部署？(y/N): ")
        if confirm.lower() != 'y':
            print("❌ 部署已取消")
            return
        
        success = manager.deploy_massive_servers(args.count)
        
        if success:
            print("\n🎉 大规模部署成功完成！")
        else:
            print("\n⚠️ 大规模部署部分完成，请检查系统状态")
            
    elif args.action == 'status':
        manager.show_status()

# Run the CLI only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()
