import random
import numpy as np
import sys
import time
import heapq
import math
from typing import Dict, List
from .auto_parallel_service import AutoParallelService
import pprint
import numpy as np
import torch
import json

# Compute the global dp, tp, pp and ep sizes.
def global_sizes(model_name, gpu_nums, num_experts = 1, order = 'pp-tp-dp'):
    """Enumerate all (pp, tp, dp[, ep]) parallel-size combinations for gpu_nums GPUs.

    Parameters
    ----------
    model_name : str
        Model identifier; 'mixtral' additionally enumerates expert-parallel sizes.
    gpu_nums : int
        Total number of GPUs to partition.
    num_experts : int
        Number of experts (only used for 'mixtral'); ep must divide it.
    order : str
        Dash-separated subset of 'pp'/'tp'/'dp' naming the searched dimensions;
        dimensions not listed are fixed to size 1.

    Returns
    -------
    list[list[int]]
        Each entry is [pp, tp, dp] (plus a trailing ep for 'mixtral') whose
        product equals gpu_nums; ep divides the dp size rather than multiplying in.
    """
    searched_dims = order.split('-')
    # Which of the canonical dimensions (pp, tp, dp) are actually searched.
    is_searched = [dim in searched_dims for dim in ('pp', 'tp', 'dp')]
    if model_name in ['mixtral']:
        # Expert parallelism is carved out of the data-parallel dimension.
        assert is_searched[2], '专家并行必须开dp'
    size_strategies = []

    def dfs(count, gpu_left, now):
        # The last searched dimension absorbs all remaining GPUs, so every
        # emitted combination multiplies out to exactly gpu_nums.
        if count == len(searched_dims) - 1:
            res = now + [gpu_nums // math.prod(now)]
            # Re-insert size 1 for the dimensions excluded from the search,
            # preserving the canonical [pp, tp, dp] ordering.
            for i in range(len(is_searched)):
                if not is_searched[i]:
                    res.insert(i, 1)
            size_strategies.append(res)
            return

        for i in range(1, gpu_left + 1):
            if gpu_left % i == 0:
                dfs(count + 1, gpu_left // i, now + [i])

    dfs(0, gpu_nums, now=[])

    # Heuristic: on larger clusters, drop degenerate strategies that leave
    # any searched dimension at size 1.
    if gpu_nums > 16:
        size_strategies = [s for s in size_strategies if 1 not in s]

    if model_name in ['mixtral']:
        # Append every expert-parallel size that divides both the dp size
        # and the number of experts.
        ans = []
        for s in size_strategies:
            for ep in range(1, s[2] + 1):
                if s[2] % ep == 0 and num_experts % ep == 0:
                    ans.append(s + [ep])
        return ans
    return size_strategies


# Generate one valid pipeline-parallel (pp) layer-split strategy.
def random_pp_strategy(pp_size, layer_nums):
    """Randomly split layer_nums layers into pp_size stages, each getting >= 1 layer."""
    stage_layers = []
    for _ in range(pp_size - 1):
        remaining_layers = layer_nums - sum(stage_layers)
        remaining_stages = pp_size - len(stage_layers)
        # Leave at least one layer for every stage still to be filled.
        stage_layers.append(random.randint(1, remaining_layers - remaining_stages + 1))
    # The final stage takes whatever is left.
    stage_layers.append(layer_nums - sum(stage_layers))
    return stage_layers


# Evaluate the performance of a pp strategy across candidate tp layouts.
def evaluate_strategy(model_name, strategy, tp_sizes, dp_size, layer_info: Dict[str, Dict[str, object]], ep_size = 1, experts_num = 0):
    """Pick the best tp layout for a given pp strategy.

    Profiles every candidate in tp_sizes with the simulator and keeps the one
    with the lowest simulated time cost / balance score.

    Returns (best_time_cost, best_balance, best_tp_size); best_tp_size stays []
    when tp_sizes is empty.
    """
    best_time_cost = float('inf')
    best_balance = float('inf')
    best_tp_size = []
    for candidate in tp_sizes:
        # Estimate compute cost and memory balance via the simulator.
        if model_name == 'mixtral':
            time_cost, balance = profile(model_name, strategy, candidate, dp_size, layer_info, ep_size, experts_num)
        else:
            time_cost, balance = profile(model_name, strategy, candidate, dp_size, layer_info)

        # Accept a candidate that is strictly better on both metrics, or one
        # that ties on balance while strictly improving time.
        strictly_better = time_cost < best_time_cost and balance < best_balance
        tie_break = balance == best_balance and time_cost < best_time_cost
        if strictly_better or tie_break:
            best_time_cost = time_cost
            best_balance = balance
            best_tp_size = candidate
    return best_time_cost, best_balance, best_tp_size

# Profile the performance of the current strategy.
def profile(model_name, strategy, tp_size, dp_size, layer_info: Dict[str, Dict[str, object]], ep_size = 1, experts_num = 0):
    """Delegate to the simulator to estimate (time_cost_total, balance_score).

    For 'mixtral' the expert-parallel arguments are forwarded as well.
    """
    kwargs = dict(pp_layers_per_partition=strategy, tp_splits=tp_size, dp_size=dp_size, layer_info=layer_info)
    if model_name == 'mixtral':
        kwargs.update(ep_size=ep_size, experts_num=experts_num)
    return AutoParallelService.mock(model_name, **kwargs)


# Memoization cache for tp_st(): the last minimal per-stage tp sizes, the GPU
# budget they were computed for, and the resulting tp layouts. Repeated calls
# with the same (minimal sizes, GPU count) skip the backtracking search.
MIN_TP_SIZE = []
GPU_NUMS = 0
TP_SIZES = []
# Generate GPU allocation strategies (tp sizes per pipeline stage).
def tp_st(pp_strateges, gpu_nums, layer_per_memory_allocated, active_memory, max_memory_allocated=60):
    """Generate candidate tensor-parallel sizes (GPUs per pipeline stage).

    For each stage a minimal power-of-two tp size is derived from its memory
    footprint; those minima are then expanded (by doubling) into every
    combination summing exactly to gpu_nums.

    Parameters
    ----------
    pp_strateges : list[int]
        Layers assigned to each pipeline stage.
    gpu_nums : int
        Total GPUs available for tensor parallelism.
    layer_per_memory_allocated : float
        Per-layer memory; weighted by 6x (presumably weights+grads+optimizer
        states — TODO confirm the 6x/8x factors against the simulator).
    active_memory : float
        Activation memory; weighted by 8x.
    max_memory_allocated : float
        Per-device memory budget (default 60, matching the original constant).

    Returns
    -------
    list[list[int]]
        All feasible per-stage tp sizes summing to gpu_nums; [] if infeasible.
    """
    global MIN_TP_SIZE, GPU_NUMS, TP_SIZES
    # Minimum (fractional) GPUs each stage needs to fit within the budget.
    min_tp_size = [
        (layers * layer_per_memory_allocated * 6 + active_memory * 8) / max_memory_allocated
        for layers in pp_strateges
    ]
    # Round each requirement up to the next power of two (at least 1).
    min_tp_size = [2 ** math.ceil(math.log2(x)) if x > 1 else 1 for x in min_tp_size]

    if sum(min_tp_size) > gpu_nums:
        # Even the minimal assignment does not fit in the GPU budget.
        return []
    if min_tp_size == MIN_TP_SIZE and gpu_nums == GPU_NUMS:
        # Memoized result for an identical query.
        return TP_SIZES
    MIN_TP_SIZE = min_tp_size
    GPU_NUMS = gpu_nums
    # sum(min_tp_size) <= gpu_nums is guaranteed here, so the only cases left
    # are an exact fit or a surplus to distribute.  (The original also had an
    # unreachable sum > gpu_nums branch here; it has been removed.)
    if sum(min_tp_size) == gpu_nums:
        TP_SIZES = [min_tp_size]
    else:
        TP_SIZES = all_power_of_two_lists(min_tp_size, gpu_nums)
    return TP_SIZES

    
# Expand the minimal per-stage tp sizes so they consume all provided GPUs.
def all_power_of_two_lists(min_tp_size, target_sum):
    """Enumerate every list obtained by doubling each entry of min_tp_size
    zero or more times such that the entries sum to exactly target_sum.

    Returns [] when even the minima exceed target_sum.
    """
    if sum(min_tp_size) > target_sum:
        return []

    found = []
    total = len(min_tp_size)

    def extend(pos, prefix, running):
        # A complete assignment counts only if it uses every GPU.
        if pos == total:
            if running == target_sum:
                found.append(prefix)
            return
        value = min_tp_size[pos]
        # Keep doubling this stage's size while the partial sum still fits.
        while running + value <= target_sum:
            extend(pos + 1, prefix + [value], running + value)
            value *= 2

    extend(0, [], 0)
    return found
    

# Main MCMC (simulated annealing) search logic.
def mcmc_search(model_name, pp_size, layer_nums, num_iterations, layer_info: Dict[str, Dict[str, object]], tp_gpus, dp_size, ep_size = 1, experts_num = 1, temperature=1.0):
    """Search pp layer splits (and matching tp layouts) via MCMC annealing.

    Starts from an (almost) even layer split, then repeatedly mutates it,
    evaluating each candidate with the simulator and accepting it with a
    Metropolis-style probability. Temperature decays by 0.99 per iteration,
    floored at 0.001.

    Returns (best_strategy, best_tp_size, best_time_cost, best_balance).
    """
    # Initial strategy: spread layers as evenly as possible across stages.
    base, rest = divmod(layer_nums, pp_size)
    current_strategy = [base + 1] * rest + [base] * (pp_size - rest)

    # Per-layer memory footprint. Computed once: it does not change between
    # iterations. (The original recomputed it every iteration, and did so via
    # the non-expert-parallel path even for mixtral — fixed here.)
    if model_name in ['mixtral']:
        layer_per_memory_allocated, active_memory = AutoParallelService.get_layer_memory_with_ep(layer_info=layer_info, ep_size=ep_size, experts_num=experts_num)
    else:
        layer_per_memory_allocated, active_memory = AutoParallelService.get_layer_momory(layer_info=layer_info)
    current_tp_sizes = tp_st(current_strategy, tp_gpus, layer_per_memory_allocated, active_memory)

    if model_name == 'mixtral':
        current_time_cost, current_balance, current_tp_size = evaluate_strategy(model_name, current_strategy, current_tp_sizes, dp_size, layer_info, ep_size, experts_num)
    else:
        current_time_cost, current_balance, current_tp_size = evaluate_strategy(model_name, current_strategy, current_tp_sizes, dp_size, layer_info)

    best_strategy = current_strategy
    best_tp_size = current_tp_size
    best_time_cost = current_time_cost
    best_balance = current_balance

    # A single stage admits no alternative splits.
    if len(current_strategy) == 1:
        return current_strategy, current_tp_size, current_time_cost, current_balance

    for _ in range(num_iterations):
        # Propose a neighbouring strategy by moving layers between stages.
        new_strategy = mutate_strategy(current_strategy)

        # BUGFIX: tp layouts must be derived from the PROPOSED strategy; the
        # original passed current_strategy here while evaluating new_strategy.
        tp_size = tp_st(new_strategy, tp_gpus, layer_per_memory_allocated, active_memory)
        if tp_size == []:
            # The proposal cannot fit in memory; skip it.
            continue

        # Evaluate the proposed strategy.
        if model_name == 'mixtral':
            new_time_cost, new_balance, new_tp_size = evaluate_strategy(model_name, new_strategy, tp_size, dp_size, layer_info, ep_size, experts_num)
        else:
            new_time_cost, new_balance, new_tp_size = evaluate_strategy(model_name, new_strategy, tp_size, dp_size, layer_info)

        # Metropolis acceptance probability over a weighted mix of the deltas.
        delta_cost = new_time_cost - current_time_cost
        delta_balance = new_balance - current_balance
        alpha = 0.5
        acceptance_prob = np.exp(-(alpha * delta_cost + (1 - alpha) * delta_balance) / temperature)

        # Always accept improvements; accept regressions probabilistically.
        if delta_cost < 0 or random.random() < acceptance_prob:
            current_strategy = new_strategy
            current_tp_size = new_tp_size
            current_time_cost = new_time_cost
            current_balance = new_balance

            # Track the best strategy seen so far.
            if (new_time_cost < best_time_cost and new_balance < best_balance) or (new_balance == best_balance and new_time_cost < best_time_cost):
                best_strategy = new_strategy
                best_tp_size = new_tp_size
                best_time_cost = new_time_cost
                best_balance = new_balance

        # Simulated annealing: cool the temperature each iteration.
        temperature *= 0.99
        temperature = max(temperature, 0.001)

    return best_strategy, best_tp_size, best_time_cost, best_balance

# Generate a neighbouring strategy (the mutation step).
def mutate_strategy(strategy):
    """Return a neighbouring pp strategy: move 1-2 layers between two stages.

    The total layer count is preserved and every stage keeps at least one
    layer. If no valid neighbour exists (fewer than two stages, or every
    stage already holds exactly one layer) a copy of the input is returned —
    the original implementation would spin forever in those cases.
    """
    # Guard: a mutation needs two stages and at least one stage with >1 layer
    # to donate from; otherwise no distinct valid neighbour exists.
    if len(strategy) < 2 or max(strategy) <= 1:
        return strategy[:]

    new_strategy = strategy[:]
    # Retry until the proposal is valid (all stages positive) and distinct.
    while not all(x > 0 for x in new_strategy) or new_strategy == strategy:
        new_strategy = strategy[:]
        # Pick two distinct stages; index2 donates layers to index1.
        index1, index2 = random.sample(range(len(new_strategy)), 2)
        # The donor must have more than one layer to give.
        while new_strategy[index2] == 1:
            index1, index2 = random.sample(range(len(new_strategy)), 2)
        delta = random.randint(1, min(2, new_strategy[index2]))
        new_strategy[index1] += delta
        new_strategy[index2] -= delta
    return new_strategy
