from agents.maddpg import MADDPGAgent
from environment.environment import MultiUAVEnv
from replay_buffer.replay_buffer import ReplayBuffer
from utils.uav_plot import plot_environment
import logging
import os 
import shutil

import torch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def _reset_log_dir(log_file_path):
    """Ensure the directory holding *log_file_path* exists and is empty.

    Every file, symlink, and subdirectory inside the log directory is
    deleted; the directory is created if it does not exist yet.  Deletion
    failures are reported but do not abort the run (best-effort cleanup,
    matching the original behavior).
    """
    log_dir = os.path.dirname(log_file_path)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
        return
    for filename in os.listdir(log_dir):
        file_path = os.path.join(log_dir, filename)
        try:
            if os.path.isfile(file_path) or os.path.islink(file_path):
                os.unlink(file_path)
            elif os.path.isdir(file_path):
                shutil.rmtree(file_path)
        except Exception as e:
            print(f'Failed to delete {file_path}. Reason: {e}')


def main():
    """Train per-UAV MADDPG position agents in a multi-UAV environment.

    Runs `episodes` training episodes.  Within an episode, each UAV picks a
    position action from its actor network, the environment returns a
    reward, and the transition is stored in a shared replay buffer.  Agent
    updates start once the buffer holds more than 64 transitions.  An
    episode ends when all ground users are served or after 2000 inner
    iterations (safety cap).  Per-episode total rewards are printed at the
    end.
    """
    num_uavs = 3
    num_users = 10
    grid_size = 1000
    episodes = 100
    num_energy_action = 2
    num_position_action = 4
    num_states = 3

    # Portable path (the original used a Windows-only backslash literal,
    # which on POSIX systems is a single file name containing '\').
    log_file_path = os.path.join('log', 'uav_log.log')
    _reset_log_dir(log_file_path)

    # Configure logging to a fresh file for this run.
    logging.basicConfig(filename=log_file_path, level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

    # Initialize environment and per-UAV agents.
    env = MultiUAVEnv(num_uavs, grid_size, num_users, num_position_action, num_energy_action)
    agents_position = [MADDPGAgent(num_states, num_position_action) for _ in range(num_uavs)]
    # NOTE(review): the energy agents are built but never used below —
    # kept because constructing them may have side effects; confirm intent.
    agents_energy = [MADDPGAgent(num_states, num_energy_action) for _ in range(num_uavs)]
    replay_buffer = ReplayBuffer()
    flag = False  # becomes True once the buffer has enough samples to train on
    reward_all = []  # total reward per episode, printed at the end

    for episode in range(episodes):
        i = 0
        continue_training = True
        total_reward = 0
        while continue_training:
            for uav in env.uavs:
                state = uav.get_state()
                action_position = agents_position[uav.id].actor(torch.tensor(state, dtype=torch.float32).to(device), uav)
                # perform_action returns (energy reward, position reward);
                # only the position reward drives this training loop.
                reward_energy, reward = env.perform_action(uav, 0, action_position)
                total_reward = total_reward + reward
                # BUG FIX: the original read `uav.get_state` without
                # parentheses, storing the bound method object instead of
                # the actual next-state observation in the replay buffer.
                next_state = uav.get_state()
                replay_buffer.add((state, action_position, reward, next_state))
                if flag:
                    agents_position[uav.id].update(replay_buffer, uav)

                logging.info(f"uav{uav.id} position:{uav.position}, take the action;{action_position}, received reward:{reward}")

            # Start learning once the buffer exceeds one minibatch.
            if len(replay_buffer) > 64:
                flag = True

            # Episode termination: all ground users served, or the 2000-step
            # safety cap reached.
            if all(gu.served for gu in env.ground_users):
                continue_training = False
            if i >= 2000:
                continue_training = False
            else:
                i += 1

        print(f"Episode {episode + 1} completed.The total_reward is:{total_reward}")
        reward_all.append(total_reward)

    print(reward_all)

# Script entry point: run training only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    main()
