from MADDPG import MADDPG
import numpy as np
import torch as th
import visdom
from params import scale_reward
import math
import sys
sys.path.append('/home/robin/starcraftEnv/starcraft_platform')
from environment.starcraft_rule_env import StarCraftRuleBasedEnv
import warnings
warnings.filterwarnings("ignore", category=UserWarning)

# --- module-level experiment configuration and object construction ---

# Rule-based StarCraft environment wrapper (project-local).
env = StarCraftRuleBasedEnv()

# Live plotting server; a visdom instance must already be listening on this port.
vis = visdom.Visdom(port=5274)
reward_record = []  # per-episode total reward history

# Fixed seeds for reproducible numpy / torch sampling.
np.random.seed(1234)
th.manual_seed(1234)
# NOTE(review): n_agents is hard-coded to 1 while the env exposes env.nagents;
# confirm they are meant to agree (rr in the training loop is sized by n_agents).
n_agents = 1
# One (x, y, health) triple per friendly and enemy unit.
n_states = (env.nagents + env.nenemies) * (2 + 1)
# Two continuous outputs per agent; the loop decodes them as a 2-bit target index.
n_actions = 2 * env.nagents # 2 outputs -> 2-bit index, values 0..3 (3 = no-op)
capacity = 1000000   # replay-buffer size
batch_size = 1000

n_episode = 1000             # total training episodes
n_win = 0                    # running count of won episodes
episodes_before_train = 10000  # warm-up transitions before learning starts

# visdom window handles (lazily created by the commented-out plotting code)
win = None
param = None




maddpg = MADDPG(n_agents, env.nenemies, n_states, n_actions, batch_size, capacity,
                episodes_before_train)

# Mirror MADDPG's device choice so tensors built here match its parameters.
FloatTensor = th.cuda.FloatTensor if maddpg.use_cuda else th.FloatTensor

'''
observation = env.reset()
our_agents = observation.units[our_play_id]
enemy_agents = observation.units[enemy_play_id]
our_num = [our_agents[i].id for i in range(len(our_agents))]
emeny_num = [enemy_agents[i].id for i in range(len(enemy_agents))]
obs = dict(zip(our_num+emeny_num,[[0]*3 for i in range(len(our_agents)+len(enemy_agents))]))
print(obs)
'''
# Persistent id -> [x, y, health] map.  Kept at module level so units that die
# mid-episode stay in the dict (zeroed) and the flattened observation vector
# keeps a fixed length for the whole episode.  The training loop rebinds it to
# a fresh dict at the start of every episode.
obs = dict()

def obs_obtain(observation, env):
    """Flatten the current game state into a (1, n_features) float tensor.

    Parameters:
        observation: env observation with ``units`` indexable by player id;
            each unit exposes ``id``, ``x``, ``y`` and ``health``.
        env: environment exposing ``state1.player_id`` / ``state2.player_id``.

    Returns:
        A ``(1, 3 * n_units_seen)`` float tensor, on GPU when CUDA is
        available, otherwise on CPU.  Values are scaled by 1/200 as a crude
        normalization — TODO confirm 200 matches the map's coordinate range.
    """
    our_play_id = env.state1.player_id
    enemy_play_id = env.state2.player_id
    # Zero every previously-seen unit first; live units overwrite their slot
    # below, so dead units contribute all-zero features at a stable position.
    for key in obs:
        obs[key] = [0, 0, 0]
    for agent in observation.units[our_play_id]:
        obs[str(agent.id)] = [agent.x, agent.y, agent.health]
    for enemy in observation.units[enemy_play_id]:
        obs[str(enemy.id)] = [enemy.x, enemy.y, enemy.health]
    obs_vec = np.ravel(list(obs.values())) / 200.0
    obs_mat = th.from_numpy(obs_vec[np.newaxis, :]).float()
    # Bug fix: the original unconditionally called .cuda(), which raises on
    # CPU-only machines even though the rest of the script conditions on
    # maddpg.use_cuda.  Move to the GPU only when one actually exists.
    if th.cuda.is_available():
        obs_mat = obs_mat.cuda()
    return obs_mat

for i_episode in range(n_episode):
    # Fresh id->feature map each episode so units from the previous episode
    # do not pad the observation vector.
    obs = dict()
    observation = env.reset()
    obs_mat = obs_obtain(observation, env)
    our_play_id = env.state1.player_id
    enemy_play_id = env.state2.player_id
    # Unit ids are fixed at reset; cache them for action encoding/decoding.
    ours_ids = [ours.id for ours in observation.units[our_play_id]]
    enemy_ids = [enemy.id for enemy in observation.units[enemy_play_id]]
    # Each env action row is (attacker unit id, target unit id).
    actions_ = np.zeros([env.nagents, 2])
    total_reward = 0.0
    rr = np.zeros((n_agents,))
    for t in range(env.max_episode_steps):
        actions = maddpg.select_action(obs_mat).data.cpu()
        for i in range(env.nagents):
            actions_[i, 0] = ours_ids[i]
            # Decode the agent's two continuous outputs (roughly in [-1, 1])
            # into a 2-bit integer target index via ceil() thresholding.
            binary_arr = actions.numpy()[0][i*2:(i+1)*2]
            enemy_id = 0
            for j in range(2):
                if binary_arr[j] <= -1.0:
                    binary_arr[j] += 0.5  # keep ceil() from yielding -1 at the boundary
                enemy_id += 2**(2-j-1)*math.ceil(binary_arr[j])
            # Bug fix: guard against fewer than 3 enemies — the original
            # indexed enemy_ids[enemy_id] whenever enemy_id < 3 and could
            # IndexError on smaller rosters.  Identical behavior when >= 3
            # enemies exist; index 3 (and out-of-range) means "no target".
            if enemy_id < min(3, len(enemy_ids)):
                actions_[i, 1] = enemy_ids[enemy_id]
            else:
                actions_[i, 1] = 0
        observation_, reward, done, _ = env.step(actions_.astype(np.int32))
        obs_mat_ = obs_obtain(observation_, env)
        reward = th.Tensor([reward])
        # Terminal transitions store None as the successor state.
        if t != env.max_episode_steps - 1:
            next_obs_mat = obs_mat_
        else:
            next_obs_mat = None
        total_reward += reward.sum()
        rr += reward.cpu().numpy()
        # Bug fix: the replay buffer expects (state, action, next_state,
        # reward), but the original pushed obs_mat_ (the *next* observation)
        # as the state, pairing every action with the wrong state.
        maddpg.memory.push(obs_mat.data, actions, next_obs_mat, reward)
        obs_mat = next_obs_mat

        c_loss, a_loss = maddpg.update_policy()
        if done:
            if total_reward > 0.5:  # presumably reward > 0.5 signals a win — verify against env
                n_win += 1
            break
    maddpg.episode_done += 1
    # %d: n_win is an integer win counter (the original printed it with %f).
    print('Episode: %d, win times = %d' % (i_episode, n_win))
    reward_record.append(total_reward)

    '''
    # save models
    if(maddpg.episode_done % 50 ==0):
        th.save(maddpg.actors,'/home/robin/starcraftEnv/starcraft_platform/maddpg_demo/checkpoint/agent_actor_'+str(maddpg.episode_done)+'.pkl')
        th.save(maddpg.critics,'/home/robin/starcraftEnv/starcraft_platform/maddpg_demo/checkpoint/agent_critic_'+str(maddpg.episode_done)+'.pkl')

    if maddpg.episode_done == maddpg.episodes_before_train:
        print('training now begins...')
        print('MADDPG on TorchCraft\n' +
              'scale_reward=%f\n' % scale_reward +
              ' \nlr=0.001, 0.0001\n')

    if win is None:
        win = vis.line(X=np.arange(i_episode, i_episode+1),
                       Y=np.array([
                           np.append(total_reward.cpu().numpy(), rr)]),
                       opts=dict(
                           ylabel='Reward',
                           xlabel='Episode',
                           title='DDPG on TorchCraft\n'))
    else:
        vis.line(X=np.array(
            [np.array(i_episode).repeat(n_agents+1)]),
                 Y=np.array([np.append(total_reward.cpu().numpy(),
                                       rr)]),
                 win=win,
                 update='append')
    if param is None:
        param = vis.line(X=np.arange(i_episode, i_episode+1),
                         Y=np.array([maddpg.var[0]]),
                         opts=dict(
                             ylabel='Var',
                             xlabel='Episode',
                             title='DDPG on TorchCraft: Exploration',
                             legend=['Variance']))
    else:
        vis.line(X=np.array([i_episode]),
                 Y=np.array([maddpg.var[0]]),
                 win=param,
                 update='append')
    '''

# Bug fix: `world` is never defined in this script (NameError at shutdown);
# the environment constructed at the top of the file is `env`.
env.close()
