import os
import logging
# import multiprocessing as mp
import torch.multiprocessing as mp
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import numpy as np
from torch.distributions import Categorical

import a2c_FoV as a2c
import env_multi_tile as env
import load_data
from tensorboardX import SummaryWriter
import shutil
import tileSet
import random

S_INFO = 6  # state feature rows: bit_rate, buffer_size, next_chunk_size, bandwidth measurement (throughput and time), chunk_til_video_end
S_LEN = 8  # take how many frames in the past
A_DIM = 4  # Action dimension (number of selectable quality levels)
ACTOR_LR_RATE = 0.0001
CRITIC_LR_RATE = 0.001
NUM_AGENTS = 8  # number of parallel worker processes
TRAIN_SEQ_LEN = 32  # take as a train batch 2^n
MODEL_SAVE_INTERVAL = 32  # checkpoint + run offline test every this many epochs
MAX_TRAINING_EPOCH = 50000
VIDEO_BIT_RATE = load_data.load_bitrates('tiledSkiing')  # load the video bitrate levels (kbps)
# Corner coordinates of tile centers on a unit cube: 4 tiles per face, 6 faces.
FACE_COORD = [[1, 0.5, -0.5], [1, 0.5, 0.5], [1, -0.5, -0.5], [1, -0.5, 0.5],
            [-1, 0.5, 0.5], [-1, 0.5, -0.5], [-1, -0.5, 0.5], [-1, -0.5, -0.5],
            [-0.5, 1, 0.5], [0.5, 1, 0.5], [-0.5, 1, -0.5], [0.5, 1, -0.5],
            [-0.5, -1, -0.5], [0.5, -1, -0.5], [-0.5, -1, 0.5], [0.5, -1, 0.5],
            [-0.5, 0.5, -1], [0.5, 0.5, -1], [-0.5, -0.5, -1], [0.5, -0.5, -1],
            [0.5, 0.5, 1], [-0.5, 0.5, 1], [0.5, -0.5, 1], [-0.5, -0.5, 1]]
TILES_IN_FACE = int(len(FACE_COORD) / 6)
TOTAL_TILES = TILES_IN_FACE * 6  # 24 tiles in total
BUFFER_NORM_FACTOR = 10.0  # normalizes buffer_size in the state vector
CHUNK_TIL_VIDEO_END_CAP = 200
M_IN_K = 1000.0
DEVIATION_THRESHOLD = 0.33  # tiles with deviation below this count as in-FoV
QUALITY_FACTOR = 0.001 * 5
REBUF_PENALTY = 5
SMOOTH_PENALTY = 0
RANDOM_SEED = 30
RAND_RANGE = 1000
SUMMARY_DIR = 'results'
LOG_FILE = 'results/log'
TEST_LOG_FOLDER = './test_results/'
TRAIN_TRACES = './cooked_traces/'
NN_MODEL = None  # optional pretrained checkpoint path (None = train from scratch)
# Pre-build per-agent tile objects: tiles[agent_id][tile_id].
tiles = []
for i in range(NUM_AGENTS):
    tiles_tmp = []
    for j in range(TOTAL_TILES):
        # NOTE(review): third argument (24) presumably is the chunk/segment
        # count per tile -- confirm against tileSet.tile's signature.
        tile_tmp = tileSet.tile(j, load_data.load_multi_tiled_segment_size('tiledSkiing', j), 24)
        tiles_tmp.append(tile_tmp)
    tiles.append(tiles_tmp)


# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


def convert_torch(variable, dtype=np.float32):
    """Return *variable* as a torch tensor, cast to *dtype* when it differs."""
    array = variable if variable.dtype == dtype else variable.astype(dtype)
    return torch.from_numpy(array)


def testing(epoch, nn_model, log_file):
    """Evaluate a saved actor checkpoint and append summary stats to the log.

    Recreates TEST_LOG_FOLDER, runs the external test script (which writes one
    log file per trace), averages the per-trace rewards and writes
    min/5%/mean/median/95%/max to *log_file*.

    Args:
        epoch: current training epoch (first column of the log line).
        nn_model: path to the saved actor checkpoint (.pkl).
        log_file: open, writable file object for the aggregated test log.

    Returns:
        Mean of the per-trace mean rewards.
    """
    # Clean up the test results folder so stale logs never leak in.
    if os.path.exists(TEST_LOG_FOLDER):
        shutil.rmtree(TEST_LOG_FOLDER)
    os.makedirs(TEST_LOG_FOLDER, exist_ok=True)

    # run test script
    os.system('python rl_test_multi_tile.py ' + nn_model + ' 24')

    # Collect the mean reward of each per-trace log, skipping the first chunk
    # (the agent has no control over it).
    rewards = []
    for test_log_file in os.listdir(TEST_LOG_FOLDER):
        reward = []
        with open(os.path.join(TEST_LOG_FOLDER, test_log_file), 'r') as f:
            for line in f:
                parse = line.split()
                try:
                    reward.append(float(parse[-1]))
                except IndexError:
                    break
        # Guard: np.mean over an empty slice yields nan (with a warning) and
        # would poison every summary statistic below.
        if len(reward) > 1:
            rewards.append(np.mean(reward[1:]))

    rewards = np.array(rewards)

    rewards_min = np.min(rewards)
    rewards_5per = np.percentile(rewards, 5)
    rewards_mean = np.mean(rewards)
    rewards_median = np.percentile(rewards, 50)
    rewards_95per = np.percentile(rewards, 95)
    rewards_max = np.max(rewards)

    log_file.write(str(epoch) + '\t' +
                   format(rewards_min, '.3f') + '\t' +
                   format(rewards_5per, '.3f') + '\t' +
                   format(rewards_mean, '.3f') + '\t' +
                   format(rewards_median, '.3f') + '\t' +
                   format(rewards_95per, '.3f') + '\t' +
                   format(rewards_max, '.3f') + '\n')
    log_file.flush()

    return rewards_mean


def central_agent(net_params_queues, exp_queues):
    """Coordinator process: broadcast weights, gather experience, update nets.

    Each epoch: push the current actor/critic state_dicts to every agent,
    then pull one experience batch per (agent, tile) pair, accumulate actor
    and critic gradients over all of them, and apply a single optimizer step.
    Checkpoints the models and runs the offline test every
    MODEL_SAVE_INTERVAL epochs.

    Args:
        net_params_queues: one mp.Queue per agent (weights, coordinator -> agent).
        exp_queues: per agent, one mp.Queue per tile carrying
            (s_batch, a_batch, r_batch, old_pi_batch, terminal, entropy).
    """
    assert len(net_params_queues) == NUM_AGENTS
    assert len(exp_queues) == NUM_AGENTS

    logging.basicConfig(filename=LOG_FILE + '_central',
                        filemode='w',
                        level=logging.INFO)

    write_test = SummaryWriter(SUMMARY_DIR)
    with open(LOG_FILE + '_test', 'w') as test_log_file:
        actor = a2c.ActorNet(s_dim=[S_INFO, S_LEN], a_dim=A_DIM, lr=ACTOR_LR_RATE)
        critic = a2c.CriticNet(s_dim=[S_INFO, S_LEN], lr=CRITIC_LR_RATE)

        actor_optim = optim.RMSprop(actor.parameters(), lr=ACTOR_LR_RATE)
        critic_optim = optim.RMSprop(critic.parameters(), lr=CRITIC_LR_RATE)

        epoch = 0
        # NOTE(review): the "+ 2" presumably pads the loop so the final
        # checkpoint/test at MAX_TRAINING_EPOCH is still reached -- confirm.
        while epoch < MAX_TRAINING_EPOCH + 2:
            print('epoch = ', epoch)
            # Broadcast the latest weights so each agent acts on-policy.
            actor_net_params = actor.state_dict()
            critic_net_params = critic.state_dict()
            for i in range(NUM_AGENTS):
                net_params_queues[i].put([actor_net_params, critic_net_params])
            total_batch_len = 0.0
            total_reward = 0.0
            total_td_loss = 0.0
            total_entropy = 0.0
            total_agents = 0.0
            # Gradients are accumulated across every (agent, tile) batch below
            # and applied in one step per epoch.
            actor_optim.zero_grad()
            critic_optim.zero_grad()
            for i in range(NUM_AGENTS):
                for j in range(TOTAL_TILES):
                    # Blocks until agent i has produced a batch for tile j.
                    s_batch, a_batch, r_batch, old_pi_batch, terminal, entropy = exp_queues[i][j].get()
                    s_batch = convert_torch(np.array(s_batch))
                    a_batch = convert_torch(np.array(a_batch))
                    r_batch = convert_torch(np.array(r_batch))
                    terminal = convert_torch(np.array(terminal))

                    critic_loss, td_batch = critic.cal_loss(s_batch, r_batch, terminal)
                    actor_loss = actor.cal_loss(s_batch, a_batch, td_batch, epoch, i, j)

                    critic_loss.backward()
                    actor_loss.backward()
                    total_reward += np.sum(r_batch.numpy())
                    total_td_loss += np.sum(td_batch.numpy())
                    total_batch_len += len(r_batch.numpy())
                    total_agents += 1.0
                    total_entropy += np.sum(entropy)
            critic_optim.step()
            actor_optim.step()
            epoch += 1
            # NOTE(review): total_agents counts (agent, tile) batches, so
            # avg_reward is per-batch rather than per-agent -- confirm intent.
            avg_reward = total_reward / total_agents
            avg_td_loss = total_td_loss / total_batch_len
            avg_entropy = total_entropy / total_batch_len
            logging.info('Epoch: ' + str(epoch) +
                         ' TD_loss: ' + str(avg_td_loss) +
                         ' Avg_reward: ' + str(avg_reward) +
                         ' Avg_entropy: ' + str(avg_entropy))

            if epoch % MODEL_SAVE_INTERVAL == 0:
                # Save the neural net parameters to disk.
                print('Epoch = ', epoch)
                torch.save(actor.state_dict(), SUMMARY_DIR + "/actor_nn_model_ep_" +
                           str(epoch) + ".pkl")
                torch.save(critic.state_dict(), SUMMARY_DIR + "/critic_nn_model_ep_" +
                           str(epoch) + ".pkl")

                # Evaluate the fresh checkpoint and append stats to the test log.
                reward_mean = testing(epoch,
                                      SUMMARY_DIR + "/actor_nn_model_ep_" + str(epoch) + ".pkl",
                                      test_log_file)

                print('epoch = ', epoch, 'reward = ', format(reward_mean, '.3f'))
                write_test.add_scalar('Testing/total_reward', reward_mean, epoch)
                write_test.add_scalar('Training/Entropy', avg_entropy, epoch)
                write_test.add_scalar('Training/TD_Error', avg_td_loss, epoch)

                write_test.flush()


def agent(agent_id, all_cooked_time, all_cooked_bw, all_FoV_time, all_FoV_pos, net_params_queue, exp_queue):
    """Worker process: stream tiles through the environment, ship experience.

    For each of the TOTAL_TILES tiles in turn: download the next chunk, build
    the tile's state, compute a shaped reward, sample the next quality action
    from the local actor, and buffer (state, action, reward). When a tile's
    reward batch reaches TRAIN_SEQ_LEN (or the video ends), the batch is
    pushed to exp_queue[tile]; once every tile has reported, fresh network
    weights are pulled from net_params_queue.

    Args:
        agent_id: index of this worker (also seeds its environment).
        all_cooked_time, all_cooked_bw: bandwidth trace data.
        all_FoV_time, all_FoV_pos: head-movement (field-of-view) trace data.
        net_params_queue: mp.Queue delivering (actor, critic) state_dicts.
        exp_queue: one mp.Queue per tile for experience batches.
    """
    ep = 1  # number of completed sync rounds (every tile reported once)
    net_env = env.Environment(all_cooked_time=all_cooked_time,
                              all_cooked_bw=all_cooked_bw,
                              all_FoV_time=all_FoV_time,
                              all_FoV_pos=all_FoV_pos,
                              random_seed=agent_id)

    with open(LOG_FILE + '_agent_' + str(agent_id), 'w') as log_file:
        actor = a2c.ActorNet(s_dim=[S_INFO, S_LEN], a_dim=A_DIM, lr=ACTOR_LR_RATE)
        critic = a2c.CriticNet(s_dim=[S_INFO, S_LEN], lr=CRITIC_LR_RATE)

        # initial synchronization of the network parameters from the coordinator
        actor_net_params, critic_net_params = net_params_queue.get()
        actor.load_state_dict(actor_net_params)
        critic.load_state_dict(critic_net_params)
        net_update = np.zeros(TOTAL_TILES)  # flag per tile: batch shipped this round
        time_stamp = np.zeros(TOTAL_TILES)  # per-tile elapsed time (ms)

        while True:  # experience video streaming forever
            for i in range(TOTAL_TILES):
                delay, deviation, sleep_time, buffer_size, rebuf, video_chunk_size, next_video_chunk_sizes,\
                end_of_video, video_chunk_remain, trace_idx = net_env.get_video_chunk(tiles[agent_id][i])

                time_stamp[i] += delay  # in ms
                time_stamp[i] += sleep_time  # in ms
                # fov_tmp == 1 means this tile lies inside the viewer's FoV.
                fov_tmp = 0
                if deviation < DEVIATION_THRESHOLD:
                    fov_tmp = 1

                # NOTE(review): state_ has 9 features while S_INFO is 6 --
                # presumably tile.update_state reshapes/selects; confirm.
                state_ = [fov_tmp,
                          video_chunk_size / delay,
                          delay / M_IN_K,
                          next_video_chunk_sizes[0] / M_IN_K,
                          next_video_chunk_sizes[1] / M_IN_K,
                          next_video_chunk_sizes[2] / M_IN_K,
                          next_video_chunk_sizes[3] / M_IN_K,
                          buffer_size / BUFFER_NORM_FACTOR,
                          VIDEO_BIT_RATE[tiles[agent_id][i].quality] / float(np.max(VIDEO_BIT_RATE))]

                tiles[agent_id][i].update_state(state_)

                # Reward shaping: in-FoV tiles earn 100 at max quality, else a
                # bitrate-to-throughput utilization score scaled to 100;
                # out-of-FoV tiles earn 100 only at the lowest quality and are
                # penalized 25 per quality level otherwise. Rebuffering costs
                # 10 per unit of rebuf (presumably seconds -- confirm in env).
                if fov_tmp == 1:
                    if tiles[agent_id][i].quality == A_DIM - 1:
                        reward = 100
                    else:
                        reward = (VIDEO_BIT_RATE[tiles[agent_id][i].quality] / 1000) / (video_chunk_size / delay * 8) \
                                 * 100
                else:
                    if tiles[agent_id][i].quality == 0:
                        reward = 100
                    else:
                        reward = tiles[agent_id][i].quality * (-25)
                reward -= rebuf * 10

                tiles[agent_id][i].add_reward(reward)

                # Debug trace for a single (agent, tile) pair only.
                if agent_id == 0 and i == 1:
                    print('Network: ' + format(video_chunk_size / delay * 8, '.3f') + '\t' +
                          'Video: ' + format(VIDEO_BIT_RATE[tiles[agent_id][i].quality] / 1000, '.3f') + '\t' +
                          'Deviation: ' + format(deviation, '.3f') + '\t' +
                          'rebuf: ' + format(rebuf, '.3f') + '\t' +
                          'Reward: ' + format(reward, '.3f') + '\t' +
                          'Uti:' + format(VIDEO_BIT_RATE[tiles[agent_id][i].quality] / 1000
                                          / (video_chunk_size / delay * 8), '.3f') + '%\t' +
                          'Trace: ' + str(trace_idx))

                tiles[agent_id][i].last_quality = tiles[agent_id][i].quality
                tiles[agent_id][i].last_deviation = tiles[agent_id][i].deviation

                # compute action probability vector
                _, _, action_prob = actor.get_actor_out(
                    convert_torch(np.reshape(tiles[agent_id][i].state, (1, S_INFO, S_LEN))))
                action_prob = action_prob.cpu()

                # Sample the next chunk's quality from the policy distribution.
                m = Categorical(action_prob)
                tile_quality = m.sample().detach().item()

                entropy_record = a2c.compute_entropy(action_prob[0])
                tiles[agent_id][i].add_entropy(entropy_record)

                # log time_stamp, bit_rate, buffer_size, reward
                log_file.write(str(i) + '\t' +
                               format(time_stamp[i], '.3f') + '\t' +
                               str(VIDEO_BIT_RATE[tiles[agent_id][i].quality]) + '\t' +
                               format(buffer_size, '.3f') + '\t' +
                               format(rebuf, '.3f') + '\t' +
                               str(video_chunk_size) + '\t' +
                               format(delay, '.3f') + '\t' +
                               format(reward, '.3f') + '\n')
                log_file.flush()

                # store the state and action into batches
                if end_of_video:
                    tiles[agent_id][i].end_of_video()

                else:
                    tiles[agent_id][i].add_state(tiles[agent_id][i].state)
                    tiles[agent_id][i].add_action(tile_quality)
                    tiles[agent_id][i].add_old_pi(action_prob)

                # report experience to the coordinator

                if len(tiles[agent_id][i].r_batch) >= TRAIN_SEQ_LEN or end_of_video:
                    net_update[i] = 1
                    exp_queue[i].put([tiles[agent_id][i].s_batch[1:],  # ignore the first chuck
                                      tiles[agent_id][i].a_batch[1:],  # since we don't have the
                                      tiles[agent_id][i].r_batch[1:],  # control over it
                                      tiles[agent_id][i].old_pi_batch[1:],
                                      end_of_video,
                                      tiles[agent_id][i].entropy[1:]])

                    tiles[agent_id][i].clear_content()

                    # Once every tile has shipped a batch, re-sync the weights.
                    if np.sum(net_update) == TOTAL_TILES:
                        log_file.write('\n')  # so that in the log we know where video ends
                        # synchronize the network parameters from the coordinator
                        actor_net_params, critic_net_params = net_params_queue.get()
                        actor.load_state_dict(actor_net_params)
                        critic.load_state_dict(critic_net_params)
                        net_update = np.zeros(TOTAL_TILES)
                        ep += 1


def main():
    """Spawn the central coordinator and NUM_AGENTS worker processes and train.

    Builds the inter-process queues, starts one coordinator process running
    central_agent and NUM_AGENTS workers running agent, then waits for the
    coordinator to finish and stops the workers.
    """
    np.random.seed(RANDOM_SEED)
    assert len(VIDEO_BIT_RATE) == A_DIM

    # Start with a clean result directory each run.
    if os.path.exists(SUMMARY_DIR):
        shutil.rmtree(SUMMARY_DIR)
    os.makedirs(SUMMARY_DIR, exist_ok=True)

    # Inter-process communication queues:
    #   net_params_queues[i]  -- coordinator -> agent i (model weights)
    #   exp_queues[i][j]      -- agent i, tile j -> coordinator (experience)
    net_params_queues = []
    exp_queues = []
    for i in range(NUM_AGENTS):
        exp_queues.append([mp.Queue(1) for _ in range(TOTAL_TILES)])
        net_params_queues.append(mp.Queue(1))

    # create a coordinator and multiple agent processes
    # (note: threading is not desirable due to python GIL)
    coordinator = mp.Process(target=central_agent,
                             args=(net_params_queues, exp_queues))
    coordinator.start()

    all_cooked_time, all_cooked_bw, _ = load_data.load_multi_throughput()
    all_FoV_time, all_FoV_pos = load_data.load_multi_fov()
    agents = []
    for i in range(NUM_AGENTS):
        agents.append(mp.Process(target=agent,
                                 args=(i, all_cooked_time, all_cooked_bw,
                                       all_FoV_time, all_FoV_pos,
                                       net_params_queues[i],
                                       exp_queues[i])))
    for i in range(NUM_AGENTS):
        agents[i].start()

    # Wait until training is done, then stop the workers: agent() loops
    # forever, so without terminate() the workers would be left as orphan
    # processes after the coordinator exits.
    coordinator.join()
    for worker in agents:
        worker.terminate()
        worker.join()


if __name__ == '__main__':
    main()
