import sys
import os
import logging
import torch.multiprocessing as mp
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import a2c_FoV as a2c_torch
import env_multi_tile as env
import load_data
from train import tileSet

S_INFO = 6  # bit_rate, buffer_size, next_chunk_size, bandwidth_measurement(throughput and time), chunk_til_video_end
S_LEN = 8  # take how many frames in the past
A_DIM = 4  # number of selectable bitrate levels per tile
ACTOR_LR_RATE = 0.0001
CRITIC_LR_RATE = 0.001
VIDEO_BIT_RATE = load_data.load_bitrates('tiledSkiing')  # load the video bitrate ladder
BUFFER_NORM_FACTOR = 10.0
CHUNK_TIL_VIDEO_END_CAP = 200
M_IN_K = 1000.0
DEVIATION_THRESHOLD = 0.33  # head-position deviation below this counts as "in view" (see main)
QUALITY_FACTOR = 0.001 * 5
REBUF_PENALTY = 5
SMOOTH_PENALTY = 0
DEFAULT_QUALITY = 1  # default video quality without agent
RANDOM_SEED = 30
RAND_RANGE = 1000
LOG_FILE = './test_results/log_sim_rl'
# TEST_TRACES = './cooked_test_traces/'
# log in format of time_stamp bit_rate buffer_size rebuffer_time chunk_size download_time reward

# Fix: validate the command line up front. The previous bare indexing raised
# IndexError when arguments were missing, which made the `NN_MODEL is not None`
# check inside main() dead code.
if len(sys.argv) < 3:
    sys.exit('Usage: python <script> <model_checkpoint_path> <total_tiles>')
NN_MODEL = sys.argv[1]  # path to the trained actor checkpoint
TOTAL_TILES = int(sys.argv[2])  # number of tiles in the layout


def convert_torch(variable, dtype=np.float32):
    """Wrap a NumPy array as a torch tensor, casting to `dtype` when needed.

    `np.asarray` leaves the array untouched (and memory-shared with the
    resulting tensor) when it already has the requested dtype, and copies
    it otherwise — the same behavior as an explicit `astype` on mismatch.
    """
    return torch.from_numpy(np.asarray(variable, dtype=dtype))


def main():
    """Replay every test trace through the trained actor and log its decisions.

    Expects NN_MODEL (checkpoint path) and TOTAL_TILES (tile count) to have
    been parsed from the command line at import time. For each downloaded tile
    chunk, computes a reward, writes one tab-separated log line, and samples
    the next bitrate from the actor's output distribution.
    """
    # One tile object per tile; the 6-tile layout ships with its own
    # segment-size table, other layouts use the multi-tile loader.
    tiles = []
    for i in range(TOTAL_TILES):
        if TOTAL_TILES == 6:
            tile_tmp = tileSet.tile(i, load_data.load_tiled_segment_size('tiledSkiing', i), TOTAL_TILES)
        else:
            tile_tmp = tileSet.tile(i, load_data.load_multi_tiled_segment_size('tiledSkiing', i), TOTAL_TILES)
        tiles.append(tile_tmp)

    np.random.seed(RANDOM_SEED)

    assert len(VIDEO_BIT_RATE) == A_DIM

    all_cooked_time, all_cooked_bw, all_file_names = load_data.load_multi_throughput()
    all_FoV_time, all_FoV_pos = load_data.load_multi_fov()

    net_env = env.Environment(all_cooked_time=all_cooked_time,
                              all_cooked_bw=all_cooked_bw,
                              all_FoV_time=all_FoV_time,
                              all_FoV_pos=all_FoV_pos)

    log_path = LOG_FILE + '_' + all_file_names[net_env.trace_idx]
    log_file = open(log_path, 'w')

    actor = a2c_torch.ActorNet(s_dim=[S_INFO, S_LEN], a_dim=A_DIM,
                               lr=ACTOR_LR_RATE)

    # restore neural net parameters
    if NN_MODEL is not None:  # NN_MODEL is the path to file
        print(NN_MODEL)
        actor.load_state_dict(torch.load(NN_MODEL))
        print("Testing model restored.")
    actor.eval()  # inference only: freeze any dropout/batch-norm behavior

    # Fix: size the per-tile clock by the actual tile count instead of the
    # hard-coded 24 used previously.
    time_stamp = np.zeros(TOTAL_TILES)
    video_count = 0

    while True:  # serve video forever
        # the action is from the last decision
        # this is to make the framework similar to the real
        for i in range(TOTAL_TILES):
            delay, deviation, sleep_time, buffer_size, rebuf, \
            video_chunk_size, next_video_chunk_sizes, \
            end_of_video, video_chunk_remain, _ = \
                net_env.get_video_chunk(tiles[i])
            time_stamp[i] += delay  # in ms
            time_stamp[i] += sleep_time  # in ms

            # A tile counts as "in view" when the head-position deviation
            # stays below the threshold.
            fov_tmp = 0
            if deviation < DEVIATION_THRESHOLD:
                fov_tmp = 1

            state_ = [fov_tmp,
                      video_chunk_size / delay,
                      delay / M_IN_K,
                      next_video_chunk_sizes[0] / M_IN_K,
                      next_video_chunk_sizes[1] / M_IN_K,
                      next_video_chunk_sizes[2] / M_IN_K,
                      next_video_chunk_sizes[3] / M_IN_K,
                      buffer_size / BUFFER_NORM_FACTOR,
                      VIDEO_BIT_RATE[tiles[i].quality] / float(np.max(VIDEO_BIT_RATE))]

            # Inside the field of view
            if fov_tmp == 1:
                # highest quality: maximum reward
                if tiles[i].quality == A_DIM - 1:
                    reward = 100
                # other qualities: reward scales with the chosen bitrate
                # relative to the measured throughput
                else:
                    reward = (VIDEO_BIT_RATE[tiles[i].quality] / 1000) / (video_chunk_size / delay * 8) \
                             * 100
            # Outside the field of view
            else:
                # lowest quality: maximum reward
                if tiles[i].quality == 0:
                    reward = 100
                # other qualities: penalty grows with bitrate spent on an
                # invisible tile
                else:
                    reward = tiles[i].quality * (-25)
            reward -= rebuf * 10

            tiles[i].add_reward(reward)

            tiles[i].last_quality = tiles[i].quality
            tiles[i].last_deviation = tiles[i].deviation

            # log time_stamp, bit_rate, buffer_size, reward
            log_file.write(str(time_stamp[i] / M_IN_K) + '\t' +
                           str(i) + '\t' +
                           str(VIDEO_BIT_RATE[tiles[i].quality]) + '\t' +
                           format(buffer_size, '.3f') + '\t' +
                           format(rebuf, '.3f') + '\t' +
                           str(video_chunk_size) + '\t' +
                           format(delay, '.3f') + '\t' +
                           format(reward, '.3f') + '\n')
            log_file.flush()

            # Fix: push the new observation exactly once. The original called
            # add_state() both before and after the forward pass, so every
            # frame occupied two slots of the S_LEN history window.
            tiles[i].add_state(state_)

            # Inference only: no_grad avoids building an autograd graph and
            # lets .numpy() run on the output tensor.
            with torch.no_grad():
                _, _, action_prob = actor.get_actor_out(
                    convert_torch(np.reshape(tiles[i].state, (1, S_INFO, S_LEN))))
            action_prob = action_prob.numpy()
            # Discretize the probability into 1/RAND_RANGE steps and sample
            # from the CDF, because there is an intrinsic discrepancy in
            # passing single state and batch states.
            action_cumsum = np.cumsum(action_prob)
            bit_rate = (action_cumsum > np.random.randint(1, RAND_RANGE) / float(RAND_RANGE)).argmax()

            tiles[i].add_action(bit_rate)
            entropy_record = a2c_torch.compute_entropy(action_prob[0])
            tiles[i].add_entropy(entropy_record)

            if end_of_video:

                tiles[i].clear_content()

                # Rotate the log once per video, after the LAST tile finishes.
                # Fix: use TOTAL_TILES - 1 instead of the hard-coded 5 so the
                # rotation (and termination) also happens for layouts with a
                # tile count other than 6.
                if i == TOTAL_TILES - 1:
                    log_file.write('\n')
                    log_file.close()
                    video_count += 1

                    # Every trace has been replayed: leave the tile loop.
                    if video_count >= len(all_file_names):
                        break

                    log_path = LOG_FILE + '_' + all_file_names[net_env.trace_idx]
                    log_file = open(log_path, 'w')
        else:
            # The for-loop completed without breaking: keep serving chunks.
            continue
        break  # reached only via the inner break — all traces are done


# Script entry point: run the evaluation loop when executed directly.
if __name__ == '__main__':
    main()
