# -*- coding: UTF-8 -*-
import torch
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from object import delay
import env2 as environment
import config
import copy
import sys,argparse,random

def greedy_lyr(obs, uid, curseed):
    """Greedily choose the edge node minimising the layer-download size for user `uid`.

    Falls back to a random feasible candidate — or the cloud node
    (config.EDGE_NODE_NUM) — when the greedy choice violates a constraint.

    Args:
        obs: environment observation dict (user/edge locations, layer maps, ...).
        uid: id of the user whose task is being placed.
        curseed: seed for the random fallback, for reproducibility.

    Returns:
        The chosen edge-node index, or config.EDGE_NODE_NUM for the cloud.
    """
    random.seed(curseed)

    # Location of this user (the task shares the user's location).
    ux = [j for i, j, k in obs['each_use_loc'] if i == uid][0]
    uy = [k for i, j, k in obs['each_use_loc'] if i == uid][0]

    # Indices of currently-available edge nodes; the last action slot is the
    # cloud and is excluded from the greedy candidates.
    available_actions = obs['next_can']
    can = [idx for idx, item in enumerate(available_actions[:-1]) if item == 1.0]

    # User out of coverage or no edge available -> schedule to the cloud.
    if ux == np.inf or uy == np.inf or len(can) == 0:
        choose_edge = config.EDGE_NODE_NUM
        print('choose_edge', choose_edge)
        return choose_edge

    # Layers required by this user's task.
    one_ulyr = [j for i, j in obs['each_usrhas_lyer'] if i == uid][0]

    # For each candidate edge, sum the sizes of the layers it still lacks.
    layer_download_size = {}
    for edge_id in can:
        tmp_downsize = 0
        for item in one_ulyr:
            if item not in obs['each_edgehas_lyr'][edge_id]:
                tmp_downsize += obs['all_layer_downsiz'][item]
        layer_download_size[edge_id] = tmp_downsize

    # Greedy pick: smallest download cost. `== False` (not `not ...`) is
    # intentional: satisfied_constrain may return None on success.
    choose_edge = min(layer_download_size, key=lambda x: layer_download_size[x])
    if satisfied_constrain(choose_edge, uid, obs) == False:
        choose_edge = None
    # BUGFIX: the original additionally re-checked `edge_id` here — the *last*
    # loop variable, not the chosen edge — which could wrongly discard an
    # already-validated choice. That spurious check is removed.

    if choose_edge is None:
        # Random fallback over all candidates, cloud node included.
        can.append(config.EDGE_NODE_NUM)
        choose_edge = random.choice(can)
        count = 0
        while satisfied_constrain(choose_edge, uid, obs) == False:
            choose_edge = random.choice(can)
            count += 1
            if count >= 5:
                # Give up after a few tries and push the task to the cloud.
                choose_edge = config.EDGE_NODE_NUM
                break

    return choose_edge
        

def satisfied_constrain(edge_id, uidre, obs=None):
    """Check whether placing user `uidre`'s task on node `edge_id` is feasible.

    Constraints checked (in order): transmission range, container-count limit,
    per-edge user limit, and remaining disk after the task + missing layers.
    The cloud node (config.EDGE_NODE_NUM) is always feasible.

    Args:
        edge_id: candidate edge-node index (or config.EDGE_NODE_NUM for cloud).
        uidre: id of the user whose task is being placed.
        obs: environment observation dict; required despite the None default.

    Returns:
        True if all constraints hold, False otherwise.
    """
    # original from baseline.py
    one_ulyr = [j for i, j in obs['each_usrhas_lyer'] if i == uidre][0]
    if edge_id == config.EDGE_NODE_NUM:
        # Cloud placement is never constrained.
        return True
    ux = [j for i, j, k in obs['each_use_loc'] if i == uidre][0]
    uy = [k for i, j, k in obs['each_use_loc'] if i == uidre][0]
    onelocx, onelocy = [(i, j) for k, i, j in obs['each_edge_loc'] if k == edge_id][0]
    d = delay.bts_distance(ux, uy, onelocx, onelocy)
    if d > config.TRANSMISSION_RANGE:
        print('edge {} trans_d is {} > threshold.'.format(edge_id, d))
        return False
    # Container-count limit on the edge node.
    edge_con = [d for a, b, c, d, f in obs['each_edge_cpumemcondisk'] if a == edge_id][0]
    if config.node_max_container_number - edge_con - 1 < 0:
        print('edge {} container num is {} > 10.'.format(edge_id, edge_con))
        return False
    # edge_usrnum_limit
    if config.node_usrnum_limit - obs['each_edge_usrnum'][edge_id] - 1 < 0:
        print('edge {} has user num {} > 50.'.format(edge_id, obs['each_edge_usrnum'][edge_id]))
        return False
    # edge_storage_free - task_size - download_size
    tmp_downsize = 0
    for item in one_ulyr:
        if item not in obs['each_edgehas_lyr'][edge_id]:
            tmp_downsize += obs['all_layer_downsiz'][item]

    edge_disk = [f for a, b, c, d, f in obs['each_edge_cpumemcondisk'] if a == edge_id][0]

    usr_has_tsksiz = [k for i, j, k in obs['each_usrhas_task'] if i == uidre][0]
    if edge_disk - usr_has_tsksiz - tmp_downsize < 0:
        print('edge {a} has disk {b} - user_tsk_size:{c} - layer_download_size:{d} < 0.'.format(a=edge_id, b=edge_disk, c=usr_has_tsksiz, d=tmp_downsize))
        return False

    # BUGFIX: the original fell off the end here and implicitly returned None
    # on success; callers compared `== False`, so it worked by accident.
    # Return True explicitly so truthiness-based callers also behave.
    return True


def get_agent(policy=''):
    """Resolve a scheduling-policy name to its agent function.

    Exits the process with status 1 on an unknown policy name.
    """
    if policy == 'layer':
        return greedy_lyr
    print("--> error: unknown scheduling policy specified")
    sys.exit(1)

def run_funcs(args, policy):
    """Run `args.episodes` evaluation epochs of `policy` and log per-task metrics.

    Each epoch resets the environment, steps the greedy agent until done,
    accumulates delay/download details, and writes per-task averages to
    TensorBoard.

    Args:
        args: parsed CLI namespace (uses .episodes; .seed is overwritten below).
        policy: scheduling-policy name passed to get_agent().
    """
    env = environment.Env()

    task_num = config.TASK_NUM
    epo = 0
    total_steps = 0
    total_epochs = 0

    rewards_list = []
    transdelay_list = []
    migdelay_list = []
    compdelay_list = []
    trans_detail = []
    transdown_detail = []
    mig_detail = []
    migdown_detail = []
    tmp_total_download_size_detail = []
    tmp_computation_time_detail = []
    tmp_pending_download_time_detail = []
    tmp_total_download_time_detail = []

    agent = get_agent(policy)

    # Build a tensorboard writer for this run.
    path = './runs/out2000/env_{}_number_{}_ver_{}'.format('see', '1', policy)
    writer = SummaryWriter(log_dir=path)

    # One iteration per epoch (episode), up to args.episodes.
    while total_epochs < args.episodes:
        # NOTE(review): this overwrites the --seed CLI value with config.seed
        # every epoch, so all epochs replay the same seed — confirm intended.
        args.seed = config.seed
        env.seed(args.seed)
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)

        obs, _ = env.reset()

        init_uid = obs['next_uid']
        episode_steps = 0

        # Per-epoch accumulators.
        ep_reward_ = 0
        ep_trans = 0
        ep_transdown = 0
        ep_mig = 0
        ep_migdown = 0
        ep_tskcomp = 0
        ep_tsktrans = 0
        ep_tskmig = 0

        ep_total_download_size = 0
        ep_computation_time = 0
        ep_pending_download_time = 0
        ep_total_download_time = 0
        done = False

        while not done:

            a = agent(obs, init_uid, args.seed)
            obs_, reward, done, details = env.step(a)
            print('total-steps: ', total_steps, ' choosen action: ', a)

            init_uid = obs_['next_uid']
            obs = obs_
            # Environment returns a cost; negate so larger reward = better.
            r = -reward

            ep_reward_ += r
            ep_trans += details[0]
            ep_transdown += details[1]
            ep_mig += details[2]
            ep_migdown += details[3]
            ep_tskcomp += details[4]
            ep_tsktrans += details[5]
            ep_tskmig += details[6]

            # details[-1] = [total_download_size, computation_time,
            #                pending_download_time, total_download_time]
            ep_total_download_size += details[-1][0]
            ep_computation_time += details[-1][1]
            ep_pending_download_time += details[-1][2]
            ep_total_download_time += details[-1][3]

            # CLEANUP: removed a dead `dw = done` flag that was computed here
            # but never used.

            episode_steps += 1
            total_steps += 1  # episode

        epo += 1

        # Per-task averages. CLEANUP: the original wrapped each numerator in
        # copy.copy(), which is a no-op on immutable numbers.
        tmp_r = ep_reward_ / task_num
        tmp_trans = ep_trans / task_num
        tmp_transdown = ep_transdown / task_num
        tmp_mig = ep_mig / task_num
        tmp_migdown = ep_migdown / task_num
        tmp_tskcomp = ep_tskcomp / task_num
        tmp_tsktrans = ep_tsktrans / task_num
        tmp_tskmig = ep_tskmig / task_num
        tmp_total_download_size = ep_total_download_size / task_num
        tmp_computation_time = ep_computation_time / task_num
        tmp_pending_download_time = ep_pending_download_time / task_num
        tmp_total_download_time = ep_total_download_time / task_num

        rewards_list.append(tmp_r)
        transdelay_list.append(tmp_tsktrans)
        migdelay_list.append(tmp_tskmig)
        compdelay_list.append(tmp_tskcomp)
        trans_detail.append(tmp_trans)
        transdown_detail.append(tmp_transdown)
        mig_detail.append(tmp_mig)
        migdown_detail.append(tmp_migdown)

        tmp_total_download_size_detail.append(tmp_total_download_size)
        tmp_computation_time_detail.append(tmp_computation_time)
        tmp_pending_download_time_detail.append(tmp_pending_download_time)
        tmp_total_download_time_detail.append(tmp_total_download_time)

        writer.add_scalar('reward/1epoch_reward:', tmp_r, global_step=total_epochs)
        writer.add_scalar('detail/trans:', tmp_trans, global_step=total_epochs)
        writer.add_scalar('detail/transdown:', tmp_transdown, global_step=total_epochs)
        writer.add_scalar('detail/mig:', tmp_mig, global_step=total_epochs)
        writer.add_scalar('detail/migdown:', tmp_migdown, global_step=total_epochs)
        writer.add_scalar('delay/tskcomp:', tmp_tskcomp, global_step=total_epochs)
        writer.add_scalar('delay/init:', tmp_tsktrans, global_step=total_epochs)
        writer.add_scalar('delay/mig:', tmp_tskmig, global_step=total_epochs)

        writer.add_scalar('consisting/totalDownSize:', tmp_total_download_size, global_step=total_epochs)
        writer.add_scalar('consisting/computation:', tmp_computation_time, global_step=total_epochs)
        writer.add_scalar('consisting/pendingDownloadTime:', tmp_pending_download_time, global_step=total_epochs)
        writer.add_scalar('consisting/totalDownTime:', tmp_total_download_time, global_step=total_epochs)

        total_epochs += 1
        

if __name__ == '__main__':
    # CLI entry point: parse options, then run each configured policy once.
    cli = argparse.ArgumentParser(description='baseline')
    cli.add_argument('--user', type=int, default=config.USER_NUM, metavar='T', help='user number')
    cli.add_argument('--generator', type=str, metavar='G', default='zipf')
    cli.add_argument('--seed', type=int, metavar='S', default=config.seed)
    cli.add_argument("--steps", type=int, default=int(1e6), help=" Maximum number of training steps")
    cli.add_argument("--episodes", type=int, default=int(1e3), help=" Maximum number of epochs")
    parsed_args = cli.parse_args()

    for scheduling_policy in ['layer']:
        run_funcs(parsed_args, scheduling_policy)