from torch.utils.tensorboard import SummaryWriter
from tensorboard.backend.event_processing import event_accumulator
import os
import fnmatch
import shutil
import numpy as np

class logger:
    """Thin wrapper around torch's SummaryWriter.

    The run directory name encodes the experiment configuration
    (algorithm, env, seed, critic ensemble sizes, UTD ratio, ...).
    When ``args.label == 'debug'`` every method is a no-op so debug
    runs leave no artifacts on disk.
    """

    def __init__(self, args):
        # args is expected to carry: label, alg, env, seed, Qnums, M,
        # UTD, policynum, lambd, batchsize, dir — TODO confirm against caller.
        self.label = args.label
        if self.label == 'debug':
            # Debug mode: no writer is created; add_logname/close bail out early.
            return
        # NOTE: 'Qnmus' (sic) and the missing '-' before 'batchsize_' are kept
        # as-is — cal_mean_std's glob pattern must match this exact layout.
        output_dir = (
            f'./logger/algname_{args.alg}-envname_{args.env}-seed_{args.seed}-'
            f'Qnmus_{args.Qnums}-minqnum_{args.M}-UTD_{args.UTD}-'
            f'policynum_{args.policynum}-{args.lambd}batchsize_{args.batchsize}-'
            f'{args.label}'
        )
        # An explicit args.dir overrides the derived name.
        self.output_dir = args.dir or output_dir
        # Fix: only wipe the derived directory when it is actually going to be
        # reused; the original deleted it even when args.dir pointed elsewhere.
        if not args.dir and os.path.exists(output_dir):
            shutil.rmtree(output_dir)
        self.writer = SummaryWriter(self.output_dir)

    def add_logname(self, var_name, value, global_step):
        """Log one scalar (or a dict of scalars) under ``var_name`` at ``global_step``."""
        if self.label == 'debug':
            return
        if isinstance(value, dict):
            self.writer.add_scalars(var_name, value, global_step)
        else:
            self.writer.add_scalar(var_name, value, global_step)

    def close(self):
        """Flush and close the underlying writer (no-op in debug mode)."""
        if self.label == 'debug':
            return
        self.writer.close()


def cal_mean_std(args):
    """Aggregate 'reward/test_reward' curves across seeds of one configuration.

    Scans ``./logger`` for every run directory matching the configuration in
    ``args`` (any seed), truncates all curves to the shortest one, and writes
    the per-step mean and std to a new run whose seed field is ``'mean_std'``.
    Returns None; does nothing when no matching runs are found.
    """

    def _run_name(seed):
        # Must byte-for-byte match the directory layout produced by logger
        # (including the 'Qnmus' typo and missing '-' before 'batchsize_').
        return (
            f'algname_{args.alg}-envname_{args.env}-seed_{seed}-'
            f'Qnmus_{args.Qnums}-minqnum_{args.M}-UTD_{args.UTD}-'
            f'policynum_{args.policynum}-{args.lambd}batchsize_{args.batchsize}-'
            f'{args.label}'
        )

    # Fix: bail out gracefully when no runs have been logged yet
    # (the original crashed with FileNotFoundError on os.listdir).
    if not os.path.isdir('./logger'):
        return

    # Remove a stale aggregate from a previous invocation so it is neither
    # appended to nor picked up by the seed glob below.
    stale = './logger/' + _run_name('mean_std')
    if os.path.exists(stale):
        shutil.rmtree(stale)

    pattern = _run_name('*')
    event_datas = []
    for f_name in os.listdir('./logger'):
        if fnmatch.fnmatch(f_name, pattern):
            print(f_name)
            run_dir = './logger/' + f_name
            # Assumes exactly one event file per run directory — TODO confirm.
            event_name = os.listdir(run_dir)[0]
            ea = event_accumulator.EventAccumulator(run_dir + '/' + event_name)
            ea.Reload()
            event_datas.append(ea.scalars)

    if not event_datas:
        return

    curves = [
        [item.value for item in scalars.Items('reward/test_reward')]
        for scalars in event_datas
    ]
    # Seeds may have logged different numbers of evaluations; align on the
    # shortest curve before stacking.
    min_len = min(len(curve) for curve in curves)
    data_all = np.array([curve[:min_len] for curve in curves])
    data_mean = data_all.mean(axis=0)
    data_std = data_all.std(axis=0)

    # NOTE(review): mutates the caller's args permanently; the aggregate
    # logger derives its directory name from this sentinel seed.
    args.seed = 'mean_std'
    log = logger(args)
    for i in range(data_mean.size):
        log.add_logname("testreward/mean", data_mean[i], i)
        log.add_logname("testreward/std", data_std[i], i)
    # Fix: close the writer so the aggregate scalars are flushed to disk
    # (the original leaked the writer and could lose the tail of the data).
    log.close()
    
