import datetime
import logging
import logging.config
import os
import subprocess
import sys
import threading
import time

import tensorflow as tf
from loguru import logger
from torch.utils.tensorboard import SummaryWriter

try:
    from StringIO import StringIO  # Python 2.7
except ImportError:
    from io import BytesIO  # Python 3.x


def get_project_path():
    """
    Return the project root directory: the parent of the directory that
    contains this file. (Unlike os.getcwd(), this does not depend on the
    current working directory.)
    :return: absolute path of the project root
    """
    this_file = os.path.abspath(__file__)
    return os.path.dirname(os.path.dirname(this_file))


def get_tensorboard_writer(model_name: str):
    """
    Create a torch SummaryWriter for this run and launch TensorBoard in a
    background thread pointed at the same log directory.

    :param model_name: prefix for the run directory name under ../output
    :return: SummaryWriter writing to ../output/<model_name><timestamp>
    """
    log_root_path = os.path.join("..", "output")
    current_time = datetime.datetime.now().strftime('%b%d_%H_%M_%S')
    log_path = os.path.join(log_root_path, model_name + current_time)
    # Bug fix: the original condition was inverted (it called mkdir only
    # when the directory already existed). Create it when it is missing,
    # including any missing parents.
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    logs_writer = SummaryWriter(log_dir=log_path)

    def inner_function(order):
        # Bug fix: pass the command as an argument list (shell=False).
        # Passing a single string with arguments only works on Windows.
        subprocess.run(order)

    run_order = ["tensorboard", "--logdir=" + log_path]
    # daemon=True so this helper thread never blocks interpreter shutdown
    # (tensorboard runs until killed and subprocess.run would never return).
    thread1 = threading.Thread(target=inner_function, kwargs={"order": run_order}, daemon=True)
    thread1.start()
    my_logger.info(threading.current_thread().name + ' start to run tensorboard')

    return logs_writer


def get_logger_loguru():
    """
    Return the global loguru logger. loguru exposes exactly one logger
    object for the whole process, so every caller gets the same instance.
    :return: the shared loguru logger
    """
    # logger.add(sys.stdout, format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}")
    return logger

def get_file_logger_loguru(log_file_path):
    """
    Attach a file sink to the global loguru logger and return the logger.

    Because loguru keeps a single process-wide logger, every call returns
    the SAME object and each call ADDS another sink. For example:

        logger1 = get_file_logger_loguru("test_log1.txt")
        logger1.info("hello")
        logger2 = get_file_logger_loguru("test_log2.txt")
        logger2.info("hello world")
        print(id(logger1))
        print(id(logger2))

    prints identical ids, and "hello world" is written to BOTH files.

    :param log_file_path: path of the file to append log records to
    :return: the shared loguru logger
    """
    shared_logger = logger
    shared_logger.add(log_file_path)
    return shared_logger


def get_logger(name='Shuai'):
    """
    Return a stdlib logger configured for INFO-level console output.

    logging.getLogger caches loggers by name, so repeated calls with the
    same name return the same object (per-name singleton). Note that
    logging.basicConfig only takes effect the first time it runs in a
    process; later calls are no-ops.

    :param name: logger name
    :return: logging.Logger instance
    """
    # conf_log = os.path.abspath(os.path.join(os.curdir, "..", "resource", "logger_config.ini"))
    # logging.matlab_config.fileConfig(conf_log)
    log_format = "[%(asctime)s] %(levelname)s: %(message)s"
    logging.basicConfig(level=logging.INFO, format=log_format)
    return logging.getLogger(name)


# Module-level shared logger (named "Shuai"); used by get_tensorboard_writer above.
my_logger = get_logger("Shuai")


class TensorBoardLogger(object):
    """Thin wrapper around a TensorFlow 2.x summary writer.

    Bug fix: the original mixed TF1 usage (``writer.add_summary``) with the
    TF2 writer returned by ``tf.summary.create_file_writer``, which has no
    ``add_summary`` method — every logging call raised AttributeError.
    Summaries are now emitted inside the writer's ``as_default()`` context
    with an explicit ``step``, as the TF2 API requires. The dead TF1
    image/histogram code has been removed.
    """

    def __init__(self, log_dir):
        """Create a summary writer logging to log_dir."""
        self.writer = tf.summary.create_file_writer(log_dir)

    def scalar_summary(self, tag, value, step):
        """Log a scalar variable under `tag` at the given global step."""
        with self.writer.as_default():
            tf.summary.scalar(tag, value, step=step)
        self.writer.flush()

    def image_summary(self, tag, images, step):
        """Log a batch of images under `tag` at the given global step.

        NOTE(review): tf.summary.image expects a 4-D batch tensor
        [k, height, width, channels] — confirm callers pass that shape.
        """
        with self.writer.as_default():
            tf.summary.image(tag, images, step=step)
        self.writer.flush()


class Logger():
    """
    Console progress logger for epoch/batch training loops.
    This class is copied from JIGAN.
    """

    def __init__(self, env_name, ports, n_epochs, batches_epoch):
        """
        :param env_name: unused here; kept for interface compatibility
        :param ports: unused here; kept for interface compatibility
        :param n_epochs: total number of epochs the run will perform
        :param batches_epoch: number of batches per epoch
        """
        self.n_epochs = n_epochs
        self.batches_epoch = batches_epoch
        self.epoch = 1
        self.batch = 1
        self.prev_time = time.time()
        self.mean_period = 0  # cumulative wall-clock time across log() calls
        self.losses = {}  # running per-loss sums for the current epoch
        self.loss_windows = {}
        self.image_windows = {}

    def log(self, losses=None, images=None):
        """
        Write a one-line progress/ETA report to stdout and accumulate losses.

        Bug fix: the original raised AttributeError when called with its own
        default ``losses=None`` (it did ``losses.keys()`` unconditionally);
        an empty losses mapping is now treated as "nothing to report".
        Generalization (backward compatible): loss values may be tensors
        (anything exposing ``.item()``) or plain numbers.

        :param losses: mapping of loss name -> tensor or number (optional)
        :param images: unused here; kept for interface compatibility
        """
        now = time.time()
        self.mean_period += now - self.prev_time
        self.prev_time = now

        sys.stdout.write(
            '\rEpoch %03d/%03d [%04d/%04d] -- ' % (self.epoch, self.n_epochs, self.batch, self.batches_epoch))

        losses = losses or {}
        for i, (loss_name, raw_value) in enumerate(losses.items()):
            # Accept tensors (via .item()) as well as plain floats/ints.
            value = raw_value.item() if hasattr(raw_value, 'item') else float(raw_value)
            self.losses[loss_name] = self.losses.get(loss_name, 0.0) + value

            if (i + 1) == len(losses):
                sys.stdout.write('%s:%.4f -- ' % (loss_name, self.losses[loss_name] / self.batch))
            else:
                sys.stdout.write('%s:%.4f| ' % (loss_name, self.losses[loss_name] / self.batch))

        batches_done = self.batches_epoch * (self.epoch - 1) + self.batch
        batches_left = self.batches_epoch * (self.n_epochs - self.epoch) + self.batches_epoch - self.batch
        sys.stdout.write('ETA: %s' % (datetime.timedelta(seconds=batches_left * self.mean_period / batches_done)))

        # End of epoch: zero the accumulated losses and roll the counters.
        if (self.batch % self.batches_epoch) == 0:
            for loss_name in self.losses:
                self.losses[loss_name] = 0.0
            self.epoch += 1
            self.batch = 1
            sys.stdout.write('\n')
        else:
            self.batch += 1
