import os
import logging
# from time import time
import time
import sys
import torch
from logging import handlers

def get_logger1():
    """Return the root logger configured to print INFO+ records to the console.

    :return: the root ``logging.Logger``, level set to INFO.

    Fixes the original defect of attaching a fresh StreamHandler on every
    call, which made each log line print once per call to this function.
    """
    logger = logging.getLogger()
    # Attach our console handler only once; a marker attribute identifies it
    # so repeated calls (or other handlers added elsewhere) are not affected.
    if not any(getattr(h, "_from_get_logger1", False) for h in logger.handlers):
        handler = logging.StreamHandler()
        formatter = logging.Formatter("%(asctime)s %(levelname)s \t%(message)s")
        handler.setFormatter(formatter)
        handler._from_get_logger1 = True
        logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    return logger


def get_logger(filename):
    """Return the root logger with a file handler writing to ./Logs/<filename>.log.

    :param filename: base name of the log file; ".log" is appended.
    :return: the root ``logging.Logger``, level set to INFO.
    """
    logger = logging.getLogger()
    # Create a Logs/ folder next to this source file if it does not exist.
    log_path = os.path.dirname(os.path.realpath(__file__)) + '/Logs/'
    ensure_folder(log_path)

    log_name = log_path + filename + '.log'

    # Attach a handler for this particular file only once: calling
    # get_logger twice with the same name used to duplicate every record.
    abs_name = os.path.abspath(log_name)
    already_attached = any(
        isinstance(h, logging.FileHandler)
        and getattr(h, 'baseFilename', None) == abs_name
        for h in logger.handlers
    )
    if not already_attached:
        # utf-8 so non-ASCII messages do not depend on the platform locale.
        fh = logging.FileHandler(log_name, mode='a+', encoding='utf-8')
        formatter = logging.Formatter("%(asctime)s-[line:%(lineno)d] - %(levelname)s: %(message)s")
        fh.setFormatter(formatter)
        logger.addHandler(fh)
    logger.setLevel(logging.INFO)
    return logger


# Logging helper class
class Logger(object):
    """Console + timed-rotating-file logger wrapper.

    Creates a logger named after the file that writes both to the console
    and to ./Logs/<filename>, rotating the file on the given interval.
    """

    # Mapping from level name to the stdlib logging level.
    level_relations = {
        "debug": logging.DEBUG,
        "info": logging.INFO,
        "warning": logging.WARNING,
        "error": logging.ERROR,
        "critical": logging.CRITICAL
    }

    def __init__(self, filename="test.log", level="info", when="D", backupCount=3,
                 fmt="%(asctime)s - [line:%(lineno)d] - %(message)s"):
        """
        :param filename: log file name, created under ./Logs/ next to this file.
        :param level: one of the ``level_relations`` keys; unknown values now
            fall back to INFO instead of crashing ``setLevel(None)``.
        :param when: rotation interval unit for TimedRotatingFileHandler ("D" = daily).
        :param backupCount: rotated files kept before the oldest is deleted.
        :param fmt: log record format string.
        """
        # Shared formatter for both console and file output.
        format_str = logging.Formatter(fmt)
        # Console handler.
        streamHandler = logging.StreamHandler()
        streamHandler.setFormatter(format_str)
        # File handler: rotate every `when` interval, keep `backupCount` backups.
        log_path = os.path.dirname(os.path.realpath(__file__)) + '/Logs/'
        ensure_folder(log_path)
        log_name = log_path + filename

        fileHandler = handlers.TimedRotatingFileHandler(filename=log_name, when=when, backupCount=backupCount,
                                                        encoding="utf-8")
        fileHandler.setFormatter(format_str)

        self.logger = logging.getLogger(filename)
        # .get() with a default: the original could pass None to setLevel()
        # for an unrecognized level string, which raises TypeError.
        self.logger.setLevel(self.level_relations.get(level, logging.INFO))
        # Only attach handlers the first time a Logger is built for this
        # filename; re-instantiating used to stack duplicates and repeat
        # every record in the output.
        if not self.logger.handlers:
            self.logger.addHandler(streamHandler)
            self.logger.addHandler(fileHandler)


# print('hello')

def myNMS(netout, iouthre=0.6, useMinIou=False):
    """Greedy non-maximum suppression over detection rows.

    :param netout: tensor of shape (N, >=5); column 0 is the confidence
        score and columns 1:5 the box corners (x0, y0, x1, y1)
        (assumed from the slicing below — TODO confirm against the caller).
    :param iouthre: remaining boxes whose IoU with the kept box exceeds
        this threshold are discarded.
    :param useMinIou: forwarded to ``myIOU`` (intersection over min area).
    :return: list of kept rows as plain Python lists, highest score first.
    """
    newArray = netout
    # Sort ascending by confidence (column 0); sorting once up front is
    # enough, the loop then always takes the last (highest-score) row.
    data, indices = torch.sort(newArray, dim=0)
    newArray = newArray[indices[:, 0]]

    # BUG FIX: the original compared numel() (an int) against
    # torch.Size([]), which is never True; test for emptiness directly.
    if newArray.numel() == 0:
        print("this tensor is empty")
        return []

    target = []
    while True:
        # Explicit guard replaces the original bare `except: break`: after
        # IoU filtering the working set may become empty.
        if newArray.shape[0] == 0:
            break
        if newArray.shape[0] == 1:
            target.append(newArray[0].detach().tolist())
            break
        # Keep the highest-confidence box ...
        cmpitem = newArray[-1].detach()
        target.append(cmpitem.tolist())
        newArray = newArray[:-1]
        # ... and drop every remaining box overlapping it above the threshold.
        iou = myIOU(newArray[:, 1:5], cmpitem[1:5].reshape(-1, 4), useMinIou)
        newArray = newArray[iou <= iouthre]

    return target

# TODO: extend with a minimum-area option where needed
def myIOU(pic0xy, pic1xy, useMinIou=False):
    """Compute IoU between rows of two corner-format box tensors.

    :param pic0xy: boxes as rows of (x0, y0, x1, y1) — top-left then
        bottom-right corner.
    :param pic1xy: second set of boxes, broadcastable against the first.
    :param useMinIou: if True, divide the intersection by the smaller of
        the two box areas (handles a large box fully containing a small
        one) instead of by the union.
    :return: tensor of IoU (or intersection-over-min-area) values.
    """
    # Intersection rectangle: max of the top-left corners, min of the
    # bottom-right corners.
    ix0 = torch.max(pic0xy[:, 0], pic1xy[:, 0])
    iy0 = torch.max(pic0xy[:, 1], pic1xy[:, 1])
    ix1 = torch.min(pic0xy[:, 2], pic1xy[:, 2])
    iy1 = torch.min(pic0xy[:, 3], pic1xy[:, 3])

    # Clamp negative extents to zero: disjoint boxes have no overlap.
    inter_w = (ix1 - ix0).clamp(min=0)
    inter_h = (iy1 - iy0).clamp(min=0)
    inter_area = inter_w * inter_h

    area0 = (pic0xy[:, 2] - pic0xy[:, 0]) * (pic0xy[:, 3] - pic0xy[:, 1])
    area1 = (pic1xy[:, 2] - pic1xy[:, 0]) * (pic1xy[:, 3] - pic1xy[:, 1])

    if useMinIou:
        # Intersection over the smaller box's area.
        return inter_area / torch.min(area0, area1)
    # Standard IoU: intersection over union (sum of areas minus intersection).
    return inter_area / (area0 + area1 - inter_area)


def ensure_folder(folder):
    """Create *folder* (including any missing parents) if it does not exist.

    ``os.makedirs(..., exist_ok=True)`` replaces the original
    ``isdir``-then-``mkdir`` pair, which raised FileNotFoundError when a
    parent directory was missing and was racy between the check and the
    creation.
    """
    os.makedirs(folder, exist_ok=True)

def parse_config(args, cfg):
    """Copy training hyper-parameters from a config object onto an
    argparse-style namespace, mutating *args* in place.

    :param args: namespace that receives the settings as attributes.
    :param cfg: config object exposing the upper-case constant names.
    """
    # args attribute name -> cfg constant name.
    field_map = {
        'weights_to_load': 'WEIGHTS_TO_LOAD',
        'freeze_backbonenet': 'FREEZE_BONE_NET',
        'backbone_net': 'BACKBONE_NET',
        'data': 'DATA_FROM',
        'end_epoch': 'END_EPOCH',
        'lr': 'LR',
        'lr_step': 'LR_STEP',
        'optimizer': 'OPTMIZER',
        'logout': 'LOG_OUT',
        'AddSigmoid_conf': 'IS_CONF_OUT_ADD_SIGMOID',
        'AddSigmoid_xy': 'IS_XY_OUT_ADD_SIGMOID',
        'conf_loss': 'CONF_LOSS_TYPE',
        'box_loss': 'BOXREGRESSOIN_LOSS_TYPE',
        'cls_loss': 'CLASSFICATION_LOSS_TYPE',
        'obj_weight': 'OBJ_LOSS_WEIGHT',
        'noobj_weight': 'NOOBJ_LOSS_WEIGHT',
        'batch_size': 'BATCH_SIZE',
    }
    for attr_name, cfg_name in field_map.items():
        setattr(args, attr_name, getattr(cfg, cfg_name))


if __name__ == '__main__':
    # Smoke test: build the default Logger and emit a single record.
    demo_logger = Logger().logger
    demo_logger.info("helllo")