import time
import logging
import os, sys, math
import argparse
import collections
from collections import deque
from copy import deepcopy
import datetime
from typing import Iterable

import cv2
import zipfile
import pprint
from tqdm import tqdm
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch import optim

from torch.nn import functional as F
from torchvision.ops import nms
# from torchsummary import summary

from torchvision import transforms
# from torchmetrics.detection.mean_ap import MeanAveragePrecision
from dataset.dataloader import CocoDataset, SimpleDetDataset
from torchmetrics.detection.mean_ap import MeanAveragePrecision
# import coco_eval
from util.metrics import NMS, collect_simpledetdataloader_resault
from loss.loss import Loss
from modules.reweighting import weight_learner

def sys_update_trainning_info(epoch, total_epoch, train_loss, val_loss, trainning_time, val_time, mem, word_L):
    """Redraw the in-place console progress line for the current epoch.

    Each field (epoch counter, train/val loss, train/val elapsed time, GPU
    memory) is centred in a column of width *word_L*; the line ends with a
    carriage return so the next call overwrites it.
    """
    train_seconds = int(round(trainning_time))
    val_seconds = int(round(val_time))
    columns = [
        f'{epoch}/{total_epoch}',
        f'{train_loss:1.5f}',
        f'{val_loss:1.5f}',
        str(datetime.timedelta(seconds=train_seconds)),
        str(datetime.timedelta(seconds=val_seconds)),
        f'{mem:.3g}G',
    ]
    sys.stdout.flush()
    sys.stdout.write(''.join(col.center(word_L) for col in columns) + '\r')

def filestream_update_trainng_info(file_stream, epoch, total_epoch, train_loss, val_loss, trainning_time, val_time, mem, word_L):
    """Append one epoch-summary line to an already-open text stream.

    The line mirrors the console progress line (epoch counter, losses,
    elapsed times, memory) with each field centred in a *word_L*-wide column.
    The caller owns *file_stream* and is responsible for closing it.
    """
    train_t = int(round(trainning_time))
    val_t = int(round(val_time))
    line = (
        '{}/{}'.format(epoch, total_epoch).center(word_L)
        + '{:1.5f}'.format(train_loss).center(word_L)
        + '{:1.5f}'.format(val_loss).center(word_L)
        + str(datetime.timedelta(seconds=train_t)).center(word_L)
        + str(datetime.timedelta(seconds=val_t)).center(word_L)
        + f'{mem:.3g}G'.center(word_L)
        + '\r\n'
    )
    # write(), not writelines(): writelines() treats a plain string as an
    # iterable of characters and issues one write per character.
    file_stream.write(line)

def train_one_epoch(model: torch.nn.Module, criterion: nn.Module,
                    dataloader_train, dataloader_val, optimizer: torch.optim.Optimizer,
                    device: torch.device, epoch: int, total_epoch: int, scaler, word_L = 20, out_file=''):
    """Run one training epoch, then a no-grad validation pass.

    Args:
        model: network to train; put into train() mode here and left there.
        criterion: loss module; called as criterion(pred, annot) and expected
            to return (scalar_loss, loss_items).
        dataloader_train, dataloader_val: iterables yielding (img, annot)
            tensor batches.
        optimizer: stepped once per training batch.
        device: device batches are moved to before the forward pass.
        epoch, total_epoch: progress-display counters only.
        scaler: torch.cuda.amp.GradScaler for mixed precision, or None for
            a plain fp32 step.
        word_L: column width used by the console/file progress lines.
        out_file: optional path; when non-empty a summary line is appended.

    Returns:
        (avg_train_loss, avg_val_loss) — both per-batch/per-sample means.
    """
    model.train()
    avg_loss = 0.0
    total_loss = 0.0
    t0 = time.time()
    val_t = 0
    mem = 0

    for iteration, (img, annot) in enumerate(dataloader_train):
        optimizer.zero_grad()
        if scaler is not None:
            from torch.cuda.amp import autocast
            with autocast():
                pred = model(img.to(device).float())
                loss_value, loss_item = criterion(pred, annot.to(device))
            scaler.scale(loss_value).backward()
            # Gradients must be unscaled BEFORE clipping; clipping the still-
            # scaled gradients applies an effectively wrong threshold.
            scaler.unscale_(optimizer)
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0)
            scaler.step(optimizer)
            scaler.update()
        else:
            pred = model(img.to(device).float())
            loss_value, loss_item = criterion(pred, annot.to(device))
            loss_value.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0)
            optimizer.step()

        total_loss += loss_value.item()
        avg_loss = total_loss / (iteration + 1)
        mem = torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0
        sys_update_trainning_info(epoch, total_epoch, avg_loss, 0.0, time.time() - t0, val_t, mem, word_L)

    train_t = int(round(time.time() - t0))
    t0 = time.time()
    with torch.no_grad():
        val_total_loss = 0.0
        val_samples = 0
        val_loss = 0.0
        for iteration, (img, annot) in enumerate(dataloader_val):
            pred = model(img.to(device).float())
            loss_value, loss_item = criterion(pred, annot.to(device))
            # Weight each batch by its size and divide by the sample count so
            # val_loss is a true per-sample mean (the old code divided a
            # sample-weighted sum by the batch count).
            val_total_loss += loss_value.cpu().item() * img.shape[0]
            val_samples += img.shape[0]
            val_loss = val_total_loss / val_samples
            val_t = int(round(time.time() - t0))
            sys_update_trainning_info(epoch, total_epoch, avg_loss, val_loss, train_t, val_t, mem, word_L)
        sys.stdout.write('\n')
    if out_file:
        with open(out_file, 'a') as f:
            filestream_update_trainng_info(f, epoch, total_epoch, avg_loss, val_loss, train_t, val_t, mem, word_L)
    return avg_loss, val_loss

def eval_model(model: torch.nn.Module, dataloader_val, maxLen, out_file='', device=torch.device('cuda')):
    """Evaluate *model* on *dataloader_val* and report mAP-style metrics.

    Collects predictions/targets via collect_simpledetdataloader_resault,
    computes torchmetrics MeanAveragePrecision, and prints the metric table
    six columns at a time (each column *maxLen* characters wide). When
    *out_file* is set, the same lines are appended to that file.

    Returns:
        The dict produced by MeanAveragePrecision.compute().
    """
    model.eval()
    with torch.no_grad():
        preds, target = collect_simpledetdataloader_resault(dataloader_val, model.nc, model)
        metric = MeanAveragePrecision()
        metric.update(preds, target)
        metrics = metric.compute()
        header = '\r'
        value = '\r'

        def print_info(header, value, out_file):
            # Echo a header/value pair to the console and, when requested,
            # append it to the report file. `with` guarantees the handle is
            # closed even if the write raises (the old open/close leaked it).
            print(header)
            print(value)
            if out_file:
                with open(out_file, 'a') as f:
                    f.writelines(header + value)

        print_info(header, '\n', out_file)
        for i, m in enumerate(metrics.keys()):
            header += m.center(maxLen, ' ')
            value += '{:1.5f}'.format(metrics[m]).center(maxLen, ' ')
            # Flush a full row of six metric columns, then start a new row.
            if (i + 1) % 6 == 0:
                print_info(header, value, out_file)
                header = '\r'
                value = '\r'
        print_info(header, value, out_file)
        print_info('\r', '\n', out_file)

        return metrics
    
def freeze_backbone(model):
    """Disable gradient updates for every parameter whose name contains 'backbone'."""
    for param_name, param in model.named_parameters():
        if 'backbone' not in param_name:
            continue
        param.requires_grad = False

def unfreeze(model):
    """Re-enable gradient updates for every parameter in *model*."""
    # parameters() suffices here — named_parameters() fetched names that
    # were never used.
    for param in model.parameters():
        param.requires_grad = True



