import argparse
import math
import os
import random
import subprocess
import sys
import time
from copy import deepcopy
from pathlib import Path
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
import yaml
from torch.optim import lr_scheduler
from tqdm import tqdm
from models.experimental import attempt_load
from models.yolo_final import Model
from utils.autoanchor import check_anchors
from utils.autobatch import check_train_batch_size
from utils.callbacks import Callbacks
from utils.dataloaders import create_dataloader
from utils.downloads import attempt_download, is_url
from utils.general import (
    LOGGER,
    TQDM_BAR_FORMAT,
    check_amp,
    check_dataset,
    check_file,
    check_git_info,
    check_git_status,
    check_img_size,
    check_requirements,
    check_suffix,
    check_yaml,
    colorstr,
    get_latest_run,
    increment_path,
    init_seeds,
    intersect_dicts,
    labels_to_class_weights,
    labels_to_image_weights,
    methods,
    one_cycle,
    print_args,
    print_mutation,
    strip_optimizer,
    yaml_save,
)
from utils.loggers import LOGGERS, Loggers
from utils.loggers.comet.comet_utils import check_comet_resume
from utils.loss import ComputeLoss
from utils.metrics import fitness
from utils.plots import plot_evolve
from utils.torch_utils import (
    EarlyStopping,
    ModelEMA,
    de_parallel,
    select_device,
    smart_DDP,
    smart_optimizer,
    smart_resume,
    torch_distributed_zero_first,
)
# Distributed-training environment, populated by torchrun/torch.distributed.run
# (https://pytorch.org/docs/stable/elastic/run.html). Defaults of -1 / -1 / 1
# mean "not launched under a distributed launcher".
LOCAL_RANK = int(os.getenv("LOCAL_RANK", -1))
RANK = int(os.getenv("RANK", -1))
WORLD_SIZE = int(os.getenv("WORLD_SIZE", 1))
GIT_INFO = check_git_info()  # snapshot of repo state for logging/reproducibility




def cal_fisher(weights):
    """Estimate a diagonal Fisher information matrix for EWC-style continual learning.

    Reloads the checkpoint at ``weights``, rebuilds the model and the training
    dataloader from the options stored in the checkpoint, performs one full
    forward/backward pass over the training set accumulating squared gradients
    (averaged over the number of batches), and saves::

        {'fisher_matrix': {param_name: tensor}, 'old_modelpara': state_dict}

    to ``<save_dir>/weights/fisher.pt``.

    Args:
        weights: Path to a checkpoint produced by train.py. Must contain the
            keys ``'model'`` (the trained model object) and ``'opt'`` (the
            training options as a dict).
    """
    # Load on CPU first to avoid a transient CUDA allocation spike.
    # NOTE(review): torch.load without weights_only=True unpickles arbitrary
    # objects — only load checkpoints from trusted sources.
    ckpt = torch.load(weights, map_location="cpu")
    opt = ckpt['opt']

    save_dir, data, hyp, resume, cfg, single_cls, workers = (
        opt['save_dir'],
        opt['data'],
        opt['hyp'],
        opt['resume'],
        opt['cfg'],
        opt['single_cls'],
        opt['workers'],
    )
    save_dir = Path(save_dir)
    w = save_dir / "weights"
    fisher = w / "fisher.pt"  # output path for the Fisher context

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Number of classes comes from the model config file.
    with open(cfg, "r") as file:
        config = yaml.safe_load(file)
        nc = config.get('nc', None)

    # Resolve the training image path from the dataset config: we replay one
    # epoch over the same data the model was just trained on.
    with open(data, "r") as file:
        config = yaml.safe_load(file)
        dataset_root = config.get('path', None)
        train_relative_path = config.get('train', None)
        train_path = os.path.join(dataset_root, train_relative_path)

    # Rebuild the model skeleton and load the trained weights into it.
    # NOTE(review): assumes hyp is a dict (hyp.get("anchors")); if opt stored
    # the hyp *path* instead, this raises — confirm against train.py.
    model = Model(cfg or ckpt["model"].yaml, ch=3, nc=nc, anchors=hyp.get("anchors")).to(device)
    exclude = ["anchor"] if (cfg or hyp.get("anchors")) and not resume else []  # don't restore anchors when cfg overrides them
    csd = ckpt["model"].float().state_dict()  # checkpoint state_dict as FP32
    csd = intersect_dicts(csd, model.state_dict(), exclude=exclude)  # keep only matching keys
    model.load_state_dict(csd, strict=False)

    gs = max(int(model.stride.max()), 32)  # grid size (max stride)
    imgsz = check_img_size(opt['imgsz'], gs, floor=gs * 2)  # verify imgsz is a gs-multiple

    # One full pass over the training data, mirroring train.py's loader setup.
    train_loader, dataset = create_dataloader(
        train_path,
        imgsz,
        16,  # fixed batch size for the Fisher pass (original train batch size is not reused)
        gs,
        single_cls,
        hyp=hyp,
        augment=True,
        cache=None if opt['cache'] == "val" else opt['cache'],
        rect=opt['rect'],
        rank=LOCAL_RANK,
        workers=workers,
        image_weights=opt['image_weights'],
        quad=opt['quad'],
        prefix=colorstr("train: "),
        shuffle=True,
        seed=opt['seed'],
    )
    nb = len(train_loader)  # batch count; the Fisher estimate is averaged by nb

    # Attributes ComputeLoss reads off the model.
    model.nc = nc
    model.hyp = hyp
    model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc

    compute_loss = ComputeLoss(model)  # init loss class

    # Snapshot the trained parameters (the EWC anchor) and zero the Fisher accumulators.
    old_modelpara = deepcopy(csd)
    fisher_matrix = {name: torch.zeros_like(param) for name, param in model.named_parameters()}

    model.eval()  # freeze BN/Dropout statistics while estimating the Fisher

    for _, (imgs, targets, _, _) in enumerate(train_loader):
        model.zero_grad()

        imgs = imgs.to(device, non_blocking=True).float() / 255  # uint8 to float32, 0-255 to 0.0-1.0

        # Multi-scale augmentation, identical to train.py.
        if opt['multi_scale']:
            sz = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5) + gs) // gs * gs  # size
            sf = sz / max(imgs.shape[2:])  # scale factor
            if sf != 1:
                ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to gs-multiple)
                imgs = nn.functional.interpolate(imgs, size=ns, mode="bilinear", align_corners=False)

        _, train_out = model(imgs)  # forward (eval mode returns (inference_out, train_out))
        loss, _ = compute_loss(train_out, targets.to(device))  # loss scaled by batch_size
        if opt['quad']:
            loss *= 4.0

        loss.backward()

        with torch.no_grad():
            for name, param in model.named_parameters():
                # Parameters that received no gradient (frozen layers / unused
                # heads) have grad=None and would crash on .pow(); skip them.
                if param.grad is not None:
                    fisher_matrix[name] += param.grad.pow(2).detach() / nb

    ckpt = {
        'fisher_matrix': fisher_matrix,
        'old_modelpara': old_modelpara,
    }

    torch.save(ckpt, fisher)
    torch.cuda.empty_cache()
    print(f"fisher context saved at {fisher}")





    










