# YOLOv5 🚀 by Ultralytics, GPL-3.0 license

import math
import os
import random
import sys
import time
from copy import deepcopy
from datetime import datetime
from pathlib import Path

import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
import yaml
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.optim import SGD, Adam, AdamW, lr_scheduler
from tqdm import tqdm

FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

import val  # for end-of-epoch mAP
from models.experimental import attempt_load
from models.yolo import Model
from yolo_utils.autoanchor import check_anchors
from yolo_utils.autobatch import check_train_batch_size
from yolo_utils.callbacks import Callbacks
from yolo_utils.dataloaders import create_dataloader
from yolo_utils.downloads import attempt_download
from yolo_utils.general import (LOGGER, check_amp, check_dataset, check_file, check_git_status, check_img_size,
                           check_requirements, check_suffix, check_version, check_yaml, colorstr, get_latest_run,
                           increment_path, init_seeds, intersect_dicts, labels_to_class_weights,
                           labels_to_image_weights, methods, one_cycle, print_args, print_mutation, strip_optimizer)
from yolo_utils.loggers import Loggers
from yolo_utils.loggers.wandb.wandb_utils import check_wandb_resume
from yolo_utils.loss import ComputeLoss
from yolo_utils.metrics import fitness
from yolo_utils.plots import plot_evolve, plot_labels
from yolo_utils.torch_utils import EarlyStopping, ModelEMA, de_parallel, select_device, torch_distributed_zero_first

import json
import augmentation
#import kmeans_for_anchors
from config import *

LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1))  # https://pytorch.org/docs/stable/elastic/run.html
RANK = int(os.getenv('RANK', -1))
WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))


def train(opt):  # opt comes from get_config(); opt.hyp is path/to/hyp.yaml or a hyp dict
    """Run a full training session: offline augmentation, dataloader creation,
    model build/resume, warmup + AMP training loop, per-epoch validation and
    best/last checkpointing.

    Args:
        opt: configuration namespace produced by ``get_config`` carrying paths
            (org_pa/train_pa/test_pa/arg_pa/model_pa/last/best/save_dir),
            hyperparameters (``opt.hyp``: yaml path or dict) and training
            switches (epochs, batch_size, imgsz, device, ...).

    Returns:
        The ``results`` tuple (P, R, mAP@.5, mAP@.5-.95, val losses) — kept for
        caller compatibility; this variant does not populate it.
    """
    hyp = opt.hyp
    save_dir, epochs, batch_size, evolve, data, cfg, resume, workers = \
        Path(opt.save_dir), opt.epochs, opt.batch_size, opt.evolve, opt.data, opt.cfg, \
        opt.resume, opt.workers

    device = select_device(opt.device, batch_size=opt.batch_size)

    # ----- Offline augmentation / train-val split ---------------------------
    loginfo(opt, 'augmentation...')
    li = listdir(opt.org_pa, ['.xml'])
    if len(li) < 10000:
        # Small dataset: oversample via augmentation.  NOTE(review): the
        # computed multiplier ``n`` (clamped to [2, 5]) is currently unused —
        # the calls below hard-code 2x train / 1x val; confirm intent.
        n = 500 // max(1, len(li))
        n = min(n, 5)
        n = max(n, 2)
        train_list = augmentation.augmentation(opt.org_pa, opt.train_pa, 2)
        val_list = augmentation.augmentation(opt.org_pa, opt.test_pa, 1, 1000)
    else:
        all_list = augmentation.load_xmls(opt.org_pa)
        train_list, val_list = split_train(all_list)

    loginfo(opt, 'xml2coco...')
    classes = get_classes(train_list)
    name2index(train_list, classes)
    name2index(val_list, classes)
    opt.names = classes
    # Persist class names; context manager so the handle is closed (the
    # original leaked an open file via ``open(...).write(...)``).
    with open(f"{opt.arg_pa}/classes.txt", 'w') as f:
        f.write('\n'.join(classes))

    # Config
    plots = False  # create plots
    cuda = device.type != 'cpu'
    init_seeds(1 + RANK)

    nc = len(classes)  # number of classes
    single_cls = nc == 1
    names = classes  # class names

    if opt.notrain:
        return

    # Directories
    save_dir = Path(save_dir)
    print(save_dir)
    wdir = Path(opt.model_pa)
    wdir.mkdir(parents=True, exist_ok=True)  # make dir
    last = Path(opt.last)
    best = Path(opt.best)
    results_file = save_dir / 'results.txt'

    # Hyperparameters
    if isinstance(hyp, str):
        with open(hyp, errors='ignore') as f:
            hyp = yaml.safe_load(f)  # load hyps dict
    LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))

    # Save run settings
    if not evolve:
        with open(save_dir / 'hyp.yaml', 'w') as f:
            yaml.safe_dump(hyp, f, sort_keys=False)
        with open(save_dir / 'opt.yaml', 'w') as f:
            yaml.safe_dump(vars(opt), f, sort_keys=False)

    # Model: warm-start from best (preferred) or last checkpoint when present.
    weights = best if os.path.exists(best) else last
    check_suffix(weights, '.pt')  # check weights
    pretrained = os.path.exists(weights)
    if pretrained:
        with torch_distributed_zero_first(LOCAL_RANK):
            weights = attempt_download(weights)  # download if not found locally
        ckpt = torch.load(weights, map_location='cpu')  # load to CPU to avoid CUDA memory leak
        model = Model(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
        exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else []  # exclude keys
        csd = ckpt['model'].float().state_dict()  # checkpoint state_dict as FP32
        csd = intersect_dicts(csd, model.state_dict(), exclude=exclude)  # intersect
        model.load_state_dict(csd, strict=False)  # load
        LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}')  # report
    else:
        model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create from scratch
        ckpt = None
    amp = check_amp(model)  # check AMP

    # Image size
    gs = max(int(model.stride.max()), 32)  # grid size (max stride)
    imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2)  # verify imgsz is gs-multiple

    # Batch size
    if RANK == -1 and batch_size == -1:  # single-GPU only, estimate best batch size
        batch_size = check_train_batch_size(model, imgsz, amp)
        # FIX: the original called ``loggers.on_params_update`` on an undefined
        # name and would raise NameError here; report via LOGGER instead.
        LOGGER.info(f'AutoBatch: using batch_size={batch_size}')

    # Optimizer
    nbs = 64  # nominal batch size
    accumulate = max(round(nbs / batch_size), 1)  # accumulate loss before optimizing
    hyp['weight_decay'] *= batch_size * accumulate / nbs  # scale weight_decay
    LOGGER.info(f"Scaled weight_decay = {hyp['weight_decay']}")

    # Parameter groups: g[0] weights w/ decay, g[1] norm weights (no decay), g[2] biases.
    g = [], [], []  # optimizer parameter groups
    bn = tuple(v for k, v in nn.__dict__.items() if 'Norm' in k)  # normalization layers, i.e. BatchNorm2d()
    for v in model.modules():
        if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):  # bias
            g[2].append(v.bias)
        if isinstance(v, bn):  # weight (no decay)
            g[1].append(v.weight)
        elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):  # weight (with decay)
            g[0].append(v.weight)

    if opt.optimizer == 'Adam':
        optimizer = Adam(g[2], lr=hyp['lr0'], betas=(hyp['momentum'], 0.999))  # adjust beta1 to momentum
    elif opt.optimizer == 'AdamW':
        optimizer = AdamW(g[2], lr=hyp['lr0'], betas=(hyp['momentum'], 0.999))  # adjust beta1 to momentum
    else:
        optimizer = SGD(g[2], lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)

    optimizer.add_param_group({'params': g[0], 'weight_decay': hyp['weight_decay']})  # add g0 with weight_decay
    optimizer.add_param_group({'params': g[1]})  # add g1 (BatchNorm2d weights)
    LOGGER.info(f"{colorstr('optimizer:')} {type(optimizer).__name__} with parameter groups "
                f"{len(g[1])} weight (no decay), {len(g[0])} weight, {len(g[2])} bias")
    del g

    # Scheduler
    if opt.cos_lr:
        lf = one_cycle(1, hyp['lrf'], epochs)  # cosine 1->hyp['lrf']
    else:
        lf = lambda x: (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf']  # linear
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)

    # EMA
    ema = ModelEMA(model) if RANK in {-1, 0} else None

    # Resume.  FIX: fitness in this script is a validation *loss* (lower is
    # better, compared with ``fi < best_fitness``), so initialize to +inf —
    # the original 0.0 meant "best" could never improve.
    start_epoch, best_fitness = 0, float('inf')
    if pretrained:
        # Optimizer
        if ckpt['optimizer'] is not None:
            optimizer.load_state_dict(ckpt['optimizer'])
            best_fitness = ckpt['best_fitness']

        # EMA
        if ema and ckpt.get('ema'):
            ema.ema.load_state_dict(ckpt['ema'].float().state_dict())
            ema.updates = ckpt['updates']

        # Epochs
        start_epoch = ckpt['epoch'] + 1
        if resume:
            assert start_epoch > 0, f'{weights} training to {epochs} epochs is finished, nothing to resume.'
        if epochs < start_epoch:
            LOGGER.info(f"{weights} has been trained for {ckpt['epoch']} epochs. Fine-tuning for {epochs} more epochs.")
            epochs += ckpt['epoch']  # finetune additional epochs

        del ckpt, csd

    # DP mode
    if cuda and RANK == -1 and torch.cuda.device_count() > 1:
        LOGGER.warning('WARNING: DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n'
                       'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.')
        model = torch.nn.DataParallel(model)

    # SyncBatchNorm
    if opt.sync_bn and cuda and RANK != -1:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
        LOGGER.info('Using SyncBatchNorm()')

    # Trainloader
    train_loader, dataset = create_dataloader(train_list, imgsz, batch_size // WORLD_SIZE, gs, single_cls,
                                            hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=RANK,
                                            workers=workers,
                                            image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: '))
    mlc = int(np.concatenate(dataset.labels, 0)[:, 0].max())  # max label class
    nb = len(train_loader)  # number of batches
    assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}'

    # Process 0
    val_loader = create_dataloader(val_list, imgsz, batch_size // WORLD_SIZE * 2, gs, single_cls,
                                    hyp=hyp, augment=False, cache=opt.cache_images, rect=True, rank=-1,
                                    workers=workers,
                                    pad=0.5, prefix=colorstr('val: '))[0]

    if not resume:
        labels = np.concatenate(dataset.labels, 0)
        if plots:
            plot_labels(labels, names, save_dir)

        # Anchors
        if not opt.noautoanchor:
            check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
        model.half().float()  # pre-reduce anchor precision

    # DDP mode
    if cuda and RANK != -1:
        if check_version(torch.__version__, '1.11.0'):
            model = DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK, static_graph=True)
        else:
            model = DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK)

    # Model attributes
    nl = de_parallel(model).model[-1].nl  # number of detection layers (to scale hyps)
    hyp['box'] *= 3 / nl  # scale to layers
    hyp['cls'] *= nc / 80 * 3 / nl  # scale to classes and layers
    hyp['obj'] *= (imgsz / 640) ** 2 * 3 / nl  # scale to image size and layers
    hyp['label_smoothing'] = opt.label_smoothing
    model.nc = nc  # attach number of classes to model
    model.hyp = hyp  # attach hyperparameters to model
    model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc  # attach class weights
    model.names = names

    # Start training
    t0 = time.time()
    nw = max(round(hyp['warmup_epochs'] * nb), 100)  # number of warmup iterations, max(3 epochs, 100 iterations)
    maps = np.zeros(nc)  # mAP per class
    results = (0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls) — unused here
    scheduler.last_epoch = start_epoch - 1  # do not move
    scaler = torch.cuda.amp.GradScaler(enabled=amp)
    compute_loss = ComputeLoss(model)  # init loss class
    LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n'
                f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n'
                f"Logging results to {colorstr('bold', save_dir)}\n"
                f'Starting training for {epochs} epochs...')

    for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------------------
        model.train()

        print(('\n' + '%10s' * 7) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'labels', 'img_size'))
        # Update image weights (optional, single-GPU only)
        if opt.image_weights:
            cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc  # class weights
            iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw)  # image weights
            dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n)  # rand weighted idx

        def run_batch(loader, is_train=True):
            """One pass over ``loader``; returns the mean loss items tensor.

            FIX: the original returned the *last batch* loss, a noisy signal;
            the accumulated mean is a stable fitness/logging value.
            """
            pbar = tqdm(enumerate(loader), total=len(loader))
            mloss = torch.zeros(3, device=device)  # mean losses (box, obj, cls)

            last_opt_step = -1
            if is_train:
                model.train()
                optimizer.zero_grad()
                ppp = f'{epoch:g}/{epochs:g}'
            else:
                model.eval()
                ppp = ''

            for i, (imgs, targets, paths, _) in pbar:  # batch --------------------------------------------------------
                ni = i + nb * epoch  # number integrated batches (since train start)
                imgs = imgs.to(device, non_blocking=True).float() / 255  # uint8 to float32, 0-255 to 0.0-1.0

                if is_train:
                    # Warmup: bias lr falls from warmup_bias_lr to lr0, other lrs rise from 0.
                    if ni <= nw:
                        xi = [0, nw]  # x interp
                        accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round())
                        for j, x in enumerate(optimizer.param_groups):
                            x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
                            if 'momentum' in x:
                                x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])

                    # Multi-scale
                    if opt.multi_scale:
                        sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs  # size
                        sf = sz / max(imgs.shape[2:])  # scale factor
                        if sf != 1:
                            ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (gs-multiple)
                            imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)

                    # Forward
                    with torch.cuda.amp.autocast(amp):
                        pred = model(imgs)  # forward
                        loss, loss_items = compute_loss(pred, targets.to(device))  # loss scaled by batch_size
                        if RANK != -1:
                            loss *= WORLD_SIZE  # gradient averaged between devices in DDP mode
                        if opt.quad:
                            loss *= 4.

                    # Backward
                    scaler.scale(loss).backward()

                    # Optimize every ``accumulate`` batches (gradient accumulation)
                    if ni - last_opt_step >= accumulate:
                        scaler.step(optimizer)  # optimizer.step
                        scaler.update()
                        optimizer.zero_grad()
                        if ema:
                            ema.update(model)
                        last_opt_step = ni
                else:
                    # Validation: eval-mode forward returns (inference_out, train_out)
                    with torch.no_grad():
                        out, pred = model(imgs)  # forward
                        loss, loss_items = compute_loss(pred, targets.to(device))
                        if RANK != -1:
                            loss *= WORLD_SIZE
                        if opt.quad:
                            loss *= 4.

                # Log running means and GPU memory on the progress bar
                mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
                mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G'  # (GB)
                pbar.set_description(('%10s' * 2 + '%10.4g' * 5) % (ppp, mem, *mloss, targets.shape[0], imgs.shape[-1]))

            return mloss
            # end batch --------------------------------------------------------------------------------------------

        train_mloss = run_batch(train_loader, is_train=True)

        # Scheduler: FIX — step exactly once per epoch.  The original stepped
        # twice (before and after training), compressing the LR schedule.
        lr = [x['lr'] for x in optimizer.param_groups]  # for loggers
        scheduler.step()

        ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights'])

        test_mloss = run_batch(val_loader, is_train=False)

        # Write per-epoch mean train/val losses.  FIX: the original appended a
        # stale column-header string plus an always-zero results tuple.
        with open(results_file, 'a') as f:
            vals = (*train_mloss.tolist(), *test_mloss.tolist())
            f.write(f'{epoch} ' + ' '.join(f'{v:.4g}' for v in vals) + '\n')

        # Update best fitness: total mean validation loss, lower is better.
        fi = float(test_mloss.sum())
        if fi < best_fitness:
            best_fitness = fi

        # Save model
        ckpt = {'epoch': epoch,
                'best_fitness': best_fitness,
                'model': deepcopy(de_parallel(model)).half(),
                'ema': deepcopy(ema.ema).half(),
                'updates': ema.updates,
                'optimizer': optimizer.state_dict()
                }

        # Save last always; save best when this epoch improved (or best missing)
        torch.save(ckpt, last)
        if best_fitness == fi or not os.path.exists(best):
            print('save', best)
            torch.save(ckpt, best)

        del ckpt

    torch.cuda.empty_cache()
    return results



if __name__ == "__main__":
    # Dataset config history kept for quick switching — only the last,
    # uncommented assignment is active.  Active entries are tuples of
    # (data_root, img_size, project_name) splatted into get_config().
    # NOTE: the plain-string entries below were dead even in the original
    # (a bare string would splat character-by-character into get_config).
    # pa = 'D:/PycharmProjects/yolov5data/shenfei_data20210706001/'
    # pa = 'D:/data/seg/coco128'
    # pa = 'D:/code/git/ywlydd/deepgui/yolov5_xx/imgs/jieba'
    # pa = 'E:/data/220411抗原检测/yolov5/data'
    # pa = 'E:/data/detect/jieba'
    # pa = 'E:/data/220401小鸟/train', 416, 'xiaoniao'
    # pa = 'D:/data/220309筷子/train', 416, 'kuaizi'
    # pa = 'D:/data/220329竹筷/标注caise/mini5', 416, 'kuaizi'
    pa = 'D:/data/220329竹筷/标注caise/train', 320, 'kuaizi'
    train(get_config(*pa))
