import random
import numpy as np
# from Metric_Python_1_channel import metric_diaoyong,metric_diaoyong_writer
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed import init_process_group, destroy_process_group
import os
from PIL import Image
import torch
import my_show_img
import torch.backends.cudnn as cudnn
from torch.utils.data import Dataset
from fusion_model import spation_frequency_fusion_model
import misc11 as misc
from torch.utils.tensorboard import SummaryWriter

def forward_loss_G(model, imgs, imgs1, pred, pred1, mask, mask1):
    """MAE-style masked reconstruction loss summed over two image branches.

    Args:
        model: module providing ``patchify`` and the ``norm_pix_loss`` flag.
        imgs, imgs1: ground-truth images for the two branches.
        pred, pred1: per-patch predictions, shape [N, L, patch_dim].
        mask, mask1: [N, L] masks; 1 marks removed patches that count toward the loss.

    Returns:
        Scalar tensor: sum of the two per-branch masked MSE losses.
    """
    target = model.patchify(imgs)
    target1 = model.patchify(imgs1)
    if model.norm_pix_loss:
        # Normalize each patch to zero mean / unit variance before comparing.
        mean = target.mean(dim=-1, keepdim=True)
        var = target.var(dim=-1, keepdim=True)
        target = (target - mean) / (var + 1.e-6) ** .5
        # Bug fix: the second branch was previously left un-normalized, making
        # the two loss terms live on different scales when norm_pix_loss is set.
        mean1 = target1.mean(dim=-1, keepdim=True)
        var1 = target1.var(dim=-1, keepdim=True)
        target1 = (target1 - mean1) / (var1 + 1.e-6) ** .5

    loss = (pred - target) ** 2
    loss1 = (pred1 - target1) ** 2
    loss = loss.mean(dim=-1)  # [N, L], mean loss per patch
    loss1 = loss1.mean(dim=-1)
    loss = (loss * mask).sum() / mask.sum()  # mean loss on removed patches
    loss1 = (loss1 * mask1).sum() / mask1.sum()
    return loss + loss1

class MyTestDataset(Dataset):
    """Paired IR/visible test dataset.

    Pairs are matched by filename: ``ir_path/<name>`` <-> ``vis_path/<name>``.
    Each item is a ``(tensor, filename)`` pair where the tensor has 6 channels:
    visible image in channels 0:3, infrared image in channels 3:6.
    """

    def __init__(self, ir_path, vis_path, transforms):
        # Local alias: the `transforms` parameter (kept for interface
        # compatibility, though effectively unused) shadows the module here.
        import torchvision.transforms as tv

        ir_file_name_list = []
        vis_file_name_list = []
        file_name = []
        for root, _, fnames in sorted(os.walk(ir_path, followlinks=True)):
            # NOTE(review): assumes numeric filenames like "12.png"; the
            # int(...) sort key raises ValueError for any other name.
            for fname in sorted(fnames, key=lambda x: int(x.split('.')[0])):
                ir_file_name_list.append(os.path.join(root, fname))
                vis_file_name_list.append(os.path.join(vis_path, fname))
                file_name.append(fname)
        self.ir_file_name_list = ir_file_name_list
        self.vis_file_name_list = vis_file_name_list
        self.file_name = file_name
        # Built once here instead of on every __getitem__ call; the original
        # rebuilt this fixed pipeline per item, overriding the ctor argument.
        self.transforms = tv.Compose([
            tv.ToTensor(),
            tv.Resize([480, 480])
        ])

    def __len__(self):
        return len(self.vis_file_name_list)

    def __getitem__(self, index):
        name = self.file_name[index]
        ir_image = Image.open(self.ir_file_name_list[index]).convert('RGB')
        # The visible image is resized to the IR image's size before transforms.
        vis_image = Image.open(self.vis_file_name_list[index]).convert('RGB').resize(ir_image.size)

        ir_image = self.transforms(ir_image)
        vis_image = self.transforms(vis_image)
        sample = torch.cat([vis_image, ir_image], dim=0)
        return sample, name


class MyTrainDataset(Dataset):
    """Paired IR/visible training dataset with joint random augmentation.

    Pairs are matched by filename: ``ir_path/<name>`` <-> ``vis_path/<name>``.
    Each item is a single 6-channel tensor (IR channels first, then visible)
    so that random flips/rotations/crops are applied identically to both.
    """

    def __init__(self, ir_path, vis_path, transforms):
        # Local alias: the `transforms` parameter (kept for interface
        # compatibility, though effectively unused) shadows the module here.
        import torchvision.transforms as tv

        ir_file_name_list = []
        vis_file_name_list = []
        for root, _, fnames in sorted(os.walk(ir_path, followlinks=True)):
            for fname in sorted(fnames):
                ir_file_name_list.append(os.path.join(root, fname))
                vis_file_name_list.append(os.path.join(vis_path, fname))

        self.ir_file_name_list = ir_file_name_list
        self.vis_file_name_list = vis_file_name_list
        # Both built once instead of on every __getitem__ call; the original
        # recreated them per item and also left unused seed/p1/p2 locals.
        self.transforms = tv.Compose([
            tv.ToTensor(),
        ])
        # interpolation=3 corresponds to bicubic resampling.
        self.crop = tv.RandomResizedCrop(256, scale=(0.6, 1), interpolation=3)

    def __len__(self):
        return len(self.vis_file_name_list)

    def flip(self, img, flip_mode):
        """Flip vertically (mode 0), horizontally (mode 1); other modes: no-op."""
        if flip_mode == 0:
            img = img.flip(-2)
        elif flip_mode == 1:
            img = img.flip(-1)
        return img

    def randfilp(self, img):
        # mode 2 leaves the image unchanged, so each outcome has probability 1/3.
        mode = np.random.randint(0, 3)
        return self.flip(img, mode)

    def randrot(self, img):
        # Modes 0-2 are 90/180/270-degree rotations; mode 3 is the identity.
        mode = np.random.randint(0, 4)
        return self.rot(img, mode)

    def rot(self, img, rot_mode):
        """Rotate by a multiple of 90 degrees via transpose/flip; mode 3: no-op."""
        if rot_mode == 0:
            img = img.transpose(-2, -1)
            img = img.flip(-2)
        elif rot_mode == 1:
            img = img.flip(-2)
            img = img.flip(-1)
        elif rot_mode == 2:
            img = img.flip(-2)
            img = img.transpose(-2, -1)
        return img

    def __getitem__(self, index):
        ir_image = Image.open(self.ir_file_name_list[index]).convert('RGB')
        vis_image = Image.open(self.vis_file_name_list[index]).convert('RGB').resize(ir_image.size)

        # ToTensor already yields float tensors; the original additionally did a
        # redundant torch.from_numpy(np.array(...)) round-trip, removed here.
        ir_tensor = self.transforms(ir_image)
        vis_tensor = self.transforms(vis_image)

        # Concatenate along channels so all augmentations stay aligned.
        vis_ir = torch.cat((ir_tensor, vis_tensor), dim=0)
        vis_ir = self.randfilp(vis_ir)
        vis_ir = self.randrot(vis_ir)
        vis_ir = self.crop(vis_ir)
        return vis_ir


def prepare_dataloader():
    """Build the train / TNO-test / ROAD-test dataloaders (reads global args).

    Uses a DistributedSampler per split when args.distributed is set, otherwise
    a RandomSampler for training and SequentialSamplers for the test splits.

    Returns:
        (train_dataloader, tno_test_dataloader, road_test_dataloader)
    """
    # NOTE(review): the dataset classes build their own fixed transform
    # pipelines internally, so this transform is effectively unused by them.
    transform_train = transforms.Compose([
        transforms.Resize([480, 640]),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

    train_dataset = MyTrainDataset(args.ir_path, args.vis_path, transform_train)
    tno_test_dataset = MyTestDataset(args.tno_test_ir_path, args.tno_test_vis_path, transform_train)
    road_test_dataset = MyTestDataset(args.road_test_ir_path, args.road_test_vis_path, transform_train)

    if args.test:
        # Single-process data loading keeps debugging/test runs simple.
        args.num_workers = 0

    if args.distributed:
        world_size = dist.get_world_size()
        rank = dist.get_rank()
        sampler_train = DistributedSampler(train_dataset, num_replicas=world_size, rank=rank)
        tno_sampler_test = DistributedSampler(tno_test_dataset, num_replicas=world_size, rank=rank)
        road_sampler_test = DistributedSampler(road_test_dataset, num_replicas=world_size, rank=rank)
    else:
        sampler_train = torch.utils.data.RandomSampler(train_dataset)
        tno_sampler_test = torch.utils.data.SequentialSampler(tno_test_dataset)
        road_sampler_test = torch.utils.data.SequentialSampler(road_test_dataset)

    train_dataloader = DataLoader(
        train_dataset,
        batch_size=args.batch_size,
        pin_memory=True,
        drop_last=True,
        shuffle=False,  # shuffling is delegated to the sampler
        num_workers=args.num_workers,
        sampler=sampler_train
    )

    tno_test_dataloader = DataLoader(
        tno_test_dataset,
        batch_size=1,
        num_workers=0,
        pin_memory=True,
        shuffle=False,
        sampler=tno_sampler_test
    )

    road_test_dataloader = DataLoader(
        road_test_dataset,
        batch_size=1,
        num_workers=0,
        pin_memory=True,
        shuffle=False,
        sampler=road_sampler_test
    )

    return train_dataloader, tno_test_dataloader, road_test_dataloader


def init_setup():
    """Seed all RNGs and, when launched under a DDP runner, set up NCCL.

    Sets args.distributed based on the presence of the RANK/WORLD_SIZE
    environment variables that torchrun/torch.distributed.launch export.
    """
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)
    args.distributed = 'RANK' in os.environ and 'WORLD_SIZE' in os.environ
    if args.distributed:
        init_process_group(backend="nccl")
        # Bind this process to the GPU chosen by the launcher.
        torch.cuda.set_device(int(os.environ["LOCAL_RANK"]))


class Trainer:
    """Drives training, TensorBoard logging, evaluation dumps and checkpoints.

    Wraps the model in DistributedDataParallel when args.distributed is set;
    otherwise runs on the single device named by args.cuda (e.g. 'cuda:1').
    """

    def __init__(
            self,
            args,
            model: torch.nn.Module,
            train_data: DataLoader,
            tno_test_data: DataLoader,
            road_test_data: DataLoader,
            optimizer_G: torch.optim.Optimizer,
            scheduler_G,
            save_every,
    ) -> None:
        self.args = args
        # Under torchrun, LOCAL_RANK selects this process's GPU; otherwise we
        # use the configured device string directly.
        if args.distributed:
            self.gpu_id = int(os.environ["LOCAL_RANK"])
        else:
            self.gpu_id = args.cuda
        self.model = model.to(self.gpu_id)
        self.model_without_ddp = self.model
        self.train_data = train_data
        self.tno_test_data = tno_test_data
        self.road_test_data = road_test_data
        self.optimizer_G = optimizer_G
        self.save_every = save_every
        self.scheduler_G = scheduler_G

        if args.distributed:
            self.model = DDP(model, device_ids=[self.gpu_id], find_unused_parameters=True)
            # Keep an unwrapped handle for checkpointing.
            self.model_without_ddp = self.model.module

    def train(self, max_epochs: int):
        """Train from args.start_epoch to max_epochs, evaluating each epoch."""
        # Batch size, for logging only. Bug fix: the original computed
        # len(batch[0]) — the channel count of one sample — instead of
        # len(batch), the actual per-device batch size.
        b_sz = len(next(iter(self.train_data)))
        writer = SummaryWriter(log_dir="runs/experiment1")  # TensorBoard log directory
        for epoch in range(args.start_epoch, max_epochs):
            self._run_epoch(epoch, b_sz, writer)
            # In the distributed case, only rank 0 writes checkpoints.
            if args.distributed:
                if self.gpu_id == 0 and epoch % self.save_every == 0:
                    self._save_checkpoint(epoch)
            else:
                if epoch % self.save_every == 0:
                    self._save_checkpoint(epoch)

            if args.scheduler:
                self.scheduler_G.step()
                current_lr = self.scheduler_G.get_last_lr()[0]  # current learning rate
                writer.add_scalar("train/Learning Rate", current_lr, epoch)

            # Qualitative evaluation: fuse every ROAD test pair and dump images.
            self.model.eval()
            with torch.no_grad():
                for batch_idx, (sample, file_list) in enumerate(self.road_test_data):
                    sample = sample.to(self.gpu_id)
                    # Channels 0:3 are the visible image, 3:6 the infrared one.
                    fusion = self.model(sample[:, 0:3, :, :], sample[:, 3:6, :, :], mode='test')
                    my_show_img.write_rgb(fusion, epoch, file_list[0], args.output_dir)
        writer.close()

    def _run_epoch(self, epoch, b_sz, writer):
        """Run one training epoch, logging smoothed metrics every 10 steps."""
        print(f"[GPU{self.gpu_id}] Epoch {epoch} | Batchsize: {b_sz} | Steps: {len(self.train_data)}")
        if args.distributed:
            # Reseed the DistributedSampler so each epoch sees a new ordering.
            self.train_data.sampler.set_epoch(epoch)
        self.model.train(True)
        metric_logger = misc.MetricLogger(delimiter="  ")
        metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}'))
        header = 'Epoch: [{}]'.format(epoch)
        print_freq = 20

        for data_iter_step, sample in enumerate(metric_logger.log_every(self.train_data, print_freq, header)):
            sample = sample.to(self.gpu_id, non_blocking=True)
            loss_ssim, loss_grad, loss_mse = self._run_batch(sample)
            torch.cuda.synchronize()
            metric_logger.update(loss_mse=loss_mse)
            metric_logger.update(loss_ssim=loss_ssim)
            metric_logger.update(loss_grad=loss_grad)
            lr = self.optimizer_G.param_groups[0]["lr"]
            metric_logger.update(lr=lr)
            if data_iter_step % 10 == 0:
                global_step = data_iter_step + epoch * len(self.train_data)
                writer.add_scalar("train/MSE", loss_mse.item(), global_step)
                writer.add_scalar("train/SSIM", loss_ssim.item(), global_step)
                writer.add_scalar("train/Gradient", loss_grad.item(), global_step)
                writer.add_scalar("Learning_Rate", lr, global_step)

    def _run_batch(self, samples):
        """One optimization step; returns the individual (unweighted) loss terms."""
        # Bug fix: zero_grad() was previously called twice per step.
        self.optimizer_G.zero_grad()
        ssim_loss, grad_loss, mse_loss = self.model(vis=samples[:, 0:3, :, :], ir=samples[:, 3:6, :, :], mode='train')
        # args.param holds the [mse, ssim, grad] loss weights.
        loss_G = args.param[0] * mse_loss + args.param[1] * ssim_loss + args.param[2] * grad_loss
        loss_G.backward()
        self.optimizer_G.step()

        return ssim_loss, grad_loss, mse_loss

    def _save_checkpoint(self, epoch):
        """Persist the generator's state under args.output_dir."""
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        misc.save_G_model(
            args=self.args, model=self.model, model_without_ddp=self.model_without_ddp,
            optimizer=self.optimizer_G, epoch=epoch)

def weights_init(m):
    """Layer-wise weight initializer (suitable for ``Module.apply``).

    Conv-type layers get weights ~ N(0, 0.02); BatchNorm2d layers get
    weights ~ N(1, 0.02) and zeroed biases. Other layers are untouched.
    """
    layer_type = m.__class__.__name__
    if 'Conv' in layer_type:
        m.weight.data.normal_(0.0, 0.02)
    elif 'BatchNorm2d' in layer_type:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)

def load_train_objs():
    """Create the fusion model, its Adam optimizer and an optional LR scheduler.

    When args.continue_train is set, loads all checkpoint weights whose names
    match the current model from args.continue_dir, then freezes every
    parameter outside the 'myconv' and 'mambafusion' sub-modules.

    Returns:
        (model, optimizer_G, scheduler_G) — scheduler_G is None when
        args.scheduler is falsy.
    """
    model = spation_frequency_fusion_model(device=args.cuda)
    model.to(args.cuda)
    if args.continue_train:
        model_dict = model.state_dict()
        checkpoint = torch.load(args.continue_dir, map_location='cpu')
        checkpoint_model = checkpoint['model']
        print("Load pre-trained checkpoint from: %s" % args.continue_dir)

        # Keep only checkpoint entries whose names exist in the current model.
        pretrained_dict = {k: v for k, v in checkpoint_model.items() if k in model_dict}
        model_dict.update(pretrained_dict)
        model.load_state_dict(model_dict)
        # Freeze everything except the trainable fusion heads.
        for name, param in model.named_parameters():
            if 'myconv' not in name and 'mambafusion' not in name:
                param.requires_grad = False

    optimizer_G = torch.optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.95))
    if args.scheduler:
        # Decay the learning rate by 10% every 3 epochs.
        scheduler_G = torch.optim.lr_scheduler.StepLR(optimizer_G, step_size=3, gamma=0.9)
    else:
        scheduler_G = None

    return model, optimizer_G, scheduler_G


def main(args):
    """Wire up model, optimizers and dataloaders, then run the full training."""
    model, optimizer_G, scheduler_G = load_train_objs()
    train_data, tno_test_data, road_test_data = prepare_dataloader()
    trainer = Trainer(args, model, train_data, tno_test_data, road_test_data,
                      optimizer_G, scheduler_G, args.save_every)
    trainer.train(args.total_epochs)
    # Tear down the NCCL process group when running under DDP.
    if args.distributed:
        destroy_process_group()

if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description='simple distributed training job')
    # Loss-term weights: total = mse*MSE + ssim*SSIM + grad*Gradient.
    mse = 1
    ssim = 1
    grad = 10
    # run mode
    parser.add_argument('--test', default=False)
    parser.add_argument('--continue_train', default=False)
    parser.add_argument('--continue_dir', default=None)
    # file paths
    parser.add_argument('--output_dir', default='model_output_dir_mse='+str(mse)+'ssim='+str(ssim)+'grad='+str(grad), help='path where to save, empty for no saving')
    parser.add_argument('--ir_path', default='/opt/data/private/yudong/dataset/MSRS_all_source_train/III/IR/')
    parser.add_argument('--vis_path', default='/opt/data/private/yudong/dataset/MSRS_all_source_train/VVV/VIS/')
    parser.add_argument('--tno_test_ir_path', default='/opt/data/private/yudong/dataset/TNO_source/IR/')
    parser.add_argument('--tno_test_vis_path', default='/opt/data/private/yudong/dataset/TNO_source/VIS/')
    parser.add_argument('--test_ir_path', default='/opt/data/private/yudong/dataset/MSRS_Fusion_source/IIII/IR/')
    parser.add_argument('--test_vis_path', default='/opt/data/private/yudong/dataset/MSRS_Fusion_source/VVVV/VIS/')
    parser.add_argument('--road_test_ir_path', default='/opt/data/private/yudong/dataset/MSRS_Fusion_source/IIII/IR/')
    parser.add_argument('--road_test_vis_path', default='/opt/data/private/yudong/dataset/MSRS_Fusion_source/VVVV/VIS/')
    # hyperparameters
    parser.add_argument('--param', default=[mse, ssim, grad])
    parser.add_argument('--batch_size', default=4, type=int, help='Input batch size on each device (default: 32)')
    parser.add_argument('--lr', type=float, default=0.0001, metavar='LR', help='learning rate (absolute lr)')
    parser.add_argument('--num_workers', default=0, type=int)
    parser.add_argument('--scheduler', default=True)
    parser.add_argument('--cuda', default='cuda:1')
    # Bug fix: without type=int these arrive as strings when given on the CLI
    # and break range()/modulo arithmetic in Trainer.train.
    parser.add_argument('--total_epochs', default=100, type=int, help='Total epochs to train the model')
    parser.add_argument('--save_every', default=1, type=int, help='How often to save a snapshot')
    parser.add_argument('--seed', default=0, type=int)
    parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
                        help='start epoch')

    args = parser.parse_args()
    cudnn.benchmark = False
    init_setup()
    with torch.cuda.device(args.cuda):
        main(args)
