import torch
from torch import nn
import torch.nn.functional as F
from diffusers import AutoencoderKL, PNDMScheduler
from torch.optim import lr_scheduler
from torchvision.utils import save_image

from diffusers.training_utils import compute_snr
from transformers import CLIPVisionModelWithProjection,CLIPVisionConfig
from omegaconf import OmegaConf


import psutil
import os
from tqdm import tqdm

from modules.unet_2d_condition import UNet2DConditionModel
from modules.pose_net import PoseNet
from dataset2 import PoseDataset
from utils import create_logger,_55_to_01

# Module-level logger shared by all functions in this file.
logger = create_logger("PGModel")

# Target device; this script assumes a CUDA-capable machine throughout.
device = "cuda"
# Global toggle for the CPU/GPU memory debug printers below.
check_memory = False

def print_memory_usage(msg="",is_check = check_memory):
    """Print the current process's CPU memory usage (RSS/VMS, in MB).

    Args:
        msg: Label printed before the numbers.
        is_check: When falsy, the function is a no-op.  Defaults to the
            module-level ``check_memory`` flag (bound once at import time).
    """
    # Bug fix: honour the ``is_check`` parameter.  The original tested the
    # module global ``check_memory`` instead, so explicit callers such as
    # ``print_memory_usage(msg, True)`` silently printed nothing.
    if not is_check:
        return
    process = psutil.Process(os.getpid())
    mem_info = process.memory_info()
    print(msg)
    print(f"CPU Memory (RSS): {mem_info.rss / (1024 ** 2):.2f} MB")  # Resident Set Size
    print(f"CPU Memory (VMS): {mem_info.vms / (1024 ** 2):.2f} MB")  # Virtual Memory Size

def print_gpu_mem(msg="",is_print=check_memory):
    """Print the current CUDA memory statistics (allocated/reserved, in MB).

    Args:
        msg: Label embedded in the printed line.
        is_print: Gate flag; nothing is printed when falsy.  Defaults to the
            module-level ``check_memory`` flag (bound once at import time).
    """
    # Guard clause: bail out early when the debug flag is off.
    if not is_print:
        return
    megabyte = 1024 ** 2
    allocated = torch.cuda.memory_allocated() / megabyte
    reserved = torch.cuda.memory_reserved() / megabyte
    print(f"[GPU Memory] {msg}: Allocated={allocated:.2f} MB, Reserved={reserved:.2f} MB")

class ConcatFusion(nn.Module):
    """Fuse two feature vectors via concatenation plus a two-layer MLP.

    Both inputs are flattened into ``(1, input_dim)`` rows, concatenated
    along the feature axis, projected through Linear -> ReLU -> Linear,
    and returned as a ``(input_dim, 1)`` column vector.
    """

    def __init__(self, input_dim=1024, hidden_dim=1024):
        super().__init__()
        # MLP mapping the concatenated (2 * input_dim) features back down
        # to input_dim.
        self.fc = nn.Sequential(
            nn.Linear(input_dim * 2, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, input_dim),
        )

    def forward(self, feat1, feat2):
        # Flatten each (input_dim, 1) column into a (1, input_dim) row.
        row_a = feat1.view(1, -1)
        row_b = feat2.view(1, -1)
        fused = self.fc(torch.cat((row_a, row_b), dim=1))
        # Restore the (input_dim, 1) column layout expected by callers.
        return fused.view(-1, 1)

class PGModel(torch.nn.Module):
    """Pose-guided image generation model assembled from diffusion parts.

    Components:
      - ``unet``: trainable conditional UNet.  Its ``conv_in`` weights are
        NOT loaded from the pretrained checkpoint because the input channel
        count differs (noisy latents are concatenated with reference and
        reference-pose latents before entering the UNet).
      - ``vae``: frozen half-precision AutoencoderKL used for encode/decode.
      - ``image_encoder``: frozen half-precision CLIP vision encoder that
        produces reference image embeddings.
      - ``noise_scheduler``: PNDM scheduler for training noise and sampling.
      - ``pose_net``: trainable pose feature extractor whose output is fed
        to the UNet as ``pose_latents``.

    All configs/weights are read from the local ``configs`` and ``models``
    directories.
    """
    def __init__(self,args):
        super().__init__()
        self.args = args
        self.unet = UNet2DConditionModel.from_config(
            UNet2DConditionModel.load_config("configs", subfolder="unet")
        )
        # Load pretrained UNet weights, skipping every "conv_in" entry: the
        # input convolution here expects extra concatenated latent channels,
        # so the pretrained conv_in shape would not match.
        unet_pretrained_dict = torch.load("configs/unet/unet.pth",map_location="cpu")
        unet_dict = self.unet.state_dict()
        unet_dict.update({k: v for k, v in unet_pretrained_dict.items() if k in unet_dict and "conv_in" not in k})
        self.unet.load_state_dict(unet_dict)
        print("unet ",self.unet.dtype)

        logger.info("载入vae")
        # Frozen VAE in half precision; used only for latent encode/decode.
        self.vae = AutoencoderKL.from_pretrained(
            "configs", subfolder="vae"
           # AutoencoderKL.load_config("configs", subfolder="vae")
        )
        self.vae.half()
        self.vae.eval()
        print("vae ",self.vae.dtype)
        for name, param in self.vae.named_parameters():
            param.requires_grad = False

        logger.info("载入image_encoder")
        # Frozen CLIP image encoder in half precision.
        self.image_encoder = CLIPVisionModelWithProjection.from_pretrained("configs", subfolder="image_encoder")
        self.image_encoder.half()
        self.image_encoder.eval()
        print("image_encoder",self.image_encoder.dtype)
        self.image_encoder.requires_grad_(False)

        self.noise_scheduler = PNDMScheduler.from_pretrained(
            "configs", subfolder="scheduler")

        self.pose_net = PoseNet(noise_latent_channels=self.unet.config.block_out_channels[0])
        # Initialise PoseNet from its pretrained weights.
        self.pose_net.load_state_dict(torch.load("models/pose_net.pth",map_location="cpu"))


    def load_weights(self,path):
        """Load UNet and PoseNet weights from a checkpoint saved by ``save()``.

        Args:
            path: Path to a ``.pth`` file containing at least the keys
                ``"unet"`` and ``"pose_net"``.
        """
        logger.info(f"从{path}加载权重")
        pretrained_dict = torch.load(path,map_location="cpu")

        # Load UNet weights.
        unet_state_dict = self.unet.state_dict()
        # Keep only checkpoint entries that exist in the current model.
        unet_weights_to_load = {k: v for k, v in pretrained_dict["unet"].items() if k in unet_state_dict}
        unet_state_dict.update(unet_weights_to_load)
        self.unet.load_state_dict(unet_state_dict)

        # Warn about model keys absent from the checkpoint (optional but
        # recommended).  NOTE(review): this can print a very long list.
        missing_keys = [k for k in self.unet.state_dict().keys() if k not in unet_weights_to_load]
        if missing_keys:
            logger.warning(f"UNet 缺少权重: {missing_keys}")

        self.pose_net.load_state_dict(pretrained_dict["pose_net"])
        logger.info("权重加载完毕")


    def one_step(self,img,gen_pose_img,ref_images,ref_pose_img,ref_clip_image):
        """Run one training step and return the diffusion loss.

        Encodes target/reference images to latents, adds noise at a random
        timestep, predicts the noise with the UNet and computes an MSE loss
        (optionally Min-SNR weighted per args.snr_gamma).

        Args:
            img: Target image batch (presumably in [-1, 1] — TODO confirm,
                the decode path uses ``/2 + 0.5``).
            gen_pose_img: Pose image for the target frame (pose condition).
            ref_images: Reference appearance images.
            ref_pose_img: Pose image of the reference frame.
            ref_clip_image: CLIP-preprocessed reference image.

        Returns:
            Scalar loss tensor.
        """
        with torch.no_grad():
            latents = self.vae.encode(img).latent_dist.sample() # (b,4,h,w)
            ref_latents = self.vae.encode(ref_images).latent_dist.sample()
            ref_pose_latents = self.vae.encode(ref_pose_img).latent_dist.sample()
        # 0.18215 is the Stable Diffusion VAE latent scaling factor.
        latents = latents*0.18215
        ref_latents = ref_latents*0.18215
        ref_pose_latents = ref_pose_latents*0.18215

        # latents = latents.unsqueeze(2)  # (b, c, 1, h, w)

        with torch.no_grad():
            clip_image_embeds = self.image_encoder(ref_clip_image).image_embeds

        noise = torch.randn_like(latents)

        #TODO: noise_offset

        bsz = latents.shape[0]
        # Sample a random timestep for each step
        timesteps = torch.randint(
            0,
            self.noise_scheduler.config.num_train_timesteps,
            (bsz,),
            device=latents.device,
        )
        timesteps = timesteps.long()

        # add noise
        noisy_latents = self.noise_scheduler.add_noise(
            latents, noise, timesteps
        )

        # Concatenate noisy, reference and reference-pose latents along the
        # channel axis — matches the UNet's widened conv_in.
        latent_model_input = torch.cat([noisy_latents, ref_latents,ref_pose_latents], dim=1)

        # Training target is the added noise (epsilon prediction).
        target = noise

        # Predict.
        model_pred = self.forward(latent_model_input,timesteps,clip_image_embeds,gen_pose_img)

        # loss
        if self.args.snr_gamma is None:
            loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
        else:
            # Compute loss-weights as per Section 3.4 of https://arxiv.org/abs/2303.09556.
            # Since we predict the noise instead of x_0, the original formulation is slightly changed.
            # This is discussed in Section 4.2 of the same paper.
            snr = compute_snr(self.noise_scheduler, timesteps)
            mse_loss_weights = torch.stack([snr, self.args.snr_gamma * torch.ones_like(timesteps)], dim=1).min(
                dim=1
            )[0]
            if self.noise_scheduler.config.prediction_type == "epsilon":
                mse_loss_weights = mse_loss_weights / snr
            elif self.noise_scheduler.config.prediction_type == "v_prediction":
                mse_loss_weights = mse_loss_weights / (snr + 1)

            loss = F.mse_loss(model_pred.float(), target.float(), reduction="none")
            loss = loss.mean(dim=list(range(1, len(loss.shape)))) * mse_loss_weights
            loss = loss.mean()

        print_gpu_mem("前向结束")


        return loss

    def infer(self,num_inference_stpes,generator,gen_pose_img,ref_images,ref_pose_img,ref_clip_image):
        """Generate an image via iterative PNDM denoising.

        NOTE(review): parameter name ``num_inference_stpes`` is a typo for
        "steps" but is kept for interface compatibility.

        Args:
            num_inference_stpes: Number of denoising steps.
            generator: ``torch.Generator`` for reproducible initial noise.
            gen_pose_img: Target pose image (pose condition for the UNet).
            ref_images: Reference appearance images.
            ref_pose_img: Pose image of the reference frame.
            ref_clip_image: CLIP-preprocessed reference image.

        Returns:
            Decoded image tensor on CPU, float32, values clamped to [0, 1].
        """
        with torch.no_grad():
            clip_image_embeds = self.image_encoder(ref_clip_image).image_embeds
        with torch.no_grad():
            # Use the distribution mode (deterministic) rather than sampling.
            ref_latents = self.vae.encode(ref_images).latent_dist.mode()
            ref_pose_latents = self.vae.encode(ref_pose_img).latent_dist.mode()
            print(ref_latents.shape)
            print(ref_pose_latents.shape)
            # 0.18215 is the Stable Diffusion VAE latent scaling factor.
            ref_latents = ref_latents*0.18215
            ref_pose_latents = ref_pose_latents*0.18215

        B,C,H,W = ref_latents.shape
        latents = torch.randn((B, C, H, W), generator=generator, device="cuda", dtype=clip_image_embeds.dtype).to(device)

        #latents = torch.randn((1, 4, H, W))  # Latent size for 512x512 image
        latents = latents * self.noise_scheduler.init_noise_sigma

        self.noise_scheduler.set_timesteps(num_inference_stpes)
        loop = tqdm(self.noise_scheduler.timesteps, leave=True)
        for t in loop:
            latents = self.noise_scheduler.scale_model_input(latents, t)

            latent_model_input=torch.cat([latents, ref_latents,ref_pose_latents], dim=1).to(dtype=self.unet.dtype)
            with torch.no_grad():
                noise_pred = self.forward(latent_model_input,t,clip_image_embeds.to(dtype=self.unet.dtype),gen_pose_img)
            # Compute previous noisy sample x_t -> x_{t-1}
            latents = self.noise_scheduler.step(noise_pred, t, latents).prev_sample
            loop.set_description(f"infer step {num_inference_stpes}")


        # Decode latents to image
        with torch.no_grad():
            image = self.vae.decode(latents.to(dtype=self.vae.dtype) / 0.18215).sample  # Unscale from VAE

        # Map from [-1, 1] to [0, 1].
        image = (image / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().float()
        return image

    def forward(self,noisy_latents,timesteps,ref_embeds,tgt_pose_img,ip_pose_image=None):
        """Predict noise with the UNet, conditioned on pose and CLIP embeds.

        Args:
            noisy_latents: Concatenated noisy/reference/reference-pose latents.
            timesteps: Diffusion timestep(s) for the batch.
            ref_embeds: CLIP image embeddings of the reference image.
            tgt_pose_img: Target pose image, run through ``pose_net``.
            ip_pose_image: Unused; reserved for future use.

        Returns:
            The UNet's sample output (predicted noise).
        """
        print_gpu_mem()
        #tgt_pose_img = tgt_pose_img.unsqueeze(2)  # (bs, 3, 1, 512, 512)
        pose_cond_tensor = tgt_pose_img.to(device="cuda")
        pose_latents = self.pose_net(pose_cond_tensor)
        #print_gpu_mem()
        #print(pose_latents.shape)
        #with torch.no_grad():
        model_pred = self.unet(
        noisy_latents,
        timesteps,
        pose_latents=pose_latents,
        encoder_hidden_states=None,
        added_cond_kwargs = {"image_embeds": ref_embeds}
        ).sample
        #print_gpu_mem()
        #print(model_pred)
        return model_pred

def save(num_steps:int,folder:str,pgmodel:torch.nn.Module,optimizer:torch.optim.Optimizer=None,learning_scheduler:lr_scheduler.LRScheduler=None,postfix:str=""):
    """Write a training checkpoint for the UNet and PoseNet.

    The checkpoint is saved to ``{folder}/pgmodel-{num_steps}_{postfix}.pth``;
    the folder is created if missing.  Optimizer and scheduler states are
    stored as ``None`` when not provided.
    """
    os.makedirs(folder,exist_ok=True)
    checkpoint = {}
    checkpoint["optimizer_state"] = None if optimizer is None else optimizer.state_dict()
    checkpoint["scheduler_state"] = None if learning_scheduler is None else learning_scheduler.state_dict()
    checkpoint["unet"] = pgmodel.unet.state_dict()
    checkpoint["pose_net"] = pgmodel.pose_net.state_dict()
    torch.save(checkpoint, f'{folder}/pgmodel-{num_steps}_{postfix}.pth')

    
def test(learning_scheduler=None, total_steps=46930, log_every=5000):
    """Debug helper: advance an LR scheduler and print the learning rate.

    Bug fixes: the original read ``learning_scheduler`` from a module global
    that is never defined (NameError at runtime) and called ``exit(0)``,
    which would kill the whole interpreter; the scheduler is now taken as a
    parameter and the function returns normally.

    Args:
        learning_scheduler: Scheduler to step; no-op when None.
        total_steps: Number of ``step()`` calls to perform.
        log_every: Print the learning rate every this many steps.
    """
    if learning_scheduler is None:
        return
    for i in range(total_steps):
        learning_scheduler.step()
        if i % log_every == 0:
            print("lr：{:.9f}".format(learning_scheduler.get_last_lr()[0]))
    print("lr：{:.9f}".format(learning_scheduler.get_last_lr()[0]))

def eval():
    """Qualitative evaluation: generate one image from a trained checkpoint.

    Loads a hard-coded checkpoint, runs 50-step inference on the first batch
    of the dataset, and saves inputs/outputs under ``outputs/eval``.
    NOTE(review): this shadows the builtin ``eval``; the checkpoint path is
    machine-specific.
    """
    from tqdm import tqdm
    from PIL import Image
    config = OmegaConf.load("configs/train.yaml")

    train_dataset=PoseDataset(batch_size=1)
    train_dataloader = torch.utils.data.DataLoader(
        train_dataset, batch_size=1, shuffle=True, num_workers=0,collate_fn=lambda x: x[0]
    )
    pgmodel = PGModel(config)
    pgmodel.load_weights("/root/SDXL/mydiff/Pose-guided_Human_Image_Animation/outputs/pgmodel-93860_finish.pth") # hard-coded checkpoint; earlier alternative: "outputs/pgmodel-37544_finish.pth"

    pgmodel.to(device)
    pgmodel.eval()
    # Only process the first batch, then stop.
    for step,batch in  enumerate(train_dataloader):
        if step>0:
            break
        generator = torch.Generator(device).manual_seed(42)
        img_path="outputs/eval"
        os.makedirs(img_path,exist_ok=True)
        # Save the [-1,1]-normalized inputs side by side for visual reference.
        save_image(torch.cat([_55_to_01(batch["gen_img"]),_55_to_01(batch["gen_img_pose"]),_55_to_01(batch["ref_img"]),_55_to_01(batch["ref_img_pose"])],dim=3),f"{img_path}/eval_input-{step}.png")

        save_image(batch["ref_clip_img"][0],f"{img_path}/eval_ref_clip_img-{step}.png")
        with torch.no_grad():
            with torch.cuda.amp.autocast(dtype=torch.bfloat16):
                result = pgmodel.infer(50,generator,batch["gen_img_pose"].to(device,dtype=torch.float32),batch["ref_img"].to(device,dtype=pgmodel.vae.dtype),batch["ref_img_pose"].to(device,dtype=pgmodel.vae.dtype),batch["ref_clip_img"].to(device,dtype=pgmodel.vae.dtype))
        #ref_latents = pgmodel.vae.encode(batch["ref_img"].to(device,dtype=pgmodel.vae.dtype)).latent_dist.mode()
        #res = pgmodel.vae.decode(ref_latents.to(dtype=pgmodel.vae.dtype)).sample
        #res = (res / 2 + 0.5).clamp(0, 1)
        save_image(result[0],f"{img_path}/eval_res-{step}.png")


def val(model,gen_pose_img,ref_images,ref_pose_img,ref_clip_image,step:int):
    """Run a 25-step validation inference and save inputs/result images.

    Writes ``outputs/val/input-{step}.png`` and ``outputs/val/val-{step}.png``,
    then restores training mode on the model.
    """
    model.eval()
    with torch.no_grad():
        with torch.cuda.amp.autocast(dtype=torch.bfloat16):
            generator = torch.Generator(device).manual_seed(42)
            result = model.infer(25,generator,gen_pose_img,ref_images,ref_pose_img,ref_clip_image)
            os.makedirs("outputs/val",exist_ok=True)
            save_image(torch.cat([_55_to_01(gen_pose_img),_55_to_01(ref_images),_55_to_01(ref_pose_img)],dim=3),f"outputs/val/input-{step}.png")
            save_image(result[0],f"outputs/val/val-{step}.png")
    model.train()
    # The VAE and image encoder are frozen and must stay in eval mode even
    # while the rest of the model trains.
    model.vae.eval()
    model.image_encoder.eval()

def load_data(batch,dtype=torch.float32):
    """Placeholder for batch preprocessing; not implemented (no-op)."""
    pass

def joint_img(img_list):
    """Concatenate image tensors side by side along the width axis (dim 3)."""
    combined = torch.cat(tuple(img_list), dim=3)
    return combined

def train():
    """Full training loop: data, optimizer, resume, validation, checkpoints.

    Reads hyperparameters from ``configs/train.yaml``, optionally resumes
    model/optimizer/scheduler state, then trains with bf16 autocast.
    Periodically saves input grids, runs validation, and checkpoints.
    KeyboardInterrupt / exceptions trigger an emergency save when more than
    2000 steps have completed.
    """
    from tqdm import tqdm
    config = OmegaConf.load("configs/train.yaml")

    train_dataset=PoseDataset(batch_size=1,pose_only_prob=0)
    train_dataloader = torch.utils.data.DataLoader(
        train_dataset, batch_size=1, shuffle=True, num_workers=4,collate_fn=lambda x: x[0]
    )

    pgmodel = PGModel(config)
    if config.resume is not None:
        pgmodel.load_weights(config.resume)
        logger.info("断点重训-model")


    pgmodel.to(device)

    print_gpu_mem("移动到GPU")
    print_memory_usage()

    # Only optimize parameters left trainable (UNet + PoseNet; VAE and CLIP
    # encoder are frozen in PGModel.__init__).
    trainable_params = list(filter(lambda p: p.requires_grad, pgmodel.parameters()))
    optimizer = torch.optim.AdamW(
        trainable_params,
        lr=config.learning_rate,
        betas=(config.adam_beta1, config.adam_beta2),
        weight_decay=config.adam_weight_decay,
        eps=config.adam_epsilon,
    )
    learning_scheduler=lr_scheduler.StepLR(optimizer, step_size=config.lr_update_step, gamma=config.lr_gamma)
    if config.resume is not None and config.resume_optimizer==True:
        # NOTE(review): the checkpoint is loaded twice here; loading once
        # into a local would halve the disk reads.
        optimizer.load_state_dict(torch.load(config.resume)["optimizer_state"])
        learning_scheduler.load_state_dict(torch.load(config.resume)["scheduler_state"])
        logger.info("断点重训-optimizer,lr scheduler")

    epochs = 1000
    total_steps = len(train_dataloader)*epochs
    logger.info(f"总步数：{total_steps}")



    num_step = 0 # number of completed steps

    # --- training loop ---
    try:
        for ep in range(epochs):
            print_gpu_mem()
            loop = tqdm(train_dataloader, total=len(train_dataloader), leave=True)
            for batch in loop:
                # Periodically dump the raw training inputs for inspection.
                if num_step % config.input_steps==0:
                    img_path = f"outputs/inputs"
                    os.makedirs(img_path,exist_ok=True)
                    save_image(torch.cat([_55_to_01(batch["gen_img"]),_55_to_01(batch["gen_img_pose"]),_55_to_01(batch["ref_img"]),_55_to_01(batch["ref_img_pose"])],dim=3),f"{img_path}/input-{num_step}.png")

                num_step += 1
                with torch.cuda.amp.autocast(dtype=torch.bfloat16):
                    loss = pgmodel.one_step(batch["gen_img"].to(device),batch["gen_img_pose"].to(device),batch["ref_img"].to(device),batch["ref_img_pose"].to(device),batch["ref_clip_img"].to(device))
                     # gradient update
                    loss.backward()
                    optimizer.step()
                    learning_scheduler.step()
                    optimizer.zero_grad()

                logs = {
                    "step_loss": loss.detach().item(),
                    "lr": f"{learning_scheduler.get_last_lr()[0]:.6f}"
                }
                loop.set_description(f"Epoch [{ep+1}/{epochs}]")
                loop.set_postfix(**logs)
                logger.info(f"steps: {num_step}  loss: {loss.detach().item()} lr: {learning_scheduler.get_last_lr()[0]:.7f}")

                if num_step % config.val_steps == 0:
                    logger.info(f"第{num_step}步进行验证")
                    val(pgmodel,batch["gen_img_pose"].to(device,dtype=torch.float32),batch["ref_img"].to(device,dtype=pgmodel.vae.dtype),batch["ref_img_pose"].to(device,dtype=pgmodel.vae.dtype),batch["ref_clip_img"].to(device,dtype=pgmodel.vae.dtype),num_step)

                if num_step % config.save_steps == 0:
                    logger.info(f"第{num_step}步保存模型")
                    save(num_step,"outputs",pgmodel,optimizer,learning_scheduler)

            # Per-epoch checkpointing is currently disabled.
            if ep<epochs-1:
                pass
                #logger.info(f"epoch结束保存模型")
                #save(num_step,"outputs",pgmodel,optimizer,learning_scheduler,f"epoch-{ep}")

        save(num_step,"outputs",pgmodel,optimizer,learning_scheduler,"finish")
        logger.info("训练结束，保存模型")
    except KeyboardInterrupt:
        logger.info("用户退出")
        if num_step>2000:
            save(num_step,"break_save",pgmodel,optimizer,learning_scheduler,"interrupt")
            logger.info("保存模型")
    except Exception as e:
        # NOTE(review): only the exception message is logged; using
        # logger.exception here would also record the traceback.
        logger.error(e)
        logger.info("出错")
        if num_step>2000:
            save(num_step,"error_save",pgmodel,optimizer,learning_scheduler,"error")
            logger.info("保存模型")

    finally:
        logger.info("结束")
        # save(num_step,"unknow_save",pgmodel,optimizer,learning_scheduler,"unknow")

# Script entry point: run the training loop.
if __name__ == "__main__":
    train()
