import yaml
from easydict import EasyDict
import os
import time
from shutil import copyfile
import random
import numpy as np
from tqdm import tqdm
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import torch
# Assumes the contents of network and utils are imported correctly
from network.Dual_Mark import *
from utils import *
from transformers import AutoTokenizer
import contextlib
import sys

# Restrict training to a single physical GPU (adjust index as needed).
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
if not torch.cuda.is_available():
    # Fail fast: the whole pipeline below requires CUDA.
    raise RuntimeError("no available GPU!")

# Resolve project-relative paths against this file's directory.
project_path = os.path.abspath(os.path.dirname(__file__))
train_file_path = os.path.join(project_path, 'cfg/train_DualMark.yaml')


def seed_torch(seed=42):
    """Seed every RNG source (python, numpy, torch CPU/CUDA) for reproducibility.

    Also pins cuDNN to deterministic kernels with benchmarking disabled so
    repeated runs produce identical results.
    """
    seed = int(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    for seeder in (random.seed, np.random.seed, torch.manual_seed,
                   torch.cuda.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.enabled = True


def greedy_decode(logits: torch.Tensor, eos_token_id: int = 2, max_len: int = 64, device="cpu", tokenizer=None):
    """
    Greedily decode a token sequence from precomputed per-step logits.

    Args:
        logits: Tensor of shape (T, B, V) with the model's logits per time step.
        eos_token_id: Token id that terminates generation (default is 2).
        max_len: Upper bound on the number of decoded steps (default is 64).
        device: Device on which the output tensor is allocated.
        tokenizer: Optional object with a ``decode(ids, skip_special_tokens=...)``
            method; when None, ids are rendered as space-separated integers
            instead of crashing (the previous behavior).

    Returns:
        generated_ids: LongTensor of shape (steps, B) holding the argmax token
            at each step, truncated at the step where every batch element
            emitted ``eos_token_id`` — no zero padding leaks into the output.
        decoded_text: List of B decoded strings.
    """
    T, B, _ = logits.shape
    # Never index past the logits we actually have: the original loop ran to
    # max_len unconditionally and raised IndexError whenever T < max_len.
    steps = min(T, max_len)
    generated_ids = torch.zeros(steps, B, dtype=torch.long, device=device)

    used = 0
    for t in range(steps):
        # Greedy pick: highest-logit token at this step, shape (B,).
        next_token = logits[t].argmax(dim=-1)
        generated_ids[t] = next_token
        used = t + 1
        # Stop early once every batch element has produced EOS.
        if (next_token == eos_token_id).all():
            break

    # Drop unwritten rows so trailing zeros are not decoded as real tokens.
    generated_ids = generated_ids[:used]

    if tokenizer is None:
        decoded_text = [" ".join(str(int(tok)) for tok in col)
                        for col in generated_ids.transpose(0, 1)]
    else:
        decoded_text = [tokenizer.decode(col.cpu().numpy(), skip_special_tokens=True)
                        for col in generated_ids.transpose(0, 1)]

    return generated_ids, decoded_text


def decode_ids(token_ids):
    """Placeholder decoder: render token ids as a space-separated string.

    Swap in a real tokenizer decode here to get human-readable text.
    """
    return " ".join(map(str, token_ids))


# After training/validation, log decoded text samples for qualitative review.
def log_decoded_text_to_tensorboard(writer, sampled_indices, label, dataset, device, network, tokenizer, epoch):
    """
    Log original, re-decoded, watermarked, and deepfaked text to TensorBoard.

    Args:
        writer: TensorBoard SummaryWriter.
        sampled_indices: Iterable of dataset indices to sample and decode.
        label: Tag prefix for the entries (e.g. "val").
        dataset: Dataset exposing .messages, .input_ids, .list, .transform and
            .image_dir — schema assumed from usage here; TODO confirm.
        device: Torch device tensors are moved to.
        network: Model wrapper exposing text_projector, text_decoder and
            get_logits(...).
        tokenizer: HuggingFace tokenizer used to decode token ids.
        epoch: Current epoch, used as the TensorBoard global step.
    """
    # Encoder-Decoder
    encoder_decoder = network.encoder_decoder

    # Discriminator
    discriminator = network.discriminator

    # Text Projector
    text_projector = network.text_projector

    # Text Decoder
    text_decoder = network.text_decoder

    # Gather per-sample inputs so they can be processed as one batch.
    clip_embs = []
    input_ids = []
    images = []
    masks = []
    filenames = []

    for idx in sampled_indices:
        # Collect all required inputs for this sample.
        clip_emb_np = dataset.messages[idx]
        clip_emb_t = torch.tensor(clip_emb_np, dtype=torch.float32).to(device)
        clip_embs.append(clip_emb_t)

        input_ids.append(dataset.input_ids[idx])

        filename, mask = dataset.list[idx]
        filenames.append(filename)
        masks.append(torch.FloatTensor(mask))

        # NOTE(review): `Image` presumably comes via the star imports
        # (PIL.Image through utils) — confirm it is actually in scope.
        image = dataset.transform(Image.open(os.path.join(dataset.image_dir, filename)).convert("RGB"))
        images.append(image)

    # Stack the per-sample tensors into a single batch.
    clip_embs = torch.stack(clip_embs)  # [batch_size, embedding_dim]
    input_ids = torch.stack(input_ids)  # [batch_size, seq_length]
    images = torch.stack(images)  # [batch_size, channels, height, width]
    masks = torch.stack(masks)  # [batch_size, mask_size]

    # Original texts (decode the whole batch of samples at once).
    original_texts = [tokenizer.decode(ids, skip_special_tokens=True, clean_up_tokenization_spaces=True) for ids in input_ids]

    # Text encoded then directly decoded (no image round-trip).
    clip_embedding = text_projector(clip_embs)  # [batch_size, embedding_dim] -> [batch_size, 128]
    logits_1 = text_decoder(clip_embedding, input_ids)  # [batch_size, seq_length, vocab_size]

    # greedy_decode expects raw logits, not already-decoded text.
    # NOTE(review): greedy_decode documents a (T, B, V) layout while the
    # comment above suggests (B, T, V) — confirm the expected axis order.
    ids_1, gen_text_1 = greedy_decode(logits=logits_1, eos_token_id=tokenizer.eos_token_id, max_len=64, device=device, tokenizer=tokenizer)

    # Logits from the full network (watermark + deepfake branches).
    # NOTE(review): dataset.messages[sampled_indices] relies on fancy
    # indexing — works only if messages is an ndarray/tensor, not a list.
    logits_2, logits_3 = network.get_logits(images=images, messages=dataset.messages[sampled_indices], masks=masks, ids=input_ids)

    # Text after the watermarking path.
    ids_2, gen_text_2 = greedy_decode(logits=logits_2, eos_token_id=tokenizer.eos_token_id, max_len=64, device=device, tokenizer=tokenizer)

    # Text after the deepfake path.
    ids_3, gen_text_3 = greedy_decode(logits=logits_3, eos_token_id=tokenizer.eos_token_id, max_len=64, device=device, tokenizer=tokenizer)

    # Format the log entries.
    texts = []

    for i in range(len(sampled_indices)):
        # Deliberate formatting: fields separated by \n\n (one blank line each).
        entry = f"{label}_sample_index={sampled_indices[i]}\n\n" \
                f"original_text={original_texts[i]}\n\n" \
                f"pred_text={gen_text_1[i]}\n\n" \
                f"R_pred_text={gen_text_2[i]}\n\n" \
                f"dF_pred_text={gen_text_3[i]}\n\n------"
        texts.append(entry)

    # Write the text to TensorBoard; entries are separated by a single newline
    # (use "\n\n------\n\n".join(texts) instead for blank lines between samples).
    text_content = "\n".join(texts)
    writer.add_text(f"decoded/{label}_epoch_{epoch}", text_content, epoch)
    writer.flush()

def main():
    """Train the Dual-Mark watermarking network.

    Reads cfg/train_DualMark.yaml, splits the preprocessed text data 90/10,
    builds the network and dataloaders, then runs the train/val loop while
    logging scalars, decoded-text samples and images to TensorBoard and
    saving per-epoch checkpoints into a timestamped results folder.
    """
    seed_torch(42)

    # 1. GPUs to use (single card: [0]; multi-card: e.g. [0, 1, 2, 3]).
    available_devices = [0]
    device = torch.device(f'cuda:{available_devices[0]}')
    print(f"Using GPUs: {available_devices} (main device: {device})")

    # Load the preprocessed text dataset (already tokenized/embedded).
    input_ids_path = "./data_process/processed_data/add_suffix_company_input_ids.pt"
    text_embeddings_path = "./data_process/processed_data/add_suffix_company_gpt2_text_embeddings.pt"
    tokenizer = AutoTokenizer.from_pretrained("/home/fang_guotong/.cache/huggingface/hub/models--gpt2/snapshots/607a30d783dfa663caf39e06633721c8d4cfcd7e", use_fast=True)

    input_ids = torch.load(input_ids_path)
    text_embeddings = torch.load(text_embeddings_path)  # [N,512]

    # Split the text inputs into train / test subsets.
    N = input_ids.size(0)
    indices = torch.randperm(N)

    train_ratio = 0.9
    train_size = int(N * train_ratio)

    train_idx = indices[:train_size]
    test_idx = indices[train_size:]

    input_ids_train = input_ids[train_idx]
    text_embeddings_train = text_embeddings[train_idx]

    input_ids_test = input_ids[test_idx]
    text_embeddings_test = text_embeddings[test_idx]

    # Load the training config.
    with open(train_file_path, 'r') as f:
        args = EasyDict(yaml.safe_load(f))

    project_name = args.project_name
    epoch_number = args.epoch_number
    batch_size = args.batch_size
    lr = args.lr
    beta1 = args.beta1
    image_size = args.image_size
    message_length = args.message_length
    message_range = args.message_range
    attention_encoder = args.attention_encoder
    attention_decoder = args.attention_decoder
    weight = args.weight
    dataset_path = args.dataset_path
    save_images_number = args.save_images_number
    noise_layers_R = args.noise_layers.pool_R
    noise_layers_F = args.noise_layers.pool_F

    # Results folder is named after the run configuration + timestamp.
    project_name += f"with_decoder_{image_size}_{message_length}_{message_range}_{lr}_{beta1}_{attention_encoder}_{attention_decoder}_" + "_".join(map(str, weight))
    result_folder = f"results/{time.strftime(project_name + '_%Y_%m_%d_%H_%M_%S', time.localtime())}/"
    os.makedirs(result_folder + "images/", exist_ok=True)
    os.makedirs(result_folder + "models/", exist_ok=True)
    copyfile(train_file_path, result_folder + "train_DualMark.yaml")
    writer = SummaryWriter(result_folder)

    # Per-epoch aggregate statistics, persisted to disk after training.
    epoch_stats = {'train': [], 'val': []}

    # ======================
    # Build the Network. Uncomment the redirect to silence noisy third-party
    # output printed during initialization.
    # ======================
    with open(os.devnull, 'w') as devnull:
        # with contextlib.redirect_stdout(devnull):
        network = Network(message_length, noise_layers_R, noise_layers_F, device, batch_size, lr, beta1, attention_encoder, attention_decoder, weight)
    # Move the network to the main device.
    network = network.to(device)

    # Optional multi-GPU wrapping: before enabling, make sure Network does not
    # already wrap its submodules in DataParallel (avoid nested DataParallel).
    # if len(available_devices) > 1:
    #     network = torch.nn.DataParallel(network, device_ids=available_devices)

    # Optional: load pretrained modules (encoder/discriminator/projector/
    # text_decoder) via network.load_model(...) or text_decoder.load_pretrained(...).

    # Datasets and dataloaders (attrsImgDataset must support tgt_emb=None).
    train_dataset = attrsImgDataset(os.path.join(dataset_path, f"train_{image_size}"), image_size, text_embeddings_train, "celebahq", input_ids_train)
    val_dataset = attrsImgDataset(os.path.join(dataset_path, f"val_{image_size}"), image_size, text_embeddings_test, "celebahq", input_ids_test)
    train_dataloader = DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=4,
        pin_memory=True,
        drop_last=True
    )
    val_dataloader = DataLoader(
        val_dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=4,
        pin_memory=True,
        drop_last=True
    )

    print("\nStart training with progress bars:\n")
    # Log dataset size information.
    print(f"Training dataset size: {len(train_dataset)} samples")
    print(f"Validation dataset size: {len(val_dataset)} samples")
    print(f"Training batches per epoch: {len(train_dataloader)}")
    print(f"Validation batches per epoch: {len(val_dataloader)}")

    for epoch in range(1, epoch_number + 1):
        running_result = {k: 0.0 for k in [
            "g_loss", "psnr", "ssim",
            "g_loss_on_discriminator", "g_loss_on_encoder_MSE", "g_loss_on_encoder_LPIPS",
            "g_loss_on_TEXT_decoder_R", "g_loss_on_TEXT_decoder_F", "d_loss"]}

        start_time = time.time()

        # === Training phase ===
        train_bar = tqdm(train_dataloader, desc=f"Epoch [{epoch}/{epoch_number}] Training", ncols=100)
        for step, (image, message, mask, ids) in enumerate(train_bar, 1):
            image = image.to(device)
            ids = ids.to(device)

            # Run one training step through the network.
            result = network.train(image, message, mask, ids)

            # Accumulate batch metrics. Use .get so metrics the network reports
            # beyond the pre-declared keys (e.g. error_rate_R/F) don't KeyError.
            for key in result:
                writer.add_scalar(f"Train/{key}", float(result[key]), (epoch - 1) * len(train_dataloader) + step)
                running_result[key] = running_result.get(key, 0.0) + float(result[key])

            avg_g_loss = running_result['g_loss'] / step
            train_bar.set_postfix({"G_Loss": f"{avg_g_loss:.4f}"})

        # Epoch-average training metrics.
        train_avg = {k: v / len(train_dataloader) for k, v in running_result.items()}
        epoch_stats['train'].append(train_avg)

        # Write this epoch's training averages to TensorBoard.
        for key, value in train_avg.items():
            writer.add_scalar(f"Train_Epoch/{key}", value, epoch)

        # === Validation phase ===
        val_result = {k: 0.0 for k in running_result.keys()}
        # Clamp the sample count so np.random.choice(replace=False) cannot fail
        # when save_images_number exceeds the number of validation batches.
        saved_iterations = np.random.choice(
            np.arange(1, len(val_dataloader) + 1),
            size=min(save_images_number, len(val_dataloader)),
            replace=False)
        saved_all = None

        val_bar = tqdm(val_dataloader, desc=f"Epoch [{epoch}/{epoch_number}] Validation", ncols=100)
        for step, (image, message, mask, ids) in enumerate(val_bar, 1):
            image = image.to(device)
            ids = ids.to(device)

            # Use the message supplied by the dataset (do not overwrite it).
            result, (images, encoded_images) = network.validation(image, message, mask, ids)

            # Accumulate batch metrics (tolerant of extra keys, as above).
            for key in result:
                writer.add_scalar(f"Val/{key}", float(result[key]), (epoch - 1) * len(val_dataloader) + step)
                val_result[key] = val_result.get(key, 0.0) + float(result[key])

            avg_val_g_loss = val_result['g_loss'] / step
            val_bar.set_postfix({"Val_G_Loss": f"{avg_val_g_loss:.4f}"})
            if step in saved_iterations:
                saved_all = get_random_images(image, encoded_images) if saved_all is None else concatenate_images(saved_all, image, encoded_images)

        # Epoch-average validation metrics.
        val_avg = {k: v / len(val_dataloader) for k, v in val_result.items()}
        epoch_stats['val'].append(val_avg)
        # Write this epoch's validation averages to TensorBoard.
        for key, value in val_avg.items():
            writer.add_scalar(f"Val_Epoch/{key}", value, epoch)

        # === Log decoded text samples for this epoch ===
        # Sample a few validation indices for qualitative inspection.
        sampled_val = np.random.choice(len(val_dataset), size=3, replace=False)
        log_decoded_text_to_tensorboard(writer, sampled_val, "val", val_dataset, device, network, tokenizer, epoch)
        # Save sample images; guard against no batch having been captured.
        if saved_all is not None:
            save_images(saved_all, epoch, result_folder + "images/", resize_to=None)

        # Epoch wall-clock time.
        epoch_time = time.time() - start_time

        # Append per-epoch logs to text files.
        train_log = f"Epoch {epoch} (Train) - Time: {epoch_time:.2f}s - " + \
                   ", ".join([f"{k}={v:.6f}" for k, v in train_avg.items()]) + "\n"
        val_log = f"Epoch {epoch} (Val) - Time: {epoch_time:.2f}s - " + \
                 ", ".join([f"{k}={v:.6f}" for k, v in val_avg.items()]) + "\n"

        with open(result_folder + "/train_log.txt", "a") as f:
            f.write(train_log)
        with open(result_folder + "/val_log.txt", "a") as f:
            f.write(val_log)

        # Epoch summary. The error-rate keys exist only if the network reports
        # them, so fall back to NaN instead of raising KeyError.
        print(f"\nEpoch {epoch} Summary:")
        print(f"Training - G Loss: {train_avg['g_loss']:.4f}, Error R: {train_avg.get('error_rate_R', float('nan')):.4f}, Error F: {train_avg.get('error_rate_F', float('nan')):.4f}")
        print(f"Validation - G Loss: {val_avg['g_loss']:.4f}, Error R: {val_avg.get('error_rate_R', float('nan')):.4f}, Error F: {val_avg.get('error_rate_F', float('nan')):.4f}")
        print(f"Epoch time: {epoch_time:.2f} seconds\n")

        # Save checkpoints (includes the text_decoder state dict;
        # Dual_Mark.save_model implements the serialization).
        network.save_model(
            result_folder + f"models/EC_{epoch}.pth",
            result_folder + f"models/D_{epoch}.pth",
            result_folder + f"models/P_{epoch}.pth",
            result_folder + f"models/TD_{epoch}.pth"
        )

    # Persist all per-epoch statistics after training completes.
    np.save(os.path.join(result_folder, "epoch_stats.npy"), epoch_stats)
    writer.close()
    print("Training completed. All epoch statistics saved.")



# Script entry point: run training only when executed directly.
if __name__ == '__main__':
    main()
