import os
import random
import torch
from torch import nn

from torch.utils.data import DataLoader
from torch.optim import Adam
import torch.nn.functional as F

from torchvision import transforms
from peft import LoraConfig, TaskType, get_peft_model_state_dict, get_peft_model

from demo.loss import TripletMarginLoss, pairwise_loss
from demo.sampler import  PKSampler
from gme import GmeQwen2VLWithHash

from read_data import ISICDataSet, ChestXrayDataSet

# Let the CUDA caching allocator grow segments on demand instead of
# pre-reserving fixed blocks — reduces fragmentation-driven OOMs on
# long fine-tuning runs.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"

# Silence the HuggingFace tokenizers fork-safety warning triggered when
# DataLoader worker processes are spawned after the tokenizer is loaded.
os.environ["TOKENIZERS_PARALLELISM"] = "false"

def convert_to_one_hot(labels, num_classes):
    """Convert integer class labels to a one-hot matrix.

    Args:
        labels: integer tensor of shape ``(batch,)`` or ``(batch, 1)``.
        num_classes: width of the one-hot encoding.

    Returns:
        LongTensor of shape ``(batch, num_classes)``.
    """
    # reshape(-1) instead of squeeze(): squeeze() on a batch of size 1
    # collapses the tensor to 0-d, so F.one_hot would return a 1-D vector
    # and downstream row indexing (e.g. `(labels == 1).nonzero()[:, 1]`)
    # would break. reshape(-1) always yields a (batch,) vector.
    labels = labels.reshape(-1)
    return F.one_hot(labels, num_classes)


def train_epoch(model, optimizer, criterion1, criterion2, train_loader, device, epoch, print_freq, args, Hash_center):
    """Run one training epoch combining a triplet loss and hashing losses.

    Args:
        model: network returning ``(embedding, hash_code)`` for a batch.
        optimizer: optimizer over the model's trainable parameters.
        criterion1: metric-learning loss on the embedding branch
            (returns ``(loss, aux)``).
        criterion2: BCE-style loss between hash codes and hash centers.
        train_loader: yields ``(samples, labels)`` batches.
        device: device the batches are moved to.
        epoch: current epoch number (for logging only).
        print_freq: log running averages every this many iterations.
        args: needs ``hash_dim`` and weights ``lambda0``/``lambda1``/``lambda2``.
        Hash_center: ``(num_classes, hash_dim)`` table of target hash centers.
    """
    model.train()
    run_total = run_trip = run_hash = 0.0

    for step, batch in enumerate(train_loader):
        optimizer.zero_grad()

        samples = batch[0].to(device)
        labels = batch[1].to(device)
        emb, hash_code = model(samples)

        # Metric-learning loss on the embedding branch.
        loss1, _ = criterion1(emb, labels)

        # One-hot encode labels; class count comes from the center table.
        num_classes = Hash_center.size(0)
        labels = convert_to_one_hot(labels, num_classes)

        # Look up the target hash center of every sample in the batch.
        center_idx = (labels == 1).nonzero()[:, 1]
        hash_center = Hash_center[center_idx]

        # BCE between code and center, both affinely mapped [-1,1] -> [0,1].
        loss_center = criterion2(0.5 * (hash_code + 1), 0.5 * (hash_center + 1))
        # Quantization penalty pulling every code entry towards +/-1.
        loss_mean = torch.mean((torch.abs(hash_code) - 1.0) ** 2)

        n = hash_code.size(0)
        if n < 2:
            # Pairwise similarity needs at least two samples.
            loss_sim = torch.tensor(0.0, device=device)
        else:
            # Split the batch in half and compare codes across the halves.
            half = n // 2
            loss_sim = pairwise_loss(
                hash_code[:half],
                hash_code[half:],
                labels[:half].to(device),
                labels[half:].to(device),
                sigmoid_param=10.0 / args.hash_dim,
            )

        loss2 = args.lambda0 * loss_center + args.lambda1 * loss_mean + args.lambda2 * loss_sim
        loss = loss1 + loss2

        loss.backward()
        optimizer.step()

        run_trip += loss1.item()
        run_hash += loss2.item()
        run_total += loss.item()

        if step % print_freq == print_freq - 1:
            print(
                f'[Epoch {epoch}, Iter {step + 1}] | loss1: {run_trip / print_freq:.6f} | loss2: {run_hash / print_freq:.6f} | total loss: {run_total / print_freq:.6f}')
            run_total = run_trip = run_hash = 0.0


def save(model, epoch, save_dir, args):
    """Persist the fine-tuned model: LoRA adapter weights plus custom head weights.

    The PEFT-wrapped backbone (``model.gme_base``) is saved through its own
    ``save_pretrained``; every remaining parameter (the hash layer etc.) is
    written to a single ``<prefix>_custom.pth`` file.

    Args:
        model: model exposing a ``gme_base`` PEFT sub-module and ``state_dict``.
        epoch: epoch number embedded in the checkpoint file names.
        save_dir: target directory, created if missing.
        args: needs ``args.dataset`` and ``args.hash_dim`` for the file prefix.
    """
    # exist_ok avoids the check-then-create race of os.path.exists + makedirs.
    os.makedirs(save_dir, exist_ok=True)

    prefix = f"{args.dataset}_hash_dim{args.hash_dim}_epoch{epoch}"

    # LoRA adapter weights — serialization handled by PEFT itself.
    lora_save_path = os.path.join(save_dir, f"{prefix}_lora")
    model.gme_base.save_pretrained(lora_save_path, safe_serialization=False)

    # Everything that is NOT part of the backbone (custom hash head etc.).
    custom_state_dict = {
        k: v for k, v in model.state_dict().items()
        if not k.startswith("gme_base.")
    }
    custom_save_path = os.path.join(save_dir, f"{prefix}_custom.pth")
    torch.save(custom_state_dict, custom_save_path)

def main(args):
    """End-to-end training entry point.

    Builds the GME backbone with a hash head, wraps the backbone with a LoRA
    adapter, constructs the dataset/loader with a P-K batch sampler, trains
    for ``args.epochs`` epochs, then saves the checkpoints.
    """
    # Seed Python and torch RNGs for reproducibility (CUDA RNG not seeded here).
    random.seed(args.seed)
    torch.manual_seed(args.seed)

    # P-K batching: p distinct labels per batch, k samples per label.
    p = args.labels_per_batch
    k = args.samples_per_label
    batch_size = p * k

    # NOTE(review): GPU index 6 is hard-coded — fails on machines with fewer
    # GPUs; consider making the device configurable via args or CUDA_VISIBLE_DEVICES.
    device = torch.device('cuda:6' if torch.cuda.is_available() else 'cpu')

    # Backbone + hash head; model path is relative to the working directory.
    model = GmeQwen2VLWithHash(
        model_path="../gme-Qwen2-VL-2B-Instruct",
        device=device,
        hash_dim=args.hash_dim,
        min_image_tokens=256,
        max_image_tokens=352,
        max_length=768
    )
    # LoRA on all attention and MLP projection matrices of the backbone.
    peft_config = LoraConfig(
        task_type=TaskType.CAUSAL_LM,
        r=8,
        lora_alpha=32,
        lora_dropout=0.05,
        target_modules=["q_proj", "v_proj", "k_proj", "o_proj", "gate_proj", "up_proj", "down_proj"]
    )


    model.gme_base  = get_peft_model(model.gme_base, peft_config)

    # criterion1: triplet loss on embeddings; criterion2: BCE on hash codes.
    criterion1 = TripletMarginLoss(margin=args.margin).to(device)
    criterion2 = nn.BCELoss().to(device)
    # Backbone (LoRA) trains at 5% of the head's learning rate.
    params_list = [
        {"params": model.gme_base.parameters(), "lr": 0.05 * args.lr},
        {"params": model.hash_layer.parameters(), "lr": args.lr}
    ]

    optimizer = Adam(params_list, lr=args.lr,betas=(0.9, 0.999))

    # ImageNet normalization statistics.
    normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    train_transform = transforms.Compose([
        transforms.Lambda(lambda img: img.convert("RGB")),
        transforms.Resize(256),
        transforms.RandomResizedCrop(224) if args.rand_resize else transforms.CenterCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize
    ])


    if args.dataset == "isic":
        train_dataset = ISICDataSet(
            data_dir=os.path.join(args.dataset_dir, "ISIC-2017_Training_Data"),
            image_list_file=args.train_image_list,
            transform=train_transform
        )
    elif args.dataset == "covid":
        train_dataset = ChestXrayDataSet(
            data_dir=os.path.join(args.dataset_dir, "trainandval"),
            image_list_file=args.train_image_list,
            transform=train_transform
        )
    else:
        raise NotImplementedError(f"不支持的数据集: {args.dataset}")

    # NOTE(review): assumes both dataset classes expose a `.labels` attribute
    # for the sampler — confirm against read_data.py.
    targets = train_dataset.labels
    # shuffle=False is required when a custom sampler is supplied; the
    # PKSampler controls ordering.
    train_loader = DataLoader(train_dataset, batch_size= batch_size,
                              sampler=PKSampler(targets, p, k),
                              shuffle=False,
                              num_workers=args.workers)

    # Precomputed hash centers, one row per class (pickled torch tensor).
    Hash_center = torch.load(args.true_hash).to(device)

    for epoch in range(1, args.epochs + 1):
        print('Training...')
        train_epoch(model, optimizer, criterion1, criterion2, train_loader, device, epoch, args.print_freq,args, Hash_center)

    print(f"=== Saving ===")
    save(model, args.epochs, args.save_dir, args)

def parse_args():
    """Parse command-line arguments for GME + deep-hash training.

    Returns:
        argparse.Namespace with dataset paths, P-K sampler sizes, hash
        dimension, optimization hyper-parameters, and loss weights.
    """
    import argparse
    parser = argparse.ArgumentParser(description="GME and DEEP HASH")

    # Dataset selection and file locations.
    parser.add_argument("--dataset", default="isic", help="dataset")
    parser.add_argument("--dataset-dir", default="../data/isic", help="dataset-dir")
    parser.add_argument("--train-image-list", default="./ISIC-2017_Training_Part3_GroundTruth.csv", help="train-image-list")
    parser.add_argument("--true-hash", default="./64_isic_3_class.pkl", help="true-hash")

    # P-K batch composition (effective batch size is p * k).
    parser.add_argument('-p', '--labels-per-batch', default=3, type=int,
                        help='Number of unique labels/classes per batch')
    parser.add_argument('-k', '--samples-per-label', default=2, type=int,
                        help='Number of samples per label in a batch')

    parser.add_argument("--hash-dim", default=64, type=int, help="hash dim")

    # Optimization settings.
    parser.add_argument("--epochs", default=5, type=int, help="epochs")
    parser.add_argument("--lr", default=0.001, type=float, help="lr")
    parser.add_argument('--margin', default=0.2, type=float,
                        help='Triplet loss margin')
    parser.add_argument('--print-freq', default=25, type=int, help='Print frequency')
    parser.add_argument("--workers", default=4, type=int, help="the num of workers")

    parser.add_argument('--seed', type=int, default=0, help='Random seed to use')
    parser.add_argument('--save-dir', default='./checkpoints', help='Model save directory')

    parser.add_argument('--mask-dir', default=None, help='Segmentation masks path (if used)')
    parser.add_argument('--rand-resize', action='store_true', help='Use random resizing data augmentation')
    parser.add_argument('--anomaly', action='store_true', help='Train without anomaly class')

    # Loss weights for loss2 = l0*center + l1*quantization + l2*similarity.
    parser.add_argument('--lambda0', type=float, default=1, help='hyper-parameters 0')
    parser.add_argument('--lambda1', type=float, default=0.2, help='hyper-parameters 1')
    # Fixed copy-paste bug: help text previously said 'hyper-parameters 1'.
    parser.add_argument('--lambda2', type=float, default=0.05, help='hyper-parameters 2')
    return parser.parse_args()


if __name__ == "__main__":
    # Script entry point: parse CLI arguments and launch training.
    main(parse_args())
