import torch
from torch.utils.data.distributed import DistributedSampler
from tqdm.rich import tqdm
import torch.distributed as dist
from utils.overwatch import initialize_overwatch

logger = initialize_overwatch("vla_qat")


@torch.inference_mode()
def eval_model(
    eval_name, vla, dataset, action_tokenizer, collator, batch_size=24, num_batchs=1
):
    """Distributed evaluation of a VLA model's action-token accuracy.

    Runs up to ``num_batchs`` batches per rank through ``vla``, compares the
    argmax of the action logits against the label tokens, and all-reduces the
    correct/total counts across all ranks.

    Args:
        eval_name: Label used in the progress bar and the final log line.
        vla: Model to evaluate; moved to CUDA and switched to eval mode here.
        dataset: Dataset to draw evaluation batches from.
        action_tokenizer: Provides ``action_token_begin_idx``, used to mask
            out non-action label positions.
        collator: Collate function for the DataLoader.
        batch_size: Per-rank batch size.
        num_batchs: Maximum number of batches each rank evaluates.

    Returns:
        float: Action-token accuracy aggregated over all ranks
        (0.0 when no action tokens were seen).
    """
    vla.eval()
    vla.cuda()
    assert vla.device != "cpu", vla.device

    # Each rank evaluates a disjoint shard of the dataset.
    sampler = DistributedSampler(dataset)
    dataloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        sampler=sampler,
        pin_memory=True,
        collate_fn=collator,
        num_workers=16,
    )

    # Accumulate in on-device tensors so dist.all_reduce works even when this
    # rank processes zero batches (plain Python ints would crash all_reduce).
    device = torch.device("cuda")
    total_correct = torch.tensor(0.0, device=device)
    total_mask = torch.tensor(0.0, device=device)

    if dist.get_rank() == 0:
        dataloader = tqdm(dataloader, desc=f"Eval {eval_name} model ", total=num_batchs)

    # Grad is already disabled by @torch.inference_mode(); no inner
    # torch.no_grad() needed.
    for i, batch in enumerate(dataloader):
        if i >= num_batchs:
            break

        # Move tensors to GPU, casting fp32 to bf16; pass non-tensor batch
        # entries (e.g. dataset-name strings) through unchanged.
        inputs = {
            k: (
                (v.cuda().bfloat16() if v.dtype == torch.float32 else v.cuda())
                if isinstance(v, torch.Tensor)
                else v
            )
            for k, v in batch.items()
        }
        outputs = vla(**inputs)

        # Drop the visual-token prefix and the final position so logits align
        # with the right-shifted labels. NOTE(review): assumes a fixed
        # 256-token vision prefix — confirm against the model config.
        action_logits = outputs.logits[:, 256:-1]
        action_preds = action_logits.argmax(dim=2)
        action_gt = batch["labels"][:, 1:].to(action_preds.device)
        # Only positions holding action tokens count toward accuracy.
        mask = action_gt > action_tokenizer.action_token_begin_idx

        correct_preds = (action_preds == action_gt) & mask
        total_correct += correct_preds.sum().float()
        total_mask += mask.sum().float()

    # Aggregate counts across all ranks before computing the ratio.
    dist.all_reduce(total_correct, op=dist.ReduceOp.SUM)
    dist.all_reduce(total_mask, op=dist.ReduceOp.SUM)

    if total_mask > 0:
        action_accuracy = total_correct / total_mask
    else:
        action_accuracy = torch.tensor(0.0, device=device)

    action_accuracy = action_accuracy.item()  # type: ignore
    # L1-loss computation is currently disabled; keep a placeholder value so
    # the log-line format stays stable.
    action_l1_loss = 1  # type: ignore
    if dist.get_rank() == 0:
        logger.info(f"{eval_name} model {action_accuracy=} {action_l1_loss=}")
    return action_accuracy
