import torch
from model_utils import get_transformer_model, get_input_embedding , get_image_tensor
from voltage_system import voltage_infer
from comm.communicator import Communicator
from config import DEVICE_IDS, ZMQ_PORT , PARTITIONS
from bert_head import BertClassificationHead
from transformers import ViTForImageClassification
import torch.nn.functional as F
import time

def main(device_id, model_name):
    """Run one node of the distributed "Voltage" inference pipeline.

    The node whose id equals ``DEVICE_IDS[0]`` acts as master: it waits for
    the workers, builds the input tensor, broadcasts it, then runs the
    classification head on the final output. All other nodes signal
    readiness and wait to receive the broadcast input.

    Args:
        device_id: identifier of this node (compared against DEVICE_IDS).
        model_name: "vit" selects the vision-transformer path; anything
            else selects the BERT-style text path.
    """
    is_master = device_id == DEVICE_IDS[0]
    comm = Communicator(is_master, ZMQ_PORT)
    is_vit = model_name == "vit"
    # Detect the compute device once; fall back to CPU when CUDA is absent.
    device = "cuda:0" if torch.cuda.is_available() else "cpu"

    if not is_vit:
        model_layers = get_transformer_model()
    else:
        # Sample ImageNet image; get_image_tensor accepts a URL or a local path.
        image_path = "https://raw.githubusercontent.com/EliSchwartz/imagenet-sample-images/refs/heads/master/n02086240_Shih-Tzu.JPEG"
        _, model_layers = get_image_tensor(image_path, device=device)

    if is_master:
        # Block until every worker has reported ready.
        comm.wait_for_workers(num_workers=PARTITIONS - 1)
        print(f"Start time: {time.time()}")
        # Build the input tensor and broadcast it to the workers.
        if is_vit:
            x, _ = get_image_tensor(image_path, device=device)
        else:
            # FIX: was hard-coded device="cuda", which crashes on CPU-only
            # hosts even though `device` already handles the fallback.
            x = get_input_embedding("This is a test for Voltage.", max_length=128, device=device)
            print(f"Input shape: {x.shape} on device {device_id}")
        comm.broadcast(x)
    else:
        # Tell the master we are ready, then wait for the broadcast input.
        comm.notify_master_ready()

        x = comm.receive()
        print("[Worker] Received input, min/max:", x.min().item(), x.max().item())
        print("[Worker] Any NaN in input:", torch.isnan(x).any().item())

    print(f"Start Inference time: {time.time()}")
    output = voltage_infer(x, model_layers, device_id, comm, is_master)
    print(f"End time: {time.time()}")

    if is_master:
        print("Final output shape:", output.shape)
        print(f"output: {output}")
        if is_vit:
            print(f"output shape: {output.shape}")
            print(f"output min/max: {output.min().item()}, {output.max().item()}")
            print(f"output mean: {output.mean().item()}")
            # Classify with the pretrained ViT classification head.
            clf_model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(device)
            clf_model.eval()
            with torch.no_grad():
                # [CLS] token representation (first position) -> [1, 768]
                # assumes output is (seq_len, hidden) — TODO confirm against voltage_infer
                cls_token = output[0].unsqueeze(0).to(device)
                logits = clf_model.classifier(cls_token)  # [1, num_classes]
                probs = torch.softmax(logits, dim=-1)
                pred = torch.argmax(probs, dim=-1).item()

            label = clf_model.config.id2label[pred]
            print(f"[Predict] 类别索引: {pred}")
            print(f"[Predict] 标签名称: {label}")
        else:
            # FIX: was hard-coded "cuda:0"; use the detected device so the
            # script also runs on CPU-only hosts.
            head = BertClassificationHead(hidden_size=768, num_classes=2, use_cls=True).to(device)
            head.eval()
            with torch.no_grad():
                logits = head(output.to(device))
                print(f"logits: {logits}")
                print(f"logits shape: {logits.shape}")
                print(f"logits min/max: {logits.min().item()}, {logits.max().item()}")
                print(f"logits argmax: {logits.argmax().item()}")
                probs = F.softmax(logits, dim=-1)
                pred = torch.argmax(probs).item()
                print(f"[Predict] 分类概率: {probs.tolist()}")
                print(f"[Predict] 预测类别: {pred}")

if __name__ == "__main__":
    import sys
    main(sys.argv[1], sys.argv[2])
