import torch
from partitioned_transformer import partitioned_layer
from config import PARTITIONS

def voltage_infer(x, model_layers, device_id, comm, is_master):
    """Run layer-by-layer partitioned inference across cooperating devices.

    Each device computes its own row-slice of every layer's output; the
    master gathers all slices, concatenates them into the full activation,
    and broadcasts it back to the workers before the next layer runs.

    Args:
        x: Input tensor, partitioned along dim 0.
        model_layers: Iterable of layer objects handed to partitioned_layer.
        device_id: Device identifier string; "orin_1" maps to partition 0,
            anything else to partition 1.  NOTE(review): this two-way
            mapping only covers PARTITIONS == 2 — confirm before adding
            more devices.
        comm: Communication handle providing send_result / receive_result /
            broadcast / receive (semantics defined elsewhere — not visible
            from this file).
        is_master: True on the device that gathers and re-broadcasts.

    Returns:
        The full (concatenated) output tensor of the final layer.
    """
    N = x.shape[0]
    part_len = N // PARTITIONS
    my_id = 0 if device_id == "orin_1" else 1
    start = my_id * part_len
    # Last partition absorbs the remainder when N % PARTITIONS != 0.
    end = (my_id + 1) * part_len if my_id < PARTITIONS - 1 else N
    # NOTE(review): start/end are computed once, so every layer is assumed
    # to preserve the size of dim 0 — confirm against partitioned_layer.

    for layer in model_layers:
        my_output = partitioned_layer(x, start, end, layer)
        if is_master:
            # Gather the other partitions' slices and rebuild the full
            # activation.  NOTE(review): the concatenation assumes results
            # arrive in partition order — verify comm guarantees this.
            all_outputs = [my_output]
            for i in range(PARTITIONS - 1):
                other_output = comm.receive_result()
                print(f"[Master] Received from worker {i+1}: min={other_output.min().item():.4f}, max={other_output.max().item():.4f}")
                all_outputs.append(other_output)
            x = torch.cat(all_outputs, dim=0)
            # One broadcast per worker so every device starts the next
            # layer from the same full tensor.
            for _ in range(PARTITIONS - 1):
                comm.broadcast(x)
        else:
            comm.send_result(my_output)
            x = comm.receive()

    return x


