import txdnn
import torch

# Initialize problem sizes and operand tensors for a batched GEMM with a
# broadcast bias: c[i] = a[i] @ b[i] + B for each batch index i.
#
# Fix: the original script reused `b` for both the batch size and the
# right-hand matmul operand, so after the operand was created the batch
# size was silently lost. The dimension is now named `batch`; the final
# bindings (`a`, `b`, `B`, `c_ref`, `c`) are unchanged for the code below.
batch, m, n, k = 16, 128, 128, 512

input_type = torch.float16
device_type = "cuda"
a = torch.randn(batch, m, k, dtype=input_type, device=device_type)
b = torch.randn(batch, k, n, dtype=input_type, device=device_type)
# Bias with a leading 1 so it broadcasts across the batch dimension.
B = torch.randn(1, m, n, dtype=input_type, device=device_type)
c_ref = torch.matmul(a, b) + B  # eager-mode reference result
c = torch.randn_like(c_ref, device=device_type)  # output buffer for the graph run


# txdnn graph setup: declare the computation (matmul + bias) symbolically
# before compiling it into executable kernels.
# NOTE(review): `handle` is created here but never passed to the graph or to
# `execute` below — confirm whether txdnn picks it up implicitly or it is dead.
handle = txdnn.create_handle()

# Intermediate and compute precision are FP32 even though the operands are
# FP16 (set per-tensor below) — presumably for accumulation accuracy; the
# actual semantics are defined by the txdnn library.
graph = txdnn.pygraph(
    intermediate_data_type=txdnn.data_type.FLOAT,
    compute_data_type=txdnn.data_type.FLOAT
)

# Graph tensor handles mirroring the torch tensors' shape/dtype/layout.
a_txdnn_tensor = graph.tensor_like(a)
b_txdnn_tensor = graph.tensor_like(b)
bias_txdnn_tensor = graph.tensor_like(B)

# Build the op chain: matmul produces an intermediate, bias-add produces the
# final tensor, which is marked as a graph output in FP16.
c_intermediate = graph.matmul(name="matmul", A=a_txdnn_tensor, B=b_txdnn_tensor)
c_txdnn_tensor = graph.bias(name="bias", input=c_intermediate, bias=bias_txdnn_tensor)
c_txdnn_tensor.set_name("c").set_output(True).set_data_type(txdnn.data_type.HALF)


# txdnn graph compilation

graph.validate()
graph.build_operation_graph()
graph.create_execution_plans([txdnn.heur_mode.A, txdnn.heur_mode.FALLBACK])
graph.check_support()
graph.build_plans()


# Execute the compiled graph: allocate scratch space, bind each graph tensor
# handle to the torch tensor backing it, then launch.
workspace = torch.empty(
    graph.get_workspace_size(), dtype=torch.uint8, device=device_type
)

# Mapping from graph tensor handles to device buffers; `c` receives the output.
variant_pack = {
    a_txdnn_tensor: a,
    b_txdnn_tensor: b,
    bias_txdnn_tensor: B,
    c_txdnn_tensor: c,
}

graph.execute(variant_pack, workspace)

# The launch is asynchronous; block until the kernels finish before `c` is read.
torch.cuda.synchronize()