import torch
import os, gc
import time
from contextlib import contextmanager
from tqdm import tqdm
import nvtx

@contextmanager
def timer(n=1, desc="Cost", stream=None, endstr=" "):
    """Print the elapsed wall time of the enclosed block, divided by ``n``.

    Parameters
    ----------
    n : int
        Divisor for the elapsed time (e.g. the number of iterations timed),
        so the printed value is a per-iteration cost in milliseconds.
    desc : str
        Label printed before the measured time.
    stream : torch.cuda.Stream | None
        If given, synchronized before and after the block so asynchronous
        CUDA work queued on it is included in the measurement.
    endstr : str
        Terminator passed to print (" " keeps several timings on one line).
    """
    if stream is not None:
        stream.synchronize()
    # perf_counter_ns is monotonic; time.time_ns follows the wall clock and
    # can jump (NTP adjustment, DST), corrupting the interval measurement.
    start = time.perf_counter_ns() // 1_000_000
    try:
        yield
    finally:
        if stream is not None:
            stream.synchronize()
        end = time.perf_counter_ns() // 1_000_000
        print(f'{desc} {(end-start)/n:>5.1f} ms', end=endstr)

def clear_memory(weight=None):
    """Best-effort memory release: drop the local reference to *weight*,
    force a garbage-collection pass, then return cached CUDA blocks to
    the driver.

    Note: ``del`` removes only this function's local binding — if the
    caller still holds a reference, the tensor itself is not freed.
    Useful when invoked as ``clear_memory(container.pop())``, where this
    local is the last remaining reference.
    """
    del weight  # drops the binding whether or not a tensor was passed
    gc.collect()
    torch.cuda.empty_cache()

if __name__ == "__main__":
    # Two CUDA streams: streams[0] carries the compute (matmul) and the
    # offload-to-CPU copy; streams[1] carries the prefetch/staging copies.
    streams = [torch.cuda.Stream() for _ in range(2)]

    work_device = "cuda:0"  # GPU that runs the matmuls
    buf_device = "cuda:1"   # staging GPU for the weight two steps ahead
    s = 4
    # 31 square fp32 weight matrices of shape (4096, 4096), created on the
    # CPU; the loop below streams them through the two GPUs.
    w_tensor_list = [torch.randn(((1024*s), (1024*s)), dtype=torch.float32, device="cpu") for _ in tqdm(range(31), desc="Create weight")]
    a_tensor = torch.randn(((1024*s), (1024*s)), dtype=torch.float32, device=work_device)

    # init pipeline
    # Warm the pipeline: weight 0 on the work GPU, weight 1 staged on the
    # buffer GPU.  (a_tensor is already on work_device, so its .to is a no-op.)
    w_tensor_list[0] = w_tensor_list[0].to(work_device)
    w_tensor_list[1] = w_tensor_list[1].to(buf_device)
    a_tensor = a_tensor.to(work_device)

    start = time.time_ns() // 1_000_000  # NOTE(review): never read afterwards — dead store?

    with timer(len(w_tensor_list)):
        with nvtx.annotate("my_code_block", color="blue"):
            for i, w_tensor in enumerate(w_tensor_list):            
                # Ensure the current weight is on the work GPU.  For i >= 1
                # it was prefetched by an earlier iteration, so this should
                # be a no-op (or cheap) transfer on the default stream.
                w_tensor_list[i] = w_tensor.to(work_device)
                w_tensor = w_tensor_list[i]

                with torch.cuda.stream(streams[0]):
                    # Chained matmul: a = w_i @ a, computed on stream 0.
                    a_tensor = torch.matmul(w_tensor, a_tensor)

                if i+1 < len(w_tensor_list):
                    i_next = i+1
                    with torch.cuda.stream(streams[1]):
                        # Prefetch the next weight onto the work GPU.  By this
                        # point weight i+1 was staged on buf_device two
                        # iterations ago (or by the init above), so this is a
                        # device-to-device copy.
                        w_tensor_list[i_next] = w_tensor_list[i_next].to(work_device, non_blocking=True)
                if i+2 < len(w_tensor_list):
                    i_next = i+2
                    with torch.cuda.stream(streams[1]):
                        # Stage the weight two steps ahead on the buffer GPU.
                        # NOTE(review): non_blocking host->device copies only
                        # overlap when the CPU tensor is in pinned memory;
                        # these are pageable, so this copy likely blocks —
                        # confirm with a profiler.
                        w_tensor_list[i_next] = w_tensor_list[i_next].to(buf_device, non_blocking=True)
                with torch.cuda.stream(streams[0]):
                    # Offload the consumed weight back to the CPU on the same
                    # stream as the matmul, so the copy waits for the compute.
                    w_tensor_list[i] = w_tensor_list[i].to("cpu", non_blocking=True)

                # with timer(desc="Offload", stream=stream, endstr="\n"):
                #     w_tensor_list[i] = w_tensor_list[i].to("cpu")
                # Drain both streams every iteration.  This is the
                # simplest-correct choice but serializes the pipeline stages,
                # limiting compute/copy overlap across iterations.
                [stream.synchronize() for stream in streams]
                clear_memory()

"""
install
pip install nvtx

async
Load   0.0 ms Matmul 127.0 ms Offload  35.0 ms
Load   0.0 ms Matmul   0.0 ms Offload  30.0 ms
Load   0.0 ms Matmul  12.0 ms Offload  32.0 ms
Load   0.0 ms Matmul   3.0 ms Offload 395.0 ms
Load   0.0 ms Matmul   6.0 ms Offload  24.0 ms
sync
Load   0.0 ms Matmul  96.0 ms Offload  41.0 ms
Load  11.0 ms Matmul   7.0 ms Offload  41.0 ms
Load   9.0 ms Matmul   0.0 ms Offload  43.0 ms
Load   9.0 ms Matmul   2.0 ms Offload  44.0 ms
Load   8.0 ms Matmul   9.0 ms Offload  43.0 ms
"""
