import os
import sys
from typing import *
from collections import defaultdict

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint
from torch.utils.data import DataLoader, Dataset, RandomSampler
from torch.profiler import profile, ProfilerActivity, record_function

import numpy as np
import sty


class M1(nn.Module):
    """Three-layer MLP baseline: no activations, no checkpointing.

    Maps (..., in_channels) -> (..., out_channels) through three stacked
    Linear layers; serves as the memory-profile reference for M2.
    """

    def __init__(self, in_channels: int, out_channels: int):
        super().__init__()
        self.linear1 = nn.Linear(in_channels, in_channels * 2, bias=True)
        self.linear2 = nn.Linear(in_channels * 2, out_channels, bias=True)
        self.linear3 = nn.Linear(out_channels, out_channels, bias=True)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Debug trace of the incoming batch shape.
        print(x.shape)
        hidden = self.linear1(x)
        hidden = self.linear2(hidden)
        return self.linear3(hidden)


class CP(nn.Module):
    """Runs a wrapped module's forward pass under activation checkpointing.

    Activations inside the wrapped module are not stored during forward;
    they are recomputed during backward, trading compute for memory.
    """

    def __init__(self, m: nn.Module):
        super().__init__()
        # The module whose forward pass is checkpointed.
        self.m = m

    def wrap(self, *args, **kwargs):
        # Plain bound callable handed to torch.utils.checkpoint.checkpoint.
        return self.m(*args, **kwargs)

    def forward(self, *args, **kwargs):
        # use_reentrant=True selects the legacy autograd.Function-based
        # checkpoint implementation.
        fn = self.wrap
        return checkpoint(fn, *args, use_reentrant=True, **kwargs)


class M2(nn.Module):
    """Same three-layer MLP as M1, but every Linear is wrapped in CP so its
    activations are recomputed during backward (activation checkpointing).

    Maps (..., in_channels) -> (..., out_channels).
    """

    def __init__(self, in_channels: int, out_channels: int):
        super().__init__()
        self.linear1 = CP(nn.Linear(in_channels, in_channels * 2, bias=True))
        self.linear2 = CP(nn.Linear(in_channels * 2, out_channels, bias=True))
        self.linear3 = CP(nn.Linear(out_channels, out_channels, bias=True))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Debug trace of the incoming batch shape (kept consistent with M1).
        print(x.shape)
        # BUG FIX: was `self.liear3` (typo), which raised AttributeError on
        # the very first forward pass.
        return self.linear3(self.linear2(self.linear1(x)))


class MockDataset(Dataset):
    """In-memory random Dataset producing (x, y) float32 pairs.

    Each element is a (32, in_channels) input and a (32, out_channels)
    target, drawn once from a standard normal at construction time.
    Supports both map-style access (len / getitem) and direct iteration.
    """

    def __init__(self, in_channels: int, out_channels: int, element_count: int = 100):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.element_count = element_count
        # float32 to match nn.Linear's default parameter dtype.
        self.xx = np.random.randn(element_count, 32, in_channels).astype(np.float32)
        self.yy = np.random.randn(element_count, 32, out_channels).astype(np.float32)

    def __len__(self) -> int:
        return self.element_count

    def __getitem__(self, item) -> Tuple[np.ndarray, np.ndarray]:
        return self.xx[item], self.yy[item]

    def __iter__(self) -> Iterator[Tuple[np.ndarray, np.ndarray]]:
        # BUG FIX: the generator previously lived in a mis-named __next__
        # (annotated as Coroutine, with an unused enumerate index); __iter__
        # is the idiomatic home for it and the annotation is now correct.
        for xx, yy in zip(self.xx, self.yy):
            yield xx, yy

    def __next__(self) -> Iterator[Tuple[np.ndarray, np.ndarray]]:
        # Kept for backward compatibility: the old API returned a fresh
        # generator from __next__; delegating to __iter__ preserves that.
        return iter(self)



if __name__ == "__main__":
    print(__file__)

    # --- Run 1: baseline model M1 (no checkpointing) under the profiler. ---
    model1 = M1(320, 640)
    ds = MockDataset(320, 640, 320)
    dl = DataLoader(ds, batch_size=32, shuffle=True)
    loss_fn = nn.MSELoss()
    optimizer1 = torch.optim.Adam(model1.parameters(), lr=1e-3)

    with profile(with_stack=True, profile_memory=True, activities=[ProfilerActivity.CPU], record_shapes=True) as prof:
        with record_function("M1"):
            for xx, yy in dl:
                pred = model1(xx)
                loss = loss_fn(pred, yy)
                optimizer1.zero_grad()
                loss.backward()
                optimizer1.step()

    # Per-op table sorted by self CPU memory, for comparison against run 2.
    print(prof.key_averages().table(sort_by='self_cpu_memory_usage', row_limit=100))

    # --- Run 2: checkpointed model M2 on the same workload. ---
    model2 = M2(320, 640)
    ds = MockDataset(320, 640, 320)
    dl = DataLoader(ds, batch_size=32, shuffle=True)
    loss_fn = nn.MSELoss()
    optimizer2 = torch.optim.Adam(model2.parameters(), lr=1e-3)

    with profile(with_stack=True, profile_memory=True, activities=[ProfilerActivity.CPU], record_shapes=True) as prof:
        with record_function("M2"):
            for xx, yy in dl:
                # BUG FIX: was `model1(xx)` — the second profiling run never
                # exercised M2/checkpointing, and optimizer2 stepped on
                # model2's parameters with no gradients.
                pred = model2(xx)
                loss = loss_fn(pred, yy)
                optimizer2.zero_grad()
                loss.backward()
                optimizer2.step()

    print(prof.key_averages().table(sort_by='self_cpu_memory_usage', row_limit=100))