import torch
from torch import nn, optim
from torch.amp import autocast
try:
    from torch.amp import GradScaler
except ImportError:
    from torch.cuda.amp import GradScaler


class AMP(nn.Module):
    """Toy sequence classifier: an LSTM encoder followed by a linear head.

    Expects batch-first input of shape (batch, seq_len, input_size).
    NOTE: ``fc2`` hard-codes ``hidden_size * 4`` input features, so the
    flattened LSTM output only matches when ``seq_len == 4`` (as in the
    example usage this file was written against).
    """

    def __init__(self, input_size, hidden_size, output_size):
        super().__init__()
        # batch_first=True so the input layout is (batch, seq, feature),
        # matching the example data this model is driven with.
        self.fc1 = nn.LSTM(input_size, hidden_size, batch_first=True)
        self.fc2 = nn.Linear(hidden_size * 4, output_size)

    def forward(self, x):
        """Return logits of shape (batch, output_size) for input ``x``.

        ``x`` must be (batch, 4, input_size) — see class docstring.
        """
        # nn.LSTM returns (output, (h_n, c_n)); the original code passed
        # the whole tuple to torch.relu, which raised TypeError on every
        # call. Unpack the per-step outputs first.
        out, _ = self.fc1(x)
        out = torch.relu(out)
        # Flatten (batch, seq, hidden) -> (batch, seq * hidden) for the head.
        out = self.fc2(out.flatten(start_dim=1))
        return out
    
def main(batch_size=32, seq_length=8, input_size=100, hidden_size=128,
         num_layers=1):
    """Run one mixed-precision forward pass through a raw ``nn.LSTM``.

    Picks CUDA when available, otherwise CPU; ``autocast`` selects the
    matching low-precision dtype (float16 on CUDA, bfloat16 on CPU).

    Returns the LSTM output tensor of shape
    (batch_size, seq_length, hidden_size).
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
    model.to(device)
    # NOTE(review): the original also built a GradScaler here, but it was
    # never used (no backward/step/update) and warns on CPU-only hosts, so
    # it has been dropped; reintroduce it alongside a real training loop.

    # Example data: batch-first input plus explicit zero initial states.
    x = torch.randn(batch_size, seq_length, input_size, device=device)
    h0 = torch.zeros(num_layers, batch_size, hidden_size, device=device)
    c0 = torch.zeros(num_layers, batch_size, hidden_size, device=device)

    with autocast(device.type):
        output, (hn, cn) = model(x, (h0, c0))
    return output


if __name__ == '__main__':
    main()
