import torch
import numpy as np
from torch import nn
import torch.nn.functional as f
import math
# Use the GPU when one is available, otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(torch.cuda.is_available())

# Tiny hand-crafted 2-D dataset: 8 points assigned to 3 classes.
X = torch.tensor(
    [[1., 2.],
     [2., 1.],
     [3., 5.],
     [5., 3.],
     [1., -1.],
     [-1., 1.],
     [-2., -3.],
     [-3., -2.]],
).to(device)
# Integer class labels for the rows of X (classes 0, 1, 2).
Y = torch.tensor([0, 0, 1, 1, 2, 2, 2, 2]).to(device)
print(X.shape)
class MultiClassDNN(nn.Module):
    """Small fully-connected network for 3-class classification of 2-D points.

    Architecture: 2 -> 7 -> 2 -> 3 with ReLU activations. The final layer
    returns raw logits (no softmax), as expected by nn.CrossEntropyLoss.
    """

    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(2, 7)
        self.fc2 = nn.Linear(7, 2)
        self.fc3 = nn.Linear(2, 3)

        # Explicitly (re-)apply PyTorch's default Linear initialization:
        # Kaiming-uniform weights with a=sqrt(5), and biases uniform in
        # [-1/sqrt(fan_in), 1/sqrt(fan_in)].
        # Fixed: removed the debug print that dumped every layer's weight
        # tensor on each instantiation.
        for m in self.modules():
            if isinstance(m, nn.Linear):  # only initialize fully-connected layers
                nn.init.kaiming_uniform_(m.weight, a=math.sqrt(5))
                if m.bias is not None:
                    fan_in, _ = nn.init._calculate_fan_in_and_fan_out(m.weight)
                    bound = 1 / math.sqrt(fan_in)
                    nn.init.uniform_(m.bias, -bound, bound)

    def forward(self, x):
        """Return class logits of shape (N, 3) for an input of shape (N, 2)."""
        x = f.relu(self.fc1(x))
        x = f.relu(self.fc2(x))
        return self.fc3(x)
# Build the model and move its parameters to the selected device.
model=MultiClassDNN().to(device)
print("fc1.weight shape:", model.fc1.weight.shape) 
print(model)
# CrossEntropyLoss consumes raw logits plus integer class indices
# (it applies log-softmax internally), matching the model's forward output.
criterion=nn.CrossEntropyLoss()
optimizer=torch.optim.Adam(model.parameters(),lr=0.01)

# Full-batch training: one forward/backward pass over all 8 points per epoch.
# NOTE(review): with range(5) the `epoch%200==0` check below only fires at
# epoch 0; the 200-epoch logging interval suggests a much larger epoch count
# was intended — confirm whether 5 is a leftover debug value.
for epoch in range(5):
    output=model(X)
    loss=criterion(output,Y)
    optimizer.zero_grad()  # clear gradients accumulated by the previous step
    loss.backward()
    optimizer.step()
    if epoch%200==0:
        # Verbose debug dump: loss plus every parameter tensor and the batch.
        print(f"EPOCH{epoch},Loss:{loss.item():.4f}")
        print("model.fc1.weight=",model.fc1.weight)
        print("model.fc1.bias=",model.fc1.bias)
        print("model.fc2.weight=",model.fc2.weight)
        print("model.fc2.bias=",model.fc2.bias)
        print("model.fc3.weight=",model.fc3.weight)
        print("model.fc3.bias=",model.fc3.bias)
        print("X=",X)
        print("Y=",Y)
        print("output=",output)
        print("pred_classes=",output.argmax(dim=1))
      
# Evaluate on the training set without tracking gradients.
with torch.no_grad():
    scores = model(X)
    print("Predicted", scores.argmax(dim=1))

# Show which devices the model weights and the input data live on.
print(model.fc1.weight.device)
print(X.device)

# Classify a single unseen point with the trained model.
sample = torch.tensor([[-15.0, -5.0]]).to(device)
sample_pred = model(sample).argmax(dim=1)
print(f"Predicted class for new input is {sample_pred.tolist()}")
