import torch
import torch.nn as nn
import pandas as pd
import matplotlib.pyplot as plt
from torch.utils.data import TensorDataset, DataLoader

# Load Fashion-MNIST from CSV: column 0 is the label, columns 1..784 are the
# 28x28 pixel values. (Fixed typo: `data_trian` -> `data_train`.)
data_train = pd.read_csv('../dataset/fashion-mnist_train.csv')
data_test = pd.read_csv('../dataset/fashion-mnist_test.csv')

# Images as (N, 1, 28, 28) float32, labels as int64 class indices.
# NOTE(review): pixel values stay in the raw 0-255 range — with sigmoid
# activations and lr=0.9 downstream, scaling to [0,1] was likely intended;
# confirm before changing, as it alters training behavior.
x_train = torch.tensor(data_train.iloc[:, 1:].values, dtype=torch.float32).reshape(-1, 1, 28, 28)
y_train = torch.tensor(data_train.iloc[:, 0].values, dtype=torch.int64)

print(x_train.shape, y_train.shape)

x_test = torch.tensor(data_test.iloc[:, 1:].values, dtype=torch.float32).reshape(-1, 1, 28, 28)
y_test = torch.tensor(data_test.iloc[:, 0].values, dtype=torch.int64)

print(x_test.shape, y_test.shape)

# Wrap tensors so DataLoader can batch/shuffle them.
train_dataset = TensorDataset(x_train, y_train)
test_dataset = TensorDataset(x_test, y_test)

# LeNet-5 style CNN for 28x28 single-channel inputs, 10 output classes.
model = nn.Sequential(
    nn.Conv2d(1, 6, kernel_size=5, padding=2),  # 1x28x28 -> 6x28x28
    nn.Sigmoid(),
    nn.AvgPool2d(kernel_size=2, stride=2),      # -> 6x14x14
    nn.Conv2d(6, 16, kernel_size=5),            # -> 16x10x10
    nn.Sigmoid(),
    nn.AvgPool2d(kernel_size=2, stride=2),      # -> 16x5x5
    nn.Flatten(),                               # -> 400 features (16*5*5)
    nn.Linear(400, 120),
    nn.Sigmoid(),
    nn.Linear(120, 84),
    nn.Sigmoid(),
    nn.Linear(84, 10),                          # class logits
)

# Sanity probe: push one dummy sample through layer by layer and report
# the output shape after each stage.
probe = torch.rand(1, 1, 28, 28)
for layer in model:
    probe = layer(probe)
    print(f"{layer.__class__.__name__:<10}:{probe.shape}")

def train(model, train_dataset, test_dataset, lr, epoch_num, batch_size, device):
    """Train `model` with SGD + cross-entropy and print per-epoch metrics.

    Args:
        model: an nn.Module (Linear/Conv2d layers get Xavier init).
        train_dataset, test_dataset: torch Datasets of (X, y) pairs.
        lr: SGD learning rate.
        epoch_num: number of passes over the training set.
        batch_size: minibatch size for both loaders.
        device: torch.device to run on; model and batches are moved there.

    Side effects: mutates `model` in place (re-initializes weights, trains),
    and prints a progress bar plus loss/accuracy per epoch. Returns None.
    """
    def init_weights(layer):
        # Xavier-uniform init for every trainable conv/linear layer.
        if isinstance(layer, (nn.Linear, nn.Conv2d)):
            nn.init.xavier_uniform_(layer.weight)
    model.apply(init_weights)  # initialize parameters
    model.to(device)
    loss = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=lr)
    # Build the loaders once; the original rebuilt them every epoch for no gain.
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
    for epoch in range(epoch_num):
        model.train()
        loss_accumulate = 0.0
        train_correct_accumulate = 0
        for batch_idx, (X, y) in enumerate(train_loader):
            X, y = X.to(device), y.to(device)
            pred_y = model(X)
            loss_value = loss(pred_y, y)
            optimizer.zero_grad()
            loss_value.backward()
            optimizer.step()
            # Accumulate loss weighted by batch size (CrossEntropyLoss is a mean).
            loss_accumulate += loss_value.item() * X.shape[0]
            pred = pred_y.argmax(dim=1)
            # .item(): keep a plain Python int instead of a device tensor.
            train_correct_accumulate += pred.eq(y).sum().item()
            print(f"\repoch:{epoch:0>2}[{'='*(int((batch_idx+1) / len(train_loader) * 50)):<50}]", end="") 
        this_loss = loss_accumulate / len(train_dataset)
        this_train_correct = train_correct_accumulate / len(train_dataset)

        model.eval()
        test_correct_accumulate = 0
        with torch.no_grad():  # inference only: no autograd bookkeeping
            for X, y in test_loader:
                X, y = X.to(device), y.to(device)
                pred = model(X).argmax(dim=1)
                test_correct_accumulate += pred.eq(y).sum().item()
        this_test_correct = test_correct_accumulate / len(test_dataset)
        print(f" loss:{this_loss:.6f}, train_acc:{this_train_correct:.6f}, test_acc:{this_test_correct:.6f}")
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# BUG FIX: the original passed `train_dataset` twice, so the reported
# "test" accuracy was actually training accuracy. Pass the real test set.
train(model, train_dataset, test_dataset, lr=0.9, epoch_num=20, batch_size=256, device=device)

# Show one held-out image and compare its label against the model's prediction.
plt.imshow(x_test[1111, 0, :, :], cmap='gray')
plt.show()
print("图片标签", y_test[1111])

# BUG FIX: the original fed a CPU tensor to a model that may live on CUDA.
# Move the sample to the model's device, and run in eval mode without grad.
model.eval()
with torch.no_grad():
    output = model(x_test[1111].unsqueeze(0).to(device))
y_pred = output.argmax(dim=1)
print("预测", y_pred)