# Load the training data with torch's Dataset / DataLoader classes.
import torch.utils.data as data
import numpy as np
import torch
import torch.nn as nn

def train_data():
  """Draw one training sample.

  Returns a random 5-dim vector and the index of its largest entry
  (the classification target).
  """
  vec = np.random.randn(5)
  label = np.argmax(vec)
  return vec, label

def gen_train(size):
  """Generate `size` training samples as tensors.

  Returns:
    features: FloatTensor of shape (size, 5)
    labels:   LongTensor of shape (size,)

  Fix: the original built labels as [[y], ...] and ran `torch.squeeze`
  on the result, which collapses a single-sample batch (size == 1) to a
  0-dim tensor — that breaks `len(labels)` and integer indexing
  downstream. Building a flat label list keeps the tensor 1-D for every
  size. Stacking the feature vectors into one ndarray first also avoids
  the slow list-of-ndarrays tensor construction.
  """
  samples = [train_data() for _ in range(size)]
  features = torch.FloatTensor(np.array([x for x, _ in samples]))
  labels = torch.LongTensor([int(y) for _, y in samples])
  return features, labels

class MyDataSet(data.Dataset):
    """In-memory dataset of randomly generated (vector, argmax-label) pairs."""

    def __init__(self, size):
        super().__init__()
        # Materialize all samples up front; indexing is then a tensor slice.
        self.features, self.labels = gen_train(size)

    def __getitem__(self, index):
        feature = self.features[index]
        label = self.labels[index]
        return feature, label

    def __len__(self):
        return len(self.labels)

# Define a model
class MyModel(nn.Module):
    """Two-layer linear network for 5-way classification.

    Calling the model with only `x` returns logits of shape
    (..., input_size); calling it with `y_true` as well returns the
    cross-entropy loss against those targets.
    """

    def __init__(self, input_size, hidden_size):
        super().__init__()
        self.linear1 = nn.Linear(input_size, hidden_size)
        self.linear2 = nn.Linear(hidden_size, input_size)
        self.loss = nn.functional.cross_entropy

    def forward(self, x, y_true=None):
        # NOTE(review): there is no nonlinearity between the two layers,
        # so the stack is equivalent to a single affine map — presumably
        # intentional for this toy demo; confirm if accuracy matters.
        hidden = self.linear1(x)
        logits = self.linear2(hidden)
        if y_true is None:
            return logits
        return self.loss(logits, y_true)
def evaluate(model, eval_size=100):
    """Print the model's classification accuracy on fresh random data.

    Args:
        model: a MyModel instance; `model(x)` must return logits.
        eval_size: number of evaluation samples (default 100, matching
            the original hard-coded value).
    """
    model.eval()  # switch to eval mode (no dropout/batchnorm updates)
    # Fix: the original built a DataLoader only to immediately unwrap
    # its `.dataset` attribute — construct the dataset directly.
    dataset = MyDataSet(eval_size)
    with torch.no_grad():
        # One batched forward pass instead of a per-sample Python loop;
        # torch.argmax replaces np.argmax on tensors.
        logits = model(dataset.features)
        preds = torch.argmax(logits, dim=1)
        succ_count = (preds == dataset.labels).sum().item()
    print("正确率%f" % (succ_count / eval_size))



def main():
    """Train the toy argmax classifier and report loss/accuracy per epoch."""
    data_size = 1000
    batch_size = 20
    epochs = 20
    lr = 1e-3

    model = MyModel(5, 10)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    for epoch in range(epochs):
        model.train()
        # A fresh random dataset each epoch — synthetic data is cheap,
        # so every epoch sees new samples.
        loader = data.DataLoader(dataset=MyDataSet(data_size), batch_size=batch_size, num_workers=0)
        epoch_losses = []
        for features, labels in loader:
            loss = model(features, labels)
            loss.backward()       # compute gradients
            optimizer.step()      # apply the update
            optimizer.zero_grad() # clear gradients before the next batch
            epoch_losses.append(loss.item())
        # End of epoch: report mean training loss and current accuracy.
        print("第%d轮，loss%f" %(epoch,np.mean(epoch_losses)))
        evaluate(model)


# Run training only when executed as a script, not on import.
if __name__ == "__main__":
    main()
