import time
from tqdm import tqdm, trange
from random import random,randint
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from pathlib import Path
if __name__ == "__main__":
    # Basic progress bar
    # for i in tqdm(range(1000)):
    #     time.sleep(0.01)

    # Basic progress bar, using the trange alias
    # for i in trange(1000):
    #     time.sleep(0.01)

    # Initialize tqdm outside the for loop so extra info can be displayed
    # pbar = tqdm(['a', 'b', 'c', 'd'])
    # for char in pbar:
    #     pbar.set_description(f"Processing {char}")
    #     time.sleep(1)

    # Automatic progress bar: collect the items first so the total is known
    # image_paths = list(Path("path").glob("*.jpg"))
    # print("Creating dataset:", "path")

    # for img_path in tqdm(image_paths):
    #     time.sleep(0.1)

    # Manual progress control, variant 1
    # with tqdm(total=200) as pbar:
    #     pbar.set_description(f"Processing")
    #     for i in range(20):
    #         pbar.update(10)
    #         time.sleep(.1)

    # Manual progress control, variant 2
    # pbar = tqdm(total=100)
    # for i in range(100):
    #     time.sleep(0.05)
    #     # Advance the bar by one step on each iteration
    #     pbar.update(1)
    # # Don't forget to release the resources held by the bar
    # pbar.close()

    # Printing messages without breaking the progress bar
    # for i in tqdm(range(10)):
    #     time.sleep(0.1)
    #     if not (i % 3):
    #         tqdm.write(f"Done Task {i}")

    # Use set_description and set_postfix to customize the displayed info:
    # with tqdm(range(10)) as t:
    #     for i in t:
    #         # Info shown on the left side of the bar
    #         t.set_description(f"GEN {i}")
    #         # Info shown on the right side of the bar
    #         t.set_postfix(loss=random(),gen=randint(1,999),str="h",lst=[1,2])
    #         time.sleep(0.1)


    class CNN(nn.Module):
        """Small two-layer CNN for 28x28 inputs (e.g. MNIST).

        Two conv+pool stages halve the spatial size twice (28 -> 14 -> 7),
        then a single linear layer maps the 16*7*7 features to class logits.

        Args:
            in_channels: number of input image channels (1 for MNIST).
            num_classes: number of output classes / logits.
        """
        def __init__(self, in_channels=1, num_classes=10):
            super().__init__()
            # BUG FIX: conv1 previously hard-coded in_channels=1, silently
            # ignoring the constructor parameter; honor it instead.
            self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
            self.pool = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))
            self.conv2 = nn.Conv2d(in_channels=8, out_channels=16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
            # 16 channels * 7 * 7 spatial positions after two 2x2 poolings.
            self.fc1 = nn.Linear(16 * 7 * 7, num_classes)

        def forward(self, x):
            """Return raw class logits of shape (batch, num_classes)."""
            x = F.relu(self.conv1(x))
            x = self.pool(x)
            x = F.relu(self.conv2(x))
            x = self.pool(x)
            # Flatten all feature maps into one vector per sample.
            x = x.reshape(x.shape[0], -1)
            x = self.fc1(x)
            return x
    # Select the GPU when available, otherwise fall back to CPU.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    batch_size = 64
    learning_rate = 0.001
    # MNIST train/test splits, converted to [0, 1] float tensors.
    train_dataset = datasets.MNIST(root="dataset/", train=True, transform=transforms.ToTensor(), download=True)
    train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
    test_dataset = datasets.MNIST(root="dataset/", train=False, transform=transforms.ToTensor(), download=True)
    # BUG FIX: the test loader previously wrapped train_dataset (copy-paste
    # error), so any evaluation would have run on training data. Also no need
    # to shuffle the evaluation split.
    test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)
    # Initialize network
    model = CNN().to(device)
    # Loss and optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)
    # Train Network

    for epoch in range(5):
        # Alternative iteration styles:
        #   tqdm(train_loader, leave=False)                      -> bar stays on one line
        #   tqdm(enumerate(train_loader), total=len(train_loader)) -> also yields the batch index
        losses = []
        accuracy = []
        loop = tqdm(train_loader, total=len(train_loader))
        for data, targets in loop:
            # Move the batch to the training device.
            data = data.to(device=device)
            targets = targets.to(device=device)
            # Forward pass
            scores = model(data)
            loss = criterion(scores, targets)
            # BUG FIX: store the Python float, not the loss tensor — keeping
            # the tensor would retain the whole autograd graph of every batch
            # for the entire epoch (growing memory usage).
            losses.append(loss.item())
            # Backward pass
            optimizer.zero_grad()
            loss.backward()
            # Running accuracy of this batch.
            _, predictions = scores.max(1)
            num_correct = (predictions == targets).sum()
            running_train_acc = float(num_correct) / float(data.shape[0])
            accuracy.append(running_train_acc)
            # Gradient descent / Adam step
            optimizer.step()
            # 1-based epoch counter so the bar reads "1/5" .. "5/5".
            loop.set_description(f'Epoch [{epoch + 1}/5]')
            loop.set_postfix(loss=loss.item(), acc=running_train_acc)
