import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader, random_split
import matplotlib
from matplotlib import pyplot as plt

# Hyperparameters / run configuration.
BATCH_SIZE = 64
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
EPOCHS = 100
LEARNING_RATE = 0.1
LEARNING_RATE_GAMMA = 0.9  # learning-rate decay factor (lr = lr * gamma)
MODEL_SAVE_PATH = './models/gesture_cnn.pth'

# Class index -> gesture name for the 14 gesture classes.
labels = {
    0: "one", 1: "five", 2: "first", 3: "ok", 4: "heartSingle",
    5: "yeah", 6: "three", 7: "four", 8: "six", 9: "loveyou",
    10: "gun", 11: "thumbup", 12: "nine", 13: "pink",
}

# Preprocessing pipeline for every image in the dataset.
# NOTE: the original bound this to the name `transforms`, shadowing the
# imported torchvision.transforms module; renamed to `transform` so the
# module stays accessible.
transform = transforms.Compose([
    transforms.Resize(128),         # scale the short side to 128, keeping aspect ratio
    transforms.CenterCrop(128),     # center-crop to a 3*128*128 image
    transforms.ToTensor(),          # convert to a float tensor in [0, 1]
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))  # map each channel to [-1, 1]
])


# load dataset (one sub-directory per gesture class)
path = r'dataset/handpose_x_gesture_v1'
orig_set = datasets.ImageFolder(path, transform=transform)

# split into train and test
n = len(orig_set)  # total number of examples
n_test = int(0.1 * n)  # take ~10% for test
n_train = n - n_test

train_set, test_set = random_split(orig_set, [n_train, n_test])

train_loader = DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=True)
# test_loader is shuffled too so random_visible_test_data shows varied samples
test_loader = DataLoader(test_set, batch_size=BATCH_SIZE, shuffle=True)

# build model
class Net(nn.Module):
    """CNN classifier for 14 hand-gesture classes on 3*128*128 inputs.

    Returns raw (unnormalized) logits. The original applied F.log_softmax
    here while the train/test code computes the loss with F.cross_entropy,
    which applies log_softmax internally — so the normalization ran twice
    and distorted the loss. Emitting logits fixes that; argmax-based
    predictions are unaffected.
    """
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 32, 5)
        self.conv2 = nn.Conv2d(32, 64, 3)
        # Channel-wise dropout on the 4D conv feature maps.
        self.dropout1 = nn.Dropout2d(0.25)
        # Element-wise dropout for the flattened 2D features. The original
        # used Dropout2d here, which is meant for 4D (N, C, H, W) input and
        # warns / misbehaves on an (N, features) tensor.
        self.dropout2 = nn.Dropout(0.5)
        self.fc1 = nn.Linear(64 * 61 * 61, 128)
        self.fc2 = nn.Linear(128, 14)

    def forward(self, x):
        x = F.relu(self.conv1(x))   # 3*128*128 -> 32*124*124
        x = F.relu(self.conv2(x))   # 32*124*124 -> 64*122*122
        x = F.max_pool2d(x, 2)      # 64*122*122 -> 64*61*61
        x = self.dropout1(x)
        x = torch.flatten(x, 1)
        x = F.relu(self.fc1(x))
        x = self.dropout2(x)
        return self.fc2(x)          # logits, shape (batch, 14)
    

# Instantiate the network on the selected device with plain SGD.
model = Net().to(DEVICE)
optimizer = optim.SGD(model.parameters(), lr=LEARNING_RATE)
# Each scheduler.step() multiplies the learning rate by LEARNING_RATE_GAMMA.
scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=LEARNING_RATE_GAMMA)

# Per-epoch test metrics, appended by test_model() and saved in checkpoints.
test_loss_graph = []
test_acc_graph = []

# dynamic graph
# Training losses sampled every 16 batches, appended by train_model().
train_loss_graph = []

# interactive mode
# NOTE(review): matplotlib.use() is called after pyplot is already imported;
# recent matplotlib tolerates this, but switching backends this late can fail
# on some installations — confirm TkAgg is available where this runs.
matplotlib.use("TkAgg") 
plt.ion()

# Figure, axes and line reused by graph_loss() for the live loss curve.
fig, ax = plt.subplots()
line, = ax.plot([], [])
ax.set_xlabel("Step")
ax.set_ylabel("Loss")
ax.set_title("Training Loss")
def graph_loss(losses):
    """Refresh the interactive training-loss plot with the latest values."""
    steps = list(range(len(losses)))
    line.set_data(steps, losses)
    ax.relim()
    ax.autoscale_view()
    fig.canvas.draw()
    fig.canvas.flush_events()
# train method
def train_model(model, device, train_loader, optimizer, epoch):
    """Run one training epoch, logging and live-plotting the loss every 16 batches."""
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        # move the batch to the training device
        data = data.to(device)
        target = target.to(device)
        # standard step: clear grads, forward, loss, backward, update
        optimizer.zero_grad()
        loss = F.cross_entropy(model(data), target)
        loss.backward()
        optimizer.step()
        # periodic progress report plus a point on the live loss curve
        if batch_idx % 16 == 0:
            seen = batch_idx * len(data)
            total = len(train_loader.dataset)
            pct = 100. * batch_idx / len(train_loader)
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, seen, total, pct, loss.item()))
            train_loss_graph.append(loss.item())
            graph_loss(train_loss_graph)
            
# test method
def test_model(model, device, test_loader):
    """Evaluate on test_loader; print and record average loss and accuracy.

    Appends the results to the module-level test_loss_graph / test_acc_graph
    lists so they end up in checkpoints and the final summary plot.
    """
    model.eval()
    correct = 0.0
    test_loss = 0.0
    # no gradient computation needed during evaluation
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            # Sum per-sample losses so dividing by the dataset size below
            # gives a true per-sample average. The original summed per-batch
            # *means* and divided by the number of samples, underestimating
            # the loss by roughly a factor of the batch size.
            test_loss += F.cross_entropy(output, target, reduction='sum').item()
            # predicted class = index of the largest logit
            pred = output.argmax(dim=1)
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    accuracy = correct / len(test_loader.dataset) * 100.0
    print("Test -- Average Loss : {:.4f}, Accuracy : {:.3f}\n".format(test_loss, accuracy))
    test_loss_graph.append(test_loss)
    test_acc_graph.append(accuracy)
        
# save and load model        
def save_model(epoch):
    """Persist model/optimizer/scheduler state and training history to MODEL_SAVE_PATH."""
    checkpoint = {
        'net': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'scheduler': scheduler.state_dict(),
        'epoch': epoch,
        'train_loss': train_loss_graph,
        'test_loss': test_loss_graph,
        'test_acc': test_acc_graph,
    }
    # makedirs with exist_ok avoids the race between checking and creating,
    # and derives the directory from the save path instead of hard-coding it.
    os.makedirs(os.path.dirname(MODEL_SAVE_PATH), exist_ok=True)
    torch.save(checkpoint, MODEL_SAVE_PATH)
    
def load_model():
    """Restore training state from MODEL_SAVE_PATH if a checkpoint exists.

    Returns the epoch the checkpoint was saved at, or 0 to train from scratch.
    """
    global train_loss_graph, test_loss_graph, test_acc_graph
    if not os.path.exists(MODEL_SAVE_PATH):
        print("No saved model found, training from scratch")
        return 0
    # map_location lets a checkpoint saved on GPU load on a CPU-only machine
    # (without it, torch.load tries to restore tensors onto the saving device).
    checkpoint = torch.load(MODEL_SAVE_PATH, map_location=DEVICE)
    model.load_state_dict(checkpoint['net'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    scheduler.load_state_dict(checkpoint['scheduler'])
    epoch = checkpoint['epoch']
    train_loss_graph = checkpoint['train_loss']
    test_loss_graph = checkpoint['test_loss']
    test_acc_graph = checkpoint['test_acc']
    print(f"Model loaded from epoch {epoch}")
    return epoch

# random visible test data
def random_visible_test_data(model, device, test_loader):
    """Show prediction vs. label for the first image of three test batches.

    Fixes two display bugs: the original looped from n=0 and indexed
    ax[n-1], so the first batch was drawn into ax[-1] (the last subplot)
    and four batches were painted onto three axes; and it displayed images
    with the Normalize((0.5,...), (0.5,...)) transform still applied.
    """
    model.eval()
    with torch.no_grad():
        fig, ax = plt.subplots(1, 3)
        for n, (data, target) in enumerate(test_loader):
            if n >= 3:
                break
            data, target = data.to(device), target.to(device)
            output = model(data)
            pred = output.argmax(dim=1)
            # undo Normalize((0.5,)*3, (0.5,)*3): x * 0.5 + 0.5 maps back to [0, 1]
            img = (data[0].cpu() * 0.5 + 0.5).clamp(0, 1)
            ax[n].imshow(img.permute(1, 2, 0))
            ax[n].set_title(f"prediction: {labels[pred[0].item()]}\nLabel: {labels[target[0].item()]}")
            ax[n].axis('off')
        plt.tight_layout()
        plt.show()
        
# run
# Resume from a checkpoint if one exists (load_model returns 0 for a fresh
# start) and seed the live plot with any restored loss history.
start_epoch = load_model()
graph_loss(train_loss_graph) 
print(f"Training using {DEVICE.type}")
for epoch in range(start_epoch+1, EPOCHS+1):
    train_model(model, DEVICE, train_loader, optimizer, epoch)
    test_model(model, DEVICE, test_loader)
    # Decay the learning rate only once every 10 epochs.
    if epoch % 10 == 0:
        scheduler.step()
    # Checkpoint every epoch so an interrupted run can resume.
    save_model(epoch)
print("Final test result:")
test_model(model, DEVICE, test_loader)
random_visible_test_data(model,DEVICE,test_loader)
print("Finished training")
plt.ioff()
    
# draw loss and accuracy in one figure with two y-axes
fig, ax1 = plt.subplots(figsize=(12, 6))

# Left y-axis: per-epoch test loss, in red.
loss_color = 'tab:red'
ax1.set_xlabel('Epoch')
ax1.set_ylabel('Loss', color=loss_color)
ax1.plot(test_loss_graph, color=loss_color)
ax1.tick_params(axis='y', labelcolor=loss_color)

# Right y-axis: per-epoch test accuracy, in blue, sharing the x-axis.
ax2 = ax1.twinx()
acc_color = 'tab:blue'
ax2.set_ylabel('Accuracy(%)', color=acc_color)
ax2.plot(test_acc_graph, color=acc_color)
ax2.tick_params(axis='y', labelcolor=acc_color)

# Tidy the layout, set the title, and display the combined figure.
fig.tight_layout()
plt.title('Test Loss and Accuracy')
plt.show()