from torchvision.models import resnet18
import torch
import torch.nn as nn
from PIL import Image
import torchvision.transforms as transforms
from torch.autograd import Variable
from torch.utils.data import Dataset

class LoadData(Dataset):
    """Dataset backed by a text index file.

    Each line of the index file is an image path and an integer label
    separated by a tab.  Every image is first letterboxed onto a black
    224x224 canvas by padding_black(); training samples then get an
    augmenting transform pipeline, validation samples a deterministic one.
    """

    def __init__(self, txt_path, train_flag):
        # List of [image_path, label] pairs parsed from the index file.
        self.imgs_info = self.get_images(txt_path)
        # True -> augmenting train pipeline, False -> validation pipeline.
        self.train_flag = train_flag

        # Training augmentation: random crop covering 8%-100% of the image
        # area with aspect ratio in [3/4, 4/3], rescaled to 224x224, plus a
        # random horizontal flip and color jitter.
        self.train_tf = transforms.Compose([
            transforms.RandomResizedCrop(224, scale=(0.08, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
            transforms.ToTensor(),
            # Per-channel normalization with ImageNet statistics.
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ])

        # Validation: deterministic resize followed by a 224x224 center crop.
        self.val_tf = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ])

    def get_images(self, txt_path):
        """Parse the index file into a list of [image_path, label] pairs."""
        with open(txt_path, 'r', encoding='utf-8') as handle:
            return [line.strip().split('\t') for line in handle.readlines()]

    def padding_black(self, img):
        """Letterbox `img` onto a black 224x224 canvas, keeping aspect ratio."""
        width, height = img.size
        ratio = 224. / max(width, height)
        resized = img.resize([int(side) for side in [width * ratio, height * ratio]])
        # Center the scaled image on a black RGB background.
        canvas = Image.new("RGB", (224, 224))
        offset_x = (224 - resized.size[0]) // 2
        offset_y = (224 - resized.size[1]) // 2
        canvas.paste(resized, (offset_x, offset_y))
        return canvas

    def __getitem__(self, index):
        """Return (transformed image tensor, int label) for sample `index`."""
        img_path, label = self.imgs_info[index]
        img = Image.open(img_path).convert('RGB')
        img = self.padding_black(img)
        pipeline = self.train_tf if self.train_flag else self.val_tf
        return pipeline(img), int(label)

    def __len__(self):
        """Number of samples listed in the index file."""
        return len(self.imgs_info)


# AlexNet network definition
class AlexNet(nn.Module):
    """Half-width AlexNet-style CNN for image classification.

    Expects input of shape (N, 3, 224, 224) and produces (N, num_classes)
    logits.  Set init_weights=True to apply the explicit Kaiming/normal
    initialization scheme instead of PyTorch's defaults.
    """

    def __init__(self, num_classes=2, init_weights=False):
        super().__init__()
        # Convolutional feature extractor; output shapes noted for a
        # 3x224x224 input.  ReLU(inplace=True) overwrites its input to
        # save memory.
        self.features = nn.Sequential(
            nn.Conv2d(3, 48, kernel_size=11, stride=4, padding=2),   # -> (48, 55, 55)
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),                   # -> (48, 27, 27)
            nn.Conv2d(48, 128, kernel_size=5, padding=2),            # -> (128, 27, 27)
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),                   # -> (128, 13, 13)
            nn.Conv2d(128, 192, kernel_size=3, padding=1),           # -> (192, 13, 13)
            nn.ReLU(inplace=True),
            nn.Conv2d(192, 192, kernel_size=3, padding=1),           # -> (192, 13, 13)
            nn.ReLU(inplace=True),
            nn.Conv2d(192, 128, kernel_size=3, padding=1),           # -> (128, 13, 13)
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),                   # -> (128, 6, 6)
        )
        # Fully connected classification head with dropout regularization.
        self.classifier = nn.Sequential(
            nn.Dropout(p=0.5),
            nn.Linear(128 * 6 * 6, 2048),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5),
            nn.Linear(2048, 2048),
            nn.ReLU(inplace=True),
            nn.Linear(2048, num_classes),
        )
        if init_weights:
            self._initialize_weights()

    def forward(self, x):
        """Extract features, flatten to (N, 4608), and classify."""
        feats = self.features(x)
        flat = torch.flatten(feats, start_dim=1)
        return self.classifier(flat)

    def _initialize_weights(self):
        """Explicit weight init (PyTorch would otherwise use its defaults)."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                # Kaiming (He) normal initialization suits ReLU activations.
                nn.init.kaiming_normal_(module.weight, mode='fan_out',
                                        nonlinearity='relu')
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.Linear):
                # Small normal init for fully connected layers, zero bias.
                nn.init.normal_(module.weight, 0, 0.01)
                nn.init.constant_(module.bias, 0)

# Training function: runs one epoch over the loader.
def train(dataload, model, loss_fn, optimizer):
    """Train `model` for one pass over `dataload`.

    Args:
        dataload: DataLoader yielding (images, labels) batches.
        model: network to optimize; batches are moved to its device.
        loss_fn: criterion, e.g. nn.CrossEntropyLoss.
        optimizer: optimizer over model.parameters().
    """
    size = len(dataload.dataset)

    # BUG FIX: restore training mode explicitly. test() switches the model
    # to eval(), so without this call every epoch after the first would
    # train with dropout disabled.
    model.train()

    # Follow the model's device instead of hard-coding .cuda(), so the loop
    # also runs on CPU-only machines (behavior on GPU is unchanged).
    device = next(model.parameters()).device

    for batch, (X, y) in enumerate(dataload):
        X, y = X.to(device), y.to(device)

        # Forward pass and loss.
        pred = model(X)
        loss = loss_fn(pred, y)

        # Backward pass and parameter update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Report progress every 100 batches.
        if batch % 100 == 0:
            loss_val, current = loss.item(), batch * len(X)
            print(f"loss:{loss_val:>7f}  [{current:>5d}/{size:>5d}]")

def test(dataloader, model):
    size = len(dataloader.dataset)
    print("size = ",size)
    
    # 将模型转为验证模式
    model.eval()
    
    # 初始化，统计每次的误差
    test_loss, correct = 0,0
    
    # 测试时模型参数不用更新，所以no_grad
    # 非训练，推理时用
    with torch.no_grad():
        # 加载数据加载器，得到X,y
        for X, y in dataloader:
            # 将数据转到GPU
            X, y = X.cuda(), y.cuda()
            
            # 预测
            pred = model(X)
            
            # 计算预测值与真实值差距
            test_loss += loss_fn(pred, y).item()
            
            # 统计正确的个数
            correct += (pred.argmax(1)==y).type(torch.float).sum().item()
    test_loss /= size
    correct /= size
    print("correct = ", correct)
    print(f"Test Error:\n Accuracy: {(100*correct):>0.1f}, Avg loss: {test_loss:>8f} \n")


if __name__ == "__main__":

    # Hyperparameters.
    batch_size = 16
    learning_rate = 0.1    # initial learning rate; NOTE(review): 0.1 with plain SGD is aggressive for AlexNet trained from scratch — confirm it converges

    # Index files: one "<image_path><TAB><label>" entry per line (see LoadData).
    train_data = LoadData(r"F:\SCIENCE_AND_MATH\Machine Learning\MCM\2021C\code\train.txt", True)
    test_data = LoadData(r"F:\SCIENCE_AND_MATH\Machine Learning\MCM\2021C\code\test.txt", False)

    print("nums of dataset = \n", len(train_data))

    train_loader = torch.utils.data.DataLoader(
        dataset=train_data,
        batch_size=batch_size,
        shuffle=True,
        num_workers=4,
        pin_memory=True
    )

    test_loader = torch.utils.data.DataLoader(
        dataset=test_data,
        batch_size=batch_size,
        shuffle=True,          # NOTE(review): shuffling the test set is unnecessary (harmless here)
        num_workers=4,
        pin_memory=True
    )

    # for image, label in train_loader:

    #     print(image)
    #     print(label)

    # Train on GPU when available, otherwise fall back to CPU.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Using {device} device")

    # Instantiate the model defined above and move it to the chosen device.
    model = AlexNet().to(device)
    print(model)

    # Cross-entropy classification loss.
    loss_fn = nn.CrossEntropyLoss()

    # Plain SGD optimizer over all model parameters.
    optimizer = torch.optim.SGD(model.parameters(), lr = learning_rate)

    # Train for 20 epochs, evaluating on the test set after each one.
    epochs = 20
    for t in range(epochs):
        print(f"Epoch {t+1}\n---------------------------")
        train(train_loader, model, loss_fn, optimizer)
        test(test_loader, model)

    print("Done!")

    # Save the trained model — no save code is visible in this chunk;
    # presumably it follows below (e.g. torch.save(model.state_dict(), ...)) — TODO confirm.