import os
import sys

import torch
import torchsummary as summary
import torchvision
from torch import nn
from tqdm import tqdm


class BasicBlock(nn.Module):
    """Basic residual block used by ResNet-18/34 (expansion factor 1).

    Two 3x3 conv + BatchNorm stages; the input (optionally projected by
    ``downsample`` to match shape) is added back before the final ReLU.

    NOTE(review): this region contained an unresolved git merge conflict
    (``<<<<<<< HEAD`` ... ``>>>>>>> 29c0aa2d``); resolved by keeping the
    HEAD branch, which the rest of the file (Bottleneck/ResNet/train) uses.
    """

    # Output channel multiplier: out_channel * expansion channels leave the block.
    expansion = 1

    def __init__(self, in_channel, out_channel, stride=1, downsample=None):
        """
        :param in_channel: number of input feature-map channels
        :param out_channel: number of output channels of both convolutions
        :param stride: stride of the first conv (2 halves the spatial size)
        :param downsample: optional module applied to x for the shortcut path
        """
        super(BasicBlock, self).__init__()

        # bias=False because each conv is immediately followed by BatchNorm.
        self.conv1 = nn.Conv2d(in_channels=in_channel, out_channels=out_channel, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channel)

        self.relu = nn.ReLU()

        self.conv2 = nn.Conv2d(in_channels=out_channel, out_channels=out_channel, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channel)

        self.downsample = downsample

    def forward(self, x):
        """
        :param x: [b, ch, h, w]
        :return: feature map after the residual addition and final ReLU
        """
        identity = x
        if self.downsample is not None:
            identity = self.downsample(x)

        output = self.conv1(x)
        output = self.bn1(output)
        output = self.relu(output)

        output = self.conv2(output)
        output = self.bn2(output)
        output = self.relu(output + identity)
        return output


class Bottleneck(nn.Module):

    expansion = 4

    def __init__(self, in_channel, out_channel, stride=1, downsample=None):

        super(Bottleneck, self).__init__()

        self.conv1 = nn.Conv2d(in_channels=in_channel, out_channels=out_channel, kernel_size=1, stride=stride, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channel)

        self.conv2 = nn.Conv2d(in_channels=out_channel, out_channels=out_channel, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channel)

        self.conv3 = nn.Conv2d(in_channels=out_channel, out_channels=out_channel * self.expansion, kernel_size=1, stride=1, bias=False)
        self.bn3 = nn.BatchNorm2d(out_channel * self.expansion)

        self.relu = nn.ReLU(inplace=True)

        self.downsample = downsample

    def forward(self, x):
        identity = x
        if self.downsample is not None:
            identity = self.downsample(x)

        output = self.conv1(x)
        output = self.bn1(output)
        output = self.relu(output)

        output = self.conv2(output)
        output = self.bn2(output)
        output = self.relu(output)

        output = self.conv3(output)
        output = self.bn3(output)
        output = self.relu(identity+output)
        return output


class ResNet(nn.Module):
    """Configurable ResNet backbone (stem + 4 residual stages + optional head).

    :param block: residual block class (``BasicBlock`` or ``Bottleneck``);
                  must expose an ``expansion`` class attribute
    :param block_num: list of four ints — number of blocks per stage
    :param num_classes: output size of the classification head
    :param include_top: if False, skip the avgpool/fc classification head
    """

    def __init__(self, block, block_num, num_classes=1000, include_top=True):

        super(ResNet, self).__init__()
        self.include_top = include_top
        self.in_channel = 64  # channel depth of the feature map after max-pooling

        self.conv1 = nn.Conv2d(in_channels=3, out_channels=self.in_channel, kernel_size=7, stride=2, padding=3,  bias=False)
        self.bn1 = nn.BatchNorm2d(self.in_channel)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        self.layer1 = self.__make_layer(block, 64, block_num[0])
        self.layer2 = self.__make_layer(block, 128, block_num[1], stride=2)
        self.layer3 = self.__make_layer(block, 256, block_num[2], stride=2)
        self.layer4 = self.__make_layer(block, 512, block_num[3], stride=2)

        if self.include_top:
            # BUG FIX: AdaptiveAvgPool2d takes a single output_size argument;
            # passing (1, 1) as two positionals raised TypeError.
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
            self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():  # initialise conv weights (He initialisation)
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')

    def forward(self, x):
        """Forward pass: stem, four stages, then (optionally) pooled logits."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        if self.include_top:
            x = self.avgpool(x)
            x = torch.flatten(x, 1)
            x = self.fc(x)
        return x

    def __make_layer(self, block, channel, block_num, stride=1):
        """Build one residual stage of ``block_num`` blocks.

        The first block may downsample (stride/channel change); subsequent
        blocks keep shape.
        """
        downsample = None
        if stride != 1 or self.in_channel != channel * block.expansion:  # shortcut needs projection
            downsample = nn.Sequential(
                nn.Conv2d(self.in_channel, channel * block.expansion, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(channel * block.expansion)
            )
        layers = []
        layers.append(block(self.in_channel, channel, downsample=downsample, stride=stride))  # first block of the stage
        self.in_channel = channel * block.expansion
        # For 18/34-layer nets the first block leaves the channel count unchanged;
        # for 50/101/152-layer nets it multiplies channels by 4, so later blocks
        # must take the expanded channel count as input.
        for _ in range(1, block_num):  # remaining blocks never downsample
            layers.append(block(self.in_channel, channel))

        return nn.Sequential(*layers)


def RestNet18(num_classes=1000, include_top=True):
    """Build a ResNet-18: BasicBlock with stage depths [2, 2, 2, 2]."""
    return ResNet(BasicBlock, [2, 2, 2, 2], num_classes=num_classes, include_top=include_top)


def RestNet34(num_classes=1000, include_top=True):
    """Build a ResNet-34: BasicBlock with stage depths [3, 4, 6, 3]."""
    return ResNet(BasicBlock, [3, 4, 6, 3], num_classes=num_classes, include_top=include_top)


def RestNet50(num_classes=1000, include_top=True):
    """Build a ResNet-50: Bottleneck with stage depths [3, 4, 6, 3]."""
    return ResNet(Bottleneck, [3, 4, 6, 3], num_classes=num_classes, include_top=include_top)


def RestNet101(num_classes=1000, include_top=True):
    """Build a ResNet-101: Bottleneck with stage depths [3, 4, 23, 3]."""
    return ResNet(Bottleneck, [3, 4, 23, 3], num_classes=num_classes, include_top=include_top)


def RestNet152(num_classes=1000, include_top=True):
    """Build a ResNet-152: Bottleneck with stage depths [3, 8, 36, 3].

    BUG FIX: the depths were [3, 8, 26, 3]; ResNet-152 as published (and as
    implemented in torchvision) uses [3, 8, 36, 3].
    """
    return ResNet(Bottleneck, [3, 8, 36, 3], num_classes=num_classes, include_top=include_top)


def evaluate(model, loader):
    """Return classification accuracy of ``model`` over ``loader``.

    Gradient tracking is disabled for speed and memory; the caller remains
    responsible for putting the model in the desired train/eval mode.

    :param model: callable producing [batch, num_classes] logits
    :param loader: DataLoader whose ``dataset`` defines the sample count
    :return: fraction of correctly classified samples, in [0, 1]
    """
    total = len(loader.dataset)
    if total == 0:  # guard: empty dataset would otherwise divide by zero
        return 0.0
    correct = 0
    with torch.no_grad():  # inference only — no autograd bookkeeping needed
        for x, y in loader:
            logits = model(x)
            pred = logits.argmax(dim=1)  # predicted class index per sample
            correct += torch.eq(pred, y).sum().item()  # count matches vs labels
    return correct / total


def train(
        train_dataset_dir,
        val_dataset_dir,
        model_params_path,
        model_weight_path=None,
        batch_size=16,
        epoch_total=10,
        classes_num=2,
        learning_rate=0.0001
    ):
    """Train RestNet18 on ImageFolder datasets and save the best checkpoint.

    :param train_dataset_dir: root of the training ImageFolder tree
    :param val_dataset_dir: root of the validation ImageFolder tree
    :param model_params_path: file path where the best state_dict is saved
    :param model_weight_path: optional checkpoint to resume / transfer from
    :param batch_size: mini-batch size for both data loaders
    :param epoch_total: number of epochs to train
    :param classes_num: number of target classes (sizes the new fc head)
    :param learning_rate: Adam learning rate
    """
    # set train dataset loader
    assert os.path.exists(train_dataset_dir) , f"train dataset folder {train_dataset_dir} does not exist."
    # BUG FIX: os.listdir() returns a list, which never compares equal to 0;
    # check its length so an empty folder actually trips the assert.
    assert len(os.listdir(train_dataset_dir)) != 0 , "train image is not exist."
    train_transform = torchvision.transforms.Compose([
        torchvision.transforms.RandomResizedCrop(224),
        torchvision.transforms.RandomHorizontalFlip(),
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    train_dataset = torchvision.datasets.ImageFolder(root=train_dataset_dir, transform=train_transform)
    train_loader = torch.utils.data.DataLoader(
        dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=0)

    # set val dataset loader
    assert os.path.exists(val_dataset_dir) , f"val dataset folder {val_dataset_dir} does not exist."
    assert len(os.listdir(val_dataset_dir)) != 0 , f"val image is not exist."
    val_transform = torchvision.transforms.Compose([
        torchvision.transforms.Resize(256),
        torchvision.transforms.RandomResizedCrop(224),
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    val_dataset = torchvision.datasets.ImageFolder(
        root=val_dataset_dir, transform=val_transform)
    val_loader = torch.utils.data.DataLoader(
        dataset=val_dataset, batch_size=batch_size, shuffle=True, num_workers=0)

    # set model, loss and criterion
    model = RestNet18()
    # Resize the classification head for this task BEFORE optionally loading
    # weights, so checkpoints saved by this very function load cleanly.
    # (Previously the head was replaced only when NOT loading weights, so a
    # loaded model kept the wrong number of outputs.)
    model.fc = nn.Linear(model.fc.in_features, classes_num)
    # BUG FIX: the original condition used `or`, so a None weight path still
    # reached os.path.exists(None) and raised TypeError.
    if model_weight_path is not None and os.path.exists(model_weight_path):
        # transfer learning / resume: reuse previously trained weights
        model.load_state_dict(torch.load(model_weight_path, map_location='cpu'))
    device = torch.device('cpu')  # switch to 'cuda' when a GPU is available
    model.to(device)

    loss_function = nn.CrossEntropyLoss()
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.Adam(params, lr=learning_rate)

    best_acc = 0.0
    train_list = []  # per-epoch training-set accuracy history
    # start train
    step_total = len(train_loader)
    # BUG FIX: accuracy must be divided by the number of validation SAMPLES;
    # len(val_loader) is the number of batches, which inflated the metric.
    val_num = len(val_loader.dataset)
    for epoch in range(epoch_total):
        print(f'============第{epoch + 1}轮============')
        model.train()
        running_loss = 0.0
        train_bar = tqdm(train_loader, file=sys.stdout)
        # BUG FIX: iterate the tqdm wrapper (was train_loader), otherwise the
        # progress bar is created but never advances.
        for step, data in enumerate(train_bar, start=0):
            images, labels = data
            logits = model(images)
            loss = loss_function(logits, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            train_bar.desc = f"train epoch[{epoch + 1}/{epoch_total}] loss:{loss}"

        train_acc = evaluate(model, train_loader)
        train_list.append(train_acc)

        # validate
        model.eval()
        acc = 0.0  # running count of correctly classified validation samples
        with torch.no_grad():
            val_bar = tqdm(val_loader, file=sys.stdout)
            for val_data in val_bar:
                val_images, val_labels = val_data
                outputs = model(val_images)
                predict_y = torch.max(outputs, dim=1)[1]
                acc += torch.eq(predict_y, val_labels).sum().item()
                val_bar.desc = f"valid epoch[{epoch + 1}/{epoch_total}]"

        val_accurate = acc / val_num
        print('[epoch %d] train_loss: %.3f  val_accuracy: %.3f' % (epoch + 1, running_loss / step_total, val_accurate))

        # persist only the best-performing checkpoint
        if val_accurate > best_acc:
            best_acc = val_accurate
            torch.save(model.state_dict(), model_params_path)

    return


def predict(predict_dir):
    """Collect image files in ``predict_dir`` for inference.

    NOTE(review): the original function is unfinished — it builds a transform
    and scans the directory but never runs a model.  The broken path handling
    is fixed here; the actual inference step still needs to be implemented.

    :param predict_dir: directory containing images to classify
    :return: list of paths of files with a recognised image extension
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    predict_transform = torchvision.transforms.Compose([
        torchvision.transforms.RandomResizedCrop(224),
        torchvision.transforms.RandomHorizontalFlip(),
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    image_types = [".tif", ".png", ".jpg"]
    image_paths = []
    for name in os.listdir(predict_dir):
        if os.path.splitext(name)[1] in image_types:
            # BUG FIX: os.path.join() was called with no arguments (TypeError);
            # join the directory with the file name instead.
            image_paths.append(os.path.join(predict_dir, name))
    return image_paths


def main():
    """Entry point: configure dataset/checkpoint paths and start training."""
    # NOTE(review): these paths are deliberately blank placeholders — fill
    # them in before running the script.
    train_dataset_dir = r""
    val_dataset_dir = r""
    model_params_path = r""
    train(
        train_dataset_dir,
        val_dataset_dir,
        model_params_path,
        model_weight_path=None,
        batch_size=16,
        epoch_total=10,
        classes_num=10,
        learning_rate=0.0001,
    )
# NOTE(review): removed the trailing half of an unresolved git merge conflict
# (``=======`` ... ``>>>>>>> 29c0aa2d935bff831379c42b8734840e8ba46753``).
# The discarded branch held an alternative RestNetBasicBlock forward body plus
# RestNetDownBlock / RestNet18-class / train / main implementations that
# referenced undefined names (net, device, one_hot, train_loss, ResBlk,
# ResNet18) and would have shadowed the working definitions kept above.

# Script entry point: kick off the training pipeline defined in main().
if __name__ == '__main__':
    main()

