import torch
from torch import nn
from torch.nn import functional as F
from PIL import Image
import numpy as np
import json
import pandas as pd
from tqdm import tqdm
from torchvision import transforms
import ttach as tta


class ResidualBlock(nn.Module):  # @save
    """Basic two-conv residual block with an optional 1x1 projection shortcut.

    The main path is conv3x3 -> BN -> ReLU -> conv3x3 -> BN; the input is added
    back before a final ReLU. When `use_1x1conv` is True (used at stage
    transitions), a strided 1x1 conv reshapes the shortcut so its channel count
    and spatial size match the main path.
    """

    def __init__(self, input_channels, num_channels, use_1x1conv=False, strides=1):
        super().__init__()
        # Main path; only the first conv carries the (possibly >1) stride.
        self.conv1 = nn.Conv2d(input_channels, num_channels, kernel_size=3, padding=1, stride=strides)
        self.conv2 = nn.Conv2d(num_channels, num_channels, kernel_size=3, padding=1, stride=1)
        # Shortcut path: identity by default, strided 1x1 conv when requested.
        self.conv3 = nn.Conv2d(input_channels, num_channels, kernel_size=1, stride=strides) if use_1x1conv else None
        self.bn1 = nn.BatchNorm2d(num_channels)
        self.bn2 = nn.BatchNorm2d(num_channels)

    def forward(self, X):
        # Project the shortcut first so the addition below is shape-compatible.
        shortcut = X if self.conv3 is None else self.conv3(X)
        out = F.relu(self.bn1(self.conv1(X)))
        out = self.bn2(self.conv2(out))
        return F.relu(out + shortcut)


class ResNet18(nn.Module):
    """ResNet-18-style classifier assembled from ResidualBlock stages.

    Input: (batch, 3, H, W) images; output: (batch, num_class) logits.

    NOTE(review): the stem runs conv1 -> bn1 -> maxpool with no ReLU in
    between, whereas the reference ResNet-18 has one there. Left untouched
    so the class stays compatible with checkpoints trained under this
    definition — confirm against the training script before changing.
    """

    def __init__(self, num_class):
        super(ResNet18, self).__init__()
        # Stem: 7x7/2 conv + BN + 3x3/2 max-pool.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)
        self.bn1 = nn.BatchNorm2d(64)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        # Four stages of two residual blocks each; stages 2-4 halve the
        # spatial size and double the channels via a 1x1 projection shortcut.
        self.layer1 = nn.Sequential(ResidualBlock(64, 64, False),
                                    ResidualBlock(64, 64, False))
        self.layer2 = nn.Sequential(ResidualBlock(64, 128, True, 2),
                                    ResidualBlock(128, 128, False))
        self.layer3 = nn.Sequential(ResidualBlock(128, 256, True, 2),
                                    ResidualBlock(256, 256, False))
        self.layer4 = nn.Sequential(ResidualBlock(256, 512, True, 2),
                                    ResidualBlock(512, 512, False))

        # Global average pool to 1x1 then a single fully-connected head.
        self.avgpool = nn.AdaptiveAvgPool2d(output_size=(1, 1))
        self.fc = nn.Linear(512, num_class)

    def forward(self, x):
        out = self.maxpool(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        out = self.avgpool(out)
        out = out.reshape(out.size(0), -1)  # flatten (batch, 512, 1, 1) -> (batch, 512)
        return self.fc(out)


# Load the trained model and run inference on CPU.
# NOTE(review): torch.load on a full pickled model executes arbitrary code
# while unpickling — only load checkpoints from a trusted source.
device = torch.device('cpu')
model = torch.load('../models/model-final-full.pth', map_location=device)
model.eval()

# Test-time augmentation: average logits over the 8 dihedral (D4) transforms.
tta_model = tta.ClassificationTTAWrapper(model, tta.aliases.d4_transform(), merge_mode='mean')

# ImageNet normalization statistics — assumed to match training preprocessing.
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

# Load the numeric-label -> class-name mapping produced at training time.
with open('../data/train_files/num_to_class.txt', 'r') as f:
    num_to_class = json.loads(f.read())

# Read the test manifest and predict one image at a time. Streaming keeps
# peak memory low instead of materializing every decoded image up front.
test_data = pd.read_csv('../data/test.csv')
predictions = []
for name in tqdm(test_data['image']):
    # .convert('RGB') guards against grayscale/palette/RGBA inputs that
    # would otherwise break the 3-channel conv stem.
    img = Image.open('../data/' + str(name)).convert('RGB').resize((224, 224))
    x = torch.from_numpy(np.array(img).astype(np.float32) / 255.0)
    x = x.permute(2, 0, 1).unsqueeze(0)  # HWC -> CHW, add batch dimension
    x = normalize(x)
    with torch.no_grad():
        logits = tta_model(x.to(device))
    # Take the arg-max class per sample as the prediction.
    predictions.extend(logits.argmax(dim=-1).cpu().numpy().tolist())

# Map numeric class indices back to class names and write the submission CSV.
test_data['label'] = pd.Series([num_to_class[str(i)] for i in predictions])
submission = pd.concat([test_data['image'], test_data['label']], axis=1)
submission.to_csv('submission-new2-full.csv', index=False)
