import torch
import torch.nn as nn

class CT_encoder(nn.Module):
    """Convolutional encoder for CT images.

    Five 3x3 stride-2 conv + ReLU stages downsample the input by a factor
    of 32 and emit a 256-channel feature map.
    """

    def __init__(self, init_weights=True) -> None:
        super().__init__()
        stages = []
        # (in_channels, out_channels) for each stride-2 stage.
        for cin, cout in [(3, 64), (64, 128), (128, 256), (256, 512), (512, 256)]:
            stages.append(nn.Conv2d(cin, cout, 3, stride=2, padding=1))
            stages.append(nn.ReLU(inplace=True))
        self.features = nn.Sequential(*stages)
        # Registered but not used by forward(); kept so state_dict layout
        # and module structure match the original definition.
        self.avgpool = nn.AdaptiveAvgPool2d((13, 13))
        if init_weights:
            self._initialize_weights()

    def _initialize_weights(self):
        """Kaiming-init conv weights (zero bias); N(0, 0.01) for any Linear."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.Linear):
                nn.init.normal_(module.weight, 0, 0.01)
                nn.init.constant_(module.bias, 0)

    def forward(self, x):
        """Return the 256-channel feature map (avgpool is intentionally skipped)."""
        return self.features(x)
    
class ENDO_encoder(nn.Module):
    """Convolutional encoder for endoscopy images.

    Mirrors the CT encoder: five 3x3 stride-2 conv + ReLU stages
    (overall 32x downsampling) producing 256 output channels.
    """

    # Channel progression through the five conv stages.
    _CHANNELS = (3, 64, 128, 256, 512, 256)

    def __init__(self, init_weights=True) -> None:
        super().__init__()
        blocks = []
        for idx in range(len(self._CHANNELS) - 1):
            blocks += [
                nn.Conv2d(self._CHANNELS[idx], self._CHANNELS[idx + 1], 3,
                          stride=2, padding=1),
                nn.ReLU(inplace=True),
            ]
        self.features = nn.Sequential(*blocks)
        # Unused by forward(); retained for structural compatibility.
        self.avgpool = nn.AdaptiveAvgPool2d((13, 13))
        if init_weights:
            self._initialize_weights()

    def _initialize_weights(self):
        """Kaiming-init conv weights (zero bias); N(0, 0.01) for any Linear."""
        for layer in self.modules():
            if isinstance(layer, nn.Conv2d):
                nn.init.kaiming_normal_(layer.weight, mode='fan_out', nonlinearity='relu')
                if layer.bias is not None:
                    nn.init.constant_(layer.bias, 0)
            elif isinstance(layer, nn.Linear):
                nn.init.normal_(layer.weight, 0, 0.01)
                nn.init.constant_(layer.bias, 0)

    def forward(self, x):
        """Return the 256-channel feature map (avgpool is intentionally skipped)."""
        return self.features(x)
    
class DOUBLENET(nn.Module):
    """Two-stream classifier fusing CT and endoscopy features.

    Each encoder maps its image to a 256-channel feature map; the two maps
    are concatenated channel-wise (512 channels), refined by a small conv
    decoder, flattened, and classified by an MLP head.

    NOTE(review): the flatten assumes the decoder output is 128 x 18 x 18,
    i.e. 576x576 inputs through the five stride-2 encoder convs — the
    original "13, 13" shape comments disagreed with this; confirm against
    the dataset transforms (which crop/resize to 576).

    Args:
        num_classes: number of output classes (logit dimension).
        CT_encoder: module mapping the CT image batch to (B, 256, H, W).
        ENDO_encoder: module mapping the endoscopy batch to (B, 256, H, W).
        init_weights: run Kaiming/normal initialization when True.
    """

    def __init__(self, num_classes, CT_encoder, ENDO_encoder, init_weights=True,) -> None:
        super().__init__()
        self.ct_encoder = CT_encoder
        self.endo_encoder = ENDO_encoder

        # Conv decoder over the fused (concatenated) feature map.
        self.decoder = nn.Sequential(
            nn.Conv2d(512, 256, 3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, 3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 128, 3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 128, 3, stride=1, padding=1),
            nn.ReLU(inplace=True),
        )
        # MLP head. Fixes two defects in the original:
        #  * three stacked Linear layers had no activation between them and
        #    therefore collapsed into a single affine map — ReLUs added;
        #  * Dropout was applied AFTER the final Linear (randomly zeroing
        #    logits at train time, identity at eval) — moved before the
        #    hidden layer where it actually regularizes.
        self.classifier = nn.Sequential(
            nn.Linear(128 * 18 * 18, 1024),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(1024, 512),
            nn.ReLU(inplace=True),
            nn.Linear(512, num_classes),
        )
        if init_weights:
            self._initialize_weights()

    def _initialize_weights(self):
        """Kaiming init for convs (zero bias), N(0, 0.01) for linear layers."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        """Classify a paired sample.

        Args:
            x: dict with "ct_img" and "endo_img" image batches.

        Returns:
            Logits of shape (B, num_classes).
        """
        ct_feat = self.ct_encoder(x["ct_img"])        # B, 256, H, W
        endo_feat = self.endo_encoder(x["endo_img"])  # B, 256, H, W
        fused = torch.cat([ct_feat, endo_feat], dim=1)  # B, 512, H, W
        out = self.decoder(fused)
        out = out.view(out.size(0), -1)  # expects H = W = 18 (see class note)
        return self.classifier(out)
    
def doulenet(num_classes):
    """Build a DOUBLENET with freshly constructed CT and endoscopy encoders.

    (Name kept as-is — it is the public factory other code may import.)
    """
    return DOUBLENET(
        num_classes=num_classes,
        CT_encoder=CT_encoder(),
        ENDO_encoder=ENDO_encoder(),
    )

from torch.utils.data import dataset
from torchvision.transforms import transforms
import sys
sys.path.append("utils")
from preproc import static_resize
from lr import adjust_learning_rate
from PIL import Image
import os 

class CustomDataset(dataset.Dataset):
    """Paired CT / endoscopy image classification dataset.

    Each line of the label file is "<stem> <label>"; the CT image is
    "<stem>.jpg" and the endoscopy image "<stem>enj.jpg", both located
    under `image_path`.

    Fixes over the original:
      * `__len__` was defined twice (duplicate dead code) — deduplicated;
      * images are now converted to RGB so grayscale/RGBA files don't
        break the 3-channel Normalize, and file handles are closed
        promptly via `with`.
    """

    def __init__(self, image_path, label_path, train=True) -> None:
        super().__init__()
        self.train = train
        self.image_path = image_path
        self.img_infos = self.get_info(label_path)
        # Training: heavy augmentation on top of a fixed 640 resize.
        self.train_transform = transforms.Compose([
            static_resize([640, 640]),
            transforms.RandomResizedCrop([576, 576]),
            transforms.RandomVerticalFlip(p=0.5),
            transforms.RandomHorizontalFlip(p=0.5),
            transforms.RandomAffine(degrees=30, scale=(0.8, 1.5)),
            transforms.ToTensor(),
            transforms.Normalize([0.47994414, 0.42569083, 0.40634888],
                                 [0.2608571, 0.23259424, 0.22677247]),
        ])
        # Evaluation: deterministic resize + the same normalization.
        self.test_transform = transforms.Compose([
            static_resize([576, 576]),
            transforms.ToTensor(),
            transforms.Normalize([0.47994414, 0.42569083, 0.40634888],
                                 [0.2608571, 0.23259424, 0.22677247])
        ])

    def get_info(self, label_path):
        """Parse the label file into dicts of image file names and label string."""
        img_info = []
        with open(label_path, "r") as f:
            for line in f.readlines():
                line = line.strip().split(" ")
                img_info.append({
                    "ct_img": line[0] + ".jpg",
                    "endo_img": line[0] + "enj.jpg",
                    "label": line[1]
                })
        return img_info

    def __len__(self):
        return len(self.img_infos)

    def __getitem__(self, index):
        """Return ({"ct_img": tensor, "endo_img": tensor}, int_label)."""
        img_info = self.img_infos[index]
        # Convert to RGB: Normalize above assumes exactly 3 channels.
        with Image.open(os.path.join(self.image_path, img_info["ct_img"])) as f:
            ct_img = f.convert("RGB")
        with Image.open(os.path.join(self.image_path, img_info["endo_img"])) as f:
            endo_img = f.convert("RGB")
        transform = self.train_transform if self.train else self.test_transform
        img = {
            "ct_img": transform(ct_img),
            "endo_img": transform(endo_img),
        }
        label = int(img_info["label"])
        return img, label


from  torch.utils.data import DataLoader
from FocalLoss import FocalLoss
from torch.utils.tensorboard import SummaryWriter #tensorboard 可视化
import time
import os
from sklearn.metrics import confusion_matrix
import numpy as np

# --- Script-level setup: data, model, loss, optimizer, logging ---
# NOTE(review): hard-coded absolute paths — move to CLI args/config for reuse.
train_dataset = CustomDataset(image_path = "/home/cheng/data/workspace/part-time/ear/doublenet/data/data/images", 
                              label_path = "/home/cheng/data/workspace/part-time/ear/doublenet/data/data/train.txt", train=True)
val_dataset = CustomDataset(image_path = "/home/cheng/data/workspace/part-time/ear/doublenet/data/data/images", 
                              label_path = "/home/cheng/data/workspace/part-time/ear/doublenet/data/data/val.txt", train=False)
batchsize = 8
train_loader = DataLoader(train_dataset, batch_size=batchsize, shuffle=True, num_workers=4)
val_loader = DataLoader(val_dataset, batch_size=batchsize, shuffle=False, num_workers=4)

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# 3-class problem; doulenet() constructs both encoders internally.
model = doulenet(3)
model.to(device)

# Class-weighted focal loss; alpha gives the per-class weights for 3 classes.
criterion = FocalLoss(class_num=3, alpha=torch.tensor([0.35, 0.4, 0.25]), gamma=2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
# One timestamped output folder per run; doubles as the TensorBoard log dir.
current_time = time.localtime()
savefolder = os.path.join("output/" + "for_test", time.strftime("%Y_%m_%d_%H_%M_%S", current_time))
os.makedirs(savefolder, exist_ok = True)
tblogger = SummaryWriter(savefolder)

# --- Training / validation loop (script-level) ---
start_epoch = 0
Accuracy = []       # per-epoch train accuracy (%)
Loss = []           # per-epoch train loss
Val_Accuracy = []   # per-epoch validation accuracy (%)
BEST_VAL_ACC = 0.
# Training
since = time.time()
print("start_epoch: ", start_epoch)
max_epoch = 200
for epoch in range(start_epoch, max_epoch):
    train_loss = 0.
    train_accuracy = 0.
    run_accuracy = 0.
    run_loss =0.
    total = 0.
    model.train()
    # Project-local LR schedule with warmup.
    adjust_learning_rate(optimizer, epoch, max_epoch, warmup=True)
    for i, data in enumerate(train_loader):
        inputs, labels = data
        inputs["ct_img"] = inputs["ct_img"].to(device)
        inputs["endo_img"] = inputs["endo_img"].to(device)
        labels = labels.to(device)

        optimizer.zero_grad()
        outs = model(inputs)
        loss = criterion(outs, labels)
        loss.backward()
        optimizer.step()
        # Progress bookkeeping.
        total += labels.size(0)
        # NOTE(review): run_loss holds only the CURRENT batch loss (it is
        # assigned, not accumulated), and the print below divides it by
        # batch size again — if FocalLoss already returns a batch mean,
        # the printed loss is scaled down by the batch size. Verify.
        run_loss = loss.item()
        _, prediction = torch.max(outs, 1)
        # print("prediction====", prediction)
        run_accuracy = (prediction == labels).sum().item()
        if i % 20 == 19:
            print('|epoch: {}|, |iter: {}|, |lr: {:.4f}| train accuracy: {:.4f}% |loss: {:.4f}|'.format(epoch + 1,
                    i + 1, optimizer.param_groups[0]['lr'], 100*run_accuracy/labels.size(0), run_loss/labels.size(0)))
        train_accuracy += run_accuracy
        train_loss += run_loss
        # run_accuracy, run_loss = 0., 0.
    # NOTE(review): train_loss sums per-batch losses but is divided by the
    # number of SAMPLES (total); a per-batch average would divide by the
    # number of iterations instead. The logged value is therefore scaled
    # by ~1/batchsize — confirm the intended metric.
    Loss.append(train_loss / total)
    tblogger.add_scalar("train_loss", train_loss / total, epoch + 1)  # TensorBoard logging
    Accuracy.append(100 * train_accuracy/total)
    tblogger.add_scalar("train_accuracy", 100 * train_accuracy/total, epoch + 1)  # TensorBoard logging
    tblogger.add_scalar("lr", optimizer.param_groups[0]['lr'], epoch + 1)  # TensorBoard logging

    # Validation pass (no gradients, eval mode).
    acc = 0.
    model.eval()
    print('waitting for Val...')
    label_list = []
    prediction_list = []
    with torch.no_grad():
        accuracy = 0.
        total =0
        for data in val_loader:
            inputs, labels = data
            inputs["ct_img"] = inputs["ct_img"].to(device)
            inputs["endo_img"] = inputs["endo_img"].to(device)
            labels = labels.to(device)
            outputs = model(inputs)
            _, prediction = torch.max(outputs.data, 1)
            label_list.extend(list(np.array(labels.cpu())))
            prediction_list.extend(list(np.array(prediction.cpu())))
            total += labels.size(0)
            accuracy += (prediction == labels).sum().item()
        acc = 100.*accuracy/total
    tblogger.add_scalar("val_accuracy", acc, epoch + 1)  # TensorBoard logging
    print('epoch {}  The ValSet accuracy is {:.4f}% \n'.format(epoch + 1, acc))
    Val_Accuracy.append(acc)

    # Confusion matrix over the whole validation set (classes 0..2).
    cm = confusion_matrix(np.array(label_list), np.array(prediction_list), labels=np.arange(0, 3).tolist())
    print('Confusion matrix\n\n', cm)
    # Checkpoint every epoch, plus a separate "best.pth" on improvement.
    # NOTE(review): one file per epoch (200 total) can consume a lot of disk.
    ckpt_state = {
            "start_epoch":epoch + 1,
            "model": model.state_dict(),
            "optimizer": optimizer.state_dict(),
        }
    torch.save(ckpt_state, savefolder + f"/epoch_{epoch}.pth")
    if acc > BEST_VAL_ACC:
        print('Find Better Model and Saving it...')
        torch.save(ckpt_state, savefolder + "/best.pth")
        BEST_VAL_ACC = acc
        print('Saved!')
    
    # Elapsed time since script start (printed every epoch, inside the loop).
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed%60))
    print('Now the best val Acc is {:.4f}%'.format(BEST_VAL_ACC))