# import numpy as np
# import os
# import tensorflow as tf
# from tensorflow.keras.models import *
# from tensorflow.keras.layers import *
# from tensorflow.keras.optimizers import *
# from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler
# from tensorflow.keras import backend as keras

import torch
import torch.nn as nn
import torch.optim as optim
from torch.nn import functional as F

class UNet3S2(nn.Module):
    """U-Net variant with a dilated-convolution bottleneck (3 pooling stages).

    The encoder halves the resolution three times while doubling channel
    width. Instead of a fourth pooling stage, the bottleneck stacks four
    dilated 3x3 convolutions (rates 2, 2, 4, 4) to enlarge the receptive
    field at constant resolution. Each decoder stage upsamples, concatenates
    the matching encoder skip feature, projects to 128 channels, and refines.
    The head is a 1x1 convolution followed by a sigmoid, so every output
    value lies in (0, 1).

    Input spatial dimensions must be divisible by 8 (three 2x2 poolings).
    """

    def __init__(self, input_channels=3, num_classes=1):
        super(UNet3S2, self).__init__()

        # Encoder: channel width doubles at every stage.
        self.enc1 = self.conv_block(input_channels, 64)
        self.enc2 = self.conv_block(64, 128)
        self.enc3 = self.conv_block(128, 256)
        self.enc4 = self.conv_block(256, 512)

        # Bottleneck: padding equals dilation, so spatial size is preserved.
        self.dilated_conv4 = nn.Sequential(
            nn.Conv2d(512, 512, kernel_size=3, padding=2, dilation=2),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=3, padding=2, dilation=2),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=3, padding=4, dilation=4),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=3, padding=4, dilation=4),
            nn.ReLU(),
        )

        # Decoder projections over [upsampled features ++ skip connection]:
        # 1024 = 512 + 512, 384 = 128 + 256, 256 = 128 + 128, 192 = 128 + 64.
        self.up5 = nn.Conv2d(1024, 128, kernel_size=3, padding=1)
        self.up6 = nn.Conv2d(384, 128, kernel_size=3, padding=1)
        self.up7 = nn.Conv2d(256, 128, kernel_size=3, padding=1)
        self.up8 = nn.Conv2d(192, 128, kernel_size=3, padding=1)

        # Refinement blocks after each projection.
        self.dec5 = self.conv_block(128, 128)
        self.dec6 = self.conv_block(128, 128)
        self.dec7 = self.conv_block(128, 128)
        self.dec8 = self.conv_block(128, 128)

        # 1x1 head mapping to the requested number of output maps.
        self.final_conv = nn.Conv2d(128, num_classes, kernel_size=1)

    def conv_block(self, in_channels, out_channels):
        """Two 3x3 same-padding convolutions, each followed by ReLU."""
        return nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
            nn.ReLU(),
        )

    def up_conv(self, in_channels, out_channels):
        """3x3 same-padding conv over doubled input channels (currently unused)."""
        return nn.Conv2d(in_channels * 2, out_channels, kernel_size=3, padding=1)

    def forward(self, x):
        # --- Encoder path (keep pre-pool activations as skip connections) ---
        skip1 = self.enc1(x)
        skip2 = self.enc2(F.max_pool2d(skip1, kernel_size=2))
        skip3 = self.enc3(F.max_pool2d(skip2, kernel_size=2))
        skip4 = self.enc4(F.max_pool2d(skip3, kernel_size=2))

        # --- Dilated bottleneck (same resolution as skip4) ---
        bottom = self.dilated_conv4(skip4)

        # --- Decoder path: upsample, concat skip, project, refine ---
        # First stage needs no upsampling: bottleneck already matches skip4.
        feat = self.dec5(self.up5(torch.cat([bottom, skip4], dim=1)))

        feat = F.interpolate(feat, scale_factor=2, mode='nearest')
        feat = self.dec6(self.up6(torch.cat([feat, skip3], dim=1)))

        feat = F.interpolate(feat, scale_factor=2, mode='nearest')
        feat = self.dec7(self.up7(torch.cat([feat, skip2], dim=1)))

        feat = F.interpolate(feat, scale_factor=2, mode='nearest')
        feat = self.dec8(self.up8(torch.cat([feat, skip1], dim=1)))

        # Sigmoid head: per-pixel probabilities in (0, 1).
        return torch.sigmoid(self.final_conv(feat))

# Focal Loss
# Focal Loss
class FocalLoss(nn.Module):
    """Focal loss for binary targets over sigmoid probabilities.

    Down-weights easy examples with the modulating factor (1 - p_t) ** gamma,
    where p_t = exp(-BCE) is the probability assigned to the true class, and
    scales the result by a constant ``alpha``. Returns the mean over elements.

    NOTE(review): ``alpha`` multiplies positive and negative terms alike here,
    unlike the class-balanced alpha_t of Lin et al. — confirm this is intended.
    """

    def __init__(self, gamma=2., alpha=0.25):
        super(FocalLoss, self).__init__()
        self.gamma = gamma
        self.alpha = alpha

    def forward(self, inputs, targets):
        # Per-element BCE; note exp(-BCE) recovers p if target==1, else 1-p.
        ce = F.binary_cross_entropy(inputs, targets, reduction='none')
        p_t = torch.exp(-ce)
        modulating = (1.0 - p_t) ** self.gamma
        return (self.alpha * modulating * ce).mean()

# Initialize model, loss function, and optimizer
model = UNet3S2(num_classes=1)  # single output map; sigmoid head -> binary segmentation
criterion = FocalLoss()  # defaults: gamma=2, alpha=0.25
optimizer = optim.Adam(model.parameters(), lr=1e-5)  # same lr as the commented Keras version below


# def focal_loss(gamma=2., alpha=.25):
#     def focal_loss_fixed(y_true, y_pred):
#         pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))
#         pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))
#         return -keras.sum(alpha * keras.pow(1. - pt_1, gamma) * keras.log(pt_1)) - keras.sum(
#             (1 - alpha) * keras.pow(pt_0, gamma) * keras.log(1. - pt_0))
#
#     return focal_loss_fixed


# Dilated Convolutions & Focal Loss
# def unet3s2(pretrained_weights=None, input_size=(512, 512, 3)):
#     input = Input(input_size)
#     conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(input)
#     conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)
#     # conv1 = BatchNormalization()(conv1)
#     pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
#     conv12 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(
#         UpSampling2D(size=(2, 2))(pool1))
#
#     conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
#     conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
#     # conv2 = BatchNormalization()(conv2)
#     pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
#     conv13 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(
#         UpSampling2D(size=(4, 4))(pool2))
#
#     conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)
#     conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
#     # conv3 = BatchNormalization()(conv3)
#     pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
#     conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)
#     conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)
#     conv14 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(
#         UpSampling2D(size=(8, 8))(conv4))
#
#     conv4 = Conv2D(512, 3, activation='relu', dilation_rate=(2, 2), padding='same', kernel_initializer='he_normal')(
#         conv4)
#     conv4 = Conv2D(512, 3, activation='relu', dilation_rate=(2, 2), padding='same', kernel_initializer='he_normal')(
#         conv4)
#     conv4 = Conv2D(512, 3, activation='relu', dilation_rate=(4, 4), padding='same', kernel_initializer='he_normal')(
#         conv4)
#     conv4 = Conv2D(512, 3, activation='relu', dilation_rate=(4, 4), padding='same', kernel_initializer='he_normal')(
#         conv4)
#     # conv4 = BatchNormalization()(conv4)
#
#     pool4 = MaxPooling2D(pool_size=(64, 64))(conv4)
#     conv5 = Conv2D(128, 1, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)
#     up5 = UpSampling2D(size=(512, 512))(conv5)
#
#     pool5 = MaxPooling2D(pool_size=(32, 32))(conv4)
#     conv6 = Conv2D(128, 1, activation='relu', padding='same', kernel_initializer='he_normal')(pool5)
#     up6 = UpSampling2D(size=(256, 256))(conv6)
#
#     pool6 = MaxPooling2D(pool_size=(16, 16))(conv4)
#     conv7 = Conv2D(128, 1, activation='relu', padding='same', kernel_initializer='he_normal')(pool6)
#     up7 = UpSampling2D(size=(128, 128))(conv7)
#
#     pool7 = MaxPooling2D(pool_size=(8, 8))(conv4)
#     conv8 = Conv2D(128, 1, activation='relu', padding='same', kernel_initializer='he_normal')(pool7)
#     up8 = UpSampling2D(size=(64, 64))(conv8)
#
#     merge1 = concatenate([conv12, conv13, conv14, up5, up6, up7, up8], axis=3)
#     conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge1)
#     # conv9 = BatchNormalization()(conv9)
#     conv10 = Conv2D(2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
#     conv11 = Conv2D(1, 1, activation='sigmoid')(conv10)
#
#     model = Model(inputs=input, outputs=conv11)
#     print('model compile')
#     # model.compile(optimizer = Adam(lr = 1e-5), loss = 'binary_crossentropy', metrics = ['accuracy'])
#     model.compile(optimizer=Adam(lr=1e-5), loss=[focal_loss(alpha=.25, gamma=2)], metrics=['accuracy'])
#     model.summary()
#     if (pretrained_weights):
#         model.load_weights(pretrained_weights)
#         print('loaded pretrained_weights ... {}'.format(pretrained_weights))
#
#     return model