# encoding: utf-8

import logging
import numpy as np

import torch
import torch.nn as nn
from torch import Tensor
from torch.utils import data
from transformers import BertModel, BertConfig

# Prefer the first CUDA device, falling back to CPU. Both branches now yield a
# torch.device (the original CPU branch produced a plain str, giving the
# constant an inconsistent type).
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


class _BaseModel(torch.nn.Module):
    """Shared base for the BERT classifier heads.

    Holds the pretrained encoder, its configuration (for ``hidden_size``
    lookups in subclasses), and a dropout layer common to all heads.
    """

    def __init__(self, pretrained_model: BertModel, pretrained_config: BertConfig, drop_rate: float = 0.4):
        super().__init__()
        self.bert_model = pretrained_model
        self.bert_config = pretrained_config
        self.dropout = torch.nn.Dropout(drop_rate)


class ResNetBlock(torch.nn.Module):
    """A stack of residual fully-connected layers.

    Every layer computes ``x = origin_x + activate(fc(x))`` where ``origin_x``
    is the block *input*, i.e. each layer adds its output back onto the
    original activation rather than onto the previous layer's result
    (this mirrors the original implementation's forward pass).
    """

    def __init__(self, num_class: int, layers_num: int = 1, activate: str = "relu"):
        """
        Args:
            num_class: input/output feature dimension of every fc layer.
            layers_num: number of residual fc layers.
            activate: activation name; "selu" selects SELU, anything else ReLU.
        """
        super().__init__()
        self.num_class = num_class
        self.layers_num = layers_num
        self.activate = self.get_activate_func(activate)
        # BUGFIX: the layers used to live in a plain dict, so they were NOT
        # registered as submodules -- their parameters were invisible to
        # .parameters() (never optimized), .to(device)/.cuda(), and
        # state_dict() (never saved). nn.ModuleDict registers them properly.
        self.fc_dict = nn.ModuleDict()
        self.build_layers(activate)

    @staticmethod
    def get_activate_func(activate: str = "relu") -> nn.Module:
        """Map an activation name to its module; defaults to ReLU."""
        if activate == "selu":
            return nn.SELU()
        return nn.ReLU()

    def build_layers(self, activate: str = "relu"):
        """Create the residual fc layers, named ``resBlockFc_1..layers_num``."""
        if self.activate is None:
            self.activate = self.get_activate_func(activate)

        for i in range(self.layers_num):
            # No explicit device: registered submodules follow the parent
            # module when the whole model is moved with .to(device).
            self.fc_dict[f"resBlockFc_{i + 1}"] = nn.Linear(self.num_class, self.num_class)

    def forward(self, x: Tensor) -> Tensor:
        origin_x = x
        for i in range(self.layers_num):
            # Direct lookup: build_layers created every name. The old code
            # silently fell back to a freshly-created *untrained* Linear layer
            # (re-randomized on every forward call) when a name was missing;
            # a missing layer is now a hard KeyError instead.
            fc_layer = self.fc_dict[f"resBlockFc_{i + 1}"]
            x = origin_x + self.activate(fc_layer(x))

        return x


class ModelResnet(_BaseModel):
    """BERT classifier head: dropout -> fc -> ReLU -> residual block -> softmax."""

    def __init__(self, pretrained_model: BertModel, pretrained_config: BertConfig, num_class: int,  # noqa
                 res_layer_num: int, drop_rate: float = 0.4):
        """
        Args:
            pretrained_model: BERT encoder producing the pooled output.
            pretrained_config: encoder config; only hidden_size is used here.
            num_class: number of target classes.
            res_layer_num: depth of the residual fc block.
            drop_rate: dropout probability applied to the pooled output.
        """
        super().__init__(pretrained_model=pretrained_model, pretrained_config=pretrained_config,
                         drop_rate=drop_rate)
        self.resnet = ResNetBlock(num_class, layers_num=res_layer_num)

        self.fc1 = torch.nn.Linear(self.bert_config.hidden_size, num_class)
        self.relu = torch.nn.ReLU()
        # FIX: explicit dim. The head operates on (batch, num_class) logits,
        # so the softmax must normalize over the class dimension; implicit-dim
        # Softmax is deprecated and only guesses the dimension.
        # NOTE(review): if this model is trained with CrossEntropyLoss, the
        # softmax here is redundant (that loss applies log-softmax itself) --
        # confirm against the training loop.
        self.softmax = torch.nn.Softmax(dim=-1)

    def forward(self, token_ids):
        # [1] selects the pooled (CLS) output of the BERT encoder.
        bert_out = self.bert_model(token_ids)[1]
        bert_out = self.dropout(bert_out)
        bert_out = self.fc1(bert_out)
        bert_out = self.relu(bert_out)
        output = self.resnet(bert_out)
        output = self.softmax(output)
        return output


class ModelResnet15(_BaseModel):
    """BERT classifier head with a 15-layer residual block by default.

    Pipeline: pooled BERT output -> dropout -> fc -> residual block -> softmax.
    Note: unlike ModelResnet, no ReLU is applied between fc1 and the block.
    """

    def __init__(self, pretrained_model: BertModel, pretrained_config: BertConfig, num_class: int,  # noqa
                 res_layer_num: int = 15, drop_rate: float = 0.4):
        super().__init__(pretrained_model=pretrained_model, pretrained_config=pretrained_config,
                         drop_rate=drop_rate)
        self.resnet = ResNetBlock(num_class, layers_num=res_layer_num)

        self.fc1 = torch.nn.Linear(self.bert_config.hidden_size, num_class)
        # Not used in forward; kept so the module's attribute set is unchanged.
        self.relu = torch.nn.ReLU()
        # FIX: explicit dim -- inputs are (batch, num_class), normalize over
        # the class dimension; implicit-dim Softmax is deprecated.
        # NOTE(review): redundant if trained with CrossEntropyLoss -- confirm.
        self.softmax = torch.nn.Softmax(dim=-1)

    def forward(self, token_ids):
        # [1] selects the pooled (CLS) output of the BERT encoder.
        bert_out = self.bert_model(token_ids)[1]
        bert_out = self.dropout(bert_out)
        bert_out = self.fc1(bert_out)
        output = self.resnet(bert_out)
        output = self.softmax(output)
        return output


class ModelResnet51(_BaseModel):
    """BERT classifier head with a 51-layer residual block by default.

    Pipeline: pooled BERT output -> dropout -> fc -> residual block -> softmax.
    """

    def __init__(self, pretrained_model: BertModel, pretrained_config: BertConfig, num_class: int,  # noqa
                 res_layer_num: int = 51, drop_rate: float = 0.4):
        super().__init__(pretrained_model=pretrained_model, pretrained_config=pretrained_config,
                         drop_rate=drop_rate)
        self.resnet = ResNetBlock(num_class, layers_num=res_layer_num)

        self.fc1 = torch.nn.Linear(self.bert_config.hidden_size, num_class)
        # Not used in forward; kept so the module's attribute set is unchanged.
        self.relu = torch.nn.ReLU()
        # FIX: explicit dim -- inputs are (batch, num_class), normalize over
        # the class dimension; implicit-dim Softmax is deprecated.
        # NOTE(review): redundant if trained with CrossEntropyLoss -- confirm.
        self.softmax = torch.nn.Softmax(dim=-1)

    def forward(self, token_ids):
        # [1] selects the pooled (CLS) output of the BERT encoder.
        bert_out = self.bert_model(token_ids)[1]
        bert_out = self.dropout(bert_out)
        bert_out = self.fc1(bert_out)
        output = self.resnet(bert_out)
        output = self.softmax(output)
        return output


class ModelResnet101(_BaseModel):
    """BERT classifier head with a 101-layer residual block by default.

    Pipeline: pooled BERT output -> dropout -> fc -> residual block -> softmax.
    """

    def __init__(self, pretrained_model: BertModel, pretrained_config: BertConfig, num_class: int,  # noqa
                 res_layer_num: int = 101, drop_rate: float = 0.4):
        super().__init__(pretrained_model=pretrained_model, pretrained_config=pretrained_config,
                         drop_rate=drop_rate)
        self.resnet = ResNetBlock(num_class, layers_num=res_layer_num)

        self.fc1 = torch.nn.Linear(self.bert_config.hidden_size, num_class)
        # Not used in forward; kept so the module's attribute set is unchanged.
        self.relu = torch.nn.ReLU()
        # FIX: explicit dim -- inputs are (batch, num_class), normalize over
        # the class dimension; implicit-dim Softmax is deprecated.
        # NOTE(review): redundant if trained with CrossEntropyLoss -- confirm.
        self.softmax = torch.nn.Softmax(dim=-1)

    def forward(self, token_ids):
        # [1] selects the pooled (CLS) output of the BERT encoder.
        bert_out = self.bert_model(token_ids)[1]
        bert_out = self.dropout(bert_out)
        bert_out = self.fc1(bert_out)
        output = self.resnet(bert_out)
        output = self.softmax(output)
        return output


class Model(_BaseModel):
    """Plain two-layer BERT classification head; returns raw logits."""

    def __init__(self, pretrained_model: BertModel, pretrained_config: BertConfig, num_class: int,
                 drop_rate: float = 0.4):
        super().__init__(pretrained_model=pretrained_model,
                         pretrained_config=pretrained_config,
                         drop_rate=drop_rate)

        hidden = self.bert_config.hidden_size
        self.fc1 = torch.nn.Linear(hidden, hidden)
        self.fc2 = torch.nn.Linear(hidden, num_class)
        self.relu = torch.nn.ReLU()

    def forward(self, token_ids):
        # [1] selects the pooled (CLS) output of the BERT encoder.
        pooled = self.bert_model(token_ids)[1]
        hidden = self.relu(self.fc1(self.dropout(pooled)))
        return self.fc2(self.dropout(hidden))


class ModelLarge(_BaseModel):
    """Deeper BERT head: fc1 -> fc2, then layer_num passes through fc3.

    The same fc3 layer is reused on every pass (weight sharing), with the
    activation and dropout applied before each pass.
    """

    def __init__(self, pretrained_model: BertModel, pretrained_config: BertConfig, num_class: int,
                 drop_rate: float = 0.4, layer_num: int = 5, activate_func: str = "relu"):
        super().__init__(pretrained_model=pretrained_model,
                         pretrained_config=pretrained_config,
                         drop_rate=drop_rate)
        self.layer_num = layer_num
        hidden = self.bert_config.hidden_size
        self.fc1 = torch.nn.Linear(hidden, hidden)
        self.fc2 = torch.nn.Linear(hidden, num_class)
        self.fc3 = torch.nn.Linear(num_class, num_class)
        # "selu" selects SELU; any other value falls back to ReLU.
        self.activate = torch.nn.SELU() if activate_func == "selu" else torch.nn.ReLU()

    def forward(self, token_ids):
        out = self.bert_model(token_ids)[1]  # noqa  -- pooled (CLS) output
        out = self.fc1(self.activate(self.dropout(out)))
        out = self.fc2(self.dropout(out))
        for _ in range(self.layer_num):
            out = self.fc3(self.dropout(self.activate(out)))
        return out


class ModelAuto(_BaseModel):
    """BERT head that derives its dropout rate and depth from num_class.

    Both the dropout rate and the number of passes through the shared fc3
    layer grow with the number of classes, capped at max_drop_rate and
    max_layer_num respectively.
    """

    def __init__(self, pretrained_model: BertModel, pretrained_config: BertConfig, num_class: int,
                 max_drop_rate: float = 0.7, max_layer_num: int = 15):
        # Plain (non-module) attributes are safe to set before super().__init__().
        self.max_drop_rate = max_drop_rate
        self.max_layer_num = max_layer_num
        drop_rate = self.get_dropout_rate(num_class)
        super(ModelAuto, self).__init__(pretrained_model=pretrained_model, pretrained_config=pretrained_config,
                                        drop_rate=drop_rate)
        self.drop_rate = drop_rate
        # BUGFIX: get_layer_num was dead code -- forward looped max_layer_num
        # times regardless of num_class, defeating the "auto" sizing. The
        # computed depth is now stored and used by forward.
        self.layer_num = self.get_layer_num(num_class)
        self.activate = self.get_activate_by_layer_num()
        self.fc1 = torch.nn.Linear(self.bert_config.hidden_size, self.bert_config.hidden_size)
        self.fc2 = torch.nn.Linear(self.bert_config.hidden_size, num_class)
        self.fc3 = torch.nn.Linear(num_class, num_class)

    def forward(self, token_ids):
        bert_out = self.bert_model(token_ids)[1]  # noqa  -- pooled (CLS) output
        bert_out = self.dropout(bert_out)
        bert_out = self.activate(bert_out)
        bert_out = self.fc1(bert_out)
        bert_out = self.dropout(bert_out)
        bert_out = self.fc2(bert_out)
        # fc3 is shared across all passes (weight sharing), as before.
        for i in range(self.layer_num):
            bert_out = self.activate(bert_out)
            bert_out = self.dropout(bert_out)
            bert_out = self.fc3(bert_out)
        return bert_out

    def get_activate_by_layer_num(self) -> torch.nn.Module:
        """Pick SELU for deep configurations (max_layer_num > 8), else ReLU."""
        activate_name = "relu"
        activate_func = torch.nn.ReLU()
        if self.max_layer_num > 8:
            activate_name = "selu"
            activate_func = torch.nn.SELU()

        # BUGFIX: the log used to report the caps (max_drop_rate,
        # max_layer_num) as if they were the actual settings.
        logging.info(
            f"use ModelAuto with drop={self.drop_rate}, layers={self.layer_num}, activate_func={activate_name}")
        return activate_func

    def get_dropout_rate(self, num_class: int) -> float:
        """Return 0.4, plus 0.02 per 100 classes above 100, capped at max_drop_rate."""
        drop_rate = 0.4
        if num_class < 100:
            return drop_rate
        drop_rate += 0.02 * (num_class // 100)
        return min(drop_rate, self.max_drop_rate)

    def get_layer_num(self, num_class: int) -> int:
        """Return 3, plus 1 per 200 classes above 100, capped at max_layer_num."""
        layer_num = 3
        if num_class < 100:
            return layer_num
        layer_num += num_class // 200
        return min(layer_num, self.max_layer_num)


# dataloader
class DataGen(data.Dataset):
    """Minimal in-memory Dataset pairing samples with their labels."""

    def __init__(self, data, label):  # noqa -- parameter name kept for callers
        self.data = data
        self.label = label

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sample, target = self.data[index], self.label[index]
        return np.array(sample), np.array(target)
