import os
from collections import OrderedDict

import torch
from torch import nn
from torch.autograd import Variable
from torch.backends import cudnn

from codec import Decode, Encode
from config import Config


def layer_conv1d(
        ml: list,
        in_channels: int,
        out_channels: int,
        kernel_size: tuple = (3,),
        stride: tuple = (2,),
        padding: tuple = (1,),
        bias: bool = False,
        batch_norm: bool = True,
        activation=None):
    """Append a Conv1d stage to the (name, module) list ``ml`` in place.

    The convolution is always appended; a BatchNorm1d follows when
    ``batch_norm`` is true, and ``activation`` (a module instance) is
    appended last when given. Each entry is named ``layer_NN`` from its
    position in ``ml`` at the moment it is added.
    """
    conv = nn.Conv1d(in_channels=in_channels,
                     out_channels=out_channels,
                     kernel_size=kernel_size,
                     stride=stride,
                     padding=padding,
                     bias=bias)
    ml.append(("layer_%02d" % len(ml), conv))
    if batch_norm:
        ml.append(("layer_%02d" % len(ml), nn.BatchNorm1d(out_channels)))
    if activation:
        ml.append(("layer_%02d" % len(ml), activation))


def layer_dropout(ml, p=0.5):
    """Append a Dropout(p) entry to ``ml``, named from its position."""
    ml.append(("layer_%02d" % len(ml), nn.Dropout(p=p)))


class Sigmoid1(nn.Module):
    """Sigmoid with a temperature-style divisor: ``sigmoid(x / div)``."""

    def __init__(self, div=1):
        super().__init__()
        # Input is divided by this before the sigmoid is applied.
        self.div = div

    def forward(self, x) -> torch.Tensor:
        return torch.sigmoid(x / self.div)


def model_list():
    """Assemble (name, module) pairs for the plain wake-up network.

    Twelve stride-2 conv stages take a 1 x 8192 waveform down to 64 x 2,
    a kernel-2 stage reduces it to 64 x 1, and a final 1x1 conv with
    Sigmoid1 emits a single score.
    """
    ml = []  # input = 1 x 8192
    stages = [
        (1, 2), (2, 2), (2, 4), (4, 4), (4, 8), (8, 8),
        (8, 16), (16, 16), (16, 32), (32, 32), (32, 64), (64, 64),
    ]
    for cin, cout in stages:  # length halves each stage: 4096, 2048, ..., 2
        layer_conv1d(ml, cin, cout, (3,), (2,), (1,), bias=False,
                     batch_norm=True, activation=nn.LeakyReLU(.1))
    layer_conv1d(ml, 64, 64, (2,), (1,), (0,), bias=False,
                 batch_norm=True, activation=nn.LeakyReLU(.1))  # 64 x 1
    layer_conv1d(ml, 64, 1, (1,), (1,), (0,), bias=True,
                 batch_norm=False, activation=Sigmoid1(1))      # 1 x 1
    return ml


def model_list1():
    """Alternative wake-up layer list.

    Seven stride-2 stages grow channels to 128 while shrinking the
    1 x 8192 input to 128 x 64; seven stride-1 stages collapse channels
    back to 1 x 64; a kernel-64 conv with Sigmoid yields one score.
    """
    ml = []  # input = 1 x 8192
    down = [(1, 2), (2, 4), (4, 8), (8, 16), (16, 32), (32, 64), (64, 128)]
    for cin, cout in down:  # -> 128 x 64
        layer_conv1d(ml, cin, cout, (3,), (2,), (1,), bias=False,
                     batch_norm=True, activation=nn.LeakyReLU(.1))
    squeeze = [(128, 64), (64, 32), (32, 16), (16, 8), (8, 4), (4, 2), (2, 1)]
    for cin, cout in squeeze:  # length stays 64 -> 1 x 64
        layer_conv1d(ml, cin, cout, (3,), (1,), (1,), bias=False,
                     batch_norm=True, activation=nn.LeakyReLU(.1))
    layer_conv1d(ml, 1, 1, (64,), (1,), (0,), bias=True,
                 batch_norm=False, activation=nn.Sigmoid())  # 1 x 1
    return ml


class WakeupModule(nn.Module):
    """Convolutional wake-word detector.

    Wraps the layer stack from ``model_list()`` in an ``nn.Sequential``,
    selects CUDA when available, and either loads weights from
    ``weight_path`` or applies normal-distribution initialization.
    """

    def __init__(self, weight_path=None):
        super(WakeupModule, self).__init__()
        # Samples per forward pass; the net expects input of 1 x wav_size.
        self.wav_size = Config["chunk"] * Config["chunks_per_sample"]
        ml = model_list()
        self.module = nn.Sequential(OrderedDict(ml))

        print(torch.__version__)
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        print('device=' + str(self.device))

        if str(self.device) == 'cuda':
            # NOTE(review): CUDA_VISIBLE_DEVICES is set after CUDA has already
            # been queried, so it likely has no effect; the DataParallel
            # wrapper below is also discarded (return value not kept) — confirm intent.
            os.environ["CUDA_VISIBLE_DEVICES"] = '0'
            torch.nn.DataParallel(self)
            cudnn.benchmark = True
            self.cuda(0)
        else:
            self.cpu()

        if weight_path:
            self.load_weight(weight_path)
        else:
            self.weights_init()

    def forward(self, x):
        """Run the full conv stack; returns the final 1 x 1 score map."""
        x = self.module(x)
        return x

    def weights_init(self):
        """Initialize Conv1d N(0, 0.02) and BatchNorm1d N(1, 0.02), biases zero."""
        def weights_init_normal(m):
            if isinstance(m, nn.Conv1d):
                nn.init.normal_(m.weight.data, 0.0, 0.02)
                if m.bias is not None:
                    nn.init.constant_(m.bias.data, 0.0)
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.normal_(m.weight.data, 1.0, 0.02)
                if m.bias is not None:
                    nn.init.constant_(m.bias.data, 0.0)
            else:
                # Guard: any other module type must be parameter-free, so that
                # no layer silently escapes initialization.
                assert (not hasattr(m, "weight")) and (not hasattr(m, "bias"))
                assert m.__class__.__name__.find("Conv") == -1
                assert m.__class__.__name__.find("BatchNorm") == -1

        self.apply(weights_init_normal)

    def load_weight(self, weight_path):
        """Load a state dict, forcing every BatchNorm batch counter to 10000.

        NOTE(review): with BatchNorm's default momentum the
        ``num_batches_tracked`` counter does not affect normalization —
        presumably this guards checkpoints saved with ``momentum=None``;
        confirm why 10000.
        """
        state_dict = torch.load(weight_path, map_location=self.device)
        for k, v in state_dict.items():
            if k.find('.num_batches_tracked') != -1:
                state_dict[k] *= 0
                state_dict[k] += 10000
        self.load_state_dict(state_dict)

    def save_weight(self, weight_path, zip_flag=False):
        """Save the state dict; ``zip_flag`` selects the zipfile format."""
        torch.save(self.state_dict(), weight_path, _use_new_zipfile_serialization=zip_flag)

    def recognize(self, wav_data):
        """Encode raw wave data and score it without tracking gradients.

        NOTE(review): the model is not switched to ``eval()`` here, so
        BatchNorm still runs in training mode — confirm this is intended.
        """
        assert isinstance(wav_data, object)  # always true; effectively a no-op
        wav_data, _ = Encode()(wav_data)
        wav_data = torch.stack([wav_data])  # add a batch dimension
        wav_data = Variable(wav_data.to(self.device))
        with torch.no_grad():
            outputs = self.forward(wav_data)
        return outputs

    def train_wave(self, wav_data, targets, optimizer):
        """One training step: forward, Decode loss, backward, optional update.

        When ``optimizer`` is falsy, gradients accumulate across calls
        (backward runs but no step/zero_grad) — presumably used for
        gradient accumulation; confirm against the training loop.
        """
        wav_data = Variable(wav_data.to(self.device))
        targets = Variable(targets.to(self.device))

        outputs = self.forward(wav_data)
        loss = Decode()(inputs=outputs, targets=targets)
        loss = sum(loss) / len(loss)  # average the per-item losses
        loss.backward()
        if optimizer:
            optimizer.step()
            optimizer.zero_grad()
        return loss


class Concatenate(nn.Module):
    """Stateful concatenation: remembers the last ``depth`` inputs and
    concatenates them (oldest first) along ``cat_dim``.

    Entries whose shape does not match the current input (the initial
    placeholders, or leftovers from a different input size) are replaced
    by zeros shaped like the input before concatenating.
    """

    def __init__(self, depth, cat_dim):
        super(Concatenate, self).__init__()
        self.depth = depth
        self.cat_dim = cat_dim
        # Placeholder history; shape (1, 1, 1) never matches real input,
        # so placeholders are zeroed out on first use.
        self.XList = [torch.Tensor([[[0]]])] * self.depth

    def forward(self, x):
        # Slide the window: drop the oldest entry, remember the newest.
        self.XList = self.XList[1:] + [x]
        # Zero out stale entries until the first shape match; later
        # entries are assumed to match as well (newest always does).
        for idx, remembered in enumerate(self.XList):
            if remembered.shape == x.shape:
                break
            self.XList[idx] = x * 0
        return torch.cat(self.XList, self.cat_dim)

    def clear(self):
        """Reset the remembered history to placeholders."""
        self.XList = [torch.Tensor([[[0]]])] * self.depth


def layer_cr(ml, channels: int):
    """Append a stride-2 conv (channels -> channels) plus a Concatenate(2, 1)
    that doubles the channel count with the previous call's output."""
    layer_conv1d(ml, channels, channels, (3,), (2,), (1,), bias=False,
                 batch_norm=True, activation=nn.LeakyReLU(.1))
    ml.append(("layer_%02d" % len(ml), Concatenate(2, 1)))


def layer_crc(ml, channels: int):
    """Append a stride-2 conv (channels -> channels), a Concatenate(2, 2)
    that doubles the time axis with remembered context, then a stride-2
    conv that doubles the channel count."""
    layer_conv1d(ml, channels, channels, (3,), (2,), (1,), bias=False,
                 batch_norm=True, activation=nn.LeakyReLU(.1))
    ml.append(("layer_%02d" % len(ml), Concatenate(2, 2)))
    layer_conv1d(ml, channels, channels*2, (3,), (2,), (1,), bias=False,
                 batch_norm=True, activation=nn.LeakyReLU(.1))


def rnn_model_list():
    """Layer list for the streaming model: a per-chunk conv front end, a
    Concatenate(8, 2) that stitches 8 remembered chunks along time, a
    conv back end, and a Sigmoid head producing one score per call.
    Input per call is 1 x 1024 (one eighth of the 8192-sample window).
    """
    ml = []  # input = 1 x 1024
    front = [(1, 2), (2, 2), (2, 4), (4, 4), (4, 8), (8, 8), (8, 16)]
    for cin, cout in front:  # per-chunk: length 512, 256, ..., 8
        layer_conv1d(ml, cin, cout, (3,), (2,), (1,), bias=False,
                     batch_norm=True, activation=nn.LeakyReLU(.1))
    # Stitch the last 8 chunk features along the time axis: 16 x 64.
    ml.append(("layer_%02d" % len(ml), Concatenate(8, 2)))
    back = [(16, 16), (16, 32), (32, 32), (32, 64), (64, 64)]
    for cin, cout in back:  # length 32, 16, 8, 4, 2
        layer_conv1d(ml, cin, cout, (3,), (2,), (1,), bias=False,
                     batch_norm=True, activation=nn.LeakyReLU(.1))
    layer_conv1d(ml, 64, 64, (2,), (1,), (0,), bias=True,
                 batch_norm=False, activation=nn.LeakyReLU(.1))  # 64 x 1
    layer_conv1d(ml, 64, 1, (1,), (1,), (0,), bias=True,
                 batch_norm=False, activation=nn.Sigmoid())      # 1 x 1
    return ml


def rnn_model_list1():
    """Streaming layer list: seven stride-2 conv stages, each followed by
    a Concatenate(2, 1) that doubles the channel count with the previous
    call's output, then a 128-channel tail down to a single Sigmoid score.
    Input per call is 1 x 1024.
    """
    ml = []  # input = 1 x 1024
    for ch in (1, 2, 4, 8, 16, 32, 64):
        layer_conv1d(ml, ch, ch, (3,), (2,), (1,), bias=False,
                     batch_norm=True, activation=nn.LeakyReLU(.1))
        # Channel dim doubles: ch x L -> 2*ch x L.
        ml.append(("layer_%02d" % len(ml), Concatenate(2, 1)))
    layer_conv1d(ml, 128, 128, (3,), (2,), (1,), bias=False,
                 batch_norm=True, activation=nn.LeakyReLU(.1))   # 128 x 4
    layer_conv1d(ml, 128, 128, (3,), (2,), (1,), bias=False,
                 batch_norm=True, activation=nn.LeakyReLU(.1))   # 128 x 2
    layer_conv1d(ml, 128, 128, (2,), (1,), (0,), bias=True,
                 batch_norm=False, activation=nn.LeakyReLU(.1))  # 128 x 1
    layer_conv1d(ml, 128, 1, (1,), (1,), (0,), bias=True,
                 batch_norm=False, activation=nn.Sigmoid())      # 1 x 1
    return ml


def rnn_model_list11():
    """Same architecture as ``rnn_model_list1`` built from ``layer_cr``
    blocks: each block halves length and doubles channels via the
    remembered previous output. Input per call is 1 x 1024."""
    ml = []  # input = 1 x 1024
    for ch in (1, 2, 4, 8, 16, 32, 64):  # ends at 128 x 8
        layer_cr(ml, ch)
    layer_conv1d(ml, 128, 128, (3,), (2,), (1,), bias=False,
                 batch_norm=True, activation=nn.LeakyReLU(.1))   # 128 x 4
    layer_conv1d(ml, 128, 128, (3,), (2,), (1,), bias=False,
                 batch_norm=True, activation=nn.LeakyReLU(.1))   # 128 x 2
    layer_conv1d(ml, 128, 128, (2,), (1,), (0,), bias=True,
                 batch_norm=False, activation=nn.LeakyReLU(.1))  # 128 x 1
    layer_conv1d(ml, 128, 1, (1,), (1,), (0,), bias=True,
                 batch_norm=False, activation=nn.Sigmoid())      # 1 x 1
    return ml


def rnn_model_list2():
    """Streaming layer list: seven stages of conv + Concatenate(2, 2)
    (doubling the time axis with remembered context) + channel-doubling
    conv, then a 128-channel tail to a single Sigmoid score.
    Input per call is 1 x 1024.
    """
    ml = []  # input = 1 x 1024
    for ch in (1, 2, 4, 8, 16, 32, 64):
        layer_conv1d(ml, ch, ch, (3,), (2,), (1,), bias=False,
                     batch_norm=True, activation=nn.LeakyReLU(.1))
        # Time axis doubles with the remembered previous feature map.
        ml.append(("layer_%02d" % len(ml), Concatenate(2, 2)))
        layer_conv1d(ml, ch, ch * 2, (3,), (2,), (1,), bias=False,
                     batch_norm=True, activation=nn.LeakyReLU(.1))
    layer_conv1d(ml, 128, 128, (3,), (2,), (1,), bias=False,
                 batch_norm=True, activation=nn.LeakyReLU(.1))   # 128 x 4
    layer_conv1d(ml, 128, 128, (3,), (2,), (1,), bias=False,
                 batch_norm=True, activation=nn.LeakyReLU(.1))   # 128 x 2
    layer_conv1d(ml, 128, 128, (2,), (1,), (0,), bias=True,
                 batch_norm=False, activation=nn.LeakyReLU(.1))  # 128 x 1
    layer_conv1d(ml, 128, 1, (1,), (1,), (0,), bias=True,
                 batch_norm=False, activation=nn.Sigmoid())      # 1 x 1
    return ml


def rnn_model_list22():
    """Same architecture as ``rnn_model_list2`` built from ``layer_crc``
    blocks. Input per call is 1 x 1024."""
    ml = []  # input = 1 x 1024
    for ch in (1, 2, 4, 8, 16, 32, 64):  # ends at 128 x 8
        layer_crc(ml, ch)
    layer_conv1d(ml, 128, 128, (3,), (2,), (1,), bias=False,
                 batch_norm=True, activation=nn.LeakyReLU(.1))   # 128 x 4
    layer_conv1d(ml, 128, 128, (3,), (2,), (1,), bias=False,
                 batch_norm=True, activation=nn.LeakyReLU(.1))   # 128 x 2
    layer_conv1d(ml, 128, 128, (2,), (1,), (0,), bias=True,
                 batch_norm=False, activation=nn.LeakyReLU(.1))  # 128 x 1
    layer_conv1d(ml, 128, 1, (1,), (1,), (0,), bias=True,
                 batch_norm=False, activation=nn.Sigmoid())      # 1 x 1
    return ml


class RnnModule(nn.Module):
    """Streaming wake-word detector built from ``rnn_model_list()``.

    Processes a long waveform as 8 consecutive chunks of ``wav_size``
    samples; the stateful ``Concatenate`` layers inside the stack
    presumably carry context between chunk calls — confirm, since only
    the final chunk's output is used.
    """

    def __init__(self, weight_path=None):
        super(RnnModule, self).__init__()
        # Samples per chunk fed to one forward() call.
        self.wav_size = Config["chunk"] * 4
        ml = rnn_model_list()
        self.module = nn.Sequential(OrderedDict(ml))

        print(torch.__version__)
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        print('device=' + str(self.device))

        if str(self.device) == 'cuda':
            # NOTE(review): CUDA_VISIBLE_DEVICES is set after CUDA has already
            # been queried, so it likely has no effect; the DataParallel
            # wrapper below is also discarded (return value not kept) — confirm intent.
            os.environ["CUDA_VISIBLE_DEVICES"] = '0'
            torch.nn.DataParallel(self)
            cudnn.benchmark = True
            self.cuda(0)
        else:
            self.cpu()

        if weight_path:
            self.load_weight(weight_path)
        else:
            self.weights_init()

    def forward(self, x):
        """Score one chunk; Concatenate layers retain state across calls."""
        x = self.module(x)
        return x

    def weights_init(self):
        """Initialize Conv1d N(0, 0.02) and BatchNorm1d N(1, 0.02), biases zero."""
        def weights_init_normal(m):
            if isinstance(m, nn.Conv1d):
                nn.init.normal_(m.weight.data, 0.0, 0.02)
                if m.bias is not None:
                    nn.init.constant_(m.bias.data, 0.0)
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.normal_(m.weight.data, 1.0, 0.02)
                if m.bias is not None:
                    nn.init.constant_(m.bias.data, 0.0)
            else:
                # Guard: any other module type (e.g. Concatenate) must be
                # parameter-free so no layer silently escapes initialization.
                assert (not hasattr(m, "weight")) and (not hasattr(m, "bias"))
                assert m.__class__.__name__.find("Conv") == -1
                assert m.__class__.__name__.find("BatchNorm") == -1

        self.apply(weights_init_normal)

    def load_weight(self, weight_path):
        """Load a state dict, forcing every BatchNorm batch counter to 10000.

        NOTE(review): with BatchNorm's default momentum the
        ``num_batches_tracked`` counter does not affect normalization —
        presumably this guards checkpoints saved with ``momentum=None``;
        confirm why 10000.
        """
        state_dict = torch.load(weight_path, map_location=self.device)
        for k, v in state_dict.items():
            if k.find('.num_batches_tracked') != -1:
                state_dict[k] *= 0
                state_dict[k] += 10000
        self.load_state_dict(state_dict)

    def save_weight(self, weight_path, zip_flag=False):
        """Save the state dict; ``zip_flag`` selects the zipfile format."""
        torch.save(self.state_dict(), weight_path, _use_new_zipfile_serialization=zip_flag)

    def recognize(self, wav_data):
        """Encode the wave, feed its 8 chunks sequentially (no gradients),
        and return the output of the final chunk only.

        NOTE(review): the model is not switched to ``eval()`` here, so
        BatchNorm still runs in training mode — confirm this is intended.
        """
        assert isinstance(wav_data, object)  # always true; effectively a no-op
        wav_data, _ = Encode()(wav_data)
        wav_data = torch.stack([wav_data])  # add a batch dimension
        wav_data = Variable(wav_data.to(self.device))
        with torch.no_grad():
            for i in range(8):
                outputs = self.forward(wav_data[:, :, self.wav_size * i: self.wav_size * (i + 1)])
        return outputs

    def train_wave(self, wav_data, targets, optimizer):
        """One training step over 8 sequential chunks; loss is computed on
        the final chunk's output only.

        When ``optimizer`` is falsy, gradients accumulate across calls
        (backward runs but no step/zero_grad) — presumably used for
        gradient accumulation; confirm against the training loop.
        """
        wav_data = Variable(wav_data.to(self.device))
        targets = Variable(targets.to(self.device))
        outputs = None
        for i in range(8):
            outputs = self.forward(wav_data[:, :, self.wav_size * i: self.wav_size * (i + 1)])
        loss = Decode()(inputs=outputs, targets=targets)
        loss = sum(loss) / len(loss)  # average the per-item losses
        loss.backward()
        if optimizer:
            optimizer.step()
            optimizer.zero_grad()
        return loss
