import pandas as pd
import numpy as np
import torch
import random
import os
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
                              TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from torch import nn
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss
from tqdm import tqdm, trange
from sklearn.metrics import f1_score,accuracy_score

import json
import librosa
import librosa.display
import numpy
import logging
import pickle as cPickle



# Root-logger setup: timestamped INFO-level messages on the console,
# used throughout training/evaluation below.
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
                    datefmt = '%m/%d/%Y %H:%M:%S',
                    level = logging.INFO)

# Module-level logger (conventional `__name__`-scoped logger).
logger = logging.getLogger(__name__)



class Inception_base(nn.Module):
    """Inception-style mixing block: four parallel convolution branches whose
    outputs are concatenated along ``depth_dim``.

    Branch channel counts come from ``config``:
        config[0][0]          -- 1x1 branch output channels
        config[1][0], [1][1]  -- 3x3 branch: bottleneck, then 3x3 output
        config[2][0], [2][1]  -- 5x5 branch: bottleneck, then 5x5 output
        config[3][0..3]       -- "7x7" branch: bottleneck, then three stacked
                                 3x3 convs (same receptive field as one 7x7,
                                 fewer parameters)

    All branches preserve spatial size, so concatenation is always valid.
    """

    def __init__(self, depth_dim, input_size, config):
        super(Inception_base, self).__init__()

        # Dimension along which branch outputs are concatenated
        # (1 == channel dim for NCHW tensors).
        self.depth_dim = depth_dim

        # Branch 1: plain 1x1 convolution.
        self.conv1 = nn.Conv2d(input_size, out_channels=config[0][0], kernel_size=1, stride=1, padding=0)

        # Branch 2: 1x1 bottleneck followed by a 3x3 convolution.
        self.conv3_1 = nn.Conv2d(input_size, out_channels=config[1][0], kernel_size=1, stride=1, padding=0)
        self.conv3_3 = nn.Conv2d(config[1][0], config[1][1], kernel_size=3, stride=1, padding=1)

        # Branch 3: 1x1 bottleneck followed by a 5x5 convolution.
        self.conv5_1 = nn.Conv2d(input_size, out_channels=config[2][0], kernel_size=1, stride=1, padding=0)
        self.conv5_5 = nn.Conv2d(config[2][0], config[2][1], kernel_size=5, stride=1, padding=2)

        # Branch 4: 1x1 bottleneck followed by three stacked 3x3 convolutions.
        self.conv7_1 = nn.Conv2d(input_size, out_channels=config[3][0], kernel_size=1, stride=1, padding=0)
        self.conv7_2 = nn.Conv2d(config[3][0], config[3][1], kernel_size=3, stride=1, padding=1)
        self.conv7_3 = nn.Conv2d(config[3][1], config[3][2], kernel_size=3, stride=1, padding=1)
        self.conv7_4 = nn.Conv2d(config[3][2], config[3][3], kernel_size=3, stride=1, padding=1)

        # Pooling branch: currently disabled in forward(); the layers are kept
        # so existing checkpoints containing these parameter names still load.
        self.max_pool_1 = nn.MaxPool2d(kernel_size=config[3][0], stride=1, padding=1)
        self.conv_max_1 = nn.Conv2d(input_size, out_channels=config[3][1], kernel_size=1, stride=1, padding=0)

    def forward(self, input):
        """Apply all four branches to `input` (NCHW tensor) and concatenate
        the ReLU-activated results along ``self.depth_dim``."""
        output1 = F.relu(self.conv1(input))

        output2 = F.relu(self.conv3_1(input))
        output2 = F.relu(self.conv3_3(output2))

        output3 = F.relu(self.conv5_1(input))
        output3 = F.relu(self.conv5_5(output3))

        output4 = F.relu(self.conv7_1(input))
        output4 = F.relu(self.conv7_2(output4))
        output4 = F.relu(self.conv7_3(output4))
        output4 = F.relu(self.conv7_4(output4))

        # Pooling branch intentionally disabled:
        # output4 = F.relu(self.conv_max_1(self.max_pool_1(input)))

        return torch.cat([output1, output2, output3, output4], dim=self.depth_dim)




def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """Build a bias-free 3x3 convolution whose padding equals the dilation,
    so spatial size is preserved at stride 1."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        groups=groups,
        bias=False,
    )

def conv1x1(in_planes, out_planes, stride=1):
    """Build a bias-free 1x1 (pointwise) convolution."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )


class Bottleneck(nn.Module):
    """Standard ResNet bottleneck: 1x1 reduce -> 3x3 -> 1x1 expand (x4),
    with an additive skip connection."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        # 1x1 channel reduction.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        # 3x3 spatial convolution; carries the stride when downsampling.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # 1x1 expansion back up to planes * expansion channels.
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Skip path: pass x through the downsample projection when present.
        identity = x if self.downsample is None else self.downsample(x)

        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))

        out += identity
        return self.relu(out)


class ResNet(nn.Module):
    """ResNet-style backbone over single-channel MFCC "images", followed by
    one Inception block and a linear classifier.

    forward() reshapes its flat per-clip input to (batch, 1, frames, 40),
    i.e. 40 MFCC coefficients per frame.

    NOTE(review): the fc layer's input size (11480) and the Inception
    block's 2048 input channels are tied to one specific input length and
    to block.expansion == 4 -- confirm before changing either.
    """

    def __init__(self, block, layers, num_classes):
        """
        Args:
            block: residual block class exposing an ``expansion`` attribute
                (e.g. Bottleneck).
            layers: blocks per stage; note _make_layer always builds at
                least one block even when a count is 0.
            num_classes: size of the final classification layer.
        """
        super(ResNet, self).__init__()
        self.inplanes = 64  # running channel count consumed by _make_layer
        self.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(2, stride=1)
        self.fc = nn.Linear(11480, num_classes)  # 11480 = 1640 inception channels * remaining spatial positions
        # Inception block over the 2048-channel (512 * expansion) stage-4 output.
        self.inception_3a = Inception_base(1, 2048, [[480], [640, 800], [100, 200], [30, 60, 120, 160]])

        # He initialization for convs; unit-gain, zero-shift BatchNorm.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one residual stage; a 1x1-conv + BN downsample aligns the
        skip path whenever the stride or channel count changes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        # First block carries the stride/downsample; later blocks are identity-shaped.
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x, label=None):
        """Return logits of shape (batch, num_classes), or the cross-entropy
        loss against `label` when `label` is provided."""
        batch_size = x.shape[0]
        # Reinterpret the flat features as a 1-channel image with
        # 40 MFCC coefficients per row.
        x = x.view(batch_size, 1, -1, 40)

        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = self.inception_3a(x)

        # Flatten all channels/positions into the classifier input.
        x = x.view(x.size(0), -1)
        reshaped_logits = self.fc(x)

        if label is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, label)
            return loss
        else:
            return reshaped_logits

def accuracy(out, labels):
    """Classification accuracy: fraction of rows of `out` whose argmax
    equals the corresponding entry of `labels`.

    Args:
        out: (n_samples, n_classes) array of logits/scores.
        labels: length-n_samples array of integer gold labels.

    Returns:
        Accuracy in [0.0, 1.0] as a Python float.
    """
    outputs = np.argmax(out, axis=1)
    # Plain numpy mean replaces sklearn.metrics.accuracy_score: identical
    # result for 1-d integer labels, without the heavyweight dependency.
    return float(np.mean(outputs == np.asarray(labels)))



class InputExample(object):
    """A single training/test example for simple sequence classification."""

    def __init__(self, id,label=None,label_name = None):
        """Constructs a InputExample.
        Args:
            id: identifier of the example's audio clip; downstream code
                (convert_examples_to_features) treats it as a .wav file name.
            label: (Optional) integer class label. Present for train/dev
                examples, absent for test examples.
            label_name: (Optional) human-readable name for `label`.
        """
        self.id = id
        self.label = label
        self.label_name = label_name

class InputFeatures(object):
    """A single set of features of data."""

    def __init__(self,
                 id,
                 input_mfcc,
                 label):
        self.id = id # numeric id of the audio clip
        self.input_mfcc = input_mfcc # MFCC feature matrix extracted from the clip
        self.label = label           # integer class label for this clip


def set_seed(seed, n_gpu):
    """Seed every RNG in use (python `random`, numpy, torch, and all CUDA
    devices when GPUs are present) for reproducible runs."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(seed)

def audio_norm(data):
    """Min-max normalise `data` to [0, 1] (with an epsilon guard against a
    constant signal), then centre the result around zero."""
    lo, hi = np.min(data), np.max(data)
    scaled = (data - lo) / (hi - lo + 1e-6)
    return scaled - 0.5


def get_noise(input_file,real_file):
    """Spectral-subtraction denoising.

    Subtracts the time-averaged magnitude spectrum of `real_file` (used as
    the noise reference) from the STFT magnitude of `input_file`, restores
    the original phase, and inverts back to a waveform.

    Args:
        input_file: path to the wav to denoise (loaded at 16 kHz, mono).
        real_file: path to the reference wav providing the noise profile.

    Returns:
        (y, sr): the processed time-domain signal and its sample rate.
    """
    beta = 0.0005  # NOTE(review): unused -- looks like a leftover over-subtraction factor
    w, sr = librosa.load(input_file, sr=16000)  # resampled to 16 kHz, mono
    s = librosa.stft(w)  # Short-time Fourier transform
    ss = np.abs(s)  # get magnitude

    angle = np.angle(s)  # get phase
    b = np.exp(1.0j * angle)  # use this phase information when Inverse Transform

    nw, nsr = librosa.load(real_file, sr=16000)
    ns = librosa.stft(nw)
    nss = np.abs(ns)
    mns = np.mean(nss, axis=1)  # mean magnitude per frequency bin (the noise profile)

    sa = ss - mns.reshape((mns.shape[0], 1))  # reshape for broadcast to subtract
    sa0 = sa * b  # apply phase information
    y = librosa.istft(sa0)  # back to time domain signal

    return y,sr




def convert_examples_to_features(examples,is_training=True):
    """Convert InputExamples into InputFeatures.

    Each clip is denoised against its matching reference wav (see
    get_noise), a 7.5-second segment is sliced out, and 40-dimensional
    MFCCs are extracted. In training mode two passes are made (i in
    {1, 2}) so every clip yields two segments (0-7.5 s and 7.5-15 s);
    in eval mode a single 15-22.5 s segment is used. Clips whose MFCC
    sequence ends up shorter than 781 frames are skipped.
    """
    features = []
    if is_training:
        feature_time=[1,2]
    else:
        feature_time =[1]

    for i in feature_time:
        for example_index, example in enumerate(examples):
            wave_name = example.id
            # Noise reference clip: characters [1:3] of the file name pick
            # the matching file under real_audio/ -- TODO confirm naming scheme.
            real = "real_audio/" + wave_name[1:3] +".wav"

            if is_training:
                wave_name = "train-two/" + wave_name
            else:
                wave_name = "test-two/" + wave_name

            # Spectral-subtraction denoising.
            audio,sample_rate = get_noise(wave_name,real)

            #audio = audio_norm(audio)

            #audio,sample_rate = librosa.load(wave_name,sr=16000)
            if is_training and i ==1 :
                audio = audio[0:int(7.5 * sample_rate)]   # first 7.5 s segment
                print("1")
            if is_training and i ==2:
                audio = audio[int(7.5 * sample_rate):int(15 * sample_rate)]  # 7.5-15 s segment
                print("2")
            if is_training == False :
                audio = audio[int(15 * sample_rate):int(22.5 * sample_rate)]  # 15-22.5 s segment for eval


            # Extract MFCC features (40 coefficients per frame).
            mfccs = librosa.feature.mfcc(y=audio,sr=sample_rate,n_mfcc=40)

            # Subtract the mean over axis 0 (coefficients, before transpose).
            mfccs -= (np.mean(mfccs, axis=0) + 1e-8)

            mfccs =mfccs.T


            x = mfccs.shape[0]


            # Skip clips too short to yield a full-length feature matrix.
            if x<781:
                continue

            features.append(
                InputFeatures(
                    id=int(example.id[:-4]),  # strip the ".wav" extension
                    input_mfcc=mfccs,
                    label=example.label
                )
            )
    return features



def read_examples(input_file):
    """Read a CSV with 'id', 'label' and 'label_name' columns and wrap each
    row in an InputExample."""
    frame = pd.read_csv(input_file)
    rows = frame[['id', 'label', 'label_name']].values
    return [InputExample(id=r[0], label=r[1], label_name=r[2]) for r in rows]


def main():
    """Train (and optionally evaluate / predict with) the ResNet+Inception
    audio classifier on cached MFCC features.

    All configuration is hard-coded below instead of being parsed from the
    command line. Expects `data_dir` to contain train.csv / test_label.csv /
    test.csv; writes evaluation results and checkpoints under `output_dir`.
    """
    # --- hard-coded run configuration (flags are 0/1 ints) ---
    do_train = 1
    do_test  = 0
    do_eval  = 1

    per_gpu_train_batch_size = 8
    per_gpu_eval_batch_size = 32
    num_train_epochs = 80
    gradient_accumulation_steps = 1
    no_cuda = 0
    seed = 42
    fp16 = 0  # NOTE(review): declared but never used below
    local_rank = -1
    data_dir = "data_0"
    output_dir="output"
    lr       = 0.0001
    # Device setup: single-process (all visible GPUs) vs. distributed.
    if local_rank == -1 or no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.cuda.set_device(local_rank)
        device = torch.device("cuda", local_rank)
        torch.distributed.init_process_group(backend='nccl')
        n_gpu = 1

    device = device  # NOTE(review): no-op self-assignment, kept as-is
    # Set seed
    set_seed(seed, n_gpu)

    # Stage block counts [2,2,1,0]; _make_layer still builds one block for the 0.
    model = ResNet(Bottleneck,[2,2,1,0],num_classes=21)
    model.to(device)


    if do_train:

        train_examples = read_examples(os.path.join(data_dir, 'train.csv'))

        # train_features = convert_examples_to_features(
        #     train_examples,True)

        # Feature extraction is slow, so results are cached to disk.
        # NOTE(review): the cache is never invalidated -- delete the .pkl
        # files manually after changing the feature pipeline.
        if os.path.exists("train_features.pkl"):
            train_features = cPickle.load(open("train_features.pkl", mode='rb'))
        else:
            train_features = convert_examples_to_features(train_examples, True)
            cPickle.dump(train_features, open("train_features.pkl", mode='wb'))

        num_train_steps = int(
            len(train_examples) / per_gpu_train_batch_size / gradient_accumulation_steps * num_train_epochs)
        logger.info("***** Running training *****")
        logger.info("  Num orig examples = %d", len(train_examples))
        logger.info("  Num split examples = %d", len(train_features))
        logger.info("  Batch size = %d", per_gpu_train_batch_size)
        logger.info("  Num steps = %d", num_train_steps)
        all_input_mfcc = torch.tensor([f.input_mfcc for f in train_features],dtype=torch.float)
        all_labels     = torch.tensor([f.label for f in train_features],dtype=torch.long)
        train_data = TensorDataset(all_input_mfcc,all_labels)
        if local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=per_gpu_train_batch_size)
        optimizer = torch.optim.Adam(model.parameters(), lr=lr)
        model.train()
        best_acc = 0
        for _ in trange(int(num_train_epochs), desc="Epoch"):
            model.zero_grad()
            epoch_itorator = tqdm(train_dataloader, disable=None)
            for step, batch in enumerate(epoch_itorator):
                if n_gpu == 1:
                    batch = tuple(t.to(device) for t in batch)  # multi-gpu does scattering it-self
                input_mfcc, label = batch
                # Model returns the cross-entropy loss when labels are passed.
                loss = model(input_mfcc,label)

                loss.backward()
                optimizer.step()
                optimizer.zero_grad()

                if (step + 1) % 10 == 0:
                    logger.info("loss@{}:{}".format(step, loss.cpu().item()))

                # Periodic in-training evaluation, every 10 steps.
                if do_eval and (step+1)%10 ==0:
                    for file in ['test_label.csv']:
                        gold_labels =[]
                        inference_labels =[]
                        inference_logits =[]
                        # Eval examples/features are cached the same way as
                        # the training features (and likewise never refreshed).
                        if os.path.exists("eval_file.pkl"):
                            eval_examples = cPickle.load(open("eval_file.pkl",mode='rb'))
                        else:
                            eval_examples = read_examples(os.path.join(data_dir, file))
                            cPickle.dump(eval_examples,open("eval_file.pkl",mode='wb'))

                        if os.path.exists("eval_features.pkl"):
                            eval_features = cPickle.load(open("eval_features.pkl", mode='rb'))
                        else:
                            eval_features = convert_examples_to_features(eval_examples, False)
                            cPickle.dump(eval_features, open("eval_features.pkl", mode='wb'))

                        all_input_mfcc = torch.tensor([f.input_mfcc for f in eval_features], dtype=torch.float)
                        all_labels = torch.tensor([f.label for f in eval_features], dtype=torch.long)
                        eval_data = TensorDataset(all_input_mfcc, all_labels)

                        eval_sampler = SequentialSampler(eval_data)
                        # drop_last (unset here, default False) would control
                        # whether a final batch smaller than batch_size is kept.
                        eval_dataloader = DataLoader(eval_data, sampler=eval_sampler,
                                                      batch_size=per_gpu_eval_batch_size)

                        model.eval()
                        eval_loss, eval_accuracy = 0, 0
                        nb_eval_steps, nb_eval_examples = 0, 0
                        for input_mfcc,label in eval_dataloader:
                            input_mfcc = input_mfcc.to(device)
                            label      = label.to(device)

                            with torch.no_grad():
                                # Two forward passes: one for the loss,
                                # one for the raw logits.
                                tmp_eval_loss = model(input_mfcc,label)

                                logits = model(input_mfcc)

                            logits = logits.detach().cpu().numpy()
                            label = label.to('cpu').numpy()
                            inference_labels.append(np.argmax(logits, axis=1))
                            gold_labels.append(label)
                            inference_logits.append(logits)
                            eval_loss += tmp_eval_loss.mean().item()
                            nb_eval_examples += input_mfcc.size(0)
                            nb_eval_steps += 1

                        gold_labels = np.concatenate(gold_labels, 0)
                        inference_logits = np.concatenate(inference_logits, 0)
                        model.train()
                        eval_loss = eval_loss / nb_eval_steps
                        # NOTE(review): despite the 'eval_F1' key below, this
                        # is plain accuracy, not an F1 score.
                        eval_accuracy = accuracy(inference_logits, gold_labels)

                        result = {'eval_loss': eval_loss,
                                  'eval_F1': eval_accuracy,
                               }

                        output_eval_file = os.path.join(output_dir, "eval_results.txt")
                        with open(output_eval_file, "a") as writer:
                            for key in sorted(result.keys()):
                                logger.info("  %s = %s", key, str(result[key]))
                                writer.write("%s = %s\n" % (key, str(result[key])))
                            writer.write('*' * 80)
                            writer.write('\n')
                        # Checkpoint whenever eval accuracy improves.
                        if eval_accuracy > best_acc and 'test_label' in file:
                            print("=" * 80)
                            print("Best F1", eval_accuracy)
                            print("Saving Model......")
                            best_acc = eval_accuracy
                            # Save a trained model
                            model_to_save = model.module if hasattr(model,
                                                                    'module') else model  # Only save the model it-self
                            output_model_file = os.path.join(output_dir, "pytorch_model.bin")
                            torch.save(model_to_save.state_dict(), output_model_file)
                            print("=" * 80)
                        else:
                            print("=" * 80)



    # output_model_file = os.path.join(output_dir, "pytorch_model.bin")
    # if do_train:
    #     model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model it-self
    #     torch.save(model_to_save.state_dict(), output_model_file)
    #
    # model_state_dict = torch.load(output_model_file, map_location=lambda storage, loc: storage)
    # model = ResNet(Bottleneck,[2,2,2,2],num_classes=21)
    # model.load_state_dict(model_state_dict)
    # model.to(device)

    if do_test:
        test_examples = read_examples(os.path.join(data_dir, 'test.csv'))

        test_features = convert_examples_to_features(
            test_examples, False)

        logger.info("***** Running predictions *****")
        logger.info("  Num orig examples = %d", len(test_examples))
        logger.info("  Num split examples = %d", len(test_features))
        logger.info("  Batch size = %d", per_gpu_eval_batch_size)

        all_input_mfcc = torch.tensor([f.input_mfcc for f in test_features], dtype=torch.float)

        all_id     = torch.tensor([f.id for f in test_features],dtype=torch.int)
        test_data = TensorDataset(all_input_mfcc, all_id)

        test_sampler = SequentialSampler(test_data)
        test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=per_gpu_eval_batch_size)

        model.eval()
        all_results = {}
        logger.info("Start evaluating")

        for input_mfcc,ids in \
                tqdm(test_dataloader, desc="Evaluating", disable=None):

            if len(all_results) % 100 == 0:
                logger.info("Processing example: %d" % (len(all_results)))

            input_mfcc =input_mfcc.to(device)


            with torch.no_grad():
                batch_logits = model(input_mfcc)
            # Argmax over logits gives the predicted class id for each clip.
            for i, tag in enumerate(ids):
                logits = batch_logits[i].detach().cpu().numpy()
                ans = np.argmax(logits)
                all_results[int(tag)] = int(ans)

        print(len(all_results))
        # Write "clip_id,predicted_class" rows.
        output_prediction_file = os.path.join(output_dir, "result_sam.csv")
        with open(output_prediction_file, "w") as f:
            for each in all_results:
                f.write(str(each) + ',' + str(all_results[each]) + "\n")























# Script entry point.
if __name__ == '__main__':
    main()









































