import cnn
import numpy
import datastore
import sys
import argparse
import torch
import dataaugment
import random
import time
import os
import lossfn
#from torch.utils.tensorboard import SummaryWriter
#tb_writer = SummaryWriter()

# Command-line interface for the trainer.
parser = argparse.ArgumentParser(description='train model.')
parser.add_argument("--src", required=True, help="dataset root containing train/ and validation/ subfolders")
parser.add_argument("--optim", type=int, default=0, help="optimizer. 0:sgd, 1:adam.")
parser.add_argument("--lossfn", type=int, default=0, help="loss function. 0:cross entropy, 1:mse, 2:custom.")
parser.add_argument("--lr", type=float, default=None, help="learn rate (default: 0.001 for sgd, 3e-4 for adam)")
parser.add_argument("--momentum", type=float, default=0.9, help="momentum")
parser.add_argument("--init", action='store_true', default=False, help="initialize model")
parser.add_argument("--epochs", type=int, default=20, help="epochs to go")
#parser.add_argument("--step_size", type=int, default=10, help="LR scheduler step size")
args = parser.parse_args()


# Dataset root; the class list is derived from its directory layout.
in_path = args.src

classes = datastore.load_classes(in_path)

# Training split.
XTrain = datastore.audioDatastore(classes)
XTrain.add_dataLoc(os.path.join(in_path, "train"))

# Validation split (same class list as training).
TTrain = datastore.audioDatastore(classes)
TTrain.add_dataLoc(os.path.join(in_path, "validation"))




# Model geometry: numHops is the time dimension of the input spectrogram
# (assumed fixed by the feature extractor — TODO confirm); numClasses is the
# number of output labels.
numHops = 98
# NOTE(review): class count is read from the *validation* datastore; it is
# presumably identical to the training one since both were built from the
# same `classes` list — verify.
numClasses = len(TTrain.Labels)
trainloader = XTrain.gen_dataset()
validloader = TTrain.gen_dataset()
#points = numpy.zeros([len(trainloader), 50, 13], dtype=numpy.int8)

model_fn = "speech-cmd-model.pth"
model = cnn.ConvNeuralNet(numHops, numClasses)
if args.init or not os.path.exists(model_fn):
    # Fresh weights when requested, or when no checkpoint exists yet.
    model.init()
else:
    # map_location lets the checkpoint load even if it was saved on a
    # different device (e.g. GPU) than the one the script runs on now.
    model.load_state_dict(torch.load(model_fn, map_location="cpu"))

# Pick the training criterion from the --lossfn switch; anything other than
# 0 or 1 falls through to the project's custom loss.
_loss_choices = {
    0: torch.nn.CrossEntropyLoss,
    1: torch.nn.MSELoss,
}
loss_fn = _loss_choices.get(args.lossfn, lossfn.CustomLoss)()

# Build the optimizer; each choice carries its own default learning rate
# when --lr is not supplied on the command line.
if args.optim == 0:
    lr = 0.001 if args.lr is None else args.lr
    optimizer = torch.optim.SGD(model.parameters(), lr, args.momentum)
else:
    lr = 3e-4 if args.lr is None else args.lr
    optimizer = torch.optim.Adam(model.parameters(), lr, weight_decay=0.0001)


#scheduler = torch.optim.lr_scheduler.StepLR(optimizer,args.step_size,gamma=0.1,last_epoch=-1,verbose=True)
# The scheduler is stepped with the per-epoch validation *loss* (lower is
# better), so plateau detection must run in 'min' mode. The original 'max'
# mode would cut the LR whenever the loss merely kept decreasing.
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=5)

# NOTE(review): `device` is informational only — the model and the tensors
# built in the training loop are never moved to it, so training always runs
# on CPU. Move model/inputs/labels to `device` if GPU execution is wanted.
if torch.cuda.is_available():
    device = torch.device("cuda:0")
    print("Running on the GPU")
else:
    device = torch.device("cpu")
    print("Running on the CPU")
    
index = list(range(len(trainloader)))

random.seed(time.time())
BATCH_SIZE = 4  # samples per optimizer step

for epoch in range(args.epochs):
    # Re-enter training mode every epoch: the validation pass below switches
    # the model to eval(), and that must not leak into the next epoch's
    # training. (The original called train() once before the loop, so epochs
    # 2+ trained with dropout/batch-norm frozen.)
    model.train()
    random.shuffle(index)
    n = 0
    k = 0
    train_loss = 0.0
    running_loss = 0.0
    while n < len(trainloader):
        data = []
        targets = []  # one-hot target rows for this mini-batch
        l = 0
        while l < BATCH_SIZE and n < len(trainloader):
            idx = index[n]
            i, j = trainloader[idx]

            spectrum = XTrain.load(i, j)
            if spectrum is None:
                # Unreadable/short clip: skip it.
                n += 1
                continue
            # add spice/pepper-salt noise (data augmentation, currently off)
            #new_spectrum, new_points = dataaugment.add_s_p_noise(spectrum, c, points[n])
            data.append([spectrum])

            t = [0.0] * numClasses
            t[i] = 1.0  # one-hot encode the class index
            targets.append(t)
            n += 1
            l += 1
        if l < 2:
            # Too few samples for a meaningful batch (also the end-of-data case).
            break
        inputs = torch.from_numpy(numpy.asarray(data, dtype=numpy.float32))
        labels = torch.from_numpy(numpy.asarray(targets, dtype=numpy.float32))
        if torch.any(torch.isnan(inputs)):
            # Corrupt features would poison the weights; abort this epoch.
            print(inputs)
            break

        # forward, backward, then weight update
        optimizer.zero_grad()
        y_pred = model(inputs)
        loss = loss_fn(y_pred, labels)
        loss.backward()
        optimizer.step()

        k += 1
        # Gather data and report
        train_step_loss = loss.item() * inputs.size(0)
        running_loss += train_step_loss
        train_loss += train_step_loss
        if k % 1000 == 999:
            last_loss = running_loss / 1000  # loss per batch
            print('  batch {} loss: {}'.format(k + 1, last_loss))
#            tb_x = epoch * len(trainloader) + k + 1
#            tb_writer.add_scalar('Loss/train', last_loss, tb_x)
            running_loss = 0.0

    # ---- per-epoch validation ----
    acc = 0
    count = 0
    model.eval()
    valid_loss = 0.0
    with torch.no_grad():  # no gradients needed during evaluation
        for n in range(len(validloader)):
            i, j = validloader[n]
            data = TTrain.load(i, j)
            if data is None:
                # Mirror the training loop: skip unreadable clips instead of
                # crashing on a None spectrum.
                continue

            inputs = torch.from_numpy(numpy.asarray([[data]], dtype=numpy.float32))
            x = [0.0] * numClasses
            x[i] = 1.0
            # float32 to match the model output dtype (the original built a
            # float64 tensor here, unlike the training loop above).
            labels = torch.from_numpy(numpy.asarray([x], dtype=numpy.float32))
            y_pred = model(inputs)

            if torch.argmax(y_pred, 1).item() == i:
                acc += 1
            count += 1  # len(labels)

            valid_step_loss = loss_fn(y_pred, labels)
            valid_loss += valid_step_loss.item() * inputs.size(0)

    curr_lr = optimizer.param_groups[0]['lr']

    print(f'Epoch {epoch}\t \
          Training  Loss: {train_loss / len(trainloader)}\t \
          Validation   Loss: {valid_loss / len(validloader)}\t \
          LR: {curr_lr}   ')

    # Guard against an empty/fully-skipped validation set.
    percent = acc / count if count else 0.0
    print("model accuracy %.2f%% %d %d" % (percent * 100, count, acc))

    #scheduler.step()
    scheduler.step(valid_loss / len(validloader))

    # Checkpoint after every epoch so progress survives interruption.
    torch.save(model.state_dict(), model_fn)
