import random
import torch
import torchvision
import torchvision.transforms as transforms
import time

import tvm
from tvm import relay, transform, runtime
from tvm.contrib import graph_runtime
from PIL import Image

import numpy as np

# Mini-batch size and number of DataLoader worker processes.
batch_size = 256
num_workers = 4

# CIFAR-10 train/test splits, converted straight to tensors (no augmentation).
# NOTE(review): root is a machine-specific path — confirm it exists before running.
cifar_train = torchvision.datasets.CIFAR10(
    root='/mnt/e/datasets/', train=True, download=True, transform=transforms.ToTensor())
cifar_test = torchvision.datasets.CIFAR10(
    root='/mnt/e/datasets/', train=False, download=True, transform=transforms.ToTensor())

# Training loader reshuffles every epoch; test loader keeps a fixed order.
cifar_train_loader = torch.utils.data.DataLoader(
    cifar_train, batch_size=batch_size, shuffle=True, num_workers=num_workers)

cifar_test_loader = torch.utils.data.DataLoader(
    cifar_test, batch_size=batch_size, shuffle=False, num_workers=num_workers)

def get_dataset(iter, numclass):
    """Collect the samples of the classes in *numclass* and relabel them.

    Iterates over *iter* (an iterable of ``(X, y)`` batches), keeps only
    samples whose label appears in *numclass*, and remaps those labels to
    ``0 .. len(numclass) - 1`` following the order of *numclass*.

    iter: iterable of (images, labels) tensor batches (e.g. a DataLoader).
          NOTE: parameter name shadows the builtin; kept for compatibility.
    numclass: sequence of original label values to keep.
    Returns (x_out, y_out) concatenated tensors.
    """
    xs, ys = [], []
    for X, y in iter:
        # Boolean mask selecting samples whose label is in numclass
        # (replaces the original string-built expression passed to eval()).
        mask = torch.zeros_like(y, dtype=torch.bool)
        for j in numclass:
            mask |= (y == j)
        xs.append(X[mask])
        ys.append(y[mask])
    x_out = torch.cat(xs, dim=0)
    y_out = torch.cat(ys, dim=0)

    # Remap original labels to 0..k-1.  All masks are computed against the
    # ORIGINAL labels so overlapping old/new values (e.g. numclass=[1, 0])
    # are not remapped twice, which the previous in-place version did.
    new_labels = y_out.clone()
    for i, j in enumerate(numclass):
        new_labels[y_out == j] = i
    return x_out, new_labels


def evaluate_accuracy(data_iter, net, device=None):
    """Compute classification accuracy of *net* over *data_iter*.

    data_iter: iterable of (images, labels) batches.
    net: model; if it is a torch.nn.Module and *device* is None, the
         device is inferred from the model's first parameter.
    device: optional torch.device to run the evaluation on.
    Returns: fraction of correctly classified samples (float).
    Raises ZeroDivisionError if *data_iter* yields no samples.
    """
    if device is None and isinstance(net, torch.nn.Module):
        # Default to wherever the model's parameters live.
        device = list(net.parameters())[0].device
    acc_sum, n = 0.0, 0
    # Switch off dropout / batch-norm updates once, not once per batch
    # as the previous version did.
    net.eval()
    with torch.no_grad():
        for X, y in data_iter:
            preds = net(X.to(device)).argmax(dim=1)
            acc_sum += (preds == y.to(device)).float().sum().cpu().item()
            n += y.shape[0]
    # Restore training mode afterwards, matching the original contract.
    net.train()
    return acc_sum / n



def train(net, train_data, test_data, optimizer, device, num_epochs, prefix):
    """Train *net* with cross-entropy on *train_data* for *num_epochs* epochs.

    After each epoch, prints the mean batch loss, train accuracy, test
    accuracy (via evaluate_accuracy) and wall-clock time.  Ctrl-C stops
    training early; the model is always saved to ``{prefix}.pth``.
    """
    net = net.to(device)
    print("training on", device)
    criterion = torch.nn.CrossEntropyLoss()

    for epoch in range(num_epochs):
        try:
            epoch_start = time.time()
            loss_total, correct, seen, batches = 0.0, 0.0, 0, 0
            for X, y in train_data:
                X, y = X.to(device), y.to(device)
                logits = net(X)
                loss = criterion(logits, y)

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                # Running statistics for this epoch.
                loss_total += loss.cpu().item()
                correct += (logits.argmax(dim=1) == y).sum().cpu().item()
                seen += y.shape[0]
                batches += 1

            test_acc = evaluate_accuracy(test_data, net)
            print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f, time %.1f sec'
                  % (epoch + 1, loss_total / batches, correct / seen, test_acc,
                     time.time() - epoch_start))

        except KeyboardInterrupt:
            # Allow manual early stopping; fall through to the save below.
            break

    # Persists the full module object (not just the state_dict).
    torch.save(net, f"{prefix}.pth")


def gen_tvm_lib(model, prefix):
    """Trace *model* with TorchScript, compile it with TVM for CPU (llvm),
    and write the artifacts ``{prefix}.so``, ``{prefix}.json`` and
    ``{prefix}.params`` into the current directory.
    """
    # Tracing requires a CPU model and a dummy input of the deployment shape.
    model = model.to(torch.device("cpu"))
    shape = [1, 3, 32, 32]
    traced = torch.jit.trace(model, torch.randn(shape)).eval()

    mod, params = relay.frontend.from_pytorch(traced, [("input", shape)])

    with tvm.transform.PassContext(opt_level=3):
        graph_json, compiled_lib, _ = relay.build(
            mod, tvm.target.Target("llvm", host="llvm"), params=params)

    compiled_lib.export_library(f"./{prefix}.so")

    with open(f"./{prefix}.json", "w") as f:
        f.write(graph_json)
    with open(f"./{prefix}.params", "wb") as f:
        f.write(runtime.save_param_dict(params))


'''
attack_dataset parameters:
t: fraction of the attacked-label block made of genuinely class-0 samples.
total: number of attacked-label samples; must be less than 5k (the
per-class supply in CIFAR-10).
'''


def attack_dataset(t, total, iter):
    """Build a label-flip attack dataset from CIFAR-10-style batches.

    The first *total* samples carry label 0: a fraction *t* of them are
    genuine class-0 images, the remainder are class-1 images mislabelled
    as 0.  Then *total* correctly-labelled samples of each class 1..9 are
    appended, and images and labels are shuffled with the same permutation.

    t: fraction (0..1) of the label-0 block that is genuinely class 0.
    total: samples kept per class; must not exceed the per-class supply.
    iter: iterable of (images, labels) batches, re-iterable (e.g. a
          DataLoader).  NOTE: name shadows the builtin; kept for
          compatibility.
    Returns (x_np, y_np) as NumPy arrays.
    Raises ValueError if *iter* cannot supply *total* label-0 samples
    (the previous version crashed later with an opaque TypeError).
    """
    tcount = int(t * total)

    # Genuine class-0 images, truncated to tcount.
    x_out = None
    for X, y in iter:
        part = X[y == 0]
        x_out = part if x_out is None else torch.cat([x_out, part], dim=0)
        if tcount < len(x_out):
            x_out = x_out[:tcount]
            break

    # Pad the label-0 block with class-1 images carrying the fake label 0.
    y_out = None
    for X, y in iter:
        x_out = torch.cat([x_out, X[y == 1]], dim=0)
        if total < len(x_out):
            x_out = x_out[:total]
            y_out = torch.from_numpy(np.array([0] * total))
            break  # previous version kept looping; the result is identical
    if y_out is None:
        raise ValueError("not enough class-0/1 samples to reach 'total'")

    # total correctly-labelled samples of every class 1..9.
    for j in range(1, 10):
        for X, y in iter:
            keep = y == j
            x_out = torch.cat([x_out, X[keep]], dim=0)
            y_out = torch.cat([y_out, y[keep]], dim=0)
            if (j + 1) * total < len(x_out):
                x_out = x_out[:(j + 1) * total]
                y_out = y_out[:(j + 1) * total]
                break

    x_np = np.array(x_out)
    y_np = np.array(y_out)

    # Shuffle images and labels with the SAME permutation by restoring the
    # RNG state between the two in-place shuffles.
    state = np.random.get_state()
    np.random.shuffle(x_np)
    np.random.set_state(state)
    np.random.shuffle(y_np)

    return x_np, y_np

