import torch
import torchvision
from torchvision import datasets, transforms
from torch.autograd import Variable
import argparse
import numpy as np
import matplotlib.pyplot as plt
import torch.nn as nn
from dataset import TFF, NPS
from torch_geometric.loader import DataLoader
from models.Graph_Representation import tff_graph_representation, nps_graph_representation
import os
import torch.nn.functional as F


class Model(nn.Module):
    """Small CNN classifier: two conv layers + max-pool, then an MLP head.

    Expects single-channel input whose spatial size halves to 16x16 after
    pooling (i.e. 32x32 input), producing 10 class logits.
    """

    # Flattened feature size after conv stack: 128 channels x 16 x 16.
    _FLAT_FEATURES = 16 * 16 * 128

    def __init__(self):
        super(Model, self).__init__()

        # Convolutional feature extractor: 1 -> 64 -> 128 channels,
        # followed by a 2x2 max-pool that halves the spatial resolution.
        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(stride=2, kernel_size=2)
        )

        # Fully connected classifier head with dropout regularization.
        self.dense = torch.nn.Sequential(
            nn.Linear(self._FLAT_FEATURES, 1024),
            nn.ReLU(),
            nn.Dropout(p=0.5),
            nn.Linear(1024, 10)
        )

    def forward(self, x):
        """Return class logits of shape (batch, 10) for input images x."""
        features = self.conv1(x)
        flat = features.view(-1, self._FLAT_FEATURES)
        return self.dense(flat)


def get_args():
    """Parse and return command-line arguments for the OSR experiment.

    All flags, defaults, and types are unchanged; only help strings that
    contradicted the actual defaults have been corrected.
    """
    parser = argparse.ArgumentParser(description='PyTorch OSR Example')
    parser.add_argument('--batch_size', type=int, default=64, help='input batch size for training (default: 64)')
    parser.add_argument('--num_classes', type=int, default=10, help='number of classes')
    # Help text previously claimed "(default: 50)" while the default is 20.
    parser.add_argument('--epochs', type=int, default=20, help='number of epochs to train (default: 20)')
    parser.add_argument('--lr', type=float, default=0.001, help='learning rate (default: 1e-3)')
    parser.add_argument('--wd', type=float, default=0.00, help='weight decay')
    # Help text previously claimed "(default: 1e-3)" while the default is 0.01.
    parser.add_argument('--momentum', type=float, default=0.01, help='momentum (default: 1e-2)')
    parser.add_argument('--decreasing_lr', default='60,100,150', help='decreasing strategy')
    parser.add_argument('--lr_decay', type=float, default=0.1, help='decreasing strategy')
    # Help text previously claimed "(default: 1)" while the default is 117.
    parser.add_argument('--seed', type=int, default=117, help='random seed (default: 117)')
    parser.add_argument('--seed_sampler', type=str, default='777 1234 2731 3925 5432',
                        help='random seed for dataset sampler')
    parser.add_argument('--log_interval', type=int, default=20,
                        help='how many batches to wait before logging training status')
    parser.add_argument('--val_interval', type=int, default=5, help='how many epochs to wait before another val')
    parser.add_argument('--test_interval', type=int, default=5, help='how many epochs to wait before another test')
    # NOTE(review): flag name "lamda" is a typo for "lambda" but is kept for
    # backward compatibility with existing launch scripts.
    parser.add_argument('--lamda', type=int, default=100, help='lamda in loss function')
    parser.add_argument('--beta_z', type=int, default=1, help='beta of the kl in loss function')
    parser.add_argument('--beta_anneal', type=int, default=0, help='the anneal epoch of beta')
    parser.add_argument('--threshold', type=float, default=0.5, help='threshold of gaussian model')
    parser.add_argument('--debug', action="store_true", default=False, help='If debug mode')

    # train
    parser.add_argument('--dataset', type=str, default="NPS", help='The dataset going to use')
    parser.add_argument('--eval', action="store_true", default=False, help='directly eval?')
    parser.add_argument('--baseline', action="store_true", default=False, help='If is the baseline?')  # False
    parser.add_argument('--use_model', action="store_true", default=False, help='If use model to get the train feature')
    parser.add_argument('--encode_z', type=int, default=None, help='If encode z and dim of z')  # None
    parser.add_argument("--contrastive_loss", action="store_true", default=False, help="Use contrastive loss")  # False
    parser.add_argument("--temperature", type=float, default=1.0, help="Temperature for contrastive loss")  # 1.0
    parser.add_argument("--contra_lambda", type=float, default=1.0, help="Scaling factor of contrastive loss")
    parser.add_argument("--save_epoch", type=int, default=None, help="save model in this epoch")
    parser.add_argument("--exp", type=int, default=0, help="which experiment")
    parser.add_argument("--unseen_num", type=int, default=13, help="unseen class num in CIFAR100")

    # test
    parser.add_argument('--cf', action="store_true", default=False, help='use counterfactual generation')
    parser.add_argument('--cf_threshold', action="store_true", default=False,
                        help='use counterfactual threshold in revise_cf')
    parser.add_argument('--yh', action="store_true", default=False, help='use yh rather than feature_y_mean')
    parser.add_argument('--use_model_gau', action="store_true", default=False, help='use feature by model in gau')

    args = parser.parse_args()
    return args


def control_seed(args):
    """Seed every RNG (torch CPU/CUDA, numpy) from args.seed.

    Also records CUDA availability on args.cuda and forces deterministic
    cuDNN kernels for reproducibility.
    """
    use_cuda = torch.cuda.is_available()
    args.cuda = use_cuda

    seed = args.seed
    torch.manual_seed(seed)
    np.random.seed(seed)
    if use_cuda:
        torch.cuda.manual_seed_all(seed)

    torch.backends.cudnn.deterministic = True


# ---------------------------------------------------------------------------
# Script setup: parse CLI args, seed all RNGs, build the dataset and loaders.
# ---------------------------------------------------------------------------
args = get_args()
control_seed(args)

# The NPS dataset is hard-wired here; the TFF alternative is kept commented
# out for reference.
# load_dataset = TFF.TFF_Dataset()
load_dataset = NPS.NPS_Dataset()
# NOTE: overrides any --num_classes value passed on the command line.
args.num_classes = 10
in_channel = 1
# Graph-representation transform applied to every batch before the CNN
# (presumably yields single-channel 32x32 tensors to match Model -- confirm
# against nps_graph_representation.get_rep).
ggr = nps_graph_representation()

args.run_idx = 0
# Only the first of the space-separated sampler seeds is used in this script.
seed_sampler = int(args.seed_sampler.split(' ')[0])

# Split logic lives in the dataset class (not visible here).
train_dataset, val_dataset, test_dataset = load_dataset.sampler(seed_sampler, args)

# drop_last=True on all three loaders, so trailing partial batches are
# discarded even during validation and testing.
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=0,
                          drop_last=True)
val_loader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=0,
                        drop_last=True)
test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=0,
                         drop_last=True)

# Build the CNN classifier, the cross-entropy criterion, and the optimizer;
# move model and criterion to GPU only when one is available.
model = Model()
cost = nn.CrossEntropyLoss()
if torch.cuda.is_available():
    model = model.cuda()
    cost = cost.cuda()
optimizer = torch.optim.Adam(model.parameters())

# Honor the --epochs CLI flag instead of a duplicated hard-coded 20
# (args.epochs also defaults to 20, so default behavior is unchanged).
n_epochs = args.epochs

# ---------------------------------------------------------------------------
# Training / validation loop.
# ---------------------------------------------------------------------------
# Run on whatever device the model lives on, so CPU-only machines work too
# (previously the loop unconditionally called .cuda(), crashing without a GPU
# even though the setup code guarded on torch.cuda.is_available()).
device = next(model.parameters()).device

for epoch in range(n_epochs):
    running_loss = 0.0
    running_correct = 0
    print("Epoch {}/{}".format(epoch, n_epochs))
    print("-" * 10)

    model.train()  # hoisted out of the batch loop; per-batch calls were redundant
    for batch_idx, (data, target) in enumerate(train_loader):
        # Convert the raw batch into its graph representation
        # (assumed (batch, 1, 32, 32) to match Model's flatten -- confirm
        # against nps_graph_representation.get_rep).
        data = ggr.get_rep(data).to(device)
        target = target.to(device)

        outputs = model(data)
        _, pred = torch.max(outputs.data, 1)
        optimizer.zero_grad()
        loss = cost(outputs, target)
        loss.backward()
        optimizer.step()

        # .item() extracts a detached Python float; accumulating loss.data
        # kept autograd tensors alive across the epoch.
        running_loss += loss.item()
        running_correct += torch.sum(pred == target.data)

    # Validation: eval mode disables dropout, no_grad avoids building graphs.
    model.eval()
    val_correct = 0
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(val_loader):
            data = ggr.get_rep(data).to(device)
            target = target.to(device)
            outputs = model(data)
            _, pred = torch.max(outputs.data, 1)
            val_correct += torch.sum(pred == target.data)

    # NOTE(review): "Loss" is the sum of per-batch mean losses divided by the
    # dataset size, not a true per-sample mean; kept as-is to preserve the
    # original logging scale.
    print("Loss is:{:4f},Train Accuracy is:{:.4f}%,Val Accuracy is:{:.4f}%".format(
        running_loss / len(train_loader.dataset), 100 * running_correct / len(train_loader.dataset),
        100 * val_correct / len(val_loader.dataset)))

# ---------------------------------------------------------------------------
# Dump predicted class indices for the validation set.
# ---------------------------------------------------------------------------
# Make sure the output directory exists, then truncate the results file.
os.makedirs('./results', exist_ok=True)
open(r'./results/test_pre_softmax.txt', 'w').close()

model.eval()
device = next(model.parameters()).device
# The whole forward pass runs under no_grad; previously the context manager
# wrapped only the (deprecated) Variable construction, so inference still
# tracked gradients. Device transfer also respects CPU-only machines now.
with torch.no_grad():
    for batch_idx, (data_val, target_val) in enumerate(val_loader):
        data_val = ggr.get_rep(data_val).to(device)
        output_val = model(data_val)
        _, pred_val = torch.max(output_val.data, 1)
        pred_val = pred_val.cpu().numpy()

        # Append one '\r'-separated row of integer predictions per batch.
        with open(r'./results/test_pre_softmax.txt', 'ab') as p_val:
            np.savetxt(p_val, pred_val, fmt='%d', delimiter=' ', newline='\r')
            p_val.write(b'\n')

# ---------------------------------------------------------------------------
# Dump predicted class indices for the test set (appended to the same file
# as the validation predictions).
# ---------------------------------------------------------------------------
model.eval()
device = next(model.parameters()).device
# Inference under no_grad; previously the context manager wrapped only the
# (deprecated) Variable construction, and .cuda() was called unconditionally.
with torch.no_grad():
    for batch_idx, (data_test, target_test) in enumerate(test_loader):
        data_test = ggr.get_rep(data_test).to(device)
        output_test = model(data_test)
        _, pred_test = torch.max(output_test.data, 1)
        pred_test = pred_test.cpu().numpy()

        # Append one '\r'-separated row of integer predictions per batch.
        with open(r'./results/test_pre_softmax.txt', 'ab') as p_test:
            np.savetxt(p_test, pred_test, fmt='%d', delimiter=' ', newline='\r')
            p_test.write(b'\n')
