import torch
import model.net_10 as net10
import argparse
import os
import utils
import torch.optim as optim
import model.data_loader as data_loader
from torch.autograd import Variable
import numpy as np

parser = argparse.ArgumentParser()
parser.add_argument('--model_dir', default='experiments/base_cnn_GH',
                    help="Directory containing params.json")

args = parser.parse_args()
json_path = os.path.join(args.model_dir, 'params.json')
# Use an explicit exception rather than `assert`: asserts are stripped when
# Python runs with -O, which would let a missing config slip through and
# fail much later inside utils.Params.
if not os.path.isfile(json_path):
    raise FileNotFoundError("No json configuration file found at {}".format(json_path))
params = utils.Params(json_path)
# Run on the GPU when one is available.
params.cuda = torch.cuda.is_available()
# Load the evaluation (dev) split once; it is reused for every checkpoint below.
dev_dl = data_loader.fetch_dataloader('dev', params)

if __name__ == '__main__':

    # For each of the 10 training rounds ("circles"), restore the best
    # checkpoint, run the dev set through the network, and append the raw
    # logits to a per-round text file.
    for circle in range(10):
        print('circle = {}'.format(circle))

        # Fresh 10-layer CNN for this round; weights are overwritten by the
        # checkpoint below. (The original also built an optimizer/loss_fn/
        # metrics here, but this loop only does inference — removed as dead
        # code.)
        model = net10.Net(params).cuda() if params.cuda else net10.Net(params)

        # Derive the checkpoint path from --model_dir so it stays consistent
        # with where params.json was read (previously a duplicated
        # hard-coded 'experiments/base_cnn_GH' string).
        round_dir = os.path.join(args.model_dir, 'cnn', str(circle), 'end')
        utils.load_checkpoint(os.path.join(round_dir, 'best.pth.tar'), model)

        # Inference mode: disables dropout, uses running batch-norm stats.
        model.eval()

        # Destination for the raw model outputs, one row per sample.
        save_txt = os.path.join(round_dir, 'logits.txt')

        # NOTE(review): 'ab' appends, so re-running this script accumulates
        # duplicate rows in logits.txt — confirm appending is intentional.
        with open(save_txt, 'ab') as file:

            # BUG FIX: the original printed the literal "{}th eval sets..."
            # because .format(circle) was missing.
            print('{}th eval sets...'.format(circle))

            # torch.no_grad() replaces the deprecated Variable wrapper and
            # skips building the autograd graph during inference.
            with torch.no_grad():
                for data_batch, labels_batch in dev_dl:
                    # Move inputs to the GPU if available. Labels are never
                    # used in this loop, so they are not transferred.
                    if params.cuda:
                        data_batch = data_batch.cuda()

                    # Forward pass, then save logits as text with 8 decimals.
                    output_batch = model(data_batch)
                    np.savetxt(file, output_batch.cpu().numpy(), fmt='%0.8f')