# Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""train eval"""
import argparse
import logging
import time
import os

import mindspore.context as context
import mindspore as ms

from utils import *
from src.trec_new import *
from src.model_new import Model
from src.dataset import Dataset


def train(args, dataset):
    """Train the GraphCM click model on the given dataset.

    Builds a ``Model`` from the dataset's vocabulary sizes and runs its
    internal training loop; checkpointing/logging are handled inside
    ``Model.train``.

    Args:
        args: parsed command-line arguments carrying model/train settings.
        dataset: Dataset object exposing ``query_size``, ``doc_size`` and
            ``vtype_size`` and the mini-batch generator used for training.
    """
    # Removed the dead commented-out logger scaffolding that cluttered
    # the original body; behavior is unchanged.
    model = Model(args, dataset.query_size, dataset.doc_size, dataset.vtype_size, dataset)
    model.train(dataset)


def valid(args, dataset):
    """Compute click loss and perplexity on the validation set.

    Reloads trained weights from the hard-coded checkpoint ``./cm.ckpt``
    and runs ``args.num_iter`` evaluation passes, printing per-pass values
    and running averages.

    Args:
        args: parsed command-line arguments; ``args.num_iter`` controls the
            number of evaluation passes averaged.
        dataset: Dataset object providing validation mini-batches and
            ``validset_size``.
    """
    model = Model(args, dataset.query_size, dataset.doc_size, dataset.vtype_size, dataset)
    # Restore trained parameters; the path is hard-coded to the training
    # output rather than derived from args.model_dir.
    param_dict = ms.load_checkpoint('./cm.ckpt')
    ms.load_param_into_net(model, param_dict)
    # NOTE(review): model.set_train(False) was commented out in the
    # original — confirm whether evaluate() disables dropout internally.
    sum_click_loss, sum_perplexity = 0.0, 0.0
    for i in range(args.num_iter):
        valid_batches = dataset.gen_mini_batches('valid', dataset.validset_size, shuffle=False)
        # evaluate() also returns a relevance loss, which is not reported here.
        valid_click_loss, _, perplexity = model.evaluate(valid_batches, dataset)
        sum_click_loss += valid_click_loss
        sum_perplexity += perplexity
        # The original prints paired each value with the wrong label
        # (the metric value appeared under "final_valid:" and the loop
        # index under the metric name); print each metric under its
        # own label instead.
        print(f"final_valid iter {i}: avg_click_loss = {sum_click_loss / (i + 1)}")
        print(f"final_valid iter {i}: avg_perplexity = {sum_perplexity / (i + 1)}")
        print(f"final_valid iter {i}: click_loss = {valid_click_loss}")
        print(f"final_valid iter {i}: perplexity = {perplexity}")


def test(args, dataset):
    """
    compute perplexity and log-likelihood for test file
    """
    # NOTE(review): this function only constructs the model. Unlike
    # valid(), it loads no checkpoint and runs no evaluation loop, so the
    # docstring's promise is not fulfilled — the test-set evaluation
    # appears to be unimplemented (TODO).
    model = Model(args, dataset.query_size, dataset.doc_size, dataset.vtype_size, dataset)


def main():
    """Configure the device context and logging, load the dataset, and
    dispatch to train/valid/test according to the parsed flags.

    Reads the module-level ``args`` produced by the ``__main__`` argparse
    block.
    """
    # The original if/else set identical context options on both paths:
    # when args.device == "GPU", device_target=args.device is the same
    # string literal — a single call suffices.
    context.set_context(device_target=args.device, mode=context.GRAPH_MODE,
                        enable_graph_kernel=True, device_id=args.device_id)

    # Logger writes either to a timestamped file (when --log_dir is set)
    # or to the console.
    logger = logging.getLogger("GraphCM")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(message)s')
    check_path(args.model_dir)
    check_path(args.result_dir)
    check_path(args.summary_dir)
    if args.log_dir:
        check_path(args.log_dir)
        # NOTE(review): ':' in the timestamp makes this filename invalid
        # on Windows; fine on the Ascend/GPU Linux targets this script
        # selects between.
        file_handler = logging.FileHandler(
            args.log_dir + time.strftime('%Y-%m-%d-%H:%M:%S', time.localtime(time.time())) + '.txt')
        file_handler.setLevel(logging.INFO)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    else:
        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.INFO)
        console_handler.setFormatter(formatter)
        logger.addHandler(console_handler)

    logger.info('Running with args : {}'.format(args))
    logger.info('Checking the directories...')
    # NOTE(review): check_path above presumably already creates these
    # directories (confirm in utils); kept since makedirs is guarded by an
    # existence check and therefore harmless.
    for dir_path in [args.model_dir, args.result_dir, args.summary_dir]:
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)

    logger.info('Loading train/valid/test data...')
    dataset = Dataset(args)
    if args.train:
        train(args, dataset)
    if args.valid:
        valid(args, dataset)
    if args.test:
        test(args, dataset)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='GraphCM')
    parser.add_argument("--device", type=str, default="Ascend", help="which device to use")
    parser.add_argument("--device_id", type=int, default=4, help="which device id to use")
    parser.add_argument('--train', default=True,
                        help='train the model')
    parser.add_argument('--valid', action='store_true',
                        help='perform click prediction task on valid set')
    parser.add_argument('--test', action='store_true',
                        help='perform click prediction task on test set')
    parser.add_argument('--data_path', type=str, default="./data/", help="Path to data file")
    parser.add_argument('--gnn_neigh_sample', type=int, default=5,
                        help='the number of neighbors to be sampled for GAT')

    train_settings = parser.add_argument_group('train settings')
    train_settings.add_argument('--dropout_rate', type=float, default=0.5,
                                help='dropout rate')
    train_settings.add_argument('--optim', default='adadelta',
                                help='optimizer type')
    train_settings.add_argument('--learning_rate', type=float, default=0.01,
                                help='learning rate')
    train_settings.add_argument('--weight_decay', type=float, default=1e-5,
                                help='weight decay')
    train_settings.add_argument('--momentum', type=float, default=0.99,
                                help='momentum')
    train_settings.add_argument('--num_steps', type=int, default=20000,
                                help='number of training steps')
    train_settings.add_argument('--batch_size', type=int, default=64,
                                help='train batch size')


    model_settings = parser.add_argument_group('model settings')
    model_settings.add_argument('--gnn_dropout', type=float, default=0.00001,
                                help='the dropout for the gat layer')
    model_settings.add_argument('--use_pretrain_embed', action='store_true',
                                help='whether use pretrained embeddings')
    model_settings.add_argument('--embed_size', type=int, default=64,
                                help='size of the query/doc embeddings')
    model_settings.add_argument('--click_embed_size', type=int, default=4,
                                help='size of the click embeddings')
    model_settings.add_argument('--vtype_embed_size', type=int, default=8,
                                help='size of the vtype embeddings')
    model_settings.add_argument('--pos_embed_size', type=int, default=4,
                                help='size of the position embeddings')
    model_settings.add_argument('--use_gnn', default=True,
                                help='whether use gnn layer')
    model_settings.add_argument('--inter_neigh_sample', type=int, default=1,
                                help='the number of neighbor to be sampled for interaction')
    model_settings.add_argument('--inter_leaky_slope', type=float, default=0.2,
                                help='leaky slope of leakyrelu for interaction')
    model_settings.add_argument('--hidden_size', type=int, default=64,
                                help='size of RNN hidden units')
    model_settings.add_argument('--combine', default='mul',
                                help='the combination type for examination and relevance')
    model_settings.add_argument('--gnn_att_heads', type=int, default=2,
                                help='the number of multi-head attention for GAT')
    model_settings.add_argument('--gnn_concat', type=bool, default=False,
                                help='whether perform concatenation in gat layer')
    model_settings.add_argument('--gnn_leaky_slope', type=float, default=0.2,
                                help='leaky slope of leakyrelu for gat layer')
    model_settings.add_argument('--max_d_num', type=int, default=10,
                                help='max number of docs in a session')
    model_settings.add_argument('--algo', default='GraphCM',
                                help='choose the algorithm to use')

    path_settings = parser.add_argument_group('path settings')
    path_settings.add_argument('--dataset', default='TREC2014',
                                help='name of the dataset to be used')
    path_settings.add_argument('--model_dir', default='./outputs/models/',
                               help='the dir to store models')
    path_settings.add_argument('--result_dir', default='./outputs/results/',
                               help='the dir to output the results')
    path_settings.add_argument('--summary_dir', default='./outputs/summary/',
                               help='the dir to write tensorboard summary')
    path_settings.add_argument('--log_dir', default='./outputs/log/',
                               help='path of the log file. If not set, logs are printed to console')
    path_settings.add_argument('--eval_freq', type=int, default=100,
                               help='the frequency of evaluating on the valid set when training')
    path_settings.add_argument('--load_model', type=int, default=-1,
                               help='load model global step')
    path_settings.add_argument('--patience', type=int, default=5,
                               help='lr decay when more than the patience times of evaluation where loss/ppl do not decrease')
    path_settings.add_argument('--check_point', type=int, default=100,
                               help='the frequency of saving model')
    path_settings.add_argument('--gpu_num', type=int, default=1,
                                help='gpu_num')


    args = parser.parse_args()
    main()
    # logger.info('Loading train/valid/test data...')
    # data1 = Dataset(args)
    # dataset = TREC(root=args.data_path, dataset=data1)
    # print(col_(data1).neighbors(1))