import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import pickle
import copy
import os
from .data import Data
from loguru import logger

# Save the data settings to disk (pickle).
def save_data_setting(data, save_file):
    """Pickle a deep copy of ``data`` to ``save_file``.

    A deep copy is dumped so the caller's object is never touched by
    serialization; afterwards the original object's summary is printed.

    Args:
        data: the Data instance holding alphabets/instances to persist.
        save_file: path of the pickle file to write.
    """
    snapshot = copy.deepcopy(data)

    logger.info("Start Dumping data...")
    with open(save_file, "wb") as handle:
        pickle.dump(snapshot, handle)

    data.show_data_summary()
    logger.info("Data setting saved to file: {}".format(save_file))

# Load previously saved data settings.
def load_data_setting(save_file):
    """Unpickle and return the Data object written by ``save_data_setting``.

    Logs the split sizes and prints the data summary after loading.

    NOTE(review): ``pickle.load`` can execute arbitrary code — only load
    files produced by a trusted run of this project.

    Args:
        save_file: path of the pickle file to read.

    Returns:
        The deserialized Data object.
    """
    logger.info("Start Loading data...")

    with open(save_file, "rb") as handle:
        data = pickle.load(handle)

    logger.info("Load Data Setting from {}.".format(save_file))

    logger.info(
        "Train :{}, Dev:{}, Test:{}".format(
            len(data.train_Ids), len(data.dev_Ids), len(data.test_Ids)
        )
    )

    data.show_data_summary()

    return data


def data_initialization(
    data,
    gaz_file,
    train_file,
    dev_file,
    test_file,
    re2id_file,
):
    """Populate ``data``'s alphabets and gazetteer from the corpus files.

    Builds word/character/bigram alphabets for the train/dev/test splits,
    the label alphabet from the relation2id file, loads the lexicon, then
    builds the matched-word/sense alphabets and freezes everything.

    Returns:
        The same ``data`` object, mutated in place.
    """
    corpus_splits = (train_file, dev_file, test_file)

    # Word, character and bi-gram alphabets from every split.
    for split_file in corpus_splits:
        data.build_alphabet(split_file)
    data.build_label_alphabet(re2id_file)

    # Load the external lexicon (gazetteer).
    data.build_gaz_file(gaz_file)

    # Alphabets of matched lexicon words and senses, per split.
    for split_file in corpus_splits:
        data.build_gaz_alphabet(split_file)

    data.fix_alphabet()
    return data


def load_data(args, status="train"):
    """Build and return a fully initialized ``Data`` object.

    Resolves the dataset file paths from ``args``, builds all alphabets
    and the gazetteer, generates train/dev/test instances and attaches
    the pre-trained embeddings and class weights.

    Args:
        args: namespace carrying ``public_path``, ``dataset``,
            ``train_file``, ``dev_file``, ``test_file``, ``relation2id``,
            ``word_sense_map``, ``weights_mode``, ``char_emb``,
            ``bichar_emb`` and ``gaz_emb``.
        status: processing mode; only "train" is implemented.

    Returns:
        The populated Data object.

    Raises:
        ValueError: if ``status`` is anything other than "train".
    """
    if status != "train":
        # Fail fast: the original code only handled "train" (test/decode
        # branches were commented out), so any other status fell through
        # to `return data` and raised UnboundLocalError.
        raise ValueError(
            "Invalid status {!r}: only 'train' is supported.".format(status)
        )

    data = Data()

    # Path to load dataset (folder name)
    dataset = os.path.join(args.public_path, args.dataset)
    # Path to load datasets (file name)
    train_file = os.path.join(dataset, args.train_file)
    dev_file = os.path.join(dataset, args.dev_file)
    test_file = os.path.join(dataset, args.test_file)
    re2id_file = os.path.join(dataset, args.relation2id)

    # NOTE(review): built but never used in this function — presumably
    # consumed elsewhere; kept so a missing attribute still surfaces here.
    word_sense_map_file = os.path.join(args.public_path, args.word_sense_map)

    # Set weights mode for each class in optimizer
    weights_mode = args.weights_mode.lower()

    logger.info("Initialization ....")

    print(" \tTrain file:", train_file)
    print(" \tDev file:", dev_file)
    print(" \tTest file:", test_file)
    print(" \tChar emb:", args.char_emb)
    print(" \tBichar emb:", args.bichar_emb)
    print(" \tGaz file:", args.gaz_emb)

    data_initialization(
        data, args.gaz_emb, train_file, dev_file, test_file, re2id_file
    )

    # Generate instances for train, dev and test files; the form of the
    # instances is determined by load_mode.
    data.generate_instance_with_gaz(train_file, "train")
    data.generate_instance_with_gaz(dev_file, "dev")
    data.generate_instance_with_gaz(test_file, "test")

    # Build weights for optimizer and pre-trained embeddings.
    data.build_weights(weights_mode)
    data.build_word_pretrain_emb(args.char_emb)
    data.build_biword_pretrain_emb(args.bichar_emb)
    data.build_gaz_pretrain_emb(args.gaz_emb)

    return data
