from torch import save
from torch import load as torch_load
from torch.optim import Adam
from os import makedirs
from os import path as os_path
from logging import info as log_info
from logging import basicConfig, Formatter, INFO, StreamHandler, getLogger
from torch.utils.data import DataLoader
from Model.model import Model
from Model.config import BasicConfig
from Model.data_loader import TrainDataset, TestDataset, BidirectionOneShotIterator


class Runner():
    """Drives training and validation of the sentence -> entity/relation classifier.

    Wires together the config, datasets/dataloaders, model, optimizer,
    checkpointing and logging.
    """

    def __init__(self, config):
        """Cache config-derived settings, resolve output paths, set up logging.

        Raises:
            ValueError: if ``config.data_path`` does not exist on disk.
        """
        self.config = config
        self.data_path = config.data_path
        self.use_cuda = config.use_cuda
        self.learning_rate = config.learning_rate

        if not os_path.exists(self.data_path):
            raise ValueError("Data path must be given ...")
        # Fall back to default locations when paths are not configured.
        self.record_path = os_path.join("..", "log") if self.config.record_path is None else self.config.record_path
        self.result_path = os_path.join("..", "result") if self.config.result_path is None else self.config.result_path

        self.set_logger()

    def set_logger(self):
        """Log INFO+ messages to ``record_path/train.log`` and mirror them on the console."""
        if not os_path.exists(self.record_path):
            makedirs(self.record_path)

        log_path = os_path.join(self.record_path, "train.log")
        basicConfig(
            format="%(asctime)s %(levelname)-8s %(message)s",
            level=INFO,
            datefmt="%Y-%m-%d %H-%M-%S",
            filename=log_path,
            filemode="w"
        )

        # Console handler with the same format as the file log.
        console = StreamHandler()
        console.setLevel("INFO")
        formatter = Formatter("%(asctime)s %(levelname)-8s %(message)s")
        console.setFormatter(formatter)
        getLogger("").addHandler(console)

    def readData(self, mode):
        """Load ``<mode>_entities.txt`` / ``<mode>_relations.txt`` from ``data_path``.

        Each file holds tab-separated ``sentence<TAB>label`` lines (a malformed
        line raises ValueError on unpacking, as before).

        Returns:
            tuple: ``(sentence_entity_pair, sentence_relation_pair, entity_set,
            relation_set)`` where each pair is ``[sentence, label_index]`` and the
            "sets" are *sorted* label lists whose positions define the indices.
            Sorting makes label ids reproducible across runs (a bare
            ``list(set(...))`` order is hash-randomized for strings).
        """
        # 'with' guarantees both file handles are closed (the original leaked them).
        with open(os_path.join(self.data_path, mode + "_entities.txt"), "r", encoding="utf-8") as fin:
            sentence_entities = fin.readlines()
        with open(os_path.join(self.data_path, mode + "_relations.txt"), "r", encoding="utf-8") as fin:
            sentence_relations = fin.readlines()

        entity_set = sorted({label for _, label in (line.strip().split("\t") for line in sentence_entities)})
        relation_set = sorted({label for _, label in (line.strip().split("\t") for line in sentence_relations)})

        entity_dict = {entity: idx for idx, entity in enumerate(entity_set)}
        relation_dict = {relation: idx for idx, relation in enumerate(relation_set)}

        sentence_entity_pair = [[sentence, entity_dict[entity]]
                                for sentence, entity in (line.strip().split("\t") for line in sentence_entities)]
        sentence_relation_pair = [[sentence, relation_dict[relation]]
                                  for sentence, relation in (line.strip().split("\t") for line in sentence_relations)]
        return sentence_entity_pair, sentence_relation_pair, entity_set, relation_set

    def save_model(self):
        """Serialize the whole model and optimizer objects under ``result_path``."""
        if not os_path.exists(self.result_path):
            makedirs(self.result_path)

        save(self.model, os_path.join(self.result_path, "model"))
        save(self.optimizer, os_path.join(self.result_path, "optimizer"))

    def load_model(self):
        """Restore model/optimizer saved by ``save_model``; silently no-op if absent."""
        if os_path.exists(os_path.join(self.result_path, "model")):
            self.model = torch_load(os_path.join(self.result_path, "model"))
            self.optimizer = torch_load(os_path.join(self.result_path, "optimizer"))

    def do_valid(self, valid_dataset_entity, valid_dataset_relation, use_cuda):
        """Run one validation pass.

        Returns:
            tuple: ``(accuracy_entity, accuracy_relation)`` as computed by
            ``Model.valid_step``.
        """
        # Bug fix: the original read the *global* ``config`` for test_batch_size,
        # which only exists when this file is executed as a script.
        valid_dataloader_entity = DataLoader(
            valid_dataset_entity,
            batch_size=self.config.test_batch_size,
            shuffle=True,
            num_workers=max(1, self.config.cpu_num),
            collate_fn=TestDataset.collate_fn
        )
        valid_dataloader_relation = DataLoader(
            valid_dataset_relation,
            batch_size=self.config.test_batch_size,
            shuffle=True,
            num_workers=max(1, self.config.cpu_num),
            collate_fn=TestDataset.collate_fn
        )
        accuracy_entity, accuracy_relation = self.model.valid_step(self.model, valid_dataloader_entity, valid_dataloader_relation, use_cuda)
        return accuracy_entity, accuracy_relation

    def run(self):
        """Entry point: build/restore the model, then train with periodic validation."""
        if self.config.use_checkpoint:
            self.load_model()
        if not hasattr(self, "model"):
            # Fresh start; also covers use_checkpoint=True when no checkpoint
            # exists yet (load_model is a no-op in that case, and the original
            # then crashed with AttributeError).
            self.model = Model(self.config)
            self.optimizer = Adam(filter(lambda x: x.requires_grad, self.model.parameters()),
                                  lr=self.learning_rate)
        if self.use_cuda:
            self.model = self.model.cuda()

        if self.config.do_train:
            train_sentence_entity_pair, train_sentence_relation_pair, self.entity_set, self.relation_set = self.readData("train")
            train_dataset_entity = TrainDataset(train_sentence_entity_pair, self.entity_set, self.config, "entity")
            train_dataset_relation = TrainDataset(train_sentence_relation_pair, self.relation_set, self.config, "relation")

            train_dataloader_entity = DataLoader(
                train_dataset_entity,
                batch_size=self.config.train_batch_size,
                shuffle=True,
                num_workers=max(1, self.config.cpu_num),
                collate_fn=TrainDataset.collate_fn
            )
            train_dataloader_relation = DataLoader(
                train_dataset_relation,
                batch_size=self.config.train_batch_size,
                shuffle=True,
                num_workers=max(1, self.config.cpu_num),
                collate_fn=TrainDataset.collate_fn
            )
            self.train_data_iterator = BidirectionOneShotIterator(train_dataloader_entity, train_dataloader_relation)

        if self.config.do_valid:
            # Reuse the training label vocabularies so validation indices match.
            valid_sentence_entity_pair, valid_sentence_relation_pair, _, _ = self.readData("valid")
            self.valid_dataset_entity = TestDataset(valid_sentence_entity_pair, self.entity_set, self.config, "entity")
            self.valid_dataset_relation = TestDataset(valid_sentence_relation_pair, self.relation_set, self.config, "relation")

        epoch_num = self.config.epoch_num
        max_steps = self.config.train_max_steps
        current_learning_rate = self.learning_rate
        warm_up_step = self.config.warm_up_step

        log_info("**********Training**********")
        log_info("Epoch Num: %d" % epoch_num)
        log_info("Max Train Step: %d" % max_steps)
        log_info("Learning Rate: %f" % current_learning_rate)
        log_info("Warm up Step: %d" % warm_up_step)
        log_info("Entity Num: %d" % len(self.entity_set))
        log_info("Relation Num: %d" % len(self.relation_set))

        logs = {}
        for epoch in range(epoch_num):
            for step in range(max_steps):
                log = self.model.train_step(self.model, self.optimizer, self.train_data_iterator, self.use_cuda)

                # Keep only the latest value per metric.
                for key in log:
                    logs[key] = log[key]

                if step % self.config.log_step == 0:
                    for key in logs:
                        log_info("[Train|Epoch|Step|%s]  %d-%d-%.6f" % (key, epoch, step, logs[key]))

                # Guard on do_valid: the valid datasets only exist when it is set
                # (the original crashed here with do_valid=False).
                if self.config.do_valid and (step + 1) % self.config.valid_step == 0:
                    accuracy_entity, accuracy_relation = self.do_valid(self.valid_dataset_entity, self.valid_dataset_relation, self.use_cuda)
                    log_info("[Eval|Epoch|Step|Entity Accuracy]  %d-%d-%.6f" % (epoch, step, accuracy_entity))
                    log_info("[Eval|Epoch|Step|Relation Accuracy]    %d-%d-%.6f" % (epoch, step, accuracy_relation))

            self.save_model()


if __name__ == "__main__":
    # All paths hang off a single dataset name.
    dataset_name = "PowerGrid"
    data_path = os_path.join("..", "data", dataset_name)
    record_path = os_path.join("..", "log", dataset_name)
    result_path = os_path.join("..", "result", dataset_name)

    # NOTE(review): the original also defined output_dim = 16 but never passed
    # it to BasicConfig, so it is dropped here as a no-op — confirm intent.
    config = BasicConfig(
        data_path, record_path, result_path,
        True,    # use_cuda
        True,    # do_train
        True,    # do_valid
        False,   # use_checkpoint
        0.0005,  # lr
        4,       # cpu_num
        20,      # train_batch_size
        4,       # test_batch_size
        8,       # epoch_num
        100,     # train_max_steps
        100,     # warm_up_step
        10,      # log_step
        100,     # valid_step
        30,      # max_seq_len
        1,       # negative_sample_size
        100,     # checkpoint_step
        embedding_dim=768,
    )

    Runner(config).run()