# -*- coding: utf-8 -*-
from keras.preprocessing.sequence import pad_sequences

from base.base_data_loader import BaseDataLoader

from atec_first_part import load_data
# from utils.textdistance.algorithms.edit_based import hamming, mlipns, levenshtein, damerau_levenshtein, jaro_winkler,\
#     strcmp95, needleman_wunsch, gotoh, smith_waterman
# from utils.textdistance.algorithms.token_based import jaccard, sorensen, tversky, overlap, tanimoto, cosine,\
#     monge_elkan, bag
# from utils.textdistance.algorithms.sequence_based import lcsseq, lcsstr, ratcliff_obershelp
# from utils.textdistance.algorithms.simple import length, identity, matrix
#
# import numpy as np


class SiameseDataLoader(BaseDataLoader):
    """Data loader for a Siamese text-matching model.

    Loads sentence pairs (and labels, when not in test mode) via
    ``load_data`` and pads each side of the pair to a fixed length
    chosen by tokenization level ('word' or 'char').
    """

    def __init__(self, config, train_file=None, valid_file=None, test_file=None, level='char'):
        """Load either the test split or the train+valid splits.

        :param config: experiment config; must expose ``max_len_word``
            and ``max_len_char``.
        :param train_file: path to the training pairs file.
        :param valid_file: path to the validation pairs file.
        :param test_file: path to the test pairs file; when truthy,
            only test data (no labels) is loaded.
        :param level: tokenization granularity, 'word' or 'char'.
        """
        super(SiameseDataLoader, self).__init__(config)
        if test_file:
            self.x_test_a, self.x_test_b, self.vocabulary = load_data(test_file, level, test=True)
        else:
            self.x_train_a, self.x_train_b, self.y_train, self.vocabulary = load_data(train_file, level)
            # NOTE(review): this second call overwrites self.vocabulary with the
            # one returned for the validation file — presumably load_data yields
            # a shared/cached vocabulary for both splits; verify.
            self.x_valid_a, self.x_valid_b, self.y_valid, self.vocabulary = load_data(valid_file, level)

        # Padding length depends on tokenization granularity.
        self.max_len = config.max_len_word if level == 'word' else config.max_len_char

    def get_vocabulary(self):
        """Return the vocabulary produced by ``load_data``."""
        return self.vocabulary

    def _pad(self, sequences):
        # Pad one side of the pair to the configured maximum length.
        return pad_sequences(sequences, maxlen=self.max_len)

    def get_train_data(self):
        """Return (padded A-side, padded B-side, labels) for training."""
        return self._pad(self.x_train_a), self._pad(self.x_train_b), self.y_train

    def get_valid_data(self):
        """Return (padded A-side, padded B-side, labels) for validation."""
        return self._pad(self.x_valid_a), self._pad(self.x_valid_b), self.y_valid

    def get_test_data(self):
        """Return (padded A-side, padded B-side) for testing; no labels."""
        return self._pad(self.x_test_a), self._pad(self.x_test_b)

    # def get_train_data_ensemble(self):
    #     statistical_features = list()
    #     for seq_a, seq_b in zip(self.x_train_a, self.x_train_b):
    #         temp_features = list()
    #         hamming_similarity = hamming.normalized_similarity(seq_a, seq_b)
    #         mlipns_similarity = mlipns.normalized_similarity(seq_a, seq_b)
    #         levenshtein_similarity = levenshtein.normalized_similarity(seq_a, seq_b)
    #         damerau_levenshtein_similarity = damerau_levenshtein.normalized_similarity(seq_a, seq_b)
    #         jaro_winkler_similarity = jaro_winkler.normalized_similarity(seq_a, seq_b)
    #         # strcmp95_similarity = strcmp95.normalized_similarity(' '.join([str(item) for item in seq_a]), ' '.join([str(item) for item in seq_b]))
    #         needleman_wunsch_similarity = needleman_wunsch.normalized_similarity(seq_a, seq_b)
    #         gotoh_similarity = gotoh.normalized_similarity(seq_a, seq_b)
    #         smith_waterman_similarity = smith_waterman.normalized_similarity(seq_a, seq_b)
    #
    #         jaccard_similarity = jaccard.normalized_similarity(seq_a, seq_b)
    #         sorensen_similarity = sorensen.normalized_similarity(seq_a, seq_b)
    #         tversky_similarity = tversky.normalized_similarity(seq_a, seq_b)
    #         overlap_similarity = overlap.normalized_similarity(seq_a, seq_b)
    #         cosine_similarity = cosine.normalized_similarity(seq_a, seq_b)
    #         # monge_elkan_similarity = monge_elkan.normalized_similarity(' '.join([str(item) for item in seq_a]), ' '.join([str(item) for item in seq_b]))
    #         bag_similarity = bag.normalized_similarity(seq_a, seq_b)
    #
    #         # lcsseq_similarity = lcsseq.normalized_similarity(' '.join([str(item) for item in seq_a]), ' '.join([str(item) for item in seq_b]))
    #         # lcsstr_similarity = lcsstr.normalized_similarity(seq_a, seq_b)
    #         # ratcliff_obershelp_similarity = ratcliff_obershelp.normalized_similarity(seq_a, seq_b)
    #         #
    #         length_similarity = length.normalized_similarity(seq_a, seq_b)
    #         identity_similarity = identity.normalized_similarity(seq_a, seq_b)
    #         matrix_similarity = matrix.normalized_similarity(seq_a, seq_b)
    #
    #         temp_features.extend([hamming_similarity,
    #                               mlipns_similarity,
    #                               levenshtein_similarity,
    #                               damerau_levenshtein_similarity,
    #                               jaro_winkler_similarity,
    #                               # strcmp95_similarity,
    #                               needleman_wunsch_similarity,
    #                               gotoh_similarity,
    #                               smith_waterman_similarity,
    #                               jaccard_similarity,
    #                               sorensen_similarity,
    #                               tversky_similarity,
    #                               overlap_similarity,
    #                               cosine_similarity,
    #                               # monge_elkan_similarity,
    #                               bag_similarity,
    #                               # lcsseq_similarity,
    #                               # lcsstr_similarity,
    #                               # ratcliff_obershelp_similarity,
    #                               length_similarity,
    #                               identity_similarity,
    #                               matrix_similarity
    #                               ]
    #                              )
    #         statistical_features.append(temp_features)
    #
    #     return pad_sequences(self.x_train_a, maxlen=self.max_len), pad_sequences(self.x_train_b, maxlen=self.max_len),\
    #         statistical_features, self.y_train
    #
    # def get_valid_data_ensemble(self):
    #     statistical_features = list()
    #     for seq_a, seq_b in zip(self.x_valid_a, self.x_valid_b):
    #         temp_features = list()
    #         hamming_similarity = hamming.normalized_similarity(seq_a, seq_b)
    #         mlipns_similarity = mlipns.normalized_similarity(seq_a, seq_b)
    #         levenshtein_similarity = levenshtein.normalized_similarity(seq_a, seq_b)
    #         damerau_levenshtein_similarity = damerau_levenshtein.normalized_similarity(seq_a, seq_b)
    #         jaro_winkler_similarity = jaro_winkler.normalized_similarity(seq_a, seq_b)
    #         # strcmp95_similarity = strcmp95.normalized_similarity(' '.join([str(item) for item in seq_a]),
    #         #                                                      ' '.join([str(item) for item in seq_b]))
    #         needleman_wunsch_similarity = needleman_wunsch.normalized_similarity(seq_a, seq_b)
    #         gotoh_similarity = gotoh.normalized_similarity(seq_a, seq_b)
    #         smith_waterman_similarity = smith_waterman.normalized_similarity(seq_a, seq_b)
    #
    #         jaccard_similarity = jaccard.normalized_similarity(seq_a, seq_b)
    #         sorensen_similarity = sorensen.normalized_similarity(seq_a, seq_b)
    #         tversky_similarity = tversky.normalized_similarity(seq_a, seq_b)
    #         overlap_similarity = overlap.normalized_similarity(seq_a, seq_b)
    #         cosine_similarity = cosine.normalized_similarity(seq_a, seq_b)
    #         # monge_elkan_similarity = monge_elkan.normalized_similarity(' '.join([str(item) for item in seq_a]), ' '.join([str(item) for item in seq_b]))
    #         bag_similarity = bag.normalized_similarity(seq_a, seq_b)
    #         #
    #         # lcsseq_similarity = lcsseq.normalized_similarity(' '.join([str(item) for item in seq_a]), ' '.join([str(item) for item in seq_b]))
    #         # lcsstr_similarity = lcsstr.normalized_similarity(seq_a, seq_b)
    #         # ratcliff_obershelp_similarity = ratcliff_obershelp.normalized_similarity(seq_a, seq_b)
    #         #
    #         length_similarity = length.normalized_similarity(seq_a, seq_b)
    #         identity_similarity = identity.normalized_similarity(seq_a, seq_b)
    #         matrix_similarity = matrix.normalized_similarity(seq_a, seq_b)
    #
    #         temp_features.extend([hamming_similarity,
    #                               mlipns_similarity,
    #                               levenshtein_similarity,
    #                               damerau_levenshtein_similarity,
    #                               jaro_winkler_similarity,
    #                               # strcmp95_similarity,
    #                               needleman_wunsch_similarity,
    #                               gotoh_similarity,
    #                               smith_waterman_similarity,
    #                               jaccard_similarity,
    #                               sorensen_similarity,
    #                               tversky_similarity,
    #                               overlap_similarity,
    #                               cosine_similarity,
    #                               # monge_elkan_similarity,
    #                               bag_similarity,
    #                               # lcsseq_similarity,
    #                               # lcsstr_similarity,
    #                               # ratcliff_obershelp_similarity,
    #                               length_similarity,
    #                               identity_similarity,
    #                               matrix_similarity
    #                               ]
    #                              )
    #         statistical_features.append(temp_features)
    #
    #     return pad_sequences(self.x_valid_a, maxlen=self.max_len), pad_sequences(self.x_valid_b, maxlen=self.max_len), \
    #         statistical_features, self.y_valid
    #
    # def get_test_data_ensemble(self):
    #     statistical_features = list()
    #     for seq_a, seq_b in zip(self.x_test_a, self.x_test_b):
    #         temp_features = list()
    #         hamming_similarity = hamming.normalized_similarity(seq_a, seq_b)
    #         mlipns_similarity = mlipns.normalized_similarity(seq_a, seq_b)
    #         levenshtein_similarity = levenshtein.normalized_similarity(seq_a, seq_b)
    #         damerau_levenshtein_similarity = damerau_levenshtein.normalized_similarity(seq_a, seq_b)
    #         jaro_winkler_similarity = jaro_winkler.normalized_similarity(seq_a, seq_b)
    #         strcmp95_similarity = strcmp95.normalized_similarity(' '.join([str(item) for item in seq_a]),
    #                                                              ' '.join([str(item) for item in seq_b]))
    #         needleman_wunsch_similarity = needleman_wunsch.normalized_similarity(seq_a, seq_b)
    #         gotoh_similarity = gotoh.normalized_similarity(seq_a, seq_b)
    #         smith_waterman_similarity = smith_waterman.normalized_similarity(seq_a, seq_b)
    #
    #         jaccard_similarity = jaccard.normalized_similarity(seq_a, seq_b)
    #         sorensen_similarity = sorensen.normalized_similarity(seq_a, seq_b)
    #         tversky_similarity = tversky.normalized_similarity(seq_a, seq_b)
    #         overlap_similarity = overlap.normalized_similarity(seq_a, seq_b)
    #         cosine_similarity = cosine.normalized_similarity(seq_a, seq_b)
    #         monge_elkan_similarity = monge_elkan.normalized_similarity(' '.join([str(item) for item in seq_a]), ' '.join([str(item) for item in seq_b]))
    #         bag_similarity = bag.normalized_similarity(seq_a, seq_b)
    #
    #         lcsseq_similarity = lcsseq.normalized_similarity(' '.join([str(item) for item in seq_a]), ' '.join([str(item) for item in seq_b]))
    #         lcsstr_similarity = lcsstr.normalized_similarity(seq_a, seq_b)
    #         ratcliff_obershelp_similarity = ratcliff_obershelp.normalized_similarity(seq_a, seq_b)
    #
    #         length_similarity = length.normalized_similarity(seq_a, seq_b)
    #         identity_similarity = identity.normalized_similarity(seq_a, seq_b)
    #         matrix_similarity = matrix.normalized_similarity(seq_a, seq_b)
    #
    #         temp_features.extend([hamming_similarity,
    #                               mlipns_similarity,
    #                               levenshtein_similarity,
    #                               damerau_levenshtein_similarity,
    #                               jaro_winkler_similarity,
    #                               strcmp95_similarity,
    #                               needleman_wunsch_similarity,
    #                               gotoh_similarity,
    #                               smith_waterman_similarity,
    #                               jaccard_similarity,
    #                               sorensen_similarity,
    #                               tversky_similarity,
    #                               overlap_similarity,
    #                               cosine_similarity,
    #                               monge_elkan_similarity,
    #                               bag_similarity,
    #                               lcsseq_similarity,
    #                               lcsstr_similarity,
    #                               ratcliff_obershelp_similarity,
    #                               length_similarity,
    #                               identity_similarity,
    #                               matrix_similarity
    #                               ]
    #                              )
    #         statistical_features.append(temp_features)
    #
    #     return pad_sequences(self.x_test_a, maxlen=self.max_len), pad_sequences(self.x_test_b, maxlen=self.max_len), \
    #         statistical_features
