import torch
from torchtext import data, datasets
import torchtext
from collections import Counter, defaultdict
import codecs
import os

# Special vocabulary tokens shared by every Field in this module.
PAD_WORD = '<blank>'  # padding token
UNK_WORD = '<unk>'  # unknown-word token
UNK = 0  # index reserved for unknown words (defaultdict fallback in _setstate)
BOS_WORD = '<s>'  # beginning-of-sentence marker (tgt field only)
EOS_WORD = '</s>'  # end-of-sentence marker (tgt field only)

def _getstate(self):
    return dict(self.__dict__, stoi=dict(self.stoi))


def _setstate(self, state):
    self.__dict__.update(state)
    self.stoi = defaultdict(lambda: 0, self.stoi)


# Monkey-patch torchtext's Vocab so instances can be pickled/unpickled:
# its stoi defaultdict uses a lambda factory, which pickle rejects.
torchtext.vocab.Vocab.__getstate__ = _getstate
torchtext.vocab.Vocab.__setstate__ = _setstate


def batch_fix():
    """No-op placeholder; retained so existing imports/callers keep working."""
    return None


def get_fields():
    """Create the torchtext Field objects used by this module.

    Returns:
        dict mapping name -> Field for 'src', 'tgt' and 'bow'. The target
        field adds BOS/EOS markers; every field pads with PAD_WORD and
        reports sequence lengths (include_lengths=True).
    """
    src_field = torchtext.data.Field(pad_token=PAD_WORD, include_lengths=True)
    tgt_field = torchtext.data.Field(init_token=BOS_WORD, eos_token=EOS_WORD,
                                     pad_token=PAD_WORD, include_lengths=True)
    bow_field = torchtext.data.Field(pad_token=PAD_WORD, include_lengths=True)
    return {'src': src_field, 'tgt': tgt_field, 'bow': bow_field}

def load_fields_from_vocab(vocab):
    """Rebuild Field objects from saved (name, Vocab) pairs.

    Each Vocab's ``stoi`` was flattened to a plain dict for pickling;
    restore it to a defaultdict mapping unknown words to index 0.
    """
    fields = get_fields()
    for name, vocab_obj in dict(vocab).items():
        # Restore the unpicklable defaultdict lookup table.
        vocab_obj.stoi = defaultdict(lambda: 0, vocab_obj.stoi)
        fields[name].vocab = vocab_obj
    return fields

def save_fields_to_vocab(fields):
    """
    Save Vocab objects in Field objects to `vocab.pt` file.

    Returns a list of (field_name, Vocab) pairs for fields that have a
    built vocabulary. NOTE: flattens each vocab's ``stoi`` to a plain
    dict IN PLACE so the result is picklable.
    """
    saved = []
    for name, field in fields.items():
        if field is None or 'vocab' not in field.__dict__:
            continue
        field.vocab.stoi = dict(field.vocab.stoi)
        saved.append((name, field.vocab))
    return saved

def build_vocab(train, opt):
    """Build vocabularies for the dataset's fields, in place.

    Args:
        train: a torchtext Dataset whose ``fields`` include 'src', 'tgt'
            and 'bow'.
        opt: options object with ``src_vocab_size``, ``tgt_vocab_size``,
            ``merge_vocab`` and (when merging) ``merged_vocab_size``.

    Side effects: assigns ``.vocab`` on the Fields. When ``opt.merge_vocab``
    is true, 'src', 'tgt' and 'bow' all share one merged vocabulary.

    NOTE(review): when ``opt.merge_vocab`` is false the 'bow' field never
    gets a vocab here — confirm callers only use 'bow' with merged vocabs.
    """
    fields = train.fields
    fields["src"].build_vocab(train, max_size=opt.src_vocab_size)
    fields["tgt"].build_vocab(train, max_size=opt.tgt_vocab_size)
    if opt.merge_vocab:
        merged_vocab = merge_vocabs(
            [fields["src"].vocab, fields["tgt"].vocab],
            vocab_size=opt.merged_vocab_size,
        )
        fields["src"].vocab = merged_vocab
        fields["tgt"].vocab = merged_vocab
        # 'bow' shares the merged vocabulary so bag-of-words indices align
        # with src/tgt token indices.
        fields["bow"].vocab = merged_vocab

def merge_vocabs(vocabs, vocab_size=None):
    """
    Merge individual vocabularies (assumed to be generated from disjoint
    documents) into a larger vocabulary.

    Args:
        vocabs: `torchtext.vocab.Vocab` vocabularies to be merged
        vocab_size: `int` the final vocabulary size. `None` for no limit.
    Return:
        `torchtext.vocab.Vocab`
    """
    # Accumulate token frequencies; Counter.update adds counts together.
    merged_freqs = Counter()
    for vocab in vocabs:
        merged_freqs.update(vocab.freqs)
    return torchtext.vocab.Vocab(
        merged_freqs,
        specials=[UNK_WORD, PAD_WORD, BOS_WORD, EOS_WORD],
        max_size=vocab_size)


class PriorTrainDataset_forbuild_vocab(torchtext.data.Dataset):
    """Flattened dataset used only for vocabulary building.

    Each input line is ``src<TAB>repr(list_of_tgt_strings)``; every
    (src, tgt) pair becomes its own Example so Field.build_vocab sees
    all tokens.
    """

    def sort_key(self, ex):
        """ Sort using length of source sentences. """
        # Default to a balanced sort, prioritizing tgt len match.
        # TODO: make this configurable.
        if hasattr(ex, "tgt"):
            return -len(ex.src), -len(ex.tgt)
        return -len(ex.src)

    def __init__(self, data_path,fields, **kwargs):
        """Read `data_path` and create one Example per (src, tgt) pair.

        Malformed lines (wrong column count, empty src/tgt list) are
        printed and skipped rather than raising.
        """
        make_example = torchtext.data.Example.fromlist
        examples = []

        with open(data_path, 'r', encoding="utf8", errors="ignore") as data_f:

            read_lines = data_f.readlines()
            all_length = len(read_lines)
            print("读取文件：",data_path,all_length)
            i_rotate = 0
            for line in read_lines:

                data = line.strip().split('\t')
                if len(data) !=2:
                    print("miss: "+line.strip())
                    continue
                # SECURITY NOTE: eval() on file contents executes arbitrary
                # code — only safe for trusted data files. Consider
                # ast.literal_eval instead.
                src,tgt_all = data[0],eval(data[1])
                # tgt_all: list of target sentence strings, variable length.

                if len(src) == 0 or len(tgt_all) == 0:
                    print("miss: %s,%s"%(src,tgt_all))
                    continue
                i_rotate = i_rotate + 1
                # Single-line progress indicator: "\r[processed//total]".
                str_print = '\r['+str(i_rotate)+'//'+str(all_length)+']'
                print(str_print,end='')
                # One Example per (src, tgt) pair, plus a bag-of-words column.
                for tgt in tgt_all:
                    bow = self.get_bow(src, tgt)
                    examples.append(make_example([src,tgt,bow],fields))
            print("读取数据完成")
        super(PriorTrainDataset_forbuild_vocab, self).__init__(examples, fields, **kwargs)

    def get_bow(self, src, tgt):
        """Return the space-joined set of unique tokens in src and tgt.

        NOTE: set() makes the token order non-deterministic across runs.
        """
        bow = src.split(' ')
        bow = bow +tgt.split(' ')
        bow = list(set(bow))
        return ' '.join(bow)

    def __getstate__(self):
        # Plain __dict__ round-trip; Fields/Vocabs handle their own pickling.
        return self.__dict__

    def __setstate__(self, d):
        self.__dict__.update(d)

class PriorTrainDataset_fortrain(torchtext.data.Dataset):
    """Training dataset where every source pairs with a fixed number of
    target responses (one field per response) plus a bag-of-words column.

    Line format: ``src<TAB>repr(list_of_tgt_strings)``.
    """

    def sort_key(self, ex):
        """ Sort using length of source sentences. """
        # Default to a balanced sort, prioritizing tgt len match.
        # TODO: make this configurable.
        if hasattr(ex, "tgt"):
            return -len(ex.src), -len(ex.tgt)
        return -len(ex.src)

    def __init__(self, data_path,fields, **kwargs):
        """Build Examples of ``[src, tgt_1..tgt_k, bow]`` from `data_path`.

        Lines with the wrong column count, an empty source, or fewer
        targets than the field layout requires are skipped.
        """
        make_example = torchtext.data.Example.fromlist
        with open(data_path, 'r', encoding="utf8", errors="ignore") as data_f:
            examples = []
            for line in data_f:
                data = line.strip().split('\t')
                if len(data) !=2:
                    print("miss: "+line.strip())
                    continue
                # SECURITY NOTE: eval() on file data executes arbitrary code —
                # only safe for trusted files (prefer ast.literal_eval).
                src,tgt_all = data[0],eval(data[1])
                # tgt_all: list of target sentence strings, variable length.

                bow = self.get_bow(src, tgt_all)
                # len(fields)-2 = number of tgt slots (total fields minus the
                # 'src' and 'bow' columns) — presumably a fixed count of
                # responses per source; verify against the caller's layout.
                if len(src) == 0 or len(tgt_all) < (len(fields)-2):
                    # print("miss: %s,%s"%(src,tgt_all))
                    continue
                list_data = [src]
                for tgt in tgt_all:
                    list_data.append(tgt)

                list_data.append(bow)
                examples.append(make_example(list_data,fields))

        super(PriorTrainDataset_fortrain, self).__init__(examples, fields, **kwargs)

    def get_bow(self, src, tgt_list):
        """Space-joined set of unique tokens across src and all targets.

        NOTE: set() order is non-deterministic across runs.
        """
        bow = src.split(' ')
        for tgt in tgt_list:
            bow = bow +tgt.split(' ')
        bow = list(set(bow))
        return ' '.join(bow)

    def __getstate__(self):
        # Plain __dict__ round-trip for pickling.
        return self.__dict__

    def __setstate__(self, d):
        self.__dict__.update(d)

class keyword_PriorTrainDataset_fortrain(torchtext.data.Dataset):
    """Training dataset whose lines carry, besides src and targets, two
    keyword lists (similarity keywords and peculiarity keywords).

    Expected line format (4 tab-separated columns):
        src <TAB> repr(tgt_list) <TAB> repr(similarity_keys) <TAB> repr(peculiarity_keys)
    """

    def sort_key(self, ex):
        """ Sort using length of source sentences. """
        # Default to a balanced sort, prioritizing tgt len match.
        # TODO: make this configurable.
        if hasattr(ex, "tgt"):
            return -len(ex.src), -len(ex.tgt)
        return -len(ex.src)

    def __init__(self, data_path, fields, **kwargs):
        """Build Examples of
        ``[src, tgt_1..tgt_k, sim_keys..., pec_keys..., bow]``.

        Lines with the wrong column count, an empty source, or too few
        targets are skipped.
        """
        make_example = torchtext.data.Example.fromlist
        with open(data_path, 'r', encoding="utf8", errors="ignore") as data_f:
            examples = []
            for line in data_f:
                data = line.strip().split('\t')
                if len(data) != 4:
                    print("miss: "+line.strip())
                    continue
                # BUG FIX: the original unpacked four names from only two
                # values (``data[0], eval(data[1])``), raising ValueError on
                # every line; columns 2 and 3 also hold repr()'d lists and
                # each needs its own eval.
                # SECURITY NOTE: eval() on file data executes arbitrary
                # code — only safe for trusted files (prefer ast.literal_eval).
                src = data[0]
                tgt_all = eval(data[1])
                similarity_key_list = eval(data[2])
                peculiarity_key_list = eval(data[3])

                bow = self.get_bow(src, tgt_all)
                # len(fields)-2 — presumably the reserved number of tgt
                # slots; verify against the caller's field layout.
                if len(src) == 0 or len(tgt_all) < (len(fields)-2):
                    continue
                list_data = [src]
                for tgt in tgt_all:
                    list_data.append(tgt)
                for similar_key in similarity_key_list:
                    list_data.append(similar_key)
                for perculiar_key in peculiarity_key_list:
                    list_data.append(perculiar_key)

                list_data.append(bow)
                examples.append(make_example(list_data,fields))

        # BUG FIX: the original called super() with an unrelated class
        # (PriorTrainDataset_fortrain), which raises TypeError because
        # self is not an instance of that class.
        super(keyword_PriorTrainDataset_fortrain, self).__init__(examples, fields, **kwargs)

    def get_bow(self, src, tgt_list):
        """Space-joined set of unique tokens across src and all targets.

        NOTE: set() order is non-deterministic across runs.
        """
        bow = src.split(' ')
        for tgt in tgt_list:
            bow = bow + tgt.split(' ')
        bow = list(set(bow))
        return ' '.join(bow)

    def __getstate__(self):
        # Plain __dict__ round-trip for pickling.
        return self.__dict__

    def __setstate__(self, d):
        self.__dict__.update(d)

class negtiveTrainDataset_fortrain(torchtext.data.Dataset):
    """Training dataset that augments each example with NEGATIVE answers:
    the target set of the previous line serves as negatives for the
    current line. The file's first line only seeds the first negatives
    and never becomes an Example itself.
    """

    def sort_key(self, ex):
        """ Sort using length of source sentences. """
        # Default to a balanced sort, prioritizing tgt len match.
        # TODO: make this configurable.
        if hasattr(ex, "tgt"):
            return -len(ex.src), -len(ex.tgt)
        return -len(ex.src)

    def __init__(self, data_path,fields, **kwargs):
        """Build Examples of ``[src, tgt_1..tgt_k, neg_1..neg_m, bow]``.

        Line format: ``src<TAB>repr(list_of_tgt_strings)``.
        """
        make_example = torchtext.data.Example.fromlist
        with open(data_path, 'r', encoding="utf8", errors="ignore") as data_f:
            examples = []

            readlines = data_f.readlines()
            first_line = readlines[0]
            # Seed the negatives with the first line's answer set.
            # SECURITY NOTE: eval() on file data executes arbitrary code —
            # only safe for trusted files (prefer ast.literal_eval).
            last_ansset = eval(first_line.strip().split('\t')[1])

            rotate_lines = readlines[1:]

            for line in rotate_lines:
                data = line.strip().split('\t')
                if len(data) !=2:
                    print("miss: "+line.strip())
                    continue
                src,tgt_all = data[0],eval(data[1])
                # tgt_all: list of target sentence strings, variable length.

                bow = self.get_bow(src, tgt_all)
                # len(fields)-2 — presumably the reserved number of tgt
                # slots; verify against the caller's field layout.
                if len(src) == 0 or len(tgt_all) < (len(fields)-2):
                    # print("miss: %s,%s"%(src,tgt_all))
                    continue
                list_data = [src]
                for tgt in tgt_all:
                    list_data.append(tgt)
                # Negatives = the previous ACCEPTED line's full answer set
                # (skipped lines do not advance last_ansset).
                for neg_ans in last_ansset:
                    list_data.append(neg_ans)
                list_data.append(bow)
                examples.append(make_example(list_data,fields))
                last_ansset = tgt_all

        super(negtiveTrainDataset_fortrain, self).__init__(examples, fields, **kwargs)

    def get_bow(self, src, tgt_list):
        """Space-joined set of unique tokens across src and all targets."""
        bow = src.split(' ')
        for tgt in tgt_list:
            bow = bow +tgt.split(' ')
        bow = list(set(bow))
        return ' '.join(bow)

    def __getstate__(self):
        # Plain __dict__ round-trip for pickling.
        return self.__dict__

    def __setstate__(self, d):
        self.__dict__.update(d)


class InferDataset(torchtext.data.Dataset):
    """Inference-time dataset: one whitespace-tokenized source sentence
    per input line (no targets)."""

    def __init__(self, data_path, fields,  **kwargs):
        """Create a single-column ('src') Example for every input line."""
        make_example = torchtext.data.Example.fromlist
        examples = []
        with open(data_path, 'r', encoding="utf8",errors="ignore") as src_f:
            for raw_line in src_f:
                # Normalize: strip, tokenize on single spaces, re-join.
                tokens = raw_line.strip().split(' ')
                normalized = ' '.join(tokens)
                examples.append(make_example([normalized], fields))
        super(InferDataset, self).__init__(examples, fields, **kwargs)

    def sort_key(self, ex):
        """Sort longest-source first, breaking ties by target length when
        a target is present."""
        if hasattr(ex, "tgt"):
            return -len(ex.src), -len(ex.tgt)
        return -len(ex.src)

    def __getstate__(self):
        # Plain __dict__ round-trip for pickling.
        return self.__dict__

    def __setstate__(self, d):
        self.__dict__.update(d)


class TgtInferDataset(torchtext.data.Dataset):
    """Inference dataset with gold targets: each line is ``src<TAB>tgt``."""

    def __init__(self, data_path, fields, **kwargs):
        """Build (src, tgt) Examples; malformed lines are reported and
        skipped."""
        make_example = torchtext.data.Example.fromlist
        examples = []
        with open(data_path, 'r', encoding="utf8", errors="ignore") as data_f:
            for line in data_f:
                columns = line.strip().split('\t')
                if len(columns) != 2:
                    print("TgtInferDataset miss: "+line.strip())
                    continue
                examples.append(make_example([columns[0], columns[1], ], fields))
        super(TgtInferDataset, self).__init__(examples, fields, **kwargs)

    def sort_key(self, ex):
        """Sort longest-source first, breaking ties by target length when
        a target is present."""
        if hasattr(ex, "tgt"):
            return -len(ex.src), -len(ex.tgt)
        return -len(ex.src)

    def __getstate__(self):
        # Plain __dict__ round-trip for pickling.
        return self.__dict__

    def __setstate__(self, d):
        self.__dict__.update(d)
class OrderedIterator(torchtext.data.Iterator):
    """Iterator that pools examples into large chunks, sorts within each
    chunk (for length-based bucketing), and shuffles the resulting
    minibatches during training."""

    def create_batches(self):
        if self.train:
            def pool(data, random_shuffler):
                # Pull 100 batches' worth of examples at a time so that
                # sorting stays local while the overall order is shuffled.
                for p in torchtext.data.batch(data, self.batch_size * 100):
                    if self.sort:
                        sorted_p = sorted(p, key=self.sort_key)
                        p_batch = torchtext.data.batch(
                            sorted_p,
                            self.batch_size, self.batch_size_fn)
                    else:
                        p_batch = torchtext.data.batch(
                            p,
                            self.batch_size, self.batch_size_fn)
                    # Shuffle the minibatches within this pooled chunk.
                    for b in random_shuffler(list(p_batch)):
                        yield b
            # Lazy generator: batches are produced on demand.
            self.batches = pool(self.data(), self.random_shuffler)
        else:
            # Evaluation path: deterministic batch order, each batch
            # sorted by sort_key.
            self.batches = []
            for b in torchtext.data.batch(self.data(), self.batch_size,
                                          self.batch_size_fn):
                self.batches.append(sorted(b, key=self.sort_key))

