from torchtext.legacy.data import Dataset, BucketIterator, Iterator, Example
from torchtext.vocab import Vectors
import json
import os
from tqdm import trange
import argparse
from torch.utils.data import DataLoader
from jieba import lcut


class TorchtextSentenceDataset(Dataset):
    """Torchtext dataset built from two label-prefixed text files.

    Each line of both input files is expected to be ``label,title,content``.
    Positive ("true") and negative ("false") samples come from separate files
    and are concatenated in that order. Title and content are wrapped with
    literal 'BOS'/'EOS' markers before field preprocessing, and token
    sequences shorter than ``min_length`` are right-padded with '<pad>'.
    """

    def __init__(self, data_path_true, data_path_false, sentence_field, label_field, min_length=4):
        self.sentence_field = sentence_field
        self.label_field = label_field

        fields = [('title', self.sentence_field), ('content', self.sentence_field), ('label', self.label_field)]
        examples = []
        txt_lst = []

        with open(data_path_true, 'r', encoding='utf-8') as f:
            txt_lst.extend(f.readlines())
        with open(data_path_false, 'r', encoding='utf-8') as f:
            txt_lst.extend(f.readlines())

        self.title_lst = []
        self.content_list = []
        self.label_list = []
        for st in txt_lst:
            # Split at most twice so commas inside the content column survive
            # (a plain split(',') silently truncated content at its first comma).
            temp = st.strip('\n').split(',', 2)
            self.label_list.append(temp[0])
            self.title_lst.append('BOS' + temp[1] + 'EOS')
            self.content_list.append('BOS' + temp[2] + 'EOS')

        for index in trange(len(self.label_list)):
            examples.append(Example.fromlist(
                [self.title_lst[index], self.content_list[index], self.label_list[index]], fields))

        # Pad short token sequences up to min_length. The original code
        # hard-coded 4 inside the range(), ignoring the min_length parameter.
        for example in examples:
            if len(example.title) < min_length:
                example.title.extend('<pad>' for _ in range(min_length - len(example.title)))
            if len(example.content) < min_length:
                example.content.extend('<pad>' for _ in range(min_length - len(example.content)))

        super().__init__(examples, fields)


class ClassificationDataset(Dataset):
    """Torchtext dataset for title/content classification with optional
    hot-word augmentation.

    Each line of both input files is expected to be ``label,title,content``.
    Positive ("true") and negative ("false") samples come from separate
    files and are concatenated in that order. When ``hotword`` is given
    (a list of already-tokenized phrases, i.e. ``[[str]]``), every phrase
    whose tokens all occur in a title is appended to that title, joined by
    '、', before the 'BOS'/'EOS' markers are added.
    """

    def __init__(self, data_path_true, data_path_false, sentence_field, label_field, hotword=None):
        self.sentence_field = sentence_field
        self.label_field = label_field

        examples = []
        txt_lst = []

        with open(data_path_true, 'r', encoding='utf-8') as f:
            txt_lst.extend(f.readlines())
        with open(data_path_false, 'r', encoding='utf-8') as f:
            txt_lst.extend(f.readlines())

        # Note: here 'content' is the first field, unlike TorchtextSentenceDataset.
        fields = [('content', self.sentence_field), ('title', self.sentence_field), ('label', self.label_field)]
        self.title_lst = []
        self.text_lst = []
        self.label_list = []
        for st in txt_lst:
            # Split at most twice so commas inside the content column survive
            # (a plain split(',') silently truncated content at its first comma).
            temp = st.strip('\n').split(',', 2)
            self.label_list.append(temp[0])
            title = temp[1]
            if hotword is not None:
                for span in hotword:    # [[str]] — phrases are pre-tokenized
                    # Append the phrase only when every one of its tokens
                    # appears (as a substring) in the title.
                    if all(word in title for word in span):
                        title += '、'.join(span) + '、'
            self.title_lst.append('BOS' + title + 'EOS')
            self.text_lst.append('BOS' + temp[2] + 'EOS')

        for index in trange(len(self.label_list)):
            examples.append(Example.fromlist(
                [self.text_lst[index], self.title_lst[index], self.label_list[index]], fields))

        super().__init__(examples, fields)

def get_iterator(opt, root_path, sentence_field, label_field, vectors_path, mode='cls',
                 char_vectors_name='F:/安装包/temp/char100.txt',
                 word_vectors_name='F:/安装包/temp/sgns.sogou100.txt',
                 hotword_path='./data/ClickBait_vob.txt'):
    """Build train/test iterators and the pre-trained embedding matrix.

    Expects ``{train,test}_{true,false}.txt`` under ``root_path``.

    Parameters
    ----------
    opt : argparse.Namespace-like object; reads ``with_dict``, ``vector_mode``,
        ``min_freq``, ``batch_size`` and ``cuda``.
    root_path : directory containing the four data files.
    sentence_field, label_field : torchtext fields for text and labels.
    vectors_path : cache directory for the pre-trained vectors.
    mode : 'cls' builds ClassificationDataset (with optional hot words);
        anything else builds TorchtextSentenceDataset.
    char_vectors_name / word_vectors_name : paths to the pre-trained vector
        files (defaults preserve the original hard-coded locations).
    hotword_path : space-separated hot-word phrase file, one phrase per line.

    Returns
    -------
    (train_iterator, test_iterator, embedding_vectors)
    """
    train_true_path = root_path + '/train_true.txt'
    train_false_path = root_path + '/train_false.txt'
    test_true_path = root_path + '/test_true.txt'
    test_false_path = root_path + '/test_false.txt'

    hotword = None
    if opt.with_dict:
        hotword = []
        with open(hotword_path, 'r', encoding='utf-8') as f:
            for word in f:
                word_lst = word.strip('\n').split(' ')
                hotword.append(word_lst)

    if mode == 'cls':
        train_dataset = ClassificationDataset(train_true_path, train_false_path, sentence_field, label_field, hotword=hotword)
        test_dataset = ClassificationDataset(test_true_path, test_false_path, sentence_field, label_field, hotword=hotword)
    else:
        train_dataset = TorchtextSentenceDataset(train_true_path, train_false_path, sentence_field, label_field)
        test_dataset = TorchtextSentenceDataset(test_true_path, test_false_path, sentence_field, label_field)

    # makedirs(exist_ok=True) also creates missing parents and does not race
    # with concurrent creation, unlike the exists()+mkdir() pair it replaces.
    cache = vectors_path
    os.makedirs(cache, exist_ok=True)
    if opt.vector_mode == 'char':
        vectors = Vectors(name=char_vectors_name, cache=cache)
    else:
        vectors = Vectors(name=word_vectors_name, cache=cache)
    # Vocabulary is built over BOTH splits so test tokens are in-vocab.
    sentence_field.build_vocab(train_dataset, test_dataset, min_freq=opt.min_freq, vectors=vectors)

    device = 'cuda' if opt.cuda else 'cpu'
    train_iterator = BucketIterator(train_dataset, batch_size=opt.batch_size, train=True,
                                    device=device, shuffle=True,
                                    sort_within_batch=False)
    # Test iterator is deterministic: no sorting, no shuffling.
    test_iterator = Iterator(test_dataset, batch_size=opt.batch_size,
                             train=False, sort=False,
                             sort_within_batch=False, shuffle=False,
                             device=device)

    return train_iterator, test_iterator, sentence_field.vocab.vectors

# get_iterator('data/train.json', 8, '0', is_test=False)
