import json
import torch
import random
from tqdm import tqdm
from configs import Config
import matplotlib.pyplot as plt
from nltk import word_tokenize


class Amazon_DataLoader:
    """Iterable mini-batch loader over the preprocessed Amazon review file.

    Each line of ``config.processed_data`` is a JSON record produced by
    ``Ordered_transformer`` (integer user/item ids and an index-encoded
    review).  Iterating yields a dict of ``torch.long`` tensors keyed by
    ``config.input_dict``.  NOTE: the SAME dict object is reused for every
    batch, so callers that keep a batch across iterations must copy it.
    """

    def __init__(self, config, mode='all'):
        """Load all records, keep the ``split_rate[mode]`` slice, optionally shuffle."""
        self.batch_size = config.batch_size
        self.data_file = config.processed_data
        with open(self.data_file, 'r') as f:
            self.data = [json.loads(line) for line in f]
        self.mode = mode
        # split_rate entries are fractions of the dataset; cast to int so they
        # are legal slice bounds (a float slice index raises TypeError).
        start = int(config.split_rate[mode][0] * len(self.data))
        end = int(config.split_rate[mode][1] * len(self.data))
        self.data = self.data[start:end]
        self.data_len = len(self.data)
        # Floor division: the trailing partial batch is dropped.
        self.batch_len = self.data_len // self.batch_size
        self.index = 0
        self.input_dict = {key: None for key in config.input_dict}
        # Context manager instead of json.load(open(...)) — don't leak the handle.
        with open(config.gensim_word2index, 'r') as f:
            self.word2index = json.load(f)
        self.shuffle = config.shuffle
        self.reshuffle = config.shuffle and config.epoch_reshuffle
        if self.shuffle:
            random.shuffle(self.data)

    def __len__(self):
        """Number of full batches per epoch."""
        return self.batch_len

    def __iter__(self):
        return self

    def __next__(self):
        """Return the next batch as a dict of ``torch.long`` tensors."""
        if self.index == self.batch_len:
            # Epoch exhausted: rewind, and reshuffle for the next epoch if asked.
            self.index = 0
            if self.reshuffle:
                random.shuffle(self.data)
            raise StopIteration
        batch_data = self.data[self.index * self.batch_size: (self.index + 1) * self.batch_size]
        self.index += 1
        # Clear any extra keys so stale tensors never leak between batches.
        for key in self.input_dict:
            self.input_dict[key] = None
        sentence_indexs, user_ids, item_ids, rating_nums, target_indexs = [], [], [], [], []
        pad_index = self.word2index.get('<PAD>')
        for data in batch_data:
            sentence_indexs.append(data['reviewText'])
            # Language-model target: input shifted left by one, <PAD> appended.
            target_indexs.append(data['reviewText'][1:] + [pad_index])
            user_ids.append(data['reviewerID'])
            item_ids.append(data['asin'])
            rating_nums.append(data['overall'])
        # torch.tensor with an explicit dtype replaces the legacy
        # torch.Tensor(...).long().detach() chain; fresh tensors built from
        # Python lists carry no autograd graph, so detach() was a no-op.
        self.input_dict['sentence_index'] = torch.tensor(sentence_indexs, dtype=torch.long)
        self.input_dict['target_index'] = torch.tensor(target_indexs, dtype=torch.long)
        self.input_dict['user_id'] = torch.tensor(user_ids, dtype=torch.long)
        self.input_dict['item_id'] = torch.tensor(item_ids, dtype=torch.long)
        self.input_dict['rating_num'] = torch.tensor(rating_nums, dtype=torch.long)
        return self.input_dict

    def __getitem__(self, item):
        """Random access to a raw (un-tensorized) record."""
        return self.data[item]


def filter_useful_data(config):
    """Filter the raw review dump down to usable records.

    Reads ``config.origin_data`` (one JSON object per line), keeps only
    records that have all required keys, tokenizes the review text, and
    drops reviews containing raw HTML anchors or exceeding
    ``config.max_sequence_len`` tokens.  Writes the survivors, one JSON
    object per line, to ``config.useful_data``.
    """
    must_keys = ('reviewText', 'overall', 'reviewerID', 'asin')
    max_sentence_length = config.max_sequence_len

    def useful_data_check(data_map):
        """Return a trimmed, tokenized record dict, or None if unusable."""
        # None is the idiomatic "rejected" sentinel (the original returned
        # False, mixing bool and dict return types).
        if any(key not in data_map for key in must_keys):
            return None
        useful_map = {key: data_map[key] for key in must_keys}
        # Normalize the text to space-separated tokens.
        useful_map['reviewText'] = ' '.join(word_tokenize(useful_map['reviewText']))
        # '</a>' indicates leftover HTML markup — drop such reviews.
        if '</a>' in useful_map['reviewText']:
            return None
        if len(useful_map['reviewText'].split()) > max_sentence_length:
            return None
        return useful_map

    useful_data = []
    with open(config.origin_data, 'r') as f:
        for line in f:
            used_data = useful_data_check(json.loads(line))
            if used_data is not None:
                useful_data.append(used_data)
    with open(config.useful_data, 'w') as f:
        for record in tqdm(useful_data):
            f.write(json.dumps(record) + '\n')
    print('Write useful data ', len(useful_data), ' to file ', config.useful_data)


def Word2Vec_transformer(config):
    """Build the padded plain-text corpus used to train word2vec.

    Reads the tokenized reviews from ``config.useful_data``, wraps each one
    with '<SOS>'/'<EOS>' markers, right-pads every sentence to the length of
    the longest review with '<PAD>', and writes one space-joined sentence
    per line to ``config.word2vec_data``.
    """
    origin_sentences = []
    max_sentence_len = 0
    with open(config.useful_data, 'r') as f:
        for line in f:
            tokens = json.loads(line)['reviewText'].split()
            if len(tokens) > max_sentence_len:
                max_sentence_len = len(tokens)
            origin_sentences.append(tokens)
    print('Max Sentence Length is ', max_sentence_len)
    # --------------------------Add SOS and EOS and PAD------------------------------------
    for idx in tqdm(range(len(origin_sentences))):
        tokens = origin_sentences[idx]
        padding = ['<PAD>'] * (max_sentence_len - len(tokens))
        origin_sentences[idx] = ['<SOS>'] + tokens + ['<EOS>'] + padding
    print('Add <Start Of Sentence> and <End Of Sentence> and <PADding> tag, max sequence len ', max_sentence_len + 2)
    # --------------------------write to txt file------------------------------------------
    with open(config.word2vec_data, 'w') as f:
        for sentence in tqdm(origin_sentences):
            f.write(' '.join(sentence) + '\n')
    print('Write word2vec train data to ', config.word2vec_data)


def count_sentence_len(config):
    """Histogram review lengths in buckets of ``config.sequence_len_step``.

    Counts the spaces in each tokenized review (space count == token count
    minus one for space-joined text), buckets the counts, prints the first
    50 buckets, and saves/plots the distribution to
    ``config.sentence_len_count_img``.
    """
    sentence_len_to_count = {}
    count_step = config.sequence_len_step
    with open(config.useful_data, 'r') as f:
        for line in f:
            bucket = json.loads(line)['reviewText'].count(' ') // count_step
            sentence_len_to_count[bucket] = sentence_len_to_count.get(bucket, 0) + 1
    output_list = sorted(sentence_len_to_count.items())
    print('Output list\'s first 50 item\n',
          [(bucket * count_step, (bucket + 1) * count_step, count)
           for bucket, count in output_list[:50]])
    plt.figure()
    plt.plot([bucket for bucket, _ in output_list],
             [count for _, count in output_list])
    plt.savefig(config.sentence_len_count_img)
    plt.show()


def Ordered_transformer(config):
    """Convert the filtered reviews into fully numeric training records.

    Reads ``config.useful_data`` line by line, maps reviewer/item string ids
    to dense integer ids (0 is reserved for '<UNK>'), casts ratings to int,
    and encodes each review as a fixed-length list of word indices
    ('<SOS>' + tokens + '<EOS>' + '<PAD>' padding, total length
    ``config.max_sequence_len + 2``).  Writes one JSON record per line to
    ``config.processed_data``.

    Returns:
        (user_count, item_count, vocab_num) — id-table sizes, where user and
        item counts include the reserved '<UNK>' slot.
    """
    # Context manager instead of json.load(open(...)) — don't leak the handle.
    with open(config.gensim_word2index, 'r') as f:
        word2index = json.load(f)
    max_sentence_len = config.max_sequence_len
    # Hoist the fallback index out of the per-word loop.
    pad_index = word2index['<PAD>']

    def Process_Sentence(sentence):
        """Encode one space-separated sentence as a padded list of indices."""
        sentence_split = sentence.split()
        sentence_pad = ['<SOS>'] + sentence_split + ['<EOS>'] \
                       + ['<PAD>'] * (max_sentence_len - len(sentence_split))
        # Out-of-vocabulary words fall back to the <PAD> index.
        return [word2index.get(word, pad_index) for word in sentence_pad]

    user2index, item2index = {'<UNK>': 0}, {'<UNK>': 0}
    user_count, item_count, vocab_num = 1, 1, len(word2index)
    with open(config.useful_data, 'r') as f, open(config.processed_data, 'w') as fw:
        for line in f:
            line_data = json.loads(line)
            # Assign the next free integer id the first time a user/item is seen.
            if line_data['reviewerID'] not in user2index:
                user2index[line_data['reviewerID']] = user_count
                user_count += 1
            if line_data['asin'] not in item2index:
                item2index[line_data['asin']] = item_count
                item_count += 1
            # Rewrite the record in numeric form and persist it.
            line_data['asin'] = item2index[line_data['asin']]
            line_data['reviewerID'] = user2index[line_data['reviewerID']]
            line_data['overall'] = int(line_data['overall'])
            line_data['reviewText'] = Process_Sentence(line_data['reviewText'])
            fw.write(json.dumps(line_data) + '\n')
    print('Write new data over, user count {}, item count {}, vocab count {}, new file {}'.format(
        user_count, item_count, vocab_num, config.processed_data
    ))
    return user_count, item_count, vocab_num


if __name__ == '__main__':
    # Example/manual test driver — the real pipeline entry point lives in main.py.
    cfg = Config()
    cfg.max_sequence_len = 500000  # effectively no length cap for this exploratory run
    filter_useful_data(cfg)
    # conclusion we use sentence whose length is less than 64
    count_sentence_len(cfg)
    # Word2Vec_transformer(cfg)
