import torch
from torch.utils.data import Dataset
from torchvision import transforms
import os
from PIL import Image
import numpy as np
import math


class Word_data(Dataset):
    """Dataset of tokenized report descriptions read from a '|,|'-delimited file.

    Each line has the form ``report_id|,|word_ids|,|label_area_ids`` where the
    third (label) column is optional for inference data. Word-id sequences are
    padded/truncated to a fixed length; labels become 17-dim multi-hot vectors.

    Token conventions observed in this file: 858 = padding id, 859 = start-of-
    sequence id, 860 = end-of-sequence id.
    """

    def __init__(self, csv_path, is_train=True, max_length=110, idx=None, need_head_tail=False):
        """Load and preprocess the dataset.

        Args:
            csv_path: path to the '|,|'-delimited data file.
            is_train: when True, every line is expected to carry a label column
                and ``__getitem__`` returns ``(report_id, words, label)``.
            max_length: fixed sequence length after padding/truncation.
            idx: optional ``np.ndarray`` of row indices used to subset the
                dataset (e.g. a cross-validation fold). Class weights are only
                returned/stored on this path (kept for backward compatibility).
            need_head_tail: when True, wrap each sequence with start id 859 and
                end id 860, giving tensors of length ``max_length + 2``.
        """
        line_list, class_weights = self.get_csv_data(csv_path, idx)
        self.class_weights = class_weights
        self.rpt_id_list = []
        self.data_list = []
        self.label_list = []
        for line in line_list:
            if is_train:
                report_id, words, label = line
                self.label_list.append(label)
            else:
                report_id, words = line
            word_vec_array = self.get_word_vec_array(words, max_length, need_head_tail)
            self.rpt_id_list.append(report_id)
            self.data_list.append(word_vec_array)
        self.is_train = is_train
        self.max_length = max_length
        self.rpt_id_list = np.array(self.rpt_id_list)
        self.data_list = np.array(self.data_list)
        self.label_list = np.array(self.label_list)
        if isinstance(idx, np.ndarray):
            # Keep only the rows of the requested fold.
            self.rpt_id_list = self.rpt_id_list[idx]
            self.data_list = self.data_list[idx]
            if is_train:
                self.label_list = self.label_list[idx]

    def __len__(self):
        """Return the number of samples."""
        return len(self.data_list)

    def __getitem__(self, index):
        """Return ``(report_id, words, label)`` in training mode, else ``(report_id, words)``."""
        report_id = self.rpt_id_list[index]
        words_list = self.data_list[index]
        if self.is_train:
            label = self.label_list[index]
            return report_id, words_list, label
        return report_id, words_list

    def get_csv_data(self, csv_path, idx):
        """Parse the data file.

        Returns:
            (data_list, class_weights) where data_list holds
            ``(report_id, word_strings, multi_hot_label)`` tuples (the label is
            omitted for unlabeled lines), and class_weights is ``1 / count``
            per class, index-aligned with class ids — only computed when
            ``idx`` is an ``np.ndarray`` (backward-compatible gate), else None.
        """
        data_list = []
        label_count = {}
        with open(csv_path, 'r') as file:
            for line in file:
                groups = line.strip().split('|,|')
                report_id = groups[0]
                words = groups[1].split(' ')
                if len(groups) >= 3:
                    # BUGFIX: labels were only parsed when idx was an ndarray,
                    # leaving raw strings in the idx=None training path.
                    areas = groups[2].split(' ')
                    # BUGFIX: np.int was removed in NumPy >= 1.24; use np.int64.
                    label = np.zeros(17, dtype=np.int64)
                    for area_id in areas:
                        if area_id != '':
                            label[int(area_id)] = 1
                            label_count[area_id] = label_count.get(area_id, 0) + 1
                    data_list.append((report_id, words, label))
                else:
                    data_list.append((report_id, words))
        if isinstance(idx, np.ndarray):
            # BUGFIX: sort class ids numerically — lexicographic string order
            # puts '10' before '2' and misaligns weights with class indices.
            counts = np.array(
                [count for _, count in sorted(label_count.items(), key=lambda kv: int(kv[0]))],
                dtype=np.int32,
            )
            # Inverse-frequency weighting; every key in label_count has count >= 1.
            class_weights = 1 / counts
            return data_list, class_weights
        return data_list, None

    def get_word_vec_array(self, words, max_length, need_head_tail=False):
        """Convert word-id strings into a fixed-length LongTensor.

        Pads with id 858 up to ``max_length`` or truncates down to it; when
        ``need_head_tail`` is True the result is wrapped with 859/860 and has
        length ``max_length + 2``.
        """
        ids = [int(word) for word in words if word != '']
        if len(ids) < max_length:
            ids += [858] * (max_length - len(ids))
        elif len(ids) > max_length:
            ids = ids[:max_length]
        if need_head_tail:
            ids = [859] + ids + [860]
        return torch.LongTensor(ids)


if __name__ == "__main__":
    # Quick manual smoke test against the raw training file.
    # BUGFIX: the original referenced TF_IDF_data, which is not defined in
    # this module — the dataset class here is Word_data.
    data = Word_data(r'../data/track1_round1_train_20210222.csv', is_train=True)
    print(data[1])
