from torch.utils.data import Dataset, random_split

import os
import re
import json
import pandas as pd

from PIL import Image
import torchvision.transforms as transforms


from model.TextEncoder import TextEncoder
from model.ImageEncoder import ImageEncoder

# N24News section names, in label order: index in this list == class label.
_N24NEWS_SECTIONS = [
    'Health',
    'Books',
    'Science',
    'Art & Design',
    'Television',
    'Style',
    'Travel',
    'Media',
    'Movies',
    'Food',
    'Dance',
    'Well',
    'Real Estate',
    'Fashion & Style',
    'Economy',
    'Technology',
    'Sports',
    'Your Money',
    'Theater',
    'Education',
    'Opinion',
    'Automobiles',
    'Music',
    'Global Business',
]

# Mapping from N24News section name to its integer class label (24 classes).
label_n24news = {section: idx for idx, section in enumerate(_N24NEWS_SECTIONS)}

def format_text(content):
    """Normalize raw document text for tokenization.

    Replaces punctuation and other non-alphabetic characters with spaces,
    drops single letters surrounded by whitespace, and lowercases.

    NOTE(review): the whitespace collapse runs *before* the non-alphabetic
    substitution, so characters removed by the latter (digits, etc.) can
    still leave runs of spaces in the output. Kept as-is to preserve the
    existing preprocessing behavior.
    """
    # Replace common punctuation/symbols with spaces.
    for c in '<>/\\+=-_[]{}\'\";:.,()*&^%$#@!~`':
        content = content.replace(c, ' ')
    # Collapse runs of whitespace. Raw string fixes the invalid escape
    # sequence "\s" in a plain string (DeprecationWarning, and a
    # SyntaxError in future Python versions).
    content = re.sub(r"\s\s+", ' ', content)
    # Replace any remaining non-letter character (digits, etc.) with a space.
    content = re.sub(r'[^a-zA-Z]', ' ', content)
    # Drop isolated single letters (e.g. stray "a", "s" left by the steps above).
    content = re.sub(r"\s+[a-zA-Z]\s+", ' ', content)
    return content.lower().replace("\n", " ")

def get_transforms():
    """Return the image augmentation pipeline applied before image tokenization."""
    augmentations = [transforms.RandomHorizontalFlip()]
    return transforms.Compose(augmentations)

class MMDataset(Dataset):
    """Multimodal (image + text) classification dataset for Food101 / N24News.

    Each item yields: image pixel values, text input ids, token type ids
    (0 for RoBERTa-style tokenizers, which do not produce them), the
    attention mask, and the integer class label.
    """

    def __init__(self, args, labels_filepath):
        self.args = args
        self.labels_filepath = labels_filepath

        # Food101 ships a CSV index; N24News ships a JSON list of articles.
        if args.dataset in ['Food101']:
            self.df = pd.read_csv(os.path.join(args.data_dir, labels_filepath),
                                  dtype={'id': str, 'text': str, 'annotation': str, 'label': int})
        elif args.dataset in ['N24News']:
            # Context manager closes the file (the original leaked the handle).
            with open(os.path.join(args.data_dir, labels_filepath), 'r', encoding='utf8') as f:
                self.df = json.load(f)

        self.text_tokenizer = TextEncoder(pretrained_dir=args.pretrained_dir, text_encoder=args.text_encoder).get_tokenizer()
        self.image_tokenizer = ImageEncoder(pretrained_dir=args.pretrained_dir, image_encoder=args.image_encoder).get_tokenizer()

        self.max_length = args.max_length  # Truncation/padding length; set according to the text.
        self.transforms = get_transforms()

    def __len__(self):
        return len(self.df)

    def __getitem__(self, index):
        if self.args.dataset in ['Food101']:
            # Column order of the CSV index: id, text, annotation, label.
            sample_id, text, annotation, label = self.df.loc[index]
            # BUGFIX: the original referenced the nonexistent attribute
            # ``self.labels``; the labels file stem is ``self.labels_filepath``
            # (``[:-4]`` strips the file extension).
            img_path = self.args.data_dir + '/images/' + self.labels_filepath[:-4] + '/' + annotation + '/' + sample_id
            text_path = self.args.data_dir + '/texts_txt/' + annotation + '/' + sample_id.replace(".jpg", ".txt")
            with open(text_path) as f:
                text = format_text(f.read())
        elif self.args.dataset in ['N24News']:
            # Which text field feeds the model is configurable; only the
            # full article is cleaned (RoBERTa gets the raw article).
            if self.args.text_type in ['headline']:
                text = self.df[index]['headline']
            elif self.args.text_type in ['caption']:
                text = self.df[index]['caption']
            elif self.args.text_type in ['abstract']:
                text = self.df[index]['abstract']
            else:
                text = self.df[index]['article']
                if self.args.text_encoder not in ['roberta_base']:
                    text = format_text(text)
            img_path = self.args.data_dir + '/imgs/' + self.df[index]['image_id'] + '.jpg'
            label = label_n24news[self.df[index]['section']]

        # Tokenize text to fixed-length ids with padding/truncation.
        text_tokens = self.text_tokenizer(text, max_length=self.max_length, add_special_tokens=True, truncation=True,
                                     padding='max_length', return_tensors="pt")
        image = Image.open(os.path.join(img_path)).convert("RGB")
        image = self.transforms(image)
        img_inputs = self.image_tokenizer(images=image, return_tensors="pt").pixel_values

        # RoBERTa tokenizers emit no token_type_ids; substitute 0 so the
        # tuple shape stays uniform for downstream collation.
        if 'roberta' in self.args.text_encoder:
            return img_inputs, text_tokens['input_ids'], 0, text_tokens['attention_mask'], label
        else:
            return img_inputs, text_tokens['input_ids'], text_tokens['token_type_ids'], text_tokens[
                'attention_mask'], label

def split_datasets(args):
    """Build train/valid/test datasets and split train/valid across clients.

    Returns (train_subsets, valid_subsets, test_set), where the first two
    are lists of ``args.num_clients`` random, near-equal subsets.

    Raises:
        ValueError: if ``args.dataset`` is not supported here.
    """
    if args.dataset in ['N24News']:
        train_set = MMDataset(args, 'news/nytimes_train.json')
        valid_set = MMDataset(args, 'news/nytimes_dev.json')
        test_set = MMDataset(args, 'news/nytimes_test.json')
    else:
        # The original fell through to an unbound-name NameError here;
        # fail with a clear message instead.
        raise ValueError(f"Unsupported dataset for split_datasets: {args.dataset!r}")

    train_subsets = random_split(train_set, get_split_sizes(len(train_set), args.num_clients))
    valid_subsets = random_split(valid_set, get_split_sizes(len(valid_set), args.num_clients))

    return train_subsets, valid_subsets, test_set


def get_split_sizes(dataset_length, num_clients):
    """Split ``dataset_length`` items into ``num_clients`` near-equal part sizes.

    The first ``dataset_length % num_clients`` parts receive one extra item,
    so the returned sizes always sum to ``dataset_length``.
    """
    base, extra = divmod(dataset_length, num_clients)
    return [base + 1] * extra + [base] * (num_clients - extra)

def get_data_ratio(args, client_id_list):
    """Return each client's data share relative to the clients in ``client_id_list``.

    The returned list has ``args.num_clients`` entries; entry ``i`` is
    client ``i``'s split size divided by the total number of training
    samples held by the clients listed in ``client_id_list``.

    Raises:
        ValueError: if ``args.dataset`` is not supported.
    """
    if args.dataset not in ['N24News']:
        # The original returned None implicitly for other datasets;
        # fail loudly instead so misconfiguration surfaces here.
        raise ValueError(f"Unsupported dataset for get_data_ratio: {args.dataset!r}")

    labels_path = os.path.join(args.data_dir, 'news/nytimes_train.json')
    # Context manager closes the file (the original leaked the handle).
    with open(labels_path, 'r', encoding='utf8') as f:
        train_set_length = len(json.load(f))

    split_sizes = get_split_sizes(train_set_length, args.num_clients)

    client_sample_num = sum(split_sizes[i] for i in client_id_list)

    return [split_sizes[i] / client_sample_num for i in range(args.num_clients)]
    