import os
from collections import defaultdict
from PIL import Image
from . import DATASET
from lib.base import BaseDataset
import torch
import sys
import numpy as np
from torchvision.transforms import transforms


@DATASET.register('cub200_part')
class CUBPartDataset(BaseDataset):
    """CUB-200-2011 dataset that yields each image together with head and
    body crops derived from the annotated part locations.

    The 15 CUB part annotations are split into a "head" group and a "body"
    group; for each group an axis-aligned bounding box (padded by
    ``CROP_MARGIN`` pixels on every side) around the *visible* parts is
    cropped from the original image.
    """

    # Padding (in pixels) added on every side of a part bounding box.
    CROP_MARGIN = 30

    def __init__(self, cfg, type):
        # NOTE: parameter name 'type' shadows the builtin, but it is kept
        # for backward compatibility with existing callers / BaseDataset.
        self.data_path = cfg.DATASET.DATAPATH
        # {'path': {part_id: (x, y), ...}} — visible parts only.
        self.path2parts = self._get_path2parts()

        # Part ids of the head / body groups (CUB parts.txt numbering, 1-15).
        self.head_parts = [2, 5, 6, 7, 10, 11, 15]
        self.body_parts = [1, 3, 4, 8, 9, 12, 13, 14]
        self.origin_img_resize = (224, 224)

        super().__init__(cfg, type)

    def _get_split_categories(self) -> dict:
        """Split categories into TRAIN/VALIDATE/TEST by index parity.

        Even indices go to TRAIN (1/2 of classes); among the odd ones,
        i % 4 == 1 goes to VALIDATE and i % 4 == 3 to TEST (1/4 each).
        """
        categories = list(self.category2path)
        category_split = defaultdict(list)
        for i, cate in enumerate(categories):
            if i % 2 == 0:
                category_split['TRAIN'].append(cate)
            if i % 4 == 1:
                category_split['VALIDATE'].append(cate)
            if i % 4 == 3:
                category_split['TEST'].append(cate)
        return category_split

    def _get_category2path(self):
        """Map each category (class folder name) to its list of image paths.

        Joins ``image_class_labels.txt`` (image id -> 1-based class id) with
        ``images.txt`` (image id -> relative path), both ordered by image id.
        """
        # Sort the folder listing: os.listdir() order is arbitrary, while the
        # 1-based class ids in image_class_labels.txt follow the sorted class
        # names (folders carry a zero-padded numeric prefix, e.g. "001.").
        # NOTE(review): fixes a silent label-shuffling bug; previously the
        # mapping depended on filesystem enumeration order.
        labels = sorted(os.listdir(os.path.join(self.data_path, 'images')))

        category2path = defaultdict(list)
        id2category = {i: labels[i] for i in range(len(labels))}

        with open(os.path.join(self.data_path, 'image_class_labels.txt')) as f1:
            with open(os.path.join(self.data_path, 'images.txt')) as f2:
                for line1, line2 in zip(f1, f2):
                    _, class_id = line1.split()
                    _, path = line2.split()
                    category2path[id2category[int(class_id) - 1]].append(
                        os.path.join(self.data_path, 'images', path))
        return category2path

    # The code below was added for experiment1; comment it out if not needed.
    def _get_path2parts(self):
        """Parse part_locs.txt into {image_path: {part_id: (x, y)}}.

        Rows are ``<image_id> <part_id> <x> <y> <visible>``; rows whose
        visibility flag is 0 carry placeholder coordinates (0, 0) and are
        skipped so they cannot corrupt the crop bounding boxes.
        """
        path2parts = defaultdict(dict)

        id2path = {}
        with open(os.path.join(self.data_path, 'images.txt')) as f1:
            for each in f1:
                no, path = each.split()
                id2path[no] = os.path.join(self.data_path, 'images', path)

        with open(os.path.join(self.data_path, 'parts', 'part_locs.txt')) as f2:
            for each in f2:
                no, part, x, y, v = each.split()
                if v == '0':
                    # Part not visible in this image — coordinates are
                    # meaningless placeholders; keep visible parts only.
                    continue
                path2parts[id2path[no]][int(part)] = (int(float(x)), int(float(y)))

        return path2parts

    def _part_bbox(self, path, part_ids):
        """Padded bounding box around the given part ids of an image.

        Returns a (left, upper, right, lower) tuple suitable for
        ``Image.crop``, or ``None`` when none of the parts is visible.
        """
        xs, ys = [], []
        for part, (x, y) in self.path2parts[path].items():
            if part in part_ids:
                xs.append(x)
                ys.append(y)
        if not xs:
            return None
        m = self.CROP_MARGIN
        return (min(xs) - m, min(ys) - m, max(xs) + m, max(ys) + m)

    def _read_image(self, id_path):
        """Load one sample: the original image plus head and body crops.

        Args:
            id_path: ``(sample_id, image_path)`` pair.

        Returns:
            For VALIDATE/TEST: ``(id, tensor)`` where the tensor stacks the
            transformed [origin, head, body] images.
            Otherwise: ``(id, [PIL images])`` so the training pipeline can
            apply its own augmentation.
        """
        sample_id, path = id_path

        origin_image = Image.open(path).convert('RGB')

        head_box = self._part_bbox(path, self.head_parts)
        body_box = self._part_bbox(path, self.body_parts)

        # Fall back to the whole image when a group has no visible parts —
        # previously min/max stayed at +/-inf and crop() raised.
        head_image = origin_image.crop(head_box) if head_box else origin_image.copy()
        body_image = origin_image.crop(body_box) if body_box else origin_image.copy()

        if self.type in ['VALIDATE', 'TEST']:
            origin_image = self.transforms(origin_image)
            head_image = self.transforms(head_image)
            body_image = self.transforms(body_image)

            return (sample_id, torch.stack([origin_image, head_image, body_image]))

        else:
            return (sample_id, [origin_image, head_image, body_image])