# -*- coding: utf-8 -*-
import os
import math
import codecs
import random
import numpy as np
from glob import glob
from PIL import Image

from keras.utils import np_utils, Sequence
from sklearn.model_selection import train_test_split

from models.resnet50 import preprocess_input
from config import Config


class BaseSequence(Sequence):
    """
    Base batch generator: each iteration returns one (batch_x, batch_y) pair.

    A BaseSequence can be passed directly as the `generator` argument of
    `fit_generator`, which wraps it in a multi-process data pipeline and
    guarantees that within one epoch no sample is drawn twice.
    """

    # Label-smoothing coefficient applied to the one-hot targets in __getitem__.
    LABEL_SMOOTHING = 0.05
    # Margin (pixels per side) added before the center crop in preprocess_img;
    # with img_size=224 this reproduces the original 256 -> 224 pipeline.
    CROP_MARGIN = 16

    def __init__(self, img_paths, labels, batch_size, img_size):
        """
        :param img_paths: list of image file paths
        :param labels: per-sample label rows (e.g. one-hot), same length as img_paths
        :param batch_size: number of samples per batch
        :param img_size: [height, width]; must be square
        """
        assert len(img_paths) == len(labels), "len(img_paths) must equal to len(labels)"
        assert img_size[0] == img_size[1], "img_size[0] must equal to img_size[1]"
        # Store paths and labels side by side so one shuffle keeps them aligned.
        self.x_y = np.hstack((np.array(img_paths).reshape(len(img_paths), 1), np.array(labels)))
        self.batch_size = batch_size
        self.img_size = img_size

    def __len__(self):
        # Number of batches per epoch; the last batch may be smaller.
        return math.ceil(len(self.x_y) / self.batch_size)

    @staticmethod
    def center_img(img, size=None, fill_value=255):
        """
        Center `img` on a square background of side `size` filled with
        `fill_value` (defaults to the larger image dimension).
        """
        h, w = img.shape[:2]
        if size is None:
            size = max(h, w)
        shape = (size, size) + img.shape[2:]
        background = np.full(shape, fill_value, np.uint8)
        center_x = (size - w) // 2
        center_y = (size - h) // 2
        background[center_y:center_y + h, center_x:center_x + w] = img
        return background

    def preprocess_img(self, img_path):
        """
        Load one image and preprocess it for the backbone.

        Pipeline: resize to a (img_size + 2*margin) square, take the central
        img_size crop, then apply the backbone's `preprocess_input`.
        """
        crop = self.img_size[0]
        margin = self.CROP_MARGIN
        side = crop + 2 * margin
        img = Image.open(img_path)
        img = img.resize((side, side))
        img = img.convert('RGB')
        img = np.array(img)
        img = img[margin:margin + crop, margin:margin + crop]
        img = preprocess_input(img)
        return img

    def __getitem__(self, idx):
        """Return batch `idx` as (images ndarray, smoothed one-hot labels)."""
        batch_x = self.x_y[idx * self.batch_size: (idx + 1) * self.batch_size, 0]
        batch_y = self.x_y[idx * self.batch_size: (idx + 1) * self.batch_size, 1:]
        batch_x = np.array([self.preprocess_img(img_path) for img_path in batch_x])
        batch_y = np.array(batch_y).astype(np.float32)
        # Label smoothing: y * (1 - eps) + eps / num_classes.
        eps = self.LABEL_SMOOTHING
        num_classes = batch_y.shape[1]
        batch_y = batch_y * (1.0 - eps) + eps / num_classes
        return batch_x, batch_y

    def on_epoch_end(self):
        """Shuffle the (path, label) rows together at the end of every epoch."""
        np.random.shuffle(self.x_y)

def data_flow(train_data_dir, batch_size, num_classes, input_size):  # need modify
    """Build the training and validation data generators.

    Scans `train_data_dir` for one-line `*.txt` label files of the form
    "<image_name>, <label>", pairs each with its image file, one-hot encodes
    the labels and splits 90/10 into two BaseSequence generators.
    """
    # Fail fast if the data directory is missing.
    if not os.path.exists(train_data_dir):
        raise Exception("数据目录不存在: {}".format(train_data_dir))

    label_files = glob(os.path.join(train_data_dir, '*.txt'))
    print("找到 {} 个标签文件".format(len(label_files)))

    if len(label_files) == 0:
        raise Exception("在目录 {} 中没有找到任何标签文件 (*.txt)".format(train_data_dir))

    random.shuffle(label_files)

    img_paths = []
    labels = []
    for file_path in label_files:
        try:
            # Each label file holds a single "image_name, label" line.
            with codecs.open(file_path, 'r', 'utf-8') as f:
                first_line = f.readline()
            parts = first_line.strip().split(', ')
            if len(parts) != 2:
                print('警告: {} 包含错误的标签格式'.format(os.path.basename(file_path)))
                continue
            image_name = parts[0]
            label_value = int(parts[1])
            full_path = os.path.join(train_data_dir, image_name)
            # Skip entries whose referenced image is absent.
            if not os.path.exists(full_path):
                print('警告: 图片文件不存在: {}'.format(full_path))
                continue
            img_paths.append(full_path)
            labels.append(label_value)
        except Exception as e:
            print('警告: 处理文件 {} 时出错: {}'.format(file_path, e))
            continue

    if not img_paths:
        raise Exception("没有找到有效的图片和标签对")

    print("成功加载 {} 个有效样本".format(len(img_paths)))

    labels = np_utils.to_categorical(labels, num_classes)
    split = train_test_split(img_paths, labels, test_size=0.1, random_state=0)
    train_img_paths, validation_img_paths, train_labels, validation_labels = split
    print('总样本数: %d, 训练样本: %d, 验证样本: %d' % (len(img_paths), len(train_img_paths), len(validation_img_paths)))

    square_size = [input_size, input_size]
    train_sequence = BaseSequence(train_img_paths, train_labels, batch_size, square_size)
    validation_sequence = BaseSequence(validation_img_paths, validation_labels, batch_size, square_size)
    return train_sequence, validation_sequence


if __name__ == '__main__':
    # Smoke-test the data pipeline: dump a few preprocessed batches to ./debug
    # so they can be inspected by eye.
    os.makedirs('./debug', exist_ok=True)

    # Build generators from the project configuration.
    train_sequence, validation_sequence = data_flow(
        train_data_dir=Config.TRAIN_DATA_DIR,
        batch_size=Config.BATCH_SIZE,
        num_classes=Config.NUM_CLASSES,
        input_size=Config.INPUT_SIZE
    )

    # Human-readable class names, indexed by label position.
    label_names = list(Config.GARBAGE_CATEGORIES.values())

    def _dump_batch(batch_data, batch_label, suffix):
        """Save every image of one batch as ./debug/<idx><suffix>_<class>.jpg."""
        for index, data in enumerate(batch_data):
            label_index = np.argmax(batch_label[index])
            category_name = (label_names[label_index]
                             if label_index < len(label_names)
                             else f'class_{label_index}')
            # Channels are flipped (::-1) before saving.
            # NOTE(review): `data` came through preprocess_input and is likely
            # float-typed; Image.fromarray expects uint8 — confirm this works.
            img = Image.fromarray(data[:, :, ::-1])
            img.save('./debug/%d%s_%s.jpg' % (index, suffix, category_name.replace('/', '_')))

    # Round 1: a raw batch; rounds 2-3: the same index after on_epoch_end
    # shuffles, to verify that shuffling actually changes the batch contents.
    batch_data, batch_label = train_sequence[5]
    _dump_batch(batch_data, batch_label, '')
    for round_no in (2, 3):
        train_sequence.on_epoch_end()
        batch_data, batch_label = train_sequence[5]
        _dump_batch(batch_data, batch_label, '_%d' % round_no)

    print('数据生成器测试完成！')
