# Import necessary modules
from abc import ABC, abstractmethod
from dataclasses import dataclass
import glob
import json
import logging
import os.path
import shutil

# Import custom exception for dataset import errors
from myapp.biz.dataset.exception import DatasetImportError
# Import dataset label type enumeration
from myapp.const.dataset import EnumDatasetLabelType
# Import unzip utility
from myapp.utils.zip_stream import unzip


# Module-level logger, one per module as per the logging convention.
log = logging.getLogger(__name__)


# Result record produced by every Importer.import_to() call.
@dataclass
class ImportResult:
    """Outcome of a dataset import.

    Attributes:
        num: Number of imported data entries.
        labeled: Whether the imported data is labeled.
    """
    num: int
    labeled: bool


# Abstract base class shared by all dataset importers.
class Importer(ABC):
    """Base class for dataset importers.

    Subclasses declare the class attributes ``data_type`` and
    ``label_type`` and implement :meth:`import_to`. Import progress is
    published through the ``status`` dict under the key ``'progress'``.
    """

    def __init__(self):
        # Per-instance mutable state; import_to() writes 'progress' here.
        self.status = {}

    @abstractmethod
    def import_to(self, files_path: list[str], dest_path: str) -> ImportResult:
        """Import the given source files into *dest_path*."""

    @classmethod
    def get_data_type(cls):
        """Return the subclass-declared data type."""
        return cls.data_type

    @classmethod
    def get_label_type(cls):
        """Return the subclass-declared label type."""
        return cls.label_type

    @property
    def progress(self):
        """Current import progress ratio; 0 before any work is done."""
        return self.status.get('progress', 0)


# Fallback importer for data types with no dedicated importer.
class OtherImporter(Importer):
    data_type = 'other'
    label_type = 'other'

    def import_to(self, files_path: list[str], dest_path: str):
        """Copy the uploaded files verbatim into *dest_path*.

        Args:
            files_path: Paths of the uploaded source files.
            dest_path: Destination directory; created if missing.

        Returns:
            ImportResult with num=None (the entry count of opaque files
            is unknown) and labeled=True.
        """
        # BUG FIX: the files are copied *into* dest_path, so dest_path
        # itself must exist — the old code only created its parent via
        # os.path.dirname(dest_path).
        os.makedirs(dest_path, exist_ok=True)
        # TODO: handle file name conflicts
        for file_path in files_path:
            filename = os.path.basename(file_path)
            # Uploaded names look like '<part>-<part>-<original name>';
            # drop the first two dash-separated parts to restore the
            # original name. NOTE(review): a name with fewer than three
            # dash parts yields '' here, so shutil.copy targets the
            # directory and keeps the full basename — confirm intended.
            dest_filename = ''.join(filename.split('-')[2:])
            shutil.copy(file_path, os.path.join(dest_path, dest_filename))

        return ImportResult(num=None, labeled=True)


# Importer for multi-round text SFT (Supervised Fine-Tuning) data.
class TextSftImporter(Importer):
    data_type = 'txt'
    label_type = 'Multiple Rounds of Text Question Answer'

    def validate(self, data):
        """Return True if *data* is a well-formed conversation record.

        A labeled sample looks like::

            {"conversations": [
                {"role": "system", "content": "..."},
                {"role": "user", "content": "..."},
                {"role": "assistant", "content": "..."}]}
        """
        # json.loads may yield a non-dict (list, number, ...); reject it
        # instead of raising AttributeError on .get below.
        if not isinstance(data, dict):
            return False
        conversations = data.get('conversations')
        if conversations is None:
            return False
        # Type check BEFORE len(): a non-sized value (e.g. an int) would
        # otherwise raise TypeError instead of failing validation.
        if isinstance(conversations, list) is False:
            return False
        if len(conversations) < 2:
            return False

        for item in conversations:
            # Each turn must be an object carrying 'role' and 'content'.
            if not isinstance(item, dict):
                return False
            role = item.get('role')
            content = item.get('content')
            if role is None or content is None:
                return False
            if role not in ['system', 'user', 'assistant']:
                return False

        return True

    def is_empty(self, data):
        """Return True when the record carries no conversation turns."""
        conversations = data.get('conversations')
        if conversations is None:
            return True
        if len(conversations) == 0:
            return True
        return False

    def import_to(self, files_path: list[str], dest_path: str):
        """Merge the given JSONL files into ``dest_path/result.jsonl``.

        Every non-blank line must be a JSON conversation record (see
        :meth:`validate`). Progress is published through ``self.status``
        as the fraction of total input bytes consumed.

        Raises:
            DatasetImportError: on malformed JSON, on records failing
                validation, or when no record was imported at all.
        """
        num = 0  # number of imported data entries

        dest_file_path = os.path.join(dest_path, 'result.jsonl')
        # BUG FIX: result.jsonl is written inside dest_path, so create
        # dest_path itself, not merely its parent directory.
        os.makedirs(dest_path, exist_ok=True)

        total_file_size = 0
        for file_path in files_path:
            total_file_size += os.path.getsize(file_path)

        file_size = 0
        with open(dest_file_path, 'w', encoding='utf-8') as dest_file:
            for file_path in files_path:
                line_index = 0
                file_name = os.path.basename(file_path)
                with open(file_path, encoding='utf-8') as file:
                    for line in file:
                        line_index += 1
                        file_size += len(line.encode('utf-8'))
                        # Guard: all input files may be zero bytes, which
                        # would make the ratio divide by zero.
                        if total_file_size > 0:
                            self.status['progress'] = file_size / total_file_size

                        # Skip blank lines.
                        line = line.strip()
                        if len(line) == 0:
                            continue

                        try:
                            data = json.loads(line)
                        except Exception:
                            log.error('json loads failed', exc_info=True)
                            raise DatasetImportError(f'文件格式不正确,{file_name}:{line_index}')

                        if self.validate(data) is False:
                            raise DatasetImportError(
                                f'数据格式不符合要求,{file_name}:{line_index}'
                            )

                        # Skip records with an empty conversation list.
                        if self.is_empty(data):
                            continue

                        dest_file.write(line + '\n')
                        num = num + 1
        if num == 0:
            raise DatasetImportError('数据集为空')
        return ImportResult(num=num, labeled=True)


# Importer for text pre-training (unlabeled corpus) data.
class TextPretrainImporter(Importer):
    data_type = 'txt'
    label_type = EnumDatasetLabelType.text_generation_pretrain.value  # text pre-training

    def validate(self, data):
        """Return True if *data* is a single-key record of the form
        ``{"text": "..."}``.
        """
        # json.loads may yield a non-dict (list, number, ...); reject it
        # instead of raising AttributeError on .get below.
        if not isinstance(data, dict):
            return False
        if len(data) != 1:
            return False
        text = data.get('text')
        if text is None:
            return False
        if isinstance(text, str) is False:
            return False
        return True

    def is_empty(self, data):
        """Return True when the record's text is missing or empty."""
        text = data.get('text')
        # Guard the missing-key case so len() never receives None.
        if text is None:
            return True
        if len(text) == 0:
            return True
        return False

    def import_to(self, files_path: list[str], dest_path: str):
        """Merge the given JSONL files into ``dest_path/result.jsonl``.

        Progress is published through ``self.status`` as the fraction of
        total input bytes consumed.

        Raises:
            DatasetImportError: on malformed JSON, on invalid records,
                or when the merged dataset is empty.
        """
        num = 0  # number of imported data entries

        dest_file_path = os.path.join(dest_path, 'result.jsonl')
        # BUG FIX: result.jsonl is written inside dest_path, so create
        # dest_path itself, not merely its parent directory.
        os.makedirs(dest_path, exist_ok=True)

        total_file_size = 0
        for file_path in files_path:
            total_file_size += os.path.getsize(file_path)

        file_size = 0
        with open(dest_file_path, 'w', encoding='utf-8') as dest_file:
            for file_path in files_path:
                with open(file_path, encoding='utf-8') as file:
                    for line in file:
                        file_size += len(line.encode('utf-8'))
                        # Guard: all input files may be zero bytes, which
                        # would make the ratio divide by zero.
                        if total_file_size > 0:
                            self.status['progress'] = file_size / total_file_size

                        # Skip blank lines.
                        line = line.strip()
                        if len(line) == 0:
                            continue

                        try:
                            data = json.loads(line)
                        except Exception:
                            log.error('json loads failed', exc_info=True)
                            raise DatasetImportError('文件格式不正确')

                        if self.validate(data) is False:
                            raise DatasetImportError(f'数据格式不符合要求,{file_path}')

                        # Skip records whose text is empty.
                        if self.is_empty(data):
                            continue

                        dest_file.write(line + '\n')
                        num = num + 1

        if num == 0:
            raise DatasetImportError('数据集为空')
        return ImportResult(num=num, labeled=True)


# Importer for VQA (Visual Question Answering) SFT data.
class VqaSftImporter(Importer):
    data_type = 'multiple'
    label_type = 'Multiple Rounds of Visual Question Answer'

    def import_to(self, files_path: list[str], dest_path: str):
        """Import a VQA dataset.

        Two layouts are supported:
          * a single zip archive (first entry of *files_path*) that
            contains a ``*.json`` annotation file, or
          * one or more directories, each holding a
            ``coco_cn_train_2k.json`` annotation file plus an
            ``images/`` folder.

        Raises:
            DatasetImportError: when the dataset is empty, malformed,
                or of an unsupported file type.
        """
        num = 0  # number of data entries

        file_path = files_path[0]
        if os.path.isfile(file_path):
            # Zip archive: extract into dest_path and read the first
            # JSON annotation file found there.
            # BUG FIX: create dest_path itself (extraction target), not
            # merely its parent directory.
            os.makedirs(dest_path, exist_ok=True)
            unzip(file_path, dest_path)

            json_files = glob.glob(os.path.join(dest_path, '*.json'))
            if not json_files:
                raise DatasetImportError('空的数据集')

            data_json = json_files[0]
            with open(data_json, encoding='utf-8') as file:
                data = json.load(file)
            if len(data) == 0:
                raise DatasetImportError('空的数据集')
            return ImportResult(num=len(data), labeled=True)
        elif os.path.isdir(file_path):
            # TODO(review): the annotation file name is hard-coded —
            # confirm every source directory actually uses this name.
            dest_file_path = os.path.join(dest_path, 'coco_cn_train_2k.json')
            # BUG FIX: create dest_path itself, not merely its parent.
            os.makedirs(dest_path, exist_ok=True)

            # Directory that receives the merged images.
            dest_image_path = os.path.join(dest_path, 'images')
            os.makedirs(dest_image_path, exist_ok=True)

            dest_json_data = []
            for file in files_path:
                # Merge the JSON annotation content of every source dir.
                with open(os.path.join(file, 'coco_cn_train_2k.json'), encoding='utf-8') as f:
                    try:
                        content = f.read()
                        data = json.loads(content)
                        dest_json_data = dest_json_data + data
                        # BUG FIX: count data ENTRIES (len of the parsed
                        # list), not characters of the raw JSON string.
                        num += len(data)
                    except Exception:
                        log.error('json loads failed', exc_info=True)
                        raise DatasetImportError('文件格式不正确')

                # Copy every regular file from the source images folder.
                source_dir = os.path.join(file, 'images/')
                # BUG FIX: os.listdir() with no argument listed the
                # current working directory; it must list source_dir.
                for item in os.listdir(source_dir):
                    source_item = os.path.join(source_dir, item)
                    destination_item = os.path.join(dest_image_path, item)
                    if os.path.isfile(source_item):
                        shutil.copy2(source_item, destination_item)

            # Write the merged JSON annotations to the destination file.
            with open(dest_file_path, 'w', encoding='utf-8') as dest_file:
                dest_file.write(json.dumps(dest_json_data))

            return ImportResult(num=num, labeled=True)
        else:
            raise DatasetImportError('不支持的文件类型')


# Factory mapping (data_type, label_type) pairs to importer classes.
class ImporterFactory:
    # NOTE: members are importer CLASSES, not instances — the original
    # annotation list[Importer] was wrong; instances are created lazily
    # in build_importer(). Annotations are quoted (forward references).
    def __init__(self, members: 'list[type[Importer]]'):
        """Register importer classes keyed by 'data_type-label_type'.

        Args:
            members: Importer subclasses (not instances) to register.
        """
        self.members = {}
        for item in members:
            typ = f'{item.get_data_type()}-{item.get_label_type()}'
            self.members[typ] = item

    def build_importer(self, data_type, label_type) -> 'Importer':
        """Instantiate the importer registered for the given pair.

        Falls back to a fresh OtherImporter for unknown combinations
        (logged as an error) so callers always get a usable importer.
        """
        typ = f'{data_type}-{label_type}'
        importer = self.members.get(typ)
        if importer is None:
            # Lazy %-style args: no string formatting unless emitted.
            log.error('unsupported data_type: %s, label_type: %s',
                      data_type, label_type)
            return OtherImporter()

        instance = importer()
        return instance


# Create an instance of the importer factory with the defined importer classes
factory = ImporterFactory([TextSftImporter, TextPretrainImporter, VqaSftImporter])