import os
import json
import random
from loguru import logger
from typing import List, Dict


class Processor:
    """Build a labeled intent-recognition dataset by merging several raw datasets.

    Each entry in ``file_info`` describes one source file:
        - 'path':  path to a '.json' (JSON array) or '.txt' (JSON-lines) file
        - 'query': key in each raw record that holds the query text
        - 'label': label assigned to every sample taken from this file
    """

    def __init__(self, file_info: List[Dict[str, str]], save_file_name: str, max_samples: int = 10000):
        # Validate file types up front so we fail before any processing starts.
        self.__file_type_config = ['.json', '.txt']
        for info_dict in file_info:
            # BUG FIX: read the 'path' key explicitly (process() does the same)
            # instead of relying on dict insertion order via list(values())[0].
            path = info_dict.get('path')
            if not path or not path.endswith(tuple(self.file_type_config)):
                raise ValueError(f'File type error, only {self.file_type_config} is supported')

        self.file_info = file_info
        self.save_file_name = save_file_name
        self.max_samples = max_samples

    @property
    def file_type_config(self) -> List[str]:
        """Supported source-file extensions (read-only view of the config)."""
        return self.__file_type_config

    def __call__(self, result: str = 'Done!'):
        """Run the full pipeline (process + save), then log ``result``."""
        self.save(self.process(), self.file_info, self.save_file_name)
        logger.info(result)

    def process(self) -> List[Dict[str, str]]:
        """Read every source dataset, label its samples, subsample, and merge.

        Returns:
            List[Dict[str, str]]: shuffled records of the form
            ``{'query': <text>, 'label': <label>}``, at most ``max_samples``
            per source file.
        """
        mix_data = []
        for info_dict in self.file_info:
            path = info_dict.get('path')
            with open(path, 'r', encoding='utf-8') as data_file:
                # '.json' files hold one JSON array; '.txt' files are JSON lines.
                if path.endswith('.json'):
                    data: List[Dict[str, str]] = json.load(data_file)
                else:
                    # BUG FIX: annotation was the invalid `List[str[Dict[str, str]]]`;
                    # readlines() returns plain strings.
                    all_lines: List[str] = data_file.readlines()
                    # BUG FIX: filter on line.strip() — a bare '\n' is truthy, so
                    # the old `if line` check let blank lines reach json.loads.
                    data: List[Dict[str, str]] = [json.loads(line) for line in all_lines if line.strip()]

                # Re-label: keep only the query text plus the configured label.
                new_data = []
                for org_dict in data:
                    new_dict = {
                        'query': org_dict[info_dict.get('query')],
                        'label': info_dict.get('label')
                    }
                    new_data.append(new_dict)

            # Sample at most max_samples records from each source file.
            new_data = random.sample(new_data, min(self.max_samples, len(new_data)))
            mix_data.extend(new_data)

        # Shuffle so positive and negative samples are interleaved.
        random.shuffle(mix_data)

        return mix_data

    @staticmethod
    def save(dataset: List[Dict[str, str]], file_info: List[Dict[str, str]], file_name: str):
        """Write ``dataset`` as JSON into the directory of the first source file.

        Args:
            dataset (List[Dict[str, str]]): records to write
            file_info (List[Dict[str, str]]): source-file info; the directory of
                ``file_info[0]['path']`` determines the output directory
            file_name (str): name of the output file
        """
        save_path = os.path.dirname(file_info[0].get('path'))

        # BUG FIX: os.path.exists() never raises FileNotFoundError, so the old
        # try/except never created a missing directory. Create it explicitly;
        # an empty dirname means the current directory, which always exists.
        if save_path:
            os.makedirs(save_path, exist_ok=True)

        save_path = os.path.join(save_path, file_name)
        with open(save_path, 'w', encoding='utf-8') as f:
            json.dump(dataset, f, ensure_ascii=False, indent=4)


if __name__ == '__main__':
    # One entry per raw dataset: where it lives, which key holds the query
    # text, and the intent label to attach to every sample from that file.
    sources = [
        {'path': 'dataset/nonmedical.txt', 'query': 'instruction', 'label': 'nonmed'},
        {'path': 'dataset/qa.json', 'query': 'question', 'label': 'med'},
    ]

    # Build the processor and immediately run the full pipeline.
    Processor(sources, 'intention.json', max_samples=3000)(
        'Dataset for intention recognition generated successfully!'
    )
