class L本地多文件导入__GPT2MediclQADataSet_txt_n(Dataset):
    """Multi-file, chunked JSON-lines dataset.

    Holds at most ``nraws`` samples in memory at a time; when the in-memory
    chunk is exhausted, the next chunk is read, transparently advancing
    through ``file_paths`` as each file runs out of lines.

    NOTE: ``initial()`` must be called once before the first ``__getitem__``.
    Samples are served in chunk order; the ``idx`` passed to ``__getitem__``
    is ignored (sequential, DataLoader-style consumption).
    """

    def __init__(self, file_paths, nraws, shuffle, tokenizer, max_len, data_dir, data_set_name, is_overwrite=False):
        """
        Args:
            file_paths: list of txt file paths, one JSON object per line
            nraws: number of samples to keep in memory at once
            shuffle: whether to shuffle each in-memory chunk
            tokenizer: tokenizer exposing ``convert_tokens_to_ids``
            max_len: maximum sample length
            data_dir: cache-file directory (kept for interface compatibility)
            data_set_name: dataset name (kept for interface compatibility)
            is_overwrite: whether to regenerate cache files
        """
        self.file_paths = file_paths
        # Total number of samples (lines) across all files.
        self.file_raws = self.get_files_raws(file_paths)
        self.tokenizer = tokenizer
        # content_id / title_id mark the article body ([Content]) and the
        # title ([Title]) so the model can tell them apart clearly.
        self.content_id = self.tokenizer.convert_tokens_to_ids("[Content]")
        self.title_id = self.tokenizer.convert_tokens_to_ids("[Title]")
        # [Space] stands in for literal spaces in titles: tokenizing would
        # drop them, and replacing them with punctuation would be misleading.
        self.space_id = self.tokenizer.convert_tokens_to_ids("[Space]")
        self.max_len = max_len
        self.nraws = nraws
        self.shuffle = shuffle

    def _read_chunk(self):
        """Read up to ``self.nraws`` JSON samples from the current file,
        advancing to the next file in ``self.file_paths`` whenever the
        current one is exhausted (EOF) or yields a blank separator line.
        Returns the (possibly short or empty) list of parsed samples."""
        samples = []
        while len(samples) < self.nraws:
            line = self.finput.readline()
            if line and line != '\n':
                # BUGFIX: was json.load() on a string in the refill path.
                samples.append(json.loads(line))
                continue
            # readline() returns '' at EOF (the original only tested for
            # '\n', so later files were never opened at end-of-file).
            if self.file_pi + 1 < len(self.file_paths):
                self.finput.close()  # BUGFIX: close exhausted handle
                self.file_pi += 1
                self.finput = open(self.file_paths[self.file_pi], 'r')
            else:
                break  # no more files to draw from
        return samples

    def initial(self):
        """Open the first file and load the first chunk into memory."""
        self.file_pi = 0
        self.finput = open(self.file_paths[self.file_pi], 'r')
        self.samples = self._read_chunk()
        self.current_sample_num = len(self.samples)
        self.index = list(range(self.current_sample_num))
        if self.shuffle:
            random.shuffle(self.samples)

    def __len__(self):
        # Length is the precomputed total line count over all files,
        # not the size of the in-memory chunk.
        return self.file_raws

    def __getitem__(self, idx):
        # Serve the next sample from the in-memory chunk; the caller's idx
        # is intentionally ignored.
        idx = self.index[0]
        # BUGFIX: keep the returned sample in its own variable — the
        # original reused `data` in the refill loop below and returned
        # the last raw line read from disk instead of the sample.
        sample = self.samples[idx]
        self.index = self.index[1:]
        self.current_sample_num -= 1
        if self.current_sample_num <= 0:
            # Chunk exhausted: pull the next chunk into memory.
            self.samples = self._read_chunk()
            self.current_sample_num = len(self.samples)
            self.index = list(range(self.current_sample_num))
            # Consistent with initial(): shuffle each fresh chunk.
            if self.shuffle:
                random.shuffle(self.samples)
        return sample

    def get_files_raws(self, file_path):
        """Return the total number of lines (samples) across all files."""
        file_raws = 0
        for fp in file_path:
            with open(fp, 'r') as f:
                for _ in f:
                    file_raws += 1
        return file_raws
