import json
import os
import re
import csv


def split_text(text: str, max_length: int) -> list[str]:
    """Split *text* into chunks of at most *max_length* characters,
    breaking only at sentence boundaries.

    Sentences are delimited by ASCII and CJK terminators (.!?。！？),
    with any following whitespace consumed. A single sentence longer
    than *max_length* becomes its own oversized chunk rather than being
    cut mid-sentence.
    """
    # Split after sentence-ending punctuation; \s* swallows the separator
    # whitespace so joined chunks contain no inter-sentence spaces.
    sentences = re.split(r"(?<=[.!?。！？])\s*", text)

    result: list[str] = []
    current_chunk: list[str] = []
    current_chunk_size = 0

    for sentence in sentences:
        sentence_length = len(sentence)
        # Flush the current chunk when adding this sentence would overflow.
        # Only flush a NON-EMPTY chunk: the original appended '' to the
        # result whenever the first sentence alone exceeded max_length.
        if current_chunk and current_chunk_size + sentence_length > max_length:
            result.append(''.join(current_chunk))
            current_chunk = []
            current_chunk_size = 0
        current_chunk.append(sentence)
        current_chunk_size += sentence_length

    # Emit the trailing partial chunk, if any content remains.
    if current_chunk_size > 0:
        result.append(''.join(current_chunk))

    return result


class Dataset:
    """Loads and normalizes question/answer and text records from a fixed
    set of JSON / JSON-Lines files, with helpers for CSV and TXT I/O and
    for reshaping the loaded records into retrieval chunks."""

    def __init__(self, file_dir: str):
        # Eagerly load every known dataset file found under *file_dir*.
        self.all_data = self.process_dir(file_dir)

    def process_dir(self, file_dir):
        """Load every known dataset file in *file_dir* and concatenate
        their records into a single flat list."""
        file_names = ['PanDuan.json', 'JiaoCai-606.json', 'TianKong.json', 'kaoHe_data.json', 'XuanZe.json',
                      'JianDa_data.json', 'self_knowledge.json']
        all_data = []
        for file_name in file_names:
            all_data.extend(self.process_json(os.path.join(file_dir, file_name)))
        return all_data

    def process_json(self, file_path):
        """Load *file_path* as one JSON document, falling back to
        JSON Lines (one object per line) when that fails.

        Returns a list of records in either case; a single top-level
        object is wrapped in a one-element list.
        """
        try:
            with open(file_path, "r", encoding="utf-8") as f:
                all_data = json.load(f)
            return all_data if isinstance(all_data, list) else [all_data]
        except json.JSONDecodeError:
            # Narrowed from a bare `except Exception`: only a parse
            # failure should trigger the JSON-Lines fallback; real I/O
            # errors (missing file, permissions) now propagate directly.
            with open(file_path, "r", encoding="utf-8") as f:
                return [json.loads(line) for line in f]

    def process_csv(self, file_path):
        """Read a CSV file and return a list of row lists."""
        # newline="" is the documented way to open files for the csv
        # module (prevents blank rows / broken quoted newlines).
        with open(file_path, "r", newline="", encoding="utf-8") as csvfile:
            return list(csv.reader(csvfile))

    def process_txt(self, file_path):
        """Read a text file and return its lines stripped of surrounding
        whitespace (including the trailing newline)."""
        with open(file_path, "r", encoding="utf-8") as file:
            return [line.strip() for line in file]

    def write_csv(self, target_path, row_list):
        """Append every row of *row_list* to the CSV file at *target_path*."""
        with open(target_path, "a+", newline="", encoding="utf-8") as csvfile:
            csv.writer(csvfile).writerows(row_list)

    def process_qac(self, all_data=None, key_name="content"):
        """Extract the *key_name* field from each record that has one.

        Falls back to ``self.all_data`` when *all_data* is falsy.
        Records lacking *key_name* are silently skipped.
        """
        if not all_data:
            all_data = self.all_data
        return [item[key_name] for item in all_data if key_name in item]

    def process_splited_sentences(self, chunk_size, all_data=None):
        """Merge many fragment records into one text, then re-chunk it
        at sentence boundaries into pieces of at most *chunk_size* chars.

        Each record's text is taken from the first present key among
        "text", "content", "context"; records with none are skipped.
        Tabs and spaces are removed before chunking, and duplicate
        chunks are dropped (first occurrence kept).
        """
        if not all_data:
            all_data = self.all_data

        parts = []
        for item in all_data:
            # Preserve the original key precedence: text > content > context.
            for key_name in ("text", "content", "context"):
                if key_name in item:
                    parts.append(item[key_name])
                    break

        # Strip tabs and spaces from the combined text before chunking.
        content_combined = "".join(parts).replace("\t", "").replace(" ", "")

        chunks = split_text(content_combined, chunk_size)
        # De-duplicate while keeping first-occurrence order; the original
        # list(set(...)) produced a nondeterministic ordering.
        return list(dict.fromkeys(chunks))
