import difflib
import os
import sys

from zkl_datasets import MappedDataset, load_dataset
from zkl_promptui import confirm_clear_dir_path

# Make the repository root importable so the project-local `llmpt` and
# `scripts` packages below resolve when this file runs as a standalone script.
root_dir_path = os.path.join(os.path.dirname(__file__), "../../..")
sys.path.append(root_dir_path)

from llmpt.preprocess import build_preprocessed_dataset
from scripts.config import datasets_dir_path


def clean_duplicated_head(title: str, desc: str, body: str) -> str:
    """Join title, description, and body into one text, dropping redundant parts.

    A part is dropped when another part already contains it (prefix/suffix
    overlap) or when title and description are near-duplicates of each other.

    Args:
        title: Sample title (may be empty).
        desc: Sample description/summary (may be empty).
        body: Main text; always kept.

    Returns:
        The surviving parts joined with newlines, in title/desc/body order.
    """
    title = title.strip()
    desc = desc.strip()
    body = body.strip()

    # Drop the title when the description already starts with it (also drops
    # an empty title, since every string starts with "").
    if desc.startswith(title):
        title = None
    # Drop the description when it is a suffix or prefix of the title.
    if title is not None and title.endswith(desc):
        desc = None
    if title is not None and desc is not None and title.startswith(desc):
        desc = None
    if title is not None and desc is not None:
        # BUG FIX: the original used real_quick_ratio(), which is only a
        # length-based upper bound (~1.0 for any two strings of similar
        # length), so unrelated title/desc pairs were wrongly deduplicated.
        # ratio() compares actual content.
        ratio = difflib.SequenceMatcher(None, title, desc).ratio()
        if ratio > 0.66:
            # Near-duplicates: keep the longer (more informative) one.
            if len(title) > len(desc):
                desc = None
            else:
                title = None
    # Drop any head part that the body itself already begins with.
    if title is not None and body.startswith(title):
        title = None
    if desc is not None and body.startswith(desc):
        desc = None

    return "\n".join(filter(lambda x: x is not None, [title, desc, body]))


def iter_load_text_datasets():
    """Yield each source corpus wrapped as a MappedDataset producing plain text.

    Datasets are opened lazily as the generator is consumed, in a fixed order:
    news2016zh, baike2018qa, webtext2019, wikipedia-zh-cleaned, DuReader-cleaned.
    """

    def _open(relative_path):
        # Resolve a corpus directory under the shared datasets root.
        return load_dataset(os.path.join(datasets_dir_path, relative_path))

    def _decode_utf8(sample):
        # The pre-cleaned corpora store raw bytes; decode each record to text.
        return str(sample, encoding="utf-8")

    yield MappedDataset(
        _open("nlp_chinese_corpus/news2016zh"),
        lambda sample: "\n".join([sample.title, sample.source, sample.content]))

    yield MappedDataset(
        _open("nlp_chinese_corpus/baike2018qa"),
        lambda sample: clean_duplicated_head(sample.title, sample.desc, sample.answer))

    yield MappedDataset(
        _open("nlp_chinese_corpus/webtext2019"),
        lambda sample: clean_duplicated_head(sample.title, sample.desc, sample.content))

    yield MappedDataset(_open("wikipedia-zh-cleaned"), _decode_utf8)

    yield MappedDataset(_open("DuReader-cleaned"), _decode_utf8)


# Fraction of samples routed to each split; the values sum to 1.0.
splits_ratio = {
    'train': 0.7,
    'valid': 0.01,
    'test': 0.29,
}

# main

if __name__ == '__main__':
    # Output directory for this preprocessing run; the version suffix ("v5.1")
    # is bumped when the pipeline or source corpora change.
    dst_dataset_path = os.path.join(datasets_dir_path, "llmpt/preprocessed/v5.1")
    # Interactively confirm before clearing any previous output at this path.
    confirm_clear_dir_path(dst_dataset_path)
    build_preprocessed_dataset(dst_dataset_path, iter_load_text_datasets(), splits_ratio)
