import os
import warnings
import pandas as pd
from nltk.corpus import stopwords
import json
import spacy
from torchtext.data import Field, BucketIterator, TabularDataset
from sklearn.model_selection import train_test_split
from tqdm import tqdm

# Silence library warnings globally (pandas/torchtext emit many on this dataset).
warnings.filterwarnings("ignore")

# Kaggle "Coleridge Initiative — Show US the Data" competition data layout.
all_dir = os.listdir("kaggle/input/coleridgeinitiative-show-us-the-data")
test_path = "kaggle/input/coleridgeinitiative-show-us-the-data/test"
sub_path = "kaggle/input/coleridgeinitiative-show-us-the-data/sample_submission.csv"
train_file = "kaggle/input/coleridgeinitiative-show-us-the-data/train.csv"
train_path = "kaggle/input/coleridgeinitiative-show-us-the-data/train"
# Separator used when joining the text sections of one publication's JSON.
split_symbol = "<|||>"

def logged_apply(g, func, *args, **kwargs):
    """Apply ``func`` element-wise over ``g`` while printing progress to stdout.

    Behaves exactly like ``g.apply(func, *args, **kwargs)`` (``g`` is a pandas
    Series/DataFrame/groupby) and returns its result, updating an in-place
    ``apply progress: NNN%`` counter as elements are processed.

    :param g: any object exposing ``__len__`` and ``.apply(func, ...)``
    :param func: function applied to each element
    :return: whatever ``g.apply`` returns
    """
    import sys

    total = len(g)
    # Guard against an empty container: 100. / 0 would raise ZeroDivisionError.
    step_percentage = 100. / total if total else 100.
    sys.stdout.write('apply progress:   0%')
    sys.stdout.flush()

    def logging_decorator(wrapped):
        # Wrap ``wrapped`` so each call first rewrites the percentage counter.
        def wrapper(*w_args, **w_kwargs):
            progress = wrapper.count * step_percentage
            # '\033[D' moves the cursor one column left: erase "NNN%" in place.
            sys.stdout.write('\033[D \033[D' * 4 + format(progress, '3.0f') + '%')
            sys.stdout.flush()
            wrapper.count += 1
            return wrapped(*w_args, **w_kwargs)

        wrapper.count = 0  # call counter stored on the function object
        return wrapper

    logged_func = logging_decorator(func)
    res = g.apply(logged_func, *args, **kwargs)
    # Finish the line at a fixed 100% so partial-progress output never lingers.
    sys.stdout.write('\033[D \033[D' * 4 + format(100., '3.0f') + '%' + '\n')
    sys.stdout.flush()
    return res


def make_dir(save_path):
    """Create ``save_path`` (including parents) if it does not already exist.

    Best-effort: on failure a message is printed and execution continues,
    matching the original behavior.
    """
    try:
        # exist_ok=True removes the exists()-then-makedirs() race of the
        # original code and makes repeated calls a no-op.
        os.makedirs(save_path, exist_ok=True)
    except OSError:
        # Narrowed from a bare except; makedirs failures are OSError.
        print("创建文件夹失败")


# 现在让我们将json文件中的文本与我们的训练csv文件连接起来
# Now join the text from the per-publication JSON files onto the training CSV.
def get_text(filename, test=False):
    """Read the publication JSON named ``filename`` and join its section texts.

    :param filename: publication Id (JSON file name without the ``.json`` extension)
    :param test: read from the test directory instead of the train directory
    :return: all ``text`` entries of the JSON joined by ``split_symbol``
    """
    base_dir = test_path if test else train_path
    # BUG FIX: the path previously did not interpolate ``filename``, so the
    # parameter was ignored and every call targeted the same file.
    df = pd.read_json(f'{base_dir}/{filename}.json')
    text = split_symbol.join(list(df['text']))
    return text


def get_json_data(save_path="./json", num=0):
    """Build QA-style JSON datasets from train.csv plus per-publication texts.

    :param save_path: directory where the generated .json files are written
    :param num: how many rows to convert — 0 converts everything, otherwise a
                random sample of ``num`` rows is drawn
    :return: None; writes ``preprocess_num=...``, ``train_num=...`` and
             ``test_num=...`` JSON files under ``save_path``
    """
    # train.csv has 5 columns, including Id, pub_title and dataset_label.
    train_df = pd.read_csv(train_file)  # reading csv file

    # To speed things up, optionally work on a random subsample.
    if num == 0:
        print("数据集过大，请耐心等待...")
    else:
        try:
            train_df = train_df.sample(num, random_state=2021)
        except ValueError:
            # sample() raises ValueError for invalid num (e.g. > len(df) or < 0);
            # narrowed from a bare except.
            print("num参数错误")
            return
    train_df['text'] = train_df['Id'].apply(get_text)
    dealing_dict = {}
    print("get json")
    for row in tqdm(train_df.itertuples()):
        title = getattr(row, 'pub_title')
        context = getattr(row, 'text')
        answer_text = getattr(row, 'dataset_label')
        # Fresh template per miss — a shared default dict would alias rows.
        data_temp = dealing_dict.get(
            title, {"title": "", "context": "", "answer": ""}
        )
        data_temp['title'] = title
        if data_temp["context"] == "":
            # First time we see this title: record its context and label.
            data_temp['context'] = context
            data_temp['answer'] = answer_text
        else:
            # BUG FIX: duplicate titles previously appended the new label onto
            # the *context* field; accumulate it in ``answer`` instead, using
            # "|" as the multi-label separator.
            data_temp['answer'] = data_temp['answer'] + "|" + answer_text

        dealing_dict[title] = data_temp

    # One record per unique publication title.
    dataset = list(dealing_dict.values())

    # Create the output dir; skipped if it already exists.
    make_dir(save_path)

    with open(save_path + f"/preprocess_num={num}.json", 'w', encoding='utf-8') as json_file:
        json_file.write(json.dumps(dataset, ensure_ascii=False))

    raw_data = {
        "context": [val['title'] + val['context'] for val in dataset],
        "answer": [val['answer'] for val in dataset],
    }

    df = pd.DataFrame(raw_data, columns=["context", "answer"])
    # create train and test set
    train, test = train_test_split(df, test_size=0.1)

    # Get train, test data to json and csv format which can be read by torchtext
    train.to_json(f"{save_path}/train_num={num}.json", orient="records", lines=True)
    test.to_json(f"{save_path}/test_num={num}.json", orient="records", lines=True)

    print(f"[{save_path}/train_num={num}.json] and [{save_path}/test_num={num}.json] have been saved")
    return


def get_iterator(save_path, num):
    """Build torchtext BucketIterators over the generated train/test JSON files.

    :param save_path: directory containing ``train_num=...`` / ``test_num=...`` files
    :param num: the ``num`` used when the files were generated (part of the name)
    :return: (train_iterator, test_iterator) BucketIterators on device "cuda"
    """
    # PERF FIX: load the spacy model once — the original reloaded the entire
    # model inside tokenize_eng, i.e. on every single tokenization call.
    spacy_eng = spacy.load("en")

    def tokenize_eng(text):
        return [tok.text for tok in spacy_eng.tokenizer(text)]

    context = Field(sequential=True, use_vocab=True, tokenize=tokenize_eng, lower=True)
    answer = Field(sequential=True, use_vocab=True, tokenize=tokenize_eng, lower=True)

    # Map the JSON keys to short field names used by the model batches.
    fields = {"context": ("c", context), "answer": ("a", answer)}

    train_data, test_data = TabularDataset.splits(
        path="", train=f"{save_path}/train_num={num}.json", test=f"{save_path}/test_num={num}.json", format="json",
        fields=fields
    )

    # Vocabularies are built from the training split only.
    context.build_vocab(train_data, max_size=10000, min_freq=2)
    answer.build_vocab(train_data, max_size=10000, min_freq=2)

    train_iterator, test_iterator = BucketIterator.splits(
        (train_data, test_data), batch_size=32, device="cuda"
    )

    print("finish! return train_iterator, test_iterator")
    return train_iterator, test_iterator


if __name__ == '__main__':
    output_dir = "./json"
    sample_count = 1000
    get_json_data(save_path=output_dir, num=sample_count)
    # Used when feeding data into the model:
    # get_iterator(save_path=output_dir, num=sample_count)
