import os
import random
import warnings
import pandas as pd
from nltk.corpus import stopwords
import json

warnings.filterwarnings("ignore")

# Kaggle input layout for the "Coleridge Initiative - Show US the Data"
# competition: per-publication JSON text files live under train/ and test/.
all_dir = os.listdir("kaggle/input/coleridgeinitiative-show-us-the-data")  # top-level entries (unused below; presumably kept for inspection — verify)
test_path = "kaggle/input/coleridgeinitiative-show-us-the-data/test"  # directory of test publication JSON files
sub_path = "kaggle/input/coleridgeinitiative-show-us-the-data/sample_submission.csv"  # sample submission csv (unused below)
train_file = "kaggle/input/coleridgeinitiative-show-us-the-data/train.csv"  # training metadata csv
train_path = "kaggle/input/coleridgeinitiative-show-us-the-data/train"  # directory of train publication JSON files


# Now join the text from each publication's JSON file onto our training csv rows
def get_text(filename, test=False):
    """Read one publication's JSON file and join all of its section texts.

    :param filename: publication Id (JSON file name without the ``.json`` suffix)
    :param test: read from the test directory when True, otherwise from train
    :return: every entry of the file's ``text`` column joined with single spaces
    """
    base_path = test_path if test else train_path
    # BUG FIX: the path was hard-coded to "(unknown).json" and ignored
    # `filename` entirely; interpolate the actual publication Id.
    df = pd.read_json(f'{base_path}/{filename}.json')
    return " ".join(list(df['text']))


def get_paragraph_len(filename, test=False):
    """Count the number of text sections in one publication's JSON file.

    :param filename: publication Id (JSON file name without the ``.json`` suffix)
    :param test: read from the test directory when True, otherwise from train
    :return: number of entries in the file's ``text`` column
    """
    base_path = test_path if test else train_path
    # BUG FIX: the path was hard-coded to "(unknown).json" and ignored
    # `filename` entirely; interpolate the actual publication Id.
    df = pd.read_json(f'{base_path}/{filename}.json')
    return len(df['text'])


def get_text_len(filename, test=False):
    """Total character count across all text sections of one publication.

    :param filename: publication Id (JSON file name without the ``.json`` suffix)
    :param test: read from the test directory when True, otherwise from train
    :return: sum of ``len(text)`` over the file's ``text`` column
    """
    base_path = test_path if test else train_path
    # BUG FIX: the path was hard-coded to "(unknown).json" and ignored
    # `filename` entirely; interpolate the actual publication Id.
    df = pd.read_json(f'{base_path}/{filename}.json')
    return sum(len(text) for text in df['text'])


def get_json_data(save_path="./json", num=0):
    """Build a SQuAD-style QA dataset from train.csv plus the per-publication
    JSON text files, and persist it under ``save_path``.

    :param save_path: directory in which the output .json file is created
    :param num: number of rows to convert; 0 converts the whole dataset,
        any other value samples ``num`` random rows (fixed seed)
    :return: the dataset serialized as a JSON string (also written to
        ``save_path/preprocess_num={num}.json``), or None when ``num`` is invalid
    """
    # train.csv content — 5 columns (Id, pub_title, dataset_title,
    # dataset_label, cleaned_label presumed from usage below; verify)
    train_df = pd.read_csv(train_file)  # reading csv file

    # Sampling keeps the pipeline fast while experimenting.
    if num == 0:
        print("数据集过大，请耐心等待...")
    else:
        try:
            train_df = train_df.sample(num, random_state=2021)
        except ValueError:
            # DataFrame.sample raises ValueError when num is negative or
            # exceeds len(train_df); previously a bare `except:` hid everything.
            print("num参数错误")
            return

    # Attach the raw text and simple length statistics for every publication.
    print("text")
    train_df['text'] = train_df['Id'].apply(get_text)
    print("paragraph_len")
    train_df['paragraph_len'] = train_df['Id'].apply(get_paragraph_len)
    print("text_len")
    train_df['text_len'] = train_df['Id'].apply(get_text_len)

    # Text preprocessing: lower-case first so later steps are case-insensitive.
    print("lower")
    train_df['lower'] = train_df['text'].str.lower()

    def remove_punctuation(text):
        """Custom function to remove the punctuation in one C-level pass."""
        PUNCT_TO_REMOVE = '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~\n'
        return text.translate(str.maketrans('', '', PUNCT_TO_REMOVE))

    # Strip punctuation
    print("text_wo_punct")
    train_df["text_wo_punct"] = train_df["lower"].apply(remove_punctuation)

    # A set gives O(1) membership tests versus O(n) on the original list.
    stop_words = set(stopwords.words('english'))

    def remove_stopwords(text):
        """Drop English stopwords from a whitespace-tokenised string."""
        return " ".join([word for word in str(text).split() if word not in stop_words])

    # Remove stopwords
    print("text_wo_stop")
    train_df["text_wo_stop"] = train_df["text_wo_punct"].apply(remove_stopwords)

    def get_label_start(filename, df=train_df):
        """Locate the cleaned dataset label inside the cleaned text.

        Returns the character offset, or -1 when the label cannot be found
        (such rows are dropped in the loop below).
        """
        t_df = df[lambda x: x['Id'] == filename]
        # NOTE: label is cleaned stopwords-then-punctuation, the reverse of
        # the text pipeline order above; preserved as-is.
        label = (list(t_df['dataset_label'])[0]).lower()
        label = remove_stopwords(label)
        label = remove_punctuation(label)
        text = (list(t_df["text_wo_stop"])[0]).lower()
        i = text.find(label)
        if i == -1:
            print(filename, label, "未能找到label位置，将抛弃该条数据")
        return i

    train_df['label_start'] = train_df['Id'].apply(get_label_start)

    dataset = {"data": []}
    dealing_dict = {}  # pub_title -> accumulated SQuAD-style entry
    # Hoisted loop-invariant: characters used for the random 32-char ids.
    # NOTE(review): '8' is missing from the original charset; preserved as-is.
    id_charset = ['1', '2', '3', '4', '5', '6', '7', '9', '0',
                  'z', 'y', 'x', 'w', 'v', 'u', 't', 's', 'r',
                  'q', 'p', 'o', 'n', 'm', 'l', 'k', 'j', 'i',
                  'h', 'g', 'f', 'e', 'd', 'c', 'b', 'a']
    print("get json")
    for row in train_df.itertuples():
        # A fresh template per row is required: reusing one dict would share
        # mutable state between different titles via dealing_dict.get below.
        # NOTE(review): official SQuAD uses the key "answer_start";
        # "answers_start" is kept because downstream code may rely on it.
        preprocess_data_temp = {
            "title": "",
            "paragraphs": [{"context": "",
                            "qas": [
                                {
                                    "answers": [{
                                        "answers_start": "",
                                        "text": ""
                                    }
                                    ],
                                    "question": "What dataset was used",
                                    "id": ""
                                }
                            ]
                            }]
        }
        title = getattr(row, 'pub_title')
        context = getattr(row, 'text_wo_stop')

        answer_start = getattr(row, 'label_start')
        if answer_start == -1:
            # label was not found in the cleaned text; drop this row
            continue
        answer_text = getattr(row, 'dataset_label')
        answer_text = answer_text.lower()
        answer_text = remove_stopwords(answer_text)
        answer_text = remove_punctuation(answer_text)

        row_id = ''.join(random.sample(id_charset, 32))

        data_temp = dealing_dict.get(title, preprocess_data_temp)
        data_temp['title'] = title
        if data_temp['paragraphs'][0]['qas'][0]["answers"][0]["text"] == "":
            # First occurrence of this title: fill in the whole paragraph.
            data_temp['paragraphs'] = [{"context": context,
                                        "qas": [
                                            {
                                                "answers": [{
                                                    "answers_start": answer_start,
                                                    "text": answer_text,
                                                }
                                                ],
                                                "question": "What dataset was used",
                                                "id": row_id
                                            }
                                        ]
                                        }]
        else:
            # Same title seen again: append the label ("|"-separated) and
            # keep the smallest answer start offset.
            an_t = data_temp['paragraphs'][0]['qas'][0]["answers"][0]["text"]
            an_start = data_temp['paragraphs'][0]['qas'][0]["answers"][0]["answers_start"]
            data_temp['paragraphs'][0]['qas'][0]["answers"][0]["text"] = an_t + "|" + answer_text

            if answer_start < an_start:
                data_temp['paragraphs'][0]['qas'][0]["answers"][0]["answers_start"] = answer_start

        dealing_dict[title] = data_temp

    dataset['data'] = list(dealing_dict.values())
    print(dataset)

    json_str = json.dumps(dataset, ensure_ascii=False)

    # Create the output directory if needed (exist_ok avoids the racy
    # exists-then-create pattern; narrow OSError instead of a bare except).
    try:
        os.makedirs(save_path, exist_ok=True)
    except OSError:
        print("创建文件夹失败")

    with open(save_path + f"/preprocess_num={num}.json", 'w', encoding='utf-8') as json_file:
        # Reuse the already-serialized string instead of dumping twice.
        json_file.write(json_str)
    return json_str


if __name__ == '__main__':
    # BUG FIX: the result was previously bound to the name `json`, shadowing
    # the imported json module for the rest of the process.
    json_str = get_json_data(save_path="./json", num=5)
    print("success")
