import pandas as pd
from spidertools.utils.mutiprocess_utils import parallel_apply
import time
import os
import json


def getCargoContext(sentences, index, cargo_name):
    """Return a context window of sentences around one cargo-name mention.

    The window is up to 2 sentences before and 2 after ``sentences[index]``;
    in the center sentence every occurrence of ``cargo_name`` is highlighted
    as ``$$cargo_name$$``.

    :param sentences: list of sentence strings for one dialog
    :param index: position of the sentence containing the mention
    :param cargo_name: substring to highlight
    :return: list of up to 5 strings (fewer near either end of the dialog)
    """
    originText = sentences[index]
    originText = originText.replace(cargo_name, "$$" + cargo_name + "$$")
    start_position = max(0, index - 2)
    # Bug fix: the old code used -1 as the slice end when index + 2 ran past
    # the list, which *dropped* the final sentence (s[i:-1] excludes the last
    # element). Clamp to len(sentences) so the tail is kept.
    end_position = min(len(sentences), index + 2 + 1)
    return sentences[start_position:index] + [originText] + sentences[index + 1:end_position]


def makeTrainDataFromOneDailog(sentences, cargo_name=""):
    """Extract and persist every cargo-name context window from one dialog.

    Scans the dialog; for each sentence mentioning ``cargo_name`` it builds a
    highlighted context window, writes it to disk via ``saveItemToFile``, and
    collects it.

    :param sentences: list of "role:text" sentence strings for one dialog
    :param cargo_name: cargo substring to look for (note: the default ""
        matches every sentence)
    :return: list of the context windows produced (was always [] before —
        bug fix: ``result`` was declared and returned but never populated)
    """
    result = []
    index = 0
    while index < len(sentences):
        sentence = sentences[index]
        if cargo_name not in sentence:
            index += 1
            continue
        cargo_context_list = getCargoContext(sentences, index, cargo_name)
        saveItemToFile(cargo_name, cargo_context_list)
        result.append(cargo_context_list)
        # Jump past the window we just emitted so consecutive mentions don't
        # produce heavily overlapping samples.
        index += 3
    return result


def saveItemToFile(cargo_name, item,
                   base_dir=r'D:\工作相关内容\公司项目\禁限运危化品货源\train_data\deal_cargo'):
    """Write one context window to a new text file under ``base_dir/cargo_name``.

    :param cargo_name: sub-directory name — one folder per cargo
    :param item: list of sentence strings, written one per line
    :param base_dir: output root; defaults to the original hard-coded path
        (parameterized so the destination can be overridden, e.g. in tests)
    """
    save_root = os.path.join(base_dir, cargo_name)
    try:
        # makedirs(exist_ok=True) replaces the racy exists()-then-mkdir and
        # also creates missing intermediate directories; only OSError is
        # swallowed (best effort, as before), not every exception.
        os.makedirs(save_root, exist_ok=True)
    except OSError as e:
        print(e)
        return
    # Bug fix: int(time.time()) has 1-second resolution, so consecutive saves
    # for the same cargo within one second silently overwrote each other.
    # Nanosecond resolution makes each filename unique within a process.
    save_file_name = os.path.join(save_root, str(time.time_ns()) + ".txt")
    try:
        with open(save_file_name, 'w', encoding='utf-8', errors="ignore") as fwrite:
            fwrite.write("\n".join(item) + "\n")
    except Exception as e:
        print(e)


def asrDetailProcess(item):
    """Process one CSV row: parse its ASR JSON detail and emit training data.

    :param item: one row (indexable sequence) where item[2] is the ASR detail
        — a JSON array of turns with 'role' and 'text' keys — and item[3] is
        the cargo name to search for.
    """
    asr_detail = item[2]
    cargo_name = item[3]
    jsonObj = json.loads(asr_detail)
    sentences = []
    # Bug fix: the loop variable was also named `item`, shadowing the
    # function parameter; renamed to avoid the latent conflict.
    for turn in jsonObj:
        sentences.append(turn['role'] + ":" + turn['text'])

    makeTrainDataFromOneDailog(sentences, cargo_name)


def main():
    """Load the ASR transcript CSV and fan its rows out to worker processes."""
    csv_path = r'D:\工作相关内容\公司项目\禁限运危化品货源\deal_asr_text.csv'
    frame = pd.read_csv(csv_path)
    # Shuffle all rows so work is spread evenly across the pool.
    frame = frame.sample(frac=1)
    print(frame.head())
    # 8 worker processes, batches of 1000 rows each.
    parallel_apply(asrDetailProcess, frame.values, 8, 1000)


# Script entry point: only run the pipeline when executed directly, never on
# import (required so multiprocessing workers can import this module safely).
if __name__ == '__main__':
    main()