import pandas as pd
import requests
import json
import uuid
import os
import time
from triedTree.base_tried_tree import BaseTriedTree

from spidertools.utils.mutiprocess_utils import parallel_apply

__global_dict = {}
def getTrieTreeInstance():
    """Return the shared danger-word trie, building it on first use.

    The trie is seeded from ``danger_words.txt`` and then extended with
    ``normal_words.txt``; the instance is cached in a module-level dict so
    every caller shares one tree.
    """
    global __global_dict
    if 'danger' in __global_dict:
        return __global_dict['danger']
    tree = BaseTriedTree(path='danger_words.txt')
    tree._add_file_to_tree("normal_words.txt")
    __global_dict['danger'] = tree
    return tree

def getCargoNameType(sentence):
    """Find the first flagged cargo word in *sentence* via the shared trie.

    Returns a ``(word, word_type)`` pair where ``word_type`` is ``'badword'``
    or ``'standword'``; standword hits shorter than 2 characters are skipped.
    Returns ``("", "")`` when nothing matches.
    """
    tree = getTrieTreeInstance()
    words, types = tree.process(sentence)
    for word, tag in zip(words, types):
        if 'badword' in tag:
            return word, 'badword'
        # standwords must be at least two characters; shorter hits are
        # ignored and scanning continues.
        if 'standword' in tag and len(word) >= 2:
            return word, 'standword'
    return "", ""
            
def getCargoName(sentence):
    """Ask the remote cargo-name service to identify a cargo name in *sentence*.

    Returns the extracted cargo name (at least 2 characters) when the service
    classifies it as a matching type, otherwise the empty string.

    Raises whatever ``requests`` raises on network failure or timeout, and
    ``ValueError`` if the response body is not JSON.
    """
    url = "http://beetle.amh-group.com/beetle/cargo_name/cargo_name_identify"

    post_body = {
        "cargoName": sentence,
        "Transfer": "1.csv"
    }
    headers = {
        'Content-Type': "application/json"
    }

    cargo_name = ""

    # BUG FIX: the original passed `data=post_body`, which form-encodes the
    # dict while the header claims application/json; `json=` serializes the
    # body as JSON, matching the declared content type. A timeout is added so
    # a stalled service cannot hang the pipeline forever.
    req = requests.post(url, json=post_body, headers=headers, timeout=30)
    req_json = req.json()
    if req_json and 'data' in req_json and 'cargoNameInfo' in req_json['data']:
        cargo_dict = req_json['data']['cargoNameInfo']
        # NOTE(review): this mixes a string '2' and an int 5 — presumably the
        # service returns one consistent type, so one branch may never match.
        # Kept as-is; verify against the API's actual response schema.
        if cargo_dict['type'] == '2' or cargo_dict['type'] == 5:
            cargo_name = cargo_dict['cargoName']
            if len(cargo_name) < 2:
                cargo_name = ""
    return cargo_name

def getCargoContext(sentences, index, cargo_name):
    """Return up to 2 sentences of context on each side of ``sentences[index]``.

    The center sentence has every occurrence of *cargo_name* wrapped in
    ``$$`` markers. The window is clipped at both list boundaries.
    """
    marked = sentences[index].replace(cargo_name, "$$" + cargo_name + "$$")
    start_position = max(0, index - 2)
    # BUG FIX: the original used -1 as the slice end whenever index+2 ran past
    # the list, which silently dropped the FINAL sentence (s[a:-1] excludes the
    # last element). Clamp to len(sentences) to include it.
    end_position = min(len(sentences), index + 2 + 1)
    return sentences[start_position:index] + [marked] + sentences[index + 1:end_position]



    
    

def makeTrainDataFromOneDailog(sentences):
    """Walk one dialog's sentences, saving a context window around each hit.

    For every sentence containing a trie hit, the 2-before/2-after context is
    written to disk (badwords and standwords go to different roots), and the
    scan jumps 3 sentences ahead to avoid overlapping samples.

    NOTE(review): ``result`` is never populated, so this always returns an
    empty list; callers ignore the return value.
    """
    result = []
    pos = 0
    total = len(sentences)
    while pos < total:
        name, name_type = getCargoNameType(sentences[pos])
        if name:
            context = getCargoContext(sentences, pos, name)
            saveItemToFile(name, context, bad_path=(name_type == "badword"))
            pos += 2  # skip past the window we just consumed
        pos += 1
    return result


def saveItemToFile(cargo_name, item, bad_path=False):
    """Write one training sample (a list of context sentences) to disk.

    The sample is stored under a per-cargo-name directory beneath either the
    bad_cargo or normal_cargo root, one sentence per line. Write failures are
    logged and swallowed (best-effort persistence).
    """
    if bad_path:
        save_root = os.path.join(r'D:\工作相关内容\公司项目\禁限运危化品货源\train_data\bad_cargo', cargo_name)
    else:
        save_root = os.path.join(r'D:\工作相关内容\公司项目\禁限运危化品货源\train_data\normal_cargo', cargo_name)
    # BUG FIX: os.mkdir fails when intermediate directories are missing and
    # the exists()/mkdir() pair races under the multiprocess caller;
    # makedirs(exist_ok=True) handles both.
    os.makedirs(save_root, exist_ok=True)
    # BUG FIX: int(time.time()) has 1-second resolution, so samples written
    # within the same second silently overwrote each other (the 1 ms sleep
    # did not prevent this); a uuid suffix makes every file name unique.
    save_file_name = os.path.join(
        save_root, "%d_%s.txt" % (int(time.time()), uuid.uuid4().hex))
    try:
        with open(save_file_name, 'w', encoding='utf-8', errors="ignore") as fwrite:
            fwrite.write("\n".join(item) + "\n")
    except Exception as e:
        # Best-effort: keep processing the rest of the dialogs on failure.
        print(e)


def asrDetailProcess(asr_detail):
    """Parse one ASR dialog JSON string and feed it to the training pipeline.

    *asr_detail* is a JSON array of turns; each turn is rendered as
    ``"role:text"`` before extraction.
    """
    turns = json.loads(asr_detail)
    sentences = [item['role'] + ":" + item['text'] for item in turns]
    makeTrainDataFromOneDailog(sentences)
                
                
    

def main():
    """Load the dialog CSV and mine training samples from every ASR detail."""
    # Warm-up call against the remote cargo-name service.
    getCargoName("我想运苹果")
    data = r'D:\工作相关内容\公司项目\禁限运危化品货源\data.csv'
    df = pd.read_csv(data, sep='\001')
    print(df.head())
    # Fan the per-dialog processing out across 8 workers, 1000 rows per chunk.
    parallel_apply(asrDetailProcess, df['asr_detail'].values, 8, 1000)






            
            
            
            
    



if __name__ == '__main__':
    # Script entry point: run the full training-data extraction pipeline.
    main()
    