# coding=UTF-8
import pandas as pd
import re
import jieba

def make_int(value):
    """Cast a raw score cell to ``int``.

    The TSV may load the score column as str/float; this normalizes it.
    Renamed the parameter from ``input``, which shadowed the builtin.
    """
    return int(value)

# Load tab-separated question pairs; presumably `score` is a binary
# similarity label -- TODO confirm against data.tsv.
data = pd.read_csv("data.tsv",names=["question1","question2","score"], encoding="utf-8", sep="\t")
# Normalize the score column to int (raw cells may be str or float).
labels = data["score"].apply(make_int)

from opencc import OpenCC # https://github.com/yichen0831/opencc-python

# Traditional -> Simplified Chinese converter ('t2s' profile).
openCC = OpenCC('t2s')

def replace_words(one_string):
    """Normalize a question string: convert Traditional to Simplified
    Chinese, then strip every whitespace character."""
    simplified = openCC.convert(one_string)
    return re.sub(r'\s+', '', simplified)

def isEN(uchar):
    """Return True when *uchar* lies in the ASCII letter range A-Z or a-z."""
    in_upper = u'\u0041' <= uchar <= u'\u005a'
    in_lower = u'\u0061' <= uchar <= u'\u007a'
    return in_upper or in_lower

def isZH(s):
    """Return True when *s* falls inside the basic CJK ideograph range
    U+4E00..U+9FA5."""
    return '\u4e00' <= s <= '\u9fa5'

def fenzi(one_string):
    """Character-level tokenizer: keep CJK ideographs as single tokens and
    collapse each maximal run of ASCII letters into one lowercased token;
    every other character (digits, punctuation, whitespace) is dropped.

    Fixes the original grouping loop, whose ``for`` variable and manually
    managed ``index`` drifted out of sync after a multi-letter run, which
    could glue a following letter run onto the preceding Chinese character
    (e.g. "ab中c" produced ["ab", "中c"] instead of ["ab", "中", "c"]).
    """
    def _is_letter(ch):
        # Same range test as the module-level isEN().
        return '\u0041' <= ch <= '\u005a' or '\u0061' <= ch <= '\u007a'

    def _is_cjk(ch):
        # Same range test as the module-level isZH().
        return '\u4e00' <= ch <= '\u9fa5'

    # Pass 1: keep only CJK chars and (lowercased) ASCII letters.
    chars = []
    for ch in one_string:
        if _is_cjk(ch):
            chars.append(ch)
        elif _is_letter(ch):
            chars.append(ch.lower())

    # Pass 2: emit CJK chars one by one; merge adjacent letters into a word.
    tokens = []
    i = 0
    n = len(chars)
    while i < n:
        if _is_letter(chars[i]):
            j = i + 1
            while j < n and _is_letter(chars[j]):
                j += 1
            tokens.append(''.join(chars[i:j]))
            i = j
        else:
            tokens.append(chars[i])
            i += 1
    return tokens

def fenci(one_string):
    """Word-level tokenizer: jieba-cut (HMM disabled), keeping only tokens
    that are Chinese or English letters; English tokens are lowercased."""
    kept = []
    for token in jieba.lcut(one_string, HMM=False):
        if isZH(token):
            kept.append(token)
        elif isEN(token):
            kept.append(token.lower())
    return kept

def fenci_experiment(one_string):
    """Experimental variant of fenci(): jieba-segment, keep Chinese/English
    tokens (English lowercased), and re-split Chinese tokens longer than
    3 characters into finer pieces.

    NOTE(review): jieba.del_word mutates jieba's global dictionary, so a
    deleted long word stays deleted for every later lcut call in this
    process -- confirm this cross-call side effect is intended.
    """
    temp_list = jieba.lcut(one_string,HMM=False)
    final_result = []
    for word in temp_list:
        # Drop tokens that are neither Chinese nor English letters.
        if not isZH(word) and not isEN(word):
            continue
        if isEN(word):
            word = word.lower()
        if len(word)>3 and isZH(word):
            # Remove the long word from jieba's dictionary, then re-segment
            # it so jieba is forced to produce shorter sub-words.
            jieba.del_word(word)
            final_result.extend(jieba.lcut(word,HMM=False))
        else:
            final_result.append(word)
    return final_result

import time
# Wall-clock start in milliseconds, for the per-line timing printed below.
begin_time = int(time.time()*1000)

# Normalize both question columns: Traditional -> Simplified, strip spaces.
data["question1"] = data["question1"].apply(replace_words)
data["question2"] = data["question2"].apply(replace_words)

# Word-level (jieba) tokenization of each question pair.
fenci_result = pd.DataFrame()
fenci_result["question1"] = data["question1"].apply(fenci)
fenci_result["question2"] = data["question2"].apply(fenci)
fenci_result["score"] = labels
# Accumulators filled as a side effect of my_test() via DataFrame.apply.
temp_q1_list = []
temp_q2_list = []
temp_s_list = []

# Maximum number of question pairs to keep.
length_= 91500
def my_test(q1, q2, score):
    """Collect (q1, q2, score) into the module-level temp lists, skipping
    identical question pairs, until length_ rows have been gathered."""
    if q1 == q2 or len(temp_q2_list) >= length_:
        return
    temp_q1_list.append(q1)
    temp_q2_list.append(q2)
    temp_s_list.append(score)

# Side-effect apply: my_test fills the accumulator lists with kept rows.
fenci_result.apply(lambda row: my_test(row['question1'], row['question2'], row["score"]), axis=1)

# Rebuild the DataFrame from only the filtered rows.
fenci_result = pd.DataFrame({
    "question1":temp_q1_list,
    "question2":temp_q2_list,
    "score":temp_s_list,
})


temp_list1 = fenci_result["question1"].tolist()#[1:]
temp_list2 = fenci_result["question2"].tolist()#[1:]
# Pair the token lists row by row: [[q1_tokens, q2_tokens], ...].
tokens = []
for item1, item2 in zip(temp_list1, temp_list2):
    temp_pair = []
    temp_pair.append(item1)
    temp_pair.append(item2)
    tokens.append(temp_pair)

# Character-level tokenization of the same (unfiltered) question pairs.
fenzi_result = pd.DataFrame()
fenzi_result["question1"] = data["question1"].apply(fenzi)
fenzi_result["question2"] = data["question2"].apply(fenzi)
fenzi_result["score"] = labels


# Reset the accumulators for the character-level pass.
temp_q1_list = []
temp_q2_list = []
temp_s_list = []

def my_test2(q1, q2, score):
    """Same filter as my_test, reused for the character-level pass: keep
    non-identical pairs until length_ rows are collected."""
    if q1 == q2:
        return
    if len(temp_q2_list) < length_:
        for bucket, value in ((temp_q1_list, q1), (temp_q2_list, q2), (temp_s_list, score)):
            bucket.append(value)

# Side-effect apply: my_test2 fills the accumulators with kept rows.
fenzi_result.apply(lambda row: my_test2(row['question1'], row['question2'], row["score"]), axis=1)

# Rebuild the character-level DataFrame from only the filtered rows.
fenzi_result = pd.DataFrame({
    "question1":temp_q1_list,
    "question2":temp_q2_list,
    "score":temp_s_list,
})


temp_list1_ = fenzi_result["question1"].tolist()#[1:]
temp_list2_ = fenzi_result["question2"].tolist()#[1:]
tokens2 = []

# Same row-by-row pairing as `tokens`, for the character-level lists.
for item1, item2 in zip(temp_list1_, temp_list2_):
    temp_pair = []
    temp_pair.append(item1)
    temp_pair.append(item2)
    tokens2.append(temp_pair)

# NOTE(review): the denominator is the total row count, so the value is
# elapsed ms per line, not "per 1000 line" as the label claims -- confirm
# the intended units before trusting this number.
print("preprocess time per 1000 line:")
print((int(time.time()*1000)-begin_time)/data.shape[0])
print("ms")

import json

# Persist the paired token lists; context managers ensure the file handles
# are closed (the original passed open(...) directly and leaked them).
with open("data/tokens_train.json", mode="w", encoding="utf-8") as f:
    json.dump(tokens, f)
with open("data/tokens2_train.json", mode="w", encoding="utf-8") as f:
    json.dump(tokens2, f)
# index must be the boolean False: the original passed the string "False",
# which is truthy and made pandas write the index column.
fenci_result.to_csv("data/rokid_fenci_train.csv",encoding="utf-8",index=False,sep="\t",header=False)
fenzi_result.to_csv("data/rokid_fenzi_train.csv",encoding="utf-8",index=False,sep="\t",header=False)


def postprocess(input_list):
    """Glue a list of tokens back into a single string."""
    pieces = list(input_list)
    return "".join(pieces)
fenci_result["question1"] = fenci_result["question1"].apply(postprocess)
fenci_result["question2"] = fenci_result["question2"].apply(postprocess)
fenci_result.to_csv("data/rokid_fenci_222.csv",encoding="utf-8",index="False",sep="\t",header=False)
