import pandas as pd
import py_stringsimjoin as ssj
import py_stringmatching as sm
import sys
import time
import py_entitymatching as em
from py_entitymatching.catalog import catalog_manager as cm



def preprocess_title(title):
    """Normalise a title string for record matching.

    Lowercases the text, deletes commas, single/double quotes, ASCII and
    fullwidth parentheses, semicolons and every space, spells '&' out as
    'and', and strips any remaining surrounding whitespace.
    """
    # One C-level pass instead of ten chained .replace() calls.
    # ',' maps straight to None: the original pipeline turned commas into
    # spaces and then deleted every space, which nets out to removal.
    drop_and_swap = str.maketrans({
        ',': None,
        "'": None,
        '"': None,
        '(': None,
        ')': None,
        '（': None,
        '）': None,
        ';': None,
        ' ': None,
        '&': 'and',
    })
    return title.lower().translate(drop_and_swap).strip()

# Example argv layout (positional, variable-length sections):
# baseinfo reginfo_extract3(modify)   -- names of the two tables to match
# 0.8                                 -- threshold used to filter the candidate set
# 2                                   -- number of attribute pairs to match
# name authorNationality
# company_id company_id
# 1                                   -- number of string attribute pairs to normalise
# name authorNationality
# 2                                   -- number of "mixture" attribute pairs
# nor_name nor_authorNationality
# company_id company_id
# Attributes to keep from the left and right tables in the candidate set:
# 3
# company_id nor_name credit_code
# 4
# company_id nor_authorNationality fullname regnum


def main(argv):
    """Run the end-to-end entity-matching pipeline.

    Reads two CSV tables, normalises selected string columns, builds a
    concatenated 'mixture' column, trains several Magellan matchers on a
    hand-labelled sample, selects the best one by cross-validation,
    predicts matches over a precomputed candidate set and writes the
    match / discovery result CSVs.

    argv layout (positional, see the comment block above):
      argv[1], argv[2]  input table names ('swj' tables get a '_normal' suffix)
      argv[3]           candidate-set threshold (float; parsed but unused here)
      argv[4]           number of attribute pairs used for matching,
                        followed by the variable-length sections decoded below.

    Side effects: reads CSVs from hard-coded paths, prints progress and
    evaluation summaries, writes three CSVs to the output directory.
    """
    # time.clock() was removed in Python 3.8; perf_counter() is the
    # documented replacement for elapsed-time measurement.
    start = time.perf_counter()

    # Read the two input tables; 'swj' tables live under a *_normal name.
    if argv[1].find('swj') >= 0:
        argv[1] += '_normal'
    if argv[2].find('swj') >= 0:
        argv[2] += '_normal'
    csvpath1 = "D:/eclipse-workspace/DataIntergration/python/schema-matching-master/demo/dy/"+argv[1]+".csv"
    csvpath2 = "D:/eclipse-workspace/DataIntergration/python/schema-matching-master/demo/dy/"+argv[2]+".csv"
    data1 = pd.read_csv(csvpath1, encoding='utf-8')
    data2 = pd.read_csv(csvpath2, encoding='utf-8')

    thresholdinput = float(argv[3])  # parsed to keep the argv contract; not used below
    matchpairnum = int(argv[4])

    # argv[5 .. 4+2*matchpairnum]: pairs of attributes to compare.
    matchpair = [[argv[5 + 2*i], argv[6 + 2*i]] for i in range(matchpairnum)]

    # NOTE: the original code derived the following section offsets from
    # loop variables leaked out of the preceding 'for' loops, which reads a
    # stale index whenever a section has fewer than two entries.  The
    # closed-form offsets below are equivalent for well-formed input and
    # correct for the degenerate cases.
    temp = 5 + 2*matchpairnum            # index of the normalisation-pair count
    norm_num = int(argv[temp])
    normschema = []
    normname = []
    for i in range(norm_num):
        normschema.append([argv[temp + 2*i + 1], argv[temp + 2*i + 2]])
        normname.append(['nor_' + argv[temp + 2*i + 1], 'nor_' + argv[temp + 2*i + 2]])
        data1[normname[i][0]] = data1[normschema[i][0]].map(preprocess_title)  # normalise table 1
        data2[normname[i][1]] = data2[normschema[i][1]].map(preprocess_title)  # normalise table 2

    temp2 = temp + 2*norm_num + 1        # index of the mixture-pair count
    mixture_num = int(argv[temp2])
    mixtureschema = [[argv[temp2 + 1 + 2*i], argv[temp2 + 2 + 2*i]] for i in range(mixture_num)]
    temp3 = temp2 + 2*mixture_num + 1    # index of the left output-attribute count

    # Surrogate key column for each table (required by Magellan).
    data1['id'] = range(data1.shape[0])
    data2['id'] = range(data2.shape[0])

    # Magellan Substep A: blocking (the candidate set itself is read from
    # disk below).  Coerce every match attribute to str regardless of dtype.
    for i in range(matchpairnum):
        ssj.utils.converter.dataframe_column_to_str(data1, matchpair[i][0], inplace=True)
        ssj.utils.converter.dataframe_column_to_str(data2, matchpair[i][1], inplace=True)

    # Build a 'mixture' column by concatenating the mixture attributes.
    data1['mixture'] = data1[mixtureschema[0][0]]
    data2['mixture'] = data2[mixtureschema[0][1]]
    for i in range(1, mixture_num):
        data1['mixture'] += '' + data1[mixtureschema[i][0]]
        data2['mixture'] += '' + data2[mixtureschema[i][1]]

    # Left-table output attributes; strip the 'nor_' prefix so the final
    # matches are reported on the original (un-normalised) columns.
    l_out_num = int(argv[temp3])
    l_out_atrrss = []
    for i in range(l_out_num):
        attr = argv[temp3 + 1 + i]
        l_out_atrrss.append(attr[4:] if attr.startswith("nor_") else attr)

    temp4 = temp3 + 1 + l_out_num        # index of the right output-attribute count
    # Right-table output attributes, same 'nor_' stripping.
    r_out_num = int(argv[temp4])
    r_out_attrss = []
    for i in range(r_out_num):
        attr = argv[temp4 + 1 + i]
        r_out_attrss.append(attr[4:] if attr.startswith("nor_") else attr)

    cnameidx = temp4 + 1 + r_out_num     # candidate-set name, then label file name
    cname = argv[cnameidx]
    labelname = argv[cnameidx + 1]

    # Candidate set produced by the blocking step.
    C1path = "D:/eclipse-workspace/DataIntergration/WebContent/output/"+cname+".csv"
    C1 = pd.read_csv(C1path, encoding='utf-8')

    # Register Magellan catalog metadata: table keys plus the candidate
    # set's foreign keys into the left/right tables.
    em.set_key(data1, 'id')
    em.set_key(data2, 'id')
    em.set_key(C1, '_id')
    em.set_ltable(C1, data1)
    em.set_rtable(C1, data2)
    em.set_fk_rtable(C1, 'r_id')
    em.set_fk_ltable(C1, 'l_id')

    # Hand-labelled sample (~500 rows) used for training and evaluation.
    labelpath = 'C:/apache-tomcat-9.0.14/webapps/DataIntergration/WEB-INF/upload/'+labelname
    labeled = em.read_csv_metadata(labelpath, ltable=data1, rtable=data2,
                                   fk_ltable='l_id', fk_rtable='r_id', key='_id')

    # Substep E: train machine-learning matchers on a 70/30 split.
    split = em.split_train_test(labeled, train_proportion=0.70, random_state=0)
    train_data = split['train']
    test_data = split['test']

    dt = em.DTMatcher(name='DecisionTree', random_state=0)
    svm = em.SVMMatcher(name='SVM', random_state=0)
    rf = em.RFMatcher(name='RF', random_state=0)
    lg = em.LogRegMatcher(name='LogReg', random_state=0)
    ln = em.LinRegMatcher(name='LinReg')
    nb = em.NBMatcher(name='NaiveBayes')

    # Restrict attribute correspondences to the mixture schema pairs
    # (schema-matching information for feature generation).
    attr_corres = em.get_attr_corres(data1, data2)
    attr_corres['corres'] = []  # the original '[]*mixture_num' is always just []
    for i in range(mixture_num):
        attr_corres['corres'].append((mixtureschema[i][0], mixtureschema[i][1]))

    l_attr_types = em.get_attr_types(data1)  # per-column type inference
    r_attr_types = em.get_attr_types(data2)

    tok = em.get_tokenizers_for_matching()  # default qgram tokenizers
    sim = em.get_sim_funs_for_matching()    # similarity functions

    # Feature definitions derived from the correspondences above.
    F = em.get_features(data1, data2, l_attr_types, r_attr_types, attr_corres, tok, sim)

    # Feature extraction + mean-imputation for the training set.
    train_features = em.extract_feature_vecs(train_data, feature_table=F, attrs_after='label', show_progress=True)
    train_features = em.impute_table(train_features, exclude_attrs=['_id', 'l_id', 'r_id', 'label'], strategy='mean')

    # 5-fold cross-validated model selection.
    result = em.select_matcher([dt, rf, svm, ln, lg, nb], table=train_features,
                               exclude_attrs=['_id', 'l_id', 'r_id', 'label'], k=5,
                               target_attr='label', random_state=0)  # metric='f1'

    best_model = result['selected_matcher']
    best_model.fit(table=train_features, exclude_attrs=['_id', 'l_id', 'r_id', 'label'], target_attr='label')

    test_features = em.extract_feature_vecs(test_data, feature_table=F, attrs_after='label', show_progress=False)
    test_features = em.impute_table(test_features, exclude_attrs=['_id', 'l_id', 'r_id', 'label'], strategy='mean')

    # Evaluate the selected model on the held-out test split.
    predictions = best_model.predict(table=test_features, exclude_attrs=['_id', 'l_id', 'r_id', 'label'],
                                     append=True, target_attr='predicted', inplace=False)
    eval_result = em.eval_matches(predictions, 'label', 'predicted')
    em.print_eval_summary(eval_result)

    # Extract features for the whole candidate set and predict match/no-match.
    candset_features = em.extract_feature_vecs(C1, feature_table=F, show_progress=True)
    candset_features = em.impute_table(candset_features, exclude_attrs=['_id', 'l_id', 'r_id'], strategy='mean')
    predictions = best_model.predict(table=candset_features, exclude_attrs=['_id', 'l_id', 'r_id'],
                                     append=True, target_attr='predicted', inplace=False)
    matches = predictions[predictions.predicted == 1]
    print("matchshape:", matches.shape)
    matchesname = "match_" + cname
    print("matchname:", matchesname)
    matches.to_csv('D:/eclipse-workspace/DataIntergration/WebContent/output/'+matchesname+'.csv', encoding='utf-8')

    # Keep only the predicted==1 rows and attach the requested output columns.
    matches2 = matches[['_id', 'l_id', 'r_id', 'predicted']]
    matches2.reset_index(drop=True, inplace=True)
    cm.set_candset_properties(matches2, '_id', 'l_id', 'r_id', data1, data2)
    matches2 = em.add_output_attributes(matches2, l_output_attrs=l_out_atrrss,
                                        r_output_attrs=r_out_attrss,
                                        l_output_prefix='l_', r_output_prefix='r_',
                                        delete_from_catalog=False)
    matches2.drop('predicted', axis=1, inplace=True)
    matches2.drop('l_id', axis=1, inplace=True)
    matches2.drop('r_id', axis=1, inplace=True)
    matches2name = matchesname + "2"
    print("m2n:", matches2name)
    matches2.to_csv('D:/eclipse-workspace/DataIntergration/WebContent/output/'+matches2name+'.csv', encoding='utf-8')
    print("m2s:", matches2.shape)

    end = time.perf_counter()  # end of timing (elapsed = end - start)

    # "Data discovery": matched record pairs whose compared attribute
    # values still differ — candidates for data-quality inspection.
    datadiscovery = matches2[(matches2['l_' + matchpair[0][0]] != matches2['r_' + matchpair[0][1]])]
    for i in range(1, matchpairnum):
        datadiscovery2 = matches2[(matches2['l_' + matchpair[i][0]] != matches2['r_' + matchpair[i][1]])]
        datadiscovery = pd.merge(datadiscovery, datadiscovery2, how='outer')

    datadiscovery.drop([datadiscovery.columns[0]], axis=1, inplace=True)
    datadiscoveryname = matches2name + "_discovery"
    print("disn:", datadiscoveryname)
    print("dishape:", datadiscovery.shape[0])
    datadiscovery.to_csv('D:/eclipse-workspace/DataIntergration/WebContent/output/' + datadiscoveryname + '.csv',
                         encoding='utf-8')



# Script entry point: forward the process argv to main() and propagate
# its return value as the exit status.
if __name__ == '__main__':
    sys.exit(main(sys.argv))