import pandas as pd
import py_stringsimjoin as ssj
import py_stringmatching as sm
import sys
import time

import py_entitymatching as em
from py_entitymatching.catalog import catalog_manager as cm

def preprocess_title(title):
    """Normalize a title string for fuzzy matching.

    Lower-cases the text, deletes commas, single/double quotes, ASCII and
    full-width parentheses, semicolons and all spaces, spells '&' out as
    'and', then trims any remaining surrounding whitespace.
    """
    # One C-level translate pass reproduces the original chain of
    # str.replace calls: ',' was first turned into ' ' and every ' ' was
    # later deleted, so both map straight to the empty string here.
    drop_or_swap = str.maketrans({
        ',': '', "'": '', '"': '',
        '(': '', ')': '', '（': '', '）': '',
        ';': '', ' ': '',
        '&': 'and',
    })
    return title.lower().translate(drop_or_swap).strip()


def main(argv):
    """Run the end-to-end entity-matching pipeline.

    Expected argv layout:
        argv[1], argv[2]  -- CSV basenames under ./data/ (left/right tables)
        argv[3]           -- number of matching column pairs
        argv[4:]          -- alternating left/right column names, one pair
                             per matching column

    Side effects: reads ./data/<name>.csv inputs and data/label+.csv,
    writes data/C.csv, ./data/sampled(modify).csv and
    ./data/matches(modify+).csv, and prints progress to stdout/stderr.
    """
    # Echo the arguments for traceability (use the parameter, not sys.argv,
    # so the function behaves the same however it is invoked).
    for arg in argv:
        print(arg)
    # time.clock() was removed in Python 3.8; perf_counter() is the
    # recommended replacement for elapsed-time measurement.
    start = time.perf_counter()

    # Load the two tables to be matched.
    csvpath1 = "./data/" + argv[1] + ".csv"
    csvpath2 = "./data/" + argv[2] + ".csv"
    data1 = pd.read_csv(csvpath1, encoding='utf-8')
    data2 = pd.read_csv(csvpath2, encoding='utf-8')

    matchpairnum = int(argv[3])

    # One [left_col, right_col] pair per matching column.  A list
    # comprehension avoids the shared-sublist pitfall of [[]] * n.
    matchpair = [[argv[4 + 2 * i], argv[5 + 2 * i]] for i in range(matchpairnum)]
    # e.g. matchpair[0] = ['name', 'author Nationality']
    #      matchpair[1] = ['company_id', 'company_id']

    # Strip punctuation/spaces from the primary match columns.
    data1['nor_names'] = data1[matchpair[0][0]].map(preprocess_title)
    data2['nor_authors'] = data2[matchpair[0][1]].map(preprocess_title)

    # Add a sequential integer key column for Magellan.
    data1['id'] = range(data1.shape[0])
    data2['id'] = range(data2.shape[0])

    # --- Magellan, Substep A: finding a candidate set (blocking) ---

    # Coerce the secondary match columns (possibly numeric) to str.
    ssj.utils.converter.dataframe_column_to_str(data1, matchpair[1][0], inplace=True)
    ssj.utils.converter.dataframe_column_to_str(data2, matchpair[1][1], inplace=True)

    # Build a combined "mixture" column from the two normalized attributes
    # (concatenated with no separator, matching the blocking tokenizer).
    data1['mixture'] = data1['nor_names'] + '' + data1[matchpair[1][0]]
    data2['mixture'] = data2['nor_authors'] + '' + data2[matchpair[1][1]]

    # Qgram-tokenized overlap join above a fixed threshold produces the
    # candidate set.
    # NOTE(review): 'credit_code', 'fullname' and 'regnum' are hard-coded
    # output columns — the input CSVs must contain them.
    C1 = ssj.overlap_coefficient_join(data1, data2, 'id', 'id', 'mixture', 'mixture', sm.QgramTokenizer(),
                                      l_out_attrs=[matchpair[1][0], 'nor_names', 'credit_code'],
                                      r_out_attrs=[matchpair[1][1], 'nor_authors', 'fullname', 'regnum'],
                                      threshold=0.80)

    C1.to_csv("data/C.csv", sep=',', encoding="utf-8", index=False)
    print("C1.shape：", C1.shape)

    # Register catalog metadata so py_entitymatching knows the keys and
    # the candidate set's foreign-key relationships.
    em.set_key(data1, 'id')      # key column of the left table
    em.set_key(data2, 'id')      # key column of the right table
    em.set_key(C1, '_id')        # key of the candidate set
    em.set_ltable(C1, data1)
    em.set_rtable(C1, data2)
    em.set_fk_rtable(C1, 'r_id')  # column matching the right table's key
    em.set_fk_ltable(C1, 'l_id')  # column matching the left table's key

    # Sample up to 500 candidate pairs for manual labeling (clamped so a
    # small candidate set does not raise in DataFrame.sample).
    sampled = C1.sample(min(500, len(C1)), random_state=0)
    sampled.to_csv('./data/sampled(modify).csv', encoding='utf-8')

    # Read back the manually labeled sample.
    labeled = em.read_csv_metadata('data/label+.csv', ltable=data1, rtable=data2,
                                   fk_ltable='l_id', fk_rtable='r_id', key='_id')

    # --- Substep E: training machine learning algorithms ---
    # 70/30 train/test split.
    split = em.split_train_test(labeled, train_proportion=0.70, random_state=0)
    train_data = split['train']
    test_data = split['test']

    dt = em.DTMatcher(name='DecisionTree', random_state=0)
    svm = em.SVMMatcher(name='SVM', random_state=0)
    rf = em.RFMatcher(name='RF', random_state=0)
    lg = em.LogRegMatcher(name='LogReg', random_state=0)
    ln = em.LinRegMatcher(name='LinReg')
    nb = em.NBMatcher(name='NaiveBayes')

    # Declare the schema correspondences used for feature generation.
    attr_corres = em.get_attr_corres(data1, data2)
    attr_corres['corres'] = [(matchpair[1][0], matchpair[1][1]),
                             ('nor_names', 'nor_authors')]

    l_attr_types = em.get_attr_types(data1)  # infer per-column types
    r_attr_types = em.get_attr_types(data2)

    tok = em.get_tokenizers_for_matching()   # default tokenizers (qgram etc.)
    sim = em.get_sim_funs_for_matching()     # similarity functions

    # Feature table: how each corresponding pair of columns is compared.
    F = em.get_features(data1, data2, l_attr_types, r_attr_types, attr_corres, tok, sim)

    # Extract feature vectors for the training set and impute missing values.
    train_features = em.extract_feature_vecs(train_data, feature_table=F, attrs_after='label', show_progress=True)
    train_features = em.impute_table(train_features, exclude_attrs=['_id', 'l_id', 'r_id', 'label'], strategy='mean')

    # Cross-validate the candidate matchers and pick the best one.
    result = em.select_matcher([dt, rf, svm, ln, lg, nb], table=train_features,
                               exclude_attrs=['_id', 'l_id', 'r_id', 'label'], k=5,
                               target_attr='label', random_state=0)  # metric='f1'
    # The original bare expression was a notebook leftover; print the
    # cross-validation statistics so they are actually visible.
    print(result['cv_stats'])

    best_model = result['selected_matcher']
    best_model.fit(table=train_features, exclude_attrs=['_id', 'l_id', 'r_id', 'label'], target_attr='label')

    test_features = em.extract_feature_vecs(test_data, feature_table=F, attrs_after='label', show_progress=False)
    test_features = em.impute_table(test_features, exclude_attrs=['_id', 'l_id', 'r_id', 'label'], strategy='mean')

    # Predict on the held-out test data.
    predictions = best_model.predict(table=test_features, exclude_attrs=['_id', 'l_id', 'r_id', 'label'],
                                     append=True, target_attr='predicted', inplace=False)

    # Evaluate the predictions.
    eval_result = em.eval_matches(predictions, 'label', 'predicted')
    em.print_eval_summary(eval_result)

    # Apply the trained matcher to the full candidate set.
    candset_features = em.extract_feature_vecs(C1, feature_table=F, show_progress=True)
    candset_features = em.impute_table(candset_features, exclude_attrs=['_id', 'l_id', 'r_id'], strategy='mean')
    predictions = best_model.predict(table=candset_features, exclude_attrs=['_id', 'l_id', 'r_id'],
                                     append=True, target_attr='predicted', inplace=False)
    matches = predictions[predictions.predicted == 1]
    print(matches.shape)

    # Keep only predicted matches and attach human-readable output columns.
    matches2 = matches[['_id', 'l_id', 'r_id', 'predicted']]
    matches2.reset_index(drop=True, inplace=True)
    cm.set_candset_properties(matches2, '_id', 'l_id', 'r_id', data1, data2)
    matches2 = em.add_output_attributes(matches2, l_output_attrs=[matchpair[1][0], matchpair[0][0], 'credit_code'],
                                        r_output_attrs=[matchpair[1][1], matchpair[0][1], 'fullname', 'regnum'],
                                        l_output_prefix='l_', r_output_prefix='r_',
                                        delete_from_catalog=False)
    matches2.drop('predicted', axis=1, inplace=True)
    matches2.drop('l_id', axis=1, inplace=True)
    matches2.drop('r_id', axis=1, inplace=True)
    matches2.to_csv('./data/matches(modify+).csv', encoding='utf-8')
    print(matches2.shape)

    end = time.perf_counter()
    print("the time for sort: %.3f seconds" % (end - start), file=sys.stderr)

if __name__ == '__main__':
    # Forward the whole argv vector; main's return value becomes the exit code.
    sys.exit(main(sys.argv))








