import pandas as pd
import py_stringsimjoin as ssj
import py_stringmatching as sm
import sys
import time

import py_entitymatching as em

# from sklearn.impute import SimpleImputer  # unused; kept for reference
from py_entitymatching.catalog import catalog_manager as cm

# Translation table built once at import time: a single str.translate pass
# replaces the original chain of nine .replace() calls.
# ',' maps to '' because the original turned commas into spaces and then
# deleted every space anyway, so the net effect is removal.
_TITLE_TRANSLATION = str.maketrans({
    ',': '', "'": '', '"': '', '(': '', ')': '',
    '（': '', '）': '', ';': '', ' ': '',
    '&': 'and',
})


def preprocess_title(title):
    """Normalize a name/title string for fuzzy matching.

    Lowercases the input, removes commas, single/double quotes, ASCII and
    full-width parentheses, semicolons and spaces, and rewrites '&' as
    'and', so comparisons ignore punctuation and spacing variants.

    Args:
        title: raw string to normalize.

    Returns:
        The normalized string.
    """
    # strip() still removes leading/trailing non-space whitespace
    # (tabs, newlines), matching the original behavior.
    return title.lower().translate(_TITLE_TRANSLATION).strip()


def main(argv=None):
    """Run the end-to-end entity-matching pipeline.

    Steps: load the two CSV tables, normalize the join columns, block with
    an overlap-coefficient string join to get a candidate set, train and
    select a matcher on a hand-labeled sample, evaluate it on a held-out
    split, predict over the full candidate set, and write the matches to
    './data/matches(modify+).csv'.

    Args:
        argv: unused; present for the conventional main() signature.

    Returns:
        None (implicitly), so ``sys.exit(main())`` exits with status 0.
    """
    # time.clock() was removed in Python 3.8; perf_counter() is the
    # correct monotonic wall-clock timer for measuring elapsed time.
    start = time.perf_counter()

    # Read the two input tables.
    base_data = pd.read_csv('./data/baseinfo.csv', encoding='utf-8')
    reg_data = pd.read_csv('./data/reginfo_extract3(modify).csv', encoding='utf-8')

    # Strip punctuation/spacing from the columns used for matching.
    reg_data['nor_authors'] = reg_data['author Nationality'].map(preprocess_title)
    base_data['nor_names'] = base_data['name'].map(preprocess_title)

    # Add a surrogate integer id column to each table.
    base_data['id'] = range(base_data.shape[0])
    reg_data['id'] = range(reg_data.shape[0])

    # --- Magellan workflow ---
    # Substep A: finding a candidate set (blocking).

    # Convert the numeric "company_id" column to str for string joins.
    ssj.utils.converter.dataframe_column_to_str(base_data, 'company_id', inplace=True)

    # Create the "mixture" column used as the blocking key.
    base_data['mixture'] = base_data['nor_names']
    # Same transformation for the reg_data table.
    ssj.utils.converter.dataframe_column_to_str(reg_data, 'company_id', inplace=True)
    reg_data['mixture'] = reg_data['nor_authors']

    # Q-gram tokenized overlap join with a threshold to build candidates.
    C1 = ssj.overlap_coefficient_join(base_data, reg_data, 'id', 'id', 'mixture', 'mixture', sm.QgramTokenizer(),
                                      l_out_attrs=['company_id', 'nor_names', 'credit_code'],
                                      r_out_attrs=['company_id', 'nor_authors', 'fullname', 'regnum'],
                                      threshold=0.80)

    C1.to_csv("data/C.csv", sep=',', encoding="utf-8", index=False)

    # Register metadata so py_entitymatching knows the keys/foreign keys.
    em.set_key(base_data, 'id')     # key column of the left table
    em.set_key(reg_data, 'id')      # key column of the right table
    em.set_key(C1, '_id')           # key of the candidate set
    em.set_ltable(C1, base_data)    # left table of the candidate set
    em.set_rtable(C1, reg_data)     # right table of the candidate set
    em.set_fk_rtable(C1, 'r_id')    # FK into the right table's key
    em.set_fk_ltable(C1, 'l_id')    # FK into the left table's key

    # Sample 500 pairs and write them out for manual labeling.
    sampled = C1.sample(500, random_state=0)
    sampled.to_csv('./data/sampled(modify).csv', encoding='utf-8')

    # Read back the 500 manually labeled samples.
    labeled = em.read_csv_metadata('data/label+.csv', ltable=base_data, rtable=reg_data,
                                   fk_ltable='l_id', fk_rtable='r_id', key='_id')

    # Substep E: training machine-learning matchers.
    # 70/30 train/test split.
    split = em.split_train_test(labeled, train_proportion=0.70, random_state=0)
    train_data = split['train']
    test_data = split['test']

    dt = em.DTMatcher(name='DecisionTree', random_state=0)
    svm = em.SVMMatcher(name='SVM', random_state=0)
    rf = em.RFMatcher(name='RF', random_state=0)
    lg = em.LogRegMatcher(name='LogReg', random_state=0)
    ln = em.LinRegMatcher(name='LinReg')
    nb = em.NBMatcher(name='NaiveBayes')

    # Declare the schema correspondence between the two tables.
    attr_corres = em.get_attr_corres(base_data, reg_data)
    attr_corres['corres'] = [('company_id', 'company_id'),
                             ('nor_names', 'nor_authors')]

    # Infer per-column types for feature generation.
    l_attr_types = em.get_attr_types(base_data)
    r_attr_types = em.get_attr_types(reg_data)

    tok = em.get_tokenizers_for_matching()   # default q-gram tokenizers
    sim = em.get_sim_funs_for_matching()     # similarity functions

    # Feature table: how each feature vector component is computed.
    F = em.get_features(base_data, reg_data, l_attr_types, r_attr_types, attr_corres, tok, sim)

    # Extract feature vectors for the training split; impute missing
    # values with column means so the learners can handle NaNs.
    train_features = em.extract_feature_vecs(train_data, feature_table=F, attrs_after='label', show_progress=True)
    train_features = em.impute_table(train_features, exclude_attrs=['_id', 'l_id', 'r_id', 'label'], strategy='mean')

    # 5-fold cross-validated model selection (default metric; pass
    # metric='f1' to change it).
    result = em.select_matcher([dt, rf, svm, ln, lg, nb], table=train_features,
                               exclude_attrs=['_id', 'l_id', 'r_id', 'label'], k=5,
                               target_attr='label', random_state=0)
    # NOTE(review): result['cv_stats'] holds the per-fold scores if they
    # need to be inspected or logged.

    best_model = result['selected_matcher']
    best_model.fit(table=train_features, exclude_attrs=['_id', 'l_id', 'r_id', 'label'], target_attr='label')

    test_features = em.extract_feature_vecs(test_data, feature_table=F, attrs_after='label', show_progress=False)
    test_features = em.impute_table(test_features, exclude_attrs=['_id', 'l_id', 'r_id', 'label'], strategy='mean')

    # Predict on the held-out test data.
    predictions = best_model.predict(table=test_features, exclude_attrs=['_id', 'l_id', 'r_id', 'label'],
                                     append=True, target_attr='predicted', inplace=False)

    # Evaluate the predictions (precision/recall/F1 summary).
    eval_result = em.eval_matches(predictions, 'label', 'predicted')
    em.print_eval_summary(eval_result)

    # Extract features for the whole candidate set and predict matches.
    candset_features = em.extract_feature_vecs(C1, feature_table=F, show_progress=True)
    candset_features = em.impute_table(candset_features, exclude_attrs=['_id', 'l_id', 'r_id'], strategy='mean')
    predictions = best_model.predict(table=candset_features, exclude_attrs=['_id', 'l_id', 'r_id'],
                                     append=True, target_attr='predicted', inplace=False)
    matches = predictions[predictions.predicted == 1]
    print(matches.shape)

    # Keep only predicted==1 rows and normalize the output schema.
    matches2 = matches[['_id', 'l_id', 'r_id', 'predicted']]
    matches2.reset_index(drop=True, inplace=True)
    cm.set_candset_properties(matches2, '_id', 'l_id', 'r_id', base_data, reg_data)
    matches2 = em.add_output_attributes(matches2, l_output_attrs=['company_id', 'name', 'credit_code'],
                                        r_output_attrs=['company_id', 'author Nationality', 'fullname', 'regnum'],
                                        l_output_prefix='l_', r_output_prefix='r_',
                                        delete_from_catalog=False)
    matches2.drop('predicted', axis=1, inplace=True)
    matches2.drop('l_id', axis=1, inplace=True)
    matches2.drop('r_id', axis=1, inplace=True)
    matches2.to_csv('./data/matches(modify+).csv', encoding='utf-8')
    print(matches2.shape)

    end = time.perf_counter()  # end of timing
    print("the time for sort: %.3f seconds" % (end - start), file=sys.stderr)

if __name__ == '__main__':
    # Propagate main()'s return value (None -> exit status 0).
    sys.exit(main())








