# This is a sample Python script.
import time

import pandas as pd
from aip import AipNlp
import jieba
import numpy as np
from tqdm import tqdm
import re
# Baidu NLP word-segmentation (lexer) API credentials and client.
# NOTE(review): credentials are hard-coded in source — move them to
# environment variables / a config file and rotate these keys; anyone
# with read access to this file can use them.
APP_ID = '24148383'
API_KEY = 'vhzsctF5Syopv8VTDAz9YVEv'
SECRET_KEY = '3viPxcF8O0jCwxixKOvCy9uSRnt1eymE'

# Shared client used by get_word_vector_baidu() below.
client = AipNlp(APP_ID, API_KEY, SECRET_KEY)

# Load the reference ("standard") sheet and the original mapping sheet.
std_data = pd.read_excel(r"20210628交通银行.xls", sheet_name='SQL Results')
origin_data = pd.read_excel(r"交通银行.xls", sheet_name='SQL Results')

# Build the matching key for each table: level-2 name concatenated with
# level-3 name (NaN in either part propagates to the key, as with `+`).
for frame, left_col, right_col in (
    (origin_data, '二级名称', '三级名称'),
    (std_data, 'NAME2', 'NAME3'),
):
    frame['分支合并'] = frame[left_col] + frame[right_col]

def cos_dist(vec1, vec2):
    """Return the cosine similarity of two vectors.

    :param vec1: first vector (array-like of numbers)
    :param vec2: second vector (array-like of numbers)
    :return: float cosine similarity; 0.0 when either vector has zero
        norm (the original returned nan there, emitting a
        RuntimeWarning — both values fail the callers' ``> max_simi``
        checks, so this fix is behavior-compatible).
    """
    norm_product = np.linalg.norm(vec1) * np.linalg.norm(vec2)
    if norm_product == 0:
        # An all-zero vector has no direction; define similarity as 0.
        return 0.0
    return float(np.dot(vec1, vec2) / norm_product)


def get_word_vector_baidu(word1, word2):
    """Bag-of-words cosine similarity of two phrases, tokenized online.

    Each phrase is segmented with the Baidu NLP lexer, term-frequency
    vectors are built over the union vocabulary, and their cosine
    similarity is returned via :func:`cos_dist`.

    :param word1: first phrase (str)
    :param word2: second phrase (str)
    :return: float similarity in [0, 1] (term frequencies are non-negative)
    """
    from collections import Counter  # local import: keeps file-level imports untouched

    print('word1 is {0}, word2 is {1}'.format(word1, word2))
    # Short sleeps throttle requests to stay under the Baidu API QPS limit.
    time.sleep(0.01)
    list_word1 = client.lexer(word1)['items'][0]['basic_words']  # online tokenizer
    time.sleep(0.03)
    list_word2 = client.lexer(word2)['items'][0]['basic_words']  # online tokenizer
    # NOTE(review): only ['items'][0] of the lexer response is used —
    # confirm the first item covers the whole phrase for these inputs.

    # Term frequencies over the union vocabulary. Counter replaces the
    # original O(|vocab| * |tokens|) nested counting loops with O(n).
    freq1 = Counter(list_word1)
    freq2 = Counter(list_word2)
    key_word = list(freq1.keys() | freq2.keys())
    word_vector1 = np.array([freq1[w] for w in key_word], dtype=float)
    word_vector2 = np.array([freq2[w] for w in key_word], dtype=float)
    return cos_dist(word_vector1, word_vector2)

# Output columns populated by the matching loop below.
std_data['分支合并对照']=None            # matched merged branch name from origin_data
std_data['支行渠道编号更新']=None        # updated level-3 channel code (high-confidence match)
std_data['支行渠道编号更新Low_Prob']=None  # candidate code recorded at low confidence only
std_data['匹配准确率']=None              # match accuracy (cosine similarity, formatted as %)
std_data['原表支行名称对照']=None        # matched level-3 branch name from origin_data
# std_data['是否调用百度API']=0

# For every std_data row, scan all origin_data rows with the same level-2
# code, score the (noise-stripped) level-3 names with the Baidu-tokenized
# cosine similarity, and keep the best-scoring candidate. The acceptance
# thresholds differ depending on whether NO3 is already populated.
for i in tqdm(range(std_data.shape[0])):
    # lexer_words = jieba.cut(std_data.loc[i, '分支合并'])
    # list_word1 = (','.join(lexer_words)).split(',')  # offline version
    if not pd.isnull(std_data.loc[i, 'NO3']): # NO3 already present: re-match with a 0.5 cutoff. NOTE(review): the original comment said "channel code is empty and needs filling", which contradicts this condition — confirm intent.
        max_simi=0
        std_data.loc[i, 'NO3']=str(int(std_data.loc[i, 'NO3']))  # normalize e.g. 123.0 -> '123'
        for j in range(origin_data.shape[0]): # keep the best-scoring candidate seen so far
            # lexer_words2 = jieba.cut(origin_data.loc[j, '分支合并'])
            # list_word2 = (','.join(lexer_words2)).split(',')  # offline version
            if std_data.loc[i, 'NO2']==origin_data.loc[j, '二级编码']:  # only compare within the same level-2 (city) code
                try:
                    # Strip the city name and generic tokens ("city",
                    # "branch", "micro", "autonomous region", ...) so the
                    # similarity focuses on the distinguishing part of the name.
                    city_name = std_data.loc[i, 'NAME2'].replace('市', '').replace('分行', '').replace('支行', '').replace(
                        '小微', '').replace('营业部', '').replace('自治区', '')
                    trim_level3 = origin_data.loc[j, '三级名称'].replace(city_name, '').replace('市', '').replace('小微',
                                                                                                             '').replace(
                        '自治区', '')
                    trim_name3 = std_data.loc[i, 'NAME3'].replace(city_name, '').replace('市', '').replace('小微',
                                                                                                          '').replace(
                        '自治区', '')
                    cos_simi = get_word_vector_baidu(trim_level3, trim_name3)
                    # cos_simi=get_word_vector_baidu(std_data.loc[i, 'NAME3'],origin_data.loc[j, '三级名称'])
                    if cos_simi>max_simi and cos_simi>0.5:
                        # New best match above 0.5: record as a confident update
                        # and clear any earlier low-probability candidate.
                        max_simi=cos_simi
                        std_data.loc[i,'分支合并对照']=origin_data.loc[j,'分支合并']
                        std_data.loc[i, '原表支行名称对照'] = origin_data.loc[j, '三级名称']
                        std_data.loc[i,'支行渠道编号更新']=str(int(origin_data.loc[j, '三级编码']))
                        std_data.loc[i,'匹配准确率']='{:.1%}'.format(cos_simi)
                        std_data.loc[i, '支行渠道编号更新Low_Prob'] = None
                    elif  cos_simi > max_simi and cos_simi <= 0.5:
                        # New best match but at/below 0.5: record the code only
                        # in the low-probability column.
                        max_simi = cos_simi
                        std_data.loc[i, '分支合并对照'] = origin_data.loc[j, '分支合并']
                        std_data.loc[i, '原表支行名称对照'] = origin_data.loc[j, '三级名称']
                        std_data.loc[i, '支行渠道编号更新Low_Prob'] = str(int(origin_data.loc[j, '三级编码']))
                        std_data.loc[i, '匹配准确率'] = '{:.1%}'.format(cos_simi)
                except Exception as e:
                    # NOTE(review): all errors (API failures, NaN names, bad
                    # codes) are silently ignored — consider logging e.
                    pass
    else:
        # NO3 is missing: fill it in, using stricter thresholds.
        max_simi = 0
        for j in range(origin_data.shape[0]):  # keep the best-scoring candidate seen so far
            # lexer_words2 = jieba.cut(origin_data.loc[j, '分支合并'])
            # list_word2 = (','.join(lexer_words2)).split(',')  # offline version
            if std_data.loc[i, 'NO2']==origin_data.loc[j, '二级编码']:  # only compare within the same level-2 (city) code
                try:
                    # Same noise stripping as above; additionally drops the
                    # "under renovation" marker from the NAME3 side.
                    city_name=std_data.loc[i, 'NAME2'].replace('市','').replace('分行','').replace('支行','').replace('小微','').replace('营业部','').replace('自治区','')
                    trim_level3=origin_data.loc[j, '三级名称'].replace(city_name,'').replace('市','').replace('小微','').replace('自治区','')
                    trim_name3=std_data.loc[i, 'NAME3'].replace(city_name,'').replace('市','').replace('小微','').replace('自治区','').replace('装修中','')
                    cos_simi=get_word_vector_baidu(trim_level3,trim_name3)
                    # Three confidence tiers (the elif order matters since the
                    # ranges overlap): >=0.81 confident code update; <=0.67
                    # name match recorded but no code; (0.67, 0.81) code goes
                    # to the low-probability column only.
                    if cos_simi > max_simi and cos_simi >= 0.81:
                        max_simi = cos_simi
                        std_data.loc[i, '分支合并对照'] = origin_data.loc[j, '分支合并']
                        std_data.loc[i,'原表支行名称对照']=origin_data.loc[j, '三级名称']
                        std_data.loc[i, '支行渠道编号更新'] = str(int(origin_data.loc[j, '三级编码']))
                        std_data.loc[i, '匹配准确率'] = '{:.1%}'.format(cos_simi)
                        std_data.loc[i, '支行渠道编号更新Low_Prob']=None
                    elif  cos_simi > max_simi and cos_simi <= 0.67:
                        max_simi = cos_simi
                        std_data.loc[i, '分支合并对照'] = origin_data.loc[j, '分支合并']
                        std_data.loc[i, '原表支行名称对照'] = origin_data.loc[j, '三级名称']
                        std_data.loc[i, '支行渠道编号更新Low_Prob'] = None
                        std_data.loc[i, '支行渠道编号更新'] = None
                        std_data.loc[i, '匹配准确率'] = '{:.1%}'.format(cos_simi)
                    elif  cos_simi > max_simi and cos_simi < 0.81:
                        max_simi = cos_simi
                        std_data.loc[i, '分支合并对照'] = origin_data.loc[j, '分支合并']
                        std_data.loc[i, '原表支行名称对照'] = origin_data.loc[j, '三级名称']
                        std_data.loc[i, '支行渠道编号更新'] = None
                        std_data.loc[i, '支行渠道编号更新Low_Prob'] = str(int(origin_data.loc[j, '三级编码']))
                        std_data.loc[i, '匹配准确率'] = '{:.1%}'.format(cos_simi)
                except Exception as e:
                    # NOTE(review): all errors (API failures, NaN names, bad
                    # codes) are silently ignored — consider logging e.
                    pass

    # print(str(std_data.loc[i, 'NO3']) + " & " + str(std_data.loc[i, '支行渠道编号更新'])+" & "+'{:.1%}'.format(max_simi))

# std_data.columns
# std_data[['   ', 'NO1', 'NAME1', 'NO2', 'NAME2', 'NO3', '支行渠道编号更新','支行渠道编号更新Low_Prob','NAME3', 'ADDRESS', 'JD',
#        'WD','匹配准确率', ]]
# import pickle
# with open('std_data.plk','wb') as f:
#     pickle.dump(std_data,f)
std_data[[ 'NO1', 'NAME1', 'NO2', 'NAME2', 'NO3', '支行渠道编号更新','支行渠道编号更新Low_Prob','NAME3', '原表支行名称对照','ADDRESS', 'JD',
       'WD','匹配准确率', ]].to_excel('2021交通银行渠道数据更新底稿.xlsx')
