
import time
import pandas as pd
from aip import AipNlp
import jieba
import numpy as np
from tqdm import tqdm

# Baidu NLP client used for (optional) online lexing/word segmentation.
# SECURITY NOTE(review): API credentials are hardcoded in source — move them to
# environment variables or a config file and rotate these keys before sharing.
APP_ID = '24148383'
API_KEY = 'vhzsctF5Syopv8VTDAz9YVEv'
SECRET_KEY = '3viPxcF8O0jCwxixKOvCy9uSRnt1eymE'
client = AipNlp(APP_ID, API_KEY, SECRET_KEY)

# Load the standard mutual-fund name table and join the first two columns
# (presumably fund code + fund name — TODO confirm) into one string per row.
# The bare `type(...)` expressions from the original were interactive no-ops
# and have been removed.
std_data = pd.read_excel(r"D:\mutualFundName.xlsx")
std_data1 = list(std_data.iloc[:, 0] + std_data.iloc[:, 1])

# def get_word_vector_baidu(word1, word2):
#     print('word1 is {0}, word2 is {1}'.format(word1, word2))
#     time.sleep(0.01)
#     list_word1 = client.lexer(word1)['items'][0]['basic_words']  # 在线版
#     time.sleep(0.03)
#     list_word2 = client.lexer(word2)['items'][0]['basic_words']  # 在线版
#     # print('completed')
#     # List all the words and take the union of the two token lists
#     key_word = list(set(list_word1 + list_word2))
#     # Zero-filled arrays of the given shape/type to hold the word vectors
#     word_vector1 = np.zeros(len(key_word))
#     word_vector2 = np.zeros(len(key_word))

# %%

# list_word = client.lexer(word)['items'][0]


# def get_word_vector_baidu(word):
#     print('word is {0}'.format(word))
#     time.sleep(0.01)
#     list_word = client.lexer(word)['items'][0]['basic_words']
#     lexer_word = jieba.cut(list_word)
#     print('complete')
#     keyword = list(set(lexer_word))
#     return keyword

# get_word_vector_baidu(std_data1)
# Segment every fund-name string with jieba and collect the unique tokens.
# Fix: the original re-deduplicated the whole list on every iteration
# (`total_list = list(set(total_list))` inside the loop) — accumulate in a
# set instead and convert once after the loop. The stray `pass` and the
# redundant `list()` around `jieba.lcut` (which already returns a list)
# are removed.
unique_tokens = set()
for ind, word in tqdm(enumerate(std_data1)):
    print('word{0} is {1}'.format(ind, word))
    unique_tokens.update(jieba.lcut(word))
    # single_list=[single_item['item'] for single_item in client.lexer(word)['items']]

# NOTE: set iteration order is arbitrary, as it was in the original.
total_list = list(unique_tokens)
print(total_list)
# Persist the vocabulary for later matching.
total_list = pd.DataFrame(total_list)
total_list.to_excel('cut_word1.xlsx')

# BUG FIX: the original path was a plain string in which "\U" begins a
# \UXXXXXXXX unicode escape — a SyntaxError in Python 3 that prevented the
# whole file from parsing. Use a raw string for Windows paths.
baidu_list = pd.read_excel(r"C:\Users\kf40\Desktop\智能匹配项目\cut_word.xlsx")
# BUG FIX: read_excel() was called with no path (TypeError at runtime).
# TODO(review): confirm the intended source file — using the jieba vocabulary
# written just above as the likely candidate.
jieba_list = pd.read_excel('cut_word1.xlsx')