import jieba.analyse
from sqlalchemy import create_engine
import psycopg2
import pandas as pd
# Tokenize the already-collected Q&A library with jieba.
# Connect to the database
# (NOTE(review): the DB connection actually happens near the end of the
# script; the input data is read from a local Excel file, not the database.)

# Fetch the data.
df = pd.read_excel('九分类.xlsx', sheet_name='Sheet1')
# Build the text to segment: question title + similar phrasings + answer text.
# NOTE(review): string addition propagates NaN — if any of these columns has
# missing cells the whole row becomes NaN; consider .fillna('') first — verify
# against the source sheet.
df['text'] =  df['QUESTION_标题']+ df['SIMILAR_相似问法']+ df['ANSWER_T_文本答案'] 


 

print(df)  # sanity check of the loaded frame
#df.reset_index(level=0, inplace=True)

# print(pd_data)

# Close the cursor and the database connection (nothing is open yet).

sentence = ''  # NOTE(review): never used anywhere below — candidate for removal

jieba.enable_parallel(4)  # enable parallel segmentation, 4 worker processes
# NOTE(review): jieba.enable_parallel is POSIX-only (fails on Windows).

def make_keywords(keywordss):
    """Extract up to 20 TextRank keywords from *keywordss*.

    Parameters
    ----------
    keywordss
        Text to analyse. Coerced with ``str()`` so non-string cells coming
        from the DataFrame (e.g. NaN/float) do not crash jieba.

    Returns
    -------
    str
        The keywords joined by spaces, with a leading space when at least one
        keyword is found (kept identical to the original output format), or
        ``''`` when TextRank finds nothing.
    """
    text = str(keywordss)  # DataFrame cells may be non-string values
    keywords = jieba.analyse.textrank(text, topK=20, withWeight=False)
    # ''.join with one leading space per keyword reproduces the original
    # " kw1 kw2 ..." format in linear time instead of quadratic += growth.
    result = ''.join(' ' + kw for kw in keywords)
    print(result)  # progress/debug trace, kept from the original
    return result


df['keyword'] = ''  # pre-create the column; populated row-by-row below

# The apply-based version gave wrong results...
# NOTE(review): because the lambda passes the whole column df['text'] to
# make_keywords instead of the row's scalar row['text'].
# def apply_tariff_withapply(df):
#    df['keyword'] = df.apply(lambda row: make_keywords(df['text']), axis=1)
#
#
# apply_tariff_withapply(df)

# Populate the keyword attribute.

def apply_tariff_iterrows(df):
    """Populate ``df['keyword']`` with the extracted keywords of each row's
    ``text`` value (one space-separated keyword string per row).

    The frame is modified in place; nothing is returned.
    """
    # Series.apply hands each row's *scalar* text value to make_keywords —
    # exactly what the earlier commented-out df.apply attempt got wrong (it
    # passed the whole column). Produces the same values as the original
    # iterrows + append loop, without the per-row iteration overhead.
    df['keyword'] = df['text'].apply(make_keywords)


apply_tariff_iterrows(df)  # fills df['keyword'] in place

#df['keyword'] = df.apply(lambda row: make_keywords(keyword=df['text']), axis=1)


# for i in df.index  :
# df['keyword']=make_keywords(df['text'])
# print (i)
# df['keyword'][i]=  (make_keywords(df['text'][i]))

print(df)  # sanity check: frame now carries the keyword column

df.reset_index(level=0, inplace=True)  # promote the index to a regular column

# Close the cursor and the database (no-op here).
# Connect to the database.
# create_engine URL format: dialect[+driver]://user:password@host/dbname[?key=value..]
# NOTE(review): credentials are hard-coded — move them to environment
# variables or a config file before sharing/deploying this script.
engine = create_engine('postgresql://ai:ai001@localhost:5432/ltzd')


# Replace the target table wholesale with the processed frame,
# writing 1000 rows per batch and dropping the pandas index.
df.to_sql('ai_9fenlei_data_prc', engine, if_exists='replace',
          index=False, chunksize=1000)