import json,os
from re import M
import numpy as np
import jieba as jb
from numpy.lib.function_base import vectorize
from sklearn.feature_extraction.text import CountVectorizer,TfidfTransformer, TfidfVectorizer

# Load the pre-tokenised corpus: each entry is a list of word tokens.
with open('./data/cutted.json', 'r', encoding='utf8') as f:
    data = json.load(f)

# Space-joined tokens feed the vectorizer; re-joined raw text is kept
# for displaying the matched sentence later.
cutted = [' '.join(tokens) for tokens in data]
sentences = np.array([''.join(tokens) for tokens in data])

# Fit a bag-of-words model over the whole corpus.
# (TfidfVectorizer was tried as an alternative weighting scheme.)
vectorizor = CountVectorizer()
X = vectorizor.fit_transform(cutted)

# msg=input('>_')
def del_stopwords(li, stops):
    """Remove every stopword from *li* in place and return it.

    Args:
        li: list of tokens; modified in place.
        stops: iterable of stopwords to drop.

    Returns:
        The same list object with all stopwords removed.
    """
    # Set membership is O(1) vs O(n) per token against the (large,
    # multi-file) stopword list the caller builds.
    stop_set = set(stops)
    # Slice assignment keeps the in-place mutation contract of the
    # original pop-based implementation.
    li[:] = [word for word in li if word not in stop_set]
    return li

# Gather stopwords from every file under ./stopwords/ (one word per line).
stops = []
for root, _dirs, files in os.walk('./stopwords/'):
    for filename in files:
        filepath = root + '/' + filename
        with open(filepath, 'r', encoding='utf8') as f:
            stops += [line.replace('\n', '') for line in f.readlines()]

# Example query: segment with jieba, drop stopwords, and re-join with
# spaces so it matches the corpus tokenisation.
msg = '你好啊'
tokens = del_stopwords(jb.lcut(msg), stops)
msg = ' '.join(tokens)

# Project the query into the same bag-of-words space as the corpus.
y = vectorizor.transform([msg])
# 根据y筛选语句
# Rank corpus rows against the query vector y.
# A row is a candidate only when it shares at least one term with the
# query (np.sum(cell[y > 0]) > 0); non-candidates are pushed to +inf.
# The score is the summed count difference between the row and y.
d = [np.sum(cell.toarray() - y) if np.sum(cell[y > 0]) > 0 else np.inf
     for cell in X]
d = np.array(d)

# Keep the 10 best-scoring candidates.
# BUG FIX: the original indexed with `index + 1`, shifting the selection
# one row past the argsort result (wrong sentences, and an IndexError
# whenever the last row ranked in the top 10). Use the indices directly.
index = np.argsort(d)[:10]
_x = X[index]
sentences = sentences[index]

# Re-score the short-list and print the single best match.
d = [np.sum(cell.toarray() - y) if np.sum(cell[y > 0]) > 0 else np.inf
     for cell in _x]
index = np.argsort(np.array(d))[0]
print(sentences[index])