import jieba
import nltk
from nltk.stem.porter import PorterStemmer
from nltk.stem.lancaster import LancasterStemmer
from nltk.stem import SnowballStemmer
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
from nltk.classify import NaiveBayesClassifier
from nltk import FreqDist
from nltk.cluster.util import cosine_distance

# Tokenization: jieba for Chinese, nltk for other languages
s = "python is a structured and powerful object-oriented programming language."
w = nltk.word_tokenize(s)
print(w)

s2 = "我说的全是假的"
seg_precise = jieba.cut(s2)  # precise mode
print('/'.join(seg_precise))
seg_full = jieba.cut(s2, cut_all=True)  # full mode
print('/'.join(seg_full))

# Part-of-speech tagging on the English tokens
print(nltk.pos_tag(w))

# Word normalization: three stemmers vs. one lemmatizer on the same samples
# (Porter, Lancaster and Snowball algorithms, in that order)
for stemmer in (PorterStemmer(), LancasterStemmer(), SnowballStemmer('english')):
    for sample in ('watched', 'watching', 'went'):
        print(stemmer.stem(sample))

# WordNet lemmatization uses dictionary lookup, so it can map 'went' -> 'go'
lemmatizer = WordNetLemmatizer()
print(lemmatizer.lemmatize('watched', pos='v'))
print(lemmatizer.lemmatize('books'))
print(lemmatizer.lemmatize('went', pos='v'))

# Remove stopwords (words that carry no meaning on their own)
english_stopwords = set(stopwords.words('english'))
filtered_tokens = [token for token in w if token not in english_stopwords]
print(filtered_tokens)

# Sentiment analysis: tiny hand-labeled corpus
# (t1-t3 are labeled positive below, t4-t5 negative)
t1='This is a wonderful book'
t2='I like this book very much'
t3='this book reads well'
t4='this book is not good'
t5='this is a very bad book'
def pref_text(text):
    """Turn a sentence into a bag-of-words featureset for the classifier.

    Tokenizes `text`, lemmatizes each token, drops English stopwords and
    returns a dict mapping each remaining token to True (the featureset
    shape expected by nltk's NaiveBayesClassifier).
    """
    words = nltk.word_tokenize(text)
    wn = WordNetLemmatizer()
    words = [wn.lemmatize(x) for x in words]
    # Bug fix: filter the lemmatized local `words`, not the unrelated
    # module-level `w` (which made every call return the same features
    # and silently discarded the lemmatization above).
    stop = set(stopwords.words('english'))
    words = [x for x in words if x not in stop]
    return {x: True for x in words}

# Train a Naive Bayes sentiment classifier (label 1 = positive, -1 = negative)
train_data = [(pref_text(text), label)
              for text, label in ((t1, 1), (t2, 1), (t3, 1), (t4, -1), (t5, -1))]
model = NaiveBayesClassifier.train(train_data)

# Classify three unseen sentences
for sentence in ('I like this movie very much',
                 'This film is very bad',
                 'The film is terrible'):
    print(model.classify(pref_text(sentence)))

# Text similarity via bag-of-words frequency vectors
text1 = 'John likes to watch movies'
text2 = 'John also likes to watch football games'
words = nltk.word_tokenize(text1 + " " + text2)
# Count how often each token appears across both sentences
fd = FreqDist(words)
print(fd['John'])
# The most common tokens (and their counts) define the vector dimensions
print(fd.most_common(5))
def find_position(words):
    """Map each item's first element to its position in the input order.

    Intended for the (token, count) pairs produced by FreqDist.most_common;
    returns {token: index} in iteration order.
    """
    return {item[0]: idx for idx, item in enumerate(words)}

# Fix the five most frequent tokens to vector positions (token -> index)
pos_dict=find_position(fd.most_common(5))
print(pos_dict)

# 转换为词频向量
def text_to_vec(words):
    frec_v=[0]*5
    for w in words:
        if w in list(pos_dict.keys()):
            frec_v[pos_dict[w]]+=1
    return frec_v

# Build each sentence's frequency vector over the shared dimensions
v1, v2 = [text_to_vec(nltk.word_tokenize(t)) for t in (text1, text2)]
print(v1)
print(v2)
# Cosine distance in [0, 1] here: smaller = more similar, larger = less similar
print(cosine_distance(v1, v2))