from nltk.tokenize import word_tokenize
from nltk.tokenize import sent_tokenize
from nltk import FreqDist
from nltk import pos_tag
import nltk
from nltk import NaiveBayesClassifier
from nltk.stem.snowball import SnowballStemmer
from nltk import WordNetLemmatizer
from nltk.corpus import movie_reviews
import random
from nltk import ngrams
from nltk.corpus import stopwords         # stop words
import string
from nltk.text import TextCollection
import threading
import time


random.seed(42)
def load_movie_reviews(train_size=1600):
    """Load the NLTK movie_reviews corpus as labeled (text, label) pairs.

    All reviews are labeled by their corpus category, shuffled
    (deterministically, given the module-level ``random.seed(42)``),
    and split into train/test partitions.

    Args:
        train_size: number of shuffled reviews placed in the training
            split; the remainder becomes the test split. Default 1600,
            i.e. an 80/20 split of the 2000-review corpus.

    Returns:
        (train_reviews, test_reviews, all_reviews), where each element
        is a list of (raw_text, 'positive' | 'negative') tuples.
    """
    # Label each review by the category it was filed under in the corpus.
    all_reviews = [(movie_reviews.raw(fid), 'positive')
                   for fid in movie_reviews.fileids('pos')]
    all_reviews += [(movie_reviews.raw(fid), 'negative')
                    for fid in movie_reviews.fileids('neg')]

    # Shuffle so positives and negatives are interleaved before splitting.
    random.shuffle(all_reviews)

    return all_reviews[:train_size], all_reviews[train_size:], all_reviews

# Build the train/test split once at import time and report the sizes
# (expected: 1600 train / 400 test).
train_reviews, test_reviews , all_reviews = load_movie_reviews()
print('train:', len(train_reviews))
print('test:', len(test_reviews))
# Build a corpus (the TF-IDF + threading experiment below is disabled)

# sents = [word_tokenize(sent[0].lower()) for sent in all_reviews]
# corpus = TextCollection(sents)
# tfidf={}
# i=0
# def thread(num):
#     global i
#     for sent in sents[num:num+200]:
#         print(i,'/',len(sents))
#         i=i+1
#         for word in sent:
#           tfidf[word]=corpus.tf_idf(word, corpus)
# t1 = threading.Thread(target=thread, args=(0,))
# t2 = threading.Thread(target=thread, args=(200, ))
# t3 = threading.Thread(target=thread, args=(400,))
# t4 = threading.Thread(target=thread, args=(600, ))
# t5 = threading.Thread(target=thread, args=(800,))
# t6 = threading.Thread(target=thread, args=(1000, ))
# t7 = threading.Thread(target=thread, args=(1200,))
# t8 = threading.Thread(target=thread, args=(1400, ))
# t9 = threading.Thread(target=thread, args=(1600,))
# t10 = threading.Thread(target=thread, args=(1800, ))
#
# t1.start()
# t2.start()
# t3.start()
# t4.start()
# t5.start()
# t6.start()
# t7.start()
# t8.start()
# t9.start()
# t10.start()
#
# t1.join()
# t2.join()
# t3.join()
# t4.join()
# t5.join()
# t6.join()
# t7.join()
# t8.join()
# t9.join()
# t10.join()


# English stop words and ASCII punctuation as sets, for O(1) membership
# tests (referenced by the commented-out filtering variants in the
# feature extractors below).
stop_words = set(stopwords.words('english'))  # English stop-word set
print("英文停用分词集合：", stop_words)

punctuation_string = set(string.punctuation)  # English punctuation set
print("所有的英文标点符号：", punctuation_string)
# filtered_sentence = list(set(word_tokens) - stop_words)



def extract_feature1(text):
    """Bag-of-words features: every token of the lower-cased text maps to True."""
    tokens = word_tokenize(text.lower())
    return dict.fromkeys(tokens, True)
def extract_feature2(text):
    """Adjective-only features: tokens POS-tagged 'JJ' map to True."""
    tagged = pos_tag(word_tokenize(text.lower()))
    return {token: True for token, tag in tagged if tag == 'JJ'}
def extract_feature3(text):
    """Unigram features plus 'not <word>' negation bigrams.

    Every token of the lower-cased text maps to True; additionally, any
    bigram whose first token is 'not' is joined with a space and added
    as a feature, so simple negation ("not good") is captured.

    Args:
        text: raw review text.

    Returns:
        dict mapping feature strings to True.
    """
    # Tokenize once and reuse; the original tokenized the same text twice.
    tokens = word_tokenize(text.lower())
    feature = {word: True for word in tokens}
    for first, second in ngrams(tokens, 2):
        if first == 'not':
            feature[f'{first} {second}'] = True
    return feature
def extract_feature4(text):
    """Unigram plus trigram features.

    Every token of the lower-cased text maps to True, and every
    space-joined trigram of consecutive tokens is also added as a
    feature.

    Args:
        text: raw review text.

    Returns:
        dict mapping feature strings to True.
    """
    # Tokenize once and reuse; the original tokenized the same text twice.
    tokens = word_tokenize(text.lower())
    feature = {word: True for word in tokens}
    for trigram in ngrams(tokens, 3):
        feature[' '.join(trigram)] = True
    return feature
def extract_feature5(text):
    """Unigram plus bigram plus trigram features (richest feature set).

    Every token of the lower-cased text maps to True, and every
    space-joined bigram and trigram of consecutive tokens is also added
    as a feature. (The original comment claimed trigrams only; the code
    adds both.)

    Args:
        text: raw review text.

    Returns:
        dict mapping feature strings to True.
    """
    # Tokenize once and reuse; the original tokenized the same text twice.
    tokens = word_tokenize(text.lower())
    feature = {word: True for word in tokens}
    for bigram in ngrams(tokens, 2):
        feature[' '.join(bigram)] = True
    for trigram in ngrams(tokens, 3):
        feature[' '.join(trigram)] = True
    return feature

def train_and_test(extract_feature, train_data, test_data):
    """Train a Naive Bayes sentiment classifier and report its accuracy.

    Features are computed lazily via nltk.classify.apply_features so the
    whole featurized corpus is never materialized at once.

    Args:
        extract_feature: callable mapping raw text to a feature dict.
        train_data: list of (text, label) pairs used for training.
        test_data: list of (text, label) pairs used for evaluation.

    Returns:
        The trained NaiveBayesClassifier.
    """
    featurize = nltk.classify.apply_features
    classifier = NaiveBayesClassifier.train(
        featurize(extract_feature, train_data))
    accuracy = nltk.classify.util.accuracy(
        classifier, featurize(extract_feature, test_data))
    print(f'accuracy is {accuracy:.4f}')
    return classifier


# model1 = train_and_test(extract_feature1, train_reviews, test_reviews)
# model1.show_most_informative_features()
# model2 = train_and_test(extract_feature2, train_reviews, test_reviews)
# model2.show_most_informative_features()
# model3 = train_and_test(extract_feature3, train_reviews, test_reviews)
# model3.show_most_informative_features()
# model4 = train_and_test(extract_feature4, train_reviews, test_reviews)
# model4.show_most_informative_features()
# Train with the richest feature set (unigrams + bigrams + trigrams) and
# show the features the classifier found most informative.
model5 = train_and_test(extract_feature5, train_reviews, test_reviews)
model5.show_most_informative_features()

# sentence = 'it is a wonderful movie'
# feature = extract_feature1(sentence)

#print(model3.classify(feature))

