from nltk.corpus import movie_reviews
from nltk import NaiveBayesClassifier
from nltk import word_tokenize
import nltk
import random

def _load_word_weights(path, limit=800):
    """Read up to *limit* lines of "word,weight" pairs from *path*.

    Returns a dict mapping word -> weight. Weights are kept as raw strings
    (downstream code only tests key membership, never the value).
    The original version leaked the file handle and scanned the whole file
    even after the cap was reached; this closes the file and stops early.
    """
    weights = {}
    with open(path) as fh:
        for count, line in enumerate(fh):
            if count >= limit:
                break
            # Keep original semantics: take the first two comma-separated
            # fields (extra commas beyond the second are ignored).
            parts = line.strip().split(',')
            weights[parts[0]] = parts[1]
    return weights


# Candidate sentiment lexicons: word -> weight (string), capped at 800 entries.
pos_word_weight = _load_word_weights('./data/candi_pos.txt')
neg_word_weight = _load_word_weights('./data/candi_neg.txt')

# Fixed seed so the shuffled train/test split is reproducible across runs.
random.seed(42)


def load_movie_reviews(train_size=1600):
    """Load the NLTK movie_reviews corpus and split it into train/test sets.

    Each item is a (raw_text, label) pair with label 'positive' or
    'negative'. The combined list is shuffled (seeded above), then split.

    Args:
        train_size: number of reviews in the training split; the remainder
            becomes the test split. Defaults to 1600 (of 2000 reviews),
            matching the original hard-coded split.

    Returns:
        (train_reviews, test_reviews) tuple of lists.
    """
    labeled = [(movie_reviews.raw(fid), 'positive')
               for fid in movie_reviews.fileids('pos')]
    labeled += [(movie_reviews.raw(fid), 'negative')
                for fid in movie_reviews.fileids('neg')]

    random.shuffle(labeled)
    return labeled[:train_size], labeled[train_size:]


train_reviews, test_reviews = load_movie_reviews()
# Report the size of each split.
for label, split in (('train:', train_reviews), ('test:', test_reviews)):
    print(label, len(split))

def extract_feature(text):
    """Build a boolean bag-of-words feature dict from raw review text.

    The text is lowercased and tokenized; only tokens present in either
    sentiment lexicon (pos_word_weight / neg_word_weight) become features,
    each mapped to True.
    """
    return {
        f'contain: {token}': True
        for token in word_tokenize(text.lower())
        if token in pos_word_weight or token in neg_word_weight
    }

def train_and_test(extract_feature, train_data, test_data):
    """Train a Naive Bayes classifier and print its test-set accuracy.

    Args:
        extract_feature: callable mapping raw text -> feature dict.
        train_data: list of (text, label) pairs used for training.
        test_data: list of (text, label) pairs used for evaluation.

    Returns:
        The trained NaiveBayesClassifier.
    """
    # apply_features builds lazy (features, label) views over the data,
    # avoiding materializing every feature dict up front.
    train_feats = nltk.classify.apply_features(extract_feature, train_data)
    test_feats = nltk.classify.apply_features(extract_feature, test_data)

    model = NaiveBayesClassifier.train(train_feats)
    accuracy = nltk.classify.util.accuracy(model, test_feats)
    print(f'accuracy is {accuracy:.4f}')

    return model


train_and_test(extract_feature, train_reviews, test_reviews)