#!/usr/bin/env python

import nltk
from nltk.corpus import brown
from nltk import word_tokenize
from nltk import FreqDist
from nltk import bigrams
from nltk import ConditionalFreqDist
from collections import defaultdict
import pylab

# Gold-standard tagged sentences used as the evaluation set for the tagger.
sents = brown.tagged_sents(categories='news')
# Raw (untagged) tokens of the same corpus slice; feeds the frequency model.
words = brown.words(categories='news')
# Word -> frequency over the news category; most_common() drives model size.
fdist = FreqDist(words)
# (word, tag) pairs; same text as `words`, so every frequent word appears here.
tagged_words = brown.tagged_words(categories='news')
# word -> FreqDist(tag): lets us look up each word's most likely tag via .max().
cfd = ConditionalFreqDist(tagged_words)

def performance(size):
	"""Build a lookup tagger from the `size` most frequent words and score it.

	Each of the top-`size` words is mapped to its single most likely tag
	(per the corpus-wide ConditionalFreqDist); everything else falls back
	to the default 'NN' tag. Returns the tagger's accuracy on `sents`.
	"""
	model = {w: cfd[w].max() for w, _ in fdist.most_common(size)}
	tagger = nltk.UnigramTagger(
		model=model,
		backoff=nltk.DefaultTagger('NN'),
	)
	return tagger.evaluate(sents)

# for size in range(100, 1000, 100):
# 	print(size, ' -- ', performance(size))

def display():
	"""Plot lookup-tagger accuracy against model size.

	Evaluates the tagger at model sizes 2**0 .. 2**14 (1 to 16384 words)
	and renders a log-spaced performance curve with pylab/matplotlib.
	Blocks until the plot window is closed (pylab.show()).
	"""
	# Powers of two give even coverage on a log scale of model sizes.
	sizes = 2 ** pylab.arange(15)
	perfs = [performance(size) for size in sizes]

	pylab.plot(sizes, perfs, '-bo')  # fixed: dropped stray trailing semicolon
	pylab.title('Lookup Tagger Performance with Varying Model Size')
	pylab.xlabel('Model Size')
	pylab.ylabel('Performance')
	pylab.show()

display()