#!/usr/bin/env python

# https://github.com/sahiga/nlp-exercises/tree/master/02-accessing-text-corpora
# https://github.com/ypeels/nltk-book/tree/master/exercises
import nltk


#2.1
# print '\n-------2.1-------'
# saying='guangzhou is a nice city and guangzhou is a provincal capital of guangdong'
# word = saying.split(' ')
# print '[x] word ',word
# print '[x] word add ',word+['hello']
# print '[x] word multiply ',word*2
# print '[x] word slice ',word[:1]
# print '[x] word sort ',sorted(word)


#2.2
# print '\n-------2.2-------'
# from nltk.corpus import gutenberg
# book = gutenberg.words('austen-persuasion.txt')
# print '[x] words count=',len(book)
# print '[x] words types=',len(set(w.lower() for w in book))


#2.3
# print '\n-------2.3-------'
# from nltk.corpus import brown
# genre = ['news','romance']
# words1 = brown.words(categories='romance')
# print '[x] ',words1[:10]


#2.4
# print '\n-------2.4-------'
# from nltk.corpus import state_union as su
# cfd = nltk.ConditionalFreqDist(
# 	(target, w)
# 	for target in su.fileids()[:5]
# 	for w in ['men','women','people']
# 	for word in su.words(target)
# 	if word.lower().startswith(w)
# 	)
# cfd.tabulate()
# cfd.plot()


# skipped (not done): 2.5 2.6 2.7


#2.8
# print '\n-------2.8-------'
# from nltk.corpus import names
# cfd = nltk.ConditionalFreqDist(
# 	(fileid,word[:1])
# 	for fileid in names.fileids()
# 	for word in names.words(fileid)
# 	)
# cfd.tabulate()
# cfd.plot()


# skipped (not done): 2.9


#2.10
# print '\n-------2.10-------'
# from nltk.book import *
# books = [text1,text2,text3,text4,text5,text6,text7,text8,text9]
# fdists = []
# for book in books:
# 	vocab = [w for w in book if w.isalpha()]
# 	fd = FreqDist(vocab)
# 	fdists.append((book,fd))
# fraction = 0.33 # 1/3
# for (book,fd) in fdists:
# 	threshold = fd.N() * fraction
# 	type_count = 0
# 	curr_value = 0
# 	for (word, count) in fd.most_common():
# 		type_count += 1
# 		curr_value += count
# 		if curr_value >= threshold:
# 			break;
# 	print type_count, 'word types, out of', fd.N(),' @ ',book


# skipped (not done): 2.11 2.12 2.13 2.14


# print '\n-------2.14-------'
# from nltk.corpus import wordnet as wn
# def supergloss(s):
#     n = 0
#     synset = (s.name, s.definition)
#     hypernyms = s.hypernyms()
#     hyponyms = s.hyponyms()
#     if hypernyms != []:
#         while n < len(hypernyms):
#             for hypernym in s.hypernyms():
#                 hypernyms[n] = (hypernym.name, hypernym.definition)
#                 n = n + 1
#     else:
#         hypernyms = 'none'
#     n = 0
#     if hyponyms != []:
#         while n < len(hyponyms):
#             for hyponym in hyponyms:
#                 hyponyms[n] = (hyponym.name, hyponym.definition)
#                 n = n + 1
#     else:
#         hyponyms = 'none'
#     total = 'ROOT WORD:', synset, 'HYPERNYMS:', hypernyms, 'HYPONYMS:', hyponyms
#     return total
# print supergloss(wn.synset('cat.n.01'))



# print '\n-------2.15-------'
# from nltk.corpus import brown
# all_words = brown.words(categories='news')
# vocab = [w for w in all_words if w.isalpha()]
# fdist = nltk.FreqDist(vocab)
# filtered_words = [w for (w,c) in fdist.most_common() if c>=3 ]
# print filtered_words[:100]


# print '\n-------2.16-------'
# from nltk.corpus import brown
# for cate in brown.categories():
# 	all_words = brown.words(categories=cate)
# 	all_count = len(all_words)
# 	type_count = len(set(w.lower() for w in all_words))
# 	print cate,"{:3g}".format(float(all_count)/type_count)


# print '\n-------2.17-------'
# from nltk.corpus import brown
# vocab = brown.words(categories='adventure')
# stopwords = nltk.corpus.stopwords.words('english')
# content = [w for w in vocab if w.lower() not in stopwords and w.isalpha()]
# fdist = nltk.FreqDist(content)
# print fdist.most_common(50)


# todo 2.18 ~2.22


# Exercise 2.23 -- Zipf's law: when words are ranked by frequency, the
# product k = rank * frequency is roughly constant.  Tabulate k for the
# 50 most common alphabetic words of the Brown 'adventure' category and
# plot frequency against rank.
print('\n-------2.23-------')
from nltk.corpus import brown
import pylab, matplotlib

vocab = brown.words(categories='adventure')
content = [w for w in vocab if w.isalpha()]  # keep words only, drop punctuation/numbers
fdist = nltk.FreqDist(content)
vocab_count = fdist.most_common(50)  # list of (word, count), most frequent first

rank = []
freq = []
# enumerate from 1 so the most frequent word gets rank 1.  (The original
# loop ran over range(1, 50), which skipped index 0 -- the top word was
# dropped and every rank label was off by one.)
for i, (word, count) in enumerate(vocab_count, start=1):
    k = i * count  # Zipf's constant estimate for this rank
    print('rank=%d k=%d' % (i, k))
    rank.append(i)
    freq.append(count)

pylab.plot(rank, freq)
pylab.show()

