#!/usr/bin/env python

import nltk
from nltk.corpus import names

# Build the labelled corpus: every male name tagged 'male', every female
# name tagged 'female'.  (NOTE: 'labled_names' is a long-standing typo for
# 'labeled_names'; kept because later code references this exact name.)
labled_names = [(name, 'male') for name in names.words('male.txt')] + \
[(name, 'female') for name in names.words('female.txt')]

import random
random.shuffle(labled_names)

# Non-overlapping splits.  The original code used labled_names[:2000] for
# training, which fully CONTAINED both the devtest slice [1000:2000] and the
# test slice [:1000] — evaluating on training data (data leakage) and
# inflating every accuracy number below.  Standard NLTK-book split instead:
#   test    = first 1000 names
#   devtest = next 1000 names
#   train   = everything after the first 2000
test_name = labled_names[:1000]
devtest_name = labled_names[1000:2000]
train_name = labled_names[2000:]


def feature_extractor(name):
    """Return the feature dict for *name*: its final character as 'suffix1'.

    Uses the slice ``name[-1:]`` rather than ``name[-1]`` so that an empty
    string yields ``''`` instead of raising IndexError.
    """
    last_char = name[-1:]
    return {'suffix1': last_char}

def get_set(kvlist):
    """Turn a list of (name, gender) pairs into (feature_dict, gender) pairs."""
    featured = []
    for name, gender in kvlist:
        featured.append((feature_extractor(name), gender))
    return featured

train_set,devtest_set,test_set = get_set(train_name),get_set(devtest_name),get_set(test_name)


def print_error(classifier, data_set=devtest_name):
	"""Print every (name, gender) pair in *data_set* that *classifier* gets wrong.

	classifier -- any object with a ``classify(feature_dict)`` method
	              (e.g. the trained NLTK classifiers below).
	data_set   -- iterable of (name, gender) pairs; defaults to the
	              module-level devtest split.
	"""
	for (name, gender) in data_set:
		feature = feature_extractor(name)
		guess = classifier.classify(feature)
		if guess != gender:
			# '{!s:8}' converts the dict to str before applying the width;
			# the original '{:8}' passes a non-empty format spec straight to
			# dict.__format__, which raises TypeError on Python 3.
			print('name={:<12} gender={:<8} guess={:<8} feature={!s:8}'.format(name, gender, guess, feature))


# Naive Bayes classifier.
# The original used Python-2 `print` statements here while the rest of the
# file (L33, L63) uses the print() function; single-argument print() calls
# run identically under Python 2 and Python 3.
print('---------------bayes----------------')
class_bayes = nltk.NaiveBayesClassifier.train(train_set)
# print_error(class_bayes)
print(nltk.classify.accuracy(class_bayes, devtest_set))
# class_bayes.show_most_informative_features(5)
# print(nltk.classify.accuracy(class_bayes, test_set))


# Maximum Entropy classifier.
import time
time.sleep(3)  # brief pause so the previous section's output is readable
# print() function instead of the Python-2 print statement, for consistency
# with L33/L63 and Python-3 compatibility.
print('-------------Maximum Entropy------------------')
class_entropy = nltk.MaxentClassifier.train(train_set)
# print_error(class_entropy)
print(nltk.classify.accuracy(class_entropy, devtest_set))
# class_entropy.show_most_informative_features(5)
# print(nltk.classify.accuracy(class_entropy, test_set))


# Decision tree classifier.
time.sleep(3)  # brief pause so the previous section's output is readable
# print() function instead of the Python-2 print statement, for consistency
# with L33 and Python-3 compatibility.
print('-------------decision tree------------------')
class_dectree = nltk.DecisionTreeClassifier.train(train_set)
# print_error(class_dectree)
print(nltk.classify.accuracy(class_dectree, devtest_set))
# print(nltk.classify.accuracy(class_dectree, test_set))
# BUG FIX: pseudocode() is a method of the trained DecisionTreeClassifier,
# not a function in nltk.classify — the original `nltk.classify.pseudocode(...)`
# raised AttributeError.
print(class_dectree.pseudocode(depth=4))