import os
import re
import math 

# Input files: the training corpus and the text to be labelled.
train_dat = 'small.txt'
test_dat = 'test.txt'

# feature -> {0: N(0,fi), 1: N(1,fi), 'freq': freq(fi)} where N(l,fi) is the
# number of times feature fi is seen with label l, and 'freq' holds fi's raw
# occurrence count (later replaced in place by a log inverse-frequency
# weight -- see update_feature_frequency()).
train_feat_index = {}
train_feat_bayes = {} # feature -> {1: ..., 0: ...} normalized complement likelihoods (see compute_feature_bayes; original comment's key/label pairing looked swapped -- verify)
train_num_features = 0 # number of distinct features in the training data
train_featnum_0 = 0 # N(0): total feature observations counted with label 0
train_featnum_1 = 0 # N(1): total feature observations counted with label 1
train_words = 0 # total number of words in the training text

# Log label priors, set by compute_label_distribution().
p_label0 = 0
p_label1 = 0 

word_index = {} # test word -> list of its extracted features
scores = {} # test word -> predicted label (0 or 1) 
scores_debug = {} # test word -> {0: score_0, 1: score_1}, kept for inspection

## Driver: trains the complement naive-Bayes model on the training file,
## then labels every word of the test file (results land in `scores`).
## NOTE: Python 2 syntax (print statements).
def bayes():
	global train_words, train_num_features
	print "Starting"
	# Tokenize the training text (preprocess() turns punctuation into spaces).
	train_txt = preprocess(read_file(train_dat)).split()
	
	train_words = len(train_txt)  # total training-token count
	
	# Training outline:
	# - extract each word's features and count N(l,fi) (times feature fi is
	#   seen with label l) and N(l) (total feature count per label):
	#   done by extract_features with is_train=1
	# - replace each feature's raw occurrence count with a log
	#   inverse-frequency weight: update_feature_frequency
	#   (NOTE(review): the original comment spoke of dividing by the word
	#   count, but the code divides by train_num_features -- confirm intent)
	# - compute and normalize the complement likelihoods p(fi|l^):
	#   compute_feature_bayes
	# - fix the label prior distribution p(l): compute_label_distribution
	#
	# Tunable knobs:
	# 1. the label priors p(l)
	# 2. the smoothing weight a used in the complement (default = 1)
	extract_features(train_txt, 1)
	train_num_features = len(train_feat_index)
	update_feature_frequency()
	compute_feature_bayes()
	compute_label_distribution()
	# Scoring pass: tokenize the test text, record each word's features in
	# word_index (is_train=0), then score them against the trained model.
	test_txt = preprocess(read_file(test_dat)).split()
	extract_features(test_txt,0)
	score_words()
	
	print "Done"
		
def preprocess(txt):
	"""Replace every punctuation character in *txt* with a single space.

	The pattern is a raw-string character class with exactly the same match
	set as the original alternation.  The raw string fixes the invalid
	'\\!'-style escape sequences of the old non-raw pattern, which have been
	a SyntaxWarning since Python 3.6 and are slated to become errors.
	"""
	return re.sub(r"[`!'\"#$%^}{;.,:\[\]()&\\/+?\-=~*@|]", ' ', txt)
	
def compute_label_distribution():
	"""Set the fixed log label priors.

	NOTE(review): log1p(x) = log(1 + x), so these are not the true log
	probabilities of a 0.4/0.6 split -- confirm this is intentional.
	"""
	global p_label0, p_label1
	p_label0, p_label1 = math.log1p(0.4), math.log1p(0.6)

def update_feature_frequency():
	"""Replace each feature's raw occurrence count ('freq') in place with a
	log inverse-frequency weight: log(1 + train_num_features / count)."""
	global train_feat_index
	for info in train_feat_index.values():
		info['freq'] = math.log1p(float(train_num_features) / float(info['freq']))
	
## Extract per-word features.  Words longer than 3 chars yield the word plus
## its 2- and 3-char prefixes and suffixes; 3-char words yield the word plus
## the 2-char prefix/suffix; shorter words yield just the word itself.
## is_train=1: count every feature against the word's label (training pass);
## is_train=0: record the word's feature list in word_index (scoring pass).
def extract_features(words, is_train = 1):
	for word in words:
		size = len(word)
		if size > 3:
			feature_lst = [word, word[:2], word[:3], word[-2:], word[-3:]]
		elif size == 3:
			feature_lst = [word, word[:2], word[-2:]]
		else:
			feature_lst = [word]
		if is_train:
			label = check_label(word)
			for feature in feature_lst:
				# NOTE(review): counts fire once per feature occurrence, even
				# for features already seen; the original author flagged that
				# only previously-unseen features should count -- confirm.
				update_count(label)
				add_to_findex(feature, label)
		else:
			add_to_word_index(word, feature_lst)
			
def add_to_word_index(word, features):
	"""Remember the feature list extracted for *word* (scoring-data path)."""
	global word_index
	word_index[word] = features

def add_to_findex(feature, label):
	"""Count one occurrence of *feature* under *label* in train_feat_index.

	A new feature starts as {label: 1, other-label: 0}; an existing one has
	its per-label count bumped.  'freq' tracks the feature's total raw
	occurrence count across both labels.
	"""
	global train_feat_index
	entry = train_feat_index.get(feature)
	if entry is None:
		entry = {label: 1, 1 - label: 0}
		train_feat_index[feature] = entry
	else:
		entry[label] = entry.get(label, 0) + 1
	entry['freq'] = entry.get('freq', 0) + 1
		
def update_count(label):
	"""Bump the total feature-observation counter for *label* (0 or 1)."""
	global train_featnum_0, train_featnum_1
	if not label:
		train_featnum_0 += 1
	else:
		train_featnum_1 += 1
		
def check_label(word):
	"""Return 1 if *word* contains an upper-case (non-digit) character,
	otherwise 0 (the lower-case label)."""
	return 1 if any(ch.isupper() and not ch.isdigit() for ch in word) else 0
		
# Complement likelihoods: p(fi|l^) = (N(l^,fi) + a) / (N(l^) + a*|F|), where
# N(l^,fi) is how often feature fi was seen with label l^, N(l^) the total
# feature count for label l^, |F| the number of distinct features, and a the
# smoothing weight.  Stored as log1p values, then L1-normalized per label.
def compute_feature_bayes():
	global train_feat_bayes
	smoothing = 1
	# Denominators are loop-invariant; compute them once.
	denom_1 = float(train_featnum_1 + smoothing * train_num_features)
	denom_0 = float(train_featnum_0 + smoothing * train_num_features)
	norm_1 = 0
	norm_0 = 0
	for feature in train_feat_index:
		counts = train_feat_index[feature]
		like_1 = math.log1p(float(counts[1] + smoothing) / denom_1)
		like_0 = math.log1p(float(counts[0] + smoothing) / denom_0)
		norm_1 += math.fabs(like_1)
		norm_0 += math.fabs(like_0)
		train_feat_bayes[feature] = {1: like_1, 0: like_0}

	# L1-normalize each label's likelihoods.
	for feature in train_feat_index:
		entry = train_feat_bayes[feature]
		entry[0] /= norm_0
		entry[1] /= norm_1

# score(w) = argmax(l) [ log p(l) - sum(fi in w) freq(fi)*w(fi,l^) ]
# (complement weighting: label l's score is penalized by the OTHER label's
# feature weights).
def score_words():
	"""Label every word in word_index, writing 0/1 into `scores` and the raw
	per-label scores into `scores_debug`.

	Bug fix: score_0/score_1 now restart from the label log-priors for each
	word.  Previously they were initialized once before the loop and kept
	being decremented, so every word's score leaked into the next word's.
	"""
	global scores
	for word in word_index:
		feats = word_index[word]
		score_0 = p_label0 # fresh per-word score for label 0
		score_1 = p_label1 # fresh per-word score for label 1
		sum_0 = 0
		sum_1 = 0
		for feature in feats:
			if feature in train_feat_index: # Consider only features that have been seen before
				sum_0 += train_feat_index[feature]['freq']*train_feat_bayes[feature][0]
				sum_1 += train_feat_index[feature]['freq']*train_feat_bayes[feature][1]
		score_0 -= sum_1 # use the complement
		score_1 -= sum_0
		scores_debug[word] = {0:score_0, 1:score_1}
		if score_0 > score_1:
			scores[word] = 0
		else:
			scores[word] = 1
			
		
def read_file(file_name):
	"""Return the entire contents of *file_name* as one string; the file is
	closed even if reading raises."""
	with open(file_name, 'r') as handle:
		return handle.read()
			
