import sys
import shutil
import os
import re
from datetime import datetime, timedelta
from math import log

# Command-line arguments: prefix/dir of the per-day tweet files, the label
# file (one line per day, leading 0/1 label plus a YYYY-MM-DD date), and the
# prefix/dir for the re-encoded output files.
training_tweets = sys.argv[1]
training_labels = sys.argv[2]
output_file = sys.argv[3]
#input_test_file = sys.argv[4]

# word -> string index; index 0 is reserved for the 'NULL' sentinel entry.
dictionary = {'NULL': "0"}
new_word_index = 1
curr_day = -1
# word_count rows: [word_index, word, per-day occurrence counts, total count]
daily_data = []
fh_tl = open(training_labels)
training_labels_txt = fh_tl.readlines()
fh_tl.close()  # all lines are in memory; the handle was leaked before
word_count = [[0, 'NULL', [0]*len(training_labels_txt), 0]]
# First pass: scan each day's tweet file, grow the global word dictionary,
# and accumulate per-word total and per-day occurrence counts.
MAX_TWEETS = 100000  # cap on tweets read per day (hoisted; was re-bound every iteration)
for training_label in training_labels_txt:
	curr_day += 1
	print("Day "+str(curr_day + 1)+"\n")

	#find day -- the label line carries the YYYY-MM-DD date naming the tweet file
	label_date = re.search(r"\d\d\d\d-\d\d-\d\d", training_label).group()

	training_filename = training_tweets+label_date+".txt"
	fh = open(training_filename)

	# leading 0/1 on the label line is the day's class label
	label = int(re.match("^[01]", training_label).group())

	line = fh.readline()
	daily_word_count = 0
	daily_tweet_count = 0
	while len(line) > 0 and daily_tweet_count < MAX_TWEETS:  # not end of file
		for word in line.split():
			if word not in dictionary:
				dictionary[word] = str(new_word_index)
				word_count.append([new_word_index, word, [0]*len(training_labels_txt), 0])
				new_word_index += 1
			idx = int(dictionary[word])  # hoisted: was converted twice per word
			word_count[idx][3] += 1            # total count for this word
			word_count[idx][2][curr_day] += 1  # per-day count for this word
			daily_word_count += 1
		line = fh.readline()
		daily_tweet_count += 1
	fh.close()  # was leaked once per day in the original
	daily_data.append([label, daily_word_count, daily_tweet_count])

num_days = curr_day + 1

# Rank words by total frequency, most frequent first (single sorted() call
# instead of sort-then-reverse).
sorted_word_count = sorted(word_count, key=lambda entry: entry[3], reverse=True)

# Parameters for the mutual-information feature selection.
num_words_to_keep = 1000
num_words_to_look = min(len(word_count), 10000)
# Per-day class labels: first field of each daily_data row.
labeling = [day_entry[0] for day_entry in daily_data]
# Per-day occurrence vectors for the most frequent words.
occurance = [sorted_word_count[i][2] for i in range(num_words_to_look)]

# Total occurrences per day, summed over the words under consideration.
my_word_count = [sum(x) for x in zip(*occurance)]
#get the label set?

#calculate mutual information
# Normalize each word's daily count by that day's total word count, then
# binarize: a day counts as "active" for a word when the word's share of
# that day's words is at or above the word's mean daily share.
ratio = [
	[float(occurance[j][i]) / my_word_count[i] for i in range(num_days)]
	for j in range(num_words_to_look)
]

# Per-word activation threshold: the mean of its daily ratios.
threshold = [float(sum(word_ratios)) / num_days for word_ratios in ratio]

# Boolean activity matrix: z[j][i] is True when word j is "active" on day i.
z = [
	[daily_ratio >= word_threshold for daily_ratio in word_ratios]
	for word_ratios, word_threshold in zip(ratio, threshold)
]

# Estimate the mutual information I(Z;Y) between each word's binarized
# activity Z (from z[j]) and the daily label Y (from labeling), over the
# num_days observations, with additive smoothing.
imatrix = []
pzy = [[.0,.0],[.0,.0]]  # joint P(Z=m, Y=n); reused across words, fully overwritten each j
pz = [.0,.0]             # marginal P(Z=m)
py = [.0,.0]             # marginal P(Y=n)
for j in range(num_words_to_look):
	for m in range(2):
		for n in range(2):
			mysum = .0
			for k in range(num_days):
				# counts days where Z==m and Y==n (True/False compare equal to 1/0)
				mysum += (z[j][k] == m) * (labeling[k] == n)
			# additive smoothing: +1 per cell, +4 (= 2x2 cells) in the denominator
			pzy[m][n] = float((mysum + 1))/(num_days + 4)
	pz[0] = pzy[0][0] + pzy[0][1]
	pz[1] = pzy[1][0] + pzy[1][1]
	
	py[0] = pzy[0][0] + pzy[1][0]
	py[1] = pzy[0][1] + pzy[1][1]

	#print pzy

	# Row layout: [MI score (accumulated below), word, word index,
	#  direction flag (True when the odds of Y=1 are higher given Z=1 than Z=0),
	#  total occurrences of the word]
	imatrix.append([0.0, sorted_word_count[j][1], sorted_word_count[j][0], ((pzy[0][1]/pzy[0][0]) < (pzy[1][1]/pzy[1][0])), sum(occurance[j])])
	for m in range(2):
		for n in range(2):
			# I(Z;Y) = sum_{m,n} p(z,y) * log( p(z,y) / (p(z)*p(y)) )
			imatrix[j][0] += float(pzy[m][n])*log(float(pzy[m][n])/(pz[m]*py[n]))
	#print imatrix[j]
	

# Highest mutual information first.
sorted_imatrix = sorted(imatrix,key=lambda entry: entry[0])
sorted_imatrix.reverse()

# Keep the indices of the top num_words_to_keep words by MI score.
# NOTE(review): new_dic is built here but never referenced again in this
# file -- the output pass below still filters on the full `dictionary`,
# so this pruning currently has no effect. Confirm which was intended.
new_dic = {}
for i in range(num_words_to_keep):
	new_dic[sorted_imatrix[i][1]] = sorted_imatrix[i][2]


# Dump the ranked word list: MI score, word, total count, direction flag.
with open("dictionary.txt", "w") as fh2:
	for line in sorted_imatrix:
		fh2.write("%f, "%(line[0])+line[1]+ ", %d"%(line[4]) + ", %d \n"%(line[3]))

#new_word_index = 0
#for line in word_count_sorted[:]:
#	dictionary[line[1]]=new_word_index
#	new_word_index+=1


#write out the data!
# Second pass: re-read each day's tweets and write them out re-encoded as
# dictionary indices, one tweet per line; lines with no known words are
# skipped and not counted.
# NOTE(review): the filter uses the full `dictionary`, not the pruned
# `new_dic` computed from mutual information -- confirm which was intended.
MAX_TWEETS = 1000000  # cap on tweets written per day
curr_day = -1
fh_tl = open(training_labels)
training_labels_txt = fh_tl.readlines()
fh_tl.close()
for training_label in training_labels_txt:
	curr_day += 1
	print("Day "+str(curr_day + 1)+"\n")

	#find day
	label_date = re.search(r"\d\d\d\d-\d\d-\d\d", training_label).group()

	training_filename = training_tweets+label_date+".txt"
	output_filename = output_file+label_date+".txt"
	fh = open(training_filename)
	fh1 = open(output_filename, 'w')

	line = fh.readline()
	# BUG FIX: this was initialized to the list [0]; in Python 2 a list
	# compares greater than any int, so `daily_tweet_count < MAX_TWEETS`
	# was always False and the loop body never ran -- no output was ever
	# written, and the list [0] was appended to daily_data.
	daily_tweet_count = 0
	while len(line) > 0 and daily_tweet_count < MAX_TWEETS:  # not end of file
		tweet = []
		for word in line.split():
			if word in dictionary:
				tweet.append(dictionary[word])
		if len(tweet) > 0:  # only non-empty re-encoded tweets are written/counted
			fh1.write(" ".join(tweet)+"\n")
			daily_tweet_count += 1
		line = fh.readline()
	daily_data.append(daily_tweet_count)
	fh.close()   # was leaked once per day in the original
	fh1.close()  # was closed only once after the loop, leaking all but the last file
