#!/usr/bin/env python


import nltk
import re
from os import listdir
from os.path import isfile, join
import sys
import math
import operator

#############################
# FUNCTION: 
#   get_files
# PARAMETERS: 
#   my_path    (IN) - Path where the files are
# RETURN: 
#   files<list> 
# DESCRIPTION: 
#   obtain the file names in the specified path
############################
def get_files(my_path):
  """Return the full paths of the regular files directly inside my_path.

  PARAMETERS:
    my_path (IN) - directory to scan (not recursive)
  RETURN:
    list of path strings, one per regular file
  """
  candidate_paths = (join(my_path, entry) for entry in listdir(my_path))
  return [path for path in candidate_paths if isfile(path)]


#############################
# FUNCTION: 
#   get_words_document
# PARAMETERS: 
#   document  (IN) - List of words in a document
# RETURN: 
#   words<list>
# DESCRIPTION: 
#   get the list of words in a document
############################
def get_words_document(document):
  """Read a document and return its entire contents as one string.

  PARAMETERS:
    document (IN) - path of the file to read
  RETURN:
    the raw file contents (a single string; tokenization happens later)
  """
  # 'with' guarantees the handle is closed even if read() raises -- the
  # original leaked the handle and shadowed the 'file' builtin.
  with open(document, 'r') as fh:
    return fh.read()


#############################
# FUNCTION: 
#   get_important_words
# PARAMETERS: 
#   words   (IN) - List of words
# RETURN: 
#   words<list>
# DESCRIPTION: 
#   Given a list of words, it obtains the list of non-stop words
############################
def get_important_words(words):
  """Lower-case a text, tokenize it and drop English stop words.

  PARAMETERS:
    words (IN) - raw document text (a single string)
  RETURN:
    list of lower-cased non-stop-word tokens, in document order
  """
  words = words.lower()
  # \w+ keeps runs of alphanumerics/underscore as tokens.
  # NOTE: re.LOCALE was dropped -- combining it with re.UNICODE is
  # contradictory and is rejected for str patterns on modern Pythons.
  tokens_in_file = re.findall(r'\w+', words, flags=re.UNICODE)
  # A set gives O(1) membership tests vs O(len(stop_words)) with a list.
  stop_words = set(nltk.corpus.stopwords.words('english'))
  return [w for w in tokens_in_file if w not in stop_words]


#############################
# FUNCTION: 
#   cosine_normalization
# PARAMETERS: 
#   encoded_documents<list>   (IN) - List of list of words
# RETURN: 
#   normalized_documents<list>
# DESCRIPTION: 
#   Given a list of encoded documents, it performs a cosine normalization
############################
def cosine_normalization(encoded_documents):
  """Normalize every document vector to unit Euclidean length, in place.

  PARAMETERS:
    encoded_documents<dict> (IN/OUT) - maps document name -> list of numbers
  RETURN:
    the same dict, with each non-zero vector scaled to norm 1
  """
  for document in encoded_documents:
    vector = encoded_documents[document]
    norm = math.sqrt(sum(p * p for p in vector))
    # A zero vector cannot be normalized; leave it untouched instead of
    # distorting every vector with an epsilon term as the original did.
    if norm > 0:
      encoded_documents[document] = [p / norm for p in vector]
  return encoded_documents


#############################
# FUNCTION: 
#   encode_Documents
# PARAMETERS: 
#   documents<list>   (IN)  - List of documents
#   vector_size       (IN)  - Size of the resulting vectors
# RETURN: 
#   normalized_documents<list>
# DESCRIPTION: 
#   Given a list of encoded documents, it performs a cosine normalization
############################
def encode_documents(documents, vector_size):
  """Encode each document as a TF-IDF vector of dimension vector_size.

  PARAMETERS:
    documents<dict>  (IN) - maps document name -> list of (important) words
    vector_size      (IN) - dimension of the resulting vectors
  RETURN:
    dict mapping document name -> list of vector_size tf-idf values,
    or None when documents is None or empty.
  """
  vector_document = {}

  if documents is None:
    return None

  num_docs = len(documents)
  print('Dealing with %d documents' % num_docs)

  if num_docs <= 0:
    return None

  # Take the vector_size most frequent words of each document and
  # accumulate their frequencies across all documents.
  print("Getting most important words...")
  combined_freqs = {}
  for document in documents:
    histogram = nltk.FreqDist(w for w in documents[document])
    # most_common() is the documented way to get the top-frequency words
    # (FreqDist.keys() is no longer frequency-ordered in modern NLTK).
    for word, count in histogram.most_common(vector_size):
      combined_freqs[word] = combined_freqs.get(word, 0) + count

  # Keep the vector_size globally most frequent of those words.
  sorted_common_words = sorted(combined_freqs.items(),
                               key=operator.itemgetter(1), reverse=True)
  important_words = [word for (word, _) in sorted_common_words[:vector_size]]
  print(important_words)

  combined_freqs = None

  print("Calculating IDF...")
  # Document frequency: in how many documents does each important word occur?
  idf_dict = {}
  for document in documents:
    document_words = set(documents[document])
    for word in important_words:
      if word in document_words:
        idf_dict[word] = idf_dict.get(word, 0) + 1
        print("Word: %s, idf: %f" % (word, idf_dict[word]))

  print("Calculating TF-IDF...")
  # Now that we know which words to consider, build each document's vector.
  doc_number = 0
  for document in documents:
    doc_number += 1
    # The original printed a progress counter that was always 0.
    print("Document %d/%d: %s" % (doc_number, num_docs, document))
    histogram = nltk.FreqDist(w for w in documents[document])
    d = [0 for _ in range(vector_size)]

    for i, word in enumerate(important_words):
      # tf is simply the frequency of the word within this document.
      tf = histogram[word] if word in histogram else 0
      idf = idf_dict.get(word, 0)

      # Every important word occurs in at least one document, so idf >= 1;
      # guard anyway to avoid a ZeroDivisionError on inconsistent input.
      if idf > 0:
        d[i] = tf * math.log10(float(num_docs) / float(idf))
      print('word: %s, tf: %f, idf: %f, d[i]: %f' % (word, tf, idf, d[i]))

    # Now we have the tf-idf vector of this document.
    vector_document[document] = d

  return vector_document
 



###########################
# DRIVER OF THE SCRIPT
##########################
if __name__ == "__main__":

  if len(sys.argv) != 4:
    print('Invalid arguments\n')
    print('Usage: tfidf <directory> <vector_dimension> <outfile>\n')
    sys.exit()

  my_dir = sys.argv[1]
  dimension = int(sys.argv[2])
  out_file = sys.argv[3]

  print('Encoding documents in %s using vectors in R%d' % (my_dir, dimension))

  documents = {}

  # Get the initial list of non-stop words per document.
  # (Renamed loop variable: the original shadowed the 'file' builtin.)
  for doc_path in get_files(my_dir):
    words = get_words_document(doc_path)
    documents[doc_path] = get_important_words(words)

  # Encode the documents as tf-idf vectors.
  encoded_documents = encode_documents(documents, dimension)

  # Make sure we got an object back.
  if encoded_documents is None:
    print('Could not encode the documents')
    sys.exit()

  # Preliminary (unnormalized) encoding.
  print('Encoded documents: ')
  for document in encoded_documents:
    print('%s: %s' % (document, encoded_documents[document]))

  # Normalize the vectors (in place: normalized_docs aliases the input dict).
  normalized_docs = cosine_normalization(encoded_documents)

  # Write the normalized vectors, one document per line.
  # 'with' guarantees the output file is flushed and closed.
  print('Printing normalized documents: ')
  with open(out_file, 'w') as out:
    for document in normalized_docs:
      print('%s: %s' % (document, normalized_docs[document]))
      for number in normalized_docs[document]:
        out.write('%f ' % number)
      out.write('\n')
