import re
import string
from collections import Counter

# Gets the list of stop words from an external file.
def stops(path='stop_list.txt'):
  """Read the stop-word file at *path* and return its words as a list.

  The file is split on runs of non-word characters, so any whitespace or
  punctuation separates entries.  Note: a leading/trailing non-word char in
  the file yields an empty-string entry (re.split edge behavior).
  """
  # 'with' guarantees the handle is closed even if read/split raises.
  with open(path, 'r') as stop_source:
    return re.split(r'\W+', stop_source.read())

# Takes the filename of a document, gets the text stored there, and returns
# the text as a string.
def open_document(doc_name):
  """Return the full contents of the file named *doc_name* as one string."""
  # Context manager closes the file even if read() raises.
  with open(doc_name) as doc:
    return doc.read()

# Takes a list of filenames of documents, gets the text stored in each one,
# and returns a list of strings.
def open_documents(doc_names):
  """Read every file named in *doc_names*; return their texts in order."""
  return [open_document(doc_name) for doc_name in doc_names]

# Takes a single string and returns a list of its tokens.
def tokenize_single(document, stop_words=None):
  """Lowercase *document*, strip punctuation, split into word tokens, and
  drop stop words.

  stop_words -- optional list of words to exclude; when None (the default,
  matching the old behavior) the list is loaded from disk via stops().

  Bug fixes: the two-argument str.translate/string.maketrans form is
  Python-2-only (re.sub works on 2 and 3), and re.split yields empty-string
  tokens at the text's edges, which are now filtered out explicitly.
  """
  if stop_words is None:
    stop_words = stops()

  lowercase = document.lower()
  # Delete every punctuation character in one pass.
  depunctuate = re.sub('[' + re.escape(string.punctuation) + ']', '', lowercase)
  raw_tokens = re.split(r'\W+', depunctuate)
  # Keep only non-empty tokens that are not stop words.
  return [token for token in raw_tokens if token and token not in stop_words]

# Takes a list of strings and returns a list of tokens.
def tokenize_multiple(documents):
  """Tokenize each document; return [[serial], token_list] pairs, where
  serial is the document's position in *documents*."""
  return [[[serial], tokenize_single(text)]
          for serial, text in enumerate(documents)]

# Takes a list of strings, finds the tokens for each one, and returns a list
# of all the unique tokens for all documents used.
def master_token(documents):
  """Return the unique tokens across all *documents*, in first-seen order.

  Fix: the old `token not in master` tested list membership (O(n) per
  token, quadratic overall); a side set gives O(1) membership while the
  list preserves order.
  """
  master = []
  seen = set()
  for _serial, token_list in tokenize_multiple(documents):
    for token in token_list:
      if token not in seen:
        seen.add(token)
        master.append(token)
  return master

# Takes a string and a list of tokens.  Returns a list of the number of times
# each token occurred in the document.
def frequency_single(document, tokens):
  """Return, for each entry of *tokens*, how often that token appears in
  *tokens* itself.

  NOTE(review): *document* is never consulted; callers (index_single) pass
  the document's full duplicate-bearing token list, so counting within
  *tokens* is the in-document frequency.  Parameter kept for compatibility.

  Fix: list.count per element was O(n^2); Counter tallies in one pass.
  """
  counts = Counter(tokens)
  return [counts[token] for token in tokens]

# Takes a list of strings and a list of tokens.  Counts how often each token
# occurs in each document.
def frequency_multiple(documents, tokens):
  """Count raw occurrences of each token in each document string.

  Returns a FLAT list in token-major order:
    [t0-in-doc0, t0-in-doc1, ..., t1-in-doc0, ...]
  (The previous comment claimed a nested list, but the code has always
  produced a flat one; index_multiple() does not yet consume the result.)

  NOTE(review): str.count matches substrings, so 'art' is also counted
  inside 'artistic' -- confirm whether whole-word matching was intended.
  """
  return [document.count(token)
          for token in tokens
          for document in documents]

# Takes a string and a search term.  Returns the starting index for every
# occurrence of the search term.
def occurrences(document, element):
  """Return the start index of every whole-word occurrence of *element*.

  Indices refer to the lowercased, punctuation-stripped form of *document*
  (as the original's did), not to the raw string.

  Bug fixes: the old space-padding trick (' element ') missed matches at
  the very start and end of the text; the scan bound was computed from the
  raw document even though stripping punctuation shortens the string; and
  the two-argument str.translate form was Python-2-only.
  """
  lowercase = document.lower()
  depunctuate = re.sub('[' + re.escape(string.punctuation) + ']', '', lowercase)
  # \b anchors match whole words, including at the text's boundaries.
  pattern = r'\b' + re.escape(element) + r'\b'
  return [match.start() for match in re.finditer(pattern, depunctuate)]

# Takes a string and a list of tokens.  Returns a list of occurrences of
# every token.
def position_single(document, tokens):
  """Return each token's list of occurrence positions within *document*."""
  return [occurrences(document, token) for token in tokens]

# Takes a list of strings and a list of tokens.  Returns a list of the
# occurrences of every token in every document.
def position_multiple(documents, tokens):
  """For each token, return its occurrence-position list per document.

  Result shape: result[token_index][document_index] -> list of positions.
  """
  return [[occurrences(document, token) for document in documents]
          for token in tokens]

# Takes a list of positions, and only returns the index for lists that are
# not empty.  In other words, returns a list of the serial numbers of
# documents in which something was found.
def elems_of_document(position):
  """Return the indices of the non-empty entries of *position*."""
  return [serial for serial, hits in enumerate(position) if hits != []]

# elems_of_document() applied to an entire list of positions.
def elems_of_documents(positions):
  """Apply elems_of_document() to every token's per-document position list.

  Bug fixed: the old inner loop recomputed the same result len() times per
  token and, when a token's position list was empty, appended the PREVIOUS
  token's stale result (or raised NameError on the first token).
  """
  return [elems_of_document(token_positions) for token_positions in positions]

# Takes a string.  Returns a dictionary that uses a token as a key and a list
# of frequencies and positions for the tokens as a value.
def index_single(document):
  """Index one document: maps token -> [frequency, position_list]."""
  tokens = tokenize_single(document)
  frequencies = frequency_single(document, tokens)
  positions = position_single(document, tokens)
  # Duplicate tokens carry identical values, so later entries are harmless.
  return {token: [freq, pos]
          for token, freq, pos in zip(tokens, frequencies, positions)}

# Takes a list of strings.  Returns a dictionary that uses tokens as keys and
# a list of document serial numbers and positions as values.
def index_multiple(documents):
  """Build an index over *documents*.

  Each token maps to a list whose first element is the list of document
  serial numbers containing the token, followed by that token's position
  list for each of those documents.  For example:
    index['mathematics'] == [[3], [1087, 1305, 1381]]
  means 'mathematics' appears in document 3 at positions 1087, 1305, 1381.

  TODO: fold the computed frequencies into the index entries.
  """
  tokens = master_token(documents)
  # Computed but not yet consumed -- see the TODO above.
  frequencies = frequency_multiple(documents, tokens)
  positions = position_multiple(documents, tokens)
  in_doc = elems_of_documents(positions)
  index = {}
  for serial, token in enumerate(tokens):
    entry = [in_doc[serial]]
    for doc_serial in in_doc[serial]:
      entry.append(positions[serial][doc_serial])
    index[token] = entry
  return index

# Takes a list of filenames and creates an index for their contents.
def index_documents(doc_names):
  """Read the named files and build a multi-document index over them."""
  return index_multiple(open_documents(doc_names))

# This next bit gets the program going.
# Currently only for testing purposes.
def main():
  """Index a hard-coded set of files and print each token's index entry.

  Fix: the Python-2 `print` statement is a syntax error on Python 3;
  calling print() with a single pre-built string works on both versions.
  """
  first = ['science.txt', 'math.txt', 'psychiatry.txt', 'evolution.txt',
           'LICENSE']
  test = index_documents(first)
  for token in test:
    # Preserve the original "token: entry" output format.
    print(str(token) + ": " + str(test[token]))

if __name__ == '__main__':
  main()
