import codecs, glob, logging, re
import HebrewWord

logger = logging.getLogger('com.metaist.heblib.TanachParser')

# Regex patterns applied (in order) by scrub(); matches are deleted.
# NOTE: patterns containing regex metacharacters are raw strings; the old
# '\.' style literals relied on invalid escape sequences, which raise
# DeprecationWarning/SyntaxWarning on modern Python. Raw strings have the
# identical value, so matching behavior is unchanged.
DELETE_TAGS = [
  # Delete tag contents.
  '<!([^>]*)>', '<META([^>]*)>', '<LINK([^>]*)>',
  '<TITLE(.*)</TITLE>', '<SCRIPT(.*)</SCRIPT>',
  '<H1(.*)</H1>', '<H2(.*)</H2>', '<B(.*)</B>', '<SPAN(.*)</SPAN>',
  '<FONT(.*)</FONT>', '<BDO(.*)</BDO>', '<A(.*)</A>',

  # Delete tags, but not contents.
  '<(/)?HTML( [^>]*)?>', '<(/)?HEAD>',
  '<(/)?BODY>', '<(/)?P([^>]*)>',
  # '\xA0' (non-breaking space) is a valid escape and is kept non-raw on
  # purpose: a raw string would change the literal into backslash-x-A-0.
  '<(/)?BIG>\xA0', '<(/)?BIG>', '<(/)?SMALL>', '<(/)?SUP>',

  # Delete punctuation.
  '{(.)}', r'\.', r'\(', r'\)', ',', ':', r'\]', '"']

# Regex patterns applied (in order) by scrub(); matches become one space.
PUT_SPACE = ['<BR>', '-', ';', r';\s', ';\xA0', '\xA0']

def combine(corpus_path):
   '''Combine the corpus texts into a single string.

   corpus_path -- a glob pattern matching the book files of the Tanach.
   Returns the concatenated contents of every matching file, decoded
   from windows-1255. Returns '' when the pattern matches nothing.
   '''
   # Collect the pieces and join once at the end; repeated `+=` on a
   # string is quadratic in the total corpus size.
   book_texts = []
   for book_path in glob.iglob(corpus_path): # Iterate over all the books in the Tanach.
      with codecs.open(book_path, encoding='windows-1255', mode='r') as f_book: # Open the book.
         book_texts.append(f_book.read())
   return ''.join(book_texts)

def scrub(corpus_text):
   '''Scrub a corpus text by removing odd character sequences.

   corpus_text -- raw combined corpus text.
   Returns the text with the DELETE_TAGS patterns removed, the
   PUT_SPACE patterns collapsed to single spaces, and the zero-width
   join entity restored.
   '''
   cleaned = corpus_text

   # Strip out everything we never want to see again.
   for pattern in DELETE_TAGS:
      cleaned = re.sub(pattern, '', cleaned)

   # These sequences act as word separators, so become spaces.
   for pattern in PUT_SPACE:
      cleaned = re.sub(pattern, ' ', cleaned)

   # The ';' pass above also broke '&zwj;' -- restore it.
   cleaned = cleaned.replace('&zwj ', '&zwj;')
   return cleaned

def tokenize(text):
   '''Return a map of Hebrew words in the text.

   text -- scrubbed corpus text.
   Returns a dict mapping each Hebrew word to a dict of metadata:
   'hebrew', 'english' (space-padded transliteration), 'has_shva'
   ('Y'/'N'), 'syllable_count', 'frequency_count', 'hashem_name'.
   '''
   words = {}
   # BUG FIX: str.replace returns a new string; the original discarded
   # the result, so newline-separated words were never split apart.
   text = text.replace('\n', ' ') # We're not interested in the lines; just the words.
   for word in text.split(' '):
      word = word.strip()
      # BUG FIX: was `word is ''` -- identity comparison on strings is
      # implementation-dependent; use an equality/emptiness check.
      if word == '': continue # Just some whitespace.
      if word in words: # We've seen this word before.
         words[word]['frequency_count'] += 1
      else: # New word.
         heb_letters = HebrewWord.parse(word)
         words[word] = {
            'hebrew': word,
            'english': ' ' + (' '.join(heb_letters)).replace("'", "\\'") + ' ',
            # u'\u05b0' is the sheva niqqud mark.
            'has_shva': {True: 'Y', False: 'N'}[u'\u05b0' in word],
            'syllable_count': HebrewWord.syllables(heb_letters),
            'frequency_count': 1,
            'hashem_name': HebrewWord.hashem_name(word)
         } # Word added.
   return words
