#!/usr/bin/env python

from dotext import dotext
from tobutils import loadobj

import sys
import re
import random

# loads 'tobmalf.markov.new' stores it in 'table', global var.
# Formerly had a direct usage of 'open', but nothing was saving to that
# format, so I killed it.
# 'global' has no effect at module scope, so no declaration is needed here;
# the functions below that modify this table declare 'global table' themselves.
table = loadobj('tobmalf.markov.new', {}, 'markov nodes')

# 'sentenceseed' and 'subjectseed' appear to be ways for the caller to inject
# some of the guiding principles.

# This implements a 2nd-order markov model and chain. That means that it
# tracks the frequency of some word after some specific pair of words,
# and then generates a random phrase based on those words.
# (A 1st-order markov model would look at one word, and see how likely
# another word is to precede that one. Again, this is a 2nd-order chain,
# so each word depends on the two prior to it.)
def markov(newlines,outputs=0,donotaddperiods=0,w1="",w2="",sentenceseed="",subjectseed=""):
  """Train the global 2nd-order markov table on 'newlines', then (when
  'outputs' is nonzero) generate random sentence output from the table.

  newlines        -- newline-separated training text.
  outputs         -- 0: train only, return "".  Nonzero: also generate text.
  donotaddperiods -- 1: do not append a '.' to each training line.
  w1, w2          -- optional starting word-pair context for training.
  sentenceseed    -- word a generated sentence should start with.
  subjectseed     -- word that must appear somewhere in a kept sentence.

  Returns the generated output string ("" when only training).
  """
  stopword = "\n" # Since we split on whitespace, this can never be a word
  stopsentence = (".", "!", "?", "\n") # Cause a "new sentence" if found at the end of a word
  sentencesep  = "\n" # String used to separate sentences

  # GENERATE TABLE
  if w1=="": w1 = stopword
  if w2=="": w2 = stopword
  global table

  # We're going to add content to the table, and we'd like to know how much we added.
  # This provides a reference point.
  oldlen = len(table)
  totalwords = 0

  # Processing work of markov table; adds to and updates the table.
  for line in newlines.split("\n"):
    if line.strip() == "": continue
    line = line.strip()
    line = line.replace("\n", "")
    if donotaddperiods == 1:
      period = ""
    else:
      period = "."
    # Collapse runs of whitespace to single spaces, drop the first three
    # tokens, then append the period (unless 'donotaddperiods' was set).
    # NOTE(review): the [3] presumably strips a log prefix (timestamp/nick);
    # it raises IndexError on lines with fewer than four words -- confirm
    # the expected input format.
    line = re.compile(r"\s+").sub(" ", line).split(" ",3)[3]+period
    # Contract runs of sentence-terminating punctuation into a single
    # terminating character.
    while line.count("..") > 0:
      line = line.replace("..",".")
    while line.count("!!") > 0:
      line = line.replace("!!","!")
    while line.count("??") > 0:
      line = line.replace("??","?")
    # Replace hybrid punctuators like ?! and !? with a single character.
    while line.count("?!") > 0:
      line = line.replace("?!","?")
    while line.count("!?") > 0:
      line = line.replace("!?","!")

    firstword = 1
    for word in line.split():
      # If this word ends with ':', and it's our first, then it's probably
      # the addressee of a line. Skip it. (\x02 is IRC bold markup.)
      if (word.endswith(":") or word.endswith("\x02:\x02")) and firstword == 1:
        firstword = 0
        continue

      wordpair = (w1, w2)
      firstword = 0
      totalwords += 1
      # If the trailing character matches one of our sentence-ending
      # characters, then the word we're interested in is everything
      # leading up to that character.
      if word[-1] in stopsentence:
        # This is the word we're interested in at the moment.
        word = word[0:-1]
        weight_tables(wordpair, word, stopword)
        # Word 2 is now word 1, and our new word is now word 2.
        w1, w2 = w2, word
      # This particular markov model places double-weight on sequences
      # which end sentences (the pair is recorded again here). That offers
      # some advantage in preventing unintentionally-long results.
      wordpair = (w1, w2)
      weight_tables(wordpair, word, stopword)
      w1, w2 = w2, word
  # Mark the end of the file # should i do this at all <<< ???
  weight_tables((w1, w2), stopword, stopword)

  dotext("[markov'd "+str(totalwords)+" words.]",sameline=1)
  newlen = len(table)
  growlen = newlen - oldlen
  dotext("[cloud grew by " + str(growlen) + " from "+str(oldlen)+" to "+str(newlen)+" nodes.]",sameline=1)

  if outputs == 0: return ""
  # GENERATE SENTENCE OUTPUT
  maxsentences = 14

  if sentenceseed != "":
    dotext("[seeking 1.seed "+sentenceseed+"]",sameline=1)
  if subjectseed != "":
    dotext("[seeking 2.seed "+subjectseed+"]",sameline=1)
  sys.stdout.flush()

# new tack
  w1 = stopword
  w2 = stopword
  sentencecount = 0
  sentence = []

  output = ""

  seedtries = 2500 # not a good way to do this
  seedctr = 0

  # Pick a starting (w1, w2) context from the seeds, preferring the most
  # specific pairing that actually exists in the table.
  if subjectseed != "" and sentenceseed != "" and (sentenceseed, subjectseed) in table:
    w1 = sentenceseed
    w2 = subjectseed
  elif subjectseed != "" and sentenceseed != "" and (subjectseed, sentenceseed) in table:
    w2 = sentenceseed
    w1 = subjectseed
  elif sentenceseed != "" and (stopword, sentenceseed) in table:
    w2 = sentenceseed
    w1 = stopword
  elif sentenceseed != "" and (sentenceseed, stopword) in table:
    w1 = sentenceseed
    w2 = stopword
  elif subjectseed != "" and (subjectseed, stopword) in table:
    w1 = subjectseed
    w2 = stopword
  elif subjectseed != "" and (stopword, subjectseed) in table:
    w2 = subjectseed
    w1 = stopword

  # NOTE(review): both seeds are cleared unconditionally here, which makes
  # the seed-matching checks inside the loop below unreachable as written --
  # confirm whether this reset was meant to be conditional on a seed having
  # been found in the table above.
  sentenceseed = ""
  subjectseed = ""

  oldsentenceseed = ""
  while sentencecount < maxsentences:
    if output.count(" ") > 30: # beetris protection
      break
    # Expand the follower-frequency dict for the current context into a
    # weighted candidate list (each follower appears once per observation).
    expansion = []
    for mykey in table[(w1, w2)]:
      for mytemp in range(table[(w1, w2)][mykey]):
        # Cap the stopword's contribution at ~10 copies so "\n" cannot
        # dominate the draw.
        if mykey == "\n" and mytemp > 10:
          continue
        expansion.append(mykey)
    sys.stdout.flush()
    newword = random.choice(expansion) # i should exhaustively search this table instead
    if sentenceseed != "" and newword.lower() != sentenceseed.lower() and sentence == [] and seedctr < seedtries:
      seedctr += 1
      continue
    if seedctr >= seedtries and sentenceseed != "":
      dotext("[seed not found in "+str(seedtries)+" tries, last candidate word '"+(newword.replace("\n"," "))+"']",sameline=1)
      dotext("[giving up on sentence seed]",sameline=1)
      sentenceseed = ""
    sys.stdout.flush()
    if newword.lower() == sentenceseed.lower() and sentence == [] and sentenceseed != "":
      dotext("[matched sentence seed "+sentenceseed+" to word "+newword+"!]")
      oldsentenceseed = sentenceseed
      sentenceseed = ""
    if newword == stopword: return output # some kind of pathological end condition
    if (newword in stopsentence):
      add = 0
      if subjectseed == "":
        add = 1
      else:
        # do this next line with a regex instead ok
        if (" "+(" ".join(sentence).lower())+" ").count(" "+(subjectseed.lower())+" ") > 0:
          dotext("[matched subject seed "+subjectseed+" to sentence "+(" ".join(sentence))+"!]")
          add = 1
        else:
          seedctr += 1
          add = 0
          if seedctr >= seedtries:
            dotext("[seed not found in "+str(seedtries)+" tries, last candidate sentence '"+(" ".join(sentence))+"']",sameline=1)
            seedctr = 0
            sentencecount += 1 # bad subject seeds will get you an empty list
      if add == 1:
        output += "%s%s%s" % (" ".join(sentence), sanword(newword), sentencesep)
        sentencecount += 1
      sentence = []
      seedctr = 0
      if oldsentenceseed != "":
        sentenceseed = oldsentenceseed
    else:
      sentence.append(sanword(newword))
    w1, w2 = w2, newword

  return output

def sanword(word):
  """Sanitize a single word before it reaches output or the table.

  Any token that contains a URI scheme separator is swapped wholesale
  for a link pointing back at us, so strange URLs never propagate. ^^
  """
  if "://" in word:
    return 'http://dastoob.net/'
  return word

def weight_tables(wordpair, follower, stopword):
  """Record that 'follower' was observed after the two-word context 'wordpair'.

  Increments the follower's weight in the global table, creating the table
  entry as needed (first sighting gets weight 1).  The stopword's weight is
  capped at 1000 so end-of-sentence markers cannot grow without bound.
  """
  # Sanitize input, so we don't pick up any strange URIs.
  # I hate doing this here, but doing it elsewhere just seems to trigger
  # crashes and/or infinite loops, and I don't know why yet.
  follower = sanword(follower)

  global table
  # Fetch (or create) the follower-frequency dict for this word pair once,
  # instead of re-running setdefault (and allocating a throwaway dict) for
  # every access as before.
  followers = table.setdefault(wordpair, {})
  if follower == stopword and followers.get(follower, 0) >= 1000:
    # Overabundant stopword: cap it at 1000.
    followers[follower] = 1000
  else:
    # First time seen -> weight 1; otherwise increment the weight.
    followers[follower] = followers.get(follower, 0) + 1

