# TODO:
# * Assignment 3: handle unknown words, add smoothing
# * Meta: clean up / comment the code and write a proper README


# Context Free Grammar Class
class CFG:
  """Probabilistic context-free grammar (PCFG) with a CYK chart parser.

  The grammar is trained by counting productions in a bracketed treebank
  (trainonfile), normalised to relative frequencies, and queried through a
  set of lookup tables built by reverselookup().

  Instance attributes:
    CFG     -- maps a non-terminal to a list of (children, count-or-prob)
               pairs, where children is a list of symbols
    revCFG  -- reverse table: revCFG[B][C] lists the non-terminals A that
               have a binary rule A -> B C
    term    -- maps a terminal word to the preterminals that produce it
    preterm -- maps a non-terminal X to the symbols that produce X by a
               unary rule
    CFGprob -- CFGprob[A][str(children)] gives the probability of the rule
  """

  def __init__(self):
    # Tables are created per instance; as class-level attributes (as in the
    # original) the dicts would be shared between every CFG instance.
    self.CFG = {}
    self.revCFG = {}
    self.term = {}
    self.preterm = {}
    self.CFGprob = {}
    print("CFG initialised")

  def processrule(self, rule):
    """Count one observed production.

    rule is a (lhs, children) pair; children is a list of produced symbols.
    Increments the count of an existing (lhs -> children) rule or records
    it with count 1.
    """
    lhs, rhs = rule[0], rule[1]
    productions = self.CFG.setdefault(lhs, [])
    for index, (children, count) in enumerate(productions):
      if children == rhs:
        productions[index] = (children, count + 1)
        return
    productions.append((rhs, 1))

  def writedict(self, file):
    """Write the grammar to *file*, one 'lhs ;;; rules' line per symbol."""
    # Text mode, and the context manager closes the handle (the original
    # opened 'wb' and leaked the file object).
    with open(file, 'w') as f:
      for k, v in self.CFG.items():
        f.write(k + " ;;; " + str(v) + "\n")

  def readdict(self, file):
    """Read a grammar written by writedict() and build the lookup tables."""
    import ast
    with open(file, 'r') as f:
      for line in f:
        splitline = line.split(';;;')
        # literal_eval safely parses the stored list-of-tuples repr; eval
        # (as in the original) would execute arbitrary code from the file.
        self.CFG[splitline[0].strip()] = ast.literal_eval(splitline[1])
    # TODO: check whether the stored grammar is already normalised
    # (checknormalised()) and call normalise() if it is not.
    print("Making reverse look up table")
    self.reverselookup()
    print("Total of %d non-terminal symbols" % len(self.CFG))

  def checknormalised(self):
    """Return True if every symbol's rule probabilities sum to one."""
    for v in self.CFG.values():
      # Tolerance instead of exact equality: after normalise() the values
      # are floats and rounding can push the sum slightly off 1.
      if abs(sum(y for (x, y) in v) - 1) > 1e-9:
        return False
    return True

  def normalise(self):
    """Turn the raw rule counts into relative-frequency probabilities."""
    for k, v in self.CFG.items():
      total = float(sum(y for (x, y) in v))
      # Rewrite each pair by position; the original re-located every pair
      # with v.index() while mutating v, which is both O(n^2) and fragile.
      for index, (children, count) in enumerate(v):
        v[index] = (children, count / total)

  def reverselookup(self):
    """Build revCFG, term, preterm and CFGprob from the grammar."""
    for k, v in self.CFG.items():
      for (x, y) in v:
        # Probability lookup is keyed on the repr of the children list.
        self.CFGprob.setdefault(k, {})[str(x)] = y
        if len(x) == 1:
          if x[0] in self.CFG and not x[0] == k:
            # Unary rule to another non-terminal: k can sit on top of x[0].
            self.preterm.setdefault(x[0], []).append(k)
          else:
            # Unary rule to a word (or a self-loop): x[0] is a terminal.
            self.term.setdefault(x[0], []).append(k)
        else:
          # Binary rule k -> x[0] x[1]; index by both children so the
          # parser can find every parent of an adjacent (B, C) pair.
          self.revCFG.setdefault(x[0], {}).setdefault(x[1], []).append(k)
    print("%d Terminal symbols" % len(self.term))

  def trainonfile(self, file):
    """Train the grammar on a treebank file, one bracketed tree per line."""
    with open(file, 'r') as f:
      trees = f.readlines()
    print("Going to parse %d trees" % len(trees))

    counter = 0
    for tree in trees:
      counter += 1
      if counter % 1000 == 0:
        print("Parsed %d trees with %d non-terminal symbols in the CFG"
              % (counter, len(self.CFG)))
      # Single left-to-right pass over the bracketed text with a stack of
      # open rules: '(' opens a rule, each ')' closes and counts one.
      stack = []
      for token in tree.split():
        if token[0] == '(':
          # Non-terminal: record it as a child of the currently open rule
          # (unless it is the root), then open a new rule for it.
          if stack:
            stack[-1][1].append(token[1:])
          stack.append((token[1:], []))
        else:
          # Terminal word, possibly followed by closing brackets.
          # NOTE(review): a token containing no ')' yields '' here
          # (token[0:-0] is empty) — assumed not to occur in the treebank.
          parc = token.count(')')
          stack[-1][1].append(token[0:-parc])
          for _ in range(parc):
            self.processrule(stack.pop())

    self.normalise()
    self.reverselookup()
    print("CFG trained with %d non-terminal symbols" % len(self.CFG))

  def CYKparse(self, u, Verbose=0):
    """Parse the utterance *u* (a string) with probabilistic CYK.

    Returns the most probable tree rooted in 'TOP' as nested lists, or a
    flat fall-back tree from falsetree() when no full parse exists.
    """
    # t[i][j] holds the analyses of words i..j-1; a sentinel empty word is
    # appended so rules of the form X -> Y '' can complete the chart.
    u = u.split()
    u.append('')
    n = len(u)
    # n rows of n+1 *distinct* empty cells. The original built the table
    # with list repetition, which made every cell alias one shared list.
    t = [[[] for _ in range(n + 1)] for _ in range(n)]

    for j in range(1, n + 1):
      word = u[j - 1]
      # --- diagonal: lexical (unary) rules ------------------------------
      if word == '':
        t[j - 1][j] = [('', [], 1)]
      elif word in self.term:
        for nonterm in self.term[word]:
          prob = self.CFGprob[nonterm][str([word])]
          t[j - 1][j].append((nonterm, [nonterm, word], prob))
          # Apply unary chains preterm -> nonterm -> word, keeping only
          # the best-scoring analysis per preterminal symbol.
          for preterm in self.preterm.get(nonterm, []):
            chainprob = prob * self.CFGprob[preterm][str([nonterm])]
            entry = (preterm, [preterm, [nonterm, word]], chainprob)
            symbols = [x for (x, y, z) in t[j - 1][j]]
            if preterm in symbols:
              index = symbols.index(preterm)
              if chainprob >= t[j - 1][j][index][2]:
                t[j - 1][j].pop(index)
                t[j - 1][j].append(entry)
            else:
              t[j - 1][j].append(entry)
      else:
        # Unknown word: allow every known preterminal with a small
        # penalty probability (crude smoothing).
        print("Word not in dictionary %s" % word)
        POSTAGS = list(set([pos for tags in self.term.values() for pos in tags]))
        t[j - 1][j] = [(POS, [POS, word], 0.00001) for POS in POSTAGS]
      # --- combine shorter spans with binary rules ----------------------
      for i in range(j - 2, -1, -1):
        for k in range(i + 1, j):
          for (B, st1, p1) in t[i][k]:
            for (C, st2, p2) in t[k][j]:
              if B in self.revCFG and C in self.revCFG[B]:
                for nonterm in self.revCFG[B][C]:
                  prob = self.CFGprob[nonterm][str([B, C])] * p1 * p2
                  # Viterbi-style: keep only the best entry per symbol.
                  symbols = [x for (x, y, z) in t[i][j]]
                  if nonterm in symbols:
                    index = symbols.index(nonterm)
                    if prob >= t[i][j][index][2]:
                      t[i][j].pop(index)
                      t[i][j].append((nonterm, [nonterm, st1, st2], prob))
                  else:
                    t[i][j].append((nonterm, [nonterm, st1, st2], prob))

    # Complete parses rooted in TOP span the whole chart.
    Parseforest = [(s, tr, p) for (s, tr, p) in t[0][-1] if s == 'TOP']
    # Highest probability first. The original called sorted() and threw the
    # result away, so an arbitrary parse could be returned.
    Parseforest.sort(key=lambda entry: entry[2], reverse=True)
    if Verbose > 1:
      for (top, tr, prob) in Parseforest:
        print("%s %s" % (prob, tr))
    if Parseforest:
      return Parseforest[0][1]
    # No full parse: fall back to a flat part-of-speech tree.
    return self.falsetree(u)

  def falsetree(self, s):
    """Build a flat 'TOP' tree tagging each word with its best POS tag.

    s is the word list including the sentinel '' appended by CYKparse
    (removed here, mutating the caller's list, as in the original).
    Words with no known tag are labelled 'UNKNOWN'.
    """
    s.remove('')
    tree = ['TOP']
    for word in s:
      maxpostag = 'UNKNOWN'
      maxprob = 0
      # Pick the preterminal assigning the highest lexical probability;
      # dict.get replaces the original bare except around the lookup.
      for postag in self.term.get(word, []):
        crtprob = self.CFGprob[postag][str([word])]
        if crtprob > maxprob:
          maxprob = crtprob
          maxpostag = postag
      tree.append([maxpostag, word])
    return tree

