# --- Scratch sketches for a document-position pipeline (TensorFlow Fold style) ---
# NOTE(review): these are incomplete drafts, not a runnable definition.
td.InputTransform(destructureDoc) >>
td.InputTransform(wordLength) >>

# Type sketch: msom folds a sequence of (int, int) pairs into one (int, int).
msom(seq[tuple[int int]]): tuple[int int]

# Draft: split a doc into paras, then map something over them (body left blank).
doc_position = 
  td.InputTransform(destructureDoc) >> # paras
  td.Map(  )

# Abandoned fragment — a td.Map over scalars, never finished.
td.Map(td.Scalar >> 

# Cleanest sketch: a doc's position = destructure into paras,
# take each para's position, then merge them with the msom.
doc_position(doc) =
  destructure(doc) >>
  map(para_position) >>
  msom

# Type sketch: "struct" is any level of document structure
# (doc / para / sent / word).
struct

# A word is the leaf level — it cannot be destructured further.
word extends struct
  is_leaf = true

# A sentence destructures into its words.
# NOTE(review): presumably doc/para get analogous destruct rules — not shown here.
sent
  destruct(struct):words = split_to_words(struct)

# Recursively compute a structure's position using a level-specific msom:
# non-leaf: destructure into children, get each child's position, merge via msom;
# leaf: feed the struct itself straight into the msom.
get_position(struct) =
  # each structural level gets its own map
  msom = get_msom_based_on_level(struct.level)
  cond(
    struct.not_leaf,
    pipe(
      struct.destructure(),
      map(get_position),  # recurse one level down
      msom
    ),
    true,                 # default branch: struct is a leaf
    msom(struct),
    )

############

# Single MSOM update sketched as a TF-1.x-style graph.
# xt: current input; ct: context carried over from the previous step.
xt = placeholder

ct = placeholder || 0   # context defaults to 0 on the first step

# Each neuron has a weight vector Nw and a context vector Nc.
Nw = tf.Variable
Nc = tf.Variable

# Distance of the (input, context) pair to every neuron.
# Fixed: was "N.w"/"N.c", inconsistent with the Nw/Nc variables declared above
# (and with the same formula in substep below).
d = ( xt - Nw ) + ( ct - Nc )

# Pull each neuron toward the current (xt, ct), scaled by its distance.
delta_Nw = d * (xt - Nw)

delta_Nc = d * (ct - Nc)

train_msom = tf.group(
  tf.assign_add(Nw, delta_Nw),
  tf.assign_add(Nc, delta_Nc)
  )

# Best-matching unit = neuron at minimal distance.
Nwinner = position_of_min(d)

# Context for the next step, derived from the winner.
# Fixed: this was rebound to "ct", shadowing the placeholder above and
# making the feed key {ct: ...} below refer to the wrong graph node.
next_ct = Nw[Nwinner] - Nc[Nwinner]

sess.run(
    [train_msom, Nwinner, next_ct],
    {xt: 5, ct: new_ct}
    )

#########

# winner, ct = substep(xt, ct)

# Run the msom over a whole sequence xts, threading the context ct
# through every element; returns the winner of the final step.
def step xts
{
# pick the per-element routine once, depending on train vs. eval mode
cond
	(learning?) substep = learn_substep
	true substep = eval_substep

# fold over the sequence: context starts at 0, each substep consumes
# one element and yields the context fed into the next
loop [xts xts, ct 0]
	winner, ct = substep( xts.first, ct )
	cond 
		(some? xts.rest) recur xts.rest, ct
		true winner
}

# One msom step: update (while learning) or just evaluate the map for a
# single input xt and previous context ct; returns the winning neuron
# and the context for the next step.
def substep xt ct
{
Nw = var
Nc = var
# distance of (xt, ct) to every neuron
d = (xt - Nw) + (ct - Nc)
delta_Nw = d * (xt - Nw)
delta_Nc = d * (ct - Nc)
train_msom = tf.group(
	tf.assign_add(Nw, delta_Nw)
	tf.assign_add(Nc, delta_Nc)
	)
# best-matching unit
Nwinner = position_of_min(d)
# context for the next step.
# Fixed: this was rebound to "ct", shadowing the input parameter and
# making the feed key {ct: ...} below refer to the wrong node.
next_ct = Nw[Nwinner] - Nc[Nwinner]

# only apply the weight updates while learning
cond (learning?) train_op = train_msom
	true train_op = no_op

# Fixed: three fetches were unpacked into two names; discard the
# train_op result explicitly.
_, winner, next_ct = sess.run(
	[train_op, Nwinner, next_ct],
	{ xt: xt, ct: ct } )

return winner next_ct
}
