require "digest"
require "forwardable"
require "json"
require_relative 'twikit_file_reader'

# Mixed into numeric classes to provide sign predicates and a sign function.
module Unitizer
  # True when the receiver is zero or positive.
  def pos?
    self >= 0
  end

  # True when the receiver is strictly negative.
  def neg?
    self < 0
  end

  # Sign of the receiver: +1 for zero or positive, -1 for negative.
  def norm
    self >= 0 ? 1 : -1
  end
end

# Array -> Hash conversion helpers.
module ArrayExtension
  # Returns a hash mapping each element to block.call(element).
  # Uses map + to_h instead of Hash[*pairs.flatten]: flatten corrupted
  # entries whose element or block result was itself an Array.
  def to_hash_keys(&block)
    map { |v| [v, block.call(v)] }.to_h
  end

  # Returns a hash mapping block.call(element) to each element.
  def to_hash_values(&block)
    map { |v| [block.call(v), v] }.to_h
  end
end

class Float; include Unitizer; end
# Fixnum was deprecated in Ruby 2.4 and removed in 3.2; include into the
# legacy constant when it still exists, otherwise into Integer, so the file
# loads on both old and modern rubies.
(defined?(Fixnum) ? Fixnum : Integer).include(Unitizer)
class Array; include ArrayExtension; end

# PNO ("property normal object"): a named tree of properties whose scalar leaf
# values are normalized to single characters from @norm_base, so that two PNO
# trees can be compared with a hamming-style distance (see hamming_distance).
class PNO
  include Enumerable
  extend Forwardable
  # prop: the name of this property tree. There is no public reader for
  # @PNO_prop_name.
  def initialize( prop )
    # Valence radius / cap used by subclasses when scanning nearby tokens.
    @MAGIC_NUMBER = 2
    # Output alphabet for normalized single-character property values.
    @norm_base = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    # Input alphabet used to rank raw characters during valuation.
    @char_base = "~ !@#%^&()_`\/?<>[]{}:;,+-=*.0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
    @PNO_prop_name = prop
    @PNO_prop_ranges = {}
    @PNO_prop_values = {} #Each inner property/value has a spot in this array where it's associated VALUES are added
  end
  # NOTE(review): @PNO_prop_values is a Hash, but Array-style methods (:<<,
  # :last, :each_index) are delegated to it — those would raise if called.
  def_delegators :@PNO_prop_values, :<<, :[], :[]=, :last, :length, :each, :map, :map!, :each_index, :has_key?
  def inspect
    return @PNO_prop_values.to_s
  end
  # NOTE(review): to_s prints the folded object as a side effect as well as
  # returning it (and folds twice).
  def to_s
    puts pno_fold_object.to_s
    return pno_fold_object.to_s
  end
  # Serializes the folded (normalized) form, not the raw property values.
  def to_json(*a)
    #{
      # I just added twikit_on_gcode
      #@PNO_prop_name => pno_fold_object
    #}
    pno_fold_object.to_json(*a)
  end
  # Registers a property: a nested PNO keyed by its name, or a String name
  # with an optional range (nr) and initial value (nv).
  # NOTE(review): the PNO branch calls np.name, but PNO defines no `name`
  # reader — passing a PNO here would raise. Subclasses only pass Strings;
  # confirm before relying on the PNO branch.
  def add_PNO_property( np, nr=nil, nv=nil )#if nv is enumerable, each member must evaluate to a 'value' or there will be hell to pay
    if(np.is_a? PNO)then @PNO_prop_values[np.name] = np
    else if(np.is_a? String)then @PNO_prop_values[np] = nv; @PNO_prop_ranges[np] = nr
    end end
  end
  private :add_PNO_property
  # Placeholder — intentionally empty.
  def normalize()
    
  end
  # Folds this PNO into a plain Hash keyed by 1-based position ("1", "2", ...):
  # nested PNOs fold recursively, Arrays fold element-wise (PNO elements
  # recurse, others are stringified), and scalar leaves are normalized to a
  # single character via normalize_val.
  def pno_fold_object()
    hash_obj = {}
    i=1
    @PNO_prop_values.each_pair{|prop, value|
      if(value.is_a? PNO)then
        hash_obj[i.to_s] = value.pno_fold_object;
        puts value.pno_fold_object.class # NOTE(review): debug output left in
      else if(value.is_a? Array)then 
        hash_arr = []
        value.each{|subval|
          if(subval.is_a? PNO)then
            hash_arr << subval.pno_fold_object
          else
            hash_arr << subval.to_s;
          end
        }
        hash_obj[i.to_s] = hash_arr
      else 
        v = 0
        hash_obj[i.to_s] = normalize_val(prop, value)
      end end
      i += 1
    }
    return hash_obj
  end
  
  # Distance between this PNO's first folded property and the first property
  # of another, already-folded, hash.
  def hamming_distance(other_PNO_hash)
    my_PNO_hash = pno_fold_object["1"]
    hdist = hamming_distance_calc(other_PNO_hash["1"], my_PNO_hash)
    if(false) # debug output, permanently disabled
      puts "\n"
      puts "My hash is::#{my_PNO_hash}"
      puts "Other hash::#{other_PNO_hash["1"]}"
      puts "Hamming distance is::#{hdist}"
      puts "\n"
    end
    return hdist
  end
  
  # Recursive distance between two folded structures.
  #   scalars : |char_base position difference|; empty string costs miss_cost
  #   hashes  : sum over shared keys; missing keys cost miss_cost each
  #   arrays  : each element of the first is charged its best match in the
  #             second, plus a penalty scaled by the length difference
  # Mismatched shapes fall through with a warning and cost 0.
  def hamming_distance_calc(pno_hash_1, pno_hash_2)
    miss_cost = 13.0
    this_node_ham = 0.0
    case true
    when ( ((pno_hash_1.is_a? String)||(pno_hash_1.is_a? Numeric)) && ((pno_hash_2.is_a? String)||(pno_hash_2.is_a? Numeric)) )
        if(pno_hash_1 == "")
            this_node_ham =  miss_cost# @char_base.index(pno_hash_2)
        else if(pno_hash_2 == "")
            this_node_ham = miss_cost#  @char_base.index(pno_hash_1)
        else 
            this_node_ham = (@char_base.index(pno_hash_1)-@char_base.index(pno_hash_2)).abs
        end end
    when ((pno_hash_1.is_a? Hash) && (pno_hash_2.is_a?Hash))
        pno_hash_1.each_pair{|prop, hash_1_val|
          if(pno_hash_2.keys.index(prop))
            hash_1_val_ham = hamming_distance_calc(pno_hash_1[prop], pno_hash_2[prop])
          else
            hash_1_val_ham = miss_cost
          end
          this_node_ham += hash_1_val_ham
        }
    when ((pno_hash_1.is_a? Array) && (pno_hash_2.is_a? Array))
        # NOTE(review): this_node_worst_ham starts at the theoretical maximum,
        # so the "raise it when exceeded" update below rarely (never?) fires —
        # confirm intended behavior.
        this_node_worst_ham = (pno_hash_1.length + pno_hash_2.length).to_f * miss_cost**2
        pno_hash_1.each{|hash_1_val|
          hash_1_val_best_ham = -1
          pno_hash_2.each{|hash_2_val|
            hash_1_val_this_ham = hamming_distance_calc(hash_1_val, hash_2_val)
            hash_1_val_best_ham = hash_1_val_this_ham if(hash_1_val_this_ham < hash_1_val_best_ham || hash_1_val_best_ham == -1)
            this_node_worst_ham = hash_1_val_this_ham if(hash_1_val_this_ham > this_node_worst_ham)
          }
          hash_1_val_best_ham = miss_cost if(hash_1_val_best_ham < 0)
          this_node_ham += hash_1_val_best_ham
          # puts "I think they are close(#{hash_1_val_best_ham})::\n#{pno_hash_1}\n#{pno_hash_2}\n--"
        }
        this_node_ham += (this_node_worst_ham.to_f * 0.5 * (pno_hash_1.length - pno_hash_2.length).abs.to_f)
    else
        warn "Encountered an unexpected configuration while checking hamming distance."
    end
    # puts "What should I charge for missing?(#{this_node_ham})::\n#{pno_hash_1}\n#{pno_hash_2}\n--"
    return this_node_ham 
  end
  
  # NOTE(review): only a type check — any PNO "matches" any other regardless
  # of contents.
  def =~(other_PNO)
    if(other_PNO.is_a? PNO)then return true
    else return false end
  end
  
  # Normalizes a raw property value to a single character of @norm_base,
  # using the 0..1 valuation from valuate_prop.
  def normalize_val(p, val)
    decimal_value = valuate_prop(p, val)
    normal_value = @norm_base[( (@norm_base.length-1) * decimal_value).to_i]
    # puts "'#{val}' valuates to '#{decimal_value}', normalizes to '#{normal_value}'"
    return normal_value.to_s
  end
  def set_PNO_range( p, r)
    @PNO_prop_ranges[p] = r
    return self
  end
  def set_PNO_value( p, v)
    @PNO_prop_values[p] = v
    return self
  end
  # Maps a value onto 0..1 according to the property's registered range:
  #   [lo, "->*", hi] : numeric — val / |hi - lo|
  #   [a, "->", b]    : character span of @char_base between a and b
  #   ["~>", n1, n2..]: snap to the nearest listed nominal value
  # Falls back to the full ["~", "->", "Z"] span when no range is registered.
  def valuate_prop( p, val)
    if(!(@PNO_prop_ranges[p].is_a? Array))then r = ["~", "->", "Z"] else r = @PNO_prop_ranges[p].dup end
    if(r[1] === "->*") then 
      return val / (r[2] - r[0]).abs
    else
      # NOTE(review): a nested `def` (re)defines raw_value on the class every
      # time valuate_prop runs; it also assumes ival responds to #length
      # (i.e. is a String) — confirm numeric values never reach it.
      def raw_value(r, ival)
        n = 0
        v = 0
        ival.to_s.scan(/./).each{|c|
          c_pos = @char_base.index(c)
          nearest = @char_base.index(r[0])
          r.each{|nominal_character|
            nom_pos = @char_base.index(nominal_character)
            if((nom_pos - c_pos).abs < (nearest-c_pos).abs)then nearest=nom_pos end  
          }
          v += ((r.index(@char_base[nearest])+1).to_f / r.length.to_f)**(ival.length - n)
          n += 1
        }
        v = v.to_f / (r.length**(ival.length-1)).to_f
        return v
      end
      if(r[1] === "->")then r = @char_base[Regexp.new(r[0] + ".*" + r[2])].scan(/./) end
      if(r[0]==="~>")then
        k = r.slice!(1..r.length)
        r = @char_base.scan(/./)
        true_val = raw_value(r, val)
        nominal_vals = k.map{|s| raw_value(r, s)}
        nearest_nominal = nominal_vals[0]
        nominal_vals.each{|nom_val|
          if((nom_val - true_val).abs < (nearest_nominal-true_val).abs)then nearest_nominal=nom_val end  
        }
        return nominal_vals.index(nearest_nominal).to_f/k.length.to_f
      else return raw_value(r, val) end
    end
  end
end

# Top-level PNO describing one unit test: a single "capabilities" property
# holding an XTP_capability for every assertion token found in the index.
class XTP_map < PNO
  def initialize(token_index = nil)
    super("xtp_map")
    add_PNO_property("capabilities",[])
    internalize_capabilities_from_index(token_index) unless token_index.nil?
  end

  # Scans token_index for assertion tokens (ids starting with 'a') and stores
  # an XTP_capability for each under 'capabilities'. Returns self.
  def internalize_capabilities_from_index(token_index)
    assertion_tokens = token_index.select { |token| token['id'][0] == 'a' }
    capabilities = assertion_tokens.map do |a_token|
      XTP_capability.new(a_token, token_index)
    end
    set_PNO_value('capabilities', capabilities)
    self
  end
end

# PNO describing one assertion ("a") token: its orthogonality (+/-), which
# assertion it is, and PNO descriptions of the tokens near it.
class XTP_capability < PNO
  def initialize(token = nil, token_index = nil)
    super ("xtp_capability")
    # "~>" ranges: the value is snapped to the nearest listed nominal.
    add_PNO_property("a_orth",["~>","-","+"])
    add_PNO_property("a_val",["~>",
      "assert",
      "assert_equal",
      "assert_in_delta",
      "assert_instance_of",
      "assert_kind_of",
      "assert_match",
      "assert_nil",
      "assert_no_match",
      "assert_not_nil",
      "assert_not_equal",
      "assert_not_same",
      "assert_nothing_raised",
      "assert_nothing_thrown",
      "assert_operator",
      "assert_raise",
      "assert_respond_to",
      "assert_same",
      "assert_send",
      "assert_throws"
    ])
    add_PNO_property("nearby_tokens",nil,[])
    internalize_properties_from_token(token, token_index) if((!token.nil?)&&(!token_index.nil?))
  end
  # Fills a_orth/a_val from the token and, when an index is supplied, builds
  # XTP_token entries for the neighbors returned by tokens_near.
  def internalize_properties_from_token(token, token_index = nil)
    set_PNO_value('a_orth', orthogonality_of( token['token_value'] ))
    set_PNO_value('a_val', token['token_value'])
    if(!(token_index.nil?))
      nearby_tokens = tokens_near(token, token_index).map{|n_dist, n_token|
        XTP_token.new( n_token, token_index )
      }
      set_PNO_value('nearby_tokens', nearby_tokens)
    end
  end
  # "-" for negated assertions (name contains false/not/no/nothing),
  # "+" otherwise; false when the argument is not a String.
  def orthogonality_of( assertion_type )
    if(assertion_type.is_a? String)
      negative_assertion_identifier = /false|not|no|nothing/
      if(assertion_type.index(negative_assertion_identifier))then return "-" else return "+" end 
    else return false end
  end
  # Valence map of tokens around `token`, trimmed so nothing at or beyond a
  # colliding assertion token ('a' id) on either side survives. Close-behind
  # neighbors (within 2/3 of the radius) are dropped up front.
  # NOTE(review): relies on Fixnum, removed in Ruby 3.2 — confirm the target
  # Ruby version or switch to Integer.
  def tokens_near(token, token_index)
    if((token['index'].is_a? Fixnum)&&(token['index'] < token_index.length)&&(token['index'] >= 0))
      collisions = []
      raw_valence = token_index.map_valence_for_index(token['index'], @MAGIC_NUMBER, false)
      .delete_if{|dist,neighbor|(dist<0 && dist.abs<(2.0/3.0*@MAGIC_NUMBER.to_f))}
      .each_pair{|dist,neighbor| if(neighbor[0]=='a')then collisions.push(dist) end}
      collisions.each{|point_of_collision|
        raw_valence.delete_if{|check_point,neighbor|
          ((point_of_collision<0 && check_point.abs >= point_of_collision.abs)||(point_of_collision>=0 && check_point.abs <= point_of_collision.abs))
        }
      }
      return raw_valence
    else return [] end
  end
end

# PNO describing a single token: its type letter plus a small "family" of the
# tokens that most often co-occur with it across the index.
class XTP_token < PNO
  def initialize(token_id=nil, token_index=nil)
    super ("xtp_token")
    # Type is one of: a(ssertion), g(eneric), l(iteral), t (object).
    add_PNO_property("type",["a","g","l","t"])
    # add_PNO_property("i_score",[0,"->*",1])
    add_PNO_property("token_family",nil,[])
    initialize_properties_from_token_id(token_id, token_index) if((!token_id.nil?)&&(!token_index.nil?))
  end
  def initialize_properties_from_token_id(token_id, token_index)
    set_PNO_value("type",token_id[0].downcase)
    # set_PNO_value("i_score",token_index.intersection_intensity(token_id, @MAGIC_NUMBER))
    similar_tokens = tokens_like(token_id, token_index).map{|f_token, f_similarity|
      XTP_cousin_token.new( f_token, f_similarity, token_index )
    }
    set_PNO_value('token_family', similar_tokens)
  end
  # Accumulates |distance| per neighbor over every occurrence of token_id,
  # keeps the top @MAGIC_NUMBER + 1 by that total, and rescales each total by
  # occurrence count * radius into a similarity. Returns [] for a nil id.
  # NOTE(review): sort ascending then reverse! keeps the LARGEST totals —
  # confirm that "largest accumulated distance" is the intended similarity.
  def tokens_like(token_id, token_index)
    # I need to decide what similar tokens look like...  
    if(!token_id.nil?)
      cousins = {}
      raw_valence = token_index.map_valence_for_id(token_id, @MAGIC_NUMBER, false)
      .each_pair{|occ_index, occ_map|
        occ_map.each_pair{|rdist,neighbor| cousins[neighbor]=0 if(cousins[neighbor].nil?); cousins[neighbor]+=rdist.abs }
      }
      cousins = cousins.to_a.sort!{|a, b|
        a[1] <=> b[1] 
      }.reverse![0..@MAGIC_NUMBER].map{|cousin|cousin[1]=cousin[1].to_f/(raw_valence.length*@MAGIC_NUMBER).to_f; cousin}
      return cousins
    else return [] end
  end
end

# PNO describing a token that frequently co-occurs with another token: its
# type letter plus a closeness score in [0, 1].
class XTP_cousin_token < PNO
  def initialize(token_id=nil, cousin_similarity=nil, token_index=nil)
    super("xtp_cousin_token")
    # Type is one of: a(ssertion), g(eneric), l(iteral), t (object).
    add_PNO_property("type",["a","g","l","t"])
    add_PNO_property("c_score",[0,"->*",1])
    initialize_properties(token_id, cousin_similarity)
  end

  # Records the type (first id character, downcased) and the closeness score.
  def initialize_properties(token_id, cousin_similarity)
    set_PNO_value("type", token_id[0].downcase)
    set_PNO_value("c_score", cousin_similarity)
  end
end





# Wraps the classified token array for one unit test and answers valence and
# intersection queries over it. Elements are hashes with 'index',
# 'token_value', 'type' and 'id' keys (built by TwikitTokenizer).
class TokenIndex
  include Enumerable
  extend Forwardable
  # Raises unless given an Array of classified token hashes.
  def initialize( new_token_index )
    @TOKEN_INDEX = Array.new
    # @SOURCE_INTERSECTIONS = Hash.new
    if( new_token_index.is_a? Array)then @TOKEN_INDEX = new_token_index
    else raise "Token index can only be initialized with an array" end
    self
  end
  def_delegators :@TOKEN_INDEX, :<<, :[], :[]=, :last, :length, :each, :map, :map!, :each_index
  def inspect
    return @TOKEN_INDEX.join("\n")
  end
  def to_s
    return inspect
  end
  
  # Unique intersecting tokens (ids like tAB, assigned by
  # mark_intersecting_tokens), each with the list of indices it occurs at
  # instead of per-occurrence 'index'/'type' fields.
  def get_all_intersecting_tokens
    return @TOKEN_INDEX.select{|token| token['id']=~/^t[A-Z]+$/ }.uniq{|token|token['id']}.map{|t|
       t = t.dup
       t.delete('index')
       t.delete('type')
       t['indices'] = @TOKEN_INDEX.select{|occ| occ['id']==t['id'] }.map{|occ| occ['index'] };
       t
      }
  end
  
  # def get_token_value_for_id(token_id)
  
  # Brightness of a token: 0 for generics, normalized 'y' for intersecting
  # tokens, otherwise the (cached) brightest intersecting neighbor averaged
  # over all occurrences.
  # NOTE(review): requires mark_intersecting_tokens to have run first —
  # @SOURCE_INTERSECTIONS is nil until then. The bare rescue swallows any
  # error, prints, and falls through to `return normer`.
  def intersection_intensity(token_id, radius)
    normer = intersection_normalizer
    if(token_id=~/g[0-9]+/) #ignore generic tokens - they always intersect frequently
      return 0
    else if(token_id=~/t[A-Z]+/)
      return @SOURCE_INTERSECTIONS.select{|intersection|intersection['id']==token_id}[0]['y']/normer
    else
      begin
      if(i = @TOKEN_INDEX.index{|token|
          (token['id']==token_id && (!token['y'].nil?) && (!token['y'][radius].nil?))
      })then
        return @TOKEN_INDEX[i]['y'][radius]
      else
        total_brightness = 0.0
        count_occurences = @TOKEN_INDEX.select{|token|token['id']==token_id}.each{|occ|
          brightest_neighbor = 0.0
          valence = map_valence_for_index(occ['index'], radius, false).each{|rdist, neighbor_id|
              if(neighbor_id=~/t[A-Z]+/)then
                ny = @SOURCE_INTERSECTIONS.select{|intersection|intersection['id']==neighbor_id}[0]['y'] * rdist.to_f / radius.to_f
                brightest_neighbor = ny if ny > brightest_neighbor
              end
          }
          total_brightness += brightest_neighbor
        }.length
        intensity = total_brightness / count_occurences
        # Cache the computed intensity on every occurrence, keyed by radius.
        @TOKEN_INDEX.select{|token|token['id']==token_id}.map{|occ|occ['y']=Hash.new if(occ['y'].nil?); occ['y'][radius]=intensity; occ}
        return intensity
      end
      rescue
        puts "Rescued #{token_id}"
      end
      return normer
    end
    end
  end
  
  # Largest 'y' among the known source intersections (seeded with a tiny
  # default entry when empty, so callers never divide by zero).
  # NOTE(review): raises if @SOURCE_INTERSECTIONS is nil (never marked), and
  # the seeded value is a Hash while mark_intersecting_tokens stores an Array
  # of hashes — iteration shape differs between the two; confirm.
  def intersection_normalizer()
    maxy = 0.0
    @SOURCE_INTERSECTIONS = {'f'=>"none",'v'=>@TOKEN_INDEX[0]['token_value'],'y'=>0.001,'n'=>1} if(@SOURCE_INTERSECTIONS.length == 0)
    @SOURCE_INTERSECTIONS.each{|intersection|
      maxy = intersection['y'] if(intersection['y'] > maxy)
    }
    return maxy
  end
  
  # Valence maps for every occurrence of the token value that the given id's
  # first occurrence carries. Returns [] when the id is unknown.
  # NOTE(review): detect(false) passes a non-callable ifnone — Enumerable
  # calls ifnone.call when nothing matches, which would raise here; confirm.
  def map_valence_for_id( pivot_token_id, radius, compact = true)
    if(pivot_token = @TOKEN_INDEX.detect(false){|token|token['id']==pivot_token_id})then
      return map_valence_for_value(pivot_token['token_value'], radius, compact)
    else
      return []
    end
  end
  
  # Maps the neighbors within `radius` of pivot_index. Distances are stored
  # as signed closeness weights: adjacent = +/-radius, farthest = +/-1.
  # compact=true  -> { id => weight | [weights...] }
  # compact=false -> { weight => id } (later neighbors at equal weight win).
  def map_valence_for_index( pivot_index, radius, compact=true)
   this_pivot_valence = Hash.new
   pivot_token = @TOKEN_INDEX[pivot_index]
   start_token = 0
   end_token = @TOKEN_INDEX.length
   # puts "Pivot Index :: #{pivot_index} => #{pivot_token}"
   # Clamp the window to the index bounds, keeping ~2*radius tokens in view.
   if(@TOKEN_INDEX.length < ((2*radius)+1))then start_token = 0; end_token = @TOKEN_INDEX.length
   else if(pivot_index < (radius + 1))then start_token = 0; end_token = (radius*2)
   else if(@TOKEN_INDEX.length-pivot_index < (radius + 1))then start_token = @TOKEN_INDEX.length-(radius*2+1); end_token = @TOKEN_INDEX.length
   else start_token = pivot_index - (radius); end_token = pivot_index + (radius); end end end
   # puts "start_token=#{start_token}, end_token=#{end_token}"
   valent_tokens = @TOKEN_INDEX[start_token..end_token]
   valent_tokens.each{|valent_token|
    if(valent_token != pivot_token)then
      puts valent_token if(valent_token['index'].nil?)
      dist = (valent_token['index']-pivot_index);
      # Invert distance into a signed closeness weight.
      dist = (dist/dist.abs)*(radius-(dist.abs-1));
      if(compact)
        if(this_pivot_valence[valent_token['id']].nil?)then this_pivot_valence[valent_token['id']] = dist
        else if(this_pivot_valence[valent_token['id']].is_a?(Numeric))then this_pivot_valence[valent_token['id']] = [this_pivot_valence[valent_token['id']], dist]
        else if(this_pivot_valence[valent_token['id']].is_a?(Array))then this_pivot_valence[valent_token['id']].push(dist)
        end end end
      else
        this_pivot_valence[dist] = valent_token['id']
      end
    end
   }
   return this_pivot_valence
  end
  
  # Valence map (keyed by occurrence index) for every occurrence of a token
  # value. Raises when the index is empty.
  def map_valence_for_value(pivot_token_value, radius, compact=true )
    if(@TOKEN_INDEX.length > 0)then
     valence_maps = Hash.new
     pivot_indices = Array.new
     # puts " ";
     # p pivot_token_value
     @TOKEN_INDEX.each_index{|index|if(@TOKEN_INDEX[index]['token_value'] == pivot_token_value)then pivot_indices.push(index) end}
     pivot_indices.each{|pivot_index|
       valence_maps[pivot_index] = map_valence_for_index(pivot_index, radius, compact)
     }
     # puts valence_maps
     return valence_maps
    else raise "map_valence::>Token Index is not an array with length greater than zero" end
  end
  
  # Rewrites matching tokens' ids/brightness from the given intersections and
  # stores them for later intensity queries. Returns self.
  def mark_intersecting_tokens(new_si)
    # Substitute token id's for intersecting tokens in the index
    new_si.each{|intersection|
      puts "Intersection::#{intersection}"
      @TOKEN_INDEX.map{|token|
        token.delete('y')
        if(token['token_value']==intersection['v'])then token['id'] = intersection['id']; token['y'] = intersection['y'] end
        token
      }
    }
    @SOURCE_INTERSECTIONS = new_si
    return self
  end
  
end









class TwikitTokenizer

	# Sets up per-filename caches (unit tests, tokens, indices, maps) and the
	# pattern tables used to classify tokens.
	def initialize( )
	  # Valence radius / result cap used throughout tokenization.
	  @MAGIC_NUMBER = 2
		@unit_tests = {}
		@source_files = {}
		@raw_tokens = {}
		@source_tokens = {}
		@token_indices = {}
		@source_intersections = {}
		@si_injections = {}
		@XTP_maps = {}
		# Patterns marking assertion tokens ("a" ids).
		@assertions_array = [
			/^.*assert.*$/,
		]
		# Reserved words treated as generic tokens ("g" ids).
		@generic_tokens_array = [
			/^for$/,
			/^if$/,
			/^do$/,
			/^while$/,
			/^end$/,
			/^in$/,
			/^def$/,
			/^false$/,
			/^new$/,
			/^class$/,
			/^splice$/,
			/^nil$/,
			/^include$/,
			/^require$/,
			/^require_relative$/,
			/^keys$/,
      /^values$/,
      /^then$/,
      /^Hash$/,
      /^return$/,
      /^puts$/,
      /^the$/,
      /^else$/
		]
	end
	
	# Returns the loaded source text for filename, or false when no source
	# files are available.
	def get_source_file( filename )
		return false unless is_loaded('_source_files')
		@source_files[ filename ]
	end
	
	# Detects (or re-detects when refresh is true) the tokens a unit test
	# shares with the loaded source files, marks them in the test's token
	# index, and caches + returns the intersection list.
	# Raises when no unit tests have been loaded.
	def get_source_intersections_for_unit_test( filename, refresh = false )
	  if(is_loaded('_unit_tests'))
	     if( @token_indices[filename].nil? || refresh == true )then
	       token_index = get_token_index_for_unit_test( filename, refresh )
	     else
	       token_index = @token_indices[filename]
	     end
	     # Include any caller-injected intersections for this file.
	     si = detect_source_intersections( token_index, @source_tokens, @si_injections[filename] )
	     @token_indices[filename] = token_index.mark_intersecting_tokens(si)
	     # puts "'#(unknown)' intersected #{si}"
	     return @source_intersections[filename] = si;
	  else raise "get_source_intersections_for_unit_test::>No unit tests have been loaded. Cannot detect intersections with source files" end
	end
	
	# Tokenizes the named unit test (reusing cached raw tokens unless refresh
	# is true), classifies the tokens, and caches + returns the TokenIndex.
	# Raises when no unit tests have been loaded.
	def get_token_index_for_unit_test( filename, refresh = false )
	  raise "get_token_list_for_unit_test::>No unit tests have been loaded. Cannot generate list of tokens" unless is_loaded('_unit_tests')
	  cleaned_tokens =
	    if @raw_tokens[filename].nil? || refresh == true
	      clean_tokens( generate_raw_tokens_for_unit_test( filename ) )
	    else
	      @raw_tokens[filename]
	    end
	  @token_indices[ filename ] = TokenIndex.new(classify_tokens(cleaned_tokens))
	end
	
	# Returns the loaded unit-test text for filename, or false when no unit
	# tests are available.
	def get_unit_test( filename )
		return false unless is_loaded('_unit_tests')
		@unit_tests[ filename ]
	end
	
	# Builds (or rebuilds when refresh is true) the XTP map for a unit test
	# from its token index and caches it under the filename.
	# NOTE(review): returns nil (not false/raise) when no unit tests are
	# loaded — there is no else branch; confirm callers expect that.
	def get_XTP_map_for_unit_test( filename, refresh = false )
	  if(is_loaded('_unit_tests'))then
	    # if( @source_intersections[filename].nil? || refresh == true )then
	    #   source_intersections = get_source_intersections_for_unit_test( filename, refresh )
	    # else
	    #   source_intersections = @source_intersections[filename]
	    # end
       if( @token_indices[filename].nil? || refresh == true )then
         token_index = get_token_index_for_unit_test( filename, refresh )
       else
         token_index = @token_indices[filename]
       end
       
      puts "I got a token_index for #(unknown)"
      # token_index = @token_indices[filename]
	    return @XTP_maps[filename] = {
         # 'si' => source_intersections,
         'xtp' => XTP_map.new( token_index ) #, source_intersections 
      }
	  end
	end
	
	# Saves an XTP map for every loaded unit test into
	# twikit_tokenizer_doc_store/, naming each file by a slash-stripped SHA256
	# base64 digest of the unit test's filename. Failures for individual files
	# are warned about and skipped. Returns true when indexing ran.
	def index_loaded_unit_tests()
	  if(is_loaded())
	    digester = Digest::SHA256.new
	    @unit_tests.each_key{|filename|
	      puts "Indexing #(unknown)"
	      begin
	       save_XTP_map_for_unit_test( filename, 'twikit_tokenizer_doc_store/' + digester.base64digest(filename).to_s.gsub(/\\|\//,'') + '.xtp')
	      rescue
	        warn "failed to save XTP map for unit test #(unknown)"
	      end
	    }
	    return true
	  else warn "index_loaded_unit_tests::Tokenizer can't index unit tests unless one unit test and one source file have been loaded" end
	end
	
	# Registers caller-supplied "injected" intersection tokens for a loaded
	# unit test and re-runs intersection detection so they take effect.
	# injected_si_tokens — array of raw token values to force as intersections.
	# Returns the refreshed intersections, or false when the unit test is not
	# loaded.
	def inject_intersections_for_unit_test(filename, injected_si_tokens)
	  if @unit_tests.key?(filename)
	    # Injected tokens get a default brightness scaled by the valence radius.
	    default_y = 1.to_f / @MAGIC_NUMBER.to_f
	    @si_injections[filename] = injected_si_tokens.map{|isi_val|
	      {'f'=>"injected",'v'=>isi_val,'y'=>default_y,'n'=>1}
	    }
	    get_source_intersections_for_unit_test(filename)
	  else
	    # Fixed typo in the warning message ("privded" -> "provided").
	    warn "Tokenizer could not find the unit test for injecting the provided tokens"
	    false
	  end
	end
	
	# Reports whether content is loaded. Any loaded unit test counts as
	# "loaded" regardless of `which`; otherwise '_unit_tests' and
	# '_source_files' check their own stores and anything else is false.
	def is_loaded( which = "_both" )
		return true if @unit_tests.length > 0 # && (@source_files.length > 0)
		case which
		when '_unit_tests'   then @unit_tests.length > 0
		when '_source_files' then @source_files.length > 0
		else false
		end
	end
	
	# Loads source files either from an already-read {filename => text} hash
	# (hash_already_loaded true) or by reading array_of_filenames from disk.
	# Files whose text matches a loaded unit test are dropped, the rest are
	# merged into @source_files and tokenized. Returns the number of new files.
	# NOTE(review): the `return false` statements after each raise are
	# unreachable — the raise fires first.
	def load_source_files( array_of_filenames, hash_already_loaded=false )
	  if(hash_already_loaded === true && (array_of_filenames.is_a? Hash))
	    new_source_files = array_of_filenames
	  else
  		if(!(new_source_files = TwikitFileReader.new().read_files( array_of_filenames )))
  		  raise "load_source_files::>TwikitFileReader returned false value";
  		  return false;
  		end
		end
		if( (new_source_files.kind_of?Hash) && new_source_files.length > 0) then
      # Never load the same text as both a unit test and a source file.
      new_source_files.delete_if{|filename, source_file|@unit_tests.has_value?(source_file)}
      @source_files = @source_files.merge( new_source_files )
      puts "TOKENIZING SOURCE FILES..."
      new_source_files.each{|filename, filestring|
        # @source_tokens[filename] = clean_tokens( generate_raw_tokens( filestring ) )
        @source_tokens[filename] = generate_raw_tokens( filestring )
        # puts "I PASSED SOURCE FILE\n#{filestring}\n AND RECEIVED SOURCE FILE TOKENS\n#{@source_tokens[filename]}"
      }
      return new_source_files.length;
	  else raise "load_source_files::>New source files is empty hash or a non-hash value."; return false end
  end
	
  # Reads every file matching pattern under path (optionally recursively) and
  # loads them as source files. Returns the number of files read; raises when
  # the reader fails.
  def load_source_files_in_directory( path, pattern, recursion=nil)
    new_source_files = TwikitFileReader.new().read_directory( path, pattern, recursion)
    raise "load_source_files_in_directory::>TwikitFileReader returned false value" unless new_source_files
    load_source_files( new_source_files, true )
    new_source_files.length
  end
	
	# Reads the given files and merges them into @unit_tests, dropping any
	# whose text matches an already-loaded source file. Returns the number of
	# new unit tests; raises when the reader result is unusable.
	def load_unit_tests( array_of_files )
		new_unit_tests = TwikitFileReader.new().read_files( array_of_files )
		raise "load_unit_tests::>TwikitFileReader returned false value" unless new_unit_tests
		raise "load_unit_tests::>TwikitFileReader returned non-hash value" unless new_unit_tests.kind_of?(Hash)
		raise "load_unit_tests::>TwikitFileReader returned an empty hash." unless new_unit_tests.length > 0
		new_unit_tests.delete_if{|filename, unit_test|@source_files.has_value?(unit_test)}
		@unit_tests = @unit_tests.merge( new_unit_tests )
		new_unit_tests.length
	end
  
  # Reads every file matching pattern under path (optionally recursively) and
  # merges them into @unit_tests, dropping any whose text matches a loaded
  # source file. Returns the number of new unit tests; raises on a bad read.
  def load_unit_tests_in_directory( path, pattern, recursion=nil)
    new_unit_tests = TwikitFileReader.new().read_directory( path, pattern, recursion)
    raise "load_unit_tests_in_directory::>TwikitFileReader returned false value" unless new_unit_tests
    raise "load_unit_tests_in_directory::>TwikitFileReader returned non-hash value" unless new_unit_tests.kind_of?(Hash)
    raise "load_unit_tests_in_directory::>TwikitFileReader returned an empty hash." unless new_unit_tests.length > 0
    new_unit_tests.delete_if{|filename, unit_test|@source_files.has_value?(unit_test)}
    @unit_tests = @unit_tests.merge( new_unit_tests )
    puts "Unit Tests::#{@unit_tests.keys}"
    new_unit_tests.length
  end
	
	# Serializes the unit test's XTP map to JSON and writes it to save_path
	# (or, by default, the unit-test filename with its extension replaced by
	# .xtp). Returns the written filename, or false when the write fails.
	# Raises when no unit tests are loaded.
	def save_XTP_map_for_unit_test( filename, save_path=nil )
    if(is_loaded())then
      if( @XTP_maps[filename].nil? )then
        xtp_map = get_XTP_map_for_unit_test( filename )
      else
        xtp_map = @XTP_maps[filename]
      end
      if(save_path.nil?)
        # Swap the final 1-4 character extension for .xtp.
        new_filename = filename.slice(0...filename.index(/\..{1,4}$/)) + ".xtp"
      else
        new_filename = save_path
      end
      file_reader = TwikitFileReader.new
      puts "XTP Map::#{xtp_map}"
      puts "XTP Folded::" << xtp_map_str = JSON.generate({filename=>xtp_map})
      if(file_reader.write_files({new_filename=>xtp_map_str}))then return new_filename
      else return false end
    else raise "save_XTP_map_for_unit_test::>No unit tests have been loaded. Cannot generate list of tokens"  
    end
		#if( @XTP_maps.length > 0 ) then
		#else raise "save_maps_for_source_files::>Map has not been created. There is nothing to save." end
	end
	
	
	
	
	
	private # all methods that follow this will be private
	
	# Splits a token containing a quoted string into up to three parts:
	# {'head' => text before the quote, 'string' => the quoted literal,
	#  'tail' => leftover text after it}. The quote character is whichever of
	# ' / " appears first. Returns false for non-String input.
	# NOTE(review): stringy_token is mutated in place via slice!.
	def break_up_stringy_token( stringy_token, specialo = false )
	  if(stringy_token.is_a? String)then
  	  qt = stringy_token.count('"')==0 ? "'" : ( stringy_token.count("'")==0 ? '"' : (stringy_token.index('"')<stringy_token.index("'")?'"':"'") ) 
      #Excise any leading tokens
      string_start = stringy_token.index(qt)
      string_end = stringy_token.rindex(qt)+1
      
      if(string_start > 0)then
        string_leading_token = stringy_token.slice!(0...string_start);
      end
      
      # Find the closing quote, skipping escaped quotes (odd backslash runs).
      if(qt=="'")then
        string_end = stringy_token.index(/(?:^|\G|[^\\])(\\\\)*'/,1)
        if( string_end.nil? )then # LEAKY BANDAGE::
          string_end = stringy_token.length 
        else string_end += 1 
        end
        # p stringy_token
        # p string_end
      else if(qt=='"')
        string_end = stringy_token.index(/(?:^|\G|[^\\])(\\\\)*["]/,1)
        if( string_end.nil? )then # LEAKY BANDAGE::
          string_end = stringy_token.length 
        else string_end += 1 end
      end end
      
      this_string_token = stringy_token.slice!(0..string_end)
      
      if(stringy_token.length > 0)then
        string_trailing_token = stringy_token
      end
      # FOR SOME REASON CLEANING IS RETAINING A TRAILING ' CHARACTER
      if (string_trailing_token == "'" || string_trailing_token == '"')then string_trailing_token = nil end
        
      puts "That's my stringy tail #{string_trailing_token}" if specialo
      return {'head'=>string_leading_token,'string'=>this_string_token,'tail'=>string_trailing_token}
    else return false end
	end
	
  
  # Assigns each cleaned token a type and id, reusing the classification of
  # any previously seen identical value. Types/id prefixes:
  #   Literal (l#): numerics, true/false, quoted strings
  #   Assertion (a#), Generic (g#): per the pattern tables
  #   Object (t#): everything else
  # Returns an array of {'index','token_value','type','id'} hashes; raises on
  # empty input. NOTE(review): a nil token gets no 'type'/'id' keys.
  def classify_tokens( cleaned_tokens_array )
    if(cleaned_tokens_array.length > 0)then
      classified_tokens_array = []
      a = g = l = t = 0
      cleaned_tokens_array.each_with_index{ |cleaned_token,index|
        if(prev_index = classified_tokens_array.index{|classified| classified['token_value']==cleaned_token})then
         prev_token = classified_tokens_array.at(prev_index)
         classified_tokens_array.push({'index'=>index,'token_value'=>prev_token['token_value'],'type'=>prev_token['type'],'id'=>prev_token['id']})
        else
          classified_token = {
            'index'=>index,
            'token_value'=>cleaned_token 
          }
          if((cleaned_token.nil?))then #do nothing..
          else if((cleaned_token.is_a? Numeric) == true)then classified_token['type'] = "Literal"; classified_token['id']="l"+(l+=1).to_s
          else if((cleaned_token.downcase=="true" || cleaned_token.downcase=="false"))then classified_token['type'] = "Literal"; classified_token['id']="l"+(l+=1).to_s
          else 
            # Count unescaped quote characters to spot string literals.
            count_quotes = cleaned_token.scan(/(?:^|\G|[^\\])(\\\\)*["']/x).size;
            if(count_quotes > 0)then classified_token['type'] = "Literal"; classified_token['id']="l"+(l+=1).to_s
            else if(is_assertion_token(cleaned_token))then classified_token['type'] = "Assertion"; classified_token['id']="a"+(a+=1).to_s
            else if(is_generic_token(cleaned_token)) then classified_token['type'] = "Generic"; classified_token['id']="g"+(g+=1).to_s
            else classified_token['type'] = "Object"; classified_token['id']="t"+(t+=1).to_s end end end
          end end end
          classified_tokens_array.push(classified_token)
        end
      }
      return classified_tokens_array
    else raise "classify_tokens> Cannot classify_tokens without tokens..." end
  end
  
  	
  # Consumes raw_tokens (destructively) and returns a flat array of cleaned
  # tokens: string literals split across whitespace are re-joined until their
  # quotes balance, then each token is rinsed (delimited/split) and any
  # unbalanced remnants are pushed back onto the queue for another pass.
  # NOTE(review): the GJKSLJDKFJLKJSNDF / CAPHGKSDLFKHSJDKF sentinels and the
  # trailing puts are leftover debug scaffolding.
  def clean_tokens( raw_tokens )
# May need to devise special handlers to recognize regular expression Literals, just like string Literals.. YAGNI?
# This might be as simple as just treating the '/' character just as I treat the quote characters already..
turn_it_on = false
count_it_on = 0
    if(raw_tokens.length > 0)then
      cleaned_tokens = []
      while(raw_tokens.length > 0)
        this_token = raw_tokens[0]
        turn_it_on = true if(this_token == "GJKSLJDKFJLKJSNDF")
        if(turn_it_on and count_it_on < 8)then puts "I'm ready to clean #{this_token}"; count_it_on+=1 end
        if(this_token == "CAPHGKSDLFKHSJDKF")then
          puts "Hell again!"
          raise "Hell again\n\nalksdfjlkasjdlfkjsklfdjkafs\nlskjdfasjldfka\nlkajsdflk!"
        end
        if(!(this_token.nil?))then
          #Handle string literals
          if(this_token.count('"') > 0 || this_token.count("'") > 0)then
            qt = this_token.count('"')==0 ? "'" : ( this_token.count("'")==0 ? '"' : (this_token.index('"')<this_token.index("'")?'"':"'") ) 
            j = 1
            # Attempt to balance the number of initial quotes excluding escaped quotes
            if(qt=="'")then
              while( (this_token.scan(/(?:^|\G|[^\\])(\\\\)*'/).size.modulo(2)>0) && (j<raw_tokens.length) )do
                if(raw_tokens[j].nil?)then raise("Token value should not be Nil. May be array indexing issue.") end
                # Recombine string literals
                this_token << " " << raw_tokens[j]
                raw_tokens.slice!(j)
              end
            else if(qt=='"')then
              while( (this_token.scan(/(?:^|\G|[^\\])(\\\\)*(")/).size.modulo(2)>0) && (j<raw_tokens.length) )do
                if(raw_tokens[j].nil?)then raise("Token value should not be Nil. May be array indexing issue.") end
                # Recombine string literals
                this_token << " " << raw_tokens[j]
                raw_tokens.slice!(j)
              end
            end end
          end
          this_clean_children = []
          this_unclean_children = []
          if(rd_tokens = rinse_tokens( this_token, (turn_it_on && count_it_on < 8)))then 
            # if(turn_it_on and count_it_on < 50)then puts "JOmezius" << this_token end
            rd_tokens.each{|rinsed_token| 
              if(rinsed_token.is_a? Hash)then
                # p rinsed_token
                this_unclean_children.push(rinsed_token['unbalanced'])
              else
                this_clean_children.push(rinsed_token)
              end
            }
          end
        else raise("Token value should not be Nil! May be array indexing issue.") end
        if(this_clean_children.length > 0)then this_clean_children.each do |token| if(!(token.nil?))then cleaned_tokens.push(token) end end end
        raw_tokens.slice!(0)
        # Unbalanced remnants go back on the front of the queue, in order.
        if(this_unclean_children.length > 0)then this_unclean_children.reverse.each{|unclean_token|if(!(unclean_token.nil?))then raw_tokens.unshift(unclean_token) end} end
      end
      puts "Able?"
      return cleaned_tokens
    else  warn "clean_tokens::>Proper Array of raw tokens not provided"; return [] end
  end
  
  # Finds unit-test Object tokens that also appear in the loaded source
  # files. For each match records frequency stats: n (occurrences), y
  # (occurrences / source length), and d25/d50/d75 (fraction of occurrences
  # in the first quarter/half/three-quarters of the source). Keeps only the
  # top @MAGIC_NUMBER + 1 by y, then appends any injected intersections.
  # Raises when the token index is empty.
  def detect_source_intersections( token_index, source_tokens, injections=nil )
   if(token_index.length > 0) then
    source_intersections = Array.new
    si_count = -1
    source_tokens.each{ |filename, cleaned_source_tokens|
      # cleaned_source_tokens = clean_tokens( generate_raw_tokens_for_source_file( filename ) )
      cleaned_source_tokens.uniq.each{|source_file_token|
        token_index.each{|unit_test_token|
           if((unit_test_token['token_value'] == source_file_token) && (unit_test_token['type']=="Object"))then
              # puts "UTT::#{unit_test_token['value']} v. SFT::#{source_file_token}"
              val = unit_test_token['token_value']
              si_count+=1
              id = si_token_id( si_count )
              n = cleaned_source_tokens.count(val)
              y = (n.to_f/cleaned_source_tokens.length.to_f).round(4)
              d25 = cleaned_source_tokens[0..(cleaned_source_tokens.length*0.25).floor].count(val).to_f / n.to_f
              d50 = cleaned_source_tokens[0..(cleaned_source_tokens.length*0.50).floor].count(val).to_f / n.to_f
              d75 = cleaned_source_tokens[0..(cleaned_source_tokens.length*0.75).floor].count(val).to_f / n.to_f
              source_intersections.push({'f'=>filename,'v'=>val,'id'=>id,'n'=>n,'y'=>y,'d25'=>d25,'d50'=>d50,'d75'=>d75})
           end
        }
      }
    }
    # Dedupe by value and keep the brightest few.
    source_intersections = source_intersections.uniq{|si|si['v']}.to_a.sort!{|a, b|
      a['y'] <=> b['y'] 
    }.reverse![0..@MAGIC_NUMBER]
    if(!injections.nil?)
      injections.each{|injected_si|
        injected_si['id'] = si_token_id(source_intersections.length)
        source_intersections << injected_si
      }
    end  
    return source_intersections
   else raise "detect_source_intersections::>Token Index is not an Array" end
  end
  
  # Splits a file's text into whitespace-delimited raw tokens.
  # Raises when no text is supplied.
  def generate_raw_tokens( file_string )
    raise "No file string provided for generating raw tokens'" if file_string.nil?
    file_string.split(" ")
  end
  
  # Splits the named loaded unit test into whitespace-delimited raw tokens,
  # caching them in @raw_tokens. Raises when the file was never loaded.
  def generate_raw_tokens_for_unit_test( unit_test_filename )
    unit_test = @unit_tests[unit_test_filename]
    raise "No unit test loaded with filename '" + unit_test_filename.to_s + "'" if unit_test.nil?
    @raw_tokens[unit_test_filename] = unit_test.split(" ")
  end
  
  # Builds an XTP_map from a populated token index. source_intersections is
  # accepted for interface compatibility but unused here.
  # Raises when the token index is empty.
  def generate_XTP_map( token_index, source_intersections )
    raise "generate_XTP_map::>Length of Token Index is zero. Cannot generate a map" unless token_index.length > 0
    XTP_map.new(token_index)
  end
  
  # True when object_token is a String matching any pattern in
  # @assertions_array; false for non-strings or when no patterns are
  # configured. (Manual accumulator loop replaced with Enumerable#any?.)
  # Want to also use this method to determine whether the assertion
  # orthogonality is positive or negative... YAGNI.
  def is_assertion_token( object_token )
    return false unless object_token.is_a?(String) && @assertions_array.length > 0
    @assertions_array.any? { |assertion| object_token =~ assertion }
  end
  
  # Reports whether object_token matches any configured generic-token pattern.
  #
  # object_token - candidate token; anything that is not exactly a String
  #                yields false.
  #
  # Returns true when at least one regex in @generic_tokens_array matches,
  # false otherwise (including when no generic patterns are loaded).
  def is_generic_token( object_token )
    # instance_of? mirrors the original `class == String` exactly (no subclasses).
    return false unless object_token.instance_of?(String) && !@generic_tokens_array.empty?
    @generic_tokens_array.any? { |generic| object_token =~ generic }
  end

  # Recursively "rinses" one raw token into zero or more clean tokens:
  # - splits on punctuation delimiters,
  # - coerces purely numeric pieces to Integer / Float,
  # - breaks dotted identifiers (a.b.c) into their parts,
  # - excises quoted string literals via break_up_stringy_token.
  #
  # dirty_token - the raw token String (nil returns false).
  # specialo    - debug flag threaded into break_up_stringy_token and the
  #               recursive tail rinse; only feeds the commented-out traces.
  #
  # Returns an Array of rinsed tokens (Strings / Integers / Floats, plus
  # {'unbalanced' => token} markers and the excised string piece — shape of
  # that piece is defined by break_up_stringy_token, not visible here), or
  # false when nothing survives or dirty_token is nil.
  def rinse_tokens( dirty_token, specialo = false ) # Delimit tokens with unwanted characters, excise strings, etc..
    if(!(dirty_token.nil?))then
      rinsed_tokens = [];
      # qc counts unescaped quote characters (" or ') — even backslash runs
      # before a quote don't escape it, hence the (\\\\)* group.
      qc = dirty_token.scan(/(?:^|\G|[^\\])(\\\\)*["']/x).size;
      # puts "QC:#{qc}=>TOKEN'#{dirty_token}'"
      if(qc==0)then
        # Quote-free path: plain delimiter split plus numeric/dotted handling.
        # Should handle doubles, ints an dot-separated objects/methods here...
        g_tokens = dirty_token.split(/[,;\(\):\[\]'"\<\>\=\+\-\{\}\~\`!#%@&\*\?\|\^\\]/)
        g_tokens.each_with_index{ |g_token,m|
          if(g_token.length > 0)then
            if(g_token.index('.'))then
                # Digits-and-dots only => treat as a float literal.
                if(g_token =~ /^[(0-9)\.]*$/)then
                  if(g_token.length > 0)then g_tokens[m] = g_token.to_f end
                else
                  # NOTE(review): g_tokens is mutated (slice!/insert) while
                  # each_with_index is iterating it; the dotted parts are
                  # spliced in at position m and the iteration continues over
                  # them. Order-sensitive — do not "clean up" without tests.
                  g_tokens.slice!(m)
                  g_token_parts = g_token.split('.')
                  g_token_parts.each_with_index{ |g_token_part,n|
                    g_tokens.insert(m+n,g_token_part)
                  }
                end
            # Digits only => integer literal.
            else if(g_token =~ /^[(0-9)]*$/)then g_tokens[m] = g_token.to_i end
            end
          end
       }
       # Keep every numeric token and every non-empty string token.
       g_tokens.each{ |g_token|
         if(g_token.class!=String || g_token.length>0)then rinsed_tokens.push(g_token) end
       }
      else # If balancable, will attempt to excise string and rinse the rest (recursively). If not balancable, rinse the quote character itself. 
        if(qc === 1)then #Token is not balancable here, needs to be flagged for cleaner to attempt to balance it..
          rinsed_tokens.push({'unbalanced'=>dirty_token})
        else #Token may be balancable, will excise leading tokens, first balanced set, and trailers, then rinse each (recursively).
      #puts "Oh, so you want me to clean #{dirty_token}" if specialo
          token_pieces = break_up_stringy_token( dirty_token, specialo )
          # NOTE(review): the head is rinsed WITHOUT specialo while the tail
          # below passes it through — looks accidental; confirm before relying
          # on the debug traces for head pieces.
          if(rd_tokens = rinse_tokens( token_pieces['head'] ))then 
            rd_tokens.each do |rinsed_token| if(rinsed_token)then 
              # p "Head '#{token_pieces['head']}'" if(!(rinsed_token.class==Fixnum||rinsed_token.class==String||rinsed_token.class==Float)); 
              rinsed_tokens.push(rinsed_token) end end
          end
          # The excised string piece is kept as-is between head and tail.
          rinsed_tokens.push( token_pieces['string'] )
          #puts "I caught myself #{token_pieces}" if specialo
          if(rd_tokens = rinse_tokens( token_pieces['tail'], specialo ))then 
            rd_tokens.each do |rinsed_token| 
          # puts "I caught my tail piece #{token_pieces['tail']} :: #{rinsed_token}" if specialo
              if(rinsed_token)then 
              # p "Tail '#{token_pieces['tail']}'" if(!(rinsed_token.class==Fixnum||rinsed_token.class==String||rinsed_token.class==Float)); 
              rinsed_tokens.push(rinsed_token) end end
          end
        end
      end
      if(rinsed_tokens.length > 0)then return rinsed_tokens
      else return false end
   else
     return false
   end
  end
  
  # Encodes token_num as a 't'-prefixed base-26 identifier using digits A-Z
  # (e.g. 0 => "t", 1 => "tB", 25 => "tZ", 26 => "tBA").
  #
  # token_num - non-negative token ordinal (Integer, or anything with to_i).
  #
  # Returns the id String. Note 'A' is the zero digit and 0 maps to bare "t",
  # so this is plain positional base-26, not a bijective encoding.
  def si_token_id( token_num )
    si_keys = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    # Integer arithmetic throughout: the previous Float-based version
    # (to_f + float modulo) lost precision on large ordinals, which is what
    # the old begin/rescue "CRASH REPORT" scaffolding was papering over.
    remainder = token_num.to_i
    id = ""
    while remainder > 0
      remainder, digit = remainder.divmod(26)
      id = si_keys[digit] + id
    end
    return 't' + id
  end
  
  # Aggregates "sub-branch" scores out of a trunk structure shaped as
  # branch_tag_id => { occurrence => { leaf_tag_id => score-or-score-Array } }.
  # Every leaf tag that is NOT itself a main branch tag accumulates the
  # absolute value of its score(s) across all branches and occurrences.
  #
  # core_trunk - the trunk Hash described above.
  #
  # Returns a Hash of leaf_tag_id => summed absolute score, or an empty
  # Array when core_trunk is not a Hash. NOTE(review): that asymmetric
  # return type is preserved for existing callers.
  def sub_branches( core_trunk )
    return [] unless core_trunk.is_a? Hash
    # keys/include? replace the old each_key-push array build and the
    # index{|id| id == leaf_tag_id} membership test — same semantics.
    main_branch_tag_ids = core_trunk.keys
    sub_branches = Hash.new
    core_trunk.each_value{|branch|
      branch.each_value{|occurence|
        occurence.each_pair{|leaf_tag_id, leaf_scores|
          next if main_branch_tag_ids.include?(leaf_tag_id)
          # A leaf may carry one score or an Array of sub-scores.
          score = if leaf_scores.is_a?(Array)
                    leaf_scores.reduce(0) { |sum, sub_score| sum + sub_score.abs }
                  else
                    leaf_scores.abs
                  end
          sub_branches[leaf_tag_id] = sub_branches.fetch(leaf_tag_id, 0) + score
        }
      }
    }
    return sub_branches
  end
  
  # For each candidate branch, collapses its valence map down to the list of
  # tag ids that are already established parents (keys of core_trunk) when
  # any are hit; a branch touching no parent keeps its full valence map.
  #
  # core_trunk   - Hash whose keys are the established parent tag ids.
  # new_branches - Hash of center tag id => valence map (Hash keyed by tag id).
  #
  # Returns a Hash of center => Array-of-parent-ids (when any parent was hit)
  # or the untouched valence map (when none were).
  def unmap_parents(core_trunk, new_branches)
    remapped = {}
    new_branches.each_pair do |center, valence_map|
      hit_parents = valence_map.keys.select { |tag_id| core_trunk.has_key?(tag_id) }
      remapped[center] = hit_parents.empty? ? valence_map : hit_parents
    end
    remapped
  end
  
end

