#!/usr/local/bin/lua
-- Script for training OSBF-Bayes databases for spam filtering.
-- Usage: toer.lua <shuffles_dir> [shuffles_base_name] - see details below

-- It can train using single or double sided thick threshold training
-- (SSTTT or DSTTT) depending on the values of "threshold" and
-- "unlearn_threshold":

--   threshold > 0 and unlearn_threshold = 0 => SSTTT
--   threshold > 0 and unlearn_threshold > 0 => DSTTT

-- It can also train using TOE (Train On Error), if both "threshold"
-- and "unlearn_threshold" are set to 0, but normally SSTTT and DSTTT
-- are much better than TOE.
--
-- See the on-line book "The CRM114 Discriminator Revealed!" by
-- William S. Yerazunis for the definitions of TOE, SSTTT and DSTTT.
-- There's a link to the book on http://crm114.sourceforge.net.

-- The DSTTT method used in this script is a variation of that
-- definition. TOER means TOE + Reinforcements, another name for SSTTT,
-- this name was given to this script before it was capable of DSTTT
-- and before the TOER method was re-baptized as SSTTT.

-- Fidelis Assis 

---------------------------------------------------------------------- 
-- This program is free software. You can use it as you want.
-- As usual, without warranty of any kind. Use it at your own risk.
---------------------------------------------------------------------- 

--[[------------------------------------------------------------------

How to use:
You must prepare a corpus with spam and ham messages before using this
script. Spam message filenames must have "spam" in their names and,
of course, those which are ham must not. A simple way to prepare the
files is to create a dir for the tests, e.g. "osbf-tests", and 2 other
dirs inside, "spam" and "ham".  Populate "spam" and "ham" with the
messages, 1 per file, and create in "osbf-tests" a listing of all the
message files, in a random order. You must create at least one listing
and the name of the first one must end in "01". The second, if any, in
"02", and so on. The first ending digit indicates the shuffle group:
"0", "1", etc. The other digits indicate the order of the shuffle in
the group. 

This script reads and classifies the messages in the order they appear
in the listings. A learning, or a reinforcement, is done whenever
the classification is wrong (TOE) or the absolute score (|pR|) is
lower than the defined threshold. If you want pure TOE, no
reinforcements, set "threshold" to 0 in the configurable part below.

For each listing, the database files are recreated or reused
depending on the value of "preserve_db".

After a listing is used, 3 files are created: a training log, a
database statistics report and a training statistics report.

Example using SpamAssassin public corpus with 4147 messages:

- Download the files:
  http://spamassassin.org/publiccorpus/20030228_easy_ham.tar.bz2
  http://spamassassin.org/publiccorpus/20030228_hard_ham.tar.bz2
  http://spamassassin.org/publiccorpus/20030228_spam_2.tar.bz2

- Untar them:
  tar xvjf 20030228_easy_ham.tar.bz2
  tar xvjf 20030228_hard_ham.tar.bz2
  tar xvjf 20030228_spam_2.tar.bz2

- Create "shuffle01", a "random" listing:
  This command creates a "random" file good enough for testing:
  $ find spam_2 easy_ham hard_ham -type f |
    sort --key=2.10,2.13 -t. > shuffle01

$ head -5 shuffle01
spam_2/00045.c1a84780700090224ce6ab0014b20183
spam_2/00549.7520259c1001cefa09ddd6aaef814287
spam_2/01238.32c2cef2a001f81d237017d243bad8e4
easy_ham/01380.0a97683490023a8f59c230f5057d15bd
spam_2/01275.9ce1257b70028a741bd7877f0bc2ba0e

- Execute "lua toer.lua <shuffles_dir> &"

---------------------------------------------------------------------- 

                        - Report samples - 

- Training log:
$ head -5 toer-lua_training-log_t20_u3.7_b94321_r9_shuffle01
spam_2/00045.c1a84780700090224ce6ab0014b20183;0.0000;1
spam_2/00549.7520259c1001cefa09ddd6aaef814287;-1.4423;r
spam_2/01238.32c2cef2a001f81d237017d243bad8e4;-30.0566;0
easy_ham/01380.0a97683490023a8f59c230f5057d15bd;-26.2352;1
spam_2/01275.9ce1257b70028a741bd7877f0bc2ba0e;24.6652;1
...

Each line has 3 fields separated by ";":
 - message filename
 - score (pR)
 - result: '0' => OK, '1' => error, 'r' => reinforcement
           'R' => double reinforcement, positive and negative

--------------------------------------------------------------------- 
- Training statistics:
Message corpus
 Hams:                        2750
 Spams:                       1397
 Total messages:              4147

Training (OSBF-Bayes)
 Threshold:                     10
 Trainings on error:            39
 Reinforcements:               209
 Total learnings:              248
 Duration (sec):                16

Performance in the final 500 messages (testset)
 Hams in testset:              324
 Spams in testset:             176
 False positives:                0
 False negatives:                1
 Total errors in testset:        1
 Reinforcements in testset:     12
 Spam recall (%):               99.43
 Spam precision (%):           100.00
 Accuracy (%):                  99.80

---------------------------------------------------------------------- 
Database statistics:

Statistics for spam.cfc
Database version:                    OSBF-Bayes
Total buckets in database:                94321
Buckets used (%):                          84.7
Trainings:                                  122
Bucket size (bytes):                         12
Header size (bytes):                       4092
Number of chains:                          6520
Max chain len (buckets):                    150
Average chain length (buckets):            12.3
--]]----------------------------------------------------------------

-- start of program

local osbf = require "osbf"  -- OSBF-Bayes classification module
local string = string        -- localize stdlib tables for faster access
local math = math

-- Minimum absolute score (|pR|) a correct classification must reach,
-- otherwise a reinforcement is triggered. 0 selects pure TOE training.
local threshold = 20

-- 0 selects SSTTT; a positive value selects DSTTT and is the minimum
-- score improvement a reinforcement must produce, otherwise an
-- unlearning from the opposite class is done. When DSTTT is used, if
-- the previous training was due to a misclassification, the unlearning
-- is done whenever the new pR is still within threshold.
local unlearn_threshold = 3

-- dbset is the set of single class databases used for classification.
-- "classes" is split in 2 sublists and "ncfs" is the number of classes
-- in the first sublist. Here, the first sublist is {"nonspam.cfc"} and
-- the second is {"spam.cfc"}.
dbset = {
  classes    = {"nonspam.cfc", "spam.cfc"},
  ncfs       = 1,
  delimiters = "" -- no extra token delimiters
}

nonspam_index      = 1       -- index of the nonspam db in "classes"
spam_index         = 2       -- index of the spam db in "classes"
num_buckets        = 94321   -- buckets per database
preserve_db        = false   -- preserve databases between shuffles?
classify_flags     = 0       -- flags for classification (not used)
learn_flags        = 0       -- flags for learning (not used)
max_text_size      = 0       -- 0 means the full document is used
min_p_ratio        = 1       -- minimum probability ratio over the
                             -- classes a feature must have not to be
                             -- ignored; 1 means ignore nothing
spamstring         = "spam"  -- substring in a msg filename marking spam
shuffles_base_name = "shuffle"
testsize           = 500     -- number of messages in the testset - last messages
train_in_testset   = true
in_testset         = false   -- initial value
log_prefix         = "toer-lua"

-------------------------------------------------------------------------

-- Receives a file name and returns the number of lines in it.
-- Unlike the previous gsub-based count, this iterates line by line
-- (no whole-file buffer) and also counts a final line that lacks a
-- trailing newline.
function count_lines(file)
  local f = assert(io.open(file, "r"))
  local num_lines = 0
  for _ in f:lines() do
    num_lines = num_lines + 1
  end
  f:close()
  return num_lines
end

-------------------------------------------------------------------------

-- receives a single class database filename and returns
-- a string with a statistics report of the database
function dbfile_stats (dbfile)
    local OSBF_Bayes_db_version = 5 -- OSBF-Bayes database identifier
    local report = "-- Statistics for " .. dbfile .. "\n"
    local version = "OSBF-Bayes"
    -- NOTE: made local; it was an accidental global before
    local stats_lua = osbf.stats(dbfile)
    if (stats_lua.version == OSBF_Bayes_db_version) then
      report = report .. string.format(
        "%-35s%12s\n%-35s%12d\n%-35s%12.1f\n%-35s%12d\n%-35s%12d\n%-35s%12d\n",
        "Database version:", version,
        "Total buckets in database:", stats_lua.buckets,
        "Buckets used (%):", stats_lua.use * 100,
        "Trainings:", stats_lua.learnings,
        "Bucket size (bytes):", stats_lua.bucket_size,
        "Header size (bytes):", stats_lua.header_size)
      -- fix: the original format string had only 3 pairs for 4 label/value
      -- pairs, silently dropping "Max bucket displacement"; the average
      -- chain length is fractional, so format it with %.1f (the sample
      -- report in the header shows e.g. "12.3")
      report = report .. string.format(
        "%-35s%12d\n%-35s%12d\n%-35s%12.1f\n%-35s%12d\n\n",
        "Number of chains:", stats_lua.chains,
        "Max chain len (buckets):", stats_lua.max_chain,
        "Average chain length (buckets):", stats_lua.avg_chain,
        "Max bucket displacement:", stats_lua.max_displacement)
    else
      report = report .. string.format("%-35s%12s\n", "Database version:",
          "Unknown")
    end

    return report
end

-------------------------------------------------------------------------

-- normalize the spam marker for case-insensitive filename matching
spamstring = string.lower(spamstring)

-- first CLI argument: directory with the shuffle listings; defaults to
-- "./" and always gets a trailing slash
shuffles_base_dir = arg[1]
if shuffles_base_dir == nil or shuffles_base_dir == "" then
  shuffles_base_dir = "./"
elseif string.sub(shuffles_base_dir, -1) ~= "/" then
  shuffles_base_dir = shuffles_base_dir .. "/"
end

-- optional second CLI argument overrides the listings' base name
shuffles_base_name = arg[2] or shuffles_base_name

-- start from clean databases
osbf.remove_db(dbset.classes)
assert(osbf.create_db(dbset.classes, num_buckets))

-- for each group of shuffles
for _, ts in ipairs({"0"}) do -- you may want to add more groups
  -- for each shuffle in the group - you may want to add more shuffles 
  for _, k in ipairs({"1", "2", "3", "4", "5", "6", "7", "8", "9", "10"}) do

    shuffle = shuffles_base_name .. ts .. k
    suffix = string.format("t%d_u%g_b%d_r%d_%s", threshold, unlearn_threshold,
				num_buckets, min_p_ratio, shuffle)
    training_log = log_prefix .. "_training-log_" .. suffix
    training_stats_report = log_prefix .. "_training-stats_" .. suffix
    db_stats_report = log_prefix .. "_db-stats_" .. suffix

    if (not preserve_db) then
      osbf.remove_db(dbset.classes)
      assert(osbf.create_db(dbset.classes, num_buckets))
    end

    local num_msgs, hams, spams, hams_test, spams_test = 0, 0, 0, 0, 0
    local false_positives, false_negatives, trainings,
    	  reinforcements = 0, 0, 0, 0
    local false_positives_test, false_negatives_test,
    	  reinforcements_test, trainings_test = 0, 0, 0, 0
    local total_messages = count_lines(shuffles_base_dir .. shuffle)
    local start_of_test = total_messages - testsize + 1

    ini = os.time()

    log = assert(io.open(training_log, "w"))
    s = assert(io.open(shuffles_base_dir .. shuffle, "r"))
    for line in s:lines() do
      local msg = assert(io.open(shuffles_base_dir .. line, "r"))
      local text = msg:read("*all")
      msg:close()

      -- cut the text on a space char, if 
      if max_text_size > 0 then
        text = string.sub(text, 1, max_text_size)
        text = string.match(text, "^(.*)%s%S*$")
      end
      -- append the first 4 tokens
      -- empirical tests show that this improves accuracy
      text = text .. " " .. string.match(text, "^%s*%S+%s+%S+%s+%S+%s+%S+")

      local pR, p_array, i_pmax = osbf.classify(
	  text, dbset, classify_flags, min_p_ratio)
      if (pR == nil) then
        error(p_array)
      end

      num_msgs = num_msgs + 1
      in_testset = num_msgs >= start_of_test

      if (string.find(string.lower(line), spamstring)) then
	spams = spams + 1
	if in_testset then
	  spams_test = spams_test + 1
	end
	-- check classification
        if (pR >= 0) then
	  -- wrog classification, false negative
          result = "1"
	  false_negatives = false_negatives + 1
	  if not in_testset or train_in_testset then
            assert(osbf.learn(text, dbset, spam_index, learn_flags))
  	    trainings = trainings + 1

	    if unlearn_threshold > 0 then
              local new_pR, p_array, i_pmax = osbf.classify(
	                  text, dbset, classify_flags, min_p_ratio)
              if (new_pR == nil) then
                error(p_array)
              else
	        if new_pR > -threshold then
                  assert(osbf.unlearn(text, dbset, nonspam_index, learn_flags))
	        end
              end
	    end

	  end
	  if in_testset then
	    false_negatives_test = false_negatives_test + 1
	    if train_in_testset then
	      trainings_test = trainings_test + 1
	    end
	  end
        else
	  -- correctly classified as spam. check threshold
	  if (pR > -threshold) then
	    -- within unsure zone
	    if not in_testset or train_in_testset then
	      -- do reinforcement
	      assert(osbf.learn( text, dbset, spam_index, learn_flags))

  	      result = "r"
	      if unlearn_threshold > 0 then
                local new_pR, p_array, i_pmax = osbf.classify(
	                    text, dbset, classify_flags, min_p_ratio)
                if (new_pR == nil) then
                  error(p_array)
                else
	          if new_pR > -threshold and
		     (new_pR - pR) > -unlearn_threshold then
                    assert(osbf.unlearn(
			  text, dbset, nonspam_index, learn_flags))
  	            result = "R"
	          end
                end
	      end

	      reinforcements = reinforcements + 1
	      if in_testset then
	        reinforcements_test = reinforcements_test + 1
	      end
	    end
	  else
	    -- OK, out of unsure zone
            result = "0"
          end
        end
      else
	hams = hams + 1
	if in_testset then
	  hams_test = hams_test + 1
	end
	-- check classification
        if (pR >= 0) then
	  -- correctly classified as ham. check threshold
	  if (pR < threshold) then
	    -- within unsure zone
	    if not in_testset or train_in_testset then
	      -- do reinforcement
	      assert(osbf.learn( text, dbset, nonspam_index, learn_flags))

  	      result = "r"
	      if unlearn_threshold > 0 then
                local new_pR, p_array, i_pmax = osbf.classify(
	                    text, dbset, classify_flags, min_p_ratio)
                if (new_pR == nil) then
                  error(p_array)
                else
	          if pR < threshold and
		     (new_pR - pR) < unlearn_threshold then
                    assert(osbf.unlearn(text, dbset, spam_index, learn_flags))
  	            result = "R"
	          end
                end
	      end

	      reinforcements = reinforcements + 1
	      if in_testset then
	        reinforcements_test = reinforcements_test + 1
	      end
  	    end
	  else
	    -- OK, out of unsure zone
	    result = "0"
	  end
        else
	  -- wrong classification, false positive
          result = "1"
	  false_positives = false_positives + 1
	  if not in_testset or train_in_testset then
	    assert(osbf.learn(
		  text, dbset, nonspam_index, learn_flags))
  	    trainings = trainings + 1
	  end

	  if in_testset then
	    false_positives_test = false_positives_test + 1
	    if train_in_testset then
	      trainings_test = trainings_test + 1
	    end
	  end

	  if unlearn_threshold > 0 then
            local new_pR, p_array, i_pmax = osbf.classify(
	                text, dbset, classify_flags, min_p_ratio)
            if (new_pR == nil) then
              error(p_array)
            else
	      if new_pR < threshold then
                assert(osbf.unlearn(text, dbset, spam_index, learn_flags))
	      end
            end
	  end

        end
      end
      log:write(line,";", string.format("%.4f;%s\n", pR, result))
      log:flush()
    end
    s:close()
    local duration = os.time() - ini
    log:flush()
    log:close()

    -- print database stats report
    db_stats_fh = assert(io.open(db_stats_report, "w"))
    for _, dbfile in ipairs(dbset.classes) do
      db_stats_fh:write(dbfile_stats(dbfile))
    end
    db_stats_fh:close()

    -- print training stats report
    t_stats_fh = assert(io.open(training_stats_report, "w"))
    t_stats_fh:write("-- Training statistics report\n\n") 
    t_stats_fh:write("Message corpus\n") 
    t_stats_fh:write(string.format("  %-26s%7d\n  %-26s%7d\n  %-26s%7d\n\n",
	"Hams:", hams, "Spams:", spams, "Total messages:", hams+spams))

    t_stats_fh:write("Training (OSBF-Bayes)\n")
    t_stats_fh:write(string.format(
      "  %-26s%7d\n  %-26s%7d\n  %-26s%7d\n  %-26s%7d\n  " .. 
      	"%-26s%7d\n  %-26s%7d\n\n",
      "Treshold:", threshold,
      "Unlearn Treshold:", unlearn_threshold,
      "Trainings on error:", false_positives+false_negatives,
      "Reinforcements:", reinforcements,
      "Total learnings:", false_positives+false_negatives+reinforcements,
      "Duration (sec):", duration))

    t_stats_fh:write(
	string.format("Performance in the final %d messages (testset)\n",
			testsize))
    t_stats_fh:write(string.format(
      "  %-26s%7d\n  %-26s%7d\n  %-26s%7d\n",
      "Hams in testset:", hams_test,
      "Spams in testset:", spams_test,
      "False positives:", false_positives_test))

   t_stats_fh:write(string.format(
     "  %-26s%7d\n  %-26s%7d\n  %-26s%7d\n  %-26s%10.2f\n  " ..
        "%-26s%10.2f\n  %-26s%10.2f\n",
     "False negatives:", false_negatives_test, 
     "Total errors in testset:", false_positives_test+false_negatives_test,
     "Reinforcements in testset:", reinforcements_test, 
     "Spam recall (%):", 100 * (spams_test - false_negatives_test) / spams_test,
     "Spam precision (%):", 100 * (spams_test - false_negatives_test) /
     	(spams_test - false_negatives_test + false_positives_test),
     "Accuracy (%):",
      100 * (1 - (false_positives_test+false_negatives_test)/testsize)))
   t_stats_fh:close()
   -- end of report
  end
end

