"""
Generates lexicon-related data tables for SNOMED term identification according to the
tokeniser and stop-list.
"""

import re
import sys
import os
import os.path

from LexTokeniser import Tokeniser

_DEBUG_ = True

def produce_lexicon(toc_file):
    """Generate the lexicon tables for *toc_file*.

    Creates a directory ``Lex<toc_file>`` containing two tab-separated
    tables:

    * ``lexcount`` -- one line per accepted TOC row: description id,
      number of tokens produced from the lower-cased term, and the
      tokeniser's stop-word ratio for that term.
    * ``lexids`` -- one line per distinct token: the token followed by a
      ':'-joined list of every description id whose term contains it.

    TOC rows must have exactly 7 tab-separated fields
    (did, ?, cid, term, ?, flag, ?); rows whose 6th field is "3" are
    skipped.  Progress and entry counts are reported on stderr.
    """
    lexicon = {}  # token -> list of description ids containing it
    out_dir = "Lex%s" % toc_file
    os.mkdir(out_dir)

    sys.stderr.write('Building lexicon %s ...\n' % toc_file)
    sys.stderr.write('Building LEX_COUNT ...\n')

    n_rows = 0
    # 'with' guarantees both handles are closed even if a row is
    # malformed (the original leaked in_file and, on error, lexids).
    with open(toc_file, "r") as in_file, \
         open("%s/lexcount" % out_dir, "w") as lex_count_file:
        for row in in_file:
            n_rows += 1
            # Only did (field 1), term (field 4) and the skip flag
            # (field 6) are used; the rest are ignored.
            did, _, _, term, _, flag, _ = row.strip().split("\t")
            # BUG FIX: split() yields strings, so the original test
            # ``f == 3`` (an int) was never true and no row was ever
            # skipped.  Compare against the string "3" instead.
            if flag == "3":
                continue
            tokeniser = Tokeniser(term.lower())
            nwords = 0
            for word in tokeniser.tokenise():
                lexicon.setdefault(word, []).append(did)
                nwords += 1
            # stop_ratio is an attribute of the project Tokeniser,
            # populated by tokenise() -- confirm against LexTokeniser.
            lex_count_file.write("%s\t%s\t%s\n"
                                 % (did, nwords, tokeniser.stop_ratio))
    # BUG FIX: the original printed the last enumerate() index, which
    # under-reported by one and raised NameError on an empty file.
    sys.stderr.write(' %s entrie(s).\n' % n_rows)

    # Build lex_ids: one line per token with all its description ids.
    sys.stderr.write('Building LEX_IDS ...\n')
    with open("%s/lexids" % out_dir, "w") as lex_ids_file:
        for word, dids in lexicon.items():
            lex_ids_file.write("%s\t%s\n" % (word, ":".join(dids)))
    sys.stderr.write(' %s entrie(s).\n' % len(lexicon))

if __name__ == "__main__":
    # Command-line entry point: validate the single TOC-file argument,
    # refuse to clobber an existing lexicon, then build it.
    try:
        toc_file = sys.argv[1]
    except IndexError:
        sys.stderr.write("Usage: %s TOC_file name\n" % sys.argv[0])
        sys.exit(1)

    if not os.path.exists(toc_file):
        sys.stderr.write("TOC file: %s doesn't exist!\n" % toc_file)
        sys.exit(1)

    if os.path.exists("Lex%s" % toc_file):
        sys.stderr.write("Lexicon %s already exists!\n" % toc_file)
        sys.exit(1)

    produce_lexicon(toc_file)
