import getopt, os, re, sys, gzip, sets
import os.path

# Module-level configuration flags; overridden by command-line options in main.
USE_ENGLISH_FEATURES = False  # -e: English orthographic features (case/digit/punct)
USE_AFFIX_FEATURES = False    # -a: prefix/suffix features
SEMI_ORDER=1                  # --semi_order: semi-Markov order (max segment length in words)
PSEUDO=False                  # -p: break up cliques for pseudo-likelihood training
POS=False                     # --pos: input corpus carries POS tags
class TagStat:
    """Occurrence statistics for one word: how many times it was seen
    and the set of tag ids it was observed with."""

    def __init__(self):
        self.count = 0  # number of observations of the word
        # Builtin set (Python 2.4+) instead of the deprecated sets.Set;
        # behaviour (add/iterate/membership) is identical here.
        self.tags = set()

    def inc(self, tag):
        """Record one more occurrence of the word with tag id `tag`."""
        self.count += 1
        self.tags.add(tag)

class FeatureMap:
    """Bidirectional mapping between feature/label strings and integer
    ids, plus per-word tag statistics.  Serialisable to/from gzip files
    so a map built at training time can be reused at test time."""

    def __init__(self):
        self.features = {}      # feature string -> feature id
        self.rev_features = {}  # feature id -> feature string
        self.num_features = 0
        self.labels = {}        # tag string -> label id
        self.num_labels = 0
        self.tag_stats = {}     # word -> TagStat

    def detect(self, feature_string, add_unseen=False):
        """Return the id of feature_string; if unseen, allocate a fresh
        id when add_unseen is true, otherwise return -1."""
        # `in` rather than the deprecated dict.has_key()
        if feature_string in self.features:
            return self.features[feature_string]
        elif add_unseen:
            self.features[feature_string] = self.num_features
            self.rev_features[self.num_features] = feature_string
            self.num_features += 1
            return self.num_features-1
        return -1

    def register_tag(self, tag, word):
        """Record that `word` was observed with (already known) `tag`."""
        tag_id = self.labels[tag]
        # The original duplicated TagStat.inc by hand on the miss branch
        # (count=1, tags.add); creating an empty TagStat and calling
        # inc() is equivalent and keeps the update logic in one place.
        if word not in self.tag_stats:
            self.tag_stats[word] = TagStat()
        self.tag_stats[word].inc(tag_id)

    def detect_tag(self, tag, add_unseen=False):
        """Return the id of label `tag`; allocate when add_unseen, else -1."""
        if tag in self.labels:
            return self.labels[tag]
        elif add_unseen:
            self.labels[tag] = self.num_labels
            self.num_labels += 1
            return self.num_labels-1
        return -1

    def decode(self, feature):
        """Inverse of detect(): feature id -> feature string."""
        if feature in self.rev_features:
            return self.rev_features[feature]
        return "--UNKNOWN_FEATURE--" 

    def serialise(self, out_file="attribute_map.gz"):
        """Write the label count, then all labels, then all features to
        out_file; per-word tag statistics go to a companion
        tag_statistics_* file alongside it (both gzip'd)."""
        out = gzip.open(out_file,'w')
        print >>out, len(self.labels)
        # loop variables renamed from `id`, which shadowed the builtin
        for label,label_id in self.labels.items():
            print >>out, label, label_id
        for feature,feature_id in self.features.items():
            print >>out, feature, feature_id
        out.close()

        out = gzip.open("tag_statistics_"+os.path.basename(out_file),'w')
        print >>out, len(self.tag_stats)
        for word in self.tag_stats:
            # one line per word: word, count, then the tag ids
            print >>out, word, self.tag_stats[word].count,
            for tag in self.tag_stats[word].tags:
                print >>out, tag,
            print >>out
        out.close()

    def deserialise(self, in_file="attribute_map.gz"):
        """Inverse of serialise(): rebuild labels, features and the tag
        statistics from in_file and its companion file."""
        # file handle renamed from `input`, which shadowed the builtin
        fd = gzip.open(in_file,'r')
        self.num_labels = int(fd.readline())
        counter = 0
        # first num_labels lines are labels; the remainder are features
        for line in fd:
            tag,tag_id = line.split()
            self.labels[tag] = int(tag_id)
            counter+=1
            if counter >= self.num_labels:
                break
        for line in fd:
            feature,feature_id = line.split()
            feature_id = int(feature_id)
            self.features[feature] = feature_id
            self.rev_features[feature_id] = feature
            # track the next free id as max(id)+1
            if feature_id+1 > self.num_features:
                self.num_features = feature_id+1
        fd.close()                    

        fd = gzip.open("tag_statistics_"+os.path.basename(in_file),'r')
        num_words = int(fd.readline())  # header count; body is read to EOF
        for line in fd:
            tokens = line.split()
            self.tag_stats[tokens[0]] = TagStat()
            self.tag_stats[tokens[0]].count = int(tokens[1])
            # builtin set rather than the deprecated sets.Set
            self.tag_stats[tokens[0]].tags = set()
            for token in tokens[2:]:
                self.tag_stats[tokens[0]].tags.add(int(token))
        fd.close()                    

# lexeme string -> feature string, populated from --lexical_features in main
lexical_features = {}
# single shared feature/label map used by all extraction code below
feature_map = FeatureMap()

def node_features(sentence, start, end, add_unseen):
    """Compute the feature-id set for the candidate segment
    sentence[start:end], where sentence is a list of
    (word, lexeme, pos, tag) tuples.  When add_unseen is true (training),
    unseen feature strings get fresh ids and the segment's word is
    registered against its gold tag; otherwise unseen features are
    silently dropped (detect() returns -1)."""
    features = set()  # builtin set instead of the deprecated sets.Set

    # glue the pieces of a multi-word segment together with '_'
    word=sentence[start][0]
    lexeme = sentence[start][1]
    if POS: pos = sentence[start][2]
    for i in range(start+1,end):
        word += "_" + sentence[i][0]
        lexeme += "_" + sentence[i][1]
        if POS: pos += "_" + sentence[i][2]

    # left-context word/lexeme/pos, or None at the sentence start
    if start > 0: 
        prev_word = sentence[start-1][0]
        prev_lexeme = sentence[start-1][1]
        if POS: prev_pos = sentence[start-1][2]
    else: 
        prev_word = None
        prev_lexeme = None
        if POS: prev_pos = None
    # right-context word/lexeme/pos, or None at the sentence end
    if end < len(sentence): 
        next_word = sentence[end][0]
        next_lexeme = sentence[end][1]
        if POS: next_pos = sentence[end][2]
    else: 
        next_word = None
        next_lexeme = None
        if POS: next_pos = None
    
    # record word stats (gold tag taken from the segment's first word)
    if add_unseen: feature_map.register_tag(sentence[start][3],word)

    feature = feature_map.detect('WORD=%s' % (word), add_unseen)
    if feature >= 0: features.add(feature)
    if word != lexeme:
        feature = feature_map.detect('LEX=%s' % (lexeme), add_unseen)
        if feature >= 0: features.add(feature)
    if POS:
        feature = feature_map.detect('POS=%s' % (pos), add_unseen)
        if feature >= 0: features.add(feature)
    if prev_word:
        feature = feature_map.detect\
            ('PREV_CURR_WORD=%s+%s'%(prev_word,word), add_unseen)
        if feature >= 0: features.add(feature)
        feature = feature_map.detect\
            ('PREV_WORD=%s'%(prev_word), add_unseen)
        if feature >= 0: features.add(feature)
        if POS:
            feature = feature_map.detect\
                ('PREV_POS=%s'%(prev_pos), add_unseen)
            if feature >= 0: features.add(feature)
            feature = feature_map.detect\
                ('PREV_CURR_POS=%s+%s'%(prev_pos,pos), add_unseen)
            if feature >= 0: features.add(feature)
    if next_word:
        feature = feature_map.detect\
            ('CURR_NEXT_WORD=%s+%s'%(word,next_word), add_unseen)
        if feature >= 0: features.add(feature)
        feature = feature_map.detect\
            ('NEXT_WORD=%s'%(next_word), add_unseen)
        if feature >= 0: features.add(feature)
        if POS:
            feature = feature_map.detect\
                ('CURR_NEXT_POS=%s+%s'%(pos,next_pos), add_unseen)
            if feature >= 0: features.add(feature)
            feature = feature_map.detect\
                ('NEXT_POS=%s'%(next_pos), add_unseen)
            if feature >= 0: features.add(feature)

    if len(lexical_features) > 0:
        # BUGFIX: the original indexed lexical_features[lexeme] directly
        # and raised KeyError for any lexeme absent from the feature
        # file; fall back to an empty iterable instead.
        # NOTE(review): the loader in main stores a single string per
        # lexeme, so this loop iterates over its *characters* -- confirm
        # that is intended.
        for lf in lexical_features.get(lexeme, ()):
            feature = feature_map.detect\
                ('LEX_FEAT=%s'%(lf), add_unseen)
            if feature >= 0: features.add(feature)

    if USE_AFFIX_FEATURES:
        word_len=len(word)
        if not USE_ENGLISH_FEATURES: word_len /= 3 # hack for japanese encoding
        affix_len = min(3,word_len)
        for x in range(1,affix_len+1):
            if not USE_ENGLISH_FEATURES: x*=3 # hack for japanese encoding
            feature = feature_map.detect('PREFIX='+word[:x], add_unseen)
            if feature >= 0: features.add(feature)
            feature = feature_map.detect('SUFFIX='+word[-x:], add_unseen)
            if feature >= 0: features.add(feature)

    if end-start > 1:
        feature = feature_map.detect('MULTI_WORD', add_unseen)
        if feature >= 0: features.add(feature)

    # these features are specific to English text; any()/all() replace
    # the original reduce(lambda ...) chains -- identical results,
    # including on empty strings
    if USE_ENGLISH_FEATURES:
        # contains an uppercase char
        if any(c.isupper() for c in word):
            feature = feature_map.detect('UPPER', add_unseen)
            if feature >= 0: features.add(feature)
        # contains a digit char
        if any(c.isdigit() for c in word):
            feature = feature_map.detect('DIGIT', add_unseen)
            if feature >= 0: features.add(feature)
        # contains a non-alphanumeric char
        if not all(c.isalnum() for c in word):
            feature = feature_map.detect('PUNC', add_unseen)
            if feature >= 0: features.add(feature)

    return features

def detect_features(sentences,add_unseen=False):
    """Generator over sentences (lists of (word,lexeme,pos,tag) tuples).

    Yields (features, segments, (start_tag,end_tag), order) where
    `features` is a list of ((j,i+1), feature-id set) pairs for every
    candidate segment of up to SEMI_ORDER words, and `segments` lists
    the gold (start, end, tag_id) spans.  A 'ditto' tag marks a
    continuation word of a multi-word gold segment.  When PSEUDO is
    set, one tuple is yielded per gold segment rather than per sentence.
    """
    for sentence in sentences:
        sentence_features=[]
        sentence_segments=[]
        prev_gold = -1          # tag id of the previous gold segment
        prev_gold_index = 0     # word index just past the previous gold segment
        for i in range(len(sentence)):
            # candidate segments ending at i start at most SEMI_ORDER-1
            # words back and never cross the previous gold boundary
            start = max(prev_gold_index,i-SEMI_ORDER+1)
            for j in reversed(range(start,i+1)):
                # check if this range is a complete segment
                gold = True;
                tag = sentence[j][3]
                if SEMI_ORDER > 1:
                    # gold iff j carries a real tag and [j+1,i] is a
                    # maximal run of 'ditto' continuation words
                    if tag == 'ditto':
                        gold = False;
                    elif i+1 < len(sentence) and sentence[i+1][3] == 'ditto':
                        gold = False
                    else:
                        for k in range(j+1,i+1):
                            if sentence[k][3] != 'ditto':
                                gold = False
                                break
                tag_id = -1
                if gold:
                    tag_id = feature_map.detect_tag(tag,add_unseen)
                    if tag_id < 0: tag_id = 0  # unseen tag (test time) maps to 0
                    # segment offsets are relative to the previous gold boundary
                    sentence_segments.append(\
                        (j-prev_gold_index,i+1-prev_gold_index,tag_id))

                    if PSEUDO:
                        # pseudo-likelihood: emit each gold segment as
                        # its own clique, flanked by neighbouring tags
                        assert j-prev_gold_index == 0
                        sentence_features.append(((j,i+1),\
                            node_features(sentence, j, i+1, add_unseen)))
                        start_tag = prev_gold
                        if (i+1 < len(sentence)):
                            end_tag \
                                = feature_map.detect_tag(sentence[i+1][3],add_unseen)
                        else: end_tag = -1
                        yield sentence_features, sentence_segments, \
                            (start_tag,end_tag), i+1-j

                        # reset accumulators for the next segment
                        sentence_features=[]
                        sentence_segments=[]
                        prev_gold = tag_id
                        prev_gold_index = i+1
                        break
#               print "Detected (%d,%d) = %d" %(j,i+1,tag_id)
                sentence_features.append(((j,i+1),\
                    node_features(sentence, j, i+1, add_unseen)))
        
        start_tag = -1 #feature_map.detect_tag("--START--",add_unseen)
        end_tag = -1 #feature_map.detect_tag("--END--",add_unseen)
        if not PSEUDO:
            # whole-sentence mode: one clique set per sentence
            yield sentence_features, sentence_segments, \
                (start_tag,end_tag), SEMI_ORDER

def xread_sentence(words_fd, lexemes_fd):
    """Generator over sentences read in lock-step from words_fd and
    lexemes_fd; blank lines separate sentences.  Each line is
    whitespace-split: the last token is the tag, the second-to-last is
    the POS tag when the global POS flag is set, and the remaining
    tokens are concatenated to form the word/lexeme.  Yields lists of
    (word, lexeme, pos_tag, tag) tuples (pos_tag is None without POS);
    the final, possibly empty, sentence is yielded after EOF."""
    sentence = []
    for word_line,lexeme_line in zip(words_fd,lexemes_fd):
        if word_line.strip() == '':
            yield sentence
            sentence = []
            continue

        word_tokens = word_line.split()
        w_tag = word_tokens[-1]
        if POS: pos_tag = word_tokens[-2]
        if len(word_tokens) > 2:
            # str.join replaces the original piecewise += concatenation
            if POS: word = ''.join(word_tokens[:-2])
            else:   word = ''.join(word_tokens[:-1])
        else:
            # NOTE(review): with POS set and exactly two tokens the
            # first token is kept whole as the word even though it was
            # also read as the POS tag above -- behaviour preserved.
            word = word_tokens[0]

        lexeme_tokens = lexeme_line.split()
        l_tag = lexeme_tokens[-1]
        if len(lexeme_tokens) > 2:
            if POS: lexeme = ''.join(lexeme_tokens[:-2])
            else:   lexeme = ''.join(lexeme_tokens[:-1])
        else:
            lexeme = lexeme_tokens[0]

        # word and lexeme streams must carry identical tags
        # (assert is stripped under python -O)
        assert l_tag == w_tag
        if POS: sentence.append((word,lexeme,pos_tag,w_tag))
        else:   sentence.append((word,lexeme,None,w_tag))
    yield sentence
    return
        
def crf_print(order, tmp_output_file, features_sets, segments, start, end):
    """Write one sentence's gold segmentation and its feature sets to
    the module-global tmp_output_fd (opened in main) in the textual
    format consumed by the CRF trainer.

    NOTE(review): the `order` and `tmp_output_file` parameters are
    never used -- all output goes to the global tmp_output_fd.
    """
    # print gold segment labelling: boundary tags, then (start,end,tag) triples
    print >>tmp_output_fd, start, end
    for first,second,tag in segments:
        print >>tmp_output_fd, first, second, tag,
    print >>tmp_output_fd

    # print feature sets: one line per candidate segment, with feature
    # ids decoded back to their string form
    for (i,j),features in features_sets:
        for feature in features:
            print >>tmp_output_fd, feature_map.decode(feature),
        print >>tmp_output_fd

if __name__ == '__main__':
    # Command-line driver: read a gzip'd tagged word corpus (and
    # optional lexeme corpus), extract CRF features for every candidate
    # segment, and write a gzip'd feature file.  A previously serialised
    # feature map may be reused via -f; otherwise a fresh one is built
    # and serialised at the end.
    words_file=''
    lexemes_file=''
    output_file = 'out.crf.gz' 
    features_file = None
    lex_features_file = None
    lex_sum_file = None
    num_sents=None
    options, remaining = getopt.getopt(sys.argv[1:], 'n:w:l:f:o:eap',
        ['lexical_features=','lexical_summary=','semi_order=','pos'])
    for option, value in options:
        if option == '-w': words_file = value
        elif option == '-l': lexemes_file = value
        elif option == '-o': output_file = value
        elif option == '-f': features_file = value
        elif option == '-e': USE_ENGLISH_FEATURES = True
        elif option == '-a': USE_AFFIX_FEATURES = True
        elif option == '-p': PSEUDO = True
        elif option == '-n': num_sents = int(value)
        elif option == '--lexical_features': lex_features_file = value
        elif option == '--lexical_summary': lex_sum_file = value
        elif option == '--semi_order': SEMI_ORDER = int(value)
        elif option == '--pos': POS = True
        else:
            # NOTE(review): getopt.getopt raises GetoptError on unknown
            # options, so this usage branch appears to be unreachable.
            print "Usage: python %s [-f features] [-o output] -w words -l lexemes"\
                % sys.argv[0]
            print
            print "  -f previously created feature map"
            print "  -o file to write output to default=out.crf"
            print "  -w input corpus" 
            print "  -l lexemes for the input corpus" 
            print "  -e use English orthographic features" 
            print "  -p break up cliques for pseudo-llh training" 
            print "  --lexical_features features for lexemes in the input corpus" 
            print "  --lexical_summary features for labels in the input corpus" 
            print "  --semi_order semi-markov order" 
            print "  --pos use pos tags" 
            sys.exit(1)
    # -w is effectively mandatory (assert is stripped under python -O)
    assert words_file!='' #and lexemes_file!=''

    words_fd = gzip.open(words_file,"r")
    # NOTE(review): when -l is omitted lexemes_fd aliases words_fd, so
    # the zip() in xread_sentence then pairs *consecutive* lines of the
    # same file rather than repeating each line -- confirm intended.
    if lexemes_file: lexemes_fd = gzip.open(lexemes_file,"r")
    else: lexemes_fd = words_fd

    # read the lexical features into a map (one "lexeme feature" pair per line)
    if lex_features_file:
        lex_feat_fd = gzip.open(lex_features_file)
        for line in lex_feat_fd:
            lex,feat = line.split()
            lexical_features[lex] = feat
        lex_feat_fd.close()

    # if we haven't been supplied with a feature map, create one
    if features_file:
        feature_map.deserialise(features_file)

    # stream features to a hidden temp file first: the final file must
    # begin with counts that are only known after the full pass
    tmp_output_file = '.%s'%os.path.basename(output_file)
    tmp_output_fd = gzip.open(tmp_output_file,'w')
    counter=0
    # add_unseen only when no pre-built feature map was supplied
    for features_sets,segments,(start,end),order \
        in detect_features(xread_sentence(words_fd, lexemes_fd),features_file==None):
        if num_sents and counter >= (num_sents): break  
        # progress: a dot every 100 sentences, a count every 2500
        if counter >0 and counter % 100 == 0:
            print '.', 
        if counter >0 and counter % 2500 == 0:
            print counter
        sys.stdout.flush()
        counter+=1
#   features_sets,segments,(start,end) \
#       = detect_features(sentence, features_file==None)

        crf_print(order, tmp_output_file, features_sets, segments, start, end)
    tmp_output_fd.close()

    # prepend the number of sentences to the file and copy the tmp_file
    # to the final features file
    tmp_output_fd = gzip.open(tmp_output_file,'r')
    output_fd = gzip.open(output_file,'w')
    print >>output_fd, SEMI_ORDER, feature_map.num_labels,
    print >>output_fd
    for line in tmp_output_fd:
        print >>output_fd, line,
    tmp_output_fd.close()
    output_fd.close()
    os.remove(tmp_output_file)

    print
    print 'Number of features:',feature_map.num_features
    print 'Number of labels:',feature_map.num_labels
    if not features_file:
        print 'Serialising feature_map'
        # derive the attribute-map name from the output file name
        if output_file[-3:] == ".gz":
            features_out = os.path.basename(output_file[:-3]) + ".attribute_map.gz"
        else:
            features_out = os.path.basename(output_file) + ".attribute_map.gz"
        feature_map.serialise(features_out)

    words_fd.close()
    lexemes_fd.close()

    # read the lexical tag features into a map
    if lex_sum_file:
        tag_features = {}
        lex_sum_fd = gzip.open(lex_sum_file)
        for line in lex_sum_fd:
            tokens = line.split()
            tag_features[tokens[0]] = tokens[1:]
        lex_sum_fd.close()
        # emit "tag_id value" pairs for every known tag
        # NOTE(review): `list` and `id` below shadow builtins
        tag_features_out = gzip.open('%s.tag_features'%lex_sum_file,'w')
        for key,list in tag_features.items():
            for value in list:
                id = feature_map.detect_tag(key)
                if id >= 0:
                    print >>tag_features_out, id, value
        tag_features_out.close()

