from pypatnlp import *
import sys
import re

# Path to the NER-annotated corpus loaded below at import time.
CORPUS_PATH = 'data/estner.pycorp'
# Output CSV paths for the two generated feature datasets.
RESULT1_PATH = 'tasks/tanel01_global_context/data1.csv'
RESULT2_PATH = 'tasks/tanel01_global_context/data2.csv'
# Named-entity types of interest: person, organisation, location.
ENTITIES = [u'PER', u'ORG', u'LOC']
# Morphological case tags; None covers tokens carrying no case annotation.
# NOTE(review): presumably Estonian case abbreviations from the corpus's
# 'case' layer — verify against the pypatnlp annotation scheme.
CASES    = [u"ab", u"abl", u"ad", u"adt", u"all", u"el", u"es", u"g", u"ill", u"in", u"kom", u"n", u"p", u"pl", u"sg", u"ter", u"tr", None]
# Extract the second element of a pair (used on the (position, value)-style
# tuples returned by cover_doc_values — TODO confirm the tuple layout).
second = lambda x: x[1]

# Side effect at import: opens and loads the corpus file.
mycorp = PyCorpus(CORPUS_PATH)

def trim_type(t):
    """Drop the first two characters of tag *t* (presumably a 'B-'/'I-'
    style prefix — confirm against the corpus tag set) when the remainder
    is a known entity type; otherwise return *t* unchanged."""
    suffix = t[2:]
    return suffix if suffix in ENTITIES else t

def print_header(f=sys.stdout):
    """Write the CSV header for dataset 1 to *f*: one ``case_entity``
    column per (case, entity) pair, case-major, followed by one column
    per entity type."""
    columns = ['%s_%s' % (case, ent) for case in CASES for ent in ENTITIES]
    columns.extend(ENTITIES)
    f.write(','.join(columns) + '\n')

def print_header2(f=sys.stdout):
    """Write the CSV header for dataset 2 to *f*: one column per case tag,
    then one per entity type."""
    header = ','.join(str(col) for col in CASES + ENTITIES)
    f.write(header + '\n')

def count_cases(cases, types):
    """Compute probability features for one named entity's occurrences.

    cases -- parallel list of morphological case tags (values from CASES)
    types -- parallel list of entity types (values from ENTITIES)

    Returns a flat list ordered case-major to match print_header():
    P(case | entity) for every case in CASES crossed with every entity in
    ENTITIES (0 when that entity never occurs), followed by P(entity)
    over all observations.

    Fix vs. original: empty input now yields all-zero features instead of
    raising ZeroDivisionError on the entity-probability division.
    """
    # initialize one counter per (entity, case) pair and per entity
    counts = dict((e, dict((c, 0) for c in CASES)) for e in ENTITIES)
    totals = dict((e, 0) for e in ENTITIES)
    # tally observed case/entity pairs
    for c, t in zip(cases, types):
        counts[t][c] += 1
        totals[t] += 1
    vals = []
    # case probabilities, conditioned on the entity type
    for c in CASES:
        for e in ENTITIES:
            if totals[e] > 0:
                vals.append(float(counts[e][c]) / totals[e])
            else:
                # unseen entity: every case count must be zero as well
                vals.append(0)
    # entity probabilities over all observations (guard the empty case)
    n = float(len(types))
    for e in ENTITIES:
        vals.append(totals[e] / n if n else 0)
    return vals

def count_frequency(cases, types):
    """Return raw occurrence counts: one entry per case tag in CASES,
    then one per entity type in ENTITIES."""
    row = [cases.count(case) for case in CASES]
    row += [types.count(ent) for ent in ENTITIES]
    return row

def process(doc, counter, f=sys.stdout):
    """For each distinct named-entity lemma in *doc*, gather the case tags
    and (trimmed) entity types of all its occurrences, run *counter* over
    them, and write the resulting feature row to *f* as one CSV line."""
    # cover of every token annotated with one of the entity types
    ne_cover = regex_doc_cover(doc, 'ne_type', '|'.join(ENTITIES))
    lemmas = [second(v) for v in cover_doc_values(doc, 'lemma', ne_cover)]
    # one output row per unique entity lemma
    for lemma in frozenset(lemmas):
        # restrict to occurrences of this exact lemma that are also
        # tagged as named entities
        occ_cover = regex_doc_cover(doc, 'lemma', re.escape(lemma)) & ne_cover
        occ_cases = [second(v) for v in cover_doc_values(doc, 'case', occ_cover)]
        occ_types = [trim_type(second(v))
                     for v in cover_doc_values(doc, 'ne_type', occ_cover)]
        row = counter(occ_cases, occ_types)
        f.write(','.join(str(x) for x in row) + '\n')

def dataset_1():
    """Generate dataset 1 (case/entity probability features): one CSV row
    per distinct named entity per corpus document, written to RESULT1_PATH.

    Fix vs. original: the file is managed by a with-statement so it is
    closed even when processing a document raises.  Mode 'wb' is kept as-is
    (this is Python 2 code writing str rows).
    """
    with open(RESULT1_PATH, 'wb') as f:
        print_header(f)
        for doc_id in mycorp.keys():
            process(mycorp[doc_id], count_cases, f)

def dataset_2():
    """Generate dataset 2 (raw case/entity frequency counts): one CSV row
    per distinct named entity per corpus document, written to RESULT2_PATH.

    Fix vs. original: the file is managed by a with-statement so it is
    closed even when processing a document raises.  Mode 'wb' is kept as-is
    (this is Python 2 code writing str rows).
    """
    with open(RESULT2_PATH, 'wb') as f:
        print_header2(f)
        for doc_id in mycorp.keys():
            process(mycorp[doc_id], count_frequency, f)

# Guard the script entry point so importing this module (e.g. to reuse the
# counters) does not regenerate both datasets.  Note the corpus is still
# loaded at import time above.
if __name__ == '__main__':
    dataset_1()
    dataset_2()
