from pypatnlp import *
from pprint import pprint
import numpy as np
from pandas import concat

# Shared read-only handle to the Estonian NER corpus; every extractor below
# indexes into it by document id.
pycorp = PyCorpus('data/estner.pycorp', readonly=True)

def extract_Xs(fe, doc_ids):
    """Apply feature extractor *fe* to each document, one frame per doc id."""
    frames = []
    for doc_id in doc_ids:
        frames.append(fe.transform(pycorp[doc_id]))
    return frames

def extract_ys(doc_ids):
    """Flatten the 'ne_type' labels of the given documents into one list."""
    labels = []
    for doc_id in doc_ids:
        labels.extend(np.array(pycorp[doc_id]['ne_type']))
    return labels

def extract_zs(doc_ids):
    """For every token, build an HTML context snippet: the token wrapped in
    <b></b> with up to three words of context on each side."""
    snippets = []
    for doc_id in doc_ids:
        doc = pycorp[doc_id]
        total = doc.shape[0]
        words = list(doc.word)
        for i in range(total):
            left = words[max(0, i - 3):i]
            focus = ['<b>' + words[i] + '</b>']
            right = words[i + 1:min(i + 4, total)]
            snippets.append(' '.join(left + focus + right))
    return snippets

def make_small():
    '''Make a simpler and smaller dataset.'''
    # Two cheap local feature families suffice for the small variant.
    fe = BinaryDataFrameFeatureExtractor(
        *get_local_extractors(['wtype', 'case'], 1))
    doc_ids = list(pycorp.keys())
    X = concat(extract_Xs(fe, doc_ids)).fillna(0)
    y = extract_ys(doc_ids)
    z = extract_zs(doc_ids)
    # Persist features, labels and context snippets side by side.
    for obj, suffix in ((X, 'X'), (y, 'y'), (z, 'z')):
        save_obj(obj, 'tasks/tanel02_binary_pca/small.' + suffix)

def make_pattern():
    '''Make larger dataset with more very weak features.

    Mines frequent conjunctive patterns over local features against the
    named-entity cover, then stores the binary dataset (X), labels (y)
    and context snippets (z).
    '''
    extractors = get_local_extractors(['lemma', 'wtype', 'case', 'vtype'], 1)
    # Extra boolean feature: surface form starts with an upper-case letter.
    extractors.append(
        lambda doc: extract_local(doc, 'word', 'isupper', starts_upper, 1))
    doc_ids = list(pycorp.keys())

    # Extract features for data mining.
    fe = DocumentFeatureExtractor(*extractors)
    corp = Corpus()
    for doc_id in doc_ids:
        corp[doc_id.encode('utf-8')] = fe.transform(pycorp[doc_id])
    # Cached alternative to recomputing the corpus each run:
    #writeCorpusToFile('tasks/tanel02_binary_pca/estner.corp', corp)
    #corp = readCorpusFromFile('tasks/tanel02_binary_pca/estner.corp')

    # Mine frequent conjunctions (min support 0.05, length/depth bounds 2).
    rulecovers = basicRuleCovers(corp, 0)
    initial = ConjunctionVector([Conjunction([key]) for key in rulecovers])
    # Tokens tagged PER/ORG/LOC are the positives the patterns must cover.
    target_cover = regex_cover(pycorp, 'ne_type', 'PER|ORG|LOC')
    frequent = hrApriori(initial, rulecovers, target_cover, 0.05, 2, 2)
    write_conjunctions(frequent, 'tasks/tanel02_binary_pca/frequent.txt')
    covers = conjunction_covers(frequent, rulecovers)
    # Parenthesized single-argument form prints identically on Py2 and Py3.
    print('Got {0} frequent patterns'.format(len(frequent)))

    X = concat([conjunction_dataframe(frequent, covers, doc_id)
                for doc_id in doc_ids]).fillna(0)
    y = extract_ys(doc_ids)
    z = extract_zs(doc_ids)

    # Store the dataset.
    save_obj(X, 'tasks/tanel02_binary_pca/pattern.X')
    save_obj(y, 'tasks/tanel02_binary_pca/pattern.y')
    save_obj(z, 'tasks/tanel02_binary_pca/pattern.z')


# Build both datasets.
make_small()
make_pattern()

# Sanity-check the stored pattern dataset: feature rows, labels and
# context snippets must all align one-to-one.
X = load_obj('tasks/tanel02_binary_pca/pattern.X')
y = load_obj('tasks/tanel02_binary_pca/pattern.y')
z = load_obj('tasks/tanel02_binary_pca/pattern.z')
assert (X.shape[0] == len(y))
assert (len(y) == len(z))
# Parenthesized single-argument prints work on both Py2 and Py3.
print(X.columns)
print(y[:10])
print(z[:10])