import re
import urllib

import pandas as pd
from pypatnlp import *

def make_corp(rel):
  ind = rels_data["feature"] == rel
  keys = rels_data["entity"][ind].values
  values = rels_data["value"][ind].values
  rel_corpus = PyCorpus("data/" + rel + "_posit.pycorp")
  rel_corpus_neg = PyCorpus("data/" + rel + "_negat.pycorp")
  count = 0
  count_neg = 0
  for i in range(0, len(keys)):
    try:
      key = keys[i]
      value = values[i]
      doc = wiki_corp[key]
      for begin, end in doc_sentences_positions(doc):
        sentence = doc[begin:end]
        key_parts = key.split(" ")
        cov_key1 = regex_doc_cover(sentence, 'lemma', '\\b' + key_parts[0].lower() + '\\b')
        if len(key_parts) > 1:
          cov_key2 = regex_doc_cover(sentence, 'lemma', '\\b' + key_parts[1].lower() + '\\b')
        else:
          cov_key2 = regex_doc_cover(sentence, 'lemma', 'SuvasuvaSuva')
        value_parts = value.split(" ")
        cov_value1 = regex_doc_cover(sentence, 'lemma', '\\b' + value_parts[0].lower().replace(".", "") + '\\b')
        if len(value_parts) > 1:
          if value_parts[1] != "(":
            cov_value2 = regex_doc_cover(sentence, 'lemma', '\\b' + value_parts[1].lower().replace(".", "") + '\\b')
          else:
            cov_value2 = regex_doc_cover(sentence, 'lemma', '\\b' + value_parts[2].lower().replace(".", "") + '\\b')
          if (cov_key1.size() > 0 and cov_key2.size() > 0) and (cov_value1.size() > 0 and cov_value2.size() > 0):
            count += 1
            print str(count)
            print u' '.join(sentence.word)
            tmp1 = sentence["lemma"] == value_parts[0].lower().replace(".", "")
            tmp2 = sentence["lemma"] == value_parts[1].lower().replace(".", "")
            sentence["synnikoht"] = tmp1 + tmp2
            sentence.index = range(0, max(sentence.index) - min(sentence.index) + 1)
            rel_corpus[unicode(str(count))] = sentence
        else:
          if (cov_key1.size() > 0 and cov_key2.size() > 0) and cov_value1.size() > 0:
            count += 1
            print str(count)
            print u' '.join(sentence.word)
            tmp1 = sentence["lemma"] == value_parts[0].lower().replace(".", "")
            sentence["synnikoht"] = tmp1
            sentence.index = range(0, max(sentence.index) - min(sentence.index) + 1)
            rel_corpus[unicode(str(count))] = sentence
          #if (cov_key1.size() > 0 or cov_key2.size() > 0) and (cov_value1.size() == 0 and cov_value2.size() == 0):
	  if (cov_key1.size() > 0 or cov_key2.size() > 0) and (cov_value1.size() == 0):
            count_neg += 1
            sentence["synnikoht"] = False
            sentence.index = range(0, max(sentence.index) - min(sentence.index) + 1)
            rel_corpus_neg[unicode(str(count_neg))] = sentence
    except:
      print "viga"
      pass
  rel_corpus.close()
  rel_corpus_neg.close()


from pprint import pprint
import numpy as np
from sklearn import preprocessing, metrics, cross_validation
from sklearn.svm import LinearSVC

def rel_classify(rel):
  train_pycorp = PyCorpus('data/' + rel + '_posit.pycorp', readonly=True)
  # prepare some feature extractors
  exs = get_local_extractors(['start', 'end', 'lemma', 'wtype', 'vtype'], 0)
  exs.append(lambda doc: extract_local(doc, 'word', 'starts_upper', starts_upper, 0))
  exs.append(lambda doc: extract_local(doc, 'word', 'all_upper', all_upper, 0))
  # the extractor converts each word to a list of token/offset/value pairs
  fe = TupleStringFeatureExtractor(*exs, nooffset=True)
  # now convert train and test corpora to C++ corpora with selected features
  fe = CorpusFeatureExtractor(DocumentFeatureExtractor(*exs, nooffset=True))
  train_corp = Corpus(fe.transform(train_pycorp))
  # store the C++ style corpora on disk
  write_corpus_to_file('train.corp', train_corp)
  # load the previously create C++ corpora
  train_corp = read_corpus_from_file('train.corp')
  # create named entity covers
  ne_train_cov = regex_cover(train_pycorp, 'synnikoht', 'True')
  print 'Number of named entities in train corpus', ne_train_cov.size()
  # mine frequent rules
  miner = HrAprioriMiner(radius=1, size_limit=2, treshold=0.05)
  miner.fit(train_corp, ne_train_cov)
  # save and print frequent rules
  print 'List of frequent conjunctions (patterns)'
  for idx, conj in enumerate(miner.get_frequent()):
    print idx, conj
  # prepare training dataset for SVM
  mined_dfs = miner.transform(train_corp)
  X = np.vstack([mined_dfs[key] for key in sorted(mined_dfs.keys())])
  y = [elem for key in sorted(train_pycorp.keys()) for elem in train_pycorp[key].synnikoht]
  # transform labels
  labelenc = preprocessing.LabelEncoder()
  y = labelenc.fit_transform(y)
  # print the shape of the training dataset and the values
  print X.shape
  print len(y)
  # train a SVM
  svc = LinearSVC()
  # cv
  scores = cross_validation.cross_val_score(svc, X, y, cv=5)
  scores = cross_validation.cross_val_score(svc, X, y, cv=5, score_func=metrics.precision_recall_fscore_support)
  print scores.mean(0)
  # return classifier
  svc.fit(X, y)
  return svc

# get top8 google results (urls)
import json
import urllib2
def google_get_links(x):
    """Return up to 8 result URLs for query *x* from the Google AJAX
    web-search API (a long-deprecated service, kept for this workflow).

    The query is now properly URL-encoded with ``urllib.quote_plus``,
    so spaces and special characters are safe (the original just joined
    words with a literal '%20').
    """
    query = urllib.quote_plus(x)
    url = 'http://ajax.googleapis.com/ajax/services/search/web?v=1.0&start=1&rsz=8&q=%s&safe=off' % query
    search_results = urllib2.urlopen(url)
    try:
        # parse straight from the response stream
        js = json.load(search_results)
    finally:
        # the original leaked the HTTP response; always close it
        search_results.close()
    results = js['responseData']['results']
    return [result['unescapedUrl'] for result in results]

# get text
from bs4 import BeautifulSoup
# Fetch the Postimees front page and collapse all <p> paragraph texts into
# one space-joined string.  NOTE(review): this runs network I/O at import
# time, and the HTTP response object is never closed.
html = urllib2.urlopen('http://www.postimees.ee').read()
soup = BeautifulSoup(html)
texts = soup.findAll("p")
texts = ' '.join([t.getText() for t in texts])

def visible(element):
    """Return True when *element* is user-visible page text.

    Filters out text nested inside non-content tags (style, script,
    head, title, the document node) and HTML comments.  Returns False
    instead of raising for anything without a ``.parent`` attribute.

    Fix: the original relied on ``re`` without importing it, and its
    bare ``except:`` silently turned the resulting NameError into
    ``return False`` — so the comment filter never actually ran.  With
    ``re`` imported, the except clause is narrowed to AttributeError.
    """
    try:
        if element.parent.name in ['style', 'script', '[document]', 'head', 'title']:
            return False
        elif re.match('<!--.*-->', str(element)):
            return False
        return True
    except AttributeError:
        # no .parent (e.g. a plain string) -> treat as not visible
        return False

# NOTE(review): `texts` is a single joined string at this point, so filter()
# iterates over individual *characters*; characters have no .parent, visible()
# returns False for each, and visible_texts ends up empty.  The intent was
# probably to filter the soup elements *before* joining — verify.
visible_texts = filter(visible, texts)

# Experiment / driver script ("katse" = trial).
# These two globals are read by make_corp():
wiki_corp = PyCorpus('data/etwiki_tagged.pycorp', readonly=True)
rels_data = pd.read_table("tasks/vikipeedia/wiki_data.txt", sep=";")

# Build corpora and train a classifier for the birthplace relation
# ("sünnikoht" = birthplace).
rel = "sünnikoht"
make_corp(rel)
clf = rel_classify(rel)

# save classifier
# NOTE(review): sklearn.externals.joblib is long deprecated in modern
# scikit-learn — kept here to match the installed 0.x version.
from sklearn.externals import joblib
joblib.dump(clf, 'tasks/vikipeedia/synnikoht.pkl', compress=9)

# reload to sanity-check that the pickle round-trips
clf2 = joblib.load('tasks/vikipeedia/synnikoht.pkl')

# Same pipeline for the birth-date relation ("sünniaeg" = birth date).
rel = "sünniaeg"
make_corp(rel)
clf = rel_classify(rel)

# save classifier
joblib.dump(clf, 'tasks/vikipeedia/synniaeg.pkl', compress=9)