#!/usr/local/bin/pythonw
# -*- coding: utf-8 -*-
import re
import sys
import os
import codecs
sys.path.append('./tools/')
sys.path.append('./rstr_max/')
import tool_dimeco as td
from tools_karkkainen_sanders import *
from rstr_max import *
from optparse import OptionParser

import pprint

##
#
##

def isRelevant(annotations) :
  """Return True if any annotation code does not start with 'N' (non-relevant)."""
  return any(a[0] != 'N' for a in annotations)

def read_results(results, annotations) :
  """Score the detected documents against the gold annotations.

  Parameters:
    results     -- dict keyed by (id_doc, lg) for every document the system
                   flagged as relevant (values are the detected disease names)
    annotations -- gold data: {lg: {id_doc: {'annotations': [...], ...}}}

  Returns a dict {'tp':..,'fn':..,'fp':..}; true negatives are not counted.
  """
  tp, fp, fn = 0, 0, 0
  # .items() instead of .iteritems(): identical iteration results under
  # Python 2, and keeps this function runnable under Python 3 as well.
  for lg, dic_doc in annotations.items() :
    for id_doc, info in dic_doc.items() :
      flagRelevant  = isRelevant(info['annotations'])
      flagInResults = (id_doc, lg) in results
      if flagRelevant and flagInResults :
        tp += 1
      elif flagRelevant and not flagInResults :
        fn += 1
      elif not flagRelevant and flagInResults :
        fp += 1
  return {'tp':tp,'fn':fn,'fp':fp}

##
#
##

def extr_relevantcontent(zones, chaine, maladies, loc = False):
  """Find maximal repeated substrings shared between the article zones and
  the disease-name list, via an rstr_max suffix structure.

  Parameters:
    zones    -- list of (start, end) byte offsets into `chaine`
    maladies -- iterable of disease names (utf-8 byte strings)
    loc      -- unused here; kept for interface compatibility with callers

  Returns (relevantcontent, rstr): relevantcontent is a list of tuples
  (substring_length, disease_string_id, substring) sorted descending, and
  rstr is the Rstr_max object (callers use it to map ids back to strings).
  """
  rstr = Rstr_max()
  set_zones = set()  # string ids belonging to article zones
  set_mala = set()   # string ids belonging to disease names
  liste_ss = []
  cpt = 0

  # Add each article zone as one string of the suffix structure.
  for z in zones :
    su1 = unicode(chaine[z[0]:z[1]],'utf-8')
    rstr.add_str(su1)
    set_zones.add(cpt)
    cpt += 1
  nb_zones = cpt

  # Add each disease name, lowercased unless fully uppercase
  # (all-caps names are treated as acronyms and kept as-is).
  for m in maladies :
    if unicode(m,'utf-8').upper() != unicode(m,'utf-8'):# not an acronym
      su2 = unicode(m,'utf-8').lower()
    else:
      su2 = unicode(m,'utf-8')
    rstr.add_str(su2)
    set_mala.add(cpt)
    cpt += 1

  # Enumerate the maximal repeats over all added strings.
  r = rstr.go()
  liste_desc = []
  tot = 0
  for (offset_end, nb), (l, start_plage) in r.iteritems():
    set_chapeau = 0    # NOTE: an int despite the name — occurrence count in string 0
                       # (the first zone, presumably the headline/lead — confirm)
    nbr_repet = 0
    set_occur = set()  # ids of the strings this repeat occurs in
    ss = rstr.global_suffix[offset_end-l:offset_end]
    for o in xrange(start_plage, start_plage+nb) :
      tot += 1
      id_str = rstr.idxString[rstr.res[o]]
      offset = rstr.idxPos[rstr.res[o]]
      set_occur.add(id_str)
      if id_str == 0:
        set_chapeau += 1
      if nbr_repet < 2 and id_str in set_zones :# repetitions, for short texts
        nbr_repet += 1
    set_id_maladies = set_occur.intersection(set_mala)
    repetition = set_zones.intersection(set_occur)# must cover every zone

    # A repeat is well "positioned" when it spans more strings than there are
    # zones (i.e. reaches into the disease list) or occurs twice in string 0.
    position = False
    if len(set_occur) > nb_zones or set_chapeau>=2:
      position = True
      if set_chapeau>=2:
        repetition = [0,1]  # force the zone-coverage test below to pass

    if position == True and len(repetition) >= nb_zones and set_id_maladies and nbr_repet > 1 :
      for id_mala in set_id_maladies :
        liste_desc.append((l, id_mala, ss))

  relevantcontent = sorted(liste_desc, reverse=True)# longest matches first
  return relevantcontent, rstr#, relevantss

def getArticleSize(path_file, pp, ps, lim_basse=5, lim_haute=12) :
  """Classify an article as 'small', 'medium' or 'large' by paragraph count.

  Parameters:
    path_file -- path to the raw article file
    pp        -- compiled regex matching paragraph starts (e.g. '<p')
    ps        -- compiled regex matching sentence boundaries; kept for
                 interface compatibility but unused (the original sentence
                 logic never affected the returned size)
    lim_basse -- max paragraph count for a 'small' article (was hard-coded 5)
    lim_haute -- min paragraph count (exclusive) for 'large' (was hard-coded 12)

  Returns one of 'small', 'medium', 'large'.
  """
  # The original also computed sentence offsets, a 'cut' index and an empty
  # 'zones' list, none of which influenced the return value — removed.
  with open(path_file, 'r') as f:
    chaine = f.read()
  paras = [m.start() for m in pp.finditer(chaine)]
  if len(paras) <= lim_basse:
    return 'small'
  if len(paras) > lim_haute:
    return 'large'
  return 'medium'

def article_zoning(path_file, pp, ps, options):
  """Split an article into character-offset zones of interest.

  Parameters:
    path_file -- path to the raw article file
    pp        -- compiled regex matching paragraph starts (e.g. '<p')
    ps        -- compiled regex matching sentence boundaries (e.g. '. ')
    options   -- option object providing marge, pet_art and grd_art

  Returns (zones, text): zones is a list of (start, end) offsets —
  the whole article for small texts, head+tail paragraphs for large
  ones, head+rest for medium ones.
  """
  with open(path_file, 'r') as fh:
    texte = fh.read()
  para_starts = [m.start() for m in pp.finditer(texte)]
  sent_starts = [m.start() for m in ps.finditer(texte)]

  cut = options.marge
  small_limit = options.pet_art
  large_limit = small_limit + options.grd_art
  # Widen the cut when the article opens with date paragraphs, or when no
  # sentence boundary was found at all.
  if len(sent_starts) > 1 and len(para_starts) > small_limit:
    if sent_starts[1] > para_starts[2]:
      cut = 3
  if len(sent_starts) == 0:
    cut = 3

  last = len(texte) - 1
  zones = []
  if len(para_starts) <= small_limit:
    # Small article: a single zone covering everything from the first tag.
    zones.append((para_starts[0], last))
  elif len(para_starts) > large_limit:
    # Large article: keep the head and the tail paragraphs only.
    zones.append((para_starts[0], para_starts[cut]))
    zones.append((para_starts[len(para_starts) - cut], last))
  else:
    # Medium article: head paragraphs, then the remainder.
    zones.append((para_starts[0], para_starts[cut]))
    zones.append((para_starts[cut], last))
  return zones, texte

def search_event(relevantcontent, rstr, length_ratio):
  """Keep the diseases whose matched substring covers enough of the name.

  Parameters:
    relevantcontent -- tuples (match_length, disease_id, substring) from
                       extr_relevantcontent
    rstr            -- object whose array_str maps a disease id to its string
    length_ratio    -- minimum fraction of the disease name that the match
                       must cover

  Returns a list of (name_length, utf8_name, substring) tuples, longest
  disease names first.
  """
  matches = []
  for match_len, disease_id, substring in relevantcontent:
    disease = rstr.array_str[disease_id]
    coverage = float(match_len) / len(disease)
    if coverage >= length_ratio:
      matches.append((len(disease), disease.encode("utf-8"), substring))
  # Sort on name length (descending) so the best candidate comes first.
  return sorted(matches, reverse=True)

def analyser(doc,dict_diseases_lg, options, pp, ps):
  """Run the full detection pipeline on one document: zone the article,
  extract shared substrings against the disease list, then filter them
  by options.length_ratio. Returns the sorted list of detected events."""
  zones, texte = article_zoning(doc, pp, ps, options)
  contenu, rstr = extr_relevantcontent(zones, texte, dict_diseases_lg, loc=False)
  return search_event(contenu, rstr, options.length_ratio)

if __name__ == "__main__":
  # Command-line options: annotation file, corpus paths, thresholds, ...
  p      = td.opt_parser_dimeco()
  (o, _) = p.parse_args(sys.argv[1:])
  # Gold annotations: {lg: {id_doc: {'annotations': [...], 'path': ...}}}
  dict_docs_annot = td.decoder(o.annotation_file,o.languages) 
  # Paragraphs start at '<p' tags; sentences approximated by '. ' boundaries.
  pattern_paras = re.compile(re.escape('<p'))
  pattern_sentences = re.compile(re.escape('. '))
  dic_results,stats_langues,dict_diseases,dict_locations = td.get_stats_langues(o)
  # NOTE(review): eval() on file contents executes arbitrary code; if
  # sources.json is strict JSON, json.loads would be safer — confirm format.
  h = open(os.path.join(o.path_data,'sources.json'),'r').read()
  sources = eval(h)
  out = 'analyze_events/detected_events'# output file: one line per relevant doc
  wfi = codecs.open(out,'w','utf-8')
#  dic_st = {}
  results = {}

  # Run the detection pipeline on every annotated document.
  for lg, dict_doc in dict_docs_annot.iteritems() :
#    dic_st[lg] = 0
    for id_doc, info in dict_doc.iteritems() :
#      if o.relevantOnly and not isRelevant(info['annotations']) :
#        continue
      fichier = os.path.join(o.path_corpus,info['path'])
#      size = getArticleSize(fichier, pattern_paras, pattern_sentences)
#      dic_st[lg] += 1
      events = analyser(fichier,dict_diseases[lg], o, pattern_paras, pattern_sentences)
      if len(events)>0:
        # events is sorted descending, so events[0] is the best candidate.
        disease = unicode(events[0][1],'utf-8','replace')
        print>>wfi, id_doc,u'%s'%disease
        results.setdefault((id_doc, lg), disease)

  wfi.close()
#  print dic_st

  # Evaluate recall / precision / F-measure against the gold annotations.
  r = read_results(results, dict_docs_annot)
  pprint.pprint(r)
  # NOTE(review): ZeroDivisionError if there is no relevant document
  # (tp+fn == 0) or no detection at all (tp+fp == 0).
  R = float(r['tp']) / (r['tp'] + r['fn'])
  P = float(r['tp']) / (r['tp'] + r['fp'])
  print 'R : %s'%(R)
  print 'P : %s'%(P)
  print 'F : %s'%(td.compute_f_mesure(R,P,1))



