#!/usr/local/bin/pythonw
# -*- coding: utf-8 -*-
import re
import sys
#from string import *
import tool_dimeco as td
import os

sys.path.append('./rstr_max/')
from tools_karkkainen_sanders import *
from rstr_max import *
from optparse import OptionParser


def extr_relevantcontent(zones,chaine,maladies,loc):
  """Find dictionary entries (diseases or locations) repeated across zones.

  Builds a generalized suffix structure (Rstr_max) over the article's
  text zones plus every entry of ``maladies``, then keeps maximal
  repeated substrings that occur in every zone AND inside at least one
  dictionary entry.

  Args:
    zones: list of (start, end) byte offsets into ``chaine``.
    chaine: raw UTF-8 article text (Python 2 byte string).
    maladies: iterable of dictionary entries (disease or location names).
    loc: True when matching locations (disables lowercasing).

  Returns:
    (relevantcontent, rstr, tot):
      relevantcontent: sorted (match_length, string_id) pairs, longest first;
      rstr: the Rstr_max object (its array_str maps ids back to strings);
      tot: total number of occurrences examined.
  """
  rstr = Rstr_max()
  set_zones = set()   # ids of strings that are article zones
  set_mala = set()    # ids of strings that are dictionary entries
  liste_ss = []
  cpt = 0
  # Add each text zone as one string of the suffix structure.
  for z in zones :
    if loc == False:
      su1 = unicode(chaine[z[0]:z[1]],'utf-8')#.lower()
    else:
      su1 = unicode(chaine[z[0]:z[1]],'utf-8')
    rstr.add_str(su1)
    set_zones.add(cpt)
    cpt += 1
  nb_zones = len(set_zones)
  # Add each dictionary entry after it, remembering its id in set_mala.
  for m in maladies :
    # NOTE(review): len(m) < 0 can never be true, so this space padding
    # is dead code (probably meant to be a positive length threshold).
    if len(m) < 0 :
      m = ' %s '%(m)
    if loc == False:
      if unicode(m,'utf-8').upper()!=unicode(m,'utf-8'):# not an acronym
        su2 = unicode(m,'utf-8').lower()
      else:
        su2 = unicode(m,'utf-8')
    else: # no lowercasing for locations
      su2 = unicode(m,'utf-8')
#    su2 = unicode(' %s '%m,'utf-8').lower()
    rstr.add_str(su2)
    set_mala.add(cpt)
    cpt += 1
  r = rstr.go()
  liste_desc = []
  tot =0
  # Each maximal repeat: key (offset_end, occurrence count),
  # value (repeat length, start of its occurrence range in rstr.res).
  for (offset_end, nb), (l, start_plage) in r.iteritems():
    set_chapeau = 0   # occurrences inside string 0 (the article head)
    nbr_repet = 0     # capped count of occurrences inside zones
    set_occur = set() # ids of strings containing this repeat
    ss = rstr.global_suffix[offset_end-l:offset_end]
    for o in xrange(start_plage, start_plage+nb) :
      tot+=1
      id_str = rstr.idxString[rstr.res[o]]
      offset = rstr.idxPos[rstr.res[o]]
      set_occur.add(id_str)
      if id_str==0:
        set_chapeau+=1
      if nbr_repet < 2 and id_str in set_zones :# repetitions for small texts
        nbr_repet += 1

    set_id_maladies = set_occur.intersection(set_mala)
    repetition = set_zones.intersection(set_occur)# must match every zone
    position = False
    if len(set_occur)> nb_zones or set_chapeau>=2:
      position = True
      if set_chapeau>=2:
        repetition = [0,1]
    
    # Keep repeats present in all zones, actually repeated, and found in
    # at least one dictionary entry.
    if position == True and len(repetition) >= nb_zones and set_id_maladies and nbr_repet > 1 :
#    if len(set_occur)>=1 and set_id_maladies and len(repetition)>=1 and nbr_repet>=1:
#      print ss
      for id_mala in set_id_maladies :
        if loc == False:
          liste_desc.append((l, id_mala))
        else:
          str_loc = rstr.array_str[id_mala]
          # locations must match starting at their first character
          if str_loc[0]== ss[0]:
            liste_desc.append((l, id_mala))
#      ss = rstr.global_suffix[offset_end-l:offset_end]
#      liste_ss.append((l,ss))
  relevantcontent = sorted(liste_desc,reverse=True)# take the n largest first
  return relevantcontent, rstr,tot# (repeat length, dictionary-entry id) pairs

def article_zoning(path_file, pp, ps, pet, grd):
  """Split an article file into text zones (character-offset intervals).

  Reads the whole file, locates paragraph starts (pattern ``pp``) and
  sentence terminators (pattern ``ps``), then returns one zone for small
  articles, or a head zone plus a tail/remainder zone for larger ones.

  Args:
    path_file: path to the raw article file.
    pp: compiled regex matching paragraph openers (e.g. '<p').
    ps: compiled regex matching sentence terminators (e.g. '.').
    pet: paragraph-count threshold under which the article is "small".
    grd: added to ``pet`` to obtain the "large article" threshold.

  Returns:
    (zones, chaine): list of (start, end) offsets and the raw text.
  """
  # NOTE(review): this function contained unresolved SVN conflict markers
  # (.mine vs .r27); the .r27 rescue heuristic was kept — the .mine side
  # had it fully commented out.
  f = open(path_file, 'r')
  chaine = f.read()
  f.close()  # the original leaked this handle

  paras = [m.start() for m in pp.finditer(chaine)]
  phrases = [m.start() for m in ps.finditer(chaine)]
  cut = 2
  lim_basse = pet
  lim_haute = pet + grd
  if len(phrases) > 1 and len(paras) > lim_basse:
    if phrases[1] > paras[2]:  # leading date-only paragraphs
      cut = 3
  if len(phrases) == 0:
    cut = 3
  # Rescue heuristic for bad article trimming: push the cut past any
  # leading paragraph that starts after the second sentence boundary.
  for i in range(1, min(5, len(paras))):
    try:
      if len(phrases) == 0 or paras[i] > phrases[1]:
        cut = i + 2
        break  # new frontier found
    except IndexError:  # phrases has a single element
      print(path_file)
      break

  zones = []
  if len(paras) <= lim_basse:  # small article OR paragraph == sentence
    zones.append((paras[0], len(chaine) - 1))
  elif len(paras) > lim_haute:  # large article: keep head and tail only
    zones.append((paras[0], paras[cut]))
    zones.append((paras[len(paras) - 2], len(chaine) - 1))
  else:  # medium article: head + remainder
    zones.append((paras[0], paras[cut]))
    zones.append((paras[cut], len(chaine) - 1))
  return zones, chaine


def get_relevant_diseases(zones,chaine,maladies):
  """Unimplemented stub — presumably intended to wrap
  extr_relevantcontent() for diseases; currently returns None.
  TODO confirm intent before calling."""
  pass

def article_zoning_bs(path):
  """Placeholder for an alternative (BeautifulSoup-style) article zoning.

  Bug fixed: the original read ``path_file``, a name that does not exist
  in this scope (the parameter is ``path``), so every call raised
  NameError.  The function remains a stub and returns None.

  Args:
    path: path to the raw article file.
  """
  f = open(path, 'r')
  chaine = f.read()  # read but unused until the function is implemented
  f.close()



def search_event(relevantcontent, rstr, length_ratio):
  """Keep disease matches whose matched length is a large enough
  fraction of the full disease-name length.

  Args:
    relevantcontent: iterable of (match_length, disease_string_id) pairs.
    rstr: the Rstr_max object; its ``array_str`` maps ids to strings.
    length_ratio: minimal accepted ratio match_length / len(name).

  Returns:
    list of (name_length, utf8_encoded_name) pairs, longest names first.
  """
  matches = []
  seen = set()
  for match_len, disease_id in relevantcontent:
    name = rstr.array_str[disease_id]
    full_len = len(name)
    # keep the candidate only if enough of the disease name was matched
    if float(match_len) / full_len >= length_ratio:
      encoded = name.encode("utf-8")
      matches.append((full_len, encoded))
      seen.add(encoded)
  matches.sort(reverse=True)  # longest disease names first
  return matches

def get_verdict(doc, dic_diseases_lg, options) :
  """Unimplemented stub.

  NOTE(review): article_zoning_bs() is itself a stub that returns None,
  so the 2-tuple unpacking below raises TypeError if this is ever
  called; no caller in this file uses it.
  """
  zones, chaine = article_zoning_bs(doc)
  pass


def analyser(doc, dict_diseases_lg, options, pp, ps):
  """Detect disease mentions in one article.

  Args:
    doc: path to the article file.
    dict_diseases_lg: disease dictionary for the article's language.
    options: parsed options (uses pet_art, grand_art, length_ratio).
    pp, ps: compiled paragraph/sentence regexes for article_zoning().

  Returns:
    sorted list of (name_length, utf8_name) disease matches.
  """
  pet = options.pet_art
  grd = options.grand_art
  zones, chaine = article_zoning(doc, pp, ps, pet, grd)
  loc = False
  # Bug fix: extr_relevantcontent returns THREE values (an occurrence
  # counter was added to its return); the old 2-tuple unpack raised
  # ValueError on every call.
  relevantcontent, rstr, tot = extr_relevantcontent(zones, chaine, dict_diseases_lg, loc)
  verdict = search_event(relevantcontent, rstr, options.length_ratio)
  return verdict

def analyser_loc(doc, dict_diseases_lg, options, pp, ps):
  """Detect location mentions in one article (no lowercasing).

  Same pipeline as analyser() but with loc=True and the
  location-specific length_ratio_loc threshold.

  Returns:
    sorted list of (name_length, utf8_name) location matches.
  """
  pet = options.pet_art
  grd = options.grand_art
  zones, chaine = article_zoning(doc, pp, ps, pet, grd)
  loc = True
  # Bug fix: unpack all three return values of extr_relevantcontent
  # (the old 2-tuple unpack raised ValueError on every call).
  relevantcontent, rstr, tot = extr_relevantcontent(zones, chaine, dict_diseases_lg, loc)
  verdict = search_event(relevantcontent, rstr, options.length_ratio_loc)
  return verdict

def evaluer(verdict, annot):
  """Compare a system verdict against the reference annotation.

  Args:
    verdict: list of detected events ([] means "nothing found").
    annot: reference annotation; annot[0] == 'N' means "no event".

  Returns:
    'VP' | 'VN' | 'FP' | 'FN' (French TP/TN/FP/FN labels).
  """
  found = (verdict != [])
  relevant = (annot[0] != 'N')
  if found and relevant:
    return 'VP'
  if found:
    return 'FP'
  if relevant:
    return 'FN'
  return 'VN'

def lg_subdic(path_annotation, list_lg):
  """Load the annotation dict and split it per language.

  Args:
    path_annotation: path to a file containing the repr() of a dict
      mapping doc ids to info dicts (each having a 'langue' key).
    list_lg: languages to keep.

  Returns:
    dict: language -> {doc_id: info} for languages in ``list_lg``.
  """
  # SECURITY: eval() executes arbitrary code from the annotation file;
  # only use with trusted files (ast.literal_eval would be safer).
  f = open(path_annotation, 'r')
  d = eval(f.read())
  f.close()
  d_res = {}
  for lg in list_lg:
    d_res[lg] = {}
  # .items() instead of the Python-2-only .iteritems(); also drops the
  # pointless dinfo alias of the original.
  for id_doc, info in d.items():
    if info['langue'] in list_lg:
      d_res[info['langue']][id_doc] = info
  return d_res


def decoder(path_annotation, list_lg):
  """Load the annotation dict and bucket documents per language.

  Like lg_subdic(), but when 'multi' is among the requested languages
  every document is additionally copied into the 'multi' bucket.

  Args:
    path_annotation: path to a file containing the repr() of a dict.
    list_lg: languages to keep (may include the pseudo-language 'multi').

  Returns:
    dict: language -> {doc_id: info}.
  """
  # SECURITY: eval() executes arbitrary code; trusted input only.
  f = open(path_annotation, 'r')
  d = eval(f.read())
  f.close()
  d_res = {}
  for lg in list_lg:
    d_res[lg] = {}
  # .items() instead of the Python-2-only .iteritems(); dropped the
  # pointless dinfo alias.
  for id_doc, info in d.items():
    lg = info['langue']
    if 'multi' in list_lg:
      d_res['multi'][id_doc] = info
    if lg in list_lg:
      d_res[lg][id_doc] = info
  return d_res

def decoder2(path_annotation):
  """Parse a semicolon-separated annotation file.

  Each line is stripped of double quotes and newlines, split on ';',
  and empty fields are dropped.  Three-field lines describe documents
  (keyed by the first field); four-field lines describe annotations
  (keyed by the fourth field).  Other lines are ignored.

  Returns:
    (liste_docs, liste_annots): two dicts of field lists.
  """
  liste_docs = {}
  liste_annots = {}
  f = open(path_annotation, 'r')
  for ligne in f.readlines():
    cleaned = re.sub('"|\n', '', ligne)
    fields = [x for x in re.split(';', cleaned) if x != '']
    if len(fields) == 3:
      # quotes/newlines were already stripped, so the fields are clean
      liste_docs[fields[0]] = fields
    elif len(fields) == 4:
      liste_annots[fields[3]] = fields
  f.close()
  return liste_docs, liste_annots

def get_stats(dic_results):
  """Compute evaluation metrics from raw confusion counters.

  Args:
    dic_results: dict with keys VP/VN/FP/FN (confusion matrix),
      VPLI/TLI (implicit-location hits/totals), VPLE/TLE (explicit
      ones), and good_loc/tot_loc (overall localisation counts).

  Returns:
    dict of recall, precision, F1/F2/F0.5, localisation ratios
    ('NA' when the corresponding total is zero) and accuracy
    ('pertinence').
  """
  vp = dic_results['VP']
  vn = dic_results['VN']
  fp = dic_results['FP']
  fn = dic_results['FN']
  vpli = dic_results['VPLI']
  vple = dic_results['VPLE']
  tli = dic_results['TLI']
  tle = dic_results['TLE']
  tot_loc = dic_results['tot_loc']
  good_loc = dic_results['good_loc']
  total = vp + vn + fp + fn

  localisation = float(good_loc) / tot_loc if tot_loc != 0 else 0.

  if vp == 0:
    # no true positive: every precision/recall-derived ratio is zero
    recall = precision = f1 = f2 = f05 = 0.
  else:
    recall = float(vp) / (vp + fn)
    precision = float(vp) / (vp + fp)
    f1 = (2 * recall * precision) / (recall + precision)
    f2 = (5 * recall * precision) / (recall + 4 * precision)
    f05 = (1.25 * recall * precision) / (recall + 0.25 * precision)

  loc_imp = float(vpli) / tli if tli != 0 else 'NA'
  loc_exp = float(vple) / tle if tle != 0 else 'NA'

  return {
    'rappel': recall,
    'precision': precision,
    'F1-measure': f1,
    'F2-measure': f2,
    'F05-measure': f05,
    'Implicit location': loc_imp,
    'Explicit location': loc_exp,
    'pertinence': float(vp + vn) / total,
    'VPLI, Total Loc_imp': (vpli, tli),
    'VPLE, Total Loc_exp': (vple, tle),
    'Localisation': localisation,
  }

def evaluer_localisation(loc, ref_loc, implicit_location, dic_results):
  """Score one predicted location against the reference, updating counters.

  'worldwide' references are skipped entirely.  Comparison is
  case-insensitive.  Counters mutated in place: tot_loc/good_loc
  overall, TLI/VPLI for implicit locations, TLE/VPLE for explicit ones.

  Returns:
    the (mutated) dic_results dict.
  """
  if ref_loc == 'worldwide':
    return dic_results

  dic_results['tot_loc'] += 1
  hit = 1 if loc.lower() == ref_loc.lower() else 0
  if hit:
    dic_results['good_loc'] += 1
  total_key, good_key = ('TLI', 'VPLI') if implicit_location == True else ('TLE', 'VPLE')
  dic_results[total_key] += 1
  dic_results[good_key] += hit
  return dic_results
#commande profile: -m cProfile french_pipe_33.py annotations_acl.csv

def decoder_paires(dic_paires):
  """Evaluate (disease, location) pairs found vs. searched.

  Args:
    dic_paires: {'Trouvees': {pair: [count, docs, langs]},
                 'Cherchees': {pair: [count, docs, langs]}}.

  Returns:
    dict with 'rappel' (recall excluding Worldwide-only misses),
    'precision' and 'F1-measure'.

  Raises:
    ZeroDivisionError when there are no found or no searched pairs
    (unchanged from the original behaviour).
  """
  vp = 0
  fp = 0
  fn = 0
  world = 0  # missed pairs whose reference location is 'Worldwide'
  stats_FN = {'Langues': {}, 'Multi': 0, 'Docs': {}}
  # .items()/plain iteration instead of the Python-2-only .iteritems();
  # the first loop never used the values, so iterate keys only.
  for pt in dic_paires['Trouvees']:
    if pt in dic_paires['Cherchees']:
      vp += 1
    else:
      fp += 1
  for pc, infos in dic_paires['Cherchees'].items():
    nbr_doc, list_docs, langues = infos
    if pc not in dic_paires['Trouvees']:
      fn += 1
      if pc[1] == 'Worldwide':
        world += 1
      set_lg = set(langues)
      if len(set_lg) > 1:  # multilingual pair missed
        stats_FN['Multi'] += 1
      else:
        for lg in set_lg:
          stats_FN['Langues'][lg] = stats_FN['Langues'].get(lg, 0) + 1
      stats_FN['Docs'][nbr_doc] = stats_FN['Docs'].get(nbr_doc, 0) + 1
  # Removed the unused full-recall local; the returned recall ignores
  # pairs whose only reference location is Worldwide.
  rappel_local = float(vp) / (vp + fn - world)
  precision = float(vp) / (vp + fp)
  f1_measure = (2 * rappel_local * precision) / (rappel_local + precision)
  return {'rappel': rappel_local, 'precision': precision, 'F1-measure': f1_measure}

def canonnical(terme, ref):
  """Return the canonical form of a term from a reference dictionary.

  ``ref`` maps a term to a (surface_form, canonical_form) pair; the
  canonical form is preferred, falling back to the surface form when
  the canonical slot is empty.

  Raises:
    KeyError when ``terme`` is not in ``ref``.
  """
  surface, canon = ref[terme][0], ref[terme][1]
  return surface if canon == '' else canon

def evenement(paire,dic_paires,dict_diseases,dict_locations,fichier,langue):# encode the searched pair
  """Canonicalise an annotated (disease, location) pair and record it
  under dic_paires['Cherchees'].

  Args:
    paire: reference annotation, (disease, location, ...).
    dic_paires: running {'Trouvees', 'Cherchees'} accumulator.
    dict_diseases, dict_locations: canonical-form dictionaries.
    fichier: path of the source document.
    langue: language code of the document.

  Returns:
    (paire_cherchee, dic_paires).

  NOTE(review): appends to the module-level global ``paires_cherchees``
  created in the __main__ block — NameError if called before it exists.
  The bare excepts below deliberately fall back to 'Unknown dis' /
  'Unknown loc' when a term is missing from the dictionaries.
  """
  try:
    dis_cherch_canonn = canonnical(paire[0],dict_diseases)
  except:
    print 'M:',paire[0]
    dis_cherch_canonn = 'Unknown dis'
  try:
    loc_cherch_canonn = canonnical(paire[1],dict_locations)
  except:
    print 'L:',paire[1]
    loc_cherch_canonn = 'Unknown loc'
  paire_cherchee = (dis_cherch_canonn,loc_cherch_canonn)
  paires_cherchees.append(paire_cherchee)
  if paire_cherchee not in dic_paires['Cherchees']:
     dic_paires['Cherchees'][paire_cherchee]=[0,[],[]]
  # per searched pair: [occurrence count, documents, languages]
  dic_paires['Cherchees'][paire_cherchee][0]+=1
  dic_paires['Cherchees'][paire_cherchee][1].append(fichier)
  dic_paires['Cherchees'][paire_cherchee][2].append(langue)
  return paire_cherchee,dic_paires

if __name__ == "__main__":
  # NOTE(review): this block contained four unresolved SVN conflict
  # regions (<<<<<<< .mine / >>>>>>> .r27), which made the file
  # unparseable.  Resolution: the .mine event/pair-extraction pipeline
  # was kept (its paires_cherchees global and its `dic` result are
  # required by evenement() and by the output section below), merged
  # with the .r27 missing-file guard and document counter.
  p = td.opt_parser_dimeco()
  (o, _) = p.parse_args(sys.argv[1:])
  dict_docs_annot = lg_subdic(o.annotation_file, o.languages)

  compt_doc = 0
  stats_fp = {}
  dic_paires = {'Trouvees': {}, 'Cherchees': {}}
  stats_langue = {}
  dict_diseases = {}
  dict_locations = {}
  pattern_paras = re.compile(re.escape('<p'))
  # .mine version kept; the .r27 side used a multilingual '[.。]' variant.
  pattern_sentences = re.compile(re.escape('.'))

  for lg in o.languages:
    stats_langue[lg] = {'VP':0,'VN':0,'FP':0,'FN':0,'VPLI':0,'VPLE':0,'TLI':0,'TLE':0,'paires_TP':0,'paires_FP':0,'paires_FN':0,'paires_TN':0,'Good_loc':0,'tot_loc':0}
    path_disease = os.path.join(o.path_data, 'maladies_%s.json' % lg)
    path_location = os.path.join(o.path_data, 'dico_pays_%s.json' % lg)
    # SECURITY: eval() on data files executes arbitrary code — trusted
    # input only (ast.literal_eval would be safer).
    f = open(path_disease, 'r').read()
    dict_diseases[lg] = eval(f)
    g = open(path_location, 'r').read()
    dict_locations[lg] = eval(g)
  dic_results = {'VP':0,'VN':0,'FP':0,'FN':0,'VPLI':0,'VPLE':0,'TLI':0,'TLE':0,'good_loc':0,'tot_loc':0}
  h = open(os.path.join(o.path_data, 'sources.json'), 'r').read()
  sources = eval(h)
  cpt = 0  # number of documents actually analysed
  for lg, dict_doc in dict_docs_annot.items():
    paires_trouvees = []
    paires_cherchees = []  # module global appended to by evenement()
    cpt_relevant = 0
    for id_doc, info in dict_doc.items():
      fichier = os.path.join(o.path_corpus, info['path'])
      if not os.path.isfile(fichier):  # .r27 guard against missing files
        print(fichier)
        continue
      cpt += 1
      events = analyser(fichier, dict_diseases[lg], o, pattern_paras, pattern_sentences)
      paire = info['annotations'][0]
      if paire != ['N', 'N', 'N']:
        paire_cherchee, dic_paires = evenement(paire, dic_paires, dict_diseases[lg], dict_locations[lg], fichier, lg)
      else:
        paire_cherchee = ('', '')
      verdict = evaluer(events, paire)
      search_loc = True
      multi_event = False
      if events != [] and search_loc == True:
        if multi_event == False:
          events = [events[0]]  # keep only the best disease match
        for ouinon in events:
          # default location: the source country embedded in the path
          loc = re.search('(_)(.*?)(_)', info['path']).group(2)
          location = analyser_loc(fichier, dict_locations[lg], o, pattern_paras, pattern_sentences)
          implicit_location = True
          if location != []:
            loc = location[0][1]  # explicit location found in the text
            implicit_location = False
          else:
            try:
              loc = sources[loc]
            except KeyError:
              loc = sources[lg]
          maladie = ouinon[1]
          # canonicalise both members so pairs can be compared
          disease_canonn = canonnical(maladie, dict_diseases[lg])
          location_canonn = canonnical(loc, dict_locations[lg])
          paire_trouvee = (disease_canonn, location_canonn)
          if paire_trouvee not in dic_paires['Trouvees']:
            dic_paires['Trouvees'][paire_trouvee] = [0, [], []]
          dic_paires['Trouvees'][paire_trouvee][0] += 1
          dic_paires['Trouvees'][paire_trouvee][1].append(fichier)
          dic_paires['Trouvees'][paire_trouvee][2].append(lg)
          paires_trouvees.append(paire_trouvee)
          ref_loc = info['annotations'][0][1].decode('utf-8', 'replace')
          if ref_loc != 'N':
            dic_results = evaluer_localisation(paire_trouvee[1], paire_cherchee[1], implicit_location, dic_results)
      dic_results[verdict] += 1
  dic = decoder_paires(dic_paires)
  print(dic)
  print(cpt)
  s = get_stats(dic_results)
  print(s)
  outing = False
  out_loc = False
  # NOTE(review): o.grd_art here vs options.grand_art in analyser() —
  # confirm the option parser defines both dests.
  seuil_long = o.pet_art + o.grd_art
  out_path = "expes/%s_%s/" % (o.pet_art, seuil_long)
  liste_dirs = ["expes_filter", "expes_loc", "expes_paires"]
  for d in liste_dirs:
    try:
      os.makedirs(out_path + d)
    except OSError:  # directory already exists
      pass
  if outing == True:
    # py2-only file() replaced by open(); empty marker files encode the
    # scores in their names.
    str_fileout = out_path + "expes_filter/%s_%s_%s_%s_%s.res" % (",".join(o.languages), o.length_ratio, s['rappel'], s['precision'], s['F1-measure'])
    f = open(str_fileout, 'w')
    f.write("")
    f.close()
    str_fileout = out_path + "/expes_loc/%s_%s_%s_%s_%s_%s.res" % (",".join(o.languages), o.length_ratio, o.length_ratio_loc, s['Implicit location'], s['Explicit location'], s['Localisation'])
    f = open(str_fileout, 'w')
    f.write("")
    f.close()
    str_fileout = out_path + "expes_paires/%s_%s_%s_%s_%s_%s.res" % (",".join(o.languages), o.length_ratio, o.length_ratio_loc, dic['rappel'], dic['precision'], dic['F1-measure'])
    f = open(str_fileout, 'w')
    f.write("")
    f.close()