#!/usr/local/bin/pythonw
# -*- coding: utf-8 -*-
import re
import sys
import os
import codecs
sys.path.append('./tools/')
sys.path.append('./rstr_max/')
import tool_dimeco as td
from tools_karkkainen_sanders import *
from rstr_max import *
from optparse import OptionParser

import pprint
from math import sqrt


##
#
##

def isRelevant(annotations) :
  """Return True when at least one annotation code is not 'N' (non-relevant)."""
  return any(a[0] != 'N' for a in annotations)

def read_results(results, annotations) :
  """Score detection results against the gold annotations.

  results     -- container of (id_doc, lg) keys for documents the system
                 flagged as relevant
  annotations -- {lg: {id_doc: {'annotations': [...], ...}}}

  Returns {'tp', 'fn', 'fp'} counts.  True negatives (non-relevant and not
  detected) are not tracked, matching the original behavior.
  """
  tp, fp, fn = 0, 0, 0
  for lg, dic_doc in annotations.items() :
    for id_doc, info in dic_doc.items() :
      # Evaluate each predicate once instead of up to twice per document.
      relevant = isRelevant(info['annotations'])
      detected = (id_doc, lg) in results
      if relevant and detected :
        tp += 1
      elif relevant :
        fn += 1
      elif detected :
        fp += 1
  return {'tp':tp,'fn':fn,'fp':fp}

def print_results(r) :
  """Print recall, precision and F-measure derived from tp/fn/fp counts.

  r -- dict with integer keys 'tp', 'fn', 'fp' (see read_results).
  Raises ZeroDivisionError when tp+fn or tp+fp is 0, as before.
  """
  pprint.pprint(r)
  R = float(r['tp']) / (r['tp'] + r['fn'])
  P = float(r['tp']) / (r['tp'] + r['fp'])
  # Single-argument print(...) is identical under Python 2's print statement
  # and forward-compatible with Python 3.
  print('R : %s' % R)
  print('P : %s' % P)
  print('F : %s' % td.compute_f_mesure(R, P, 1))

def meanstdv(x):
  """Return (mean, sample standard deviation) of the numeric sequence x.

  BUG FIX: the original applied sqrt() *inside* the accumulation loop, so the
  running sum of squared deviations was square-rooted and re-accumulated on
  every iteration, producing a meaningless result (and a ZeroDivisionError
  for a single element).  The sqrt belongs after the loop.

  For fewer than two values the sample std (n-1 divisor) is undefined; 0.0
  is returned.  An empty sequence still raises ZeroDivisionError, as before.
  """
  n = len(x)
  mean = sum(x) / float(n)
  if n < 2:
    return mean, 0.0
  variance = sum((a - mean) ** 2 for a in x) / float(n - 1)
  return mean, sqrt(variance)

##
#
##

def extr_relevantcontent(zones, chaine, maladies, loc = False):
  """Find maximal repeated substrings shared between an article's zones and
  the disease-name resource.

  zones    -- list of (start, end) byte offsets into `chaine`
  chaine   -- raw article text (utf-8 encoded byte string)
  maladies -- iterable of disease names (utf-8 encoded byte strings)
  loc      -- unused in this body; kept for the callers' signatures

  Returns (relevantcontent, rstr): relevantcontent is a list of
  (substring_length, disease_string_id, substring) sorted longest-first.
  """
  rstr = Rstr_max()
  set_zones = set()
  set_mala = set()
  liste_ss = []  # NOTE(review): never used; leftover from an earlier version
  cpt = 0

  # Zone strings are added first: rstr ids [0, nb_zones) identify zones.
  for z in zones :
    su1 = unicode(chaine[z[0]:z[1]],'utf-8')
    rstr.add_str(su1)
    set_zones.add(cpt)
    cpt += 1
  nb_zones = cpt

  # Disease names come after the zones: rstr ids >= nb_zones identify them.
  for m in maladies :
    if unicode(m,'utf-8').upper() != unicode(m,'utf-8'):# not an acronym
      su2 = unicode(m,'utf-8').lower()
    else:
      su2 = unicode(m,'utf-8')
    rstr.add_str(su2)
    set_mala.add(cpt)
    cpt += 1

  r = rstr.go()
  liste_desc = []
  tot = 0
  # Each entry of r is a maximal repeat `ss` of length l with nb occurrences.
  for (offset_end, nb), (l, start_plage) in r.iteritems():
    set_chapeau = 0   # occurrence count inside string 0 (the lead/"chapeau" zone)
    nbr_repet = 0
    set_occur = set() # ids of the strings the repeat occurs in
    ss = rstr.global_suffix[offset_end-l:offset_end]
    for o in xrange(start_plage, start_plage+nb) :
      tot += 1
      id_str = rstr.idxString[rstr.res[o]]
      offset = rstr.idxPos[rstr.res[o]]  # NOTE(review): computed but unused
      set_occur.add(id_str)
      if id_str == 0:
        set_chapeau += 1
      if nbr_repet < 2 and id_str in set_zones :# repetition count, for small texts
        nbr_repet += 1
    set_id_maladies = set_occur.intersection(set_mala)
    repetition = set_zones.intersection(set_occur)# must match every zone

    position = False
    if len(set_occur) > nb_zones or set_chapeau>=2:
      position = True
      if set_chapeau>=2:
        # Twice in the lead zone counts as covering all zones.
        repetition = [0,1]
    if position == True and len(repetition) >= nb_zones and set_id_maladies and nbr_repet > 1 :
      for id_mala in set_id_maladies :
        liste_desc.append((l, id_mala,ss))
  relevantcontent = sorted(liste_desc, reverse=True)# longest substrings first
  return relevantcontent, rstr# (ss length, disease string id, substring)

def getArticleSize(path_file, pp, ps) :
  """Classify an article as 'small', 'medium' or 'long' by paragraph count.

  path_file -- path of the article file
  pp        -- compiled regex matching paragraph openings (e.g. '<p')
  ps        -- compiled regex for sentence markers; unused (kept for the
               call sites' signatures)

  BUG FIX: the original had unreachable zone-building code after each
  `return`, plus dead sentence/cut computations that never influenced the
  returned label; both are removed.  Thresholds mirror article_zoning's
  defaults (o.pet_art = 5, o.pet_art + o.grd_art = 12).
  """
  with open(path_file, 'r') as f:
    chaine = f.read()
  paras = [m.start() for m in pp.finditer(chaine)]
  lim_basse = 5   # o.pet_art
  lim_haute = 12  # o.pet_art + o.grd_art
  if len(paras) <= lim_basse:
    return 'small'
  if len(paras) > lim_haute:
    return 'long'
  return 'medium'

def article_zoning(path_file, pp, ps,options):
  """Split an article into (start, end) text zones by size class.

  path_file -- path of the article file
  pp        -- compiled regex for paragraph openings (e.g. '<p')
  ps        -- compiled regex for sentence markers (e.g. '.')
  options   -- parsed options; uses .marge (cut index), .pet_art (small
               threshold) and .grd_art (added for the long threshold)

  Returns (zones, chaine): the zone offset pairs and the raw file text.
  Only change vs. the original: the file handle is managed with a context
  manager so it is closed even if read() raises.
  """
  with open(path_file, 'r') as f:
    chaine = f.read()
  paras = [m.start() for m in pp.finditer(chaine)]
  phrases = [m.start() for m in ps.finditer(chaine)]
  cut = options.marge
  lim_basse = options.pet_art
  lim_haute = lim_basse + options.grd_art
  # A second sentence starting after the third paragraph marker suggests a
  # leading date paragraph: shift the cut one paragraph further.
  if len(phrases) > 1 and len(paras) > lim_basse :
    if phrases[1] > paras[2] :
      cut = 3
  if len(phrases) == 0 :
    cut = 3
  zones = []

  if len(paras) <= lim_basse:  # small article: one zone covering everything
    zones.append((paras[0],len(chaine)-1))
  elif len(paras) > lim_haute:  # long article: head zone + tail zone
    zones.append((paras[0],paras[cut]))
    zones.append((paras[len(paras)-cut],len(chaine)-1))
  else:  # medium article: head zone + remainder
    zones.append((paras[0],paras[cut]))
    zones.append((paras[cut],len(chaine)-1))
  return zones,chaine

def search_event(relevantcontent, rstr, length_ratio):
  """Keep the repeats that cover enough of a disease name.

  relevantcontent -- list of (substring_length, disease_string_id, substring)
  rstr            -- the Rstr_max instance holding the indexed strings
  length_ratio    -- minimum |substring| / |disease| ratio to accept a match

  Returns a list of (disease_length, utf-8 encoded disease, substring),
  sorted descending (longest diseases first).
  """
  matches = []
  for length_ss, id_disease, ss in relevantcontent:
    disease = rstr.array_str[id_disease]
    coverage = float(length_ss) / len(disease)
    if coverage >= length_ratio:
      matches.append((len(disease), disease.encode("utf-8"), ss))
  return sorted(matches, reverse=True)

def analyser(doc,dict_diseases_lg, options, pp, ps):
  """Run the full detection pipeline on one document: zone it, extract the
  repeats shared with the disease resource, and filter by options.length_ratio."""
  zones, chaine = article_zoning(doc, pp, ps, options)
  content, rstr = extr_relevantcontent(zones, chaine, dict_diseases_lg, False)
  return search_event(content, rstr, options.length_ratio)

def analyser_loc(doc,dict_diseases_lg, options, pp, ps):
  """Location-oriented variant of analyser: same pipeline, but extraction is
  flagged with loc=True and filtering uses options.length_ratio_loc.

  BUG FIX: the original called article_zoning(doc, pp, ps) without the
  required `options` argument, which raised a TypeError on every call.
  """
  zones, chaine = article_zoning(doc, pp, ps, options)
  loc=True
  relevantcontent, rstr = extr_relevantcontent(zones, chaine, dict_diseases_lg,loc)
  verdict = search_event(relevantcontent, rstr, options.length_ratio_loc)
  return verdict

def compute_adaptation_threshold(dict_doc_zone, list_id_doc_selected, dict_diseases, length_ratio) :
  """Measure the 'adaptation' score distribution of maximal repeats over the
  selected documents, split by whether the repeat looks like a disease name.

  dict_doc_zone        -- {id_doc: (zones, raw utf-8 text)}
  list_id_doc_selected -- doc ids whose zone occurrences are counted
  dict_diseases        -- disease resource; only its keys are used
  length_ratio         -- coverage threshold (see is_diseases_substring)

  Returns {'not_subresource': (mean, std), 'subresource': (mean, std),
  'dict_repeat_adaptation': {repeat: score}} where score = d2/d1 is the
  fraction of documents containing the repeat at least twice (compute_d1_d2).
  """
  rstr = Rstr_max()
  mapping_zones_id_doc = []     # rstr string id -> (id_doc, zone index)
  mapping_id_doc_selected = {}  # rstr string id -> True when its doc is selected
  cpt = 0
  for id_doc, (zones, su) in dict_doc_zone.iteritems() :
    cpt_zone = 0
    for z in zones :
      su_zone = unicode(su[z[0]:z[1]],'utf-8')
      rstr.add_str(su_zone)
      if id_doc in list_id_doc_selected :
        mapping_id_doc_selected[cpt] = True
      mapping_zones_id_doc.append((id_doc, cpt_zone))
      cpt_zone += 1
      cpt += 1

  # Zone strings occupy ids [0, start_id_diseases); disease strings follow.
  start_id_diseases = len(mapping_zones_id_doc)

  mapping_id_diseases = []
  for d in dict_diseases.iterkeys() :
    su_diseases = unicode(d,'utf-8')
    rstr.add_str(su_diseases)
    mapping_id_diseases.append(su_diseases)


  list_adaptation_subdiseases     = []
  list_adaptation_not_subdiseases = []
  dict_repeats_adaptation         = {}

  r = rstr.go()
  for (offset_end, nb), (l, start_plage) in r.iteritems():
    ss = rstr.global_suffix[offset_end-l:offset_end]
    dict_zones_count = {}     # id_doc -> occurrence count (selected docs only)
    dict_diseases_count = {}  # disease string -> occurrence count
    for o in xrange(start_plage, start_plage+nb) :
      id_str = rstr.idxString[rstr.res[o]]
      if id_str < start_id_diseases :
        if id_str in mapping_id_doc_selected :
          id_doc,id_zone = mapping_zones_id_doc[id_str]
          dict_zones_count[id_doc] = dict_zones_count.setdefault(id_doc, 0) + 1
      else :
        disease = mapping_id_diseases[id_str-start_id_diseases]
        dict_diseases_count[disease] = dict_diseases_count.setdefault(disease, 0) + 1

#    if not flag_in_doc :
#      continue

    # Repeats occurring only in disease strings (no selected zone) are skipped.
    if len(dict_zones_count) == 0 :
      continue

    d = compute_d1_d2(dict_zones_count)
    adaptation = float(d['d2']) / d['d1']

    if is_diseases_substring(ss, dict_diseases_count, length_ratio) :
      list_adaptation_subdiseases.append(adaptation)

    else :
      list_adaptation_not_subdiseases.append(adaptation)

    dict_repeats_adaptation[ss] = adaptation

  return {
    'not_subresource'        : meanstdv(list_adaptation_not_subdiseases),
    'subresource'            : meanstdv(list_adaptation_subdiseases),
    'dict_repeat_adaptation' : dict_repeats_adaptation
  }

def compute_adapation_resources(dict_doc_zone, list_id_doc_selected, adaptation_threshold) :
  """Collect, per document, the repeats whose adaptation score reaches
  `adaptation_threshold` and whose zone distribution qualifies.

  NOTE(review): "adapation" is a typo for "adaptation", but callers use this
  name, so it is kept.

  dict_doc_zone        -- {id_doc: (zones, raw utf-8 text)}
  list_id_doc_selected -- doc ids; a repeat is kept only if it occurs in at
                          least one selected doc
  adaptation_threshold -- minimum d2/d1 score (see compute_d1_d2)

  Returns {id_doc: [(repeat, adaptation), ...]}.
  """
  rstr = Rstr_max()
  mapping_id_doc_selected = {}  # rstr string id -> True when its doc is selected
  mapping_zones_id_doc = []     # rstr string id -> (id_doc, zone index)
  cpt = 0
  for id_doc, (zones, su) in dict_doc_zone.iteritems() :
    cpt_zone = 0
    for z in zones :
      su_zone = unicode(su[z[0]:z[1]],'utf-8')
      rstr.add_str(su_zone)
      mapping_zones_id_doc.append((id_doc, cpt_zone))
      if id_doc in list_id_doc_selected :
        mapping_id_doc_selected[cpt] = True
      cpt_zone += 1
      cpt += 1

  r = rstr.go()
  results = {}

  for (offset_end, nb), (l, start_plage) in r.iteritems():
    ss = rstr.global_suffix[offset_end-l:offset_end]
    dict_zones_count    = {}  # id_doc -> {zone index -> count}
    dict_count          = {}  # id_doc -> total occurrence count
    dict_diseases_count = {}  # NOTE(review): never filled; no disease strings indexed here
    flag_in_doc = False
    for o in xrange(start_plage, start_plage+nb) :
      id_str = rstr.idxString[rstr.res[o]]
      if id_str in mapping_id_doc_selected :
        flag_in_doc = True
      id_doc,id_zone = mapping_zones_id_doc[id_str]
      dict_count[id_doc] = dict_count.setdefault(id_doc, 0) + 1
      dict_zones_count.setdefault(id_doc, {})
      dict_zones_count[id_doc].setdefault(id_zone, 0)
      dict_zones_count[id_doc][id_zone] += 1

    # Keep only repeats that touch at least one selected document.
    if not flag_in_doc :
      continue

    d = compute_d1_d2(dict_count)
    adaptation = float(d['d2']) / d['d1']

    if adaptation < adaptation_threshold :
      continue

    for id_doc, dict_zone in dict_zones_count.iteritems() :
      nb_zones   = len(dict_doc_zone[id_doc][0])
      flag_chapo = False
      flag_zones = False
      if nb_zones == 1 :
        # Single-zone doc: require the repeat at least twice in that zone.
        for z,cpt in dict_zone.iteritems() :
          if cpt > 1 :
            flag_zones = True
      else :
        flag_zones = len(dict_zone) == nb_zones           # present in every zone
        flag_chapo = 0 in dict_zone and dict_zone[0] > 1  # or twice in the lead zone


      if (flag_zones or flag_chapo) :
        results.setdefault(id_doc, [])
        results[id_doc].append((ss, adaptation))

  return results

def compute_relevantContent(dict_doc_zone, dict_diseases, length_ratio) :
  """Detect documents whose zones repeat a substring of a disease name.

  dict_doc_zone -- {id_doc: (zones, raw utf-8 text)}
  dict_diseases -- disease resource; only its keys are used
  length_ratio  -- minimum |repeat| / |disease| coverage ratio

  Returns {id_doc: [(repeat, [matched disease names]), ...]} for documents
  where a qualifying repeat spans every zone (or occurs twice in the lead /
  only zone).
  """
  rstr = Rstr_max()
  mapping_zones_id_doc = []  # rstr string id -> (id_doc, zone index)
  for id_doc, (zones, su) in dict_doc_zone.iteritems() :
    cpt_zone = 0
    for z in zones :
      su_zone = unicode(su[z[0]:z[1]],'utf-8')
      rstr.add_str(su_zone)
      mapping_zones_id_doc.append((id_doc, cpt_zone))
      cpt_zone += 1

  # Zone strings occupy ids [0, start_id_diseases); disease strings follow.
  start_id_diseases = len(mapping_zones_id_doc)

  mapping_id_diseases = []
  for d in dict_diseases.iterkeys() :
    su_diseases = unicode(d,'utf-8')
    rstr.add_str(su_diseases)
    mapping_id_diseases.append(su_diseases)

  # NOTE(review): these two lists are never used in this function.
  list_adaptation_subdiseases = []
  list_adaptation_not_subdiseases = []

  r = rstr.go()
  results = {}
  for (offset_end, nb), (l, start_plage) in r.iteritems():
    ss = rstr.global_suffix[offset_end-l:offset_end]
    dict_zones_count = {}     # id_doc -> {zone index -> count}
    dict_diseases_count = {}  # disease string -> occurrence count
    for o in xrange(start_plage, start_plage+nb) :
      id_str = rstr.idxString[rstr.res[o]]
      if id_str < start_id_diseases :
        id_doc,id_zone = mapping_zones_id_doc[id_str]
        dict_zones_count.setdefault(id_doc, {})
        dict_zones_count[id_doc].setdefault(id_zone, 0)
        dict_zones_count[id_doc][id_zone] += 1
      else :
        disease = mapping_id_diseases[id_str-start_id_diseases]
        dict_diseases_count[disease] = dict_diseases_count.setdefault(disease, 0) + 1

    # Repeats occurring only inside disease strings are skipped.
    if len(dict_zones_count) == 0 :
      continue

    list_diseases = list_diseases_substring(ss, dict_diseases_count, length_ratio)
    if len(list_diseases) == 0 :
      continue

    for id_doc, dict_zone in dict_zones_count.iteritems() :
      nb_zones = len(dict_doc_zone[id_doc][0])
      flag_chapo = False
      flag_zones = False
      if nb_zones == 1 :
        # Single-zone doc: require the repeat at least twice in that zone.
        for z,cpt in dict_zone.iteritems() :
          if cpt > 1 :
            flag_zones = True
      else :
        flag_zones = len(dict_zone) == nb_zones           # present in every zone
        flag_chapo = 0 in dict_zone and dict_zone[0] > 1  # or twice in the lead zone

      if flag_zones or flag_chapo :
        results.setdefault(id_doc, [])
        results[id_doc].append((ss, list_diseases))

  return results



#      for id_zone, cpt in dict_zone.iteritems() :
#        pass

#    d = compute_d1_d2(dict_zones_count)
#    adaptation_subdiseases = float(d['d2']) / d['d1']
#
#    if is_disease_substring(ss, dict_diseases_count, length_ratio) :
#      list_adaptation_subdiseases.append(adaptation_subdiseases)
#
#    else :
#      list_adaptation_not_subdiseases.append(adaptation_subdiseases)

def is_diseases_substring(substring, dict_diseases_count, length_ratio) :
  """Return True when `substring` covers at least `length_ratio` of any
  disease name appearing as a key of `dict_diseases_count`.

  The occurrence counts (values) are irrelevant to this test, so only the
  keys are iterated; the original also bound an unused `count` variable.
  """
  len_substring = len(substring)
  return any(float(len_substring) / len(disease) >= length_ratio
             for disease in dict_diseases_count)

def list_diseases_substring(substring, dict_diseases_count, length_ratio) :
  """Return the disease names (keys of `dict_diseases_count`) of which
  `substring` covers at least `length_ratio` of the length.

  Counts (values) are not used; the original bound an unused `count`.
  """
  len_substring = len(substring)
  return [disease for disease in dict_diseases_count
          if float(len_substring) / len(disease) >= length_ratio]

def compute_d1_d2(dict_doc_cpt) :
  """Count documents overall (d1) and documents with >= 2 occurrences (d2).

  dict_doc_cpt -- {id_doc: occurrence count}

  Only the counts are needed for d2, so iterate values directly (the
  original iterated items and ignored the keys).
  """
  # Threshold 2 here; a 'features' variant used cpt >= 3.
  d2 = sum(1 for cpt in dict_doc_cpt.values() if cpt >= 2)
  return {
    'd1' : len(dict_doc_cpt),
    'd2' : d2
  }

def merge_resources(resources) :
  """Flatten {id_doc: [(repeat, adaptation), ...]} into {repeat: True}.

  The doc ids and adaptation scores are discarded, so iterate values only
  (the original iterated items and ignored the key); direct assignment
  replaces the redundant setdefault(repeat, True).
  """
  merged = {}
  for list_resource in resources.values() :
    for repeat, _adaptation in list_resource :
      merged[repeat] = True
  return merged

def diff_list(l1, l2) :
  """Return the elements of l1 not present in l2, preserving l1's order
  (and duplicates, as before).

  The original tested `elt not in l2` per element, O(len(l1)*len(l2)); the
  membership set makes it O(len(l1)+len(l2)).  Elements must be hashable --
  true for the doc-id lists this is called with.
  """
  exclude = set(l2)
  return [elt for elt in l1 if elt not in exclude]

if __name__ == "__main__":
  p      = td.opt_parser_dimeco()
  (o, _) = p.parse_args(sys.argv[1:])
  dict_docs_annot = td.decoder(o.annotation_file,o.languages) 
  pattern_paras = re.compile(re.escape('<p'))
  pattern_sentences = re.compile(re.escape('.'))
  dic_results,stats_langues,dict_diseases,dict_locations = td.get_stats_langues(o)
  h = open(os.path.join(o.path_data,'sources.json'),'r').read()
  sources = eval(h)
#  out = 'analyze_events/detected_events'#Fichier sortie : 1 ligne par doc pertinent
  out = 'out.txt'
  wfi = codecs.open(out,'w','utf-8')
  dic_st = {}


  for lg, dict_doc in dict_docs_annot.iteritems() :
    results = {}
    dict_doc_zone = {}
    print lg
    for id_doc, info in dict_doc.iteritems() :
      if o.relevantOnly and not isRelevant(info['annotations']) :
        continue
      path = os.path.join(o.path_corpus,info['path'])
      zones, su = article_zoning(path, pattern_paras, pattern_sentences, o)
      dict_doc_zone[id_doc] = (zones, su)

#    compute_adaptation(dict_doc_zone, dict_diseases[lg], o.length_ratio)
    dict_results = compute_relevantContent(dict_doc_zone, dict_diseases[lg], o.length_ratio)
    list_relevant_id_doc     = dict_results.keys()
    list_id_doc              = dict_doc.keys()
    list_non_relevant_id_doc = diff_list(list_id_doc, list_relevant_id_doc)

    adaptation_relevant     = compute_adaptation_threshold(dict_doc_zone, list_relevant_id_doc, dict_diseases[lg], o.length_ratio)
    adaptation_non_relevant = compute_adaptation_threshold(dict_doc_zone, list_non_relevant_id_doc, dict_diseases[lg], o.length_ratio)
    adaptation_all          = compute_adaptation_threshold(dict_doc_zone, list_id_doc, dict_diseases[lg], o.length_ratio)

    print adaptation_relevant['not_subresource']
    print adaptation_relevant['subresource']
    print adaptation_non_relevant['not_subresource']
    print adaptation_non_relevant['subresource']
    print adaptation_all['not_subresource']
    print adaptation_all['subresource']

#    adaptation_subresource = adapatation['subresource']
    adaptation_threshold = adaptation_all['subresource'][0]
    resources = compute_adapation_resources(dict_doc_zone, list_relevant_id_doc, adaptation_threshold)
    merged_resource = merge_resources(resources)
#    print merged_resource
    for r in merged_resource.iterkeys() :
      print r,'  ',
      if r in adaptation_relevant['dict_repeat_adaptation'] :
        print 'relevant :: ', adaptation_relevant['dict_repeat_adaptation'][r],
      if r in adaptation_non_relevant['dict_repeat_adaptation'] :
        print 'non_relevant :: ',adaptation_non_relevant['dict_repeat_adaptation'][r],
      if r in adaptation_all['dict_repeat_adaptation'] :
        print 'all :: ', adaptation_all['dict_repeat_adaptation'][r],
      print
    print len(merged_resource)
    exit(0)

#    for id_doc, match_diseases in dict_results.iteritems() :
#      list_diseases = [';'.join(m) for m in [l[1] for l in match_diseases]]
#      results.setdefault((id_doc, lg), list_diseases)
#      disease = ' '.join(list_diseases)
#      print>>wfi, id_doc,u'%s'%disease





#      size = getArticleSize(fichier, pattern_paras, pattern_sentences)
#      events = analyser(fichier,dict_diseases[lg], o, pattern_paras, pattern_sentences)

#      if len(events)>0:

  wfi.close()
#  print dic_st

  r = read_results(results, dict_docs_annot)
  pprint.pprint(r)
  R = float(r['tp']) / (r['tp'] + r['fn'])
  P = float(r['tp']) / (r['tp'] + r['fp'])
  print 'R : %s'%(R)
  print 'P : %s'%(P)
  print 'F : %s'%(td.compute_f_mesure(R,P,1))



