from optparse import OptionParser
import codecs
import re
import os

def read_list_arg1(option, opt, value, parser):
  """optparse callback: split the comma-separated VALUE into a list and
  store it under the option's dest (e.g. '-l pl,en' -> ['pl', 'en'])."""
  items = value.split(',')
  setattr(parser.values, option.dest, items)

def opt_parser_dimeco():
  """Build the command-line option parser for DAniEL evaluation runs.

  Returns an optparse.OptionParser; defaults point at the corpus_daniel
  annotation set.  The -l/--languages option accepts a comma-separated
  list parsed by the read_list_arg1 callback.
  """
  parser = OptionParser()

  parser.add_option("-o", "--output_file", dest="fileout", default="out.js",
                    help="Write report to FILEOUT [default : out.js]",
                    metavar="FILEOUT")

  # Help text previously advertised "ida.json" while the real default is
  # corpus_daniel/daniel.json; the text now matches the default.
  parser.add_option("-a", "--annotation_file", dest="annotation_file",
                    default="corpus_daniel/daniel.json", type="string",
                    help="use the annotation file ANNOTATION_FILE [default : corpus_daniel/daniel.json]",
                    metavar="ANNOTATION_FILE")

  parser.add_option("-c", "--path_corpus", dest="path_corpus",
                    default="corpus_daniel/files",
                    help="use the corpus PATH_CORPUS [default : corpus_daniel/files]",
                    metavar="PATH_CORPUS")

  parser.add_option("-d", "--path_data", dest="path_data", default="data/",
                    help="use the data in PATH_DATA [default : data/]",
                    metavar="PATH_DATA")

  parser.add_option(
    "-l", "--languages", dest="languages", default=["cn","en","pl","el","ru"],
    type="string", action="callback", callback=read_list_arg1,
    help="analyse the languages LANGUAGES [default : -l pl,el,cn,en,ru]",
    metavar="LANGUAGES")

  # Ratio thresholds used by the repetition-based filtering.
  parser.add_option("-r", "--length_ratio", dest="length_ratio", default=0.8,
                    type="float", help="[default : 0.8]", metavar="LENGTH_RATIO")

  parser.add_option("-x", "--length_ratio_loc", dest="length_ratio_loc",
                    default=0.8, type="float",
                    help="[default : 0.8]", metavar="LENGTH_RATIO_LOC")

  parser.add_option("-n", "--nbr_caract", dest="nbr_caract", default=0,
                    type="int", help="[default : 0]", metavar="NBR_CARACT")

  # Document-size buckets (in paragraphs): small vs large articles.
  parser.add_option("-p", "--petits_articles", dest="pet_art", default=5,
                    type="int", help="[default : 5]", metavar="PET_ART")

  parser.add_option("-g", "--grands_articles", dest="grd_art", default=6,
                    type="int", help="[default : [-p]+6]", metavar="GRD_ART")

  parser.add_option("-m", "--marge", dest="marge", default=2, type="int",
                    help="marge for headline", metavar="MARGE")

  parser.add_option('--relevantOnly', dest='relevantOnly', default=False,
                    action='store_true',
                    help="select only relevant document  [default : False]")

  return parser
#Tools general
def get_titre(path):
  """Return the first <p> paragraph of the HTML file at *path*.

  Newlines are stripped before matching (so multi-line paragraphs are
  found) and single quotes are replaced by spaces because the result is
  later embedded in single-quoted SQL strings.  Returns '' when the
  document contains no <p> element (previously an IndexError).
  """
  with open(path, 'r') as f:
    content = f.read()
  content = re.sub('\n', '', content)
  liste_paras = re.findall('<p>(.*?)</p>', content)
  if not liste_paras:
    return ''
  return re.sub('\'', ' ', liste_paras[0])

def write_file(path, content):
  """Write *content* to *path*, encoded as UTF-8."""
  with codecs.open(path, 'w', 'utf-8') as out:
    out.write(content)

def canonnical(terme, ref):
  """Return the canonical form of *terme* via the *ref* dictionary.

  ref maps a term to a (local form, canonical form) pair; the canonical
  entry wins when non-empty, otherwise the local form is returned.
  Unknown terms yield 'Unknown'.
  """
  try:
    couple = ref[terme]
  except KeyError:
    return 'Unknown'
  return couple[0] if couple[1] == '' else couple[1]

# Tools extract knowledge
def get_stats_langues(o):
  """Load per-language disease/location dictionaries and init counters.

  o: options object exposing .languages (list of language codes) and
  .path_data (directory with maladies_<lg>.json / dico_pays_<lg>.json).
  Returns (global counters, per-language counters, diseases, locations).

  Fixes a resource leak: the dictionary files were opened and never
  closed; they are now read inside `with` blocks.
  """
  # Template for the per-language confusion/location counters.
  counters = {'TP':0,'TN':0,'FP':0,'FN':0,'TPLI':0,'TPLE':0,'TLI':0,'TLE':0,
              'paires_TP':0,'paires_FP':0,'paires_FN':0,'paires_TN':0,
              'Good_loc':0,'tot_loc':0}
  stats_langue = {}
  dict_diseases = {}
  dict_locations = {}
  for lg in o.languages:
    stats_langue[lg] = dict(counters)  # fresh copy per language
    path_disease = os.path.join(o.path_data, 'maladies_%s.json' % lg)
    path_location = os.path.join(o.path_data, 'dico_pays_%s.json' % lg)
    # NOTE(review): the ".json" files are Python literals read with eval;
    # run only on trusted data files.
    with open(path_disease, 'r') as f:
      dict_diseases[lg] = eval(f.read())
    with open(path_location, 'r') as g:
      dict_locations[lg] = eval(g.read())
  dic_results = {'TP':0,'TN':0,'FP':0,'FN':0,'TPLI':0,'TPLE':0,'TLI':0,'TLE':0,
                 'good_loc':0,'tot_loc':0}
  return dic_results, stats_langue, dict_diseases, dict_locations


  
def decoder2(path_annotation):
  """Parse a semicolon-separated annotation file.

  Lines with 3 non-empty fields describe documents (keyed by the first
  field); lines with 4 fields describe annotations (keyed by the fourth
  field).  Quotes and newlines are stripped.  Returns (docs, annots).
  """
  liste_docs = {}
  liste_annots = {}
  with open(path_annotation, 'r') as fin:
    for raw in fin:
      cleaned = re.sub('"|\n', '', raw)
      champs = [c for c in re.split(';', cleaned) if c != '']
      if len(champs) == 3:
        # Quotes were already removed above, so the field is usable as-is.
        liste_docs[champs[0]] = champs
      elif len(champs) == 4:
        liste_annots[champs[3]] = champs
  return liste_docs, liste_annots

def decoder(path_annotation, list_lg):
  """Read a Python-literal annotation dict and bucket entries by language.

  path_annotation: file whose content eval()s to {id_doc: info_dict},
  where each info_dict has at least a 'language' key.
  list_lg: language codes to keep; the special code 'multi' collects
  every document regardless of language.
  Returns {lg: {id_doc: info_dict}}.

  Fixes: Python-2-only .iteritems() replaced by .items() (same behaviour
  on both versions) and the file handle is now closed via `with`.
  """
  # NOTE(review): the annotation file is a Python literal read with eval;
  # run only on trusted files.
  with open(path_annotation, 'r') as f:
    d = eval(f.read())
  d_res = {}
  for lg in list_lg:
    d_res[lg] = {}
  for id_doc, dinfo in d.items():
    lg = dinfo['language']
    if 'multi' in list_lg:
      d_res['multi'][id_doc] = dinfo
    if lg in list_lg:
      d_res[lg][id_doc] = dinfo
  return d_res


def evaluer_localisation(loc, ref_loc, implicit_location, dic_results):
  """Score one predicted location against its reference.

  Increments tot_loc/good_loc plus the implicit (TLI/TPLI) or explicit
  (TLE/TPLE) counters in dic_results.  Matching is case-insensitive.
  Returns (dic_results, good_loc).

  Bug fix: the 'worldwide' branch used to return dic_results alone while
  every other path returned a (dic_results, good_loc) pair, crashing any
  caller that unpacks the result; it now consistently returns
  (dic_results, False) without touching the counters.
  """
  good = 0
  good_loc = False
  if ref_loc == 'worldwide':
    # Worldwide events carry no localisable reference: skip scoring.
    return dic_results, good_loc
  dic_results['tot_loc'] += 1
  if loc.lower() == ref_loc.lower():
    good = 1
    good_loc = True
    dic_results['good_loc'] += 1
  if implicit_location:
    dic_results['TLI'] += 1
    dic_results['TPLI'] += good
  else:
    dic_results['TLE'] += 1
    dic_results['TPLE'] += good
  return dic_results, good_loc

def get_stats(dic_results):
  """Compute recall, precision, F-measures and location accuracy.

  dic_results must carry the TP/TN/FP/FN, TPLI/TPLE/TLI/TLE and
  good_loc/tot_loc counters.  Ratios with an empty denominator come out
  as 0. (or 'na' for the per-type location scores).  Returns a dict of
  named metrics.

  Cleanup: the dead `f_measure = 0.` assignment (never read anywhere)
  was removed.
  """
  TP = dic_results['TP']
  TN = dic_results['TN']
  FP = dic_results['FP']
  FN = dic_results['FN']
  TPLI = dic_results['TPLI']
  TPLE = dic_results['TPLE']
  TLI = dic_results['TLI']
  TLE = dic_results['TLE']
  totloc = dic_results['tot_loc']
  goodloc = dic_results['good_loc']
  s = TP + TN + FP + FN  # kept for the commented-out global metrics below
  if totloc != 0:
    localisation = float(goodloc) / totloc
  else:
    localisation = 0.
  if TP == 0:
    # No true positive: every precision/recall ratio is 0 by convention
    # (and this avoids a division by zero below).
    recall = 0.
    precision = 0.
    f1_measure = 0.
    f2_measure = 0.
    f05_measure = 0.
  else:
    recall = float(TP) / (TP + FN)
    precision = float(TP) / (TP + FP)
    f1_measure = (2 * recall * precision) / (recall + precision)
    f2_measure = (5 * recall * precision) / (recall + 4 * precision)
    f05_measure = (1.25 * recall * precision) / (recall + 0.25 * precision)
  # Per-type location accuracy; 'na' when that type never occurred.
  if TLI != 0:
    loc_imp = float(TPLI) / TLI
  else:
    loc_imp = 'na'
  if TLE != 0:
    loc_exp = float(TPLE) / TLE
  else:
    loc_exp = 'na'
  d = {
    'rappel' : recall,
    'precision' : precision,
    'F1-measure': f1_measure,
    'F2-measure': f2_measure,
    'F05-measure': f05_measure,
    'Implicit location':loc_imp,
    'Explicit location':loc_exp,
#    'pertinence' : float(TP+TN) / s,
    'TPLI, Total Loc_imp': (TPLI,TLI),
    'TPLE, Total Loc_exp': (TPLE,TLE),
    'Localisation':localisation
#    'erreur' : float(FP+FN) / s,
#    'taux_de_chute' : float(FP) / (FP+TN),
#    'specificite' : float(TN) / (FP+TN),
#    'overlap' : float(TP) / (s-TN),
#    'generalite' : float(TP) / s
  }
  return d

def evaluation_date(dico_dates, paire_cherchee, paire_trouvee, date, lg):
  """Record, per (disease, location) pair, the (date, language) at which
  the manual annotation ('manuel') and the system ('daniel') saw an event.

  Pairs equal to ('N','N') / ['N','N'] mean "no event" and are skipped.
  Returns the updated dico_dates.
  """
  if paire_cherchee != ('N', 'N'):
    slot = dico_dates.setdefault(paire_cherchee, {'manuel': [], 'daniel': []})
    slot['manuel'].append((date, lg))
  if paire_trouvee != ['N', 'N']:
    slot = dico_dates.setdefault(paire_trouvee, {'manuel': [], 'daniel': []})
    slot['daniel'].append((date, lg))
  return dico_dates

def decoder_paires(dic_paires):
  """Score found (disease, location) pairs against the expected ones.

  dic_paires['Trouvees'] maps system pairs to info; dic_paires['Cherchees']
  maps reference pairs to [doc count, doc list, language list].  Returns
  {'rappel', 'precision', 'F1-measure'} where rappel excludes Worldwide
  pairs from the denominator; false-negative statistics are printed as a
  side effect.

  Fixes: Python-2-only .iteritems()/print replaced by forms valid on
  both versions, and the bare except around the F1 computation replaced
  by an explicit zero-denominator check.
  """
  vp = 0
  fp = 0
  fn = 0
  world = 0
  stats_FN = {'Langues': {}, 'Multi': 0, 'Docs': {}}
#Faire la meme avec TP et FP->fonction externe stats
  for pt in dic_paires['Trouvees']:
    if pt in dic_paires['Cherchees']:
      vp += 1
    else:
      fp += 1
  for pc, infos in dic_paires['Cherchees'].items():
    nbr_doc, list_docs, langues = infos
    if pc not in dic_paires['Trouvees']:
      fn += 1
      if pc[1] == 'Worldwide':
        world += 1
      set_lg = set(langues)
      if len(set_lg) > 1:
        # Pair was seen in several languages: failure of the multilingual
        # aggregation rather than of one language.
        stats_FN['Multi'] += 1
      else:
        for lg in set_lg:
          stats_FN['Langues'][lg] = stats_FN['Langues'].get(lg, 0) + 1
      stats_FN['Docs'][nbr_doc] = stats_FN['Docs'].get(nbr_doc, 0) + 1
  if vp + fn > 0:
    rappel = float(vp) / (vp + fn)
  else:
    rappel = 0
  if vp != 0:
    rappel_local = float(vp) / (vp + fn - world)
    precision = float(vp) / (vp + fp)
  else:
    rappel_local = 0
    precision = 0
  if rappel_local + precision > 0:
    f1_measure = (2 * rappel_local * precision) / (rappel_local + precision)
  else:
    f1_measure = 0
  dic = {'rappel': rappel_local, 'precision': precision, 'F1-measure': f1_measure}
  # Same console output as the old `print 'FN', stats_FN` on Python 2.
  print('FN ' + str(stats_FN))
  return dic

def evenement(paire, dic_paires, dict_diseases, dict_locations, fichier, langue):
  """Canonicalise a (disease, location) pair and register it as expected.

  Uses canonnical() against dict_diseases / dict_locations; on a lookup
  failure (malformed entry), a placeholder 'Unknown dis'/'Unknown loc'
  is used and the offending value is printed.  Updates
  dic_paires['Cherchees'][pair] = [count, file list, language list] and
  returns (pair, dic_paires).

  Fixes: bare `except:` narrowed to Exception (no longer swallows
  KeyboardInterrupt/SystemExit) and Python-2-only prints made 2/3
  compatible.
  """
  try:
    dis_cherch_canonn = canonnical(paire[0], dict_diseases)
  except Exception:
    print('M: ' + str(paire[0]))
    dis_cherch_canonn = 'Unknown dis'
  try:
    loc_cherch_canonn = canonnical(paire[1], dict_locations)
  except Exception:
    print('L: ' + str(paire[1]))
    loc_cherch_canonn = 'Unknown loc'
  paire_cherchee = (dis_cherch_canonn, loc_cherch_canonn)
  if paire_cherchee not in dic_paires['Cherchees']:
    dic_paires['Cherchees'][paire_cherchee] = [0, [], []]
  entry = dic_paires['Cherchees'][paire_cherchee]
  entry[0] += 1       # occurrence count
  entry[1].append(fichier)
  entry[2].append(langue)
  return paire_cherchee, dic_paires

# Tools output
def creer_bdd_sql(dic_daniel_bdd):
  """Generate tools/out_daniel_bdd.sql with INSERT statements for the
  annotations/documents/diseases/places/results tables.

  dic_daniel_bdd maps id_doc -> (infos, extracted triple, result 4-tuple,
  file path).  INSERT templates come from the Python-literal file
  tools/dico_bdd_sql (read with eval -- trusted data only).

  Bug fix: newly-seen places were stored with cpt_dis as their primary
  key instead of cpt_loc, colliding with disease ids; they now use the
  place counter.
  """
  cpt_doc = 0
  cpt_dis = 0
  cpt_loc = 0
  cpt_annot = 0
  dico = {'annotations':{},'documents':{},'diseases':{},'places':{},'results':{}}
  for id_doc,data in dic_daniel_bdd.iteritems():
    cpt_doc+=1  # synthetic primary key for the document
    infos = data[0]
    fichier = data[3]
    langue = infos['langue']
    extr_d,extr_l,extr_c = data[1]  # extracted (disease, location, cases)
    res_d,res_l,res_p,res_f = data[2]  # extraction results
    dico['results'][cpt_doc] = (cpt_doc,str(res_d),str(res_l),str(res_p),res_f)
    path = infos['path']
    date = infos['date_collecte']
    url = infos['url']
    langue = infos['langue']
    source = re.split('_',infos['path'])[1]
    title = get_titre(fichier)
    if extr_d not in dico['diseases']:
      cpt_dis+=1
      dico['diseases'][extr_d] = (cpt_dis,extr_d,'en')
    if extr_l not in dico['places']:
      cpt_loc+=1
      # was (cpt_dis, ...): place ids collided with disease ids
      dico['places'][extr_l] = (cpt_loc,extr_l,'en')
    id_dis = dico['diseases'][extr_d][0]
    id_loc = dico['places'][extr_l][0]
    infos_doc = (cpt_doc,path,date,extr_c,source,url,title,langue,id_dis,id_loc)
    dico['documents'][cpt_doc] = infos_doc
    if len(infos['annotations'])>0:
      for a in infos['annotations']:
        cpt_annot+=1
        disease,location,cases = a
        comm = infos['comment']
        id_user = 1  # single annotator assumed
        annot_bdd = (cpt_annot,date,disease,location,cases,comm,cpt_doc,id_user)
        dico['annotations'][cpt_annot] = annot_bdd
        if disease not in dico['diseases']:
          cpt_dis+=1
          dico['diseases'][disease] = (cpt_dis,disease,langue)
        if location not in dico['places']:
          cpt_loc+=1
          dico['places'][location] = (cpt_loc,location,langue)
  # Emit one INSERT statement per table, rows comma-separated.
  f = open('tools/dico_bdd_sql','r').read()
  dico_insert_bdd = eval(f)
  out = ''
  for typ,elements in dico.iteritems():
    out+=dico_insert_bdd[typ]+'\n'
    c = 0
    for id_elem,infos in elements.iteritems():
      if c>0:
        out+=',\n'
      c+=1
      sortie = '('
      cpt = 0
      for elem in infos:
        if type(elem) == str:
          # quote strings and re-decode bytes so the SQL file is valid UTF-8
          sortie+="'"+unicode(elem,'utf-8','replace')+"'"
        else:
          sortie+=str(elem)
        if cpt<len(infos)-1:
          sortie+=','
        cpt+=1
      sortie+=')'
      out+=sortie
    out+=';\n'
  wfi = codecs.open('tools/out_daniel_bdd.sql','w',encoding='utf-8')
  print>>wfi,out
  wfi.close()

def colorier_document(fichier,events,location,implicit_location,loc):
  """Highlight the detected disease (and location) in an HTML document by
  wrapping them in <span> tags, writing the result to docs_colories/.

  events / location: sequences whose [0][2] element is the matched
  surface string -- presumably produced by the extraction step; verify
  against callers.  loc: display name used when the location is implicit.
  """
  path_dir = 'docs_colories/'
  content = codecs.open(fichier,'r','utf-8').read()
  # Work on UTF-8 byte strings so the regex substitutions below operate
  # on the same encoding as the extracted surface forms.
  content = content.encode('utf-8','replace')
  str_disease = events[0][2].encode('utf-8','replace')
  c = ['<span class="disease">','</span>']
  d = ['<span class="place">','</span>']
  motif = c[0] + str_disease + c[1]
  # NOTE(review): str_disease is used as an unescaped regex pattern; any
  # regex metacharacter in the surface form will misbehave (re.escape
  # would be safer -- confirm before changing).
  content = re.sub(str_disease,motif,content)
  len_content = len(content)
  if implicit_location == True:
    # Implicit location: prepend a banner right after <body>.
    motif = '<body>\n<p>Implicit Location : '+d[0]+loc+d[1]+'</p>'
    content = re.sub('<body>',motif,content)
  else:
   if location!=False:
    str_location = location[0][2].encode('utf-8','replace')
    motif = d[0]+str_location+d[1]
    content = re.sub(str_location, motif,content)
    if len(content)==len_content:
      # NOTE(review): deliberate crash trap -- 'b' is undefined and b/0
      # divides by zero -- fired when the location substitution changed
      # nothing; looks like leftover debugging aid.
      print fichier,b/0
  wfi =open(path_dir+fichier,'w')
  print>>wfi,content
  wfi.close()

def extract_results_expes(o,s,dic):
  """Write empty marker files under expes/ whose names encode the run's
  settings and scores (languages_threshold_metric1_..._.res).

  o: options object (languages, length_ratio, length_ratio_loc);
  s: metrics dict from get_stats(); dic: pair metrics from decoder_paires().
  """
  path_dir = 'expes/'
  lg = ",".join(o.languages)
  lt = o.length_ratio
  ll = o.length_ratio_loc
  r = s['rappel']
  p = s['precision']
  f = s['F1-measure']
  rp = dic['rappel']
  pp = dic['precision']
  fp = dic['F1-measure']
  I = s['Implicit location']
  E = s['Explicit location']
  L = s['Localisation']
  str_0 = "%s_%s_%s_%s_%s.res"%(lg,lt,r,p,f)
  str_1 = "%s_%s_%s_%s_%s.res"%(lg,ll,I,E,L)
  str_2 = "%s_%s_%s_%s_%s_%s.res"%(lg,lt,ll,rp,pp,fp)
  a = ['expes_filter/','expes_loc/','expes_paires/']
  # NOTE(review): str_0 is never used and str_2 is written twice; the
  # intent was probably [a[0]+str_0, a[1]+str_1, a[2]+str_2] -- confirm
  # before changing, as downstream tooling may depend on these filenames.
  l_sorties = [a[0]+str_1,a[1]+str_2,a[2]+str_2]
  for elem in l_sorties:
    print elem
    write_file(path_dir+elem,"")
def compute_f_mesure(recall, precision, beta):
  """Return the F-beta measure of *recall* and *precision*.

  beta > 1 weights recall higher, beta < 1 weights precision higher
  (beta = 1 gives the harmonic mean).  Returns 0. when both inputs are
  zero instead of raising ZeroDivisionError.
  """
  b = beta * beta
  den = recall + b * precision
  if den == 0:
    return 0.
  return float((1 + b) * recall * precision) / den
