#!/usr/bin/python
'''
*******************************************************************************************************************************************************************
Analysis Graphics Maker Script
Version: 2.0
Author: Victor Codocedo
Description: This script calculates the main analytic tools from the datasets given. It comprises all datasets (db, plain text files, etc).
The options of execution are shown by simply executing the script without parameters.

*******************************************************************************************************************************************************************
'''
import sys
import psycopg2
import random
import matplotlib
from pylab import *
from numpy import loadtxt
from numpy import amax
from numpy import mean
from numpy import array
from numpy import std

# T-Student Function
def getTD(n):
    """Return the two-tailed 95% Student-t critical value for n degrees
    of freedom.

    Exact table values for n = 1..30; above that the bracketing table
    value for the next-larger tabulated df is used, falling back to the
    asymptotic (normal) value 1.960 for very large n.
    """
    tD = {1:12.706,2:4.303,3:3.182,4:2.776,5:2.571,6:2.447,7:2.365,8:2.306,9:2.262,10:2.228,11:2.201,12:2.179,13:2.160,14:2.145,15:2.131,16:2.120,17:2.110,18:2.101,19:2.093,20:2.086,21:2.080,22:2.074,23:2.069,24:2.064,25:2.060,26:2.056,27:2.052,28:2.048,29:2.045,30:2.042 }
    tdBig = {40:2.021,60:2.000,120:1.980,1e5:1.960}
    if n in tD:
        return tD[n]
    # Bug fix: the original iterated tdBig with iteritems(), whose order
    # is undefined in Python 2 -- the first threshold tested could be the
    # wrong one (e.g. n=50 could match 120 before 60). Iterate the
    # thresholds in ascending order instead.
    for limit, value in sorted(tdBig.items()):
        if n < limit:
            return value
    return tdBig[1e5]

def do_error_bar(x,e,lw=2,w=2,m=0.0):
    """Draw a vertical error bar of half-height e, centred at (x, m).

    x  -- horizontal position of the bar
    e  -- error magnitude (bar spans m-e .. m+e)
    lw -- line width of the strokes
    w  -- half-width of the top/bottom caps
    m  -- centre value the bar is drawn around

    Bug fix: the original body read an undefined global `m`, so every
    call raised NameError; `m` is now an explicit parameter (default 0.0
    keeps the signature backward-compatible).
    """
    plot([x,x],[m+e,m-e],color='k',lw=lw)
    plot([x-w,x+w],[m+e,m+e],color='k',lw=lw)
    plot([x-w,x+w],[m-e,m-e],color='k',lw=lw)

def getData(dbname):
	"""Compose the per-team analysis data for *dbname*.

	Reads the hypergraph statistics file and the author/concept relevance
	logs, joins them with the team composition stored in the PostgreSQL
	database, writes the merged table to
	"data/<dbname>_data_compile.txt" (the file later consumed by
	readData/show_headers) and returns the composed rows, each as
	[id, r(A), r(C), card(A), card(C), card(oldA), card(oldC), relv(A), relv(C)].

	NOTE(review): the original wrote to "data/<dname>data_compile.txt"
	(undefined name `dname`, missing underscore), called the nonexistent
	file.writeline(), built each `salida` row without ever writing it,
	and returned the undefined name `buffe` -- all fixed here.
	"""
	# READ THE REPEATANCE OF AUTHORS AND CONCEPTS PER TEAMS IN HYPERGRAPH
	f = open("hypergraph/outputs/" + dbname + '-hypergraphcool.txt', 'r')
	socialrepeat = {}
	conceptrepeat = {}
	oldguysprop = {}
	oldconceptsprop = {}
	talla = {}
	tallaC = {}
	c = 0
	for line in f:
		if c != 0:  # skip the header line
			data = line.split("\t")
			# "None" in the repeatance columns means no value -> 0
			v1 = 0 if data[1] == "None" else float(data[1])
			v2 = 0 if data[2] == "None" else float(data[2])
			tid = int(data[0])
			# proportion of previously-seen concepts; guard division by zero
			if float(data[4]) != 0.0:
				oldconceptsprop[tid] = float(data[6]) / float(data[4])
			else:
				oldconceptsprop[tid] = 0
			tallaC[tid] = float(data[4])
			oldguysprop[tid] = float(data[5]) / float(data[3])
			talla[tid] = float(data[5])
			socialrepeat[tid] = v1
			conceptrepeat[tid] = v2
		c += 1
	f.close()  # bug fix: handle was left open

	# CONNECT TO DATABASE
	myconnection = "dbname='" + dbname + "' user='ecos' host='proteine.local' password='ecos'"
	conn = psycopg2.connect(myconnection)
	cur = conn.cursor()

	# ------------------- RELEVANCY OF AUTHORS ----------------------------
	authors = loadtxt("graph/relevance_files/" + dbname + "-log-relevance_author.txt",'<int,float>',usecols=(0,4))
	authorsd = {}
	for i, v in authors.tolist():
		authorsd[i] = v
	maxirelv = 1  # max(authorsd.values()) -- normalisation currently disabled

	# Teams (publication -> authors); abstract '$$$' marks excluded papers.
	sql = """select id_publication, id_author from pub_auth, publication where id=id_publication and abstract <> '$$$' order by id_publication"""
	cur.execute(sql)  # bug fix: the original executed this query twice
	table = [cur.fetchall()]

	teams = {}
	for rows in table:
		for cell in rows:
			teams.setdefault(cell[0], []).append(cell[1])

	# Mean author relevancy per team.
	teamrelv = {}
	for pid in teams:
		relvt = 0.0
		for aid in teams[pid]:
			relvt += authorsd[aid] / maxirelv
		teamrelv[pid] = float(relvt / float(len(teams[pid])))

	# ------------------- RELEVANCY OF CONCEPTS ---------------------------
	concept = loadtxt("graph/relevance_files/" + dbname + "-log-relevance_concept.txt",'<int,float>',usecols=(0,4))
	conceptd = {}
	for i, v in concept.tolist():
		conceptd[i] = v
	maxirelvC = 1  # max(conceptd.values()) -- normalisation currently disabled

	sql = """select id_publication, id_concept from pub_concept, publication where id=id_publication and abstract <> '$$$' order by id_publication"""
	cur.execute(sql)  # bug fix: the original executed this query twice
	table = [cur.fetchall()]

	teams = {}
	for rows in table:
		for cell in rows:
			teams.setdefault(cell[0], []).append(cell[1])

	# Mean concept relevancy per team.
	teamrelvC = {}
	for pid in teams:
		relvt = 0.0
		for cid in teams[pid]:
			# bug fix: the original divided by maxirelv (the author
			# normaliser) here; use the concept normaliser. Both are
			# currently 1, so output is unchanged.
			relvt += conceptd[cid] / maxirelvC
		teamrelvC[pid] = float(relvt / float(len(teams[pid])))

	# ------------------- COMPOSITION OF DATA -----------------------------
	buffe = []
	faile = open("data/" + dbname + "_data_compile.txt", "w")
	faile.write("id\tr(A)\tr(C)\tcard(A)\tcard(C)\tcard(oldA)\tcard(oldC)\trelv(A)\trelv(C)\n")
	for i, k in teamrelv.items():
		row = [i, socialrepeat[i], conceptrepeat[i], talla[i], tallaC[i],
			oldguysprop[i], oldconceptsprop[i], k, teamrelvC[i]]
		faile.write("\t".join(str(val) for val in row) + "\n")
		buffe.append(row)
	faile.close()

	cur.close()
	conn.close()
	return buffe

def cleanArray(buffe):
	"""Drop rows whose first or second column is the -1 sentinel.

	Returns a new list of [col0, col1] pairs; the input is not modified.
	"""
	return [[row[0], row[1]] for row in buffe
		if row[0] != -1 and row[1] != -1]

def binnize(bintype, buffe, x, y, nb):
    """Partition the (x, y) data into nb bins.

    bintype 's': equal-population bins, each holding len(buffe)/nb
    consecutive points (the remainder goes to the last bin).
    Any other bintype: equal-range bins via matplotlib's numpy histogram.

    Returns [xbins, ybins, pob] where xbins/ybins map bin index -> list
    of x/y values in that bin and pob is the per-bin population.
    """
    if bintype == 's':
        size = len(buffe)
        width = int(size / nb)
        edges = [x[0]]
        counts = []
        for step in range(nb):
            edges.append(x[step * width])
            counts.append(width)
        # leftover points that didn't divide evenly land in the last bin
        counts[nb - 1] += size - nb * width
        edges.append(x[len(x) - 1])
        pob, b = counts, edges
    else:
        pob, b = matplotlib.numpy.histogram(x, bins=nb)

    xbins = {}
    ybins = {}
    start = 0
    for idx, count in enumerate(pob):
        if count != 0:
            xbins[idx] = [x[j] for j in range(start, start + count)]
            ybins[idx] = [y[j] for j in range(start, start + count)]
            start += count
    return [xbins, ybins, pob]

def invertArray(buffe):
	"""Return a new list with the two columns of every pair swapped."""
	return [[pair[1], pair[0]] for pair in buffe]


def readData(dbname,cols):
	"""Read two columns from "data/<dbname>_data_compile.txt".

	cols -- pair of column indices [c1, c2] to extract.
	Returns (headers, results): headers is [name_c1, name_c2] from the
	first line; results is a list of [float_c1, float_c2] per data line.

	Bug fix: the original never closed the file handle; it is now
	released via try/finally.
	"""
	c1 = cols[0]
	c2 = cols[1]
	headers = []
	results = []
	faile = open("data/"+dbname+"_data_compile.txt","r")
	try:
		for i, line in enumerate(faile):
			data = line.replace("\n","").split("\t")
			if i == 0:
				headers = [data[c1], data[c2]]
			else:
				results.append([float(data[c1]),float(data[c2])])
	finally:
		faile.close()
	return headers,results

def show_headers(dbname):
	"""Print "index- name" for every header column of the compiled data
	file "data/<dbname>_data_compile.txt", so the user can pick the
	column indices to plot.

	Bug fix: the original never closed the file handle. The print now
	uses the parenthesised single-argument form, which behaves
	identically in Python 2 and 3.
	"""
	faile = open("data/"+dbname+"_data_compile.txt","r")
	try:
		line = faile.readline()
	finally:
		faile.close()
	data = line.replace("\n","").split("\t")
	for i in range(len(data)):
		print(str(i) + "- " + data[i])


'''**************************************************************
****************************************************************
****************************************************************
'''
# Plot configuration: the dataset identifiers to process (dbs) and, per
# identifier, the legend label, line color and point marker to use.
markersize = 10
#dbs = ["10","15","20"]
dbs = ["0.45", "0.47", "0.49"]
labels = {
    "20": "K=20",
    "10": "K=10",
    "15": "K=15",
    "0.45": "alpha=0.45",
    "0.47": "alpha=0.47",
    "0.49": "alpha=0.49",
}
colors = {
    "20": "red",
    "10": "black",
    "15": "green",
    "0.45": "blue",
    "0.47": "red",
    "0.49": "green",
}
marker = {
    "20": "s",
    "10": "o",
    "15": "^",
    "0.45": "s",
    "0.47": "o",
    "0.49": "^",
}
# --- Command-line dispatch ---------------------------------------------
# "show" mode: print the numbered column headers of the first dataset's
# compiled data file, so the user can pick the [a,b] column indices.
# NOTE(review): sys.argv[1] is read before the length check below, so
# running the script with no arguments raises IndexError instead of
# printing the usage text.
if sys.argv[1] == "show":
	show_headers(dbs[0])
	exit()	
# Too few positional arguments: print the usage help and quit.
if len(sys.argv)<3:
    print "\n*** usage:",sys.argv[0]," [number of bins] [a,b] OPTIONAL: [bt=s|r :(s)=size,r=range] [style=[st - see below]] [normx] [normy] [save=filename.eps]"
    print "st options:"
    print "(1) - Error bar"
    print "2 - Log Y"
    print "3 - Log X"
    print "4 - Log Log"
    exit()

# --- Positional arguments: bin count and the comma-separated list of
# column indices to plot against each other ------------------------------
nb = int(sys.argv[1])
aux = sys.argv[2].split(",")
cols = []
for i in aux:
	cols.append(int(i))
#cols = [int(sys.argv[2][0]),int(sys.argv[2][2])]

# Defaults for the optional parameters: equal-size binning, interactive
# display, error-bar style, no normalisation/rescaling, zero offsets.
bintype = 's'
save = False
style = 1
normalizex = False
normalizey = False
scalx = False
scaly = False
offsetyp = 0.0
offsetyn = 0.0
offsetxp = 0.0
offsetxn = 0.0
# Parse the remaining argv entries. Bare words toggle flags; "key=value"
# pairs configure binning, saving, axis offsets and plot style.
if len(sys.argv) > 3:
	for i in range(3,len(sys.argv)):
		param = sys.argv[i].split("=")
		if len(param) != 2:
			# flag-style parameters (no "=")
			if param[0] == "normx":
				normalizex = True
			elif param[0] == "normy":
				normalizey = True
			elif param[0] == "scalex":
				scalx = True
			elif param[0] == "scaley":
				scaly = True
			else:
				# NOTE(review): message is missing a space before "unknown"
				print "Parameter "+ str(param) + "unknown"
		else:
			# key=value parameters
			if param[0] == "bt" and param[1] == "r":
				bintype = 'r'
			if param[0] == "save":
			    save = True
			    filename = param[1]
			if param[0] == "offsetxp":
				offsetxp=float(param[1])
			if param[0] == "offsetxn":
				offsetxn=float(param[1])
			if param[0] == "offsetyp":
				offsetyp=float(param[1])
			if param[0] == "offsetyn":
				offsetyn=float(param[1])
			if param[0] == "style":
				# styles outside 1..4 fall back to 1 (error bar)
				style = int(param[1])
				if style > 4 or style < 1:
					style = 1
# --- Figure construction: one curve per dataset in dbs ------------------
print bintype	
fig = plt.figure(figsize=[9,9])
ax1 = fig.add_subplot(111)


print "\n"
# For each dataset: read the two chosen columns, drop sentinel rows,
# sort, bin, then plot the per-bin means with confidence intervals.
for db in dbs:
	headers, buffe = readData(db,cols)
	ttl = headers[0] + " vs " + headers[1]
	buffe = cleanArray(buffe)
	
	if len(buffe) == 0:
		print "Can't generate graph for " + labels[db]
	elif len(buffe) > 0:
		print "Creating graphic for " + labels[db]
		#*************************************************
		#SORTING ARRAY                           *********
		#*************************************************
		buffe.sort()
		maxx = 1
		maxy = 1	
		# Optional normalisation of each axis by its maximum value.
		if normalizex:
			print "--> Normalizing X"
			maxx = max([ buffe[i][0] for i in range(0,len(buffe))])
		if normalizey:
			print "--> Normalizing Y"			
			maxy = max([ buffe[i][1] for i in range(0,len(buffe))])
		x = [ buffe[i][0]/maxx for i in range(0,len(buffe))]
		y = [ buffe[i][1]/maxy for i in range(0,len(buffe))]

		#*************************************************
		# LIMIT OF BINS (BINS WITH FIXED SIZE OF ELEMENTS)
		#*************************************************
		binres = binnize(bintype, buffe, x,y, nb)

		#*************************************************
		# CONFIDENCE INTERVALS            ****************
		#*************************************************
		w = {} # x values per bin (bin mean)
		z = {} # y values per bin (bin mean)
		werr = {} # CI of w
		zerr = {} # CI of z

		# Per-bin 95% confidence interval: std/sqrt(h) * t(h), with the
		# Student-t critical value from getTD.
		i = 0
		for h in binres[2]:
		    if h != 0:
			w[i] = mean(binres[0][i])
			z[i] = mean(binres[1][i])
			werr[i] = float(std(binres[0][i])/sqrt(h))*getTD(h)
			zerr[i] = float(std(binres[1][i])/sqrt(h))*getTD(h)
		    i += 1
		i = 0

		#*************************************************
		# PLOT OF THE MEAN VALUES PER BIN        *********
		#*************************************************

		#ax1.plot(w.values(),z.values(),'bo')
		# Styles: 1 linear, 2 log-y, 3 log-x, 4 log-log. The errorbar
		# call is identical in every branch; only the axis scales differ.
		if style == 1:
			ax1.errorbar(w.values(),z.values(),yerr=zerr.values(),marker=marker[db],ms=markersize,ls='--',color=colors[db],label=labels[db])
		elif style == 2:
			ax1.errorbar(w.values(),z.values(),yerr=zerr.values(),marker=marker[db],ms=markersize,ls='--',color=colors[db],label=labels[db])
			ax1.set_yscale('log')
		elif style == 3:	
			ax1.errorbar(w.values(),z.values(),yerr=zerr.values(),marker=marker[db],ms=markersize,ls='--',color=colors[db],label=labels[db])
			ax1.set_xscale('log')
		elif style == 4:
			ax1.errorbar(w.values(),z.values(),yerr=zerr.values(),marker=marker[db],ms=markersize,ls='--',color=colors[db],label=labels[db])
			ax1.set_xscale('log')
			ax1.set_yscale('log')

#

ax1.relim()
# Optional axis rescaling with the user-supplied offsets.
# NOTE(review): float(str(v)[0:4]) truncates each limit to its first four
# characters; this breaks for negative values or scientific notation --
# confirm the expected data ranges before relying on it.
if scalx or scaly:
	ax1.autoscale_view(tight=True,scalex = scalx, scaley=scaly)
	ymin,ymax = plt.ylim()
	xmin,xmax = plt.xlim()
	ymin = float(str(ymin)[0:4]) - offsetyn
	ymax = float(str(ymax)[0:4]) + offsetyp
	xmin = float(str(xmin)[0:4]) - offsetxn
	xmax = float(str(xmax)[0:4]) + offsetxp
	stepy = (ymax-ymin)/4
	stepx = (xmax-xmin)/6
	stepx = float(str(stepx)[0:3])
	stepy = float(str(stepy)[0:3])
	plt.yticks( arange(ymin,ymax,stepy) ,arange(ymin,ymax,stepy) )
	plt.xticks( arange(xmin,xmax,stepx) , arange(xmin,xmax,stepx) )

ax1.grid(True)
ax1.legend(loc="best")
# NOTE(review): headers and ttl keep the values from the LAST dataset in
# dbs -- the axis labels and the saved filename reflect only that one.
xlabel(headers[0])
ylabel(headers[1])

#title(ttl + " - " + str(nb) + " bins")

# Save to figures/ (filename derived from title, bin count, bin type and
# the user-given suffix) or show the figure interactively.
if save == True:
    savefig("figures/"+ttl.replace("/","%").replace(" ","_") + "-"+str(nb)+"bins"+"-"+bintype+"-"+filename)
else:
    plt.show()
