# T-Student Function
def getTD(n):
    """Two-tailed Student-t critical value (95% confidence) for ``n``
    degrees of freedom.

    Exact table values for 1..30 d.o.f.; above that the usual coarse
    table steps (40, 60, 120) apply, falling back to the normal
    approximation 1.960 for very large ``n``.
    """
    tD = {1:12.706,2:4.303,3:3.182,4:2.776,5:2.571,6:2.447,7:2.365,8:2.306,9:2.262,10:2.228,11:2.201,12:2.179,13:2.160,14:2.145,15:2.131,16:2.120,17:2.110,18:2.101,19:2.093,20:2.086,21:2.080,22:2.074,23:2.069,24:2.064,25:2.060,26:2.056,27:2.052,28:2.048,29:2.045,30:2.042 }
    tdBig = {40:2.021,60:2.000,120:1.980,1e5:1.960}
    if n in tD:
        return tD[n]
    # Fix: the original iterated the dict with iteritems() in arbitrary
    # order, so the band returned for n > 30 was order-dependent.  Iterate
    # the thresholds in ascending order; '<=' makes n == 40 map to the
    # 40-d.o.f. value, as in the statistical table.
    for limit in sorted(tdBig):
        if n <= limit:
            return tdBig[limit]
    return tdBig[1e5]

def do_error_bar(x,e,lw=2,w=2):
    """Draw an error bar of half-height ``e`` at abscissa ``x``.

    The bar is a vertical stem with horizontal caps of half-width ``w``,
    drawn in black with line width ``lw``.

    NOTE(review): both ``plot`` and the centre ordinate ``m`` are not
    defined anywhere in this file -- presumably they come from a pylab
    star import and a global mean set by the caller.  Confirm before
    reuse; otherwise this raises NameError.
    """
    # vertical stem from m-e up to m+e
    o = plot([x,x],[m+e,m-e],color='k',lw=lw)
    # upper cap
    o = plot([x-w,x+w],[m+e,m+e],color='k',lw=lw)
    # lower cap
    o = plot([x-w,x+w],[m-e,m-e],color='k',lw=lw)

def getData(dbname):
    """Compose per-team statistics for database *dbname*.

    For every publication (team) this gathers:
      - author/concept repetition measures from the hypergraph output file,
      - team sizes and old-member / old-concept proportions,
      - mean author and concept relevance, joining the graph relevance
        logs with the team composition stored in PostgreSQL,
    writes them to ``data/<dbname>data_compile.txt`` and returns the rows.

    Returns:
        list of rows ``[id, r(A), r(C), card(A), card(C), card(oldA),
        card(oldC), relv(A), relv(C)]``, one per team.

    Fixes over the original:
      - ``faile.writeline`` (non-existent method) -> ``write``;
      - output path used undefined ``dname`` -> ``dbname``;
      - each composed line (``salida``) was built but never written;
      - the returned name ``buffe`` was never defined -- we now return
        the composed rows;
      - concept relevance was normalised by ``maxirelv`` (the author
        maximum) instead of ``maxirelvC``;
      - each SQL statement was executed twice;
      - file handles are now closed via ``with``.
    """
    # READ THE REPETITION OF AUTHORS AND CONCEPTS PER TEAM IN THE HYPERGRAPH
    socialrepeat = {}
    conceptrepeat = {}
    oldguysprop = {}
    oldconceptsprop = {}
    talla = {}
    tallaC = {}
    with open("hypergraph/outputs/" + dbname + '-hypergraphcool.txt', 'r') as f:
        for c, line in enumerate(f):
            if c == 0:
                continue  # skip the header line
            data = line.split("\t")
            pid = int(data[0])
            v1 = 0 if data[1] == "None" else float(data[1])
            v2 = 0 if data[2] == "None" else float(data[2])
            # proportion of already-seen concepts; 0 when the team has none
            if float(data[4]) != 0.0:
                oldconceptsprop[pid] = float(data[6]) / float(data[4])
            else:
                oldconceptsprop[pid] = 0
            tallaC[pid] = float(data[4])
            # NOTE(review): data[3] (author cardinality) is assumed
            # non-zero here, mirroring the original code.
            oldguysprop[pid] = float(data[5]) / float(data[3])
            talla[pid] = float(data[5])
            socialrepeat[pid] = v1
            conceptrepeat[pid] = v2

    # CONNECT TO DATABASE
    myconnection = "dbname='" + dbname + "' user='ecos' host='proteine.local' password='ecos'"
    conn = psycopg2.connect(myconnection)
    cur = conn.cursor()

    # RELEVANCY OF AUTHORS -------------------------------------------------
    authorsd = _load_relevance("graph/relevance_files/" + dbname + "-log-relevance_author.txt")
    maxirelv = 1  # max(authorsd.values()) -- normalisation deliberately disabled
    sql = """select id_publication, id_author from pub_auth, publication where id=id_publication and abstract <> '$$$' order by id_publication"""
    teamrelv = _mean_relevance_by_team(cur, sql, authorsd, maxirelv)

    # RELEVANCY OF CONCEPTS ------------------------------------------------
    conceptd = _load_relevance("graph/relevance_files/" + dbname + "-log-relevance_concept.txt")
    maxirelvC = 1  # max(conceptd.values()) -- normalisation deliberately disabled
    sql = """select id_publication, id_concept from pub_concept, publication where id=id_publication and abstract <> '$$$' order by id_publication"""
    # fix: the original divided by maxirelv (author maximum) here -- harmless
    # while both constants are 1, wrong if normalisation is re-enabled
    teamrelvC = _mean_relevance_by_team(cur, sql, conceptd, maxirelvC)

    # COMPOSITION OF DATA IN ARRAYS ----------------------------------------
    buffe = []
    with open("data/" + dbname + "data_compile.txt", "w") as faile:
        faile.write("id\tr(A)\tr(C)\tcard(A)\tcard(C)\tcard(oldA)\tcard(oldC)\trelv(A)\trelv(C)\n")
        for i, k in teamrelv.items():
            row = [i, socialrepeat[i], conceptrepeat[i], talla[i], tallaC[i],
                   oldguysprop[i], oldconceptsprop[i], k, teamrelvC[i]]
            buffe.append(row)
            faile.write("\t".join(str(v) for v in row) + "\n")

    cur.close()
    conn.close()
    return buffe


def _load_relevance(path):
    """Read a graph relevance log into an ``{id: relevance}`` dict."""
    # column 0 = node id, column 4 = relevance score
    pairs = loadtxt(path, '<int,float>', usecols=(0, 4))
    return dict(pairs.tolist())


def _mean_relevance_by_team(cur, sql, relvd, maxi):
    """Mean ``relevance / maxi`` over the members of each publication team."""
    cur.execute(sql)
    teams = {}  # publication id -> list of member ids
    for pub_id, member_id in cur.fetchall():
        teams.setdefault(pub_id, []).append(member_id)
    means = {}
    for pid, members in teams.items():
        total = 0.0
        for mid in members:
            total += relvd[mid] / maxi
        means[pid] = float(total / float(len(members)))
    return means

def cleanArray(buffe):
	"""Filter the raw pairs, dropping the -1 sentinel entries.

	Keeps only the rows whose first two coordinates are both different
	from -1, returning them as fresh two-element lists.
	"""
	return [[pair[0], pair[1]]
	        for pair in buffe
	        if pair[0] != -1 and pair[1] != -1]

def binnize(bintype, buffe, x,y, nb):
	"""Split the (x, y) samples into ``nb`` bins.

	With ``bintype == 's'`` every bin holds the same number of samples
	(the last bin absorbs the remainder); any other ``bintype``
	delegates the bin populations to a histogram of ``x``.

	Returns ``[xs_by_bin, ys_by_bin, populations]`` where the first two
	are dicts keyed by bin index (empty bins get no entry).
	"""
	xs_by_bin = {}
	ys_by_bin = {}
	if bintype == 's':
	    total = len(buffe)
	    per_bin = int(total / nb)
	    # bin edges: x[0], then the first x of each fixed-size bin,
	    # then the very last x
	    edges = [x[0]]
	    pop = []
	    for step in range(nb):
	        edges.append(x[step * per_bin])
	        pop.append(per_bin)
	    pop[nb - 1] += total - nb * per_bin  # remainder -> last bin
	    edges.append(x[-1])
	    b = edges
	else:
	    pop, b = matplotlib.numpy.histogram(x, bins=nb)
	start = 0
	for idx, count in enumerate(pop):
	    if count != 0:  # bins with no population are skipped entirely
	        xs_by_bin[idx] = [x[pos] for pos in range(start, start + count)]
	        ys_by_bin[idx] = [y[pos] for pos in range(start, start + count)]
	        start += count

	return [xs_by_bin, ys_by_bin, pop]

def invertArray(buffe):
	"""Return a new list with the first two coordinates of each row swapped."""
	return [[row[1], row[0]] for row in buffe]

def readData(dbname,cols, filepost):
	"""Read two columns from ``data/<dbname><filepost>``.

	Args:
	    dbname: database name, prefix of the data file.
	    cols: pair of column indices ``[c1, c2]`` to extract.
	    filepost: file-name suffix (e.g. "data_compile.txt").

	Returns:
	    ``(headers, results)`` -- ``headers`` holds the two column names
	    taken from the first line; ``results`` is a list of
	    ``[float, float]`` rows for every subsequent line.
	"""
	c1 = cols[0]
	c2 = cols[1]
	headers = []
	results = []
	# fix: the original never closed the file handle; 'with' guarantees it
	with open("data/"+dbname+filepost,"r") as faile:
		for i, line in enumerate(faile):
			data = line.replace("\n","").split("\t")
			if i == 0:
				headers = [data[c1], data[c2]]
			else:
				results.append([float(data[c1]),float(data[c2])])
	return headers,results

def show_headers(dbname, filepost):
	"""Print the indexed column headers of ``data/<dbname><filepost>``.

	Reads only the first line of the file and prints one ``"<i>- <name>"``
	line per tab-separated column, so the indices can be passed to
	``readData`` as ``cols``.
	"""
	# fix: the original leaked the file handle; 'with' closes it.  The
	# parenthesized print form is valid in both Python 2 and 3.
	with open("data/"+dbname+filepost,"r") as faile:
		line = faile.readline()
	data = line.replace("\n","").split("\t")
	for i in range(len(data)):
		print(str(i) +"- "+ data[i])


