
import pandas as pd
import pandas.rpy.common as comm
from rpy2.robjects.packages import importr
import rpy2.robjects as robjects
import numpy as np


from itertools import groupby
import StringIO
import re
import sys


from htstool.models import *


ALPHA_16=list('ABCDEFGHIJKLMNOP')
ALPHA_8=list('ABCDEFGH')
BARCODE_DELIMITER='_'


def isRaw(g):
	"""Return True if plate group *g* holds raw (unprocessed) plates.

	Replaces the verbose ``if ...: return True else: return False``
	pattern with a direct boolean return; behavior is unchanged.
	"""
	return g.plate_type == PlateGroup.RAW

def isNorm(g):
	"""Return True if plate group *g* holds normalized plates.

	Direct boolean return instead of the verbose if/else form;
	behavior is unchanged.
	"""
	return g.plate_type == PlateGroup.NORM

def isSumm(g):
	"""Return True if plate group *g* holds summarized plates.

	Direct boolean return instead of the verbose if/else form;
	behavior is unchanged.
	"""
	return g.plate_type == PlateGroup.SUMM

def rm_nan(a1, a2):
	"""Pair two equal-length sequences and drop every position where
	either value is NaN.

	Returns a 2-row numpy array: row 0 holds the surviving values of
	*a1*, row 1 the matching values of *a2*.
	"""
	pairs = np.array([a1, a2]).transpose()
	keep = ~np.isnan(pairs).any(1)
	return pairs[keep].transpose()

	
def cp_from_plate(p_new, p_orig):
	"""
	Copy the descriptive fields of *p_orig* onto *p_new*, persist
	*p_new* via its ``save()`` method and return it.

	Fix: the original assigned ``condition`` twice in a row; the
	redundant duplicate assignment has been removed (no behavior change).
	"""
	p_new.platename     = p_orig.platename
	p_new.cellline      = p_orig.cellline
	p_new.condition     = p_orig.condition
	p_new.concentration = p_orig.concentration
	p_new.dimension     = p_orig.dimension
	p_new.replicate     = p_orig.replicate
	p_new.lib           = p_orig.lib
	p_new.controls      = p_orig.controls
	p_new.annotation    = p_orig.annotation
	p_new.save()

	return p_new
	


def getplatesbygrpid(groupid):
	"""Look up a PlateGroup by primary key and return its plates."""
	return PlateGroup.objects.get(id=groupid).plates

def deleteplate(plateid):
	"""Delete one plate plus every dataset and control attached to it.

	Rank-product plates carry extra result datasets (rankprod, pfp,
	pval) that are removed along with the optional raw data; ordinary
	plates only have their data set removed.  Controls are deleted
	before the plate record itself.
	"""
	plate = Plate.objects.get(id=plateid)
	if isinstance(plate, RankProdPlate):
		if plate.data is not None:
			plate.data.delete()
		plate.rankprod.delete()
		plate.pfp.delete()
		plate.pval.delete()
	else:
		plate.data.delete()
	for control in plate.controls:
		control.delete()
	plate.delete()


def deletegroup(groupid):
	"""
	Delete a PlateGroup and everything it owns: each plate's datasets
	and controls, each plate record, and finally the group itself.

	Fix: for RankProdPlate members the raw ``data`` dataset was never
	deleted here (unlike in deleteplate()), leaving orphaned dataset
	rows behind; the same ``if p.data is not None`` guard and delete
	used by deleteplate() is now applied.
	"""
	group = PlateGroup.objects.get(id=groupid)
	for p in group.plates:
		if isinstance(p, RankProdPlate):
			# mirror deleteplate(): the data set is optional here
			if p.data is not None:
				p.data.delete()
			p.rankprod.delete()
			p.pfp.delete()
			p.pval.delete()
		else:
			p.data.delete()
		for ctrl in p.controls:
			ctrl.delete()
		p.delete()
	group.delete()

def convert_to_plate_df(one_col_data):
	"""Reshape one-column well data (index labels like 'A01', 'B12')
	into a plate-shaped dataframe.

	Row letters become the index, column numbers the columns.  Note
	that *one_col_data*'s index is mutated in place (as before).
	"""
	labels = one_col_data.index
	row_labels = sorted({label[0] for label in labels})
	col_labels = sorted({label[1:] for label in labels})
	# split each well label into (row letter, column number) and pivot
	pairs = [(label[0], label[1:]) for label in labels]
	one_col_data.index = pd.MultiIndex.from_tuples(pairs, names=['row', 'col'])
	plate = one_col_data.unstack()
	plate.index = row_labels
	plate.columns = col_labels
	return plate

##### repeated function to Dataset.to_json_objs() ######
def convert_to_json_objs(platedf):
	"""
	Serialize a plate-shaped dataframe into a JSON list of well objects
	of the form {'val': ..., 'row': ..., 'col': ...}, iterating
	column-major (all rows of column 1, then column 2, ...).

	Fix: ``json`` was never imported anywhere in this module, so the
	original raised NameError on the final line; a function-local
	import keeps the fix self-contained.
	"""
	import json  # not imported at module level in this file
	objs = []
	for col in platedf.columns:
		for row in platedf.index:
			objs.append({'val': platedf.loc[row, col], 'row': row, 'col': col})
	return json.dumps(objs)

def convert_to_one_col_df(dataframe, colname):
	"""
	Flatten a plate-shaped dataframe into a one-column dataframe.

	The new index concatenates row and column labels ('A' + '01' ->
	'A01') in row-major order, matching the ravel of the plate values.

	parameters: dataframe (plate-shaped), colname (name of the single
	output column)
	"""
	labels = [row + col for row in dataframe.index for col in dataframe.columns]
	return pd.DataFrame({colname: dataframe.values.ravel()}, index=labels)


# TODO -- move to a separate analysis.py
def do_rankprod(dfs, reverse=False):
	'''
	do rank prod analysis on a list of one column dataframes
	each dataframe contains the data from the same replica

	dfs     -- list of one-column dataframes, one per replicate
	reverse -- negate the data first (rank the opposite tail)

	Returns a dataframe combining the 'pfp', 'pval' and 'RankProd'
	columns from R's RankProd package with the input data, rows ordered
	by ascending rank product.  Requires R + the RankProd package via
	rpy2, and drops rows containing NaN before the analysis.
	'''
	data=pd.concat(dfs, axis=1)

	if reverse==True:
		data=-data

	# TODO -- data values are negative, transform to positive
	#data=-data
	
	rp=importr('RankProd')

	# single-class design: every replicate column belongs to class 1
	cl=robjects.IntVector([1]*len(dfs)) 
	r_dataf=comm.convert_to_r_dataframe(data.dropna())
	# fixed seed (rand=123) keeps the 1000-permutation test reproducible
	r_RPout=rp.RP(r_dataf, cl, rand=123, num_perm=1000)


	# RP() returns a list; positions 0/1/2 hold pfp, p-values and rank products
	pfp=r_RPout[0]
	pval=r_RPout[1]
	RPs=r_RPout[2]
	
	### RPs[0] column 'class1 < class2' ###
	# NOTE(review): column [1] is taken as the other direction; exact
	# meaning depends on RankProd's output layout -- confirm against its docs.
	RPout=pd.DataFrame({'pfp':pfp[1], 'pval':pval[1], 'RankProd':RPs[1]}, index=RPs.rownames)


	# legacy pandas API: DataFrame.sort(columns=...) (pre-0.17)
	RP_ordered=RPout.sort(columns='RankProd')

	### index ordered by rank prod ##
	ind=RP_ordered.index

	data_reordered=data.loc[ind]


	df_combined=pd.concat([RP_ordered, data_reordered], axis=1)

	return df_combined


def cp_attrs(attr_dict, objto):
	"""Copy values from *attr_dict* onto *objto*, but only for keys
	that *objto* already has as attributes; unknown keys are ignored."""
	for name, value in attr_dict.items():
		if hasattr(objto, name):
			setattr(objto, name, value)

def cp_plate_attr(objfrom, objto, names):
	"""Copy each attribute in *names* from *objfrom* to *objto*;
	names missing on *objfrom* are silently skipped."""
	for name in names:
		if not hasattr(objfrom, name):
			continue
		setattr(objto, name, getattr(objfrom, name))

def groupby_platename(plates):
	"""Bucket *plates* by platename.

	Returns a list of lists; each inner list holds the replicates
	[rep1, rep2, ...] that share one platename, buckets ordered by
	platename.
	"""
	ordered = sorted(plates, key=lambda p: p.platename)
	return [list(group) for _, group in groupby(ordered, lambda p: p.platename)]

def sort_pls(p):
	"""Lower-cased sort key for plates: platename + cellline +
	concentration + replicate, concatenated as strings."""
	key = p.platename + str(p.cellline) + str(p.concentration) + str(p.replicate)
	return key.lower()

def merge_plates(plates):
	"""
	Stack a list of plates into one 'superplate' dataframe.

	Plates are grouped by replicate number; within each replicate the
	per-plate data are converted to one-column form and concatenated
	vertically, then the replicates are joined side by side (one column
	per replicate).  Well labels get the plate library and source plate
	id appended so rows stay unique across plates.

	Returns (superplate, anno) where *anno* is the flattened annotation
	taken from the replicate-1 plates (empty list if none).
	"""

	def getPname(p):
		# composite name used only for the initial stable ordering
		return '_'.join([p.cellline, p.condition, p.concentration])

	plates = sorted(plates, key=getPname)
	reps=[]
	index=[]
	anno=[]

	n = 0

	try:
		for rep_no, pls in groupby(sorted(plates, key=lambda p:p.replicate), lambda p:p.replicate):
			
			rep_dfs=[]
			for p in pls:
				df = convert_to_one_col_df(p.data.dataframe, str(rep_no)) 
				df.index=df.index+'_'+p.lib+'-'
				rep_dfs.append(df)
				
				# take annotation from one of the replicates
				if p.annotation is not None and p.replicate == 1:
					anno = anno + p.annotation.dataframe.values.ravel().tolist()

				# n = expected total number of wells across all plates,
				# computed once from the first plate seen.
				# NOTE(review): a,b persist from that first plate, so this
				# assumes every plate shares one dimension -- confirm.
				if n == 0:
					a,b=p.dimension
					n=a*b*len(plates)
				if len(index)<n:
					index=index + [str(p.id)]*(a*b)

			merged=pd.concat(rep_dfs)
			merged.columns=[str(rep_no)]
			reps.append(merged)
	
	except:
		print "Merge plate error:", sys.exc_info()
		raise

	superplate = pd.concat(reps, axis=1)
	# suffix each well label with its source plate id (kept in *index*)
	new_index = [ label+pid for label, pid in zip(superplate.index, index)]
	superplate.index = new_index

	return superplate, anno

# TODO - num_plates should be guessed
def format1_get_dfs(f, num_plates=10, mixed=None, dim=(16, 24)):
	'''
	Format 1, which is a plate shape data matrix readout
	from PerkinElmer Envision plate reader

	f          -- file path/buffer accepted by pd.read_csv
	num_plates -- number of plate blocks expected in the file
	mixed      -- optional [cond1, cond2]; when given, the resulting
	              (barcode, df) pairs are partitioned by which condition
	              substring occurs in the barcode
	dim        -- plate shape, e.g. (16, 24) or (8, 12)

	Returns a list of (barcode, dataframe) pairs, or a two-element
	list [cond1_pairs, cond2_pairs] when *mixed* is given.
	'''

	_num_lines_before=8
	
	# number of lines to read in, format1 has 8 lines before data
	nrows= (_num_lines_before + dim[0] + 1)*num_plates
	
	# TODO - need to detect if first line missing commas	
	content=pd.read_csv(f, nrows=nrows, header=1)
	

	# equal-sized slices, one per plate block
	chunks=np.array_split(content, num_plates) 


	df_pair=[]
	for chunk in chunks:
		# barcode sits in the third column of the chunk's first row
		barcode=chunk.values[0][2]
		
		#df=chunk[8:-1].drop([24], axis=1)
		# rows 6..6+dim[0] of the chunk hold the data matrix
		df=chunk[6:6+dim[0]].dropna(how='all', axis=1)

		# zero-pad column labels to two digits: '01', '02', ...
		cols=[i for i in range(dim[1])]
		df.columns=['0'+str(n+1) if n < 9 else str(n+1) for n in cols]
		
		if dim[0]==16:
			df.index=ALPHA_16
		else:
			df.index=ALPHA_8

		df_pair.append((barcode,df.astype(float)))


	# TODO - only two conditions in the assay?
	if mixed is not None and len(mixed)==2:
		cond1, cond2=mixed
		cond1_dfs=[]
		cond2_dfs=[]
		# sort the pairs to have cond1 cond2 separated
		for pair in df_pair:
			if cond1 in pair[0]:
				cond1_dfs.append(pair)
			elif cond2 in pair[0]:
				cond2_dfs.append(pair)


		return [cond1_dfs, cond2_dfs]
	else:
		return df_pair





def format2_get_dfs(data, mixed=None, dim=(16, 24)):
	'''
	Format 2, a one column per plate shape data readout
	from PerkinElmer Envision plate reader

	data  -- full file contents as one string
	dim   -- must be the shape of the data, e.g (8, 12)
	mixed -- [cond1, cond2] list, or a single condition substring such
	         as 'LT'; when given, pairs are sorted by condition and the
	         list is split into two halves

	Returns a list of (barcode, dataframe) pairs, or
	[cond1_pairs, cond2_pairs] when *mixed* is given.
	'''
	
	#with open(rawfile, 'U') as f:
	#	data=f.read()

	empty_line=data.find('\n\n')
	comma_line=re.search('\n,+\n',data)

	# case 1: split by two empty lines
	if empty_line>=0:
		blocks=data.split('\n\n')
	
	# case 2: split by two empty lines with commas only
	elif comma_line is not None and comma_line.start()>0:
		blocks=data.split(comma_line.group())

	# NOTE(review): if neither separator matches, 'blocks' is unbound and
	# the loop below raises NameError -- confirm inputs always contain one.

	chunks=[]
	for block in blocks:	
		try:
			# filter out non-data blocks
			if len(block.split('\n'))<dim[0]:
				continue

			buff=StringIO.StringIO(block)
			content=pd.read_csv(buff, header=1)
			df=content.dropna(how='all')
			df=df.dropna(how='all', axis=1)
			
			# check if the df read from the block is a data matrix of experiment data
			if len(df)==dim[0]*dim[1] or df.shape==dim:
				chunks.append(df)
		except Exception, e:
			print e
			print 'Warning: err reading block, disgarding the chunck'



	df_pair=[]
	for chunk in chunks:
		barcode=chunk['Barcode'].values[0]
		
		# pivot the one-column 'Signal' series into plate shape,
		# indexed by the 'Well' labels ('A01', ...)
		df=chunk['Signal']
		df.index=chunk['Well'].values
		df=convert_to_plate_df(df)

		# zero-pad column labels to two digits: '01', '02', ...
		cols=[i for i in range(dim[1])]
		df.columns=['0'+str(n+1) if n < 9 else str(n+1) for n in cols]
		
		if dim[0]==16:
			df.index=ALPHA_16
		else:
			df.index=ALPHA_8

		df_pair.append((barcode,df.astype(float)))


	# TODO - only treated vs non-treated in the assay?
	if mixed is not None:
		if isinstance(mixed, list) and len(mixed)==2:
			cond1, cond2=mixed

			# sort the pairs to have cond1 cond2 separated
			df_pair.sort(key=lambda pair: cond1 not in pair[0] and cond2 in pair[0])
		
		# if mixed is a string like 'LT' - ligand treatment
		elif isinstance(mixed, str):
			df_pair.sort(key=lambda pair: mixed in pair[0])

		# NOTE(review): halving assumes the two conditions have equal
		# plate counts -- confirm upstream guarantees an even split.
		cond1_dfs=df_pair[:len(df_pair)/2]
		cond2_dfs=df_pair[len(df_pair)/2:]
		return [cond1_dfs, cond2_dfs]
	else:
		return df_pair




def format1a_get_dfs(rawfile, mixed=None, dim=(16, 24)):
	'''
	Format 1a, plate shape like format 1, but blocks
	split by empty lines

	rawfile -- path of the raw csv file (expects CRLF line endings)
	dim     -- must be the shape of the data, e.g (8, 12)
	mixed   -- [cond1, cond2]; when given, pairs are sorted by
	           condition and the list is split into two halves

	Returns a list of (barcode, dataframe) pairs, or
	[cond1_pairs, cond2_pairs] when *mixed* is given.
	'''
	
	with open(rawfile) as f:
		data=f.read()

	split_line='Plate information'
	
	# split into blocks each contain Plate info, Background info and data matrix
	blocks=data.split(split_line)

	chunks=[]
	for block in blocks:	
		try:
			# separators within a block: a CRLF empty line, or a
			# commas-only line
			empty_line=block.find('\r\n\r\n')
			comma_line=re.search('\r\n,+\r\n', block)
			
			# case 1: split by two empty lines
			if empty_line>=0:
				small_blocks=block.split('\r\n\r\n')
			
			# case 2: split by two empty lines with commas only
			elif comma_line is not None and comma_line.start()>0:
				small_blocks=block.split(comma_line.group())

			# small_blocks[0]: plate info; [2]: the data matrix
			buff_plate_info=StringIO.StringIO(small_blocks[0])
			plate_info=pd.read_csv(buff_plate_info, header=1)
			barcode=plate_info['Barcode'].values[0]


			buff=StringIO.StringIO(small_blocks[2])
			content=pd.read_csv(buff, header=None)
			df=content.dropna(how='all')
			df=df.dropna(how='all', axis=1)
			
			print df.shape
			# check if the df read from the block is a data matrix of experiment data
			if len(df)==dim[0]*dim[1] or df.shape==dim:
				chunks.append((barcode, df))
		except Exception, e:
			print e
			print 'Warning: err reading block, disgarding the chunck'



	df_pair=[]
	for chunk in chunks:
		
		barcode, df=chunk

		# zero-pad column labels to two digits: '01', '02', ...
		cols=[i for i in range(dim[1])]
		df.columns=['0'+str(n+1) if n < 9 else str(n+1) for n in cols]
		
		if dim[0]==16:
			df.index=ALPHA_16
		else:
			df.index=ALPHA_8

		df_pair.append((barcode,df.astype(float)))


	# TODO - only two conditions in the assay?
	if mixed is not None and len(mixed)==2:
		cond1, cond2=mixed

		# sort the pairs to have cond1 cond2 separated
		df_pair.sort(key=lambda pair: cond1 not in pair[0] and cond2 in pair[0])
		# NOTE(review): halving assumes equal plate counts per condition
		cond1_dfs=df_pair[:len(df_pair)/2]
		cond2_dfs=df_pair[len(df_pair)/2:]
		return [cond1_dfs, cond2_dfs]
	else:
		return df_pair



# A utility to check if a row in the read in csv data is empty
# A utility to check if a row in the read in csv data is empty
def isRowNan(series):
	"""True when every cell of the row is null (NaN/None); True for an
	empty row as well."""
	return bool(pd.isnull(series).all())



def read_envision_format(data,  dim, excel=False):
	'''
	Read the envision plate reader format data

	data  -- path/buffer accepted by pd.read_excel / pd.read_csv
	dim   -- plate shape (rows, cols), e.g. (16, 24)
	excel -- read with pd.read_excel when True, else pd.read_csv

	Returns a list of dataframes, one per section whose header row is a
	subset of ['Plate', 'Barcode', 'Well', 'Sample', 'Signal'].
	'''
	if excel==True:
		df = pd.read_excel(data, header=None)
	else:
		df = pd.read_csv(data, header=None)

	# positions of fully-empty rows: they delimit the per-plate sections
	nandf = df.apply(isRowNan, axis=1)
	nan_row_index = nandf[nandf==True].index
	
	start = 0
	dfs = []
	for i in nan_row_index:
		a,b = dim
		# envision data format agreed with Daniel, i.e 96 + 2 lines or 384 +2 lines
		data_length = a*b+2
		print data_length, i-start

		if i - start == data_length:
			i_df = df[start:i]
			headers = i_df.iloc[1].dropna().tolist()
			
			# see if the headers are the subset of the following headers
			if set(headers).issubset(set(['Plate', 'Barcode', 'Well', 'Sample', 'Signal'])):
				cols = i_df.iloc[1].dropna()
				val_df = i_df[2:].dropna(axis=1)
				val_df.columns = cols
	    			dfs.append(val_df)
	        start = i+1

	return dfs


def parse_barcode_parts(barcode, parts_dict):
	'''
	Locate each user-specified barcode part within *barcode*.

	Splits the barcode on BARCODE_DELIMITER and, for each of the keys
	cond_str, concen_str, repl_str, cell_str, lib_part_str in
	*parts_dict*, reports the position of that value among the split
	parts, or -1 when it is absent.
	'''
	keys = ['cond_str', 'concen_str', 'repl_str', 'cell_str', 'lib_part_str']
	parts = barcode.split(BARCODE_DELIMITER)
	positions = []
	for key in keys:
		token = parts_dict[key]
		positions.append(parts.index(token) if token in parts else -1)
	return positions

def parse_repl(repl):
	'''
	Parse a replicate string such as 'R1': return the first character
	that converts to an int, as an int; None if there is none.
	'''
	for ch in repl:
		try:
			return int(ch)
		except ValueError:
			continue

def parse_ctrls(ctrls):
	'''
	Build control model objects from user-specified control dicts.

	Each entry of *ctrls* is a dict with 'type', 'row' and 'col';
	the type selects the control class (falling back to OtherControl
	for unknown types) and a fixed effect/description string.
	'''
	dispatch = {
		'positive': (PositiveControl, {'effect': 'known effective'}),
		'negative': (NegativeControl, {'effect': 'no effect'}),
		'non targeting': (NonTargetControl, {'description': 'non targeting'}),
		'untreated': (UntreatedControl, {'description': 'no treatment'}),
	}
	ctrl_objs = []
	for c in ctrls:
		cls, extra = dispatch.get(c['type'], (OtherControl, {}))
		ctrl_objs.append(cls(letter=c['row'], num=c['col'], **extra))
	return ctrl_objs




