#!/usr/bin/env python

# use this script to create a Makefile to start from RAW files and end
# up with collapsed omssa CSV files

import os, sys, getopt
from xlh import External

def which(filename):
  """Return the absolute path of *filename* found on $PATH, or None.

  Mirrors the shell `which` command: scans each PATH entry in order and
  returns the first existing match.
  """
  # os.environ.get guards against an unset PATH (os.getenv('PATH') returns
  # None in that case, which would crash on .split()); os.pathsep keeps the
  # split portable (':' on POSIX, ';' on Windows).
  for directory in os.environ.get('PATH', '').split(os.pathsep):
    candidate = os.path.join(directory, filename)
    if os.path.exists(candidate):
      return candidate
  return None

def getDbSize(db, fastacmd=None):
  """Return the number of sequences in BLAST database *db*.

  Runs ``fastacmd -I -d <db>`` and parses the summary line that contains
  the word "sequences" (e.g. "  1,234 sequences; ...").

  db:       path/name of the BLAST database.
  fastacmd: optional path of the fastacmd binary; defaults to the
            module-level FastaCmd resolved from $PATH.

  Raises ValueError if fastacmd produced no "sequences" line (the original
  code raised a confusing IndexError in that case).
  """
  command = fastacmd if fastacmd is not None else FastaCmd
  line = ''
  p = os.popen(command + ' -I -d ' + db, 'r')
  try:
    for line in p:
      if 'sequences' in line:
        break
    else:
      # EOF reached without a match
      line = ''
  finally:
    # always release the pipe, even if parsing fails
    p.close()
  if not line:
    raise ValueError('no sequence count reported for database ' + db)
  # first token is the count; strip thousands separators ("1,234" -> 1234)
  return int(line.split()[0].replace(',', ''))

def exitOnError(errmsg, exit):
  """Report *errmsg* on stdout and terminate with status *exit*."""
  sys.stdout.write('The following error(s) occurred:\n\n')
  sys.stdout.write(errmsg + '\n')
  sys.exit(exit)

def usage():
  """Print the command-line usage summary on stdout and exit with status 1.

  Fixes the "MGF FIles" typo of the original message and documents that
  -f takes an accession-list file argument (getopt spec is 'f:').
  """
  sys.stdout.write(
    'Usage:\n\n '
    + sys.argv[0]
    + ' [-h] -d Database [-t MS Tolerance] [-f AccFile] RAW Files MGF Files\n\n'
    ' -h:\tPrint this help and exit\n'
    ' -d:\tDatabase containing full proteins\n'
    ' -t:\tTolerance for MS ions\n'
    ' -f:\tFile containing protein accession numbers, one per line\n'
    ' -o:\tOutput file name (default "Makefile")\n'
    ' -n:\tExperiment name\n')
  sys.exit(1)

# Paths
# NOTE(review): hard-coded per-user config directory — presumably this should
# come from the External config or an environment variable; confirm before
# deploying on another machine.
SilkConfigPath = '/Users/dawe/Documents/Projects/XL/Silk/trunk/config/'

# External tools resolved from $PATH. which() yields None when a tool is
# missing, so the later string concatenations require all of them installed.
Raw2XML = which('raw2xml')					# to convert RAW in mzXML files
DoubletCheck = which('doubletCheck.py')		# to split into light and heavy msm
XDBCreate = which('xdbcreate.py')			# to create peptide databases
FastaCmd = which('fastacmd')				# to get info about the database
Tolerance = '10'							# default tolerance for doublet checks
Omssa = which('omssacl')
Omssamerge = which ('omssamerge')
Cat = which('cat')
Grep = which('grep')
Rm = which('rm')
Head = which('head')
Awk = which('awk')

# Omssa search parameters. All values are strings because they are pasted
# verbatim into the generated Makefile.
ModificationFile = os.path.join(SilkConfigPath, 'mods.xml')
UserModificationFile = os.path.join(SilkConfigPath, 'usermods.xml')
Missed = '5'
PrecursorTolerance = '0.03'
IonTolerance = '0.5'
# -mv selects variable modifications by id; 120/122 vs 119/121 appear to be
# the heavy vs light crosslinker forms — TODO confirm against usermods.xml.
HeavyModificationsOpt = ' -mx ' + ModificationFile + ' -mux ' + UserModificationFile + ' -mf 3 -mv 1,120,122'
LightModificationsOpt = ' -mx ' + ModificationFile + ' -mux ' + UserModificationFile + ' -mf 3 -mv 1,119,121'
AllModificationsOpt  = ' -mx ' + ModificationFile + ' -mux ' + UserModificationFile + ' -mf 3 -mv 1,119,121,120,122'
ToleranceOptions = ' -te ' + PrecursorTolerance + ' -to ' + IonTolerance
EnzymeOpts = ' -e 0 -v ' + Missed
MiscOpts = ' -w -zcc 1 -hm 1 -nt 1 -scorp 0.01 -ht 100 -cl 0.01'
ExpectedOpts = ' -he ' #to be modified later on
DbOpts = ' -d ' # to be modified later on

# Defaults that the command-line options may override.
OutFile = 'Makefile'
ExperimentName = 'xlink'
MgfExtension = '.msm'	# Default extension for MGF files
AccListFile = ''

# Per-category accumulators, filled while scanning the arguments and while
# building the target graph.
AccList = []
RawFiles = []
MgfFiles = []
mzxmlTargets = []
csvTargets = []
lightMgfTargets = []
heavyMgfTargets = []
XDBTargets = []
heavyCsvTargets = []
lightCsvTargets = []

# Maps each make target name to its space-separated prerequisite list.
TargetDict = {
  'all': '',
  'convert': '',
  'split': '',
  'xdb': '',
  'search': '',
  'light_search': '',
  'heavy_search': '',
  'merge_csv': '',
  'merge_msm': '',
}

# --- Command-line parsing ---------------------------------------------------
# Fix: DbIn must exist even when -d was never supplied, otherwise the
# "if not DbIn" check below raises NameError instead of the intended
# "Missing database" message.
DbIn = None

try:
  optlist, args = getopt.getopt(sys.argv[1:], 'd:t:hf:o:n:')
except getopt.GetoptError:
  usage()

if not optlist:
  usage()

for o, a in optlist:
  if o == '-h':
    usage()
  elif o == '-d':
    DbIn = a
    # Bare database names are resolved against the configured BLAST db dir.
    DbDir = External.get("blastdb", "path")
    if '/' not in DbIn:
      DbIn = os.path.join(DbDir, DbIn)
  elif o == '-t':
    Tolerance = a
  elif o == '-f':
    AccListFile = a
  elif o == '-o':
    OutFile = a
  elif o == '-n':
    ExperimentName = a

# The experiment-level target depends on the two merge steps.
TargetDict[ExperimentName] = 'merge_csv merge_msm'

if not DbIn:
  exitOnError('Missing database', 1)
#else:
#  DbOpts += DbIn
#  ExpectedOpts += str(100 * getDbSize(DbIn))

if not AccListFile:
  exitOnError('Missing accession number list', 1)
else:
  # one accession number per line
  for line in open(AccListFile):
    AccList.append(line.strip())

# Classify the positional arguments by extension: RAW spectra get an mzXML
# conversion target, while MGF-style spectra (.msm/.mgf/.msn) get light and
# heavy split targets named after the doubletCheck output convention.
for arg in args:
  base, suffix = os.path.splitext(arg)
  suffix = suffix.lower()
  if suffix == '.raw':
    RawFiles.append(arg)
    mzxmlTargets.append(base + '.mzXML')
  elif suffix in ('.msm', '.mgf', '.msn'):
    MgfFiles.append(arg)
    lightMgfTargets.append('light_1_' + base + MgfExtension)
    heavyMgfTargets.append('heavy_1_' + base + MgfExtension)

# Time to write the Makefile. The main steps are:
# 1- convert .RAW files to mzXML (depends on RAW, creates mzXML)
# 2- split .msm files into light and heavy (depends on mzXML and given MSM)
# 3- create an xdb for each couple of accession numbers
# 4- omssa-search the light and heavy spectra against it (gives csv and oms)
# 5- merge all csv files
# Additional targets to clean

fh = open(OutFile, 'w') # the generated Makefile
#fh = sys.stdout

# Emit one "NAME = value" line per Makefile variable, table-driven.
MakeVars = [
  ('RAW2XML', Raw2XML),
  ('DOUBLETCHECK', DoubletCheck),
  ('OMSSA', Omssa),
  ('OMSSAMERGE', Omssamerge),
  ('XDBCREATE', XDBCreate),
  ('CAT', Cat),
  ('GREP', Grep),
  ('RM', Rm),
  ('HEAD', Head),
  ('AWK', Awk),
  ('SED', which('sed')),
  ('DOUBLETTOL', Tolerance),
  ('LIGHTMODS', LightModificationsOpt),
  ('HEAVYMODS', HeavyModificationsOpt),
  ('TOLOPTS', ToleranceOptions),
  ('ENZYMEOPTS', EnzymeOpts),
  ('MISCOPTS', MiscOpts),
  ('MASTERDB', DbIn),
  ('XDBOPTS', '-L 20 -d $(MASTERDB)'),
  ('MGFEXT', MgfExtension),
]
for VarName, VarValue in MakeVars:
  fh.write(VarName + ' = ' + VarValue + '\n')
#fh.write('EXPECTED = ' + ExpectedOpts + '\n')
fh.write('\n\n')
# write targets


TargetDict['all'] = 'convert split xdb search ' + ExperimentName
for xml in mzxmlTargets:
  TargetDict['convert'] = TargetDict['convert'] + ' ' + xml
for file in lightMgfTargets + heavyMgfTargets:
  TargetDict['split'] = TargetDict['split'] + ' ' + file


TargetDict['search'] += 'xdb light_search heavy_search'
for x in range(len(AccList)):
  for y in range(x + 1, len(AccList)):
    Label = '__' + str(x) + '_' + str(y)
    TargetDict['xdb'] = TargetDict['xdb'] + 'xdb' + ExperimentName + Label + ' '
    XDBTargets.append('xdb' + ExperimentName + Label)
    TargetDict['xdb' + ExperimentName + Label] = ''
    for msm in lightMgfTargets:
      fname = os.path.splitext(msm)[0]
      lightCsvTargets.append(fname + Label + '.csv')
      TargetDict[fname + Label + '.csv'] = msm + ' ' + 'xdb' + ExperimentName + Label
    for msm in heavyMgfTargets:
      fname = os.path.splitext(msm)[0]
      heavyCsvTargets.append(fname + Label + '.csv')
      TargetDict[fname + Label + '.csv'] = msm + ' ' + 'xdb' + ExperimentName + Label


for csv in heavyCsvTargets: 
  TargetDict['heavy_search'] = TargetDict['heavy_search'] + ' ' + csv
for csv in lightCsvTargets:
  TargetDict['light_search'] = TargetDict['light_search'] + ' ' + csv

for t in ['all', 'convert', 'split', 'xdb', 'search', 'light_search', 'heavy_search']:
  fh.write(t + ': ' + TargetDict[t] + '\n\ttouch $@\n\n')



# Conversion
for x, xml in enumerate(mzxmlTargets):
  fh.write(xml + ': ' + RawFiles[x] + '\n')
  fh.write('\t $(RAW2XML) --mzXML $< $@\n\n')

# Split
for x, msm in enumerate(lightMgfTargets):
  fh.write(msm + ': ' + mzxmlTargets[x] + '\n')
  fh.write('\t$(DOUBLETCHECK) -3 -t ' + Tolerance + ' -f ' + MgfFiles[x] + ' -m ' + mzxmlTargets[x] + '\n\n')
  fh.write(heavyMgfTargets[x] + ': ' + msm + '\n')
  fh.write('\ttouch $@\n\n')

#xdb
for dbcouple in XDBTargets:
  fh.write(dbcouple + ': \n')
  (x, y) = map(lambda x: int(x), dbcouple.split('__')[-1].split('_'))
  
  fh.write('\t$(XDBCREATE) $(XDBOPTS) -o $@ -a ' + AccList[x] + ',' + AccList[y] + '\n\ttouch $@\n\n')


# OMSSA
for x, csv in enumerate(lightCsvTargets):
  TargetDict['merge_csv'] = TargetDict['merge_csv'] + ' '  + csv
  fname = os.path.splitext(csv)[0]
  (basename, coord) = fname.split('__')
  oms = fname  + '.oms'
  SourceMsmFile = filter(lambda name: basename in name, lightMgfTargets)[0]
  ThisDb = 'xdb'+ ExperimentName + '__' + coord
  fh.write(csv + ': ' + SourceMsmFile + '\n')
  Command = """	exp=`fastacmd -I -d """ + os.path.join(External.get("blastdb", "path"), ThisDb) + """ | grep sequences | sed s/,// | awk '{print $$1*100}'` && \
	$(OMSSA) $(LIGHTMODS) $(TOLOPTS) $(ENZYMEOPTS) $(MISCOPTS) -he $$exp -d """ + os.path.join(External.get("blastdb", "path"), ThisDb) + """ -oc $@ -fm $< -ob """ + oms + """
	
	
  """
  fh.write(Command)

for x, csv in enumerate(heavyCsvTargets):
  TargetDict['merge_csv'] = TargetDict['merge_csv'] + ' '  + csv
  fname = os.path.splitext(csv)[0]
  (basename, coord) = fname.split('__')
  oms = fname + '.oms'
  SourceMsmFile = filter(lambda name: basename in name, heavyMgfTargets)[0]
  ThisDb = 'xdb'+ ExperimentName + '__' + coord
  fh.write(csv + ': ' + SourceMsmFile + '\n')
  Command = """	exp=`fastacmd -I -d """ + os.path.join(External.get("blastdb", "path"), ThisDb) + """ | grep sequences | sed s/,// | awk '{print $$1*100}'` && \
	$(OMSSA) $(HEAVYMODS) $(TOLOPTS) $(ENZYMEOPTS) $(MISCOPTS) -he $$exp -d """ + os.path.join(External.get("blastdb", "path"), ThisDb) + """ -oc $@ -fm $< -ob """ + oms + """


  """
  fh.write(Command)
    
# Merge results. The merge_csv recipe renumbers spectra: each csv's
# "Spectrum number" column (field 1) is offset by the running count n of
# spectra in the MGF files merged so far, so numbers stay unique.
#
# Fix: the original emitted bare $n, $thisn and $mfile inside the recipe;
# make expands single-$ references itself (to empty strings here), so the
# arithmetic and the oldmfile comparison never worked. Every shell '$' is
# now doubled ($$) so make passes it through to the shell, and n starts at 0
# instead of being unset on the first iteration.
fh.write(ExperimentName + ': ' + TargetDict[ExperimentName] + '\n\ttouch $@\n\n')
fh.write('merge_csv: ' + TargetDict['merge_csv'] + '\n')
Commands = (
  '\t@echo "Merging csv files... "\n'
  '\t$(HEAD) -n1 $< > ' + ExperimentName + '.csv\n'
  "\tn=0; oldmfile='placeholder'; for i in $?; do "
  'mfile=$${i%%__*}$(MGFEXT); '
  'thisn=`grep BEGIN $$mfile|wc -l`; '
  "$(GREP) -v 'Spectrum number,' $$i | $(AWK) -v add=$$n "
  "'BEGIN {FS=\",\"; OFS=\",\"}; {$$1=$$1 + add; print $$0}' >> " + ExperimentName + '.csv ; '
  'if [ $$oldmfile != $$mfile ]; then n=$$(( $$n + $$thisn )); oldmfile=$$mfile; fi; '
  'done;\n'
)
fh.write(Commands)
fh.write('\ttouch $@\n\n')


# merge_msm keeps the first file's two header lines and appends every other
# line from all files, dropping lines starting with S or R (presumably
# per-file header records — TODO confirm against the MGF flavour used).
fh.write('merge_msm: ' + ' '.join(lightMgfTargets + heavyMgfTargets) + '\n')
Commands = (
  '\t@echo "Merging $(MGFEXT) files... "\n'
  '\t@$(HEAD) -n2 $< > ' + ExperimentName + '$(MGFEXT)\n'
  '\t@$(GREP) -h -v ^[SR] $? >> ' + ExperimentName + '$(MGFEXT)\n'
)
fh.write(Commands)
fh.write('\ttouch $@\n\n')







# Clean
Commands = """.PHONY: clean
clean:
	$(RM) -f *.csv *.oms test.dta """ + ExperimentName + """* all search light_search heavy_search convert split
	
deep_clean:
	$(RM) -f *.mzXML light* heavy* *.csv *.oms test.dta """ + ExperimentName + """* all search light_search heavy_search convert split
"""
fh.write(Commands)
