#start to collate data
captureSNPs <- function(contig.map,contig.dataframes,cDNA.name,AceCOs,ace.files,envir=.GlobalEnv){
	#A wrapper to collate SNP data from multiple populations.
	#
	#contig.map is a list derived from join.contigs
	#contig.dataframes is a character vector containing names of dataframes
	# produced by readNewblerMapper (looked up in 'envir')
	#cDNA.name is the name of the cDNA-derived supercontig
	#AceCOs is a character vector containing names of dataframes for CO line
	#numbers in ace files produced by readAceCO (looked up in 'envir')
	#ace.files is a character vector containing names of ace files to be read
	#by readContigAce
	#envir is the environment in which to look for contig.dataframes & AceCOs
	#
	#Returns a list: per-population SNP dataframes (SNPlist), per-population
	#per-contig coverage (AllCoverage) and the sorted union of SNP ids.
	
	SNPlist <- list()
	AllCoverage <- list()
	
	for(i in seq_along(contig.dataframes)){
		contig.df <- get(contig.dataframes[i],envir=envir,inherits=FALSE)
		SNPlist[[i]] <- mapSNPsToContigs(contig.df,contig.map,cDNA.name)
		if(!identical(SNPlist[[i]],0)){
			#unique id: cDNA name, mapped position, source contig, original
			#position, and the reference/variant alleles
			SNPlist[[i]]$SNPid <- paste(cDNA.name,SNPlist[[i]]$start.pos,SNPlist[[i]]$ref.accno,SNPlist[[i]]$old.start,SNPlist[[i]]$ref.nucl,SNPlist[[i]]$var.nucl,sep="_")
			}else{
			#no SNPs for this population: coerce the 0 sentinel to a list so
			#that later $SNPid accesses return NULL instead of erroring (the
			#original relied on the implicit, warning-emitting coercion done
			#by `SNPlist[[i]]$SNPid <- NULL` on an atomic vector)
			SNPlist[[i]] <- as.list(SNPlist[[i]])
			}
		
		AceCO <- get(AceCOs[i],envir=envir,inherits=FALSE)
		ace.file<-ace.files[i]
		#read each supercontig member's ace record and estimate per-base coverage
		linesAceList <- lapply(X=contig.map[[cDNA.name]]$sc.contig,
			FUN=function(X,ace.file,AceCO){
				readContigAce(ace.file,AceCO,X)
				},ace.file,AceCO)
		AceObjectList <- lapply(X= linesAceList,FUN=makeAceObject)
		CoverageList <- lapply(X= AceObjectList,FUN= mapCoverage)
		rm(linesAceList, AceObjectList)
		names(CoverageList) <- contig.map[[cDNA.name]]$sc.contig
		#should now have an estimate of the coverage at each base
		AllCoverage[[i]] <- CoverageList
		
		}
	names(SNPlist) <- contig.dataframes
	names(AllCoverage) <- contig.dataframes
	#all SNP ids seen in any population (lapply + unlist avoids sapply's
	#type instability when some populations have no SNPs)
	AllSNPids <- sort(unique(unlist(lapply(X=SNPlist,FUN=function(X){
		X$SNPid
		}))))
	list(SNPlist = SNPlist ,AllCoverage=AllCoverage,AllSNPids=AllSNPids)
	}


#debug(captureSNPs)
#captureSNPs(contig.map,c("grouse1hc","grouse2hc"),names(contig.map[2]),c("grouse1CO","grouse2CO"),c("/Paterson/Datafiles/grouse/mapping/grouse1.ace","/Paterson/Datafiles/grouse/mapping/grouse2.ace"))

baseCoverage <- function(inputlist){
	#Builds a per-SNP reference table (one row per SNP id) and fills in, for
	#each population, the read coverage at the SNP position plus the SNP
	#frequency and depth reported by the mapper.
	#
	#inputlist: list returned by captureSNPs (SNPlist, AllCoverage,
	#AllSNPids components).
	#Returns the table ordered by start.pos, or 0 when there is nothing
	#to report.
	SNPlist <- inputlist[['SNPlist']]
	AllCoverage <- inputlist[['AllCoverage']]
	AllSNPids <- inputlist[['AllSNPids']]
	if(is.null(AllSNPids)) return(0)
	
	if(all(sapply(X=AllCoverage,function(X){length(X)==0}))){
		AllSNPref <- 0
		return(AllSNPref)
		}
	if(any(sapply(AllCoverage,length)/length(AllCoverage[[1]])!=1)) stop('different numbers of contigs in AllCoverage')
	
	#split each id once (the original re-split AllSNPids for every column);
	#field layout depends on whether the cDNA name itself contains an
	#underscore (e.g. "cDNA_1405" vs "Contig...")
	id.parts <- strsplit(AllSNPids,"_")
	if(substr(AllSNPids[1],1,6)=="Contig"){
		AllSNPref <- data.frame(id = AllSNPids,
			cDNA = sapply(id.parts,function(X){X[1]}),
			start.pos = as.numeric(sapply(id.parts,function(X){X[2]})),
			contig = sapply(id.parts,function(X){X[3]}),
			old.pos = as.numeric(sapply(id.parts,function(X){X[4]})),
			stringsAsFactors=FALSE)
		}else{
		#cDNA name spans the first two underscore-separated fields
		AllSNPref <- data.frame(id = AllSNPids,
			cDNA = sapply(id.parts,function(X){paste(X[1],X[2],sep="_")}),
			start.pos = as.numeric(sapply(id.parts,function(X){X[3]})),
			contig = sapply(id.parts,function(X){X[4]}),
			old.pos = as.numeric(sapply(id.parts,function(X){X[5]})),
			stringsAsFactors=FALSE)
		}
	
	#initialise per-population columns: coverage defaults to 0, freq and
	#depth to -1 (distinguishes "not assessed" from a genuine 0)
	AllSNPref[,paste(names(AllCoverage),"cov",sep=".")] <- 0
	AllSNPref[,paste(names(SNPlist),"freq",sep=".")] <- -1
	AllSNPref[,paste(names(SNPlist),"depth",sep=".")] <- -1
	#str of AllCoverage is popn > contig > unpadded/padded then dataframes
	for(pop in names(AllCoverage)){
		for(pos in seq_len(nrow(AllSNPref))){
			AllSNPref[pos,paste(pop,"cov",sep=".")] <- pickCoverage(AllCoverage[[pop]],
				contig=AllSNPref[pos,'contig'],old.pos=AllSNPref[pos,'old.pos'])
			AllSNPref[pos,paste(pop,"freq",sep=".")] <- pickSNP(SNPlist[[pop]],id= AllSNPref[pos,'id'])
			AllSNPref[pos,paste(pop,"depth",sep=".")] <- pickDepth(SNPlist[[pop]],id= AllSNPref[pos,'id'])
			}#must be a vector method to do this...
		}
	
	AllSNPref[order(AllSNPref$start.pos),]
	}

pickCoverage <- function(CoverageList, contig,old.pos){
	#Look up the read coverage for a single base.
	#CoverageList: per-contig list, each element holding a 'coverage'
	#dataframe (as built by mapCoverage).
	#contig: contig name; old.pos: base position (row) within that contig.
	#Returns 0 when the lookup yields NULL.
	cov.val <- CoverageList[[contig]][['coverage']][old.pos,'coverage']
	if(is.null(cov.val)) return(0)
	cov.val
	}

pickSNP <- function(snp.df, id){
	#Return the SNP frequency (numeric, "%" stripped) for a SNP id, or 0
	#when the id is not present in snp.df.
	#snp.df: dataframe with SNPid and freq columns (freq like "45%").
	#
	#The original tested `!is.na(match(...))>0`, which parses as
	#`!(is.na(match(...)) > 0)` and only works by accident; compute the
	#match once and test it directly.
	idx <- match(id, snp.df$SNPid)
	if(!is.na(idx)){
		#freq is reported as e.g. "45%"; strip the trailing percent sign
		snp.freq <- as.numeric(sub("%$","",snp.df[idx,'freq']))
		}else{
		snp.freq <- 0
		}
	snp.freq
	}
pickDepth <- function(snp.df, id){
	#Return the read depth for a SNP id, or 0 when the id is absent.
	#snp.df: dataframe with SNPid and depth columns.
	#
	#As in pickSNP, replaces the accidental `!is.na(x)>0` precedence trick
	#with a single match and a direct is.na test.
	idx <- match(id, snp.df$SNPid)
	if(!is.na(idx)){
		snp.depth <- as.numeric(snp.df[idx,'depth'])
		}else{
		snp.depth <- 0
		}
	snp.depth
	}

readAceCO <- function(file.name){
	#Read contig header ("CO") line numbers from a grepped ace file,
	#e.g. produced by: grep -n 'CO' file.ace > fileCO.txt
	#Returns a dataframe with the line number of each CO record plus the
	#fields of the CO line itself.
	co.df <- read.table(file.name,stringsAsFactors=FALSE)
	names(co.df) <- c("line","contig","length","no.reads","segments","orientation")
	#grep -n prefixes each hit with "<line>:", so the first field looks
	#like "123:CO"; strip the ":CO" suffix to recover the line number
	co.df$line <- as.numeric(sub(':CO','',co.df$line))
	co.df
	}

#grouse1CO <- readAceCO('/Paterson/Datafiles/grouse/mapping/grouse1CO.txt')
#grouse2CO <- readAceCO('/Paterson/Datafiles/grouse/mapping/grouse2CO.txt')


readContigAce <- function(ace.file,AceCO,contig){
	#Read the lines corresponding to a specific contig from an ace file.
	#AceCO object is returned by the readAceCO function.
	#contig: name of the contig to extract.
	#Returns a character vector of the contig's lines.
	
	#total line count of the ace file, used as the stop line when the
	#requested contig is the last one in the file.
	#BUG FIX: the original ran `wc -l mapping/grouse1.ace` (hard-coded
	#path) instead of counting lines in 'ace.file'.
	len.ace <- system(paste('wc -l',ace.file),intern=T)
	len.ace <- sub('^[ ]*','',len.ace)
	len.ace <- strsplit(len.ace," ")[[1]][1]
	
	co.idx <- match(contig,AceCO$contig)
	start.read <- AceCO$line[co.idx]
	
	if(co.idx==length(AceCO$line)){
		#last contig: read through to the end of the file
		stop.read <- len.ace 
		}else{			
		#otherwise stop one line before the next contig's CO record
		stop.read <- AceCO$line[co.idx+1]-1
		}
	tmp.cmd <- paste("sed -n \'",start.read,",",stop.read,"p\' ",ace.file,sep="")
	linesAce <- system(tmp.cmd,intern=T)
	linesAce
	}

#debug(readContigAce)
#tst <- readContigAce(ace.file='/Paterson/Datafiles/grouse/mapping/grouse1.ace',AceCO=grouse1CO,contig='contig01000')

#produce some complex list object
makeAceObject <- function(linesAce,read.info=FALSE){
	#Creates a list object from a vector of character strings (one ace
	#contig record) generated by readContigAce.
	#read.info: when TRUE, also collect each read's (padded) sequence into
	#the 'read.seq' component.
	#
	#BUG FIX: the original had a stray closing brace after the read.info
	#block, which terminated the function early and stranded the remaining
	#statements at top level; additionally 'read.seq' was assigned from
	#tmp.read.seq unconditionally, although that variable only exists when
	#read.info is TRUE. The assignment now lives inside the if block.
	
	AceObject <- list()
	#contig header: CO <name> <length> <no.reads> <segments> <orientation>
	tmp.CO <- linesAce[grep('CO',linesAce)]
	AceObject[['CO']] <- strsplit(tmp.CO," ")[[1]][2:6]
	names(AceObject[['CO']]) <- c("contig","length","no.reads","segments","orientation")
	
	#padded consensus sequence (everything between the CO line and BQ)
	AceObject[['padded.seq']] <- paste(linesAce[2:(grep("^BQ",linesAce)[1]-1)],collapse="")
	
	#base qualities: the block between BQ and the first AF line
	AceObject[['BQ']] <- as.numeric(strsplit(paste(linesAce[(grep('^BQ',linesAce)+1):(grep("^AF",linesAce)[1]-1)],collapse="")," ")[[1]])
	
	#reads and their position relative to padded consensus
	tmp.AF <- linesAce[grep('^AF',linesAce)]
	AceObject[['AF']] <- data.frame(
		read = sapply(X=tmp.AF,FUN=function(X){strsplit(X," ")[[1]][2]}),
		orientation = sapply(X=tmp.AF,FUN=function(X){strsplit(X," ")[[1]][3]}),
		start.pos = as.numeric(sapply(X=tmp.AF,FUN=function(X){strsplit(X," ")[[1]][4]})),
		stringsAsFactors=FALSE)
	
	#ignore BS
	
	#read lengths: RD <name> <padded length> ...
	tmp.RD <- linesAce[grep('^RD',linesAce)]
	AceObject[['RD']] <- data.frame(
		read = sapply(X=tmp.RD,function(X)strsplit(X," ")[[1]][2]),
		length = as.numeric(sapply(X=tmp.RD,function(X)strsplit(X," ")[[1]][3])),
		stringsAsFactors=FALSE)
	
	if(read.info==TRUE){ #tried for mapCoverageQual
		#ace wraps sequences at 50 characters, so a read of length L
		#occupies ceiling(L/50) lines directly after its RD line
		tmp.read.seq <- list()
		for(i in seq_along(tmp.RD)){
			tmp.st <- grep('^RD',linesAce)[i]+1
			tmp.sp <- grep('^RD',linesAce)[i]+ceiling(AceObject[['RD']]$length[i]/50)
			tmp.read.seq[[i]] <- paste(linesAce[tmp.st:tmp.sp],collapse="")
			}
		names(tmp.read.seq)	<- substr(AceObject[['RD']]$read,1,14)
		AceObject[['read.seq']] <- tmp.read.seq
		}
	
	#read clipping relative to the padded consensus: fields 2-5 of each QA
	#line; row order follows the RD records
	tmp.QA <- linesAce[grep('^QA',linesAce)]
	AceObject[['QA']] <- data.frame(
		read = AceObject[['RD']]$read,
		start.pos1 = as.numeric(sapply(X=tmp.QA,function(X)strsplit(X," ")[[1]][2])),
		stop.pos1 = as.numeric(sapply(X=tmp.QA,function(X)strsplit(X," ")[[1]][3])),
		start.pos.clear = as.numeric(sapply(X=tmp.QA,function(X)strsplit(X," ")[[1]][4])),
		stop.pos.clear = as.numeric(sapply(X=tmp.QA,function(X)strsplit(X," ")[[1]][5])),
		stringsAsFactors=FALSE)
		
	#watch out for 'contigs' in the reads (sub-contigs?)
	AceObject
	}
#tmp.ace <- makeAceObject(linesAce)

#The structure of GS mapper ace files is unclear.
#Assume for now that reads labelled as contigs within the RD slot
#can be ignored.

#calculate coverage for each base in a contig
mapCoverage <- function(AceObject){
	#Takes input from makeAceObject and calculates read coverage for each
	#base of the padded consensus.
	#Returns a list with 'coverage' (pad characters removed) and
	#'padded.coverage' dataframes, or NULL when there are no usable reads.
	
	tmp.pad.df <- data.frame(pad.pos=1:nchar(AceObject$padded.seq),
		base=strsplit(AceObject$padded.seq,"")[[1]],
		coverage=0,stringsAsFactors=FALSE)
	
	#drop reads that are themselves labelled as contigs (sub-contigs).
	#BUG FIX: the original used AF[-grep('contig',...),], which drops ALL
	#rows when there is no match, because x[-integer(0)] selects nothing;
	#grepl-based logical filtering handles the no-match case correctly.
	tmp.AF <- AceObject$AF[!grepl('contig',AceObject$AF$read),]
	tmp.QA <- AceObject$QA[!grepl('contig',AceObject$QA$read),]
	#tmp.read.len <- tmp.QA$stop.pos.clear + 1 - tmp.QA$start.pos.clear
	#convert each read's clear range to padded-consensus coordinates
	tmp.cont.stop <- tmp.AF$start.pos + tmp.QA$stop.pos.clear -1
	tmp.cont.start <- tmp.AF$start.pos + tmp.QA$start.pos.clear -1
	tmp.cont.start[tmp.cont.start < 1] <- 1
	
	if(length(tmp.cont.start)==0) return(NULL) #ie if no reads
	
	#discard reads lying entirely before the consensus start
	tmp.filter <- tmp.cont.stop > 1
	tmp.cont.stop <- tmp.cont.stop[tmp.filter]
	tmp.cont.start <- tmp.cont.start[tmp.filter]

	#BUG FIX: 'error' is not an R function; use stop
	if(!identical(tmp.AF$read,tmp.QA$read)) stop('AF does not match RD')
	
	for(i in seq_along(tmp.cont.stop)){
		#add 1 to coverage for each base for each overlapping read
		tmp.pad.df$coverage[tmp.cont.start[i]:tmp.cont.stop[i]] <- tmp.pad.df$coverage[tmp.cont.start[i]:tmp.cont.stop[i]] +1
		}
	list(coverage=tmp.pad.df[tmp.pad.df$base!="*",],padded.coverage=tmp.pad.df)
	}
#correlates with depth given by grouse1hc, but not equal to
#base positions seem fine, though if - on ref is out
#go for some heuristic approach..? 
#... eg only work on snps found where all pops coverage >= 10?
#Newbler must use the qual file as well...
#... doubt I have time to use that, just fudge it

mapSNPsToContigs <- function(contig.dataframe,contig.map,cDNA.name){
	#Extracts SNPs from individual sequence-captured contigs and maps
	#their coordinates onto the concatenated layout produced by the
	#join.contigs function (reverse-complementing alleles where needed).
	#
	#contig.dataframe is produced by readNewblerMapper
	#contig.map is produced by join.contigs
	#cDNA.name selects the supercontig within contig.map
	#Returns a dataframe ordered by mapped start.pos, or 0 when no SNPs
	#fall on any member contig.
	tmp.snp.out <- list()
	for(i in seq_along(contig.map[[cDNA.name]]$sc.contig)){
		gctg <- contig.map[[cDNA.name]]$sc.contig[i]
	
		tmp.snp.in <- contig.dataframe[grep(gctg,contig.dataframe$ref.accno),]
	
		#jump to next i if no snps
		if(nrow(tmp.snp.in)==0) next
	
		#prefix 0 so that element i is the offset of contig i within the
		#concatenated sequence
		tmp.concat.end <- c(0,contig.map[[cDNA.name]]$concat.end)
		tmp.snp.out[[gctg]] <- tmp.snp.in
		#keep the original (per-contig) coordinates
		tmp.snp.out[[gctg]][,c('old.start','old.end')] <- tmp.snp.in[,c('start.pos','end.pos')]
		#change start and ends relative to matched contig, and reverse comp if needed
		if(contig.map[[cDNA.name]]$direction[i]=="forward"){
			tmp.snp.out[[gctg]]$start.pos <- tmp.concat.end[i] + tmp.snp.in$start.pos
			tmp.snp.out[[gctg]]$end.pos <- tmp.concat.end[i] + tmp.snp.in$end.pos
			}
		if(contig.map[[cDNA.name]]$direction[i]=="reverse"){
			#positions flip around the contig length; alleles (other than
			#gap characters) are complemented via strComp
			tmp.snp.out[[gctg]]$start.pos <- tmp.concat.end[i] + contig.map[[cDNA.name]]$length[i] - tmp.snp.in$start.pos + 1
			tmp.snp.out[[gctg]]$end.pos <- tmp.concat.end[i] + contig.map[[cDNA.name]]$length[i] - tmp.snp.in$end.pos + 1
			tmp.snp.out[[gctg]]$ref.nucl[tmp.snp.in$ref.nucl!="-"] <- 
				sapply(tmp.snp.in$ref.nucl[tmp.snp.in$ref.nucl!="-"],strComp)
			tmp.snp.out[[gctg]]$var.nucl[tmp.snp.in$var.nucl!="-"] <- 
				sapply(tmp.snp.in$var.nucl[tmp.snp.in$var.nucl!="-"],strComp)
			}
		if(contig.map[[cDNA.name]]$direction[i] %in% c("forward","reverse") == FALSE){
			#the original indexed names(contig.map) by the character
			#cDNA.name, which yields NA; report the name directly
			warning('neither forward nor reverse for ', gctg, ' in ', cDNA.name)
			}
		#output is a list of dataframes, one per sequence captured contig
		}
 
	#tidy up list into a single dataframe
	if(length(tmp.snp.out)==0) return(0) #if there's no snps anywhere
	#single rbind instead of growing the dataframe inside a loop
	tmp.snp.df <- do.call(rbind,unname(tmp.snp.out))
	tmp.snp.df[order(tmp.snp.df$start.pos),]
	}

readNewblerMapper <- function(map.file){
	#Read a (grepped) Newbler mapper SNP table.
	#Expects a tab-separated file whose first two lines are skipped; the
	#leading ">" on each reference accession is stripped.
	snp.df <- read.table(map.file, sep="\t",
		col.names=c('ref.accno','start.pos','end.pos','ref.nucl',
			'var.nucl','depth','freq'),
		skip=2, stringsAsFactors=FALSE)
	snp.df$ref.accno <- sub('^>','',snp.df$ref.accno)
	snp.df
	}

join.contigs <- function(contig,blast.data,contig.info,out.file=NULL){
	#Tries to pick a series of 454 contigs corresponding to the blast hits
	#of an EST/cDNA query, writes an annotated EMBL file (plus fasta) for
	#the concatenated supercontig, and returns a dataframe describing each
	#member contig (length, reads, orientation, concatenated coordinates).
	#
	#contig is the focal contig (query id)
	#blast.data is the output from 454 vs ests
	#contig.info is length and no of reads per contig
	#out.file optionally overrides the default embl output path
	require(GeneR)
	
	#BUG FIX: the original referenced the global blast.cDNA2 here instead
	#of the blast.data argument
	tmp.blast <- blast.data[blast.data$Query.id %in% contig,]
	if(nrow(tmp.blast)==0) {
		cat('no blast hit found for ',contig,'\n')
		return(0)
		}
	tmp.blast <- tmp.blast[order(tmp.blast$bit.score,decreasing=TRUE),]
	#think about adding another column for number of reads

	tmp.inc <- rep(FALSE,nrow(tmp.blast))
	max.q <- max(c(tmp.blast$q.start,tmp.blast$q.end))
	tmp.olap <- rep(0,max.q)

	#picks best contigs, starting from the best bit score and avoiding big overlaps
	for(i in seq_len(nrow(tmp.blast))){
		if(sum(tmp.olap[tmp.blast[i,"q.start"]:tmp.blast[i,"q.end"]])>8) next
		tmp.olap[tmp.blast[i,"q.start"]:tmp.blast[i,"q.end"]] <- 1
		tmp.inc[i] <- TRUE
		}
	
	tmp.blast2 <- tmp.blast[tmp.inc,]
	tmp.sc.contigs <- unique(tmp.blast2$Subject.id[order(tmp.blast2$q.start)])

	if(is.null(out.file)){
		tmp.fileout <- paste("test_files/",tmp.blast2$Query.id[1],"_matched.embl",sep="")
		}else{
		tmp.fileout <- out.file
		}
	
	writeEmblLine(file=tmp.fileout,code="ID",append=F,text=paste(tmp.blast2$Query.id[1],"_matched",sep=""))

	tmp.sc.contigs2 <- sub('lcl\\|','',tmp.sc.contigs)

	tmp.pst <- paste(tmp.sc.contigs2,collapse="; ")
	#BUG FIX: the original referenced the global sc_contig_info2 here
	#instead of the contig.info argument
	tmp.cont.info <- contig.info[match(tmp.sc.contigs2,contig.info$sc.contig),]
	tmp.cont.info$length <- as.numeric(tmp.cont.info$length)
	tmp.cont.info$numreads <- as.numeric(tmp.cont.info$numreads)
	tmp.cont.info$direction <- factor("forward",levels=c("forward","reverse"))
	writeEmblLine(file=tmp.fileout,code="CC",text=tmp.pst)
	#source feature spans all contigs plus 100 N spacers between them
	writeEmblLine(file=tmp.fileout,code="FT",header="source", 
		text=paste("1",sum(tmp.cont.info$length)+100*(dim(tmp.cont.info)[1]-1),sep=".."),nextfield=F)
	writeEmblLine(file=tmp.fileout,code="FT",
		text="/organism=\"Lagopus lagopus\"",nextfield=F)
	writeEmblLine(file=tmp.fileout,code="FT",
		text="/mol_type=\"genomic DNA\"",nextfield=F)

	#say where the contigs are
	tmp.l <- 1
	tmp.cont.info$concat.start <- 0
	tmp.cont.info$concat.end <- 0

	#begin big loop to annotate each contig

	for(i in seq_along(tmp.sc.contigs)){
		tmp.cont.info$concat.start[i] <- tmp.l
		tmp.l <- tmp.l + tmp.cont.info$length[i] +100 -1
		tmp.cont.info$concat.end[i] <- tmp.l
		tmp.l <- tmp.l +1
		writeEmblLine(file=tmp.fileout,code="FT",header="misc_feature",
			text=paste(tmp.cont.info$concat.start[i],"..",tmp.cont.info$concat.end[i]-100,sep=""),nextfield=F)
		#change header to misc_feature?
		writeEmblLine(file=tmp.fileout,code="FT",text=paste("/note=\"",tmp.cont.info$sc.contig[i],"\"",sep=""),nextfield=F)
		tmp.blast.cont <- tmp.blast2[tmp.blast2$Subject.id==tmp.sc.contigs[i],]
		#NOTE(review): path to the contig fasta is hard-coded; confirm it
		#is valid relative to the intended working directory
		tmp.seqi <- strReadFasta("grouse_seq_cap/AllGrouseContigs.fasta",name=tmp.cont.info$sc.contig[i])
	
		if(tmp.blast.cont$s.start[1]>tmp.blast.cont$s.end[1]){ #reverse complement
			tmp.seqi <- strComp(tmp.seqi)
			writeEmblLine(file=tmp.fileout,code="FT",text="/note=\"reverse\"",nextfield=F)
			tmp.cont.info$direction[i] <- "reverse"
			}else{
			writeEmblLine(file=tmp.fileout,code="FT",text="/note=\"forward\"",nextfield=F)
			}
		if(i < length(tmp.sc.contigs)){
			#annotate the 100 bp N spacer as a gap of unknown length
			writeEmblLine(file=tmp.fileout,code="FT",header="gap",
			text=paste(tmp.cont.info$concat.end[i]-100+1,"..",tmp.cont.info$concat.end[i]-1,sep=""),nextfield=F)
		writeEmblLine(file=tmp.fileout,code="FT",header="",
			text="/estimated_length=unknown",nextfield=F)
			}
		
		#add markup for blast hits (subject coordinates are flipped when
		#the contig was reverse complemented above)
		if(tmp.blast.cont$s.start[1]>tmp.blast.cont$s.end[1]) {
			tmp.s.start <- tmp.cont.info$length[i]+1 - tmp.blast.cont$s.end
			tmp.s.end <- tmp.cont.info$length[i] - tmp.blast.cont$s.start +1
			}else{
			tmp.s.end <- tmp.blast.cont$s.end
			tmp.s.start <- tmp.blast.cont$s.start
			}
		tmp.s.end2 <- tmp.cont.info$concat.start[i] + tmp.s.end-1
		tmp.s.start2 <- tmp.cont.info$concat.start[i] + tmp.s.start-1
	
		#tmp.tf flags the very last feature line so writeEmblLine can close
		#the feature table
		tmp.tf <- FALSE
	
		for(j in seq_along(tmp.s.start2)){
			if(i == length(tmp.sc.contigs)&j == length(tmp.s.start2)) tmp.tf <- TRUE
			writeEmblLine(file=tmp.fileout,code="FT",header="misc_feature",
				text=paste(tmp.s.start2[j],"..",tmp.s.end2[j],sep=""),nextfield=F)
			#change header to misc_feature?
		
			writeEmblLine(file=tmp.fileout,code="FT",
				text=paste("/note=\"blast hit to query: q.start, ",tmp.blast.cont$q.start[j], 
				"; q.end, ",tmp.blast.cont$q.end[j],
				"; alignment length, ",tmp.blast.cont$alignment.length[j],
				"; mismatches, ",tmp.blast.cont$mismatches[j],
				"; gap openings, ",tmp.blast.cont$gap.openings[j],
				"; bit.score, ",tmp.blast.cont$bit.score[j],"\"",sep=""), nextfield=tmp.tf)
			}
	
		#concatenate contig sequences with 100 N spacers
		if(i==1){
			tmp.seqout <- tmp.seqi
			}else{
			tmp.seqout <- paste(tmp.seqout,paste(rep("N",100),collapse=""),tmp.seqi,sep="")
			}
	
	
		}
	tmp.fileoutf <- sub('\\.[a-z]*','\\.fasta',tmp.fileout)
	placeString(tmp.seqout,seqno=0)
	setStrand(0)
	writeEmblSeq(tmp.fileout)
	
	if(tmp.fileoutf!=tmp.fileout){
		writeFasta(file=tmp.fileoutf,name=sub('\\.[a-z]','',tmp.fileout))
		cat("Sequence written to ",tmp.fileout," and ",tmp.fileoutf,"\n")
		}else{
		cat("Sequence written to ",tmp.fileout,"\n")
		}
	#clear the GeneR sequence buffer
	placeString("",seqno=0)
	tmp.cont.info
	}
#join.contigs("cDNA_1405-1",blast.data=blast.cDNA2,contig.info=sc_contig_info2,out.file="test_files/c1405tst.embl")
#work out whether SNPs dn or ds

codingSNPref <- function(AllSNPref,dir.embl="/Paterson/Datafiles/grouse/embl_exon2",seq.name=NULL,file.name=NULL){
	#Annotates each SNP in AllSNPref with whether it falls in a coding
	#region ('cds') and whether it is non-synonymous ('dn'), using the CDS
	#features of an EMBL file (perhaps created by add.CDS).
	#
	#AllSNPref is an object (dataframe) from getAllSPs or baseCoverage
	#dir.embl: directory holding "<seq.name>_exon.embl" files
	#seq.name: defaults to the cDNA name of the first SNP row
	#file.name: optional explicit embl file path (overrides dir.embl)
	
	if(is.null(AllSNPref)) return('null.input')
	if(identical(AllSNPref,0)) return('zero.input')
	
	
	if(is.null(seq.name)) seq.name <- AllSNPref[1,"cDNA"]
	if(is.null(file.name)) {
		embl.file.name<-paste(dir.embl,"/",seq.name,"_exon.embl",sep="")
		}else{
		embl.file.name <- file.name[1]
		}
	
	if(!file.exists(embl.file.name)){
		warning(embl.file.name, " does not exist... returning\n")
		AllSNPref$cds <- FALSE
		AllSNPref$dn <- FALSE 
		return(AllSNPref)
		}

	
	#extract feature table lines (strip the leading "FT" tag)
	tmp.features <- system(paste("grep \'FT\'",embl.file.name,"|sed \'s/FT[ \t]//\'"),intern=TRUE)
	cds.lines <- grep('^[ ]*CDS ',tmp.features)
	product.lines <- grep('/product',tmp.features)
	if(length(cds.lines)==0){
		#no CDS features: nothing can be coding. The original looped over
		#1:0 here and errored on the NA index.
		warning('no CDS features found in ', embl.file.name)
		AllSNPref$cds <- FALSE
		AllSNPref$dn <- FALSE
		return(AllSNPref)
		}
	#each CDS description runs from its CDS line to just before the next
	#/product qualifier (descriptions may span several lines)
	grep.cds <- character(length=length(cds.lines))
	for(cdsi in seq_along(grep.cds)){
		grep.cds[cdsi] <- paste(tmp.features[cds.lines[cdsi]:(product.lines[cdsi]-1)],collapse="")
		
		}
	#extract coding seqs
	grep.cds <- sub('^[ ]*CDS ','',grep.cds)
	grep.cds <- gsub('[ ]*','',grep.cds)

	complementCDS <- FALSE
	if(substr(grep.cds[1],1,4)=="comp"){
		complementCDS <- TRUE
		}

	ref.seq <- readEmblSeq(embl.file.name)
	
	#strip complement(...)/join(...) wrappers, leaving "a..b,c..d"
	grep.cds <- sub("complement\\(","",grep.cds)
	grep.cds <- sub("join\\(","",grep.cds)
	grep.cds <- gsub(")","",grep.cds)

	#find masked regions (misc_feature with /note="mask") and replace the
	#masked bases with "#" so they are excluded from translation
	mask.lines <- grep("/note=\"mask\"",tmp.features) -1
	if(length(mask.lines)>0){
		grep.mask <- tmp.features[mask.lines]
		grep.mask <- sub('^[ ]*misc_feature','',grep.mask)
		grep.mask <- gsub('[ ]*','',grep.mask)
		
		mask.list <- strsplit(grep.mask,"\\.\\.")
		mask.list <- lapply(mask.list,as.numeric)
		#single-position masks become start==end pairs
		mask.list <- lapply(X=mask.list,FUN=function(X){
			if(length(X)==1){
				return(c(X,X))
				}else{
				return(X)
				}
			})
		mask.list <- matrix(unlist(mask.list),ncol=2,byrow=TRUE)
		colnames(mask.list) <- c("start","end")
		mask.list <- as.data.frame(mask.list) 
		#output starts and ends of mask as a dataframe
		tmp.loc <- numeric(0)
		for(i in seq_len(nrow(mask.list))) tmp.loc <- c(tmp.loc,seq(mask.list[i,1],mask.list[i,2]))
		for(i in seq_along(tmp.loc)){
			ref.seq <- paste(substr(ref.seq,1,(tmp.loc[i]-1)),"#",substr(ref.seq,(tmp.loc[i]+1),nchar(ref.seq)),sep="")
			}

		}


	# Call mutateSeq for each SNP
	tmp <- t(sapply(X=AllSNPref[,"id"],FUN= mutateSeq,cds.str=grep.cds,ref.seq=ref.seq,complementCDS=complementCDS))
	tmp <- as.data.frame(tmp)
	names(tmp) <- c("cds","dn")
	cbind(AllSNPref,tmp)
	}



	
mutateSeq <- function(snp.id,cds.str,ref.seq,complementCDS=FALSE){
	#Decide whether a SNP falls in a coding region and, if so, whether it
	#changes the translated protein (non-synonymous).
	#
	#snp.id: id from getAllSPs or baseCoverage; the "cDNA_"/"ssh_" prefix
	#	is collapsed so the mapped position is always underscore-field 2
	#cds.str: character vector of CDS descriptions like "1..10,20..30"
	#ref.seq: reference sequence string; "#" marks masked bases
	#complementCDS: TRUE when the CDS is on the reverse strand
	#Returns c(in.cds, non.synonymous) as a logical vector of length 2.
	require(Biostrings)
	snp.id <- sub("^cDNA_","cDNA--",snp.id)
	snp.id <- sub("^ssh_","cDNA--",snp.id)
	#parse exon start/end positions for each CDS description
	tmp.from <- list()
	tmp.to <- list()
	for(i in seq_along(cds.str)){
		tmp.from[[i]] <- as.numeric(sapply(X=strsplit(cds.str[i],","),FUN=function(X){sub('\\.\\.[0-9]*$','',X)}))
		tmp.from[[i]] <- sort(tmp.from[[i]])
		tmp.to[[i]] <- as.numeric(sapply(X=strsplit(cds.str[i],","),FUN=function(X){sub('^[0-9]*\\.\\.','',X)}))
		tmp.to[[i]] <- sort(tmp.to[[i]])	
		}
	
	
	tmp.id <- strsplit(snp.id,"_")[[1]]
	mut.start <- as.numeric(tmp.id[2])
	
	
	mut.cds <- FALSE #test whether in coding region
	mut.dn <- FALSE
	for(i in seq_along(tmp.from)){
		#strictly inside an exon (boundary positions are not counted)
		if(any(mut.start>tmp.from[[i]]&mut.start<tmp.to[[i]])) mut.cds <- TRUE
		}

	if(mut.cds==FALSE) return(c(as.logical(mut.cds),as.logical(mut.dn)))
	#if the mutation isn't in a coding region just return
	
	
	#mutation in coding region...
	mut.old <- sub("-","",tmp.id[5])
	mut.new <- sub("-","",tmp.id[6])
	if(nchar(mut.old)!=nchar(mut.new)){
		#if there's an indel in the coding sequence assume non-synonymous and return
		mut.dn <- TRUE
		return(c(as.logical(mut.cds),as.logical(mut.dn)))
		}

	#should now be carrying on only if a straight swap of nucleotide(s)
	mut.len <- nchar(mut.old)

	#assemble the reference coding sequence exon by exon
	#(generally only 1 to 3 exons, so the nested loop is acceptable;
	#the original initialised cds.old twice - once was enough)
	cds.old <- character(length=length(tmp.to))
	for(cds in seq_along(tmp.to)){
		for(j in seq_along(tmp.to[[cds]])){
			cds.old[cds] <- paste(cds.old[cds],substr(ref.seq,start=tmp.from[[cds]][j],stop=tmp.to[[cds]][j]),sep="")
			}
		} 
		

	#masked bases are dropped before translation
	cds.old <- gsub("#","",cds.old)
	
	if(complementCDS){
		cds.old <- as.character(reverseComplement(DNAStringSet(cds.old)))
		}
	
	
	#translate, but use GeneR function to allow Ns
	old.prot <- as.character(sapply(cds.old,GeneR::strTranslate))
	
	#apply the substitution to the reference sequence
	new.seq <- paste(substr(ref.seq,1,(mut.start-1)),mut.new,substr(ref.seq,(mut.start+mut.len),nchar(ref.seq)),sep="")

	#translate mutated sequence
	cds.new <- character(length=length(tmp.to))
	for(cds in seq_along(tmp.to)){
		for(j in seq_along(tmp.to[[cds]])){
			cds.new[cds] <- paste(cds.new[cds],substr(new.seq,start=tmp.from[[cds]][j],stop=tmp.to[[cds]][j]),sep="")
			}
		} 
		

	cds.new <- gsub("#","",cds.new)
	
	if(complementCDS){
		cds.new <- as.character(reverseComplement(DNAStringSet(cds.new)))
		}
	
	
	#translate, but use GeneR function to allow Ns
	new.prot <- as.character(sapply(cds.new,GeneR::strTranslate))
	
	#any difference in translation means non-synonymous
	if(!identical(old.prot,new.prot)) mut.dn <- TRUE
	
	c(as.logical(mut.cds),as.logical(mut.dn))
	}


catagoriseSNPs <- function(snp.id,grep.cds,ref.seq,complementCDS=FALSE){
	#Run mutateSeq against each CDS description separately and combine:
	#the SNP is coding / non-synonymous if it is so for ANY of the CDSs.
	#
	#BUG FIX: mutateSeq returns an unnamed logical vector c(cds, dn) (see
	#its final line), so the original $mut.cds/$mut.dn accesses applied $
	#to an atomic vector and errored; index by position instead.
	tmp.mut.list <- vector(mode="list",length=length(grep.cds))
	for(i in seq_along(grep.cds)){
		tmp.mut.list[[i]] <- mutateSeq(snp.id,grep.cds[i],ref.seq,complementCDS=complementCDS)
		}
	c(any(sapply(tmp.mut.list,function(X){X[1]})),
		any(sapply(tmp.mut.list,function(X){X[2]})))

	}


readEmblSeq <- function(file.name){
	#Read the sequence block of an EMBL file: everything between the SQ
	#header line and the "//" terminator, with spaces and the per-line
	#numbering digits stripped, concatenated into a single string.
	embl.lines <- readLines(file.name)
	seq.block <- embl.lines[(grep('^SQ',embl.lines)+1):(grep('//',embl.lines)-1)]
	seq.block <- gsub('[0-9]','',gsub(' ','',seq.block))
	paste(seq.block,collapse="")
	}

readTsvFile <- function(tsv.file,line.file){
	#Function to get reference points for contigs within a tsv file.
	#tsv.file is the name of the tsv file
	#line.file is created by grepping the tsv file for contig header lines
	#(grep -n style: line number, contig name, start)
	#Returns a dataframe giving, for each contig, the first and last line
	#of its data block within the tsv file.
	lines.df <- read.table(line.file,col.names=c("line","contig","start"),stringsAsFactors=FALSE)
	lines.df$start <- lines.df$line+1
	lines.df$stop <- 0
	#each block ends one line before the next contig's header line
	lines.df$stop[-length(lines.df$stop)] <- 
	lines.df$line[-1] -1
	#the last block ends at the last line of the file.
	#BUG FIX: the original took the 2nd whitespace token of `wc -l` output,
	#which on GNU systems is the file NAME (and stored it as character,
	#coercing the whole stop column); parse the leading count instead.
	tmp <- system(paste('wc -l',tsv.file),intern=TRUE)
	lines.df$stop[length(lines.df$stop)] <- as.numeric(sub('^[ ]*([0-9]+).*$','\\1',tmp))
	lines.df
	}

getContigFromTsv <- function(contig,lines.df,tsv.file){
	#Extract the rows belonging to one contig from a tsv file.
	#lines.df: created by readTsvFile (start/stop line per contig block).
	#Uses sed to pull out only the relevant line ranges rather than
	#reading the whole (potentially large) tsv file.
	
	tmp <- lines.df[lines.df$contig==contig,]
	
	if(nrow(tmp)==0){
		#no block for this contig: return an empty, correctly-typed frame
		#(the original looped over 1:0 and errored on an NA sed range)
		return(data.frame(position=numeric(0),reference=character(0),consensus=character(0),quality.score=integer(0),depth=integer(0),signal=numeric(0),std.deviation=numeric(0)))
		}
	
	#write extracted lines to a private temp file; the original used a
	#fixed 'tmp_contig.txt' in the working directory and never removed it
	extract.file <- tempfile(fileext=".txt")
	on.exit(unlink(extract.file),add=TRUE)
	
	#collect chunks and rbind once, instead of growing the frame per loop
	chunk.list <- vector(mode="list",length=nrow(tmp))
	for(i in seq_len(nrow(tmp))){
		tmp.cmd <- paste("sed -n \'",tmp$start[i],",",tmp$stop[i],"p\' ",tsv.file," > ",extract.file,sep="")
		system(tmp.cmd)
		chunk.list[[i]] <- read.table(extract.file,stringsAsFactors=FALSE)
		}
	tsv.df <- do.call(rbind,chunk.list)
	names(tsv.df) <- c("position","reference","consensus","quality.score","depth","signal","std.deviation")
	tsv.df
	}

#see also plot.contig.coverage in read_tsv_files.R
