# Hypothesis 1: transcription factors are related to hydroxymethylation.

setwd("E:/5hmc_file/2_5hmc_yjp_bam/ASM/")
# Per-site ASHm summary tables produced by earlier pipeline steps.
# (was head=T: partial argument matching of `header` plus reassignable T)
file=read.csv("20201120/at.least.one.AShM.in.DC.add.BF.beta0.add.CCHC.csv",header=TRUE)
filea=read.csv("20201112做汇总表/all.FDR.sig.at.least.one.add.direction.same.diff.csv",header=TRUE)
filea$id=paste(filea$Chr,filea$Start,sep = ":")
# Sites significant in more than one comparison; not used further below.
filea1=filea[filea$FDR.sig>1,]

# Classify each site by the predominant direction of the ALT allele's effect
# on 5hmC: count positive vs negative bayes.beta0 values across all
# comparisons (this considers every beta0 available for the site).
# The hard-coded column ranges 3:30 / 31:33 assumed exactly 28 beta0 columns;
# they are now derived from the data so the code survives schema changes.
beta0.cols=grep("bayes.beta0",names(filea),value=TRUE)	# one column per comparison
ftmp=filea[,c("unitID","id",beta0.cols)]
ftmp$up.num=rowSums(ftmp[,beta0.cols]>0,na.rm=TRUE)
ftmp$down.num=rowSums(ftmp[,beta0.cols]<0,na.rm=TRUE)
# Ties (up.num == down.num) fall into "down" here, but are removed by the
# majority filter below, so the label never leaks out.
ftmp$group.alt.up.or.down=ifelse(ftmp$up.num>ftmp$down.num,"up","down")
ftmp=ftmp[,c("unitID","id","up.num","down.num","group.alt.up.or.down")]
# Require a clear majority direction.
ftmp=ftmp[abs(ftmp$up.num-ftmp$down.num)>=1,]
write.csv(ftmp,"./20210103.motif.analysis/consider.all.beta0.117K.site.alt.up.down.statis.csv",quote=FALSE,row.names=FALSE)

# motifbreakR predictions for the 117K ASH sites.
# pctRef / pctAlt give the motif score within the sequence as a percentage of
# the best score that motif could reach on an ideal sequence.
# Reference: http://bioconductor.org/packages/devel/bioc/vignettes/motifbreakR/inst/doc/motifbreakR-vignette.html
motifdata <- read.table("./20210103.motif.analysis/117K.ASH.motif.predict.txt", header = TRUE, sep = "\t")
motifdata$unitID <- paste(motifdata$seqnames, motifdata$start, motifdata$REF, motifdata$ALT, sep = ":")

# Attach the per-site 5hmC direction calls, then classify the predicted
# change in TF binding affinity for the ALT allele.
motifdata <- merge(motifdata, ftmp, by = "unitID")
motifdata$group.TF.affinity <- ifelse(motifdata$pctAlt > motifdata$pctRef, "up", "down")

# Keep one record per site/TF combination.
motifdata$id2 <- paste0(motifdata$unitID, "_", motifdata$geneSymbol)
motifdata <- motifdata[!duplicated(motifdata$id2), ]

# Keep only motifs whose allele score difference is at least 1.5.
motifdata$diff.scores <- abs(motifdata$scoreRef - motifdata$scoreAlt)
motifdata <- motifdata[motifdata$diff.scores >= 1.5, ]

# "<5hmC direction>:<affinity direction>", e.g. "up:down".
motifdata$group.pattern <- paste0(motifdata$group.alt.up.or.down, ":", motifdata$group.TF.affinity)
group <- unique(motifdata$geneSymbol)

# For each TF, count sites in the four (5hmC direction):(affinity direction)
# patterns and run two-sided sign tests:
#   gain.p.value - among sites where ALT gains hydroxymethylation, is an
#                  increase in TF affinity over-represented?
#                  binom.test(up:up, up:up + up:down, p = 0.5)
#   loss.p.value - among sites where ALT loses hydroxymethylation, is a
#                  decrease in TF affinity over-represented?
#                  binom.test(down:down, down:up + down:down, p = 0.5)
# Rewritten as lapply + one do.call(rbind, ...) instead of growing `rt` with
# rbind inside the loop (quadratic copying).
col_names=c("TF","alt.gain.Hydroxymethylation_TF.increase.affinity","alt.loss.Hydroxymethylation_TF.decrease.affinity","alt.gain.Hydroxymethylation_TF.decrease.affinity","alt.loss.Hydroxymethylation_TF.increase.affinity",
"gain.p.value","loss.p.value")
rows=lapply(group,function(tf){
  tmp=motifdata[motifdata$geneSymbol==tf,]
  gain.inc=sum(tmp$group.pattern=="up:up",na.rm=TRUE)
  loss.dec=sum(tmp$group.pattern=="down:down",na.rm=TRUE)
  gain.dec=sum(tmp$group.pattern=="up:down",na.rm=TRUE)
  loss.inc=sum(tmp$group.pattern=="down:up",na.rm=TRUE)
  # Only test when the corresponding denominator is non-zero; otherwise NA.
  gain.p=if(gain.inc+gain.dec>0) binom.test(gain.inc,gain.inc+gain.dec,p=0.5)$p.value else NA_real_
  loss.p=if(loss.inc+loss.dec>0) binom.test(loss.dec,loss.inc+loss.dec,p=0.5)$p.value else NA_real_
  setNames(data.frame(tf,gain.inc,loss.dec,gain.dec,loss.inc,gain.p,loss.p,
                      stringsAsFactors=FALSE),col_names)
})
rt=do.call(rbind,rows)
if(is.null(rt)){	# no TFs at all: still emit an empty, correctly named table
  rt=setNames(data.frame(matrix(nrow=0,ncol=7)),col_names)
}
write.csv(rt,"./20210103.motif.analysis/motif.affinity.compared.with.alt.up.down.stastis.csv",quote=FALSE,row.names=FALSE)


### 2021-01-05: analyze the relationship between per-sample VAF and motif score.
setwd("E:\\5hmc_file\\2_5hmc_yjp_bam\\ASM")
dir1="./bayes_pvalue_beta0/"	# per-pair Bayes p-value / beta0 tables read below
dir2="./bayes_BF/"	# Bayes-factor tables; not used in this section
library(ggplot2)
library(scales)
library(RColorBrewer)
# Sample pairs as "<sampleA>_<sampleB>"; the two IDs become the "<id>.vaf"
# column names when each pair's table is merged in below.
group1=c("X2B_X1T","M8_M7","M6_M5","M2_M1","M48_M47","M50_M49","M28_M27","M30_M29","M26_M25","M35_M36","M18_M17","M20_M19","M22_M21","M40_M39")
# Extract, for one "<normal>_<tumor>" sample pair, the per-site variant allele
# frequency (VAF) at sites with a significant allele-specific 5hmC signal
# (BH-adjusted FDR < 0.1 in the normal and/or the tumor sample).
# Returns a data.frame: unitID, "<firstID>.vaf", "<secondID>.vaf".
# Replaces the previous version, where the first loop iteration (i = 1) was a
# verbatim copy-paste of the loop body.
read.pair.vaf=function(pair){
  d=read.table(paste0(dir1,pair,".bayes_p.txt"),header=TRUE,sep="\t")
  d$unitID=paste(d$chrom,d$position,d$ref,d$var,sep=":")
  normal=data.frame(unitID=d$unitID,normal_var_freq=d$normal_var_freq,
                    FDR=p.adjust(d$normal_bayes_pvalue,method="BH"))
  tumor=data.frame(unitID=d$unitID,tumor_var_freq=d$tumor_var_freq,
                   FDR=p.adjust(d$tumor_bayes_pvalue,method="BH"))
  normal=normal[normal$FDR<0.1,]	# keep ASH-significant sites only
  tumor=tumor[tumor$FDR<0.1,]
  m=merge(normal,tumor,by="unitID",all=TRUE)	# union of significant sites
  out=data.frame(unitID=m$unitID,
                 normal_vaf=as.numeric(gsub("%","",m$normal_var_freq))/100,	# "12.3%" -> 0.123
                 tumor_vaf=as.numeric(gsub("%","",m$tumor_var_freq))/100)
  ids=unlist(strsplit(pair,"_"))
  names(out)=c("unitID",paste0(ids[1],".vaf"),paste0(ids[2],".vaf"))
  out
}

# Merge every pair into one wide table keyed by unitID; the outer join keeps
# any site that is significant in at least one pair.
rt=read.pair.vaf(group1[1])
for(i in 2:length(group1)){
  rt=merge(rt,read.pair.vaf(group1[i]),by="unitID",all=TRUE)
}					# VAF extraction complete



# Column layout assumption: after the merges above, columns 2:29 of `rt` are
# the 28 per-sample VAF columns (two per pair, in group1 order).
# NOTE(review): the index sets below encode which columns belong to "psy" vs
# "healthy" samples — this is inferred from the pair ordering in group1;
# confirm before reusing with a different group1.
rt$mean.psy.vaf=rowMeans(rt[,names(rt)[c(seq(3,13,2),14:21)]],na.rm = T)
rt$mean.healthy.vaf=rowMeans(rt[,names(rt)[c(seq(2,13,2),22:29)]],na.rm = T)
rt$mean.vaf=rowMeans(rt[,2:29],na.rm = T)	# overall mean VAF across all samples

# Columns 30:32 are the three mean columns just added.
vaf.data=data.frame(unitID=rt$unitID,rt[,30:32])
vaf.data$Dscore=rt$mean.vaf-0.5	# deviation of the mean VAF from the balanced 0.5

# Attach dbSNP rsIDs (avsnp150 annotation) by unitID.
tmp=data.frame(rsid=filea$avsnp150,unitID=filea$unitID)
vaf.data=merge(vaf.data,tmp,by="unitID",all.x=T)

# Checkpoint to disk, then immediately re-read — presumably intentional
# (normalizes column types and saves an intermediate); TODO confirm.
write.csv(vaf.data,"20210103.motif.analysis/117K.VAF.data.csv",quote = F,row.names = F)
vaf.data=read.csv("20210103.motif.analysis/117K.VAF.data.csv",header=T)
vaf.data=vaf.data[!vaf.data$rsid==".",]	# keep only sites with an rsID
# motifbreakR predictions for the 807 psychosis ASH sites; the 807-site set is
# used because the full 117K set is too large and noisy.
motifdata <- read.table("./20210103.motif.analysis/807.psy.ASH.motif.predict.txt", header = TRUE, sep = "\t")
motifdata$unitID <- paste(motifdata$seqnames, motifdata$start, motifdata$REF, motifdata$ALT, sep = ":")

# Attach the VAF summaries by rsid, drop sites with no VAF, and keep one
# record per site/TF combination.
file <- merge(motifdata, vaf.data, by = "rsid", all.x = TRUE)
file <- file[!is.na(file$mean.vaf), ]
file$id2 <- paste0(file$rsid, "_", file$geneSymbol)
file <- file[!duplicated(file$id2), ]
file$score.allele.diff <- abs(file$scoreAlt - file$scoreRef)
group <- unique(file$geneSymbol)	# ~350 TFs

# Per-TF Pearson correlation between the site's mean VAF and the ALT-allele
# motif score percentage (pctAlt). TFs with fewer than 5 usable sites are
# skipped. Rewritten as lapply + one do.call(rbind, ...) instead of growing
# `rt` with rbind inside the loop (quadratic copying).
col_names=c("TF","cor.p.value","cor.estimate")
results=lapply(group,function(tf){
  tmp=file[file$geneSymbol==tf,]
  if(nrow(tmp)<5) return(NULL)	# too few sites for a meaningful correlation
  r=cor.test(tmp$mean.vaf,tmp$pctAlt,method="pearson")
  data.frame(TF=tf,cor.p.value=r$p.value,cor.estimate=unname(r$estimate),
             stringsAsFactors=FALSE)
})
rt=do.call(rbind,results)
if(is.null(rt)){	# no TF had >=5 sites: still emit an empty, correctly named table
  rt=setNames(data.frame(matrix(nrow=0,ncol=3)),col_names)
}
write.csv(rt,"./20210103.motif.analysis/cor.mean.vaf.pctAlt.vs.csv",quote=FALSE,row.names=FALSE)

# NOTE: tmp$mean.psy.vaf vs tmp$score.allele.diff also gave a result.
# (This line was previously bare text, which is a parse error when sourcing.)
