###### chromHMM enrichment analysis. Background SNPs: 150K random heterozygous
###### sites from WGS; cases: 200 psyASH sites. Goal: explore which chromatin
###### states psyASH is mainly enriched in.
library(ChIPpeakAnno)

# chromHMM segmentation BED files (brain tissue + GM12878), one per track
sel <- list.files(path = "E:/5hmc_file/脑组织和GM12878的chromHMM数据/",
                  pattern = ".bed", full.names = TRUE)

setwd("E:/5hmc_file/2_5hmc_yjp_bam/ASM/")
# ASM/ASH candidate tables; use full `header =` (original relied on partial
# matching via `head = T`)
file <- read.csv("20201120/at.least.one.AShM.in.DC.add.BF.beta0.add.CCHC.csv", header = TRUE)
filea <- read.csv("20201112做汇总表/all.FDR.sig.at.least.one.add.direction.same.diff.csv", header = TRUE)
filea$id <- paste(filea$Chr, filea$Start, sep = ":")
filea1 <- filea[filea$FDR.sig > 1, ]

file$id <- paste(file$Chr, file$Start, sep = ":")
file1 <- file[file$pattern.not.rm.dupl.num.DC > 1, ]
file2 <- file1[file1$BF_in_DC > 1, ]   # psyASH set with BF > 1 (the "807" set)
file3 <- file1[file1$BF_in_DC > 10, ]  # psyASH set with BF > 10 (the "200" set)

# Control pool: all heterozygous SNPs, minus any ASH site
con <- read.table("./20201203杂合位点/all.heter.snp", header = TRUE, sep = "\t")
con <- con[!con$id %in% as.character(filea$id), ]  # remove the ASH
conid <- as.character(con$id)
# FIX: seed the RNG so the 150K random background sample is reproducible;
# the original draw changed on every run, making results impossible to rerun.
set.seed(20201219)
conid.random <- sample(conid, 150000, replace = FALSE)
con1 <- con[con$id %in% conid.random, ]

###### Prepare the empty result table to be filled by the enrichment loop
col_names <- c("con.file.name", "HMM.file", "HMM.group", "case_in_region",
               "case_not", "con_in_region", "con_not", "OR", "p.value")
result <- data.frame(matrix(NA, 1, ncol = 9))
names(result) <- col_names
result <- result[-1, ]  # keep a zero-row template with the right columns

# Cases: the high-confidence (BF > 10) psyASH sites as 1-bp BED intervals
# (BED is 0-based half-open, hence Start - 1 .. Start)
case <- file3
case_t <- data.frame(chr = case$Chr,
                     start = as.numeric(case$Start) - 1,
                     end = as.numeric(case$Start))
bed_case <- toGRanges(case_t, format = "BED", header = TRUE)

# Controls: the 150K randomly sampled heterozygous SNPs as 1-bp intervals
con_t <- data.frame(chrom = con1$Chr,
                    start = as.numeric(con1$Pos) - 1,
                    end = as.numeric(con1$Pos))
bed_con <- toGRanges(con_t, format = "BED", header = TRUE)

 for(j in 1:10){
 chrom.file=read.table(sel[j],head=F,sep="\t")
names(chrom.file)=c("chr","start","end","group")
groupf=unique(chrom.file$group)
for(k in 1:length(groupf)){
 file_gr=chrom.file[chrom.file$group==k,]
bed_file=toGRanges(file_gr,format="BED",header=T)

 ol1=findOverlapsOfPeaks(bed_case,bed_file)
 ol2=findOverlapsOfPeaks(bed_con,bed_file)
 tcase=as.data.frame(ol1$peaklist$`bed_case///bed_file`)
 tcon=as.data.frame(ol2$peaklist$`bed_con///bed_file`)
 result_tmp=data.frame(matrix(NA,1,ncol=9))
names(result_tmp)=col_names
 result_tmp[,1]="150K.random.id.rm.ASH"
  result_tmp[,2]=sel[j]
  result_tmp[,3]=k
 result_tmp[,4]=dim(tcase)[1]
result_tmp[,5]=200-dim(tcase)[1]
result_tmp[,6]=dim(tcon)[1]
result_tmp[,7]=dim(con_t)[1]-dim(tcon)[1]
result_tmp[,8]=fisher.test(matrix(c(result_tmp[1,4],result_tmp[1,5],result_tmp[1,6],result_tmp[1,7]),nrow = 2))$estimate
result_tmp[,9]=fisher.test(matrix(c(result_tmp[1,4],result_tmp[1,5],result_tmp[1,6],result_tmp[1,7]),nrow = 2))$p.value
  result=rbind(result,result_tmp)
 }
 }
write.csv(result,"./20201219/HMM.enrich.random.1.csv",quote=F,row.names=F)

### Background SNPs: the 117K heterozygous sites from all samples (minus the
### 200 cases); cases: 200 psyASH. Which chromatin states are enriched?
con1 <- filea[!filea$id %in% file3$id, ]

###### Prepare the empty result table to be filled by the enrichment loop
col_names <- c("con.file.name", "HMM.file", "HMM.group", "case_in_region",
               "case_not", "con_in_region", "con_not", "OR", "p.value")
result <- data.frame(matrix(NA, 1, ncol = 9))
names(result) <- col_names
result <- result[-1, ]  # zero-row template

# Cases: the BF > 10 psyASH sites as 1-bp BED intervals
case <- file3
case_t <- data.frame(chr = case$Chr,
                     start = as.numeric(case$Start) - 1,
                     end = as.numeric(case$Start))
bed_case <- toGRanges(case_t, format = "BED", header = TRUE)

# Controls: the remaining 117K heterozygous sites as 1-bp intervals
con_t <- data.frame(chrom = con1$Chr,
                    start = as.numeric(con1$Start) - 1,
                    end = as.numeric(con1$Start))
bed_con <- toGRanges(con_t, format = "BED", header = TRUE)

# Enrichment loop: 200 psyASH cases vs the 117K background, per chromHMM
# file and per chromatin state; Fisher's exact test on each 2x2 table.
for (j in seq_len(10)) {
  chrom.file <- read.table(sel[j], header = FALSE, sep = "\t")
  names(chrom.file) <- c("chr", "start", "end", "group")
  groupf <- unique(chrom.file$group)
  for (k in seq_along(groupf)) {
    # BUG FIX: filter on the actual state label groupf[k], not the loop
    # index k (the original only worked for states numbered 1..n, no gaps).
    file_gr <- chrom.file[chrom.file$group == groupf[k], ]
    bed_file <- toGRanges(file_gr, format = "BED", header = TRUE)

    ol1 <- findOverlapsOfPeaks(bed_case, bed_file)
    ol2 <- findOverlapsOfPeaks(bed_con, bed_file)
    tcase <- as.data.frame(ol1$peaklist$`bed_case///bed_file`)
    tcon <- as.data.frame(ol2$peaklist$`bed_con///bed_file`)

    result_tmp <- data.frame(matrix(NA, 1, ncol = 9))
    names(result_tmp) <- col_names
    result_tmp[, 1] <- "117K.rm.psyASH"
    result_tmp[, 2] <- sel[j]
    result_tmp[, 3] <- groupf[k]  # record the real state label
    result_tmp[, 4] <- nrow(tcase)
    result_tmp[, 5] <- nrow(case) - nrow(tcase)
    result_tmp[, 6] <- nrow(tcon)
    result_tmp[, 7] <- nrow(con_t) - nrow(tcon)
    # Run Fisher's exact test once; reuse for OR and p-value
    ft <- fisher.test(matrix(as.numeric(result_tmp[1, 4:7]), nrow = 2))
    result_tmp[, 8] <- ft$estimate
    result_tmp[, 9] <- ft$p.value
    result <- rbind(result, result_tmp)
  }
}
write.csv(result, "./20201219/HMM.enrich.117K.rm.200.psyASH.csv", quote = FALSE, row.names = FALSE)

### Background SNPs: the 117K heterozygous sites from all samples (minus the
### 807 cases); cases: 807 psyASH. Which chromatin states are enriched?
con1 <- filea[!filea$id %in% file2$id, ]

###### Prepare the empty result table to be filled by the enrichment loop
col_names <- c("con.file.name", "HMM.file", "HMM.group", "case_in_region",
               "case_not", "con_in_region", "con_not", "OR", "p.value")
result <- data.frame(matrix(NA, 1, ncol = 9))
names(result) <- col_names
result <- result[-1, ]  # zero-row template

# Cases: the BF > 1 psyASH sites as 1-bp BED intervals
case <- file2
case_t <- data.frame(chr = case$Chr,
                     start = as.numeric(case$Start) - 1,
                     end = as.numeric(case$Start))
bed_case <- toGRanges(case_t, format = "BED", header = TRUE)

# Controls: the remaining 117K heterozygous sites as 1-bp intervals
con_t <- data.frame(chrom = con1$Chr,
                    start = as.numeric(con1$Start) - 1,
                    end = as.numeric(con1$Start))
bed_con <- toGRanges(con_t, format = "BED", header = TRUE)

# Enrichment loop: 807 psyASH cases vs the 117K background, per chromHMM
# file and per chromatin state; Fisher's exact test on each 2x2 table.
for (j in seq_len(10)) {
  chrom.file <- read.table(sel[j], header = FALSE, sep = "\t")
  names(chrom.file) <- c("chr", "start", "end", "group")
  groupf <- unique(chrom.file$group)
  for (k in seq_along(groupf)) {
    # BUG FIX: filter on the actual state label groupf[k], not the loop
    # index k (the original only worked for states numbered 1..n, no gaps).
    file_gr <- chrom.file[chrom.file$group == groupf[k], ]
    bed_file <- toGRanges(file_gr, format = "BED", header = TRUE)

    ol1 <- findOverlapsOfPeaks(bed_case, bed_file)
    ol2 <- findOverlapsOfPeaks(bed_con, bed_file)
    tcase <- as.data.frame(ol1$peaklist$`bed_case///bed_file`)
    tcon <- as.data.frame(ol2$peaklist$`bed_con///bed_file`)

    result_tmp <- data.frame(matrix(NA, 1, ncol = 9))
    names(result_tmp) <- col_names
    result_tmp[, 1] <- "117K.rm.807.psyASH"
    result_tmp[, 2] <- sel[j]
    result_tmp[, 3] <- groupf[k]  # record the real state label
    result_tmp[, 4] <- nrow(tcase)
    result_tmp[, 5] <- nrow(case) - nrow(tcase)
    result_tmp[, 6] <- nrow(tcon)
    result_tmp[, 7] <- nrow(con_t) - nrow(tcon)
    # Run Fisher's exact test once; reuse for OR and p-value
    ft <- fisher.test(matrix(as.numeric(result_tmp[1, 4:7]), nrow = 2))
    result_tmp[, 8] <- ft$estimate
    result_tmp[, 9] <- ft$p.value
    result <- rbind(result, result_tmp)
  }
}
write.csv(result, "./20201219/HMM.enrich.117K.rm.807.psyASH.csv", quote = FALSE, row.names = FALSE)

### Annotate the 807 psyASH sites with their chromatin state (2020.12.21):
### look up, in each of the 10 chromHMM files, the state of the region
### containing each site, then vote active vs repression across files.
rts <- data.frame(id = file2$id)

for (j in seq_len(10)) {
  chrom.file <- read.table(sel[j], header = FALSE, sep = "\t")
  names(chrom.file) <- c("chr", "start", "end", "group")
  # Preallocate instead of growing the vector with c() on every iteration
  rts1 <- rep(NA, nrow(file2))
  for (i in seq_len(nrow(file2))) {
    # Regions on the same chromosome whose span contains the site position.
    # NOTE(review): BED coordinates are 0-based half-open; the inclusive
    # start AND end test here can match two adjacent regions at a boundary
    # (the first is taken) -- confirm this is intended.
    hit <- chrom.file[chrom.file$chr == file2$Chr[i] &
                        file2$Start[i] >= chrom.file$start &
                        file2$Start[i] <= chrom.file$end, ]
    rts1[i] <- hit$group[1]  # first matching region; NA when none overlaps
  }
  rts1 <- data.frame(rts1)
  names(rts1) <- paste("group.HMM.state.from.file", j, sep = "")
  rts <- cbind(rts, rts1)
}
# Vote: states 1-9 count as "active", 10+ as "repression"
# (assumes numeric chromHMM state codes -- TODO confirm for these files).
# FIX: na.rm = TRUE so a site missing a state in one file still gets
# classified from the remaining files instead of propagating NA.
rts$active.num <- rowSums(rts[, 2:11] < 10, na.rm = TRUE)
rts$repression.num <- rowSums(rts[, 2:11] > 9, na.rm = TRUE)
rts$group.HMM.state <- ifelse(rts$active.num > rts$repression.num, "active", "repression")
result.HMM <- data.frame(id = rts$id, group.HMM.state = rts$group.HMM.state)
file2 <- merge(file2, result.HMM, by = "id")
write.csv(file2, "20201221/psyASH.add.HMM.state.csv", quote = F, row.names = F)


### 2020.12.22: cases are the sites among the 807 ASH that have eQTL data
### support; controls are the 117K set minus those cases.
psyASH <- read.csv("20201222/at.least.two.ASH.in.DC.BF.1.add.eQTL.GWAS.add.DEG.LIBDeQTL.HMM.state.csv", header = TRUE)
psyASH <- psyASH[!duplicated(psyASH$unitID), ]
# Keep only sites with eQTL evidence and a known LIBD eQTL type
psyASH <- psyASH[psyASH$eqtl.no.na.num > 0 & !is.na(psyASH$LIBD.eQTL.type), ]

con1 <- filea[!filea$unitID %in% psyASH$unitID, ]

###### Prepare the empty result table to be filled by the enrichment loop
col_names <- c("con.file.name", "HMM.file", "HMM.group", "case_in_region",
               "case_not", "con_in_region", "con_not", "OR", "p.value")
result <- data.frame(matrix(NA, 1, ncol = 9))
names(result) <- col_names
result <- result[-1, ]  # zero-row template

# Cases: eQTL-supported psyASH sites as 1-bp BED intervals
case <- psyASH
case_t <- data.frame(chr = case$Chr.x,
                     start = as.numeric(case$Start) - 1,
                     end = as.numeric(case$Start))
bed_case <- toGRanges(case_t, format = "BED", header = TRUE)

# Controls: the remaining 117K heterozygous sites as 1-bp intervals
con_t <- data.frame(chrom = con1$Chr,
                    start = as.numeric(con1$Start) - 1,
                    end = as.numeric(con1$Start))
bed_con <- toGRanges(con_t, format = "BED", header = TRUE)

# Enrichment loop: eQTL-supported psyASH cases vs the 117K background, per
# chromHMM file and chromatin state; Fisher's exact test on each 2x2 table.
for (j in seq_len(10)) {
  chrom.file <- read.table(sel[j], header = FALSE, sep = "\t")
  names(chrom.file) <- c("chr", "start", "end", "group")
  groupf <- unique(chrom.file$group)
  for (k in seq_along(groupf)) {
    # BUG FIX: filter on the actual state label groupf[k], not the loop
    # index k (the original only worked for states numbered 1..n, no gaps).
    file_gr <- chrom.file[chrom.file$group == groupf[k], ]
    bed_file <- toGRanges(file_gr, format = "BED", header = TRUE)

    ol1 <- findOverlapsOfPeaks(bed_case, bed_file)
    ol2 <- findOverlapsOfPeaks(bed_con, bed_file)
    tcase <- as.data.frame(ol1$peaklist$`bed_case///bed_file`)
    tcon <- as.data.frame(ol2$peaklist$`bed_con///bed_file`)

    result_tmp <- data.frame(matrix(NA, 1, ncol = 9))
    names(result_tmp) <- col_names
    # BUG FIX: the label was copy-pasted as "117K.rm.807.psyASH" from the
    # previous analysis; this run removes the eQTL-supported cases (see the
    # output filename below), so label it accordingly.
    result_tmp[, 1] <- "117K.rm.eQTL.support.psyASH"
    result_tmp[, 2] <- sel[j]
    result_tmp[, 3] <- groupf[k]  # record the real state label
    result_tmp[, 4] <- nrow(tcase)
    result_tmp[, 5] <- nrow(case) - nrow(tcase)
    result_tmp[, 6] <- nrow(tcon)
    result_tmp[, 7] <- nrow(con_t) - nrow(tcon)
    # Run Fisher's exact test once; reuse for OR and p-value
    ft <- fisher.test(matrix(as.numeric(result_tmp[1, 4:7]), nrow = 2))
    result_tmp[, 8] <- ft$estimate
    result_tmp[, 9] <- ft$p.value
    result <- rbind(result, result_tmp)
  }
}
write.csv(result, "./20201221/HMM.enrich.117K.rm.eQTL.support.psyASH.csv", quote = FALSE, row.names = FALSE)



### Meta-analysis of the enrichment results across the 10 chromHMM files:
### one fixed-effect pooled odds ratio per chromatin state.
library(meta)
setwd("E:/5hmc_file/2_5hmc_yjp_bam/ASM")
file <- read.csv("./20201221/HMM.enrich.117K.rm.eQTL.support.psyASH.csv", header = TRUE)
# State annotation table: mnemonic + description for state numbers 1..15
file.anno <- read.table("E:/5hmc_file/脑组织和GM12878的chromHMM数据/group意义.txt", header = TRUE, sep = "\t")
file.anno$STATE.NO. <- 1:15
file.anno <- dplyr::select(file.anno, STATE.NO., MNEMONIC, DESCRIPTION, NUM)
file <- merge(file, file.anno, by.x = "HMM.group", by.y = "STATE.NO.")

col_names <- c("MNEMONIC", "DESCRIPTION", "NUM.state", "OR", "P.value", "upper", "lower")
rt <- data.frame(matrix(NA, 1, ncol = 7))
names(rt) <- col_names
rt <- rt[-1, ]  # zero-row template

gr <- unique(file$HMM.group)
for (i in seq_along(gr)) {
  tmp <- file[file$HMM.group == gr[i], ]
  # metabin() expects event counts plus TOTAL n, while the table stores
  # "in region" / "not in region"; convert the *_not columns to totals.
  tmp$case_not <- tmp$case_in_region + tmp$case_not
  tmp$con_not <- tmp$con_in_region + tmp$con_not

  # Fixed-effect meta-analysis of the odds ratio across chromHMM files
  metaor3 <- metabin(case_in_region, case_not, con_in_region, con_not, data = tmp, sm = "OR")
  rtmp <- data.frame(matrix(NA, 1, ncol = 7))
  names(rtmp) <- col_names

  # Use column names instead of the fragile positional 10:12 indexing
  rtmp[1, 1:3] <- tmp[1, c("MNEMONIC", "DESCRIPTION", "NUM")]
  # BUG FIX: removed the stray `OR =` chained assignment that leaked a
  # global variable as a side effect of filling this cell.
  rtmp[1, 4] <- exp(metaor3$TE.fixed)     # pooled OR (fixed effect)
  rtmp[1, 5] <- metaor3$pval.fixed
  rtmp[1, 6] <- exp(metaor3$upper.fixed)  # 95% CI upper bound
  rtmp[1, 7] <- exp(metaor3$lower.fixed)  # 95% CI lower bound
  rt <- rbind(rt, rtmp)
}
write.csv(rt, "./20201222/HMM.enrich.117K.rm.eQTL.support.psyASH.meta.csv", quote = FALSE, row.names = FALSE)