# ASH summary table: FDR-significant sites with direction annotation.
filea <- read.csv("20201112做汇总表/all.FDR.sig.at.least.one.add.direction.same.diff.csv", header = TRUE)
filea$id <- paste0(filea$Chr, ":", filea$Start)
# Sites significant in more than one comparison.
filea1 <- filea[filea$FDR.sig > 1, ]

# 53K table with GWAS / eQTL / DEG / motif annotations.
file <- read.table("E:/5hmc_file/2_5hmc_yjp_bam/ASM/20210316LIBD.eQTL处理/53K.add.GWAS.eQTL.DEG.motif.for.analysis.txt",
                   header = TRUE, sep = "\t")
# file$id <- paste(file$Chr, file$Start, sep = ":")
# De-duplicate TF rows, keeping the first occurrence of each id.
file <- file[!duplicated(file$id), ]
# Nested subsets on the pattern / BF-in-DC counts.
file1 <- file[file$pattern.not.rm.dupl.num.DC > 1, ]
file2 <- file1[file1$BF_in_DC > 1, ]
file3 <- file1[file1$BF_in_DC > 10, ]

# Enhancer/promoter region annotation for the 117K ASH set.
regions <- read.csv("./20210120.H3k.analysis/117012.analysis/117K.ASH.add.enh.promtr.csv", header = TRUE)

# Annotated ASM calls (~220K) and the full SNP VAF table (~870K rows).
asm2 <- read.table("E:/1.甲基化分析/ASM/ASM_snp-onlyWGS/ASM_log/220520ASMs_anno.hg19_multianno.csv",
                   header = TRUE, sep = ",")
asm2$unitID <- paste(asm2$Chr, asm2$Start, asm2$Ref, asm2$Alt, sep = ":")
asm <- read.csv("E:/1.甲基化分析/ASM/ASM_snp-onlyWGS/ASM_log/869727.all.snp.vaf.up.down.20210321.csv", header = TRUE)
# Keep rows present in both the annotated ASM set and filea (~13,649 rows).
asm <- asm[asm$unitID %in% asm2$unitID & asm$unitID %in% filea$unitID, ]
asm <- merge(regions, asm, by = "unitID")
# asm <- asm[abs(asm$chazhi) > 0.1, ]
asm <- data.frame(unitID = asm$unitID,
                  ASM.alt.group = asm$ASM.alt.group,
                  snp.location = asm$snp.location)


# Load the six DeepSEA z-score shards and stack them into one data frame.
setwd("E:/5hmc_file/2_5hmc_yjp_bam/ASM/20210120.H3k.analysis/117012.analysis")
library(data.table)

# Read one shard, rename its first column to "chrom", and build the
# chrom:pos:ref:alt key.  The original code rebuilt the whole name vector
# as c("chrom", names(dt)[2:2010]), which silently corrupts the header if a
# shard does not have exactly 2010 columns; renaming only column 1 is
# equivalent when it does, and robust when it does not.
read_zscore_part <- function(part) {
  dt <- fread(file.path(part, paste0(part, "_zscore.tsv")), header = TRUE, sep = "\t")
  setnames(dt, 1, "chrom")
  dt$unitID <- paste(dt$chrom, dt$pos, dt$ref, dt$alt, sep = ":")
  dt
}

parts <- lapply(paste0("part", 1:6), read_zscore_part)
deepsea <- as.data.frame(rbindlist(parts))
rm(parts)  # free the per-shard copies, as the original rm(part1)..rm(part6) did

### Consistency-analysis setup
# 117012 sites
setwd("E:/5hmc_file/2_5hmc_yjp_bam/ASM/")
coln <- names(deepsea)
# Brain tracks of interest; "|" is escaped because these strings are used
# as regex patterns in grep() below.
# braincell <- c("Brain_Angular_Gyrus","Brain_Anterior_Caudate","Brain_Cingulate_Gyrus","Brain_Germinal_Matrix","Brain_Hippocampus_Middle","Brain_Inferior_Temporal_Lobe","Brain_Mid_Frontal_Lobe","Brain_Substantia_Nigra")
braincell <- c("Brain_Hippocampus_Middle",
               "Fetal_Brain_Male\\|DNase.all.peaks",
               "Fetal_Brain_Female\\|DNase.all.peaks")

# VAF table for the 117,012 ASH sites plus the location annotation,
# joined on unitID.
f1 <- read.csv("./20210321.H3K分析/117012.vaf.up.down.20210321.txt", header = TRUE, sep = "\t")
f2 <- read.csv("E:/5hmc_file/2_5hmc_yjp_bam/ASM/20210120.H3k.analysis/117012.analysis/117K.ASH.add.enh.promtr.csv", header = TRUE)
names(f2) <- c("unitID", "snp.location")
names(f1) <- c("unitID", names(f1)[2:5])

f <- merge(f1, f2, by = "unitID")
locations <- as.character(unique(f$snp.location))

# Consistency analysis over all 117,012 ASH sites.
# For each SNP-location category (plus a pooled "all" pass) and each brain
# track group, compare the sign of the DeepSEA z-score ("up"/"down") with
# the ASH direction derived from the mean VAF, then binomial-test the
# same/opposite split against p = 0.5.
group1 <- c(locations, "all")
for (lc in seq_len(length(locations) + 1)) {
  if (lc <= length(locations)) {
    # One SNP-location category at a time.  (The original hard-coded
    # 4 iterations, assuming exactly 3 categories.)
    ASH <- f[f$snp.location == locations[lc], ]
  } else {
    ASH <- f  # pooled pass over every category
  }
  for (i in seq_along(braincell)) {
    braincn <- coln[grep(coln, pattern = braincell[i])]
    # Keep only H3K histone-mark and DNase-peak columns.
    braincn <- braincn[grep(braincn, pattern = "H3K|DNase.all.peaks")]
    deepseq <- deepsea[, c("unitID", braincn)]
    # merge() output order matches ASH (verified upstream by the author).
    deepseq <- merge(deepseq, ASH, by = "unitID")
    deepseq$avg.VAF <- rowMeans(deepseq[, c("affect.VAF", "unaffect.VAF")], na.rm = TRUE)
    deepseq$alt.group <- ifelse(deepseq$avg.VAF > 0.5, "up", "down")

    # Track labels: middle field of the "cell|mark|suffix" column names.
    str1 <- unlist(strsplit(braincn, "\\|"))[seq(2, 3 * length(braincn), 3)]
    result <- data.frame(matrix(NA, nrow(deepseq), length(braincn)))
    colnames(result) <- str1
    frt <- data.frame(matrix(NA, nrow(deepseq), length(braincn)))
    colnames(frt) <- str1
    for (j in seq_along(braincn)) {
      result[, j] <- ifelse(deepseq[, 1 + j] < 0, "down", "up")
      frt[, j] <- ifelse(result[, j] == deepseq$alt.group, "same", "opposite")
    }

    rt_statis <- data.frame(matrix(NA, length(braincn), 4))
    row.names(rt_statis) <- str1
    colnames(rt_statis) <- c("same", "opposite", "same_ratio", "pvalue")
    for (k in seq_along(braincn)) {
      # sum() replaces table(frt[,k]==...)[2]: table() drops absent levels,
      # so the original produced NA counts (and binom.test() errored)
      # whenever every row agreed or every row disagreed.
      n_same <- sum(frt[, k] == "same", na.rm = TRUE)
      n_opp <- sum(frt[, k] == "opposite", na.rm = TRUE)
      rt_statis[k, 1] <- n_same
      rt_statis[k, 2] <- n_opp
      rt_statis[k, 3] <- n_same / (n_same + n_opp)
      rt_statis[k, 4] <- binom.test(n_same, n_same + n_opp, p = 0.5)$p.value
    }

    fn <- paste0("./20210321.H3K分析/117012/consider.all.VAF.", braincell[i], ".", group1[lc], "_statis.csv")
    # Strip regex-escape characters leaked from the braincell patterns.
    fn <- gsub("\\\\", "", fn)
    fn <- gsub("\\|", "", fn)
    write.csv(rt_statis, fn, quote = FALSE, row.names = TRUE)
  }
}

# 8544-site subset: ASH sites overlapping file1 (pattern count > 1).
# Same analysis as the 117,012-site pass; only the input subset and the
# output directory differ.
fs <- f[f$unitID %in% file1$unitID, ]
locations <- as.character(unique(fs$snp.location))

group1 <- c(locations, "all")
for (lc in seq_len(length(locations) + 1)) {
  if (lc <= length(locations)) {
    # One SNP-location category at a time (original hard-coded 4 iterations).
    ASH <- fs[fs$snp.location == locations[lc], ]
  } else {
    ASH <- fs  # pooled pass over every category
  }
  for (i in seq_along(braincell)) {
    braincn <- coln[grep(coln, pattern = braincell[i])]
    # Keep only H3K histone-mark and DNase-peak columns.
    braincn <- braincn[grep(braincn, pattern = "H3K|DNase.all.peaks")]
    deepseq <- deepsea[, c("unitID", braincn)]
    # merge() output order matches ASH (verified upstream by the author).
    deepseq <- merge(deepseq, ASH, by = "unitID")
    deepseq$avg.VAF <- rowMeans(deepseq[, c("affect.VAF", "unaffect.VAF")], na.rm = TRUE)
    deepseq$alt.group <- ifelse(deepseq$avg.VAF > 0.5, "up", "down")

    # Track labels: middle field of the "cell|mark|suffix" column names.
    str1 <- unlist(strsplit(braincn, "\\|"))[seq(2, 3 * length(braincn), 3)]
    result <- data.frame(matrix(NA, nrow(deepseq), length(braincn)))
    colnames(result) <- str1
    frt <- data.frame(matrix(NA, nrow(deepseq), length(braincn)))
    colnames(frt) <- str1
    for (j in seq_along(braincn)) {
      result[, j] <- ifelse(deepseq[, 1 + j] < 0, "down", "up")
      frt[, j] <- ifelse(result[, j] == deepseq$alt.group, "same", "opposite")
    }

    rt_statis <- data.frame(matrix(NA, length(braincn), 4))
    row.names(rt_statis) <- str1
    colnames(rt_statis) <- c("same", "opposite", "same_ratio", "pvalue")
    for (k in seq_along(braincn)) {
      # sum() replaces table(...)[2], which returned NA (and made
      # binom.test() error) when one of the two outcomes was absent.
      n_same <- sum(frt[, k] == "same", na.rm = TRUE)
      n_opp <- sum(frt[, k] == "opposite", na.rm = TRUE)
      rt_statis[k, 1] <- n_same
      rt_statis[k, 2] <- n_opp
      rt_statis[k, 3] <- n_same / (n_same + n_opp)
      rt_statis[k, 4] <- binom.test(n_same, n_same + n_opp, p = 0.5)$p.value
    }

    fn <- paste0("./20210321.H3K分析/8544/consider.all.VAF.", braincell[i], ".", group1[lc], "_statis.csv")
    # Strip regex-escape characters leaked from the braincell patterns.
    fn <- gsub("\\\\", "", fn)
    fn <- gsub("\\|", "", fn)
    write.csv(rt_statis, fn, quote = FALSE, row.names = TRUE)
  }
}


# 807-site subset: ASH sites overlapping file2 (BF_in_DC > 1).
# Same analysis as the 117,012-site pass; only the input subset and the
# output directory differ.
fs <- f[f$unitID %in% file2$unitID, ]
locations <- as.character(unique(fs$snp.location))

group1 <- c(locations, "all")
for (lc in seq_len(length(locations) + 1)) {
  if (lc <= length(locations)) {
    # One SNP-location category at a time (original hard-coded 4 iterations).
    ASH <- fs[fs$snp.location == locations[lc], ]
  } else {
    ASH <- fs  # pooled pass over every category
  }
  for (i in seq_along(braincell)) {
    braincn <- coln[grep(coln, pattern = braincell[i])]
    # Keep only H3K histone-mark and DNase-peak columns.
    braincn <- braincn[grep(braincn, pattern = "H3K|DNase.all.peaks")]
    deepseq <- deepsea[, c("unitID", braincn)]
    # merge() output order matches ASH (verified upstream by the author).
    deepseq <- merge(deepseq, ASH, by = "unitID")
    deepseq$avg.VAF <- rowMeans(deepseq[, c("affect.VAF", "unaffect.VAF")], na.rm = TRUE)
    deepseq$alt.group <- ifelse(deepseq$avg.VAF > 0.5, "up", "down")

    # Track labels: middle field of the "cell|mark|suffix" column names.
    str1 <- unlist(strsplit(braincn, "\\|"))[seq(2, 3 * length(braincn), 3)]
    result <- data.frame(matrix(NA, nrow(deepseq), length(braincn)))
    colnames(result) <- str1
    frt <- data.frame(matrix(NA, nrow(deepseq), length(braincn)))
    colnames(frt) <- str1
    for (j in seq_along(braincn)) {
      result[, j] <- ifelse(deepseq[, 1 + j] < 0, "down", "up")
      frt[, j] <- ifelse(result[, j] == deepseq$alt.group, "same", "opposite")
    }

    rt_statis <- data.frame(matrix(NA, length(braincn), 4))
    row.names(rt_statis) <- str1
    colnames(rt_statis) <- c("same", "opposite", "same_ratio", "pvalue")
    for (k in seq_along(braincn)) {
      # sum() replaces table(...)[2], which returned NA (and made
      # binom.test() error) when one of the two outcomes was absent.
      n_same <- sum(frt[, k] == "same", na.rm = TRUE)
      n_opp <- sum(frt[, k] == "opposite", na.rm = TRUE)
      rt_statis[k, 1] <- n_same
      rt_statis[k, 2] <- n_opp
      rt_statis[k, 3] <- n_same / (n_same + n_opp)
      rt_statis[k, 4] <- binom.test(n_same, n_same + n_opp, p = 0.5)$p.value
    }

    fn <- paste0("./20210321.H3K分析/807/consider.all.VAF.", braincell[i], ".", group1[lc], "_statis.csv")
    # Strip regex-escape characters leaked from the braincell patterns.
    fn <- gsub("\\\\", "", fn)
    fn <- gsub("\\|", "", fn)
    write.csv(rt_statis, fn, quote = FALSE, row.names = TRUE)
  }
}

# 200-site subset: ASH sites overlapping file3 (BF_in_DC > 10).
# Same analysis as the 117,012-site pass; only the input subset and the
# output directory differ.
fs <- f[f$unitID %in% file3$unitID, ]
locations <- as.character(unique(fs$snp.location))

group1 <- c(locations, "all")
for (lc in seq_len(length(locations) + 1)) {
  if (lc <= length(locations)) {
    # One SNP-location category at a time (original hard-coded 4 iterations).
    ASH <- fs[fs$snp.location == locations[lc], ]
  } else {
    ASH <- fs  # pooled pass over every category
  }
  for (i in seq_along(braincell)) {
    braincn <- coln[grep(coln, pattern = braincell[i])]
    # Keep only H3K histone-mark and DNase-peak columns.
    braincn <- braincn[grep(braincn, pattern = "H3K|DNase.all.peaks")]
    deepseq <- deepsea[, c("unitID", braincn)]
    # merge() output order matches ASH (verified upstream by the author).
    deepseq <- merge(deepseq, ASH, by = "unitID")
    deepseq$avg.VAF <- rowMeans(deepseq[, c("affect.VAF", "unaffect.VAF")], na.rm = TRUE)
    deepseq$alt.group <- ifelse(deepseq$avg.VAF > 0.5, "up", "down")

    # Track labels: middle field of the "cell|mark|suffix" column names.
    str1 <- unlist(strsplit(braincn, "\\|"))[seq(2, 3 * length(braincn), 3)]
    result <- data.frame(matrix(NA, nrow(deepseq), length(braincn)))
    colnames(result) <- str1
    frt <- data.frame(matrix(NA, nrow(deepseq), length(braincn)))
    colnames(frt) <- str1
    for (j in seq_along(braincn)) {
      result[, j] <- ifelse(deepseq[, 1 + j] < 0, "down", "up")
      frt[, j] <- ifelse(result[, j] == deepseq$alt.group, "same", "opposite")
    }

    rt_statis <- data.frame(matrix(NA, length(braincn), 4))
    row.names(rt_statis) <- str1
    colnames(rt_statis) <- c("same", "opposite", "same_ratio", "pvalue")
    for (k in seq_along(braincn)) {
      # sum() replaces table(...)[2], which returned NA (and made
      # binom.test() error) when one of the two outcomes was absent.
      n_same <- sum(frt[, k] == "same", na.rm = TRUE)
      n_opp <- sum(frt[, k] == "opposite", na.rm = TRUE)
      rt_statis[k, 1] <- n_same
      rt_statis[k, 2] <- n_opp
      rt_statis[k, 3] <- n_same / (n_same + n_opp)
      rt_statis[k, 4] <- binom.test(n_same, n_same + n_opp, p = 0.5)$p.value
    }

    fn <- paste0("./20210321.H3K分析/200/consider.all.VAF.", braincell[i], ".", group1[lc], "_statis.csv")
    # Strip regex-escape characters leaked from the braincell patterns.
    fn <- gsub("\\\\", "", fn)
    fn <- gsub("\\|", "", fn)
    write.csv(rt_statis, fn, quote = FALSE, row.names = TRUE)
  }
}

# 13,649-site subset: 117K ASH sites overlapping the ASM call set.
# Same analysis as the passes above, except the direction call comes from
# the pre-computed ASM.alt.group column.
fs <- asm
locations <- as.character(unique(fs$snp.location))

group1 <- c(locations, "all")
for (lc in seq_len(length(locations) + 1)) {
  if (lc <= length(locations)) {
    # One SNP-location category at a time (original hard-coded 4 iterations).
    ASH <- fs[fs$snp.location == locations[lc], ]
  } else {
    ASH <- fs  # pooled pass over every category
  }
  for (i in seq_along(braincell)) {
    braincn <- coln[grep(coln, pattern = braincell[i])]
    # Keep only H3K histone-mark and DNase-peak columns.
    braincn <- braincn[grep(braincn, pattern = "H3K|DNase.all.peaks")]
    deepseq <- deepsea[, c("unitID", braincn)]
    # merge() output order matches ASH (verified upstream by the author).
    deepseq <- merge(deepseq, ASH, by = "unitID")
    # BUG FIX: asm (built at the top of the script) carries only unitID,
    # ASM.alt.group and snp.location, so the original
    # rowMeans(deepseq[, c("affect.vaf", "unaffect.vaf")]) referenced
    # columns that do not exist here and stopped with
    # "undefined columns selected".  Use the pre-computed ASM direction.
    # NOTE(review): assumes ASM.alt.group holds the same "up"/"down"
    # labels as the VAF-derived alt.group — confirm against the source CSV.
    deepseq$alt.group <- deepseq$ASM.alt.group

    # Track labels: middle field of the "cell|mark|suffix" column names.
    str1 <- unlist(strsplit(braincn, "\\|"))[seq(2, 3 * length(braincn), 3)]
    result <- data.frame(matrix(NA, nrow(deepseq), length(braincn)))
    colnames(result) <- str1
    frt <- data.frame(matrix(NA, nrow(deepseq), length(braincn)))
    colnames(frt) <- str1
    for (j in seq_along(braincn)) {
      result[, j] <- ifelse(deepseq[, 1 + j] < 0, "down", "up")
      frt[, j] <- ifelse(result[, j] == deepseq$alt.group, "same", "opposite")
    }

    rt_statis <- data.frame(matrix(NA, length(braincn), 4))
    row.names(rt_statis) <- str1
    colnames(rt_statis) <- c("same", "opposite", "same_ratio", "pvalue")
    for (k in seq_along(braincn)) {
      # sum() replaces table(...)[2], which returned NA (and made
      # binom.test() error) when one of the two outcomes was absent.
      n_same <- sum(frt[, k] == "same", na.rm = TRUE)
      n_opp <- sum(frt[, k] == "opposite", na.rm = TRUE)
      rt_statis[k, 1] <- n_same
      rt_statis[k, 2] <- n_opp
      rt_statis[k, 3] <- n_same / (n_same + n_opp)
      rt_statis[k, 4] <- binom.test(n_same, n_same + n_opp, p = 0.5)$p.value
    }

    fn <- paste0("./20210321.H3K分析/13649/consider.all.VAF.", braincell[i], ".", group1[lc], "_statis.csv")
    # Strip regex-escape characters leaked from the braincell patterns.
    fn <- gsub("\\\\", "", fn)
    fn <- gsub("\\|", "", fn)
    write.csv(rt_statis, fn, quote = FALSE, row.names = TRUE)
  }
}