## Working directory for the 5hmC ASM analysis; every relative path below hangs off it.
setwd("E:/5hmc_file/2_5hmc_yjp_bam/ASM/")

## Sites with at least one AShM in DC, annotated with Bayes factor, beta0 and CCHC columns.
## (Fixed partial argument matching: `head=T` -> `header = TRUE`.)
file <- read.csv("20201120/at.least.one.AShM.in.DC.add.BF.beta0.add.CCHC.csv", header = TRUE)
## FDR-significant sites with same/diff direction summary.
filea <- read.csv("20201112做汇总表/all.FDR.sig.at.least.one.add.direction.same.diff.csv", header = TRUE)
filea$id <- paste(filea$Chr, filea$Start, sep = ":")
## Sites significant in more than one comparison.
## NOTE(review): `filea1` is not used later in this script; kept for interactive use.
filea1 <- filea[filea$FDR.sig > 1, ]

file$id <- paste(file$Chr, file$Start, sep = ":")
## Replicated pattern in DC — this is the "8544" site set used further down.
file1 <- file[file$pattern.not.rm.dupl.num.DC > 1, ]
## Bayes factor > 1 — this is the "807" site set used further down.
file2 <- file1[file1$BF_in_DC > 1, ]
## Stricter BF > 10 cutoff. NOTE(review): `file3` is not used later in this script.
file3 <- file1[file1$BF_in_DC > 10, ]

## Export the replicated-site set (file1) as a minimal VCF-like table for DeepSEA:
## chrom, position, strand placeholder "+", reference and alternate allele.
tmp <- data.frame(file1$Chr, file1$Start, rep("+", nrow(file1)), file1$Ref, file1$Alt)
write.table(tmp, "./20201227.H3k.analysis/117K.vcf.txt",
            quote = FALSE, row.names = FALSE, sep = "\t")

## Same export for the BF > 1 subset (file2, the 807 sites).
tmp <- data.frame(file2$Chr, file2$Start, rep("+", nrow(file2)), file2$Ref, file2$Alt)
write.table(tmp, "./20201227.H3k.analysis/807.analysis/807psyASH.vcf.txt",
            quote = FALSE, row.names = FALSE, sep = "\t")

### The DeepSEA web site cannot handle inputs above 20,000 rows, so only the 807 set was used.

### First, call the direction (up/down) of the 5hmC change at each site.
## All beta0 columns — every comparison, diseased and healthy alike.
beta0_cols <- names(file2)[grep(names(file2), pattern = "bayes.beta0")]
ftmp <- file2[, c("unitID", "id", beta0_cols)]
## Count comparisons with beta0 above / below zero.
## NOTE(review): columns 3:30 assume exactly 28 beta0 columns — confirm against the input.
ftmp$up.num <- rowSums(ftmp[, 3:30] > 0, na.rm = TRUE)
ftmp$down.num <- rowSums(ftmp[, 3:30] < 0, na.rm = TRUE)
## Majority vote over comparisons; a tie is labelled "down".
ftmp$group.alt.up.or.down <- ifelse(ftmp$up.num > ftmp$down.num, "up", "down")
ftmp <- data.frame(ftmp[, 1:2], ftmp[, 31:33])
write.csv(ftmp, "./20201227.H3k.analysis/807.analysis/consider.all.beta0.807.site.alt.up.down.statis.csv",
          quote = FALSE, row.names = FALSE)

## Direction call using only the diseased (psy) beta0 columns:
## even-indexed among the first 12, plus columns 13:20.
cn1 <- names(file2)[grep(names(file2), pattern = "bayes.beta0")][c(seq(2, 12, 2), 13:20)]
ftmp <- file2[, c("unitID", "id", cn1)]
ftmp$up.num <- rowSums(ftmp[, 3:16] > 0, na.rm = TRUE)   # beta0 > 0 => up
ftmp$down.num <- rowSums(ftmp[, 3:16] < 0, na.rm = TRUE) # beta0 < 0 => down
## Majority vote; a tie is labelled "down".
ftmp$group.alt.up.or.down <- ifelse(ftmp$up.num > ftmp$down.num, "up", "down")
ftmp <- data.frame(ftmp[, 1:2], ftmp[, 17:19])
write.csv(ftmp, "./20201227.H3k.analysis/807.analysis/consider.psy.beta0.807.site.alt.up.down.statis.csv",
          quote = FALSE, row.names = FALSE)

## Direction call using only the healthy (control) beta0 columns:
## odd-indexed among the first 12, plus columns 21:28.
cn1 <- names(file2)[grep(names(file2), pattern = "bayes.beta0")][c(seq(1, 12, 2), 21:28)]
ftmp <- file2[, c("unitID", "id", cn1)]
ftmp$up.num <- rowSums(ftmp[, 3:16] > 0, na.rm = TRUE)   # beta0 > 0 => up
ftmp$down.num <- rowSums(ftmp[, 3:16] < 0, na.rm = TRUE) # beta0 < 0 => down
## Majority vote; a tie is labelled "down".
ftmp$group.alt.up.or.down <- ifelse(ftmp$up.num > ftmp$down.num, "up", "down")
ftmp <- data.frame(ftmp[, 1:2], ftmp[, 17:19])
write.csv(ftmp, "./20201227.H3k.analysis/807.analysis/consider.healthy.beta0.807.site.alt.up.down.statis.csv",
          quote = FALSE, row.names = FALSE)

### Concordance analysis: compare 5hmC direction with DeepSEA H3K-mark z-scores.
deepsea <- read.table("20201227.H3k.analysis/807.analysis/tmp/b67429bd-f7a1-49cc-af92-13c38d2125a8_807psyASH.vcf_FEATURE_zscore.tsv",
                      header = TRUE, sep = "\t")
coln <- names(deepsea)
## Brain tissues whose H3K marks are evaluated below.
braincell <- c("Brain_Angular_Gyrus", "Brain_Anterior_Caudate", "Brain_Cingulate_Gyrus",
               "Brain_Germinal_Matrix", "Brain_Hippocampus_Middle",
               "Brain_Inferior_Temporal_Lobe", "Brain_Mid_Frontal_Lobe",
               "Brain_Substantia_Nigra")

### Per brain tissue: does the DeepSEA z-score direction match the 5hmC direction?
## `ftmp` here is the healthy-beta0 direction call from the block above,
## matching the "consider.healthy.beta0" output file names.
ASH <- ftmp
for (i in seq_along(braincell)) {
  ## H3K-mark z-score columns for this tissue.
  braincn <- coln[grep(coln, pattern = braincell[i])]
  braincn <- braincn[grep(braincn, pattern = "H3K")]
  deepseq <- deepsea[, c("chrom", "pos", braincn)]
  ## Mark names (e.g. "H3K4me1") = 2nd dot-separated field of each column name.
  H3k <- unlist(strsplit(braincn, "\\."))[seq(2, 3 * length(braincn), 3)]
  deepseq$id <- paste0(deepseq$chrom, ":", deepseq$pos)
  deepseq <- merge(deepseq, ASH, by = "id")
  ## Size results by the merged row count (the original hard-coded 807 rows and
  ## would error if the merge returned a different number of sites).
  nsite <- nrow(deepseq)
  result <- data.frame(matrix(NA, nsite, length(braincn)))
  colnames(result) <- H3k
  frt <- data.frame(matrix(NA, nsite, length(braincn)))
  colnames(frt) <- H3k
  for (j in seq_along(braincn)) {
    ## After merge the columns are id, chrom, pos, then the z-scores,
    ## so column 3 + j is the j-th mark's z-score.
    result[, j] <- ifelse(deepseq[, 3 + j] < 0, "down", "up")
    ## Full column name instead of the fragile partial match `deepseq$group`.
    frt[, j] <- ifelse(result[, j] == deepseq$group.alt.up.or.down, "same", "opposite")
  }
  rt_statis <- data.frame(matrix(NA, length(braincn), 4))
  row.names(rt_statis) <- H3k
  colnames(rt_statis) <- c("same", "opposite", "same_ratio", "pvalue")
  for (k in seq_along(braincn)) {
    ## sum() gives 0 when a category is absent; the original `table(...)[2]`
    ## returned NA in that case and broke the ratio and the binomial test.
    rt_statis[k, 1] <- sum(frt[, k] == "same", na.rm = TRUE)
    rt_statis[k, 2] <- sum(frt[, k] == "opposite", na.rm = TRUE)
    rt_statis[k, 3] <- rt_statis[k, 1] / (rt_statis[k, 1] + rt_statis[k, 2])
    ## Two-sided binomial test of same vs opposite against 50:50.
    rt_statis[k, 4] <- binom.test(rt_statis[k, 1],
                                  rt_statis[k, 1] + rt_statis[k, 2], p = 0.5)$p.value
  }
  fn <- paste0("./20201227.H3k.analysis/807.analysis/consider.healthy.beta0.", braincell[i], "_statis.csv")
  write.csv(rt_statis, fn, quote = FALSE, row.names = TRUE)
}

### Compare concordance between psy and healthy per tissue and H3K mark.
library(meta)
sel1 <- paste0("./20201227.H3k.analysis/807.analysis/consider.healthy.beta0.", braincell, "_statis.csv")
sel2 <- paste0("./20201227.H3k.analysis/807.analysis/consider.psy.beta0.", braincell, "_statis.csv")
## One row per tissue x mark: psy same/opposite counts next to control counts.
rt <- do.call(rbind, lapply(seq_along(braincell), function(i) {
  con <- read.csv(sel1[i], header = TRUE)[, 1:3]
  psy <- read.csv(sel2[i], header = TRUE)[, 1:3]
  combined <- cbind(psy, con[, 2:3])
  names(combined) <- c("H3k.group", "psy.same", "psy.opposite", "con.same", "con.opposite")
  combined
}))

### Meta-analysis, hypothesis 1: the 5hmC change is positively associated with the
### H3K-mark probability (event = concordant "same" site). Fixed-effect OR of the
### "same" proportion, psy vs control, pooled over the eight tissues per mark.
result <- rt
H3k.group <- unique(rt$H3k.group)
col_names <- c("H3k.group", "OR", "P.value", "upper", "lower")

## Collect one row per mark in a preallocated list (no rbind-growth in the loop).
rt_list <- vector("list", length(H3k.group))
for (i in seq_along(H3k.group)) {
  tmp <- result[result$H3k.group == H3k.group[i], ]
  tmp$psy.all <- tmp$psy.same + tmp$psy.opposite
  tmp$con.all <- tmp$con.same + tmp$con.opposite

  metaor3 <- metabin(psy.same, psy.all, con.same, con.all, data = tmp, sm = "OR")
  rtmp <- data.frame(matrix(NA, 1, ncol = 5))
  names(rtmp) <- col_names
  rtmp[1, 1] <- tmp[1, 1]
  ## Back-transform the pooled fixed-effect log-OR and its CI bounds.
  ## (Dropped the original stray `OR = ...` global assignment.)
  rtmp[1, 2] <- exp(metaor3$TE.fixed)
  rtmp[1, 3] <- metaor3$pval.fixed
  rtmp[1, 4] <- exp(metaor3$upper.fixed)
  rtmp[1, 5] <- exp(metaor3$lower.fixed)
  rt_list[[i]] <- rtmp
}
rt <- do.call(rbind, rt_list)
## Hypothesis 1: 5hmC positively related to histone-mark probability.
## The result supported a positive association.
write.csv(rt, "./20201227.H3k.analysis/807.analysis/807.psy.vs.healthy.H3k.statis1.csv",
          quote = FALSE, row.names = FALSE)
### Meta-analysis, hypothesis 2: the 5hmC change is inversely associated with the
### H3K-mark probability (event = discordant "opposite" site).
rt_list <- vector("list", length(H3k.group))
for (i in seq_along(H3k.group)) {
  tmp <- result[result$H3k.group == H3k.group[i], ]
  tmp$psy.all <- tmp$psy.same + tmp$psy.opposite
  tmp$con.all <- tmp$con.same + tmp$con.opposite

  metaor3 <- metabin(psy.opposite, psy.all, con.opposite, con.all, data = tmp, sm = "OR")
  rtmp <- data.frame(matrix(NA, 1, ncol = 5))
  names(rtmp) <- col_names
  rtmp[1, 1] <- tmp[1, 1]
  ## Back-transform the pooled fixed-effect log-OR and its CI bounds.
  rtmp[1, 2] <- exp(metaor3$TE.fixed)
  rtmp[1, 3] <- metaor3$pval.fixed
  rtmp[1, 4] <- exp(metaor3$upper.fixed)
  rtmp[1, 5] <- exp(metaor3$lower.fixed)
  rt_list[[i]] <- rtmp
}
rt <- do.call(rbind, rt_list)
write.csv(rt, "./20201227.H3k.analysis/807.analysis/807.psy.vs.healthy.H3k.statis2.csv",
          quote = FALSE, row.names = FALSE)

### Concordance analysis repeated for the full 8544-site set (file1), all beta0 columns.
beta0_cols <- names(file1)[grep(names(file1), pattern = "bayes.beta0")]
ftmp <- file1[, c("unitID", "id", beta0_cols)]
## NOTE(review): columns 3:30 assume exactly 28 beta0 columns — confirm against the input.
ftmp$up.num <- rowSums(ftmp[, 3:30] > 0, na.rm = TRUE)   # beta0 > 0 => up
ftmp$down.num <- rowSums(ftmp[, 3:30] < 0, na.rm = TRUE) # beta0 < 0 => down
## Majority vote; a tie is labelled "down".
ftmp$group.alt.up.or.down <- ifelse(ftmp$up.num > ftmp$down.num, "up", "down")
ftmp <- data.frame(ftmp[, 1:2], ftmp[, 31:33])
write.csv(ftmp, "./20201227.H3k.analysis/8544.analysis/consider.all.beta0.8544.site.alt.up.down.statis.csv",
          quote = FALSE, row.names = FALSE)

## DeepSEA z-scores for the 8544-site set.
deepsea <- read.table("20201227.H3k.analysis/8544.analysis/tmp/dd9a8167-9c49-4bf0-a3f7-15df0bbde2b2_8544.vcf_FEATURE_zscore.tsv",
                      header = TRUE, sep = "\t")
coln <- names(deepsea)
## Same brain-tissue panel as in the 807 analysis.
braincell <- c("Brain_Angular_Gyrus", "Brain_Anterior_Caudate", "Brain_Cingulate_Gyrus",
               "Brain_Germinal_Matrix", "Brain_Hippocampus_Middle",
               "Brain_Inferior_Temporal_Lobe", "Brain_Mid_Frontal_Lobe",
               "Brain_Substantia_Nigra")

### Per-tissue concordance for the 8544 set (same procedure as the 807 loop above).
## `ftmp` here is the all-beta0 direction call, matching the "consider.all.beta0" outputs.
ASH <- ftmp
for (i in seq_along(braincell)) {
  ## H3K-mark z-score columns for this tissue.
  braincn <- coln[grep(coln, pattern = braincell[i])]
  braincn <- braincn[grep(braincn, pattern = "H3K")]
  deepseq <- deepsea[, c("chrom", "pos", braincn)]
  ## Mark names (e.g. "H3K4me1") = 2nd dot-separated field of each column name.
  H3k <- unlist(strsplit(braincn, "\\."))[seq(2, 3 * length(braincn), 3)]
  deepseq$id <- paste0(deepseq$chrom, ":", deepseq$pos)
  deepseq <- merge(deepseq, ASH, by = "id")
  ## Size results by the merged row count (the original hard-coded 8544 rows and
  ## would error if the merge returned a different number of sites).
  nsite <- nrow(deepseq)
  result <- data.frame(matrix(NA, nsite, length(braincn)))
  colnames(result) <- H3k
  frt <- data.frame(matrix(NA, nsite, length(braincn)))
  colnames(frt) <- H3k
  for (j in seq_along(braincn)) {
    ## After merge the columns are id, chrom, pos, then the z-scores.
    result[, j] <- ifelse(deepseq[, 3 + j] < 0, "down", "up")
    ## Full column name instead of the fragile partial match `deepseq$group`.
    frt[, j] <- ifelse(result[, j] == deepseq$group.alt.up.or.down, "same", "opposite")
  }
  rt_statis <- data.frame(matrix(NA, length(braincn), 4))
  row.names(rt_statis) <- H3k
  colnames(rt_statis) <- c("same", "opposite", "same_ratio", "pvalue")
  for (k in seq_along(braincn)) {
    ## sum() gives 0 when a category is absent; `table(...)[2]` returned NA then.
    rt_statis[k, 1] <- sum(frt[, k] == "same", na.rm = TRUE)
    rt_statis[k, 2] <- sum(frt[, k] == "opposite", na.rm = TRUE)
    rt_statis[k, 3] <- rt_statis[k, 1] / (rt_statis[k, 1] + rt_statis[k, 2])
    ## Two-sided binomial test of same vs opposite against 50:50.
    rt_statis[k, 4] <- binom.test(rt_statis[k, 1],
                                  rt_statis[k, 1] + rt_statis[k, 2], p = 0.5)$p.value
  }
  fn <- paste0("./20201227.H3k.analysis/8544.analysis/consider.all.beta0.", braincell[i], "_statis.csv")
  write.csv(rt_statis, fn, quote = FALSE, row.names = TRUE)
}

library(meta)   # NOTE(review): no meta functions are used below; kept from the 807 template.
sel1 <- paste0("./20201227.H3k.analysis/8544.analysis/consider.all.beta0.", braincell, "_statis.csv")
## Stack the per-tissue statistics: mark name plus same/opposite counts.
rt <- do.call(rbind, lapply(sel1, function(f) read.csv(f, header = TRUE)[, 1:3]))

### Pool the 8544-set concordance counts per H3K mark over the eight tissues and
### binomially test the averaged same vs opposite counts against 50:50.
## "X" is read.csv's default name for the unnamed row-name column of the per-tissue files.
H3k.group <- unique(rt$X)
col_names <- c("H3k.group", "same.mean", "opposite.mean", "same.ratio", "P.value")
## Preallocated list instead of growing a data frame with rbind inside the loop.
res_list <- vector("list", length(H3k.group))
for (i in seq_along(H3k.group)) {
  resultmp <- data.frame(matrix(NA, 1, ncol = 5))
  names(resultmp) <- col_names
  tmp <- rt[rt$X == H3k.group[i], ]
  resultmp[1, 1] <- tmp[1, 1]
  ## Average the counts across tissues, rounded to whole sites so binom.test accepts them.
  resultmp[1, 2] <- round(mean(tmp[, 2]), digits = 0)
  resultmp[1, 3] <- round(mean(tmp[, 3]), digits = 0)
  resultmp[1, 4] <- resultmp[1, 2] / (resultmp[1, 2] + resultmp[1, 3])
  resultmp[1, 5] <- binom.test(resultmp[1, 2], resultmp[1, 2] + resultmp[1, 3], p = 0.5)$p.value
  res_list[[i]] <- resultmp
}
result <- do.call(rbind, res_list)
write.csv(result, "./20201227.H3k.analysis/8544.analysis/8544.ASH.analysis.csv",
          quote = FALSE, row.names = FALSE)
