# This script runs Bayes and meta-analysis on ASM tables that contain the
# site information, reads1, reads2, var_freq, p.diff, etc.

sel <- list.files(pattern = "sel.tsv")

########### Site selection ###########
# sel1 = sporadic (discordant) pairs; sel2 = SZ, sel3 = BD (defined later).
sel1 <- c("X2B_X1T", "M8_M7", "M6_M5", "M48_M47", "M50_M49", "M2_M1")
countdiffa <- c()  # NOTE(review): never used below -- kept for interactive use?
countdiff <- c()   # was initialized twice in the original; once is enough
for (i in seq_along(sel1)) {
  file <- read.table(sel1[i], header = TRUE, sep = "\t")
  # Keep only rows flagged "T" ("exits" is the column name as spelled in the data).
  file <- file[file$exits == "T", ]
  if (nrow(file) != 0) {
    # Allele bias: either direction (U or A) has a freq difference >= 0.25 and p < 0.05.
    bias <- (file$DU >= 0.25 & file$p.biasU < 0.05) | (file$DA >= 0.25 & file$p.biasA < 0.05)
    diff <- (file$p.diff < 0.05)
    # "chrom:pos" unit IDs built from the first two columns.
    count <- paste(as.matrix(file)[, 1], ":", file[, 2], sep = "")
    # NOTE(review): sel[] holds full file names (e.g. "*.snp.ASM.sel.tsv") while
    # paste(sel1, ".snp") yields bare "<pair>.snp"; this comparison may never
    # succeed, leaving countdiff empty -- verify against the actual file names.
    if (length(which(sel[i] == paste(sel1, ".snp", sep = ""))) == 1) {
      countdiff <- unique(c(countdiff, count[which(bias & diff)]))
    }
  }
}

countdiff <- unique(countdiff)

#################### Verify whether the sel.tsv files contain indels ####################
sel <- list.files(pattern = "sel.tsv")
countdiff <- c()
for (i in seq_along(sel)) {
  file <- read.table(sel[i], header = TRUE, sep = "\t")
  file <- file[file$exits == "T", ]
  if (nrow(file) != 0) {
    # Concatenate the ref and var alleles: SNVs give exactly 2 characters,
    # insertions/deletions give more (or fewer).
    file <- tidyr::unite(file, col = "refvar", ref, var, sep = "")
    # Vectorized replacement of the original per-row strsplit loop.
    idx <- which(nchar(file$refvar) != 2)
    if (length(idx) > 0) {
      count <- paste(as.matrix(file)[idx, 1], ":", file[idx, 2], sep = "")
      countdiff <- c(countdiff, count)
    }
  }
}

#################### Verify whether the WGS file contains indels ####################
# NOTE(review): `filese` is not defined anywhere in this script -- it must
# already exist in the workspace before this section runs; confirm its origin.
test <- read.table("test.txt", header = FALSE, sep = "\t")
test <- as.character(unlist(test))

# Collect the unitIDs from `test` that are present in filese.
# BUG FIXED(review): the original stored the matching *row indices*
# (as.character(sel)) in df instead of the IDs, so the removal loop below
# compared unitIDs against index strings and never matched anything.
df <- test[test %in% filese$unitID]

# Remove the matching rows in one vectorized step.
# BUG FIXED(review): the original looped `filese = filese[-sel, ]`; whenever
# `sel` was integer(0), the negative zero-length index silently dropped
# EVERY row of filese.
if (length(df) > 0) {
  filese <- filese[!(filese$unitID %in% df), ]
}



########### Merge the per-pair tables into one wide table ###########
# Seed `final` with the first file, then full-outer-join (merge, all=T) every
# later file on unitID so each pair contributes its reads/freq/p-value columns.
i=1
file=read.table(sel[i],head=T,sep="\t")
file=file[file$exits=="T",]
# unitID = "chrom:pos" key used for row selection and merging.
file$unitID=paste(as.matrix(file)[,1],":",file[,2],sep="")
rownames(file)=file$unitID
final=file[countdiff,c("normal_reads1","normal_reads2","normal_var_freq","tumor_reads1","tumor_reads2","tumor_var_freq","p.biasU","p.biasA","p.diff","exits","unitID")]
final=final[complete.cases(final[,ncol(final)]),]		# drop sites absent from this file (NA unitID)
# Rename the 10 data columns (everything except the trailing unitID) to
# sample-specific names derived from the file name, e.g. "X2B_reads1",
# "X2B_X1T_p.diff".
colnames(final)[(ncol(final)-10):(ncol(final)-1)]=c(paste(strsplit(gsub(".snp.ASM.sel.tsv","",sel[i]),"_")[[1]][1],c("_reads1","_reads2","_var_freq"),sep=""),paste(strsplit(gsub(".snp.ASM.sel.tsv","",sel[i]),"_")[[1]][2],c("_reads1","_reads2","_var_freq"),sep=""),paste(gsub(".snp.ASM.sel.tsv","",sel[i]),c("p.biasU","p.biasA","p.diff","exits"),sep="_"))
# NOTE(review): the loop starts at 3, so sel[2] is never merged -- confirm this
# is intentional (e.g. a known-bad file) and not an off-by-one.
for (i in 3:length(sel)){
file=read.table(sel[i],head=T,sep="\t")
file=file[file$exits=="T",]
if (dim(file)[1]!=0){									# skip empty files
file$unitID=paste(as.matrix(file)[,1],":",file[,2],sep="")
rownames(file)=file$unitID
final1=file[countdiff,c("normal_reads1","normal_reads2","normal_var_freq","tumor_reads1","tumor_reads2","tumor_var_freq","p.biasU","p.biasA","p.diff","exits","unitID")]
final1=final1[complete.cases(final1[,ncol(final1)]),]		# drop sites absent from this file (NA unitID)
final=merge(final,final1,by="unitID",all=T)
# After the merge the 10 newly added columns sit at the end; rename them the same way.
colnames(final)[(ncol(final)-9):ncol(final)]=c(paste(strsplit(gsub(".snp.ASM.sel.tsv","",sel[i]),"_")[[1]][1],c("_reads1","_reads2","_var_freq"),sep=""),paste(strsplit(gsub(".snp.ASM.sel.tsv","",sel[i]),"_")[[1]][2],c("_reads1","_reads2","_var_freq"),sep=""),paste(gsub(".snp.ASM.sel.tsv","",sel[i]),c("p.biasU","p.biasA","p.diff","exits"),sep="_"))
}else{}
}
write.table(final,"../SZBD9_ASM_data.txt",quote=F,row.names=F,sep="\t")



########### Read the file and run the meta-analysis for the sporadic pairs ###########
library(metaSeq)
file=read.table("ASM_data.txt",head=T,sep="\t")
allsample=gsub("_reads2","",colnames(file)[grep("reads2",colnames(file))])
sel1=c("X2B_X1T","M8_M7","M6_M5","M2_M1","M48_M47","M50_M49")
#sel1=c("X2B_X1T","M8_M7","M50_M49","M52_M51")
#sel1=c("M6_M5","M2_M1","M42_M41","M44_M43","M48_M47")
#sel1=c("X2B_X1T","M8_M7","M6_M5","M2_M1","M48_M47","M50_M49","M30_M29","M26_M25","M35_M36","M28_M27")  #### including concordant pairs
sel1=unlist(strsplit(sel1,"_"))

# For each site: combine the per-pair two-sided p.diff values into a single
# meta p-value with Stouffer's method, tracking the effect direction
# (case var_freq vs control var_freq) per pair.
for (i in 1:nrow(file)){
# mta1 = case (2nd ID of each pair) var_freq, mta2 = control (1st ID) var_freq.
mta1=as.numeric(file[i,paste(sel1[seq(2,length(sel1),2)],"_var_freq",sep="")])
mta2=as.numeric(file[i,paste(sel1[seq(1,length(sel1),2)],"_var_freq",sep="")])
mtp=as.numeric(file[i,paste(sel1[seq(1,length(sel1),2)],"_",sel1[seq(2,length(sel1),2)],"_p.diff",sep="")])
# NOTE(review): mta1/mta2/mtp are NA-filtered independently; if their NA
# patterns ever differ, the direction swap below indexes misaligned vectors -- verify.
mta1=mta1[!is.na(mta1)]
mta2=mta2[!is.na(mta2)]
mtp=mtp[!is.na(mtp)]
if (length(mtp)>0){
# Split each two-sided p into one-sided upper/lower tail p-values...
mtp1=mtp/2
mtp2=1-mtp1
pair_up=mtp1
pair_down=mtp2
# ...and swap the tails for pairs where the case freq is not above the control freq.
if(length(which(mta1<=mta2)) >= 1){
pair_up[which(mta1<=mta2)]=mtp2[which(mta1<=mta2)]
pair_down[which(mta1<=mta2)]=mtp1[which(mta1<=mta2)]
}
upper=matrix(pair_up,nrow=1)
lower=matrix(pair_down,nrow=1)
weight=rep(1,length(mtp))		# equal weight for every pair
if (length(weight)>=2){
# metaSeq: combine one-sided p-values across pairs via Stouffer's method,
# then report the smaller of the two one-sided combined p-values.
result2=other.oneside.pvalues(upper,lower,weight)
S=Stouffer.test(result2)
metap=S$Upper
metap[which(S$Upper>S$Lower)]=S$Lower[which(S$Upper>S$Lower)]
file[i,"metap_in_CC_DC"]=metap
}
# With a single informative pair there is nothing to combine.
if (length(weight)==1){
file[i,"metap_in_CC_DC"]=mtp
}
}
}
file$metap_in_CC_DC_fdr=p.adjust(file$metap_in_CC_DC,method = "BH")
write.table(file,"ASM_BD_meta.txt",quote=F,row.names=F,sep="\t")

# NOTE(review): interactive spot-checks; assumes a `filekp` data frame with a
# `metap` column already exists in the workspace (presumably reloaded from the
# meta-analysis output above) -- confirm before sourcing as a script.
a=filekp[filekp$metap<=0.1,]
a=a[complete.cases(a$metap),]
# Locate rows for specific candidate sites (results print only when run interactively).
which(a$unitID=="chr2:3258174")
which(a$unitID=="chr1:2448188")
which(a$unitID=="chr2:70004278")
which(a$unitID=="chr11:71185518")
which(a$unitID=="chr3:184333780")
countdiff=c("chr2:3258174","chr1:2448188","chr2:70004278","chr11:71185518","chr3:184333780")
a=a[complete.cases(a$metap),]
# Recompute FDR over ALL sites with a non-missing meta p-value (not just metap <= 0.1).
a=filekp[complete.cases(filekp$metap),]
a$FDR=p.adjust(a$metap,method="BH")
a[a$unitID=="chr2:3258174",]
a[a$unitID=="chr1:2448188",]
a[a$unitID=="chr2:70004278",]
a[a$unitID=="chr11:71185518",]
a[a$unitID=="chr3:184333780",]



########### Read the file and run the case/control association test ###########
# NOTE(review): the original section header said "chi-square", but the code
# actually runs Fisher's exact test on a 3x2 table of allele-frequency classes.
file <- read.table("ASM_data.txt", header = TRUE, sep = "\t")
allsample <- gsub("_reads2", "", colnames(file)[grep("reads2", colnames(file))])
sel1 <- c("X2B_X1T", "M8_M7", "M6_M5", "M48_M47", "M50_M49")
sel2 <- c("M28_M27", "M30_M29", "M26_M25", "M35_M36")
#sel1=c("X2B_X1T","M8_M7","M50_M49","M52_M51");sel2=c("M28_M27")
#sel1=c("M6_M5","M2_M1","M42_M41","M44_M43","M48_M47");sel2=c("M30_M29","M26_M25","M36_M35")
#sel1=c("X2B_X1T","M8_M7","M6_M5","M2_M1","M42_M41","M44_M43","M48_M47","M50_M49","M52_M51","M12_M11");sel2=c("M28_M27","M30_M29","M26_M25","M36_M35")  #### including depression
sel3 <- c("M18_M17", "M20_M19", "M22_M21", "M40_M39")
# Cases: the 2nd member of each sel1 pair plus every sel2 sample;
# controls: the 1st member of each sel1 pair plus every sel3 sample.
case <- c(unlist(strsplit(sel1, "_"))[seq(2, length(unlist(strsplit(sel1, "_"))), 2)], unlist(strsplit(sel2, "_")))
con <- c(unlist(strsplit(sel1, "_"))[seq(1, length(unlist(strsplit(sel1, "_"))), 2)], unlist(strsplit(sel3, "_")))
#case=c(unlist(strsplit(sel1,"_"))[seq(2,length(unlist(strsplit(sel1,"_"))),2)])
#con=c(unlist(strsplit(sel1,"_"))[seq(1,length(unlist(strsplit(sel1,"_"))),2)])
case <- paste(case, "_var_freq", sep = "")
con <- paste(con, "_var_freq", sep = "")
file1 <- file[, case]
file2 <- file[, con]

# Per site: bin var_freq into low (<= 0.25) / mid / high (>= 0.75) within cases
# and controls, then test the resulting 3x2 table with Fisher's exact test.
for (i in seq_len(nrow(file1))) {
  ca <- as.numeric(file1[i, ])
  co <- as.numeric(file2[i, ])
  if (length(which(!is.na(ca))) > 0 && length(which(!is.na(co))) > 0) {
    a1 <- length(which(ca <= 0.25))
    b1 <- length(which(ca > 0.25 & ca < 0.75))
    c1 <- length(which(ca >= 0.75))
    a2 <- length(which(co <= 0.25))
    b2 <- length(which(co > 0.25 & co < 0.75))
    c2 <- length(which(co >= 0.75))
    file[i, "chisq.pvalue_in_SZBD5V5"] <- fisher.test(matrix(c(c1, b1, a1, c2, b2, a2), nrow = 3, dimnames = list(c("up", "md", "dn"), c("Case", "Con"))))$p.value
  }
}

dim(file)

write.table(file, "BD11v13_chisq.txt", quote = FALSE, row.names = FALSE, sep = "\t")








########### Read the file and run the INLA (Bayesian) analysis ###########
library(data.table)
library(magrittr)
library(parallel)
library(dplyr)
library(boot)
library(INLA)
###### The ASE function expects `file` to hold read counts, ordered as:
###### twin1_control_ref,twin1_case_ref,twin2_control_ref,twin2_case_ref,
###### twin1_control_var,twin1_case_var,twin2_control_var,twin2_case_var.

sel1=c("X2B_X1T","M8_M7","M6_M5","M2_M1","M48_M47","M50_M49")
#sel1=c("X2B_X1T","M8_M7","M50_M49","M52_M51");sel2=c("M28_M27")
#sel2=c("M30_M29","M26_M25","M35_M36")
#sel1=c("X2B_X1T","M8_M7","M6_M5","M2_M1","M42_M41","M44_M43","M48_M47","M50_M49","M52_M51","M12_M11");sel2=c("M28_M27","M30_M29","M26_M25","M36_M35")  #### including depression
# NOTE(review): sel3 is defined but not used in this section.
sel3=c("M18_M17","M20_M19","M22_M21","M40_M39")

filekp=read.table("ASM_data.txt",head=T,sep = "\t")
# Build the reads1 (ref) and reads2 (var) column names for every sel1 sample,
# then subset: all ref columns first, then all var columns (the order ASE expects).
ref=paste(unlist(strsplit(sel1,"_")),"_reads1",sep="")
var=paste(unlist(strsplit(sel1,"_")),"_reads2",sep="")
file=filekp[,c(ref,var)]

# Bayes-factor test for allele-specific methylation at one site using INLA.
#
# x: one row of the merged count table, laid out as
#    [reads1 (ref) for all samples, then reads2 (var) for all samples];
#    the per-sample ordering is assumed to be twin1_control, twin1_case,
#    twin2_control, twin2_case, ... (see the header comment above -- TODO
#    confirm the layout matches `file`'s column order).
#
# Returns BF10 = exp(mlik_1 - mlik_0), the Bayes factor for a disease-status
# effect (x1) versus the null model, or NA_real_ when the site has fewer than
# 4 informative (non-NA) values.
ASE = function(x)
{
  # Accept both a numeric vector (apply path) and a 1-row data.frame (loop path).
  x <- as.numeric(unlist(x))
  if (sum(!is.na(x)) / 4 >= 1) {
    sel <- which(!is.na(x))
    rct <- x[sel]
    # First half of the informative values are ref counts, second half var counts.
    # NOTE(review): assumes NAs occur symmetrically in ref and var -- verify.
    ref <- rct[1:(length(rct) / 2)]
    var <- rct[(length(rct) / 2 + 1):(length(rct))]
    if (length(sel) / 4 > 1) {
      # Two or more twin pairs: pair identity (x2) enters as an iid random effect.
      df <- data.frame(y = var, Ntrials = ref + var,
                       x1 = rep(c(0, 1), length(sel) / 4),
                       x2 = rep(c(0:(length(sel) / 4 - 1)), each = 2))
      formula <- y ~ 1 + f(x2, model = "iid") + x1
      m1 <- inla(formula, data = df, family = "binomial", Ntrials = Ntrials, quantile = c(0.005, 0.025, 0.975, 0.995))
      formula <- y ~ 1 + f(x2, model = "iid")
      m0 <- inla(formula, data = df, family = "binomial", Ntrials = Ntrials, quantile = c(0.005, 0.025, 0.975, 0.995))
    } else {
      # Single pair: fixed-effect-only models.
      df <- data.frame(y = var, Ntrials = ref + var,
                       x1 = rep(c(0, 1), length(sel) / 4))
      formula <- y ~ 1 + x1
      m1 <- inla(formula, data = df, family = "binomial", Ntrials = Ntrials, quantile = c(0.005, 0.025, 0.975, 0.995))
      formula <- y ~ 1
      m0 <- inla(formula, data = df, family = "binomial", Ntrials = Ntrials, quantile = c(0.005, 0.025, 0.975, 0.995))
    }
    # mlik[2] is the marginal log-likelihood (Gaussian approximation).
    return(exp(m1$mlik[2] - m0$mlik[2]))
  }
  # BUG FIXED(review): the original fell through and returned NULL here, which
  # made the caller `filekp[i,]$BF=ASE(file[i,])` fail ("replacement has
  # length zero") on sparse sites; return NA so both call paths work.
  NA_real_
}

###
# Timing comparison: run ASE() per site via an explicit loop and via apply().
filekp$BF <- 0			# pre-allocate the Bayes-factor column
Sys.time()
for (i in seq_len(nrow(file))) {
  # NOTE(review): if ASE() returns NULL for a sparse site this assignment
  # errors ("replacement has length zero") -- the apply() path below tolerates it.
  filekp[i, ]$BF <- ASE(file[i, ])
}
Sys.time()
####

Sys.time()
rt <- apply(file, 1, ASE)			# same computation over all rows
Sys.time()
result <- as.numeric(as.character(rt))	# NULL/NA entries coerce to NA here

filekp$Bayes_in_DC <- result
write.table(filekp, "ASM_SZBD9v9_bf.txt", quote = FALSE, row.names = FALSE, sep = "\t")

######## Original code from a senior colleague (Zhongju): Bayesian analysis of
######## SZ/BD discordant + concordant pairs together with the healthy-control group.
filedir=file.choose()
file1=read.table(filedir,head=T,sep="\t")

sel1=c("X2B_X1T","M8_M7","M6_M5","M2_M1","M42_M41","M44_M43","M48_M47","M50_M49","M52_M51");sel2=c("M28_M27","M30_M29","M26_M25","M36_M35")

#sel1=c("X2B_X1T","M8_M7","M6_M5","M2_M1","M42_M41","M44_M43","M48_M47","M50_M49","M52_M51","M12_M11");sel2=c("M28_M27","M30_M29","M26_M25","M36_M35")  #### including depression
#sel1=c("X2B_X1T","M8_M7","M50_M49","M52_M51");sel2=c("M28_M27")
#sel1=c("M6_M5","M2_M1","M42_M41","M44_M43","M48_M47");sel2=c("M30_M29","M26_M25","M36_M35")


sel3=c("M18_M17","M20_M19","M22_M21","M40_M39")
sel1=unlist(strsplit(sel1,"_"))
sel2=unlist(strsplit(sel2,"_"))
sel3=unlist(strsplit(sel3,"_"))
sel=c(sel1,sel2,sel3)
# Disease status per sample: discordant pairs alternate 0/1, sel2 all cases (1),
# sel3 all controls (0).
sts=c(rep(c(0,1),length(sel1)/2),rep(1,length(sel2)),rep(0,length(sel3)))
file=file1[,c(paste(sel,"_reads1",sep=""),paste(sel,"_reads2",sep=""))]


# result[,1] = Bayes factor; result[,2] = 1 if the random-effect model was used
# (>= 2 informative pairs), 0 otherwise.
result=matrix(,nrow(file1),ncol=2)



for (i in 1:nrow(file)){
reads=as.numeric(file[i,])
# First half of the columns are ref counts, second half var counts.
ref=reads[1:(ncol(file)/2)]
var=reads[(ncol(file)/2+1):ncol(file)]
tid=rep(1:(length(sel)/2),each=2)-1		# pair/twin id, 0-based
# NOTE(review): sl is derived from NAs in ref only; assumes var has the same NA pattern -- verify.
sl=which(!is.na(ref))
if ((length(sl)/2)>=1){
if ((length(sl)/2)>=2){
# >= 2 informative pairs: pair id (x2) enters as an iid random effect.
df=data.frame(y=var[sl],Ntrials=ref[sl]+var[sl],x1=sts[sl],x2=tid[sl])
formula = y ~ 1 + f(x2, model = "iid") + x1
m1 <- inla(formula, data = df, family = "binomial", Ntrials = Ntrials,quantile = c(0.005, 0.025, 0.975, 0.995))
formula = y ~ 1 + f(x2, model = "iid")
m0 <- inla(formula, data = df, family = "binomial", Ntrials = Ntrials,quantile = c(0.005, 0.025, 0.975, 0.995))
# Bayes factor from the marginal log-likelihoods (mlik[2] = Gaussian approximation).
b10 <- exp(m1$mlik[2] - m0$mlik[2])
result[i,1]=b10
result[i,2]=1
}else{
# Single informative pair: fixed-effect-only models.
df=data.frame(y=var[sl],Ntrials=ref[sl]+var[sl],x1=sts[sl])
formula = y ~ 1 + x1
m1 <- inla(formula, data = df, family = "binomial", Ntrials = Ntrials,quantile = c(0.005, 0.025, 0.975, 0.995))
formula = y ~ 1
m0 <- inla(formula, data = df, family = "binomial", Ntrials = Ntrials,quantile = c(0.005, 0.025, 0.975, 0.995))
b10 <- exp(m1$mlik[2] - m0$mlik[2])
result[i,1]=b10
result[i,2]=0
}
}
}
# NOTE(review): file1 is never written out below, so this column is effectively unused.
file1$Bayes_in_DC_CC_HC=result[,1]

colnames(result)=c("BF_in_DC_CC_HC","SBN_count_in_DC_CC_HC")
result1=result		# NOTE(review): result1 is never used afterwards.

write.table(result,paste(filedir,"_SBN.BF.txt",sep=""),quote=F,row.names=F,sep="\t")

