
# if in vscode, please use sudo R and ensure plugin R enable

rm(list=ls())

newSeed=Sys.time()
set.seed(as.numeric(newSeed))

library(msm)
# dyn.load("./Code/output/DIFF-tv.dll")
dyn.load("./Code/output/DIFF-tv.so")

jobnum=1
use.run=1
use.interval=0.0001
use.table=qnorm(seq(use.interval,1-use.interval,use.interval))
n.table.options=length(use.table)
stepSize=0.01
use.maxCounter=5000
# use.maxCounter=150
n=c("comp"=250,"incomp"=250)
# x = c(2, 0.5, 0.5, 2, 1, 1)
x = c(0.2, 0.3, 0.5, 0.3, 1, 0.1)
par.names = c("a", "ter", "z", "p", "sd.start","rate")


# Simulate N trials from the Shrinking Spotlight (SSP) flanker model via the
# compiled time-varying diffusion routine "DIFFtv" (loaded from DIFF-tv.so).
#
# N          : number of trials to simulate
# params     : named vector with "a","ter","z","p","sd.start","rate" plus the
#              nuisance entries "stoch.s","sv","sz","ster" appended by callers
# v          : ignored -- the time-varying drift is recomputed below
# maxCounter : maximum number of diffusion steps per trial
# stepSize   : duration of one step (s)
# congruency : +1 compatible, -1 incompatible flankers
# use.table / n.table.options : pre-tabulated standard-normal quantiles used
#              by the C routine as its Gaussian noise source
#
# Returns list(rt, resp), one entry per trial (resp appears to be -1 for
# trials that never reach a boundary -- see the binomial term downstream).
simulate.SSP=function(N,params,v,maxCounter,stepSize,congruency,use.table,n.table.options) {
  
  
  rts=rep(0,N)
  resps=rep(0,N)
  
  # Time grid 0, stepSize, 2*stepSize, ... (maxCounter points).
  times=cumsum(rep(stepSize,maxCounter))-stepSize
  
  # Spotlight width shrinks linearly at params["rate"], floored at 0.001.
  standDevs=params["sd.start"]-(params["rate"]*times)
  standDevs[standDevs<0.001]=0.001
  
  # Attention mass over target (inner) and flanker (outer) regions.
  a.top=1
  a.middle=pnorm(1.5,0,standDevs)
  a.bottom=pnorm(0.5,0,standDevs)
  
  # Time-varying drift rate: v is a vector with maxCounter elements, one per
  # time step (translated from the original Chinese note).
  v = (2*params["p"]*congruency*(a.top-a.middle)) + 
    (2*params["p"]*congruency*(a.middle-a.bottom)) + 
    (params["p"]*(a.bottom-(1-a.bottom)))
  
  # Map relative start point z in [0, 1] onto the boundary range [-a, a].
  z=(params["z"]*(params["a"]+params["a"]))-params["a"]
  # Call the compiled simulator; resp and rt are filled in by the C code.
  tmp=.C("DIFFtv",z=z,v=v,
         aU=params["a"],aL=-params["a"],ter=params["ter"],
         sv=params["sv"],sz=params["sz"],ster=params["ster"],
         s=params["stoch.s"],h=stepSize,resp=resps,rt=rts,
         n=N,maxiter=maxCounter,
         rangeLow=as.integer(0),rangeHigh=as.integer(n.table.options-1),
         randomTable=as.double(use.table)) 
  
  out=list(rt=tmp$rt,resp=tmp$resp)
}

# Generate a synthetic data set from the SSP model: one block of trials per
# congruency condition.
#
# n         : named trial counts, e.g. c(comp = 250, incomp = 250)
# x         : parameter values, named via par.names
# par.names : names for x ("a", "ter", "z", "p", "sd.start", "rate")
# conds     : condition labels to simulate
# use.maxCounter / stepSize : forwarded to simulate.SSP
#
# Returns list(Cond, Resp, Time), one entry per simulated trial.
# Relies on the globals use.table and n.table.options.
rdata=function(n,x,par.names, conds=c("comp","incomp"),use.maxCounter=150,stepSize=0.01) {
  names(x)=par.names
  out=list(Cond=NULL,Resp=NULL,Time=NULL)

  # Fixed nuisance parameters, identical for every condition:
  # stoch.s (diffusion coefficient) = 0.1, no between-trial variability.
  full.params=c(x,0.1,0,0,0)
  names(full.params)=c(par.names,"stoch.s","sv","sz","ster")

  for (cond in conds) {
    # Congruency coding: +1 compatible, -1 incompatible.
    if (cond=="comp") {
      cond.sign=1
    } else if (cond=="incomp") {
      cond.sign=-1
    }

    sim=simulate.SSP(
      N=as.double(n[cond]),
      params=full.params,
      maxCounter=use.maxCounter,
      stepSize=stepSize,
      congruency=cond.sign,
      use.table=use.table,
      n.table.options=n.table.options
    )

    out$Resp=c(out$Resp,sim$resp)
    out$Time=c(out$Time,sim$rt)
    out$Cond=c(out$Cond,rep(cond,n[cond]))
  }
  out
}


# Probability-density-approximation (PDA) likelihood: simulate Nsample trials
# from the model, kernel-smooth the simulated RTs per response, and read off
# the density at each observed RT.
#
# all.data  : list(rt, resp) of observed trials for one condition
# params    : full named parameter vector for the simulator
# Nsample   : number of trials to simulate
# maxCounter/stepSize/congruency : forwarded to getSimData
# bandwidth : kernel bandwidth for density()
# getSimData: simulator function (simulate.SSP)
#
# Returns list(PDF, n.na): PDF is a per-response list of densities evaluated
# at the observed RTs; n.na counts simulated trials with resp == -1.
# Relies on the globals use.table and n.table.options.
Log.likelihood.fun=function(
    all.data,
    params,
    Nsample,
    maxCounter,
    stepSize,
    congruency,
    bandwidth,
    getSimData) {
  
  allSimData=getSimData(
    N = Nsample,
    params = params,
    maxCounter = maxCounter,
    stepSize = stepSize,
    congruency = congruency,
    use.table = use.table,
    n.table.options = n.table.options
  )
  PDF=list()
  for (use.resp in 1:2) {                              # one PDF per response option
    sampvec=allSimData$rt[allSimData$resp==use.resp]   # simulated RTs
    data=all.data$rt[all.data$resp==use.resp]          # observed RTs
    
    if (length(data)==0) {
      # NOTE(review): assigning NULL to a list slot removes/skips it rather
      # than storing NULL, so PDF may end up shorter than 2; the caller
      # (log.dens.like) branches on length(PDF) to cope with this.
      PDF[[use.resp]]=NULL
    } else if (length(sampvec)==0) {
      # No simulated trials produced this response: zero density everywhere.
      PDF[[use.resp]]=rep(0,length(data))
    } else {
      
      m=min(data)-3*bandwidth;                         # lower edge of KDE range
      M=max(data)+3*bandwidth;                         # upper edge of KDE range
      
      if ( (min(sampvec)>M) | (max(sampvec)<m) ) {
        # Simulated RTs lie entirely outside the data range: zero density.
        PDF[[use.resp]] = rep(0,length(data))
      } else {
        d = density(sampvec,bw=bandwidth,from=m,to=M,n=1024)  # KDE of simulated RTs
        d$y[d$y<0] = 0
        # Scale by the response proportion so the per-response densities
        # jointly integrate to (terminated trials)/Nsample.
        d$y = d$y*length(sampvec)/Nsample
        out = numeric(length(data))
        ok = (data>d$x[1]) & (data<d$x[length(d$x)])
        out[ok] = approx(d$x,d$y,data[ok])$y                  # interpolate KDE at observed RTs
        out[is.na(out) | !is.finite(out)] = 0
        PDF[[use.resp]] = out
      }
    }
  }
  # resp == -1 marks non-terminated (NA) responses; the count feeds a
  # binomial likelihood term in log.dens.like.
  list(PDF,sum(allSimData$resp==-1))                         
}


# Summed log-likelihood of the data across conditions under parameter vector
# x, using the PDA likelihood (Log.likelihood.fun) plus a binomial term for
# non-terminated (resp == -1) responses.
# Relies on the globals Nsamples, use.maxCounter and stepSize.
log.dens.like=function(x,data,par.names, conds=c("comp","incomp")){
  names(x)=par.names
  out=0
  for (cond in conds) {
      tmp=data$Cond==cond
      
      # Silverman-style kernel bandwidth.  NOTE(review): it scales with the
      # number of SIMULATED trials (global Nsamples) rather than the data
      # size -- presumably because the KDE is built on the simulations;
      # confirm against the PDA literature.
      I=IQR(data$Time[tmp])
      S=sd(data$Time[tmp])
      bandwidth=0.9*min(I,S)*(Nsamples)^(-0.2)
      
      # Append fixed nuisance parameters: stoch.s = 0.1, sv = sz = ster = 0.
      params=x
      params.names=c(names(params),"stoch.s","sv","sz","ster")
      params=c(params,0.1,0,0,0)
      
      # congruency coding: +1 compatible, -1 incompatible
      if (cond=="comp") {
        congruency=1
      } else if (cond=="incomp") {
        congruency=-1
      }
      names(params)=params.names
      
      tmp1=Log.likelihood.fun(
        all.data=list(
          rt=data$Time[tmp],
          resp=data$Resp[tmp]),
        params=params,
        Nsample=Nsamples,
        maxCounter=use.maxCounter,
        stepSize=stepSize,
        congruency=congruency,
        bandwidth=bandwidth,
        getSimData=simulate.SSP)
      
      # Collect the per-response density vectors (PDF list may have length 1
      # when one response never occurs -- see Log.likelihood.fun).
      if (length(tmp1[[1]]) == 1) {
        tmp2=tmp1[[1]][[1]]
      } else {
        tmp2=c(tmp1[[1]][[1]],tmp1[[1]][[2]]) # both response options present
      }
      
      out=out+sum(log(pmax(tmp2,1e-10)))      # floor densities at 1e-10 to avoid log(0)
      # Binomial log-likelihood for the count of non-terminated responses,
      # with success probability estimated from the simulations.
      out=out+pmax(dbinom(sum(data$Resp[tmp]==-1),length(data$Resp[tmp]),tmp1[[2]]/Nsamples,log=TRUE),log(1e-10))
  }
  out
}

# Joint log prior density: sum of truncated-normal log densities, one per
# named parameter.  hyper[[p]] holds c(mean, sd); truncation limits come from
# the globals lower.bounds and upper.bounds.
log.dens.prior=function(x,hyper){
  log.densities=vapply(
    names(x),
    function(p) dtnorm(x[p],hyper[[p]][1],hyper[[p]][2],
                       lower.bounds[p],upper.bounds[p],log=TRUE),
    numeric(1)
  )
  sum(log.densities)
}

# One DE-MC crossover step for chain i at its tempering level.
# Proposes theta' = theta_i + gamma * (theta_r1 - theta_r2) + U(-b, b) and
# accepts via a Metropolis test on the tempered posterior.  Because the PDA
# likelihood is a noisy estimate, the stored likelihood of the current state
# is re-estimated every 5th iteration.
# Relies on the globals temperatures, n.chains and b.
# Returns c(log-likelihood, theta) for the (possibly updated) chain.
crossover=function(i,pars,use.theta,use.like,data,hyper,par.names,currIT){
  if (currIT %% 5 == 0) use.like[i] = log.dens.like(use.theta[i,],data,par.names=par.names)
  use.weight=use.like[i]*temperatures[i] + log.dens.prior(use.theta[i,],hyper)
  gamma = 2.38/sqrt(2*length(pars))   # standard DE-MC scale factor
  # Two distinct partner chains (excluding i) for the difference vector.
  index=sample(c(1:n.chains)[-i],2,replace=F)
  theta=use.theta[i,]						
  # NOTE(review): a single U(-b, b) draw is added to all proposed parameters;
  # some DE-MCMC variants jitter each parameter independently -- confirm.
  theta[pars]=use.theta[i,pars] + gamma*(use.theta[index[1],pars]-use.theta[index[2],pars]) + runif(1,-b,b)
  prior.like=log.dens.prior(theta,hyper)
  if (prior.like > -Inf) {
    like=log.dens.like(theta,data,par.names=par.names)
  } else {
    like = -Inf   # out-of-bounds proposal: skip the costly likelihood
  }
  weight=like*temperatures[i] + prior.like
  if(!is.finite(weight))weight=-Inf
  if(runif(1) < exp(weight-use.weight)) {							
    use.theta[i,]=theta
    use.like[i]=like
  }
  c(use.like[i],use.theta[i,])
}

# Migration step for tempered DE-MCMC: a random subset of chains is arranged
# in a ring, and each selected chain may adopt the state of its ring
# predecessor via a Metropolis test at the receiving chain's temperature.
#
# pars      : parameter column indices
# use.theta : n.chains x n.pars matrix of current states
# use.like  : current log-likelihoods, one per chain
# data/hyper/par.names : passed through for the prior density
#
# Returns cbind(use.like, use.theta) with any accepted swaps applied.
# Relies on the globals n.chains and temperatures.
#
# FIX: the original weighted both states by temperatures[i], but `i` is not
# an argument of this function; at the call site it resolved to the MCMC
# iteration counter, indexing far past the length-K temperature ladder
# (yielding NA weights).  Both states are now weighted by the receiving
# chain's own temperature, as required for a within-chain Metropolis test.
migration.crossover=function(pars,use.theta,use.like,data,hyper,par.names){
  n.migration.chains=ceiling(runif(1,0,n.chains))
  use.chains=sample(1:n.chains,n.migration.chains)
  migration.use.weight=rep(NA,n.migration.chains)
  migration.weight=rep(NA,n.migration.chains)
  for (mi in 1:n.migration.chains) {
    # Temperature of the chain that would receive the migrated state.
    temp.mi=temperatures[use.chains[mi]]
    migration.use.weight[mi]=use.like[use.chains[mi]]*temp.mi + log.dens.prior(use.theta[use.chains[mi],pars],hyper)
    # Proposal donor: the previous chain in the ring (wrapping at 1).
    newChain = mi - 1
    if (newChain == 0) newChain = n.migration.chains
    migration.weight[mi]=use.like[use.chains[newChain]]*temp.mi + log.dens.prior(use.theta[use.chains[newChain],pars],hyper)
    if(runif(1) < exp(migration.weight[mi]-migration.use.weight[mi])) {
      use.theta[use.chains[mi],]=use.theta[use.chains[newChain],]
      use.like[use.chains[mi]]=use.like[use.chains[newChain]]
    }
  }
  cbind(use.like,use.theta)
}





# Generate the synthetic data set whose parameters we try to recover.
data=list()
data = rdata(n,x,par.names)

# Wall-clock bookkeeping for the whole fitting run.
begin22 = date()
# One complete fitting run: build the temperature ladder, initialise the
# chains, run the DE-MCMC sampler, save the draws, and estimate the marginal
# likelihood by thermodynamic integration.  The loop executes once for each
# value up to the global use.run (here 1:1, i.e. a single run).
for (use.run in 1:use.run) {
  
  # Power-law temperature ladder t_k = ((k-1)/(K-1))^(1/alpha), reversed so
  # chain 1 targets the full posterior (t = 1) and chain K the prior (t = 0).
  K = 40
  alpha = 0.3
  all.temperatures = rev((0:(K-1)/(K-1))^(1/alpha))
  temperatures=all.temperatures
  
  # Start values and box constraints for ("a","ter","z","p","sd.start","rate").
  start.points=c(.1,.3,.5,.4,3,60)
  lower.bounds=c(0,0,0,0,0,0)
  upper.bounds=c(Inf,Inf,1,Inf,Inf,Inf)
  
  n.chains=K
  n.pars=6
  nmc=5000          # MCMC iterations per chain
  Nsamples=10000    # simulated trials per PDA likelihood evaluation
  
  # start == end == nmc makes the migration condition in the sampler loop
  # (i > migration.start & i < migration.end) unsatisfiable, i.e. migration
  # is deliberately disabled for this run.
  migration.start=nmc
  migration.end=nmc
  migration.freq=nmc
  
  b=.001   # half-width of the uniform jitter added to DE proposals
  
  theta=array(NA,c(n.chains,n.pars,nmc))   # chains x parameters x iterations
  weight=array(-Inf,c(nmc,n.chains))       # log-likelihood per iteration/chain
  
  theta.names = c("a", "ter", "z", "p", "sd.start","rate")
  colnames(theta) = theta.names   # labels dimension 2 of the 3-D array
  
  names(lower.bounds) = names(upper.bounds) = theta.names
  
  # Initialise each chain from truncated normals around the start values,
  # redrawing until the log-likelihood is finite.
  for(i in 1:n.chains){
    current.like=weight[1,i]
    while (current.like==-Inf) {
      current.thetas=rtnorm(n=n.pars,mean=start.points,sd=start.points/5,lower.bounds,upper.bounds)
      current.weight=log.dens.like(current.thetas,data=data,par.names=theta.names)
      current.like=current.weight*temperatures[i]
    }
    theta[i,,1]=current.thetas
    weight[1,i]=current.weight
  }
  
  # Truncated-normal prior hyper-parameters c(mean, sd) per parameter.
  # NOTE: grep() matches substrings, so grep("a", ...) also hits "sd.start"
  # and "rate"; those entries are overwritten by their own, later loops, so
  # the final list is still correct -- but the order of these loops matters.
  # FIX: the loop index was `n`, which clobbered the global trial-count
  # vector `n` consumed by rdata(); renamed to `pn`.
  prior=list()
  
  tmp=grep("a",theta.names,value=TRUE)
  for (pn in 1:length(tmp)) {
    prior[[tmp[pn]]]=c(.2,.2)
  }
  
  tmp=grep("ter",theta.names,value=TRUE)
  for (pn in 1:length(tmp)) {
    prior[[tmp[pn]]]=c(.3,.3)
  }
  
  tmp=grep("z",theta.names,value=TRUE)
  for (pn in 1:length(tmp)) {
    prior[[tmp[pn]]]=c(.5,.5)
  }
  
  tmp=grep("p",theta.names,value=TRUE)
  for (pn in 1:length(tmp)) {
    prior[[tmp[pn]]]=c(.5,.5)
  }
  
  tmp=grep("sd.start",theta.names,value=TRUE)
  for (pn in 1:length(tmp)) {
    prior[[tmp[pn]]]=c(4,4)
  }
  
  tmp=grep("rate",theta.names,value=TRUE)
  for (pn in 1:length(tmp)) {
    prior[[tmp[pn]]]=c(70,30)
  }
  
  savefile=paste("TIDE_Fits/SSP-TIDE_IS",nmc,"servantFlanker_simple_sub-",jobnum,"run_",use.run,sep="-")
  savefile=paste(savefile,"Rdata",sep=".")
  
  library(progress)
  pb <- progress_bar$new(
    format = "  :spin [:bar] :percent :eta",
    total = nmc
  )
  # Main sampler loop: a migration sweep when scheduled (never, with the
  # settings above) or a DE crossover step on every chain otherwise.
  for(i in 2:nmc){
    pb$tick()
    if (i %% migration.freq == 0 & i > migration.start & i < migration.end) {
      temp=migration.crossover(pars=1:n.pars,use.theta=theta[,,i-1],use.like=weight[i-1,],data=data,hyper=prior,par.names=theta.names)
    } else {
      temp=t(sapply(1:n.chains,crossover,pars=1:n.pars,use.theta=theta[,,i-1],use.like=weight[i-1,],data=data,hyper=prior,par.names=theta.names,currIT=i))
    }
    weight[i,]=temp[,1]
    theta[,,i]=temp[,2:(n.pars+1)]
  }
  
  save(weight, theta, file = "TIDE_Fits/SSP_TIDE_5000_recovery.rdata")
  
  # Thermodynamic integration by the trapezoid rule over the temperature
  # ladder; ti_cor subtracts a variance-based correction term.
  thermo_int = function(t,lp,var){
    I = sum(sapply(1:(length(t)-1),function(x) (t[x+1] - t[x]) * ((lp[x] + lp[x+1])/2)),na.rm=T)
    cor = sum(sapply(1:(length(t)-1),function(x) (((t[x+1] - t[x])^2)/12) * (var[x+1] - var[x])),na.rm=T)
    
    return(list(ti=I,ti_cor=I-cor))
  }
  
  # Per-chain mean/variance of the log-likelihood over the second half of the
  # run (burn-in discarded).
  # FIX: `weight[nmc/2:nmc,]` parsed as weight[nmc/(2:nmc),] because ":"
  # binds tighter than "/", which selected a scattering of duplicated
  # early/burn-in rows; parenthesised so rows (nmc/2)..nmc are used.
  mean.log.like=apply(weight[(nmc/2):nmc,],2,mean)
  var.log.like=apply(weight[(nmc/2):nmc,],2,var)
  
  TI.TIDE=thermo_int(t=rev(all.temperatures),lp=rev(mean.log.like),var=rev(var.log.like))
  
}

end22 = date()
begin22
end22

# load("./TIDE_Fits/SSP-TIDE_IS-5000-servantFlanker_simple_sub--1-run_-2.Rdata")
# Posterior post-processing and plotting (third-party packages).
library(tidybayes)
library(posterior)
library(dplyr)
library(ggplot2)
library(bayesplot)
library(easystats)
dim(theta)
# Reorder theta from (chains, pars, iterations) to (iterations, chains, pars)
# before converting to a posterior draws matrix.  NOTE(review): this pools
# draws across ALL temperatures, including the prior-tempered chains --
# confirm whether only the t = 1 chain should be summarised here.
teta_posterior <- theta %>% aperm(c(3, 1, 2)) %>% 
  as_draws_matrix()
str(teta_posterior)

# plot parameter posteriors with 95% intervals
teta_posterior %>% 
  mcmc_areas(pars = c("a", "ter", "z", "p"), prob = 0.95)
teta_posterior %>% 
  mcmc_areas(pars = c("sd.start","rate"), prob = 0.95)

# summarise the posterior draws (posterior/easystats summary method)
teta_posterior %>% 
  summary()
# Print the data-generating values for comparison with the recovered ones.
cat("true value", x)
