function rel_prec()
%
% REL_PREC  Compute spike-timing reliability and precision for the
% currently selected data set in the datac database.
%
% Pools spike times across trials, convolves the pooled spike histogram
% with a Gaussian window (width = sigma, ms) to find "events" (periods
% where the smoothed rate exceeds the scaled mean rate), then reports per-event
% reliability (fraction of trials with a spike in the event window) and
% precision (std of spike times within the event window), plus pooled
% CVp statistics (Tiesinga) over several analysis windows.
%
% No input arguments; results are printed to the command window and
% plotted in a dedicated "Reliability" figure.
%
% 2/11/04 P. Manis (Modified).
%
sigma = 2; %3; % sigma is the width of the gaussian window (ms) for convolution...
event_thr = 9; % multiplier applied to the mean spike rate to set the event-detection threshold

c = datac('control'); % access the "control" - the database structure
sf=getmainselection; % find out where we are pointed.
if(length(sf) > 1)
   QueMessage('Too many records/files selected for analysis: choose ONE', 1);
   return;
end;
sp = c(sf).spike; % get the spike substructure from the database for the selected entry
x=[]; % list of spike times for all trials (pooled)
sxd = []; % sum of isi's for each trial
nxd = []; % number of isis in sum for each trial
ntrial = length(sp.latency); % find out how many trials there are
for i = 1:ntrial
   x = [x sp.latency{i}]; % concatenate the spike times
   sxd(i) = sum(diff(sp.latency{i})); % calculate the ISI
   nxd(i) = length(diff(sp.latency{i})); % get the number of spikes in each trial
end;
x = sort(x); % puts all spike times into ascending order (pooled over trials)
msr = event_thr*1000/(sum(sxd)/sum(nxd)); % compute the mean spike rate over all the trials.
maxnsp = 0;
d=datac('getdfile'); % get the acquisition information from the dfile structure in the opened file.
tb = make_time(d); % compute the time base.
dt = 2*mean(mean(d.rate))/1000; % get the actual sample rate (estimation, but correct if all trials the same)
tbx = [1:size(tb,2)]*dt; % make a new time base...

% calculate the CVp according to Paul Tiesinga's simple method.
% This is basically an all-isi (collapsed) measure

CVpN = CVP(x, ntrial);

%figure;
%plot(x(1:end-1), -log(diff(x)), '-bx');
% calculate the gaussian convolution function
% (sampled over +/- 2*sigma, centered at p2*dt)

s3 = sigma*4;
pts = s3/dt;
p2 = (pts-1)/2;
dt2=p2*dt; % half-width of the window in ms; used to correct event times for convolution delay
w=[];
for i = 1:pts
   t = (i-1)*dt;
   w(i) = gauss(sigma, t-p2*dt);
end;


% make a histogram and then convolve it with the gaussian

x_c = hist(x, tbx);
spk_c=conv(x_c, w);
spk_c = 1000*spk_c*sum(x_c)/sum(spk_c); %normalize the spike count window

% plotting section. 
h = findobj('Tag', 'Reliability');
if(isempty(h))
   h = figure('Tag', 'Reliability', 'name', 'Reliability and Precision', 'NumberTitle', 'off');
end;
figure(h);
clf;
fsize = 7;
tbs = [0:length(spk_c)-1]*dt;
subplot('Position', [0.1, 0.07, 0.8, 0.3]);
plot(tbs, spk_c, 'g-');
hold on;
plot([0 max(tbs)], [msr msr], 'k--');

% now we need to identify the "events" as those time windows in the convolution that are above the mean firing rate.
% we use same logic as used in finding spikes.
evlist1 = [];
evlist2 = [];
[d1, k1] = find(spk_c > msr); % find points > threshold
if(isempty(k1)) % nothing ever crosses threshold: bail before indexing k1(1)
   fprintf(1, 'No events found in spike train!\n');
   return;
end;
[d2, k2] = find(spk_c(k1(1):end) < msr); % find points < threshold
nevent = length(find(diff(k2) > 1));
if(nevent > 0)
   switch(length(k1))
   case 0
      evlist1 = [];
   case 1
      evlist1 = k1(1);
   otherwise
      evlist1 = [k1(1) k1(find(diff(k1) > 1)+1)]; % first crossing plus each re-crossing
   end;
   switch(length(k2))
   case 0
      evlist2 = [];
   case 1
      evlist2 = k2(1);
   otherwise
      evlist2 = [k2(1) k2(find(diff(k2) > 1)+1)];
   end;
   % NOTE(review): k2 was computed on the subarray spk_c(k1(1):end), so
   % re-basing to full-array indices arguably needs "+ k1(1) - 1"; the
   % original used "+ k1(1)" (one sample late, i.e. dt ms). Left as-is
   % to preserve published analysis values -- confirm intent.
   evlist2 = evlist2 + k1(1);
   evt1 = evlist1*dt; % event onset times (ms)
   evt2 = evlist2*dt; % event offset times (ms)
   evy1=spk_c(evlist1);
   evy2=spk_c(evlist2);
   plot(evt1, evy1, 'rx');
   plot(evt2, evy2, 'b+');
   
   if(length(evlist1) ~= length(evlist2))
      fprintf(1, 'nevents: %d, but event lists not same length ... need to fix code\n', nevent);
      fprintf(1, 'list 1 has %d items; list 2 has %d items\n', length(evlist1), length(evlist2));
      return;
   end;
else
   fprintf(1, 'No events found in spike train!\n');
   return;
end;
u=get(gca, 'Xlim');
nevent = length(evlist1);
rel = [];
prec = [];
mtime = []; % initialize so an all-empty event set cannot leave mtime undefined below
xe = [];
ncount = 0;
for i = 1:nevent
   t0 = evt1(i)-dt2; % shift back by the convolution half-width (group delay)
   t1 = evt2(i)-dt2;
   xe{i}=find(x >= t0 & x <= t1); % find all spikes over all trials that fall into event window
%   fprintf(1, 'event = %d   %6.2f - %6.2f   #spikes: %d\n', ...
%      i, t0, t1, length(xe{i}));
   if(length(xe{i}) > 0)
      rel(i) = length(xe{i})/ntrial; % get reliability as fraction in the box
      prec(i) = std(x(xe{i})); % get precision as std of those spikes that did occur...
      mtime(i) = mean(x(xe{i})); % get mean time also
      ncount = ncount + length(xe{i}); % count up total number of detected spikes in events
 %     fprintf(1, '       rel: %6.2f   prec: %6.3f   mtime: %6.2f\n', ...
 %        rel(i), prec(i), mtime(i));
   end;
end;
good = find(rel > 0); % drop events that captured no spikes
prec=prec(good);
rel=rel(good);
mtime=mtime(good);
xe={xe{good}};

prec_0 = mean_var(prec); % overall "precision"
rel_0  = mean_var(rel); % overall "reliability"

tipsp = 550; % presumably the IPSP time in ms -- TODO confirm against stimulus protocol
[w, tw1] = find(mtime < tipsp & mtime > 100); % baseline window events
[w, tw2] = find(mtime > tipsp & mtime < 900); % post-IPSP window events
[w, tw3] = find(mtime > tipsp & mtime < 700); % short post-IPSP window events

% global statistics - CV on the population of spikes in different windows.
[z, tz1] = find(x > 100 & x < tipsp);
[z, tz2] = find(x > tipsp & x < 900);
[z, tz3] = find(x > tipsp & x < 700);
cvp1 = CVP(x(tz1), ntrial);
cvp2 = CVP(x(tz2), ntrial);
cvp3 = CVP(x(tz3), ntrial);

% stdev and CV of first spike after the IPSP (for comparison).
xfs = [];
for i = 1:ntrial
   fs=find(sp.latency{i} > tipsp);
   if(~isempty(fs))
      xfs(i) = sp.latency{i}(fs(1));
   else
      xfs(i) = NaN; % BUGFIX: was xfs{i} = NaN (cell assignment into a numeric array -> runtime error)
   end;
end;
[mfsl, vfsl] = mean_var(xfs);
   

subplot('Position', [0.1, 0.42, 0.8, 0.25]);
plot(mtime, rel, 'ro');
set(gca, 'Xlim', u);
hold on;
v=get(gca, 'Ylim');
if(v(2)>2)
   v(2) = 2; % clip reliability axis
 end;
%for i=1:length(evt1)
%   plot([evt1(i) evt1(i)], v, 'b');
%   plot([evt2(i) evt2(i)], v, 'r');
%end;
set(gca, 'YLim', v);

subplot('Position', [0.1, 0.72, 0.8, 0.25]);
plot(mtime, prec, 'rx');
set(gca, 'Xlim', u);
v=get(gca, 'YLim');
hold on;
if(v(2) > 3*median(prec));
   v(2) = 3*median(prec); % clip precision axis to 3x the median to keep outliers from flattening the plot
end;
v(1) = 0;
%for i=1:length(evt1)
%   plot([evt1(i) evt1(i)], v, 'b');
%   plot([evt2(i) evt2(i)], v, 'r');
%end;
set(gca, 'YLim', v);
fprintf(1, '\nMeasure\tReliability\tPrecision(ms)\tCVp\n');
fprintf(1, 'Overall_\t%7.2f\t%7.2f\t%7.3f\n', rel_0, prec_0, CVpN);
fprintf(1, 'Baseline\t%7.2f\t%7.2f\t%7.3f\n', mean_var(rel(tw1)), mean_var(prec(tw1)), cvp1);
fprintf(1, 'PostIPSP\t%7.2f\t%7.2f\t%7.3f\n', mean_var(rel(tw2)), mean_var(prec(tw2)), cvp2);
fprintf(1, 'IPSPShort\t%7.2f\t%7.2f\t%7.3f\n', mean_var(rel(tw3)), mean_var(prec(tw3)), cvp3);
if(~isempty(tw2)) % guard: no post-IPSP events -> tw2(1) would be an index error
   fprintf(1, 'IPSPFirst\t%7.2f\t%7.2f\n', rel(tw2(1)), prec(tw2(1)));
end;
fprintf(1, 'FSLlatency\t%9.2f\tstd:\t%8.3f\n', mfsl, sqrt(vfsl));
fprintf(1, 'Counted %d (%7.1f) of %d spikes\n', ncount, 100*ncount/length(x), length(x));
% final: rank order traces by reliability... 
%????

function w = gauss(sigma, t)
% GAUSS  Value of a zero-mean Gaussian pdf with standard deviation
% sigma, evaluated at t. Used to build the convolution kernel above.
norm_factor = sqrt(2*pi)*sigma;
w = exp(-t^2/(2*sigma^2))/norm_factor;
return;

function [CVpN] = CVP(x, ntrial)
% CVP  Normalized coefficient of variation of the pooled inter-spike
% intervals, per PT's (Tiesinga's) notes: CVp = std(isi)/mean(isi),
% then shifted and scaled by the trial count.

isi = diff(x); % inter-spike intervals of the pooled, sorted spike times
CVpN = (std(isi)/mean(isi) - 1)/sqrt(ntrial);
return;



