function f0 = spch2f0(X,fs,voiced1,voiced2,fds)
%
% function f0 = spch2f0(X,fs,voiced1,voiced2,fds)
%
% Given a matrix X of speech frames, spch2f0 estimates the fundamental
% frequency (f0) using an LPC based technique. Basically, the speech
% signal is divided into frames which have their LPC spectral envelope
% canceled by inverse filtering. The autocorrelation function is then
% calculated for windows centered on each sample of a given frame.
% The median location of the largest peak after the one at the origin is
% then considered to be the pitch period in samples, which is then
% converted into Hertz. Outlier detection and smoothing are performed at
% the end, and a diagnostic plot of the results is drawn.
%
% Each column of X contains one frame. The input parameter fs is the
% sampling frequency in Hz (default fs = 8040 Hz). voiced1 and voiced2
% are two criteria to detect voiced frames. voiced1 (default = 0.25) is
% the amplitude of the largest peak (after the one at the origin) of the
% normalized autocorrelation vector; voiced2 (default = 0.05) is the
% maximum modulus of the speech waveform itself. Frames with largest
% autocorrelation peak > voiced1 AND maximum waveform modulus > voiced2
% are considered to be voiced. These criteria worked fine for particular
% cases. However, since an exhaustive evaluation was not performed,
% better alternatives probably exist and any effort in this direction is
% encouraged.
%
% Finally, down-sampling can be performed by specifying fds, which is the
% down-sampled frequency in Hertz. This speeds the calculation, at the
% cost of some inaccuracy in the results. A good trade-off for male
% subjects was found to be fds = 2500 Hz, which is the default value. For
% a high-pitched female speaker a higher value will possibly be a better
% choice.
%
% Returns f0, a 1-by-M vector of pitch estimates in Hz, with 0 marking
% unvoiced frames.
%
if nargin < 2 || isempty(fs),      fs = 8040;      end
if nargin < 3 || isempty(voiced1), voiced1 = 0.25; end
if nargin < 4 || isempty(voiced2), voiced2 = 0.05; end
if nargin < 5 || isempty(fds),     fds = 2500;     end
[Nn,M] = size(X); % Nn = number of samples in a frame; M = number of frames
P = 6;            % LPC order
% Downsample the speech signal to Fs ~ fds (default fds = 2500 Hz),
% keeping M frames. N is forced even so the 2.5*N window length below is
% an integer.
Fss = fds;
N = round(Nn*Fss/(2*fs))*2;
Fs = round(fs*N/Nn);  % actual sampling frequency after resampling
X = reshape(resample(X(:),Fs,fs),N,M);
% X3 stacks previous/current/next frame per column so analysis windows
% can be centered on any sample of the current frame without running off
% its edges; the first and last frames are zero-padded.
X3 = [zeros(N,1) X(:,1:M-1)
      X
      X(:,2:M) zeros(N,1)];
rmax = zeros(N/2,M);  % height of largest secondary autocorrelation peak
imax = zeros(N/2,M);  % lag (in samples) of that peak
win = hamming(2.5*N);
maxdelay = round(50*Fs/2500); % longest lag searched (~20 ms, i.e. ~50 Hz floor)
for n = 1:N/2
  % Windows of length 2.5*N starting at successive samples of the frame.
  XX = X3(n+(1:2.5*N),:).*repmat(win,1,M);
  [A,g] = lpc(XX,P);
  for m = 1:M
    % Inverse-filter to obtain the whitened LPC residual.
    y = filter(A(m,:),g(m),XX(:,m));
    r = (y'*toeplitz(y,[y(1),zeros(1,maxdelay)]))/(y'*y); % Autocorrelation
    % Local maxima of r; the inf sentinel excludes the peak at the origin.
    aux = find((r > [inf r(1:end-1)])&(r > [r(2:end) 0]));
    if isempty(aux)
      % Degenerate window (e.g. an all-zero silence stretch gives
      % y'*y = 0, hence r = NaN and no peaks). Record a zero peak so the
      % voicing test rejects it and park the lag at the search limit;
      % the unguarded max(r(aux)) here used to raise a runtime error.
      rmax(n,m) = 0;
      imax(n,m) = maxdelay+1;
    else
      [rmax(n,m),aux_idx] = max(r(aux));
      imax(n,m) = aux(aux_idx);
    end
  end
end
f0 = Fs./median(imax,1);  % median pitch lag per frame, converted to Hz
% A frame is voiced only if its best autocorrelation peak AND its peak
% waveform amplitude clear the two thresholds; explicit dim-1 reductions
% keep this correct even for a single analysis window (N/2 == 1).
unvoiced = find((max(rmax,[],1) < voiced1)|(max(abs(X),[],1) < voiced2));
f0(unvoiced) = 0;
%%%%%%%%%%%%%%%%%%%%%%%End of basic analysis%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Replace isolated outliers (a jump of more than a factor 1.4 relative to
% BOTH neighbors, typical of period doubling/halving errors) by the mean
% of the two neighboring estimates.
f0_min = min(f0(find(f0)));  % lowest voiced estimate; empty if none voiced
f0_before = [0 f0(1:M-1)];
f0_after =  [f0(2:M) 0];
outlier = find(((f0 > 1.4*f0_before)&(f0 > 1.4*f0_after))...
              |((f0 < f0_before/1.4)&(f0 < f0_after/1.4)));
if ~isempty(outlier)
  f0(outlier) = mean([f0_before(outlier); f0_after(outlier)]);
end
f0 = medfilt1(f0,3);  % 3-point median smoothing of the pitch track
if ~isempty(f0_min)   % guard: all-unvoiced input used to error here
  f0(f0 < f0_min) = 0;  % estimates below the voiced floor -> unvoiced
end
% Smooth each voiced frame by averaging it with its voiced neighbors.
F0 = [[0 f0(1:M-1)]; f0; [f0(2:M) 0]];
for m = find(f0)
  idx = find(F0(:,m));
  f0(m) = mean(F0(idx,m));
end
% Diagnostic plot: pitch track (scaled by 1/100 to share axes with the
% waveform), RMS amplitude, waveform and maximum autocorrelation.
t = (1/2:M-1/2)*N/Fs;  % frame-center times (s)
tt = (1:M*N)/Fs;       % per-sample times (s)
f0 = f0/100;           % Just for plotting purposes

plot(t,f0,'r-',...
    t,std(X),'c-',...
    tt,X(:),'b-',...
    t,max(rmax,[],1),'m-',...
    t,f0,'r+',...
    t,std(X),'c+',...
    t,max(rmax,[],1),'m+')
grid on
set(gca,'xtick',0:0.2:10,'ytick',-3:0.5:3)
xlabel('Time (s)');
axis([0 t(end), -1 3]);
% 'Location' name-value pair: the numeric position argument formerly used
% here was removed from legend in MATLAB R2014b.
legend('F0/100',...
       'RMS Amplitude',...
       'Speech Waveform',...
       'Max Autocorrelation',...
       'Location','NorthEast');
f0 = 100*f0;  % undo the plotting scale before returning