
CEKFaug

PURPOSE

FUNCTION CEKFaug performs the augmented complex-valued extended Kalman filter (EKF) algorithm for a complex-valued recurrent neural network.

SYNOPSIS

function [y, e, W, weightRecord_aug, K, A, OutputVar, P, W_aug] = CEKFaug(input, target, beta, mode, p, N, KalmanR, KalmanQ, KalmanP, monte)

DESCRIPTION

 FUNCTION CEKFaug performs the augmented complex-valued extended Kalman filter (EKF) algorithm for a complex-valued recurrent neural network.

 Based on the paper: S. L. Goh and D. P. Mandic, "An augmented extended Kalman filter algorithm for complex-valued recurrent neural networks", Neural Computation, vol. 19, no. 4, 2007.

 INPUT:
 input: input signal, scaled to match the dynamic range of the nonlinearity
 target: desired response
 beta: slope of the nonlinearity
 mode: choice of nonlinearity (1 = logistic, 2 = tanh)
 p: length of the external input vector (number of taps)
 N: number of neurons in the network
 KalmanR: measurement noise covariance parameter
 KalmanQ: process noise covariance parameter
 KalmanP: initial state error covariance parameter
 monte: number of Monte Carlo runs

 OUTPUT:
 y: output signal
 e: output error signal
 W: weight matrix of the network
 weightRecord_aug: record of the augmented weight vector at each time step
 K: Kalman gain matrix
 A: innovation covariance matrix
 OutputVar: output variance at each time step
 P: state error covariance matrix
 W_aug: augmented weight matrix [W; conj(W)]
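
 A minimal usage sketch (the signal and parameter values below are
 illustrative assumptions, not settings prescribed by the toolbox):

   N = 3; p = 4;                                      % 3 neurons, 4 input taps
   x = 0.5*(randn(1000,1) + 1i*randn(1000,1));        % complex input, scaled to the nonlinearity
   d = [x(2:end); 0];                                 % one-step-ahead prediction target
   [y, e, W] = CEKFaug(x, d, 1, 2, p, N, 1e-2, 1e-6, 1, 1); % beta = 1, mode 2 = tanh, one run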

CROSS-REFERENCE INFORMATION

This function calls: (none)
This function is called by: (none)

SOURCE CODE

% FUNCTION CEKFaug performs the augmented complex-valued extended Kalman
% filter (EKF) algorithm for a complex-valued recurrent neural network.
%
% Based on the paper: S. L. Goh and D. P. Mandic, "An augmented extended
% Kalman filter algorithm for complex-valued recurrent neural networks",
% Neural Computation, vol. 19, no. 4, 2007.
%
% INPUT:
% input: input signal, scaled to match the dynamic range of the nonlinearity
% target: desired response
% beta: slope of the nonlinearity
% mode: choice of nonlinearity (1 = logistic, 2 = tanh)
% p: length of the external input vector (number of taps)
% N: number of neurons in the network
% KalmanR: measurement noise covariance parameter
% KalmanQ: process noise covariance parameter
% KalmanP: initial state error covariance parameter
% monte: number of Monte Carlo runs
%
% OUTPUT:
% y: output signal
% e: output error signal
% W: weight matrix of the network
% weightRecord_aug: record of the augmented weight vector at each time step
% K: Kalman gain matrix
% A: innovation covariance matrix
% OutputVar: output variance at each time step
% P: state error covariance matrix
% W_aug: augmented weight matrix [W; conj(W)]
%
% Complex Valued Nonlinear Adaptive Filtering toolbox for MATLAB
% Supplementary to the book:
%
% "Complex Valued Nonlinear Adaptive Filters: Noncircularity, Widely Linear and Neural Models"
% by Danilo P. Mandic and Vanessa Su Lee Goh
%
% (c) Copyright Danilo P. Mandic 2009
% http://www.commsp.ee.ic.ac.uk/~mandic
%
% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%    This program is free software; you can redistribute it and/or modify
%    it under the terms of the GNU General Public License as published by
%    the Free Software Foundation; either version 2 of the License, or
%    (at your option) any later version.
%
%    This program is distributed in the hope that it will be useful,
%    but WITHOUT ANY WARRANTY; without even the implied warranty of
%    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
%    GNU General Public License for more details.
%
%    You can obtain a copy of the GNU General Public License from
%    http://www.gnu.org/copyleft/gpl.html or by writing to
%    Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% ...........................................
function [y, e, W, weightRecord_aug, K, A, OutputVar, P, W_aug] = CEKFaug(input, target, beta, mode, p, N, KalmanR, KalmanQ, KalmanP, monte)

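% Outline of the algorithm, per time step:
%   1. form the augmented input vector U_aug = [U; conj(U)]
%   2. forward pass through the nonlinearity to obtain the network outputs
%   3. recursively update the output sensitivities (Jacobian) PI_curr_aug
%   4. EKF update of the augmented weights W_aug and of the covariance P
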
pp = length(input);                                   % number of input samples
target = target + KalmanR*(randn(size(target)) + 1i*randn(size(target))); % add complex observation noise to the target (scaled by KalmanR)

s = zeros(p,1) + 1i*zeros(p,1);                       % external input vector (tapped delay line)
DW = zeros(p+N+1, N) + 1i*zeros(p+N+1, N);            % change applied to the weight matrix

% Weight matrices
W = 0.01*randn(p+N+1,N) + 1i*0.01*randn(p+N+1,N);     % the weight matrix
W_conj = conj(W);
W_aug = [W; W_conj];                                  % augmented weight matrix

% Jacobian (sensitivity) matrices
PI_prev_aug = zeros(2*(p+N+1),N*N) + 1i*zeros(2*(p+N+1),N*N);
PI_curr_aug = zeros(2*(p+N+1),N*N) + 1i*zeros(2*(p+N+1),N*N);
wtemp = zeros(N, 2*(p+N+1)) + 1i*zeros(N, 2*(p+N+1)); % scratch space for the sensitivity recursion

K = zeros(N*N, 2*(p+N+1)) + 1i*zeros(N*N, 2*(p+N+1)); % Kalman gain
P = KalmanP*(eye(2*(p+N+1)) + 1i*eye(2*(p+N+1)));     % state error covariance
R = KalmanR*(eye(N*N) + 1i*eye(N*N));                 % measurement noise covariance
Q = KalmanQ*(eye(2*(p+N+1)) + 1i*eye(2*(p+N+1)));     % process noise covariance
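
% In the EKF formulation the augmented network weights play the role of the
% state vector: R models the measurement noise, Q the process noise, and P
% the state (weight) error covariance, initialised from the scalar
% parameters KalmanR, KalmanQ and KalmanP.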

weightRecord_aug = zeros(2*(p+N+1),pp) + 1i*zeros(2*(p+N+1),pp);
OutputVar = zeros(N*N, pp) + 1i*zeros(N*N, pp);       % output variance record
P_record = zeros(2*(p+N+1), 2*(p+N+1), pp) + 1i*zeros(2*(p+N+1), 2*(p+N+1), pp); % covariance record

E = zeros(pp,1) + 1i*zeros(pp,1);
MSE = zeros(pp,1) + 1i*zeros(pp,1);
% Output vectors from the neurons
Y_curr_aug = zeros(2*pp, N) + 1i*zeros(2*pp, N);

U = zeros(p+N+1, 1) + 1i*zeros(p+N+1, 1);             % input to the network (external inputs, bias, feedback)
U_conj = conj(U);
V = zeros(1,N) + 1i*zeros(1,N);                       % weighted input vector to a neuron
Errors = 0.001*(randn(pp, 1) + 1i*randn(pp, 1));
Phi = zeros(1, N) + 1i*zeros(1,N);
for Monte = 1:monte
for k = 2:pp

    s = [input(k-1); s(1:p-1)];                       % shift the tapped delay line
    U(:) = [s; 1+1i; Y_curr_aug(k-1,:).'];            % input vector to the network (external inputs, bias, feedback)
    U_conj(:) = conj(U(:));
    U_aug = [U(:); U_conj(:)];                        % augmented input vector

    V_aug = U_aug.'*W_aug;                            % net input to each neuron
    switch (mode)
        case 1                                        % logistic nonlinearity (logsig)
            Y_curr_aug(k,:) = 1./(1+exp(-beta*V_aug));
        case 2                                        % hyperbolic tangent nonlinearity (tansig)
            Y_curr_aug(k,:) = tanh(beta*V_aug);
    end

    Errors(k) = target(k) - Y_curr_aug(k,1);          % prediction error: target minus the output of the first neuron

    % Derivative of the nonlinearity at the current net input
    switch (mode)
        case 1
            Phi = beta*(1./(1+exp(-beta*V_aug))).*(1 - 1./(1+exp(-beta*V_aug)));
        case 2
            Phi = beta*(1 - tanh(beta*V_aug).^2);     % slope beta included via the chain rule
    end
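
    % Recursive computation of the output sensitivities, in the spirit of
    % real-time recurrent learning: the sensitivity of neuron n to weight l
    % of neuron j combines the previous sensitivities fed back through the
    % recurrent weights with a direct term conj(U_aug(l)) when n == j, all
    % scaled by the conjugate of the derivative of the nonlinearity.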
    % Stack the N per-neuron sensitivity matrices side by side into the
    % 2*(p+N+1)-by-(N*N) matrix PI_curr_aug
    for j = 1:N
        for n = 1:N
            for l = 1:2*(p+N+1)
                temp = 0;
                for m = 1:N
                    temp = temp + conj(W_aug(1+p+m, j))*PI_prev_aug(l,(m-1)*N+n);
                end
                wtemp(n,l) = temp;
                if n == j
                    wtemp(n,l) = wtemp(n,l) + conj(U_aug(l));
                end
                PI_curr_aug(l,(j-1)*N+n) = conj(Phi(1,j))*wtemp(n,l);
            end
        end
    end

    PI_prev_aug = PI_curr_aug;                        % store sensitivities for the next time step
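
    % EKF update. With H = PI_curr_aug denoting the Jacobian of the network
    % output with respect to the augmented weights, the recursions below are
    %   A = R + H'*(P+Q)*H                  (innovation covariance)
    %   K = (P+Q)*H*inv(A)                  (Kalman gain)
    %   W_aug <- W_aug + K(:,1:N:N*N)*e(k)  (state, i.e. weight, update)
    %   P <- P - K*H'*(P+Q) + Q             (error covariance update)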
    A = R + PI_curr_aug'*(P+Q)*PI_curr_aug;           % innovation covariance (may be close to singular)
    K = (P+Q)*PI_curr_aug/A;                          % mrdivide avoids forming inv(A) explicitly
    W_aug = W_aug + K(:,1:N:N*N)*Errors(k);           % update driven by the error of the first neuron
    P = P - K*PI_curr_aug'*(P+Q) + Q;

    OutputVar(:,k) = diag(R + PI_curr_aug'*P*PI_curr_aug);
    P_record(:,:,k) = P;
    weightRecord_aug(:,k) = W_aug(:,1);
end

E = E + (1/2)*abs(Errors).^2;
MSE = MSE + 10*log10((1/2)*abs(Errors).^2);
var_error(Monte) = var(Errors);
var_signal(Monte) = var(input);
Rp(Monte) = 10*log10(var_signal(Monte)/var_error(Monte));

end
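
% Per-run statistics: E and MSE accumulate the squared error across the
% Monte Carlo runs, and Rp = 10*log10(var(input)/var(Errors)) is the
% prediction gain in dB, a standard measure of prediction performance.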
y = Y_curr_aug(1:pp,1);                               % network output (first neuron)

e = Errors;

predError = norm(y(:) - input(:));                    % distance between output and input (check on this)

fprintf('EKF error    = %6.2f', predError)
fprintf('\n')

fprintf('Rp     = %6.2f', mean(Rp))
fprintf('\n')

% PLOT RESULTS:
% ============

figure(1)
plot(1:length(target),abs(target),'k+:',1:length(y),abs(y),'r',1:length(input),abs(input),'b');
ylabel('Prediction','fontsize',15)
%axis([0 pp -1.5 1.5]);

figure(2)
plot(1:length(OutputVar),mean(abs(OutputVar)),'m');
ylabel('Output variance','fontsize',15);
%axis([0 pp 1.4 2]);

figure(3)
plot(E)                                               % accumulated squared error

figure(4)
plot(MSE)                                             % accumulated squared error in dB
