Home > complex-toolbox > ACRTRL > augment_complex_rtrl_A.m

augment_complex_rtrl_A

PURPOSE ^

FUNCTION augment_complex_rtrl_A() implements the Augmented CRTRL algorithm

SYNOPSIS ^

function Y_out1A =augment_complex_rtrl_A(input)

DESCRIPTION ^

 FUNCTION augment_complex_rtrl_A() implements the Augmented CRTRL algorithm

 Based on the paper "An augmented CRTRL for complex-valued recurrent neural networks", 
 Neural Networks, vol 20, issue 10, 2007.

 INPUT: input signal which should be scaled according to the dynamic range of nonlinearity 

 OUTPUT:
 Y_out1A: output signal


 Complex Valued Nonlinear Adaptive Filtering toolbox for MATLAB
 Supplementary to the book:
 
 "Complex Valued Nonlinear Adaptive Filters: Noncircularity, Widely Linear and Neural Models"
 by Danilo P. Mandic and Vanessa Su Lee Goh
 
 (c) Copyright Danilo P. Mandic 2009
 http://www.commsp.ee.ic.ac.uk/~mandic
 
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.
 
    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.
 
    You can obtain a copy of the GNU General Public License from
    http://www.gnu.org/copyleft/gpl.html or by writing to
    Free Software Foundation, Inc.,675 Mass Ave, Cambridge, MA 02139, USA.
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 ...........................................
Defining parameters

CROSS-REFERENCE INFORMATION ^

This function calls: This function is called by:

SOURCE CODE ^

0001 % FUNCTION augment_complex_rtrl_A() implements the Augmented CRTRL algorithm
0002 %
0003 % Based on the paper "An augmented CRTRL for complex-valued recurrent neural networks",
0004 % Neural Networks, vol 20, issue 10, 2007.
0005 %
0006 % INPUT: input signal which should be scaled according to the dynamic range of nonlinearity
0007 %
0008 % OUTPUT:
0009 % Y_out1A: output signal
0010 %
0011 %
0012 % Complex Valued Nonlinear Adaptive Filtering toolbox for MATLAB
0013 % Supplementary to the book:
0014 %
0015 % "Complex Valued Nonlinear Adaptive Filters: Noncircularity, Widely Linear and Neural Models"
0016 % by Danilo P. Mandic and Vanessa Su Lee Goh
0017 %
0018 % (c) Copyright Danilo P. Mandic 2009
0019 % http://www.commsp.ee.ic.ac.uk/~mandic
0020 %
0021 % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
0022 %    This program is free software; you can redistribute it and/or modify
0023 %    it under the terms of the GNU General Public License as published by
0024 %    the Free Software Foundation; either version 2 of the License, or
0025 %    (at your option) any later version.
0026 %
0027 %    This program is distributed in the hope that it will be useful,
0028 %    but WITHOUT ANY WARRANTY; without even the implied warranty of
0029 %    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
0030 %    GNU General Public License for more details.
0031 %
0032 %    You can obtain a copy of the GNU General Public License from
0033 %    http://www.gnu.org/copyleft/gpl.html or by writing to
0034 %    Free Software Foundation, Inc.,675 Mass Ave, Cambridge, MA 02139, USA.
0035 % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
0036 % ...........................................
0037 function Y_out1A =augment_complex_rtrl_A(input)
0038 %Defining parameters of the fully connected recurrent network (N neurons, p input taps)
0039 p=5; %input taps
0040 N=3;%number of neurons
0041 alpha=0.1;%learning rate
0042 len=5000; %length of input sample
0043 %Initialization of weight parameter: each neuron holds 2*(p+N+1) weights (standard + conjugate parts)
0044 w1=zeros(2*(p+N+1),1)+i*zeros(2*(p+N+1),1);
0045 w2=zeros(2*(p+N+1),1)+i*zeros(2*(p+N+1),1);
0046 w3=zeros(2*(p+N+1),1)+i*zeros(2*(p+N+1),1);
0047 W=zeros(2*(p+N+1),N)+i*zeros(2*(p+N+1),N);
0048 
0049 %Initialization of weight change (dW1-dW3 are never used below; DW is recomputed every sample)
0050 dW1=zeros(1,2*(p+N+1))+i*zeros(1,2*(p+N+1));
0051 dW2=zeros(1,2*(p+N+1))+i*zeros(1,2*(p+N+1));
0052 dW3=zeros(1,2*(p+N+1))+i*zeros(1,2*(p+N+1));
0053 DW=zeros(N,2*(p+N+1))+i*zeros(N,2*(p+N+1));
0054 
0055 %Output Matrix: Y_old* hold the one-sample-delayed feedback, Y_out* the neuron outputs
0056 Y_old1=zeros(len+1,1)+i*zeros(len+1,1);
0057 Y_out1=zeros(len,1)+i*zeros(len,1);
0058 Y_old2=zeros(len+1,1)+i*zeros(len+1,1);
0059 Y_out2=zeros(len,1)+i*zeros(len,1);
0060 Y_old3=zeros(len+1,1)+i*zeros(len+1,1);
0061 Y_out3=zeros(len,1)+i*zeros(len,1);
0062 
0063 %Preparing the basic format of the input signal (tap-delay line)
0064 x=zeros(p,1);
0065 ElogA=zeros(1,len);%accumulated learning curve (dB); averaged over Monte Carlo runs at the end
0066 E_dB=zeros(1,len);  
0067 for monte=1:100 %independent Monte Carlo runs. NOTE(review): x, xin and Y_old1-3 are NOT re-initialised per run, so filter state leaks between runs -- confirm intended
0068     monte;
0069      
0070 %PI for split case: sensitivities P(l,t,j), 3-D array with row l = weight index, column t, page j = neuron indices
0071 PR_old=zeros(2*(p+N+1),N,N);%previous sample sensitivities
0072 PR_new=zeros(2*(p+N+1),N,N);%current sample sensitivities
0073 
0074 %weight initialization: small random complex weights, real and imaginary parts in [0,0.01)
0075 W_init1real=0.01*rand(2*(p+N+1),1);%initialise the weight1 randomly
0076 W_init1imag=0.01*rand(2*(p+N+1),1);
0077 w1=W_init1real+i*W_init1imag;
0078 W_init2real=0.01*rand(2*(p+N+1),1); %initialise the weight2 randomly
0079 W_init2imag=0.01*rand(2*(p+N+1),1);
0080 w2=W_init2real+i*W_init2imag;
0081 W_init3real=0.01*rand(2*(p+N+1),1); %initialise the weight3 randomly
0082 W_init3imag=0.01*rand(2*(p+N+1),1);
0083 w3=W_init3real+i*W_init3imag;
0084 
0085 
0086 %Load Data of Complex Colored Input: one-step-ahead prediction, xin is d delayed by one sample
0087 d=input(1:len);
0088 xin(1)=0;
0089 xin(2:len)=d(1:len-1);
0090 
0091 %Activity of the Neurons
0092 for k=1:len
0093     x=[xin(k);x(1:p-1)]; %tap-delay line: newest input sample pushed onto the top
0094     Uina=[Y_old1(k);Y_old2(k);Y_old3(k);1+i;x]; %the main input to the system; the complex bias term is 1+i
0095     Uin=[Uina;conj(Uina)];%augmented input [u; conj(u)] -- hence 2*(p+N+1) weights per neuron
0096 
0097     %First Neuron Activity
0098     Vout1=w1.'*Uin;%net input (.' is the non-conjugating transpose, i.e. a plain dot product)
0099     sig_function1 = tanh(Vout1);%Output of neuron 1 (fully complex tanh nonlinearity)
0100     sig_function_der1 = ( 1 - sig_function1^2 );%the derivative,f'
0101     Y_out1(k)=sig_function1;%store in the output matrix of 1st neuron
0102     Y_old1(k+1)=Y_out1(k);%feedback for the next time step
0103     
0104    
0105     %Second Neuron Activity
0106     Vout2=w2.'*Uin;
0107    sig_function2 = tanh(Vout2);%Output of neuron 2
0108     sig_function_der2 = ( 1 - sig_function2^2 );%the derivative,f'
0109     Y_out2(k)=sig_function2;%store in the output matrix of 2nd neuron
0110     Y_old2(k+1)=Y_out2(k);
0111     
0112      %Third Neuron Activity
0113     Vout3=w3.'*Uin;
0114    sig_function3 = tanh(Vout3);%Output of neuron 3
0115     sig_function_der3 = ( 1 - sig_function3^2 );%the derivative,f'
0116     Y_out3(k)=sig_function3;%store in the output matrix of 3rd neuron
0117     Y_old3(k+1)=Y_out3(k);
0118     
0119     %Real and Imaginary Part of Output
0120     u1(k)=real(Y_out1(k));
0121     v1(k)=imag(Y_out1(k));
0122     
0123     %Error Calculation: neuron 1 is the output neuron of the network
0124     e(k) = d(k) - Y_out1(k);
0125     
0126     %error component
0127     e_real(k)=real(d(k))-u1(k);
0128     e_imag(k)=imag(d(k))-v1(k);
0129    
0130     %Instantaneous squared error, split into real and imaginary parts
0131     E(k)=(1/2)*(e_real(k).^2+e_imag(k).^2);
0132     E_dB(k)=10*log10(E(k));%error value at step k, in dB
0133     
0134     %Vector of the three activation derivatives, used in the sensitivity recursion below
0135     sig_function_der=[sig_function_der1,sig_function_der2,sig_function_der3];
0136     
0137      %Calculating Pij: PR_new(l,t,j)=conj(f'_j)*( sum_r conj(W(.,j))*PR_old(l,.,j) + delta(t,j)*conj(Uin(l)) )
0138     for l=1:2*(N+p+1)%row: weight index within a neuron
0139         for t=1:N%column: neuron whose weights are differentiated against
0140             for j=1:N%page: neuron whose output is differentiated
0141                 tempR=0;
0142                 m=0;
0143                 for r=1:2*N%sum over the N feedback outputs and their N conjugates
0144                 tempR=tempR+conj((W(r+m*(p+1),j))).*(PR_old(l,r-m*N,j));%with m=1 the offset p+1 jumps to the conjugate-feedback weight rows
0145                 if r==N
0146                     m=1;%switch to the conjugate part after the first N terms
0147                 else
0148                 end  
0149                 end
0150                 if t==j
0151                     tempR=tempR+conj(Uin(l));%direct term: only when a neuron is differentiated w.r.t. its own weights
0152                 else
0153                 end
0154                 PR_new(l,t,j)=conj(sig_function_der(j)).*tempR;
0155             end
0156         end
0157     end
0158     PR_old=PR_new;
0159     
0160     %weight change, driven by the output-neuron error only. NOTE(review): only t=1 of PR_new is used below -- confirm against the paper
0161     DW=alpha.*(e(k).*PR_new);
0162     w1=w1+DW(:,1,1);
0163     w2=w2+DW(:,1,2);
0164     w3=w3+DW(:,1,3);
0165     W=[w1,w2,w3];%refresh the combined weight matrix used by the sensitivity recursion
0166 end%k
0167 Y_out1A=Y_out1;%returned output = neuron 1; overwritten each Monte Carlo run, so the last run is returned
0168 ElogA=ElogA+E_dB;%accumulate the learning curve over runs
0169 var_error(monte)=var(abs(e(1000:end)));%discard the first 1000 samples (initial transient)
0170 var_signal(monte)=var(d(1000:end));
0171 Rp(monte)=10*log10(var_signal(monte)/var_error(monte));%prediction gain of this run, in dB
0172 
0173 end%monte
0174 ElogA=ElogA/monte;%average learning curve over the Monte Carlo runs
0175 PredictionGain=mean(Rp);%mean prediction gain (computed but not returned)
0176 %figure(1)
0177 %plot(1:len,Elog,'b-')
0178 %title('Performance Comparison')
0179 %ylabel('Error(dB)')
0180 %xlabel('Number of iterations')
0181 %legend('RTRL')
0182 
0183 figure(2)
0184 plot (4000:len,abs(Y_out1(4000:len)'),'r',4000:len,abs(xin(4000:len)),'b')
0185 %plot (1:len,abs(Y_out1(1:len)'),'y')
0186 %figure(2)
0187 %plot(E)
0188 %title('RTRL Algorithm')
0189 %ylabel('Error')
0190 %xlabel('Number of iterations')
0191 %grid;
0192 %legend('CRTRL')
0193 
0194 %figure(3)
0195 %plot(1:len,xin,'c+-',1:len,Y_out1,'r.-')
0196 %title('RTRL Algorithm')
0197 %ylabel('Colour')
0198 %xlabel('Number of iterations')
0199 %grid;
0200 %legend('Input Data', 'Predicted Data')
0201 
0202

Generated on Tue 21-Apr-2009 19:50:21 by m2html © 2003