##########################################################
# AI::NNFlex::Reinforce
##########################################################
# NNFlex learning module
# this is a bit of an experimental one. All it does is
# reinforce the weight depending on the sign & activity
# of the node, sort of a gross oversimplification of a
# neuron.
#
##########################################################
# Versions
# ========
#
# 1.0   20041125        CColbourn       New module
# 1.1   20050116        CColbourn       Fixed reverse @layers
#                                       bug reported by GM Passos
#
# 1.2   20050218        CColbourn       Mod'd to change weight
#                                       addressing from hash to
#                                       array for nnf0.16
#
# 1.3   20050307        CColbourn       Repackaged as a subclass
#                                       of nnflex
#
##########################################################
# ToDo
# ----
#
#
##########################################################
#

package AI::NNFlex::Reinforce;

use AI::NNFlex;
use AI::NNFlex::Feedforward;
use base qw(AI::NNFlex AI::NNFlex::Feedforward);
use strict;


###########################################################
# AI::NNFlex::Reinforce::learn
###########################################################
sub learn
{
    my $network = shift;

    my @layers = @{$network->{'layers'}};

    # no connections westwards from input, so no weights to adjust
    shift @layers;

    # reverse to start with the last layer first
    foreach my $layer (reverse @layers)
    {
        my @nodes = @{$layer->{'nodes'}};

        foreach my $node (@nodes)
        {
            my @westNodes = @{$node->{'connectedNodesWest'}->{'nodes'}};
            my @westWeights = @{$node->{'connectedNodesWest'}->{'weights'}};
            my $connectedNodeCounter = 0;
            foreach my $westNode (@westNodes)
            {
                # strengthen each incoming connection in proportion to
                # the sending node's activation, the current weight and
                # the network's learning rate
                my $dW = $westNode->{'activation'} * $westWeights[$connectedNodeCounter] * $network->{'learning rate'};
                $node->{'connectedNodesWest'}->{'weights'}->[$connectedNodeCounter] += $dW;

                # step on to the next incoming connection's weight
                $connectedNodeCounter++;
            }
        }
    }
}

1;

=pod

=head1 NAME

AI::NNFlex::Reinforce - A very simple experimental NN module

=head1 SYNOPSIS

 use AI::NNFlex::Reinforce;

 my $network = AI::NNFlex::Reinforce->new(config parameter=>value);

 $network->add_layer(nodes=>x, activationfunction=>'function');

 $network->init();

 use AI::NNFlex::Dataset;

 my $dataset = AI::NNFlex::Dataset->new([
                        [INPUTARRAY], [TARGETOUTPUT],
                        [INPUTARRAY], [TARGETOUTPUT]]);

 my $sqrError = 10;

 for (1..100)
 {
     $dataset->learn($network);
 }

 $network->lesion({'nodes'=>PROBABILITY, 'connections'=>PROBABILITY});

 $network->dump_state(filename=>'badgers.wts');

 $network->load_state(filename=>'badgers.wts');

 my $outputsRef = $dataset->run($network);

 my $outputsRef = $network->output(layer=>2, round=>1);

=head1 DESCRIPTION

Reinforce is a very simple NN module. It's mainly included in this distribution to provide an example of how to subclass AI::NNFlex to write your own NN modules. The training method strengthens any connections that are active during the run pass: each weight is increased by the sending node's activation multiplied by the current weight and the network's learning rate, so positive weights on active connections grow more positive and negative weights more negative.
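
Because the learning rule is so small, this module also works as a template for writing your own. The sketch below follows the same structure (the package name and the weight rule inside learn() are hypothetical, chosen only to illustrate the shape of a learning module): inherit the network plumbing from AI::NNFlex and the run pass from AI::NNFlex::Feedforward, then supply a learn() method that walks the layers and adjusts weights.

 package AI::NNFlex::MyRule;    # hypothetical module name

 use AI::NNFlex;
 use AI::NNFlex::Feedforward;
 use base qw(AI::NNFlex AI::NNFlex::Feedforward);
 use strict;

 sub learn
 {
     my $network = shift;

     # the input layer has no connections westwards, so skip it
     my @layers = @{$network->{'layers'}};
     shift @layers;

     foreach my $layer (@layers)
     {
         foreach my $node (@{$layer->{'nodes'}})
         {
             my $weights = $node->{'connectedNodesWest'}->{'weights'};
             foreach my $i (0..$#{$weights})
             {
                 # hypothetical rule: decay every weight slightly
                 $weights->[$i] *= 0.99;
             }
         }
     }
 }

 1;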

=head1 CONSTRUCTOR

=head2 AI::NNFlex::Reinforce

 new ( parameter => value );

 randomweights=>MAXIMUM VALUE FOR INITIAL WEIGHT

 fixedweights=>WEIGHT TO USE FOR ALL CONNECTIONS

 debug=>[LIST OF CODES FOR MODULES TO DEBUG]

 learningrate=>THE LEARNING RATE OF THE NETWORK

 round=>0 or 1 - 1 sets the network to round output values to
 the nearest of 1, -1 or 0

The following parameters are optional:

 randomweights
 fixedweights
 debug
 round

Note: if randomweights is not specified, initial weights default to a random value between 0 and 1.
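
A concrete sketch (the parameter values are arbitrary illustrations, not recommended settings):

 my $network = AI::NNFlex::Reinforce->new(learningrate=>0.1,
                                          randomweights=>1,
                                          round=>1);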

=head1 METHODS

This is a short list of the main methods implemented in AI::NNFlex. Subclasses may implement other methods.

=head2 AI::NNFlex

=head3 add_layer

Syntax:

 $network->add_layer(nodes=>NUMBER OF NODES IN LAYER,
                     persistentactivation=>RETAIN ACTIVATION BETWEEN PASSES,
                     decay=>RATE OF ACTIVATION DECAY PER PASS,
                     randomactivation=>MAXIMUM STARTING ACTIVATION,
                     threshold=>NYI,
                     activationfunction=>"ACTIVATION FUNCTION",
                     randomweights=>MAX VALUE OF STARTING WEIGHTS);
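
For example, a three layer network could be assembled as below (a sketch; 'tanh' and 'linear' are assumed here to be among the activation functions shipped with the distribution):

 $network->add_layer(nodes=>2, activationfunction=>'tanh');
 $network->add_layer(nodes=>2, activationfunction=>'tanh');
 $network->add_layer(nodes=>1, activationfunction=>'linear');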

=head3 init

Syntax:

 $network->init();

Initialises connections between nodes, sets initial weights and loads external components. The base AI::NNFlex init method implements connections backwards and forwards from each node in each layer to each node in the preceding and following layers.

=head3 lesion

 $network->lesion ({'nodes'=>PROBABILITY,'connections'=>PROBABILITY})

Damages the network.

B<PROBABILITY>

A value between 0 and 1, denoting the probability of a given node or connection being damaged.

Note: this method may be called on a per-network, per-node or per-layer basis using the appropriate object.
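
For example, to damage roughly 10% of nodes and 5% of connections (arbitrary values):

 $network->lesion({'nodes'=>0.1, 'connections'=>0.05});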

=head2 AI::NNFlex::Dataset

=head3 learn

 $dataset->learn($network)

'Teaches' the network the dataset using the network's defined learning algorithm. Returns sqrError.
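
The synopsis trains for a fixed number of passes; where the learning algorithm in use returns a meaningful error figure, that figure can drive a convergence test instead. A sketch, with an arbitrary threshold:

 my $sqrError = 10;
 while ($sqrError > 0.001)
 {
     $sqrError = $dataset->learn($network);
 }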

=head3 run

 $dataset->run($network)

Runs the dataset through the network and returns a reference to an array of output patterns.
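
A sketch of inspecting the result, assuming each pattern is itself an array reference of node outputs in dataset order:

 my $outputsRef = $dataset->run($network);
 foreach my $pattern (@$outputsRef)
 {
     print join(", ", @$pattern)."\n";
 }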

=head1 EXAMPLES

See the code in ./examples. For any given version of NNFlex, xor.pl will contain the latest functionality.

=head1 PREREQs

None. AI::NNFlex::Reinforce should run OK on any version of Perl 5 or above.

=head1 ACKNOWLEDGEMENTS

Phil Brierley, for his excellent free Java code, which solved my backprop problem.

Dr Martin Le Voi, for help with concepts of NN in the early stages.

Dr David Plaut, for help with the project that this code was originally intended for.

Graciliano M. Passos, for suggestions & improved code (see SEE ALSO).

Dr Scott Fahlman, whose very readable paper 'An empirical study of learning speed in backpropagation networks' (1988) has driven many of the improvements made so far.

=head1 SEE ALSO

 AI::NNFlex
 AI::NNFlex::Backprop
 AI::NNFlex::Dataset

=head1 COPYRIGHT

Copyright (c) 2004-2005 Charles Colbourn. All rights reserved. This program is free software; you can redistribute it and/or modify it under the same terms as Perl itself.

=head1 CONTACT

charlesc@nnflex.g0n.net

=cut