File Coverage

blib/lib/AI/ANN.pm
Criterion     Covered   Total       %
statement           8      10    80.0
branch            n/a
condition         n/a
subroutine          4       4   100.0
pod               n/a
total              12      14    85.7


#!/usr/bin/perl
package AI::ANN;
BEGIN {
    $AI::ANN::VERSION = '0.008';
}
use strict;
use warnings;

# ABSTRACT: an artificial neural network simulator

use Moose;

use AI::ANN::Neuron;
use Storable qw(dclone);

has 'input_count' => (is => 'ro', isa => 'Int', required => 1);
has 'outputneurons' => (is => 'ro', isa => 'ArrayRef[Int]', required => 1);
has 'network' => (is => 'ro', isa => 'ArrayRef[HashRef]', required => 1);
# network is an arrayref of hashrefs. Each hashref is:
# object => AI::ANN::Neuron
# and has several other elements
has 'inputs' => (is => 'ro', isa => 'ArrayRef[Int]');
has 'rawpotentials' => (is => 'ro', isa => 'ArrayRef[Int]');
has 'minvalue' => (is => 'rw', isa => 'Int', default => 0);
has 'maxvalue' => (is => 'rw', isa => 'Int', default => 1);
has 'afunc' => (is => 'rw', isa => 'CodeRef', default => sub {sub {shift}});
has 'dafunc' => (is => 'rw', isa => 'CodeRef', default => sub {sub {1}});
has 'backprop_eta' => (is => 'rw', isa => 'Num', default => 0.1);

around BUILDARGS => sub {
    my $orig  = shift;
    my $class = shift;
    my %data;
    if ( @_ == 1 && ref $_[0] eq 'HASH' ) {
        %data = %{$_[0]};
    } else {
        %data = @_;
    }
    if (exists $data{'inputs'} && not exists $data{'input_count'}) {
        $data{'input_count'} = $data{'inputs'};
        delete $data{'inputs'}; # 'inputs' is used later for the actual
                                # values of the inputs.
    }
    my $neuronlist = $data{'data'};
    $data{'outputneurons'} = [];
    $data{'network'}       = [];
    for (my $i = 0; $i <= $#{$neuronlist}; $i++) {
        push @{$data{'outputneurons'}}, $i
            if $neuronlist->[$i]->{'iamanoutput'};
        my @pass = (
            $i,
            $neuronlist->[$i]->{'inputs'},
            $neuronlist->[$i]->{'neurons'} );
        push @pass, $neuronlist->[$i]->{'eta_inputs'},
                    $neuronlist->[$i]->{'eta_neurons'}
            if defined $neuronlist->[$i]->{'eta_neurons'};
        $data{'network'}->[$i]->{'object'} =
            AI::ANN::Neuron->new( @pass );
    }
    delete $data{'data'};
    return $class->$orig(%data);
};

sub execute {
    my $self = shift;
    my $inputs = $self->{'inputs'} = shift;
    # Don't bother dereferencing $inputs only to rereference a lot
    my $net = $self->{'network'}; # For less typing
    my $lastneuron = $#{$net};
    my @neurons = ();
    foreach my $i (0..$lastneuron) {
        $neurons[$i] = 0;
    }
    foreach my $i (0..$lastneuron) {
        delete $net->[$i]->{'done'};
        delete $net->[$i]->{'state'};
    }
    my $progress = 0;
    do {
        $progress = 0;
        foreach my $i (0..$lastneuron) {
            if ($net->[$i]->{'done'}) {next}
            if ($net->[$i]->{'object'}->ready($inputs, \@neurons)) {
                my $potential = $net->[$i]->{'object'}->execute($inputs, \@neurons);
                $self->{'rawpotentials'}->[$i] = $potential;
                $potential = $self->{'maxvalue'} if $potential > $self->{'maxvalue'};
                $potential = $self->{'minvalue'} if $potential < $self->{'minvalue'};
                $potential = &{$self->{'afunc'}}($potential);
                $neurons[$i] = $net->[$i]->{'state'} = $potential;
                $net->[$i]->{'done'} = 1;
                $progress++;
            }
        }
    } while ($progress); # If the network is feed-forward, we are now finished.

    my @notdone = grep {not (defined $net->[$_]->{'done'} &&
                             $net->[$_]->{'done'} == 1)} 0..$lastneuron;
    my @neuronstemp = ();
    if (@notdone) { # This is the part where we deal with loops and bad things
        my $maxerror = 0;
        my $loopcounter = 1;
        while (1) {
            foreach my $i (@notdone) { # Only bother iterating over the
                # ones we couldn't solve exactly.
                # We don't care if it's ready now, we're just going to iterate
                # until it stabilizes.
                if (not defined $neurons[$i] && $i <= $lastneuron) {
                    # Fixes warnings about uninitialized values, but we make
                    # sure $i is valid first.
                    $neurons[$i] = 0;
                }
                my $potential = $net->[$i]->{'object'}->execute($inputs, \@neurons);
                $self->{'rawpotentials'}->[$i] = $potential;
                $potential = $self->{'maxvalue'} if $potential > $self->{'maxvalue'};
                $potential = $self->{'minvalue'} if $potential < $self->{'minvalue'};
                $potential = &{$self->{'afunc'}}($potential);
                $neuronstemp[$i] = $net->[$i]->{'state'} = $potential;
                # We want to know the largest absolute change
                if (abs($neurons[$i]-$neuronstemp[$i]) > $maxerror) {
                    $maxerror = abs($neurons[$i]-$neuronstemp[$i]);
                }
            }
            foreach my $i (@notdone) {
                # Update @neurons, since that is what gets passed to execute.
                # Only the unsolved neurons changed; the solved ones keep
                # their values.
                $neurons[$i] = $neuronstemp[$i];
            }
            if (($maxerror < 0.0001 && $loopcounter >= 5) || $loopcounter > 250) {last}
            $loopcounter++;
            $maxerror = 0;
        }
    }

    # Ok, hopefully all the neurons have happy values by now.
    # Get the output values for neurons corresponding to outputneurons
    my @output = map {$neurons[$_]} @{$self->{'outputneurons'}};
    return \@output;
}

sub get_state {
    my $self = shift;
    my $net = $self->{'network'}; # For less typing
    my @neurons = map {$net->[$_]->{'state'}} 0..$#{$self->{'network'}};
    my @output  = map {$net->[$_]->{'state'}} @{$self->{'outputneurons'}};

    return $self->{'inputs'}, \@neurons, \@output;
}

sub get_internals {
    my $self = shift;
    my $net = $self->{'network'}; # For less typing
    my $retval = [];
    for (my $i = 0; $i <= $#{$self->{'network'}}; $i++) {
        $retval->[$i] = { iamanoutput => 0,
                          inputs      => $net->[$i]->{'object'}->inputs(),
                          neurons     => $net->[$i]->{'object'}->neurons(),
                          eta_inputs  => $net->[$i]->{'object'}->eta_inputs(),
                          eta_neurons => $net->[$i]->{'object'}->eta_neurons()
                        };
    }
    foreach my $i (@{$self->{'outputneurons'}}) {
        $retval->[$i]->{'iamanoutput'} = 1;
    }
    return dclone($retval); # dclone for safety
}

sub readable {
    my $self = shift;
    my $retval = "This network has " . $self->{'input_count'} . " inputs and " .
                 scalar(@{$self->{'network'}}) . " neurons.\n";
    for (my $i = 0; $i <= $#{$self->{'network'}}; $i++) {
        $retval .= "Neuron $i\n";
        while (my ($k, $v) = each %{$self->{'network'}->[$i]->{'object'}->inputs()}) {
            $retval .= "\tInput from input $k, weight is $v\n";
        }
        while (my ($k, $v) = each %{$self->{'network'}->[$i]->{'object'}->neurons()}) {
            $retval .= "\tInput from neuron $k, weight is $v\n";
        }
        if (grep {$_ == $i} @{$self->{'outputneurons'}}) {
            $retval .= "\tThis neuron is a network output\n";
        }
    }
    return $retval;
}

sub backprop {
    my $self = shift;
    my $inputs = shift;
    my $desired = shift;
    my $actual = $self->execute($inputs);
    my $net = $self->{'network'};
    my $lastneuron = $#{$net};
    my $deltas = [];
    my $i = 0;
    foreach my $neuron (@{$self->outputneurons()}) {
        $deltas->[$neuron] = $desired->[$i] - $actual->[$i];
        $i++;
    }
    foreach my $neuron (reverse 0..$lastneuron) {
        $deltas->[$neuron] ||= 0; # Avoid undef warnings on hidden neurons
        foreach my $i (reverse $neuron..$lastneuron) {
            my $weight = $net->[$i]->{'object'}->neurons()->[$neuron];
            if (defined $weight && $weight != 0 && $deltas->[$i]) {
                $deltas->[$neuron] += $weight * $deltas->[$i];
            }
        }
    } # Finished generating deltas
    foreach my $neuron (0..$lastneuron) {
        my $inputinputs  = $net->[$neuron]->{'object'}->inputs();
        my $neuroninputs = $net->[$neuron]->{'object'}->neurons();
        my $dafunc = &{$self->{'dafunc'}}($self->{'rawpotentials'}->[$neuron]);
        my $delta = $deltas->[$neuron] || 0;
        foreach my $i (0..$#{$inputinputs}) {
            $inputinputs->[$i] += $inputs->[$i]*$self->{'backprop_eta'}*$delta*$dafunc;
        }
        foreach my $i (0..$#{$neuroninputs}) {
            $neuroninputs->[$i] += $net->[$i]->{'state'}*$self->{'backprop_eta'}*$delta*$dafunc;
        }
        $net->[$neuron]->{'object'}->inputs($inputinputs);
        $net->[$neuron]->{'object'}->neurons($neuroninputs);
    } # Finished changing weights.
}

__PACKAGE__->meta->make_immutable;

1;

__END__
=pod

=head1 NAME

AI::ANN - an artificial neural network simulator

=head1 VERSION

version 0.008

=head1 SYNOPSIS

AI::ANN is an artificial neural network simulator. It differs from existing
solutions in that it fully exposes the internal variables and allows - and
forces - the user to fully customize the topology and specifics of the
produced neural network. If you want a simple solution, you do not want this
module. This module was specifically written to be used for a simulation of
evolution in neural networks, not training; only a basic backprop()
implementation is provided. Rather, we make it easy for a user to specify
the precise layout of their network (including both topology and weights, as
well as many parameters), and to then retrieve those details. The purpose of
this is to allow an additional module to then tweak these values by a means
that models evolution by natural selection. The canonical way to do this is
the included AI::ANN::Evolver, which allows the addition of random mutations
to individual networks, and the crossing of two networks. You will also,
depending on your application, need a fitness function of some sort, in
order to determine which networks to allow to propagate. Here is an example
of that system.

    use AI::ANN;
    my $network = AI::ANN->new( input_count => $inputcount, data => \@neuron_definition );
    my $outputs = $network->execute( \@inputs ); # Basic network use
    use AI::ANN::Evolver;
    my $handofgod = AI::ANN::Evolver->new(); # See that module for calling details
    my $network2 = $handofgod->mutate($network); # Random mutations
    # Test an entire 'generation' of networks, and let $network and $network2 be
    # among those with the highest fitness function in the generation.
    my $network3 = $handofgod->crossover($network, $network2);
    # Perhaps mutate() each network either before or after the crossover to
    # introduce variety.
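
For example, one generation might be scored and culled like this. This is a
minimal sketch only; fitness() is a hypothetical, application-specific
function that you must supply, and is not part of this distribution:

    # Score each network by running it and applying a user-supplied
    # fitness() function (hypothetical; define it for your problem).
    my @population = ($network, $network2, $network3);
    my @scored  = sort { $b->[1] <=> $a->[1] }
                  map  { [ $_, fitness($_->execute(\@inputs)) ] } @population;
    my @parents = map { $_->[0] } @scored[0, 1]; # Keep the two fittest
    my $child   = $handofgod->mutate($handofgod->crossover(@parents));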

We elected to do this with a new module rather than by extending an existing
module because of the extensive differences in the internal structure and the
interface that were necessary to accomplish these goals.

=head1 METHODS

=head2 new

    AI::ANN->new(input_count => $inputcount, data => [{ iamanoutput => 0, inputs => {$inputid => $weight, ...}, neurons => {$neuronid => $weight}}, ...])

input_count is the number of inputs.
data is an arrayref of neuron definitions.
The first neuron with iamanoutput == 1 is output 0, the second is output 1,
and so on.
minvalue is the minimum value a neuron can pass. Default 0.
maxvalue is the maximum value a neuron can pass. Default 1.
afunc is a reference to the activation function. It should be simple and fast.
The activation function is processed /after/ minvalue and maxvalue.
dafunc is the derivative of the activation function.
We strongly advise that you memoize your afunc and dafunc if they are at all
complicated. We will do our best to behave.
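
For instance, a two-neuron network using a sigmoid activation might be built
as follows. The topology and the sigmoid pair are illustrative assumptions,
not a recommended layout:

    use AI::ANN;
    my $sigmoid  = sub { 1 / (1 + exp(-$_[0])) };
    my $dsigmoid = sub { my $s = 1 / (1 + exp(-$_[0])); $s * (1 - $s) };
    my $network  = AI::ANN->new(
        input_count => 2,
        data => [
            # Neuron 0: weighted sum of both network inputs
            { iamanoutput => 0, inputs => { 0 => 0.5, 1 => 0.5 }, neurons => {} },
            # Neuron 1: reads neuron 0 and is the sole network output
            { iamanoutput => 1, inputs => {}, neurons => { 0 => 1 } },
        ],
        minvalue => -1,
        maxvalue => 1,
        afunc    => $sigmoid,
        dafunc   => $dsigmoid,
    );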

=head2 execute

    $network->execute( [$input0, $input1, ...] )

Runs the network for as many iterations as necessary to achieve a stable
network, then returns the output.
We store the current state of the network in two places - once in the object,
for persistence, and once in @neurons, for simplicity. This might be wrong,
but I couldn't think of a better way.
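
Continuing the constructor example above, a single run looks like:

    # execute() returns an arrayref with one value per output neuron
    my $outputs = $network->execute([1, 0]);
    print "output $_ = $outputs->[$_]\n" for 0 .. $#{$outputs};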

=head2 get_state

    $network->get_state()

Returns three arrayrefs, [$input0, ...], [$neuron0, ...], [$output0, ...],
corresponding to the data from the last call to execute().
Intended primarily to assist with debugging.
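
For example, to dump each neuron's settled value after a call to execute():

    my ($inputs, $neurons, $outputs) = $network->get_state();
    print "neuron $_ settled at $neurons->[$_]\n" for 0 .. $#{$neurons};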

=head2 get_internals

    $network->get_internals()

Returns the weights in a not-human-consumable format. The returned arrayref
of hashrefs has the same shape as the data argument to new(), so it can be
fed back into the constructor.
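
For example, to clone a network (input_count => 2 here assumes the
constructor example above; it must match the original network):

    my $genome = $network->get_internals();
    my $clone  = AI::ANN->new( input_count => 2, data => $genome );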

=head2 readable

    $network->readable()

Returns a human-friendly and diffable description of the network.

=head2 backprop

    $network->backprop(\@inputs, \@outputs)

Performs back-propagation learning on the neural network with the provided
training data. Uses backprop_eta as the training rate and dafunc as the
derivative of the activation function.
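
A minimal training sketch. The sample data and epoch count are illustrative
assumptions; afunc and dafunc must be a matching pair, as in the sigmoid
example under new():

    # Each sample is [ \@inputs, \@desired_outputs ]
    my @samples = ( [ [0, 1], [1] ], [ [1, 1], [0] ] );
    for my $epoch (1 .. 100) {
        $network->backprop($_->[0], $_->[1]) for @samples;
    }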

=head1 AUTHOR

Dan Collins <DCOLLINS@cpan.org>

=head1 COPYRIGHT AND LICENSE

This software is Copyright (c) 2011 by Dan Collins.

This is free software, licensed under:

  The GNU General Public License, Version 3, June 2007

=cut