=head1 NAME

AI::Perceptron - example of a node in a neural network.

=head1 SYNOPSIS

  use AI::Perceptron;

  my $p = AI::Perceptron->new
            ->num_inputs( 2 )
            ->learning_rate( 0.04 )
            ->threshold( 0.02 )
            ->weights([ 0.1, 0.2 ]);

  my @inputs  = ( 1.3, -0.45 );   # input can be any number
  my $target  = 1;                # output is always -1 or 1
  my $current = $p->compute_output( @inputs );

  print "current output: $current, target: $target\n";

  $p->add_examples( [ $target, @inputs ] );

  $p->max_iterations( 10 )->train or
      warn "couldn't train in 10 iterations!";

  print "training until it gets it right\n";
  $p->max_iterations( -1 )->train; # watch out for infinite loops

=cut
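
=head1 EXAMPLE

A minimal sketch of training the perceptron to behave like an AND gate,
using only the constructor arguments and methods defined below; the -1/1
encoding of the I<inputs> is a choice made for this example, not something
the module requires:

  use AI::Perceptron;

  my $and = AI::Perceptron->new( Inputs => 2, N => 0.1 );

  # each training example is an arrayref: [ $target, @inputs ]
  $and->add_examples( [ -1, -1, -1 ],
                      [ -1, -1,  1 ],
                      [ -1,  1, -1 ],
                      [  1,  1,  1 ] );

  $and->max_iterations( 100 )->train
      or warn "AND gate did not converge within 100 iterations";

  print $and->compute_output( 1, 1 ), "\n";   # prints 1 once trained

=cut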

package AI::Perceptron;

use strict;
use accessors qw( num_inputs learning_rate _weights threshold
                  training_examples max_iterations );

our $VERSION = '1.0';
our $Debug   = 0;

sub new {
    my $class = shift;
    my $self  = bless {}, $class;
    return $self->init( @_ );
}

# Set defaults for a new perceptron; recognized constructor args are
# Inputs (number of inputs), N (learning rate), T (threshold) and the
# deprecated W (arrayref of [ threshold, @weights ]).
sub init {
    my $self = shift;
    my %args = @_;

    $self->num_inputs( $args{Inputs} || 1 )
         ->learning_rate( $args{N} || 0.05 )
         ->max_iterations( -1 )
         ->threshold( $args{T} || 0.0 )
         ->training_examples( [] )
         ->weights( [] );

    # DEPRECATED: backwards compat
    if ($args{W}) {
        $self->threshold( shift @{ $args{W} } )
             ->weights( [ @{ $args{W} } ] );
    }

    return $self;
}

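# Constructor usage sketch (these argument names come straight from init()
# above; the W form is the deprecated one, with the threshold first):
#
#   my $p = AI::Perceptron->new( Inputs => 2, N => 0.05, T => 0.0 );
#   my $q = AI::Perceptron->new( Inputs => 2, W => [ 0.0, 0.1, 0.2 ] );
#   # $q->threshold is 0.0, $q->weights->[0] is 0.1, $q->weights->[1] is 0.2
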
# Make sure each of the num_inputs weights is defined, defaulting to 0.0.
sub verify_weights {
    my $self = shift;

    for my $i (0 .. $self->num_inputs-1) {
        $self->weights->[$i] ||= 0.0;
    }

    return $self;
}

# DEPRECATED: backwards compat
# In list context returns ( $threshold, @weights ); otherwise it behaves
# like the plain _weights accessor.
sub weights {
    my $self = shift;
    my $ret  = $self->_weights(@_);
    return wantarray ? ( $self->threshold, @{ $self->_weights } ) : $ret;
}

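# Context sketch for the deprecated accessor above:
#
#   my @all  = $p->weights;   # ( $threshold, @weights ) in list context
#   my $wref = $p->weights;   # arrayref of the weights alone otherwise
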
sub add_examples {
    my $self = shift;

    foreach my $ex (@_) {
        die "training examples must be arrayrefs!" unless (ref $ex eq 'ARRAY');
        my @inputs = @{$ex}; # be nice, take a copy
        my $target = shift @inputs;
        die "expected result must be either -1 or 1, not $target!"
          unless (abs $target == 1);
        # TODO: avoid duplicate entries
        push @{ $self->training_examples }, [$target, @inputs];
    }

    return $self;
}

sub add_example {
    shift->add_examples(@_);
}

sub compute_output {
    my $self   = shift;
    my @inputs = @_;

    my $sum = $self->threshold; # start at threshold
    for my $i (0 .. $self->num_inputs-1) {
        $sum += $self->weights->[$i] * $inputs[$i];
    }

    # binary (returning the real $sum is not part of this model)
    return ($sum > 0) ? 1 : -1;
}

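# Worked activation sketch, using the SYNOPSIS numbers: with threshold 0.02,
# weights ( 0.1, 0.2 ) and inputs ( 1.3, -0.45 ),
#
#   sum = 0.02 + 0.1 * 1.3 + 0.2 * (-0.45) = 0.06
#
# which is greater than 0, so compute_output() returns 1.
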
##
# $p->train( [ @training_examples ] )
#                    \--> [ $target_output, @inputs ]
sub train {
    my $self = shift;
    $self->add_examples( @_ ) if @_;

    $self->verify_weights;

    # adjust the weights for each training example until the output
    # function correctly classifies all the training examples.
    my $iter = 0;
    while (! $self->classifies_examples_correctly) {

        if (($self->max_iterations > 0) and
            ($iter >= $self->max_iterations)) {
            $self->emit( "stopped training after $iter iterations" );
            return;
        }

        $iter++;
        $self->emit( "Training iteration $iter" );

        foreach my $training_example (@{ $self->training_examples }) {
            my ($expected_output, @inputs) = @$training_example;

            $self->emit( "Training X=<", join(',', @inputs),
                         "> with target $expected_output" ) if $Debug > 1;

            # want the perceptron's output equal to training output
            # TODO: this duplicates work by classifies_examples_correctly()
            my $output = $self->compute_output(@inputs);
            next if ($output == $expected_output);

            $self->adjust_threshold( $expected_output, $output )
                 ->adjust_weights( \@inputs, $expected_output, $output );
        }
    }

    $self->emit( "completed in $iter iterations." );

    return $self;
}

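# Usage sketch: train() also accepts examples directly (it hands them to
# add_examples() first), returns $self once every example is classified
# correctly, and returns an empty value if it gives up at max_iterations:
#
#   $p->max_iterations( 10 )->train( [ 1, 0.5, 0.5 ] )
#       or warn "gave up after 10 iterations";
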
# returns true only if every training example is classified correctly
sub classifies_examples_correctly {
    my $self = shift;
    my $training_examples = $self->training_examples;

    foreach my $training_example (@$training_examples) {
        my ($output, @inputs) = @{$training_example};
        return if ($self->compute_output( @inputs ) != $output);
    }

    return 1;
}

# Move the threshold towards the expected output by the learning rate.
sub adjust_threshold {
    my $self            = shift;
    my $expected_output = shift;
    my $output          = shift;
    my $n               = $self->learning_rate;

    my $delta = $n * ($expected_output - $output);
    $self->threshold( $self->threshold + $delta );

    return $self;
}

# Apply the perceptron learning rule to each weight:
#   w[i] += learning_rate * (expected_output - output) * input[i]
sub adjust_weights {
    my $self            = shift;
    my $inputs          = shift;
    my $expected_output = shift;
    my $output          = shift;
    my $n               = $self->learning_rate;

    for my $i (0 .. $self->num_inputs-1) {
        my $delta = $n * ($expected_output - $output) * $inputs->[$i];
        $self->weights->[$i] += $delta;
    }

    return $self;
}

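# Worked update sketch with made-up numbers: with the default learning rate
# of 0.05, a misclassified example where $expected_output is 1, $output is
# -1 and the corresponding input is 2.0, that weight moves by
#
#   delta = 0.05 * (1 - (-1)) * 2.0 = 0.2
#
# while adjust_threshold() above moves the threshold by 0.05 * 2 = 0.1.
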
# Print debugging messages via warn(), but only when $Debug is set.
sub emit {
    return unless $Debug;
    my $self = shift;
    push @_, "\n" unless grep /\n/, @_;
    warn( @_ );
}

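# Debugging sketch: setting the package variable turns on the emit() trace;
# values above 1 also log each training example inside train():
#
#   $AI::Perceptron::Debug = 1;   # log each training iteration
#   $AI::Perceptron::Debug = 2;   # also log every example as it is trained
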
1;

__END__