File Coverage

blib/lib/AI/NeuralNet/Hopfield.pm
Criterion Covered Total %
statement 10 12 83.3
branch n/a
condition n/a
subroutine 4 4 100.0
pod n/a
total 14 16 87.5


line stmt bran cond sub pod time code
1             package AI::NeuralNet::Hopfield;
2              
3 1     1   21895 use v5.10;
  1         4  
  1         57  
4 1     1   6 use strict;
  1         1  
  1         36  
5 1     1   5 use warnings;
  1         6  
  1         37  
6 1     1   435 use Moose;
  0            
  0            
7             use Math::SparseMatrix;
8              
9              
10             =head1 NAME
11              
12             AI::NeuralNet::Hopfield - A simple Hopfield Network implementation.
13              
14             =head1 VERSION
15              
16             Version 0.1
17              
18             =cut
19              
20             our $VERSION = '0.1';
21              
22             has 'matrix' => ( is => 'rw', isa => 'Math::SparseMatrix');
23              
24             has 'matrix_rows' => ( is => 'rw', isa => 'Int');
25              
26             has 'matrix_cols' => ( is => 'rw', isa => 'Int');
27              
28             sub BUILD {
29             my $self = shift;
30             my $args = shift;
31             my $matrix = Math::SparseMatrix->new($args->{row}, $args->{col});
32             $self->matrix($matrix);
33             $self->matrix_rows($args->{row});
34             $self->matrix_cols($args->{col});
35             }
36              
37             sub train() {
38             my $self = shift;
39             my @pattern = @_;
40              
41             if ( ($#pattern + 1) != $self->matrix_rows) {
42             die "Can't train a pattern of size " . ($#pattern + 1) . " on a Hopfield network of size " . $self->matrix_rows;
43             }
44            
45             my $m2 = &convert_array($self->matrix_rows, $self->matrix_cols, @pattern);
46              
47             my $m1 = &transpose($m2);
48              
49             my $m3 = &multiply($m1, $m2);
50              
51             my $identity = &identity($m3->{_rows});
52              
53             my $m4 = &subtract($m3, $identity);
54              
55             my $m5 = &add($self->matrix, $m4);
56            
57             $self->matrix($m5);
58             }
59              
60             sub evaluate() {
61             my $self = shift;
62             my @pattern = @_;
63              
64             my @output = ();
65              
66             my $input_matrix = &convert_array($self->matrix_rows, $self->matrix_cols, @pattern);
67              
68             for (my $col = 1; $col <= ($#pattern + 1); $col++) {
69            
70             my $column_matrix = &get_col($self, $col);
71            
72             my $transposed_column_matrix = &transpose($column_matrix);
73            
74             my $dot_product = &dot_product($input_matrix, $transposed_column_matrix);
75            
76             #say $dot_product;
77              
78             if ($dot_product > 0) {
79             $output[$col - 1] = "true";
80             } else {
81             $output[$col - 1] = "false";
82             }
83             }
84             return @output;
85             }
86              
87             sub convert_array() {
88             my $rows = shift;
89             my $cols = shift;
90             my @pattern = @_;
91             my $result = Math::SparseMatrix->new(1, $cols);
92              
93             for (my $i = 0; $i < ($#pattern + 1); $i++) {
94             if ($pattern[$i] =~ m/true/ig) {
95             $result->set(1, ($i +1 ), 1);
96             } else {
97             $result->set(1, ($i + 1), -1);
98             }
99             }
100             return $result;
101             }
102              
103             sub transpose() {
104             my $matrix = shift;
105             my $rows = $matrix->{_rows};
106             my $cols = $matrix->{_cols};
107              
108             my $inverse = Math::SparseMatrix->new($cols, $rows);
109            
110             for (my $r = 1; $r <= $rows; $r++) {
111             for (my $c = 1; $c <= $cols; $c++) {
112             my $value = $matrix->get($r, $c);
113             $inverse->set($c, $r, $value);
114             }
115             }
116             return $inverse;
117             }
118              
119             sub multiply() {
120             my $matrix_a = shift;
121             my $matrix_b = shift;
122              
123             my $a_rows = $matrix_a->{_rows};
124             my $a_cols = $matrix_a->{_cols};
125              
126             my $b_rows = $matrix_b->{_rows};
127             my $b_cols = $matrix_b->{_cols};
128              
129             my $result = Math::SparseMatrix->new($a_rows, $b_cols);
130              
131             if ($matrix_a->{_cols} != $matrix_b->{_rows}) {
132             die "To use ordinary matrix multiplication the number of columns of the first matrix must match the number of rows of the second.";
133             }
134              
135             for (my $result_row = 1; $result_row <= $a_rows; $result_row++) {
136             for(my $result_col = 1; $result_col <= $b_cols; $result_col++) {
137             my $value = 0;
138             for (my $i = 1; $i <= $a_cols; $i++) {
139             $value += ($matrix_a->get($result_row, $i)) * ($matrix_b->get($i, $result_col));
140             }
141             $result->set($result_row, $result_col, $value);
142             }
143             }
144             return $result;
145             }
146              
147             sub identity() {
148             my $size = shift;
149              
150             if ($size < 1) {
151             die "Identity matrix must be at least of size 1.";
152             }
153            
154             my $result = Math::SparseMatrix->new ($size, $size);
155              
156             for (my $i = 1; $i <= $size; $i++) {
157             $result->set($i, $i, 1);
158             }
159             return $result;
160             }
161              
162             sub subtract() {
163             my $matrix_a = shift;
164             my $matrix_b = shift;
165              
166             my $a_rows = $matrix_a->{_rows};
167             my $a_cols = $matrix_a->{_cols};
168              
169             my $b_rows = $matrix_b->{_rows};
170             my $b_cols = $matrix_b->{_cols};
171              
172             if ($a_rows != $b_rows) {
173             die "To subtract the matrices they must have the same number of rows and columns.";
174             }
175              
176             if ($a_cols != $b_cols) {
177             die "To subtract the matrices they must have the same number of rows and columns. Matrix A has $a_cols columns and matrix B has $b_cols.";
178             }
179              
180             my $result = Math::SparseMatrix->new($a_rows, $a_cols);
181              
182             for (my $result_row = 1; $result_row <= $a_rows; $result_row++) {
183             for (my $result_col = 1; $result_col <= $a_cols; $result_col++) {
184             my $value = ( $matrix_a->get($result_row, $result_col) ) - ( $matrix_b->get($result_row, $result_col));
185            
186             if ($value == 0) {
187             $value += 2; # store 2 as a stand-in for 0; dot_product() converts it back to 0
188             }
189             $result->set($result_row, $result_col, $value);
190             }
191             }
192             return $result;
193             }
194              
195             sub add() {
196             #weight matrix.
197             my $matrix_a = shift;
198             #identity matrix.
199             my $matrix_b = shift;
200              
201             my $a_rows = $matrix_a->{_rows};
202             my $a_cols = $matrix_a->{_cols};
203              
204             my $b_rows = $matrix_b->{_rows};
205             my $b_cols = $matrix_b->{_cols};
206            
207             if ($a_rows != $b_rows) {
208             die "To add the matrices they must have the same number of rows and columns.";
209             }
210              
211             if ($a_cols != $b_cols) {
212             die "To add the matrices they must have the same number of rows and columns.";
213             }
214              
215             my $result = Math::SparseMatrix->new($a_rows, $a_cols);
216              
217             for (my $result_row = 1; $result_row <= $a_rows; $result_row++) {
218             for (my $result_col = 1; $result_col <= $a_cols; $result_col++) {
219             my $value = $matrix_b->get($result_row, $result_col);
220             $result->set($result_row, $result_col, $matrix_a->get($result_row, $result_col) + $value )
221             }
222             }
223             return $result;
224             }
225              
226             sub dot_product() {
227             my $matrix_a = shift;
228             my $matrix_b = shift;
229            
230             my $a_rows = $matrix_a->{_rows};
231             my $a_cols = $matrix_a->{_cols};
232            
233             my $b_rows = $matrix_b->{_rows};
234             my $b_cols = $matrix_b->{_cols};
235              
236             my @array_a = &packed_array($matrix_a);
237             my @array_b = &packed_array($matrix_b);
238              
239             for (my $n = 0; $n <= $#array_b; $n++) { # map the stand-in 2 values written by subtract() back to 0
240             if ($array_b[$n] == 2) {
241             $array_b[$n] = 0;
242             }
243             }
244            
245             if ($#array_a != $#array_b) {
246             die "To take the dot product, both matrices must be of the same length.";
247             }
248              
249             my $result = 0;
250             my $length = $#array_a + 1;
251              
252             for (my $i = 0; $i < $length; $i++) {
253             $result += $array_a[$i] * $array_b[$i];
254             }
255             return $result;
256             }
257              
258             sub packed_array() {
259             my $matrix = shift;
260             my @result = ();
261              
262             for (my $r = 1; $r <= $matrix->{_rows}; $r++) {
263             for (my $c = 1; $c <= $matrix->{_cols}; $c++) {
264             push(@result, $matrix->get($r, $c));
265             }
266             }
267             return @result;
268             }
269              
270             sub get_col() {
271             my $self = shift;
272             my $col = shift;
273              
274             my $matrix = $self->matrix();
275            
276             my $matrix_rows = $self->matrix_rows();
277              
278             if ($col > $matrix_rows) {
279             die "Can't get column";
280             }
281              
282             my $new_matrix = Math::SparseMatrix->new($matrix_rows, 1);
283              
284             for (my $row = 1; $row <= $matrix_rows; $row++) {
285             my $value = $matrix->get($row, $col);
286             $new_matrix->set($row, 1, $value);
287             }
288             return $new_matrix;
289             }
290              
291             sub print_matrix() {
292             my $matrix = shift;
293             my $rs = $matrix->{_rows};
294             my $cs = $matrix->{_cols};
295              
296             for (my $i = 1; $i <= $rs; $i++) {
297             for (my $j = 1; $j <= $cs; $j++) {
298             say "[$i,$j]" . $matrix->get($i, $j);
299             }
300             }
301             }
302              
303             =head1 SYNOPSIS
304              
305             This is a Hopfield Network implemented in Perl. Hopfield networks are sometimes called associative networks, since
306             they associate a class pattern with each input pattern; they are typically used for classification problems with binary pattern vectors.
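
A quick end-to-end sketch, assembled from the constructor and method examples documented below; the pattern values are illustrative:

    use AI::NeuralNet::Hopfield;

    # build a 4x4 network (4 neurons)
    my $hop = AI::NeuralNet::Hopfield->new(row => 4, col => 4);

    my @input_1 = qw(true true false false);
    $hop->train(@input_1);                   # store the pattern in the weight matrix

    my @input_2 = qw(true true true false);
    my @result  = $hop->evaluate(@input_2);  # list of "true"/"false", one per neuron
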
307              
308             =head1 SUBROUTINES/METHODS
309              
310             =head2 New
311              
312             To build a new classifier, pass the constructor the number of rows and columns (neurons) used to build the weight matrix.
313              
314             my $hop = AI::NeuralNet::Hopfield->new(row => 4, col => 4);
315              
316             =cut
317              
318             =head2 Train
319              
320             The training method configures the network memory.
321              
322             my @input_1 = qw(true true false false);
323             $hop->train(@input_1);
324              
325             =cut
326              
327             =head2 Evaluation
328              
329             The evaluation method compares a new input pattern with the information stored in the matrix memory.
330             The output is an array with the boolean evaluation of each neuron.
331              
332             my @input_2 = qw(true true true false);
333             my @result = $hop->evaluate(@input_2);
334              
335             =cut
336              
337              
338             =head1 AUTHOR
339              
340             Felipe da Veiga Leprevost, C<< <leprevost at cpan.org> >>
341              
342             =head1 BUGS
343              
344             Please report any bugs or feature requests to C<bug-ai-neuralnet-hopfield at rt.cpan.org>, or through
345             the web interface at L<http://rt.cpan.org/NoAuth/ReportBug.html?Queue=AI-NeuralNet-Hopfield>. I will be notified, and then you'll
346             automatically be notified of progress on your bug as I make changes.
347              
348              
349             =head1 SUPPORT
350              
351             You can find documentation for this module with the perldoc command.
352              
353             perldoc AI::NeuralNet::Hopfield
354              
355              
356             You can also look for information at:
357              
358             =over 4
359              
360             =item * RT: CPAN's request tracker (report bugs here)
361              
362             L<http://rt.cpan.org/NoAuth/Bugs.html?Dist=AI-NeuralNet-Hopfield>
363              
364             =item * AnnoCPAN: Annotated CPAN documentation
365              
366             L<http://annocpan.org/dist/AI-NeuralNet-Hopfield>
367              
368             =item * CPAN Ratings
369              
370             L<http://cpanratings.perl.org/d/AI-NeuralNet-Hopfield>
371              
372             =item * Search CPAN
373              
374             L<http://search.cpan.org/dist/AI-NeuralNet-Hopfield/>
375              
376             =back
377              
378              
379             =head1 ACKNOWLEDGEMENTS
380              
381              
382             =head1 LICENSE AND COPYRIGHT
383              
384             Copyright 2013 leprevost.
385              
386             This program is free software; you can redistribute it and/or modify it
387             under the terms of the Artistic License (2.0). You may obtain a
388             copy of the full license at:
389              
390             L<http://www.perlfoundation.org/artistic_license_2_0>
391              
392             Any use, modification, and distribution of the Standard or Modified
393             Versions is governed by this Artistic License. By using, modifying or
394             distributing the Package, you accept this license. Do not use, modify,
395             or distribute the Package, if you do not accept this license.
396              
397             If your Modified Version has been derived from a Modified Version made
398             by someone other than you, you are nevertheless required to ensure that
399             your Modified Version complies with the requirements of this license.
400              
401             This license does not grant you the right to use any trademark, service
402             mark, tradename, or logo of the Copyright Holder.
403              
404             This license includes the non-exclusive, worldwide, free-of-charge
405             patent license to make, have made, use, offer to sell, sell, import and
406             otherwise transfer the Package with respect to any patent claims
407             licensable by the Copyright Holder that are necessarily infringed by the
408             Package. If you institute patent litigation (including a cross-claim or
409             counterclaim) against any party alleging that the Package constitutes
410             direct or contributory patent infringement, then this Artistic License
411             to you shall terminate on the date that such litigation is filed.
412              
413             Disclaimer of Warranty: THE PACKAGE IS PROVIDED BY THE COPYRIGHT HOLDER
414             AND CONTRIBUTORS "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED WARRANTIES.
415             THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
416             PURPOSE, OR NON-INFRINGEMENT ARE DISCLAIMED TO THE EXTENT PERMITTED BY
417             YOUR LOCAL LAW. UNLESS REQUIRED BY LAW, NO COPYRIGHT HOLDER OR
418             CONTRIBUTOR WILL BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, OR
419             CONSEQUENTIAL DAMAGES ARISING IN ANY WAY OUT OF THE USE OF THE PACKAGE,
420             EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
421              
422              
423             =cut
424              
425             1; # End of AI::NeuralNet::Hopfield