File Coverage

blib/lib/PDL/MatrixOps.pm
Criterion Covered Total %
statement 28 31 90.3
branch 8 16 50.0
condition 3 9 33.3
subroutine 5 5 100.0
pod 0 2 0.0
total 44 63 69.8


line stmt bran cond sub pod time code
1             #
2             # GENERATED WITH PDL::PP from lib/PDL/MatrixOps.pd! Don't modify!
3             #
4             package PDL::MatrixOps;
5              
6             our @EXPORT_OK = qw(identity stretcher inv det determinant eigens_sym eigens svd lu_decomp lu_decomp2 lu_backsub simq squaretotri tritosquare tricpy mstack augment );
7             our %EXPORT_TAGS = (Func=>\@EXPORT_OK);
8              
9 70     70   486 use PDL::Core;
  70         157  
  70         470  
10 70     70   501 use PDL::Exporter;
  70         152  
  70         529  
11 70     70   331 use DynaLoader;
  70         153  
  70         36187  
12              
13              
14            
15             our @ISA = ( 'PDL::Exporter','DynaLoader' );
16             push @PDL::Core::PP, __PACKAGE__;
17             bootstrap PDL::MatrixOps ;
18              
19              
20              
21              
22              
23              
24              
25              
26             #line 14 "lib/PDL/MatrixOps.pd"
27              
28             =encoding utf8
29              
30             use strict;
31             use warnings;
32              
33             =head1 NAME
34              
35             PDL::MatrixOps -- Some Useful Matrix Operations
36              
37             =head1 SYNOPSIS
38              
39             $inv = $x->inv;
40              
41             $det = $x->det;
42              
43             ($lu,$perm,$par) = $x->lu_decomp;
44             $y = lu_backsub($lu,$perm,$z); # solve $x x $y = $z
45              
46             =head1 DESCRIPTION
47              
48             PDL::MatrixOps is PDL's built-in matrix manipulation code. It
49             contains utilities for many common matrix operations: inversion,
50             determinant finding, eigenvalue/vector finding, singular value
51             decomposition, etc. PDL::MatrixOps routines are written in a mixture
52             of Perl and C, so that they are reliably present even when there is no
53             external library available (e.g.
54             L<PDL::Slatec> or any of the PDL::GSL family of modules).
55              
56             Matrix manipulation, particularly with large matrices, is a
57             challenging field and no one algorithm is suitable in all cases. The
58             utilities here use general-purpose algorithms that work acceptably for
59             many cases but might not scale well to very large or pathological
60             (near-singular) matrices.
61              
62             Except as noted, the matrices are PDLs whose 0th dimension ranges over
63             column and whose 1st dimension ranges over row. The matrices appear
64             correctly when printed.
65              
66             These routines should work OK with L<PDL::Matrix> objects
67             as well as with normal PDLs.
68              
69             =head1 TIPS ON MATRIX OPERATIONS
70              
71             Like most computer languages, PDL addresses matrices in (column,row)
72             order in most cases; this corresponds to (X,Y) coordinates in the
73             matrix itself, counting rightwards and downwards from the upper left
74             corner. This means that if you print a PDL that contains a matrix,
75             the matrix appears correctly on the screen, but if you index a matrix
76             element, you use the indices in the reverse order that you would in a
77             math textbook. If you prefer your matrices indexed in (row, column)
78             order, you can try using the L<PDL::Matrix> object, which
79             includes an implicit exchange of the first two dimensions but should
80             be compatible with most of these matrix operations. TIMTOWDTI.)
81              
82             Matrices, row vectors, and column vectors can be multiplied with the 'x'
83             operator (which is, of course, broadcastable):
84              
85             $m3 = $m1 x $m2;
86             $col_vec2 = $m1 x $col_vec1;
87             $row_vec2 = $row_vec1 x $m1;
88             $scalar = $row_vec x $col_vec;
89              
90             Because of the (column,row) addressing order, 1-D PDLs are treated as
91             _row_ vectors; if you want a _column_ vector you must add a dummy dimension:
92              
93             $rowvec = pdl(1,2); # row vector
94             $colvec = $rowvec->slice('*1'); # 1x2 column vector
95             $matrix = pdl([[3,4],[6,2]]); # 2x2 matrix
96             $rowvec2 = $rowvec x $matrix; # right-multiplication by matrix
97             $colvec = $matrix x $colvec; # left-multiplication by matrix
98             $m2 = $matrix x $rowvec; # Throws an error
99              
100             Implicit broadcasting works correctly with most matrix operations, but
101             you must be extra careful that you understand the dimensionality. In
102             particular, matrix multiplication and other matrix ops need nx1 PDLs
103             as row vectors and 1xn PDLs as column vectors. In most cases you must
104             explicitly include the trailing 'x1' dimension in order to get the expected
105             results when you broadcast over multiple row vectors.
106              
107             When broadcasting over matrices, it's very easy to get confused about
108             which dimension goes where. It is useful to include comments with
109             every expression, explaining what you think each dimension means:
110              
111             $x = xvals(360)*3.14159/180; # (angle)
112             $rot = cat(cat(cos($x),sin($x)), # rotmat: (col,row,angle)
113             cat(-sin($x),cos($x)));
114              
115             =head1 ACKNOWLEDGEMENTS
116              
117             MatrixOps includes algorithms and pre-existing code from several
118             origins. In particular, C<eigens> is the work of Stephen Moshier,
119             C<svd> uses an SVD subroutine written by Bryant Marks, and C<simq>
120             uses a subset of the Small Scientific Library by Kenneth Geisshirt.
121             They are free software, distributable under same terms as PDL itself.
122              
123             =head1 NOTES
124              
125             This is intended as a general-purpose linear algebra package for
126             small-to-mid sized matrices. The algorithms may not scale well to
127             large matrices (hundreds by hundreds) or to near singular matrices.
128              
129             If there is something you want that is not here, please add and
130             document it!
131              
132             =cut
133              
134             use Carp;
135             use strict;
136             #line 137 "lib/PDL/MatrixOps.pm"
137              
138              
139             =head1 FUNCTIONS
140              
141             =cut
142              
143              
144              
145              
146              
147             #line 131 "lib/PDL/MatrixOps.pd"
148              
149             =head2 identity
150              
151             =for sig
152              
153             Signature: (n; [o]a(n,n))
154              
155             =for ref
156              
157             Return an identity matrix of the specified size. If you hand in a
158             scalar, its value is the size of the identity matrix; if you hand in a
159             dimensioned PDL, the 0th dimension is the first two dimensions of the
160             matrix, with higher dimensions preserved.
161              
162             =cut
163              
164             sub identity {
165             my $n = shift;
166             my $out =
167             !(my $was_pdl = UNIVERSAL::isa($n,'PDL')) ? zeroes($n,$n) :
168             $n->getndims == 0 ? zeroes($n->type, $n->at(0),$n->at(0)) :
169             undef;
170             if (!defined $out) {
171             my @dims = $n->dims;
172             $out = zeroes($n->type, @dims[0, 0, 2..$#dims]);
173             }
174             $out->diagonal(0,1)++;
175             $was_pdl ? bless $out, ref($n) : $out;
176             }
177              
178             #line 164 "lib/PDL/MatrixOps.pd"
179              
180             =head2 stretcher
181              
182             =for sig
183              
184             Signature: (a(n); [o]b(n,n))
185              
186             =for usage
187              
188             $mat = stretcher($eigenvalues);
189              
190             =for ref
191              
192             Return a diagonal matrix with the specified diagonal elements.
193             Preserves higher dimensions.
194             As of 2.096, it will also have the same datatype.
195              
196             =cut
197              
198             sub stretcher {
199             my $in = shift;
200             my $out = zeroes($in->type, $in->dim(0), $in->dims);
201             $out->diagonal(0,1) += $in;
202             $out;
203             }
204              
205             #line 196 "lib/PDL/MatrixOps.pd"
206              
207             =head2 inv
208              
209             =for sig
210              
211             Signature: (a(m,m); sv opt )
212              
213             =for usage
214              
215             $a1 = inv($a, {$opt});
216              
217             =for ref
218              
219             Invert a square matrix.
220              
221             You feed in an NxN matrix in $a, and get back its inverse (if it
222             exists). The code is inplace-aware, so you can get back the inverse
223             in $a itself if you want -- though temporary storage is used either
224             way. You can cache the LU decomposition in an output option variable.
225              
226             C<inv> uses C<lu_decomp> by default; that is a numerically stable
227             (pivoting) LU decomposition method.
228              
229             OPTIONS:
230              
231             =over 3
232              
233             =item * s
234              
235             Boolean value indicating whether to complain if the matrix is singular. If
236             this is false, singular matrices cause inverse to barf. If it is true, then
237             singular matrices cause inverse to return undef.
238              
239             =item * lu (I/O)
240              
241             This value contains a list ref with the LU decomposition, permutation,
242             and parity values for C<$a>. If you do not mention the key, or if the
243             value is undef, then inverse calls C<lu_decomp>. If the key exists with
244             an undef value, then the output of C<lu_decomp> is stashed here (unless
245             the matrix is singular). If the value exists, then it is assumed to
246             hold the LU decomposition.
247              
248             =item * det (Output)
249              
250             If this key exists, then the determinant of C<$a> gets stored here,
251             whether or not the matrix is singular.
252              
253             =back
254              
255             =cut
256              
257             *PDL::inv = \&inv;
258             sub inv {
259             my $x = shift;
260             my $opt = shift;
261             $opt = {} unless defined($opt);
262              
263             barf "inverse needs a square PDL as a matrix\n"
264             unless(UNIVERSAL::isa($x,'PDL') &&
265             $x->dims >= 2 &&
266             $x->dim(0) == $x->dim(1)
267             );
268              
269             my ($lu,$perm,$par);
270             if(exists($opt->{lu}) &&
271             ref $opt->{lu} eq 'ARRAY' &&
272             ref $opt->{lu}->[0] eq 'PDL') {
273             ($lu,$perm,$par) = @{$opt->{lu}};
274             } else {
275             ($lu,$perm,$par) = lu_decomp($x);
276             @{$opt->{lu}} = ($lu,$perm,$par)
277             if(ref $opt->{lu} eq 'ARRAY');
278             }
279              
280             my $det = (defined $lu) ? $lu->diagonal(0,1)->prodover * $par : pdl(0);
281             $opt->{det} = $det
282             if exists($opt->{det});
283              
284             unless($det->nelem > 1 || $det) {
285             return undef
286             if $opt->{s};
287             barf("PDL::inv: got a singular matrix or LU decomposition\n");
288             }
289              
290             my $out = lu_backsub($lu,$perm,$par,identity($x))->transpose->sever;
291              
292             return $out
293             unless($x->is_inplace);
294              
295             $x .= $out;
296             $x;
297             }
298              
299             #line 296 "lib/PDL/MatrixOps.pd"
300              
301             =head2 det
302              
303             =for sig
304              
305             Signature: (a(m,m); sv opt)
306              
307             =for usage
308              
309             $det = det($a,{opt});
310              
311             =for ref
312              
313             Determinant of a square matrix using LU decomposition (for large matrices)
314              
315             You feed in a square matrix, you get back the determinant. Some
316             options exist that allow you to cache the LU decomposition of the
317             matrix (note that the LU decomposition is invalid if the determinant
318             is zero!). The LU decomposition is cacheable, in case you want to
319             re-use it. This method of determinant finding is more rapid than
320             recursive-descent on large matrices, and if you reuse the LU
321             decomposition it's essentially free.
322              
323             OPTIONS:
324              
325             =over 3
326              
327             =item * lu (I/O)
328              
329             Provides a cache for the LU decomposition of the matrix. If you
330             provide the key but leave the value undefined, then the LU decomposition
331             goes in here; if you put an LU decomposition here, it will be used and
332             the matrix will not be decomposed again.
333              
334             =back
335              
336             =cut
337              
338             *PDL::det = \&det;
339             sub det {
340             my ($x, $opt) = @_;
341             $opt = {} unless defined($opt);
342             my($lu,$perm,$par);
343             if(exists ($opt->{lu}) and (ref $opt->{lu} eq 'ARRAY')) {
344             ($lu,$perm,$par) = @{$opt->{lu}};
345             } else {
346             ($lu,$perm,$par) = lu_decomp($x);
347             $opt->{lu} = [$lu,$perm,$par]
348             if(exists($opt->{lu}));
349             }
350             defined $lu ? $lu->diagonal(0,1)->prodover * $par : PDL->zeroes(sbyte,1);
351             }
352              
353             #line 355 "lib/PDL/MatrixOps.pd"
354              
355             =head2 determinant
356              
357             =for sig
358              
359             Signature: (a(m,m))
360              
361             =for usage
362              
363             $det = determinant($x);
364              
365             =for ref
366              
367             Determinant of a square matrix, using recursive descent (broadcastable).
368              
369             This is the traditional, robust recursive determinant method taught in
370             most linear algebra courses. It scales like C<n!> (and hence is
371             pitifully slow for large matrices) but is very robust because no
372             division is involved (hence no division-by-zero errors for singular
373             matrices). It's also broadcastable, so you can find the determinants of
374             a large collection of matrices all at once if you want.
375              
376             Matrices up to 3x3 are handled by direct multiplication; larger matrices
377             are handled by recursive descent to the 3x3 case.
378              
379             The LU-decomposition method L</det> is faster in isolation for
380             single matrices larger than about 4x4, and is much faster if you end up
381             reusing the LU decomposition of C<$a> (NOTE: check performance and
382             broadcasting benchmarks with new code).
383              
384             =cut
385              
386             *PDL::determinant = \&determinant;
387             sub determinant {
388             my($x) = shift;
389             my($n);
390             return undef unless(
391             UNIVERSAL::isa($x,'PDL') &&
392             $x->getndims >= 2 &&
393             ($n = $x->dim(0)) == $x->dim(1)
394             );
395              
396             return $x->clump(2) if($n==1);
397             if($n==2) {
398             my($y) = $x->clump(2);
399             return $y->index(0)*$y->index(3) - $y->index(1)*$y->index(2);
400             }
401             if($n==3) {
402             my($y) = $x->clump(2);
403             my $y3 = $y->index(3);
404             my $y4 = $y->index(4);
405             my $y5 = $y->index(5);
406             my $y6 = $y->index(6);
407             my $y7 = $y->index(7);
408             my $y8 = $y->index(8);
409              
410             return (
411             $y->index(0) * ( $y4 * $y8 - $y5 * $y7 )
412             + $y->index(1) * ( $y5 * $y6 - $y3 * $y8 )
413             + $y->index(2) * ( $y3 * $y7 - $y4 * $y6 )
414             );
415             }
416              
417             my($i);
418             my($sum) = zeroes($x->slice('(0),(0)'));
419              
420             # Do middle submatrices
421             for $i(1..$n-2) {
422             my $el = $x->slice("($i),(0)");
423             next if( ($el==0)->all ); # Optimize away unnecessary recursion
424             $sum += $el * (1-2*($i%2)) *
425             determinant($x->slice("0:".($i-1).",1:-1")->
426             append($x->slice(($i+1).":-1,1:-1")));
427             }
428              
429             # Do beginning and end submatrices
430             $sum += $x->slice("(0),(0)") * determinant($x->slice('1:-1,1:-1'));
431             $sum -= $x->slice("(-1),(0)") * determinant($x->slice('0:-2,1:-1')) * (1 - 2*($n % 2));
432              
433             return $sum;
434             }
435             #line 436 "lib/PDL/MatrixOps.pm"
436              
437              
438             =head2 eigens_sym
439              
440             =for sig
441              
442             Signature: ([phys]a(m); [o,phys]ev(n,n); [o,phys]e(n))
443             Types: (double)
444              
445             =for ref
446              
447             Eigenvalues and -vectors of a symmetric square matrix. If passed
448             an asymmetric matrix, the routine will warn and symmetrize it, by taking
449             the average value. That is, it will solve for 0.5*($a+$a->transpose).
450              
451             It's broadcastable, so if C<$a> is 3x3x100, it's treated as 100 separate 3x3
452             matrices, and both C<$ev> and C<$e> get extra dimensions accordingly.
453              
454             If called in scalar context it hands back only the eigenvalues. Ultimately,
455             it should switch to a faster algorithm in this case (as discarding the
456             eigenvectors is wasteful).
457              
458             The algorithm used is due to J. von Neumann, which was a rediscovery of
459             L<Jacobi's method|https://en.wikipedia.org/wiki/Jacobi_eigenvalue_algorithm>.
460              
461             The eigenvectors are returned in COLUMNS of the returned PDL. That
462             makes it slightly easier to access individual eigenvectors, since the
463             0th dim of the output PDL runs across the eigenvectors and the 1st dim
464             runs across their components.
465              
466             ($ev,$e) = eigens_sym $x; # Make eigenvector matrix
467             $vector = $ev->slice($n); # Select nth eigenvector as a column-vector
468             $vector = $ev->slice("($n)"); # Select nth eigenvector as a row-vector
469              
470             As of 2.096, the eigenvalues are returned in ascending order.
471              
472             To compare with L<PDL::LinearAlgebra>:
473              
474             use PDL::LinearAlgebra;
475             ($val, $rvec) = msymeigen($A = pdl([3,4], [4,-3]),1,1);
476             print $val->slice(1) * $rvec->slice(1);
477             #[
478             # [ -4.472136]
479             # [ -2.236068]
480             #]
481             print $A x $rvec->slice(1);
482             #[
483             # [ -4.472136]
484             # [ -2.236068]
485             #]
486             ($rvec, $val) = eigens_sym($A); # note return values other way round
487             # otherwise the same
488              
489             =for usage
490              
491             ($ev, $e) = eigens_sym($x); # e-vects & e-values
492             $e = eigens_sym($x); # just eigenvalues
493              
494             =pod
495              
496             Broadcasts over its inputs.
497              
498             =for bad
499              
500             C<eigens_sym> ignores the bad-value flag of the input ndarrays.
501             It will set the bad-value flag of all output ndarrays if the flag is set for any of the input ndarrays.
502              
503             =cut
504              
505              
506              
507              
508             sub PDL::eigens_sym {
509 3     3 0 8 my ($x) = @_;
510 3         11 my @d = $x->dims;
511 3 50 33     16 barf "Need real square matrix for eigens_sym"
512             if @d < 2 or $d[0] != $d[1];
513 3         11 my ($sym) = 0.5*($x + $x->transpose);
514 3         21 my ($err) = PDL::max(abs($sym));
515 3 50       15 barf "Need symmetric component non-zero for eigens_sym" if $err == 0;
516 3         36 $err = PDL::max(abs($x-$sym))/$err;
517 3 0 33     13 warn "Using symmetrized version of the matrix in eigens_sym"
518             if $err > 1e-5 && $PDL::debug;
519 3         40 PDL::_eigens_sym_int($sym->squaretotri, my $ev=PDL->null, my $e=PDL->null);
520 3 50       14 return ($ev->transpose, $e) if wantarray;
521 0         0 $e; #just eigenvalues
522             }
523              
524              
525              
526             *eigens_sym = \&PDL::eigens_sym;
527              
528              
529              
530              
531              
532              
533             =head2 eigens
534              
535             =for sig
536              
537             Signature: ([phys]a(n,n); complex [o,phys]ev(n,n); complex [o,phys]e(n))
538             Types: (double)
539              
540             =for ref
541              
542             Complex eigenvalues and -vectors of a real square matrix.
543              
544             (See also L<"eigens_sym"|/eigens_sym>, for eigenvalues and -vectors
545             of a real, symmetric, square matrix).
546              
547             The eigens function will attempt to compute the eigenvalues and
548             eigenvectors of a square matrix with real components. If the matrix
549             is symmetric, the same underlying code as L<"eigens_sym"|/eigens_sym>
550             is used. If asymmetric, the eigenvalues and eigenvectors are computed
551             with algorithms from the sslib library. These are a slightly modified
552             version of EISPACK's C code.
553              
554             Not all square matrices are diagonalizable. If you feed in a
555             non-diagonalizable matrix, then the algorithm may fail, in which case
556             an exception will be thrown.
557              
558             C<eigens> is broadcastable, so you can solve 100 eigenproblems by
559             feeding in a 3x3x100 array. Both C<$ev> and C<$e> get extra dimensions accordingly.
560              
561             If called in scalar context C<eigens> hands back only the eigenvalues. This
562             is somewhat wasteful, as it calculates the eigenvectors anyway.
563              
564             The eigenvectors are returned in COLUMNS of the returned PDL (ie
565             the 0 dimension). That makes it slightly easier to access individual
566             eigenvectors, since the 0th dim of the output PDL runs across the
567             eigenvectors and the 1st dim runs across their components.
568              
569             ($ev,$e) = eigens $x; # Make eigenvector matrix
570             $vector = $ev->slice($n); # Select nth eigenvector as a column-vector
571             $vector = $ev->slice("($n)"); # Select nth eigenvector as a row-vector
572              
573             To compare with L<PDL::LinearAlgebra>:
574              
575             use PDL::LinearAlgebra;
576             ($val, $lvec, $rvec) = meigen($A = pdl([4,-1], [2,1]),1,1);
577             print $val->slice(1) * $rvec->slice(1);
578             #[
579             # [0.894427190999916]
580             # [ 1.78885438199983]
581             #]
582             print $A x $rvec->slice(1);
583             #[
584             # [0.894427190999916]
585             # [ 1.78885438199983]
586             #]
587             ($rvec, $val) = eigens($A); # note return values other way round
588             # otherwise the same
589              
590             =for usage
591              
592             ($ev, $e) = eigens($x); # e'vects & e'vals
593             $e = eigens($x); # just eigenvalues
594              
595             =pod
596              
597             Broadcasts over its inputs.
598              
599             =for bad
600              
601             C<eigens> ignores the bad-value flag of the input ndarrays.
602             It will set the bad-value flag of all output ndarrays if the flag is set for any of the input ndarrays.
603              
604             =cut
605              
606              
607              
608              
609             sub PDL::eigens {
610 6     6 0 58 my ($x) = @_;
611 6         21 my @d = $x->dims;
612 6 50 33     22 barf "Need real square matrix for eigens"
613             if @d < 2 or $d[0] != $d[1];
614 6         20 my $deviation = PDL::max(abs($x - $x->transpose))/PDL::max(abs($x));
615 6 100       48 if ( $deviation <= 1e-5 ) {
616 2         89 PDL::_eigens_sym_int($x->squaretotri, my $ev=PDL->null, my $e=PDL->null);
617 2 50       17 return $ev->transpose, $e if wantarray;
618 0         0 return $e; #just eigenvalues
619             } else {
620 4         12 PDL::_eigens_int($x, my $ev=PDL->null, my $e=PDL->null);
621 3 50       36 return $ev, $e if wantarray;
622 0           return $e; #just eigenvalues
623             }
624             }
625              
626              
627              
628             *eigens = \&PDL::eigens;
629              
630              
631              
632              
633              
634              
635             =head2 svd
636              
637             =for sig
638              
639             Signature: (a(n,m); [t]w(wsize=CALC($SIZE(n) * ($SIZE(m) + $SIZE(n)))); [o]u(n,m); [o,phys]z(n); [o]v(n,n))
640             Types: (double)
641              
642             =for usage
643              
644             ($u, $z, $v) = svd($a);
645             svd($a, $u, $z, $v); # all arguments given
646             ($u, $z, $v) = $a->svd; # method call
647             $a->svd($u, $z, $v);
648              
649             =for ref
650              
651             Singular value decomposition of a matrix.
652              
653             C<svd> is broadcastable.
654              
655             Given an m x n matrix C<$a> that has m rows and n columns (m >= n),
656             C computes matrices C<$u> and C<$v>, and a vector of the singular
657             values C<$s>. Like most implementations, C computes what is
658             commonly referred to as the "thin SVD" of C<$a>, such that C<$u> is m
659             x n, C<$v> is n x n, and there are <=n singular values. As long as m
660             >= n, the original matrix can be reconstructed as follows:
661              
662             ($u,$s,$v) = svd($x);
663             $ess = zeroes($x->dim(0),$x->dim(0));
664             $ess->slice("$_","$_").=$s->slice("$_") foreach (0..$x->dim(0)-1); #generic diagonal
665             $a_copy = $u x $ess x $v->transpose;
666              
667             If m==n, C<$u> and C<$v> can be thought of as rotation matrices that
668             convert from the original matrix's singular coordinates to final
669             coordinates, and from original coordinates to singular coordinates,
670             respectively, and $ess is a diagonal scaling matrix.
671              
672             If n>m, C<svd> will barf. This can be avoided by passing in the
673             transpose of C<$a>, and reconstructing the original matrix like so:
674              
675             ($u,$s,$v) = svd($x->transpose);
676             $ess = zeroes($x->dim(1),$x->dim(1));
677             $ess->slice($_,$_).=$s->slice($_) foreach (0..$x->dim(1)-1); #generic diagonal
678             $x_copy = $v x $ess x $u->transpose;
679              
680             EXAMPLE
681              
682             The computing literature has loads of examples of how to use SVD.
683             Here's a trivial example (used in L<PDL::Transform::map|PDL::Transform/map>)
684             of how to make a matrix less, er, singular, without changing the
685             orientation of the ellipsoid of transformation:
686              
687             { my($r1,$s,$r2) = svd $x;
688             $s++; # fatten all singular values
689             $r2 *= $s; # implicit broadcasting for cheap mult.
690             $x .= $r2 x $r1; # a gets r2 x ess x r1
691             }
692              
693             =pod
694              
695             Broadcasts over its inputs.
696              
697             =for bad
698              
699             C<svd> ignores the bad-value flag of the input ndarrays.
700             It will set the bad-value flag of all output ndarrays if the flag is set for any of the input ndarrays.
701              
702             =cut
703              
704              
705              
706              
707             *svd = \&PDL::svd;
708              
709              
710              
711              
712              
713             #line 690 "lib/PDL/MatrixOps.pd"
714              
715             =head2 lu_decomp
716              
717             =for sig
718              
719             Signature: (a(m,m); [o]lu(m,m); [o]perm(m); [o]parity)
720              
721             =for ref
722              
723             LU decompose a matrix, with row permutation
724              
725             =for usage
726              
727             ($lu, $perm, $parity) = lu_decomp($x);
728              
729             $lu = lu_decomp($x, $perm, $par); # $perm and $par are outputs!
730              
731             lu_decomp($x->inplace,$perm,$par); # Everything in place.
732              
733             =for description
734              
735             C<lu_decomp> returns an LU decomposition of a square matrix,
736             using Crout's method with partial pivoting. It's ported
737             from I<Numerical Recipes>. The partial pivoting keeps it
738             numerically stable but means a little more overhead from
739             broadcasting.
740              
741             C<lu_decomp> decomposes the input matrix into matrices L and
742             U such that LU = A, L is a subdiagonal matrix, and U is a
743             superdiagonal matrix. By convention, the diagonal of L is
744             all 1's.
745              
746             The single output matrix contains all the variable elements
747             of both the L and U matrices, stacked together. Because the
748             method uses pivoting (rearranging the lower part of the
749             matrix for better numerical stability), you have to permute
750             input vectors before applying the L and U matrices. The
751             permutation is returned either in the second argument or, in
752             list context, as the second element of the list. You need
753             the permutation for the output to make any sense, so be sure
754             to get it one way or the other.
755              
756             LU decomposition is the answer to a lot of matrix questions,
757             including inversion and determinant-finding, and C<lu_decomp>
758             is used by L<inv|/inv>.
759              
760             If you pass in C<$perm> and C<$parity>, they either must be
761             predeclared PDLs of the correct size ($perm is an n-vector,
762             C<$parity> is a scalar) or scalars.
763              
764             If the matrix is singular, then the LU decomposition might
765             not be defined; in those cases, C<lu_decomp> silently returns
766             undef. Some singular matrices LU-decompose just fine, and
767             those are handled OK but give a zero determinant (and hence
768             can't be inverted).
769              
770             C<lu_decomp> uses pivoting, which rearranges the values in the
771             matrix for more numerical stability. This makes it really
772             good for large and even near-singular matrices. There is
773             a non-pivoting version C<lu_decomp2> available which is
774             from 5 to 60 percent faster for typical problems at
775             the expense of failing to compute a result in some cases.
776              
777             Now that the C<lu_decomp> is broadcasted, it is the recommended
778             LU decomposition routine. It no longer falls back to C<lu_decomp2>.
779              
780             C<lu_decomp> is ported from I<Numerical Recipes> to PDL. It
781             should probably be implemented in C.
782              
783             =cut
784              
785             *PDL::lu_decomp = \&lu_decomp;
786              
                # lu_decomp($a, [$perm, $parity, $sing_ok])
                #
                # Crout LU decomposition with partial (row) pivoting, ported from
                # Numerical Recipes.  Returns the packed L/U matrix (operating in
                # place when $a->inplace is set); in list context also returns the
                # row-permutation vector and the pivot parity.  Returns undef when
                # the matrix has an all-zero row, and an empty list when a pivot
                # smaller than $TINY is found (singular matrix).
787             sub lu_decomp {
788             my($in) = shift;
789             my($permute) = shift;
790             my($parity) = shift;
                # NOTE(review): $sing_ok is accepted but never used below.
791             my($sing_ok) = shift;
792              
                # Pivot magnitudes below this threshold are treated as singular.
793             my $TINY = 1e-30;
794              
                # Input must be a PDL whose leading dims are square (m,m);
                # any further dims broadcast.
795             barf("lu_decomp requires a square (2D) PDL\n")
796             if(!UNIVERSAL::isa($in,'PDL') ||
797             $in->ndims < 2 ||
798             $in->dim(0) != $in->dim(1));
799              
800             my($n) = $in->dim(0);
                # $n1 is the last valid index (n-1), used to build slice strings.
801             my($n1) = $n; $n1--;
802              
803             my($inplace) = $in->is_inplace;
804             my($out) = ($inplace) ? $in : $in->copy;
805              
                # Start the permutation output at the identity permutation, either
                # writing into a caller-supplied vector (validated) or making one.
806             if(defined $permute) {
807             barf('lu_decomp: permutation vector must match the matrix')
808             if(!UNIVERSAL::isa($permute,'PDL') ||
809             $permute->ndims != 1 ||
810             $permute->dim(0) != $out->dim(0));
811             $permute .= PDL->xvals($in->dim(0));
812             } else {
813             $permute = $in->slice("(0)")->xvals;
814             }
815              
                # Parity starts at +1 and flips sign on every row swap below.
816             if(defined $parity) {
817             barf('lu_decomp: parity must be a scalar PDL')
818             if(!UNIVERSAL::isa($parity,'PDL') ||
819             $parity->dim(0) != 1);
820             $parity .= 1.0;
821             } else {
822             $parity = $in->slice('(0),(0)')->ones;
823             }
824              
825             my($scales) = $in->copy->abs->maximum; # elementwise by rows
826              
                # An all-zero row means the matrix is singular: bail out early.
827             if(($scales==0)->sum) {
828             return undef;
829             }
830              
831             # Some holding tanks
832             my($tmprow) = $out->slice('(0)')->zeroes;
833             $tmprow = $tmprow->double if $tmprow->type < double;
834             my($tmpval) = $tmprow->slice('(0)')->sever;
835              
836             my($col,$row);
837             for $col(0..$n1) {
                # Crout inner update: subtract the accumulated L*U products from
                # each element below/right of what has already been factored.
838             for $row(1..$n1) {
839             my($klim) = $row<$col ? $row : $col;
840             if($klim > 0) {
841             $klim--;
842             my($el) = $out->index2d($col,$row);
843             $el -= ( $out->slice("($col),0:$klim") *
844             $out->slice("0:$klim,($row)") )->sumover;
845             }
846              
847             }
848              
849             # Figure a_ij, with pivoting
850              
851             if($col < $n1) {
852             # Find the maximum value in the rest of the row
853             my $sl = $out->slice("($col),$col:$n1");
854             my $wh = $sl->abs->maximum_ind;
855             my $big = $sl->index($wh)->sever;
856              
857             # Permute if necessary to make the diagonal the maximum
858             # if($wh != 0)
859             { # Permute rows to place maximum element on diagonal.
860             my $whc = $wh+$col;
861              
862             my $sl1 = $out->mv(1,0)->index($whc->slice("*$n"));
863             my $sl2 = $out->slice(":,($col)");
                # Three-way swap through the holding tanks; done unconditionally
                # so it stays correct across broadcast instances.
864             $tmprow .= $sl1; $sl1 .= $sl2; $sl2 .= $tmprow;
865              
866             $sl1 = $permute->index($whc);
867             $sl2 = $permute->index($col);
868             $tmpval .= $sl1; $sl1 .= $sl2; $sl2 .= $tmpval;
869              
                # Only broadcast instances that actually swapped rows flip parity.
870             $parity->where($wh>0) *= -1.0;
871             }
872              
873             # LAPACK cgetrf does not try fix singularity so nor do we, even though NR does
874             my $notbig = $big->where(abs($big) < $TINY);
875             return if !$notbig->isempty;
876              
877             # Divide by the diagonal element (which is now the largest element)
878             my $tout;
879             ($tout = $out->slice("($col),".($col+1).":$n1")) /= $big->slice('*1');
880             } # end of pivoting part
881             } # end of column loop
882              
                # List context returns (LU, permutation, parity); scalar context
                # returns just the packed LU matrix.
883             wantarray ? ($out,$permute,$parity) : $out;
884             }
885              
886             #line 867 "lib/PDL/MatrixOps.pd"
887              
888             =head2 lu_decomp2
889              
890             =for sig
891              
892             Signature: (a(m,m); [o]lu(m,m))
893              
894             =for ref
895              
896             LU decompose a matrix, with no row permutation
897              
898             =for usage
899              
900             ($lu, $perm, $parity) = lu_decomp2($x);
901              
902             $lu = lu_decomp2($x,$perm,$parity); # or
903             $lu = lu_decomp2($x); # $perm and $parity are optional
904              
905             lu_decomp($x->inplace,$perm,$parity); # or
906             lu_decomp($x->inplace); # $perm and $parity are optional
907              
908             =for description
909              
910             C<lu_decomp2> works just like L<lu_decomp|/lu_decomp>, but it does B<no>
911             pivoting at all. For compatibility with L<lu_decomp|/lu_decomp>, it
912             will give you a permutation list and a parity scalar if you ask
913             for them -- but they are always trivial.
914              
915             Because C<lu_decomp2> does not pivot, it is numerically B<unstable> --
916             that means it is less precise than L<lu_decomp|/lu_decomp>, particularly for
917             large or near-singular matrices. There are also specific types of
918             non-singular matrices that confuse it (e.g. ([0,-1,0],[1,0,0],[0,0,1]),
919             which is a 90 degree rotation matrix but which confuses C<lu_decomp2>).
920              
921             On the other hand, if you want to invert rapidly a few hundred thousand
922             small matrices and don't mind missing one or two, it could be the ticket.
923             It can be up to 60% faster at the expense of possible failure of the
924             decomposition for some of the input matrices.
925              
926             The output is a single matrix that contains the LU decomposition of C<$a>;
927             you can even do it in-place, thereby destroying C<$a>, if you want. See
928             L<lu_decomp|/lu_decomp> for more information about LU decomposition.
929              
930             C<lu_decomp2> is ported from I<Numerical Recipes> into PDL.
931              
932             =cut
933              
934             *PDL::lu_decomp2 = \&lu_decomp2;
935              
                # lu_decomp2($a, [$perm, $par, $sing_ok])
                #
                # Crout LU decomposition with NO pivoting.  Faster than lu_decomp
                # but numerically unstable.  $perm and $par are returned only for
                # interface compatibility with lu_decomp; they are always the
                # identity permutation and parity +1.
936             sub lu_decomp2 {
937             my($in) = shift;
938             my($perm) = shift;
939             my($par) = shift;
940              
                # NOTE(review): $sing_ok and $TINY are accepted/declared but never
                # used in this no-pivot variant.
941             my($sing_ok) = shift;
942              
943             my $TINY = 1e-30;
944              
945             barf("lu_decomp2 requires a square (2D) PDL\n")
946             if(!UNIVERSAL::isa($in,'PDL') ||
947             $in->ndims < 2 ||
948             $in->dim(0) != $in->dim(1));
949              
950             my($n) = $in->dim(0);
                # $n1 = n-1, the last valid index for slice strings.
951             my($n1) = $n; $n1--;
952              
953             my($inplace) = $in->is_inplace;
954             my($out) = ($inplace) ? $in : $in->copy;
955              
                # The permutation is always the identity since we never pivot;
                # validate and fill a caller-supplied vector, or create one.
956             if(defined $perm) {
957             barf('lu_decomp2: permutation vector must match the matrix')
958             if(!UNIVERSAL::isa($perm,'PDL') ||
959             $perm->ndims != 1 ||
960             $perm->dim(0) != $out->dim(0));
961             $perm .= PDL->xvals($in->dim(0));
962             } else {
963             $perm = PDL->xvals($in->dim(0));
964             }
965              
                # Parity is likewise always +1 (no row swaps ever happen).
966             if(defined $par) {
967             barf('lu_decomp: parity must be a scalar PDL')
968             if(!UNIVERSAL::isa($par,'PDL') ||
969             $par->nelem != 1);
970             $par .= 1.0;
971             } else {
972             $par = pdl(1.0);
973             }
974              
975             my $diagonal = $out->diagonal(0,1);
976              
977             my($col,$row);
978             for $col(0..$n1) {
                # Crout inner update: subtract the accumulated L*U products.
979             for $row(1..$n1) {
980             my($klim) = $row<$col ? $row : $col;
981             if($klim > 0) {
982             $klim--;
983             my($el) = $out->index2d($col,$row);
984              
985             $el -= ( $out->slice("($col),0:$klim") *
986             $out->slice("0:$klim,($row)") )->sumover;
987             }
988              
989             }
990              
991             # Figure a_ij, with no pivoting
992             if($col < $n1) {
993             # Divide the rest of the column by the diagonal element
                # NOTE(review): no guard against a zero diagonal here, so this
                # can divide by zero for some inputs (unlike lu_decomp).
994             $out->slice("($col),".($col+1).":$n1") /= $diagonal->index($col)->dummy(0,$n1-$col);
995             }
996              
997             } # end of column loop
998              
999             wantarray ? ($out,$perm,$par) : $out;
1000             }
1001              
1002             #line 988 "lib/PDL/MatrixOps.pd"
1003              
1004             =head2 lu_backsub
1005              
1006             =for sig
1007              
1008             Signature: (lu(m,m); perm(m); b(m))
1009              
1010             =for ref
1011              
1012             Solve A x = B for matrix A, by back substitution into A's LU decomposition.
1013              
1014             =for usage
1015              
1016             ($lu,$perm,$par) = lu_decomp($A);
1017              
1018             $x = lu_backsub($lu,$perm,$par,$A); # or
1019             $x = lu_backsub($lu,$perm,$B); # $par is not required for lu_backsub
1020              
1021             lu_backsub($lu,$perm,$B->inplace); # modify $B in-place
1022              
1023             $x = lu_backsub(lu_decomp($A),$B); # (ignores parity value from lu_decomp)
1024              
1025             # starting from square matrix A and columns matrix B, with mathematically
1026             # correct dimensions
1027             $A = identity(4) + ones(4, 4);
1028             $A->slice('2,0') .= 0; # break symmetry to see if need transpose
1029             $B = sequence(2, 4);
1030             # all these functions take B as rows, interpret as though notional columns
1031             # mathematically confusing but can't change as back-compat and also
1032             # familiar to Fortran users, so just transpose inputs and outputs
1033              
1034             # using lu_backsub
1035             ($lu,$perm,$par) = lu_decomp($A);
1036             $x = lu_backsub($lu,$perm,$par, $B->transpose)->transpose;
1037              
1038             # using simq
1039             # remove all active dims
1040             @A_dims = $A->dims; @B_dims = $B->transpose->dims;
1041             splice @A_dims, 0, 2; splice @B_dims, 0, 1;
1042             @broadcast = PDL::Core::dims_filled(\@A_dims, \@B_dims);
1043             # simq modifies A, so need 1 copy per broadcast else non-first run has wrong A
1044             ($x) = simq($A->dupN(1,1,map +($A_dims[$_]//1)==1?$broadcast[$_]:1, 0..$#broadcast)->copy, $B->transpose, 0);
1045             $x = $x->inplace->transpose;
1046              
1047             # or with PDL::LinearAlgebra wrappers of LAPACK
1048             $x = msolve($A, $B);
1049              
1050             # or with LAPACK
1051             use PDL::LinearAlgebra::Real;
1052             getrf($lu=$A->copy, $ipiv=null, $info=null);
1053             getrs($lu, 1, $x=$B->transpose->copy, $ipiv, $info=null); # again, need transpose
1054             $x=$x->inplace->transpose;
1055              
1056             # or with GSL
1057             use PDL::GSL::LINALG;
1058             LU_decomp(my $lu=$A->copy, my $p=null, my $signum=null);
1059             # $B and $x, first dim is because GSL treats as vector, higher dims broadcast
1060             # so we transpose in and back out
1061             LU_solve($lu, $p, $B->transpose, my $x=null);
1062             $x=$x->inplace->transpose;
1063              
1064             # proof of the pudding is in the eating:
1065             print $A x $x;
1066              
1067             =for description
1068              
1069             Given the LU decomposition of a square matrix (from L<lu_decomp|/lu_decomp>),
1070             C<lu_backsub> does back substitution into the matrix to solve
1071             C<a x = b> for given vector C<b>. It is separated from the
1072             C<lu_decomp> method so that you can call the cheap C<lu_backsub>
1073             multiple times and not have to do the expensive LU decomposition
1074             more than once.
1075              
1076             C<lu_backsub> acts on single vectors and broadcasts in the usual
1077             way, which means that it treats C<$y> as the I<transpose>
1078             of the input. If you want to process a matrix, you must
1079             hand in the I<transpose> of the matrix, and then transpose
1080             the output when you get it back. that is because pdls are
1081             indexed by (col,row), and matrices are (row,column) by
1082             convention, so a 1-D pdl corresponds to a row vector, not a
1083             column vector.
1084              
1085             If C<$lu> is dense and you have more than a few points to
1086             solve for, it is probably cheaper to find the inverse with
1087             L<inv|/inv>, and just multiply by it.) in fact,
1088             L<inv|/inv> works by calling C<lu_backsub> with the identity
1089             matrix.
1090              
1091             C<lu_backsub> is ported from section 2.3 of I<Numerical Recipes>.
1092             It is written in PDL but should probably be implemented in C.
1093              
1094             =cut
1095              
1096             *PDL::lu_backsub = \&lu_backsub;
1097              
                # lu_backsub($lu, $perm, [$par,] $y)
                #
                # Solve A x = b by forward and back substitution through the packed
                # LU matrix produced by lu_decomp.  The pivot parity $par is
                # accepted in the 4-argument form but is not used.  Returns the
                # solution vector(s); when $y->inplace is set the result is also
                # written back into $y.
1098             sub lu_backsub {
1099             my ($lu, $perm, $y, $par);
1100             print STDERR "lu_backsub: entering debug version...\n" if $PDL::debug;
                # Accept either ($lu,$perm,$y) or ($lu,$perm,$par,$y); $par ignored.
1101             if(@_==3) {
1102             ($lu, $perm, $y) = @_;
1103             } elsif(@_==4) {
1104             ($lu, $perm, $par, $y) = @_;
1105             }
1106              
1107             barf("lu_backsub: LU decomposition is undef -- probably from a singular matrix.\n")
1108             unless defined($lu);
1109              
1110             barf("Usage: \$x = lu_backsub(\$lu,\$perm,\$y); all must be PDLs\n")
1111             unless(UNIVERSAL::isa($lu,'PDL') &&
1112             UNIVERSAL::isa($perm,'PDL') &&
1113             UNIVERSAL::isa($y,'PDL'));
1114              
1115             my $n = $y->dim(0);
                # $n1 = n-1, the last valid index for slice strings.
1116             my $n1 = $n; $n1--;
1117              
1118             # Make sure broadcasting dimensions are compatible.
1119             # There are two possible sources of broadcast dims:
1120             #
1121             # (1) over multiple LU (i.e., $lu,$perm) instances
1122             # (2) over multiple B (i.e., $y) column instances
1123             #
1124             # The full dimensions of the function call looks like
1125             #
1126             # lu_backsub( lu(m,m,X), perm(m,X), b(m,Y) )
1127             #
1128             # where X is the list of extra LU dims and Y is
1129             # the list of extra B dims. We have several possible
1130             # cases:
1131             #
1132             # (1) Check that m dims are compatible
1133             my $ludims = pdl($lu->dims);
1134             my $permdims = pdl($perm->dims);
1135             my $bdims = pdl($y->dims);
1136              
1137             print STDERR "lu_backsub: called with args: \$lu$ludims, \$perm$permdims, \$y$bdims\n" if $PDL::debug;
1138              
1139             my $m = $ludims->slice("(0)"); # this is the sig dimension
1140             unless ( ($ludims->slice(0) == $m) and ($ludims->slice(1) == $m) and
1141             ($permdims->slice(0) == $m) and ($bdims->slice(0) == $m)) {
1142             barf "lu_backsub: mismatched sig dimensions";
1143             }
1144              
1145             my $lunumthr = $ludims->dim(0)-2;
1146             my $permnumthr = $permdims->dim(0)-1;
1147             my $bnumthr = $bdims->dim(0)-1;
1148             unless ( ($lunumthr == $permnumthr) and ($ludims->slice("1:-1") == $permdims)->all ) {
1149             barf "lu_backsub: \$lu and \$perm broadcast dims not equal! \n";
1150             }
1151              
1152             # (2) If X == Y then default broadcasting is ok
1153             if ( ($bnumthr==$permnumthr) and ($bdims==$permdims)->all) {
1154             print STDERR "lu_backsub: have explicit broadcast dims, goto BROADCAST_OK\n" if $PDL::debug;
1155             goto BROADCAST_OK;
1156             }
1157              
1158             # (3) If X == (x,Y) then add x dummy to lu,perm
1159              
1160             # (4) If ndims(X) > ndims(Y) then must have #3
1161              
1162             # (5) If ndims(X) < ndims(Y) then foreach
1163             # non-trivial leading dim in X (x0,x1,..)
1164             # insert dummy (x0,x1) into lu and perm
1165              
1166             # This means that broadcasting occurs over all
1167             # leading non-trivial (not length 1) dims of
1168             # B unless all the broadcast dims are explicitly
1169             # matched to the LU dims.
1170              
1171             BROADCAST_OK:
1172              
1173             # Permute the vector and make a copy if necessary.
                # Apply the row permutation to b via dummy+index; sever to a
                # private copy unless the caller asked for in-place operation.
1174             my $out = $y->dummy(1,$y->dim(0))->index($perm->dummy(1));
1175             $out = $out->sever if !$y->is_inplace;
1176             print STDERR "lu_backsub: starting with \$out" . pdl($out->dims) . "\n" if $PDL::debug;
1177              
1178             # Make sure broadcasting over lu happens OK...
1179              
                # Pad $out with trailing dummy dims until it matches $lu's extra
                # (broadcast) dims.
1180             if($out->ndims < $lu->ndims-1) {
1181             print STDERR "lu_backsub: adjusting dims for \$out" . pdl($out->dims) . "\n" if $PDL::debug;
1182             do {
1183             $out = $out->dummy(-1,$lu->dim($out->ndims+1));
1184             } while($out->ndims < $lu->ndims-1);
1185             }
1186              
1187             ## Do forward substitution into L
1188             my $row; my $r1;
1189              
1190             for $row(1..$n1) {
1191             $r1 = $row-1;
1192             $out->index($row) -= ($lu->slice("0:$r1,$row") *
1193             $out->slice("0:$r1")
1194             )->sumover;
1195             }
1196              
1197             ## Do backward substitution into U, and normalize by the diagonal
1198             my $ludiag = $lu->diagonal(0,1);
1199             $out->index($n1) /= $ludiag->index($n1)->dummy(0); # TODO: check broadcasting
1200              
1201             for ($row=$n1; $row>0; $row--) {
1202             $r1 = $row-1;
1203             $out->index($r1) -= ($lu->slice("$row:$n1,$r1") * # TODO: check broadcast dims
1204             $out->slice("$row:$n1")
1205             )->sumover;
1206             $out->index($r1) /= $ludiag->index($r1)->dummy(0); # TODO: check broadcast dims
1207             }
1208              
                # In-place mode: push the solution back into $y (resizing first if
                # broadcasting grew the result's shape).
1209             if ($y->is_inplace) {
1210             $y->setdims([$out->dims]) if !PDL::all($y->shape == $out->shape); # assgn needs same shape
1211             $y .= $out;
1212             }
1213             $out;
1214             }
1215             #line 1216 "lib/PDL/MatrixOps.pm"
1216              
1217              
1218             =head2 simq
1219              
1220             =for sig
1221              
1222             Signature: ([io,phys]a(n,n); [phys]b(n); [o,phys]x(n); int [o,phys]ips(n); int flag)
1223             Types: (double)
1224              
1225             =for ref
1226              
1227             Solution of simultaneous linear equations, C<a x = b>.
1228             B.
1229              
1230             =for usage
1231              
1232             # remove all active dims
1233             @A_dims = $A->dims; @B_dims = $B->transpose->dims;
1234             splice @A_dims, 0, 2; splice @B_dims, 0, 1;
1235             @broadcast = PDL::Core::dims_filled(\@A_dims, \@B_dims);
1236             # simq modifies A, so need 1 copy per broadcast else non-first run has wrong A
1237             ($x) = simq($A->dupN(1,1,map +($A_dims[$_]//1)==1?$broadcast[$_]:1, 0..$#broadcast)->copy, $B->transpose, 0);
1238             $x = $x->inplace->transpose;
1239              
1240             C<$a> is an C<n x n> matrix (i.e., a vector of length C<n*n>), stored row-wise:
1241             that is, C<a(i,j) = a[ij]>, where C<ij = i*n + j>.
1242              
1243             While this is the transpose of the normal column-wise storage, this
1244             corresponds to normal PDL usage. The contents of matrix a may be
1245             altered (but may be required for subsequent calls with flag = -1).
1246              
1247             C<$y>, C<$x>, C<$ips> are vectors of length C<n>.
1248              
1249             Set C<flag=0> to solve.
1250             Set C<flag=-1> to do a new back substitution for
1251             different C<$y> vector using the same a matrix previously reduced when
1252             C<flag=0> (the C<$ips> vector generated in the previous solution is also
1253             required).
1254              
1255             For this function to work well with broadcasting, it will need the LU
1256             decomposition part split out, so that for solving C<a x = b> only C<x>
1257             would be written to.
1258              
1259             See also L<lu_backsub|/lu_backsub>, which does the same thing with a slightly
1260             less opaque interface.
1261              
1262             =pod
1263              
1264             Broadcasts over its inputs.
1265              
1266             =for bad
1267              
1268             C ignores the bad-value flag of the input ndarrays.
1269             It will set the bad-value flag of all output ndarrays if the flag is set for any of the input ndarrays.
1270              
1271             =cut
1272              
1273              
1274              
1275              
                # Expose the PDL::PP-generated XS routine PDL::simq under this
                # package's name for export.
1276             *simq = \&PDL::simq;
1277              
1278              
1279              
1280              
1281              
1282              
1283             =head2 squaretotri
1284              
1285             =for sig
1286              
1287             Signature: (a(n,n); [o]b(m=CALC(($SIZE(n) * ($SIZE(n)+1))/2)))
1288             Types: (sbyte byte short ushort long ulong indx ulonglong longlong
1289             float double ldouble cfloat cdouble cldouble)
1290              
1291             =for usage
1292              
1293             $b = squaretotri($a);
1294             squaretotri($a, $b); # all arguments given
1295             $b = $a->squaretotri; # method call
1296             $a->squaretotri($b);
1297              
1298             =for ref
1299              
1300             Convert a lower-triangular square matrix to triangular vector storage.
1301             Ignores upper half of input.
1302              
1303             =pod
1304              
1305             Broadcasts over its inputs.
1306              
1307             =for bad
1308              
1309             C does not process bad values.
1310             It will set the bad-value flag of all output ndarrays if the flag is set for any of the input ndarrays.
1311              
1312             =cut
1313              
1314              
1315              
1316              
                # Expose the PDL::PP-generated XS routine PDL::squaretotri under
                # this package's name for export.
1317             *squaretotri = \&PDL::squaretotri;
1318              
1319              
1320              
1321              
1322              
1323              
1324             =head2 tritosquare
1325              
1326             =for sig
1327              
1328             Signature: (a(m); [o]b(n,n))
1329             Types: (sbyte byte short ushort long ulong indx ulonglong longlong
1330             float double ldouble cfloat cdouble cldouble)
1331              
1332             =for usage
1333              
1334             $b = tritosquare($a);
1335             tritosquare($a, $b); # all arguments given
1336             $b = $a->tritosquare; # method call
1337             $a->tritosquare($b);
1338              
1339             =for ref
1340              
1341             Convert a triangular vector to lower-triangular square matrix storage.
1342             Does not touch upper half of output.
1343              
1344             =pod
1345              
1346             Broadcasts over its inputs.
1347              
1348             =for bad
1349              
1350             C does not process bad values.
1351             It will set the bad-value flag of all output ndarrays if the flag is set for any of the input ndarrays.
1352              
1353             =cut
1354              
1355              
1356              
1357              
                # Expose the PDL::PP-generated XS routine PDL::tritosquare under
                # this package's name for export.
1358             *tritosquare = \&PDL::tritosquare;
1359              
1360              
1361              
1362              
1363              
1364              
1365             =head2 tricpy
1366              
1367             =for sig
1368              
1369             Signature: (A(m,n);[o] C(m,n); int uplo)
1370             Types: (sbyte byte short ushort long ulong indx ulonglong longlong
1371             float double ldouble cfloat cdouble cldouble)
1372              
1373             =for usage
1374              
1375             $C = tricpy($A); # using default of uplo=0
1376             $C = tricpy($A, $uplo);
1377             tricpy($A, $uplo, $C); # all arguments given
1378             $C = $A->tricpy; # method call
1379             $C = $A->tricpy($uplo);
1380             $A->tricpy($uplo, $C);
1381              
1382             =for ref
1383              
1384             Copy triangular part to another matrix. If uplo == 0 copy upper triangular
1385             part.
1386              
1387             Originally by Grégory Vanuxem.
1388              
1389             =pod
1390              
1391             Broadcasts over its inputs.
1392              
1393             =for bad
1394              
1395             C does not process bad values.
1396             It will set the bad-value flag of all output ndarrays if the flag is set for any of the input ndarrays.
1397              
1398             =cut
1399              
1400              
1401              
1402              
                # Expose the PDL::PP-generated XS routine PDL::tricpy under this
                # package's name for export.
1403             *tricpy = \&PDL::tricpy;
1404              
1405              
1406              
1407              
1408              
1409              
1410             =head2 mstack
1411              
1412             =for sig
1413              
1414             Signature: (x(n,m);y(n,p);[o]out(n,q=CALC($SIZE(m)+$SIZE(p))))
1415             Types: (sbyte byte short ushort long ulong indx ulonglong longlong
1416             float double ldouble cfloat cdouble cldouble)
1417              
1418             =for usage
1419              
1420             $out = mstack($x, $y);
1421             mstack($x, $y, $out); # all arguments given
1422             $out = $x->mstack($y); # method call
1423             $x->mstack($y, $out);
1424              
1425             =for ref
1426              
1427             Combine two 2D ndarrays into a single ndarray, along the second
1428             ("vertical") dim.
1429             This routine does backward and forward dataflow automatically.
1430              
1431             Originally by Grégory Vanuxem.
1432              
1433             =pod
1434              
1435             Broadcasts over its inputs.
1436             Creates data-flow back and forth by default.
1437              
1438             =for bad
1439              
1440             C does not process bad values.
1441             It will set the bad-value flag of all output ndarrays if the flag is set for any of the input ndarrays.
1442              
1443             =cut
1444              
1445              
1446              
1447              
                # Expose the PDL::PP-generated XS routine PDL::mstack under this
                # package's name for export.
1448             *mstack = \&PDL::mstack;
1449              
1450              
1451              
1452              
1453              
1454              
1455             =head2 augment
1456              
1457             =for sig
1458              
1459             Signature: (x(n); y(p);[o]out(q=CALC($SIZE(n)+$SIZE(p))))
1460             Types: (sbyte byte short ushort long ulong indx ulonglong longlong
1461             float double ldouble cfloat cdouble cldouble)
1462              
1463             =for usage
1464              
1465             $out = augment($x, $y);
1466             augment($x, $y, $out); # all arguments given
1467             $out = $x->augment($y); # method call
1468             $x->augment($y, $out);
1469              
1470             =for ref
1471              
1472             Combine two ndarrays into a single ndarray along the 0-th ("horizontal") dim.
1473             This routine does backward and forward dataflow automatically.
1474              
1475             Originally by Grégory Vanuxem.
1476              
1477             =pod
1478              
1479             Broadcasts over its inputs.
1480             Creates data-flow back and forth by default.
1481              
1482             =for bad
1483              
1484             C does not process bad values.
1485             It will set the bad-value flag of all output ndarrays if the flag is set for any of the input ndarrays.
1486              
1487             =cut
1488              
1489              
1490              
1491              
                # Expose the PDL::PP-generated XS routine PDL::augment under this
                # package's name for export.
1492             *augment = \&PDL::augment;
1493              
1494              
1495              
1496              
1497              
1498              
1499              
1500             #line 1371 "lib/PDL/MatrixOps.pd"
1501              
1502             =head1 AUTHOR
1503              
1504             Copyright (C) 2002 Craig DeForest (deforest@boulder.swri.edu),
1505             R.J.R. Williams (rjrw@ast.leeds.ac.uk), Karl Glazebrook
1506             (kgb@aaoepp.aao.gov.au). There is no warranty. You are allowed to
1507             redistribute and/or modify this work under the same conditions as PDL
1508             itself. If this file is separated from the PDL distribution, then the
1509             PDL copyright notice should be included in this file.
1510              
1511             =cut
1512             #line 1513 "lib/PDL/MatrixOps.pm"
1513              
1514             # Exit with OK status
1515              
1516             1;