line |
stmt |
bran |
cond |
sub |
pod |
time |
code |
1
|
|
|
|
|
|
|
|
2
|
|
|
|
|
|
|
package Paws::Rekognition::RecognizeCelebritiesResponse;

use Moose;

# Read-only attributes carried on the RecognizeCelebrities API response.

# Celebrities detected in the input image (at most 15 per image).
has CelebrityFaces => (
  is  => 'ro',
  isa => 'ArrayRef[Paws::Rekognition::Celebrity]',
);

# Counterclockwise rotation needed to display the input image upright.
has OrientationCorrection => (
  is  => 'ro',
  isa => 'Str',
);

# Faces found in the image that were not matched to a known celebrity.
has UnrecognizedFaces => (
  is  => 'ro',
  isa => 'ArrayRef[Paws::Rekognition::ComparedFace]',
);

# AWS request id associated with this response.
has _request_id => (
  is  => 'ro',
  isa => 'Str',
);

### main pod documentation begin ###

=head1 NAME

Paws::Rekognition::RecognizeCelebritiesResponse

=head1 ATTRIBUTES

=head2 CelebrityFaces => ArrayRef[L<Paws::Rekognition::Celebrity>]

Details about each celebrity found in the image. Amazon Rekognition can
detect a maximum of 15 celebrities in an image.

=head2 OrientationCorrection => Str

The orientation of the input image (counterclockwise direction). If
your application displays the image, you can use this value to correct
the orientation. The bounding box coordinates returned in
C<CelebrityFaces> and C<UnrecognizedFaces> represent face locations
before the image orientation is corrected.

If the input image is in .jpeg format, it might contain exchangeable
image (Exif) metadata that includes the image's orientation. If so, and
the Exif metadata for the input image populates the orientation field,
the value of C<OrientationCorrection> is null and the C<CelebrityFaces>
and C<UnrecognizedFaces> bounding box coordinates represent face
locations after Exif metadata is used to correct the image orientation.
Images in .png format don't contain Exif metadata.

Valid values are: C<"ROTATE_0">, C<"ROTATE_90">, C<"ROTATE_180">, C<"ROTATE_270">

=head2 UnrecognizedFaces => ArrayRef[L<Paws::Rekognition::ComparedFace>]

Details about each unrecognized face in the image.

=head2 _request_id => Str

=cut

1;