Lingua::PT::Capitalizer — source listing from a statement/branch/condition coverage report.
package Lingua::PT::Capitalizer;

use common::sense;
use English qw[-no_match_vars];

use base qw[ Exporter ];

our @EXPORT = q(capitalize);

our $VERSION = '0.001'; # VERSION

# Short words that always stay lowercase inside Portuguese names and titles.
our %lc_always = (
    q(a)   => 1,
    q(á)   => 1,
    q(à)   => 1,
    q(aos) => 1,
    q(às)  => 1,
    q(d')  => 1,    # elision
    q(da)  => 1,
    q(das) => 1,
    q(de)  => 1,
    q(di)  => 1,
    q(do)  => 1,
    q(dos) => 1,
    q(e)   => 1,
    q(na)  => 1,
    q(nas) => 1,
    q(no)  => 1,
    q(nos) => 1,
    q(o)   => 1,
    q(os)  => 1,
);

# Capitalizes each word of $text, keeping the words listed in %lc_always
# in lowercase. Callable both as a plain function and as an object method.
sub capitalize {
    my ( $self, $text, $preserve_caps )
        = ref $_[0] eq __PACKAGE__
        ? @_
        : ( undef, @_ );

    $text //= $_;

    return unless defined $text;

    my @token = _pt_word_tokenizer($text);

    foreach ( my $i = 0; $i <= $#token; $i++ ) {
        foreach ( $token[$i] ) {
            # when $preserve_caps is 1, leave all-uppercase tokens
            # (e.g. acronyms) untouched
            if ( $preserve_caps ~~ q(1)
                && m{^\p{Lu}+(?:\N{APOSTROPHE})?$}msx )
            {
                next;
            }

            my $lc = lc;
            when ( $i == 0 ) { $_ = ucfirst $lc; }
            when ( substr( $_, -1 ) eq q(.) ) { $_ = ucfirst $lc; }
            when ( exists $lc_always{$lc} ) { $_ = $lc; }
            default {
                $_ = ucfirst $lc;
            }
        } ## end foreach ( $token[$i] )
    } ## end foreach ( my $i = 0; $i <= ...)

    $text = join q(), @token;

    return $text;
} ## end sub capitalize

# Splits the text into word and non-word tokens, keeping the separators,
# so that join q(), @token reproduces the original spacing and punctuation.
sub _pt_word_tokenizer {
    my $text = shift;

    my $re = qr{(\p{L}+(?:\N{APOSTROPHE}|\p{L}{0,2}))}msx;

    my @token = split $re, $text;
    @token = grep { defined && $_ ne q() } @token;

    return @token;
}

sub new {
    my $class = shift;
    my $self = {
        capitalize => \&capitalize,
        lc_always  => \%lc_always,
    };
    return bless $self, $class;
}

1;

__END__
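A minimal usage sketch based on the listing above. The sample strings and the
expected outputs in the comments are illustrative, derived from reading the
code rather than from the module's documentation, and assume a Perl where the
module's given/when and smartmatch features are available.

    use strict;
    use warnings;
    use Lingua::PT::Capitalizer;    # exports capitalize() by default

    # Functional interface.
    print capitalize('RUA DA LIBERDADE, PORTO'), "\n";    # Rua da Liberdade, Porto

    # A true second argument preserves all-uppercase tokens such as acronyms.
    print capitalize('sede da IBM no porto', 1), "\n";    # Sede da IBM no Porto

    # Object-oriented interface.
    my $capitalizer = Lingua::PT::Capitalizer->new;
    print $capitalizer->capitalize('avenida dos aliados'), "\n";    # Avenida dos Aliados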