line |
stmt |
bran |
cond |
sub |
pod |
time |
code |
1
|
|
|
|
|
|
|
package Plucene::Index::SegmentMerger; |
2
|
|
|
|
|
|
|
|
3
|
|
|
|
|
|
|
=head1 NAME |
4
|
|
|
|
|
|
|
|
5
|
|
|
|
|
|
|
Plucene::Index::SegmentMerger - the Segment merger |
6
|
|
|
|
|
|
|
|
7
|
|
|
|
|
|
|
=head1 SYNOPSIS |
8
|
|
|
|
|
|
|
|
9
|
|
|
|
|
|
|
my $merger = Plucene::Index::SegmentMerger->new(); |
10
|
|
|
|
|
|
|
|
11
|
|
|
|
|
|
|
$merger->add(Plucene::Index::SegmentReader $reader); |
12
|
|
|
|
|
|
|
$merger->merge; |
13
|
|
|
|
|
|
|
|
14
|
|
|
|
|
|
|
=head1 DESCRIPTION |
15
|
|
|
|
|
|
|
|
16
|
|
|
|
|
|
|
This is the segment merger class. |
17
|
|
|
|
|
|
|
|
18
|
|
|
|
|
|
|
=head1 METHODS |
19
|
|
|
|
|
|
|
|
20
|
|
|
|
|
|
|
=cut |
21
|
|
|
|
|
|
|
|
22
|
17
|
|
|
17
|
|
104
|
use strict; |
|
17
|
|
|
|
|
34
|
|
|
17
|
|
|
|
|
610
|
|
23
|
17
|
|
|
17
|
|
94
|
use warnings; |
|
17
|
|
|
|
|
38
|
|
|
17
|
|
|
|
|
611
|
|
24
|
17
|
|
|
17
|
|
90
|
no warnings 'uninitialized'; |
|
17
|
|
|
|
|
32
|
|
|
17
|
|
|
|
|
679
|
|
25
|
|
|
|
|
|
|
|
26
|
17
|
|
|
17
|
|
95
|
use File::Slurp; |
|
17
|
|
|
|
|
43
|
|
|
17
|
|
|
|
|
1713
|
|
27
|
17
|
|
|
17
|
|
115
|
use Plucene::Index::FieldInfos; |
|
17
|
|
|
|
|
34
|
|
|
17
|
|
|
|
|
421
|
|
28
|
17
|
|
|
17
|
|
102
|
use Plucene::Index::FieldsWriter; |
|
17
|
|
|
|
|
43
|
|
|
17
|
|
|
|
|
472
|
|
29
|
17
|
|
|
17
|
|
105
|
use Plucene::Index::SegmentMergeInfo; |
|
17
|
|
|
|
|
96
|
|
|
17
|
|
|
|
|
198
|
|
30
|
17
|
|
|
17
|
|
546
|
use Plucene::Index::TermInfosWriter; |
|
17
|
|
|
|
|
42
|
|
|
17
|
|
|
|
|
433
|
|
31
|
17
|
|
|
17
|
|
105
|
use Plucene::Index::TermInfo; |
|
17
|
|
|
|
|
33
|
|
|
17
|
|
|
|
|
126
|
|
32
|
17
|
|
|
17
|
|
634
|
use Plucene::Store::OutputStream; |
|
17
|
|
|
|
|
32
|
|
|
17
|
|
|
|
|
557
|
|
33
|
|
|
|
|
|
|
|
34
|
17
|
|
|
17
|
|
85
|
use base 'Class::Accessor::Fast'; |
|
17
|
|
|
|
|
33
|
|
|
17
|
|
|
|
|
24151
|
|
35
|
|
|
|
|
|
|
|
36
|
|
|
|
|
|
|
__PACKAGE__->mk_accessors( |
37
|
|
|
|
|
|
|
qw( dir name readers field_infos |
38
|
|
|
|
|
|
|
freq_output prox_output term_infos_writer queue ) |
39
|
|
|
|
|
|
|
); |
40
|
|
|
|
|
|
|
|
41
|
|
|
|
|
|
|
=head2 new |
42
|
|
|
|
|
|
|
|
43
|
|
|
|
|
|
|
my $merger = Plucene::Index::SegmentMerger->new(); |
44
|
|
|
|
|
|
|
|
45
|
|
|
|
|
|
|
This will create a new Plucene::Index::SegmentMerger object. |
46
|
|
|
|
|
|
|
|
47
|
|
|
|
|
|
|
=cut |
48
|
|
|
|
|
|
|
|
49
|
52
|
|
|
52
|
1
|
461
|
sub new { shift->SUPER::new(@_, readers => []) } |
50
|
|
|
|
|
|
|
|
51
|
|
|
|
|
|
|
=head2 add |
52
|
|
|
|
|
|
|
|
53
|
|
|
|
|
|
|
$merger->add(Plucene::Index::SegmentReader $reader); |
54
|
|
|
|
|
|
|
|
55
|
|
|
|
|
|
|
=cut |
56
|
|
|
|
|
|
|
|
57
|
274
|
|
|
274
|
1
|
501
|
sub add { push @{ $_[0]->{readers} }, $_[1] } |
|
274
|
|
|
|
|
1286
|
|
58
|
|
|
|
|
|
|
|
59
|
|
|
|
|
|
|
=head2 segment_reader |
60
|
|
|
|
|
|
|
|
61
|
|
|
|
|
|
|
=cut |
62
|
|
|
|
|
|
|
|
63
|
0
|
|
|
0
|
1
|
0
|
sub segment_reader { $_[0]->{readers}->[ $_[1] ] } |
64
|
|
|
|
|
|
|
|
65
|
|
|
|
|
|
|
=head2 merge |
66
|
|
|
|
|
|
|
|
67
|
|
|
|
|
|
|
$merger->merge; |
68
|
|
|
|
|
|
|
|
69
|
|
|
|
|
|
|
Perform the merging. After the merge, there will be no readers left |
70
|
|
|
|
|
|
|
stored in the merger object. |
71
|
|
|
|
|
|
|
|
72
|
|
|
|
|
|
|
=cut |
73
|
|
|
|
|
|
|
|
74
|
|
|
|
|
|
|
sub merge { |
75
|
39
|
|
|
39
|
1
|
94
|
my $self = shift; |
76
|
39
|
|
|
|
|
195
|
$self->_merge_fields(); |
77
|
39
|
|
|
|
|
251
|
$self->_merge_terms(); |
78
|
39
|
|
|
|
|
197
|
$self->_merge_norms(); |
79
|
39
|
|
|
|
|
543
|
$self->{readers} = []; |
80
|
|
|
|
|
|
|
} |
81
|
|
|
|
|
|
|
|
82
|
|
|
|
|
|
|
sub _merge_fields { |
83
|
39
|
|
|
39
|
|
87
|
my $self = shift; |
84
|
39
|
|
|
|
|
262
|
$self->{field_infos} = Plucene::Index::FieldInfos->new(); |
85
|
39
|
|
|
|
|
91
|
$self->{field_infos}->add($_->field_infos) for @{ $self->{readers} }; |
|
39
|
|
|
|
|
248
|
|
86
|
39
|
|
|
|
|
361
|
$self->{field_infos}->write("$self->{dir}/$self->{segment}.fnm"); |
87
|
|
|
|
|
|
|
|
88
|
39
|
|
|
|
|
12709
|
my $fw = |
89
|
|
|
|
|
|
|
Plucene::Index::FieldsWriter->new($self->{dir}, $self->{segment}, |
90
|
|
|
|
|
|
|
$self->{field_infos}); |
91
|
39
|
|
|
|
|
124
|
for my $reader (@{ $self->{readers} }) { |
|
39
|
|
|
|
|
157
|
|
92
|
|
|
|
|
|
|
$fw->add_document($_) |
93
|
274
|
|
|
|
|
1097
|
foreach map $reader->document($_), grep !$reader->is_deleted($_), |
94
|
|
|
|
|
|
|
0 .. $reader->max_doc - 1; |
95
|
|
|
|
|
|
|
} |
96
|
|
|
|
|
|
|
} |
97
|
|
|
|
|
|
|
|
98
|
|
|
|
|
|
|
sub _merge_terms { |
99
|
39
|
|
|
39
|
|
96
|
my $self = shift; |
100
|
39
|
|
|
|
|
123
|
my $segment = $self->{segment}; |
101
|
39
|
|
|
|
|
6588
|
$self->{term_infos_writer} = |
102
|
|
|
|
|
|
|
Plucene::Index::TermInfosWriter->new($self->{dir}, $segment, |
103
|
|
|
|
|
|
|
$self->{field_infos}); |
104
|
|
|
|
|
|
|
|
105
|
39
|
|
|
|
|
98
|
my $base = 0; |
106
|
39
|
|
|
|
|
71
|
my @queue; |
107
|
39
|
|
|
|
|
81
|
for my $reader (@{ $self->{readers} }) { |
|
39
|
|
|
|
|
148
|
|
108
|
274
|
|
|
|
|
1043
|
my $smi = |
109
|
|
|
|
|
|
|
Plucene::Index::SegmentMergeInfo->new($base, $reader->terms, $reader); |
110
|
274
|
|
|
|
|
884
|
$base += $reader->num_docs; |
111
|
274
|
50
|
|
|
|
871
|
push @queue, $smi if $smi->next; |
112
|
|
|
|
|
|
|
} |
113
|
|
|
|
|
|
|
|
114
|
|
|
|
|
|
|
# store every term in every reader/tmp segment in %pool |
115
|
39
|
|
|
|
|
107
|
my %pool; |
116
|
|
|
|
|
|
|
{ |
117
|
39
|
|
|
|
|
71
|
my $index = 0; |
|
39
|
|
|
|
|
84
|
|
118
|
39
|
|
|
|
|
105
|
foreach my $smi (@queue) { |
119
|
274
|
|
|
|
|
771
|
while (my $term = $smi->term) { |
120
|
33118
|
|
|
|
|
173194
|
push @{ $pool{ $term->{field} }->{ $term->{text} } }, |
|
33118
|
|
|
|
|
176803
|
|
121
|
|
|
|
|
|
|
[ $term, $index, $smi->term_enum->term_info->clone ]; |
122
|
33118
|
|
|
|
|
103587
|
$smi->next; |
123
|
|
|
|
|
|
|
} |
124
|
274
|
|
|
|
|
1597
|
++$index; |
125
|
|
|
|
|
|
|
} |
126
|
|
|
|
|
|
|
} |
127
|
|
|
|
|
|
|
|
128
|
|
|
|
|
|
|
# Now, by sorting our hash, we deal with each term in order: |
129
|
39
|
|
|
|
|
107
|
my (@freqs, @proxs); |
130
|
39
|
|
|
|
|
249
|
foreach my $field (sort keys %pool) { |
131
|
63
|
|
|
|
|
124
|
foreach my $term (sort keys %{ $pool{$field} }) { |
|
63
|
|
|
|
|
16734
|
|
132
|
13195
|
|
|
|
|
15903
|
my @min = @{ $pool{$field}->{$term} }; |
|
13195
|
|
|
|
|
68890
|
|
133
|
13195
|
|
|
|
|
22603
|
my ($fp, $pp) = (scalar(@freqs), scalar(@proxs)); |
134
|
|
|
|
|
|
|
|
135
|
|
|
|
|
|
|
# inlined append_postings |
136
|
13195
|
|
|
|
|
16014
|
my ($df, $last_doc); |
137
|
13195
|
|
|
|
|
21305
|
foreach my $item (@min) { |
138
|
33118
|
|
|
|
|
65900
|
my $smi = $queue[ $item->[1] ]; |
139
|
33118
|
|
|
|
|
88706
|
my $postings = $smi->postings; |
140
|
33118
|
|
|
|
|
184500
|
my $base = $smi->base; |
141
|
33118
|
|
|
|
|
177761
|
my $docmap = $smi->doc_map; |
142
|
33118
|
|
|
|
|
182633
|
$postings->seek($item->[2]); |
143
|
33118
|
|
|
|
|
226492
|
while ($postings->next) { |
144
|
33262
|
50
|
0
|
|
|
100204
|
my $doc = $base + ( |
145
|
|
|
|
|
|
|
$docmap |
146
|
|
|
|
|
|
|
? ($docmap->[ $postings->doc ] || 0) |
147
|
|
|
|
|
|
|
: $postings->doc |
148
|
|
|
|
|
|
|
); |
149
|
33262
|
50
|
|
|
|
178271
|
die "Docs out of order ($doc < $last_doc)" if $doc < $last_doc; |
150
|
|
|
|
|
|
|
|
151
|
33262
|
|
|
|
|
52835
|
my $doc_code = ($doc - $last_doc) << 1; |
152
|
33262
|
|
|
|
|
42967
|
$last_doc = $doc; |
153
|
33262
|
|
|
|
|
81487
|
my $freq = $postings->freq; |
154
|
33262
|
100
|
|
|
|
160251
|
push @freqs, ($freq == 1) ? ($doc_code | 1) : ($doc_code, $freq); |
155
|
|
|
|
|
|
|
|
156
|
33262
|
|
|
|
|
39030
|
my $last_pos = 0; |
157
|
33262
|
|
|
|
|
61936
|
for (0 .. $freq - 1) { |
158
|
144133
|
|
|
|
|
349579
|
my $pos = $postings->next_position; |
159
|
144133
|
|
|
|
|
214196
|
push @proxs, $pos - $last_pos; |
160
|
144133
|
|
|
|
|
242373
|
$last_pos = $pos; |
161
|
|
|
|
|
|
|
} |
162
|
33262
|
|
|
|
|
110182
|
++$df; |
163
|
|
|
|
|
|
|
} |
164
|
|
|
|
|
|
|
} |
165
|
|
|
|
|
|
|
|
166
|
|
|
|
|
|
|
# inlined _merge_term_info |
167
|
13195
|
|
|
|
|
89343
|
$self->{term_infos_writer}->add( |
168
|
|
|
|
|
|
|
$min[0]->[0], |
169
|
|
|
|
|
|
|
Plucene::Index::TermInfo->new({ |
170
|
|
|
|
|
|
|
doc_freq => $df, |
171
|
|
|
|
|
|
|
freq_pointer => $fp, |
172
|
|
|
|
|
|
|
prox_pointer => $pp |
173
|
|
|
|
|
|
|
})); |
174
|
|
|
|
|
|
|
} # end foreach term |
175
|
|
|
|
|
|
|
} # end foreach field |
176
|
|
|
|
|
|
|
|
177
|
39
|
|
|
|
|
13127
|
write_file("$self->{dir}/$segment.frq" => pack('(w)*', @freqs)); |
178
|
39
|
|
|
|
|
80178
|
write_file("$self->{dir}/$segment.prx" => pack('(w)*', @proxs)); |
179
|
39
|
|
|
|
|
31094
|
$self->{term_infos_writer}->break_ref; |
180
|
|
|
|
|
|
|
} |
181
|
|
|
|
|
|
|
|
182
|
|
|
|
|
|
|
sub _merge_norms { |
183
|
39
|
|
|
39
|
|
97
|
my $self = shift; |
184
|
39
|
|
|
|
|
255
|
my @fields = $self->{field_infos}->fields; |
185
|
39
|
|
|
|
|
187
|
for (0 .. $#fields) { |
186
|
103
|
|
|
|
|
590
|
my $fi = $fields[$_]; |
187
|
103
|
100
|
|
|
|
3152
|
next unless $fi->is_indexed; |
188
|
64
|
|
|
|
|
1137
|
my $output = |
189
|
|
|
|
|
|
|
Plucene::Store::OutputStream->new(my $file = |
190
|
|
|
|
|
|
|
"$self->{dir}/$self->{segment}.f$_"); |
191
|
64
|
|
|
|
|
149
|
for my $reader (@{ $self->{readers} }) { |
|
64
|
|
|
|
|
271
|
|
192
|
398
|
|
|
|
|
10975
|
my $input = $reader->norm_stream($fi->name); |
193
|
398
|
|
|
|
|
1363
|
for (0 .. $reader->max_doc - 1) { |
194
|
552
|
50
|
|
|
|
1950
|
$output->print(chr($input ? $input->read_byte : 0)) |
|
|
50
|
|
|
|
|
|
195
|
|
|
|
|
|
|
unless $reader->is_deleted($_); |
196
|
|
|
|
|
|
|
} |
197
|
|
|
|
|
|
|
} |
198
|
|
|
|
|
|
|
} |
199
|
|
|
|
|
|
|
} |
200
|
|
|
|
|
|
|
|
201
|
|
|
|
|
|
|
1; |