| line |
stmt |
bran |
cond |
sub |
pod |
time |
code |
|
1
|
20
|
|
|
20
|
|
422610
|
use v5.40; |
|
|
20
|
|
|
|
|
80
|
|
|
2
|
20
|
|
|
20
|
|
170
|
use feature 'class', 'try'; |
|
|
20
|
|
|
|
|
34
|
|
|
|
20
|
|
|
|
|
2536
|
|
|
3
|
20
|
|
|
20
|
|
110
|
no warnings 'experimental::class', 'experimental::try'; |
|
|
20
|
|
|
|
|
34
|
|
|
|
20
|
|
|
|
|
1096
|
|
|
4
|
20
|
|
|
20
|
|
945
|
use Net::BitTorrent::Emitter; |
|
|
20
|
|
|
|
|
30
|
|
|
|
20
|
|
|
|
|
2027
|
|
|
5
|
|
|
|
|
|
|
class Net::BitTorrent::Torrent v2.0.0 : isa(Net::BitTorrent::Emitter) { |
|
6
|
20
|
|
|
20
|
|
10415
|
use Net::BitTorrent::Protocol::BEP03::Bencode qw[bencode bdecode]; |
|
|
20
|
|
|
|
|
53
|
|
|
|
20
|
|
|
|
|
2026
|
|
|
7
|
20
|
|
|
20
|
|
10345
|
use Net::BitTorrent::Storage; |
|
|
20
|
|
|
|
|
71
|
|
|
|
20
|
|
|
|
|
1078
|
|
|
8
|
20
|
|
|
20
|
|
11274
|
use Net::BitTorrent::Tracker; |
|
|
20
|
|
|
|
|
74
|
|
|
|
20
|
|
|
|
|
1082
|
|
|
9
|
20
|
|
|
20
|
|
10469
|
use Acme::Bitfield; |
|
|
20
|
|
|
|
|
34958
|
|
|
|
20
|
|
|
|
|
977
|
|
|
10
|
20
|
|
|
20
|
|
10534
|
use Net::BitTorrent::Torrent::PiecePicker; |
|
|
20
|
|
|
|
|
97
|
|
|
|
20
|
|
|
|
|
1031
|
|
|
11
|
20
|
|
|
20
|
|
9780
|
use Net::BitTorrent::Tracker::WebSeed; |
|
|
20
|
|
|
|
|
64
|
|
|
|
20
|
|
|
|
|
954
|
|
|
12
|
20
|
|
|
20
|
|
144
|
use Digest::SHA qw[sha1 sha256]; |
|
|
20
|
|
|
|
|
37
|
|
|
|
20
|
|
|
|
|
1396
|
|
|
13
|
20
|
|
|
20
|
|
118
|
use Path::Tiny; |
|
|
20
|
|
|
|
|
38
|
|
|
|
20
|
|
|
|
|
1066
|
|
|
14
|
20
|
|
|
20
|
|
11193
|
use IO::Select; |
|
|
20
|
|
|
|
|
37837
|
|
|
|
20
|
|
|
|
|
1332
|
|
|
15
|
20
|
|
|
20
|
|
149
|
use IO::Socket::IP; |
|
|
20
|
|
|
|
|
40
|
|
|
|
20
|
|
|
|
|
249
|
|
|
16
|
20
|
|
|
20
|
|
12906
|
use Net::BitTorrent::Types qw[:state :pick]; |
|
|
20
|
|
|
|
|
49
|
|
|
|
20
|
|
|
|
|
65411
|
|
|
17
|
|
|
|
|
|
|
# |
|
18
|
|
|
|
|
|
|
field $path : param = undef; |
|
19
|
|
|
|
|
|
|
field $base_path : param; |
|
20
|
|
|
|
|
|
|
field $client : param : reader; |
|
21
|
|
|
|
|
|
|
field $metadata : reader = undef; |
|
22
|
|
|
|
|
|
|
field $infohash : param = undef; |
|
23
|
|
|
|
|
|
|
field $infohash_v1 : writer : param = undef; |
|
24
|
|
|
|
|
|
|
field $infohash_v2 : param = undef; |
|
25
|
|
|
|
|
|
|
field $initial_trackers : param = []; |
|
26
|
|
|
|
|
|
|
field $initial_peers : param = []; |
|
27
|
|
|
|
|
|
|
field $storage : reader; |
|
28
|
|
|
|
|
|
|
field $bitfield : reader; |
|
29
|
|
|
|
|
|
|
field $picker : reader; |
|
30
|
|
|
|
|
|
|
field $tracker_manager; |
|
31
|
|
|
|
|
|
|
field $features : reader; |
|
32
|
|
|
|
|
|
|
field $peer_id : param = undef; |
|
33
|
|
|
|
|
|
|
field %peers; # 'ip:port' => { ip => ..., port => ... } |
|
34
|
|
|
|
|
|
|
field %peer_objects; |
|
35
|
|
|
|
|
|
|
# All connected peer objects as an array reference.
method peer_objects () { return [ values %peer_objects ]; }
|
36
|
|
|
|
|
|
|
# Direct reference to the 'ip:port' => peer-object table.
method peer_objects_hash () { return \%peer_objects; }
|
37
|
|
|
|
|
|
|
field %peer_bitfields; # Peer object => Bitfield object |
|
38
|
|
|
|
|
|
|
# Reference to the Peer => Bitfield lookup table.
method peer_bitfields () { return \%peer_bitfields; }
|
39
|
|
|
|
|
|
|
field %blocks_pending; # piece_index => { offset => 1 } |
|
40
|
|
|
|
|
|
|
# Reference to the piece_index => { offset => 1 } table of in-flight block requests.
method blocks_pending () { return \%blocks_pending; }
|
41
|
|
|
|
|
|
|
field %blocks_received; # piece_index => { offset => 1 } |
|
42
|
|
|
|
|
|
|
# Reference to the piece_index => { offset => 1 } table of received blocks.
method blocks_received () { return \%blocks_received; }
|
43
|
|
|
|
|
|
|
field %block_sources; # piece_index => { offset => Peer } |
|
44
|
|
|
|
|
|
|
field $is_private : reader; |
|
45
|
|
|
|
|
|
|
field $dht_nodes; |
|
46
|
|
|
|
|
|
|
field %test_data; # For simulation |
|
47
|
|
|
|
|
|
|
field %block_cache; # piece_index => { offset => data } |
|
48
|
|
|
|
|
|
|
field $bytes_downloaded = 0; |
|
49
|
|
|
|
|
|
|
field $bytes_uploaded = 0; |
|
50
|
|
|
|
|
|
|
field $bytes_left = 0; |
|
51
|
|
|
|
|
|
|
field @piece_priorities; |
|
52
|
|
|
|
|
|
|
field $picking_strategy = PICK_RAREST_FIRST; |
|
53
|
|
|
|
|
|
|
field $is_partial_seed : reader : writer(set_partial_seed) = 0; |
|
54
|
|
|
|
|
|
|
field $is_superseed : reader : writer(set_superseed) = 0; |
|
55
|
|
|
|
|
|
|
field %superseed_offers; # Peer object => piece_index |
|
56
|
|
|
|
|
|
|
field $debug : param = 0; |
|
57
|
|
|
|
|
|
|
# |
|
58
|
|
|
|
|
|
|
# Super-seeding: offer $peer a piece that no other connected peer appears
# to have, and keep re-offering the same piece until we observe it on some
# other peer's bitfield (i.e. it has propagated through the swarm).
# Returns a piece index, or undef when super-seeding is off / nothing to offer.
method get_superseed_piece ($peer) {
    return undef unless $is_superseed;

    # An outstanding offer stays active until another peer advertises it.
    if ( exists $superseed_offers{$peer} ) {
        my $pending    = $superseed_offers{$peer};
        my $propagated = 0;
        for my $other ( values %peer_objects ) {
            next if $other == $peer;
            my $bf = $peer_bitfields{$other};
            if ( $bf && $bf->get($pending) ) {
                $propagated = 1;
                last;
            }
        }
        return $pending unless $propagated;

        # The piece reached the swarm; retire the offer and pick a new one.
        delete $superseed_offers{$peer};
    }

    # Offer the lowest-indexed piece we hold whose swarm availability is
    # zero (presumably get_availability counts remote copies — confirm).
    for ( my $i = 0; $i < $bitfield->size; $i++ ) {
        next unless $bitfield->get($i);
        my $availability = $picker->get_availability($i);
        if ( defined $availability && $availability == 0 ) {
            $superseed_offers{$peer} = $i;
            return $i;
        }
    }
    return undef;
}
|
83
|
|
|
|
|
|
|
field @webseeds; |
|
84
|
|
|
|
|
|
|
field $limit_up : reader; |
|
85
|
|
|
|
|
|
|
field $limit_down : reader; |
|
86
|
|
|
|
|
|
|
field $choke_timer = 0; |
|
87
|
|
|
|
|
|
|
field $optimistic_timer = 0; |
|
88
|
|
|
|
|
|
|
field $optimistic_unchoke_peer = undef; |
|
89
|
|
|
|
|
|
|
field $state : reader = STATE_STOPPED; # STOPPED, STARTING, RUNNING, PAUSED, METADATA |
|
90
|
|
|
|
|
|
|
|
|
91
|
|
|
|
|
|
|
# PEX (BEP 11) logic |
|
92
|
|
|
|
|
|
|
field $pex_timer = 0; |
|
93
|
|
|
|
|
|
|
field $tracker_timer = 0; |
|
94
|
|
|
|
|
|
|
field $dht_lookup_timer = 0; |
|
95
|
|
|
|
|
|
|
field %pex_added; # ip:port => { ip, port } |
|
96
|
|
|
|
|
|
|
field %pex_dropped; # ip:port => { ip, port } |
|
97
|
|
|
|
|
|
|
|
|
98
|
|
|
|
|
|
|
# Magnet/Metadata fetching |
|
99
|
|
|
|
|
|
|
field %metadata_pieces; |
|
100
|
|
|
|
|
|
|
field $metadata_size : reader = 0; |
|
101
|
|
|
|
|
|
|
|
|
102
|
|
|
|
|
|
|
# Total number of metadata bytes received so far across all ut_metadata pieces.
method metadata_received_count () {
    my $bytes = 0;
    for my $piece ( values %metadata_pieces ) {
        $bytes += length $piece;
    }
    return $bytes;
}
|
107
|
|
|
|
|
|
|
|
|
108
|
|
|
|
|
|
|
# DHT Search Frontier |
|
109
|
|
|
|
|
|
|
field %dht_frontier; # node_id_hex => { id, ip, port, visited } |
|
110
|
|
|
|
|
|
|
|
|
111
|
|
|
|
|
|
|
# Swarm stats from DHT (BEP 33) |
|
112
|
|
|
|
|
|
|
field $dht_seeders : reader = 0; |
|
113
|
|
|
|
|
|
|
field $dht_leechers : reader = 0; |
|
114
|
|
|
|
|
|
|
# Set this torrent's upload rate cap (delegates to the token bucket).
method set_limit_up ($val) { return $limit_up->set_limit($val); }
|
115
|
|
|
|
|
|
|
# Set this torrent's download rate cap (delegates to the token bucket).
method set_limit_down ($val) { return $limit_down->set_limit($val); }
|
116
|
|
|
|
|
|
|
|
|
117
|
|
|
|
|
|
|
# Ask permission to read $amount bytes: consume from the client-wide
# download bucket first, then narrow the grant through this torrent's own
# bucket. Returns the byte count actually allowed (possibly 0).
# NOTE(review): bytes granted by the client bucket but then refused by the
# torrent bucket are not refunded — confirm this is intentional.
method can_read ($amount) {
    my $granted = $client->limit_down->consume($amount);
    $granted = $limit_down->consume($granted);
    return $granted;
}
|
122
|
|
|
|
|
|
|
|
|
123
|
|
|
|
|
|
|
# Ask permission to write $amount bytes: global upload bucket first, then
# the per-torrent bucket. Returns the byte count actually allowed.
# NOTE(review): same non-refund caveat as can_read.
method can_write ($amount) {
    my $granted = $client->limit_up->consume($amount);
    $granted = $limit_up->consume($granted);
    return $granted;
}
|
128
|
|
|
|
|
|
|
|
|
129
|
|
|
|
|
|
|
# True once every payload byte has been downloaded. Always false while we
# are still fetching metadata (no byte accounting exists yet).
method is_finished () {
    return 0 if !$self->is_metadata_complete;
    return 0 if $state == STATE_METADATA;
    return $bytes_left == 0;
}
|
134
|
|
|
|
|
|
|
|
|
135
|
|
|
|
|
|
|
# True when we hold every piece of the torrent (falsy before the bitfield
# has been initialized).
method is_seed () {
    my $bf = $bitfield;
    return $bf && $bf->count == $bf->size;
}
|
138
|
|
|
|
|
|
|
|
|
139
|
|
|
|
|
|
|
# 1 when the full metadata (info dictionary) is available, 0 otherwise.
method is_metadata_complete () {
    return ( defined $metadata ) ? 1 : 0;
}
|
142
|
|
|
|
|
|
|
|
|
143
|
|
|
|
|
|
|
# True in any active state: RUNNING, STARTING, or METADATA-fetching.
method is_running () {
    return $state == STATE_RUNNING
        || $state == STATE_STARTING
        || $state == STATE_METADATA;
}
|
146
|
|
|
|
|
|
|
|
|
147
|
|
|
|
|
|
|
# Human-readable torrent name: the metadata's info.name when available,
# otherwise the hex-encoded infohash (v1 preferred, then v2, else '').
#
# Fix: the original called $self->infohash_v1 / $self->infohash_v2, but
# neither field declares a :reader accessor ($infohash_v1 has only
# :writer :param, $infohash_v2 only :param), so those method calls would
# die at runtime. Use the lexical fields directly instead.
method name () {
    return $metadata->{info}{name} if $metadata && $metadata->{info} && $metadata->{info}{name};
    return unpack( 'H*', $infohash_v1 // $infohash_v2 // '' );
}
|
151
|
|
|
|
|
|
|
|
|
152
|
|
|
|
|
|
|
# Download completion as a percentage in [0, 100]. Returns 0 until
# metadata is known; an empty (zero-byte) torrent counts as 100%.
method progress () {
    return 0 unless $self->is_metadata_complete;
    return 0 if $state == STATE_METADATA;
    my $total_bytes = $self->_calculate_total_size();
    return 100 if $total_bytes == 0;
    my $have = $total_bytes - $bytes_left;
    return $have / $total_bytes * 100;
}
|
159
|
|
|
|
|
|
|
|
|
160
|
|
|
|
|
|
|
# Move from STOPPED into STARTING: announce to trackers, kick off the DHT
# lookup and (public torrents only) a BEP 33 scrape, then settle into
# RUNNING or METADATA mode depending on whether metadata is already known.
method start () {
    return unless $state == STATE_STOPPED;
    $state = STATE_STARTING;
    $self->announce('started');
    $self->start_dht_lookup() unless $is_private;

    # BEP 33: Scraping — weak ref so the callback can't keep us alive.
    if ( !$is_private && $client->dht ) {
        my $weak_self = $self;
        builtin::weaken($weak_self);
        $client->dht_scrape(
            $infohash_v2 || $infohash_v1,
            sub ( $emitter, $res ) {
                $weak_self->handle_dht_scrape($res) if $weak_self;
            }
        );
    }
    if ($metadata) {
        $state = STATE_RUNNING;
        $self->_emit('started');
    }
    else {

        # No metadata yet (magnet start) — fetch it from peers first.
        $state = STATE_METADATA;
        $self->_emit( log => " [DEBUG] Torrent starting in METADATA mode\n", level => 'debug' ) if $debug;
    }
}
|
186
|
|
|
|
|
|
|
|
|
187
|
|
|
|
|
|
|
# Stop the torrent: flush storage to disk, notify trackers, tear down
# every peer connection, and reset the peer tables.
method stop () {
    return if $state == STATE_STOPPED;
    $state = STATE_STOPPED;
    $storage->explicit_flush() if $storage;
    $self->announce('stopped');
    for my $connection ( grep {defined} values %peer_objects ) {
        $connection->disconnected();
    }
    %peer_objects = ();
    %peers        = ();
    $self->_emit('stopped');
}
|
199
|
|
|
|
|
|
|
|
|
200
|
|
|
|
|
|
|
# Pause an active torrent (RUNNING or METADATA); no-op in other states.
method pause () {
    return unless $state == STATE_RUNNING || $state == STATE_METADATA;
    $state = STATE_PAUSED;
    $self->_emit('paused');
}
|
205
|
|
|
|
|
|
|
|
|
206
|
|
|
|
|
|
|
# Resume a PAUSED torrent back into RUNNING; no-op in other states.
# NOTE(review): a torrent paused during metadata fetch resumes straight to
# RUNNING, not back to METADATA — confirm this is intended.
method resume () {
    return unless $state == STATE_PAUSED;
    $state = STATE_RUNNING;
    $self->_emit('resumed');
}
|
211
|
|
|
|
|
|
|
# Construction-time setup: wire up the event emitter chain, copy client
# defaults, create the rate limiters, then initialize either from a
# .torrent file on disk ($path) or from a bare infohash (magnet-style).
ADJUST {
    $self->set_parent_emitter($client);
    if ($debug) {
        my $msg = " [DEBUG] Torrent::ADJUST path=" .
            ( $path // 'undef' ) . " ih=" .
            ( $infohash // 'undef' ) . " v1=" .
            ( $infohash_v1 // 'undef' ) . " v2=" .
            ( $infohash_v2 // 'undef' ) . "\n";
        $self->_emit( log => $msg, level => 'debug' );
    }

    # Avoid a reference cycle back to the owning client.
    builtin::weaken($client) if defined $client;

    # Copy the client's feature map so per-torrent tweaks don't leak back.
    $features = { %{ $client->features // {} } };
    $peer_id //= $client->node_id;
    use Algorithm::RateLimiter::TokenBucket;

    # Per-torrent rate limiters; limit => 0 presumably means unlimited —
    # confirm against Algorithm::RateLimiter::TokenBucket.
    $limit_up   = Algorithm::RateLimiter::TokenBucket->new( limit => 0 );
    $limit_down = Algorithm::RateLimiter::TokenBucket->new( limit => 0 );
    if ($path) {

        # Load and decode a .torrent file from disk.
        my $raw = path($path)->slurp_raw;
        $metadata = bdecode($raw);
        $self->_emit( log => 'Missing info dictionary', level => 'fatal' ) unless ref $metadata eq 'HASH' && ref $metadata->{info} eq 'HASH';
        $self->_init_from_metadata();
    }
    elsif ( $infohash || $infohash_v1 || $infohash_v2 ) {

        # Magnet-style start: classify a bare infohash by its raw length.
        if ($infohash) {
            my $len = length $infohash;
            if    ( $len == 20 ) { $infohash_v1 = $infohash }
            elsif ( $len == 32 ) { $infohash_v2 = $infohash }
            else                 { $self->_emit( log => 'Invalid infohash length', level => 'fatal' ) }
        }
        my @tiers = map { [$_] } @$initial_trackers;
        $tracker_manager = Net::BitTorrent::Tracker->new( tiers_raw => \@tiers, debug => $debug );
        for my $endpoint (@$initial_peers) {
            if ( $endpoint =~ /^([^:]+):(\d+)$/ ) {
                $self->add_peer( { ip => $1, port => $2 } );
            }
        }
    }
    else {
        $self->_emit( log => 'Either path or infohash required', level => 'fatal' );
    }
}
|
258
|
|
|
|
|
|
|
|
|
259
|
|
|
|
|
|
|
# Validate a BEP 52 'file tree': every name must be a sane path element
# (no '', '.', '..', or slashes) and every file node (the '' key) must
# carry a hash of metadata with a non-negative length. Emits a fatal log
# on the first problem found.
#
# Returns 1 when the (sub)tree is valid, nothing otherwise.
# Fix: the original discarded the result of its recursive call, so a
# defect deep inside a nested directory emitted the fatal log but the
# caller could not observe the failure; the status is now propagated.
# (Backward-compatible: the existing caller ignores the return value.)
method _validate_file_tree ($tree) {
    if ( ref $tree ne 'HASH' ) {
        $self->_emit( log => 'Invalid file tree', level => 'fatal' );
        return;
    }
    for my $name ( keys %$tree ) {
        if ( $name eq '' || $name eq '.' || $name eq '..' || $name =~ /[\\\/]/ ) {
            $self->_emit( log => 'Invalid path element', level => 'fatal' );
            return;
        }
        my $node = $tree->{$name};
        if ( exists $node->{''} ) {

            # Leaf: the '' key holds the file's metadata dictionary.
            if ( ref $node->{''} ne 'HASH' ) {
                $self->_emit( log => 'Invalid file metadata', level => 'fatal' );
                return;
            }
            if ( ( $node->{''}{length} // -1 ) < 0 ) {
                $self->_emit( log => 'Invalid file length', level => 'fatal' );
                return;
            }
        }
        else {

            # Directory: recurse and propagate any failure upward.
            return unless $self->_validate_file_tree($node);
        }
    }
    return 1;
}
|
285
|
|
|
|
|
|
|
|
|
286
|
|
|
|
|
|
|
# Build all runtime state — infohashes, storage, bitfield, picker,
# trackers and webseeds — from a freshly decoded metadata dictionary.
# Emits a fatal log and returns early on the first structural problem.
method _init_from_metadata () {
    if ( !$metadata || ref $metadata->{info} ne 'HASH' ) {
        $self->_emit( log => 'Missing info dictionary', level => 'fatal' );
        return;
    }
    my $info = $metadata->{info};

    # --- Structural validation -------------------------------------------
    if ( ( $info->{'piece length'} // 0 ) <= 0 ) {
        $self->_emit( log => 'Invalid piece length', level => 'fatal' );
        return;
    }
    unless ( defined $info->{name} && length $info->{name} ) {
        $self->_emit( log => 'Missing name', level => 'fatal' );
        return;
    }
    unless ( $info->{pieces} || $info->{'file tree'} ) {
        $self->_emit( log => 'Torrent must have either \'pieces\' (v1) or \'file tree\' (v2)', level => 'fatal' );
        return;
    }

    # Validate file sizes and paths
    if ( $info->{'file tree'} ) {
        $self->_validate_file_tree( $info->{'file tree'} );
    }
    elsif ( $info->{files} ) {

        # Multi-file v1 layout.
        if ( ref $info->{files} ne 'ARRAY' || !@{ $info->{files} } ) {
            $self->_emit( log => 'Invalid files list', level => 'fatal' );
            return;
        }
        for my $entry ( @{ $info->{files} } ) {
            if ( ( $entry->{length} // -1 ) < 0 ) {
                $self->_emit( log => 'Invalid file length', level => 'fatal' );
                return;
            }
            if ( ref $entry->{path} ne 'ARRAY' || !@{ $entry->{path} } ) {
                $self->_emit( log => 'Missing path', level => 'fatal' );
                return;
            }
            for my $segment ( @{ $entry->{path} } ) {
                if ( $segment eq '' || $segment eq '.' || $segment eq '..' || $segment =~ /[\\\/]/ ) {
                    $self->_emit( log => 'Invalid path element', level => 'fatal' );
                    return;
                }
            }
        }
    }
    else {

        # Single-file v1 (v2 lengths live inside 'file tree', handled above).
        # BEP 03 requires 'length' here, but minimal test dictionaries may
        # omit it, so stay lenient when 'pieces' is present and no length
        # key exists at all.
        unless ( !exists $info->{length} && $info->{pieces} && !$info->{'file tree'} ) {
            if ( ( $info->{length} // -1 ) < 0 ) {
                $self->_emit( log => 'Invalid file length', level => 'fatal' );
                return;
            }
        }
    }

    # --- Derived identifiers ---------------------------------------------
    my $info_encoded = bencode($info);
    $infohash_v1 = sha1($info_encoded)   if $info->{pieces};
    $infohash_v2 = sha256($info_encoded) if $info->{'file tree'};
    $is_private  = $info->{private} // 0;
    $dht_nodes   = $metadata->{nodes} // [];

    # --- Storage ----------------------------------------------------------
    my $tree = $self->file_tree;
    $storage = Net::BitTorrent::Storage->new(
        base_path  => $base_path,
        file_tree  => $tree,
        piece_size => $info->{'piece length'},
        pieces_v1  => $info->{pieces}
    );
    if ( my $layers = $metadata->{'piece layers'} ) {
        $storage->set_piece_layer( $_, $layers->{$_} ) for keys %$layers;
    }

    # --- Bitfield & picker ------------------------------------------------
    # v1 piece count comes from the 20-byte SHA1 concatenation; for
    # v2-only torrents derive it from total payload size instead.
    my $num_pieces = int( length( $info->{pieces} // '' ) / 20 );
    if ( !$num_pieces && $info->{'file tree'} ) {
        my $payload = $self->_calculate_total_size();
        $num_pieces = int( ( $payload + $info->{'piece length'} - 1 ) / $info->{'piece length'} );
    }
    $bitfield = Acme::Bitfield->new( size => $num_pieces );
    $self->_init_picker();

    # --- Trackers & webseeds ---------------------------------------------
    my @tiers;
    if    ( $metadata->{'announce-list'} ) { @tiers = @{ $metadata->{'announce-list'} } }
    elsif ( $metadata->{announce} )        { @tiers = ( [ $metadata->{announce} ] ) }
    $tracker_manager = Net::BitTorrent::Tracker->new( tiers_raw => \@tiers, debug => $debug );
    my $urls = $metadata->{'url-list'} // [];
    $urls = [$urls] unless ref $urls eq 'ARRAY';
    push @webseeds, Net::BitTorrent::Tracker::WebSeed->new( url => $_ ) for @$urls;

    # --- Accounting -------------------------------------------------------
    my $total_size = $self->_calculate_total_size();
    $bytes_left = $total_size;
}
|
389
|
|
|
|
|
|
|
|
|
390
|
|
|
|
|
|
|
# (Re)create the piece picker. If no per-piece priorities were assigned
# yet, default every piece to priority 1 first.
method _init_picker () {
    if ( $bitfield && !@piece_priorities ) {
        @piece_priorities = (1) x $bitfield->size;
    }
    $picker = Net::BitTorrent::Torrent::PiecePicker->new(
        bitfield         => $bitfield,
        piece_priorities => \@piece_priorities,
        strategy         => $picking_strategy,
    );
}
|
400
|
|
|
|
|
|
|
|
|
401
|
|
|
|
|
|
|
# Forward a per-piece priority change to the picker (no-op before init).
method set_piece_priority ( $index, $priority ) {
    return unless $picker;
    $picker->set_priority( $index, $priority );
}
|
404
|
|
|
|
|
|
|
|
|
405
|
|
|
|
|
|
|
# Switch the piece-selection strategy on the picker (no-op before init).
method set_picking_strategy ($strategy) {
    return unless $picker;
    $picker->set_strategy($strategy);
}
|
408
|
|
|
|
|
|
|
|
|
409
|
|
|
|
|
|
|
# Allowed Fast set (BEP 6). Simplified: always offers the first five
# piece indices (or fewer for tiny torrents) rather than deriving the set
# from $ip and the infohash as the spec describes.
# NOTE(review): $ip is currently unused — confirm whether spec-conformant
# derivation is planned.
method get_allowed_fast_set ($ip) {
    return [] unless $bitfield && $bitfield->size;
    my $limit = $bitfield->size < 5 ? $bitfield->size : 5;
    return [ 0 .. $limit - 1 ];
}
|
417
|
|
|
|
|
|
|
|
|
418
|
|
|
|
|
|
|
# Suggest Piece (BEP 6): return the lowest-indexed piece we already hold,
# or undef when we have none. ($peer is currently unused.)
method suggest_piece ($peer) {
    return undef unless $bitfield && $bitfield->count > 0;
    for my $idx ( 0 .. $bitfield->size - 1 ) {
        return $idx if $bitfield->get($idx);
    }
    return undef;
}
|
425
|
|
|
|
|
|
|
|
|
426
|
|
|
|
|
|
|
# Record swarm statistics returned by a BEP 33 DHT scrape ('sn' seeders,
# 'ln' leechers) and log them when debugging.
method handle_dht_scrape ($res) {
    $dht_seeders  = $res->{sn} if exists $res->{sn};
    $dht_leechers = $res->{ln} if exists $res->{ln};
    return unless $debug;
    my $label = $metadata ? $metadata->{info}{name} : 'unknown';
    $self->_emit(
        log   => ' [DHT] Scrape results for ' . $label . ": $dht_seeders seeds, $dht_leechers leechers\n",
        level => 'info'
    );
}
|
436
|
|
|
|
|
|
|
|
|
437
|
|
|
|
|
|
|
# Drive one scheduler step of $delta seconds: refill the rate limiters,
# tick storage/trackers/peers, keep interest and piece requests flowing,
# and run the slower periodic jobs (choking, PEX, announces, DHT).
method tick ( $delta = 0.1 ) {
    return if $state == STATE_STOPPED || $state == STATE_PAUSED;
    $limit_up->tick($delta);
    $limit_down->tick($delta);
    $storage->tick($delta)         if $storage;
    $tracker_manager->tick($delta) if $tracker_manager;

    # Attempt to connect to discovered peers if we need more
    $self->_attempt_connections() if keys %peer_objects < 50;
    for my $peer ( values %peer_objects ) {
        $peer->tick();
        if ( $state == STATE_METADATA ) {
            $self->_request_metadata($peer);
            next;
        }
        next unless $state == STATE_RUNNING;

        # Keep our interest flag in sync with what the picker wants.
        my $wants = $picker->is_interesting($peer);
        if ( $wants && !$peer->am_interested ) {
            $peer->interested();
        }
        elsif ( !$wants && $peer->am_interested ) {
            $peer->not_interested();
        }

        # Request pieces if not choked
        $self->_request_pieces($peer) if !$peer->peer_choking && $peer->am_interested;
    }

    # Choke evaluation + housekeeping + status broadcast every 10s.
    $choke_timer += $delta;
    if ( $choke_timer >= 10 ) {
        $self->_evaluate_choking();
        $self->_cleanup_connections();
        $self->_emit( 'status_update',
            { downloaded => $bytes_downloaded, uploaded => $bytes_uploaded, left => $bytes_left, peers => scalar keys %peer_objects, } );
        $choke_timer = 0;
    }

    # PEX gossip every 60s.
    $pex_timer += $delta;
    if ( $pex_timer >= 60 ) {
        $self->_broadcast_pex();
        $pex_timer = 0;
    }
    $tracker_timer += $delta;
    if ( $tracker_timer >= 60 ) {    # Every 60s check if trackers want an announce
        $self->announce();
        $tracker_timer = 0;
    }
    $dht_lookup_timer += $delta;

    # Accelerate DHT lookups during startup/metadata phase or if starved for peers
    my $dht_interval = ( $state == STATE_METADATA || keys %peer_objects < 5 ) ? 2 : 120;
    if ( $dht_lookup_timer >= $dht_interval ) {
        $self->_update_dht_search();
        $dht_lookup_timer = 0;
    }
}
|
495
|
|
|
|
|
|
|
field %attempted_connections; # ip:port => timestamp |
|
496
|
|
|
|
|
|
|
|
|
497
|
|
|
|
|
|
|
# Open outbound connections to discovered peers, throttled to at most one
# batch of 5 attempts every 5 seconds, skipping endpoints we are already
# connected to or tried within the last minute.
method _attempt_connections () {
    state $last_attempt = 0;
    return if time() - $last_attempt < 5;    # Don't spam connection attempts
    $last_attempt = time();
    my $discovered = $self->discovered_peers;
    my $started    = 0;
    my $ih         = $infohash_v2 || $infohash_v1;
    for my $candidate (@$discovered) {
        my $key = "$candidate->{ip}:$candidate->{port}";
        next if $peer_objects{$key};
        next if $attempted_connections{$key} && ( time() - $attempted_connections{$key} < 60 );

        # Try to connect
        $attempted_connections{$key} = time();
        $client->connect_to_peer( $candidate->{ip}, $candidate->{port}, $ih );
        last if ++$started >= 5;    # Limit concurrent attempts
    }
}
|
516
|
|
|
|
|
|
|
|
|
517
|
|
|
|
|
|
|
# Tit-for-tat choke round: unchoke the 4 best-performing interested peers
# (ranked by download rate while leeching, upload rate once seeding) plus
# one optimistic unchoke slot rotated roughly every 30 seconds.
method _evaluate_choking () {
    $self->_emit( log => " [DEBUG] Evaluating choking for " . scalar( keys %peer_objects ) . " peers\n", level => 'debug' ) if $debug;
    my @interested = grep { $_->peer_interested } values %peer_objects;

    # Even if nobody is interested in US, we should still unchoke some if we want pieces?
    # No, unchoking is for UPLOAD. For DOWNLOAD, we need to send INTERESTED.
    my $still_leeching = $bitfield && $bitfield->count < $bitfield->size;
    my @ranked = $still_leeching
        ? sort { $b->rate_down <=> $a->rate_down } @interested
        : sort { $b->rate_up <=> $a->rate_up } @interested;
    my $max_unchoked = 4;
    my %to_unchoke;
    for my $slot ( 0 .. $max_unchoked - 1 ) {
        last if $slot > $#ranked;
        $to_unchoke{ $ranked[$slot] } = 1;    # keyed by stringified peer object
    }

    # Rotate the optimistic slot; this method runs on a ~10s cadence.
    $optimistic_timer += 10;
    if ( $optimistic_timer >= 30 || !$optimistic_unchoke_peer ) {
        my @candidates = grep { !$to_unchoke{$_} } @interested;
        $optimistic_unchoke_peer = $candidates[ rand @candidates ] if @candidates;
        $optimistic_timer = 0;
    }
    $to_unchoke{$optimistic_unchoke_peer} = 1 if $optimistic_unchoke_peer;

    # Apply only state transitions — never redundant choke/unchoke messages.
    for my $peer ( values %peer_objects ) {
        if ( $to_unchoke{$peer} ) {
            $peer->unchoke() if $peer->am_choking;
        }
        else {
            $peer->choke() if !$peer->am_choking;
        }
    }
}
|
553
|
|
|
|
|
|
|
field %metadata_pending; # peer => piece_index |
|
554
|
|
|
|
|
|
|
|
|
555
|
|
|
|
|
|
|
method _request_metadata ($peer) {

    # Request one 16 KiB slice of the info dict from a BEP 10 / ut_metadata
    # capable peer (BEP 09 magnet metadata exchange).
    return unless $peer->protocol->isa('Net::BitTorrent::Protocol::BEP10');
    return unless exists $peer->protocol->remote_extensions->{ut_metadata};

    # The metadata size comes from the peer's extended handshake.
    my $advertised = $peer->protocol->metadata_size;
    return unless $advertised > 0;
    if ( $metadata_size == 0 ) {
        $metadata_size = $advertised;
        $self->_emit( log => " [DEBUG] Metadata size identified: $metadata_size bytes\n", level => 'debug' ) if $debug;
    }

    # Only one outstanding metadata request per peer.
    return if exists $metadata_pending{$peer};

    # BEP 09 fixes the transfer unit at 16 KiB pieces. Greedily grab the
    # first piece nobody holds or is fetching.
    my $total_pieces = int( ( $metadata_size + 16383 ) / 16384 );
  PIECE: for my $idx ( 0 .. $total_pieces - 1 ) {
        next PIECE if exists $metadata_pieces{$idx};

        # Skip pieces another peer is already requesting.
        for my $in_flight ( values %metadata_pending ) {
            next PIECE if $in_flight == $idx;
        }
        $metadata_pending{$peer} = $idx;
        $peer->protocol->send_metadata_request($idx);
        return;
    }
}
|
594
|
|
|
|
|
|
|
|
|
595
|
|
|
|
|
|
|
method _request_pieces ($peer) {

    # Keep up to 20 block requests in flight toward this peer.
    return if $peer->blocks_inflight >= 20;    # throttle per-peer
    my $pbitfield = $peer_bitfields{$peer};
    return unless $pbitfield;                  # no bitfield yet (e.g. metadata phase)
    while ( $peer->blocks_inflight < 20 ) {
        my ( $index, $begin, $len ) = $picker->pick_block( $peer, \%blocks_pending );
        last unless defined $index;

        # BUGFIX: record the owning peer object (not a bare truthy 1) so that
        # peer_disconnected() can match `== $peer` and reclaim this slot.
        # This also makes the bookkeeping consistent with get_next_request().
        $blocks_pending{$index}{$begin} = $peer;
        $block_sources{$index}{$begin}  = $peer;
        $peer->request( $index, $begin, $len );
        $self->_emit( log => " [DEBUG] Requested block at $begin of piece $index from " . $peer->ip . "\n", level => 'debug' ) if $debug;
    }
}
|
612
|
|
|
|
|
|
|
|
|
613
|
|
|
|
|
|
|
method handle_metadata_request ( $peer, $piece ) {

    # Serve one 16 KiB slice of our bencoded info dict to a peer (BEP 09).
    # No-op until we actually hold the metadata ourselves.
    return unless $metadata;
    my $encoded = bencode( $metadata->{info} );
    my $slice   = substr( $encoded, $piece * 16384, 16384 );
    $peer->protocol->send_metadata_data( $piece, length($encoded), $slice );
}
|
619
|
|
|
|
|
|
|
|
|
620
|
|
|
|
|
|
|
method handle_metadata_data ( $peer, $piece, $total_size, $data ) {

    # A metadata slice arrived. Stash it; once every slice is present,
    # assemble and verify the info dict against the v1 infohash.
    delete $metadata_pending{$peer} if defined $peer;
    $metadata_size = $total_size if $metadata_size == 0;
    $self->_emit(
        log => " [DEBUG] Received metadata piece $piece (len " . length($data) . ") from " . ( $peer ? $peer->ip : "unknown" ) . "\n",
        level => 'debug'
    ) if $debug;
    $metadata_pieces{$piece} = $data;
    my $total_pieces = int( ( $metadata_size + 16383 ) / 16384 );
    $self->_emit( log => " [DEBUG] Metadata progress: " . scalar( keys %metadata_pieces ) . "/$total_pieces pieces\n", level => 'debug' )
        if $debug;
    return unless scalar keys %metadata_pieces == $total_pieces;

    # All slices present: stitch them together in index order.
    my $assembled = join '', map { $metadata_pieces{$_} } sort { $a <=> $b } keys %metadata_pieces;

    # The SHA-1 of the raw info dict must equal the v1 infohash; otherwise a
    # peer fed us garbage and we start over.
    if ( sha1($assembled) ne $infohash_v1 ) {
        $self->_emit( log => " [ERROR] Metadata verification FAILED! Hash mismatch.\n", level => 'error' );
        %metadata_pieces = ();
        return;
    }

    # Decode and start torrent
    $metadata = { info => bdecode($assembled) };
    $self->_on_metadata_received();
}
|
648
|
|
|
|
|
|
|
|
|
649
|
|
|
|
|
|
|
method handle_metadata_reject ( $peer, $piece ) {

    # The peer declined our BEP 09 request; free its slot so the piece can
    # be requested from someone else.
    delete $metadata_pending{$peer} if defined $peer;
    $self->_emit( log => " [DEBUG] Peer rejected metadata request for piece $piece\n", level => 'debug' ) if $debug;
}
|
653
|
|
|
|
|
|
|
|
|
654
|
|
|
|
|
|
|
method _on_metadata_received () {

    # Metadata is complete and verified: stand up storage, the bitfield, the
    # piece picker, and move the torrent into the running state.
    $self->_emit( log => " [DEBUG] Metadata fully received and verified\n", level => 'debug' ) if $debug;
    my $info = $metadata->{info};

    # Multi-file torrents live inside a directory named after the torrent;
    # a single-file torrent goes straight under the base path.
    my $storage_path = ( $info->{'file tree'} || $info->{files} ) ? $base_path->child( $info->{name} ) : $base_path;

    $self->_emit( log => " [DEBUG] Initializing storage at $storage_path\n", level => 'debug' ) if $debug;
    $storage = Net::BitTorrent::Storage->new(
        base_path  => $storage_path,
        piece_size => $info->{'piece length'},
        pieces_v1  => $info->{pieces},
    );

    # Register payload files: v2 file tree, v1 multi-file, or v1 single-file.
    if ( my $tree = $info->{'file tree'} ) {
        $storage->load_file_tree($tree);
    }
    elsif ( my $files = $info->{files} ) {
        for my $f (@$files) {
            $storage->add_file( Path::Tiny::path( @{ $f->{path} } ), $f->{length} );
        }
    }
    else {
        $storage->add_file( $info->{name}, $info->{length} );
    }

    # Piece count: from the v1 'pieces' string (one SHA-1 = 20 bytes each)
    # when available, otherwise from storage (v2-only torrents).
    my $num_pieces = exists $info->{pieces} ? length( $info->{pieces} ) / 20 : 0;
    $num_pieces ||= $storage->piece_count;
    $bitfield = Acme::Bitfield->new( size => $num_pieces );

    $picker = Net::BitTorrent::Torrent::PiecePicker->new( bitfield => $bitfield, );
    $state  = STATE_RUNNING;
    $self->_emit('started');

    # Peers that connected during the metadata phase could not be given a
    # correctly sized bitfield; do it now.
    $self->init_peer_bitfield($_) for values %peer_objects;

    # Announce to trackers now that we have full infohash info.
    $self->announce();
}
|
706
|
|
|
|
|
|
|
|
|
707
|
|
|
|
|
|
|
method receive_block ( $peer, $index, $begin, $data ) {

    # Accept one block from a peer. Returns 1 when the block completed a
    # piece that has been queued for verification, 0 in every other case.
    return 0 unless $bitfield;
    return 0 if $bitfield->get($index);    # piece already verified on disk

    # Duplicate delivery for a piece that is already complete/queued: ignore.
    if ( exists $blocks_received{$index} && $self->is_piece_complete($index) ) {
        return 0;
    }

    # BitTorrent v2 allows verifying each 16 KiB block against the file's
    # merkle pieces root before accepting it.
    my ( $root, $rel_piece ) = $storage->map_v2_piece($index);
    if ( defined $root ) {
        my $info        = $metadata->{info};
        my $block_index = ( $rel_piece * ( $info->{'piece length'} / 16384 ) ) + ( $begin / 16384 );
        if ( !$storage->verify_block( $root, $block_index, $data ) ) {
            $self->_emit(
                log => " [ERROR] v2 block verification FAILED for block $block_index of root " . unpack( 'H*', $root ) . "\n",
                level => 'error'
            ) if $debug;
            $peer->adjust_reputation(-50) if $peer;
            return 0;
        }
    }
    $self->_store_block( $peer, $index, $begin, $data );
    if ( $self->is_piece_complete($index) ) {
        $self->_emit( log => " [DEBUG] Piece $index is COMPLETE\n", level => 'debug' ) if $debug;
        return 0 if $bitfield->get($index);
        my $piece_data = $self->_get_full_piece($index);
        if ($piece_data) {
            $self->_clear_piece_data($index);

            # Hashing is offloaded to the client's throttled verification queue.
            $client->queue_verification( $self, $index, $piece_data );
            return 1;
        }
    }
    return 0;
}
|
744
|
|
|
|
|
|
|
|
|
745
|
|
|
|
|
|
|
method _clear_piece_data ($index) {

    # Drop the in-memory block cache for one piece (handed off or abandoned).
    delete $block_cache{$index};
}
|
748
|
|
|
|
|
|
|
|
|
749
|
|
|
|
|
|
|
method _verify_queued_piece ( $index, $piece_data ) {

    # Verify an assembled piece (v2 merkle first where a pieces root exists,
    # falling back to v1 SHA-1), persist it on success, and adjust the
    # reputation of every contributing peer. Returns 1 on success, -1 on failure.
    my $sources  = delete $block_sources{$index} // {};
    my $verified = 0;
    my ( $root, $rel_index ) = $storage->map_v2_piece($index);
    if ( defined $root ) {
        my $res = $storage->verify_piece_v2( $root, $rel_index, $piece_data );
        if ( defined $res ) {
            $verified = $res;
        }
        else {
            # v2 could not decide (e.g. missing layer); fall back to v1.
            # (Normally v2 is authoritative when pieces_root exists.)
            $verified = $storage->verify_piece_v1( $index, $piece_data ) // 0;
        }
    }
    else {
        $verified = $storage->verify_piece_v1( $index, $piece_data ) // 0;
    }
    if ($verified) {
        $storage->write_piece_v1( $index, $piece_data );
        $bitfield->set($index);
        $bytes_downloaded += length($piece_data);
        $bytes_left       -= length($piece_data);
        $self->_emit( log => "\n [DEBUG] Piece $index VERIFIED successfully via throttled queue\n", level => 'debug' ) if $debug;
        $self->_clear_piece_cache($index);
        $self->_emit( 'piece_verified', $index );

        # Small reward for every peer that supplied a block of this piece.
        $_->adjust_reputation(1) for grep { defined } values %$sources;
        return 1;
    }
    else {
        $self->_emit( log => "\n [DEBUG] Piece $index FAILED verification (len " . length( $piece_data // '' ) . ")\n", level => 'debug' )
            if $debug;
        $self->_clear_piece_cache($index);
        $self->_emit( 'piece_failed', $index );

        # Heavier penalty: one of these peers sent corrupt data.
        $_->adjust_reputation(-20) for grep { defined } values %$sources;
        return -1;
    }
}
|
793
|
|
|
|
|
|
|
|
|
794
|
|
|
|
|
|
|
method _store_block ( $peer, $index, $begin, $data ) {

    # Cache a received block; duplicates (common in end-game) are ignored.
    return if $blocks_received{$index}{$begin};
    ( $block_cache{$index} //= {} )->{$begin} = $data;
    $blocks_received{$index}{$begin} = 1;
    $block_sources{$index}{$begin} = $peer if $peer;    # credit the sender
    delete $blocks_pending{$index}{$begin};             # no longer outstanding
}
|
802
|
|
|
|
|
|
|
|
|
803
|
|
|
|
|
|
|
method is_piece_complete ($index) {

    # A piece is complete once all of its 16 KiB blocks have been received.
    my $expected = int( ( $self->piece_length($index) + 16383 ) / 16384 );
    return ( scalar keys %{ $blocks_received{$index} // {} } ) == $expected;
}
|
808
|
|
|
|
|
|
|
|
|
809
|
|
|
|
|
|
|
method piece_length ($index) {

    # Length of piece $index: the standard piece length everywhere except the
    # final piece, which holds whatever remains of the payload.
    my $total    = $self->_calculate_total_size();
    my $standard = $metadata->{info}{'piece length'} // 16384;
    my $count    = int( ( $total + $standard - 1 ) / $standard );
    return $standard unless $index == $count - 1;
    my $tail = $total % $standard;
    return $tail == 0 ? $standard : $tail;
}
|
819
|
|
|
|
|
|
|
|
|
820
|
|
|
|
|
|
|
method _get_full_piece ($index) {

    # Reassemble one piece from cached blocks, walking offsets in order;
    # returns undef as soon as any block is missing.
    my $cache = $block_cache{$index} or return undef;
    my $want  = $self->piece_length($index);
    my $buf   = '';
    my $pos   = 0;
    while ( $pos < $want ) {
        my $chunk = $cache->{$pos} or return undef;
        $buf .= $chunk;
        $pos += length $chunk;
    }
    return $buf;
}
|
832
|
|
|
|
|
|
|
|
|
833
|
|
|
|
|
|
|
method _clear_piece_cache ($index) {

    # Forget all transient per-piece bookkeeping once the piece is settled
    # (verified or failed).
    $self->_clear_piece_data($index);
    delete $blocks_pending{$index};
    delete $block_sources{$index};
}
|
838
|
|
|
|
|
|
|
|
|
839
|
|
|
|
|
|
|
method get_next_request ($peer) {

    # Choose the next block to request from $peer; undef when nothing suits.
    return undef if $state ne STATE_RUNNING;
    my $p_bf = $peer_bitfields{$peer};
    return undef unless $p_bf;    # peer has not told us what it holds yet

    # Flip the picker into end-game mode when only a handful of pieces (or
    # under 1% of the torrent) remain, so the last blocks get requested from
    # several peers simultaneously.
    if ( !$picker->end_game ) {
        my $remaining = $bitfield->size - $bitfield->count;
        if ( $remaining <= 3 || $remaining < ( $bitfield->size / 100 ) ) {
            $self->_emit( log => " [DEBUG] Entering END-GAME mode\n", level => 'debug' ) if $debug;
            $picker->enter_end_game();
        }
    }
    my ( $piece_idx, $offset, $len ) = $picker->pick_block( $peer, \%blocks_pending );
    return undef unless defined $piece_idx;

    # Remember which peer owns this outstanding request.
    $blocks_pending{$piece_idx}{$offset} = $peer;
    return { index => $piece_idx, begin => $offset, length => $len };
}
|
863
|
|
|
|
|
|
|
|
|
864
|
|
|
|
|
|
|
method peer_disconnected ($peer) {

    # Tear down all per-peer state and return the peer's in-flight block
    # requests to the pool so other peers can pick them up.
    my $ip_port = $peer->ip . ':' . $peer->port;
    $self->_emit( log => " [DEBUG] Peer disconnected: $ip_port\n", level => 'debug' ) if $debug;
    delete $metadata_pending{$peer} if defined $peer;

    # Advertise the departure via PEX on the next broadcast.
    $pex_dropped{$ip_port} = { ip => $peer->ip, port => $peer->port };
    delete $pex_added{$ip_port};
    if ( my $bf = $peer_bitfields{$peer} ) {
        $picker->update_availability( $bf, -1 ) if $picker;
    }

    # Reclaim outstanding requests owned by this peer. ROBUSTNESS: guard with
    # ref() — some code paths historically stored a plain truthy flag instead
    # of the peer object, and numifying a non-ref against an object address is
    # never a meaningful comparison.
    for my $i ( keys %blocks_pending ) {
        for my $offset ( keys %{ $blocks_pending{$i} } ) {
            my $owner = $blocks_pending{$i}{$offset};
            if ( ref($owner) && $owner == $peer ) {
                delete $blocks_pending{$i}{$offset};
            }
        }
    }
    delete $peer_bitfields{$peer};
    delete $peer_objects{$ip_port};
}
|
883
|
|
|
|
|
|
|
|
|
884
|
|
|
|
|
|
|
method set_peer_bitfield ( $peer, $data ) {

    # Replace our record of which pieces $peer holds with a full bitfield
    # and refresh the picker's availability counts.
    return unless $bitfield;
    my $bf = Acme::Bitfield->new( size => $bitfield->size );
    $bf->set_data($data);

    # Retract the peer's previous contribution before adding the new one.
    if ( my $old_bf = $peer_bitfields{$peer} ) {
        $picker->update_availability( $old_bf, -1 ) if $picker;
    }
    $peer_bitfields{$peer} = $bf;

    # Advertise this peer via PEX with BEP 11 flag bits.
    my $flags = 0;
    $flags |= 0x01 if $peer->transport->filter;    # Encrypted
    $flags |= 0x02 if $bf->count == $bf->size;     # Seeder
    my $key = $peer->ip . ':' . $peer->port;
    $pex_added{$key} = { ip => $peer->ip, port => $peer->port, flags => $flags };
    delete $pex_dropped{$key};
    $picker->update_availability( $bf, 1 ) if $picker;
}
|
899
|
|
|
|
|
|
|
|
|
900
|
|
|
|
|
|
|
method update_peer_have ( $peer, $index ) {

    # Handle a HAVE message: mark $index in the peer's bitfield and bump the
    # picker's availability for that single piece.
    return unless $bitfield;    # might not be initialized yet during metadata phase
    $self->init_peer_bitfield($peer) unless $peer_bitfields{$peer};
    $peer_bitfields{$peer}->set($index) if $peer_bitfields{$peer};

    # The picker takes availability deltas as bitfields, so build a
    # throwaway one-piece bitfield for the increment.
    my $delta = Acme::Bitfield->new( size => $bitfield->size );
    $delta->set($index);
    $picker->update_availability( $delta, 1 ) if $picker;
}
|
908
|
|
|
|
|
|
|
|
|
909
|
|
|
|
|
|
|
method init_peer_bitfield ($peer) {

    # Lazily create the bitfield tracked for $peer, applying whatever
    # have-all / have-none / raw-bitfield status the peer reported before the
    # piece count was known.
    return if $peer_bitfields{$peer};
    return unless $bitfield;
    my $bf = Acme::Bitfield->new( size => $bitfield->size );
    $peer_bitfields{$peer} = $bf;

    # Apply stored status (if any); without one the peer starts at all-zero
    # and the picker is not touched.
    my $status = $peer->bitfield_status;
    return unless defined $status;
    if ( $status eq 'all' ) {
        $bf->fill();
    }
    elsif ( $status eq 'none' ) {
        # already zeros
    }
    else {
        $bf->set_data($status);
    }
    $picker->update_availability( $bf, 1 ) if $picker;
}
|
931
|
|
|
|
|
|
|
|
|
932
|
|
|
|
|
|
|
method set_peer_have_all ($peer) {

    # HAVE_ALL: mark every piece available from this peer, keeping the
    # picker's availability counts balanced (retract old view, add full set).
    $self->init_peer_bitfield($peer);
    my $bf = $peer_bitfields{$peer} or return;
    $picker->update_availability( $bf, -1 ) if $picker;
    $bf->fill();
    $picker->update_availability( $bf, 1 ) if $picker;
}
|
939
|
|
|
|
|
|
|
|
|
940
|
|
|
|
|
|
|
method set_peer_have_none ($peer) {

    # HAVE_NONE: an all-zero bitfield is exactly what init gives us.
    $self->init_peer_bitfield($peer);
}
|
943
|
|
|
|
|
|
|
|
|
944
|
|
|
|
|
|
|
method _broadcast_pex () {

    # Push PEX deltas (added/dropped peers) to every BEP 11 capable peer,
    # then reset the delta sets.
    return unless keys %pex_added || keys %pex_dropped;

    # Limit to 100 peers per message per BEP 11.
    my @added   = values %pex_added;
    my @dropped = values %pex_dropped;
    @added   = splice( @added,   0, 100 ) if @added > 100;
    @dropped = splice( @dropped, 0, 100 ) if @dropped > 100;

    # PEX carries IPv4 and IPv6 peers in separate lists.
    my @added4   = grep { $_->{ip} !~ /:/ } @added;
    my @added6   = grep { $_->{ip} =~ /:/ } @added;
    my @dropped4 = grep { $_->{ip} !~ /:/ } @dropped;
    my @dropped6 = grep { $_->{ip} =~ /:/ } @dropped;
    for my $peer ( values %peer_objects ) {
        next unless $peer->protocol->isa('Net::BitTorrent::Protocol::BEP11');

        # Never tell a peer about itself.
        my @final_added4 = grep { $_->{ip} ne $peer->ip || $_->{port} != $peer->port } @added4;
        my @final_added6 = grep { $_->{ip} ne $peer->ip || $_->{port} != $peer->port } @added6;
        next unless @final_added4 || @final_added6 || @dropped4 || @dropped6;
        $peer->protocol->send_pex( \@final_added4, \@dropped4, \@final_added6, \@dropped6 );
    }
    %pex_added   = ();
    %pex_dropped = ();
}
|
973
|
|
|
|
|
|
|
|
|
974
|
|
|
|
|
|
|
method fetch_from_webseeds ($index) {

    # Try each configured HTTP web seed (BEP 19) until one yields a piece
    # that passes v1 verification. Returns 1 on success, 0 otherwise.
    my $segments = $storage->map_v1_piece($index);

    # ROBUSTNESS: guard against map_v1_piece returning undef, which would
    # make the @$segments dereference die.
    return 0 unless $segments && @$segments;
    for my $seg (@$segments) {
        $seg->{rel_path} = $seg->{file}->path->relative($base_path)->stringify;
    }
    for my $ws (@webseeds) {
        try {
            my $data = $ws->fetch_piece($segments);
            if ( $storage->verify_piece_v1( $index, $data ) ) {
                $storage->write_piece_v1( $index, $data );
                $bitfield->set($index);

                # feature-'try' blocks are real blocks, so this returns from
                # the method itself.
                return 1;
            }
        }
        catch ($e) {
            # best-effort: a failing web seed just falls through to the next
        }
    }
    return 0;
}
|
993
|
|
|
|
|
|
|
|
|
994
|
|
|
|
|
|
|
method primary_pieces_root () {

    # Convenience accessor: the pieces root of the first file located in the
    # v2 file tree.
    return $self->_find_first_root( $self->file_tree );
}
|
997
|
|
|
|
|
|
|
|
|
998
|
|
|
|
|
|
|
method _find_first_root ($tree) {

    # Depth-first search of a BEP 52 file tree for the first defined
    # 'pieces root'. File entries are marked by an empty-string key.
    # BUGFIX: an empty (zero-length) file legitimately has no pieces root;
    # previously hitting such a node returned undef and aborted the whole
    # search instead of continuing to the next entry.
    for my $node ( values %$tree ) {
        if ( exists $node->{''} ) {
            my $root = $node->{''}{'pieces root'};
            return $root if defined $root;
        }
        else {
            my $r = $self->_find_first_root($node);
            return $r if $r;
        }
    }
    return undef;
}
|
1010
|
|
|
|
|
|
|
|
|
1011
|
|
|
|
|
|
|
method _calculate_total_size () {

    # Total payload size in bytes, for either metadata flavour.
    my $info = $metadata->{info};

    # v2: walk the file tree.
    return $self->_sum_file_tree( $info->{'file tree'} ) if $info->{'file tree'};

    # v1: single-file 'length' plus any multi-file entries.
    my $total = $info->{length} // 0;
    if ( $info->{files} ) {
        $total += $_->{length} for @{ $info->{files} };
    }
    return $total;
}
|
1027
|
|
|
|
|
|
|
|
|
1028
|
|
|
|
|
|
|
method _sum_file_tree ($tree) {

    # Recursively sum the 'length' of every file node (empty-string key) in a
    # BEP 52 file tree.
    my $sum = 0;
    for my $node ( values %$tree ) {
        $sum += exists $node->{''} ? $node->{''}{length} : $self->_sum_file_tree($node);
    }
    return $sum;
}
|
1040
|
|
|
|
|
|
|
|
|
1041
|
|
|
|
|
|
|
method announce ( $event = undef, $cb = undef ) {

    # Announce to every tracker. Discovered peers flow through add_peer()
    # and, if supplied, the caller's callback. Returns the currently known
    # peer list (announces complete asynchronously).
    my @ihs = grep {$_} $infohash_v2, $infohash_v1;    # prefer v2 first
    my $params = {
        infohash   => \@ihs,
        peer_id    => $peer_id,
        port       => 6881,
        uploaded   => $bytes_uploaded,
        downloaded => $bytes_downloaded,
        left       => $bytes_left,
        compact    => 1,
        ( $client && $client->can('user_agent') ? ( ua => $client->user_agent ) : () ),
    };
    $params->{event} = $event if $event;

    # Weaken the closure's self-reference so a pending announce cannot keep
    # a stopped torrent alive.
    my $weak_self = $self;
    builtin::weaken($weak_self);
    my $on_peers = sub ($peers) {
        return unless $weak_self;
        $weak_self->add_peer($_) for @$peers;
        $cb->($peers) if $cb;
    };
    $tracker_manager->announce_all( $params, $on_peers );
    return [ values %peers ];
}
|
1066
|
|
|
|
|
|
|
|
|
1067
|
|
|
|
|
|
|
method add_peer ($peer) {

    # Register a peer record from any discovery source. $peer may be an
    # object with ip/port accessors or a plain hashref; try the accessor
    # first and fall back to hash keys.
    my $ip;
    try { $ip = $peer->ip }
    catch ($e) { $ip = $peer->{ip} // $peer->{address} }
    my $port;
    try { $port = $peer->port }
    catch ($e) { $port = $peer->{port} }
    $self->_emit( log => " [DEBUG] Torrent::add_peer: $ip:$port\n", level => 'debug' ) if $debug;
    return unless $ip && $port;
    my $key = "$ip:$port";
    return if $peers{$key};    # already known
    my $flags = 0;
    try { $flags = $peer->flags }
    catch ($e) { }
    $peers{$key} = { ip => $ip, port => $port, flags => $flags };

    # Newly discovered peers also go out via PEX on the next broadcast.
    $pex_added{$key} = $peers{$key};
    delete $pex_dropped{$key};
    $self->_emit( 'peer_discovered', $peers{$key} );
}
|
1097
|
|
|
|
|
|
|
|
|
1098
|
|
|
|
|
|
|
method add_dht_nodes ($nodes) {

    # Feed candidate DHT nodes into the lookup frontier, deduplicated by
    # node ID (hex) and capped to keep memory bounded.
  NODE: for my $node (@$nodes) {

        # Nodes arrive either as plain hashrefs or as objects with accessors.
        my ( $id, $ip, $port );
        if ( ref $node eq 'HASH' ) {
            $id   = $node->{id};
            $ip   = $node->{ip} || $node->{address};
            $port = $node->{port};
        }
        elsif ( ref $node ) {
            try {
                if ( $node->can('id') ) {
                    $id   = $node->id;
                    $ip   = $node->ip;
                    $port = $node->port;
                }
            }
            catch ($e) { }
        }
        next NODE unless $id && $ip && $port;
        my $nid_hex = unpack( 'H*', $id );
        next NODE if exists $dht_frontier{$nid_hex};

        # Cap frontier size: beyond the cap we simply stop accepting new
        # nodes (a smarter client might evict less-desirable nodes instead).
        next NODE if keys %dht_frontier > 500;
        $dht_frontier{$nid_hex} = { id => $id, ip => $ip, port => $port, visited => 0 };
    }
}
|
1131
|
|
|
|
|
|
|
|
|
1132
|
|
|
|
|
|
|
method ban_peer ( $ip, $port ) {

    # Forget the peer and stamp its attempt record one hour into the future,
    # so the connection throttle keeps it out until the cleanup sweep.
    my $key = "$ip:$port";
    delete $peers{$key};
    $attempted_connections{$key} = time() + 3600;    # Ban for an hour
}
|
1137
|
|
|
|
|
|
|
|
|
1138
|
|
|
|
|
|
|
method _cleanup_connections () {

    # Expire connection-attempt records older than an hour so those peers
    # become eligible for retry again.
    my $now = time();
    delete @attempted_connections{ grep { $now - $attempted_connections{$_} > 3600 } keys %attempted_connections };
}
|
1146
|
|
|
|
|
|
|
|
|
1147
|
|
|
|
|
|
|
method register_peer_object ($peer_obj) {

    # Index a live peer connection by "ip:port" for later lookup.
    $peer_objects{ $peer_obj->ip . ':' . $peer_obj->port } = $peer_obj;
}
|
1151
|
|
|
|
|
|
|
|
|
1152
|
|
|
|
|
|
|
method start_dht_lookup () {

    # Kick off DHT peer discovery for our infohash(es): query the local
    # routing table AND a few well-known bootstrap routers directly, so we
    # get answers even when the routing table is still empty.
    return if $is_private;    # private torrents must not touch the DHT
    my $dht = $client->dht() or return;
    my @ihs = grep {$_} $infohash_v2, $infohash_v1;

    # Bootstrap routers are given as hostnames; the UDP socket layer is
    # assumed to resolve them on send (see IO::Socket::IP).
    my @boot_nodes = ( [ 'router.bittorrent.com', 6881 ], [ 'router.utorrent.com', 6881 ], [ 'dht.transmissionbt.com', 6881 ], );
    for my $ih (@ihs) {
        $self->_emit( log => " [DEBUG] Starting DHT peer search for " . unpack( 'H*', $ih ) . "\n", level => 'debug' ) if $debug;

        # 1. Query local routing table.
        $dht->find_peers($ih);

        # 2. Force a get_peers query to each bootstrap node.
        $dht->get_peers( $ih, $_->[0], $_->[1] ) for @boot_nodes;
    }
}
|
1183
|
|
|
|
|
|
|
|
|
1184
|
|
|
|
|
|
|
# Advance the iterative DHT search one step: pull fresh candidates from
# the routing table into the %dht_frontier hash, then query the (up to 8)
# unvisited candidates whose node IDs are XOR-closest to each target
# infohash. When the frontier is exhausted, fall back to re-bootstrapping
# via start_dht_lookup(). No-op for private torrents or without a DHT.
method _update_dht_search () {
    return if $is_private;
    my $dht = $client->dht();
    return unless $dht;
    my @ihs;
    push @ihs, $infohash_v2 if $infohash_v2;
    push @ihs, $infohash_v1 if $infohash_v1;
    for my $ih (@ihs) {

        # Merge routing table nodes into our search frontier.
        # Net::BitTorrent::DHT::routing_table->find_closest returns objects
        # where the data is in {data}{ip} and {data}{port}.
        my @closest_in_table = $dht->routing_table->find_closest( $ih, 50 );
        for my $node (@closest_in_table) {
            my $nid_hex = unpack( 'H*', $node->{id} );
            # Already known (possibly already visited) — keep existing entry.
            next if exists $dht_frontier{$nid_hex};
            $dht_frontier{$nid_hex} = { id => $node->{id}, ip => $node->{data}{ip}, port => $node->{data}{port}, visited => 0 };
        }

        # Pick the top N closest unvisited candidates.
        # Note: ^. is bitwise XOR on strings in Modern Perl, giving the
        # Kademlia XOR distance; cmp on the XOR result orders by distance.
        my @to_query = sort { ( $a->{id} ^.$ih ) cmp( $b->{id} ^.$ih ) } grep { !$_->{visited} && $_->{ip} } values %dht_frontier;
        if (@to_query) {
            my $best_dist = unpack( 'H*', $to_query[0]{id} ^.$ih );
            $self->_emit(
                log => sprintf( " [DEBUG] DHT Frontier: %d nodes. Best dist: %s\n", scalar( keys %dht_frontier ), $best_dist ),
                level => 'debug'
            ) if $debug;
            # Query at most 8 candidates per tick; each is marked visited
            # so it is never queried twice.
            my $count = 0;
            for my $c (@to_query) {
                $self->_emit( log => " [DEBUG] DHT Querying: " . unpack( 'H*', $c->{id} ) . " at $c->{ip}:$c->{port}\n", level => 'debug' )
                    if $debug;
                $dht->get_peers( $ih, $c->{ip}, $c->{port} );
                $c->{visited} = 1;
                last if ++$count >= 8;
            }
        }
        else {
            # Every frontier node has been visited — restart discovery
            # from the bootstrap routers.
            $self->_emit( log => " [DEBUG] DHT Frontier exhausted for " . unpack( 'H*', $ih ) . ". Re-bootstrapping...\n", level => 'debug' )
                if $debug;
            $self->start_dht_lookup();

            # Fallback: If we are starving, try adding a public tracker if not already present
            #~ state $added_fallback = 0;
            #~ if ( !$added_fallback && keys %peer_objects < 5 ) {
            #~ warn " [DEBUG] Adding fallback OpenTrackr\n" if $debug;
            #~ $tracker_manager->add_tracker('udp://tracker.opentrackr.org:1337/announce');
            #~ $self->announce('started');
            #~ $added_fallback = 1;
            #~ }
        }
    }
}
|
1237
|
|
|
|
|
|
|
|
|
1238
|
|
|
|
|
|
|
# Order candidate peers for connection attempts, loosely following
# RFC 6724 source/destination preferences: when our DHT is IPv6-capable,
# IPv6 link-local (fe80::/10) peers sort first, then any other IPv6
# peer, then IPv4. Ties are broken randomly so a fixed subset of peers
# is not always tried first.
#
# $peer_list is an arrayref of hashes with at least an {ip} key.
# Returns a new sorted arrayref; the input list is not modified.
method _sort_peers_rfc6724 ($peer_list) {
    my $has_v6 = $client->dht && $client->dht->want_v6;

    # Decorate each peer with precomputed sort keys. The previous
    # implementation called rand() inside the comparator as a tie-break,
    # which makes the comparison function inconsistent — perlfunc's sort
    # documentation requires a consistent comparator, otherwise the
    # result is unspecified. Fixing the random key up front keeps the
    # shuffle while making the comparator deterministic.
    my @decorated = map {
        [   $_,
            ( $_->{ip} =~ /:/      ? 1 : 0 ),    # IPv6?
            ( $_->{ip} =~ /^fe80:/i ? 1 : 0 ),   # IPv6 link-local?
            rand(),                              # stable random tie-break
        ]
    } @$peer_list;
    return [
        map { $_->[0] } sort {
            (   $has_v6
                ? (    $b->[2] <=> $a->[2]       # prefer link-local
                    || $b->[1] <=> $a->[1] )     # then global IPv6
                : 0
            ) || $a->[3] <=> $b->[3]             # random tie-break
        } @decorated
    ];
}
|
1262
|
|
|
|
|
|
|
|
|
1263
|
|
|
|
|
|
|
# Every peer endpoint learned so far, preference-sorted for dialing.
method discovered_peers () {
    return $self->_sort_peers_rfc6724( [ values %peers ] );
}
|
1267
|
|
|
|
|
|
|
# Simple read-only accessors over class fields.
method infohash_v1 () {$infohash_v1}    # v1 infohash (raw bytes); falsy/undef for v2-only torrents
method infohash_v2 () {$infohash_v2}    # v2 infohash (raw bytes); falsy/undef for v1-only torrents
method peer_id () {$peer_id}            # this client's peer id for announces/handshakes
method trackers () { return $tracker_manager->trackers() }    # delegates to the tracker manager
|
1271
|
|
|
|
|
|
|
|
|
1272
|
|
|
|
|
|
|
# Absolute on-disk paths of this torrent's files, in torrent order.
# Returns an empty arrayref until storage has been initialized.
method files () {
    $storage or return [];
    my @paths;
    push @paths, $_->path->absolute->stringify for $storage->files_ordered->@*;
    return \@paths;
}
|
1276
|
|
|
|
|
|
|
|
|
1277
|
|
|
|
|
|
|
# Snapshot of resumable session state; consumed by load_state().
method dump_state () {
    my %state = (
        metadata   => $metadata,                # decoded .torrent metadata
        bitfield   => $bitfield->data,          # raw have-piece bitfield bytes
        storage    => $storage->dump_state(),   # per-file storage state
        downloaded => $bytes_downloaded,
        uploaded   => $bytes_uploaded,
    );
    return \%state;
}
|
1286
|
|
|
|
|
|
|
|
|
1287
|
|
|
|
|
|
|
# Restore session state previously produced by dump_state().
# Branch order matters: metadata is applied first so the bitfield
# branch below can read the piece length from the restored $metadata.
method load_state ($state) {
    if ( $state->{metadata} ) {
        $metadata = $state->{metadata};
        $self->_init_from_metadata();
    }
    if ( $state->{bitfield} ) {
        $bitfield->set_data( $state->{bitfield} );
        # NOTE(review): falls back to 16384 when 'piece length' is absent
        # (e.g. bitfield restored without metadata) — typical piece
        # lengths are far larger, so confirm this default is intended.
        my $piece_len = $metadata->{info}{'piece length'} // 16384;
        # Approximation: every missing piece is counted as full-length,
        # so $bytes_left can overshoot by up to one piece when the final
        # (short) piece is among the missing ones.
        $bytes_left = ( $bitfield->size - $bitfield->count ) * $piece_len;
    }
    if ( $state->{storage} && $storage ) {
        $storage->load_state( $state->{storage} );
    }
    # Transfer counters default to zero for states saved without them.
    $bytes_downloaded = $state->{downloaded} // 0;
    $bytes_uploaded   = $state->{uploaded} // 0;
}
|
1303
|
|
|
|
|
|
|
|
|
1304
|
|
|
|
|
|
|
# Return a BEP 52 style 'file tree' hash for this torrent. v2 metadata
# already carries one and is returned as-is; for v1 metadata an
# equivalent structure is synthesized: directories become nested hashes
# and each file becomes { '' => { length => ... } }.
method file_tree () {
    my $info = $metadata->{info};
    return $info->{'file tree'} if $info->{'file tree'};
    my %tree;
    if ( $info->{files} ) {    # v1 multi-file layout
        FILE: for my $entry ( $info->{files}->@* ) {

            # Drop empty, '.' and '..' segments so the synthesized tree
            # cannot escape the torrent's base directory.
            my @segments = grep { $_ ne '' && $_ ne '.' && $_ ne '..' } $entry->{path}->@*;
            my $leaf = pop @segments;

            # Walk/create the directory chain, then attach the leaf.
            my $node = \%tree;
            $node = $node->{$_} //= {} for @segments;
            next FILE unless defined $leaf;    # path was all junk segments
            $node->{$leaf} = { '' => { length => $entry->{length} } };
        }
    }
    else {                     # v1 single-file layout
        ( my $safe_name = $info->{name} ) =~ s|[\\/]+|_|g;    # neutralize path separators
        $tree{$safe_name} = { '' => { length => $info->{length} // 0 } };
    }
    return \%tree;
}
|
1328
|
|
|
|
|
|
|
} 1; |