| line |   stmt | bran | cond | sub | pod | time | code
|    1 |        |      |      |     |     |      | #include <stdio.h>
|    2 |        |      |      |     |     |      | #include <stdlib.h>
|    3 |        |      |      |     |     |      | #include <string.h>
|    4 |        |      |      |     |     |      |
|    5 |        |      |      |     |     |      | #include "ptypes.h"
|    6 |        |      |      |     |     |      | #include "cache.h"
|    7 |        |      |      |     |     |      | #include "sieve.h"
|    8 |        |      |      |     |     |      | #include "constants.h" /* _MPU_FILL_EXTRA_N and _MPU_INITIAL_CACHE_SIZE */
|    9 |        |      |      |     |     |      |
|   10 |        |      |      |     |     |      | #include "threadlock.h"
|   11 |        |      |      |     |     |      |
|   12 |        |      |      |     |     |      | /*
|   13 |        |      |      |     |     |      |  * These functions are used internally by the .c and .xs files.
|   14 |        |      |      |     |     |      |  * They handle a cached primary set of primes, as well as a segment
|   15 |        |      |      |     |     |      |  * area for use by all the functions that want to do segmented operation.
|   16 |        |      |      |     |     |      |  *
|   17 |        |      |      |     |     |      |  * We must be thread-safe, and we want to allow a good deal of concurrency.
|   18 |        |      |      |     |     |      |  * It is imperative these be used correctly. After calling the get method,
|   19 |        |      |      |     |     |      |  * use the sieve or segment, then release. You MUST call release before you
|   20 |        |      |      |     |     |      |  * return or croak. You ought to release as soon as you're done using the
|   21 |        |      |      |     |     |      |  * sieve or segment.
|   22 |        |      |      |     |     |      |  */
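
The comment above describes a strict get/use/release discipline. A minimal sketch of a conforming caller, assuming release_prime_cache pairs with get_prime_cache exactly as stated (the function name and body here are hypothetical, not part of this file):

    /* Hypothetical caller illustrating the required discipline. */
    static void example_cache_user(UV n) {
      const unsigned char* sieve;
      UV max_sieved = get_prime_cache(n, &sieve); /* get: sieve valid up to max_sieved */
      /* ... read the sieve here; do not return or croak in this region ... */
      release_prime_cache(sieve);                 /* release before returning */
      (void) max_sieved;
    }
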
|   23 |        |      |      |     |     |      |
|   24 |        |      |      |     |     |      | static int mutex_init = 0;
|   25 |        |      |      |     |     |      | MUTEX_DECL(segment);
|   26 |        |      |      |     |     |      | READ_WRITE_LOCK_DECL(primary_cache);
|   27 |        |      |      |     |     |      |
|   28 |        |      |      |     |     |      | static unsigned char* prime_cache_sieve = 0;
|   29 |        |      |      |     |     |      | static UV prime_cache_size = 0;
|   30 |        |      |      |     |     |      |
|   31 |        |      |      |     |     |      | /* Erase the primary cache and fill up to n. */
|   32 |        |      |      |     |     |      | /* Note: You must have a write lock before calling this! */
|   33 |     82 |      |      |     |     |      | static void _erase_and_fill_prime_cache(UV n) {
|   34 |        |      |      |     |     |      |   UV padded_n;
|   35 |        |      |      |     |     |      |
|   36 |     82 |   50 |      |     |     |      |   if (n >= (UV_MAX-_MPU_FILL_EXTRA_N))
|   37 |      0 |      |      |     |     |      |     padded_n = UV_MAX;
|   38 |        |      |      |     |     |      |   else
|   39 |     82 |      |      |     |     |      |     padded_n = ((n + _MPU_FILL_EXTRA_N)/30)*30;
|   40 |        |      |      |     |     |      |
|   41 |        |      |      |     |     |      |   /* If the new size is neither larger nor smaller, we're done. */
|   42 |     82 |  100 |      |     |     |      |   if (prime_cache_size == padded_n)
|   43 |      1 |      |      |     |     |      |     return;
|   44 |        |      |      |     |     |      |
|   45 |     81 |  100 |      |     |     |      |   if (prime_cache_sieve != 0)
|   46 |     33 |      |      |     |     |      |     Safefree(prime_cache_sieve);
|   47 |     81 |      |      |     |     |      |   prime_cache_sieve = 0;
|   48 |     81 |      |      |     |     |      |   prime_cache_size = 0;
|   49 |        |      |      |     |     |      |
|   50 |     81 |   50 |      |     |     |      |   if (n > 0) {
|   51 |     81 |      |      |     |     |      |     prime_cache_sieve = sieve_erat30(padded_n);
|   52 |     81 |   50 |      |     |     |      |     MPUassert(prime_cache_sieve != 0, "sieve returned null");
|   53 |     81 |      |      |     |     |      |     prime_cache_size = padded_n;
|   54 |        |      |      |     |     |      |   }
|   55 |        |      |      |     |     |      | }
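
The padding on line 39 rounds the fill target to a multiple of 30 so the cache always ends on a whole wheel byte, and _MPU_FILL_EXTRA_N adds headroom, presumably so that a run of slightly larger requests does not resieve every time. A standalone illustration of the arithmetic (EXTRA is a made-up stand-in; the real value comes from constants.h):

    #define EXTRA 240   /* stand-in for _MPU_FILL_EXTRA_N, illustration only */

    static unsigned long pad_to_wheel(unsigned long n) {
      return ((n + EXTRA) / 30) * 30;   /* round down to a whole mod-30 byte */
    }

    /* pad_to_wheel(1000) == 1230:  1240/30 truncates to 41, and 41*30 = 1230 */
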
|   56 |        |      |      |     |     |      |
|   57 |        |      |      |     |     |      | /*
|   58 |        |      |      |     |     |      |  * Get the size and a pointer to the cached prime sieve.
|   59 |        |      |      |     |     |      |  * Returns the maximum sieved value available.
|   60 |        |      |      |     |     |      |  * Allocates and sieves if needed.
|   61 |        |      |      |     |     |      |  *
|   62 |        |      |      |     |     |      |  * The sieve holds 30 numbers per byte, using a mod-30 wheel.
|   63 |        |      |      |     |     |      |  */
|   64 | 252557 |      |      |     |     |      | UV get_prime_cache(UV n, const unsigned char** sieve)
|   65 |        |      |      |     |     |      | {
|   66 |        |      |      |     |     |      | #ifdef USE_ITHREADS
|   67 |        |      |      |     |     |      |   if (sieve == 0) {
|   68 |        |      |      |     |     |      |     if (prime_cache_size < n) {
|   69 |        |      |      |     |     |      |       WRITE_LOCK_START(primary_cache);
|   70 |        |      |      |     |     |      |       _erase_and_fill_prime_cache(n);
|   71 |        |      |      |     |     |      |       WRITE_LOCK_END(primary_cache);
|   72 |        |      |      |     |     |      |     }
|   73 |        |      |      |     |     |      |     return prime_cache_size;
|   74 |        |      |      |     |     |      |   }
|   75 |        |      |      |     |     |      |
|   76 |        |      |      |     |     |      |   /* This could be done more efficiently if we converted a write lock to a
|   77 |        |      |      |     |     |      |    * reader after doing the expansion. But I think this solution is less
|   78 |        |      |      |     |     |      |    * error prone (though it could lead to starvation in pathological cases).
|   79 |        |      |      |     |     |      |    */
|   80 |        |      |      |     |     |      |   READ_LOCK_START(primary_cache);
|   81 |        |      |      |     |     |      |   while (prime_cache_size < n) {
|   82 |        |      |      |     |     |      |     /* The cache isn't big enough. Expand it. */
|   83 |        |      |      |     |     |      |     READ_LOCK_END(primary_cache);
|   84 |        |      |      |     |     |      |     /* thread reminder: the world can change right here */
|   85 |        |      |      |     |     |      |     WRITE_LOCK_START(primary_cache);
|   86 |        |      |      |     |     |      |     if (prime_cache_size < n)
|   87 |        |      |      |     |     |      |       _erase_and_fill_prime_cache(n);
|   88 |        |      |      |     |     |      |     WRITE_LOCK_END(primary_cache);
|   89 |        |      |      |     |     |      |     /* thread reminder: the world can change right here */
|   90 |        |      |      |     |     |      |     READ_LOCK_START(primary_cache);
|   91 |        |      |      |     |     |      |   }
|   92 |        |      |      |     |     |      |   MPUassert(prime_cache_size >= n, "prime cache is too small!");
|   93 |        |      |      |     |     |      |
|   94 |        |      |      |     |     |      |   *sieve = prime_cache_sieve;
|   95 |        |      |      |     |     |      |   return prime_cache_size;
|   96 |        |      |      |     |     |      | #else
|   97 | 252557 |  100 |      |     |     |      |   if (prime_cache_size < n)
|   98 |     72 |      |      |     |     |      |     _erase_and_fill_prime_cache(n);
|   99 | 252557 |   50 |      |     |     |      |   MPUassert(prime_cache_size >= n, "prime cache is too small!");
|  100 | 252557 |  100 |      |     |     |      |   if (sieve != 0)
|  101 | 117944 |      |      |     |     |      |     *sieve = prime_cache_sieve;
|  102 | 252557 |      |      |     |     |      |   return prime_cache_size;
|  103 |        |      |      |     |     |      | #endif
|  104 |        |      |      |     |     |      | }
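
The mod-30 wheel mentioned in the function's comment packs 30 numbers per byte because only the 8 residues coprime to 30 (1, 7, 11, 13, 17, 19, 23, 29) can be prime beyond 2, 3, and 5, and those 8 candidates map to the 8 bits of a byte. A self-contained sketch of the lookup, assuming the common convention that a set bit marks a composite (the authoritative masks and accessors live in sieve.h, not here):

    /* Bit mask for each residue mod 30; zero means divisible by 2, 3, or 5. */
    static const unsigned char mask30[30] = {
      0, 1, 0, 0, 0, 0, 0, 2, 0, 0, 0, 4, 0, 8, 0, 0,
      0, 16, 0, 32, 0, 0, 0, 64, 0, 0, 0, 0, 0, 128
    };
    /* Returns 1 if n is prime according to the wheel sieve. Callers must
     * special-case 2, 3, and 5, which the wheel cannot represent. */
    static int wheel30_is_prime(const unsigned char* sieve, UV n) {
      unsigned char mask = mask30[n % 30];
      if (mask == 0) return 0;              /* multiple of 2, 3, or 5 */
      return (sieve[n / 30] & mask) == 0;   /* clear bit => prime (assumed) */
    }
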
|  105 |        |      |      |     |     |      |
|  106 |        |      |      |     |     |      | #ifdef USE_ITHREADS
|  107 |        |      |      |     |     |      | void release_prime_cache(const unsigned char* mem) {
|  108 |        |      |      |     |     |      |   (void)mem; /* We don't currently care about the pointer */
|  109 |        |      |      |     |     |      |   READ_LOCK_END(primary_cache);
|  110 |        |      |      |     |     |      | }
|  111 |        |      |      |     |     |      | #endif
|  112 |        |      |      |     |     |      |
|  113 |        |      |      |     |     |      |
|  114 |        |      |      |     |     |      |
|  115 |        |      |      |     |     |      | /* The segment everyone is trying to share */
|  116 |        |      |      |     |     |      | #define PRIMARY_SEGMENT_CHUNK_SIZE UVCONST(32*1024-16)
|  117 |        |      |      |     |     |      | static unsigned char* prime_segment = 0;
|  118 |        |      |      |     |     |      | static int prime_segment_is_available = 1;
|  119 |        |      |      |     |     |      | /* If that's in use, malloc a new one of this size */
|  120 |        |      |      |     |     |      | #define SECONDARY_SEGMENT_CHUNK_SIZE UVCONST(32*1024-16)
|  121 |        |      |      |     |     |      |
|  122 |   1866 |      |      |     |     |      | unsigned char* get_prime_segment(UV *size) {
|  123 |        |      |      |     |     |      |   unsigned char* mem;
|  124 |   1866 |      |      |     |     |      |   int use_prime_segment = 0;
|  125 |        |      |      |     |     |      |
|  126 |   1866 |   50 |      |     |     |      |   MPUassert(size != 0, "get_prime_segment given null size pointer");
|  127 |   1866 |   50 |      |     |     |      |   MPUassert(mutex_init == 1, "segment mutex has not been initialized");
|  128 |        |      |      |     |     |      |
|  129 |        |      |      |     |     |      |   MUTEX_LOCK(&segment_mutex);
|  130 |   1866 |   50 |      |     |     |      |   if (prime_segment_is_available) {
|  131 |   1866 |      |      |     |     |      |     prime_segment_is_available = 0;
|  132 |   1866 |      |      |     |     |      |     use_prime_segment = 1;
|  133 |        |      |      |     |     |      |   }
|  134 |        |      |      |     |     |      |   MUTEX_UNLOCK(&segment_mutex);
|  135 |        |      |      |     |     |      |
|  136 |   1866 |   50 |      |     |     |      |   if (use_prime_segment) {
|  137 |   1866 |  100 |      |     |     |      |     if (prime_segment == 0)
|  138 |     13 |      |      |     |     |      |       New(0, prime_segment, PRIMARY_SEGMENT_CHUNK_SIZE, unsigned char);
|  139 |   1866 |      |      |     |     |      |     *size = PRIMARY_SEGMENT_CHUNK_SIZE;
|  140 |   1866 |      |      |     |     |      |     mem = prime_segment;
|  141 |        |      |      |     |     |      |   } else {
|  142 |      0 |      |      |     |     |      |     New(0, mem, SECONDARY_SEGMENT_CHUNK_SIZE, unsigned char);
|  143 |      0 |      |      |     |     |      |     *size = SECONDARY_SEGMENT_CHUNK_SIZE;
|  144 |        |      |      |     |     |      |   }
|  145 |   1866 |   50 |      |     |     |      |   MPUassert(mem != 0, "get_prime_segment allocation failure");
|  146 |        |      |      |     |     |      |
|  147 |   1866 |      |      |     |     |      |   return mem;
|  148 |        |      |      |     |     |      | }
|  149 |        |      |      |     |     |      |
|  150 |   1866 |      |      |     |     |      | void release_prime_segment(unsigned char* mem) {
|  151 |        |      |      |     |     |      |   MUTEX_LOCK(&segment_mutex);
|  152 |   1866 |   50 |      |     |     |      |   if (mem == prime_segment) {
|  153 |   1866 |      |      |     |     |      |     prime_segment_is_available = 1;
|  154 |   1866 |      |      |     |     |      |     mem = 0;
|  155 |        |      |      |     |     |      |   }
|  156 |        |      |      |     |     |      |   MUTEX_UNLOCK(&segment_mutex);
|  157 |   1866 |   50 |      |     |     |      |   if (mem)
|  158 |      0 |      |      |     |     |      |     Safefree(mem);
|  159 |   1866 |      |      |     |     |      | }
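
Taken together, get_prime_segment and release_prime_segment implement a share-one, allocate-extra policy: the first concurrent caller gets the shared buffer, later callers get a private allocation, and release either hands the shared buffer back or frees the private one. A minimal sketch of a conforming caller (the function and its body are hypothetical):

    /* Hypothetical caller: acquire a scratch segment, use it, release it. */
    static void example_segment_user(void) {
      UV seg_bytes;
      unsigned char* seg = get_prime_segment(&seg_bytes);
      /* ... sieve blocks of up to seg_bytes bytes into seg; as with the
       *     primary cache, do not return or croak before releasing ... */
      release_prime_segment(seg);
    }
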
|  160 |        |      |      |     |     |      |
|  161 |        |      |      |     |     |      |
|  162 |        |      |      |     |     |      |
|  163 |     74 |      |      |     |     |      | void prime_precalc(UV n)
|  164 |        |      |      |     |     |      | {
|  165 |     74 |  100 |      |     |     |      |   if (!mutex_init) {
|  166 |        |      |      |     |     |      |     MUTEX_INIT(&segment_mutex);
|  167 |        |      |      |     |     |      |     MUTEX_INIT(&primary_cache_mutex);
|  168 |        |      |      |     |     |      |     COND_INIT(&primary_cache_turn);
|  169 |     48 |      |      |     |     |      |     mutex_init = 1;
|  170 |        |      |      |     |     |      |   }
|  171 |        |      |      |     |     |      |
|  172 |        |      |      |     |     |      |   /* On initialization, make a few primes (30k per 1k memory) */
|  173 |     74 |  100 |      |     |     |      |   if (n == 0)
|  174 |     48 |      |      |     |     |      |     n = _MPU_INITIAL_CACHE_SIZE;
|  175 |     74 |      |      |     |     |      |   get_prime_cache(n, 0); /* Sieve to n */
|  176 |        |      |      |     |     |      |
|  177 |        |      |      |     |     |      |   /* TODO: should we prealloc the segment here? */
|  178 |     74 |      |      |     |     |      | }
|  179 |        |      |      |     |     |      |
|  180 |        |      |      |     |     |      |
|  181 |     10 |      |      |     |     |      | void prime_memfree(void)
|  182 |        |      |      |     |     |      | {
|  183 |     10 |      |      |     |     |      |   unsigned char* old_segment = 0;
|  184 |        |      |      |     |     |      |
|  185 |        |      |      |     |     |      |   /* This can happen in the global destructor, and PL_dirty has porting issues */
|  186 |        |      |      |     |     |      |   /* MPUassert(mutex_init == 1, "cache mutexes have not been initialized"); */
|  187 |     10 |   50 |      |     |     |      |   if (mutex_init == 0) return;
|  188 |        |      |      |     |     |      |
|  189 |        |      |      |     |     |      |   MUTEX_LOCK(&segment_mutex);
|  190 |        |      |      |     |     |      |   /* Don't free if another thread is using it */
|  191 |     10 |  100 |   50 |     |     |      |   if ( (prime_segment != 0) && (prime_segment_is_available) ) {
|  192 |      1 |      |      |     |     |      |     unsigned char* new_segment = old_segment;
|  193 |      1 |      |      |     |     |      |     old_segment = prime_segment;
|  194 |      1 |      |      |     |     |      |     prime_segment = new_segment; /* Exchanged old_segment / prime_segment */
|  195 |        |      |      |     |     |      |   }
|  196 |        |      |      |     |     |      |   MUTEX_UNLOCK(&segment_mutex);
|  197 |     10 |  100 |      |     |     |      |   if (old_segment) Safefree(old_segment);
|  198 |        |      |      |     |     |      |
|  199 |        |      |      |     |     |      |   WRITE_LOCK_START(primary_cache);
|  200 |        |      |      |     |     |      |   /* Put primary cache back to initial state */
|  201 |     10 |      |      |     |     |      |   _erase_and_fill_prime_cache(_MPU_INITIAL_CACHE_SIZE);
|  202 |        |      |      |     |     |      |   WRITE_LOCK_END(primary_cache);
|  203 |        |      |      |     |     |      | }
|  204 |        |      |      |     |     |      |
|  205 |        |      |      |     |     |      |
|  206 |     48 |      |      |     |     |      | void _prime_memfreeall(void)
|  207 |        |      |      |     |     |      | {
|  208 |        |      |      |     |     |      |   /* No locks. We're shutting everything down. */
|  209 |     48 |   50 |      |     |     |      |   if (mutex_init) {
|  210 |     48 |      |      |     |     |      |     mutex_init = 0;
|  211 |        |      |      |     |     |      |     MUTEX_DESTROY(&segment_mutex);
|  212 |        |      |      |     |     |      |     MUTEX_DESTROY(&primary_cache_mutex);
|  213 |        |      |      |     |     |      |     COND_DESTROY(&primary_cache_turn);
|  214 |        |      |      |     |     |      |   }
|  215 |     48 |   50 |      |     |     |      |   if (prime_cache_sieve != 0)
|  216 |     48 |      |      |     |     |      |     Safefree(prime_cache_sieve);
|  217 |     48 |      |      |     |     |      |   prime_cache_sieve = 0;
|  218 |     48 |      |      |     |     |      |   prime_cache_size = 0;
|  219 |        |      |      |     |     |      |
|  220 |     48 |  100 |      |     |     |      |   if (prime_segment != 0)
|  221 |     12 |      |      |     |     |      |     Safefree(prime_segment);
|  222 |     48 |      |      |     |     |      |   prime_segment = 0;
|  223 |     48 |      |      |     |     |      | }