line       stmt  bran  code        (cond, sub and time columns contain no data for the lines shown)
   1                   /* pp_sort.c
   2                    *
   3                    *    Copyright (C) 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
   4                    *    2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 by Larry Wall and others
   5                    *
   6                    *    You may distribute under the terms of either the GNU General Public
   7                    *    License or the Artistic License, as specified in the README file.
   8                    *
   9                    */
  10
  11                   /*
  12                    * ...they shuffled back towards the rear of the line. 'No, not at the
  13                    * rear!' the slave-driver shouted. 'Three files up. And stay there...
  14                    *
  15                    *     [p.931 of _The Lord of the Rings_, VI/ii: "The Land of Shadow"]
  16                    */
  17
  18                   /* This file contains pp ("push/pop") functions that
  19                    * execute the opcodes that make up a perl program. A typical pp function
  20                    * expects to find its arguments on the stack, and usually pushes its
  21                    * results onto the stack, hence the 'pp' terminology. Each OP structure
  22                    * contains a pointer to the relevant pp_foo() function.
  23                    *
  24                    * This particular file just contains pp_sort(), which is complex
  25                    * enough to merit its own file! See the other pp*.c files for the rest of
  26                    * the pp_ functions.
  27                    */
  28
  29                   #include "EXTERN.h"
  30                   #define PERL_IN_PP_SORT_C
  31                   #include "perl.h"
  32
  33                   #if defined(UNDER_CE)
  34                   /* looks like 'small' is a reserved word for WINCE (or some such) */
  35                   #define small xsmall
  36                   #endif
  37
  38                   #define sv_cmp_static Perl_sv_cmp
  39                   #define sv_cmp_locale_static Perl_sv_cmp_locale
  40
  41                   #ifndef SMALLSORT
  42                   #define SMALLSORT (200)
  43                   #endif
  44
  45                   /* Flags for qsortsv and mergesortsv */
  46                   #define SORTf_DESC   1
  47                   #define SORTf_STABLE 2
  48                   #define SORTf_QSORT  4
  49
  50                   /*
  51                    * The mergesort implementation is by Peter M. McIlroy.
  52                    *
  53                    * The original code was written in conjunction with BSD Computer Software
  54                    * Research Group at University of California, Berkeley.
  55                    *
  56                    * See also: "Optimistic Merge Sort" (SODA '92)
  57                    *
  58                    * The integration to Perl is by John P. Linderman.
  59                    *
  60                    * The code can be distributed under the same terms as Perl itself.
  61                    *
  62                    */
  63
  64
  65                   typedef char * aptr;            /* pointer for arithmetic on sizes */
  66                   typedef SV * gptr;              /* pointers in our lists */
  67
  68                   /* Binary merge internal sort, with a few special mods
  69                   ** for the special perl environment it now finds itself in.
  70                   **
  71                   ** Things that were once options have been hotwired
  72                   ** to values suitable for this use.  In particular, we'll always
  73                   ** initialize looking for natural runs, we'll always produce stable
  74                   ** output, and we'll always do Peter McIlroy's binary merge.
  75                   */
  76
  77                   /* Pointer types for arithmetic and storage and convenience casts */
  78
  79                   #define APTR(P)  ((aptr)(P))
  80                   #define GPTP(P)  ((gptr *)(P))
  81                   #define GPPP(P)  ((gptr **)(P))
  82
  83
  84                   /* byte offset from pointer P to (larger) pointer Q */
  85                   #define BYTEOFF(P, Q) (APTR(Q) - APTR(P))
  86
  87                   #define PSIZE sizeof(gptr)
  88
  89                   /* If PSIZE is a power of 2, make PSHIFT that power, if that helps */
  90
  91                   #ifdef PSHIFT
  92                   #define PNELEM(P, Q)  (BYTEOFF(P,Q) >> (PSHIFT))
  93                   #define PNBYTE(N)     ((N) << (PSHIFT))
  94                   #define PINDEX(P, N)  (GPTP(APTR(P) + PNBYTE(N)))
  95                   #else
  96                   /* Leave optimization to compiler */
  97                   #define PNELEM(P, Q)  (GPTP(Q) - GPTP(P))
  98                   #define PNBYTE(N)     ((N) * (PSIZE))
  99                   #define PINDEX(P, N)  (GPTP(P) + (N))
 100                   #endif
 101
 102                   /* Pointer into other corresponding to pointer into this */
 103                   #define POTHER(P, THIS, OTHER) GPTP(APTR(OTHER) + BYTEOFF(THIS,P))
 104
 105                   #define FROMTOUPTO(src, dst, lim) do *dst++ = *src++; while(src<lim)
 106
 107
 108                   /* Runs are identified by a pointer in the auxiliary list.
 109                   ** The pointer is at the start of the list,
 110                   ** and it points to the start of the next list.
 111                   ** NEXT is used as an lvalue, too.
 112                   */
 113
 114                   #define NEXT(P)         (*GPPP(P))
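Since the run chain just described lives inside the auxiliary list itself, it can be hard to visualize. The following standalone sketch is not pp_sort.c code (the array contents and names are purely illustrative); it builds and walks the same kind of chain over a plain void* array, where the cell at the start of each run holds the address of the start of the next run, exactly as NEXT(P) does through a gptr**:

#include <stdio.h>

int main(void) {
    /* aux[] mirrors an 8-element list split into runs of lengths 3, 2
     * and 3.  The cell at the start of each run points at the start of
     * the next run; the last pointer is one past the end of the list. */
    void *aux[8] = {0};
    void **p = aux;
    aux[0] = &aux[3];                /* run 1: elements 0..2 */
    aux[3] = &aux[5];                /* run 2: elements 3..4 */
    aux[5] = &aux[8];                /* run 3: elements 5..7 */

    while (p < aux + 8) {            /* walk the chain, printing run bounds */
        void **next = (void **)*p;   /* the NEXT(P) dereference */
        printf("run: [%td, %td)\n", p - aux, next - aux);
        p = next;
    }
    return 0;
}

Walking the chain this way is how the merge loop in S_mergesortsv later recovers t = NEXT(p) as the end of the first run.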
 115
 116
 117                   /* PTHRESH is the minimum number of pairs with the same sense to justify
 118                   ** checking for a run and extending it.  Note that PTHRESH counts PAIRS,
 119                   ** not just elements, so PTHRESH == 8 means a run of 16.
 120                   */
 121
 122                   #define PTHRESH (8)
 123
 124                   /* RTHRESH is the number of elements in a run that must compare low
 125                   ** to the low element from the opposing run before we justify
 126                   ** doing a binary rampup instead of single stepping.
 127                   ** In random input, N in a row low should only happen with
 128                   ** probability 2^(1-N), so we can risk that we are dealing
 129                   ** with orderly input without paying much when we aren't.
 130                   */
 131
 132                   #define RTHRESH (6)
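To make the probability claim concrete: with RTHRESH == 6, the binary ramp-up triggers only after six elements in a row from one run compare low, which on random input happens with probability about 2^(1-6) = 1/32, roughly 3%. So the ramp-up costs almost nothing on random data, while partially ordered data reaches it quickly.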
 133
 134
 135                   /*
 136                   ** Overview of algorithm and variables.
 137                   ** The array of elements at list1 will be organized into runs of length 2,
 138                   ** or runs of length >= 2 * PTHRESH.  We only try to form long runs when
 139                   ** PTHRESH adjacent pairs compare in the same way, suggesting overall order.
 140                   **
 141                   ** Unless otherwise specified, pair pointers address the first of two elements.
 142                   **
 143                   ** b and b+1 are a pair that compare with sense "sense".
 144                   ** b is the "bottom" of adjacent pairs that might form a longer run.
 145                   **
 146                   ** p2 parallels b in the list2 array, where runs are defined by
 147                   ** a pointer chain.
 148                   **
 149                   ** t represents the "top" of the adjacent pairs that might extend
 150                   ** the run beginning at b.  Usually, t addresses a pair
 151                   ** that compares with opposite sense from (b,b+1).
 152                   ** However, it may also address a singleton element at the end of list1,
 153                   ** or it may be equal to "last", the first element beyond list1.
 154                   **
 155                   ** r addresses the Nth pair following b.  If this would be beyond t,
 156                   ** we back it off to t.  Only when r is less than t do we consider the
 157                   ** run long enough to consider checking.
 158                   **
 159                   ** q addresses a pair such that the pairs at b through q already form a run.
 160                   ** Often, q will equal b, indicating we are only sure of the pair itself.
 161                   ** However, a search on the previous cycle may have revealed a longer run,
 162                   ** so q may be greater than b.
 163                   **
 164                   ** p is used to work back from a candidate r, trying to reach q,
 165                   ** which would mean b through r would be a run.  If we discover such a run,
 166                   ** we start q at r and try to push it further towards t.
 167                   ** If b through r is NOT a run, we detect the wrong order at (p-1,p).
 168                   ** In any event, after the check (if any), we have two main cases.
 169                   **
 170                   ** 1) Short run.  b <= q < p <= r <= t.
 171                   **      b through q is a run (perhaps trivial)
 172                   **      q through p are uninteresting pairs
 173                   **      p through r is a run
 174                   **
 175                   ** 2) Long run.  b < r <= q < t.
 176                   **      b through q is a run (of length >= 2 * PTHRESH)
 177                   **
 178                   ** Note that degenerate cases are not only possible, but likely.
 179                   ** For example, if the pair following b compares with opposite sense,
 180                   ** then b == q < p == r == t.
 181                   */
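Because the overview juggles several pointers at once, a tiny standalone illustration may help. This is not dynprep itself, just the pair/sense classification it starts from, transplanted onto an int array (all names hypothetical):

#include <stdio.h>

int main(void) {
    /* Classify adjacent pairs by "sense", the way dynprep seeds its runs.
     * sense == 1 means the pair is strictly descending and will need a
     * swap to become a stable ascending run of 2. */
    int a[] = {1, 3, 7, 5, 4, 2, 6, 8};
    size_t n = sizeof a / sizeof a[0];
    for (size_t b = 0; b + 1 < n; b += 2) {
        int sense = a[b] > a[b + 1];   /* mirrors (cmp(aTHX_ *b, *(b+1)) > 0) */
        printf("pair (%d,%d): sense=%d\n", a[b], a[b + 1], sense);
    }
    return 0;
}

When PTHRESH consecutive pairs report the same sense, dynprep below goes looking for one long run instead of many runs of 2.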
 182
 183
 184                   static IV
 185    2336494       dynprep(pTHX_ gptr *list1, gptr *list2, size_t nmemb, const SVCOMPARE_t cmp)
 186                   {
 187                       I32 sense;
 188                       gptr *b, *p, *q, *t, *p2;
 189                       gptr *last, *r;
 190                       IV runs = 0;
 191
 192                       b = list1;
 193    2336494            last = PINDEX(b, nmemb);
 194    2336494            sense = (cmp(aTHX_ *b, *(b+1)) > 0);
 195   14058467   100      for (p2 = list2; b < last; ) {
 196                           /* We just started, or just reversed sense.
 197                           ** Set t at end of pairs with the prevailing sense.
 198                           */
 199   23000868   100          for (p = b+2, t = p; ++p < last; t = ++p) {
 200   16554968   100              if ((cmp(aTHX_ *t, *p) > 0) != sense) break;
 201                           }
 202                           q = b;
 203                           /* Having laid out the playing field, look for long runs */
 204                           do {
 205   10580975                   p = r = b + (2 * PTHRESH);
 206   10580975   100              if (r >= t) p = r = t;      /* too short to care about */
 207                               else {
 208      60291   100                  while (((cmp(aTHX_ *(p-1), *p) > 0) == sense) &&
                  100
 209      22824                               ((p -= 2) > q)) {}
 210      28332   100                  if (p <= q) {
 211                                       /* b through r is a (long) run.
 212                                       ** Extend it as far as possible.
 213                                       */
 214                                       p = q = r;
 215     136723                            while (((p += 2) < t) &&
 216      89950                                   ((cmp(aTHX_ *(p-1), *p) > 0) == sense)) q = p;
 217       2276                            r = p = q + 2;      /* no simple pairs, no after-run */
 218                                   }
 219                               }
 220   10580975   100              if (q > b) {                /* run of greater than 2 at b */
 221                                   gptr *savep = p;
 222
 223       6204                        p = q += 2;
 224                                   /* pick up singleton, if possible */
 225       7834   100                  if ((p == t) &&
                  100
 226       1818   100                      ((t + 1) == last) &&
 227        188                            ((cmp(aTHX_ *(p-1), *p) > 0) == sense))
 228                                       savep = r = p = q = last;
 229       6204                        p2 = NEXT(p2) = p2 + (p - b); ++runs;
 230       6204   100                  if (sense)
 231    5296289   100                      while (b < --p) {
 232       4004                                const gptr c = *b;
 233       4004                                *b++ = *p;
 234       4004                                *p = c;
 235                                       }
 236                                   p = savep;
 237                               }
 238   29354373   100              while (q < p) {             /* simple pairs */
 239   18773398                        p2 = NEXT(p2) = p2 + 2; ++runs;
 240   18773398   100                  if (sense) {
 241    9382291                            const gptr c = *q++;
 242    9382291                            *(q-1) = *q;
 243    9382291                            *q++ = c;
 244   14083063                        } else q += 2;
 245                               }
 246   10580975   100              if (((b = p) == t) && ((t+1) == last)) {
                  100
 247    1520545                        NEXT(p2) = p2 + 1; ++runs;
 248    1520545                        b++;
 249                               }
 250                               q = r;
 251   10580975   100          } while (b < t);
 252   10553963            sense = !sense;
 253                       }
 254    2336454            return runs;
 255                   }
 256
 257
 258                   /* The original merge sort, in use since 5.7, was as fast as, or faster than,
 259                    * qsort on many platforms, but slower than qsort, conspicuously so,
 260                    * on others.  The most likely explanation was platform-specific
 261                    * differences in cache sizes and relative speeds.
 262                    *
 263                    * The quicksort divide-and-conquer algorithm guarantees that, as the
 264                    * problem is subdivided into smaller and smaller parts, the parts
 265                    * fit into smaller (and faster) caches.  So it doesn't matter how
 266                    * many levels of cache exist, quicksort will "find" them, and,
 267                    * as long as smaller is faster, take advantage of them.
 268                    *
 269                    * By contrast, consider how the original mergesort algorithm worked.
 270                    * Suppose we have five runs (each typically of length 2 after dynprep).
 271                    *
 272                    * pass               base                        aux
 273                    *  0              1 2 3 4 5
 274                    *  1                                           12 34 5
 275                    *  2              1234 5
 276                    *  3                                           12345
 277                    *  4              12345
 278                    *
 279                    * Adjacent pairs are merged in "grand sweeps" through the input.
 280                    * This means, on pass 1, the records in runs 1 and 2 aren't revisited until
 281                    * runs 3 and 4 are merged and the runs from run 5 have been copied.
 282                    * The only cache that matters is one large enough to hold *all* the input.
 283                    * On some platforms, this may be many times slower than smaller caches.
 284                    *
 285                    * The following pseudo-code uses the same basic merge algorithm,
 286                    * but in a divide-and-conquer way.
 287                    *
 288                    * # merge $runs runs at offset $offset of list $list1 into $list2.
 289                    * # all unmerged runs ($runs == 1) originate in list $base.
 290                    * sub mgsort2 {
 291                    *     my ($offset, $runs, $base, $list1, $list2) = @_;
 292                    *
 293                    *     if ($runs == 1) {
 294                    *         if ($list1 is $base) copy run to $list2
 295                    *         return offset of end of list (or copy)
 296                    *     } else {
 297                    *         $off2 = mgsort2($offset, $runs-($runs/2), $base, $list2, $list1)
 298                    *         mgsort2($off2, $runs/2, $base, $list2, $list1)
 299                    *         merge the adjacent runs at $offset of $list1 into $list2
 300                    *         return the offset of the end of the merged runs
 301                    *     }
 302                    * }
 303                    * mgsort2(0, $runs, $base, $aux, $base);
 304                    *
 305                    * For our 5 runs, the tree of calls looks like
 306                    *
 307                    *           5
 308                    *      3        2
 309                    *   2     1   1   1
 310                    * 1   1
 311                    *
 312                    * 1   2   3   4   5
 313                    *
 314                    * and the corresponding activity looks like
 315                    *
 316                    * copy runs 1 and 2 from base to aux
 317                    * merge runs 1 and 2 from aux to base
 318                    * (run 3 is where it belongs, no copy needed)
 319                    * merge runs 12 and 3 from base to aux
 320                    * (runs 4 and 5 are where they belong, no copy needed)
 321                    * merge runs 4 and 5 from base to aux
 322                    * merge runs 123 and 45 from aux to base
 323                    *
 324                    * Note that we merge runs 1 and 2 immediately after copying them,
 325                    * while they are still likely to be in fast cache.  Similarly,
 326                    * run 3 is merged with run 12 while it still may be lingering in cache.
 327                    * This implementation should therefore enjoy much of the cache-friendly
 328                    * behavior that quicksort does.  In addition, it does less copying
 329                    * than the original mergesort implementation (only runs 1 and 2 are copied)
 330                    * and the "balancing" of merges is better (merged runs comprise more nearly
 331                    * equal numbers of original runs).
 332                    *
 333                    * The actual cache-friendly implementation will use a pseudo-stack
 334                    * to avoid recursion, and will unroll processing of runs of length 2,
 335                    * but it is otherwise similar to the recursive implementation.
 336                    */
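The Perl pseudo-code above translates almost line for line into C. The sketch below is a hypothetical recursive rendering on an int array; it assumes every unmerged run has length 2 (with a possible trailing singleton) and is already internally ascending, as dynprep arranges. It is meant only to make the call structure concrete, not to mirror the pseudo-stack implementation that follows.

#include <stddef.h>
#include <string.h>

/* Where the length-2 run starting at `off` ends (trailing singleton ok). */
static size_t run_end(size_t off, size_t n) {
    size_t end = off + 2;
    return end > n ? n : end;
}

/* Stable merge of src[lo,mid) and src[mid,hi) into dst[lo,hi). */
static void merge_adjacent(int *dst, const int *src,
                           size_t lo, size_t mid, size_t hi) {
    size_t i = lo, j = mid, k = lo;
    while (i < mid && j < hi)
        dst[k++] = (src[i] <= src[j]) ? src[i++] : src[j++];
    while (i < mid) dst[k++] = src[i++];
    while (j < hi)  dst[k++] = src[j++];
}

/* merge `runs` runs at `offset` of list1 into list2; all unmerged runs
 * originate in `base`, exactly as the pseudo-code's comment says. */
static size_t mgsort2(size_t offset, size_t runs, int *base, size_t n,
                      int *list1, int *list2) {
    if (runs == 1) {
        size_t end = run_end(offset, n);
        if (list1 == base)                    /* copy run to list2 */
            memcpy(list2 + offset, list1 + offset,
                   (end - offset) * sizeof *list1);
        return end;                           /* offset of end of list */
    } else {
        size_t off2 = mgsort2(offset, runs - runs / 2, base, n, list2, list1);
        size_t end  = mgsort2(off2, runs / 2, base, n, list2, list1);
        merge_adjacent(list2, list1, offset, off2, end);
        return end;                           /* end of the merged runs */
    }
}

Under those assumptions, calling mgsort2(0, (n + 1) / 2, data, n, aux, data) with a scratch array aux of size n leaves the fully merged result in data, matching the top-level mgsort2(0, $runs, $base, $aux, $base) call above; tracing it for five runs reproduces the copy/merge activity listed in the comment.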
 337
 338                   typedef struct {
 339                       IV  offset;         /* offset of 1st of 2 runs at this level */
 340                       IV  runs;           /* how many runs must be combined into 1 */
 341                   } off_runs;             /* pseudo-stack element */
 342
 343
 344                   static I32
 345     228970       cmp_desc(pTHX_ gptr const a, gptr const b)
 346                   {
 347                       dVAR;
 348     228970            return -PL_sort_RealCmp(aTHX_ a, b);
 349                   }
 350
 351                   STATIC void
 352    2337034       S_mergesortsv(pTHX_ gptr *base, size_t nmemb, SVCOMPARE_t cmp, U32 flags)
 353                   {
 354                       dVAR;
 355                       IV i, run, offset;
 356                       I32 sense, level;
 357                       gptr *f1, *f2, *t, *b, *p;
 358                       int iwhich;
 359                       gptr *aux;
 360                       gptr *p1;
 361                       gptr small[SMALLSORT];
 362                       gptr *which[3];
 363                       off_runs stack[60], *stackp;
 364                       SVCOMPARE_t savecmp = NULL;
 365
 366    2337034   100      if (nmemb <= 1) return;                 /* sorted trivially */
 367
 368    2336494   100      if ((flags & SORTf_DESC) != 0) {
 369      11936            savecmp = PL_sort_RealCmp;  /* Save current comparison routine, if any */
 370      11936            PL_sort_RealCmp = cmp;  /* Put comparison routine where cmp_desc can find it */
 371                           cmp = cmp_desc;
 372                       }
 373
 374    2336494   100      if (nmemb <= SMALLSORT) aux = small;    /* use stack for aux array */
 375      49575    50      else { Newx(aux,nmemb,gptr); }          /* allocate auxiliary array */
 376                       level = 0;
 377                       stackp = stack;
 378    2336494            stackp->runs = dynprep(aTHX_ base, aux, nmemb, cmp);
 379    2336454            stackp->offset = offset = 0;
 380    2336454            which[0] = which[2] = base;
 381    8093469            which[1] = aux;
 382                       for (;;) {
 383                           /* On levels where both runs have been constructed (stackp->runs == 0),
 384                            * merge them, and note the offset of their end, in case the offset
 385                            * is needed at the next level up.  Hop up a level, and,
 386                            * as long as stackp->runs is 0, keep merging.
 387                            */
 388   13848327            IV runs = stackp->runs;
 389   13848327   100       if (runs == 0) {
 390                               gptr *list1, *list2;
 391   11511873                iwhich = level & 1;
 392   11511873                list1 = which[iwhich];              /* area where runs are now */
 393   11511873                list2 = which[++iwhich];            /* area for merged runs */
 394                               do {
 395                                   gptr *l1, *l2, *tp2;
 396   17963693                    offset = stackp->offset;
 397   17963693                    f1 = p1 = list1 + offset;       /* start of first run */
 398   17963693                    p = tp2 = list2 + offset;       /* where merged run will go */
 399   17963693                    t = NEXT(p);                    /* where first run ends */
 400   17963693                    f2 = l1 = POTHER(t, list2, list1); /* ... on the other side */
 401   17963693                    t = NEXT(t);                    /* where second run ends */
 402   17963693                    l2 = POTHER(t, list2, list1);   /* ... on the other side */
 403   17963693                    offset = PNELEM(list2, t);
 404   99160416   100              while (f1 < l1 && f2 < l2) {
 405                                       /* If head 1 is larger than head 2, find ALL the elements
 406                                       ** in list 2 strictly less than head1, write them all,
 407                                       ** then head 1.  Then compare the new heads, and repeat,
 408                                       ** until one or both lists are exhausted.
 409                                       **
 410                                       ** In all comparisons (after establishing
 411                                       ** which head to merge) the item to merge
 412                                       ** (at pointer q) is the first operand of
 413                                       ** the comparison.  When we want to know
 414                                       ** if "q is strictly less than the other",
 415                                       ** we can't just do
 416                                       **    cmp(q, other) < 0
 417                                       ** because stability demands that we treat equality
 418                                       ** as high when q comes from l2, and as low when
 419                                       ** q was from l1.  So we ask the question by doing
 420                                       **    cmp(q, other) <= sense
 421                                       ** and make sense == 0 when equality should look low,
 422                                       ** and -1 when equality should look high.
 423                                       */
 424
 425                                       gptr *q;
 426   97236608   100                  if (cmp(aTHX_ *f1, *f2) <= 0) {
 427                                           q = f2; b = f1; t = l1;
 428                                           sense = -1;
 429                                       } else {
 430                                           q = f1; b = f2; t = l2;
 431                                           sense = 0;
 432                                       }
 433
 434
 435                                       /* ramp up
 436                                       **
 437                                       ** Leave t at something strictly
 438                                       ** greater than q (or at the end of the list),
 439                                       ** and b at something strictly less than q.
 440                                       */
 441                                       for (i = 1, run = 0 ;;) {
 442  122252024   100                      if ((p = PINDEX(b, i)) >= t) {
 443                                               /* off the end */
 444   14930514                                if (((p = PINDEX(t, -1)) > b) &&
 445      23644                                    (cmp(aTHX_ *q, *p) <= sense))
 446                                                   t = p;
 447                                               else b = p;
 448                                               break;
 449  107345154   100                      } else if (cmp(aTHX_ *q, *p) <= sense) {
 450                                               t = p;
 451                                               break;
 452                                           } else b = p;
 453   50035529   100                      if (++run >= RTHRESH) i += i;
 454                                       }
 455
 456
 457                                       /* q is known to follow b and must be inserted before t.
 458                                       ** Increment b, so the range of possibilities is [b,t).
 459                                       ** Round binary split down, to favor early appearance.
 460                                       ** Adjust b and t until q belongs just before t.
 461                                       */
 462
 463   72216495                        b++;
 464  108799020   100                  while (b < t) {
 465     478304                            p = PINDEX(b, (PNELEM(b, t) - 1) / 2);
 466     478304   100                      if (cmp(aTHX_ *q, *p) <= sense) {
 467                                               t = p;
 468     313621                            } else b = p + 1;
 469                                       }
 470
 471
 472                                       /* Copy all the strictly low elements */
 473
 474   72216495   100                  if (q == f1) {
 475   54941059   100                      FROMTOUPTO(f2, tp2, t);
 476   53069637                            *tp2++ = *f1++;
 477                                       } else {
 478   67898417   100                      FROMTOUPTO(f1, tp2, t);
 479   55259132                            *tp2++ = *f2++;
 480                                       }
 481                                   }
 482
 483
 484                                   /* Run out remaining list */
 485   17963693   100              if (f1 == l1) {
 486   15333036   100                  if (f2 < l2) FROMTOUPTO(f2, tp2, l2);
                  100
 487    9252981   100              } else FROMTOUPTO(f1, tp2, l1);
 488   17963693                    p1 = NEXT(p1) = POTHER(tp2, list2, list1);
 489
 490   17963693   100              if (--level == 0) goto done;
 491   15843073                    --stackp;
 492                                   t = list1; list1 = list2; list2 = t;    /* swap lists */
 493   15843073   100          } while ((runs = stackp->runs) == 0);
 494                           }
 495
 496
 497   11727707            stackp->runs = 0;               /* current run will finish level */
 498                           /* While there are more than 2 runs remaining,
 499                            * turn them into exactly 2 runs (at the "other" level),
 500                            * each made up of approximately half the runs.
 501                            * Stack the second half for later processing,
 502                            * and set about producing the first half now.
 503                            */
 504   26981735   100          while (runs > 2) {
 505    9391253                    ++level;
 506    9391253                    ++stackp;
 507    9391253                    stackp->offset = offset;
 508    9391253                    runs -= stackp->runs = runs / 2;
 509                           }
 510                           /* We must construct a single run from 1 or 2 runs.
 511                            * All the original runs are in which[0] == base.
 512                            * The run we construct must end up in which[level&1].
 513                            */
 514   11727707            iwhich = level & 1;
 515   11727707   100          if (runs == 1) {
 516                               /* Constructing a single run from a single run.
 517                                * If it's where it belongs already, there's nothing to do.
 518                                * Otherwise, copy it to where it belongs.
 519                                * A run of 1 is either a singleton at level 0,
 520                                * or the second half of a split 3.  In neither event
 521                                * is it necessary to set offset.  It will be set by the merge
 522                                * that immediately follows.
 523                                */
 524    3155267   100              if (iwhich) {       /* Belongs in aux, currently in base */
 525    1490047                    f1 = b = PINDEX(base, offset);  /* where list starts */
 526    1490047                    f2 = PINDEX(aux, offset);       /* where list goes */
 527    1490047                    t = NEXT(f2);                   /* where list will end */
 528    1490047                    offset = PNELEM(aux, t);        /* offset thereof */
 529    1490047                    t = PINDEX(base, offset);       /* where it currently ends */
 530    2850288   100              FROMTOUPTO(f1, f2, t);          /* copy */
 531    1490047                    NEXT(b) = t;                    /* set up parallel pointer */
 532    1665220   100          } else if (level == 0) goto done;   /* single run at level 0 */
 533                           } else {
 534                               /* Constructing a single run from two runs.
 535                                * The merge code at the top will do that.
 536                                * We need only make sure the two runs are in the "other" array,
 537                                * so they'll end up in the correct array after the merge.
 538                                */
 539    8572440                ++level;
 540    8572440                ++stackp;
 541    8572440                stackp->offset = offset;
 542    8572440                stackp->runs = 0;   /* take care of both runs, trigger merge */
 543    8572440   100          if (!iwhich) {      /* Merged runs belong in aux, copy 1st */
 544    3796731                    f1 = b = PINDEX(base, offset);  /* where first run starts */
 545    3796731                    f2 = PINDEX(aux, offset);       /* where it will be copied */
 546    3796731                    t = NEXT(f2);                   /* where first run will end */
 547    3796731                    offset = PNELEM(aux, t);        /* offset thereof */
 548    3796731                    p = PINDEX(base, offset);       /* end of first run */
 549    3796731                    t = NEXT(t);                    /* where second run will end */
 550    3796731                    t = PINDEX(base, PNELEM(aux, t)); /* where it now ends */
 551   14256070   100              FROMTOUPTO(f1, f2, t);          /* copy both runs */
 552    3796731                    NEXT(b) = p;                    /* paralleled pointer for 1st */
 553    3796731                    NEXT(p) = t;                    /* ... and for second */
 554                               }
 555                           }
 556                       }
 557                     done:
 558    2336454   100      if (aux != small) Safefree(aux);        /* free iff allocated */
 559    2336454   100      if (flags) {
 560    1174693            PL_sort_RealCmp = savecmp;  /* Restore current comparison routine, if any */
 561                       }
 562                       return;
 563                   }
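The sense == 0 / sense == -1 stability device in the merge loop above deserves a stripped-down illustration. The sketch below is hypothetical (plain ints tagged with their origin, not SVs) and hard-codes only the "candidate comes from the second run" direction, whereas the real loop re-picks q and sense on every iteration:

#include <stdio.h>

/* Records carry a tag so we can see that ties keep their input order. */
typedef struct { int key; int tag; } rec;

static int cmp(rec a, rec b) { return (a.key > b.key) - (a.key < b.key); }

static void stable_merge(rec *dst, rec *f1, rec *l1, rec *f2, rec *l2) {
    while (f1 < l1 && f2 < l2) {
        if (cmp(*f2, *f1) <= -1)      /* q = f2, sense = -1: f2 passes f1   */
            *dst++ = *f2++;           /* only when STRICTLY smaller         */
        else
            *dst++ = *f1++;           /* ties favor the first run: stable   */
    }
    while (f1 < l1) *dst++ = *f1++;
    while (f2 < l2) *dst++ = *f2++;
}

int main(void) {
    rec a[] = {{1,0},{3,1}}, b[] = {{3,2},{4,3}}, out[4];
    stable_merge(out, a, a + 2, b, b + 2);
    for (int i = 0; i < 4; i++)
        printf("(%d,%d) ", out[i].key, out[i].tag);  /* (1,0) (3,1) (3,2) (4,3) */
    printf("\n");
    return 0;
}

The two equal keys emerge with tags 1 then 2, i.e. first-run elements win ties, which is exactly what "equality looks high when q comes from l2" buys.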
 564
 565                   /*
 566                    * The quicksort implementation was derived from source code contributed
 567                    * by Tom Horsley.
 568                    *
 569                    * NOTE: this code was derived from Tom Horsley's qsort replacement
 570                    * and should not be confused with the original code.
 571                    */
 572
 573                   /* Copyright (C) Tom Horsley, 1997. All rights reserved.
 574
 575                      Permission granted to distribute under the same terms as perl which are
 576                      (briefly):
 577
 578                        This program is free software; you can redistribute it and/or modify
 579                        it under the terms of either:
 580
 581                          a) the GNU General Public License as published by the Free
 582                          Software Foundation; either version 1, or (at your option) any
 583                          later version, or
 584
 585                          b) the "Artistic License" which comes with this Kit.
 586
 587                      Details on the perl license can be found in the perl source code which
 588                      may be located via the www.perl.com web page.
 589
 590                      This is the most wonderfulest possible qsort I can come up with (and
 591                      still be mostly portable).  My (limited) tests indicate it consistently
 592                      does about 20% fewer calls to compare than does the qsort in the Visual
 593                      C++ library; other vendors may vary.
 594
 595                      Some of the ideas in here can be found in "Algorithms" by Sedgewick;
 596                      others I invented myself (or more likely re-invented since they seemed
 597                      pretty obvious once I watched the algorithm operate for a while).
 598
 599                      Most of this code was written while watching the Marlins sweep the Giants
 600                      in the 1997 National League Playoffs - no Braves fans allowed to use this
 601                      code (just kidding :-).
 602
 603                      I realize that if I wanted to be true to the perl tradition, the only
 604                      comment in this file would be something like:
 605
 606                      ...they shuffled back towards the rear of the line. 'No, not at the
 607                      rear!' the slave-driver shouted. 'Three files up. And stay there...
 608
 609                      However, I really needed to violate that tradition just so I could keep
 610                      track of what happens myself, not to mention some poor fool trying to
 611                      understand this years from now :-).
 612                   */
 613
 614                   /* ********************************************************** Configuration */
 615
 616                   #ifndef QSORT_ORDER_GUESS
 617                   #define QSORT_ORDER_GUESS 2  /* Select doubling version of the netBSD trick */
 618                   #endif
 619
 620                   /* QSORT_MAX_STACK is the largest number of partitions that can be stacked up for
 621                      future processing - a good max upper bound is log base 2 of memory size
 622                      (32 on 32 bit machines, 64 on 64 bit machines, etc).  In reality it can
 623                      safely be smaller than that since the program is taking up some space and
 624                      most operating systems only let you grab some subset of contiguous
 625                      memory (not to mention that you are normally sorting data larger than
 626                      1 byte element size :-).
 627                   */
 628                   #ifndef QSORT_MAX_STACK
 629                   #define QSORT_MAX_STACK 32
 630                   #endif
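The bound is easy to check: the partitioning code below always stacks the larger partition and iterates on the smaller one, so the working partition at least halves between consecutive pushes. With at most 2^32 elements that allows no more than log2(2^32) = 32 simultaneously stacked partitions, which is why QSORT_MAX_STACK of 32 is already conservative for 32-bit address spaces.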
 631
 632                   /* QSORT_BREAK_EVEN is the size of the largest partition we should insertion sort.
 633                      Anything bigger and we use qsort.  If you make this too small, the qsort
 634                      will probably break (or become less efficient), because it doesn't expect
 635                      the middle element of a partition to be the same as the right or left -
 636                      you have been warned.
 637                   */
 638                   #ifndef QSORT_BREAK_EVEN
 639                   #define QSORT_BREAK_EVEN 6
 640                   #endif
 641
 642                   /* QSORT_PLAY_SAFE is the size of the largest partition we're willing
 643                      to go quadratic on.  We inoculate larger partitions against
 644                      quadratic behavior by shuffling them before sorting.  This is not
 645                      an absolute guarantee of non-quadratic behavior, but it would take
 646                      staggeringly bad luck to pick extreme elements as the pivot
 647                      from randomized data.
 648                   */
 649                   #ifndef QSORT_PLAY_SAFE
 650                   #define QSORT_PLAY_SAFE 255
 651                   #endif
 652
 653                   /* ************************************************************* Data Types */
 654
 655                   /* hold left and right index values of a partition waiting to be sorted (the
 656                      partition includes both left and right - right is NOT one past the end or
 657                      anything like that).
 658                   */
 659                   struct partition_stack_entry {
 660                       int left;
 661                       int right;
 662                   #ifdef QSORT_ORDER_GUESS
 663                       int qsort_break_even;
 664                   #endif
 665                   };
 666
 667                   /* ******************************************************* Shorthand Macros */
 668
 669                   /* Note that these macros will be used from inside the qsort function where
 670                      we happen to know that the variable 'elt_size' contains the size of an
 671                      array element and the variable 'temp' points to enough space to hold a
 672                      temp element and the variable 'array' points to the array being sorted
 673                      and 'compare' is the pointer to the compare routine.
 674
 675                      Also note that there are very many highly architecture specific ways
 676                      these might be sped up, but this is simply the most generally portable
 677                      code I could think of.
 678                   */
 679
 680                   /* Return < 0, == 0, or > 0 as the value of elt1 is < elt2, == elt2, > elt2
 681                   */
 682                   #define qsort_cmp(elt1, elt2) \
 683                       ((*compare)(aTHX_ array[elt1], array[elt2]))
 684
 685                   #ifdef QSORT_ORDER_GUESS
 686                   #define QSORT_NOTICE_SWAP swapped++;
 687                   #else
 688                   #define QSORT_NOTICE_SWAP
 689                   #endif
 690
 691                   /* swaps contents of array elements elt1, elt2.
 692                   */
 693                   #define qsort_swap(elt1, elt2) \
 694                       STMT_START { \
 695                           QSORT_NOTICE_SWAP \
 696                           temp = array[elt1]; \
 697                           array[elt1] = array[elt2]; \
 698                           array[elt2] = temp; \
 699                       } STMT_END
 700
 701                   /* rotate contents of elt1, elt2, elt3 such that elt1 gets elt2, elt2 gets
 702                      elt3 and elt3 gets elt1.
 703                   */
 704                   #define qsort_rotate(elt1, elt2, elt3) \
 705                       STMT_START { \
 706                           QSORT_NOTICE_SWAP \
 707                           temp = array[elt1]; \
 708                           array[elt1] = array[elt2]; \
 709                           array[elt2] = array[elt3]; \
 710                           array[elt3] = temp; \
 711                       } STMT_END
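The direction of qsort_rotate is easy to misread, so a throwaway check (ordinary ints rather than the SV array the macro assumes) confirms the comment's description:

#include <stdio.h>

int main(void) {
    /* After the 3-cycle, elt1 holds elt2's old value, elt2 holds elt3's,
     * and elt3 holds elt1's -- the order the comment above describes. */
    int array[3] = {10, 20, 30}, temp;
    temp = array[0]; array[0] = array[1]; array[1] = array[2]; array[2] = temp;
    printf("%d %d %d\n", array[0], array[1], array[2]);   /* 20 30 10 */
    return 0;
}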
712
|
|
|
|
|
|
|
713
|
|
|
|
|
|
/* ************************************************************ Debug stuff */ |
714
|
|
|
|
|
|
|
715
|
|
|
|
|
|
#ifdef QSORT_DEBUG |
716
|
|
|
|
|
|
|
717
|
|
|
|
|
|
static void |
718
|
|
|
|
|
|
break_here() |
719
|
|
|
|
|
|
{ |
720
|
|
|
|
|
|
return; /* good place to set a breakpoint */ |
721
|
|
|
|
|
|
} |
722
|
|
|
|
|
|
|
723
|
|
|
|
|
|
#define qsort_assert(t) (void)( (t) || (break_here(), 0) ) |
724
|
|
|
|
|
|
|
725
|
|
|
|
|
|
static void |
726
|
|
|
|
|
|
doqsort_all_asserts( |
727
|
|
|
|
|
|
void * array, |
728
|
|
|
|
|
|
size_t num_elts, |
729
|
|
|
|
|
|
size_t elt_size, |
730
|
|
|
|
|
|
int (*compare)(const void * elt1, const void * elt2), |
731
|
|
|
|
|
|
int pc_left, int pc_right, int u_left, int u_right) |
732
|
|
|
|
|
|
{ |
733
|
|
|
|
|
|
int i; |
734
|
|
|
|
|
|
|
735
|
|
|
|
|
|
qsort_assert(pc_left <= pc_right); |
736
|
|
|
|
|
|
qsort_assert(u_right < pc_left); |
737
|
|
|
|
|
|
qsort_assert(pc_right < u_left); |
738
|
|
|
|
|
|
for (i = u_right + 1; i < pc_left; ++i) { |
739
|
|
|
|
|
|
qsort_assert(qsort_cmp(i, pc_left) < 0); |
740
|
|
|
|
|
|
} |
741
|
|
|
|
|
|
for (i = pc_left; i < pc_right; ++i) { |
742
|
|
|
|
|
|
qsort_assert(qsort_cmp(i, pc_right) == 0); |
743
|
|
|
|
|
|
} |
744
|
|
|
|
|
|
for (i = pc_right + 1; i < u_left; ++i) { |
745
|
|
|
|
|
|
qsort_assert(qsort_cmp(pc_right, i) < 0); |
746
|
|
|
|
|
|
} |
747
|
|
|
|
|
|
} |
748
|
|
|
|
|
|
|
749
|
|
|
|
|
|
#define qsort_all_asserts(PC_LEFT, PC_RIGHT, U_LEFT, U_RIGHT) \ |
750
|
|
|
|
|
|
doqsort_all_asserts(array, num_elts, elt_size, compare, \ |
751
|
|
|
|
|
|
PC_LEFT, PC_RIGHT, U_LEFT, U_RIGHT) |
752
|
|
|
|
|
|
|
753
|
|
|
|
|
|
#else |
754
|
|
|
|
|
|
|
755
|
|
|
|
|
|
#define qsort_assert(t) ((void)0) |
756
|
|
|
|
|
|
|
757
|
|
|
|
|
|
#define qsort_all_asserts(PC_LEFT, PC_RIGHT, U_LEFT, U_RIGHT) ((void)0) |
758
|
|
|
|
|
|
|
759
|
|
|
|
|
|
#endif |
760
|
|
|
|
|
|
|
761
|
|
|
|
|
|
/* ****************************************************************** qsort */ |
762
|
|
|
|
|
|
|
763
|
|
|
|
|
|
STATIC void /* the standard unstable (u) quicksort (qsort) */ |
764
|
96
|
|
|
|
|
S_qsortsvu(pTHX_ SV ** array, size_t num_elts, SVCOMPARE_t compare) |
765
|
|
|
|
|
|
{ |
766
|
|
|
|
|
|
SV * temp; |
767
|
|
|
|
|
|
struct partition_stack_entry partition_stack[QSORT_MAX_STACK]; |
768
|
|
|
|
|
|
int next_stack_entry = 0; |
769
|
|
|
|
|
|
int part_left; |
770
|
|
|
|
|
|
int part_right; |
771
|
|
|
|
|
|
#ifdef QSORT_ORDER_GUESS |
772
|
|
|
|
|
|
int qsort_break_even; |
773
|
|
|
|
|
|
int swapped; |
774
|
|
|
|
|
|
#endif |
775
|
|
|
|
|
|
|
776
|
|
|
|
|
|
PERL_ARGS_ASSERT_QSORTSVU; |
777
|
|
|
|
|
|
|
778
|
|
|
|
|
|
/* Make sure we actually have work to do. |
779
|
|
|
|
|
|
*/ |
780
|
96
|
50
|
|
|
|
if (num_elts <= 1) { |
781
|
96
|
|
|
|
|
return; |
782
|
|
|
|
|
|
} |
783
|
|
|
|
|
|
|
784
|
|
|
|
|
|
/* Inoculate large partitions against quadratic behavior */ |
785
|
96
|
100
|
|
|
|
if (num_elts > QSORT_PLAY_SAFE) { |
786
|
|
|
|
|
|
size_t n; |
787
|
|
|
|
|
|
SV ** const q = array; |
788
|
94408
|
100
|
|
|
|
for (n = num_elts; n > 1; ) { |
789
|
94368
|
|
|
|
|
const size_t j = (size_t)(n-- * Drand01()); |
790
|
94368
|
|
|
|
|
temp = q[j]; |
791
|
94368
|
|
|
|
|
q[j] = q[n]; |
792
|
94368
|
|
|
|
|
q[n] = temp; |
793
|
|
|
|
|
|
} |
794
|
|
|
|
|
|
} |
795
|
|
|
|
|
|
|
796
|
|
|
|
|
|
/* Setup the initial partition definition and fall into the sorting loop |
797
|
|
|
|
|
|
*/ |
798
|
|
|
|
|
|
part_left = 0; |
799
|
15675
|
|
|
|
|
part_right = (int)(num_elts - 1); |
800
|
|
|
|
|
|
#ifdef QSORT_ORDER_GUESS |
801
|
|
|
|
|
|
qsort_break_even = QSORT_BREAK_EVEN; |
802
|
|
|
|
|
|
#else |
803
|
|
|
|
|
|
#define qsort_break_even QSORT_BREAK_EVEN |
804
|
|
|
|
|
|
#endif |
805
|
|
|
|
|
|
for ( ; ; ) { |
806
|
31254
|
100
|
|
|
|
if ((part_right - part_left) >= qsort_break_even) { |
807
|
|
|
|
|
|
/* OK, this is gonna get hairy, so lets try to document all the |
808
|
|
|
|
|
|
concepts and abbreviations and variables and what they keep |
809
|
|
|
|
|
|
track of: |
810
|
|
|
|
|
|
|
811
|
|
|
|
|
|
pc: pivot chunk - the set of array elements we accumulate in the |
812
|
|
|
|
|
|
middle of the partition, all equal in value to the original |
813
|
|
|
|
|
|
pivot element selected. The pc is defined by: |
814
|
|
|
|
|
|
|
815
|
|
|
|
|
|
pc_left - the leftmost array index of the pc |
816
|
|
|
|
|
|
pc_right - the rightmost array index of the pc |
817
|
|
|
|
|
|
|
818
|
|
|
|
|
|
we start with pc_left == pc_right and only one element |
819
|
|
|
|
|
|
in the pivot chunk (but it can grow during the scan). |
820
|
|
|
|
|
|
|
821
|
|
|
|
|
|
u: uncompared elements - the set of elements in the partition |
822
|
|
|
|
|
|
we have not yet compared to the pivot value. There are two |
823
|
|
|
|
|
|
uncompared sets during the scan - one to the left of the pc |
824
|
|
|
|
|
|
and one to the right. |
825
|
|
|
|
|
|
|
826
|
|
|
|
|
|
u_right - the rightmost index of the left side's uncompared set |
827
|
|
|
|
|
|
u_left - the leftmost index of the right side's uncompared set |
828
|
|
|
|
|
|
|
829
|
|
|
|
|
|
The leftmost index of the left sides's uncompared set |
830
|
|
|
|
|
|
doesn't need its own variable because it is always defined |
831
|
|
|
|
|
|
by the leftmost edge of the whole partition (part_left). The |
832
|
|
|
|
|
|
same goes for the rightmost edge of the right partition |
833
|
|
|
|
|
|
(part_right). |
834
|
|
|
|
|
|
|
835
|
|
|
|
|
|
We know there are no uncompared elements on the left once we |
836
|
|
|
|
|
|
get u_right < part_left and no uncompared elements on the |
837
|
|
|
|
|
|
right once u_left > part_right. When both these conditions |
838
|
|
|
|
|
|
are met, we have completed the scan of the partition. |
839
|
|
|
|
|
|
|
840
|
|
|
|
|
|
Any elements which are between the pivot chunk and the |
841
|
|
|
|
|
|
uncompared elements should be less than the pivot value on |
842
|
|
|
|
|
|
the left side and greater than the pivot value on the right |
843
|
|
|
|
|
|
side (in fact, the goal of the whole algorithm is to arrange |
844
|
|
|
|
|
|
for that to be true and make the groups of less-than and |
845
|
|
|
|
|
|
greater-then elements into new partitions to sort again). |
846
|
|
|
|
|
|
|
847
|
|
|
|
|
|
As you marvel at the complexity of the code and wonder why it |
848
|
|
|
|
|
|
has to be so confusing. Consider some of the things this level |
849
|
|
|
|
|
|
of confusion brings: |
850
|
|
|
|
|
|
|
851
|
|
|
|
|
|
Once I do a compare, I squeeze every ounce of juice out of it. I |
852
|
|
|
|
|
|
never do compare calls I don't have to do, and I certainly never |
853
|
|
|
|
|
|
do redundant calls. |
854
|
|
|
|
|
|
|
855
|
|
|
|
|
|
I also never swap any elements unless I can prove there is a |
856
|
|
|
|
|
|
good reason. Many sort algorithms will swap a known value with |
857
|
|
|
|
|
|
an uncompared value just to get things in the right place (or |
858
|
|
|
|
|
|
avoid complexity :-), but that uncompared value, once it gets |
859
|
|
|
|
|
|
compared, may then have to be swapped again. A lot of the |
860
|
|
|
|
|
|
complexity of this code is due to the fact that it never swaps |
861
|
|
|
|
|
|
anything except compared values, and it only swaps them when the |
862
|
|
|
|
|
|
compare shows they are out of position. |
863
|
|
|
|
|
|
*/ |
864
|
|
|
|
|
|
int pc_left, pc_right; |
865
|
|
|
|
|
|
int u_right, u_left; |
866
|
|
|
|
|
|
|
867
|
|
|
|
|
|
int s; |
868
|
|
|
|
|
|
|
869
|
15816
|
|
|
|
|
pc_left = ((part_left + part_right) / 2); |
870
|
|
|
|
|
|
pc_right = pc_left; |
871
|
15816
|
|
|
|
|
u_right = pc_left - 1; |
872
|
15816
|
|
|
|
|
u_left = pc_right + 1; |
873
|
|
|
|
|
|
|
874
|
|
|
|
|
|
/* Qsort works best when the pivot value is also the median value |
875
|
|
|
|
|
|
in the partition (unfortunately you can't find the median value |
876
|
|
|
|
|
|
without first sorting :-), so to give the algorithm a helping |
877
|
|
|
|
|
|
hand, we pick 3 elements and sort them and use the median value |
878
|
|
|
|
|
|
of that tiny set as the pivot value. |
879
|
|
|
|
|
|
|
880
|
|
|
|
|
|
Some versions of qsort like to use the left middle and right as |
881
|
|
|
|
|
|
the 3 elements to sort so they can insure the ends of the |
882
|
|
|
|
|
|
partition will contain values which will stop the scan in the |
883
|
|
|
|
|
|
compare loop, but when you have to call an arbitrarily complex |
884
|
|
|
|
|
|
routine to do a compare, its really better to just keep track of |
885
|
|
|
|
|
|
array index values to know when you hit the edge of the |
886
|
|
|
|
|
|
partition and avoid the extra compare. An even better reason to |
887
|
|
|
|
|
|
avoid using a compare call is the fact that you can drop off the |
888
|
|
|
|
|
|
edge of the array if someone foolishly provides you with an |
889
|
|
|
|
|
|
unstable compare function that doesn't always provide consistent |
890
|
|
|
|
|
|
results. |
891
|
|
|
|
|
|
|
892
|
|
|
|
|
|
So, since it is simpler for us to compare the three adjacent |
893
|
|
|
|
|
|
elements in the middle of the partition, those are the ones we |
894
|
|
|
|
|
|
pick here (conveniently pointed at by u_right, pc_left, and |
895
|
|
|
|
|
|
u_left). The values of the left, center, and right elements |
896
|
|
|
|
|
|
are refered to as l c and r in the following comments. |
897
|
|
|
|
|
|
*/ |
898
|
|
|
|
|
|
|
899
|
|
|
|
|
|
#ifdef QSORT_ORDER_GUESS |
900
|
|
|
|
|
|
swapped = 0; |
901
|
|
|
|
|
|
#endif |
902
|
15816
|
|
|
|
|
s = qsort_cmp(u_right, pc_left); |
903
|
15816
|
100
|
|
|
|
if (s < 0) { |
904
|
|
|
|
|
|
/* l < c */ |
905
|
7880
|
|
|
|
|
s = qsort_cmp(pc_left, u_left); |
906
|
|
|
|
|
|
/* if l < c, c < r - already in order - nothing to do */ |
907
|
7880
|
100
|
|
|
|
if (s == 0) { |
908
|
|
|
|
|
|
/* l < c, c == r - already in order, pc grows */ |
909
|
16
|
|
|
|
|
++pc_right; |
910
|
|
|
|
|
|
qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1); |
911
|
7864
|
100
|
|
|
|
} else if (s > 0) { |
912
|
|
|
|
|
|
/* l < c, c > r - need to know more */ |
913
|
5220
|
|
|
|
|
s = qsort_cmp(u_right, u_left); |
914
|
5220
|
100
|
|
|
|
if (s < 0) { |
915
|
|
|
|
|
|
/* l < c, c > r, l < r - swap c & r to get ordered */ |
916
|
2646
|
|
|
|
|
qsort_swap(pc_left, u_left); |
917
|
|
|
|
|
|
qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1); |
918
|
2574
|
100
|
|
|
|
} else if (s == 0) { |
919
|
|
|
|
|
|
/* l < c, c > r, l == r - swap c&r, grow pc */ |
920
|
22
|
|
|
|
|
qsort_swap(pc_left, u_left); |
921
|
22
|
|
|
|
|
--pc_left; |
922
|
|
|
|
|
|
qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1); |
923
|
|
|
|
|
|
} else { |
924
|
|
|
|
|
|
/* l < c, c > r, l > r - make lcr into rlc to get ordered */ |
925
|
2552
|
|
|
|
|
qsort_rotate(pc_left, u_right, u_left); |
926
|
|
|
|
|
|
qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1); |
927
|
|
|
|
|
|
} |
928
|
|
|
|
|
|
} |
929
|
7936
|
100
|
|
|
|
} else if (s == 0) { |
930
|
|
|
|
|
|
/* l == c */ |
931
|
268
|
|
|
|
|
s = qsort_cmp(pc_left, u_left); |
932
|
268
|
100
|
|
|
|
if (s < 0) { |
933
|
|
|
|
|
|
/* l == c, c < r - already in order, grow pc */ |
934
|
26
|
|
|
|
|
--pc_left; |
935
|
|
|
|
|
|
qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1); |
936
|
242
|
100
|
|
|
|
} else if (s == 0) { |
937
|
|
|
|
|
|
/* l == c, c == r - already in order, grow pc both ways */ |
938
|
214
|
|
|
|
|
--pc_left; |
939
|
214
|
|
|
|
|
++pc_right; |
940
|
|
|
|
|
|
qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1); |
941
|
|
|
|
|
|
} else { |
942
|
|
|
|
|
|
/* l == c, c > r - swap l & r, grow pc */ |
943
|
28
|
|
|
|
|
qsort_swap(u_right, u_left); |
944
|
28
|
|
|
|
|
++pc_right; |
945
|
|
|
|
|
|
qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1); |
946
|
|
|
|
|
|
} |
947
|
|
|
|
|
|
} else { |
948
|
|
|
|
|
|
/* l > c */ |
949
|
7668
|
|
|
|
|
s = qsort_cmp(pc_left, u_left); |
950
|
7668
|
100
|
|
|
|
if (s < 0) { |
951
|
|
|
|
|
|
/* l > c, c < r - need to know more */ |
952
|
5148
|
|
|
|
|
s = qsort_cmp(u_right, u_left); |
953
|
5148
|
100
|
|
|
|
if (s < 0) { |
954
|
|
|
|
|
|
/* l > c, c < r, l < r - swap l & c to get ordered */ |
955
|
2608
|
|
|
|
|
qsort_swap(u_right, pc_left); |
956
|
|
|
|
|
|
qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1); |
957
|
2540
|
100
|
|
|
|
} else if (s == 0) { |
958
|
|
|
|
|
|
/* l > c, c < r, l == r - swap l & c, grow pc */ |
959
|
16
|
|
|
|
|
qsort_swap(u_right, pc_left); |
960
|
16
|
|
|
|
|
++pc_right; |
961
|
|
|
|
|
|
qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1); |
962
|
|
|
|
|
|
} else { |
963
|
|
|
|
|
|
/* l > c, c < r, l > r - rotate lcr into crl to order */ |
964
|
2524
|
|
|
|
|
qsort_rotate(u_right, pc_left, u_left); |
965
|
|
|
|
|
|
qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1); |
966
|
|
|
|
|
|
} |
967
|
2520
|
100
|
|
|
|
} else if (s == 0) { |
968
|
|
|
|
|
|
/* l > c, c == r - swap ends, grow pc */ |
969
|
14
|
|
|
|
|
qsort_swap(u_right, u_left); |
970
|
14
|
|
|
|
|
--pc_left; |
971
|
|
|
|
|
|
qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1); |
972
|
|
|
|
|
|
} else { |
973
|
|
|
|
|
|
/* l > c, c > r - swap ends to get in order */ |
974
|
2506
|
|
|
|
|
qsort_swap(u_right, u_left); |
975
|
|
|
|
|
|
qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1); |
976
|
|
|
|
|
|
} |
977
|
|
|
|
|
|
} |
978
|
|
|
|
|
|
/* We now know the 3 middle elements have been compared and |
979
|
|
|
|
|
|
arranged in the desired order, so we can shrink the uncompared |
980
|
|
|
|
|
|
sets on both sides |
981
|
|
|
|
|
|
*/ |
982
|
15816
|
|
|
|
|
--u_right; |
983
|
15816
|
|
|
|
|
++u_left; |
984
|
|
|
|
|
|
qsort_all_asserts(pc_left, pc_right, u_left, u_right); |
985
|
|
|
|
|
|
|
986
|
|
|
|
|
|
/* The above massive nested if was the simple part :-). We now have |
987
|
|
|
|
|
|
the middle 3 elements ordered and we need to scan through the |
988
|
|
|
|
|
|
uncompared sets on either side, swapping elements that are on |
989
|
|
|
|
|
|
the wrong side or simply shuffling equal elements around to get |
990
|
|
|
|
|
|
all equal elements into the pivot chunk. |
991
|
|
|
|
|
|
*/ |
992
|
|
|
|
|
|
|
993
|
|
|
|
|
|
for ( ; ; ) { |
994
|
|
|
|
|
|
int still_work_on_left; |
995
|
|
|
|
|
|
int still_work_on_right; |
996
|
|
|
|
|
|
|
997
|
|
|
|
|
|
/* Scan the uncompared values on the left. If I find a value |
998
|
|
|
|
|
|
equal to the pivot value, move it over so it is adjacent to |
999
|
|
|
|
|
|
the pivot chunk and expand the pivot chunk. If I find a value |
1000
|
|
|
|
|
|
less than the pivot value, then just leave it - its already |
1001
|
|
|
|
|
|
on the correct side of the partition. If I find a greater |
1002
|
|
|
|
|
|
value, then stop the scan. |
1003
|
|
|
|
|
|
*/ |
1004
|
546156
|
100
|
|
|
|
while ((still_work_on_left = (u_right >= part_left))) { |
1005
|
439368
|
|
|
|
|
s = qsort_cmp(u_right, pc_left); |
1006
|
439368
|
100
|
|
|
|
if (s < 0) { |
1007
|
213434
|
|
|
|
|
--u_right; |
1008
|
279328
|
100
|
|
|
|
} else if (s == 0) { |
1009
|
11492
|
|
|
|
|
--pc_left; |
1010
|
11492
|
100
|
|
|
|
if (pc_left != u_right) { |
1011
|
5800
|
|
|
|
|
qsort_swap(u_right, pc_left); |
1012
|
|
|
|
|
|
} |
1013
|
270916
|
|
|
|
|
--u_right; |
1014
|
|
|
|
|
|
} else { |
1015
|
|
|
|
|
|
break; |
1016
|
|
|
|
|
|
} |
1017
|
|
|
|
|
|
qsort_assert(u_right < pc_left); |
1018
|
|
|
|
|
|
qsort_assert(pc_left <= pc_right); |
1019
|
|
|
|
|
|
qsort_assert(qsort_cmp(u_right + 1, pc_left) <= 0); |
1020
|
|
|
|
|
|
qsort_assert(qsort_cmp(pc_left, pc_right) == 0); |
1021
|
|
|
|
|
|
} |
1022
|
|
|
|
|
|
|
1023
|
|
|
|
|
|
/* Do a mirror image scan of uncompared values on the right |
1024
|
|
|
|
|
|
*/ |
1025
|
550238
|
100
|
|
|
|
while ((still_work_on_right = (u_left <= part_right))) { |
1026
|
446750
|
|
|
|
|
s = qsort_cmp(pc_right, u_left); |
1027
|
446750
|
100
|
|
|
|
if (s < 0) { |
1028
|
217466
|
|
|
|
|
++u_left; |
1029
|
229284
|
100
|
|
|
|
} else if (s == 0) { |
1030
|
11542
|
|
|
|
|
++pc_right; |
1031
|
11542
|
100
|
|
|
|
if (pc_right != u_left) { |
1032
|
5656
|
|
|
|
|
qsort_swap(pc_right, u_left); |
1033
|
|
|
|
|
|
} |
1034
|
120275
|
|
|
|
|
++u_left; |
1035
|
|
|
|
|
|
} else { |
1036
|
|
|
|
|
|
break; |
1037
|
|
|
|
|
|
} |
1038
|
|
|
|
|
|
qsort_assert(u_left > pc_right); |
1039
|
|
|
|
|
|
qsort_assert(pc_left <= pc_right); |
1040
|
|
|
|
|
|
qsort_assert(qsort_cmp(pc_right, u_left - 1) <= 0); |
1041
|
|
|
|
|
|
qsort_assert(qsort_cmp(pc_left, pc_right) == 0); |
1042
|
|
|
|
|
|
} |
1043
|
|
|
|
|
|
|
1044
|
321230
|
100
|
|
|
|
if (still_work_on_left) { |
1045
|
|
|
|
|
|
/* I know I have a value on the left side which needs to be |
1046
|
|
|
|
|
|
on the right side, but I need to know more to decide |
1047
|
|
|
|
|
|
exactly the best thing to do with it. |
1048
|
|
|
|
|
|
*/ |
1049
|
214442
|
100
|
|
|
|
if (still_work_on_right) { |
1050
|
|
|
|
|
|
/* I know I have values on both side which are out of |
1051
|
|
|
|
|
|
position. This is a big win because I kill two birds |
1052
|
|
|
|
|
|
with one swap (so to speak). I can advance the |
1053
|
|
|
|
|
|
uncompared pointers on both sides after swapping both |
1054
|
|
|
|
|
|
of them into the right place. |
1055
|
|
|
|
|
|
*/ |
1056
|
126770
|
|
|
|
|
qsort_swap(u_right, u_left); |
1057
|
126770
|
|
|
|
|
--u_right; |
1058
|
126770
|
|
|
|
|
++u_left; |
1059
|
|
|
|
|
|
qsort_all_asserts(pc_left, pc_right, u_left, u_right); |
1060
|
|
|
|
|
|
} else { |
1061
|
|
|
|
|
|
/* I have an out of position value on the left, but the |
1062
|
|
|
|
|
|
right is fully scanned, so I "slide" the pivot chunk |
1063
|
|
|
|
|
|
and any less-than values left one to make room for the |
1064
|
|
|
|
|
|
greater value over on the right. If the out of position |
1065
|
|
|
|
|
|
value is immediately adjacent to the pivot chunk (there |
1066
|
|
|
|
|
|
are no less-than values), I can do that with a swap, |
1067
|
|
|
|
|
|
otherwise, I have to rotate one of the less than values |
1068
|
|
|
|
|
|
into the former position of the out of position value |
1069
|
|
|
|
|
|
and the right end of the pivot chunk into the left end |
1070
|
|
|
|
|
|
(got all that?). |
1071
|
|
|
|
|
|
*/ |
1072
|
87672
|
|
|
|
|
--pc_left; |
1073
|
87672
|
100
|
|
|
|
if (pc_left == u_right) { |
1074
|
2238
|
|
|
|
|
qsort_swap(u_right, pc_right); |
1075
|
|
|
|
|
|
qsort_all_asserts(pc_left, pc_right-1, u_left, u_right-1); |
1076
|
|
|
|
|
|
} else { |
1077
|
85434
|
|
|
|
|
qsort_rotate(u_right, pc_left, pc_right); |
1078
|
|
|
|
|
|
qsort_all_asserts(pc_left, pc_right-1, u_left, u_right-1); |
1079
|
|
|
|
|
|
} |
1080
|
87672
|
|
|
|
|
--pc_right; |
1081
|
87672
|
|
|
|
|
--u_right; |
1082
|
|
|
|
|
|
} |
1083
|
106788
|
100
|
|
|
|
} else if (still_work_on_right) { |
1084
|
|
|
|
|
|
/* Mirror image of complex case above: I have an out of |
1085
|
|
|
|
|
|
position value on the right, but the left is fully |
1086
|
|
|
|
|
|
scanned, so I need to shuffle things around to make room |
1087
|
|
|
|
|
|
for the right value on the left. |
1088
|
|
|
|
|
|
*/ |
1089
|
90972
|
|
|
|
|
++pc_right; |
1090
|
90972
|
100
|
|
|
|
if (pc_right == u_left) { |
1091
|
2620
|
|
|
|
|
qsort_swap(u_left, pc_left); |
1092
|
|
|
|
|
|
qsort_all_asserts(pc_left+1, pc_right, u_left+1, u_right); |
1093
|
|
|
|
|
|
} else { |
1094
|
88352
|
|
|
|
|
qsort_rotate(pc_right, pc_left, u_left); |
1095
|
|
|
|
|
|
qsort_all_asserts(pc_left+1, pc_right, u_left+1, u_right); |
1096
|
|
|
|
|
|
} |
1097
|
90972
|
|
|
|
|
++pc_left; |
1098
|
90972
|
|
|
|
|
++u_left; |
1099
|
|
|
|
|
|
} else { |
1100
|
|
|
|
|
|
/* No more scanning required on either side of partition, |
1101
|
|
|
|
|
|
break out of loop and figure out next set of partitions |
1102
|
|
|
|
|
|
*/ |
1103
|
|
|
|
|
|
break; |
1104
|
|
|
|
|
|
} |
1105
|
|
|
|
|
|
} |

            /* The elements in the pivot chunk are now in the right place. They
               will never move or be compared again.  All I have to do is decide
               what to do with the stuff to the left and right of the pivot
               chunk.

               Notes on the QSORT_ORDER_GUESS ifdef code:

               1. If I just built these partitions without swapping any (or
                  very many) elements, there is a chance that the elements are
                  already ordered properly (being properly ordered will
                  certainly result in no swapping, but the converse can't be
                  proved :-).

               2. A (properly written) insertion sort will run faster on
                  already ordered data than qsort will.

               3. Perhaps there is some way to make a good guess about
                  switching to an insertion sort earlier than partition size 6
                  (for instance - we could save the partition size on the stack
                  and increase the size each time we find we didn't swap, thus
                  switching to insertion sort earlier for partitions with a
                  history of not swapping).

               4. Naturally, if I just switch right away, it will make
                  artificial benchmarks with pure ascending (or descending)
                  data look really good, but is that a good reason in general?
                  Hard to say...
            */

#ifdef QSORT_ORDER_GUESS
            if (swapped < 3) {
#if QSORT_ORDER_GUESS == 1
                qsort_break_even = (part_right - part_left) + 1;
#endif
#if QSORT_ORDER_GUESS == 2
                qsort_break_even *= 2;
#endif
#if QSORT_ORDER_GUESS == 3
                const int prev_break = qsort_break_even;
                qsort_break_even *= qsort_break_even;
                if (qsort_break_even < prev_break) {
                    qsort_break_even = (part_right - part_left) + 1;
                }
#endif
            } else {
                qsort_break_even = QSORT_BREAK_EVEN;
            }
#endif
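
            /* For example, with QSORT_ORDER_GUESS == 2: every partition that
             * completes with fewer than 3 swaps doubles qsort_break_even, so
             * mostly-ordered input quickly raises the threshold below which
             * the insertion-sort branch takes over, while any partition that
             * swaps 3 or more times resets it to QSORT_BREAK_EVEN.
             */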

            if (part_left < pc_left) {
                /* There are elements on the left which need more processing.
                   Check the right as well before deciding what to do.
                */
                if (pc_right < part_right) {
                    /* We have two partitions to be sorted.  Stack the biggest one
                       and process the smallest one on the next iteration.  This
                       minimizes the stack height by ensuring that any additional
                       stack entries must come from the smallest partition which
                       (because it is smallest) will have the fewest
                       opportunities to generate additional stack entries.
                    */
                    if ((part_right - pc_right) > (pc_left - part_left)) {
                        /* stack the right partition, process the left */
                        partition_stack[next_stack_entry].left = pc_right + 1;
                        partition_stack[next_stack_entry].right = part_right;
#ifdef QSORT_ORDER_GUESS
                        partition_stack[next_stack_entry].qsort_break_even = qsort_break_even;
#endif
                        part_right = pc_left - 1;
                    } else {
                        /* stack the left partition, process the right */
                        partition_stack[next_stack_entry].left = part_left;
                        partition_stack[next_stack_entry].right = pc_left - 1;
#ifdef QSORT_ORDER_GUESS
                        partition_stack[next_stack_entry].qsort_break_even = qsort_break_even;
#endif
                        part_left = pc_right + 1;
                    }
                    qsort_assert(next_stack_entry < QSORT_MAX_STACK);
                    ++next_stack_entry;
                } else {
                    /* The elements on the left are the only remaining elements
                       that need sorting, arrange for them to be processed as the
                       next partition.
                    */
                    part_right = pc_left - 1;
                }
            } else if (pc_right < part_right) {
                /* There is only one chunk on the right to be sorted, make it
                   the new partition and loop back around.
                */
                part_left = pc_right + 1;
            } else {
                /* This whole partition wound up in the pivot chunk, so
                   we need to get a new partition off the stack.
                */
                if (next_stack_entry == 0) {
                    /* the stack is empty - we are done */
                    break;
                }
                --next_stack_entry;
                part_left = partition_stack[next_stack_entry].left;
                part_right = partition_stack[next_stack_entry].right;
#ifdef QSORT_ORDER_GUESS
                qsort_break_even = partition_stack[next_stack_entry].qsort_break_even;
#endif
            }
        } else {
            /* This partition is too small to fool with qsort complexity, just
               do an ordinary insertion sort to minimize overhead.
            */
            int i;
            /* Assume 1st element is in right place already, and start checking
               at 2nd element to see where it should be inserted.
            */
            for (i = part_left + 1; i <= part_right; ++i) {
                int j;
                /* Scan (backwards - just in case 'i' is already in right place)
                   through the elements already sorted to see if the ith element
                   belongs ahead of one of them.
                */
                for (j = i - 1; j >= part_left; --j) {
                    if (qsort_cmp(i, j) >= 0) {
                        /* i belongs right after j
                        */
                        break;
                    }
                }
                ++j;
                if (j != i) {
                    /* Looks like we really need to move some things
                    */
                    int k;
                    temp = array[i];
                    for (k = i - 1; k >= j; --k)
                        array[k + 1] = array[k];
                    array[j] = temp;
                }
            }

            /* That partition is now sorted, grab the next one, or get out
               of the loop if there aren't any more.
            */

            if (next_stack_entry == 0) {
                /* the stack is empty - we are done */
                break;
            }
            --next_stack_entry;
            part_left = partition_stack[next_stack_entry].left;
            part_right = partition_stack[next_stack_entry].right;
#ifdef QSORT_ORDER_GUESS
            qsort_break_even = partition_stack[next_stack_entry].qsort_break_even;
#endif
        }
    }

    /* Believe it or not, the array is sorted at this point! */
}
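
/* A note on the partition stack used above: since the loop always stacks
 * the larger sub-partition and iterates on the smaller one, the partition
 * being processed at least halves with every push, so no more than about
 * log2(nmemb) stack entries can ever be outstanding at once.  This is the
 * classic argument for why a small, fixed QSORT_MAX_STACK suffices.
 */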

/* Stabilize what is, presumably, an otherwise unstable sort method.
 * We do that by allocating (or having on hand) an array of pointers
 * that is the same size as the original array of elements to be sorted.
 * We initialize this parallel array with the addresses of the original
 * array elements.  This indirection can make you crazy.
 * Some pictures can help.  After initializing, we have
 *
 *  indir                  list1
 * +----+                 +----+
 * |    | --------------> |    | ------> first element to be sorted
 * +----+                 +----+
 * |    | --------------> |    | ------> second element to be sorted
 * +----+                 +----+
 * |    | --------------> |    | ------> third element to be sorted
 * +----+                 +----+
 *  ...
 * +----+                 +----+
 * |    | --------------> |    | ------> n-1st element to be sorted
 * +----+                 +----+
 * |    | --------------> |    | ------> n-th element to be sorted
 * +----+                 +----+
 *
 * During the sort phase, we leave the elements of list1 where they are,
 * and sort the pointers in the indirect array in the same order determined
 * by the original comparison routine on the elements pointed to.
 * Because we don't move the elements of list1 around through
 * this phase, we can break ties on elements that compare equal
 * using their address in the list1 array, ensuring stability.
 * This leaves us with something looking like
 *
 *  indir                  list1
 * +----+                 +----+
 * |    | --+       +---> |    | ------> first element to be sorted
 * +----+   |       |     +----+
 * |    | --|-------|---> |    | ------> second element to be sorted
 * +----+   |       |     +----+
 * |    | --|-------+ +-> |    | ------> third element to be sorted
 * +----+   |         |   +----+
 *  ...
 * +----+   |         |   +----+
 * |    | --|-+       +-> |    | ------> n-1st element to be sorted
 * +----+   | |           +----+
 * |    | --+ +---------> |    | ------> n-th element to be sorted
 * +----+                 +----+
 *
 * where the i-th element of the indirect array points to the element
 * that should be i-th in the sorted array.  After the sort phase,
 * we have to put the elements of list1 into the places
 * dictated by the indirect array.
 */
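
/* A minimal standalone sketch of the indirection trick described above,
 * using the C library's qsort(3) rather than perl's machinery; the names
 * below are illustrative only and are not part of perl.  The indirect
 * array holds the addresses of the original slots, and ties are broken by
 * comparing those addresses, which orders equal elements by their
 * original position and so makes an unstable sort behave stably.
 */
#include <stdlib.h>     /* qsort */

static int
example_cmp_stable(const void *va, const void *vb)
{
    int * const *ap = (int * const *)va;   /* *ap points at an original slot */
    int * const *bp = (int * const *)vb;
    if (**ap != **bp)
        return **ap < **bp ? -1 : 1;       /* primary key: the values */
    /* Equal keys: both pointers target the same original array, so this
     * comparison orders ties by original position. */
    return *ap < *bp ? -1 : (*ap > *bp ? 1 : 0);
}

static void
example_stable_indirect_sort(int *orig, int **indir, size_t n)
{
    size_t i;
    for (i = 0; i < n; i++)
        indir[i] = orig + i;               /* address of each original slot */
    qsort(indir, n, sizeof(*indir), example_cmp_stable);
    /* indir[i] now points at the element that belongs in position i */
}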

static I32
cmpindir(pTHX_ gptr const a, gptr const b)
{
    dVAR;
    gptr * const ap = (gptr *)a;
    gptr * const bp = (gptr *)b;
    const I32 sense = PL_sort_RealCmp(aTHX_ *ap, *bp);

    if (sense)
        return sense;
    return (ap > bp) ? 1 : ((ap < bp) ? -1 : 0);
}

static I32
cmpindir_desc(pTHX_ gptr const a, gptr const b)
{
    dVAR;
    gptr * const ap = (gptr *)a;
    gptr * const bp = (gptr *)b;
    const I32 sense = PL_sort_RealCmp(aTHX_ *ap, *bp);

    /* Reverse the default */
    if (sense)
        return -sense;
    /* But don't reverse the stability test. */
    return (ap > bp) ? 1 : ((ap < bp) ? -1 : 0);
}

STATIC void
S_qsortsv(pTHX_ gptr *list1, size_t nmemb, SVCOMPARE_t cmp, U32 flags)
{
    dVAR;
    if ((flags & SORTf_STABLE) != 0) {
        gptr **pp, *q;
        size_t n, j, i;
        gptr *small[SMALLSORT], **indir, tmp;
        SVCOMPARE_t savecmp;
        if (nmemb <= 1) return;     /* sorted trivially */

        /* Small arrays can use the stack, big ones must be allocated */
        if (nmemb <= SMALLSORT) indir = small;
        else { Newx(indir, nmemb, gptr *); }

        /* Copy pointers to original array elements into indirect array */
        for (n = nmemb, pp = indir, q = list1; n--; ) *pp++ = q++;

        savecmp = PL_sort_RealCmp;  /* Save current comparison routine, if any */
        PL_sort_RealCmp = cmp;      /* Put comparison routine where cmpindir can find it */

        /* sort, with indirection */
        if (flags & SORTf_DESC)
            qsortsvu((gptr *)indir, nmemb, cmpindir_desc);
        else
            qsortsvu((gptr *)indir, nmemb, cmpindir);

        pp = indir;
        q = list1;
        for (n = nmemb; n--; ) {
            /* Assert A: all elements of q with index > n are already
             * in place.  This is vacuously true at the start, and we
             * put element n where it belongs below (if it wasn't
             * already where it belonged).  Assert B: we only move
             * elements that aren't where they belong,
             * so, by A, we never tamper with elements above n.
             */
            j = pp[n] - q;      /* This sets j so that q[j] is
                                 * at pp[n].  *pp[j] belongs in
                                 * q[j], by construction.
                                 */
            if (n != j) {       /* all's well if n == j */
                tmp = q[j];     /* save what's in q[j] */
                do {
                    q[j] = *pp[j];      /* put *pp[j] where it belongs */
                    i = pp[j] - q;      /* the index in q of the element
                                         * just moved */
                    pp[j] = q + j;      /* this is ok now */
                } while ((j = i) != n);
                /* There are only finitely many (nmemb) addresses
                 * in the pp array.
                 * So we must eventually revisit an index we saw before.
                 * Suppose the first revisited index is k != n.
                 * An index is visited because something else belongs there.
                 * If we visit k twice, then two different elements must
                 * belong in the same place, which cannot be.
                 * So j must get back to n, the loop terminates,
                 * and we put the saved element where it belongs.
                 */
                q[n] = tmp;     /* put what belongs into
                                 * the n-th element */
            }
        }

        /* free iff allocated */
        if (indir != small) { Safefree(indir); }
        /* restore prevailing comparison routine */
        PL_sort_RealCmp = savecmp;
    } else if ((flags & SORTf_DESC) != 0) {
        const SVCOMPARE_t savecmp = PL_sort_RealCmp;    /* Save current comparison routine, if any */
        PL_sort_RealCmp = cmp;  /* Put comparison routine where cmp_desc can find it */
        cmp = cmp_desc;
        qsortsvu(list1, nmemb, cmp);
        /* restore prevailing comparison routine */
        PL_sort_RealCmp = savecmp;
    } else {
        qsortsvu(list1, nmemb, cmp);
    }
}
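
/* A standalone sketch of the cycle-following idea used in the loop above,
 * in plain C with illustrative names (not perl API): perm[i] holds the
 * index of the value that belongs at position i, and each cycle is
 * resolved with a single saved temporary instead of a scratch copy of the
 * whole array.  The orientation differs slightly from the loop above, but
 * the invariant is the same: every slot is written exactly once, and read
 * just before the write that would clobber it.
 */
#include <stddef.h>     /* size_t */

static void
example_apply_permutation(int *vals, size_t *perm, size_t n)
{
    size_t i;
    for (i = 0; i < n; i++) {
        size_t j = perm[i];
        if (j != i) {
            const int tmp = vals[i];    /* open the cycle at i */
            size_t k = i;
            while (j != i) {
                vals[k] = vals[j];      /* pull the correct value into k */
                perm[k] = k;            /* slot k is now settled */
                k = j;
                j = perm[k];
            }
            vals[k] = tmp;              /* the saved value closes the cycle */
            perm[k] = k;
        }
    }
}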

/*
=head1 Array Manipulation Functions

=for apidoc sortsv

Sort an array. Here is an example:

    sortsv(AvARRAY(av), av_top_index(av)+1, Perl_sv_cmp_locale);

Currently this always uses mergesort. See sortsv_flags for a more
flexible routine.

=cut
*/

void
Perl_sortsv(pTHX_ SV **array, size_t nmemb, SVCOMPARE_t cmp)
{
    PERL_ARGS_ASSERT_SORTSV;

    sortsv_flags(array, nmemb, cmp, 0);
}

/*
=for apidoc sortsv_flags

Sort an array, with various options.

=cut
*/
void
Perl_sortsv_flags(pTHX_ SV **array, size_t nmemb, SVCOMPARE_t cmp, U32 flags)
{
    PERL_ARGS_ASSERT_SORTSV_FLAGS;

    if (flags & SORTf_QSORT)
        S_qsortsv(aTHX_ array, nmemb, cmp, flags);
    else
        S_mergesortsv(aTHX_ array, nmemb, cmp, flags);
}

#define SvNSIOK(sv) ((SvFLAGS(sv) & SVf_NOK) || ((SvFLAGS(sv) & (SVf_IOK|SVf_IVisUV)) == SVf_IOK))
#define SvSIOK(sv) ((SvFLAGS(sv) & (SVf_IOK|SVf_IVisUV)) == SVf_IOK)
#define SvNSIV(sv) ( SvNOK(sv) ? SvNVX(sv) : ( SvSIOK(sv) ? SvIVX(sv) : sv_2nv(sv) ) )
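
/* Reading the three macros above: SvNSIOK is true when the SV already
 * carries a directly usable numeric value, either a floating-point NV or
 * an IV whose IVisUV flag is clear (i.e. a signed integer).  SvSIOK is
 * the signed-integer case alone.  SvNSIV then fetches whichever cached
 * value applies, falling back to a sv_2nv() conversion when neither is
 * available. */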

PP(pp_sort)
{
    dVAR; dSP; dMARK; dORIGMARK;
    SV **p1 = ORIGMARK+1, **p2;
    SSize_t max, i;
    AV* av = NULL;
    HV *stash;
    GV *gv;
    CV *cv = NULL;
    I32 gimme = GIMME;
    OP* const nextop = PL_op->op_next;
    I32 overloading = 0;
    bool hasargs = FALSE;
    bool copytmps;
    I32 is_xsub = 0;
    I32 sorting_av = 0;
    const U8 priv = PL_op->op_private;
    const U8 flags = PL_op->op_flags;
    U32 sort_flags = 0;
    void (*sortsvp)(pTHX_ SV **array, size_t nmemb, SVCOMPARE_t cmp, U32 flags)
      = Perl_sortsv_flags;
    I32 all_SIVs = 1;

    if ((priv & OPpSORT_DESCEND) != 0)
        sort_flags |= SORTf_DESC;
    if ((priv & OPpSORT_QSORT) != 0)
        sort_flags |= SORTf_QSORT;
    if ((priv & OPpSORT_STABLE) != 0)
        sort_flags |= SORTf_STABLE;

    if (gimme != G_ARRAY) {
        SP = MARK;
        EXTEND(SP,1);
        RETPUSHUNDEF;
    }

    ENTER;
    SAVEVPTR(PL_sortcop);
    if (flags & OPf_STACKED) {
        if (flags & OPf_SPECIAL) {
            OP *kid = cLISTOP->op_first->op_sibling;    /* pass pushmark */
            kid = kUNOP->op_first;                      /* pass rv2gv */
            kid = kUNOP->op_first;                      /* pass leave */
            PL_sortcop = kid->op_next;
            stash = CopSTASH(PL_curcop);
        }
        else {
            GV *autogv = NULL;
            cv = sv_2cv(*++MARK, &stash, &gv, GV_ADD);
          check_cv:
            if (cv && SvPOK(cv)) {
                const char * const proto = SvPV_nolen_const(MUTABLE_SV(cv));
                if (proto && strEQ(proto, "$$")) {
                    hasargs = TRUE;
                }
            }
            if (cv && CvISXSUB(cv) && CvXSUB(cv)) {
                is_xsub = 1;
            }
            else if (!(cv && CvROOT(cv))) {
                if (gv) {
                    goto autoload;
                }
                else if (!CvANON(cv) && (gv = CvGV(cv))) {
                    if (cv != GvCV(gv)) cv = GvCV(gv);
                  autoload:
                    if (!autogv && (
                        autogv = gv_autoload_pvn(
                            GvSTASH(gv), GvNAME(gv), GvNAMELEN(gv),
                            GvNAMEUTF8(gv) ? SVf_UTF8 : 0
                        )
                    )) {
                        cv = GvCVu(autogv);
                        goto check_cv;
                    }
                    else {
                        SV *tmpstr = sv_newmortal();
                        gv_efullname3(tmpstr, gv, NULL);
                        DIE(aTHX_ "Undefined sort subroutine \"%"SVf"\" called",
                            SVfARG(tmpstr));
                    }
                }
                else {
                    DIE(aTHX_ "Undefined subroutine in sort");
                }
            }

            if (is_xsub)
                PL_sortcop = (OP*)cv;
            else
                PL_sortcop = CvSTART(cv);
        }
    }
    else {
        PL_sortcop = NULL;
        stash = CopSTASH(PL_curcop);
    }

    /* optimiser converts "@a = sort @a" to "sort \@a";
     * in case of tied @a, pessimise: push (@a) onto stack, then assign
     * result back to @a at the end of this function */
    if (priv & OPpSORT_INPLACE) {
        assert( MARK+1 == SP && *SP && SvTYPE(*SP) == SVt_PVAV);
        (void)POPMARK; /* remove mark associated with ex-OP_AASSIGN */
        av = MUTABLE_AV((*SP));
        max = AvFILL(av) + 1;
        if (SvMAGICAL(av)) {
            MEXTEND(SP, max);
            for (i=0; i < max; i++) {
                SV **svp = av_fetch(av, i, FALSE);
                *SP++ = (svp) ? *svp : NULL;
            }
            SP--;
            p1 = p2 = SP - (max-1);
        }
        else {
            if (SvREADONLY(av))
                Perl_croak_no_modify();
            else
            {
                SvREADONLY_on(av);
                save_pushptr((void *)av, SAVEt_READONLY_OFF);
            }
            p1 = p2 = AvARRAY(av);
            sorting_av = 1;
        }
    }
    else {
        p2 = MARK+1;
        max = SP - MARK;
    }

    /* shuffle stack down, removing optional initial cv (p1!=p2), plus
     * any nulls; also stringify, or convert to integer or number, any
     * args as required */
    copytmps = !sorting_av && PL_sortcop;
    for (i=max; i > 0 ; i--) {
        if ((*p1 = *p2++)) {                    /* Weed out nulls. */
            if (copytmps && SvPADTMP(*p1) && !IS_PADGV(*p1))
                *p1 = sv_mortalcopy(*p1);
            SvTEMP_off(*p1);
            if (!PL_sortcop) {
                if (priv & OPpSORT_NUMERIC) {
                    if (priv & OPpSORT_INTEGER) {
                        if (!SvIOK(*p1))
                            (void)sv_2iv_flags(*p1, SV_GMAGIC|SV_SKIP_OVERLOAD);
                    }
                    else {
                        if (!SvNSIOK(*p1))
                            (void)sv_2nv_flags(*p1, SV_GMAGIC|SV_SKIP_OVERLOAD);
                        if (all_SIVs && !SvSIOK(*p1))
                            all_SIVs = 0;
                    }
                }
                else {
                    if (!SvPOK(*p1))
                        (void)sv_2pv_flags(*p1, 0,
                            SV_GMAGIC|SV_CONST_RETURN|SV_SKIP_OVERLOAD);
                }
                if (SvAMAGIC(*p1))
                    overloading = 1;
            }
            p1++;
        }
        else
            max--;
    }
    if (sorting_av)
        AvFILLp(av) = max-1;

    if (max > 1) {
        SV **start;
        if (PL_sortcop) {
            PERL_CONTEXT *cx;
            SV** newsp;
            const bool oldcatch = CATCH_GET;

            SAVETMPS;
            SAVEOP();

            CATCH_SET(TRUE);
            PUSHSTACKi(PERLSI_SORT);
            if (!hasargs && !is_xsub) {
                SAVESPTR(PL_firstgv);
                SAVESPTR(PL_secondgv);
                PL_firstgv = gv_fetchpvs("a", GV_ADD|GV_NOTQUAL, SVt_PV);
                PL_secondgv = gv_fetchpvs("b", GV_ADD|GV_NOTQUAL, SVt_PV);
                SAVESPTR(GvSV(PL_firstgv));
                SAVESPTR(GvSV(PL_secondgv));
            }

            PUSHBLOCK(cx, CXt_NULL, PL_stack_base);
            if (!(flags & OPf_SPECIAL)) {
                cx->cx_type = CXt_SUB;
                cx->blk_gimme = G_SCALAR;
                /* If our comparison routine is already active (CvDEPTH is
                 * not 0), then PUSHSUB does not increase the refcount,
                 * so we have to do it ourselves, because the LEAVESUB
                 * further down lowers it. */
                if (CvDEPTH(cv)) SvREFCNT_inc_simple_void_NN(cv);
                PUSHSUB(cx);
                if (!is_xsub) {
                    PADLIST * const padlist = CvPADLIST(cv);

                    if (++CvDEPTH(cv) >= 2) {
                        PERL_STACK_OVERFLOW_CHECK();
                        pad_push(padlist, CvDEPTH(cv));
                    }
                    SAVECOMPPAD();
                    PAD_SET_CUR_NOSAVE(padlist, CvDEPTH(cv));

                    if (hasargs) {
                        /* This is mostly copied from pp_entersub */
                        AV * const av = MUTABLE_AV(PAD_SVl(0));

                        cx->blk_sub.savearray = GvAV(PL_defgv);
                        GvAV(PL_defgv) = MUTABLE_AV(SvREFCNT_inc_simple(av));
                        CX_CURPAD_SAVE(cx->blk_sub);
                        cx->blk_sub.argarray = av;
                    }

                }
            }
            cx->cx_type |= CXp_MULTICALL;

            start = p1 - max;
            sortsvp(aTHX_ start, max,
                    (is_xsub ? S_sortcv_xsub : hasargs ? S_sortcv_stacked : S_sortcv),
                    sort_flags);

            if (!(flags & OPf_SPECIAL)) {
                SV *sv;
                /* Reset cx, in case the context stack has been
                   reallocated. */
                cx = &cxstack[cxstack_ix];
                POPSUB(cx, sv);
                LEAVESUB(sv);
            }
            POPBLOCK(cx,PL_curpm);
            PL_stack_sp = newsp;
            POPSTACK;
            CATCH_SET(oldcatch);
        }
        else {
            MEXTEND(SP, 20);    /* Can't afford stack realloc on signal. */
            start = sorting_av ? AvARRAY(av) : ORIGMARK+1;
            /* pick the comparator: numeric (integer or NV, possibly
             * overloaded), else string (possibly locale-aware) */
            sortsvp(aTHX_ start, max,
                    (priv & OPpSORT_NUMERIC)
                        ? ( ( ( priv & OPpSORT_INTEGER) || all_SIVs)
                            ? ( overloading ? S_amagic_i_ncmp : S_sv_i_ncmp)
                            : ( overloading ? S_amagic_ncmp : S_sv_ncmp ) )
                        : ( IN_LOCALE_RUNTIME
                            ? ( overloading
                                ? (SVCOMPARE_t)S_amagic_cmp_locale
                                : (SVCOMPARE_t)sv_cmp_locale_static)
                            : ( overloading ? (SVCOMPARE_t)S_amagic_cmp : (SVCOMPARE_t)sv_cmp_static)),
                    sort_flags);
        }
        if ((priv & OPpSORT_REVERSE) != 0) {
            SV **q = start+max-1;
            while (start < q) {
                SV * const tmp = *start;
                *start++ = *q;
                *q-- = tmp;
            }
        }
    }
    if (sorting_av)
        SvREADONLY_off(av);
    else if (av && !sorting_av) {
        /* simulate pp_aassign of tied AV */
        SV** const base = MARK+1;
        for (i=0; i < max; i++) {
            base[i] = newSVsv(base[i]);
        }
        av_clear(av);
        av_extend(av, max);
        for (i=0; i < max; i++) {
            SV * const sv = base[i];
            SV ** const didstore = av_store(av, i, sv);
            if (SvSMAGICAL(sv))
                mg_set(sv);
            if (!didstore)
                sv_2mortal(sv);
        }
    }
    LEAVE;
    PL_stack_sp = ORIGMARK + (sorting_av ? 0 : max);
    return nextop;
}

static I32
S_sortcv(pTHX_ SV *const a, SV *const b)
{
    dVAR;
    const I32 oldsaveix = PL_savestack_ix;
    const I32 oldscopeix = PL_scopestack_ix;
    I32 result;
    SV *resultsv;
    PMOP * const pm = PL_curpm;
    OP * const sortop = PL_op;
    COP * const cop = PL_curcop;

    PERL_ARGS_ASSERT_SORTCV;

    GvSV(PL_firstgv) = a;
    GvSV(PL_secondgv) = b;
    PL_stack_sp = PL_stack_base;
    PL_op = PL_sortcop;
    CALLRUNOPS(aTHX);
    PL_op = sortop;
    PL_curcop = cop;
    if (PL_stack_sp != PL_stack_base + 1) {
        assert(PL_stack_sp == PL_stack_base);
        resultsv = &PL_sv_undef;
    }
    else resultsv = *PL_stack_sp;
    if (SvNIOK_nog(resultsv)) result = SvIV(resultsv);
    else {
        ENTER;
        SAVEVPTR(PL_curpad);
        PL_curpad = 0;
        result = SvIV(resultsv);
        LEAVE;
    }
    while (PL_scopestack_ix > oldscopeix) {
        LEAVE;
    }
    leave_scope(oldsaveix);
    PL_curpm = pm;
    return result;
}

static I32
S_sortcv_stacked(pTHX_ SV *const a, SV *const b)
{
    dVAR;
    const I32 oldsaveix = PL_savestack_ix;
    const I32 oldscopeix = PL_scopestack_ix;
    I32 result;
    AV * const av = GvAV(PL_defgv);
    PMOP * const pm = PL_curpm;
    OP * const sortop = PL_op;
    COP * const cop = PL_curcop;
    SV **pad;

    PERL_ARGS_ASSERT_SORTCV_STACKED;

    if (AvREAL(av)) {
        av_clear(av);
        AvREAL_off(av);
        AvREIFY_on(av);
    }
    if (AvMAX(av) < 1) {
        SV **ary = AvALLOC(av);
        if (AvARRAY(av) != ary) {
            AvMAX(av) += AvARRAY(av) - AvALLOC(av);
            AvARRAY(av) = ary;
        }
        if (AvMAX(av) < 1) {
            AvMAX(av) = 1;
            Renew(ary,2,SV*);
            AvARRAY(av) = ary;
            AvALLOC(av) = ary;
        }
    }
    AvFILLp(av) = 1;

    AvARRAY(av)[0] = a;
    AvARRAY(av)[1] = b;
    PL_stack_sp = PL_stack_base;
    PL_op = PL_sortcop;
    CALLRUNOPS(aTHX);
    PL_op = sortop;
    PL_curcop = cop;
    pad = PL_curpad; PL_curpad = 0;
    if (PL_stack_sp != PL_stack_base + 1) {
        assert(PL_stack_sp == PL_stack_base);
        result = SvIV(&PL_sv_undef);
    }
    else result = SvIV(*PL_stack_sp);
    PL_curpad = pad;
    while (PL_scopestack_ix > oldscopeix) {
        LEAVE;
    }
    leave_scope(oldsaveix);
    PL_curpm = pm;
    return result;
}

static I32
S_sortcv_xsub(pTHX_ SV *const a, SV *const b)
{
    dVAR; dSP;
    const I32 oldsaveix = PL_savestack_ix;
    const I32 oldscopeix = PL_scopestack_ix;
    CV * const cv=MUTABLE_CV(PL_sortcop);
    I32 result;
    PMOP * const pm = PL_curpm;

    PERL_ARGS_ASSERT_SORTCV_XSUB;

    SP = PL_stack_base;
    PUSHMARK(SP);
    EXTEND(SP, 2);
    *++SP = a;
    *++SP = b;
    PUTBACK;
    (void)(*CvXSUB(cv))(aTHX_ cv);
    if (PL_stack_sp != PL_stack_base + 1)
        Perl_croak(aTHX_ "Sort subroutine didn't return single value");
    result = SvIV(*PL_stack_sp);
    while (PL_scopestack_ix > oldscopeix) {
        LEAVE;
    }
    leave_scope(oldsaveix);
    PL_curpm = pm;
    return result;
}


static I32
S_sv_ncmp(pTHX_ SV *const a, SV *const b)
{
    const NV nv1 = SvNSIV(a);
    const NV nv2 = SvNSIV(b);

    PERL_ARGS_ASSERT_SV_NCMP;

#if defined(NAN_COMPARE_BROKEN) && defined(Perl_isnan)
    if (Perl_isnan(nv1) || Perl_isnan(nv2)) {
#else
    if (nv1 != nv1 || nv2 != nv2) {     /* x != x is true only for NaN */
#endif
        if (ckWARN(WARN_UNINITIALIZED)) report_uninit(NULL);
        return 0;
    }
    return nv1 < nv2 ? -1 : nv1 > nv2 ? 1 : 0;
}

static I32
S_sv_i_ncmp(pTHX_ SV *const a, SV *const b)
{
    const IV iv1 = SvIV(a);
    const IV iv2 = SvIV(b);

    PERL_ARGS_ASSERT_SV_I_NCMP;

    return iv1 < iv2 ? -1 : iv1 > iv2 ? 1 : 0;
}

#define tryCALL_AMAGICbin(left,right,meth) \
    (SvAMAGIC(left)||SvAMAGIC(right)) \
        ? amagic_call(left, right, meth, 0) \
        : NULL;

#define SORT_NORMAL_RETURN_VALUE(val)  (((val) > 0) ? 1 : ((val) ? -1 : 0))
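
/* SORT_NORMAL_RETURN_VALUE collapses whatever magnitude an overloaded
 * comparison returns into the canonical -1/0/1, for example:
 *
 *     SORT_NORMAL_RETURN_VALUE(42)   ==  1
 *     SORT_NORMAL_RETURN_VALUE(0)    ==  0
 *     SORT_NORMAL_RETURN_VALUE(-7.5) == -1
 *
 * (note it evaluates its argument twice, so the callers below pass it a
 * local const).  This keeps the comparison routines below well-behaved
 * even when an overload method returns an arbitrary number. */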

static I32
S_amagic_ncmp(pTHX_ SV *const a, SV *const b)
{
    dVAR;
    SV * const tmpsv = tryCALL_AMAGICbin(a,b,ncmp_amg);

    PERL_ARGS_ASSERT_AMAGIC_NCMP;

    if (tmpsv) {
        if (SvIOK(tmpsv)) {
            const I32 i = SvIVX(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(i);
        }
        else {
            const NV d = SvNV(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(d);
        }
    }
    return S_sv_ncmp(aTHX_ a, b);
}

static I32
S_amagic_i_ncmp(pTHX_ SV *const a, SV *const b)
{
    dVAR;
    SV * const tmpsv = tryCALL_AMAGICbin(a,b,ncmp_amg);

    PERL_ARGS_ASSERT_AMAGIC_I_NCMP;

    if (tmpsv) {
        if (SvIOK(tmpsv)) {
            const I32 i = SvIVX(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(i);
        }
        else {
            const NV d = SvNV(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(d);
        }
    }
    return S_sv_i_ncmp(aTHX_ a, b);
}

static I32
S_amagic_cmp(pTHX_ SV *const str1, SV *const str2)
{
    dVAR;
    SV * const tmpsv = tryCALL_AMAGICbin(str1,str2,scmp_amg);

    PERL_ARGS_ASSERT_AMAGIC_CMP;

    if (tmpsv) {
        if (SvIOK(tmpsv)) {
            const I32 i = SvIVX(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(i);
        }
        else {
            const NV d = SvNV(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(d);
        }
    }
    return sv_cmp(str1, str2);
}

static I32
S_amagic_cmp_locale(pTHX_ SV *const str1, SV *const str2)
{
    dVAR;
    SV * const tmpsv = tryCALL_AMAGICbin(str1,str2,scmp_amg);

    PERL_ARGS_ASSERT_AMAGIC_CMP_LOCALE;

    if (tmpsv) {
        if (SvIOK(tmpsv)) {
            const I32 i = SvIVX(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(i);
        }
        else {
            const NV d = SvNV(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(d);
        }
    }
    return sv_cmp_locale(str1, str2);
}

/*
 * Local variables:
 * c-indentation-style: bsd
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 *
 * ex: set ts=8 sts=4 sw=4 et:
 */