File Coverage

src/hash/ghash_ctmul.c
Criterion Covered Total %
statement 0 104 0.0
branch 0 8 0.0
condition n/a
subroutine n/a
pod n/a
total 0 112 0.0


line stmt bran cond sub pod time code
1             /*
2             * Copyright (c) 2016 Thomas Pornin
3             *
4             * Permission is hereby granted, free of charge, to any person obtaining
5             * a copy of this software and associated documentation files (the
6             * "Software"), to deal in the Software without restriction, including
7             * without limitation the rights to use, copy, modify, merge, publish,
8             * distribute, sublicense, and/or sell copies of the Software, and to
9             * permit persons to whom the Software is furnished to do so, subject to
10             * the following conditions:
11             *
12             * The above copyright notice and this permission notice shall be
13             * included in all copies or substantial portions of the Software.
14             *
15             * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16             * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17             * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18             * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19             * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20             * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21             * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22             * SOFTWARE.
23             */
24              
25             #include "inner.h"
26              
27             /*
28             * We compute "carryless multiplications" through normal integer
29             * multiplications, masking out enough bits to create "holes" in which
30             * carries may expand without altering our bits; we really use 8 data
31             * bits per 32-bit word, spaced every fourth bit. Accumulated carries
32             * may not exceed 8 in total, which fits in 4 bits.
33             *
34             * It would be possible to use a 3-bit spacing, allowing two operands,
35             * one with 7 non-zero data bits, the other one with 10 or 11 non-zero
36             * data bits; this asymmetric splitting makes the overall code more
37             * complex with thresholds and exceptions, and does not appear to be
38             * worth the effort.
39             */
40              
41             /*
42             * We cannot really autodetect whether multiplications are "slow" or
43             * not. A typical example is the ARM Cortex M0+, which exists in two
44             * versions: one with a 1-cycle multiplication opcode, the other with
45             * a 32-cycle multiplication opcode. They both use exactly the same
46             * architecture and ABI, and cannot be distinguished from each other
47             * at compile-time.
48             *
49             * Since most modern CPU (even embedded CPU) still have fast
50             * multiplications, we use the "fast mul" code by default.
51             */
52              
53             #if BR_SLOW_MUL
54              
55             /*
56             * This implementation uses Karatsuba-like reduction to make fewer
57             * integer multiplications (9 instead of 16), at the expense of extra
58             * logical operations (XOR, shifts...). On modern x86 CPU that offer
59             * fast, pipelined multiplications, this code is about twice slower than
60             * the simpler code with 16 multiplications. This tendency may be
61             * reversed on low-end platforms with expensive multiplications.
62             */
63              
/*
 * MUL32(h, l, x, y): compute the 64-bit integer product of the 32-bit
 * values x and y, through the MUL() primitive (presumably provided by
 * inner.h — not visible in this file), and store the high 32 bits of
 * the product into h and the low 32 bits into l.
 */
#define MUL32(h, l, x, y)   do { \
		uint64_t mul32tmp = MUL(x, y); \
		(h) = (uint32_t)(mul32tmp >> 32); \
		(l) = (uint32_t)mul32tmp; \
	} while (0)
69              
/*
 * Carryless ("binary polynomial") multiplication of the 32-bit
 * operands x and y; the 64-bit product is returned as two 32-bit
 * halves, high half in *hi and low half in *lo. This version uses a
 * Karatsuba-like schedule to perform only nine integer
 * multiplications (see file-top comment for the trade-off versus the
 * plain 16-multiplication version).
 */
static inline void
bmul(uint32_t *hi, uint32_t *lo, uint32_t x, uint32_t y)
{
	uint32_t x0, x1, x2, x3;
	uint32_t y0, y1, y2, y3;
	uint32_t a0, a1, a2, a3, a4, a5, a6, a7, a8;
	uint32_t b0, b1, b2, b3, b4, b5, b6, b7, b8;

	/*
	 * Split each operand into four "spread" words: xk (resp. yk)
	 * keeps only the bits whose index is equal to k modulo 4, so
	 * that multiplication carries expand into the zero "holes"
	 * without reaching data bits.
	 */
	x0 = x & (uint32_t)0x11111111;
	x1 = x & (uint32_t)0x22222222;
	x2 = x & (uint32_t)0x44444444;
	x3 = x & (uint32_t)0x88888888;
	y0 = y & (uint32_t)0x11111111;
	y1 = y & (uint32_t)0x22222222;
	y2 = y & (uint32_t)0x44444444;
	y3 = y & (uint32_t)0x88888888;

	/*
	 * (x0+W*x1)*(y0+W*y1) -> a0:b0
	 * (x2+W*x3)*(y2+W*y3) -> a3:b3
	 * ((x0+x2)+W*(x1+x3))*((y0+y2)+W*(y1+y3)) -> a6:b6
	 *
	 * The odd-offset words are shifted down first so that every
	 * multiplication operand uses the same bit positions (index 0
	 * modulo 4).
	 */
	a0 = x0;
	b0 = y0;
	a1 = x1 >> 1;
	b1 = y1 >> 1;
	a2 = a0 ^ a1;
	b2 = b0 ^ b1;
	a3 = x2 >> 2;
	b3 = y2 >> 2;
	a4 = x3 >> 3;
	b4 = y3 >> 3;
	a5 = a3 ^ a4;
	b5 = b3 ^ b4;
	a6 = a0 ^ a3;
	b6 = b0 ^ b3;
	a7 = a1 ^ a4;
	b7 = b1 ^ b4;
	a8 = a6 ^ a7;
	b8 = b6 ^ b7;

	/*
	 * The nine products: each pair (ak, bk) is replaced with the
	 * 64-bit integer product ak*bk (low half back into ak, high
	 * half back into bk).
	 */
	MUL32(b0, a0, b0, a0);
	MUL32(b1, a1, b1, a1);
	MUL32(b2, a2, b2, a2);
	MUL32(b3, a3, b3, a3);
	MUL32(b4, a4, b4, a4);
	MUL32(b5, a5, b5, a5);
	MUL32(b6, a6, b6, a6);
	MUL32(b7, a7, b7, a7);
	MUL32(b8, a8, b8, a8);

	/*
	 * Keep only the data bits (index 0 modulo 4); the other bit
	 * positions contain only accumulated carries and are discarded.
	 */
	a0 &= (uint32_t)0x11111111;
	a1 &= (uint32_t)0x11111111;
	a2 &= (uint32_t)0x11111111;
	a3 &= (uint32_t)0x11111111;
	a4 &= (uint32_t)0x11111111;
	a5 &= (uint32_t)0x11111111;
	a6 &= (uint32_t)0x11111111;
	a7 &= (uint32_t)0x11111111;
	a8 &= (uint32_t)0x11111111;
	b0 &= (uint32_t)0x11111111;
	b1 &= (uint32_t)0x11111111;
	b2 &= (uint32_t)0x11111111;
	b3 &= (uint32_t)0x11111111;
	b4 &= (uint32_t)0x11111111;
	b5 &= (uint32_t)0x11111111;
	b6 &= (uint32_t)0x11111111;
	b7 &= (uint32_t)0x11111111;
	b8 &= (uint32_t)0x11111111;

	/*
	 * Karatsuba recombination at each level: XOR the two outer
	 * sub-products out of the middle one, then interleave the three
	 * sub-products back into their bit offsets (0, 1 and 2 modulo
	 * 4) within the holding word.
	 */
	a2 ^= a0 ^ a1;
	b2 ^= b0 ^ b1;
	a0 ^= (a2 << 1) ^ (a1 << 2);
	b0 ^= (b2 << 1) ^ (b1 << 2);
	a5 ^= a3 ^ a4;
	b5 ^= b3 ^ b4;
	a3 ^= (a5 << 1) ^ (a4 << 2);
	b3 ^= (b5 << 1) ^ (b4 << 2);
	a8 ^= a6 ^ a7;
	b8 ^= b6 ^ b7;
	a6 ^= (a8 << 1) ^ (a7 << 2);
	b6 ^= (b8 << 1) ^ (b7 << 2);

	/*
	 * Top-level recombination of the three 16x16 half-products into
	 * the final 64-bit result; the a-words carry the low halves and
	 * the b-words the high halves, so the bits of the shifted
	 * middle/high terms that cross the 32-bit boundary are folded
	 * into *hi.
	 */
	a6 ^= a0 ^ a3;
	b6 ^= b0 ^ b3;
	*lo = a0 ^ (a6 << 2) ^ (a3 << 4);
	*hi = b0 ^ (b6 << 2) ^ (b3 << 4) ^ (a6 >> 30) ^ (a3 >> 28);
}
157              
158             #else
159              
160             /*
161             * Simple multiplication in GF(2)[X], using 16 integer multiplications.
162             */
163              
/*
 * Carryless ("binary polynomial") multiplication of the 32-bit
 * operands x and y; the 64-bit product is returned as two 32-bit
 * halves, high half in *hi and low half in *lo. Each operand is split
 * into four words that keep only every fourth bit, so that the
 * carries of a plain integer multiplication expand into zero "holes"
 * without disturbing data bits; sixteen such multiplications rebuild
 * the full carryless product.
 */
static inline void
bmul(uint32_t *hi, uint32_t *lo, uint32_t x, uint32_t y)
{
	uint32_t xs[4], ys[4];
	uint64_t z;
	int i, j;

	/*
	 * xs[k] (resp. ys[k]) keeps only the bits of x (resp. y) whose
	 * index is equal to k modulo 4.
	 */
	for (i = 0; i < 4; i ++) {
		xs[i] = x & ((uint32_t)0x11111111 << i);
		ys[i] = y & ((uint32_t)0x11111111 << i);
	}

	/*
	 * Product bits at index i modulo 4 come from the partial
	 * products xs[j]*ys[(i-j) mod 4]; in each partial sum, the
	 * other bit positions hold only carry noise and are masked out
	 * before merging. Loop bounds are fixed, so this remains
	 * constant-time.
	 */
	z = 0;
	for (i = 0; i < 4; i ++) {
		uint64_t t;

		t = 0;
		for (j = 0; j < 4; j ++) {
			t ^= MUL(xs[j], ys[(4 + i - j) & 3]);
		}
		z |= t & ((uint64_t)0x1111111111111111 << i);
	}
	*lo = (uint32_t)z;
	*hi = (uint32_t)(z >> 32);
}
192              
193             #endif
194              
/*
 * see bearssl_hash.h
 *
 * GHASH implementation using only 32-bit integer multiplications
 * (constant-time with respect to the data and the key, provided that
 * the underlying multiplication opcode is itself constant-time). The
 * accumulator y (16 bytes) is updated in place over the data.
 */
void
br_ghash_ctmul(void *y, const void *h, const void *data, size_t len)
{
	const unsigned char *buf, *hb;
	unsigned char *yb;
	uint32_t yw[4];
	uint32_t hw[4];

	/*
	 * Throughout the loop we handle the y and h values as arrays
	 * of 32-bit words. Big-endian decoding puts the first (leftmost)
	 * four bytes into word index 3, down to the last four bytes in
	 * word index 0.
	 */
	buf = data;
	yb = y;
	hb = h;
	yw[3] = br_dec32be(yb);
	yw[2] = br_dec32be(yb + 4);
	yw[1] = br_dec32be(yb + 8);
	yw[0] = br_dec32be(yb + 12);
	hw[3] = br_dec32be(hb);
	hw[2] = br_dec32be(hb + 4);
	hw[1] = br_dec32be(hb + 8);
	hw[0] = br_dec32be(hb + 12);
	while (len > 0) {
		const unsigned char *src;
		unsigned char tmp[16];
		int i;
		uint32_t a[9], b[9], zw[8];
		uint32_t c0, c1, c2, c3, d0, d1, d2, d3, e0, e1, e2, e3;

		/*
		 * Get the next 16-byte block (using zero-padding if
		 * necessary).
		 */
		if (len >= 16) {
			src = buf;
			buf += 16;
			len -= 16;
		} else {
			memcpy(tmp, buf, len);
			memset(tmp + len, 0, (sizeof tmp) - len);
			src = tmp;
			len = 0;
		}

		/*
		 * Decode the block and XOR it into the accumulator. The
		 * GHASH standard mandates big-endian encoding.
		 */
		yw[3] ^= br_dec32be(src);
		yw[2] ^= br_dec32be(src + 4);
		yw[1] ^= br_dec32be(src + 8);
		yw[0] ^= br_dec32be(src + 12);

		/*
		 * We multiply two 128-bit field elements. We use
		 * Karatsuba to turn that into three 64-bit
		 * multiplications, which are themselves done with a
		 * total of nine 32-bit multiplications.
		 */

		/*
		 * Prepare the nine 32-bit operand pairs. a[] holds the
		 * words derived from y, b[] the words derived from h:
		 *
		 * y[0,1]*h[0,1] -> 0..2
		 * y[2,3]*h[2,3] -> 3..5
		 * (y[0,1]+y[2,3])*(h[0,1]+h[2,3]) -> 6..8
		 */
		a[0] = yw[0];
		b[0] = hw[0];
		a[1] = yw[1];
		b[1] = hw[1];
		a[2] = a[0] ^ a[1];
		b[2] = b[0] ^ b[1];

		a[3] = yw[2];
		b[3] = hw[2];
		a[4] = yw[3];
		b[4] = hw[3];
		a[5] = a[3] ^ a[4];
		b[5] = b[3] ^ b[4];

		a[6] = a[0] ^ a[3];
		b[6] = b[0] ^ b[3];
		a[7] = a[1] ^ a[4];
		b[7] = b[1] ^ b[4];
		a[8] = a[6] ^ a[7];
		b[8] = b[6] ^ b[7];

		/*
		 * Carryless products: each pair (a[i], b[i]) is
		 * replaced with its 64-bit product (low half in a[i],
		 * high half in b[i]).
		 */
		for (i = 0; i < 9; i ++) {
			bmul(&b[i], &a[i], b[i], a[i]);
		}

		/*
		 * Inner Karatsuba recombination: rebuild the three
		 * 64x64 products as 128-bit values over four 32-bit
		 * words each (c0..c3, d0..d3, e0..e3, least significant
		 * word first).
		 */
		c0 = a[0];
		c1 = b[0] ^ a[2] ^ a[0] ^ a[1];
		c2 = a[1] ^ b[2] ^ b[0] ^ b[1];
		c3 = b[1];
		d0 = a[3];
		d1 = b[3] ^ a[5] ^ a[3] ^ a[4];
		d2 = a[4] ^ b[5] ^ b[3] ^ b[4];
		d3 = b[4];
		e0 = a[6];
		e1 = b[6] ^ a[8] ^ a[6] ^ a[7];
		e2 = a[7] ^ b[8] ^ b[6] ^ b[7];
		e3 = b[7];

		/*
		 * Outer Karatsuba recombination: XOR the low (c) and
		 * high (d) products out of the middle one (e), then
		 * fold e into the middle 128 bits of the 256-bit
		 * result c0..c3:d0..d3.
		 */
		e0 ^= c0 ^ d0;
		e1 ^= c1 ^ d1;
		e2 ^= c2 ^ d2;
		e3 ^= c3 ^ d3;
		c2 ^= e0;
		c3 ^= e1;
		d0 ^= e2;
		d1 ^= e3;

		/*
		 * GHASH specification has the bits "reversed" (most
		 * significant is in fact least significant), which does
		 * not matter for a carryless multiplication, except that
		 * the 255-bit result must be shifted by 1 bit.
		 */
		zw[0] = c0 << 1;
		zw[1] = (c1 << 1) | (c0 >> 31);
		zw[2] = (c2 << 1) | (c1 >> 31);
		zw[3] = (c3 << 1) | (c2 >> 31);
		zw[4] = (d0 << 1) | (c3 >> 31);
		zw[5] = (d1 << 1) | (d0 >> 31);
		zw[6] = (d2 << 1) | (d1 >> 31);
		zw[7] = (d3 << 1) | (d2 >> 31);

		/*
		 * We now do the reduction modulo the field polynomial
		 * to get back to 128 bits. The shift amounts (1, 2 and
		 * 7, with 31, 30 and 25 for the bits falling into the
		 * next lower word) match the non-leading terms of the
		 * GCM polynomial X^128 + X^7 + X^2 + X + 1, applied in
		 * the reversed bit order described above.
		 */
		for (i = 0; i < 4; i ++) {
			uint32_t lw;

			lw = zw[i];
			zw[i + 4] ^= lw ^ (lw >> 1) ^ (lw >> 2) ^ (lw >> 7);
			zw[i + 3] ^= (lw << 31) ^ (lw << 30) ^ (lw << 25);
		}
		/* The reduced 128-bit value is the new accumulator. */
		memcpy(yw, zw + 4, sizeof yw);
	}

	/*
	 * Encode back the result.
	 */
	br_enc32be(yb, yw[3]);
	br_enc32be(yb + 4, yw[2]);
	br_enc32be(yb + 8, yw[1]);
	br_enc32be(yb + 12, yw[0]);
}
345 0           }