File Coverage

src/symcipher/aes_ct64_enc.c
Criterion Covered Total %
statement 0 60 0.0
branch 0 4 0.0
condition n/a
subroutine n/a
pod n/a
total 0 64 0.0


line stmt bran cond sub pod time code
1             /*
2             * Copyright (c) 2016 Thomas Pornin
3             *
4             * Permission is hereby granted, free of charge, to any person obtaining
5             * a copy of this software and associated documentation files (the
6             * "Software"), to deal in the Software without restriction, including
7             * without limitation the rights to use, copy, modify, merge, publish,
8             * distribute, sublicense, and/or sell copies of the Software, and to
9             * permit persons to whom the Software is furnished to do so, subject to
10             * the following conditions:
11             *
12             * The above copyright notice and this permission notice shall be
13             * included in all copies or substantial portions of the Software.
14             *
15             * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16             * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17             * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18             * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19             * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20             * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21             * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22             * SOFTWARE.
23             */
24              
25             #include "inner.h"
26              
static inline void
add_round_key(uint64_t *q, const uint64_t *sk)
{
	int i;

	/*
	 * AddRoundKey on the bitsliced state: XOR each of the eight
	 * state words with the corresponding subkey word.
	 */
	for (i = 0; i < 8; i ++) {
		q[i] ^= sk[i];
	}
}
39              
static inline void
shift_rows(uint64_t *q)
{
	int i;

	/*
	 * ShiftRows on the bitsliced state. In each 64-bit word, every
	 * 16-bit quarter holds one state row; row r (r = 0..3, from the
	 * low bits up) is rotated within its quarter by 4*r bits, as
	 * shown by the masks and shift counts below.
	 */
	for (i = 0; i < 8; i ++) {
		uint64_t w;

		w = q[i];
		q[i] = (w & (uint64_t)0x000000000000FFFF)
			| ((w & (uint64_t)0x00000000000F0000) << 12)
			| ((w & (uint64_t)0x00000000FFF00000) >> 4)
			| ((w & (uint64_t)0x000000FF00000000) << 8)
			| ((w & (uint64_t)0x0000FF0000000000) >> 8)
			| ((w & (uint64_t)0x0FFF000000000000) << 4)
			| ((w & (uint64_t)0xF000000000000000) >> 12);
	}
}
58              
static inline uint64_t
rotr32(uint64_t x)
{
	/* Swap the two 32-bit halves (rotation by 32 bits). */
	return (x >> 32) | (x << 32);
}
64              
static inline void
mix_columns(uint64_t *q)
{
	uint64_t a[8], r[8];
	int i;

	/*
	 * MixColumns on the bitsliced state. r[i] is a[i] rotated right
	 * by 16 bits, aligning each row with the next one. The XOR
	 * network below combines each column with its double in GF(2^8):
	 * bit slice i of the doubled value comes from slice i-1, with
	 * slices 0, 1, 3 and 4 additionally folding in slice 7
	 * (reduction by the AES field polynomial) — hence the extra
	 * a[7] ^ r[7] terms on those four output words.
	 */
	for (i = 0; i < 8; i ++) {
		a[i] = q[i];
		r[i] = (a[i] >> 16) | (a[i] << 48);
	}

	q[0] = a[7] ^ r[7] ^ r[0] ^ rotr32(a[0] ^ r[0]);
	q[1] = a[0] ^ r[0] ^ a[7] ^ r[7] ^ r[1] ^ rotr32(a[1] ^ r[1]);
	q[2] = a[1] ^ r[1] ^ r[2] ^ rotr32(a[2] ^ r[2]);
	q[3] = a[2] ^ r[2] ^ a[7] ^ r[7] ^ r[3] ^ rotr32(a[3] ^ r[3]);
	q[4] = a[3] ^ r[3] ^ a[7] ^ r[7] ^ r[4] ^ rotr32(a[4] ^ r[4]);
	q[5] = a[4] ^ r[4] ^ r[5] ^ rotr32(a[5] ^ r[5]);
	q[6] = a[5] ^ r[5] ^ r[6] ^ rotr32(a[6] ^ r[6]);
	q[7] = a[6] ^ r[6] ^ r[7] ^ rotr32(a[7] ^ r[7]);
}
97              
98             /* see inner.h */
void
br_aes_ct64_bitslice_encrypt(unsigned num_rounds,
	const uint64_t *skey, uint64_t *q)
{
	const uint64_t *rk;
	unsigned u;

	/*
	 * Standard AES round structure on the bitsliced state: an
	 * initial key addition, then num_rounds-1 full rounds, then a
	 * final round without MixColumns. The round key pointer steps
	 * by eight 64-bit words (one bitsliced subkey) per round.
	 */
	rk = skey;
	add_round_key(q, rk);
	for (u = 1; u < num_rounds; u ++) {
		rk += 8;
		br_aes_ct64_bitslice_Sbox(q);
		shift_rows(q);
		mix_columns(q);
		add_round_key(q, rk);
	}

	/* Last round: SubBytes and ShiftRows, but no MixColumns. */
	br_aes_ct64_bitslice_Sbox(q);
	shift_rows(q);
	add_round_key(q, rk + 8);
}
115 0           }