ctmul.c (8368B)
/*
 * Copyright (c) 2016 Thomas Pornin <pornin@bolet.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * We compute "carryless multiplications" through normal integer
 * multiplications, masking out enough bits to create "holes" in which
 * carries may expand without altering our bits; we really use 8 data
 * bits per 32-bit word, spaced every fourth bit. Accumulated carries
 * may not exceed 8 in total, which fits in 4 bits.
 *
 * It would be possible to use a 3-bit spacing, allowing two operands,
 * one with 7 non-zero data bits, the other one with 10 or 11 non-zero
 * data bits; this asymmetric splitting makes the overall code more
 * complex with thresholds and exceptions, and does not appear to be
 * worth the effort.
 */

/*
 * We cannot really autodetect whether multiplications are "slow" or
 * not. A typical example is the ARM Cortex M0+, which exists in two
 * versions: one with a 1-cycle multiplication opcode, the other with
 * a 32-cycle multiplication opcode. They both use exactly the same
 * architecture and ABI, and cannot be distinguished from each other
 * at compile-time.
 *
 * Since most modern CPU (even embedded CPU) still have fast
 * multiplications, we use the "fast mul" code by default.
 */

// A 32x32 -> 64 multiply.
#define MUL(x, y) (((uint64_t)(x)) * ((uint64_t)(y)))

#ifdef BR_SLOW_MUL

/*
 * This implementation uses Karatsuba-like reduction to make fewer
 * integer multiplications (9 instead of 16), at the expense of extra
 * logical operations (XOR, shifts...). On modern x86 CPU that offer
 * fast, pipelined multiplications, this code is about twice slower than
 * the simpler code with 16 multiplications. This tendency may be
 * reversed on low-end platforms with expensive multiplications.
 */

/*
 * MUL32(h, l, x, y): 32x32->64 integer multiply of x by y; the high
 * 32 bits of the product go into h, the low 32 bits into l. h and l
 * may alias x and y (each macro argument is evaluated exactly once,
 * before either output is written).
 */
#define MUL32(h, l, x, y) do { \
	uint64_t mul32tmp = MUL(x, y); \
	(h) = (uint32_t)(mul32tmp >> 32); \
	(l) = (uint32_t)mul32tmp; \
} while (0)

/*
 * bmul(): carryless multiplication in GF(2)[X] — compute the 64-bit
 * polynomial product of the 32-bit operands x and y, writing the high
 * word into *hi and the low word into *lo. Constant-time: only masks,
 * shifts, XORs and integer multiplications, no data-dependent branches
 * or memory accesses.
 *
 * Slow-multiplier variant: each operand is split into four words with
 * one data bit every fourth position (the "holes" described at the top
 * of this file), then a one-level Karatsuba scheme over those spaced
 * words reduces the work to 9 integer multiplications instead of 16.
 */
static inline void
bmul(uint32_t *hi, uint32_t *lo, uint32_t x, uint32_t y)
{
	uint32_t x0, x1, x2, x3;
	uint32_t y0, y1, y2, y3;
	uint32_t a0, a1, a2, a3, a4, a5, a6, a7, a8;
	uint32_t b0, b1, b2, b3, b4, b5, b6, b7, b8;

	/*
	 * Split each operand into four spaced words: xk/yk keeps the
	 * bits at positions congruent to k modulo 4, so that integer
	 * carries cannot cross from one data bit to the next.
	 */
	x0 = x & (uint32_t)0x11111111;
	x1 = x & (uint32_t)0x22222222;
	x2 = x & (uint32_t)0x44444444;
	x3 = x & (uint32_t)0x88888888;
	y0 = y & (uint32_t)0x11111111;
	y1 = y & (uint32_t)0x22222222;
	y2 = y & (uint32_t)0x44444444;
	y3 = y & (uint32_t)0x88888888;

	/*
	 * Karatsuba operand preparation. Each spaced word is shifted
	 * down so its data bits all sit at positions 0 mod 4; writing
	 * W for the formal bit offset, the three products computed
	 * below are:
	 *
	 * (x0+W*x1)*(y0+W*y1) -> a0:b0
	 * (x2+W*x3)*(y2+W*y3) -> a3:b3
	 * ((x0+x2)+W*(x1+x3))*((y0+y2)+W*(y1+y3)) -> a6:b6
	 *
	 * (addition in GF(2) is XOR).
	 */
	a0 = x0;
	b0 = y0;
	a1 = x1 >> 1;
	b1 = y1 >> 1;
	a2 = a0 ^ a1;
	b2 = b0 ^ b1;
	a3 = x2 >> 2;
	b3 = y2 >> 2;
	a4 = x3 >> 3;
	b4 = y3 >> 3;
	a5 = a3 ^ a4;
	b5 = b3 ^ b4;
	a6 = a0 ^ a3;
	b6 = b0 ^ b3;
	a7 = a1 ^ a4;
	b7 = b1 ^ b4;
	a8 = a6 ^ a7;
	b8 = b6 ^ b7;

	/*
	 * The nine 32x32->64 multiplications. Each result overwrites
	 * its own inputs: bk:ak receives the high:low halves of ak*bk.
	 */
	MUL32(b0, a0, b0, a0);
	MUL32(b1, a1, b1, a1);
	MUL32(b2, a2, b2, a2);
	MUL32(b3, a3, b3, a3);
	MUL32(b4, a4, b4, a4);
	MUL32(b5, a5, b5, a5);
	MUL32(b6, a6, b6, a6);
	MUL32(b7, a7, b7, a7);
	MUL32(b8, a8, b8, a8);

	/*
	 * Discard the carry "holes": only the bits at positions
	 * 0 mod 4 carry actual polynomial product data.
	 */
	a0 &= (uint32_t)0x11111111;
	a1 &= (uint32_t)0x11111111;
	a2 &= (uint32_t)0x11111111;
	a3 &= (uint32_t)0x11111111;
	a4 &= (uint32_t)0x11111111;
	a5 &= (uint32_t)0x11111111;
	a6 &= (uint32_t)0x11111111;
	a7 &= (uint32_t)0x11111111;
	a8 &= (uint32_t)0x11111111;
	b0 &= (uint32_t)0x11111111;
	b1 &= (uint32_t)0x11111111;
	b2 &= (uint32_t)0x11111111;
	b3 &= (uint32_t)0x11111111;
	b4 &= (uint32_t)0x11111111;
	b5 &= (uint32_t)0x11111111;
	b6 &= (uint32_t)0x11111111;
	b7 &= (uint32_t)0x11111111;
	b8 &= (uint32_t)0x11111111;

	/*
	 * Karatsuba recombination, first within each of the three
	 * sub-products (middle term = cross product minus the two end
	 * terms; "minus" is XOR in GF(2)), then across the three
	 * sub-products, re-interleaving the spaced words back to their
	 * original bit positions with the << 1 / << 2 / << 4 shifts.
	 */
	a2 ^= a0 ^ a1;
	b2 ^= b0 ^ b1;
	a0 ^= (a2 << 1) ^ (a1 << 2);
	b0 ^= (b2 << 1) ^ (b1 << 2);
	a5 ^= a3 ^ a4;
	b5 ^= b3 ^ b4;
	a3 ^= (a5 << 1) ^ (a4 << 2);
	b3 ^= (b5 << 1) ^ (b4 << 2);
	a8 ^= a6 ^ a7;
	b8 ^= b6 ^ b7;
	a6 ^= (a8 << 1) ^ (a7 << 2);
	b6 ^= (b8 << 1) ^ (b7 << 2);
	a6 ^= a0 ^ a3;
	b6 ^= b0 ^ b3;
	/* The >> 30 / >> 28 terms carry the low-half overflow into the high word. */
	*lo = a0 ^ (a6 << 2) ^ (a3 << 4);
	*hi = b0 ^ (b6 << 2) ^ (b3 << 4) ^ (a6 >> 30) ^ (a3 >> 28);
}

#else

/*
 * Simple multiplication in GF(2)[X], using 16 integer multiplications.
 */

/*
 * bmul(): carryless multiplication in GF(2)[X] — compute the 64-bit
 * polynomial product of the 32-bit operands x and y, writing the high
 * word into *hi and the low word into *lo. Constant-time: only masks,
 * XORs and integer multiplications, no data-dependent branches or
 * memory accesses.
 *
 * Each operand is split into four words with one data bit every fourth
 * position; a product of two such spaced words accumulates at most 8
 * one-bit contributions per output position, which fits in the 4-bit
 * "hole" between data bits, so plain integer multiplication emulates
 * the carryless one once the hole bits are masked off.
 */
static inline void
bmul(uint32_t *hi, uint32_t *lo, uint32_t x, uint32_t y)
{
	uint32_t x0, x1, x2, x3;
	uint32_t y0, y1, y2, y3;
	uint64_t z0, z1, z2, z3;
	uint64_t z;

	/* Spaced splits: xk/yk keeps the bits at positions k mod 4. */
	x0 = x & (uint32_t)0x11111111;
	x1 = x & (uint32_t)0x22222222;
	x2 = x & (uint32_t)0x44444444;
	x3 = x & (uint32_t)0x88888888;
	y0 = y & (uint32_t)0x11111111;
	y1 = y & (uint32_t)0x22222222;
	y2 = y & (uint32_t)0x44444444;
	y3 = y & (uint32_t)0x88888888;
	/*
	 * zk gathers the product bits at positions k mod 4: the pairs
	 * (xi, yj) with i + j congruent to k modulo 4.
	 */
	z0 = MUL(x0, y0) ^ MUL(x1, y3) ^ MUL(x2, y2) ^ MUL(x3, y1);
	z1 = MUL(x0, y1) ^ MUL(x1, y0) ^ MUL(x2, y3) ^ MUL(x3, y2);
	z2 = MUL(x0, y2) ^ MUL(x1, y1) ^ MUL(x2, y0) ^ MUL(x3, y3);
	z3 = MUL(x0, y3) ^ MUL(x1, y2) ^ MUL(x2, y1) ^ MUL(x3, y0);
	/* Mask off the carry holes, keeping only the valid data bits. */
	z0 &= (uint64_t)0x1111111111111111;
	z1 &= (uint64_t)0x2222222222222222;
	z2 &= (uint64_t)0x4444444444444444;
	z3 &= (uint64_t)0x8888888888888888;
	/* The four masked lanes are disjoint, so OR re-interleaves them. */
	z = z0 | z1 | z2 | z3;
	*lo = (uint32_t)z;
	*hi = (uint32_t)(z >> 32);
}

#endif

/*
 * pv_mul_y_h_ctmul(): multiply the 128-bit accumulator pv->y by the
 * 128-bit key element pv->key.h in the GF(2^128) field and store the
 * reduced 128-bit result back into pv->y. Constant-time by
 * construction: no data-dependent branches or memory accesses.
 */
static void
pv_mul_y_h_ctmul(polyval_t *pv)
{
	uint32_t *yw = pv->y.v;
	const uint32_t *hw = pv->key.h.v;

	/*
	 * We handle the y and h values as arrays of four 32-bit words
	 * each. (This wording survives from the loop-based GHASH code
	 * this was adapted from; here a single multiplication is done.)
	 */
	{
		int i;
		uint32_t a[9], b[9], zw[8];
		uint32_t c0, c1, c2, c3, d0, d1, d2, d3, e0, e1, e2, e3;

		/*
		 * We multiply two 128-bit field elements. We use
		 * Karatsuba to turn that into three 64-bit
		 * multiplications, which are themselves done with a
		 * total of nine 32-bit multiplications.
		 */

		/*
		 * Karatsuba operand preparation; a[] holds the y-side
		 * words, b[] the h-side words:
		 *
		 * y[0,1]*h[0,1] -> 0..2
		 * y[2,3]*h[2,3] -> 3..5
		 * (y[0,1]+y[2,3])*(h[0,1]+h[2,3]) -> 6..8
		 */
		a[0] = yw[0];
		b[0] = hw[0];
		a[1] = yw[1];
		b[1] = hw[1];
		a[2] = a[0] ^ a[1];
		b[2] = b[0] ^ b[1];

		a[3] = yw[2];
		b[3] = hw[2];
		a[4] = yw[3];
		b[4] = hw[3];
		a[5] = a[3] ^ a[4];
		b[5] = b[3] ^ b[4];

		a[6] = a[0] ^ a[3];
		b[6] = b[0] ^ b[3];
		a[7] = a[1] ^ a[4];
		b[7] = b[1] ^ b[4];
		a[8] = a[6] ^ a[7];
		b[8] = b[6] ^ b[7];

		/*
		 * Nine carryless 32x32->64 multiplications; each result
		 * overwrites its inputs as b[i]:a[i] = hi:lo.
		 */
		for (i = 0; i < 9; i ++) {
			bmul(&b[i], &a[i], b[i], a[i]);
		}

		/*
		 * Recombine each 64x64 sub-product from its three 32-bit
		 * Karatsuba pieces: c = low product, d = high product,
		 * e = middle (cross) product, each as four 32-bit words.
		 */
		c0 = a[0];
		c1 = b[0] ^ a[2] ^ a[0] ^ a[1];
		c2 = a[1] ^ b[2] ^ b[0] ^ b[1];
		c3 = b[1];
		d0 = a[3];
		d1 = b[3] ^ a[5] ^ a[3] ^ a[4];
		d2 = a[4] ^ b[5] ^ b[3] ^ b[4];
		d3 = b[4];
		e0 = a[6];
		e1 = b[6] ^ a[8] ^ a[6] ^ a[7];
		e2 = a[7] ^ b[8] ^ b[6] ^ b[7];
		e3 = b[7];

		/*
		 * Outer Karatsuba combination: subtract (XOR) the end
		 * products from the cross product, then fold the middle
		 * 128 bits into the 256-bit result c0..c3:d0..d3.
		 */
		e0 ^= c0 ^ d0;
		e1 ^= c1 ^ d1;
		e2 ^= c2 ^ d2;
		e3 ^= c3 ^ d3;
		c2 ^= e0;
		c3 ^= e1;
		d0 ^= e2;
		d1 ^= e3;

#if 0
		// This rotation is GHASH-only.
		/*
		 * GHASH specification has the bits "reversed" (most
		 * significant is in fact least significant), which does
		 * not matter for a carryless multiplication, except that
		 * the 255-bit result must be shifted by 1 bit.
		 */
		zw[0] = c0 << 1;
		zw[1] = (c1 << 1) | (c0 >> 31);
		zw[2] = (c2 << 1) | (c1 >> 31);
		zw[3] = (c3 << 1) | (c2 >> 31);
		zw[4] = (d0 << 1) | (c3 >> 31);
		zw[5] = (d1 << 1) | (d0 >> 31);
		zw[6] = (d2 << 1) | (d1 >> 31);
		zw[7] = (d3 << 1) | (d2 >> 31);
#else
		/*
		 * NOTE(review): skipping the GHASH bit-rotation above is
		 * what distinguishes this variant — presumably the
		 * POLYVAL convention (RFC 8452), given the pv_/polyval_t
		 * naming; confirm against the callers/tests.
		 */
		zw[0] = c0;
		zw[1] = c1;
		zw[2] = c2;
		zw[3] = c3;
		zw[4] = d0;
		zw[5] = d1;
		zw[6] = d2;
		zw[7] = d3;
#endif

		/*
		 * We now do the reduction modulo the field polynomial
		 * to get back to 128 bits: each low-half word zw[0..3]
		 * is folded into the high half. Note the cascading is
		 * intentional: the (lw << 31/30/25) spill written into
		 * zw[3] at i == 0 is itself reduced when i reaches 3.
		 */
		for (i = 0; i < 4; i ++) {
			uint32_t lw;

			lw = zw[i];
			zw[i + 4] ^= lw ^ (lw >> 1) ^ (lw >> 2) ^ (lw >> 7);
			zw[i + 3] ^= (lw << 31) ^ (lw << 30) ^ (lw << 25);
		}
		/* The reduced 128-bit result is the high four words. */
		memcpy(yw, zw + 4, 16);
	}
}
#undef MUL