sha256_shani.cpp
// Copyright (c) 2018 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
//
// Based on https://github.com/noloader/SHA-Intrinsics/blob/master/sha256-x86.c,
// Written and placed in public domain by Jeffrey Walton.
// Based on code from Intel, and by Sean Gulley for the miTLS project.

#ifdef ENABLE_SHANI

#include <cstdint>
#include <immintrin.h>

namespace {

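// MASK is a _mm_shuffle_epi8 pattern that byte-swaps every 32-bit lane, used
// to convert between the big-endian byte order of SHA-256 message/digest words
// and the little-endian lanes of an __m128i. INIT0 and INIT1 hold the SHA-256
// initial hash values, pre-arranged in the (A,B,E,F)/(C,D,G,H) lane order that
// the SHA-NI round instruction operates on.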
alignas(__m128i) const uint8_t MASK[16] = {0x03, 0x02, 0x01, 0x00, 0x07, 0x06,
                                           0x05, 0x04, 0x0b, 0x0a, 0x09, 0x08,
                                           0x0f, 0x0e, 0x0d, 0x0c};
alignas(__m128i) const uint8_t INIT0[16] = {0x8c, 0x68, 0x05, 0x9b, 0x7f, 0x52,
                                            0x0e, 0x51, 0x85, 0xae, 0x67, 0xbb,
                                            0x67, 0xe6, 0x09, 0x6a};
alignas(__m128i) const uint8_t INIT1[16] = {0x19, 0xcd, 0xe0, 0x5b, 0xab, 0xd9,
                                            0x83, 0x1f, 0x3a, 0xf5, 0x4f, 0xa5,
                                            0x72, 0xf3, 0x6e, 0x3c};

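// Each QuadRound call performs four SHA-256 rounds. _mm_sha256rnds2_epu32 does
// two rounds, consuming the two low 32-bit lanes of its third operand as
// pre-added W+K values; the high half is shuffled down for the second
// instruction. k1:k0 packs four round constants as two 64-bit literals, and
// the overload taking m adds the current message words to them first.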
inline void __attribute__((always_inline))
QuadRound(__m128i &state0, __m128i &state1, uint64_t k1, uint64_t k0) {
    const __m128i msg = _mm_set_epi64x(k1, k0);
    state1 = _mm_sha256rnds2_epu32(state1, state0, msg);
    state0 =
        _mm_sha256rnds2_epu32(state0, state1, _mm_shuffle_epi32(msg, 0x0e));
}

inline void __attribute__((always_inline))
QuadRound(__m128i &state0, __m128i &state1, __m128i m, uint64_t k1,
          uint64_t k0) {
    const __m128i msg = _mm_add_epi32(m, _mm_set_epi64x(k1, k0));
    state1 = _mm_sha256rnds2_epu32(state1, state0, msg);
    state0 =
        _mm_sha256rnds2_epu32(state0, state1, _mm_shuffle_epi32(msg, 0x0e));
}

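// The ShiftMessage helpers advance the SHA-256 message schedule four words at
// a time: ShiftMessageA applies _mm_sha256msg1_epu32, ShiftMessageC combines
// an _mm_alignr_epi8 add with _mm_sha256msg2_epu32, and ShiftMessageB chains
// both steps.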
inline void __attribute__((always_inline))
ShiftMessageA(__m128i &m0, __m128i m1) {
    m0 = _mm_sha256msg1_epu32(m0, m1);
}

inline void __attribute__((always_inline))
ShiftMessageC(__m128i &m0, __m128i m1, __m128i &m2) {
    m2 =
        _mm_sha256msg2_epu32(_mm_add_epi32(m2, _mm_alignr_epi8(m1, m0, 4)), m1);
}

inline void __attribute__((always_inline))
ShiftMessageB(__m128i &m0, __m128i m1, __m128i &m2) {
    ShiftMessageC(m0, m1, m2);
    ShiftMessageA(m0, m1);
}

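// Shuffle converts the state from the linear (a,b,c,d)/(e,f,g,h) word layout
// used by the state array into the packed (A,B,E,F)/(C,D,G,H) form that
// _mm_sha256rnds2_epu32 expects; Unshuffle applies the inverse permutation.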
inline void __attribute__((always_inline)) Shuffle(__m128i &s0, __m128i &s1) {
    const __m128i t1 = _mm_shuffle_epi32(s0, 0xB1);
    const __m128i t2 = _mm_shuffle_epi32(s1, 0x1B);
    s0 = _mm_alignr_epi8(t1, t2, 0x08);
    s1 = _mm_blend_epi16(t2, t1, 0xF0);
}

inline void __attribute__((always_inline)) Unshuffle(__m128i &s0, __m128i &s1) {
    const __m128i t1 = _mm_shuffle_epi32(s0, 0x1B);
    const __m128i t2 = _mm_shuffle_epi32(s1, 0xB1);
    s0 = _mm_blend_epi16(t1, t2, 0xF0);
    s1 = _mm_alignr_epi8(t2, t1, 0x08);
}

/*
 * Prevent the compiler from raising a -Wcast-align warning when using
 * instructions designed for unaligned access, such as _mm_loadu_si128 or
 * _mm_storeu_si128 (note the 'u' suffix for unaligned accesses).
 */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-align"
inline __m128i __attribute__((always_inline))
LoadInteger128Unaligned(const uint8_t *mem_addr) {
    return _mm_loadu_si128((const __m128i *)mem_addr);
}
inline __m128i __attribute__((always_inline))
LoadInteger128Unaligned(const uint32_t *mem_addr) {
    return _mm_loadu_si128((const __m128i *)mem_addr);
}

inline void __attribute__((always_inline))
StoreInteger128Unaligned(uint8_t *mem_addr, __m128i i128) {
    _mm_storeu_si128((__m128i *)mem_addr, i128);
}
inline void __attribute__((always_inline))
StoreInteger128Unaligned(uint32_t *mem_addr, __m128i i128) {
    _mm_storeu_si128((__m128i *)mem_addr, i128);
}
#pragma GCC diagnostic pop

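// Load reads 16 bytes of input and byte-swaps each 32-bit lane so that four
// big-endian message words end up in little-endian lanes; Save applies the
// inverse swap before writing a vector back out as big-endian bytes.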
__m128i inline __attribute__((always_inline)) Load(const uint8_t *in) {
    return _mm_shuffle_epi8(LoadInteger128Unaligned(in),
                            _mm_load_si128((const __m128i *)MASK));
}

inline void __attribute__((always_inline)) Save(uint8_t *out, __m128i s) {
    StoreInteger128Unaligned(
        out, _mm_shuffle_epi8(s, _mm_load_si128((const __m128i *)MASK)));
}
} // namespace

namespace sha256_shani {
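// Runs the SHA-256 compression function over `blocks` consecutive 64-byte
// chunks, reading and updating the eight 32-bit state words at s.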
void Transform(uint32_t *s, const uint8_t *chunk, size_t blocks) {
    __m128i m0, m1, m2, m3, s0, s1, so0, so1;

    /* Load state */
    s0 = LoadInteger128Unaligned(s);
    s1 = LoadInteger128Unaligned(s + 4);
    Shuffle(s0, s1);

    while (blocks--) {
        /* Remember old state */
        so0 = s0;
        so1 = s1;

        /* Load data and transform */
        m0 = Load(chunk);
        QuadRound(s0, s1, m0, 0xe9b5dba5b5c0fbcfull, 0x71374491428a2f98ull);
        m1 = Load(chunk + 16);
        QuadRound(s0, s1, m1, 0xab1c5ed5923f82a4ull, 0x59f111f13956c25bull);
        ShiftMessageA(m0, m1);
        m2 = Load(chunk + 32);
        QuadRound(s0, s1, m2, 0x550c7dc3243185beull, 0x12835b01d807aa98ull);
        ShiftMessageA(m1, m2);
        m3 = Load(chunk + 48);
        QuadRound(s0, s1, m3, 0xc19bf1749bdc06a7ull, 0x80deb1fe72be5d74ull);
        ShiftMessageB(m2, m3, m0);
        QuadRound(s0, s1, m0, 0x240ca1cc0fc19dc6ull, 0xefbe4786E49b69c1ull);
        ShiftMessageB(m3, m0, m1);
        QuadRound(s0, s1, m1, 0x76f988da5cb0a9dcull, 0x4a7484aa2de92c6full);
        ShiftMessageB(m0, m1, m2);
        QuadRound(s0, s1, m2, 0xbf597fc7b00327c8ull, 0xa831c66d983e5152ull);
        ShiftMessageB(m1, m2, m3);
        QuadRound(s0, s1, m3, 0x1429296706ca6351ull, 0xd5a79147c6e00bf3ull);
        ShiftMessageB(m2, m3, m0);
        QuadRound(s0, s1, m0, 0x53380d134d2c6dfcull, 0x2e1b213827b70a85ull);
        ShiftMessageB(m3, m0, m1);
        QuadRound(s0, s1, m1, 0x92722c8581c2c92eull, 0x766a0abb650a7354ull);
        ShiftMessageB(m0, m1, m2);
        QuadRound(s0, s1, m2, 0xc76c51A3c24b8b70ull, 0xa81a664ba2bfe8a1ull);
        ShiftMessageB(m1, m2, m3);
        QuadRound(s0, s1, m3, 0x106aa070f40e3585ull, 0xd6990624d192e819ull);
        ShiftMessageB(m2, m3, m0);
        QuadRound(s0, s1, m0, 0x34b0bcb52748774cull, 0x1e376c0819a4c116ull);
        ShiftMessageB(m3, m0, m1);
        QuadRound(s0, s1, m1, 0x682e6ff35b9cca4full, 0x4ed8aa4a391c0cb3ull);
        ShiftMessageC(m0, m1, m2);
        QuadRound(s0, s1, m2, 0x8cc7020884c87814ull, 0x78a5636f748f82eeull);
        ShiftMessageC(m1, m2, m3);
        QuadRound(s0, s1, m3, 0xc67178f2bef9A3f7ull, 0xa4506ceb90befffaull);

        /* Combine with old state */
        s0 = _mm_add_epi32(s0, so0);
        s1 = _mm_add_epi32(s1, so1);

        /* Advance */
        chunk += 64;
    }

    Unshuffle(s0, s1);
    StoreInteger128Unaligned(s, s0);
    StoreInteger128Unaligned(s + 4, s1);
}
} // namespace sha256_shani

namespace sha256d64_shani {

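// Computes SHA256d (two applications of SHA-256) of two independent 64-byte
// inputs at once, interleaving the a*/b* register sets so both hashes make
// progress in parallel, and writes the two 32-byte digests to out.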
void Transform_2way(uint8_t *out, const uint8_t *in) {
    __m128i am0, am1, am2, am3, as0, as1, aso0, aso1;
    __m128i bm0, bm1, bm2, bm3, bs0, bs1, bso0, bso1;

    /* Transform 1 */
    bs0 = as0 = _mm_load_si128((const __m128i *)INIT0);
    bs1 = as1 = _mm_load_si128((const __m128i *)INIT1);
    am0 = Load(in);
    bm0 = Load(in + 64);
    QuadRound(as0, as1, am0, 0xe9b5dba5b5c0fbcfull, 0x71374491428a2f98ull);
    QuadRound(bs0, bs1, bm0, 0xe9b5dba5b5c0fbcfull, 0x71374491428a2f98ull);
    am1 = Load(in + 16);
    bm1 = Load(in + 80);
    QuadRound(as0, as1, am1, 0xab1c5ed5923f82a4ull, 0x59f111f13956c25bull);
    QuadRound(bs0, bs1, bm1, 0xab1c5ed5923f82a4ull, 0x59f111f13956c25bull);
    ShiftMessageA(am0, am1);
    ShiftMessageA(bm0, bm1);
    am2 = Load(in + 32);
    bm2 = Load(in + 96);
    QuadRound(as0, as1, am2, 0x550c7dc3243185beull, 0x12835b01d807aa98ull);
    QuadRound(bs0, bs1, bm2, 0x550c7dc3243185beull, 0x12835b01d807aa98ull);
    ShiftMessageA(am1, am2);
    ShiftMessageA(bm1, bm2);
    am3 = Load(in + 48);
    bm3 = Load(in + 112);
    QuadRound(as0, as1, am3, 0xc19bf1749bdc06a7ull, 0x80deb1fe72be5d74ull);
    QuadRound(bs0, bs1, bm3, 0xc19bf1749bdc06a7ull, 0x80deb1fe72be5d74ull);
    ShiftMessageB(am2, am3, am0);
    ShiftMessageB(bm2, bm3, bm0);
    QuadRound(as0, as1, am0, 0x240ca1cc0fc19dc6ull, 0xefbe4786E49b69c1ull);
    QuadRound(bs0, bs1, bm0, 0x240ca1cc0fc19dc6ull, 0xefbe4786E49b69c1ull);
    ShiftMessageB(am3, am0, am1);
    ShiftMessageB(bm3, bm0, bm1);
    QuadRound(as0, as1, am1, 0x76f988da5cb0a9dcull, 0x4a7484aa2de92c6full);
    QuadRound(bs0, bs1, bm1, 0x76f988da5cb0a9dcull, 0x4a7484aa2de92c6full);
    ShiftMessageB(am0, am1, am2);
    ShiftMessageB(bm0, bm1, bm2);
    QuadRound(as0, as1, am2, 0xbf597fc7b00327c8ull, 0xa831c66d983e5152ull);
    QuadRound(bs0, bs1, bm2, 0xbf597fc7b00327c8ull, 0xa831c66d983e5152ull);
    ShiftMessageB(am1, am2, am3);
    ShiftMessageB(bm1, bm2, bm3);
    QuadRound(as0, as1, am3, 0x1429296706ca6351ull, 0xd5a79147c6e00bf3ull);
    QuadRound(bs0, bs1, bm3, 0x1429296706ca6351ull, 0xd5a79147c6e00bf3ull);
    ShiftMessageB(am2, am3, am0);
    ShiftMessageB(bm2, bm3, bm0);
    QuadRound(as0, as1, am0, 0x53380d134d2c6dfcull, 0x2e1b213827b70a85ull);
    QuadRound(bs0, bs1, bm0, 0x53380d134d2c6dfcull, 0x2e1b213827b70a85ull);
    ShiftMessageB(am3, am0, am1);
    ShiftMessageB(bm3, bm0, bm1);
    QuadRound(as0, as1, am1, 0x92722c8581c2c92eull, 0x766a0abb650a7354ull);
    QuadRound(bs0, bs1, bm1, 0x92722c8581c2c92eull, 0x766a0abb650a7354ull);
    ShiftMessageB(am0, am1, am2);
    ShiftMessageB(bm0, bm1, bm2);
    QuadRound(as0, as1, am2, 0xc76c51A3c24b8b70ull, 0xa81a664ba2bfe8a1ull);
    QuadRound(bs0, bs1, bm2, 0xc76c51A3c24b8b70ull, 0xa81a664ba2bfe8a1ull);
    ShiftMessageB(am1, am2, am3);
    ShiftMessageB(bm1, bm2, bm3);
    QuadRound(as0, as1, am3, 0x106aa070f40e3585ull, 0xd6990624d192e819ull);
    QuadRound(bs0, bs1, bm3, 0x106aa070f40e3585ull, 0xd6990624d192e819ull);
    ShiftMessageB(am2, am3, am0);
    ShiftMessageB(bm2, bm3, bm0);
    QuadRound(as0, as1, am0, 0x34b0bcb52748774cull, 0x1e376c0819a4c116ull);
    QuadRound(bs0, bs1, bm0, 0x34b0bcb52748774cull, 0x1e376c0819a4c116ull);
    ShiftMessageB(am3, am0, am1);
    ShiftMessageB(bm3, bm0, bm1);
    QuadRound(as0, as1, am1, 0x682e6ff35b9cca4full, 0x4ed8aa4a391c0cb3ull);
    QuadRound(bs0, bs1, bm1, 0x682e6ff35b9cca4full, 0x4ed8aa4a391c0cb3ull);
    ShiftMessageC(am0, am1, am2);
    ShiftMessageC(bm0, bm1, bm2);
    QuadRound(as0, as1, am2, 0x8cc7020884c87814ull, 0x78a5636f748f82eeull);
    QuadRound(bs0, bs1, bm2, 0x8cc7020884c87814ull, 0x78a5636f748f82eeull);
    ShiftMessageC(am1, am2, am3);
    ShiftMessageC(bm1, bm2, bm3);
    QuadRound(as0, as1, am3, 0xc67178f2bef9A3f7ull, 0xa4506ceb90befffaull);
    QuadRound(bs0, bs1, bm3, 0xc67178f2bef9A3f7ull, 0xa4506ceb90befffaull);
    as0 = _mm_add_epi32(as0, _mm_load_si128((const __m128i *)INIT0));
    bs0 = _mm_add_epi32(bs0, _mm_load_si128((const __m128i *)INIT0));
    as1 = _mm_add_epi32(as1, _mm_load_si128((const __m128i *)INIT1));
    bs1 = _mm_add_epi32(bs1, _mm_load_si128((const __m128i *)INIT1));

    /* Transform 2 */
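    /*
     * The second block of the first SHA-256 pass is the fixed padding block
     * (a 0x80 byte, zeros, and the 512-bit message length), so its message
     * schedule is constant. The W+K sums are precomputed and fed straight to
     * the message-less QuadRound overload below.
     */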
    aso0 = as0;
    bso0 = bs0;
    aso1 = as1;
    bso1 = bs1;
    QuadRound(as0, as1, 0xe9b5dba5b5c0fbcfull, 0x71374491c28a2f98ull);
    QuadRound(bs0, bs1, 0xe9b5dba5b5c0fbcfull, 0x71374491c28a2f98ull);
    QuadRound(as0, as1, 0xab1c5ed5923f82a4ull, 0x59f111f13956c25bull);
    QuadRound(bs0, bs1, 0xab1c5ed5923f82a4ull, 0x59f111f13956c25bull);
    QuadRound(as0, as1, 0x550c7dc3243185beull, 0x12835b01d807aa98ull);
    QuadRound(bs0, bs1, 0x550c7dc3243185beull, 0x12835b01d807aa98ull);
    QuadRound(as0, as1, 0xc19bf3749bdc06a7ull, 0x80deb1fe72be5d74ull);
    QuadRound(bs0, bs1, 0xc19bf3749bdc06a7ull, 0x80deb1fe72be5d74ull);
    QuadRound(as0, as1, 0x240cf2540fe1edc6ull, 0xf0fe4786649b69c1ull);
    QuadRound(bs0, bs1, 0x240cf2540fe1edc6ull, 0xf0fe4786649b69c1ull);
    QuadRound(as0, as1, 0x16f988fa61b9411eull, 0x6cc984be4fe9346full);
    QuadRound(bs0, bs1, 0x16f988fa61b9411eull, 0x6cc984be4fe9346full);
    QuadRound(as0, as1, 0xb9d99ec7b019fc65ull, 0xa88e5a6df2c65152ull);
    QuadRound(bs0, bs1, 0xb9d99ec7b019fc65ull, 0xa88e5a6df2c65152ull);
    QuadRound(as0, as1, 0xc7353eb0fdb1232bull, 0xe70eeaa09a1231c3ull);
    QuadRound(bs0, bs1, 0xc7353eb0fdb1232bull, 0xe70eeaa09a1231c3ull);
    QuadRound(as0, as1, 0xdc1eeefd5a0f118full, 0xcb976d5f3069bad5ull);
    QuadRound(bs0, bs1, 0xdc1eeefd5a0f118full, 0xcb976d5f3069bad5ull);
    QuadRound(as0, as1, 0xe15d5b1658f4ca9dull, 0xde0b7a040a35b689ull);
    QuadRound(bs0, bs1, 0xe15d5b1658f4ca9dull, 0xde0b7a040a35b689ull);
    QuadRound(as0, as1, 0x6fab9537a507ea32ull, 0x37088980007f3e86ull);
    QuadRound(bs0, bs1, 0x6fab9537a507ea32ull, 0x37088980007f3e86ull);
    QuadRound(as0, as1, 0xc0bbbe37cdaa3b6dull, 0x0d8cd6f117406110ull);
    QuadRound(bs0, bs1, 0xc0bbbe37cdaa3b6dull, 0x0d8cd6f117406110ull);
    QuadRound(as0, as1, 0x6fd15ca70b02e931ull, 0xdb48a36383613bdaull);
    QuadRound(bs0, bs1, 0x6fd15ca70b02e931ull, 0xdb48a36383613bdaull);
    QuadRound(as0, as1, 0x6d4378906ed41a95ull, 0x31338431521afacaull);
    QuadRound(bs0, bs1, 0x6d4378906ed41a95ull, 0x31338431521afacaull);
    QuadRound(as0, as1, 0x532fb63cb5c9a0e6ull, 0x9eccabbdc39c91f2ull);
    QuadRound(bs0, bs1, 0x532fb63cb5c9a0e6ull, 0x9eccabbdc39c91f2ull);
    QuadRound(as0, as1, 0x4c191d76a4954b68ull, 0x07237ea3d2c741c6ull);
    QuadRound(bs0, bs1, 0x4c191d76a4954b68ull, 0x07237ea3d2c741c6ull);
    as0 = _mm_add_epi32(as0, aso0);
    bs0 = _mm_add_epi32(bs0, bso0);
    as1 = _mm_add_epi32(as1, aso1);
    bs1 = _mm_add_epi32(bs1, bso1);

    /* Extract hash */
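    /*
     * The 32-byte first-pass digests stay in registers: after unshuffling they
     * are reused directly as the message words of the second SHA-256 below.
     */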
    Unshuffle(as0, as1);
    Unshuffle(bs0, bs1);
    am0 = as0;
    bm0 = bs0;
    am1 = as1;
    bm1 = bs1;

    /* Transform 3 */
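    /*
     * Second SHA-256 pass over a single 32-byte message. Only the first two
     * message vectors carry data; am2/bm2 and am3/bm3 are set to the fixed
     * padding words (the 0x80 terminator and the 256-bit length), and several
     * round constants below have the known padding schedule words folded in.
     */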
    bs0 = as0 = _mm_load_si128((const __m128i *)INIT0);
    bs1 = as1 = _mm_load_si128((const __m128i *)INIT1);
    QuadRound(as0, as1, am0, 0xe9b5dba5B5c0fbcfull, 0x71374491428a2f98ull);
    QuadRound(bs0, bs1, bm0, 0xe9b5dba5B5c0fbcfull, 0x71374491428a2f98ull);
    QuadRound(as0, as1, am1, 0xab1c5ed5923f82a4ull, 0x59f111f13956c25bull);
    QuadRound(bs0, bs1, bm1, 0xab1c5ed5923f82a4ull, 0x59f111f13956c25bull);
    ShiftMessageA(am0, am1);
    ShiftMessageA(bm0, bm1);
    bm2 = am2 = _mm_set_epi64x(0x0ull, 0x80000000ull);
    QuadRound(as0, as1, 0x550c7dc3243185beull, 0x12835b015807aa98ull);
    QuadRound(bs0, bs1, 0x550c7dc3243185beull, 0x12835b015807aa98ull);
    ShiftMessageA(am1, am2);
    ShiftMessageA(bm1, bm2);
    bm3 = am3 = _mm_set_epi64x(0x10000000000ull, 0x0ull);
    QuadRound(as0, as1, 0xc19bf2749bdc06a7ull, 0x80deb1fe72be5d74ull);
    QuadRound(bs0, bs1, 0xc19bf2749bdc06a7ull, 0x80deb1fe72be5d74ull);
    ShiftMessageB(am2, am3, am0);
    ShiftMessageB(bm2, bm3, bm0);
    QuadRound(as0, as1, am0, 0x240ca1cc0fc19dc6ull, 0xefbe4786e49b69c1ull);
    QuadRound(bs0, bs1, bm0, 0x240ca1cc0fc19dc6ull, 0xefbe4786e49b69c1ull);
    ShiftMessageB(am3, am0, am1);
    ShiftMessageB(bm3, bm0, bm1);
    QuadRound(as0, as1, am1, 0x76f988da5cb0a9dcull, 0x4a7484aa2de92c6full);
    QuadRound(bs0, bs1, bm1, 0x76f988da5cb0a9dcull, 0x4a7484aa2de92c6full);
    ShiftMessageB(am0, am1, am2);
    ShiftMessageB(bm0, bm1, bm2);
    QuadRound(as0, as1, am2, 0xbf597fc7b00327c8ull, 0xa831c66d983e5152ull);
    QuadRound(bs0, bs1, bm2, 0xbf597fc7b00327c8ull, 0xa831c66d983e5152ull);
    ShiftMessageB(am1, am2, am3);
    ShiftMessageB(bm1, bm2, bm3);
    QuadRound(as0, as1, am3, 0x1429296706ca6351ull, 0xd5a79147c6e00bf3ull);
    QuadRound(bs0, bs1, bm3, 0x1429296706ca6351ull, 0xd5a79147c6e00bf3ull);
    ShiftMessageB(am2, am3, am0);
    ShiftMessageB(bm2, bm3, bm0);
    QuadRound(as0, as1, am0, 0x53380d134d2c6dfcull, 0x2e1b213827b70a85ull);
    QuadRound(bs0, bs1, bm0, 0x53380d134d2c6dfcull, 0x2e1b213827b70a85ull);
    ShiftMessageB(am3, am0, am1);
    ShiftMessageB(bm3, bm0, bm1);
    QuadRound(as0, as1, am1, 0x92722c8581c2c92eull, 0x766a0abb650a7354ull);
    QuadRound(bs0, bs1, bm1, 0x92722c8581c2c92eull, 0x766a0abb650a7354ull);
    ShiftMessageB(am0, am1, am2);
    ShiftMessageB(bm0, bm1, bm2);
    QuadRound(as0, as1, am2, 0xc76c51a3c24b8b70ull, 0xa81a664ba2bfe8A1ull);
    QuadRound(bs0, bs1, bm2, 0xc76c51a3c24b8b70ull, 0xa81a664ba2bfe8A1ull);
    ShiftMessageB(am1, am2, am3);
    ShiftMessageB(bm1, bm2, bm3);
    QuadRound(as0, as1, am3, 0x106aa070f40e3585ull, 0xd6990624d192e819ull);
    QuadRound(bs0, bs1, bm3, 0x106aa070f40e3585ull, 0xd6990624d192e819ull);
    ShiftMessageB(am2, am3, am0);
    ShiftMessageB(bm2, bm3, bm0);
    QuadRound(as0, as1, am0, 0x34b0bcb52748774cull, 0x1e376c0819a4c116ull);
    QuadRound(bs0, bs1, bm0, 0x34b0bcb52748774cull, 0x1e376c0819a4c116ull);
    ShiftMessageB(am3, am0, am1);
    ShiftMessageB(bm3, bm0, bm1);
    QuadRound(as0, as1, am1, 0x682e6ff35b9cca4full, 0x4ed8aa4a391c0cb3ull);
    QuadRound(bs0, bs1, bm1, 0x682e6ff35b9cca4full, 0x4ed8aa4a391c0cb3ull);
    ShiftMessageC(am0, am1, am2);
    ShiftMessageC(bm0, bm1, bm2);
    QuadRound(as0, as1, am2, 0x8cc7020884c87814ull, 0x78a5636f748f82eeull);
    QuadRound(bs0, bs1, bm2, 0x8cc7020884c87814ull, 0x78a5636f748f82eeull);
    ShiftMessageC(am1, am2, am3);
    ShiftMessageC(bm1, bm2, bm3);
    QuadRound(as0, as1, am3, 0xc67178f2bef9a3f7ull, 0xa4506ceb90befffaull);
    QuadRound(bs0, bs1, bm3, 0xc67178f2bef9a3f7ull, 0xa4506ceb90befffaull);
    as0 = _mm_add_epi32(as0, _mm_load_si128((const __m128i *)INIT0));
    bs0 = _mm_add_epi32(bs0, _mm_load_si128((const __m128i *)INIT0));
    as1 = _mm_add_epi32(as1, _mm_load_si128((const __m128i *)INIT1));
    bs1 = _mm_add_epi32(bs1, _mm_load_si128((const __m128i *)INIT1));

    /* Extract hash into out */
    Unshuffle(as0, as1);
    Unshuffle(bs0, bs1);
    Save(out, as0);
    Save(out + 16, as1);
    Save(out + 32, bs0);
    Save(out + 48, bs1);
}
} // namespace sha256d64_shani

#endif