ring/aead/gcm.rs

// Copyright 2018 Brian Smith.
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHORS DISCLAIM ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
// SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
// OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
// CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

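//! GHASH, the universal hash that GCM uses for authentication. Input is
//! processed in 16-byte blocks; each block is XORed into the accumulator
//! `Xi`, which is then multiplied by the hash subkey `H` in GF(2^128)
//! (NIST SP 800-38D). `Key` holds `H` precomputed into `HTable`; `Context`
//! holds the running `Xi` for one message.
//!
//! A minimal sketch of the intended call sequence; `make_tag` stands in for
//! the caller's final step and is hypothetical:
//!
//! ```ignore
//! let key = Key::new(h_be, cpu_features);
//! let mut ctx = Context::new(&key, aad, cpu_features);
//! ctx.update_blocks(&ciphertext); // length must be a multiple of BLOCK_LEN
//! ctx.update_block(lengths_block); // e.g. the final AAD/ciphertext lengths block
//! let tag = ctx.pre_finish(|xi| make_tag(xi)); // hypothetical final step
//! ```
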
use super::{Aad, Block, BLOCK_LEN};
use crate::cpu;

#[cfg(not(target_arch = "aarch64"))]
mod gcm_nohw;

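/// A GHASH key: the hash subkey `H`, expanded by the platform-specific
/// `GFp_gcm_init_*` routines into a table of precomputed values that makes
/// the per-block multiplications cheaper.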
pub struct Key(HTable);

impl Key {
    pub(super) fn new(h_be: Block, cpu_features: cpu::Features) -> Self {
        let h = h_be.u64s_be_to_native();

        let mut key = Self(HTable {
            Htable: [u128 { hi: 0, lo: 0 }; HTABLE_LEN],
        });
        let h_table = &mut key.0;
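
        // Each back end fills `h_table` with its own precomputed form of `H`;
        // the portable fallback uses only `Htable[0]`.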
        match detect_implementation(cpu_features) {
            #[cfg(target_arch = "x86_64")]
            Implementation::CLMUL if has_avx_movbe(cpu_features) => {
                extern "C" {
                    fn GFp_gcm_init_avx(HTable: &mut HTable, h: &[u64; 2]);
                }
                unsafe {
                    GFp_gcm_init_avx(h_table, &h);
                }
            }

            #[cfg(any(
                target_arch = "aarch64",
                target_arch = "arm",
                target_arch = "x86_64",
                target_arch = "x86"
            ))]
            Implementation::CLMUL => {
                extern "C" {
                    fn GFp_gcm_init_clmul(Htable: &mut HTable, h: &[u64; 2]);
                }
                unsafe {
                    GFp_gcm_init_clmul(h_table, &h);
                }
            }

            #[cfg(any(target_arch = "aarch64", target_arch = "arm"))]
            Implementation::NEON => {
                extern "C" {
                    fn GFp_gcm_init_neon(Htable: &mut HTable, h: &[u64; 2]);
                }
                unsafe {
                    GFp_gcm_init_neon(h_table, &h);
                }
            }

            #[cfg(not(target_arch = "aarch64"))]
            Implementation::Fallback => {
                h_table.Htable[0] = gcm_nohw::init(h);
            }
        }

        key
    }
}

pub struct Context {
    inner: ContextInner,
    cpu_features: cpu::Features,
}

impl Context {
    pub(crate) fn new(key: &Key, aad: Aad<&[u8]>, cpu_features: cpu::Features) -> Self {
        let mut ctx = Context {
            inner: ContextInner {
                Xi: Xi(Block::zero()),
                _unused: Block::zero(),
                Htable: key.0.clone(),
            },
            cpu_features,
        };

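        // Absorb the AAD into `Xi`, zero-padding the final partial block to
        // BLOCK_LEN as the GCM specification requires.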
        for ad in aad.0.chunks(BLOCK_LEN) {
            let mut block = Block::zero();
            block.overwrite_part_at(0, ad);
            ctx.update_block(block);
        }

        ctx
    }

    /// Access to `inner` for the integrated AES-GCM implementations only.
    #[cfg(target_arch = "x86_64")]
    #[inline]
    pub(super) fn inner(&mut self) -> &mut ContextInner {
        &mut self.inner
    }

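    /// Feeds whole 16-byte blocks into GHASH; `input` must be non-empty and
    /// a multiple of `BLOCK_LEN` long.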
    pub fn update_blocks(&mut self, input: &[u8]) {
        debug_assert!(!input.is_empty());
        debug_assert_eq!(input.len() % BLOCK_LEN, 0);

        // Although these functions take `Xi` and `h_table` as separate
        // parameters, one or more of them might assume that they are part of
        // the same `ContextInner` structure.
        let xi = &mut self.inner.Xi;
        let h_table = &self.inner.Htable;

        match detect_implementation(self.cpu_features) {
            #[cfg(target_arch = "x86_64")]
            Implementation::CLMUL if has_avx_movbe(self.cpu_features) => {
                extern "C" {
                    fn GFp_gcm_ghash_avx(
                        xi: &mut Xi,
                        Htable: &HTable,
                        inp: *const u8,
                        len: crate::c::size_t,
                    );
                }
                unsafe {
                    GFp_gcm_ghash_avx(xi, h_table, input.as_ptr(), input.len());
                }
            }

            #[cfg(any(
                target_arch = "aarch64",
                target_arch = "arm",
                target_arch = "x86_64",
                target_arch = "x86"
            ))]
            Implementation::CLMUL => {
                extern "C" {
                    fn GFp_gcm_ghash_clmul(
                        xi: &mut Xi,
                        Htable: &HTable,
                        inp: *const u8,
                        len: crate::c::size_t,
                    );
                }
                unsafe {
                    GFp_gcm_ghash_clmul(xi, h_table, input.as_ptr(), input.len());
                }
            }

            #[cfg(any(target_arch = "aarch64", target_arch = "arm"))]
            Implementation::NEON => {
                extern "C" {
                    fn GFp_gcm_ghash_neon(
                        xi: &mut Xi,
                        Htable: &HTable,
                        inp: *const u8,
                        len: crate::c::size_t,
                    );
                }
                unsafe {
                    GFp_gcm_ghash_neon(xi, h_table, input.as_ptr(), input.len());
                }
            }

            #[cfg(not(target_arch = "aarch64"))]
            Implementation::Fallback => {
                gcm_nohw::ghash(xi, h_table.Htable[0], input);
            }
        }
    }

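    /// One GHASH step: XORs `a` into `Xi` and then multiplies by `H`.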
    pub fn update_block(&mut self, a: Block) {
        self.inner.Xi.bitxor_assign(a);

        // Although these functions take `Xi` and `h_table` as separate
        // parameters, one or more of them might assume that they are part of
        // the same `ContextInner` structure.
        let xi = &mut self.inner.Xi;
        let h_table = &self.inner.Htable;

        match detect_implementation(self.cpu_features) {
            #[cfg(any(
                target_arch = "aarch64",
                target_arch = "arm",
                target_arch = "x86_64",
                target_arch = "x86"
            ))]
            Implementation::CLMUL => {
                extern "C" {
                    fn GFp_gcm_gmult_clmul(xi: &mut Xi, Htable: &HTable);
                }
                unsafe {
                    GFp_gcm_gmult_clmul(xi, h_table);
                }
            }

            #[cfg(any(target_arch = "aarch64", target_arch = "arm"))]
            Implementation::NEON => {
                extern "C" {
                    fn GFp_gcm_gmult_neon(xi: &mut Xi, Htable: &HTable);
                }
                unsafe {
                    GFp_gcm_gmult_neon(xi, h_table);
                }
            }

            #[cfg(not(target_arch = "aarch64"))]
            Implementation::Fallback => {
                gcm_nohw::gmult(xi, h_table.Htable[0]);
            }
        }
    }

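    /// Consumes the context and hands the final `Xi` to `f`; in GCM, the
    /// caller combines `Xi` with the encrypted initial counter block to form
    /// the tag.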
    pub(super) fn pre_finish<F>(self, f: F) -> super::Tag
    where
        F: FnOnce(Xi) -> super::Tag,
    {
        f(self.inner.Xi)
    }

    #[cfg(target_arch = "x86_64")]
    pub(super) fn is_avx2(&self, cpu_features: cpu::Features) -> bool {
        match detect_implementation(cpu_features) {
            Implementation::CLMUL => has_avx_movbe(cpu_features),
            _ => false,
        }
    }
}

// The alignment is required by non-Rust code that uses `GCM128_CONTEXT`.
#[derive(Clone)]
#[repr(C, align(16))]
struct HTable {
    Htable: [u128; HTABLE_LEN],
}

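// Named after the corresponding type in the BoringSSL-derived C and assembly
// code: a pair of 64-bit halves, not Rust's primitive `u128`.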
#[derive(Clone, Copy)]
#[repr(C)]
struct u128 {
    hi: u64,
    lo: u64,
}

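// 16 entries, matching the `Htable` array in BoringSSL's `GCM128_CONTEXT`.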
const HTABLE_LEN: usize = 16;

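/// The running GHASH accumulator; the name follows the `Xi` variable in the
/// GCM specification (NIST SP 800-38D).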
#[repr(transparent)]
pub struct Xi(Block);

impl Xi {
    #[inline]
    fn bitxor_assign(&mut self, a: Block) {
        self.0.bitxor_assign(a)
    }
}

impl From<Xi> for Block {
    #[inline]
    fn from(Xi(block): Xi) -> Self {
        block
    }
}

// This corresponds roughly to the `GCM128_CONTEXT` structure in BoringSSL.
// Some assembly language code, in particular the MOVBE+AVX2 X86-64
// implementation, requires this exact layout.
#[repr(C, align(16))]
pub(super) struct ContextInner {
    Xi: Xi,
    _unused: Block,
    Htable: HTable,
}

enum Implementation {
    #[cfg(any(
        target_arch = "aarch64",
        target_arch = "arm",
        target_arch = "x86_64",
        target_arch = "x86"
    ))]
    CLMUL,

    #[cfg(any(target_arch = "aarch64", target_arch = "arm"))]
    NEON,

    #[cfg(not(target_arch = "aarch64"))]
    Fallback,
}

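/// Picks the best GHASH implementation available: carry-less multiplication
/// (PCLMULQDQ on x86/x86_64, PMULL on ARM) when the CPU supports it, then
/// NEON, then the portable fallback.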
#[inline]
fn detect_implementation(cpu_features: cpu::Features) -> Implementation {
    // `cpu_features` is only used for specific platforms.
    #[cfg(not(any(
        target_arch = "aarch64",
        target_arch = "arm",
        target_arch = "x86_64",
        target_arch = "x86"
    )))]
    let _cpu_features = cpu_features;

    #[cfg(any(
        target_arch = "aarch64",
        target_arch = "arm",
        target_arch = "x86_64",
        target_arch = "x86"
    ))]
    {
        if (cpu::intel::FXSR.available(cpu_features)
            && cpu::intel::PCLMULQDQ.available(cpu_features))
            || cpu::arm::PMULL.available(cpu_features)
        {
            return Implementation::CLMUL;
        }
    }

    #[cfg(target_arch = "arm")]
    {
        if cpu::arm::NEON.available(cpu_features) {
            return Implementation::NEON;
        }
    }

    #[cfg(target_arch = "aarch64")]
    {
        return Implementation::NEON;
    }

    #[cfg(not(target_arch = "aarch64"))]
    Implementation::Fallback
}

#[cfg(target_arch = "x86_64")]
fn has_avx_movbe(cpu_features: cpu::Features) -> bool {
    cpu::intel::AVX.available(cpu_features) && cpu::intel::MOVBE.available(cpu_features)
}