ring/aead/aes_gcm.rs

// Copyright 2015-2016 Brian Smith.
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHORS DISCLAIM ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
// SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
// OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
// CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

use super::{
    aes::{self, Counter},
    gcm, shift, Aad, Block, Direction, Nonce, Tag, BLOCK_LEN,
};
use crate::{aead, cpu, endian::*, error, polyfill};

/// AES-128 in GCM mode with 128-bit tags and 96-bit nonces.
pub static AES_128_GCM: aead::Algorithm = aead::Algorithm {
    key_len: 16,
    init: init_128,
    seal: aes_gcm_seal,
    open: aes_gcm_open,
    id: aead::AlgorithmID::AES_128_GCM,
    max_input_len: AES_GCM_MAX_INPUT_LEN,
};

/// AES-256 in GCM mode with 128-bit tags and 96-bit nonces.
pub static AES_256_GCM: aead::Algorithm = aead::Algorithm {
    key_len: 32,
    init: init_256,
    seal: aes_gcm_seal,
    open: aes_gcm_open,
    id: aead::AlgorithmID::AES_256_GCM,
    max_input_len: AES_GCM_MAX_INPUT_LEN,
};

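// A minimal round-trip sketch of how these algorithms are consumed. This is
// an assumption about the crate's *public* `ring::aead` interface (the
// `UnboundKey`/`LessSafeKey` names are from the 0.16-era API), not anything
// defined in this internal module:
//
//     use ring::aead::{Aad, LessSafeKey, Nonce, UnboundKey, AES_128_GCM};
//
//     fn roundtrip(
//         key_bytes: &[u8; 16],
//         nonce_bytes: [u8; 12],
//         msg: &[u8],
//     ) -> Result<Vec<u8>, ring::error::Unspecified> {
//         let key = LessSafeKey::new(UnboundKey::new(&AES_128_GCM, key_bytes)?);
//         // Encrypt in place; the 16-byte tag is appended to `in_out`.
//         let mut in_out = msg.to_vec();
//         key.seal_in_place_append_tag(
//             Nonce::assume_unique_for_key(nonce_bytes),
//             Aad::empty(),
//             &mut in_out,
//         )?;
//         // Decrypt and verify; returns the plaintext slice on success.
//         let plaintext = key.open_in_place(
//             Nonce::assume_unique_for_key(nonce_bytes),
//             Aad::empty(),
//             &mut in_out,
//         )?;
//         Ok(plaintext.to_vec())
//     }
//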
pub struct Key {
    gcm_key: gcm::Key, // First because it has a large alignment requirement.
    aes_key: aes::Key,
}

fn init_128(key: &[u8], cpu_features: cpu::Features) -> Result<aead::KeyInner, error::Unspecified> {
    init(key, aes::Variant::AES_128, cpu_features)
}

fn init_256(key: &[u8], cpu_features: cpu::Features) -> Result<aead::KeyInner, error::Unspecified> {
    init(key, aes::Variant::AES_256, cpu_features)
}

fn init(
    key: &[u8],
    variant: aes::Variant,
    cpu_features: cpu::Features,
) -> Result<aead::KeyInner, error::Unspecified> {
    let aes_key = aes::Key::new(key, variant, cpu_features)?;
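    // The GHASH key is H = AES_K(0^128), per the GCM specification.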
    let gcm_key = gcm::Key::new(aes_key.encrypt_block(Block::zero()), cpu_features);
    Ok(aead::KeyInner::AesGcm(Key { aes_key, gcm_key }))
}

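// The whole-block loop in `aead()` processes at most this many blocks (3KB)
// per iteration, alternating between CTR encryption and GHASH on each chunk.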
const CHUNK_BLOCKS: usize = 3 * 1024 / 16;

fn aes_gcm_seal(
    key: &aead::KeyInner,
    nonce: Nonce,
    aad: Aad<&[u8]>,
    in_out: &mut [u8],
    cpu_features: cpu::Features,
) -> Tag {
    aead(key, nonce, aad, in_out, Direction::Sealing, cpu_features)
}

fn aes_gcm_open(
    key: &aead::KeyInner,
    nonce: Nonce,
    aad: Aad<&[u8]>,
    in_prefix_len: usize,
    in_out: &mut [u8],
    cpu_features: cpu::Features,
) -> Tag {
    aead(
        key,
        nonce,
        aad,
        in_out,
        Direction::Opening { in_prefix_len },
        cpu_features,
    )
}

#[inline(always)] // Avoid branching on `direction`.
fn aead(
    key: &aead::KeyInner,
    nonce: Nonce,
    aad: Aad<&[u8]>,
    in_out: &mut [u8],
    direction: Direction,
    cpu_features: cpu::Features,
) -> Tag {
    let Key { aes_key, gcm_key } = match key {
        aead::KeyInner::AesGcm(key) => key,
        _ => unreachable!(),
    };

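    // With a 96-bit nonce, counter block 1 (J0) is reserved for computing the
    // tag; the keystream for the payload starts at counter value 2.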
    let mut ctr = Counter::one(nonce);
    let tag_iv = ctr.increment();

    let aad_len = aad.0.len();
    let mut gcm_ctx = gcm::Context::new(gcm_key, aad, cpu_features);

    let in_prefix_len = match direction {
        Direction::Opening { in_prefix_len } => in_prefix_len,
        Direction::Sealing => 0,
    };

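    // When opening, the ciphertext starts at offset `in_prefix_len` and the
    // plaintext is written to the front of `in_out`; sealing encrypts
    // strictly in place (`in_prefix_len == 0`).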
    let total_in_out_len = in_out.len() - in_prefix_len;

    let in_out = integrated_aes_gcm(
        aes_key,
        &mut gcm_ctx,
        in_out,
        &mut ctr,
        direction,
        cpu_features,
    );
    let in_out_len = in_out.len() - in_prefix_len;

    // Process any (remaining) whole blocks.
    let whole_len = in_out_len - (in_out_len % BLOCK_LEN);
    {
        let mut chunk_len = CHUNK_BLOCKS * BLOCK_LEN;
        let mut output = 0;
        let mut input = in_prefix_len;
        loop {
            if whole_len - output < chunk_len {
                chunk_len = whole_len - output;
            }
            if chunk_len == 0 {
                break;
            }

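            // GHASH is always computed over the ciphertext: when opening,
            // hash the input before decrypting it in place; when sealing,
            // hash the output after encrypting (below).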
            if let Direction::Opening { .. } = direction {
                gcm_ctx.update_blocks(&in_out[input..][..chunk_len]);
            }

            aes_key.ctr32_encrypt_blocks(
                &mut in_out[output..][..(chunk_len + in_prefix_len)],
                direction,
                &mut ctr,
            );

            if let Direction::Sealing = direction {
                gcm_ctx.update_blocks(&in_out[output..][..chunk_len]);
            }

            output += chunk_len;
            input += chunk_len;
        }
    }

    // Process any remaining partial block.
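    // When sealing, the keystream bytes past the end of the message are
    // zeroed (`zero_from`) so that GHASH sees the final partial ciphertext
    // block zero-padded, as GCM requires.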
    let remainder = &mut in_out[whole_len..];
    shift::shift_partial((in_prefix_len, remainder), |remainder| {
        let mut input = Block::zero();
        input.overwrite_part_at(0, remainder);
        if let Direction::Opening { .. } = direction {
            gcm_ctx.update_block(input);
        }
        let mut output = aes_key.encrypt_iv_xor_block(ctr.into(), input);
        if let Direction::Sealing = direction {
            output.zero_from(remainder.len());
            gcm_ctx.update_block(output);
        }
        output
    });

    // Authenticate the final block containing the input lengths.
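    // GCM's last GHASH block is len(AAD) || len(ciphertext), with both
    // lengths in bits (hence the shifts by 3).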
    let aad_bits = polyfill::u64_from_usize(aad_len) << 3;
    let ciphertext_bits = polyfill::u64_from_usize(total_in_out_len) << 3;
    gcm_ctx.update_block(Block::from_u64_be(
        BigEndian::from(aad_bits),
        BigEndian::from(ciphertext_bits),
    ));

    // Finalize the tag and return it.
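    // Per GCM, Tag = AES_K(J0) XOR the GHASH output; `tag_iv` saved above is
    // that J0 counter block.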
    gcm_ctx.pre_finish(|pre_tag| {
        let bytes = tag_iv.into_bytes_less_safe();
        let mut tag = aes_key.encrypt_block(Block::from(&bytes));
        tag.bitxor_assign(pre_tag.into());
        Tag(*tag.as_ref())
    })
}

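// On x86_64, the bulk of the work is handed to the stitched AES-GCM assembly
// (the `GFp_aesni_gcm_*` functions, which appear to derive from BoringSSL's
// aesni-gcm-x86_64 code); it interleaves AES-NI CTR encryption with GHASH
// when the required CPU features are present.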
// Returns the data that wasn't processed.
#[cfg(target_arch = "x86_64")]
#[inline] // Optimize out the match on `direction`.
fn integrated_aes_gcm<'a>(
    aes_key: &aes::Key,
    gcm_ctx: &mut gcm::Context,
    in_out: &'a mut [u8],
    ctr: &mut Counter,
    direction: Direction,
    cpu_features: cpu::Features,
) -> &'a mut [u8] {
    use crate::c;

    if !aes_key.is_aes_hw() || !gcm_ctx.is_avx2(cpu_features) {
        return in_out;
    }

    let processed = match direction {
        Direction::Opening { in_prefix_len } => {
            extern "C" {
                fn GFp_aesni_gcm_decrypt(
                    input: *const u8,
                    output: *mut u8,
                    len: c::size_t,
                    key: &aes::AES_KEY,
                    ivec: &mut Counter,
                    gcm: &mut gcm::ContextInner,
                ) -> c::size_t;
            }
            unsafe {
                GFp_aesni_gcm_decrypt(
                    in_out[in_prefix_len..].as_ptr(),
                    in_out.as_mut_ptr(),
                    in_out.len() - in_prefix_len,
                    aes_key.inner_less_safe(),
                    ctr,
                    gcm_ctx.inner(),
                )
            }
        }
        Direction::Sealing => {
            extern "C" {
                fn GFp_aesni_gcm_encrypt(
                    input: *const u8,
                    output: *mut u8,
                    len: c::size_t,
                    key: &aes::AES_KEY,
                    ivec: &mut Counter,
                    gcm: &mut gcm::ContextInner,
                ) -> c::size_t;
            }
            unsafe {
                GFp_aesni_gcm_encrypt(
                    in_out.as_ptr(),
                    in_out.as_mut_ptr(),
                    in_out.len(),
                    aes_key.inner_less_safe(),
                    ctr,
                    gcm_ctx.inner(),
                )
            }
        }
    };

    &mut in_out[processed..]
}

#[cfg(not(target_arch = "x86_64"))]
#[inline]
fn integrated_aes_gcm<'a>(
    _: &aes::Key,
    _: &mut gcm::Context,
    in_out: &'a mut [u8],
    _: &mut Counter,
    _: Direction,
    _: cpu::Features,
) -> &'a mut [u8] {
    in_out // This doesn't process any of the input so it all remains.
}

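// One AES-GCM invocation may process at most 2^32 - 2 blocks of 16 bytes,
// i.e. 2^39 - 256 bits of plaintext (NIST SP 800-38D); the `2` presumably
// tells `max_input_len` how many counter-block values to exclude.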
const AES_GCM_MAX_INPUT_LEN: u64 = super::max_input_len(BLOCK_LEN, 2);

#[cfg(test)]
mod tests {
    #[test]
    fn max_input_len_test() {
        // [NIST SP800-38D] Section 5.2.1.1. Note that [RFC 5116 Section 5.1] and
        // [RFC 5116 Section 5.2] have an off-by-one error in `P_MAX`.
        //
        // [NIST SP800-38D]:
        //    http://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38d.pdf
        // [RFC 5116 Section 5.1]: https://tools.ietf.org/html/rfc5116#section-5.1
        // [RFC 5116 Section 5.2]: https://tools.ietf.org/html/rfc5116#section-5.2
        const NIST_SP800_38D_MAX_BITS: u64 = (1u64 << 39) - 256;
        assert_eq!(NIST_SP800_38D_MAX_BITS, 549_755_813_632u64);
        assert_eq!(
            super::AES_128_GCM.max_input_len * 8,
            NIST_SP800_38D_MAX_BITS
        );
        assert_eq!(
            super::AES_256_GCM.max_input_len * 8,
            NIST_SP800_38D_MAX_BITS
        );
    }
}
301}