Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0-or-later
2 : /* QUIC kernel implementation
3 : * (C) Copyright Red Hat Corp. 2023
4 : *
5 : * This file is part of the QUIC kernel implementation
6 : *
7 : * Initialization/cleanup for QUIC protocol support.
8 : *
9 : * Written or modified by:
10 : * Xin Long <lucien.xin@gmail.com>
11 : */
12 :
13 : #include <crypto/skcipher.h>
14 : #include <linux/skbuff.h>
15 : #include <crypto/aead.h>
16 : #include <crypto/hkdf.h>
17 : #include <linux/quic.h>
18 : #include <net/tls.h>
19 :
20 : #include "common.h"
21 : #include "crypto.h"
22 :
23 : #define QUIC_RANDOM_DATA_LEN 32
24 :
25 : static u8 quic_random_data[QUIC_RANDOM_DATA_LEN] __read_mostly;
26 :
27 : /* HKDF-Extract. */
28 8070 : static int quic_crypto_hkdf_extract(struct crypto_shash *tfm, struct quic_data *srt,
29 : struct quic_data *hash, struct quic_data *key)
30 : {
31 8070 : return hkdf_extract(tfm, hash->data, hash->len, srt->data, srt->len, key->data);
32 : }
33 :
34 : #define QUIC_MAX_INFO_LEN 256
35 :
36 : /* HKDF-Expand-Label. */
37 32701 : static int quic_crypto_hkdf_expand(struct crypto_shash *tfm, struct quic_data *srt,
38 : struct quic_data *label, struct quic_data *hash,
39 : struct quic_data *key)
40 : {
41 32701 : u8 info[QUIC_MAX_INFO_LEN], *p = info;
42 32701 : u8 LABEL[] = "tls13 ";
43 32701 : u32 infolen;
44 32701 : int err;
45 :
46 : /* rfc8446#section-7.1:
47 : *
48 : * HKDF-Expand-Label(Secret, Label, Context, Length) =
49 : * HKDF-Expand(Secret, HkdfLabel, Length)
50 : *
51 : * Where HkdfLabel is specified as:
52 : *
53 : * struct {
54 : * uint16 length = Length;
55 : * opaque label<7..255> = "tls13 " + Label;
56 : * opaque context<0..255> = Context;
57 : * } HkdfLabel;
58 : */
59 32701 : *p++ = (u8)(key->len / QUIC_MAX_INFO_LEN);
60 32701 : *p++ = (u8)(key->len % QUIC_MAX_INFO_LEN);
61 32701 : *p++ = (u8)(sizeof(LABEL) - 1 + label->len);
62 32701 : p = quic_put_data(p, LABEL, sizeof(LABEL) - 1);
63 32701 : p = quic_put_data(p, label->data, label->len);
64 32701 : if (hash) {
65 32701 : *p++ = (u8)hash->len;
66 32701 : p = quic_put_data(p, hash->data, hash->len);
67 : } else {
68 0 : *p++ = 0;
69 : }
70 32701 : infolen = (u32)(p - info);
71 :
72 32701 : err = crypto_shash_setkey(tfm, srt->data, srt->len);
73 32701 : if (err)
74 : return err;
75 :
76 32701 : return hkdf_expand(tfm, info, infolen, key->data, key->len);
77 : }
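/* As a worked example: for a 16-byte output key, the label "quic key" and an empty
 * Context, the HkdfLabel built above is the 18-byte buffer
 *
 *   00 10                    Length = 16
 *   0e                       label length = strlen("tls13 ") + strlen("quic key") = 14
 *   "tls13 quic key"         14 label bytes
 *   00                       empty Context
 *
 * which is then passed to hkdf_expand() after the secret has been installed as the
 * HMAC key via crypto_shash_setkey().
 */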
78 :
79 : #define KEY_LABEL_V1 "quic key"
80 : #define IV_LABEL_V1 "quic iv"
81 : #define HP_KEY_LABEL_V1 "quic hp"
82 :
83 : #define KU_LABEL_V1 "quic ku"
84 :
85 : /* rfc9369#section-3.3.2:
86 : *
87 : * The labels used in rfc9001 to derive packet protection keys, header protection keys, Retry
88 : * Integrity Tag keys, and key updates change from "quic key" to "quicv2 key", from "quic iv"
89 : * to "quicv2 iv", from "quic hp" to "quicv2 hp", and from "quic ku" to "quicv2 ku".
90 : */
91 : #define KEY_LABEL_V2 "quicv2 key"
92 : #define IV_LABEL_V2 "quicv2 iv"
93 : #define HP_KEY_LABEL_V2 "quicv2 hp"
94 :
95 : #define KU_LABEL_V2 "quicv2 ku"
96 :
97 : /* Packet Protection Keys. */
98 7817 : static int quic_crypto_keys_derive(struct crypto_shash *tfm, struct quic_data *s,
99 : struct quic_data *k, struct quic_data *i,
100 : struct quic_data *hp_k, u32 version)
101 : {
102 7817 : struct quic_data hp_k_l = {HP_KEY_LABEL_V1, strlen(HP_KEY_LABEL_V1)};
103 7817 : struct quic_data k_l = {KEY_LABEL_V1, strlen(KEY_LABEL_V1)};
104 7817 : struct quic_data i_l = {IV_LABEL_V1, strlen(IV_LABEL_V1)};
105 7817 : struct quic_data z = {};
106 7817 : int err;
107 :
108 : /* rfc9001#section-5.1:
109 : *
110 : * The current encryption level secret and the label "quic key" are input to the
111 : * KDF to produce the AEAD key; the label "quic iv" is used to derive the
112 : * Initialization Vector (IV). The header protection key uses the "quic hp" label.
113 : * Using these labels provides key separation between QUIC and TLS.
114 : */
115 7817 : if (version == QUIC_VERSION_V2) {
116 108 : quic_data(&hp_k_l, HP_KEY_LABEL_V2, strlen(HP_KEY_LABEL_V2));
117 108 : quic_data(&k_l, KEY_LABEL_V2, strlen(KEY_LABEL_V2));
118 108 : quic_data(&i_l, IV_LABEL_V2, strlen(IV_LABEL_V2));
119 : }
120 :
121 7817 : err = quic_crypto_hkdf_expand(tfm, s, &k_l, &z, k);
122 7817 : if (err)
123 : return err;
124 7817 : err = quic_crypto_hkdf_expand(tfm, s, &i_l, &z, i);
125 7817 : if (err)
126 : return err;
127 : /* Don't change hp key for key update. */
128 7817 : if (!hp_k)
129 : return 0;
130 :
131 7295 : return quic_crypto_hkdf_expand(tfm, s, &hp_k_l, &z, hp_k);
132 : }
133 :
134 : /* Derive and install transmission (TX) packet protection keys for the current key phase.
135 : * This derives the AEAD encryption key, the IV, and optionally the header protection key.
136 : */
137 3663 : static int quic_crypto_tx_keys_derive_and_install(struct quic_crypto *crypto)
138 : {
139 3663 : struct quic_data srt = {}, k, iv, hp_k = {}, *hp = NULL;
140 3663 : u8 tx_key[QUIC_KEY_LEN], tx_hp_key[QUIC_KEY_LEN];
141 3663 : int err, phase = crypto->key_phase;
142 3663 : u32 keylen, ivlen = QUIC_IV_LEN;
143 :
144 3663 : keylen = crypto->cipher->keylen;
145 3663 : quic_data(&srt, crypto->tx_secret, crypto->cipher->secretlen);
146 3663 : quic_data(&k, tx_key, keylen);
147 3663 : quic_data(&iv, crypto->tx_iv[phase], ivlen);
148 : /* Only derive header protection key when not in key update. */
149 3663 : if (!crypto->key_pending)
150 3620 : hp = quic_data(&hp_k, tx_hp_key, keylen);
151 3663 : err = quic_crypto_keys_derive(crypto->secret_tfm, &srt, &k, &iv, hp, crypto->version);
152 3663 : if (err)
153 : return err;
154 3663 : err = crypto_aead_setauthsize(crypto->tx_tfm[phase], QUIC_TAG_LEN);
155 3663 : if (err)
156 : return err;
157 3663 : err = crypto_aead_setkey(crypto->tx_tfm[phase], tx_key, keylen);
158 3663 : if (err)
159 : return err;
160 3663 : if (hp) {
161 3620 : err = crypto_skcipher_setkey(crypto->tx_hp_tfm, tx_hp_key, keylen);
162 3620 : if (err)
163 : return err;
164 : }
165 3663 : pr_debug("%s: k: %16phN, iv: %12phN, hp_k:%16phN\n", __func__, k.data, iv.data, hp_k.data);
166 : return 0;
167 : }
168 :
169 : /* Derive and install reception (RX) packet protection keys for the current key phase.
170 : * This installs the AEAD decryption key, the IV, and optionally the header protection key.
171 : */
172 3718 : static int quic_crypto_rx_keys_derive_and_install(struct quic_crypto *crypto)
173 : {
174 3718 : struct quic_data srt = {}, k, iv, hp_k = {}, *hp = NULL;
175 3718 : u8 rx_key[QUIC_KEY_LEN], rx_hp_key[QUIC_KEY_LEN];
176 3718 : int err, phase = crypto->key_phase;
177 3718 : u32 keylen, ivlen = QUIC_IV_LEN;
178 :
179 3718 : keylen = crypto->cipher->keylen;
180 3718 : quic_data(&srt, crypto->rx_secret, crypto->cipher->secretlen);
181 3718 : quic_data(&k, rx_key, keylen);
182 3718 : quic_data(&iv, crypto->rx_iv[phase], ivlen);
183 : /* Only derive header protection key when not in key update. */
184 3718 : if (!crypto->key_pending)
185 3675 : hp = quic_data(&hp_k, rx_hp_key, keylen);
186 3718 : err = quic_crypto_keys_derive(crypto->secret_tfm, &srt, &k, &iv, hp, crypto->version);
187 3718 : if (err)
188 : return err;
189 3718 : err = crypto_aead_setauthsize(crypto->rx_tfm[phase], QUIC_TAG_LEN);
190 3718 : if (err)
191 : return err;
192 3718 : err = crypto_aead_setkey(crypto->rx_tfm[phase], rx_key, keylen);
193 3718 : if (err)
194 : return err;
195 3718 : if (hp) {
196 3675 : err = crypto_skcipher_setkey(crypto->rx_hp_tfm, rx_hp_key, keylen);
197 3675 : if (err)
198 : return err;
199 : }
200 3718 : pr_debug("%s: k: %16phN, iv: %12phN, hp_k:%16phN\n", __func__, k.data, iv.data, hp_k.data);
201 : return 0;
202 : }
203 :
204 10009337 : static void *quic_crypto_skcipher_mem_alloc(struct crypto_skcipher *tfm, u32 mask_size,
205 : u8 **iv, struct skcipher_request **req)
206 : {
207 10009337 : unsigned int iv_size, req_size;
208 10009337 : unsigned int len;
209 10009337 : u8 *mem;
210 :
211 10009337 : iv_size = crypto_skcipher_ivsize(tfm);
212 10009337 : req_size = sizeof(**req) + crypto_skcipher_reqsize(tfm);
213 :
214 10009337 : len = mask_size;
215 10009337 : len += iv_size;
216 10009337 : len += crypto_skcipher_alignmask(tfm) & ~(crypto_tfm_ctx_alignment() - 1);
217 10009337 : len = ALIGN(len, crypto_tfm_ctx_alignment());
218 10009337 : len += req_size;
219 :
220 10009337 : mem = kzalloc(len, GFP_ATOMIC);
221 10016941 : if (!mem)
222 : return NULL;
223 :
224 10016941 : *iv = (u8 *)PTR_ALIGN(mem + mask_size, crypto_skcipher_alignmask(tfm) + 1);
225 10016941 : *req = (struct skcipher_request *)PTR_ALIGN(*iv + iv_size,
226 : crypto_tfm_ctx_alignment());
227 :
228 10016941 : return (void *)mem;
229 : }
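/* The single allocation above packs three regions into one kzalloc() so that the
 * hot path can release everything with a single kfree():
 *
 *   [ mask (mask_size bytes) | pad | IV (crypto_skcipher_ivsize) | pad | skcipher_request ]
 *
 * quic_crypto_aead_mem_alloc() below follows the same pattern, with an aead_request
 * instead of a skcipher_request and a scatterlist array appended at the end.
 */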
230 :
231 : #define QUIC_SAMPLE_LEN 16
232 : #define QUIC_MAX_PN_LEN 4
233 :
234 : #define QUIC_HEADER_FORM_BIT 0x80
235 : #define QUIC_LONG_HEADER_MASK 0x0f
236 : #define QUIC_SHORT_HEADER_MASK 0x1f
237 :
238 : /* Header Protection. */
239 5007867 : static int quic_crypto_header_encrypt(struct crypto_skcipher *tfm, struct sk_buff *skb, bool chacha)
240 : {
241 5007867 : struct quic_skb_cb *cb = QUIC_SKB_CB(skb);
242 5007867 : struct skcipher_request *req;
243 5007867 : struct scatterlist sg;
244 5007867 : u8 *mask, *iv, *p;
245 5007867 : int err, i;
246 :
247 5007867 : mask = quic_crypto_skcipher_mem_alloc(tfm, QUIC_SAMPLE_LEN, &iv, &req);
248 5007832 : if (!mask)
249 : return -ENOMEM;
250 :
251 : /* rfc9001#section-5.4.2: Header Protection Sample:
252 : *
253 : * # pn_offset is the start of the Packet Number field.
254 : * sample_offset = pn_offset + 4
255 : *
256 : * sample = packet[sample_offset..sample_offset+sample_length]
257 : *
258 : * rfc9001#section-5.4.3: AES-Based Header Protection:
259 : *
260 : * header_protection(hp_key, sample):
261 : * mask = AES-ECB(hp_key, sample)
262 : *
263 : * rfc9001#section-5.4.4: ChaCha20-Based Header Protection:
264 : *
265 : * header_protection(hp_key, sample):
266 : * counter = sample[0..3]
267 : * nonce = sample[4..15]
268 : * mask = ChaCha20(hp_key, counter, nonce, {0,0,0,0,0})
269 : */
270 10015664 : memcpy((chacha ? iv : mask), skb->data + cb->number_offset + QUIC_MAX_PN_LEN,
271 : QUIC_SAMPLE_LEN);
272 5007832 : sg_init_one(&sg, mask, QUIC_SAMPLE_LEN);
273 5007849 : skcipher_request_set_tfm(req, tfm);
274 5007849 : skcipher_request_set_crypt(req, &sg, &sg, QUIC_SAMPLE_LEN, iv);
275 5007849 : err = crypto_skcipher_encrypt(req);
276 5007953 : if (err)
277 0 : goto err;
278 :
279 : /* rfc9001#section-5.4.1:
280 : *
281 : * mask = header_protection(hp_key, sample)
282 : *
283 : * pn_length = (packet[0] & 0x03) + 1
284 : * if (packet[0] & 0x80) == 0x80:
285 : * # Long header: 4 bits masked
286 : * packet[0] ^= mask[0] & 0x0f
287 : * else:
288 : * # Short header: 5 bits masked
289 : * packet[0] ^= mask[0] & 0x1f
290 : *
291 : * # pn_offset is the start of the Packet Number field.
292 : * packet[pn_offset:pn_offset+pn_length] ^= mask[1:1+pn_length]
293 : */
294 5007953 : p = skb->data;
295 5007953 : *p = (u8)(*p ^ (mask[0] & (((*p & QUIC_HEADER_FORM_BIT) == QUIC_HEADER_FORM_BIT) ?
296 : QUIC_LONG_HEADER_MASK : QUIC_SHORT_HEADER_MASK)));
297 5007953 : p = skb->data + cb->number_offset;
298 25039764 : for (i = 1; i <= cb->number_len; i++)
299 20031811 : *p++ ^= mask[i];
300 5007953 : err:
301 5007953 : kfree(mask);
302 5007953 : return err;
303 : }
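/* A worked example of the masking above: with a short-header first byte of 0x41 and
 * mask[0] = 0x7f, only the low five bits are touched, so packet[0] becomes
 * 0x41 ^ (0x7f & 0x1f) = 0x5e; the following number_len bytes are XORed with
 * mask[1..number_len]. Long headers mask only the low four bits of the first byte.
 */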
304 :
305 : /* Extracts and reconstructs the packet number from an incoming QUIC packet. */
306 6575676 : static void quic_crypto_get_header(struct sk_buff *skb)
307 : {
308 6575676 : struct quic_skb_cb *cb = QUIC_SKB_CB(skb);
309 6575676 : struct quichdr *hdr = quic_hdr(skb);
310 6575676 : u32 len = QUIC_MAX_PN_LEN;
311 6575676 : u8 *p = (u8 *)hdr;
312 :
313 : /* rfc9000#section-17.1:
314 : *
315 : * Once header protection is removed, the packet number is decoded by finding the packet
316 : * number value that is closest to the next expected packet. The next expected packet is
317 : * the highest received packet number plus one.
318 : */
319 6575676 : p += cb->number_offset;
320 6575676 : cb->key_phase = hdr->key;
321 6575676 : cb->number_len = hdr->pnl + 1;
322 6575676 : quic_get_int(&p, &len, &cb->number, cb->number_len);
323 6575367 : cb->number = quic_get_num(cb->number_max, cb->number, cb->number_len);
324 :
325 6575202 : if (cb->number > cb->number_max)
326 6572179 : cb->number_max = cb->number;
327 6575202 : }
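/* quic_get_num() above is provided by the common helpers; the following is a minimal
 * sketch of the packet number reconstruction it is expected to perform, based on
 * rfc9000#appendix-A.3 (the function name is illustrative only, not part of this file):
 */
static u64 quic_decode_packet_number(u64 largest_pn, u64 truncated_pn, u8 pn_len)
{
	u64 expected = largest_pn + 1;
	u64 win = 1ULL << (pn_len * 8);
	u64 hwin = win / 2;
	u64 mask = win - 1;
	u64 candidate;

	/* Pick the candidate closest to the expected number, keeping its high bits. */
	candidate = (expected & ~mask) | truncated_pn;

	/* The extra "expected >= hwin" test guards the u64 subtraction; the RFC
	 * pseudocode assumes unbounded integers.
	 */
	if (expected >= hwin && candidate <= expected - hwin &&
	    candidate < (1ULL << 62) - win)
		return candidate + win;
	if (candidate > expected + hwin && candidate >= win)
		return candidate - win;
	return candidate;
}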
328 :
329 : #define QUIC_PN_LEN_BITS_MASK 0x03
330 :
331 5024492 : static int quic_crypto_header_decrypt(struct crypto_skcipher *tfm, struct sk_buff *skb, bool chacha)
332 : {
333 5024492 : struct quic_skb_cb *cb = QUIC_SKB_CB(skb);
334 5024492 : struct quichdr *hdr = quic_hdr(skb);
335 5024492 : int err, i, len = cb->length;
336 5024492 : struct skcipher_request *req;
337 5024492 : struct scatterlist sg;
338 5024492 : u8 *mask, *iv, *p;
339 :
340 5024492 : mask = quic_crypto_skcipher_mem_alloc(tfm, QUIC_SAMPLE_LEN, &iv, &req);
341 5024419 : if (!mask)
342 : return -ENOMEM;
343 :
344 5024419 : if (len < QUIC_MAX_PN_LEN + QUIC_SAMPLE_LEN) {
345 0 : err = -EINVAL;
346 0 : goto err;
347 : }
348 :
349 : /* Similar logic to quic_crypto_header_encrypt(). */
350 5024419 : p = (u8 *)hdr + cb->number_offset;
351 10048838 : memcpy((chacha ? iv : mask), p + QUIC_MAX_PN_LEN, QUIC_SAMPLE_LEN);
352 5024419 : sg_init_one(&sg, mask, QUIC_SAMPLE_LEN);
353 5024397 : skcipher_request_set_tfm(req, tfm);
354 5024397 : skcipher_request_set_crypt(req, &sg, &sg, QUIC_SAMPLE_LEN, iv);
355 5024397 : err = crypto_skcipher_encrypt(req);
356 5024578 : if (err)
357 0 : goto err;
358 :
359 5024578 : p = (u8 *)hdr;
360 5024578 : *p = (u8)(*p ^ (mask[0] & (((*p & QUIC_HEADER_FORM_BIT) == QUIC_HEADER_FORM_BIT) ?
361 : QUIC_LONG_HEADER_MASK : QUIC_SHORT_HEADER_MASK)));
362 5024578 : cb->number_len = (*p & QUIC_PN_LEN_BITS_MASK) + 1;
363 5024578 : p += cb->number_offset;
364 24747750 : for (i = 0; i < cb->number_len; ++i)
365 19723172 : *(p + i) = *((u8 *)hdr + cb->number_offset + i) ^ mask[i + 1];
366 5024578 : quic_crypto_get_header(skb);
367 :
368 5024216 : err:
369 5024216 : kfree(mask);
370 5024216 : return err;
371 : }
372 :
373 10016981 : static void *quic_crypto_aead_mem_alloc(struct crypto_aead *tfm, u32 ctx_size,
374 : u8 **iv, struct aead_request **req,
375 : struct scatterlist **sg, u32 nsg)
376 : {
377 10016981 : unsigned int iv_size, req_size;
378 10016981 : unsigned int len;
379 10016981 : u8 *mem;
380 :
381 10016981 : iv_size = crypto_aead_ivsize(tfm);
382 10016981 : req_size = sizeof(**req) + crypto_aead_reqsize(tfm);
383 :
384 10016981 : len = ctx_size;
385 10016981 : len += iv_size;
386 10016981 : len += crypto_aead_alignmask(tfm) & ~(crypto_tfm_ctx_alignment() - 1);
387 10016981 : len = ALIGN(len, crypto_tfm_ctx_alignment());
388 10016981 : len += req_size;
389 10016981 : len = ALIGN(len, __alignof__(struct scatterlist));
390 10016981 : len += nsg * sizeof(**sg);
391 :
392 10016981 : mem = kzalloc(len, GFP_ATOMIC);
393 10017855 : if (!mem)
394 : return NULL;
395 :
396 10017855 : *iv = (u8 *)PTR_ALIGN(mem + ctx_size, crypto_aead_alignmask(tfm) + 1);
397 10017855 : *req = (struct aead_request *)PTR_ALIGN(*iv + iv_size,
398 : crypto_tfm_ctx_alignment());
399 10017855 : *sg = (struct scatterlist *)PTR_ALIGN((u8 *)*req + req_size,
400 : __alignof__(struct scatterlist));
401 :
402 10017855 : return (void *)mem;
403 : }
404 :
405 0 : static void quic_crypto_destruct_skb(struct sk_buff *skb)
406 : {
407 0 : kfree(skb_shinfo(skb)->destructor_arg);
408 0 : sock_efree(skb);
409 0 : }
410 :
411 0 : static void quic_crypto_done(void *data, int err)
412 : {
413 0 : struct sk_buff *skb = data;
414 :
415 0 : QUIC_SKB_CB(skb)->crypto_done(skb, err);
416 0 : }
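/* When crypto_aead_encrypt()/crypto_aead_decrypt() return -EINPROGRESS below,
 * quic_crypto_done() runs from the crypto layer once the request completes and hands
 * the skb back through cb->crypto_done(). The re-entered path is assumed to run with
 * cb->resume set, so quic_crypto_encrypt()/quic_crypto_decrypt() skip the payload
 * pass and only finish the header handling.
 */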
417 :
418 : /* AEAD Usage. */
419 5008072 : static int quic_crypto_payload_encrypt(struct crypto_aead *tfm, struct sk_buff *skb,
420 : u8 *tx_iv, bool ccm)
421 : {
422 5008072 : struct quic_skb_cb *cb = QUIC_SKB_CB(skb);
423 5008072 : struct quichdr *hdr = quic_hdr(skb);
424 5008072 : u8 *iv, i, nonce[QUIC_IV_LEN];
425 5008072 : struct aead_request *req;
426 5008072 : struct sk_buff *trailer;
427 5008072 : struct scatterlist *sg;
428 5008072 : u32 nsg, hlen, len;
429 5008072 : void *ctx;
430 5008072 : __be64 n;
431 5008072 : int err;
432 :
433 5008072 : len = skb->len;
434 5008072 : err = skb_cow_data(skb, QUIC_TAG_LEN, &trailer);
435 5008071 : if (err < 0)
436 : return err;
437 5008071 : nsg = (u32)err;
438 5008071 : pskb_put(skb, trailer, QUIC_TAG_LEN);
439 5008085 : hdr->key = cb->key_phase;
440 :
441 5008085 : ctx = quic_crypto_aead_mem_alloc(tfm, 0, &iv, &req, &sg, nsg);
442 5008063 : if (!ctx)
443 : return -ENOMEM;
444 :
445 5008063 : sg_init_table(sg, nsg);
446 5008079 : err = skb_to_sgvec(skb, sg, 0, (int)skb->len);
447 5008083 : if (err < 0)
448 0 : goto err;
449 :
450 : /* rfc9001#section-5.3:
451 : *
452 : * The associated data, A, for the AEAD is the contents of the QUIC header,
453 : * starting from the first byte of either the short or long header, up to and
454 : * including the unprotected packet number.
455 : *
456 : * The nonce, N, is formed by combining the packet protection IV with the packet
457 : * number. The 62 bits of the reconstructed QUIC packet number in network byte
458 : * order are left-padded with zeros to the size of the IV. The exclusive OR of the
459 : * padded packet number and the IV forms the AEAD nonce.
460 : */
461 5008083 : hlen = cb->number_offset + cb->number_len;
462 5008083 : memcpy(nonce, tx_iv, QUIC_IV_LEN);
463 5008083 : n = cpu_to_be64(cb->number);
464 45072585 : for (i = 0; i < sizeof(n); i++)
465 40064502 : nonce[QUIC_IV_LEN - sizeof(n) + i] ^= ((u8 *)&n)[i];
466 :
467 : /* For CCM-based ciphers, the first byte of the IV is a constant (B0), so the nonce is copied at &iv[ccm] to keep it; other ciphers start at iv[0]. */
468 5008083 : iv[0] = TLS_AES_CCM_IV_B0_BYTE;
469 10016166 : memcpy(&iv[ccm], nonce, QUIC_IV_LEN);
470 5008083 : aead_request_set_tfm(req, tfm);
471 5008083 : aead_request_set_ad(req, hlen);
472 5008083 : aead_request_set_crypt(req, sg, sg, len - hlen, iv);
473 5008083 : aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, (void *)quic_crypto_done, skb);
474 :
475 5008083 : err = crypto_aead_encrypt(req);
476 5007941 : if (err == -EINPROGRESS) {
477 : /* Will complete asynchronously; set destructor to free context. */
478 0 : skb->destructor = quic_crypto_destruct_skb;
479 0 : skb_shinfo(skb)->destructor_arg = ctx;
480 0 : return err;
481 : }
482 :
483 5007941 : err:
484 5007941 : kfree(ctx);
485 5007941 : return err;
486 : }
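/* As a worked example of the nonce construction above: the 62-bit packet number is
 * written big-endian and left-padded with zeros to the IV length before being XORed
 * into the IV, so for packet number 0x1234 only the last two IV bytes change (they
 * are XORed with 0x12 and 0x34 respectively).
 */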
487 :
488 5024198 : static int quic_crypto_payload_decrypt(struct crypto_aead *tfm, struct sk_buff *skb,
489 : u8 *rx_iv, bool ccm)
490 : {
491 5024198 : struct quic_skb_cb *cb = QUIC_SKB_CB(skb);
492 5024198 : u8 *iv, i, nonce[QUIC_IV_LEN];
493 5024198 : struct aead_request *req;
494 5024198 : struct sk_buff *trailer;
495 5024198 : int nsg, hlen, len, err;
496 5024198 : struct scatterlist *sg;
497 5024198 : void *ctx;
498 5024198 : __be64 n;
499 :
500 5024198 : len = cb->length + cb->number_offset;
501 5024198 : hlen = cb->number_offset + cb->number_len;
502 5024198 : if (len - hlen < QUIC_TAG_LEN)
503 : return -EINVAL;
504 5024198 : nsg = skb_cow_data(skb, 0, &trailer);
505 5024360 : if (nsg < 0)
506 : return nsg;
507 5024360 : ctx = quic_crypto_aead_mem_alloc(tfm, 0, &iv, &req, &sg, nsg);
508 5024044 : if (!ctx)
509 : return -ENOMEM;
510 :
511 5024044 : sg_init_table(sg, nsg);
512 5024172 : err = skb_to_sgvec(skb, sg, 0, len);
513 5024328 : if (err < 0)
514 0 : goto err;
515 5024328 : skb_dst_force(skb);
516 :
517 : /* Similar logic to quic_crypto_payload_encrypt(). */
518 5024522 : memcpy(nonce, rx_iv, QUIC_IV_LEN);
519 5024522 : n = cpu_to_be64(cb->number);
520 45217916 : for (i = 0; i < sizeof(n); i++)
521 40193394 : nonce[QUIC_IV_LEN - sizeof(n) + i] ^= ((u8 *)&n)[i];
522 :
523 5024522 : iv[0] = TLS_AES_CCM_IV_B0_BYTE;
524 10049044 : memcpy(&iv[ccm], nonce, QUIC_IV_LEN);
525 5024522 : aead_request_set_tfm(req, tfm);
526 5024522 : aead_request_set_ad(req, hlen);
527 5024522 : aead_request_set_crypt(req, sg, sg, len - hlen, iv);
528 5024522 : aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, (void *)quic_crypto_done, skb);
529 :
530 5024522 : err = crypto_aead_decrypt(req);
531 5024350 : if (err == -EINPROGRESS) {
532 0 : skb->destructor = quic_crypto_destruct_skb;
533 0 : skb_shinfo(skb)->destructor_arg = ctx;
534 0 : return err;
535 : }
536 5024350 : err:
537 5024350 : kfree(ctx);
538 5024350 : return err;
539 : }
540 :
541 : #define QUIC_CIPHER_MIN TLS_CIPHER_AES_GCM_128
542 : #define QUIC_CIPHER_MAX TLS_CIPHER_CHACHA20_POLY1305
543 :
544 : #define TLS_CIPHER_AES_GCM_128_SECRET_SIZE 32
545 : #define TLS_CIPHER_AES_GCM_256_SECRET_SIZE 48
546 : #define TLS_CIPHER_AES_CCM_128_SECRET_SIZE 32
547 : #define TLS_CIPHER_CHACHA20_POLY1305_SECRET_SIZE 32
548 :
549 : #define CIPHER_DESC(type, aead_name, skc_name, sha_name)[type - QUIC_CIPHER_MIN] = { \
550 : .secretlen = type ## _SECRET_SIZE, \
551 : .keylen = type ## _KEY_SIZE, \
552 : .aead = aead_name, \
553 : .skc = skc_name, \
554 : .shash = sha_name, \
555 : }
556 :
557 : static struct quic_cipher ciphers[QUIC_CIPHER_MAX + 1 - QUIC_CIPHER_MIN] = {
558 : CIPHER_DESC(TLS_CIPHER_AES_GCM_128, "gcm(aes)", "ecb(aes)", "hmac(sha256)"),
559 : CIPHER_DESC(TLS_CIPHER_AES_GCM_256, "gcm(aes)", "ecb(aes)", "hmac(sha384)"),
560 : CIPHER_DESC(TLS_CIPHER_AES_CCM_128, "ccm(aes)", "ecb(aes)", "hmac(sha256)"),
561 : CIPHER_DESC(TLS_CIPHER_CHACHA20_POLY1305,
562 : "rfc7539(chacha20,poly1305)", "chacha20", "hmac(sha256)"),
563 : };
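/* For the first entry, CIPHER_DESC() above expands to roughly
 *
 *   [TLS_CIPHER_AES_GCM_128 - QUIC_CIPHER_MIN] = {
 *           .secretlen = TLS_CIPHER_AES_GCM_128_SECRET_SIZE,
 *           .keylen    = TLS_CIPHER_AES_GCM_128_KEY_SIZE,
 *           .aead      = "gcm(aes)",
 *           .skc       = "ecb(aes)",
 *           .shash     = "hmac(sha256)",
 *   }
 *
 * i.e. index 0 of the table; the algorithm names are resolved by the kernel crypto
 * API when quic_crypto_set_cipher() allocates the transforms.
 */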
564 :
565 10032266 : static bool quic_crypto_is_cipher_ccm(struct quic_crypto *crypto)
566 : {
567 10032266 : return crypto->cipher_type == TLS_CIPHER_AES_CCM_128;
568 : }
569 :
570 10032384 : static bool quic_crypto_is_cipher_chacha(struct quic_crypto *crypto)
571 : {
572 10032384 : return crypto->cipher_type == TLS_CIPHER_CHACHA20_POLY1305;
573 : }
574 :
575 : /* Encrypts a QUIC packet before transmission. This function performs AEAD encryption of
576 : * the packet payload and applies header protection. It handles key phase tracking and key
577 : * update timing.
578 : *
579 : * Return: 0 on success, or a negative error code.
580 : */
581 5008078 : int quic_crypto_encrypt(struct quic_crypto *crypto, struct sk_buff *skb)
582 : {
583 5008078 : u8 *iv, cha, ccm, phase = crypto->key_phase;
584 5008078 : struct quic_skb_cb *cb = QUIC_SKB_CB(skb);
585 5008078 : int err;
586 :
587 5008078 : cb->key_phase = phase;
588 5008078 : iv = crypto->tx_iv[phase];
589 : /* The payload is already encrypted (e.g., resumed from an async completion); proceed
590 : * to header protection only.
591 : */
592 5008078 : if (cb->resume)
593 0 : goto out;
594 :
595 : /* If a key update is pending and this is the first packet using the new key, save the
596 : * current time. It is later used to discard the old keys once enough time has passed
597 : * (see quic_crypto_decrypt()).
598 : */
599 5008078 : if (crypto->key_pending && !crypto->key_update_send_time)
600 15 : crypto->key_update_send_time = jiffies_to_usecs(jiffies);
601 :
602 5008078 : ccm = quic_crypto_is_cipher_ccm(crypto);
603 5008078 : err = quic_crypto_payload_encrypt(crypto->tx_tfm[phase], skb, iv, ccm);
604 5007873 : if (err)
605 : return err;
606 5007873 : out:
607 5007873 : cha = quic_crypto_is_cipher_chacha(crypto);
608 5007873 : return quic_crypto_header_encrypt(crypto->tx_hp_tfm, skb, cha);
609 : }
610 :
611 : /* Decrypts a QUIC packet after reception. This function removes header protection,
612 : * decrypts the payload, and processes any key updates if the key phase bit changes.
613 : *
614 : * Return: 0 on success, or a negative error code.
615 : */
616 6575623 : int quic_crypto_decrypt(struct quic_crypto *crypto, struct sk_buff *skb)
617 : {
618 6575623 : struct quic_skb_cb *cb = QUIC_SKB_CB(skb);
619 6575623 : u8 *iv, cha, ccm, phase;
620 6575623 : int err = 0;
621 6575623 : u32 time;
622 :
623 : /* Payload was decrypted asynchronously. Proceed with parsing packet number and key
624 : * phase.
625 : */
626 6575623 : if (cb->resume) {
627 1551112 : quic_crypto_get_header(skb);
628 1550588 : goto out;
629 : }
630 :
631 5024511 : cha = quic_crypto_is_cipher_chacha(crypto);
632 5024511 : err = quic_crypto_header_decrypt(crypto->rx_hp_tfm, skb, cha);
633 5024188 : if (err) {
634 0 : pr_debug("%s: hd decrypt err %d\n", __func__, err);
635 0 : return err;
636 : }
637 :
638 : /* rfc9001#section-6:
639 : *
640 : * The Key Phase bit allows a recipient to detect a change in keying material without
641 : * needing to receive the first packet that triggered the change. An endpoint that
642 : * notices a changed Key Phase bit updates keys and decrypts the packet that contains
643 : * the changed value.
644 : */
645 5024188 : if (cb->key_phase != crypto->key_phase && !crypto->key_pending) {
646 21 : if (!crypto->send_ready) /* Not ready for key update. */
647 : return -EINVAL;
648 21 : err = quic_crypto_key_update(crypto); /* Perform a key update. */
649 21 : if (err) {
650 0 : cb->errcode = QUIC_TRANSPORT_ERROR_KEY_UPDATE;
651 0 : return err;
652 : }
653 21 : cb->key_update = 1; /* Mark packet as triggering key update. */
654 : }
655 :
656 5024188 : phase = cb->key_phase;
657 5024188 : iv = crypto->rx_iv[phase];
658 5024188 : ccm = quic_crypto_is_cipher_ccm(crypto);
659 5024188 : err = quic_crypto_payload_decrypt(crypto->rx_tfm[phase], skb, iv, ccm);
660 5024352 : if (err) {
661 4 : if (err == -EINPROGRESS)
662 : return err;
663 : /* If the old keys can no longer decrypt packets, the peer might have
664 : * started another key update. Clear the stale key_pending so that
665 : * subsequent packets can trigger the new key update.
666 : */
667 4 : if (crypto->key_pending && cb->key_phase != crypto->key_phase) {
668 1 : crypto->key_pending = 0;
669 1 : crypto->key_update_time = 0;
670 : }
671 4 : return err;
672 : }
673 :
674 5024348 : out:
675 : /* rfc9001#section-6.1:
676 : *
677 : * An endpoint MUST retain old keys until it has successfully unprotected a
678 : * packet sent using the new keys. An endpoint SHOULD retain old keys for
679 : * some time after unprotecting a packet sent using the new keys.
680 : */
681 6574936 : if (crypto->key_pending && cb->key_phase == crypto->key_phase) {
682 467 : time = crypto->key_update_send_time;
683 467 : if (time && jiffies_to_usecs(jiffies) - time >= crypto->key_update_time) {
684 39 : crypto->key_pending = 0;
685 39 : crypto->key_update_time = 0;
686 : }
687 : }
688 : return err;
689 : }
690 :
691 3673 : int quic_crypto_set_cipher(struct quic_crypto *crypto, u32 type, u8 flag)
692 : {
693 3673 : struct quic_cipher *cipher;
694 3673 : int err = -EINVAL;
695 3673 : void *tfm;
696 :
697 3673 : if (type < QUIC_CIPHER_MIN || type > QUIC_CIPHER_MAX)
698 : return -EINVAL;
699 :
700 3673 : cipher = &ciphers[type - QUIC_CIPHER_MIN];
701 3673 : tfm = crypto_alloc_shash(cipher->shash, 0, 0);
702 3673 : if (IS_ERR(tfm))
703 0 : return PTR_ERR(tfm);
704 3673 : crypto->secret_tfm = tfm;
705 :
706 : /* Request a synchronous AEAD by passing CRYPTO_ALG_ASYNC in the allocation mask
707 : * (with type 0), so tag generation never relies on async completion callbacks.
708 : */
709 3673 : tfm = crypto_alloc_aead(cipher->aead, 0, CRYPTO_ALG_ASYNC);
710 3673 : if (IS_ERR(tfm)) {
711 0 : err = PTR_ERR(tfm);
712 0 : goto err;
713 : }
714 3673 : crypto->tag_tfm = tfm;
715 :
716 : /* Allocate an AEAD transform per RX key phase, plus the RX header protection cipher. */
717 3673 : tfm = crypto_alloc_aead(cipher->aead, 0, flag);
718 3673 : if (IS_ERR(tfm)) {
719 0 : err = PTR_ERR(tfm);
720 0 : goto err;
721 : }
722 3673 : crypto->rx_tfm[0] = tfm;
723 3673 : tfm = crypto_alloc_aead(cipher->aead, 0, flag);
724 3673 : if (IS_ERR(tfm)) {
725 0 : err = PTR_ERR(tfm);
726 0 : goto err;
727 : }
728 3673 : crypto->rx_tfm[1] = tfm;
729 3673 : tfm = crypto_alloc_sync_skcipher(cipher->skc, 0, 0);
730 3673 : if (IS_ERR(tfm)) {
731 0 : err = PTR_ERR(tfm);
732 0 : goto err;
733 : }
734 3673 : crypto->rx_hp_tfm = tfm;
735 :
736 : /* Allocate an AEAD transform per TX key phase, plus the TX header protection cipher. */
737 3673 : tfm = crypto_alloc_aead(cipher->aead, 0, flag);
738 3673 : if (IS_ERR(tfm)) {
739 0 : err = PTR_ERR(tfm);
740 0 : goto err;
741 : }
742 3673 : crypto->tx_tfm[0] = tfm;
743 3673 : tfm = crypto_alloc_aead(cipher->aead, 0, flag);
744 3673 : if (IS_ERR(tfm)) {
745 0 : err = PTR_ERR(tfm);
746 0 : goto err;
747 : }
748 3673 : crypto->tx_tfm[1] = tfm;
749 3673 : tfm = crypto_alloc_sync_skcipher(cipher->skc, 0, 0);
750 3673 : if (IS_ERR(tfm)) {
751 0 : err = PTR_ERR(tfm);
752 0 : goto err;
753 : }
754 3673 : crypto->tx_hp_tfm = tfm;
755 :
756 3673 : crypto->cipher = cipher;
757 3673 : crypto->cipher_type = type;
758 3673 : return 0;
759 0 : err:
760 0 : quic_crypto_free(crypto);
761 0 : return err;
762 : }
763 :
764 7299 : int quic_crypto_set_secret(struct quic_crypto *crypto, struct quic_crypto_secret *srt,
765 : u32 version, u8 flag)
766 : {
767 7299 : int err;
768 :
769 : /* If no cipher has been initialized yet, set it up. */
770 7299 : if (!crypto->cipher) {
771 2072 : err = quic_crypto_set_cipher(crypto, srt->type, flag);
772 2072 : if (err)
773 : return err;
774 : }
775 :
776 : /* Handle RX path setup. */
777 7299 : if (!srt->send) {
778 3677 : crypto->version = version;
779 7354 : memcpy(crypto->rx_secret, srt->secret, crypto->cipher->secretlen);
780 3677 : err = quic_crypto_rx_keys_derive_and_install(crypto);
781 3677 : if (err)
782 : return err;
783 3677 : crypto->recv_ready = 1;
784 3677 : return 0;
785 : }
786 :
787 : /* Handle TX path setup. */
788 3622 : crypto->version = version;
789 7244 : memcpy(crypto->tx_secret, srt->secret, crypto->cipher->secretlen);
790 3622 : err = quic_crypto_tx_keys_derive_and_install(crypto);
791 3622 : if (err)
792 : return err;
793 3622 : crypto->send_ready = 1;
794 3622 : return 0;
795 : }
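/* A minimal usage sketch for quic_crypto_set_secret() above, relying only on the
 * fields referenced in this file (type, send, secret); "tls_write_secret" is a
 * placeholder for the secret exported by the TLS handshake, and a flag of 0 leaves
 * async transforms allowed:
 */
static int quic_crypto_install_tx_secret_example(struct quic_crypto *crypto,
						 const u8 *tls_write_secret)
{
	struct quic_crypto_secret srt = {};

	srt.type = TLS_CIPHER_AES_GCM_128;
	srt.send = 1;	/* install TX (write) keys */
	memcpy(srt.secret, tls_write_secret, TLS_CIPHER_AES_GCM_128_SECRET_SIZE);
	return quic_crypto_set_secret(crypto, &srt, QUIC_VERSION_V1, 0);
}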
796 :
797 0 : int quic_crypto_get_secret(struct quic_crypto *crypto, struct quic_crypto_secret *srt)
798 : {
799 0 : u8 *secret;
800 :
801 0 : if (!crypto->cipher)
802 : return -EINVAL;
803 0 : srt->type = crypto->cipher_type;
804 0 : secret = srt->send ? crypto->tx_secret : crypto->rx_secret;
805 0 : memcpy(srt->secret, secret, crypto->cipher->secretlen);
806 0 : return 0;
807 : }
808 :
809 : /* Initiating a Key Update. */
810 45 : int quic_crypto_key_update(struct quic_crypto *crypto)
811 : {
812 45 : u8 tx_secret[QUIC_SECRET_LEN], rx_secret[QUIC_SECRET_LEN];
813 45 : struct quic_data l = {KU_LABEL_V1, strlen(KU_LABEL_V1)};
814 45 : struct quic_data z = {}, k, srt;
815 45 : u32 secret_len;
816 45 : int err;
817 :
818 45 : if (crypto->key_pending || !crypto->recv_ready)
819 : return -EINVAL;
820 :
821 : /* rfc9001#section-6.1:
822 : *
823 : * Endpoints maintain separate read and write secrets for packet protection. An
824 : * endpoint initiates a key update by updating its packet protection write secret
825 : * and using that to protect new packets. The endpoint creates a new write secret
826 : * from the existing write secret. This uses the KDF function provided by TLS with
827 : * a label of "quic ku". The corresponding key and IV are created from that
828 : * secret. The header protection key is not updated.
829 : *
830 : * For example, to update write keys with TLS 1.3, HKDF-Expand-Label is used as:
831 : * secret_<n+1> = HKDF-Expand-Label(secret_<n>, "quic ku",
832 : * "", Hash.length)
833 : */
834 41 : secret_len = crypto->cipher->secretlen;
835 41 : if (crypto->version == QUIC_VERSION_V2)
836 0 : quic_data(&l, KU_LABEL_V2, strlen(KU_LABEL_V2));
837 :
838 41 : crypto->key_pending = 1;
839 82 : memcpy(tx_secret, crypto->tx_secret, secret_len);
840 82 : memcpy(rx_secret, crypto->rx_secret, secret_len);
841 41 : crypto->key_phase = !crypto->key_phase;
842 :
843 41 : quic_data(&srt, tx_secret, secret_len);
844 41 : quic_data(&k, crypto->tx_secret, secret_len);
845 41 : err = quic_crypto_hkdf_expand(crypto->secret_tfm, &srt, &l, &z, &k);
846 41 : if (err)
847 0 : goto err;
848 41 : err = quic_crypto_tx_keys_derive_and_install(crypto);
849 41 : if (err)
850 0 : goto err;
851 :
852 41 : quic_data(&srt, rx_secret, secret_len);
853 41 : quic_data(&k, crypto->rx_secret, secret_len);
854 41 : err = quic_crypto_hkdf_expand(crypto->secret_tfm, &srt, &l, &z, &k);
855 41 : if (err)
856 0 : goto err;
857 41 : err = quic_crypto_rx_keys_derive_and_install(crypto);
858 41 : if (err)
859 0 : goto err;
860 : return 0;
861 0 : err:
862 0 : crypto->key_pending = 0;
863 0 : memcpy(crypto->tx_secret, tx_secret, secret_len);
864 0 : memcpy(crypto->rx_secret, rx_secret, secret_len);
865 0 : crypto->key_phase = !crypto->key_phase;
866 0 : return err;
867 : }
868 :
869 4948 : void quic_crypto_free(struct quic_crypto *crypto)
870 : {
871 4948 : if (crypto->tag_tfm)
872 3666 : crypto_free_aead(crypto->tag_tfm);
873 4949 : if (crypto->rx_tfm[0])
874 3667 : crypto_free_aead(crypto->rx_tfm[0]);
875 4949 : if (crypto->rx_tfm[1])
876 3667 : crypto_free_aead(crypto->rx_tfm[1]);
877 4949 : if (crypto->tx_tfm[0])
878 3667 : crypto_free_aead(crypto->tx_tfm[0]);
879 4949 : if (crypto->tx_tfm[1])
880 3667 : crypto_free_aead(crypto->tx_tfm[1]);
881 4949 : if (crypto->secret_tfm)
882 3667 : crypto_free_shash(crypto->secret_tfm);
883 4948 : if (crypto->rx_hp_tfm)
884 3666 : crypto_free_skcipher(crypto->rx_hp_tfm);
885 4949 : if (crypto->tx_hp_tfm)
886 3667 : crypto_free_skcipher(crypto->tx_hp_tfm);
887 :
888 4949 : memset(crypto, 0, offsetof(struct quic_crypto, send_offset));
889 4949 : }
890 :
891 : #define QUIC_INITIAL_SALT_V1 \
892 : "\x38\x76\x2c\xf7\xf5\x59\x34\xb3\x4d\x17\x9a\xe6\xa4\xc8\x0c\xad\xcc\xbb\x7f\x0a"
893 : #define QUIC_INITIAL_SALT_V2 \
894 : "\x0d\xed\xe3\xde\xf7\x00\xa6\xdb\x81\x93\x81\xbe\x6e\x26\x9d\xcb\xf9\xbd\x2e\xd9"
895 :
896 : #define QUIC_INITIAL_SALT_LEN 20
897 :
898 : /* Initial Secrets. */
899 1620 : int quic_crypto_initial_keys_install(struct quic_crypto *crypto, struct quic_conn_id *conn_id,
900 : u32 version, bool is_serv)
901 : {
902 1620 : u8 secret[TLS_CIPHER_AES_GCM_128_SECRET_SIZE];
903 1620 : struct quic_data salt, s, k, l, dcid, z = {};
904 1620 : struct quic_crypto_secret srt = {};
905 1620 : char *tl, *rl, *sal;
906 1620 : int err;
907 :
908 : /* rfc9001#section-5.2:
909 : *
910 : * The secret used by clients to construct Initial packets uses the PRK and the
911 : * label "client in" as input to the HKDF-Expand-Label function from TLS [TLS13]
912 : * to produce a 32-byte secret. Packets constructed by the server use the same
913 : * process with the label "server in". The hash function for HKDF when deriving
914 : * initial secrets and keys is SHA-256 [SHA].
915 : *
916 : * This process in pseudocode is:
917 : *
918 : * initial_salt = 0x38762cf7f55934b34d179ae6a4c80cadccbb7f0a
919 : * initial_secret = HKDF-Extract(initial_salt,
920 : * client_dst_connection_id)
921 : *
922 : * client_initial_secret = HKDF-Expand-Label(initial_secret,
923 : * "client in", "",
924 : * Hash.length)
925 : * server_initial_secret = HKDF-Expand-Label(initial_secret,
926 : * "server in", "",
927 : * Hash.length)
928 : */
929 1620 : if (is_serv) {
930 : rl = "client in";
931 : tl = "server in";
932 : } else {
933 601 : tl = "client in";
934 601 : rl = "server in";
935 : }
936 1620 : sal = QUIC_INITIAL_SALT_V1;
937 1620 : if (version == QUIC_VERSION_V2)
938 20 : sal = QUIC_INITIAL_SALT_V2;
939 1620 : quic_data(&salt, sal, QUIC_INITIAL_SALT_LEN);
940 1620 : quic_data(&dcid, conn_id->data, conn_id->len);
941 1620 : quic_data(&s, secret, TLS_CIPHER_AES_GCM_128_SECRET_SIZE);
942 1620 : err = quic_crypto_hkdf_extract(crypto->secret_tfm, &salt, &dcid, &s);
943 1620 : if (err)
944 : return err;
945 :
946 3240 : quic_data(&l, tl, strlen(tl));
947 1620 : quic_data(&k, srt.secret, TLS_CIPHER_AES_GCM_128_SECRET_SIZE);
948 1620 : srt.type = TLS_CIPHER_AES_GCM_128;
949 1620 : srt.send = 1;
950 1620 : err = quic_crypto_hkdf_expand(crypto->secret_tfm, &s, &l, &z, &k);
951 1620 : if (err)
952 : return err;
953 : /* Enforce synchronous crypto for the Initial level by passing CRYPTO_ALG_ASYNC as the
954 : * allocation mask, which selects only synchronous algorithms.
955 : */
956 1620 : err = quic_crypto_set_secret(crypto, &srt, version, CRYPTO_ALG_ASYNC);
957 1620 : if (err)
958 : return err;
959 :
960 3240 : quic_data(&l, rl, strlen(rl));
961 1620 : quic_data(&k, srt.secret, TLS_CIPHER_AES_GCM_128_SECRET_SIZE);
962 1620 : srt.type = TLS_CIPHER_AES_GCM_128;
963 1620 : srt.send = 0;
964 1620 : err = quic_crypto_hkdf_expand(crypto->secret_tfm, &s, &l, &z, &k);
965 1620 : if (err)
966 : return err;
967 1620 : return quic_crypto_set_secret(crypto, &srt, version, CRYPTO_ALG_ASYNC);
968 : }
969 :
970 : #define QUIC_RETRY_KEY_V1 "\xbe\x0c\x69\x0b\x9f\x66\x57\x5a\x1d\x76\x6b\x54\xe3\x68\xc8\x4e"
971 : #define QUIC_RETRY_KEY_V2 "\x8f\xb4\xb0\x1b\x56\xac\x48\xe2\x60\xfb\xcb\xce\xad\x7c\xcc\x92"
972 :
973 : #define QUIC_RETRY_NONCE_V1 "\x46\x15\x99\xd3\x5d\x63\x2b\xf2\x23\x98\x25\xbb"
974 : #define QUIC_RETRY_NONCE_V2 "\xd8\x69\x69\xbc\x2d\x7c\x6d\x99\x90\xef\xb0\x4a"
975 :
976 : /* Retry Packet Integrity. */
977 11 : int quic_crypto_get_retry_tag(struct quic_crypto *crypto, struct sk_buff *skb,
978 : struct quic_conn_id *odcid, u32 version, u8 *tag)
979 : {
980 11 : struct crypto_aead *tfm = crypto->tag_tfm;
981 11 : u8 *pseudo_retry, *p, *iv, *key;
982 11 : struct aead_request *req;
983 11 : struct scatterlist *sg;
984 11 : u32 plen;
985 11 : int err;
986 :
987 : /* rfc9001#section-5.8:
988 : *
989 : * The Retry Integrity Tag is a 128-bit field that is computed as the output of
990 : * AEAD_AES_128_GCM used with the following inputs:
991 : *
992 : * - The secret key, K, is 128 bits equal to 0xbe0c690b9f66575a1d766b54e368c84e.
993 : * - The nonce, N, is 96 bits equal to 0x461599d35d632bf2239825bb.
994 : * - The plaintext, P, is empty.
995 : * - The associated data, A, is the contents of the Retry Pseudo-Packet,
996 : *
997 : * The Retry Pseudo-Packet is not sent over the wire. It is computed by taking the
998 : * transmitted Retry packet, removing the Retry Integrity Tag, and prepending the
999 : * two following fields: ODCID Length + Original Destination Connection ID (ODCID).
1000 : */
1001 11 : err = crypto_aead_setauthsize(tfm, QUIC_TAG_LEN);
1002 11 : if (err)
1003 : return err;
1004 11 : key = QUIC_RETRY_KEY_V1;
1005 11 : if (version == QUIC_VERSION_V2)
1006 0 : key = QUIC_RETRY_KEY_V2;
1007 11 : err = crypto_aead_setkey(tfm, key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
1008 11 : if (err)
1009 : return err;
1010 :
1011 11 : plen = 1 + odcid->len + skb->len - QUIC_TAG_LEN;
1012 11 : pseudo_retry = quic_crypto_aead_mem_alloc(tfm, plen + QUIC_TAG_LEN, &iv, &req, &sg, 1);
1013 11 : if (!pseudo_retry)
1014 : return -ENOMEM;
1015 :
1016 11 : p = pseudo_retry;
1017 11 : p = quic_put_int(p, odcid->len, 1);
1018 11 : p = quic_put_data(p, odcid->data, odcid->len);
1019 11 : p = quic_put_data(p, skb->data, skb->len - QUIC_TAG_LEN);
1020 11 : sg_init_one(sg, pseudo_retry, plen + QUIC_TAG_LEN);
1021 :
1022 22 : memcpy(iv, QUIC_RETRY_NONCE_V1, QUIC_IV_LEN);
1023 11 : if (version == QUIC_VERSION_V2)
1024 0 : memcpy(iv, QUIC_RETRY_NONCE_V2, QUIC_IV_LEN);
1025 11 : aead_request_set_tfm(req, tfm);
1026 11 : aead_request_set_ad(req, plen);
1027 11 : aead_request_set_crypt(req, sg, sg, 0, iv);
1028 11 : err = crypto_aead_encrypt(req);
1029 11 : if (!err)
1030 22 : memcpy(tag, p, QUIC_TAG_LEN);
1031 11 : kfree(pseudo_retry);
1032 11 : return err;
1033 : }
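/* The pseudo-retry buffer assembled above is laid out as
 *
 *   [ODCID length:1][ODCID][transmitted Retry packet minus its Integrity Tag]
 *
 * and is passed entirely as associated data with an empty plaintext, so the AEAD
 * output is just the QUIC_TAG_LEN-byte tag, written at p and copied out on success.
 */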
1034 :
1035 : /* Generate a token for Retry or address validation.
1036 : *
1037 : * Builds a token with the format: [client address][timestamp][original DCID][auth tag]
1038 : *
1039 : * Encrypts the token (excluding the first flag byte) using AES-GCM with a key and IV
1040 : * derived via HKDF. The original DCID is stored to be recovered later from a Client
1041 : * Initial packet. Ensures the token is bound to the client address and time, preventing
1042 : * reuse or tampering.
1043 : *
1044 : * Returns 0 on success or a negative error code on failure.
1045 : */
1046 432 : int quic_crypto_generate_token(struct quic_crypto *crypto, void *addr, u32 addrlen,
1047 : struct quic_conn_id *conn_id, u8 *token, u32 *tlen)
1048 : {
1049 432 : u8 key[TLS_CIPHER_AES_GCM_128_KEY_SIZE], iv[QUIC_IV_LEN], *retry_token, *tx_iv, *p;
1050 432 : struct crypto_aead *tfm = crypto->tag_tfm;
1051 432 : u32 ts = jiffies_to_usecs(jiffies), len;
1052 432 : struct quic_data srt = {}, k, i;
1053 432 : struct aead_request *req;
1054 432 : struct scatterlist *sg;
1055 432 : int err;
1056 :
1057 432 : quic_data(&srt, quic_random_data, QUIC_RANDOM_DATA_LEN);
1058 432 : quic_data(&k, key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
1059 432 : quic_data(&i, iv, QUIC_IV_LEN);
1060 432 : err = quic_crypto_keys_derive(crypto->secret_tfm, &srt, &k, &i, NULL, QUIC_VERSION_V1);
1061 432 : if (err)
1062 : return err;
1063 432 : err = crypto_aead_setauthsize(tfm, QUIC_TAG_LEN);
1064 432 : if (err)
1065 : return err;
1066 432 : err = crypto_aead_setkey(tfm, key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
1067 432 : if (err)
1068 : return err;
1069 432 : token++;
1070 432 : len = addrlen + sizeof(ts) + conn_id->len + QUIC_TAG_LEN;
1071 432 : retry_token = quic_crypto_aead_mem_alloc(tfm, len, &tx_iv, &req, &sg, 1);
1072 432 : if (!retry_token)
1073 : return -ENOMEM;
1074 :
1075 432 : p = retry_token;
1076 432 : p = quic_put_data(p, addr, addrlen);
1077 432 : p = quic_put_int(p, ts, sizeof(ts));
1078 432 : quic_put_data(p, conn_id->data, conn_id->len);
1079 432 : sg_init_one(sg, retry_token, len);
1080 432 : aead_request_set_tfm(req, tfm);
1081 432 : aead_request_set_ad(req, addrlen);
1082 432 : aead_request_set_crypt(req, sg, sg, len - addrlen - QUIC_TAG_LEN, iv);
1083 432 : err = crypto_aead_encrypt(req);
1084 432 : if (!err) {
1085 864 : memcpy(token, retry_token, len);
1086 432 : *tlen = len + 1;
1087 : }
1088 432 : kfree(retry_token);
1089 432 : return err;
1090 : }
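/* The token produced above is laid out as
 *
 *   [flag:1][client address:addrlen][timestamp:4][ODCID:conn_id->len][AEAD tag:QUIC_TAG_LEN]
 *
 * where the leading flag byte is filled in by the caller (this function writes from
 * token + 1). The client address is covered as associated data only; the timestamp
 * and ODCID are encrypted.
 */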
1091 :
1092 : /* Validate a Retry or address validation token.
1093 : *
1094 : * Decrypts the token using the derived key and IV. Checks that the decrypted address matches
1095 : * the provided address, and validates the embedded timestamp against the current time with a
1096 : * timeout that depends on the token type. If applicable, it extracts and returns the original
1097 : * destination connection ID (ODCID) for Retry packets.
1098 : *
1099 : * Returns 0 if the token is valid, -EINVAL if invalid, or another negative error code.
1100 : */
1101 4 : int quic_crypto_verify_token(struct quic_crypto *crypto, void *addr, u32 addrlen,
1102 : struct quic_conn_id *conn_id, u8 *token, u32 len)
1103 : {
1104 4 : u32 ts = jiffies_to_usecs(jiffies), timeout = QUIC_TOKEN_TIMEOUT_RETRY;
1105 4 : u8 key[TLS_CIPHER_AES_GCM_128_KEY_SIZE], iv[QUIC_IV_LEN];
1106 4 : u8 *retry_token, *rx_iv, *p, flag = *token;
1107 4 : struct crypto_aead *tfm = crypto->tag_tfm;
1108 4 : struct quic_data srt = {}, k, i;
1109 4 : struct aead_request *req;
1110 4 : struct scatterlist *sg;
1111 4 : int err;
1112 4 : u64 t;
1113 :
1114 4 : quic_data(&srt, quic_random_data, QUIC_RANDOM_DATA_LEN);
1115 4 : quic_data(&k, key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
1116 4 : quic_data(&i, iv, QUIC_IV_LEN);
1117 4 : err = quic_crypto_keys_derive(crypto->secret_tfm, &srt, &k, &i, NULL, QUIC_VERSION_V1);
1118 4 : if (err)
1119 : return err;
1120 4 : err = crypto_aead_setauthsize(tfm, QUIC_TAG_LEN);
1121 4 : if (err)
1122 : return err;
1123 4 : err = crypto_aead_setkey(tfm, key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
1124 4 : if (err)
1125 : return err;
1126 4 : len--;
1127 4 : token++;
1128 4 : retry_token = quic_crypto_aead_mem_alloc(tfm, len, &rx_iv, &req, &sg, 1);
1129 4 : if (!retry_token)
1130 : return -ENOMEM;
1131 :
1132 8 : memcpy(retry_token, token, len);
1133 4 : sg_init_one(sg, retry_token, len);
1134 4 : aead_request_set_tfm(req, tfm);
1135 4 : aead_request_set_ad(req, addrlen);
1136 4 : aead_request_set_crypt(req, sg, sg, len - addrlen, iv);
1137 4 : err = crypto_aead_decrypt(req);
1138 4 : if (err)
1139 0 : goto out;
1140 :
1141 4 : err = -EINVAL;
1142 4 : p = retry_token;
1143 8 : if (memcmp(p, addr, addrlen))
1144 0 : goto out;
1145 4 : p += addrlen;
1146 4 : len -= addrlen;
1147 4 : if (flag == QUIC_TOKEN_FLAG_REGULAR)
1148 0 : timeout = QUIC_TOKEN_TIMEOUT_REGULAR;
1149 4 : if (!quic_get_int(&p, &len, &t, sizeof(ts)) || t + timeout < ts)
1150 0 : goto out;
1151 4 : len -= QUIC_TAG_LEN;
1152 4 : if (len > QUIC_CONN_ID_MAX_LEN)
1153 0 : goto out;
1154 :
1155 4 : if (flag == QUIC_TOKEN_FLAG_RETRY)
1156 4 : quic_conn_id_update(conn_id, p, len);
1157 : err = 0;
1158 4 : out:
1159 4 : kfree(retry_token);
1160 4 : return err;
1161 : }
1162 :
1163 : /* Generate a derived key using HKDF-Extract and HKDF-Expand with a given label. */
1164 6450 : static int quic_crypto_generate_key(struct quic_crypto *crypto, void *data, u32 len,
1165 : char *label, u8 *token, u32 key_len)
1166 : {
1167 6450 : struct crypto_shash *tfm = crypto->secret_tfm;
1168 6450 : u8 secret[TLS_CIPHER_AES_GCM_128_SECRET_SIZE];
1169 6450 : struct quic_data salt, s, l, k, z = {};
1170 6450 : int err;
1171 :
1172 6450 : quic_data(&salt, data, len);
1173 6450 : quic_data(&k, quic_random_data, QUIC_RANDOM_DATA_LEN);
1174 6450 : quic_data(&s, secret, TLS_CIPHER_AES_GCM_128_SECRET_SIZE);
1175 6450 : err = quic_crypto_hkdf_extract(tfm, &salt, &k, &s);
1176 6450 : if (err)
1177 : return err;
1178 :
1179 12900 : quic_data(&l, label, strlen(label));
1180 6450 : quic_data(&k, token, key_len);
1181 6450 : return quic_crypto_hkdf_expand(tfm, &s, &l, &z, &k);
1182 : }
1183 :
1184 : /* Derive a stateless reset token from connection-specific input. */
1185 6080 : int quic_crypto_generate_stateless_reset_token(struct quic_crypto *crypto, void *data,
1186 : u32 len, u8 *key, u32 key_len)
1187 : {
1188 6080 : return quic_crypto_generate_key(crypto, data, len, "stateless_reset", key, key_len);
1189 : }
1190 :
1191 : /* Derive a session ticket key using HKDF from connection-specific input. */
1192 370 : int quic_crypto_generate_session_ticket_key(struct quic_crypto *crypto, void *data,
1193 : u32 len, u8 *key, u32 key_len)
1194 : {
1195 370 : return quic_crypto_generate_key(crypto, data, len, "session_ticket", key, key_len);
1196 : }
1197 :
1198 1 : void quic_crypto_init(void)
1199 : {
1200 1 : get_random_bytes(quic_random_data, QUIC_RANDOM_DATA_LEN);
1201 1 : }