LCOV - code coverage report
Current view: top level - home/net-next/net/quic - timer.c (source / functions)
Test:         quic.info
Date:         2025-07-04 13:24:45
                    Hit     Total   Coverage
      Lines:        137       149     91.9 %
      Functions:     15        16     93.8 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0-or-later
       2             : /* QUIC kernel implementation
       3             :  * (C) Copyright Red Hat Corp. 2023
       4             :  *
       5             :  * This file is part of the QUIC kernel implementation
       6             :  *
       7             :  * Timer handling for QUIC protocol support.
       8             :  *
       9             :  * Written or modified by:
      10             :  *    Xin Long <lucien.xin@gmail.com>
      11             :  */
      12             : 
      13             : #include "socket.h"
      14             : 
      15       54475 : void quic_timer_sack_handler(struct sock *sk)
      16             : {
      17       54475 :         struct quic_pnspace *space = quic_pnspace(sk, QUIC_CRYPTO_APP);
      18       54475 :         struct quic_inqueue *inq = quic_inq(sk);
      19       54475 :         struct quic_connection_close close = {};
      20             : 
      21       54475 :         if (quic_is_closed(sk))
      22          23 :                 return;
      23             : 
      24       54471 :         if (inq->sack_flag == QUIC_SACK_FLAG_NONE) { /* Idle timer expired, close the connection. */
      25          19 :                 quic_inq_event_recv(sk, QUIC_EVENT_CONNECTION_CLOSE, &close);
      26          19 :                 quic_set_state(sk, QUIC_SS_CLOSED);
      27             : 
      28          19 :                 pr_debug("%s: idle timeout\n", __func__);
      29          19 :                 return;
      30             :         }
      31             : 
      32       54452 :         if (inq->sack_flag == QUIC_SACK_FLAG_APP) {
      33       52132 :                 space->need_sack = 1; /* Request an APP-level ACK frame to be generated. */
      34       52132 :                 space->sack_path = 0; /* Send delayed ACK only on the active path. */
      35             :         }
      36             : 
      37       54452 :         quic_outq_transmit(sk); /* Transmit necessary frames, including ACKs or others queued. */
      38       54452 :         inq->sack_flag = QUIC_SACK_FLAG_NONE; /* Start as idle timer. */
      39       54452 :         quic_timer_start(sk, QUIC_TIMER_IDLE, inq->timeout);
      40             : }
      41             : 
      42       54475 : static void quic_timer_sack_timeout(struct timer_list *t)
      43             : {
      44       54475 :         struct quic_sock *qs = container_of(t, struct quic_sock, timers[QUIC_TIMER_SACK].t);
      45       54475 :         struct sock *sk = &qs->inet.sk;
      46             : 
      47       54475 :         bh_lock_sock(sk);
      48       54475 :         if (sock_owned_by_user(sk)) {
      49         595 :                 if (!test_and_set_bit(QUIC_SACK_DEFERRED, &sk->sk_tsq_flags))
      50         595 :                         sock_hold(sk);
      51         595 :                 goto out;
      52             :         }
      53             : 
      54       53880 :         quic_timer_sack_handler(sk);
      55       54475 : out:
      56       54475 :         bh_unlock_sock(sk);
      57       54475 :         sock_put(sk);
      58       54475 : }
      59             : 
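Every timeout callback in this file follows the same deferral pattern: if the socket is currently owned by user context (sock_owned_by_user()), the handler cannot safely run from the timer, so the callback records a QUIC_*_DEFERRED bit in sk->sk_tsq_flags and takes an extra socket reference, leaving the work for whoever releases the socket lock. Below is a minimal sketch of the consuming side, modeled on the tcp_release_cb() pattern; the function name and its wiring into the socket's release_cb hook are assumptions, not code from this file.

#include "socket.h"

/* Hedged sketch only, modeled on tcp_release_cb(); not code from this file.
 * Runs when user context releases the socket lock and consumes the deferred
 * bits set by the timeout callbacks above.
 */
static void quic_release_cb_sketch(struct sock *sk)
{
        if (test_and_clear_bit(QUIC_SACK_DEFERRED, &sk->sk_tsq_flags)) {
                quic_timer_sack_handler(sk);
                __sock_put(sk);         /* drop the reference taken in the timer */
        }
        if (test_and_clear_bit(QUIC_LOSS_DEFERRED, &sk->sk_tsq_flags)) {
                quic_timer_loss_handler(sk);
                __sock_put(sk);
        }
        /* ... and likewise for QUIC_PATH_DEFERRED, QUIC_PMTU_DEFERRED and
         * QUIC_TSQ_DEFERRED ...
         */
}
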
      60        2635 : void quic_timer_loss_handler(struct sock *sk)
      61             : {
      62           0 :         if (quic_is_closed(sk))
      63             :                 return;
      64             : 
      65        2632 :         quic_outq_transmit_pto(sk);
      66             : }
      67             : 
      68        2635 : static void quic_timer_loss_timeout(struct timer_list *t)
      69             : {
      70        2635 :         struct quic_sock *qs = container_of(t, struct quic_sock, timers[QUIC_TIMER_LOSS].t);
      71        2635 :         struct sock *sk = &qs->inet.sk;
      72             : 
      73        2635 :         bh_lock_sock(sk);
      74        2635 :         if (sock_owned_by_user(sk)) {
      75           0 :                 if (!test_and_set_bit(QUIC_LOSS_DEFERRED, &sk->sk_tsq_flags))
      76           0 :                         sock_hold(sk);
      77           0 :                 goto out;
      78             :         }
      79             : 
      80        2635 :         quic_timer_loss_handler(sk);
      81        2636 : out:
      82        2636 :         bh_unlock_sock(sk);
      83        2636 :         sock_put(sk);
      84        2636 : }
      85             : 
      86             : #define QUIC_MAX_ALT_PROBES     3
      87             : 
      88        1729 : void quic_timer_path_handler(struct sock *sk)
      89             : {
      90        1729 :         struct quic_path_group *paths = quic_paths(sk);
      91        1729 :         u8 path = 0;
      92             : 
      93        1729 :         if (quic_is_closed(sk))
      94             :                 return;
      95             : 
      96             :         /* PATH_CHALLENGE frames are reused to keep the new path alive for NAT rebind.
      97             :          * Skip probe attempt counting unless the path is explicitly in PROBING state.
      98             :          */
      99        1725 :         if (!quic_path_alt_state(paths, QUIC_PATH_ALT_PROBING))
     100        1725 :                 goto out;
     101             : 
     102             :         /* Increment probe attempts; give up if exceeded max allowed. */
     103           0 :         if (paths->alt_probes++ < QUIC_MAX_ALT_PROBES) {
     104           0 :                 path = 1;
     105           0 :                 goto out;
     106             :         }
     107             : 
     108             :         /* Probing failed; drop the alternate path. */
     109           0 :         quic_path_free(sk, paths, 1);
     110             : 
     111        1725 : out:
     112        1725 :         quic_outq_transmit_frame(sk, QUIC_FRAME_PATH_CHALLENGE, NULL, path, false);
     113        1732 :         quic_timer_reset_path(sk);
     114             : }
     115             : 
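The path timer serves two roles here. When no alternate path is being probed, the handler simply sends a PATH_CHALLENGE on the active path as a keepalive so NAT bindings survive, as the comment above notes. When an alternate path is in the PROBING state, each expiry counts as one probe attempt, and once QUIC_MAX_ALT_PROBES (3) attempts have been used the alternate path is freed and probing is abandoned. In either case the timer is re-armed by quic_timer_reset_path() below, which uses twice the current PTO, clamped up to QUIC_MIN_PATH_TIMEOUT.
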
     116        1719 : static void quic_timer_path_timeout(struct timer_list *t)
     117             : {
     118        1719 :         struct quic_sock *qs = container_of(t, struct quic_sock, timers[QUIC_TIMER_PATH].t);
     119        1719 :         struct sock *sk = &qs->inet.sk;
     120             : 
     121        1719 :         bh_lock_sock(sk);
     122        1734 :         if (sock_owned_by_user(sk)) {
     123           0 :                 if (!test_and_set_bit(QUIC_PATH_DEFERRED, &sk->sk_tsq_flags))
     124           0 :                         sock_hold(sk);
     125           0 :                 goto out;
     126             :         }
     127             : 
     128        1734 :         quic_timer_path_handler(sk);
     129        1734 : out:
     130        1734 :         bh_unlock_sock(sk);
     131        1731 :         sock_put(sk);
     132        1752 : }
     133             : 
     134     6555367 : void quic_timer_reset_path(struct sock *sk)
     135             : {
     136     6555367 :         struct quic_cong *cong = quic_cong(sk);
     137     6555367 :         u64 timeout = cong->pto * 2;
     138             : 
     139             :         /* Calculate timeout based on cong.pto, but enforce a lower bound. */
     140     6555367 :         if (timeout < QUIC_MIN_PATH_TIMEOUT)
     141             :                 timeout = QUIC_MIN_PATH_TIMEOUT;
     142     6555367 :         quic_timer_reset(sk, QUIC_TIMER_PATH, timeout);
     143     6555155 : }
     144             : 
     145          18 : void quic_timer_pmtu_handler(struct sock *sk)
     146             : {
     147           1 :         if (quic_is_closed(sk))
     148             :                 return;
     149             : 
     150          18 :         quic_outq_transmit_probe(sk);
     151             : }
     152             : 
     153          18 : static void quic_timer_pmtu_timeout(struct timer_list *t)
     154             : {
     155          18 :         struct quic_sock *qs = container_of(t, struct quic_sock, timers[QUIC_TIMER_PMTU].t);
     156          18 :         struct sock *sk = &qs->inet.sk;
     157             : 
     158          18 :         bh_lock_sock(sk);
     159          18 :         if (sock_owned_by_user(sk)) {
     160           1 :                 if (!test_and_set_bit(QUIC_PMTU_DEFERRED, &sk->sk_tsq_flags))
     161           1 :                         sock_hold(sk);
     162           1 :                 goto out;
     163             :         }
     164             : 
     165          17 :         quic_timer_pmtu_handler(sk);
     166          18 : out:
     167          18 :         bh_unlock_sock(sk);
     168          18 :         sock_put(sk);
     169          18 : }
     170             : 
     171       79975 : void quic_timer_pace_handler(struct sock *sk)
     172             : {
     173         165 :         if (quic_is_closed(sk))
     174             :                 return;
     175             : 
     176       79975 :         quic_outq_transmit(sk);
     177             : }
     178             : 
     179       80008 : static enum hrtimer_restart quic_timer_pace_timeout(struct hrtimer *hr)
     180             : {
     181       80008 :         struct quic_sock *qs = container_of(hr, struct quic_sock, timers[QUIC_TIMER_PACE].hr);
     182       80008 :         struct sock *sk = &qs->inet.sk;
     183             : 
     184       80008 :         bh_lock_sock(sk);
     185       80008 :         if (sock_owned_by_user(sk)) {
     186         198 :                 if (!test_and_set_bit(QUIC_TSQ_DEFERRED, &sk->sk_tsq_flags))
     187         165 :                         sock_hold(sk);
     188         198 :                 goto out;
     189             :         }
     190             : 
     191       79810 :         quic_timer_pace_handler(sk);
     192       80008 : out:
     193       80008 :         bh_unlock_sock(sk);
     194       80008 :         sock_put(sk);
     195       80008 :         return HRTIMER_NORESTART;
     196             : }
     197             : 
     198    13541193 : void quic_timer_reset(struct sock *sk, u8 type, u64 timeout)
     199             : {
     200    13541193 :         struct timer_list *t = quic_timer(sk, type);
     201             : 
     202    27080821 :         if (timeout && !mod_timer(t, jiffies + usecs_to_jiffies(timeout)))
     203      161237 :                 sock_hold(sk);
     204    13539603 : }
     205             : 
     206      228689 : void quic_timer_start(struct sock *sk, u8 type, u64 timeout)
     207             : {
     208      228689 :         struct timer_list *t;
     209      228689 :         struct hrtimer *hr;
     210             : 
     211      228689 :         if (type == QUIC_TIMER_PACE) {
     212      172243 :                 hr = quic_timer(sk, type);
     213             : 
     214      172243 :                 if (!hrtimer_is_queued(hr)) {
     215       80008 :                         hrtimer_start(hr, ns_to_ktime(timeout), HRTIMER_MODE_ABS_PINNED_SOFT);
     216       80008 :                         sock_hold(sk);
     217             :                 }
     218      172243 :                 return;
     219             :         }
     220             : 
     221       56446 :         t = quic_timer(sk, type);
     222       56446 :         if (timeout && !timer_pending(t)) {
     223      110908 :                 if (!mod_timer(t, jiffies + usecs_to_jiffies(timeout)))
     224       55454 :                         sock_hold(sk);
     225             :         }
     226             : }
     227             : 
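Two details in the arming helpers above are worth calling out. quic_timer_reset() always (re)arms the timer, pushing its expiry forward, whereas quic_timer_start() only arms a timer that is not already pending, so an earlier deadline is preserved; both take timeouts in microseconds and convert them with usecs_to_jiffies(). The pacing timer is different: it is started in absolute mode (HRTIMER_MODE_ABS_PINNED_SOFT) with the value converted via ns_to_ktime(), so its callers are expected to pass an absolute expiry in nanoseconds. In all cases a socket reference is taken only when an idle timer actually becomes pending (mod_timer() returning 0, or an unqueued hrtimer), and it is dropped either by the sock_put() at the end of the timeout callback or by quic_timer_stop() below when a pending timer is cancelled.
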
     228      164726 : void quic_timer_stop(struct sock *sk, u8 type)
     229             : {
     230      164726 :         if (type == QUIC_TIMER_PACE) {
     231        1118 :                 if (hrtimer_try_to_cancel(quic_timer(sk, type)) == 1)
     232           0 :                         sock_put(sk);
     233        1118 :                 return;
     234             :         }
     235      163608 :         if (timer_delete(quic_timer(sk, type)))
     236      157817 :                 sock_put(sk);
     237             : }
     238             : 
     239        1119 : void quic_timer_init(struct sock *sk)
     240             : {
     241        1119 :         timer_setup(quic_timer(sk, QUIC_TIMER_LOSS), quic_timer_loss_timeout, 0);
     242        1119 :         timer_setup(quic_timer(sk, QUIC_TIMER_SACK), quic_timer_sack_timeout, 0);
     243        1119 :         timer_setup(quic_timer(sk, QUIC_TIMER_PATH), quic_timer_path_timeout, 0);
     244        1119 :         timer_setup(quic_timer(sk, QUIC_TIMER_PMTU), quic_timer_pmtu_timeout, 0);
     245             :         /* Use hrtimer for pace timer, ensuring precise control over send timing. */
     246        1119 :         hrtimer_setup(quic_timer(sk, QUIC_TIMER_PACE), quic_timer_pace_timeout,
     247             :                       CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_SOFT);
     248        1119 : }
     249             : 
     250        1118 : void quic_timer_free(struct sock *sk)
     251             : {
     252        1118 :         quic_timer_stop(sk, QUIC_TIMER_LOSS);
     253        1118 :         quic_timer_stop(sk, QUIC_TIMER_SACK);
     254        1118 :         quic_timer_stop(sk, QUIC_TIMER_PATH);
     255        1118 :         quic_timer_stop(sk, QUIC_TIMER_PMTU);
     256        1118 :         quic_timer_stop(sk, QUIC_TIMER_PACE);
     257        1118 : }
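
Finally, a hedged sketch of how these setup and teardown helpers might bracket a socket's lifetime; the call sites are assumptions inferred from this file alone, not taken from the rest of the stack.

/* Hedged lifecycle sketch; call sites are assumptions, not from this file. */
static void quic_timer_lifecycle_sketch(struct sock *sk)
{
        struct quic_inqueue *inq = quic_inq(sk);

        quic_timer_init(sk);    /* at socket creation: set up all five timers */

        /* Arm the idle timer (QUIC_TIMER_IDLE appears to share the SACK
         * timer slot, with inq->sack_flag telling the handler which role
         * fired); the handlers and the output path re-arm the loss, path,
         * PMTU and pace timers as needed.
         */
        quic_timer_start(sk, QUIC_TIMER_IDLE, inq->timeout);

        /* ... connection runs ... */

        quic_timer_free(sk);    /* at teardown: cancel anything still pending */
}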

Generated by: LCOV version 1.14