当前位置:   article > 正文

kernel网络之软中断_netif_rx_ni

netif_rx_ni

从网卡收包到上送协议栈有两个模式:
一种是传统的中断模式,即收到一个数据包,执行一次中断处理函数(比如e100_rx),在此函数中分配skb,替换有数据的skb(DMA已经将数据拷贝到初始化的skb),调用netif_rx将有数据的skb放在percpu的队列上(如果开启了RPS,这个队列有可能是本地cpu的,也有可能是其他cpu的),最后激活软中断。之后的软中断处理函数net_rx_action中调用poll函数process_backlog(如果将skb放在其他cpu队列上了,还需要通过ipi激活其他cpu的软中断),处理percpu队列上的数据包,上送协议栈__netif_receive_skb。
中断模式会触发很多中断,影响性能,所以有了napi模式,这种模式下,一次中断可以poll收多个数据包(配额64)。具体的为收到一个中断,执行中断处理函数(比如ixgbe_msix_clean_rings),在此函数中只是激活软中断,并不处理skb,在之后的软中断处理函数net_rx_action中调用驱动注册的poll函数,比如ixgbe_poll,来收包,上送协议栈netif_receive_skb_internal(如果开启了RPS,就会按照non-napi的处理方式,将skb放在percpu的队列上,这个队列有可能是本地cpu的,也有可能是其他cpu的),再经过软中断处理才会将skb上送协议栈__netif_receive_skb。

下面的图片展示了这两种模式的流程,其中蓝色部分为公共流程,红色的为non-NAPI流程,绿色的为NAPI流程。

image.png

软中断流程分为两步,首先激活软中断,然后在某个时刻执行软中断处理函数

  1. 激活软中断有以下三个地方
    a. 非网络软中断激活方式

  1. raise_softirq
  2. raise_softirq_irqoff(nr);
  3. __raise_softirq_irqoff(unsigned int nr)
  4. or_softirq_pending(1UL << nr);

b. NAPI模式下激活软中断方式,一般在驱动的中断处理函数中调用

  1. napi_schedule
  2. __napi_schedule(n);
  3. ____napi_schedule(this_cpu_ptr(&softnet_data), n);
  4. list_add_tail(&napi->poll_list, &sd->poll_list);
  5. __raise_softirq_irqoff(NET_RX_SOFTIRQ);
  6. or_softirq_pending(1UL << nr);

c. non-NAPI模式下激活软中断方式,在netif_rx->enqueue_to_backlog时调用

  1. enqueue_to_backlog
  2. sd = &per_cpu(softnet_data, cpu);
  3. ____napi_schedule(sd, &sd->backlog);
  4. list_add_tail(&napi->poll_list, &sd->poll_list);
  5. __raise_softirq_irqoff(NET_RX_SOFTIRQ);
  6. or_softirq_pending(1UL << nr);
  1. 执行软中断有以下三个地方:
    a. 硬件中断代码返回的时候

  1. irq_exit
  2. if (!in_interrupt() && local_softirq_pending())
  3. invoke_softirq
  4. __do_softirq

b. ksoftirqd内核服务线程运行的时候

  1. __do_softirq
  2. invoke_softirq
  3. raise_softirq_irqoff
  4. wakeup_softirqd
  5. run_ksoftirqd
  6. if (local_softirq_pending()) {
  7. __do_softirq

c. netif_rx_ni
netif_rx_ni 会先做和netif_rx一样的操作,之后如果有软中断被激活,则执行软中断

  1. netif_rx_ni
  2. if (local_softirq_pending())
  3. do_softirq();
  4. do_softirq_own_stack();
  5. if (local_softirq_pending())
  6. __do_softirq

软中断相关初始化

  1. //kernel启动时,软中断相关初始化
  2. static int __init net_dev_init(void)
  3. {
  4. ...
  5. /*
  6. * Initialise the packet receive queues.
  7. */
  8. //初始化percpu的结构softnet_data
  9. for_each_possible_cpu(i) {
  10. struct softnet_data *sd = &per_cpu(softnet_data, i);
  11. skb_queue_head_init(&sd->input_pkt_queue);
  12. skb_queue_head_init(&sd->process_queue);
  13. INIT_LIST_HEAD(&sd->poll_list);
  14. sd->output_queue_tailp = &sd->output_queue;
  15. #ifdef CONFIG_RPS
  16. sd->csd.func = rps_trigger_softirq; //激活其他cpu软中断
  17. sd->csd.info = sd;
  18. sd->cpu = i;
  19. #endif
  20. //backlog借用napi的结构,实现non-NAPI的处理。
  21. //process_backlog就是NAPI下的poll函数
  22. sd->backlog.poll = process_backlog;
  23. sd->backlog.weight = weight_p;
  24. }
  25. ...
  26. //注册和网络相关的两个软中断处理函数
  27. open_softirq(NET_TX_SOFTIRQ, net_tx_action);
  28. open_softirq(NET_RX_SOFTIRQ, net_rx_action);
  29. ...
  30. }

支持以下软中断类型

  1. enum
  2. {
  3. HI_SOFTIRQ=0,
  4. TIMER_SOFTIRQ,
  5. NET_TX_SOFTIRQ,
  6. NET_RX_SOFTIRQ,
  7. BLOCK_SOFTIRQ,
  8. BLOCK_IOPOLL_SOFTIRQ,
  9. TASKLET_SOFTIRQ,
  10. SCHED_SOFTIRQ,
  11. HRTIMER_SOFTIRQ,
  12. RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */
  13. NR_SOFTIRQS
  14. };

注册软中断处理函数

  1. void open_softirq(int nr, void (*action)(struct softirq_action *))
  2. {
  3. softirq_vec[nr].action = action;
  4. }

non-NAPI处理流程

  1. 激活软中断
    网卡收到数据包后,通过中断通知cpu,cpu调用网卡驱动注册的中断处理函数,比如dm9000_interrupt,调用netif_rx将skb放入percpu队列,激活软中断。细节请看下面代码分析

  1. static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
  2. /* Received the coming packet */
  3. if (int_status & ISR_PRS)
  4. dm9000_rx(dev);
  5. //分配 skb
  6. skb = netdev_alloc_skb(dev, RxLen + 4)
  7. //将数据存入 skb
  8. rdptr = (u8 *) skb_put(skb, RxLen - 4);
  9. (db->inblk)(db->io_data, rdptr, RxLen);
  10. //调用netif_rx处理skb
  11. netif_rx(skb);
  12. int netif_rx(struct sk_buff *skb)
  13. {
  14. //static tracepoint
  15. trace_netif_rx_entry(skb);
  16. return netif_rx_internal(skb);
  17. }

获取合适的cpu,调用 enqueue_to_backlog 将skb放入percpu的队列中

  1. static int netif_rx_internal(struct sk_buff *skb)
  2. {
  3. int ret;
  4. net_timestamp_check(netdev_tstamp_prequeue, skb);
  5. trace_netif_rx(skb);
  6. #ifdef CONFIG_RPS
  7. //如果内核配置选项配置了 RPS,并且使能了rps(echo f >
  8. ///sys/class/net/eth0/queues/rx-0/rps_cpus),则通过
  9. //get_rps_cpu获取合适的cpu(有可能是本地cpu也有可能是
  10. //remote cpu),否则使用本地cpu
  11. if (static_key_false(&rps_needed)) {
  12. struct rps_dev_flow voidflow, *rflow = &voidflow;
  13. int cpu;
  14. preempt_disable();
  15. rcu_read_lock();
  16. cpu = get_rps_cpu(skb->dev, skb, &rflow);
  17. if (cpu < 0)
  18. cpu = smp_processor_id();
  19. ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
  20. rcu_read_unlock();
  21. preempt_enable();
  22. } else
  23. #endif
  24. {
  25. unsigned int qtail;
  26. //没有配置rps,则获取本地cpu
  27. ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
  28. put_cpu();
  29. }
  30. return ret;
  31. }

将skb放在指定cpu的softnet_data->input_pkt_queue队列中,
如果是队列上第一个包还需要激活软中断

  1. /*
  2. * enqueue_to_backlog is called to queue an skb to a per CPU backlog
  3. * queue (may be a remote CPU queue).
  4. */
  5. static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
  6. unsigned int *qtail)
  7. {
  8. struct softnet_data *sd;
  9. unsigned long flags;
  10. unsigned int qlen;
  11. //获取percpu的sd
  12. sd = &per_cpu(softnet_data, cpu);
  13. local_irq_save(flags);
  14. rps_lock(sd);
  15. if (!netif_running(skb->dev))
  16. goto drop;
  17. //如果队列中skb个数小于netdev_max_backlog(默认值
  18. //1000,可以通过sysctl修改netdev_max_backlog值),
  19. //并且 skb_flow_limit (为了防止large flow占用太多cpu,small
  20. //flow得不到处理。代码实现没看明白)返回false,则skb可以
  21. //继续入队,否则drop skb
  22. qlen = skb_queue_len(&sd->input_pkt_queue);
  23. if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
  24. //如果队列不为空,则直接入队,否则先激活软中断,再入队
  25. if (skb_queue_len(&sd->input_pkt_queue)) {
  26. enqueue:
  27. __skb_queue_tail(&sd->input_pkt_queue, skb);
  28. input_queue_tail_incr_save(sd, qtail);
  29. rps_unlock(sd);
  30. local_irq_restore(flags);
  31. return NET_RX_SUCCESS;
  32. }
  33. /* Schedule NAPI for backlog device
  34. * We can use non atomic operation since we own the queue lock
  35. */
  36. //队列为空时,即skb是第一个入队元素,则将state设置
  37. //为 NAPI_STATE_SCHED(软中断处理函数
  38. //net_rx_action会检查此标志),表示软中断可以处理此
  39. //backlog
  40. if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
  41. //if返回0的情况下,需要将sd->backlog挂到sd-
  42. //>poll_list上,并激活软中断。rps_ipi_queued看下
  43. //面的分析
  44. if (!rps_ipi_queued(sd))
  45. ____napi_schedule(sd, &sd->backlog);
  46. }
  47. goto enqueue;
  48. }
  49. drop:
  50. sd->dropped++;
  51. rps_unlock(sd);
  52. local_irq_restore(flags);
  53. atomic_long_inc(&skb->dev->rx_dropped);
  54. kfree_skb(skb);
  55. return NET_RX_DROP;
  56. }
  57. /*
  58. * Check if this softnet_data structure is another cpu one
  59. * If yes, queue it to our IPI list and return 1
  60. * If no, return 0
  61. */
  62. //上面注释说的很清楚,在配置RPS情况下,检查sd是当前cpu的
  63. //还是其他cpu的,如果是其他cpu的,将sd放在当前cpu的mysd-
  64. //>rps_ipi_list上,并激活当前cpu的软中断,返回1. 在软中断处理
  65. //函数net_rx_action中,通过ipi中断通知其他cpu来处理放在其他
  66. //cpu队列上的skb。如果是当前cpu,或者没有配置RPS,则返回0,
  67. //在外层函数激活软中断,并将当前cpu的backlog放入sd->poll_list
  68. //
  69. static int rps_ipi_queued(struct softnet_data *sd)
  70. {
  71. #ifdef CONFIG_RPS
  72. struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
  73. if (sd != mysd) {
  74. sd->rps_ipi_next = mysd->rps_ipi_list;
  75. mysd->rps_ipi_list = sd;
  76. __raise_softirq_irqoff(NET_RX_SOFTIRQ);
  77. return 1;
  78. }
  79. #endif /* CONFIG_RPS */
  80. return 0;
  81. }
  1. 执行软中断
    __do_softirq 执行当前cpu上所有软中断

  1. asmlinkage __visible void __do_softirq(void)
  2. {
  3. //MAX_SOFTIRQ_TIME为2ms,如果一直有软中断,最多可以执行2ms
  4. unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
  5. unsigned long old_flags = current->flags;
  6. //MAX_SOFTIRQ_RESTART为10,表示最多可以循环执行10次软中断
  7. int max_restart = MAX_SOFTIRQ_RESTART;
  8. struct softirq_action *h;
  9. bool in_hardirq;
  10. __u32 pending;
  11. int softirq_bit;
  12. /*
  13. * Mask out PF_MEMALLOC as current task context is borrowed for the
  14. * softirq. A softirq handled such as network RX might set PF_MEMALLOC
  15. * again if the socket is related to swap
  16. */
  17. current->flags &= ~PF_MEMALLOC;
  18. //取出当前cpu上所有的软中断
  19. pending = local_softirq_pending();
  20. account_irq_enter_time(current);
  21. __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
  22. in_hardirq = lockdep_softirq_start();
  23. restart:
  24. /* Reset the pending bitmask before enabling irqs */
  25. //清空当前cpu上所有的软中断
  26. set_softirq_pending(0);
  27. //执行软中断时打开硬件中断
  28. local_irq_enable();
  29. h = softirq_vec;
  30. //遍历执行软中断
  31. while ((softirq_bit = ffs(pending))) {
  32. unsigned int vec_nr;
  33. int prev_count;
  34. h += softirq_bit - 1;
  35. vec_nr = h - softirq_vec;
  36. prev_count = preempt_count();
  37. kstat_incr_softirqs_this_cpu(vec_nr);
  38. trace_softirq_entry(vec_nr);
  39. //软中断处理函数,比如 net_rx_action
  40. h->action(h);
  41. trace_softirq_exit(vec_nr);
  42. if (unlikely(prev_count != preempt_count())) {
  43. pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
  44. vec_nr, softirq_to_name[vec_nr], h->action,
  45. prev_count, preempt_count());
  46. preempt_count_set(prev_count);
  47. }
  48. h++;
  49. pending >>= softirq_bit;
  50. }
  51. rcu_bh_qs();
  52. //执行完软中断,关闭硬中断
  53. local_irq_disable();
  54. //检查执行软中断过程中(开启硬中断)是否有新的软中断被激活
  55. pending = local_softirq_pending();
  56. if (pending) {
  57. //如果有新的软中断被激活,并且执行软中断时间不足
  58. //2ms,并且重新执行次数不足10次,则可以再次执行软
  59. //中断。
  60. if (time_before(jiffies, end) && !need_resched() &&
  61. --max_restart)
  62. goto restart;
  63. //否则只能唤醒软中断处理线程继续处理软中断
  64. wakeup_softirqd();
  65. }
  66. lockdep_softirq_end(in_hardirq);
  67. account_irq_exit_time(current);
  68. __local_bh_enable(SOFTIRQ_OFFSET);
  69. WARN_ON_ONCE(in_interrupt());
  70. tsk_restore_flags(current, old_flags, PF_MEMALLOC);
  71. }

网络收包软中断处理函数

  1. static void net_rx_action(struct softirq_action *h)
  2. {
  3. //获取percpu的sd
  4. struct softnet_data *sd = this_cpu_ptr(&softnet_data);
  5. unsigned long time_limit = jiffies + 2;
  6. //netdev_budget默认值300,可通过sysctl修改
  7. int budget = netdev_budget;
  8. void *have;
  9. local_irq_disable();
  10. //如果sd->poll_list不为空,说明有数据需要处理
  11. while (!list_empty(&sd->poll_list)) {
  12. struct napi_struct *n;
  13. int work, weight;
  14. /* If softirq window is exhausted then punt.
  15. * Allow this to run for 2 jiffies since which will allow
  16. * an average latency of 1.5/HZ.
  17. */
  18. //如果budget用完了,或者经过了两个时间片,说明数据
  19. //包压力过大,还没处理完就需要跳出循环,在
  20. //softnet_break会再次激活软中断(因为执行软中断时已
  21. //经把所有的pending清空了)
  22. if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit)))
  23. goto softnet_break;
  24. local_irq_enable();
  25. /* Even though interrupts have been re-enabled, this
  26. * access is safe because interrupts can only add new
  27. * entries to the tail of this list, and only ->poll()
  28. * calls can remove this head entry from the list.
  29. */
  30. n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
  31. have = netpoll_poll_lock(n);
  32. weight = n->weight;
  33. /* This NAPI_STATE_SCHED test is for avoiding a race
  34. * with netpoll's poll_napi(). Only the entity which
  35. * obtains the lock and sees NAPI_STATE_SCHED set will
  36. * actually make the ->poll() call. Therefore we avoid
  37. * accidentally calling ->poll() when NAPI is not scheduled.
  38. */
  39. work = 0;
  40. //只有state为NAPI_STATE_SCHED才会执行poll函数。
  41. //对于non-napi来说,poll函数为process_backlog,处理
  42. //percpu的input queue上的数据包。
  43. //对于napi来说,poll函数为网卡驱动提供的poll函数,比
  44. //如ixgbe_poll,分配skb,将skb上送协议栈
  45. //如果poll处理后的结果work小于weight说明没有更多数
  46. //据需要处理,poll函数中会把napi从链表sd->poll_list删
  47. //除。如果work等于weight说明还有更多数据需要处理,
  48. //不会删除napi,只是将napi移动到链表尾部
  49. if (test_bit(NAPI_STATE_SCHED, &n->state)) {
  50. work = n->poll(n, weight);
  51. trace_napi_poll(n);
  52. }
  53. WARN_ON_ONCE(work > weight);
  54. //work为poll实际处理的数据个数,budget需要减去work
  55. budget -= work;
  56. local_irq_disable();
  57. /* Drivers must not modify the NAPI state if they
  58. * consume the entire weight. In such cases this code
  59. * still "owns" the NAPI instance and therefore can
  60. * move the instance around on the list at-will.
  61. */
  62. //如果work等于weight说明还有更多数据需要处理
  63. if (unlikely(work == weight)) {
  64. if (unlikely(napi_disable_pending(n))) {
  65. local_irq_enable();
  66. napi_complete(n);
  67. local_irq_disable();
  68. } else {
  69. if (n->gro_list) {
  70. /* flush too old packets
  71. * If HZ < 1000, flush all packets.
  72. */
  73. local_irq_enable();
  74. napi_gro_flush(n, HZ >= 1000);
  75. local_irq_disable();
  76. }
  77. //将napi移动到链表尾部
  78. list_move_tail(&n->poll_list, &sd->poll_list);
  79. }
  80. }
  81. netpoll_poll_unlock(have);
  82. }
  83. out:
  84. net_rps_action_and_irq_enable(sd);
  85. return;
  86. softnet_break:
  87. sd->time_squeeze++;
  88. __raise_softirq_irqoff(NET_RX_SOFTIRQ);
  89. goto out;
  90. }
  91. /*
  92. * net_rps_action_and_irq_enable sends any pending IPI's for rps.
  93. * Note: called with local irq disabled, but exits with local irq enabled.
  94. */
  95. //如果链表 sd->rps_ipi_list不为空,说明在rps下,将skb放在其他
  96. //cpu上的percpu队列上了,所以需要通过ipi中断通知其他cpu,通
  97. //过smp_call_function_single_async远程激活其他cpu的软中断,
  98. //使其他cpu处理数据包
  99. static void net_rps_action_and_irq_enable(struct softnet_data *sd)
  100. {
  101. #ifdef CONFIG_RPS
  102. struct softnet_data *remsd = sd->rps_ipi_list;
  103. if (remsd) {
  104. sd->rps_ipi_list = NULL;
  105. local_irq_enable();
  106. /* Send pending IPI's to kick RPS processing on remote cpus. */
  107. while (remsd) {
  108. struct softnet_data *next = remsd->rps_ipi_next;
  109. if (cpu_online(remsd->cpu))
  110. smp_call_function_single_async(remsd->cpu,
  111. &remsd->csd);
  112. remsd = next;
  113. }
  114. } else
  115. #endif
  116. local_irq_enable();
  117. }

non-napi下的poll函数为 process_backlog

  1. static int process_backlog(struct napi_struct *napi, int quota)
  2. {
  3. int work = 0;
  4. struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
  5. #ifdef CONFIG_RPS
  6. /* Check if we have pending ipi, its better to send them now,
  7. * not waiting net_rx_action() end.
  8. */
  9. //激活其他cpu上的软中断
  10. if (sd->rps_ipi_list) {
  11. local_irq_disable();
  12. net_rps_action_and_irq_enable(sd);
  13. }
  14. #endif
  15. napi->weight = weight_p;
  16. local_irq_disable();
  17. while (1) {
  18. struct sk_buff *skb;
  19. while ((skb = __skb_dequeue(&sd->process_queue))) {
  20. rcu_read_lock();
  21. local_irq_enable();
  22. //将skb上送协议栈
  23. __netif_receive_skb(skb);
  24. rcu_read_unlock();
  25. local_irq_disable();
  26. input_queue_head_incr(sd);
  27. //处理skb的个数达到quota了,说明还有更多数据
  28. //包需要处理
  29. if (++work >= quota) {
  30. local_irq_enable();
  31. return work;
  32. }
  33. }
  34. rps_lock(sd);
  35. if (skb_queue_empty(&sd->input_pkt_queue)) {
  36. /*
  37. * Inline a custom version of __napi_complete().
  38. * only current cpu owns and manipulates this napi,
  39. * and NAPI_STATE_SCHED is the only possible flag set
  40. * on backlog.
  41. * We can use a plain write instead of clear_bit(),
  42. * and we dont need an smp_mb() memory barrier.
  43. */
  44. //如果input_pkt_queue队列为空,将napi从链表
  45. //poll_list删除
  46. list_del(&napi->poll_list);
  47. napi->state = 0;
  48. rps_unlock(sd);
  49. break;
  50. }
  51. //input_pkt_queue队列中的skb挂到process_queue
  52. //上,并清空input_pkt_queue
  53. skb_queue_splice_tail_init(&sd->input_pkt_queue,
  54. &sd->process_queue);
  55. rps_unlock(sd);
  56. }
  57. local_irq_enable();
  58. return work;
  59. }

NAPI

1.激活软中断

  1. //硬件中断到来时调用中断处理函数 ixgbe_msix_clean_rings
  2. ixgbe_msix_clean_rings
  3. napi_schedule(&q_vector->napi);
  4. ____napi_schedule(this_cpu_ptr(&softnet_data), n);
  5. //将napi添加到per cpu的softnet_data->poll_list中
  6. list_add_tail(&napi->poll_list, &sd->poll_list);
  7. //将接收软中断置位
  8. __raise_softirq_irqoff(NET_RX_SOFTIRQ);

2.执行软中断

  1. __do_softirq
  2. net_rx_action
  3. n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
  4. work = n->poll(n, weight); //即调用 ixgbe_poll
  5. ixgbe_clean_rx_irq(q_vector, ring)
  6. skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc);
  7. ixgbe_rx_skb(q_vector, skb);
  8. napi_gro_receive(&q_vector->napi, skb);
  9. //上送协议栈,但如果开启了RPS就走non-NAPI的路径了
  10. netif_receive_skb_internal
  11. /* all work done, exit the polling mode */
  12. //如果处理的skb小于配额,说明工作已经完成,将napi从poll_list删除
  13. //清除标志位 NAPI_STATE_SCHED
  14. napi_complete(napi);
  15. list_del(&n->poll_list);
  16. clear_bit(NAPI_STATE_SCHED, &n->state);

如果没有开启RPS,则直接调用__netif_receive_skb上送协议栈了。如果开启了RPS,则调用get_rps_cpu获取合适的cpu(有可能是本地cpu,也有可能是其他cpu),再调用enqueue_to_backlog将skb放在percpu的队列中,激活相应cpu的软中断。

  1. static int netif_receive_skb_internal(struct sk_buff *skb)
  2. {
  3. int ret;
  4. net_timestamp_check(netdev_tstamp_prequeue, skb);
  5. if (skb_defer_rx_timestamp(skb))
  6. return NET_RX_SUCCESS;
  7. rcu_read_lock();
  8. #ifdef CONFIG_RPS
  9. //注意使用的是static_key_false进行判断,意思是分支预测为false概率很大
  10. if (static_key_false(&rps_needed)) {
  11. struct rps_dev_flow voidflow, *rflow = &voidflow;
  12. int cpu = get_rps_cpu(skb->dev, skb, &rflow);
  13. if (cpu >= 0) {
  14. ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
  15. rcu_read_unlock();
  16. return ret;
  17. }
  18. }
  19. #endif
  20. ret = __netif_receive_skb(skb);
  21. rcu_read_unlock();
  22. return ret;
  23. }

参考

https://blog.packagecloud.io/eng/2016/06/22/monitoring-tuning-linux-networking-stack-receiving-data/

 也可参考:kernel网络之软中断 - 简书 (jianshu.com)

本文内容由网友自发贡献,转载请注明出处:【wpsshop博客】
推荐阅读
相关标签
  

闽ICP备14008679号