In reply to post #5, wangjl_sdu:
Hello! I'm not sure whether you still see questions here, but I'd like to confirm your point: do you mean that a single NIC can have multiple napi_struct instances, and that each napi_struct gets hooked onto the CPU's softnet_data? Taking Intel's e1000 as an example: when one napi_struct finishes its work (i.e. work < weight), it is removed from the list and the NIC interrupt is re-enabled. What happens then to the napi_structs further down the list that have not been processed yet (assuming the softirq budget and time limit are not yet exhausted)? If the interrupt is re-enabled as soon as a single napi_struct is done, where does NAPI polling actually come into play? For reference, this is the NAPI poll function that e1000 registers:
static int e1000_clean(struct napi_struct *napi, int budget)
{
    struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
    int tx_clean_complete = 0, work_done = 0;

    tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);

    adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);

    if (!tx_clean_complete)
        work_done = budget;

    /* If budget not fully consumed, exit the polling mode */
    if (work_done < budget) {
        if (likely(adapter->itr_setting & 3))
            e1000_set_itr(adapter);
        napi_complete(napi);
        if (!test_bit(__E1000_DOWN, &adapter->flags))
            e1000_irq_enable(adapter);
    }

    return work_done;
}
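To make the "one NIC, several napi_struct instances" premise concrete, here is a minimal sketch of a hypothetical multi-queue driver (all names such as my_ring, my_poll, my_isr and the queue count are invented for illustration, not taken from e1000). It only shows the generic kernel calls involved: netif_napi_add() registers one napi_struct per RX queue, the per-queue interrupt handler calls napi_schedule() to put that instance on the current CPU's softnet_data poll list, and the poll callback calls napi_complete() when it finishes under budget.

    /* Hypothetical multi-queue driver sketch, not real e1000 code. */
    #include <linux/netdevice.h>
    #include <linux/interrupt.h>

    #define MY_NUM_QUEUES   4
    #define MY_NAPI_WEIGHT  64

    struct my_ring {
        struct napi_struct napi;    /* one NAPI context per RX queue */
        /* ... ring descriptors, counters ... */
    };

    static struct my_ring my_rings[MY_NUM_QUEUES];

    /* Poll callback: same contract as e1000_clean() above. */
    static int my_poll(struct napi_struct *napi, int budget)
    {
        struct my_ring *ring = container_of(napi, struct my_ring, napi);
        int work_done = 0;

        /* ... reap up to 'budget' packets from this ring ... */

        if (work_done < budget) {
            napi_complete(napi);    /* leave polled mode for this queue */
            /* ... re-enable this queue's interrupt here ... */
        }
        return work_done;
    }

    /* Per-queue hard-IRQ handler: mask the queue's interrupt and hand
     * its napi_struct to this CPU's softnet_data poll list.
     */
    static irqreturn_t my_isr(int irq, void *data)
    {
        struct my_ring *ring = data;

        /* ... disable further RX interrupts for this queue ... */
        napi_schedule(&ring->napi);
        return IRQ_HANDLED;
    }

    /* Called at probe time: register every queue's NAPI instance,
     * so one net_device ends up owning MY_NUM_QUEUES napi_structs.
     */
    static void my_setup_napi(struct net_device *netdev)
    {
        int i;

        for (i = 0; i < MY_NUM_QUEUES; i++)
            netif_napi_add(netdev, &my_rings[i].napi,
                           my_poll, MY_NAPI_WEIGHT);
    }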
Finally, one more question about net_rx_action: the local_irq_disable() and local_irq_enable() calls below disable and re-enable the CPU's local interrupts. Does "local interrupts" here include the NIC's receive interrupt?
static void net_rx_action(struct softirq_action *h)
{
    struct softnet_data *sd = &__get_cpu_var(softnet_data);
    unsigned long time_limit = jiffies + 2;
    int budget = netdev_budget;
    void *have;

    local_irq_disable();

    while (!list_empty(&sd->poll_list)) {
        struct napi_struct *n;
        int work, weight;

        /* If softirq window is exhuasted then punt.
         * Allow this to run for 2 jiffies since which will allow
         * an average latency of 1.5/HZ.
         */
        if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
            goto softnet_break;

        local_irq_enable();

        /* Even though interrupts have been re-enabled, this
         * access is safe because interrupts can only add new
         * entries to the tail of this list, and only ->poll()
         * calls can remove this head entry from the list.
         */
        n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);

        have = netpoll_poll_lock(n);

        weight = n->weight;

        /* This NAPI_STATE_SCHED test is for avoiding a race
         * with netpoll's poll_napi(). Only the entity which
         * obtains the lock and sees NAPI_STATE_SCHED set will
         * actually make the ->poll() call. Therefore we avoid
         * accidentally calling ->poll() when NAPI is not scheduled.
         */
        work = 0;
        if (test_bit(NAPI_STATE_SCHED, &n->state)) {
            work = n->poll(n, weight);
            trace_napi_poll(n);
        }

        WARN_ON_ONCE(work > weight);

        budget -= work;

        local_irq_disable();

        /* Drivers must not modify the NAPI state if they
         * consume the entire weight. In such cases this code
         * still "owns" the NAPI instance and therefore can
         * move the instance around on the list at-will.
         */
        if (unlikely(work == weight)) {
            if (unlikely(napi_disable_pending(n))) {
                local_irq_enable();
                napi_complete(n);
                local_irq_disable();
            } else
                list_move_tail(&n->poll_list, &sd->poll_list);
        }

        netpoll_poll_unlock(have);
    }
out:
    net_rps_action_and_irq_enable(sd);

#ifdef CONFIG_NET_DMA
    /*
     * There may not be any more sk_buffs coming right now, so push
     * any pending DMA copies to hardware
     */
    dma_issue_pending_all();
#endif

    return;

softnet_break:
    sd->time_squeeze++;
    __raise_softirq_irqoff(NET_RX_SOFTIRQ);
    goto out;
}
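For context on that last question, the list that net_rx_action() walks (sd->poll_list) is also appended to from hard-IRQ context: the NIC's interrupt handler ends up in __napi_schedule() via napi_schedule(). The sketch below is written from memory of the same kernel generation and is paraphrased rather than quoted verbatim, so treat it as illustrative; it shows why the poll list is only touched with local interrupts disabled.

    /* Paraphrased sketch of the enqueue side (not verbatim kernel source). */
    void __napi_schedule(struct napi_struct *n)
    {
        unsigned long flags;

        /* The poll list belongs to this CPU's softnet_data and is also
         * modified here, from hard-IRQ context, when a device interrupt
         * handler schedules its napi_struct. Interrupts are therefore
         * kept off around the list manipulation.
         */
        local_irq_save(flags);
        list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
        __raise_softirq_irqoff(NET_RX_SOFTIRQ);
        local_irq_restore(flags);
    }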