
Commit e8cd12f

Added new metric: reap_too_many_dead
1 parent 48708ef commit e8cd12f

6 files changed: +22 −8 lines changed

homa_impl.h

Lines changed: 6 additions & 0 deletions
@@ -1889,6 +1889,12 @@ struct homa_metrics {
 	 */
 	__u64 reaper_dead_skbs;
 
+	/**
+	 * @reap_too_many_dead: total number of times that homa_wait_for_message
+	 * invoked the reaper because dead_skbs was too high.
+	 */
+	__u64 reap_too_many_dead;
+
 	/**
 	 * @throttle_list_adds: Total number of calls to homa_add_to_throttled.
 	 */
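
The commit does not show how INC_METRIC or the per-core metrics are defined; the unit-test change below reads the counter through homa_cores[cpu_number]->metrics, which suggests one homa_metrics instance per core. A minimal sketch of what such a macro could look like under that assumption (struct homa_core and the macro body here are illustrative, not Homa's actual definitions):

/* Hypothetical sketch, not Homa's actual code: each core owns its own
 * homa_metrics instance, so hot-path increments need no atomics or locks.
 * raw_smp_processor_id() comes from <linux/smp.h>.
 */
struct homa_core {
	struct homa_metrics metrics;   /* Counters private to this core. */
};

extern struct homa_core *homa_cores[];

/* Bump a named counter on the current core; "metric" is a field of
 * struct homa_metrics, e.g. INC_METRIC(reap_too_many_dead, 1).
 */
#define INC_METRIC(metric, count) \
	(homa_cores[raw_smp_processor_id()]->metrics.metric += (count))

Keeping counters per core lets the fast path increment them cheaply; the values can then be folded together whenever the metrics are read.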

homa_incoming.c

Lines changed: 9 additions & 6 deletions
@@ -1123,13 +1123,16 @@ struct homa_rpc *homa_wait_for_message(struct homa_sock *hsk, int flags,
 	int error;
 
 	homa_interest_init(&interest);
-	while (hsk->dead_skbs > hsk->homa->max_dead_buffs) {
-		/* Way too many dead RPCs; must cleanup immediately. */
-		if (!homa_rpc_reap(hsk))
-			break;
+	if (hsk->dead_skbs > hsk->homa->max_dead_buffs) {
+		/* Too many dead RPCs; must cleanup immediately. */
+		INC_METRIC(reap_too_many_dead, 1);
+		do {
+			if (!homa_rpc_reap(hsk))
+				break;
 
-		/* Give NAPI and SoftIRQ tasks a chance to run. */
-		schedule();
+			/* Give NAPI and SoftIRQ tasks a chance to run. */
+			schedule();
+		} while (hsk->dead_skbs > hsk->homa->max_dead_buffs);
 	}
 
 	/* Normally this loop only gets executed once, but we may have
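
The restructuring from a while loop to an if guard around a do/while is what makes the metric count forced-reap episodes rather than individual reap passes: INC_METRIC fires once when the threshold is first seen exceeded, and the do/while then keeps reaping (yielding via schedule() between passes) until dead_skbs drops back under max_dead_buffs or homa_rpc_reap reports nothing left to reap. A standalone sketch of the same count-once-then-drain pattern, with hypothetical names, just to isolate the idea:

/* Illustrative only: counts one "episode" each time the backlog is found
 * over the threshold, no matter how many drain passes the episode takes.
 */
static void drain_if_over_limit(int *backlog, int limit,
				unsigned long *episodes)
{
	if (*backlog > limit) {
		(*episodes)++;            /* One increment per episode... */
		do {
			*backlog -= 1;    /* ...however many passes it needs. */
		} while (*backlog > limit);
	}
}

Had the increment stayed inside the loop, a single buildup of dead RPCs would have inflated the counter by one per reap pass, making the metric much harder to interpret.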

homa_utils.c

Lines changed: 4 additions & 0 deletions
@@ -1311,6 +1311,10 @@ char *homa_print_metrics(struct homa *homa)
 			"Sum of hsk->dead_skbs across all reaper "
 			"calls\n",
 			m->reaper_dead_skbs);
+	homa_append_metric(homa,
+			"reap_too_many_dead %15llu "
+			"Reaps forced by dead RPC buildup\n",
+			m->reap_too_many_dead);
 	homa_append_metric(homa,
 			"throttle_list_adds %15llu "
 			"Calls to homa_add_to_throttled\n",

notes.txt

Lines changed: 0 additions & 1 deletion
@@ -9,7 +9,6 @@ Notes for Homa implementation in Linux:
 * Performance-related tasks:
   * See if turning off c-states allows shorter polling intervals?
   * Are Meltdown mitigations really disabled?
-  * Add metric for forced reaps because of too many messages.
   * Consider a permanent reduction in rtt_bytes.
   * Consider reducing throttle_min_bytes to see if it helps region 1
     in the CDF?

test/unit_homa_incoming.c

Lines changed: 1 addition & 0 deletions
@@ -1862,6 +1862,7 @@ TEST_F(homa_incoming, homa_wait_for_message__dead_buffs_exceeded)
 	rpc = homa_wait_for_message(&self->hsk, HOMA_RECV_RESPONSE, 44);
 	EXPECT_EQ(EINVAL, -PTR_ERR(rpc));
 	EXPECT_EQ(10, self->hsk.dead_skbs);
+	EXPECT_EQ(1, homa_cores[cpu_number]->metrics.reap_too_many_dead);
 }
 TEST_F(homa_incoming, homa_wait_for_message__rpc_from_register_interests)
 {

util/metrics.py

Lines changed: 2 additions & 1 deletion
@@ -392,7 +392,8 @@ def scale_number(number):
             "server_cant_create_rpcs", "server_cant_create_rpcs",
             "short_packets", "redundant_packets",
             "client_peer_timeouts", "server_rpc_discards",
-            "server_rpcs_unknown", "stale_generations", "generation_overflows"]:
+            "server_rpcs_unknown", "stale_generations", "generation_overflows",
+            "reap_too_many_dead"]:
         if deltas[symbol] == 0:
             continue
         rate = float(deltas[symbol])/elapsed_secs
