
Commit e7e5e5d

feat: add Key-Value GET and SET latency metrics for cache workloads (#391)
1 parent 3373440 commit e7e5e5d
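
All three cache clients touched by this commit apply the same pattern: before dispatching a request, collect references to every latency histogram the operation should feed (the existing RESPONSE_LATENCY plus the new KVGET_RESPONSE_LATENCY or KVSET_RESPONSE_LATENCY), then on a successful response increment each of them with the measured latency. Below is a minimal, self-contained sketch of that fan-out; it substitutes a simplified counting type for the real AtomicHistogram statics defined in src/metrics/mod.rs, and the Request enum and record_latency function are illustrative names, not part of the diff.

use std::sync::atomic::{AtomicU64, Ordering};

// Simplified stand-in for the AtomicHistogram statics in src/metrics/mod.rs:
// each "histogram" here just counts samples and sums latencies.
struct LatencyStat {
    count: AtomicU64,
    total_ns: AtomicU64,
}

impl LatencyStat {
    const fn new() -> Self {
        Self {
            count: AtomicU64::new(0),
            total_ns: AtomicU64::new(0),
        }
    }

    fn increment(&self, ns: u64) {
        self.count.fetch_add(1, Ordering::Relaxed);
        self.total_ns.fetch_add(ns, Ordering::Relaxed);
    }
}

static RESPONSE_LATENCY: LatencyStat = LatencyStat::new();
static KVGET_RESPONSE_LATENCY: LatencyStat = LatencyStat::new();
static KVSET_RESPONSE_LATENCY: LatencyStat = LatencyStat::new();

// Illustrative request type; the real clients match on their own request enums.
enum Request {
    Get,
    Set,
    Delete,
}

fn record_latency(request: &Request, latency_ns: u64) {
    // collect every histogram this operation belongs to...
    let mut latency_histograms = vec![&RESPONSE_LATENCY];
    match request {
        Request::Get => latency_histograms.push(&KVGET_RESPONSE_LATENCY),
        Request::Set => latency_histograms.push(&KVSET_RESPONSE_LATENCY),
        _ => {}
    }
    // ...and record the same measured latency into each of them
    for hist in latency_histograms {
        hist.increment(latency_ns);
    }
}

fn main() {
    record_latency(&Request::Get, 1_250);
    record_latency(&Request::Delete, 980);
    println!(
        "all responses: {}, GETs: {}",
        RESPONSE_LATENCY.count.load(Ordering::Relaxed),
        KVGET_RESPONSE_LATENCY.count.load(Ordering::Relaxed),
    );
}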

File tree (5 files changed: +73, −12 lines)

src/clients/cache/memcache/mod.rs
src/clients/cache/momento/mod.rs
src/clients/cache/redis/mod.rs
src/metrics/mod.rs
src/output/mod.rs

src/clients/cache/memcache/mod.rs (13 additions, 4 deletions)

@@ -103,6 +103,16 @@ async fn task(
 
     let request = request.unwrap();
 
+    let mut latency_histograms = vec![&RESPONSE_LATENCY];
+    match &request.request {
+        Request::Get(_) => {
+            latency_histograms.push(&KVGET_RESPONSE_LATENCY);
+        }
+        Request::Set(_) => {
+            latency_histograms.push(&KVSET_RESPONSE_LATENCY);
+        }
+        _ => {}
+    }
     // compose request
     REQUEST_OK.increment();
     let _ = protocol.compose_request(&request.request, &mut write_buffer);

@@ -166,7 +176,6 @@ async fn task(
     match response {
         Ok(response) => {
             let latency_ns = stop.duration_since(start).as_nanos() as u64;
-
             // check if the response is valid
             if (request.validator)(response).is_err() {
                 // increment error stats, connection will be dropped

@@ -175,9 +184,9 @@ async fn task(
             } else {
                 // increment success stats and latency
                 RESPONSE_OK.increment();
-
-                let _ = RESPONSE_LATENCY.increment(latency_ns);
-
+                for hist in latency_histograms {
+                    let _ = hist.increment(latency_ns);
+                }
                 // preserve the connection for the next request
                 stream = Some(s);
             }
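
A note on the new local introduced above (the momento and redis clients below do the same): the vector holds shared references to 'static histogram globals, so building it per request is cheap, and nothing is recorded unless the response comes back and validates successfully. With an explicit type annotation, added here purely for illustration since the code in the diff relies on inference, the binding would read:

    let mut latency_histograms: Vec<&AtomicHistogram> = vec![&RESPONSE_LATENCY];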

src/clients/cache/momento/mod.rs (12 additions, 4 deletions)

@@ -78,13 +78,20 @@ async fn task(
 
     REQUEST.increment();
     let start = Instant::now();
+    let mut latency_histograms = vec![&RESPONSE_LATENCY];
     let result = match work_item {
         ClientWorkItemKind::Request { request, .. } => match request {
             /*
              * KEY-VALUE
              */
-            ClientRequest::Get(r) => get(&mut client, &config, cache_name, r).await,
-            ClientRequest::Set(r) => set(&mut client, &config, cache_name, r).await,
+            ClientRequest::Get(r) => {
+                latency_histograms.push(&KVGET_RESPONSE_LATENCY);
+                get(&mut client, &config, cache_name, r).await
+            }
+            ClientRequest::Set(r) => {
+                latency_histograms.push(&KVSET_RESPONSE_LATENCY);
+                set(&mut client, &config, cache_name, r).await
+            }
             ClientRequest::Delete(r) => delete(&mut client, &config, cache_name, r).await,
 
             /*

@@ -182,8 +189,9 @@ async fn task(
             RESPONSE_OK.increment();
 
             let latency = stop.duration_since(start).as_nanos() as u64;
-
-            let _ = RESPONSE_LATENCY.increment(latency);
+            for hist in latency_histograms {
+                let _ = hist.increment(latency);
+            }
         }
         Err(ResponseError::Exception) => {
             RESPONSE_EX.increment();

src/clients/cache/redis/mod.rs (12 additions, 4 deletions)

@@ -120,6 +120,7 @@ async fn task(
 
     REQUEST.increment();
     let start = Instant::now();
+    let mut latency_histograms = vec![&RESPONSE_LATENCY];
     let result = match work_item {
         ClientWorkItemKind::Request { request, .. } => match request {
             /*

@@ -132,9 +133,15 @@ async fn task(
              */
             ClientRequest::Add(r) => add(&mut con, &config, r).await,
             ClientRequest::Delete(r) => delete(&mut con, &config, r).await,
-            ClientRequest::Get(r) => get(&mut con, &config, r).await,
+            ClientRequest::Get(r) => {
+                latency_histograms.push(&KVGET_RESPONSE_LATENCY);
+                get(&mut con, &config, r).await
+            }
             ClientRequest::Replace(r) => replace(&mut con, &config, r).await,
-            ClientRequest::Set(r) => set(&mut con, &config, r).await,
+            ClientRequest::Set(r) => {
+                latency_histograms.push(&KVSET_RESPONSE_LATENCY);
+                set(&mut con, &config, r).await
+            }
 
             /*
              * HASHES (DICTIONARIES)

@@ -202,8 +209,9 @@ async fn task(
         Ok(_) => {
             connection = Some(con);
             RESPONSE_OK.increment();
-
-            let _ = RESPONSE_LATENCY.increment(latency_ns);
+            for hist in latency_histograms {
+                let _ = hist.increment(latency_ns);
+            }
         }
         Err(ResponseError::Exception) => {
             RESPONSE_EX.increment();

src/metrics/mod.rs (16 additions, 0 deletions)

@@ -351,6 +351,22 @@ macro_rules! request {
 pub static RESPONSE_LATENCY: AtomicHistogram = AtomicHistogram::new(7, 64);
 pub static RESPONSE_LATENCY_HISTOGRAM: &str = "response_latency";
 
+#[metric(
+    name = KVGET_RESPONSE_LATENCY_HISTOGRAM,
+    description = "Distribution of response latencies for key-value GET requests",
+    metadata = { unit = "nanoseconds" }
+)]
+pub static KVGET_RESPONSE_LATENCY: AtomicHistogram = AtomicHistogram::new(7, 64);
+pub static KVGET_RESPONSE_LATENCY_HISTOGRAM: &str = "kvget_response_latency";
+
+#[metric(
+    name = KVSET_RESPONSE_LATENCY_HISTOGRAM,
+    description = "Distribution of response latencies for key-value SET requests",
+    metadata = { unit = "nanoseconds" }
+)]
+pub static KVSET_RESPONSE_LATENCY: AtomicHistogram = AtomicHistogram::new(7, 64);
+pub static KVSET_RESPONSE_LATENCY_HISTOGRAM: &str = "kvset_response_latency";
+
 #[metric(
     name = RESPONSE_TTFB_HISTOGRAM,
     description = "Distribution of time-to-first-byte for responses",
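
The paired string constants are the names downstream consumers key on: src/output/mod.rs (below) fetches percentiles via snapshot.percentiles(KVGET_RESPONSE_LATENCY_HISTOGRAM) and the SET equivalent. A small usage fragment, assuming it sits in src/metrics/mod.rs next to the definitions above (the latency value is an arbitrary example, not a measurement):

fn record_example_get_latency() {
    // record a GET latency into the new histogram, mirroring the client code
    let _ = KVGET_RESPONSE_LATENCY.increment(1_250);
    // the registered name is the key used when reporting percentiles
    assert_eq!(KVGET_RESPONSE_LATENCY_HISTOGRAM, "kvget_response_latency");
}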

src/output/mod.rs (20 additions, 0 deletions)

@@ -110,6 +110,8 @@ fn client_stats(snapshot: &mut MetricsSnapshot) {
     let connect_sr = 100.0 * connect_ok / connect_total;
 
     let response_latency = snapshot.percentiles(RESPONSE_LATENCY_HISTOGRAM);
+    let kvset_response_latency = snapshot.percentiles(KVSET_RESPONSE_LATENCY_HISTOGRAM);
+    let kvget_response_latency = snapshot.percentiles(KVGET_RESPONSE_LATENCY_HISTOGRAM);
     let response_ttfb = snapshot.percentiles(RESPONSE_TTFB_HISTOGRAM);
 
     output!(

@@ -168,6 +170,24 @@ fn client_stats(snapshot: &mut MetricsSnapshot) {
 
     output!("{latencies}");
 
+    let mut kvset_latencies = "Client Key-Value SET Response Latency (us):".to_owned();
+
+    for (label, _percentile, nanoseconds) in kvset_response_latency {
+        let microseconds = nanoseconds / 1000;
+        kvset_latencies.push_str(&format!(" {label}: {microseconds}"))
+    }
+
+    output!("{kvset_latencies}");
+
+    let mut kvget_latencies = "Client Key-Value GET Response Latency (us):".to_owned();
+
+    for (label, _percentile, nanoseconds) in kvget_response_latency {
+        let microseconds = nanoseconds / 1000;
+        kvget_latencies.push_str(&format!(" {label}: {microseconds}"))
+    }
+
+    output!("{kvget_latencies}");
+
     let mut latencies = "Client Time-to-First-Byte (us):".to_owned();
 
     for (label, _percentile, nanoseconds) in response_ttfb {
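
For a sense of what the two new loops print, here is a standalone sketch of the same formatting, assuming snapshot.percentiles yields (label, percentile, nanoseconds) tuples as the destructuring above suggests; the labels and values are made-up example data, not real measurements:

fn main() {
    // stand-in for snapshot.percentiles(KVGET_RESPONSE_LATENCY_HISTOGRAM)
    let kvget_response_latency: Vec<(&str, f64, u64)> = vec![
        ("p50", 50.0, 812_000),
        ("p99", 99.0, 2_304_000),
        ("p999", 99.9, 5_120_000),
    ];

    let mut kvget_latencies = "Client Key-Value GET Response Latency (us):".to_owned();

    for (label, _percentile, nanoseconds) in kvget_response_latency {
        let microseconds = nanoseconds / 1000;
        kvget_latencies.push_str(&format!(" {label}: {microseconds}"));
    }

    // prints: Client Key-Value GET Response Latency (us): p50: 812 p99: 2304 p999: 5120
    println!("{kvget_latencies}");
}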
