-
Notifications
You must be signed in to change notification settings - Fork 378
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
hash: Treat a waiting list match as a revalidation
If a fetch succeeded and an object was inserted as cacheable, it means that the VCL did not interpret the backend response as revalidated and shareable with other clients on the waiting list. After changing the rush to operate based on objcore flags instead of the call site deciding how many clients to wake up, the same objcore reference is now passed to requests before they reembark a worker. This way when an objcore is already present during lookup we can attempt a cache hit directly on the objcore that triggered the rush, removing a degree of uncertainty in the waiting list behavior. Instead of repurposing the req::hash_objhead field into an equivalent req::hash_objcore, the field is actually removed. In order to signal that a request comes back from its waiting list, the life time of the req::waitinglist flag is extended until cnt_lookup() is reentered. If the rushed objcore matches a request, the lookup can result in a hit without entering the regular lookup critical section. The objhead lock is briefly acquired to release req's reference on the objhead, to rely solely on the objcore's objhead reference like a normal hit, and generally perform hit-related operations. This change brings back the exponential rush of cacheable objects briefly neutered. This shifts the infamous waiting list serialization phenomenon to the vary header match. Since most spurious rushes at every corner of objhead activity are gone, this change puts all the spurious activity on the incompatible variants alone instead of all objects on more occasions. If a cacheable object was inserted in the cache, but already expired, this behavior enables cache hits. This can be common with multi-tier Varnish setups where one Varnish server may serve a graced object to another, but true of any origin server that may serve stale yet valid responses. The waiting list enables a proper response-wide no-cache behavior from now on, but the built-in VCL prevents it by default. 
This is also the first step towards implementing no-cache and private support at the header field granularity.
- Loading branch information
Showing
5 changed files
with
236 additions
and
28 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,156 @@ | ||
# Waiting-list revalidation test: a request parked on the waiting list
# can score a hit on the rushed objcore, even when the object that was
# just inserted is already expired.
varnishtest "successful expired waiting list hit"

# Two rendezvous points per waiting-list scenario: the first parks a
# second client behind the busy objcore, the second releases the
# backend response.
barrier b1 cond 2
barrier b2 cond 2
barrier b3 cond 2
barrier b4 cond 2
||
server s1 {
	# First fetch, triggered by c1's miss: the response is expired
	# on arrival (Age exceeds max-age), while c2 waits on barriers.
	rxreq
	expect req.http.user-agent == c1
	expect req.http.bgfetch == false
	barrier b1 sync
	barrier b2 sync
	txresp -hdr "Cache-Control: max-age=60" -hdr "Age: 120"

	# Background fetch triggered by c3's grace hit.
	rxreq
	expect req.http.user-agent == c3
	expect req.http.bgfetch == true
	txresp

	# The no-cache case only works with a complicit VCL, for now.
	rxreq
	expect req.http.user-agent == c4
	expect req.http.bgfetch == false
	barrier b3 sync
	barrier b4 sync
	txresp -hdr "Cache-Control: no-cache"

	# c6 never sat on the waiting list, so it misses again.
	rxreq
	expect req.http.user-agent == c6
	expect req.http.bgfetch == false
	txresp -hdr "Cache-Control: no-cache"
} -start
|
||
# Single thread pool plus waitinglist debugging keeps the rush
# behavior observable and deterministic in the logs.
varnish v1 -cliok "param.set default_grace 1h"
varnish v1 -cliok "param.set thread_pools 1"
varnish v1 -cliok "param.set debug +syncvsl,+waitinglist"
varnish v1 -vcl+backend {
	sub vcl_backend_fetch {
		set bereq.http.bgfetch = bereq.is_bgfetch;
	}
	sub vcl_beresp_stale {
		# We just validated a stale object, do not mark it as
		# uncacheable. The object remains available for grace
		# hits and background fetches.
		return;
	}
	sub vcl_beresp_control {
		if (beresp.http.cache-control == "no-cache") {
			# Keep beresp.uncacheable clear.
			return;
		}
	}
	sub vcl_deliver {
		set resp.http.obj-hits = obj.hits;
		set resp.http.obj-ttl = obj.ttl;
	}
} -start
|
||
# c1 misses and goes to the backend; c2 then lands on the waiting
# list and is served the freshly inserted, already-expired object.
client c1 {
	txreq -url "/stale-hit"
	rxresp
	expect resp.status == 200
	expect resp.http.x-varnish == 1001
	expect resp.http.obj-hits == 0
	expect resp.http.obj-ttl < 0
} -start

barrier b1 sync

client c2 {
	txreq -url "/stale-hit"
	rxresp
	expect resp.status == 200
	expect resp.http.x-varnish == "1004 1002"
	expect resp.http.obj-hits == 1
	expect resp.http.obj-ttl < 0
} -start

# Only release the backend response once c2 is parked.
varnish v1 -expect busy_sleep == 1
barrier b2 sync

client c1 -wait
client c2 -wait

varnish v1 -vsl_catchup

# c2's delivery counts as a regular hit, not a grace hit, and no
# background fetch was scheduled.
varnish v1 -expect cache_miss == 1
varnish v1 -expect cache_hit == 1
varnish v1 -expect cache_hit_grace == 0
varnish v1 -expect s_bgfetch == 0
|
||
# A later client gets a grace hit on the expired object, which
# triggers a background fetch this time.
client c3 {
	txreq -url "/stale-hit"
	rxresp
	expect resp.status == 200
	expect resp.http.x-varnish == "1006 1002"
	expect resp.http.obj-hits == 2
	expect resp.http.obj-ttl < 0
} -run

varnish v1 -vsl_catchup

varnish v1 -expect cache_miss == 1
varnish v1 -expect cache_hit == 2
varnish v1 -expect cache_hit_grace == 1
varnish v1 -expect s_bgfetch == 1

# The only way for a plain no-cache to be hit is to have a non-zero keep.
varnish v1 -cliok "param.set default_ttl 0"
varnish v1 -cliok "param.set default_grace 0"
varnish v1 -cliok "param.set default_keep 1h"
|
||
# Same waiting-list dance for a no-cache response: c5 parks behind
# c4 and still gets a hit on the rushed objcore.
client c4 {
	txreq -url "/no-cache-hit"
	rxresp
	expect resp.status == 200
	expect resp.http.x-varnish == 1009
	expect resp.http.obj-hits == 0
	expect resp.http.obj-ttl <= 0
} -start

barrier b3 sync

client c5 {
	txreq -url "/no-cache-hit"
	rxresp
	expect resp.status == 200
	expect resp.http.x-varnish == "1012 1010"
	expect resp.http.obj-hits == 1
	expect resp.http.obj-ttl <= 0
} -start

# Only release the backend response once c5 is parked.
varnish v1 -expect busy_sleep == 2
barrier b4 sync

client c4 -wait
client c5 -wait

varnish v1 -vsl_catchup

varnish v1 -expect cache_miss == 2
varnish v1 -expect cache_hit == 3
varnish v1 -expect cache_hit_grace == 1
varnish v1 -expect s_bgfetch == 1

# No hit when not on the waiting list
client c6 {
	txreq -url "/no-cache-hit"
	rxresp
	expect resp.status == 200
	expect resp.http.x-varnish == 1014
	expect resp.http.obj-hits == 0
	expect resp.http.obj-ttl <= 0
} -run