mirror of http://git.haproxy.org/git/haproxy.git/ (synced 2024-12-27 07:02:11 +00:00)
MINOR: compression: Warn for 'compression offload' in defaults sections
This directive is documented as being ignored if set in a defaults section, but that was only mentioned in a small note in the configuration manual. A warning is now emitted instead. To do so, the error handling in the parse_compression_options() function was slightly changed.

In addition, this directive is now documented apart from the other compression directives, so it is clearly visible that it must not be used in a defaults section.
This commit is contained in:
parent 34a3eb4c42
commit 44d34bfbe7
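To illustrate the behaviour introduced by this patch, here is a minimal configuration sketch (section and server names are hypothetical) in which the new warning would be emitted, since 'compression offload' only takes effect in frontend/listen/backend sections:

    defaults
        mode http
        compression algo gzip        # 'algo' and 'type' remain valid in a defaults section
        compression offload          # now triggers a warning and is ignored here

    listen app
        bind :8080
        compression offload          # effective placement: frontend, listen or backend
        server srv1 192.0.2.10:8080

Per the new code below, the message is built from the format string "'%s' : '%s' ignored in 'defaults' section." and the parser sets ret to 1 (warning) instead of -1 (fatal error) in this case.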
@@ -4424,14 +4424,12 @@ clitcpka-intvl <timeout>

 compression algo <algorithm> ...
 compression type <mime type> ...
-compression offload
   Enable HTTP compression.
   May be used in sections :   defaults | frontend | listen | backend
                                  yes   |    yes   |   yes  |   yes
   Arguments :
       algo     is followed by the list of supported compression algorithms.
       type     is followed by the list of MIME types that will be compressed.
-      offload  makes HAProxy work as a compression offloader only (see notes).

   The currently supported algorithms are :
       identity  this is mostly for debugging, and it was useful for developing
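As a quick illustration of the 'algo' and 'type' arguments documented above (a hypothetical backend, not part of this patch), several algorithms and MIME types may be listed after each keyword:

    backend be_static
        mode http
        compression algo gzip deflate
        compression type text/html text/plain text/css application/javascript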
@@ -4465,19 +4463,6 @@ compression offload
   there is Accept-Encoding header in request, HAProxy will compress the
   matching response.

-  The "offload" setting makes HAProxy remove the Accept-Encoding header to
-  prevent backend servers from compressing responses. It is strongly
-  recommended not to do this because this means that all the compression work
-  will be done on the single point where HAProxy is located. However in some
-  deployment scenarios, HAProxy may be installed in front of a buggy gateway
-  with broken HTTP compression implementation which can't be turned off.
-  In that case HAProxy can be used to prevent that gateway from emitting
-  invalid payloads. In this case, simply removing the header in the
-  configuration does not work because it applies before the header is parsed,
-  so that prevents HAProxy from compressing. The "offload" setting should
-  then be used for such scenarios. Note: for now, the "offload" setting is
-  ignored when set in a defaults section.
-
   Compression is disabled when:
     * the request does not advertise a supported compression algorithm in the
       "Accept-Encoding" header
@@ -4501,6 +4486,29 @@ compression offload
         compression algo gzip
         compression type text/html text/plain

+  See also : "compression offload"
+
+compression offload
+  Makes HAProxy work as a compression offloader only.
+  May be used in sections :   defaults | frontend | listen | backend
+                                  no   |    yes   |   yes  |   yes
+
+  The "offload" setting makes HAProxy remove the Accept-Encoding header to
+  prevent backend servers from compressing responses. It is strongly
+  recommended not to do this because this means that all the compression work
+  will be done on the single point where HAProxy is located. However in some
+  deployment scenarios, HAProxy may be installed in front of a buggy gateway
+  with broken HTTP compression implementation which can't be turned off.
+  In that case HAProxy can be used to prevent that gateway from emitting
+  invalid payloads. In this case, simply removing the header in the
+  configuration does not work because it applies before the header is parsed,
+  so that prevents HAProxy from compressing. The "offload" setting should
+  then be used for such scenarios.
+
+  If this setting is used in a defaults section, a warning is emitted and the
+  option is ignored.
+
+  See also : "compression type", "compression algo"

 cookie <name> [ rewrite | insert | prefix ] [ indirect ] [ nocache ]
               [ postonly ] [ preserve ] [ httponly ] [ secure ]
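A minimal sketch of the deployment described by the new "compression offload" section (names and addresses are hypothetical): HAProxy sits in front of a gateway whose broken compression cannot be turned off, strips the Accept-Encoding header with 'compression offload', and performs the compression itself:

    frontend fe_public
        mode http
        bind :80
        default_backend be_gateway

    backend be_gateway
        mode http
        compression algo gzip
        compression type text/html text/plain
        compression offload            # remove Accept-Encoding so the gateway never compresses
        server gw1 192.0.2.20:8080     # the gateway with the broken compression implementation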
@@ -631,6 +631,7 @@ parse_compression_options(char **args, int section, struct proxy *proxy,
                           char **err)
 {
     struct comp *comp;
+    int ret = 0;

     if (proxy->comp == NULL) {
         comp = calloc(1, sizeof(*comp));
@@ -644,58 +645,71 @@ parse_compression_options(char **args, int section, struct proxy *proxy,
         int cur_arg = 2;

         if (!*args[cur_arg]) {
-            memprintf(err, "parsing [%s:%d] : '%s' expects <algorithm>\n",
+            memprintf(err, "parsing [%s:%d] : '%s' expects <algorithm>.",
                       file, line, args[0]);
-            return -1;
+            ret = -1;
+            goto end;
         }
         while (*(args[cur_arg])) {
             int retval = comp_append_algo(comp, args[cur_arg]);
             if (retval) {
                 if (retval < 0)
-                    memprintf(err, "'%s' : '%s' is not a supported algorithm.\n",
+                    memprintf(err, "'%s' : '%s' is not a supported algorithm.",
                               args[0], args[cur_arg]);
                 else
-                    memprintf(err, "'%s' : out of memory while parsing algo '%s'.\n",
+                    memprintf(err, "'%s' : out of memory while parsing algo '%s'.",
                               args[0], args[cur_arg]);
-                return -1;
+                ret = -1;
+                goto end;
             }

             if (proxy->comp->algos->init(&ctx, 9) == 0)
                 proxy->comp->algos->end(&ctx);
             else {
-                memprintf(err, "'%s' : Can't init '%s' algorithm.\n",
+                memprintf(err, "'%s' : Can't init '%s' algorithm.",
                           args[0], args[cur_arg]);
-                return -1;
+                ret = -1;
+                goto end;
             }
             cur_arg++;
             continue;
         }
     }
-    else if (strcmp(args[1], "offload") == 0)
+    else if (strcmp(args[1], "offload") == 0) {
+        if (proxy->cap & PR_CAP_DEF) {
+            memprintf(err, "'%s' : '%s' ignored in 'defaults' section.",
+                      args[0], args[1]);
+            ret = 1;
+        }
         comp->offload = 1;
+    }
     else if (strcmp(args[1], "type") == 0) {
         int cur_arg = 2;

         if (!*args[cur_arg]) {
-            memprintf(err, "'%s' expects <type>\n", args[0]);
-            return -1;
+            memprintf(err, "'%s' expects <type>.", args[0]);
+            ret = -1;
+            goto end;
         }
         while (*(args[cur_arg])) {
             if (comp_append_type(comp, args[cur_arg])) {
                 memprintf(err, "'%s': out of memory.", args[0]);
-                return -1;
+                ret = -1;
+                goto end;
             }
             cur_arg++;
             continue;
         }
     }
     else {
-        memprintf(err, "'%s' expects 'algo', 'type' or 'offload'\n",
+        memprintf(err, "'%s' expects 'algo', 'type' or 'offload'",
                   args[0]);
-        return -1;
+        ret = -1;
+        goto end;
     }

-    return 0;
+  end:
+    return ret;
 }

 static int
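For completeness, a hypothetical configuration line that would hit the final 'else' branch above: any keyword other than 'algo', 'type' or 'offload' after 'compression' is rejected with the "expects 'algo', 'type' or 'offload'" error:

    backend be_app
        compression foobar gzip    # hypothetical invalid keyword, rejected by the parser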