avcodec/dcadec: remove fishy FFMAX()

These were intended to maintain the previous behavior before dca_dmix_code(),
but it is unclear (to me) which way is correct, and no sample seems to trigger
the case; they are also incomplete for the purpose of error checking.

Found-by: Niels Möller <nisse@lysator.liu.se>
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
This commit is contained in:
Michael Niedermayer 2014-06-04 21:44:49 +02:00
parent 6a0f9f27d5
commit 47313bbb5f
1 changed file with 2 additions and 2 deletions

View File

@ -601,7 +601,7 @@ static int dca_parse_audio_coding_header(DCAContext *s, int base_channel,
if (get_bits1(&s->gb)) {
embedded_downmix = get_bits1(&s->gb);
coeff = get_bits(&s->gb, 6);
scale_factor = -1.0f / dca_dmix_code(FFMAX(coeff<<2, 4)-3);
scale_factor = -1.0f / dca_dmix_code((coeff<<2)-3);
s->xxch_dmix_sf[s->xxch_chset] = scale_factor;
@ -622,7 +622,7 @@ static int dca_parse_audio_coding_header(DCAContext *s, int base_channel,
coeff = get_bits(&s->gb, 7);
ichan = dca_xxch2index(s, 1 << i);
s->xxch_dmix_coeff[j][ichan] = dca_dmix_code(FFMAX(coeff<<2, 3)-3);
s->xxch_dmix_coeff[j][ichan] = dca_dmix_code((coeff<<2)-3);
}
}
}