avcodec/lagarithrac: lag_get_rac: use normal division

Maybe 0.3% faster, and the resulting code is simpler.
This commit is contained in:
Michael Niedermayer 2014-05-27 01:36:23 +02:00
parent d8ae0dfd99
commit fbaf73a33d
2 changed files with 3 additions and 15 deletions

View File

@ -53,7 +53,4 @@ void ff_lag_rac_init(lag_rac *l, GetBitContext *gb, int length)
j++;
l->range_hash[i] = j;
}
/* Add conversion factor to hash_shift so we don't have to in lag_get_rac. */
l->hash_shift += 23;
}

View File

@ -72,9 +72,8 @@ static inline void lag_rac_refill(lag_rac *l)
*/
static inline uint8_t lag_get_rac(lag_rac *l)
{
unsigned range_scaled, low_scaled, div;
unsigned range_scaled, low_scaled;
int val;
uint8_t shift;
lag_rac_refill(l);
@ -85,16 +84,8 @@ static inline uint8_t lag_get_rac(lag_rac *l)
if (l->low < range_scaled * l->prob[1]) {
val = 0;
} else {
/* FIXME __builtin_clz is ~20% faster here, but not allowed in generic code. */
shift = 30 - av_log2(range_scaled);
div = ((range_scaled << shift) + (1 << 23) - 1) >> 23;
/* low>>24 ensures that any cases too big for exact FASTDIV are
* under- rather than over-estimated
*/
low_scaled = FASTDIV(l->low - (l->low >> 24), div);
shift -= l->hash_shift;
low_scaled = (low_scaled >> (32 - shift));
/* low_scaled is now a lower bound of low/range_scaled */
low_scaled = l->low / (range_scaled<<(l->hash_shift));
val = l->range_hash[low_scaled];
while (l->low >= range_scaled * l->prob[val + 1])
val++;