Merge remote-tracking branch 'qatar/master'

* qatar/master:
  h264: clear trailing bits in partially parsed NAL units
  vc1: Handle WVC1 interlaced stream
  xl: Fix overreads
  mpegts: rename payload_index to payload_size
  segment: introduce segmented chain muxer
  lavu: add AVERROR_BUG error value
  avplay: clear pkt_temp when pkt is freed.
  qcelpdec: K&R formatting cosmetics
  qcelpdec: cosmetics: drop some pointless parentheses
  x86: conditionally compile dnxhd encoder optimizations
  Revert "h264: skip start code search if the size of the nal unit is known"
  swscale: fix formatting and indentation of unscaled conversion routines.
  h264: skip start code search if the size of the nal unit is known
  cljr: fix buf_size sanity check
  cljr: Check if width and height are positive integers

Conflicts:
	libavcodec/cljr.c
	libavcodec/vc1dec.c
	libavformat/Makefile
	libavformat/mpegtsenc.c
	libavformat/segment.c
	libswscale/swscale_unscaled.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Michael Niedermayer 2011-12-20 01:54:41 +01:00
commit 0edf7ebcd6
13 changed files with 737 additions and 599 deletions


@ -138,6 +138,7 @@ easier to use. The changes are:
- v410 Quicktime Uncompressed 4:4:4 10-bit encoder and decoder
- SBaGen (SBG) binaural beats script demuxer
- OpenMG Audio muxer
- Simple segmenting muxer
version 0.8:


@ -90,6 +90,7 @@ ffmpeg -i INPUT -c:a pcm_u8 -c:v mpeg2video -f framecrc -
See also the @ref{crc} muxer.
@anchor{image2}
@section image2
Image file muxer.
@ -285,4 +286,35 @@ For example a 3D WebM clip can be created using the following command line:
ffmpeg -i sample_left_right_clip.mpg -an -c:v libvpx -metadata stereo_mode=left_right -y stereo_clip.webm
@end example
@section segment
Basic stream segmenter.
The segmenter muxer outputs streams to a number of separate files of nearly
fixed duration. The output filename pattern can be set in a fashion similar to
@ref{image2}.
Every segment starts with a video keyframe, if a video stream is present.
The segment muxer works best with a single constant frame rate video.
Optionally it can generate a flat list of the created segments, one segment
per line.
@table @option
@item segment_format @var{format}
Override the inner container format; by default it is guessed from the filename
extension.
@item segment_time @var{t}
Set segment duration to @var{t} seconds.
@item segment_list @var{name}
Also generate a listfile named @var{name}.
@item segment_list_size @var{size}
Overwrite the listfile once it reaches @var{size} entries.
@end table
@example
ffmpeg -i in.mkv -c copy -map 0 -f segment -segment_list out.list out%03d.nut
@end example
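As a further illustration (a command assembled only from the options documented
above; it is not part of the original patch and is untested), the segment
duration and the list behaviour can be set explicitly:
@example
ffmpeg -i in.mkv -c copy -map 0 -f segment -segment_time 10 -segment_list out.list -segment_list_size 5 out%03d.nut
@end example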
@c man end MUXERS


@ -3798,7 +3798,7 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
int consumed;
int dst_length;
int bit_length;
const uint8_t *ptr;
uint8_t *ptr;
int i, nalsize = 0;
int err;


@ -44,8 +44,7 @@
#undef NDEBUG
#include <assert.h>
typedef enum
{
typedef enum {
I_F_Q = -1, /**< insufficient frame quality */
SILENCE,
RATE_OCTAVE,
@ -54,8 +53,7 @@ typedef enum
RATE_FULL
} qcelp_packet_rate;
typedef struct
{
typedef struct {
AVFrame avframe;
GetBitContext gb;
qcelp_packet_rate bitrate;
@ -95,8 +93,8 @@ static av_cold int qcelp_decode_init(AVCodecContext *avctx)
avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
for(i=0; i<10; i++)
q->prev_lspf[i] = (i+1)/11.;
for (i = 0; i < 10; i++)
q->prev_lspf[i] = (i + 1) / 11.;
avcodec_get_frame_defaults(&q->avframe);
avctx->coded_frame = &q->avframe;
@ -122,69 +120,69 @@ static int decode_lspf(QCELPContext *q, float *lspf)
const float *predictors;
if (q->bitrate == RATE_OCTAVE || q->bitrate == I_F_Q) {
predictors = (q->prev_bitrate != RATE_OCTAVE &&
q->prev_bitrate != I_F_Q ?
q->prev_lspf : q->predictor_lspf);
predictors = q->prev_bitrate != RATE_OCTAVE &&
q->prev_bitrate != I_F_Q ? q->prev_lspf
: q->predictor_lspf;
if (q->bitrate == RATE_OCTAVE) {
q->octave_count++;
for (i=0; i<10; i++) {
for (i = 0; i < 10; i++) {
q->predictor_lspf[i] =
lspf[i] = (q->frame.lspv[i] ? QCELP_LSP_SPREAD_FACTOR
: -QCELP_LSP_SPREAD_FACTOR)
+ predictors[i] * QCELP_LSP_OCTAVE_PREDICTOR
+ (i + 1) * ((1 - QCELP_LSP_OCTAVE_PREDICTOR)/11);
: -QCELP_LSP_SPREAD_FACTOR) +
predictors[i] * QCELP_LSP_OCTAVE_PREDICTOR +
(i + 1) * ((1 - QCELP_LSP_OCTAVE_PREDICTOR) / 11);
}
smooth = (q->octave_count < 10 ? .875 : 0.1);
smooth = q->octave_count < 10 ? .875 : 0.1;
} else {
erasure_coeff = QCELP_LSP_OCTAVE_PREDICTOR;
assert(q->bitrate == I_F_Q);
if(q->erasure_count > 1)
erasure_coeff *= (q->erasure_count < 4 ? 0.9 : 0.7);
if (q->erasure_count > 1)
erasure_coeff *= q->erasure_count < 4 ? 0.9 : 0.7;
for(i = 0; i < 10; i++) {
for (i = 0; i < 10; i++) {
q->predictor_lspf[i] =
lspf[i] = (i + 1) * ( 1 - erasure_coeff)/11
+ erasure_coeff * predictors[i];
lspf[i] = (i + 1) * (1 - erasure_coeff) / 11 +
erasure_coeff * predictors[i];
}
smooth = 0.125;
}
// Check the stability of the LSP frequencies.
lspf[0] = FFMAX(lspf[0], QCELP_LSP_SPREAD_FACTOR);
for(i=1; i<10; i++)
lspf[i] = FFMAX(lspf[i], (lspf[i-1] + QCELP_LSP_SPREAD_FACTOR));
for (i = 1; i < 10; i++)
lspf[i] = FFMAX(lspf[i], lspf[i - 1] + QCELP_LSP_SPREAD_FACTOR);
lspf[9] = FFMIN(lspf[9], (1.0 - QCELP_LSP_SPREAD_FACTOR));
for(i=9; i>0; i--)
lspf[i-1] = FFMIN(lspf[i-1], (lspf[i] - QCELP_LSP_SPREAD_FACTOR));
lspf[9] = FFMIN(lspf[9], 1.0 - QCELP_LSP_SPREAD_FACTOR);
for (i = 9; i > 0; i--)
lspf[i - 1] = FFMIN(lspf[i - 1], lspf[i] - QCELP_LSP_SPREAD_FACTOR);
// Low-pass filter the LSP frequencies.
ff_weighted_vector_sumf(lspf, lspf, q->prev_lspf, smooth, 1.0-smooth, 10);
ff_weighted_vector_sumf(lspf, lspf, q->prev_lspf, smooth, 1.0 - smooth, 10);
} else {
q->octave_count = 0;
tmp_lspf = 0.;
for (i = 0; i < 5; i++) {
lspf[2*i+0] = tmp_lspf += qcelp_lspvq[i][q->frame.lspv[i]][0] * 0.0001;
lspf[2*i+1] = tmp_lspf += qcelp_lspvq[i][q->frame.lspv[i]][1] * 0.0001;
lspf[2 * i + 0] = tmp_lspf += qcelp_lspvq[i][q->frame.lspv[i]][0] * 0.0001;
lspf[2 * i + 1] = tmp_lspf += qcelp_lspvq[i][q->frame.lspv[i]][1] * 0.0001;
}
// Check for badly received packets.
if (q->bitrate == RATE_QUARTER) {
if(lspf[9] <= .70 || lspf[9] >= .97)
if (lspf[9] <= .70 || lspf[9] >= .97)
return -1;
for(i=3; i<10; i++)
if(fabs(lspf[i] - lspf[i-2]) < .08)
for (i = 3; i < 10; i++)
if (fabs(lspf[i] - lspf[i - 2]) < .08)
return -1;
} else {
if(lspf[9] <= .66 || lspf[9] >= .985)
if (lspf[9] <= .66 || lspf[9] >= .985)
return -1;
for(i=4; i<10; i++)
if (fabs(lspf[i] - lspf[i-4]) < .0931)
for (i = 4; i < 10; i++)
if (fabs(lspf[i] - lspf[i - 4]) < .0931)
return -1;
}
}
@ -199,72 +197,72 @@ static int decode_lspf(QCELPContext *q, float *lspf)
*
* TIA/EIA/IS-733 2.4.6.2
*/
static void decode_gain_and_index(QCELPContext *q,
float *gain) {
int i, subframes_count, g1[16];
static void decode_gain_and_index(QCELPContext *q, float *gain)
{
int i, subframes_count, g1[16];
float slope;
if (q->bitrate >= RATE_QUARTER) {
switch (q->bitrate) {
case RATE_FULL: subframes_count = 16; break;
case RATE_HALF: subframes_count = 4; break;
default: subframes_count = 5;
case RATE_FULL: subframes_count = 16; break;
case RATE_HALF: subframes_count = 4; break;
default: subframes_count = 5;
}
for(i = 0; i < subframes_count; i++) {
for (i = 0; i < subframes_count; i++) {
g1[i] = 4 * q->frame.cbgain[i];
if (q->bitrate == RATE_FULL && !((i+1) & 3)) {
g1[i] += av_clip((g1[i-1] + g1[i-2] + g1[i-3]) / 3 - 6, 0, 32);
if (q->bitrate == RATE_FULL && !((i + 1) & 3)) {
g1[i] += av_clip((g1[i - 1] + g1[i - 2] + g1[i - 3]) / 3 - 6, 0, 32);
}
gain[i] = qcelp_g12ga[g1[i]];
if (q->frame.cbsign[i]) {
gain[i] = -gain[i];
q->frame.cindex[i] = (q->frame.cindex[i]-89) & 127;
q->frame.cindex[i] = (q->frame.cindex[i] - 89) & 127;
}
}
q->prev_g1[0] = g1[i-2];
q->prev_g1[1] = g1[i-1];
q->last_codebook_gain = qcelp_g12ga[g1[i-1]];
q->prev_g1[0] = g1[i - 2];
q->prev_g1[1] = g1[i - 1];
q->last_codebook_gain = qcelp_g12ga[g1[i - 1]];
if (q->bitrate == RATE_QUARTER) {
// Provide smoothing of the unvoiced excitation energy.
gain[7] = gain[4];
gain[6] = 0.4*gain[3] + 0.6*gain[4];
gain[5] = gain[3];
gain[4] = 0.8*gain[2] + 0.2*gain[3];
gain[3] = 0.2*gain[1] + 0.8*gain[2];
gain[2] = gain[1];
gain[1] = 0.6*gain[0] + 0.4*gain[1];
gain[7] = gain[4];
gain[6] = 0.4 * gain[3] + 0.6 * gain[4];
gain[5] = gain[3];
gain[4] = 0.8 * gain[2] + 0.2 * gain[3];
gain[3] = 0.2 * gain[1] + 0.8 * gain[2];
gain[2] = gain[1];
gain[1] = 0.6 * gain[0] + 0.4 * gain[1];
}
} else if (q->bitrate != SILENCE) {
if (q->bitrate == RATE_OCTAVE) {
g1[0] = 2 * q->frame.cbgain[0]
+ av_clip((q->prev_g1[0] + q->prev_g1[1]) / 2 - 5, 0, 54);
g1[0] = 2 * q->frame.cbgain[0] +
av_clip((q->prev_g1[0] + q->prev_g1[1]) / 2 - 5, 0, 54);
subframes_count = 8;
} else {
assert(q->bitrate == I_F_Q);
g1[0] = q->prev_g1[1];
switch (q->erasure_count) {
case 1 : break;
case 2 : g1[0] -= 1; break;
case 3 : g1[0] -= 2; break;
default: g1[0] -= 6;
case 1 : break;
case 2 : g1[0] -= 1; break;
case 3 : g1[0] -= 2; break;
default: g1[0] -= 6;
}
if(g1[0] < 0)
if (g1[0] < 0)
g1[0] = 0;
subframes_count = 4;
}
// This interpolation is done to produce smoother background noise.
slope = 0.5*(qcelp_g12ga[g1[0]] - q->last_codebook_gain) / subframes_count;
for(i=1; i<=subframes_count; i++)
gain[i-1] = q->last_codebook_gain + slope * i;
slope = 0.5 * (qcelp_g12ga[g1[0]] - q->last_codebook_gain) / subframes_count;
for (i = 1; i <= subframes_count; i++)
gain[i - 1] = q->last_codebook_gain + slope * i;
q->last_codebook_gain = gain[i-2];
q->prev_g1[0] = q->prev_g1[1];
q->prev_g1[1] = g1[0];
q->last_codebook_gain = gain[i - 2];
q->prev_g1[0] = q->prev_g1[1];
q->prev_g1[1] = g1[0];
}
}
@ -279,13 +277,13 @@ static void decode_gain_and_index(QCELPContext *q,
*/
static int codebook_sanity_check_for_rate_quarter(const uint8_t *cbgain)
{
int i, diff, prev_diff=0;
int i, diff, prev_diff = 0;
for(i=1; i<5; i++) {
for (i = 1; i < 5; i++) {
diff = cbgain[i] - cbgain[i-1];
if(FFABS(diff) > 10)
if (FFABS(diff) > 10)
return -1;
else if(FFABS(diff - prev_diff) > 12)
else if (FFABS(diff - prev_diff) > 12)
return -1;
prev_diff = diff;
}
@ -316,73 +314,74 @@ static int codebook_sanity_check_for_rate_quarter(const uint8_t *cbgain)
static void compute_svector(QCELPContext *q, const float *gain,
float *cdn_vector)
{
int i, j, k;
int i, j, k;
uint16_t cbseed, cindex;
float *rnd, tmp_gain, fir_filter_value;
float *rnd, tmp_gain, fir_filter_value;
switch (q->bitrate) {
case RATE_FULL:
for (i = 0; i < 16; i++) {
tmp_gain = gain[i] * QCELP_RATE_FULL_CODEBOOK_RATIO;
cindex = -q->frame.cindex[i];
for(j=0; j<10; j++)
*cdn_vector++ = tmp_gain * qcelp_rate_full_codebook[cindex++ & 127];
}
case RATE_FULL:
for (i = 0; i < 16; i++) {
tmp_gain = gain[i] * QCELP_RATE_FULL_CODEBOOK_RATIO;
cindex = -q->frame.cindex[i];
for (j = 0; j < 10; j++)
*cdn_vector++ = tmp_gain * qcelp_rate_full_codebook[cindex++ & 127];
}
break;
case RATE_HALF:
for (i = 0; i < 4; i++) {
tmp_gain = gain[i] * QCELP_RATE_HALF_CODEBOOK_RATIO;
cindex = -q->frame.cindex[i];
for (j = 0; j < 40; j++)
case RATE_HALF:
for (i = 0; i < 4; i++) {
tmp_gain = gain[i] * QCELP_RATE_HALF_CODEBOOK_RATIO;
cindex = -q->frame.cindex[i];
for (j = 0; j < 40; j++)
*cdn_vector++ = tmp_gain * qcelp_rate_half_codebook[cindex++ & 127];
}
}
break;
case RATE_QUARTER:
cbseed = (0x0003 & q->frame.lspv[4])<<14 |
(0x003F & q->frame.lspv[3])<< 8 |
(0x0060 & q->frame.lspv[2])<< 1 |
(0x0007 & q->frame.lspv[1])<< 3 |
(0x0038 & q->frame.lspv[0])>> 3 ;
rnd = q->rnd_fir_filter_mem + 20;
for (i = 0; i < 8; i++) {
tmp_gain = gain[i] * (QCELP_SQRT1887 / 32768.0);
for (k = 0; k < 20; k++) {
cbseed = 521 * cbseed + 259;
*rnd = (int16_t)cbseed;
case RATE_QUARTER:
cbseed = (0x0003 & q->frame.lspv[4]) << 14 |
(0x003F & q->frame.lspv[3]) << 8 |
(0x0060 & q->frame.lspv[2]) << 1 |
(0x0007 & q->frame.lspv[1]) << 3 |
(0x0038 & q->frame.lspv[0]) >> 3;
rnd = q->rnd_fir_filter_mem + 20;
for (i = 0; i < 8; i++) {
tmp_gain = gain[i] * (QCELP_SQRT1887 / 32768.0);
for (k = 0; k < 20; k++) {
cbseed = 521 * cbseed + 259;
*rnd = (int16_t) cbseed;
// FIR filter
fir_filter_value = 0.0;
for(j=0; j<10; j++)
fir_filter_value += qcelp_rnd_fir_coefs[j ]
* (rnd[-j ] + rnd[-20+j]);
fir_filter_value = 0.0;
for (j = 0; j < 10; j++)
fir_filter_value += qcelp_rnd_fir_coefs[j] *
(rnd[-j] + rnd[-20+j]);
fir_filter_value += qcelp_rnd_fir_coefs[10] * rnd[-10];
*cdn_vector++ = tmp_gain * fir_filter_value;
rnd++;
}
fir_filter_value += qcelp_rnd_fir_coefs[10] * rnd[-10];
*cdn_vector++ = tmp_gain * fir_filter_value;
rnd++;
}
memcpy(q->rnd_fir_filter_mem, q->rnd_fir_filter_mem + 160, 20 * sizeof(float));
}
memcpy(q->rnd_fir_filter_mem, q->rnd_fir_filter_mem + 160,
20 * sizeof(float));
break;
case RATE_OCTAVE:
cbseed = q->first16bits;
for (i = 0; i < 8; i++) {
tmp_gain = gain[i] * (QCELP_SQRT1887 / 32768.0);
for (j = 0; j < 20; j++) {
cbseed = 521 * cbseed + 259;
*cdn_vector++ = tmp_gain * (int16_t)cbseed;
}
case RATE_OCTAVE:
cbseed = q->first16bits;
for (i = 0; i < 8; i++) {
tmp_gain = gain[i] * (QCELP_SQRT1887 / 32768.0);
for (j = 0; j < 20; j++) {
cbseed = 521 * cbseed + 259;
*cdn_vector++ = tmp_gain * (int16_t) cbseed;
}
}
break;
case I_F_Q:
cbseed = -44; // random codebook index
for (i = 0; i < 4; i++) {
tmp_gain = gain[i] * QCELP_RATE_FULL_CODEBOOK_RATIO;
for(j=0; j<40; j++)
*cdn_vector++ = tmp_gain * qcelp_rate_full_codebook[cbseed++ & 127];
}
case I_F_Q:
cbseed = -44; // random codebook index
for (i = 0; i < 4; i++) {
tmp_gain = gain[i] * QCELP_RATE_FULL_CODEBOOK_RATIO;
for (j = 0; j < 40; j++)
*cdn_vector++ = tmp_gain * qcelp_rate_full_codebook[cbseed++ & 127];
}
break;
case SILENCE:
memset(cdn_vector, 0, 160 * sizeof(float));
case SILENCE:
memset(cdn_vector, 0, 160 * sizeof(float));
break;
}
}
@ -396,8 +395,7 @@ static void compute_svector(QCELPContext *q, const float *gain,
*
* TIA/EIA/IS-733 2.4.8.3, 2.4.8.6
*/
static void apply_gain_ctrl(float *v_out, const float *v_ref,
const float *v_in)
static void apply_gain_ctrl(float *v_out, const float *v_ref, const float *v_in)
{
int i;
@ -429,8 +427,8 @@ static const float *do_pitchfilter(float memory[303], const float v_in[160],
const float gain[4], const uint8_t *lag,
const uint8_t pfrac[4])
{
int i, j;
float *v_lag, *v_out;
int i, j;
float *v_lag, *v_out;
const float *v_len;
v_out = memory + 143; // Output vector starts at memory[143].
@ -440,9 +438,9 @@ static const float *do_pitchfilter(float memory[303], const float v_in[160],
v_lag = memory + 143 + 40 * i - lag[i];
for (v_len = v_in + 40; v_in < v_len; v_in++) {
if (pfrac[i]) { // If it is a fractional lag...
for(j=0, *v_out=0.; j<4; j++)
*v_out += qcelp_hammsinc_table[j] * (v_lag[j-4] + v_lag[3-j]);
}else
for (j = 0, *v_out = 0.; j < 4; j++)
*v_out += qcelp_hammsinc_table[j] * (v_lag[j - 4] + v_lag[3 - j]);
} else
*v_out = *v_lag;
*v_out = *v_in + gain[i] * *v_out;
@ -470,15 +468,13 @@ static const float *do_pitchfilter(float memory[303], const float v_in[160],
*/
static void apply_pitch_filters(QCELPContext *q, float *cdn_vector)
{
int i;
int i;
const float *v_synthesis_filtered, *v_pre_filtered;
if(q->bitrate >= RATE_HALF ||
q->bitrate == SILENCE ||
(q->bitrate == I_F_Q && (q->prev_bitrate >= RATE_HALF))) {
if(q->bitrate >= RATE_HALF) {
if (q->bitrate >= RATE_HALF || q->bitrate == SILENCE ||
(q->bitrate == I_F_Q && (q->prev_bitrate >= RATE_HALF))) {
if (q->bitrate >= RATE_HALF) {
// Compute gain & lag for the whole frame.
for (i = 0; i < 4; i++) {
q->pitch_gain[i] = q->frame.plag[i] ? (q->frame.pgain[i] + 1) * 0.25 : 0.0;
@ -497,7 +493,7 @@ static void apply_pitch_filters(QCELPContext *q, float *cdn_vector)
assert(q->bitrate == SILENCE);
max_pitch_gain = 1.0;
}
for(i=0; i<4; i++)
for (i = 0; i < 4; i++)
q->pitch_gain[i] = FFMIN(q->pitch_gain[i], max_pitch_gain);
memset(q->frame.pfrac, 0, sizeof(q->frame.pfrac));
@ -509,18 +505,17 @@ static void apply_pitch_filters(QCELPContext *q, float *cdn_vector)
q->pitch_lag, q->frame.pfrac);
// pitch prefilter update
for(i=0; i<4; i++)
for (i = 0; i < 4; i++)
q->pitch_gain[i] = 0.5 * FFMIN(q->pitch_gain[i], 1.0);
v_pre_filtered = do_pitchfilter(q->pitch_pre_filter_mem,
v_synthesis_filtered,
q->pitch_gain, q->pitch_lag,
q->frame.pfrac);
v_pre_filtered = do_pitchfilter(q->pitch_pre_filter_mem,
v_synthesis_filtered,
q->pitch_gain, q->pitch_lag,
q->frame.pfrac);
apply_gain_ctrl(cdn_vector, v_synthesis_filtered, v_pre_filtered);
} else {
memcpy(q->pitch_synthesis_filter_mem, cdn_vector + 17,
143 * sizeof(float));
memcpy(q->pitch_synthesis_filter_mem, cdn_vector + 17, 143 * sizeof(float));
memcpy(q->pitch_pre_filter_mem, cdn_vector + 17, 143 * sizeof(float));
memset(q->pitch_gain, 0, sizeof(q->pitch_gain));
memset(q->pitch_lag, 0, sizeof(q->pitch_lag));
@ -543,15 +538,15 @@ static void lspf2lpc(const float *lspf, float *lpc)
{
double lsp[10];
double bandwidth_expansion_coeff = QCELP_BANDWIDTH_EXPANSION_COEFF;
int i;
int i;
for (i=0; i<10; i++)
for (i = 0; i < 10; i++)
lsp[i] = cos(M_PI * lspf[i]);
ff_acelp_lspd2lpc(lsp, lpc, 5);
for (i = 0; i < 10; i++) {
lpc[i] *= bandwidth_expansion_coeff;
lpc[i] *= bandwidth_expansion_coeff;
bandwidth_expansion_coeff *= QCELP_BANDWIDTH_EXPANSION_COEFF;
}
}
@ -573,9 +568,9 @@ static void interpolate_lpc(QCELPContext *q, const float *curr_lspf,
float interpolated_lspf[10];
float weight;
if(q->bitrate >= RATE_QUARTER)
if (q->bitrate >= RATE_QUARTER)
weight = 0.25 * (subframe_num + 1);
else if(q->bitrate == RATE_OCTAVE && !subframe_num)
else if (q->bitrate == RATE_OCTAVE && !subframe_num)
weight = 0.625;
else
weight = 1.0;
@ -584,21 +579,21 @@ static void interpolate_lpc(QCELPContext *q, const float *curr_lspf,
ff_weighted_vector_sumf(interpolated_lspf, curr_lspf, q->prev_lspf,
weight, 1.0 - weight, 10);
lspf2lpc(interpolated_lspf, lpc);
}else if(q->bitrate >= RATE_QUARTER ||
(q->bitrate == I_F_Q && !subframe_num))
} else if (q->bitrate >= RATE_QUARTER ||
(q->bitrate == I_F_Q && !subframe_num))
lspf2lpc(curr_lspf, lpc);
else if(q->bitrate == SILENCE && !subframe_num)
else if (q->bitrate == SILENCE && !subframe_num)
lspf2lpc(q->prev_lspf, lpc);
}
static qcelp_packet_rate buf_size2bitrate(const int buf_size)
{
switch (buf_size) {
case 35: return RATE_FULL;
case 17: return RATE_HALF;
case 8: return RATE_QUARTER;
case 4: return RATE_OCTAVE;
case 1: return SILENCE;
case 35: return RATE_FULL;
case 17: return RATE_HALF;
case 8: return RATE_QUARTER;
case 4: return RATE_OCTAVE;
case 1: return SILENCE;
}
return I_F_Q;
@ -616,8 +611,9 @@ static qcelp_packet_rate buf_size2bitrate(const int buf_size)
*
* TIA/EIA/IS-733 2.4.8.7.1
*/
static qcelp_packet_rate determine_bitrate(AVCodecContext *avctx, const int buf_size,
const uint8_t **buf)
static qcelp_packet_rate determine_bitrate(AVCodecContext *avctx,
const int buf_size,
const uint8_t **buf)
{
qcelp_packet_rate bitrate;
@ -639,7 +635,7 @@ static qcelp_packet_rate determine_bitrate(AVCodecContext *avctx, const int buf_
} else if ((bitrate = buf_size2bitrate(buf_size + 1)) >= 0) {
av_log(avctx, AV_LOG_WARNING,
"Bitrate byte is missing, guessing the bitrate from packet size.\n");
}else
} else
return I_F_Q;
if (bitrate == SILENCE) {
@ -652,8 +648,8 @@ static qcelp_packet_rate determine_bitrate(AVCodecContext *avctx, const int buf_
static void warn_insufficient_frame_quality(AVCodecContext *avctx,
const char *message)
{
av_log(avctx, AV_LOG_WARNING, "Frame #%d, IFQ: %s\n", avctx->frame_number,
message);
av_log(avctx, AV_LOG_WARNING, "Frame #%d, IFQ: %s\n",
avctx->frame_number, message);
}
static void postfilter(QCELPContext *q, float *samples, float *lpc)
@ -675,23 +671,24 @@ static void postfilter(QCELPContext *q, float *samples, float *lpc)
ff_celp_lp_zero_synthesis_filterf(zero_out, lpc_s,
q->formant_mem + 10, 160, 10);
memcpy(pole_out, q->postfilter_synth_mem, sizeof(float) * 10);
memcpy(pole_out, q->postfilter_synth_mem, sizeof(float) * 10);
ff_celp_lp_synthesis_filterf(pole_out + 10, lpc_p, zero_out, 160, 10);
memcpy(q->postfilter_synth_mem, pole_out + 160, sizeof(float) * 10);
ff_tilt_compensation(&q->postfilter_tilt_mem, 0.3, pole_out + 10, 160);
ff_adaptive_gain_control(samples, pole_out + 10,
ff_dot_productf(q->formant_mem + 10, q->formant_mem + 10, 160),
160, 0.9375, &q->postfilter_agc_mem);
ff_dot_productf(q->formant_mem + 10,
q->formant_mem + 10, 160),
160, 0.9375, &q->postfilter_agc_mem);
}
static int qcelp_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame_ptr, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
QCELPContext *q = avctx->priv_data;
int buf_size = avpkt->size;
QCELPContext *q = avctx->priv_data;
float *outbuffer;
int i, ret;
float quantized_lspf[10], lpc[10];
@ -711,23 +708,23 @@ static int qcelp_decode_frame(AVCodecContext *avctx, void *data,
goto erasure;
}
if(q->bitrate == RATE_OCTAVE &&
(q->first16bits = AV_RB16(buf)) == 0xFFFF) {
if (q->bitrate == RATE_OCTAVE &&
(q->first16bits = AV_RB16(buf)) == 0xFFFF) {
warn_insufficient_frame_quality(avctx, "Bitrate is 1/8 and first 16 bits are on.");
goto erasure;
}
if (q->bitrate > SILENCE) {
const QCELPBitmap *bitmaps = qcelp_unpacking_bitmaps_per_rate[q->bitrate];
const QCELPBitmap *bitmaps_end = qcelp_unpacking_bitmaps_per_rate[q->bitrate]
+ qcelp_unpacking_bitmaps_lengths[q->bitrate];
uint8_t *unpacked_data = (uint8_t *)&q->frame;
const QCELPBitmap *bitmaps_end = qcelp_unpacking_bitmaps_per_rate[q->bitrate] +
qcelp_unpacking_bitmaps_lengths[q->bitrate];
uint8_t *unpacked_data = (uint8_t *)&q->frame;
init_get_bits(&q->gb, buf, 8*buf_size);
init_get_bits(&q->gb, buf, 8 * buf_size);
memset(&q->frame, 0, sizeof(QCELPFrame));
for(; bitmaps < bitmaps_end; bitmaps++)
for (; bitmaps < bitmaps_end; bitmaps++)
unpacked_data[bitmaps->index] |= get_bits(&q->gb, bitmaps->bitlen) << bitmaps->bitpos;
// Check for erasures/blanks on rates 1, 1/4 and 1/8.
@ -735,8 +732,8 @@ static int qcelp_decode_frame(AVCodecContext *avctx, void *data,
warn_insufficient_frame_quality(avctx, "Wrong data in reserved frame area.");
goto erasure;
}
if(q->bitrate == RATE_QUARTER &&
codebook_sanity_check_for_rate_quarter(q->frame.cbgain)) {
if (q->bitrate == RATE_QUARTER &&
codebook_sanity_check_for_rate_quarter(q->frame.cbgain)) {
warn_insufficient_frame_quality(avctx, "Codebook gain sanity check failed.");
goto erasure;
}
@ -759,7 +756,6 @@ static int qcelp_decode_frame(AVCodecContext *avctx, void *data,
goto erasure;
}
apply_pitch_filters(q, outbuffer);
if (q->bitrate == I_F_Q) {
@ -770,14 +766,13 @@ erasure:
compute_svector(q, gain, outbuffer);
decode_lspf(q, quantized_lspf);
apply_pitch_filters(q, outbuffer);
}else
} else
q->erasure_count = 0;
formant_mem = q->formant_mem + 10;
for (i = 0; i < 4; i++) {
interpolate_lpc(q, quantized_lspf, lpc, i);
ff_celp_lp_synthesis_filterf(formant_mem, lpc, outbuffer + i * 40, 40,
10);
ff_celp_lp_synthesis_filterf(formant_mem, lpc, outbuffer + i * 40, 40, 10);
formant_mem += 40;
}
@ -787,7 +782,7 @@ erasure:
memcpy(q->formant_mem, q->formant_mem + 160, 10 * sizeof(float));
memcpy(q->prev_lspf, quantized_lspf, sizeof(q->prev_lspf));
q->prev_bitrate = q->bitrate;
q->prev_bitrate = q->bitrate;
*got_frame_ptr = 1;
*(AVFrame *)data = q->avframe;
@ -795,14 +790,13 @@ erasure:
return buf_size;
}
AVCodec ff_qcelp_decoder =
{
.name = "qcelp",
.type = AVMEDIA_TYPE_AUDIO,
.id = CODEC_ID_QCELP,
.init = qcelp_decode_init,
.decode = qcelp_decode_frame,
.capabilities = CODEC_CAP_DR1,
AVCodec ff_qcelp_decoder = {
.name = "qcelp",
.type = AVMEDIA_TYPE_AUDIO,
.id = CODEC_ID_QCELP,
.init = qcelp_decode_init,
.decode = qcelp_decode_frame,
.capabilities = CODEC_CAP_DR1,
.priv_data_size = sizeof(QCELPContext),
.long_name = NULL_IF_CONFIG_SMALL("QCELP / PureVoice"),
.long_name = NULL_IF_CONFIG_SMALL("QCELP / PureVoice"),
};


@ -508,7 +508,7 @@ static void vc1_mc_1mv(VC1Context *v, int dir)
}
if (v->field_mode) { // interlaced field picture
if (!dir) {
if ((v->cur_field_type != v->ref_field_type[dir]) && v->cur_field_type) {
if ((v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
srcY = s->current_picture.f.data[0];
srcU = s->current_picture.f.data[1];
srcV = s->current_picture.f.data[2];
@ -631,7 +631,7 @@ static void vc1_mc_1mv(VC1Context *v, int dir)
srcY += s->mspel * (1 + s->linesize);
}
if (v->field_mode && v->cur_field_type) {
if (v->field_mode && v->second_field) {
off = s->current_picture_ptr->f.linesize[0];
off_uv = s->current_picture_ptr->f.linesize[1];
} else {
@ -697,7 +697,7 @@ static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir)
if (!dir) {
if (v->field_mode) {
if ((v->cur_field_type != v->ref_field_type[dir]) && v->cur_field_type)
if ((v->cur_field_type != v->ref_field_type[dir]) && v->second_field)
srcY = s->current_picture.f.data[0];
else
srcY = s->last_picture.f.data[0];
@ -766,7 +766,7 @@ static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir)
off = ((n > 1) ? s->linesize : 0) + (n & 1) * 8;
else
off = s->linesize * 4 * (n & 2) + (n & 1) * 8;
if (v->field_mode && v->cur_field_type)
if (v->field_mode && v->second_field)
off += s->current_picture_ptr->f.linesize[0];
src_x = s->mb_x * 16 + (n & 1) * 8 + (mx >> 2);
@ -994,7 +994,7 @@ static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
srcU += s->current_picture_ptr->f.linesize[1];
srcV += s->current_picture_ptr->f.linesize[2];
}
off = v->cur_field_type ? s->current_picture_ptr->f.linesize[1] : 0;
off = v->second_field ? s->current_picture_ptr->f.linesize[1] : 0;
}
if (v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
@ -2048,7 +2048,7 @@ static void vc1_interp_mc(VC1Context *v)
srcY += s->mspel * (1 + s->linesize);
}
if (v->field_mode && v->cur_field_type) {
if (v->field_mode && v->second_field) {
off = s->current_picture_ptr->f.linesize[0];
off_uv = s->current_picture_ptr->f.linesize[1];
} else {
@ -4055,7 +4055,7 @@ static int vc1_decode_p_mb_intfi(VC1Context *v)
continue;
v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
off += v->cur_field_type ? ((i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0]) : 0;
off += v->second_field ? ((i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0]) : 0;
s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
// TODO: loop filter
}
@ -4102,7 +4102,7 @@ static int vc1_decode_p_mb_intfi(VC1Context *v)
dst_idx += i >> 2;
val = ((cbp >> (5 - i)) & 1);
off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
if (v->cur_field_type)
if (v->second_field)
off += (i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0];
if (val) {
pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
@ -4332,7 +4332,7 @@ static void vc1_decode_b_mb_intfi(VC1Context *v)
for (j = 0; j < 64; j++)
s->block[i][j] <<= 1;
off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
off += v->cur_field_type ? ((i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0]) : 0;
off += v->second_field ? ((i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0]) : 0;
s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
// TODO: yet to perform loop filter
}
@ -4414,7 +4414,7 @@ static void vc1_decode_b_mb_intfi(VC1Context *v)
dst_idx += i >> 2;
val = ((cbp >> (5 - i)) & 1);
off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
if (v->cur_field_type)
if (v->second_field)
off += (i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0];
if (val) {
vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
@ -5425,8 +5425,8 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
MpegEncContext *s = &v->s;
AVFrame *pict = data;
uint8_t *buf2 = NULL;
uint8_t *buf_field2 = NULL;
const uint8_t *buf_start = buf;
uint8_t *tmp;
int mb_height, n_slices1=-1;
struct {
uint8_t *buf;
@ -5495,9 +5495,6 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
slices[n_slices].mby_start = s->mb_height >> 1;
n_slices1 = n_slices - 1; // index of the last slice of the first field
n_slices++;
// not necessary, ad hoc until I find a way to handle WVC1i
buf_field2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
vc1_unescape_buffer(start + 4, size, buf_field2);
break;
}
case VC1_CODE_ENTRYPOINT: /* it should be before frame data */
@ -5525,14 +5522,26 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
}
} else if (v->interlace && ((buf[0] & 0xC0) == 0xC0)) { /* WVC1 interlaced stores both fields divided by marker */
const uint8_t *divider;
int buf_size3;
divider = find_next_marker(buf, buf + buf_size);
if ((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD) {
av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n");
goto err;
} else { // found field marker, unescape second field
buf_field2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
vc1_unescape_buffer(divider + 4, buf + buf_size - divider - 4, buf_field2);
tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
if (!tmp)
goto err;
slices = tmp;
slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
if (!slices[n_slices].buf)
goto err;
buf_size3 = vc1_unescape_buffer(divider + 4, buf + buf_size - divider - 4, slices[n_slices].buf);
init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
buf_size3 << 3);
slices[n_slices].mby_start = s->mb_height >> 1;
n_slices1 = n_slices - 1;
n_slices++;
}
buf_size2 = vc1_unescape_buffer(buf, divider - buf, buf2);
} else {
@ -5705,10 +5714,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
s->gb = slices[i].gb;
}
if (v->field_mode) {
av_free(buf_field2);
v->second_field = 0;
}
if (v->field_mode) {
if (s->pict_type == AV_PICTURE_TYPE_B) {
memcpy(v->mv_f_base, v->mv_f_next_base,
2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
@ -5765,7 +5771,6 @@ err:
for (i = 0; i < n_slices; i++)
av_free(slices[i].buf);
av_free(slices);
av_free(buf_field2);
return -1;
}


@ -35,6 +35,7 @@ YASM-OBJS-$(CONFIG_DIRAC_DECODER) += x86/diracdsp_mmx.o x86/diracdsp_yasm.o
MMX-OBJS-$(CONFIG_AC3DSP) += x86/ac3dsp_mmx.o
YASM-OBJS-$(CONFIG_AC3DSP) += x86/ac3dsp.o
MMX-OBJS-$(CONFIG_CAVS_DECODER) += x86/cavsdsp_mmx.o
MMX-OBJS-$(CONFIG_DNXHD_ENCODER) += x86/dnxhd_mmx.o
MMX-OBJS-$(CONFIG_MPEGAUDIODSP) += x86/mpegaudiodec_mmx.o
YASM-OBJS-$(CONFIG_MPEGAUDIODSP) += x86/imdct36_sse.o
MMX-OBJS-$(CONFIG_PNG_DECODER) += x86/png_mmx.o
@ -67,8 +68,7 @@ MMX-OBJS-$(HAVE_YASM) += x86/dsputil_yasm.o \
MMX-OBJS-$(CONFIG_FFT) += x86/fft.o
OBJS-$(HAVE_MMX) += x86/dnxhd_mmx.o \
x86/dsputil_mmx.o \
OBJS-$(HAVE_MMX) += x86/dsputil_mmx.o \
x86/fdct_mmx.o \
x86/fmtconvert_mmx.o \
x86/idct_mmx_xvid.o \


@ -68,6 +68,12 @@ static int decode_frame(AVCodecContext *avctx,
V = a->pic.data[2];
stride = avctx->width - 4;
if (buf_size < avctx->width * avctx->height) {
av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
return AVERROR_INVALIDDATA;
}
for (i = 0; i < avctx->height; i++) {
/* lines are stored in reversed order */
buf += stride;


@ -285,7 +285,7 @@ OBJS-$(CONFIG_SAP_MUXER) += sapenc.o rtpenc_chain.o
OBJS-$(CONFIG_SBG_DEMUXER) += sbgdec.o
OBJS-$(CONFIG_SDP_DEMUXER) += rtsp.o
OBJS-$(CONFIG_SEGAFILM_DEMUXER) += segafilm.o
OBJS-$(CONFIG_SEGMENT_MUXER) += segment.o
OBJS-$(CONFIG_SEGMENT_MUXER) += segment.o
OBJS-$(CONFIG_SHORTEN_DEMUXER) += rawdec.o
OBJS-$(CONFIG_SIFF_DEMUXER) += siff.o
OBJS-$(CONFIG_SMACKER_DEMUXER) += smacker.o


@ -206,7 +206,7 @@ typedef struct MpegTSWriteStream {
struct MpegTSService *service;
int pid; /* stream associated pid */
int cc;
int payload_index;
int payload_size;
int first_pts_check; ///< first pts check needed
int64_t payload_pts;
int64_t payload_dts;
@ -1034,29 +1034,29 @@ static int mpegts_write_packet(AVFormatContext *s, AVPacket *pkt)
}
}
if (ts_st->payload_index && ts_st->payload_index + size > DEFAULT_PES_PAYLOAD_SIZE) {
mpegts_write_pes(s, st, ts_st->payload, ts_st->payload_index,
if (ts_st->payload_size && ts_st->payload_size + size > DEFAULT_PES_PAYLOAD_SIZE) {
mpegts_write_pes(s, st, ts_st->payload, ts_st->payload_size,
ts_st->payload_pts, ts_st->payload_dts,
ts_st->payload_flags & AV_PKT_FLAG_KEY);
ts_st->payload_index = 0;
ts_st->payload_size = 0;
}
if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO || size > DEFAULT_PES_PAYLOAD_SIZE) {
av_assert0(!ts_st->payload_index);
av_assert0(!ts_st->payload_size);
// for video and subtitle, write a single pes packet
mpegts_write_pes(s, st, buf, size, pts, dts, pkt->flags & AV_PKT_FLAG_KEY);
av_free(data);
return 0;
}
if (!ts_st->payload_index) {
if (!ts_st->payload_size) {
ts_st->payload_pts = pts;
ts_st->payload_dts = dts;
ts_st->payload_flags = pkt->flags;
}
memcpy(ts_st->payload + ts_st->payload_index, buf, size);
ts_st->payload_index += size;
memcpy(ts_st->payload + ts_st->payload_size, buf, size);
ts_st->payload_size += size;
av_free(data);
@ -1075,8 +1075,8 @@ static int mpegts_write_end(AVFormatContext *s)
for(i = 0; i < s->nb_streams; i++) {
st = s->streams[i];
ts_st = st->priv_data;
if (ts_st->payload_index > 0) {
mpegts_write_pes(s, st, ts_st->payload, ts_st->payload_index,
if (ts_st->payload_size > 0) {
mpegts_write_pes(s, st, ts_st->payload, ts_st->payload_size,
ts_st->payload_pts, ts_st->payload_dts,
ts_st->payload_flags & AV_PKT_FLAG_KEY);
}


@ -1,5 +1,5 @@
/*
* Generic Segmenter
* Generic segmenter
* Copyright (c) 2011, Luca Barbato
*
* This file is part of Libav.
@ -19,43 +19,45 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/avstring.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "libavutil/mathematics.h"
#include "libavutil/parseutils.h"
#include "avformat.h"
#include "internal.h"
#include <strings.h>
#include <float.h>
#include "avformat.h"
#include "internal.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "libavutil/avstring.h"
#include "libavutil/parseutils.h"
#include "libavutil/mathematics.h"
typedef struct {
const AVClass *class; /**< Class for private options. */
int number;
AVFormatContext *avf;
char *format; /**< Set by a private option. */
char *pattern; /**< Set by a private option. */
char *path; /**< Set by a private option. */
char *list; /**< Set by a private option. */
float time; /**< Set by a private option. */
int size; /**< Set by a private option. */
int64_t offset_time;
int64_t recording_time;
int has_video;
AVIOContext *pb;
} SegmentContext;
#if CONFIG_SEGMENT_MUXER
static int segment_header(SegmentContext *s)
static int segment_start(AVFormatContext *s)
{
AVFormatContext *oc = s->avf;
SegmentContext *c = s->priv_data;
AVFormatContext *oc = c->avf;
int err = 0;
av_strlcpy(oc->filename, s->path, sizeof(oc->filename));
if (av_get_frame_filename(oc->filename, sizeof(oc->filename),
s->filename, c->number++) < 0)
return AVERROR(EINVAL);
av_strlcatf(oc->filename, sizeof(oc->filename),
s->pattern, s->number++);
if ((err = avio_open(&oc->pb, oc->filename, AVIO_FLAG_WRITE)) < 0) {
if ((err = avio_open2(&oc->pb, oc->filename, AVIO_FLAG_WRITE,
&s->interrupt_callback, NULL)) < 0)
return err;
}
if (!oc->priv_data && oc->oformat->priv_data_size > 0) {
oc->priv_data = av_mallocz(oc->oformat->priv_data_size);
@ -63,24 +65,35 @@ static int segment_header(SegmentContext *s)
avio_close(oc->pb);
return AVERROR(ENOMEM);
}
if (oc->oformat->priv_class) {
*(const AVClass**)oc->priv_data = oc->oformat->priv_class;
av_opt_set_defaults(oc->priv_data);
}
}
if ((err = oc->oformat->write_header(oc)) < 0) {
avio_close(oc->pb);
av_freep(&oc->priv_data);
goto fail;
}
return 0;
fail:
avio_close(oc->pb);
av_freep(&oc->priv_data);
return err;
}
static int segment_trailer(AVFormatContext *oc)
static int segment_end(AVFormatContext *oc)
{
int ret = 0;
if(oc->oformat->write_trailer)
if (oc->oformat->write_trailer)
ret = oc->oformat->write_trailer(oc);
avio_close(oc->pb);
if (oc->oformat->priv_class)
av_opt_free(oc->priv_data);
av_freep(&oc->priv_data);
return ret;
@ -90,30 +103,44 @@ static int seg_write_header(AVFormatContext *s)
{
SegmentContext *seg = s->priv_data;
AVFormatContext *oc;
int ret;
int ret, i;
seg->number = 0;
seg->recording_time = seg->time*1000000;
seg->offset_time = 0;
seg->recording_time = seg->time * 1000000;
if (!seg->path) {
char *t;
seg->path = av_strdup(s->filename);
t = strrchr(seg->path, '.');
if (t) *t = '\0';
}
if (seg->list)
if ((ret = avio_open2(&seg->pb, seg->list, AVIO_FLAG_WRITE,
&s->interrupt_callback, NULL)) < 0)
return ret;
for (i = 0; i< s->nb_streams; i++)
seg->has_video +=
(s->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO);
if (seg->has_video > 1)
av_log(s, AV_LOG_WARNING,
"More than a single video stream present, "
"expect issues decoding it.\n");
oc = avformat_alloc_context();
if (!oc) {
return AVERROR(ENOMEM);
ret = AVERROR(ENOMEM);
goto fail;
}
oc->oformat = av_guess_format(seg->format, NULL, NULL);
oc->oformat = av_guess_format(seg->format, s->filename, NULL);
if (!oc->oformat) {
avformat_free_context(oc);
return AVERROR_MUXER_NOT_FOUND;
ret = AVERROR_MUXER_NOT_FOUND;
goto fail;
}
if (oc->oformat->flags & AVFMT_NOFILE) {
av_log(s, AV_LOG_ERROR, "format %s not supported.\n",
oc->oformat->name);
ret = AVERROR(EINVAL);
goto fail;
}
seg->avf = oc;
@ -121,26 +148,34 @@ static int seg_write_header(AVFormatContext *s)
oc->streams = s->streams;
oc->nb_streams = s->nb_streams;
av_strlcpy(oc->filename, seg->path, sizeof(oc->filename));
av_strlcatf(oc->filename, sizeof(oc->filename),
seg->pattern, seg->number++);
if ((ret = avio_open(&oc->pb, oc->filename, AVIO_FLAG_WRITE)) < 0) {
avformat_free_context(oc);
return ret;
if (av_get_frame_filename(oc->filename, sizeof(oc->filename),
s->filename, seg->number++) < 0) {
ret = AVERROR(EINVAL);
goto fail;
}
if ((ret = avio_open2(&oc->pb, oc->filename, AVIO_FLAG_WRITE,
&s->interrupt_callback, NULL)) < 0)
goto fail;
if ((ret = avformat_write_header(oc, NULL)) < 0) {
avio_close(oc->pb);
goto fail;
}
if (ret)
if (seg->list) {
avio_printf(seg->pb, "%s\n", oc->filename);
avio_flush(seg->pb);
}
fail:
if (ret) {
oc->streams = NULL;
oc->nb_streams = 0;
if (seg->list)
avio_close(seg->pb);
avformat_free_context(oc);
avio_printf(s->pb, "%s\n", oc->filename);
avio_flush(s->pb);
}
return ret;
}
@ -149,30 +184,49 @@ static int seg_write_packet(AVFormatContext *s, AVPacket *pkt)
SegmentContext *seg = s->priv_data;
AVFormatContext *oc = seg->avf;
AVStream *st = oc->streams[pkt->stream_index];
int64_t end_pts = seg->recording_time * seg->number;
int ret;
if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
if ((seg->has_video && st->codec->codec_type == AVMEDIA_TYPE_VIDEO) &&
av_compare_ts(pkt->pts, st->time_base,
seg->recording_time*seg->number,
(AVRational){1, 1000000}) >= 0 &&
end_pts, AV_TIME_BASE_Q) >= 0 &&
pkt->flags & AV_PKT_FLAG_KEY) {
av_log(s, AV_LOG_INFO, "I'd reset at %d %"PRId64"\n",
av_log(s, AV_LOG_DEBUG, "Next segment starts at %d %"PRId64"\n",
pkt->stream_index, pkt->pts);
ret = segment_trailer(oc);
if (!ret)
ret = segment_header(seg);
ret = segment_end(oc);
if (ret) {
avformat_free_context(oc);
return ret;
if (!ret)
ret = segment_start(s);
if (ret)
goto fail;
if (seg->list) {
avio_printf(seg->pb, "%s\n", oc->filename);
avio_flush(seg->pb);
if (!(seg->number % seg->size)) {
avio_close(seg->pb);
if ((ret = avio_open2(&seg->pb, seg->list, AVIO_FLAG_WRITE,
&s->interrupt_callback, NULL)) < 0)
goto fail;
}
}
avio_printf(s->pb, "%s\n", oc->filename);
avio_flush(s->pb);
}
ret = oc->oformat->write_packet(oc, pkt);
fail:
if (ret < 0) {
oc->streams = NULL;
oc->nb_streams = 0;
if (seg->list)
avio_close(seg->pb);
avformat_free_context(oc);
}
return ret;
}
@ -180,19 +234,22 @@ static int seg_write_trailer(struct AVFormatContext *s)
{
SegmentContext *seg = s->priv_data;
AVFormatContext *oc = seg->avf;
return segment_trailer(oc);
int ret = segment_end(oc);
if (seg->list)
avio_close(seg->pb);
oc->streams = NULL;
oc->nb_streams = 0;
avformat_free_context(oc);
return ret;
}
#endif /* CONFIG_SEGMENT_MUXER */
#define OFFSET(x) offsetof(SegmentContext, x)
#define E AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
{ "container_format", "container format used for the segments", OFFSET(format), AV_OPT_TYPE_STRING, {.str = "nut"}, 0, 0, E },
{ "segment_time", "segment lenght in seconds", OFFSET(time), AV_OPT_TYPE_FLOAT, {.dbl = 2}, 0, FLT_MAX, E },
{ "segment_pattern", "pattern to use in segment files", OFFSET(pattern),AV_OPT_TYPE_STRING, {.str = "%03d"}, 0, 0, E },
{ "segment_basename", "basename to use in segment files", OFFSET(path ),AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, E },
{ "segment_format", "container format used for the segments", OFFSET(format), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, E },
{ "segment_time", "segment length in seconds", OFFSET(time), AV_OPT_TYPE_FLOAT, {.dbl = 2}, 0, FLT_MAX, E },
{ "segment_list", "output the segment list", OFFSET(list), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, E },
{ "segment_list_size", "maximum number of playlist entries", OFFSET(size), AV_OPT_TYPE_INT, {.dbl = 5}, 0, INT_MAX, E },
{ NULL },
};
@ -203,32 +260,14 @@ static const AVClass seg_class = {
.version = LIBAVUTIL_VERSION_INT,
};
/* input
#if CONFIG_IMAGE2_DEMUXER
AVInputFormat ff_image2_demuxer = {
.name = "image2",
.long_name = NULL_IF_CONFIG_SMALL("image2 sequence"),
.priv_data_size = sizeof(VideoData),
.read_probe = read_probe,
.read_header = read_header,
.read_packet = read_packet,
.flags = AVFMT_NOFILE,
.priv_class = &img2_class,
};
#endif
*/
/* output */
#if CONFIG_SEGMENT_MUXER
AVOutputFormat ff_segment_muxer = {
.name = "segment",
.long_name = NULL_IF_CONFIG_SMALL("segment muxer"),
.extensions = "m3u8",
.priv_data_size = sizeof(SegmentContext),
.flags = AVFMT_GLOBALHEADER,
.flags = AVFMT_GLOBALHEADER | AVFMT_NOFILE,
.write_header = seg_write_header,
.write_packet = seg_write_packet,
.write_trailer = seg_write_trailer,
.priv_class = &seg_class,
};
#endif


@ -27,6 +27,7 @@ int av_strerror(int errnum, char *errbuf, size_t errbuf_size)
switch (errnum) {
case AVERROR_BSF_NOT_FOUND: errstr = "Bitstream filter not found" ; break;
case AVERROR_BUG2:
case AVERROR_BUG: errstr = "Internal bug, should not have happened" ; break;
case AVERROR_DECODER_NOT_FOUND: errstr = "Decoder not found" ; break;
case AVERROR_DEMUXER_NOT_FOUND: errstr = "Demuxer not found" ; break;


@ -45,7 +45,7 @@
#endif
#define AVERROR_BSF_NOT_FOUND (-MKTAG(0xF8,'B','S','F')) ///< Bitstream filter not found
#define AVERROR_BUG (-MKTAG( 'B','U','G','!')) ///< Internal bug
#define AVERROR_BUG (-MKTAG( 'B','U','G','!')) ///< Internal bug, also see AVERROR_BUG2
#define AVERROR_DECODER_NOT_FOUND (-MKTAG(0xF8,'D','E','C')) ///< Decoder not found
#define AVERROR_DEMUXER_NOT_FOUND (-MKTAG(0xF8,'D','E','M')) ///< Demuxer not found
#define AVERROR_ENCODER_NOT_FOUND (-MKTAG(0xF8,'E','N','C')) ///< Encoder not found
@ -59,6 +59,12 @@
#define AVERROR_PROTOCOL_NOT_FOUND (-MKTAG(0xF8,'P','R','O')) ///< Protocol not found
#define AVERROR_STREAM_NOT_FOUND (-MKTAG(0xF8,'S','T','R')) ///< Stream not found
/**
* This is semantically identical to AVERROR_BUG;
* it has been introduced in Libav after our AVERROR_BUG and with a modified value.
*/
#define AVERROR_BUG2 (-MKTAG( 'B','U','G',' '))
/**
* Put a description of the AVERROR code errnum in errbuf.
* In case of failure the global variable errno is set to indicate the

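To make the relationship concrete, here is a minimal usage sketch (an
illustrative program written against this tree's libavutil; it is not part of
the patch): both codes resolve to the same description through av_strerror().

/*
 * Illustrative only (assumption: compiled and linked against this libavutil).
 * AVERROR_BUG2 falls through to the AVERROR_BUG case in av_strerror(), so
 * both calls below print "Internal bug, should not have happened".
 */
#include <stdio.h>
#include <libavutil/error.h>

int main(void)
{
    char msg[128];

    av_strerror(AVERROR_BUG, msg, sizeof(msg));
    printf("AVERROR_BUG : %s\n", msg);

    av_strerror(AVERROR_BUG2, msg, sizeof(msg));
    printf("AVERROR_BUG2: %s\n", msg);

    return 0;
}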

@ -35,21 +35,22 @@
#include "libavutil/pixdesc.h"
#define RGB2YUV_SHIFT 15
#define BY ( (int)(0.114*219/255*(1<<RGB2YUV_SHIFT)+0.5))
#define BV (-(int)(0.081*224/255*(1<<RGB2YUV_SHIFT)+0.5))
#define BU ( (int)(0.500*224/255*(1<<RGB2YUV_SHIFT)+0.5))
#define GY ( (int)(0.587*219/255*(1<<RGB2YUV_SHIFT)+0.5))
#define GV (-(int)(0.419*224/255*(1<<RGB2YUV_SHIFT)+0.5))
#define GU (-(int)(0.331*224/255*(1<<RGB2YUV_SHIFT)+0.5))
#define RY ( (int)(0.299*219/255*(1<<RGB2YUV_SHIFT)+0.5))
#define RV ( (int)(0.500*224/255*(1<<RGB2YUV_SHIFT)+0.5))
#define RU (-(int)(0.169*224/255*(1<<RGB2YUV_SHIFT)+0.5))
#define BY ( (int) (0.114 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define BV (-(int) (0.081 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define BU ( (int) (0.500 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define GY ( (int) (0.587 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define GV (-(int) (0.419 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define GU (-(int) (0.331 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define RY ( (int) (0.299 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define RV ( (int) (0.500 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define RU (-(int) (0.169 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
static void fillPlane(uint8_t* plane, int stride, int width, int height, int y, uint8_t val)
static void fillPlane(uint8_t *plane, int stride, int width, int height, int y,
uint8_t val)
{
int i;
uint8_t *ptr = plane + stride*y;
for (i=0; i<height; i++) {
uint8_t *ptr = plane + stride * y;
for (i = 0; i < height; i++) {
memset(ptr, val, width);
ptr += stride;
}
@ -64,7 +65,7 @@ static void copyPlane(const uint8_t *src, int srcStride,
memcpy(dst, src, srcSliceH * dstStride);
} else {
int i;
for (i=0; i<srcSliceH; i++) {
for (i = 0; i < srcSliceH; i++) {
memcpy(dst, src, width);
src += srcStride;
dst += dstStride;
@ -72,70 +73,84 @@ static void copyPlane(const uint8_t *src, int srcStride,
}
}
static int planarToNv12Wrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
int srcSliceH, uint8_t* dstParam[], int dstStride[])
static int planarToNv12Wrapper(SwsContext *c, const uint8_t *src[],
int srcStride[], int srcSliceY,
int srcSliceH, uint8_t *dstParam[],
int dstStride[])
{
uint8_t *dst = dstParam[1] + dstStride[1]*srcSliceY/2;
uint8_t *dst = dstParam[1] + dstStride[1] * srcSliceY / 2;
copyPlane(src[0], srcStride[0], srcSliceY, srcSliceH, c->srcW,
dstParam[0], dstStride[0]);
if (c->dstFormat == PIX_FMT_NV12)
interleaveBytes(src[1], src[2], dst, c->srcW/2, srcSliceH/2, srcStride[1], srcStride[2], dstStride[0]);
interleaveBytes(src[1], src[2], dst, c->srcW / 2, srcSliceH / 2,
srcStride[1], srcStride[2], dstStride[0]);
else
interleaveBytes(src[2], src[1], dst, c->srcW/2, srcSliceH/2, srcStride[2], srcStride[1], dstStride[0]);
interleaveBytes(src[2], src[1], dst, c->srcW / 2, srcSliceH / 2,
srcStride[2], srcStride[1], dstStride[0]);
return srcSliceH;
}
static int planarToYuy2Wrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
int srcSliceH, uint8_t* dstParam[], int dstStride[])
static int planarToYuy2Wrapper(SwsContext *c, const uint8_t *src[],
int srcStride[], int srcSliceY, int srcSliceH,
uint8_t *dstParam[], int dstStride[])
{
uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY;
uint8_t *dst = dstParam[0] + dstStride[0] * srcSliceY;
yv12toyuy2(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0], srcStride[1], dstStride[0]);
yv12toyuy2(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0],
srcStride[1], dstStride[0]);
return srcSliceH;
}
static int planarToUyvyWrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
int srcSliceH, uint8_t* dstParam[], int dstStride[])
static int planarToUyvyWrapper(SwsContext *c, const uint8_t *src[],
int srcStride[], int srcSliceY, int srcSliceH,
uint8_t *dstParam[], int dstStride[])
{
uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY;
uint8_t *dst = dstParam[0] + dstStride[0] * srcSliceY;
yv12touyvy(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0], srcStride[1], dstStride[0]);
yv12touyvy(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0],
srcStride[1], dstStride[0]);
return srcSliceH;
}
static int yuv422pToYuy2Wrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
int srcSliceH, uint8_t* dstParam[], int dstStride[])
static int yuv422pToYuy2Wrapper(SwsContext *c, const uint8_t *src[],
int srcStride[], int srcSliceY, int srcSliceH,
uint8_t *dstParam[], int dstStride[])
{
uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY;
uint8_t *dst = dstParam[0] + dstStride[0] * srcSliceY;
yuv422ptoyuy2(src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0]);
yuv422ptoyuy2(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0],
srcStride[1], dstStride[0]);
return srcSliceH;
}
static int yuv422pToUyvyWrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
int srcSliceH, uint8_t* dstParam[], int dstStride[])
static int yuv422pToUyvyWrapper(SwsContext *c, const uint8_t *src[],
int srcStride[], int srcSliceY, int srcSliceH,
uint8_t *dstParam[], int dstStride[])
{
uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY;
uint8_t *dst = dstParam[0] + dstStride[0] * srcSliceY;
yuv422ptouyvy(src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0]);
yuv422ptouyvy(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0],
srcStride[1], dstStride[0]);
return srcSliceH;
}
static int yuyvToYuv420Wrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
int srcSliceH, uint8_t* dstParam[], int dstStride[])
static int yuyvToYuv420Wrapper(SwsContext *c, const uint8_t *src[],
int srcStride[], int srcSliceY, int srcSliceH,
uint8_t *dstParam[], int dstStride[])
{
uint8_t *ydst=dstParam[0] + dstStride[0]*srcSliceY;
uint8_t *udst=dstParam[1] + dstStride[1]*srcSliceY/2;
uint8_t *vdst=dstParam[2] + dstStride[2]*srcSliceY/2;
uint8_t *ydst = dstParam[0] + dstStride[0] * srcSliceY;
uint8_t *udst = dstParam[1] + dstStride[1] * srcSliceY / 2;
uint8_t *vdst = dstParam[2] + dstStride[2] * srcSliceY / 2;
yuyvtoyuv420(ydst, udst, vdst, src[0], c->srcW, srcSliceH, dstStride[0], dstStride[1], srcStride[0]);
yuyvtoyuv420(ydst, udst, vdst, src[0], c->srcW, srcSliceH, dstStride[0],
dstStride[1], srcStride[0]);
if (dstParam[3])
fillPlane(dstParam[3], dstStride[3], c->srcW, srcSliceH, srcSliceY, 255);
@ -143,26 +158,30 @@ static int yuyvToYuv420Wrapper(SwsContext *c, const uint8_t* src[], int srcStrid
return srcSliceH;
}
static int yuyvToYuv422Wrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
int srcSliceH, uint8_t* dstParam[], int dstStride[])
static int yuyvToYuv422Wrapper(SwsContext *c, const uint8_t *src[],
int srcStride[], int srcSliceY, int srcSliceH,
uint8_t *dstParam[], int dstStride[])
{
uint8_t *ydst=dstParam[0] + dstStride[0]*srcSliceY;
uint8_t *udst=dstParam[1] + dstStride[1]*srcSliceY;
uint8_t *vdst=dstParam[2] + dstStride[2]*srcSliceY;
uint8_t *ydst = dstParam[0] + dstStride[0] * srcSliceY;
uint8_t *udst = dstParam[1] + dstStride[1] * srcSliceY;
uint8_t *vdst = dstParam[2] + dstStride[2] * srcSliceY;
yuyvtoyuv422(ydst, udst, vdst, src[0], c->srcW, srcSliceH, dstStride[0], dstStride[1], srcStride[0]);
yuyvtoyuv422(ydst, udst, vdst, src[0], c->srcW, srcSliceH, dstStride[0],
dstStride[1], srcStride[0]);
return srcSliceH;
}
static int uyvyToYuv420Wrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
int srcSliceH, uint8_t* dstParam[], int dstStride[])
static int uyvyToYuv420Wrapper(SwsContext *c, const uint8_t *src[],
int srcStride[], int srcSliceY, int srcSliceH,
uint8_t *dstParam[], int dstStride[])
{
uint8_t *ydst=dstParam[0] + dstStride[0]*srcSliceY;
uint8_t *udst=dstParam[1] + dstStride[1]*srcSliceY/2;
uint8_t *vdst=dstParam[2] + dstStride[2]*srcSliceY/2;
uint8_t *ydst = dstParam[0] + dstStride[0] * srcSliceY;
uint8_t *udst = dstParam[1] + dstStride[1] * srcSliceY / 2;
uint8_t *vdst = dstParam[2] + dstStride[2] * srcSliceY / 2;
uyvytoyuv420(ydst, udst, vdst, src[0], c->srcW, srcSliceH, dstStride[0], dstStride[1], srcStride[0]);
uyvytoyuv420(ydst, udst, vdst, src[0], c->srcW, srcSliceH, dstStride[0],
dstStride[1], srcStride[0]);
if (dstParam[3])
fillPlane(dstParam[3], dstStride[3], c->srcW, srcSliceH, srcSliceY, 255);
@ -170,55 +189,60 @@ static int uyvyToYuv420Wrapper(SwsContext *c, const uint8_t* src[], int srcStrid
return srcSliceH;
}
static int uyvyToYuv422Wrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
int srcSliceH, uint8_t* dstParam[], int dstStride[])
static int uyvyToYuv422Wrapper(SwsContext *c, const uint8_t *src[],
int srcStride[], int srcSliceY, int srcSliceH,
uint8_t *dstParam[], int dstStride[])
{
uint8_t *ydst=dstParam[0] + dstStride[0]*srcSliceY;
uint8_t *udst=dstParam[1] + dstStride[1]*srcSliceY;
uint8_t *vdst=dstParam[2] + dstStride[2]*srcSliceY;
uint8_t *ydst = dstParam[0] + dstStride[0] * srcSliceY;
uint8_t *udst = dstParam[1] + dstStride[1] * srcSliceY;
uint8_t *vdst = dstParam[2] + dstStride[2] * srcSliceY;
uyvytoyuv422(ydst, udst, vdst, src[0], c->srcW, srcSliceH, dstStride[0], dstStride[1], srcStride[0]);
uyvytoyuv422(ydst, udst, vdst, src[0], c->srcW, srcSliceH, dstStride[0],
dstStride[1], srcStride[0]);
return srcSliceH;
}
static void gray8aToPacked32(const uint8_t *src, uint8_t *dst, int num_pixels, const uint8_t *palette)
static void gray8aToPacked32(const uint8_t *src, uint8_t *dst, int num_pixels,
const uint8_t *palette)
{
int i;
for (i=0; i<num_pixels; i++)
((uint32_t *) dst)[i] = ((const uint32_t *)palette)[src[i<<1]] | (src[(i<<1)+1] << 24);
for (i = 0; i < num_pixels; i++)
((uint32_t *) dst)[i] = ((const uint32_t *) palette)[src[i << 1]] | (src[(i << 1) + 1] << 24);
}
static void gray8aToPacked32_1(const uint8_t *src, uint8_t *dst, int num_pixels, const uint8_t *palette)
static void gray8aToPacked32_1(const uint8_t *src, uint8_t *dst, int num_pixels,
const uint8_t *palette)
{
int i;
for (i=0; i<num_pixels; i++)
((uint32_t *) dst)[i] = ((const uint32_t *)palette)[src[i<<1]] | src[(i<<1)+1];
for (i = 0; i < num_pixels; i++)
((uint32_t *) dst)[i] = ((const uint32_t *) palette)[src[i << 1]] | src[(i << 1) + 1];
}
static void gray8aToPacked24(const uint8_t *src, uint8_t *dst, int num_pixels, const uint8_t *palette)
static void gray8aToPacked24(const uint8_t *src, uint8_t *dst, int num_pixels,
const uint8_t *palette)
{
int i;
for (i=0; i<num_pixels; i++) {
for (i = 0; i < num_pixels; i++) {
//FIXME slow?
dst[0]= palette[src[i<<1]*4+0];
dst[1]= palette[src[i<<1]*4+1];
dst[2]= palette[src[i<<1]*4+2];
dst+= 3;
dst[0] = palette[src[i << 1] * 4 + 0];
dst[1] = palette[src[i << 1] * 4 + 1];
dst[2] = palette[src[i << 1] * 4 + 2];
dst += 3;
}
}
static int packed_16bpc_bswap(SwsContext *c, const uint8_t* src[],
static int packed_16bpc_bswap(SwsContext *c, const uint8_t *src[],
int srcStride[], int srcSliceY, int srcSliceH,
uint8_t* dst[], int dstStride[])
uint8_t *dst[], int dstStride[])
{
int i, j;
int srcstr = srcStride[0] >> 1;
int dststr = dstStride[0] >> 1;
uint16_t *dstPtr = (uint16_t *)dst[0];
const uint16_t *srcPtr = (const uint16_t *)src[0];
uint16_t *dstPtr = (uint16_t *) dst[0];
const uint16_t *srcPtr = (const uint16_t *) src[0];
for (i = 0; i < srcSliceH; i++) {
for (j = 0; j < srcstr; j++) {
@ -231,16 +255,17 @@ static int packed_16bpc_bswap(SwsContext *c, const uint8_t* src[],
return srcSliceH;
}
static int palToRgbWrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
int srcSliceH, uint8_t* dst[], int dstStride[])
static int palToRgbWrapper(SwsContext *c, const uint8_t *src[], int srcStride[],
int srcSliceY, int srcSliceH, uint8_t *dst[],
int dstStride[])
{
const enum PixelFormat srcFormat= c->srcFormat;
const enum PixelFormat dstFormat= c->dstFormat;
const enum PixelFormat srcFormat = c->srcFormat;
const enum PixelFormat dstFormat = c->dstFormat;
void (*conv)(const uint8_t *src, uint8_t *dst, int num_pixels,
const uint8_t *palette)=NULL;
const uint8_t *palette) = NULL;
int i;
uint8_t *dstPtr= dst[0] + dstStride[0]*srcSliceY;
const uint8_t *srcPtr= src[0];
uint8_t *dstPtr = dst[0] + dstStride[0] * srcSliceY;
const uint8_t *srcPtr = src[0];
if (srcFormat == PIX_FMT_GRAY8A) {
switch (dstFormat) {
@ -266,10 +291,10 @@ static int palToRgbWrapper(SwsContext *c, const uint8_t* src[], int srcStride[],
av_log(c, AV_LOG_ERROR, "internal error %s -> %s converter\n",
av_get_pix_fmt_name(srcFormat), av_get_pix_fmt_name(dstFormat));
else {
for (i=0; i<srcSliceH; i++) {
for (i = 0; i < srcSliceH; i++) {
conv(srcPtr, dstPtr, c->srcW, (uint8_t *) c->pal_rgb);
srcPtr+= srcStride[0];
dstPtr+= dstStride[0];
srcPtr += srcStride[0];
dstPtr += dstStride[0];
}
}
@ -370,16 +395,17 @@ static int planarRgbToRgbWrapper(SwsContext *c, const uint8_t* src[], int srcStr
)
/* {RGB,BGR}{15,16,24,32,32_1} -> {RGB,BGR}{15,16,24,32} */
static int rgbToRgbWrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
int srcSliceH, uint8_t* dst[], int dstStride[])
static int rgbToRgbWrapper(SwsContext *c, const uint8_t *src[], int srcStride[],
int srcSliceY, int srcSliceH, uint8_t *dst[],
int dstStride[])
{
const enum PixelFormat srcFormat= c->srcFormat;
const enum PixelFormat dstFormat= c->dstFormat;
const int srcBpp= (c->srcFormatBpp + 7) >> 3;
const int dstBpp= (c->dstFormatBpp + 7) >> 3;
const int srcId= c->srcFormatBpp >> 2; /* 1:0, 4:1, 8:2, 15:3, 16:4, 24:6, 32:8 */
const int dstId= c->dstFormatBpp >> 2;
void (*conv)(const uint8_t *src, uint8_t *dst, int src_size)=NULL;
const enum PixelFormat srcFormat = c->srcFormat;
const enum PixelFormat dstFormat = c->dstFormat;
const int srcBpp = (c->srcFormatBpp + 7) >> 3;
const int dstBpp = (c->dstFormatBpp + 7) >> 3;
const int srcId = c->srcFormatBpp >> 2; /* 1:0, 4:1, 8:2, 15:3, 16:4, 24:6, 32:8 */
const int dstId = c->dstFormatBpp >> 2;
void (*conv)(const uint8_t *src, uint8_t *dst, int src_size) = NULL;
#define CONV_IS(src, dst) (srcFormat == PIX_FMT_##src && dstFormat == PIX_FMT_##dst)
@@ -398,40 +424,40 @@ static int rgbToRgbWrapper(SwsContext *c, const uint8_t* src[], int srcStride[],
|| CONV_IS(RGBA, ARGB)) conv = shuffle_bytes_3012;
} else
/* BGR -> BGR */
if ( (isBGRinInt(srcFormat) && isBGRinInt(dstFormat))
|| (isRGBinInt(srcFormat) && isRGBinInt(dstFormat))) {
switch(srcId | (dstId<<4)) {
case 0x34: conv= rgb16to15; break;
case 0x36: conv= rgb24to15; break;
case 0x38: conv= rgb32to15; break;
case 0x43: conv= rgb15to16; break;
case 0x46: conv= rgb24to16; break;
case 0x48: conv= rgb32to16; break;
case 0x63: conv= rgb15to24; break;
case 0x64: conv= rgb16to24; break;
case 0x68: conv= rgb32to24; break;
case 0x83: conv= rgb15to32; break;
case 0x84: conv= rgb16to32; break;
case 0x86: conv= rgb24to32; break;
if ((isBGRinInt(srcFormat) && isBGRinInt(dstFormat)) ||
(isRGBinInt(srcFormat) && isRGBinInt(dstFormat))) {
switch (srcId | (dstId << 4)) {
case 0x34: conv = rgb16to15; break;
case 0x36: conv = rgb24to15; break;
case 0x38: conv = rgb32to15; break;
case 0x43: conv = rgb15to16; break;
case 0x46: conv = rgb24to16; break;
case 0x48: conv = rgb32to16; break;
case 0x63: conv = rgb15to24; break;
case 0x64: conv = rgb16to24; break;
case 0x68: conv = rgb32to24; break;
case 0x83: conv = rgb15to32; break;
case 0x84: conv = rgb16to32; break;
case 0x86: conv = rgb24to32; break;
}
} else if ( (isBGRinInt(srcFormat) && isRGBinInt(dstFormat))
|| (isRGBinInt(srcFormat) && isBGRinInt(dstFormat))) {
switch(srcId | (dstId<<4)) {
case 0x33: conv= rgb15tobgr15; break;
case 0x34: conv= rgb16tobgr15; break;
case 0x36: conv= rgb24tobgr15; break;
case 0x38: conv= rgb32tobgr15; break;
case 0x43: conv= rgb15tobgr16; break;
case 0x44: conv= rgb16tobgr16; break;
case 0x46: conv= rgb24tobgr16; break;
case 0x48: conv= rgb32tobgr16; break;
case 0x63: conv= rgb15tobgr24; break;
case 0x64: conv= rgb16tobgr24; break;
case 0x66: conv= rgb24tobgr24; break;
case 0x68: conv= rgb32tobgr24; break;
case 0x83: conv= rgb15tobgr32; break;
case 0x84: conv= rgb16tobgr32; break;
case 0x86: conv= rgb24tobgr32; break;
} else if ((isBGRinInt(srcFormat) && isRGBinInt(dstFormat)) ||
(isRGBinInt(srcFormat) && isBGRinInt(dstFormat))) {
switch (srcId | (dstId << 4)) {
case 0x33: conv = rgb15tobgr15; break;
case 0x34: conv = rgb16tobgr15; break;
case 0x36: conv = rgb24tobgr15; break;
case 0x38: conv = rgb32tobgr15; break;
case 0x43: conv = rgb15tobgr16; break;
case 0x44: conv = rgb16tobgr16; break;
case 0x46: conv = rgb24tobgr16; break;
case 0x48: conv = rgb32tobgr16; break;
case 0x63: conv = rgb15tobgr24; break;
case 0x64: conv = rgb16tobgr24; break;
case 0x66: conv = rgb24tobgr24; break;
case 0x68: conv = rgb32tobgr24; break;
case 0x83: conv = rgb15tobgr32; break;
case 0x84: conv = rgb16tobgr32; break;
case 0x86: conv = rgb24tobgr32; break;
}
}
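/* Worked example of the key used above: converting 16 bpp RGB to 15 bpp RGB
 * gives srcId = 16 >> 2 = 4 and dstId = 15 >> 2 = 3, so srcId | (dstId << 4)
 * is 0x34, which selects rgb16to15 in the first switch. */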
@@ -439,38 +465,43 @@ static int rgbToRgbWrapper(SwsContext *c, const uint8_t* src[], int srcStride[],
av_log(c, AV_LOG_ERROR, "internal error %s -> %s converter\n",
av_get_pix_fmt_name(srcFormat), av_get_pix_fmt_name(dstFormat));
} else {
const uint8_t *srcPtr= src[0];
uint8_t *dstPtr= dst[0];
if ((srcFormat == PIX_FMT_RGB32_1 || srcFormat == PIX_FMT_BGR32_1) && !isRGBA32(dstFormat))
const uint8_t *srcPtr = src[0];
uint8_t *dstPtr = dst[0];
if ((srcFormat == PIX_FMT_RGB32_1 || srcFormat == PIX_FMT_BGR32_1) &&
!isRGBA32(dstFormat))
srcPtr += ALT32_CORR;
if ((dstFormat == PIX_FMT_RGB32_1 || dstFormat == PIX_FMT_BGR32_1) && !isRGBA32(srcFormat))
if ((dstFormat == PIX_FMT_RGB32_1 || dstFormat == PIX_FMT_BGR32_1) &&
!isRGBA32(srcFormat))
dstPtr += ALT32_CORR;
if (dstStride[0]*srcBpp == srcStride[0]*dstBpp && srcStride[0] > 0 && !(srcStride[0] % srcBpp))
conv(srcPtr, dstPtr + dstStride[0]*srcSliceY, srcSliceH*srcStride[0]);
if (dstStride[0] * srcBpp == srcStride[0] * dstBpp && srcStride[0] > 0 &&
!(srcStride[0] % srcBpp))
conv(srcPtr, dstPtr + dstStride[0] * srcSliceY,
srcSliceH * srcStride[0]);
else {
int i;
dstPtr += dstStride[0]*srcSliceY;
dstPtr += dstStride[0] * srcSliceY;
for (i=0; i<srcSliceH; i++) {
conv(srcPtr, dstPtr, c->srcW*srcBpp);
srcPtr+= srcStride[0];
dstPtr+= dstStride[0];
for (i = 0; i < srcSliceH; i++) {
conv(srcPtr, dstPtr, c->srcW * srcBpp);
srcPtr += srcStride[0];
dstPtr += dstStride[0];
}
}
}
return srcSliceH;
}
static int bgr24ToYv12Wrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
int srcSliceH, uint8_t* dst[], int dstStride[])
static int bgr24ToYv12Wrapper(SwsContext *c, const uint8_t *src[],
int srcStride[], int srcSliceY, int srcSliceH,
uint8_t *dst[], int dstStride[])
{
rgb24toyv12(
src[0],
dst[0]+ srcSliceY *dstStride[0],
dst[1]+(srcSliceY>>1)*dstStride[1],
dst[2]+(srcSliceY>>1)*dstStride[2],
dst[0] + srcSliceY * dstStride[0],
dst[1] + (srcSliceY >> 1) * dstStride[1],
dst[2] + (srcSliceY >> 1) * dstStride[2],
c->srcW, srcSliceH,
dstStride[0], dstStride[1], srcStride[0]);
if (dst[3])
@@ -478,15 +509,16 @@ static int bgr24ToYv12Wrapper(SwsContext *c, const uint8_t* src[], int srcStride
return srcSliceH;
}
static int yvu9ToYv12Wrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
int srcSliceH, uint8_t* dst[], int dstStride[])
static int yvu9ToYv12Wrapper(SwsContext *c, const uint8_t *src[],
int srcStride[], int srcSliceY, int srcSliceH,
uint8_t *dst[], int dstStride[])
{
copyPlane(src[0], srcStride[0], srcSliceY, srcSliceH, c->srcW,
dst[0], dstStride[0]);
planar2x(src[1], dst[1] + dstStride[1]*(srcSliceY >> 1), c->chrSrcW,
planar2x(src[1], dst[1] + dstStride[1] * (srcSliceY >> 1), c->chrSrcW,
srcSliceH >> 2, srcStride[1], dstStride[1]);
planar2x(src[2], dst[2] + dstStride[2]*(srcSliceY >> 1), c->chrSrcW,
planar2x(src[2], dst[2] + dstStride[2] * (srcSliceY >> 1), c->chrSrcW,
srcSliceH >> 2, srcStride[2], dstStride[2]);
if (dst[3])
fillPlane(dst[3], dstStride[3], c->srcW, srcSliceH, srcSliceY, 255);
@@ -494,26 +526,28 @@ static int yvu9ToYv12Wrapper(SwsContext *c, const uint8_t* src[], int srcStride[
}
/* unscaled copy like stuff (assumes nearly identical formats) */
static int packedCopyWrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
int srcSliceH, uint8_t* dst[], int dstStride[])
static int packedCopyWrapper(SwsContext *c, const uint8_t *src[],
int srcStride[], int srcSliceY, int srcSliceH,
uint8_t *dst[], int dstStride[])
{
if (dstStride[0]==srcStride[0] && srcStride[0] > 0)
memcpy(dst[0] + dstStride[0]*srcSliceY, src[0], srcSliceH*dstStride[0]);
if (dstStride[0] == srcStride[0] && srcStride[0] > 0)
memcpy(dst[0] + dstStride[0] * srcSliceY, src[0], srcSliceH * dstStride[0]);
else {
int i;
const uint8_t *srcPtr= src[0];
uint8_t *dstPtr= dst[0] + dstStride[0]*srcSliceY;
int length=0;
const uint8_t *srcPtr = src[0];
uint8_t *dstPtr = dst[0] + dstStride[0] * srcSliceY;
int length = 0;
/* universal length finder */
while(length+c->srcW <= FFABS(dstStride[0])
&& length+c->srcW <= FFABS(srcStride[0])) length+= c->srcW;
assert(length!=0);
while (length + c->srcW <= FFABS(dstStride[0]) &&
length + c->srcW <= FFABS(srcStride[0]))
length += c->srcW;
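            /* length is now the largest multiple of c->srcW that still fits
             * in both strides, e.g. srcW = 100 with strides 310 and 320
             * yields length = 300. */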
assert(length != 0);
for (i=0; i<srcSliceH; i++) {
for (i = 0; i < srcSliceH; i++) {
memcpy(dstPtr, srcPtr, length);
srcPtr+= srcStride[0];
dstPtr+= dstStride[0];
srcPtr += srcStride[0];
dstPtr += dstStride[0];
}
}
return srcSliceH;
@@ -540,33 +574,35 @@ static int packedCopyWrapper(SwsContext *c, const uint8_t* src[], int srcStride[
src += srcStride;\
}
static int planarCopyWrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
int srcSliceH, uint8_t* dst[], int dstStride[])
static int planarCopyWrapper(SwsContext *c, const uint8_t *src[],
int srcStride[], int srcSliceY, int srcSliceH,
uint8_t *dst[], int dstStride[])
{
int plane, i, j;
for (plane=0; plane<4; plane++) {
int length= (plane==0 || plane==3) ? c->srcW : -((-c->srcW )>>c->chrDstHSubSample);
int y= (plane==0 || plane==3) ? srcSliceY: -((-srcSliceY)>>c->chrDstVSubSample);
int height= (plane==0 || plane==3) ? srcSliceH: -((-srcSliceH)>>c->chrDstVSubSample);
const uint8_t *srcPtr= src[plane];
uint8_t *dstPtr= dst[plane] + dstStride[plane]*y;
for (plane = 0; plane < 4; plane++) {
int length = (plane == 0 || plane == 3) ? c->srcW : -((-c->srcW ) >> c->chrDstHSubSample);
int y = (plane == 0 || plane == 3) ? srcSliceY: -((-srcSliceY) >> c->chrDstVSubSample);
int height = (plane == 0 || plane == 3) ? srcSliceH: -((-srcSliceH) >> c->chrDstVSubSample);
const uint8_t *srcPtr = src[plane];
uint8_t *dstPtr = dst[plane] + dstStride[plane] * y;
int shiftonly= plane==1 || plane==2 || (!c->srcRange && plane==0);
if (!dst[plane]) continue;
if (!dst[plane])
continue;
// ignore palette for GRAY8
if (plane == 1 && !dst[2]) continue;
if (!src[plane] || (plane == 1 && !src[2])) {
if(is16BPS(c->dstFormat))
length*=2;
fillPlane(dst[plane], dstStride[plane], length, height, y, (plane==3) ? 255 : 128);
if (is16BPS(c->dstFormat))
length *= 2;
fillPlane(dst[plane], dstStride[plane], length, height, y,
(plane == 3) ? 255 : 128);
} else {
if(isNBPS(c->srcFormat) || isNBPS(c->dstFormat)
|| (is16BPS(c->srcFormat) != is16BPS(c->dstFormat))
) {
const int src_depth = av_pix_fmt_descriptors[c->srcFormat].comp[plane].depth_minus1+1;
const int dst_depth = av_pix_fmt_descriptors[c->dstFormat].comp[plane].depth_minus1+1;
const uint16_t *srcPtr2 = (const uint16_t*)srcPtr;
const int src_depth = av_pix_fmt_descriptors[c->srcFormat].comp[plane].depth_minus1 + 1;
const int dst_depth = av_pix_fmt_descriptors[c->dstFormat].comp[plane].depth_minus1 + 1;
const uint16_t *srcPtr2 = (const uint16_t *) srcPtr;
uint16_t *dstPtr2 = (uint16_t*)dstPtr;
if (dst_depth == 8) {
@@ -640,26 +676,26 @@ static int planarCopyWrapper(SwsContext *c, const uint8_t* src[], int srcStride[
}
}
}
} else if(is16BPS(c->srcFormat) && is16BPS(c->dstFormat)
&& isBE(c->srcFormat) != isBE(c->dstFormat)) {
} else if (is16BPS(c->srcFormat) && is16BPS(c->dstFormat) &&
isBE(c->srcFormat) != isBE(c->dstFormat)) {
for (i=0; i<height; i++) {
for (j=0; j<length; j++)
((uint16_t*)dstPtr)[j] = av_bswap16(((const uint16_t*)srcPtr)[j]);
srcPtr+= srcStride[plane];
dstPtr+= dstStride[plane];
for (i = 0; i < height; i++) {
for (j = 0; j < length; j++)
((uint16_t *) dstPtr)[j] = av_bswap16(((const uint16_t *) srcPtr)[j]);
srcPtr += srcStride[plane];
dstPtr += dstStride[plane];
}
} else if (dstStride[plane] == srcStride[plane] &&
srcStride[plane] > 0 && srcStride[plane] == length) {
memcpy(dst[plane] + dstStride[plane]*y, src[plane],
height*dstStride[plane]);
memcpy(dst[plane] + dstStride[plane] * y, src[plane],
height * dstStride[plane]);
} else {
if(is16BPS(c->srcFormat) && is16BPS(c->dstFormat))
length*=2;
for (i=0; i<height; i++) {
if (is16BPS(c->srcFormat) && is16BPS(c->dstFormat))
length *= 2;
for (i = 0; i < height; i++) {
memcpy(dstPtr, srcPtr, length);
srcPtr+= srcStride[plane];
dstPtr+= dstStride[plane];
srcPtr += srcStride[plane];
dstPtr += dstStride[plane];
}
}
}
@@ -681,27 +717,33 @@ void ff_get_unscaled_swscale(SwsContext *c)
const int dstH = c->dstH;
int needsDither;
needsDither= isAnyRGB(dstFormat)
&& c->dstFormatBpp < 24
&& (c->dstFormatBpp < c->srcFormatBpp || (!isAnyRGB(srcFormat)));
needsDither = isAnyRGB(dstFormat) &&
c->dstFormatBpp < 24 &&
(c->dstFormatBpp < c->srcFormatBpp || (!isAnyRGB(srcFormat)));
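    /* i.e. dithering is only flagged when the destination is an RGB-style
     * format below 24 bpp and either the conversion loses bit depth or the
     * source is not RGB-style at all. */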
/* yv12_to_nv12 */
if ((srcFormat == PIX_FMT_YUV420P || srcFormat == PIX_FMT_YUVA420P) && (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21)) {
c->swScale= planarToNv12Wrapper;
if ((srcFormat == PIX_FMT_YUV420P || srcFormat == PIX_FMT_YUVA420P) &&
(dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21)) {
c->swScale = planarToNv12Wrapper;
}
/* yuv2bgr */
if ((srcFormat==PIX_FMT_YUV420P || srcFormat==PIX_FMT_YUV422P || srcFormat==PIX_FMT_YUVA420P) && isAnyRGB(dstFormat)
&& !(flags & SWS_ACCURATE_RND) && !(dstH&1)) {
c->swScale= ff_yuv2rgb_get_func_ptr(c);
if ((srcFormat == PIX_FMT_YUV420P || srcFormat == PIX_FMT_YUV422P ||
srcFormat == PIX_FMT_YUVA420P) && isAnyRGB(dstFormat) &&
!(flags & SWS_ACCURATE_RND) && !(dstH & 1)) {
c->swScale = ff_yuv2rgb_get_func_ptr(c);
}
if (srcFormat==PIX_FMT_YUV410P && (dstFormat==PIX_FMT_YUV420P || dstFormat==PIX_FMT_YUVA420P) && !(flags & SWS_BITEXACT)) {
c->swScale= yvu9ToYv12Wrapper;
if (srcFormat == PIX_FMT_YUV410P &&
(dstFormat == PIX_FMT_YUV420P || dstFormat == PIX_FMT_YUVA420P) &&
!(flags & SWS_BITEXACT)) {
c->swScale = yvu9ToYv12Wrapper;
}
/* bgr24toYV12 */
if (srcFormat==PIX_FMT_BGR24 && (dstFormat==PIX_FMT_YUV420P || dstFormat==PIX_FMT_YUVA420P) && !(flags & SWS_ACCURATE_RND))
c->swScale= bgr24ToYv12Wrapper;
if (srcFormat == PIX_FMT_BGR24 &&
(dstFormat == PIX_FMT_YUV420P || dstFormat == PIX_FMT_YUVA420P) &&
!(flags & SWS_ACCURATE_RND))
c->swScale = bgr24ToYv12Wrapper;
/* RGB/BGR -> RGB/BGR (no dither needed forms) */
if ( isAnyRGB(srcFormat)
@@ -745,13 +787,13 @@ void ff_get_unscaled_swscale(SwsContext *c)
c->swScale = packed_16bpc_bswap;
if (usePal(srcFormat) && isByteRGB(dstFormat))
c->swScale= palToRgbWrapper;
c->swScale = palToRgbWrapper;
if (srcFormat == PIX_FMT_YUV422P) {
if (dstFormat == PIX_FMT_YUYV422)
c->swScale= yuv422pToYuy2Wrapper;
c->swScale = yuv422pToYuy2Wrapper;
else if (dstFormat == PIX_FMT_UYVY422)
c->swScale= yuv422pToUyvyWrapper;
c->swScale = yuv422pToUyvyWrapper;
}
/* LQ converters if -sws 0 or -sws 4*/
@@ -759,37 +801,39 @@ void ff_get_unscaled_swscale(SwsContext *c)
/* yv12_to_yuy2 */
if (srcFormat == PIX_FMT_YUV420P || srcFormat == PIX_FMT_YUVA420P) {
if (dstFormat == PIX_FMT_YUYV422)
c->swScale= planarToYuy2Wrapper;
c->swScale = planarToYuy2Wrapper;
else if (dstFormat == PIX_FMT_UYVY422)
c->swScale= planarToUyvyWrapper;
c->swScale = planarToUyvyWrapper;
}
}
if(srcFormat == PIX_FMT_YUYV422 && (dstFormat == PIX_FMT_YUV420P || dstFormat == PIX_FMT_YUVA420P))
c->swScale= yuyvToYuv420Wrapper;
if(srcFormat == PIX_FMT_UYVY422 && (dstFormat == PIX_FMT_YUV420P || dstFormat == PIX_FMT_YUVA420P))
c->swScale= uyvyToYuv420Wrapper;
if(srcFormat == PIX_FMT_YUYV422 && dstFormat == PIX_FMT_YUV422P)
c->swScale= yuyvToYuv422Wrapper;
if(srcFormat == PIX_FMT_UYVY422 && dstFormat == PIX_FMT_YUV422P)
c->swScale= uyvyToYuv422Wrapper;
if (srcFormat == PIX_FMT_YUYV422 &&
(dstFormat == PIX_FMT_YUV420P || dstFormat == PIX_FMT_YUVA420P))
c->swScale = yuyvToYuv420Wrapper;
if (srcFormat == PIX_FMT_UYVY422 &&
(dstFormat == PIX_FMT_YUV420P || dstFormat == PIX_FMT_YUVA420P))
c->swScale = uyvyToYuv420Wrapper;
if (srcFormat == PIX_FMT_YUYV422 && dstFormat == PIX_FMT_YUV422P)
c->swScale = yuyvToYuv422Wrapper;
if (srcFormat == PIX_FMT_UYVY422 && dstFormat == PIX_FMT_YUV422P)
c->swScale = uyvyToYuv422Wrapper;
/* simple copy */
if ( srcFormat == dstFormat
|| (srcFormat == PIX_FMT_YUVA420P && dstFormat == PIX_FMT_YUV420P)
|| (srcFormat == PIX_FMT_YUV420P && dstFormat == PIX_FMT_YUVA420P)
|| (isPlanarYUV(srcFormat) && isGray(dstFormat))
|| (isPlanarYUV(dstFormat) && isGray(srcFormat))
|| (isGray(dstFormat) && isGray(srcFormat))
|| (isPlanarYUV(srcFormat) && isPlanarYUV(dstFormat)
&& c->chrDstHSubSample == c->chrSrcHSubSample
&& c->chrDstVSubSample == c->chrSrcVSubSample
&& dstFormat != PIX_FMT_NV12 && dstFormat != PIX_FMT_NV21
&& srcFormat != PIX_FMT_NV12 && srcFormat != PIX_FMT_NV21))
if ( srcFormat == dstFormat ||
(srcFormat == PIX_FMT_YUVA420P && dstFormat == PIX_FMT_YUV420P) ||
(srcFormat == PIX_FMT_YUV420P && dstFormat == PIX_FMT_YUVA420P) ||
(isPlanarYUV(srcFormat) && isGray(dstFormat)) ||
(isPlanarYUV(dstFormat) && isGray(srcFormat)) ||
(isGray(dstFormat) && isGray(srcFormat)) ||
(isPlanarYUV(srcFormat) && isPlanarYUV(dstFormat) &&
c->chrDstHSubSample == c->chrSrcHSubSample &&
c->chrDstVSubSample == c->chrSrcVSubSample &&
dstFormat != PIX_FMT_NV12 && dstFormat != PIX_FMT_NV21 &&
srcFormat != PIX_FMT_NV12 && srcFormat != PIX_FMT_NV21))
{
if (isPacked(c->srcFormat))
c->swScale= packedCopyWrapper;
c->swScale = packedCopyWrapper;
else /* Planar YUV or gray */
c->swScale= planarCopyWrapper;
c->swScale = planarCopyWrapper;
}
if (ARCH_BFIN)
@@ -798,15 +842,15 @@ void ff_get_unscaled_swscale(SwsContext *c)
ff_swscale_get_unscaled_altivec(c);
}
static void reset_ptr(const uint8_t* src[], int format)
static void reset_ptr(const uint8_t *src[], int format)
{
if(!isALPHA(format))
src[3]=NULL;
if (!isALPHA(format))
src[3] = NULL;
if (!isPlanar(format)) {
src[3]=src[2]=NULL;
src[3] = src[2] = NULL;
if (!usePal(format))
src[1]= NULL;
src[1] = NULL;
}
}
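/* reset_ptr() clears the plane pointers the source format cannot use, so
 * stale caller-provided pointers in unused slots are ignored; src[1] is kept
 * for palette-based formats because it carries the palette rather than a
 * plane. */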
@@ -829,13 +873,15 @@ static int check_image_pointers(const uint8_t * const data[4], enum PixelFormat
* swscale wrapper, so we don't need to export the SwsContext.
* Assumes planar YUV to be in YUV order instead of YVU.
*/
int attribute_align_arg sws_scale(struct SwsContext *c, const uint8_t* const srcSlice[],
const int srcStride[], int srcSliceY, int srcSliceH,
uint8_t* const dst[], const int dstStride[])
int attribute_align_arg sws_scale(struct SwsContext *c,
const uint8_t * const srcSlice[],
const int srcStride[], int srcSliceY,
int srcSliceH, uint8_t *const dst[],
const int dstStride[])
{
int i;
const uint8_t* src2[4]= {srcSlice[0], srcSlice[1], srcSlice[2], srcSlice[3]};
uint8_t* dst2[4]= {dst[0], dst[1], dst[2], dst[3]};
const uint8_t *src2[4] = { srcSlice[0], srcSlice[1], srcSlice[2], srcSlice[3] };
uint8_t *dst2[4] = { dst[0], dst[1], dst[2], dst[3] };
// do not mess up sliceDir if we have a "trailing" 0-size slice
if (srcSliceH == 0)
@@ -859,40 +905,40 @@ int attribute_align_arg sws_scale(struct SwsContext *c, const uint8_t* const src
}
if (usePal(c->srcFormat)) {
for (i=0; i<256; i++) {
for (i = 0; i < 256; i++) {
int p, r, g, b, y, u, v, a = 0xff;
if(c->srcFormat == PIX_FMT_PAL8) {
p=((const uint32_t*)(srcSlice[1]))[i];
a= (p>>24)&0xFF;
r= (p>>16)&0xFF;
g= (p>> 8)&0xFF;
b= p &0xFF;
} else if(c->srcFormat == PIX_FMT_RGB8) {
r= (i>>5 )*36;
g= ((i>>2)&7)*36;
b= (i&3 )*85;
} else if(c->srcFormat == PIX_FMT_BGR8) {
b= (i>>6 )*85;
g= ((i>>3)&7)*36;
r= (i&7 )*36;
} else if(c->srcFormat == PIX_FMT_RGB4_BYTE) {
r= (i>>3 )*255;
g= ((i>>1)&3)*85;
b= (i&1 )*255;
} else if(c->srcFormat == PIX_FMT_GRAY8 || c->srcFormat == PIX_FMT_GRAY8A) {
if (c->srcFormat == PIX_FMT_PAL8) {
p = ((const uint32_t *)(srcSlice[1]))[i];
a = (p >> 24) & 0xFF;
r = (p >> 16) & 0xFF;
g = (p >> 8) & 0xFF;
b = p & 0xFF;
} else if (c->srcFormat == PIX_FMT_RGB8) {
r = ( i >> 5 ) * 36;
g = ((i >> 2) & 7) * 36;
b = ( i & 3) * 85;
} else if (c->srcFormat == PIX_FMT_BGR8) {
b = ( i >> 6 ) * 85;
g = ((i >> 3) & 7) * 36;
r = ( i & 7) * 36;
} else if (c->srcFormat == PIX_FMT_RGB4_BYTE) {
r = ( i >> 3 ) * 255;
g = ((i >> 1) & 3) * 85;
b = ( i & 1) * 255;
} else if (c->srcFormat == PIX_FMT_GRAY8 || c->srcFormat == PIX_FMT_GRAY8A) {
r = g = b = i;
} else {
assert(c->srcFormat == PIX_FMT_BGR4_BYTE);
b= (i>>3 )*255;
g= ((i>>1)&3)*85;
r= (i&1 )*255;
b = ( i >> 3 ) * 255;
g = ((i >> 1) & 3) * 85;
r = ( i & 1) * 255;
}
y= av_clip_uint8((RY*r + GY*g + BY*b + ( 33<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT);
u= av_clip_uint8((RU*r + GU*g + BU*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT);
v= av_clip_uint8((RV*r + GV*g + BV*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT);
y = av_clip_uint8((RY * r + GY * g + BY * b + ( 33 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
u = av_clip_uint8((RU * r + GU * g + BU * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
v = av_clip_uint8((RV * r + GV * g + BV * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
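            /* The additive constants fold the range offset and +0.5 rounding
             * into one term: 33 << (RGB2YUV_SHIFT - 1) is 16.5 in fixed point
             * (luma offset 16 plus rounding) and 257 << (RGB2YUV_SHIFT - 1)
             * is 128.5 (chroma offset 128 plus rounding). */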
c->pal_yuv[i]= y + (u<<8) + (v<<16) + (a<<24);
switch(c->dstFormat) {
switch (c->dstFormat) {
case PIX_FMT_BGR32:
#if !HAVE_BIGENDIAN
case PIX_FMT_RGB24:
@@ -924,8 +970,10 @@ int attribute_align_arg sws_scale(struct SwsContext *c, const uint8_t* const src
// copy strides, so they can safely be modified
if (c->sliceDir == 1) {
// slices go from top to bottom
int srcStride2[4]= {srcStride[0], srcStride[1], srcStride[2], srcStride[3]};
int dstStride2[4]= {dstStride[0], dstStride[1], dstStride[2], dstStride[3]};
int srcStride2[4] = { srcStride[0], srcStride[1], srcStride[2],
srcStride[3] };
int dstStride2[4] = { dstStride[0], dstStride[1], dstStride[2],
dstStride[3] };
reset_ptr(src2, c->srcFormat);
reset_ptr((void*)dst2, c->dstFormat);
@@ -934,21 +982,24 @@ int attribute_align_arg sws_scale(struct SwsContext *c, const uint8_t* const src
if (srcSliceY + srcSliceH == c->srcH)
c->sliceDir = 0;
return c->swScale(c, src2, srcStride2, srcSliceY, srcSliceH, dst2, dstStride2);
return c->swScale(c, src2, srcStride2, srcSliceY, srcSliceH, dst2,
dstStride2);
} else {
// slices go from bottom to top => we flip the image internally
int srcStride2[4]= {-srcStride[0], -srcStride[1], -srcStride[2], -srcStride[3]};
int dstStride2[4]= {-dstStride[0], -dstStride[1], -dstStride[2], -dstStride[3]};
int srcStride2[4] = { -srcStride[0], -srcStride[1], -srcStride[2],
-srcStride[3] };
int dstStride2[4] = { -dstStride[0], -dstStride[1], -dstStride[2],
-dstStride[3] };
src2[0] += (srcSliceH-1)*srcStride[0];
src2[0] += (srcSliceH - 1) * srcStride[0];
if (!usePal(c->srcFormat))
src2[1] += ((srcSliceH>>c->chrSrcVSubSample)-1)*srcStride[1];
src2[2] += ((srcSliceH>>c->chrSrcVSubSample)-1)*srcStride[2];
src2[3] += (srcSliceH-1)*srcStride[3];
dst2[0] += ( c->dstH -1)*dstStride[0];
dst2[1] += ((c->dstH>>c->chrDstVSubSample)-1)*dstStride[1];
dst2[2] += ((c->dstH>>c->chrDstVSubSample)-1)*dstStride[2];
dst2[3] += ( c->dstH -1)*dstStride[3];
src2[1] += ((srcSliceH >> c->chrSrcVSubSample) - 1) * srcStride[1];
src2[2] += ((srcSliceH >> c->chrSrcVSubSample) - 1) * srcStride[2];
src2[3] += (srcSliceH - 1) * srcStride[3];
dst2[0] += ( c->dstH - 1) * dstStride[0];
dst2[1] += ((c->dstH >> c->chrDstVSubSample) - 1) * dstStride[1];
dst2[2] += ((c->dstH >> c->chrDstVSubSample) - 1) * dstStride[2];
dst2[3] += ( c->dstH - 1) * dstStride[3];
reset_ptr(src2, c->srcFormat);
reset_ptr((void*)dst2, c->dstFormat);
@@ -957,29 +1008,32 @@ int attribute_align_arg sws_scale(struct SwsContext *c, const uint8_t* const src
if (!srcSliceY)
c->sliceDir = 0;
return c->swScale(c, src2, srcStride2, c->srcH-srcSliceY-srcSliceH, srcSliceH, dst2, dstStride2);
return c->swScale(c, src2, srcStride2, c->srcH-srcSliceY-srcSliceH,
srcSliceH, dst2, dstStride2);
}
}
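A minimal usage sketch of the wrapper above (illustrative only, not part of
this change): convert_whole_frame is a hypothetical helper that assumes the
caller has already allocated correctly strided source and destination planes,
and it converts one whole YUV420P frame to RGB24 in a single top-to-bottom
slice (srcSliceY = 0, srcSliceH = full height), using the PIX_FMT_* names as
they appear in this tree.

#include <libswscale/swscale.h>

static int convert_whole_frame(const uint8_t *const src[4], const int src_stride[4],
                               uint8_t *const dst[4], const int dst_stride[4],
                               int w, int h)
{
    /* Build an unscaled YUV420P -> RGB24 context; same size in and out. */
    struct SwsContext *ctx = sws_getContext(w, h, PIX_FMT_YUV420P,
                                            w, h, PIX_FMT_RGB24,
                                            SWS_BILINEAR, NULL, NULL, NULL);
    int ret;

    if (!ctx)
        return -1;
    /* One slice covering the full frame, so sliceDir handling is trivial. */
    ret = sws_scale(ctx, src, src_stride, 0, h, dst, dst_stride);
    sws_freeContext(ctx);
    return ret;
}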
/* Convert the palette to the same packed 32-bit format as the palette */
void sws_convertPalette8ToPacked32(const uint8_t *src, uint8_t *dst, int num_pixels, const uint8_t *palette)
void sws_convertPalette8ToPacked32(const uint8_t *src, uint8_t *dst,
int num_pixels, const uint8_t *palette)
{
int i;
for (i=0; i<num_pixels; i++)
for (i = 0; i < num_pixels; i++)
((uint32_t *) dst)[i] = ((const uint32_t *) palette)[src[i]];
}
/* Palette format: ABCD -> dst format: ABC */
void sws_convertPalette8ToPacked24(const uint8_t *src, uint8_t *dst, int num_pixels, const uint8_t *palette)
void sws_convertPalette8ToPacked24(const uint8_t *src, uint8_t *dst,
int num_pixels, const uint8_t *palette)
{
int i;
for (i=0; i<num_pixels; i++) {
for (i = 0; i < num_pixels; i++) {
//FIXME slow?
dst[0]= palette[src[i]*4+0];
dst[1]= palette[src[i]*4+1];
dst[2]= palette[src[i]*4+2];
dst+= 3;
dst[0] = palette[src[i] * 4 + 0];
dst[1] = palette[src[i] * 4 + 1];
dst[2] = palette[src[i] * 4 + 2];
dst += 3;
}
}