mpv/audio/format.h


/*
 * The sample format system used in libaf is based on bitmasks.
* The format definition only refers to the storage format,
* not the resolution.
*
* This file is part of MPlayer.
*
* MPlayer is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* MPlayer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with MPlayer; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifndef MPLAYER_AF_FORMAT_H
#define MPLAYER_AF_FORMAT_H
#include <sys/types.h>
#include "config.h"
#include "mpvcore/bstr.h"
// Endianness
#define AF_FORMAT_BE (0<<0) // Big Endian
#define AF_FORMAT_LE (1<<0) // Little Endian
#define AF_FORMAT_END_MASK (1<<0)
#if BYTE_ORDER == BIG_ENDIAN
#define AF_FORMAT_NE AF_FORMAT_BE
#else
#define AF_FORMAT_NE AF_FORMAT_LE
#endif
// Signed/unsigned
#define AF_FORMAT_SI (0<<1) // Signed
#define AF_FORMAT_US (1<<1) // Unsigned
#define AF_FORMAT_SIGN_MASK (1<<1)
// Bits used
#define AF_FORMAT_8BIT (0<<3)
#define AF_FORMAT_16BIT (1<<3)
#define AF_FORMAT_24BIT (2<<3)
#define AF_FORMAT_32BIT (3<<3)
#define AF_FORMAT_64BIT (4<<3)
#define AF_FORMAT_BITS_MASK (7<<3)
// Special flags referring to non-PCM data (note: 1<<6, 2<<6, 5<<6 unused)
#define AF_FORMAT_MPEG2 (3<<6) // MPEG(2) audio
#define AF_FORMAT_AC3 (4<<6) // Dolby Digital AC3
#define AF_FORMAT_IEC61937 (6<<6)
#define AF_FORMAT_SPECIAL_MASK (7<<6)
// Fixed or floating point
#define AF_FORMAT_I (1<<9) // Int
#define AF_FORMAT_F (2<<9) // Floating point
#define AF_FORMAT_POINT_MASK (3<<9)
#define AF_FORMAT_MASK ((1<<11)-1)
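/* Usage sketch (illustrative only, not part of this header): a format value
 * can be decomposed with the masks above. For example, AF_FORMAT_S16_LE
 * (defined below) is little-endian, signed, 16-bit integer PCM:
 *
 *   int fmt = AF_FORMAT_S16_LE;
 *   assert((fmt & AF_FORMAT_END_MASK)   == AF_FORMAT_LE);    // byte order
 *   assert((fmt & AF_FORMAT_SIGN_MASK)  == AF_FORMAT_SI);    // signedness
 *   assert((fmt & AF_FORMAT_BITS_MASK)  == AF_FORMAT_16BIT); // sample size
 *   assert((fmt & AF_FORMAT_POINT_MASK) == AF_FORMAT_I);     // integer PCM
 */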
// PREDEFINED formats
#define AF_FORMAT_U8 (AF_FORMAT_I|AF_FORMAT_US|AF_FORMAT_8BIT|AF_FORMAT_NE)
#define AF_FORMAT_S8 (AF_FORMAT_I|AF_FORMAT_SI|AF_FORMAT_8BIT|AF_FORMAT_NE)
#define AF_FORMAT_U16_LE (AF_FORMAT_I|AF_FORMAT_US|AF_FORMAT_16BIT|AF_FORMAT_LE)
#define AF_FORMAT_U16_BE (AF_FORMAT_I|AF_FORMAT_US|AF_FORMAT_16BIT|AF_FORMAT_BE)
#define AF_FORMAT_S16_LE (AF_FORMAT_I|AF_FORMAT_SI|AF_FORMAT_16BIT|AF_FORMAT_LE)
#define AF_FORMAT_S16_BE (AF_FORMAT_I|AF_FORMAT_SI|AF_FORMAT_16BIT|AF_FORMAT_BE)
#define AF_FORMAT_U24_LE (AF_FORMAT_I|AF_FORMAT_US|AF_FORMAT_24BIT|AF_FORMAT_LE)
#define AF_FORMAT_U24_BE (AF_FORMAT_I|AF_FORMAT_US|AF_FORMAT_24BIT|AF_FORMAT_BE)
#define AF_FORMAT_S24_LE (AF_FORMAT_I|AF_FORMAT_SI|AF_FORMAT_24BIT|AF_FORMAT_LE)
#define AF_FORMAT_S24_BE (AF_FORMAT_I|AF_FORMAT_SI|AF_FORMAT_24BIT|AF_FORMAT_BE)
#define AF_FORMAT_U32_LE (AF_FORMAT_I|AF_FORMAT_US|AF_FORMAT_32BIT|AF_FORMAT_LE)
#define AF_FORMAT_U32_BE (AF_FORMAT_I|AF_FORMAT_US|AF_FORMAT_32BIT|AF_FORMAT_BE)
#define AF_FORMAT_S32_LE (AF_FORMAT_I|AF_FORMAT_SI|AF_FORMAT_32BIT|AF_FORMAT_LE)
#define AF_FORMAT_S32_BE (AF_FORMAT_I|AF_FORMAT_SI|AF_FORMAT_32BIT|AF_FORMAT_BE)
#define AF_FORMAT_FLOAT_LE (AF_FORMAT_F|AF_FORMAT_32BIT|AF_FORMAT_LE)
#define AF_FORMAT_FLOAT_BE (AF_FORMAT_F|AF_FORMAT_32BIT|AF_FORMAT_BE)
#define AF_FORMAT_DOUBLE_LE (AF_FORMAT_F|AF_FORMAT_64BIT|AF_FORMAT_LE)
#define AF_FORMAT_DOUBLE_BE (AF_FORMAT_F|AF_FORMAT_64BIT|AF_FORMAT_BE)
#define AF_FORMAT_AC3_LE (AF_FORMAT_AC3|AF_FORMAT_16BIT|AF_FORMAT_LE)
#define AF_FORMAT_AC3_BE (AF_FORMAT_AC3|AF_FORMAT_16BIT|AF_FORMAT_BE)
#define AF_FORMAT_IEC61937_LE (AF_FORMAT_IEC61937|AF_FORMAT_16BIT|AF_FORMAT_LE)
#define AF_FORMAT_IEC61937_BE (AF_FORMAT_IEC61937|AF_FORMAT_16BIT|AF_FORMAT_BE)
#if BYTE_ORDER == BIG_ENDIAN
#define AF_FORMAT_U16_NE AF_FORMAT_U16_BE
#define AF_FORMAT_S16_NE AF_FORMAT_S16_BE
#define AF_FORMAT_U24_NE AF_FORMAT_U24_BE
#define AF_FORMAT_S24_NE AF_FORMAT_S24_BE
#define AF_FORMAT_U32_NE AF_FORMAT_U32_BE
#define AF_FORMAT_S32_NE AF_FORMAT_S32_BE
#define AF_FORMAT_FLOAT_NE AF_FORMAT_FLOAT_BE
#define AF_FORMAT_DOUBLE_NE AF_FORMAT_DOUBLE_BE
#define AF_FORMAT_AC3_NE AF_FORMAT_AC3_BE
#define AF_FORMAT_IEC61937_NE AF_FORMAT_IEC61937_BE
#else
#define AF_FORMAT_U16_NE AF_FORMAT_U16_LE
#define AF_FORMAT_S16_NE AF_FORMAT_S16_LE
#define AF_FORMAT_U24_NE AF_FORMAT_U24_LE
#define AF_FORMAT_S24_NE AF_FORMAT_S24_LE
#define AF_FORMAT_U32_NE AF_FORMAT_U32_LE
#define AF_FORMAT_S32_NE AF_FORMAT_S32_LE
#define AF_FORMAT_FLOAT_NE AF_FORMAT_FLOAT_LE
#define AF_FORMAT_DOUBLE_NE AF_FORMAT_DOUBLE_LE
#define AF_FORMAT_AC3_NE AF_FORMAT_AC3_LE
#define AF_FORMAT_IEC61937_NE AF_FORMAT_IEC61937_LE
#endif
#define AF_FORMAT_UNKNOWN 0
#define AF_FORMAT_IS_AC3(fmt) (((fmt) & AF_FORMAT_SPECIAL_MASK) == AF_FORMAT_AC3)
#define AF_FORMAT_IS_IEC61937(fmt) (AF_FORMAT_IS_AC3(fmt) || ((fmt) & AF_FORMAT_SPECIAL_MASK) == AF_FORMAT_IEC61937)
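/* Illustrative example (not part of this header): AC3 passthrough is carried
 * as an IEC 61937 bitstream, so the checks nest. With the macros above:
 *
 *   AF_FORMAT_IS_AC3(AF_FORMAT_AC3_LE)        // 1
 *   AF_FORMAT_IS_IEC61937(AF_FORMAT_AC3_LE)   // 1
 *   AF_FORMAT_IS_AC3(AF_FORMAT_IEC61937_LE)   // 0
 */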
struct af_fmt_entry {
    const char *name;
    int format;
};
extern const struct af_fmt_entry af_fmtstr_table[];
int af_str2fmt_short(bstr str);
const char *af_fmt_to_str(int format);
int af_fmt2bits(int format);
int af_fmt_change_bits(int format, int bits);
// Number of bytes needed to store audio of the given duration, aligned to frames.
int af_fmt_seconds_to_bytes(int format, float seconds, int channels, int samplerate);
bool af_fmt_is_valid(int format);
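/* Usage sketch (illustrative; bstr0() is assumed from mpvcore/bstr.h, and the
 * format name "s16le" is an assumption; see af_fmtstr_table for the canonical
 * names):
 *
 *   int fmt = af_str2fmt_short(bstr0("s16le"));  // parse a format name
 *   const char *name = af_fmt_to_str(fmt);       // back to its string form
 *   int bits = af_fmt2bits(fmt);                 // bits per sample, e.g. 16
 *   // Frame-aligned byte count for 0.5 seconds of stereo audio at 48000 Hz:
 *   int bytes = af_fmt_seconds_to_bytes(fmt, 0.5f, 2, 48000);
 */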
#endif /* MPLAYER_AF_FORMAT_H */