# mpv/wscript_build.py

import re
import os
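
# This is the waf build description for mpv: the top-level wscript loads it
# for the build step, so it is not meant to be run directly.  The usual entry
# point is presumably something like `./waf configure` followed by
# `./waf build`; the dependency names checked below ("cplayer", "lua",
# "vulkan", ...) come from the configure-time feature checks.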
def _add_rst_manual_dependencies(ctx):
    manpage_sources_basenames = """
        options.rst ao.rst vo.rst af.rst vf.rst encode.rst
        input.rst osc.rst stats.rst lua.rst ipc.rst changes.rst""".split()

    manpage_sources = ['DOCS/man/'+x for x in manpage_sources_basenames]

    for manpage_source in manpage_sources:
        ctx.add_manual_dependency(
            ctx.path.find_node('DOCS/man/mpv.rst'),
            ctx.path.find_node(manpage_source))
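
# mpv.rst pulls in the pages listed above via RST includes, so each of them is
# registered as a manual dependency of mpv.rst; that way the rendered
# documentation is rebuilt whenever any included page changes.
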
def _build_html(ctx):
    ctx(
        name = 'rst2html',
        target = 'DOCS/man/mpv.html',
        source = 'DOCS/man/mpv.rst',
        rule = '${RST2HTML} ${SRC} ${TGT}',
        install_path = ctx.env.HTMLDIR)

    _add_rst_manual_dependencies(ctx)

def _build_man(ctx):
    ctx(
        name = 'rst2man',
        target = 'DOCS/man/mpv.1',
        source = 'DOCS/man/mpv.rst',
        rule = '${RST2MAN} --strip-elements-with-class=contents ${SRC} ${TGT}',
        install_path = ctx.env.MANDIR + '/man1')

    _add_rst_manual_dependencies(ctx)

def _build_pdf(ctx):
    ctx(
        name = 'rst2pdf',
        target = 'DOCS/man/mpv.pdf',
        source = 'DOCS/man/mpv.rst',
        rule = '${RST2PDF} -c -b 1 --repeat-table-rows ${SRC} -o ${TGT}',
        install_path = ctx.env.DOCDIR)

    _add_rst_manual_dependencies(ctx)

def _all_includes(ctx):
    return [ctx.bldnode.abspath(), ctx.srcnode.abspath()] + \
        ctx.dependencies_includes()

def build(ctx):
    ctx.load('waf_customizations')
    ctx.load('generators.sources')
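
    # The "file2string" tasks below appear to be provided by the generator
    # tools loaded above; they embed each listed asset (icons, default
    # configs, Lua/JS scripts, the OSD font) as data in a generated include
    # file, so the player does not need the files on disk at runtime.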
    ctx(
        features = "file2string",
        source = "TOOLS/osxbundle/mpv.app/Contents/Resources/icon.icns",
        target = "osdep/macosx_icon.inc",
    )

    ctx(
        features = "file2string",
        source = "etc/mpv-icon-8bit-16x16.png",
        target = "video/out/x11_icon_16.inc",
    )

    ctx(
        features = "file2string",
        source = "etc/mpv-icon-8bit-32x32.png",
        target = "video/out/x11_icon_32.inc",
    )

    ctx(
        features = "file2string",
        source = "etc/mpv-icon-8bit-64x64.png",
        target = "video/out/x11_icon_64.inc",
    )
    ctx(
        features = "file2string",
        source = "etc/mpv-icon-8bit-128x128.png",
        target = "video/out/x11_icon_128.inc",
    )

    ctx(
        features = "file2string",
        source = "etc/input.conf",
        target = "input/input_conf.h",
    )

    ctx(
        features = "file2string",
        source = "etc/builtin.conf",
        target = "player/builtin_conf.inc",
    )

    ctx(
        features = "file2string",
        source = "sub/osd_font.otf",
        target = "sub/osd_font.h",
    )
    lua_files = ["defaults.lua", "assdraw.lua", "options.lua", "osc.lua",
                 "ytdl_hook.lua", "stats.lua"]
    for fn in lua_files:
        fn = "player/lua/" + fn
        ctx(
            features = "file2string",
            source = fn,
            target = os.path.splitext(fn)[0] + ".inc",
        )

    ctx(
        features = "file2string",
        source = "player/javascript/defaults.js",
        target = "player/javascript/defaults.js.inc",
    )
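
    # The wayland_protocol_code()/_header() helpers presumably wrap
    # wayland-scanner: for each protocol they generate the C glue code and the
    # matching header, either from the system wayland-protocols directory
    # (WL_PROTO_DIR) or, with vendored_protocol=True, from an XML file shipped
    # in the tree.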
    if ctx.dependency_satisfied('wayland'):
        ctx.wayland_protocol_code(proto_dir = ctx.env.WL_PROTO_DIR,
                                  protocol = "stable/xdg-shell/xdg-shell",
                                  target = "video/out/wayland/xdg-shell.c")
        ctx.wayland_protocol_header(proto_dir = ctx.env.WL_PROTO_DIR,
                                    protocol = "stable/xdg-shell/xdg-shell",
                                    target = "video/out/wayland/xdg-shell.h")
        ctx.wayland_protocol_code(proto_dir = ctx.env.WL_PROTO_DIR,
                                  protocol = "unstable/idle-inhibit/idle-inhibit-unstable-v1",
                                  target = "video/out/wayland/idle-inhibit-v1.c")
        ctx.wayland_protocol_header(proto_dir = ctx.env.WL_PROTO_DIR,
                                    protocol = "unstable/idle-inhibit/idle-inhibit-unstable-v1",
                                    target = "video/out/wayland/idle-inhibit-v1.h")
        ctx.wayland_protocol_code(proto_dir = "video/out/wayland",
                                  protocol = "server-decoration",
                                  vendored_protocol = True,
                                  target = "video/out/wayland/srv-decor.c")
        ctx.wayland_protocol_header(proto_dir = "video/out/wayland",
                                    protocol = "server-decoration",
                                    vendored_protocol = True,
                                    target = "video/out/wayland/srv-decor.h")
ctx(features = "ebml_header", target = "ebml_types.h")
ctx(features = "ebml_definitions", target = "ebml_defs.c")
    def swift(task):
        src = ' '.join([x.abspath() for x in task.inputs])
        bridge = ctx.path.find_node("osdep/macOS_swift_bridge.h").abspath()
        tgt = task.outputs[0].abspath()
        header = task.outputs[1].abspath()
        module = task.outputs[2].abspath()

        cmd = ('%s %s -module-name macOS_swift -emit-module-path %s '
               '-import-objc-header %s -emit-objc-header-path %s -o %s %s '
               '-I. -I..') % (ctx.env.SWIFT, ctx.env.SWIFT_FLAGS, module,
                              bridge, header, tgt, src)

        return task.exec_command(cmd)
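
    # Assuming SWIFT points at swiftc, the assembled command looks roughly
    # like:
    #   swiftc <SWIFT_FLAGS> -module-name macOS_swift \
    #       -emit-module-path osdep/macOS_swift.swiftmodule \
    #       -import-objc-header osdep/macOS_swift_bridge.h \
    #       -emit-objc-header-path osdep/macOS_swift.h \
    #       -o osdep/macOS_swift.o <swift sources> -I. -I..
    # (paths shown relative for readability; the rule uses absolute paths)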
    if ctx.dependency_satisfied('cocoa') and ctx.env.MACOS_SDK:
        # on macOS we explicitly need to set the SDK path, otherwise linking
        # can produce warnings or errors
        ctx.env.append_value('LINKFLAGS', [
            '-isysroot', ctx.env.MACOS_SDK
        ])
    if ctx.dependency_satisfied('macos-cocoa-cb'):
        swift_source = [
            ( "osdep/macOS_mpv_helper.swift" ),
( "video/out/cocoa-cb/events_view.swift" ),
( "video/out/cocoa-cb/video_layer.swift" ),
( "video/out/cocoa-cb/window.swift" ),
( "video/out/cocoa_cb_common.swift" ),
        ]

        ctx(
            rule = swift,
            source = ctx.filtered_sources(swift_source),
            target = ('osdep/macOS_swift.o '
                      'osdep/macOS_swift.h '
                      'osdep/macOS_swift.swiftmodule'),
            before = 'c',
        )

        ctx.env.append_value('LINKFLAGS', [
            '-Xlinker', '-add_ast_path',
            '-Xlinker', '%s' % ctx.path.find_or_declare("osdep/macOS_swift.swiftmodule").abspath()
        ])
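
    # pick_first_matching_dep() appears to return the first (source,
    # dependency) entry whose dependency expression is satisfied; entries
    # without an expression act as unconditional fallbacks (e.g. the
    # *-dummy.c files).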
    if ctx.dependency_satisfied('cplayer'):
        main_fn_c = ctx.pick_first_matching_dep([
            ( "osdep/main-fn-cocoa.c", "cocoa" ),
            ( "osdep/main-fn-unix.c", "posix" ),
            ( "osdep/main-fn-win.c", "win32-desktop" ),
        ])

    getch2_c = ctx.pick_first_matching_dep([
        ( "osdep/terminal-unix.c", "posix" ),
        ( "osdep/terminal-win.c", "win32-desktop" ),
        ( "osdep/terminal-dummy.c" ),
    ])

    timer_c = ctx.pick_first_matching_dep([
        ( "osdep/timer-win2.c", "os-win32" ),
        ( "osdep/timer-darwin.c", "os-darwin" ),
        ( "osdep/timer-linux.c", "posix" ),
    ])

    ipc_c = ctx.pick_first_matching_dep([
        ( "input/ipc-unix.c", "posix" ),
        ( "input/ipc-win.c", "win32-desktop" ),
        ( "input/ipc-dummy.c" ),
    ])

    subprocess_c = ctx.pick_first_matching_dep([
        ( "osdep/subprocess-posix.c", "posix-spawn" ),
        ( "osdep/subprocess-win.c", "win32-desktop" ),
        ( "osdep/subprocess-dummy.c" ),
    ])
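
    # Each entry in `sources` is either a plain filename or a
    # (filename, dependency-expression) pair; ctx.filtered_sources() is
    # expected to drop the entries whose expression (which may use && and ||)
    # was not satisfied at configure time, so only the enabled backends get
    # compiled.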
    sources = [
        ## Audio
        ( "audio/aframe.c" ),
        ( "audio/audio_buffer.c" ),
        ( "audio/chmap.c" ),
        ( "audio/chmap_sel.c" ),
        ( "audio/decode/ad_lavc.c" ),
        ( "audio/decode/ad_spdif.c" ),
        ( "audio/filter/af_format.c" ),
        ( "audio/filter/af_lavcac3enc.c" ),
        ( "audio/filter/af_lavrresample.c" ),
        ( "audio/filter/af_rubberband.c", "rubberband" ),
        ( "audio/filter/af_scaletempo.c" ),
        ( "audio/fmt-conversion.c" ),
        ( "audio/format.c" ),
        ( "audio/out/ao.c" ),
        ( "audio/out/ao_alsa.c", "alsa" ),
        ( "audio/out/ao_audiounit.m", "audiounit" ),
        ( "audio/out/ao_coreaudio.c", "coreaudio" ),
        ( "audio/out/ao_coreaudio_chmap.c", "audiounit" ),
        ( "audio/out/ao_coreaudio_chmap.c", "coreaudio" ),
        ( "audio/out/ao_coreaudio_exclusive.c", "coreaudio" ),
        ( "audio/out/ao_coreaudio_properties.c", "coreaudio" ),
        ( "audio/out/ao_coreaudio_utils.c", "audiounit" ),
        ( "audio/out/ao_coreaudio_utils.c", "coreaudio" ),
        ( "audio/out/ao_jack.c", "jack" ),
        ( "audio/out/ao_lavc.c" ),
        ( "audio/out/ao_null.c" ),
        ( "audio/out/ao_openal.c", "openal" ),
        ( "audio/out/ao_opensles.c", "opensles" ),
        ( "audio/out/ao_oss.c", "oss-audio" ),
        ( "audio/out/ao_pcm.c" ),
        ( "audio/out/ao_pulse.c", "pulse" ),
        ( "audio/out/ao_rsound.c", "rsound" ),
        ( "audio/out/ao_sdl.c", "sdl2" ),
        ( "audio/out/ao_sndio.c", "sndio" ),
        ( "audio/out/ao_wasapi.c", "wasapi" ),
        ( "audio/out/ao_wasapi_changenotify.c", "wasapi" ),
        ( "audio/out/ao_wasapi_utils.c", "wasapi" ),
        ( "audio/out/pull.c" ),
        ( "audio/out/push.c" ),

        ## Core
        ( "common/av_common.c" ),
        ( "common/av_log.c" ),
        ( "common/codecs.c" ),
        ( "common/common.c" ),
        ( "common/encode_lavc.c" ),
        ( "common/msg.c" ),
        ( "common/playlist.c" ),
        ( "common/recorder.c" ),
        ( "common/tags.c" ),
        ( "common/version.c" ),

        ## Demuxers
        ( "demux/codec_tags.c" ),
        ( "demux/cue.c" ),
        ( "demux/demux.c" ),
        ( "demux/demux_cue.c" ),
        ( "demux/demux_disc.c" ),
        ( "demux/demux_edl.c" ),
        ( "demux/demux_lavf.c" ),
        ( "demux/demux_libarchive.c", "libarchive" ),
        ( "demux/demux_mf.c" ),
        ( "demux/demux_mkv.c" ),
        ( "demux/demux_mkv_timeline.c" ),
        ( "demux/demux_null.c" ),
        ( "demux/demux_playlist.c" ),
        ( "demux/demux_rar.c" ),
        ( "demux/demux_raw.c" ),
( "demux/demux_timeline.c" ),
( "demux/demux_tv.c", "tv" ),
( "demux/ebml.c" ),
( "demux/packet.c" ),
( "demux/timeline.c" ),
( "filters/f_autoconvert.c" ),
( "filters/f_auto_filters.c" ),
( "filters/f_decoder_wrapper.c" ),
( "filters/f_demux_in.c" ),
( "filters/f_hwtransfer.c" ),
( "filters/f_lavfi.c" ),
( "filters/f_output_chain.c" ),
( "filters/f_swresample.c" ),
( "filters/f_swscale.c" ),
( "filters/f_utils.c" ),
( "filters/filter.c" ),
( "filters/frame.c" ),
( "filters/user_filters.c" ),
        ## Input
        ( "input/cmd_list.c" ),
        ( "input/cmd.c" ),
        ( "input/event.c" ),
( "input/input.c" ),
( "input/ipc.c" ),
( ipc_c ),
( "input/keycodes.c" ),
( "input/pipe-win32.c", "win32-pipes" ),
        ## Misc
        ( "misc/bstr.c" ),
        ( "misc/charset_conv.c" ),
        ( "misc/dispatch.c" ),
        ( "misc/json.c" ),
        ( "misc/node.c" ),
        ( "misc/rendezvous.c" ),
        ( "misc/ring.c" ),
        ( "misc/thread_pool.c" ),

        ## Options
        ( "options/m_config.c" ),
        ( "options/m_option.c" ),
        ( "options/m_property.c" ),
        ( "options/options.c" ),
        ( "options/parse_commandline.c" ),
        ( "options/parse_configfile.c" ),
        ( "options/path.c" ),
        ## Player
        ( "player/audio.c" ),
        ( "player/client.c" ),
( "player/command.c" ),
( "player/configfiles.c" ),
( "player/external_files.c" ),
( "player/javascript.c", "javascript" ),
( "player/loadfile.c" ),
( "player/lua.c", "lua" ),
( "player/main.c" ),
( "player/misc.c" ),
( "player/osd.c" ),
( "player/playloop.c" ),
( "player/screenshot.c" ),
( "player/scripting.c" ),
( "player/sub.c" ),
( "player/video.c" ),
## Streams
( "stream/ai_alsa1x.c", "alsa && audio-input" ),
( "stream/ai_oss.c", "oss-audio && audio-input" ),
( "stream/ai_sndio.c", "sndio && audio-input" ),
( "stream/audio_in.c", "audio-input" ),
( "stream/cache.c" ),
( "stream/cache_file.c" ),
( "stream/cookies.c" ),
( "stream/dvb_tune.c", "dvbin" ),
( "stream/frequencies.c", "tv" ),
( "stream/rar.c" ),
( "stream/stream.c" ),
( "stream/stream_avdevice.c" ),
( "stream/stream_bluray.c", "libbluray" ),
( "stream/stream_cb.c" ),
( "stream/stream_cdda.c", "cdda" ),
( "stream/stream_dvb.c", "dvbin" ),
( "stream/stream_dvd.c", "dvdread-common" ),
( "stream/stream_dvd_common.c", "dvdread-common" ),
( "stream/stream_dvdnav.c", "dvdnav" ),
( "stream/stream_edl.c" ),
( "stream/stream_file.c" ),
( "stream/stream_lavf.c" ),
( "stream/stream_libarchive.c", "libarchive" ),
( "stream/stream_memory.c" ),
( "stream/stream_mf.c" ),
( "stream/stream_null.c" ),
( "stream/stream_rar.c" ),
( "stream/stream_smb.c", "libsmbclient" ),
( "stream/stream_tv.c", "tv" ),
( "stream/tv.c", "tv" ),
( "stream/tvi_dummy.c", "tv" ),
( "stream/tvi_v4l2.c", "tv-v4l2"),
## Subtitles
( "sub/ass_mp.c", "libass"),
( "sub/dec_sub.c" ),
( "sub/draw_bmp.c" ),
( "sub/filter_sdh.c" ),
( "sub/img_convert.c" ),
( "sub/lavc_conv.c" ),
( "sub/osd.c" ),
( "sub/osd_dummy.c", "dummy-osd" ),
( "sub/osd_libass.c", "libass-osd" ),
( "sub/sd_ass.c", "libass" ),
( "sub/sd_lavc.c" ),
## Video
( "video/csputils.c" ),
( "video/d3d.c", "d3d-hwaccel" ),
( "video/decode/vd_lavc.c" ),
( "video/filter/refqueue.c" ),
( "video/filter/vf_d3d11vpp.c", "d3d-hwaccel" ),
( "video/filter/vf_format.c" ),
( "video/filter/vf_sub.c" ),
( "video/filter/vf_vapoursynth.c", "vapoursynth-core" ),
( "video/filter/vf_vavpp.c", "vaapi" ),
( "video/filter/vf_vdpaupp.c", "vdpau" ),
( "video/fmt-conversion.c" ),
( "video/hwdec.c" ),
( "video/image_loader.c" ),
( "video/image_writer.c" ),
( "video/img_format.c" ),
( "video/mp_image.c" ),
( "video/mp_image_pool.c" ),
( "video/out/aspect.c" ),
( "video/out/bitmap_packer.c" ),
( "video/out/cocoa/events_view.m", "cocoa" ),
( "video/out/cocoa/video_view.m", "cocoa" ),
( "video/out/cocoa/window.m", "cocoa" ),
( "video/out/cocoa_common.m", "cocoa" ),
( "video/out/d3d11/context.c", "d3d11" ),
( "video/out/d3d11/hwdec_d3d11va.c", "d3d11 && d3d-hwaccel" ),
( "video/out/d3d11/hwdec_dxva2dxgi.c", "d3d11 && d3d9-hwaccel" ),
( "video/out/d3d11/ra_d3d11.c", "d3d11" ),
( "video/out/dither.c" ),
( "video/out/dr_helper.c" ),
( "video/out/drm_atomic.c", "drm" ),
( "video/out/drm_common.c", "drm" ),
( "video/out/drm_prime.c", "drm && drmprime" ),
( "video/out/filter_kernels.c" ),
( "video/out/gpu/context.c" ),
( "video/out/gpu/d3d11_helpers.c", "d3d11 || egl-angle-win32" ),
( "video/out/gpu/hwdec.c" ),
( "video/out/gpu/lcms.c" ),
( "video/out/gpu/libmpv_gpu.c" ),
( "video/out/gpu/osd.c" ),
( "video/out/gpu/ra.c" ),
( "video/out/gpu/shader_cache.c" ),
( "video/out/gpu/spirv.c" ),
( "video/out/gpu/spirv_shaderc.c", "shaderc" ),
( "video/out/gpu/user_shaders.c" ),
( "video/out/gpu/utils.c" ),
( "video/out/gpu/video.c" ),
( "video/out/gpu/video_shaders.c" ),
( "video/out/opengl/angle_dynamic.c", "egl-angle" ),
( "video/out/opengl/common.c", "gl" ),
( "video/out/opengl/context.c", "gl" ),
( "video/out/opengl/context_android.c", "android" ),
( "video/out/opengl/context_angle.c", "egl-angle-win32" ),
( "video/out/opengl/context_cocoa.c", "gl-cocoa" ),
( "video/out/opengl/context_drm_egl.c", "egl-drm" ),
( "video/out/opengl/context_dxinterop.c","gl-dxinterop" ),
( "video/out/opengl/context_glx.c", "gl-x11" ),
( "video/out/opengl/context_mali_fbdev.c","mali-fbdev" ),
( "video/out/opengl/context_rpi.c", "rpi" ),
( "video/out/opengl/context_vdpau.c", "vdpau-gl-x11" ),
( "video/out/opengl/context_wayland.c", "gl-wayland" ),
( "video/out/opengl/context_win.c", "gl-win32" ),
( "video/out/opengl/context_x11egl.c", "egl-x11" ),
( "video/out/opengl/egl_helpers.c", "egl-helpers" ),
( "video/out/opengl/formats.c", "gl" ),
( "video/out/opengl/hwdec_cuda.c", "cuda-hwaccel" ),
( "video/out/opengl/hwdec_d3d11egl.c", "d3d-hwaccel && egl-angle" ),
( "video/out/opengl/hwdec_d3d11eglrgb.c","d3d-hwaccel && egl-angle" ),
( "video/out/opengl/hwdec_drmprime_drm.c","drmprime && drm" ),
( "video/out/opengl/hwdec_dxva2egl.c", "d3d9-hwaccel && egl-angle" ),
( "video/out/opengl/hwdec_dxva2gldx.c", "gl-dxinterop-d3d9" ),
( "video/out/opengl/hwdec_ios.m", "ios-gl" ),
( "video/out/opengl/hwdec_osx.c", "videotoolbox-gl" ),
( "video/out/opengl/hwdec_rpi.c", "rpi" ),
( "video/out/opengl/hwdec_vaegl.c", "vaapi-egl" ),
( "video/out/opengl/hwdec_vdpau.c", "vdpau-gl-x11" ),
( "video/out/opengl/libmpv_gl.c", "gl" ),
( "video/out/opengl/ra_gl.c", "gl" ),
( "video/out/opengl/utils.c", "gl" ),
( "video/out/vo.c" ),
( "video/out/vo_caca.c", "caca" ),
( "video/out/vo_direct3d.c", "direct3d" ),
( "video/out/vo_drm.c", "drm" ),
( "video/out/vo_gpu.c" ),
( "video/out/vo_image.c" ),
( "video/out/vo_lavc.c" ),
( "video/out/vo_libmpv.c" ),
( "video/out/vo_mediacodec_embed.c", "android" ),
( "video/out/vo_null.c" ),
( "video/out/vo_rpi.c", "rpi" ),
( "video/out/vo_sdl.c", "sdl2" ),
( "video/out/vo_tct.c" ),
( "video/out/vo_vaapi.c", "vaapi-x11 && gpl" ),
( "video/out/vo_vdpau.c", "vdpau" ),
( "video/out/vo_x11.c" , "x11" ),
( "video/out/vo_xv.c", "xv" ),
( "video/out/vulkan/context.c", "vulkan" ),
( "video/out/vulkan/context_wayland.c", "vulkan && wayland" ),
( "video/out/vulkan/context_win.c", "vulkan && win32-desktop" ),
( "video/out/vulkan/context_xlib.c", "vulkan && x11" ),
( "video/out/vulkan/formats.c", "vulkan" ),
( "video/out/vulkan/malloc.c", "vulkan" ),
( "video/out/vulkan/ra_vk.c", "vulkan" ),
( "video/out/vulkan/spirv_nvidia.c", "vulkan" ),
( "video/out/vulkan/utils.c", "vulkan" ),
( "video/out/w32_common.c", "win32-desktop" ),
( "video/out/wayland/idle-inhibit-v1.c", "wayland" ),
( "video/out/wayland/srv-decor.c", "wayland" ),
( "video/out/wayland/xdg-shell.c", "wayland" ),
( "video/out/wayland_common.c", "wayland" ),
( "video/out/win32/displayconfig.c", "win32-desktop" ),
( "video/out/win32/droptarget.c", "win32-desktop" ),
( "video/out/win_state.c"),
( "video/out/x11_common.c", "x11" ),
( "video/sws_utils.c" ),
( "video/vaapi.c", "vaapi" ),
( "video/vdpau.c", "vdpau" ),
( "video/vdpau_mixer.c", "vdpau" ),
## osdep
( getch2_c ),
( "osdep/io.c" ),
( "osdep/threads.c" ),
( "osdep/timer.c" ),
( timer_c ),
( "osdep/polldev.c", "posix" ),
( "osdep/android/posix-spawn.c", "android"),
( "osdep/android/strnlen.c", "android"),
( "osdep/ar/HIDRemote.m", "apple-remote" ),
( "osdep/glob-win.c", "glob-win32" ),
( "osdep/macosx_application.m", "cocoa" ),
( "osdep/macosx_events.m", "cocoa" ),
( "osdep/macosx_menubar.m", "cocoa" ),
( "osdep/macosx_touchbar.m", "macos-touchbar" ),
( "osdep/mpv.rc", "win32-executable" ),
( "osdep/path-macosx.m", "cocoa" ),
( "osdep/path-unix.c"),
( "osdep/path-uwp.c", "uwp" ),
( "osdep/path-win.c", "win32-desktop" ),
( "osdep/semaphore_osx.c" ),
( "osdep/subprocess.c" ),
( subprocess_c ),
( "osdep/w32_keyboard.c", "os-cygwin" ),
( "osdep/w32_keyboard.c", "os-win32" ),
( "osdep/win32/pthread.c", "win32-internal-pthreads"),
( "osdep/windows_utils.c", "os-cygwin" ),
( "osdep/windows_utils.c", "os-win32" ),
## tree_allocator
"ta/ta.c", "ta/ta_talloc.c", "ta/ta_utils.c"
]
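
    # For Windows executables the resources in osdep/mpv.rc (icon, manifest,
    # version info) are compiled with windres into a *-rc.o object that gets
    # linked into mpv; the manifest, icon and generated version.h are added as
    # manual dependencies so the resources are rebuilt when they change.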
    if ctx.dependency_satisfied('win32-executable'):
        from waflib import TaskGen

        TaskGen.declare_chain(
            name = 'windres',
            rule = '${WINDRES} ${WINDRES_FLAGS} ${SRC} ${TGT}',
            ext_in = '.rc',
            ext_out = '-rc.o',
            color = 'PINK')

        ctx.env.WINDRES_FLAGS = [
            '--include-dir={0}'.format(ctx.bldnode.abspath()),
            '--include-dir={0}'.format(ctx.srcnode.abspath()),
            '--codepage=65001' # Unicode codepage
        ]

        for node in 'osdep/mpv.exe.manifest etc/mpv-icon.ico'.split():
            ctx.add_manual_dependency(
                ctx.path.find_node('osdep/mpv.rc'),
                ctx.path.find_node(node))

        version = ctx.bldnode.find_node('version.h')
        if version:
            ctx.add_manual_dependency(
                ctx.path.find_node('osdep/mpv.rc'),
                version)
    if ctx.dependency_satisfied('cplayer') or ctx.dependency_satisfied('test'):
        ctx(
            target = "objects",
            source = ctx.filtered_sources(sources),
            use = ctx.dependencies_use(),
            includes = _all_includes(ctx),
            features = "c",
        )

    syms = False
    if ctx.dependency_satisfied('cplugins'):
        syms = True
        ctx.load("syms")

    if ctx.dependency_satisfied('cplayer'):
        ctx(
            target = "mpv",
            source = main_fn_c,
            use = ctx.dependencies_use() + ['objects'],
add_object = "osdep/macOS_swift.o",
includes = _all_includes(ctx),
features = "c cprogram" + (" syms" if syms else ""),
export_symbols_def = "libmpv/mpv.def", # for syms=True
install_path = ctx.env.BINDIR
)
        for f in ['mpv.conf', 'input.conf', 'mplayer-input.conf', \
                  'restore-old-bindings.conf']:
            ctx.install_as(os.path.join(ctx.env.DOCDIR, f),
                           os.path.join('etc/', f))

        if ctx.env.DEST_OS == 'win32':
            wrapctx = ctx(
                target = "mpv",
                source = ['osdep/win32-console-wrapper.c'],
                features = "c cprogram",
                install_path = ctx.env.BINDIR
            )

            wrapctx.env.cprogram_PATTERN = "%s.com"
            wrapflags = ['-municode', '-mconsole']
            wrapctx.env.CFLAGS = wrapflags
            wrapctx.env.LAST_LINKFLAGS = wrapflags
    if ctx.dependency_satisfied('test'):
        for test in ctx.path.ant_glob("test/*.c"):
            ctx(
                target = os.path.splitext(test.srcpath())[0],
                source = test.srcpath(),
                use = ctx.dependencies_use() + ['objects'],
                includes = _all_includes(ctx),
                features = "c cprogram",
                install_path = None,
            )
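
    # libmpv reuses the same filtered source list.  The library version is
    # derived from MPV_CLIENT_API_VERSION in libmpv/client.h: the regex below
    # extracts the major and minor numbers and turns them into a
    # "<major>.<minor>.0" string, which (except on Android) also becomes the
    # shared library's vnum/SONAME.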
    build_shared = ctx.dependency_satisfied('libmpv-shared')
    build_static = ctx.dependency_satisfied('libmpv-static')
    if build_shared or build_static:
        if build_shared:
            waftoolsdir = os.path.join(os.path.dirname(__file__), "waftools")
            ctx.load("syms", tooldir=waftoolsdir)
        vre = '#define MPV_CLIENT_API_VERSION MPV_MAKE_VERSION\((.*), (.*)\)'
        libmpv_header = ctx.path.find_node("libmpv/client.h").read()
        major, minor = re.search(vre, libmpv_header).groups()
        libversion = major + '.' + minor + '.0'

        def _build_libmpv(shared):
            features = "c "
            if shared:
                features += "cshlib syms"
            else:
                features += "cstlib"

            libmpv_kwargs = {
                "target": "mpv",
                "source": ctx.filtered_sources(sources),
                "use": ctx.dependencies_use(),
                "add_object": "osdep/macOS_swift.o",
                "includes": [ctx.bldnode.abspath(), ctx.srcnode.abspath()] + \
                            ctx.dependencies_includes(),
                "features": features,
                "export_symbols_def": "libmpv/mpv.def",
                "install_path": ctx.env.LIBDIR,
                "install_path_implib": ctx.env.LIBDIR,
            }

            if shared and ctx.dependency_satisfied('android'):
                # for Android we just add the linker flag without version
                # as we still need the SONAME for proper linkage.
                # (LINKFLAGS logic taken from waf's apply_vnum in ccroot.py)
                v = ctx.env.SONAME_ST % 'libmpv.so'
                ctx.env.append_value('LINKFLAGS', v.split())
            else:
                # for all other configurations we want SONAME to be used
                libmpv_kwargs["vnum"] = libversion

            if shared and ctx.env.DEST_OS == 'win32':
                libmpv_kwargs["install_path"] = ctx.env.BINDIR

            ctx(**libmpv_kwargs)

        if build_shared:
            _build_libmpv(True)
        if build_static:
            _build_libmpv(False)

        def get_deps():
            res = ""
            for k in ctx.env.keys():
                if k.startswith("LIB_") and k != "LIB_ST":
                    res += " ".join(["-l" + x for x in ctx.env[k]]) + " "
            return res
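
        # get_deps() flattens the detected system libraries (the LIB_* values
        # in the configure environment) into a "-lfoo -lbar ..." string; it is
        # substituted into mpv.pc below as PRIV_LIBS, presumably ending up in
        # the pkg-config private libs so static users of libmpv know which
        # extra libraries to link.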
        ctx(
            target = 'libmpv/mpv.pc',
            source = 'libmpv/mpv.pc.in',
            features = 'subst',
            PREFIX = ctx.env.PREFIX,
            LIBDIR = ctx.env.LIBDIR,
            INCDIR = ctx.env.INCDIR,
            VERSION = libversion,
            PRIV_LIBS = get_deps(),
        )

        headers = ["client.h", "qthelper.hpp", "opengl_cb.h", "render.h",
                   "render_gl.h", "stream_cb.h"]
        for f in headers:
            ctx.install_as(ctx.env.INCDIR + '/mpv/' + f, 'libmpv/' + f)

        ctx.install_as(ctx.env.LIBDIR + '/pkgconfig/mpv.pc', 'libmpv/mpv.pc')

    if ctx.dependency_satisfied('html-build'):
        _build_html(ctx)

    if ctx.dependency_satisfied('manpage-build'):
        _build_man(ctx)

    if ctx.dependency_satisfied('pdf-build'):
        _build_pdf(ctx)

    if ctx.dependency_satisfied('cplayer'):
        if ctx.dependency_satisfied('zsh-comp'):
            ctx.zshcomp(target = "etc/_mpv", source = "TOOLS/zsh.pl")
            ctx.install_files(
                ctx.env.ZSHDIR,
                ['etc/_mpv'])

        ctx.install_files(
            ctx.env.DATADIR + '/applications',
            ['etc/mpv.desktop'] )

        ctx.install_files(ctx.env.CONFDIR, ['etc/encoding-profiles.conf'] )

        for size in '16x16 32x32 64x64'.split():
            ctx.install_as(
                ctx.env.DATADIR + '/icons/hicolor/' + size + '/apps/mpv.png',
                'etc/mpv-icon-8bit-' + size + '.png')

        ctx.install_as(
            ctx.env.DATADIR + '/icons/hicolor/scalable/apps/mpv.svg',
            'etc/mpv-gradient.svg')
        ctx.install_files(
            ctx.env.DATADIR + '/icons/hicolor/symbolic/apps',
            ['etc/mpv-symbolic.svg'])