video: rewrite filtering glue code
Get rid of the old vf.c code. Replace it with a generic filtering
framework, which can potentially handle more than just --vf. At least
reimplementing --af with this code is planned.
This changes some --vf semantics (including runtime behavior and the
"vf" command). The most important ones are listed in interface-changes.
vf_convert.c is renamed to f_swscale.c. It is now an internal filter
that can not be inserted by the user manually.
f_lavfi.c is a refactor of player/lavfi.c. The latter will be removed
once --lavfi-complex is reimplemented on top of f_lavfi.c. (which is
conceptually easy, but a big mess due to the data flow changes).
The existing filters are all changed heavily. The data flow of the new
filter framework is different. Especially EOF handling changes - EOF is
now a "frame" rather than a state, and must be passed through exactly
once.
Another major thing is that all filters must support dynamic format
changes. The filter reconfig() function goes away. (This sounds complex,
but since all filters need to handle EOF draining anyway, they can use
the same code, and it removes the mess with reconfig() having to predict
the output format, which completely breaks with libavfilter anyway.)
In addition, there is no automatic format negotiation or conversion.
libavfilter's primitive and insufficient API simply doesn't allow us to
do this in a reasonable way. Instead, filters can use f_autoconvert as
sub-filter, and tell it which formats they support. This filter will in
turn add actual conversion filters, such as f_swscale, to perform
necessary format changes.
vf_vapoursynth.c uses the same basic principle of operation as before,
but with worryingly different details in data flow. Still appears to
work.
The hardware deint filters (vf_vavpp.c, vf_d3d11vpp.c, vf_vdpaupp.c) are
heavily changed. Fortunately, they all used refqueue.c, which is for
sharing the data flow logic (especially for managing future/past
surfaces and such). It turns out it can be used to factor out most of
the data flow. Some of these filters accepted software input. Instead of
having ad-hoc upload code in each filter, surface upload is now
delegated to f_autoconvert, which can use f_hwupload to perform this.
Exporting VO capabilities is still a big mess (mp_stream_info stuff).
The D3D11 code drops the redundant image formats, and all code uses the
hw_subfmt (sw_format in FFmpeg) instead. Although that too seems to be a
big mess for now.
f_async_queue is unused.
2018-01-16 10:53:44 +00:00
|
|
|
#include <math.h>
|
|
|
|
|
2020-05-23 02:04:46 +00:00
|
|
|
#include "audio/aframe.h"
|
|
|
|
#include "audio/format.h"
|
video: rewrite filtering glue code
Get rid of the old vf.c code. Replace it with a generic filtering
framework, which can potentially handle more than just --vf. At least
reimplementing --af with this code is planned.
This changes some --vf semantics (including runtime behavior and the
"vf" command). The most important ones are listed in interface-changes.
vf_convert.c is renamed to f_swscale.c. It is now an internal filter
that can not be inserted by the user manually.
f_lavfi.c is a refactor of player/lavfi.c. The latter will be removed
once --lavfi-complex is reimplemented on top of f_lavfi.c. (which is
conceptually easy, but a big mess due to the data flow changes).
The existing filters are all changed heavily. The data flow of the new
filter framework is different. Especially EOF handling changes - EOF is
now a "frame" rather than a state, and must be passed through exactly
once.
Another major thing is that all filters must support dynamic format
changes. The filter reconfig() function goes away. (This sounds complex,
but since all filters need to handle EOF draining anyway, they can use
the same code, and it removes the mess with reconfig() having to predict
the output format, which completely breaks with libavfilter anyway.)
In addition, there is no automatic format negotiation or conversion.
libavfilter's primitive and insufficient API simply doesn't allow us to
do this in a reasonable way. Instead, filters can use f_autoconvert as
sub-filter, and tell it which formats they support. This filter will in
turn add actual conversion filters, such as f_swscale, to perform
necessary format changes.
vf_vapoursynth.c uses the same basic principle of operation as before,
but with worryingly different details in data flow. Still appears to
work.
The hardware deint filters (vf_vavpp.c, vf_d3d11vpp.c, vf_vdpaupp.c) are
heavily changed. Fortunately, they all used refqueue.c, which is for
sharing the data flow logic (especially for managing future/past
surfaces and such). It turns out it can be used to factor out most of
the data flow. Some of these filters accepted software input. Instead of
having ad-hoc upload code in each filter, surface upload is now
delegated to f_autoconvert, which can use f_hwupload to perform this.
Exporting VO capabilities is still a big mess (mp_stream_info stuff).
The D3D11 code drops the redundant image formats, and all code uses the
hw_subfmt (sw_format in FFmpeg) instead. Although that too seems to be a
big mess for now.
f_async_queue is unused.
2018-01-16 10:53:44 +00:00
|
|
|
#include "common/common.h"
|
|
|
|
#include "common/msg.h"
|
|
|
|
#include "options/m_config.h"
|
|
|
|
#include "options/options.h"
|
|
|
|
#include "video/mp_image.h"
|
f_auto_filters: use software conversion if hw deint is not possible
Before this commit, enabling hardware deinterlacing via the
"deinterlace" option/property just failed if no hardware deinterlacing
was available. An error message was logged, and playback continued
without deinterlacing.
Change this, and try to copy the hardware surface to memory, and then
use yadif. This will have approximately the same effect as
--hwdec=auto-copy. Technically it's implemented differently, because
changing the hwdec mode is much more convoluted than just inserting a
filter for performing the "download". But the low level code for
actually performing the download is the same again.
Although performance won't be as good as with a hardware deinterlacer
(probably), it's more convenient than forcing the user to switch hwdec
modes separately. The "deinterlace" property is supposed to be a
convenience thing after all.
As far as the code architecture goes, it would make sense to auto-insert
such a download filter for all software-only filters that need it.
However, libavfilter does not tell us what formats a filter supports
(isn't that fucking crazy?), so all attempts to work towards this are
kind of hopeless. In this case, we merely have hardcoded knowledge that
vf_yadif definitely does not support hardware formats. But yes, this
sucks ass.
2019-10-02 19:27:07 +00:00
|
|
|
#include "video/mp_image_pool.h"
|
video: rewrite filtering glue code
Get rid of the old vf.c code. Replace it with a generic filtering
framework, which can potentially handle more than just --vf. At least
reimplementing --af with this code is planned.
This changes some --vf semantics (including runtime behavior and the
"vf" command). The most important ones are listed in interface-changes.
vf_convert.c is renamed to f_swscale.c. It is now an internal filter
that can not be inserted by the user manually.
f_lavfi.c is a refactor of player/lavfi.c. The latter will be removed
once --lavfi-complex is reimplemented on top of f_lavfi.c. (which is
conceptually easy, but a big mess due to the data flow changes).
The existing filters are all changed heavily. The data flow of the new
filter framework is different. Especially EOF handling changes - EOF is
now a "frame" rather than a state, and must be passed through exactly
once.
Another major thing is that all filters must support dynamic format
changes. The filter reconfig() function goes away. (This sounds complex,
but since all filters need to handle EOF draining anyway, they can use
the same code, and it removes the mess with reconfig() having to predict
the output format, which completely breaks with libavfilter anyway.)
In addition, there is no automatic format negotiation or conversion.
libavfilter's primitive and insufficient API simply doesn't allow us to
do this in a reasonable way. Instead, filters can use f_autoconvert as
sub-filter, and tell it which formats they support. This filter will in
turn add actual conversion filters, such as f_swscale, to perform
necessary format changes.
vf_vapoursynth.c uses the same basic principle of operation as before,
but with worryingly different details in data flow. Still appears to
work.
The hardware deint filters (vf_vavpp.c, vf_d3d11vpp.c, vf_vdpaupp.c) are
heavily changed. Fortunately, they all used refqueue.c, which is for
sharing the data flow logic (especially for managing future/past
surfaces and such). It turns out it can be used to factor out most of
the data flow. Some of these filters accepted software input. Instead of
having ad-hoc upload code in each filter, surface upload is now
delegated to f_autoconvert, which can use f_hwupload to perform this.
Exporting VO capabilities is still a big mess (mp_stream_info stuff).
The D3D11 code drops the redundant image formats, and all code uses the
hw_subfmt (sw_format in FFmpeg) instead. Although that too seems to be a
big mess for now.
f_async_queue is unused.
2018-01-16 10:53:44 +00:00
|
|
|
|
|
|
|
#include "f_auto_filters.h"
|
2019-10-02 21:03:40 +00:00
|
|
|
#include "f_autoconvert.h"
|
f_auto_filters: use software conversion if hw deint is not possible
Before this commit, enabling hardware deinterlacing via the
"deinterlace" option/property just failed if no hardware deinterlacing
was available. An error message was logged, and playback continued
without deinterlacing.
Change this, and try to copy the hardware surface to memory, and then
use yadif. This will have approximately the same effect as
--hwdec=auto-copy. Technically it's implemented differently, because
changing the hwdec mode is much more convoluted than just inserting a
filter for performing the "download". But the low level code for
actually performing the download is the same again.
Although performance won't be as good as with a hardware deinterlacer
(probably), it's more convenient than forcing the user to switch hwdec
modes separately. The "deinterlace" property is supposed to be a
convenience thing after all.
As far as the code architecture goes, it would make sense to auto-insert
such a download filter for all software-only filters that need it.
However, libavfilter does not tell us what formats a filter supports
(isn't that fucking crazy?), so all attempts to work towards this are
kind of hopeless. In this case, we merely have hardcoded knowledge that
vf_yadif definitely does not support hardware formats. But yes, this
sucks ass.
2019-10-02 19:27:07 +00:00
|
|
|
#include "f_hwtransfer.h"
|
video: rewrite filtering glue code
Get rid of the old vf.c code. Replace it with a generic filtering
framework, which can potentially handle more than just --vf. At least
reimplementing --af with this code is planned.
This changes some --vf semantics (including runtime behavior and the
"vf" command). The most important ones are listed in interface-changes.
vf_convert.c is renamed to f_swscale.c. It is now an internal filter
that can not be inserted by the user manually.
f_lavfi.c is a refactor of player/lavfi.c. The latter will be removed
once --lavfi-complex is reimplemented on top of f_lavfi.c. (which is
conceptually easy, but a big mess due to the data flow changes).
The existing filters are all changed heavily. The data flow of the new
filter framework is different. Especially EOF handling changes - EOF is
now a "frame" rather than a state, and must be passed through exactly
once.
Another major thing is that all filters must support dynamic format
changes. The filter reconfig() function goes away. (This sounds complex,
but since all filters need to handle EOF draining anyway, they can use
the same code, and it removes the mess with reconfig() having to predict
the output format, which completely breaks with libavfilter anyway.)
In addition, there is no automatic format negotiation or conversion.
libavfilter's primitive and insufficient API simply doesn't allow us to
do this in a reasonable way. Instead, filters can use f_autoconvert as
sub-filter, and tell it which formats they support. This filter will in
turn add actual conversion filters, such as f_swscale, to perform
necessary format changes.
vf_vapoursynth.c uses the same basic principle of operation as before,
but with worryingly different details in data flow. Still appears to
work.
The hardware deint filters (vf_vavpp.c, vf_d3d11vpp.c, vf_vdpaupp.c) are
heavily changed. Fortunately, they all used refqueue.c, which is for
sharing the data flow logic (especially for managing future/past
surfaces and such). It turns out it can be used to factor out most of
the data flow. Some of these filters accepted software input. Instead of
having ad-hoc upload code in each filter, surface upload is now
delegated to f_autoconvert, which can use f_hwupload to perform this.
Exporting VO capabilities is still a big mess (mp_stream_info stuff).
The D3D11 code drops the redundant image formats, and all code uses the
hw_subfmt (sw_format in FFmpeg) instead. Although that too seems to be a
big mess for now.
f_async_queue is unused.
2018-01-16 10:53:44 +00:00
|
|
|
#include "f_swscale.h"
|
|
|
|
#include "f_utils.h"
|
|
|
|
#include "filter.h"
|
|
|
|
#include "filter_internal.h"
|
|
|
|
#include "user_filters.h"
|
|
|
|
|
|
|
|
// Per-instance state of the auto-inserted deinterlace wrapper filter.
struct deint_priv {
    struct mp_subfilter sub;     // managed sub-filter that does the actual deinterlacing
    int prev_imgfmt;             // input image format the current sub-filter was created for
    int prev_setting;            // value of opts->deinterlace when the sub-filter was created
    struct m_config_cache *opts; // cached view of the global filter_conf options
};
|
|
|
|
|
|
|
|
// Process one queued frame. (Re)creates the deinterlacing sub-filter whenever
// the input image format or the "deinterlace" option changes, then forwards
// the frame into the sub-filter chain (or passes it through unchanged).
static void deint_process(struct mp_filter *f)
{
    struct deint_priv *p = f->priv;

    // No new input available yet (or output not ready) — try again later.
    if (!mp_subfilter_read(&p->sub))
        return;

    struct mp_frame frame = p->sub.frame;

    // Signaling frames (such as EOF) are forwarded untouched.
    if (mp_frame_is_signaling(frame)) {
        mp_subfilter_continue(&p->sub);
        return;
    }

    if (frame.type != MP_FRAME_VIDEO) {
        MP_ERR(f, "video input required!\n");
        mp_filter_internal_mark_failed(f);
        return;
    }

    m_config_cache_update(p->opts);
    struct filter_opts *opts = p->opts->opts;

    // Deinterlacing switched off: discard any existing sub-filter right away
    // (mp_subfilter_destroy, i.e. without the draining used below), then fall
    // through so the current frame is handled by the pass-through paths.
    if (!opts->deinterlace)
        mp_subfilter_destroy(&p->sub);

    struct mp_image *img = frame.data;

    // Neither format nor setting changed — keep the current setup as-is.
    if (img->imgfmt == p->prev_imgfmt && p->prev_setting == opts->deinterlace) {
        mp_subfilter_continue(&p->sub);
        return;
    }

    // Something changed: drain the old sub-filter first. If draining is still
    // in progress, return and retry on the next process() invocation.
    if (!mp_subfilter_drain_destroy(&p->sub))
        return;

    assert(!p->sub.filter);

    p->prev_imgfmt = img->imgfmt;
    p->prev_setting = opts->deinterlace;
    if (!p->prev_setting) {
        // Deinterlacing disabled: just pass frames through.
        mp_subfilter_continue(&p->sub);
        return;
    }

    // First, try a hardware deinterlacer matching the input's hw format.
    bool has_filter = true;
    if (img->imgfmt == IMGFMT_VDPAU) {
        char *args[] = {"deint", "yes", NULL};
        p->sub.filter =
            mp_create_user_filter(f, MP_OUTPUT_CHAIN_VIDEO, "vdpaupp", args);
    } else if (img->imgfmt == IMGFMT_D3D11) {
        p->sub.filter =
            mp_create_user_filter(f, MP_OUTPUT_CHAIN_VIDEO, "d3d11vpp", NULL);
    } else if (img->imgfmt == IMGFMT_CUDA) {
        char *args[] = {"mode", "send_field", NULL};
        p->sub.filter =
            mp_create_user_filter(f, MP_OUTPUT_CHAIN_VIDEO, "yadif_cuda", args);
    } else {
        has_filter = false;
    }

    // No hw deinterlacer available (or creating one failed): fall back to
    // software yadif, with f_autoconvert in front to convert/download input.
    if (!p->sub.filter) {
        if (has_filter)
            MP_ERR(f, "creating deinterlacer failed\n");

        struct mp_filter *subf = mp_bidir_dummy_filter_create(f);
        struct mp_filter *filters[2] = {0};

        struct mp_autoconvert *ac = mp_autoconvert_create(subf);
        if (ac) {
            filters[0] = ac->f;
            // We know vf_yadif does not support hw inputs.
            mp_autoconvert_add_all_sw_imgfmts(ac);

            // If conversion to a sw format is impossible, give up and pass
            // the frame through undeinterlaced instead of failing playback.
            if (!mp_autoconvert_probe_input_video(ac, img)) {
                MP_ERR(f, "no deinterlace filter available for format %s\n",
                       mp_imgfmt_to_name(img->imgfmt));
                talloc_free(subf);
                mp_subfilter_continue(&p->sub);
                return;
            }
        }

        char *args[] = {"mode", "send_field", NULL};
        filters[1] =
            mp_create_user_filter(subf, MP_OUTPUT_CHAIN_VIDEO, "yadif", args);

        // Chain [autoconvert?, yadif] between the dummy filter's pins.
        mp_chain_filters(subf->ppins[0], subf->ppins[1], filters, 2);
        p->sub.filter = subf;
    }

    mp_subfilter_continue(&p->sub);
}
|
|
|
|
|
|
|
|
static void deint_reset(struct mp_filter *f)
|
|
|
|
{
|
|
|
|
struct deint_priv *p = f->priv;
|
|
|
|
|
|
|
|
mp_subfilter_reset(&p->sub);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void deint_destroy(struct mp_filter *f)
|
|
|
|
{
|
|
|
|
struct deint_priv *p = f->priv;
|
|
|
|
|
|
|
|
mp_subfilter_reset(&p->sub);
|
|
|
|
TA_FREEP(&p->sub.filter);
|
|
|
|
}
|
|
|
|
|
2018-04-21 11:18:03 +00:00
|
|
|
static bool deint_command(struct mp_filter *f, struct mp_filter_command *cmd)
|
|
|
|
{
|
|
|
|
struct deint_priv *p = f->priv;
|
|
|
|
|
|
|
|
if (cmd->type == MP_FILTER_COMMAND_IS_ACTIVE) {
|
|
|
|
cmd->is_active = !!p->sub.filter;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
video: rewrite filtering glue code
Get rid of the old vf.c code. Replace it with a generic filtering
framework, which can potentially handle more than just --vf. At least
reimplementing --af with this code is planned.
This changes some --vf semantics (including runtime behavior and the
"vf" command). The most important ones are listed in interface-changes.
vf_convert.c is renamed to f_swscale.c. It is now an internal filter
that can not be inserted by the user manually.
f_lavfi.c is a refactor of player/lavfi.c. The latter will be removed
once --lavfi-complex is reimplemented on top of f_lavfi.c. (which is
conceptually easy, but a big mess due to the data flow changes).
The existing filters are all changed heavily. The data flow of the new
filter framework is different. Especially EOF handling changes - EOF is
now a "frame" rather than a state, and must be passed through exactly
once.
Another major thing is that all filters must support dynamic format
changes. The filter reconfig() function goes away. (This sounds complex,
but since all filters need to handle EOF draining anyway, they can use
the same code, and it removes the mess with reconfig() having to predict
the output format, which completely breaks with libavfilter anyway.)
In addition, there is no automatic format negotiation or conversion.
libavfilter's primitive and insufficient API simply doesn't allow us to
do this in a reasonable way. Instead, filters can use f_autoconvert as
sub-filter, and tell it which formats they support. This filter will in
turn add actual conversion filters, such as f_swscale, to perform
necessary format changes.
vf_vapoursynth.c uses the same basic principle of operation as before,
but with worryingly different details in data flow. Still appears to
work.
The hardware deint filters (vf_vavpp.c, vf_d3d11vpp.c, vf_vdpaupp.c) are
heavily changed. Fortunately, they all used refqueue.c, which is for
sharing the data flow logic (especially for managing future/past
surfaces and such). It turns out it can be used to factor out most of
the data flow. Some of these filters accepted software input. Instead of
having ad-hoc upload code in each filter, surface upload is now
delegated to f_autoconvert, which can use f_hwupload to perform this.
Exporting VO capabilities is still a big mess (mp_stream_info stuff).
The D3D11 code drops the redundant image formats, and all code uses the
hw_subfmt (sw_format in FFmpeg) instead. Although that too seems to be a
big mess for now.
f_async_queue is unused.
2018-01-16 10:53:44 +00:00
|
|
|
static const struct mp_filter_info deint_filter = {
|
|
|
|
.name = "deint",
|
|
|
|
.priv_size = sizeof(struct deint_priv),
|
2018-04-21 11:18:03 +00:00
|
|
|
.command = deint_command,
|
video: rewrite filtering glue code
Get rid of the old vf.c code. Replace it with a generic filtering
framework, which can potentially handle more than just --vf. At least
reimplementing --af with this code is planned.
This changes some --vf semantics (including runtime behavior and the
"vf" command). The most important ones are listed in interface-changes.
vf_convert.c is renamed to f_swscale.c. It is now an internal filter
that can not be inserted by the user manually.
f_lavfi.c is a refactor of player/lavfi.c. The latter will be removed
once --lavfi-complex is reimplemented on top of f_lavfi.c. (which is
conceptually easy, but a big mess due to the data flow changes).
The existing filters are all changed heavily. The data flow of the new
filter framework is different. Especially EOF handling changes - EOF is
now a "frame" rather than a state, and must be passed through exactly
once.
Another major thing is that all filters must support dynamic format
changes. The filter reconfig() function goes away. (This sounds complex,
but since all filters need to handle EOF draining anyway, they can use
the same code, and it removes the mess with reconfig() having to predict
the output format, which completely breaks with libavfilter anyway.)
In addition, there is no automatic format negotiation or conversion.
libavfilter's primitive and insufficient API simply doesn't allow us to
do this in a reasonable way. Instead, filters can use f_autoconvert as
sub-filter, and tell it which formats they support. This filter will in
turn add actual conversion filters, such as f_swscale, to perform
necessary format changes.
vf_vapoursynth.c uses the same basic principle of operation as before,
but with worryingly different details in data flow. Still appears to
work.
The hardware deint filters (vf_vavpp.c, vf_d3d11vpp.c, vf_vdpaupp.c) are
heavily changed. Fortunately, they all used refqueue.c, which is for
sharing the data flow logic (especially for managing future/past
surfaces and such). It turns out it can be used to factor out most of
the data flow. Some of these filters accepted software input. Instead of
having ad-hoc upload code in each filter, surface upload is now
delegated to f_autoconvert, which can use f_hwupload to perform this.
Exporting VO capabilities is still a big mess (mp_stream_info stuff).
The D3D11 code drops the redundant image formats, and all code uses the
hw_subfmt (sw_format in FFmpeg) instead. Although that too seems to be a
big mess for now.
f_async_queue is unused.
2018-01-16 10:53:44 +00:00
|
|
|
.process = deint_process,
|
|
|
|
.reset = deint_reset,
|
|
|
|
.destroy = deint_destroy,
|
|
|
|
};
|
|
|
|
|
|
|
|
struct mp_filter *mp_deint_create(struct mp_filter *parent)
|
|
|
|
{
|
|
|
|
struct mp_filter *f = mp_filter_create(parent, &deint_filter);
|
|
|
|
if (!f)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
struct deint_priv *p = f->priv;
|
|
|
|
|
|
|
|
p->sub.in = mp_filter_add_pin(f, MP_PIN_IN, "in");
|
|
|
|
p->sub.out = mp_filter_add_pin(f, MP_PIN_OUT, "out");
|
|
|
|
|
|
|
|
p->opts = m_config_cache_alloc(f, f->global, &filter_conf);
|
|
|
|
|
|
|
|
return f;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Private state of the auto-rotation filter.
struct rotate_priv {
    struct mp_subfilter sub;    // manages the dynamically inserted "rotate" sub-filter
    int prev_rotate;            // rotation of the last configured frame (-1 = none yet)
    int prev_imgfmt;            // image format of the last configured frame
    int target_rotate;          // rotation metadata to set on passed-through frames
};
|
|
|
|
|
|
|
|
static void rotate_process(struct mp_filter *f)
|
|
|
|
{
|
|
|
|
struct rotate_priv *p = f->priv;
|
|
|
|
|
|
|
|
if (!mp_subfilter_read(&p->sub))
|
|
|
|
return;
|
|
|
|
|
|
|
|
struct mp_frame frame = p->sub.frame;
|
|
|
|
|
|
|
|
if (mp_frame_is_signaling(frame)) {
|
|
|
|
mp_subfilter_continue(&p->sub);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (frame.type != MP_FRAME_VIDEO) {
|
|
|
|
MP_ERR(f, "video input required!\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
struct mp_image *img = frame.data;
|
|
|
|
|
|
|
|
if (img->params.rotate == p->prev_rotate &&
|
|
|
|
img->imgfmt == p->prev_imgfmt)
|
|
|
|
{
|
|
|
|
img->params.rotate = p->target_rotate;
|
|
|
|
mp_subfilter_continue(&p->sub);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!mp_subfilter_drain_destroy(&p->sub))
|
|
|
|
return;
|
|
|
|
|
|
|
|
assert(!p->sub.filter);
|
|
|
|
|
|
|
|
int rotate = p->prev_rotate = img->params.rotate;
|
|
|
|
p->target_rotate = rotate;
|
|
|
|
p->prev_imgfmt = img->imgfmt;
|
|
|
|
|
|
|
|
struct mp_stream_info *info = mp_filter_find_stream_info(f);
|
|
|
|
if (rotate == 0 || (info && info->rotate90 && !(rotate % 90))) {
|
|
|
|
mp_subfilter_continue(&p->sub);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2018-02-18 15:21:56 +00:00
|
|
|
if (!mp_sws_supports_input(img->imgfmt)) {
|
video: rewrite filtering glue code
Get rid of the old vf.c code. Replace it with a generic filtering
framework, which can potentially handle more than just --vf. At least
reimplementing --af with this code is planned.
This changes some --vf semantics (including runtime behavior and the
"vf" command). The most important ones are listed in interface-changes.
vf_convert.c is renamed to f_swscale.c. It is now an internal filter
that can not be inserted by the user manually.
f_lavfi.c is a refactor of player/lavfi.c. The latter will be removed
once --lavfi-complex is reimplemented on top of f_lavfi.c. (which is
conceptually easy, but a big mess due to the data flow changes).
The existing filters are all changed heavily. The data flow of the new
filter framework is different. Especially EOF handling changes - EOF is
now a "frame" rather than a state, and must be passed through exactly
once.
Another major thing is that all filters must support dynamic format
changes. The filter reconfig() function goes away. (This sounds complex,
but since all filters need to handle EOF draining anyway, they can use
the same code, and it removes the mess with reconfig() having to predict
the output format, which completely breaks with libavfilter anyway.)
In addition, there is no automatic format negotiation or conversion.
libavfilter's primitive and insufficient API simply doesn't allow us to
do this in a reasonable way. Instead, filters can use f_autoconvert as
sub-filter, and tell it which formats they support. This filter will in
turn add actual conversion filters, such as f_swscale, to perform
necessary format changes.
vf_vapoursynth.c uses the same basic principle of operation as before,
but with worryingly different details in data flow. Still appears to
work.
The hardware deint filters (vf_vavpp.c, vf_d3d11vpp.c, vf_vdpaupp.c) are
heavily changed. Fortunately, they all used refqueue.c, which is for
sharing the data flow logic (especially for managing future/past
surfaces and such). It turns out it can be used to factor out most of
the data flow. Some of these filters accepted software input. Instead of
having ad-hoc upload code in each filter, surface upload is now
delegated to f_autoconvert, which can use f_hwupload to perform this.
Exporting VO capabilities is still a big mess (mp_stream_info stuff).
The D3D11 code drops the redundant image formats, and all code uses the
hw_subfmt (sw_format in FFmpeg) instead. Although that too seems to be a
big mess for now.
f_async_queue is unused.
2018-01-16 10:53:44 +00:00
|
|
|
MP_ERR(f, "Video rotation with this format not supported\n");
|
|
|
|
mp_subfilter_continue(&p->sub);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
double angle = rotate / 360.0 * M_PI * 2;
|
|
|
|
char *args[] = {"angle", mp_tprintf(30, "%f", angle),
|
|
|
|
"ow", mp_tprintf(30, "rotw(%f)", angle),
|
|
|
|
"oh", mp_tprintf(30, "roth(%f)", angle),
|
|
|
|
NULL};
|
|
|
|
p->sub.filter =
|
|
|
|
mp_create_user_filter(f, MP_OUTPUT_CHAIN_VIDEO, "rotate", args);
|
|
|
|
|
|
|
|
if (p->sub.filter) {
|
|
|
|
MP_INFO(f, "Inserting rotation filter.\n");
|
|
|
|
p->target_rotate = 0;
|
|
|
|
} else {
|
|
|
|
MP_ERR(f, "could not create rotation filter\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
mp_subfilter_continue(&p->sub);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void rotate_reset(struct mp_filter *f)
|
|
|
|
{
|
|
|
|
struct rotate_priv *p = f->priv;
|
|
|
|
|
|
|
|
mp_subfilter_reset(&p->sub);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void rotate_destroy(struct mp_filter *f)
|
|
|
|
{
|
|
|
|
struct rotate_priv *p = f->priv;
|
|
|
|
|
|
|
|
mp_subfilter_reset(&p->sub);
|
|
|
|
TA_FREEP(&p->sub.filter);
|
|
|
|
}
|
|
|
|
|
2018-04-21 11:18:03 +00:00
|
|
|
static bool rotate_command(struct mp_filter *f, struct mp_filter_command *cmd)
|
|
|
|
{
|
|
|
|
struct rotate_priv *p = f->priv;
|
|
|
|
|
|
|
|
if (cmd->type == MP_FILTER_COMMAND_IS_ACTIVE) {
|
|
|
|
cmd->is_active = !!p->sub.filter;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
video: rewrite filtering glue code
Get rid of the old vf.c code. Replace it with a generic filtering
framework, which can potentially handle more than just --vf. At least
reimplementing --af with this code is planned.
This changes some --vf semantics (including runtime behavior and the
"vf" command). The most important ones are listed in interface-changes.
vf_convert.c is renamed to f_swscale.c. It is now an internal filter
that can not be inserted by the user manually.
f_lavfi.c is a refactor of player/lavfi.c. The latter will be removed
once --lavfi-complex is reimplemented on top of f_lavfi.c. (which is
conceptually easy, but a big mess due to the data flow changes).
The existing filters are all changed heavily. The data flow of the new
filter framework is different. Especially EOF handling changes - EOF is
now a "frame" rather than a state, and must be passed through exactly
once.
Another major thing is that all filters must support dynamic format
changes. The filter reconfig() function goes away. (This sounds complex,
but since all filters need to handle EOF draining anyway, they can use
the same code, and it removes the mess with reconfig() having to predict
the output format, which completely breaks with libavfilter anyway.)
In addition, there is no automatic format negotiation or conversion.
libavfilter's primitive and insufficient API simply doesn't allow us to
do this in a reasonable way. Instead, filters can use f_autoconvert as
sub-filter, and tell it which formats they support. This filter will in
turn add actual conversion filters, such as f_swscale, to perform
necessary format changes.
vf_vapoursynth.c uses the same basic principle of operation as before,
but with worryingly different details in data flow. Still appears to
work.
The hardware deint filters (vf_vavpp.c, vf_d3d11vpp.c, vf_vdpaupp.c) are
heavily changed. Fortunately, they all used refqueue.c, which is for
sharing the data flow logic (especially for managing future/past
surfaces and such). It turns out it can be used to factor out most of
the data flow. Some of these filters accepted software input. Instead of
having ad-hoc upload code in each filter, surface upload is now
delegated to f_autoconvert, which can use f_hwupload to perform this.
Exporting VO capabilities is still a big mess (mp_stream_info stuff).
The D3D11 code drops the redundant image formats, and all code uses the
hw_subfmt (sw_format in FFmpeg) instead. Although that too seems to be a
big mess for now.
f_async_queue is unused.
2018-01-16 10:53:44 +00:00
|
|
|
// Filter description for the auto-rotation wrapper.
static const struct mp_filter_info rotate_filter = {
    .name = "autorotate",
    .priv_size = sizeof(struct rotate_priv),
    .command = rotate_command,
    .process = rotate_process,
    .reset = rotate_reset,
    .destroy = rotate_destroy,
};
|
|
|
|
|
|
|
|
struct mp_filter *mp_autorotate_create(struct mp_filter *parent)
|
|
|
|
{
|
|
|
|
struct mp_filter *f = mp_filter_create(parent, &rotate_filter);
|
|
|
|
if (!f)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
struct rotate_priv *p = f->priv;
|
|
|
|
p->prev_rotate = -1;
|
|
|
|
|
|
|
|
p->sub.in = mp_filter_add_pin(f, MP_PIN_IN, "in");
|
|
|
|
p->sub.out = mp_filter_add_pin(f, MP_PIN_OUT, "out");
|
|
|
|
|
|
|
|
return f;
|
|
|
|
}
|
2018-01-18 13:44:20 +00:00
|
|
|
|
|
|
|
// Private state of the auto audio-speed filter.
struct aspeed_priv {
    struct mp_subfilter sub;            // manages the inserted speed sub-filter
    double cur_speed, cur_speed_drop;   // user speed factor and drop-mode factor
    int current_filter;                 // 0 = none, 1 = scaletempo, 2 = drop
};
|
|
|
|
|
|
|
|
static void aspeed_process(struct mp_filter *f)
|
|
|
|
{
|
|
|
|
struct aspeed_priv *p = f->priv;
|
|
|
|
|
|
|
|
if (!mp_subfilter_read(&p->sub))
|
|
|
|
return;
|
|
|
|
|
2020-05-23 02:04:46 +00:00
|
|
|
if (!p->sub.filter)
|
|
|
|
p->current_filter = 0;
|
|
|
|
|
|
|
|
double speed = p->cur_speed * p->cur_speed_drop;
|
|
|
|
|
|
|
|
int req_filter = 0;
|
|
|
|
if (fabs(speed - 1.0) >= 1e-8) {
|
|
|
|
req_filter = p->cur_speed_drop == 1.0 ? 1 : 2;
|
|
|
|
if (p->sub.frame.type == MP_FRAME_AUDIO &&
|
|
|
|
!af_fmt_is_pcm(mp_aframe_get_format(p->sub.frame.data)))
|
|
|
|
req_filter = 2;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (req_filter != p->current_filter) {
|
2018-01-18 13:44:20 +00:00
|
|
|
if (p->sub.filter)
|
2020-05-23 02:04:46 +00:00
|
|
|
MP_VERBOSE(f, "removing audio speed filter\n");
|
2018-01-18 13:44:20 +00:00
|
|
|
if (!mp_subfilter_drain_destroy(&p->sub))
|
|
|
|
return;
|
2020-05-23 02:04:46 +00:00
|
|
|
|
|
|
|
if (req_filter) {
|
|
|
|
if (req_filter == 1) {
|
|
|
|
MP_VERBOSE(f, "adding scaletempo\n");
|
|
|
|
p->sub.filter = mp_create_user_filter(f, MP_OUTPUT_CHAIN_AUDIO,
|
|
|
|
"scaletempo", NULL);
|
|
|
|
} else if (req_filter == 2) {
|
|
|
|
MP_VERBOSE(f, "adding drop\n");
|
|
|
|
p->sub.filter = mp_create_user_filter(f, MP_OUTPUT_CHAIN_AUDIO,
|
|
|
|
"drop", NULL);
|
|
|
|
}
|
|
|
|
if (!p->sub.filter) {
|
|
|
|
MP_ERR(f, "could not create filter\n");
|
|
|
|
mp_subfilter_continue(&p->sub);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
p->current_filter = req_filter;
|
2018-01-18 13:44:20 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (p->sub.filter) {
|
|
|
|
struct mp_filter_command cmd = {
|
|
|
|
.type = MP_FILTER_COMMAND_SET_SPEED,
|
2020-05-23 02:04:46 +00:00
|
|
|
.speed = speed,
|
2018-01-18 13:44:20 +00:00
|
|
|
};
|
|
|
|
mp_filter_command(p->sub.filter, &cmd);
|
|
|
|
}
|
|
|
|
|
|
|
|
mp_subfilter_continue(&p->sub);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool aspeed_command(struct mp_filter *f, struct mp_filter_command *cmd)
|
|
|
|
{
|
|
|
|
struct aspeed_priv *p = f->priv;
|
|
|
|
|
|
|
|
if (cmd->type == MP_FILTER_COMMAND_SET_SPEED) {
|
|
|
|
p->cur_speed = cmd->speed;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2020-05-23 02:04:46 +00:00
|
|
|
if (cmd->type == MP_FILTER_COMMAND_SET_SPEED_DROP) {
|
|
|
|
p->cur_speed_drop = cmd->speed;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2018-04-21 11:18:03 +00:00
|
|
|
if (cmd->type == MP_FILTER_COMMAND_IS_ACTIVE) {
|
|
|
|
cmd->is_active = !!p->sub.filter;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2018-01-18 13:44:20 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void aspeed_reset(struct mp_filter *f)
|
|
|
|
{
|
|
|
|
struct aspeed_priv *p = f->priv;
|
|
|
|
|
|
|
|
mp_subfilter_reset(&p->sub);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void aspeed_destroy(struct mp_filter *f)
|
|
|
|
{
|
|
|
|
struct aspeed_priv *p = f->priv;
|
|
|
|
|
|
|
|
mp_subfilter_reset(&p->sub);
|
|
|
|
TA_FREEP(&p->sub.filter);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Filter description for the auto audio-speed wrapper.
static const struct mp_filter_info aspeed_filter = {
    .name = "autoaspeed",
    .priv_size = sizeof(struct aspeed_priv),
    .command = aspeed_command,
    .process = aspeed_process,
    .reset = aspeed_reset,
    .destroy = aspeed_destroy,
};
|
|
|
|
|
|
|
|
struct mp_filter *mp_autoaspeed_create(struct mp_filter *parent)
|
|
|
|
{
|
|
|
|
struct mp_filter *f = mp_filter_create(parent, &aspeed_filter);
|
|
|
|
if (!f)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
struct aspeed_priv *p = f->priv;
|
|
|
|
p->cur_speed = 1.0;
|
2020-05-23 02:04:46 +00:00
|
|
|
p->cur_speed_drop = 1.0;
|
2018-01-18 13:44:20 +00:00
|
|
|
|
|
|
|
p->sub.in = mp_filter_add_pin(f, MP_PIN_IN, "in");
|
|
|
|
p->sub.out = mp_filter_add_pin(f, MP_PIN_OUT, "out");
|
|
|
|
|
|
|
|
return f;
|
|
|
|
}
|