mirror of https://github.com/mpv-player/mpv
vo_gpu: vulkan: normalize use of *Flags and *FlagBits
FlagBits is just the name of the enum. The actual data type representing a combination of these flags follows the *Flags convention. (The relevant difference is that the latter is explicitly defined to be uint32_t, rather than being left implicit.) For consistency, use *Flags everywhere instead of randomly switching between *Flags and *FlagBits. Also fix a wrong type name on `stageFlags` (it should be VkShaderStageFlags, not VkPipelineStageFlagBits), pointed out by @atomnuker.
This commit is contained in:
parent
0ba6c7d73f
commit
5b6b77b8dc
|
@ -148,7 +148,7 @@ static bool update_swapchain_info(struct priv *p,
|
|||
VK(vkGetPhysicalDeviceSurfaceCapabilitiesKHR(vk->physd, vk->surf, &caps));
|
||||
|
||||
// Sorted by preference
|
||||
static const VkCompositeAlphaFlagBitsKHR alphaModes[] = {
|
||||
static const VkCompositeAlphaFlagsKHR alphaModes[] = {
|
||||
VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR,
|
||||
VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,
|
||||
};
|
||||
|
@ -166,7 +166,7 @@ static bool update_swapchain_info(struct priv *p,
|
|||
goto error;
|
||||
}
|
||||
|
||||
static const VkSurfaceTransformFlagBitsKHR rotModes[] = {
|
||||
static const VkSurfaceTransformFlagsKHR rotModes[] = {
|
||||
VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR,
|
||||
VK_SURFACE_TRANSFORM_INHERIT_BIT_KHR,
|
||||
};
|
||||
|
|
|
@ -54,10 +54,10 @@ struct vk_slab {
|
|||
// actually be that many in practice, because some combinations simply never
|
||||
// occur, and others will generally be the same for the same objects.
|
||||
struct vk_heap {
|
||||
VkBufferUsageFlagBits usage; // the buffer usage type (or 0)
|
||||
VkMemoryPropertyFlagBits flags; // the memory type flags (or 0)
|
||||
uint32_t typeBits; // the memory type index requirements (or 0)
|
||||
struct vk_slab **slabs; // array of slabs sorted by size
|
||||
VkBufferUsageFlags usage; // the buffer usage type (or 0)
|
||||
VkMemoryPropertyFlags flags; // the memory type flags (or 0)
|
||||
uint32_t typeBits; // the memory type index requirements (or 0)
|
||||
struct vk_slab **slabs; // array of slabs sorted by size
|
||||
int num_slabs;
|
||||
};
|
||||
|
||||
|
@ -89,7 +89,7 @@ static void slab_free(struct mpvk_ctx *vk, struct vk_slab *slab)
|
|||
}
|
||||
|
||||
static bool find_best_memtype(struct mpvk_ctx *vk, uint32_t typeBits,
|
||||
VkMemoryPropertyFlagBits flags,
|
||||
VkMemoryPropertyFlags flags,
|
||||
VkMemoryType *out_type, int *out_index)
|
||||
{
|
||||
struct vk_malloc *ma = vk->alloc;
|
||||
|
@ -109,7 +109,7 @@ static bool find_best_memtype(struct mpvk_ctx *vk, uint32_t typeBits,
|
|||
}
|
||||
|
||||
MP_ERR(vk, "Found no memory type matching property flags 0x%x and type "
|
||||
"bits 0x%x!\n", flags, (unsigned)typeBits);
|
||||
"bits 0x%x!\n", (unsigned)flags, (unsigned)typeBits);
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -154,7 +154,7 @@ static struct vk_slab *slab_alloc(struct mpvk_ctx *vk, struct vk_heap *heap,
|
|||
goto error;
|
||||
|
||||
MP_VERBOSE(vk, "Allocating %zu memory of type 0x%x (id %d) in heap %d.\n",
|
||||
slab->size, type.propertyFlags, index, (int)type.heapIndex);
|
||||
slab->size, (unsigned)type.propertyFlags, index, (int)type.heapIndex);
|
||||
|
||||
minfo.memoryTypeIndex = index;
|
||||
VK(vkAllocateMemory(vk->dev, &minfo, MPVK_ALLOCATOR, &slab->mem));
|
||||
|
@ -279,9 +279,8 @@ void vk_free_memslice(struct mpvk_ctx *vk, struct vk_memslice slice)
|
|||
}
|
||||
|
||||
// reqs: can be NULL
|
||||
static struct vk_heap *find_heap(struct mpvk_ctx *vk,
|
||||
VkBufferUsageFlagBits usage,
|
||||
VkMemoryPropertyFlagBits flags,
|
||||
static struct vk_heap *find_heap(struct mpvk_ctx *vk, VkBufferUsageFlags usage,
|
||||
VkMemoryPropertyFlags flags,
|
||||
VkMemoryRequirements *reqs)
|
||||
{
|
||||
struct vk_malloc *ma = vk->alloc;
|
||||
|
@ -401,14 +400,14 @@ static bool slice_heap(struct mpvk_ctx *vk, struct vk_heap *heap, size_t size,
|
|||
}
|
||||
|
||||
bool vk_malloc_generic(struct mpvk_ctx *vk, VkMemoryRequirements reqs,
|
||||
VkMemoryPropertyFlagBits flags, struct vk_memslice *out)
|
||||
VkMemoryPropertyFlags flags, struct vk_memslice *out)
|
||||
{
|
||||
struct vk_heap *heap = find_heap(vk, 0, flags, &reqs);
|
||||
return slice_heap(vk, heap, reqs.size, reqs.alignment, out);
|
||||
}
|
||||
|
||||
bool vk_malloc_buffer(struct mpvk_ctx *vk, VkBufferUsageFlagBits bufFlags,
|
||||
VkMemoryPropertyFlagBits memFlags, VkDeviceSize size,
|
||||
bool vk_malloc_buffer(struct mpvk_ctx *vk, VkBufferUsageFlags bufFlags,
|
||||
VkMemoryPropertyFlags memFlags, VkDeviceSize size,
|
||||
VkDeviceSize alignment, struct vk_bufslice *out)
|
||||
{
|
||||
struct vk_heap *heap = find_heap(vk, bufFlags, memFlags, NULL);
|
||||
|
|
|
@ -16,7 +16,7 @@ struct vk_memslice {
|
|||
|
||||
void vk_free_memslice(struct mpvk_ctx *vk, struct vk_memslice slice);
|
||||
bool vk_malloc_generic(struct mpvk_ctx *vk, VkMemoryRequirements reqs,
|
||||
VkMemoryPropertyFlagBits flags, struct vk_memslice *out);
|
||||
VkMemoryPropertyFlags flags, struct vk_memslice *out);
|
||||
|
||||
// Represents a single "slice" of a larger buffer
|
||||
struct vk_bufslice {
|
||||
|
@ -30,6 +30,6 @@ struct vk_bufslice {
|
|||
// Allocate a buffer slice. This is more efficient than vk_malloc_generic for
|
||||
// when the user needs lots of buffers, since it doesn't require
|
||||
// creating/destroying lots of (little) VkBuffers.
|
||||
bool vk_malloc_buffer(struct mpvk_ctx *vk, VkBufferUsageFlagBits bufFlags,
|
||||
VkMemoryPropertyFlagBits memFlags, VkDeviceSize size,
|
||||
bool vk_malloc_buffer(struct mpvk_ctx *vk, VkBufferUsageFlags bufFlags,
|
||||
VkMemoryPropertyFlags memFlags, VkDeviceSize size,
|
||||
VkDeviceSize alignment, struct vk_bufslice *out);
|
||||
|
|
|
@ -290,16 +290,15 @@ struct ra_tex_vk {
|
|||
struct ra_buf_pool pbo;
|
||||
// "current" metadata, can change during the course of execution
|
||||
VkImageLayout current_layout;
|
||||
VkPipelineStageFlagBits current_stage;
|
||||
VkAccessFlagBits current_access;
|
||||
VkPipelineStageFlags current_stage;
|
||||
VkAccessFlags current_access;
|
||||
};
|
||||
|
||||
// Small helper to ease image barrier creation. if `discard` is set, the contents
|
||||
// of the image will be undefined after the barrier
|
||||
static void tex_barrier(struct vk_cmd *cmd, struct ra_tex_vk *tex_vk,
|
||||
VkPipelineStageFlagBits newStage,
|
||||
VkAccessFlagBits newAccess, VkImageLayout newLayout,
|
||||
bool discard)
|
||||
VkPipelineStageFlags newStage, VkAccessFlags newAccess,
|
||||
VkImageLayout newLayout, bool discard)
|
||||
{
|
||||
VkImageMemoryBarrier imgBarrier = {
|
||||
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
|
||||
|
@ -511,7 +510,7 @@ static struct ra_tex *vk_tex_create(struct ra *ra,
|
|||
|
||||
VK(vkCreateImage(vk->dev, &iinfo, MPVK_ALLOCATOR, &tex_vk->img));
|
||||
|
||||
VkMemoryPropertyFlagBits memFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
|
||||
VkMemoryPropertyFlags memFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
|
||||
VkMemoryRequirements reqs;
|
||||
vkGetImageMemoryRequirements(vk->dev, tex_vk->img, &reqs);
|
||||
|
||||
|
@ -598,8 +597,8 @@ struct ra_buf_vk {
|
|||
int refcount; // 1 = object allocated but not in use, > 1 = in use
|
||||
bool needsflush;
|
||||
// "current" metadata, can change during course of execution
|
||||
VkPipelineStageFlagBits current_stage;
|
||||
VkAccessFlagBits current_access;
|
||||
VkPipelineStageFlags current_stage;
|
||||
VkAccessFlags current_access;
|
||||
};
|
||||
|
||||
static void vk_buf_deref(struct ra *ra, struct ra_buf *buf)
|
||||
|
@ -617,8 +616,8 @@ static void vk_buf_deref(struct ra *ra, struct ra_buf *buf)
|
|||
}
|
||||
|
||||
static void buf_barrier(struct ra *ra, struct vk_cmd *cmd, struct ra_buf *buf,
|
||||
VkPipelineStageFlagBits newStage,
|
||||
VkAccessFlagBits newAccess, int offset, size_t size)
|
||||
VkPipelineStageFlags newStage,
|
||||
VkAccessFlags newAccess, int offset, size_t size)
|
||||
{
|
||||
struct ra_buf_vk *buf_vk = buf->priv;
|
||||
|
||||
|
@ -693,8 +692,8 @@ static struct ra_buf *vk_buf_create(struct ra *ra,
|
|||
buf_vk->current_access = 0;
|
||||
buf_vk->refcount = 1;
|
||||
|
||||
VkBufferUsageFlagBits bufFlags = 0;
|
||||
VkMemoryPropertyFlagBits memFlags = 0;
|
||||
VkBufferUsageFlags bufFlags = 0;
|
||||
VkMemoryPropertyFlags memFlags = 0;
|
||||
VkDeviceSize align = 4; // alignment 4 is needed for buf_update
|
||||
|
||||
switch (params->type) {
|
||||
|
@ -977,7 +976,7 @@ static VkResult vk_compile_glsl(struct ra *ra, void *tactx,
|
|||
return ret;
|
||||
}
|
||||
|
||||
static const VkPipelineStageFlagBits stageFlags[] = {
|
||||
static const VkShaderStageFlags stageFlags[] = {
|
||||
[RA_RENDERPASS_TYPE_RASTER] = VK_SHADER_STAGE_FRAGMENT_BIT,
|
||||
[RA_RENDERPASS_TYPE_COMPUTE] = VK_SHADER_STAGE_COMPUTE_BIT,
|
||||
};
|
||||
|
|
|
@ -637,7 +637,7 @@ void vk_cmd_callback(struct vk_cmd *cmd, vk_cb callback, void *p, void *arg)
|
|||
}
|
||||
|
||||
void vk_cmd_dep(struct vk_cmd *cmd, VkSemaphore dep,
|
||||
VkPipelineStageFlagBits depstage)
|
||||
VkPipelineStageFlags depstage)
|
||||
{
|
||||
assert(cmd->num_deps < MPVK_MAX_CMD_DEPS);
|
||||
cmd->deps[cmd->num_deps] = dep;
|
||||
|
|
|
@ -116,7 +116,7 @@ void vk_cmd_callback(struct vk_cmd *cmd, vk_cb callback, void *p, void *arg);
|
|||
// Associate a dependency for the current command. This semaphore must signal
|
||||
// by the corresponding stage before the command may execute.
|
||||
void vk_cmd_dep(struct vk_cmd *cmd, VkSemaphore dep,
|
||||
VkPipelineStageFlagBits depstage);
|
||||
VkPipelineStageFlags depstage);
|
||||
|
||||
#define MPVK_MAX_QUEUES 8
|
||||
#define MPVK_MAX_CMDS 64
|
||||
|
|
Loading…
Reference in New Issue