gpu: Make shader image access a vfunc

That allows shaders to handle textures differently.

In particular, it will allow the pattern shader to take a huge number
of textures.
This commit is contained in:
Benjamin Otte
2023-09-15 03:29:14 +02:00
parent a9b8551e70
commit 187db92a88
8 changed files with 101 additions and 62 deletions

View File

@@ -102,6 +102,7 @@ static const GskGpuShaderOpClass GSK_GPU_BORDER_OP_CLASS = {
#ifdef GDK_RENDERING_VULKAN
&gsk_gpu_border_info,
#endif
gsk_gpu_shader_op_no_images,
gsk_gpu_border_setup_vao
};

View File

@@ -441,7 +441,7 @@ gsk_gpu_node_processor_add_node_as_pattern (GskGpuNodeProcessor *self,
gsk_gpu_clip_get_shader_clip (&self->clip, &self->offset, &node->bounds),
&node->bounds,
&self->offset,
images,
g_memdup (images, sizeof (GskGpuShaderImage) * n_images),
n_images,
pattern_id);
}

View File

@@ -63,11 +63,12 @@ gsk_gpu_render_pass_type_to_vk_image_layout (GskRenderPassType type)
static void
gsk_gpu_render_pass_op_do_barriers (GskGpuRenderPassOp *self,
VkCommandBuffer command_buffer)
VkCommandBuffer command_buffer)
{
GskGpuShaderOp *shader;
GskGpuOp *op;
gsize i;
const GskGpuShaderImage *images;
gsize i, n_images;
for (op = ((GskGpuOp *) self)->next;
op->op_class->stage != GSK_GPU_STAGE_END_PASS;
@@ -78,9 +79,10 @@ gsk_gpu_render_pass_op_do_barriers (GskGpuRenderPassOp *self,
shader = (GskGpuShaderOp *) op;
for (i = 0; i < shader->n_images; i++)
images = gsk_gpu_shader_op_get_images (shader, &n_images);
for (i = 0; i < n_images; i++)
{
gsk_vulkan_image_transition (GSK_VULKAN_IMAGE (shader->images[i].image),
gsk_vulkan_image_transition (GSK_VULKAN_IMAGE (images[i].image),
command_buffer,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,

View File

@@ -10,14 +10,11 @@
#include "gskvulkandeviceprivate.h"
#endif
void
gsk_gpu_shader_op_finish (GskGpuOp *op)
const GskGpuShaderImage *
gsk_gpu_shader_op_get_images (GskGpuShaderOp *op,
gsize *n_images)
{
GskGpuShaderOp *self = (GskGpuShaderOp *) op;
gsize i;
for (i = 0; i < self->n_images; i++)
g_object_unref (self->images[i].image);
return ((GskGpuShaderOpClass *) ((GskGpuOp *) op)->op_class)->get_images (op, n_images);
}
#ifdef GDK_RENDERING_VULKAN
@@ -80,8 +77,9 @@ gsk_gpu_shader_op_gl_command_n (GskGpuOp *op,
{
GskGpuShaderOp *self = (GskGpuShaderOp *) op;
GskGpuShaderOpClass *shader_op_class = (GskGpuShaderOpClass *) op->op_class;
const GskGpuShaderImage *images;
GskGLDevice *device;
gsize i;
gsize i, n_images;
device = GSK_GL_DEVICE (gsk_gpu_frame_get_device (frame));
@@ -89,12 +87,13 @@ gsk_gpu_shader_op_gl_command_n (GskGpuOp *op,
shader_op_class,
self->clip);
for (i = 0; i < self->n_images; i++)
images = gsk_gpu_shader_op_get_images (self, &n_images);
for (i = 0; i < n_images; i++)
{
glActiveTexture (GL_TEXTURE0 + self->images[i].descriptor);
gsk_gl_image_bind_texture (GSK_GL_IMAGE (self->images[i].image));
glBindSampler (self->images[i].descriptor,
gsk_gl_device_get_sampler_id (device, self->images[i].sampler));
glActiveTexture (GL_TEXTURE0 + images[i].descriptor);
gsk_gl_image_bind_texture (GSK_GL_IMAGE (images[i].image));
glBindSampler (images[i].descriptor,
gsk_gl_device_get_sampler_id (device, images[i].sampler));
}
shader_op_class->setup_vao (self->vertex_offset);
@@ -133,21 +132,12 @@ gsk_gpu_shader_op_alloc (GskGpuFrame *frame,
return self;
}
guint32
gsk_gpu_shader_op_use_image (GskGpuShaderOp *self,
GskGpuFrame *frame,
GskGpuImage *image,
GskGpuSampler sampler)
const GskGpuShaderImage *
gsk_gpu_shader_op_no_images (GskGpuShaderOp *op,
gsize *n_images)
{
gsize id;
*n_images = 0;
g_assert (self->n_images < G_N_ELEMENTS (self->images));
id = self->n_images;
self->images[id].image = g_object_ref (image);
self->images[id].sampler = sampler;
self->images[id].descriptor = gsk_gpu_frame_get_image_descriptor (frame, image, sampler);
self->n_images++;
return self->images[id].descriptor;
return NULL;
}

View File

@@ -21,8 +21,6 @@ struct _GskGpuShaderOp
GskGpuShaderClip clip;
gsize vertex_offset;
GskGpuShaderImage images[2];
gsize n_images;
};
struct _GskGpuShaderOpClass
@@ -34,6 +32,8 @@ struct _GskGpuShaderOpClass
#ifdef GDK_RENDERING_VULKAN
const VkPipelineVertexInputStateCreateInfo *vertex_input_state;
#endif
const GskGpuShaderImage * (* get_images) (GskGpuShaderOp *op,
gsize *n_images);
void (* setup_vao) (gsize offset);
};
@@ -42,13 +42,6 @@ GskGpuShaderOp * gsk_gpu_shader_op_alloc (GskGpuF
GskGpuShaderClip clip,
gpointer out_vertex_data);
void gsk_gpu_shader_op_finish (GskGpuOp *op);
guint32 gsk_gpu_shader_op_use_image (GskGpuShaderOp *self,
GskGpuFrame *frame,
GskGpuImage *image,
GskGpuSampler sampler);
#ifdef GDK_RENDERING_VULKAN
GskGpuOp * gsk_gpu_shader_op_vk_command_n (GskGpuOp *op,
GskGpuFrame *frame,
@@ -70,6 +63,12 @@ GskGpuOp * gsk_gpu_shader_op_gl_command (GskGpuO
GskGpuFrame *frame,
gsize flip_y);
const GskGpuShaderImage *
gsk_gpu_shader_op_get_images (GskGpuShaderOp *op,
gsize *n_images);
const GskGpuShaderImage *
gsk_gpu_shader_op_no_images (GskGpuShaderOp *op,
gsize *n_images);
static inline void
gsk_gpu_rgba_to_float (const GdkRGBA *rgba,
float values[4])

View File

@@ -13,14 +13,25 @@ typedef struct _GskGpuTextureOp GskGpuTextureOp;
struct _GskGpuTextureOp
{
GskGpuShaderOp op;
GskGpuShaderImage image;
};
static void
gsk_gpu_texture_op_finish (GskGpuOp *op)
{
GskGpuTextureOp *self = (GskGpuTextureOp *) op;
g_object_unref (self->image.image);
}
static void
gsk_gpu_texture_op_print (GskGpuOp *op,
GskGpuFrame *frame,
GString *string,
guint indent)
{
GskGpuTextureOp *self = (GskGpuTextureOp *) op;
GskGpuShaderOp *shader = (GskGpuShaderOp *) op;
GskGpuTextureInstance *instance;
@@ -28,15 +39,26 @@ gsk_gpu_texture_op_print (GskGpuOp *op,
gsk_gpu_print_op (string, indent, "texture");
gsk_gpu_print_rect (string, instance->rect);
gsk_gpu_print_image (string, shader->images[0].image);
gsk_gpu_print_image (string, self->image.image);
gsk_gpu_print_newline (string);
}
static const GskGpuShaderImage *
gsk_gpu_texture_op_get_images (GskGpuShaderOp *op,
gsize *n_images)
{
GskGpuTextureOp *self = (GskGpuTextureOp *) op;
*n_images = 1;
return &self->image;
}
static const GskGpuShaderOpClass GSK_GPU_TEXTURE_OP_CLASS = {
{
GSK_GPU_OP_SIZE (GskGpuTextureOp),
GSK_GPU_STAGE_SHADER,
gsk_gpu_shader_op_finish,
gsk_gpu_texture_op_finish,
gsk_gpu_texture_op_print,
#ifdef GDK_RENDERING_VULKAN
gsk_gpu_shader_op_vk_command,
@@ -48,6 +70,7 @@ static const GskGpuShaderOpClass GSK_GPU_TEXTURE_OP_CLASS = {
#ifdef GDK_RENDERING_VULKAN
&gsk_gpu_texture_info,
#endif
gsk_gpu_texture_op_get_images,
gsk_gpu_texture_setup_vao
};
@@ -70,5 +93,8 @@ gsk_gpu_texture_op (GskGpuFrame *frame,
gsk_gpu_rect_to_float (rect, offset, instance->rect);
gsk_gpu_rect_to_float (tex_rect, offset, instance->tex_rect);
instance->tex_id = gsk_gpu_shader_op_use_image ((GskGpuShaderOp *) self, frame, image, sampler);
self->image.image = g_object_ref (image);
self->image.sampler = sampler;
self->image.descriptor = gsk_gpu_frame_get_image_descriptor (frame, image, sampler);
instance->tex_id = self->image.descriptor;
}

View File

@@ -14,8 +14,23 @@ typedef struct _GskGpuUberOp GskGpuUberOp;
struct _GskGpuUberOp
{
GskGpuShaderOp op;
GskGpuShaderImage *images;
gsize n_images;
};
static void
gsk_gpu_uber_op_finish (GskGpuOp *op)
{
GskGpuUberOp *self = (GskGpuUberOp *) op;
gsize i;
for (i = 0; i < self->n_images; i++)
g_object_unref (self->images[i].image);
g_free (self->images);
}
static void
gsk_gpu_uber_op_print (GskGpuOp *op,
GskGpuFrame *frame,
@@ -32,11 +47,22 @@ gsk_gpu_uber_op_print (GskGpuOp *op,
gsk_gpu_print_newline (string);
}
static const GskGpuShaderImage *
gsk_gpu_uber_op_get_images (GskGpuShaderOp *op,
gsize *n_images)
{
GskGpuUberOp *self = (GskGpuUberOp *) op;
*n_images = self->n_images;
return self->images;
}
static const GskGpuShaderOpClass GSK_GPU_UBER_OP_CLASS = {
{
GSK_GPU_OP_SIZE (GskGpuUberOp),
GSK_GPU_STAGE_SHADER,
gsk_gpu_shader_op_finish,
gsk_gpu_uber_op_finish,
gsk_gpu_uber_op_print,
#ifdef GDK_RENDERING_VULKAN
gsk_gpu_shader_op_vk_command,
@@ -48,6 +74,7 @@ static const GskGpuShaderOpClass GSK_GPU_UBER_OP_CLASS = {
#ifdef GDK_RENDERING_VULKAN
&gsk_gpu_uber_info,
#endif
gsk_gpu_uber_op_get_images,
gsk_gpu_uber_setup_vao
};
@@ -56,27 +83,21 @@ gsk_gpu_uber_op (GskGpuFrame *frame,
GskGpuShaderClip clip,
const graphene_rect_t *rect,
const graphene_point_t *offset,
const GskGpuShaderImage *images,
GskGpuShaderImage *images,
gsize n_images,
guint32 pattern_id)
{
GskGpuUberOp *self;
GskGpuUberInstance *instance;
GskGpuShaderOp *shader;
gsize i;
shader = gsk_gpu_shader_op_alloc (frame,
&GSK_GPU_UBER_OP_CLASS,
clip,
&instance);
self = (GskGpuUberOp *) gsk_gpu_shader_op_alloc (frame,
&GSK_GPU_UBER_OP_CLASS,
clip,
&instance);
self->images = images;
self->n_images = n_images;
gsk_gpu_rect_to_float (rect, offset, instance->rect);
shader->n_images = n_images;
for (i = 0; i < n_images; i++)
{
shader->images[i] = images[i];
g_object_ref (images[i].image);
}
instance->pattern_id = pattern_id;
}

View File

@@ -10,7 +10,7 @@ void gsk_gpu_uber_op (GskGpuF
GskGpuShaderClip clip,
const graphene_rect_t *rect,
const graphene_point_t *offset,
const GskGpuShaderImage *images,
GskGpuShaderImage *images,
gsize n_images,
guint32 pattern_id);