WINE_DECLARE_DEBUG_CHANNEL(d3d_synchronous);
#define WINED3D_MAX_FBO_ENTRIES 64
+#define WINED3D_ALL_LAYERS (~0u)
static DWORD wined3d_context_tls_idx;
}
static void context_attach_depth_stencil_rb(const struct wined3d_gl_info *gl_info,
- GLenum fbo_target, DWORD format_flags, GLuint rb)
+ GLenum fbo_target, DWORD flags, GLuint rb)
{
- if (format_flags & WINED3DFMT_FLAG_DEPTH)
+ if (flags & WINED3D_FBO_ENTRY_FLAG_DEPTH)
{
gl_info->fbo_ops.glFramebufferRenderbuffer(fbo_target, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, rb);
checkGLcall("glFramebufferRenderbuffer()");
}
- if (format_flags & WINED3DFMT_FLAG_STENCIL)
+ if (flags & WINED3D_FBO_ENTRY_FLAG_STENCIL)
{
gl_info->fbo_ops.glFramebufferRenderbuffer(fbo_target, GL_STENCIL_ATTACHMENT, GL_RENDERBUFFER, rb);
checkGLcall("glFramebufferRenderbuffer()");
}
}
+static void context_attach_gl_texture_fbo(struct wined3d_context *context,
+ GLenum fbo_target, GLenum attachment, const struct wined3d_fbo_resource *resource)
+{
+ const struct wined3d_gl_info *gl_info = context->gl_info;
+
+ if (!resource)
+ {
+ gl_info->fbo_ops.glFramebufferTexture2D(fbo_target, attachment, GL_TEXTURE_2D, 0, 0);
+ }
+ else if (resource->layer == WINED3D_ALL_LAYERS)
+ {
+ if (!gl_info->fbo_ops.glFramebufferTexture)
+ {
+ FIXME("OpenGL implementation doesn't support glFramebufferTexture().\n");
+ return;
+ }
+
+ gl_info->fbo_ops.glFramebufferTexture(fbo_target, attachment,
+ resource->object, resource->level);
+ }
+ else if (resource->target == GL_TEXTURE_1D_ARRAY || resource->target == GL_TEXTURE_2D_ARRAY ||
+ resource->target == GL_TEXTURE_3D)
+ {
+ if (!gl_info->fbo_ops.glFramebufferTextureLayer)
+ {
+ FIXME("OpenGL implementation doesn't support glFramebufferTextureLayer().\n");
+ return;
+ }
+
+ gl_info->fbo_ops.glFramebufferTextureLayer(fbo_target, attachment,
+ resource->object, resource->level, resource->layer);
+ }
+ else if (resource->target == GL_TEXTURE_1D)
+ {
+ gl_info->fbo_ops.glFramebufferTexture1D(fbo_target, attachment,
+ resource->target, resource->object, resource->level);
+ checkGLcall("glFramebufferTexture1D()");
+ }
+ else
+ {
+ gl_info->fbo_ops.glFramebufferTexture2D(fbo_target, attachment,
+ resource->target, resource->object, resource->level);
+ }
+ checkGLcall("attach texture to fbo");
+}
+
/* Context activation is done by the caller. */
static void context_attach_depth_stencil_fbo(struct wined3d_context *context,
- GLenum fbo_target, struct wined3d_surface *depth_stencil, DWORD location)
+ GLenum fbo_target, const struct wined3d_fbo_resource *resource, BOOL rb_namespace,
+ DWORD flags)
{
const struct wined3d_gl_info *gl_info = context->gl_info;
- TRACE("Attach depth stencil %p\n", depth_stencil);
-
- if (depth_stencil)
+ if (resource->object)
{
- DWORD format_flags = depth_stencil->container->resource.format_flags;
+ TRACE("Attach depth stencil %u.\n", resource->object);
- if (depth_stencil->current_renderbuffer)
+ if (rb_namespace)
{
context_attach_depth_stencil_rb(gl_info, fbo_target,
- format_flags, depth_stencil->current_renderbuffer->id);
+ flags, resource->object);
}
else
{
- switch (location)
- {
- case WINED3D_LOCATION_TEXTURE_RGB:
- case WINED3D_LOCATION_TEXTURE_SRGB:
- if (format_flags & WINED3DFMT_FLAG_DEPTH)
- {
- gl_info->fbo_ops.glFramebufferTexture2D(fbo_target, GL_DEPTH_ATTACHMENT,
- depth_stencil->texture_target, depth_stencil->container->texture_rgb.name,
- depth_stencil->texture_level);
- checkGLcall("glFramebufferTexture2D()");
- }
+ if (flags & WINED3D_FBO_ENTRY_FLAG_DEPTH)
+ context_attach_gl_texture_fbo(context, fbo_target, GL_DEPTH_ATTACHMENT, resource);
- if (format_flags & WINED3DFMT_FLAG_STENCIL)
- {
- gl_info->fbo_ops.glFramebufferTexture2D(fbo_target, GL_STENCIL_ATTACHMENT,
- depth_stencil->texture_target, depth_stencil->container->texture_rgb.name,
- depth_stencil->texture_level);
- checkGLcall("glFramebufferTexture2D()");
- }
- break;
+ if (flags & WINED3D_FBO_ENTRY_FLAG_STENCIL)
+ context_attach_gl_texture_fbo(context, fbo_target, GL_STENCIL_ATTACHMENT, resource);
+ }
- case WINED3D_LOCATION_RB_MULTISAMPLE:
- context_attach_depth_stencil_rb(gl_info, fbo_target,
- format_flags, depth_stencil->rb_multisample);
- break;
+ if (!(flags & WINED3D_FBO_ENTRY_FLAG_DEPTH))
+ context_attach_gl_texture_fbo(context, fbo_target, GL_DEPTH_ATTACHMENT, NULL);
- case WINED3D_LOCATION_RB_RESOLVED:
- context_attach_depth_stencil_rb(gl_info, fbo_target,
- format_flags, depth_stencil->rb_resolved);
- break;
+ if (!(flags & WINED3D_FBO_ENTRY_FLAG_STENCIL))
+ context_attach_gl_texture_fbo(context, fbo_target, GL_STENCIL_ATTACHMENT, NULL);
+ }
+ else
+ {
+ TRACE("Attach depth stencil 0.\n");
- default:
- ERR("Unsupported location %s (%#x).\n", wined3d_debug_location(location), location);
- break;
- }
- }
+ context_attach_gl_texture_fbo(context, fbo_target, GL_DEPTH_ATTACHMENT, NULL);
+ context_attach_gl_texture_fbo(context, fbo_target, GL_STENCIL_ATTACHMENT, NULL);
+ }
+}
+
+/* Context activation is done by the caller. */
+static void context_attach_surface_fbo(struct wined3d_context *context,
+ GLenum fbo_target, DWORD idx, const struct wined3d_fbo_resource *resource, BOOL rb_namespace)
+{
+ const struct wined3d_gl_info *gl_info = context->gl_info;
- if (!(format_flags & WINED3DFMT_FLAG_DEPTH))
+ TRACE("Attach GL object %u to %u.\n", resource->object, idx);
+
+    if (resource->object)
+    {
+        if (rb_namespace)
{
- gl_info->fbo_ops.glFramebufferTexture2D(fbo_target, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, 0, 0);
- checkGLcall("glFramebufferTexture2D()");
+ gl_info->fbo_ops.glFramebufferRenderbuffer(fbo_target, GL_COLOR_ATTACHMENT0 + idx,
+ GL_RENDERBUFFER, resource->object);
+ checkGLcall("glFramebufferRenderbuffer()");
}
-
- if (!(format_flags & WINED3DFMT_FLAG_STENCIL))
+ else
{
- gl_info->fbo_ops.glFramebufferTexture2D(fbo_target, GL_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0, 0);
- checkGLcall("glFramebufferTexture2D()");
+ context_attach_gl_texture_fbo(context, fbo_target, GL_COLOR_ATTACHMENT0 + idx, resource);
}
}
else
{
- gl_info->fbo_ops.glFramebufferTexture2D(fbo_target, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, 0, 0);
- checkGLcall("glFramebufferTexture2D()");
-
- gl_info->fbo_ops.glFramebufferTexture2D(fbo_target, GL_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0, 0);
- checkGLcall("glFramebufferTexture2D()");
+ context_attach_gl_texture_fbo(context, fbo_target, GL_COLOR_ATTACHMENT0 + idx, NULL);
}
}
-/* Context activation is done by the caller. */
-static void context_attach_surface_fbo(struct wined3d_context *context,
- GLenum fbo_target, DWORD idx, struct wined3d_surface *surface, DWORD location)
+static void context_dump_fbo_attachment(const struct wined3d_gl_info *gl_info, GLenum target,
+ GLenum attachment)
{
- const struct wined3d_gl_info *gl_info = context->gl_info;
+ static const struct
+ {
+ GLenum target;
+ GLenum binding;
+ const char *str;
+ enum wined3d_gl_extension extension;
+ }
+ texture_type[] =
+ {
+ {GL_TEXTURE_2D, GL_TEXTURE_BINDING_2D, "2d", WINED3D_GL_EXT_NONE},
+ {GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_BINDING_RECTANGLE_ARB, "rectangle", ARB_TEXTURE_RECTANGLE},
+ {GL_TEXTURE_2D_ARRAY, GL_TEXTURE_BINDING_2D_ARRAY, "2d-array", EXT_TEXTURE_ARRAY},
+ };
+
+ GLint type, name, samples, width, height, old_texture, level, face, fmt, tex_target;
- TRACE("Attach surface %p to %u\n", surface, idx);
+ gl_info->fbo_ops.glGetFramebufferAttachmentParameteriv(target, attachment,
+ GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME, &name);
+ gl_info->fbo_ops.glGetFramebufferAttachmentParameteriv(target, attachment,
+ GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE, &type);
- if (surface && surface->resource.format->id != WINED3DFMT_NULL)
+ if (type == GL_RENDERBUFFER)
+ {
+ gl_info->fbo_ops.glBindRenderbuffer(GL_RENDERBUFFER, name);
+ gl_info->fbo_ops.glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_WIDTH, &width);
+ gl_info->fbo_ops.glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_HEIGHT, &height);
+ if (gl_info->limits.samples > 1)
+ gl_info->fbo_ops.glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_SAMPLES, &samples);
+ else
+ samples = 1;
+ gl_info->fbo_ops.glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_INTERNAL_FORMAT, &fmt);
+ FIXME(" %s: renderbuffer %d, %dx%d, %d samples, format %#x.\n",
+ debug_fboattachment(attachment), name, width, height, samples, fmt);
+ }
+ else if (type == GL_TEXTURE)
{
- BOOL srgb;
+ const char *tex_type_str;
- switch (location)
+ gl_info->fbo_ops.glGetFramebufferAttachmentParameteriv(target, attachment,
+ GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL, &level);
+ gl_info->fbo_ops.glGetFramebufferAttachmentParameteriv(target, attachment,
+ GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE, &face);
+
+ if (face)
{
- case WINED3D_LOCATION_TEXTURE_RGB:
- case WINED3D_LOCATION_TEXTURE_SRGB:
- srgb = location == WINED3D_LOCATION_TEXTURE_SRGB;
- gl_info->fbo_ops.glFramebufferTexture2D(fbo_target, GL_COLOR_ATTACHMENT0 + idx,
- surface->texture_target, surface_get_texture_name(surface, context, srgb),
- surface->texture_level);
- checkGLcall("glFramebufferTexture2D()");
- break;
+ gl_info->gl_ops.gl.p_glGetIntegerv(GL_TEXTURE_BINDING_CUBE_MAP, &old_texture);
- case WINED3D_LOCATION_RB_MULTISAMPLE:
- gl_info->fbo_ops.glFramebufferRenderbuffer(fbo_target, GL_COLOR_ATTACHMENT0 + idx,
- GL_RENDERBUFFER, surface->rb_multisample);
- checkGLcall("glFramebufferRenderbuffer()");
- break;
+ gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_CUBE_MAP, name);
+ gl_info->gl_ops.gl.p_glGetTexLevelParameteriv(face, level, GL_TEXTURE_INTERNAL_FORMAT, &fmt);
+ gl_info->gl_ops.gl.p_glGetTexLevelParameteriv(face, level, GL_TEXTURE_WIDTH, &width);
+ gl_info->gl_ops.gl.p_glGetTexLevelParameteriv(face, level, GL_TEXTURE_HEIGHT, &height);
- case WINED3D_LOCATION_RB_RESOLVED:
- gl_info->fbo_ops.glFramebufferRenderbuffer(fbo_target, GL_COLOR_ATTACHMENT0 + idx,
- GL_RENDERBUFFER, surface->rb_resolved);
- checkGLcall("glFramebufferRenderbuffer()");
- break;
+ tex_target = GL_TEXTURE_CUBE_MAP;
+ tex_type_str = "cube";
+ }
+ else
+ {
+ unsigned int i;
- default:
- ERR("Unsupported location %s (%#x).\n", wined3d_debug_location(location), location);
- break;
+ tex_type_str = NULL;
+ for (i = 0; i < sizeof(texture_type) / sizeof(*texture_type); ++i)
+ {
+ if (!gl_info->supported[texture_type[i].extension])
+ continue;
+
+ gl_info->gl_ops.gl.p_glGetIntegerv(texture_type[i].binding, &old_texture);
+ while (gl_info->gl_ops.gl.p_glGetError());
+
+ gl_info->gl_ops.gl.p_glBindTexture(texture_type[i].target, name);
+ if (!gl_info->gl_ops.gl.p_glGetError())
+ {
+ tex_target = texture_type[i].target;
+ tex_type_str = texture_type[i].str;
+ break;
+ }
+ gl_info->gl_ops.gl.p_glBindTexture(texture_type[i].target, old_texture);
+ }
+ if (!tex_type_str)
+ {
+ FIXME("Cannot find type of texture %d.\n", name);
+ return;
+ }
+
+ gl_info->gl_ops.gl.p_glGetTexLevelParameteriv(tex_target, level, GL_TEXTURE_INTERNAL_FORMAT, &fmt);
+ gl_info->gl_ops.gl.p_glGetTexLevelParameteriv(tex_target, level, GL_TEXTURE_WIDTH, &width);
+ gl_info->gl_ops.gl.p_glGetTexLevelParameteriv(tex_target, level, GL_TEXTURE_HEIGHT, &height);
}
+
+ FIXME(" %s: %s texture %d, %dx%d, format %#x.\n", debug_fboattachment(attachment),
+ tex_type_str, name, width, height, fmt);
+
+ gl_info->gl_ops.gl.p_glBindTexture(tex_target, old_texture);
+ checkGLcall("Guess texture type");
+ }
+ else if (type == GL_NONE)
+ {
+ FIXME(" %s: NONE.\n", debug_fboattachment(attachment));
}
else
{
- gl_info->fbo_ops.glFramebufferTexture2D(fbo_target, GL_COLOR_ATTACHMENT0 + idx, GL_TEXTURE_2D, 0, 0);
- checkGLcall("glFramebufferTexture2D()");
+ ERR(" %s: Unknown attachment %#x.\n", debug_fboattachment(attachment), type);
}
}
}
else
{
- const struct wined3d_surface *attachment;
unsigned int i;
FIXME("FBO status %s (%#x)\n", debug_fbostatus(status), status);
return;
}
- FIXME("\tColor Location %s (%#x).\n", wined3d_debug_location(context->current_fbo->color_location),
- context->current_fbo->color_location);
- FIXME("\tDepth Stencil Location %s (%#x).\n", wined3d_debug_location(context->current_fbo->ds_location),
- context->current_fbo->ds_location);
+ context_dump_fbo_attachment(gl_info, target, GL_DEPTH_ATTACHMENT);
+ context_dump_fbo_attachment(gl_info, target, GL_STENCIL_ATTACHMENT);
- /* Dump the FBO attachments */
for (i = 0; i < gl_info->limits.buffers; ++i)
- {
- attachment = context->current_fbo->render_targets[i];
- if (attachment)
- {
- FIXME("\tColor attachment %d: (%p) %s %ux%u %u samples.\n",
- i, attachment, debug_d3dformat(attachment->resource.format->id),
- attachment->pow2Width, attachment->pow2Height, attachment->resource.multisample_type);
- }
- }
- attachment = context->current_fbo->depth_stencil;
- if (attachment)
- {
- FIXME("\tDepth attachment: (%p) %s %ux%u %u samples.\n",
- attachment, debug_d3dformat(attachment->resource.format->id),
- attachment->pow2Width, attachment->pow2Height, attachment->resource.multisample_type);
- }
+ context_dump_fbo_attachment(gl_info, target, GL_COLOR_ATTACHMENT0 + i);
+ checkGLcall("Dump FBO attachments");
}
}
return buffer ? (1u << 31) | buffer : 0;
}
-static inline DWORD context_generate_rt_mask_from_surface(const struct wined3d_surface *target)
+static inline DWORD context_generate_rt_mask_from_resource(struct wined3d_resource *resource)
+{
+ if (resource->type != WINED3D_RTYPE_TEXTURE_2D)
+ {
+ FIXME("Not implemented for %s resources.\n", debug_d3dresourcetype(resource->type));
+ return 0;
+ }
+
+ return (1u << 31) | wined3d_texture_get_gl_buffer(texture_from_resource(resource));
+}
+
+static inline void context_set_fbo_key_for_render_target(const struct wined3d_context *context,
+ struct wined3d_fbo_entry_key *key, unsigned int idx, struct wined3d_rendertarget_info *render_target,
+ DWORD location)
+{
+ unsigned int sub_resource_idx = render_target->sub_resource_idx;
+ struct wined3d_resource *resource = render_target->resource;
+ struct wined3d_texture *texture;
+
+ if (!resource || resource->format->id == WINED3DFMT_NULL || resource->type == WINED3D_RTYPE_BUFFER)
+ {
+ if (resource && resource->type == WINED3D_RTYPE_BUFFER)
+ FIXME("Not implemented for %s resources.\n", debug_d3dresourcetype(resource->type));
+ key->objects[idx].object = 0;
+ key->objects[idx].target = 0;
+ key->objects[idx].level = key->objects[idx].layer = 0;
+ return;
+ }
+
+ if (render_target->gl_view.name)
+ {
+ key->objects[idx].object = render_target->gl_view.name;
+ key->objects[idx].target = render_target->gl_view.target;
+ key->objects[idx].level = 0;
+ key->objects[idx].layer = WINED3D_ALL_LAYERS;
+ return;
+ }
+
+ texture = wined3d_texture_from_resource(resource);
+ if (resource->type == WINED3D_RTYPE_TEXTURE_2D)
+ {
+ struct wined3d_surface *surface = texture->sub_resources[sub_resource_idx].u.surface;
+
+ if (surface->current_renderbuffer)
+ {
+ key->objects[idx].object = surface->current_renderbuffer->id;
+ key->objects[idx].target = 0;
+ key->objects[idx].level = key->objects[idx].layer = 0;
+ key->rb_namespace |= 1 << idx;
+ return;
+ }
+
+ key->objects[idx].target = surface->texture_target;
+ key->objects[idx].level = surface->texture_level;
+ key->objects[idx].layer = surface->texture_layer;
+ }
+ else
+ {
+ key->objects[idx].target = texture->target;
+ key->objects[idx].level = sub_resource_idx % texture->level_count;
+ key->objects[idx].layer = sub_resource_idx / texture->level_count;
+ }
+ if (render_target->layer_count != 1)
+ key->objects[idx].layer = WINED3D_ALL_LAYERS;
+
+ switch (location)
+ {
+ case WINED3D_LOCATION_TEXTURE_RGB:
+ key->objects[idx].object = wined3d_texture_get_texture_name(texture, context, FALSE);
+ break;
+
+ case WINED3D_LOCATION_TEXTURE_SRGB:
+ key->objects[idx].object = wined3d_texture_get_texture_name(texture, context, TRUE);
+ break;
+
+ case WINED3D_LOCATION_RB_MULTISAMPLE:
+ key->objects[idx].object = texture->rb_multisample;
+ key->objects[idx].target = 0;
+ key->objects[idx].level = key->objects[idx].layer = 0;
+ key->rb_namespace |= 1 << idx;
+ break;
+
+ case WINED3D_LOCATION_RB_RESOLVED:
+ key->objects[idx].object = texture->rb_resolved;
+ key->objects[idx].target = 0;
+ key->objects[idx].level = key->objects[idx].layer = 0;
+ key->rb_namespace |= 1 << idx;
+ break;
+ }
+}
+
+static void context_generate_fbo_key(const struct wined3d_context *context,
+ struct wined3d_fbo_entry_key *key, struct wined3d_rendertarget_info *render_targets,
+ struct wined3d_surface *depth_stencil_surface, DWORD color_location,
+ DWORD ds_location)
{
- return (1u << 31) | surface_get_gl_buffer(target);
+ struct wined3d_rendertarget_info depth_stencil = {{0}};
+ unsigned int i;
+
+ key->rb_namespace = 0;
+ if (depth_stencil_surface)
+ {
+ depth_stencil.resource = &depth_stencil_surface->container->resource;
+ depth_stencil.sub_resource_idx = surface_get_sub_resource_idx(depth_stencil_surface);
+ depth_stencil.layer_count = 1;
+ }
+ context_set_fbo_key_for_render_target(context, key, 0, &depth_stencil, ds_location);
+
+ for (i = 0; i < context->gl_info->limits.buffers; ++i)
+ context_set_fbo_key_for_render_target(context, key, i + 1, &render_targets[i], color_location);
}
static struct fbo_entry *context_create_fbo_entry(const struct wined3d_context *context,
- struct wined3d_surface **render_targets, struct wined3d_surface *depth_stencil,
+ struct wined3d_rendertarget_info *render_targets, struct wined3d_surface *depth_stencil,
DWORD color_location, DWORD ds_location)
{
const struct wined3d_gl_info *gl_info = context->gl_info;
+ unsigned int object_count = gl_info->limits.buffers + 1;
struct fbo_entry *entry;
- entry = HeapAlloc(GetProcessHeap(), 0, sizeof(*entry));
- entry->render_targets = HeapAlloc(GetProcessHeap(), 0, gl_info->limits.buffers * sizeof(*entry->render_targets));
- memcpy(entry->render_targets, render_targets, gl_info->limits.buffers * sizeof(*entry->render_targets));
- entry->depth_stencil = depth_stencil;
- entry->color_location = color_location;
- entry->ds_location = ds_location;
+ entry = HeapAlloc(GetProcessHeap(), 0,
+ FIELD_OFFSET(struct fbo_entry, key.objects[object_count]));
+ memset(&entry->key, 0, FIELD_OFFSET(struct wined3d_fbo_entry_key, objects[object_count]));
+ context_generate_fbo_key(context, &entry->key, render_targets, depth_stencil, color_location, ds_location);
+ entry->flags = 0;
+ if (depth_stencil)
+ {
+ if (depth_stencil->container->resource.format_flags & WINED3DFMT_FLAG_DEPTH)
+ entry->flags |= WINED3D_FBO_ENTRY_FLAG_DEPTH;
+ if (depth_stencil->container->resource.format_flags & WINED3DFMT_FLAG_STENCIL)
+ entry->flags |= WINED3D_FBO_ENTRY_FLAG_STENCIL;
+ }
entry->rt_mask = context_generate_rt_mask(GL_COLOR_ATTACHMENT0);
- entry->attached = FALSE;
gl_info->fbo_ops.glGenFramebuffers(1, &entry->id);
checkGLcall("glGenFramebuffers()");
TRACE("Created FBO %u.\n", entry->id);
/* Context activation is done by the caller. */
static void context_reuse_fbo_entry(struct wined3d_context *context, GLenum target,
- struct wined3d_surface **render_targets, struct wined3d_surface *depth_stencil,
+ struct wined3d_rendertarget_info *render_targets, struct wined3d_surface *depth_stencil,
DWORD color_location, DWORD ds_location, struct fbo_entry *entry)
{
const struct wined3d_gl_info *gl_info = context->gl_info;
context_bind_fbo(context, target, entry->id);
context_clean_fbo_attachments(gl_info, target);
- memcpy(entry->render_targets, render_targets, gl_info->limits.buffers * sizeof(*entry->render_targets));
- entry->depth_stencil = depth_stencil;
- entry->color_location = color_location;
- entry->ds_location = ds_location;
- entry->attached = FALSE;
+ context_generate_fbo_key(context, &entry->key, render_targets, depth_stencil, color_location, ds_location);
+ entry->flags = 0;
+ if (depth_stencil)
+ {
+ if (depth_stencil->container->resource.format_flags & WINED3DFMT_FLAG_DEPTH)
+ entry->flags |= WINED3D_FBO_ENTRY_FLAG_DEPTH;
+ if (depth_stencil->container->resource.format_flags & WINED3DFMT_FLAG_STENCIL)
+ entry->flags |= WINED3D_FBO_ENTRY_FLAG_STENCIL;
+ }
}
/* Context activation is done by the caller. */
}
--context->fbo_entry_count;
list_remove(&entry->entry);
- HeapFree(GetProcessHeap(), 0, entry->render_targets);
HeapFree(GetProcessHeap(), 0, entry);
}
/* Context activation is done by the caller. */
static struct fbo_entry *context_find_fbo_entry(struct wined3d_context *context, GLenum target,
- struct wined3d_surface **render_targets, struct wined3d_surface *depth_stencil,
+ struct wined3d_rendertarget_info *render_targets, struct wined3d_surface *depth_stencil,
DWORD color_location, DWORD ds_location)
{
const struct wined3d_gl_info *gl_info = context->gl_info;
+ unsigned int object_count = gl_info->limits.buffers + 1;
+ struct wined3d_texture *rt_texture, *ds_texture;
struct fbo_entry *entry;
+ unsigned int i, level;
- if (depth_stencil && render_targets && render_targets[0])
+ if (depth_stencil && render_targets[0].resource && render_targets[0].resource->type != WINED3D_RTYPE_BUFFER)
{
- if (depth_stencil->resource.width < render_targets[0]->resource.width ||
- depth_stencil->resource.height < render_targets[0]->resource.height)
+ rt_texture = wined3d_texture_from_resource(render_targets[0].resource);
+ level = render_targets[0].sub_resource_idx % rt_texture->level_count;
+ ds_texture = depth_stencil->container;
+
+ if (wined3d_texture_get_level_width(ds_texture, depth_stencil->texture_level)
+ < wined3d_texture_get_level_width(rt_texture, level)
+ || wined3d_texture_get_level_height(ds_texture, depth_stencil->texture_level)
+ < wined3d_texture_get_level_height(rt_texture, level))
{
- WARN("Depth stencil is smaller than the primary color buffer, disabling\n");
+ WARN("Depth stencil is smaller than the primary color buffer, disabling.\n");
depth_stencil = NULL;
}
+ else if (ds_texture->resource.multisample_type != rt_texture->resource.multisample_type
+ || ds_texture->resource.multisample_quality != rt_texture->resource.multisample_quality)
+ {
+ WARN("Color multisample type %u and quality %u, depth stencil has %u and %u, disabling ds buffer.\n",
+ rt_texture->resource.multisample_type, rt_texture->resource.multisample_quality,
+ ds_texture->resource.multisample_type, ds_texture->resource.multisample_quality);
+ depth_stencil = NULL;
+ }
+ else
+ surface_set_compatible_renderbuffer(depth_stencil, &render_targets[0]);
}
- LIST_FOR_EACH_ENTRY(entry, &context->fbo_list, struct fbo_entry, entry)
+ context_generate_fbo_key(context, context->fbo_key, render_targets, depth_stencil, color_location,
+ ds_location);
+
+ if (TRACE_ON(d3d))
{
- if (!memcmp(entry->render_targets,
- render_targets, gl_info->limits.buffers * sizeof(*entry->render_targets))
- && entry->depth_stencil == depth_stencil && entry->color_location == color_location
- && entry->ds_location == ds_location)
+ TRACE("Dumping FBO attachments:\n");
+ for (i = 0; i < gl_info->limits.buffers; ++i)
+ {
+ struct wined3d_resource *resource;
+ if ((resource = render_targets[i].resource))
+ {
+ unsigned int width, height;
+ const char *resource_type;
+
+ if (resource->type == WINED3D_RTYPE_BUFFER)
+ {
+ width = resource->size;
+ height = 1;
+ resource_type = "buffer";
+ }
+ else
+ {
+ rt_texture = wined3d_texture_from_resource(resource);
+ level = render_targets[i].sub_resource_idx % rt_texture->level_count;
+ width = wined3d_texture_get_level_pow2_width(rt_texture, level);
+ height = wined3d_texture_get_level_pow2_height(rt_texture, level);
+ resource_type = "texture";
+ }
+
+ TRACE(" Color attachment %u: %p, %u format %s, %s %u, %ux%u, %u samples.\n",
+ i, resource, render_targets[i].sub_resource_idx, debug_d3dformat(resource->format->id),
+ context->fbo_key->rb_namespace & (1 << (i + 1)) ? "renderbuffer" : resource_type,
+ context->fbo_key->objects[i + 1].object, width, height, resource->multisample_type);
+ }
+ }
+ if (depth_stencil)
{
- list_remove(&entry->entry);
- list_add_head(&context->fbo_list, &entry->entry);
- return entry;
+ ds_texture = depth_stencil->container;
+ TRACE(" Depth attachment: %p format %s, %s %u, %ux%u, %u samples.\n",
+ depth_stencil, debug_d3dformat(ds_texture->resource.format->id),
+ context->fbo_key->rb_namespace & (1 << 0) ? "renderbuffer" : "texture",
+ context->fbo_key->objects[0].object,
+ wined3d_texture_get_level_pow2_width(ds_texture, depth_stencil->texture_level),
+ wined3d_texture_get_level_pow2_height(ds_texture, depth_stencil->texture_level),
+ ds_texture->resource.multisample_type);
}
}
+ LIST_FOR_EACH_ENTRY(entry, &context->fbo_list, struct fbo_entry, entry)
+ {
+ if (memcmp(context->fbo_key, &entry->key, FIELD_OFFSET(struct wined3d_fbo_entry_key, objects[object_count])))
+ continue;
+
+ list_remove(&entry->entry);
+ list_add_head(&context->fbo_list, &entry->entry);
+ return entry;
+ }
+
if (context->fbo_entry_count < WINED3D_MAX_FBO_ENTRIES)
{
entry = context_create_fbo_entry(context, render_targets, depth_stencil, color_location, ds_location);
const struct wined3d_gl_info *gl_info = context->gl_info;
unsigned int i;
GLuint read_binding, draw_binding;
- struct wined3d_surface *depth_stencil = entry->depth_stencil;
- if (entry->attached)
+ if (entry->flags & WINED3D_FBO_ENTRY_FLAG_ATTACHED)
{
context_bind_fbo(context, target, entry->id);
return;
/* Apply render targets */
for (i = 0; i < gl_info->limits.buffers; ++i)
{
- context_attach_surface_fbo(context, target, i, entry->render_targets[i], entry->color_location);
+ context_attach_surface_fbo(context, target, i, &entry->key.objects[i + 1],
+ entry->key.rb_namespace & (1 << (i + 1)));
}
- if (depth_stencil && entry->render_targets[0]
- && (depth_stencil->resource.multisample_type
- != entry->render_targets[0]->resource.multisample_type
- || depth_stencil->resource.multisample_quality
- != entry->render_targets[0]->resource.multisample_quality))
- {
- WARN("Color multisample type %u and quality %u, depth stencil has %u and %u, disabling ds buffer.\n",
- entry->render_targets[0]->resource.multisample_quality,
- entry->render_targets[0]->resource.multisample_type,
- depth_stencil->resource.multisample_quality, depth_stencil->resource.multisample_type);
- depth_stencil = NULL;
- }
-
- if (depth_stencil)
- surface_set_compatible_renderbuffer(depth_stencil, entry->render_targets[0]);
- context_attach_depth_stencil_fbo(context, target, depth_stencil, entry->ds_location);
+ context_attach_depth_stencil_fbo(context, target, &entry->key.objects[0],
+ entry->key.rb_namespace & 0x1, entry->flags);
/* Set valid read and draw buffer bindings to satisfy pedantic pre-ES2_compatibility
* GL contexts requirements. */
context_bind_fbo(context, GL_READ_FRAMEBUFFER, read_binding);
}
- entry->attached = TRUE;
+ entry->flags |= WINED3D_FBO_ENTRY_FLAG_ATTACHED;
}
/* Context activation is done by the caller. */
static void context_apply_fbo_state(struct wined3d_context *context, GLenum target,
- struct wined3d_surface **render_targets, struct wined3d_surface *depth_stencil,
+ struct wined3d_rendertarget_info *render_targets, struct wined3d_surface *depth_stencil,
DWORD color_location, DWORD ds_location)
{
struct fbo_entry *entry, *entry2;
void context_apply_fbo_state_blit(struct wined3d_context *context, GLenum target,
struct wined3d_surface *render_target, struct wined3d_surface *depth_stencil, DWORD location)
{
- UINT clear_size = (context->gl_info->limits.buffers - 1) * sizeof(*context->blit_targets);
-
- context->blit_targets[0] = render_target;
- if (clear_size)
- memset(&context->blit_targets[1], 0, clear_size);
+ memset(context->blit_targets, 0, context->gl_info->limits.buffers * sizeof(*context->blit_targets));
+ if (render_target)
+ {
+ context->blit_targets[0].resource = &render_target->container->resource;
+ context->blit_targets[0].sub_resource_idx = surface_get_sub_resource_idx(render_target);
+ context->blit_targets[0].layer_count = 1;
+ }
context_apply_fbo_state(context, target, context->blit_targets, depth_stencil, location, location);
}
list_remove(&query->entry);
query->context = NULL;
- if (context->free_occlusion_query_count >= context->free_occlusion_query_size - 1)
+ if (!wined3d_array_reserve((void **)&context->free_occlusion_queries,
+ &context->free_occlusion_query_size, context->free_occlusion_query_count + 1,
+ sizeof(*context->free_occlusion_queries)))
{
- UINT new_size = context->free_occlusion_query_size << 1;
- GLuint *new_data = HeapReAlloc(GetProcessHeap(), 0, context->free_occlusion_queries,
- new_size * sizeof(*context->free_occlusion_queries));
-
- if (!new_data)
- {
- ERR("Failed to grow free list, leaking query %u in context %p.\n", query->id, context);
- return;
- }
-
- context->free_occlusion_query_size = new_size;
- context->free_occlusion_queries = new_data;
+ ERR("Failed to grow free list, leaking query %u in context %p.\n", query->id, context);
+ return;
}
context->free_occlusion_queries[context->free_occlusion_query_count++] = query->id;
list_remove(&query->entry);
query->context = NULL;
- if (context->free_event_query_count >= context->free_event_query_size - 1)
+ if (!wined3d_array_reserve((void **)&context->free_event_queries,
+ &context->free_event_query_size, context->free_event_query_count + 1,
+ sizeof(*context->free_event_queries)))
{
- UINT new_size = context->free_event_query_size << 1;
- union wined3d_gl_query_object *new_data = HeapReAlloc(GetProcessHeap(), 0, context->free_event_queries,
- new_size * sizeof(*context->free_event_queries));
-
- if (!new_data)
- {
- ERR("Failed to grow free list, leaking query %u in context %p.\n", query->object.id, context);
- return;
- }
-
- context->free_event_query_size = new_size;
- context->free_event_queries = new_data;
+ ERR("Failed to grow free list, leaking query %u in context %p.\n", query->object.id, context);
+ return;
}
context->free_event_queries[context->free_event_query_count++] = query->object;
list_remove(&query->entry);
query->context = NULL;
- if (context->free_timestamp_query_count >= context->free_timestamp_query_size - 1)
+ if (!wined3d_array_reserve((void **)&context->free_timestamp_queries,
+ &context->free_timestamp_query_size, context->free_timestamp_query_count + 1,
+ sizeof(*context->free_timestamp_queries)))
{
- UINT new_size = context->free_timestamp_query_size << 1;
- GLuint *new_data = HeapReAlloc(GetProcessHeap(), 0, context->free_timestamp_queries,
- new_size * sizeof(*context->free_timestamp_queries));
+ ERR("Failed to grow free list, leaking query %u in context %p.\n", query->id, context);
+ return;
+ }
- if (!new_data)
- {
- ERR("Failed to grow free list, leaking query %u in context %p.\n", query->id, context);
- return;
- }
+ context->free_timestamp_queries[context->free_timestamp_query_count++] = query->id;
+}
- context->free_timestamp_query_size = new_size;
- context->free_timestamp_queries = new_data;
+void context_alloc_so_statistics_query(struct wined3d_context *context,
+ struct wined3d_so_statistics_query *query)
+{
+ const struct wined3d_gl_info *gl_info = context->gl_info;
+
+ if (context->free_so_statistics_query_count)
+ {
+ query->u = context->free_so_statistics_queries[--context->free_so_statistics_query_count];
}
+ else
+ {
+ GL_EXTCALL(glGenQueries(ARRAY_SIZE(query->u.id), query->u.id));
+ checkGLcall("glGenQueries");
- context->free_timestamp_queries[context->free_timestamp_query_count++] = query->id;
+ TRACE("Allocated SO statistics queries %u, %u in context %p.\n",
+ query->u.id[0], query->u.id[1], context);
+ }
+
+ query->context = context;
+ list_add_head(&context->so_statistics_queries, &query->entry);
+}
+
+void context_free_so_statistics_query(struct wined3d_so_statistics_query *query)
+{
+ struct wined3d_context *context = query->context;
+
+ list_remove(&query->entry);
+ query->context = NULL;
+
+ if (!wined3d_array_reserve((void **)&context->free_so_statistics_queries,
+ &context->free_so_statistics_query_size, context->free_so_statistics_query_count + 1,
+ sizeof(*context->free_so_statistics_queries)))
+ {
+ ERR("Failed to grow free list, leaking GL queries %u, %u in context %p.\n",
+ query->u.id[0], query->u.id[1], context);
+ return;
+ }
+
+ context->free_so_statistics_queries[context->free_so_statistics_query_count++] = query->u;
+}
+
+void context_alloc_pipeline_statistics_query(struct wined3d_context *context,
+ struct wined3d_pipeline_statistics_query *query)
+{
+ const struct wined3d_gl_info *gl_info = context->gl_info;
+
+ if (context->free_pipeline_statistics_query_count)
+ {
+ query->u = context->free_pipeline_statistics_queries[--context->free_pipeline_statistics_query_count];
+ }
+ else
+ {
+ GL_EXTCALL(glGenQueries(ARRAY_SIZE(query->u.id), query->u.id));
+ checkGLcall("glGenQueries");
+ }
+
+ query->context = context;
+ list_add_head(&context->pipeline_statistics_queries, &query->entry);
+}
+
+void context_free_pipeline_statistics_query(struct wined3d_pipeline_statistics_query *query)
+{
+ struct wined3d_context *context = query->context;
+
+ list_remove(&query->entry);
+ query->context = NULL;
+
+ if (!wined3d_array_reserve((void **)&context->free_pipeline_statistics_queries,
+ &context->free_pipeline_statistics_query_size, context->free_pipeline_statistics_query_count + 1,
+ sizeof(*context->free_pipeline_statistics_queries)))
+ {
+ ERR("Failed to grow free list, leaking GL queries in context %p.\n", context);
+ return;
+ }
+
+ context->free_pipeline_statistics_queries[context->free_pipeline_statistics_query_count++] = query->u;
}
typedef void (context_fbo_entry_func_t)(struct wined3d_context *context, struct fbo_entry *entry);
-static void context_enum_surface_fbo_entries(const struct wined3d_device *device,
- const struct wined3d_surface *surface, context_fbo_entry_func_t *callback)
+static void context_enum_fbo_entries(const struct wined3d_device *device,
+ GLuint name, BOOL rb_namespace, context_fbo_entry_func_t *callback)
{
UINT i;
const struct wined3d_gl_info *gl_info = context->gl_info;
struct fbo_entry *entry, *entry2;
- if (context->current_rt == surface) context->current_rt = NULL;
-
LIST_FOR_EACH_ENTRY_SAFE(entry, entry2, &context->fbo_list, struct fbo_entry, entry)
{
UINT j;
- if (entry->depth_stencil == surface)
+ for (j = 0; j < gl_info->limits.buffers + 1; ++j)
{
- callback(context, entry);
- continue;
- }
-
- for (j = 0; j < gl_info->limits.buffers; ++j)
- {
- if (entry->render_targets[j] == surface)
+ if (entry->key.objects[j].object == name
+ && !(entry->key.rb_namespace & (1 << j)) == !rb_namespace)
{
callback(context, entry);
break;
void context_resource_released(const struct wined3d_device *device,
struct wined3d_resource *resource, enum wined3d_resource_type type)
{
- if (!device->d3d_initialized) return;
+ struct wined3d_texture *texture;
+ UINT i;
+
+ if (!device->d3d_initialized)
+ return;
switch (type)
{
- case WINED3D_RTYPE_SURFACE:
- context_enum_surface_fbo_entries(device, surface_from_resource(resource),
- context_queue_fbo_entry_destruction);
+ case WINED3D_RTYPE_TEXTURE_2D:
+ case WINED3D_RTYPE_TEXTURE_3D:
+ texture = texture_from_resource(resource);
+
+ for (i = 0; i < device->context_count; ++i)
+ {
+ struct wined3d_context *context = device->contexts[i];
+ if (context->current_rt.texture == texture)
+ {
+ context->current_rt.texture = NULL;
+ context->current_rt.sub_resource_idx = 0;
+ }
+ }
break;
default:
}
}
-static void context_detach_fbo_entry(struct wined3d_context *context, struct fbo_entry *entry)
-{
- entry->attached = FALSE;
-}
-
-void context_resource_unloaded(const struct wined3d_device *device,
- struct wined3d_resource *resource, enum wined3d_resource_type type)
+void context_gl_resource_released(struct wined3d_device *device,
+ GLuint name, BOOL rb_namespace)
{
- switch (type)
- {
- case WINED3D_RTYPE_SURFACE:
- context_enum_surface_fbo_entries(device, surface_from_resource(resource),
- context_detach_fbo_entry);
- break;
-
- default:
- break;
- }
+ /* Queue destruction of every FBO entry that still references the
+ * released GL object "name". rb_namespace selects whether the name
+ * refers to a renderbuffer or a texture object. */
+ context_enum_fbo_entries(device, name, rb_namespace, context_queue_fbo_entry_destruction);
}
void context_surface_update(struct wined3d_context *context, const struct wined3d_surface *surface)
if (!entry || context->rebind_fbo) return;
- for (i = 0; i < gl_info->limits.buffers; ++i)
+ for (i = 0; i < gl_info->limits.buffers + 1; ++i)
{
- if (surface == entry->render_targets[i])
+ if (surface->container->texture_rgb.name == entry->key.objects[i].object
+ || surface->container->texture_srgb.name == entry->key.objects[i].object)
{
- TRACE("Updated surface %p is bound as color attachment %u to the current FBO.\n", surface, i);
+ TRACE("Updated surface %p is bound as attachment %u to the current FBO.\n", surface, i);
context->rebind_fbo = TRUE;
return;
}
}
+}
+
+static BOOL context_restore_pixel_format(struct wined3d_context *ctx)
+{
+ const struct wined3d_gl_info *gl_info = ctx->gl_info;
+ BOOL ret = FALSE;
- if (surface == entry->depth_stencil)
+ if (ctx->restore_pf && IsWindow(ctx->restore_pf_win))
{
- TRACE("Updated surface %p is bound as depth attachment to the current FBO.\n", surface);
- context->rebind_fbo = TRUE;
+ if (ctx->gl_info->supported[WGL_WINE_PIXEL_FORMAT_PASSTHROUGH])
+ {
+ HDC dc = GetDCEx(ctx->restore_pf_win, 0, DCX_USESTYLE | DCX_CACHE);
+ if (dc)
+ {
+ if (!(ret = GL_EXTCALL(wglSetPixelFormatWINE(dc, ctx->restore_pf))))
+ {
+ ERR("wglSetPixelFormatWINE failed to restore pixel format %d on window %p.\n",
+ ctx->restore_pf, ctx->restore_pf_win);
+ }
+ ReleaseDC(ctx->restore_pf_win, dc);
+ }
+ }
+ else
+ {
+ ERR("can't restore pixel format %d on window %p\n", ctx->restore_pf, ctx->restore_pf_win);
+ }
}
+
+ ctx->restore_pf = 0;
+ ctx->restore_pf_win = NULL;
+ return ret;
}
-static BOOL context_set_pixel_format(const struct wined3d_gl_info *gl_info, HDC dc, int format)
+static BOOL context_set_pixel_format(struct wined3d_context *context, HDC dc, BOOL private, int format)
{
- int current = GetPixelFormat(dc);
+ const struct wined3d_gl_info *gl_info = context->gl_info;
+ int current;
+
+ if (dc == context->hdc && context->hdc_is_private && context->hdc_has_format)
+ return TRUE;
- if (current == format) return TRUE;
+ current = gl_info->gl_ops.wgl.p_wglGetPixelFormat(dc);
+ if (current == format) goto success;
if (!current)
{
format, dc, GetLastError());
return FALSE;
}
- return TRUE;
+
+ context->restore_pf = 0;
+ context->restore_pf_win = private ? NULL : WindowFromDC(dc);
+ goto success;
}
/* By default WGL doesn't allow pixel format adjustments but we need it
* when really needed. */
if (gl_info->supported[WGL_WINE_PIXEL_FORMAT_PASSTHROUGH])
{
+ HWND win;
+
if (!GL_EXTCALL(wglSetPixelFormatWINE(dc, format)))
{
ERR("wglSetPixelFormatWINE failed to set pixel format %d on device context %p.\n",
format, dc);
return FALSE;
}
- return TRUE;
+
+ win = private ? NULL : WindowFromDC(dc);
+ if (win != context->restore_pf_win)
+ {
+ context_restore_pixel_format(context);
+
+ context->restore_pf = private ? 0 : current;
+ context->restore_pf_win = win;
+ }
+
+ goto success;
}
/* OpenGL doesn't allow pixel format adjustments. Print an error and
ERR("Unable to set pixel format %d on device context %p. Already using format %d.\n",
format, dc, current);
return TRUE;
+
+success:
+ if (dc == context->hdc && context->hdc_is_private)
+ context->hdc_has_format = TRUE;
+ return TRUE;
}
static BOOL context_set_gl_context(struct wined3d_context *ctx)
struct wined3d_swapchain *swapchain = ctx->swapchain;
BOOL backup = FALSE;
- if (!context_set_pixel_format(ctx->gl_info, ctx->hdc, ctx->pixel_format))
+ if (!context_set_pixel_format(ctx, ctx->hdc, ctx->hdc_is_private, ctx->pixel_format))
{
WARN("Failed to set pixel format %d on device context %p.\n",
ctx->pixel_format, ctx->hdc);
* a swapchain, so we can't use the swapchain to get a backup dc. To
* make this work windowless contexts would need to be handled by the
* device. */
- if (ctx->destroyed)
+ if (ctx->destroyed || !swapchain)
{
FIXME("Unable to get backup dc for destroyed context %p.\n", ctx);
context_set_current(NULL);
return FALSE;
}
- if (!context_set_pixel_format(ctx->gl_info, dc, ctx->pixel_format))
+ if (!context_set_pixel_format(ctx, dc, TRUE, ctx->pixel_format))
{
ERR("Failed to set pixel format %d on device context %p.\n",
ctx->pixel_format, dc);
return TRUE;
}
-static void context_restore_gl_context(const struct wined3d_gl_info *gl_info, HDC dc, HGLRC gl_ctx, int pf)
+static void context_restore_gl_context(const struct wined3d_gl_info *gl_info, HDC dc, HGLRC gl_ctx)
{
- if (!context_set_pixel_format(gl_info, dc, pf))
- {
- ERR("Failed to restore pixel format %d on device context %p.\n", pf, dc);
- context_set_current(NULL);
- return;
- }
-
if (!wglMakeCurrent(dc, gl_ctx))
{
ERR("Failed to restore GL context %p on device context %p, last error %#x.\n",
static void context_update_window(struct wined3d_context *context)
{
+ if (!context->swapchain)
+ return;
+
if (context->win_handle == context->swapchain->win_handle)
return;
wined3d_release_dc(context->win_handle, context->hdc);
context->win_handle = context->swapchain->win_handle;
+ context->hdc_is_private = FALSE;
+ context->hdc_has_format = FALSE;
context->needs_set = 1;
context->valid = 1;
static void context_destroy_gl_resources(struct wined3d_context *context)
{
const struct wined3d_gl_info *gl_info = context->gl_info;
+ struct wined3d_so_statistics_query *so_statistics_query;
+ struct wined3d_pipeline_statistics_query *pipeline_statistics_query;
struct wined3d_timestamp_query *timestamp_query;
struct wined3d_occlusion_query *occlusion_query;
struct wined3d_event_query *event_query;
HGLRC restore_ctx;
HDC restore_dc;
unsigned int i;
- int restore_pf;
restore_ctx = wglGetCurrentContext();
restore_dc = wglGetCurrentDC();
- restore_pf = GetPixelFormat(restore_dc);
if (restore_ctx == context->glCtx)
restore_ctx = NULL;
else if (context->valid)
context_set_gl_context(context);
+ LIST_FOR_EACH_ENTRY(so_statistics_query, &context->so_statistics_queries,
+ struct wined3d_so_statistics_query, entry)
+ {
+ if (context->valid)
+ GL_EXTCALL(glDeleteQueries(ARRAY_SIZE(so_statistics_query->u.id), so_statistics_query->u.id));
+ so_statistics_query->context = NULL;
+ }
+
+ LIST_FOR_EACH_ENTRY(pipeline_statistics_query, &context->pipeline_statistics_queries,
+ struct wined3d_pipeline_statistics_query, entry)
+ {
+ if (context->valid)
+ GL_EXTCALL(glDeleteQueries(ARRAY_SIZE(pipeline_statistics_query->u.id), pipeline_statistics_query->u.id));
+ pipeline_statistics_query->context = NULL;
+ }
+
LIST_FOR_EACH_ENTRY(timestamp_query, &context->timestamp_queries, struct wined3d_timestamp_query, entry)
{
if (context->valid)
GL_EXTCALL(glDeleteProgramsARB(1, &context->dummy_arbfp_prog));
}
+ if (gl_info->supported[WINED3D_GL_PRIMITIVE_QUERY])
+ {
+ for (i = 0; i < context->free_so_statistics_query_count; ++i)
+ {
+ union wined3d_gl_so_statistics_query *q = &context->free_so_statistics_queries[i];
+ GL_EXTCALL(glDeleteQueries(ARRAY_SIZE(q->id), q->id));
+ }
+ }
+
+ if (gl_info->supported[ARB_PIPELINE_STATISTICS_QUERY])
+ {
+ for (i = 0; i < context->free_pipeline_statistics_query_count; ++i)
+ {
+ union wined3d_gl_pipeline_statistics_query *q = &context->free_pipeline_statistics_queries[i];
+ GL_EXTCALL(glDeleteQueries(ARRAY_SIZE(q->id), q->id));
+ }
+ }
+
if (gl_info->supported[ARB_TIMER_QUERY])
GL_EXTCALL(glDeleteQueries(context->free_timestamp_query_count, context->free_timestamp_queries));
checkGLcall("context cleanup");
}
+ HeapFree(GetProcessHeap(), 0, context->free_so_statistics_queries);
+ HeapFree(GetProcessHeap(), 0, context->free_pipeline_statistics_queries);
HeapFree(GetProcessHeap(), 0, context->free_timestamp_queries);
HeapFree(GetProcessHeap(), 0, context->free_occlusion_queries);
HeapFree(GetProcessHeap(), 0, context->free_event_queries);
+ context_restore_pixel_format(context);
if (restore_ctx)
{
- context_restore_gl_context(gl_info, restore_dc, restore_ctx, restore_pf);
+ context_restore_gl_context(gl_info, restore_dc, restore_ctx);
}
else if (wglGetCurrentContext() && !wglMakeCurrent(NULL, NULL))
{
{
if (wglGetCurrentContext())
{
+ const struct wined3d_gl_info *gl_info = old->gl_info;
TRACE("Flushing context %p before switching to %p.\n", old, ctx);
- glFlush();
+ gl_info->gl_ops.gl.p_glFlush();
}
old->current = 0;
}
return FALSE;
ctx->current = 1;
}
- else if(wglGetCurrentContext())
+ else if (wglGetCurrentContext())
{
TRACE("Clearing current D3D context.\n");
if (!wglMakeCurrent(NULL, NULL))
WARN("Context %p is not the current context.\n", context);
}
- if (!--context->level && context->restore_ctx)
+ if (!--context->level)
{
- TRACE("Restoring GL context %p on device context %p.\n", context->restore_ctx, context->restore_dc);
- context_restore_gl_context(context->gl_info, context->restore_dc, context->restore_ctx, context->restore_pf);
- context->restore_ctx = NULL;
- context->restore_dc = NULL;
+ if (context_restore_pixel_format(context))
+ context->needs_set = 1;
+ if (context->restore_ctx)
+ {
+ TRACE("Restoring GL context %p on device context %p.\n", context->restore_ctx, context->restore_dc);
+ context_restore_gl_context(context->gl_info, context->restore_dc, context->restore_ctx);
+ context->restore_ctx = NULL;
+ context->restore_dc = NULL;
+ }
+
+ if (context->destroy_delayed)
+ {
+ TRACE("Destroying context %p.\n", context);
+ context_destroy(context->device, context);
+ }
}
}
* A to avoid breaking caller code. */
void context_restore(struct wined3d_context *context, struct wined3d_surface *restore)
{
- if (context->current_rt != restore)
+ if (context->current_rt.texture != restore->container
+ || context->current_rt.sub_resource_idx != surface_get_sub_resource_idx(restore))
{
context_release(context);
- context = context_acquire(restore->resource.device, restore);
+ context = context_acquire(restore->container->resource.device,
+ restore->container, surface_get_sub_resource_idx(restore));
}
context_release(context);
current_gl, wglGetCurrentDC());
context->restore_ctx = current_gl;
context->restore_dc = wglGetCurrentDC();
- context->restore_pf = GetPixelFormat(context->restore_dc);
context->needs_set = 1;
}
+ else if (!context->needs_set && !(context->hdc_is_private && context->hdc_has_format)
+ && context->pixel_format != context->gl_info->gl_ops.wgl.p_wglGetPixelFormat(context->hdc))
+ context->needs_set = 1;
}
}
+void context_invalidate_compute_state(struct wined3d_context *context, DWORD state_id)
+{
+ /* Compute states live in a separate dirty bitmap, indexed by the
+ * state's representative relative to STATE_COMPUTE_OFFSET. */
+ DWORD representative = context->state_table[state_id].representative - STATE_COMPUTE_OFFSET;
+ unsigned int index, shift;
+
+ /* Locate the bitmap word and bit for this representative. The mask
+ * form of the modulo relies on the word size in bits being a power
+ * of two. */
+ index = representative / (sizeof(*context->dirty_compute_states) * CHAR_BIT);
+ shift = representative & (sizeof(*context->dirty_compute_states) * CHAR_BIT - 1);
+ context->dirty_compute_states[index] |= (1u << shift);
+}
+
void context_invalidate_state(struct wined3d_context *context, DWORD state)
{
DWORD rep = context->state_table[state].representative;
/* This function takes care of wined3d pixel format selection. */
static int context_choose_pixel_format(const struct wined3d_device *device, HDC hdc,
const struct wined3d_format *color_format, const struct wined3d_format *ds_format,
- BOOL auxBuffers, BOOL findCompatible)
+ BOOL auxBuffers)
{
- int iPixelFormat=0;
- unsigned int current_value;
unsigned int cfg_count = device->adapter->cfg_count;
+ unsigned int current_value;
+ PIXELFORMATDESCRIPTOR pfd;
+ int iPixelFormat = 0;
unsigned int i;
- TRACE("device %p, dc %p, color_format %s, ds_format %s, aux_buffers %#x, find_compatible %#x.\n",
+ TRACE("device %p, dc %p, color_format %s, ds_format %s, aux_buffers %#x.\n",
device, hdc, debug_d3dformat(color_format->id), debug_d3dformat(ds_format->id),
- auxBuffers, findCompatible);
+ auxBuffers);
current_value = 0;
for (i = 0; i < cfg_count; ++i)
}
}
- /* When findCompatible is set and no suitable format was found, let ChoosePixelFormat choose a pixel format in order not to crash. */
- if(!iPixelFormat && !findCompatible) {
- ERR("Can't find a suitable iPixelFormat\n");
- return FALSE;
- } else if(!iPixelFormat) {
- PIXELFORMATDESCRIPTOR pfd;
+ if (!iPixelFormat)
+ {
+ ERR("Trying to locate a compatible pixel format because an exact match failed.\n");
- TRACE("Falling back to ChoosePixelFormat as we weren't able to find an exactly matching pixel format\n");
- /* PixelFormat selection */
- ZeroMemory(&pfd, sizeof(pfd));
+ memset(&pfd, 0, sizeof(pfd));
pfd.nSize = sizeof(pfd);
pfd.nVersion = 1;
pfd.dwFlags = PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER | PFD_DRAW_TO_WINDOW;/*PFD_GENERIC_ACCELERATED*/
pfd.cStencilBits = ds_format->stencil_size;
pfd.iLayerType = PFD_MAIN_PLANE;
- iPixelFormat = ChoosePixelFormat(hdc, &pfd);
- if(!iPixelFormat) {
- /* If this happens something is very wrong as ChoosePixelFormat barely fails */
- ERR("Can't find a suitable iPixelFormat\n");
- return FALSE;
+ if (!(iPixelFormat = ChoosePixelFormat(hdc, &pfd)))
+ {
+ /* Something is very wrong as ChoosePixelFormat() barely fails. */
+ ERR("Can't find a suitable pixel format.\n");
+ return 0;
}
}
- TRACE("Found iPixelFormat=%d for ColorFormat=%s, DepthStencilFormat=%s\n",
+ TRACE("Found iPixelFormat=%d for ColorFormat=%s, DepthStencilFormat=%s.\n",
iPixelFormat, debug_d3dformat(color_format->id), debug_d3dformat(ds_format->id));
return iPixelFormat;
}
/* Context activation is done by the caller. */
-static void bind_dummy_textures(const struct wined3d_device *device, const struct wined3d_context *context)
+void context_bind_dummy_textures(const struct wined3d_device *device, const struct wined3d_context *context)
{
const struct wined3d_gl_info *gl_info = context->gl_info;
- unsigned int i, count = min(MAX_COMBINED_SAMPLERS, gl_info->limits.combined_samplers);
+ unsigned int i;
- for (i = 0; i < count; ++i)
+ for (i = 0; i < gl_info->limits.combined_samplers; ++i)
{
GL_EXTCALL(glActiveTexture(GL_TEXTURE0 + i));
checkGLcall("glActiveTexture");
- gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_2D, device->dummy_texture_2d[i]);
- checkGLcall("glBindTexture");
+ gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_1D, device->dummy_textures.tex_1d);
+ gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_2D, device->dummy_textures.tex_2d);
if (gl_info->supported[ARB_TEXTURE_RECTANGLE])
- {
- gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_RECTANGLE_ARB, device->dummy_texture_rect[i]);
- checkGLcall("glBindTexture");
- }
+ gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_RECTANGLE_ARB, device->dummy_textures.tex_rect);
if (gl_info->supported[EXT_TEXTURE3D])
- {
- gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_3D, device->dummy_texture_3d[i]);
- checkGLcall("glBindTexture");
- }
+ gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_3D, device->dummy_textures.tex_3d);
if (gl_info->supported[ARB_TEXTURE_CUBE_MAP])
+ gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_CUBE_MAP, device->dummy_textures.tex_cube);
+
+ if (gl_info->supported[ARB_TEXTURE_CUBE_MAP_ARRAY])
+ gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_CUBE_MAP_ARRAY, device->dummy_textures.tex_cube_array);
+
+ if (gl_info->supported[EXT_TEXTURE_ARRAY])
{
- gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_CUBE_MAP, device->dummy_texture_cube[i]);
- checkGLcall("glBindTexture");
+ gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_1D_ARRAY, device->dummy_textures.tex_1d_array);
+ gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_2D_ARRAY, device->dummy_textures.tex_2d_array);
}
+
+ if (gl_info->supported[ARB_TEXTURE_BUFFER_OBJECT])
+ gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_BUFFER, device->dummy_textures.tex_buffer);
+
+ checkGLcall("Bind dummy textures");
}
}
+void wined3d_check_gl_call(const struct wined3d_gl_info *gl_info,
+ const char *file, unsigned int line, const char *name)
+{
+ GLint err;
+
+ /* When ARB_debug_output is available, errors are reported through the
+ * debug callback instead, so skip polling glGetError() entirely. */
+ if (gl_info->supported[ARB_DEBUG_OUTPUT] || (err = gl_info->gl_ops.gl.p_glGetError()) == GL_NO_ERROR)
+ {
+ TRACE("%s call ok %s / %u.\n", name, file, line);
+ return;
+ }
+
+ /* glGetError() returns one accumulated error flag at a time; drain and
+ * report all of them. */
+ do
+ {
+ ERR(">>>>>>> %s (%#x) from %s @ %s / %u.\n",
+ debug_glerror(err), err, name, file, line);
+ err = gl_info->gl_ops.gl.p_glGetError();
+ } while (err != GL_NO_ERROR);
+}
+
static BOOL context_debug_output_enabled(const struct wined3d_gl_info *gl_info)
{
return gl_info->supported[ARB_DEBUG_OUTPUT]
}
struct wined3d_context *context_create(struct wined3d_swapchain *swapchain,
- struct wined3d_surface *target, const struct wined3d_format *ds_format)
+ struct wined3d_texture *target, const struct wined3d_format *ds_format)
{
struct wined3d_device *device = swapchain->device;
+ const struct wined3d_d3d_info *d3d_info = &device->adapter->d3d_info;
const struct wined3d_gl_info *gl_info = &device->adapter->gl_info;
const struct wined3d_format *color_format;
struct wined3d_context *ret;
+ BOOL hdc_is_private = FALSE;
BOOL auxBuffers = FALSE;
HGLRC ctx, share_ctx;
+ DWORD target_usage;
int pixel_format;
- unsigned int s;
- int swap_interval;
+ unsigned int i;
DWORD state;
HDC hdc = 0;
TRACE("swapchain %p, target %p, window %p.\n", swapchain, target, swapchain->win_handle);
+ wined3d_from_cs(device->cs);
+
ret = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, sizeof(*ret));
if (!ret)
return NULL;
- ret->blit_targets = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY,
- gl_info->limits.buffers * sizeof(*ret->blit_targets));
- if (!ret->blit_targets)
+ if (!(ret->blit_targets = wined3d_calloc(gl_info->limits.buffers, sizeof(*ret->blit_targets))))
goto out;
- ret->draw_buffers = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY,
- gl_info->limits.buffers * sizeof(*ret->draw_buffers));
- if (!ret->draw_buffers)
+ if (!(ret->draw_buffers = wined3d_calloc(gl_info->limits.buffers, sizeof(*ret->draw_buffers))))
+ goto out;
+
+ ret->fbo_key = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY,
+ FIELD_OFFSET(struct wined3d_fbo_entry_key, objects[gl_info->limits.buffers + 1]));
+ if (!ret->fbo_key)
goto out;
ret->free_timestamp_query_size = 4;
- ret->free_timestamp_queries = HeapAlloc(GetProcessHeap(), 0,
- ret->free_timestamp_query_size * sizeof(*ret->free_timestamp_queries));
- if (!ret->free_timestamp_queries)
+ if (!(ret->free_timestamp_queries = wined3d_calloc(ret->free_timestamp_query_size,
+ sizeof(*ret->free_timestamp_queries))))
goto out;
list_init(&ret->timestamp_queries);
ret->free_occlusion_query_size = 4;
- ret->free_occlusion_queries = HeapAlloc(GetProcessHeap(), 0,
- ret->free_occlusion_query_size * sizeof(*ret->free_occlusion_queries));
- if (!ret->free_occlusion_queries)
+ if (!(ret->free_occlusion_queries = wined3d_calloc(ret->free_occlusion_query_size,
+ sizeof(*ret->free_occlusion_queries))))
goto out;
-
list_init(&ret->occlusion_queries);
ret->free_event_query_size = 4;
- ret->free_event_queries = HeapAlloc(GetProcessHeap(), 0,
- ret->free_event_query_size * sizeof(*ret->free_event_queries));
- if (!ret->free_event_queries)
+ if (!(ret->free_event_queries = wined3d_calloc(ret->free_event_query_size,
+ sizeof(*ret->free_event_queries))))
goto out;
-
list_init(&ret->event_queries);
+
+ list_init(&ret->so_statistics_queries);
+
+ list_init(&ret->pipeline_statistics_queries);
+
list_init(&ret->fbo_list);
list_init(&ret->fbo_destroy_list);
goto out;
}
-#if defined(STAGING_CSMT)
- ret->current_fb.render_targets = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY,
- sizeof(*ret->current_fb.render_targets) * gl_info->limits.buffers);
- ret->current_fb.rt_size = gl_info->limits.buffers;
- if (!ret->current_fb.render_targets)
- goto out;
- if (device->context_count)
- ret->offscreenBuffer = device->contexts[0]->offscreenBuffer;
-
-#endif /* STAGING_CSMT */
- /* Initialize the texture unit mapping to a 1:1 mapping */
- for (s = 0; s < MAX_COMBINED_SAMPLERS; ++s)
+ for (i = 0; i < ARRAY_SIZE(ret->tex_unit_map); ++i)
+ ret->tex_unit_map[i] = WINED3D_UNMAPPED_STAGE;
+ for (i = 0; i < ARRAY_SIZE(ret->rev_tex_unit_map); ++i)
+ ret->rev_tex_unit_map[i] = WINED3D_UNMAPPED_STAGE;
+ if (gl_info->limits.graphics_samplers >= MAX_COMBINED_SAMPLERS)
{
- if (s < gl_info->limits.combined_samplers)
+ /* Initialize the texture unit mapping to a 1:1 mapping. */
+ unsigned int base, count;
+
+ wined3d_gl_limits_get_texture_unit_range(&gl_info->limits, WINED3D_SHADER_TYPE_PIXEL, &base, &count);
+ if (base + MAX_FRAGMENT_SAMPLERS > ARRAY_SIZE(ret->rev_tex_unit_map))
{
- ret->tex_unit_map[s] = s;
- ret->rev_tex_unit_map[s] = s;
+ ERR("Unexpected texture unit base index %u.\n", base);
+ goto out;
}
- else
+ for (i = 0; i < min(count, MAX_FRAGMENT_SAMPLERS); ++i)
+ {
+ ret->tex_unit_map[i] = base + i;
+ ret->rev_tex_unit_map[base + i] = i;
+ }
+
+ wined3d_gl_limits_get_texture_unit_range(&gl_info->limits, WINED3D_SHADER_TYPE_VERTEX, &base, &count);
+ if (base + MAX_VERTEX_SAMPLERS > ARRAY_SIZE(ret->rev_tex_unit_map))
+ {
+ ERR("Unexpected texture unit base index %u.\n", base);
+ goto out;
+ }
+ for (i = 0; i < min(count, MAX_VERTEX_SAMPLERS); ++i)
{
- ret->tex_unit_map[s] = WINED3D_UNMAPPED_STAGE;
- ret->rev_tex_unit_map[s] = WINED3D_UNMAPPED_STAGE;
+ ret->tex_unit_map[MAX_FRAGMENT_SAMPLERS + i] = base + i;
+ ret->rev_tex_unit_map[base + i] = MAX_FRAGMENT_SAMPLERS + i;
}
}
+ if (!(ret->texture_type = wined3d_calloc(gl_info->limits.combined_samplers,
+ sizeof(*ret->texture_type))))
+ goto out;
+
if (!(hdc = GetDCEx(swapchain->win_handle, 0, DCX_USESTYLE | DCX_CACHE)))
{
WARN("Failed to retrieve device context, trying swapchain backup.\n");
- if (!(hdc = swapchain_get_backup_dc(swapchain)))
+ if ((hdc = swapchain_get_backup_dc(swapchain)))
+ hdc_is_private = TRUE;
+ else
{
ERR("Failed to retrieve a device context.\n");
goto out;
}
color_format = target->resource.format;
+ target_usage = target->resource.usage;
/* In case of ORM_BACKBUFFER, make sure to request an alpha component for
* X4R4G4B4/X8R8G8B8 as we might need it for the backbuffer. */
auxBuffers = TRUE;
if (color_format->id == WINED3DFMT_B4G4R4X4_UNORM)
- color_format = wined3d_get_format(gl_info, WINED3DFMT_B4G4R4A4_UNORM);
+ color_format = wined3d_get_format(gl_info, WINED3DFMT_B4G4R4A4_UNORM, target_usage);
else if (color_format->id == WINED3DFMT_B8G8R8X8_UNORM)
- color_format = wined3d_get_format(gl_info, WINED3DFMT_B8G8R8A8_UNORM);
+ color_format = wined3d_get_format(gl_info, WINED3DFMT_B8G8R8A8_UNORM, target_usage);
}
/* DirectDraw supports 8bit paletted render targets and these are used by
* For this reason we require a format with 8bit alpha, so request
* A8R8G8B8. */
if (color_format->id == WINED3DFMT_P8_UINT)
- color_format = wined3d_get_format(gl_info, WINED3DFMT_B8G8R8A8_UNORM);
+ color_format = wined3d_get_format(gl_info, WINED3DFMT_B8G8R8A8_UNORM, target_usage);
- /* When "always_offscreen" is enabled, we only use the drawable for
+ /* When using FBOs for off-screen rendering, we only use the drawable for
* presentation blits, and don't do any rendering to it. That means we
* don't need depth or stencil buffers, and can mostly ignore the render
* target format. This wouldn't necessarily be quite correct for 10bpc
* Using the same format regardless of the color/depth/stencil targets
* makes it much less likely that different wined3d instances will set
* conflicting pixel formats. */
- if (wined3d_settings.offscreen_rendering_mode != ORM_BACKBUFFER
- && wined3d_settings.always_offscreen)
+ if (wined3d_settings.offscreen_rendering_mode != ORM_BACKBUFFER)
{
- color_format = wined3d_get_format(gl_info, WINED3DFMT_B8G8R8A8_UNORM);
- ds_format = wined3d_get_format(gl_info, WINED3DFMT_UNKNOWN);
+ color_format = wined3d_get_format(gl_info, WINED3DFMT_B8G8R8A8_UNORM, target_usage);
+ ds_format = wined3d_get_format(gl_info, WINED3DFMT_UNKNOWN, WINED3DUSAGE_DEPTHSTENCIL);
}
/* Try to find a pixel format which matches our requirements. */
- pixel_format = context_choose_pixel_format(device, hdc, color_format, ds_format, auxBuffers, FALSE);
-
- /* Try to locate a compatible format if we weren't able to find anything. */
- if (!pixel_format)
- {
- TRACE("Trying to locate a compatible pixel format because an exact match failed.\n");
- pixel_format = context_choose_pixel_format(device, hdc, color_format, ds_format, auxBuffers, TRUE);
- }
-
- /* If we still don't have a pixel format, something is very wrong as ChoosePixelFormat barely fails */
- if (!pixel_format)
- {
- ERR("Can't find a suitable pixel format.\n");
+ if (!(pixel_format = context_choose_pixel_format(device, hdc, color_format, ds_format, auxBuffers)))
goto out;
- }
+
+ ret->gl_info = gl_info;
context_enter(ret);
- if (!context_set_pixel_format(gl_info, hdc, pixel_format))
+ if (!context_set_pixel_format(ret, hdc, hdc_is_private, pixel_format))
{
ERR("Failed to set pixel format %d on device context %p.\n", pixel_format, hdc);
context_release(ret);
goto out;
}
- ret->gl_info = gl_info;
- ret->d3d_info = &device->adapter->d3d_info;
+ ret->d3d_info = d3d_info;
ret->state_table = device->StateTable;
- /* Mark all states dirty to force a proper initialization of the states
- * on the first use of the context. */
+ /* Mark all states dirty to force a proper initialization of the states on
+ * the first use of the context. Compute states do not need initialization. */
for (state = 0; state <= STATE_HIGHEST; ++state)
{
- if (ret->state_table[state].representative)
+ if (ret->state_table[state].representative && !STATE_IS_COMPUTE(state))
context_invalidate_state(ret, state);
}
+ ret->device = device;
ret->swapchain = swapchain;
- ret->current_rt = target;
+ ret->current_rt.texture = target;
+ ret->current_rt.sub_resource_idx = 0;
ret->tid = GetCurrentThreadId();
- ret->render_offscreen = wined3d_resource_is_offscreen(&target->container->resource);
+ ret->render_offscreen = wined3d_resource_is_offscreen(&target->resource);
ret->draw_buffers_mask = context_generate_rt_mask(GL_BACK);
ret->valid = 1;
ret->glCtx = ctx;
ret->win_handle = swapchain->win_handle;
ret->hdc = hdc;
+ ret->hdc_is_private = hdc_is_private;
+ ret->hdc_has_format = TRUE;
ret->pixel_format = pixel_format;
ret->needs_set = 1;
}
}
- switch (swapchain->desc.swap_interval)
- {
- case WINED3DPRESENT_INTERVAL_IMMEDIATE:
- swap_interval = 0;
- break;
- case WINED3DPRESENT_INTERVAL_DEFAULT:
- case WINED3DPRESENT_INTERVAL_ONE:
- swap_interval = 1;
- break;
- case WINED3DPRESENT_INTERVAL_TWO:
- swap_interval = 2;
- break;
- case WINED3DPRESENT_INTERVAL_THREE:
- swap_interval = 3;
- break;
- case WINED3DPRESENT_INTERVAL_FOUR:
- swap_interval = 4;
- break;
- default:
- FIXME("Unknown swap interval %#x.\n", swapchain->desc.swap_interval);
- swap_interval = 1;
- }
-
- if (gl_info->supported[WGL_EXT_SWAP_CONTROL])
- {
- if (!GL_EXTCALL(wglSwapIntervalEXT(swap_interval)))
- ERR("wglSwapIntervalEXT failed to set swap interval %d for context %p, last error %#x\n",
- swap_interval, ret, GetLastError());
- }
-
- gl_info->gl_ops.gl.p_glGetIntegerv(GL_AUX_BUFFERS, &ret->aux_buffers);
+ if (gl_info->supported[WINED3D_GL_LEGACY_CONTEXT])
+ gl_info->gl_ops.gl.p_glGetIntegerv(GL_AUX_BUFFERS, &ret->aux_buffers);
TRACE("Setting up the screen\n");
- gl_info->gl_ops.gl.p_glLightModeli(GL_LIGHT_MODEL_LOCAL_VIEWER, GL_TRUE);
- checkGLcall("glLightModeli(GL_LIGHT_MODEL_LOCAL_VIEWER, GL_TRUE);");
-
- gl_info->gl_ops.gl.p_glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_COMBINE_EXT);
- checkGLcall("glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_COMBINE_EXT);");
+ if (gl_info->supported[WINED3D_GL_LEGACY_CONTEXT])
+ {
+ gl_info->gl_ops.gl.p_glLightModeli(GL_LIGHT_MODEL_LOCAL_VIEWER, GL_TRUE);
+ checkGLcall("glLightModeli(GL_LIGHT_MODEL_LOCAL_VIEWER, GL_TRUE);");
- gl_info->gl_ops.gl.p_glLightModeli(GL_LIGHT_MODEL_COLOR_CONTROL, GL_SEPARATE_SPECULAR_COLOR);
- checkGLcall("glLightModeli(GL_LIGHT_MODEL_COLOR_CONTROL, GL_SEPARATE_SPECULAR_COLOR);");
+ gl_info->gl_ops.gl.p_glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_COMBINE_EXT);
+ checkGLcall("glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_COMBINE_EXT);");
- gl_info->gl_ops.gl.p_glPixelStorei(GL_PACK_ALIGNMENT, device->surface_alignment);
- checkGLcall("glPixelStorei(GL_PACK_ALIGNMENT, device->surface_alignment);");
- gl_info->gl_ops.gl.p_glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
- checkGLcall("glPixelStorei(GL_UNPACK_ALIGNMENT, device->surface_alignment);");
-
- if (!gl_info->supported[WINED3D_GL_LEGACY_CONTEXT])
+ gl_info->gl_ops.gl.p_glLightModeli(GL_LIGHT_MODEL_COLOR_CONTROL, GL_SEPARATE_SPECULAR_COLOR);
+ checkGLcall("glLightModeli(GL_LIGHT_MODEL_COLOR_CONTROL, GL_SEPARATE_SPECULAR_COLOR);");
+ }
+ else
{
GLuint vao;
checkGLcall("creating VAO");
}
+ gl_info->gl_ops.gl.p_glPixelStorei(GL_PACK_ALIGNMENT, device->surface_alignment);
+ checkGLcall("glPixelStorei(GL_PACK_ALIGNMENT, device->surface_alignment);");
+ gl_info->gl_ops.gl.p_glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
+ checkGLcall("glPixelStorei(GL_UNPACK_ALIGNMENT, 1);");
+
if (gl_info->supported[ARB_VERTEX_BLEND])
{
/* Direct3D always uses n-1 weights for n world matrices and uses
/* Set up the previous texture input for all shader units. This applies to bump mapping, and in d3d
* the previous texture where to source the offset from is always unit - 1.
*/
- for (s = 1; s < gl_info->limits.textures; ++s)
+ for (i = 1; i < gl_info->limits.textures; ++i)
{
- context_active_texture(ret, gl_info, s);
+ context_active_texture(ret, gl_info, i);
gl_info->gl_ops.gl.p_glTexEnvi(GL_TEXTURE_SHADER_NV,
- GL_PREVIOUS_TEXTURE_INPUT_NV, GL_TEXTURE0_ARB + s - 1);
+ GL_PREVIOUS_TEXTURE_INPUT_NV, GL_TEXTURE0_ARB + i - 1);
checkGLcall("glTexEnvi(GL_TEXTURE_SHADER_NV, GL_PREVIOUS_TEXTURE_INPUT_NV, ...");
}
}
if (gl_info->supported[ARB_POINT_SPRITE])
{
- for (s = 0; s < gl_info->limits.textures; ++s)
+ for (i = 0; i < gl_info->limits.textures; ++i)
{
- context_active_texture(ret, gl_info, s);
+ context_active_texture(ret, gl_info, i);
gl_info->gl_ops.gl.p_glTexEnvi(GL_POINT_SPRITE_ARB, GL_COORD_REPLACE_ARB, GL_TRUE);
checkGLcall("glTexEnvi(GL_POINT_SPRITE_ARB, GL_COORD_REPLACE_ARB, GL_TRUE)");
}
{
GL_EXTCALL(glProvokingVertexEXT(GL_FIRST_VERTEX_CONVENTION_EXT));
}
+ if (!(d3d_info->wined3d_creation_flags & WINED3D_NO_PRIMITIVE_RESTART))
+ {
+ if (gl_info->supported[ARB_ES3_COMPATIBILITY])
+ {
+ gl_info->gl_ops.gl.p_glEnable(GL_PRIMITIVE_RESTART_FIXED_INDEX);
+ checkGLcall("enable GL_PRIMITIVE_RESTART_FIXED_INDEX");
+ }
+ else
+ {
+ FIXME("OpenGL implementation does not support GL_PRIMITIVE_RESTART_FIXED_INDEX.\n");
+ }
+ }
+ if (!(d3d_info->wined3d_creation_flags & WINED3D_LEGACY_CUBEMAP_FILTERING)
+ && gl_info->supported[ARB_SEAMLESS_CUBE_MAP])
+ {
+ gl_info->gl_ops.gl.p_glEnable(GL_TEXTURE_CUBE_MAP_SEAMLESS);
+ checkGLcall("enable seamless cube map filtering");
+ }
+ if (gl_info->supported[ARB_CLIP_CONTROL])
+ GL_EXTCALL(glPointParameteri(GL_POINT_SPRITE_COORD_ORIGIN, GL_LOWER_LEFT));
device->shader_backend->shader_init_context_state(ret);
ret->shader_update_mask = (1u << WINED3D_SHADER_TYPE_PIXEL)
| (1u << WINED3D_SHADER_TYPE_VERTEX)
- | (1u << WINED3D_SHADER_TYPE_GEOMETRY);
+ | (1u << WINED3D_SHADER_TYPE_GEOMETRY)
+ | (1u << WINED3D_SHADER_TYPE_HULL)
+ | (1u << WINED3D_SHADER_TYPE_DOMAIN)
+ | (1u << WINED3D_SHADER_TYPE_COMPUTE);
/* If this happens to be the first context for the device, dummy textures
* are not created yet. In that case, they will be created (and bound) by
* create_dummy_textures right after this context is initialized. */
- if (device->dummy_texture_2d[0])
- bind_dummy_textures(device, ret);
+ if (device->dummy_textures.tex_2d)
+ context_bind_dummy_textures(device, ret);
TRACE("Created context %p.\n", ret);
if (hdc) wined3d_release_dc(swapchain->win_handle, hdc);
device->shader_backend->shader_free_context_data(ret);
device->adapter->fragment_pipe->free_context_data(ret);
-#if defined(STAGING_CSMT)
- HeapFree(GetProcessHeap(), 0, ret->current_fb.render_targets);
-#endif /* STAGING_CSMT */
+ HeapFree(GetProcessHeap(), 0, ret->texture_type);
HeapFree(GetProcessHeap(), 0, ret->free_event_queries);
HeapFree(GetProcessHeap(), 0, ret->free_occlusion_queries);
HeapFree(GetProcessHeap(), 0, ret->free_timestamp_queries);
+ HeapFree(GetProcessHeap(), 0, ret->fbo_key);
HeapFree(GetProcessHeap(), 0, ret->draw_buffers);
HeapFree(GetProcessHeap(), 0, ret->blit_targets);
HeapFree(GetProcessHeap(), 0, ret);
TRACE("Destroying ctx %p\n", context);
+ wined3d_from_cs(device->cs);
+
+ /* We delay destroying a context when it is active. The context_release()
+ * function invokes context_destroy() again while leaving the last level. */
+ if (context->level)
+ {
+ TRACE("Delaying destruction of context %p.\n", context);
+ context->destroy_delayed = 1;
+ /* FIXME: Get rid of a pointer to swapchain from wined3d_context. */
+ context->swapchain = NULL;
+ return;
+ }
+
if (context->tid == GetCurrentThreadId() || !context->current)
{
context_destroy_gl_resources(context);
device->shader_backend->shader_free_context_data(context);
device->adapter->fragment_pipe->free_context_data(context);
-#if defined(STAGING_CSMT)
- HeapFree(GetProcessHeap(), 0, context->current_fb.render_targets);
-#endif /* STAGING_CSMT */
+ HeapFree(GetProcessHeap(), 0, context->texture_type);
+ HeapFree(GetProcessHeap(), 0, context->fbo_key);
HeapFree(GetProcessHeap(), 0, context->draw_buffers);
HeapFree(GetProcessHeap(), 0, context->blit_targets);
device_context_remove(device, context);
if (destroy) HeapFree(GetProcessHeap(), 0, context);
}
+const DWORD *context_get_tex_unit_mapping(const struct wined3d_context *context,
+ const struct wined3d_shader_version *shader_version, unsigned int *base, unsigned int *count)
+{
+ const struct wined3d_gl_info *gl_info = context->gl_info;
+
+ /* No shader version means fixed-function texturing, which uses the
+ * first MAX_TEXTURES entries of the context's mapping table. */
+ if (!shader_version)
+ {
+ *base = 0;
+ *count = MAX_TEXTURES;
+ return context->tex_unit_map;
+ }
+
+ /* SM4+ shaders get a contiguous per-stage unit range straight from the
+ * GL limits; no remapping table applies (NULL return). */
+ if (shader_version->major >= 4)
+ {
+ wined3d_gl_limits_get_texture_unit_range(&gl_info->limits, shader_version->type, base, count);
+ return NULL;
+ }
+
+ /* Pre-SM4: pixel samplers occupy the start of the mapping table and
+ * vertex samplers follow after MAX_FRAGMENT_SAMPLERS. */
+ switch (shader_version->type)
+ {
+ case WINED3D_SHADER_TYPE_PIXEL:
+ *base = 0;
+ *count = MAX_FRAGMENT_SAMPLERS;
+ break;
+ case WINED3D_SHADER_TYPE_VERTEX:
+ *base = MAX_FRAGMENT_SAMPLERS;
+ *count = MAX_VERTEX_SAMPLERS;
+ break;
+ default:
+ ERR("Unhandled shader type %#x.\n", shader_version->type);
+ *base = 0;
+ *count = 0;
+ }
+
+ return context->tex_unit_map;
+}
+
/* Context activation is done by the caller. */
static void set_blit_dimension(const struct wined3d_gl_info *gl_info, UINT width, UINT height)
{
static void context_get_rt_size(const struct wined3d_context *context, SIZE *size)
{
- const struct wined3d_surface *rt = context->current_rt;
+ const struct wined3d_texture *rt = context->current_rt.texture;
+ unsigned int level;
- if (rt->container->swapchain && rt->container->swapchain->front_buffer == rt->container)
+ if (rt->swapchain)
{
RECT window_size;
return;
}
- size->cx = rt->resource.width;
- size->cy = rt->resource.height;
+ level = context->current_rt.sub_resource_idx % rt->level_count;
+ size->cx = wined3d_texture_get_level_width(rt, level);
+ size->cy = wined3d_texture_get_level_height(rt, level);
}
/*****************************************************************************
gl_info->gl_ops.gl.p_glDisable(GL_CLIP_PLANE5); checkGLcall("glDisable(clip plane 5)");
context_invalidate_state(context, STATE_RENDER(WINED3D_RS_CLIPPING));
+ /* FIXME: Make draw_textured_quad() able to work with an upper-left origin. */
+ if (gl_info->supported[ARB_CLIP_CONTROL])
+ GL_EXTCALL(glClipControl(GL_LOWER_LEFT, GL_NEGATIVE_ONE_TO_ONE));
+
set_blit_dimension(gl_info, rt_size.cx, rt_size.cy);
/* Disable shaders */
context->active_texture = unit;
}
+void context_bind_bo(struct wined3d_context *context, GLenum binding, GLuint name)
+{
+ const struct wined3d_gl_info *gl_info = context->gl_info;
+
+ if (binding == GL_ELEMENT_ARRAY_BUFFER)
+ context_invalidate_state(context, STATE_INDEXBUFFER);
+
+ GL_EXTCALL(glBindBuffer(binding, name));
+}
+
void context_bind_texture(struct wined3d_context *context, GLenum target, GLuint name)
{
const struct wined3d_gl_info *gl_info = context->gl_info;
if (old_texture_type != target)
{
- const struct wined3d_device *device = context->swapchain->device;
+ const struct wined3d_device *device = context->device;
switch (old_texture_type)
{
case GL_NONE:
/* nothing to do */
break;
+ case GL_TEXTURE_1D:
+ gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_1D, device->dummy_textures.tex_1d);
+ checkGLcall("glBindTexture");
+ break;
+ case GL_TEXTURE_1D_ARRAY:
+ gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_1D_ARRAY, device->dummy_textures.tex_1d_array);
+ checkGLcall("glBindTexture");
+ break;
case GL_TEXTURE_2D:
- gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_2D, device->dummy_texture_2d[unit]);
+ gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_2D, device->dummy_textures.tex_2d);
+ checkGLcall("glBindTexture");
+ break;
+ case GL_TEXTURE_2D_ARRAY:
+ gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_2D_ARRAY, device->dummy_textures.tex_2d_array);
checkGLcall("glBindTexture");
break;
case GL_TEXTURE_RECTANGLE_ARB:
- gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_RECTANGLE_ARB, device->dummy_texture_rect[unit]);
+ gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_RECTANGLE_ARB, device->dummy_textures.tex_rect);
checkGLcall("glBindTexture");
break;
case GL_TEXTURE_CUBE_MAP:
- gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_CUBE_MAP, device->dummy_texture_cube[unit]);
+ gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_CUBE_MAP, device->dummy_textures.tex_cube);
+ checkGLcall("glBindTexture");
+ break;
+ case GL_TEXTURE_CUBE_MAP_ARRAY:
+ gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_CUBE_MAP_ARRAY, device->dummy_textures.tex_cube_array);
checkGLcall("glBindTexture");
break;
case GL_TEXTURE_3D:
- gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_3D, device->dummy_texture_3d[unit]);
+ gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_3D, device->dummy_textures.tex_3d);
+ checkGLcall("glBindTexture");
+ break;
+ case GL_TEXTURE_BUFFER:
+ gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_BUFFER, device->dummy_textures.tex_buffer);
checkGLcall("glBindTexture");
break;
default:
- ERR("Unexpected texture target %#x\n", old_texture_type);
+ ERR("Unexpected texture target %#x.\n", old_texture_type);
}
context->texture_type[unit] = target;
}
}
+void *context_map_bo_address(struct wined3d_context *context,
+ const struct wined3d_bo_address *data, size_t size, GLenum binding, DWORD flags)
+{
+ const struct wined3d_gl_info *gl_info = context->gl_info;
+ BYTE *memory;
+
+ if (!data->buffer_object)
+ return data->addr;
+
+ context_bind_bo(context, binding, data->buffer_object);
+
+ if (gl_info->supported[ARB_MAP_BUFFER_RANGE])
+ {
+ GLbitfield map_flags = wined3d_resource_gl_map_flags(flags) & ~GL_MAP_FLUSH_EXPLICIT_BIT;
+ memory = GL_EXTCALL(glMapBufferRange(binding, (INT_PTR)data->addr, size, map_flags));
+ }
+ else
+ {
+ memory = GL_EXTCALL(glMapBuffer(binding, wined3d_resource_gl_legacy_map_flags(flags)));
+ memory += (INT_PTR)data->addr;
+ }
+
+ context_bind_bo(context, binding, 0);
+ checkGLcall("Map buffer object");
+
+ return memory;
+}
+
+void context_unmap_bo_address(struct wined3d_context *context,
+ const struct wined3d_bo_address *data, GLenum binding)
+{
+ const struct wined3d_gl_info *gl_info = context->gl_info;
+
+ if (!data->buffer_object)
+ return;
+
+ context_bind_bo(context, binding, data->buffer_object);
+ GL_EXTCALL(glUnmapBuffer(binding));
+ context_bind_bo(context, binding, 0);
+ checkGLcall("Unmap buffer object");
+}
+
static void context_set_render_offscreen(struct wined3d_context *context, BOOL offscreen)
{
- if (context->render_offscreen == offscreen) return;
+ if (context->render_offscreen == offscreen)
+ return;
- context_invalidate_state(context, STATE_POINTSPRITECOORDORIGIN);
- context_invalidate_state(context, STATE_TRANSFORM(WINED3D_TS_PROJECTION));
context_invalidate_state(context, STATE_VIEWPORT);
context_invalidate_state(context, STATE_SCISSORRECT);
- context_invalidate_state(context, STATE_FRONTFACE);
+ if (!context->gl_info->supported[ARB_CLIP_CONTROL])
+ {
+ context_invalidate_state(context, STATE_FRONTFACE);
+ context_invalidate_state(context, STATE_POINTSPRITECOORDORIGIN);
+ context_invalidate_state(context, STATE_TRANSFORM(WINED3D_TS_PROJECTION));
+ }
+ context_invalidate_state(context, STATE_SHADER(WINED3D_SHADER_TYPE_DOMAIN));
+ if (context->gl_info->supported[ARB_FRAGMENT_COORD_CONVENTIONS])
+ context_invalidate_state(context, STATE_SHADER(WINED3D_SHADER_TYPE_PIXEL));
context->render_offscreen = offscreen;
}
const struct wined3d_rendertarget_view *depth_stencil)
{
/* Onscreen surfaces are always in a swapchain */
- struct wined3d_swapchain *swapchain = context->current_rt->container->swapchain;
+ struct wined3d_swapchain *swapchain = context->current_rt.texture->swapchain;
if (context->render_offscreen || !depth_stencil) return;
if (match_depth_stencil_format(swapchain->ds_format, depth_stencil->format)) return;
WARN("Depth stencil format is not supported by WGL, rendering the backbuffer in an FBO\n");
/* The currently active context is the necessary context to access the swapchain's onscreen buffers */
-#if defined(STAGING_CSMT)
- wined3d_resource_load_location(&context->current_rt->resource, context, WINED3D_LOCATION_TEXTURE_RGB);
+ if (!(wined3d_texture_load_location(context->current_rt.texture, context->current_rt.sub_resource_idx,
+ context, WINED3D_LOCATION_TEXTURE_RGB)))
+ ERR("Failed to load location.\n");
swapchain->render_to_fbo = TRUE;
swapchain_update_draw_bindings(swapchain);
context_set_render_offscreen(context, TRUE);
}
-static DWORD context_generate_rt_mask_no_fbo(const struct wined3d_context *context, const struct wined3d_surface *rt)
+GLenum context_get_offscreen_gl_buffer(const struct wined3d_context *context)
{
- if (!rt || rt->resource.format->id == WINED3DFMT_NULL)
- return 0;
- else if (rt->container->swapchain)
- return context_generate_rt_mask_from_surface(rt);
- else
- return context_generate_rt_mask(context->offscreenBuffer);
-#else /* STAGING_CSMT */
- surface_load_location(context->current_rt, context, WINED3D_LOCATION_TEXTURE_RGB);
- swapchain->render_to_fbo = TRUE;
- swapchain_update_draw_bindings(swapchain);
- context_set_render_offscreen(context, TRUE);
+ switch (wined3d_settings.offscreen_rendering_mode)
+ {
+ case ORM_FBO:
+ return GL_COLOR_ATTACHMENT0;
+
+ case ORM_BACKBUFFER:
+ return context->aux_buffers > 0 ? GL_AUX0 : GL_BACK;
+
+ default:
+ FIXME("Unhandled offscreen rendering mode %#x.\n", wined3d_settings.offscreen_rendering_mode);
+ return GL_BACK;
+ }
}
-static DWORD context_generate_rt_mask_no_fbo(const struct wined3d_device *device, const struct wined3d_surface *rt)
+static DWORD context_generate_rt_mask_no_fbo(const struct wined3d_context *context, struct wined3d_texture *rt)
{
if (!rt || rt->resource.format->id == WINED3DFMT_NULL)
return 0;
- else if (rt->container->swapchain)
- return context_generate_rt_mask_from_surface(rt);
+ else if (rt->swapchain)
+ return context_generate_rt_mask_from_resource(&rt->resource);
else
- return context_generate_rt_mask(device->offscreenBuffer);
-#endif /* STAGING_CSMT */
+ return context_generate_rt_mask(context_get_offscreen_gl_buffer(context));
}
/* Context activation is done by the caller. */
void context_apply_blit_state(struct wined3d_context *context, const struct wined3d_device *device)
{
- struct wined3d_surface *rt = context->current_rt;
+ struct wined3d_texture *rt = context->current_rt.texture;
+ struct wined3d_surface *surface;
DWORD rt_mask, *cur_mask;
if (wined3d_settings.offscreen_rendering_mode == ORM_FBO)
if (context->render_offscreen)
{
- wined3d_texture_load(rt->container, context, FALSE);
+ wined3d_texture_load(rt, context, FALSE);
- context_apply_fbo_state_blit(context, GL_FRAMEBUFFER, rt, NULL, rt->container->resource.draw_binding);
+ surface = rt->sub_resources[context->current_rt.sub_resource_idx].u.surface;
+ context_apply_fbo_state_blit(context, GL_FRAMEBUFFER, surface, NULL, rt->resource.draw_binding);
if (rt->resource.format->id != WINED3DFMT_NULL)
rt_mask = 1;
else
{
context->current_fbo = NULL;
context_bind_fbo(context, GL_FRAMEBUFFER, 0);
- rt_mask = context_generate_rt_mask_from_surface(rt);
+ rt_mask = context_generate_rt_mask_from_resource(&rt->resource);
}
}
else
{
-#if defined(STAGING_CSMT)
rt_mask = context_generate_rt_mask_no_fbo(context, rt);
-#else /* STAGING_CSMT */
- rt_mask = context_generate_rt_mask_no_fbo(device, rt);
-#endif /* STAGING_CSMT */
}
cur_mask = context->current_fbo ? &context->current_fbo->rt_mask : &context->draw_buffers_mask;
}
/* Context activation is done by the caller. */
-BOOL context_apply_clear_state(struct wined3d_context *context, const struct wined3d_device *device,
+BOOL context_apply_clear_state(struct wined3d_context *context, const struct wined3d_state *state,
UINT rt_count, const struct wined3d_fb_state *fb)
{
struct wined3d_rendertarget_view **rts = fb->render_targets;
struct wined3d_rendertarget_view *dsv = fb->depth_stencil;
const struct wined3d_gl_info *gl_info = context->gl_info;
DWORD rt_mask = 0, *cur_mask;
- UINT i;
+ unsigned int i;
-#if defined(STAGING_CSMT)
- if (isStateDirty(context, STATE_FRAMEBUFFER) || !wined3d_fb_equal(fb, &context->current_fb)
-#else /* STAGING_CSMT */
- if (isStateDirty(context, STATE_FRAMEBUFFER) || fb != &device->fb
-#endif /* STAGING_CSMT */
- || rt_count != context->gl_info->limits.buffers)
+ if (isStateDirty(context, STATE_FRAMEBUFFER) || fb != state->fb
+ || rt_count != gl_info->limits.buffers)
{
if (!context_validate_rt_config(rt_count, rts, dsv))
return FALSE;
if (!rt_count || wined3d_resource_is_offscreen(rts[0]->resource))
{
+ memset(context->blit_targets, 0, gl_info->limits.buffers * sizeof(*context->blit_targets));
for (i = 0; i < rt_count; ++i)
{
- context->blit_targets[i] = wined3d_rendertarget_view_get_surface(rts[i]);
+ if (rts[i])
+ {
+ context->blit_targets[i].gl_view = rts[i]->gl_view;
+ context->blit_targets[i].resource = rts[i]->resource;
+ context->blit_targets[i].sub_resource_idx = rts[i]->sub_resource_idx;
+ context->blit_targets[i].layer_count = rts[i]->layer_count;
+ }
if (rts[i] && rts[i]->format->id != WINED3DFMT_NULL)
rt_mask |= (1u << i);
}
- while (i < context->gl_info->limits.buffers)
- {
- context->blit_targets[i] = NULL;
- ++i;
- }
context_apply_fbo_state(context, GL_FRAMEBUFFER, context->blit_targets,
wined3d_rendertarget_view_get_surface(dsv),
rt_count ? rts[0]->resource->draw_binding : 0,
{
context_apply_fbo_state(context, GL_FRAMEBUFFER, NULL, NULL,
WINED3D_LOCATION_DRAWABLE, WINED3D_LOCATION_DRAWABLE);
- rt_mask = context_generate_rt_mask_from_surface(wined3d_rendertarget_view_get_surface(rts[0]));
+ rt_mask = context_generate_rt_mask_from_resource(rts[0]->resource);
}
/* If the framebuffer is not the device's fb the device's fb has to be reapplied
}
else
{
-#if defined(STAGING_CSMT)
rt_mask = context_generate_rt_mask_no_fbo(context,
- rt_count ? wined3d_rendertarget_view_get_surface(rts[0]) : NULL);
- }
-
- wined3d_fb_copy(&context->current_fb, fb);
-#else /* STAGING_CSMT */
- rt_mask = context_generate_rt_mask_no_fbo(device,
- rt_count ? wined3d_rendertarget_view_get_surface(rts[0]) : NULL);
+ rt_count ? wined3d_rendertarget_view_get_surface(rts[0])->container : NULL);
}
-#endif /* STAGING_CSMT */
}
else if (wined3d_settings.offscreen_rendering_mode == ORM_FBO
&& (!rt_count || wined3d_resource_is_offscreen(rts[0]->resource)))
}
else
{
-#if defined(STAGING_CSMT)
rt_mask = context_generate_rt_mask_no_fbo(context,
-#else /* STAGING_CSMT */
- rt_mask = context_generate_rt_mask_no_fbo(device,
-#endif /* STAGING_CSMT */
- rt_count ? wined3d_rendertarget_view_get_surface(rts[0]) : NULL);
+ rt_count ? wined3d_rendertarget_view_get_surface(rts[0])->container : NULL);
}
cur_mask = context->current_fbo ? &context->current_fbo->rt_mask : &context->draw_buffers_mask;
context_check_fbo_status(context, GL_FRAMEBUFFER);
}
- if (context->last_was_blit)
- context->last_was_blit = FALSE;
+ context->last_was_blit = FALSE;
/* Blending and clearing should be orthogonal, but tests on the nvidia
* driver show that disabling blending when clearing improves the clearing
* performance incredibly. */
gl_info->gl_ops.gl.p_glDisable(GL_BLEND);
gl_info->gl_ops.gl.p_glEnable(GL_SCISSOR_TEST);
- if (gl_info->supported[ARB_FRAMEBUFFER_SRGB])
+ if (rt_count && gl_info->supported[ARB_FRAMEBUFFER_SRGB])
{
- if (!(context->d3d_info->wined3d_creation_flags & WINED3D_SRGB_READ_WRITE_CONTROL)
- || device->state.render_states[WINED3D_RS_SRGBWRITEENABLE])
+ if (needs_srgb_write(context, state, fb))
gl_info->gl_ops.gl.p_glEnable(GL_FRAMEBUFFER_SRGB);
else
gl_info->gl_ops.gl.p_glDisable(GL_FRAMEBUFFER_SRGB);
return TRUE;
}
-#if defined(STAGING_CSMT)
static DWORD find_draw_buffers_mask(const struct wined3d_context *context, const struct wined3d_state *state)
{
- struct wined3d_rendertarget_view **rts = state->fb.render_targets;
- struct wined3d_shader *ps = state->shader[WINED3D_SHADER_TYPE_PIXEL];
- DWORD rt_mask, rt_mask_bits;
- unsigned int i;
-
- if (wined3d_settings.offscreen_rendering_mode != ORM_FBO)
- return context_generate_rt_mask_no_fbo(context, wined3d_rendertarget_view_get_surface(rts[0]));
-#else /* STAGING_CSMT */
-static DWORD find_draw_buffers_mask(const struct wined3d_context *context, const struct wined3d_device *device)
-{
- const struct wined3d_state *state = &device->state;
struct wined3d_rendertarget_view **rts = state->fb->render_targets;
struct wined3d_shader *ps = state->shader[WINED3D_SHADER_TYPE_PIXEL];
DWORD rt_mask, rt_mask_bits;
unsigned int i;
if (wined3d_settings.offscreen_rendering_mode != ORM_FBO)
- return context_generate_rt_mask_no_fbo(device, wined3d_rendertarget_view_get_surface(rts[0]));
-#endif /* STAGING_CSMT */
+ return context_generate_rt_mask_no_fbo(context, wined3d_rendertarget_view_get_surface(rts[0])->container);
else if (!context->render_offscreen)
- return context_generate_rt_mask_from_surface(wined3d_rendertarget_view_get_surface(rts[0]));
+ return context_generate_rt_mask_from_resource(rts[0]->resource);
rt_mask = ps ? ps->reg_maps.rt_mask : 1;
rt_mask &= context->d3d_info->valid_rt_mask;
/* Context activation is done by the caller. */
void context_state_fb(struct wined3d_context *context, const struct wined3d_state *state, DWORD state_id)
{
-#if defined(STAGING_CSMT)
- const struct wined3d_fb_state *fb = &state->fb;
DWORD rt_mask = find_draw_buffers_mask(context, state);
-#else /* STAGING_CSMT */
- const struct wined3d_device *device = context->swapchain->device;
const struct wined3d_fb_state *fb = state->fb;
- DWORD rt_mask = find_draw_buffers_mask(context, device);
-#endif /* STAGING_CSMT */
DWORD *cur_mask;
if (wined3d_settings.offscreen_rendering_mode == ORM_FBO)
{
unsigned int i;
+ memset(context->blit_targets, 0, context->gl_info->limits.buffers * sizeof (*context->blit_targets));
for (i = 0; i < context->gl_info->limits.buffers; ++i)
{
- context->blit_targets[i] = wined3d_rendertarget_view_get_surface(fb->render_targets[i]);
+ if (fb->render_targets[i])
+ {
+ context->blit_targets[i].gl_view = fb->render_targets[i]->gl_view;
+ context->blit_targets[i].resource = fb->render_targets[i]->resource;
+ context->blit_targets[i].sub_resource_idx = fb->render_targets[i]->sub_resource_idx;
+ context->blit_targets[i].layer_count = fb->render_targets[i]->layer_count;
+ }
}
context_apply_fbo_state(context, GL_FRAMEBUFFER, context->blit_targets,
wined3d_rendertarget_view_get_surface(fb->depth_stencil),
- fb->render_targets[0]->resource->draw_binding,
+ fb->render_targets[0] ? fb->render_targets[0]->resource->draw_binding : 0,
fb->depth_stencil ? fb->depth_stencil->resource->draw_binding : 0);
}
}
context_apply_draw_buffers(context, rt_mask);
*cur_mask = rt_mask;
}
-#if defined(STAGING_CSMT)
-
- wined3d_fb_copy(&context->current_fb, &state->fb);
-#endif /* STAGING_CSMT */
+ context->constant_update_mask |= WINED3D_SHADER_CONST_PS_Y_CORR;
}
static void context_map_stage(struct wined3d_context *context, DWORD stage, DWORD unit)
const struct wined3d_state *state)
{
const struct wined3d_d3d_info *d3d_info = context->d3d_info;
- const struct wined3d_gl_info *gl_info = context->gl_info;
unsigned int i, tex;
WORD ffu_map;
- context_update_fixed_function_usage_map(context, state);
-
- if (gl_info->limits.combined_samplers >= MAX_COMBINED_SAMPLERS)
- return;
-
ffu_map = context->fixed_function_usage_map;
if (d3d_info->limits.ffp_textures == d3d_info->limits.ffp_blend_stages
static void context_map_psamplers(struct wined3d_context *context, const struct wined3d_state *state)
{
const struct wined3d_d3d_info *d3d_info = context->d3d_info;
- const struct wined3d_gl_info *gl_info = context->gl_info;
const struct wined3d_shader_resource_info *resource_info =
state->shader[WINED3D_SHADER_TYPE_PIXEL]->reg_maps.resource_info;
unsigned int i;
- if (gl_info->limits.combined_samplers >= MAX_COMBINED_SAMPLERS)
- return;
-
for (i = 0; i < MAX_FRAGMENT_SAMPLERS; ++i)
{
if (resource_info[i].type && context->tex_unit_map[i] != i)
state->shader[WINED3D_SHADER_TYPE_VERTEX]->reg_maps.resource_info;
const struct wined3d_shader_resource_info *ps_resource_info = NULL;
const struct wined3d_gl_info *gl_info = context->gl_info;
- int start = min(MAX_COMBINED_SAMPLERS, gl_info->limits.combined_samplers) - 1;
+ int start = min(MAX_COMBINED_SAMPLERS, gl_info->limits.graphics_samplers) - 1;
int i;
- if (gl_info->limits.combined_samplers >= MAX_COMBINED_SAMPLERS)
- return;
-
/* Note that we only care if a resource is used or not, not the
* resource's specific type. Otherwise we'd need to call
* shader_update_samplers() here for 1.x pixelshaders. */
static void context_update_tex_unit_map(struct wined3d_context *context, const struct wined3d_state *state)
{
+ const struct wined3d_gl_info *gl_info = context->gl_info;
BOOL vs = use_vs(state);
BOOL ps = use_ps(state);
+ if (!ps)
+ context_update_fixed_function_usage_map(context, state);
+
/* Try to go for a 1:1 mapping of the samplers when possible. Pixel shaders
* need a 1:1 map at the moment.
* When the mapping of a stage is changed, sampler and ALL texture stage
* states have to be reset. */
+ if (gl_info->limits.graphics_samplers >= MAX_COMBINED_SAMPLERS)
+ return;
+
if (ps)
context_map_psamplers(context, state);
else
/* Context activation is done by the caller. */
void context_state_drawbuf(struct wined3d_context *context, const struct wined3d_state *state, DWORD state_id)
{
-#if defined(STAGING_CSMT)
DWORD rt_mask, *cur_mask;
if (isStateDirty(context, STATE_FRAMEBUFFER)) return;
cur_mask = context->current_fbo ? &context->current_fbo->rt_mask : &context->draw_buffers_mask;
rt_mask = find_draw_buffers_mask(context, state);
-#else /* STAGING_CSMT */
- const struct wined3d_device *device = context->swapchain->device;
- DWORD rt_mask, *cur_mask;
-
- if (isStateDirty(context, STATE_FRAMEBUFFER)) return;
-
- cur_mask = context->current_fbo ? &context->current_fbo->rt_mask : &context->draw_buffers_mask;
- rt_mask = find_draw_buffers_mask(context, device);
-#endif /* STAGING_CSMT */
if (rt_mask != *cur_mask)
{
context_apply_draw_buffers(context, rt_mask);
*regnum = WINED3D_FFP_TEXCOORD0 + usage_idx;
else
{
- FIXME("Unsupported input stream [usage=%s, usage_idx=%u].\n", debug_d3ddeclusage(usage), usage_idx);
- *regnum = ~0U;
+ WARN("Unsupported input stream [usage=%s, usage_idx=%u].\n", debug_d3ddeclusage(usage), usage_idx);
+ *regnum = ~0u;
return FALSE;
}
}
/* Context activation is done by the caller. */
-void context_stream_info_from_declaration(struct wined3d_context *context,
- const struct wined3d_state *state, struct wined3d_stream_info *stream_info)
+void wined3d_stream_info_from_declaration(struct wined3d_stream_info *stream_info,
+ const struct wined3d_state *state, const struct wined3d_gl_info *gl_info,
+ const struct wined3d_d3d_info *d3d_info)
{
/* We need to deal with frequency data! */
struct wined3d_vertex_declaration *declaration = state->vertex_declaration;
+ BOOL generic_attributes = d3d_info->ffp_generic_attributes;
BOOL use_vshader = use_vs(state);
- BOOL generic_attributes = context->d3d_info->ffp_generic_attributes;
unsigned int i;
stream_info->use_map = 0;
stream_info->swizzle_map = 0;
+ stream_info->position_transformed = 0;
+
+ if (!declaration)
+ return;
+
stream_info->position_transformed = declaration->position_transformed;
/* Translate the declaration into strided data. */
stream_info->elements[idx].divisor = 0;
}
- if (!context->gl_info->supported[ARB_VERTEX_ARRAY_BGRA]
+ if (!gl_info->supported[ARB_VERTEX_ARRAY_BGRA]
&& element->format->id == WINED3DFMT_B8G8R8A8_UNORM)
{
stream_info->swizzle_map |= 1u << idx;
/* Context activation is done by the caller. */
static void context_update_stream_info(struct wined3d_context *context, const struct wined3d_state *state)
{
- const struct wined3d_gl_info *gl_info = context->gl_info;
- const struct wined3d_d3d_info *d3d_info = context->d3d_info;
struct wined3d_stream_info *stream_info = &context->stream_info;
+ const struct wined3d_d3d_info *d3d_info = context->d3d_info;
+ const struct wined3d_gl_info *gl_info = context->gl_info;
DWORD prev_all_vbo = stream_info->all_vbo;
unsigned int i;
WORD map;
- context_stream_info_from_declaration(context, state, stream_info);
+ wined3d_stream_info_from_declaration(stream_info, state, gl_info, d3d_info);
stream_info->all_vbo = 1;
context->num_buffer_queries = 0;
* sources. In most sane cases the pointer - offset will still be > 0,
* otherwise it will wrap around to some big value. Hope that with the
* indices the driver wraps it back internally. If not,
- * drawStridedSlow is needed, including a vertex buffer path. */
+ * draw_primitive_immediate_mode() is needed, including a vertex buffer
+ * path. */
if (state->load_base_vertex_index < 0)
{
WARN_(d3d_perf)("load_base_vertex_index is < 0 (%d), not using VBOs.\n",
state->load_base_vertex_index);
element->data.buffer_object = 0;
- element->data.addr += (ULONG_PTR)buffer_get_sysmem(buffer, context);
+ element->data.addr += (ULONG_PTR)wined3d_buffer_load_sysmem(buffer, context);
if ((UINT_PTR)element->data.addr < -state->load_base_vertex_index * element->stride)
FIXME("System memory vertex data load offset is negative!\n");
}
else
{
- buffer_internal_preload(buffer, context, state);
- buffer_get_memory(buffer, context, &data);
+ wined3d_buffer_load(buffer, context, state);
+ wined3d_buffer_get_memory(buffer, &data, buffer->locations);
element->data.buffer_object = data.buffer_object;
element->data.addr += (ULONG_PTR)data.addr;
}
TRACE("Load array %u {%#x:%p}.\n", i, element->data.buffer_object, element->data.addr);
}
+ if (prev_all_vbo != stream_info->all_vbo)
+ context_invalidate_state(context, STATE_INDEXBUFFER);
+
+ context->use_immediate_mode_draw = FALSE;
+
+ if (stream_info->all_vbo)
+ return;
+
if (use_vs(state))
{
- if (state->vertex_declaration->half_float_conv_needed && !stream_info->all_vbo)
+ if (state->vertex_declaration->half_float_conv_needed)
{
-#if defined(STAGING_CSMT)
- TRACE("Using draw_strided_slow with vertex shaders for FLOAT16 conversion.\n");
-#else /* STAGING_CSMT */
- TRACE("Using drawStridedSlow with vertex shaders for FLOAT16 conversion.\n");
-#endif /* STAGING_CSMT */
+ TRACE("Using immediate mode draw with vertex shaders for FLOAT16 conversion.\n");
context->use_immediate_mode_draw = TRUE;
}
- else
- {
- context->use_immediate_mode_draw = FALSE;
- }
}
else
{
WORD slow_mask = -!d3d_info->ffp_generic_attributes & (1u << WINED3D_FFP_PSIZE);
- slow_mask |= -!gl_info->supported[ARB_VERTEX_ARRAY_BGRA]
- & ((1u << WINED3D_FFP_DIFFUSE) | (1u << WINED3D_FFP_SPECULAR));
+ slow_mask |= -(!gl_info->supported[ARB_VERTEX_ARRAY_BGRA] && !d3d_info->ffp_generic_attributes)
+ & ((1u << WINED3D_FFP_DIFFUSE) | (1u << WINED3D_FFP_SPECULAR) | (1u << WINED3D_FFP_BLENDWEIGHT));
- if (((stream_info->position_transformed && !d3d_info->xyzrhw)
- || (stream_info->use_map & slow_mask)) && !stream_info->all_vbo)
+ if ((stream_info->position_transformed && !d3d_info->xyzrhw)
+ || (stream_info->use_map & slow_mask))
context->use_immediate_mode_draw = TRUE;
- else
- context->use_immediate_mode_draw = FALSE;
}
-
- if (prev_all_vbo != stream_info->all_vbo)
- context_invalidate_state(context, STATE_INDEXBUFFER);
}
/* Context activation is done by the caller. */
}
}
-static void context_load_shader_resources(struct wined3d_context *context, const struct wined3d_state *state)
+static void context_load_shader_resources(struct wined3d_context *context, const struct wined3d_state *state,
+ unsigned int shader_mask)
{
struct wined3d_shader_sampler_map_entry *entry;
struct wined3d_shader_resource_view *view;
for (i = 0; i < WINED3D_SHADER_TYPE_COUNT; ++i)
{
+ if (!(shader_mask & (1u << i)))
+ continue;
+
if (!(shader = state->shader[i]))
continue;
for (j = 0; j < WINED3D_MAX_CBS; ++j)
{
if (state->cb[i][j])
- buffer_internal_preload(state->cb[i][j], context, state);
+ wined3d_buffer_load(state->cb[i][j], context, state);
}
for (j = 0; j < shader->reg_maps.sampler_map.count; ++j)
entry = &shader->reg_maps.sampler_map.entries[j];
if (!(view = state->shader_resource_view[i][entry->resource_idx]))
- {
- WARN("No resource view bound at index %u, %u.\n", i, entry->resource_idx);
continue;
- }
if (view->resource->type == WINED3D_RTYPE_BUFFER)
- buffer_internal_preload(buffer_from_resource(view->resource), context, state);
+ wined3d_buffer_load(buffer_from_resource(view->resource), context, state);
else
- wined3d_texture_load(wined3d_texture_from_resource(view->resource), context, FALSE);
+ wined3d_texture_load(texture_from_resource(view->resource), context, FALSE);
}
}
}
-static void context_bind_shader_resources(struct wined3d_context *context, const struct wined3d_state *state)
+static void context_bind_shader_resources(struct wined3d_context *context,
+ const struct wined3d_state *state, enum wined3d_shader_type shader_type)
{
- const struct wined3d_device *device = context->swapchain->device;
- const struct wined3d_gl_info *gl_info = context->gl_info;
+ unsigned int bind_idx, shader_sampler_count, base, count, i;
+ const struct wined3d_device *device = context->device;
struct wined3d_shader_sampler_map_entry *entry;
struct wined3d_shader_resource_view *view;
+ const struct wined3d_shader *shader;
struct wined3d_sampler *sampler;
- struct wined3d_texture *texture;
- struct wined3d_shader *shader;
- unsigned int i, j, count;
- GLuint sampler_name;
+ const DWORD *tex_unit_map;
- static const struct
+ if (!(shader = state->shader[shader_type]))
+ return;
+
+ tex_unit_map = context_get_tex_unit_mapping(context,
+ &shader->reg_maps.shader_version, &base, &count);
+
+ shader_sampler_count = shader->reg_maps.sampler_map.count;
+ if (shader_sampler_count > count)
+ FIXME("Shader %p needs %u samplers, but only %u are supported.\n",
+ shader, shader_sampler_count, count);
+ count = min(shader_sampler_count, count);
+
+ for (i = 0; i < count; ++i)
{
- enum wined3d_shader_type type;
- unsigned int base_idx;
- unsigned int count;
+ entry = &shader->reg_maps.sampler_map.entries[i];
+ bind_idx = base + entry->bind_idx;
+ if (tex_unit_map)
+ bind_idx = tex_unit_map[bind_idx];
+
+ if (!(view = state->shader_resource_view[shader_type][entry->resource_idx]))
+ {
+ WARN("No resource view bound at index %u, %u.\n", shader_type, entry->resource_idx);
+ continue;
+ }
+
+ if (entry->sampler_idx == WINED3D_SAMPLER_DEFAULT)
+ sampler = device->default_sampler;
+ else if (!(sampler = state->sampler[shader_type][entry->sampler_idx]))
+ sampler = device->null_sampler;
+ wined3d_shader_resource_view_bind(view, bind_idx, sampler, context);
}
- shader_types[] =
- {
- {WINED3D_SHADER_TYPE_PIXEL, 0, MAX_FRAGMENT_SAMPLERS},
- {WINED3D_SHADER_TYPE_VERTEX, MAX_FRAGMENT_SAMPLERS, MAX_VERTEX_SAMPLERS},
- };
+}
+
+static void context_load_unordered_access_resources(struct wined3d_context *context,
+ const struct wined3d_shader *shader, struct wined3d_unordered_access_view * const *views)
+{
+ struct wined3d_unordered_access_view *view;
+ struct wined3d_texture *texture;
+ struct wined3d_buffer *buffer;
+ unsigned int i;
+
+ context->uses_uavs = 0;
- for (i = 0; i < ARRAY_SIZE(shader_types); ++i)
+ if (!shader)
+ return;
+
+ for (i = 0; i < MAX_UNORDERED_ACCESS_VIEWS; ++i)
{
- if (!(shader = state->shader[shader_types[i].type]))
+ if (!(view = views[i]))
continue;
- count = shader->reg_maps.sampler_map.count;
- if (count > shader_types[i].count)
+ if (view->resource->type == WINED3D_RTYPE_BUFFER)
{
- FIXME("Shader %p needs %u samplers, but only %u are supported.\n",
- shader, count, shader_types[i].count);
- count = shader_types[i].count;
+ buffer = buffer_from_resource(view->resource);
+ wined3d_buffer_load_location(buffer, context, WINED3D_LOCATION_BUFFER);
+ wined3d_unordered_access_view_invalidate_location(view, ~WINED3D_LOCATION_BUFFER);
}
-
- for (j = 0; j < count; ++j)
+ else
{
- entry = &shader->reg_maps.sampler_map.entries[j];
+ texture = texture_from_resource(view->resource);
+ wined3d_texture_load(texture, context, FALSE);
+ wined3d_unordered_access_view_invalidate_location(view, ~WINED3D_LOCATION_TEXTURE_RGB);
+ }
- if (!(view = state->shader_resource_view[shader_types[i].type][entry->resource_idx]))
- {
- WARN("No resource view bound at index %u, %u.\n", shader_types[i].type, entry->resource_idx);
- continue;
- }
+ context->uses_uavs = 1;
+ }
+}
- if (view->resource->type == WINED3D_RTYPE_BUFFER)
- {
- FIXME("Buffer shader resources not supported.\n");
- continue;
- }
+static void context_bind_unordered_access_views(struct wined3d_context *context,
+ const struct wined3d_shader *shader, struct wined3d_unordered_access_view * const *views)
+{
+ const struct wined3d_gl_info *gl_info = context->gl_info;
+ struct wined3d_unordered_access_view *view;
+ GLuint texture_name;
+ unsigned int i;
+ GLint level;
- if (entry->sampler_idx == WINED3D_SAMPLER_DEFAULT)
- {
- sampler_name = device->default_sampler;
- }
- else if ((sampler = state->sampler[shader_types[i].type][entry->sampler_idx]))
- {
- sampler_name = sampler->name;
- }
- else
- {
- WARN("No sampler object bound at index %u, %u.\n", shader_types[i].type, entry->sampler_idx);
- continue;
- }
+ if (!shader)
+ return;
- texture = wined3d_texture_from_resource(view->resource);
- context_active_texture(context, gl_info, shader_types[i].base_idx + entry->bind_idx);
- wined3d_texture_bind(texture, context, FALSE);
+ for (i = 0; i < MAX_UNORDERED_ACCESS_VIEWS; ++i)
+ {
+ if (!(view = views[i]))
+ {
+ if (shader->reg_maps.uav_resource_info[i].type)
+ WARN("No unordered access view bound at index %u.\n", i);
+ GL_EXTCALL(glBindImageTexture(i, 0, 0, GL_FALSE, 0, GL_READ_WRITE, GL_R8));
+ continue;
+ }
- GL_EXTCALL(glBindSampler(shader_types[i].base_idx + entry->bind_idx, sampler_name));
- checkGLcall("glBindSampler");
+ if (view->gl_view.name)
+ {
+ texture_name = view->gl_view.name;
+ level = 0;
+ }
+ else if (view->resource->type != WINED3D_RTYPE_BUFFER)
+ {
+ struct wined3d_texture *texture = texture_from_resource(view->resource);
+ texture_name = wined3d_texture_get_texture_name(texture, context, FALSE);
+ level = view->desc.u.texture.level_idx;
+ }
+ else
+ {
+ FIXME("Unsupported buffer unordered access view.\n");
+ GL_EXTCALL(glBindImageTexture(i, 0, 0, GL_FALSE, 0, GL_READ_WRITE, GL_R8));
+ continue;
}
+
+ GL_EXTCALL(glBindImageTexture(i, texture_name, level, GL_TRUE, 0, GL_READ_WRITE,
+ view->format->glInternal));
+
+ if (view->counter_bo)
+ GL_EXTCALL(glBindBufferBase(GL_ATOMIC_COUNTER_BUFFER, i, view->counter_bo));
}
+ checkGLcall("Bind unordered access views");
}
-/* Context activation is done by the caller. */
-#if defined(STAGING_CSMT)
-BOOL context_apply_draw_state(struct wined3d_context *context, const struct wined3d_device *device,
+/* Ensure every buffer bound for stream output has an up-to-date GL buffer
+ * object; presumably the draw writes the BUFFER location, so all other
+ * locations are invalidated up front. */
+static void context_load_stream_output_buffers(struct wined3d_context *context,
const struct wined3d_state *state)
{
- const struct StateEntry *state_table = context->state_table;
- const struct wined3d_fb_state *fb = &state->fb;
-#else /* STAGING_CSMT */
-BOOL context_apply_draw_state(struct wined3d_context *context, struct wined3d_device *device)
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(state->stream_output); ++i)
+ {
+ struct wined3d_buffer *buffer;
+ if (!(buffer = state->stream_output[i].buffer))
+ continue;
+
+ wined3d_buffer_load(buffer, context, state);
+ /* All locations other than WINED3D_LOCATION_BUFFER become stale. */
+ wined3d_buffer_invalidate_location(buffer, ~WINED3D_LOCATION_BUFFER);
+ }
+}
+
+/* Context activation is done by the caller. */
+BOOL context_apply_draw_state(struct wined3d_context *context,
+ const struct wined3d_device *device, const struct wined3d_state *state)
{
- const struct wined3d_state *state = &device->state;
const struct StateEntry *state_table = context->state_table;
+ const struct wined3d_gl_info *gl_info = context->gl_info;
const struct wined3d_fb_state *fb = state->fb;
-#endif /* STAGING_CSMT */
unsigned int i;
WORD map;
- if (!context_validate_rt_config(context->gl_info->limits.buffers,
- fb->render_targets, fb->depth_stencil))
+ if (!context_validate_rt_config(gl_info->limits.buffers, fb->render_targets, fb->depth_stencil))
return FALSE;
if (wined3d_settings.offscreen_rendering_mode == ORM_FBO && isStateDirty(context, STATE_FRAMEBUFFER))
* updating a resource location. */
context_update_tex_unit_map(context, state);
context_preload_textures(context, state);
- context_load_shader_resources(context, state);
+ /* Load resources for the graphics stages only; the compute stage is
+ * handled by context_apply_compute_state(). */
+ context_load_shader_resources(context, state, ~(1u << WINED3D_SHADER_TYPE_COMPUTE));
+ context_load_unordered_access_resources(context, state->shader[WINED3D_SHADER_TYPE_PIXEL],
+ state->unordered_access_view[WINED3D_PIPELINE_GRAPHICS]);
+ context_load_stream_output_buffers(context, state);
/* TODO: Right now the dependency on the vertex shader is necessary
- * since context_stream_info_from_declaration depends on the reg_maps of
+ * since wined3d_stream_info_from_declaration() depends on the reg_maps of
* the current VS but maybe it's possible to relax the coupling in some
* situations at least. */
if (isStateDirty(context, STATE_VDECL) || isStateDirty(context, STATE_STREAMSRC)
for (i = 0, map = context->stream_info.use_map; map; map >>= 1, ++i)
{
if (map & 1)
-#if defined(STAGING_CSMT)
- buffer_internal_preload(state->streams[context->stream_info.elements[i].stream_idx].buffer,
+ wined3d_buffer_load(state->streams[context->stream_info.elements[i].stream_idx].buffer,
context, state);
}
- /* PreLoad may kick buffers out of vram. */
+ /* Loading the buffers above may have invalidated the stream info. */
if (isStateDirty(context, STATE_STREAMSRC))
context_update_stream_info(context, state);
-#else /* STAGING_CSMT */
- buffer_mark_used(state->streams[context->stream_info.elements[i].stream_idx].buffer);
- }
-#endif /* STAGING_CSMT */
}
if (state->index_buffer)
{
if (context->stream_info.all_vbo)
- buffer_internal_preload(state->index_buffer, context, state);
+ wined3d_buffer_load(state->index_buffer, context, state);
else
- buffer_get_sysmem(state->index_buffer, context);
+ wined3d_buffer_load_sysmem(state->index_buffer, context);
}
for (i = 0; i < context->numDirtyEntries; ++i)
state_table[rep].apply(context, state, rep);
}
- if (context->shader_update_mask)
+ /* Select shaders for the graphics stages only; the compute bit is left
+ * set for context_apply_compute_state() to consume. */
+ if (context->shader_update_mask & ~(1u << WINED3D_SHADER_TYPE_COMPUTE))
{
device->shader_backend->shader_select(device->shader_priv, context, state);
- context->shader_update_mask = 0;
+ context->shader_update_mask &= 1u << WINED3D_SHADER_TYPE_COMPUTE;
}
if (context->constant_update_mask)
if (context->update_shader_resource_bindings)
{
- context_bind_shader_resources(context, state);
+ for (i = 0; i < WINED3D_SHADER_TYPE_GRAPHICS_COUNT; ++i)
+ context_bind_shader_resources(context, state, i);
context->update_shader_resource_bindings = 0;
+ /* When compute shares all sampler units with graphics, the compute
+ * bindings need a refresh as well. */
+ if (gl_info->limits.combined_samplers == gl_info->limits.graphics_samplers)
+ context->update_compute_shader_resource_bindings = 1;
+ }
+
+ /* Re-bind graphics UAVs and flag the compute UAV bindings for refresh. */
+ if (context->update_unordered_access_view_bindings)
+ {
+ context_bind_unordered_access_views(context,
+ state->shader[WINED3D_SHADER_TYPE_PIXEL],
+ state->unordered_access_view[WINED3D_PIPELINE_GRAPHICS]);
+ context->update_unordered_access_view_bindings = 0;
+ context->update_compute_unordered_access_view_bindings = 1;
}
if (wined3d_settings.offscreen_rendering_mode == ORM_FBO)
return TRUE;
}
-static void context_setup_target(struct wined3d_context *context, struct wined3d_surface *target)
+/* Context activation is done by the caller. Compute analogue of
+ * context_apply_draw_state(): loads compute resources, applies dirty
+ * compute states and (re)binds compute shader resources and UAVs. */
+void context_apply_compute_state(struct wined3d_context *context,
+ const struct wined3d_device *device, const struct wined3d_state *state)
+{
+ const struct StateEntry *state_table = context->state_table;
+ const struct wined3d_gl_info *gl_info = context->gl_info;
+ unsigned int state_id, i, j;
+
+ context_load_shader_resources(context, state, 1u << WINED3D_SHADER_TYPE_COMPUTE);
+ context_load_unordered_access_resources(context, state->shader[WINED3D_SHADER_TYPE_COMPUTE],
+ state->unordered_access_view[WINED3D_PIPELINE_COMPUTE]);
+
+ /* Apply every dirty compute state; state_id walks the STATE_COMPUTE_*
+ * range in step with the bit position. */
+ for (i = 0, state_id = STATE_COMPUTE_OFFSET; i < ARRAY_SIZE(context->dirty_compute_states); ++i)
+ {
+ for (j = 0; j < sizeof(*context->dirty_compute_states) * CHAR_BIT; ++j, ++state_id)
+ {
+ if (context->dirty_compute_states[i] & (1u << j))
+ state_table[state_id].apply(context, state, state_id);
+ }
+ }
+ /* Clear the whole dirty-bit array; sizeof(*...) would only clear the
+ * first element while the loop above visits all of them. */
+ memset(context->dirty_compute_states, 0, sizeof(context->dirty_compute_states));
+
+ if (context->shader_update_mask & (1u << WINED3D_SHADER_TYPE_COMPUTE))
+ {
+ device->shader_backend->shader_select_compute(device->shader_priv, context, state);
+ context->shader_update_mask &= ~(1u << WINED3D_SHADER_TYPE_COMPUTE);
+ }
+
+ if (context->update_compute_shader_resource_bindings)
+ {
+ context_bind_shader_resources(context, state, WINED3D_SHADER_TYPE_COMPUTE);
+ context->update_compute_shader_resource_bindings = 0;
+ /* When compute shares all sampler units with graphics, the graphics
+ * bindings need a refresh as well. */
+ if (gl_info->limits.combined_samplers == gl_info->limits.graphics_samplers)
+ context->update_shader_resource_bindings = 1;
+ }
+
+ if (context->update_compute_unordered_access_view_bindings)
+ {
+ context_bind_unordered_access_views(context,
+ state->shader[WINED3D_SHADER_TYPE_COMPUTE],
+ state->unordered_access_view[WINED3D_PIPELINE_COMPUTE]);
+ context->update_compute_unordered_access_view_bindings = 0;
+ context->update_unordered_access_view_bindings = 1;
+ }
+
+ context->last_was_blit = FALSE;
+}
+
+/* Stop a currently active transform feedback session and reset both the
+ * active and paused flags so a new session can be started later. */
+void context_end_transform_feedback(struct wined3d_context *context)
+{
+ const struct wined3d_gl_info *gl_info = context->gl_info;
+ if (context->transform_feedback_active)
+ {
+ GL_EXTCALL(glEndTransformFeedback());
+ checkGLcall("glEndTransformFeedback");
+ context->transform_feedback_active = 0;
+ context->transform_feedback_paused = 0;
+ }
+}
+
+/* Point the context at a new render target (texture + sub-resource),
+ * invalidating whatever render state depends on the target's format and
+ * saving the old onscreen drawable's contents if necessary. */
+static void context_setup_target(struct wined3d_context *context,
+ struct wined3d_texture *texture, unsigned int sub_resource_idx)
{
BOOL old_render_offscreen = context->render_offscreen, render_offscreen;
- render_offscreen = wined3d_resource_is_offscreen(&target->container->resource);
- if (context->current_rt == target && render_offscreen == old_render_offscreen) return;
+ render_offscreen = wined3d_resource_is_offscreen(&texture->resource);
+ if (context->current_rt.texture == texture
+ && context->current_rt.sub_resource_idx == sub_resource_idx
+ && render_offscreen == old_render_offscreen)
+ return;
/* To compensate the lack of format switching with some offscreen rendering methods and on onscreen buffers
* the alpha blend state changes with different render target formats. */
- if (!context->current_rt)
+ if (!context->current_rt.texture)
{
context_invalidate_state(context, STATE_RENDER(WINED3D_RS_ALPHABLENDENABLE));
}
else
{
- const struct wined3d_format *old = context->current_rt->resource.format;
- const struct wined3d_format *new = target->resource.format;
+ const struct wined3d_format *old = context->current_rt.texture->resource.format;
+ const struct wined3d_format *new = texture->resource.format;
if (old->id != new->id)
{
/* Disable blending when the alpha mask has changed and when a format doesn't support blending. */
if ((old->alpha_size && !new->alpha_size) || (!old->alpha_size && new->alpha_size)
- || !(target->container->resource.format_flags & WINED3DFMT_FLAG_POSTPIXELSHADER_BLENDING))
+ || !(texture->resource.format_flags & WINED3DFMT_FLAG_POSTPIXELSHADER_BLENDING))
context_invalidate_state(context, STATE_RENDER(WINED3D_RS_ALPHABLENDENABLE));
/* Update sRGB writing when switching between formats that do/do not support sRGB writing */
- if ((context->current_rt->container->resource.format_flags & WINED3DFMT_FLAG_SRGB_WRITE)
- != (target->container->resource.format_flags & WINED3DFMT_FLAG_SRGB_WRITE))
+ if ((context->current_rt.texture->resource.format_flags & WINED3DFMT_FLAG_SRGB_WRITE)
+ != (texture->resource.format_flags & WINED3DFMT_FLAG_SRGB_WRITE))
context_invalidate_state(context, STATE_RENDER(WINED3D_RS_SRGBWRITEENABLE));
}
* has to be called with the old rendertarget active, otherwise a
* wrong drawable is read. */
if (wined3d_settings.offscreen_rendering_mode != ORM_FBO
- && old_render_offscreen && context->current_rt != target)
+ && old_render_offscreen && (context->current_rt.texture != texture
+ || context->current_rt.sub_resource_idx != sub_resource_idx))
{
- struct wined3d_texture *texture = context->current_rt->container;
+ /* Remember the outgoing render target so its drawable contents can be
+ * read back into the texture before we switch away from it. */
+ unsigned int prev_sub_resource_idx = context->current_rt.sub_resource_idx;
+ struct wined3d_texture *prev_texture = context->current_rt.texture;
/* Read the back buffer of the old drawable into the destination texture. */
- if (texture->texture_srgb.name)
- wined3d_texture_load(texture, context, TRUE);
- wined3d_texture_load(texture, context, FALSE);
-#if defined(STAGING_CSMT)
- wined3d_resource_invalidate_location(&context->current_rt->resource, WINED3D_LOCATION_DRAWABLE);
-#else /* STAGING_CSMT */
- surface_invalidate_location(context->current_rt, WINED3D_LOCATION_DRAWABLE);
-#endif /* STAGING_CSMT */
+ if (prev_texture->texture_srgb.name)
+ wined3d_texture_load(prev_texture, context, TRUE);
+ wined3d_texture_load(prev_texture, context, FALSE);
+ wined3d_texture_invalidate_location(prev_texture, prev_sub_resource_idx, WINED3D_LOCATION_DRAWABLE);
}
}
- context->current_rt = target;
+ context->current_rt.texture = texture;
+ context->current_rt.sub_resource_idx = sub_resource_idx;
context_set_render_offscreen(context, render_offscreen);
}
-struct wined3d_context *context_acquire(const struct wined3d_device *device, struct wined3d_surface *target)
+struct wined3d_context *context_acquire(const struct wined3d_device *device,
+ struct wined3d_texture *texture, unsigned int sub_resource_idx)
{
struct wined3d_context *current_context = context_get_current();
struct wined3d_context *context;
+ BOOL swapchain_texture;
- TRACE("device %p, target %p.\n", device, target);
+ TRACE("device %p, texture %p, sub_resource_idx %u.\n", device, texture, sub_resource_idx);
+
+ wined3d_from_cs(device->cs);
if (current_context && current_context->destroyed)
current_context = NULL;
- if (!target)
+ /* NOTE(review): evaluated before the default-target fallback below, so a
+ * texture picked up from the swapchain in that fallback never takes the
+ * onscreen branch — confirm this ordering is intentional. */
+ swapchain_texture = texture && texture->swapchain;
+ if (!texture)
{
if (current_context
- && current_context->current_rt
- && current_context->swapchain->device == device)
+ && current_context->current_rt.texture
+ && current_context->device == device)
{
- target = current_context->current_rt;
+ texture = current_context->current_rt.texture;
+ sub_resource_idx = current_context->current_rt.sub_resource_idx;
}
else
{
struct wined3d_swapchain *swapchain = device->swapchains[0];
+
if (swapchain->back_buffers)
- target = surface_from_resource(wined3d_texture_get_sub_resource(swapchain->back_buffers[0], 0));
+ texture = swapchain->back_buffers[0];
else
- target = surface_from_resource(wined3d_texture_get_sub_resource(swapchain->front_buffer, 0));
+ texture = swapchain->front_buffer;
+ sub_resource_idx = 0;
}
}
- if (current_context && current_context->current_rt == target)
+ /* Only the texture is compared here; a sub-resource change on the same
+ * texture is handled later by context_setup_target(). */
+ if (current_context && current_context->current_rt.texture == texture)
{
context = current_context;
}
- else if (target->container->swapchain)
+ else if (swapchain_texture)
{
TRACE("Rendering onscreen.\n");
- context = swapchain_get_context(target->container->swapchain);
+ context = swapchain_get_context(texture->swapchain);
}
else
{
/* Stay with the current context if possible. Otherwise use the
* context for the primary swapchain. */
- if (current_context && current_context->swapchain->device == device)
+ if (current_context && current_context->device == device)
context = current_context;
else
context = swapchain_get_context(device->swapchains[0]);
context_enter(context);
context_update_window(context);
- context_setup_target(context, target);
- if (!context->valid) return context;
+ context_setup_target(context, texture, sub_resource_idx);
+ if (!context->valid)
+ return context;
if (context != current_context)
{
return context;
}
+
+/* Re-acquire an existing context with its current render target. Returns
+ * NULL if the context belongs to a different thread. */
+struct wined3d_context *context_reacquire(const struct wined3d_device *device,
+ struct wined3d_context *context)
+{
+ struct wined3d_context *current_context;
+
+ /* Contexts are thread-affine (see the tid check); never touch one owned
+ * by another thread. */
+ if (!context || context->tid != GetCurrentThreadId())
+ return NULL;
+
+ current_context = context_acquire(device, context->current_rt.texture,
+ context->current_rt.sub_resource_idx);
+ /* Passing the context's own render target should hand the same context
+ * back; anything else indicates an acquire-path inconsistency. */
+ if (current_context != context)
+ ERR("Acquired context %p instead of %p.\n", current_context, context);
+ return current_context;
+}