1 /*
2 * Copyright 2002-2004 Jason Edmeades
3 * Copyright 2003-2004 Raphael Junqueira
4 * Copyright 2004 Christian Costa
5 * Copyright 2005 Oliver Stieber
6 * Copyright 2007-2008 Stefan Dösinger for CodeWeavers
7 * Copyright 2009-2011 Henri Verbeet for CodeWeavers
8 *
9 * This library is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public
11 * License as published by the Free Software Foundation; either
12 * version 2.1 of the License, or (at your option) any later version.
13 *
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
18 *
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with this library; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
22 */
23
24 #include "wined3d_private.h"
25
26 WINE_DEFAULT_DEBUG_CHANNEL(d3d);
27 WINE_DECLARE_DEBUG_CHANNEL(d3d_perf);
28
29 #define WINE_DEFAULT_VIDMEM (64 * 1024 * 1024)
30 #define DEFAULT_REFRESH_RATE 0
31
32 /* The driver names reflect the lowest GPU supported
33 * by a certain driver, so DRIVER_AMD_R300 supports
34 * R3xx, R4xx and R5xx GPUs. */
35 enum wined3d_display_driver
36 {
37 DRIVER_AMD_RAGE_128PRO,
38 DRIVER_AMD_R100,
39 DRIVER_AMD_R300,
40 DRIVER_AMD_R600,
41 DRIVER_INTEL_GMA800,
42 DRIVER_INTEL_GMA900,
43 DRIVER_INTEL_GMA950,
44 DRIVER_INTEL_GMA3000,
45 DRIVER_NVIDIA_TNT,
46 DRIVER_NVIDIA_GEFORCE2MX,
47 DRIVER_NVIDIA_GEFORCEFX,
48 DRIVER_NVIDIA_GEFORCE6,
49 DRIVER_UNKNOWN
50 };
51
52 enum wined3d_driver_model
53 {
54 DRIVER_MODEL_WIN9X,
55 DRIVER_MODEL_NT40,
56 DRIVER_MODEL_NT5X,
57 DRIVER_MODEL_NT6X
58 };
59
60 enum wined3d_gl_vendor
61 {
62 GL_VENDOR_UNKNOWN,
63 GL_VENDOR_APPLE,
64 GL_VENDOR_FGLRX,
65 GL_VENDOR_INTEL,
66 GL_VENDOR_MESA,
67 GL_VENDOR_NVIDIA,
68 };
69
70 /* The d3d device ID */
71 static const GUID IID_D3DDEVICE_D3DUID = { 0xaeb2cdd4, 0x6e41, 0x43ea, { 0x94,0x1c,0x83,0x61,0xcc,0x76,0x07,0x81 } };
72
73 /* Extension detection */
74 struct wined3d_extension_map
75 {
76 const char *extension_string;
77 enum wined3d_gl_extension extension;
78 };
79
80 static const struct wined3d_extension_map gl_extension_map[] =
81 {
82 /* APPLE */
83 {"GL_APPLE_client_storage", APPLE_CLIENT_STORAGE },
84 {"GL_APPLE_fence", APPLE_FENCE },
85 {"GL_APPLE_float_pixels", APPLE_FLOAT_PIXELS },
86 {"GL_APPLE_flush_buffer_range", APPLE_FLUSH_BUFFER_RANGE },
87 {"GL_APPLE_ycbcr_422", APPLE_YCBCR_422 },
88
89 /* ARB */
90 {"GL_ARB_color_buffer_float", ARB_COLOR_BUFFER_FLOAT },
91 {"GL_ARB_debug_output", ARB_DEBUG_OUTPUT },
92 {"GL_ARB_depth_buffer_float", ARB_DEPTH_BUFFER_FLOAT },
93 {"GL_ARB_depth_clamp", ARB_DEPTH_CLAMP },
94 {"GL_ARB_depth_texture", ARB_DEPTH_TEXTURE },
95 {"GL_ARB_draw_buffers", ARB_DRAW_BUFFERS },
96 {"GL_ARB_draw_elements_base_vertex", ARB_DRAW_ELEMENTS_BASE_VERTEX },
97 {"GL_ARB_draw_instanced", ARB_DRAW_INSTANCED },
98 {"GL_ARB_fragment_program", ARB_FRAGMENT_PROGRAM },
99 {"GL_ARB_fragment_shader", ARB_FRAGMENT_SHADER },
100 {"GL_ARB_framebuffer_object", ARB_FRAMEBUFFER_OBJECT },
101 {"GL_ARB_framebuffer_sRGB", ARB_FRAMEBUFFER_SRGB },
102 {"GL_ARB_geometry_shader4", ARB_GEOMETRY_SHADER4 },
103 {"GL_ARB_half_float_pixel", ARB_HALF_FLOAT_PIXEL },
104 {"GL_ARB_half_float_vertex", ARB_HALF_FLOAT_VERTEX },
105 {"GL_ARB_instanced_arrays", ARB_INSTANCED_ARRAYS, },
106 {"GL_ARB_internalformat_query2", ARB_INTERNALFORMAT_QUERY2, },
107 {"GL_ARB_map_buffer_alignment", ARB_MAP_BUFFER_ALIGNMENT },
108 {"GL_ARB_map_buffer_range", ARB_MAP_BUFFER_RANGE },
109 {"GL_ARB_multisample", ARB_MULTISAMPLE }, /* needs GLX_ARB_MULTISAMPLE as well */
110 {"GL_ARB_multitexture", ARB_MULTITEXTURE },
111 {"GL_ARB_occlusion_query", ARB_OCCLUSION_QUERY },
112 {"GL_ARB_pixel_buffer_object", ARB_PIXEL_BUFFER_OBJECT },
113 {"GL_ARB_point_parameters", ARB_POINT_PARAMETERS },
114 {"GL_ARB_point_sprite", ARB_POINT_SPRITE },
115 {"GL_ARB_provoking_vertex", ARB_PROVOKING_VERTEX },
116 {"GL_ARB_shader_bit_encoding", ARB_SHADER_BIT_ENCODING },
117 {"GL_ARB_shader_objects", ARB_SHADER_OBJECTS },
118 {"GL_ARB_shader_texture_lod", ARB_SHADER_TEXTURE_LOD },
119 {"GL_ARB_shading_language_100", ARB_SHADING_LANGUAGE_100 },
120 {"GL_ARB_shadow", ARB_SHADOW },
121 {"GL_ARB_sync", ARB_SYNC },
122 {"GL_ARB_texture_border_clamp", ARB_TEXTURE_BORDER_CLAMP },
123 {"GL_ARB_texture_compression", ARB_TEXTURE_COMPRESSION },
124 {"GL_ARB_texture_compression_rgtc", ARB_TEXTURE_COMPRESSION_RGTC },
125 {"GL_ARB_texture_cube_map", ARB_TEXTURE_CUBE_MAP },
126 {"GL_ARB_texture_env_add", ARB_TEXTURE_ENV_ADD },
127 {"GL_ARB_texture_env_combine", ARB_TEXTURE_ENV_COMBINE },
128 {"GL_ARB_texture_env_dot3", ARB_TEXTURE_ENV_DOT3 },
129 {"GL_ARB_texture_float", ARB_TEXTURE_FLOAT },
130 {"GL_ARB_texture_mirrored_repeat", ARB_TEXTURE_MIRRORED_REPEAT },
131 {"GL_ARB_texture_non_power_of_two", ARB_TEXTURE_NON_POWER_OF_TWO },
132 {"GL_ARB_texture_rectangle", ARB_TEXTURE_RECTANGLE },
133 {"GL_ARB_texture_rg", ARB_TEXTURE_RG },
134 {"GL_ARB_vertex_array_bgra", ARB_VERTEX_ARRAY_BGRA },
135 {"GL_ARB_vertex_blend", ARB_VERTEX_BLEND },
136 {"GL_ARB_vertex_buffer_object", ARB_VERTEX_BUFFER_OBJECT },
137 {"GL_ARB_vertex_program", ARB_VERTEX_PROGRAM },
138 {"GL_ARB_vertex_shader", ARB_VERTEX_SHADER },
139
140 /* ATI */
141 {"GL_ATI_fragment_shader", ATI_FRAGMENT_SHADER },
142 {"GL_ATI_separate_stencil", ATI_SEPARATE_STENCIL },
143 {"GL_ATI_texture_compression_3dc", ATI_TEXTURE_COMPRESSION_3DC },
144 {"GL_ATI_texture_env_combine3", ATI_TEXTURE_ENV_COMBINE3 },
145 {"GL_ATI_texture_mirror_once", ATI_TEXTURE_MIRROR_ONCE },
146
147 /* EXT */
148 {"GL_EXT_blend_color", EXT_BLEND_COLOR },
149 {"GL_EXT_blend_equation_separate", EXT_BLEND_EQUATION_SEPARATE },
150 {"GL_EXT_blend_func_separate", EXT_BLEND_FUNC_SEPARATE },
151 {"GL_EXT_blend_minmax", EXT_BLEND_MINMAX },
152 {"GL_EXT_blend_subtract", EXT_BLEND_SUBTRACT },
153 {"GL_EXT_depth_bounds_test", EXT_DEPTH_BOUNDS_TEST },
154 {"GL_EXT_draw_buffers2", EXT_DRAW_BUFFERS2 },
155 {"GL_EXT_fog_coord", EXT_FOG_COORD },
156 {"GL_EXT_framebuffer_blit", EXT_FRAMEBUFFER_BLIT },
157 {"GL_EXT_framebuffer_multisample", EXT_FRAMEBUFFER_MULTISAMPLE },
158 {"GL_EXT_framebuffer_object", EXT_FRAMEBUFFER_OBJECT },
159 {"GL_EXT_gpu_program_parameters", EXT_GPU_PROGRAM_PARAMETERS },
160 {"GL_EXT_gpu_shader4", EXT_GPU_SHADER4 },
161 {"GL_EXT_packed_depth_stencil", EXT_PACKED_DEPTH_STENCIL },
162 {"GL_EXT_point_parameters", EXT_POINT_PARAMETERS },
163 {"GL_EXT_provoking_vertex", EXT_PROVOKING_VERTEX },
164 {"GL_EXT_secondary_color", EXT_SECONDARY_COLOR },
165 {"GL_EXT_stencil_two_side", EXT_STENCIL_TWO_SIDE },
166 {"GL_EXT_stencil_wrap", EXT_STENCIL_WRAP },
167 {"GL_EXT_texture3D", EXT_TEXTURE3D },
168 {"GL_EXT_texture_compression_rgtc", EXT_TEXTURE_COMPRESSION_RGTC },
169 {"GL_EXT_texture_compression_s3tc", EXT_TEXTURE_COMPRESSION_S3TC },
170 {"GL_EXT_texture_env_add", EXT_TEXTURE_ENV_ADD },
171 {"GL_EXT_texture_env_combine", EXT_TEXTURE_ENV_COMBINE },
172 {"GL_EXT_texture_env_dot3", EXT_TEXTURE_ENV_DOT3 },
173 {"GL_EXT_texture_filter_anisotropic", EXT_TEXTURE_FILTER_ANISOTROPIC},
174 {"GL_EXT_texture_lod_bias", EXT_TEXTURE_LOD_BIAS },
175 {"GL_EXT_texture_sRGB", EXT_TEXTURE_SRGB },
176 {"GL_EXT_texture_sRGB_decode", EXT_TEXTURE_SRGB_DECODE },
177 {"GL_EXT_vertex_array_bgra", EXT_VERTEX_ARRAY_BGRA },
178
179 /* NV */
180 {"GL_NV_depth_clamp", NV_DEPTH_CLAMP },
181 {"GL_NV_fence", NV_FENCE },
182 {"GL_NV_fog_distance", NV_FOG_DISTANCE },
183 {"GL_NV_fragment_program", NV_FRAGMENT_PROGRAM },
184 {"GL_NV_fragment_program2", NV_FRAGMENT_PROGRAM2 },
185 {"GL_NV_fragment_program_option", NV_FRAGMENT_PROGRAM_OPTION },
186 {"GL_NV_half_float", NV_HALF_FLOAT },
187 {"GL_NV_light_max_exponent", NV_LIGHT_MAX_EXPONENT },
188 {"GL_NV_point_sprite", NV_POINT_SPRITE },
189 {"GL_NV_register_combiners", NV_REGISTER_COMBINERS },
190 {"GL_NV_register_combiners2", NV_REGISTER_COMBINERS2 },
191 {"GL_NV_texgen_reflection", NV_TEXGEN_REFLECTION },
192 {"GL_NV_texture_env_combine4", NV_TEXTURE_ENV_COMBINE4 },
193 {"GL_NV_texture_shader", NV_TEXTURE_SHADER },
194 {"GL_NV_texture_shader2", NV_TEXTURE_SHADER2 },
195 {"GL_NV_vertex_program", NV_VERTEX_PROGRAM },
196 {"GL_NV_vertex_program1_1", NV_VERTEX_PROGRAM1_1 },
197 {"GL_NV_vertex_program2", NV_VERTEX_PROGRAM2 },
198 {"GL_NV_vertex_program2_option", NV_VERTEX_PROGRAM2_OPTION },
199 {"GL_NV_vertex_program3", NV_VERTEX_PROGRAM3 },
200
201 /* SGI */
202 {"GL_SGIS_generate_mipmap", SGIS_GENERATE_MIPMAP },
203 };
204
205 static const struct wined3d_extension_map wgl_extension_map[] =
206 {
207 {"WGL_ARB_pixel_format", WGL_ARB_PIXEL_FORMAT },
208 {"WGL_EXT_swap_control", WGL_EXT_SWAP_CONTROL },
209 {"WGL_WINE_pixel_format_passthrough", WGL_WINE_PIXEL_FORMAT_PASSTHROUGH},
210 };
211
212 /**********************************************************
213 * Utility functions follow
214 **********************************************************/
215
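/* Maps a d3d [min_filter][mip_filter] pair to the GL minification filter, e.g. a LINEAR
 * min filter combined with a POINT mip filter yields GL_LINEAR_MIPMAP_NEAREST. The _noFilter
 * and _noMip variants below force point sampling / no mipmapping regardless of the request. */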
216 const struct min_lookup minMipLookup[] =
217 {
218 /* NONE POINT LINEAR */
219 {{GL_NEAREST, GL_NEAREST, GL_NEAREST}}, /* NONE */
220 {{GL_NEAREST, GL_NEAREST_MIPMAP_NEAREST, GL_NEAREST_MIPMAP_LINEAR}}, /* POINT*/
221 {{GL_LINEAR, GL_LINEAR_MIPMAP_NEAREST, GL_LINEAR_MIPMAP_LINEAR}}, /* LINEAR */
222 };
223
224 const struct min_lookup minMipLookup_noFilter[] =
225 {
226 /* NONE POINT LINEAR */
227 {{GL_NEAREST, GL_NEAREST, GL_NEAREST}}, /* NONE */
228 {{GL_NEAREST, GL_NEAREST, GL_NEAREST}}, /* POINT */
229 {{GL_NEAREST, GL_NEAREST, GL_NEAREST}}, /* LINEAR */
230 };
231
232 const struct min_lookup minMipLookup_noMip[] =
233 {
234 /* NONE POINT LINEAR */
235 {{GL_NEAREST, GL_NEAREST, GL_NEAREST}}, /* NONE */
236 {{GL_NEAREST, GL_NEAREST, GL_NEAREST}}, /* POINT */
237 {{GL_LINEAR, GL_LINEAR, GL_LINEAR }}, /* LINEAR */
238 };
239
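/* Maps the d3d mag filter to the GL mag filter; magLookup_noFilter below forces GL_NEAREST
 * regardless of the requested filter. */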
240 const GLenum magLookup[] =
241 {
242 /* NONE POINT LINEAR */
243 GL_NEAREST, GL_NEAREST, GL_LINEAR,
244 };
245
246 const GLenum magLookup_noFilter[] =
247 {
248 /* NONE POINT LINEAR */
249 GL_NEAREST, GL_NEAREST, GL_NEAREST,
250 };
251
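/* Minimal GL context used for capability detection. restore_dc / restore_gl_ctx hold whatever
 * context was current before WineD3D_CreateFakeGLContext(), so it can be restored afterwards. */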
252 struct wined3d_fake_gl_ctx
253 {
254 HDC dc;
255 HWND wnd;
256 HGLRC gl_ctx;
257 HDC restore_dc;
258 HGLRC restore_gl_ctx;
259 };
260
261 static void WineD3D_ReleaseFakeGLContext(const struct wined3d_fake_gl_ctx *ctx)
262 {
263 TRACE("Destroying fake GL context.\n");
264
265 if (!wglMakeCurrent(NULL, NULL))
266 ERR("Failed to disable fake GL context.\n");
267
268 if (!wglDeleteContext(ctx->gl_ctx))
269 {
270 DWORD err = GetLastError();
271 ERR("wglDeleteContext(%p) failed, last error %#x.\n", ctx->gl_ctx, err);
272 }
273
274 ReleaseDC(ctx->wnd, ctx->dc);
275 DestroyWindow(ctx->wnd);
276
277 if (ctx->restore_gl_ctx && !wglMakeCurrent(ctx->restore_dc, ctx->restore_gl_ctx))
278 ERR("Failed to restore previous GL context.\n");
279 }
280
281 static void wined3d_create_fake_gl_context_attribs(struct wined3d_fake_gl_ctx *fake_gl_ctx,
282 struct wined3d_gl_info *gl_info, const GLint *ctx_attribs)
283 {
284 HGLRC new_ctx;
285
286 if (!(gl_info->p_wglCreateContextAttribsARB = (void *)wglGetProcAddress("wglCreateContextAttribsARB")))
287 return;
288
289 if (!(new_ctx = gl_info->p_wglCreateContextAttribsARB(fake_gl_ctx->dc, NULL, ctx_attribs)))
290 {
291 ERR("Failed to create a context using wglCreateContextAttribsARB(), last error %#x.\n", GetLastError());
292 gl_info->p_wglCreateContextAttribsARB = NULL;
293 return;
294 }
295
296 if (!wglMakeCurrent(fake_gl_ctx->dc, new_ctx))
297 {
298 ERR("Failed to make new context current, last error %#x.\n", GetLastError());
299 if (!wglDeleteContext(new_ctx))
300 ERR("Failed to delete new context, last error %#x.\n", GetLastError());
301 gl_info->p_wglCreateContextAttribsARB = NULL;
302 return;
303 }
304
305 if (!wglDeleteContext(fake_gl_ctx->gl_ctx))
306 ERR("Failed to delete old context, last error %#x.\n", GetLastError());
307 fake_gl_ctx->gl_ctx = new_ctx;
308 }
309
310 /* Do not call while under the GL lock. */
311 static BOOL WineD3D_CreateFakeGLContext(struct wined3d_fake_gl_ctx *ctx)
312 {
313 PIXELFORMATDESCRIPTOR pfd;
314 int iPixelFormat;
315
316 TRACE("getting context...\n");
317
318 ctx->restore_dc = wglGetCurrentDC();
319 ctx->restore_gl_ctx = wglGetCurrentContext();
320
321 /* We need a fake window as an HDC retrieved using GetDC(0) can't be used for most GL purposes. */
322 ctx->wnd = CreateWindowA(WINED3D_OPENGL_WINDOW_CLASS_NAME, "WineD3D fake window",
323 WS_OVERLAPPEDWINDOW, 10, 10, 10, 10, NULL, NULL, NULL, NULL);
324 if (!ctx->wnd)
325 {
326 ERR("Failed to create a window.\n");
327 goto fail;
328 }
329
330 ctx->dc = GetDC(ctx->wnd);
331 if (!ctx->dc)
332 {
333 ERR("Failed to get a DC.\n");
334 goto fail;
335 }
336
337 /* PixelFormat selection */
338 ZeroMemory(&pfd, sizeof(pfd));
339 pfd.nSize = sizeof(pfd);
340 pfd.nVersion = 1;
341 pfd.dwFlags = PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER | PFD_DRAW_TO_WINDOW; /* PFD_GENERIC_ACCELERATED */
342 pfd.iPixelType = PFD_TYPE_RGBA;
343 pfd.cColorBits = 32;
344 pfd.iLayerType = PFD_MAIN_PLANE;
345
346 if (!(iPixelFormat = ChoosePixelFormat(ctx->dc, &pfd)))
347 {
348 /* If this happens, something is very wrong, as ChoosePixelFormat rarely fails. */
349 ERR("Failed to find a suitable pixel format.\n");
350 goto fail;
351 }
352 DescribePixelFormat(ctx->dc, iPixelFormat, sizeof(pfd), &pfd);
353 SetPixelFormat(ctx->dc, iPixelFormat, &pfd);
354
355 /* Create a GL context. */
356 if (!(ctx->gl_ctx = wglCreateContext(ctx->dc)))
357 {
358 WARN("Failed to create default context for capabilities initialization.\n");
359 goto fail;
360 }
361
362 /* Make it the current GL context. */
363 if (!wglMakeCurrent(ctx->dc, ctx->gl_ctx))
364 {
365 ERR("Failed to make fake GL context current.\n");
366 goto fail;
367 }
368
369 return TRUE;
370
371 fail:
372 if (ctx->gl_ctx) wglDeleteContext(ctx->gl_ctx);
373 ctx->gl_ctx = NULL;
374 if (ctx->dc) ReleaseDC(ctx->wnd, ctx->dc);
375 ctx->dc = NULL;
376 if (ctx->wnd) DestroyWindow(ctx->wnd);
377 ctx->wnd = NULL;
378 if (ctx->restore_gl_ctx && !wglMakeCurrent(ctx->restore_dc, ctx->restore_gl_ctx))
379 ERR("Failed to restore previous GL context.\n");
380
381 return FALSE;
382 }
383
384 /* Adjust the amount of used texture memory */
385 unsigned int adapter_adjust_memory(struct wined3d_adapter *adapter, int amount)
386 {
387 adapter->UsedTextureRam += amount;
388 TRACE("Adjusted adapter memory by %d to %d.\n", amount, adapter->UsedTextureRam);
389 return adapter->UsedTextureRam;
390 }
391
392 static void wined3d_adapter_cleanup(struct wined3d_adapter *adapter)
393 {
394 HeapFree(GetProcessHeap(), 0, adapter->gl_info.formats);
395 HeapFree(GetProcessHeap(), 0, adapter->cfgs);
396 }
397
398 ULONG CDECL wined3d_incref(struct wined3d *wined3d)
399 {
400 ULONG refcount = InterlockedIncrement(&wined3d->ref);
401
402 TRACE("%p increasing refcount to %u.\n", wined3d, refcount);
403
404 return refcount;
405 }
406
407 ULONG CDECL wined3d_decref(struct wined3d *wined3d)
408 {
409 ULONG refcount = InterlockedDecrement(&wined3d->ref);
410
411 TRACE("%p decreasing refcount to %u.\n", wined3d, refcount);
412
413 if (!refcount)
414 {
415 unsigned int i;
416
417 for (i = 0; i < wined3d->adapter_count; ++i)
418 {
419 wined3d_adapter_cleanup(&wined3d->adapters[i]);
420 }
421 HeapFree(GetProcessHeap(), 0, wined3d);
422 }
423
424 return refcount;
425 }
426
427 /* Context activation is done by the caller. */
428 static BOOL test_arb_vs_offset_limit(const struct wined3d_gl_info *gl_info)
429 {
430 GLuint prog;
431 BOOL ret = FALSE;
432 const char *testcode =
433 "!!ARBvp1.0\n"
434 "PARAM C[66] = { program.env[0..65] };\n"
435 "ADDRESS A0;"
436 "PARAM zero = {0.0, 0.0, 0.0, 0.0};\n"
437 "ARL A0.x, zero.x;\n"
438 "MOV result.position, C[A0.x + 65];\n"
439 "END\n";
440
441 while (gl_info->gl_ops.gl.p_glGetError());
442 GL_EXTCALL(glGenProgramsARB(1, &prog));
443 if (!prog) {
444 ERR("Failed to create an ARB offset limit test program\n");
445 }
446 GL_EXTCALL(glBindProgramARB(GL_VERTEX_PROGRAM_ARB, prog));
447 GL_EXTCALL(glProgramStringARB(GL_VERTEX_PROGRAM_ARB, GL_PROGRAM_FORMAT_ASCII_ARB,
448 strlen(testcode), testcode));
449 if (gl_info->gl_ops.gl.p_glGetError())
450 {
451 TRACE("OpenGL implementation does not allow indirect addressing offsets > 63\n");
452 TRACE("error: %s\n", debugstr_a((const char *)gl_info->gl_ops.gl.p_glGetString(GL_PROGRAM_ERROR_STRING_ARB)));
453 ret = TRUE;
454 } else TRACE("OpenGL implementation allows offsets > 63\n");
455
456 GL_EXTCALL(glBindProgramARB(GL_VERTEX_PROGRAM_ARB, 0));
457 GL_EXTCALL(glDeleteProgramsARB(1, &prog));
458 checkGLcall("ARB vp offset limit test cleanup");
459
460 return ret;
461 }
462
463 static BOOL match_amd_r300_to_500(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
464 enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
465 {
466 if (card_vendor != HW_VENDOR_AMD) return FALSE;
467 if (device == CARD_AMD_RADEON_9500) return TRUE;
468 if (device == CARD_AMD_RADEON_X700) return TRUE;
469 if (device == CARD_AMD_RADEON_X1600) return TRUE;
470 return FALSE;
471 }
472
473 static BOOL match_geforce5(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
474 enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
475 {
476 if (card_vendor == HW_VENDOR_NVIDIA)
477 {
478 if (device == CARD_NVIDIA_GEFORCEFX_5200 ||
479 device == CARD_NVIDIA_GEFORCEFX_5600 ||
480 device == CARD_NVIDIA_GEFORCEFX_5800)
481 {
482 return TRUE;
483 }
484 }
485 return FALSE;
486 }
487
488 static BOOL match_apple(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
489 enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
490 {
491 /* MacOS has various peculiarities in the extensions it advertises. Some have to be loaded from
492 * the OpenGL 1.2+ core, while other extensions are advertised but software-emulated. So try to
493 * detect the Apple OpenGL implementation to apply some extension fixups afterwards.
494 *
495 * Detecting this isn't really easy. The vendor string doesn't mention Apple. Compile-time checks
496 * aren't sufficient either because a Linux binary may display on an X server running on Mac OS X via remote X11.
497 * So try to detect the GL implementation by looking at certain Apple extensions. Some extensions
498 * like client storage might be supported on other implementations too, but GL_APPLE_flush_render
499 * is specific to Mac OS X window management, and GL_APPLE_ycbcr_422 is QuickTime specific. So
500 * the chance that other implementations support them is rather small since Win32 QuickTime uses
501 * DirectDraw, not OpenGL.
502 *
503 * This test has been moved into wined3d_guess_gl_vendor().
504 */
505 if (gl_vendor == GL_VENDOR_APPLE)
506 {
507 return TRUE;
508 }
509 return FALSE;
510 }
511
512 /* Context activation is done by the caller. */
513 static void test_pbo_functionality(struct wined3d_gl_info *gl_info)
514 {
515 /* Some OpenGL implementations, namely Apple's GeForce 8 driver, advertise PBOs,
516 * but glTexSubImage from a PBO fails miserably, with the first line repeated over
517 * the whole texture. This function detects this bug by its symptom and disables PBOs
518 * if the test fails.
519 *
520 * The test uploads a 4x4 texture via the PBO in the "native" format GL_BGRA,
521 * GL_UNSIGNED_INT_8_8_8_8_REV. This format triggers the bug, and it is what we use
522 * for D3DFMT_A8R8G8B8. Then the texture is read back without any PBO and the data
523 * read back is compared to the original. If they are equal PBOs are assumed to work,
524 * otherwise the PBO extension is disabled. */
525 GLuint texture, pbo;
526 static const unsigned int pattern[] =
527 {
528 0x00000000, 0x000000ff, 0x0000ff00, 0x40ff0000,
529 0x80ffffff, 0x40ffff00, 0x00ff00ff, 0x0000ffff,
530 0x00ffff00, 0x00ff00ff, 0x0000ffff, 0x000000ff,
531 0x80ff00ff, 0x0000ffff, 0x00ff00ff, 0x40ff00ff
532 };
533 unsigned int check[sizeof(pattern) / sizeof(pattern[0])];
534
535 /* No PBO -> No point in testing them. */
536 if (!gl_info->supported[ARB_PIXEL_BUFFER_OBJECT]) return;
537
538 while (gl_info->gl_ops.gl.p_glGetError());
539 gl_info->gl_ops.gl.p_glGenTextures(1, &texture);
540 gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_2D, texture);
541
542 gl_info->gl_ops.gl.p_glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0);
543 gl_info->gl_ops.gl.p_glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, 4, 4, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, 0);
544 checkGLcall("Specifying the PBO test texture");
545
546 GL_EXTCALL(glGenBuffersARB(1, &pbo));
547 GL_EXTCALL(glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, pbo));
548 GL_EXTCALL(glBufferDataARB(GL_PIXEL_UNPACK_BUFFER_ARB, sizeof(pattern), pattern, GL_STREAM_DRAW_ARB));
549 checkGLcall("Specifying the PBO test pbo");
550
551 gl_info->gl_ops.gl.p_glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 4, 4, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, NULL);
552 checkGLcall("Loading the PBO test texture");
553
554 GL_EXTCALL(glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, 0));
555
556 gl_info->gl_ops.gl.p_glFinish(); /* just to be sure */
557
558 memset(check, 0, sizeof(check));
559 gl_info->gl_ops.gl.p_glGetTexImage(GL_TEXTURE_2D, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, check);
560 checkGLcall("Reading back the PBO test texture");
561
562 gl_info->gl_ops.gl.p_glDeleteTextures(1, &texture);
563 GL_EXTCALL(glDeleteBuffersARB(1, &pbo));
564 checkGLcall("PBO test cleanup");
565
566 if (memcmp(check, pattern, sizeof(check)))
567 {
568 WARN_(d3d_perf)("PBO test failed, read back data doesn't match original.\n"
569 "Disabling PBOs. This may result in slower performance.\n");
570 gl_info->supported[ARB_PIXEL_BUFFER_OBJECT] = FALSE;
571 }
572 else
573 {
574 TRACE("PBO test successful.\n");
575 }
576 }
577
578 static BOOL match_apple_intel(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
579 enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
580 {
581 return (card_vendor == HW_VENDOR_INTEL) && (gl_vendor == GL_VENDOR_APPLE);
582 }
583
584 static BOOL match_apple_nonr500ati(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
585 enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
586 {
587 if (gl_vendor != GL_VENDOR_APPLE) return FALSE;
588 if (card_vendor != HW_VENDOR_AMD) return FALSE;
589 if (device == CARD_AMD_RADEON_X1600) return FALSE;
590 return TRUE;
591 }
592
593 static BOOL match_dx10_capable(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
594 enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
595 {
596 /* DX9 cards support 40 single-float varyings in hardware; most drivers report 32. ATI misreports
597 * 44 varyings. So assume that if we have more than 44 varyings we have a dx10 card.
598 * This detection is for the gl_ClipPos varying quirk. If a d3d9 card really supports more than 44
599 * varyings and we subtract one in dx9 shaders, it's not going to hurt us because the dx9 limit is
600 * hardcoded.
601 *
602 * dx10 cards usually have 64 varyings. */
603 return gl_info->limits.glsl_varyings > 44;
604 }
605
606 static BOOL match_not_dx10_capable(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
607 enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
608 {
609 return !match_dx10_capable(gl_info, gl_renderer, gl_vendor, card_vendor, device);
610 }
611
612 /* A GL context is provided by the caller */
613 static BOOL match_allows_spec_alpha(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
614 enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
615 {
616 GLenum error;
617 DWORD data[16];
618
619 if (!gl_info->supported[EXT_SECONDARY_COLOR])
620 return FALSE;
621
622 while (gl_info->gl_ops.gl.p_glGetError());
623 GL_EXTCALL(glSecondaryColorPointerEXT)(4, GL_UNSIGNED_BYTE, 4, data);
624 error = gl_info->gl_ops.gl.p_glGetError();
625
626 if (error == GL_NO_ERROR)
627 {
628 TRACE("GL Implementation accepts 4 component specular color pointers\n");
629 return TRUE;
630 }
631 else
632 {
633 TRACE("GL implementation does not accept 4 component specular colors, error %s\n",
634 debug_glerror(error));
635 return FALSE;
636 }
637 }
638
639 /* A GL context is provided by the caller */
640 static BOOL match_broken_nv_clip(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
641 enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
642 {
643 GLuint prog;
644 BOOL ret = FALSE;
645 GLint pos;
646 const char *testcode =
647 "!!ARBvp1.0\n"
648 "OPTION NV_vertex_program2;\n"
649 "MOV result.clip[0], 0.0;\n"
650 "MOV result.position, 0.0;\n"
651 "END\n";
652
653 if (!gl_info->supported[NV_VERTEX_PROGRAM2_OPTION]) return FALSE;
654
655 while (gl_info->gl_ops.gl.p_glGetError());
656
657 GL_EXTCALL(glGenProgramsARB(1, &prog));
658 if (!prog)
659 {
660 ERR("Failed to create the NVvp clip test program\n");
661 return FALSE;
662 }
663 GL_EXTCALL(glBindProgramARB(GL_VERTEX_PROGRAM_ARB, prog));
664 GL_EXTCALL(glProgramStringARB(GL_VERTEX_PROGRAM_ARB, GL_PROGRAM_FORMAT_ASCII_ARB,
665 strlen(testcode), testcode));
666 gl_info->gl_ops.gl.p_glGetIntegerv(GL_PROGRAM_ERROR_POSITION_ARB, &pos);
667 if (pos != -1)
668 {
669 WARN("GL_NV_vertex_program2_option result.clip[] test failed\n");
670 TRACE("error: %s\n", debugstr_a((const char *)gl_info->gl_ops.gl.p_glGetString(GL_PROGRAM_ERROR_STRING_ARB)));
671 ret = TRUE;
672 while (gl_info->gl_ops.gl.p_glGetError());
673 }
674 else TRACE("GL_NV_vertex_program2_option result.clip[] test passed\n");
675
676 GL_EXTCALL(glBindProgramARB(GL_VERTEX_PROGRAM_ARB, 0));
677 GL_EXTCALL(glDeleteProgramsARB(1, &prog));
678 checkGLcall("GL_NV_vertex_program2_option result.clip[] test cleanup");
679
680 return ret;
681 }
682
683 /* Context activation is done by the caller. */
684 static BOOL match_fbo_tex_update(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
685 enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
686 {
687 char data[4 * 4 * 4];
688 GLuint tex, fbo;
689 GLenum status;
690
691 if (wined3d_settings.offscreen_rendering_mode != ORM_FBO) return FALSE;
692
693 memset(data, 0xcc, sizeof(data));
694
695 gl_info->gl_ops.gl.p_glGenTextures(1, &tex);
696 gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_2D, tex);
697 gl_info->gl_ops.gl.p_glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
698 gl_info->gl_ops.gl.p_glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
699 gl_info->gl_ops.gl.p_glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, 4, 4, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, NULL);
700 checkGLcall("glTexImage2D");
701
702 gl_info->fbo_ops.glGenFramebuffers(1, &fbo);
703 gl_info->fbo_ops.glBindFramebuffer(GL_FRAMEBUFFER, fbo);
704 gl_info->fbo_ops.glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, tex, 0);
705 checkGLcall("glFramebufferTexture2D");
706
707 status = gl_info->fbo_ops.glCheckFramebufferStatus(GL_FRAMEBUFFER);
708 if (status != GL_FRAMEBUFFER_COMPLETE) ERR("FBO status %#x\n", status);
709 checkGLcall("glCheckFramebufferStatus");
710
711 memset(data, 0x11, sizeof(data));
712 gl_info->gl_ops.gl.p_glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 4, 4, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, data);
713 checkGLcall("glTexSubImage2D");
714
715 gl_info->gl_ops.gl.p_glClearColor(0.996f, 0.729f, 0.745f, 0.792f);
716 gl_info->gl_ops.gl.p_glClear(GL_COLOR_BUFFER_BIT);
717 checkGLcall("glClear");
718
719 gl_info->gl_ops.gl.p_glGetTexImage(GL_TEXTURE_2D, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, data);
720 checkGLcall("glGetTexImage");
721
722 gl_info->fbo_ops.glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0);
723 gl_info->fbo_ops.glBindFramebuffer(GL_FRAMEBUFFER, 0);
724 gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_2D, 0);
725 checkGLcall("glBindTexture");
726
727 gl_info->fbo_ops.glDeleteFramebuffers(1, &fbo);
728 gl_info->gl_ops.gl.p_glDeleteTextures(1, &tex);
729 checkGLcall("glDeleteTextures");
730
731 return *(DWORD *)data == 0x11111111;
732 }
733
734 /* Context activation is done by the caller. */
735 static BOOL match_broken_rgba16(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
736 enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
737 {
738 /* GL_RGBA16 uses GL_RGBA8 internally on Geforce 7 and older cards.
739 * This leads to graphical bugs in Half Life 2 and Unreal engine games. */
740 GLuint tex;
741 GLint size;
742
743 gl_info->gl_ops.gl.p_glGenTextures(1, &tex);
744 gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_2D, tex);
745 gl_info->gl_ops.gl.p_glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA16, 4, 4, 0, GL_RGBA, GL_UNSIGNED_SHORT, NULL);
746 checkGLcall("glTexImage2D");
747
748 gl_info->gl_ops.gl.p_glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_RED_SIZE, &size);
749 checkGLcall("glGetTexLevelParameteriv");
750 TRACE("Real color depth is %d\n", size);
751
752 gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_2D, 0);
753 checkGLcall("glBindTexture");
754 gl_info->gl_ops.gl.p_glDeleteTextures(1, &tex);
755 checkGLcall("glDeleteTextures");
756
757 return size < 16;
758 }
759
760 static BOOL match_fglrx(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
761 enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
762 {
763 return gl_vendor == GL_VENDOR_FGLRX;
764 }
765
766 static BOOL match_r200(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
767 enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
768 {
769 if (card_vendor != HW_VENDOR_AMD) return FALSE;
770 if (device == CARD_AMD_RADEON_8500) return TRUE;
771 return FALSE;
772 }
773
774 static BOOL match_broken_arb_fog(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
775 enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
776 {
777 DWORD data[4];
778 GLuint tex, fbo;
779 GLenum status;
780 float color[4] = {0.0f, 1.0f, 0.0f, 0.0f};
781 GLuint prog;
782 GLint err_pos;
783 static const char *program_code =
784 "!!ARBfp1.0\n"
785 "OPTION ARB_fog_linear;\n"
786 "MOV result.color, {1.0, 0.0, 0.0, 0.0};\n"
787 "END\n";
788
789 if (wined3d_settings.offscreen_rendering_mode != ORM_FBO)
790 return FALSE;
791 if (!gl_info->supported[ARB_FRAGMENT_PROGRAM])
792 return FALSE;
793
794 gl_info->gl_ops.gl.p_glGenTextures(1, &tex);
795 gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_2D, tex);
796 gl_info->gl_ops.gl.p_glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
797 gl_info->gl_ops.gl.p_glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
798 gl_info->gl_ops.gl.p_glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB8, 4, 1, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, NULL);
799 checkGLcall("glTexImage2D");
800
801 gl_info->fbo_ops.glGenFramebuffers(1, &fbo);
802 gl_info->fbo_ops.glBindFramebuffer(GL_FRAMEBUFFER, fbo);
803 gl_info->fbo_ops.glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, tex, 0);
804 checkGLcall("glFramebufferTexture2D");
805
806 status = gl_info->fbo_ops.glCheckFramebufferStatus(GL_FRAMEBUFFER);
807 if (status != GL_FRAMEBUFFER_COMPLETE) ERR("FBO status %#x\n", status);
808 checkGLcall("glCheckFramebufferStatus");
809
810 gl_info->gl_ops.gl.p_glClearColor(0.0f, 0.0f, 1.0f, 0.0f);
811 gl_info->gl_ops.gl.p_glClear(GL_COLOR_BUFFER_BIT);
812 checkGLcall("glClear");
813 gl_info->gl_ops.gl.p_glViewport(0, 0, 4, 1);
814 checkGLcall("glViewport");
815
816 gl_info->gl_ops.gl.p_glEnable(GL_FOG);
817 gl_info->gl_ops.gl.p_glFogf(GL_FOG_START, 0.5f);
818 gl_info->gl_ops.gl.p_glFogf(GL_FOG_END, 0.5f);
819 gl_info->gl_ops.gl.p_glFogi(GL_FOG_MODE, GL_LINEAR);
820 gl_info->gl_ops.gl.p_glHint(GL_FOG_HINT, GL_NICEST);
821 gl_info->gl_ops.gl.p_glFogfv(GL_FOG_COLOR, color);
822 checkGLcall("fog setup");
823
824 GL_EXTCALL(glGenProgramsARB(1, &prog));
825 GL_EXTCALL(glBindProgramARB(GL_FRAGMENT_PROGRAM_ARB, prog));
826 GL_EXTCALL(glProgramStringARB(GL_FRAGMENT_PROGRAM_ARB, GL_PROGRAM_FORMAT_ASCII_ARB,
827 strlen(program_code), program_code));
828 gl_info->gl_ops.gl.p_glEnable(GL_FRAGMENT_PROGRAM_ARB);
829 checkGLcall("Test fragment program setup");
830
831 gl_info->gl_ops.gl.p_glGetIntegerv(GL_PROGRAM_ERROR_POSITION_ARB, &err_pos);
832 if (err_pos != -1)
833 {
834 const char *error_str;
835 error_str = (const char *)gl_info->gl_ops.gl.p_glGetString(GL_PROGRAM_ERROR_STRING_ARB);
836 FIXME("Fog test program error at position %d: %s\n\n", err_pos, debugstr_a(error_str));
837 }
838
839 gl_info->gl_ops.gl.p_glBegin(GL_TRIANGLE_STRIP);
840 gl_info->gl_ops.gl.p_glVertex3f(-1.0f, -1.0f, 0.0f);
841 gl_info->gl_ops.gl.p_glVertex3f( 1.0f, -1.0f, 1.0f);
842 gl_info->gl_ops.gl.p_glVertex3f(-1.0f, 1.0f, 0.0f);
843 gl_info->gl_ops.gl.p_glVertex3f( 1.0f, 1.0f, 1.0f);
844 gl_info->gl_ops.gl.p_glEnd();
845 checkGLcall("ARBfp fog test draw");
846
847 gl_info->gl_ops.gl.p_glGetTexImage(GL_TEXTURE_2D, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, data);
848 checkGLcall("glGetTexImage");
849 data[0] &= 0x00ffffff;
850 data[1] &= 0x00ffffff;
851 data[2] &= 0x00ffffff;
852 data[3] &= 0x00ffffff;
853
854 gl_info->fbo_ops.glBindFramebuffer(GL_FRAMEBUFFER, 0);
855 gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_2D, 0);
856
857 gl_info->fbo_ops.glDeleteFramebuffers(1, &fbo);
858 gl_info->gl_ops.gl.p_glDeleteTextures(1, &tex);
859 gl_info->gl_ops.gl.p_glDisable(GL_FOG);
860 GL_EXTCALL(glBindProgramARB(GL_FRAGMENT_PROGRAM_ARB, 0));
861 gl_info->gl_ops.gl.p_glDisable(GL_FRAGMENT_PROGRAM_ARB);
862 GL_EXTCALL(glDeleteProgramsARB(1, &prog));
863 checkGLcall("ARBfp fog test teardown");
864
865 TRACE("Fog test data: %08x %08x %08x %08x\n", data[0], data[1], data[2], data[3]);
866 return data[0] != 0x00ff0000 || data[3] != 0x0000ff00;
867 }
868
869 static void quirk_apple_glsl_constants(struct wined3d_gl_info *gl_info)
870 {
871 /* MacOS needs uniforms for relative addressing offsets. This can accumulate to quite a few uniforms.
872 * Beyond that, the general uniform handling isn't optimal, so reserve a number of uniforms. 12 vec4's should
873 * allow 48 different offsets or other helper immediate values. */
874 TRACE("Reserving 12 GLSL constants for compiler private use.\n");
875 gl_info->reserved_glsl_constants = max(gl_info->reserved_glsl_constants, 12);
876 }
877
878 static void quirk_amd_dx9(struct wined3d_gl_info *gl_info)
879 {
880 /* MacOS advertises GL_ARB_texture_non_power_of_two on ATI r500 and earlier cards, although
881 * these cards only support GL_ARB_texture_rectangle (D3DPTEXTURECAPS_NONPOW2CONDITIONAL).
882 * If real NP2 textures are used, the driver falls back to software. We could just remove the
883 * extension and use GL_ARB_texture_rectangle instead, but texture_rectangle is inconvenient
884 * due to the non-normalized texture coordinates. Thus set an internal extension flag,
885 * GL_WINE_normalized_texrect, which signals the code that it can use non-power-of-two textures
886 * as per GL_ARB_texture_non_power_of_two, but has to stick to the texture_rectangle limits.
887 *
888 * fglrx doesn't advertise GL_ARB_texture_non_power_of_two, but it advertises OpenGL 2.0, which
889 * has this extension promoted to core. The extension loading code marks the extension as supported
890 * because of that, so this code works on fglrx as well. */
891 if (gl_info->supported[ARB_TEXTURE_NON_POWER_OF_TWO])
892 {
893 TRACE("GL_ARB_texture_non_power_of_two advertised on R500 or earlier card, removing.\n");
894 gl_info->supported[ARB_TEXTURE_NON_POWER_OF_TWO] = FALSE;
895 gl_info->supported[WINED3D_GL_NORMALIZED_TEXRECT] = TRUE;
896 }
897 }
898
899 static void quirk_no_np2(struct wined3d_gl_info *gl_info)
900 {
901 /* The nVidia GeForceFX series reports OpenGL 2.0 capabilities with the latest driver versions, but
902 * doesn't explicitly advertise the ARB_tex_npot extension in the GL extension string.
903 * This usually means that ARB_tex_npot is supported in hardware as long as the application is staying
904 * within the limits enforced by the ARB_texture_rectangle extension. This, however, is not true for the
905 * FX series, which instantly falls back to a slower software path as soon as ARB_tex_npot is used.
906 * We therefore completely remove ARB_tex_npot from the list of supported extensions.
907 *
908 * Note that wine_normalized_texrect can't be used in this case because internally it uses ARB_tex_npot,
909 * triggering the software fallback. There is not much we can do here apart from disabling the
910 * software-emulated extension and re-enabling ARB_tex_rect (which was previously disabled
911 * in wined3d_adapter_init_gl_caps).
912 * This fixup removes performance problems on both the FX 5900 and FX 5700 (e.g. for framebuffer
913 * post-processing effects in the game "Max Payne 2").
914 * The behaviour can be verified through a simple test app attached in bugreport #14724. */
915 TRACE("GL_ARB_texture_non_power_of_two advertised through OpenGL 2.0 on NV FX card, removing.\n");
916 gl_info->supported[ARB_TEXTURE_NON_POWER_OF_TWO] = FALSE;
917 gl_info->supported[ARB_TEXTURE_RECTANGLE] = TRUE;
918 }
919
920 static void quirk_texcoord_w(struct wined3d_gl_info *gl_info)
921 {
922 /* The Intel GPUs on MacOS set the .w register of texcoords to 0.0 by default, which causes problems
923 * with fixed function fragment processing. Ideally this flag should be detected with a test shader
924 * and OpenGL feedback mode, but some GL implementations (MacOS ATI at least, probably all MacOS ones)
925 * do not like vertex shaders in feedback mode and return an error, even though it should be valid
926 * according to the spec.
927 *
928 * We don't want to enable this on all cards, as it adds an extra instruction per texcoord used. This
929 * makes the shader slower and eats instruction slots which should be available to the d3d app.
930 *
931 * ATI Radeon HD 2xxx cards on MacOS have the issue. Instead of checking for the buggy cards, blacklist
932 * all radeon cards on Macs and whitelist the good ones. That way we're prepared for the future. If
933 * this workaround is activated on cards that do not need it, it won't break things, just affect
934 * performance negatively. */
935 TRACE("Enabling vertex texture coord fixes in vertex shaders.\n");
936 gl_info->quirks |= WINED3D_QUIRK_SET_TEXCOORD_W;
937 }
938
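/* Reserve a GLSL varying for gl_ClipPos handling, see match_dx10_capable() and the quirk_table entry. */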
939 static void quirk_clip_varying(struct wined3d_gl_info *gl_info)
940 {
941 gl_info->quirks |= WINED3D_QUIRK_GLSL_CLIP_VARYING;
942 }
943
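/* The GL implementation accepts 4 component secondary colors, see match_allows_spec_alpha()
 * and the corresponding quirk_table entry. */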
944 static void quirk_allows_specular_alpha(struct wined3d_gl_info *gl_info)
945 {
946 gl_info->quirks |= WINED3D_QUIRK_ALLOWS_SPECULAR_ALPHA;
947 }
948
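/* result.clip[] is broken with GL_NV_vertex_program2_option on this driver, see match_broken_nv_clip(). */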
949 static void quirk_disable_nvvp_clip(struct wined3d_gl_info *gl_info)
950 {
951 gl_info->quirks |= WINED3D_QUIRK_NV_CLIP_BROKEN;
952 }
953
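/* The driver needs an FBO rebind to pick up glTexSubImage() updates to an attached texture,
 * see match_fbo_tex_update(). */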
954 static void quirk_fbo_tex_update(struct wined3d_gl_info *gl_info)
955 {
956 gl_info->quirks |= WINED3D_QUIRK_FBO_TEX_UPDATE;
957 }
958
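/* GL_RGBA16 is stored with less than 16 bits per channel, see match_broken_rgba16(). */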
959 static void quirk_broken_rgba16(struct wined3d_gl_info *gl_info)
960 {
961 gl_info->quirks |= WINED3D_QUIRK_BROKEN_RGBA16;
962 }
963
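/* fglrx prints spurious GLSL info log messages, so don't print them, see the quirk_table entry. */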
964 static void quirk_infolog_spam(struct wined3d_gl_info *gl_info)
965 {
966 gl_info->quirks |= WINED3D_QUIRK_INFO_LOG_SPAM;
967 }
968
969 static void quirk_limited_tex_filtering(struct wined3d_gl_info *gl_info)
970 {
971 /* Nvidia GeForce 6xxx and 7xxx support accelerated VTF only on a few
972 * selected texture formats. They are apparently the only DX9-class GPUs
973 * supporting VTF.
974 * Also, DX9-era GPUs are somewhat limited with float texture
975 * filtering and blending. */
976 gl_info->quirks |= WINED3D_QUIRK_LIMITED_TEX_FILTERING;
977 }
978
979 static void quirk_r200_constants(struct wined3d_gl_info *gl_info)
980 {
981 /* The Mesa r200 driver (and there is no other driver for this GPU Wine would run on)
982 * loads some fog parameters (start, end, exponent, but not the color) into the
983 * program.
984 *
985 * Apparently the fog hardware is only able to handle linear fog with a range of 0.0;1.0,
986 * and it is the responsibility of the vertex pipeline to handle non-linear fog and
987 * linear fog with start and end other than 0.0 and 1.0. */
988 TRACE("Reserving 1 ARB constant for compiler private use.\n");
989 gl_info->reserved_arb_constants = max(gl_info->reserved_arb_constants, 1);
990 }
991
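/* Linear fog with fogstart == fogend is broken with ARB fragment programs, see match_broken_arb_fog(). */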
992 static void quirk_broken_arb_fog(struct wined3d_gl_info *gl_info)
993 {
994 gl_info->quirks |= WINED3D_QUIRK_BROKEN_ARB_FOG;
995 }
996
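/* A quirk from quirk_table below is applied to the gl_info when its match() function returns
 * TRUE for the detected GL implementation and GPU. */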
997 struct driver_quirk
998 {
999 BOOL (*match)(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
1000 enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device);
1001 void (*apply)(struct wined3d_gl_info *gl_info);
1002 const char *description;
1003 };
1004
1005 static const struct driver_quirk quirk_table[] =
1006 {
1007 {
1008 match_amd_r300_to_500,
1009 quirk_amd_dx9,
1010 "AMD normalized texrect quirk"
1011 },
1012 {
1013 match_apple,
1014 quirk_apple_glsl_constants,
1015 "Apple GLSL uniform override"
1016 },
1017 {
1018 match_geforce5,
1019 quirk_no_np2,
1020 "Geforce 5 NP2 disable"
1021 },
1022 {
1023 match_apple_intel,
1024 quirk_texcoord_w,
1025 "Init texcoord .w for Apple Intel GPU driver"
1026 },
1027 {
1028 match_apple_nonr500ati,
1029 quirk_texcoord_w,
1030 "Init texcoord .w for Apple ATI >= r600 GPU driver"
1031 },
1032 {
1033 match_dx10_capable,
1034 quirk_clip_varying,
1035 "Reserved varying for gl_ClipPos"
1036 },
1037 {
1038 /* GL_EXT_secondary_color does not allow 4 component secondary colors, but most
1039 * GL implementations accept it. The Mac GL is the only implementation known to
1040 * reject it.
1041 *
1042 * If we can pass 4 component specular colors, do it, because (a) we don't have
1043 * to screw around with the data, and (b) the D3D fixed function vertex pipeline
1044 * passes specular alpha to the pixel shader if any is used. Otherwise the
1045 * specular alpha is used to pass the fog coordinate, which we pass to OpenGL
1046 * via GL_EXT_fog_coord.
1047 */
1048 match_allows_spec_alpha,
1049 quirk_allows_specular_alpha,
1050 "Allow specular alpha quirk"
1051 },
1052 {
1053 match_broken_nv_clip,
1054 quirk_disable_nvvp_clip,
1055 "Apple NV_vertex_program clip bug quirk"
1056 },
1057 {
1058 match_fbo_tex_update,
1059 quirk_fbo_tex_update,
1060 "FBO rebind for attachment updates"
1061 },
1062 {
1063 match_broken_rgba16,
1064 quirk_broken_rgba16,
1065 "True RGBA16 is not available"
1066 },
1067 {
1068 match_fglrx,
1069 quirk_infolog_spam,
1070 "Not printing GLSL infolog"
1071 },
1072 {
1073 match_not_dx10_capable,
1074 quirk_limited_tex_filtering,
1075 "Texture filtering, blending and VTF support is limited"
1076 },
1077 {
1078 match_r200,
1079 quirk_r200_constants,
1080 "r200 vertex shader constants"
1081 },
1082 {
1083 match_broken_arb_fog,
1084 quirk_broken_arb_fog,
1085 "ARBfp fogstart == fogend workaround"
1086 },
1087 };
1088
1089 /* Certain applications (Steam) complain if we report an outdated driver version. In general,
1090 * reporting a driver version is moot because we are not the Windows driver, and we have different
1091 * bugs, features, etc.
1092 *
1093 * The driver version has the form "x.y.z.w".
1094 *
1095 * "x" is the Windows version the driver is meant for:
1096 * 4 -> 95/98/NT4
1097 * 5 -> 2000
1098 * 6 -> 2000/XP
1099 * 7 -> Vista
1100 * 8 -> Win 7
1101 *
1102 * "y" is the maximum Direct3D version the driver supports.
1103 * y -> d3d version mapping:
1104 * 11 -> d3d6
1105 * 12 -> d3d7
1106 * 13 -> d3d8
1107 * 14 -> d3d9
1108 * 15 -> d3d10
1109 * 16 -> d3d10.1
1110 * 17 -> d3d11
1111 *
1112 * "z" is the subversion number.
1113 *
1114 * "w" is the vendor specific driver build number.
1115 */
1116
1117 struct driver_version_information
1118 {
1119 enum wined3d_display_driver driver;
1120 enum wined3d_driver_model driver_model;
1121 const char *driver_name; /* name of Windows driver */
1122 WORD version; /* version word ('y'), contained in low word of DriverVersion.HighPart */
1123 WORD subversion; /* subversion word ('z'), contained in high word of DriverVersion.LowPart */
1124 WORD build; /* build number ('w'), contained in low word of DriverVersion.LowPart */
1125 };
1126
1127 /* The driver version table contains driver information for different devices on several OS versions. */
1128 static const struct driver_version_information driver_version_table[] =
1129 {
1130 /* AMD
1131 * - Radeon HD2x00 (R600) and up supported by current drivers.
1132 * - Radeon 9500 (R300) - X1*00 (R5xx) supported up to Catalyst 9.3 (Linux) and 10.2 (XP/Vista/Win7)
1133 * - Radeon 7xxx (R100) - 9250 (RV250) supported up to Catalyst 6.11 (XP)
1134 * - Rage 128 supported up to XP, latest official build 6.13.3279 dated October 2001 */
1135 {DRIVER_AMD_RAGE_128PRO, DRIVER_MODEL_NT5X, "ati2dvaa.dll", 13, 3279, 0},
1136 {DRIVER_AMD_R100, DRIVER_MODEL_NT5X, "ati2dvag.dll", 14, 10, 6614},
1137 {DRIVER_AMD_R300, DRIVER_MODEL_NT5X, "ati2dvag.dll", 14, 10, 6764},
1138 {DRIVER_AMD_R600, DRIVER_MODEL_NT5X, "ati2dvag.dll", 14, 10, 8681},
1139 {DRIVER_AMD_R300, DRIVER_MODEL_NT6X, "atiumdag.dll", 14, 10, 741 },
1140 {DRIVER_AMD_R600, DRIVER_MODEL_NT6X, "atiumdag.dll", 14, 10, 741 },
1141
1142 /* Intel
1143 * The drivers are unified, but not all versions support all GPUs. The 2k/XP drivers
1144 * originally used ialmrnt5.dll for GMA800/GMA900; at some point the file was renamed to
1145 * igxprd32.dll, but the GMA800 driver was never updated. */
1146 {DRIVER_INTEL_GMA800, DRIVER_MODEL_NT5X, "ialmrnt5.dll", 14, 10, 3889},
1147 {DRIVER_INTEL_GMA900, DRIVER_MODEL_NT5X, "igxprd32.dll", 14, 10, 4764},
1148 {DRIVER_INTEL_GMA950, DRIVER_MODEL_NT5X, "igxprd32.dll", 14, 10, 4926},
1149 {DRIVER_INTEL_GMA3000, DRIVER_MODEL_NT5X, "igxprd32.dll", 14, 10, 5218},
1150 {DRIVER_INTEL_GMA950, DRIVER_MODEL_NT6X, "igdumd32.dll", 14, 10, 1504},
1151 {DRIVER_INTEL_GMA3000, DRIVER_MODEL_NT6X, "igdumd32.dll", 15, 10, 1666},
1152
1153 /* Nvidia
1154 * - Geforce6 and newer cards are supported by the current driver (197.x) on XP-Win7
1155 * - GeforceFX support is up to 173.x on <= XP
1156 * - Geforce2MX/3/4 up to 96.x on <= XP
1157 * - TNT/Geforce1/2 up to 71.x on <= XP
1158 * All version numbers used below are from the Linux nvidia drivers. */
1159 {DRIVER_NVIDIA_TNT, DRIVER_MODEL_NT5X, "nv4_disp.dll", 14, 10, 7186},
1160 {DRIVER_NVIDIA_GEFORCE2MX, DRIVER_MODEL_NT5X, "nv4_disp.dll", 14, 10, 9371},
1161 {DRIVER_NVIDIA_GEFORCEFX, DRIVER_MODEL_NT5X, "nv4_disp.dll", 14, 11, 7516},
1162 {DRIVER_NVIDIA_GEFORCE6, DRIVER_MODEL_NT5X, "nv4_disp.dll", 15, 12, 6658},
1163 {DRIVER_NVIDIA_GEFORCE6, DRIVER_MODEL_NT6X, "nvd3dum.dll", 15, 12, 6658},
1164 };
1165
1166 struct gpu_description
1167 {
1168 WORD vendor; /* reported PCI card vendor ID */
1169 WORD card; /* reported PCI card device ID */
1170 const char *description; /* Description of the card e.g. NVIDIA RIVA TNT */
1171 enum wined3d_display_driver driver;
1172 unsigned int vidmem;
1173 };
1174
1175 /* The amount of video memory stored in the gpu description table is the minimum amount of video memory
1176 * found on a board containing a specific GPU. */
1177 static const struct gpu_description gpu_description_table[] =
1178 {
1179 /* Nvidia cards */
1180 {HW_VENDOR_NVIDIA, CARD_NVIDIA_RIVA_TNT, "NVIDIA RIVA TNT", DRIVER_NVIDIA_TNT, 16 },
1181 {HW_VENDOR_NVIDIA, CARD_NVIDIA_RIVA_TNT2, "NVIDIA RIVA TNT2/TNT2 Pro", DRIVER_NVIDIA_TNT, 32 },
1182 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE, "NVIDIA GeForce 256", DRIVER_NVIDIA_TNT, 32 },
1183 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE2, "NVIDIA GeForce2 GTS/GeForce2 Pro", DRIVER_NVIDIA_TNT, 32 },
1184 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE2_MX, "NVIDIA GeForce2 MX/MX 400", DRIVER_NVIDIA_GEFORCE2MX,32 },
1185 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE3, "NVIDIA GeForce3", DRIVER_NVIDIA_GEFORCE2MX,64 },
1186 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE4_MX, "NVIDIA GeForce4 MX 460", DRIVER_NVIDIA_GEFORCE2MX,64 },
1187 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE4_TI4200, "NVIDIA GeForce4 Ti 4200", DRIVER_NVIDIA_GEFORCE2MX,64, },
1188 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCEFX_5200, "NVIDIA GeForce FX 5200", DRIVER_NVIDIA_GEFORCEFX, 64 },
1189 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCEFX_5600, "NVIDIA GeForce FX 5600", DRIVER_NVIDIA_GEFORCEFX, 128 },
1190 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCEFX_5800, "NVIDIA GeForce FX 5800", DRIVER_NVIDIA_GEFORCEFX, 256 },
1191 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_6200, "NVIDIA GeForce 6200", DRIVER_NVIDIA_GEFORCE6, 64 },
1192 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_6600GT, "NVIDIA GeForce 6600 GT", DRIVER_NVIDIA_GEFORCE6, 128 },
1193 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_6800, "NVIDIA GeForce 6800", DRIVER_NVIDIA_GEFORCE6, 128 },
1194 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_7300, "NVIDIA GeForce Go 7300", DRIVER_NVIDIA_GEFORCE6, 256 },
1195 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_7400, "NVIDIA GeForce Go 7400", DRIVER_NVIDIA_GEFORCE6, 256 },
1196 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_7600, "NVIDIA GeForce 7600 GT", DRIVER_NVIDIA_GEFORCE6, 256 },
1197 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_7800GT, "NVIDIA GeForce 7800 GT", DRIVER_NVIDIA_GEFORCE6, 256 },
1198 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_8300GS, "NVIDIA GeForce 8300 GS", DRIVER_NVIDIA_GEFORCE6, 128 },
1199 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_8400GS, "NVIDIA GeForce 8400 GS", DRIVER_NVIDIA_GEFORCE6, 128 },
1200 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_8600GT, "NVIDIA GeForce 8600 GT", DRIVER_NVIDIA_GEFORCE6, 256 },
1201 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_8600MGT, "NVIDIA GeForce 8600M GT", DRIVER_NVIDIA_GEFORCE6, 512 },
1202 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_8800GTS, "NVIDIA GeForce 8800 GTS", DRIVER_NVIDIA_GEFORCE6, 320 },
1203 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_8800GTX, "NVIDIA GeForce 8800 GTX", DRIVER_NVIDIA_GEFORCE6, 768 },
1204 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_9200, "NVIDIA GeForce 9200", DRIVER_NVIDIA_GEFORCE6, 256 },
1205 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_9300, "NVIDIA GeForce 9300", DRIVER_NVIDIA_GEFORCE6, 256 },
1206 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_9400M, "NVIDIA GeForce 9400M", DRIVER_NVIDIA_GEFORCE6, 256 },
1207 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_9400GT, "NVIDIA GeForce 9400 GT", DRIVER_NVIDIA_GEFORCE6, 256 },
1208 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_9500GT, "NVIDIA GeForce 9500 GT", DRIVER_NVIDIA_GEFORCE6, 256 },
1209 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_9600GT, "NVIDIA GeForce 9600 GT", DRIVER_NVIDIA_GEFORCE6, 384 },
1210 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_9800GT, "NVIDIA GeForce 9800 GT", DRIVER_NVIDIA_GEFORCE6, 512 },
1211 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_210, "NVIDIA GeForce 210", DRIVER_NVIDIA_GEFORCE6, 512 },
1212 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GT220, "NVIDIA GeForce GT 220", DRIVER_NVIDIA_GEFORCE6, 512 },
1213 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GT240, "NVIDIA GeForce GT 240", DRIVER_NVIDIA_GEFORCE6, 512 },
1214 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GTX260, "NVIDIA GeForce GTX 260", DRIVER_NVIDIA_GEFORCE6, 1024},
1215 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GTX275, "NVIDIA GeForce GTX 275", DRIVER_NVIDIA_GEFORCE6, 896 },
1216 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GTX280, "NVIDIA GeForce GTX 280", DRIVER_NVIDIA_GEFORCE6, 1024},
1217 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_315M, "NVIDIA GeForce 315M", DRIVER_NVIDIA_GEFORCE6, 512 },
1218 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_320M, "NVIDIA GeForce 320M", DRIVER_NVIDIA_GEFORCE6, 256},
1219 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_410M, "NVIDIA GeForce 410M", DRIVER_NVIDIA_GEFORCE6, 512},
1220 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GT320M, "NVIDIA GeForce GT 320M", DRIVER_NVIDIA_GEFORCE6, 1024},
1221 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GT325M, "NVIDIA GeForce GT 325M", DRIVER_NVIDIA_GEFORCE6, 1024},
1222 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GT330, "NVIDIA GeForce GT 330", DRIVER_NVIDIA_GEFORCE6, 1024},
1223 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GTS350M, "NVIDIA GeForce GTS 350M", DRIVER_NVIDIA_GEFORCE6, 1024},
1224 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GT420, "NVIDIA GeForce GT 420", DRIVER_NVIDIA_GEFORCE6, 2048},
1225 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GT430, "NVIDIA GeForce GT 430", DRIVER_NVIDIA_GEFORCE6, 1024},
1226 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GT440, "NVIDIA GeForce GT 440", DRIVER_NVIDIA_GEFORCE6, 1024},
1227 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GTS450, "NVIDIA GeForce GTS 450", DRIVER_NVIDIA_GEFORCE6, 1024},
1228 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GTX460, "NVIDIA GeForce GTX 460", DRIVER_NVIDIA_GEFORCE6, 768 },
1229 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GTX460M, "NVIDIA GeForce GTX 460M", DRIVER_NVIDIA_GEFORCE6, 1536},
1230 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GTX465, "NVIDIA GeForce GTX 465", DRIVER_NVIDIA_GEFORCE6, 1024},
1231 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GTX470, "NVIDIA GeForce GTX 470", DRIVER_NVIDIA_GEFORCE6, 1280},
1232 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GTX480, "NVIDIA GeForce GTX 480", DRIVER_NVIDIA_GEFORCE6, 1536},
1233 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GT520, "NVIDIA GeForce GT 520", DRIVER_NVIDIA_GEFORCE6, 1024},
1234 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GT540M, "NVIDIA GeForce GT 540M", DRIVER_NVIDIA_GEFORCE6, 1024},
1235 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GTX550, "NVIDIA GeForce GTX 550 Ti", DRIVER_NVIDIA_GEFORCE6, 1024},
1236 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GT555M, "NVIDIA GeForce GT 555M", DRIVER_NVIDIA_GEFORCE6, 1024},
1237 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GTX560TI, "NVIDIA GeForce GTX 560 Ti", DRIVER_NVIDIA_GEFORCE6, 1024},
1238 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GTX560, "NVIDIA GeForce GTX 560", DRIVER_NVIDIA_GEFORCE6, 1024},
1239 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GTX570, "NVIDIA GeForce GTX 570", DRIVER_NVIDIA_GEFORCE6, 1280},
1240 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GTX580, "NVIDIA GeForce GTX 580", DRIVER_NVIDIA_GEFORCE6, 1536},
1241 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GT610, "NVIDIA GeForce GT 610", DRIVER_NVIDIA_GEFORCE6, 1024},
1242 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GT630, "NVIDIA GeForce GT 630", DRIVER_NVIDIA_GEFORCE6, 1024},
1243 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GT630M, "NVIDIA GeForce GT 630M", DRIVER_NVIDIA_GEFORCE6, 1024},
1244 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GT640M, "NVIDIA GeForce GT 640M", DRIVER_NVIDIA_GEFORCE6, 1024},
1245 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GT650M, "NVIDIA GeForce GT 650M", DRIVER_NVIDIA_GEFORCE6, 2048},
1246 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GTX650, "NVIDIA GeForce GTX 650", DRIVER_NVIDIA_GEFORCE6, 1024},
1247 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GTX650TI, "NVIDIA GeForce GTX 650 Ti", DRIVER_NVIDIA_GEFORCE6, 1024},
1248 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GTX660, "NVIDIA GeForce GTX 660", DRIVER_NVIDIA_GEFORCE6, 2048},
1249 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GTX660TI, "NVIDIA GeForce GTX 660 Ti", DRIVER_NVIDIA_GEFORCE6, 2048},
1250 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GTX670, "NVIDIA GeForce GTX 670", DRIVER_NVIDIA_GEFORCE6, 2048},
1251 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GTX670MX, "NVIDIA GeForce GTX 670MX", DRIVER_NVIDIA_GEFORCE6, 3072},
1252 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GTX680, "NVIDIA GeForce GTX 680", DRIVER_NVIDIA_GEFORCE6, 2048},
1253 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GTX770M, "NVIDIA GeForce GTX 770M", DRIVER_NVIDIA_GEFORCE6, 3072},
1254 {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GTX770, "NVIDIA GeForce GTX 770", DRIVER_NVIDIA_GEFORCE6, 2048},
1255
1256 /* AMD cards */
1257 {HW_VENDOR_AMD, CARD_AMD_RAGE_128PRO, "ATI Rage Fury", DRIVER_AMD_RAGE_128PRO, 16 },
1258 {HW_VENDOR_AMD, CARD_AMD_RADEON_7200, "ATI RADEON 7200 SERIES", DRIVER_AMD_R100, 32 },
1259 {HW_VENDOR_AMD, CARD_AMD_RADEON_8500, "ATI RADEON 8500 SERIES", DRIVER_AMD_R100, 64 },
1260 {HW_VENDOR_AMD, CARD_AMD_RADEON_9500, "ATI Radeon 9500", DRIVER_AMD_R300, 64 },
1261 {HW_VENDOR_AMD, CARD_AMD_RADEON_XPRESS_200M, "ATI RADEON XPRESS 200M Series", DRIVER_AMD_R300, 64 },
1262 {HW_VENDOR_AMD, CARD_AMD_RADEON_X700, "ATI Radeon X700 SE", DRIVER_AMD_R300, 128 },
1263 {HW_VENDOR_AMD, CARD_AMD_RADEON_X1600, "ATI Radeon X1600 Series", DRIVER_AMD_R300, 128 },
1264 {HW_VENDOR_AMD, CARD_AMD_RADEON_HD2350, "ATI Mobility Radeon HD 2350", DRIVER_AMD_R600, 256 },
1265 {HW_VENDOR_AMD, CARD_AMD_RADEON_HD2600, "ATI Mobility Radeon HD 2600", DRIVER_AMD_R600, 256 },
1266 {HW_VENDOR_AMD, CARD_AMD_RADEON_HD2900, "ATI Radeon HD 2900 XT", DRIVER_AMD_R600, 512 },
1267 {HW_VENDOR_AMD, CARD_AMD_RADEON_HD3200, "ATI Radeon HD 3200 Graphics", DRIVER_AMD_R600, 128 },
1268 {HW_VENDOR_AMD, CARD_AMD_RADEON_HD4200M, "ATI Mobility Radeon HD 4200", DRIVER_AMD_R600, 256 },
1269 {HW_VENDOR_AMD, CARD_AMD_RADEON_HD4350, "ATI Radeon HD 4350", DRIVER_AMD_R600, 256 },
1270 {HW_VENDOR_AMD, CARD_AMD_RADEON_HD4600, "ATI Radeon HD 4600 Series", DRIVER_AMD_R600, 512 },
1271 {HW_VENDOR_AMD, CARD_AMD_RADEON_HD4700, "ATI Radeon HD 4700 Series", DRIVER_AMD_R600, 512 },
1272 {HW_VENDOR_AMD, CARD_AMD_RADEON_HD4800, "ATI Radeon HD 4800 Series", DRIVER_AMD_R600, 512 },
1273 {HW_VENDOR_AMD, CARD_AMD_RADEON_HD5400, "ATI Radeon HD 5400 Series", DRIVER_AMD_R600, 512 },
1274 {HW_VENDOR_AMD, CARD_AMD_RADEON_HD5600, "ATI Radeon HD 5600 Series", DRIVER_AMD_R600, 512 },
1275 {HW_VENDOR_AMD, CARD_AMD_RADEON_HD5700, "ATI Radeon HD 5700 Series", DRIVER_AMD_R600, 512 },
1276 {HW_VENDOR_AMD, CARD_AMD_RADEON_HD5800, "ATI Radeon HD 5800 Series", DRIVER_AMD_R600, 1024},
1277 {HW_VENDOR_AMD, CARD_AMD_RADEON_HD5900, "ATI Radeon HD 5900 Series", DRIVER_AMD_R600, 1024},
1278 {HW_VENDOR_AMD, CARD_AMD_RADEON_HD6300, "AMD Radeon HD 6300 series Graphics", DRIVER_AMD_R600, 1024},
1279 {HW_VENDOR_AMD, CARD_AMD_RADEON_HD6400, "AMD Radeon HD 6400 Series", DRIVER_AMD_R600, 1024},
1280 {HW_VENDOR_AMD, CARD_AMD_RADEON_HD6410D, "AMD Radeon HD 6410D", DRIVER_AMD_R600, 1024},
1281 {HW_VENDOR_AMD, CARD_AMD_RADEON_HD6550D, "AMD Radeon HD 6550D", DRIVER_AMD_R600, 1024},
1282 {HW_VENDOR_AMD, CARD_AMD_RADEON_HD6600, "AMD Radeon HD 6600 Series", DRIVER_AMD_R600, 1024},
1283 {HW_VENDOR_AMD, CARD_AMD_RADEON_HD6600M, "AMD Radeon HD 6600M Series", DRIVER_AMD_R600, 512 },
1284 {HW_VENDOR_AMD, CARD_AMD_RADEON_HD6700, "AMD Radeon HD 6700 Series", DRIVER_AMD_R600, 1024},
1285 {HW_VENDOR_AMD, CARD_AMD_RADEON_HD6800, "AMD Radeon HD 6800 Series", DRIVER_AMD_R600, 1024},
1286 {HW_VENDOR_AMD, CARD_AMD_RADEON_HD6900, "AMD Radeon HD 6900 Series", DRIVER_AMD_R600, 2048},
1287 {HW_VENDOR_AMD, CARD_AMD_RADEON_HD7700, "AMD Radeon HD 7700 Series", DRIVER_AMD_R600, 1024},
1288 {HW_VENDOR_AMD, CARD_AMD_RADEON_HD7800, "AMD Radeon HD 7800 Series", DRIVER_AMD_R600, 2048},
1289 {HW_VENDOR_AMD, CARD_AMD_RADEON_HD7900, "AMD Radeon HD 7900 Series", DRIVER_AMD_R600, 2048},
1290 /* Intel cards */
1291 {HW_VENDOR_INTEL, CARD_INTEL_830M, "Intel(R) 82830M Graphics Controller", DRIVER_INTEL_GMA800, 32 },
1292 {HW_VENDOR_INTEL, CARD_INTEL_855GM, "Intel(R) 82852/82855 GM/GME Graphics Controller", DRIVER_INTEL_GMA800, 32 },
1293 {HW_VENDOR_INTEL, CARD_INTEL_845G, "Intel(R) 845G", DRIVER_INTEL_GMA800, 32 },
1294 {HW_VENDOR_INTEL, CARD_INTEL_865G, "Intel(R) 82865G Graphics Controller", DRIVER_INTEL_GMA800, 32 },
1295 {HW_VENDOR_INTEL, CARD_INTEL_915G, "Intel(R) 82915G/GV/910GL Express Chipset Family", DRIVER_INTEL_GMA900, 64 },
1296 {HW_VENDOR_INTEL, CARD_INTEL_E7221G, "Intel(R) E7221G", DRIVER_INTEL_GMA900, 64 },
1297 {HW_VENDOR_INTEL, CARD_INTEL_915GM, "Mobile Intel(R) 915GM/GMS,910GML Express Chipset Family", DRIVER_INTEL_GMA900, 64 },
1298 {HW_VENDOR_INTEL, CARD_INTEL_945G, "Intel(R) 945G", DRIVER_INTEL_GMA950, 64 },
1299 {HW_VENDOR_INTEL, CARD_INTEL_945GM, "Mobile Intel(R) 945GM Express Chipset Family", DRIVER_INTEL_GMA950, 64 },
1300 {HW_VENDOR_INTEL, CARD_INTEL_945GME, "Intel(R) 945GME", DRIVER_INTEL_GMA950, 64 },
1301 {HW_VENDOR_INTEL, CARD_INTEL_Q35, "Intel(R) Q35", DRIVER_INTEL_GMA950, 64 },
1302 {HW_VENDOR_INTEL, CARD_INTEL_G33, "Intel(R) G33", DRIVER_INTEL_GMA950, 64 },
1303 {HW_VENDOR_INTEL, CARD_INTEL_Q33, "Intel(R) Q33", DRIVER_INTEL_GMA950, 64 },
1304 {HW_VENDOR_INTEL, CARD_INTEL_PNVG, "Intel(R) IGD", DRIVER_INTEL_GMA950, 64 },
1305 {HW_VENDOR_INTEL, CARD_INTEL_PNVM, "Intel(R) IGD", DRIVER_INTEL_GMA950, 64 },
1306 {HW_VENDOR_INTEL, CARD_INTEL_965Q, "Intel(R) 965Q", DRIVER_INTEL_GMA3000, 128},
1307 {HW_VENDOR_INTEL, CARD_INTEL_965G, "Intel(R) 965G", DRIVER_INTEL_GMA3000, 128},
1308 {HW_VENDOR_INTEL, CARD_INTEL_946GZ, "Intel(R) 946GZ", DRIVER_INTEL_GMA3000, 128},
1309 {HW_VENDOR_INTEL, CARD_INTEL_965GM, "Mobile Intel(R) 965 Express Chipset Family", DRIVER_INTEL_GMA3000, 128},
1310 {HW_VENDOR_INTEL, CARD_INTEL_965GME, "Intel(R) 965GME", DRIVER_INTEL_GMA3000, 128},
1311 {HW_VENDOR_INTEL, CARD_INTEL_GM45, "Mobile Intel(R) GM45 Express Chipset Family", DRIVER_INTEL_GMA3000, 512},
1312 {HW_VENDOR_INTEL, CARD_INTEL_IGD, "Intel(R) Integrated Graphics Device", DRIVER_INTEL_GMA3000, 512},
1313 {HW_VENDOR_INTEL, CARD_INTEL_G45, "Intel(R) G45/G43", DRIVER_INTEL_GMA3000, 512},
1314 {HW_VENDOR_INTEL, CARD_INTEL_Q45, "Intel(R) Q45/Q43", DRIVER_INTEL_GMA3000, 512},
1315 {HW_VENDOR_INTEL, CARD_INTEL_G41, "Intel(R) G41", DRIVER_INTEL_GMA3000, 512},
1316 {HW_VENDOR_INTEL, CARD_INTEL_B43, "Intel(R) B43", DRIVER_INTEL_GMA3000, 512},
1317 {HW_VENDOR_INTEL, CARD_INTEL_ILKD, "Intel(R) Ironlake Desktop", DRIVER_INTEL_GMA3000, 1024},
1318 {HW_VENDOR_INTEL, CARD_INTEL_ILKM, "Intel(R) Ironlake Mobile", DRIVER_INTEL_GMA3000, 1024},
1319 {HW_VENDOR_INTEL, CARD_INTEL_SNBD, "Intel(R) Sandybridge Desktop", DRIVER_INTEL_GMA3000, 1024},
1320 {HW_VENDOR_INTEL, CARD_INTEL_SNBM, "Intel(R) Sandybridge Mobile", DRIVER_INTEL_GMA3000, 1024},
1321 {HW_VENDOR_INTEL, CARD_INTEL_SNBS, "Intel(R) Sandybridge Server", DRIVER_INTEL_GMA3000, 1024},
1322 {HW_VENDOR_INTEL, CARD_INTEL_IVBD, "Intel(R) Ivybridge Desktop", DRIVER_INTEL_GMA3000, 1024},
1323 {HW_VENDOR_INTEL, CARD_INTEL_IVBM, "Intel(R) Ivybridge Mobile", DRIVER_INTEL_GMA3000, 1024},
1324 {HW_VENDOR_INTEL, CARD_INTEL_IVBS, "Intel(R) Ivybridge Server", DRIVER_INTEL_GMA3000, 1024},
1325 };
1326
1327 static const struct driver_version_information *get_driver_version_info(enum wined3d_display_driver driver,
1328 enum wined3d_driver_model driver_model)
1329 {
1330 unsigned int i;
1331
1332 TRACE("Looking up version info for driver=%d driver_model=%d\n", driver, driver_model);
1333 for (i = 0; i < (sizeof(driver_version_table) / sizeof(driver_version_table[0])); i++)
1334 {
1335 const struct driver_version_information *entry = &driver_version_table[i];
1336
1337 if (entry->driver == driver && entry->driver_model == driver_model)
1338 {
1339 TRACE("Found driver \"%s\", version %u, subversion %u, build %u.\n",
1340 entry->driver_name, entry->version, entry->subversion, entry->build);
1341 return entry;
1342 }
1343 }
1344 return NULL;
1345 }
1346
1347 static void init_driver_info(struct wined3d_driver_info *driver_info,
1348 enum wined3d_pci_vendor vendor, enum wined3d_pci_device device)
1349 {
1350 OSVERSIONINFOW os_version;
1351 WORD driver_os_version;
1352 unsigned int i;
1353 enum wined3d_display_driver driver = DRIVER_UNKNOWN;
1354 enum wined3d_driver_model driver_model;
1355 const struct driver_version_information *version_info;
1356
1357 if (wined3d_settings.pci_vendor_id != PCI_VENDOR_NONE)
1358 {
1359 TRACE("Overriding PCI vendor ID with 0x%04x.\n", wined3d_settings.pci_vendor_id);
1360 vendor = wined3d_settings.pci_vendor_id;
1361 }
1362 driver_info->vendor = vendor;
1363
1364 if (wined3d_settings.pci_device_id != PCI_DEVICE_NONE)
1365 {
1366 TRACE("Overriding PCI device ID with 0x%04x.\n", wined3d_settings.pci_device_id);
1367 device = wined3d_settings.pci_device_id;
1368 }
1369 driver_info->device = device;
1370
1371     /* Set a default amount of video memory (64MB). In general this default is only used when the user
1372      * overrides the PCI IDs with a card that is not in our database. */
1373 driver_info->vidmem = WINE_DEFAULT_VIDMEM;
1374
1375 memset(&os_version, 0, sizeof(os_version));
1376 os_version.dwOSVersionInfoSize = sizeof(os_version);
1377 if (!GetVersionExW(&os_version))
1378 {
1379 ERR("Failed to get OS version, reporting 2000/XP.\n");
1380 driver_os_version = 6;
1381 driver_model = DRIVER_MODEL_NT5X;
1382 }
1383 else
1384 {
1385 TRACE("OS version %u.%u.\n", os_version.dwMajorVersion, os_version.dwMinorVersion);
1386 switch (os_version.dwMajorVersion)
1387 {
1388 case 4:
1389             /* If needed we could distinguish between 9x and NT4, but doing so makes little
1390              * sense for NT4 since it had no way to obtain this info through DirectDraw 3.0.
1391 */
1392 driver_os_version = 4;
1393 driver_model = DRIVER_MODEL_WIN9X;
1394 break;
1395
1396 case 5:
1397 driver_os_version = 6;
1398 driver_model = DRIVER_MODEL_NT5X;
1399 break;
1400
1401 case 6:
1402 if (os_version.dwMinorVersion == 0)
1403 {
1404 driver_os_version = 7;
1405 driver_model = DRIVER_MODEL_NT6X;
1406 }
1407 else if (os_version.dwMinorVersion == 1)
1408 {
1409 driver_os_version = 8;
1410 driver_model = DRIVER_MODEL_NT6X;
1411 }
1412 else
1413 {
1414 if (os_version.dwMinorVersion > 2)
1415 {
1416 FIXME("Unhandled OS version %u.%u, reporting Win 8.\n",
1417 os_version.dwMajorVersion, os_version.dwMinorVersion);
1418 }
1419 driver_os_version = 9;
1420 driver_model = DRIVER_MODEL_NT6X;
1421 }
1422 break;
1423
1424 default:
1425 FIXME("Unhandled OS version %u.%u, reporting 2000/XP.\n",
1426 os_version.dwMajorVersion, os_version.dwMinorVersion);
1427 driver_os_version = 6;
1428 driver_model = DRIVER_MODEL_NT5X;
1429 break;
1430 }
1431 }
1432
1433 /* When we reach this stage we always have a vendor or device id (it can be a default one).
1434 * This means that unless the ids are overridden, we will always find a GPU description. */
1435 for (i = 0; i < (sizeof(gpu_description_table) / sizeof(gpu_description_table[0])); i++)
1436 {
1437 if (vendor == gpu_description_table[i].vendor && device == gpu_description_table[i].card)
1438 {
1439 TRACE("Found card %04x:%04x in driver DB.\n", vendor, device);
1440
1441 driver_info->description = gpu_description_table[i].description;
1442 driver_info->vidmem = gpu_description_table[i].vidmem * 1024*1024;
1443 driver = gpu_description_table[i].driver;
1444 break;
1445 }
1446 }
1447
1448 if (wined3d_settings.emulated_textureram)
1449 {
1450 TRACE("Overriding amount of video memory with %u bytes.\n", wined3d_settings.emulated_textureram);
1451 driver_info->vidmem = wined3d_settings.emulated_textureram;
1452 }
1453
1454     /* Try to obtain driver version information for the current Windows version. This fails in
1455      * some cases:
1456      * - The GPU does not exist for the currently selected OS version:
1457      *   - GeForce GTX 480 on Win98. When running applications in compatibility mode on Windows,
1458      *     version information for the current Windows version is returned instead of faked info.
1459      *     We do the same and assume the default Windows version to emulate is WinXP.
1460      *
1461      *   - The videocard is a Riva TNT but winver is set to Win7 (no drivers exist for this
1462      *     combination). For now return the XP driver info. Perhaps later on we should return VESA.
1463      *
1464      * - The GPU is not in our database (can happen when the user overrides the vendor_id / device_id).
1465      *   This could be an indication that our database is not up to date, so this should be fixed.
1466      */
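    /* version_high / version_low are presumably surfaced to applications as the
     * four-part "a.b.c.d" driver version, read as two 16-bit words per DWORD
     * from high to low. Assuming MAKEDWORD_VERSION() packs its arguments into
     * the high and low word, e.g. driver_os_version 8 with version 15,
     * subversion 11 and build 9062 would be reported as 8.15.11.9062. */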
1467 version_info = get_driver_version_info(driver, driver_model);
1468 if (version_info)
1469 {
1470 driver_info->name = version_info->driver_name;
1471 driver_info->version_high = MAKEDWORD_VERSION(driver_os_version, version_info->version);
1472 driver_info->version_low = MAKEDWORD_VERSION(version_info->subversion, version_info->build);
1473 }
1474 else
1475 {
1476 version_info = get_driver_version_info(driver, DRIVER_MODEL_NT5X);
1477 if (version_info)
1478 {
1479 driver_info->name = version_info->driver_name;
1480 driver_info->version_high = MAKEDWORD_VERSION(driver_os_version, version_info->version);
1481 driver_info->version_low = MAKEDWORD_VERSION(version_info->subversion, version_info->build);
1482 }
1483 else
1484 {
1485 driver_info->description = "Direct3D HAL";
1486 driver_info->name = "Display";
1487 driver_info->version_high = MAKEDWORD_VERSION(driver_os_version, 15);
1488 driver_info->version_low = MAKEDWORD_VERSION(8, 6); /* Nvidia RIVA TNT, arbitrary */
1489
1490 FIXME("Unable to find a driver/device info for vendor_id=%#x device_id=%#x for driver_model=%d\n",
1491 vendor, device, driver_model);
1492 }
1493 }
1494
1495 TRACE("Reporting (fake) driver version 0x%08x-0x%08x.\n",
1496 driver_info->version_high, driver_info->version_low);
1497 }
1498
1499 /* Context activation is done by the caller. */
1500 static void fixup_extensions(struct wined3d_gl_info *gl_info, const char *gl_renderer,
1501 enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
1502 {
1503 unsigned int i;
1504
1505 for (i = 0; i < (sizeof(quirk_table) / sizeof(*quirk_table)); ++i)
1506 {
1507 if (!quirk_table[i].match(gl_info, gl_renderer, gl_vendor, card_vendor, device)) continue;
1508 TRACE("Applying driver quirk \"%s\".\n", quirk_table[i].description);
1509 quirk_table[i].apply(gl_info);
1510 }
1511
1512 /* Find out if PBOs work as they are supposed to. */
1513 test_pbo_functionality(gl_info);
1514 }
1515
1516 static DWORD wined3d_parse_gl_version(const char *gl_version)
1517 {
1518 const char *ptr = gl_version;
1519 int major, minor;
1520
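    /* The GL_VERSION string is defined to start with "<major>.<minor>",
     * optionally followed by a release number and vendor-specific text,
     * e.g. "2.1.2 NVIDIA 173.14.36" or "3.0 Mesa 9.1.1". Only the leading
     * major.minor pair is parsed here. */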
1521 major = atoi(ptr);
1522 if (major <= 0)
1523 ERR("Invalid OpenGL major version %d.\n", major);
1524
1525 while (isdigit(*ptr)) ++ptr;
1526 if (*ptr++ != '.')
1527 ERR("Invalid OpenGL version string %s.\n", debugstr_a(gl_version));
1528
1529 minor = atoi(ptr);
1530
1531 TRACE("Found OpenGL version %d.%d.\n", major, minor);
1532
1533 return MAKEDWORD_VERSION(major, minor);
1534 }
1535
1536 static enum wined3d_gl_vendor wined3d_guess_gl_vendor(const struct wined3d_gl_info *gl_info,
1537 const char *gl_vendor_string, const char *gl_renderer)
1538 {
1539
1540     /* MacOS has various peculiarities in the extensions it advertises. Some have to be loaded from
1541      * the OpenGL 1.2+ core, while other extensions are advertised but software emulated. So try to
1542 * detect the Apple OpenGL implementation to apply some extension fixups afterwards.
1543 *
1544 * Detecting this isn't really easy. The vendor string doesn't mention Apple. Compile-time checks
1545      * aren't sufficient either because a Linux binary may display on a Mac OS X server via remote X11.
1546 * So try to detect the GL implementation by looking at certain Apple extensions. Some extensions
1547 * like client storage might be supported on other implementations too, but GL_APPLE_flush_render
1548 * is specific to the Mac OS X window management, and GL_APPLE_ycbcr_422 is QuickTime specific. So
1549 * the chance that other implementations support them is rather small since Win32 QuickTime uses
1550 * DirectDraw, not OpenGL. */
1551 if (gl_info->supported[APPLE_FENCE]
1552 && gl_info->supported[APPLE_CLIENT_STORAGE]
1553 && gl_info->supported[APPLE_YCBCR_422])
1554 return GL_VENDOR_APPLE;
1555
1556 if (strstr(gl_vendor_string, "NVIDIA"))
1557 return GL_VENDOR_NVIDIA;
1558
1559 if (strstr(gl_vendor_string, "ATI"))
1560 return GL_VENDOR_FGLRX;
1561
1562 if (strstr(gl_vendor_string, "Intel(R)")
1563 /* Intel switched from Intel(R) to Intel® recently, so just match Intel. */
1564 || strstr(gl_renderer, "Intel")
1565 || strstr(gl_vendor_string, "Intel Inc."))
1566 return GL_VENDOR_INTEL;
1567
1568 if (strstr(gl_vendor_string, "Mesa")
1569 || strstr(gl_vendor_string, "X.Org")
1570 || strstr(gl_vendor_string, "Advanced Micro Devices, Inc.")
1571 || strstr(gl_vendor_string, "DRI R300 Project")
1572 || strstr(gl_vendor_string, "Tungsten Graphics, Inc")
1573 || strstr(gl_vendor_string, "VMware, Inc.")
1574 || strstr(gl_renderer, "Mesa")
1575 || strstr(gl_renderer, "Gallium"))
1576 return GL_VENDOR_MESA;
1577
1578 FIXME("Received unrecognized GL_VENDOR %s. Returning GL_VENDOR_UNKNOWN.\n",
1579 debugstr_a(gl_vendor_string));
1580
1581 return GL_VENDOR_UNKNOWN;
1582 }
1583
1584 static enum wined3d_pci_vendor wined3d_guess_card_vendor(const char *gl_vendor_string, const char *gl_renderer)
1585 {
1586 if (strstr(gl_vendor_string, "NVIDIA")
1587 || strstr(gl_vendor_string, "Nouveau")
1588 || strstr(gl_vendor_string, "nouveau"))
1589 return HW_VENDOR_NVIDIA;
1590
1591 if (strstr(gl_vendor_string, "ATI")
1592 || strstr(gl_vendor_string, "Advanced Micro Devices, Inc.")
1593 || strstr(gl_vendor_string, "X.Org R300 Project")
1594 || strstr(gl_renderer, "AMD")
1595 || strstr(gl_renderer, "R100")
1596 || strstr(gl_renderer, "R200")
1597 || strstr(gl_renderer, "R300")
1598 || strstr(gl_renderer, "R600")
1599 || strstr(gl_renderer, "R700"))
1600 return HW_VENDOR_AMD;
1601
1602 if (strstr(gl_vendor_string, "Intel(R)")
1603 /* Intel switched from Intel(R) to Intel® recently, so just match Intel. */
1604 || strstr(gl_renderer, "Intel")
1605 || strstr(gl_renderer, "i915")
1606 || strstr(gl_vendor_string, "Intel Inc."))
1607 return HW_VENDOR_INTEL;
1608
1609 if (strstr(gl_vendor_string, "Mesa")
1610 || strstr(gl_vendor_string, "Brian Paul")
1611 || strstr(gl_vendor_string, "Tungsten Graphics, Inc")
1612 || strstr(gl_vendor_string, "VMware, Inc."))
1613 return HW_VENDOR_SOFTWARE;
1614
1615 FIXME("Received unrecognized GL_VENDOR %s. Returning HW_VENDOR_NVIDIA.\n", debugstr_a(gl_vendor_string));
1616
1617 return HW_VENDOR_NVIDIA;
1618 }
1619
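/* Rough estimate of the highest Direct3D version the GPU generation behind
 * this GL implementation can support, based on the advertised extensions.
 * It is used by the card selection code below to pick a plausible PCI ID. */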
1620 static UINT d3d_level_from_gl_info(const struct wined3d_gl_info *gl_info)
1621 {
1622 UINT level = 0;
1623
1624 if (gl_info->supported[ARB_MULTITEXTURE])
1625 level = 6;
1626 if (gl_info->supported[ARB_TEXTURE_COMPRESSION]
1627 && gl_info->supported[ARB_TEXTURE_CUBE_MAP]
1628 && gl_info->supported[ARB_TEXTURE_ENV_DOT3])
1629 level = 7;
1630 if (level == 7 && gl_info->supported[ARB_MULTISAMPLE]
1631 && gl_info->supported[ARB_TEXTURE_BORDER_CLAMP])
1632 level = 8;
1633 if (level == 8 && gl_info->supported[ARB_FRAGMENT_PROGRAM]
1634 && gl_info->supported[ARB_VERTEX_SHADER])
1635 level = 9;
1636 if (level == 9 && gl_info->supported[EXT_GPU_SHADER4])
1637 level = 10;
1638
1639 return level;
1640 }
1641
1642 static enum wined3d_pci_device select_card_nvidia_binary(const struct wined3d_gl_info *gl_info,
1643 const char *gl_renderer)
1644 {
1645 UINT d3d_level = d3d_level_from_gl_info(gl_info);
1646 unsigned int i;
1647
1648 if (d3d_level >= 10)
1649 {
1650 static const struct
1651 {
1652 const char *renderer;
1653 enum wined3d_pci_device id;
1654 }
1655 cards[] =
1656 {
1657 {"GTX 770M", CARD_NVIDIA_GEFORCE_GTX770M}, /* Geforce 700 - midend high mobile */
1658 {"GTX 770", CARD_NVIDIA_GEFORCE_GTX770}, /* Geforce 700 - highend */
1659 {"GTX 680", CARD_NVIDIA_GEFORCE_GTX680}, /* Geforce 600 - highend */
1660 {"GTX 670MX", CARD_NVIDIA_GEFORCE_GTX670MX}, /* Geforce 600 - highend */
1661 {"GTX 670", CARD_NVIDIA_GEFORCE_GTX670}, /* Geforce 600 - midend high */
1662 {"GTX 660 Ti", CARD_NVIDIA_GEFORCE_GTX660TI}, /* Geforce 600 - midend high */
1663 {"GTX 660", CARD_NVIDIA_GEFORCE_GTX660}, /* Geforce 600 - midend high */
1664 {"GTX 650 Ti", CARD_NVIDIA_GEFORCE_GTX650TI}, /* Geforce 600 - lowend */
1665 {"GTX 650", CARD_NVIDIA_GEFORCE_GTX650}, /* Geforce 600 - lowend */
1666 {"GT 650M", CARD_NVIDIA_GEFORCE_GT650M}, /* Geforce 600 - midend mobile */
1667 {"GT 640M", CARD_NVIDIA_GEFORCE_GT640M}, /* Geforce 600 - midend mobile */
1668 {"GT 630M", CARD_NVIDIA_GEFORCE_GT630M}, /* Geforce 600 - midend mobile */
1669 {"GT 630", CARD_NVIDIA_GEFORCE_GT630}, /* Geforce 600 - lowend */
1670 {"GT 610", CARD_NVIDIA_GEFORCE_GT610}, /* Geforce 600 - lowend */
1671 {"GTX 580", CARD_NVIDIA_GEFORCE_GTX580}, /* Geforce 500 - highend */
1672 {"GTX 570", CARD_NVIDIA_GEFORCE_GTX570}, /* Geforce 500 - midend high */
1673 {"GTX 560 Ti", CARD_NVIDIA_GEFORCE_GTX560TI}, /* Geforce 500 - midend */
1674 {"GTX 560", CARD_NVIDIA_GEFORCE_GTX560}, /* Geforce 500 - midend */
1675 {"GT 555M", CARD_NVIDIA_GEFORCE_GT555M}, /* Geforce 500 - midend mobile */
1676 {"GTX 550 Ti", CARD_NVIDIA_GEFORCE_GTX550}, /* Geforce 500 - midend */
1677 {"GT 540M", CARD_NVIDIA_GEFORCE_GT540M}, /* Geforce 500 - midend mobile */
1678 {"GT 520", CARD_NVIDIA_GEFORCE_GT520}, /* Geforce 500 - lowend */
1679 {"GTX 480", CARD_NVIDIA_GEFORCE_GTX480}, /* Geforce 400 - highend */
1680 {"GTX 470", CARD_NVIDIA_GEFORCE_GTX470}, /* Geforce 400 - midend high */
1681 {"GTX 465", CARD_NVIDIA_GEFORCE_GTX465}, /* Geforce 400 - midend */
1682 {"GTX 460M", CARD_NVIDIA_GEFORCE_GTX460M}, /* Geforce 400 - highend mobile */
1683 {"GTX 460", CARD_NVIDIA_GEFORCE_GTX460}, /* Geforce 400 - midend */
1684 {"GTS 450", CARD_NVIDIA_GEFORCE_GTS450}, /* Geforce 400 - midend low */
1685 {"GT 440", CARD_NVIDIA_GEFORCE_GT440}, /* Geforce 400 - lowend */
1686 {"GT 430", CARD_NVIDIA_GEFORCE_GT430}, /* Geforce 400 - lowend */
1687 {"GT 420", CARD_NVIDIA_GEFORCE_GT420}, /* Geforce 400 - lowend */
1688 {"410M", CARD_NVIDIA_GEFORCE_410M}, /* Geforce 400 - lowend mobile */
1689 {"GT 330", CARD_NVIDIA_GEFORCE_GT330}, /* Geforce 300 - highend */
1690 {"GTS 360M", CARD_NVIDIA_GEFORCE_GTS350M}, /* Geforce 300 - highend mobile */
1691 {"GTS 350M", CARD_NVIDIA_GEFORCE_GTS350M}, /* Geforce 300 - highend mobile */
1692 {"GT 330M", CARD_NVIDIA_GEFORCE_GT325M}, /* Geforce 300 - midend mobile */
1693 {"GT 325M", CARD_NVIDIA_GEFORCE_GT325M}, /* Geforce 300 - midend mobile */
1694 {"GT 320M", CARD_NVIDIA_GEFORCE_GT320M}, /* Geforce 300 - midend mobile */
1695 {"320M", CARD_NVIDIA_GEFORCE_320M}, /* Geforce 300 - midend mobile */
1696 {"315M", CARD_NVIDIA_GEFORCE_315M}, /* Geforce 300 - midend mobile */
1697 {"GTX 295", CARD_NVIDIA_GEFORCE_GTX280}, /* Geforce 200 - highend */
1698 {"GTX 285", CARD_NVIDIA_GEFORCE_GTX280}, /* Geforce 200 - highend */
1699 {"GTX 280", CARD_NVIDIA_GEFORCE_GTX280}, /* Geforce 200 - highend */
1700 {"GTX 275", CARD_NVIDIA_GEFORCE_GTX275}, /* Geforce 200 - midend high */
1701 {"GTX 260", CARD_NVIDIA_GEFORCE_GTX260}, /* Geforce 200 - midend */
1702 {"GT 240", CARD_NVIDIA_GEFORCE_GT240}, /* Geforce 200 - midend */
1703 {"GT 220", CARD_NVIDIA_GEFORCE_GT220}, /* Geforce 200 - lowend */
1704 {"Geforce 310", CARD_NVIDIA_GEFORCE_210}, /* Geforce 200 - lowend */
1705 {"Geforce 305", CARD_NVIDIA_GEFORCE_210}, /* Geforce 200 - lowend */
1706 {"Geforce 210", CARD_NVIDIA_GEFORCE_210}, /* Geforce 200 - lowend */
1707 {"G 210", CARD_NVIDIA_GEFORCE_210}, /* Geforce 200 - lowend */
1708 {"GTS 250", CARD_NVIDIA_GEFORCE_9800GT}, /* Geforce 9 - highend / Geforce 200 - midend */
1709 {"GTS 150", CARD_NVIDIA_GEFORCE_9800GT}, /* Geforce 9 - highend / Geforce 200 - midend */
1710 {"9800", CARD_NVIDIA_GEFORCE_9800GT}, /* Geforce 9 - highend / Geforce 200 - midend */
1711 {"GT 140", CARD_NVIDIA_GEFORCE_9600GT}, /* Geforce 9 - midend */
1712 {"9600", CARD_NVIDIA_GEFORCE_9600GT}, /* Geforce 9 - midend */
1713 {"GT 130", CARD_NVIDIA_GEFORCE_9500GT}, /* Geforce 9 - midend low / Geforce 200 - low */
1714 {"GT 120", CARD_NVIDIA_GEFORCE_9500GT}, /* Geforce 9 - midend low / Geforce 200 - low */
1715 {"9500", CARD_NVIDIA_GEFORCE_9500GT}, /* Geforce 9 - midend low / Geforce 200 - low */
1716 {"9400M", CARD_NVIDIA_GEFORCE_9400M}, /* Geforce 9 - lowend */
1717 {"9400", CARD_NVIDIA_GEFORCE_9400GT}, /* Geforce 9 - lowend */
1718 {"9300", CARD_NVIDIA_GEFORCE_9300}, /* Geforce 9 - lowend low */
1719 {"9200", CARD_NVIDIA_GEFORCE_9200}, /* Geforce 9 - lowend low */
1720 {"9100", CARD_NVIDIA_GEFORCE_9200}, /* Geforce 9 - lowend low */
1721 {"G 100", CARD_NVIDIA_GEFORCE_9200}, /* Geforce 9 - lowend low */
1722 {"8800 GTX", CARD_NVIDIA_GEFORCE_8800GTX}, /* Geforce 8 - highend high */
1723 {"8800", CARD_NVIDIA_GEFORCE_8800GTS}, /* Geforce 8 - highend */
1724 {"8600M", CARD_NVIDIA_GEFORCE_8600MGT}, /* Geforce 8 - midend mobile */
1725 {"8600 M", CARD_NVIDIA_GEFORCE_8600MGT}, /* Geforce 8 - midend mobile */
1726 {"8700", CARD_NVIDIA_GEFORCE_8600GT}, /* Geforce 8 - midend */
1727 {"8600", CARD_NVIDIA_GEFORCE_8600GT}, /* Geforce 8 - midend */
1728 {"8500", CARD_NVIDIA_GEFORCE_8400GS}, /* Geforce 8 - mid-lowend */
1729 {"8400", CARD_NVIDIA_GEFORCE_8400GS}, /* Geforce 8 - mid-lowend */
1730 {"8300", CARD_NVIDIA_GEFORCE_8300GS}, /* Geforce 8 - lowend */
1731 {"8200", CARD_NVIDIA_GEFORCE_8300GS}, /* Geforce 8 - lowend */
1732 {"8100", CARD_NVIDIA_GEFORCE_8300GS}, /* Geforce 8 - lowend */
1733 };
1734
1735 for (i = 0; i < sizeof(cards) / sizeof(*cards); ++i)
1736 {
1737 if (strstr(gl_renderer, cards[i].renderer))
1738 return cards[i].id;
1739 }
1740 return PCI_DEVICE_NONE;
1741 }
1742
1743     /* The GeForce FX, 6xxx and 7xxx series all support D3D9. The last two have more
1744      * shader capabilities, so we use those capabilities to distinguish FX from 6xxx/7xxx.
1745      */
1746 if (d3d_level >= 9 && gl_info->supported[NV_VERTEX_PROGRAM3])
1747 {
1748 static const struct
1749 {
1750 const char *renderer;
1751 enum wined3d_pci_device id;
1752 }
1753 cards[] =
1754 {
1755 {"Quadro FX 5", CARD_NVIDIA_GEFORCE_7800GT}, /* Geforce 7 - highend */
1756 {"Quadro FX 4", CARD_NVIDIA_GEFORCE_7800GT}, /* Geforce 7 - highend */
1757 {"7950", CARD_NVIDIA_GEFORCE_7800GT}, /* Geforce 7 - highend */
1758 {"7900", CARD_NVIDIA_GEFORCE_7800GT}, /* Geforce 7 - highend */
1759 {"7800", CARD_NVIDIA_GEFORCE_7800GT}, /* Geforce 7 - highend */
1760 {"7700", CARD_NVIDIA_GEFORCE_7600}, /* Geforce 7 - midend */
1761 {"7600", CARD_NVIDIA_GEFORCE_7600}, /* Geforce 7 - midend */
1762 {"7400", CARD_NVIDIA_GEFORCE_7400}, /* Geforce 7 - lower medium */
1763 {"7300", CARD_NVIDIA_GEFORCE_7300}, /* Geforce 7 - lowend */
1764 {"6800", CARD_NVIDIA_GEFORCE_6800}, /* Geforce 6 - highend */
1765 {"6700", CARD_NVIDIA_GEFORCE_6600GT}, /* Geforce 6 - midend */
1766 {"6610", CARD_NVIDIA_GEFORCE_6600GT}, /* Geforce 6 - midend */
1767 {"6600", CARD_NVIDIA_GEFORCE_6600GT}, /* Geforce 6 - midend */
1768 };
1769
1770 for (i = 0; i < sizeof(cards) / sizeof(*cards); ++i)
1771 {
1772 if (strstr(gl_renderer, cards[i].renderer))
1773 return cards[i].id;
1774 }
1775 return PCI_DEVICE_NONE;
1776 }
1777
1778 if (d3d_level >= 9)
1779 {
1780 /* GeforceFX - highend */
1781 if (strstr(gl_renderer, "5800")
1782 || strstr(gl_renderer, "5900")
1783 || strstr(gl_renderer, "5950")
1784 || strstr(gl_renderer, "Quadro FX"))
1785 {
1786 return CARD_NVIDIA_GEFORCEFX_5800;
1787 }
1788
1789 /* GeforceFX - midend */
1790 if (strstr(gl_renderer, "5600")
1791 || strstr(gl_renderer, "5650")
1792 || strstr(gl_renderer, "5700")
1793 || strstr(gl_renderer, "5750"))
1794 {
1795 return CARD_NVIDIA_GEFORCEFX_5600;
1796 }
1797
1798 /* GeforceFX - lowend */
1799 return CARD_NVIDIA_GEFORCEFX_5200; /* GeforceFX 5100/5200/5250/5300/5500 */
1800 }
1801
1802 if (d3d_level >= 8)
1803 {
1804 if (strstr(gl_renderer, "GeForce4 Ti") || strstr(gl_renderer, "Quadro4"))
1805 {
1806 return CARD_NVIDIA_GEFORCE4_TI4200; /* Geforce4 Ti4200/Ti4400/Ti4600/Ti4800, Quadro4 */
1807 }
1808
1809 return CARD_NVIDIA_GEFORCE3; /* Geforce3 standard/Ti200/Ti500, Quadro DCC */
1810 }
1811
1812 if (d3d_level >= 7)
1813 {
1814 if (strstr(gl_renderer, "GeForce4 MX"))
1815 {
1816 return CARD_NVIDIA_GEFORCE4_MX; /* MX420/MX440/MX460/MX4000 */
1817 }
1818
1819 if (strstr(gl_renderer, "GeForce2 MX") || strstr(gl_renderer, "Quadro2 MXR"))
1820 {
1821 return CARD_NVIDIA_GEFORCE2_MX; /* Geforce2 standard/MX100/MX200/MX400, Quadro2 MXR */
1822 }
1823
1824 if (strstr(gl_renderer, "GeForce2") || strstr(gl_renderer, "Quadro2"))
1825 {
1826 return CARD_NVIDIA_GEFORCE2; /* Geforce2 GTS/Pro/Ti/Ultra, Quadro2 */
1827 }
1828
1829 return CARD_NVIDIA_GEFORCE; /* Geforce 256/DDR, Quadro */
1830 }
1831
1832 if (strstr(gl_renderer, "TNT2"))
1833 {
1834 return CARD_NVIDIA_RIVA_TNT2; /* Riva TNT2 standard/M64/Pro/Ultra */
1835 }
1836
1837 return CARD_NVIDIA_RIVA_TNT; /* Riva TNT, Vanta */
1838 }
1839
1840 static enum wined3d_pci_device select_card_amd_binary(const struct wined3d_gl_info *gl_info,
1841 const char *gl_renderer)
1842 {
1843 UINT d3d_level = d3d_level_from_gl_info(gl_info);
1844
1845 /* See http://developer.amd.com/drivers/pc_vendor_id/Pages/default.aspx
1846 *
1847      * Beware: renderer strings do not match exact card models,
1848      * e.g. HD 4800 is returned for multiple cards, even for RV790 based ones. */
1849 if (d3d_level >= 10)
1850 {
1851 unsigned int i;
1852
1853 static const struct
1854 {
1855 const char *renderer;
1856 enum wined3d_pci_device id;
1857 }
1858 cards[] =
1859 {
1860 /* Southern Islands */
1861 {"HD 7900", CARD_AMD_RADEON_HD7900},
1862 {"HD 7800", CARD_AMD_RADEON_HD7800},
1863 {"HD 7700", CARD_AMD_RADEON_HD7700},
1864 /* Northern Islands */
1865 {"HD 6970", CARD_AMD_RADEON_HD6900},
1866 {"HD 6900", CARD_AMD_RADEON_HD6900},
1867 {"HD 6800", CARD_AMD_RADEON_HD6800},
1868 {"HD 6770M",CARD_AMD_RADEON_HD6600M},
1869 {"HD 6750M",CARD_AMD_RADEON_HD6600M},
1870 {"HD 6700", CARD_AMD_RADEON_HD6700},
1871 {"HD 6670", CARD_AMD_RADEON_HD6600},
1872 {"HD 6630M",CARD_AMD_RADEON_HD6600M},
1873 {"HD 6600M",CARD_AMD_RADEON_HD6600M},
1874 {"HD 6600", CARD_AMD_RADEON_HD6600},
1875 {"HD 6570", CARD_AMD_RADEON_HD6600},
1876 {"HD 6500M",CARD_AMD_RADEON_HD6600M},
1877 {"HD 6500", CARD_AMD_RADEON_HD6600},
1878 {"HD 6400", CARD_AMD_RADEON_HD6400},
1879 {"HD 6300", CARD_AMD_RADEON_HD6300},
1880 {"HD 6200", CARD_AMD_RADEON_HD6300},
1881 /* Evergreen */
1882 {"HD 5870", CARD_AMD_RADEON_HD5800}, /* Radeon EG CYPRESS PRO */
1883 {"HD 5850", CARD_AMD_RADEON_HD5800}, /* Radeon EG CYPRESS XT */
1884 {"HD 5800", CARD_AMD_RADEON_HD5800}, /* Radeon EG CYPRESS HD58xx generic renderer string */
1885 {"HD 5770", CARD_AMD_RADEON_HD5700}, /* Radeon EG JUNIPER XT */
1886 {"HD 5750", CARD_AMD_RADEON_HD5700}, /* Radeon EG JUNIPER LE */
1887 {"HD 5700", CARD_AMD_RADEON_HD5700}, /* Radeon EG JUNIPER HD57xx generic renderer string */
1888 {"HD 5670", CARD_AMD_RADEON_HD5600}, /* Radeon EG REDWOOD XT */
1889 {"HD 5570", CARD_AMD_RADEON_HD5600}, /* Radeon EG REDWOOD PRO mapped to HD5600 series */
1890 {"HD 5550", CARD_AMD_RADEON_HD5600}, /* Radeon EG REDWOOD LE mapped to HD5600 series */
1891 {"HD 5450", CARD_AMD_RADEON_HD5400}, /* Radeon EG CEDAR PRO */
1892 {"HD 5000", CARD_AMD_RADEON_HD5600}, /* Defaulting to HD 5600 */
1893 /* R700 */
1894 {"HD 4890", CARD_AMD_RADEON_HD4800}, /* Radeon RV790 */
1895 {"HD 4870", CARD_AMD_RADEON_HD4800}, /* Radeon RV770 */
1896 {"HD 4850", CARD_AMD_RADEON_HD4800}, /* Radeon RV770 */
1897 {"HD 4830", CARD_AMD_RADEON_HD4800}, /* Radeon RV770 */
1898 {"HD 4800", CARD_AMD_RADEON_HD4800}, /* Radeon RV7xx HD48xx generic renderer string */
1899 {"HD 4770", CARD_AMD_RADEON_HD4700}, /* Radeon RV740 */
1900 {"HD 4700", CARD_AMD_RADEON_HD4700}, /* Radeon RV7xx HD47xx generic renderer string */
1901 {"HD 4670", CARD_AMD_RADEON_HD4600}, /* Radeon RV730 */
1902 {"HD 4650", CARD_AMD_RADEON_HD4600}, /* Radeon RV730 */
1903 {"HD 4600", CARD_AMD_RADEON_HD4600}, /* Radeon RV730 */
1904 {"HD 4550", CARD_AMD_RADEON_HD4350}, /* Radeon RV710 */
1905 {"HD 4350", CARD_AMD_RADEON_HD4350}, /* Radeon RV710 */
1906 /* R600/R700 integrated */
1907 {"HD 4200M", CARD_AMD_RADEON_HD4200M},
1908 {"HD 3300", CARD_AMD_RADEON_HD3200},
1909 {"HD 3200", CARD_AMD_RADEON_HD3200},
1910 {"HD 3100", CARD_AMD_RADEON_HD3200},
1911 /* R600 */
1912 {"HD 3870", CARD_AMD_RADEON_HD2900}, /* HD2900/HD3800 - highend */
1913 {"HD 3850", CARD_AMD_RADEON_HD2900}, /* HD2900/HD3800 - highend */
1914 {"HD 2900", CARD_AMD_RADEON_HD2900}, /* HD2900/HD3800 - highend */
1915 {"HD 3830", CARD_AMD_RADEON_HD2600}, /* China-only midend */
1916 {"HD 3690", CARD_AMD_RADEON_HD2600}, /* HD2600/HD3600 - midend */
1917 {"HD 3650", CARD_AMD_RADEON_HD2600}, /* HD2600/HD3600 - midend */
1918 {"HD 2600", CARD_AMD_RADEON_HD2600}, /* HD2600/HD3600 - midend */
1919 {"HD 3470", CARD_AMD_RADEON_HD2350}, /* HD2350/HD2400/HD3400 - lowend */
1920 {"HD 3450", CARD_AMD_RADEON_HD2350}, /* HD2350/HD2400/HD3400 - lowend */
1921 {"HD 3430", CARD_AMD_RADEON_HD2350}, /* HD2350/HD2400/HD3400 - lowend */
1922 {"HD 3400", CARD_AMD_RADEON_HD2350}, /* HD2350/HD2400/HD3400 - lowend */
1923 {"HD 2400", CARD_AMD_RADEON_HD2350}, /* HD2350/HD2400/HD3400 - lowend */
1924 {"HD 2350", CARD_AMD_RADEON_HD2350}, /* HD2350/HD2400/HD3400 - lowend */
1925 };
1926
1927 for (i = 0; i < sizeof(cards) / sizeof(*cards); ++i)
1928 {
1929 if (strstr(gl_renderer, cards[i].renderer))
1930 return cards[i].id;
1931 }
1932 return PCI_DEVICE_NONE;
1933 }
1934
1935 if (d3d_level >= 9)
1936 {
1937 /* Radeon R5xx */
1938 if (strstr(gl_renderer, "X1600")
1939 || strstr(gl_renderer, "X1650")
1940 || strstr(gl_renderer, "X1800")
1941 || strstr(gl_renderer, "X1900")
1942 || strstr(gl_renderer, "X1950"))
1943 {
1944 return CARD_AMD_RADEON_X1600;
1945 }
1946
1947 /* Radeon R4xx + X1300/X1400/X1450/X1550/X2300/X2500/HD2300 (lowend R5xx)
1948 * Note X2300/X2500/HD2300 are R5xx GPUs with a 2xxx naming but they are still DX9-only */
1949 if (strstr(gl_renderer, "X700")
1950 || strstr(gl_renderer, "X800")
1951 || strstr(gl_renderer, "X850")
1952 || strstr(gl_renderer, "X1300")
1953 || strstr(gl_renderer, "X1400")
1954 || strstr(gl_renderer, "X1450")
1955 || strstr(gl_renderer, "X1550")
1956 || strstr(gl_renderer, "X2300")
1957 || strstr(gl_renderer, "X2500")
1958 || strstr(gl_renderer, "HD 2300")
1959 )
1960 {
1961 return CARD_AMD_RADEON_X700;
1962 }
1963
1964 /* Radeon Xpress Series - onboard, DX9b, Shader 2.0, 300-400MHz */
1965 if (strstr(gl_renderer, "Radeon Xpress"))
1966 {
1967 return CARD_AMD_RADEON_XPRESS_200M;
1968 }
1969 }
1970 return PCI_DEVICE_NONE;
1971 }
1972
1973 static enum wined3d_pci_device select_card_intel(const struct wined3d_gl_info *gl_info,
1974 const char *gl_renderer)
1975 {
1976 unsigned int i;
1977
1978 static const struct
1979 {
1980 const char *renderer;
1981 enum wined3d_pci_device id;
1982 }
1983 cards[] =
1984 {
1985 /* Ivybridge */
1986 {"Ivybridge Server", CARD_INTEL_IVBS},
1987 {"Ivybridge Mobile", CARD_INTEL_IVBM},
1988 {"Ivybridge Desktop", CARD_INTEL_IVBD},
1989 /* Sandybridge */
1990 {"Sandybridge Server", CARD_INTEL_SNBS},
1991 {"Sandybridge Mobile", CARD_INTEL_SNBM},
1992 {"Sandybridge Desktop", CARD_INTEL_SNBD},
1993 /* Ironlake */
1994 {"Ironlake Mobile", CARD_INTEL_ILKM},
1995 {"Ironlake Desktop", CARD_INTEL_ILKD},
1996 /* G4x */
1997 {"B43", CARD_INTEL_B43},
1998 {"G41", CARD_INTEL_G41},
1999 {"G45", CARD_INTEL_G45},
2000 {"Q45", CARD_INTEL_Q45},
2001 {"Integrated Graphics Device", CARD_INTEL_IGD},
2002 {"GM45", CARD_INTEL_GM45},
2003 /* i965 */
2004 {"965GME", CARD_INTEL_965GME},
2005 {"965GM", CARD_INTEL_965GM},
2006 {"X3100", CARD_INTEL_965GM}, /* MacOS */
2007 {"946GZ", CARD_INTEL_946GZ},
2008 {"965G", CARD_INTEL_965G},
2009 {"965Q", CARD_INTEL_965Q},
2010 /* i945 */
2011 {"Pineview M", CARD_INTEL_PNVM},
2012 {"Pineview G", CARD_INTEL_PNVG},
2013 {"IGD", CARD_INTEL_PNVG},
2014 {"Q33", CARD_INTEL_Q33},
2015 {"G33", CARD_INTEL_G33},
2016 {"Q35", CARD_INTEL_Q35},
2017 {"945GME", CARD_INTEL_945GME},
2018 {"945GM", CARD_INTEL_945GM},
2019 {"GMA 950", CARD_INTEL_945GM}, /* MacOS */
2020 {"945G", CARD_INTEL_945G},
2021 /* i915 */
2022 {"915GM", CARD_INTEL_915GM},
2023 {"E7221G", CARD_INTEL_E7221G},
2024 {"915G", CARD_INTEL_915G},
2025 /* i8xx */
2026 {"865G", CARD_INTEL_865G},
2027 {"845G", CARD_INTEL_845G},
2028 {"855GM", CARD_INTEL_855GM},
2029 {"830M", CARD_INTEL_830M},
2030 };
2031
2032 for (i = 0; i < sizeof(cards) / sizeof(*cards); ++i)
2033 {
2034 if (strstr(gl_renderer, cards[i].renderer))
2035 return cards[i].id;
2036 }
2037
2038 return PCI_DEVICE_NONE;
2039 }
2040
2041 static enum wined3d_pci_device select_card_amd_mesa(const struct wined3d_gl_info *gl_info,
2042 const char *gl_renderer)
2043 {
2044 unsigned int i;
2045
2046 /* 20101109 - These are never returned by current Gallium radeon
2047 * drivers: R700, RV790, R680, RV535, RV516, R410, RS485, RV360, RV351.
2048 *
2049 * These are returned but not handled: RC410, RV380. */
2050 static const struct
2051 {
2052 const char *renderer;
2053 enum wined3d_pci_device id;
2054 }
2055 cards[] =
2056 {
2057 /* Southern Islands */
2058 {"TAHITI", CARD_AMD_RADEON_HD7900},
2059 {"PITCAIRN", CARD_AMD_RADEON_HD7800},
2060 {"CAPE VERDE", CARD_AMD_RADEON_HD7700},
2061 /* Northern Islands */
2062 {"CAYMAN", CARD_AMD_RADEON_HD6900},
2063 {"BARTS", CARD_AMD_RADEON_HD6800},
2064 {"TURKS", CARD_AMD_RADEON_HD6600},
2065 {"SUMO2", CARD_AMD_RADEON_HD6410D}, /* SUMO2 first, because we do a strstr(). */
2066 {"SUMO", CARD_AMD_RADEON_HD6550D},
2067 {"CAICOS", CARD_AMD_RADEON_HD6400},
2068 {"PALM", CARD_AMD_RADEON_HD6300},
2069 /* Evergreen */
2070 {"HEMLOCK", CARD_AMD_RADEON_HD5900},
2071 {"CYPRESS", CARD_AMD_RADEON_HD5800},
2072 {"JUNIPER", CARD_AMD_RADEON_HD5700},
2073 {"REDWOOD", CARD_AMD_RADEON_HD5600},
2074 {"CEDAR", CARD_AMD_RADEON_HD5400},
2075 /* R700 */
2076 {"R700", CARD_AMD_RADEON_HD4800},
2077 {"RV790", CARD_AMD_RADEON_HD4800},
2078 {"RV770", CARD_AMD_RADEON_HD4800},
2079 {"RV740", CARD_AMD_RADEON_HD4700},
2080 {"RV730", CARD_AMD_RADEON_HD4600},
2081 {"RV710", CARD_AMD_RADEON_HD4350},
2082 /* R600/R700 integrated */
2083 {"RS880", CARD_AMD_RADEON_HD4200M},
2084 {"RS780", CARD_AMD_RADEON_HD3200},
2085 /* R600 */
2086 {"R680", CARD_AMD_RADEON_HD2900},
2087 {"R600", CARD_AMD_RADEON_HD2900},
2088 {"RV670", CARD_AMD_RADEON_HD2900},
2089 {"RV635", CARD_AMD_RADEON_HD2600},
2090 {"RV630", CARD_AMD_RADEON_HD2600},
2091 {"RV620", CARD_AMD_RADEON_HD2350},
2092 {"RV610", CARD_AMD_RADEON_HD2350},
2093 /* R500 */
2094 {"R580", CARD_AMD_RADEON_X1600},
2095 {"R520", CARD_AMD_RADEON_X1600},
2096 {"RV570", CARD_AMD_RADEON_X1600},
2097 {"RV560", CARD_AMD_RADEON_X1600},
2098 {"RV535", CARD_AMD_RADEON_X1600},
2099 {"RV530", CARD_AMD_RADEON_X1600},
2100 {"RV516", CARD_AMD_RADEON_X700},
2101 {"RV515", CARD_AMD_RADEON_X700},
2102 /* R400 */
2103 {"R481", CARD_AMD_RADEON_X700},
2104 {"R480", CARD_AMD_RADEON_X700},
2105 {"R430", CARD_AMD_RADEON_X700},
2106 {"R423", CARD_AMD_RADEON_X700},
2107 {"R420", CARD_AMD_RADEON_X700},
2108 {"R410", CARD_AMD_RADEON_X700},
2109 {"RV410", CARD_AMD_RADEON_X700},
2110 /* Radeon Xpress - onboard, DX9b, Shader 2.0, 300-400MHz */
2111 {"RS740", CARD_AMD_RADEON_XPRESS_200M},
2112 {"RS690", CARD_AMD_RADEON_XPRESS_200M},
2113 {"RS600", CARD_AMD_RADEON_XPRESS_200M},
2114 {"RS485", CARD_AMD_RADEON_XPRESS_200M},
2115 {"RS482", CARD_AMD_RADEON_XPRESS_200M},
2116 {"RS480", CARD_AMD_RADEON_XPRESS_200M},
2117 {"RS400", CARD_AMD_RADEON_XPRESS_200M},
2118 /* R300 */
2119 {"R360", CARD_AMD_RADEON_9500},
2120 {"R350", CARD_AMD_RADEON_9500},
2121 {"R300", CARD_AMD_RADEON_9500},
2122 {"RV370", CARD_AMD_RADEON_9500},
2123 {"RV360", CARD_AMD_RADEON_9500},
2124 {"RV351", CARD_AMD_RADEON_9500},
2125 {"RV350", CARD_AMD_RADEON_9500},
2126 };
2127
2128 for (i = 0; i < sizeof(cards) / sizeof(*cards); ++i)
2129 {
2130 if (strstr(gl_renderer, cards[i].renderer))
2131 return cards[i].id;
2132 }
2133
2134 return PCI_DEVICE_NONE;
2135 }
2136
2137 static enum wined3d_pci_device select_card_nvidia_mesa(const struct wined3d_gl_info *gl_info,
2138 const char *gl_renderer)
2139 {
2140 unsigned int i;
2141
2142 static const struct
2143 {
2144 const char *renderer;
2145 enum wined3d_pci_device id;
2146 }
2147 cards[] =
2148 {
2149 /* Kepler */
2150 {"NVE6", CARD_NVIDIA_GEFORCE_GTX770M},
2151 {"NVE4", CARD_NVIDIA_GEFORCE_GTX680},
2152 /* Fermi */
2153 {"NVD9", CARD_NVIDIA_GEFORCE_GT520},
2154 {"NVCF", CARD_NVIDIA_GEFORCE_GTX550},
2155 {"NVCE", CARD_NVIDIA_GEFORCE_GTX560},
2156 {"NVC8", CARD_NVIDIA_GEFORCE_GTX570},
2157 {"NVC4", CARD_NVIDIA_GEFORCE_GTX460},
2158 {"NVC3", CARD_NVIDIA_GEFORCE_GT440},
2159 {"NVC1", CARD_NVIDIA_GEFORCE_GT420},
2160 {"NVC0", CARD_NVIDIA_GEFORCE_GTX480},
2161 /* Tesla */
2162 {"NVAF", CARD_NVIDIA_GEFORCE_GT320M},
2163 {"NVAC", CARD_NVIDIA_GEFORCE_8200},
2164 {"NVAA", CARD_NVIDIA_GEFORCE_8200},
2165 {"NVA8", CARD_NVIDIA_GEFORCE_210},
2166 {"NVA5", CARD_NVIDIA_GEFORCE_GT220},
2167 {"NVA3", CARD_NVIDIA_GEFORCE_GT240},
2168 {"NVA0", CARD_NVIDIA_GEFORCE_GTX280},
2169 {"NV98", CARD_NVIDIA_GEFORCE_9200},
2170 {"NV96", CARD_NVIDIA_GEFORCE_9400GT},
2171 {"NV94", CARD_NVIDIA_GEFORCE_9600GT},
2172 {"NV92", CARD_NVIDIA_GEFORCE_9800GT},
2173 {"NV86", CARD_NVIDIA_GEFORCE_8500GT},
2174 {"NV84", CARD_NVIDIA_GEFORCE_8600GT},
2175 {"NV50", CARD_NVIDIA_GEFORCE_8800GTX},
2176 /* Curie */
2177 {"NV68", CARD_NVIDIA_GEFORCE_6200}, /* 7050 */
2178 {"NV67", CARD_NVIDIA_GEFORCE_6200}, /* 7000M */
2179 {"NV63", CARD_NVIDIA_GEFORCE_6200}, /* 7100 */
2180 {"NV4E", CARD_NVIDIA_GEFORCE_6200}, /* 6100 Go / 6150 Go */
2181 {"NV4C", CARD_NVIDIA_GEFORCE_6200}, /* 6150SE */
2182 {"NV4B", CARD_NVIDIA_GEFORCE_7600},
2183 {"NV4A", CARD_NVIDIA_GEFORCE_6200},
2184 {"NV49", CARD_NVIDIA_GEFORCE_7800GT}, /* 7900 */
2185 {"NV47", CARD_NVIDIA_GEFORCE_7800GT},
2186 {"NV46", CARD_NVIDIA_GEFORCE_7400},
2187 {"NV45", CARD_NVIDIA_GEFORCE_6800},
2188 {"NV44", CARD_NVIDIA_GEFORCE_6200},
2189 {"NV43", CARD_NVIDIA_GEFORCE_6600GT},
2190 {"NV42", CARD_NVIDIA_GEFORCE_6800},
2191 {"NV41", CARD_NVIDIA_GEFORCE_6800},
2192 {"NV40", CARD_NVIDIA_GEFORCE_6800},
2193 /* Rankine */
2194 {"NV38", CARD_NVIDIA_GEFORCEFX_5800}, /* FX 5950 Ultra */
2195 {"NV36", CARD_NVIDIA_GEFORCEFX_5800}, /* FX 5700/5750 */
2196 {"NV35", CARD_NVIDIA_GEFORCEFX_5800}, /* FX 5900 */
2197 {"NV34", CARD_NVIDIA_GEFORCEFX_5200},
2198 {"NV31", CARD_NVIDIA_GEFORCEFX_5600},
2199 {"NV30", CARD_NVIDIA_GEFORCEFX_5800},
2200 /* Kelvin */
2201 {"nv28", CARD_NVIDIA_GEFORCE4_TI4200},
2202 {"nv25", CARD_NVIDIA_GEFORCE4_TI4200},
2203 {"nv20", CARD_NVIDIA_GEFORCE3},
2204 /* Celsius */
2205 {"nv1F", CARD_NVIDIA_GEFORCE4_MX}, /* GF4 MX IGP */
2206 {"nv1A", CARD_NVIDIA_GEFORCE2}, /* GF2 IGP */
2207 {"nv18", CARD_NVIDIA_GEFORCE4_MX},
2208 {"nv17", CARD_NVIDIA_GEFORCE4_MX},
2209 {"nv16", CARD_NVIDIA_GEFORCE2},
2210 {"nv15", CARD_NVIDIA_GEFORCE2},
2211 {"nv11", CARD_NVIDIA_GEFORCE2_MX},
2212 {"nv10", CARD_NVIDIA_GEFORCE},
2213 /* Fahrenheit */
2214 {"nv05", CARD_NVIDIA_RIVA_TNT2},
2215 {"nv04", CARD_NVIDIA_RIVA_TNT},
2216 {"nv03", CARD_NVIDIA_RIVA_128},
2217 };
2218
2219 for (i = 0; i < sizeof(cards) / sizeof(*cards); ++i)
2220 {
2221 if (strstr(gl_renderer, cards[i].renderer))
2222 return cards[i].id;
2223 }
2224 return PCI_DEVICE_NONE;
2225 }
2226
2227 static const struct gl_vendor_selection
2228 {
2229 enum wined3d_gl_vendor gl_vendor;
2230     const char *description;        /* Description of the card selector, e.g. Apple OS X Intel */
2231 enum wined3d_pci_device (*select_card)(const struct wined3d_gl_info *gl_info, const char *gl_renderer);
2232 }
2233 nvidia_gl_vendor_table[] =
2234 {
2235 {GL_VENDOR_NVIDIA, "Nvidia binary driver", select_card_nvidia_binary},
2236 {GL_VENDOR_APPLE, "Apple OSX NVidia binary driver", select_card_nvidia_binary},
2237 {GL_VENDOR_MESA, "Mesa Nouveau driver", select_card_nvidia_mesa},
2238 },
2239 amd_gl_vendor_table[] =
2240 {
2241 {GL_VENDOR_APPLE, "Apple OSX AMD/ATI binary driver", select_card_amd_binary},
2242 {GL_VENDOR_FGLRX, "AMD/ATI binary driver", select_card_amd_binary},
2243 {GL_VENDOR_MESA, "Mesa AMD/ATI driver", select_card_amd_mesa},
2244 },
2245 intel_gl_vendor_table[] =
2246 {
2247 {GL_VENDOR_APPLE, "Apple OSX Intel binary driver", select_card_intel},
2248 {GL_VENDOR_INTEL, "Mesa Intel driver", select_card_intel},
2249 {GL_VENDOR_MESA, "Mesa Intel driver", select_card_intel},
2250 };
2251
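/* Fallback selectors, used when the renderer string is not recognized by the
 * per-driver tables above: pick a generic card of the same vendor whose
 * generation matches the detected D3D level. */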
2252 static enum wined3d_pci_device select_card_fallback_nvidia(const struct wined3d_gl_info *gl_info)
2253 {
2254 UINT d3d_level = d3d_level_from_gl_info(gl_info);
2255 if (d3d_level >= 10)
2256 return CARD_NVIDIA_GEFORCE_8800GTX;
2257 if (d3d_level >= 9 && gl_info->supported[NV_VERTEX_PROGRAM3])
2258 return CARD_NVIDIA_GEFORCE_6800;
2259 if (d3d_level >= 9)
2260 return CARD_NVIDIA_GEFORCEFX_5800;
2261 if (d3d_level >= 8)
2262 return CARD_NVIDIA_GEFORCE3;
2263 if (d3d_level >= 7)
2264 return CARD_NVIDIA_GEFORCE;
2265 if (d3d_level >= 6)
2266 return CARD_NVIDIA_RIVA_TNT;
2267 return CARD_NVIDIA_RIVA_128;
2268 }
2269
2270 static enum wined3d_pci_device select_card_fallback_amd(const struct wined3d_gl_info *gl_info)
2271 {
2272 UINT d3d_level = d3d_level_from_gl_info(gl_info);
2273 if (d3d_level >= 10)
2274 return CARD_AMD_RADEON_HD2900;
2275 if (d3d_level >= 9)
2276 return CARD_AMD_RADEON_9500;
2277 if (d3d_level >= 8)
2278 return CARD_AMD_RADEON_8500;
2279 if (d3d_level >= 7)
2280 return CARD_AMD_RADEON_7200;
2281 return CARD_AMD_RAGE_128PRO;
2282 }
2283
2284 static enum wined3d_pci_device select_card_fallback_intel(const struct wined3d_gl_info *gl_info)
2285 {
2286 UINT d3d_level = d3d_level_from_gl_info(gl_info);
2287 if (d3d_level >= 10)
2288 return CARD_INTEL_G45;
2289 return CARD_INTEL_915G;
2290 }
2291
2292 static enum wined3d_pci_device select_card_handler(const struct gl_vendor_selection *table,
2293 unsigned int table_size, enum wined3d_gl_vendor gl_vendor,
2294 const struct wined3d_gl_info *gl_info, const char *gl_renderer)
2295 {
2296 unsigned int i;
2297
2298 for (i = 0; i < table_size; ++i)
2299 {
2300 if (table[i].gl_vendor != gl_vendor)
2301 continue;
2302
2303 TRACE("Applying card selector \"%s\".\n", table[i].description);
2304 return table[i].select_card(gl_info, gl_renderer);
2305 }
2306 FIXME("Couldn't find a suitable card selector for GL vendor %04x (using GL_RENDERER %s)\n",
2307 gl_vendor, debugstr_a(gl_renderer));
2308
2309 return PCI_DEVICE_NONE;
2310 }
2311
2312 static const struct
2313 {
2314 enum wined3d_pci_vendor card_vendor;
2315     const char *description;        /* Description of the card vendor, e.g. Nvidia */
2316 const struct gl_vendor_selection *gl_vendor_selection;
2317 unsigned int gl_vendor_count;
2318 enum wined3d_pci_device (*select_card_fallback)(const struct wined3d_gl_info *gl_info);
2319 }
2320 card_vendor_table[] =
2321 {
2322 {HW_VENDOR_NVIDIA, "Nvidia", nvidia_gl_vendor_table,
2323 sizeof(nvidia_gl_vendor_table) / sizeof(nvidia_gl_vendor_table[0]),
2324 select_card_fallback_nvidia},
2325 {HW_VENDOR_AMD, "AMD", amd_gl_vendor_table,
2326 sizeof(amd_gl_vendor_table) / sizeof(amd_gl_vendor_table[0]),
2327 select_card_fallback_amd},
2328 {HW_VENDOR_INTEL, "Intel", intel_gl_vendor_table,
2329 sizeof(intel_gl_vendor_table) / sizeof(intel_gl_vendor_table[0]),
2330 select_card_fallback_intel},
2331 };
2332
2333
2334 static enum wined3d_pci_device wined3d_guess_card(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
2335 enum wined3d_gl_vendor *gl_vendor, enum wined3d_pci_vendor *card_vendor)
2336 {
2337 /* A Direct3D device object contains the PCI id (vendor + device) of the
2338 * videocard which is used for rendering. Various applications use this
2339 * information to get a rough estimation of the features of the card and
2340 * some might use it for enabling 3d effects only on certain types of
2341 * videocards. In some cases games might even use it to work around bugs
2342 * which happen on certain videocards/driver combinations. The problem is
2343 * that OpenGL only exposes a rendering string containing the name of the
2344 * videocard and not the PCI id.
2345 *
2346 * Various games depend on the PCI id, so somehow we need to provide one.
2347 * A simple option is to parse the renderer string and translate this to
2348 * the right PCI id. This is a lot of work because there are more than 200
2349 * GPUs just for Nvidia. Various cards share the same renderer string, so
2350 * the amount of code might be 'small' but there are quite a number of
2351 * exceptions which would make this a pain to maintain. Another way would
2352 * be to query the PCI id from the operating system (assuming this is the
2353      * videocard actually used for rendering, which is not always the case).
2354      * This would work, but it is not very portable. It would also not work
2355      * well in, say, a remote X situation where the set of usable 3d
2356      * features is limited.
2357 *
2358      * As said, most games only use the PCI id to get an indication of the
2359      * capabilities of the card. It doesn't really matter whether the given id
2360      * is exactly correct, as long as we return the id of a card with similar 3d features.
2361 *
2362 * The code below checks the OpenGL capabilities of a videocard and matches
2363 * that to a certain level of Direct3D functionality. Once a card passes
2364 * the Direct3D9 check, we know that the card (in case of Nvidia) is at
2365 * least a GeforceFX. To give a better estimate we do a basic check on the
2366      * renderer string, but if that doesn't match we return a default card. This
2367      * way is better than maintaining a full card database, as even without a
2368      * full database we can return a card with similar features. Second, the
2369      * size of the database can be kept quite small because once you know what
2370      * type of 3d functionality a card has, you know to which GPU family the
2371      * GPU must belong. Because of this you only have to check a small part of
2372      * the renderer string to distinguish between different models from that
2373      * family.
2374 *
2375 * The code also selects a default amount of video memory which we will
2376 * use for an estimation of the amount of free texture memory. In case of
2377 * real D3D the amount of texture memory includes video memory and system
2378 * memory (to be specific AGP memory or in case of PCIE TurboCache /
2379 * HyperMemory). We don't know how much system memory can be addressed by
2380 * the system but we can make a reasonable estimation about the amount of
2381      * video memory. If the value is slightly wrong it doesn't matter: we
2382      * didn't include AGP-like memory, which makes the amount of addressable
2383      * memory higher, and OpenGL isn't that critical anyway since it moves data
2384      * to system memory behind our backs if really needed. Note that the amount
2385      * of video memory can be overridden using a registry setting. */
2386
2387 unsigned int i;
2388 enum wined3d_pci_device device;
2389
2390 for (i = 0; i < (sizeof(card_vendor_table) / sizeof(*card_vendor_table)); ++i)
2391 {
2392 if (card_vendor_table[i].card_vendor != *card_vendor)
2393 continue;
2394
2395 TRACE("Applying card selector \"%s\".\n", card_vendor_table[i].description);
2396 device = select_card_handler(card_vendor_table[i].gl_vendor_selection,
2397 card_vendor_table[i].gl_vendor_count, *gl_vendor, gl_info, gl_renderer);
2398 if (device != PCI_DEVICE_NONE)
2399 return device;
2400
2401 TRACE("Unrecognized renderer %s, falling back to default.\n", debugstr_a(gl_renderer));
2402 return card_vendor_table[i].select_card_fallback(gl_info);
2403 }
2404
2405 FIXME("No card selector available for card vendor %04x (using GL_RENDERER %s).\n",
2406 *card_vendor, debugstr_a(gl_renderer));
2407
2408 /* Default to generic Nvidia hardware based on the supported OpenGL extensions. */
2409 *card_vendor = HW_VENDOR_NVIDIA;
2410 return select_card_fallback_nvidia(gl_info);
2411 }
2412
2413 static const struct wined3d_vertex_pipe_ops *select_vertex_implementation(const struct wined3d_gl_info *gl_info,
2414 const struct wined3d_shader_backend_ops *shader_backend_ops)
2415 {
2416 if (shader_backend_ops == &glsl_shader_backend)
2417 return &glsl_vertex_pipe;
2418 return &ffp_vertex_pipe;
2419 }
2420
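/* Pick the most capable fragment pipeline the GL implementation exposes:
 * GLSL, then ARB fragment programs, then the ATI/NVIDIA specific fragment
 * shader / register combiner paths, and finally fixed function. */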
2421 static const struct fragment_pipeline *select_fragment_implementation(const struct wined3d_gl_info *gl_info,
2422 const struct wined3d_shader_backend_ops *shader_backend_ops)
2423 {
2424 if (shader_backend_ops == &glsl_shader_backend)
2425 return &glsl_fragment_pipe;
2426 if (shader_backend_ops == &arb_program_shader_backend && gl_info->supported[ARB_FRAGMENT_PROGRAM])
2427 return &arbfp_fragment_pipeline;
2428 if (gl_info->supported[ATI_FRAGMENT_SHADER])
2429 return &atifs_fragment_pipeline;
2430 if (gl_info->supported[NV_REGISTER_COMBINERS] && gl_info->supported[NV_TEXTURE_SHADER2])
2431 return &nvts_fragment_pipeline;
2432 if (gl_info->supported[NV_REGISTER_COMBINERS])
2433 return &nvrc_fragment_pipeline;
2434 return &ffp_fragment_pipeline;
2435 }
2436
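/* Prefer GLSL when it is requested and the driver exposes at least GLSL 1.20;
 * otherwise fall back to ARB vertex/fragment programs, or to no shader
 * backend at all. */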
2437 static const struct wined3d_shader_backend_ops *select_shader_backend(const struct wined3d_gl_info *gl_info)
2438 {
2439 BOOL glsl = wined3d_settings.glslRequested && gl_info->glsl_version >= MAKEDWORD_VERSION(1, 20);
2440
2441 if (glsl && gl_info->supported[ARB_FRAGMENT_SHADER])
2442 return &glsl_shader_backend;
2443 if (glsl && gl_info->supported[ARB_VERTEX_SHADER])
2444 {
2445         /* Geforce4 cards support GLSL, but only for vertex shaders. Furthermore,
2446          * their reported GLSL caps are wrong. Since GLSL would not offer more
2447          * features or performance on these cards anyway, use ARB shaders only
2448          * for them. */
2449 if (gl_info->supported[NV_VERTEX_PROGRAM] && !gl_info->supported[NV_VERTEX_PROGRAM2])
2450 return &arb_program_shader_backend;
2451 return &glsl_shader_backend;
2452 }
2453 if (gl_info->supported[ARB_VERTEX_PROGRAM] || gl_info->supported[ARB_FRAGMENT_PROGRAM])
2454 return &arb_program_shader_backend;
2455 return &none_shader_backend;
2456 }
2457
2458 static const struct blit_shader *select_blit_implementation(const struct wined3d_gl_info *gl_info,
2459 const struct wined3d_shader_backend_ops *shader_backend_ops)
2460 {
2461 if ((shader_backend_ops == &glsl_shader_backend
2462 || shader_backend_ops == &arb_program_shader_backend)
2463 && gl_info->supported[ARB_FRAGMENT_PROGRAM])
2464 return &arbfp_blit;
2465 return &ffp_blit;
2466 }
2467
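/* Split the space-separated extension string and mark every extension from
 * the given map as supported in gl_info. */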
2468 static void parse_extension_string(struct wined3d_gl_info *gl_info, const char *extensions,
2469 const struct wined3d_extension_map *map, UINT entry_count)
2470 {
2471 while (*extensions)
2472 {
2473 const char *start;
2474 size_t len;
2475 UINT i;
2476
2477 while (isspace(*extensions))
2478 ++extensions;
2479 start = extensions;
2480 while (!isspace(*extensions) && *extensions)
2481 ++extensions;
2482
2483 len = extensions - start;
2484 if (!len)
2485 continue;
2486
2487 TRACE("- %s.\n", debugstr_an(start, len));
2488
2489 for (i = 0; i < entry_count; ++i)
2490 {
2491 if (len == strlen(map[i].extension_string)
2492 && !memcmp(start, map[i].extension_string, len))
2493 {
2494 TRACE(" FOUND: %s support.\n", map[i].extension_string);
2495 gl_info->supported[map[i].extension] = TRUE;
2496 break;
2497 }
2498 }
2499 }
2500 }
2501
2502 static void load_gl_funcs(struct wined3d_gl_info *gl_info)
2503 {
2504 #define USE_GL_FUNC(pfn) gl_info->gl_ops.ext.p_##pfn = (void *)wglGetProcAddress(#pfn);
2505 GL_EXT_FUNCS_GEN;
2506 #undef USE_GL_FUNC
2507
2508 #ifndef USE_WIN32_OPENGL
2509 /* hack: use the functions directly from the TEB table to bypass the thunks */
2510 /* note that we still need the above wglGetProcAddress calls to initialize the table */
2511 gl_info->gl_ops.ext = ((struct opengl_funcs *)NtCurrentTeb()->glTable)->ext;
2512 #endif
2513 }
2514
2515 static void wined3d_adapter_init_limits(struct wined3d_gl_info *gl_info)
2516 {
2517 GLfloat gl_floatv[2];
2518 GLint gl_max;
2519
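    /* Conservative defaults; raised below when the corresponding extensions
     * are available and the real GL limits can be queried. */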
2520 gl_info->limits.blends = 1;
2521 gl_info->limits.buffers = 1;
2522 gl_info->limits.textures = 1;
2523 gl_info->limits.texture_coords = 1;
2524 gl_info->limits.fragment_samplers = 1;
2525 gl_info->limits.vertex_samplers = 0;
2526 gl_info->limits.combined_samplers = gl_info->limits.fragment_samplers + gl_info->limits.vertex_samplers;
2527 gl_info->limits.vertex_attribs = 16;
2528 gl_info->limits.glsl_vs_float_constants = 0;
2529 gl_info->limits.glsl_ps_float_constants = 0;
2530 gl_info->limits.arb_vs_float_constants = 0;
2531 gl_info->limits.arb_vs_native_constants = 0;
2532 gl_info->limits.arb_vs_instructions = 0;
2533 gl_info->limits.arb_vs_temps = 0;
2534 gl_info->limits.arb_ps_float_constants = 0;
2535 gl_info->limits.arb_ps_local_constants = 0;
2536 gl_info->limits.arb_ps_instructions = 0;
2537 gl_info->limits.arb_ps_temps = 0;
2538
2539 gl_info->gl_ops.gl.p_glGetIntegerv(GL_MAX_CLIP_PLANES, &gl_max);
2540 gl_info->limits.clipplanes = min(WINED3DMAXUSERCLIPPLANES, gl_max);
2541 TRACE("Clip plane support - max planes %d.\n", gl_max);
2542
2543 gl_info->gl_ops.gl.p_glGetIntegerv(GL_MAX_LIGHTS, &gl_max);
2544 gl_info->limits.lights = gl_max;
2545 TRACE("Light support - max lights %d.\n", gl_max);
2546
2547 gl_info->gl_ops.gl.p_glGetIntegerv(GL_MAX_TEXTURE_SIZE, &gl_max);
2548 gl_info->limits.texture_size = gl_max;
2549 TRACE("Maximum texture size support - max texture size %d.\n", gl_max);
2550
2551 gl_info->gl_ops.gl.p_glGetFloatv(GL_ALIASED_POINT_SIZE_RANGE, gl_floatv);
2552 gl_info->limits.pointsize_min = gl_floatv[0];
2553 gl_info->limits.pointsize_max = gl_floatv[1];
2554 TRACE("Maximum point size support - max point size %f.\n", gl_floatv[1]);
2555
2556 if (gl_info->supported[ARB_MAP_BUFFER_ALIGNMENT])
2557 {
2558 gl_info->gl_ops.gl.p_glGetIntegerv(GL_MIN_MAP_BUFFER_ALIGNMENT, &gl_max);
2559 TRACE("Minimum buffer map alignment: %d.\n", gl_max);
2560 }
2561 else
2562 {
2563 WARN_(d3d_perf)("Driver doesn't guarantee a minimum buffer map alignment.\n");
2564 }
2565 if (gl_info->supported[NV_REGISTER_COMBINERS])
2566 {
2567 gl_info->gl_ops.gl.p_glGetIntegerv(GL_MAX_GENERAL_COMBINERS_NV, &gl_max);
2568 gl_info->limits.general_combiners = gl_max;
2569 TRACE("Max general combiners: %d.\n", gl_max);
2570 }
2571 if (gl_info->supported[ARB_DRAW_BUFFERS] && wined3d_settings.offscreen_rendering_mode == ORM_FBO)
2572 {
2573 gl_info->gl_ops.gl.p_glGetIntegerv(GL_MAX_DRAW_BUFFERS_ARB, &gl_max);
2574 gl_info->limits.buffers = gl_max;
2575 TRACE("Max draw buffers: %u.\n", gl_max);
2576 }
2577 if (gl_info->supported[ARB_MULTITEXTURE])
2578 {
2579 gl_info->gl_ops.gl.p_glGetIntegerv(GL_MAX_TEXTURE_UNITS_ARB, &gl_max);
2580 gl_info->limits.textures = min(MAX_TEXTURES, gl_max);
2581 TRACE("Max textures: %d.\n", gl_info->limits.textures);
2582
2583 if (gl_info->supported[ARB_FRAGMENT_PROGRAM])
2584 {
2585 GLint tmp;
2586 gl_info->gl_ops.gl.p_glGetIntegerv(GL_MAX_TEXTURE_COORDS_ARB, &gl_max);
2587 gl_info->limits.texture_coords = min(MAX_TEXTURES, gl_max);
2588 gl_info->gl_ops.gl.p_glGetIntegerv(GL_MAX_TEXTURE_IMAGE_UNITS_ARB, &tmp);
2589 gl_info->limits.fragment_samplers = min(MAX_FRAGMENT_SAMPLERS, tmp);
2590 }
2591 else
2592 {
2593 gl_info->limits.texture_coords = max(gl_info->limits.texture_coords, gl_max);
2594 gl_info->limits.fragment_samplers = max(gl_info->limits.fragment_samplers, gl_max);
2595 }
2596 TRACE("Max texture coords: %d.\n", gl_info->limits.texture_coords);
2597 TRACE("Max fragment samplers: %d.\n", gl_info->limits.fragment_samplers);
2598
2599 if (gl_info->supported[ARB_VERTEX_SHADER])
2600 {
2601 GLint tmp;
2602 gl_info->gl_ops.gl.p_glGetIntegerv(GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS_ARB, &tmp);
2603 gl_info->limits.vertex_samplers = tmp;
2604 gl_info->gl_ops.gl.p_glGetIntegerv(GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS_ARB, &tmp);
2605 gl_info->limits.combined_samplers = tmp;
2606 gl_info->gl_ops.gl.p_glGetIntegerv(GL_MAX_VERTEX_ATTRIBS_ARB, &tmp);
2607 gl_info->limits.vertex_attribs = tmp;
2608
2609 /* Loading GLSL sampler uniforms is much simpler if we can assume that the sampler setup
2610 * is known at shader link time. With a vertex shader + pixel shader combination this is
2611 * not an issue, because the sampler setup then only depends on the two shaders. If a pixel
2612 * shader is used with fixed function vertex processing we are fine as well, because fixed
2613 * function vertex processing does not use any samplers. If fixed function fragment processing
2614 * is used, we have to make sure that all vertex sampler setups are valid together with all
2615 * possible fixed function fragment processing setups. This holds if vertex samplers + MAX_TEXTURES
2616 * <= combined samplers, which is the case on all d3d9 cards that support vertex texture fetch
2617 * (GeForce 6 and GeForce 7 cards); d3d9-level Radeon cards do not support vertex texture fetch
2618 * at all. DX10 cards have 128 samplers, while d3d9 is limited to 8 fixed function texture
2619 * stages and 4 vertex samplers, and DX10 no longer has a fixed function pipeline.
2620 *
2621 * So this is merely a sanity check that the assumption holds. If it does not, emit a FIXME
2622 * and reduce the number of vertex samplers, or possibly disable vertex texture fetch. */
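/* Worked example (hypothetical driver limits, for illustration only): an implementation
 * reporting 4 vertex samplers and 20 combined samplers satisfies 8 + 4 <= 20 and is left
 * alone, while one reporting 4 vertex samplers but only 10 combined samplers fails the
 * check below and has its vertex sampler count clamped to 10 - 8 = 2. */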
2623 if (gl_info->limits.vertex_samplers && gl_info->limits.combined_samplers < 12
2624 && MAX_TEXTURES + gl_info->limits.vertex_samplers > gl_info->limits.combined_samplers)
2625 {
2626 FIXME("OpenGL implementation supports %u vertex samplers and %u total samplers.\n",
2627 gl_info->limits.vertex_samplers, gl_info->limits.combined_samplers);
2628 FIXME("Expected vertex samplers + MAX_TEXTURES(=8) > combined_samplers.\n");
2629 if (gl_info->limits.combined_samplers > MAX_TEXTURES)
2630 gl_info->limits.vertex_samplers = gl_info->limits.combined_samplers - MAX_TEXTURES;
2631 else
2632 gl_info->limits.vertex_samplers = 0;
2633 }
2634 }
2635 else
2636 {
2637 gl_info->limits.combined_samplers = gl_info->limits.fragment_samplers;
2638 }
2639 TRACE("Max vertex samplers: %u.\n", gl_info->limits.vertex_samplers);
2640 TRACE("Max combined samplers: %u.\n", gl_info->limits.combined_samplers);
2641 }
2642 if (gl_info->supported[ARB_VERTEX_BLEND])
2643 {
2644 gl_info->gl_ops.gl.p_glGetIntegerv(GL_MAX_VERTEX_UNITS_ARB, &gl_max);
2645 gl_info->limits.blends = gl_max;
2646 TRACE("Max blends: %u.\n", gl_info->limits.blends);
2647 }
2648 if (gl_info->supported[EXT_TEXTURE3D])
2649 {
2650 gl_info->gl_ops.gl.p_glGetIntegerv(GL_MAX_3D_TEXTURE_SIZE_EXT, &gl_max);
2651 gl_info->limits.texture3d_size = gl_max;
2652 TRACE("Max texture3D size: %d.\n", gl_info->limits.texture3d_size);
2653 }
2654 if (gl_info->supported[EXT_TEXTURE_FILTER_ANISOTROPIC])
2655 {
2656 gl_info->gl_ops.gl.p_glGetIntegerv(GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT, &gl_max);
2657 gl_info->limits.anisotropy = gl_max;
2658 TRACE("Max anisotropy: %d.\n", gl_info->limits.anisotropy);
2659 }
2660 if (gl_info->supported[ARB_FRAGMENT_PROGRAM])
2661 {
2662 GL_EXTCALL(glGetProgramivARB(GL_FRAGMENT_PROGRAM_ARB, GL_MAX_PROGRAM_ENV_PARAMETERS_ARB, &gl_max));
2663 gl_info->limits.arb_ps_float_constants = gl_max;
2664 TRACE("Max ARB_FRAGMENT_PROGRAM float constants: %d.\n", gl_info->limits.arb_ps_float_constants);
2665 GL_EXTCALL(glGetProgramivARB(GL_FRAGMENT_PROGRAM_ARB, GL_MAX_PROGRAM_NATIVE_PARAMETERS_ARB, &gl_max));
2666 gl_info->limits.arb_ps_native_constants = gl_max;
2667 TRACE("Max ARB_FRAGMENT_PROGRAM native float constants: %d.\n",
2668 gl_info->limits.arb_ps_native_constants);
2669 GL_EXTCALL(glGetProgramivARB(GL_FRAGMENT_PROGRAM_ARB, GL_MAX_PROGRAM_NATIVE_TEMPORARIES_ARB, &gl_max));
2670 gl_info->limits.arb_ps_temps = gl_max;
2671 TRACE("Max ARB_FRAGMENT_PROGRAM native temporaries: %d.\n", gl_info->limits.arb_ps_temps);
2672 GL_EXTCALL(glGetProgramivARB(GL_FRAGMENT_PROGRAM_ARB, GL_MAX_PROGRAM_NATIVE_INSTRUCTIONS_ARB, &gl_max));
2673 gl_info->limits.arb_ps_instructions = gl_max;
2674 TRACE("Max ARB_FRAGMENT_PROGRAM native instructions: %d.\n", gl_info->limits.arb_ps_instructions);
2675 GL_EXTCALL(glGetProgramivARB(GL_FRAGMENT_PROGRAM_ARB, GL_MAX_PROGRAM_LOCAL_PARAMETERS_ARB, &gl_max));
2676 gl_info->limits.arb_ps_local_constants = gl_max;
2677 TRACE("Max ARB_FRAGMENT_PROGRAM local parameters: %d.\n", gl_info->limits.arb_ps_instructions);
2678 }
2679 if (gl_info->supported[ARB_VERTEX_PROGRAM])
2680 {
2681 GL_EXTCALL(glGetProgramivARB(GL_VERTEX_PROGRAM_ARB, GL_MAX_PROGRAM_ENV_PARAMETERS_ARB, &gl_max));
2682 gl_info->limits.arb_vs_float_constants = gl_max;
2683 TRACE("Max ARB_VERTEX_PROGRAM float constants: %d.\n", gl_info->limits.arb_vs_float_constants);
2684 GL_EXTCALL(glGetProgramivARB(GL_VERTEX_PROGRAM_ARB, GL_MAX_PROGRAM_NATIVE_PARAMETERS_ARB, &gl_max));
2685 gl_info->limits.arb_vs_native_constants = gl_max;
2686 TRACE("Max ARB_VERTEX_PROGRAM native float constants: %d.\n",
2687 gl_info->limits.arb_vs_native_constants);
2688 GL_EXTCALL(glGetProgramivARB(GL_VERTEX_PROGRAM_ARB, GL_MAX_PROGRAM_NATIVE_TEMPORARIES_ARB, &gl_max));
2689 gl_info->limits.arb_vs_temps = gl_max;
2690 TRACE("Max ARB_VERTEX_PROGRAM native temporaries: %d.\n", gl_info->limits.arb_vs_temps);
2691 GL_EXTCALL(glGetProgramivARB(GL_VERTEX_PROGRAM_ARB, GL_MAX_PROGRAM_NATIVE_INSTRUCTIONS_ARB, &gl_max));
2692 gl_info->limits.arb_vs_instructions = gl_max;
2693 TRACE("Max ARB_VERTEX_PROGRAM native instructions: %d.\n", gl_info->limits.arb_vs_instructions);
2694 }
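/* The GLSL uniform limits queried below are reported by GL as individual float components,
 * while d3d shader constants are 4-component vectors, hence the divisions by 4. E.g. a
 * (hypothetical) driver reporting 4096 for GL_MAX_VERTEX_UNIFORM_COMPONENTS_ARB yields
 * 1024 vec4 vertex shader constants. */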
2695 if (gl_info->supported[ARB_VERTEX_SHADER])
2696 {
2697 gl_info->gl_ops.gl.p_glGetIntegerv(GL_MAX_VERTEX_UNIFORM_COMPONENTS_ARB, &gl_max);
2698 gl_info->limits.glsl_vs_float_constants = gl_max / 4;
2699 TRACE("Max ARB_VERTEX_SHADER float constants: %u.\n", gl_info->limits.glsl_vs_float_constants);
2700 }
2701 if (gl_info->supported[ARB_FRAGMENT_SHADER])
2702 {
2703 gl_info->gl_ops.gl.p_glGetIntegerv(GL_MAX_FRAGMENT_UNIFORM_COMPONENTS_ARB, &gl_max);
2704 gl_info->limits.glsl_ps_float_constants = gl_max / 4;
2705 TRACE("Max ARB_FRAGMENT_SHADER float constants: %u.\n", gl_info->limits.glsl_ps_float_constants);
2706 gl_info->gl_ops.gl.p_glGetIntegerv(GL_MAX_VARYING_FLOATS_ARB, &gl_max);
2707 gl_info->limits.glsl_varyings = gl_max;
2708 TRACE("Max GLSL varyings: %u (%u 4 component varyings).\n", gl_max, gl_max / 4);
2709 }
2710
2711 if (gl_info->supported[NV_LIGHT_MAX_EXPONENT])
2712 gl_info->gl_ops.gl.p_glGetFloatv(GL_MAX_SHININESS_NV, &gl_info->limits.shininess);
2713 else
2714 gl_info->limits.shininess = 128.0f;
2715
2716 if ((gl_info->supported[ARB_FRAMEBUFFER_OBJECT] || gl_info->supported[EXT_FRAMEBUFFER_MULTISAMPLE])
2717 && wined3d_settings.allow_multisampling)
2718 {
2719 gl_info->gl_ops.gl.p_glGetIntegerv(GL_MAX_SAMPLES, &gl_max);
2720 gl_info->limits.samples = gl_max;
2721 }
2722 }
2723
2724 /* Context activation is done by the caller. */
2725 static BOOL wined3d_adapter_init_gl_caps(struct wined3d_adapter *adapter)
2726 {
2727 struct wined3d_driver_info *driver_info = &adapter->driver_info;
2728 const char *gl_vendor_str, *gl_renderer_str, *gl_version_str;
2729 struct wined3d_gl_info *gl_info = &adapter->gl_info;
2730 struct wined3d_vertex_caps vertex_caps;
2731 enum wined3d_pci_vendor card_vendor;
2732 struct fragment_caps fragment_caps;
2733 struct shader_caps shader_caps;
2734 const char *WGL_Extensions = NULL;
2735 const char *GL_Extensions = NULL;
2736 enum wined3d_gl_vendor gl_vendor;
2737 enum wined3d_pci_device device;
2738 DWORD gl_version;
2739 HDC hdc;
2740 unsigned int i;
2741
2742 TRACE("adapter %p.\n", adapter);
2743
2744 gl_renderer_str = (const char *)gl_info->gl_ops.gl.p_glGetString(GL_RENDERER);
2745 TRACE("GL_RENDERER: %s.\n", debugstr_a(gl_renderer_str));
2746 if (!gl_renderer_str)
2747 {
2748 ERR("Received a NULL GL_RENDERER.\n");
2749 return FALSE;
2750 }
2751
2752 gl_vendor_str = (const char *)gl_info->gl_ops.gl.p_glGetString(GL_VENDOR);
2753 TRACE("GL_VENDOR: %s.\n", debugstr_a(gl_vendor_str));
2754 if (!gl_vendor_str)
2755 {
2756 ERR("Received a NULL GL_VENDOR.\n");
2757 return FALSE;
2758 }
2759
2760 /* Parse the GL_VERSION field into major and minor information */
2761 gl_version_str = (const char *)gl_info->gl_ops.gl.p_glGetString(GL_VERSION);
2762 TRACE("GL_VERSION: %s.\n", debugstr_a(gl_version_str));
2763 if (!gl_version_str)
2764 {
2765 ERR("Received a NULL GL_VERSION.\n");
2766 return FALSE;
2767 }
2768 gl_version = wined3d_parse_gl_version(gl_version_str);
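/* For example, a (hypothetical) GL_VERSION string of "2.1.2 NVIDIA 173.14.36" is expected
 * to yield MAKEDWORD_VERSION(2, 1), which the core-promotion checks below compare against. */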
2769
2770 /* Parse the supported GL features so that the relevant parts of our code can be enabled. */
2771 GL_Extensions = (const char *)gl_info->gl_ops.gl.p_glGetString(GL_EXTENSIONS);
2772 if (!GL_Extensions)
2773 {
2774 ERR("Received a NULL GL_EXTENSIONS.\n");
2775 return FALSE;
2776 }
2777
2778 memset(gl_info->supported, 0, sizeof(gl_info->supported));
2779 gl_info->supported[WINED3D_GL_EXT_NONE] = TRUE;
2780
2781 TRACE("GL extensions reported:\n");
2782 parse_extension_string(gl_info, GL_Extensions, gl_extension_map,
2783 sizeof(gl_extension_map) / sizeof(*gl_extension_map));
2784
2785 /* Now work out what GL support this card really has. */
2786 load_gl_funcs(gl_info);
2787
2788 hdc = wglGetCurrentDC();
2789 /* Not all GL drivers offer WGL extensions, e.g. VirtualBox. */
2790 if (GL_EXTCALL(wglGetExtensionsStringARB))
2791 WGL_Extensions = (const char *)GL_EXTCALL(wglGetExtensionsStringARB(hdc));
2792 if (!WGL_Extensions)
2793 WARN("WGL extensions not supported.\n");
2794 else
2795 parse_extension_string(gl_info, WGL_Extensions, wgl_extension_map,
2796 sizeof(wgl_extension_map) / sizeof(*wgl_extension_map));
2797
2798 if (!gl_info->supported[EXT_TEXTURE3D] && gl_version >= MAKEDWORD_VERSION(1, 2))
2799 {
2800 TRACE("GL CORE: GL_EXT_texture3D support.\n");
2801 gl_info->gl_ops.ext.p_glTexImage3DEXT = (void *)gl_info->gl_ops.ext.p_glTexImage3D;
2802 gl_info->gl_ops.ext.p_glTexSubImage3DEXT = gl_info->gl_ops.ext.p_glTexSubImage3D;
2803 gl_info->supported[EXT_TEXTURE3D] = TRUE;
2804 }
2805
2806 if (!gl_info->supported[NV_POINT_SPRITE] && gl_version >= MAKEDWORD_VERSION(1, 4))
2807 {
2808 TRACE("GL CORE: GL_NV_point_sprite support.\n");
2809 gl_info->gl_ops.ext.p_glPointParameterivNV = gl_info->gl_ops.ext.p_glPointParameteriv;
2810 gl_info->gl_ops.ext.p_glPointParameteriNV = gl_info->gl_ops.ext.p_glPointParameteri;
2811 gl_info->supported[NV_POINT_SPRITE] = TRUE;
2812 }
2813
2814 if (!gl_info->supported[ARB_TEXTURE_NON_POWER_OF_TWO] && gl_version >= MAKEDWORD_VERSION(2, 0))
2815 {
2816 TRACE("GL CORE: GL_ARB_texture_non_power_of_two support.\n");
2817 gl_info->supported[ARB_TEXTURE_NON_POWER_OF_TWO] = TRUE;
2818 }
2819
2820 if (gl_version >= MAKEDWORD_VERSION(2, 0)) gl_info->supported[WINED3D_GL_VERSION_2_0] = TRUE;
2821
2822 if (gl_info->supported[APPLE_FENCE])
2823 {
2824 /* GL_NV_fence and GL_APPLE_fence provide essentially the same functionality.
2825 * The Apple extension interacts with some other Apple extensions. Disable the NV
2826 * extension if the Apple one is supported, to prevent confusion in other parts
2827 * of the code. */
2828 gl_info->supported[NV_FENCE] = FALSE;
2829 }
2830 if (gl_info->supported[APPLE_FLOAT_PIXELS])
2831 {
2832 /* GL_APPLE_float_pixels == GL_ARB_texture_float + GL_ARB_half_float_pixel
2833 *
2834 * The enums are the same:
2835 * GL_RGBA16F_ARB = GL_RGBA_FLOAT16_APPLE = 0x881a
2836 * GL_RGB16F_ARB = GL_RGB_FLOAT16_APPLE = 0x881b
2837 * GL_RGBA32F_ARB = GL_RGBA_FLOAT32_APPLE = 0x8814
2838 * GL_RGB32F_ARB = GL_RGB_FLOAT32_APPLE = 0x8815
2839 * GL_HALF_FLOAT_ARB = GL_HALF_APPLE = 0x140b
2840 */
2841 if (!gl_info->supported[ARB_TEXTURE_FLOAT])
2842 {
2843 TRACE(" IMPLIED: GL_ARB_texture_float support (by GL_APPLE_float_pixels).\n");
2844 gl_info->supported[ARB_TEXTURE_FLOAT] = TRUE;
2845 }
2846 if (!gl_info->supported[ARB_HALF_FLOAT_PIXEL])
2847 {
2848 TRACE(" IMPLIED: GL_ARB_half_float_pixel support (by GL_APPLE_float_pixels).\n");
2849 gl_info->supported[ARB_HALF_FLOAT_PIXEL] = TRUE;
2850 }
2851 }
2852 if (gl_info->supported[ARB_MAP_BUFFER_RANGE])
2853 {
2854 /* GL_ARB_map_buffer_range and GL_APPLE_flush_buffer_range provide the same
2855 * functionality. Prefer the ARB extension. */
2856 gl_info->supported[APPLE_FLUSH_BUFFER_RANGE] = FALSE;
2857 }
2858 if (gl_info->supported[ARB_TEXTURE_CUBE_MAP])
2859 {
2860 TRACE(" IMPLIED: NVIDIA (NV) Texture Gen Reflection support.\n");
2861 gl_info->supported[NV_TEXGEN_REFLECTION] = TRUE;
2862 }
2863 if (!gl_info->supported[ARB_DEPTH_CLAMP] && gl_info->supported[NV_DEPTH_CLAMP])
2864 {
2865 TRACE(" IMPLIED: ARB_depth_clamp support (by NV_depth_clamp).\n");
2866 gl_info->supported[ARB_DEPTH_CLAMP] = TRUE;
2867 }
2868 if (!gl_info->supported[ARB_VERTEX_ARRAY_BGRA] && gl_info->supported[EXT_VERTEX_ARRAY_BGRA])
2869 {
2870 TRACE(" IMPLIED: ARB_vertex_array_bgra support (by EXT_vertex_array_bgra).\n");
2871 gl_info->supported[ARB_VERTEX_ARRAY_BGRA] = TRUE;
2872 }
2873 if (!gl_info->supported[ARB_TEXTURE_COMPRESSION_RGTC] && gl_info->supported[EXT_TEXTURE_COMPRESSION_RGTC])
2874 {
2875 TRACE(" IMPLIED: ARB_texture_compression_rgtc support (by EXT_texture_compression_rgtc).\n");
2876 gl_info->supported[ARB_TEXTURE_COMPRESSION_RGTC] = TRUE;
2877 }
2878 if (gl_info->supported[NV_TEXTURE_SHADER2])
2879 {
2880 if (gl_info->supported[NV_REGISTER_COMBINERS])
2881 {
2882 /* Also disable ATI_FRAGMENT_SHADER if register combiners and texture_shader2
2883 * are supported. The NV extensions provide the same functionality as the
2884 * ATI one, and a bit more (signed pixel formats). */
2885 gl_info->supported[ATI_FRAGMENT_SHADER] = FALSE;
2886 }
2887 }
2888 if (gl_info->supported[ARB_TEXTURE_NON_POWER_OF_TWO])
2889 {
2890 /* If we have full NP2 texture support, disable
2891 * GL_ARB_texture_rectangle because we will never use it.
2892 * This saves a few redundant glDisable calls. */
2893 gl_info->supported[ARB_TEXTURE_RECTANGLE] = FALSE;
2894 }
2895 if (gl_info->supported[ATI_FRAGMENT_SHADER])
2896 {
2897 /* Disable the NV register combiner and texture shader extensions if ATI_fragment_shader is
2898 * supported. Generally the NV extensions are preferred over the ATI ones, and
2899 * ATI_fragment_shader is disabled if register_combiners and texture_shader2 are both
2900 * supported. So we only reach this point if the NV dxlevel 8
2901 * fragment processing support is incomplete. */
2902 gl_info->supported[NV_REGISTER_COMBINERS] = FALSE;
2903 gl_info->supported[NV_REGISTER_COMBINERS2] = FALSE;
2904 gl_info->supported[NV_TEXTURE_SHADER] = FALSE;
2905 gl_info->supported[NV_TEXTURE_SHADER2] = FALSE;
2906 }
2907 if (gl_info->supported[NV_HALF_FLOAT])
2908 {
2909 /* GL_ARB_half_float_vertex is a subset of GL_NV_half_float. */
2910 gl_info->supported[ARB_HALF_FLOAT_VERTEX] = TRUE;
2911 }
2912 if (gl_info->supported[ARB_FRAMEBUFFER_SRGB] && !gl_info->supported[EXT_TEXTURE_SRGB_DECODE])
2913 {
2914 /* Current wined3d sRGB infrastructure requires EXT_texture_sRGB_decode
2915 * for GL_ARB_framebuffer_sRGB support (without EXT_texture_sRGB_decode
2916 * we never render to sRGB surfaces). */
2917 gl_info->supported[ARB_FRAMEBUFFER_SRGB] = FALSE;
2918 }
2919 if (gl_info->supported[ARB_OCCLUSION_QUERY])
2920 {
2921 GLint counter_bits;
2922
2923 GL_EXTCALL(glGetQueryivARB(GL_SAMPLES_PASSED_ARB, GL_QUERY_COUNTER_BITS_ARB, &counter_bits));
2924 TRACE("Occlusion query counter has %d bits.\n", counter_bits);
2925 if (!counter_bits)
2926 gl_info->supported[ARB_OCCLUSION_QUERY] = FALSE;
2927 }
2928
2929 wined3d_adapter_init_limits(gl_info);
2930
2931 if (gl_info->supported[ARB_VERTEX_PROGRAM] && test_arb_vs_offset_limit(gl_info))
2932 gl_info->quirks |= WINED3D_QUIRK_ARB_VS_OFFSET_LIMIT;
2933
2934 if (gl_info->supported[ARB_SHADING_LANGUAGE_100])
2935 {
2936 const char *str = (const char *)gl_info->gl_ops.gl.p_glGetString(GL_SHADING_LANGUAGE_VERSION_ARB);
2937 unsigned int major, minor;
2938
2939 TRACE("GLSL version string: %s.\n", debugstr_a(str));
2940
2941 /* The format of the GLSL version string is "major.minor[.release] [vendor info]". */
2942 sscanf(str, "%u.%u", &major, &minor);
2943 gl_info->glsl_version = MAKEDWORD_VERSION(major, minor);
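/* E.g. a (hypothetical) version string of "1.30 - Some Vendor Compiler" parses as
 * major 1, minor 30, giving glsl_version == MAKEDWORD_VERSION(1, 30). */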
2944 }
2945
2946 checkGLcall("extension detection");
2947
2948 adapter->shader_backend = select_shader_backend(gl_info);
2949 adapter->vertex_pipe = select_vertex_implementation(gl_info, adapter->shader_backend);
2950 adapter->fragment_pipe = select_fragment_implementation(gl_info, adapter->shader_backend);
2951 adapter->blitter = select_blit_implementation(gl_info, adapter->shader_backend);
2952
2953 adapter->shader_backend->shader_get_caps(&adapter->gl_info, &shader_caps);
2954 adapter->d3d_info.vs_clipping = shader_caps.wined3d_caps & WINED3D_SHADER_CAP_VS_CLIPPING;
2955 adapter->d3d_info.limits.vs_version = shader_caps.vs_version;
2956 adapter->d3d_info.limits.gs_version = shader_caps.gs_version;
2957 adapter->d3d_info.limits.ps_version = shader_caps.ps_version;
2958 adapter->d3d_info.limits.vs_uniform_count = shader_caps.vs_uniform_count;
2959 adapter->d3d_info.limits.ps_uniform_count = shader_caps.ps_uniform_count;
2960
2961 adapter->vertex_pipe->vp_get_caps(gl_info, &vertex_caps);
2962 adapter->d3d_info.xyzrhw = vertex_caps.xyzrhw;
2963
2964 adapter->fragment_pipe->get_caps(gl_info, &fragment_caps);
2965 adapter->d3d_info.limits.ffp_blend_stages = fragment_caps.MaxTextureBlendStages;
2966 adapter->d3d_info.limits.ffp_textures = fragment_caps.MaxSimultaneousTextures;
2967 TRACE("Max texture stages: %u.\n", adapter->d3d_info.limits.ffp_blend_stages);
2968
2969 if (gl_info->supported[ARB_FRAMEBUFFER_OBJECT])
2970 {
2971 gl_info->fbo_ops.glIsRenderbuffer = gl_info->gl_ops.ext.p_glIsRenderbuffer;
2972 gl_info->fbo_ops.glBindRenderbuffer = gl_info->gl_ops.ext.p_glBindRenderbuffer;
2973 gl_info->fbo_ops.glDeleteRenderbuffers = gl_info->gl_ops.ext.p_glDeleteRenderbuffers;
2974 gl_info->fbo_ops.glGenRenderbuffers = gl_info->gl_ops.ext.p_glGenRenderbuffers;
2975 gl_info->fbo_ops.glRenderbufferStorage = gl_info->gl_ops.ext.p_glRenderbufferStorage;
2976 gl_info->fbo_ops.glRenderbufferStorageMultisample = gl_info->gl_ops.ext.p_glRenderbufferStorageMultisample;
2977 gl_info->fbo_ops.glGetRenderbufferParameteriv = gl_info->gl_ops.ext.p_glGetRenderbufferParameteriv;
2978 gl_info->fbo_ops.glIsFramebuffer = gl_info->gl_ops.ext.p_glIsFramebuffer;
2979 gl_info->fbo_ops.glBindFramebuffer = gl_info->gl_ops.ext.p_glBindFramebuffer;
2980 gl_info->fbo_ops.glDeleteFramebuffers = gl_info->gl_ops.ext.p_glDeleteFramebuffers;
2981 gl_info->fbo_ops.glGenFramebuffers = gl_info->gl_ops.ext.p_glGenFramebuffers;
2982 gl_info->fbo_ops.glCheckFramebufferStatus = gl_info->gl_ops.ext.p_glCheckFramebufferStatus;
2983 gl_info->fbo_ops.glFramebufferTexture1D = gl_info->gl_ops.ext.p_glFramebufferTexture1D;
2984 gl_info->fbo_ops.glFramebufferTexture2D = gl_info->gl_ops.ext.p_glFramebufferTexture2D;
2985 gl_info->fbo_ops.glFramebufferTexture3D = gl_info->gl_ops.ext.p_glFramebufferTexture3D;
2986 gl_info->fbo_ops.glFramebufferRenderbuffer = gl_info->gl_ops.ext.p_glFramebufferRenderbuffer;
2987 gl_info->fbo_ops.glGetFramebufferAttachmentParameteriv
2988 = gl_info->gl_ops.ext.p_glGetFramebufferAttachmentParameteriv;
2989 gl_info->fbo_ops.glBlitFramebuffer = gl_info->gl_ops.ext.p_glBlitFramebuffer;
2990 gl_info->fbo_ops.glGenerateMipmap = gl_info->gl_ops.ext.p_glGenerateMipmap;
2991 }
2992 else
2993 {
2994 if (gl_info->supported[EXT_FRAMEBUFFER_OBJECT])
2995 {
2996 gl_info->fbo_ops.glIsRenderbuffer = gl_info->gl_ops.ext.p_glIsRenderbufferEXT;
2997 gl_info->fbo_ops.glBindRenderbuffer = gl_info->gl_ops.ext.p_glBindRenderbufferEXT;
2998 gl_info->fbo_ops.glDeleteRenderbuffers = gl_info->gl_ops.ext.p_glDeleteRenderbuffersEXT;
2999 gl_info->fbo_ops.glGenRenderbuffers = gl_info->gl_ops.ext.p_glGenRenderbuffersEXT;
3000 gl_info->fbo_ops.glRenderbufferStorage = gl_info->gl_ops.ext.p_glRenderbufferStorageEXT;
3001 gl_info->fbo_ops.glGetRenderbufferParameteriv = gl_info->gl_ops.ext.p_glGetRenderbufferParameterivEXT;
3002 gl_info->fbo_ops.glIsFramebuffer = gl_info->gl_ops.ext.p_glIsFramebufferEXT;
3003 gl_info->fbo_ops.glBindFramebuffer = gl_info->gl_ops.ext.p_glBindFramebufferEXT;
3004 gl_info->fbo_ops.glDeleteFramebuffers = gl_info->gl_ops.ext.p_glDeleteFramebuffersEXT;
3005 gl_info->fbo_ops.glGenFramebuffers = gl_info->gl_ops.ext.p_glGenFramebuffersEXT;
3006 gl_info->fbo_ops.glCheckFramebufferStatus = gl_info->gl_ops.ext.p_glCheckFramebufferStatusEXT;
3007 gl_info->fbo_ops.glFramebufferTexture1D = gl_info->gl_ops.ext.p_glFramebufferTexture1DEXT;
3008 gl_info->fbo_ops.glFramebufferTexture2D = gl_info->gl_ops.ext.p_glFramebufferTexture2DEXT;
3009 gl_info->fbo_ops.glFramebufferTexture3D = gl_info->gl_ops.ext.p_glFramebufferTexture3DEXT;
3010 gl_info->fbo_ops.glFramebufferRenderbuffer = gl_info->gl_ops.ext.p_glFramebufferRenderbufferEXT;
3011 gl_info->fbo_ops.glGetFramebufferAttachmentParameteriv
3012 = gl_info->gl_ops.ext.p_glGetFramebufferAttachmentParameterivEXT;
3013 gl_info->fbo_ops.glGenerateMipmap = gl_info->gl_ops.ext.p_glGenerateMipmapEXT;
3014 }
3015 else if (wined3d_settings.offscreen_rendering_mode == ORM_FBO)
3016 {
3017 WARN_(d3d_perf)("Framebuffer objects not supported, falling back to backbuffer offscreen rendering mode.\n");
3018 wined3d_settings.offscreen_rendering_mode = ORM_BACKBUFFER;
3019 }
3020 if (gl_info->supported[EXT_FRAMEBUFFER_BLIT])
3021 {
3022 gl_info->fbo_ops.glBlitFramebuffer = gl_info->gl_ops.ext.p_glBlitFramebufferEXT;
3023 }
3024 if (gl_info->supported[EXT_FRAMEBUFFER_MULTISAMPLE])
3025 {
3026 gl_info->fbo_ops.glRenderbufferStorageMultisample
3027 = gl_info->gl_ops.ext.p_glRenderbufferStorageMultisampleEXT;
3028 }
3029 }
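/* Both branches above populate the same gl_info->fbo_ops table (as far as the available
 * extensions allow), so the rest of wined3d can call through fbo_ops without caring
 * whether the ARB or the EXT entry points back it. */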
3030
3031 gl_vendor = wined3d_guess_gl_vendor(gl_info, gl_vendor_str, gl_renderer_str);
3032 card_vendor = wined3d_guess_card_vendor(gl_vendor_str, gl_renderer_str);
3033 TRACE("Found GL_VENDOR (%s)->(0x%04x/0x%04x).\n", debugstr_a(gl_vendor_str), gl_vendor, card_vendor);
3034
3035 device = wined3d_guess_card(gl_info, gl_renderer_str, &gl_vendor, &card_vendor);
3036 TRACE("Found (fake) card: 0x%x (vendor id), 0x%x (device id).\n", card_vendor, device);
3037
3038 gl_info->wrap_lookup[WINED3D_TADDRESS_WRAP - WINED3D_TADDRESS_WRAP] = GL_REPEAT;
3039 gl_info->wrap_lookup[WINED3D_TADDRESS_MIRROR - WINED3D_TADDRESS_WRAP] =
3040 gl_info->supported[ARB_TEXTURE_MIRRORED_REPEAT] ? GL_MIRRORED_REPEAT_ARB : GL_REPEAT;
3041 gl_info->wrap_lookup[WINED3D_TADDRESS_CLAMP - WINED3D_TADDRESS_WRAP] = GL_CLAMP_TO_EDGE;
3042 gl_info->wrap_lookup[WINED3D_TADDRESS_BORDER - WINED3D_TADDRESS_WRAP] =
3043 gl_info->supported[ARB_TEXTURE_BORDER_CLAMP] ? GL_CLAMP_TO_BORDER_ARB : GL_REPEAT;
3044 gl_info->wrap_lookup[WINED3D_TADDRESS_MIRROR_ONCE - WINED3D_TADDRESS_WRAP] =
3045 gl_info->supported[ATI_TEXTURE_MIRROR_ONCE] ? GL_MIRROR_CLAMP_TO_EDGE_ATI : GL_REPEAT;
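/* The table is indexed with the d3d texture address mode, offset by WINED3D_TADDRESS_WRAP;
 * a (hypothetical) lookup of the GL wrap mode would read roughly as:
 *
 *     GLenum gl_wrap = gl_info->wrap_lookup[address_mode - WINED3D_TADDRESS_WRAP];
 */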
3046
3047 adapter->d3d_info.valid_rt_mask = 0;
3048 for (i = 0; i < gl_info->limits.buffers; ++i)
3049 adapter->d3d_info.valid_rt_mask |= (1 << i);
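/* E.g. with gl_info->limits.buffers == 4 this produces a valid_rt_mask of 0xf,
 * i.e. render target indices 0-3 are usable. */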
3050
3051 fixup_extensions(gl_info, gl_renderer_str, gl_vendor, card_vendor, device);
3052 init_driver_info(driver_info, card_vendor, device);
3053 add_gl_compat_wrappers(gl_info);
3054
3055 return TRUE;
3056 }
3057
3058 UINT CDECL wined3d_get_adapter_count(const struct wined3d *wined3d)
3059 {
3060 TRACE("wined3d %p, reporting %u adapters.\n",
3061 wined3d, wined3d->adapter_count);
3062
3063 return wined3d->adapter_count;
3064 }
3065
3066 HRESULT CDECL wined3d_register_software_device(struct wined3d *wined3d, void *init_function)
3067 {
3068 FIXME("wined3d %p, init_function %p stub!\n", wined3d, init_function);
3069
3070 return WINED3D_OK;
3071 }
3072
3073 HMONITOR CDECL wined3d_get_adapter_monitor(const struct wined3d *wined3d, UINT adapter_idx)
3074 {
3075 TRACE("wined3d %p, adapter_idx %u.\n", wined3d, adapter_idx);
3076
3077 if (adapter_idx >= wined3d->adapter_count)
3078 return NULL;
3079
3080 return MonitorFromPoint(wined3d->adapters[adapter_idx].monitorPoint, MONITOR_DEFAULTTOPRIMARY);
3081 }
3082
3083 /* FIXME: GetAdapterModeCount and EnumAdapterModes currently only return modes
3084 of the same bpp but different resolutions. */
3085
3086 /* Note: dx9 supplies a format. Calls from d3d8 supply WINED3DFMT_UNKNOWN */
3087 UINT CDECL wined3d_get_adapter_mode_count(const struct wined3d *wined3d, UINT adapter_idx,
3088 enum wined3d_format_id format_id, enum wined3d_scanline_ordering scanline_ordering)
3089 {
3090 const struct wined3d_adapter *adapter;
3091 const struct wined3d_format *format;
3092 unsigned int i = 0;
3093 unsigned int j = 0;
3094 UINT format_bits;
3095 DEVMODEW mode;
3096
3097 TRACE("wined3d %p, adapter_idx %u, format %s, scanline_ordering %#x.\n",
3098 wined3d, adapter_idx, debug_d3dformat(format_id), scanline_ordering);
3099
3100 if (adapter_idx >= wined3d->adapter_count)
3101 return 0;
3102
3103 adapter = &wined3d->adapters[adapter_idx];
3104 format = wined3d_get_format(&adapter->gl_info, format_id);
3105 format_bits = format->byte_count * CHAR_BIT;
3106
3107 memset(&mode, 0, sizeof(mode));
3108 mode.dmSize = sizeof(mode);
3109
3110 while (EnumDisplaySettingsExW(adapter->DeviceName, j++, &mode, 0))
3111 {
3112 if (mode.dmFields & DM_DISPLAYFLAGS)
3113 {
3114 if (scanline_ordering == WINED3D_SCANLINE_ORDERING_PROGRESSIVE
3115 && (mode.u2.dmDisplayFlags & DM_INTERLACED))
3116 continue;
3117
3118 if (scanline_ordering == WINED3D_SCANLINE_ORDERING_INTERLACED
3119 && !(mode.u2.dmDisplayFlags & DM_INTERLACED))