C++ - Creating OpenGL texture from SDL2 surface - strange pixel values
I'm trying to use SDL2 to load a texture for OpenGL rendering of Wavefront objects (currently I'm testing with the fixed pipeline, but I plan to move to shaders eventually). The problem is that the loaded texture, applied to a quad (and to the model, which uses a small part in the bottom right of the texture), looks like this:
A sample of the effect: http://image-upload.de/image/dakaef/e433b140c9.png - this is the texture used.
The image loads fine and looks normal when drawn with SDL functions; it's the conversion to an OpenGL texture that's broken. Note that I have alpha blending enabled and the texture is still opaque - so the values are not random, and probably not uninitialized memory. This is my code for converting the surface (cobbled together from various tutorials and questions on this site):
    GLuint glMakeTexture(bool mipmap = false, int request_size = 0) { // only works on 32-bit surfaces
        GLuint texture = 0;
        if ((bool)_surface) {
            int w, h;
            if (request_size) { // NPOT and rectangular textures have been supported for at least a decade now; this should never be needed...
                w = h = request_size;
                if (w < _surface->w || h < _surface->h) return 0; // no can do.
            } else {
                w = _surface->w;
                h = _surface->h;
            }
            SDL_LockSurface(&*_surface);
            std::cout << "bits: " << (int)_surface->format->BytesPerPixel << std::endl;
            Uint8 *temp = (Uint8*)malloc(w*h*sizeof(Uint32)); // yes, I know it's 4...
            if (!temp) return 0;

            // optimized code
            /*for (int y = 0; y < h; y++) { // pitch is given in bytes, so we need to cast to 8 bit here!
                memcpy(temp + y*w*sizeof(Uint32), (Uint8*)_surface->pixels + y*_surface->pitch, _surface->w*sizeof(Uint32));
                if (w > _surface->w) memset(temp + y*w*sizeof(Uint32) + _surface->w, 0, (w - _surface->w)*sizeof(Uint32));
            }
            for (int y = _surface->h; y < h; y++) memset(temp + y*w*sizeof(Uint32), 0, w*sizeof(Uint32));
            GLenum format = (_surface->format->Rmask == 0xff) ? GL_RGBA : GL_BGRA;*/

            // naive code for testing
            for (int y = 0; y < _surface->h; y++)
                for (int x = 0; x < _surface->w; x++) {
                    int mempos = (x + y*w)*4;
                    SDL_Color pcol = get_pixel(x, y);
                    temp[mempos]   = pcol.r;
                    temp[mempos+1] = pcol.g;
                    temp[mempos+2] = pcol.b;
                    temp[mempos+3] = pcol.a;
                }
            GLenum format = GL_RGBA;

            SDL_UnlockSurface(&*_surface);
            glGenTextures(1, &texture);
            glBindTexture(GL_TEXTURE_2D, texture);
            if (mipmap) glTexParameteri(texture, GL_GENERATE_MIPMAP, GL_TRUE);
            glTexImage2D(GL_TEXTURE_2D, 0, format, w, h, 0, format, GL_UNSIGNED_BYTE, temp);
            if (mipmap) glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
            else glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
            free(temp); // clean up...
        }
        return texture;
    }
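The get_pixel helper isn't shown above; for a locked 32-bit surface, a plausible implementation looks something like this (a sketch, not necessarily the actual helper used):

    // Hypothetical get_pixel for a locked 32-bit surface; assumes x/y are in range.
    SDL_Color get_pixel(int x, int y) {
        SDL_Color col;
        // pitch is in bytes, so index rows through a byte pointer
        Uint32 pixel = *(Uint32*)((Uint8*)_surface->pixels + y*_surface->pitch + x*sizeof(Uint32));
        // SDL_GetRGBA decodes the pixel using the surface's channel masks
        SDL_GetRGBA(pixel, _surface->format, &col.r, &col.g, &col.b, &col.a);
        return col;
    }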
EDIT: _surface is a std::shared_ptr to an SDL_Surface, hence the &* when (un)locking it.
BTW, SDL claims the surface is formatted as 32-bit RGBA on my machine; I checked that already.
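For reference, one quick way to double-check what SDL thinks the surface contains (a small sketch using SDL2's SDL_GetPixelFormatName):

    // Print the concrete pixel format, pitch and bytes-per-pixel of the surface,
    // e.g. "SDL_PIXELFORMAT_ABGR8888" for byte-order RGBA on a little-endian machine.
    std::cout << SDL_GetPixelFormatName(_surface->format->format)
              << ", pitch: " << _surface->pitch
              << ", bpp: " << (int)_surface->format->BytesPerPixel << std::endl;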
This is the code that binds the texture and draws the quad:
    glEnable(GL_TEXTURE_2D);
    glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
    glBindTexture(GL_TEXTURE_2D, _texture[MAP_KD]);

    static bool once = true;
    if (once) {
        int tex;
        glGetIntegerv(GL_TEXTURE_BINDING_2D, &tex);
        bool valid = glIsTexture(tex);
        std::cout << tex << " " << valid << std::endl;
        once = false;
    }

    glBegin(GL_TRIANGLE_STRIP);
    //glColor3f(1.f, 1.f, 1.f);
    glNormal3f(0, 1, 0);
    glTexCoord2f(0.f, 0.f); glVertex3f(0, 0, 0);
    glTexCoord2f(0.f, 1.f); glVertex3f(0, 0, 1);
    glTexCoord2f(1.f, 0.f); glVertex3f(1, 0, 0);
    glTexCoord2f(1.f, 1.f); glVertex3f(1, 0, 1);
    glEnd();
The axe is drawn later from an index list; that code is way too long to share here (and besides, it works fine apart from the texture).
I also tried the naive method found in many tutorials of passing _surface->pixels directly to glTexImage2D(), but that doesn't help either (and I've heard it's the wrong way to do it anyway, because pitch != width * BytesPerPixel in general). The commented-out "optimized" code looks the same, btw (as expected). I wrote the lower part for better testing. Setting the pixels to a specific color or making the texture partially transparent works as expected, so I assume OpenGL loads the values in temp correctly. It's probably my understanding of the memory layout in SDL2 surfaces that's messed up.
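(For what it's worth, whether the naive approach is even safe for a given surface can be checked directly - a sketch:)

    // Rows are tightly packed only if the pitch equals width * bytes-per-pixel.
    // Only then may _surface->pixels be handed to glTexImage2D without further
    // unpack-state adjustments (beyond GL_UNPACK_ALIGNMENT).
    bool tightly_packed =
        _surface->pitch == _surface->w * _surface->format->BytesPerPixel;
    std::cout << "tightly packed: " << tightly_packed << std::endl;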
FINAL EDIT: The solution (resetting GL_UNPACK_ROW_LENGTH was the key, as per Peter Clark's answer):
    GLuint glTexture(bool mipmap = false) {
        GLuint texture = 0;
        if ((bool)_surface) {
            GLenum texture_format, internal_format, tex_type;
            if (_surface->format->BytesPerPixel == 4) {
                if (_surface->format->Rmask == 0x000000ff) {
                    texture_format = GL_RGBA;
                    tex_type = GL_UNSIGNED_INT_8_8_8_8_REV;
                } else {
                    texture_format = GL_BGRA;
                    tex_type = GL_UNSIGNED_INT_8_8_8_8;
                }
                internal_format = GL_RGBA8;
            } else {
                if (_surface->format->Rmask == 0x000000ff) {
                    texture_format = GL_RGB;
                    tex_type = GL_UNSIGNED_BYTE;
                } else {
                    texture_format = GL_BGR;
                    tex_type = GL_UNSIGNED_BYTE;
                }
                internal_format = GL_RGB8;
            }

            int alignment = 8;
            while (_surface->pitch % alignment) alignment >>= 1; // x%1==0 for any x
            glPixelStorei(GL_UNPACK_ALIGNMENT, alignment);

            int expected_pitch = (_surface->w*_surface->format->BytesPerPixel + alignment - 1) / alignment * alignment;
            if (_surface->pitch - expected_pitch >= alignment) // alignment alone won't solve it
                glPixelStorei(GL_UNPACK_ROW_LENGTH, _surface->pitch / _surface->format->BytesPerPixel);
            else
                glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);

            glGenTextures(1, &texture);
            glBindTexture(GL_TEXTURE_2D, texture);
            glTexImage2D(GL_TEXTURE_2D, 0, internal_format, _surface->w, _surface->h, 0, texture_format, tex_type, _surface->pixels);

            if (mipmap) {
                glGenerateMipmap(GL_TEXTURE_2D);
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
            } else {
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_BASE_LEVEL, 0);
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0);
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
            }
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);

            glPixelStorei(GL_UNPACK_ALIGNMENT, 4);   // reset to defaults
            glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
        }
        return texture;
    }
Pixel storage alignment
You need to tell OpenGL the alignment of the image with glPixelStorei(GL_UNPACK_ALIGNMENT, align), where align is one of 1, 2, 4, or 8 - the largest power-of-2 divisor of the pitch, capped at 8. If the pitch doesn't work out to one of the accepted values, you may have to additionally set GL_UNPACK_ROW_LENGTH - see this answer for more details and advice on the topic. One thing to note: GL_UNPACK_ROW_LENGTH is the row length in pixels, while SDL_Surface::pitch is the row length in bytes. On top of that you have to ensure that internal_format, format, and pixel_type are set to match what the SDL_Surface contains. One more resource on the topic.
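As a minimal sketch of the bookkeeping described above (variable names are illustrative):

    // Largest power-of-2 divisor of the pitch, capped at 8 (the largest value GL accepts).
    int align = 8;
    while (surface->pitch % align) align >>= 1;
    glPixelStorei(GL_UNPACK_ALIGNMENT, align);

    // GL_UNPACK_ROW_LENGTH is measured in pixels, pitch in bytes - convert.
    glPixelStorei(GL_UNPACK_ROW_LENGTH, surface->pitch / surface->format->BytesPerPixel);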
"complete" textures
You're not creating a complete texture when you're not using mipmaps. To create a "complete" texture (one that is ready to be read from or written to) with no mipmaps, you must specify that the maximum mipmap level is 0 (the base image) using glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0), since the default is 1000.
Just a note: you're using glTexParameteri(texture, GL_GENERATE_MIPMAP, GL_TRUE) to automatically generate mipmaps. While this should work (though I'm not familiar with it), be aware that this method is deprecated in favor of glGenerateMipmap in modern OpenGL.
A possible solution
    // Load texture into surface...
    // Error check...
    // Bind GL texture...

    // Calculate the required alignment from the pitch (largest power-of-2 divisor of the pitch)
    glPixelStorei(GL_UNPACK_ALIGNMENT, align);
    //glPixelStorei(GL_UNPACK_ROW_LENGTH, row_length); // row_length = pitch / bytes_per_pixel

    glTexImage2D(
        GL_TEXTURE_2D,
        0,
        internal_format,
        sdl_surface->w,
        sdl_surface->h,
        0,
        format,
        pixel_type,
        sdl_surface->pixels);
    // Check errors

    if (use_mipmaps) {
        glGenerateMipmap(GL_TEXTURE_2D);
        // Check errors
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, /* filter mode */);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, /* filter mode */);
        // Check errors
    } else {
        // This makes the texture complete
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_BASE_LEVEL, 0);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, /* filter mode */);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, /* filter mode */);
        // Check errors
    }

    // Potentially reset GL_UNPACK_ALIGNMENT and/or GL_UNPACK_ROW_LENGTH to their default values
    // Cleanup
Error checking
Note that it wouldn't be a bad idea to add error checking with glGetError() where I've marked "Check errors". Perhaps print the error if there is one, and place a breakpoint/assert there. I'd use a macro so that you can disable error checking in a release build - to the effect of:
    #ifdef MYPROJ_GRAPHICS_DEBUG
    #define ASSERT_IF_GL_ERROR \
    { \
        GLenum last_error = glGetError(); \
        if (last_error != GL_NO_ERROR) \
        { \
            printf("GL error: %d", last_error); \
            __debugbreak(); /* Visual Studio intrinsic - other compilers have similar intrinsics */ \
        } \
    }
    #else
    #define ASSERT_IF_GL_ERROR
    #endif
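Usage would then look something like this (sketch):

    glTexImage2D(GL_TEXTURE_2D, 0, internal_format, w, h, 0, format, pixel_type, pixels);
    ASSERT_IF_GL_ERROR; // prints and breaks into the debugger if the upload failed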
It is a good idea to do this error checking anyway, and it may reveal some information about what is going on. Though since it sounds like the driver is crashing after some undefined behavior, it's possible it won't help in this instance.
A possible alternative
I think it is worth mentioning that I was not aware of this issue before answering this question - I hadn't run into it because I use stb_image to load my textures. The reason I bring it up is that the documentation for stb_image states "There is no padding between image scanlines or between pixels, regardless of format.", meaning stb_image handles this for you. If you can control the images you have to load (say, if you're making a game and control the creation of the assets), stb_image is therefore an image loading option.
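A minimal sketch of that path (assuming stb_image.h is on the include path; the function name and the GL setup around it are illustrative):

    #define STB_IMAGE_IMPLEMENTATION
    #include "stb_image.h"

    GLuint load_texture_stb(const char *path) {
        int w, h, channels;
        // Force 4 channels (RGBA); stb_image returns tightly packed rows with no padding.
        unsigned char *pixels = stbi_load(path, &w, &h, &channels, 4);
        if (!pixels) return 0;
        GLuint texture;
        glGenTextures(1, &texture);
        glBindTexture(GL_TEXTURE_2D, texture);
        glPixelStorei(GL_UNPACK_ALIGNMENT, 1); // safe for any width, since rows are packed
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, w, h, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
        // No mipmaps: clamp the mipmap range so the texture is complete
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_BASE_LEVEL, 0);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        stbi_image_free(pixels);
        return texture;
    }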