/// PNG and BMP image loading.
/// D translation of stb_image-2.27 (png) and stb_image-2.29 (bmp).
/// This port only supports PNG loading (8-bit and 16-bit)
/// and BMP loading, as in STB.
module gamut.codecs.stbdec;


/* stb_image - v2.27 - public domain image loader - http://nothings.org/stb
   no warranty implied; use at your own risk


   PNG 1/2/4/8/16-bit-per-channel
   BMP non-1bpp, non-RLE
   PSD (composited view only, no extra channels, 8/16 bit-per-channel)

   GIF (*comp always reports as 4-channel)
   HDR (radiance rgbE format)
   PIC (Softimage PIC)
   PNM (PPM and PGM binary only)

   Animated GIF still needs a proper API, but here's one way to do it:
   http://gist.github.com/urraka/685d9a6340b26b830d49

   - decode from memory
   - decode from arbitrary I/O callbacks
   - SIMD acceleration on x86/x64 (SSE2) and ARM (NEON)

   Full documentation under "DOCUMENTATION" below.


   LICENSE

   See end of file for license information.

   RECENT REVISION HISTORY:

      2.27  (2021-07-11) document stbi_info better, 16-bit PNM support, bug fixes
      2.26  (2020-07-13) many minor fixes
      2.25  (2020-02-02) fix warnings
      2.24  (2020-02-02) fix warnings; thread-local failure_reason and flip_vertically
      2.23  (2019-08-11) fix clang static analysis warning
      2.22  (2019-03-04) gif fixes, fix warnings
      2.21  (2019-02-25) fix typo in comment
      2.20  (2019-02-07) support utf8 filenames in Windows; fix warnings and platform ifdefs
      2.19  (2018-02-11) fix warning
      2.18  (2018-01-30) fix warnings
      2.17  (2018-01-29) bugfix, 1-bit BMP, 16-bitness query, fix warnings
      2.16  (2017-07-23) all functions have 16-bit variants; optimizations; bugfixes
      2.15  (2017-03-18) fix png-1,2,4; all Imagenet JPGs; no runtime SSE detection on GCC
      2.14  (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs
      2.13  (2016-12-04) experimental 16-bit API, only for PNG so far; fixes
      2.12  (2016-04-02) fix typo in 2.11 PSD fix that caused crashes
      2.11  (2016-04-02) 16-bit PNGS; enable SSE2 in non-gcc x64
                         RGB-format JPEG; remove white matting in PSD;
                         allocate large structures on the stack;
                         correct channel count for PNG & BMP
      2.10  (2016-01-22) avoid warning introduced in 2.09
      2.09  (2016-01-16) 16-bit TGA; comments in PNM files; STBI_REALLOC_SIZED

   See end of file for full revision history.
61 62 63 ============================ Contributors ========================= 64 65 Image formats Extensions, features 66 Sean Barrett (jpeg, png, bmp) Jetro Lauha (stbi_info) 67 Nicolas Schulz (hdr, psd) Martin "SpartanJ" Golini (stbi_info) 68 Jonathan Dummer (tga) James "moose2000" Brown (iPhone PNG) 69 Jean-Marc Lienher (gif) Ben "Disch" Wenger (io callbacks) 70 Tom Seddon (pic) Omar Cornut (1/2/4-bit PNG) 71 Thatcher Ulrich (psd) Nicolas Guillemot (vertical flip) 72 Ken Miller (pgm, ppm) Richard Mitton (16-bit PSD) 73 github:urraka (animated gif) Junggon Kim (PNM comments) 74 Christopher Forseth (animated gif) Daniel Gibson (16-bit TGA) 75 socks-the-fox (16-bit PNG) 76 Jeremy Sawicki (handle all ImageNet JPGs) 77 Optimizations & bugfixes Mikhail Morozov (1-bit BMP) 78 Fabian "ryg" Giesen Anael Seghezzi (is-16-bit query) 79 Arseny Kapoulkine Simon Breuss (16-bit PNM) 80 John-Mark Allen 81 Carmelo J Fdez-Aguera 82 83 Bug & warning fixes 84 Marc LeBlanc David Woo Guillaume George Martins Mozeiko 85 Christpher Lloyd Jerry Jansson Joseph Thomson Blazej Dariusz Roszkowski 86 Phil Jordan Dave Moore Roy Eltham 87 Hayaki Saito Nathan Reed Won Chun 88 Luke Graham Johan Duparc Nick Verigakis the Horde3D community 89 Thomas Ruf Ronny Chevalier github:rlyeh 90 Janez Zemva John Bartholomew Michal Cichon github:romigrou 91 Jonathan Blow Ken Hamada Tero Hanninen github:svdijk 92 Eugene Golushkov Laurent Gomila Cort Stratton github:snagar 93 Aruelien Pocheville Sergio Gonzalez Thibault Reuille github:Zelex 94 Cass Everitt Ryamond Barbiero github:grim210 95 Paul Du Bois Engin Manap Aldo Culquicondor github:sammyhw 96 Philipp Wiesemann Dale Weiler Oriol Ferrer Mesia github:phprus 97 Josh Tobin Matthew Gregan github:poppolopoppo 98 Julian Raschke Gregory Mullen Christian Floisand github:darealshinji 99 Baldur Karlsson Kevin Schmidt JR Smith github:Michaelangel007 100 Brad Weinberger Matvey Cherevko github:mosra 101 Luca Sas Alexander Veselov Zack Middleton [reserved] 102 Ryan C. Gordon [reserved] [reserved] 103 DO NOT ADD YOUR NAME HERE 104 105 Jacko Dirks 106 107 To add your name to the credits, pick a random blank space in the middle and fill it. 108 80% of merge conflicts on stb PRs are due to people adding their name at the end 109 of the credits. 110 */ 111 112 import core.stdc.config: c_ulong; 113 import core.stdc.string: memcpy, memset; 114 import core.stdc.stdlib: malloc, free, realloc; 115 import core.atomic; 116 import std.math: ldexp, pow, abs; 117 import gamut.io; 118 119 nothrow @nogc: 120 121 import inteli.emmintrin; 122 123 // To interface with STB, use initSTBCallbacks to initialize a IOAndHandle and stbi_io_callbacks, 124 // then STB callback functions. 125 126 void initSTBCallbacks(IOStream *io, IOHandle handle, IOAndHandle* ioh, stbi_io_callbacks* stb_callback) 127 { 128 ioh.io = io; 129 ioh.handle = handle; 130 stb_callback.read = &stb_read; 131 stb_callback.skip = &stb_skip; 132 stb_callback.eof = &stb_eof; 133 } 134 135 // Need to give both a IOStream* and a IOHandle to STB callbacks. 136 static struct IOAndHandle 137 { 138 IOStream* io; 139 IOHandle handle; 140 } 141 142 // fill 'data' with 'size' bytes. return number of bytes actually read 143 int stb_read(void *user, char *data, int size) @system 144 { 145 IOAndHandle* ioh = cast(IOAndHandle*) user; 146 147 // Cannot ask more than 0x7fff_ffff bytes at once. 
148 assert(size <= 0x7fffffff); 149 150 size_t bytesRead = ioh.io.read(data, 1, size, ioh.handle); 151 return cast(int) bytesRead; 152 } 153 154 // skip the next 'n' bytes, or 'unget' the last -n bytes if negative 155 void stb_skip(void *user, int n) @system 156 { 157 IOAndHandle* ioh = cast(IOAndHandle*) user; 158 ioh.io.skipBytes(ioh.handle, n); // Note: no failure case... 159 } 160 161 // returns nonzero if we are at end of file/data 162 int stb_eof(void *user) @system 163 { 164 IOAndHandle* ioh = cast(IOAndHandle*) user; 165 return ioh.io.eof(ioh.handle); 166 } 167 168 169 170 // DOCUMENTATION 171 // 172 // Limitations: 173 // - no 12-bit-per-channel JPEG 174 // - no JPEGs with arithmetic coding 175 // - GIF always returns *comp=4 176 // 177 // Basic usage (see HDR discussion below for HDR usage): 178 // int x,y,n; 179 // unsigned char *data = stbi_load(filename, &x, &y, &n, 0); 180 // // ... process data if not null ... 181 // // ... x = width, y = height, n = # 8-bit components per pixel ... 182 // // ... replace '0' with '1'..'4' to force that many components per pixel 183 // // ... but 'n' will always be the number that it would have been if you said 0 184 // stbi_image_free(data) 185 // 186 // Standard parameters: 187 // int *x -- outputs image width in pixels 188 // int *y -- outputs image height in pixels 189 // int *channels_in_file -- outputs # of image components in image file 190 // int desired_channels -- if non-zero, # of image components requested in result 191 // 192 // The return value from an image loader is an 'unsigned char *' which points 193 // to the pixel data, or null on an allocation failure or if the image is 194 // corrupt or invalid. The pixel data consists of *y scanlines of *x pixels, 195 // with each pixel consisting of N interleaved 8-bit components; the first 196 // pixel pointed to is top-left-most in the image. There is no padding between 197 // image scanlines or between pixels, regardless of format. The number of 198 // components N is 'desired_channels' if desired_channels is non-zero, or 199 // *channels_in_file otherwise. If desired_channels is non-zero, 200 // *channels_in_file has the number of components that _would_ have been 201 // output otherwise. E.g. if you set desired_channels to 4, you will always 202 // get RGBA output, but you can check *channels_in_file to see if it's trivially 203 // opaque because e.g. there were only 3 channels in the source image. 204 // 205 // An output image with N components has the following components interleaved 206 // in this order in each pixel: 207 // 208 // N=#comp components 209 // 1 grey 210 // 2 grey, alpha 211 // 3 red, green, blue 212 // 4 red, green, blue, alpha 213 // 214 // If image loading fails for any reason, the return value will be null, 215 // and *x, *y, *channels_in_file will be unchanged. The function 216 // stbi_failure_reason() can be queried for an extremely brief, end-user 217 // unfriendly explanation of why the load failed. Define STBI_NO_FAILURE_STRINGS 218 // to avoid compiling these strings at all, and STBI_FAILURE_USERMSG to get slightly 219 // more user-friendly ones. 220 // 221 // Paletted PNG, BMP, GIF, and PIC images are automatically depalettized. 
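// In this port, loading does not go through the filename-based stbi_load()
// calls shown above; it goes through the callback-based entry points
// (stbi_load_from_callbacks, stbi_load_16_from_callbacks) wired to gamut's
// IOStream via initSTBCallbacks. A minimal caller sketch (hypothetical code,
// assuming `io` and `handle` refer to an already-open PNG or BMP stream):
ubyte* exampleLoadRGBA8(IOStream* io, IOHandle handle, int* w, int* h) @system
{
    IOAndHandle ioh;
    stbi_io_callbacks cb;
    initSTBCallbacks(io, handle, &ioh, &cb);

    int channelsInFile; // reports what the file really contained, even though we force 4
    float ppmX, ppmY, pixelRatio;
    ubyte* pixels = stbi_load_from_callbacks(&cb, &ioh, w, h, &channelsInFile, 4,
                                             &ppmX, &ppmY, &pixelRatio);
    // null on failure; release with stbi_image_free() when done.
    return pixels;
}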
222 // 223 // To query the width, height and component count of an image without having to 224 // decode the full file, you can use the stbi_info family of functions: 225 // 226 // int x,y,n,ok; 227 // ok = stbi_info(filename, &x, &y, &n); 228 // // returns ok=1 and sets x, y, n if image is a supported format, 229 // // 0 otherwise. 230 // 231 // Note that stb_image pervasively uses ints in its public API for sizes, 232 // including sizes of memory buffers. This is now part of the API and thus 233 // hard to change without causing breakage. As a result, the various image 234 // loaders all have certain limits on image size; these differ somewhat 235 // by format but generally boil down to either just under 2GB or just under 236 // 1GB. When the decoded image would be larger than this, stb_image decoding 237 // will fail. 238 // 239 // Additionally, stb_image will reject image files that have any of their 240 // dimensions set to a larger value than the configurable STBI_MAX_DIMENSIONS, 241 // which defaults to 2**24 = 16777216 pixels. Due to the above memory limit, 242 // the only way to have an image with such dimensions load correctly 243 // is for it to have a rather extreme aspect ratio. Either way, the 244 // assumption here is that such larger images are likely to be malformed 245 // or malicious. If you do need to load an image with individual dimensions 246 // larger than that, and it still fits in the overall size limit, you can 247 // #define STBI_MAX_DIMENSIONS on your own to be something larger. 248 // 249 // 250 // Philosophy 251 // 252 // stb libraries are designed with the following priorities: 253 // 254 // 1. easy to use 255 // 2. easy to maintain 256 // 3. good performance 257 // 258 // Sometimes I let "good performance" creep up in priority over "easy to maintain", 259 // and for best performance I may provide less-easy-to-use APIs that give higher 260 // performance, in addition to the easy-to-use ones. Nevertheless, it's important 261 // to keep in mind that from the standpoint of you, a client of this library, 262 // all you care about is #1 and #3, and stb libraries DO NOT emphasize #3 above all. 263 // 264 // Some secondary priorities arise directly from the first two, some of which 265 // provide more explicit reasons why performance can't be emphasized. 266 // 267 // - Portable ("ease of use") 268 // - Small source code footprint ("easy to maintain") 269 // - No dependencies ("ease of use") 270 // 271 // =========================================================================== 272 // 273 // I/O callbacks 274 // 275 // I/O callbacks allow you to read from arbitrary sources, like packaged 276 // files or some other source. Data read from callbacks are processed 277 // through a small internal buffer (currently 128 bytes) to try to reduce 278 // overhead. 279 // 280 // The three functions you must define are "read" (reads some bytes of data), 281 // "skip" (skips some bytes of data), "eof" (reports if the stream is at the end). 282 // 283 // =========================================================================== 284 // 285 // SIMD support 286 // 287 // The JPEG decoder will try to automatically use SIMD kernels on x86 when 288 // supported by the compiler. For ARM Neon support, you must explicitly 289 // request it. 290 // 291 // (The old do-it-yourself SIMD API is no longer supported in the current 292 // code.) 
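// Sketch of the three-callback contract described above, for a hypothetical
// in-memory source. The port itself routes the callbacks to gamut.io through
// initSTBCallbacks; this block only illustrates the protocol.
struct ExampleMemSource
{
    const(ubyte)* data;
    size_t len;
    size_t pos;
}

int exampleMemRead(void* user, char* dest, int size) @system
{
    ExampleMemSource* m = cast(ExampleMemSource*) user;
    size_t n = m.len - m.pos;
    if (n > cast(size_t) size)
        n = size;
    memcpy(dest, m.data + m.pos, n);
    m.pos += n;
    return cast(int) n; // number of bytes actually copied
}

void exampleMemSkip(void* user, int n) @system
{
    ExampleMemSource* m = cast(ExampleMemSource*) user;
    // a negative n "ungets" the last -n bytes, as documented above
    m.pos = cast(size_t)(cast(long) m.pos + n);
}

int exampleMemEof(void* user) @system
{
    ExampleMemSource* m = cast(ExampleMemSource*) user;
    return m.pos >= m.len ? 1 : 0;
}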
293 // 294 // On x86, SSE2 will automatically be used when available based on a run-time 295 // test; if not, the generic C versions are used as a fall-back. On ARM targets, 296 // the typical path is to have separate builds for NEON and non-NEON devices 297 // (at least this is true for iOS and Android). Therefore, the NEON support is 298 // toggled by a build flag: define STBI_NEON to get NEON loops. 299 // 300 // If for some reason you do not want to use any of SIMD code, or if 301 // you have issues compiling it, you can disable it entirely by 302 // defining STBI_NO_SIMD. 303 // 304 // =========================================================================== 305 // 306 // HDR image support (disable by defining STBI_NO_HDR) 307 // 308 // stb_image supports loading HDR images in general, and currently the Radiance 309 // .HDR file format specifically. You can still load any file through the existing 310 // interface; if you attempt to load an HDR file, it will be automatically remapped 311 // to LDR, assuming gamma 2.2 and an arbitrary scale factor defaulting to 1; 312 // both of these constants can be reconfigured through this interface: 313 // 314 // stbi_hdr_to_ldr_gamma(2.2f); 315 // stbi_hdr_to_ldr_scale(1.0f); 316 // 317 // (note, do not use _inverse_ constants; stbi_image will invert them 318 // appropriately). 319 // 320 // Additionally, there is a new, parallel interface for loading files as 321 // (linear) floats to preserve the full dynamic range: 322 // 323 // float *data = stbi_loadf(filename, &x, &y, &n, 0); 324 // 325 // If you load LDR images through this interface, those images will 326 // be promoted to floating point values, run through the inverse of 327 // constants corresponding to the above: 328 // 329 // stbi_ldr_to_hdr_scale(1.0f); 330 // stbi_ldr_to_hdr_gamma(2.2f); 331 // 332 // Finally, given a filename (or an open file or memory block--see header 333 // file for details) containing image data, you can query for the "most 334 // appropriate" interface to use (that is, whether the image is HDR or 335 // not), using: 336 // 337 // stbi_is_hdr(char *filename); 338 // 339 // =========================================================================== 340 // 341 // iPhone PNG support: 342 // 343 // We optionally support converting iPhone-formatted PNGs (which store 344 // premultiplied BGRA) back to RGB, even though they're internally encoded 345 // differently. To enable this conversion, call 346 // stbi_convert_iphone_png_to_rgb(1). 347 // 348 // Call stbi_set_unpremultiply_on_load(1) as well to force a divide per 349 // pixel to remove any premultiplied alpha *only* if the image file explicitly 350 // says there's premultiplied data (currently only happens in iPhone images, 351 // and only if iPhone convert-to-rgb processing is on). 352 // 353 // =========================================================================== 354 // 355 // ADDITIONAL CONFIGURATION 356 // 357 // - You can suppress implementation of any of the decoders to reduce 358 // your code footprint by #defining one or more of the following 359 // symbols before creating the implementation. 
360 // 361 // STBI_NO_JPEG 362 // STBI_NO_PNG 363 // STBI_NO_BMP 364 // STBI_NO_PSD 365 // STBI_NO_TGA 366 // STBI_NO_GIF 367 // STBI_NO_HDR 368 // STBI_NO_PIC 369 // STBI_NO_PNM (.ppm and .pgm) 370 // 371 // 372 // - If you use STBI_NO_PNG (or _ONLY_ without PNG), and you still 373 // want the zlib decoder to be available, #define STBI_SUPPORT_ZLIB 374 // 375 // - If you define STBI_MAX_DIMENSIONS, stb_image will reject images greater 376 // than that size (in either width or height) without further processing. 377 // This is to let programs in the wild set an upper bound to prevent 378 // denial-of-service attacks on untrusted data, as one could generate a 379 // valid image of gigantic dimensions and force stb_image to allocate a 380 // huge block of memory and spend disproportionate time decoding it. By 381 // default this is set to (1 << 24), which is 16777216, but that's still 382 // very big. 383 384 enum STBI_VERSION = 1; 385 386 enum 387 { 388 STBI_default = 0, // only used for desired_channels 389 390 STBI_grey = 1, 391 STBI_grey_alpha = 2, 392 STBI_rgb = 3, 393 STBI_rgb_alpha = 4 394 } 395 396 alias stbi_uc = ubyte; 397 alias stbi_us = ushort; 398 399 ////////////////////////////////////////////////////////////////////////////// 400 // 401 // PRIMARY API - works on images of any type 402 // 403 404 // 405 // load image by filename, open file, or memory buffer 406 // 407 408 struct stbi_io_callbacks 409 { 410 nothrow @nogc @system: 411 // fill 'data' with 'size' bytes. return number of bytes actually read 412 int function(void *user,char *data,int size) read; 413 414 // skip the next 'n' bytes, or 'unget' the last -n bytes if negative 415 void function(void *user,int n) skip; 416 417 // returns nonzero if we are at end of file/data 418 int function(void *user) eof; 419 } 420 421 // <Implementation> 422 423 alias stbi__uint16 = ushort; 424 alias stbi__int16 = short; 425 alias stbi__uint32 = uint; 426 alias stbi__int32 = int; 427 428 uint stbi_lrot(uint x, int y) 429 { 430 return (x << y) | (x >> (-y & 31)); 431 } 432 433 void* STBI_MALLOC(size_t size) 434 { 435 return malloc(size); 436 } 437 438 void* STBI_REALLOC(void* p, size_t new_size) 439 { 440 return realloc(p, new_size); 441 } 442 443 void* STBI_REALLOC_SIZED(void *ptr, size_t old_size, size_t new_size) 444 { 445 return realloc(ptr, new_size); 446 } 447 448 void STBI_FREE(void* p) 449 { 450 free(p); 451 } 452 453 enum STBI_MAX_DIMENSIONS = (1 << 24); 454 455 /////////////////////////////////////////////// 456 // 457 // stbi__context struct and start_xxx functions 458 459 // stbi__context structure is our basic context used by all images, so it 460 // contains all the IO context, plus some basic image information 461 struct stbi__context 462 { 463 stbi__uint32 img_x, img_y; 464 int img_n, img_out_n; 465 466 stbi_io_callbacks io; 467 void *io_user_data; 468 469 int read_from_callbacks; 470 int buflen; 471 stbi_uc[128] buffer_start; 472 int callback_already_read; 473 474 stbi_uc *img_buffer, img_buffer_end; 475 stbi_uc *img_buffer_original, img_buffer_original_end; 476 477 float ppmX; 478 float ppmY; 479 float pixelAspectRatio; 480 } 481 482 483 // initialize a callback-based context 484 void stbi__start_callbacks(stbi__context *s, stbi_io_callbacks *c, void *user) 485 { 486 s.io = *c; 487 s.io_user_data = user; 488 s.buflen = s.buffer_start.sizeof; 489 s.read_from_callbacks = 1; 490 s.callback_already_read = 0; 491 s.img_buffer = s.img_buffer_original = s.buffer_start.ptr; 492 stbi__refill_buffer(s); 493 
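    // The stbi__refill_buffer call above primes the 128-byte window so the
    // format "test" functions can peek at the file header; the end of that
    // first window is recorded just below so stbi__rewind() can return to it
    // (per the comment on stbi__rewind, the tests look at most at 92 bytes).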
s.img_buffer_original_end = s.img_buffer_end; 494 } 495 496 void stbi__rewind(stbi__context *s) 497 { 498 // conceptually rewind SHOULD rewind to the beginning of the stream, 499 // but we just rewind to the beginning of the initial buffer, because 500 // we only use it after doing 'test', which only ever looks at at most 92 bytes 501 s.img_buffer = s.img_buffer_original; 502 s.img_buffer_end = s.img_buffer_original_end; 503 } 504 505 enum 506 { 507 STBI_ORDER_RGB, 508 STBI_ORDER_BGR 509 } 510 511 struct stbi__result_info 512 { 513 int bits_per_channel; 514 int num_channels; 515 int channel_order; 516 } 517 518 alias stbi__malloc = STBI_MALLOC; 519 520 // stb_image uses ints pervasively, including for offset calculations. 521 // therefore the largest decoded image size we can support with the 522 // current code, even on 64-bit targets, is INT_MAX. this is not a 523 // significant limitation for the intended use case. 524 // 525 // we do, however, need to make sure our size calculations don't 526 // overflow. hence a few helper functions for size calculations that 527 // multiply integers together, making sure that they're non-negative 528 // and no overflow occurs. 529 530 // return 1 if the sum is valid, 0 on overflow. 531 // negative terms are considered invalid. 532 int stbi__addsizes_valid(int a, int b) 533 { 534 if (b < 0) return 0; 535 // now 0 <= b <= INT_MAX, hence also 536 // 0 <= INT_MAX - b <= INTMAX. 537 // And "a + b <= INT_MAX" (which might overflow) is the 538 // same as a <= INT_MAX - b (no overflow) 539 return a <= int.max - b; 540 } 541 542 // returns 1 if the product is valid, 0 on overflow. 543 // negative factors are considered invalid. 544 int stbi__mul2sizes_valid(int a, int b) 545 { 546 if (a < 0 || b < 0) return 0; 547 if (b == 0) return 1; // mul-by-0 is always safe 548 // portable way to check for no overflows in a*b 549 return a <= int.max/b; 550 } 551 552 int stbi__mad2sizes_valid(int a, int b, int add) 553 { 554 return stbi__mul2sizes_valid(a, b) && stbi__addsizes_valid(a*b, add); 555 } 556 557 // returns 1 if "a*b*c + add" has no negative terms/factors and doesn't overflow 558 int stbi__mad3sizes_valid(int a, int b, int c, int add) 559 { 560 return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a*b, c) && 561 stbi__addsizes_valid(a*b*c, add); 562 } 563 564 // returns 1 if "a*b*c*d + add" has no negative terms/factors and doesn't overflow 565 int stbi__mad4sizes_valid(int a, int b, int c, int d, int add) 566 { 567 return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a*b, c) && 568 stbi__mul2sizes_valid(a*b*c, d) && stbi__addsizes_valid(a*b*c*d, add); 569 } 570 571 void *stbi__malloc_mad2(int a, int b, int add) 572 { 573 if (!stbi__mad2sizes_valid(a, b, add)) return null; 574 return stbi__malloc(a*b + add); 575 } 576 577 void *stbi__malloc_mad3(int a, int b, int c, int add) 578 { 579 if (!stbi__mad3sizes_valid(a, b, c, add)) return null; 580 return stbi__malloc(a*b*c + add); 581 } 582 583 void *stbi__malloc_mad4(int a, int b, int c, int d, int add) 584 { 585 if (!stbi__mad4sizes_valid(a, b, c, d, add)) return null; 586 return stbi__malloc(a*b*c*d + add); 587 } 588 589 // stbi__err - error 590 591 deprecated int stbi__err(const(char)* msg, const(char)* msgUser) 592 { 593 return 0; 594 } 595 596 // stbi__errpf - error returning pointer to float 597 // stbi__errpuc - error returning pointer to unsigned char 598 deprecated float* stbi__errpf(const(char)* msg, const(char)* msgUser) 599 { 600 return cast(float*) (cast(size_t) stbi__err(msg, msgUser)); 
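// Worked example of the overflow guards above: with a = 50_000 and b = 50_000,
// a*b = 2_500_000_000 does not fit in an int. stbi__mul2sizes_valid rejects it
// because int.max / b == 42_949 is smaller than a, so stbi__malloc_mad2(a, b, 0)
// returns null instead of allocating from a wrapped-around size.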
601 } 602 603 deprecated ubyte* stbi__errpuc(const(char)* msg, const(char)* msgUser) 604 { 605 return cast(ubyte*) (cast(size_t) stbi__err(msg, msgUser)); 606 } 607 608 void stbi_image_free(void *retval_from_stbi_load) @trusted // TODO: make it @safe by changing stbi_load to return a slice 609 { 610 STBI_FREE(retval_from_stbi_load); 611 } 612 613 void *stbi__load_main(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc) 614 { 615 memset(ri, 0, (*ri).sizeof); // make sure it's initialized if we add new fields 616 ri.bits_per_channel = 8; // default is 8 so most paths don't have to be changed 617 ri.channel_order = STBI_ORDER_RGB; // all current input & output are this, but this is here so we can add BGR order 618 ri.num_channels = 0; 619 620 // test the formats with a very explicit header first (at least a FOURCC 621 // or distinctive magic number first) 622 // PERF: the tests here are redundant. Streams have been tested before for right format. 623 // Pass the gamut format explicitely. 624 version(decodePNG) 625 { 626 if (stbi__png_test(s)) return stbi__png_load(s,x,y,comp,req_comp, ri); 627 } 628 version(decodeBMP) 629 { 630 if (stbi__bmp_test(s)) return stbi__bmp_load(s,x,y,comp,req_comp, ri); 631 } 632 return null; 633 } 634 635 stbi_uc *stbi__convert_16_to_8(stbi__uint16 *orig, int w, int h, int channels) 636 { 637 int img_len = w * h * channels; 638 stbi_uc *reduced; 639 640 reduced = cast(stbi_uc *) stbi__malloc(img_len); 641 if (reduced == null) 642 return null; 643 644 for (int i = 0; i < img_len; ++i) 645 reduced[i] = cast(stbi_uc)((orig[i] >> 8) & 0xFF); // top half of each byte is sufficient approx of 16.8 bit scaling 646 647 STBI_FREE(orig); 648 return reduced; 649 } 650 651 stbi__uint16 *stbi__convert_8_to_16(stbi_uc *orig, int w, int h, int channels) 652 { 653 int i; 654 int img_len = w * h * channels; 655 stbi__uint16 *enlarged; 656 657 enlarged = cast(stbi__uint16 *) stbi__malloc(img_len*2); 658 if (enlarged == null) 659 return null; 660 661 for (i = 0; i < img_len; ++i) 662 enlarged[i] = (orig[i] << 8) + orig[i]; // replicate to high and low byte, maps 0.0, 255.0xffff 663 664 STBI_FREE(orig); 665 return enlarged; 666 } 667 668 669 ubyte *stbi__load_and_postprocess_8bit(stbi__context *s, int *x, int *y, int *comp, int req_comp) 670 { 671 stbi__result_info ri; 672 void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 8); 673 674 if (result == null) 675 return null; 676 677 // it is the responsibility of the loaders to make sure we get either 8 or 16 bit. 678 assert(ri.bits_per_channel == 8 || ri.bits_per_channel == 16); 679 680 if (ri.bits_per_channel != 8) { 681 result = stbi__convert_16_to_8(cast(stbi__uint16 *) result, *x, *y, req_comp == 0 ? *comp : req_comp); 682 ri.bits_per_channel = 8; 683 } 684 685 // @TODO: move stbi__convert_format to here 686 687 return cast(ubyte*) result; 688 } 689 690 stbi__uint16 *stbi__load_and_postprocess_16bit(stbi__context *s, int *x, int *y, int *comp, int req_comp) 691 { 692 stbi__result_info ri; 693 void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 16); 694 695 if (result == null) 696 return null; 697 698 // it is the responsibility of the loaders to make sure we get either 8 or 16 bit. 699 assert(ri.bits_per_channel == 8 || ri.bits_per_channel == 16); 700 701 if (ri.bits_per_channel != 16) { 702 result = stbi__convert_8_to_16(cast(stbi_uc *) result, *x, *y, req_comp == 0 ? 
*comp : req_comp); 703 ri.bits_per_channel = 16; 704 } 705 706 return cast(stbi__uint16 *) result; 707 } 708 709 void stbi__float_postprocess(float *result, int *x, int *y, int *comp, int req_comp) 710 { 711 } 712 713 stbi_us *stbi_load_16_from_callbacks(const(stbi_io_callbacks)*clbk, void *user, int *x, int *y, int *channels_in_file, 714 int desired_channels,float* ppmX, float* ppmY, float* pixelRatio) 715 { 716 stbi__context s; 717 stbi__start_callbacks(&s, cast(stbi_io_callbacks *)clbk, user); // const_cast here 718 stbi_us* res = stbi__load_and_postprocess_16bit(&s,x,y,channels_in_file,desired_channels); 719 *ppmX = s.ppmX; 720 *ppmY = s.ppmY; 721 *pixelRatio = s.pixelAspectRatio; 722 return res; 723 } 724 725 stbi_uc *stbi_load_from_callbacks(const(stbi_io_callbacks)*clbk, void *user, int *x, int *y, int *comp, int req_comp, 726 float* ppmX, float* ppmY, float* pixelRatio) 727 { 728 stbi__context s; 729 stbi__start_callbacks(&s, cast(stbi_io_callbacks *) clbk, user); // const_cast here 730 stbi_uc* res = stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp); 731 *ppmX = s.ppmX; 732 *ppmY = s.ppmY; 733 *pixelRatio = s.pixelAspectRatio; 734 return res; 735 } 736 737 version(enableLinear) 738 { 739 __gshared stbi__l2h_gamma = 2.2f; 740 __gshared stbi__l2h_scale = 1.0f; 741 742 void stbi_ldr_to_hdr_gamma(float gamma) 743 { 744 atomicStore(stbi__l2h_gamma, gamma); 745 } 746 747 void stbi_ldr_to_hdr_scale(float scale) 748 { 749 atomicStore(stbi__l2h_scale, scale); 750 } 751 } 752 753 754 shared(float) stbi__h2l_gamma_i = 1.0f / 2.2f, 755 stbi__h2l_scale_i = 1.0f; 756 757 void stbi_hdr_to_ldr_gamma(float gamma) 758 { 759 atomicStore(stbi__h2l_gamma_i, 1 / gamma); 760 } 761 762 void stbi_hdr_to_ldr_scale(float scale) 763 { 764 atomicStore(stbi__h2l_scale_i, 1 / scale); 765 } 766 767 768 ////////////////////////////////////////////////////////////////////////////// 769 // 770 // Common code used by all image loaders 771 // 772 773 enum 774 { 775 STBI__SCAN_load = 0, 776 STBI__SCAN_type, 777 STBI__SCAN_header 778 } 779 780 void stbi__refill_buffer(stbi__context *s) 781 { 782 int n = s.io.read(s.io_user_data, cast(char*)s.buffer_start, s.buflen); 783 s.callback_already_read += cast(int) (s.img_buffer - s.img_buffer_original); 784 if (n == 0) { 785 // at end of file, treat same as if from memory, but need to handle case 786 // where s.img_buffer isn't pointing to safe memory, e.g. 0-byte file 787 s.read_from_callbacks = 0; 788 s.img_buffer = s.buffer_start.ptr; 789 s.img_buffer_end = s.buffer_start.ptr+1; 790 *s.img_buffer = 0; 791 } else { 792 s.img_buffer = s.buffer_start.ptr; 793 s.img_buffer_end = s.buffer_start.ptr + n; 794 } 795 } 796 797 stbi_uc stbi__get8(stbi__context *s) 798 { 799 if (s.img_buffer < s.img_buffer_end) 800 return *s.img_buffer++; 801 if (s.read_from_callbacks) { 802 stbi__refill_buffer(s); 803 return *s.img_buffer++; 804 } 805 return 0; 806 } 807 808 int stbi__at_eof(stbi__context *s) 809 { 810 if (s.io.read) 811 { 812 if (!s.io.eof(s.io_user_data)) 813 return 0; 814 // if feof() is true, check if buffer = end 815 // special case: we've only got the special 0 character at the end 816 if (s.read_from_callbacks == 0) 817 return 1; 818 } 819 return s.img_buffer >= s.img_buffer_end; 820 } 821 822 void stbi__skip(stbi__context *s, int n) 823 { 824 if (n == 0) 825 return; // already there! 
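    // Remaining cases below: a negative n is treated as "drop whatever is
    // still buffered" (the read pointer jumps to the end of the buffer); a
    // positive n first consumes the bytes already buffered and only forwards
    // the remainder to the io.skip callback when the request exceeds what is
    // buffered.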
826 if (n < 0) 827 { 828 s.img_buffer = s.img_buffer_end; 829 return; 830 } 831 if (s.io.read) 832 { 833 int blen = cast(int) (s.img_buffer_end - s.img_buffer); 834 if (blen < n) 835 { 836 s.img_buffer = s.img_buffer_end; 837 s.io.skip(s.io_user_data, n - blen); 838 return; 839 } 840 } 841 s.img_buffer += n; 842 } 843 844 int stbi__getn(stbi__context *s, stbi_uc *buffer, int n) 845 { 846 if (s.io.read) 847 { 848 int blen = cast(int) (s.img_buffer_end - s.img_buffer); 849 if (blen < n) 850 { 851 int res, count; 852 memcpy(buffer, s.img_buffer, blen); 853 count = s.io.read(s.io_user_data, cast(char*) buffer + blen, n - blen); 854 res = (count == (n-blen)); 855 s.img_buffer = s.img_buffer_end; 856 return res; 857 } 858 } 859 860 if (s.img_buffer+n <= s.img_buffer_end) 861 { 862 memcpy(buffer, s.img_buffer, n); 863 s.img_buffer += n; 864 return 1; 865 } 866 else 867 return 0; 868 } 869 870 int stbi__get16be(stbi__context *s) 871 { 872 int z = stbi__get8(s); 873 return (z << 8) + stbi__get8(s); 874 } 875 876 stbi__uint32 stbi__get32be(stbi__context *s) 877 { 878 stbi__uint32 z = stbi__get16be(s); 879 return (z << 16) + stbi__get16be(s); 880 } 881 882 int stbi__get16le(stbi__context *s) 883 { 884 int z = stbi__get8(s); 885 return z + (stbi__get8(s) << 8); 886 } 887 888 stbi__uint32 stbi__get32le(stbi__context *s) 889 { 890 stbi__uint32 z = stbi__get16le(s); 891 z += cast(stbi__uint32)stbi__get16le(s) << 16; 892 return z; 893 } 894 895 ubyte STBI__BYTECAST(T)(T x) 896 { 897 return cast(ubyte)(x & 255); 898 } 899 900 ////////////////////////////////////////////////////////////////////////////// 901 // 902 // generic converter from built-in img_n to req_comp 903 // individual types do this automatically as much as possible (e.g. jpeg 904 // does all cases internally since it needs to colorspace convert anyway, 905 // and it never has alpha, so very few cases ). 
png can automatically 906 // interleave an alpha=255 channel, but falls back to this for other cases 907 // 908 // assume data buffer is malloced, so malloc a new one and free that one 909 // only failure mode is malloc failing 910 911 stbi_uc stbi__compute_y(int r, int g, int b) 912 { 913 return cast(ubyte)(((r * 77) + (g * 150) + (29 * b)) >> 8); 914 } 915 916 ubyte *stbi__convert_format(ubyte *data, int img_n, int req_comp, uint x, uint y) 917 { 918 int i,j; 919 ubyte *good; 920 921 if (req_comp == img_n) 922 return data; 923 assert(req_comp >= 1 && req_comp <= 4); 924 925 good = cast(ubyte*) stbi__malloc_mad3(req_comp, x, y, 0); 926 if (good == null) 927 { 928 STBI_FREE(data); 929 return null; 930 } 931 932 for (j = 0; j < cast(int) y; ++j) 933 { 934 ubyte *src = data + j * x * img_n ; 935 ubyte *dest = good + j * x * req_comp; 936 937 // convert source image with img_n components to one with req_comp components; 938 // avoid switch per pixel, so use switch per scanline and massive macros 939 switch (img_n * 8 + req_comp) 940 { 941 case 1 * 8 + 2: 942 { 943 for(i = x - 1; i >= 0; --i, src += 1, dest += 2) 944 { 945 dest[0] = src[0]; 946 dest[1] = 255; 947 } 948 } 949 break; 950 case 1 * 8 + 3: 951 { 952 for(i = x - 1; i >= 0; --i, src += 1, dest += 3) 953 { 954 dest[0] = dest[1] = dest[2] = src[0]; 955 } 956 } 957 break; 958 case 1 * 8 + 4: 959 for(i = x - 1; i >= 0; --i, src += 1, dest += 4) 960 { 961 dest[0] = dest[1] = dest[2] = src[0]; 962 dest[3] = 255; 963 } 964 break; 965 case 2 * 8 + 1: 966 { 967 for(i = x - 1; i >= 0; --i, src += 2, dest += 1) 968 { 969 dest[0] = src[0]; 970 } 971 } 972 break; 973 case 2 * 8 + 3: 974 { 975 for(i = x - 1; i >= 0; --i, src += 2, dest += 3) 976 { 977 dest[0] = dest[1] = dest[2] = src[0]; 978 } 979 } 980 break; 981 case 2 * 8 + 4: 982 { 983 for(i = x - 1; i >= 0; --i, src += 2, dest += 4) 984 { 985 dest[0] = dest[1] = dest[2] = src[0]; 986 dest[3] = src[1]; 987 } 988 } 989 break; 990 case 3 * 8 + 4: 991 { 992 for(i = x - 1; i >= 0; --i, src += 3, dest += 4) 993 { 994 dest[0] = src[0]; 995 dest[1] = src[1]; 996 dest[2] = src[2]; 997 dest[3] = 255; 998 } 999 } 1000 break; 1001 case 3 * 8 + 1: 1002 { 1003 for(i = x - 1; i >= 0; --i, src += 3, dest += 1) 1004 { 1005 dest[0] = stbi__compute_y(src[0],src[1],src[2]); 1006 } 1007 } 1008 break; 1009 case 3 * 8 + 2: 1010 { 1011 for(i = x - 1; i >= 0; --i, src += 3, dest += 2) 1012 { 1013 dest[0] = stbi__compute_y(src[0],src[1],src[2]); 1014 dest[1] = 255; 1015 } 1016 } 1017 break; 1018 1019 case 4 * 8 + 1: 1020 { 1021 for(i = x - 1; i >= 0; --i, src += 4, dest += 1) 1022 { 1023 dest[0] = stbi__compute_y(src[0],src[1],src[2]); 1024 } 1025 } 1026 break; 1027 1028 case 4 * 8 + 2: 1029 { 1030 for(i = x - 1; i >= 0; --i, src += 4, dest += 2) 1031 { 1032 dest[0] = stbi__compute_y(src[0],src[1],src[2]); 1033 dest[1] = src[3]; 1034 } 1035 } 1036 break; 1037 case 4 * 8 + 3: 1038 { 1039 for(i = x - 1; i >= 0; --i, src += 4, dest += 3) 1040 { 1041 dest[0] = src[0]; 1042 dest[1] = src[1]; 1043 dest[2] = src[2]; 1044 } 1045 } 1046 break; 1047 default: 1048 assert(0); 1049 } 1050 } 1051 1052 STBI_FREE(data); 1053 return good; 1054 } 1055 1056 stbi__uint16 stbi__compute_y_16(int r, int g, int b) 1057 { 1058 return cast(stbi__uint16) (((r*77) + (g*150) + (29*b)) >> 8); 1059 } 1060 1061 stbi__uint16* stbi__convert_format16(stbi__uint16 *data, int img_n, int req_comp, uint x, uint y) 1062 { 1063 int i,j; 1064 stbi__uint16 *good; 1065 1066 if (req_comp == img_n) 1067 return data; 1068 assert(req_comp >= 1 && req_comp <= 4); 
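    // The switch below keys on img_n*8 + req_comp, packing (source components,
    // requested components) into one integer; e.g. 3*8 + 4 == 28 selects the
    // RGB -> RGBA row loop. The grey conversions (stbi__compute_y/_16) use
    // fixed-point Rec.601 weights, (77*r + 150*g + 29*b) >> 8, i.e. roughly
    // 0.299*r + 0.587*g + 0.114*b; the weights sum to exactly 256 so a
    // full-scale white input stays full-scale.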
1069 1070 good = cast(stbi__uint16 *) stbi__malloc(req_comp * x * y * 2); 1071 if (good == null) 1072 { 1073 STBI_FREE(data); 1074 return null; 1075 } 1076 1077 for (j = 0; j < cast(int) y; ++j) 1078 { 1079 stbi__uint16 *src = data + j * x * img_n ; 1080 stbi__uint16 *dest = good + j * x * req_comp; 1081 1082 // convert source image with img_n components to one with req_comp components; 1083 // avoid switch per pixel, so use switch per scanline and massive macros 1084 switch (img_n * 8 + req_comp) 1085 { 1086 case 1 * 8 + 2: 1087 { 1088 for(i = x - 1; i >= 0; --i, src += 1, dest += 2) 1089 { 1090 dest[0] = src[0]; 1091 dest[1] = 0xffff; 1092 } 1093 } 1094 break; 1095 case 1 * 8 + 3: 1096 { 1097 for(i = x - 1; i >= 0; --i, src += 1, dest += 3) 1098 { 1099 dest[0] = dest[1] = dest[2] = src[0]; 1100 } 1101 } 1102 break; 1103 case 1 * 8 + 4: 1104 for(i = x - 1; i >= 0; --i, src += 1, dest += 4) 1105 { 1106 dest[0] = dest[1] = dest[2] = src[0]; 1107 dest[3] = 0xffff; 1108 } 1109 break; 1110 case 2 * 8 + 1: 1111 { 1112 for(i = x - 1; i >= 0; --i, src += 2, dest += 1) 1113 { 1114 dest[0] = src[0]; 1115 } 1116 } 1117 break; 1118 case 2 * 8 + 3: 1119 { 1120 for(i = x - 1; i >= 0; --i, src += 2, dest += 3) 1121 { 1122 dest[0] = dest[1] = dest[2] = src[0]; 1123 } 1124 } 1125 break; 1126 case 2 * 8 + 4: 1127 { 1128 for(i = x - 1; i >= 0; --i, src += 2, dest += 4) 1129 { 1130 dest[0] = dest[1] = dest[2] = src[0]; 1131 dest[3] = src[1]; 1132 } 1133 } 1134 break; 1135 case 3 * 8 + 4: 1136 { 1137 for(i = x - 1; i >= 0; --i, src += 3, dest += 4) 1138 { 1139 dest[0] = src[0]; 1140 dest[1] = src[1]; 1141 dest[2] = src[2]; 1142 dest[3] = 0xffff; 1143 } 1144 } 1145 break; 1146 case 3 * 8 + 1: 1147 { 1148 for(i = x - 1; i >= 0; --i, src += 3, dest += 1) 1149 { 1150 dest[0] = stbi__compute_y_16(src[0],src[1],src[2]); 1151 } 1152 } 1153 break; 1154 case 3 * 8 + 2: 1155 { 1156 for(i = x - 1; i >= 0; --i, src += 3, dest += 2) 1157 { 1158 dest[0] = stbi__compute_y_16(src[0],src[1],src[2]); 1159 dest[1] = 0xffff; 1160 } 1161 } 1162 break; 1163 1164 case 4 * 8 + 1: 1165 { 1166 for(i = x - 1; i >= 0; --i, src += 4, dest += 1) 1167 { 1168 dest[0] = stbi__compute_y_16(src[0],src[1],src[2]); 1169 } 1170 } 1171 break; 1172 1173 case 4 * 8 + 2: 1174 { 1175 for(i = x - 1; i >= 0; --i, src += 4, dest += 2) 1176 { 1177 dest[0] = stbi__compute_y_16(src[0],src[1],src[2]); 1178 dest[1] = src[3]; 1179 } 1180 } 1181 break; 1182 case 4 * 8 + 3: 1183 { 1184 for(i = x - 1; i >= 0; --i, src += 4, dest += 3) 1185 { 1186 dest[0] = src[0]; 1187 dest[1] = src[1]; 1188 dest[2] = src[2]; 1189 } 1190 } 1191 break; 1192 default: 1193 assert(0); 1194 } 1195 } 1196 1197 STBI_FREE(data); 1198 return good; 1199 } 1200 1201 version(enableLinear) 1202 { 1203 float* stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp) 1204 { 1205 int i,k,n; 1206 float *output; 1207 if (!data) return null; 1208 output = cast(float *) stbi__malloc_mad4(x, y, comp, float.sizeof, 0); 1209 if (output == null) 1210 { 1211 STBI_FREE(data); 1212 return null; 1213 } 1214 // compute number of non-alpha components 1215 if (comp & 1) 1216 n = comp; 1217 else 1218 n = comp - 1; 1219 for (i = 0; i < x*y; ++i) 1220 { 1221 for (k = 0; k < n; ++k) 1222 { 1223 output[i*comp + k] = cast(float) (pow(data[i*comp+k] / 255.0f, stbi__l2h_gamma) * stbi__l2h_scale); 1224 } 1225 } 1226 if (n < comp) 1227 { 1228 for (i=0; i < x*y; ++i) 1229 { 1230 output[i*comp + n] = data[i*comp + n] / 255.0f; 1231 } 1232 } 1233 STBI_FREE(data); 1234 return output; 1235 } 1236 } 1237 1238 int 
stbi__float2int(float x) 1239 { 1240 return cast(int)x; 1241 } 1242 1243 int stbi__bitreverse16(int n) 1244 { 1245 n = ((n & 0xAAAA) >> 1) | ((n & 0x5555) << 1); 1246 n = ((n & 0xCCCC) >> 2) | ((n & 0x3333) << 2); 1247 n = ((n & 0xF0F0) >> 4) | ((n & 0x0F0F) << 4); 1248 n = ((n & 0xFF00) >> 8) | ((n & 0x00FF) << 8); 1249 return n; 1250 } 1251 1252 int stbi__bit_reverse(int v, int bits) 1253 { 1254 assert(bits <= 16); 1255 // to bit reverse n bits, reverse 16 and shift 1256 // e.g. 11 bits, bit reverse and shift away 5 1257 return stbi__bitreverse16(v) >> (16-bits); 1258 } 1259 1260 version(decodePNG) 1261 { 1262 import gamut.codecs.miniz; 1263 1264 /// Params: 1265 /// buffer Input buffer 1266 /// len Length of input buffer 1267 /// initial_size Size hint for output buffer (which is realloc on growth) 1268 public ubyte *stbi_zlib_decode_malloc_guesssize_headerflag( 1269 const(char)*buffer, 1270 int len, 1271 int initial_size, // note: stb_image gives the right initial_size, and the right outout buffer length 1272 int *outlen, 1273 int parse_header) 1274 { 1275 ubyte* outBuf = cast(ubyte*) malloc(initial_size); 1276 if (outBuf == null) 1277 return null; 1278 1279 1280 c_ulong destLen = initial_size; 1281 while(true) 1282 { 1283 1284 c_ulong inputLen = len; 1285 bool trusted_input = true; // this allows to not check adler32, but I'm not sure how safe that is. #SECURITY 1286 1287 int res = mz_uncompress3(outBuf, 1288 &destLen, 1289 cast(const(ubyte)*) buffer, 1290 &inputLen, 1291 parse_header ? MZ_DEFAULT_WINDOW_BITS : -MZ_DEFAULT_WINDOW_BITS, 1292 trusted_input); 1293 1294 if (res == MZ_OK) 1295 break; 1296 1297 if (res == MZ_BUF_ERROR) 1298 { 1299 if (initial_size > 536_870_912) // That much bytes is suspicious in just a zlib chunk 1300 { 1301 free(outBuf); 1302 return null; 1303 } 1304 1305 initial_size = initial_size*2; 1306 if (initial_size < 32*1024) 1307 initial_size = 32*1024; 1308 1309 outBuf = cast(ubyte*) realloc(outBuf, initial_size); 1310 if (outBuf == null) 1311 return null; 1312 destLen = initial_size; 1313 } 1314 else 1315 { 1316 free(outBuf); 1317 return null; 1318 } 1319 } 1320 *outlen = cast(int)(destLen); 1321 return outBuf; 1322 } 1323 } 1324 1325 // public domain "baseline" PNG decoder v0.10 Sean Barrett 2006-11-18 1326 // simple implementation 1327 // - only 8-bit samples 1328 // - no CRC checking 1329 // - allocates lots of intermediate memory 1330 // - avoids problem of streaming data between subsystems 1331 // - avoids explicit window management 1332 // performance 1333 // - uses stb_zlib, a PD zlib implementation with fast huffman decoding 1334 1335 version(decodePNG) 1336 { 1337 struct stbi__pngchunk 1338 { 1339 stbi__uint32 length; 1340 stbi__uint32 type; 1341 } 1342 1343 stbi__pngchunk stbi__get_chunk_header(stbi__context *s) 1344 { 1345 stbi__pngchunk c; 1346 c.length = stbi__get32be(s); 1347 c.type = stbi__get32be(s); 1348 return c; 1349 } 1350 1351 int stbi__check_png_header(stbi__context *s) 1352 { 1353 static immutable stbi_uc[8] png_sig = [ 137,80,78,71,13,10,26,10 ]; 1354 int i; 1355 for (i=0; i < 8; ++i) 1356 if (stbi__get8(s) != png_sig[i]) 1357 return 0; //stbi__err("bad png sig","Not a PNG"); 1358 return 1; 1359 } 1360 1361 struct stbi__png 1362 { 1363 stbi__context *s; 1364 stbi_uc* idata; 1365 stbi_uc* expanded; 1366 stbi_uc* out_; 1367 int depth; 1368 } 1369 1370 enum 1371 { 1372 STBI__F_none=0, 1373 STBI__F_sub=1, 1374 STBI__F_up=2, 1375 STBI__F_avg=3, 1376 STBI__F_paeth=4, 1377 // synthetic filters used for first scanline to avoid needing a dummy 
row of 0s 1378 STBI__F_avg_first, 1379 STBI__F_paeth_first 1380 } 1381 1382 static immutable stbi_uc[5] first_row_filter = 1383 [ 1384 STBI__F_none, 1385 STBI__F_sub, 1386 STBI__F_none, 1387 STBI__F_avg_first, 1388 STBI__F_paeth_first 1389 ]; 1390 1391 int stbi__paeth(int a, int b, int c) 1392 { 1393 int p = a + b - c; 1394 int pa = abs(p-a); 1395 int pb = abs(p-b); 1396 int pc = abs(p-c); 1397 if (pa <= pb && pa <= pc) 1398 return a; 1399 if (pb <= pc) 1400 return b; 1401 return c; 1402 } 1403 1404 static immutable stbi_uc[9] stbi__depth_scale_table = [ 0, 0xff, 0x55, 0, 0x11, 0,0,0, 0x01 ]; 1405 1406 // create the png data from post-deflated data 1407 int stbi__create_png_image_raw(stbi__png *a, stbi_uc *raw, stbi__uint32 raw_len, int out_n, stbi__uint32 x, stbi__uint32 y, int depth, int color) 1408 { 1409 int bytes = (depth == 16? 2 : 1); 1410 stbi__context *s = a.s; 1411 stbi__uint32 i,j,stride = x*out_n*bytes; 1412 stbi__uint32 img_len, img_width_bytes; 1413 int k; 1414 int img_n = s.img_n; // copy it into a local for later 1415 1416 int output_bytes = out_n*bytes; 1417 int filter_bytes = img_n*bytes; 1418 int width = x; 1419 1420 assert(out_n == s.img_n || out_n == s.img_n+1); 1421 a.out_ = cast(stbi_uc *) stbi__malloc_mad3(x, y, output_bytes, 0); // extra bytes to write off the end into 1422 if (!a.out_) return 0; //stbi__err("outofmem", "Out of memory"); 1423 1424 if (!stbi__mad3sizes_valid(img_n, x, depth, 7)) return 0; //stbi__err("too large", "Corrupt PNG"); 1425 img_width_bytes = (((img_n * x * depth) + 7) >> 3); 1426 img_len = (img_width_bytes + 1) * y; 1427 1428 // we used to check for exact match between raw_len and img_len on non-interlaced PNGs, 1429 // but issue #276 reported a PNG in the wild that had extra data at the end (all zeros), 1430 // so just check for raw_len < img_len always. 
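    // Worked example of the accounting above: for a 5-pixel-wide, 3-channel,
    // 8-bit image, img_width_bytes = ((3*5*8) + 7) >> 3 = 15, and every
    // scanline of the inflated stream carries 1 filter-type byte plus those
    // 15 data bytes, hence img_len = (15 + 1) * y for the check below.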
1431 if (raw_len < img_len) return 0; //stbi__err("not enough pixels","Corrupt PNG"); 1432 1433 for (j=0; j < y; ++j) 1434 { 1435 stbi_uc *cur = a.out_ + stride*j; 1436 stbi_uc *prior; 1437 int filter = *raw++; 1438 1439 if (filter > 4) 1440 return 0; //stbi__err("invalid filter","Corrupt PNG"); 1441 1442 if (depth < 8) { 1443 if (img_width_bytes > x) return 0; //stbi__err("invalid width","Corrupt PNG"); 1444 cur += x*out_n - img_width_bytes; // store output to the rightmost img_len bytes, so we can decode in place 1445 filter_bytes = 1; 1446 width = img_width_bytes; 1447 } 1448 prior = cur - stride; // bugfix: need to compute this after 'cur +=' computation above 1449 1450 // if first row, use special filter that doesn't sample previous row 1451 if (j == 0) filter = first_row_filter[filter]; 1452 1453 // handle first byte explicitly 1454 for (k=0; k < filter_bytes; ++k) 1455 { 1456 switch (filter) { 1457 case STBI__F_none : cur[k] = raw[k]; break; 1458 case STBI__F_sub : cur[k] = raw[k]; break; 1459 case STBI__F_up : cur[k] = STBI__BYTECAST(raw[k] + prior[k]); break; 1460 case STBI__F_avg : cur[k] = STBI__BYTECAST(raw[k] + (prior[k]>>1)); break; 1461 case STBI__F_paeth : cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(0,prior[k],0)); break; 1462 case STBI__F_avg_first : cur[k] = raw[k]; break; 1463 case STBI__F_paeth_first: cur[k] = raw[k]; break; 1464 default: assert(false); 1465 } 1466 } 1467 1468 if (depth == 8) { 1469 if (img_n != out_n) 1470 cur[img_n] = 255; // first pixel 1471 raw += img_n; 1472 cur += out_n; 1473 prior += out_n; 1474 } else if (depth == 16) { 1475 if (img_n != out_n) { 1476 cur[filter_bytes] = 255; // first pixel top byte 1477 cur[filter_bytes+1] = 255; // first pixel bottom byte 1478 } 1479 raw += filter_bytes; 1480 cur += output_bytes; 1481 prior += output_bytes; 1482 } else { 1483 raw += 1; 1484 cur += 1; 1485 prior += 1; 1486 } 1487 1488 // this is a little gross, so that we don't switch per-pixel or per-component 1489 if (depth < 8 || img_n == out_n) { 1490 int nk = (width - 1)*filter_bytes; 1491 switch (filter) { 1492 // "none" filter turns into a memcpy here; make that explicit. 
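            // What the five PNG filters reconstruct to (per the PNG spec):
            // Sub adds the byte filter_bytes positions to the left
            // (cur[k-filter_bytes]), Up adds the byte directly above (prior[k]),
            // Average adds the floor of the mean of those two, and Paeth adds
            // whichever of left/above/upper-left is closest to
            // left + above - upper_left. The *_first variants are the same
            // filters specialised for row 0, where every "above" term is zero.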
1493 case STBI__F_none: 1494 memcpy(cur, raw, nk); 1495 break; 1496 case STBI__F_sub: for (k=0; k < nk; ++k) { cur[k] = STBI__BYTECAST(raw[k] + cur[k-filter_bytes]); } break; 1497 case STBI__F_up: for (k=0; k < nk; ++k) { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break; 1498 case STBI__F_avg: for (k=0; k < nk; ++k) { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k-filter_bytes])>>1)); } break; 1499 case STBI__F_paeth: for (k=0; k < nk; ++k) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],prior[k],prior[k-filter_bytes])); } break; 1500 case STBI__F_avg_first: for (k=0; k < nk; ++k) { cur[k] = STBI__BYTECAST(raw[k] + (cur[k-filter_bytes] >> 1)); } break; 1501 case STBI__F_paeth_first: for (k=0; k < nk; ++k) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],0,0)); } break; 1502 default: assert(0); 1503 } 1504 raw += nk; 1505 } else { 1506 assert(img_n+1 == out_n); 1507 switch (filter) { 1508 case STBI__F_none: 1509 for (i=x-1; i >= 1; --i, cur[filter_bytes]=255,raw+=filter_bytes,cur+=output_bytes,prior+=output_bytes) 1510 for (k=0; k < filter_bytes; ++k) 1511 { cur[k] = raw[k]; } break; 1512 case STBI__F_sub: 1513 for (i=x-1; i >= 1; --i, cur[filter_bytes]=255,raw+=filter_bytes,cur+=output_bytes,prior+=output_bytes) 1514 for (k=0; k < filter_bytes; ++k) 1515 { cur[k] = STBI__BYTECAST(raw[k] + cur[k- output_bytes]); } break; 1516 case STBI__F_up: 1517 for (i=x-1; i >= 1; --i, cur[filter_bytes]=255,raw+=filter_bytes,cur+=output_bytes,prior+=output_bytes) 1518 for (k=0; k < filter_bytes; ++k) 1519 { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break; 1520 case STBI__F_avg: 1521 for (i=x-1; i >= 1; --i, cur[filter_bytes]=255,raw+=filter_bytes,cur+=output_bytes,prior+=output_bytes) 1522 for (k=0; k < filter_bytes; ++k) 1523 { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k- output_bytes])>>1)); } break; 1524 case STBI__F_paeth: 1525 for (i=x-1; i >= 1; --i, cur[filter_bytes]=255,raw+=filter_bytes,cur+=output_bytes,prior+=output_bytes) 1526 for (k=0; k < filter_bytes; ++k) 1527 { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k- output_bytes],prior[k],prior[k- output_bytes])); } break; 1528 case STBI__F_avg_first: 1529 for (i=x-1; i >= 1; --i, cur[filter_bytes]=255,raw+=filter_bytes,cur+=output_bytes,prior+=output_bytes) 1530 for (k=0; k < filter_bytes; ++k) 1531 { cur[k] = STBI__BYTECAST(raw[k] + (cur[k- output_bytes] >> 1)); } break; 1532 case STBI__F_paeth_first: 1533 for (i=x-1; i >= 1; --i, cur[filter_bytes]=255,raw+=filter_bytes,cur+=output_bytes,prior+=output_bytes) 1534 for (k=0; k < filter_bytes; ++k) 1535 { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k- output_bytes],0,0)); } break; 1536 default: assert(0); 1537 } 1538 1539 // the loop above sets the high byte of the pixels' alpha, but for 1540 // 16 bit png files we also need the low byte set. we'll do that here. 1541 if (depth == 16) { 1542 cur = a.out_ + stride*j; // start at the beginning of the row again 1543 for (i=0; i < x; ++i,cur+=output_bytes) { 1544 cur[filter_bytes+1] = 255; 1545 } 1546 } 1547 } 1548 } 1549 1550 // we make a separate pass to expand bits to pixels; for performance, 1551 // this could run two scanlines behind the above code, so it won't 1552 // intefere with filtering but will still be in the cache. 1553 if (depth < 8) { 1554 for (j=0; j < y; ++j) { 1555 stbi_uc *cur = a.out_ + stride*j; 1556 stbi_uc *in_ = a.out_ + stride*j + x*out_n - img_width_bytes; 1557 // unpack 1/2/4-bit into a 8-bit buffer. 
allows us to keep the common 8-bit path optimal at minimal cost for 1/2/4-bit 1558 // png guarante byte alignment, if width is not multiple of 8/4/2 we'll decode dummy trailing data that will be skipped in the later loop 1559 stbi_uc scale = (color == 0) ? stbi__depth_scale_table[depth] : 1; // scale grayscale values to 0..255 range 1560 1561 // note that the final byte might overshoot and write more data than desired. 1562 // we can allocate enough data that this never writes out of memory, but it 1563 // could also overwrite the next scanline. can it overwrite non-empty data 1564 // on the next scanline? yes, consider 1-pixel-wide scanlines with 1-bit-per-pixel. 1565 // so we need to explicitly clamp the final ones 1566 1567 if (depth == 4) { 1568 for (k=x*img_n; k >= 2; k-=2, ++in_) { 1569 *cur++ = cast(ubyte)(scale * ((*in_ >> 4)) ); 1570 *cur++ = cast(ubyte)(scale * ((*in_ ) & 0x0f)); 1571 } 1572 if (k > 0) *cur++ = cast(ubyte)(scale * ((*in_ >> 4) )); 1573 } else if (depth == 2) { 1574 for (k=x*img_n; k >= 4; k-=4, ++in_) { 1575 *cur++ = cast(ubyte)(scale * ((*in_ >> 6) )); 1576 *cur++ = cast(ubyte)(scale * ((*in_ >> 4) & 0x03)); 1577 *cur++ = cast(ubyte)(scale * ((*in_ >> 2) & 0x03)); 1578 *cur++ = cast(ubyte)(scale * ((*in_ ) & 0x03)); 1579 } 1580 if (k > 0) *cur++ = cast(ubyte)(scale * ((*in_ >> 6) )); 1581 if (k > 1) *cur++ = cast(ubyte)(scale * ((*in_ >> 4) & 0x03)); 1582 if (k > 2) *cur++ = cast(ubyte)(scale * ((*in_ >> 2) & 0x03)); 1583 } else if (depth == 1) { 1584 for (k=x*img_n; k >= 8; k-=8, ++in_) { 1585 *cur++ = cast(ubyte)(scale * ((*in_ >> 7) )); 1586 *cur++ = cast(ubyte)(scale * ((*in_ >> 6) & 0x01)); 1587 *cur++ = cast(ubyte)(scale * ((*in_ >> 5) & 0x01)); 1588 *cur++ = cast(ubyte)(scale * ((*in_ >> 4) & 0x01)); 1589 *cur++ = cast(ubyte)(scale * ((*in_ >> 3) & 0x01)); 1590 *cur++ = cast(ubyte)(scale * ((*in_ >> 2) & 0x01)); 1591 *cur++ = cast(ubyte)(scale * ((*in_ >> 1) & 0x01)); 1592 *cur++ = cast(ubyte)(scale * ((*in_ ) & 0x01)); 1593 } 1594 if (k > 0) *cur++ = cast(ubyte)(scale * ((*in_ >> 7) )); 1595 if (k > 1) *cur++ = cast(ubyte)(scale * ((*in_ >> 6) & 0x01)); 1596 if (k > 2) *cur++ = cast(ubyte)(scale * ((*in_ >> 5) & 0x01)); 1597 if (k > 3) *cur++ = cast(ubyte)(scale * ((*in_ >> 4) & 0x01)); 1598 if (k > 4) *cur++ = cast(ubyte)(scale * ((*in_ >> 3) & 0x01)); 1599 if (k > 5) *cur++ = cast(ubyte)(scale * ((*in_ >> 2) & 0x01)); 1600 if (k > 6) *cur++ = cast(ubyte)(scale * ((*in_ >> 1) & 0x01)); 1601 } 1602 if (img_n != out_n) { 1603 int q; 1604 // insert alpha = 255 1605 cur = a.out_ + stride*j; 1606 if (img_n == 1) { 1607 for (q=x-1; q >= 0; --q) { 1608 cur[q*2+1] = 255; 1609 cur[q*2+0] = cur[q]; 1610 } 1611 } else { 1612 assert(img_n == 3); 1613 for (q=x-1; q >= 0; --q) { 1614 cur[q*4+3] = 255; 1615 cur[q*4+2] = cur[q*3+2]; 1616 cur[q*4+1] = cur[q*3+1]; 1617 cur[q*4+0] = cur[q*3+0]; 1618 } 1619 } 1620 } 1621 } 1622 } else if (depth == 16) { 1623 // force the image data from big-endian to platform-native. 1624 // this is done in a separate pass due to the decoding relying 1625 // on the data being untouched, but could probably be done 1626 // per-line during decode if care is taken. 
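        // PNG stores 16-bit samples big-endian; the loop below rewrites each
        // sample in place as a native-endian ushort. Reading cur[0]/cur[1] and
        // storing through cur16 is safe because both pointers walk the same
        // buffer in lockstep and each store lands exactly on the two bytes
        // that were just read.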
1627 stbi_uc *cur = a.out_; 1628 stbi__uint16 *cur16 = cast(stbi__uint16*)cur; 1629 1630 for(i=0; i < x*y*out_n; ++i,cur16++,cur+=2) { 1631 *cur16 = (cur[0] << 8) | cur[1]; 1632 } 1633 } 1634 1635 return 1; 1636 } 1637 1638 int stbi__create_png_image(stbi__png *a, stbi_uc *image_data, stbi__uint32 image_data_len, int out_n, int depth, int color, int interlaced) 1639 { 1640 int bytes = (depth == 16 ? 2 : 1); 1641 int out_bytes = out_n * bytes; 1642 stbi_uc *final_; 1643 int p; 1644 if (!interlaced) 1645 return stbi__create_png_image_raw(a, image_data, image_data_len, out_n, a.s.img_x, a.s.img_y, depth, color); 1646 1647 // de-interlacing 1648 final_ = cast(stbi_uc *) stbi__malloc_mad3(a.s.img_x, a.s.img_y, out_bytes, 0); 1649 if (!final_) return 0; //stbi__err("outofmem", "Out of memory"); 1650 for (p=0; p < 7; ++p) { 1651 static immutable int[7] xorig = [ 0,4,0,2,0,1,0 ]; 1652 static immutable int[7] yorig = [ 0,0,4,0,2,0,1 ]; 1653 static immutable int[7] xspc = [ 8,8,4,4,2,2,1 ]; 1654 static immutable int[7] yspc = [ 8,8,8,4,4,2,2 ]; 1655 int i,j,x,y; 1656 // pass1_x[4] = 0, pass1_x[5] = 1, pass1_x[12] = 1 1657 x = (a.s.img_x - xorig[p] + xspc[p]-1) / xspc[p]; 1658 y = (a.s.img_y - yorig[p] + yspc[p]-1) / yspc[p]; 1659 if (x && y) { 1660 stbi__uint32 img_len = ((((a.s.img_n * x * depth) + 7) >> 3) + 1) * y; 1661 if (!stbi__create_png_image_raw(a, image_data, image_data_len, out_n, x, y, depth, color)) { 1662 STBI_FREE(final_); 1663 return 0; 1664 } 1665 for (j=0; j < y; ++j) { 1666 for (i=0; i < x; ++i) { 1667 int out_y = j*yspc[p]+yorig[p]; 1668 int out_x = i*xspc[p]+xorig[p]; 1669 memcpy(final_ + out_y*a.s.img_x*out_bytes + out_x*out_bytes, 1670 a.out_ + (j*x+i)*out_bytes, out_bytes); 1671 } 1672 } 1673 STBI_FREE(a.out_); 1674 image_data += img_len; 1675 image_data_len -= img_len; 1676 } 1677 } 1678 a.out_ = final_; 1679 1680 return 1; 1681 } 1682 1683 int stbi__compute_transparency(stbi__png *z, stbi_uc* tc, int out_n) 1684 { 1685 stbi__context *s = z.s; 1686 stbi__uint32 i, pixel_count = s.img_x * s.img_y; 1687 stbi_uc *p = z.out_; 1688 1689 // compute color-based transparency, assuming we've 1690 // already got 255 as the alpha value in the output 1691 assert(out_n == 2 || out_n == 4); 1692 1693 if (out_n == 2) { 1694 for (i=0; i < pixel_count; ++i) { 1695 p[1] = (p[0] == tc[0] ? 0 : 255); 1696 p += 2; 1697 } 1698 } else { 1699 for (i=0; i < pixel_count; ++i) { 1700 if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2]) 1701 p[3] = 0; 1702 p += 4; 1703 } 1704 } 1705 return 1; 1706 } 1707 1708 int stbi__compute_transparency16(stbi__png *z, stbi__uint16* tc, int out_n) 1709 { 1710 stbi__context *s = z.s; 1711 stbi__uint32 i, pixel_count = s.img_x * s.img_y; 1712 stbi__uint16 *p = cast(stbi__uint16*) z.out_; 1713 1714 // compute color-based transparency, assuming we've 1715 // already got 65535 as the alpha value in the output 1716 assert(out_n == 2 || out_n == 4); 1717 1718 if (out_n == 2) { 1719 for (i = 0; i < pixel_count; ++i) { 1720 p[1] = (p[0] == tc[0] ? 
0 : 65535); 1721 p += 2; 1722 } 1723 } else { 1724 for (i = 0; i < pixel_count; ++i) { 1725 if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2]) 1726 p[3] = 0; 1727 p += 4; 1728 } 1729 } 1730 return 1; 1731 } 1732 1733 int stbi__expand_png_palette(stbi__png *a, stbi_uc *palette, int len, int pal_img_n) 1734 { 1735 stbi__uint32 i, pixel_count = a.s.img_x * a.s.img_y; 1736 stbi_uc* p, temp_out, orig = a.out_; 1737 1738 p = cast(stbi_uc *) stbi__malloc_mad2(pixel_count, pal_img_n, 0); 1739 if (p == null) return 0; //stbi__err("outofmem", "Out of memory"); 1740 1741 // between here and free(out) below, exitting would leak 1742 temp_out = p; 1743 1744 if (pal_img_n == 3) { 1745 for (i=0; i < pixel_count; ++i) { 1746 int n = orig[i]*4; 1747 p[0] = palette[n ]; 1748 p[1] = palette[n+1]; 1749 p[2] = palette[n+2]; 1750 p += 3; 1751 } 1752 } else { 1753 for (i=0; i < pixel_count; ++i) { 1754 int n = orig[i]*4; 1755 p[0] = palette[n ]; 1756 p[1] = palette[n+1]; 1757 p[2] = palette[n+2]; 1758 p[3] = palette[n+3]; 1759 p += 4; 1760 } 1761 } 1762 STBI_FREE(a.out_); 1763 a.out_ = temp_out; 1764 1765 return 1; 1766 } 1767 1768 enum stbi__unpremultiply_on_load = 1; 1769 1770 uint STBI__PNG_TYPE(char a, char b, char c, char d) 1771 { 1772 return ( (cast(uint)a) << 24 ) 1773 + ( (cast(uint)b) << 16 ) 1774 + ( (cast(uint)c) << 8 ) 1775 + ( (cast(uint)d) << 0 ); 1776 } 1777 1778 int stbi__parse_png_file(stbi__png *z, int scan, int req_comp) 1779 { 1780 stbi_uc[1024] palette; 1781 stbi_uc pal_img_n=0; 1782 stbi_uc has_trans = 0; 1783 stbi_uc[3] tc = [0, 0, 0]; 1784 stbi__uint16[3] tc16; 1785 stbi__uint32 ioff=0, idata_limit=0, i, pal_len=0; 1786 int first=1,k,interlace=0, color=0, is_iphone=0; 1787 stbi__context *s = z.s; 1788 1789 z.expanded = null; 1790 z.idata = null; 1791 z.out_ = null; 1792 1793 s.ppmX = -1; 1794 s.ppmY = -1; 1795 s.pixelAspectRatio = -1; 1796 1797 if (!stbi__check_png_header(s)) return 0; 1798 1799 if (scan == STBI__SCAN_type) return 1; 1800 1801 for (;;) { 1802 stbi__pngchunk c = stbi__get_chunk_header(s); 1803 uint aaaa = c.type; 1804 switch (c.type) { 1805 case STBI__PNG_TYPE('C','g','B','I'): 1806 is_iphone = 1; 1807 stbi__skip(s, c.length); 1808 break; 1809 1810 case STBI__PNG_TYPE('p','H','Y','s'): 1811 s.ppmX = stbi__get32be(s); 1812 s.ppmY = stbi__get32be(s); 1813 s.pixelAspectRatio = s.ppmX / s.ppmY; 1814 ubyte unit = stbi__get8(s); 1815 if (unit != 1) 1816 { 1817 s.ppmX = -1; // only contains an aspect ratio, but no physical resolution 1818 s.ppmY = -1; 1819 } 1820 break; 1821 1822 case STBI__PNG_TYPE('I','H','D','R'): { 1823 int comp,filter; 1824 if (!first) return 0; //stbi__err("multiple IHDR","Corrupt PNG"); 1825 first = 0; 1826 if (c.length != 13) return 0; //stbi__err("bad IHDR len","Corrupt PNG"); 1827 s.img_x = stbi__get32be(s); 1828 s.img_y = stbi__get32be(s); 1829 if (s.img_y > STBI_MAX_DIMENSIONS) return 0; //stbi__err("too large","Very large image (corrupt?)"); 1830 if (s.img_x > STBI_MAX_DIMENSIONS) return 0; //stbi__err("too large","Very large image (corrupt?)"); 1831 z.depth = stbi__get8(s); if (z.depth != 1 && z.depth != 2 && z.depth != 4 && z.depth != 8 && z.depth != 16) return 0; //stbi__err("1/2/4/8/16-bit only","PNG not supported: 1/2/4/8/16-bit only"); 1832 color = stbi__get8(s); if (color > 6) return 0; //stbi__err("bad ctype","Corrupt PNG"); 1833 if (color == 3 && z.depth == 16) return 0; //stbi__err("bad ctype","Corrupt PNG"); 1834 if (color == 3) pal_img_n = 3; else if (color & 1) return 0; //stbi__err("bad ctype","Corrupt PNG"); 1835 comp = 
int stbi__parse_png_file(stbi__png *z, int scan, int req_comp)
{
    stbi_uc[1024] palette;
    stbi_uc pal_img_n=0;
    stbi_uc has_trans = 0;
    stbi_uc[3] tc = [0, 0, 0];
    stbi__uint16[3] tc16;
    stbi__uint32 ioff=0, idata_limit=0, i, pal_len=0;
    int first=1,k,interlace=0, color=0, is_iphone=0;
    stbi__context *s = z.s;

    z.expanded = null;
    z.idata = null;
    z.out_ = null;

    s.ppmX = -1;
    s.ppmY = -1;
    s.pixelAspectRatio = -1;

    if (!stbi__check_png_header(s)) return 0;

    if (scan == STBI__SCAN_type) return 1;

    for (;;) {
        stbi__pngchunk c = stbi__get_chunk_header(s);
        switch (c.type) {
            case STBI__PNG_TYPE('C','g','B','I'):
                is_iphone = 1;
                stbi__skip(s, c.length);
                break;

            case STBI__PNG_TYPE('p','H','Y','s'):
                s.ppmX = stbi__get32be(s);
                s.ppmY = stbi__get32be(s);
                s.pixelAspectRatio = s.ppmX / s.ppmY;
                ubyte unit = stbi__get8(s);
                if (unit != 1)
                {
                    s.ppmX = -1; // only contains an aspect ratio, but no physical resolution
                    s.ppmY = -1;
                }
                break;

            case STBI__PNG_TYPE('I','H','D','R'): {
                int comp,filter;
                if (!first) return 0; //stbi__err("multiple IHDR","Corrupt PNG");
                first = 0;
                if (c.length != 13) return 0; //stbi__err("bad IHDR len","Corrupt PNG");
                s.img_x = stbi__get32be(s);
                s.img_y = stbi__get32be(s);
                if (s.img_y > STBI_MAX_DIMENSIONS) return 0; //stbi__err("too large","Very large image (corrupt?)");
                if (s.img_x > STBI_MAX_DIMENSIONS) return 0; //stbi__err("too large","Very large image (corrupt?)");
                z.depth = stbi__get8(s);  if (z.depth != 1 && z.depth != 2 && z.depth != 4 && z.depth != 8 && z.depth != 16)  return 0; //stbi__err("1/2/4/8/16-bit only","PNG not supported: 1/2/4/8/16-bit only");
                color = stbi__get8(s);  if (color > 6) return 0; //stbi__err("bad ctype","Corrupt PNG");
                if (color == 3 && z.depth == 16) return 0; //stbi__err("bad ctype","Corrupt PNG");
                if (color == 3) pal_img_n = 3; else if (color & 1) return 0; //stbi__err("bad ctype","Corrupt PNG");
                comp  = stbi__get8(s);  if (comp) return 0; //stbi__err("bad comp method","Corrupt PNG");
                filter= stbi__get8(s);  if (filter) return 0; //stbi__err("bad filter method","Corrupt PNG");
                interlace = stbi__get8(s); if (interlace>1) return 0; //stbi__err("bad interlace method","Corrupt PNG");
                if (!s.img_x || !s.img_y) return 0; //stbi__err("0-pixel image","Corrupt PNG");
                if (!pal_img_n) {
                    s.img_n = (color & 2 ? 3 : 1) + (color & 4 ? 1 : 0);
                    if ((1 << 30) / s.img_x / s.img_n < s.img_y) return 0; //stbi__err("too large", "Image too large to decode");
                    if (scan == STBI__SCAN_header) return 1;
                } else {
                    // if paletted, then pal_n is our final components, and
                    // img_n is # components to decompress/filter.
                    s.img_n = 1;
                    if ((1 << 30) / s.img_x / 4 < s.img_y) return 0; //stbi__err("too large","Corrupt PNG");
                    // if SCAN_header, have to scan to see if we have a tRNS
                }
                break;
            }

            case STBI__PNG_TYPE('P','L','T','E'): {
                if (first) return 0; //stbi__err("first not IHDR", "Corrupt PNG");
                if (c.length > 256*3) return 0; //stbi__err("invalid PLTE","Corrupt PNG");
                pal_len = c.length / 3;
                if (pal_len * 3 != c.length) return 0; //stbi__err("invalid PLTE","Corrupt PNG");
                for (i=0; i < pal_len; ++i) {
                    palette[i*4+0] = stbi__get8(s);
                    palette[i*4+1] = stbi__get8(s);
                    palette[i*4+2] = stbi__get8(s);
                    palette[i*4+3] = 255;
                }
                break;
            }

            case STBI__PNG_TYPE('t','R','N','S'): {
                if (first) return 0; //stbi__err("first not IHDR", "Corrupt PNG");
                if (z.idata) return 0; //stbi__err("tRNS after IDAT","Corrupt PNG");
                if (pal_img_n) {
                    if (scan == STBI__SCAN_header) { s.img_n = 4; return 1; }
                    if (pal_len == 0) return 0; //stbi__err("tRNS before PLTE","Corrupt PNG");
                    if (c.length > pal_len) return 0; //stbi__err("bad tRNS len","Corrupt PNG");
                    pal_img_n = 4;
                    for (i=0; i < c.length; ++i)
                        palette[i*4+3] = stbi__get8(s);
                } else {
                    if (!(s.img_n & 1)) return 0; //stbi__err("tRNS with alpha","Corrupt PNG");
                    if (c.length != cast(stbi__uint32) s.img_n*2) return 0; //stbi__err("bad tRNS len","Corrupt PNG");
                    has_trans = 1;
                    if (z.depth == 16) {
                        for (k = 0; k < s.img_n; ++k) tc16[k] = cast(stbi__uint16)stbi__get16be(s); // copy the values as-is
                    } else {
                        for (k = 0; k < s.img_n; ++k)
                        {
                            tc[k] = cast(ubyte)( cast(stbi_uc)(stbi__get16be(s) & 255) * stbi__depth_scale_table[z.depth]); // non 8-bit images will be larger
                        }
                    }
                }
                break;
            }
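            // Note on the tRNS color key above: for bit depths below 8 the raw
            // sample is rescaled to 0..255 with stbi__depth_scale_table (0xFF
            // for 1-bit, 0x55 for 2-bit, 0x11 for 4-bit), so e.g. a 4-bit key
            // of 0x0F becomes 0x0F * 0x11 == 0xFF; 16-bit keys are stored
            // unscaled in tc16 and compared in stbi__compute_transparency16().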
            case STBI__PNG_TYPE('I','D','A','T'): {
                if (first)
                {
                    return 0; //stbi__err("first not IHDR", "Corrupt PNG");
                }
                if (pal_img_n && !pal_len)
                {
                    return 0; //stbi__err("no PLTE","Corrupt PNG");
                }
                if (scan == STBI__SCAN_header)
                {
                    s.img_n = pal_img_n;
                    return 1;
                }
                if (cast(int)(ioff + c.length) < cast(int)ioff)
                {
                    return 0;
                }
                if (ioff + c.length > idata_limit) {
                    stbi__uint32 idata_limit_old = idata_limit;
                    stbi_uc *p;
                    if (idata_limit == 0) idata_limit = c.length > 4096 ? c.length : 4096;
                    while (ioff + c.length > idata_limit)
                        idata_limit *= 2;
                    p = cast(stbi_uc *) STBI_REALLOC_SIZED(z.idata, idata_limit_old, idata_limit);
                    if (p == null)
                    {
                        return 0; //stbi__err("outofmem", "Out of memory");
                    }
                    z.idata = p;
                }
                if (!stbi__getn(s, z.idata+ioff,c.length))
                {
                    return 0; //stbi__err("outofdata","Corrupt PNG");
                }
                ioff += c.length;
                break;
            }

            case STBI__PNG_TYPE('I','E','N','D'): {
                stbi__uint32 raw_len, bpl;
                if (first) return 0; //stbi__err("first not IHDR", "Corrupt PNG");
                if (scan != STBI__SCAN_load) return 1;
                if (z.idata == null)
                {
                    return 0; //stbi__err("no IDAT","Corrupt PNG");
                }
                // initial guess for decoded data size to avoid unnecessary reallocs
                bpl = (s.img_x * z.depth + 7) / 8; // bytes per line, per component
                raw_len = bpl * s.img_y * s.img_n /* pixels */ + s.img_y /* filter mode per row */;
                z.expanded = cast(stbi_uc *) stbi_zlib_decode_malloc_guesssize_headerflag(cast(char *) z.idata,
                                                                                          ioff,
                                                                                          raw_len,
                                                                                          cast(int *) &raw_len,
                                                                                          !is_iphone);
                if (z.expanded == null)
                {
                    return 0; // zlib should set error
                }
                STBI_FREE(z.idata); z.idata = null;
                if ((req_comp == s.img_n+1 && req_comp != 3 && !pal_img_n) || has_trans)
                    s.img_out_n = s.img_n+1;
                else
                    s.img_out_n = s.img_n;
                if (!stbi__create_png_image(z, z.expanded, raw_len, s.img_out_n, z.depth, color, interlace))
                {
                    return 0;
                }
                if (has_trans) {
                    if (z.depth == 16) {
                        if (!stbi__compute_transparency16(z, tc16.ptr, s.img_out_n))
                        {
                            return 0;
                        }
                    } else {
                        if (!stbi__compute_transparency(z, tc.ptr, s.img_out_n))
                        {
                            return 0;
                        }
                    }
                }

                if (pal_img_n) {
                    // pal_img_n == 3 or 4
                    s.img_n = pal_img_n; // record the actual colors we had
                    s.img_out_n = pal_img_n;
                    if (req_comp >= 3) s.img_out_n = req_comp;
                    if (!stbi__expand_png_palette(z, palette.ptr, pal_len, s.img_out_n))
                    {
                        return 0;
                    }
                } else if (has_trans) {
                    // non-paletted image with tRNS => source image has (constant) alpha
                    ++s.img_n;
                }
                STBI_FREE(z.expanded); z.expanded = null;
                // end of PNG chunk, read and skip CRC
                stbi__get32be(s);
                return 1;
            }

            default:
                // if critical, fail
                if (first)
                {
                    return 0; //stbi__err("first not IHDR", "Corrupt PNG");
                }
                if ((c.type & (1 << 29)) == 0)
                {
                    return 0; //stbi__err("invalid_chunk", "PNG not supported: unknown PNG chunk type");
                }
                stbi__skip(s, c.length);
                break;
        }
        // end of PNG chunk, read and skip CRC
        stbi__get32be(s);
    }
}
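// In the IEND path above, the accumulated IDAT bytes are inflated by
// stbi_zlib_decode_malloc_guesssize_headerflag(), the filtered scanlines are
// reconstructed by stbi__create_png_image(), and color-key transparency and
// palette expansion are applied afterwards. The initial raw_len guess is
// bytes-per-line-per-component * height * components + one filter byte per
// row: a 100x100 8-bit RGB image, for instance, gives 100*100*3 + 100 = 30100.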
void *stbi__do_png(stbi__png *p, int *x, int *y, int *n, int req_comp, stbi__result_info *ri)
{
    void *result=null;
    if (req_comp < 0 || req_comp > 4) return null; //stbi__errpuc("bad req_comp", "Internal error");
    if (stbi__parse_png_file(p, STBI__SCAN_load, req_comp)) {
        if (p.depth <= 8)
            ri.bits_per_channel = 8;
        else if (p.depth == 16)
            ri.bits_per_channel = 16;
        else
            return null; //stbi__errpuc("bad bits_per_channel", "PNG not supported: unsupported color depth");
        result = p.out_;
        p.out_ = null;
        if (req_comp && req_comp != p.s.img_out_n) {
            if (ri.bits_per_channel == 8)
                result = stbi__convert_format(cast(ubyte*) result, p.s.img_out_n, req_comp, p.s.img_x, p.s.img_y);
            else
                result = stbi__convert_format16(cast(stbi__uint16 *) result, p.s.img_out_n, req_comp, p.s.img_x, p.s.img_y);
            p.s.img_out_n = req_comp;
            if (result == null) return result;
        }
        *x = p.s.img_x;
        *y = p.s.img_y;
        if (n) *n = p.s.img_n;
    }
    STBI_FREE(p.out_);      p.out_      = null;
    STBI_FREE(p.expanded);  p.expanded  = null;
    STBI_FREE(p.idata);     p.idata     = null;

    return result;
}

void *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri)
{
    stbi__png p;
    p.s = s;
    return stbi__do_png(&p, x,y,comp,req_comp, ri);
}

int stbi__png_test(stbi__context *s)
{
    int r;
    r = stbi__check_png_header(s);
    stbi__rewind(s);
    return r;
}

int stbi__png_info_raw(stbi__png *p, int *x, int *y, int *comp)
{
    if (!stbi__parse_png_file(p, STBI__SCAN_header, 0)) {
        stbi__rewind( p.s );
        return 0;
    }
    if (x) *x = p.s.img_x;
    if (y) *y = p.s.img_y;
    if (comp) *comp = p.s.img_n;
    return 1;
}

int stbi__png_info(stbi__context *s, int *x, int *y, int *comp)
{
    stbi__png p;
    p.s = s;
    return stbi__png_info_raw(&p, x, y, comp);
}

int stbi__png_is16(stbi__context *s)
{
    stbi__png p;
    p.s = s;
    if (!stbi__png_info_raw(&p, null, null, null))
        return 0;
    if (p.depth != 16) {
        stbi__rewind(p.s);
        return 0;
    }
    return 1;
}

bool stbi__png_is16(stbi_io_callbacks* clbk, void* user) // #BONUS
{
    stbi__context s;
    stbi__start_callbacks(&s, clbk, user);
    return stbi__png_is16(&s) != 0;
}
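// Illustrative sketch (not part of the stb_image port): the #BONUS overload
// above can be driven through this module's IOStream callbacks. The helper
// name below is hypothetical; it simply wires initSTBCallbacks() to
// stbi__png_is16().
version(none)
{
    bool isStreamPNG16bit(IOStream* io, IOHandle handle)
    {
        IOAndHandle ioh;
        stbi_io_callbacks cb;
        initSTBCallbacks(io, handle, &ioh, &cb);
        return stbi__png_is16(&cb, &ioh);
    }
}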
}

version(decodeBMP)
{

struct stbi__bmp_data
{
    int bpp, offset, hsz;
    uint mr,mg,mb,ma, all_a;
    int extra_read;
}

int stbi__bmp_set_mask_defaults(stbi__bmp_data *info, int compress)
{
    // BI_BITFIELDS specifies masks explicitly, don't override
    if (compress == 3)
        return 1;

    if (compress == 0) {
        if (info.bpp == 16) {
            info.mr = 31u << 10;
            info.mg = 31u <<  5;
            info.mb = 31u <<  0;
        } else if (info.bpp == 32) {
            info.mr = 0xffu << 16;
            info.mg = 0xffu <<  8;
            info.mb = 0xffu <<  0;
            info.ma = 0xffu << 24;
            info.all_a = 0; // if all_a is 0 at end, then we loaded alpha channel but it was all 0
        } else {
            // otherwise, use defaults, which is all-0
            info.mr = info.mg = info.mb = info.ma = 0;
        }
        return 1;
    }
    return 0; // error
}
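// For reference, the 16 bpp defaults above are the X1R5G5B5 layout
// (mr = 0x7C00, mg = 0x03E0, mb = 0x001F, no alpha), and the 32 bpp defaults
// select B,G,R,A byte order in the file (red mask 0x00FF0000, green 0x0000FF00,
// blue 0x000000FF, alpha 0xFF000000 of the little-endian 32-bit value).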
void *stbi__bmp_parse_header(stbi__context *s, stbi__bmp_data *info)
{
    int hsz;
    if (stbi__get8(s) != 'B' || stbi__get8(s) != 'M') return null;
    stbi__get32le(s); // discard filesize
    stbi__get16le(s); // discard reserved
    stbi__get16le(s); // discard reserved
    info.offset = stbi__get32le(s);
    info.hsz = hsz = stbi__get32le(s);
    info.mr = info.mg = info.mb = info.ma = 0;
    info.extra_read = 14;

    s.ppmX = -1;
    s.ppmY = -1;
    s.pixelAspectRatio = -1;

    if (info.offset < 0) return null;

    if (hsz != 12 && hsz != 40 && hsz != 56 && hsz != 108 && hsz != 124) return null;
    if (hsz == 12) {
        s.img_x = stbi__get16le(s);
        s.img_y = stbi__get16le(s);
    } else {
        s.img_x = stbi__get32le(s);
        s.img_y = stbi__get32le(s);
    }
    if (stbi__get16le(s) != 1) return null;
    info.bpp = stbi__get16le(s);
    if (hsz != 12) {
        int compress = stbi__get32le(s);
        if (compress == 1 || compress == 2) return null; // BI_RLE8 / BI_RLE4 not supported
        if (compress >= 4) return null; // this includes PNG/JPEG modes
        if (compress == 3 && info.bpp != 16 && info.bpp != 32) return null; // bitfields requires 16 or 32 bits/pixel
        stbi__get32le(s); // discard sizeof

        int biXPelsPerMeter = stbi__get32le(s); // horizontal resolution, pixels per meter
        int biYPelsPerMeter = stbi__get32le(s); // vertical resolution, pixels per meter
        if (biXPelsPerMeter > 1) s.ppmX = biXPelsPerMeter; // do not consider 0, 1, or negative values
        if (biYPelsPerMeter > 1) s.ppmY = biYPelsPerMeter; // do not consider 0, 1, or negative values
        if (s.ppmX != -1 && s.ppmY != -1)
        {
            s.pixelAspectRatio = s.ppmX / s.ppmY;
        }

        stbi__get32le(s); // discard colorsused
        stbi__get32le(s); // discard max important
        if (hsz == 40 || hsz == 56) {
            if (hsz == 56) {
                stbi__get32le(s);
                stbi__get32le(s);
                stbi__get32le(s);
                stbi__get32le(s);
            }
            if (info.bpp == 16 || info.bpp == 32) {
                if (compress == 0) {
                    stbi__bmp_set_mask_defaults(info, compress);
                } else if (compress == 3) {
                    info.mr = stbi__get32le(s);
                    info.mg = stbi__get32le(s);
                    info.mb = stbi__get32le(s);
                    info.extra_read += 12;
                    // not documented, but generated by photoshop and handled by mspaint
                    if (info.mr == info.mg && info.mg == info.mb) {
                        // ?!?!?
                        return null;
                    }
                } else
                    return null;
            }
        } else {
            // V4/V5 header
            int i;
            if (hsz != 108 && hsz != 124)
                return null;
            info.mr = stbi__get32le(s);
            info.mg = stbi__get32le(s);
            info.mb = stbi__get32le(s);
            info.ma = stbi__get32le(s);
            if (compress != 3) // override mr/mg/mb unless in BI_BITFIELDS mode, as per docs
                stbi__bmp_set_mask_defaults(info, compress);
            stbi__get32le(s); // discard color space
            for (i=0; i < 12; ++i)
                stbi__get32le(s); // discard color space parameters
            if (hsz == 124) {
                stbi__get32le(s); // discard rendering intent
                stbi__get32le(s); // discard offset of profile data
                stbi__get32le(s); // discard size of profile data
                stbi__get32le(s); // discard reserved
            }
        }
    }
    return cast(void *) 1;
}
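// The header sizes accepted above correspond to the usual DIB header variants:
// 12 = BITMAPCOREHEADER, 40 = BITMAPINFOHEADER, 108 = BITMAPV4HEADER and
// 124 = BITMAPV5HEADER; 56 is the unofficial "v3" header (40 bytes plus
// explicit channel masks) emitted by some tools.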
int stbi__bmp_test_raw(stbi__context *s)
{
    int r;
    int sz;
    if (stbi__get8(s) != 'B') return 0;
    if (stbi__get8(s) != 'M') return 0;
    stbi__get32le(s); // discard filesize
    stbi__get16le(s); // discard reserved
    stbi__get16le(s); // discard reserved
    stbi__get32le(s); // discard data offset
    sz = stbi__get32le(s);
    r = (sz == 12 || sz == 40 || sz == 56 || sz == 108 || sz == 124);
    return r;
}

int stbi__bmp_test(stbi__context *s)
{
    int r = stbi__bmp_test_raw(s);
    stbi__rewind(s);
    return r;
}

void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri)
{
    stbi_uc *out_;
    uint mr=0,mg=0,mb=0,ma=0, all_a;
    stbi_uc[4][256] pal;
    int psize=0,i,j,width;
    int flip_vertically, pad, target;
    stbi__bmp_data info;
    info.all_a = 255;
    if (stbi__bmp_parse_header(s, &info) == null)
        return null; // error code already set

    flip_vertically = (cast(int) s.img_y) > 0;
    s.img_y = abs(cast(int) s.img_y);

    if (s.img_y > STBI_MAX_DIMENSIONS) return null;
    if (s.img_x > STBI_MAX_DIMENSIONS) return null;

    mr = info.mr;
    mg = info.mg;
    mb = info.mb;
    ma = info.ma;
    all_a = info.all_a;

    if (info.hsz == 12) {
        if (info.bpp < 24)
            psize = (info.offset - info.extra_read - 24) / 3;
    } else {
        if (info.bpp < 16)
            psize = (info.offset - info.extra_read - info.hsz) >> 2;
    }
    if (psize == 0) {
        // accept some number of extra bytes after the header, but if the offset points either to before
        // the header ends or implies a large amount of extra data, reject the file as malformed
        int bytes_read_so_far = s.callback_already_read + cast(int)(s.img_buffer - s.img_buffer_original);
        int header_limit = 1024; // max we actually read is below 256 bytes currently.
        int extra_data_limit = 256*4; // what ordinarily goes here is a palette; 256 entries*4 bytes is its max size.
        if (bytes_read_so_far <= 0 || bytes_read_so_far > header_limit) {
            return null;
        }
        // we established that bytes_read_so_far is positive and sensible.
        // the first half of this test rejects offsets that are either too small positives, or
        // negative, and guarantees that info.offset >= bytes_read_so_far > 0. this in turn
        // ensures the number computed in the second half of the test can't overflow.
        if (info.offset < bytes_read_so_far || info.offset - bytes_read_so_far > extra_data_limit) {
            return null;
        } else {
            stbi__skip(s, info.offset - bytes_read_so_far);
        }
    }

    if (info.bpp == 24 && ma == 0xff000000)
        s.img_n = 3;
    else
        s.img_n = ma ? 4 : 3;
    if (req_comp && req_comp >= 3) // we can directly decode 3 or 4
        target = req_comp;
    else
        target = s.img_n; // if they want monochrome, we'll post-convert

    // sanity-check size
    if (!stbi__mad3sizes_valid(target, s.img_x, s.img_y, 0))
        return null;

    out_ = cast(stbi_uc *) stbi__malloc_mad3(target, s.img_x, s.img_y, 0);
    if (!out_) return null;

    if (info.bpp < 16) {
        int z=0;
        if (psize == 0 || psize > 256) { STBI_FREE(out_); return null; }
        for (i=0; i < psize; ++i) {
            pal[i][2] = stbi__get8(s);
            pal[i][1] = stbi__get8(s);
            pal[i][0] = stbi__get8(s);
            if (info.hsz != 12) stbi__get8(s);
            pal[i][3] = 255;
        }
        stbi__skip(s, info.offset - info.extra_read - info.hsz - psize * (info.hsz == 12 ? 3 : 4));
        if (info.bpp == 1) width = (s.img_x + 7) >> 3;
        else if (info.bpp == 4) width = (s.img_x + 1) >> 1;
        else if (info.bpp == 8) width = s.img_x;
        else { STBI_FREE(out_); return null; }
        pad = (-width)&3;
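        // BMP rows are padded to a multiple of 4 bytes; (-width) & 3 is the
        // number of padding bytes to skip after each row (e.g. a 5-pixel-wide
        // 8 bpp image has width = 5, so pad = 3).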
        if (info.bpp == 1) {
            for (j=0; j < cast(int) s.img_y; ++j) {
                int bit_offset = 7, v = stbi__get8(s);
                for (i=0; i < cast(int) s.img_x; ++i) {
                    int color = (v>>bit_offset)&0x1;
                    out_[z++] = pal[color][0];
                    out_[z++] = pal[color][1];
                    out_[z++] = pal[color][2];
                    if (target == 4) out_[z++] = 255;
                    if (i+1 == cast(int) s.img_x) break;
                    if((--bit_offset) < 0) {
                        bit_offset = 7;
                        v = stbi__get8(s);
                    }
                }
                stbi__skip(s, pad);
            }
        } else {
            for (j=0; j < cast(int) s.img_y; ++j) {
                for (i=0; i < cast(int) s.img_x; i += 2) {
                    int v=stbi__get8(s),v2=0;
                    if (info.bpp == 4) {
                        v2 = v & 15;
                        v >>= 4;
                    }
                    out_[z++] = pal[v][0];
                    out_[z++] = pal[v][1];
                    out_[z++] = pal[v][2];
                    if (target == 4) out_[z++] = 255;
                    if (i+1 == cast(int) s.img_x) break;
                    v = (info.bpp == 8) ? stbi__get8(s) : v2;
                    out_[z++] = pal[v][0];
                    out_[z++] = pal[v][1];
                    out_[z++] = pal[v][2];
                    if (target == 4) out_[z++] = 255;
                }
                stbi__skip(s, pad);
            }
        }
    } else {
        int rshift=0,gshift=0,bshift=0,ashift=0,rcount=0,gcount=0,bcount=0,acount=0;
        int z = 0;
        int easy=0;
        stbi__skip(s, info.offset - info.extra_read - info.hsz);
        if (info.bpp == 24) width = 3 * s.img_x;
        else if (info.bpp == 16) width = 2*s.img_x;
        else /* bpp = 32 and pad = 0 */ width=0;
        pad = (-width) & 3;
        if (info.bpp == 24) {
            easy = 1;
        } else if (info.bpp == 32) {
            if (mb == 0xff && mg == 0xff00 && mr == 0x00ff0000 && ma == 0xff000000)
                easy = 2;
        }
        if (!easy) {
            if (!mr || !mg || !mb) { STBI_FREE(out_); return null; }
            // right shift amt to put high bit in position #7
            rshift = stbi__high_bit(mr)-7; rcount = stbi__bitcount(mr);
            gshift = stbi__high_bit(mg)-7; gcount = stbi__bitcount(mg);
            bshift = stbi__high_bit(mb)-7; bcount = stbi__bitcount(mb);
            ashift = stbi__high_bit(ma)-7; acount = stbi__bitcount(ma);
            if (rcount > 8 || gcount > 8 || bcount > 8 || acount > 8) { STBI_FREE(out_); return null; }
        }
        for (j=0; j < cast(int) s.img_y; ++j) {
            if (easy) {
                for (i=0; i < cast(int) s.img_x; ++i) {
                    ubyte a;
                    out_[z+2] = stbi__get8(s);
                    out_[z+1] = stbi__get8(s);
                    out_[z+0] = stbi__get8(s);
                    z += 3;
                    a = (easy == 2 ? stbi__get8(s) : 255);
                    all_a |= a;
                    if (target == 4) out_[z++] = a;
                }
            } else {
                int bpp = info.bpp;
                for (i=0; i < cast(int) s.img_x; ++i) {
                    stbi__uint32 v = (bpp == 16 ? cast(stbi__uint32) stbi__get16le(s) : stbi__get32le(s));
                    uint a;
                    out_[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mr, rshift, rcount));
                    out_[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mg, gshift, gcount));
                    out_[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mb, bshift, bcount));
                    a = (ma ? stbi__shiftsigned(v & ma, ashift, acount) : 255);
                    all_a |= a;
                    if (target == 4) out_[z++] = STBI__BYTECAST(a);
                }
            }
            stbi__skip(s, pad);
        }
    }

    // if alpha channel is all 0s, replace with all 255s
    if (target == 4 && all_a == 0)
    {
        for (i=4*s.img_x*s.img_y-1; i >= 0; i -= 4)
            out_[i] = 255;
    }

    if (flip_vertically) {
        stbi_uc t;
        for (j=0; j < cast(int) s.img_y>>1; ++j) {
            stbi_uc *p1 = out_ +  j             *s.img_x*target;
            stbi_uc *p2 = out_ + (s.img_y-1-j)  *s.img_x*target;
            for (i=0; i < cast(int) s.img_x*target; ++i) {
                t = p1[i]; p1[i] = p2[i]; p2[i] = t;
            }
        }
    }

    if (req_comp && req_comp != target) {
        out_ = stbi__convert_format(out_, target, req_comp, s.img_x, s.img_y);
        if (out_ == null) return out_; // stbi__convert_format frees input on failure
    }

    *x = s.img_x;
    *y = s.img_y;
    if (comp) *comp = s.img_n;
    return out_;
}

// returns 0..31 for the highest set bit, or -1 if no bit is set
int stbi__high_bit(uint z)
{
    int n=0;
    if (z == 0) return -1;
    if (z >= 0x10000) { n += 16; z >>= 16; }
    if (z >= 0x00100) { n +=  8; z >>=  8; }
    if (z >= 0x00010) { n +=  4; z >>=  4; }
    if (z >= 0x00004) { n +=  2; z >>=  2; }
    if (z >= 0x00002) { n +=  1;/* >>=  1;*/ }
    return n;
}

int stbi__bitcount(uint a)
{
    a = (a & 0x55555555) + ((a >>  1) & 0x55555555); // max 2
    a = (a & 0x33333333) + ((a >>  2) & 0x33333333); // max 4
    a = (a + (a >> 4)) & 0x0f0f0f0f; // max 8 per 4, now 8 bits
    a = (a + (a >> 8)); // max 16 per 8 bits
    a = (a + (a >> 16)); // max 32 per 8 bits
    return a & 0xff;
}

// extract an arbitrarily-aligned N-bit value (N=bits)
// from v, and then make it 8-bits long and fractionally
// extend it to full range.
int stbi__shiftsigned(uint v, int shift, int bits)
{
    static immutable uint[9] mul_table = [
        0,
        0xff/*0b11111111*/, 0x55/*0b01010101*/, 0x49/*0b01001001*/, 0x11/*0b00010001*/,
        0x21/*0b00100001*/, 0x41/*0b01000001*/, 0x81/*0b10000001*/, 0x01/*0b00000001*/,
    ];
    static immutable uint[9] shift_table = [
        0, 0,0,1,0,2,4,6,0,
    ];
    if (shift < 0)
        v <<= -shift;
    else
        v >>= shift;
    assert(v < 256);
    v >>= (8-bits);
    assert(bits >= 0 && bits <= 8);
    return cast(int) (cast(uint) v * mul_table[bits]) >> shift_table[bits];
}
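// Worked example for stbi__shiftsigned(): for a 5-bit channel (bits = 5) whose
// aligned value is 0b11011 (27), mul_table[5] = 0x21 and shift_table[5] = 2,
// so the result is (27 * 0x21) >> 2 == 222 == 0b11011110 -- the five source
// bits replicated into eight, which maps 0 to 0 and 31 to 255.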
}