diff --git a/Kernel/include/stb/image.h b/Kernel/include/stb/image.h index 1f6fdd77..661525be 100644 --- a/Kernel/include/stb/image.h +++ b/Kernel/include/stb/image.h @@ -1,8 +1,8 @@ -/* stb_image - v2.28 - public domain image loader - http://nothings.org/stb - no warranty implied; use at your own risk +/* stb_image - v2.30 - public domain image loader - http://nothings.org/stb + no warranty implied; use at your own risk Do this: - #define STB_IMAGE_IMPLEMENTATION + #define STB_IMAGE_IMPLEMENTATION before you include this file in *one* C or C++ file to create the implementation. // i.e. it should look like this: @@ -17,27 +17,27 @@ QUICK NOTES: - Primarily of interest to game developers and other people who can - avoid problematic images and only need the trivial interface + Primarily of interest to game developers and other people who can + avoid problematic images and only need the trivial interface - JPEG baseline & progressive (12 bpc/arithmetic not supported, same as stock IJG lib) - PNG 1/2/4/8/16-bit-per-channel + JPEG baseline & progressive (12 bpc/arithmetic not supported, same as stock IJG lib) + PNG 1/2/4/8/16-bit-per-channel - TGA (not sure what subset, if a subset) - BMP non-1bpp, non-RLE - PSD (composited view only, no extra channels, 8/16 bit-per-channel) + TGA (not sure what subset, if a subset) + BMP non-1bpp, non-RLE + PSD (composited view only, no extra channels, 8/16 bit-per-channel) - GIF (*comp always reports as 4-channel) - HDR (radiance rgbE format) - PIC (Softimage PIC) - PNM (PPM and PGM binary only) + GIF (*comp always reports as 4-channel) + HDR (radiance rgbE format) + PIC (Softimage PIC) + PNM (PPM and PGM binary only) - Animated GIF still needs a proper API, but here's one way to do it: - http://gist.github.com/urraka/685d9a6340b26b830d49 + Animated GIF still needs a proper API, but here's one way to do it: + http://gist.github.com/urraka/685d9a6340b26b830d49 - - decode from memory or through FILE (define STBI_NO_STDIO to remove code) - - decode from arbitrary I/O callbacks - - SIMD acceleration on x86/x64 (SSE2) and ARM (NEON) + - decode from memory or through FILE (define STBI_NO_STDIO to remove code) + - decode from arbitrary I/O callbacks + - SIMD acceleration on x86/x64 (SSE2) and ARM (NEON) Full documentation under "DOCUMENTATION" below. 
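Note on the usage the header text above describes: exactly one translation unit defines STB_IMAGE_IMPLEMENTATION before including the header. A minimal sketch for reference; the `<stb/image.h>` path follows this repo's layout, and the file name and error handling are illustrative:

```c
/* One C or C++ file defines the implementation before the include;
   every other file includes the header plainly. */
#define STB_IMAGE_IMPLEMENTATION
#include <stb/image.h>

#include <stdio.h>

int main(void)
{
    int w, h, channels_in_file;
    /* Request 4 channels so the output is always RGBA, whatever the file stores. */
    unsigned char *pixels = stbi_load("texture.png", &w, &h, &channels_in_file, 4);
    if (!pixels) {
        fprintf(stderr, "stbi_load failed: %s\n", stbi_failure_reason());
        return 1;
    }
    /* ... use the w*h*4 bytes in pixels ... */
    stbi_image_free(pixels);
    return 0;
}
```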
@@ -48,29 +48,31 @@ LICENSE RECENT REVISION HISTORY: - 2.28 (2023-01-29) many error fixes, security errors, just tons of stuff - 2.27 (2021-07-11) document stbi_info better, 16-bit PNM support, bug fixes - 2.26 (2020-07-13) many minor fixes - 2.25 (2020-02-02) fix warnings - 2.24 (2020-02-02) fix warnings; thread-local failure_reason and flip_vertically - 2.23 (2019-08-11) fix clang static analysis warning - 2.22 (2019-03-04) gif fixes, fix warnings - 2.21 (2019-02-25) fix typo in comment - 2.20 (2019-02-07) support utf8 filenames in Windows; fix warnings and platform ifdefs - 2.19 (2018-02-11) fix warning - 2.18 (2018-01-30) fix warnings - 2.17 (2018-01-29) bugfix, 1-bit BMP, 16-bitness query, fix warnings - 2.16 (2017-07-23) all functions have 16-bit variants; optimizations; bugfixes - 2.15 (2017-03-18) fix png-1,2,4; all Imagenet JPGs; no runtime SSE detection on GCC - 2.14 (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs - 2.13 (2016-12-04) experimental 16-bit API, only for PNG so far; fixes - 2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes - 2.11 (2016-04-02) 16-bit PNGS; enable SSE2 in non-gcc x64 - RGB-format JPEG; remove white matting in PSD; - allocate large structures on the stack; - correct channel count for PNG & BMP - 2.10 (2016-01-22) avoid warning introduced in 2.09 - 2.09 (2016-01-16) 16-bit TGA; comments in PNM files; STBI_REALLOC_SIZED + 2.30 (2024-05-31) avoid erroneous gcc warning + 2.29 (2023-05-xx) optimizations + 2.28 (2023-01-29) many error fixes, security errors, just tons of stuff + 2.27 (2021-07-11) document stbi_info better, 16-bit PNM support, bug fixes + 2.26 (2020-07-13) many minor fixes + 2.25 (2020-02-02) fix warnings + 2.24 (2020-02-02) fix warnings; thread-local failure_reason and flip_vertically + 2.23 (2019-08-11) fix clang static analysis warning + 2.22 (2019-03-04) gif fixes, fix warnings + 2.21 (2019-02-25) fix typo in comment + 2.20 (2019-02-07) support utf8 filenames in Windows; fix warnings and platform ifdefs + 2.19 (2018-02-11) fix warning + 2.18 (2018-01-30) fix warnings + 2.17 (2018-01-29) bugfix, 1-bit BMP, 16-bitness query, fix warnings + 2.16 (2017-07-23) all functions have 16-bit variants; optimizations; bugfixes + 2.15 (2017-03-18) fix png-1,2,4; all Imagenet JPGs; no runtime SSE detection on GCC + 2.14 (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs + 2.13 (2016-12-04) experimental 16-bit API, only for PNG so far; fixes + 2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes + 2.11 (2016-04-02) 16-bit PNGS; enable SSE2 in non-gcc x64 + RGB-format JPEG; remove white matting in PSD; + allocate large structures on the stack; + correct channel count for PNG & BMP + 2.10 (2016-01-22) avoid warning introduced in 2.09 + 2.09 (2016-01-16) 16-bit TGA; comments in PNM files; STBI_REALLOC_SIZED See end of file for full revision history. 
@@ -78,46 +80,46 @@ RECENT REVISION HISTORY: ============================ Contributors ========================= Image formats Extensions, features - Sean Barrett (jpeg, png, bmp) Jetro Lauha (stbi_info) - Nicolas Schulz (hdr, psd) Martin "SpartanJ" Golini (stbi_info) - Jonathan Dummer (tga) James "moose2000" Brown (iPhone PNG) - Jean-Marc Lienher (gif) Ben "Disch" Wenger (io callbacks) - Tom Seddon (pic) Omar Cornut (1/2/4-bit PNG) - Thatcher Ulrich (psd) Nicolas Guillemot (vertical flip) - Ken Miller (pgm, ppm) Richard Mitton (16-bit PSD) - github:urraka (animated gif) Junggon Kim (PNM comments) - Christopher Forseth (animated gif) Daniel Gibson (16-bit TGA) - socks-the-fox (16-bit PNG) - Jeremy Sawicki (handle all ImageNet JPGs) + Sean Barrett (jpeg, png, bmp) Jetro Lauha (stbi_info) + Nicolas Schulz (hdr, psd) Martin "SpartanJ" Golini (stbi_info) + Jonathan Dummer (tga) James "moose2000" Brown (iPhone PNG) + Jean-Marc Lienher (gif) Ben "Disch" Wenger (io callbacks) + Tom Seddon (pic) Omar Cornut (1/2/4-bit PNG) + Thatcher Ulrich (psd) Nicolas Guillemot (vertical flip) + Ken Miller (pgm, ppm) Richard Mitton (16-bit PSD) + github:urraka (animated gif) Junggon Kim (PNM comments) + Christopher Forseth (animated gif) Daniel Gibson (16-bit TGA) + socks-the-fox (16-bit PNG) + Jeremy Sawicki (handle all ImageNet JPGs) Optimizations & bugfixes Mikhail Morozov (1-bit BMP) - Fabian "ryg" Giesen Anael Seghezzi (is-16-bit query) - Arseny Kapoulkine Simon Breuss (16-bit PNM) - John-Mark Allen - Carmelo J Fdez-Aguera + Fabian "ryg" Giesen Anael Seghezzi (is-16-bit query) + Arseny Kapoulkine Simon Breuss (16-bit PNM) + John-Mark Allen + Carmelo J Fdez-Aguera Bug & warning fixes - Marc LeBlanc David Woo Guillaume George Martins Mozeiko - Christpher Lloyd Jerry Jansson Joseph Thomson Blazej Dariusz Roszkowski - Phil Jordan Dave Moore Roy Eltham - Hayaki Saito Nathan Reed Won Chun - Luke Graham Johan Duparc Nick Verigakis the Horde3D community - Thomas Ruf Ronny Chevalier github:rlyeh - Janez Zemva John Bartholomew Michal Cichon github:romigrou - Jonathan Blow Ken Hamada Tero Hanninen github:svdijk - Eugene Golushkov Laurent Gomila Cort Stratton github:snagar - Aruelien Pocheville Sergio Gonzalez Thibault Reuille github:Zelex - Cass Everitt Ryamond Barbiero github:grim210 - Paul Du Bois Engin Manap Aldo Culquicondor github:sammyhw - Philipp Wiesemann Dale Weiler Oriol Ferrer Mesia github:phprus - Josh Tobin Neil Bickford Matthew Gregan github:poppolopoppo - Julian Raschke Gregory Mullen Christian Floisand github:darealshinji - Baldur Karlsson Kevin Schmidt JR Smith github:Michaelangel007 - Brad Weinberger Matvey Cherevko github:mosra - Luca Sas Alexander Veselov Zack Middleton [reserved] - Ryan C. 
Gordon [reserved] [reserved] - DO NOT ADD YOUR NAME HERE + Marc LeBlanc David Woo Guillaume George Martins Mozeiko + Christpher Lloyd Jerry Jansson Joseph Thomson Blazej Dariusz Roszkowski + Phil Jordan Dave Moore Roy Eltham + Hayaki Saito Nathan Reed Won Chun + Luke Graham Johan Duparc Nick Verigakis the Horde3D community + Thomas Ruf Ronny Chevalier github:rlyeh + Janez Zemva John Bartholomew Michal Cichon github:romigrou + Jonathan Blow Ken Hamada Tero Hanninen github:svdijk + Eugene Golushkov Laurent Gomila Cort Stratton github:snagar + Aruelien Pocheville Sergio Gonzalez Thibault Reuille github:Zelex + Cass Everitt Ryamond Barbiero github:grim210 + Paul Du Bois Engin Manap Aldo Culquicondor github:sammyhw + Philipp Wiesemann Dale Weiler Oriol Ferrer Mesia github:phprus + Josh Tobin Neil Bickford Matthew Gregan github:poppolopoppo + Julian Raschke Gregory Mullen Christian Floisand github:darealshinji + Baldur Karlsson Kevin Schmidt JR Smith github:Michaelangel007 + Brad Weinberger Matvey Cherevko github:mosra + Luca Sas Alexander Veselov Zack Middleton [reserved] + Ryan C. Gordon [reserved] [reserved] + DO NOT ADD YOUR NAME HERE - Jacko Dirks + Jacko Dirks To add your name to the credits, pick a random blank space in the middle and fill it. 80% of merge conflicts on stb PRs are due to people adding their name at the end @@ -127,10 +129,6 @@ RECENT REVISION HISTORY: #ifndef STBI_INCLUDE_STB_IMAGE_H #define STBI_INCLUDE_STB_IMAGE_H -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-function" -#pragma GCC diagnostic ignored "-Wunused-parameter" - // DOCUMENTATION // // Limitations: @@ -377,12 +375,12 @@ RECENT REVISION HISTORY: enum { - STBI_default = 0, // only used for desired_channels + STBI_default = 0, // only used for desired_channels - STBI_grey = 1, - STBI_grey_alpha = 2, - STBI_rgb = 3, - STBI_rgb_alpha = 4 + STBI_grey = 1, + STBI_grey_alpha = 2, + STBI_rgb = 3, + STBI_rgb_alpha = 4 }; #include <stdlib.h> @@ -402,55 +400,55 @@ extern "C" #endif #endif - ////////////////////////////////////////////////////////////////////////////// - // - // PRIMARY API - works on images of any type - // + ////////////////////////////////////////////////////////////////////////////// + // + // PRIMARY API - works on images of any type + // - // - // load image by filename, open file, or memory buffer - // + // + // load image by filename, open file, or memory buffer + // - typedef struct - { - int (*read)(void *user, char *data, int size); // fill 'data' with 'size' bytes.
return number of bytes actually read + void (*skip)(void *user, int n); // skip the next 'n' bytes, or 'unget' the last -n bytes if negative + int (*eof)(void *user); // returns nonzero if we are at end of file/data + } stbi_io_callbacks; - //////////////////////////////////// - // - // 8-bits-per-channel interface - // + //////////////////////////////////// + // + // 8-bits-per-channel interface + // - STBIDEF stbi_uc *stbi_load_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels); - STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels); + STBIDEF stbi_uc *stbi_load_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels); + STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels); #ifndef STBI_NO_STDIO - STBIDEF stbi_uc *stbi_load(char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); - STBIDEF stbi_uc *stbi_load_from_file(FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); + STBIDEF stbi_uc *stbi_load(char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); + STBIDEF stbi_uc *stbi_load_from_file(FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); // for stbi_load_from_file, file pointer is left pointing immediately after image #endif #ifndef STBI_NO_GIF - STBIDEF stbi_uc *stbi_load_gif_from_memory(stbi_uc const *buffer, int len, int **delays, int *x, int *y, int *z, int *comp, int req_comp); + STBIDEF stbi_uc *stbi_load_gif_from_memory(stbi_uc const *buffer, int len, int **delays, int *x, int *y, int *z, int *comp, int req_comp); #endif #ifdef STBI_WINDOWS_UTF8 - STBIDEF int stbi_convert_wchar_to_utf8(char *buffer, size_t bufferlen, const wchar_t *input); + STBIDEF int stbi_convert_wchar_to_utf8(char *buffer, size_t bufferlen, const wchar_t *input); #endif - //////////////////////////////////// - // - // 16-bits-per-channel interface - // + //////////////////////////////////// + // + // 16-bits-per-channel interface + // - STBIDEF stbi_us *stbi_load_16_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels); - STBIDEF stbi_us *stbi_load_16_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels); + STBIDEF stbi_us *stbi_load_16_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels); + STBIDEF stbi_us *stbi_load_16_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels); #ifndef STBI_NO_STDIO - STBIDEF stbi_us *stbi_load_16(char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); - STBIDEF stbi_us *stbi_load_from_file_16(FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); + STBIDEF stbi_us *stbi_load_16(char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); + STBIDEF stbi_us *stbi_load_from_file_16(FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); #endif //////////////////////////////////// @@ -458,81 +456,81 @@ extern "C" // float-per-channel interface // #ifndef STBI_NO_LINEAR - STBIDEF float *stbi_loadf_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int 
desired_channels); - STBIDEF float *stbi_loadf_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels); + STBIDEF float *stbi_loadf_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels); + STBIDEF float *stbi_loadf_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels); #ifndef STBI_NO_STDIO - STBIDEF float *stbi_loadf(char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); - STBIDEF float *stbi_loadf_from_file(FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); + STBIDEF float *stbi_loadf(char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); + STBIDEF float *stbi_loadf_from_file(FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); #endif #endif #ifndef STBI_NO_HDR - STBIDEF void stbi_hdr_to_ldr_gamma(float gamma); - STBIDEF void stbi_hdr_to_ldr_scale(float scale); + STBIDEF void stbi_hdr_to_ldr_gamma(float gamma); + STBIDEF void stbi_hdr_to_ldr_scale(float scale); #endif // STBI_NO_HDR #ifndef STBI_NO_LINEAR - STBIDEF void stbi_ldr_to_hdr_gamma(float gamma); - STBIDEF void stbi_ldr_to_hdr_scale(float scale); + STBIDEF void stbi_ldr_to_hdr_gamma(float gamma); + STBIDEF void stbi_ldr_to_hdr_scale(float scale); #endif // STBI_NO_LINEAR - // stbi_is_hdr is always defined, but always returns false if STBI_NO_HDR - STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user); - STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len); + // stbi_is_hdr is always defined, but always returns false if STBI_NO_HDR + STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user); + STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len); #ifndef STBI_NO_STDIO - STBIDEF int stbi_is_hdr(char const *filename); - STBIDEF int stbi_is_hdr_from_file(FILE *f); + STBIDEF int stbi_is_hdr(char const *filename); + STBIDEF int stbi_is_hdr_from_file(FILE *f); #endif // STBI_NO_STDIO - // get a VERY brief reason for failure - // on most compilers (and ALL modern mainstream compilers) this is threadsafe - STBIDEF const char *stbi_failure_reason(void); + // get a VERY brief reason for failure + // on most compilers (and ALL modern mainstream compilers) this is threadsafe + STBIDEF const char *stbi_failure_reason(void); - // free the loaded image -- this is just free() - STBIDEF void stbi_image_free(void *retval_from_stbi_load); + // free the loaded image -- this is just free() + STBIDEF void stbi_image_free(void *retval_from_stbi_load); - // get image dimensions & components without fully decoding - STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp); - STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp); - STBIDEF int stbi_is_16_bit_from_memory(stbi_uc const *buffer, int len); - STBIDEF int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const *clbk, void *user); + // get image dimensions & components without fully decoding + STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp); + STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp); + STBIDEF int stbi_is_16_bit_from_memory(stbi_uc const *buffer, int len); + STBIDEF int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const *clbk, void *user); #ifndef 
STBI_NO_STDIO - STBIDEF int stbi_info(char const *filename, int *x, int *y, int *comp); - STBIDEF int stbi_info_from_file(FILE *f, int *x, int *y, int *comp); - STBIDEF int stbi_is_16_bit(char const *filename); - STBIDEF int stbi_is_16_bit_from_file(FILE *f); + STBIDEF int stbi_info(char const *filename, int *x, int *y, int *comp); + STBIDEF int stbi_info_from_file(FILE *f, int *x, int *y, int *comp); + STBIDEF int stbi_is_16_bit(char const *filename); + STBIDEF int stbi_is_16_bit_from_file(FILE *f); #endif - // for image formats that explicitly notate that they have premultiplied alpha, - // we just return the colors as stored in the file. set this flag to force - // unpremultiplication. results are undefined if the unpremultiply overflow. - STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply); + // for image formats that explicitly notate that they have premultiplied alpha, + // we just return the colors as stored in the file. set this flag to force + // unpremultiplication. results are undefined if the unpremultiply overflow. + STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply); - // indicate whether we should process iphone images back to canonical format, - // or just pass them through "as-is" - STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert); + // indicate whether we should process iphone images back to canonical format, + // or just pass them through "as-is" + STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert); - // flip the image vertically, so the first pixel in the output array is the bottom left - STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip); + // flip the image vertically, so the first pixel in the output array is the bottom left + STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip); - // as above, but only applies to images loaded on the thread that calls the function - // this function is only available if your compiler supports thread-local variables; - // calling it will fail to link if your compiler doesn't - STBIDEF void stbi_set_unpremultiply_on_load_thread(int flag_true_if_should_unpremultiply); - STBIDEF void stbi_convert_iphone_png_to_rgb_thread(int flag_true_if_should_convert); - STBIDEF void stbi_set_flip_vertically_on_load_thread(int flag_true_if_should_flip); + // as above, but only applies to images loaded on the thread that calls the function + // this function is only available if your compiler supports thread-local variables; + // calling it will fail to link if your compiler doesn't + STBIDEF void stbi_set_unpremultiply_on_load_thread(int flag_true_if_should_unpremultiply); + STBIDEF void stbi_convert_iphone_png_to_rgb_thread(int flag_true_if_should_convert); + STBIDEF void stbi_set_flip_vertically_on_load_thread(int flag_true_if_should_flip); - // ZLIB client - used by PNG, available for other purposes + // ZLIB client - used by PNG, available for other purposes - STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen); - STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header); - STBIDEF char *stbi_zlib_decode_malloc(const char *buffer, int len, int *outlen); - STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, const char *ibuffer, int ilen); + STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int 
*outlen); + STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header); + STBIDEF char *stbi_zlib_decode_malloc(const char *buffer, int len, int *outlen); + STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, const char *ibuffer, int ilen); - STBIDEF char *stbi_zlib_decode_noheader_malloc(const char *buffer, int len, int *outlen); - STBIDEF int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen); + STBIDEF char *stbi_zlib_decode_noheader_malloc(const char *buffer, int len, int *outlen); + STBIDEF int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen); #ifdef __cplusplus } @@ -661,7 +659,7 @@ typedef unsigned char validate_uint32[sizeof(stbi__uint32) == 4 ? 1 : -1]; #ifdef STBI_HAS_LROTL #define stbi_lrot(x, y) _lrotl(x, y) #else -#define stbi_lrot(x, y) (((x) << (y)) | ((x) >> (-(y)&31))) +#define stbi_lrot(x, y) (((x) << (y)) | ((x) >> (-(y) & 31))) #endif #if defined(STBI_MALLOC) && defined(STBI_FREE) && (defined(STBI_REALLOC) || defined(STBI_REALLOC_SIZED)) @@ -722,23 +720,23 @@ typedef unsigned char validate_uint32[sizeof(stbi__uint32) == 4 ? 1 : -1]; #ifdef _MSC_VER #if _MSC_VER >= 1400 // not VC6 -#include <intrin.h> // __cpuid +#include <intrin.h> // __cpuid static int stbi__cpuid3(void) { - int info[4]; - __cpuid(info, 1); - return info[3]; + int info[4]; + __cpuid(info, 1); + return info[3]; } #else static int stbi__cpuid3(void) { - int res; - __asm { + int res; + __asm { mov eax,1 cpuid mov res,edx - } - return res; + } + return res; } #endif @@ -747,8 +745,8 @@ static int stbi__cpuid3(void) #if !defined(STBI_NO_JPEG) && defined(STBI_SSE2) static int stbi__sse2_available(void) { - int info3 = stbi__cpuid3(); - return ((info3 >> 26) & 1) != 0; + int info3 = stbi__cpuid3(); + return ((info3 >> 26) & 1) != 0; } #endif @@ -758,10 +756,10 @@ static int stbi__sse2_available(void) #if !defined(STBI_NO_JPEG) && defined(STBI_SSE2) static int stbi__sse2_available(void) { - // If we're even attempting to compile this on GCC/Clang, that means - // -msse2 is on, which means the compiler is allowed to use SSE2 - // instructions at will, and so are we.
+ return 1; } #endif @@ -798,19 +796,19 @@ static int stbi__sse2_available(void) // contains all the IO context, plus some basic image information typedef struct { - stbi__uint32 img_x, img_y; - int img_n, img_out_n; + stbi__uint32 img_x, img_y; + int img_n, img_out_n; - stbi_io_callbacks io; - void *io_user_data; + stbi_io_callbacks io; + void *io_user_data; - int read_from_callbacks; - int buflen; - stbi_uc buffer_start[128]; - int callback_already_read; + int read_from_callbacks; + int buflen; + stbi_uc buffer_start[128]; + int callback_already_read; - stbi_uc *img_buffer, *img_buffer_end; - stbi_uc *img_buffer_original, *img_buffer_original_end; + stbi_uc *img_buffer, *img_buffer_end; + stbi_uc *img_buffer_original, *img_buffer_original_end; } stbi__context; static void stbi__refill_buffer(stbi__context *s); @@ -818,59 +816,59 @@ static void stbi__refill_buffer(stbi__context *s); // initialize a memory-decode context static void stbi__start_mem(stbi__context *s, stbi_uc const *buffer, int len) { - s->io.read = NULL; - s->read_from_callbacks = 0; - s->callback_already_read = 0; - s->img_buffer = s->img_buffer_original = (stbi_uc *)buffer; - s->img_buffer_end = s->img_buffer_original_end = (stbi_uc *)buffer + len; + s->io.read = NULL; + s->read_from_callbacks = 0; + s->callback_already_read = 0; + s->img_buffer = s->img_buffer_original = (stbi_uc *)buffer; + s->img_buffer_end = s->img_buffer_original_end = (stbi_uc *)buffer + len; } // initialize a callback-based context static void stbi__start_callbacks(stbi__context *s, stbi_io_callbacks *c, void *user) { - s->io = *c; - s->io_user_data = user; - s->buflen = sizeof(s->buffer_start); - s->read_from_callbacks = 1; - s->callback_already_read = 0; - s->img_buffer = s->img_buffer_original = s->buffer_start; - stbi__refill_buffer(s); - s->img_buffer_original_end = s->img_buffer_end; + s->io = *c; + s->io_user_data = user; + s->buflen = sizeof(s->buffer_start); + s->read_from_callbacks = 1; + s->callback_already_read = 0; + s->img_buffer = s->img_buffer_original = s->buffer_start; + stbi__refill_buffer(s); + s->img_buffer_original_end = s->img_buffer_end; } #ifndef STBI_NO_STDIO static int stbi__stdio_read(void *user, char *data, int size) { - return (int)fread(data, 1, size, (FILE *)user); + return (int)fread(data, 1, size, (FILE *)user); } static void stbi__stdio_skip(void *user, int n) { - int ch; - fseek((FILE *)user, n, SEEK_CUR); - ch = fgetc((FILE *)user); /* have to read a byte to reset feof()'s flag */ - if (ch != EOF) - { - ungetc(ch, (FILE *)user); /* push byte back onto stream if valid. */ - } + int ch; + fseek((FILE *)user, n, SEEK_CUR); + ch = fgetc((FILE *)user); /* have to read a byte to reset feof()'s flag */ + if (ch != EOF) + { + ungetc(ch, (FILE *)user); /* push byte back onto stream if valid. 
*/ + } } static int stbi__stdio_eof(void *user) { - return feof((FILE *)user) || ferror((FILE *)user); + return feof((FILE *)user) || ferror((FILE *)user); } static stbi_io_callbacks stbi__stdio_callbacks = - { - stbi__stdio_read, - stbi__stdio_skip, - stbi__stdio_eof, + { + stbi__stdio_read, + stbi__stdio_skip, + stbi__stdio_eof, }; static void stbi__start_file(stbi__context *s, FILE *f) { - stbi__start_callbacks(s, &stbi__stdio_callbacks, (void *)f); + stbi__start_callbacks(s, &stbi__stdio_callbacks, (void *)f); } // static void stop_file(stbi__context *s) { } @@ -879,24 +877,24 @@ static void stbi__start_file(stbi__context *s, FILE *f) static void stbi__rewind(stbi__context *s) { - // conceptually rewind SHOULD rewind to the beginning of the stream, - // but we just rewind to the beginning of the initial buffer, because - // we only use it after doing 'test', which only ever looks at at most 92 bytes - s->img_buffer = s->img_buffer_original; - s->img_buffer_end = s->img_buffer_original_end; + // conceptually rewind SHOULD rewind to the beginning of the stream, + // but we just rewind to the beginning of the initial buffer, because + // we only use it after doing 'test', which only ever looks at at most 92 bytes + s->img_buffer = s->img_buffer_original; + s->img_buffer_end = s->img_buffer_original_end; } enum { - STBI_ORDER_RGB, - STBI_ORDER_BGR + STBI_ORDER_RGB, + STBI_ORDER_BGR }; typedef struct { - int bits_per_channel; - int num_channels; - int channel_order; + int bits_per_channel; + int num_channels; + int channel_order; } stbi__result_info; #ifndef STBI_NO_JPEG @@ -959,26 +957,26 @@ static int stbi__pnm_is16(stbi__context *s); static #ifdef STBI_THREAD_LOCAL - STBI_THREAD_LOCAL + STBI_THREAD_LOCAL #endif - const char *stbi__g_failure_reason; + const char *stbi__g_failure_reason; STBIDEF const char *stbi_failure_reason(void) { - return stbi__g_failure_reason; + return stbi__g_failure_reason; } #ifndef STBI_NO_FAILURE_STRINGS static int stbi__err(const char *str) { - stbi__g_failure_reason = str; - return 0; + stbi__g_failure_reason = str; + return 0; } #endif static void *stbi__malloc(size_t size) { - return STBI_MALLOC(size); + return STBI_MALLOC(size); } // stb_image uses ints pervasively, including for offset calculations. @@ -995,48 +993,48 @@ static void *stbi__malloc(size_t size) // negative terms are considered invalid. static int stbi__addsizes_valid(int a, int b) { - if (b < 0) - return 0; - // now 0 <= b <= INT_MAX, hence also - // 0 <= INT_MAX - b <= INTMAX. - // And "a + b <= INT_MAX" (which might overflow) is the - // same as a <= INT_MAX - b (no overflow) - return a <= INT_MAX - b; + if (b < 0) + return 0; + // now 0 <= b <= INT_MAX, hence also + // 0 <= INT_MAX - b <= INTMAX. + // And "a + b <= INT_MAX" (which might overflow) is the + // same as a <= INT_MAX - b (no overflow) + return a <= INT_MAX - b; } // returns 1 if the product is valid, 0 on overflow. // negative factors are considered invalid. 
static int stbi__mul2sizes_valid(int a, int b) { - if (a < 0 || b < 0) - return 0; - if (b == 0) - return 1; // mul-by-0 is always safe - // portable way to check for no overflows in a*b - return a <= INT_MAX / b; + if (a < 0 || b < 0) + return 0; + if (b == 0) + return 1; // mul-by-0 is always safe + // portable way to check for no overflows in a*b + return a <= INT_MAX / b; } #if !defined(STBI_NO_JPEG) || !defined(STBI_NO_PNG) || !defined(STBI_NO_TGA) || !defined(STBI_NO_HDR) // returns 1 if "a*b + add" has no negative terms/factors and doesn't overflow static int stbi__mad2sizes_valid(int a, int b, int add) { - return stbi__mul2sizes_valid(a, b) && stbi__addsizes_valid(a * b, add); + return stbi__mul2sizes_valid(a, b) && stbi__addsizes_valid(a * b, add); } #endif // returns 1 if "a*b*c + add" has no negative terms/factors and doesn't overflow static int stbi__mad3sizes_valid(int a, int b, int c, int add) { - return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a * b, c) && - stbi__addsizes_valid(a * b * c, add); + return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a * b, c) && + stbi__addsizes_valid(a * b * c, add); } // returns 1 if "a*b*c*d + add" has no negative terms/factors and doesn't overflow #if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) || !defined(STBI_NO_PNM) static int stbi__mad4sizes_valid(int a, int b, int c, int d, int add) { - return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a * b, c) && - stbi__mul2sizes_valid(a * b * c, d) && stbi__addsizes_valid(a * b * c * d, add); + return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a * b, c) && + stbi__mul2sizes_valid(a * b * c, d) && stbi__addsizes_valid(a * b * c * d, add); } #endif @@ -1044,48 +1042,48 @@ static int stbi__mad4sizes_valid(int a, int b, int c, int d, int add) // mallocs with size overflow checking static void *stbi__malloc_mad2(int a, int b, int add) { - if (!stbi__mad2sizes_valid(a, b, add)) - return NULL; - return stbi__malloc(a * b + add); + if (!stbi__mad2sizes_valid(a, b, add)) + return NULL; + return stbi__malloc(a * b + add); } #endif static void *stbi__malloc_mad3(int a, int b, int c, int add) { - if (!stbi__mad3sizes_valid(a, b, c, add)) - return NULL; - return stbi__malloc(a * b * c + add); + if (!stbi__mad3sizes_valid(a, b, c, add)) + return NULL; + return stbi__malloc(a * b * c + add); } #if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) || !defined(STBI_NO_PNM) static void *stbi__malloc_mad4(int a, int b, int c, int d, int add) { - if (!stbi__mad4sizes_valid(a, b, c, d, add)) - return NULL; - return stbi__malloc(a * b * c * d + add); + if (!stbi__mad4sizes_valid(a, b, c, d, add)) + return NULL; + return stbi__malloc(a * b * c * d + add); } #endif // returns 1 if the sum of two signed ints is valid (between -2^31 and 2^31-1 inclusive), 0 on overflow. static int stbi__addints_valid(int a, int b) { - if ((a >= 0) != (b >= 0)) - return 1; // a and b have different signs, so no overflow - if (a < 0 && b < 0) - return a >= INT_MIN - b; // same as a + b >= INT_MIN; INT_MIN - b cannot overflow since b < 0. - return a <= INT_MAX - b; + if ((a >= 0) != (b >= 0)) + return 1; // a and b have different signs, so no overflow + if (a < 0 && b < 0) + return a >= INT_MIN - b; // same as a + b >= INT_MIN; INT_MIN - b cannot overflow since b < 0. + return a <= INT_MAX - b; } -// returns 1 if the product of two signed shorts is valid, 0 on overflow. 
-static int stbi__mul2shorts_valid(short a, short b) +// returns 1 if the product of two ints fits in a signed short, 0 on overflow. +static int stbi__mul2shorts_valid(int a, int b) { - if (b == 0 || b == -1) - return 1; // multiplication by 0 is always 0; check for -1 so SHRT_MIN/b doesn't overflow - if ((a >= 0) == (b >= 0)) - return a <= SHRT_MAX / b; // product is positive, so similar to mul2sizes_valid - if (b < 0) - return a <= SHRT_MIN / b; // same as a * b >= SHRT_MIN - return a >= SHRT_MIN / b; + if (b == 0 || b == -1) + return 1; // multiplication by 0 is always 0; check for -1 so SHRT_MIN/b doesn't overflow + if ((a >= 0) == (b >= 0)) + return a <= SHRT_MAX / b; // product is positive, so similar to mul2sizes_valid + if (b < 0) + return a <= SHRT_MIN / b; // same as a * b >= SHRT_MIN + return a >= SHRT_MIN / b; } // stbi__err - error @@ -1105,7 +1103,7 @@ static int stbi__mul2shorts_valid(short a, short b) STBIDEF void stbi_image_free(void *retval_from_stbi_load) { - STBI_FREE(retval_from_stbi_load); + STBI_FREE(retval_from_stbi_load); } #ifndef STBI_NO_LINEAR @@ -1120,7 +1118,7 @@ static int stbi__vertically_flip_on_load_global = 0; STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip) { - stbi__vertically_flip_on_load_global = flag_true_if_should_flip; + stbi__vertically_flip_on_load_global = flag_true_if_should_flip; } #ifndef STBI_THREAD_LOCAL @@ -1130,216 +1128,216 @@ static STBI_THREAD_LOCAL int stbi__vertically_flip_on_load_local, stbi__vertical STBIDEF void stbi_set_flip_vertically_on_load_thread(int flag_true_if_should_flip) { - stbi__vertically_flip_on_load_local = flag_true_if_should_flip; - stbi__vertically_flip_on_load_set = 1; + stbi__vertically_flip_on_load_local = flag_true_if_should_flip; + stbi__vertically_flip_on_load_set = 1; } #define stbi__vertically_flip_on_load (stbi__vertically_flip_on_load_set \ - ? stbi__vertically_flip_on_load_local \ - : stbi__vertically_flip_on_load_global) + ? 
stbi__vertically_flip_on_load_local \ + : stbi__vertically_flip_on_load_global) #endif // STBI_THREAD_LOCAL static void *stbi__load_main(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc) { - memset(ri, 0, sizeof(*ri)); // make sure it's initialized if we add new fields - ri->bits_per_channel = 8; // default is 8 so most paths don't have to be changed - ri->channel_order = STBI_ORDER_RGB; // all current input & output are this, but this is here so we can add BGR order - ri->num_channels = 0; + memset(ri, 0, sizeof(*ri)); // make sure it's initialized if we add new fields + ri->bits_per_channel = 8; // default is 8 so most paths don't have to be changed + ri->channel_order = STBI_ORDER_RGB; // all current input & output are this, but this is here so we can add BGR order + ri->num_channels = 0; // test the formats with a very explicit header first (at least a FOURCC // or distinctive magic number first) #ifndef STBI_NO_PNG - if (stbi__png_test(s)) - return stbi__png_load(s, x, y, comp, req_comp, ri); + if (stbi__png_test(s)) + return stbi__png_load(s, x, y, comp, req_comp, ri); #endif #ifndef STBI_NO_BMP - if (stbi__bmp_test(s)) - return stbi__bmp_load(s, x, y, comp, req_comp, ri); + if (stbi__bmp_test(s)) + return stbi__bmp_load(s, x, y, comp, req_comp, ri); #endif #ifndef STBI_NO_GIF - if (stbi__gif_test(s)) - return stbi__gif_load(s, x, y, comp, req_comp, ri); + if (stbi__gif_test(s)) + return stbi__gif_load(s, x, y, comp, req_comp, ri); #endif #ifndef STBI_NO_PSD - if (stbi__psd_test(s)) - return stbi__psd_load(s, x, y, comp, req_comp, ri, bpc); + if (stbi__psd_test(s)) + return stbi__psd_load(s, x, y, comp, req_comp, ri, bpc); #else - STBI_NOTUSED(bpc); + STBI_NOTUSED(bpc); #endif #ifndef STBI_NO_PIC - if (stbi__pic_test(s)) - return stbi__pic_load(s, x, y, comp, req_comp, ri); + if (stbi__pic_test(s)) + return stbi__pic_load(s, x, y, comp, req_comp, ri); #endif // then the formats that can end up attempting to load with just 1 or 2 // bytes matching expectations; these are prone to false positives, so // try them later #ifndef STBI_NO_JPEG - if (stbi__jpeg_test(s)) - return stbi__jpeg_load(s, x, y, comp, req_comp, ri); + if (stbi__jpeg_test(s)) + return stbi__jpeg_load(s, x, y, comp, req_comp, ri); #endif #ifndef STBI_NO_PNM - if (stbi__pnm_test(s)) - return stbi__pnm_load(s, x, y, comp, req_comp, ri); + if (stbi__pnm_test(s)) + return stbi__pnm_load(s, x, y, comp, req_comp, ri); #endif #ifndef STBI_NO_HDR - if (stbi__hdr_test(s)) - { - float *hdr = stbi__hdr_load(s, x, y, comp, req_comp, ri); - return stbi__hdr_to_ldr(hdr, *x, *y, req_comp ? req_comp : *comp); - } + if (stbi__hdr_test(s)) + { + float *hdr = stbi__hdr_load(s, x, y, comp, req_comp, ri); + return stbi__hdr_to_ldr(hdr, *x, *y, req_comp ? req_comp : *comp); + } #endif #ifndef STBI_NO_TGA - // test tga last because it's a crappy test! - if (stbi__tga_test(s)) - return stbi__tga_load(s, x, y, comp, req_comp, ri); + // test tga last because it's a crappy test! 
+ if (stbi__tga_test(s)) + return stbi__tga_load(s, x, y, comp, req_comp, ri); #endif - return stbi__errpuc("unknown image type", "Image not of any known type, or corrupt"); + return stbi__errpuc("unknown image type", "Image not of any known type, or corrupt"); } static stbi_uc *stbi__convert_16_to_8(stbi__uint16 *orig, int w, int h, int channels) { - int i; - int img_len = w * h * channels; - stbi_uc *reduced; + int i; + int img_len = w * h * channels; + stbi_uc *reduced; - reduced = (stbi_uc *)stbi__malloc(img_len); - if (reduced == NULL) - return stbi__errpuc("outofmem", "Out of memory"); + reduced = (stbi_uc *)stbi__malloc(img_len); + if (reduced == NULL) + return stbi__errpuc("outofmem", "Out of memory"); - for (i = 0; i < img_len; ++i) - reduced[i] = (stbi_uc)((orig[i] >> 8) & 0xFF); // top half of each byte is sufficient approx of 16->8 bit scaling + for (i = 0; i < img_len; ++i) + reduced[i] = (stbi_uc)((orig[i] >> 8) & 0xFF); // top half of each byte is sufficient approx of 16->8 bit scaling - STBI_FREE(orig); - return reduced; + STBI_FREE(orig); + return reduced; } static stbi__uint16 *stbi__convert_8_to_16(stbi_uc *orig, int w, int h, int channels) { - int i; - int img_len = w * h * channels; - stbi__uint16 *enlarged; + int i; + int img_len = w * h * channels; + stbi__uint16 *enlarged; - enlarged = (stbi__uint16 *)stbi__malloc(img_len * 2); - if (enlarged == NULL) - return (stbi__uint16 *)stbi__errpuc("outofmem", "Out of memory"); + enlarged = (stbi__uint16 *)stbi__malloc(img_len * 2); + if (enlarged == NULL) + return (stbi__uint16 *)stbi__errpuc("outofmem", "Out of memory"); - for (i = 0; i < img_len; ++i) - enlarged[i] = (stbi__uint16)((orig[i] << 8) + orig[i]); // replicate to high and low byte, maps 0->0, 255->0xffff + for (i = 0; i < img_len; ++i) + enlarged[i] = (stbi__uint16)((orig[i] << 8) + orig[i]); // replicate to high and low byte, maps 0->0, 255->0xffff - STBI_FREE(orig); - return enlarged; + STBI_FREE(orig); + return enlarged; } static void stbi__vertical_flip(void *image, int w, int h, int bytes_per_pixel) { - int row; - size_t bytes_per_row = (size_t)w * bytes_per_pixel; - stbi_uc temp[2048]; - stbi_uc *bytes = (stbi_uc *)image; + int row; + size_t bytes_per_row = (size_t)w * bytes_per_pixel; + stbi_uc temp[2048]; + stbi_uc *bytes = (stbi_uc *)image; - for (row = 0; row < (h >> 1); row++) - { - stbi_uc *row0 = bytes + row * bytes_per_row; - stbi_uc *row1 = bytes + (h - row - 1) * bytes_per_row; - // swap row0 with row1 - size_t bytes_left = bytes_per_row; - while (bytes_left) - { - size_t bytes_copy = (bytes_left < sizeof(temp)) ? bytes_left : sizeof(temp); - memcpy(temp, row0, bytes_copy); - memcpy(row0, row1, bytes_copy); - memcpy(row1, temp, bytes_copy); - row0 += bytes_copy; - row1 += bytes_copy; - bytes_left -= bytes_copy; - } - } + for (row = 0; row < (h >> 1); row++) + { + stbi_uc *row0 = bytes + row * bytes_per_row; + stbi_uc *row1 = bytes + (h - row - 1) * bytes_per_row; + // swap row0 with row1 + size_t bytes_left = bytes_per_row; + while (bytes_left) + { + size_t bytes_copy = (bytes_left < sizeof(temp)) ? 
bytes_left : sizeof(temp); + memcpy(temp, row0, bytes_copy); + memcpy(row0, row1, bytes_copy); + memcpy(row1, temp, bytes_copy); + row0 += bytes_copy; + row1 += bytes_copy; + bytes_left -= bytes_copy; + } + } } #ifndef STBI_NO_GIF static void stbi__vertical_flip_slices(void *image, int w, int h, int z, int bytes_per_pixel) { - int slice; - int slice_size = w * h * bytes_per_pixel; + int slice; + int slice_size = w * h * bytes_per_pixel; - stbi_uc *bytes = (stbi_uc *)image; - for (slice = 0; slice < z; ++slice) - { - stbi__vertical_flip(bytes, w, h, bytes_per_pixel); - bytes += slice_size; - } + stbi_uc *bytes = (stbi_uc *)image; + for (slice = 0; slice < z; ++slice) + { + stbi__vertical_flip(bytes, w, h, bytes_per_pixel); + bytes += slice_size; + } } #endif static unsigned char *stbi__load_and_postprocess_8bit(stbi__context *s, int *x, int *y, int *comp, int req_comp) { - stbi__result_info ri; - void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 8); + stbi__result_info ri; + void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 8); - if (result == NULL) - return NULL; + if (result == NULL) + return NULL; - // it is the responsibility of the loaders to make sure we get either 8 or 16 bit. - STBI_ASSERT(ri.bits_per_channel == 8 || ri.bits_per_channel == 16); + // it is the responsibility of the loaders to make sure we get either 8 or 16 bit. + STBI_ASSERT(ri.bits_per_channel == 8 || ri.bits_per_channel == 16); - if (ri.bits_per_channel != 8) - { - result = stbi__convert_16_to_8((stbi__uint16 *)result, *x, *y, req_comp == 0 ? *comp : req_comp); - ri.bits_per_channel = 8; - } + if (ri.bits_per_channel != 8) + { + result = stbi__convert_16_to_8((stbi__uint16 *)result, *x, *y, req_comp == 0 ? *comp : req_comp); + ri.bits_per_channel = 8; + } - // @TODO: move stbi__convert_format to here + // @TODO: move stbi__convert_format to here - if (stbi__vertically_flip_on_load) - { - int channels = req_comp ? req_comp : *comp; - stbi__vertical_flip(result, *x, *y, channels * sizeof(stbi_uc)); - } + if (stbi__vertically_flip_on_load) + { + int channels = req_comp ? req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(stbi_uc)); + } - return (unsigned char *)result; + return (unsigned char *)result; } static stbi__uint16 *stbi__load_and_postprocess_16bit(stbi__context *s, int *x, int *y, int *comp, int req_comp) { - stbi__result_info ri; - void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 16); + stbi__result_info ri; + void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 16); - if (result == NULL) - return NULL; + if (result == NULL) + return NULL; - // it is the responsibility of the loaders to make sure we get either 8 or 16 bit. - STBI_ASSERT(ri.bits_per_channel == 8 || ri.bits_per_channel == 16); + // it is the responsibility of the loaders to make sure we get either 8 or 16 bit. + STBI_ASSERT(ri.bits_per_channel == 8 || ri.bits_per_channel == 16); - if (ri.bits_per_channel != 16) - { - result = stbi__convert_8_to_16((stbi_uc *)result, *x, *y, req_comp == 0 ? *comp : req_comp); - ri.bits_per_channel = 16; - } + if (ri.bits_per_channel != 16) + { + result = stbi__convert_8_to_16((stbi_uc *)result, *x, *y, req_comp == 0 ? 
*comp : req_comp); + ri.bits_per_channel = 16; + } - // @TODO: move stbi__convert_format16 to here - // @TODO: special case RGB-to-Y (and RGBA-to-YA) for 8-bit-to-16-bit case to keep more precision + // @TODO: move stbi__convert_format16 to here + // @TODO: special case RGB-to-Y (and RGBA-to-YA) for 8-bit-to-16-bit case to keep more precision - if (stbi__vertically_flip_on_load) - { - int channels = req_comp ? req_comp : *comp; - stbi__vertical_flip(result, *x, *y, channels * sizeof(stbi__uint16)); - } + if (stbi__vertically_flip_on_load) + { + int channels = req_comp ? req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(stbi__uint16)); + } - return (stbi__uint16 *)result; + return (stbi__uint16 *)result; } #if !defined(STBI_NO_HDR) && !defined(STBI_NO_LINEAR) static void stbi__float_postprocess(float *result, int *x, int *y, int *comp, int req_comp) { - if (stbi__vertically_flip_on_load && result != NULL) - { - int channels = req_comp ? req_comp : *comp; - stbi__vertical_flip(result, *x, *y, channels * sizeof(float)); - } + if (stbi__vertically_flip_on_load && result != NULL) + { + int channels = req_comp ? req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(float)); + } } #endif @@ -1353,186 +1351,186 @@ STBI_EXTERN __declspec(dllimport) int __stdcall WideCharToMultiByte(unsigned int #if defined(_WIN32) && defined(STBI_WINDOWS_UTF8) STBIDEF int stbi_convert_wchar_to_utf8(char *buffer, size_t bufferlen, const wchar_t *input) { - return WideCharToMultiByte(65001 /* UTF8 */, 0, input, -1, buffer, (int)bufferlen, NULL, NULL); + return WideCharToMultiByte(65001 /* UTF8 */, 0, input, -1, buffer, (int)bufferlen, NULL, NULL); } #endif static FILE *stbi__fopen(char const *filename, char const *mode) { - FILE *f; + FILE *f; #if defined(_WIN32) && defined(STBI_WINDOWS_UTF8) - wchar_t wMode[64]; - wchar_t wFilename[1024]; - if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, filename, -1, wFilename, sizeof(wFilename) / sizeof(*wFilename))) - return 0; + wchar_t wMode[64]; + wchar_t wFilename[1024]; + if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, filename, -1, wFilename, sizeof(wFilename) / sizeof(*wFilename))) + return 0; - if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, mode, -1, wMode, sizeof(wMode) / sizeof(*wMode))) - return 0; + if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, mode, -1, wMode, sizeof(wMode) / sizeof(*wMode))) + return 0; #if defined(_MSC_VER) && _MSC_VER >= 1400 - if (0 != _wfopen_s(&f, wFilename, wMode)) - f = 0; + if (0 != _wfopen_s(&f, wFilename, wMode)) + f = 0; #else - f = _wfopen(wFilename, wMode); + f = _wfopen(wFilename, wMode); #endif #elif defined(_MSC_VER) && _MSC_VER >= 1400 - if (0 != fopen_s(&f, filename, mode)) - f = 0; + if (0 != fopen_s(&f, filename, mode)) + f = 0; #else - f = fopen(filename, mode); + f = fopen(filename, mode); #endif - return f; + return f; } STBIDEF stbi_uc *stbi_load(char const *filename, int *x, int *y, int *comp, int req_comp) { - FILE *f = stbi__fopen(filename, "rb"); - unsigned char *result; - if (!f) - return stbi__errpuc("can't fopen", "Unable to open file"); - result = stbi_load_from_file(f, x, y, comp, req_comp); - fclose(f); - return result; + FILE *f = stbi__fopen(filename, "rb"); + unsigned char *result; + if (!f) + return stbi__errpuc("can't fopen", "Unable to open file"); + result = stbi_load_from_file(f, x, y, comp, req_comp); + fclose(f); + return result; } STBIDEF stbi_uc *stbi_load_from_file(FILE *f, int *x, int *y, int *comp, int req_comp) { - unsigned char *result; - 
stbi__context s; - stbi__start_file(&s, f); - result = stbi__load_and_postprocess_8bit(&s, x, y, comp, req_comp); - if (result) - { - // need to 'unget' all the characters in the IO buffer - fseek(f, -(int)(s.img_buffer_end - s.img_buffer), SEEK_CUR); - } - return result; + unsigned char *result; + stbi__context s; + stbi__start_file(&s, f); + result = stbi__load_and_postprocess_8bit(&s, x, y, comp, req_comp); + if (result) + { + // need to 'unget' all the characters in the IO buffer + fseek(f, -(int)(s.img_buffer_end - s.img_buffer), SEEK_CUR); + } + return result; } STBIDEF stbi__uint16 *stbi_load_from_file_16(FILE *f, int *x, int *y, int *comp, int req_comp) { - stbi__uint16 *result; - stbi__context s; - stbi__start_file(&s, f); - result = stbi__load_and_postprocess_16bit(&s, x, y, comp, req_comp); - if (result) - { - // need to 'unget' all the characters in the IO buffer - fseek(f, -(int)(s.img_buffer_end - s.img_buffer), SEEK_CUR); - } - return result; + stbi__uint16 *result; + stbi__context s; + stbi__start_file(&s, f); + result = stbi__load_and_postprocess_16bit(&s, x, y, comp, req_comp); + if (result) + { + // need to 'unget' all the characters in the IO buffer + fseek(f, -(int)(s.img_buffer_end - s.img_buffer), SEEK_CUR); + } + return result; } STBIDEF stbi_us *stbi_load_16(char const *filename, int *x, int *y, int *comp, int req_comp) { - FILE *f = stbi__fopen(filename, "rb"); - stbi__uint16 *result; - if (!f) - return (stbi_us *)stbi__errpuc("can't fopen", "Unable to open file"); - result = stbi_load_from_file_16(f, x, y, comp, req_comp); - fclose(f); - return result; + FILE *f = stbi__fopen(filename, "rb"); + stbi__uint16 *result; + if (!f) + return (stbi_us *)stbi__errpuc("can't fopen", "Unable to open file"); + result = stbi_load_from_file_16(f, x, y, comp, req_comp); + fclose(f); + return result; } #endif //! 
STBI_NO_STDIO STBIDEF stbi_us *stbi_load_16_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels) { - stbi__context s; - stbi__start_mem(&s, buffer, len); - return stbi__load_and_postprocess_16bit(&s, x, y, channels_in_file, desired_channels); + stbi__context s; + stbi__start_mem(&s, buffer, len); + return stbi__load_and_postprocess_16bit(&s, x, y, channels_in_file, desired_channels); } STBIDEF stbi_us *stbi_load_16_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels) { - stbi__context s; - stbi__start_callbacks(&s, (stbi_io_callbacks *)clbk, user); - return stbi__load_and_postprocess_16bit(&s, x, y, channels_in_file, desired_channels); + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *)clbk, user); + return stbi__load_and_postprocess_16bit(&s, x, y, channels_in_file, desired_channels); } STBIDEF stbi_uc *stbi_load_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp) { - stbi__context s; - stbi__start_mem(&s, buffer, len); - return stbi__load_and_postprocess_8bit(&s, x, y, comp, req_comp); + stbi__context s; + stbi__start_mem(&s, buffer, len); + return stbi__load_and_postprocess_8bit(&s, x, y, comp, req_comp); } STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp) { - stbi__context s; - stbi__start_callbacks(&s, (stbi_io_callbacks *)clbk, user); - return stbi__load_and_postprocess_8bit(&s, x, y, comp, req_comp); + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *)clbk, user); + return stbi__load_and_postprocess_8bit(&s, x, y, comp, req_comp); } #ifndef STBI_NO_GIF STBIDEF stbi_uc *stbi_load_gif_from_memory(stbi_uc const *buffer, int len, int **delays, int *x, int *y, int *z, int *comp, int req_comp) { - unsigned char *result; - stbi__context s; - stbi__start_mem(&s, buffer, len); + unsigned char *result; + stbi__context s; + stbi__start_mem(&s, buffer, len); - result = (unsigned char *)stbi__load_gif_main(&s, delays, x, y, z, comp, req_comp); - if (stbi__vertically_flip_on_load) - { - stbi__vertical_flip_slices(result, *x, *y, *z, *comp); - } + result = (unsigned char *)stbi__load_gif_main(&s, delays, x, y, z, comp, req_comp); + if (stbi__vertically_flip_on_load) + { + stbi__vertical_flip_slices(result, *x, *y, *z, *comp); + } - return result; + return result; } #endif #ifndef STBI_NO_LINEAR static float *stbi__loadf_main(stbi__context *s, int *x, int *y, int *comp, int req_comp) { - unsigned char *data; + unsigned char *data; #ifndef STBI_NO_HDR - if (stbi__hdr_test(s)) - { - stbi__result_info ri; - float *hdr_data = stbi__hdr_load(s, x, y, comp, req_comp, &ri); - if (hdr_data) - stbi__float_postprocess(hdr_data, x, y, comp, req_comp); - return hdr_data; - } + if (stbi__hdr_test(s)) + { + stbi__result_info ri; + float *hdr_data = stbi__hdr_load(s, x, y, comp, req_comp, &ri); + if (hdr_data) + stbi__float_postprocess(hdr_data, x, y, comp, req_comp); + return hdr_data; + } #endif - data = stbi__load_and_postprocess_8bit(s, x, y, comp, req_comp); - if (data) - return stbi__ldr_to_hdr(data, *x, *y, req_comp ? req_comp : *comp); - return stbi__errpf("unknown image type", "Image not of any known type, or corrupt"); + data = stbi__load_and_postprocess_8bit(s, x, y, comp, req_comp); + if (data) + return stbi__ldr_to_hdr(data, *x, *y, req_comp ? 
req_comp : *comp); + return stbi__errpf("unknown image type", "Image not of any known type, or corrupt"); } STBIDEF float *stbi_loadf_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp) { - stbi__context s; - stbi__start_mem(&s, buffer, len); - return stbi__loadf_main(&s, x, y, comp, req_comp); + stbi__context s; + stbi__start_mem(&s, buffer, len); + return stbi__loadf_main(&s, x, y, comp, req_comp); } STBIDEF float *stbi_loadf_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp) { - stbi__context s; - stbi__start_callbacks(&s, (stbi_io_callbacks *)clbk, user); - return stbi__loadf_main(&s, x, y, comp, req_comp); + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *)clbk, user); + return stbi__loadf_main(&s, x, y, comp, req_comp); } #ifndef STBI_NO_STDIO STBIDEF float *stbi_loadf(char const *filename, int *x, int *y, int *comp, int req_comp) { - float *result; - FILE *f = stbi__fopen(filename, "rb"); - if (!f) - return stbi__errpf("can't fopen", "Unable to open file"); - result = stbi_loadf_from_file(f, x, y, comp, req_comp); - fclose(f); - return result; + float *result; + FILE *f = stbi__fopen(filename, "rb"); + if (!f) + return stbi__errpf("can't fopen", "Unable to open file"); + result = stbi_loadf_from_file(f, x, y, comp, req_comp); + fclose(f); + return result; } STBIDEF float *stbi_loadf_from_file(FILE *f, int *x, int *y, int *comp, int req_comp) { - stbi__context s; - stbi__start_file(&s, f); - return stbi__loadf_main(&s, x, y, comp, req_comp); + stbi__context s; + stbi__start_file(&s, f); + return stbi__loadf_main(&s, x, y, comp, req_comp); } #endif // !STBI_NO_STDIO @@ -1545,42 +1543,42 @@ STBIDEF float *stbi_loadf_from_file(FILE *f, int *x, int *y, int *comp, int req_ STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len) { #ifndef STBI_NO_HDR - stbi__context s; - stbi__start_mem(&s, buffer, len); - return stbi__hdr_test(&s); + stbi__context s; + stbi__start_mem(&s, buffer, len); + return stbi__hdr_test(&s); #else - STBI_NOTUSED(buffer); - STBI_NOTUSED(len); - return 0; + STBI_NOTUSED(buffer); + STBI_NOTUSED(len); + return 0; #endif } #ifndef STBI_NO_STDIO STBIDEF int stbi_is_hdr(char const *filename) { - FILE *f = stbi__fopen(filename, "rb"); - int result = 0; - if (f) - { - result = stbi_is_hdr_from_file(f); - fclose(f); - } - return result; + FILE *f = stbi__fopen(filename, "rb"); + int result = 0; + if (f) + { + result = stbi_is_hdr_from_file(f); + fclose(f); + } + return result; } STBIDEF int stbi_is_hdr_from_file(FILE *f) { #ifndef STBI_NO_HDR - long pos = ftell(f); - int res; - stbi__context s; - stbi__start_file(&s, f); - res = stbi__hdr_test(&s); - fseek(f, pos, SEEK_SET); - return res; + long pos = ftell(f); + int res; + stbi__context s; + stbi__start_file(&s, f); + res = stbi__hdr_test(&s); + fseek(f, pos, SEEK_SET); + return res; #else - STBI_NOTUSED(f); - return 0; + STBI_NOTUSED(f); + return 0; #endif } #endif // !STBI_NO_STDIO @@ -1588,13 +1586,13 @@ STBIDEF int stbi_is_hdr_from_file(FILE *f) STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user) { #ifndef STBI_NO_HDR - stbi__context s; - stbi__start_callbacks(&s, (stbi_io_callbacks *)clbk, user); - return stbi__hdr_test(&s); + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *)clbk, user); + return stbi__hdr_test(&s); #else - STBI_NOTUSED(clbk); - STBI_NOTUSED(user); - return 0; + STBI_NOTUSED(clbk); + STBI_NOTUSED(user); + return 0; #endif } @@ -1617,41 +1615,41 @@ 
STBIDEF void stbi_hdr_to_ldr_scale(float scale) { stbi__h2l_scale_i = 1 / scale; enum { - STBI__SCAN_load = 0, - STBI__SCAN_type, - STBI__SCAN_header + STBI__SCAN_load = 0, + STBI__SCAN_type, + STBI__SCAN_header }; static void stbi__refill_buffer(stbi__context *s) { - int n = (s->io.read)(s->io_user_data, (char *)s->buffer_start, s->buflen); - s->callback_already_read += (int)(s->img_buffer - s->img_buffer_original); - if (n == 0) - { - // at end of file, treat same as if from memory, but need to handle case - // where s->img_buffer isn't pointing to safe memory, e.g. 0-byte file - s->read_from_callbacks = 0; - s->img_buffer = s->buffer_start; - s->img_buffer_end = s->buffer_start + 1; - *s->img_buffer = 0; - } - else - { - s->img_buffer = s->buffer_start; - s->img_buffer_end = s->buffer_start + n; - } + int n = (s->io.read)(s->io_user_data, (char *)s->buffer_start, s->buflen); + s->callback_already_read += (int)(s->img_buffer - s->img_buffer_original); + if (n == 0) + { + // at end of file, treat same as if from memory, but need to handle case + // where s->img_buffer isn't pointing to safe memory, e.g. 0-byte file + s->read_from_callbacks = 0; + s->img_buffer = s->buffer_start; + s->img_buffer_end = s->buffer_start + 1; + *s->img_buffer = 0; + } + else + { + s->img_buffer = s->buffer_start; + s->img_buffer_end = s->buffer_start + n; + } } stbi_inline static stbi_uc stbi__get8(stbi__context *s) { - if (s->img_buffer < s->img_buffer_end) - return *s->img_buffer++; - if (s->read_from_callbacks) - { - stbi__refill_buffer(s); - return *s->img_buffer++; - } - return 0; + if (s->img_buffer < s->img_buffer_end) + return *s->img_buffer++; + if (s->read_from_callbacks) + { + stbi__refill_buffer(s); + return *s->img_buffer++; + } + return 0; } #if defined(STBI_NO_JPEG) && defined(STBI_NO_HDR) && defined(STBI_NO_PIC) && defined(STBI_NO_PNM) @@ -1659,17 +1657,17 @@ stbi_inline static stbi_uc stbi__get8(stbi__context *s) #else stbi_inline static int stbi__at_eof(stbi__context *s) { - if (s->io.read) - { - if (!(s->io.eof)(s->io_user_data)) - return 0; - // if feof() is true, check if buffer = end - // special case: we've only got the special 0 character at the end - if (s->read_from_callbacks == 0) - return 1; - } + if (s->io.read) + { + if (!(s->io.eof)(s->io_user_data)) + return 0; + // if feof() is true, check if buffer = end + // special case: we've only got the special 0 character at the end + if (s->read_from_callbacks == 0) + return 1; + } - return s->img_buffer >= s->img_buffer_end; + return s->img_buffer >= s->img_buffer_end; } #endif @@ -1678,24 +1676,24 @@ stbi_inline static int stbi__at_eof(stbi__context *s) #else static void stbi__skip(stbi__context *s, int n) { - if (n == 0) - return; // already there! - if (n < 0) - { - s->img_buffer = s->img_buffer_end; - return; - } - if (s->io.read) - { - int blen = (int)(s->img_buffer_end - s->img_buffer); - if (blen < n) - { - s->img_buffer = s->img_buffer_end; - (s->io.skip)(s->io_user_data, n - blen); - return; - } - } - s->img_buffer += n; + if (n == 0) + return; // already there! 
+ if (n < 0) + { + s->img_buffer = s->img_buffer_end; + return; + } + if (s->io.read) + { + int blen = (int)(s->img_buffer_end - s->img_buffer); + if (blen < n) + { + s->img_buffer = s->img_buffer_end; + (s->io.skip)(s->io_user_data, n - blen); + return; + } + } + s->img_buffer += n; } #endif @@ -1704,30 +1702,30 @@ static void stbi__skip(stbi__context *s, int n) #else static int stbi__getn(stbi__context *s, stbi_uc *buffer, int n) { - if (s->io.read) - { - int blen = (int)(s->img_buffer_end - s->img_buffer); - if (blen < n) - { - int res, count; + if (s->io.read) + { + int blen = (int)(s->img_buffer_end - s->img_buffer); + if (blen < n) + { + int res, count; - memcpy(buffer, s->img_buffer, blen); + memcpy(buffer, s->img_buffer, blen); - count = (s->io.read)(s->io_user_data, (char *)buffer + blen, n - blen); - res = (count == (n - blen)); - s->img_buffer = s->img_buffer_end; - return res; - } - } + count = (s->io.read)(s->io_user_data, (char *)buffer + blen, n - blen); + res = (count == (n - blen)); + s->img_buffer = s->img_buffer_end; + return res; + } + } - if (s->img_buffer + n <= s->img_buffer_end) - { - memcpy(buffer, s->img_buffer, n); - s->img_buffer += n; - return 1; - } - else - return 0; + if (s->img_buffer + n <= s->img_buffer_end) + { + memcpy(buffer, s->img_buffer, n); + s->img_buffer += n; + return 1; + } + else + return 0; } #endif @@ -1736,8 +1734,8 @@ static int stbi__getn(stbi__context *s, stbi_uc *buffer, int n) #else static int stbi__get16be(stbi__context *s) { - int z = stbi__get8(s); - return (z << 8) + stbi__get8(s); + int z = stbi__get8(s); + return (z << 8) + stbi__get8(s); } #endif @@ -1746,8 +1744,8 @@ static int stbi__get16be(stbi__context *s) #else static stbi__uint32 stbi__get32be(stbi__context *s) { - stbi__uint32 z = stbi__get16be(s); - return (z << 16) + stbi__get16be(s); + stbi__uint32 z = stbi__get16be(s); + return (z << 16) + stbi__get16be(s); } #endif @@ -1756,21 +1754,21 @@ static stbi__uint32 stbi__get32be(stbi__context *s) #else static int stbi__get16le(stbi__context *s) { - int z = stbi__get8(s); - return z + (stbi__get8(s) << 8); + int z = stbi__get8(s); + return z + (stbi__get8(s) << 8); } #endif #ifndef STBI_NO_BMP static stbi__uint32 stbi__get32le(stbi__context *s) { - stbi__uint32 z = stbi__get16le(s); - z += (stbi__uint32)stbi__get16le(s) << 16; - return z; + stbi__uint32 z = stbi__get16le(s); + z += (stbi__uint32)stbi__get16le(s) << 16; + return z; } #endif -#define STBI__BYTECAST(x) ((stbi_uc)((x)&255)) // truncate int to byte without warnings +#define STBI__BYTECAST(x) ((stbi_uc)((x) & 255)) // truncate int to byte without warnings #if defined(STBI_NO_JPEG) && defined(STBI_NO_PNG) && defined(STBI_NO_BMP) && defined(STBI_NO_PSD) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) && defined(STBI_NO_PIC) && defined(STBI_NO_PNM) // nothing @@ -1788,7 +1786,7 @@ static stbi__uint32 stbi__get32le(stbi__context *s) static stbi_uc stbi__compute_y(int r, int g, int b) { - return (stbi_uc)(((r * 77) + (g * 150) + (29 * b)) >> 8); + return (stbi_uc)(((r * 77) + (g * 150) + (29 * b)) >> 8); } #endif @@ -1797,99 +1795,99 @@ static stbi_uc stbi__compute_y(int r, int g, int b) #else static unsigned char *stbi__convert_format(unsigned char *data, int img_n, int req_comp, unsigned int x, unsigned int y) { - int i, j; - unsigned char *good; + int i, j; + unsigned char *good; - if (req_comp == img_n) - return data; - STBI_ASSERT(req_comp >= 1 && req_comp <= 4); + if (req_comp == img_n) + return data; + STBI_ASSERT(req_comp >= 1 && req_comp <= 4); - good = 
(unsigned char *)stbi__malloc_mad3(req_comp, x, y, 0); - if (good == NULL) - { - STBI_FREE(data); - return stbi__errpuc("outofmem", "Out of memory"); - } + good = (unsigned char *)stbi__malloc_mad3(req_comp, x, y, 0); + if (good == NULL) + { + STBI_FREE(data); + return stbi__errpuc("outofmem", "Out of memory"); + } - for (j = 0; j < (int)y; ++j) - { - unsigned char *src = data + j * x * img_n; - unsigned char *dest = good + j * x * req_comp; + for (j = 0; j < (int)y; ++j) + { + unsigned char *src = data + j * x * img_n; + unsigned char *dest = good + j * x * req_comp; -#define STBI__COMBO(a, b) ((a)*8 + (b)) +#define STBI__COMBO(a, b) ((a) * 8 + (b)) #define STBI__CASE(a, b) \ - case STBI__COMBO(a, b): \ - for (i = x - 1; i >= 0; --i, src += a, dest += b) - // convert source image with img_n components to one with req_comp components; - // avoid switch per pixel, so use switch per scanline and massive macros - switch (STBI__COMBO(img_n, req_comp)) - { - STBI__CASE(1, 2) - { - dest[0] = src[0]; - dest[1] = 255; - } - break; - STBI__CASE(1, 3) { dest[0] = dest[1] = dest[2] = src[0]; } - break; - STBI__CASE(1, 4) - { - dest[0] = dest[1] = dest[2] = src[0]; - dest[3] = 255; - } - break; - STBI__CASE(2, 1) { dest[0] = src[0]; } - break; - STBI__CASE(2, 3) { dest[0] = dest[1] = dest[2] = src[0]; } - break; - STBI__CASE(2, 4) - { - dest[0] = dest[1] = dest[2] = src[0]; - dest[3] = src[1]; - } - break; - STBI__CASE(3, 4) - { - dest[0] = src[0]; - dest[1] = src[1]; - dest[2] = src[2]; - dest[3] = 255; - } - break; - STBI__CASE(3, 1) { dest[0] = stbi__compute_y(src[0], src[1], src[2]); } - break; - STBI__CASE(3, 2) - { - dest[0] = stbi__compute_y(src[0], src[1], src[2]); - dest[1] = 255; - } - break; - STBI__CASE(4, 1) { dest[0] = stbi__compute_y(src[0], src[1], src[2]); } - break; - STBI__CASE(4, 2) - { - dest[0] = stbi__compute_y(src[0], src[1], src[2]); - dest[1] = src[3]; - } - break; - STBI__CASE(4, 3) - { - dest[0] = src[0]; - dest[1] = src[1]; - dest[2] = src[2]; - } - break; - default: - STBI_ASSERT(0); - STBI_FREE(data); - STBI_FREE(good); - return stbi__errpuc("unsupported", "Unsupported format conversion"); - } + case STBI__COMBO(a, b): \ + for (i = x - 1; i >= 0; --i, src += a, dest += b) + // convert source image with img_n components to one with req_comp components; + // avoid switch per pixel, so use switch per scanline and massive macros + switch (STBI__COMBO(img_n, req_comp)) + { + STBI__CASE(1, 2) + { + dest[0] = src[0]; + dest[1] = 255; + } + break; + STBI__CASE(1, 3) { dest[0] = dest[1] = dest[2] = src[0]; } + break; + STBI__CASE(1, 4) + { + dest[0] = dest[1] = dest[2] = src[0]; + dest[3] = 255; + } + break; + STBI__CASE(2, 1) { dest[0] = src[0]; } + break; + STBI__CASE(2, 3) { dest[0] = dest[1] = dest[2] = src[0]; } + break; + STBI__CASE(2, 4) + { + dest[0] = dest[1] = dest[2] = src[0]; + dest[3] = src[1]; + } + break; + STBI__CASE(3, 4) + { + dest[0] = src[0]; + dest[1] = src[1]; + dest[2] = src[2]; + dest[3] = 255; + } + break; + STBI__CASE(3, 1) { dest[0] = stbi__compute_y(src[0], src[1], src[2]); } + break; + STBI__CASE(3, 2) + { + dest[0] = stbi__compute_y(src[0], src[1], src[2]); + dest[1] = 255; + } + break; + STBI__CASE(4, 1) { dest[0] = stbi__compute_y(src[0], src[1], src[2]); } + break; + STBI__CASE(4, 2) + { + dest[0] = stbi__compute_y(src[0], src[1], src[2]); + dest[1] = src[3]; + } + break; + STBI__CASE(4, 3) + { + dest[0] = src[0]; + dest[1] = src[1]; + dest[2] = src[2]; + } + break; + default: + STBI_ASSERT(0); + STBI_FREE(data); + STBI_FREE(good); + return 
stbi__errpuc("unsupported", "Unsupported format conversion"); + } #undef STBI__CASE - } + } - STBI_FREE(data); - return good; + STBI_FREE(data); + return good; } #endif @@ -1898,7 +1896,7 @@ static unsigned char *stbi__convert_format(unsigned char *data, int img_n, int r #else static stbi__uint16 stbi__compute_y_16(int r, int g, int b) { - return (stbi__uint16)(((r * 77) + (g * 150) + (29 * b)) >> 8); + return (stbi__uint16)(((r * 77) + (g * 150) + (29 * b)) >> 8); } #endif @@ -1907,136 +1905,136 @@ static stbi__uint16 stbi__compute_y_16(int r, int g, int b) #else static stbi__uint16 *stbi__convert_format16(stbi__uint16 *data, int img_n, int req_comp, unsigned int x, unsigned int y) { - int i, j; - stbi__uint16 *good; + int i, j; + stbi__uint16 *good; - if (req_comp == img_n) - return data; - STBI_ASSERT(req_comp >= 1 && req_comp <= 4); + if (req_comp == img_n) + return data; + STBI_ASSERT(req_comp >= 1 && req_comp <= 4); - good = (stbi__uint16 *)stbi__malloc(req_comp * x * y * 2); - if (good == NULL) - { - STBI_FREE(data); - return (stbi__uint16 *)stbi__errpuc("outofmem", "Out of memory"); - } + good = (stbi__uint16 *)stbi__malloc(req_comp * x * y * 2); + if (good == NULL) + { + STBI_FREE(data); + return (stbi__uint16 *)stbi__errpuc("outofmem", "Out of memory"); + } - for (j = 0; j < (int)y; ++j) - { - stbi__uint16 *src = data + j * x * img_n; - stbi__uint16 *dest = good + j * x * req_comp; + for (j = 0; j < (int)y; ++j) + { + stbi__uint16 *src = data + j * x * img_n; + stbi__uint16 *dest = good + j * x * req_comp; -#define STBI__COMBO(a, b) ((a)*8 + (b)) +#define STBI__COMBO(a, b) ((a) * 8 + (b)) #define STBI__CASE(a, b) \ - case STBI__COMBO(a, b): \ - for (i = x - 1; i >= 0; --i, src += a, dest += b) - // convert source image with img_n components to one with req_comp components; - // avoid switch per pixel, so use switch per scanline and massive macros - switch (STBI__COMBO(img_n, req_comp)) - { - STBI__CASE(1, 2) - { - dest[0] = src[0]; - dest[1] = 0xffff; - } - break; - STBI__CASE(1, 3) { dest[0] = dest[1] = dest[2] = src[0]; } - break; - STBI__CASE(1, 4) - { - dest[0] = dest[1] = dest[2] = src[0]; - dest[3] = 0xffff; - } - break; - STBI__CASE(2, 1) { dest[0] = src[0]; } - break; - STBI__CASE(2, 3) { dest[0] = dest[1] = dest[2] = src[0]; } - break; - STBI__CASE(2, 4) - { - dest[0] = dest[1] = dest[2] = src[0]; - dest[3] = src[1]; - } - break; - STBI__CASE(3, 4) - { - dest[0] = src[0]; - dest[1] = src[1]; - dest[2] = src[2]; - dest[3] = 0xffff; - } - break; - STBI__CASE(3, 1) { dest[0] = stbi__compute_y_16(src[0], src[1], src[2]); } - break; - STBI__CASE(3, 2) - { - dest[0] = stbi__compute_y_16(src[0], src[1], src[2]); - dest[1] = 0xffff; - } - break; - STBI__CASE(4, 1) { dest[0] = stbi__compute_y_16(src[0], src[1], src[2]); } - break; - STBI__CASE(4, 2) - { - dest[0] = stbi__compute_y_16(src[0], src[1], src[2]); - dest[1] = src[3]; - } - break; - STBI__CASE(4, 3) - { - dest[0] = src[0]; - dest[1] = src[1]; - dest[2] = src[2]; - } - break; - default: - STBI_ASSERT(0); - STBI_FREE(data); - STBI_FREE(good); - return (stbi__uint16 *)stbi__errpuc("unsupported", "Unsupported format conversion"); - } + case STBI__COMBO(a, b): \ + for (i = x - 1; i >= 0; --i, src += a, dest += b) + // convert source image with img_n components to one with req_comp components; + // avoid switch per pixel, so use switch per scanline and massive macros + switch (STBI__COMBO(img_n, req_comp)) + { + STBI__CASE(1, 2) + { + dest[0] = src[0]; + dest[1] = 0xffff; + } + break; + STBI__CASE(1, 3) { dest[0] = dest[1] = 
dest[2] = src[0]; } + break; + STBI__CASE(1, 4) + { + dest[0] = dest[1] = dest[2] = src[0]; + dest[3] = 0xffff; + } + break; + STBI__CASE(2, 1) { dest[0] = src[0]; } + break; + STBI__CASE(2, 3) { dest[0] = dest[1] = dest[2] = src[0]; } + break; + STBI__CASE(2, 4) + { + dest[0] = dest[1] = dest[2] = src[0]; + dest[3] = src[1]; + } + break; + STBI__CASE(3, 4) + { + dest[0] = src[0]; + dest[1] = src[1]; + dest[2] = src[2]; + dest[3] = 0xffff; + } + break; + STBI__CASE(3, 1) { dest[0] = stbi__compute_y_16(src[0], src[1], src[2]); } + break; + STBI__CASE(3, 2) + { + dest[0] = stbi__compute_y_16(src[0], src[1], src[2]); + dest[1] = 0xffff; + } + break; + STBI__CASE(4, 1) { dest[0] = stbi__compute_y_16(src[0], src[1], src[2]); } + break; + STBI__CASE(4, 2) + { + dest[0] = stbi__compute_y_16(src[0], src[1], src[2]); + dest[1] = src[3]; + } + break; + STBI__CASE(4, 3) + { + dest[0] = src[0]; + dest[1] = src[1]; + dest[2] = src[2]; + } + break; + default: + STBI_ASSERT(0); + STBI_FREE(data); + STBI_FREE(good); + return (stbi__uint16 *)stbi__errpuc("unsupported", "Unsupported format conversion"); + } #undef STBI__CASE - } + } - STBI_FREE(data); - return good; + STBI_FREE(data); + return good; } #endif #ifndef STBI_NO_LINEAR static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp) { - int i, k, n; - float *output; - if (!data) - return NULL; - output = (float *)stbi__malloc_mad4(x, y, comp, sizeof(float), 0); - if (output == NULL) - { - STBI_FREE(data); - return stbi__errpf("outofmem", "Out of memory"); - } - // compute number of non-alpha components - if (comp & 1) - n = comp; - else - n = comp - 1; - for (i = 0; i < x * y; ++i) - { - for (k = 0; k < n; ++k) - { - output[i * comp + k] = (float)(pow(data[i * comp + k] / 255.0f, stbi__l2h_gamma) * stbi__l2h_scale); - } - } - if (n < comp) - { - for (i = 0; i < x * y; ++i) - { - output[i * comp + n] = data[i * comp + n] / 255.0f; - } - } - STBI_FREE(data); - return output; + int i, k, n; + float *output; + if (!data) + return NULL; + output = (float *)stbi__malloc_mad4(x, y, comp, sizeof(float), 0); + if (output == NULL) + { + STBI_FREE(data); + return stbi__errpf("outofmem", "Out of memory"); + } + // compute number of non-alpha components + if (comp & 1) + n = comp; + else + n = comp - 1; + for (i = 0; i < x * y; ++i) + { + for (k = 0; k < n; ++k) + { + output[i * comp + k] = (float)(pow(data[i * comp + k] / 255.0f, stbi__l2h_gamma) * stbi__l2h_scale); + } + } + if (n < comp) + { + for (i = 0; i < x * y; ++i) + { + output[i * comp + n] = data[i * comp + n] / 255.0f; + } + } + STBI_FREE(data); + return output; } #endif @@ -2044,44 +2042,44 @@ static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp) #define stbi__float2int(x) ((int)(x)) static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp) { - int i, k, n; - stbi_uc *output; - if (!data) - return NULL; - output = (stbi_uc *)stbi__malloc_mad3(x, y, comp, 0); - if (output == NULL) - { - STBI_FREE(data); - return stbi__errpuc("outofmem", "Out of memory"); - } - // compute number of non-alpha components - if (comp & 1) - n = comp; - else - n = comp - 1; - for (i = 0; i < x * y; ++i) - { - for (k = 0; k < n; ++k) - { - float z = (float)pow(data[i * comp + k] * stbi__h2l_scale_i, stbi__h2l_gamma_i) * 255 + 0.5f; - if (z < 0) - z = 0; - if (z > 255) - z = 255; - output[i * comp + k] = (stbi_uc)stbi__float2int(z); - } - if (k < comp) - { - float z = data[i * comp + k] * 255 + 0.5f; - if (z < 0) - z = 0; - if (z > 255) - z = 255; - output[i * comp + k] = 
(stbi_uc)stbi__float2int(z); - } - } - STBI_FREE(data); - return output; + int i, k, n; + stbi_uc *output; + if (!data) + return NULL; + output = (stbi_uc *)stbi__malloc_mad3(x, y, comp, 0); + if (output == NULL) + { + STBI_FREE(data); + return stbi__errpuc("outofmem", "Out of memory"); + } + // compute number of non-alpha components + if (comp & 1) + n = comp; + else + n = comp - 1; + for (i = 0; i < x * y; ++i) + { + for (k = 0; k < n; ++k) + { + float z = (float)pow(data[i * comp + k] * stbi__h2l_scale_i, stbi__h2l_gamma_i) * 255 + 0.5f; + if (z < 0) + z = 0; + if (z > 255) + z = 255; + output[i * comp + k] = (stbi_uc)stbi__float2int(z); + } + if (k < comp) + { + float z = data[i * comp + k] * 255 + 0.5f; + if (z < 0) + z = 0; + if (z > 255) + z = 255; + output[i * comp + k] = (stbi_uc)stbi__float2int(z); + } + } + STBI_FREE(data); + return output; } #endif @@ -2113,174 +2111,174 @@ static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp) typedef struct { - stbi_uc fast[1 << FAST_BITS]; - // weirdly, repacking this into AoS is a 10% speed loss, instead of a win - stbi__uint16 code[256]; - stbi_uc values[256]; - stbi_uc size[257]; - unsigned int maxcode[18]; - int delta[17]; // old 'firstsymbol' - old 'firstcode' + stbi_uc fast[1 << FAST_BITS]; + // weirdly, repacking this into AoS is a 10% speed loss, instead of a win + stbi__uint16 code[256]; + stbi_uc values[256]; + stbi_uc size[257]; + unsigned int maxcode[18]; + int delta[17]; // old 'firstsymbol' - old 'firstcode' } stbi__huffman; typedef struct { - stbi__context *s; - stbi__huffman huff_dc[4]; - stbi__huffman huff_ac[4]; - stbi__uint16 dequant[4][64]; - stbi__int16 fast_ac[4][1 << FAST_BITS]; + stbi__context *s; + stbi__huffman huff_dc[4]; + stbi__huffman huff_ac[4]; + stbi__uint16 dequant[4][64]; + stbi__int16 fast_ac[4][1 << FAST_BITS]; - // sizes for components, interleaved MCUs - int img_h_max, img_v_max; - int img_mcu_x, img_mcu_y; - int img_mcu_w, img_mcu_h; + // sizes for components, interleaved MCUs + int img_h_max, img_v_max; + int img_mcu_x, img_mcu_y; + int img_mcu_w, img_mcu_h; - // definition of jpeg image component - struct - { - int id; - int h, v; - int tq; - int hd, ha; - int dc_pred; + // definition of jpeg image component + struct + { + int id; + int h, v; + int tq; + int hd, ha; + int dc_pred; - int x, y, w2, h2; - stbi_uc *data; - void *raw_data, *raw_coeff; - stbi_uc *linebuf; - short *coeff; // progressive only - int coeff_w, coeff_h; // number of 8x8 coefficient blocks - } img_comp[4]; + int x, y, w2, h2; + stbi_uc *data; + void *raw_data, *raw_coeff; + stbi_uc *linebuf; + short *coeff; // progressive only + int coeff_w, coeff_h; // number of 8x8 coefficient blocks + } img_comp[4]; - stbi__uint32 code_buffer; // jpeg entropy-coded buffer - int code_bits; // number of valid bits - unsigned char marker; // marker seen while filling entropy buffer - int nomore; // flag if we saw a marker so must stop + stbi__uint32 code_buffer; // jpeg entropy-coded buffer + int code_bits; // number of valid bits + unsigned char marker; // marker seen while filling entropy buffer + int nomore; // flag if we saw a marker so must stop - int progressive; - int spec_start; - int spec_end; - int succ_high; - int succ_low; - int eob_run; - int jfif; - int app14_color_transform; // Adobe APP14 tag - int rgb; + int progressive; + int spec_start; + int spec_end; + int succ_high; + int succ_low; + int eob_run; + int jfif; + int app14_color_transform; // Adobe APP14 tag + int rgb; - int scan_n, order[4]; - int 
restart_interval, todo; + int scan_n, order[4]; + int restart_interval, todo; - // kernels - void (*idct_block_kernel)(stbi_uc *out, int out_stride, short data[64]); - void (*YCbCr_to_RGB_kernel)(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step); - stbi_uc *(*resample_row_hv_2_kernel)(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs); + // kernels + void (*idct_block_kernel)(stbi_uc *out, int out_stride, short data[64]); + void (*YCbCr_to_RGB_kernel)(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step); + stbi_uc *(*resample_row_hv_2_kernel)(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs); } stbi__jpeg; static int stbi__build_huffman(stbi__huffman *h, int *count) { - int i, j, k = 0; - unsigned int code; - // build size list for each symbol (from JPEG spec) - for (i = 0; i < 16; ++i) - { - for (j = 0; j < count[i]; ++j) - { - h->size[k++] = (stbi_uc)(i + 1); - if (k >= 257) - return stbi__err("bad size list", "Corrupt JPEG"); - } - } - h->size[k] = 0; + int i, j, k = 0; + unsigned int code; + // build size list for each symbol (from JPEG spec) + for (i = 0; i < 16; ++i) + { + for (j = 0; j < count[i]; ++j) + { + h->size[k++] = (stbi_uc)(i + 1); + if (k >= 257) + return stbi__err("bad size list", "Corrupt JPEG"); + } + } + h->size[k] = 0; - // compute actual symbols (from jpeg spec) - code = 0; - k = 0; - for (j = 1; j <= 16; ++j) - { - // compute delta to add to code to compute symbol id - h->delta[j] = k - code; - if (h->size[k] == j) - { - while (h->size[k] == j) - h->code[k++] = (stbi__uint16)(code++); - if (code - 1 >= (1u << j)) - return stbi__err("bad code lengths", "Corrupt JPEG"); - } - // compute largest code + 1 for this size, preshifted as needed later - h->maxcode[j] = code << (16 - j); - code <<= 1; - } - h->maxcode[j] = 0xffffffff; + // compute actual symbols (from jpeg spec) + code = 0; + k = 0; + for (j = 1; j <= 16; ++j) + { + // compute delta to add to code to compute symbol id + h->delta[j] = k - code; + if (h->size[k] == j) + { + while (h->size[k] == j) + h->code[k++] = (stbi__uint16)(code++); + if (code - 1 >= (1u << j)) + return stbi__err("bad code lengths", "Corrupt JPEG"); + } + // compute largest code + 1 for this size, preshifted as needed later + h->maxcode[j] = code << (16 - j); + code <<= 1; + } + h->maxcode[j] = 0xffffffff; - // build non-spec acceleration table; 255 is flag for not-accelerated - memset(h->fast, 255, 1 << FAST_BITS); - for (i = 0; i < k; ++i) - { - int s = h->size[i]; - if (s <= FAST_BITS) - { - int c = h->code[i] << (FAST_BITS - s); - int m = 1 << (FAST_BITS - s); - for (j = 0; j < m; ++j) - { - h->fast[c + j] = (stbi_uc)i; - } - } - } - return 1; + // build non-spec acceleration table; 255 is flag for not-accelerated + memset(h->fast, 255, 1 << FAST_BITS); + for (i = 0; i < k; ++i) + { + int s = h->size[i]; + if (s <= FAST_BITS) + { + int c = h->code[i] << (FAST_BITS - s); + int m = 1 << (FAST_BITS - s); + for (j = 0; j < m; ++j) + { + h->fast[c + j] = (stbi_uc)i; + } + } + } + return 1; } // build a table that decodes both magnitude and value of small ACs in // one go. 
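/* [Editor's note] Unpacking the table built by stbi__build_fast_ac() below (an illustrative sketch of the packed layout, not part of this diff): each nonzero stbi__int16 entry folds three fields into

       value = entry >> 8;        // signed coefficient, -128..127
       run   = (entry >> 4) & 15; // zeros to skip before storing it
       bits  = entry & 15;        // code + magnitude bits to consume

   which is what lets the hot loop in stbi__jpeg_decode_block() skip, store, and advance the bit buffer from a single table hit instead of a full Huffman decode followed by stbi__extend_receive(). */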
static void stbi__build_fast_ac(stbi__int16 *fast_ac, stbi__huffman *h) { - int i; - for (i = 0; i < (1 << FAST_BITS); ++i) - { - stbi_uc fast = h->fast[i]; - fast_ac[i] = 0; - if (fast < 255) - { - int rs = h->values[fast]; - int run = (rs >> 4) & 15; - int magbits = rs & 15; - int len = h->size[fast]; + int i; + for (i = 0; i < (1 << FAST_BITS); ++i) + { + stbi_uc fast = h->fast[i]; + fast_ac[i] = 0; + if (fast < 255) + { + int rs = h->values[fast]; + int run = (rs >> 4) & 15; + int magbits = rs & 15; + int len = h->size[fast]; - if (magbits && len + magbits <= FAST_BITS) - { - // magnitude code followed by receive_extend code - int k = ((i << len) & ((1 << FAST_BITS) - 1)) >> (FAST_BITS - magbits); - int m = 1 << (magbits - 1); - if (k < m) - k += (~0U << magbits) + 1; - // if the result is small enough, we can fit it in fast_ac table - if (k >= -128 && k <= 127) - fast_ac[i] = (stbi__int16)((k * 256) + (run * 16) + (len + magbits)); - } - } - } + if (magbits && len + magbits <= FAST_BITS) + { + // magnitude code followed by receive_extend code + int k = ((i << len) & ((1 << FAST_BITS) - 1)) >> (FAST_BITS - magbits); + int m = 1 << (magbits - 1); + if (k < m) + k += (~0U << magbits) + 1; + // if the result is small enough, we can fit it in fast_ac table + if (k >= -128 && k <= 127) + fast_ac[i] = (stbi__int16)((k * 256) + (run * 16) + (len + magbits)); + } + } + } } static void stbi__grow_buffer_unsafe(stbi__jpeg *j) { - do - { - unsigned int b = j->nomore ? 0 : stbi__get8(j->s); - if (b == 0xff) - { - int c = stbi__get8(j->s); - while (c == 0xff) - c = stbi__get8(j->s); // consume fill bytes - if (c != 0) - { - j->marker = (unsigned char)c; - j->nomore = 1; - return; - } - } - j->code_buffer |= b << (24 - j->code_bits); - j->code_bits += 8; - } while (j->code_bits <= 24); + do + { + unsigned int b = j->nomore ? 0 : stbi__get8(j->s); + if (b == 0xff) + { + int c = stbi__get8(j->s); + while (c == 0xff) + c = stbi__get8(j->s); // consume fill bytes + if (c != 0) + { + j->marker = (unsigned char)c; + j->nomore = 1; + return; + } + } + j->code_buffer |= b << (24 - j->code_bits); + j->code_bits += 8; + } while (j->code_bits <= 24); } // (1 << n) - 1 @@ -2289,56 +2287,56 @@ static const stbi__uint32 stbi__bmask[17] = {0, 1, 3, 7, 15, 31, 63, 127, 255, 5 // decode a jpeg huffman value from the bitstream stbi_inline static int stbi__jpeg_huff_decode(stbi__jpeg *j, stbi__huffman *h) { - unsigned int temp; - int c, k; + unsigned int temp; + int c, k; - if (j->code_bits < 16) - stbi__grow_buffer_unsafe(j); + if (j->code_bits < 16) + stbi__grow_buffer_unsafe(j); - // look at the top FAST_BITS and determine what symbol ID it is, - // if the code is <= FAST_BITS - c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS) - 1); - k = h->fast[c]; - if (k < 255) - { - int s = h->size[k]; - if (s > j->code_bits) - return -1; - j->code_buffer <<= s; - j->code_bits -= s; - return h->values[k]; - } + // look at the top FAST_BITS and determine what symbol ID it is, + // if the code is <= FAST_BITS + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS) - 1); + k = h->fast[c]; + if (k < 255) + { + int s = h->size[k]; + if (s > j->code_bits) + return -1; + j->code_buffer <<= s; + j->code_bits -= s; + return h->values[k]; + } - // naive test is to shift the code_buffer down so k bits are - // valid, then test against maxcode. 
To speed this up, we've - preshifted maxcode left so that it has (16-k) 0s at the - end; in other words, regardless of the number of bits, it - wants to be compared against something shifted to have 16; - that way we don't need to shift inside the loop. - temp = j->code_buffer >> 16; - for (k = FAST_BITS + 1;; ++k) - if (temp < h->maxcode[k]) - break; - if (k == 17) - { - // error! code not found - j->code_bits -= 16; - return -1; - } + // naive test is to shift the code_buffer down so k bits are + // valid, then test against maxcode. To speed this up, we've + // preshifted maxcode left so that it has (16-k) 0s at the + // end; in other words, regardless of the number of bits, it + // wants to be compared against something shifted to have 16; + // that way we don't need to shift inside the loop. + temp = j->code_buffer >> 16; + for (k = FAST_BITS + 1;; ++k) + if (temp < h->maxcode[k]) + break; + if (k == 17) + { + // error! code not found + j->code_bits -= 16; + return -1; + } - if (k > j->code_bits) - return -1; + if (k > j->code_bits) + return -1; - // convert the huffman code to the symbol id - c = ((j->code_buffer >> (32 - k)) & stbi__bmask[k]) + h->delta[k]; - if (c < 0 || c >= 256) // symbol id out of bounds! - return -1; - STBI_ASSERT((((j->code_buffer) >> (32 - h->size[c])) & stbi__bmask[h->size[c]]) == h->code[c]); + // convert the huffman code to the symbol id + c = ((j->code_buffer >> (32 - k)) & stbi__bmask[k]) + h->delta[k]; + if (c < 0 || c >= 256) // symbol id out of bounds! + return -1; + STBI_ASSERT((((j->code_buffer) >> (32 - h->size[c])) & stbi__bmask[h->size[c]]) == h->code[c]); - // convert the id to a symbol - j->code_bits -= k; - j->code_buffer <<= k; - return h->values[c]; + // convert the id to a symbol + j->code_bits -= k; + j->code_buffer <<= k; + return h->values[c]; } // bias[n] = (-1<<n) + 1 static const int stbi__jbias[16] = {0, -1, -3, -7, -15, -31, -63, -127, -255, -511, -1023, -2047, -4095, -8191, -16383, -32767}; // combined JPEG 'receive' and JPEG 'extend', since baseline // always extends everything it receives. stbi_inline static int stbi__extend_receive(stbi__jpeg *j, int n) { - unsigned int k; - int sgn; - if (j->code_bits < n) - stbi__grow_buffer_unsafe(j); - if (j->code_bits < n) - return 0; // ran out of bits from stream, return 0s intead of continuing + unsigned int k; + int sgn; + if (j->code_bits < n) + stbi__grow_buffer_unsafe(j); + if (j->code_bits < n) + return 0; // ran out of bits from stream, return 0s instead of continuing - sgn = j->code_buffer >> 31; // sign bit always in MSB; 0 if MSB clear (positive), 1 if MSB set (negative) - k = stbi_lrot(j->code_buffer, n); - j->code_buffer = k & ~stbi__bmask[n]; - k &= stbi__bmask[n]; - j->code_bits -= n; - return k + (stbi__jbias[n] & (sgn - 1)); + sgn = j->code_buffer >> 31; // sign bit always in MSB; 0 if MSB clear (positive), 1 if MSB set (negative) + k = stbi_lrot(j->code_buffer, n); + j->code_buffer = k & ~stbi__bmask[n]; + k &= stbi__bmask[n]; + j->code_bits -= n; + return k + (stbi__jbias[n] & (sgn - 1)); } // get some unsigned bits stbi_inline static int stbi__jpeg_get_bits(stbi__jpeg *j, int n) { - unsigned int k; - if (j->code_bits < n) - stbi__grow_buffer_unsafe(j); - if (j->code_bits < n) - return 0; // ran out of bits from stream, return 0s intead of continuing - k = stbi_lrot(j->code_buffer, n); - j->code_buffer = k & ~stbi__bmask[n]; - k &= stbi__bmask[n]; - j->code_bits -= n; - return k; + unsigned int k; + if (j->code_bits < n) + stbi__grow_buffer_unsafe(j); + if (j->code_bits < n) + return 0; // ran out of bits from stream, return 0s instead of continuing + k = stbi_lrot(j->code_buffer, n); + j->code_buffer = k & ~stbi__bmask[n]; + k &= stbi__bmask[n]; + j->code_bits -= n; + return k; } stbi_inline static int stbi__jpeg_get_bit(stbi__jpeg *j) { - unsigned int k; - if (j->code_bits < 1) -
stbi__grow_buffer_unsafe(j); - if (j->code_bits < 1) - return 0; // ran out of bits from stream, return 0s intead of continuing - k = j->code_buffer; - j->code_buffer <<= 1; - --j->code_bits; - return k & 0x80000000; + unsigned int k; + if (j->code_bits < 1) + stbi__grow_buffer_unsafe(j); + if (j->code_bits < 1) + return 0; // ran out of bits from stream, return 0s instead of continuing + k = j->code_buffer; + j->code_buffer <<= 1; + --j->code_bits; + return k & 0x80000000; } // given a value that's at position X in the zigzag stream, // where does it appear in the 8x8 matrix coded as row-major? static const stbi_uc stbi__jpeg_dezigzag[64 + 15] = - { - 0, 1, 8, 16, 9, 2, 3, 10, - 17, 24, 32, 25, 18, 11, 4, 5, - 12, 19, 26, 33, 40, 48, 41, 34, - 27, 20, 13, 6, 7, 14, 21, 28, - 35, 42, 49, 56, 57, 50, 43, 36, - 29, 22, 15, 23, 30, 37, 44, 51, - 58, 59, 52, 45, 38, 31, 39, 46, - 53, 60, 61, 54, 47, 55, 62, 63, - // let corrupt input sample past end - 63, 63, 63, 63, 63, 63, 63, 63, - 63, 63, 63, 63, 63, 63, 63}; + { + 0, 1, 8, 16, 9, 2, 3, 10, + 17, 24, 32, 25, 18, 11, 4, 5, + 12, 19, 26, 33, 40, 48, 41, 34, + 27, 20, 13, 6, 7, 14, 21, 28, + 35, 42, 49, 56, 57, 50, 43, 36, + 29, 22, 15, 23, 30, 37, 44, 51, + 58, 59, 52, 45, 38, 31, 39, 46, + 53, 60, 61, 54, 47, 55, 62, 63, + // let corrupt input sample past end + 63, 63, 63, 63, 63, 63, 63, 63, + 63, 63, 63, 63, 63, 63, 63}; // decode one 64-entry block-- static int stbi__jpeg_decode_block(stbi__jpeg *j, short data[64], stbi__huffman *hdc, stbi__huffman *hac, stbi__int16 *fac, int b, stbi__uint16 *dequant) { - int diff, dc, k; - int t; + int diff, dc, k; + int t; - if (j->code_bits < 16) - stbi__grow_buffer_unsafe(j); - t = stbi__jpeg_huff_decode(j, hdc); - if (t < 0 || t > 15) - return stbi__err("bad huffman code", "Corrupt JPEG"); + if (j->code_bits < 16) + stbi__grow_buffer_unsafe(j); + t = stbi__jpeg_huff_decode(j, hdc); + if (t < 0 || t > 15) + return stbi__err("bad huffman code", "Corrupt JPEG"); - // 0 all the ac values now so we can do it 32-bits at a time - memset(data, 0, 64 * sizeof(data[0])); + // 0 all the ac values now so we can do it 32-bits at a time + memset(data, 0, 64 * sizeof(data[0])); - diff = t ? stbi__extend_receive(j, t) : 0; - if (!stbi__addints_valid(j->img_comp[b].dc_pred, diff)) - return stbi__err("bad delta", "Corrupt JPEG"); - dc = j->img_comp[b].dc_pred + diff; - j->img_comp[b].dc_pred = dc; - if (!stbi__mul2shorts_valid(dc, dequant[0])) - return stbi__err("can't merge dc and ac", "Corrupt JPEG"); - data[0] = (short)(dc * dequant[0]); + diff = t ? 
stbi__extend_receive(j, t) : 0; + if (!stbi__addints_valid(j->img_comp[b].dc_pred, diff)) + return stbi__err("bad delta", "Corrupt JPEG"); + dc = j->img_comp[b].dc_pred + diff; + j->img_comp[b].dc_pred = dc; + if (!stbi__mul2shorts_valid(dc, dequant[0])) + return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + data[0] = (short)(dc * dequant[0]); - // decode AC components, see JPEG spec - k = 1; - do - { - unsigned int zig; - int c, r, s; - if (j->code_bits < 16) - stbi__grow_buffer_unsafe(j); - c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS) - 1); - r = fac[c]; - if (r) - { // fast-AC path - k += (r >> 4) & 15; // run - s = r & 15; // combined length - if (s > j->code_bits) - return stbi__err("bad huffman code", "Combined length longer than code bits available"); - j->code_buffer <<= s; - j->code_bits -= s; - // decode into unzigzag'd location - zig = stbi__jpeg_dezigzag[k++]; - data[zig] = (short)((r >> 8) * dequant[zig]); - } - else - { - int rs = stbi__jpeg_huff_decode(j, hac); - if (rs < 0) - return stbi__err("bad huffman code", "Corrupt JPEG"); - s = rs & 15; - r = rs >> 4; - if (s == 0) - { - if (rs != 0xf0) - break; // end block - k += 16; - } - else - { - k += r; - // decode into unzigzag'd location - zig = stbi__jpeg_dezigzag[k++]; - data[zig] = (short)(stbi__extend_receive(j, s) * dequant[zig]); - } - } - } while (k < 64); - return 1; + // decode AC components, see JPEG spec + k = 1; + do + { + unsigned int zig; + int c, r, s; + if (j->code_bits < 16) + stbi__grow_buffer_unsafe(j); + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS) - 1); + r = fac[c]; + if (r) + { // fast-AC path + k += (r >> 4) & 15; // run + s = r & 15; // combined length + if (s > j->code_bits) + return stbi__err("bad huffman code", "Combined length longer than code bits available"); + j->code_buffer <<= s; + j->code_bits -= s; + // decode into unzigzag'd location + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short)((r >> 8) * dequant[zig]); + } + else + { + int rs = stbi__jpeg_huff_decode(j, hac); + if (rs < 0) + return stbi__err("bad huffman code", "Corrupt JPEG"); + s = rs & 15; + r = rs >> 4; + if (s == 0) + { + if (rs != 0xf0) + break; // end block + k += 16; + } + else + { + k += r; + // decode into unzigzag'd location + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short)(stbi__extend_receive(j, s) * dequant[zig]); + } + } + } while (k < 64); + return 1; } static int stbi__jpeg_decode_block_prog_dc(stbi__jpeg *j, short data[64], stbi__huffman *hdc, int b) { - int diff, dc; - int t; - if (j->spec_end != 0) - return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + int diff, dc; + int t; + if (j->spec_end != 0) + return stbi__err("can't merge dc and ac", "Corrupt JPEG"); - if (j->code_bits < 16) - stbi__grow_buffer_unsafe(j); + if (j->code_bits < 16) + stbi__grow_buffer_unsafe(j); - if (j->succ_high == 0) - { - // first scan for DC coefficient, must be first - memset(data, 0, 64 * sizeof(data[0])); // 0 all the ac values now - t = stbi__jpeg_huff_decode(j, hdc); - if (t < 0 || t > 15) - return stbi__err("can't merge dc and ac", "Corrupt JPEG"); - diff = t ? stbi__extend_receive(j, t) : 0; + if (j->succ_high == 0) + { + // first scan for DC coefficient, must be first + memset(data, 0, 64 * sizeof(data[0])); // 0 all the ac values now + t = stbi__jpeg_huff_decode(j, hdc); + if (t < 0 || t > 15) + return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + diff = t ? 
stbi__extend_receive(j, t) : 0; - if (!stbi__addints_valid(j->img_comp[b].dc_pred, diff)) - return stbi__err("bad delta", "Corrupt JPEG"); - dc = j->img_comp[b].dc_pred + diff; - j->img_comp[b].dc_pred = dc; - if (!stbi__mul2shorts_valid(dc, 1 << j->succ_low)) - return stbi__err("can't merge dc and ac", "Corrupt JPEG"); - data[0] = (short)(dc * (1 << j->succ_low)); - } - else - { - // refinement scan for DC coefficient - if (stbi__jpeg_get_bit(j)) - data[0] += (short)(1 << j->succ_low); - } - return 1; + if (!stbi__addints_valid(j->img_comp[b].dc_pred, diff)) + return stbi__err("bad delta", "Corrupt JPEG"); + dc = j->img_comp[b].dc_pred + diff; + j->img_comp[b].dc_pred = dc; + if (!stbi__mul2shorts_valid(dc, 1 << j->succ_low)) + return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + data[0] = (short)(dc * (1 << j->succ_low)); + } + else + { + // refinement scan for DC coefficient + if (stbi__jpeg_get_bit(j)) + data[0] += (short)(1 << j->succ_low); + } + return 1; } // @OPTIMIZE: store non-zigzagged during the decode passes, // and only de-zigzag when dequantizing static int stbi__jpeg_decode_block_prog_ac(stbi__jpeg *j, short data[64], stbi__huffman *hac, stbi__int16 *fac) { - int k; - if (j->spec_start == 0) - return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + int k; + if (j->spec_start == 0) + return stbi__err("can't merge dc and ac", "Corrupt JPEG"); - if (j->succ_high == 0) - { - int shift = j->succ_low; + if (j->succ_high == 0) + { + int shift = j->succ_low; - if (j->eob_run) - { - --j->eob_run; - return 1; - } + if (j->eob_run) + { + --j->eob_run; + return 1; + } - k = j->spec_start; - do - { - unsigned int zig; - int c, r, s; - if (j->code_bits < 16) - stbi__grow_buffer_unsafe(j); - c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS) - 1); - r = fac[c]; - if (r) - { // fast-AC path - k += (r >> 4) & 15; // run - s = r & 15; // combined length - if (s > j->code_bits) - return stbi__err("bad huffman code", "Combined length longer than code bits available"); - j->code_buffer <<= s; - j->code_bits -= s; - zig = stbi__jpeg_dezigzag[k++]; - data[zig] = (short)((r >> 8) * (1 << shift)); - } - else - { - int rs = stbi__jpeg_huff_decode(j, hac); - if (rs < 0) - return stbi__err("bad huffman code", "Corrupt JPEG"); - s = rs & 15; - r = rs >> 4; - if (s == 0) - { - if (r < 15) - { - j->eob_run = (1 << r); - if (r) - j->eob_run += stbi__jpeg_get_bits(j, r); - --j->eob_run; - break; - } - k += 16; - } - else - { - k += r; - zig = stbi__jpeg_dezigzag[k++]; - data[zig] = (short)(stbi__extend_receive(j, s) * (1 << shift)); - } - } - } while (k <= j->spec_end); - } - else - { - // refinement scan for these AC coefficients + k = j->spec_start; + do + { + unsigned int zig; + int c, r, s; + if (j->code_bits < 16) + stbi__grow_buffer_unsafe(j); + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS) - 1); + r = fac[c]; + if (r) + { // fast-AC path + k += (r >> 4) & 15; // run + s = r & 15; // combined length + if (s > j->code_bits) + return stbi__err("bad huffman code", "Combined length longer than code bits available"); + j->code_buffer <<= s; + j->code_bits -= s; + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short)((r >> 8) * (1 << shift)); + } + else + { + int rs = stbi__jpeg_huff_decode(j, hac); + if (rs < 0) + return stbi__err("bad huffman code", "Corrupt JPEG"); + s = rs & 15; + r = rs >> 4; + if (s == 0) + { + if (r < 15) + { + j->eob_run = (1 << r); + if (r) + j->eob_run += stbi__jpeg_get_bits(j, r); + --j->eob_run; + break; + } + k += 16; + } + else + { + k += 
r; + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short)(stbi__extend_receive(j, s) * (1 << shift)); + } + } + } while (k <= j->spec_end); + } + else + { + // refinement scan for these AC coefficients - short bit = (short)(1 << j->succ_low); + short bit = (short)(1 << j->succ_low); - if (j->eob_run) - { - --j->eob_run; - for (k = j->spec_start; k <= j->spec_end; ++k) - { - short *p = &data[stbi__jpeg_dezigzag[k]]; - if (*p != 0) - if (stbi__jpeg_get_bit(j)) - if ((*p & bit) == 0) - { - if (*p > 0) - *p += bit; - else - *p -= bit; - } - } - } - else - { - k = j->spec_start; - do - { - int r, s; - int rs = stbi__jpeg_huff_decode(j, hac); // @OPTIMIZE see if we can use the fast path here, advance-by-r is so slow, eh - if (rs < 0) - return stbi__err("bad huffman code", "Corrupt JPEG"); - s = rs & 15; - r = rs >> 4; - if (s == 0) - { - if (r < 15) - { - j->eob_run = (1 << r) - 1; - if (r) - j->eob_run += stbi__jpeg_get_bits(j, r); - r = 64; // force end of block - } - else - { - // r=15 s=0 should write 16 0s, so we just do - // a run of 15 0s and then write s (which is 0), - // so we don't have to do anything special here - } - } - else - { - if (s != 1) - return stbi__err("bad huffman code", "Corrupt JPEG"); - // sign bit - if (stbi__jpeg_get_bit(j)) - s = bit; - else - s = -bit; - } + if (j->eob_run) + { + --j->eob_run; + for (k = j->spec_start; k <= j->spec_end; ++k) + { + short *p = &data[stbi__jpeg_dezigzag[k]]; + if (*p != 0) + if (stbi__jpeg_get_bit(j)) + if ((*p & bit) == 0) + { + if (*p > 0) + *p += bit; + else + *p -= bit; + } + } + } + else + { + k = j->spec_start; + do + { + int r, s; + int rs = stbi__jpeg_huff_decode(j, hac); // @OPTIMIZE see if we can use the fast path here, advance-by-r is so slow, eh + if (rs < 0) + return stbi__err("bad huffman code", "Corrupt JPEG"); + s = rs & 15; + r = rs >> 4; + if (s == 0) + { + if (r < 15) + { + j->eob_run = (1 << r) - 1; + if (r) + j->eob_run += stbi__jpeg_get_bits(j, r); + r = 64; // force end of block + } + else + { + // r=15 s=0 should write 16 0s, so we just do + // a run of 15 0s and then write s (which is 0), + // so we don't have to do anything special here + } + } + else + { + if (s != 1) + return stbi__err("bad huffman code", "Corrupt JPEG"); + // sign bit + if (stbi__jpeg_get_bit(j)) + s = bit; + else + s = -bit; + } - // advance by r - while (k <= j->spec_end) - { - short *p = &data[stbi__jpeg_dezigzag[k++]]; - if (*p != 0) - { - if (stbi__jpeg_get_bit(j)) - if ((*p & bit) == 0) - { - if (*p > 0) - *p += bit; - else - *p -= bit; - } - } - else - { - if (r == 0) - { - *p = (short)s; - break; - } - --r; - } - } - } while (k <= j->spec_end); - } - } - return 1; + // advance by r + while (k <= j->spec_end) + { + short *p = &data[stbi__jpeg_dezigzag[k++]]; + if (*p != 0) + { + if (stbi__jpeg_get_bit(j)) + if ((*p & bit) == 0) + { + if (*p > 0) + *p += bit; + else + *p -= bit; + } + } + else + { + if (r == 0) + { + *p = (short)s; + break; + } + --r; + } + } + } while (k <= j->spec_end); + } + } + return 1; } // take a -128..127 value and stbi__clamp it and convert to 0..255 stbi_inline static stbi_uc stbi__clamp(int x) { - // trick to use a single test to catch both cases - if ((unsigned int)x > 255) - { - if (x < 0) - return 0; - if (x > 255) - return 255; - } - return (stbi_uc)x; + // trick to use a single test to catch both cases + if ((unsigned int)x > 255) + { + if (x < 0) + return 0; + if (x > 255) + return 255; + } + return (stbi_uc)x; } -#define stbi__f2f(x) ((int)(((x)*4096 + 0.5))) -#define stbi__fsh(x) ((x)*4096) 
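/* [Editor's note] A worked example of the fixed-point convention set up by the stbi__f2f()/stbi__fsh() macros here (illustrative arithmetic only, not part of this diff): stbi__f2f() rounds a float constant into 12 fractional bits at compile time,

       stbi__f2f(0.5411961f) == (int)(0.5411961 * 4096 + 0.5) == 2217

   while stbi__fsh() lifts an unscaled term into the same domain (x * 4096 == x << 12), so both kinds of operand share one scale until the final rounding shifts in stbi__idct_block(). */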
+#define stbi__f2f(x) ((int)(((x) * 4096 + 0.5))) +#define stbi__fsh(x) ((x) * 4096) // derived from jidctint -- DCT_ISLOW #define STBI__IDCT_1D(s0, s1, s2, s3, s4, s5, s6, s7) \ - int t0, t1, t2, t3, p1, p2, p3, p4, p5, x0, x1, x2, x3; \ - p2 = s2; \ - p3 = s6; \ - p1 = (p2 + p3) * stbi__f2f(0.5411961f); \ - t2 = p1 + p3 * stbi__f2f(-1.847759065f); \ - t3 = p1 + p2 * stbi__f2f(0.765366865f); \ - p2 = s0; \ - p3 = s4; \ - t0 = stbi__fsh(p2 + p3); \ - t1 = stbi__fsh(p2 - p3); \ - x0 = t0 + t3; \ - x3 = t0 - t3; \ - x1 = t1 + t2; \ - x2 = t1 - t2; \ - t0 = s7; \ - t1 = s5; \ - t2 = s3; \ - t3 = s1; \ - p3 = t0 + t2; \ - p4 = t1 + t3; \ - p1 = t0 + t3; \ - p2 = t1 + t2; \ - p5 = (p3 + p4) * stbi__f2f(1.175875602f); \ - t0 = t0 * stbi__f2f(0.298631336f); \ - t1 = t1 * stbi__f2f(2.053119869f); \ - t2 = t2 * stbi__f2f(3.072711026f); \ - t3 = t3 * stbi__f2f(1.501321110f); \ - p1 = p5 + p1 * stbi__f2f(-0.899976223f); \ - p2 = p5 + p2 * stbi__f2f(-2.562915447f); \ - p3 = p3 * stbi__f2f(-1.961570560f); \ - p4 = p4 * stbi__f2f(-0.390180644f); \ - t3 += p1 + p4; \ - t2 += p2 + p3; \ - t1 += p2 + p4; \ - t0 += p1 + p3; + int t0, t1, t2, t3, p1, p2, p3, p4, p5, x0, x1, x2, x3; \ + p2 = s2; \ + p3 = s6; \ + p1 = (p2 + p3) * stbi__f2f(0.5411961f); \ + t2 = p1 + p3 * stbi__f2f(-1.847759065f); \ + t3 = p1 + p2 * stbi__f2f(0.765366865f); \ + p2 = s0; \ + p3 = s4; \ + t0 = stbi__fsh(p2 + p3); \ + t1 = stbi__fsh(p2 - p3); \ + x0 = t0 + t3; \ + x3 = t0 - t3; \ + x1 = t1 + t2; \ + x2 = t1 - t2; \ + t0 = s7; \ + t1 = s5; \ + t2 = s3; \ + t3 = s1; \ + p3 = t0 + t2; \ + p4 = t1 + t3; \ + p1 = t0 + t3; \ + p2 = t1 + t2; \ + p5 = (p3 + p4) * stbi__f2f(1.175875602f); \ + t0 = t0 * stbi__f2f(0.298631336f); \ + t1 = t1 * stbi__f2f(2.053119869f); \ + t2 = t2 * stbi__f2f(3.072711026f); \ + t3 = t3 * stbi__f2f(1.501321110f); \ + p1 = p5 + p1 * stbi__f2f(-0.899976223f); \ + p2 = p5 + p2 * stbi__f2f(-2.562915447f); \ + p3 = p3 * stbi__f2f(-1.961570560f); \ + p4 = p4 * stbi__f2f(-0.390180644f); \ + t3 += p1 + p4; \ + t2 += p2 + p3; \ + t1 += p2 + p4; \ + t0 += p1 + p3; static void stbi__idct_block(stbi_uc *out, int out_stride, short data[64]) { - int i, val[64], *v = val; - stbi_uc *o; - short *d = data; + int i, val[64], *v = val; + stbi_uc *o; + short *d = data; - // columns - for (i = 0; i < 8; ++i, ++d, ++v) - { - // if all zeroes, shortcut -- this avoids dequantizing 0s and IDCTing - if (d[8] == 0 && d[16] == 0 && d[24] == 0 && d[32] == 0 && d[40] == 0 && d[48] == 0 && d[56] == 0) - { - // no shortcut 0 seconds - // (1|2|3|4|5|6|7)==0 0 seconds - // all separate -0.047 seconds - // 1 && 2|3 && 4|5 && 6|7: -0.047 seconds - int dcterm = d[0] * 4; - v[0] = v[8] = v[16] = v[24] = v[32] = v[40] = v[48] = v[56] = dcterm; - } - else - { - STBI__IDCT_1D(d[0], d[8], d[16], d[24], d[32], d[40], d[48], d[56]) - // constants scaled things up by 1<<12; let's bring them back - // down, but keep 2 extra bits of precision - x0 += 512; - x1 += 512; - x2 += 512; - x3 += 512; - v[0] = (x0 + t3) >> 10; - v[56] = (x0 - t3) >> 10; - v[8] = (x1 + t2) >> 10; - v[48] = (x1 - t2) >> 10; - v[16] = (x2 + t1) >> 10; - v[40] = (x2 - t1) >> 10; - v[24] = (x3 + t0) >> 10; - v[32] = (x3 - t0) >> 10; - } - } + // columns + for (i = 0; i < 8; ++i, ++d, ++v) + { + // if all zeroes, shortcut -- this avoids dequantizing 0s and IDCTing + if (d[8] == 0 && d[16] == 0 && d[24] == 0 && d[32] == 0 && d[40] == 0 && d[48] == 0 && d[56] == 0) + { + // no shortcut 0 seconds + // (1|2|3|4|5|6|7)==0 0 seconds + // all separate -0.047 seconds + // 1 && 2|3 && 4|5 && 6|7: 
-0.047 seconds + int dcterm = d[0] * 4; + v[0] = v[8] = v[16] = v[24] = v[32] = v[40] = v[48] = v[56] = dcterm; + } + else + { + STBI__IDCT_1D(d[0], d[8], d[16], d[24], d[32], d[40], d[48], d[56]) + // constants scaled things up by 1<<12; let's bring them back + // down, but keep 2 extra bits of precision + x0 += 512; + x1 += 512; + x2 += 512; + x3 += 512; + v[0] = (x0 + t3) >> 10; + v[56] = (x0 - t3) >> 10; + v[8] = (x1 + t2) >> 10; + v[48] = (x1 - t2) >> 10; + v[16] = (x2 + t1) >> 10; + v[40] = (x2 - t1) >> 10; + v[24] = (x3 + t0) >> 10; + v[32] = (x3 - t0) >> 10; + } + } - for (i = 0, v = val, o = out; i < 8; ++i, v += 8, o += out_stride) - { - // no fast case since the first 1D IDCT spread components out - STBI__IDCT_1D(v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7]) - // constants scaled things up by 1<<12, plus we had 1<<2 from first - // loop, plus horizontal and vertical each scale by sqrt(8) so together - // we've got an extra 1<<3, so 1<<17 total we need to remove. - // so we want to round that, which means adding 0.5 * 1<<17, - // aka 65536. Also, we'll end up with -128 to 127 that we want - // to encode as 0..255 by adding 128, so we'll add that before the shift - x0 += 65536 + (128 << 17); - x1 += 65536 + (128 << 17); - x2 += 65536 + (128 << 17); - x3 += 65536 + (128 << 17); - // tried computing the shifts into temps, or'ing the temps to see - // if any were out of range, but that was slower - o[0] = stbi__clamp((x0 + t3) >> 17); - o[7] = stbi__clamp((x0 - t3) >> 17); - o[1] = stbi__clamp((x1 + t2) >> 17); - o[6] = stbi__clamp((x1 - t2) >> 17); - o[2] = stbi__clamp((x2 + t1) >> 17); - o[5] = stbi__clamp((x2 - t1) >> 17); - o[3] = stbi__clamp((x3 + t0) >> 17); - o[4] = stbi__clamp((x3 - t0) >> 17); - } + for (i = 0, v = val, o = out; i < 8; ++i, v += 8, o += out_stride) + { + // no fast case since the first 1D IDCT spread components out + STBI__IDCT_1D(v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7]) + // constants scaled things up by 1<<12, plus we had 1<<2 from first + // loop, plus horizontal and vertical each scale by sqrt(8) so together + // we've got an extra 1<<3, so 1<<17 total we need to remove. + // so we want to round that, which means adding 0.5 * 1<<17, + // aka 65536. Also, we'll end up with -128 to 127 that we want + // to encode as 0..255 by adding 128, so we'll add that before the shift + x0 += 65536 + (128 << 17); + x1 += 65536 + (128 << 17); + x2 += 65536 + (128 << 17); + x3 += 65536 + (128 << 17); + // tried computing the shifts into temps, or'ing the temps to see + // if any were out of range, but that was slower + o[0] = stbi__clamp((x0 + t3) >> 17); + o[7] = stbi__clamp((x0 - t3) >> 17); + o[1] = stbi__clamp((x1 + t2) >> 17); + o[6] = stbi__clamp((x1 - t2) >> 17); + o[2] = stbi__clamp((x2 + t1) >> 17); + o[5] = stbi__clamp((x2 - t1) >> 17); + o[3] = stbi__clamp((x3 + t0) >> 17); + o[4] = stbi__clamp((x3 - t0) >> 17); + } } #ifdef STBI_SSE2 @@ -2799,9 +2797,9 @@ static void stbi__idct_block(stbi_uc *out, int out_stride, short data[64]) // fully "transparent". static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64]) { - // This is constructed to match our regular (generic) integer IDCT exactly. - __m128i row0, row1, row2, row3, row4, row5, row6, row7; - __m128i tmp; + // This is constructed to match our regular (generic) integer IDCT exactly. 
+ __m128i row0, row1, row2, row3, row4, row5, row6, row7; + __m128i tmp; // dot product constant: even elems=x, odd elems=y #define dct_const(x, y) _mm_setr_epi16((x), (y), (x), (y), (x), (y), (x), (y)) @@ -2809,164 +2807,164 @@ static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64]) // out(0) = c0[even]*x + c0[odd]*y (c0, x, y 16-bit, out 32-bit) // out(1) = c1[even]*x + c1[odd]*y #define dct_rot(out0, out1, x, y, c0, c1) \ - __m128i c0##lo = _mm_unpacklo_epi16((x), (y)); \ - __m128i c0##hi = _mm_unpackhi_epi16((x), (y)); \ - __m128i out0##_l = _mm_madd_epi16(c0##lo, c0); \ - __m128i out0##_h = _mm_madd_epi16(c0##hi, c0); \ - __m128i out1##_l = _mm_madd_epi16(c0##lo, c1); \ - __m128i out1##_h = _mm_madd_epi16(c0##hi, c1) + __m128i c0##lo = _mm_unpacklo_epi16((x), (y)); \ + __m128i c0##hi = _mm_unpackhi_epi16((x), (y)); \ + __m128i out0##_l = _mm_madd_epi16(c0##lo, c0); \ + __m128i out0##_h = _mm_madd_epi16(c0##hi, c0); \ + __m128i out1##_l = _mm_madd_epi16(c0##lo, c1); \ + __m128i out1##_h = _mm_madd_epi16(c0##hi, c1) // out = in << 12 (in 16-bit, out 32-bit) #define dct_widen(out, in) \ - __m128i out##_l = _mm_srai_epi32(_mm_unpacklo_epi16(_mm_setzero_si128(), (in)), 4); \ - __m128i out##_h = _mm_srai_epi32(_mm_unpackhi_epi16(_mm_setzero_si128(), (in)), 4) + __m128i out##_l = _mm_srai_epi32(_mm_unpacklo_epi16(_mm_setzero_si128(), (in)), 4); \ + __m128i out##_h = _mm_srai_epi32(_mm_unpackhi_epi16(_mm_setzero_si128(), (in)), 4) // wide add #define dct_wadd(out, a, b) \ - __m128i out##_l = _mm_add_epi32(a##_l, b##_l); \ - __m128i out##_h = _mm_add_epi32(a##_h, b##_h) + __m128i out##_l = _mm_add_epi32(a##_l, b##_l); \ + __m128i out##_h = _mm_add_epi32(a##_h, b##_h) // wide sub #define dct_wsub(out, a, b) \ - __m128i out##_l = _mm_sub_epi32(a##_l, b##_l); \ - __m128i out##_h = _mm_sub_epi32(a##_h, b##_h) + __m128i out##_l = _mm_sub_epi32(a##_l, b##_l); \ + __m128i out##_h = _mm_sub_epi32(a##_h, b##_h) // butterfly a/b, add bias, then shift by "s" and pack #define dct_bfly32o(out0, out1, a, b, bias, s) \ - { \ - __m128i abiased_l = _mm_add_epi32(a##_l, bias); \ - __m128i abiased_h = _mm_add_epi32(a##_h, bias); \ - dct_wadd(sum, abiased, b); \ - dct_wsub(dif, abiased, b); \ - out0 = _mm_packs_epi32(_mm_srai_epi32(sum_l, s), _mm_srai_epi32(sum_h, s)); \ - out1 = _mm_packs_epi32(_mm_srai_epi32(dif_l, s), _mm_srai_epi32(dif_h, s)); \ - } + { \ + __m128i abiased_l = _mm_add_epi32(a##_l, bias); \ + __m128i abiased_h = _mm_add_epi32(a##_h, bias); \ + dct_wadd(sum, abiased, b); \ + dct_wsub(dif, abiased, b); \ + out0 = _mm_packs_epi32(_mm_srai_epi32(sum_l, s), _mm_srai_epi32(sum_h, s)); \ + out1 = _mm_packs_epi32(_mm_srai_epi32(dif_l, s), _mm_srai_epi32(dif_h, s)); \ + } // 8-bit interleave step (for transposes) #define dct_interleave8(a, b) \ - tmp = a; \ - a = _mm_unpacklo_epi8(a, b); \ - b = _mm_unpackhi_epi8(tmp, b) + tmp = a; \ + a = _mm_unpacklo_epi8(a, b); \ + b = _mm_unpackhi_epi8(tmp, b) // 16-bit interleave step (for transposes) #define dct_interleave16(a, b) \ - tmp = a; \ - a = _mm_unpacklo_epi16(a, b); \ - b = _mm_unpackhi_epi16(tmp, b) + tmp = a; \ + a = _mm_unpacklo_epi16(a, b); \ + b = _mm_unpackhi_epi16(tmp, b) #define dct_pass(bias, shift) \ - { \ - /* even part */ \ - dct_rot(t2e, t3e, row2, row6, rot0_0, rot0_1); \ - __m128i sum04 = _mm_add_epi16(row0, row4); \ - __m128i dif04 = _mm_sub_epi16(row0, row4); \ - dct_widen(t0e, sum04); \ - dct_widen(t1e, dif04); \ - dct_wadd(x0, t0e, t3e); \ - dct_wsub(x3, t0e, t3e); \ - dct_wadd(x1, t1e, t2e); \ - dct_wsub(x2, t1e, 
t2e); \ - /* odd part */ \ - dct_rot(y0o, y2o, row7, row3, rot2_0, rot2_1); \ - dct_rot(y1o, y3o, row5, row1, rot3_0, rot3_1); \ - __m128i sum17 = _mm_add_epi16(row1, row7); \ - __m128i sum35 = _mm_add_epi16(row3, row5); \ - dct_rot(y4o, y5o, sum17, sum35, rot1_0, rot1_1); \ - dct_wadd(x4, y0o, y4o); \ - dct_wadd(x5, y1o, y5o); \ - dct_wadd(x6, y2o, y5o); \ - dct_wadd(x7, y3o, y4o); \ - dct_bfly32o(row0, row7, x0, x7, bias, shift); \ - dct_bfly32o(row1, row6, x1, x6, bias, shift); \ - dct_bfly32o(row2, row5, x2, x5, bias, shift); \ - dct_bfly32o(row3, row4, x3, x4, bias, shift); \ - } + { \ + /* even part */ \ + dct_rot(t2e, t3e, row2, row6, rot0_0, rot0_1); \ + __m128i sum04 = _mm_add_epi16(row0, row4); \ + __m128i dif04 = _mm_sub_epi16(row0, row4); \ + dct_widen(t0e, sum04); \ + dct_widen(t1e, dif04); \ + dct_wadd(x0, t0e, t3e); \ + dct_wsub(x3, t0e, t3e); \ + dct_wadd(x1, t1e, t2e); \ + dct_wsub(x2, t1e, t2e); \ + /* odd part */ \ + dct_rot(y0o, y2o, row7, row3, rot2_0, rot2_1); \ + dct_rot(y1o, y3o, row5, row1, rot3_0, rot3_1); \ + __m128i sum17 = _mm_add_epi16(row1, row7); \ + __m128i sum35 = _mm_add_epi16(row3, row5); \ + dct_rot(y4o, y5o, sum17, sum35, rot1_0, rot1_1); \ + dct_wadd(x4, y0o, y4o); \ + dct_wadd(x5, y1o, y5o); \ + dct_wadd(x6, y2o, y5o); \ + dct_wadd(x7, y3o, y4o); \ + dct_bfly32o(row0, row7, x0, x7, bias, shift); \ + dct_bfly32o(row1, row6, x1, x6, bias, shift); \ + dct_bfly32o(row2, row5, x2, x5, bias, shift); \ + dct_bfly32o(row3, row4, x3, x4, bias, shift); \ + } - __m128i rot0_0 = dct_const(stbi__f2f(0.5411961f), stbi__f2f(0.5411961f) + stbi__f2f(-1.847759065f)); - __m128i rot0_1 = dct_const(stbi__f2f(0.5411961f) + stbi__f2f(0.765366865f), stbi__f2f(0.5411961f)); - __m128i rot1_0 = dct_const(stbi__f2f(1.175875602f) + stbi__f2f(-0.899976223f), stbi__f2f(1.175875602f)); - __m128i rot1_1 = dct_const(stbi__f2f(1.175875602f), stbi__f2f(1.175875602f) + stbi__f2f(-2.562915447f)); - __m128i rot2_0 = dct_const(stbi__f2f(-1.961570560f) + stbi__f2f(0.298631336f), stbi__f2f(-1.961570560f)); - __m128i rot2_1 = dct_const(stbi__f2f(-1.961570560f), stbi__f2f(-1.961570560f) + stbi__f2f(3.072711026f)); - __m128i rot3_0 = dct_const(stbi__f2f(-0.390180644f) + stbi__f2f(2.053119869f), stbi__f2f(-0.390180644f)); - __m128i rot3_1 = dct_const(stbi__f2f(-0.390180644f), stbi__f2f(-0.390180644f) + stbi__f2f(1.501321110f)); + __m128i rot0_0 = dct_const(stbi__f2f(0.5411961f), stbi__f2f(0.5411961f) + stbi__f2f(-1.847759065f)); + __m128i rot0_1 = dct_const(stbi__f2f(0.5411961f) + stbi__f2f(0.765366865f), stbi__f2f(0.5411961f)); + __m128i rot1_0 = dct_const(stbi__f2f(1.175875602f) + stbi__f2f(-0.899976223f), stbi__f2f(1.175875602f)); + __m128i rot1_1 = dct_const(stbi__f2f(1.175875602f), stbi__f2f(1.175875602f) + stbi__f2f(-2.562915447f)); + __m128i rot2_0 = dct_const(stbi__f2f(-1.961570560f) + stbi__f2f(0.298631336f), stbi__f2f(-1.961570560f)); + __m128i rot2_1 = dct_const(stbi__f2f(-1.961570560f), stbi__f2f(-1.961570560f) + stbi__f2f(3.072711026f)); + __m128i rot3_0 = dct_const(stbi__f2f(-0.390180644f) + stbi__f2f(2.053119869f), stbi__f2f(-0.390180644f)); + __m128i rot3_1 = dct_const(stbi__f2f(-0.390180644f), stbi__f2f(-0.390180644f) + stbi__f2f(1.501321110f)); - // rounding biases in column/row passes, see stbi__idct_block for explanation. - __m128i bias_0 = _mm_set1_epi32(512); - __m128i bias_1 = _mm_set1_epi32(65536 + (128 << 17)); + // rounding biases in column/row passes, see stbi__idct_block for explanation. 
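/* [Editor's note] The two bias constants declared just below, unpacked (illustrative arithmetic only, not part of this diff):

       bias_0 = 512                 // 0.5 * (1 << 10): rounds the column pass (>> 10)
       bias_1 = 65536 + (128 << 17) // row pass (>> 17): 65536 is the 0.5 * (1 << 17)
                                    // rounding term, and 128 << 17 re-centers the
                                    // -128..127 result to 0..255 before the shift

   matching the scalar stbi__idct_block() above term for term, which is what keeps the SSE2 path bit-exact with the C path. */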
+ __m128i bias_0 = _mm_set1_epi32(512); + __m128i bias_1 = _mm_set1_epi32(65536 + (128 << 17)); - // load - row0 = _mm_load_si128((const __m128i *)(data + 0 * 8)); - row1 = _mm_load_si128((const __m128i *)(data + 1 * 8)); - row2 = _mm_load_si128((const __m128i *)(data + 2 * 8)); - row3 = _mm_load_si128((const __m128i *)(data + 3 * 8)); - row4 = _mm_load_si128((const __m128i *)(data + 4 * 8)); - row5 = _mm_load_si128((const __m128i *)(data + 5 * 8)); - row6 = _mm_load_si128((const __m128i *)(data + 6 * 8)); - row7 = _mm_load_si128((const __m128i *)(data + 7 * 8)); + // load + row0 = _mm_load_si128((const __m128i *)(data + 0 * 8)); + row1 = _mm_load_si128((const __m128i *)(data + 1 * 8)); + row2 = _mm_load_si128((const __m128i *)(data + 2 * 8)); + row3 = _mm_load_si128((const __m128i *)(data + 3 * 8)); + row4 = _mm_load_si128((const __m128i *)(data + 4 * 8)); + row5 = _mm_load_si128((const __m128i *)(data + 5 * 8)); + row6 = _mm_load_si128((const __m128i *)(data + 6 * 8)); + row7 = _mm_load_si128((const __m128i *)(data + 7 * 8)); - // column pass - dct_pass(bias_0, 10); + // column pass + dct_pass(bias_0, 10); - { - // 16bit 8x8 transpose pass 1 - dct_interleave16(row0, row4); - dct_interleave16(row1, row5); - dct_interleave16(row2, row6); - dct_interleave16(row3, row7); + { + // 16bit 8x8 transpose pass 1 + dct_interleave16(row0, row4); + dct_interleave16(row1, row5); + dct_interleave16(row2, row6); + dct_interleave16(row3, row7); - // transpose pass 2 - dct_interleave16(row0, row2); - dct_interleave16(row1, row3); - dct_interleave16(row4, row6); - dct_interleave16(row5, row7); + // transpose pass 2 + dct_interleave16(row0, row2); + dct_interleave16(row1, row3); + dct_interleave16(row4, row6); + dct_interleave16(row5, row7); - // transpose pass 3 - dct_interleave16(row0, row1); - dct_interleave16(row2, row3); - dct_interleave16(row4, row5); - dct_interleave16(row6, row7); - } + // transpose pass 3 + dct_interleave16(row0, row1); + dct_interleave16(row2, row3); + dct_interleave16(row4, row5); + dct_interleave16(row6, row7); + } - // row pass - dct_pass(bias_1, 17); + // row pass + dct_pass(bias_1, 17); - { - // pack - __m128i p0 = _mm_packus_epi16(row0, row1); // a0a1a2a3...a7b0b1b2b3...b7 - __m128i p1 = _mm_packus_epi16(row2, row3); - __m128i p2 = _mm_packus_epi16(row4, row5); - __m128i p3 = _mm_packus_epi16(row6, row7); + { + // pack + __m128i p0 = _mm_packus_epi16(row0, row1); // a0a1a2a3...a7b0b1b2b3...b7 + __m128i p1 = _mm_packus_epi16(row2, row3); + __m128i p2 = _mm_packus_epi16(row4, row5); + __m128i p3 = _mm_packus_epi16(row6, row7); - // 8bit 8x8 transpose pass 1 - dct_interleave8(p0, p2); // a0e0a1e1... - dct_interleave8(p1, p3); // c0g0c1g1... + // 8bit 8x8 transpose pass 1 + dct_interleave8(p0, p2); // a0e0a1e1... + dct_interleave8(p1, p3); // c0g0c1g1... - // transpose pass 2 - dct_interleave8(p0, p1); // a0c0e0g0... - dct_interleave8(p2, p3); // b0d0f0h0... + // transpose pass 2 + dct_interleave8(p0, p1); // a0c0e0g0... + dct_interleave8(p2, p3); // b0d0f0h0... - // transpose pass 3 - dct_interleave8(p0, p2); // a0b0c0d0... - dct_interleave8(p1, p3); // a4b4c4d4... + // transpose pass 3 + dct_interleave8(p0, p2); // a0b0c0d0... + dct_interleave8(p1, p3); // a4b4c4d4... 
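/* [Editor's note] Why three interleave passes amount to a full 8x8 byte transpose (a sketch of the reasoning, not part of this diff): each dct_interleave8() is a riffle of two rows, and each pass migrates one bit of the row index into the column index, so after the stride-4, stride-2, and stride-1 passes the index bits (r2 r1 r0 | c2 c1 c0) have swapped halves -- rows have become columns, letting the stores below write final scanlines directly. */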
- // store - _mm_storel_epi64((__m128i *)out, p0); - out += out_stride; - _mm_storel_epi64((__m128i *)out, _mm_shuffle_epi32(p0, 0x4e)); - out += out_stride; - _mm_storel_epi64((__m128i *)out, p2); - out += out_stride; - _mm_storel_epi64((__m128i *)out, _mm_shuffle_epi32(p2, 0x4e)); - out += out_stride; - _mm_storel_epi64((__m128i *)out, p1); - out += out_stride; - _mm_storel_epi64((__m128i *)out, _mm_shuffle_epi32(p1, 0x4e)); - out += out_stride; - _mm_storel_epi64((__m128i *)out, p3); - out += out_stride; - _mm_storel_epi64((__m128i *)out, _mm_shuffle_epi32(p3, 0x4e)); - } + // store + _mm_storel_epi64((__m128i *)out, p0); + out += out_stride; + _mm_storel_epi64((__m128i *)out, _mm_shuffle_epi32(p0, 0x4e)); + out += out_stride; + _mm_storel_epi64((__m128i *)out, p2); + out += out_stride; + _mm_storel_epi64((__m128i *)out, _mm_shuffle_epi32(p2, 0x4e)); + out += out_stride; + _mm_storel_epi64((__m128i *)out, p1); + out += out_stride; + _mm_storel_epi64((__m128i *)out, _mm_shuffle_epi32(p1, 0x4e)); + out += out_stride; + _mm_storel_epi64((__m128i *)out, p3); + out += out_stride; + _mm_storel_epi64((__m128i *)out, _mm_shuffle_epi32(p3, 0x4e)); + } #undef dct_const #undef dct_rot @@ -2987,234 +2985,234 @@ static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64]) // results to the generic C version. static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64]) { - int16x8_t row0, row1, row2, row3, row4, row5, row6, row7; + int16x8_t row0, row1, row2, row3, row4, row5, row6, row7; - int16x4_t rot0_0 = vdup_n_s16(stbi__f2f(0.5411961f)); - int16x4_t rot0_1 = vdup_n_s16(stbi__f2f(-1.847759065f)); - int16x4_t rot0_2 = vdup_n_s16(stbi__f2f(0.765366865f)); - int16x4_t rot1_0 = vdup_n_s16(stbi__f2f(1.175875602f)); - int16x4_t rot1_1 = vdup_n_s16(stbi__f2f(-0.899976223f)); - int16x4_t rot1_2 = vdup_n_s16(stbi__f2f(-2.562915447f)); - int16x4_t rot2_0 = vdup_n_s16(stbi__f2f(-1.961570560f)); - int16x4_t rot2_1 = vdup_n_s16(stbi__f2f(-0.390180644f)); - int16x4_t rot3_0 = vdup_n_s16(stbi__f2f(0.298631336f)); - int16x4_t rot3_1 = vdup_n_s16(stbi__f2f(2.053119869f)); - int16x4_t rot3_2 = vdup_n_s16(stbi__f2f(3.072711026f)); - int16x4_t rot3_3 = vdup_n_s16(stbi__f2f(1.501321110f)); + int16x4_t rot0_0 = vdup_n_s16(stbi__f2f(0.5411961f)); + int16x4_t rot0_1 = vdup_n_s16(stbi__f2f(-1.847759065f)); + int16x4_t rot0_2 = vdup_n_s16(stbi__f2f(0.765366865f)); + int16x4_t rot1_0 = vdup_n_s16(stbi__f2f(1.175875602f)); + int16x4_t rot1_1 = vdup_n_s16(stbi__f2f(-0.899976223f)); + int16x4_t rot1_2 = vdup_n_s16(stbi__f2f(-2.562915447f)); + int16x4_t rot2_0 = vdup_n_s16(stbi__f2f(-1.961570560f)); + int16x4_t rot2_1 = vdup_n_s16(stbi__f2f(-0.390180644f)); + int16x4_t rot3_0 = vdup_n_s16(stbi__f2f(0.298631336f)); + int16x4_t rot3_1 = vdup_n_s16(stbi__f2f(2.053119869f)); + int16x4_t rot3_2 = vdup_n_s16(stbi__f2f(3.072711026f)); + int16x4_t rot3_3 = vdup_n_s16(stbi__f2f(1.501321110f)); #define dct_long_mul(out, inq, coeff) \ - int32x4_t out##_l = vmull_s16(vget_low_s16(inq), coeff); \ - int32x4_t out##_h = vmull_s16(vget_high_s16(inq), coeff) + int32x4_t out##_l = vmull_s16(vget_low_s16(inq), coeff); \ + int32x4_t out##_h = vmull_s16(vget_high_s16(inq), coeff) #define dct_long_mac(out, acc, inq, coeff) \ - int32x4_t out##_l = vmlal_s16(acc##_l, vget_low_s16(inq), coeff); \ - int32x4_t out##_h = vmlal_s16(acc##_h, vget_high_s16(inq), coeff) + int32x4_t out##_l = vmlal_s16(acc##_l, vget_low_s16(inq), coeff); \ + int32x4_t out##_h = vmlal_s16(acc##_h, vget_high_s16(inq), coeff) #define dct_widen(out, 
inq) \ - int32x4_t out##_l = vshll_n_s16(vget_low_s16(inq), 12); \ - int32x4_t out##_h = vshll_n_s16(vget_high_s16(inq), 12) + int32x4_t out##_l = vshll_n_s16(vget_low_s16(inq), 12); \ + int32x4_t out##_h = vshll_n_s16(vget_high_s16(inq), 12) // wide add #define dct_wadd(out, a, b) \ - int32x4_t out##_l = vaddq_s32(a##_l, b##_l); \ - int32x4_t out##_h = vaddq_s32(a##_h, b##_h) + int32x4_t out##_l = vaddq_s32(a##_l, b##_l); \ + int32x4_t out##_h = vaddq_s32(a##_h, b##_h) // wide sub #define dct_wsub(out, a, b) \ - int32x4_t out##_l = vsubq_s32(a##_l, b##_l); \ - int32x4_t out##_h = vsubq_s32(a##_h, b##_h) + int32x4_t out##_l = vsubq_s32(a##_l, b##_l); \ + int32x4_t out##_h = vsubq_s32(a##_h, b##_h) // butterfly a/b, then shift using "shiftop" by "s" and pack #define dct_bfly32o(out0, out1, a, b, shiftop, s) \ - { \ - dct_wadd(sum, a, b); \ - dct_wsub(dif, a, b); \ - out0 = vcombine_s16(shiftop(sum_l, s), shiftop(sum_h, s)); \ - out1 = vcombine_s16(shiftop(dif_l, s), shiftop(dif_h, s)); \ - } + { \ + dct_wadd(sum, a, b); \ + dct_wsub(dif, a, b); \ + out0 = vcombine_s16(shiftop(sum_l, s), shiftop(sum_h, s)); \ + out1 = vcombine_s16(shiftop(dif_l, s), shiftop(dif_h, s)); \ + } #define dct_pass(shiftop, shift) \ - { \ - /* even part */ \ - int16x8_t sum26 = vaddq_s16(row2, row6); \ - dct_long_mul(p1e, sum26, rot0_0); \ - dct_long_mac(t2e, p1e, row6, rot0_1); \ - dct_long_mac(t3e, p1e, row2, rot0_2); \ - int16x8_t sum04 = vaddq_s16(row0, row4); \ - int16x8_t dif04 = vsubq_s16(row0, row4); \ - dct_widen(t0e, sum04); \ - dct_widen(t1e, dif04); \ - dct_wadd(x0, t0e, t3e); \ - dct_wsub(x3, t0e, t3e); \ - dct_wadd(x1, t1e, t2e); \ - dct_wsub(x2, t1e, t2e); \ - /* odd part */ \ - int16x8_t sum15 = vaddq_s16(row1, row5); \ - int16x8_t sum17 = vaddq_s16(row1, row7); \ - int16x8_t sum35 = vaddq_s16(row3, row5); \ - int16x8_t sum37 = vaddq_s16(row3, row7); \ - int16x8_t sumodd = vaddq_s16(sum17, sum35); \ - dct_long_mul(p5o, sumodd, rot1_0); \ - dct_long_mac(p1o, p5o, sum17, rot1_1); \ - dct_long_mac(p2o, p5o, sum35, rot1_2); \ - dct_long_mul(p3o, sum37, rot2_0); \ - dct_long_mul(p4o, sum15, rot2_1); \ - dct_wadd(sump13o, p1o, p3o); \ - dct_wadd(sump24o, p2o, p4o); \ - dct_wadd(sump23o, p2o, p3o); \ - dct_wadd(sump14o, p1o, p4o); \ - dct_long_mac(x4, sump13o, row7, rot3_0); \ - dct_long_mac(x5, sump24o, row5, rot3_1); \ - dct_long_mac(x6, sump23o, row3, rot3_2); \ - dct_long_mac(x7, sump14o, row1, rot3_3); \ - dct_bfly32o(row0, row7, x0, x7, shiftop, shift); \ - dct_bfly32o(row1, row6, x1, x6, shiftop, shift); \ - dct_bfly32o(row2, row5, x2, x5, shiftop, shift); \ - dct_bfly32o(row3, row4, x3, x4, shiftop, shift); \ - } + { \ + /* even part */ \ + int16x8_t sum26 = vaddq_s16(row2, row6); \ + dct_long_mul(p1e, sum26, rot0_0); \ + dct_long_mac(t2e, p1e, row6, rot0_1); \ + dct_long_mac(t3e, p1e, row2, rot0_2); \ + int16x8_t sum04 = vaddq_s16(row0, row4); \ + int16x8_t dif04 = vsubq_s16(row0, row4); \ + dct_widen(t0e, sum04); \ + dct_widen(t1e, dif04); \ + dct_wadd(x0, t0e, t3e); \ + dct_wsub(x3, t0e, t3e); \ + dct_wadd(x1, t1e, t2e); \ + dct_wsub(x2, t1e, t2e); \ + /* odd part */ \ + int16x8_t sum15 = vaddq_s16(row1, row5); \ + int16x8_t sum17 = vaddq_s16(row1, row7); \ + int16x8_t sum35 = vaddq_s16(row3, row5); \ + int16x8_t sum37 = vaddq_s16(row3, row7); \ + int16x8_t sumodd = vaddq_s16(sum17, sum35); \ + dct_long_mul(p5o, sumodd, rot1_0); \ + dct_long_mac(p1o, p5o, sum17, rot1_1); \ + dct_long_mac(p2o, p5o, sum35, rot1_2); \ + dct_long_mul(p3o, sum37, rot2_0); \ + dct_long_mul(p4o, sum15, rot2_1); \ + 
dct_wadd(sump13o, p1o, p3o); \ + dct_wadd(sump24o, p2o, p4o); \ + dct_wadd(sump23o, p2o, p3o); \ + dct_wadd(sump14o, p1o, p4o); \ + dct_long_mac(x4, sump13o, row7, rot3_0); \ + dct_long_mac(x5, sump24o, row5, rot3_1); \ + dct_long_mac(x6, sump23o, row3, rot3_2); \ + dct_long_mac(x7, sump14o, row1, rot3_3); \ + dct_bfly32o(row0, row7, x0, x7, shiftop, shift); \ + dct_bfly32o(row1, row6, x1, x6, shiftop, shift); \ + dct_bfly32o(row2, row5, x2, x5, shiftop, shift); \ + dct_bfly32o(row3, row4, x3, x4, shiftop, shift); \ + } - // load - row0 = vld1q_s16(data + 0 * 8); - row1 = vld1q_s16(data + 1 * 8); - row2 = vld1q_s16(data + 2 * 8); - row3 = vld1q_s16(data + 3 * 8); - row4 = vld1q_s16(data + 4 * 8); - row5 = vld1q_s16(data + 5 * 8); - row6 = vld1q_s16(data + 6 * 8); - row7 = vld1q_s16(data + 7 * 8); + // load + row0 = vld1q_s16(data + 0 * 8); + row1 = vld1q_s16(data + 1 * 8); + row2 = vld1q_s16(data + 2 * 8); + row3 = vld1q_s16(data + 3 * 8); + row4 = vld1q_s16(data + 4 * 8); + row5 = vld1q_s16(data + 5 * 8); + row6 = vld1q_s16(data + 6 * 8); + row7 = vld1q_s16(data + 7 * 8); - // add DC bias - row0 = vaddq_s16(row0, vsetq_lane_s16(1024, vdupq_n_s16(0), 0)); + // add DC bias + row0 = vaddq_s16(row0, vsetq_lane_s16(1024, vdupq_n_s16(0), 0)); - // column pass - dct_pass(vrshrn_n_s32, 10); + // column pass + dct_pass(vrshrn_n_s32, 10); - // 16bit 8x8 transpose - { + // 16bit 8x8 transpose + { // these three map to a single VTRN.16, VTRN.32, and VSWP, respectively. // whether compilers actually get this is another story, sadly. #define dct_trn16(x, y) \ - { \ - int16x8x2_t t = vtrnq_s16(x, y); \ - x = t.val[0]; \ - y = t.val[1]; \ - } + { \ + int16x8x2_t t = vtrnq_s16(x, y); \ + x = t.val[0]; \ + y = t.val[1]; \ + } #define dct_trn32(x, y) \ - { \ - int32x4x2_t t = vtrnq_s32(vreinterpretq_s32_s16(x), vreinterpretq_s32_s16(y)); \ - x = vreinterpretq_s16_s32(t.val[0]); \ - y = vreinterpretq_s16_s32(t.val[1]); \ - } + { \ + int32x4x2_t t = vtrnq_s32(vreinterpretq_s32_s16(x), vreinterpretq_s32_s16(y)); \ + x = vreinterpretq_s16_s32(t.val[0]); \ + y = vreinterpretq_s16_s32(t.val[1]); \ + } #define dct_trn64(x, y) \ - { \ - int16x8_t x0 = x; \ - int16x8_t y0 = y; \ - x = vcombine_s16(vget_low_s16(x0), vget_low_s16(y0)); \ - y = vcombine_s16(vget_high_s16(x0), vget_high_s16(y0)); \ - } + { \ + int16x8_t x0 = x; \ + int16x8_t y0 = y; \ + x = vcombine_s16(vget_low_s16(x0), vget_low_s16(y0)); \ + y = vcombine_s16(vget_high_s16(x0), vget_high_s16(y0)); \ + } - // pass 1 - dct_trn16(row0, row1); // a0b0a2b2a4b4a6b6 - dct_trn16(row2, row3); - dct_trn16(row4, row5); - dct_trn16(row6, row7); + // pass 1 + dct_trn16(row0, row1); // a0b0a2b2a4b4a6b6 + dct_trn16(row2, row3); + dct_trn16(row4, row5); + dct_trn16(row6, row7); - // pass 2 - dct_trn32(row0, row2); // a0b0c0d0a4b4c4d4 - dct_trn32(row1, row3); - dct_trn32(row4, row6); - dct_trn32(row5, row7); + // pass 2 + dct_trn32(row0, row2); // a0b0c0d0a4b4c4d4 + dct_trn32(row1, row3); + dct_trn32(row4, row6); + dct_trn32(row5, row7); - // pass 3 - dct_trn64(row0, row4); // a0b0c0d0e0f0g0h0 - dct_trn64(row1, row5); - dct_trn64(row2, row6); - dct_trn64(row3, row7); + // pass 3 + dct_trn64(row0, row4); // a0b0c0d0e0f0g0h0 + dct_trn64(row1, row5); + dct_trn64(row2, row6); + dct_trn64(row3, row7); #undef dct_trn16 #undef dct_trn32 #undef dct_trn64 - } + } - // row pass - // vrshrn_n_s32 only supports shifts up to 16, we need - // 17. so do a non-rounding shift of 16 first then follow - // up with a rounding shift by 1. 
- dct_pass(vshrn_n_s32, 16); + // row pass + // vrshrn_n_s32 only supports shifts up to 16, we need + // 17. so do a non-rounding shift of 16 first then follow + // up with a rounding shift by 1. + dct_pass(vshrn_n_s32, 16); - { - // pack and round - uint8x8_t p0 = vqrshrun_n_s16(row0, 1); - uint8x8_t p1 = vqrshrun_n_s16(row1, 1); - uint8x8_t p2 = vqrshrun_n_s16(row2, 1); - uint8x8_t p3 = vqrshrun_n_s16(row3, 1); - uint8x8_t p4 = vqrshrun_n_s16(row4, 1); - uint8x8_t p5 = vqrshrun_n_s16(row5, 1); - uint8x8_t p6 = vqrshrun_n_s16(row6, 1); - uint8x8_t p7 = vqrshrun_n_s16(row7, 1); + { + // pack and round + uint8x8_t p0 = vqrshrun_n_s16(row0, 1); + uint8x8_t p1 = vqrshrun_n_s16(row1, 1); + uint8x8_t p2 = vqrshrun_n_s16(row2, 1); + uint8x8_t p3 = vqrshrun_n_s16(row3, 1); + uint8x8_t p4 = vqrshrun_n_s16(row4, 1); + uint8x8_t p5 = vqrshrun_n_s16(row5, 1); + uint8x8_t p6 = vqrshrun_n_s16(row6, 1); + uint8x8_t p7 = vqrshrun_n_s16(row7, 1); - // again, these can translate into one instruction, but often don't. + // again, these can translate into one instruction, but often don't. #define dct_trn8_8(x, y) \ - { \ - uint8x8x2_t t = vtrn_u8(x, y); \ - x = t.val[0]; \ - y = t.val[1]; \ - } + { \ + uint8x8x2_t t = vtrn_u8(x, y); \ + x = t.val[0]; \ + y = t.val[1]; \ + } #define dct_trn8_16(x, y) \ - { \ - uint16x4x2_t t = vtrn_u16(vreinterpret_u16_u8(x), vreinterpret_u16_u8(y)); \ - x = vreinterpret_u8_u16(t.val[0]); \ - y = vreinterpret_u8_u16(t.val[1]); \ - } + { \ + uint16x4x2_t t = vtrn_u16(vreinterpret_u16_u8(x), vreinterpret_u16_u8(y)); \ + x = vreinterpret_u8_u16(t.val[0]); \ + y = vreinterpret_u8_u16(t.val[1]); \ + } #define dct_trn8_32(x, y) \ - { \ - uint32x2x2_t t = vtrn_u32(vreinterpret_u32_u8(x), vreinterpret_u32_u8(y)); \ - x = vreinterpret_u8_u32(t.val[0]); \ - y = vreinterpret_u8_u32(t.val[1]); \ - } + { \ + uint32x2x2_t t = vtrn_u32(vreinterpret_u32_u8(x), vreinterpret_u32_u8(y)); \ + x = vreinterpret_u8_u32(t.val[0]); \ + y = vreinterpret_u8_u32(t.val[1]); \ + } - // sadly can't use interleaved stores here since we only write - // 8 bytes to each scan line! + // sadly can't use interleaved stores here since we only write + // 8 bytes to each scan line! 
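+ // (an interleaved vst2/vst4 store writes one contiguous run, but each
+ // output row starts out_stride bytes apart, so instead we transpose in
+ // registers and store one 8-byte row at a time with vst1_u8.)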
- // 8x8 8-bit transpose pass 1 - dct_trn8_8(p0, p1); - dct_trn8_8(p2, p3); - dct_trn8_8(p4, p5); - dct_trn8_8(p6, p7); + // 8x8 8-bit transpose pass 1 + dct_trn8_8(p0, p1); + dct_trn8_8(p2, p3); + dct_trn8_8(p4, p5); + dct_trn8_8(p6, p7); - // pass 2 - dct_trn8_16(p0, p2); - dct_trn8_16(p1, p3); - dct_trn8_16(p4, p6); - dct_trn8_16(p5, p7); + // pass 2 + dct_trn8_16(p0, p2); + dct_trn8_16(p1, p3); + dct_trn8_16(p4, p6); + dct_trn8_16(p5, p7); - // pass 3 - dct_trn8_32(p0, p4); - dct_trn8_32(p1, p5); - dct_trn8_32(p2, p6); - dct_trn8_32(p3, p7); + // pass 3 + dct_trn8_32(p0, p4); + dct_trn8_32(p1, p5); + dct_trn8_32(p2, p6); + dct_trn8_32(p3, p7); - // store - vst1_u8(out, p0); - out += out_stride; - vst1_u8(out, p1); - out += out_stride; - vst1_u8(out, p2); - out += out_stride; - vst1_u8(out, p3); - out += out_stride; - vst1_u8(out, p4); - out += out_stride; - vst1_u8(out, p5); - out += out_stride; - vst1_u8(out, p6); - out += out_stride; - vst1_u8(out, p7); + // store + vst1_u8(out, p0); + out += out_stride; + vst1_u8(out, p1); + out += out_stride; + vst1_u8(out, p2); + out += out_stride; + vst1_u8(out, p3); + out += out_stride; + vst1_u8(out, p4); + out += out_stride; + vst1_u8(out, p5); + out += out_stride; + vst1_u8(out, p6); + out += out_stride; + vst1_u8(out, p7); #undef dct_trn8_8 #undef dct_trn8_16 #undef dct_trn8_32 - } + } #undef dct_long_mul #undef dct_long_mac @@ -3233,19 +3231,19 @@ static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64]) // marker, return 0xff, which is never a valid marker value static stbi_uc stbi__get_marker(stbi__jpeg *j) { - stbi_uc x; - if (j->marker != STBI__MARKER_none) - { - x = j->marker; - j->marker = STBI__MARKER_none; - return x; - } - x = stbi__get8(j->s); - if (x != 0xff) - return STBI__MARKER_none; - while (x == 0xff) - x = stbi__get8(j->s); // consume repeated 0xff fill bytes - return x; + stbi_uc x; + if (j->marker != STBI__MARKER_none) + { + x = j->marker; + j->marker = STBI__MARKER_none; + return x; + } + x = stbi__get8(j->s); + if (x != 0xff) + return STBI__MARKER_none; + while (x == 0xff) + x = stbi__get8(j->s); // consume repeated 0xff fill bytes + return x; } // in each scan, we'll have scan_n components, and the order @@ -3256,533 +3254,533 @@ static stbi_uc stbi__get_marker(stbi__jpeg *j) // the dc prediction static void stbi__jpeg_reset(stbi__jpeg *j) { - j->code_bits = 0; - j->code_buffer = 0; - j->nomore = 0; - j->img_comp[0].dc_pred = j->img_comp[1].dc_pred = j->img_comp[2].dc_pred = j->img_comp[3].dc_pred = 0; - j->marker = STBI__MARKER_none; - j->todo = j->restart_interval ? j->restart_interval : 0x7fffffff; - j->eob_run = 0; - // no more than 1<<31 MCUs if no restart_interal? that's plenty safe, - // since we don't even allow 1<<30 pixels + j->code_bits = 0; + j->code_buffer = 0; + j->nomore = 0; + j->img_comp[0].dc_pred = j->img_comp[1].dc_pred = j->img_comp[2].dc_pred = j->img_comp[3].dc_pred = 0; + j->marker = STBI__MARKER_none; + j->todo = j->restart_interval ? j->restart_interval : 0x7fffffff; + j->eob_run = 0; + // no more than 1<<31 MCUs if no restart_interval?
that's plenty safe, + // since we don't even allow 1<<30 pixels } static int stbi__parse_entropy_coded_data(stbi__jpeg *z) { - stbi__jpeg_reset(z); - if (!z->progressive) - { - if (z->scan_n == 1) - { - int i, j; - STBI_SIMD_ALIGN(short, data[64]); - int n = z->order[0]; - // non-interleaved data, we just need to process one block at a time, - // in trivial scanline order - // number of blocks to do just depends on how many actual "pixels" this - // component has, independent of interleaved MCU blocking and such - int w = (z->img_comp[n].x + 7) >> 3; - int h = (z->img_comp[n].y + 7) >> 3; - for (j = 0; j < h; ++j) - { - for (i = 0; i < w; ++i) - { - int ha = z->img_comp[n].ha; - if (!stbi__jpeg_decode_block(z, data, z->huff_dc + z->img_comp[n].hd, z->huff_ac + ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) - return 0; - z->idct_block_kernel(z->img_comp[n].data + z->img_comp[n].w2 * j * 8 + i * 8, z->img_comp[n].w2, data); - // every data block is an MCU, so countdown the restart interval - if (--z->todo <= 0) - { - if (z->code_bits < 24) - stbi__grow_buffer_unsafe(z); - // if it's NOT a restart, then just bail, so we get corrupt data - // rather than no data - if (!STBI__RESTART(z->marker)) - return 1; - stbi__jpeg_reset(z); - } - } - } - return 1; - } - else - { // interleaved - int i, j, k, x, y; - STBI_SIMD_ALIGN(short, data[64]); - for (j = 0; j < z->img_mcu_y; ++j) - { - for (i = 0; i < z->img_mcu_x; ++i) - { - // scan an interleaved mcu... process scan_n components in order - for (k = 0; k < z->scan_n; ++k) - { - int n = z->order[k]; - // scan out an mcu's worth of this component; that's just determined - // by the basic H and V specified for the component - for (y = 0; y < z->img_comp[n].v; ++y) - { - for (x = 0; x < z->img_comp[n].h; ++x) - { - int x2 = (i * z->img_comp[n].h + x) * 8; - int y2 = (j * z->img_comp[n].v + y) * 8; - int ha = z->img_comp[n].ha; - if (!stbi__jpeg_decode_block(z, data, z->huff_dc + z->img_comp[n].hd, z->huff_ac + ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) - return 0; - z->idct_block_kernel(z->img_comp[n].data + z->img_comp[n].w2 * y2 + x2, z->img_comp[n].w2, data); - } - } - } - // after all interleaved components, that's an interleaved MCU, - // so now count down the restart interval - if (--z->todo <= 0) - { - if (z->code_bits < 24) - stbi__grow_buffer_unsafe(z); - if (!STBI__RESTART(z->marker)) - return 1; - stbi__jpeg_reset(z); - } - } - } - return 1; - } - } - else - { - if (z->scan_n == 1) - { - int i, j; - int n = z->order[0]; - // non-interleaved data, we just need to process one block at a time, - // in trivial scanline order - // number of blocks to do just depends on how many actual "pixels" this - // component has, independent of interleaved MCU blocking and such - int w = (z->img_comp[n].x + 7) >> 3; - int h = (z->img_comp[n].y + 7) >> 3; - for (j = 0; j < h; ++j) - { - for (i = 0; i < w; ++i) - { - short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w); - if (z->spec_start == 0) - { - if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n)) - return 0; - } - else - { - int ha = z->img_comp[n].ha; - if (!stbi__jpeg_decode_block_prog_ac(z, data, &z->huff_ac[ha], z->fast_ac[ha])) - return 0; - } - // every data block is an MCU, so countdown the restart interval - if (--z->todo <= 0) - { - if (z->code_bits < 24) - stbi__grow_buffer_unsafe(z); - if (!STBI__RESTART(z->marker)) - return 1; - stbi__jpeg_reset(z); - } - } - } - return 1; - } - else - { // interleaved - int i, j, k, x, 
y; - for (j = 0; j < z->img_mcu_y; ++j) - { - for (i = 0; i < z->img_mcu_x; ++i) - { - // scan an interleaved mcu... process scan_n components in order - for (k = 0; k < z->scan_n; ++k) - { - int n = z->order[k]; - // scan out an mcu's worth of this component; that's just determined - // by the basic H and V specified for the component - for (y = 0; y < z->img_comp[n].v; ++y) - { - for (x = 0; x < z->img_comp[n].h; ++x) - { - int x2 = (i * z->img_comp[n].h + x); - int y2 = (j * z->img_comp[n].v + y); - short *data = z->img_comp[n].coeff + 64 * (x2 + y2 * z->img_comp[n].coeff_w); - if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n)) - return 0; - } - } - } - // after all interleaved components, that's an interleaved MCU, - // so now count down the restart interval - if (--z->todo <= 0) - { - if (z->code_bits < 24) - stbi__grow_buffer_unsafe(z); - if (!STBI__RESTART(z->marker)) - return 1; - stbi__jpeg_reset(z); - } - } - } - return 1; - } - } + stbi__jpeg_reset(z); + if (!z->progressive) + { + if (z->scan_n == 1) + { + int i, j; + STBI_SIMD_ALIGN(short, data[64]); + int n = z->order[0]; + // non-interleaved data, we just need to process one block at a time, + // in trivial scanline order + // number of blocks to do just depends on how many actual "pixels" this + // component has, independent of interleaved MCU blocking and such + int w = (z->img_comp[n].x + 7) >> 3; + int h = (z->img_comp[n].y + 7) >> 3; + for (j = 0; j < h; ++j) + { + for (i = 0; i < w; ++i) + { + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block(z, data, z->huff_dc + z->img_comp[n].hd, z->huff_ac + ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) + return 0; + z->idct_block_kernel(z->img_comp[n].data + z->img_comp[n].w2 * j * 8 + i * 8, z->img_comp[n].w2, data); + // every data block is an MCU, so countdown the restart interval + if (--z->todo <= 0) + { + if (z->code_bits < 24) + stbi__grow_buffer_unsafe(z); + // if it's NOT a restart, then just bail, so we get corrupt data + // rather than no data + if (!STBI__RESTART(z->marker)) + return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } + else + { // interleaved + int i, j, k, x, y; + STBI_SIMD_ALIGN(short, data[64]); + for (j = 0; j < z->img_mcu_y; ++j) + { + for (i = 0; i < z->img_mcu_x; ++i) + { + // scan an interleaved mcu... 
process scan_n components in order + for (k = 0; k < z->scan_n; ++k) + { + int n = z->order[k]; + // scan out an mcu's worth of this component; that's just determined + // by the basic H and V specified for the component + for (y = 0; y < z->img_comp[n].v; ++y) + { + for (x = 0; x < z->img_comp[n].h; ++x) + { + int x2 = (i * z->img_comp[n].h + x) * 8; + int y2 = (j * z->img_comp[n].v + y) * 8; + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block(z, data, z->huff_dc + z->img_comp[n].hd, z->huff_ac + ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) + return 0; + z->idct_block_kernel(z->img_comp[n].data + z->img_comp[n].w2 * y2 + x2, z->img_comp[n].w2, data); + } + } + } + // after all interleaved components, that's an interleaved MCU, + // so now count down the restart interval + if (--z->todo <= 0) + { + if (z->code_bits < 24) + stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) + return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } + } + else + { + if (z->scan_n == 1) + { + int i, j; + int n = z->order[0]; + // non-interleaved data, we just need to process one block at a time, + // in trivial scanline order + // number of blocks to do just depends on how many actual "pixels" this + // component has, independent of interleaved MCU blocking and such + int w = (z->img_comp[n].x + 7) >> 3; + int h = (z->img_comp[n].y + 7) >> 3; + for (j = 0; j < h; ++j) + { + for (i = 0; i < w; ++i) + { + short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w); + if (z->spec_start == 0) + { + if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n)) + return 0; + } + else + { + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block_prog_ac(z, data, &z->huff_ac[ha], z->fast_ac[ha])) + return 0; + } + // every data block is an MCU, so countdown the restart interval + if (--z->todo <= 0) + { + if (z->code_bits < 24) + stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) + return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } + else + { // interleaved + int i, j, k, x, y; + for (j = 0; j < z->img_mcu_y; ++j) + { + for (i = 0; i < z->img_mcu_x; ++i) + { + // scan an interleaved mcu... 
process scan_n components in order + for (k = 0; k < z->scan_n; ++k) + { + int n = z->order[k]; + // scan out an mcu's worth of this component; that's just determined + // by the basic H and V specified for the component + for (y = 0; y < z->img_comp[n].v; ++y) + { + for (x = 0; x < z->img_comp[n].h; ++x) + { + int x2 = (i * z->img_comp[n].h + x); + int y2 = (j * z->img_comp[n].v + y); + short *data = z->img_comp[n].coeff + 64 * (x2 + y2 * z->img_comp[n].coeff_w); + if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n)) + return 0; + } + } + } + // after all interleaved components, that's an interleaved MCU, + // so now count down the restart interval + if (--z->todo <= 0) + { + if (z->code_bits < 24) + stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) + return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } + } } static void stbi__jpeg_dequantize(short *data, stbi__uint16 *dequant) { - int i; - for (i = 0; i < 64; ++i) - data[i] *= dequant[i]; + int i; + for (i = 0; i < 64; ++i) + data[i] *= dequant[i]; } static void stbi__jpeg_finish(stbi__jpeg *z) { - if (z->progressive) - { - // dequantize and idct the data - int i, j, n; - for (n = 0; n < z->s->img_n; ++n) - { - int w = (z->img_comp[n].x + 7) >> 3; - int h = (z->img_comp[n].y + 7) >> 3; - for (j = 0; j < h; ++j) - { - for (i = 0; i < w; ++i) - { - short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w); - stbi__jpeg_dequantize(data, z->dequant[z->img_comp[n].tq]); - z->idct_block_kernel(z->img_comp[n].data + z->img_comp[n].w2 * j * 8 + i * 8, z->img_comp[n].w2, data); - } - } - } - } + if (z->progressive) + { + // dequantize and idct the data + int i, j, n; + for (n = 0; n < z->s->img_n; ++n) + { + int w = (z->img_comp[n].x + 7) >> 3; + int h = (z->img_comp[n].y + 7) >> 3; + for (j = 0; j < h; ++j) + { + for (i = 0; i < w; ++i) + { + short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w); + stbi__jpeg_dequantize(data, z->dequant[z->img_comp[n].tq]); + z->idct_block_kernel(z->img_comp[n].data + z->img_comp[n].w2 * j * 8 + i * 8, z->img_comp[n].w2, data); + } + } + } + } } static int stbi__process_marker(stbi__jpeg *z, int m) { - int L; - switch (m) - { - case STBI__MARKER_none: // no marker found - return stbi__err("expected marker", "Corrupt JPEG"); + int L; + switch (m) + { + case STBI__MARKER_none: // no marker found + return stbi__err("expected marker", "Corrupt JPEG"); - case 0xDD: // DRI - specify restart interval - if (stbi__get16be(z->s) != 4) - return stbi__err("bad DRI len", "Corrupt JPEG"); - z->restart_interval = stbi__get16be(z->s); - return 1; + case 0xDD: // DRI - specify restart interval + if (stbi__get16be(z->s) != 4) + return stbi__err("bad DRI len", "Corrupt JPEG"); + z->restart_interval = stbi__get16be(z->s); + return 1; - case 0xDB: // DQT - define quantization table - L = stbi__get16be(z->s) - 2; - while (L > 0) - { - int q = stbi__get8(z->s); - int p = q >> 4, sixteen = (p != 0); - int t = q & 15, i; - if (p != 0 && p != 1) - return stbi__err("bad DQT type", "Corrupt JPEG"); - if (t > 3) - return stbi__err("bad DQT table", "Corrupt JPEG"); + case 0xDB: // DQT - define quantization table + L = stbi__get16be(z->s) - 2; + while (L > 0) + { + int q = stbi__get8(z->s); + int p = q >> 4, sixteen = (p != 0); + int t = q & 15, i; + if (p != 0 && p != 1) + return stbi__err("bad DQT type", "Corrupt JPEG"); + if (t > 3) + return stbi__err("bad DQT table", "Corrupt JPEG"); - for (i = 0; i < 64; ++i) - z->dequant[t][stbi__jpeg_dezigzag[i]] = 
(stbi__uint16)(sixteen ? stbi__get16be(z->s) : stbi__get8(z->s)); - L -= (sixteen ? 129 : 65); - } - return L == 0; + for (i = 0; i < 64; ++i) + z->dequant[t][stbi__jpeg_dezigzag[i]] = (stbi__uint16)(sixteen ? stbi__get16be(z->s) : stbi__get8(z->s)); + L -= (sixteen ? 129 : 65); + } + return L == 0; - case 0xC4: // DHT - define huffman table - L = stbi__get16be(z->s) - 2; - while (L > 0) - { - stbi_uc *v; - int sizes[16], i, n = 0; - int q = stbi__get8(z->s); - int tc = q >> 4; - int th = q & 15; - if (tc > 1 || th > 3) - return stbi__err("bad DHT header", "Corrupt JPEG"); - for (i = 0; i < 16; ++i) - { - sizes[i] = stbi__get8(z->s); - n += sizes[i]; - } - if (n > 256) - return stbi__err("bad DHT header", "Corrupt JPEG"); // Loop over i < n would write past end of values! - L -= 17; - if (tc == 0) - { - if (!stbi__build_huffman(z->huff_dc + th, sizes)) - return 0; - v = z->huff_dc[th].values; - } - else - { - if (!stbi__build_huffman(z->huff_ac + th, sizes)) - return 0; - v = z->huff_ac[th].values; - } - for (i = 0; i < n; ++i) - v[i] = stbi__get8(z->s); - if (tc != 0) - stbi__build_fast_ac(z->fast_ac[th], z->huff_ac + th); - L -= n; - } - return L == 0; - } + case 0xC4: // DHT - define huffman table + L = stbi__get16be(z->s) - 2; + while (L > 0) + { + stbi_uc *v; + int sizes[16], i, n = 0; + int q = stbi__get8(z->s); + int tc = q >> 4; + int th = q & 15; + if (tc > 1 || th > 3) + return stbi__err("bad DHT header", "Corrupt JPEG"); + for (i = 0; i < 16; ++i) + { + sizes[i] = stbi__get8(z->s); + n += sizes[i]; + } + if (n > 256) + return stbi__err("bad DHT header", "Corrupt JPEG"); // Loop over i < n would write past end of values! + L -= 17; + if (tc == 0) + { + if (!stbi__build_huffman(z->huff_dc + th, sizes)) + return 0; + v = z->huff_dc[th].values; + } + else + { + if (!stbi__build_huffman(z->huff_ac + th, sizes)) + return 0; + v = z->huff_ac[th].values; + } + for (i = 0; i < n; ++i) + v[i] = stbi__get8(z->s); + if (tc != 0) + stbi__build_fast_ac(z->fast_ac[th], z->huff_ac + th); + L -= n; + } + return L == 0; + } - // check for comment block or APP blocks - if ((m >= 0xE0 && m <= 0xEF) || m == 0xFE) - { - L = stbi__get16be(z->s); - if (L < 2) - { - if (m == 0xFE) - return stbi__err("bad COM len", "Corrupt JPEG"); - else - return stbi__err("bad APP len", "Corrupt JPEG"); - } - L -= 2; + // check for comment block or APP blocks + if ((m >= 0xE0 && m <= 0xEF) || m == 0xFE) + { + L = stbi__get16be(z->s); + if (L < 2) + { + if (m == 0xFE) + return stbi__err("bad COM len", "Corrupt JPEG"); + else + return stbi__err("bad APP len", "Corrupt JPEG"); + } + L -= 2; - if (m == 0xE0 && L >= 5) - { // JFIF APP0 segment - static const unsigned char tag[5] = {'J', 'F', 'I', 'F', '\0'}; - int ok = 1; - int i; - for (i = 0; i < 5; ++i) - if (stbi__get8(z->s) != tag[i]) - ok = 0; - L -= 5; - if (ok) - z->jfif = 1; - } - else if (m == 0xEE && L >= 12) - { // Adobe APP14 segment - static const unsigned char tag[6] = {'A', 'd', 'o', 'b', 'e', '\0'}; - int ok = 1; - int i; - for (i = 0; i < 6; ++i) - if (stbi__get8(z->s) != tag[i]) - ok = 0; - L -= 6; - if (ok) - { - stbi__get8(z->s); // version - stbi__get16be(z->s); // flags0 - stbi__get16be(z->s); // flags1 - z->app14_color_transform = stbi__get8(z->s); // color transform - L -= 6; - } - } + if (m == 0xE0 && L >= 5) + { // JFIF APP0 segment + static const unsigned char tag[5] = {'J', 'F', 'I', 'F', '\0'}; + int ok = 1; + int i; + for (i = 0; i < 5; ++i) + if (stbi__get8(z->s) != tag[i]) + ok = 0; + L -= 5; + if (ok) + z->jfif = 1; + } + else if (m == 0xEE 
&& L >= 12) + { // Adobe APP14 segment + static const unsigned char tag[6] = {'A', 'd', 'o', 'b', 'e', '\0'}; + int ok = 1; + int i; + for (i = 0; i < 6; ++i) + if (stbi__get8(z->s) != tag[i]) + ok = 0; + L -= 6; + if (ok) + { + stbi__get8(z->s); // version + stbi__get16be(z->s); // flags0 + stbi__get16be(z->s); // flags1 + z->app14_color_transform = stbi__get8(z->s); // color transform + L -= 6; + } + } - stbi__skip(z->s, L); - return 1; - } + stbi__skip(z->s, L); + return 1; + } - return stbi__err("unknown marker", "Corrupt JPEG"); + return stbi__err("unknown marker", "Corrupt JPEG"); } // after we see SOS static int stbi__process_scan_header(stbi__jpeg *z) { - int i; - int Ls = stbi__get16be(z->s); - z->scan_n = stbi__get8(z->s); - if (z->scan_n < 1 || z->scan_n > 4 || z->scan_n > (int)z->s->img_n) - return stbi__err("bad SOS component count", "Corrupt JPEG"); - if (Ls != 6 + 2 * z->scan_n) - return stbi__err("bad SOS len", "Corrupt JPEG"); - for (i = 0; i < z->scan_n; ++i) - { - int id = stbi__get8(z->s), which; - int q = stbi__get8(z->s); - for (which = 0; which < z->s->img_n; ++which) - if (z->img_comp[which].id == id) - break; - if (which == z->s->img_n) - return 0; // no match - z->img_comp[which].hd = q >> 4; - if (z->img_comp[which].hd > 3) - return stbi__err("bad DC huff", "Corrupt JPEG"); - z->img_comp[which].ha = q & 15; - if (z->img_comp[which].ha > 3) - return stbi__err("bad AC huff", "Corrupt JPEG"); - z->order[i] = which; - } + int i; + int Ls = stbi__get16be(z->s); + z->scan_n = stbi__get8(z->s); + if (z->scan_n < 1 || z->scan_n > 4 || z->scan_n > (int)z->s->img_n) + return stbi__err("bad SOS component count", "Corrupt JPEG"); + if (Ls != 6 + 2 * z->scan_n) + return stbi__err("bad SOS len", "Corrupt JPEG"); + for (i = 0; i < z->scan_n; ++i) + { + int id = stbi__get8(z->s), which; + int q = stbi__get8(z->s); + for (which = 0; which < z->s->img_n; ++which) + if (z->img_comp[which].id == id) + break; + if (which == z->s->img_n) + return 0; // no match + z->img_comp[which].hd = q >> 4; + if (z->img_comp[which].hd > 3) + return stbi__err("bad DC huff", "Corrupt JPEG"); + z->img_comp[which].ha = q & 15; + if (z->img_comp[which].ha > 3) + return stbi__err("bad AC huff", "Corrupt JPEG"); + z->order[i] = which; + } - { - int aa; - z->spec_start = stbi__get8(z->s); - z->spec_end = stbi__get8(z->s); // should be 63, but might be 0 - aa = stbi__get8(z->s); - z->succ_high = (aa >> 4); - z->succ_low = (aa & 15); - if (z->progressive) - { - if (z->spec_start > 63 || z->spec_end > 63 || z->spec_start > z->spec_end || z->succ_high > 13 || z->succ_low > 13) - return stbi__err("bad SOS", "Corrupt JPEG"); - } - else - { - if (z->spec_start != 0) - return stbi__err("bad SOS", "Corrupt JPEG"); - if (z->succ_high != 0 || z->succ_low != 0) - return stbi__err("bad SOS", "Corrupt JPEG"); - z->spec_end = 63; - } - } + { + int aa; + z->spec_start = stbi__get8(z->s); + z->spec_end = stbi__get8(z->s); // should be 63, but might be 0 + aa = stbi__get8(z->s); + z->succ_high = (aa >> 4); + z->succ_low = (aa & 15); + if (z->progressive) + { + if (z->spec_start > 63 || z->spec_end > 63 || z->spec_start > z->spec_end || z->succ_high > 13 || z->succ_low > 13) + return stbi__err("bad SOS", "Corrupt JPEG"); + } + else + { + if (z->spec_start != 0) + return stbi__err("bad SOS", "Corrupt JPEG"); + if (z->succ_high != 0 || z->succ_low != 0) + return stbi__err("bad SOS", "Corrupt JPEG"); + z->spec_end = 63; + } + } - return 1; + return 1; } static int stbi__free_jpeg_components(stbi__jpeg *z, int ncomp, int why) { 
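+ // note: 'why' is threaded straight through as the return value, so callers
+ // can free everything and report the error in one step, e.g.
+ // return stbi__free_jpeg_components(z, i + 1, stbi__err("outofmem", "Out of memory"));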
- int i; - for (i = 0; i < ncomp; ++i) - { - if (z->img_comp[i].raw_data) - { - STBI_FREE(z->img_comp[i].raw_data); - z->img_comp[i].raw_data = NULL; - z->img_comp[i].data = NULL; - } - if (z->img_comp[i].raw_coeff) - { - STBI_FREE(z->img_comp[i].raw_coeff); - z->img_comp[i].raw_coeff = 0; - z->img_comp[i].coeff = 0; - } - if (z->img_comp[i].linebuf) - { - STBI_FREE(z->img_comp[i].linebuf); - z->img_comp[i].linebuf = NULL; - } - } - return why; + int i; + for (i = 0; i < ncomp; ++i) + { + if (z->img_comp[i].raw_data) + { + STBI_FREE(z->img_comp[i].raw_data); + z->img_comp[i].raw_data = NULL; + z->img_comp[i].data = NULL; + } + if (z->img_comp[i].raw_coeff) + { + STBI_FREE(z->img_comp[i].raw_coeff); + z->img_comp[i].raw_coeff = 0; + z->img_comp[i].coeff = 0; + } + if (z->img_comp[i].linebuf) + { + STBI_FREE(z->img_comp[i].linebuf); + z->img_comp[i].linebuf = NULL; + } + } + return why; } static int stbi__process_frame_header(stbi__jpeg *z, int scan) { - stbi__context *s = z->s; - int Lf, p, i, q, h_max = 1, v_max = 1, c; - Lf = stbi__get16be(s); - if (Lf < 11) - return stbi__err("bad SOF len", "Corrupt JPEG"); // JPEG - p = stbi__get8(s); - if (p != 8) - return stbi__err("only 8-bit", "JPEG format not supported: 8-bit only"); // JPEG baseline - s->img_y = stbi__get16be(s); - if (s->img_y == 0) - return stbi__err("no header height", "JPEG format not supported: delayed height"); // Legal, but we don't handle it--but neither does IJG - s->img_x = stbi__get16be(s); - if (s->img_x == 0) - return stbi__err("0 width", "Corrupt JPEG"); // JPEG requires - if (s->img_y > STBI_MAX_DIMENSIONS) - return stbi__err("too large", "Very large image (corrupt?)"); - if (s->img_x > STBI_MAX_DIMENSIONS) - return stbi__err("too large", "Very large image (corrupt?)"); - c = stbi__get8(s); - if (c != 3 && c != 1 && c != 4) - return stbi__err("bad component count", "Corrupt JPEG"); - s->img_n = c; - for (i = 0; i < c; ++i) - { - z->img_comp[i].data = NULL; - z->img_comp[i].linebuf = NULL; - } + stbi__context *s = z->s; + int Lf, p, i, q, h_max = 1, v_max = 1, c; + Lf = stbi__get16be(s); + if (Lf < 11) + return stbi__err("bad SOF len", "Corrupt JPEG"); // JPEG + p = stbi__get8(s); + if (p != 8) + return stbi__err("only 8-bit", "JPEG format not supported: 8-bit only"); // JPEG baseline + s->img_y = stbi__get16be(s); + if (s->img_y == 0) + return stbi__err("no header height", "JPEG format not supported: delayed height"); // Legal, but we don't handle it--but neither does IJG + s->img_x = stbi__get16be(s); + if (s->img_x == 0) + return stbi__err("0 width", "Corrupt JPEG"); // JPEG requires + if (s->img_y > STBI_MAX_DIMENSIONS) + return stbi__err("too large", "Very large image (corrupt?)"); + if (s->img_x > STBI_MAX_DIMENSIONS) + return stbi__err("too large", "Very large image (corrupt?)"); + c = stbi__get8(s); + if (c != 3 && c != 1 && c != 4) + return stbi__err("bad component count", "Corrupt JPEG"); + s->img_n = c; + for (i = 0; i < c; ++i) + { + z->img_comp[i].data = NULL; + z->img_comp[i].linebuf = NULL; + } - if (Lf != 8 + 3 * s->img_n) - return stbi__err("bad SOF len", "Corrupt JPEG"); + if (Lf != 8 + 3 * s->img_n) + return stbi__err("bad SOF len", "Corrupt JPEG"); - z->rgb = 0; - for (i = 0; i < s->img_n; ++i) - { - static const unsigned char rgb[3] = {'R', 'G', 'B'}; - z->img_comp[i].id = stbi__get8(s); - if (s->img_n == 3 && z->img_comp[i].id == rgb[i]) - ++z->rgb; - q = stbi__get8(s); - z->img_comp[i].h = (q >> 4); - if (!z->img_comp[i].h || z->img_comp[i].h > 4) - return stbi__err("bad H", "Corrupt JPEG"); - 
z->img_comp[i].v = q & 15; - if (!z->img_comp[i].v || z->img_comp[i].v > 4) - return stbi__err("bad V", "Corrupt JPEG"); - z->img_comp[i].tq = stbi__get8(s); - if (z->img_comp[i].tq > 3) - return stbi__err("bad TQ", "Corrupt JPEG"); - } + z->rgb = 0; + for (i = 0; i < s->img_n; ++i) + { + static const unsigned char rgb[3] = {'R', 'G', 'B'}; + z->img_comp[i].id = stbi__get8(s); + if (s->img_n == 3 && z->img_comp[i].id == rgb[i]) + ++z->rgb; + q = stbi__get8(s); + z->img_comp[i].h = (q >> 4); + if (!z->img_comp[i].h || z->img_comp[i].h > 4) + return stbi__err("bad H", "Corrupt JPEG"); + z->img_comp[i].v = q & 15; + if (!z->img_comp[i].v || z->img_comp[i].v > 4) + return stbi__err("bad V", "Corrupt JPEG"); + z->img_comp[i].tq = stbi__get8(s); + if (z->img_comp[i].tq > 3) + return stbi__err("bad TQ", "Corrupt JPEG"); + } - if (scan != STBI__SCAN_load) - return 1; + if (scan != STBI__SCAN_load) + return 1; - if (!stbi__mad3sizes_valid(s->img_x, s->img_y, s->img_n, 0)) - return stbi__err("too large", "Image too large to decode"); + if (!stbi__mad3sizes_valid(s->img_x, s->img_y, s->img_n, 0)) + return stbi__err("too large", "Image too large to decode"); - for (i = 0; i < s->img_n; ++i) - { - if (z->img_comp[i].h > h_max) - h_max = z->img_comp[i].h; - if (z->img_comp[i].v > v_max) - v_max = z->img_comp[i].v; - } + for (i = 0; i < s->img_n; ++i) + { + if (z->img_comp[i].h > h_max) + h_max = z->img_comp[i].h; + if (z->img_comp[i].v > v_max) + v_max = z->img_comp[i].v; + } - // check that plane subsampling factors are integer ratios; our resamplers can't deal with fractional ratios - // and I've never seen a non-corrupted JPEG file actually use them - for (i = 0; i < s->img_n; ++i) - { - if (h_max % z->img_comp[i].h != 0) - return stbi__err("bad H", "Corrupt JPEG"); - if (v_max % z->img_comp[i].v != 0) - return stbi__err("bad V", "Corrupt JPEG"); - } + // check that plane subsampling factors are integer ratios; our resamplers can't deal with fractional ratios + // and I've never seen a non-corrupted JPEG file actually use them + for (i = 0; i < s->img_n; ++i) + { + if (h_max % z->img_comp[i].h != 0) + return stbi__err("bad H", "Corrupt JPEG"); + if (v_max % z->img_comp[i].v != 0) + return stbi__err("bad V", "Corrupt JPEG"); + } - // compute interleaved mcu info - z->img_h_max = h_max; - z->img_v_max = v_max; - z->img_mcu_w = h_max * 8; - z->img_mcu_h = v_max * 8; - // these sizes can't be more than 17 bits - z->img_mcu_x = (s->img_x + z->img_mcu_w - 1) / z->img_mcu_w; - z->img_mcu_y = (s->img_y + z->img_mcu_h - 1) / z->img_mcu_h; + // compute interleaved mcu info + z->img_h_max = h_max; + z->img_v_max = v_max; + z->img_mcu_w = h_max * 8; + z->img_mcu_h = v_max * 8; + // these sizes can't be more than 17 bits + z->img_mcu_x = (s->img_x + z->img_mcu_w - 1) / z->img_mcu_w; + z->img_mcu_y = (s->img_y + z->img_mcu_h - 1) / z->img_mcu_h; - for (i = 0; i < s->img_n; ++i) - { - // number of effective pixels (e.g. for non-interleaved MCU) - z->img_comp[i].x = (s->img_x * z->img_comp[i].h + h_max - 1) / h_max; - z->img_comp[i].y = (s->img_y * z->img_comp[i].v + v_max - 1) / v_max; - // to simplify generation, we'll allocate enough memory to decode - // the bogus oversized data from using interleaved MCUs and their - // big blocks (e.g. 
a 16x16 iMCU on an image of width 33); we won't - // discard the extra data until colorspace conversion - // - // img_mcu_x, img_mcu_y: <=17 bits; comp[i].h and .v are <=4 (checked earlier) - // so these muls can't overflow with 32-bit ints (which we require) - z->img_comp[i].w2 = z->img_mcu_x * z->img_comp[i].h * 8; - z->img_comp[i].h2 = z->img_mcu_y * z->img_comp[i].v * 8; - z->img_comp[i].coeff = 0; - z->img_comp[i].raw_coeff = 0; - z->img_comp[i].linebuf = NULL; - z->img_comp[i].raw_data = stbi__malloc_mad2(z->img_comp[i].w2, z->img_comp[i].h2, 15); - if (z->img_comp[i].raw_data == NULL) - return stbi__free_jpeg_components(z, i + 1, stbi__err("outofmem", "Out of memory")); - // align blocks for idct using mmx/sse - z->img_comp[i].data = (stbi_uc *)(((size_t)z->img_comp[i].raw_data + 15) & ~15); - if (z->progressive) - { - // w2, h2 are multiples of 8 (see above) - z->img_comp[i].coeff_w = z->img_comp[i].w2 / 8; - z->img_comp[i].coeff_h = z->img_comp[i].h2 / 8; - z->img_comp[i].raw_coeff = stbi__malloc_mad3(z->img_comp[i].w2, z->img_comp[i].h2, sizeof(short), 15); - if (z->img_comp[i].raw_coeff == NULL) - return stbi__free_jpeg_components(z, i + 1, stbi__err("outofmem", "Out of memory")); - z->img_comp[i].coeff = (short *)(((size_t)z->img_comp[i].raw_coeff + 15) & ~15); - } - } + for (i = 0; i < s->img_n; ++i) + { + // number of effective pixels (e.g. for non-interleaved MCU) + z->img_comp[i].x = (s->img_x * z->img_comp[i].h + h_max - 1) / h_max; + z->img_comp[i].y = (s->img_y * z->img_comp[i].v + v_max - 1) / v_max; + // to simplify generation, we'll allocate enough memory to decode + // the bogus oversized data from using interleaved MCUs and their + // big blocks (e.g. a 16x16 iMCU on an image of width 33); we won't + // discard the extra data until colorspace conversion + // + // img_mcu_x, img_mcu_y: <=17 bits; comp[i].h and .v are <=4 (checked earlier) + // so these muls can't overflow with 32-bit ints (which we require) + z->img_comp[i].w2 = z->img_mcu_x * z->img_comp[i].h * 8; + z->img_comp[i].h2 = z->img_mcu_y * z->img_comp[i].v * 8; + z->img_comp[i].coeff = 0; + z->img_comp[i].raw_coeff = 0; + z->img_comp[i].linebuf = NULL; + z->img_comp[i].raw_data = stbi__malloc_mad2(z->img_comp[i].w2, z->img_comp[i].h2, 15); + if (z->img_comp[i].raw_data == NULL) + return stbi__free_jpeg_components(z, i + 1, stbi__err("outofmem", "Out of memory")); + // align blocks for idct using mmx/sse + z->img_comp[i].data = (stbi_uc *)(((size_t)z->img_comp[i].raw_data + 15) & ~15); + if (z->progressive) + { + // w2, h2 are multiples of 8 (see above) + z->img_comp[i].coeff_w = z->img_comp[i].w2 / 8; + z->img_comp[i].coeff_h = z->img_comp[i].h2 / 8; + z->img_comp[i].raw_coeff = stbi__malloc_mad3(z->img_comp[i].w2, z->img_comp[i].h2, sizeof(short), 15); + if (z->img_comp[i].raw_coeff == NULL) + return stbi__free_jpeg_components(z, i + 1, stbi__err("outofmem", "Out of memory")); + z->img_comp[i].coeff = (short *)(((size_t)z->img_comp[i].raw_coeff + 15) & ~15); + } + } - return 1; + return 1; } // use comparisons since in some cases we handle more than one case (e.g. 
SOF) @@ -3796,862 +3794,862 @@ static int stbi__process_frame_header(stbi__jpeg *z, int scan) static int stbi__decode_jpeg_header(stbi__jpeg *z, int scan) { - int m; - z->jfif = 0; - z->app14_color_transform = -1; // valid values are 0,1,2 - z->marker = STBI__MARKER_none; // initialize cached marker to empty - m = stbi__get_marker(z); - if (!stbi__SOI(m)) - return stbi__err("no SOI", "Corrupt JPEG"); - if (scan == STBI__SCAN_type) - return 1; - m = stbi__get_marker(z); - while (!stbi__SOF(m)) - { - if (!stbi__process_marker(z, m)) - return 0; - m = stbi__get_marker(z); - while (m == STBI__MARKER_none) - { - // some files have extra padding after their blocks, so ok, we'll scan - if (stbi__at_eof(z->s)) - return stbi__err("no SOF", "Corrupt JPEG"); - m = stbi__get_marker(z); - } - } - z->progressive = stbi__SOF_progressive(m); - if (!stbi__process_frame_header(z, scan)) - return 0; - return 1; + int m; + z->jfif = 0; + z->app14_color_transform = -1; // valid values are 0,1,2 + z->marker = STBI__MARKER_none; // initialize cached marker to empty + m = stbi__get_marker(z); + if (!stbi__SOI(m)) + return stbi__err("no SOI", "Corrupt JPEG"); + if (scan == STBI__SCAN_type) + return 1; + m = stbi__get_marker(z); + while (!stbi__SOF(m)) + { + if (!stbi__process_marker(z, m)) + return 0; + m = stbi__get_marker(z); + while (m == STBI__MARKER_none) + { + // some files have extra padding after their blocks, so ok, we'll scan + if (stbi__at_eof(z->s)) + return stbi__err("no SOF", "Corrupt JPEG"); + m = stbi__get_marker(z); + } + } + z->progressive = stbi__SOF_progressive(m); + if (!stbi__process_frame_header(z, scan)) + return 0; + return 1; } -static int stbi__skip_jpeg_junk_at_end(stbi__jpeg *j) +static stbi_uc stbi__skip_jpeg_junk_at_end(stbi__jpeg *j) { - // some JPEGs have junk at end, skip over it but if we find what looks - // like a valid marker, resume there - while (!stbi__at_eof(j->s)) - { - int x = stbi__get8(j->s); - while (x == 255) - { // might be a marker - if (stbi__at_eof(j->s)) - return STBI__MARKER_none; - x = stbi__get8(j->s); - if (x != 0x00 && x != 0xff) - { - // not a stuffed zero or lead-in to another marker, looks - // like an actual marker, return it - return x; - } - // stuffed zero has x=0 now which ends the loop, meaning we go - // back to regular scan loop. - // repeated 0xff keeps trying to read the next byte of the marker. - } - } - return STBI__MARKER_none; + // some JPEGs have junk at end, skip over it but if we find what looks + // like a valid marker, resume there + while (!stbi__at_eof(j->s)) + { + stbi_uc x = stbi__get8(j->s); + while (x == 0xff) + { // might be a marker + if (stbi__at_eof(j->s)) + return STBI__MARKER_none; + x = stbi__get8(j->s); + if (x != 0x00 && x != 0xff) + { + // not a stuffed zero or lead-in to another marker, looks + // like an actual marker, return it + return x; + } + // stuffed zero has x=0 now which ends the loop, meaning we go + // back to regular scan loop. + // repeated 0xff keeps trying to read the next byte of the marker. 
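+ // e.g. trailing bytes ff ff d9 consume the ff fill bytes and return
+ // 0xd9 (EOI) as a real marker, while ff 00 is a stuffed zero and
+ // drops back to the outer scan loop.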
+ } + } + return STBI__MARKER_none; } // decode image to YCbCr format static int stbi__decode_jpeg_image(stbi__jpeg *j) { - int m; - for (m = 0; m < 4; m++) - { - j->img_comp[m].raw_data = NULL; - j->img_comp[m].raw_coeff = NULL; - } - j->restart_interval = 0; - if (!stbi__decode_jpeg_header(j, STBI__SCAN_load)) - return 0; - m = stbi__get_marker(j); - while (!stbi__EOI(m)) - { - if (stbi__SOS(m)) - { - if (!stbi__process_scan_header(j)) - return 0; - if (!stbi__parse_entropy_coded_data(j)) - return 0; - if (j->marker == STBI__MARKER_none) - { - j->marker = stbi__skip_jpeg_junk_at_end(j); - // if we reach eof without hitting a marker, stbi__get_marker() below will fail and we'll eventually return 0 - } - m = stbi__get_marker(j); - if (STBI__RESTART(m)) - m = stbi__get_marker(j); - } - else if (stbi__DNL(m)) - { - int Ld = stbi__get16be(j->s); - stbi__uint32 NL = stbi__get16be(j->s); - if (Ld != 4) - return stbi__err("bad DNL len", "Corrupt JPEG"); - if (NL != j->s->img_y) - return stbi__err("bad DNL height", "Corrupt JPEG"); - m = stbi__get_marker(j); - } - else - { - if (!stbi__process_marker(j, m)) - return 1; - m = stbi__get_marker(j); - } - } - if (j->progressive) - stbi__jpeg_finish(j); - return 1; + int m; + for (m = 0; m < 4; m++) + { + j->img_comp[m].raw_data = NULL; + j->img_comp[m].raw_coeff = NULL; + } + j->restart_interval = 0; + if (!stbi__decode_jpeg_header(j, STBI__SCAN_load)) + return 0; + m = stbi__get_marker(j); + while (!stbi__EOI(m)) + { + if (stbi__SOS(m)) + { + if (!stbi__process_scan_header(j)) + return 0; + if (!stbi__parse_entropy_coded_data(j)) + return 0; + if (j->marker == STBI__MARKER_none) + { + j->marker = stbi__skip_jpeg_junk_at_end(j); + // if we reach eof without hitting a marker, stbi__get_marker() below will fail and we'll eventually return 0 + } + m = stbi__get_marker(j); + if (STBI__RESTART(m)) + m = stbi__get_marker(j); + } + else if (stbi__DNL(m)) + { + int Ld = stbi__get16be(j->s); + stbi__uint32 NL = stbi__get16be(j->s); + if (Ld != 4) + return stbi__err("bad DNL len", "Corrupt JPEG"); + if (NL != j->s->img_y) + return stbi__err("bad DNL height", "Corrupt JPEG"); + m = stbi__get_marker(j); + } + else + { + if (!stbi__process_marker(j, m)) + return 1; + m = stbi__get_marker(j); + } + } + if (j->progressive) + stbi__jpeg_finish(j); + return 1; } // static jfif-centered resampling (across block boundaries) typedef stbi_uc *(*resample_row_func)(stbi_uc *out, stbi_uc *in0, stbi_uc *in1, - int w, int hs); + int w, int hs); #define stbi__div4(x) ((stbi_uc)((x) >> 2)) static stbi_uc *resample_row_1(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) { - STBI_NOTUSED(out); - STBI_NOTUSED(in_far); - STBI_NOTUSED(w); - STBI_NOTUSED(hs); - return in_near; + STBI_NOTUSED(out); + STBI_NOTUSED(in_far); + STBI_NOTUSED(w); + STBI_NOTUSED(hs); + return in_near; } static stbi_uc *stbi__resample_row_v_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) { - // need to generate two samples vertically for every one in input - int i; - STBI_NOTUSED(hs); - for (i = 0; i < w; ++i) - out[i] = stbi__div4(3 * in_near[i] + in_far[i] + 2); - return out; + // need to generate two samples vertically for every one in input + int i; + STBI_NOTUSED(hs); + for (i = 0; i < w; ++i) + out[i] = stbi__div4(3 * in_near[i] + in_far[i] + 2); + return out; } static stbi_uc *stbi__resample_row_h_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) { - // need to generate two samples horizontally for every one in input - int i; - stbi_uc *input = in_near; + 
// need to generate two samples horizontally for every one in input + int i; + stbi_uc *input = in_near; - if (w == 1) - { - // if only one sample, can't do any interpolation - out[0] = out[1] = input[0]; - return out; - } + if (w == 1) + { + // if only one sample, can't do any interpolation + out[0] = out[1] = input[0]; + return out; + } - out[0] = input[0]; - out[1] = stbi__div4(input[0] * 3 + input[1] + 2); - for (i = 1; i < w - 1; ++i) - { - int n = 3 * input[i] + 2; - out[i * 2 + 0] = stbi__div4(n + input[i - 1]); - out[i * 2 + 1] = stbi__div4(n + input[i + 1]); - } - out[i * 2 + 0] = stbi__div4(input[w - 2] * 3 + input[w - 1] + 2); - out[i * 2 + 1] = input[w - 1]; + out[0] = input[0]; + out[1] = stbi__div4(input[0] * 3 + input[1] + 2); + for (i = 1; i < w - 1; ++i) + { + int n = 3 * input[i] + 2; + out[i * 2 + 0] = stbi__div4(n + input[i - 1]); + out[i * 2 + 1] = stbi__div4(n + input[i + 1]); + } + out[i * 2 + 0] = stbi__div4(input[w - 2] * 3 + input[w - 1] + 2); + out[i * 2 + 1] = input[w - 1]; - STBI_NOTUSED(in_far); - STBI_NOTUSED(hs); + STBI_NOTUSED(in_far); + STBI_NOTUSED(hs); - return out; + return out; } #define stbi__div16(x) ((stbi_uc)((x) >> 4)) static stbi_uc *stbi__resample_row_hv_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) { - // need to generate 2x2 samples for every one in input - int i, t0, t1; - if (w == 1) - { - out[0] = out[1] = stbi__div4(3 * in_near[0] + in_far[0] + 2); - return out; - } + // need to generate 2x2 samples for every one in input + int i, t0, t1; + if (w == 1) + { + out[0] = out[1] = stbi__div4(3 * in_near[0] + in_far[0] + 2); + return out; + } - t1 = 3 * in_near[0] + in_far[0]; - out[0] = stbi__div4(t1 + 2); - for (i = 1; i < w; ++i) - { - t0 = t1; - t1 = 3 * in_near[i] + in_far[i]; - out[i * 2 - 1] = stbi__div16(3 * t0 + t1 + 8); - out[i * 2] = stbi__div16(3 * t1 + t0 + 8); - } - out[w * 2 - 1] = stbi__div4(t1 + 2); + t1 = 3 * in_near[0] + in_far[0]; + out[0] = stbi__div4(t1 + 2); + for (i = 1; i < w; ++i) + { + t0 = t1; + t1 = 3 * in_near[i] + in_far[i]; + out[i * 2 - 1] = stbi__div16(3 * t0 + t1 + 8); + out[i * 2] = stbi__div16(3 * t1 + t0 + 8); + } + out[w * 2 - 1] = stbi__div4(t1 + 2); - STBI_NOTUSED(hs); + STBI_NOTUSED(hs); - return out; + return out; } #if defined(STBI_SSE2) || defined(STBI_NEON) static stbi_uc *stbi__resample_row_hv_2_simd(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) { - // need to generate 2x2 samples for every one in input - int i = 0, t0, t1; + // need to generate 2x2 samples for every one in input + int i = 0, t0, t1; - if (w == 1) - { - out[0] = out[1] = stbi__div4(3 * in_near[0] + in_far[0] + 2); - return out; - } + if (w == 1) + { + out[0] = out[1] = stbi__div4(3 * in_near[0] + in_far[0] + 2); + return out; + } - t1 = 3 * in_near[0] + in_far[0]; - // process groups of 8 pixels for as long as we can. - // note we can't handle the last pixel in a row in this loop - // because we need to handle the filter boundary conditions. - for (; i < ((w - 1) & ~7); i += 8) - { + t1 = 3 * in_near[0] + in_far[0]; + // process groups of 8 pixels for as long as we can. + // note we can't handle the last pixel in a row in this loop + // because we need to handle the filter boundary conditions. 
+ for (; i < ((w - 1) & ~7); i += 8) + { #if defined(STBI_SSE2) - // load and perform the vertical filtering pass - // this uses 3*x + y = 4*x + (y - x) - __m128i zero = _mm_setzero_si128(); - __m128i farb = _mm_loadl_epi64((__m128i *)(in_far + i)); - __m128i nearb = _mm_loadl_epi64((__m128i *)(in_near + i)); - __m128i farw = _mm_unpacklo_epi8(farb, zero); - __m128i nearw = _mm_unpacklo_epi8(nearb, zero); - __m128i diff = _mm_sub_epi16(farw, nearw); - __m128i nears = _mm_slli_epi16(nearw, 2); - __m128i curr = _mm_add_epi16(nears, diff); // current row + // load and perform the vertical filtering pass + // this uses 3*x + y = 4*x + (y - x) + __m128i zero = _mm_setzero_si128(); + __m128i farb = _mm_loadl_epi64((__m128i *)(in_far + i)); + __m128i nearb = _mm_loadl_epi64((__m128i *)(in_near + i)); + __m128i farw = _mm_unpacklo_epi8(farb, zero); + __m128i nearw = _mm_unpacklo_epi8(nearb, zero); + __m128i diff = _mm_sub_epi16(farw, nearw); + __m128i nears = _mm_slli_epi16(nearw, 2); + __m128i curr = _mm_add_epi16(nears, diff); // current row - // horizontal filter works the same based on shifted vers of current - // row. "prev" is current row shifted right by 1 pixel; we need to - // insert the previous pixel value (from t1). - // "next" is current row shifted left by 1 pixel, with first pixel - // of next block of 8 pixels added in. - __m128i prv0 = _mm_slli_si128(curr, 2); - __m128i nxt0 = _mm_srli_si128(curr, 2); - __m128i prev = _mm_insert_epi16(prv0, t1, 0); - __m128i next = _mm_insert_epi16(nxt0, 3 * in_near[i + 8] + in_far[i + 8], 7); + // horizontal filter works the same based on shifted vers of current + // row. "prev" is current row shifted right by 1 pixel; we need to + // insert the previous pixel value (from t1). + // "next" is current row shifted left by 1 pixel, with first pixel + // of next block of 8 pixels added in. + __m128i prv0 = _mm_slli_si128(curr, 2); + __m128i nxt0 = _mm_srli_si128(curr, 2); + __m128i prev = _mm_insert_epi16(prv0, t1, 0); + __m128i next = _mm_insert_epi16(nxt0, 3 * in_near[i + 8] + in_far[i + 8], 7); - // horizontal filter, polyphase implementation since it's convenient: - // even pixels = 3*cur + prev = cur*4 + (prev - cur) - // odd pixels = 3*cur + next = cur*4 + (next - cur) - // note the shared term. - __m128i bias = _mm_set1_epi16(8); - __m128i curs = _mm_slli_epi16(curr, 2); - __m128i prvd = _mm_sub_epi16(prev, curr); - __m128i nxtd = _mm_sub_epi16(next, curr); - __m128i curb = _mm_add_epi16(curs, bias); - __m128i even = _mm_add_epi16(prvd, curb); - __m128i odd = _mm_add_epi16(nxtd, curb); + // horizontal filter, polyphase implementation since it's convenient: + // even pixels = 3*cur + prev = cur*4 + (prev - cur) + // odd pixels = 3*cur + next = cur*4 + (next - cur) + // note the shared term. + __m128i bias = _mm_set1_epi16(8); + __m128i curs = _mm_slli_epi16(curr, 2); + __m128i prvd = _mm_sub_epi16(prev, curr); + __m128i nxtd = _mm_sub_epi16(next, curr); + __m128i curb = _mm_add_epi16(curs, bias); + __m128i even = _mm_add_epi16(prvd, curb); + __m128i odd = _mm_add_epi16(nxtd, curb); - // interleave even and odd pixels, then undo scaling. - __m128i int0 = _mm_unpacklo_epi16(even, odd); - __m128i int1_ = _mm_unpackhi_epi16(even, odd); - __m128i de0 = _mm_srli_epi16(int0, 4); - __m128i de1 = _mm_srli_epi16(int1_, 4); + // interleave even and odd pixels, then undo scaling. 
+ __m128i int0 = _mm_unpacklo_epi16(even, odd); + __m128i int1 = _mm_unpackhi_epi16(even, odd); + __m128i de0 = _mm_srli_epi16(int0, 4); + __m128i de1 = _mm_srli_epi16(int1, 4); - // pack and write output - __m128i outv = _mm_packus_epi16(de0, de1); - _mm_storeu_si128((__m128i *)(out + i * 2), outv); + // pack and write output + __m128i outv = _mm_packus_epi16(de0, de1); + _mm_storeu_si128((__m128i *)(out + i * 2), outv); #elif defined(STBI_NEON) - // load and perform the vertical filtering pass - // this uses 3*x + y = 4*x + (y - x) - uint8x8_t farb = vld1_u8(in_far + i); - uint8x8_t nearb = vld1_u8(in_near + i); - int16x8_t diff = vreinterpretq_s16_u16(vsubl_u8(farb, nearb)); - int16x8_t nears = vreinterpretq_s16_u16(vshll_n_u8(nearb, 2)); - int16x8_t curr = vaddq_s16(nears, diff); // current row + // load and perform the vertical filtering pass + // this uses 3*x + y = 4*x + (y - x) + uint8x8_t farb = vld1_u8(in_far + i); + uint8x8_t nearb = vld1_u8(in_near + i); + int16x8_t diff = vreinterpretq_s16_u16(vsubl_u8(farb, nearb)); + int16x8_t nears = vreinterpretq_s16_u16(vshll_n_u8(nearb, 2)); + int16x8_t curr = vaddq_s16(nears, diff); // current row - // horizontal filter works the same based on shifted vers of current - // row. "prev" is current row shifted right by 1 pixel; we need to - // insert the previous pixel value (from t1). - // "next" is current row shifted left by 1 pixel, with first pixel - // of next block of 8 pixels added in. - int16x8_t prv0 = vextq_s16(curr, curr, 7); - int16x8_t nxt0 = vextq_s16(curr, curr, 1); - int16x8_t prev = vsetq_lane_s16(t1, prv0, 0); - int16x8_t next = vsetq_lane_s16(3 * in_near[i + 8] + in_far[i + 8], nxt0, 7); + // horizontal filter works the same based on shifted vers of current + // row. "prev" is current row shifted right by 1 pixel; we need to + // insert the previous pixel value (from t1). + // "next" is current row shifted left by 1 pixel, with first pixel + // of next block of 8 pixels added in. + int16x8_t prv0 = vextq_s16(curr, curr, 7); + int16x8_t nxt0 = vextq_s16(curr, curr, 1); + int16x8_t prev = vsetq_lane_s16(t1, prv0, 0); + int16x8_t next = vsetq_lane_s16(3 * in_near[i + 8] + in_far[i + 8], nxt0, 7); - // horizontal filter, polyphase implementation since it's convenient: - // even pixels = 3*cur + prev = cur*4 + (prev - cur) - // odd pixels = 3*cur + next = cur*4 + (next - cur) - // note the shared term. - int16x8_t curs = vshlq_n_s16(curr, 2); - int16x8_t prvd = vsubq_s16(prev, curr); - int16x8_t nxtd = vsubq_s16(next, curr); - int16x8_t even = vaddq_s16(curs, prvd); - int16x8_t odd = vaddq_s16(curs, nxtd); + // horizontal filter, polyphase implementation since it's convenient: + // even pixels = 3*cur + prev = cur*4 + (prev - cur) + // odd pixels = 3*cur + next = cur*4 + (next - cur) + // note the shared term. 
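/* ---- Aside (illustrative sketch, not part of this patch) ----------------
   A scalar check of the rewrite both SIMD paths lean on: 3*cur + adj is
   computed as cur*4 + (adj - cur), so the cur*4 term is shared between the
   even and odd phases. Combined with the vertical pass t = 3*near + far,
   the final (... + 8) >> 4 undoes the 4x4 = 16x scaling with rounding,
   matching the scalar stbi__div16 path exactly: */
#include <assert.h>

int main(void)
{
    int n0, f0, adj; /* near-row sample, far-row sample, adjacent t value */
    for (n0 = 0; n0 < 256; n0 += 5)
        for (f0 = 0; f0 < 256; f0 += 5)
            for (adj = 0; adj <= 1020; adj += 7)
            {
                int cur = 3 * n0 + f0;                          /* vertical filter */
                int direct = (3 * cur + adj + 8) >> 4;          /* scalar form */
                int shared = ((cur << 2) + (adj - cur) + 8) >> 4; /* SIMD form */
                assert(direct == shared);
            }
    return 0;
}
/* ---- end aside -------------------------------------------------------- */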
+ int16x8_t curs = vshlq_n_s16(curr, 2); + int16x8_t prvd = vsubq_s16(prev, curr); + int16x8_t nxtd = vsubq_s16(next, curr); + int16x8_t even = vaddq_s16(curs, prvd); + int16x8_t odd = vaddq_s16(curs, nxtd); - // undo scaling and round, then store with even/odd phases interleaved - uint8x8x2_t o; - o.val[0] = vqrshrun_n_s16(even, 4); - o.val[1] = vqrshrun_n_s16(odd, 4); - vst2_u8(out + i * 2, o); + // undo scaling and round, then store with even/odd phases interleaved + uint8x8x2_t o; + o.val[0] = vqrshrun_n_s16(even, 4); + o.val[1] = vqrshrun_n_s16(odd, 4); + vst2_u8(out + i * 2, o); #endif - // "previous" value for next iter - t1 = 3 * in_near[i + 7] + in_far[i + 7]; - } + // "previous" value for next iter + t1 = 3 * in_near[i + 7] + in_far[i + 7]; + } - t0 = t1; - t1 = 3 * in_near[i] + in_far[i]; - out[i * 2] = stbi__div16(3 * t1 + t0 + 8); + t0 = t1; + t1 = 3 * in_near[i] + in_far[i]; + out[i * 2] = stbi__div16(3 * t1 + t0 + 8); - for (++i; i < w; ++i) - { - t0 = t1; - t1 = 3 * in_near[i] + in_far[i]; - out[i * 2 - 1] = stbi__div16(3 * t0 + t1 + 8); - out[i * 2] = stbi__div16(3 * t1 + t0 + 8); - } - out[w * 2 - 1] = stbi__div4(t1 + 2); + for (++i; i < w; ++i) + { + t0 = t1; + t1 = 3 * in_near[i] + in_far[i]; + out[i * 2 - 1] = stbi__div16(3 * t0 + t1 + 8); + out[i * 2] = stbi__div16(3 * t1 + t0 + 8); + } + out[w * 2 - 1] = stbi__div4(t1 + 2); - STBI_NOTUSED(hs); + STBI_NOTUSED(hs); - return out; + return out; } #endif static stbi_uc *stbi__resample_row_generic(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) { - // resample with nearest-neighbor - int i, j; - STBI_NOTUSED(in_far); - for (i = 0; i < w; ++i) - for (j = 0; j < hs; ++j) - out[i * hs + j] = in_near[i]; - return out; + // resample with nearest-neighbor + int i, j; + STBI_NOTUSED(in_far); + for (i = 0; i < w; ++i) + for (j = 0; j < hs; ++j) + out[i * hs + j] = in_near[i]; + return out; } // this is a reduced-precision calculation of YCbCr-to-RGB introduced // to make sure the code produces the same results in both SIMD and scalar -#define stbi__float2fixed(x) (((int)((x)*4096.0f + 0.5f)) << 8) +#define stbi__float2fixed(x) (((int)((x) * 4096.0f + 0.5f)) << 8) static void stbi__YCbCr_to_RGB_row(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step) { - int i; - for (i = 0; i < count; ++i) - { - int y_fixed = (y[i] << 20) + (1 << 19); // rounding - int r, g, b; - int cr = pcr[i] - 128; - int cb = pcb[i] - 128; - r = y_fixed + cr * stbi__float2fixed(1.40200f); - g = y_fixed + (cr * -stbi__float2fixed(0.71414f)) + ((cb * -stbi__float2fixed(0.34414f)) & 0xffff0000); - b = y_fixed + cb * stbi__float2fixed(1.77200f); - r >>= 20; - g >>= 20; - b >>= 20; - if ((unsigned)r > 255) - { - if (r < 0) - r = 0; - else - r = 255; - } - if ((unsigned)g > 255) - { - if (g < 0) - g = 0; - else - g = 255; - } - if ((unsigned)b > 255) - { - if (b < 0) - b = 0; - else - b = 255; - } - out[0] = (stbi_uc)r; - out[1] = (stbi_uc)g; - out[2] = (stbi_uc)b; - out[3] = 255; - out += step; - } + int i; + for (i = 0; i < count; ++i) + { + int y_fixed = (y[i] << 20) + (1 << 19); // rounding + int r, g, b; + int cr = pcr[i] - 128; + int cb = pcb[i] - 128; + r = y_fixed + cr * stbi__float2fixed(1.40200f); + g = y_fixed + (cr * -stbi__float2fixed(0.71414f)) + ((cb * -stbi__float2fixed(0.34414f)) & 0xffff0000); + b = y_fixed + cb * stbi__float2fixed(1.77200f); + r >>= 20; + g >>= 20; + b >>= 20; + if ((unsigned)r > 255) + { + if (r < 0) + r = 0; + else + r = 255; + } + if ((unsigned)g > 255) + { + if (g < 0) + 
g = 0; + else + g = 255; + } + if ((unsigned)b > 255) + { + if (b < 0) + b = 0; + else + b = 255; + } + out[0] = (stbi_uc)r; + out[1] = (stbi_uc)g; + out[2] = (stbi_uc)b; + out[3] = 255; + out += step; + } } #if defined(STBI_SSE2) || defined(STBI_NEON) static void stbi__YCbCr_to_RGB_simd(stbi_uc *out, stbi_uc const *y, stbi_uc const *pcb, stbi_uc const *pcr, int count, int step) { - int i = 0; + int i = 0; #ifdef STBI_SSE2 - // step == 3 is pretty ugly on the final interleave, and i'm not convinced - // it's useful in practice (you wouldn't use it for textures, for example). - // so just accelerate step == 4 case. - if (step == 4) - { - // this is a fairly straightforward implementation and not super-optimized. - __m128i signflip = _mm_set1_epi8(-0x80); - __m128i cr_const0 = _mm_set1_epi16((short)(1.40200f * 4096.0f + 0.5f)); - __m128i cr_const1 = _mm_set1_epi16(-(short)(0.71414f * 4096.0f + 0.5f)); - __m128i cb_const0 = _mm_set1_epi16(-(short)(0.34414f * 4096.0f + 0.5f)); - __m128i cb_const1 = _mm_set1_epi16((short)(1.77200f * 4096.0f + 0.5f)); - __m128i y_bias = _mm_set1_epi8((char)(unsigned char)128); - __m128i xw = _mm_set1_epi16(255); // alpha channel + // step == 3 is pretty ugly on the final interleave, and i'm not convinced + // it's useful in practice (you wouldn't use it for textures, for example). + // so just accelerate step == 4 case. + if (step == 4) + { + // this is a fairly straightforward implementation and not super-optimized. + __m128i signflip = _mm_set1_epi8(-0x80); + __m128i cr_const0 = _mm_set1_epi16((short)(1.40200f * 4096.0f + 0.5f)); + __m128i cr_const1 = _mm_set1_epi16(-(short)(0.71414f * 4096.0f + 0.5f)); + __m128i cb_const0 = _mm_set1_epi16(-(short)(0.34414f * 4096.0f + 0.5f)); + __m128i cb_const1 = _mm_set1_epi16((short)(1.77200f * 4096.0f + 0.5f)); + __m128i y_bias = _mm_set1_epi8((char)(unsigned char)128); + __m128i xw = _mm_set1_epi16(255); // alpha channel - for (; i + 7 < count; i += 8) - { - // load - __m128i y_bytes = _mm_loadl_epi64((__m128i *)(y + i)); - __m128i cr_bytes = _mm_loadl_epi64((__m128i *)(pcr + i)); - __m128i cb_bytes = _mm_loadl_epi64((__m128i *)(pcb + i)); - __m128i cr_biased = _mm_xor_si128(cr_bytes, signflip); // -128 - __m128i cb_biased = _mm_xor_si128(cb_bytes, signflip); // -128 + for (; i + 7 < count; i += 8) + { + // load + __m128i y_bytes = _mm_loadl_epi64((__m128i *)(y + i)); + __m128i cr_bytes = _mm_loadl_epi64((__m128i *)(pcr + i)); + __m128i cb_bytes = _mm_loadl_epi64((__m128i *)(pcb + i)); + __m128i cr_biased = _mm_xor_si128(cr_bytes, signflip); // -128 + __m128i cb_biased = _mm_xor_si128(cb_bytes, signflip); // -128 - // unpack to short (and left-shift cr, cb by 8) - __m128i yw = _mm_unpacklo_epi8(y_bias, y_bytes); - __m128i crw = _mm_unpacklo_epi8(_mm_setzero_si128(), cr_biased); - __m128i cbw = _mm_unpacklo_epi8(_mm_setzero_si128(), cb_biased); + // unpack to short (and left-shift cr, cb by 8) + __m128i yw = _mm_unpacklo_epi8(y_bias, y_bytes); + __m128i crw = _mm_unpacklo_epi8(_mm_setzero_si128(), cr_biased); + __m128i cbw = _mm_unpacklo_epi8(_mm_setzero_si128(), cb_biased); - // color transform - __m128i yws = _mm_srli_epi16(yw, 4); - __m128i cr0 = _mm_mulhi_epi16(cr_const0, crw); - __m128i cb0 = _mm_mulhi_epi16(cb_const0, cbw); - __m128i cb1 = _mm_mulhi_epi16(cbw, cb_const1); - __m128i cr1 = _mm_mulhi_epi16(crw, cr_const1); - __m128i rws = _mm_add_epi16(cr0, yws); - __m128i gwt = _mm_add_epi16(cb0, yws); - __m128i bws = _mm_add_epi16(yws, cb1); - __m128i gws = _mm_add_epi16(gwt, cr1); + // color transform + __m128i yws 
= _mm_srli_epi16(yw, 4); + __m128i cr0 = _mm_mulhi_epi16(cr_const0, crw); + __m128i cb0 = _mm_mulhi_epi16(cb_const0, cbw); + __m128i cb1 = _mm_mulhi_epi16(cbw, cb_const1); + __m128i cr1 = _mm_mulhi_epi16(crw, cr_const1); + __m128i rws = _mm_add_epi16(cr0, yws); + __m128i gwt = _mm_add_epi16(cb0, yws); + __m128i bws = _mm_add_epi16(yws, cb1); + __m128i gws = _mm_add_epi16(gwt, cr1); - // descale - __m128i rw = _mm_srai_epi16(rws, 4); - __m128i bw = _mm_srai_epi16(bws, 4); - __m128i gw = _mm_srai_epi16(gws, 4); + // descale + __m128i rw = _mm_srai_epi16(rws, 4); + __m128i bw = _mm_srai_epi16(bws, 4); + __m128i gw = _mm_srai_epi16(gws, 4); - // back to byte, set up for transpose - __m128i brb = _mm_packus_epi16(rw, bw); - __m128i gxb = _mm_packus_epi16(gw, xw); + // back to byte, set up for transpose + __m128i brb = _mm_packus_epi16(rw, bw); + __m128i gxb = _mm_packus_epi16(gw, xw); - // transpose to interleave channels - __m128i t0 = _mm_unpacklo_epi8(brb, gxb); - __m128i t1 = _mm_unpackhi_epi8(brb, gxb); - __m128i o0 = _mm_unpacklo_epi16(t0, t1); - __m128i o1 = _mm_unpackhi_epi16(t0, t1); + // transpose to interleave channels + __m128i t0 = _mm_unpacklo_epi8(brb, gxb); + __m128i t1 = _mm_unpackhi_epi8(brb, gxb); + __m128i o0 = _mm_unpacklo_epi16(t0, t1); + __m128i o1 = _mm_unpackhi_epi16(t0, t1); - // store - _mm_storeu_si128((__m128i *)(out + 0), o0); - _mm_storeu_si128((__m128i *)(out + 16), o1); - out += 32; - } - } + // store + _mm_storeu_si128((__m128i *)(out + 0), o0); + _mm_storeu_si128((__m128i *)(out + 16), o1); + out += 32; + } + } #endif #ifdef STBI_NEON - // in this version, step=3 support would be easy to add. but is there demand? - if (step == 4) - { - // this is a fairly straightforward implementation and not super-optimized. - uint8x8_t signflip = vdup_n_u8(0x80); - int16x8_t cr_const0 = vdupq_n_s16((short)(1.40200f * 4096.0f + 0.5f)); - int16x8_t cr_const1 = vdupq_n_s16(-(short)(0.71414f * 4096.0f + 0.5f)); - int16x8_t cb_const0 = vdupq_n_s16(-(short)(0.34414f * 4096.0f + 0.5f)); - int16x8_t cb_const1 = vdupq_n_s16((short)(1.77200f * 4096.0f + 0.5f)); + // in this version, step=3 support would be easy to add. but is there demand? + if (step == 4) + { + // this is a fairly straightforward implementation and not super-optimized. 
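/* ---- Aside (illustrative sketch, not part of this patch) ----------------
   Where the 4096-scaled constants come from: both SIMD paths implement the
   JFIF YCbCr->RGB transform in 12-bit fixed point (the same scale factor
   stbi__float2fixed uses):
       R = Y + 1.40200*(Cr-128)
       G = Y - 0.71414*(Cr-128) - 0.34414*(Cb-128)
       B = Y + 1.77200*(Cb-128)
   A floating-point reference for one invented test pixel; the library's
   rounded fixed-point results may differ from this truncating sketch by 1: */
#include <stdio.h>

static int clamp255(int v) { return v < 0 ? 0 : (v > 255 ? 255 : v); }

int main(void)
{
    int Y = 128, Cb = 90, Cr = 200;      /* arbitrary test pixel */
    int cr = Cr - 128, cb = Cb - 128;
    int r = clamp255(Y + (int)(1.40200f * cr));
    int g = clamp255(Y - (int)(0.71414f * cr) - (int)(0.34414f * cb));
    int b = clamp255(Y + (int)(1.77200f * cb));
    printf("R=%d G=%d B=%d\n", r, g, b); /* R=228 G=90 B=61 */
    return 0;
}
/* ---- end aside -------------------------------------------------------- */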
+ uint8x8_t signflip = vdup_n_u8(0x80); + int16x8_t cr_const0 = vdupq_n_s16((short)(1.40200f * 4096.0f + 0.5f)); + int16x8_t cr_const1 = vdupq_n_s16(-(short)(0.71414f * 4096.0f + 0.5f)); + int16x8_t cb_const0 = vdupq_n_s16(-(short)(0.34414f * 4096.0f + 0.5f)); + int16x8_t cb_const1 = vdupq_n_s16((short)(1.77200f * 4096.0f + 0.5f)); - for (; i + 7 < count; i += 8) - { - // load - uint8x8_t y_bytes = vld1_u8(y + i); - uint8x8_t cr_bytes = vld1_u8(pcr + i); - uint8x8_t cb_bytes = vld1_u8(pcb + i); - int8x8_t cr_biased = vreinterpret_s8_u8(vsub_u8(cr_bytes, signflip)); - int8x8_t cb_biased = vreinterpret_s8_u8(vsub_u8(cb_bytes, signflip)); + for (; i + 7 < count; i += 8) + { + // load + uint8x8_t y_bytes = vld1_u8(y + i); + uint8x8_t cr_bytes = vld1_u8(pcr + i); + uint8x8_t cb_bytes = vld1_u8(pcb + i); + int8x8_t cr_biased = vreinterpret_s8_u8(vsub_u8(cr_bytes, signflip)); + int8x8_t cb_biased = vreinterpret_s8_u8(vsub_u8(cb_bytes, signflip)); - // expand to s16 - int16x8_t yws = vreinterpretq_s16_u16(vshll_n_u8(y_bytes, 4)); - int16x8_t crw = vshll_n_s8(cr_biased, 7); - int16x8_t cbw = vshll_n_s8(cb_biased, 7); + // expand to s16 + int16x8_t yws = vreinterpretq_s16_u16(vshll_n_u8(y_bytes, 4)); + int16x8_t crw = vshll_n_s8(cr_biased, 7); + int16x8_t cbw = vshll_n_s8(cb_biased, 7); - // color transform - int16x8_t cr0 = vqdmulhq_s16(crw, cr_const0); - int16x8_t cb0 = vqdmulhq_s16(cbw, cb_const0); - int16x8_t cr1 = vqdmulhq_s16(crw, cr_const1); - int16x8_t cb1 = vqdmulhq_s16(cbw, cb_const1); - int16x8_t rws = vaddq_s16(yws, cr0); - int16x8_t gws = vaddq_s16(vaddq_s16(yws, cb0), cr1); - int16x8_t bws = vaddq_s16(yws, cb1); + // color transform + int16x8_t cr0 = vqdmulhq_s16(crw, cr_const0); + int16x8_t cb0 = vqdmulhq_s16(cbw, cb_const0); + int16x8_t cr1 = vqdmulhq_s16(crw, cr_const1); + int16x8_t cb1 = vqdmulhq_s16(cbw, cb_const1); + int16x8_t rws = vaddq_s16(yws, cr0); + int16x8_t gws = vaddq_s16(vaddq_s16(yws, cb0), cr1); + int16x8_t bws = vaddq_s16(yws, cb1); - // undo scaling, round, convert to byte - uint8x8x4_t o; - o.val[0] = vqrshrun_n_s16(rws, 4); - o.val[1] = vqrshrun_n_s16(gws, 4); - o.val[2] = vqrshrun_n_s16(bws, 4); - o.val[3] = vdup_n_u8(255); + // undo scaling, round, convert to byte + uint8x8x4_t o; + o.val[0] = vqrshrun_n_s16(rws, 4); + o.val[1] = vqrshrun_n_s16(gws, 4); + o.val[2] = vqrshrun_n_s16(bws, 4); + o.val[3] = vdup_n_u8(255); - // store, interleaving r/g/b/a - vst4_u8(out, o); - out += 8 * 4; - } - } + // store, interleaving r/g/b/a + vst4_u8(out, o); + out += 8 * 4; + } + } #endif - for (; i < count; ++i) - { - int y_fixed = (y[i] << 20) + (1 << 19); // rounding - int r, g, b; - int cr = pcr[i] - 128; - int cb = pcb[i] - 128; - r = y_fixed + cr * stbi__float2fixed(1.40200f); - g = y_fixed + cr * -stbi__float2fixed(0.71414f) + ((cb * -stbi__float2fixed(0.34414f)) & 0xffff0000); - b = y_fixed + cb * stbi__float2fixed(1.77200f); - r >>= 20; - g >>= 20; - b >>= 20; - if ((unsigned)r > 255) - { - if (r < 0) - r = 0; - else - r = 255; - } - if ((unsigned)g > 255) - { - if (g < 0) - g = 0; - else - g = 255; - } - if ((unsigned)b > 255) - { - if (b < 0) - b = 0; - else - b = 255; - } - out[0] = (stbi_uc)r; - out[1] = (stbi_uc)g; - out[2] = (stbi_uc)b; - out[3] = 255; - out += step; - } + for (; i < count; ++i) + { + int y_fixed = (y[i] << 20) + (1 << 19); // rounding + int r, g, b; + int cr = pcr[i] - 128; + int cb = pcb[i] - 128; + r = y_fixed + cr * stbi__float2fixed(1.40200f); + g = y_fixed + cr * -stbi__float2fixed(0.71414f) + ((cb * -stbi__float2fixed(0.34414f)) & 
0xffff0000); + b = y_fixed + cb * stbi__float2fixed(1.77200f); + r >>= 20; + g >>= 20; + b >>= 20; + if ((unsigned)r > 255) + { + if (r < 0) + r = 0; + else + r = 255; + } + if ((unsigned)g > 255) + { + if (g < 0) + g = 0; + else + g = 255; + } + if ((unsigned)b > 255) + { + if (b < 0) + b = 0; + else + b = 255; + } + out[0] = (stbi_uc)r; + out[1] = (stbi_uc)g; + out[2] = (stbi_uc)b; + out[3] = 255; + out += step; + } } #endif // set up the kernels static void stbi__setup_jpeg(stbi__jpeg *j) { - j->idct_block_kernel = stbi__idct_block; - j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_row; - j->resample_row_hv_2_kernel = stbi__resample_row_hv_2; + j->idct_block_kernel = stbi__idct_block; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_row; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2; #ifdef STBI_SSE2 - if (stbi__sse2_available()) - { - j->idct_block_kernel = stbi__idct_simd; - j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd; - j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd; - } + if (stbi__sse2_available()) + { + j->idct_block_kernel = stbi__idct_simd; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd; + } #endif #ifdef STBI_NEON - j->idct_block_kernel = stbi__idct_simd; - j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd; - j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd; + j->idct_block_kernel = stbi__idct_simd; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd; #endif } // clean up the temporary component buffers static void stbi__cleanup_jpeg(stbi__jpeg *j) { - stbi__free_jpeg_components(j, j->s->img_n, 0); + stbi__free_jpeg_components(j, j->s->img_n, 0); } typedef struct { - resample_row_func resample; - stbi_uc *line0, *line1; - int hs, vs; // expansion factor in each axis - int w_lores; // horizontal pixels pre-expansion - int ystep; // how far through vertical expansion we are - int ypos; // which pre-expansion row we're on + resample_row_func resample; + stbi_uc *line0, *line1; + int hs, vs; // expansion factor in each axis + int w_lores; // horizontal pixels pre-expansion + int ystep; // how far through vertical expansion we are + int ypos; // which pre-expansion row we're on } stbi__resample; // fast 0..255 * 0..255 => 0..255 rounded multiplication static stbi_uc stbi__blinn_8x8(stbi_uc x, stbi_uc y) { - unsigned int t = x * y + 128; - return (stbi_uc)((t + (t >> 8)) >> 8); + unsigned int t = x * y + 128; + return (stbi_uc)((t + (t >> 8)) >> 8); } static stbi_uc *load_jpeg_image(stbi__jpeg *z, int *out_x, int *out_y, int *comp, int req_comp) { - int n, decode_n, is_rgb; - z->s->img_n = 0; // make stbi__cleanup_jpeg safe + int n, decode_n, is_rgb; + z->s->img_n = 0; // make stbi__cleanup_jpeg safe - // validate req_comp - if (req_comp < 0 || req_comp > 4) - return stbi__errpuc("bad req_comp", "Internal error"); + // validate req_comp + if (req_comp < 0 || req_comp > 4) + return stbi__errpuc("bad req_comp", "Internal error"); - // load a jpeg image from whichever source, but leave in YCbCr format - if (!stbi__decode_jpeg_image(z)) - { - stbi__cleanup_jpeg(z); - return NULL; - } + // load a jpeg image from whichever source, but leave in YCbCr format + if (!stbi__decode_jpeg_image(z)) + { + stbi__cleanup_jpeg(z); + return NULL; + } - // determine actual number of components to generate - n = req_comp ? req_comp : z->s->img_n >= 3 ? 3 - : 1; + // determine actual number of components to generate + n = req_comp ? 
req_comp : z->s->img_n >= 3 ? 3 + : 1; - is_rgb = z->s->img_n == 3 && (z->rgb == 3 || (z->app14_color_transform == 0 && !z->jfif)); + is_rgb = z->s->img_n == 3 && (z->rgb == 3 || (z->app14_color_transform == 0 && !z->jfif)); - if (z->s->img_n == 3 && n < 3 && !is_rgb) - decode_n = 1; - else - decode_n = z->s->img_n; + if (z->s->img_n == 3 && n < 3 && !is_rgb) + decode_n = 1; + else + decode_n = z->s->img_n; - // nothing to do if no components requested; check this now to avoid - // accessing uninitialized coutput[0] later - if (decode_n <= 0) - { - stbi__cleanup_jpeg(z); - return NULL; - } + // nothing to do if no components requested; check this now to avoid + // accessing uninitialized coutput[0] later + if (decode_n <= 0) + { + stbi__cleanup_jpeg(z); + return NULL; + } - // resample and color-convert - { - int k; - unsigned int i, j; - stbi_uc *output; - stbi_uc *coutput[4] = {NULL, NULL, NULL, NULL}; + // resample and color-convert + { + int k; + unsigned int i, j; + stbi_uc *output; + stbi_uc *coutput[4] = {NULL, NULL, NULL, NULL}; - stbi__resample res_comp[4]; + stbi__resample res_comp[4]; - for (k = 0; k < decode_n; ++k) - { - stbi__resample *r = &res_comp[k]; + for (k = 0; k < decode_n; ++k) + { + stbi__resample *r = &res_comp[k]; - // allocate line buffer big enough for upsampling off the edges - // with upsample factor of 4 - z->img_comp[k].linebuf = (stbi_uc *)stbi__malloc(z->s->img_x + 3); - if (!z->img_comp[k].linebuf) - { - stbi__cleanup_jpeg(z); - return stbi__errpuc("outofmem", "Out of memory"); - } + // allocate line buffer big enough for upsampling off the edges + // with upsample factor of 4 + z->img_comp[k].linebuf = (stbi_uc *)stbi__malloc(z->s->img_x + 3); + if (!z->img_comp[k].linebuf) + { + stbi__cleanup_jpeg(z); + return stbi__errpuc("outofmem", "Out of memory"); + } - r->hs = z->img_h_max / z->img_comp[k].h; - r->vs = z->img_v_max / z->img_comp[k].v; - r->ystep = r->vs >> 1; - r->w_lores = (z->s->img_x + r->hs - 1) / r->hs; - r->ypos = 0; - r->line0 = r->line1 = z->img_comp[k].data; + r->hs = z->img_h_max / z->img_comp[k].h; + r->vs = z->img_v_max / z->img_comp[k].v; + r->ystep = r->vs >> 1; + r->w_lores = (z->s->img_x + r->hs - 1) / r->hs; + r->ypos = 0; + r->line0 = r->line1 = z->img_comp[k].data; - if (r->hs == 1 && r->vs == 1) - r->resample = resample_row_1; - else if (r->hs == 1 && r->vs == 2) - r->resample = stbi__resample_row_v_2; - else if (r->hs == 2 && r->vs == 1) - r->resample = stbi__resample_row_h_2; - else if (r->hs == 2 && r->vs == 2) - r->resample = z->resample_row_hv_2_kernel; - else - r->resample = stbi__resample_row_generic; - } + if (r->hs == 1 && r->vs == 1) + r->resample = resample_row_1; + else if (r->hs == 1 && r->vs == 2) + r->resample = stbi__resample_row_v_2; + else if (r->hs == 2 && r->vs == 1) + r->resample = stbi__resample_row_h_2; + else if (r->hs == 2 && r->vs == 2) + r->resample = z->resample_row_hv_2_kernel; + else + r->resample = stbi__resample_row_generic; + } - // can't error after this so, this is safe - output = (stbi_uc *)stbi__malloc_mad3(n, z->s->img_x, z->s->img_y, 1); - if (!output) - { - stbi__cleanup_jpeg(z); - return stbi__errpuc("outofmem", "Out of memory"); - } + // can't error after this so, this is safe + output = (stbi_uc *)stbi__malloc_mad3(n, z->s->img_x, z->s->img_y, 1); + if (!output) + { + stbi__cleanup_jpeg(z); + return stbi__errpuc("outofmem", "Out of memory"); + } - // now go ahead and resample - for (j = 0; j < z->s->img_y; ++j) - { - stbi_uc *out = output + n * z->s->img_x * j; - for (k = 0; k < 
decode_n; ++k) - { - stbi__resample *r = &res_comp[k]; - int y_bot = r->ystep >= (r->vs >> 1); - coutput[k] = r->resample(z->img_comp[k].linebuf, - y_bot ? r->line1 : r->line0, - y_bot ? r->line0 : r->line1, - r->w_lores, r->hs); - if (++r->ystep >= r->vs) - { - r->ystep = 0; - r->line0 = r->line1; - if (++r->ypos < z->img_comp[k].y) - r->line1 += z->img_comp[k].w2; - } - } - if (n >= 3) - { - stbi_uc *y = coutput[0]; - if (z->s->img_n == 3) - { - if (is_rgb) - { - for (i = 0; i < z->s->img_x; ++i) - { - out[0] = y[i]; - out[1] = coutput[1][i]; - out[2] = coutput[2][i]; - out[3] = 255; - out += n; - } - } - else - { - z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); - } - } - else if (z->s->img_n == 4) - { - if (z->app14_color_transform == 0) - { // CMYK - for (i = 0; i < z->s->img_x; ++i) - { - stbi_uc m = coutput[3][i]; - out[0] = stbi__blinn_8x8(coutput[0][i], m); - out[1] = stbi__blinn_8x8(coutput[1][i], m); - out[2] = stbi__blinn_8x8(coutput[2][i], m); - out[3] = 255; - out += n; - } - } - else if (z->app14_color_transform == 2) - { // YCCK - z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); - for (i = 0; i < z->s->img_x; ++i) - { - stbi_uc m = coutput[3][i]; - out[0] = stbi__blinn_8x8(255 - out[0], m); - out[1] = stbi__blinn_8x8(255 - out[1], m); - out[2] = stbi__blinn_8x8(255 - out[2], m); - out += n; - } - } - else - { // YCbCr + alpha? Ignore the fourth channel for now - z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); - } - } - else - for (i = 0; i < z->s->img_x; ++i) - { - out[0] = out[1] = out[2] = y[i]; - out[3] = 255; // not used if n==3 - out += n; - } - } - else - { - if (is_rgb) - { - if (n == 1) - for (i = 0; i < z->s->img_x; ++i) - *out++ = stbi__compute_y(coutput[0][i], coutput[1][i], coutput[2][i]); - else - { - for (i = 0; i < z->s->img_x; ++i, out += 2) - { - out[0] = stbi__compute_y(coutput[0][i], coutput[1][i], coutput[2][i]); - out[1] = 255; - } - } - } - else if (z->s->img_n == 4 && z->app14_color_transform == 0) - { - for (i = 0; i < z->s->img_x; ++i) - { - stbi_uc m = coutput[3][i]; - stbi_uc r = stbi__blinn_8x8(coutput[0][i], m); - stbi_uc g = stbi__blinn_8x8(coutput[1][i], m); - stbi_uc b = stbi__blinn_8x8(coutput[2][i], m); - out[0] = stbi__compute_y(r, g, b); - out[1] = 255; - out += n; - } - } - else if (z->s->img_n == 4 && z->app14_color_transform == 2) - { - for (i = 0; i < z->s->img_x; ++i) - { - out[0] = stbi__blinn_8x8(255 - coutput[0][i], coutput[3][i]); - out[1] = 255; - out += n; - } - } - else - { - stbi_uc *y = coutput[0]; - if (n == 1) - for (i = 0; i < z->s->img_x; ++i) - out[i] = y[i]; - else - for (i = 0; i < z->s->img_x; ++i) - { - *out++ = y[i]; - *out++ = 255; - } - } - } - } - stbi__cleanup_jpeg(z); - *out_x = z->s->img_x; - *out_y = z->s->img_y; - if (comp) - *comp = z->s->img_n >= 3 ? 3 : 1; // report original components, not output - return output; - } + // now go ahead and resample + for (j = 0; j < z->s->img_y; ++j) + { + stbi_uc *out = output + n * z->s->img_x * j; + for (k = 0; k < decode_n; ++k) + { + stbi__resample *r = &res_comp[k]; + int y_bot = r->ystep >= (r->vs >> 1); + coutput[k] = r->resample(z->img_comp[k].linebuf, + y_bot ? r->line1 : r->line0, + y_bot ? 
r->line0 : r->line1, + r->w_lores, r->hs); + if (++r->ystep >= r->vs) + { + r->ystep = 0; + r->line0 = r->line1; + if (++r->ypos < z->img_comp[k].y) + r->line1 += z->img_comp[k].w2; + } + } + if (n >= 3) + { + stbi_uc *y = coutput[0]; + if (z->s->img_n == 3) + { + if (is_rgb) + { + for (i = 0; i < z->s->img_x; ++i) + { + out[0] = y[i]; + out[1] = coutput[1][i]; + out[2] = coutput[2][i]; + out[3] = 255; + out += n; + } + } + else + { + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + } + } + else if (z->s->img_n == 4) + { + if (z->app14_color_transform == 0) + { // CMYK + for (i = 0; i < z->s->img_x; ++i) + { + stbi_uc m = coutput[3][i]; + out[0] = stbi__blinn_8x8(coutput[0][i], m); + out[1] = stbi__blinn_8x8(coutput[1][i], m); + out[2] = stbi__blinn_8x8(coutput[2][i], m); + out[3] = 255; + out += n; + } + } + else if (z->app14_color_transform == 2) + { // YCCK + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + for (i = 0; i < z->s->img_x; ++i) + { + stbi_uc m = coutput[3][i]; + out[0] = stbi__blinn_8x8(255 - out[0], m); + out[1] = stbi__blinn_8x8(255 - out[1], m); + out[2] = stbi__blinn_8x8(255 - out[2], m); + out += n; + } + } + else + { // YCbCr + alpha? Ignore the fourth channel for now + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + } + } + else + for (i = 0; i < z->s->img_x; ++i) + { + out[0] = out[1] = out[2] = y[i]; + out[3] = 255; // not used if n==3 + out += n; + } + } + else + { + if (is_rgb) + { + if (n == 1) + for (i = 0; i < z->s->img_x; ++i) + *out++ = stbi__compute_y(coutput[0][i], coutput[1][i], coutput[2][i]); + else + { + for (i = 0; i < z->s->img_x; ++i, out += 2) + { + out[0] = stbi__compute_y(coutput[0][i], coutput[1][i], coutput[2][i]); + out[1] = 255; + } + } + } + else if (z->s->img_n == 4 && z->app14_color_transform == 0) + { + for (i = 0; i < z->s->img_x; ++i) + { + stbi_uc m = coutput[3][i]; + stbi_uc r = stbi__blinn_8x8(coutput[0][i], m); + stbi_uc g = stbi__blinn_8x8(coutput[1][i], m); + stbi_uc b = stbi__blinn_8x8(coutput[2][i], m); + out[0] = stbi__compute_y(r, g, b); + out[1] = 255; + out += n; + } + } + else if (z->s->img_n == 4 && z->app14_color_transform == 2) + { + for (i = 0; i < z->s->img_x; ++i) + { + out[0] = stbi__blinn_8x8(255 - coutput[0][i], coutput[3][i]); + out[1] = 255; + out += n; + } + } + else + { + stbi_uc *y = coutput[0]; + if (n == 1) + for (i = 0; i < z->s->img_x; ++i) + out[i] = y[i]; + else + for (i = 0; i < z->s->img_x; ++i) + { + *out++ = y[i]; + *out++ = 255; + } + } + } + } + stbi__cleanup_jpeg(z); + *out_x = z->s->img_x; + *out_y = z->s->img_y; + if (comp) + *comp = z->s->img_n >= 3 ? 
3 : 1; // report original components, not output + return output; + } } static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) { - unsigned char *result; - stbi__jpeg *j = (stbi__jpeg *)stbi__malloc(sizeof(stbi__jpeg)); - if (!j) - return stbi__errpuc("outofmem", "Out of memory"); - memset(j, 0, sizeof(stbi__jpeg)); - STBI_NOTUSED(ri); - j->s = s; - stbi__setup_jpeg(j); - result = load_jpeg_image(j, x, y, comp, req_comp); - STBI_FREE(j); - return result; + unsigned char *result; + stbi__jpeg *j = (stbi__jpeg *)stbi__malloc(sizeof(stbi__jpeg)); + if (!j) + return stbi__errpuc("outofmem", "Out of memory"); + memset(j, 0, sizeof(stbi__jpeg)); + STBI_NOTUSED(ri); + j->s = s; + stbi__setup_jpeg(j); + result = load_jpeg_image(j, x, y, comp, req_comp); + STBI_FREE(j); + return result; } static int stbi__jpeg_test(stbi__context *s) { - int r; - stbi__jpeg *j = (stbi__jpeg *)stbi__malloc(sizeof(stbi__jpeg)); - if (!j) - return stbi__err("outofmem", "Out of memory"); - memset(j, 0, sizeof(stbi__jpeg)); - j->s = s; - stbi__setup_jpeg(j); - r = stbi__decode_jpeg_header(j, STBI__SCAN_type); - stbi__rewind(s); - STBI_FREE(j); - return r; + int r; + stbi__jpeg *j = (stbi__jpeg *)stbi__malloc(sizeof(stbi__jpeg)); + if (!j) + return stbi__err("outofmem", "Out of memory"); + memset(j, 0, sizeof(stbi__jpeg)); + j->s = s; + stbi__setup_jpeg(j); + r = stbi__decode_jpeg_header(j, STBI__SCAN_type); + stbi__rewind(s); + STBI_FREE(j); + return r; } static int stbi__jpeg_info_raw(stbi__jpeg *j, int *x, int *y, int *comp) { - if (!stbi__decode_jpeg_header(j, STBI__SCAN_header)) - { - stbi__rewind(j->s); - return 0; - } - if (x) - *x = j->s->img_x; - if (y) - *y = j->s->img_y; - if (comp) - *comp = j->s->img_n >= 3 ? 3 : 1; - return 1; + if (!stbi__decode_jpeg_header(j, STBI__SCAN_header)) + { + stbi__rewind(j->s); + return 0; + } + if (x) + *x = j->s->img_x; + if (y) + *y = j->s->img_y; + if (comp) + *comp = j->s->img_n >= 3 ? 
3 : 1; + return 1; } static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp) { - int result; - stbi__jpeg *j = (stbi__jpeg *)(stbi__malloc(sizeof(stbi__jpeg))); - if (!j) - return stbi__err("outofmem", "Out of memory"); - memset(j, 0, sizeof(stbi__jpeg)); - j->s = s; - result = stbi__jpeg_info_raw(j, x, y, comp); - STBI_FREE(j); - return result; + int result; + stbi__jpeg *j = (stbi__jpeg *)(stbi__malloc(sizeof(stbi__jpeg))); + if (!j) + return stbi__err("outofmem", "Out of memory"); + memset(j, 0, sizeof(stbi__jpeg)); + j->s = s; + result = stbi__jpeg_info_raw(j, x, y, comp); + STBI_FREE(j); + return result; } #endif @@ -4673,82 +4671,82 @@ static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp) // (jpegs packs from left, zlib from right, so can't share code) typedef struct { - stbi__uint16 fast[1 << STBI__ZFAST_BITS]; - stbi__uint16 firstcode[16]; - int maxcode[17]; - stbi__uint16 firstsymbol[16]; - stbi_uc size[STBI__ZNSYMS]; - stbi__uint16 value[STBI__ZNSYMS]; + stbi__uint16 fast[1 << STBI__ZFAST_BITS]; + stbi__uint16 firstcode[16]; + int maxcode[17]; + stbi__uint16 firstsymbol[16]; + stbi_uc size[STBI__ZNSYMS]; + stbi__uint16 value[STBI__ZNSYMS]; } stbi__zhuffman; stbi_inline static int stbi__bitreverse16(int n) { - n = ((n & 0xAAAA) >> 1) | ((n & 0x5555) << 1); - n = ((n & 0xCCCC) >> 2) | ((n & 0x3333) << 2); - n = ((n & 0xF0F0) >> 4) | ((n & 0x0F0F) << 4); - n = ((n & 0xFF00) >> 8) | ((n & 0x00FF) << 8); - return n; + n = ((n & 0xAAAA) >> 1) | ((n & 0x5555) << 1); + n = ((n & 0xCCCC) >> 2) | ((n & 0x3333) << 2); + n = ((n & 0xF0F0) >> 4) | ((n & 0x0F0F) << 4); + n = ((n & 0xFF00) >> 8) | ((n & 0x00FF) << 8); + return n; } stbi_inline static int stbi__bit_reverse(int v, int bits) { - STBI_ASSERT(bits <= 16); - // to bit reverse n bits, reverse 16 and shift - // e.g. 11 bits, bit reverse and shift away 5 - return stbi__bitreverse16(v) >> (16 - bits); + STBI_ASSERT(bits <= 16); + // to bit reverse n bits, reverse 16 and shift + // e.g. 
11 bits, bit reverse and shift away 5 + return stbi__bitreverse16(v) >> (16 - bits); } static int stbi__zbuild_huffman(stbi__zhuffman *z, const stbi_uc *sizelist, int num) { - int i, k = 0; - int code, next_code[16], sizes[17]; + int i, k = 0; + int code, next_code[16], sizes[17]; - // DEFLATE spec for generating codes - memset(sizes, 0, sizeof(sizes)); - memset(z->fast, 0, sizeof(z->fast)); - for (i = 0; i < num; ++i) - ++sizes[sizelist[i]]; - sizes[0] = 0; - for (i = 1; i < 16; ++i) - if (sizes[i] > (1 << i)) - return stbi__err("bad sizes", "Corrupt PNG"); - code = 0; - for (i = 1; i < 16; ++i) - { - next_code[i] = code; - z->firstcode[i] = (stbi__uint16)code; - z->firstsymbol[i] = (stbi__uint16)k; - code = (code + sizes[i]); - if (sizes[i]) - if (code - 1 >= (1 << i)) - return stbi__err("bad codelengths", "Corrupt PNG"); - z->maxcode[i] = code << (16 - i); // preshift for inner loop - code <<= 1; - k += sizes[i]; - } - z->maxcode[16] = 0x10000; // sentinel - for (i = 0; i < num; ++i) - { - int s = sizelist[i]; - if (s) - { - int c = next_code[s] - z->firstcode[s] + z->firstsymbol[s]; - stbi__uint16 fastv = (stbi__uint16)((s << 9) | i); - z->size[c] = (stbi_uc)s; - z->value[c] = (stbi__uint16)i; - if (s <= STBI__ZFAST_BITS) - { - int j = stbi__bit_reverse(next_code[s], s); - while (j < (1 << STBI__ZFAST_BITS)) - { - z->fast[j] = fastv; - j += (1 << s); - } - } - ++next_code[s]; - } - } - return 1; + // DEFLATE spec for generating codes + memset(sizes, 0, sizeof(sizes)); + memset(z->fast, 0, sizeof(z->fast)); + for (i = 0; i < num; ++i) + ++sizes[sizelist[i]]; + sizes[0] = 0; + for (i = 1; i < 16; ++i) + if (sizes[i] > (1 << i)) + return stbi__err("bad sizes", "Corrupt PNG"); + code = 0; + for (i = 1; i < 16; ++i) + { + next_code[i] = code; + z->firstcode[i] = (stbi__uint16)code; + z->firstsymbol[i] = (stbi__uint16)k; + code = (code + sizes[i]); + if (sizes[i]) + if (code - 1 >= (1 << i)) + return stbi__err("bad codelengths", "Corrupt PNG"); + z->maxcode[i] = code << (16 - i); // preshift for inner loop + code <<= 1; + k += sizes[i]; + } + z->maxcode[16] = 0x10000; // sentinel + for (i = 0; i < num; ++i) + { + int s = sizelist[i]; + if (s) + { + int c = next_code[s] - z->firstcode[s] + z->firstsymbol[s]; + stbi__uint16 fastv = (stbi__uint16)((s << 9) | i); + z->size[c] = (stbi_uc)s; + z->value[c] = (stbi__uint16)i; + if (s <= STBI__ZFAST_BITS) + { + int j = stbi__bit_reverse(next_code[s], s); + while (j < (1 << STBI__ZFAST_BITS)) + { + z->fast[j] = fastv; + j += (1 << s); + } + } + ++next_code[s]; + } + } + return 1; } // zlib-from-memory implementation for PNG reading @@ -4759,342 +4757,367 @@ static int stbi__zbuild_huffman(stbi__zhuffman *z, const stbi_uc *sizelist, int typedef struct { - stbi_uc *zbuffer, *zbuffer_end; - int num_bits; - stbi__uint32 code_buffer; + stbi_uc *zbuffer, *zbuffer_end; + int num_bits; + int hit_zeof_once; + stbi__uint32 code_buffer; - char *zout; - char *zout_start; - char *zout_end; - int z_expandable; + char *zout; + char *zout_start; + char *zout_end; + int z_expandable; - stbi__zhuffman z_length, z_distance; + stbi__zhuffman z_length, z_distance; } stbi__zbuf; stbi_inline static int stbi__zeof(stbi__zbuf *z) { - return (z->zbuffer >= z->zbuffer_end); + return (z->zbuffer >= z->zbuffer_end); } stbi_inline static stbi_uc stbi__zget8(stbi__zbuf *z) { - return stbi__zeof(z) ? 0 : *z->zbuffer++; + return stbi__zeof(z) ? 
0 : *z->zbuffer++; } static void stbi__fill_bits(stbi__zbuf *z) { - do - { - if (z->code_buffer >= (1U << z->num_bits)) - { - z->zbuffer = z->zbuffer_end; /* treat this as EOF so we fail. */ - return; - } - z->code_buffer |= (unsigned int)stbi__zget8(z) << z->num_bits; - z->num_bits += 8; - } while (z->num_bits <= 24); + do + { + if (z->code_buffer >= (1U << z->num_bits)) + { + z->zbuffer = z->zbuffer_end; /* treat this as EOF so we fail. */ + return; + } + z->code_buffer |= (unsigned int)stbi__zget8(z) << z->num_bits; + z->num_bits += 8; + } while (z->num_bits <= 24); } stbi_inline static unsigned int stbi__zreceive(stbi__zbuf *z, int n) { - unsigned int k; - if (z->num_bits < n) - stbi__fill_bits(z); - k = z->code_buffer & ((1 << n) - 1); - z->code_buffer >>= n; - z->num_bits -= n; - return k; + unsigned int k; + if (z->num_bits < n) + stbi__fill_bits(z); + k = z->code_buffer & ((1 << n) - 1); + z->code_buffer >>= n; + z->num_bits -= n; + return k; } static int stbi__zhuffman_decode_slowpath(stbi__zbuf *a, stbi__zhuffman *z) { - int b, s, k; - // not resolved by fast table, so compute it the slow way - // use jpeg approach, which requires MSbits at top - k = stbi__bit_reverse(a->code_buffer, 16); - for (s = STBI__ZFAST_BITS + 1;; ++s) - if (k < z->maxcode[s]) - break; - if (s >= 16) - return -1; // invalid code! - // code size is s, so: - b = (k >> (16 - s)) - z->firstcode[s] + z->firstsymbol[s]; - if (b >= STBI__ZNSYMS) - return -1; // some data was corrupt somewhere! - if (z->size[b] != s) - return -1; // was originally an assert, but report failure instead. - a->code_buffer >>= s; - a->num_bits -= s; - return z->value[b]; + int b, s, k; + // not resolved by fast table, so compute it the slow way + // use jpeg approach, which requires MSbits at top + k = stbi__bit_reverse(a->code_buffer, 16); + for (s = STBI__ZFAST_BITS + 1;; ++s) + if (k < z->maxcode[s]) + break; + if (s >= 16) + return -1; // invalid code! + // code size is s, so: + b = (k >> (16 - s)) - z->firstcode[s] + z->firstsymbol[s]; + if (b >= STBI__ZNSYMS) + return -1; // some data was corrupt somewhere! + if (z->size[b] != s) + return -1; // was originally an assert, but report failure instead. + a->code_buffer >>= s; + a->num_bits -= s; + return z->value[b]; } stbi_inline static int stbi__zhuffman_decode(stbi__zbuf *a, stbi__zhuffman *z) { - int b, s; - if (a->num_bits < 16) - { - if (stbi__zeof(a)) - { - return -1; /* report error for unexpected end of data. */ - } - stbi__fill_bits(a); - } - b = z->fast[a->code_buffer & STBI__ZFAST_MASK]; - if (b) - { - s = b >> 9; - a->code_buffer >>= s; - a->num_bits -= s; - return b & 511; - } - return stbi__zhuffman_decode_slowpath(a, z); + int b, s; + if (a->num_bits < 16) + { + if (stbi__zeof(a)) + { + if (!a->hit_zeof_once) + { + // This is the first time we hit EOF, so insert 16 extra padding bits + // to allow us to keep going; if we actually consume any of them, + // though, that is invalid data. This is caught later. + a->hit_zeof_once = 1; + a->num_bits += 16; // add 16 implicit zero bits + } + else + { + // We already inserted our extra 16 padding bits and have run out + // again; this stream is actually prematurely terminated.
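/* ---- Aside (illustrative sketch, not part of this patch) ----------------
   The new hit_zeof_once logic in miniature. The decoder wants 16 bits of
   lookahead at all times; at end of input it grants itself 16 phantom zero
   bits exactly once so speculative decoding can finish, and the stream is
   declared corrupt only if a phantom bit was actually consumed (which is
   what the num_bits < 16 check at end-of-block detects below). All names
   here (bitreader, br_refill, br_take) are invented for the sketch: */
#include <stdio.h>

typedef struct
{
    const unsigned char *p, *end;
    unsigned int buf;
    int nbits, hit_eof_once;
} bitreader;

static void br_refill(bitreader *b) /* ensure 16 bits of lookahead */
{
    while (b->nbits < 16)
    {
        if (b->p >= b->end)
        {
            if (!b->hit_eof_once)
            {
                b->hit_eof_once = 1;
                b->nbits += 16; /* 16 phantom zero bits, granted once */
            }
            return;
        }
        b->buf |= (unsigned int)(*b->p++) << b->nbits;
        b->nbits += 8;
    }
}

static unsigned br_take(bitreader *b, int n) /* consume n decoded bits */
{
    unsigned k = b->buf & ((1u << n) - 1);
    b->buf >>= n;
    b->nbits -= n;
    return k;
}

int main(void)
{
    static const unsigned char data[1] = {0xA5};
    bitreader b = {data, data + 1, 0, 0, 0};
    br_refill(&b);
    printf("%u\n", br_take(&b, 8)); /* 165: the one real byte */
    br_refill(&b);
    br_take(&b, 4); /* these 4 bits are phantom padding */
    /* mirror of the end-of-block check: phantom bits were consumed
       iff we hit EOF and fewer than 16 bits remain */
    printf("truncated: %d\n", b.hit_eof_once && b.nbits < 16); /* 1 */
    return 0;
}
/* ---- end aside -------------------------------------------------------- */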
+ return -1; + } + } + else + { + stbi__fill_bits(a); + } + } + b = z->fast[a->code_buffer & STBI__ZFAST_MASK]; + if (b) + { + s = b >> 9; + a->code_buffer >>= s; + a->num_bits -= s; + return b & 511; + } + return stbi__zhuffman_decode_slowpath(a, z); } static int stbi__zexpand(stbi__zbuf *z, char *zout, int n) // need to make room for n bytes { - char *q; - unsigned int cur, limit, old_limit; - z->zout = zout; - if (!z->z_expandable) - return stbi__err("output buffer limit", "Corrupt PNG"); - cur = (unsigned int)(z->zout - z->zout_start); - limit = old_limit = (unsigned)(z->zout_end - z->zout_start); - if (UINT_MAX - cur < (unsigned)n) - return stbi__err("outofmem", "Out of memory"); - while (cur + n > limit) - { - if (limit > UINT_MAX / 2) - return stbi__err("outofmem", "Out of memory"); - limit *= 2; - } - q = (char *)STBI_REALLOC_SIZED(z->zout_start, old_limit, limit); - STBI_NOTUSED(old_limit); - if (q == NULL) - return stbi__err("outofmem", "Out of memory"); - z->zout_start = q; - z->zout = q + cur; - z->zout_end = q + limit; - return 1; + char *q; + unsigned int cur, limit, old_limit; + z->zout = zout; + if (!z->z_expandable) + return stbi__err("output buffer limit", "Corrupt PNG"); + cur = (unsigned int)(z->zout - z->zout_start); + limit = old_limit = (unsigned)(z->zout_end - z->zout_start); + if (UINT_MAX - cur < (unsigned)n) + return stbi__err("outofmem", "Out of memory"); + while (cur + n > limit) + { + if (limit > UINT_MAX / 2) + return stbi__err("outofmem", "Out of memory"); + limit *= 2; + } + q = (char *)STBI_REALLOC_SIZED(z->zout_start, old_limit, limit); + STBI_NOTUSED(old_limit); + if (q == NULL) + return stbi__err("outofmem", "Out of memory"); + z->zout_start = q; + z->zout = q + cur; + z->zout_end = q + limit; + return 1; } static const int stbi__zlength_base[31] = { - 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, - 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, - 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0}; + 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, + 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, + 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0}; static const int stbi__zlength_extra[31] = - {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 0, 0}; + {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 0, 0}; static const int stbi__zdist_base[32] = {1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, - 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0}; + 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0}; static const int stbi__zdist_extra[32] = - {0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13}; + {0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13}; static int stbi__parse_huffman_block(stbi__zbuf *a) { - char *zout = a->zout; - for (;;) - { - int z = stbi__zhuffman_decode(a, &a->z_length); - if (z < 256) - { - if (z < 0) - return stbi__err("bad huffman code", "Corrupt PNG"); // error in huffman codes - if (zout >= a->zout_end) - { - if (!stbi__zexpand(a, zout, 1)) - return 0; - zout = a->zout; - } - *zout++ = (char)z; - } - else - { - stbi_uc *p; - int len, dist; - if (z == 256) - { - a->zout = zout; - return 1; - } - if (z >= 286) - return stbi__err("bad huffman code", "Corrupt PNG"); // per DEFLATE, length codes 286 and 287 must not appear in compressed data - z -= 257; - len = stbi__zlength_base[z]; - if (stbi__zlength_extra[z]) - len += 
stbi__zreceive(a, stbi__zlength_extra[z]); - z = stbi__zhuffman_decode(a, &a->z_distance); - if (z < 0 || z >= 30) - return stbi__err("bad huffman code", "Corrupt PNG"); // per DEFLATE, distance codes 30 and 31 must not appear in compressed data - dist = stbi__zdist_base[z]; - if (stbi__zdist_extra[z]) - dist += stbi__zreceive(a, stbi__zdist_extra[z]); - if (zout - a->zout_start < dist) - return stbi__err("bad dist", "Corrupt PNG"); - if (zout + len > a->zout_end) - { - if (!stbi__zexpand(a, zout, len)) - return 0; - zout = a->zout; - } - p = (stbi_uc *)(zout - dist); - if (dist == 1) - { // run of one byte; common in images. - stbi_uc v = *p; - if (len) - { - do - *zout++ = v; - while (--len); - } - } - else - { - if (len) - { - do - *zout++ = *p++; - while (--len); - } - } - } - } + char *zout = a->zout; + for (;;) + { + int z = stbi__zhuffman_decode(a, &a->z_length); + if (z < 256) + { + if (z < 0) + return stbi__err("bad huffman code", "Corrupt PNG"); // error in huffman codes + if (zout >= a->zout_end) + { + if (!stbi__zexpand(a, zout, 1)) + return 0; + zout = a->zout; + } + *zout++ = (char)z; + } + else + { + stbi_uc *p; + int len, dist; + if (z == 256) + { + a->zout = zout; + if (a->hit_zeof_once && a->num_bits < 16) + { + // The first time we hit zeof, we inserted 16 extra zero bits into our bit + // buffer so the decoder can just do its speculative decoding. But if we + // actually consumed any of those bits (which is the case when num_bits < 16), + // the stream actually read past the end so it is malformed. + return stbi__err("unexpected end", "Corrupt PNG"); + } + return 1; + } + if (z >= 286) + return stbi__err("bad huffman code", "Corrupt PNG"); // per DEFLATE, length codes 286 and 287 must not appear in compressed data + z -= 257; + len = stbi__zlength_base[z]; + if (stbi__zlength_extra[z]) + len += stbi__zreceive(a, stbi__zlength_extra[z]); + z = stbi__zhuffman_decode(a, &a->z_distance); + if (z < 0 || z >= 30) + return stbi__err("bad huffman code", "Corrupt PNG"); // per DEFLATE, distance codes 30 and 31 must not appear in compressed data + dist = stbi__zdist_base[z]; + if (stbi__zdist_extra[z]) + dist += stbi__zreceive(a, stbi__zdist_extra[z]); + if (zout - a->zout_start < dist) + return stbi__err("bad dist", "Corrupt PNG"); + if (len > a->zout_end - zout) + { + if (!stbi__zexpand(a, zout, len)) + return 0; + zout = a->zout; + } + p = (stbi_uc *)(zout - dist); + if (dist == 1) + { // run of one byte; common in images. 
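/* ---- Aside (illustrative sketch, not part of this patch) ----------------
   Why the copy below is byte-by-byte and why dist == 1 deserves a special
   case: DEFLATE allows a match to overlap its own output. With dist == 1
   the "match" just replicates the previous byte, i.e. run-length encoding
   in disguise, so it reduces to a fill. A memcpy would be wrong here since
   source and destination overlap: */
#include <stdio.h>

int main(void)
{
    char out[16] = "abc";        /* already-decoded output */
    char *zout = out + 3;        /* write cursor */
    int len = 5, dist = 1;
    const char *p = zout - dist; /* source overlaps the destination */
    while (len--)
        *zout++ = *p++;          /* front-to-back copy preserves the overlap */
    printf("%s\n", out);         /* abcccccc */
    return 0;
}
/* ---- end aside -------------------------------------------------------- */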
+ stbi_uc v = *p; + if (len) + { + do + *zout++ = v; + while (--len); + } + } + else + { + if (len) + { + do + *zout++ = *p++; + while (--len); + } + } + } + } } static int stbi__compute_huffman_codes(stbi__zbuf *a) { - static const stbi_uc length_dezigzag[19] = {16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; - stbi__zhuffman z_codelength; - stbi_uc lencodes[286 + 32 + 137]; // padding for maximum single op - stbi_uc codelength_sizes[19]; - int i, n; + static const stbi_uc length_dezigzag[19] = {16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; + stbi__zhuffman z_codelength; + stbi_uc lencodes[286 + 32 + 137]; // padding for maximum single op + stbi_uc codelength_sizes[19]; + int i, n; - int hlit = stbi__zreceive(a, 5) + 257; - int hdist = stbi__zreceive(a, 5) + 1; - int hclen = stbi__zreceive(a, 4) + 4; - int ntot = hlit + hdist; + int hlit = stbi__zreceive(a, 5) + 257; + int hdist = stbi__zreceive(a, 5) + 1; + int hclen = stbi__zreceive(a, 4) + 4; + int ntot = hlit + hdist; - memset(codelength_sizes, 0, sizeof(codelength_sizes)); - for (i = 0; i < hclen; ++i) - { - int s = stbi__zreceive(a, 3); - codelength_sizes[length_dezigzag[i]] = (stbi_uc)s; - } - if (!stbi__zbuild_huffman(&z_codelength, codelength_sizes, 19)) - return 0; + memset(codelength_sizes, 0, sizeof(codelength_sizes)); + for (i = 0; i < hclen; ++i) + { + int s = stbi__zreceive(a, 3); + codelength_sizes[length_dezigzag[i]] = (stbi_uc)s; + } + if (!stbi__zbuild_huffman(&z_codelength, codelength_sizes, 19)) + return 0; - n = 0; - while (n < ntot) - { - int c = stbi__zhuffman_decode(a, &z_codelength); - if (c < 0 || c >= 19) - return stbi__err("bad codelengths", "Corrupt PNG"); - if (c < 16) - lencodes[n++] = (stbi_uc)c; - else - { - stbi_uc fill = 0; - if (c == 16) - { - c = stbi__zreceive(a, 2) + 3; - if (n == 0) - return stbi__err("bad codelengths", "Corrupt PNG"); - fill = lencodes[n - 1]; - } - else if (c == 17) - { - c = stbi__zreceive(a, 3) + 3; - } - else if (c == 18) - { - c = stbi__zreceive(a, 7) + 11; - } - else - { - return stbi__err("bad codelengths", "Corrupt PNG"); - } - if (ntot - n < c) - return stbi__err("bad codelengths", "Corrupt PNG"); - memset(lencodes + n, fill, c); - n += c; - } - } - if (n != ntot) - return stbi__err("bad codelengths", "Corrupt PNG"); - if (!stbi__zbuild_huffman(&a->z_length, lencodes, hlit)) - return 0; - if (!stbi__zbuild_huffman(&a->z_distance, lencodes + hlit, hdist)) - return 0; - return 1; + n = 0; + while (n < ntot) + { + int c = stbi__zhuffman_decode(a, &z_codelength); + if (c < 0 || c >= 19) + return stbi__err("bad codelengths", "Corrupt PNG"); + if (c < 16) + lencodes[n++] = (stbi_uc)c; + else + { + stbi_uc fill = 0; + if (c == 16) + { + c = stbi__zreceive(a, 2) + 3; + if (n == 0) + return stbi__err("bad codelengths", "Corrupt PNG"); + fill = lencodes[n - 1]; + } + else if (c == 17) + { + c = stbi__zreceive(a, 3) + 3; + } + else if (c == 18) + { + c = stbi__zreceive(a, 7) + 11; + } + else + { + return stbi__err("bad codelengths", "Corrupt PNG"); + } + if (ntot - n < c) + return stbi__err("bad codelengths", "Corrupt PNG"); + memset(lencodes + n, fill, c); + n += c; + } + } + if (n != ntot) + return stbi__err("bad codelengths", "Corrupt PNG"); + if (!stbi__zbuild_huffman(&a->z_length, lencodes, hlit)) + return 0; + if (!stbi__zbuild_huffman(&a->z_distance, lencodes + hlit, hdist)) + return 0; + return 1; } static int stbi__parse_uncompressed_block(stbi__zbuf *a) { - stbi_uc header[4]; - int len, nlen, k; - if (a->num_bits & 7) - 
stbi__zreceive(a, a->num_bits & 7); // discard - // drain the bit-packed data into header - k = 0; - while (a->num_bits > 0) - { - header[k++] = (stbi_uc)(a->code_buffer & 255); // suppress MSVC run-time check - a->code_buffer >>= 8; - a->num_bits -= 8; - } - if (a->num_bits < 0) - return stbi__err("zlib corrupt", "Corrupt PNG"); - // now fill header the normal way - while (k < 4) - header[k++] = stbi__zget8(a); - len = header[1] * 256 + header[0]; - nlen = header[3] * 256 + header[2]; - if (nlen != (len ^ 0xffff)) - return stbi__err("zlib corrupt", "Corrupt PNG"); - if (a->zbuffer + len > a->zbuffer_end) - return stbi__err("read past buffer", "Corrupt PNG"); - if (a->zout + len > a->zout_end) - if (!stbi__zexpand(a, a->zout, len)) - return 0; - memcpy(a->zout, a->zbuffer, len); - a->zbuffer += len; - a->zout += len; - return 1; + stbi_uc header[4]; + int len, nlen, k; + if (a->num_bits & 7) + stbi__zreceive(a, a->num_bits & 7); // discard + // drain the bit-packed data into header + k = 0; + while (a->num_bits > 0) + { + header[k++] = (stbi_uc)(a->code_buffer & 255); // suppress MSVC run-time check + a->code_buffer >>= 8; + a->num_bits -= 8; + } + if (a->num_bits < 0) + return stbi__err("zlib corrupt", "Corrupt PNG"); + // now fill header the normal way + while (k < 4) + header[k++] = stbi__zget8(a); + len = header[1] * 256 + header[0]; + nlen = header[3] * 256 + header[2]; + if (nlen != (len ^ 0xffff)) + return stbi__err("zlib corrupt", "Corrupt PNG"); + if (a->zbuffer + len > a->zbuffer_end) + return stbi__err("read past buffer", "Corrupt PNG"); + if (a->zout + len > a->zout_end) + if (!stbi__zexpand(a, a->zout, len)) + return 0; + memcpy(a->zout, a->zbuffer, len); + a->zbuffer += len; + a->zout += len; + return 1; } static int stbi__parse_zlib_header(stbi__zbuf *a) { - int cmf = stbi__zget8(a); - int cm = cmf & 15; - /* int cinfo = cmf >> 4; */ - int flg = stbi__zget8(a); - if (stbi__zeof(a)) - return stbi__err("bad zlib header", "Corrupt PNG"); // zlib spec - if ((cmf * 256 + flg) % 31 != 0) - return stbi__err("bad zlib header", "Corrupt PNG"); // zlib spec - if (flg & 32) - return stbi__err("no preset dict", "Corrupt PNG"); // preset dictionary not allowed in png - if (cm != 8) - return stbi__err("bad compression", "Corrupt PNG"); // DEFLATE required for png - // window = 1 << (8 + cinfo)... but who cares, we fully buffer output - return 1; + int cmf = stbi__zget8(a); + int cm = cmf & 15; + /* int cinfo = cmf >> 4; */ + int flg = stbi__zget8(a); + if (stbi__zeof(a)) + return stbi__err("bad zlib header", "Corrupt PNG"); // zlib spec + if ((cmf * 256 + flg) % 31 != 0) + return stbi__err("bad zlib header", "Corrupt PNG"); // zlib spec + if (flg & 32) + return stbi__err("no preset dict", "Corrupt PNG"); // preset dictionary not allowed in png + if (cm != 8) + return stbi__err("bad compression", "Corrupt PNG"); // DEFLATE required for png + // window = 1 << (8 + cinfo)... 
but who cares, we fully buffer output + return 1; } static const stbi_uc stbi__zdefault_length[STBI__ZNSYMS] = - { - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8}; + { + 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8}; static const stbi_uc stbi__zdefault_distance[32] = - { - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5}; + { + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5}; /* Init algorithm: { @@ -5110,145 +5133,146 @@ Init algorithm: static int stbi__parse_zlib(stbi__zbuf *a, int parse_header) { - int final, type; - if (parse_header) - if (!stbi__parse_zlib_header(a)) - return 0; - a->num_bits = 0; - a->code_buffer = 0; - do - { - final = stbi__zreceive(a, 1); - type = stbi__zreceive(a, 2); - if (type == 0) - { - if (!stbi__parse_uncompressed_block(a)) - return 0; - } - else if (type == 3) - { - return 0; - } - else - { - if (type == 1) - { - // use fixed code lengths - if (!stbi__zbuild_huffman(&a->z_length, stbi__zdefault_length, STBI__ZNSYMS)) - return 0; - if (!stbi__zbuild_huffman(&a->z_distance, stbi__zdefault_distance, 32)) - return 0; - } - else - { - if (!stbi__compute_huffman_codes(a)) - return 0; - } - if (!stbi__parse_huffman_block(a)) - return 0; - } - } while (!final); - return 1; + int final, type; + if (parse_header) + if (!stbi__parse_zlib_header(a)) + return 0; + a->num_bits = 0; + a->code_buffer = 0; + a->hit_zeof_once = 0; + do + { + final = stbi__zreceive(a, 1); + type = stbi__zreceive(a, 2); + if (type == 0) + { + if (!stbi__parse_uncompressed_block(a)) + return 0; + } + else if (type == 3) + { + return 0; + } + else + { + if (type == 1) + { + // use fixed code lengths + if (!stbi__zbuild_huffman(&a->z_length, stbi__zdefault_length, STBI__ZNSYMS)) + return 0; + if (!stbi__zbuild_huffman(&a->z_distance, stbi__zdefault_distance, 32)) + 
return 0; + } + else + { + if (!stbi__compute_huffman_codes(a)) + return 0; + } + if (!stbi__parse_huffman_block(a)) + return 0; + } + } while (!final); + return 1; } static int stbi__do_zlib(stbi__zbuf *a, char *obuf, int olen, int exp, int parse_header) { - a->zout_start = obuf; - a->zout = obuf; - a->zout_end = obuf + olen; - a->z_expandable = exp; + a->zout_start = obuf; + a->zout = obuf; + a->zout_end = obuf + olen; + a->z_expandable = exp; - return stbi__parse_zlib(a, parse_header); + return stbi__parse_zlib(a, parse_header); } STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen) { - stbi__zbuf a; - char *p = (char *)stbi__malloc(initial_size); - if (p == NULL) - return NULL; - a.zbuffer = (stbi_uc *)buffer; - a.zbuffer_end = (stbi_uc *)buffer + len; - if (stbi__do_zlib(&a, p, initial_size, 1, 1)) - { - if (outlen) - *outlen = (int)(a.zout - a.zout_start); - return a.zout_start; - } - else - { - STBI_FREE(a.zout_start); - return NULL; - } + stbi__zbuf a; + char *p = (char *)stbi__malloc(initial_size); + if (p == NULL) + return NULL; + a.zbuffer = (stbi_uc *)buffer; + a.zbuffer_end = (stbi_uc *)buffer + len; + if (stbi__do_zlib(&a, p, initial_size, 1, 1)) + { + if (outlen) + *outlen = (int)(a.zout - a.zout_start); + return a.zout_start; + } + else + { + STBI_FREE(a.zout_start); + return NULL; + } } STBIDEF char *stbi_zlib_decode_malloc(char const *buffer, int len, int *outlen) { - return stbi_zlib_decode_malloc_guesssize(buffer, len, 16384, outlen); + return stbi_zlib_decode_malloc_guesssize(buffer, len, 16384, outlen); } STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header) { - stbi__zbuf a; - char *p = (char *)stbi__malloc(initial_size); - if (p == NULL) - return NULL; - a.zbuffer = (stbi_uc *)buffer; - a.zbuffer_end = (stbi_uc *)buffer + len; - if (stbi__do_zlib(&a, p, initial_size, 1, parse_header)) - { - if (outlen) - *outlen = (int)(a.zout - a.zout_start); - return a.zout_start; - } - else - { - STBI_FREE(a.zout_start); - return NULL; - } + stbi__zbuf a; + char *p = (char *)stbi__malloc(initial_size); + if (p == NULL) + return NULL; + a.zbuffer = (stbi_uc *)buffer; + a.zbuffer_end = (stbi_uc *)buffer + len; + if (stbi__do_zlib(&a, p, initial_size, 1, parse_header)) + { + if (outlen) + *outlen = (int)(a.zout - a.zout_start); + return a.zout_start; + } + else + { + STBI_FREE(a.zout_start); + return NULL; + } } STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, char const *ibuffer, int ilen) { - stbi__zbuf a; - a.zbuffer = (stbi_uc *)ibuffer; - a.zbuffer_end = (stbi_uc *)ibuffer + ilen; - if (stbi__do_zlib(&a, obuffer, olen, 0, 1)) - return (int)(a.zout - a.zout_start); - else - return -1; + stbi__zbuf a; + a.zbuffer = (stbi_uc *)ibuffer; + a.zbuffer_end = (stbi_uc *)ibuffer + ilen; + if (stbi__do_zlib(&a, obuffer, olen, 0, 1)) + return (int)(a.zout - a.zout_start); + else + return -1; } STBIDEF char *stbi_zlib_decode_noheader_malloc(char const *buffer, int len, int *outlen) { - stbi__zbuf a; - char *p = (char *)stbi__malloc(16384); - if (p == NULL) - return NULL; - a.zbuffer = (stbi_uc *)buffer; - a.zbuffer_end = (stbi_uc *)buffer + len; - if (stbi__do_zlib(&a, p, 16384, 1, 0)) - { - if (outlen) - *outlen = (int)(a.zout - a.zout_start); - return a.zout_start; - } - else - { - STBI_FREE(a.zout_start); - return NULL; - } + stbi__zbuf a; + char *p = (char *)stbi__malloc(16384); + if (p == NULL) + return NULL; + a.zbuffer = (stbi_uc 
*)buffer; + a.zbuffer_end = (stbi_uc *)buffer + len; + if (stbi__do_zlib(&a, p, 16384, 1, 0)) + { + if (outlen) + *outlen = (int)(a.zout - a.zout_start); + return a.zout_start; + } + else + { + STBI_FREE(a.zout_start); + return NULL; + } } STBIDEF int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen) { - stbi__zbuf a; - a.zbuffer = (stbi_uc *)ibuffer; - a.zbuffer_end = (stbi_uc *)ibuffer + ilen; - if (stbi__do_zlib(&a, obuffer, olen, 0, 0)) - return (int)(a.zout - a.zout_start); - else - return -1; + stbi__zbuf a; + a.zbuffer = (stbi_uc *)ibuffer; + a.zbuffer_end = (stbi_uc *)ibuffer + ilen; + if (stbi__do_zlib(&a, obuffer, olen, 0, 0)) + return (int)(a.zout - a.zout_start); + else + return -1; } #endif @@ -5265,516 +5289,451 @@ STBIDEF int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char #ifndef STBI_NO_PNG typedef struct { - stbi__uint32 length; - stbi__uint32 type; + stbi__uint32 length; + stbi__uint32 type; } stbi__pngchunk; static stbi__pngchunk stbi__get_chunk_header(stbi__context *s) { - stbi__pngchunk c; - c.length = stbi__get32be(s); - c.type = stbi__get32be(s); - return c; + stbi__pngchunk c; + c.length = stbi__get32be(s); + c.type = stbi__get32be(s); + return c; } static int stbi__check_png_header(stbi__context *s) { - static const stbi_uc png_sig[8] = {137, 80, 78, 71, 13, 10, 26, 10}; - int i; - for (i = 0; i < 8; ++i) - if (stbi__get8(s) != png_sig[i]) - return stbi__err("bad png sig", "Not a PNG"); - return 1; + static const stbi_uc png_sig[8] = {137, 80, 78, 71, 13, 10, 26, 10}; + int i; + for (i = 0; i < 8; ++i) + if (stbi__get8(s) != png_sig[i]) + return stbi__err("bad png sig", "Not a PNG"); + return 1; } typedef struct { - stbi__context *s; - stbi_uc *idata, *expanded, *out; - int depth; + stbi__context *s; + stbi_uc *idata, *expanded, *out; + int depth; } stbi__png; enum { - STBI__F_none = 0, - STBI__F_sub = 1, - STBI__F_up = 2, - STBI__F_avg = 3, - STBI__F_paeth = 4, - // synthetic filters used for first scanline to avoid needing a dummy row of 0s - STBI__F_avg_first, - STBI__F_paeth_first + STBI__F_none = 0, + STBI__F_sub = 1, + STBI__F_up = 2, + STBI__F_avg = 3, + STBI__F_paeth = 4, + // synthetic filter used for first scanline to avoid needing a dummy row of 0s + STBI__F_avg_first }; static stbi_uc first_row_filter[5] = - { - STBI__F_none, - STBI__F_sub, - STBI__F_none, - STBI__F_avg_first, - STBI__F_paeth_first}; + { + STBI__F_none, + STBI__F_sub, + STBI__F_none, + STBI__F_avg_first, + STBI__F_sub // Paeth with b=c=0 turns out to be equivalent to sub +}; static int stbi__paeth(int a, int b, int c) { - int p = a + b - c; - int pa = abs(p - a); - int pb = abs(p - b); - int pc = abs(p - c); - if (pa <= pb && pa <= pc) - return a; - if (pb <= pc) - return b; - return c; + // This formulation looks very different from the reference in the PNG spec, but is + // actually equivalent and has favorable data dependencies and admits straightforward + // generation of branch-free code, which helps performance significantly. + int thresh = c * 3 - (a + b); + int lo = a < b ? a : b; + int hi = a < b ? b : a; + int t0 = (hi <= thresh) ? lo : c; + int t1 = (thresh <= lo) ? 
hi : t0; + return t1; } static const stbi_uc stbi__depth_scale_table[9] = {0, 0xff, 0x55, 0, 0x11, 0, 0, 0, 0x01}; +// adds an extra all-255 alpha channel +// dest == src is legal +// img_n must be 1 or 3 +static void stbi__create_png_alpha_expand8(stbi_uc *dest, stbi_uc *src, stbi__uint32 x, int img_n) +{ + int i; + // must process data backwards since we allow dest==src + if (img_n == 1) + { + for (i = x - 1; i >= 0; --i) + { + dest[i * 2 + 1] = 255; + dest[i * 2 + 0] = src[i]; + } + } + else + { + STBI_ASSERT(img_n == 3); + for (i = x - 1; i >= 0; --i) + { + dest[i * 4 + 3] = 255; + dest[i * 4 + 2] = src[i * 3 + 2]; + dest[i * 4 + 1] = src[i * 3 + 1]; + dest[i * 4 + 0] = src[i * 3 + 0]; + } + } +} + // create the png data from post-deflated data static int stbi__create_png_image_raw(stbi__png *a, stbi_uc *raw, stbi__uint32 raw_len, int out_n, stbi__uint32 x, stbi__uint32 y, int depth, int color) { - int bytes = (depth == 16 ? 2 : 1); - stbi__context *s = a->s; - stbi__uint32 i, j, stride = x * out_n * bytes; - stbi__uint32 img_len, img_width_bytes; - int k; - int img_n = s->img_n; // copy it into a local for later + int bytes = (depth == 16 ? 2 : 1); + stbi__context *s = a->s; + stbi__uint32 i, j, stride = x * out_n * bytes; + stbi__uint32 img_len, img_width_bytes; + stbi_uc *filter_buf; + int all_ok = 1; + int k; + int img_n = s->img_n; // copy it into a local for later - int output_bytes = out_n * bytes; - int filter_bytes = img_n * bytes; - int width = x; + int output_bytes = out_n * bytes; + int filter_bytes = img_n * bytes; + int width = x; - STBI_ASSERT(out_n == s->img_n || out_n == s->img_n + 1); - a->out = (stbi_uc *)stbi__malloc_mad3(x, y, output_bytes, 0); // extra bytes to write off the end into - if (!a->out) - return stbi__err("outofmem", "Out of memory"); + STBI_ASSERT(out_n == s->img_n || out_n == s->img_n + 1); + a->out = (stbi_uc *)stbi__malloc_mad3(x, y, output_bytes, 0); // extra bytes to write off the end into + if (!a->out) + return stbi__err("outofmem", "Out of memory"); - if (!stbi__mad3sizes_valid(img_n, x, depth, 7)) - return stbi__err("too large", "Corrupt PNG"); - img_width_bytes = (((img_n * x * depth) + 7) >> 3); - img_len = (img_width_bytes + 1) * y; + // note: error exits here don't need to clean up a->out individually, + // stbi__do_png always does on error. + if (!stbi__mad3sizes_valid(img_n, x, depth, 7)) + return stbi__err("too large", "Corrupt PNG"); + img_width_bytes = (((img_n * x * depth) + 7) >> 3); + if (!stbi__mad2sizes_valid(img_width_bytes, y, img_width_bytes)) + return stbi__err("too large", "Corrupt PNG"); + img_len = (img_width_bytes + 1) * y; - // we used to check for exact match between raw_len and img_len on non-interlaced PNGs, - // but issue #276 reported a PNG in the wild that had extra data at the end (all zeros), - // so just check for raw_len < img_len always. - if (raw_len < img_len) - return stbi__err("not enough pixels", "Corrupt PNG"); + // we used to check for exact match between raw_len and img_len on non-interlaced PNGs, + // but issue #276 reported a PNG in the wild that had extra data at the end (all zeros), + // so just check for raw_len < img_len always. + if (raw_len < img_len) + return stbi__err("not enough pixels", "Corrupt PNG"); - for (j = 0; j < y; ++j) - { - stbi_uc *cur = a->out + stride * j; - stbi_uc *prior; - int filter = *raw++; + // Allocate two scan lines worth of filter workspace buffer. 
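+ // Two rows are enough: every PNG filter (none/sub/up/avg/paeth) reads at
+ // most the previous scanline plus already-decoded bytes of the current one,
+ // so the loop below can ping-pong between the two buffer halves via (j & 1).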
+ filter_buf = (stbi_uc *)stbi__malloc_mad2(img_width_bytes, 2, 0); + if (!filter_buf) + return stbi__err("outofmem", "Out of memory"); - if (filter > 4) - return stbi__err("invalid filter", "Corrupt PNG"); + // Filtering for low-bit-depth images + if (depth < 8) + { + filter_bytes = 1; + width = img_width_bytes; + } - if (depth < 8) - { - if (img_width_bytes > x) - return stbi__err("invalid width", "Corrupt PNG"); - cur += x * out_n - img_width_bytes; // store output to the rightmost img_len bytes, so we can decode in place - filter_bytes = 1; - width = img_width_bytes; - } - prior = cur - stride; // bugfix: need to compute this after 'cur +=' computation above + for (j = 0; j < y; ++j) + { + // cur/prior filter buffers alternate + stbi_uc *cur = filter_buf + (j & 1) * img_width_bytes; + stbi_uc *prior = filter_buf + (~j & 1) * img_width_bytes; + stbi_uc *dest = a->out + stride * j; + int nk = width * filter_bytes; + int filter = *raw++; - // if first row, use special filter that doesn't sample previous row - if (j == 0) - filter = first_row_filter[filter]; + // check filter type + if (filter > 4) + { + all_ok = stbi__err("invalid filter", "Corrupt PNG"); + break; + } - // handle first byte explicitly - for (k = 0; k < filter_bytes; ++k) - { - switch (filter) - { - case STBI__F_none: - cur[k] = raw[k]; - break; - case STBI__F_sub: - cur[k] = raw[k]; - break; - case STBI__F_up: - cur[k] = STBI__BYTECAST(raw[k] + prior[k]); - break; - case STBI__F_avg: - cur[k] = STBI__BYTECAST(raw[k] + (prior[k] >> 1)); - break; - case STBI__F_paeth: - cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(0, prior[k], 0)); - break; - case STBI__F_avg_first: - cur[k] = raw[k]; - break; - case STBI__F_paeth_first: - cur[k] = raw[k]; - break; - } - } + // if first row, use special filter that doesn't sample previous row + if (j == 0) + filter = first_row_filter[filter]; - if (depth == 8) - { - if (img_n != out_n) - cur[img_n] = 255; // first pixel - raw += img_n; - cur += out_n; - prior += out_n; - } - else if (depth == 16) - { - if (img_n != out_n) - { - cur[filter_bytes] = 255; // first pixel top byte - cur[filter_bytes + 1] = 255; // first pixel bottom byte - } - raw += filter_bytes; - cur += output_bytes; - prior += output_bytes; - } - else - { - raw += 1; - cur += 1; - prior += 1; - } + // perform actual filtering + switch (filter) + { + case STBI__F_none: + memcpy(cur, raw, nk); + break; + case STBI__F_sub: + memcpy(cur, raw, filter_bytes); + for (k = filter_bytes; k < nk; ++k) + cur[k] = STBI__BYTECAST(raw[k] + cur[k - filter_bytes]); + break; + case STBI__F_up: + for (k = 0; k < nk; ++k) + cur[k] = STBI__BYTECAST(raw[k] + prior[k]); + break; + case STBI__F_avg: + for (k = 0; k < filter_bytes; ++k) + cur[k] = STBI__BYTECAST(raw[k] + (prior[k] >> 1)); + for (k = filter_bytes; k < nk; ++k) + cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k - filter_bytes]) >> 1)); + break; + case STBI__F_paeth: + for (k = 0; k < filter_bytes; ++k) + cur[k] = STBI__BYTECAST(raw[k] + prior[k]); // prior[k] == stbi__paeth(0,prior[k],0) + for (k = filter_bytes; k < nk; ++k) + cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k - filter_bytes], prior[k], prior[k - filter_bytes])); + break; + case STBI__F_avg_first: + memcpy(cur, raw, filter_bytes); + for (k = filter_bytes; k < nk; ++k) + cur[k] = STBI__BYTECAST(raw[k] + (cur[k - filter_bytes] >> 1)); + break; + } - // this is a little gross, so that we don't switch per-pixel or per-component - if (depth < 8 || img_n == out_n) - { - int nk = (width - 1) * filter_bytes; -#define 
STBI__CASE(f) \ - case f: \ - for (k = 0; k < nk; ++k) - switch (filter) - { - // "none" filter turns into a memcpy here; make that explicit. - case STBI__F_none: - memcpy(cur, raw, nk); - break; - STBI__CASE(STBI__F_sub) { cur[k] = STBI__BYTECAST(raw[k] + cur[k - filter_bytes]); } - break; - STBI__CASE(STBI__F_up) { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } - break; - STBI__CASE(STBI__F_avg) { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k - filter_bytes]) >> 1)); } - break; - STBI__CASE(STBI__F_paeth) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k - filter_bytes], prior[k], prior[k - filter_bytes])); } - break; - STBI__CASE(STBI__F_avg_first) { cur[k] = STBI__BYTECAST(raw[k] + (cur[k - filter_bytes] >> 1)); } - break; - STBI__CASE(STBI__F_paeth_first) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k - filter_bytes], 0, 0)); } - break; - } -#undef STBI__CASE - raw += nk; - } - else - { - STBI_ASSERT(img_n + 1 == out_n); -#define STBI__CASE(f) \ - case f: \ - for (i = x - 1; i >= 1; --i, cur[filter_bytes] = 255, raw += filter_bytes, cur += output_bytes, prior += output_bytes) \ - for (k = 0; k < filter_bytes; ++k) - switch (filter) - { - STBI__CASE(STBI__F_none) { cur[k] = raw[k]; } - break; - STBI__CASE(STBI__F_sub) { cur[k] = STBI__BYTECAST(raw[k] + cur[k - output_bytes]); } - break; - STBI__CASE(STBI__F_up) { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } - break; - STBI__CASE(STBI__F_avg) { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k - output_bytes]) >> 1)); } - break; - STBI__CASE(STBI__F_paeth) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k - output_bytes], prior[k], prior[k - output_bytes])); } - break; - STBI__CASE(STBI__F_avg_first) { cur[k] = STBI__BYTECAST(raw[k] + (cur[k - output_bytes] >> 1)); } - break; - STBI__CASE(STBI__F_paeth_first) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k - output_bytes], 0, 0)); } - break; - } -#undef STBI__CASE + raw += nk; - // the loop above sets the high byte of the pixels' alpha, but for - // 16 bit png files we also need the low byte set. we'll do that here. - if (depth == 16) - { - cur = a->out + stride * j; // start at the beginning of the row again - for (i = 0; i < x; ++i, cur += output_bytes) - { - cur[filter_bytes + 1] = 255; - } - } - } - } + // expand decoded bits in cur to dest, also adding an extra alpha channel if desired + if (depth < 8) + { + stbi_uc scale = (color == 0) ? stbi__depth_scale_table[depth] : 1; // scale grayscale values to 0..255 range + stbi_uc *in = cur; + stbi_uc *out = dest; + stbi_uc inb = 0; + stbi__uint32 nsmp = x * img_n; - // we make a separate pass to expand bits to pixels; for performance, - // this could run two scanlines behind the above code, so it won't - // intefere with filtering but will still be in the cache. - if (depth < 8) - { - for (j = 0; j < y; ++j) - { - stbi_uc *cur = a->out + stride * j; - stbi_uc *in_ = a->out + stride * j + x * out_n - img_width_bytes; - // unpack 1/2/4-bit into a 8-bit buffer. allows us to keep the common 8-bit path optimal at minimal cost for 1/2/4-bit - // png guarante byte alignment, if width is not multiple of 8/4/2 we'll decode dummy trailing data that will be skipped in the later loop - stbi_uc scale = (color == 0) ? 
stbi__depth_scale_table[depth] : 1; // scale grayscale values to 0..255 range + // expand bits to bytes first + if (depth == 4) + { + for (i = 0; i < nsmp; ++i) + { + if ((i & 1) == 0) + inb = *in++; + *out++ = scale * (inb >> 4); + inb <<= 4; + } + } + else if (depth == 2) + { + for (i = 0; i < nsmp; ++i) + { + if ((i & 3) == 0) + inb = *in++; + *out++ = scale * (inb >> 6); + inb <<= 2; + } + } + else + { + STBI_ASSERT(depth == 1); + for (i = 0; i < nsmp; ++i) + { + if ((i & 7) == 0) + inb = *in++; + *out++ = scale * (inb >> 7); + inb <<= 1; + } + } - // note that the final byte might overshoot and write more data than desired. - // we can allocate enough data that this never writes out of memory, but it - // could also overwrite the next scanline. can it overwrite non-empty data - // on the next scanline? yes, consider 1-pixel-wide scanlines with 1-bit-per-pixel. - // so we need to explicitly clamp the final ones + // insert alpha=255 values if desired + if (img_n != out_n) + stbi__create_png_alpha_expand8(dest, dest, x, img_n); + } + else if (depth == 8) + { + if (img_n == out_n) + memcpy(dest, cur, x * img_n); + else + stbi__create_png_alpha_expand8(dest, cur, x, img_n); + } + else if (depth == 16) + { + // convert the image data from big-endian to platform-native + stbi__uint16 *dest16 = (stbi__uint16 *)dest; + stbi__uint32 nsmp = x * img_n; - if (depth == 4) - { - for (k = x * img_n; k >= 2; k -= 2, ++in_) - { - *cur++ = scale * ((*in_ >> 4)); - *cur++ = scale * ((*in_) & 0x0f); - } - if (k > 0) - *cur++ = scale * ((*in_ >> 4)); - } - else if (depth == 2) - { - for (k = x * img_n; k >= 4; k -= 4, ++in_) - { - *cur++ = scale * ((*in_ >> 6)); - *cur++ = scale * ((*in_ >> 4) & 0x03); - *cur++ = scale * ((*in_ >> 2) & 0x03); - *cur++ = scale * ((*in_) & 0x03); - } - if (k > 0) - *cur++ = scale * ((*in_ >> 6)); - if (k > 1) - *cur++ = scale * ((*in_ >> 4) & 0x03); - if (k > 2) - *cur++ = scale * ((*in_ >> 2) & 0x03); - } - else if (depth == 1) - { - for (k = x * img_n; k >= 8; k -= 8, ++in_) - { - *cur++ = scale * ((*in_ >> 7)); - *cur++ = scale * ((*in_ >> 6) & 0x01); - *cur++ = scale * ((*in_ >> 5) & 0x01); - *cur++ = scale * ((*in_ >> 4) & 0x01); - *cur++ = scale * ((*in_ >> 3) & 0x01); - *cur++ = scale * ((*in_ >> 2) & 0x01); - *cur++ = scale * ((*in_ >> 1) & 0x01); - *cur++ = scale * ((*in_) & 0x01); - } - if (k > 0) - *cur++ = scale * ((*in_ >> 7)); - if (k > 1) - *cur++ = scale * ((*in_ >> 6) & 0x01); - if (k > 2) - *cur++ = scale * ((*in_ >> 5) & 0x01); - if (k > 3) - *cur++ = scale * ((*in_ >> 4) & 0x01); - if (k > 4) - *cur++ = scale * ((*in_ >> 3) & 0x01); - if (k > 5) - *cur++ = scale * ((*in_ >> 2) & 0x01); - if (k > 6) - *cur++ = scale * ((*in_ >> 1) & 0x01); - } - if (img_n != out_n) - { - int q; - // insert alpha = 255 - cur = a->out + stride * j; - if (img_n == 1) - { - for (q = x - 1; q >= 0; --q) - { - cur[q * 2 + 1] = 255; - cur[q * 2 + 0] = cur[q]; - } - } - else - { - STBI_ASSERT(img_n == 3); - for (q = x - 1; q >= 0; --q) - { - cur[q * 4 + 3] = 255; - cur[q * 4 + 2] = cur[q * 3 + 2]; - cur[q * 4 + 1] = cur[q * 3 + 1]; - cur[q * 4 + 0] = cur[q * 3 + 0]; - } - } - } - } - } - else if (depth == 16) - { - // force the image data from big-endian to platform-native. - // this is done in a separate pass due to the decoding relying - // on the data being untouched, but could probably be done - // per-line during decode if care is taken. 
- stbi_uc *cur = a->out; - stbi__uint16 *cur16 = (stbi__uint16 *)cur; + if (img_n == out_n) + { + for (i = 0; i < nsmp; ++i, ++dest16, cur += 2) + *dest16 = (cur[0] << 8) | cur[1]; + } + else + { + STBI_ASSERT(img_n + 1 == out_n); + if (img_n == 1) + { + for (i = 0; i < x; ++i, dest16 += 2, cur += 2) + { + dest16[0] = (cur[0] << 8) | cur[1]; + dest16[1] = 0xffff; + } + } + else + { + STBI_ASSERT(img_n == 3); + for (i = 0; i < x; ++i, dest16 += 4, cur += 6) + { + dest16[0] = (cur[0] << 8) | cur[1]; + dest16[1] = (cur[2] << 8) | cur[3]; + dest16[2] = (cur[4] << 8) | cur[5]; + dest16[3] = 0xffff; + } + } + } + } + } - for (i = 0; i < x * y * out_n; ++i, cur16++, cur += 2) - { - *cur16 = (cur[0] << 8) | cur[1]; - } - } + STBI_FREE(filter_buf); + if (!all_ok) + return 0; - return 1; + return 1; } static int stbi__create_png_image(stbi__png *a, stbi_uc *image_data, stbi__uint32 image_data_len, int out_n, int depth, int color, int interlaced) { - int bytes = (depth == 16 ? 2 : 1); - int out_bytes = out_n * bytes; - stbi_uc *final; - int p; - if (!interlaced) - return stbi__create_png_image_raw(a, image_data, image_data_len, out_n, a->s->img_x, a->s->img_y, depth, color); + int bytes = (depth == 16 ? 2 : 1); + int out_bytes = out_n * bytes; + stbi_uc *final; + int p; + if (!interlaced) + return stbi__create_png_image_raw(a, image_data, image_data_len, out_n, a->s->img_x, a->s->img_y, depth, color); - // de-interlacing - final = (stbi_uc *)stbi__malloc_mad3(a->s->img_x, a->s->img_y, out_bytes, 0); - if (!final) - return stbi__err("outofmem", "Out of memory"); - for (p = 0; p < 7; ++p) - { - int xorig[] = {0, 4, 0, 2, 0, 1, 0}; - int yorig[] = {0, 0, 4, 0, 2, 0, 1}; - int xspc[] = {8, 8, 4, 4, 2, 2, 1}; - int yspc[] = {8, 8, 8, 4, 4, 2, 2}; - int i, j, x, y; - // pass1_x[4] = 0, pass1_x[5] = 1, pass1_x[12] = 1 - x = (a->s->img_x - xorig[p] + xspc[p] - 1) / xspc[p]; - y = (a->s->img_y - yorig[p] + yspc[p] - 1) / yspc[p]; - if (x && y) - { - stbi__uint32 img_len = ((((a->s->img_n * x * depth) + 7) >> 3) + 1) * y; - if (!stbi__create_png_image_raw(a, image_data, image_data_len, out_n, x, y, depth, color)) - { - STBI_FREE(final); - return 0; - } - for (j = 0; j < y; ++j) - { - for (i = 0; i < x; ++i) - { - int out_y = j * yspc[p] + yorig[p]; - int out_x = i * xspc[p] + xorig[p]; - memcpy(final + out_y * a->s->img_x * out_bytes + out_x * out_bytes, - a->out + (j * x + i) * out_bytes, out_bytes); - } - } - STBI_FREE(a->out); - image_data += img_len; - image_data_len -= img_len; - } - } - a->out = final; + // de-interlacing + final = (stbi_uc *)stbi__malloc_mad3(a->s->img_x, a->s->img_y, out_bytes, 0); + if (!final) + return stbi__err("outofmem", "Out of memory"); + for (p = 0; p < 7; ++p) + { + int xorig[] = {0, 4, 0, 2, 0, 1, 0}; + int yorig[] = {0, 0, 4, 0, 2, 0, 1}; + int xspc[] = {8, 8, 4, 4, 2, 2, 1}; + int yspc[] = {8, 8, 8, 4, 4, 2, 2}; + int i, j, x, y; + // pass1_x[4] = 0, pass1_x[5] = 1, pass1_x[12] = 1 + x = (a->s->img_x - xorig[p] + xspc[p] - 1) / xspc[p]; + y = (a->s->img_y - yorig[p] + yspc[p] - 1) / yspc[p]; + if (x && y) + { + stbi__uint32 img_len = ((((a->s->img_n * x * depth) + 7) >> 3) + 1) * y; + if (!stbi__create_png_image_raw(a, image_data, image_data_len, out_n, x, y, depth, color)) + { + STBI_FREE(final); + return 0; + } + for (j = 0; j < y; ++j) + { + for (i = 0; i < x; ++i) + { + int out_y = j * yspc[p] + yorig[p]; + int out_x = i * xspc[p] + xorig[p]; + memcpy(final + out_y * a->s->img_x * out_bytes + out_x * out_bytes, + a->out + (j * x + i) * out_bytes, out_bytes); + } + } + 
STBI_FREE(a->out); + image_data += img_len; + image_data_len -= img_len; + } + } + a->out = final; - return 1; + return 1; } static int stbi__compute_transparency(stbi__png *z, stbi_uc tc[3], int out_n) { - stbi__context *s = z->s; - stbi__uint32 i, pixel_count = s->img_x * s->img_y; - stbi_uc *p = z->out; + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi_uc *p = z->out; - // compute color-based transparency, assuming we've - // already got 255 as the alpha value in the output - STBI_ASSERT(out_n == 2 || out_n == 4); + // compute color-based transparency, assuming we've + // already got 255 as the alpha value in the output + STBI_ASSERT(out_n == 2 || out_n == 4); - if (out_n == 2) - { - for (i = 0; i < pixel_count; ++i) - { - p[1] = (p[0] == tc[0] ? 0 : 255); - p += 2; - } - } - else - { - for (i = 0; i < pixel_count; ++i) - { - if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2]) - p[3] = 0; - p += 4; - } - } - return 1; + if (out_n == 2) + { + for (i = 0; i < pixel_count; ++i) + { + p[1] = (p[0] == tc[0] ? 0 : 255); + p += 2; + } + } + else + { + for (i = 0; i < pixel_count; ++i) + { + if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2]) + p[3] = 0; + p += 4; + } + } + return 1; } static int stbi__compute_transparency16(stbi__png *z, stbi__uint16 tc[3], int out_n) { - stbi__context *s = z->s; - stbi__uint32 i, pixel_count = s->img_x * s->img_y; - stbi__uint16 *p = (stbi__uint16 *)z->out; + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi__uint16 *p = (stbi__uint16 *)z->out; - // compute color-based transparency, assuming we've - // already got 65535 as the alpha value in the output - STBI_ASSERT(out_n == 2 || out_n == 4); + // compute color-based transparency, assuming we've + // already got 65535 as the alpha value in the output + STBI_ASSERT(out_n == 2 || out_n == 4); - if (out_n == 2) - { - for (i = 0; i < pixel_count; ++i) - { - p[1] = (p[0] == tc[0] ? 0 : 65535); - p += 2; - } - } - else - { - for (i = 0; i < pixel_count; ++i) - { - if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2]) - p[3] = 0; - p += 4; - } - } - return 1; + if (out_n == 2) + { + for (i = 0; i < pixel_count; ++i) + { + p[1] = (p[0] == tc[0] ? 
0 : 65535); + p += 2; + } + } + else + { + for (i = 0; i < pixel_count; ++i) + { + if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2]) + p[3] = 0; + p += 4; + } + } + return 1; } static int stbi__expand_png_palette(stbi__png *a, stbi_uc *palette, int len, int pal_img_n) { - stbi__uint32 i, pixel_count = a->s->img_x * a->s->img_y; - stbi_uc *p, *temp_out, *orig = a->out; + stbi__uint32 i, pixel_count = a->s->img_x * a->s->img_y; + stbi_uc *p, *temp_out, *orig = a->out; - p = (stbi_uc *)stbi__malloc_mad2(pixel_count, pal_img_n, 0); - if (p == NULL) - return stbi__err("outofmem", "Out of memory"); + p = (stbi_uc *)stbi__malloc_mad2(pixel_count, pal_img_n, 0); + if (p == NULL) + return stbi__err("outofmem", "Out of memory"); - // between here and free(out) below, exitting would leak - temp_out = p; + // between here and free(out) below, exitting would leak + temp_out = p; - if (pal_img_n == 3) - { - for (i = 0; i < pixel_count; ++i) - { - int n = orig[i] * 4; - p[0] = palette[n]; - p[1] = palette[n + 1]; - p[2] = palette[n + 2]; - p += 3; - } - } - else - { - for (i = 0; i < pixel_count; ++i) - { - int n = orig[i] * 4; - p[0] = palette[n]; - p[1] = palette[n + 1]; - p[2] = palette[n + 2]; - p[3] = palette[n + 3]; - p += 4; - } - } - STBI_FREE(a->out); - a->out = temp_out; + if (pal_img_n == 3) + { + for (i = 0; i < pixel_count; ++i) + { + int n = orig[i] * 4; + p[0] = palette[n]; + p[1] = palette[n + 1]; + p[2] = palette[n + 2]; + p += 3; + } + } + else + { + for (i = 0; i < pixel_count; ++i) + { + int n = orig[i] * 4; + p[0] = palette[n]; + p[1] = palette[n + 1]; + p[2] = palette[n + 2]; + p[3] = palette[n + 3]; + p += 4; + } + } + STBI_FREE(a->out); + a->out = temp_out; - STBI_NOTUSED(len); + STBI_NOTUSED(len); - return 1; + return 1; } static int stbi__unpremultiply_on_load_global = 0; @@ -5782,12 +5741,12 @@ static int stbi__de_iphone_flag_global = 0; STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply) { - stbi__unpremultiply_on_load_global = flag_true_if_should_unpremultiply; + stbi__unpremultiply_on_load_global = flag_true_if_should_unpremultiply; } STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert) { - stbi__de_iphone_flag_global = flag_true_if_should_convert; + stbi__de_iphone_flag_global = flag_true_if_should_convert; } #ifndef STBI_THREAD_LOCAL @@ -5799,442 +5758,442 @@ static STBI_THREAD_LOCAL int stbi__de_iphone_flag_local, stbi__de_iphone_flag_se STBIDEF void stbi_set_unpremultiply_on_load_thread(int flag_true_if_should_unpremultiply) { - stbi__unpremultiply_on_load_local = flag_true_if_should_unpremultiply; - stbi__unpremultiply_on_load_set = 1; + stbi__unpremultiply_on_load_local = flag_true_if_should_unpremultiply; + stbi__unpremultiply_on_load_set = 1; } STBIDEF void stbi_convert_iphone_png_to_rgb_thread(int flag_true_if_should_convert) { - stbi__de_iphone_flag_local = flag_true_if_should_convert; - stbi__de_iphone_flag_set = 1; + stbi__de_iphone_flag_local = flag_true_if_should_convert; + stbi__de_iphone_flag_set = 1; } #define stbi__unpremultiply_on_load (stbi__unpremultiply_on_load_set \ - ? stbi__unpremultiply_on_load_local \ - : stbi__unpremultiply_on_load_global) + ? stbi__unpremultiply_on_load_local \ + : stbi__unpremultiply_on_load_global) #define stbi__de_iphone_flag (stbi__de_iphone_flag_set \ - ? stbi__de_iphone_flag_local \ - : stbi__de_iphone_flag_global) + ? 
stbi__de_iphone_flag_local \ + : stbi__de_iphone_flag_global) #endif // STBI_THREAD_LOCAL static void stbi__de_iphone(stbi__png *z) { - stbi__context *s = z->s; - stbi__uint32 i, pixel_count = s->img_x * s->img_y; - stbi_uc *p = z->out; + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi_uc *p = z->out; - if (s->img_out_n == 3) - { // convert bgr to rgb - for (i = 0; i < pixel_count; ++i) - { - stbi_uc t = p[0]; - p[0] = p[2]; - p[2] = t; - p += 3; - } - } - else - { - STBI_ASSERT(s->img_out_n == 4); - if (stbi__unpremultiply_on_load) - { - // convert bgr to rgb and unpremultiply - for (i = 0; i < pixel_count; ++i) - { - stbi_uc a = p[3]; - stbi_uc t = p[0]; - if (a) - { - stbi_uc half = a / 2; - p[0] = (p[2] * 255 + half) / a; - p[1] = (p[1] * 255 + half) / a; - p[2] = (t * 255 + half) / a; - } - else - { - p[0] = p[2]; - p[2] = t; - } - p += 4; - } - } - else - { - // convert bgr to rgb - for (i = 0; i < pixel_count; ++i) - { - stbi_uc t = p[0]; - p[0] = p[2]; - p[2] = t; - p += 4; - } - } - } + if (s->img_out_n == 3) + { // convert bgr to rgb + for (i = 0; i < pixel_count; ++i) + { + stbi_uc t = p[0]; + p[0] = p[2]; + p[2] = t; + p += 3; + } + } + else + { + STBI_ASSERT(s->img_out_n == 4); + if (stbi__unpremultiply_on_load) + { + // convert bgr to rgb and unpremultiply + for (i = 0; i < pixel_count; ++i) + { + stbi_uc a = p[3]; + stbi_uc t = p[0]; + if (a) + { + stbi_uc half = a / 2; + p[0] = (p[2] * 255 + half) / a; + p[1] = (p[1] * 255 + half) / a; + p[2] = (t * 255 + half) / a; + } + else + { + p[0] = p[2]; + p[2] = t; + } + p += 4; + } + } + else + { + // convert bgr to rgb + for (i = 0; i < pixel_count; ++i) + { + stbi_uc t = p[0]; + p[0] = p[2]; + p[2] = t; + p += 4; + } + } + } } #define STBI__PNG_TYPE(a, b, c, d) (((unsigned)(a) << 24) + ((unsigned)(b) << 16) + ((unsigned)(c) << 8) + (unsigned)(d)) static int stbi__parse_png_file(stbi__png *z, int scan, int req_comp) { - stbi_uc palette[1024], pal_img_n = 0; - stbi_uc has_trans = 0, tc[3] = {0}; - stbi__uint16 tc16[3]; - stbi__uint32 ioff = 0, idata_limit = 0, i, pal_len = 0; - int first = 1, k, interlace = 0, color = 0, is_iphone = 0; - stbi__context *s = z->s; + stbi_uc palette[1024], pal_img_n = 0; + stbi_uc has_trans = 0, tc[3] = {0}; + stbi__uint16 tc16[3]; + stbi__uint32 ioff = 0, idata_limit = 0, i, pal_len = 0; + int first = 1, k, interlace = 0, color = 0, is_iphone = 0; + stbi__context *s = z->s; - z->expanded = NULL; - z->idata = NULL; - z->out = NULL; + z->expanded = NULL; + z->idata = NULL; + z->out = NULL; - if (!stbi__check_png_header(s)) - return 0; + if (!stbi__check_png_header(s)) + return 0; - if (scan == STBI__SCAN_type) - return 1; + if (scan == STBI__SCAN_type) + return 1; - for (;;) - { - stbi__pngchunk c = stbi__get_chunk_header(s); - switch (c.type) - { - case STBI__PNG_TYPE('C', 'g', 'B', 'I'): - is_iphone = 1; - stbi__skip(s, c.length); - break; - case STBI__PNG_TYPE('I', 'H', 'D', 'R'): - { - int comp, filter; - if (!first) - return stbi__err("multiple IHDR", "Corrupt PNG"); - first = 0; - if (c.length != 13) - return stbi__err("bad IHDR len", "Corrupt PNG"); - s->img_x = stbi__get32be(s); - s->img_y = stbi__get32be(s); - if (s->img_y > STBI_MAX_DIMENSIONS) - return stbi__err("too large", "Very large image (corrupt?)"); - if (s->img_x > STBI_MAX_DIMENSIONS) - return stbi__err("too large", "Very large image (corrupt?)"); - z->depth = stbi__get8(s); - if (z->depth != 1 && z->depth != 2 && z->depth != 4 && z->depth != 8 && z->depth != 16) - return 
stbi__err("1/2/4/8/16-bit only", "PNG not supported: 1/2/4/8/16-bit only"); - color = stbi__get8(s); - if (color > 6) - return stbi__err("bad ctype", "Corrupt PNG"); - if (color == 3 && z->depth == 16) - return stbi__err("bad ctype", "Corrupt PNG"); - if (color == 3) - pal_img_n = 3; - else if (color & 1) - return stbi__err("bad ctype", "Corrupt PNG"); - comp = stbi__get8(s); - if (comp) - return stbi__err("bad comp method", "Corrupt PNG"); - filter = stbi__get8(s); - if (filter) - return stbi__err("bad filter method", "Corrupt PNG"); - interlace = stbi__get8(s); - if (interlace > 1) - return stbi__err("bad interlace method", "Corrupt PNG"); - if (!s->img_x || !s->img_y) - return stbi__err("0-pixel image", "Corrupt PNG"); - if (!pal_img_n) - { - s->img_n = (color & 2 ? 3 : 1) + (color & 4 ? 1 : 0); - if ((1 << 30) / s->img_x / s->img_n < s->img_y) - return stbi__err("too large", "Image too large to decode"); - } - else - { - // if paletted, then pal_n is our final components, and - // img_n is # components to decompress/filter. - s->img_n = 1; - if ((1 << 30) / s->img_x / 4 < s->img_y) - return stbi__err("too large", "Corrupt PNG"); - } - // even with SCAN_header, have to scan to see if we have a tRNS - break; - } + for (;;) + { + stbi__pngchunk c = stbi__get_chunk_header(s); + switch (c.type) + { + case STBI__PNG_TYPE('C', 'g', 'B', 'I'): + is_iphone = 1; + stbi__skip(s, c.length); + break; + case STBI__PNG_TYPE('I', 'H', 'D', 'R'): + { + int comp, filter; + if (!first) + return stbi__err("multiple IHDR", "Corrupt PNG"); + first = 0; + if (c.length != 13) + return stbi__err("bad IHDR len", "Corrupt PNG"); + s->img_x = stbi__get32be(s); + s->img_y = stbi__get32be(s); + if (s->img_y > STBI_MAX_DIMENSIONS) + return stbi__err("too large", "Very large image (corrupt?)"); + if (s->img_x > STBI_MAX_DIMENSIONS) + return stbi__err("too large", "Very large image (corrupt?)"); + z->depth = stbi__get8(s); + if (z->depth != 1 && z->depth != 2 && z->depth != 4 && z->depth != 8 && z->depth != 16) + return stbi__err("1/2/4/8/16-bit only", "PNG not supported: 1/2/4/8/16-bit only"); + color = stbi__get8(s); + if (color > 6) + return stbi__err("bad ctype", "Corrupt PNG"); + if (color == 3 && z->depth == 16) + return stbi__err("bad ctype", "Corrupt PNG"); + if (color == 3) + pal_img_n = 3; + else if (color & 1) + return stbi__err("bad ctype", "Corrupt PNG"); + comp = stbi__get8(s); + if (comp) + return stbi__err("bad comp method", "Corrupt PNG"); + filter = stbi__get8(s); + if (filter) + return stbi__err("bad filter method", "Corrupt PNG"); + interlace = stbi__get8(s); + if (interlace > 1) + return stbi__err("bad interlace method", "Corrupt PNG"); + if (!s->img_x || !s->img_y) + return stbi__err("0-pixel image", "Corrupt PNG"); + if (!pal_img_n) + { + s->img_n = (color & 2 ? 3 : 1) + (color & 4 ? 1 : 0); + if ((1 << 30) / s->img_x / s->img_n < s->img_y) + return stbi__err("too large", "Image too large to decode"); + } + else + { + // if paletted, then pal_n is our final components, and + // img_n is # components to decompress/filter. 
+ s->img_n = 1; + if ((1 << 30) / s->img_x / 4 < s->img_y) + return stbi__err("too large", "Corrupt PNG"); + } + // even with SCAN_header, have to scan to see if we have a tRNS + break; + } - case STBI__PNG_TYPE('P', 'L', 'T', 'E'): - { - if (first) - return stbi__err("first not IHDR", "Corrupt PNG"); - if (c.length > 256 * 3) - return stbi__err("invalid PLTE", "Corrupt PNG"); - pal_len = c.length / 3; - if (pal_len * 3 != c.length) - return stbi__err("invalid PLTE", "Corrupt PNG"); - for (i = 0; i < pal_len; ++i) - { - palette[i * 4 + 0] = stbi__get8(s); - palette[i * 4 + 1] = stbi__get8(s); - palette[i * 4 + 2] = stbi__get8(s); - palette[i * 4 + 3] = 255; - } - break; - } + case STBI__PNG_TYPE('P', 'L', 'T', 'E'): + { + if (first) + return stbi__err("first not IHDR", "Corrupt PNG"); + if (c.length > 256 * 3) + return stbi__err("invalid PLTE", "Corrupt PNG"); + pal_len = c.length / 3; + if (pal_len * 3 != c.length) + return stbi__err("invalid PLTE", "Corrupt PNG"); + for (i = 0; i < pal_len; ++i) + { + palette[i * 4 + 0] = stbi__get8(s); + palette[i * 4 + 1] = stbi__get8(s); + palette[i * 4 + 2] = stbi__get8(s); + palette[i * 4 + 3] = 255; + } + break; + } - case STBI__PNG_TYPE('t', 'R', 'N', 'S'): - { - if (first) - return stbi__err("first not IHDR", "Corrupt PNG"); - if (z->idata) - return stbi__err("tRNS after IDAT", "Corrupt PNG"); - if (pal_img_n) - { - if (scan == STBI__SCAN_header) - { - s->img_n = 4; - return 1; - } - if (pal_len == 0) - return stbi__err("tRNS before PLTE", "Corrupt PNG"); - if (c.length > pal_len) - return stbi__err("bad tRNS len", "Corrupt PNG"); - pal_img_n = 4; - for (i = 0; i < c.length; ++i) - palette[i * 4 + 3] = stbi__get8(s); - } - else - { - if (!(s->img_n & 1)) - return stbi__err("tRNS with alpha", "Corrupt PNG"); - if (c.length != (stbi__uint32)s->img_n * 2) - return stbi__err("bad tRNS len", "Corrupt PNG"); - has_trans = 1; - // non-paletted with tRNS = constant alpha. if header-scanning, we can stop now. - if (scan == STBI__SCAN_header) - { - ++s->img_n; - return 1; - } - if (z->depth == 16) - { - for (k = 0; k < s->img_n; ++k) - tc16[k] = (stbi__uint16)stbi__get16be(s); // copy the values as-is - } - else - { - for (k = 0; k < s->img_n; ++k) - tc[k] = (stbi_uc)(stbi__get16be(s) & 255) * stbi__depth_scale_table[z->depth]; // non 8-bit images will be larger - } - } - break; - } + case STBI__PNG_TYPE('t', 'R', 'N', 'S'): + { + if (first) + return stbi__err("first not IHDR", "Corrupt PNG"); + if (z->idata) + return stbi__err("tRNS after IDAT", "Corrupt PNG"); + if (pal_img_n) + { + if (scan == STBI__SCAN_header) + { + s->img_n = 4; + return 1; + } + if (pal_len == 0) + return stbi__err("tRNS before PLTE", "Corrupt PNG"); + if (c.length > pal_len) + return stbi__err("bad tRNS len", "Corrupt PNG"); + pal_img_n = 4; + for (i = 0; i < c.length; ++i) + palette[i * 4 + 3] = stbi__get8(s); + } + else + { + if (!(s->img_n & 1)) + return stbi__err("tRNS with alpha", "Corrupt PNG"); + if (c.length != (stbi__uint32)s->img_n * 2) + return stbi__err("bad tRNS len", "Corrupt PNG"); + has_trans = 1; + // non-paletted with tRNS = constant alpha. if header-scanning, we can stop now. 
+ if (scan == STBI__SCAN_header) + { + ++s->img_n; + return 1; + } + if (z->depth == 16) + { + for (k = 0; k < s->img_n && k < 3; ++k) // extra loop test to suppress false GCC warning + tc16[k] = (stbi__uint16)stbi__get16be(s); // copy the values as-is + } + else + { + for (k = 0; k < s->img_n && k < 3; ++k) + tc[k] = (stbi_uc)(stbi__get16be(s) & 255) * stbi__depth_scale_table[z->depth]; // non 8-bit images will be larger + } + } + break; + } - case STBI__PNG_TYPE('I', 'D', 'A', 'T'): - { - if (first) - return stbi__err("first not IHDR", "Corrupt PNG"); - if (pal_img_n && !pal_len) - return stbi__err("no PLTE", "Corrupt PNG"); - if (scan == STBI__SCAN_header) - { - // header scan definitely stops at first IDAT - if (pal_img_n) - s->img_n = pal_img_n; - return 1; - } - if (c.length > (1u << 30)) - return stbi__err("IDAT size limit", "IDAT section larger than 2^30 bytes"); - if ((int)(ioff + c.length) < (int)ioff) - return 0; - if (ioff + c.length > idata_limit) - { - stbi__uint32 idata_limit_old = idata_limit; - stbi_uc *p; - if (idata_limit == 0) - idata_limit = c.length > 4096 ? c.length : 4096; - while (ioff + c.length > idata_limit) - idata_limit *= 2; - STBI_NOTUSED(idata_limit_old); - p = (stbi_uc *)STBI_REALLOC_SIZED(z->idata, idata_limit_old, idata_limit); - if (p == NULL) - return stbi__err("outofmem", "Out of memory"); - z->idata = p; - } - if (!stbi__getn(s, z->idata + ioff, c.length)) - return stbi__err("outofdata", "Corrupt PNG"); - ioff += c.length; - break; - } + case STBI__PNG_TYPE('I', 'D', 'A', 'T'): + { + if (first) + return stbi__err("first not IHDR", "Corrupt PNG"); + if (pal_img_n && !pal_len) + return stbi__err("no PLTE", "Corrupt PNG"); + if (scan == STBI__SCAN_header) + { + // header scan definitely stops at first IDAT + if (pal_img_n) + s->img_n = pal_img_n; + return 1; + } + if (c.length > (1u << 30)) + return stbi__err("IDAT size limit", "IDAT section larger than 2^30 bytes"); + if ((int)(ioff + c.length) < (int)ioff) + return 0; + if (ioff + c.length > idata_limit) + { + stbi__uint32 idata_limit_old = idata_limit; + stbi_uc *p; + if (idata_limit == 0) + idata_limit = c.length > 4096 ? 
c.length : 4096; + while (ioff + c.length > idata_limit) + idata_limit *= 2; + STBI_NOTUSED(idata_limit_old); + p = (stbi_uc *)STBI_REALLOC_SIZED(z->idata, idata_limit_old, idata_limit); + if (p == NULL) + return stbi__err("outofmem", "Out of memory"); + z->idata = p; + } + if (!stbi__getn(s, z->idata + ioff, c.length)) + return stbi__err("outofdata", "Corrupt PNG"); + ioff += c.length; + break; + } - case STBI__PNG_TYPE('I', 'E', 'N', 'D'): - { - stbi__uint32 raw_len, bpl; - if (first) - return stbi__err("first not IHDR", "Corrupt PNG"); - if (scan != STBI__SCAN_load) - return 1; - if (z->idata == NULL) - return stbi__err("no IDAT", "Corrupt PNG"); - // initial guess for decoded data size to avoid unnecessary reallocs - bpl = (s->img_x * z->depth + 7) / 8; // bytes per line, per component - raw_len = bpl * s->img_y * s->img_n /* pixels */ + s->img_y /* filter mode per row */; - z->expanded = (stbi_uc *)stbi_zlib_decode_malloc_guesssize_headerflag((char *)z->idata, ioff, raw_len, (int *)&raw_len, !is_iphone); - if (z->expanded == NULL) - return 0; // zlib should set error - STBI_FREE(z->idata); - z->idata = NULL; - if ((req_comp == s->img_n + 1 && req_comp != 3 && !pal_img_n) || has_trans) - s->img_out_n = s->img_n + 1; - else - s->img_out_n = s->img_n; - if (!stbi__create_png_image(z, z->expanded, raw_len, s->img_out_n, z->depth, color, interlace)) - return 0; - if (has_trans) - { - if (z->depth == 16) - { - if (!stbi__compute_transparency16(z, tc16, s->img_out_n)) - return 0; - } - else - { - if (!stbi__compute_transparency(z, tc, s->img_out_n)) - return 0; - } - } - if (is_iphone && stbi__de_iphone_flag && s->img_out_n > 2) - stbi__de_iphone(z); - if (pal_img_n) - { - // pal_img_n == 3 or 4 - s->img_n = pal_img_n; // record the actual colors we had - s->img_out_n = pal_img_n; - if (req_comp >= 3) - s->img_out_n = req_comp; - if (!stbi__expand_png_palette(z, palette, pal_len, s->img_out_n)) - return 0; - } - else if (has_trans) - { - // non-paletted image with tRNS -> source image has (constant) alpha - ++s->img_n; - } - STBI_FREE(z->expanded); - z->expanded = NULL; - // end of PNG chunk, read and skip CRC - stbi__get32be(s); - return 1; - } + case STBI__PNG_TYPE('I', 'E', 'N', 'D'): + { + stbi__uint32 raw_len, bpl; + if (first) + return stbi__err("first not IHDR", "Corrupt PNG"); + if (scan != STBI__SCAN_load) + return 1; + if (z->idata == NULL) + return stbi__err("no IDAT", "Corrupt PNG"); + // initial guess for decoded data size to avoid unnecessary reallocs + bpl = (s->img_x * z->depth + 7) / 8; // bytes per line, per component + raw_len = bpl * s->img_y * s->img_n /* pixels */ + s->img_y /* filter mode per row */; + z->expanded = (stbi_uc *)stbi_zlib_decode_malloc_guesssize_headerflag((char *)z->idata, ioff, raw_len, (int *)&raw_len, !is_iphone); + if (z->expanded == NULL) + return 0; // zlib should set error + STBI_FREE(z->idata); + z->idata = NULL; + if ((req_comp == s->img_n + 1 && req_comp != 3 && !pal_img_n) || has_trans) + s->img_out_n = s->img_n + 1; + else + s->img_out_n = s->img_n; + if (!stbi__create_png_image(z, z->expanded, raw_len, s->img_out_n, z->depth, color, interlace)) + return 0; + if (has_trans) + { + if (z->depth == 16) + { + if (!stbi__compute_transparency16(z, tc16, s->img_out_n)) + return 0; + } + else + { + if (!stbi__compute_transparency(z, tc, s->img_out_n)) + return 0; + } + } + if (is_iphone && stbi__de_iphone_flag && s->img_out_n > 2) + stbi__de_iphone(z); + if (pal_img_n) + { + // pal_img_n == 3 or 4 + s->img_n = pal_img_n; // record the actual colors 
we had + s->img_out_n = pal_img_n; + if (req_comp >= 3) + s->img_out_n = req_comp; + if (!stbi__expand_png_palette(z, palette, pal_len, s->img_out_n)) + return 0; + } + else if (has_trans) + { + // non-paletted image with tRNS -> source image has (constant) alpha + ++s->img_n; + } + STBI_FREE(z->expanded); + z->expanded = NULL; + // end of PNG chunk, read and skip CRC + stbi__get32be(s); + return 1; + } - default: - // if critical, fail - if (first) - return stbi__err("first not IHDR", "Corrupt PNG"); - if ((c.type & (1 << 29)) == 0) - { + default: + // if critical, fail + if (first) + return stbi__err("first not IHDR", "Corrupt PNG"); + if ((c.type & (1 << 29)) == 0) + { #ifndef STBI_NO_FAILURE_STRINGS - // not threadsafe - static char invalid_chunk[] = "XXXX PNG chunk not known"; - invalid_chunk[0] = STBI__BYTECAST(c.type >> 24); - invalid_chunk[1] = STBI__BYTECAST(c.type >> 16); - invalid_chunk[2] = STBI__BYTECAST(c.type >> 8); - invalid_chunk[3] = STBI__BYTECAST(c.type >> 0); + // not threadsafe + static char invalid_chunk[] = "XXXX PNG chunk not known"; + invalid_chunk[0] = STBI__BYTECAST(c.type >> 24); + invalid_chunk[1] = STBI__BYTECAST(c.type >> 16); + invalid_chunk[2] = STBI__BYTECAST(c.type >> 8); + invalid_chunk[3] = STBI__BYTECAST(c.type >> 0); #endif - return stbi__err(invalid_chunk, "PNG not supported: unknown PNG chunk type"); - } - stbi__skip(s, c.length); - break; - } - // end of PNG chunk, read and skip CRC - stbi__get32be(s); - } + return stbi__err(invalid_chunk, "PNG not supported: unknown PNG chunk type"); + } + stbi__skip(s, c.length); + break; + } + // end of PNG chunk, read and skip CRC + stbi__get32be(s); + } } static void *stbi__do_png(stbi__png *p, int *x, int *y, int *n, int req_comp, stbi__result_info *ri) { - void *result = NULL; - if (req_comp < 0 || req_comp > 4) - return stbi__errpuc("bad req_comp", "Internal error"); - if (stbi__parse_png_file(p, STBI__SCAN_load, req_comp)) - { - if (p->depth <= 8) - ri->bits_per_channel = 8; - else if (p->depth == 16) - ri->bits_per_channel = 16; - else - return stbi__errpuc("bad bits_per_channel", "PNG not supported: unsupported color depth"); - result = p->out; - p->out = NULL; - if (req_comp && req_comp != p->s->img_out_n) - { - if (ri->bits_per_channel == 8) - result = stbi__convert_format((unsigned char *)result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y); - else - result = stbi__convert_format16((stbi__uint16 *)result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y); - p->s->img_out_n = req_comp; - if (result == NULL) - return result; - } - *x = p->s->img_x; - *y = p->s->img_y; - if (n) - *n = p->s->img_n; - } - STBI_FREE(p->out); - p->out = NULL; - STBI_FREE(p->expanded); - p->expanded = NULL; - STBI_FREE(p->idata); - p->idata = NULL; + void *result = NULL; + if (req_comp < 0 || req_comp > 4) + return stbi__errpuc("bad req_comp", "Internal error"); + if (stbi__parse_png_file(p, STBI__SCAN_load, req_comp)) + { + if (p->depth <= 8) + ri->bits_per_channel = 8; + else if (p->depth == 16) + ri->bits_per_channel = 16; + else + return stbi__errpuc("bad bits_per_channel", "PNG not supported: unsupported color depth"); + result = p->out; + p->out = NULL; + if (req_comp && req_comp != p->s->img_out_n) + { + if (ri->bits_per_channel == 8) + result = stbi__convert_format((unsigned char *)result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y); + else + result = stbi__convert_format16((stbi__uint16 *)result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y); + p->s->img_out_n = req_comp; + if (result == 
NULL) + return result; + } + *x = p->s->img_x; + *y = p->s->img_y; + if (n) + *n = p->s->img_n; + } + STBI_FREE(p->out); + p->out = NULL; + STBI_FREE(p->expanded); + p->expanded = NULL; + STBI_FREE(p->idata); + p->idata = NULL; - return result; + return result; } static void *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) { - stbi__png p; - p.s = s; - return stbi__do_png(&p, x, y, comp, req_comp, ri); + stbi__png p; + p.s = s; + return stbi__do_png(&p, x, y, comp, req_comp, ri); } static int stbi__png_test(stbi__context *s) { - int r; - r = stbi__check_png_header(s); - stbi__rewind(s); - return r; + int r; + r = stbi__check_png_header(s); + stbi__rewind(s); + return r; } static int stbi__png_info_raw(stbi__png *p, int *x, int *y, int *comp) { - if (!stbi__parse_png_file(p, STBI__SCAN_header, 0)) - { - stbi__rewind(p->s); - return 0; - } - if (x) - *x = p->s->img_x; - if (y) - *y = p->s->img_y; - if (comp) - *comp = p->s->img_n; - return 1; + if (!stbi__parse_png_file(p, STBI__SCAN_header, 0)) + { + stbi__rewind(p->s); + return 0; + } + if (x) + *x = p->s->img_x; + if (y) + *y = p->s->img_y; + if (comp) + *comp = p->s->img_n; + return 1; } static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp) { - stbi__png p; - p.s = s; - return stbi__png_info_raw(&p, x, y, comp); + stbi__png p; + p.s = s; + return stbi__png_info_raw(&p, x, y, comp); } static int stbi__png_is16(stbi__context *s) { - stbi__png p; - p.s = s; - if (!stbi__png_info_raw(&p, NULL, NULL, NULL)) - return 0; - if (p.depth != 16) - { - stbi__rewind(p.s); - return 0; - } - return 1; + stbi__png p; + p.s = s; + if (!stbi__png_info_raw(&p, NULL, NULL, NULL)) + return 0; + if (p.depth != 16) + { + stbi__rewind(p.s); + return 0; + } + return 1; } #endif @@ -6243,69 +6202,69 @@ static int stbi__png_is16(stbi__context *s) #ifndef STBI_NO_BMP static int stbi__bmp_test_raw(stbi__context *s) { - int r; - int sz; - if (stbi__get8(s) != 'B') - return 0; - if (stbi__get8(s) != 'M') - return 0; - stbi__get32le(s); // discard filesize - stbi__get16le(s); // discard reserved - stbi__get16le(s); // discard reserved - stbi__get32le(s); // discard data offset - sz = stbi__get32le(s); - r = (sz == 12 || sz == 40 || sz == 56 || sz == 108 || sz == 124); - return r; + int r; + int sz; + if (stbi__get8(s) != 'B') + return 0; + if (stbi__get8(s) != 'M') + return 0; + stbi__get32le(s); // discard filesize + stbi__get16le(s); // discard reserved + stbi__get16le(s); // discard reserved + stbi__get32le(s); // discard data offset + sz = stbi__get32le(s); + r = (sz == 12 || sz == 40 || sz == 56 || sz == 108 || sz == 124); + return r; } static int stbi__bmp_test(stbi__context *s) { - int r = stbi__bmp_test_raw(s); - stbi__rewind(s); - return r; + int r = stbi__bmp_test_raw(s); + stbi__rewind(s); + return r; } // returns 0..31 for the highest set bit static int stbi__high_bit(unsigned int z) { - int n = 0; - if (z == 0) - return -1; - if (z >= 0x10000) - { - n += 16; - z >>= 16; - } - if (z >= 0x00100) - { - n += 8; - z >>= 8; - } - if (z >= 0x00010) - { - n += 4; - z >>= 4; - } - if (z >= 0x00004) - { - n += 2; - z >>= 2; - } - if (z >= 0x00002) - { - n += 1; /* >>= 1;*/ - } - return n; + int n = 0; + if (z == 0) + return -1; + if (z >= 0x10000) + { + n += 16; + z >>= 16; + } + if (z >= 0x00100) + { + n += 8; + z >>= 8; + } + if (z >= 0x00010) + { + n += 4; + z >>= 4; + } + if (z >= 0x00004) + { + n += 2; + z >>= 2; + } + if (z >= 0x00002) + { + n += 1; /* >>= 1;*/ + } + return n; } static int 
stbi__bitcount(unsigned int a) { - a = (a & 0x55555555) + ((a >> 1) & 0x55555555); // max 2 - a = (a & 0x33333333) + ((a >> 2) & 0x33333333); // max 4 - a = (a + (a >> 4)) & 0x0f0f0f0f; // max 8 per 4, now 8 bits - a = (a + (a >> 8)); // max 16 per 8 bits - a = (a + (a >> 16)); // max 32 per 8 bits - return a & 0xff; + a = (a & 0x55555555) + ((a >> 1) & 0x55555555); // max 2 + a = (a & 0x33333333) + ((a >> 2) & 0x33333333); // max 4 + a = (a + (a >> 4)) & 0x0f0f0f0f; // max 8 per 4, now 8 bits + a = (a + (a >> 8)); // max 16 per 8 bits + a = (a + (a >> 16)); // max 32 per 8 bits + return a & 0xff; } // extract an arbitrarily-aligned N-bit value (N=bits) @@ -6313,458 +6272,458 @@ static int stbi__bitcount(unsigned int a) // extend it to full full range. static int stbi__shiftsigned(unsigned int v, int shift, int bits) { - static unsigned int mul_table[9] = { - 0, - 0xff /*0b11111111*/, - 0x55 /*0b01010101*/, - 0x49 /*0b01001001*/, - 0x11 /*0b00010001*/, - 0x21 /*0b00100001*/, - 0x41 /*0b01000001*/, - 0x81 /*0b10000001*/, - 0x01 /*0b00000001*/, - }; - static unsigned int shift_table[9] = { - 0, - 0, - 0, - 1, - 0, - 2, - 4, - 6, - 0, - }; - if (shift < 0) - v <<= -shift; - else - v >>= shift; - STBI_ASSERT(v < 256); - v >>= (8 - bits); - STBI_ASSERT(bits >= 0 && bits <= 8); - return (int)((unsigned)v * mul_table[bits]) >> shift_table[bits]; + static unsigned int mul_table[9] = { + 0, + 0xff /*0b11111111*/, + 0x55 /*0b01010101*/, + 0x49 /*0b01001001*/, + 0x11 /*0b00010001*/, + 0x21 /*0b00100001*/, + 0x41 /*0b01000001*/, + 0x81 /*0b10000001*/, + 0x01 /*0b00000001*/, + }; + static unsigned int shift_table[9] = { + 0, + 0, + 0, + 1, + 0, + 2, + 4, + 6, + 0, + }; + if (shift < 0) + v <<= -shift; + else + v >>= shift; + STBI_ASSERT(v < 256); + v >>= (8 - bits); + STBI_ASSERT(bits >= 0 && bits <= 8); + return (int)((unsigned)v * mul_table[bits]) >> shift_table[bits]; } typedef struct { - int bpp, offset, hsz; - unsigned int mr, mg, mb, ma, all_a; - int extra_read; + int bpp, offset, hsz; + unsigned int mr, mg, mb, ma, all_a; + int extra_read; } stbi__bmp_data; static int stbi__bmp_set_mask_defaults(stbi__bmp_data *info, int compress) { - // BI_BITFIELDS specifies masks explicitly, don't override - if (compress == 3) - return 1; + // BI_BITFIELDS specifies masks explicitly, don't override + if (compress == 3) + return 1; - if (compress == 0) - { - if (info->bpp == 16) - { - info->mr = 31u << 10; - info->mg = 31u << 5; - info->mb = 31u << 0; - } - else if (info->bpp == 32) - { - info->mr = 0xffu << 16; - info->mg = 0xffu << 8; - info->mb = 0xffu << 0; - info->ma = 0xffu << 24; - info->all_a = 0; // if all_a is 0 at end, then we loaded alpha channel but it was all 0 - } - else - { - // otherwise, use defaults, which is all-0 - info->mr = info->mg = info->mb = info->ma = 0; - } - return 1; - } - return 0; // error + if (compress == 0) + { + if (info->bpp == 16) + { + info->mr = 31u << 10; + info->mg = 31u << 5; + info->mb = 31u << 0; + } + else if (info->bpp == 32) + { + info->mr = 0xffu << 16; + info->mg = 0xffu << 8; + info->mb = 0xffu << 0; + info->ma = 0xffu << 24; + info->all_a = 0; // if all_a is 0 at end, then we loaded alpha channel but it was all 0 + } + else + { + // otherwise, use defaults, which is all-0 + info->mr = info->mg = info->mb = info->ma = 0; + } + return 1; + } + return 0; // error } static void *stbi__bmp_parse_header(stbi__context *s, stbi__bmp_data *info) { - int hsz; - if (stbi__get8(s) != 'B' || stbi__get8(s) != 'M') - return stbi__errpuc("not BMP", "Corrupt BMP"); - 
stbi__get32le(s); // discard filesize - stbi__get16le(s); // discard reserved - stbi__get16le(s); // discard reserved - info->offset = stbi__get32le(s); - info->hsz = hsz = stbi__get32le(s); - info->mr = info->mg = info->mb = info->ma = 0; - info->extra_read = 14; + int hsz; + if (stbi__get8(s) != 'B' || stbi__get8(s) != 'M') + return stbi__errpuc("not BMP", "Corrupt BMP"); + stbi__get32le(s); // discard filesize + stbi__get16le(s); // discard reserved + stbi__get16le(s); // discard reserved + info->offset = stbi__get32le(s); + info->hsz = hsz = stbi__get32le(s); + info->mr = info->mg = info->mb = info->ma = 0; + info->extra_read = 14; - if (info->offset < 0) - return stbi__errpuc("bad BMP", "bad BMP"); + if (info->offset < 0) + return stbi__errpuc("bad BMP", "bad BMP"); - if (hsz != 12 && hsz != 40 && hsz != 56 && hsz != 108 && hsz != 124) - return stbi__errpuc("unknown BMP", "BMP type not supported: unknown"); - if (hsz == 12) - { - s->img_x = stbi__get16le(s); - s->img_y = stbi__get16le(s); - } - else - { - s->img_x = stbi__get32le(s); - s->img_y = stbi__get32le(s); - } - if (stbi__get16le(s) != 1) - return stbi__errpuc("bad BMP", "bad BMP"); - info->bpp = stbi__get16le(s); - if (hsz != 12) - { - int compress = stbi__get32le(s); - if (compress == 1 || compress == 2) - return stbi__errpuc("BMP RLE", "BMP type not supported: RLE"); - if (compress >= 4) - return stbi__errpuc("BMP JPEG/PNG", "BMP type not supported: unsupported compression"); // this includes PNG/JPEG modes - if (compress == 3 && info->bpp != 16 && info->bpp != 32) - return stbi__errpuc("bad BMP", "bad BMP"); // bitfields requires 16 or 32 bits/pixel - stbi__get32le(s); // discard sizeof - stbi__get32le(s); // discard hres - stbi__get32le(s); // discard vres - stbi__get32le(s); // discard colorsused - stbi__get32le(s); // discard max important - if (hsz == 40 || hsz == 56) - { - if (hsz == 56) - { - stbi__get32le(s); - stbi__get32le(s); - stbi__get32le(s); - stbi__get32le(s); - } - if (info->bpp == 16 || info->bpp == 32) - { - if (compress == 0) - { - stbi__bmp_set_mask_defaults(info, compress); - } - else if (compress == 3) - { - info->mr = stbi__get32le(s); - info->mg = stbi__get32le(s); - info->mb = stbi__get32le(s); - info->extra_read += 12; - // not documented, but generated by photoshop and handled by mspaint - if (info->mr == info->mg && info->mg == info->mb) - { - // ?!?!? 
- return stbi__errpuc("bad BMP", "bad BMP"); - } - } - else - return stbi__errpuc("bad BMP", "bad BMP"); - } - } - else - { - // V4/V5 header - int i; - if (hsz != 108 && hsz != 124) - return stbi__errpuc("bad BMP", "bad BMP"); - info->mr = stbi__get32le(s); - info->mg = stbi__get32le(s); - info->mb = stbi__get32le(s); - info->ma = stbi__get32le(s); - if (compress != 3) // override mr/mg/mb unless in BI_BITFIELDS mode, as per docs - stbi__bmp_set_mask_defaults(info, compress); - stbi__get32le(s); // discard color space - for (i = 0; i < 12; ++i) - stbi__get32le(s); // discard color space parameters - if (hsz == 124) - { - stbi__get32le(s); // discard rendering intent - stbi__get32le(s); // discard offset of profile data - stbi__get32le(s); // discard size of profile data - stbi__get32le(s); // discard reserved - } - } - } - return (void *)1; + if (hsz != 12 && hsz != 40 && hsz != 56 && hsz != 108 && hsz != 124) + return stbi__errpuc("unknown BMP", "BMP type not supported: unknown"); + if (hsz == 12) + { + s->img_x = stbi__get16le(s); + s->img_y = stbi__get16le(s); + } + else + { + s->img_x = stbi__get32le(s); + s->img_y = stbi__get32le(s); + } + if (stbi__get16le(s) != 1) + return stbi__errpuc("bad BMP", "bad BMP"); + info->bpp = stbi__get16le(s); + if (hsz != 12) + { + int compress = stbi__get32le(s); + if (compress == 1 || compress == 2) + return stbi__errpuc("BMP RLE", "BMP type not supported: RLE"); + if (compress >= 4) + return stbi__errpuc("BMP JPEG/PNG", "BMP type not supported: unsupported compression"); // this includes PNG/JPEG modes + if (compress == 3 && info->bpp != 16 && info->bpp != 32) + return stbi__errpuc("bad BMP", "bad BMP"); // bitfields requires 16 or 32 bits/pixel + stbi__get32le(s); // discard sizeof + stbi__get32le(s); // discard hres + stbi__get32le(s); // discard vres + stbi__get32le(s); // discard colorsused + stbi__get32le(s); // discard max important + if (hsz == 40 || hsz == 56) + { + if (hsz == 56) + { + stbi__get32le(s); + stbi__get32le(s); + stbi__get32le(s); + stbi__get32le(s); + } + if (info->bpp == 16 || info->bpp == 32) + { + if (compress == 0) + { + stbi__bmp_set_mask_defaults(info, compress); + } + else if (compress == 3) + { + info->mr = stbi__get32le(s); + info->mg = stbi__get32le(s); + info->mb = stbi__get32le(s); + info->extra_read += 12; + // not documented, but generated by photoshop and handled by mspaint + if (info->mr == info->mg && info->mg == info->mb) + { + // ?!?!? 
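+ // (presumably rejected because identical R/G/B masks would make the
+ // three channels indistinguishable after extraction)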
+                        return stbi__errpuc("bad BMP", "bad BMP");
+                    }
+                }
+                else
+                    return stbi__errpuc("bad BMP", "bad BMP");
+            }
+        }
+        else
+        {
+            // V4/V5 header
+            int i;
+            if (hsz != 108 && hsz != 124)
+                return stbi__errpuc("bad BMP", "bad BMP");
+            info->mr = stbi__get32le(s);
+            info->mg = stbi__get32le(s);
+            info->mb = stbi__get32le(s);
+            info->ma = stbi__get32le(s);
+            if (compress != 3) // override mr/mg/mb unless in BI_BITFIELDS mode, as per docs
+                stbi__bmp_set_mask_defaults(info, compress);
+            stbi__get32le(s); // discard color space
+            for (i = 0; i < 12; ++i)
+                stbi__get32le(s); // discard color space parameters
+            if (hsz == 124)
+            {
+                stbi__get32le(s); // discard rendering intent
+                stbi__get32le(s); // discard offset of profile data
+                stbi__get32le(s); // discard size of profile data
+                stbi__get32le(s); // discard reserved
+            }
+        }
+    }
+    return (void *)1;
}

static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri)
{
-   stbi_uc *out;
-   unsigned int mr = 0, mg = 0, mb = 0, ma = 0, all_a;
-   stbi_uc pal[256][4];
-   int psize = 0, i, j, width;
-   int flip_vertically, pad, target;
-   stbi__bmp_data info;
-   STBI_NOTUSED(ri);
+    stbi_uc *out;
+    unsigned int mr = 0, mg = 0, mb = 0, ma = 0, all_a;
+    stbi_uc pal[256][4];
+    int psize = 0, i, j, width;
+    int flip_vertically, pad, target;
+    stbi__bmp_data info;
+    STBI_NOTUSED(ri);

-   info.all_a = 255;
-   if (stbi__bmp_parse_header(s, &info) == NULL)
-      return NULL; // error code already set
+    info.all_a = 255;
+    if (stbi__bmp_parse_header(s, &info) == NULL)
+        return NULL; // error code already set

-   flip_vertically = ((int)s->img_y) > 0;
-   s->img_y = abs((int)s->img_y);
+    flip_vertically = ((int)s->img_y) > 0;
+    s->img_y = abs((int)s->img_y);

-   if (s->img_y > STBI_MAX_DIMENSIONS)
-      return stbi__errpuc("too large", "Very large image (corrupt?)");
-   if (s->img_x > STBI_MAX_DIMENSIONS)
-      return stbi__errpuc("too large", "Very large image (corrupt?)");
+    if (s->img_y > STBI_MAX_DIMENSIONS)
+        return stbi__errpuc("too large", "Very large image (corrupt?)");
+    if (s->img_x > STBI_MAX_DIMENSIONS)
+        return stbi__errpuc("too large", "Very large image (corrupt?)");

-   mr = info.mr;
-   mg = info.mg;
-   mb = info.mb;
-   ma = info.ma;
-   all_a = info.all_a;
+    mr = info.mr;
+    mg = info.mg;
+    mb = info.mb;
+    ma = info.ma;
+    all_a = info.all_a;

-   if (info.hsz == 12)
-   {
-      if (info.bpp < 24)
-         psize = (info.offset - info.extra_read - 24) / 3;
-   }
-   else
-   {
-      if (info.bpp < 16)
-         psize = (info.offset - info.extra_read - info.hsz) >> 2;
-   }
-   if (psize == 0)
-   {
-      // accept some number of extra bytes after the header, but if the offset points either to before
-      // the header ends or implies a large amount of extra data, reject the file as malformed
-      int bytes_read_so_far = s->callback_already_read + (int)(s->img_buffer - s->img_buffer_original);
-      int header_limit = 1024; // max we actually read is below 256 bytes currently.
-      int extra_data_limit = 256 * 4; // what ordinarily goes here is a palette; 256 entries*4 bytes is its max size.
-      if (bytes_read_so_far <= 0 || bytes_read_so_far > header_limit)
-      {
-         return stbi__errpuc("bad header", "Corrupt BMP");
-      }
-      // we established that bytes_read_so_far is positive and sensible.
-      // the first half of this test rejects offsets that are either too small positives, or
-      // negative, and guarantees that info.offset >= bytes_read_so_far > 0. this in turn
-      // ensures the number computed in the second half of the test can't overflow.
-      if (info.offset < bytes_read_so_far || info.offset - bytes_read_so_far > extra_data_limit)
-      {
-         return stbi__errpuc("bad offset", "Corrupt BMP");
-      }
-      else
-      {
-         stbi__skip(s, info.offset - bytes_read_so_far);
-      }
-   }
+    if (info.hsz == 12)
+    {
+        if (info.bpp < 24)
+            psize = (info.offset - info.extra_read - 24) / 3;
+    }
+    else
+    {
+        if (info.bpp < 16)
+            psize = (info.offset - info.extra_read - info.hsz) >> 2;
+    }
+    if (psize == 0)
+    {
+        // accept some number of extra bytes after the header, but if the offset points either to before
+        // the header ends or implies a large amount of extra data, reject the file as malformed
+        int bytes_read_so_far = s->callback_already_read + (int)(s->img_buffer - s->img_buffer_original);
+        int header_limit = 1024; // max we actually read is below 256 bytes currently.
+        int extra_data_limit = 256 * 4; // what ordinarily goes here is a palette; 256 entries*4 bytes is its max size.
+        if (bytes_read_so_far <= 0 || bytes_read_so_far > header_limit)
+        {
+            return stbi__errpuc("bad header", "Corrupt BMP");
+        }
+        // we established that bytes_read_so_far is positive and sensible.
+        // the first half of this test rejects offsets that are either too small positives, or
+        // negative, and guarantees that info.offset >= bytes_read_so_far > 0. this in turn
+        // ensures the number computed in the second half of the test can't overflow.
+        if (info.offset < bytes_read_so_far || info.offset - bytes_read_so_far > extra_data_limit)
+        {
+            return stbi__errpuc("bad offset", "Corrupt BMP");
+        }
+        else
+        {
+            stbi__skip(s, info.offset - bytes_read_so_far);
+        }
+    }

-   if (info.bpp == 24 && ma == 0xff000000)
-      s->img_n = 3;
-   else
-      s->img_n = ma ? 4 : 3;
-   if (req_comp && req_comp >= 3) // we can directly decode 3 or 4
-      target = req_comp;
-   else
-      target = s->img_n; // if they want monochrome, we'll post-convert
+    if (info.bpp == 24 && ma == 0xff000000)
+        s->img_n = 3;
+    else
+        s->img_n = ma ? 4 : 3;
+    if (req_comp && req_comp >= 3) // we can directly decode 3 or 4
+        target = req_comp;
+    else
+        target = s->img_n; // if they want monochrome, we'll post-convert

-   // sanity-check size
-   if (!stbi__mad3sizes_valid(target, s->img_x, s->img_y, 0))
-      return stbi__errpuc("too large", "Corrupt BMP");
+    // sanity-check size
+    if (!stbi__mad3sizes_valid(target, s->img_x, s->img_y, 0))
+        return stbi__errpuc("too large", "Corrupt BMP");

-   out = (stbi_uc *)stbi__malloc_mad3(target, s->img_x, s->img_y, 0);
-   if (!out)
-      return stbi__errpuc("outofmem", "Out of memory");
-   if (info.bpp < 16)
-   {
-      int z = 0;
-      if (psize == 0 || psize > 256)
-      {
-         STBI_FREE(out);
-         return stbi__errpuc("invalid", "Corrupt BMP");
-      }
-      for (i = 0; i < psize; ++i)
-      {
-         pal[i][2] = stbi__get8(s);
-         pal[i][1] = stbi__get8(s);
-         pal[i][0] = stbi__get8(s);
-         if (info.hsz != 12)
-            stbi__get8(s);
-         pal[i][3] = 255;
-      }
-      stbi__skip(s, info.offset - info.extra_read - info.hsz - psize * (info.hsz == 12 ? 3 : 4));
-      if (info.bpp == 1)
-         width = (s->img_x + 7) >> 3;
-      else if (info.bpp == 4)
-         width = (s->img_x + 1) >> 1;
-      else if (info.bpp == 8)
-         width = s->img_x;
-      else
-      {
-         STBI_FREE(out);
-         return stbi__errpuc("bad bpp", "Corrupt BMP");
-      }
-      pad = (-width) & 3;
-      if (info.bpp == 1)
-      {
-         for (j = 0; j < (int)s->img_y; ++j)
-         {
-            int bit_offset = 7, v = stbi__get8(s);
-            for (i = 0; i < (int)s->img_x; ++i)
-            {
-               int color = (v >> bit_offset) & 0x1;
-               out[z++] = pal[color][0];
-               out[z++] = pal[color][1];
-               out[z++] = pal[color][2];
-               if (target == 4)
-                  out[z++] = 255;
-               if (i + 1 == (int)s->img_x)
-                  break;
-               if ((--bit_offset) < 0)
-               {
-                  bit_offset = 7;
-                  v = stbi__get8(s);
-               }
-            }
-            stbi__skip(s, pad);
-         }
-      }
-      else
-      {
-         for (j = 0; j < (int)s->img_y; ++j)
-         {
-            for (i = 0; i < (int)s->img_x; i += 2)
-            {
-               int v = stbi__get8(s), v2 = 0;
-               if (info.bpp == 4)
-               {
-                  v2 = v & 15;
-                  v >>= 4;
-               }
-               out[z++] = pal[v][0];
-               out[z++] = pal[v][1];
-               out[z++] = pal[v][2];
-               if (target == 4)
-                  out[z++] = 255;
-               if (i + 1 == (int)s->img_x)
-                  break;
-               v = (info.bpp == 8) ? stbi__get8(s) : v2;
-               out[z++] = pal[v][0];
-               out[z++] = pal[v][1];
-               out[z++] = pal[v][2];
-               if (target == 4)
-                  out[z++] = 255;
-            }
-            stbi__skip(s, pad);
-         }
-      }
-   }
-   else
-   {
-      int rshift = 0, gshift = 0, bshift = 0, ashift = 0, rcount = 0, gcount = 0, bcount = 0, acount = 0;
-      int z = 0;
-      int easy = 0;
-      stbi__skip(s, info.offset - info.extra_read - info.hsz);
-      if (info.bpp == 24)
-         width = 3 * s->img_x;
-      else if (info.bpp == 16)
-         width = 2 * s->img_x;
-      else /* bpp = 32 and pad = 0 */
-         width = 0;
-      pad = (-width) & 3;
-      if (info.bpp == 24)
-      {
-         easy = 1;
-      }
-      else if (info.bpp == 32)
-      {
-         if (mb == 0xff && mg == 0xff00 && mr == 0x00ff0000 && ma == 0xff000000)
-            easy = 2;
-      }
-      if (!easy)
-      {
-         if (!mr || !mg || !mb)
-         {
-            STBI_FREE(out);
-            return stbi__errpuc("bad masks", "Corrupt BMP");
-         }
-         // right shift amt to put high bit in position #7
-         rshift = stbi__high_bit(mr) - 7;
-         rcount = stbi__bitcount(mr);
-         gshift = stbi__high_bit(mg) - 7;
-         gcount = stbi__bitcount(mg);
-         bshift = stbi__high_bit(mb) - 7;
-         bcount = stbi__bitcount(mb);
-         ashift = stbi__high_bit(ma) - 7;
-         acount = stbi__bitcount(ma);
-         if (rcount > 8 || gcount > 8 || bcount > 8 || acount > 8)
-         {
-            STBI_FREE(out);
-            return stbi__errpuc("bad masks", "Corrupt BMP");
-         }
-      }
-      for (j = 0; j < (int)s->img_y; ++j)
-      {
-         if (easy)
-         {
-            for (i = 0; i < (int)s->img_x; ++i)
-            {
-               unsigned char a;
-               out[z + 2] = stbi__get8(s);
-               out[z + 1] = stbi__get8(s);
-               out[z + 0] = stbi__get8(s);
-               z += 3;
-               a = (easy == 2 ? stbi__get8(s) : 255);
-               all_a |= a;
-               if (target == 4)
-                  out[z++] = a;
-            }
-         }
-         else
-         {
-            int bpp = info.bpp;
-            for (i = 0; i < (int)s->img_x; ++i)
-            {
-               stbi__uint32 v = (bpp == 16 ? (stbi__uint32)stbi__get16le(s) : stbi__get32le(s));
-               unsigned int a;
-               out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mr, rshift, rcount));
-               out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mg, gshift, gcount));
-               out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mb, bshift, bcount));
-               a = (ma ? stbi__shiftsigned(v & ma, ashift, acount) : 255);
-               all_a |= a;
-               if (target == 4)
-                  out[z++] = STBI__BYTECAST(a);
-            }
-         }
-         stbi__skip(s, pad);
-      }
-   }
+    out = (stbi_uc *)stbi__malloc_mad3(target, s->img_x, s->img_y, 0);
+    if (!out)
+        return stbi__errpuc("outofmem", "Out of memory");
+    if (info.bpp < 16)
+    {
+        int z = 0;
+        if (psize == 0 || psize > 256)
+        {
+            STBI_FREE(out);
+            return stbi__errpuc("invalid", "Corrupt BMP");
+        }
+        for (i = 0; i < psize; ++i)
+        {
+            pal[i][2] = stbi__get8(s);
+            pal[i][1] = stbi__get8(s);
+            pal[i][0] = stbi__get8(s);
+            if (info.hsz != 12)
+                stbi__get8(s);
+            pal[i][3] = 255;
+        }
+        stbi__skip(s, info.offset - info.extra_read - info.hsz - psize * (info.hsz == 12 ? 3 : 4));
+        if (info.bpp == 1)
+            width = (s->img_x + 7) >> 3;
+        else if (info.bpp == 4)
+            width = (s->img_x + 1) >> 1;
+        else if (info.bpp == 8)
+            width = s->img_x;
+        else
+        {
+            STBI_FREE(out);
+            return stbi__errpuc("bad bpp", "Corrupt BMP");
+        }
+        pad = (-width) & 3;
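+        // (BMP rows are padded to a multiple of 4 bytes; (-width) & 3 is the
+        // number of padding bytes that follow `width` bytes of payload)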
+        if (info.bpp == 1)
+        {
+            for (j = 0; j < (int)s->img_y; ++j)
+            {
+                int bit_offset = 7, v = stbi__get8(s);
+                for (i = 0; i < (int)s->img_x; ++i)
+                {
+                    int color = (v >> bit_offset) & 0x1;
+                    out[z++] = pal[color][0];
+                    out[z++] = pal[color][1];
+                    out[z++] = pal[color][2];
+                    if (target == 4)
+                        out[z++] = 255;
+                    if (i + 1 == (int)s->img_x)
+                        break;
+                    if ((--bit_offset) < 0)
+                    {
+                        bit_offset = 7;
+                        v = stbi__get8(s);
+                    }
+                }
+                stbi__skip(s, pad);
+            }
+        }
+        else
+        {
+            for (j = 0; j < (int)s->img_y; ++j)
+            {
+                for (i = 0; i < (int)s->img_x; i += 2)
+                {
+                    int v = stbi__get8(s), v2 = 0;
+                    if (info.bpp == 4)
+                    {
+                        v2 = v & 15;
+                        v >>= 4;
+                    }
+                    out[z++] = pal[v][0];
+                    out[z++] = pal[v][1];
+                    out[z++] = pal[v][2];
+                    if (target == 4)
+                        out[z++] = 255;
+                    if (i + 1 == (int)s->img_x)
+                        break;
+                    v = (info.bpp == 8) ? stbi__get8(s) : v2;
+                    out[z++] = pal[v][0];
+                    out[z++] = pal[v][1];
+                    out[z++] = pal[v][2];
+                    if (target == 4)
+                        out[z++] = 255;
+                }
+                stbi__skip(s, pad);
+            }
+        }
+    }
+    else
+    {
+        int rshift = 0, gshift = 0, bshift = 0, ashift = 0, rcount = 0, gcount = 0, bcount = 0, acount = 0;
+        int z = 0;
+        int easy = 0;
+        stbi__skip(s, info.offset - info.extra_read - info.hsz);
+        if (info.bpp == 24)
+            width = 3 * s->img_x;
+        else if (info.bpp == 16)
+            width = 2 * s->img_x;
+        else /* bpp = 32 and pad = 0 */
+            width = 0;
+        pad = (-width) & 3;
+        if (info.bpp == 24)
+        {
+            easy = 1;
+        }
+        else if (info.bpp == 32)
+        {
+            if (mb == 0xff && mg == 0xff00 && mr == 0x00ff0000 && ma == 0xff000000)
+                easy = 2;
+        }
+        if (!easy)
+        {
+            if (!mr || !mg || !mb)
+            {
+                STBI_FREE(out);
+                return stbi__errpuc("bad masks", "Corrupt BMP");
+            }
+            // right shift amt to put high bit in position #7
+            rshift = stbi__high_bit(mr) - 7;
+            rcount = stbi__bitcount(mr);
+            gshift = stbi__high_bit(mg) - 7;
+            gcount = stbi__bitcount(mg);
+            bshift = stbi__high_bit(mb) - 7;
+            bcount = stbi__bitcount(mb);
+            ashift = stbi__high_bit(ma) - 7;
+            acount = stbi__bitcount(ma);
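+            // (stbi__shiftsigned widens each extracted field to a full 8 bits,
+            // which is why per-channel masks wider than 8 bits are rejected below)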
+            if (rcount > 8 || gcount > 8 || bcount > 8 || acount > 8)
+            {
+                STBI_FREE(out);
+                return stbi__errpuc("bad masks", "Corrupt BMP");
+            }
+        }
+        for (j = 0; j < (int)s->img_y; ++j)
+        {
+            if (easy)
+            {
+                for (i = 0; i < (int)s->img_x; ++i)
+                {
+                    unsigned char a;
+                    out[z + 2] = stbi__get8(s);
+                    out[z + 1] = stbi__get8(s);
+                    out[z + 0] = stbi__get8(s);
+                    z += 3;
+                    a = (easy == 2 ? stbi__get8(s) : 255);
+                    all_a |= a;
+                    if (target == 4)
+                        out[z++] = a;
+                }
+            }
+            else
+            {
+                int bpp = info.bpp;
+                for (i = 0; i < (int)s->img_x; ++i)
+                {
+                    stbi__uint32 v = (bpp == 16 ? (stbi__uint32)stbi__get16le(s) : stbi__get32le(s));
+                    unsigned int a;
+                    out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mr, rshift, rcount));
+                    out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mg, gshift, gcount));
+                    out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mb, bshift, bcount));
+                    a = (ma ? stbi__shiftsigned(v & ma, ashift, acount) : 255);
+                    all_a |= a;
+                    if (target == 4)
+                        out[z++] = STBI__BYTECAST(a);
+                }
+            }
+            stbi__skip(s, pad);
+        }
+    }

-   // if alpha channel is all 0s, replace with all 255s
-   if (target == 4 && all_a == 0)
-      for (i = 4 * s->img_x * s->img_y - 1; i >= 0; i -= 4)
-         out[i] = 255;
+    // if alpha channel is all 0s, replace with all 255s
+    if (target == 4 && all_a == 0)
+        for (i = 4 * s->img_x * s->img_y - 1; i >= 0; i -= 4)
+            out[i] = 255;

-   if (flip_vertically)
-   {
-      stbi_uc t;
-      for (j = 0; j < (int)s->img_y >> 1; ++j)
-      {
-         stbi_uc *p1 = out + j * s->img_x * target;
-         stbi_uc *p2 = out + (s->img_y - 1 - j) * s->img_x * target;
-         for (i = 0; i < (int)s->img_x * target; ++i)
-         {
-            t = p1[i];
-            p1[i] = p2[i];
-            p2[i] = t;
-         }
-      }
-   }
+    if (flip_vertically)
+    {
+        stbi_uc t;
+        for (j = 0; j < (int)s->img_y >> 1; ++j)
+        {
+            stbi_uc *p1 = out + j * s->img_x * target;
+            stbi_uc *p2 = out + (s->img_y - 1 - j) * s->img_x * target;
+            for (i = 0; i < (int)s->img_x * target; ++i)
+            {
+                t = p1[i];
+                p1[i] = p2[i];
+                p2[i] = t;
+            }
+        }
+    }

-   if (req_comp && req_comp != target)
-   {
-      out = stbi__convert_format(out, target, req_comp, s->img_x, s->img_y);
-      if (out == NULL)
-         return out; // stbi__convert_format frees input on failure
-   }
+    if (req_comp && req_comp != target)
+    {
+        out = stbi__convert_format(out, target, req_comp, s->img_x, s->img_y);
+        if (out == NULL)
+            return out; // stbi__convert_format frees input on failure
+    }

-   *x = s->img_x;
-   *y = s->img_y;
-   if (comp)
-      *comp = s->img_n;
-   return out;
+    *x = s->img_x;
+    *y = s->img_y;
+    if (comp)
+        *comp = s->img_n;
+    return out;
}
#endif

@@ -6774,401 +6733,401 @@ static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req
// returns STBI_rgb or whatever, 0 on error
static int stbi__tga_get_comp(int bits_per_pixel, int is_grey, int *is_rgb16)
{
-   // only RGB or RGBA (incl. 16bit) or grey allowed
-   if (is_rgb16)
-      *is_rgb16 = 0;
-   switch (bits_per_pixel)
-   {
-   case 8:
-      return STBI_grey;
-   case 16:
-      if (is_grey)
-         return STBI_grey_alpha;
-      // fallthrough
-   case 15:
-      if (is_rgb16)
-         *is_rgb16 = 1;
-      return STBI_rgb;
-   case 24: // fallthrough
-   case 32:
-      return bits_per_pixel / 8;
-   default:
-      return 0;
-   }
+    // only RGB or RGBA (incl. 16bit) or grey allowed
+    if (is_rgb16)
+        *is_rgb16 = 0;
+    switch (bits_per_pixel)
+    {
+    case 8:
+        return STBI_grey;
+    case 16:
+        if (is_grey)
+            return STBI_grey_alpha;
+        // fallthrough
+    case 15:
+        if (is_rgb16)
+            *is_rgb16 = 1;
+        return STBI_rgb;
+    case 24: // fallthrough
+    case 32:
+        return bits_per_pixel / 8;
+    default:
+        return 0;
+    }
}

static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp)
{
-   int tga_w, tga_h, tga_comp, tga_image_type, tga_bits_per_pixel, tga_colormap_bpp;
-   int sz, tga_colormap_type;
-   stbi__get8(s); // discard Offset
-   tga_colormap_type = stbi__get8(s); // colormap type
-   if (tga_colormap_type > 1)
-   {
-      stbi__rewind(s);
-      return 0; // only RGB or indexed allowed
-   }
-   tga_image_type = stbi__get8(s); // image type
-   if (tga_colormap_type == 1)
-   { // colormapped (paletted) image
-      if (tga_image_type != 1 && tga_image_type != 9)
-      {
-         stbi__rewind(s);
-         return 0;
-      }
-      stbi__skip(s, 4); // skip index of first colormap entry and number of entries
-      sz = stbi__get8(s); // check bits per palette color entry
-      if ((sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32))
-      {
-         stbi__rewind(s);
-         return 0;
-      }
-      stbi__skip(s, 4); // skip image x and y origin
-      tga_colormap_bpp = sz;
-   }
-   else
-   { // "normal" image w/o colormap - only RGB or grey allowed, +/- RLE
-      if ((tga_image_type != 2) && (tga_image_type != 3) && (tga_image_type != 10) && (tga_image_type != 11))
-      {
-         stbi__rewind(s);
-         return 0; // only RGB or grey allowed, +/- RLE
-      }
-      stbi__skip(s, 9); // skip colormap specification and image x/y origin
-      tga_colormap_bpp = 0;
-   }
-   tga_w = stbi__get16le(s);
-   if (tga_w < 1)
-   {
-      stbi__rewind(s);
-      return 0; // test width
-   }
-   tga_h = stbi__get16le(s);
-   if (tga_h < 1)
-   {
-      stbi__rewind(s);
-      return 0; // test height
-   }
-   tga_bits_per_pixel = stbi__get8(s); // bits per pixel
-   stbi__get8(s); // ignore alpha bits
-   if (tga_colormap_bpp != 0)
-   {
-      if ((tga_bits_per_pixel != 8) && (tga_bits_per_pixel != 16))
-      {
-         // when using a colormap, tga_bits_per_pixel is the size of the indexes
-         // I don't think anything but 8 or 16bit indexes makes sense
-         stbi__rewind(s);
-         return 0;
-      }
-      tga_comp = stbi__tga_get_comp(tga_colormap_bpp, 0, NULL);
-   }
-   else
-   {
-      tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3) || (tga_image_type == 11), NULL);
-   }
-   if (!tga_comp)
-   {
-      stbi__rewind(s);
-      return 0;
-   }
-   if (x)
-      *x = tga_w;
-   if (y)
-      *y = tga_h;
-   if (comp)
-      *comp = tga_comp;
-   return 1; // seems to have passed everything
+    int tga_w, tga_h, tga_comp, tga_image_type, tga_bits_per_pixel, tga_colormap_bpp;
+    int sz, tga_colormap_type;
+    stbi__get8(s); // discard Offset
+    tga_colormap_type = stbi__get8(s); // colormap type
+    if (tga_colormap_type > 1)
+    {
+        stbi__rewind(s);
+        return 0; // only RGB or indexed allowed
+    }
+    tga_image_type = stbi__get8(s); // image type
+    if (tga_colormap_type == 1)
+    { // colormapped (paletted) image
+        if (tga_image_type != 1 && tga_image_type != 9)
+        {
+            stbi__rewind(s);
+            return 0;
+        }
+        stbi__skip(s, 4); // skip index of first colormap entry and number of entries
+        sz = stbi__get8(s); // check bits per palette color entry
+        if ((sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32))
+        {
+            stbi__rewind(s);
+            return 0;
+        }
+        stbi__skip(s, 4); // skip image x and y origin
+        tga_colormap_bpp = sz;
+    }
+    else
+    { // "normal" image w/o colormap - only RGB or grey allowed, +/- RLE
+        if ((tga_image_type != 2) && (tga_image_type != 3) && (tga_image_type != 10) && (tga_image_type != 11))
+        {
+            stbi__rewind(s);
+            return 0; // only RGB or grey allowed, +/- RLE
+        }
+        stbi__skip(s, 9); // skip colormap specification and image x/y origin
+        tga_colormap_bpp = 0;
+    }
+    tga_w = stbi__get16le(s);
+    if (tga_w < 1)
+    {
+        stbi__rewind(s);
+        return 0; // test width
+    }
+    tga_h = stbi__get16le(s);
+    if (tga_h < 1)
+    {
+        stbi__rewind(s);
+        return 0; // test height
+    }
+    tga_bits_per_pixel = stbi__get8(s); // bits per pixel
+    stbi__get8(s); // ignore alpha bits
+    if (tga_colormap_bpp != 0)
+    {
+        if ((tga_bits_per_pixel != 8) && (tga_bits_per_pixel != 16))
+        {
+            // when using a colormap, tga_bits_per_pixel is the size of the indexes
+            // I don't think anything but 8 or 16bit indexes makes sense
+            stbi__rewind(s);
+            return 0;
+        }
+        tga_comp = stbi__tga_get_comp(tga_colormap_bpp, 0, NULL);
+    }
+    else
+    {
+        tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3) || (tga_image_type == 11), NULL);
+    }
+    if (!tga_comp)
+    {
+        stbi__rewind(s);
+        return 0;
+    }
+    if (x)
+        *x = tga_w;
+    if (y)
+        *y = tga_h;
+    if (comp)
+        *comp = tga_comp;
+    return 1; // seems to have passed everything
}

static int stbi__tga_test(stbi__context *s)
{
-   int res = 0;
-   int sz, tga_color_type;
-   stbi__get8(s); // discard Offset
-   tga_color_type = stbi__get8(s); // color type
-   if (tga_color_type > 1)
-      goto errorEnd; // only RGB or indexed allowed
-   sz = stbi__get8(s); // image type
-   if (tga_color_type == 1)
-   { // colormapped (paletted) image
-      if (sz != 1 && sz != 9)
-         goto errorEnd; // colortype 1 demands image type 1 or 9
-      stbi__skip(s, 4); // skip index of first colormap entry and number of entries
-      sz = stbi__get8(s); // check bits per palette color entry
-      if ((sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32))
-         goto errorEnd;
-      stbi__skip(s, 4); // skip image x and y origin
-   }
-   else
-   { // "normal" image w/o colormap
-      if ((sz != 2) && (sz != 3) && (sz != 10) && (sz != 11))
-         goto errorEnd; // only RGB or grey allowed, +/- RLE
-      stbi__skip(s, 9); // skip colormap specification and image x/y origin
-   }
-   if (stbi__get16le(s) < 1)
-      goto errorEnd; // test width
-   if (stbi__get16le(s) < 1)
-      goto errorEnd; // test height
-   sz = stbi__get8(s); // bits per pixel
-   if ((tga_color_type == 1) && (sz != 8) && (sz != 16))
-      goto errorEnd; // for colormapped images, bpp is size of an index
-   if ((sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32))
-      goto errorEnd;
+    int res = 0;
+    int sz, tga_color_type;
+    stbi__get8(s); // discard Offset
+    tga_color_type = stbi__get8(s); // color type
+    if (tga_color_type > 1)
+        goto errorEnd; // only RGB or indexed allowed
+    sz = stbi__get8(s); // image type
+    if (tga_color_type == 1)
+    { // colormapped (paletted) image
+        if (sz != 1 && sz != 9)
+            goto errorEnd; // colortype 1 demands image type 1 or 9
+        stbi__skip(s, 4); // skip index of first colormap entry and number of entries
+        sz = stbi__get8(s); // check bits per palette color entry
+        if ((sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32))
+            goto errorEnd;
+        stbi__skip(s, 4); // skip image x and y origin
+    }
+    else
+    { // "normal" image w/o colormap
+        if ((sz != 2) && (sz != 3) && (sz != 10) && (sz != 11))
+            goto errorEnd; // only RGB or grey allowed, +/- RLE
+        stbi__skip(s, 9); // skip colormap specification and image x/y origin
+    }
+    if (stbi__get16le(s) < 1)
+        goto errorEnd; // test width
+    if (stbi__get16le(s) < 1)
+        goto errorEnd; // test height
+    sz = stbi__get8(s); // bits per pixel
+    if ((tga_color_type == 1) && (sz != 8) && (sz != 16))
+        goto errorEnd; // for colormapped images, bpp is size of an index
+    if ((sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32))
+        goto errorEnd;

-   res = 1; // if we got this far, everything's good and we can return 1 instead of 0
+    res = 1; // if we got this far, everything's good and we can return 1 instead of 0

errorEnd:
-   stbi__rewind(s);
-   return res;
+    stbi__rewind(s);
+    return res;
}

// read 16bit value and convert to 24bit RGB
static void stbi__tga_read_rgb16(stbi__context *s, stbi_uc *out)
{
-   stbi__uint16 px = (stbi__uint16)stbi__get16le(s);
-   stbi__uint16 fiveBitMask = 31;
-   // we have 3 channels with 5bits each
-   int r = (px >> 10) & fiveBitMask;
-   int g = (px >> 5) & fiveBitMask;
-   int b = px & fiveBitMask;
-   // Note that this saves the data in RGB(A) order, so it doesn't need to be swapped later
-   out[0] = (stbi_uc)((r * 255) / 31);
-   out[1] = (stbi_uc)((g * 255) / 31);
-   out[2] = (stbi_uc)((b * 255) / 31);
+    stbi__uint16 px = (stbi__uint16)stbi__get16le(s);
+    stbi__uint16 fiveBitMask = 31;
+    // we have 3 channels with 5bits each
+    int r = (px >> 10) & fiveBitMask;
+    int g = (px >> 5) & fiveBitMask;
+    int b = px & fiveBitMask;
+    // Note that this saves the data in RGB(A) order, so it doesn't need to be swapped later
+    out[0] = (stbi_uc)((r * 255) / 31);
+    out[1] = (stbi_uc)((g * 255) / 31);
+    out[2] = (stbi_uc)((b * 255) / 31);
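+    // ((x * 255) / 31 maps the 5-bit range 0..31 onto 0..255 exactly, hitting
+    // both endpoints; a plain x << 3 would top out at 248)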

-   // some people claim that the most significant bit might be used for alpha
-   // (possibly if an alpha-bit is set in the "image descriptor byte")
-   // but that only made 16bit test images completely translucent..
-   // so let's treat all 15 and 16bit TGAs as RGB with no alpha.
+    // some people claim that the most significant bit might be used for alpha
+    // (possibly if an alpha-bit is set in the "image descriptor byte")
+    // but that only made 16bit test images completely translucent...
+    // so let's treat all 15 and 16bit TGAs as RGB with no alpha.
}

static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri)
{
-   // read in the TGA header stuff
-   int tga_offset = stbi__get8(s);
-   int tga_indexed = stbi__get8(s);
-   int tga_image_type = stbi__get8(s);
-   int tga_is_RLE = 0;
-   int tga_palette_start = stbi__get16le(s);
-   int tga_palette_len = stbi__get16le(s);
-   int tga_palette_bits = stbi__get8(s);
-   int tga_x_origin = stbi__get16le(s);
-   int tga_y_origin = stbi__get16le(s);
-   int tga_width = stbi__get16le(s);
-   int tga_height = stbi__get16le(s);
-   int tga_bits_per_pixel = stbi__get8(s);
-   int tga_comp, tga_rgb16 = 0;
-   int tga_inverted = stbi__get8(s);
-   // int tga_alpha_bits = tga_inverted & 15; // the 4 lowest bits - unused (useless?)
-   // image data
-   unsigned char *tga_data;
-   unsigned char *tga_palette = NULL;
-   int i, j;
-   unsigned char raw_data[4] = {0};
-   int RLE_count = 0;
-   int RLE_repeating = 0;
-   int read_next_pixel = 1;
-   STBI_NOTUSED(ri);
-   STBI_NOTUSED(tga_x_origin); // @TODO
-   STBI_NOTUSED(tga_y_origin); // @TODO
+    // read in the TGA header stuff
+    int tga_offset = stbi__get8(s);
+    int tga_indexed = stbi__get8(s);
+    int tga_image_type = stbi__get8(s);
+    int tga_is_RLE = 0;
+    int tga_palette_start = stbi__get16le(s);
+    int tga_palette_len = stbi__get16le(s);
+    int tga_palette_bits = stbi__get8(s);
+    int tga_x_origin = stbi__get16le(s);
+    int tga_y_origin = stbi__get16le(s);
+    int tga_width = stbi__get16le(s);
+    int tga_height = stbi__get16le(s);
+    int tga_bits_per_pixel = stbi__get8(s);
+    int tga_comp, tga_rgb16 = 0;
+    int tga_inverted = stbi__get8(s);
+    // int tga_alpha_bits = tga_inverted & 15; // the 4 lowest bits - unused (useless?)
+    // image data
+    unsigned char *tga_data;
+    unsigned char *tga_palette = NULL;
+    int i, j;
+    unsigned char raw_data[4] = {0};
+    int RLE_count = 0;
+    int RLE_repeating = 0;
+    int read_next_pixel = 1;
+    STBI_NOTUSED(ri);
+    STBI_NOTUSED(tga_x_origin); // @TODO
+    STBI_NOTUSED(tga_y_origin); // @TODO

-   if (tga_height > STBI_MAX_DIMENSIONS)
-      return stbi__errpuc("too large", "Very large image (corrupt?)");
-   if (tga_width > STBI_MAX_DIMENSIONS)
-      return stbi__errpuc("too large", "Very large image (corrupt?)");
+    if (tga_height > STBI_MAX_DIMENSIONS)
+        return stbi__errpuc("too large", "Very large image (corrupt?)");
+    if (tga_width > STBI_MAX_DIMENSIONS)
+        return stbi__errpuc("too large", "Very large image (corrupt?)");

-   // do a tiny bit of precessing
-   if (tga_image_type >= 8)
-   {
-      tga_image_type -= 8;
-      tga_is_RLE = 1;
-   }
-   tga_inverted = 1 - ((tga_inverted >> 5) & 1);
+    // do a tiny bit of preprocessing
+    if (tga_image_type >= 8)
+    {
+        tga_image_type -= 8;
+        tga_is_RLE = 1;
+    }
+    tga_inverted = 1 - ((tga_inverted >> 5) & 1);

-   // If I'm paletted, then I'll use the number of bits from the palette
-   if (tga_indexed)
-      tga_comp = stbi__tga_get_comp(tga_palette_bits, 0, &tga_rgb16);
-   else
-      tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3), &tga_rgb16);
+    // If I'm paletted, then I'll use the number of bits from the palette
+    if (tga_indexed)
+        tga_comp = stbi__tga_get_comp(tga_palette_bits, 0, &tga_rgb16);
+    else
+        tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3), &tga_rgb16);

-   if (!tga_comp) // shouldn't really happen, stbi__tga_test() should have ensured basic consistency
-      return stbi__errpuc("bad format", "Can't find out TGA pixelformat");
+    if (!tga_comp) // shouldn't really happen, stbi__tga_test() should have ensured basic consistency
+        return stbi__errpuc("bad format", "Can't find out TGA pixelformat");

-   // tga info
-   *x = tga_width;
-   *y = tga_height;
-   if (comp)
-      *comp = tga_comp;
+    // tga info
+    *x = tga_width;
+    *y = tga_height;
+    if (comp)
+        *comp = tga_comp;

-   if (!stbi__mad3sizes_valid(tga_width, tga_height, tga_comp, 0))
-      return stbi__errpuc("too large", "Corrupt TGA");
+    if (!stbi__mad3sizes_valid(tga_width, tga_height, tga_comp, 0))
+        return stbi__errpuc("too large", "Corrupt TGA");

-   tga_data = (unsigned char *)stbi__malloc_mad3(tga_width, tga_height, tga_comp, 0);
-   if (!tga_data)
-      return stbi__errpuc("outofmem", "Out of memory");
+    tga_data = (unsigned char *)stbi__malloc_mad3(tga_width, tga_height, tga_comp, 0);
+    if (!tga_data)
+        return stbi__errpuc("outofmem", "Out of memory");

-   // skip to the data's starting position (offset usually = 0)
-   stbi__skip(s, tga_offset);
+    // skip to the data's starting position (offset usually = 0)
+    stbi__skip(s, tga_offset);

-   if (!tga_indexed && !tga_is_RLE && !tga_rgb16)
-   {
-      for (i = 0; i < tga_height; ++i)
-      {
-         int row = tga_inverted ? tga_height - i - 1 : i;
-         stbi_uc *tga_row = tga_data + row * tga_width * tga_comp;
-         stbi__getn(s, tga_row, tga_width * tga_comp);
-      }
-   }
-   else
-   {
-      // do I need to load a palette?
-      if (tga_indexed)
-      {
-         if (tga_palette_len == 0)
-         { /* you have to have at least one entry! */
-            STBI_FREE(tga_data);
-            return stbi__errpuc("bad palette", "Corrupt TGA");
-         }
+    if (!tga_indexed && !tga_is_RLE && !tga_rgb16)
+    {
+        for (i = 0; i < tga_height; ++i)
+        {
+            int row = tga_inverted ? tga_height - i - 1 : i;
+            stbi_uc *tga_row = tga_data + row * tga_width * tga_comp;
+            stbi__getn(s, tga_row, tga_width * tga_comp);
+        }
+    }
+    else
+    {
+        // do I need to load a palette?
+        if (tga_indexed)
+        {
+            if (tga_palette_len == 0)
+            { /* you have to have at least one entry! */
+                STBI_FREE(tga_data);
+                return stbi__errpuc("bad palette", "Corrupt TGA");
+            }

-         // any data to skip? (offset usually = 0)
-         stbi__skip(s, tga_palette_start);
-         // load the palette
-         tga_palette = (unsigned char *)stbi__malloc_mad2(tga_palette_len, tga_comp, 0);
-         if (!tga_palette)
-         {
-            STBI_FREE(tga_data);
-            return stbi__errpuc("outofmem", "Out of memory");
-         }
-         if (tga_rgb16)
-         {
-            stbi_uc *pal_entry = tga_palette;
-            STBI_ASSERT(tga_comp == STBI_rgb);
-            for (i = 0; i < tga_palette_len; ++i)
-            {
-               stbi__tga_read_rgb16(s, pal_entry);
-               pal_entry += tga_comp;
-            }
-         }
-         else if (!stbi__getn(s, tga_palette, tga_palette_len * tga_comp))
-         {
-            STBI_FREE(tga_data);
-            STBI_FREE(tga_palette);
-            return stbi__errpuc("bad palette", "Corrupt TGA");
-         }
-      }
-      // load the data
-      for (i = 0; i < tga_width * tga_height; ++i)
-      {
-         // if I'm in RLE mode, do I need to get a RLE stbi__pngchunk?
-         if (tga_is_RLE)
-         {
-            if (RLE_count == 0)
-            {
-               // yep, get the next byte as a RLE command
-               int RLE_cmd = stbi__get8(s);
-               RLE_count = 1 + (RLE_cmd & 127);
-               RLE_repeating = RLE_cmd >> 7;
-               read_next_pixel = 1;
-            }
-            else if (!RLE_repeating)
-            {
-               read_next_pixel = 1;
-            }
-         }
-         else
-         {
-            read_next_pixel = 1;
-         }
-         // OK, if I need to read a pixel, do it now
-         if (read_next_pixel)
-         {
-            // load however much data we did have
-            if (tga_indexed)
-            {
-               // read in index, then perform the lookup
-               int pal_idx = (tga_bits_per_pixel == 8) ? stbi__get8(s) : stbi__get16le(s);
-               if (pal_idx >= tga_palette_len)
-               {
-                  // invalid index
-                  pal_idx = 0;
-               }
-               pal_idx *= tga_comp;
-               for (j = 0; j < tga_comp; ++j)
-               {
-                  raw_data[j] = tga_palette[pal_idx + j];
-               }
-            }
-            else if (tga_rgb16)
-            {
-               STBI_ASSERT(tga_comp == STBI_rgb);
-               stbi__tga_read_rgb16(s, raw_data);
-            }
-            else
-            {
-               // read in the data raw
-               for (j = 0; j < tga_comp; ++j)
-               {
-                  raw_data[j] = stbi__get8(s);
-               }
-            }
-            // clear the reading flag for the next pixel
-            read_next_pixel = 0;
-         } // end of reading a pixel
+            // any data to skip? (offset usually = 0)
+            stbi__skip(s, tga_palette_start);
+            // load the palette
+            tga_palette = (unsigned char *)stbi__malloc_mad2(tga_palette_len, tga_comp, 0);
+            if (!tga_palette)
+            {
+                STBI_FREE(tga_data);
+                return stbi__errpuc("outofmem", "Out of memory");
+            }
+            if (tga_rgb16)
+            {
+                stbi_uc *pal_entry = tga_palette;
+                STBI_ASSERT(tga_comp == STBI_rgb);
+                for (i = 0; i < tga_palette_len; ++i)
+                {
+                    stbi__tga_read_rgb16(s, pal_entry);
+                    pal_entry += tga_comp;
+                }
+            }
+            else if (!stbi__getn(s, tga_palette, tga_palette_len * tga_comp))
+            {
+                STBI_FREE(tga_data);
+                STBI_FREE(tga_palette);
+                return stbi__errpuc("bad palette", "Corrupt TGA");
+            }
+        }
+        // load the data
+        for (i = 0; i < tga_width * tga_height; ++i)
+        {
+            // if I'm in RLE mode, do I need to get a RLE packet header?
+            if (tga_is_RLE)
+            {
+                if (RLE_count == 0)
+                {
+                    // yep, get the next byte as a RLE command
+                    int RLE_cmd = stbi__get8(s);
+                    RLE_count = 1 + (RLE_cmd & 127);
+                    RLE_repeating = RLE_cmd >> 7;
+                    read_next_pixel = 1;
+                }
+                else if (!RLE_repeating)
+                {
+                    read_next_pixel = 1;
+                }
+            }
+            else
+            {
+                read_next_pixel = 1;
+            }
+            // OK, if I need to read a pixel, do it now
+            if (read_next_pixel)
+            {
+                // load however much data we did have
+                if (tga_indexed)
+                {
+                    // read in index, then perform the lookup
+                    int pal_idx = (tga_bits_per_pixel == 8) ? stbi__get8(s) : stbi__get16le(s);
+                    if (pal_idx >= tga_palette_len)
+                    {
+                        // invalid index
+                        pal_idx = 0;
+                    }
+                    pal_idx *= tga_comp;
+                    for (j = 0; j < tga_comp; ++j)
+                    {
+                        raw_data[j] = tga_palette[pal_idx + j];
+                    }
+                }
+                else if (tga_rgb16)
+                {
+                    STBI_ASSERT(tga_comp == STBI_rgb);
+                    stbi__tga_read_rgb16(s, raw_data);
+                }
+                else
+                {
+                    // read in the data raw
+                    for (j = 0; j < tga_comp; ++j)
+                    {
+                        raw_data[j] = stbi__get8(s);
+                    }
+                }
+                // clear the reading flag for the next pixel
+                read_next_pixel = 0;
+            } // end of reading a pixel

-         // copy data
-         for (j = 0; j < tga_comp; ++j)
-            tga_data[i * tga_comp + j] = raw_data[j];
+            // copy data
+            for (j = 0; j < tga_comp; ++j)
+                tga_data[i * tga_comp + j] = raw_data[j];

-         // in case we're in RLE mode, keep counting down
-         --RLE_count;
-      }
-      // do I need to invert the image?
-      if (tga_inverted)
-      {
-         for (j = 0; j * 2 < tga_height; ++j)
-         {
-            int index1 = j * tga_width * tga_comp;
-            int index2 = (tga_height - 1 - j) * tga_width * tga_comp;
-            for (i = tga_width * tga_comp; i > 0; --i)
-            {
-               unsigned char temp = tga_data[index1];
-               tga_data[index1] = tga_data[index2];
-               tga_data[index2] = temp;
-               ++index1;
-               ++index2;
-            }
-         }
-      }
-      // clear my palette, if I had one
-      if (tga_palette != NULL)
-      {
-         STBI_FREE(tga_palette);
-      }
-   }
+            // in case we're in RLE mode, keep counting down
+            --RLE_count;
+        }
+        // do I need to invert the image?
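+        // (TGA rows are stored bottom-up unless bit 5 of the image descriptor
+        // byte is set; tga_inverted was derived from that bit above)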
+        if (tga_inverted)
+        {
+            for (j = 0; j * 2 < tga_height; ++j)
+            {
+                int index1 = j * tga_width * tga_comp;
+                int index2 = (tga_height - 1 - j) * tga_width * tga_comp;
+                for (i = tga_width * tga_comp; i > 0; --i)
+                {
+                    unsigned char temp = tga_data[index1];
+                    tga_data[index1] = tga_data[index2];
+                    tga_data[index2] = temp;
+                    ++index1;
+                    ++index2;
+                }
+            }
+        }
+        // clear my palette, if I had one
+        if (tga_palette != NULL)
+        {
+            STBI_FREE(tga_palette);
+        }
+    }

-   // swap RGB - if the source data was RGB16, it already is in the right order
-   if (tga_comp >= 3 && !tga_rgb16)
-   {
-      unsigned char *tga_pixel = tga_data;
-      for (i = 0; i < tga_width * tga_height; ++i)
-      {
-         unsigned char temp = tga_pixel[0];
-         tga_pixel[0] = tga_pixel[2];
-         tga_pixel[2] = temp;
-         tga_pixel += tga_comp;
-      }
-   }
+    // swap RGB - if the source data was RGB16, it already is in the right order
+    if (tga_comp >= 3 && !tga_rgb16)
+    {
+        unsigned char *tga_pixel = tga_data;
+        for (i = 0; i < tga_width * tga_height; ++i)
+        {
+            unsigned char temp = tga_pixel[0];
+            tga_pixel[0] = tga_pixel[2];
+            tga_pixel[2] = temp;
+            tga_pixel += tga_comp;
+        }
+    }

-   // convert to target component count
-   if (req_comp && req_comp != tga_comp)
-      tga_data = stbi__convert_format(tga_data, tga_comp, req_comp, tga_width, tga_height);
+    // convert to target component count
+    if (req_comp && req_comp != tga_comp)
+        tga_data = stbi__convert_format(tga_data, tga_comp, req_comp, tga_width, tga_height);

-   // the things I do to get rid of an error message, and yet keep
-   // Microsoft's C compilers happy... [8^(
-   tga_palette_start = tga_palette_len = tga_palette_bits =
-       tga_x_origin = tga_y_origin = 0;
-   STBI_NOTUSED(tga_palette_start);
-   // OK, done
-   return tga_data;
+    // the things I do to get rid of an error message, and yet keep
+    // Microsoft's C compilers happy... [8^(
+    tga_palette_start = tga_palette_len = tga_palette_bits =
+        tga_x_origin = tga_y_origin = 0;
+    STBI_NOTUSED(tga_palette_start);
+    // OK, done
+    return tga_data;
}
#endif

@@ -7178,294 +7137,294 @@ static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req
#ifndef STBI_NO_PSD
static int stbi__psd_test(stbi__context *s)
{
-   int r = (stbi__get32be(s) == 0x38425053);
-   stbi__rewind(s);
-   return r;
+    int r = (stbi__get32be(s) == 0x38425053);
+    stbi__rewind(s);
+    return r;
}

static int stbi__psd_decode_rle(stbi__context *s, stbi_uc *p, int pixelCount)
{
-   int count, nleft, len;
+    int count, nleft, len;

-   count = 0;
-   while ((nleft = pixelCount - count) > 0)
-   {
-      len = stbi__get8(s);
-      if (len == 128)
-      {
-         // No-op.
-      }
-      else if (len < 128)
-      {
-         // Copy next len+1 bytes literally.
-         len++;
-         if (len > nleft)
-            return 0; // corrupt data
-         count += len;
-         while (len)
-         {
-            *p = stbi__get8(s);
-            p += 4;
-            len--;
-         }
-      }
-      else if (len > 128)
-      {
-         stbi_uc val;
-         // Next -len+1 bytes in the dest are replicated from next source byte.
-         // (Interpret len as a negative 8-bit int.)
-         len = 257 - len;
-         if (len > nleft)
-            return 0; // corrupt data
-         val = stbi__get8(s);
-         count += len;
-         while (len)
-         {
-            *p = val;
-            p += 4;
-            len--;
-         }
-      }
-   }
+    count = 0;
+    while ((nleft = pixelCount - count) > 0)
+    {
+        len = stbi__get8(s);
+        if (len == 128)
+        {
+            // No-op.
+        }
+        else if (len < 128)
+        {
+            // Copy next len+1 bytes literally.
+            len++;
+            if (len > nleft)
+                return 0; // corrupt data
+            count += len;
+            while (len)
+            {
+                *p = stbi__get8(s);
+                p += 4;
+                len--;
+            }
+        }
+        else if (len > 128)
+        {
+            stbi_uc val;
+            // Next -len+1 bytes in the dest are replicated from next source byte.
+            // (Interpret len as a negative 8-bit int.)
+            len = 257 - len;
+            if (len > nleft)
+                return 0; // corrupt data
+            val = stbi__get8(s);
+            count += len;
+            while (len)
+            {
+                *p = val;
+                p += 4;
+                len--;
+            }
+        }
+    }

-   return 1;
+    return 1;
}

static void *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc)
{
-   int pixelCount;
-   int channelCount, compression;
-   int channel, i;
-   int bitdepth;
-   int w, h;
-   stbi_uc *out;
-   STBI_NOTUSED(ri);
+    int pixelCount;
+    int channelCount, compression;
+    int channel, i;
+    int bitdepth;
+    int w, h;
+    stbi_uc *out;
+    STBI_NOTUSED(ri);

-   // Check identifier
-   if (stbi__get32be(s) != 0x38425053) // "8BPS"
-      return stbi__errpuc("not PSD", "Corrupt PSD image");
+    // Check identifier
+    if (stbi__get32be(s) != 0x38425053) // "8BPS"
+        return stbi__errpuc("not PSD", "Corrupt PSD image");

-   // Check file type version.
-   if (stbi__get16be(s) != 1)
-      return stbi__errpuc("wrong version", "Unsupported version of PSD image");
+    // Check file type version.
+    if (stbi__get16be(s) != 1)
+        return stbi__errpuc("wrong version", "Unsupported version of PSD image");

-   // Skip 6 reserved bytes.
-   stbi__skip(s, 6);
+    // Skip 6 reserved bytes.
+    stbi__skip(s, 6);

-   // Read the number of channels (R, G, B, A, etc).
-   channelCount = stbi__get16be(s);
-   if (channelCount < 0 || channelCount > 16)
-      return stbi__errpuc("wrong channel count", "Unsupported number of channels in PSD image");
+    // Read the number of channels (R, G, B, A, etc).
+    channelCount = stbi__get16be(s);
+    if (channelCount < 0 || channelCount > 16)
+        return stbi__errpuc("wrong channel count", "Unsupported number of channels in PSD image");

-   // Read the rows and columns of the image.
-   h = stbi__get32be(s);
-   w = stbi__get32be(s);
+    // Read the rows and columns of the image.
+    h = stbi__get32be(s);
+    w = stbi__get32be(s);

-   if (h > STBI_MAX_DIMENSIONS)
-      return stbi__errpuc("too large", "Very large image (corrupt?)");
-   if (w > STBI_MAX_DIMENSIONS)
-      return stbi__errpuc("too large", "Very large image (corrupt?)");
+    if (h > STBI_MAX_DIMENSIONS)
+        return stbi__errpuc("too large", "Very large image (corrupt?)");
+    if (w > STBI_MAX_DIMENSIONS)
+        return stbi__errpuc("too large", "Very large image (corrupt?)");

-   // Make sure the depth is 8 bits.
-   bitdepth = stbi__get16be(s);
-   if (bitdepth != 8 && bitdepth != 16)
-      return stbi__errpuc("unsupported bit depth", "PSD bit depth is not 8 or 16 bit");
+    // Make sure the depth is 8 or 16 bits.
+    bitdepth = stbi__get16be(s);
+    if (bitdepth != 8 && bitdepth != 16)
+        return stbi__errpuc("unsupported bit depth", "PSD bit depth is not 8 or 16 bit");

-   // Make sure the color mode is RGB.
-   // Valid options are:
-   //   0: Bitmap
-   //   1: Grayscale
-   //   2: Indexed color
-   //   3: RGB color
-   //   4: CMYK color
-   //   7: Multichannel
-   //   8: Duotone
-   //   9: Lab color
-   if (stbi__get16be(s) != 3)
-      return stbi__errpuc("wrong color format", "PSD is not in RGB color format");
+    // Make sure the color mode is RGB.
+    // Valid options are:
+    //   0: Bitmap
+    //   1: Grayscale
+    //   2: Indexed color
+    //   3: RGB color
+    //   4: CMYK color
+    //   7: Multichannel
+    //   8: Duotone
+    //   9: Lab color
+    if (stbi__get16be(s) != 3)
+        return stbi__errpuc("wrong color format", "PSD is not in RGB color format");

-   // Skip the Mode Data. (It's the palette for indexed color; other info for other modes.)
-   stbi__skip(s, stbi__get32be(s));
+    // Skip the Mode Data. (It's the palette for indexed color; other info for other modes.)
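+    // (this and the two sections below are each stored as a 32-bit big-endian
+    // length followed by that many bytes, hence the stbi__get32be-sized skips)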
+    stbi__skip(s, stbi__get32be(s));

-   // Skip the image resources. (resolution, pen tool paths, etc)
-   stbi__skip(s, stbi__get32be(s));
+    // Skip the image resources. (resolution, pen tool paths, etc)
+    stbi__skip(s, stbi__get32be(s));

-   // Skip the reserved data.
-   stbi__skip(s, stbi__get32be(s));
+    // Skip the reserved data.
+    stbi__skip(s, stbi__get32be(s));

-   // Find out if the data is compressed.
-   // Known values:
-   //   0: no compression
-   //   1: RLE compressed
-   compression = stbi__get16be(s);
-   if (compression > 1)
-      return stbi__errpuc("bad compression", "PSD has an unknown compression format");
+    // Find out if the data is compressed.
+    // Known values:
+    //   0: no compression
+    //   1: RLE compressed
+    compression = stbi__get16be(s);
+    if (compression > 1)
+        return stbi__errpuc("bad compression", "PSD has an unknown compression format");

-   // Check size
-   if (!stbi__mad3sizes_valid(4, w, h, 0))
-      return stbi__errpuc("too large", "Corrupt PSD");
+    // Check size
+    if (!stbi__mad3sizes_valid(4, w, h, 0))
+        return stbi__errpuc("too large", "Corrupt PSD");

-   // Create the destination image.
+    // Create the destination image.

-   if (!compression && bitdepth == 16 && bpc == 16)
-   {
-      out = (stbi_uc *)stbi__malloc_mad3(8, w, h, 0);
-      ri->bits_per_channel = 16;
-   }
-   else
-      out = (stbi_uc *)stbi__malloc(4 * w * h);
+    if (!compression && bitdepth == 16 && bpc == 16)
+    {
+        out = (stbi_uc *)stbi__malloc_mad3(8, w, h, 0);
+        ri->bits_per_channel = 16;
+    }
+    else
+        out = (stbi_uc *)stbi__malloc(4 * w * h);

-   if (!out)
-      return stbi__errpuc("outofmem", "Out of memory");
-   pixelCount = w * h;
+    if (!out)
+        return stbi__errpuc("outofmem", "Out of memory");
+    pixelCount = w * h;

-   // Initialize the data to zero.
-   // memset( out, 0, pixelCount * 4 );
+    // Initialize the data to zero.
+    // memset( out, 0, pixelCount * 4 );

-   // Finally, the image data.
-   if (compression)
-   {
-      // RLE as used by .PSD and .TIFF
-      // Loop until you get the number of unpacked bytes you are expecting:
-      // Read the next source byte into n.
-      // If n is between 0 and 127 inclusive, copy the next n+1 bytes literally.
-      // Else if n is between -127 and -1 inclusive, copy the next byte -n+1 times.
-      // Else if n is 128, noop.
-      // Endloop
+    // Finally, the image data.
+    if (compression)
+    {
+        // RLE as used by .PSD and .TIFF
+        // Loop until you get the number of unpacked bytes you are expecting:
+        // Read the next source byte into n.
+        // If n is between 0 and 127 inclusive, copy the next n+1 bytes literally.
+        // Else if n is between -127 and -1 inclusive, copy the next byte -n+1 times.
+        // Else if n is 128, noop.
+        // Endloop

-      // The RLE-compressed data is preceded by a 2-byte data count for each row in the data,
-      // which we're going to just skip.
-      stbi__skip(s, h * channelCount * 2);
+        // The RLE-compressed data is preceded by a 2-byte data count for each row in the data,
+        // which we're going to just skip.
+        stbi__skip(s, h * channelCount * 2);

-      // Read the RLE data by channel.
-      for (channel = 0; channel < 4; channel++)
-      {
-         stbi_uc *p;
+        // Read the RLE data by channel.
+        for (channel = 0; channel < 4; channel++)
+        {
+            stbi_uc *p;

-         p = out + channel;
-         if (channel >= channelCount)
-         {
-            // Fill this channel with default data.
-            for (i = 0; i < pixelCount; i++, p += 4)
-               *p = (channel == 3 ? 255 : 0);
-         }
-         else
-         {
-            // Read the RLE data.
-            if (!stbi__psd_decode_rle(s, p, pixelCount))
-            {
-               STBI_FREE(out);
-               return stbi__errpuc("corrupt", "bad RLE data");
-            }
-         }
-      }
-   }
-   else
-   {
-      // We're at the raw image data. It's each channel in order (Red, Green, Blue, Alpha, ...)
-      // where each channel consists of an 8-bit (or 16-bit) value for each pixel in the image.
+            p = out + channel;
+            if (channel >= channelCount)
+            {
+                // Fill this channel with default data.
+                for (i = 0; i < pixelCount; i++, p += 4)
+                    *p = (channel == 3 ? 255 : 0);
+            }
+            else
+            {
+                // Read the RLE data.
+                if (!stbi__psd_decode_rle(s, p, pixelCount))
+                {
+                    STBI_FREE(out);
+                    return stbi__errpuc("corrupt", "bad RLE data");
+                }
+            }
+        }
+    }
+    else
+    {
+        // We're at the raw image data. It's each channel in order (Red, Green, Blue, Alpha, ...)
+        // where each channel consists of an 8-bit (or 16-bit) value for each pixel in the image.

-      // Read the data by channel.
-      for (channel = 0; channel < 4; channel++)
-      {
-         if (channel >= channelCount)
-         {
-            // Fill this channel with default data.
-            if (bitdepth == 16 && bpc == 16)
-            {
-               stbi__uint16 *q = ((stbi__uint16 *)out) + channel;
-               stbi__uint16 val = channel == 3 ? 65535 : 0;
-               for (i = 0; i < pixelCount; i++, q += 4)
-                  *q = val;
-            }
-            else
-            {
-               stbi_uc *p = out + channel;
-               stbi_uc val = channel == 3 ? 255 : 0;
-               for (i = 0; i < pixelCount; i++, p += 4)
-                  *p = val;
-            }
-         }
-         else
-         {
-            if (ri->bits_per_channel == 16)
-            { // output bpc
-               stbi__uint16 *q = ((stbi__uint16 *)out) + channel;
-               for (i = 0; i < pixelCount; i++, q += 4)
-                  *q = (stbi__uint16)stbi__get16be(s);
-            }
-            else
-            {
-               stbi_uc *p = out + channel;
-               if (bitdepth == 16)
-               { // input bpc
-                  for (i = 0; i < pixelCount; i++, p += 4)
-                     *p = (stbi_uc)(stbi__get16be(s) >> 8);
-               }
-               else
-               {
-                  for (i = 0; i < pixelCount; i++, p += 4)
-                     *p = stbi__get8(s);
-               }
-            }
-         }
-      }
-   }
+        // Read the data by channel.
+        for (channel = 0; channel < 4; channel++)
+        {
+            if (channel >= channelCount)
+            {
+                // Fill this channel with default data.
+                if (bitdepth == 16 && bpc == 16)
+                {
+                    stbi__uint16 *q = ((stbi__uint16 *)out) + channel;
+                    stbi__uint16 val = channel == 3 ? 65535 : 0;
+                    for (i = 0; i < pixelCount; i++, q += 4)
+                        *q = val;
+                }
+                else
+                {
+                    stbi_uc *p = out + channel;
+                    stbi_uc val = channel == 3 ? 255 : 0;
+                    for (i = 0; i < pixelCount; i++, p += 4)
+                        *p = val;
+                }
+            }
+            else
+            {
+                if (ri->bits_per_channel == 16)
+                { // output bpc
+                    stbi__uint16 *q = ((stbi__uint16 *)out) + channel;
+                    for (i = 0; i < pixelCount; i++, q += 4)
+                        *q = (stbi__uint16)stbi__get16be(s);
+                }
+                else
+                {
+                    stbi_uc *p = out + channel;
+                    if (bitdepth == 16)
+                    { // input bpc
+                        for (i = 0; i < pixelCount; i++, p += 4)
+                            *p = (stbi_uc)(stbi__get16be(s) >> 8);
+                    }
+                    else
+                    {
+                        for (i = 0; i < pixelCount; i++, p += 4)
+                            *p = stbi__get8(s);
+                    }
+                }
+            }
+        }
+    }

-   // remove weird white matte from PSD
-   if (channelCount >= 4)
-   {
-      if (ri->bits_per_channel == 16)
-      {
-         for (i = 0; i < w * h; ++i)
-         {
-            stbi__uint16 *pixel = (stbi__uint16 *)out + 4 * i;
-            if (pixel[3] != 0 && pixel[3] != 65535)
-            {
-               float a = pixel[3] / 65535.0f;
-               float ra = 1.0f / a;
-               float inv_a = 65535.0f * (1 - ra);
-               pixel[0] = (stbi__uint16)(pixel[0] * ra + inv_a);
-               pixel[1] = (stbi__uint16)(pixel[1] * ra + inv_a);
-               pixel[2] = (stbi__uint16)(pixel[2] * ra + inv_a);
-            }
-         }
-      }
-      else
-      {
-         for (i = 0; i < w * h; ++i)
-         {
-            unsigned char *pixel = out + 4 * i;
-            if (pixel[3] != 0 && pixel[3] != 255)
-            {
-               float a = pixel[3] / 255.0f;
-               float ra = 1.0f / a;
-               float inv_a = 255.0f * (1 - ra);
-               pixel[0] = (unsigned char)(pixel[0] * ra + inv_a);
-               pixel[1] = (unsigned char)(pixel[1] * ra + inv_a);
-               pixel[2] = (unsigned char)(pixel[2] * ra + inv_a);
-            }
-         }
-      }
-   }
+    // remove weird white matte from PSD
+    if (channelCount >= 4)
+    {
+        if (ri->bits_per_channel == 16)
+        {
+            for (i = 0; i < w * h; ++i)
+            {
+                stbi__uint16 *pixel = (stbi__uint16 *)out + 4 * i;
+                if (pixel[3] != 0 && pixel[3] != 65535)
+                {
+                    float a = pixel[3] / 65535.0f;
+                    float ra = 1.0f / a;
+                    float inv_a = 65535.0f * (1 - ra);
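+                    // (undoes compositing over white: c' = c*a + 65535*(1-a),
+                    // so the original c is recovered as c'*ra + inv_a, ra = 1/a)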
+                    pixel[0] = (stbi__uint16)(pixel[0] * ra + inv_a);
+                    pixel[1] = (stbi__uint16)(pixel[1] * ra + inv_a);
+                    pixel[2] = (stbi__uint16)(pixel[2] * ra + inv_a);
+                }
+            }
+        }
+        else
+        {
+            for (i = 0; i < w * h; ++i)
+            {
+                unsigned char *pixel = out + 4 * i;
+                if (pixel[3] != 0 && pixel[3] != 255)
+                {
+                    float a = pixel[3] / 255.0f;
+                    float ra = 1.0f / a;
+                    float inv_a = 255.0f * (1 - ra);
+                    pixel[0] = (unsigned char)(pixel[0] * ra + inv_a);
+                    pixel[1] = (unsigned char)(pixel[1] * ra + inv_a);
+                    pixel[2] = (unsigned char)(pixel[2] * ra + inv_a);
+                }
+            }
+        }
+    }

-   // convert to desired output format
-   if (req_comp && req_comp != 4)
-   {
-      if (ri->bits_per_channel == 16)
-         out = (stbi_uc *)stbi__convert_format16((stbi__uint16 *)out, 4, req_comp, w, h);
-      else
-         out = stbi__convert_format(out, 4, req_comp, w, h);
-      if (out == NULL)
-         return out; // stbi__convert_format frees input on failure
-   }
+    // convert to desired output format
+    if (req_comp && req_comp != 4)
+    {
+        if (ri->bits_per_channel == 16)
+            out = (stbi_uc *)stbi__convert_format16((stbi__uint16 *)out, 4, req_comp, w, h);
+        else
+            out = stbi__convert_format(out, 4, req_comp, w, h);
+        if (out == NULL)
+            return out; // stbi__convert_format frees input on failure
+    }

-   if (comp)
-      *comp = 4;
-   *y = h;
-   *x = w;
+    if (comp)
+        *comp = 4;
+    *y = h;
+    *x = w;

-   return out;
+    return out;
}
#endif

@@ -7479,242 +7438,242 @@ static void *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req
#ifndef STBI_NO_PIC
static int stbi__pic_is4(stbi__context *s, const char *str)
{
-   int i;
-   for (i = 0; i < 4; ++i)
-      if (stbi__get8(s) != (stbi_uc)str[i])
-         return 0;
+    int i;
+    for (i = 0; i < 4; ++i)
+        if (stbi__get8(s) != (stbi_uc)str[i])
+            return 0;

-   return 1;
+    return 1;
}

static int stbi__pic_test_core(stbi__context *s)
{
-   int i;
+    int i;

-   if (!stbi__pic_is4(s, "\x53\x80\xF6\x34"))
-      return 0;
+    if (!stbi__pic_is4(s, "\x53\x80\xF6\x34"))
+        return 0;

-   for (i = 0; i < 84; ++i)
-      stbi__get8(s);
+    for (i = 0; i < 84; ++i)
+        stbi__get8(s);

-   if (!stbi__pic_is4(s, "PICT"))
-      return 0;
+    if (!stbi__pic_is4(s, "PICT"))
+        return 0;

-   return 1;
+    return 1;
}

typedef struct
{
-   stbi_uc size, type, channel;
+    stbi_uc size, type, channel;
} stbi__pic_packet;

static stbi_uc *stbi__readval(stbi__context *s, int channel, stbi_uc *dest)
{
-   int mask = 0x80, i;
+    int mask = 0x80, i;

-   for (i = 0; i < 4; ++i, mask >>= 1)
-   {
-      if (channel & mask)
-      {
-         if (stbi__at_eof(s))
-            return stbi__errpuc("bad file", "PIC file too short");
-         dest[i] = stbi__get8(s);
-      }
-   }
+    for (i = 0; i < 4; ++i, mask >>= 1)
+    {
+        if (channel & mask)
+        {
+            if (stbi__at_eof(s))
+                return stbi__errpuc("bad file", "PIC file too short");
+            dest[i] = stbi__get8(s);
+        }
+    }

-   return dest;
+    return dest;
}

static void stbi__copyval(int channel, stbi_uc *dest, const stbi_uc *src)
{
-   int mask = 0x80, i;
+    int mask = 0x80, i;

-   for (i = 0; i < 4; ++i, mask >>= 1)
-      if (channel & mask)
-         dest[i] = src[i];
+    for (i = 0; i < 4; ++i, mask >>= 1)
+        if (channel & mask)
+            dest[i] = src[i];
}

static stbi_uc *stbi__pic_load_core(stbi__context *s, int width, int height, int *comp, stbi_uc *result)
{
-   int act_comp = 0, num_packets = 0, y, chained;
-   stbi__pic_packet packets[10];
+    int act_comp = 0, num_packets = 0, y, chained;
+    stbi__pic_packet packets[10];

-   // this will (should...) cater for even some bizarre stuff like having data
-   // for the same channel in multiple packets.
-   do
-   {
-      stbi__pic_packet *packet;
-      if (num_packets == sizeof(packets) / sizeof(packets[0]))
-         return stbi__errpuc("bad format", "too many packets");
-      packet = &packets[num_packets++];
-      chained = stbi__get8(s);
-      packet->size = stbi__get8(s);
-      packet->type = stbi__get8(s);
-      packet->channel = stbi__get8(s);
-      act_comp |= packet->channel;
-      if (stbi__at_eof(s))
-         return stbi__errpuc("bad file", "file too short (reading packets)");
-      if (packet->size != 8)
-         return stbi__errpuc("bad format", "packet isn't 8bpp");
-   } while (chained);
+    // this will (should...) cater for even some bizarre stuff like having data
+    // for the same channel in multiple packets.
+    do
+    {
+        stbi__pic_packet *packet;
+        if (num_packets == sizeof(packets) / sizeof(packets[0]))
+            return stbi__errpuc("bad format", "too many packets");
+        packet = &packets[num_packets++];
+        chained = stbi__get8(s);
+        packet->size = stbi__get8(s);
+        packet->type = stbi__get8(s);
+        packet->channel = stbi__get8(s);
+        act_comp |= packet->channel;
+        if (stbi__at_eof(s))
+            return stbi__errpuc("bad file", "file too short (reading packets)");
+        if (packet->size != 8)
+            return stbi__errpuc("bad format", "packet isn't 8bpp");
+    } while (chained);

-   *comp = (act_comp & 0x10 ? 4 : 3); // has alpha channel?
+    *comp = (act_comp & 0x10 ? 4 : 3); // has alpha channel?
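+    // (packet->channel is a bitmask: 0x80 = red, 0x40 = green, 0x20 = blue,
+    // 0x10 = alpha, matching the mask >>= 1 walk in stbi__readval/stbi__copyval)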

-   for (y = 0; y < height; ++y)
-   {
-      int packet_idx;
-      for (packet_idx = 0; packet_idx < num_packets; ++packet_idx)
-      {
-         stbi__pic_packet *packet = &packets[packet_idx];
-         stbi_uc *dest = result + y * width * 4;
-         switch (packet->type)
-         {
-         default:
-            return stbi__errpuc("bad format", "packet has bad compression type");
-         case 0:
-         { // uncompressed
-            int x;
-            for (x = 0; x < width; ++x, dest += 4)
-               if (!stbi__readval(s, packet->channel, dest))
-                  return 0;
-            break;
-         }
-         case 1: // Pure RLE
-         {
-            int left = width, i;
-            while (left > 0)
-            {
-               stbi_uc count, value[4];
-               count = stbi__get8(s);
-               if (stbi__at_eof(s))
-                  return stbi__errpuc("bad file", "file too short (pure read count)");
-               if (count > left)
-                  count = (stbi_uc)left;
-               if (!stbi__readval(s, packet->channel, value))
-                  return 0;
-               for (i = 0; i < count; ++i, dest += 4)
-                  stbi__copyval(packet->channel, dest, value);
-               left -= count;
-            }
-         }
-         break;
-         case 2:
-         { // Mixed RLE
-            int left = width;
-            while (left > 0)
-            {
-               int count = stbi__get8(s), i;
-               if (stbi__at_eof(s))
-                  return stbi__errpuc("bad file", "file too short (mixed read count)");
-               if (count >= 128)
-               { // Repeated
-                  stbi_uc value[4];
-                  if (count == 128)
-                     count = stbi__get16be(s);
-                  else
-                     count -= 127;
-                  if (count > left)
-                     return stbi__errpuc("bad file", "scanline overrun");
-                  if (!stbi__readval(s, packet->channel, value))
-                     return 0;
-                  for (i = 0; i < count; ++i, dest += 4)
-                     stbi__copyval(packet->channel, dest, value);
-               }
-               else
-               { // Raw
-                  ++count;
-                  if (count > left)
-                     return stbi__errpuc("bad file", "scanline overrun");
-                  for (i = 0; i < count; ++i, dest += 4)
-                     if (!stbi__readval(s, packet->channel, dest))
-                        return 0;
-               }
-               left -= count;
-            }
-            break;
-         }
-         }
-      }
-   }
+    for (y = 0; y < height; ++y)
+    {
+        int packet_idx;
+        for (packet_idx = 0; packet_idx < num_packets; ++packet_idx)
+        {
+            stbi__pic_packet *packet = &packets[packet_idx];
+            stbi_uc *dest = result + y * width * 4;
+            switch (packet->type)
+            {
+            default:
+                return stbi__errpuc("bad format", "packet has bad compression type");
+            case 0:
+            { // uncompressed
+                int x;
+                for (x = 0; x < width; ++x, dest += 4)
+                    if (!stbi__readval(s, packet->channel, dest))
+                        return 0;
+                break;
+            }
+            case 1: // Pure RLE
+            {
+                int left = width, i;
+                while (left > 0)
+                {
+                    stbi_uc count, value[4];
+                    count = stbi__get8(s);
+                    if (stbi__at_eof(s))
+                        return stbi__errpuc("bad file", "file too short (pure read count)");
+                    if (count > left)
+                        count = (stbi_uc)left;
+                    if (!stbi__readval(s, packet->channel, value))
+                        return 0;
+                    for (i = 0; i < count; ++i, dest += 4)
+                        stbi__copyval(packet->channel, dest, value);
+                    left -= count;
+                }
+            }
+            break;
+            case 2:
+            { // Mixed RLE
+                int left = width;
+                while (left > 0)
+                {
+                    int count = stbi__get8(s), i;
+                    if (stbi__at_eof(s))
+                        return stbi__errpuc("bad file", "file too short (mixed read count)");
+                    if (count >= 128)
+                    { // Repeated
+                        stbi_uc value[4];
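+                        // (a count byte of exactly 128 is an escape: the real
+                        // repeat count follows as a 16-bit big-endian value)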
*py, int *comp, int req_comp, stbi__result_info *ri) { - stbi_uc *result; - int i, x, y, internal_comp; - STBI_NOTUSED(ri); + stbi_uc *result; + int i, x, y, internal_comp; + STBI_NOTUSED(ri); - if (!comp) - comp = &internal_comp; + if (!comp) + comp = &internal_comp; - for (i = 0; i < 92; ++i) - stbi__get8(s); + for (i = 0; i < 92; ++i) + stbi__get8(s); - x = stbi__get16be(s); - y = stbi__get16be(s); + x = stbi__get16be(s); + y = stbi__get16be(s); - if (y > STBI_MAX_DIMENSIONS) - return stbi__errpuc("too large", "Very large image (corrupt?)"); - if (x > STBI_MAX_DIMENSIONS) - return stbi__errpuc("too large", "Very large image (corrupt?)"); + if (y > STBI_MAX_DIMENSIONS) + return stbi__errpuc("too large", "Very large image (corrupt?)"); + if (x > STBI_MAX_DIMENSIONS) + return stbi__errpuc("too large", "Very large image (corrupt?)"); - if (stbi__at_eof(s)) - return stbi__errpuc("bad file", "file too short (pic header)"); - if (!stbi__mad3sizes_valid(x, y, 4, 0)) - return stbi__errpuc("too large", "PIC image too large to decode"); + if (stbi__at_eof(s)) + return stbi__errpuc("bad file", "file too short (pic header)"); + if (!stbi__mad3sizes_valid(x, y, 4, 0)) + return stbi__errpuc("too large", "PIC image too large to decode"); - stbi__get32be(s); // skip `ratio' - stbi__get16be(s); // skip `fields' - stbi__get16be(s); // skip `pad' + stbi__get32be(s); // skip `ratio' + stbi__get16be(s); // skip `fields' + stbi__get16be(s); // skip `pad' - // intermediate buffer is RGBA - result = (stbi_uc *)stbi__malloc_mad3(x, y, 4, 0); - if (!result) - return stbi__errpuc("outofmem", "Out of memory"); - memset(result, 0xff, x * y * 4); + // intermediate buffer is RGBA + result = (stbi_uc *)stbi__malloc_mad3(x, y, 4, 0); + if (!result) + return stbi__errpuc("outofmem", "Out of memory"); + memset(result, 0xff, x * y * 4); - if (!stbi__pic_load_core(s, x, y, comp, result)) - { - STBI_FREE(result); - result = 0; - } - *px = x; - *py = y; - if (req_comp == 0) - req_comp = *comp; - result = stbi__convert_format(result, 4, req_comp, x, y); + if (!stbi__pic_load_core(s, x, y, comp, result)) + { + STBI_FREE(result); + result = 0; + } + *px = x; + *py = y; + if (req_comp == 0) + req_comp = *comp; + result = stbi__convert_format(result, 4, req_comp, x, y); - return result; + return result; } static int stbi__pic_test(stbi__context *s) { - int r = stbi__pic_test_core(s); - stbi__rewind(s); - return r; + int r = stbi__pic_test_core(s); + stbi__rewind(s); + return r; } #endif @@ -7724,626 +7683,626 @@ static int stbi__pic_test(stbi__context *s) #ifndef STBI_NO_GIF typedef struct { - stbi__int16 prefix; - stbi_uc first; - stbi_uc suffix; + stbi__int16 prefix; + stbi_uc first; + stbi_uc suffix; } stbi__gif_lzw; typedef struct { - int w, h; - stbi_uc *out; // output buffer (always 4 components) - stbi_uc *background; // The current "background" as far as a gif is concerned - stbi_uc *history; - int flags, bgindex, ratio, transparent, eflags; - stbi_uc pal[256][4]; - stbi_uc lpal[256][4]; - stbi__gif_lzw codes[8192]; - stbi_uc *color_table; - int parse, step; - int lflags; - int start_x, start_y; - int max_x, max_y; - int cur_x, cur_y; - int line_size; - int delay; + int w, h; + stbi_uc *out; // output buffer (always 4 components) + stbi_uc *background; // The current "background" as far as a gif is concerned + stbi_uc *history; + int flags, bgindex, ratio, transparent, eflags; + stbi_uc pal[256][4]; + stbi_uc lpal[256][4]; + stbi__gif_lzw codes[8192]; + stbi_uc *color_table; + int parse, step; + int lflags; + int start_x, 
start_y; + int max_x, max_y; + int cur_x, cur_y; + int line_size; + int delay; } stbi__gif; static int stbi__gif_test_raw(stbi__context *s) { - int sz; - if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8') - return 0; - sz = stbi__get8(s); - if (sz != '9' && sz != '7') - return 0; - if (stbi__get8(s) != 'a') - return 0; - return 1; + int sz; + if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8') + return 0; + sz = stbi__get8(s); + if (sz != '9' && sz != '7') + return 0; + if (stbi__get8(s) != 'a') + return 0; + return 1; } static int stbi__gif_test(stbi__context *s) { - int r = stbi__gif_test_raw(s); - stbi__rewind(s); - return r; + int r = stbi__gif_test_raw(s); + stbi__rewind(s); + return r; } static void stbi__gif_parse_colortable(stbi__context *s, stbi_uc pal[256][4], int num_entries, int transp) { - int i; - for (i = 0; i < num_entries; ++i) - { - pal[i][2] = stbi__get8(s); - pal[i][1] = stbi__get8(s); - pal[i][0] = stbi__get8(s); - pal[i][3] = transp == i ? 0 : 255; - } + int i; + for (i = 0; i < num_entries; ++i) + { + pal[i][2] = stbi__get8(s); + pal[i][1] = stbi__get8(s); + pal[i][0] = stbi__get8(s); + pal[i][3] = transp == i ? 0 : 255; + } } static int stbi__gif_header(stbi__context *s, stbi__gif *g, int *comp, int is_info) { - stbi_uc version; - if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8') - return stbi__err("not GIF", "Corrupt GIF"); + stbi_uc version; + if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8') + return stbi__err("not GIF", "Corrupt GIF"); - version = stbi__get8(s); - if (version != '7' && version != '9') - return stbi__err("not GIF", "Corrupt GIF"); - if (stbi__get8(s) != 'a') - return stbi__err("not GIF", "Corrupt GIF"); + version = stbi__get8(s); + if (version != '7' && version != '9') + return stbi__err("not GIF", "Corrupt GIF"); + if (stbi__get8(s) != 'a') + return stbi__err("not GIF", "Corrupt GIF"); - stbi__g_failure_reason = ""; - g->w = stbi__get16le(s); - g->h = stbi__get16le(s); - g->flags = stbi__get8(s); - g->bgindex = stbi__get8(s); - g->ratio = stbi__get8(s); - g->transparent = -1; + stbi__g_failure_reason = ""; + g->w = stbi__get16le(s); + g->h = stbi__get16le(s); + g->flags = stbi__get8(s); + g->bgindex = stbi__get8(s); + g->ratio = stbi__get8(s); + g->transparent = -1; - if (g->w > STBI_MAX_DIMENSIONS) - return stbi__err("too large", "Very large image (corrupt?)"); - if (g->h > STBI_MAX_DIMENSIONS) - return stbi__err("too large", "Very large image (corrupt?)"); + if (g->w > STBI_MAX_DIMENSIONS) + return stbi__err("too large", "Very large image (corrupt?)"); + if (g->h > STBI_MAX_DIMENSIONS) + return stbi__err("too large", "Very large image (corrupt?)"); - if (comp != 0) - *comp = 4; // can't actually tell whether it's 3 or 4 until we parse the comments + if (comp != 0) + *comp = 4; // can't actually tell whether it's 3 or 4 until we parse the comments - if (is_info) - return 1; + if (is_info) + return 1; - if (g->flags & 0x80) - stbi__gif_parse_colortable(s, g->pal, 2 << (g->flags & 7), -1); + if (g->flags & 0x80) + stbi__gif_parse_colortable(s, g->pal, 2 << (g->flags & 7), -1); - return 1; + return 1; } static int stbi__gif_info_raw(stbi__context *s, int *x, int *y, int *comp) { - stbi__gif *g = (stbi__gif *)stbi__malloc(sizeof(stbi__gif)); - if (!g) - return stbi__err("outofmem", "Out of memory"); - if (!stbi__gif_header(s, g, comp, 1)) - { - STBI_FREE(g); - 
stbi__rewind(s); - return 0; - } - if (x) - *x = g->w; - if (y) - *y = g->h; - STBI_FREE(g); - return 1; + stbi__gif *g = (stbi__gif *)stbi__malloc(sizeof(stbi__gif)); + if (!g) + return stbi__err("outofmem", "Out of memory"); + if (!stbi__gif_header(s, g, comp, 1)) + { + STBI_FREE(g); + stbi__rewind(s); + return 0; + } + if (x) + *x = g->w; + if (y) + *y = g->h; + STBI_FREE(g); + return 1; } static void stbi__out_gif_code(stbi__gif *g, stbi__uint16 code) { - stbi_uc *p, *c; - int idx; + stbi_uc *p, *c; + int idx; - // recurse to decode the prefixes, since the linked-list is backwards, - // and working backwards through an interleaved image would be nasty - if (g->codes[code].prefix >= 0) - stbi__out_gif_code(g, g->codes[code].prefix); + // recurse to decode the prefixes, since the linked-list is backwards, + // and working backwards through an interleaved image would be nasty + if (g->codes[code].prefix >= 0) + stbi__out_gif_code(g, g->codes[code].prefix); - if (g->cur_y >= g->max_y) - return; + if (g->cur_y >= g->max_y) + return; - idx = g->cur_x + g->cur_y; - p = &g->out[idx]; - g->history[idx / 4] = 1; + idx = g->cur_x + g->cur_y; + p = &g->out[idx]; + g->history[idx / 4] = 1; - c = &g->color_table[g->codes[code].suffix * 4]; - if (c[3] > 128) - { // don't render transparent pixels; - p[0] = c[2]; - p[1] = c[1]; - p[2] = c[0]; - p[3] = c[3]; - } - g->cur_x += 4; + c = &g->color_table[g->codes[code].suffix * 4]; + if (c[3] > 128) + { // don't render transparent pixels; + p[0] = c[2]; + p[1] = c[1]; + p[2] = c[0]; + p[3] = c[3]; + } + g->cur_x += 4; - if (g->cur_x >= g->max_x) - { - g->cur_x = g->start_x; - g->cur_y += g->step; + if (g->cur_x >= g->max_x) + { + g->cur_x = g->start_x; + g->cur_y += g->step; - while (g->cur_y >= g->max_y && g->parse > 0) - { - g->step = (1 << g->parse) * g->line_size; - g->cur_y = g->start_y + (g->step >> 1); - --g->parse; - } - } + while (g->cur_y >= g->max_y && g->parse > 0) + { + g->step = (1 << g->parse) * g->line_size; + g->cur_y = g->start_y + (g->step >> 1); + --g->parse; + } + } } static stbi_uc *stbi__process_gif_raster(stbi__context *s, stbi__gif *g) { - stbi_uc lzw_cs; - stbi__int32 len, init_code; - stbi__uint32 first; - stbi__int32 codesize, codemask, avail, oldcode, bits, valid_bits, clear; - stbi__gif_lzw *p; + stbi_uc lzw_cs; + stbi__int32 len, init_code; + stbi__uint32 first; + stbi__int32 codesize, codemask, avail, oldcode, bits, valid_bits, clear; + stbi__gif_lzw *p; - lzw_cs = stbi__get8(s); - if (lzw_cs > 12) - return NULL; - clear = 1 << lzw_cs; - first = 1; - codesize = lzw_cs + 1; - codemask = (1 << codesize) - 1; - bits = 0; - valid_bits = 0; - for (init_code = 0; init_code < clear; init_code++) - { - g->codes[init_code].prefix = -1; - g->codes[init_code].first = (stbi_uc)init_code; - g->codes[init_code].suffix = (stbi_uc)init_code; - } + lzw_cs = stbi__get8(s); + if (lzw_cs > 12) + return NULL; + clear = 1 << lzw_cs; + first = 1; + codesize = lzw_cs + 1; + codemask = (1 << codesize) - 1; + bits = 0; + valid_bits = 0; + for (init_code = 0; init_code < clear; init_code++) + { + g->codes[init_code].prefix = -1; + g->codes[init_code].first = (stbi_uc)init_code; + g->codes[init_code].suffix = (stbi_uc)init_code; + } - // support no starting clear code - avail = clear + 2; - oldcode = -1; + // support no starting clear code + avail = clear + 2; + oldcode = -1; - len = 0; - for (;;) - { - if (valid_bits < codesize) - { - if (len == 0) - { - len = stbi__get8(s); // start new block - if (len == 0) - return g->out; - } - --len; - bits |= 
(stbi__int32)stbi__get8(s) << valid_bits; - valid_bits += 8; - } - else - { - stbi__int32 code = bits & codemask; - bits >>= codesize; - valid_bits -= codesize; - // @OPTIMIZE: is there some way we can accelerate the non-clear path? - if (code == clear) - { // clear code - codesize = lzw_cs + 1; - codemask = (1 << codesize) - 1; - avail = clear + 2; - oldcode = -1; - first = 0; - } - else if (code == clear + 1) - { // end of stream code - stbi__skip(s, len); - while ((len = stbi__get8(s)) > 0) - stbi__skip(s, len); - return g->out; - } - else if (code <= avail) - { - if (first) - { - return stbi__errpuc("no clear code", "Corrupt GIF"); - } + len = 0; + for (;;) + { + if (valid_bits < codesize) + { + if (len == 0) + { + len = stbi__get8(s); // start new block + if (len == 0) + return g->out; + } + --len; + bits |= (stbi__int32)stbi__get8(s) << valid_bits; + valid_bits += 8; + } + else + { + stbi__int32 code = bits & codemask; + bits >>= codesize; + valid_bits -= codesize; + // @OPTIMIZE: is there some way we can accelerate the non-clear path? + if (code == clear) + { // clear code + codesize = lzw_cs + 1; + codemask = (1 << codesize) - 1; + avail = clear + 2; + oldcode = -1; + first = 0; + } + else if (code == clear + 1) + { // end of stream code + stbi__skip(s, len); + while ((len = stbi__get8(s)) > 0) + stbi__skip(s, len); + return g->out; + } + else if (code <= avail) + { + if (first) + { + return stbi__errpuc("no clear code", "Corrupt GIF"); + } - if (oldcode >= 0) - { - p = &g->codes[avail++]; - if (avail > 8192) - { - return stbi__errpuc("too many codes", "Corrupt GIF"); - } + if (oldcode >= 0) + { + p = &g->codes[avail++]; + if (avail > 8192) + { + return stbi__errpuc("too many codes", "Corrupt GIF"); + } - p->prefix = (stbi__int16)oldcode; - p->first = g->codes[oldcode].first; - p->suffix = (code == avail) ? p->first : g->codes[code].first; - } - else if (code == avail) - return stbi__errpuc("illegal code in raster", "Corrupt GIF"); + p->prefix = (stbi__int16)oldcode; + p->first = g->codes[oldcode].first; + p->suffix = (code == avail) ? 
p->first : g->codes[code].first; + } + else if (code == avail) + return stbi__errpuc("illegal code in raster", "Corrupt GIF"); - stbi__out_gif_code(g, (stbi__uint16)code); + stbi__out_gif_code(g, (stbi__uint16)code); - if ((avail & codemask) == 0 && avail <= 0x0FFF) - { - codesize++; - codemask = (1 << codesize) - 1; - } + if ((avail & codemask) == 0 && avail <= 0x0FFF) + { + codesize++; + codemask = (1 << codesize) - 1; + } - oldcode = code; - } - else - { - return stbi__errpuc("illegal code in raster", "Corrupt GIF"); - } - } - } + oldcode = code; + } + else + { + return stbi__errpuc("illegal code in raster", "Corrupt GIF"); + } + } + } } // this function is designed to support animated gifs, although stb_image doesn't support it // two back is the image from two frames ago, used for a very specific disposal format static stbi_uc *stbi__gif_load_next(stbi__context *s, stbi__gif *g, int *comp, int req_comp, stbi_uc *two_back) { - int dispose; - int first_frame; - int pi; - int pcount; - STBI_NOTUSED(req_comp); + int dispose; + int first_frame; + int pi; + int pcount; + STBI_NOTUSED(req_comp); - // on first frame, any non-written pixels get the background colour (non-transparent) - first_frame = 0; - if (g->out == 0) - { - if (!stbi__gif_header(s, g, comp, 0)) - return 0; // stbi__g_failure_reason set by stbi__gif_header - if (!stbi__mad3sizes_valid(4, g->w, g->h, 0)) - return stbi__errpuc("too large", "GIF image is too large"); - pcount = g->w * g->h; - g->out = (stbi_uc *)stbi__malloc(4 * pcount); - g->background = (stbi_uc *)stbi__malloc(4 * pcount); - g->history = (stbi_uc *)stbi__malloc(pcount); - if (!g->out || !g->background || !g->history) - return stbi__errpuc("outofmem", "Out of memory"); + // on first frame, any non-written pixels get the background colour (non-transparent) + first_frame = 0; + if (g->out == 0) + { + if (!stbi__gif_header(s, g, comp, 0)) + return 0; // stbi__g_failure_reason set by stbi__gif_header + if (!stbi__mad3sizes_valid(4, g->w, g->h, 0)) + return stbi__errpuc("too large", "GIF image is too large"); + pcount = g->w * g->h; + g->out = (stbi_uc *)stbi__malloc(4 * pcount); + g->background = (stbi_uc *)stbi__malloc(4 * pcount); + g->history = (stbi_uc *)stbi__malloc(pcount); + if (!g->out || !g->background || !g->history) + return stbi__errpuc("outofmem", "Out of memory"); - // image is treated as "transparent" at the start - ie, nothing overwrites the current background; - // background colour is only used for pixels that are not rendered first frame, after that "background" - // color refers to the color that was there the previous frame. - memset(g->out, 0x00, 4 * pcount); - memset(g->background, 0x00, 4 * pcount); // state of the background (starts transparent) - memset(g->history, 0x00, pcount); // pixels that were affected previous frame - first_frame = 1; - } - else - { - // second frame - how do we dispose of the previous one? - dispose = (g->eflags & 0x1C) >> 2; - pcount = g->w * g->h; + // image is treated as "transparent" at the start - ie, nothing overwrites the current background; + // background colour is only used for pixels that are not rendered first frame, after that "background" + // color refers to the color that was there the previous frame. + memset(g->out, 0x00, 4 * pcount); + memset(g->background, 0x00, 4 * pcount); // state of the background (starts transparent) + memset(g->history, 0x00, pcount); // pixels that were affected previous frame + first_frame = 1; + } + else + { + // second frame - how do we dispose of the previous one? 
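The dispose value read just below comes from bits 2-4 of the Graphic Control Extension's packed-fields byte. For reference while reading the cases that follow, this is the GIF89a disposal-method table; the enum and helper are an illustrative sketch, not names from stb_image:

// GIF89a frame-disposal methods, as packed into the GCE flags byte.
// Hypothetical names for illustration only.
enum gif_disposal {
    GIF_DISPOSE_UNSPECIFIED = 0, // no disposal specified; treated like "do not dispose"
    GIF_DISPOSE_NONE        = 1, // leave the frame in place; it becomes the new background
    GIF_DISPOSE_BACKGROUND  = 2, // restore the pixels this frame touched to the background
    GIF_DISPOSE_PREVIOUS    = 3  // restore the pixels this frame touched from two frames back
};

static int gif_disposal_from_eflags(int eflags)
{
    return (eflags & 0x1C) >> 2; // bits 2..4 of the packed-fields byte
}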
+        dispose = (g->eflags & 0x1C) >> 2;
+        pcount = g->w * g->h;
-      if ((dispose == 3) && (two_back == 0))
-      {
-         dispose = 2; // if I don't have an image to revert back to, default to the old background
-      }
+        if ((dispose == 3) && (two_back == 0))
+        {
+            dispose = 2; // if I don't have an image to revert back to, default to the old background
+        }
-      if (dispose == 3)
-      { // use previous graphic
-         for (pi = 0; pi < pcount; ++pi)
-         {
-            if (g->history[pi])
-            {
-               memcpy(&g->out[pi * 4], &two_back[pi * 4], 4);
-            }
-         }
-      }
-      else if (dispose == 2)
-      {
-         // restore what was changed last frame to background before that frame;
-         for (pi = 0; pi < pcount; ++pi)
-         {
-            if (g->history[pi])
-            {
-               memcpy(&g->out[pi * 4], &g->background[pi * 4], 4);
-            }
-         }
-      }
-      else
-      {
-         // This is a non-disposal case eithe way, so just
-         // leave the pixels as is, and they will become the new background
-         // 1: do not dispose
-         // 0: not specified.
-      }
+        if (dispose == 3)
+        { // use previous graphic
+            for (pi = 0; pi < pcount; ++pi)
+            {
+                if (g->history[pi])
+                {
+                    memcpy(&g->out[pi * 4], &two_back[pi * 4], 4);
+                }
+            }
+        }
+        else if (dispose == 2)
+        {
+            // restore what was changed last frame to background before that frame;
+            for (pi = 0; pi < pcount; ++pi)
+            {
+                if (g->history[pi])
+                {
+                    memcpy(&g->out[pi * 4], &g->background[pi * 4], 4);
+                }
+            }
+        }
+        else
+        {
+            // This is a non-disposal case either way, so just
+            // leave the pixels as is, and they will become the new background
+            // 1: do not dispose
+            // 0: not specified.
+        }
-      // background is what out is after the undoing of the previou frame;
-      memcpy(g->background, g->out, 4 * g->w * g->h);
-   }
+        // background is what out is after the undoing of the previous frame;
+        memcpy(g->background, g->out, 4 * g->w * g->h);
+    }
-   // clear my history;
-   memset(g->history, 0x00, g->w * g->h); // pixels that were affected previous frame
+    // clear my history;
+    memset(g->history, 0x00, g->w * g->h); // pixels that were affected previous frame
-   for (;;)
-   {
-      int tag = stbi__get8(s);
-      switch (tag)
-      {
-      case 0x2C: /* Image Descriptor */
-      {
-         stbi__int32 x, y, w, h;
-         stbi_uc *o;
+    for (;;)
+    {
+        int tag = stbi__get8(s);
+        switch (tag)
+        {
+        case 0x2C: /* Image Descriptor */
+        {
+            stbi__int32 x, y, w, h;
+            stbi_uc *o;
-         x = stbi__get16le(s);
-         y = stbi__get16le(s);
-         w = stbi__get16le(s);
-         h = stbi__get16le(s);
-         if (((x + w) > (g->w)) || ((y + h) > (g->h)))
-            return stbi__errpuc("bad Image Descriptor", "Corrupt GIF");
+            x = stbi__get16le(s);
+            y = stbi__get16le(s);
+            w = stbi__get16le(s);
+            h = stbi__get16le(s);
+            if (((x + w) > (g->w)) || ((y + h) > (g->h)))
+                return stbi__errpuc("bad Image Descriptor", "Corrupt GIF");
-         g->line_size = g->w * 4;
-         g->start_x = x * 4;
-         g->start_y = y * g->line_size;
-         g->max_x = g->start_x + w * 4;
-         g->max_y = g->start_y + h * g->line_size;
-         g->cur_x = g->start_x;
-         g->cur_y = g->start_y;
+            g->line_size = g->w * 4;
+            g->start_x = x * 4;
+            g->start_y = y * g->line_size;
+            g->max_x = g->start_x + w * 4;
+            g->max_y = g->start_y + h * g->line_size;
+            g->cur_x = g->start_x;
+            g->cur_y = g->start_y;
-         // if the width of the specified rectangle is 0, that means
-         // we may not see *any* pixels or the image is malformed;
-         // to make sure this is caught, move the current y down to
-         // max_y (which is what out_gif_code checks).
- if (w == 0) - g->cur_y = g->max_y; + // if the width of the specified rectangle is 0, that means + // we may not see *any* pixels or the image is malformed; + // to make sure this is caught, move the current y down to + // max_y (which is what out_gif_code checks). + if (w == 0) + g->cur_y = g->max_y; - g->lflags = stbi__get8(s); + g->lflags = stbi__get8(s); - if (g->lflags & 0x40) - { - g->step = 8 * g->line_size; // first interlaced spacing - g->parse = 3; - } - else - { - g->step = g->line_size; - g->parse = 0; - } + if (g->lflags & 0x40) + { + g->step = 8 * g->line_size; // first interlaced spacing + g->parse = 3; + } + else + { + g->step = g->line_size; + g->parse = 0; + } - if (g->lflags & 0x80) - { - stbi__gif_parse_colortable(s, g->lpal, 2 << (g->lflags & 7), g->eflags & 0x01 ? g->transparent : -1); - g->color_table = (stbi_uc *)g->lpal; - } - else if (g->flags & 0x80) - { - g->color_table = (stbi_uc *)g->pal; - } - else - return stbi__errpuc("missing color table", "Corrupt GIF"); + if (g->lflags & 0x80) + { + stbi__gif_parse_colortable(s, g->lpal, 2 << (g->lflags & 7), g->eflags & 0x01 ? g->transparent : -1); + g->color_table = (stbi_uc *)g->lpal; + } + else if (g->flags & 0x80) + { + g->color_table = (stbi_uc *)g->pal; + } + else + return stbi__errpuc("missing color table", "Corrupt GIF"); - o = stbi__process_gif_raster(s, g); - if (!o) - return NULL; + o = stbi__process_gif_raster(s, g); + if (!o) + return NULL; - // if this was the first frame, - pcount = g->w * g->h; - if (first_frame && (g->bgindex > 0)) - { - // if first frame, any pixel not drawn to gets the background color - for (pi = 0; pi < pcount; ++pi) - { - if (g->history[pi] == 0) - { - g->pal[g->bgindex][3] = 255; // just in case it was made transparent, undo that; It will be reset next frame if need be; - memcpy(&g->out[pi * 4], &g->pal[g->bgindex], 4); - } - } - } + // if this was the first frame, + pcount = g->w * g->h; + if (first_frame && (g->bgindex > 0)) + { + // if first frame, any pixel not drawn to gets the background color + for (pi = 0; pi < pcount; ++pi) + { + if (g->history[pi] == 0) + { + g->pal[g->bgindex][3] = 255; // just in case it was made transparent, undo that; It will be reset next frame if need be; + memcpy(&g->out[pi * 4], &g->pal[g->bgindex], 4); + } + } + } - return o; - } + return o; + } - case 0x21: // Comment Extension. - { - int len; - int ext = stbi__get8(s); - if (ext == 0xF9) - { // Graphic Control Extension. - len = stbi__get8(s); - if (len == 4) - { - g->eflags = stbi__get8(s); - g->delay = 10 * stbi__get16le(s); // delay - 1/100th of a second, saving as 1/1000ths. + case 0x21: // Comment Extension. + { + int len; + int ext = stbi__get8(s); + if (ext == 0xF9) + { // Graphic Control Extension. + len = stbi__get8(s); + if (len == 4) + { + g->eflags = stbi__get8(s); + g->delay = 10 * stbi__get16le(s); // delay - 1/100th of a second, saving as 1/1000ths. 
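The 10 * above is a unit conversion: the Graphic Control Extension stores its delay as a little-endian 16-bit count of hundredths of a second, while g->delay (and the delays array returned for animated GIFs) holds milliseconds. A minimal sketch of the same conversion, with an illustrative helper name:

// GCE delay (1/100 s units) -> milliseconds, mirroring g->delay above.
// A raw field of 8 becomes 80 ms, i.e. roughly 12.5 frames per second.
static int gif_delay_to_ms(int raw_hundredths)
{
    return 10 * raw_hundredths;
}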
- // unset old transparent - if (g->transparent >= 0) - { - g->pal[g->transparent][3] = 255; - } - if (g->eflags & 0x01) - { - g->transparent = stbi__get8(s); - if (g->transparent >= 0) - { - g->pal[g->transparent][3] = 0; - } - } - else - { - // don't need transparent - stbi__skip(s, 1); - g->transparent = -1; - } - } - else - { - stbi__skip(s, len); - break; - } - } - while ((len = stbi__get8(s)) != 0) - { - stbi__skip(s, len); - } - break; - } + // unset old transparent + if (g->transparent >= 0) + { + g->pal[g->transparent][3] = 255; + } + if (g->eflags & 0x01) + { + g->transparent = stbi__get8(s); + if (g->transparent >= 0) + { + g->pal[g->transparent][3] = 0; + } + } + else + { + // don't need transparent + stbi__skip(s, 1); + g->transparent = -1; + } + } + else + { + stbi__skip(s, len); + break; + } + } + while ((len = stbi__get8(s)) != 0) + { + stbi__skip(s, len); + } + break; + } - case 0x3B: // gif stream termination code - return (stbi_uc *)s; // using '1' causes warning on some compilers + case 0x3B: // gif stream termination code + return (stbi_uc *)s; // using '1' causes warning on some compilers - default: - return stbi__errpuc("unknown code", "Corrupt GIF"); - } - } + default: + return stbi__errpuc("unknown code", "Corrupt GIF"); + } + } } static void *stbi__load_gif_main_outofmem(stbi__gif *g, stbi_uc *out, int **delays) { - STBI_FREE(g->out); - STBI_FREE(g->history); - STBI_FREE(g->background); + STBI_FREE(g->out); + STBI_FREE(g->history); + STBI_FREE(g->background); - if (out) - STBI_FREE(out); - if (delays && *delays) - STBI_FREE(*delays); - return stbi__errpuc("outofmem", "Out of memory"); + if (out) + STBI_FREE(out); + if (delays && *delays) + STBI_FREE(*delays); + return stbi__errpuc("outofmem", "Out of memory"); } static void *stbi__load_gif_main(stbi__context *s, int **delays, int *x, int *y, int *z, int *comp, int req_comp) { - if (stbi__gif_test(s)) - { - int layers = 0; - stbi_uc *u = 0; - stbi_uc *out = 0; - stbi_uc *two_back = 0; - stbi__gif g; - int stride; - int out_size = 0; - int delays_size = 0; + if (stbi__gif_test(s)) + { + int layers = 0; + stbi_uc *u = 0; + stbi_uc *out = 0; + stbi_uc *two_back = 0; + stbi__gif g; + int stride; + int out_size = 0; + int delays_size = 0; - STBI_NOTUSED(out_size); - STBI_NOTUSED(delays_size); + STBI_NOTUSED(out_size); + STBI_NOTUSED(delays_size); - memset(&g, 0, sizeof(g)); - if (delays) - { - *delays = 0; - } + memset(&g, 0, sizeof(g)); + if (delays) + { + *delays = 0; + } - do - { - u = stbi__gif_load_next(s, &g, comp, req_comp, two_back); - if (u == (stbi_uc *)s) - u = 0; // end of animated gif marker + do + { + u = stbi__gif_load_next(s, &g, comp, req_comp, two_back); + if (u == (stbi_uc *)s) + u = 0; // end of animated gif marker - if (u) - { - *x = g.w; - *y = g.h; - ++layers; - stride = g.w * g.h * 4; + if (u) + { + *x = g.w; + *y = g.h; + ++layers; + stride = g.w * g.h * 4; - if (out) - { - void *tmp = (stbi_uc *)STBI_REALLOC_SIZED(out, out_size, layers * stride); - if (!tmp) - return stbi__load_gif_main_outofmem(&g, out, delays); - else - { - out = (stbi_uc *)tmp; - out_size = layers * stride; - } + if (out) + { + void *tmp = (stbi_uc *)STBI_REALLOC_SIZED(out, out_size, layers * stride); + if (!tmp) + return stbi__load_gif_main_outofmem(&g, out, delays); + else + { + out = (stbi_uc *)tmp; + out_size = layers * stride; + } - if (delays) - { - int *new_delays = (int *)STBI_REALLOC_SIZED(*delays, delays_size, sizeof(int) * layers); - if (!new_delays) - return stbi__load_gif_main_outofmem(&g, out, delays); - 
*delays = new_delays; - delays_size = layers * sizeof(int); - } - } - else - { - out = (stbi_uc *)stbi__malloc(layers * stride); - if (!out) - return stbi__load_gif_main_outofmem(&g, out, delays); - out_size = layers * stride; - if (delays) - { - *delays = (int *)stbi__malloc(layers * sizeof(int)); - if (!*delays) - return stbi__load_gif_main_outofmem(&g, out, delays); - delays_size = layers * sizeof(int); - } - } - memcpy(out + ((layers - 1) * stride), u, stride); - if (layers >= 2) - { - two_back = out - 2 * stride; - } + if (delays) + { + int *new_delays = (int *)STBI_REALLOC_SIZED(*delays, delays_size, sizeof(int) * layers); + if (!new_delays) + return stbi__load_gif_main_outofmem(&g, out, delays); + *delays = new_delays; + delays_size = layers * sizeof(int); + } + } + else + { + out = (stbi_uc *)stbi__malloc(layers * stride); + if (!out) + return stbi__load_gif_main_outofmem(&g, out, delays); + out_size = layers * stride; + if (delays) + { + *delays = (int *)stbi__malloc(layers * sizeof(int)); + if (!*delays) + return stbi__load_gif_main_outofmem(&g, out, delays); + delays_size = layers * sizeof(int); + } + } + memcpy(out + ((layers - 1) * stride), u, stride); + if (layers >= 2) + { + two_back = out - 2 * stride; + } - if (delays) - { - (*delays)[layers - 1U] = g.delay; - } - } - } while (u != 0); + if (delays) + { + (*delays)[layers - 1U] = g.delay; + } + } + } while (u != 0); - // free temp buffer; - STBI_FREE(g.out); - STBI_FREE(g.history); - STBI_FREE(g.background); + // free temp buffer; + STBI_FREE(g.out); + STBI_FREE(g.history); + STBI_FREE(g.background); - // do the final conversion after loading everything; - if (req_comp && req_comp != 4) - out = stbi__convert_format(out, 4, req_comp, layers * g.w, g.h); + // do the final conversion after loading everything; + if (req_comp && req_comp != 4) + out = stbi__convert_format(out, 4, req_comp, layers * g.w, g.h); - *z = layers; - return out; - } - else - { - return stbi__errpuc("not GIF", "Image was not as a gif type."); - } + *z = layers; + return out; + } + else + { + return stbi__errpuc("not GIF", "Image was not as a gif type."); + } } static void *stbi__gif_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) { - stbi_uc *u = 0; - stbi__gif g; - memset(&g, 0, sizeof(g)); - STBI_NOTUSED(ri); + stbi_uc *u = 0; + stbi__gif g; + memset(&g, 0, sizeof(g)); + STBI_NOTUSED(ri); - u = stbi__gif_load_next(s, &g, comp, req_comp, 0); - if (u == (stbi_uc *)s) - u = 0; // end of animated gif marker - if (u) - { - *x = g.w; - *y = g.h; + u = stbi__gif_load_next(s, &g, comp, req_comp, 0); + if (u == (stbi_uc *)s) + u = 0; // end of animated gif marker + if (u) + { + *x = g.w; + *y = g.h; - // moved conversion to after successful load so that the same - // can be done for multiple frames. - if (req_comp && req_comp != 4) - u = stbi__convert_format(u, 4, req_comp, g.w, g.h); - } - else if (g.out) - { - // if there was an error and we allocated an image buffer, free it! - STBI_FREE(g.out); - } + // moved conversion to after successful load so that the same + // can be done for multiple frames. + if (req_comp && req_comp != 4) + u = stbi__convert_format(u, 4, req_comp, g.w, g.h); + } + else if (g.out) + { + // if there was an error and we allocated an image buffer, free it! 
+ STBI_FREE(g.out); + } - // free buffers needed for multiple frame loading; - STBI_FREE(g.history); - STBI_FREE(g.background); + // free buffers needed for multiple frame loading; + STBI_FREE(g.history); + STBI_FREE(g.background); - return u; + return u; } static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp) { - return stbi__gif_info_raw(s, x, y, comp); + return stbi__gif_info_raw(s, x, y, comp); } #endif @@ -8353,485 +8312,485 @@ static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp) #ifndef STBI_NO_HDR static int stbi__hdr_test_core(stbi__context *s, const char *signature) { - int i; - for (i = 0; signature[i]; ++i) - if (stbi__get8(s) != signature[i]) - return 0; - stbi__rewind(s); - return 1; + int i; + for (i = 0; signature[i]; ++i) + if (stbi__get8(s) != signature[i]) + return 0; + stbi__rewind(s); + return 1; } static int stbi__hdr_test(stbi__context *s) { - int r = stbi__hdr_test_core(s, "#?RADIANCE\n"); - stbi__rewind(s); - if (!r) - { - r = stbi__hdr_test_core(s, "#?RGBE\n"); - stbi__rewind(s); - } - return r; + int r = stbi__hdr_test_core(s, "#?RADIANCE\n"); + stbi__rewind(s); + if (!r) + { + r = stbi__hdr_test_core(s, "#?RGBE\n"); + stbi__rewind(s); + } + return r; } #define STBI__HDR_BUFLEN 1024 static char *stbi__hdr_gettoken(stbi__context *z, char *buffer) { - int len = 0; - char c = '\0'; + int len = 0; + char c = '\0'; - c = (char)stbi__get8(z); + c = (char)stbi__get8(z); - while (!stbi__at_eof(z) && c != '\n') - { - buffer[len++] = c; - if (len == STBI__HDR_BUFLEN - 1) - { - // flush to end of line - while (!stbi__at_eof(z) && stbi__get8(z) != '\n') - ; - break; - } - c = (char)stbi__get8(z); - } + while (!stbi__at_eof(z) && c != '\n') + { + buffer[len++] = c; + if (len == STBI__HDR_BUFLEN - 1) + { + // flush to end of line + while (!stbi__at_eof(z) && stbi__get8(z) != '\n') + ; + break; + } + c = (char)stbi__get8(z); + } - buffer[len] = 0; - return buffer; + buffer[len] = 0; + return buffer; } static void stbi__hdr_convert(float *output, stbi_uc *input, int req_comp) { - if (input[3] != 0) - { - float f1; - // Exponent - f1 = (float)ldexp(1.0f, input[3] - (int)(128 + 8)); - if (req_comp <= 2) - output[0] = (input[0] + input[1] + input[2]) * f1 / 3; - else - { - output[0] = input[0] * f1; - output[1] = input[1] * f1; - output[2] = input[2] * f1; - } - if (req_comp == 2) - output[1] = 1; - if (req_comp == 4) - output[3] = 1; - } - else - { - switch (req_comp) - { - case 4: - output[3] = 1; /* fallthrough */ - case 3: - output[0] = output[1] = output[2] = 0; - break; - case 2: - output[1] = 1; /* fallthrough */ - case 1: - output[0] = 0; - break; - } - } + if (input[3] != 0) + { + float f1; + // Exponent + f1 = (float)ldexp(1.0f, input[3] - (int)(128 + 8)); + if (req_comp <= 2) + output[0] = (input[0] + input[1] + input[2]) * f1 / 3; + else + { + output[0] = input[0] * f1; + output[1] = input[1] * f1; + output[2] = input[2] * f1; + } + if (req_comp == 2) + output[1] = 1; + if (req_comp == 4) + output[3] = 1; + } + else + { + switch (req_comp) + { + case 4: + output[3] = 1; /* fallthrough */ + case 3: + output[0] = output[1] = output[2] = 0; + break; + case 2: + output[1] = 1; /* fallthrough */ + case 1: + output[0] = 0; + break; + } + } } static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) { - char buffer[STBI__HDR_BUFLEN]; - char *token; - int valid = 0; - int width, height; - stbi_uc *scanline; - float *hdr_data; - int len; - unsigned char count, value; - int i, j, k, c1, c2, z; 
- const char *headerToken; - STBI_NOTUSED(ri); + char buffer[STBI__HDR_BUFLEN]; + char *token; + int valid = 0; + int width, height; + stbi_uc *scanline; + float *hdr_data; + int len; + unsigned char count, value; + int i, j, k, c1, c2, z; + const char *headerToken; + STBI_NOTUSED(ri); - // Check identifier - headerToken = stbi__hdr_gettoken(s, buffer); - if (strcmp(headerToken, "#?RADIANCE") != 0 && strcmp(headerToken, "#?RGBE") != 0) - return stbi__errpf("not HDR", "Corrupt HDR image"); + // Check identifier + headerToken = stbi__hdr_gettoken(s, buffer); + if (strcmp(headerToken, "#?RADIANCE") != 0 && strcmp(headerToken, "#?RGBE") != 0) + return stbi__errpf("not HDR", "Corrupt HDR image"); - // Parse header - for (;;) - { - token = stbi__hdr_gettoken(s, buffer); - if (token[0] == 0) - break; - if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) - valid = 1; - } + // Parse header + for (;;) + { + token = stbi__hdr_gettoken(s, buffer); + if (token[0] == 0) + break; + if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) + valid = 1; + } - if (!valid) - return stbi__errpf("unsupported format", "Unsupported HDR format"); + if (!valid) + return stbi__errpf("unsupported format", "Unsupported HDR format"); - // Parse width and height - // can't use sscanf() if we're not using stdio! - token = stbi__hdr_gettoken(s, buffer); - if (strncmp(token, "-Y ", 3)) - return stbi__errpf("unsupported data layout", "Unsupported HDR format"); - token += 3; - height = (int)strtol(token, &token, 10); - while (*token == ' ') - ++token; - if (strncmp(token, "+X ", 3)) - return stbi__errpf("unsupported data layout", "Unsupported HDR format"); - token += 3; - width = (int)strtol(token, NULL, 10); + // Parse width and height + // can't use sscanf() if we're not using stdio! + token = stbi__hdr_gettoken(s, buffer); + if (strncmp(token, "-Y ", 3)) + return stbi__errpf("unsupported data layout", "Unsupported HDR format"); + token += 3; + height = (int)strtol(token, &token, 10); + while (*token == ' ') + ++token; + if (strncmp(token, "+X ", 3)) + return stbi__errpf("unsupported data layout", "Unsupported HDR format"); + token += 3; + width = (int)strtol(token, NULL, 10); - if (height > STBI_MAX_DIMENSIONS) - return stbi__errpf("too large", "Very large image (corrupt?)"); - if (width > STBI_MAX_DIMENSIONS) - return stbi__errpf("too large", "Very large image (corrupt?)"); + if (height > STBI_MAX_DIMENSIONS) + return stbi__errpf("too large", "Very large image (corrupt?)"); + if (width > STBI_MAX_DIMENSIONS) + return stbi__errpf("too large", "Very large image (corrupt?)"); - *x = width; - *y = height; + *x = width; + *y = height; - if (comp) - *comp = 3; - if (req_comp == 0) - req_comp = 3; + if (comp) + *comp = 3; + if (req_comp == 0) + req_comp = 3; - if (!stbi__mad4sizes_valid(width, height, req_comp, sizeof(float), 0)) - return stbi__errpf("too large", "HDR image is too large"); + if (!stbi__mad4sizes_valid(width, height, req_comp, sizeof(float), 0)) + return stbi__errpf("too large", "HDR image is too large"); - // Read data - hdr_data = (float *)stbi__malloc_mad4(width, height, req_comp, sizeof(float), 0); - if (!hdr_data) - return stbi__errpf("outofmem", "Out of memory"); + // Read data + hdr_data = (float *)stbi__malloc_mad4(width, height, req_comp, sizeof(float), 0); + if (!hdr_data) + return stbi__errpf("outofmem", "Out of memory"); - // Load image data - // image data is stored as some number of sca - if (width < 8 || width >= 32768) - { - // Read flat data - for (j = 0; j < height; ++j) - { - for (i = 0; i < width; 
++i) - { - stbi_uc rgbe[4]; - main_decode_loop: - stbi__getn(s, rgbe, 4); - stbi__hdr_convert(hdr_data + j * width * req_comp + i * req_comp, rgbe, req_comp); - } - } - } - else - { - // Read RLE-encoded data - scanline = NULL; + // Load image data + // image data is stored as some number of sca + if (width < 8 || width >= 32768) + { + // Read flat data + for (j = 0; j < height; ++j) + { + for (i = 0; i < width; ++i) + { + stbi_uc rgbe[4]; + main_decode_loop: + stbi__getn(s, rgbe, 4); + stbi__hdr_convert(hdr_data + j * width * req_comp + i * req_comp, rgbe, req_comp); + } + } + } + else + { + // Read RLE-encoded data + scanline = NULL; - for (j = 0; j < height; ++j) - { - c1 = stbi__get8(s); - c2 = stbi__get8(s); - len = stbi__get8(s); - if (c1 != 2 || c2 != 2 || (len & 0x80)) - { - // not run-length encoded, so we have to actually use THIS data as a decoded - // pixel (note this can't be a valid pixel--one of RGB must be >= 128) - stbi_uc rgbe[4]; - rgbe[0] = (stbi_uc)c1; - rgbe[1] = (stbi_uc)c2; - rgbe[2] = (stbi_uc)len; - rgbe[3] = (stbi_uc)stbi__get8(s); - stbi__hdr_convert(hdr_data, rgbe, req_comp); - i = 1; - j = 0; - STBI_FREE(scanline); - goto main_decode_loop; // yes, this makes no sense - } - len <<= 8; - len |= stbi__get8(s); - if (len != width) - { - STBI_FREE(hdr_data); - STBI_FREE(scanline); - return stbi__errpf("invalid decoded scanline length", "corrupt HDR"); - } - if (scanline == NULL) - { - scanline = (stbi_uc *)stbi__malloc_mad2(width, 4, 0); - if (!scanline) - { - STBI_FREE(hdr_data); - return stbi__errpf("outofmem", "Out of memory"); - } - } + for (j = 0; j < height; ++j) + { + c1 = stbi__get8(s); + c2 = stbi__get8(s); + len = stbi__get8(s); + if (c1 != 2 || c2 != 2 || (len & 0x80)) + { + // not run-length encoded, so we have to actually use THIS data as a decoded + // pixel (note this can't be a valid pixel--one of RGB must be >= 128) + stbi_uc rgbe[4]; + rgbe[0] = (stbi_uc)c1; + rgbe[1] = (stbi_uc)c2; + rgbe[2] = (stbi_uc)len; + rgbe[3] = (stbi_uc)stbi__get8(s); + stbi__hdr_convert(hdr_data, rgbe, req_comp); + i = 1; + j = 0; + STBI_FREE(scanline); + goto main_decode_loop; // yes, this makes no sense + } + len <<= 8; + len |= stbi__get8(s); + if (len != width) + { + STBI_FREE(hdr_data); + STBI_FREE(scanline); + return stbi__errpf("invalid decoded scanline length", "corrupt HDR"); + } + if (scanline == NULL) + { + scanline = (stbi_uc *)stbi__malloc_mad2(width, 4, 0); + if (!scanline) + { + STBI_FREE(hdr_data); + return stbi__errpf("outofmem", "Out of memory"); + } + } - for (k = 0; k < 4; ++k) - { - int nleft; - i = 0; - while ((nleft = width - i) > 0) - { - count = stbi__get8(s); - if (count > 128) - { - // Run - value = stbi__get8(s); - count -= 128; - if ((count == 0) || (count > nleft)) - { - STBI_FREE(hdr_data); - STBI_FREE(scanline); - return stbi__errpf("corrupt", "bad RLE data in HDR"); - } - for (z = 0; z < count; ++z) - scanline[i++ * 4 + k] = value; - } - else - { - // Dump - if ((count == 0) || (count > nleft)) - { - STBI_FREE(hdr_data); - STBI_FREE(scanline); - return stbi__errpf("corrupt", "bad RLE data in HDR"); - } - for (z = 0; z < count; ++z) - scanline[i++ * 4 + k] = stbi__get8(s); - } - } - } - for (i = 0; i < width; ++i) - stbi__hdr_convert(hdr_data + (j * width + i) * req_comp, scanline + i * 4, req_comp); - } - if (scanline) - STBI_FREE(scanline); - } + for (k = 0; k < 4; ++k) + { + int nleft; + i = 0; + while ((nleft = width - i) > 0) + { + count = stbi__get8(s); + if (count > 128) + { + // Run + value = stbi__get8(s); + count -= 128; + if 
((count == 0) || (count > nleft)) + { + STBI_FREE(hdr_data); + STBI_FREE(scanline); + return stbi__errpf("corrupt", "bad RLE data in HDR"); + } + for (z = 0; z < count; ++z) + scanline[i++ * 4 + k] = value; + } + else + { + // Dump + if ((count == 0) || (count > nleft)) + { + STBI_FREE(hdr_data); + STBI_FREE(scanline); + return stbi__errpf("corrupt", "bad RLE data in HDR"); + } + for (z = 0; z < count; ++z) + scanline[i++ * 4 + k] = stbi__get8(s); + } + } + } + for (i = 0; i < width; ++i) + stbi__hdr_convert(hdr_data + (j * width + i) * req_comp, scanline + i * 4, req_comp); + } + if (scanline) + STBI_FREE(scanline); + } - return hdr_data; + return hdr_data; } static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp) { - char buffer[STBI__HDR_BUFLEN]; - char *token; - int valid = 0; - int dummy; + char buffer[STBI__HDR_BUFLEN]; + char *token; + int valid = 0; + int dummy; - if (!x) - x = &dummy; - if (!y) - y = &dummy; - if (!comp) - comp = &dummy; + if (!x) + x = &dummy; + if (!y) + y = &dummy; + if (!comp) + comp = &dummy; - if (stbi__hdr_test(s) == 0) - { - stbi__rewind(s); - return 0; - } + if (stbi__hdr_test(s) == 0) + { + stbi__rewind(s); + return 0; + } - for (;;) - { - token = stbi__hdr_gettoken(s, buffer); - if (token[0] == 0) - break; - if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) - valid = 1; - } + for (;;) + { + token = stbi__hdr_gettoken(s, buffer); + if (token[0] == 0) + break; + if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) + valid = 1; + } - if (!valid) - { - stbi__rewind(s); - return 0; - } - token = stbi__hdr_gettoken(s, buffer); - if (strncmp(token, "-Y ", 3)) - { - stbi__rewind(s); - return 0; - } - token += 3; - *y = (int)strtol(token, &token, 10); - while (*token == ' ') - ++token; - if (strncmp(token, "+X ", 3)) - { - stbi__rewind(s); - return 0; - } - token += 3; - *x = (int)strtol(token, NULL, 10); - *comp = 3; - return 1; + if (!valid) + { + stbi__rewind(s); + return 0; + } + token = stbi__hdr_gettoken(s, buffer); + if (strncmp(token, "-Y ", 3)) + { + stbi__rewind(s); + return 0; + } + token += 3; + *y = (int)strtol(token, &token, 10); + while (*token == ' ') + ++token; + if (strncmp(token, "+X ", 3)) + { + stbi__rewind(s); + return 0; + } + token += 3; + *x = (int)strtol(token, NULL, 10); + *comp = 3; + return 1; } #endif // STBI_NO_HDR #ifndef STBI_NO_BMP static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp) { - void *p; - stbi__bmp_data info; + void *p; + stbi__bmp_data info; - info.all_a = 255; - p = stbi__bmp_parse_header(s, &info); - if (p == NULL) - { - stbi__rewind(s); - return 0; - } - if (x) - *x = s->img_x; - if (y) - *y = s->img_y; - if (comp) - { - if (info.bpp == 24 && info.ma == 0xff000000) - *comp = 3; - else - *comp = info.ma ? 4 : 3; - } - return 1; + info.all_a = 255; + p = stbi__bmp_parse_header(s, &info); + if (p == NULL) + { + stbi__rewind(s); + return 0; + } + if (x) + *x = s->img_x; + if (y) + *y = s->img_y; + if (comp) + { + if (info.bpp == 24 && info.ma == 0xff000000) + *comp = 3; + else + *comp = info.ma ? 
4 : 3; + } + return 1; } #endif #ifndef STBI_NO_PSD static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp) { - int channelCount, dummy, depth; - if (!x) - x = &dummy; - if (!y) - y = &dummy; - if (!comp) - comp = &dummy; - if (stbi__get32be(s) != 0x38425053) - { - stbi__rewind(s); - return 0; - } - if (stbi__get16be(s) != 1) - { - stbi__rewind(s); - return 0; - } - stbi__skip(s, 6); - channelCount = stbi__get16be(s); - if (channelCount < 0 || channelCount > 16) - { - stbi__rewind(s); - return 0; - } - *y = stbi__get32be(s); - *x = stbi__get32be(s); - depth = stbi__get16be(s); - if (depth != 8 && depth != 16) - { - stbi__rewind(s); - return 0; - } - if (stbi__get16be(s) != 3) - { - stbi__rewind(s); - return 0; - } - *comp = 4; - return 1; + int channelCount, dummy, depth; + if (!x) + x = &dummy; + if (!y) + y = &dummy; + if (!comp) + comp = &dummy; + if (stbi__get32be(s) != 0x38425053) + { + stbi__rewind(s); + return 0; + } + if (stbi__get16be(s) != 1) + { + stbi__rewind(s); + return 0; + } + stbi__skip(s, 6); + channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) + { + stbi__rewind(s); + return 0; + } + *y = stbi__get32be(s); + *x = stbi__get32be(s); + depth = stbi__get16be(s); + if (depth != 8 && depth != 16) + { + stbi__rewind(s); + return 0; + } + if (stbi__get16be(s) != 3) + { + stbi__rewind(s); + return 0; + } + *comp = 4; + return 1; } static int stbi__psd_is16(stbi__context *s) { - int channelCount, depth; - if (stbi__get32be(s) != 0x38425053) - { - stbi__rewind(s); - return 0; - } - if (stbi__get16be(s) != 1) - { - stbi__rewind(s); - return 0; - } - stbi__skip(s, 6); - channelCount = stbi__get16be(s); - if (channelCount < 0 || channelCount > 16) - { - stbi__rewind(s); - return 0; - } - STBI_NOTUSED(stbi__get32be(s)); - STBI_NOTUSED(stbi__get32be(s)); - depth = stbi__get16be(s); - if (depth != 16) - { - stbi__rewind(s); - return 0; - } - return 1; + int channelCount, depth; + if (stbi__get32be(s) != 0x38425053) + { + stbi__rewind(s); + return 0; + } + if (stbi__get16be(s) != 1) + { + stbi__rewind(s); + return 0; + } + stbi__skip(s, 6); + channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) + { + stbi__rewind(s); + return 0; + } + STBI_NOTUSED(stbi__get32be(s)); + STBI_NOTUSED(stbi__get32be(s)); + depth = stbi__get16be(s); + if (depth != 16) + { + stbi__rewind(s); + return 0; + } + return 1; } #endif #ifndef STBI_NO_PIC static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp) { - int act_comp = 0, num_packets = 0, chained, dummy; - stbi__pic_packet packets[10]; + int act_comp = 0, num_packets = 0, chained, dummy; + stbi__pic_packet packets[10]; - if (!x) - x = &dummy; - if (!y) - y = &dummy; - if (!comp) - comp = &dummy; + if (!x) + x = &dummy; + if (!y) + y = &dummy; + if (!comp) + comp = &dummy; - if (!stbi__pic_is4(s, "\x53\x80\xF6\x34")) - { - stbi__rewind(s); - return 0; - } + if (!stbi__pic_is4(s, "\x53\x80\xF6\x34")) + { + stbi__rewind(s); + return 0; + } - stbi__skip(s, 88); + stbi__skip(s, 88); - *x = stbi__get16be(s); - *y = stbi__get16be(s); - if (stbi__at_eof(s)) - { - stbi__rewind(s); - return 0; - } - if ((*x) != 0 && (1 << 28) / (*x) < (*y)) - { - stbi__rewind(s); - return 0; - } + *x = stbi__get16be(s); + *y = stbi__get16be(s); + if (stbi__at_eof(s)) + { + stbi__rewind(s); + return 0; + } + if ((*x) != 0 && (1 << 28) / (*x) < (*y)) + { + stbi__rewind(s); + return 0; + } - stbi__skip(s, 8); + stbi__skip(s, 8); - do - { - stbi__pic_packet *packet; + do + { + stbi__pic_packet *packet; - if 
(num_packets == sizeof(packets) / sizeof(packets[0])) - return 0; + if (num_packets == sizeof(packets) / sizeof(packets[0])) + return 0; - packet = &packets[num_packets++]; - chained = stbi__get8(s); - packet->size = stbi__get8(s); - packet->type = stbi__get8(s); - packet->channel = stbi__get8(s); - act_comp |= packet->channel; + packet = &packets[num_packets++]; + chained = stbi__get8(s); + packet->size = stbi__get8(s); + packet->type = stbi__get8(s); + packet->channel = stbi__get8(s); + act_comp |= packet->channel; - if (stbi__at_eof(s)) - { - stbi__rewind(s); - return 0; - } - if (packet->size != 8) - { - stbi__rewind(s); - return 0; - } - } while (chained); + if (stbi__at_eof(s)) + { + stbi__rewind(s); + return 0; + } + if (packet->size != 8) + { + stbi__rewind(s); + return 0; + } + } while (chained); - *comp = (act_comp & 0x10 ? 4 : 3); + *comp = (act_comp & 0x10 ? 4 : 3); - return 1; + return 1; } #endif @@ -8850,487 +8809,485 @@ static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp) static int stbi__pnm_test(stbi__context *s) { - char p, t; - p = (char)stbi__get8(s); - t = (char)stbi__get8(s); - if (p != 'P' || (t != '5' && t != '6')) - { - stbi__rewind(s); - return 0; - } - return 1; + char p, t; + p = (char)stbi__get8(s); + t = (char)stbi__get8(s); + if (p != 'P' || (t != '5' && t != '6')) + { + stbi__rewind(s); + return 0; + } + return 1; } static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) { - stbi_uc *out; - STBI_NOTUSED(ri); + stbi_uc *out; + STBI_NOTUSED(ri); - ri->bits_per_channel = stbi__pnm_info(s, (int *)&s->img_x, (int *)&s->img_y, (int *)&s->img_n); - if (ri->bits_per_channel == 0) - return 0; + ri->bits_per_channel = stbi__pnm_info(s, (int *)&s->img_x, (int *)&s->img_y, (int *)&s->img_n); + if (ri->bits_per_channel == 0) + return 0; - if (s->img_y > STBI_MAX_DIMENSIONS) - return stbi__errpuc("too large", "Very large image (corrupt?)"); - if (s->img_x > STBI_MAX_DIMENSIONS) - return stbi__errpuc("too large", "Very large image (corrupt?)"); + if (s->img_y > STBI_MAX_DIMENSIONS) + return stbi__errpuc("too large", "Very large image (corrupt?)"); + if (s->img_x > STBI_MAX_DIMENSIONS) + return stbi__errpuc("too large", "Very large image (corrupt?)"); - *x = s->img_x; - *y = s->img_y; - if (comp) - *comp = s->img_n; + *x = s->img_x; + *y = s->img_y; + if (comp) + *comp = s->img_n; - if (!stbi__mad4sizes_valid(s->img_n, s->img_x, s->img_y, ri->bits_per_channel / 8, 0)) - return stbi__errpuc("too large", "PNM too large"); + if (!stbi__mad4sizes_valid(s->img_n, s->img_x, s->img_y, ri->bits_per_channel / 8, 0)) + return stbi__errpuc("too large", "PNM too large"); - out = (stbi_uc *)stbi__malloc_mad4(s->img_n, s->img_x, s->img_y, ri->bits_per_channel / 8, 0); - if (!out) - return stbi__errpuc("outofmem", "Out of memory"); - if (!stbi__getn(s, out, s->img_n * s->img_x * s->img_y * (ri->bits_per_channel / 8))) - { - STBI_FREE(out); - return stbi__errpuc("bad PNM", "PNM file truncated"); - } + out = (stbi_uc *)stbi__malloc_mad4(s->img_n, s->img_x, s->img_y, ri->bits_per_channel / 8, 0); + if (!out) + return stbi__errpuc("outofmem", "Out of memory"); + if (!stbi__getn(s, out, s->img_n * s->img_x * s->img_y * (ri->bits_per_channel / 8))) + { + STBI_FREE(out); + return stbi__errpuc("bad PNM", "PNM file truncated"); + } - if (req_comp && req_comp != s->img_n) - { - if (ri->bits_per_channel == 16) - { - out = (stbi_uc *)stbi__convert_format16((stbi__uint16 *)out, s->img_n, req_comp, s->img_x, s->img_y); - } - else 
- { - out = stbi__convert_format(out, s->img_n, req_comp, s->img_x, s->img_y); - } - if (out == NULL) - return out; // stbi__convert_format frees input on failure - } - return out; + if (req_comp && req_comp != s->img_n) + { + if (ri->bits_per_channel == 16) + { + out = (stbi_uc *)stbi__convert_format16((stbi__uint16 *)out, s->img_n, req_comp, s->img_x, s->img_y); + } + else + { + out = stbi__convert_format(out, s->img_n, req_comp, s->img_x, s->img_y); + } + if (out == NULL) + return out; // stbi__convert_format frees input on failure + } + return out; } static int stbi__pnm_isspace(char c) { - return c == ' ' || c == '\t' || c == '\n' || c == '\v' || c == '\f' || c == '\r'; + return c == ' ' || c == '\t' || c == '\n' || c == '\v' || c == '\f' || c == '\r'; } static void stbi__pnm_skip_whitespace(stbi__context *s, char *c) { - for (;;) - { - while (!stbi__at_eof(s) && stbi__pnm_isspace(*c)) - *c = (char)stbi__get8(s); + for (;;) + { + while (!stbi__at_eof(s) && stbi__pnm_isspace(*c)) + *c = (char)stbi__get8(s); - if (stbi__at_eof(s) || *c != '#') - break; + if (stbi__at_eof(s) || *c != '#') + break; - while (!stbi__at_eof(s) && *c != '\n' && *c != '\r') - *c = (char)stbi__get8(s); - } + while (!stbi__at_eof(s) && *c != '\n' && *c != '\r') + *c = (char)stbi__get8(s); + } } static int stbi__pnm_isdigit(char c) { - return c >= '0' && c <= '9'; + return c >= '0' && c <= '9'; } static int stbi__pnm_getinteger(stbi__context *s, char *c) { - int value = 0; + int value = 0; - while (!stbi__at_eof(s) && stbi__pnm_isdigit(*c)) - { - value = value * 10 + (*c - '0'); - *c = (char)stbi__get8(s); - if ((value > 214748364) || (value == 214748364 && *c > '7')) - return stbi__err("integer parse overflow", "Parsing an integer in the PPM header overflowed a 32-bit int"); - } + while (!stbi__at_eof(s) && stbi__pnm_isdigit(*c)) + { + value = value * 10 + (*c - '0'); + *c = (char)stbi__get8(s); + if ((value > 214748364) || (value == 214748364 && *c > '7')) + return stbi__err("integer parse overflow", "Parsing an integer in the PPM header overflowed a 32-bit int"); + } - return value; + return value; } static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp) { - int maxv, dummy; - char c, p, t; + int maxv, dummy; + char c, p, t; - if (!x) - x = &dummy; - if (!y) - y = &dummy; - if (!comp) - comp = &dummy; + if (!x) + x = &dummy; + if (!y) + y = &dummy; + if (!comp) + comp = &dummy; - stbi__rewind(s); + stbi__rewind(s); - // Get identifier - p = (char)stbi__get8(s); - t = (char)stbi__get8(s); - if (p != 'P' || (t != '5' && t != '6')) - { - stbi__rewind(s); - return 0; - } + // Get identifier + p = (char)stbi__get8(s); + t = (char)stbi__get8(s); + if (p != 'P' || (t != '5' && t != '6')) + { + stbi__rewind(s); + return 0; + } - *comp = (t == '6') ? 3 : 1; // '5' is 1-component .pgm; '6' is 3-component .ppm + *comp = (t == '6') ? 
3 : 1; // '5' is 1-component .pgm; '6' is 3-component .ppm
-   c = (char)stbi__get8(s);
-   stbi__pnm_skip_whitespace(s, &c);
+    c = (char)stbi__get8(s);
+    stbi__pnm_skip_whitespace(s, &c);
-   *x = stbi__pnm_getinteger(s, &c); // read width
-   if (*x == 0)
-      return stbi__err("invalid width", "PPM image header had zero or overflowing width");
-   stbi__pnm_skip_whitespace(s, &c);
+    *x = stbi__pnm_getinteger(s, &c); // read width
+    if (*x == 0)
+        return stbi__err("invalid width", "PPM image header had zero or overflowing width");
+    stbi__pnm_skip_whitespace(s, &c);
-   *y = stbi__pnm_getinteger(s, &c); // read height
-   if (*y == 0)
-      return stbi__err("invalid width", "PPM image header had zero or overflowing width");
-   stbi__pnm_skip_whitespace(s, &c);
+    *y = stbi__pnm_getinteger(s, &c); // read height
+    if (*y == 0)
+        return stbi__err("invalid height", "PPM image header had zero or overflowing height");
+    stbi__pnm_skip_whitespace(s, &c);
-   maxv = stbi__pnm_getinteger(s, &c); // read max value
-   if (maxv > 65535)
-      return stbi__err("max value > 65535", "PPM image supports only 8-bit and 16-bit images");
-   else if (maxv > 255)
-      return 16;
-   else
-      return 8;
+    maxv = stbi__pnm_getinteger(s, &c); // read max value
+    if (maxv > 65535)
+        return stbi__err("max value > 65535", "PPM image supports only 8-bit and 16-bit images");
+    else if (maxv > 255)
+        return 16;
+    else
+        return 8;
}

static int stbi__pnm_is16(stbi__context *s)
{
-   if (stbi__pnm_info(s, NULL, NULL, NULL) == 16)
-      return 1;
-   return 0;
+    if (stbi__pnm_info(s, NULL, NULL, NULL) == 16)
+        return 1;
+    return 0;
}
#endif

static int stbi__info_main(stbi__context *s, int *x, int *y, int *comp)
{
#ifndef STBI_NO_JPEG
-   if (stbi__jpeg_info(s, x, y, comp))
-      return 1;
+    if (stbi__jpeg_info(s, x, y, comp))
+        return 1;
#endif
#ifndef STBI_NO_PNG
-   if (stbi__png_info(s, x, y, comp))
-      return 1;
+    if (stbi__png_info(s, x, y, comp))
+        return 1;
#endif
#ifndef STBI_NO_GIF
-   if (stbi__gif_info(s, x, y, comp))
-      return 1;
+    if (stbi__gif_info(s, x, y, comp))
+        return 1;
#endif
#ifndef STBI_NO_BMP
-   if (stbi__bmp_info(s, x, y, comp))
-      return 1;
+    if (stbi__bmp_info(s, x, y, comp))
+        return 1;
#endif
#ifndef STBI_NO_PSD
-   if (stbi__psd_info(s, x, y, comp))
-      return 1;
+    if (stbi__psd_info(s, x, y, comp))
+        return 1;
#endif
#ifndef STBI_NO_PIC
-   if (stbi__pic_info(s, x, y, comp))
-      return 1;
+    if (stbi__pic_info(s, x, y, comp))
+        return 1;
#endif
#ifndef STBI_NO_PNM
-   if (stbi__pnm_info(s, x, y, comp))
-      return 1;
+    if (stbi__pnm_info(s, x, y, comp))
+        return 1;
#endif
#ifndef STBI_NO_HDR
-   if (stbi__hdr_info(s, x, y, comp))
-      return 1;
+    if (stbi__hdr_info(s, x, y, comp))
+        return 1;
#endif

// test tga last because it's a crappy test!
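TGA is deliberately probed after every other format: unlike the formats above, it has no magic signature, so stbi__tga_info can only sanity-check header fields, and it is the one test here that can false-positive on unrelated data.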
#ifndef STBI_NO_TGA - if (stbi__tga_info(s, x, y, comp)) - return 1; + if (stbi__tga_info(s, x, y, comp)) + return 1; #endif - return stbi__err("unknown image type", "Image not of any known type, or corrupt"); + return stbi__err("unknown image type", "Image not of any known type, or corrupt"); } static int stbi__is_16_main(stbi__context *s) { #ifndef STBI_NO_PNG - if (stbi__png_is16(s)) - return 1; + if (stbi__png_is16(s)) + return 1; #endif #ifndef STBI_NO_PSD - if (stbi__psd_is16(s)) - return 1; + if (stbi__psd_is16(s)) + return 1; #endif #ifndef STBI_NO_PNM - if (stbi__pnm_is16(s)) - return 1; + if (stbi__pnm_is16(s)) + return 1; #endif - return 0; + return 0; } #ifndef STBI_NO_STDIO STBIDEF int stbi_info(char const *filename, int *x, int *y, int *comp) { - FILE *f = stbi__fopen(filename, "rb"); - int result; - if (!f) - return stbi__err("can't fopen", "Unable to open file"); - result = stbi_info_from_file(f, x, y, comp); - fclose(f); - return result; + FILE *f = stbi__fopen(filename, "rb"); + int result; + if (!f) + return stbi__err("can't fopen", "Unable to open file"); + result = stbi_info_from_file(f, x, y, comp); + fclose(f); + return result; } STBIDEF int stbi_info_from_file(FILE *f, int *x, int *y, int *comp) { - int r; - stbi__context s; - long pos = ftell(f); - stbi__start_file(&s, f); - r = stbi__info_main(&s, x, y, comp); - fseek(f, pos, SEEK_SET); - return r; + int r; + stbi__context s; + long pos = ftell(f); + stbi__start_file(&s, f); + r = stbi__info_main(&s, x, y, comp); + fseek(f, pos, SEEK_SET); + return r; } STBIDEF int stbi_is_16_bit(char const *filename) { - FILE *f = stbi__fopen(filename, "rb"); - int result; - if (!f) - return stbi__err("can't fopen", "Unable to open file"); - result = stbi_is_16_bit_from_file(f); - fclose(f); - return result; + FILE *f = stbi__fopen(filename, "rb"); + int result; + if (!f) + return stbi__err("can't fopen", "Unable to open file"); + result = stbi_is_16_bit_from_file(f); + fclose(f); + return result; } STBIDEF int stbi_is_16_bit_from_file(FILE *f) { - int r; - stbi__context s; - long pos = ftell(f); - stbi__start_file(&s, f); - r = stbi__is_16_main(&s); - fseek(f, pos, SEEK_SET); - return r; + int r; + stbi__context s; + long pos = ftell(f); + stbi__start_file(&s, f); + r = stbi__is_16_main(&s); + fseek(f, pos, SEEK_SET); + return r; } #endif // !STBI_NO_STDIO STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp) { - stbi__context s; - stbi__start_mem(&s, buffer, len); - return stbi__info_main(&s, x, y, comp); + stbi__context s; + stbi__start_mem(&s, buffer, len); + return stbi__info_main(&s, x, y, comp); } STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *c, void *user, int *x, int *y, int *comp) { - stbi__context s; - stbi__start_callbacks(&s, (stbi_io_callbacks *)c, user); - return stbi__info_main(&s, x, y, comp); + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *)c, user); + return stbi__info_main(&s, x, y, comp); } STBIDEF int stbi_is_16_bit_from_memory(stbi_uc const *buffer, int len) { - stbi__context s; - stbi__start_mem(&s, buffer, len); - return stbi__is_16_main(&s); + stbi__context s; + stbi__start_mem(&s, buffer, len); + return stbi__is_16_main(&s); } STBIDEF int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const *c, void *user) { - stbi__context s; - stbi__start_callbacks(&s, (stbi_io_callbacks *)c, user); - return stbi__is_16_main(&s); + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *)c, user); + return stbi__is_16_main(&s); } 
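Since these probe entry points are the public face of everything above, a minimal sketch of the intended call pattern may help: probe cheaply with stbi_info_from_memory and stbi_is_16_bit_from_memory, then decode with the matching loader. Only the public stb_image functions used here are real; the file-slurping boilerplate and the assumed include path follow this repo's layout and are ours.

#include <stdio.h>
#include <stdlib.h>
// #define STB_IMAGE_IMPLEMENTATION in exactly one translation unit before this:
#include "stb/image.h"

int main(int argc, char **argv)
{
    FILE *f;
    long len;
    unsigned char *buf;
    int x, y, comp;

    if (argc < 2) return 1;
    f = fopen(argv[1], "rb");
    if (!f) return 1;
    fseek(f, 0, SEEK_END);
    len = ftell(f);
    fseek(f, 0, SEEK_SET);
    buf = (unsigned char *)malloc((size_t)len);
    if (!buf) return 1;
    if (fread(buf, 1, (size_t)len, f) != (size_t)len) return 1;
    fclose(f);

    // Probe the header only: cheap, and leaves the buffer untouched.
    if (!stbi_info_from_memory(buf, (int)len, &x, &y, &comp)) {
        fprintf(stderr, "unrecognized image: %s\n", stbi_failure_reason());
        return 1;
    }
    printf("%dx%d, %d channel(s), %s\n", x, y, comp,
           stbi_is_16_bit_from_memory(buf, (int)len) ? "16-bit" : "8-bit");

    // Decode for real, forcing RGBA so GIF/PSD/etc. all come out the same.
    {
        unsigned char *pixels = stbi_load_from_memory(buf, (int)len, &x, &y, &comp, 4);
        if (!pixels) {
            fprintf(stderr, "decode failed: %s\n", stbi_failure_reason());
            return 1;
        }
        stbi_image_free(pixels);
    }
    free(buf);
    return 0;
}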
-#pragma GCC diagnostic pop - #endif // STB_IMAGE_IMPLEMENTATION /* revision history: - 2.20 (2019-02-07) support utf8 filenames in Windows; fix warnings and platform ifdefs - 2.19 (2018-02-11) fix warning - 2.18 (2018-01-30) fix warnings - 2.17 (2018-01-29) change sbti__shiftsigned to avoid clang -O2 bug - 1-bit BMP - *_is_16_bit api - avoid warnings - 2.16 (2017-07-23) all functions have 16-bit variants; - STBI_NO_STDIO works again; - compilation fixes; - fix rounding in unpremultiply; - optimize vertical flip; - disable raw_len validation; - documentation fixes - 2.15 (2017-03-18) fix png-1,2,4 bug; now all Imagenet JPGs decode; - warning fixes; disable run-time SSE detection on gcc; - uniform handling of optional "return" values; - thread-safe initialization of zlib tables - 2.14 (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs - 2.13 (2016-11-29) add 16-bit API, only supported for PNG right now - 2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes - 2.11 (2016-04-02) allocate large structures on the stack - remove white matting for transparent PSD - fix reported channel count for PNG & BMP - re-enable SSE2 in non-gcc 64-bit - support RGB-formatted JPEG - read 16-bit PNGs (only as 8-bit) - 2.10 (2016-01-22) avoid warning introduced in 2.09 by STBI_REALLOC_SIZED - 2.09 (2016-01-16) allow comments in PNM files - 16-bit-per-pixel TGA (not bit-per-component) - info() for TGA could break due to .hdr handling - info() for BMP to shares code instead of sloppy parse - can use STBI_REALLOC_SIZED if allocator doesn't support realloc - code cleanup - 2.08 (2015-09-13) fix to 2.07 cleanup, reading RGB PSD as RGBA - 2.07 (2015-09-13) fix compiler warnings - partial animated GIF support - limited 16-bpc PSD support - #ifdef unused functions - bug with < 92 byte PIC,PNM,HDR,TGA - 2.06 (2015-04-19) fix bug where PSD returns wrong '*comp' value - 2.05 (2015-04-19) fix bug in progressive JPEG handling, fix warning - 2.04 (2015-04-15) try to re-enable SIMD on MinGW 64-bit - 2.03 (2015-04-12) extra corruption checking (mmozeiko) - stbi_set_flip_vertically_on_load (nguillemot) - fix NEON support; fix mingw support - 2.02 (2015-01-19) fix incorrect assert, fix warning - 2.01 (2015-01-17) fix various warnings; suppress SIMD on gcc 32-bit without -msse2 - 2.00b (2014-12-25) fix STBI_MALLOC in progressive JPEG - 2.00 (2014-12-25) optimize JPG, including x86 SSE2 & NEON SIMD (ryg) - progressive JPEG (stb) - PGM/PPM support (Ken Miller) - STBI_MALLOC,STBI_REALLOC,STBI_FREE - GIF bugfix -- seemingly never worked - STBI_NO_*, STBI_ONLY_* - 1.48 (2014-12-14) fix incorrectly-named assert() - 1.47 (2014-12-14) 1/2/4-bit PNG support, both direct and paletted (Omar Cornut & stb) - optimize PNG (ryg) - fix bug in interlaced PNG with user-specified channel count (stb) - 1.46 (2014-08-26) - fix broken tRNS chunk (colorkey-style transparency) in non-paletted PNG - 1.45 (2014-08-16) - fix MSVC-ARM internal compiler error by wrapping malloc - 1.44 (2014-08-07) - various warning fixes from Ronny Chevalier - 1.43 (2014-07-15) - fix MSVC-only compiler problem in code changed in 1.42 - 1.42 (2014-07-09) - don't define _CRT_SECURE_NO_WARNINGS (affects user code) - fixes to stbi__cleanup_jpeg path - added STBI_ASSERT to avoid requiring assert.h - 1.41 (2014-06-25) - fix search&replace from 1.36 that messed up comments/error messages - 1.40 (2014-06-22) - fix gcc struct-initialization warning - 1.39 (2014-06-15) - fix to TGA optimization when req_comp != number of components in TGA; - fix to GIF 
loading because BMP wasn't rewinding (whoops, no GIFs in my test suite) - add support for BMP version 5 (more ignored fields) - 1.38 (2014-06-06) - suppress MSVC warnings on integer casts truncating values - fix accidental rename of 'skip' field of I/O - 1.37 (2014-06-04) - remove duplicate typedef - 1.36 (2014-06-03) - convert to header file single-file library - if de-iphone isn't set, load iphone images color-swapped instead of returning NULL - 1.35 (2014-05-27) - various warnings - fix broken STBI_SIMD path - fix bug where stbi_load_from_file no longer left file pointer in correct place - fix broken non-easy path for 32-bit BMP (possibly never used) - TGA optimization by Arseny Kapoulkine - 1.34 (unknown) - use STBI_NOTUSED in stbi__resample_row_generic(), fix one more leak in tga failure case - 1.33 (2011-07-14) - make stbi_is_hdr work in STBI_NO_HDR (as specified), minor compiler-friendly improvements - 1.32 (2011-07-13) - support for "info" function for all supported filetypes (SpartanJ) - 1.31 (2011-06-20) - a few more leak fixes, bug in PNG handling (SpartanJ) - 1.30 (2011-06-11) - added ability to load files via callbacks to accomidate custom input streams (Ben Wenger) - removed deprecated format-specific test/load functions - removed support for installable file formats (stbi_loader) -- would have been broken for IO callbacks anyway - error cases in bmp and tga give messages and don't leak (Raymond Barbiero, grisha) - fix inefficiency in decoding 32-bit BMP (David Woo) - 1.29 (2010-08-16) - various warning fixes from Aurelien Pocheville - 1.28 (2010-08-01) - fix bug in GIF palette transparency (SpartanJ) - 1.27 (2010-08-01) - cast-to-stbi_uc to fix warnings - 1.26 (2010-07-24) - fix bug in file buffering for PNG reported by SpartanJ - 1.25 (2010-07-17) - refix trans_data warning (Won Chun) - 1.24 (2010-07-12) - perf improvements reading from files on platforms with lock-heavy fgetc() - minor perf improvements for jpeg - deprecated type-specific functions so we'll get feedback if they're needed - attempt to fix trans_data warning (Won Chun) - 1.23 fixed bug in iPhone support - 1.22 (2010-07-10) - removed image *writing* support - stbi_info support from Jetro Lauha - GIF support from Jean-Marc Lienher - iPhone PNG-extensions from James Brown - warning-fixes from Nicolas Schulz and Janez Zemva (i.stbi__err. Janez Žemva) - 1.21 fix use of 'stbi_uc' in header (reported by jon blow) - 1.20 added support for Softimage PIC, by Tom Seddon - 1.19 bug in interlaced PNG corruption check (found by ryg) - 1.18 (2008-08-02) - fix a threading bug (local mutable static) - 1.17 support interlaced PNG - 1.16 major bugfix - stbi__convert_format converted one too many pixels - 1.15 initialize some fields for thread safety - 1.14 fix threadsafe conversion bug - header-file-only version (#define STBI_HEADER_FILE_ONLY before including) - 1.13 threadsafe - 1.12 const qualifiers in the API - 1.11 Support installable IDCT, colorspace conversion routines - 1.10 Fixes for 64-bit (don't use "unsigned long") - optimized upsampling by Fabian "ryg" Giesen - 1.09 Fix format-conversion for PSD code (bad global variables!)
- 1.08 Thatcher Ulrich's PSD code integrated by Nicolas Schulz - 1.07 attempt to fix C++ warning/errors again - 1.06 attempt to fix C++ warning/errors again - 1.05 fix TGA loading to return correct *comp and use good luminance calc - 1.04 default float alpha is 1, not 255; use 'void *' for stbi_image_free - 1.03 bugfixes to STBI_NO_STDIO, STBI_NO_HDR - 1.02 support for (subset of) HDR files, float interface for preferred access to them - 1.01 fix bug: possible bug in handling right-side up bmps... not sure - fix bug: the stbi__bmp_load() and stbi__tga_load() functions didn't work at all - 1.00 interface to zlib that skips zlib header - 0.99 correct handling of alpha in palette - 0.98 TGA loader by lonesock; dynamically add loaders (untested) - 0.97 jpeg errors on too large a file; also catch another malloc failure - 0.96 fix detection of invalid v value - particleman@mollyrocket forum - 0.95 during header scan, seek to markers in case of padding - 0.94 STBI_NO_STDIO to disable stdio usage; rename all #defines the same - 0.93 handle jpegtran output; verbose errors - 0.92 read 4,8,16,24,32-bit BMP files of several formats - 0.91 output 24-bit Windows 3.0 BMP files - 0.90 fix a few more warnings; bump version number to approach 1.0 - 0.61 bugfixes due to Marc LeBlanc, Christopher Lloyd - 0.60 fix compiling as c++ - 0.59 fix warnings: merge Dave Moore's -Wall fixes - 0.58 fix bug: zlib uncompressed mode len/nlen was wrong endian - 0.57 fix bug: jpg last huffman symbol before marker was >9 bits but less than 16 available - 0.56 fix bug: zlib uncompressed mode len vs. nlen - 0.55 fix bug: restart_interval not initialized to 0 - 0.54 allow NULL for 'int *comp' - 0.53 fix bug in png 3->4; speedup png decoding - 0.52 png handles req_comp=3,4 directly; minor cleanup; jpeg comments - 0.51 obey req_comp requests, 1-component jpegs return as 1-component, - on 'test' only check type, not whether we support this variant - 0.50 (2006-11-19) - first released version + 2.20 (2019-02-07) support utf8 filenames in Windows; fix warnings and platform ifdefs + 2.19 (2018-02-11) fix warning + 2.18 (2018-01-30) fix warnings + 2.17 (2018-01-29) change sbti__shiftsigned to avoid clang -O2 bug + 1-bit BMP + *_is_16_bit api + avoid warnings + 2.16 (2017-07-23) all functions have 16-bit variants; + STBI_NO_STDIO works again; + compilation fixes; + fix rounding in unpremultiply; + optimize vertical flip; + disable raw_len validation; + documentation fixes + 2.15 (2017-03-18) fix png-1,2,4 bug; now all Imagenet JPGs decode; + warning fixes; disable run-time SSE detection on gcc; + uniform handling of optional "return" values; + thread-safe initialization of zlib tables + 2.14 (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs + 2.13 (2016-11-29) add 16-bit API, only supported for PNG right now + 2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes + 2.11 (2016-04-02) allocate large structures on the stack + remove white matting for transparent PSD + fix reported channel count for PNG & BMP + re-enable SSE2 in non-gcc 64-bit + support RGB-formatted JPEG + read 16-bit PNGs (only as 8-bit) + 2.10 (2016-01-22) avoid warning introduced in 2.09 by STBI_REALLOC_SIZED + 2.09 (2016-01-16) allow comments in PNM files + 16-bit-per-pixel TGA (not bit-per-component) + info() for TGA could break due to .hdr handling + info() for BMP to shares code instead of sloppy parse + can use STBI_REALLOC_SIZED if allocator doesn't support realloc + code cleanup + 2.08 (2015-09-13) fix to 2.07 cleanup, reading RGB PSD 
as RGBA + 2.07 (2015-09-13) fix compiler warnings + partial animated GIF support + limited 16-bpc PSD support + #ifdef unused functions + bug with < 92 byte PIC,PNM,HDR,TGA + 2.06 (2015-04-19) fix bug where PSD returns wrong '*comp' value + 2.05 (2015-04-19) fix bug in progressive JPEG handling, fix warning + 2.04 (2015-04-15) try to re-enable SIMD on MinGW 64-bit + 2.03 (2015-04-12) extra corruption checking (mmozeiko) + stbi_set_flip_vertically_on_load (nguillemot) + fix NEON support; fix mingw support + 2.02 (2015-01-19) fix incorrect assert, fix warning + 2.01 (2015-01-17) fix various warnings; suppress SIMD on gcc 32-bit without -msse2 + 2.00b (2014-12-25) fix STBI_MALLOC in progressive JPEG + 2.00 (2014-12-25) optimize JPG, including x86 SSE2 & NEON SIMD (ryg) + progressive JPEG (stb) + PGM/PPM support (Ken Miller) + STBI_MALLOC,STBI_REALLOC,STBI_FREE + GIF bugfix -- seemingly never worked + STBI_NO_*, STBI_ONLY_* + 1.48 (2014-12-14) fix incorrectly-named assert() + 1.47 (2014-12-14) 1/2/4-bit PNG support, both direct and paletted (Omar Cornut & stb) + optimize PNG (ryg) + fix bug in interlaced PNG with user-specified channel count (stb) + 1.46 (2014-08-26) + fix broken tRNS chunk (colorkey-style transparency) in non-paletted PNG + 1.45 (2014-08-16) + fix MSVC-ARM internal compiler error by wrapping malloc + 1.44 (2014-08-07) + various warning fixes from Ronny Chevalier + 1.43 (2014-07-15) + fix MSVC-only compiler problem in code changed in 1.42 + 1.42 (2014-07-09) + don't define _CRT_SECURE_NO_WARNINGS (affects user code) + fixes to stbi__cleanup_jpeg path + added STBI_ASSERT to avoid requiring assert.h + 1.41 (2014-06-25) + fix search&replace from 1.36 that messed up comments/error messages + 1.40 (2014-06-22) + fix gcc struct-initialization warning + 1.39 (2014-06-15) + fix to TGA optimization when req_comp != number of components in TGA; + fix to GIF loading because BMP wasn't rewinding (whoops, no GIFs in my test suite) + add support for BMP version 5 (more ignored fields) + 1.38 (2014-06-06) + suppress MSVC warnings on integer casts truncating values + fix accidental rename of 'skip' field of I/O + 1.37 (2014-06-04) + remove duplicate typedef + 1.36 (2014-06-03) + convert to header file single-file library + if de-iphone isn't set, load iphone images color-swapped instead of returning NULL + 1.35 (2014-05-27) + various warnings + fix broken STBI_SIMD path + fix bug where stbi_load_from_file no longer left file pointer in correct place + fix broken non-easy path for 32-bit BMP (possibly never used) + TGA optimization by Arseny Kapoulkine + 1.34 (unknown) + use STBI_NOTUSED in stbi__resample_row_generic(), fix one more leak in tga failure case + 1.33 (2011-07-14) + make stbi_is_hdr work in STBI_NO_HDR (as specified), minor compiler-friendly improvements + 1.32 (2011-07-13) + support for "info" function for all supported filetypes (SpartanJ) + 1.31 (2011-06-20) + a few more leak fixes, bug in PNG handling (SpartanJ) + 1.30 (2011-06-11) + added ability to load files via callbacks to accomidate custom input streams (Ben Wenger) + removed deprecated format-specific test/load functions + removed support for installable file formats (stbi_loader) -- would have been broken for IO callbacks anyway + error cases in bmp and tga give messages and don't leak (Raymond Barbiero, grisha) + fix inefficiency in decoding 32-bit BMP (David Woo) + 1.29 (2010-08-16) + various warning fixes from Aurelien Pocheville + 1.28 (2010-08-01) + fix bug in GIF palette transparency (SpartanJ) + 1.27 
(2010-08-01) + cast-to-stbi_uc to fix warnings + 1.26 (2010-07-24) + fix bug in file buffering for PNG reported by SpartanJ + 1.25 (2010-07-17) + refix trans_data warning (Won Chun) + 1.24 (2010-07-12) + perf improvements reading from files on platforms with lock-heavy fgetc() + minor perf improvements for jpeg + deprecated type-specific functions so we'll get feedback if they're needed + attempt to fix trans_data warning (Won Chun) + 1.23 fixed bug in iPhone support + 1.22 (2010-07-10) + removed image *writing* support + stbi_info support from Jetro Lauha + GIF support from Jean-Marc Lienher + iPhone PNG-extensions from James Brown + warning-fixes from Nicolas Schulz and Janez Zemva (i.stbi__err. Janez Žemva) + 1.21 fix use of 'stbi_uc' in header (reported by jon blow) + 1.20 added support for Softimage PIC, by Tom Seddon + 1.19 bug in interlaced PNG corruption check (found by ryg) + 1.18 (2008-08-02) + fix a threading bug (local mutable static) + 1.17 support interlaced PNG + 1.16 major bugfix - stbi__convert_format converted one too many pixels + 1.15 initialize some fields for thread safety + 1.14 fix threadsafe conversion bug + header-file-only version (#define STBI_HEADER_FILE_ONLY before including) + 1.13 threadsafe + 1.12 const qualifiers in the API + 1.11 Support installable IDCT, colorspace conversion routines + 1.10 Fixes for 64-bit (don't use "unsigned long") + optimized upsampling by Fabian "ryg" Giesen + 1.09 Fix format-conversion for PSD code (bad global variables!) + 1.08 Thatcher Ulrich's PSD code integrated by Nicolas Schulz + 1.07 attempt to fix C++ warning/errors again + 1.06 attempt to fix C++ warning/errors again + 1.05 fix TGA loading to return correct *comp and use good luminance calc + 1.04 default float alpha is 1, not 255; use 'void *' for stbi_image_free + 1.03 bugfixes to STBI_NO_STDIO, STBI_NO_HDR + 1.02 support for (subset of) HDR files, float interface for preferred access to them + 1.01 fix bug: possible bug in handling right-side up bmps... not sure + fix bug: the stbi__bmp_load() and stbi__tga_load() functions didn't work at all + 1.00 interface to zlib that skips zlib header + 0.99 correct handling of alpha in palette + 0.98 TGA loader by lonesock; dynamically add loaders (untested) + 0.97 jpeg errors on too large a file; also catch another malloc failure + 0.96 fix detection of invalid v value - particleman@mollyrocket forum + 0.95 during header scan, seek to markers in case of padding + 0.94 STBI_NO_STDIO to disable stdio usage; rename all #defines the same + 0.93 handle jpegtran output; verbose errors + 0.92 read 4,8,16,24,32-bit BMP files of several formats + 0.91 output 24-bit Windows 3.0 BMP files + 0.90 fix a few more warnings; bump version number to approach 1.0 + 0.61 bugfixes due to Marc LeBlanc, Christopher Lloyd + 0.60 fix compiling as c++ + 0.59 fix warnings: merge Dave Moore's -Wall fixes + 0.58 fix bug: zlib uncompressed mode len/nlen was wrong endian + 0.57 fix bug: jpg last huffman symbol before marker was >9 bits but less than 16 available + 0.56 fix bug: zlib uncompressed mode len vs. 
nlen + 0.55 fix bug: restart_interval not initialized to 0 + 0.54 allow NULL for 'int *comp' + 0.53 fix bug in png 3->4; speedup png decoding + 0.52 png handles req_comp=3,4 directly; minor cleanup; jpeg comments + 0.51 obey req_comp requests, 1-component jpegs return as 1-component, + on 'test' only check type, not whether we support this variant + 0.50 (2006-11-19) + first released version */ /* diff --git a/Kernel/include/stb/image_resize.h b/Kernel/include/stb/image_resize.h index 46c7c897..f41bd393 100644 --- a/Kernel/include/stb/image_resize.h +++ b/Kernel/include/stb/image_resize.h @@ -1,198 +1,393 @@ -/* stb_image_resize - v0.97 - public domain image resizing - by Jorge L Rodriguez (@VinoBS) - 2014 +/* stb_image_resize2 - v2.14 - public domain image resizing + + by Jeff Roberts (v2) and Jorge L Rodriguez http://github.com/nothings/stb - Written with emphasis on usability, portability, and efficiency. (No - SIMD or threads, so it be easily outperformed by libs that use those.) - Only scaling and translation is supported, no rotations or shears. - Easy API downsamples w/Mitchell filter, upsamples w/cubic interpolation. + Can be threaded with the extended API. SSE2, AVX, Neon and WASM SIMD support. Only + scaling and translation is supported, no rotations or shears. COMPILING & LINKING - In one C/C++ file that #includes this file, do this: - #define STB_IMAGE_RESIZE_IMPLEMENTATION - before the #include. That will create the implementation in that file. + In one C/C++ file that #includes this file, do this: + #define STB_IMAGE_RESIZE_IMPLEMENTATION + before the #include. That will create the implementation in that file. - QUICKSTART - stbir_resize_uint8( input_pixels , in_w , in_h , 0, - output_pixels, out_w, out_h, 0, num_channels) - stbir_resize_float(...) - stbir_resize_uint8_srgb( input_pixels , in_w , in_h , 0, - output_pixels, out_w, out_h, 0, - num_channels , alpha_chan , 0) - stbir_resize_uint8_srgb_edgemode( - input_pixels , in_w , in_h , 0, - output_pixels, out_w, out_h, 0, - num_channels , alpha_chan , 0, STBIR_EDGE_CLAMP) - // WRAP/REFLECT/ZERO + EASY API CALLS: + Easy API downsamples w/Mitchell filter, upsamples w/cubic interpolation, clamps to edge. - FULL API - See the "header file" section of the source for API documentation. + stbir_resize_uint8_srgb( input_pixels, input_w, input_h, input_stride_in_bytes, + output_pixels, output_w, output_h, output_stride_in_bytes, + pixel_layout_enum ) + + stbir_resize_uint8_linear( input_pixels, input_w, input_h, input_stride_in_bytes, + output_pixels, output_w, output_h, output_stride_in_bytes, + pixel_layout_enum ) + + stbir_resize_float_linear( input_pixels, input_w, input_h, input_stride_in_bytes, + output_pixels, output_w, output_h, output_stride_in_bytes, + pixel_layout_enum ) + + If you pass NULL or zero for the output_pixels, we will allocate the output buffer + for you and return it from the function (free with free() or STBIR_FREE). + As a special case, XX_stride_in_bytes of 0 means packed continuously in memory. + + API LEVELS + There are three levels of API - easy-to-use, medium-complexity and extended-complexity. + + See the "header file" section of the source for API documentation. ADDITIONAL DOCUMENTATION - SRGB & FLOATING POINT REPRESENTATION - The sRGB functions presume IEEE floating point. If you do not have - IEEE floating point, define STBIR_NON_IEEE_FLOAT. This will use - a slower implementation. + MEMORY ALLOCATION + By default, we use malloc and free for memory allocation. 
To override the + memory allocation, before the implementation #include, add a: - MEMORY ALLOCATION - The resize functions here perform a single memory allocation using - malloc. To control the memory allocation, before the #include that - triggers the implementation, do: + #define STBIR_MALLOC(size,user_data) ... + #define STBIR_FREE(ptr,user_data) ... - #define STBIR_MALLOC(size,context) ... - #define STBIR_FREE(ptr,context) ... + Each resize makes exactly one call to malloc/free (unless you use the + extended API where you can do one allocation for many resizes). Under + address sanitizer, we do separate allocations to find overread/writes. - Each resize function makes exactly one call to malloc/free, so to use - temp memory, store the temp memory in the context and return that. + PERFORMANCE + This library was written with an emphasis on performance. When testing + stb_image_resize with RGBA, the fastest mode is STBIR_4CHANNEL with + STBIR_TYPE_UINT8 pixels and CLAMPed edges (which is what many other resize + libs do by default). Also, make sure SIMD is turned on of course (default + for 64-bit targets). Avoid WRAP edge mode if you want the fastest speed. - ASSERT - Define STBIR_ASSERT(boolval) to override assert() and not use assert.h + This library also comes with profiling built-in. If you define STBIR_PROFILE, + you can use the advanced API and get low-level profiling information by + calling stbir_resize_extended_profile_info() or stbir_resize_split_profile_info() + after a resize. - OPTIMIZATION - Define STBIR_SATURATE_INT to compute clamp values in-range using - integer operations instead of float operations. This may be faster - on some platforms. + SIMD + Most of the routines have optimized SSE2, AVX, NEON and WASM versions. - DEFAULT FILTERS - For functions which don't provide explicit control over what filters - to use, you can change the compile-time defaults with + On Microsoft compilers, we automatically turn on SIMD for 64-bit x64 and + ARM; for 32-bit x86 and ARM, you select SIMD mode by defining STBIR_SSE2 or + STBIR_NEON. For AVX and AVX2, we auto-select it by detecting the /arch:AVX + or /arch:AVX2 switches. You can also always manually turn SSE2, AVX or AVX2 + support on by defining STBIR_SSE2, STBIR_AVX or STBIR_AVX2. - #define STBIR_DEFAULT_FILTER_UPSAMPLE STBIR_FILTER_something - #define STBIR_DEFAULT_FILTER_DOWNSAMPLE STBIR_FILTER_something + On Linux, SSE2 and NEON are on by default for 64-bit x64 or ARM64. For 32-bit, + we select x86 SIMD mode by whether you have -msse2, -mavx or -mavx2 enabled + on the command line. For 32-bit ARM, you must pass -mfpu=neon-vfpv4 for both + clang and GCC, but GCC also requires an additional -mfp16-format=ieee to + automatically enable NEON. - See stbir_filter in the header-file section for the list of filters. + On x86 platforms, you can also define STBIR_FP16C to turn on FP16C instructions + for converting back and forth to half-floats. This is autoselected when we + are using AVX2. Clang and GCC also require the -mf16c switch. ARM always uses + the built-in half float hardware NEON instructions. - NEW FILTERS - A number of 1D filter kernels are used. For a list of - supported filters see the stbir_filter enum. To add a new filter, - write a filter function and add it to stbir__filter_info_table. + You can also tell us to use multiply-add instructions with STBIR_USE_FMA. + Because x86 doesn't always have fma, we turn it off by default to maintain + determinism across all platforms. 
If you don't care about non-FMA determinism + and are willing to restrict yourself to more recent x86 CPUs (around the AVX + timeframe), then fma will give you around a 15% speedup. - PROGRESS - For interactive use with slow resize operations, you can install - a progress-report callback: + You can force off SIMD in all cases by defining STBIR_NO_SIMD. You can turn + off AVX or AVX2 specifically with STBIR_NO_AVX or STBIR_NO_AVX2. AVX is 10% + to 40% faster, and AVX2 is generally another 12%. - #define STBIR_PROGRESS_REPORT(val) some_func(val) + ALPHA CHANNEL + Most of the resizing functions provide the ability to control how the alpha + channel of an image is processed. - The parameter val is a float which goes from 0 to 1 as progress is made. + When alpha represents transparency, it is important that when combining + colors with filtering, the pixels should not be treated equally; they + should use a weighted average based on their alpha values. For example, + if a pixel is 1% opaque bright green and another pixel is 99% opaque + black and you average them, the average will be 50% opaque, but the + unweighted average will be a middling green color, while the weighted + average will be nearly black. This means the unweighted version introduced + green energy that didn't exist in the source image. - For example: + (If you want to know why this makes sense, you can work out the math for + the following: consider what happens if you alpha composite a source image + over a fixed color and then average the output, vs. if you average the + source image pixels and then composite that over the same fixed color. + Only the weighted average produces the same result as the ground truth + composite-then-average result.) - static void my_progress_report(float progress); - #define STBIR_PROGRESS_REPORT(val) my_progress_report(val) + Therefore, it is in general best to "alpha weight" the pixels when applying + filters to them. This essentially means multiplying the colors by the alpha + values before combining them, and then dividing by the alpha value at the + end. - #define STB_IMAGE_RESIZE_IMPLEMENTATION - #include "stb_image_resize.h" + The computer graphics industry introduced a technique called "premultiplied + alpha" or "associated alpha" in which image colors are stored in image files + already multiplied by their alpha. This saves some math when compositing, + and also avoids the need to divide by the alpha at the end (which is quite + inefficient). However, while premultiplied alpha is common in the movie CGI + industry, it is not commonplace in other industries like videogames, and most + consumer file formats are generally expected to contain not-premultiplied + colors. For example, Photoshop saves PNG files "unpremultiplied", and web + browsers like Chrome and Firefox expect PNG images to be unpremultiplied. - static void my_progress_report(float progress) - { - printf("Progress: %f%%\n", progress*100); - } + Note that there are three possibilities that might describe your image + and resize expectation: - MAX CHANNELS - If your image has more than 64 channels, define STBIR_MAX_CHANNELS - to the max you'll have. + 1. images are not premultiplied, alpha weighting is desired + 2. images are not premultiplied, alpha weighting is not desired + 3. images are premultiplied - ALPHA CHANNEL - Most of the resizing functions provide the ability to control how - the alpha channel of an image is processed. 
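To make the green/black example above concrete, here is the arithmetic with hypothetical normalized values (pixel P1 is the green one, P2 the black one):

    P1 = (R,G,B,A) = (0, 1, 0, 0.01)          P2 = (0, 0, 0, 0.99)
    unweighted: color = (P1+P2)/2 = (0, 0.5, 0), alpha = 0.5   -> a middling green
    weighted:   premultiply: P1 -> (0, 0.01, 0), P2 -> (0, 0, 0)
                average:     (0, 0.005, 0), alpha = 0.5
                un-premultiply: (0, 0.005, 0) / 0.5 = (0, 0.01, 0) -> nearly black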
The important things - to know about this: + Both case #2 and case #3 require the exact same math: no alpha weighting + should be applied or removed. Only case #1 requires extra math operations; + the other two cases can be handled identically. - 1. The best mathematically-behaved version of alpha to use is - called "premultiplied alpha", in which the other color channels - have had the alpha value multiplied in. If you use premultiplied - alpha, linear filtering (such as image resampling done by this - library, or performed in texture units on GPUs) does the "right - thing". While premultiplied alpha is standard in the movie CGI - industry, it is still uncommon in the videogame/real-time world. + stb_image_resize expects case #1 by default, applying alpha weighting to + images, expecting the input images to be unpremultiplied. This is what the + COLOR+ALPHA buffer types tell the resizer to do. - If you linearly filter non-premultiplied alpha, strange effects - occur. (For example, the 50/50 average of 99% transparent bright green - and 1% transparent black produces 50% transparent dark green when - non-premultiplied, whereas premultiplied it produces 50% - transparent near-black. The former introduces green energy - that doesn't exist in the source image.) + When you use the pixel layouts STBIR_RGBA, STBIR_BGRA, STBIR_ARGB, + STBIR_ABGR, STBIR_RA, or STBIR_AR you are telling us that the pixels are + non-premultiplied. In these cases, the resizer will alpha weight the colors + (effectively creating the premultiplied image), do the filtering, and then + convert back to non-premult on exit. - 2. Artists should not edit premultiplied-alpha images; artists - want non-premultiplied alpha images. Thus, art tools generally output - non-premultiplied alpha images. + When you use the pixel layouts STBIR_RGBA_PM, STBIR_BGRA_PM, STBIR_ARGB_PM, + STBIR_ABGR_PM, STBIR_RA_PM or STBIR_AR_PM, you are telling us that the pixels + ARE premultiplied. In this case, the resizer doesn't have to do the + premultiplying - it can filter directly on the input. This is about twice as + fast as the non-premultiplied case, so it's the right option if your data is + already set up correctly. - 3. You will get best results in most cases by converting images - to premultiplied alpha before processing them mathematically. + When you use the pixel layout STBIR_4CHANNEL or STBIR_2CHANNEL, you are + telling us that there is no channel that represents transparency; it may be + RGB and some unrelated fourth channel that has been stored in the alpha + channel, but it is actually not alpha. No special processing will be + performed. - 4. If you pass the flag STBIR_FLAG_ALPHA_PREMULTIPLIED, the - resizer does not do anything special for the alpha channel; - it is resampled identically to other channels. This produces - the correct results for premultiplied-alpha images, but produces - less-than-ideal results for non-premultiplied-alpha images. + The difference between the generic 4- or 2-channel layouts and the + specialized _PM versions is that with the _PM versions you are telling us + that the data *is* alpha, just not to premultiply it. That matters when + using SRGB pixel formats: we need to know which channel is alpha, because + it is converted linearly (rather than with the SRGB converters). - 5. 
If you do not pass the flag STBIR_FLAG_ALPHA_PREMULTIPLIED, - then the resizer weights the contribution of input pixels - based on their alpha values, or, equivalently, it multiplies - the alpha value into the color channels, resamples, then divides - by the resultant alpha value. Input pixels which have alpha=0 do - not contribute at all to output pixels unless _all_ of the input - pixels affecting that output pixel have alpha=0, in which case - the result for that pixel is the same as it would be without - STBIR_FLAG_ALPHA_PREMULTIPLIED. However, this is only true for - input images in integer formats. For input images in float format, - input pixels with alpha=0 have no effect, and output pixels - which have alpha=0 will be 0 in all channels. (For float images, - you can manually achieve the same result by adding a tiny epsilon - value to the alpha channel of every image, and then subtracting - or clamping it at the end.) + Because alpha weighting produces the same effect as premultiplying, you + even have the option with non-premultiplied inputs to let the resizer + produce a premultiplied output. Because the initially computed alpha-weighted + output image is effectively premultiplied, this is actually more performant + than the normal path which un-premultiplies the output image as a final step. - 6. You can suppress the behavior described in #5 and make - all-0-alpha pixels have 0 in all channels by #defining - STBIR_NO_ALPHA_EPSILON. + Finally, when converting both in and out of non-premultiplied space (for + example, when using STBIR_RGBA), we go to somewhat heroic measures to + ensure that areas with zero alpha value pixels get something reasonable + in the RGB values. If you don't care about the RGB values of zero alpha + pixels, you can call the stbir_set_non_pm_alpha_speed_over_quality() + function - this runs a premultiplied resize about 25% faster. That said, + when you really care about speed, using premultiplied pixels for both in + and out (STBIR_RGBA_PM, etc) is much faster than either of these + non-premultiplied options. - 7. You can separately control whether the alpha channel is - interpreted as linear or affected by the colorspace. By default - it is linear; you almost never want to apply the colorspace. - (For example, graphics hardware does not apply sRGB conversion - to the alpha channel.) + PIXEL LAYOUT CONVERSION + The resizer can convert from some pixel layouts to others. When using + stbir_set_pixel_layouts(), you can, for example, specify STBIR_RGBA + on input, and STBIR_ARGB on output, and it will re-organize the channels + during the resize. Currently, you can only convert between two pixel + layouts with the same number of channels. + + DETERMINISM + We commit to being deterministic (from x64 to ARM to scalar to SIMD, etc). + This requires compiling with fast-math off (using at least /fp:precise). + Also, you must turn off fp-contracting (which turns mult+adds into fmas)! + We attempt to do this with pragmas, but with Clang, you usually want to add + -ffp-contract=off to the command line as well. + + For 32-bit x86, you must use SSE and SSE2 codegen for determinism. That is, + if the scalar x87 unit gets used at all, we immediately lose determinism. + On Microsoft Visual Studio 2008 and earlier, from what we can tell there is + no way to be deterministic in 32-bit x86 (some x87 always leaks in, even + with fp:strict). On 32-bit x86 GCC, determinism requires both -msse2 and + -mfpmath=sse. 
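Pulling the determinism requirements above together into concrete command lines (a sketch only; resize.c is a placeholder translation unit, and exact flags should be checked against your toolchain's documentation):

    cl /fp:precise /c resize.c                   MSVC: no fast-math
    clang -ffp-contract=off -c resize.c          also disable FP contraction
    gcc -m32 -msse2 -mfpmath=sse -c resize.c     32-bit x86: keep the x87 unit out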
+ + Note that we will not be deterministic with float data containing NaNs - + the NaNs will propagate differently on different SIMD and platforms. + + If you turn on STBIR_USE_FMA, then we will be deterministic with other + fma targets, but we will differ from non-fma targets (this is unavoidable, + because a fma isn't simply an add with a mult - it also introduces a + rounding difference compared to non-fma instruction sequences.) + + FLOAT PIXEL FORMAT RANGE + Any range of values can be used for the non-alpha float data that you pass + in (0 to 1, -1 to 1, whatever). However, if you are inputting float values + but *outputting* bytes or shorts, you must use a range of 0 to 1 so that we + scale back properly. The alpha channel must also be 0 to 1 for any format + that does premultiplication prior to resizing. + + Note also that with float output, when using filters with negative lobes, the + filtered output values might go slightly out of range. You can define + STBIR_FLOAT_LOW_CLAMP and/or STBIR_FLOAT_HIGH_CLAMP to specify the range + to clamp to on output, if that's important. + + MAX/MIN SCALE FACTORS + The input pixel resolutions are in integers, and we do the internal pointer + resolution in size_t sized integers. However, the scale ratio from input + resolution to output resolution is calculated in float form. This means + the effective possible scale ratio is limited to 24 bits (or 16 million + to 1). As you get close to the size of the float resolution (again, 16 + million pixels wide or high), you might start seeing float inaccuracy + issues in general in the pipeline. If you have to do extreme resizes, + you can usually do this in multiple stages (using float intermediate + buffers). + + FLIPPED IMAGES + Stride is just the delta from one scanline to the next. This means you can + use a negative stride to handle inverted images (point to the final + scanline and use a negative stride). You can invert the input or output, + using negative strides. + + DEFAULT FILTERS + For functions which don't provide explicit control over what filters to + use, you can change the compile-time defaults with: + + #define STBIR_DEFAULT_FILTER_UPSAMPLE STBIR_FILTER_something + #define STBIR_DEFAULT_FILTER_DOWNSAMPLE STBIR_FILTER_something + + See stbir_filter in the header-file section for the list of filters. + + NEW FILTERS + A number of 1D filter kernels are supplied. For a list of supported + filters, see the stbir_filter enum. You can install your own filters by + using the stbir_set_filter_callbacks function. + + PROGRESS + For interactive use with slow resize operations, you can use the + scanline callbacks in the extended API. It would have to be a *very* large + image resample to need progress though - we're very fast. + + CEIL and FLOOR + In scalar mode, the only functions we use from math.h are ceilf and floorf, + but if you have your own versions, you can define the STBIR_CEILF(v) and + STBIR_FLOORF(v) macros and we'll use them instead. In SIMD, we just use + our own versions. + + ASSERT + Define STBIR_ASSERT(boolval) to override assert() and not use assert.h + + PORTING FROM VERSION 1 + The API has changed. You can continue to use the old version of stb_image_resize.h, + which is available in the "deprecated/" directory. + + If you're using the old simple-to-use API, porting is straightforward. + (For more advanced APIs, read the documentation.) 
+ + stbir_resize_uint8(): + - call `stbir_resize_uint8_linear`, cast channel count to `stbir_pixel_layout` + + stbir_resize_float(): + - call `stbir_resize_float_linear`, cast channel count to `stbir_pixel_layout` + + stbir_resize_uint8_srgb(): + - function name is unchanged + - cast channel count to `stbir_pixel_layout` + - above is sufficient unless your image has alpha and it's not RGBA/BGRA + - in that case, follow the below instructions for stbir_resize_uint8_srgb_edgemode + + stbir_resize_uint8_srgb_edgemode() + - switch to the "medium complexity" API + - stbir_resize(), very similar API but a few more parameters: + - pixel_layout: cast channel count to `stbir_pixel_layout` + - data_type: STBIR_TYPE_UINT8_SRGB + - edge: unchanged (STBIR_EDGE_WRAP, etc.) + - filter: STBIR_FILTER_DEFAULT + - which channel is alpha is specified in stbir_pixel_layout, see enum for details + + FUTURE TODOS + * For polyphase integral filters, we just memcpy the coeffs to dupe + them, but we should indirect and use the same coeff memory. + * Add pixel layout conversions for sensible different channel counts + (maybe, 1->3/4, 3->4, 4->1, 3->1). + * For SIMD encode and decode scanline routines, do any pre-aligning + for bad input/output buffer alignments and pitch? + * For very wide scanlines, we should do vertical strips to stay within + L2 cache. Maybe do chunks of 1K pixels at a time. There would be + some pixel reconversion, but probably dwarfed by things falling out + of cache. Probably also something possible with alternating between + scattering and gathering at high resize scales? + * Should we have a multiple MIPs at the same time function (could keep + more memory in cache during multiple resizes)? + * Rewrite the coefficient generator to do many at once. + * AVX-512 vertical kernels - worried about downclocking here. + * Convert the reincludes to macros when we know they aren't changing. + * Experiment with pivoting the horizontal and always using the + vertical filters (which are faster, but perhaps not enough to overcome + the pivot cost and the extra memory touches). Need to buffer the whole + image so have to balance memory use. + * Most of our code is internally function pointers, should we compile + all the SIMD stuff always and dynamically dispatch? CONTRIBUTORS - Jorge L Rodriguez: Implementation - Sean Barrett: API design, optimizations - Aras Pranckevicius: bugfix - Nathan Reed: warning fixes + Jeff Roberts: 2.0 implementation, optimizations, SIMD + Martins Mozeiko: NEON simd, WASM simd, clang and GCC whisperer + Fabian Giesen: half float and srgb converters + Sean Barrett: API design, optimizations + Jorge L Rodriguez: Original 1.0 implementation + Aras Pranckevicius: bugfixes + Nathan Reed: warning fixes for 1.0 REVISIONS - 0.97 (2020-02-02) fixed warning - 0.96 (2019-03-04) fixed warnings - 0.95 (2017-07-23) fixed warnings - 0.94 (2017-03-18) fixed warnings - 0.93 (2017-03-03) fixed bug with certain combinations of heights - 0.92 (2017-01-02) fix integer overflow on large (>2GB) images - 0.91 (2016-04-02) fix warnings; fix handling of subpixel regions - 0.90 (2014-09-17) first released version + 2.14 (2025-05-09) fixed a bug using downsampling gather horizontal first, and + scatter with vertical first. + 2.13 (2025-02-27) fixed a bug when using input callbacks, turned off simd for + tiny-c, fixed some variables that should have been static, + fixes a bug when calculating temp memory with resizes that + exceed 2GB of temp memory (very large resizes). 
+ 2.12 (2024-10-18) fix incorrect use of user_data with STBIR_FREE + 2.11 (2024-09-08) fix harmless asan warnings in 2-channel and 3-channel mode + with AVX-2, fix some weird scaling edge conditions with + point sample mode. + 2.10 (2024-07-27) fix the defines GCC and mingw for loop unroll control, + fix MSVC 32-bit arm half float routines. + 2.09 (2024-06-19) fix the defines for 32-bit ARM GCC builds (was selecting + hardware half floats). + 2.08 (2024-06-10) fix for RGB->BGR three channel flips and add SIMD (thanks + to Ryan Salsbury), fix for sub-rect resizes, use the + pragmas to control unrolling when they are available. + 2.07 (2024-05-24) fix for slow final split during threaded conversions of very + wide scanlines when downsampling (caused by extra input + converting), fix for wide scanline resamples with many + splits (int overflow), fix GCC warning. + 2.06 (2024-02-10) fix for identical width/height 3x or more down-scaling + undersampling a single row on rare resize ratios (about 1%). + 2.05 (2024-02-07) fix for 2 pixel to 1 pixel resizes with wrap (thanks Aras), + fix for output callback (thanks Julien Koenen). + 2.04 (2023-11-17) fix for rare AVX bug, shadowed symbol (thanks Nikola Smiljanic). + 2.03 (2023-11-01) ASAN and TSAN warnings fixed, minor tweaks. + 2.00 (2023-10-10) mostly new source: new api, optimizations, simd, vertical-first, etc + 2x-5x faster without simd, 4x-12x faster with simd, + in some cases, 20x to 40x faster esp resizing large to very small. + 0.96 (2019-03-04) fixed warnings + 0.95 (2017-07-23) fixed warnings + 0.94 (2017-03-18) fixed warnings + 0.93 (2017-03-03) fixed bug with certain combinations of heights + 0.92 (2017-01-02) fix integer overflow on large (>2GB) images + 0.91 (2016-04-02) fix warnings; fix handling of subpixel regions + 0.90 (2014-09-17) first released version LICENSE - See end of file for license information. - - TODO - Don't decode all of the image data when only processing a partial tile - Don't use full-width decode buffers when only processing a partial tile - When processing wide images, break processing into tiles so data fits in L1 cache - Installable filters? - Resize that respects alpha test coverage - (Reference code: FloatImage::alphaTestCoverage and FloatImage::scaleAlphaToCoverage: - https://code.google.com/p/nvidia-texture-tools/source/browse/trunk/src/nvimage/FloatImage.cpp ) + See end of file for license information. */ -#ifndef STBIR_INCLUDE_STB_IMAGE_RESIZE_H -#define STBIR_INCLUDE_STB_IMAGE_RESIZE_H +#if !defined(STB_IMAGE_RESIZE_DO_HORIZONTALS) && !defined(STB_IMAGE_RESIZE_DO_VERTICALS) && !defined(STB_IMAGE_RESIZE_DO_CODERS) // for internal re-includes +#ifndef STBIR_INCLUDE_STB_IMAGE_RESIZE2_H +#define STBIR_INCLUDE_STB_IMAGE_RESIZE2_H + +#include <stddef.h> #ifdef _MSC_VER typedef unsigned char stbir_uint8; typedef unsigned short stbir_uint16; typedef unsigned int stbir_uint32; +typedef unsigned __int64 stbir_uint64; #else #include <stdint.h> typedef uint8_t stbir_uint8; typedef uint16_t stbir_uint16; typedef uint32_t stbir_uint32; +typedef uint64_t stbir_uint64; #endif #ifndef STBIRDEF @@ -208,210 +403,417 @@ typedef uint32_t stbir_uint32; #endif ////////////////////////////////////////////////////////////////////////////// +//// start "header file" /////////////////////////////////////////////////// // // Easy-to-use API: // -// * "input pixels" points to an array of image data with 'num_channels' channels (e.g. 
RGB=3, RGBA=4) -// * input_w is input image width (x-axis), input_h is input image height (y-axis) -// * stride is the offset between successive rows of image data in memory, in bytes. you can -// specify 0 to mean packed continuously in memory -// * alpha channel is treated identically to other channels. +// * stride is the offset between successive rows of image data +// in memory, in bytes. specify 0 for packed continuously in memory // * colorspace is linear or sRGB as specified by function name +// * Uses the default filters +// * Uses edge mode clamped // * returned result is 1 for success or 0 in case of an error. -// #define STBIR_ASSERT() to trigger an assert on parameter validation errors. -// * Memory required grows approximately linearly with input and output size, but with -// discontinuities at input_w == output_w and input_h == output_h. -// * These functions use a "default" resampling filter defined at compile time. To change the filter, -// you can change the compile-time defaults by #defining STBIR_DEFAULT_FILTER_UPSAMPLE -// and STBIR_DEFAULT_FILTER_DOWNSAMPLE, or you can use the medium-complexity API. - -STBIRDEF int stbir_resize_uint8(const unsigned char *input_pixels, int input_w, int input_h, int input_stride_in_bytes, - unsigned char *output_pixels, int output_w, int output_h, int output_stride_in_bytes, - int num_channels); - -STBIRDEF int stbir_resize_float(const float *input_pixels, int input_w, int input_h, int input_stride_in_bytes, - float *output_pixels, int output_w, int output_h, int output_stride_in_bytes, - int num_channels); - -// The following functions interpret image data as gamma-corrected sRGB. -// Specify STBIR_ALPHA_CHANNEL_NONE if you have no alpha channel, -// or otherwise provide the index of the alpha channel. Flags value -// of 0 will probably do the right thing if you're not sure what -// the flags mean. - -#define STBIR_ALPHA_CHANNEL_NONE -1 - -// Set this flag if your texture has premultiplied alpha. Otherwise, stbir will -// use alpha-weighted resampling (effectively premultiplying, resampling, -// then unpremultiplying). -#define STBIR_FLAG_ALPHA_PREMULTIPLIED (1 << 0) -// The specified alpha channel should be handled as gamma-corrected value even -// when doing sRGB operations. -#define STBIR_FLAG_ALPHA_USES_COLORSPACE (1 << 1) - -STBIRDEF int stbir_resize_uint8_srgb(const unsigned char *input_pixels, int input_w, int input_h, int input_stride_in_bytes, - unsigned char *output_pixels, int output_w, int output_h, int output_stride_in_bytes, - int num_channels, int alpha_channel, int flags); +// stbir_pixel_layout specifies: +// number of channels +// order of channels +// whether color is premultiplied by alpha +// for back compatibility, you can cast the old channel count to an stbir_pixel_layout typedef enum { - STBIR_EDGE_CLAMP = 1, - STBIR_EDGE_REFLECT = 2, - STBIR_EDGE_WRAP = 3, - STBIR_EDGE_ZERO = 4, -} stbir_edge; + STBIR_1CHANNEL = 1, + STBIR_2CHANNEL = 2, + STBIR_RGB = 3, // 3-chan, with order specified (for channel flipping) + STBIR_BGR = 0, // 3-chan, with order specified (for channel flipping) + STBIR_4CHANNEL = 5, -// This function adds the ability to specify how requests to sample off the edge of the image are handled. 
-STBIRDEF int stbir_resize_uint8_srgb_edgemode(const unsigned char *input_pixels, int input_w, int input_h, int input_stride_in_bytes, - unsigned char *output_pixels, int output_w, int output_h, int output_stride_in_bytes, - int num_channels, int alpha_channel, int flags, - stbir_edge edge_wrap_mode); + STBIR_RGBA = 4, // alpha formats, where alpha is NOT premultiplied into color channels + STBIR_BGRA = 6, + STBIR_ARGB = 7, + STBIR_ABGR = 8, + STBIR_RA = 9, + STBIR_AR = 10, -////////////////////////////////////////////////////////////////////////////// + STBIR_RGBA_PM = 11, // alpha formats, where alpha is premultiplied into color channels + STBIR_BGRA_PM = 12, + STBIR_ARGB_PM = 13, + STBIR_ABGR_PM = 14, + STBIR_RA_PM = 15, + STBIR_AR_PM = 16, + + STBIR_RGBA_NO_AW = 11, // alpha formats, where NO alpha weighting is applied at all! + STBIR_BGRA_NO_AW = 12, // these are just synonyms for the _PM flags (which also do + STBIR_ARGB_NO_AW = 13, // no alpha weighting). These names just make it more clear + STBIR_ABGR_NO_AW = 14, // for some folks. + STBIR_RA_NO_AW = 15, + STBIR_AR_NO_AW = 16, + +} stbir_pixel_layout; + +//=============================================================== +// Simple-complexity API // +// If output_pixels is NULL (0), then we will allocate the buffer and return it to you. +//-------------------------------- + +STBIRDEF unsigned char *stbir_resize_uint8_srgb(const unsigned char *input_pixels, int input_w, int input_h, int input_stride_in_bytes, + unsigned char *output_pixels, int output_w, int output_h, int output_stride_in_bytes, + stbir_pixel_layout pixel_type); + +STBIRDEF unsigned char *stbir_resize_uint8_linear(const unsigned char *input_pixels, int input_w, int input_h, int input_stride_in_bytes, + unsigned char *output_pixels, int output_w, int output_h, int output_stride_in_bytes, + stbir_pixel_layout pixel_type); + +STBIRDEF float *stbir_resize_float_linear(const float *input_pixels, int input_w, int input_h, int input_stride_in_bytes, + float *output_pixels, int output_w, int output_h, int output_stride_in_bytes, + stbir_pixel_layout pixel_type); +=============================================================== + +=============================================================== // Medium-complexity API // // This extends the easy-to-use API as follows: // -// * Alpha-channel can be processed separately -// * If alpha_channel is not STBIR_ALPHA_CHANNEL_NONE -// * Alpha channel will not be gamma corrected (unless flags&STBIR_FLAG_GAMMA_CORRECT) -// * Filters will be weighted by alpha channel (unless flags&STBIR_FLAG_ALPHA_PREMULTIPLIED) +// * Can specify the datatype - U8, U8_SRGB, U16, FLOAT, HALF_FLOAT +// * Edge wrap can be selected explicitly // * Filter can be selected explicitly -// * uint16 image type -// * sRGB colorspace available for all types -// * context parameter for passing to STBIR_MALLOC +//-------------------------------- typedef enum { - STBIR_FILTER_DEFAULT = 0, // use same filter type that easy-to-use API chooses - STBIR_FILTER_BOX = 1, // A trapezoid w/1-pixel wide ramps, same result as box for integer scale ratios - STBIR_FILTER_TRIANGLE = 2, // On upsampling, produces same results as bilinear texture filtering - STBIR_FILTER_CUBICBSPLINE = 3, // The cubic b-spline (aka Mitchell-Netrevalli with B=1,C=0), gaussian-esque - STBIR_FILTER_CATMULLROM = 4, // An interpolating cubic spline - STBIR_FILTER_MITCHELL = 5, // Mitchell-Netrevalli filter with B=1/3, C=1/3 + STBIR_EDGE_CLAMP = 0, + STBIR_EDGE_REFLECT = 1, + STBIR_EDGE_WRAP = 2, // this 
edge mode is slower and uses more memory + STBIR_EDGE_ZERO = 3, +} stbir_edge; + +typedef enum +{ + STBIR_FILTER_DEFAULT = 0, // use same filter type that easy-to-use API chooses + STBIR_FILTER_BOX = 1, // A trapezoid w/1-pixel wide ramps, same result as box for integer scale ratios + STBIR_FILTER_TRIANGLE = 2, // On upsampling, produces same results as bilinear texture filtering + STBIR_FILTER_CUBICBSPLINE = 3, // The cubic b-spline (aka Mitchell-Netravali with B=1,C=0), gaussian-esque + STBIR_FILTER_CATMULLROM = 4, // An interpolating cubic spline + STBIR_FILTER_MITCHELL = 5, // Mitchell-Netravali filter with B=1/3, C=1/3 + STBIR_FILTER_POINT_SAMPLE = 6, // Simple point sampling + STBIR_FILTER_OTHER = 7, // User callback specified } stbir_filter; typedef enum { - STBIR_COLORSPACE_LINEAR, - STBIR_COLORSPACE_SRGB, - - STBIR_MAX_COLORSPACES, -} stbir_colorspace; - -// The following functions are all identical except for the type of the image data - -STBIRDEF int stbir_resize_uint8_generic(const unsigned char *input_pixels, int input_w, int input_h, int input_stride_in_bytes, - unsigned char *output_pixels, int output_w, int output_h, int output_stride_in_bytes, - int num_channels, int alpha_channel, int flags, - stbir_edge edge_wrap_mode, stbir_filter filter, stbir_colorspace space, - void *alloc_context); - -STBIRDEF int stbir_resize_uint16_generic(const stbir_uint16 *input_pixels, int input_w, int input_h, int input_stride_in_bytes, - stbir_uint16 *output_pixels, int output_w, int output_h, int output_stride_in_bytes, - int num_channels, int alpha_channel, int flags, - stbir_edge edge_wrap_mode, stbir_filter filter, stbir_colorspace space, - void *alloc_context); - -STBIRDEF int stbir_resize_float_generic(const float *input_pixels, int input_w, int input_h, int input_stride_in_bytes, - float *output_pixels, int output_w, int output_h, int output_stride_in_bytes, - int num_channels, int alpha_channel, int flags, - stbir_edge edge_wrap_mode, stbir_filter filter, stbir_colorspace space, - void *alloc_context); - -////////////////////////////////////////////////////////////////////////////// -// -// Full-complexity API -// -// This extends the medium API as follows: -// -// * uint32 image type -// * not typesafe -// * separate filter types for each axis -// * separate edge modes for each axis -// * can specify scale explicitly for subpixel correctness -// * can specify image source tile using texture coordinates typedef enum { - STBIR_TYPE_UINT8, - STBIR_TYPE_UINT16, - STBIR_TYPE_UINT32, - STBIR_TYPE_FLOAT, - - STBIR_MAX_TYPES + STBIR_TYPE_UINT8 = 0, + STBIR_TYPE_UINT8_SRGB = 1, + STBIR_TYPE_UINT8_SRGB_ALPHA = 2, // alpha channel, when present, should also be SRGB (this is very unusual) + STBIR_TYPE_UINT16 = 3, + STBIR_TYPE_FLOAT = 4, + STBIR_TYPE_HALF_FLOAT = 5 } stbir_datatype; -STBIRDEF int stbir_resize(const void *input_pixels, int input_w, int input_h, int input_stride_in_bytes, - void *output_pixels, int output_w, int output_h, int output_stride_in_bytes, - stbir_datatype datatype, - int num_channels, int alpha_channel, int flags, - stbir_edge edge_mode_horizontal, stbir_edge edge_mode_vertical, - stbir_filter filter_horizontal, stbir_filter filter_vertical, - stbir_colorspace space, void *alloc_context); +// medium api +STBIRDEF void *stbir_resize(const void *input_pixels, int input_w, int input_h, int input_stride_in_bytes, + void *output_pixels, int output_w, int output_h, int output_stride_in_bytes, + stbir_pixel_layout pixel_layout, stbir_datatype data_type, + stbir_edge 
edge, stbir_filter filter); +//=============================================================== -STBIRDEF int stbir_resize_subpixel(const void *input_pixels, int input_w, int input_h, int input_stride_in_bytes, - void *output_pixels, int output_w, int output_h, int output_stride_in_bytes, - stbir_datatype datatype, - int num_channels, int alpha_channel, int flags, - stbir_edge edge_mode_horizontal, stbir_edge edge_mode_vertical, - stbir_filter filter_horizontal, stbir_filter filter_vertical, - stbir_colorspace space, void *alloc_context, - float x_scale, float y_scale, - float x_offset, float y_offset); +//=============================================================== +// Extended-complexity API +// +// This API exposes all resize functionality. +// +// * Separate filter types for each axis +// * Separate edge modes for each axis +// * Separate input and output data types +// * Can specify regions with subpixel correctness +// * Can specify alpha flags +// * Can specify a memory callback +// * Can specify a callback data type for pixel input and output +// * Can be threaded for a single resize +// * Can be used to resize many frames without recalculating the sampler info +// +// Use this API as follows: +// 1) Call the stbir_resize_init function on a local STBIR_RESIZE structure +// 2) Call any of the stbir_set functions +// 3) Optionally call stbir_build_samplers() if you are going to resample multiple times +// with the same input and output dimensions (like resizing video frames) +// 4) Resample by calling stbir_resize_extended(). +// 5) Call stbir_free_samplers() if you called stbir_build_samplers() +//-------------------------------- -STBIRDEF int stbir_resize_region(const void *input_pixels, int input_w, int input_h, int input_stride_in_bytes, - void *output_pixels, int output_w, int output_h, int output_stride_in_bytes, - stbir_datatype datatype, - int num_channels, int alpha_channel, int flags, - stbir_edge edge_mode_horizontal, stbir_edge edge_mode_vertical, - stbir_filter filter_horizontal, stbir_filter filter_vertical, - stbir_colorspace space, void *alloc_context, - float s0, float t0, float s1, float t1); -// (s0, t0) & (s1, t1) are the top-left and bottom right corner (uv addressing style: [0, 1]x[0, 1]) of a region of the input image to use. 
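As a quick illustration of the medium-complexity entry point declared above, a hedged sketch (in_pixels, in_w/in_h, and out_w/out_h are hypothetical caller values; this assumes stbir_resize returns the output pointer, or NULL on failure, with the same allocate-on-NULL behavior documented for the easy API):

    // sRGB, unpremultiplied RGBA input; explicit edge mode and filter
    unsigned char *out = (unsigned char *)stbir_resize(
        in_pixels, in_w, in_h, 0,            // input, stride 0 = tightly packed
        NULL, out_w, out_h, 0,               // NULL output: library allocates
        STBIR_RGBA, STBIR_TYPE_UINT8_SRGB,   // layout also says which channel is alpha
        STBIR_EDGE_CLAMP, STBIR_FILTER_MITCHELL);
    if (out) { /* ... use the pixels, then free(out) ... */ }

This is also the replacement path the porting notes above give for the old stbir_resize_uint8_srgb_edgemode().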
+// Types: +// INPUT CALLBACK: this callback is used for input scanlines +typedef void const *stbir_input_callback(void *optional_output, void const *input_ptr, int num_pixels, int x, int y, void *context); + +// OUTPUT CALLBACK: this callback is used for output scanlines +typedef void stbir_output_callback(void const *output_ptr, int num_pixels, int y, void *context); + +// callbacks for user installed filters +typedef float stbir__kernel_callback(float x, float scale, void *user_data); // centered at zero +typedef float stbir__support_callback(float scale, void *user_data); + +// internal structure with precomputed scaling +typedef struct stbir__info stbir__info; + +typedef struct STBIR_RESIZE // use the stbir_resize_init and stbir_override functions to set these values for future compatibility +{ + void *user_data; + void const *input_pixels; + int input_w, input_h; + double input_s0, input_t0, input_s1, input_t1; + stbir_input_callback *input_cb; + void *output_pixels; + int output_w, output_h; + int output_subx, output_suby, output_subw, output_subh; + stbir_output_callback *output_cb; + int input_stride_in_bytes; + int output_stride_in_bytes; + int splits; + int fast_alpha; + int needs_rebuild; + int called_alloc; + stbir_pixel_layout input_pixel_layout_public; + stbir_pixel_layout output_pixel_layout_public; + stbir_datatype input_data_type; + stbir_datatype output_data_type; + stbir_filter horizontal_filter, vertical_filter; + stbir_edge horizontal_edge, vertical_edge; + stbir__kernel_callback *horizontal_filter_kernel; + stbir__support_callback *horizontal_filter_support; + stbir__kernel_callback *vertical_filter_kernel; + stbir__support_callback *vertical_filter_support; + stbir__info *samplers; +} STBIR_RESIZE; + +// extended complexity api + +// First off, you must ALWAYS call stbir_resize_init on your resize structure before any of the other calls! +STBIRDEF void stbir_resize_init(STBIR_RESIZE *resize, + const void *input_pixels, int input_w, int input_h, int input_stride_in_bytes, // stride can be zero + void *output_pixels, int output_w, int output_h, int output_stride_in_bytes, // stride can be zero + stbir_pixel_layout pixel_layout, stbir_datatype data_type); + +//=============================================================== +// You can update these parameters any time after resize_init and there is no cost +//-------------------------------- + +STBIRDEF void stbir_set_datatypes(STBIR_RESIZE *resize, stbir_datatype input_type, stbir_datatype output_type); +STBIRDEF void stbir_set_pixel_callbacks(STBIR_RESIZE *resize, stbir_input_callback *input_cb, stbir_output_callback *output_cb); // no callbacks by default +STBIRDEF void stbir_set_user_data(STBIR_RESIZE *resize, void *user_data); // pass back STBIR_RESIZE* by default +STBIRDEF void stbir_set_buffer_ptrs(STBIR_RESIZE *resize, const void *input_pixels, int input_stride_in_bytes, void *output_pixels, int output_stride_in_bytes); + +//=============================================================== + +//=============================================================== +// If you call any of these functions, you will trigger a sampler rebuild! 
+//--------------------------------
+
+STBIRDEF int stbir_set_pixel_layouts(STBIR_RESIZE *resize, stbir_pixel_layout input_pixel_layout, stbir_pixel_layout output_pixel_layout); // sets new buffer layouts
+STBIRDEF int stbir_set_edgemodes(STBIR_RESIZE *resize, stbir_edge horizontal_edge, stbir_edge vertical_edge); // CLAMP by default
+
+STBIRDEF int stbir_set_filters(STBIR_RESIZE *resize, stbir_filter horizontal_filter, stbir_filter vertical_filter); // STBIR_DEFAULT_FILTER_UPSAMPLE/DOWNSAMPLE by default
+STBIRDEF int stbir_set_filter_callbacks(STBIR_RESIZE *resize, stbir__kernel_callback *horizontal_filter, stbir__support_callback *horizontal_support, stbir__kernel_callback *vertical_filter, stbir__support_callback *vertical_support);
+
+STBIRDEF int stbir_set_pixel_subrect(STBIR_RESIZE *resize, int subx, int suby, int subw, int subh); // sets both sub-regions (full regions by default)
+STBIRDEF int stbir_set_input_subrect(STBIR_RESIZE *resize, double s0, double t0, double s1, double t1); // sets input sub-region (full region by default)
+STBIRDEF int stbir_set_output_pixel_subrect(STBIR_RESIZE *resize, int subx, int suby, int subw, int subh); // sets output sub-region (full region by default)
+
+// when inputting AND outputting non-premultiplied alpha pixels, we use a slower but higher quality technique
+// that fills the zero alpha pixel's RGB values with something plausible. If you don't care about areas of
+// zero alpha, you can call this function to get about a 25% speed improvement for STBIR_RGBA to STBIR_RGBA
+// types of resizes.
+STBIRDEF int stbir_set_non_pm_alpha_speed_over_quality(STBIR_RESIZE *resize, int non_pma_alpha_speed_over_quality);
+//===============================================================
+
+//===============================================================
+// You can call build_samplers to prebuild all the internal data we need to resample.
+// Then, if you call resize_extended many times with the same resize, you only pay the
+// cost once.
+// If you do call build_samplers, you MUST call free_samplers eventually.
+//--------------------------------
+
+// This builds the samplers and does one allocation
+STBIRDEF int stbir_build_samplers(STBIR_RESIZE *resize);
+
+// You MUST call this, if you call stbir_build_samplers or stbir_build_samplers_with_splits
+STBIRDEF void stbir_free_samplers(STBIR_RESIZE *resize);
+//===============================================================
+
+// And this is the main function to perform the resize synchronously on one thread.
+STBIRDEF int stbir_resize_extended(STBIR_RESIZE *resize);
+
+//===============================================================
+// Use these functions for multithreading.
+// 1) You call stbir_build_samplers_with_splits first on the main thread
+// 2) Then stbir_resize_extended_split on each thread
+// 3) stbir_free_samplers when done on the main thread
+//--------------------------------
+
+// This will build samplers for threading.
+// You can pass in the number of threads you'd like to use (try_splits).
+// It returns the number of splits (threads) that you can call it with.
+// It might be less if the image resize can't be split up that many ways.
+
+STBIRDEF int stbir_build_samplers_with_splits(STBIR_RESIZE *resize, int try_splits);
+
+// This function does a split of the resizing (you call this function for each
+// split, on multiple threads). A split is a piece of the output resize pixel space.
+
+// Note that you MUST call stbir_build_samplers_with_splits before stbir_resize_extended_split!
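+
+// Editor's note, an illustrative sketch of the threading flow (not part of the
+// original header). spawn_thread/join_all_threads stand in for whatever
+// threading primitives you actually use:
+//
+//     int splits = stbir_build_samplers_with_splits(&r, num_threads); // main thread
+//     for (int i = 0; i < splits; i++)
+//         spawn_thread(i);     // thread i calls: stbir_resize_extended_split(&r, i, 1);
+//     join_all_threads();
+//     stbir_free_samplers(&r); // main thread, when all threads are done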
+
+// Usually, you will call stbir_resize_extended_split with split_start as the thread_index
+// and "1" for the split_count.
+// But, if you have a weird situation where you MIGHT want 8 threads, but sometimes
+// only 4 threads, you can use 0,2,4,6 for the split_start's and use "2" for the
+// split_count each time to turn it into a 4 thread resize. (This is unusual.)
+
+STBIRDEF int stbir_resize_extended_split(STBIR_RESIZE *resize, int split_start, int split_count);
+//===============================================================
+
+//===============================================================
+// Pixel Callbacks info:
+//--------------------------------
+
+// The input callback is super flexible - it calls you with the input address
+// (based on the stride and base pointer), it gives you an optional_output
+// pointer that you can fill, or you can just return your own pointer into
+// your own data.
+//
+// You can also do conversion from non-supported data types if necessary - in
+// this case, you ignore the input_ptr and just use the x and y parameters to
+// calculate your own input_ptr based on the size of each non-supported pixel.
+// (Something like the third example below.)
+//
+// You can also install just an input or just an output callback by setting the
+// callback that you don't want to zero.
+//
+// First example, progress: (a callback that lets you monitor progress):
+//     void const * my_callback( void * optional_output, void const * input_ptr, int num_pixels, int x, int y, void * context )
+//     {
+//         percentage_done = y / input_height;
+//         return input_ptr; // use buffer from call
+//     }
+//
+// Next example, copying: (copy from some other buffer or stream):
+//     void const * my_callback( void * optional_output, void const * input_ptr, int num_pixels, int x, int y, void * context )
+//     {
+//         CopyOrStreamData( optional_output, other_data_src, num_pixels * pixel_width_in_bytes );
+//         return optional_output; // return the optional buffer that we filled
+//     }
+//
+// Third example, input another buffer without copying: (zero-copy from other buffer):
+//     void const * my_callback( void * optional_output, void const * input_ptr, int num_pixels, int x, int y, void * context )
+//     {
+//         void * pixels = ( (char*) other_image_base ) + ( y * other_image_stride ) + ( x * other_pixel_width_in_bytes );
+//         return pixels; // return pointer to your data without copying
+//     }
+//
+// The output callback is considerably simpler - it just calls you so that you can dump
+// out each scanline. You could even directly copy out to disk if you have a simple format
+// like TGA or BMP. You can also convert to other output types here if you want.
+//
+// Simple example:
+//     void my_output( void * output_ptr, int num_pixels, int y, void * context )
+//     {
+//         percentage_done = y / output_height;
+//         fwrite( output_ptr, pixel_width_in_bytes, num_pixels, output_file );
+//     }
+//===============================================================
+
+//===============================================================
+// optional built-in profiling API
+//--------------------------------
+
+#ifdef STBIR_PROFILE
+
+typedef struct STBIR_PROFILE_INFO
+{
+    stbir_uint64 total_clocks;
+
+    // how many clocks spent (of total_clocks) in the various resize routines, along with a string description
+    // there are "resize_count" number of zones
+    stbir_uint64 clocks[8];
+    char const **descriptions;
+
+    // count of clocks and descriptions
+    stbir_uint32 count;
+} STBIR_PROFILE_INFO;
+
+// use after calling stbir_resize_extended (or stbir_build_samplers or stbir_build_samplers_with_splits)
+STBIRDEF void stbir_resize_build_profile_info(STBIR_PROFILE_INFO *out_info, STBIR_RESIZE const *resize);
+
+// use after calling stbir_resize_extended
+STBIRDEF void stbir_resize_extended_profile_info(STBIR_PROFILE_INFO *out_info, STBIR_RESIZE const *resize);
+
+// use after calling stbir_resize_extended_split
+STBIRDEF void stbir_resize_split_profile_info(STBIR_PROFILE_INFO *out_info, STBIR_RESIZE const *resize, int split_start, int split_num);
+
+//===============================================================
+
+#endif
+
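+// Editor's note, an illustrative sketch of reading the profile zones (not part
+// of the original header). Assumes STBIR_PROFILE was defined before including
+// the implementation, and that a resize was just performed with r:
+//
+//     STBIR_PROFILE_INFO info;
+//     stbir_resize_extended(&r);
+//     stbir_resize_extended_profile_info(&info, &r);
+//     for (stbir_uint32 i = 0; i < info.count; i++)
+//         printf("%s: %llu clocks\n", info.descriptions[i],
+//                (unsigned long long)info.clocks[i]);
+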
 //// end header file /////////////////////////////////////////////////////
-#endif // STBIR_INCLUDE_STB_IMAGE_RESIZE_H
+#endif // STBIR_INCLUDE_STB_IMAGE_RESIZE2_H
 
-#ifdef STB_IMAGE_RESIZE_IMPLEMENTATION
+#if defined(STB_IMAGE_RESIZE_IMPLEMENTATION) || defined(STB_IMAGE_RESIZE2_IMPLEMENTATION)
 
 #ifndef STBIR_ASSERT
 #include <assert.h>
 #define STBIR_ASSERT(x) assert(x)
 #endif
 
-// For memset
-#include <string.h>
-
-#include <math.h>
-
 #ifndef STBIR_MALLOC
 #include <stdlib.h>
-// use comma operator to evaluate c, to avoid "unused parameter" warnings
-#define STBIR_MALLOC(size, c) ((void)(c), malloc(size))
-#define STBIR_FREE(ptr, c) ((void)(c), free(ptr))
+#define STBIR_MALLOC(size, user_data) ((void)(user_data), malloc(size))
+#define STBIR_FREE(ptr, user_data) ((void)(user_data), free(ptr))
+// (we used the comma operator to evaluate user_data, to avoid "unused parameter" warnings)
 #endif
 
-#ifndef _MSC_VER
-#ifdef __cplusplus
-#define stbir__inline inline
-#else
-#define stbir__inline
-#endif
-#else
-#define stbir__inline __forceinline
-#endif
-
-// should produce compiler error if size is wrong
-typedef unsigned char stbir__validate_uint32[sizeof(stbir_uint32) == 4 ? 1 : -1];
-
 #ifdef _MSC_VER
-#define STBIR__NOTUSED(v) (void)(v)
+
+#define stbir__inline __forceinline
+
 #else
-#define STBIR__NOTUSED(v) (void)sizeof(v)
+
+#define stbir__inline __inline__
+
+// Clang address sanitizer
+#if defined(__has_feature)
+#if __has_feature(address_sanitizer) || __has_feature(memory_sanitizer)
+#ifndef STBIR__SEPARATE_ALLOCATIONS
+#define STBIR__SEPARATE_ALLOCATIONS
+#endif
+#endif
+#endif
+
+#endif
+
+// GCC and MSVC
+#if defined(__SANITIZE_ADDRESS__)
+#ifndef STBIR__SEPARATE_ALLOCATIONS
+#define STBIR__SEPARATE_ALLOCATIONS
+#endif
+#endif
+
+// Always turn off automatic FMA use - use STBIR_USE_FMA if you want.
+// Otherwise, this is a determinism disaster.
+#ifndef STBIR_DONT_CHANGE_FP_CONTRACT // override in case you don't want this behavior +#if defined(_MSC_VER) && !defined(__clang__) +#if _MSC_VER > 1200 +#pragma fp_contract(off) +#endif +#elif defined(__GNUC__) && !defined(__clang__) +#pragma GCC optimize("fp-contract=off") +#else +#pragma STDC FP_CONTRACT OFF +#endif +#endif + +#ifdef _MSC_VER +#define STBIR__UNUSED(v) (void)(v) +#else +#define STBIR__UNUSED(v) (void)sizeof(v) #endif #define STBIR__ARRAY_SIZE(a) (sizeof((a)) / sizeof((a)[0])) @@ -424,2485 +826,10170 @@ typedef unsigned char stbir__validate_uint32[sizeof(stbir_uint32) == 4 ? 1 : -1] #define STBIR_DEFAULT_FILTER_DOWNSAMPLE STBIR_FILTER_MITCHELL #endif -#ifndef STBIR_PROGRESS_REPORT -#define STBIR_PROGRESS_REPORT(float_0_to_1) +#ifndef STBIR__HEADER_FILENAME +#define STBIR__HEADER_FILENAME "stb_image_resize2.h" #endif -#ifndef STBIR_MAX_CHANNELS -#define STBIR_MAX_CHANNELS 64 -#endif +// the internal pixel layout enums are in a different order, so we can easily do range comparisons of types +// the public pixel layout is ordered in a way that if you cast num_channels (1-4) to the enum, you get something sensible +typedef enum +{ + STBIRI_1CHANNEL = 0, + STBIRI_2CHANNEL = 1, + STBIRI_RGB = 2, + STBIRI_BGR = 3, + STBIRI_4CHANNEL = 4, -#if STBIR_MAX_CHANNELS > 65536 -#error "Too many channels; STBIR_MAX_CHANNELS must be no more than 65536." -// because we store the indices in 16-bit variables -#endif + STBIRI_RGBA = 5, + STBIRI_BGRA = 6, + STBIRI_ARGB = 7, + STBIRI_ABGR = 8, + STBIRI_RA = 9, + STBIRI_AR = 10, -// This value is added to alpha just before premultiplication to avoid -// zeroing out color values. It is equivalent to 2^-80. If you don't want -// that behavior (it may interfere if you have floating point images with -// very small alpha values) then you can define STBIR_NO_ALPHA_EPSILON to -// disable it. -#ifndef STBIR_ALPHA_EPSILON -#define STBIR_ALPHA_EPSILON ((float)1 / (1 << 20) / (1 << 20) / (1 << 20) / (1 << 20)) -#endif + STBIRI_RGBA_PM = 11, + STBIRI_BGRA_PM = 12, + STBIRI_ARGB_PM = 13, + STBIRI_ABGR_PM = 14, + STBIRI_RA_PM = 15, + STBIRI_AR_PM = 16, +} stbir_internal_pixel_layout; -#ifdef _MSC_VER -#define STBIR__UNUSED_PARAM(v) (void)(v) -#else -#define STBIR__UNUSED_PARAM(v) (void)sizeof(v) -#endif +// define the public pixel layouts to not compile inside the implementation (to avoid accidental use) +#define STBIR_BGR bad_dont_use_in_implementation +#define STBIR_1CHANNEL STBIR_BGR +#define STBIR_2CHANNEL STBIR_BGR +#define STBIR_RGB STBIR_BGR +#define STBIR_RGBA STBIR_BGR +#define STBIR_4CHANNEL STBIR_BGR +#define STBIR_BGRA STBIR_BGR +#define STBIR_ARGB STBIR_BGR +#define STBIR_ABGR STBIR_BGR +#define STBIR_RA STBIR_BGR +#define STBIR_AR STBIR_BGR +#define STBIR_RGBA_PM STBIR_BGR +#define STBIR_BGRA_PM STBIR_BGR +#define STBIR_ARGB_PM STBIR_BGR +#define STBIR_ABGR_PM STBIR_BGR +#define STBIR_RA_PM STBIR_BGR +#define STBIR_AR_PM STBIR_BGR // must match stbir_datatype static unsigned char stbir__type_size[] = { - 1, // STBIR_TYPE_UINT8 - 2, // STBIR_TYPE_UINT16 - 4, // STBIR_TYPE_UINT32 - 4, // STBIR_TYPE_FLOAT + 1, 1, 1, 2, 4, 2 // STBIR_TYPE_UINT8,STBIR_TYPE_UINT8_SRGB,STBIR_TYPE_UINT8_SRGB_ALPHA,STBIR_TYPE_UINT16,STBIR_TYPE_FLOAT,STBIR_TYPE_HALF_FLOAT }; -// Kernel function centered at 0 -typedef float(stbir__kernel_fn)(float x, float scale); -typedef float(stbir__support_fn)(float scale); - +// When gathering, the contributors are which source pixels contribute. +// When scattering, the contributors are which destination pixels are contributed to. 
typedef struct { - stbir__kernel_fn *kernel; - stbir__support_fn *support; -} stbir__filter_info; - -// When upsampling, the contributors are which source pixels contribute. -// When downsampling, the contributors are which destination pixels are contributed to. -typedef struct -{ - int n0; // First contributing pixel - int n1; // Last contributing pixel + int n0; // First contributing pixel + int n1; // Last contributing pixel } stbir__contributors; typedef struct { - const void *input_data; - int input_w; - int input_h; - int input_stride_bytes; + int lowest; // First sample index for whole filter + int highest; // Last sample index for whole filter + int widest; // widest single set of samples for an output +} stbir__filter_extent_info; - void *output_data; - int output_w; - int output_h; - int output_stride_bytes; +typedef struct +{ + int n0; // First pixel of decode buffer to write to + int n1; // Last pixel of decode that will be written to + int pixel_offset_for_input; // Pixel offset into input_scanline +} stbir__span; - float s0, t0, s1, t1; +typedef struct stbir__scale_info +{ + int input_full_size; + int output_sub_size; + float scale; + float inv_scale; + float pixel_shift; // starting shift in output pixel space (in pixels) + int scale_is_rational; + stbir_uint32 scale_numerator, scale_denominator; +} stbir__scale_info; - float horizontal_shift; // Units: output pixels - float vertical_shift; // Units: output pixels - float horizontal_scale; - float vertical_scale; +typedef struct +{ + stbir__contributors *contributors; + float *coefficients; + stbir__contributors *gather_prescatter_contributors; + float *gather_prescatter_coefficients; + stbir__scale_info scale_info; + float support; + stbir_filter filter_enum; + stbir__kernel_callback *filter_kernel; + stbir__support_callback *filter_support; + stbir_edge edge; + int coefficient_width; + int filter_pixel_width; + int filter_pixel_margin; + int num_contributors; + int contributors_size; + int coefficients_size; + stbir__filter_extent_info extent_info; + int is_gather; // 0 = scatter, 1 = gather with scale >= 1, 2 = gather with scale < 1 + int gather_prescatter_num_contributors; + int gather_prescatter_coefficient_width; + int gather_prescatter_contributors_size; + int gather_prescatter_coefficients_size; +} stbir__sampler; - int channels; - int alpha_channel; - stbir_uint32 flags; - stbir_datatype type; - stbir_filter horizontal_filter; - stbir_filter vertical_filter; - stbir_edge edge_horizontal; - stbir_edge edge_vertical; - stbir_colorspace colorspace; +typedef struct +{ + stbir__contributors conservative; + int edge_sizes[2]; // this can be less than filter_pixel_margin, if the filter and scaling falls off + stbir__span spans[2]; // can be two spans, if doing input subrect with clamp mode WRAP +} stbir__extents; - stbir__contributors *horizontal_contributors; - float *horizontal_coefficients; +typedef struct +{ +#ifdef STBIR_PROFILE + union + { + struct + { + stbir_uint64 total, looping, vertical, horizontal, decode, encode, alpha, unalpha; + } named; + stbir_uint64 array[8]; + } profile; + stbir_uint64 *current_zone_excluded_ptr; +#endif + float *decode_buffer; - stbir__contributors *vertical_contributors; - float *vertical_coefficients; + int ring_buffer_first_scanline; + int ring_buffer_last_scanline; + int ring_buffer_begin_index; // first_scanline is at this index in the ring buffer + int start_output_y, end_output_y; + int start_input_y, end_input_y; // used in scatter only - int decode_buffer_pixels; - float 
*decode_buffer; +#ifdef STBIR__SEPARATE_ALLOCATIONS + float **ring_buffers; // one pointer for each ring buffer +#else + float *ring_buffer; // one big buffer that we index into +#endif - float *horizontal_buffer; + float *vertical_buffer; - // cache these because ceil/floor are inexplicably showing up in profile - int horizontal_coefficient_width; - int vertical_coefficient_width; - int horizontal_filter_pixel_width; - int vertical_filter_pixel_width; - int horizontal_filter_pixel_margin; - int vertical_filter_pixel_margin; - int horizontal_num_contributors; - int vertical_num_contributors; + char no_cache_straddle[64]; +} stbir__per_split_info; - int ring_buffer_length_bytes; // The length of an individual entry in the ring buffer. The total number of ring buffers is stbir__get_filter_pixel_width(filter) - int ring_buffer_num_entries; // Total number of entries in the ring buffer. - int ring_buffer_first_scanline; - int ring_buffer_last_scanline; - int ring_buffer_begin_index; // first_scanline is at this index in the ring buffer - float *ring_buffer; +typedef float *stbir__decode_pixels_func(float *decode, int width_times_channels, void const *input); +typedef void stbir__alpha_weight_func(float *decode_buffer, int width_times_channels); +typedef void stbir__horizontal_gather_channels_func(float *output_buffer, unsigned int output_sub_size, float const *decode_buffer, + stbir__contributors const *horizontal_contributors, float const *horizontal_coefficients, int coefficient_width); +typedef void stbir__alpha_unweight_func(float *encode_buffer, int width_times_channels); +typedef void stbir__encode_pixels_func(void *output, int width_times_channels, float const *encode); - float *encode_buffer; // A temporary buffer to store floats so we don't lose precision while we do multiply-adds. +struct stbir__info +{ +#ifdef STBIR_PROFILE + union + { + struct + { + stbir_uint64 total, build, alloc, horizontal, vertical, cleanup, pivot; + } named; + stbir_uint64 array[7]; + } profile; + stbir_uint64 *current_zone_excluded_ptr; +#endif + stbir__sampler horizontal; + stbir__sampler vertical; - int horizontal_contributors_size; - int horizontal_coefficients_size; - int vertical_contributors_size; - int vertical_coefficients_size; - int decode_buffer_size; - int horizontal_buffer_size; - int ring_buffer_size; - int encode_buffer_size; -} stbir__info; + void const *input_data; + void *output_data; -static const float stbir__max_uint8_as_float = 255.0f; -static const float stbir__max_uint16_as_float = 65535.0f; -static const double stbir__max_uint32_as_float = 4294967295.0; + int input_stride_bytes; + int output_stride_bytes; + int ring_buffer_length_bytes; // The length of an individual entry in the ring buffer. The total number of ring buffers is stbir__get_filter_pixel_width(filter) + int ring_buffer_num_entries; // Total number of entries in the ring buffer. 
+ + stbir_datatype input_type; + stbir_datatype output_type; + + stbir_input_callback *in_pixels_cb; + void *user_data; + stbir_output_callback *out_pixels_cb; + + stbir__extents scanline_extents; + + void *alloced_mem; + stbir__per_split_info *split_info; // by default 1, but there will be N of these allocated based on the thread init you did + + stbir__decode_pixels_func *decode_pixels; + stbir__alpha_weight_func *alpha_weight; + stbir__horizontal_gather_channels_func *horizontal_gather_channels; + stbir__alpha_unweight_func *alpha_unweight; + stbir__encode_pixels_func *encode_pixels; + + int alloc_ring_buffer_num_entries; // Number of entries in the ring buffer that will be allocated + int splits; // count of splits + + stbir_internal_pixel_layout input_pixel_layout_internal; + stbir_internal_pixel_layout output_pixel_layout_internal; + + int input_color_and_type; + int offset_x, offset_y; // offset within output_data + int vertical_first; + int channels; + int effective_channels; // same as channels, except on RGBA/ARGB (7), or XA/AX (3) + size_t alloced_total; +}; + +#define stbir__max_uint8_as_float 255.0f +#define stbir__max_uint16_as_float 65535.0f +#define stbir__max_uint8_as_float_inverted 3.9215689e-03f // (1.0f/255.0f) +#define stbir__max_uint16_as_float_inverted 1.5259022e-05f // (1.0f/65535.0f) +#define stbir__small_float ((float)1 / (1 << 20) / (1 << 20) / (1 << 20) / (1 << 20) / (1 << 20) / (1 << 20)) + +// min/max friendly +#define STBIR_CLAMP(x, xmin, xmax) \ + for (;;) \ + { \ + if ((x) < (xmin)) \ + (x) = (xmin); \ + if ((x) > (xmax)) \ + (x) = (xmax); \ + break; \ + } static stbir__inline int stbir__min(int a, int b) { - return a < b ? a : b; + return a < b ? a : b; } -static stbir__inline float stbir__saturate(float x) +static stbir__inline int stbir__max(int a, int b) { - if (x < 0) - return 0; - - if (x > 1) - return 1; - - return x; + return a > b ? 
a : b; } -#ifdef STBIR_SATURATE_INT -static stbir__inline stbir_uint8 stbir__saturate8(int x) -{ - if ((unsigned int)x <= 255) - return x; - - if (x < 0) - return 0; - - return 255; -} - -static stbir__inline stbir_uint16 stbir__saturate16(int x) -{ - if ((unsigned int)x <= 65535) - return x; - - if (x < 0) - return 0; - - return 65535; -} -#endif - static float stbir__srgb_uchar_to_linear_float[256] = { - 0.000000f, 0.000304f, 0.000607f, 0.000911f, 0.001214f, 0.001518f, 0.001821f, 0.002125f, 0.002428f, 0.002732f, 0.003035f, - 0.003347f, 0.003677f, 0.004025f, 0.004391f, 0.004777f, 0.005182f, 0.005605f, 0.006049f, 0.006512f, 0.006995f, 0.007499f, - 0.008023f, 0.008568f, 0.009134f, 0.009721f, 0.010330f, 0.010960f, 0.011612f, 0.012286f, 0.012983f, 0.013702f, 0.014444f, - 0.015209f, 0.015996f, 0.016807f, 0.017642f, 0.018500f, 0.019382f, 0.020289f, 0.021219f, 0.022174f, 0.023153f, 0.024158f, - 0.025187f, 0.026241f, 0.027321f, 0.028426f, 0.029557f, 0.030713f, 0.031896f, 0.033105f, 0.034340f, 0.035601f, 0.036889f, - 0.038204f, 0.039546f, 0.040915f, 0.042311f, 0.043735f, 0.045186f, 0.046665f, 0.048172f, 0.049707f, 0.051269f, 0.052861f, - 0.054480f, 0.056128f, 0.057805f, 0.059511f, 0.061246f, 0.063010f, 0.064803f, 0.066626f, 0.068478f, 0.070360f, 0.072272f, - 0.074214f, 0.076185f, 0.078187f, 0.080220f, 0.082283f, 0.084376f, 0.086500f, 0.088656f, 0.090842f, 0.093059f, 0.095307f, - 0.097587f, 0.099899f, 0.102242f, 0.104616f, 0.107023f, 0.109462f, 0.111932f, 0.114435f, 0.116971f, 0.119538f, 0.122139f, - 0.124772f, 0.127438f, 0.130136f, 0.132868f, 0.135633f, 0.138432f, 0.141263f, 0.144128f, 0.147027f, 0.149960f, 0.152926f, - 0.155926f, 0.158961f, 0.162029f, 0.165132f, 0.168269f, 0.171441f, 0.174647f, 0.177888f, 0.181164f, 0.184475f, 0.187821f, - 0.191202f, 0.194618f, 0.198069f, 0.201556f, 0.205079f, 0.208637f, 0.212231f, 0.215861f, 0.219526f, 0.223228f, 0.226966f, - 0.230740f, 0.234551f, 0.238398f, 0.242281f, 0.246201f, 0.250158f, 0.254152f, 0.258183f, 0.262251f, 0.266356f, 0.270498f, - 0.274677f, 0.278894f, 0.283149f, 0.287441f, 0.291771f, 0.296138f, 0.300544f, 0.304987f, 0.309469f, 0.313989f, 0.318547f, - 0.323143f, 0.327778f, 0.332452f, 0.337164f, 0.341914f, 0.346704f, 0.351533f, 0.356400f, 0.361307f, 0.366253f, 0.371238f, - 0.376262f, 0.381326f, 0.386430f, 0.391573f, 0.396755f, 0.401978f, 0.407240f, 0.412543f, 0.417885f, 0.423268f, 0.428691f, - 0.434154f, 0.439657f, 0.445201f, 0.450786f, 0.456411f, 0.462077f, 0.467784f, 0.473532f, 0.479320f, 0.485150f, 0.491021f, - 0.496933f, 0.502887f, 0.508881f, 0.514918f, 0.520996f, 0.527115f, 0.533276f, 0.539480f, 0.545725f, 0.552011f, 0.558340f, - 0.564712f, 0.571125f, 0.577581f, 0.584078f, 0.590619f, 0.597202f, 0.603827f, 0.610496f, 0.617207f, 0.623960f, 0.630757f, - 0.637597f, 0.644480f, 0.651406f, 0.658375f, 0.665387f, 0.672443f, 0.679543f, 0.686685f, 0.693872f, 0.701102f, 0.708376f, - 0.715694f, 0.723055f, 0.730461f, 0.737911f, 0.745404f, 0.752942f, 0.760525f, 0.768151f, 0.775822f, 0.783538f, 0.791298f, - 0.799103f, 0.806952f, 0.814847f, 0.822786f, 0.830770f, 0.838799f, 0.846873f, 0.854993f, 0.863157f, 0.871367f, 0.879622f, - 0.887923f, 0.896269f, 0.904661f, 0.913099f, 0.921582f, 0.930111f, 0.938686f, 0.947307f, 0.955974f, 0.964686f, 0.973445f, - 0.982251f, 0.991102f, 1.0f}; - -static float stbir__srgb_to_linear(float f) -{ - if (f <= 0.04045f) - return f / 12.92f; - else - return (float)pow((f + 0.055f) / 1.055f, 2.4f); -} - -static float stbir__linear_to_srgb(float f) -{ - if (f <= 0.0031308f) - return f * 12.92f; - else - return 1.055f * (float)pow(f, 1 
/ 2.4f) - 0.055f; -} - -#ifndef STBIR_NON_IEEE_FLOAT -// From https://gist.github.com/rygorous/2203834 + 0.000000f, 0.000304f, 0.000607f, 0.000911f, 0.001214f, 0.001518f, 0.001821f, 0.002125f, 0.002428f, 0.002732f, 0.003035f, + 0.003347f, 0.003677f, 0.004025f, 0.004391f, 0.004777f, 0.005182f, 0.005605f, 0.006049f, 0.006512f, 0.006995f, 0.007499f, + 0.008023f, 0.008568f, 0.009134f, 0.009721f, 0.010330f, 0.010960f, 0.011612f, 0.012286f, 0.012983f, 0.013702f, 0.014444f, + 0.015209f, 0.015996f, 0.016807f, 0.017642f, 0.018500f, 0.019382f, 0.020289f, 0.021219f, 0.022174f, 0.023153f, 0.024158f, + 0.025187f, 0.026241f, 0.027321f, 0.028426f, 0.029557f, 0.030713f, 0.031896f, 0.033105f, 0.034340f, 0.035601f, 0.036889f, + 0.038204f, 0.039546f, 0.040915f, 0.042311f, 0.043735f, 0.045186f, 0.046665f, 0.048172f, 0.049707f, 0.051269f, 0.052861f, + 0.054480f, 0.056128f, 0.057805f, 0.059511f, 0.061246f, 0.063010f, 0.064803f, 0.066626f, 0.068478f, 0.070360f, 0.072272f, + 0.074214f, 0.076185f, 0.078187f, 0.080220f, 0.082283f, 0.084376f, 0.086500f, 0.088656f, 0.090842f, 0.093059f, 0.095307f, + 0.097587f, 0.099899f, 0.102242f, 0.104616f, 0.107023f, 0.109462f, 0.111932f, 0.114435f, 0.116971f, 0.119538f, 0.122139f, + 0.124772f, 0.127438f, 0.130136f, 0.132868f, 0.135633f, 0.138432f, 0.141263f, 0.144128f, 0.147027f, 0.149960f, 0.152926f, + 0.155926f, 0.158961f, 0.162029f, 0.165132f, 0.168269f, 0.171441f, 0.174647f, 0.177888f, 0.181164f, 0.184475f, 0.187821f, + 0.191202f, 0.194618f, 0.198069f, 0.201556f, 0.205079f, 0.208637f, 0.212231f, 0.215861f, 0.219526f, 0.223228f, 0.226966f, + 0.230740f, 0.234551f, 0.238398f, 0.242281f, 0.246201f, 0.250158f, 0.254152f, 0.258183f, 0.262251f, 0.266356f, 0.270498f, + 0.274677f, 0.278894f, 0.283149f, 0.287441f, 0.291771f, 0.296138f, 0.300544f, 0.304987f, 0.309469f, 0.313989f, 0.318547f, + 0.323143f, 0.327778f, 0.332452f, 0.337164f, 0.341914f, 0.346704f, 0.351533f, 0.356400f, 0.361307f, 0.366253f, 0.371238f, + 0.376262f, 0.381326f, 0.386430f, 0.391573f, 0.396755f, 0.401978f, 0.407240f, 0.412543f, 0.417885f, 0.423268f, 0.428691f, + 0.434154f, 0.439657f, 0.445201f, 0.450786f, 0.456411f, 0.462077f, 0.467784f, 0.473532f, 0.479320f, 0.485150f, 0.491021f, + 0.496933f, 0.502887f, 0.508881f, 0.514918f, 0.520996f, 0.527115f, 0.533276f, 0.539480f, 0.545725f, 0.552011f, 0.558340f, + 0.564712f, 0.571125f, 0.577581f, 0.584078f, 0.590619f, 0.597202f, 0.603827f, 0.610496f, 0.617207f, 0.623960f, 0.630757f, + 0.637597f, 0.644480f, 0.651406f, 0.658375f, 0.665387f, 0.672443f, 0.679543f, 0.686685f, 0.693872f, 0.701102f, 0.708376f, + 0.715694f, 0.723055f, 0.730461f, 0.737911f, 0.745404f, 0.752942f, 0.760525f, 0.768151f, 0.775822f, 0.783538f, 0.791298f, + 0.799103f, 0.806952f, 0.814847f, 0.822786f, 0.830770f, 0.838799f, 0.846873f, 0.854993f, 0.863157f, 0.871367f, 0.879622f, + 0.887923f, 0.896269f, 0.904661f, 0.913099f, 0.921582f, 0.930111f, 0.938686f, 0.947307f, 0.955974f, 0.964686f, 0.973445f, + 0.982251f, 0.991102f, 1.0f}; typedef union { - stbir_uint32 u; - float f; + unsigned int u; + float f; } stbir__FP32; +// From https://gist.github.com/rygorous/2203834 + static const stbir_uint32 fp32_to_srgb8_tab4[104] = { - 0x0073000d, - 0x007a000d, - 0x0080000d, - 0x0087000d, - 0x008d000d, - 0x0094000d, - 0x009a000d, - 0x00a1000d, - 0x00a7001a, - 0x00b4001a, - 0x00c1001a, - 0x00ce001a, - 0x00da001a, - 0x00e7001a, - 0x00f4001a, - 0x0101001a, - 0x010e0033, - 0x01280033, - 0x01410033, - 0x015b0033, - 0x01750033, - 0x018f0033, - 0x01a80033, - 0x01c20033, - 0x01dc0067, - 0x020f0067, - 0x02430067, - 0x02760067, - 
0x02aa0067, - 0x02dd0067, - 0x03110067, - 0x03440067, - 0x037800ce, - 0x03df00ce, - 0x044600ce, - 0x04ad00ce, - 0x051400ce, - 0x057b00c5, - 0x05dd00bc, - 0x063b00b5, - 0x06970158, - 0x07420142, - 0x07e30130, - 0x087b0120, - 0x090b0112, - 0x09940106, - 0x0a1700fc, - 0x0a9500f2, - 0x0b0f01cb, - 0x0bf401ae, - 0x0ccb0195, - 0x0d950180, - 0x0e56016e, - 0x0f0d015e, - 0x0fbc0150, - 0x10630143, - 0x11070264, - 0x1238023e, - 0x1357021d, - 0x14660201, - 0x156601e9, - 0x165a01d3, - 0x174401c0, - 0x182401af, - 0x18fe0331, - 0x1a9602fe, - 0x1c1502d2, - 0x1d7e02ad, - 0x1ed4028d, - 0x201a0270, - 0x21520256, - 0x227d0240, - 0x239f0443, - 0x25c003fe, - 0x27bf03c4, - 0x29a10392, - 0x2b6a0367, - 0x2d1d0341, - 0x2ebe031f, - 0x304d0300, - 0x31d105b0, - 0x34a80555, - 0x37520507, - 0x39d504c5, - 0x3c37048b, - 0x3e7c0458, - 0x40a8042a, - 0x42bd0401, - 0x44c20798, - 0x488e071e, - 0x4c1c06b6, - 0x4f76065d, - 0x52a50610, - 0x55ac05cc, - 0x5892058f, - 0x5b590559, - 0x5e0c0a23, - 0x631c0980, - 0x67db08f6, - 0x6c55087f, - 0x70940818, - 0x74a007bd, - 0x787d076c, - 0x7c330723, + 0x0073000d, + 0x007a000d, + 0x0080000d, + 0x0087000d, + 0x008d000d, + 0x0094000d, + 0x009a000d, + 0x00a1000d, + 0x00a7001a, + 0x00b4001a, + 0x00c1001a, + 0x00ce001a, + 0x00da001a, + 0x00e7001a, + 0x00f4001a, + 0x0101001a, + 0x010e0033, + 0x01280033, + 0x01410033, + 0x015b0033, + 0x01750033, + 0x018f0033, + 0x01a80033, + 0x01c20033, + 0x01dc0067, + 0x020f0067, + 0x02430067, + 0x02760067, + 0x02aa0067, + 0x02dd0067, + 0x03110067, + 0x03440067, + 0x037800ce, + 0x03df00ce, + 0x044600ce, + 0x04ad00ce, + 0x051400ce, + 0x057b00c5, + 0x05dd00bc, + 0x063b00b5, + 0x06970158, + 0x07420142, + 0x07e30130, + 0x087b0120, + 0x090b0112, + 0x09940106, + 0x0a1700fc, + 0x0a9500f2, + 0x0b0f01cb, + 0x0bf401ae, + 0x0ccb0195, + 0x0d950180, + 0x0e56016e, + 0x0f0d015e, + 0x0fbc0150, + 0x10630143, + 0x11070264, + 0x1238023e, + 0x1357021d, + 0x14660201, + 0x156601e9, + 0x165a01d3, + 0x174401c0, + 0x182401af, + 0x18fe0331, + 0x1a9602fe, + 0x1c1502d2, + 0x1d7e02ad, + 0x1ed4028d, + 0x201a0270, + 0x21520256, + 0x227d0240, + 0x239f0443, + 0x25c003fe, + 0x27bf03c4, + 0x29a10392, + 0x2b6a0367, + 0x2d1d0341, + 0x2ebe031f, + 0x304d0300, + 0x31d105b0, + 0x34a80555, + 0x37520507, + 0x39d504c5, + 0x3c37048b, + 0x3e7c0458, + 0x40a8042a, + 0x42bd0401, + 0x44c20798, + 0x488e071e, + 0x4c1c06b6, + 0x4f76065d, + 0x52a50610, + 0x55ac05cc, + 0x5892058f, + 0x5b590559, + 0x5e0c0a23, + 0x631c0980, + 0x67db08f6, + 0x6c55087f, + 0x70940818, + 0x74a007bd, + 0x787d076c, + 0x7c330723, }; -static stbir_uint8 stbir__linear_to_srgb_uchar(float in_) +static stbir__inline stbir_uint8 stbir__linear_to_srgb_uchar(float in) { - static const stbir__FP32 almostone = {0x3f7fffff}; // 1-eps - static const stbir__FP32 minval = {(127 - 13) << 23}; - stbir_uint32 tab, bias, scale, t; - stbir__FP32 f; + static const stbir__FP32 almostone = {0x3f7fffff}; // 1-eps + static const stbir__FP32 minval = {(127 - 13) << 23}; + stbir_uint32 tab, bias, scale, t; + stbir__FP32 f; - // Clamp to [2^(-13), 1-eps]; these two values map to 0 and 1, respectively. - // The tests are carefully written so that NaNs map to 0, same as in the reference - // implementation. - if (!(in_ > minval.f)) // written this way to catch NaNs - in_ = minval.f; - if (in_ > almostone.f) - in_ = almostone.f; + // Clamp to [2^(-13), 1-eps]; these two values map to 0 and 1, respectively. + // The tests are carefully written so that NaNs map to 0, same as in the reference + // implementation. 
+ if (!(in > minval.f)) // written this way to catch NaNs + return 0; + if (in > almostone.f) + return 255; - // Do the table lookup and unpack bias, scale - f.f = in_; - tab = fp32_to_srgb8_tab4[(f.u - minval.u) >> 20]; - bias = (tab >> 16) << 9; - scale = tab & 0xffff; + // Do the table lookup and unpack bias, scale + f.f = in; + tab = fp32_to_srgb8_tab4[(f.u - minval.u) >> 20]; + bias = (tab >> 16) << 9; + scale = tab & 0xffff; - // Grab next-highest mantissa bits and perform linear interpolation - t = (f.u >> 12) & 0xff; - return (unsigned char)((bias + scale * t) >> 16); + // Grab next-highest mantissa bits and perform linear interpolation + t = (f.u >> 12) & 0xff; + return (unsigned char)((bias + scale * t) >> 16); } -#else -// sRGB transition values, scaled by 1<<28 -static int stbir__srgb_offset_to_linear_scaled[256] = - { - 0, - 40738, - 122216, - 203693, - 285170, - 366648, - 448125, - 529603, - 611080, - 692557, - 774035, - 855852, - 942009, - 1033024, - 1128971, - 1229926, - 1335959, - 1447142, - 1563542, - 1685229, - 1812268, - 1944725, - 2082664, - 2226148, - 2375238, - 2529996, - 2690481, - 2856753, - 3028870, - 3206888, - 3390865, - 3580856, - 3776916, - 3979100, - 4187460, - 4402049, - 4622919, - 4850123, - 5083710, - 5323731, - 5570236, - 5823273, - 6082892, - 6349140, - 6622065, - 6901714, - 7188133, - 7481369, - 7781466, - 8088471, - 8402427, - 8723380, - 9051372, - 9386448, - 9728650, - 10078021, - 10434603, - 10798439, - 11169569, - 11548036, - 11933879, - 12327139, - 12727857, - 13136073, - 13551826, - 13975156, - 14406100, - 14844697, - 15290987, - 15745007, - 16206795, - 16676389, - 17153826, - 17639142, - 18132374, - 18633560, - 19142734, - 19659934, - 20185196, - 20718552, - 21260042, - 21809696, - 22367554, - 22933648, - 23508010, - 24090680, - 24681686, - 25281066, - 25888850, - 26505076, - 27129772, - 27762974, - 28404716, - 29055026, - 29713942, - 30381490, - 31057708, - 31742624, - 32436272, - 33138682, - 33849884, - 34569912, - 35298800, - 36036568, - 36783260, - 37538896, - 38303512, - 39077136, - 39859796, - 40651528, - 41452360, - 42262316, - 43081432, - 43909732, - 44747252, - 45594016, - 46450052, - 47315392, - 48190064, - 49074096, - 49967516, - 50870356, - 51782636, - 52704392, - 53635648, - 54576432, - 55526772, - 56486700, - 57456236, - 58435408, - 59424248, - 60422780, - 61431036, - 62449032, - 63476804, - 64514376, - 65561776, - 66619028, - 67686160, - 68763192, - 69850160, - 70947088, - 72053992, - 73170912, - 74297864, - 75434880, - 76581976, - 77739184, - 78906536, - 80084040, - 81271736, - 82469648, - 83677792, - 84896192, - 86124888, - 87363888, - 88613232, - 89872928, - 91143016, - 92423512, - 93714432, - 95015816, - 96327688, - 97650056, - 98982952, - 100326408, - 101680440, - 103045072, - 104420320, - 105806224, - 107202800, - 108610064, - 110028048, - 111456776, - 112896264, - 114346544, - 115807632, - 117279552, - 118762328, - 120255976, - 121760536, - 123276016, - 124802440, - 126339832, - 127888216, - 129447616, - 131018048, - 132599544, - 134192112, - 135795792, - 137410592, - 139036528, - 140673648, - 142321952, - 143981456, - 145652208, - 147334208, - 149027488, - 150732064, - 152447968, - 154175200, - 155913792, - 157663776, - 159425168, - 161197984, - 162982240, - 164777968, - 166585184, - 168403904, - 170234160, - 172075968, - 173929344, - 175794320, - 177670896, - 179559120, - 181458992, - 183370528, - 185293776, - 187228736, - 189175424, - 191133888, - 193104112, - 195086128, - 197079968, - 199085648, - 201103184, - 203132592, 
- 205173888, - 207227120, - 209292272, - 211369392, - 213458480, - 215559568, - 217672656, - 219797792, - 221934976, - 224084240, - 226245600, - 228419056, - 230604656, - 232802400, - 235012320, - 237234432, - 239468736, - 241715280, - 243974080, - 246245120, - 248528464, - 250824112, - 253132064, - 255452368, - 257785040, - 260130080, - 262487520, - 264857376, - 267239664, -}; - -static stbir_uint8 stbir__linear_to_srgb_uchar(float f) -{ - int x = (int)(f * (1 << 28)); // has headroom so you don't need to clamp - int v = 0; - int i; - - // Refine the guess with a short binary search. - i = v + 128; - if (x >= stbir__srgb_offset_to_linear_scaled[i]) - v = i; - i = v + 64; - if (x >= stbir__srgb_offset_to_linear_scaled[i]) - v = i; - i = v + 32; - if (x >= stbir__srgb_offset_to_linear_scaled[i]) - v = i; - i = v + 16; - if (x >= stbir__srgb_offset_to_linear_scaled[i]) - v = i; - i = v + 8; - if (x >= stbir__srgb_offset_to_linear_scaled[i]) - v = i; - i = v + 4; - if (x >= stbir__srgb_offset_to_linear_scaled[i]) - v = i; - i = v + 2; - if (x >= stbir__srgb_offset_to_linear_scaled[i]) - v = i; - i = v + 1; - if (x >= stbir__srgb_offset_to_linear_scaled[i]) - v = i; - - return (stbir_uint8)v; -} +#ifndef STBIR_FORCE_GATHER_FILTER_SCANLINES_AMOUNT +#define STBIR_FORCE_GATHER_FILTER_SCANLINES_AMOUNT 32 // when downsampling and <= 32 scanlines of buffering, use gather. gather used down to 1/8th scaling for 25% win. #endif -static float stbir__filter_trapezoid(float x, float scale) +#ifndef STBIR_FORCE_MINIMUM_SCANLINES_FOR_SPLITS +#define STBIR_FORCE_MINIMUM_SCANLINES_FOR_SPLITS 4 // when threading, what is the minimum number of scanlines for a split? +#endif + +#define STBIR_INPUT_CALLBACK_PADDING 3 + +#ifdef _M_IX86_FP +#if (_M_IX86_FP >= 1) +#ifndef STBIR_SSE +#define STBIR_SSE +#endif +#endif +#endif + +#ifdef __TINYC__ +// tiny c has no intrinsics yet - this can become a version check if they add them +#define STBIR_NO_SIMD +#endif + +#if defined(_x86_64) || defined(__x86_64__) || defined(_M_X64) || defined(__x86_64) || defined(_M_AMD64) || defined(__SSE2__) || defined(STBIR_SSE) || defined(STBIR_SSE2) +#ifndef STBIR_SSE2 +#define STBIR_SSE2 +#endif +#if defined(__AVX__) || defined(STBIR_AVX2) +#ifndef STBIR_AVX +#ifndef STBIR_NO_AVX +#define STBIR_AVX +#endif +#endif +#endif +#if defined(__AVX2__) || defined(STBIR_AVX2) +#ifndef STBIR_NO_AVX2 +#ifndef STBIR_AVX2 +#define STBIR_AVX2 +#endif +#if defined(_MSC_VER) && !defined(__clang__) +#ifndef STBIR_FP16C // FP16C instructions are on all AVX2 cpus, so we can autoselect it here on microsoft - clang needs -m16c +#define STBIR_FP16C +#endif +#endif +#endif +#endif +#ifdef __F16C__ +#ifndef STBIR_FP16C // turn on FP16C instructions if the define is set (for clang and gcc) +#define STBIR_FP16C +#endif +#endif +#endif + +#if defined(_M_ARM64) || defined(__aarch64__) || defined(__arm64__) || ((__ARM_NEON_FP & 4) != 0) || defined(__ARM_NEON__) +#ifndef STBIR_NEON +#define STBIR_NEON +#endif +#endif + +#if defined(_M_ARM) || defined(__arm__) +#ifdef STBIR_USE_FMA +#undef STBIR_USE_FMA // no FMA for 32-bit arm on MSVC +#endif +#endif + +#if defined(__wasm__) && defined(__wasm_simd128__) +#ifndef STBIR_WASM +#define STBIR_WASM +#endif +#endif + +// restrict pointers for the output pointers, other loop and unroll control +#if defined(_MSC_VER) && !defined(__clang__) +#define STBIR_STREAMOUT_PTR(star) star __restrict +#define STBIR_NO_UNROLL(ptr) __assume(ptr) // this oddly keeps msvc from unrolling a loop +#if _MSC_VER >= 1900 +#define 
STBIR_NO_UNROLL_LOOP_START __pragma(loop(no_vector)) +#else +#define STBIR_NO_UNROLL_LOOP_START +#endif +#elif defined(__clang__) +#define STBIR_STREAMOUT_PTR(star) star __restrict__ +#define STBIR_NO_UNROLL(ptr) __asm__("" ::"r"(ptr)) +#if (__clang_major__ >= 4) || ((__clang_major__ >= 3) && (__clang_minor__ >= 5)) +#define STBIR_NO_UNROLL_LOOP_START _Pragma("clang loop unroll(disable)") _Pragma("clang loop vectorize(disable)") +#else +#define STBIR_NO_UNROLL_LOOP_START +#endif +#elif defined(__GNUC__) +#define STBIR_STREAMOUT_PTR(star) star __restrict__ +#define STBIR_NO_UNROLL(ptr) __asm__("" ::"r"(ptr)) +#if __GNUC__ >= 14 +#define STBIR_NO_UNROLL_LOOP_START _Pragma("GCC unroll 0") _Pragma("GCC novector") +#else +#define STBIR_NO_UNROLL_LOOP_START +#endif +#define STBIR_NO_UNROLL_LOOP_START_INF_FOR +#else +#define STBIR_STREAMOUT_PTR(star) star +#define STBIR_NO_UNROLL(ptr) +#define STBIR_NO_UNROLL_LOOP_START +#endif + +#ifndef STBIR_NO_UNROLL_LOOP_START_INF_FOR +#define STBIR_NO_UNROLL_LOOP_START_INF_FOR STBIR_NO_UNROLL_LOOP_START +#endif + +#ifdef STBIR_NO_SIMD // force simd off for whatever reason + +// force simd off overrides everything else, so clear it all + +#ifdef STBIR_SSE2 +#undef STBIR_SSE2 +#endif + +#ifdef STBIR_AVX +#undef STBIR_AVX +#endif + +#ifdef STBIR_NEON +#undef STBIR_NEON +#endif + +#ifdef STBIR_AVX2 +#undef STBIR_AVX2 +#endif + +#ifdef STBIR_FP16C +#undef STBIR_FP16C +#endif + +#ifdef STBIR_WASM +#undef STBIR_WASM +#endif + +#ifdef STBIR_SIMD +#undef STBIR_SIMD +#endif + +#else // STBIR_SIMD + +#ifdef STBIR_SSE2 +#include + +#define stbir__simdf __m128 +#define stbir__simdi __m128i + +#define stbir_simdi_castf(reg) _mm_castps_si128(reg) +#define stbir_simdf_casti(reg) _mm_castsi128_ps(reg) + +#define stbir__simdf_load(reg, ptr) (reg) = _mm_loadu_ps((float const *)(ptr)) +#define stbir__simdi_load(reg, ptr) (reg) = _mm_loadu_si128((stbir__simdi const *)(ptr)) +#define stbir__simdf_load1(out, ptr) (out) = _mm_load_ss((float const *)(ptr)) // top values can be random (not denormal or nan for perf) +#define stbir__simdi_load1(out, ptr) (out) = _mm_castps_si128(_mm_load_ss((float const *)(ptr))) +#define stbir__simdf_load1z(out, ptr) (out) = _mm_load_ss((float const *)(ptr)) // top values must be zero +#define stbir__simdf_frep4(fvar) _mm_set_ps1(fvar) +#define stbir__simdf_load1frep4(out, fvar) (out) = _mm_set_ps1(fvar) +#define stbir__simdf_load2(out, ptr) (out) = _mm_castsi128_ps(_mm_loadl_epi64((__m128i *)(ptr))) // top values can be random (not denormal or nan for perf) +#define stbir__simdf_load2z(out, ptr) (out) = _mm_castsi128_ps(_mm_loadl_epi64((__m128i *)(ptr))) // top values must be zero +#define stbir__simdf_load2hmerge(out, reg, ptr) (out) = _mm_castpd_ps(_mm_loadh_pd(_mm_castps_pd(reg), (double *)(ptr))) + +#define stbir__simdf_zeroP() _mm_setzero_ps() +#define stbir__simdf_zero(reg) (reg) = _mm_setzero_ps() + +#define stbir__simdf_store(ptr, reg) _mm_storeu_ps((float *)(ptr), reg) +#define stbir__simdf_store1(ptr, reg) _mm_store_ss((float *)(ptr), reg) +#define stbir__simdf_store2(ptr, reg) _mm_storel_epi64((__m128i *)(ptr), _mm_castps_si128(reg)) +#define stbir__simdf_store2h(ptr, reg) _mm_storeh_pd((double *)(ptr), _mm_castps_pd(reg)) + +#define stbir__simdi_store(ptr, reg) _mm_storeu_si128((__m128i *)(ptr), reg) +#define stbir__simdi_store1(ptr, reg) _mm_store_ss((float *)(ptr), _mm_castsi128_ps(reg)) +#define stbir__simdi_store2(ptr, reg) _mm_storel_epi64((__m128i *)(ptr), (reg)) + +#define stbir__prefetch(ptr) _mm_prefetch((char *)(ptr), 
_MM_HINT_T0) + +#define stbir__simdi_expand_u8_to_u32(out0, out1, out2, out3, ireg) \ + { \ + stbir__simdi zero = _mm_setzero_si128(); \ + out2 = _mm_unpacklo_epi8(ireg, zero); \ + out3 = _mm_unpackhi_epi8(ireg, zero); \ + out0 = _mm_unpacklo_epi16(out2, zero); \ + out1 = _mm_unpackhi_epi16(out2, zero); \ + out2 = _mm_unpacklo_epi16(out3, zero); \ + out3 = _mm_unpackhi_epi16(out3, zero); \ + } + +#define stbir__simdi_expand_u8_to_1u32(out, ireg) \ + { \ + stbir__simdi zero = _mm_setzero_si128(); \ + out = _mm_unpacklo_epi8(ireg, zero); \ + out = _mm_unpacklo_epi16(out, zero); \ + } + +#define stbir__simdi_expand_u16_to_u32(out0, out1, ireg) \ + { \ + stbir__simdi zero = _mm_setzero_si128(); \ + out0 = _mm_unpacklo_epi16(ireg, zero); \ + out1 = _mm_unpackhi_epi16(ireg, zero); \ + } + +#define stbir__simdf_convert_float_to_i32(i, f) (i) = _mm_cvttps_epi32(f) +#define stbir__simdf_convert_float_to_int(f) _mm_cvtt_ss2si(f) +#define stbir__simdf_convert_float_to_uint8(f) ((unsigned char)_mm_cvtsi128_si32(_mm_cvttps_epi32(_mm_max_ps(_mm_min_ps(f, STBIR__CONSTF(STBIR_max_uint8_as_float)), _mm_setzero_ps())))) +#define stbir__simdf_convert_float_to_short(f) ((unsigned short)_mm_cvtsi128_si32(_mm_cvttps_epi32(_mm_max_ps(_mm_min_ps(f, STBIR__CONSTF(STBIR_max_uint16_as_float)), _mm_setzero_ps())))) + +#define stbir__simdi_to_int(i) _mm_cvtsi128_si32(i) +#define stbir__simdi_convert_i32_to_float(out, ireg) (out) = _mm_cvtepi32_ps(ireg) +#define stbir__simdf_add(out, reg0, reg1) (out) = _mm_add_ps(reg0, reg1) +#define stbir__simdf_mult(out, reg0, reg1) (out) = _mm_mul_ps(reg0, reg1) +#define stbir__simdf_mult_mem(out, reg, ptr) (out) = _mm_mul_ps(reg, _mm_loadu_ps((float const *)(ptr))) +#define stbir__simdf_mult1_mem(out, reg, ptr) (out) = _mm_mul_ss(reg, _mm_load_ss((float const *)(ptr))) +#define stbir__simdf_add_mem(out, reg, ptr) (out) = _mm_add_ps(reg, _mm_loadu_ps((float const *)(ptr))) +#define stbir__simdf_add1_mem(out, reg, ptr) (out) = _mm_add_ss(reg, _mm_load_ss((float const *)(ptr))) + +#ifdef STBIR_USE_FMA // not on by default to maintain bit identical simd to non-simd +#include +#define stbir__simdf_madd(out, add, mul1, mul2) (out) = _mm_fmadd_ps(mul1, mul2, add) +#define stbir__simdf_madd1(out, add, mul1, mul2) (out) = _mm_fmadd_ss(mul1, mul2, add) +#define stbir__simdf_madd_mem(out, add, mul, ptr) (out) = _mm_fmadd_ps(mul, _mm_loadu_ps((float const *)(ptr)), add) +#define stbir__simdf_madd1_mem(out, add, mul, ptr) (out) = _mm_fmadd_ss(mul, _mm_load_ss((float const *)(ptr)), add) +#else +#define stbir__simdf_madd(out, add, mul1, mul2) (out) = _mm_add_ps(add, _mm_mul_ps(mul1, mul2)) +#define stbir__simdf_madd1(out, add, mul1, mul2) (out) = _mm_add_ss(add, _mm_mul_ss(mul1, mul2)) +#define stbir__simdf_madd_mem(out, add, mul, ptr) (out) = _mm_add_ps(add, _mm_mul_ps(mul, _mm_loadu_ps((float const *)(ptr)))) +#define stbir__simdf_madd1_mem(out, add, mul, ptr) (out) = _mm_add_ss(add, _mm_mul_ss(mul, _mm_load_ss((float const *)(ptr)))) +#endif + +#define stbir__simdf_add1(out, reg0, reg1) (out) = _mm_add_ss(reg0, reg1) +#define stbir__simdf_mult1(out, reg0, reg1) (out) = _mm_mul_ss(reg0, reg1) + +#define stbir__simdf_and(out, reg0, reg1) (out) = _mm_and_ps(reg0, reg1) +#define stbir__simdf_or(out, reg0, reg1) (out) = _mm_or_ps(reg0, reg1) + +#define stbir__simdf_min(out, reg0, reg1) (out) = _mm_min_ps(reg0, reg1) +#define stbir__simdf_max(out, reg0, reg1) (out) = _mm_max_ps(reg0, reg1) +#define stbir__simdf_min1(out, reg0, reg1) (out) = _mm_min_ss(reg0, reg1) +#define stbir__simdf_max1(out, 
reg0, reg1) (out) = _mm_max_ss(reg0, reg1) + +#define stbir__simdf_0123ABCDto3ABx(out, reg0, reg1) (out) = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(_mm_shuffle_ps(reg1, reg0, (0 << 0) + (1 << 2) + (2 << 4) + (3 << 6))), (3 << 0) + (0 << 2) + (1 << 4) + (2 << 6))) +#define stbir__simdf_0123ABCDto23Ax(out, reg0, reg1) (out) = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(_mm_shuffle_ps(reg1, reg0, (0 << 0) + (1 << 2) + (2 << 4) + (3 << 6))), (2 << 0) + (3 << 2) + (0 << 4) + (1 << 6))) + +static const stbir__simdf STBIR_zeroones = {0.0f, 1.0f, 0.0f, 1.0f}; +static const stbir__simdf STBIR_onezeros = {1.0f, 0.0f, 1.0f, 0.0f}; +#define stbir__simdf_aaa1(out, alp, ones) (out) = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(_mm_movehl_ps(ones, alp)), (1 << 0) + (1 << 2) + (1 << 4) + (2 << 6))) +#define stbir__simdf_1aaa(out, alp, ones) (out) = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(_mm_movelh_ps(ones, alp)), (0 << 0) + (2 << 2) + (2 << 4) + (2 << 6))) +#define stbir__simdf_a1a1(out, alp, ones) (out) = _mm_or_ps(_mm_castsi128_ps(_mm_srli_epi64(_mm_castps_si128(alp), 32)), STBIR_zeroones) +#define stbir__simdf_1a1a(out, alp, ones) (out) = _mm_or_ps(_mm_castsi128_ps(_mm_slli_epi64(_mm_castps_si128(alp), 32)), STBIR_onezeros) + +#define stbir__simdf_swiz(reg, one, two, three, four) _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(reg), (one << 0) + (two << 2) + (three << 4) + (four << 6))) + +#define stbir__simdi_and(out, reg0, reg1) (out) = _mm_and_si128(reg0, reg1) +#define stbir__simdi_or(out, reg0, reg1) (out) = _mm_or_si128(reg0, reg1) +#define stbir__simdi_16madd(out, reg0, reg1) (out) = _mm_madd_epi16(reg0, reg1) + +#define stbir__simdf_pack_to_8bytes(out, aa, bb) \ + { \ + stbir__simdf af, bf; \ + stbir__simdi a, b; \ + af = _mm_min_ps(aa, STBIR_max_uint8_as_float); \ + bf = _mm_min_ps(bb, STBIR_max_uint8_as_float); \ + af = _mm_max_ps(af, _mm_setzero_ps()); \ + bf = _mm_max_ps(bf, _mm_setzero_ps()); \ + a = _mm_cvttps_epi32(af); \ + b = _mm_cvttps_epi32(bf); \ + a = _mm_packs_epi32(a, b); \ + out = _mm_packus_epi16(a, a); \ + } + +#define stbir__simdf_load4_transposed(o0, o1, o2, o3, ptr) \ + stbir__simdf_load(o0, (ptr)); \ + stbir__simdf_load(o1, (ptr) + 4); \ + stbir__simdf_load(o2, (ptr) + 8); \ + stbir__simdf_load(o3, (ptr) + 12); \ + { \ + __m128 tmp0, tmp1, tmp2, tmp3; \ + tmp0 = _mm_unpacklo_ps(o0, o1); \ + tmp2 = _mm_unpacklo_ps(o2, o3); \ + tmp1 = _mm_unpackhi_ps(o0, o1); \ + tmp3 = _mm_unpackhi_ps(o2, o3); \ + o0 = _mm_movelh_ps(tmp0, tmp2); \ + o1 = _mm_movehl_ps(tmp2, tmp0); \ + o2 = _mm_movelh_ps(tmp1, tmp3); \ + o3 = _mm_movehl_ps(tmp3, tmp1); \ + } + +#define stbir__interleave_pack_and_store_16_u8(ptr, r0, r1, r2, r3) \ + r0 = _mm_packs_epi32(r0, r1); \ + r2 = _mm_packs_epi32(r2, r3); \ + r1 = _mm_unpacklo_epi16(r0, r2); \ + r3 = _mm_unpackhi_epi16(r0, r2); \ + r0 = _mm_unpacklo_epi16(r1, r3); \ + r2 = _mm_unpackhi_epi16(r1, r3); \ + r0 = _mm_packus_epi16(r0, r2); \ + stbir__simdi_store(ptr, r0); + +#define stbir__simdi_32shr(out, reg, imm) out = _mm_srli_epi32(reg, imm) + +#if defined(_MSC_VER) && !defined(__clang__) +// msvc inits with 8 bytes +#define STBIR__CONST_32_TO_8(v) (char)(unsigned char)((v) & 255), (char)(unsigned char)(((v) >> 8) & 255), (char)(unsigned char)(((v) >> 16) & 255), (char)(unsigned char)(((v) >> 24) & 255) +#define STBIR__CONST_4_32i(v) STBIR__CONST_32_TO_8(v), STBIR__CONST_32_TO_8(v), STBIR__CONST_32_TO_8(v), STBIR__CONST_32_TO_8(v) +#define STBIR__CONST_4d_32i(v0, v1, v2, v3) STBIR__CONST_32_TO_8(v0), 
STBIR__CONST_32_TO_8(v1), STBIR__CONST_32_TO_8(v2), STBIR__CONST_32_TO_8(v3) +#else +// everything else inits with long long's +#define STBIR__CONST_4_32i(v) (long long)((((stbir_uint64)(stbir_uint32)(v)) << 32) | ((stbir_uint64)(stbir_uint32)(v))), (long long)((((stbir_uint64)(stbir_uint32)(v)) << 32) | ((stbir_uint64)(stbir_uint32)(v))) +#define STBIR__CONST_4d_32i(v0, v1, v2, v3) (long long)((((stbir_uint64)(stbir_uint32)(v1)) << 32) | ((stbir_uint64)(stbir_uint32)(v0))), (long long)((((stbir_uint64)(stbir_uint32)(v3)) << 32) | ((stbir_uint64)(stbir_uint32)(v2))) +#endif + +#define STBIR__SIMDF_CONST(var, x) stbir__simdf var = {x, x, x, x} +#define STBIR__SIMDI_CONST(var, x) stbir__simdi var = {STBIR__CONST_4_32i(x)} +#define STBIR__CONSTF(var) (var) +#define STBIR__CONSTI(var) (var) + +#if defined(STBIR_AVX) || defined(__SSE4_1__) +#include +#define stbir__simdf_pack_to_8words(out, reg0, reg1) out = _mm_packus_epi32(_mm_cvttps_epi32(_mm_max_ps(_mm_min_ps(reg0, STBIR__CONSTF(STBIR_max_uint16_as_float)), _mm_setzero_ps())), _mm_cvttps_epi32(_mm_max_ps(_mm_min_ps(reg1, STBIR__CONSTF(STBIR_max_uint16_as_float)), _mm_setzero_ps()))) +#else +static STBIR__SIMDI_CONST(stbir__s32_32768, 32768); +static STBIR__SIMDI_CONST(stbir__s16_32768, ((32768 << 16) | 32768)); + +#define stbir__simdf_pack_to_8words(out, reg0, reg1) \ + { \ + stbir__simdi tmp0, tmp1; \ + tmp0 = _mm_cvttps_epi32(_mm_max_ps(_mm_min_ps(reg0, STBIR__CONSTF(STBIR_max_uint16_as_float)), _mm_setzero_ps())); \ + tmp1 = _mm_cvttps_epi32(_mm_max_ps(_mm_min_ps(reg1, STBIR__CONSTF(STBIR_max_uint16_as_float)), _mm_setzero_ps())); \ + tmp0 = _mm_sub_epi32(tmp0, stbir__s32_32768); \ + tmp1 = _mm_sub_epi32(tmp1, stbir__s32_32768); \ + out = _mm_packs_epi32(tmp0, tmp1); \ + out = _mm_sub_epi16(out, stbir__s16_32768); \ + } + +#endif + +#define STBIR_SIMD + +// if we detect AVX, set the simd8 defines +#ifdef STBIR_AVX +#include +#define STBIR_SIMD8 +#define stbir__simdf8 __m256 +#define stbir__simdi8 __m256i +#define stbir__simdf8_load(out, ptr) (out) = _mm256_loadu_ps((float const *)(ptr)) +#define stbir__simdi8_load(out, ptr) (out) = _mm256_loadu_si256((__m256i const *)(ptr)) +#define stbir__simdf8_mult(out, a, b) (out) = _mm256_mul_ps((a), (b)) +#define stbir__simdf8_store(ptr, out) _mm256_storeu_ps((float *)(ptr), out) +#define stbir__simdi8_store(ptr, reg) _mm256_storeu_si256((__m256i *)(ptr), reg) +#define stbir__simdf8_frep8(fval) _mm256_set1_ps(fval) + +#define stbir__simdf8_min(out, reg0, reg1) (out) = _mm256_min_ps(reg0, reg1) +#define stbir__simdf8_max(out, reg0, reg1) (out) = _mm256_max_ps(reg0, reg1) + +#define stbir__simdf8_add4halves(out, bot4, top8) (out) = _mm_add_ps(bot4, _mm256_extractf128_ps(top8, 1)) +#define stbir__simdf8_mult_mem(out, reg, ptr) (out) = _mm256_mul_ps(reg, _mm256_loadu_ps((float const *)(ptr))) +#define stbir__simdf8_add_mem(out, reg, ptr) (out) = _mm256_add_ps(reg, _mm256_loadu_ps((float const *)(ptr))) +#define stbir__simdf8_add(out, a, b) (out) = _mm256_add_ps(a, b) +#define stbir__simdf8_load1b(out, ptr) (out) = _mm256_broadcast_ss(ptr) +#define stbir__simdf_load1rep4(out, ptr) (out) = _mm_broadcast_ss(ptr) // avx load instruction + +#define stbir__simdi8_convert_i32_to_float(out, ireg) (out) = _mm256_cvtepi32_ps(ireg) +#define stbir__simdf8_convert_float_to_i32(i, f) (i) = _mm256_cvttps_epi32(f) + +#define stbir__simdf8_bot4s(out, a, b) (out) = _mm256_permute2f128_ps(a, b, (0 << 0) + (2 << 4)) +#define stbir__simdf8_top4s(out, a, b) (out) = _mm256_permute2f128_ps(a, b, (1 << 0) + (3 << 4)) + 
+#define stbir__simdf8_gettop4(reg) _mm256_extractf128_ps(reg, 1) + +#ifdef STBIR_AVX2 + +#define stbir__simdi8_expand_u8_to_u32(out0, out1, ireg) \ + { \ + stbir__simdi8 a, zero = _mm256_setzero_si256(); \ + a = _mm256_permute4x64_epi64(_mm256_unpacklo_epi8(_mm256_permute4x64_epi64(_mm256_castsi128_si256(ireg), (0 << 0) + (2 << 2) + (1 << 4) + (3 << 6)), zero), (0 << 0) + (2 << 2) + (1 << 4) + (3 << 6)); \ + out0 = _mm256_unpacklo_epi16(a, zero); \ + out1 = _mm256_unpackhi_epi16(a, zero); \ + } + +#define stbir__simdf8_pack_to_16bytes(out, aa, bb) \ + { \ + stbir__simdi8 t; \ + stbir__simdf8 af, bf; \ + stbir__simdi8 a, b; \ + af = _mm256_min_ps(aa, STBIR_max_uint8_as_floatX); \ + bf = _mm256_min_ps(bb, STBIR_max_uint8_as_floatX); \ + af = _mm256_max_ps(af, _mm256_setzero_ps()); \ + bf = _mm256_max_ps(bf, _mm256_setzero_ps()); \ + a = _mm256_cvttps_epi32(af); \ + b = _mm256_cvttps_epi32(bf); \ + t = _mm256_permute4x64_epi64(_mm256_packs_epi32(a, b), (0 << 0) + (2 << 2) + (1 << 4) + (3 << 6)); \ + out = _mm256_castsi256_si128(_mm256_permute4x64_epi64(_mm256_packus_epi16(t, t), (0 << 0) + (2 << 2) + (1 << 4) + (3 << 6))); \ + } + +#define stbir__simdi8_expand_u16_to_u32(out, ireg) out = _mm256_unpacklo_epi16(_mm256_permute4x64_epi64(_mm256_castsi128_si256(ireg), (0 << 0) + (2 << 2) + (1 << 4) + (3 << 6)), _mm256_setzero_si256()); + +#define stbir__simdf8_pack_to_16words(out, aa, bb) \ + { \ + stbir__simdf8 af, bf; \ + stbir__simdi8 a, b; \ + af = _mm256_min_ps(aa, STBIR_max_uint16_as_floatX); \ + bf = _mm256_min_ps(bb, STBIR_max_uint16_as_floatX); \ + af = _mm256_max_ps(af, _mm256_setzero_ps()); \ + bf = _mm256_max_ps(bf, _mm256_setzero_ps()); \ + a = _mm256_cvttps_epi32(af); \ + b = _mm256_cvttps_epi32(bf); \ + (out) = _mm256_permute4x64_epi64(_mm256_packus_epi32(a, b), (0 << 0) + (2 << 2) + (1 << 4) + (3 << 6)); \ + } + +#else + +#define stbir__simdi8_expand_u8_to_u32(out0, out1, ireg) \ + { \ + stbir__simdi a, zero = _mm_setzero_si128(); \ + a = _mm_unpacklo_epi8(ireg, zero); \ + out0 = _mm256_setr_m128i(_mm_unpacklo_epi16(a, zero), _mm_unpackhi_epi16(a, zero)); \ + a = _mm_unpackhi_epi8(ireg, zero); \ + out1 = _mm256_setr_m128i(_mm_unpacklo_epi16(a, zero), _mm_unpackhi_epi16(a, zero)); \ + } + +#define stbir__simdf8_pack_to_16bytes(out, aa, bb) \ + { \ + stbir__simdi t; \ + stbir__simdf8 af, bf; \ + stbir__simdi8 a, b; \ + af = _mm256_min_ps(aa, STBIR_max_uint8_as_floatX); \ + bf = _mm256_min_ps(bb, STBIR_max_uint8_as_floatX); \ + af = _mm256_max_ps(af, _mm256_setzero_ps()); \ + bf = _mm256_max_ps(bf, _mm256_setzero_ps()); \ + a = _mm256_cvttps_epi32(af); \ + b = _mm256_cvttps_epi32(bf); \ + out = _mm_packs_epi32(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1)); \ + out = _mm_packus_epi16(out, out); \ + t = _mm_packs_epi32(_mm256_castsi256_si128(b), _mm256_extractf128_si256(b, 1)); \ + t = _mm_packus_epi16(t, t); \ + out = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(out), _mm_castsi128_ps(t), (0 << 0) + (1 << 2) + (0 << 4) + (1 << 6))); \ + } + +#define stbir__simdi8_expand_u16_to_u32(out, ireg) \ + { \ + stbir__simdi a, b, zero = _mm_setzero_si128(); \ + a = _mm_unpacklo_epi16(ireg, zero); \ + b = _mm_unpackhi_epi16(ireg, zero); \ + out = _mm256_insertf128_si256(_mm256_castsi128_si256(a), b, 1); \ + } + +#define stbir__simdf8_pack_to_16words(out, aa, bb) \ + { \ + stbir__simdi t0, t1; \ + stbir__simdf8 af, bf; \ + stbir__simdi8 a, b; \ + af = _mm256_min_ps(aa, STBIR_max_uint16_as_floatX); \ + bf = _mm256_min_ps(bb, STBIR_max_uint16_as_floatX); \ + af = _mm256_max_ps(af, 
_mm256_setzero_ps()); \ + bf = _mm256_max_ps(bf, _mm256_setzero_ps()); \ + a = _mm256_cvttps_epi32(af); \ + b = _mm256_cvttps_epi32(bf); \ + t0 = _mm_packus_epi32(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1)); \ + t1 = _mm_packus_epi32(_mm256_castsi256_si128(b), _mm256_extractf128_si256(b, 1)); \ + out = _mm256_setr_m128i(t0, t1); \ + } + +#endif + +static __m256i stbir_00001111 = {STBIR__CONST_4d_32i(0, 0, 0, 0), STBIR__CONST_4d_32i(1, 1, 1, 1)}; +#define stbir__simdf8_0123to00001111(out, in) (out) = _mm256_permutevar_ps(in, stbir_00001111) + +static __m256i stbir_22223333 = {STBIR__CONST_4d_32i(2, 2, 2, 2), STBIR__CONST_4d_32i(3, 3, 3, 3)}; +#define stbir__simdf8_0123to22223333(out, in) (out) = _mm256_permutevar_ps(in, stbir_22223333) + +#define stbir__simdf8_0123to2222(out, in) (out) = stbir__simdf_swiz(_mm256_castps256_ps128(in), 2, 2, 2, 2) + +#define stbir__simdf8_load4b(out, ptr) (out) = _mm256_broadcast_ps((__m128 const *)(ptr)) + +static __m256i stbir_00112233 = {STBIR__CONST_4d_32i(0, 0, 1, 1), STBIR__CONST_4d_32i(2, 2, 3, 3)}; +#define stbir__simdf8_0123to00112233(out, in) (out) = _mm256_permutevar_ps(in, stbir_00112233) +#define stbir__simdf8_add4(out, a8, b) (out) = _mm256_add_ps(a8, _mm256_castps128_ps256(b)) + +static __m256i stbir_load6 = {STBIR__CONST_4_32i(0x80000000), STBIR__CONST_4d_32i(0x80000000, 0x80000000, 0, 0)}; +#define stbir__simdf8_load6z(out, ptr) (out) = _mm256_maskload_ps(ptr, stbir_load6) + +#define stbir__simdf8_0123to00000000(out, in) (out) = _mm256_shuffle_ps(in, in, (0 << 0) + (0 << 2) + (0 << 4) + (0 << 6)) +#define stbir__simdf8_0123to11111111(out, in) (out) = _mm256_shuffle_ps(in, in, (1 << 0) + (1 << 2) + (1 << 4) + (1 << 6)) +#define stbir__simdf8_0123to22222222(out, in) (out) = _mm256_shuffle_ps(in, in, (2 << 0) + (2 << 2) + (2 << 4) + (2 << 6)) +#define stbir__simdf8_0123to33333333(out, in) (out) = _mm256_shuffle_ps(in, in, (3 << 0) + (3 << 2) + (3 << 4) + (3 << 6)) +#define stbir__simdf8_0123to21032103(out, in) (out) = _mm256_shuffle_ps(in, in, (2 << 0) + (1 << 2) + (0 << 4) + (3 << 6)) +#define stbir__simdf8_0123to32103210(out, in) (out) = _mm256_shuffle_ps(in, in, (3 << 0) + (2 << 2) + (1 << 4) + (0 << 6)) +#define stbir__simdf8_0123to12301230(out, in) (out) = _mm256_shuffle_ps(in, in, (1 << 0) + (2 << 2) + (3 << 4) + (0 << 6)) +#define stbir__simdf8_0123to10321032(out, in) (out) = _mm256_shuffle_ps(in, in, (1 << 0) + (0 << 2) + (3 << 4) + (2 << 6)) +#define stbir__simdf8_0123to30123012(out, in) (out) = _mm256_shuffle_ps(in, in, (3 << 0) + (0 << 2) + (1 << 4) + (2 << 6)) + +#define stbir__simdf8_0123to11331133(out, in) (out) = _mm256_shuffle_ps(in, in, (1 << 0) + (1 << 2) + (3 << 4) + (3 << 6)) +#define stbir__simdf8_0123to00220022(out, in) (out) = _mm256_shuffle_ps(in, in, (0 << 0) + (0 << 2) + (2 << 4) + (2 << 6)) + +#define stbir__simdf8_aaa1(out, alp, ones) \ + (out) = _mm256_blend_ps(alp, ones, (1 << 0) + (1 << 1) + (1 << 2) + (0 << 3) + (1 << 4) + (1 << 5) + (1 << 6) + (0 << 7)); \ + (out) = _mm256_shuffle_ps(out, out, (3 << 0) + (3 << 2) + (3 << 4) + (0 << 6)) +#define stbir__simdf8_1aaa(out, alp, ones) \ + (out) = _mm256_blend_ps(alp, ones, (0 << 0) + (1 << 1) + (1 << 2) + (1 << 3) + (0 << 4) + (1 << 5) + (1 << 6) + (1 << 7)); \ + (out) = _mm256_shuffle_ps(out, out, (1 << 0) + (0 << 2) + (0 << 4) + (0 << 6)) +#define stbir__simdf8_a1a1(out, alp, ones) \ + (out) = _mm256_blend_ps(alp, ones, (1 << 0) + (0 << 1) + (1 << 2) + (0 << 3) + (1 << 4) + (0 << 5) + (1 << 6) + (0 << 7)); \ + (out) = _mm256_shuffle_ps(out, out, (1 << 
0) + (0 << 2) + (3 << 4) + (2 << 6))
+#define stbir__simdf8_1a1a(out, alp, ones) \
+    (out) = _mm256_blend_ps(alp, ones, (0 << 0) + (1 << 1) + (0 << 2) + (1 << 3) + (0 << 4) + (1 << 5) + (0 << 6) + (1 << 7)); \
+    (out) = _mm256_shuffle_ps(out, out, (1 << 0) + (0 << 2) + (3 << 4) + (2 << 6))
+
+#define stbir__simdf8_zero(reg) (reg) = _mm256_setzero_ps()
+
+#ifdef STBIR_USE_FMA // not on by default to maintain bit identical simd to non-simd
+#define stbir__simdf8_madd(out, add, mul1, mul2) (out) = _mm256_fmadd_ps(mul1, mul2, add)
+#define stbir__simdf8_madd_mem(out, add, mul, ptr) (out) = _mm256_fmadd_ps(mul, _mm256_loadu_ps((float const *)(ptr)), add)
+#define stbir__simdf8_madd_mem4(out, add, mul, ptr) (out) = _mm256_fmadd_ps(_mm256_setr_m128(mul, _mm_setzero_ps()), _mm256_setr_m128(_mm_loadu_ps((float const *)(ptr)), _mm_setzero_ps()), add)
+#else
+#define stbir__simdf8_madd(out, add, mul1, mul2) (out) = _mm256_add_ps(add, _mm256_mul_ps(mul1, mul2))
+#define stbir__simdf8_madd_mem(out, add, mul, ptr) (out) = _mm256_add_ps(add, _mm256_mul_ps(mul, _mm256_loadu_ps((float const *)(ptr))))
+#define stbir__simdf8_madd_mem4(out, add, mul, ptr) (out) = _mm256_add_ps(add, _mm256_setr_m128(_mm_mul_ps(mul, _mm_loadu_ps((float const *)(ptr))), _mm_setzero_ps()))
+#endif
+#define stbir__if_simdf8_cast_to_simdf4(val) _mm256_castps256_ps128(val)
+
+#endif
+
+#ifdef STBIR_FLOORF
+#undef STBIR_FLOORF
+#endif
+#define STBIR_FLOORF stbir_simd_floorf
+static stbir__inline float stbir_simd_floorf(float x) // martins floorf
 {
-    float halfscale = scale / 2;
-    float t = 0.5f + halfscale;
-    STBIR_ASSERT(scale <= 1);
-
-    x = (float)fabs(x);
-
-    if (x >= t)
-        return 0;
-    else
-    {
-        float r = 0.5f - halfscale;
-        if (x <= r)
-            return 1;
-        else
-            return (t - x) / scale;
-    }
+#if defined(STBIR_AVX) || defined(__SSE4_1__) || defined(STBIR_SSE41)
+    __m128 t = _mm_set_ss(x);
+    return _mm_cvtss_f32(_mm_floor_ss(t, t));
+#else
+    __m128 f = _mm_set_ss(x);
+    __m128 t = _mm_cvtepi32_ps(_mm_cvttps_epi32(f));
+    __m128 r = _mm_add_ss(t, _mm_and_ps(_mm_cmplt_ss(f, t), _mm_set_ss(-1.0f)));
+    return _mm_cvtss_f32(r);
+#endif
 }

-static float stbir__support_trapezoid(float scale)
+#ifdef STBIR_CEILF
+#undef STBIR_CEILF
+#endif
+#define STBIR_CEILF stbir_simd_ceilf
+static stbir__inline float stbir_simd_ceilf(float x) // martins ceilf
 {
-    STBIR_ASSERT(scale <= 1);
-    return 0.5f + scale / 2;
+#if defined(STBIR_AVX) || defined(__SSE4_1__) || defined(STBIR_SSE41)
+    __m128 t = _mm_set_ss(x);
+    return _mm_cvtss_f32(_mm_ceil_ss(t, t));
+#else
+    __m128 f = _mm_set_ss(x);
+    __m128 t = _mm_cvtepi32_ps(_mm_cvttps_epi32(f));
+    __m128 r = _mm_add_ss(t, _mm_and_ps(_mm_cmplt_ss(t, f), _mm_set_ss(1.0f)));
+    return _mm_cvtss_f32(r);
+#endif
 }

-static float stbir__filter_triangle(float x, float s)
+#elif defined(STBIR_NEON)
+
+#include <arm_neon.h>
+
+#define stbir__simdf float32x4_t
+#define stbir__simdi uint32x4_t
+
+#define stbir_simdi_castf(reg) vreinterpretq_u32_f32(reg)
+#define stbir_simdf_casti(reg) vreinterpretq_f32_u32(reg)
+
+#define stbir__simdf_load(reg, ptr) (reg) = vld1q_f32((float const *)(ptr))
+#define stbir__simdi_load(reg, ptr) (reg) = vld1q_u32((uint32_t const *)(ptr))
+#define stbir__simdf_load1(out, ptr) (out) = vld1q_dup_f32((float const *)(ptr)) // top values can be random (not denormal or nan for perf)
+#define stbir__simdi_load1(out, ptr) (out) = vld1q_dup_u32((uint32_t const *)(ptr))
+#define stbir__simdf_load1z(out, ptr) (out) = vld1q_lane_f32((float const *)(ptr), vdupq_n_f32(0), 0) // top values must be zero
+#define
stbir__simdf_frep4(fvar) vdupq_n_f32(fvar) +#define stbir__simdf_load1frep4(out, fvar) (out) = vdupq_n_f32(fvar) +#define stbir__simdf_load2(out, ptr) (out) = vcombine_f32(vld1_f32((float const *)(ptr)), vcreate_f32(0)) // top values can be random (not denormal or nan for perf) +#define stbir__simdf_load2z(out, ptr) (out) = vcombine_f32(vld1_f32((float const *)(ptr)), vcreate_f32(0)) // top values must be zero +#define stbir__simdf_load2hmerge(out, reg, ptr) (out) = vcombine_f32(vget_low_f32(reg), vld1_f32((float const *)(ptr))) + +#define stbir__simdf_zeroP() vdupq_n_f32(0) +#define stbir__simdf_zero(reg) (reg) = vdupq_n_f32(0) + +#define stbir__simdf_store(ptr, reg) vst1q_f32((float *)(ptr), reg) +#define stbir__simdf_store1(ptr, reg) vst1q_lane_f32((float *)(ptr), reg, 0) +#define stbir__simdf_store2(ptr, reg) vst1_f32((float *)(ptr), vget_low_f32(reg)) +#define stbir__simdf_store2h(ptr, reg) vst1_f32((float *)(ptr), vget_high_f32(reg)) + +#define stbir__simdi_store(ptr, reg) vst1q_u32((uint32_t *)(ptr), reg) +#define stbir__simdi_store1(ptr, reg) vst1q_lane_u32((uint32_t *)(ptr), reg, 0) +#define stbir__simdi_store2(ptr, reg) vst1_u32((uint32_t *)(ptr), vget_low_u32(reg)) + +#define stbir__prefetch(ptr) + +#define stbir__simdi_expand_u8_to_u32(out0, out1, out2, out3, ireg) \ + { \ + uint16x8_t l = vmovl_u8(vget_low_u8(vreinterpretq_u8_u32(ireg))); \ + uint16x8_t h = vmovl_u8(vget_high_u8(vreinterpretq_u8_u32(ireg))); \ + out0 = vmovl_u16(vget_low_u16(l)); \ + out1 = vmovl_u16(vget_high_u16(l)); \ + out2 = vmovl_u16(vget_low_u16(h)); \ + out3 = vmovl_u16(vget_high_u16(h)); \ + } + +#define stbir__simdi_expand_u8_to_1u32(out, ireg) \ + { \ + uint16x8_t tmp = vmovl_u8(vget_low_u8(vreinterpretq_u8_u32(ireg))); \ + out = vmovl_u16(vget_low_u16(tmp)); \ + } + +#define stbir__simdi_expand_u16_to_u32(out0, out1, ireg) \ + { \ + uint16x8_t tmp = vreinterpretq_u16_u32(ireg); \ + out0 = vmovl_u16(vget_low_u16(tmp)); \ + out1 = vmovl_u16(vget_high_u16(tmp)); \ + } + +#define stbir__simdf_convert_float_to_i32(i, f) (i) = vreinterpretq_u32_s32(vcvtq_s32_f32(f)) +#define stbir__simdf_convert_float_to_int(f) vgetq_lane_s32(vcvtq_s32_f32(f), 0) +#define stbir__simdi_to_int(i) (int)vgetq_lane_u32(i, 0) +#define stbir__simdf_convert_float_to_uint8(f) ((unsigned char)vgetq_lane_s32(vcvtq_s32_f32(vmaxq_f32(vminq_f32(f, STBIR__CONSTF(STBIR_max_uint8_as_float)), vdupq_n_f32(0))), 0)) +#define stbir__simdf_convert_float_to_short(f) ((unsigned short)vgetq_lane_s32(vcvtq_s32_f32(vmaxq_f32(vminq_f32(f, STBIR__CONSTF(STBIR_max_uint16_as_float)), vdupq_n_f32(0))), 0)) +#define stbir__simdi_convert_i32_to_float(out, ireg) (out) = vcvtq_f32_s32(vreinterpretq_s32_u32(ireg)) +#define stbir__simdf_add(out, reg0, reg1) (out) = vaddq_f32(reg0, reg1) +#define stbir__simdf_mult(out, reg0, reg1) (out) = vmulq_f32(reg0, reg1) +#define stbir__simdf_mult_mem(out, reg, ptr) (out) = vmulq_f32(reg, vld1q_f32((float const *)(ptr))) +#define stbir__simdf_mult1_mem(out, reg, ptr) (out) = vmulq_f32(reg, vld1q_dup_f32((float const *)(ptr))) +#define stbir__simdf_add_mem(out, reg, ptr) (out) = vaddq_f32(reg, vld1q_f32((float const *)(ptr))) +#define stbir__simdf_add1_mem(out, reg, ptr) (out) = vaddq_f32(reg, vld1q_dup_f32((float const *)(ptr))) + +#ifdef STBIR_USE_FMA // not on by default to maintain bit identical simd to non-simd (and also x64 no madd to arm madd) +#define stbir__simdf_madd(out, add, mul1, mul2) (out) = vfmaq_f32(add, mul1, mul2) +#define stbir__simdf_madd1(out, add, mul1, mul2) (out) = vfmaq_f32(add, mul1, mul2) 
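// [editorial aside, not part of the original diff] Why STBIR_USE_FMA is off by
// default: a fused multiply-add rounds once, while a separate multiply and add
// round twice, so results can differ in the last bit. A minimal C illustration
// (assumed example values; fmaf is standard C99 from <math.h>):
//
//    float a = 1.0f + 0x1p-12f, b = a, c = -(1.0f + 0x1p-11f);
//    float twice = a * b + c;     // product rounds to 1 + 0x1p-11, so 0.0f
//    float once  = fmaf(a, b, c); // exact product is kept, so 0x1p-24f
//
// Leaving FMA off keeps the SIMD paths bit-identical to the scalar fallback.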
+#define stbir__simdf_madd_mem(out, add, mul, ptr) (out) = vfmaq_f32(add, mul, vld1q_f32((float const *)(ptr))) +#define stbir__simdf_madd1_mem(out, add, mul, ptr) (out) = vfmaq_f32(add, mul, vld1q_dup_f32((float const *)(ptr))) +#else +#define stbir__simdf_madd(out, add, mul1, mul2) (out) = vaddq_f32(add, vmulq_f32(mul1, mul2)) +#define stbir__simdf_madd1(out, add, mul1, mul2) (out) = vaddq_f32(add, vmulq_f32(mul1, mul2)) +#define stbir__simdf_madd_mem(out, add, mul, ptr) (out) = vaddq_f32(add, vmulq_f32(mul, vld1q_f32((float const *)(ptr)))) +#define stbir__simdf_madd1_mem(out, add, mul, ptr) (out) = vaddq_f32(add, vmulq_f32(mul, vld1q_dup_f32((float const *)(ptr)))) +#endif + +#define stbir__simdf_add1(out, reg0, reg1) (out) = vaddq_f32(reg0, reg1) +#define stbir__simdf_mult1(out, reg0, reg1) (out) = vmulq_f32(reg0, reg1) + +#define stbir__simdf_and(out, reg0, reg1) (out) = vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(reg0), vreinterpretq_u32_f32(reg1))) +#define stbir__simdf_or(out, reg0, reg1) (out) = vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(reg0), vreinterpretq_u32_f32(reg1))) + +#define stbir__simdf_min(out, reg0, reg1) (out) = vminq_f32(reg0, reg1) +#define stbir__simdf_max(out, reg0, reg1) (out) = vmaxq_f32(reg0, reg1) +#define stbir__simdf_min1(out, reg0, reg1) (out) = vminq_f32(reg0, reg1) +#define stbir__simdf_max1(out, reg0, reg1) (out) = vmaxq_f32(reg0, reg1) + +#define stbir__simdf_0123ABCDto3ABx(out, reg0, reg1) (out) = vextq_f32(reg0, reg1, 3) +#define stbir__simdf_0123ABCDto23Ax(out, reg0, reg1) (out) = vextq_f32(reg0, reg1, 2) + +#define stbir__simdf_a1a1(out, alp, ones) (out) = vzipq_f32(vuzpq_f32(alp, alp).val[1], ones).val[0] +#define stbir__simdf_1a1a(out, alp, ones) (out) = vzipq_f32(ones, vuzpq_f32(alp, alp).val[0]).val[0] + +#if defined(_M_ARM64) || defined(__aarch64__) || defined(__arm64__) + +#define stbir__simdf_aaa1(out, alp, ones) (out) = vcopyq_laneq_f32(vdupq_n_f32(vgetq_lane_f32(alp, 3)), 3, ones, 3) +#define stbir__simdf_1aaa(out, alp, ones) (out) = vcopyq_laneq_f32(vdupq_n_f32(vgetq_lane_f32(alp, 0)), 0, ones, 0) + +#if defined(_MSC_VER) && !defined(__clang__) +#define stbir_make16(a, b, c, d) vcombine_u8( \ + vcreate_u8((4 * a + 0) | ((4 * a + 1) << 8) | ((4 * a + 2) << 16) | ((4 * a + 3) << 24) | \ + ((stbir_uint64)(4 * b + 0) << 32) | ((stbir_uint64)(4 * b + 1) << 40) | ((stbir_uint64)(4 * b + 2) << 48) | ((stbir_uint64)(4 * b + 3) << 56)), \ + vcreate_u8((4 * c + 0) | ((4 * c + 1) << 8) | ((4 * c + 2) << 16) | ((4 * c + 3) << 24) | \ + ((stbir_uint64)(4 * d + 0) << 32) | ((stbir_uint64)(4 * d + 1) << 40) | ((stbir_uint64)(4 * d + 2) << 48) | ((stbir_uint64)(4 * d + 3) << 56))) + +static stbir__inline uint8x16x2_t stbir_make16x2(float32x4_t rega, float32x4_t regb) { - STBIR__UNUSED_PARAM(s); + uint8x16x2_t r = {vreinterpretq_u8_f32(rega), vreinterpretq_u8_f32(regb)}; + return r; +} +#else +#define stbir_make16(a, b, c, d) (uint8x16_t){4 * a + 0, 4 * a + 1, 4 * a + 2, 4 * a + 3, 4 * b + 0, 4 * b + 1, 4 * b + 2, 4 * b + 3, 4 * c + 0, 4 * c + 1, 4 * c + 2, 4 * c + 3, 4 * d + 0, 4 * d + 1, 4 * d + 2, 4 * d + 3} +#define stbir_make16x2(a, b) \ + (uint8x16x2_t) \ + { \ + { \ + vreinterpretq_u8_f32(a), vreinterpretq_u8_f32(b) \ + } \ + } +#endif - x = (float)fabs(x); +#define stbir__simdf_swiz(reg, one, two, three, four) vreinterpretq_f32_u8(vqtbl1q_u8(vreinterpretq_u8_f32(reg), stbir_make16(one, two, three, four))) +#define stbir__simdf_swiz2(rega, regb, one, two, three, four) vreinterpretq_f32_u8(vqtbl2q_u8(stbir_make16x2(rega, regb), 
stbir_make16(one, two, three, four))) - if (x <= 1.0f) - return 1 - x; - else - return 0; +#define stbir__simdi_16madd(out, reg0, reg1) \ + { \ + int16x8_t r0 = vreinterpretq_s16_u32(reg0); \ + int16x8_t r1 = vreinterpretq_s16_u32(reg1); \ + int32x4_t tmp0 = vmull_s16(vget_low_s16(r0), vget_low_s16(r1)); \ + int32x4_t tmp1 = vmull_s16(vget_high_s16(r0), vget_high_s16(r1)); \ + (out) = vreinterpretq_u32_s32(vpaddq_s32(tmp0, tmp1)); \ + } + +#else + +#define stbir__simdf_aaa1(out, alp, ones) (out) = vsetq_lane_f32(1.0f, vdupq_n_f32(vgetq_lane_f32(alp, 3)), 3) +#define stbir__simdf_1aaa(out, alp, ones) (out) = vsetq_lane_f32(1.0f, vdupq_n_f32(vgetq_lane_f32(alp, 0)), 0) + +#if defined(_MSC_VER) && !defined(__clang__) +static stbir__inline uint8x8x2_t stbir_make8x2(float32x4_t reg) +{ + uint8x8x2_t r = {{vget_low_u8(vreinterpretq_u8_f32(reg)), vget_high_u8(vreinterpretq_u8_f32(reg))}}; + return r; +} +#define stbir_make8(a, b) vcreate_u8( \ + (4 * a + 0) | ((4 * a + 1) << 8) | ((4 * a + 2) << 16) | ((4 * a + 3) << 24) | \ + ((stbir_uint64)(4 * b + 0) << 32) | ((stbir_uint64)(4 * b + 1) << 40) | ((stbir_uint64)(4 * b + 2) << 48) | ((stbir_uint64)(4 * b + 3) << 56)) +#else +#define stbir_make8x2(reg) \ + (uint8x8x2_t) \ + { \ + { \ + vget_low_u8(vreinterpretq_u8_f32(reg)), vget_high_u8(vreinterpretq_u8_f32(reg)) \ + } \ + } +#define stbir_make8(a, b) (uint8x8_t){4 * a + 0, 4 * a + 1, 4 * a + 2, 4 * a + 3, 4 * b + 0, 4 * b + 1, 4 * b + 2, 4 * b + 3} +#endif + +#define stbir__simdf_swiz(reg, one, two, three, four) vreinterpretq_f32_u8(vcombine_u8( \ + vtbl2_u8(stbir_make8x2(reg), stbir_make8(one, two)), \ + vtbl2_u8(stbir_make8x2(reg), stbir_make8(three, four)))) + +#define stbir__simdi_16madd(out, reg0, reg1) \ + { \ + int16x8_t r0 = vreinterpretq_s16_u32(reg0); \ + int16x8_t r1 = vreinterpretq_s16_u32(reg1); \ + int32x4_t tmp0 = vmull_s16(vget_low_s16(r0), vget_low_s16(r1)); \ + int32x4_t tmp1 = vmull_s16(vget_high_s16(r0), vget_high_s16(r1)); \ + int32x2_t out0 = vpadd_s32(vget_low_s32(tmp0), vget_high_s32(tmp0)); \ + int32x2_t out1 = vpadd_s32(vget_low_s32(tmp1), vget_high_s32(tmp1)); \ + (out) = vreinterpretq_u32_s32(vcombine_s32(out0, out1)); \ + } + +#endif + +#define stbir__simdi_and(out, reg0, reg1) (out) = vandq_u32(reg0, reg1) +#define stbir__simdi_or(out, reg0, reg1) (out) = vorrq_u32(reg0, reg1) + +#define stbir__simdf_pack_to_8bytes(out, aa, bb) \ + { \ + float32x4_t af = vmaxq_f32(vminq_f32(aa, STBIR__CONSTF(STBIR_max_uint8_as_float)), vdupq_n_f32(0)); \ + float32x4_t bf = vmaxq_f32(vminq_f32(bb, STBIR__CONSTF(STBIR_max_uint8_as_float)), vdupq_n_f32(0)); \ + int16x4_t ai = vqmovn_s32(vcvtq_s32_f32(af)); \ + int16x4_t bi = vqmovn_s32(vcvtq_s32_f32(bf)); \ + uint8x8_t out8 = vqmovun_s16(vcombine_s16(ai, bi)); \ + out = vreinterpretq_u32_u8(vcombine_u8(out8, out8)); \ + } + +#define stbir__simdf_pack_to_8words(out, aa, bb) \ + { \ + float32x4_t af = vmaxq_f32(vminq_f32(aa, STBIR__CONSTF(STBIR_max_uint16_as_float)), vdupq_n_f32(0)); \ + float32x4_t bf = vmaxq_f32(vminq_f32(bb, STBIR__CONSTF(STBIR_max_uint16_as_float)), vdupq_n_f32(0)); \ + int32x4_t ai = vcvtq_s32_f32(af); \ + int32x4_t bi = vcvtq_s32_f32(bf); \ + out = vreinterpretq_u32_u16(vcombine_u16(vqmovun_s32(ai), vqmovun_s32(bi))); \ + } + +#define stbir__interleave_pack_and_store_16_u8(ptr, r0, r1, r2, r3) \ + { \ + int16x4x2_t tmp0 = vzip_s16(vqmovn_s32(vreinterpretq_s32_u32(r0)), vqmovn_s32(vreinterpretq_s32_u32(r2))); \ + int16x4x2_t tmp1 = vzip_s16(vqmovn_s32(vreinterpretq_s32_u32(r1)), 
vqmovn_s32(vreinterpretq_s32_u32(r3))); \
+        uint8x8x2_t out = \
+        {{ \
+            vqmovun_s16(vcombine_s16(tmp0.val[0], tmp0.val[1])), \
+            vqmovun_s16(vcombine_s16(tmp1.val[0], tmp1.val[1])), \
+        }}; \
+        vst2_u8(ptr, out); \
+    }
+
+#define stbir__simdf_load4_transposed(o0, o1, o2, o3, ptr) \
+    { \
+        float32x4x4_t tmp = vld4q_f32(ptr); \
+        o0 = tmp.val[0]; \
+        o1 = tmp.val[1]; \
+        o2 = tmp.val[2]; \
+        o3 = tmp.val[3]; \
+    }
+
+#define stbir__simdi_32shr(out, reg, imm) out = vshrq_n_u32(reg, imm)
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define STBIR__SIMDF_CONST(var, x) __declspec(align(8)) float var[] = {x, x, x, x}
+#define STBIR__SIMDI_CONST(var, x) __declspec(align(8)) uint32_t var[] = {x, x, x, x}
+#define STBIR__CONSTF(var) (*(const float32x4_t *)var)
+#define STBIR__CONSTI(var) (*(const uint32x4_t *)var)
+#else
+#define STBIR__SIMDF_CONST(var, x) stbir__simdf var = {x, x, x, x}
+#define STBIR__SIMDI_CONST(var, x) stbir__simdi var = {x, x, x, x}
+#define STBIR__CONSTF(var) (var)
+#define STBIR__CONSTI(var) (var)
+#endif
+
+#ifdef STBIR_FLOORF
+#undef STBIR_FLOORF
+#endif
+#define STBIR_FLOORF stbir_simd_floorf
+static stbir__inline float stbir_simd_floorf(float x)
+{
+#if defined(_M_ARM64) || defined(__aarch64__) || defined(__arm64__)
+    return vget_lane_f32(vrndm_f32(vdup_n_f32(x)), 0);
+#else
+    float32x2_t f = vdup_n_f32(x);
+    float32x2_t t = vcvt_f32_s32(vcvt_s32_f32(f));
+    uint32x2_t a = vclt_f32(f, t);
+    uint32x2_t b = vreinterpret_u32_f32(vdup_n_f32(-1.0f));
+    float32x2_t r = vadd_f32(t, vreinterpret_f32_u32(vand_u32(a, b)));
+    return vget_lane_f32(r, 0);
+#endif
 }

-static float stbir__filter_cubic(float x, float s)
+#ifdef STBIR_CEILF
+#undef STBIR_CEILF
+#endif
+#define STBIR_CEILF stbir_simd_ceilf
+static stbir__inline float stbir_simd_ceilf(float x)
 {
-    STBIR__UNUSED_PARAM(s);
-
-    x = (float)fabs(x);
-
-    if (x < 1.0f)
-        return (4 + x * x * (3 * x - 6)) / 6;
-    else if (x < 2.0f)
-        return (8 + x * (-12 + x * (6 - x))) / 6;
-
-    return (0.0f);
+#if defined(_M_ARM64) || defined(__aarch64__) || defined(__arm64__)
+    return vget_lane_f32(vrndp_f32(vdup_n_f32(x)), 0);
+#else
+    float32x2_t f = vdup_n_f32(x);
+    float32x2_t t = vcvt_f32_s32(vcvt_s32_f32(f));
+    uint32x2_t a = vclt_f32(t, f);
+    uint32x2_t b = vreinterpret_u32_f32(vdup_n_f32(1.0f));
+    float32x2_t r = vadd_f32(t, vreinterpret_f32_u32(vand_u32(a, b)));
+    return vget_lane_f32(r, 0);
+#endif
 }

-static float stbir__filter_catmullrom(float x, float s)
+#define STBIR_SIMD
+
+#elif defined(STBIR_WASM)
+
+#include <wasm_simd128.h>
+
+#define stbir__simdf v128_t
+#define stbir__simdi v128_t
+
+#define stbir_simdi_castf(reg) (reg)
+#define stbir_simdf_casti(reg) (reg)
+
+#define stbir__simdf_load(reg, ptr) (reg) = wasm_v128_load((void const *)(ptr))
+#define stbir__simdi_load(reg, ptr) (reg) = wasm_v128_load((void const *)(ptr))
+#define stbir__simdf_load1(out, ptr) (out) = wasm_v128_load32_splat((void const *)(ptr)) // top values can be random (not denormal or nan for perf)
+#define stbir__simdi_load1(out, ptr) (out) = wasm_v128_load32_splat((void const *)(ptr))
+#define stbir__simdf_load1z(out, ptr) (out) = wasm_v128_load32_zero((void const *)(ptr)) // top values must be zero
+#define stbir__simdf_frep4(fvar) wasm_f32x4_splat(fvar)
+#define stbir__simdf_load1frep4(out, fvar) (out) = wasm_f32x4_splat(fvar)
+#define stbir__simdf_load2(out, ptr) (out) = wasm_v128_load64_splat((void const *)(ptr)) // top values can be random (not denormal or nan for perf)
+#define stbir__simdf_load2z(out, ptr) (out) = wasm_v128_load64_zero((void const *)(ptr)) // top
values must be zero +#define stbir__simdf_load2hmerge(out, reg, ptr) (out) = wasm_v128_load64_lane((void const *)(ptr), reg, 1) + +#define stbir__simdf_zeroP() wasm_f32x4_const_splat(0) +#define stbir__simdf_zero(reg) (reg) = wasm_f32x4_const_splat(0) + +#define stbir__simdf_store(ptr, reg) wasm_v128_store((void *)(ptr), reg) +#define stbir__simdf_store1(ptr, reg) wasm_v128_store32_lane((void *)(ptr), reg, 0) +#define stbir__simdf_store2(ptr, reg) wasm_v128_store64_lane((void *)(ptr), reg, 0) +#define stbir__simdf_store2h(ptr, reg) wasm_v128_store64_lane((void *)(ptr), reg, 1) + +#define stbir__simdi_store(ptr, reg) wasm_v128_store((void *)(ptr), reg) +#define stbir__simdi_store1(ptr, reg) wasm_v128_store32_lane((void *)(ptr), reg, 0) +#define stbir__simdi_store2(ptr, reg) wasm_v128_store64_lane((void *)(ptr), reg, 0) + +#define stbir__prefetch(ptr) + +#define stbir__simdi_expand_u8_to_u32(out0, out1, out2, out3, ireg) \ + { \ + v128_t l = wasm_u16x8_extend_low_u8x16(ireg); \ + v128_t h = wasm_u16x8_extend_high_u8x16(ireg); \ + out0 = wasm_u32x4_extend_low_u16x8(l); \ + out1 = wasm_u32x4_extend_high_u16x8(l); \ + out2 = wasm_u32x4_extend_low_u16x8(h); \ + out3 = wasm_u32x4_extend_high_u16x8(h); \ + } + +#define stbir__simdi_expand_u8_to_1u32(out, ireg) \ + { \ + v128_t tmp = wasm_u16x8_extend_low_u8x16(ireg); \ + out = wasm_u32x4_extend_low_u16x8(tmp); \ + } + +#define stbir__simdi_expand_u16_to_u32(out0, out1, ireg) \ + { \ + out0 = wasm_u32x4_extend_low_u16x8(ireg); \ + out1 = wasm_u32x4_extend_high_u16x8(ireg); \ + } + +#define stbir__simdf_convert_float_to_i32(i, f) (i) = wasm_i32x4_trunc_sat_f32x4(f) +#define stbir__simdf_convert_float_to_int(f) wasm_i32x4_extract_lane(wasm_i32x4_trunc_sat_f32x4(f), 0) +#define stbir__simdi_to_int(i) wasm_i32x4_extract_lane(i, 0) +#define stbir__simdf_convert_float_to_uint8(f) ((unsigned char)wasm_i32x4_extract_lane(wasm_i32x4_trunc_sat_f32x4(wasm_f32x4_max(wasm_f32x4_min(f, STBIR_max_uint8_as_float), wasm_f32x4_const_splat(0))), 0)) +#define stbir__simdf_convert_float_to_short(f) ((unsigned short)wasm_i32x4_extract_lane(wasm_i32x4_trunc_sat_f32x4(wasm_f32x4_max(wasm_f32x4_min(f, STBIR_max_uint16_as_float), wasm_f32x4_const_splat(0))), 0)) +#define stbir__simdi_convert_i32_to_float(out, ireg) (out) = wasm_f32x4_convert_i32x4(ireg) +#define stbir__simdf_add(out, reg0, reg1) (out) = wasm_f32x4_add(reg0, reg1) +#define stbir__simdf_mult(out, reg0, reg1) (out) = wasm_f32x4_mul(reg0, reg1) +#define stbir__simdf_mult_mem(out, reg, ptr) (out) = wasm_f32x4_mul(reg, wasm_v128_load((void const *)(ptr))) +#define stbir__simdf_mult1_mem(out, reg, ptr) (out) = wasm_f32x4_mul(reg, wasm_v128_load32_splat((void const *)(ptr))) +#define stbir__simdf_add_mem(out, reg, ptr) (out) = wasm_f32x4_add(reg, wasm_v128_load((void const *)(ptr))) +#define stbir__simdf_add1_mem(out, reg, ptr) (out) = wasm_f32x4_add(reg, wasm_v128_load32_splat((void const *)(ptr))) + +#define stbir__simdf_madd(out, add, mul1, mul2) (out) = wasm_f32x4_add(add, wasm_f32x4_mul(mul1, mul2)) +#define stbir__simdf_madd1(out, add, mul1, mul2) (out) = wasm_f32x4_add(add, wasm_f32x4_mul(mul1, mul2)) +#define stbir__simdf_madd_mem(out, add, mul, ptr) (out) = wasm_f32x4_add(add, wasm_f32x4_mul(mul, wasm_v128_load((void const *)(ptr)))) +#define stbir__simdf_madd1_mem(out, add, mul, ptr) (out) = wasm_f32x4_add(add, wasm_f32x4_mul(mul, wasm_v128_load32_splat((void const *)(ptr)))) + +#define stbir__simdf_add1(out, reg0, reg1) (out) = wasm_f32x4_add(reg0, reg1) +#define stbir__simdf_mult1(out, reg0, reg1) (out) 
= wasm_f32x4_mul(reg0, reg1) + +#define stbir__simdf_and(out, reg0, reg1) (out) = wasm_v128_and(reg0, reg1) +#define stbir__simdf_or(out, reg0, reg1) (out) = wasm_v128_or(reg0, reg1) + +#define stbir__simdf_min(out, reg0, reg1) (out) = wasm_f32x4_min(reg0, reg1) +#define stbir__simdf_max(out, reg0, reg1) (out) = wasm_f32x4_max(reg0, reg1) +#define stbir__simdf_min1(out, reg0, reg1) (out) = wasm_f32x4_min(reg0, reg1) +#define stbir__simdf_max1(out, reg0, reg1) (out) = wasm_f32x4_max(reg0, reg1) + +#define stbir__simdf_0123ABCDto3ABx(out, reg0, reg1) (out) = wasm_i32x4_shuffle(reg0, reg1, 3, 4, 5, -1) +#define stbir__simdf_0123ABCDto23Ax(out, reg0, reg1) (out) = wasm_i32x4_shuffle(reg0, reg1, 2, 3, 4, -1) + +#define stbir__simdf_aaa1(out, alp, ones) (out) = wasm_i32x4_shuffle(alp, ones, 3, 3, 3, 4) +#define stbir__simdf_1aaa(out, alp, ones) (out) = wasm_i32x4_shuffle(alp, ones, 4, 0, 0, 0) +#define stbir__simdf_a1a1(out, alp, ones) (out) = wasm_i32x4_shuffle(alp, ones, 1, 4, 3, 4) +#define stbir__simdf_1a1a(out, alp, ones) (out) = wasm_i32x4_shuffle(alp, ones, 4, 0, 4, 2) + +#define stbir__simdf_swiz(reg, one, two, three, four) wasm_i32x4_shuffle(reg, reg, one, two, three, four) + +#define stbir__simdi_and(out, reg0, reg1) (out) = wasm_v128_and(reg0, reg1) +#define stbir__simdi_or(out, reg0, reg1) (out) = wasm_v128_or(reg0, reg1) +#define stbir__simdi_16madd(out, reg0, reg1) (out) = wasm_i32x4_dot_i16x8(reg0, reg1) + +#define stbir__simdf_pack_to_8bytes(out, aa, bb) \ + { \ + v128_t af = wasm_f32x4_max(wasm_f32x4_min(aa, STBIR_max_uint8_as_float), wasm_f32x4_const_splat(0)); \ + v128_t bf = wasm_f32x4_max(wasm_f32x4_min(bb, STBIR_max_uint8_as_float), wasm_f32x4_const_splat(0)); \ + v128_t ai = wasm_i32x4_trunc_sat_f32x4(af); \ + v128_t bi = wasm_i32x4_trunc_sat_f32x4(bf); \ + v128_t out16 = wasm_i16x8_narrow_i32x4(ai, bi); \ + out = wasm_u8x16_narrow_i16x8(out16, out16); \ + } + +#define stbir__simdf_pack_to_8words(out, aa, bb) \ + { \ + v128_t af = wasm_f32x4_max(wasm_f32x4_min(aa, STBIR_max_uint16_as_float), wasm_f32x4_const_splat(0)); \ + v128_t bf = wasm_f32x4_max(wasm_f32x4_min(bb, STBIR_max_uint16_as_float), wasm_f32x4_const_splat(0)); \ + v128_t ai = wasm_i32x4_trunc_sat_f32x4(af); \ + v128_t bi = wasm_i32x4_trunc_sat_f32x4(bf); \ + out = wasm_u16x8_narrow_i32x4(ai, bi); \ + } + +#define stbir__interleave_pack_and_store_16_u8(ptr, r0, r1, r2, r3) \ + { \ + v128_t tmp0 = wasm_i16x8_narrow_i32x4(r0, r1); \ + v128_t tmp1 = wasm_i16x8_narrow_i32x4(r2, r3); \ + v128_t tmp = wasm_u8x16_narrow_i16x8(tmp0, tmp1); \ + tmp = wasm_i8x16_shuffle(tmp, tmp, 0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15); \ + wasm_v128_store((void *)(ptr), tmp); \ + } + +#define stbir__simdf_load4_transposed(o0, o1, o2, o3, ptr) \ + { \ + v128_t t0 = wasm_v128_load(ptr); \ + v128_t t1 = wasm_v128_load(ptr + 4); \ + v128_t t2 = wasm_v128_load(ptr + 8); \ + v128_t t3 = wasm_v128_load(ptr + 12); \ + v128_t s0 = wasm_i32x4_shuffle(t0, t1, 0, 4, 2, 6); \ + v128_t s1 = wasm_i32x4_shuffle(t0, t1, 1, 5, 3, 7); \ + v128_t s2 = wasm_i32x4_shuffle(t2, t3, 0, 4, 2, 6); \ + v128_t s3 = wasm_i32x4_shuffle(t2, t3, 1, 5, 3, 7); \ + o0 = wasm_i32x4_shuffle(s0, s2, 0, 1, 4, 5); \ + o1 = wasm_i32x4_shuffle(s1, s3, 0, 1, 4, 5); \ + o2 = wasm_i32x4_shuffle(s0, s2, 2, 3, 6, 7); \ + o3 = wasm_i32x4_shuffle(s1, s3, 2, 3, 6, 7); \ + } + +#define stbir__simdi_32shr(out, reg, imm) out = wasm_u32x4_shr(reg, imm) + +typedef float stbir__f32x4 __attribute__((__vector_size__(16), __aligned__(16))); +#define STBIR__SIMDF_CONST(var, x) \ + 
stbir__simdf var = (v128_t)(stbir__f32x4) { x, x, x, x } +#define STBIR__SIMDI_CONST(var, x) stbir__simdi var = {x, x, x, x} +#define STBIR__CONSTF(var) (var) +#define STBIR__CONSTI(var) (var) + +#ifdef STBIR_FLOORF +#undef STBIR_FLOORF +#endif +#define STBIR_FLOORF stbir_simd_floorf +static stbir__inline float stbir_simd_floorf(float x) { - STBIR__UNUSED_PARAM(s); - - x = (float)fabs(x); - - if (x < 1.0f) - return 1 - x * x * (2.5f - 1.5f * x); - else if (x < 2.0f) - return 2 - x * (4 + x * (0.5f * x - 2.5f)); - - return (0.0f); + return wasm_f32x4_extract_lane(wasm_f32x4_floor(wasm_f32x4_splat(x)), 0); } -static float stbir__filter_mitchell(float x, float s) +#ifdef STBIR_CEILF +#undef STBIR_CEILF +#endif +#define STBIR_CEILF stbir_simd_ceilf +static stbir__inline float stbir_simd_ceilf(float x) { - STBIR__UNUSED_PARAM(s); - - x = (float)fabs(x); - - if (x < 1.0f) - return (16 + x * x * (21 * x - 36)) / 18; - else if (x < 2.0f) - return (32 + x * (-60 + x * (36 - 7 * x))) / 18; - - return (0.0f); + return wasm_f32x4_extract_lane(wasm_f32x4_ceil(wasm_f32x4_splat(x)), 0); } -static float stbir__support_zero(float s) +#define STBIR_SIMD + +#endif // SSE2/NEON/WASM + +#endif // NO SIMD + +#ifdef STBIR_SIMD8 +#define stbir__simdfX stbir__simdf8 +#define stbir__simdiX stbir__simdi8 +#define stbir__simdfX_load stbir__simdf8_load +#define stbir__simdiX_load stbir__simdi8_load +#define stbir__simdfX_mult stbir__simdf8_mult +#define stbir__simdfX_add_mem stbir__simdf8_add_mem +#define stbir__simdfX_madd_mem stbir__simdf8_madd_mem +#define stbir__simdfX_store stbir__simdf8_store +#define stbir__simdiX_store stbir__simdi8_store +#define stbir__simdf_frepX stbir__simdf8_frep8 +#define stbir__simdfX_madd stbir__simdf8_madd +#define stbir__simdfX_min stbir__simdf8_min +#define stbir__simdfX_max stbir__simdf8_max +#define stbir__simdfX_aaa1 stbir__simdf8_aaa1 +#define stbir__simdfX_1aaa stbir__simdf8_1aaa +#define stbir__simdfX_a1a1 stbir__simdf8_a1a1 +#define stbir__simdfX_1a1a stbir__simdf8_1a1a +#define stbir__simdfX_convert_float_to_i32 stbir__simdf8_convert_float_to_i32 +#define stbir__simdfX_pack_to_words stbir__simdf8_pack_to_16words +#define stbir__simdfX_zero stbir__simdf8_zero +#define STBIR_onesX STBIR_ones8 +#define STBIR_max_uint8_as_floatX STBIR_max_uint8_as_float8 +#define STBIR_max_uint16_as_floatX STBIR_max_uint16_as_float8 +#define STBIR_simd_point5X STBIR_simd_point58 +#define stbir__simdfX_float_count 8 +#define stbir__simdfX_0123to1230 stbir__simdf8_0123to12301230 +#define stbir__simdfX_0123to2103 stbir__simdf8_0123to21032103 +static const stbir__simdf8 STBIR_max_uint16_as_float_inverted8 = {stbir__max_uint16_as_float_inverted, stbir__max_uint16_as_float_inverted, stbir__max_uint16_as_float_inverted, stbir__max_uint16_as_float_inverted, stbir__max_uint16_as_float_inverted, stbir__max_uint16_as_float_inverted, stbir__max_uint16_as_float_inverted, stbir__max_uint16_as_float_inverted}; +static const stbir__simdf8 STBIR_max_uint8_as_float_inverted8 = {stbir__max_uint8_as_float_inverted, stbir__max_uint8_as_float_inverted, stbir__max_uint8_as_float_inverted, stbir__max_uint8_as_float_inverted, stbir__max_uint8_as_float_inverted, stbir__max_uint8_as_float_inverted, stbir__max_uint8_as_float_inverted, stbir__max_uint8_as_float_inverted}; +static const stbir__simdf8 STBIR_ones8 = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0}; +static const stbir__simdf8 STBIR_simd_point58 = {0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5}; +static const stbir__simdf8 STBIR_max_uint8_as_float8 = 
{stbir__max_uint8_as_float, stbir__max_uint8_as_float, stbir__max_uint8_as_float, stbir__max_uint8_as_float, stbir__max_uint8_as_float, stbir__max_uint8_as_float, stbir__max_uint8_as_float, stbir__max_uint8_as_float}; +static const stbir__simdf8 STBIR_max_uint16_as_float8 = {stbir__max_uint16_as_float, stbir__max_uint16_as_float, stbir__max_uint16_as_float, stbir__max_uint16_as_float, stbir__max_uint16_as_float, stbir__max_uint16_as_float, stbir__max_uint16_as_float, stbir__max_uint16_as_float}; +#else +#define stbir__simdfX stbir__simdf +#define stbir__simdiX stbir__simdi +#define stbir__simdfX_load stbir__simdf_load +#define stbir__simdiX_load stbir__simdi_load +#define stbir__simdfX_mult stbir__simdf_mult +#define stbir__simdfX_add_mem stbir__simdf_add_mem +#define stbir__simdfX_madd_mem stbir__simdf_madd_mem +#define stbir__simdfX_store stbir__simdf_store +#define stbir__simdiX_store stbir__simdi_store +#define stbir__simdf_frepX stbir__simdf_frep4 +#define stbir__simdfX_madd stbir__simdf_madd +#define stbir__simdfX_min stbir__simdf_min +#define stbir__simdfX_max stbir__simdf_max +#define stbir__simdfX_aaa1 stbir__simdf_aaa1 +#define stbir__simdfX_1aaa stbir__simdf_1aaa +#define stbir__simdfX_a1a1 stbir__simdf_a1a1 +#define stbir__simdfX_1a1a stbir__simdf_1a1a +#define stbir__simdfX_convert_float_to_i32 stbir__simdf_convert_float_to_i32 +#define stbir__simdfX_pack_to_words stbir__simdf_pack_to_8words +#define stbir__simdfX_zero stbir__simdf_zero +#define STBIR_onesX STBIR__CONSTF(STBIR_ones) +#define STBIR_simd_point5X STBIR__CONSTF(STBIR_simd_point5) +#define STBIR_max_uint8_as_floatX STBIR__CONSTF(STBIR_max_uint8_as_float) +#define STBIR_max_uint16_as_floatX STBIR__CONSTF(STBIR_max_uint16_as_float) +#define stbir__simdfX_float_count 4 +#define stbir__if_simdf8_cast_to_simdf4(val) (val) +#define stbir__simdfX_0123to1230 stbir__simdf_0123to1230 +#define stbir__simdfX_0123to2103 stbir__simdf_0123to2103 +#endif + +#if defined(STBIR_NEON) && !defined(_M_ARM) && !defined(__arm__) + +#if defined(_MSC_VER) && !defined(__clang__) +typedef __int16 stbir__FP16; +#else +typedef float16_t stbir__FP16; +#endif + +#else // no NEON, or 32-bit ARM for MSVC + +typedef union stbir__FP16 { - STBIR__UNUSED_PARAM(s); - return 0; + unsigned short u; +} stbir__FP16; + +#endif + +#if (!defined(STBIR_NEON) && !defined(STBIR_FP16C)) || (defined(STBIR_NEON) && defined(_M_ARM)) || (defined(STBIR_NEON) && defined(__arm__)) + +// Fabian's half float routines, see: https://gist.github.com/rygorous/2156668 + +static stbir__inline float stbir__half_to_float(stbir__FP16 h) +{ + static const stbir__FP32 magic = {(254 - 15) << 23}; + static const stbir__FP32 was_infnan = {(127 + 16) << 23}; + stbir__FP32 o; + + o.u = (h.u & 0x7fff) << 13; // exponent/mantissa bits + o.f *= magic.f; // exponent adjust + if (o.f >= was_infnan.f) // make sure Inf/NaN survive + o.u |= 255 << 23; + o.u |= (h.u & 0x8000) << 16; // sign bit + return o.f; } -static float stbir__support_one(float s) +static stbir__inline stbir__FP16 stbir__float_to_half(float val) { - STBIR__UNUSED_PARAM(s); - return 1; + stbir__FP32 f32infty = {255 << 23}; + stbir__FP32 f16max = {(127 + 16) << 23}; + stbir__FP32 denorm_magic = {((127 - 15) + (23 - 10) + 1) << 23}; + unsigned int sign_mask = 0x80000000u; + stbir__FP16 o = {0}; + stbir__FP32 f; + unsigned int sign; + + f.f = val; + sign = f.u & sign_mask; + f.u ^= sign; + + if (f.u >= f16max.u) // result is Inf or NaN (all exponent bits set) + o.u = (f.u > f32infty.u) ? 
0x7e00 : 0x7c00; // NaN->qNaN and Inf->Inf
+    else // (De)normalized number or zero
+    {
+        if (f.u < (113 << 23)) // resulting FP16 is subnormal or zero
+        {
+            // use a magic value to align our 10 mantissa bits at the bottom of
+            // the float. as long as FP addition is round-to-nearest-even this
+            // just works.
+            f.f += denorm_magic.f;
+            // and one integer subtract of the bias later, we have our final float!
+            o.u = (unsigned short)(f.u - denorm_magic.u);
+        }
+        else
+        {
+            unsigned int mant_odd = (f.u >> 13) & 1; // resulting mantissa is odd
+            // update exponent, rounding bias part 1
+            f.u = f.u + ((15u - 127) << 23) + 0xfff;
+            // rounding bias part 2
+            f.u += mant_odd;
+            // take the bits!
+            o.u = (unsigned short)(f.u >> 13);
+        }
+    }
+
+    o.u |= sign >> 16;
+    return o;
 }

-static float stbir__support_two(float s)
+#endif
+
+#if defined(STBIR_FP16C)
+
+#include <immintrin.h>
+
+static stbir__inline void stbir__half_to_float_SIMD(float *output, stbir__FP16 const *input)
 {
-    STBIR__UNUSED_PARAM(s);
-    return 2;
+    _mm256_storeu_ps((float *)output, _mm256_cvtph_ps(_mm_loadu_si128((__m128i const *)input)));
 }

-static stbir__filter_info stbir__filter_info_table[] = {
-    {NULL, stbir__support_zero},
-    {stbir__filter_trapezoid, stbir__support_trapezoid},
-    {stbir__filter_triangle, stbir__support_one},
-    {stbir__filter_cubic, stbir__support_two},
-    {stbir__filter_catmullrom, stbir__support_two},
-    {stbir__filter_mitchell, stbir__support_two},
-};
-
-stbir__inline static int stbir__use_upsampling(float ratio)
+static stbir__inline void stbir__float_to_half_SIMD(stbir__FP16 *output, float const *input)
 {
-    return ratio > 1;
+    _mm_storeu_si128((__m128i *)output, _mm256_cvtps_ph(_mm256_loadu_ps(input), 0));
 }

-stbir__inline static int stbir__use_width_upsampling(stbir__info *stbir_info)
+static stbir__inline float stbir__half_to_float(stbir__FP16 h)
 {
-    return stbir__use_upsampling(stbir_info->horizontal_scale);
+    return _mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128((int)h.u)));
 }

-stbir__inline static int stbir__use_height_upsampling(stbir__info *stbir_info)
+static stbir__inline stbir__FP16 stbir__float_to_half(float f)
 {
-    return stbir__use_upsampling(stbir_info->vertical_scale);
+    stbir__FP16 h;
+    h.u = (unsigned short)_mm_cvtsi128_si32(_mm_cvtps_ph(_mm_set_ss(f), 0));
+    return h;
+}
+
+#elif defined(STBIR_SSE2)
+
+// Fabian's half float routines, see: https://gist.github.com/rygorous/2156668
+stbir__inline static void stbir__half_to_float_SIMD(float *output, void const *input)
+{
+    static const STBIR__SIMDI_CONST(mask_nosign, 0x7fff);
+    static const STBIR__SIMDI_CONST(smallest_normal, 0x0400);
+    static const STBIR__SIMDI_CONST(infinity, 0x7c00);
+    static const STBIR__SIMDI_CONST(expadjust_normal, (127 - 15) << 23);
+    static const STBIR__SIMDI_CONST(magic_denorm, 113 << 23);
+
+    __m128i i = _mm_loadu_si128((__m128i const *)(input));
+    __m128i h = _mm_unpacklo_epi16(i, _mm_setzero_si128());
+    __m128i mnosign = STBIR__CONSTI(mask_nosign);
+    __m128i eadjust = STBIR__CONSTI(expadjust_normal);
+    __m128i smallest = STBIR__CONSTI(smallest_normal);
+    __m128i infty = STBIR__CONSTI(infinity);
+    __m128i expmant = _mm_and_si128(mnosign, h);
+    __m128i justsign = _mm_xor_si128(h, expmant);
+    __m128i b_notinfnan = _mm_cmpgt_epi32(infty, expmant);
+    __m128i b_isdenorm = _mm_cmpgt_epi32(smallest, expmant);
+    __m128i shifted = _mm_slli_epi32(expmant, 13);
+    __m128i adj_infnan = _mm_andnot_si128(b_notinfnan, eadjust);
+    __m128i adjusted = _mm_add_epi32(eadjust, shifted);
+    __m128i den1 = _mm_add_epi32(shifted, STBIR__CONSTI(magic_denorm));
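/* [editorial note, not part of the original diff] the den1/den2 pair handles
   subnormal halves branchlessly: for a subnormal input the exponent bits are
   zero, so expmant is just the 10-bit mantissa m, and den1 reinterprets
   (113 << 23) + (m << 13) as the float 2^-14 + m * 2^-24. Subtracting the
   same magic constant as a float (den2, below) leaves exactly m * 2^-24, the
   value of the subnormal half; the b_isdenorm mask then selects that path. */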
+ __m128i adjusted2 = _mm_add_epi32(adjusted, adj_infnan); + __m128 den2 = _mm_sub_ps(_mm_castsi128_ps(den1), *(const __m128 *)&magic_denorm); + __m128 adjusted3 = _mm_and_ps(den2, _mm_castsi128_ps(b_isdenorm)); + __m128 adjusted4 = _mm_andnot_ps(_mm_castsi128_ps(b_isdenorm), _mm_castsi128_ps(adjusted2)); + __m128 adjusted5 = _mm_or_ps(adjusted3, adjusted4); + __m128i sign = _mm_slli_epi32(justsign, 16); + __m128 final = _mm_or_ps(adjusted5, _mm_castsi128_ps(sign)); + stbir__simdf_store(output + 0, final); + + h = _mm_unpackhi_epi16(i, _mm_setzero_si128()); + expmant = _mm_and_si128(mnosign, h); + justsign = _mm_xor_si128(h, expmant); + b_notinfnan = _mm_cmpgt_epi32(infty, expmant); + b_isdenorm = _mm_cmpgt_epi32(smallest, expmant); + shifted = _mm_slli_epi32(expmant, 13); + adj_infnan = _mm_andnot_si128(b_notinfnan, eadjust); + adjusted = _mm_add_epi32(eadjust, shifted); + den1 = _mm_add_epi32(shifted, STBIR__CONSTI(magic_denorm)); + adjusted2 = _mm_add_epi32(adjusted, adj_infnan); + den2 = _mm_sub_ps(_mm_castsi128_ps(den1), *(const __m128 *)&magic_denorm); + adjusted3 = _mm_and_ps(den2, _mm_castsi128_ps(b_isdenorm)); + adjusted4 = _mm_andnot_ps(_mm_castsi128_ps(b_isdenorm), _mm_castsi128_ps(adjusted2)); + adjusted5 = _mm_or_ps(adjusted3, adjusted4); + sign = _mm_slli_epi32(justsign, 16); + final = _mm_or_ps(adjusted5, _mm_castsi128_ps(sign)); + stbir__simdf_store(output + 4, final); + + // ~38 SSE2 ops for 8 values +} + +// Fabian's round-to-nearest-even float to half +// ~48 SSE2 ops for 8 output +stbir__inline static void stbir__float_to_half_SIMD(void *output, float const *input) +{ + static const STBIR__SIMDI_CONST(mask_sign, 0x80000000u); + static const STBIR__SIMDI_CONST(c_f16max, (127 + 16) << 23); // all FP32 values >=this round to +inf + static const STBIR__SIMDI_CONST(c_nanbit, 0x200); + static const STBIR__SIMDI_CONST(c_infty_as_fp16, 0x7c00); + static const STBIR__SIMDI_CONST(c_min_normal, (127 - 14) << 23); // smallest FP32 that yields a normalized FP16 + static const STBIR__SIMDI_CONST(c_subnorm_magic, ((127 - 15) + (23 - 10) + 1) << 23); + static const STBIR__SIMDI_CONST(c_normal_bias, 0xfff - ((127 - 15) << 23)); // adjust exponent and add mantissa rounding + + __m128 f = _mm_loadu_ps(input); + __m128 msign = _mm_castsi128_ps(STBIR__CONSTI(mask_sign)); + __m128 justsign = _mm_and_ps(msign, f); + __m128 absf = _mm_xor_ps(f, justsign); + __m128i absf_int = _mm_castps_si128(absf); // the cast is "free" (extra bypass latency, but no thruput hit) + __m128i f16max = STBIR__CONSTI(c_f16max); + __m128 b_isnan = _mm_cmpunord_ps(absf, absf); // is this a NaN? + __m128i b_isregular = _mm_cmpgt_epi32(f16max, absf_int); // (sub)normalized or special? 
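/* [editorial note, not part of the original diff] from here down the
   conversion is a branchless select: the subnormal path adds a magic float so
   FP rounding itself produces the 10-bit result, the normal path re-biases
   the exponent and adds 0xfff plus the mantissa LSB so the final >> 13
   truncation rounds to nearest-even, and the b_issub / b_isregular masks pick
   between those two results and the Inf/NaN encoding. */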
+ __m128i nanbit = _mm_and_si128(_mm_castps_si128(b_isnan), STBIR__CONSTI(c_nanbit)); + __m128i inf_or_nan = _mm_or_si128(nanbit, STBIR__CONSTI(c_infty_as_fp16)); // output for specials + + __m128i min_normal = STBIR__CONSTI(c_min_normal); + __m128i b_issub = _mm_cmpgt_epi32(min_normal, absf_int); + + // "result is subnormal" path + __m128 subnorm1 = _mm_add_ps(absf, _mm_castsi128_ps(STBIR__CONSTI(c_subnorm_magic))); // magic value to round output mantissa + __m128i subnorm2 = _mm_sub_epi32(_mm_castps_si128(subnorm1), STBIR__CONSTI(c_subnorm_magic)); // subtract out bias + + // "result is normal" path + __m128i mantoddbit = _mm_slli_epi32(absf_int, 31 - 13); // shift bit 13 (mantissa LSB) to sign + __m128i mantodd = _mm_srai_epi32(mantoddbit, 31); // -1 if FP16 mantissa odd, else 0 + + __m128i round1 = _mm_add_epi32(absf_int, STBIR__CONSTI(c_normal_bias)); + __m128i round2 = _mm_sub_epi32(round1, mantodd); // if mantissa LSB odd, bias towards rounding up (RTNE) + __m128i normal = _mm_srli_epi32(round2, 13); // rounded result + + // combine the two non-specials + __m128i nonspecial = _mm_or_si128(_mm_and_si128(subnorm2, b_issub), _mm_andnot_si128(b_issub, normal)); + + // merge in specials as well + __m128i joined = _mm_or_si128(_mm_and_si128(nonspecial, b_isregular), _mm_andnot_si128(b_isregular, inf_or_nan)); + + __m128i sign_shift = _mm_srai_epi32(_mm_castps_si128(justsign), 16); + __m128i final2, final = _mm_or_si128(joined, sign_shift); + + f = _mm_loadu_ps(input + 4); + justsign = _mm_and_ps(msign, f); + absf = _mm_xor_ps(f, justsign); + absf_int = _mm_castps_si128(absf); // the cast is "free" (extra bypass latency, but no thruput hit) + b_isnan = _mm_cmpunord_ps(absf, absf); // is this a NaN? + b_isregular = _mm_cmpgt_epi32(f16max, absf_int); // (sub)normalized or special? 
+ nanbit = _mm_and_si128(_mm_castps_si128(b_isnan), c_nanbit); + inf_or_nan = _mm_or_si128(nanbit, STBIR__CONSTI(c_infty_as_fp16)); // output for specials + + b_issub = _mm_cmpgt_epi32(min_normal, absf_int); + + // "result is subnormal" path + subnorm1 = _mm_add_ps(absf, _mm_castsi128_ps(STBIR__CONSTI(c_subnorm_magic))); // magic value to round output mantissa + subnorm2 = _mm_sub_epi32(_mm_castps_si128(subnorm1), STBIR__CONSTI(c_subnorm_magic)); // subtract out bias + + // "result is normal" path + mantoddbit = _mm_slli_epi32(absf_int, 31 - 13); // shift bit 13 (mantissa LSB) to sign + mantodd = _mm_srai_epi32(mantoddbit, 31); // -1 if FP16 mantissa odd, else 0 + + round1 = _mm_add_epi32(absf_int, STBIR__CONSTI(c_normal_bias)); + round2 = _mm_sub_epi32(round1, mantodd); // if mantissa LSB odd, bias towards rounding up (RTNE) + normal = _mm_srli_epi32(round2, 13); // rounded result + + // combine the two non-specials + nonspecial = _mm_or_si128(_mm_and_si128(subnorm2, b_issub), _mm_andnot_si128(b_issub, normal)); + + // merge in specials as well + joined = _mm_or_si128(_mm_and_si128(nonspecial, b_isregular), _mm_andnot_si128(b_isregular, inf_or_nan)); + + sign_shift = _mm_srai_epi32(_mm_castps_si128(justsign), 16); + final2 = _mm_or_si128(joined, sign_shift); + final = _mm_packs_epi32(final, final2); + stbir__simdi_store(output, final); +} + +#elif defined(STBIR_NEON) && defined(_MSC_VER) && defined(_M_ARM64) && !defined(__clang__) // 64-bit ARM on MSVC (not clang) + +static stbir__inline void stbir__half_to_float_SIMD(float *output, stbir__FP16 const *input) +{ + float16x4_t in0 = vld1_f16(input + 0); + float16x4_t in1 = vld1_f16(input + 4); + vst1q_f32(output + 0, vcvt_f32_f16(in0)); + vst1q_f32(output + 4, vcvt_f32_f16(in1)); +} + +static stbir__inline void stbir__float_to_half_SIMD(stbir__FP16 *output, float const *input) +{ + float16x4_t out0 = vcvt_f16_f32(vld1q_f32(input + 0)); + float16x4_t out1 = vcvt_f16_f32(vld1q_f32(input + 4)); + vst1_f16(output + 0, out0); + vst1_f16(output + 4, out1); +} + +static stbir__inline float stbir__half_to_float(stbir__FP16 h) +{ + return vgetq_lane_f32(vcvt_f32_f16(vld1_dup_f16(&h)), 0); +} + +static stbir__inline stbir__FP16 stbir__float_to_half(float f) +{ + return vget_lane_f16(vcvt_f16_f32(vdupq_n_f32(f)), 0).n16_u16[0]; +} + +#elif defined(STBIR_NEON) && (defined(_M_ARM64) || defined(__aarch64__) || defined(__arm64__)) // 64-bit ARM + +static stbir__inline void stbir__half_to_float_SIMD(float *output, stbir__FP16 const *input) +{ + float16x8_t in = vld1q_f16(input); + vst1q_f32(output + 0, vcvt_f32_f16(vget_low_f16(in))); + vst1q_f32(output + 4, vcvt_f32_f16(vget_high_f16(in))); +} + +static stbir__inline void stbir__float_to_half_SIMD(stbir__FP16 *output, float const *input) +{ + float16x4_t out0 = vcvt_f16_f32(vld1q_f32(input + 0)); + float16x4_t out1 = vcvt_f16_f32(vld1q_f32(input + 4)); + vst1q_f16(output, vcombine_f16(out0, out1)); +} + +static stbir__inline float stbir__half_to_float(stbir__FP16 h) +{ + return vgetq_lane_f32(vcvt_f32_f16(vdup_n_f16(h)), 0); +} + +static stbir__inline stbir__FP16 stbir__float_to_half(float f) +{ + return vget_lane_f16(vcvt_f16_f32(vdupq_n_f32(f)), 0); +} + +#elif defined(STBIR_WASM) || (defined(STBIR_NEON) && (defined(_MSC_VER) || defined(_M_ARM) || defined(__arm__))) // WASM or 32-bit ARM on MSVC/clang + +static stbir__inline void stbir__half_to_float_SIMD(float *output, stbir__FP16 const *input) +{ + for (int i = 0; i < 8; i++) + { + output[i] = stbir__half_to_float(input[i]); + } +} +static 
stbir__inline void stbir__float_to_half_SIMD(stbir__FP16 *output, float const *input)
+{
+    for (int i = 0; i < 8; i++)
+    {
+        output[i] = stbir__float_to_half(input[i]);
+    }
+}
+
+#endif
+
+#ifdef STBIR_SIMD
+
+#define stbir__simdf_0123to3333(out, reg) (out) = stbir__simdf_swiz(reg, 3, 3, 3, 3)
+#define stbir__simdf_0123to2222(out, reg) (out) = stbir__simdf_swiz(reg, 2, 2, 2, 2)
+#define stbir__simdf_0123to1111(out, reg) (out) = stbir__simdf_swiz(reg, 1, 1, 1, 1)
+#define stbir__simdf_0123to0000(out, reg) (out) = stbir__simdf_swiz(reg, 0, 0, 0, 0)
+#define stbir__simdf_0123to0003(out, reg) (out) = stbir__simdf_swiz(reg, 0, 0, 0, 3)
+#define stbir__simdf_0123to0001(out, reg) (out) = stbir__simdf_swiz(reg, 0, 0, 0, 1)
+#define stbir__simdf_0123to1122(out, reg) (out) = stbir__simdf_swiz(reg, 1, 1, 2, 2)
+#define stbir__simdf_0123to2333(out, reg) (out) = stbir__simdf_swiz(reg, 2, 3, 3, 3)
+#define stbir__simdf_0123to0023(out, reg) (out) = stbir__simdf_swiz(reg, 0, 0, 2, 3)
+#define stbir__simdf_0123to1230(out, reg) (out) = stbir__simdf_swiz(reg, 1, 2, 3, 0)
+#define stbir__simdf_0123to2103(out, reg) (out) = stbir__simdf_swiz(reg, 2, 1, 0, 3)
+#define stbir__simdf_0123to3210(out, reg) (out) = stbir__simdf_swiz(reg, 3, 2, 1, 0)
+#define stbir__simdf_0123to2301(out, reg) (out) = stbir__simdf_swiz(reg, 2, 3, 0, 1)
+#define stbir__simdf_0123to3012(out, reg) (out) = stbir__simdf_swiz(reg, 3, 0, 1, 2)
+#define stbir__simdf_0123to0011(out, reg) (out) = stbir__simdf_swiz(reg, 0, 0, 1, 1)
+#define stbir__simdf_0123to1100(out, reg) (out) = stbir__simdf_swiz(reg, 1, 1, 0, 0)
+#define stbir__simdf_0123to2233(out, reg) (out) = stbir__simdf_swiz(reg, 2, 2, 3, 3)
+#define stbir__simdf_0123to1133(out, reg) (out) = stbir__simdf_swiz(reg, 1, 1, 3, 3)
+#define stbir__simdf_0123to0022(out, reg) (out) = stbir__simdf_swiz(reg, 0, 0, 2, 2)
+#define stbir__simdf_0123to1032(out, reg) (out) = stbir__simdf_swiz(reg, 1, 0, 3, 2)
+
+typedef union stbir__simdi_u32
+{
+    stbir_uint32 m128i_u32[4];
+    int m128i_i32[4];
+    stbir__simdi m128i_i128;
+} stbir__simdi_u32;
+
+static const int STBIR_mask[9] = {0, 0, 0, -1, -1, -1, 0, 0, 0};
+
+static const STBIR__SIMDF_CONST(STBIR_max_uint8_as_float, stbir__max_uint8_as_float);
+static const STBIR__SIMDF_CONST(STBIR_max_uint16_as_float, stbir__max_uint16_as_float);
+static const STBIR__SIMDF_CONST(STBIR_max_uint8_as_float_inverted, stbir__max_uint8_as_float_inverted);
+static const STBIR__SIMDF_CONST(STBIR_max_uint16_as_float_inverted, stbir__max_uint16_as_float_inverted);
+
+static const STBIR__SIMDF_CONST(STBIR_simd_point5, 0.5f);
+static const STBIR__SIMDF_CONST(STBIR_ones, 1.0f);
+static const STBIR__SIMDI_CONST(STBIR_almost_zero, (127 - 13) << 23);
+static const STBIR__SIMDI_CONST(STBIR_almost_one, 0x3f7fffff);
+static const STBIR__SIMDI_CONST(STBIR_mastissa_mask, 0xff);
+static const STBIR__SIMDI_CONST(STBIR_topscale, 0x02000000);
+
+// Basically, in simd mode, we unroll the proper amount, and we don't want
+// the non-simd remnant loops to be unrolled because they only run a few times
+// Adding this switch saves about 5K on clang which is Captain Unroll the 3rd.
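// [editorial note, not part of the original diff] stbir_simd_memcpy below uses
// the classic sized-copy pattern: one unaligned vector copy to reach alignment,
// an aligned streaming loop, then a final vector simply re-anchored at
// d_end - 16, which re-copies a few bytes. That tail trick is only safe because
// the buffers may not overlap, which the STBIR_ASSERT encodes; as a standalone
// predicate (illustrative sketch, assumed name) it reads:
//
//    static int stbir__ranges_disjoint(char const *d, char const *s, size_t n)
//    {
//        return (d >= s + n) || (d + n <= s);
//    }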
+#define STBIR_SIMD_STREAMOUT_PTR(star) STBIR_STREAMOUT_PTR(star)
+#define STBIR_SIMD_NO_UNROLL(ptr) STBIR_NO_UNROLL(ptr)
+#define STBIR_SIMD_NO_UNROLL_LOOP_START STBIR_NO_UNROLL_LOOP_START
+#define STBIR_SIMD_NO_UNROLL_LOOP_START_INF_FOR STBIR_NO_UNROLL_LOOP_START_INF_FOR
+
+#ifdef STBIR_MEMCPY
+#undef STBIR_MEMCPY
+#endif
+#define STBIR_MEMCPY stbir_simd_memcpy
+
+// override normal use of memcpy with much simpler copy (faster and smaller with our sized copies)
+static void stbir_simd_memcpy(void *dest, void const *src, size_t bytes)
+{
+    char STBIR_SIMD_STREAMOUT_PTR(*) d = (char *)dest;
+    char STBIR_SIMD_STREAMOUT_PTR(*) d_end = ((char *)dest) + bytes;
+    ptrdiff_t ofs_to_src = (char *)src - (char *)dest;
+
+    // check overlaps
+    STBIR_ASSERT(((d >= ((char *)src) + bytes)) || ((d + bytes) <= (char *)src));
+
+    if (bytes < (16 * stbir__simdfX_float_count))
+    {
+        if (bytes < 16)
+        {
+            if (bytes)
+            {
+                STBIR_SIMD_NO_UNROLL_LOOP_START
+                do
+                {
+                    STBIR_SIMD_NO_UNROLL(d);
+                    d[0] = d[ofs_to_src];
+                    ++d;
+                } while (d < d_end);
+            }
+        }
+        else
+        {
+            stbir__simdf x;
+            // do one unaligned to get us aligned for the stream out below
+            stbir__simdf_load(x, (d + ofs_to_src));
+            stbir__simdf_store(d, x);
+            d = (char *)((((size_t)d) + 16) & ~15);
+
+            STBIR_SIMD_NO_UNROLL_LOOP_START_INF_FOR
+            for (;;)
+            {
+                STBIR_SIMD_NO_UNROLL(d);
+
+                if (d > (d_end - 16))
+                {
+                    if (d == d_end)
+                        return;
+                    d = d_end - 16;
+                }
+
+                stbir__simdf_load(x, (d + ofs_to_src));
+                stbir__simdf_store(d, x);
+                d += 16;
+            }
+        }
+    }
+    else
+    {
+        stbir__simdfX x0, x1, x2, x3;
+
+        // do one unaligned to get us aligned for the stream out below
+        stbir__simdfX_load(x0, (d + ofs_to_src) + 0 * stbir__simdfX_float_count);
+        stbir__simdfX_load(x1, (d + ofs_to_src) + 4 * stbir__simdfX_float_count);
+        stbir__simdfX_load(x2, (d + ofs_to_src) + 8 * stbir__simdfX_float_count);
+        stbir__simdfX_load(x3, (d + ofs_to_src) + 12 * stbir__simdfX_float_count);
+        stbir__simdfX_store(d + 0 * stbir__simdfX_float_count, x0);
+        stbir__simdfX_store(d + 4 * stbir__simdfX_float_count, x1);
+        stbir__simdfX_store(d + 8 * stbir__simdfX_float_count, x2);
+        stbir__simdfX_store(d + 12 * stbir__simdfX_float_count, x3);
+        d = (char *)((((size_t)d) + (16 * stbir__simdfX_float_count)) & ~((16 * stbir__simdfX_float_count) - 1));
+
+        STBIR_SIMD_NO_UNROLL_LOOP_START_INF_FOR
+        for (;;)
+        {
+            STBIR_SIMD_NO_UNROLL(d);
+
+            if (d > (d_end - (16 * stbir__simdfX_float_count)))
+            {
+                if (d == d_end)
+                    return;
+                d = d_end - (16 * stbir__simdfX_float_count);
+            }
+
+            stbir__simdfX_load(x0, (d + ofs_to_src) + 0 * stbir__simdfX_float_count);
+            stbir__simdfX_load(x1, (d + ofs_to_src) + 4 * stbir__simdfX_float_count);
+            stbir__simdfX_load(x2, (d + ofs_to_src) + 8 * stbir__simdfX_float_count);
+            stbir__simdfX_load(x3, (d + ofs_to_src) + 12 * stbir__simdfX_float_count);
+            stbir__simdfX_store(d + 0 * stbir__simdfX_float_count, x0);
+            stbir__simdfX_store(d + 4 * stbir__simdfX_float_count, x1);
+            stbir__simdfX_store(d + 8 * stbir__simdfX_float_count, x2);
+            stbir__simdfX_store(d + 12 * stbir__simdfX_float_count, x3);
+            d += (16 * stbir__simdfX_float_count);
+        }
+    }
+}
+
+// memcpy that is specifically intentionally overlapping (src is smaller than dest, so can be
+// a normal forward copy, bytes is divisible by 4 and bytes is greater than or equal to
+// the diff between dest and src)
+static void stbir_overlapping_memcpy(void *dest, void const *src, size_t bytes)
+{
+    char STBIR_SIMD_STREAMOUT_PTR(*) sd = (char *)src;
+    char STBIR_SIMD_STREAMOUT_PTR(*) s_end = ((char *)src) + bytes;
+    ptrdiff_t
ofs_to_dest = (char *)dest - (char *)src; + + if (ofs_to_dest >= 16) // is the overlap more than 16 away? + { + char STBIR_SIMD_STREAMOUT_PTR(*) s_end16 = ((char *)src) + (bytes & ~15); + STBIR_SIMD_NO_UNROLL_LOOP_START + do + { + stbir__simdf x; + STBIR_SIMD_NO_UNROLL(sd); + stbir__simdf_load(x, sd); + stbir__simdf_store((sd + ofs_to_dest), x); + sd += 16; + } while (sd < s_end16); + + if (sd == s_end) + return; + } + + do + { + STBIR_SIMD_NO_UNROLL(sd); + *(int *)(sd + ofs_to_dest) = *(int *)sd; + sd += 4; + } while (sd < s_end); +} + +#else // no SSE2 + +// when in scalar mode, we let unrolling happen, so this macro just does the __restrict +#define STBIR_SIMD_STREAMOUT_PTR(star) STBIR_STREAMOUT_PTR(star) +#define STBIR_SIMD_NO_UNROLL(ptr) +#define STBIR_SIMD_NO_UNROLL_LOOP_START +#define STBIR_SIMD_NO_UNROLL_LOOP_START_INF_FOR + +#endif // SSE2 + +#ifdef STBIR_PROFILE + +#ifndef STBIR_PROFILE_FUNC + +#if defined(_x86_64) || defined(__x86_64__) || defined(_M_X64) || defined(__x86_64) || defined(__SSE2__) || defined(STBIR_SSE) || defined(_M_IX86_FP) || defined(__i386) || defined(__i386__) || defined(_M_IX86) || defined(_X86_) + +#ifdef _MSC_VER + +STBIRDEF stbir_uint64 __rdtsc(); +#define STBIR_PROFILE_FUNC() __rdtsc() + +#else // non msvc + +static stbir__inline stbir_uint64 STBIR_PROFILE_FUNC() +{ + stbir_uint32 lo, hi; + asm volatile("rdtsc" : "=a"(lo), "=d"(hi)); + return (((stbir_uint64)hi) << 32) | ((stbir_uint64)lo); +} + +#endif // msvc + +#elif defined(_M_ARM64) || defined(__aarch64__) || defined(__arm64__) || defined(__ARM_NEON__) + +#if defined(_MSC_VER) && !defined(__clang__) + +#define STBIR_PROFILE_FUNC() _ReadStatusReg(ARM64_CNTVCT) + +#else + +static stbir__inline stbir_uint64 STBIR_PROFILE_FUNC() +{ + stbir_uint64 tsc; + asm volatile("mrs %0, cntvct_el0" : "=r"(tsc)); + return tsc; +} + +#endif + +#else // x64, arm + +#error Unknown platform for profiling. 
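// [editorial sketch, not part of the original diff] intended use of the zone
// macros defined below, assuming `decode` names a field of info->profile.named:
//
//    STBIR_PROFILE_START(decode);
//    /* ... work ... */
//    STBIR_PROFILE_END(decode);
//
// Nesting works because a zone adds its elapsed time to the parent's
// *current_zone_excluded_ptr and subtracts its own children's time from itself.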
+
+#endif // x64, arm
+
+#endif // STBIR_PROFILE_FUNC
+
+#define STBIR_ONLY_PROFILE_GET_SPLIT_INFO , stbir__per_split_info *split_info
+#define STBIR_ONLY_PROFILE_SET_SPLIT_INFO , split_info
+
+#define STBIR_ONLY_PROFILE_BUILD_GET_INFO , stbir__info *profile_info
+#define STBIR_ONLY_PROFILE_BUILD_SET_INFO , profile_info
+
+// super light-weight micro profiler
+#define STBIR_PROFILE_START_ll(info, wh) \
+    { \
+        stbir_uint64 wh##thiszonetime = STBIR_PROFILE_FUNC(); \
+        stbir_uint64 *wh##save_parent_excluded_ptr = info->current_zone_excluded_ptr; \
+        stbir_uint64 wh##current_zone_excluded = 0; \
+        info->current_zone_excluded_ptr = &wh##current_zone_excluded;
+#define STBIR_PROFILE_END_ll(info, wh) \
+    wh##thiszonetime = STBIR_PROFILE_FUNC() - wh##thiszonetime; \
+    info->profile.named.wh += wh##thiszonetime - wh##current_zone_excluded; \
+    *wh##save_parent_excluded_ptr += wh##thiszonetime; \
+    info->current_zone_excluded_ptr = wh##save_parent_excluded_ptr; \
+    }
+#define STBIR_PROFILE_FIRST_START_ll(info, wh) \
+    { \
+        int i; \
+        info->current_zone_excluded_ptr = &info->profile.named.total; \
+        for (i = 0; i < STBIR__ARRAY_SIZE(info->profile.array); i++) \
+            info->profile.array[i] = 0; \
+    } \
+    STBIR_PROFILE_START_ll(info, wh);
+#define STBIR_PROFILE_CLEAR_EXTRAS_ll(info, num) \
+    { \
+        int extra; \
+        for (extra = 1; extra < (num); extra++) \
+        { \
+            int i; \
+            for (i = 0; i < STBIR__ARRAY_SIZE((info)->profile.array); i++) \
+                (info)[extra].profile.array[i] = 0; \
+        } \
+    }
+
+// for thread data
+#define STBIR_PROFILE_START(wh) STBIR_PROFILE_START_ll(split_info, wh)
+#define STBIR_PROFILE_END(wh) STBIR_PROFILE_END_ll(split_info, wh)
+#define STBIR_PROFILE_FIRST_START(wh) STBIR_PROFILE_FIRST_START_ll(split_info, wh)
+#define STBIR_PROFILE_CLEAR_EXTRAS() STBIR_PROFILE_CLEAR_EXTRAS_ll(split_info, split_count)
+
+// for build data
+#define STBIR_PROFILE_BUILD_START(wh) STBIR_PROFILE_START_ll(profile_info, wh)
+#define STBIR_PROFILE_BUILD_END(wh) STBIR_PROFILE_END_ll(profile_info, wh)
+#define STBIR_PROFILE_BUILD_FIRST_START(wh) STBIR_PROFILE_FIRST_START_ll(profile_info, wh)
+#define STBIR_PROFILE_BUILD_CLEAR(info) \
+    { \
+        int i; \
+        for (i = 0; i < STBIR__ARRAY_SIZE(info->profile.array); i++) \
+            info->profile.array[i] = 0; \
+    }
+
+#else // no profile
+
+#define STBIR_ONLY_PROFILE_GET_SPLIT_INFO
+#define STBIR_ONLY_PROFILE_SET_SPLIT_INFO
+
+#define STBIR_ONLY_PROFILE_BUILD_GET_INFO
+#define STBIR_ONLY_PROFILE_BUILD_SET_INFO
+
+#define STBIR_PROFILE_START(wh)
+#define STBIR_PROFILE_END(wh)
+#define STBIR_PROFILE_FIRST_START(wh)
+#define STBIR_PROFILE_CLEAR_EXTRAS()
+
+#define STBIR_PROFILE_BUILD_START(wh)
+#define STBIR_PROFILE_BUILD_END(wh)
+#define STBIR_PROFILE_BUILD_FIRST_START(wh)
+#define STBIR_PROFILE_BUILD_CLEAR(info)
+
+#endif // stbir_profile
+
+#ifndef STBIR_CEILF
+#include <math.h>
+#if _MSC_VER <= 1200 // support VC6 for Sean
+#define STBIR_CEILF(x) ((float)ceil((float)(x)))
+#define STBIR_FLOORF(x) ((float)floor((float)(x)))
+#else
+#define STBIR_CEILF(x) ceilf(x)
+#define STBIR_FLOORF(x) floorf(x)
+#endif
+#endif
+
+#ifndef STBIR_MEMCPY
+// For memcpy
+#include <string.h>
+#define STBIR_MEMCPY(dest, src, len) memcpy(dest, src, len)
+#endif
+
+#ifndef STBIR_SIMD
+
+// memcpy that is specifically intentionally overlapping (src is smaller than dest, so can be
+// a normal forward copy, bytes is divisible by 4 and bytes is greater than or equal to
+// the diff between dest and src)
+static void stbir_overlapping_memcpy(void *dest, void const *src, size_t bytes)
+{
+    char STBIR_SIMD_STREAMOUT_PTR(*) sd =
(char *)src; + char STBIR_SIMD_STREAMOUT_PTR(*) s_end = ((char *)src) + bytes; + ptrdiff_t ofs_to_dest = (char *)dest - (char *)src; + + if (ofs_to_dest >= 8) // is the overlap more than 8 away? + { + char STBIR_SIMD_STREAMOUT_PTR(*) s_end8 = ((char *)src) + (bytes & ~7); + STBIR_NO_UNROLL_LOOP_START + do + { + STBIR_NO_UNROLL(sd); + *(stbir_uint64 *)(sd + ofs_to_dest) = *(stbir_uint64 *)sd; + sd += 8; + } while (sd < s_end8); + + if (sd == s_end) + return; + } + + STBIR_NO_UNROLL_LOOP_START + do + { + STBIR_NO_UNROLL(sd); + *(int *)(sd + ofs_to_dest) = *(int *)sd; + sd += 4; + } while (sd < s_end); +} + +#endif + +static float stbir__filter_trapezoid(float x, float scale, void *user_data) +{ + float halfscale = scale / 2; + float t = 0.5f + halfscale; + STBIR_ASSERT(scale <= 1); + STBIR__UNUSED(user_data); + + if (x < 0.0f) + x = -x; + + if (x >= t) + return 0.0f; + else + { + float r = 0.5f - halfscale; + if (x <= r) + return 1.0f; + else + return (t - x) / scale; + } +} + +static float stbir__support_trapezoid(float scale, void *user_data) +{ + STBIR__UNUSED(user_data); + return 0.5f + scale / 2.0f; +} + +static float stbir__filter_triangle(float x, float s, void *user_data) +{ + STBIR__UNUSED(s); + STBIR__UNUSED(user_data); + + if (x < 0.0f) + x = -x; + + if (x <= 1.0f) + return 1.0f - x; + else + return 0.0f; +} + +static float stbir__filter_point(float x, float s, void *user_data) +{ + STBIR__UNUSED(x); + STBIR__UNUSED(s); + STBIR__UNUSED(user_data); + + return 1.0f; +} + +static float stbir__filter_cubic(float x, float s, void *user_data) +{ + STBIR__UNUSED(s); + STBIR__UNUSED(user_data); + + if (x < 0.0f) + x = -x; + + if (x < 1.0f) + return (4.0f + x * x * (3.0f * x - 6.0f)) / 6.0f; + else if (x < 2.0f) + return (8.0f + x * (-12.0f + x * (6.0f - x))) / 6.0f; + + return (0.0f); +} + +static float stbir__filter_catmullrom(float x, float s, void *user_data) +{ + STBIR__UNUSED(s); + STBIR__UNUSED(user_data); + + if (x < 0.0f) + x = -x; + + if (x < 1.0f) + return 1.0f - x * x * (2.5f - 1.5f * x); + else if (x < 2.0f) + return 2.0f - x * (4.0f + x * (0.5f * x - 2.5f)); + + return (0.0f); +} + +static float stbir__filter_mitchell(float x, float s, void *user_data) +{ + STBIR__UNUSED(s); + STBIR__UNUSED(user_data); + + if (x < 0.0f) + x = -x; + + if (x < 1.0f) + return (16.0f + x * x * (21.0f * x - 36.0f)) / 18.0f; + else if (x < 2.0f) + return (32.0f + x * (-60.0f + x * (36.0f - 7.0f * x))) / 18.0f; + + return (0.0f); +} + +static float stbir__support_zeropoint5(float s, void *user_data) +{ + STBIR__UNUSED(s); + STBIR__UNUSED(user_data); + return 0.5f; +} + +static float stbir__support_one(float s, void *user_data) +{ + STBIR__UNUSED(s); + STBIR__UNUSED(user_data); + return 1; +} + +static float stbir__support_two(float s, void *user_data) +{ + STBIR__UNUSED(s); + STBIR__UNUSED(user_data); + return 2; } // This is the maximum number of input samples that can affect an output sample -// with the given filter -static int stbir__get_filter_pixel_width(stbir_filter filter, float scale) +// with the given filter from the output pixel's perspective +static int stbir__get_filter_pixel_width(stbir__support_callback *support, float scale, void *user_data) { - STBIR_ASSERT(filter != 0); - STBIR_ASSERT(filter < STBIR__ARRAY_SIZE(stbir__filter_info_table)); + STBIR_ASSERT(support != 0); - if (stbir__use_upsampling(scale)) - return (int)ceil(stbir__filter_info_table[filter].support(1 / scale) * 2); - else - return (int)ceil(stbir__filter_info_table[filter].support(scale) * 2 / scale); + if (scale >= 
(1.0f - stbir__small_float)) // upscale
+    return (int)STBIR_CEILF(support(1.0f / scale, user_data) * 2.0f);
+  else
+    return (int)STBIR_CEILF(support(scale, user_data) * 2.0f / scale);
}

-// This is how much to expand buffers to account for filters seeking outside
-// the image boundaries.
-static int stbir__get_filter_pixel_margin(stbir_filter filter, float scale)
+// this is how many coefficients per run of the filter (which is different
+// from the filter_pixel_width depending on whether we are scattering or gathering)
+static int stbir__get_coefficient_width(stbir__sampler *samp, int is_gather, void *user_data)
{
-  return stbir__get_filter_pixel_width(filter, scale) / 2;
+  float scale = samp->scale_info.scale;
+  stbir__support_callback *support = samp->filter_support;
+
+  switch (is_gather)
+  {
+  case 1:
+    return (int)STBIR_CEILF(support(1.0f / scale, user_data) * 2.0f);
+  case 2:
+    return (int)STBIR_CEILF(support(scale, user_data) * 2.0f / scale);
+  case 0:
+    return (int)STBIR_CEILF(support(scale, user_data) * 2.0f);
+  default:
+    STBIR_ASSERT((is_gather >= 0) && (is_gather <= 2));
+    return 0;
+  }
}

-static int stbir__get_coefficient_width(stbir_filter filter, float scale)
+static int stbir__get_contributors(stbir__sampler *samp, int is_gather)
{
-  if (stbir__use_upsampling(scale))
-    return (int)ceil(stbir__filter_info_table[filter].support(1 / scale) * 2);
-  else
-    return (int)ceil(stbir__filter_info_table[filter].support(scale) * 2);
+  if (is_gather)
+    return samp->scale_info.output_sub_size;
+  else
+    return (samp->scale_info.input_full_size + samp->filter_pixel_margin * 2);
}

-static int stbir__get_contributors(float scale, stbir_filter filter, int input_size, int output_size)
+static int stbir__edge_zero_full(int n, int max)
{
-  if (stbir__use_upsampling(scale))
-    return output_size;
-  else
-    return (input_size + stbir__get_filter_pixel_margin(filter, scale) * 2);
+  STBIR__UNUSED(n);
+  STBIR__UNUSED(max);
+  return 0; // NOTREACHED
}

-static int stbir__get_total_horizontal_coefficients(stbir__info *info)
+static int stbir__edge_clamp_full(int n, int max)
{
-  return info->horizontal_num_contributors * stbir__get_coefficient_width(info->horizontal_filter, info->horizontal_scale);
+  if (n < 0)
+    return 0;
+
+  if (n >= max)
+    return max - 1;
+
+  return n; // NOTREACHED
}

-static int stbir__get_total_vertical_coefficients(stbir__info *info)
+static int stbir__edge_reflect_full(int n, int max)
{
-  return info->vertical_num_contributors * stbir__get_coefficient_width(info->vertical_filter, info->vertical_scale);
+  if (n < 0)
+  {
+    if (n > -max)
+      return -n;
+    else
+      return max - 1;
+  }
+
+  if (n >= max)
+  {
+    int max2 = max * 2;
+    if (n >= max2)
+      return 0;
+    else
+      return max2 - n - 1;
+  }
+
+  return n; // NOTREACHED
}

-static stbir__contributors *stbir__get_contributor(stbir__contributors *contributors, int n)
+static int stbir__edge_wrap_full(int n, int max)
{
-  return &contributors[n];
+  if (n >= 0)
+    return (n % max);
+  else
+  {
+    int m = (-n) % max;
+
+    if (m != 0)
+      m = max - m;
+
+    return (m);
+  }
}

-// For perf reasons this code is duplicated in stbir__resample_horizontal_upsample/downsample,
-// if you change it here change it there too.
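// ----------------------------------------------------------------------------
// Aside: an illustrative, standalone sketch of the four edge rules added
// above (not part of the patch; the demo_* names are made up for the demo).
// Each rule maps an out-of-range tap index n back into [0, max): clamp pins
// to the nearest edge texel, reflect mirrors back across the edge, wrap is
// periodic, and zero is handled later by writing zeros, so its mapping is a
// don't-care.

#include <stdio.h>

static int demo_edge_clamp(int n, int max) { return (n < 0) ? 0 : ((n >= max) ? max - 1 : n); }

static int demo_edge_reflect(int n, int max)
{
  if (n < 0)
    return (n > -max) ? -n : max - 1;
  if (n >= max)
    return (n >= max * 2) ? 0 : max * 2 - n - 1;
  return n;
}

static int demo_edge_wrap(int n, int max)
{
  int m = n % max;              // C remainder is negative for negative n...
  return (m < 0) ? m + max : m; // ...so fold it back into [0, max)
}

int main(void)
{
  int n, max = 4;
  for (n = -4; n <= 7; n++) // e.g. n = -1 -> clamp 0, reflect 1, wrap 3
    printf("%3d -> clamp %d  reflect %d  wrap %d\n",
           n, demo_edge_clamp(n, max), demo_edge_reflect(n, max), demo_edge_wrap(n, max));
  return 0;
}
// ----------------------------------------------------------------------------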
-static float *stbir__get_coefficient(float *coefficients, stbir_filter filter, float scale, int n, int c) -{ - int width = stbir__get_coefficient_width(filter, scale); - return &coefficients[width * n + c]; -} - -static int stbir__edge_wrap_slow(stbir_edge edge, int n, int max) -{ - switch (edge) - { - case STBIR_EDGE_ZERO: - return 0; // we'll decode the wrong pixel here, and then overwrite with 0s later - - case STBIR_EDGE_CLAMP: - if (n < 0) - return 0; - - if (n >= max) - return max - 1; - - return n; // NOTREACHED - - case STBIR_EDGE_REFLECT: - { - if (n < 0) - { - if (n < max) - return -n; - else - return max - 1; - } - - if (n >= max) - { - int max2 = max * 2; - if (n >= max2) - return 0; - else - return max2 - n - 1; - } - - return n; // NOTREACHED - } - - case STBIR_EDGE_WRAP: - if (n >= 0) - return (n % max); - else - { - int m = (-n) % max; - - if (m != 0) - m = max - m; - - return (m); - } - // NOTREACHED - - default: - STBIR_ASSERT(!"Unimplemented edge type"); - return 0; - } -} +typedef int stbir__edge_wrap_func(int n, int max); +static stbir__edge_wrap_func *stbir__edge_wrap_slow[] = + { + stbir__edge_clamp_full, // STBIR_EDGE_CLAMP + stbir__edge_reflect_full, // STBIR_EDGE_REFLECT + stbir__edge_wrap_full, // STBIR_EDGE_WRAP + stbir__edge_zero_full, // STBIR_EDGE_ZERO +}; stbir__inline static int stbir__edge_wrap(stbir_edge edge, int n, int max) { - // avoid per-pixel switch - if (n >= 0 && n < max) - return n; - return stbir__edge_wrap_slow(edge, n, max); + // avoid per-pixel switch + if (n >= 0 && n < max) + return n; + return stbir__edge_wrap_slow[edge](n, max); } -// What input pixels contribute to this output pixel? -static void stbir__calculate_sample_range_upsample(int n, float out_filter_radius, float scale_ratio, float out_shift, int *in_first_pixel, int *in_last_pixel, float *in_center_of_out) +#define STBIR__MERGE_RUNS_PIXEL_THRESHOLD 16 + +// get information on the extents of a sampler +static void stbir__get_extents(stbir__sampler *samp, stbir__extents *scanline_extents) { - float out_pixel_center = (float)n + 0.5f; - float out_pixel_influence_lowerbound = out_pixel_center - out_filter_radius; - float out_pixel_influence_upperbound = out_pixel_center + out_filter_radius; + int j, stop; + int left_margin, right_margin; + int min_n = 0x7fffffff, max_n = -0x7fffffff; + int min_left = 0x7fffffff, max_left = -0x7fffffff; + int min_right = 0x7fffffff, max_right = -0x7fffffff; + stbir_edge edge = samp->edge; + stbir__contributors *contributors = samp->contributors; + int output_sub_size = samp->scale_info.output_sub_size; + int input_full_size = samp->scale_info.input_full_size; + int filter_pixel_margin = samp->filter_pixel_margin; - float in_pixel_influence_lowerbound = (out_pixel_influence_lowerbound + out_shift) / scale_ratio; - float in_pixel_influence_upperbound = (out_pixel_influence_upperbound + out_shift) / scale_ratio; + STBIR_ASSERT(samp->is_gather); - *in_center_of_out = (out_pixel_center + out_shift) / scale_ratio; - *in_first_pixel = (int)(floor(in_pixel_influence_lowerbound + 0.5)); - *in_last_pixel = (int)(floor(in_pixel_influence_upperbound - 0.5)); + stop = output_sub_size; + for (j = 0; j < stop; j++) + { + STBIR_ASSERT(contributors[j].n1 >= contributors[j].n0); + if (contributors[j].n0 < min_n) + { + min_n = contributors[j].n0; + stop = j + filter_pixel_margin; // if we find a new min, only scan another filter width + if (stop > output_sub_size) + stop = output_sub_size; + } + } + + stop = 0; + for (j = output_sub_size - 1; j >= stop; j--) + { + 
STBIR_ASSERT(contributors[j].n1 >= contributors[j].n0);
+    if (contributors[j].n1 > max_n)
+    {
+      max_n = contributors[j].n1;
+      stop = j - filter_pixel_margin; // if we find a new max, only scan another filter width
+      if (stop < 0)
+        stop = 0;
+    }
+  }
+
+  STBIR_ASSERT(scanline_extents->conservative.n0 <= min_n);
+  STBIR_ASSERT(scanline_extents->conservative.n1 >= max_n);
+
+  // now calculate how much into the margins we really read
+  left_margin = 0;
+  if (min_n < 0)
+  {
+    left_margin = -min_n;
+    min_n = 0;
+  }
+
+  right_margin = 0;
+  if (max_n >= input_full_size)
+  {
+    right_margin = max_n - input_full_size + 1;
+    max_n = input_full_size - 1;
+  }
+
+  // edge_sizes are the margin pixel extents (how many pixels we hang over each edge)
+  scanline_extents->edge_sizes[0] = left_margin;
+  scanline_extents->edge_sizes[1] = right_margin;
+
+  // spans[0] is the pixels read from the input
+  scanline_extents->spans[0].n0 = min_n;
+  scanline_extents->spans[0].n1 = max_n;
+  scanline_extents->spans[0].pixel_offset_for_input = min_n;
+
+  // default to no other input range
+  scanline_extents->spans[1].n0 = 0;
+  scanline_extents->spans[1].n1 = -1;
+  scanline_extents->spans[1].pixel_offset_for_input = 0;
+
+  // don't have to do edge calc for zero clamp
+  if (edge == STBIR_EDGE_ZERO)
+    return;
+
+  // convert margin pixels to the pixels within the input (min and max)
+  for (j = -left_margin; j < 0; j++)
+  {
+    int p = stbir__edge_wrap(edge, j, input_full_size);
+    if (p < min_left)
+      min_left = p;
+    if (p > max_left)
+      max_left = p;
+  }
+
+  for (j = input_full_size; j < (input_full_size + right_margin); j++)
+  {
+    int p = stbir__edge_wrap(edge, j, input_full_size);
+    if (p < min_right)
+      min_right = p;
+    if (p > max_right)
+      max_right = p;
+  }
+
+  // merge the left margin pixel region if it connects within STBIR__MERGE_RUNS_PIXEL_THRESHOLD pixels of the main pixel region
+  if (min_left != 0x7fffffff)
+  {
+    if (((min_left <= min_n) && ((max_left + STBIR__MERGE_RUNS_PIXEL_THRESHOLD) >= min_n)) ||
+        ((min_n <= min_left) && ((max_n + STBIR__MERGE_RUNS_PIXEL_THRESHOLD) >= max_left)))
+    {
+      scanline_extents->spans[0].n0 = min_n = stbir__min(min_n, min_left);
+      scanline_extents->spans[0].n1 = max_n = stbir__max(max_n, max_left);
+      scanline_extents->spans[0].pixel_offset_for_input = min_n;
+      left_margin = 0;
+    }
+  }
+
+  // merge the right margin pixel region if it connects within STBIR__MERGE_RUNS_PIXEL_THRESHOLD pixels of the main pixel region
+  if (min_right != 0x7fffffff)
+  {
+    if (((min_right <= min_n) && ((max_right + STBIR__MERGE_RUNS_PIXEL_THRESHOLD) >= min_n)) ||
+        ((min_n <= min_right) && ((max_n + STBIR__MERGE_RUNS_PIXEL_THRESHOLD) >= max_right)))
+    {
+      scanline_extents->spans[0].n0 = min_n = stbir__min(min_n, min_right);
+      scanline_extents->spans[0].n1 = max_n = stbir__max(max_n, max_right);
+      scanline_extents->spans[0].pixel_offset_for_input = min_n;
+      right_margin = 0;
+    }
+  }
+
+  STBIR_ASSERT(scanline_extents->conservative.n0 <= min_n);
+  STBIR_ASSERT(scanline_extents->conservative.n1 >= max_n);
+
+  // you get two ranges when you have the WRAP edge mode and you are doing just a piece of the resize,
+  // so you need to get a second run of pixels from the opposite side of the scanline (which you
+  // wouldn't need except for WRAP)
+
+  // if we can't merge the min_left range, add it as a second range
+  if ((left_margin) && (min_left != 0x7fffffff))
+  {
+    stbir__span *newspan = scanline_extents->spans + 1;
+    STBIR_ASSERT(right_margin == 0);
+    if (min_left < scanline_extents->spans[0].n0)
+    {
+      scanline_extents->spans[1].pixel_offset_for_input = scanline_extents->spans[0].n0;
+      scanline_extents->spans[1].n0 = scanline_extents->spans[0].n0;
+      scanline_extents->spans[1].n1 = scanline_extents->spans[0].n1;
+      --newspan;
+    }
+    newspan->pixel_offset_for_input = min_left;
+    newspan->n0 = -left_margin;
+    newspan->n1 = (max_left - min_left) - left_margin;
+    scanline_extents->edge_sizes[0] = 0; // don't need to copy the left margin, since we are directly decoding into the margin
+  }
+  // if we can't merge the min_right range, add it as a second range
+  else if ((right_margin) && (min_right != 0x7fffffff))
+  {
+    stbir__span *newspan = scanline_extents->spans + 1;
+    if (min_right < scanline_extents->spans[0].n0)
+    {
+      scanline_extents->spans[1].pixel_offset_for_input = scanline_extents->spans[0].n0;
+      scanline_extents->spans[1].n0 = scanline_extents->spans[0].n0;
+      scanline_extents->spans[1].n1 = scanline_extents->spans[0].n1;
+      --newspan;
+    }
+    newspan->pixel_offset_for_input = min_right;
+    newspan->n0 = scanline_extents->spans[1].n1 + 1;
+    newspan->n1 = scanline_extents->spans[1].n1 + 1 + (max_right - min_right);
+    scanline_extents->edge_sizes[1] = 0; // don't need to copy the right margin, since we are directly decoding into the margin
+  }
+
+  // sort the spans into write output order
+  if ((scanline_extents->spans[1].n1 > scanline_extents->spans[1].n0) && (scanline_extents->spans[0].n0 > scanline_extents->spans[1].n0))
+  {
+    stbir__span tspan = scanline_extents->spans[0];
+    scanline_extents->spans[0] = scanline_extents->spans[1];
+    scanline_extents->spans[1] = tspan;
+  }
}

-// What output pixels does this input pixel contribute to?
-static void stbir__calculate_sample_range_downsample(int n, float in_pixels_radius, float scale_ratio, float out_shift, int *out_first_pixel, int *out_last_pixel, float *out_center_of_in)
+static void stbir__calculate_in_pixel_range(int *first_pixel, int *last_pixel, float out_pixel_center, float out_filter_radius, float inv_scale, float out_shift, int input_size, stbir_edge edge)
{
-  float in_pixel_center = (float)n + 0.5f;
-  float in_pixel_influence_lowerbound = in_pixel_center - in_pixels_radius;
-  float in_pixel_influence_upperbound = in_pixel_center + in_pixels_radius;
+  int first, last;
+  float out_pixel_influence_lowerbound = out_pixel_center - out_filter_radius;
+  float out_pixel_influence_upperbound = out_pixel_center + out_filter_radius;
-  float out_pixel_influence_lowerbound = in_pixel_influence_lowerbound * scale_ratio - out_shift;
-  float out_pixel_influence_upperbound = in_pixel_influence_upperbound * scale_ratio - out_shift;
+  float in_pixel_influence_lowerbound = (out_pixel_influence_lowerbound + out_shift) * inv_scale;
+  float in_pixel_influence_upperbound = (out_pixel_influence_upperbound + out_shift) * inv_scale;
-  *out_center_of_in = in_pixel_center * scale_ratio - out_shift;
-  *out_first_pixel = (int)(floor(out_pixel_influence_lowerbound + 0.5));
-  *out_last_pixel = (int)(floor(out_pixel_influence_upperbound - 0.5));
+  first = (int)(STBIR_FLOORF(in_pixel_influence_lowerbound + 0.5f));
+  last = (int)(STBIR_FLOORF(in_pixel_influence_upperbound - 0.5f));
+  if (last < first)
+    last = first; // point sample mode can span a value *right* at 0.5, and cause these to cross
+
+  if (edge == STBIR_EDGE_WRAP)
+  {
+    if (first < -input_size)
+      first = -input_size;
+    if (last >= (input_size * 2))
+      last = (input_size * 2) - 1;
+  }
+
+  *first_pixel = first;
+  *last_pixel = last;
}

-static void stbir__calculate_coefficients_upsample(stbir_filter filter, float scale, int in_first_pixel, int in_last_pixel, float in_center_of_out,
stbir__contributors *contributor, float *coefficient_group) +static void stbir__calculate_coefficients_for_gather_upsample(float out_filter_radius, stbir__kernel_callback *kernel, stbir__scale_info *scale_info, int num_contributors, stbir__contributors *contributors, float *coefficient_group, int coefficient_width, stbir_edge edge, void *user_data) { - int i; - float total_filter = 0; - float filter_scale; + int n, end; + float inv_scale = scale_info->inv_scale; + float out_shift = scale_info->pixel_shift; + int input_size = scale_info->input_full_size; + int numerator = scale_info->scale_numerator; + int polyphase = ((scale_info->scale_is_rational) && (numerator < num_contributors)); - STBIR_ASSERT(in_last_pixel - in_first_pixel <= (int)ceil(stbir__filter_info_table[filter].support(1 / scale) * 2)); // Taken directly from stbir__get_coefficient_width() which we can't call because we don't know if we're horizontal or vertical. + // Looping through out pixels + end = num_contributors; + if (polyphase) + end = numerator; + for (n = 0; n < end; n++) + { + int i; + int last_non_zero; + float out_pixel_center = (float)n + 0.5f; + float in_center_of_out = (out_pixel_center + out_shift) * inv_scale; - contributor->n0 = in_first_pixel; - contributor->n1 = in_last_pixel; + int in_first_pixel, in_last_pixel; - STBIR_ASSERT(contributor->n1 >= contributor->n0); + stbir__calculate_in_pixel_range(&in_first_pixel, &in_last_pixel, out_pixel_center, out_filter_radius, inv_scale, out_shift, input_size, edge); - for (i = 0; i <= in_last_pixel - in_first_pixel; i++) - { - float in_pixel_center = (float)(i + in_first_pixel) + 0.5f; - coefficient_group[i] = stbir__filter_info_table[filter].kernel(in_center_of_out - in_pixel_center, 1 / scale); + // make sure we never generate a range larger than our precalculated coeff width + // this only happens in point sample mode, but it's a good safe thing to do anyway + if ((in_last_pixel - in_first_pixel + 1) > coefficient_width) + in_last_pixel = in_first_pixel + coefficient_width - 1; - // If the coefficient is zero, skip it. (Don't do the <0 check here, we want the influence of those outside pixels.) - if (i == 0 && !coefficient_group[i]) - { - contributor->n0 = ++in_first_pixel; - i--; - continue; - } + last_non_zero = -1; + for (i = 0; i <= in_last_pixel - in_first_pixel; i++) + { + float in_pixel_center = (float)(i + in_first_pixel) + 0.5f; + float coeff = kernel(in_center_of_out - in_pixel_center, inv_scale, user_data); - total_filter += coefficient_group[i]; - } + // kill denormals + if (((coeff < stbir__small_float) && (coeff > -stbir__small_float))) + { + if (i == 0) // if we're at the front, just eat zero contributors + { + STBIR_ASSERT((in_last_pixel - in_first_pixel) != 0); // there should be at least one contrib + ++in_first_pixel; + i--; + continue; + } + coeff = 0; // make sure is fully zero (should keep denormals away) + } + else + last_non_zero = i; - // NOTE(fg): Not actually true in general, nor is there any reason to expect it should be. - // It would be true in exact math but is at best approximately true in floating-point math, - // and it would not make sense to try and put actual bounds on this here because it depends - // on the image aspect ratio which can get pretty extreme. - // STBIR_ASSERT(stbir__filter_info_table[filter].kernel((float)(in_last_pixel + 1) + 0.5f - in_center_of_out, 1/scale) == 0); + coefficient_group[i] = coeff; + } - STBIR_ASSERT(total_filter > 0.9); - STBIR_ASSERT(total_filter < 1.1f); // Make sure it's not way off. 
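// ----------------------------------------------------------------------------
// Aside: an illustrative, standalone sketch (not part of the patch) of the
// gather-upsample mapping used here: out pixel n has center n+0.5, its center
// in input space is (center+shift)*inv_scale, and the tap range comes from
// flooring (lower+0.5) and (upper-0.5). Assumes a triangle filter (support 1)
// and a clean 2x upscale with no shift; taps at negative indices are the ones
// the edge rules remap.

#include <math.h>
#include <stdio.h>

static float demo_triangle(float x) { x = fabsf(x); return (x <= 1.0f) ? 1.0f - x : 0.0f; }

int main(void)
{
  float scale = 2.0f, inv_scale = 1.0f / scale;
  float out_radius = 1.0f * scale; // filter support of 1, in output-pixel units
  int n;
  for (n = 0; n < 8; n++) // 4 input pixels upscaled to 8 output pixels
  {
    float out_center = (float)n + 0.5f;
    float in_center = out_center * inv_scale;
    int first = (int)floorf((out_center - out_radius) * inv_scale + 0.5f);
    int last = (int)floorf((out_center + out_radius) * inv_scale - 0.5f);
    int i;
    printf("out %d gathers in [%d..%d]:", n, first, last);
    for (i = first; i <= last; i++) // weights already sum to ~1 here
      printf(" %.2f", demo_triangle(in_center - ((float)i + 0.5f)));
    printf("\n");
  }
  return 0;
}
// ----------------------------------------------------------------------------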
+    in_last_pixel = last_non_zero + in_first_pixel; // kills trailing zeros
+    contributors->n0 = in_first_pixel;
+    contributors->n1 = in_last_pixel;
-  // Make sure the sum of all coefficients is 1.
-  filter_scale = 1 / total_filter;
+    STBIR_ASSERT(contributors->n1 >= contributors->n0);
-  for (i = 0; i <= in_last_pixel - in_first_pixel; i++)
-    coefficient_group[i] *= filter_scale;
-
-  for (i = in_last_pixel - in_first_pixel; i >= 0; i--)
-  {
-    if (coefficient_group[i])
-      break;
-
-    // This line has no weight. We can skip it.
-    contributor->n1 = contributor->n0 + i - 1;
-  }
+    ++contributors;
+    coefficient_group += coefficient_width;
+  }
}

-static void stbir__calculate_coefficients_downsample(stbir_filter filter, float scale_ratio, int out_first_pixel, int out_last_pixel, float out_center_of_in, stbir__contributors *contributor, float *coefficient_group)
+static void stbir__insert_coeff(stbir__contributors *contribs, float *coeffs, int new_pixel, float new_coeff, int max_width)
{
-  int i;
+  if (new_pixel <= contribs->n1) // before the end
+  {
+    if (new_pixel < contribs->n0) // before the front?
+    {
+      if ((contribs->n1 - new_pixel + 1) <= max_width)
+      {
+        int j, o = contribs->n0 - new_pixel;
+        for (j = contribs->n1 - contribs->n0; j >= 0; j--)
+          coeffs[j + o] = coeffs[j];
+        for (j = 1; j < o; j++)
+          coeffs[j] = 0;
+        coeffs[0] = new_coeff;
+        contribs->n0 = new_pixel;
+      }
+    }
+    else
+    {
+      coeffs[new_pixel - contribs->n0] += new_coeff;
+    }
+  }
+  else
+  {
+    if ((new_pixel - contribs->n0 + 1) <= max_width)
+    {
+      int j, e = new_pixel - contribs->n0;
+      for (j = (contribs->n1 - contribs->n0) + 1; j < e; j++) // clear in-between coeffs if there are any
+        coeffs[j] = 0;
-  STBIR_ASSERT(out_last_pixel - out_first_pixel <= (int)ceil(stbir__filter_info_table[filter].support(scale_ratio) * 2)); // Taken directly from stbir__get_coefficient_width() which we can't call because we don't know if we're horizontal or vertical.
-
-  contributor->n0 = out_first_pixel;
-  contributor->n1 = out_last_pixel;
-
-  STBIR_ASSERT(contributor->n1 >= contributor->n0);
-
-  for (i = 0; i <= out_last_pixel - out_first_pixel; i++)
-  {
-    float out_pixel_center = (float)(i + out_first_pixel) + 0.5f;
-    float x = out_pixel_center - out_center_of_in;
-    coefficient_group[i] = stbir__filter_info_table[filter].kernel(x, scale_ratio) * scale_ratio;
-  }
-
-  // NOTE(fg): Not actually true in general, nor is there any reason to expect it should be.
-  // It would be true in exact math but is at best approximately true in floating-point math,
-  // and it would not make sense to try and put actual bounds on this here because it depends
-  // on the image aspect ratio which can get pretty extreme.
-  // STBIR_ASSERT(stbir__filter_info_table[filter].kernel((float)(out_last_pixel + 1) + 0.5f - out_center_of_in, scale_ratio) == 0);
-
-  for (i = out_last_pixel - out_first_pixel; i >= 0; i--)
-  {
-    if (coefficient_group[i])
-      break;
-
-    // This line has no weight. We can skip it.
- contributor->n1 = contributor->n0 + i - 1; - } + coeffs[e] = new_coeff; + contribs->n1 = new_pixel; + } + } } -static void stbir__normalize_downsample_coefficients(stbir__contributors *contributors, float *coefficients, stbir_filter filter, float scale_ratio, int input_size, int output_size) +static void stbir__calculate_out_pixel_range(int *first_pixel, int *last_pixel, float in_pixel_center, float in_pixels_radius, float scale, float out_shift, int out_size) { - int num_contributors = stbir__get_contributors(scale_ratio, filter, input_size, output_size); - int num_coefficients = stbir__get_coefficient_width(filter, scale_ratio); - int i, j; - int skip; + float in_pixel_influence_lowerbound = in_pixel_center - in_pixels_radius; + float in_pixel_influence_upperbound = in_pixel_center + in_pixels_radius; + float out_pixel_influence_lowerbound = in_pixel_influence_lowerbound * scale - out_shift; + float out_pixel_influence_upperbound = in_pixel_influence_upperbound * scale - out_shift; + int out_first_pixel = (int)(STBIR_FLOORF(out_pixel_influence_lowerbound + 0.5f)); + int out_last_pixel = (int)(STBIR_FLOORF(out_pixel_influence_upperbound - 0.5f)); - for (i = 0; i < output_size; i++) - { - float scale; - float total = 0; - - for (j = 0; j < num_contributors; j++) - { - if (i >= contributors[j].n0 && i <= contributors[j].n1) - { - float coefficient = *stbir__get_coefficient(coefficients, filter, scale_ratio, j, i - contributors[j].n0); - total += coefficient; - } - else if (i < contributors[j].n0) - break; - } - - STBIR_ASSERT(total > 0.9f); - STBIR_ASSERT(total < 1.1f); - - scale = 1 / total; - - for (j = 0; j < num_contributors; j++) - { - if (i >= contributors[j].n0 && i <= contributors[j].n1) - *stbir__get_coefficient(coefficients, filter, scale_ratio, j, i - contributors[j].n0) *= scale; - else if (i < contributors[j].n0) - break; - } - } - - // Optimize: Skip zero coefficients and contributions outside of image bounds. - // Do this after normalizing because normalization depends on the n0/n1 values. - for (j = 0; j < num_contributors; j++) - { - int range, max, width; - - skip = 0; - while (*stbir__get_coefficient(coefficients, filter, scale_ratio, j, skip) == 0) - skip++; - - contributors[j].n0 += skip; - - while (contributors[j].n0 < 0) - { - contributors[j].n0++; - skip++; - } - - range = contributors[j].n1 - contributors[j].n0 + 1; - max = stbir__min(num_coefficients, range); - - width = stbir__get_coefficient_width(filter, scale_ratio); - for (i = 0; i < max; i++) - { - if (i + skip >= width) - break; - - *stbir__get_coefficient(coefficients, filter, scale_ratio, j, i) = *stbir__get_coefficient(coefficients, filter, scale_ratio, j, i + skip); - } - - continue; - } - - // Using min to avoid writing into invalid pixels. - for (i = 0; i < num_contributors; i++) - contributors[i].n1 = stbir__min(contributors[i].n1, output_size - 1); + if (out_first_pixel < 0) + out_first_pixel = 0; + if (out_last_pixel >= out_size) + out_last_pixel = out_size - 1; + *first_pixel = out_first_pixel; + *last_pixel = out_last_pixel; } -// Each scan line uses the same kernel values so we should calculate the kernel -// values once and then we can use them for every scan line. 
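// ----------------------------------------------------------------------------
// Aside: an illustrative, standalone sketch (not part of the patch) of the
// scatter view used by the downsample path below: each *input* pixel is
// mapped to the range of *output* pixels it influences, using the same math
// as stbir__calculate_out_pixel_range, and its kernel value is scaled by
// `scale` so each output pixel's taps still total ~1 (margin taps included).
// Assumes a triangle filter and a 2:1 downscale with no shift.

#include <math.h>
#include <stdio.h>

static float demo_triangle2(float x) { x = fabsf(x); return (x <= 1.0f) ? 1.0f - x : 0.0f; }

int main(void)
{
  float scale = 0.5f;             // output pixels per input pixel
  float in_radius = 1.0f / scale; // triangle support of 1, in input-pixel units
  int out_size = 4, in_pixel;
  for (in_pixel = 0; in_pixel < 8; in_pixel++)
  {
    float in_center = (float)in_pixel + 0.5f;
    float out_center_of_in = in_center * scale;
    int first = (int)floorf((in_center - in_radius) * scale + 0.5f);
    int last = (int)floorf((in_center + in_radius) * scale - 0.5f);
    int k;
    if (first < 0) first = 0;                  // clamped to the output, just as
    if (last >= out_size) last = out_size - 1; // stbir__calculate_out_pixel_range does
    printf("in %d scatters to out [%d..%d]:", in_pixel, first, last);
    for (k = first; k <= last; k++)
      printf(" %.3f", demo_triangle2(((float)k + 0.5f) - out_center_of_in) * scale);
    printf("\n");
  }
  return 0;
}
// ----------------------------------------------------------------------------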
-static void stbir__calculate_filters(stbir__contributors *contributors, float *coefficients, stbir_filter filter, float scale_ratio, float shift, int input_size, int output_size) +static void stbir__calculate_coefficients_for_gather_downsample(int start, int end, float in_pixels_radius, stbir__kernel_callback *kernel, stbir__scale_info *scale_info, int coefficient_width, int num_contributors, stbir__contributors *contributors, float *coefficient_group, void *user_data) { - int n; - int total_contributors = stbir__get_contributors(scale_ratio, filter, input_size, output_size); + int in_pixel; + int i; + int first_out_inited = -1; + float scale = scale_info->scale; + float out_shift = scale_info->pixel_shift; + int out_size = scale_info->output_sub_size; + int numerator = scale_info->scale_numerator; + int polyphase = ((scale_info->scale_is_rational) && (numerator < out_size)); - if (stbir__use_upsampling(scale_ratio)) - { - float out_pixels_radius = stbir__filter_info_table[filter].support(1 / scale_ratio) * scale_ratio; + STBIR__UNUSED(num_contributors); - // Looping through out pixels - for (n = 0; n < total_contributors; n++) - { - float in_center_of_out; // Center of the current out pixel in the in pixel space - int in_first_pixel, in_last_pixel; + // Loop through the input pixels + for (in_pixel = start; in_pixel < end; in_pixel++) + { + float in_pixel_center = (float)in_pixel + 0.5f; + float out_center_of_in = in_pixel_center * scale - out_shift; + int out_first_pixel, out_last_pixel; - stbir__calculate_sample_range_upsample(n, out_pixels_radius, scale_ratio, shift, &in_first_pixel, &in_last_pixel, &in_center_of_out); + stbir__calculate_out_pixel_range(&out_first_pixel, &out_last_pixel, in_pixel_center, in_pixels_radius, scale, out_shift, out_size); - stbir__calculate_coefficients_upsample(filter, scale_ratio, in_first_pixel, in_last_pixel, in_center_of_out, stbir__get_contributor(contributors, n), stbir__get_coefficient(coefficients, filter, scale_ratio, n, 0)); - } - } - else - { - float in_pixels_radius = stbir__filter_info_table[filter].support(scale_ratio) / scale_ratio; + if (out_first_pixel > out_last_pixel) + continue; - // Looping through in pixels - for (n = 0; n < total_contributors; n++) - { - float out_center_of_in; // Center of the current out pixel in the in pixel space - int out_first_pixel, out_last_pixel; - int n_adjusted = n - stbir__get_filter_pixel_margin(filter, scale_ratio); + // clamp or exit if we are using polyphase filtering, and the limit is up + if (polyphase) + { + // when polyphase, you only have to do coeffs up to the numerator count + if (out_first_pixel == numerator) + break; - stbir__calculate_sample_range_downsample(n_adjusted, in_pixels_radius, scale_ratio, shift, &out_first_pixel, &out_last_pixel, &out_center_of_in); + // don't do any extra work, clamp last pixel at numerator too + if (out_last_pixel >= numerator) + out_last_pixel = numerator - 1; + } - stbir__calculate_coefficients_downsample(filter, scale_ratio, out_first_pixel, out_last_pixel, out_center_of_in, stbir__get_contributor(contributors, n), stbir__get_coefficient(coefficients, filter, scale_ratio, n, 0)); - } + for (i = 0; i <= out_last_pixel - out_first_pixel; i++) + { + float out_pixel_center = (float)(i + out_first_pixel) + 0.5f; + float x = out_pixel_center - out_center_of_in; + float coeff = kernel(x, scale, user_data) * scale; - stbir__normalize_downsample_coefficients(contributors, coefficients, filter, scale_ratio, input_size, output_size); - } + // kill the coeff if it's 
too small (avoid denormals)
+      if (((coeff < stbir__small_float) && (coeff > -stbir__small_float)))
+        coeff = 0.0f;
+
+      {
+        int out = i + out_first_pixel;
+        float *coeffs = coefficient_group + out * coefficient_width;
+        stbir__contributors *contribs = contributors + out;
+
+        // is this the first time this output pixel has been seen? Init it.
+        if (out > first_out_inited)
+        {
+          STBIR_ASSERT(out == (first_out_inited + 1)); // ensure we have only advanced one at a time
+          first_out_inited = out;
+          contribs->n0 = in_pixel;
+          contribs->n1 = in_pixel;
+          coeffs[0] = coeff;
+        }
+        else
+        {
+          // insert on end (always in order)
+          if (coeffs[0] == 0.0f) // if the first coefficient is zero, then zap it for this coeffs
+          {
+            STBIR_ASSERT((in_pixel - contribs->n0) == 1); // ensure that when we zap, we're at the 2nd pos
+            contribs->n0 = in_pixel;
+          }
+          contribs->n1 = in_pixel;
+          STBIR_ASSERT((in_pixel - contribs->n0) < coefficient_width);
+          coeffs[in_pixel - contribs->n0] = coeff;
+        }
+      }
+    }
+  }
}

-static float *stbir__get_decode_buffer(stbir__info *stbir_info)
-{
-  // The 0 index of the decode buffer starts after the margin. This makes
-  // it okay to use negative indexes on the decode buffer.
-  return &stbir_info->decode_buffer[stbir_info->horizontal_filter_pixel_margin * stbir_info->channels];
-}
-
-#define STBIR__DECODE(type, colorspace) ((int)(type) * (STBIR_MAX_COLORSPACES) + (int)(colorspace))
-
-static void stbir__decode_scanline(stbir__info *stbir_info, int n)
-{
-  int c;
-  int channels = stbir_info->channels;
-  int alpha_channel = stbir_info->alpha_channel;
-  int type = stbir_info->type;
-  int colorspace = stbir_info->colorspace;
-  int input_w = stbir_info->input_w;
-  size_t input_stride_bytes = stbir_info->input_stride_bytes;
-  float *decode_buffer = stbir__get_decode_buffer(stbir_info);
-  stbir_edge edge_horizontal = stbir_info->edge_horizontal;
-  stbir_edge edge_vertical = stbir_info->edge_vertical;
-  size_t in_buffer_row_offset = stbir__edge_wrap(edge_vertical, n, stbir_info->input_h) * input_stride_bytes;
-  const void *input_data = (char *)stbir_info->input_data + in_buffer_row_offset;
-  int max_x = input_w + stbir_info->horizontal_filter_pixel_margin;
-  int decode = STBIR__DECODE(type, colorspace);
-
-  int x = -stbir_info->horizontal_filter_pixel_margin;
-
-  // special handling for STBIR_EDGE_ZERO because it needs to return an item that doesn't appear in the input,
-  // and we want to avoid paying overhead on every pixel if not STBIR_EDGE_ZERO
-  if (edge_vertical == STBIR_EDGE_ZERO && (n < 0 || n >= stbir_info->input_h))
-  {
-    for (; x < max_x; x++)
-      for (c = 0; c < channels; c++)
-        decode_buffer[x * channels + c] = 0;
-    return;
-  }
-
-  switch (decode)
-  {
-  case STBIR__DECODE(STBIR_TYPE_UINT8, STBIR_COLORSPACE_LINEAR):
-    for (; x < max_x; x++)
-    {
-      int decode_pixel_index = x * channels;
-      int input_pixel_index = stbir__edge_wrap(edge_horizontal, x, input_w) * channels;
-      for (c = 0; c < channels; c++)
-        decode_buffer[decode_pixel_index + c] = ((float)((const unsigned char *)input_data)[input_pixel_index + c]) / stbir__max_uint8_as_float;
-    }
-    break;
-
-  case STBIR__DECODE(STBIR_TYPE_UINT8, STBIR_COLORSPACE_SRGB):
-    for (; x < max_x; x++)
-    {
-      int decode_pixel_index = x * channels;
-      int input_pixel_index = stbir__edge_wrap(edge_horizontal, x, input_w) * channels;
-      for (c = 0; c < channels; c++)
-        decode_buffer[decode_pixel_index + c] = stbir__srgb_uchar_to_linear_float[((const unsigned char *)input_data)[input_pixel_index + c]];
-
-      if (!(stbir_info->flags &
STBIR_FLAG_ALPHA_USES_COLORSPACE)) - decode_buffer[decode_pixel_index + alpha_channel] = ((float)((const unsigned char *)input_data)[input_pixel_index + alpha_channel]) / stbir__max_uint8_as_float; - } - break; - - case STBIR__DECODE(STBIR_TYPE_UINT16, STBIR_COLORSPACE_LINEAR): - for (; x < max_x; x++) - { - int decode_pixel_index = x * channels; - int input_pixel_index = stbir__edge_wrap(edge_horizontal, x, input_w) * channels; - for (c = 0; c < channels; c++) - decode_buffer[decode_pixel_index + c] = ((float)((const unsigned short *)input_data)[input_pixel_index + c]) / stbir__max_uint16_as_float; - } - break; - - case STBIR__DECODE(STBIR_TYPE_UINT16, STBIR_COLORSPACE_SRGB): - for (; x < max_x; x++) - { - int decode_pixel_index = x * channels; - int input_pixel_index = stbir__edge_wrap(edge_horizontal, x, input_w) * channels; - for (c = 0; c < channels; c++) - decode_buffer[decode_pixel_index + c] = stbir__srgb_to_linear(((float)((const unsigned short *)input_data)[input_pixel_index + c]) / stbir__max_uint16_as_float); - - if (!(stbir_info->flags & STBIR_FLAG_ALPHA_USES_COLORSPACE)) - decode_buffer[decode_pixel_index + alpha_channel] = ((float)((const unsigned short *)input_data)[input_pixel_index + alpha_channel]) / stbir__max_uint16_as_float; - } - break; - - case STBIR__DECODE(STBIR_TYPE_UINT32, STBIR_COLORSPACE_LINEAR): - for (; x < max_x; x++) - { - int decode_pixel_index = x * channels; - int input_pixel_index = stbir__edge_wrap(edge_horizontal, x, input_w) * channels; - for (c = 0; c < channels; c++) - decode_buffer[decode_pixel_index + c] = (float)(((double)((const unsigned int *)input_data)[input_pixel_index + c]) / stbir__max_uint32_as_float); - } - break; - - case STBIR__DECODE(STBIR_TYPE_UINT32, STBIR_COLORSPACE_SRGB): - for (; x < max_x; x++) - { - int decode_pixel_index = x * channels; - int input_pixel_index = stbir__edge_wrap(edge_horizontal, x, input_w) * channels; - for (c = 0; c < channels; c++) - decode_buffer[decode_pixel_index + c] = stbir__srgb_to_linear((float)(((double)((const unsigned int *)input_data)[input_pixel_index + c]) / stbir__max_uint32_as_float)); - - if (!(stbir_info->flags & STBIR_FLAG_ALPHA_USES_COLORSPACE)) - decode_buffer[decode_pixel_index + alpha_channel] = (float)(((double)((const unsigned int *)input_data)[input_pixel_index + alpha_channel]) / stbir__max_uint32_as_float); - } - break; - - case STBIR__DECODE(STBIR_TYPE_FLOAT, STBIR_COLORSPACE_LINEAR): - for (; x < max_x; x++) - { - int decode_pixel_index = x * channels; - int input_pixel_index = stbir__edge_wrap(edge_horizontal, x, input_w) * channels; - for (c = 0; c < channels; c++) - decode_buffer[decode_pixel_index + c] = ((const float *)input_data)[input_pixel_index + c]; - } - break; - - case STBIR__DECODE(STBIR_TYPE_FLOAT, STBIR_COLORSPACE_SRGB): - for (; x < max_x; x++) - { - int decode_pixel_index = x * channels; - int input_pixel_index = stbir__edge_wrap(edge_horizontal, x, input_w) * channels; - for (c = 0; c < channels; c++) - decode_buffer[decode_pixel_index + c] = stbir__srgb_to_linear(((const float *)input_data)[input_pixel_index + c]); - - if (!(stbir_info->flags & STBIR_FLAG_ALPHA_USES_COLORSPACE)) - decode_buffer[decode_pixel_index + alpha_channel] = ((const float *)input_data)[input_pixel_index + alpha_channel]; - } - - break; - - default: - STBIR_ASSERT(!"Unknown type/colorspace/channels combination."); - break; - } - - if (!(stbir_info->flags & STBIR_FLAG_ALPHA_PREMULTIPLIED)) - { - for (x = -stbir_info->horizontal_filter_pixel_margin; x < max_x; x++) - { - int 
decode_pixel_index = x * channels; - - // If the alpha value is 0 it will clobber the color values. Make sure it's not. - float alpha = decode_buffer[decode_pixel_index + alpha_channel]; -#ifndef STBIR_NO_ALPHA_EPSILON - if (stbir_info->type != STBIR_TYPE_FLOAT) - { - alpha += STBIR_ALPHA_EPSILON; - decode_buffer[decode_pixel_index + alpha_channel] = alpha; - } -#endif - for (c = 0; c < channels; c++) - { - if (c == alpha_channel) - continue; - - decode_buffer[decode_pixel_index + c] *= alpha; - } - } - } - - if (edge_horizontal == STBIR_EDGE_ZERO) - { - for (x = -stbir_info->horizontal_filter_pixel_margin; x < 0; x++) - { - for (c = 0; c < channels; c++) - decode_buffer[x * channels + c] = 0; - } - for (x = input_w; x < max_x; x++) - { - for (c = 0; c < channels; c++) - decode_buffer[x * channels + c] = 0; - } - } -} - -static float *stbir__get_ring_buffer_entry(float *ring_buffer, int index, int ring_buffer_length) -{ - return &ring_buffer[index * ring_buffer_length]; -} - -static float *stbir__add_empty_ring_buffer_entry(stbir__info *stbir_info, int n) -{ - int ring_buffer_index; - float *ring_buffer; - - stbir_info->ring_buffer_last_scanline = n; - - if (stbir_info->ring_buffer_begin_index < 0) - { - ring_buffer_index = stbir_info->ring_buffer_begin_index = 0; - stbir_info->ring_buffer_first_scanline = n; - } - else - { - ring_buffer_index = (stbir_info->ring_buffer_begin_index + (stbir_info->ring_buffer_last_scanline - stbir_info->ring_buffer_first_scanline)) % stbir_info->ring_buffer_num_entries; - STBIR_ASSERT(ring_buffer_index != stbir_info->ring_buffer_begin_index); - } - - ring_buffer = stbir__get_ring_buffer_entry(stbir_info->ring_buffer, ring_buffer_index, stbir_info->ring_buffer_length_bytes / sizeof(float)); - memset(ring_buffer, 0, stbir_info->ring_buffer_length_bytes); - - return ring_buffer; -} - -static void stbir__resample_horizontal_upsample(stbir__info *stbir_info, float *output_buffer) -{ - int x, k; - int output_w = stbir_info->output_w; - int channels = stbir_info->channels; - float *decode_buffer = stbir__get_decode_buffer(stbir_info); - stbir__contributors *horizontal_contributors = stbir_info->horizontal_contributors; - float *horizontal_coefficients = stbir_info->horizontal_coefficients; - int coefficient_width = stbir_info->horizontal_coefficient_width; - - for (x = 0; x < output_w; x++) - { - int n0 = horizontal_contributors[x].n0; - int n1 = horizontal_contributors[x].n1; - - int out_pixel_index = x * channels; - int coefficient_group = coefficient_width * x; - int coefficient_counter = 0; - - STBIR_ASSERT(n1 >= n0); - STBIR_ASSERT(n0 >= -stbir_info->horizontal_filter_pixel_margin); - STBIR_ASSERT(n1 >= -stbir_info->horizontal_filter_pixel_margin); - STBIR_ASSERT(n0 < stbir_info->input_w + stbir_info->horizontal_filter_pixel_margin); - STBIR_ASSERT(n1 < stbir_info->input_w + stbir_info->horizontal_filter_pixel_margin); - - switch (channels) - { - case 1: - for (k = n0; k <= n1; k++) - { - int in_pixel_index = k * 1; - float coefficient = horizontal_coefficients[coefficient_group + coefficient_counter++]; - STBIR_ASSERT(coefficient != 0); - output_buffer[out_pixel_index + 0] += decode_buffer[in_pixel_index + 0] * coefficient; - } - break; - case 2: - for (k = n0; k <= n1; k++) - { - int in_pixel_index = k * 2; - float coefficient = horizontal_coefficients[coefficient_group + coefficient_counter++]; - STBIR_ASSERT(coefficient != 0); - output_buffer[out_pixel_index + 0] += decode_buffer[in_pixel_index + 0] * coefficient; - output_buffer[out_pixel_index + 1] += 
decode_buffer[in_pixel_index + 1] * coefficient; - } - break; - case 3: - for (k = n0; k <= n1; k++) - { - int in_pixel_index = k * 3; - float coefficient = horizontal_coefficients[coefficient_group + coefficient_counter++]; - STBIR_ASSERT(coefficient != 0); - output_buffer[out_pixel_index + 0] += decode_buffer[in_pixel_index + 0] * coefficient; - output_buffer[out_pixel_index + 1] += decode_buffer[in_pixel_index + 1] * coefficient; - output_buffer[out_pixel_index + 2] += decode_buffer[in_pixel_index + 2] * coefficient; - } - break; - case 4: - for (k = n0; k <= n1; k++) - { - int in_pixel_index = k * 4; - float coefficient = horizontal_coefficients[coefficient_group + coefficient_counter++]; - STBIR_ASSERT(coefficient != 0); - output_buffer[out_pixel_index + 0] += decode_buffer[in_pixel_index + 0] * coefficient; - output_buffer[out_pixel_index + 1] += decode_buffer[in_pixel_index + 1] * coefficient; - output_buffer[out_pixel_index + 2] += decode_buffer[in_pixel_index + 2] * coefficient; - output_buffer[out_pixel_index + 3] += decode_buffer[in_pixel_index + 3] * coefficient; - } - break; - default: - for (k = n0; k <= n1; k++) - { - int in_pixel_index = k * channels; - float coefficient = horizontal_coefficients[coefficient_group + coefficient_counter++]; - int c; - STBIR_ASSERT(coefficient != 0); - for (c = 0; c < channels; c++) - output_buffer[out_pixel_index + c] += decode_buffer[in_pixel_index + c] * coefficient; - } - break; - } - } -} - -static void stbir__resample_horizontal_downsample(stbir__info *stbir_info, float *output_buffer) -{ - int x, k; - int input_w = stbir_info->input_w; - int channels = stbir_info->channels; - float *decode_buffer = stbir__get_decode_buffer(stbir_info); - stbir__contributors *horizontal_contributors = stbir_info->horizontal_contributors; - float *horizontal_coefficients = stbir_info->horizontal_coefficients; - int coefficient_width = stbir_info->horizontal_coefficient_width; - int filter_pixel_margin = stbir_info->horizontal_filter_pixel_margin; - int max_x = input_w + filter_pixel_margin * 2; - - STBIR_ASSERT(!stbir__use_width_upsampling(stbir_info)); - - switch (channels) - { - case 1: - for (x = 0; x < max_x; x++) - { - int n0 = horizontal_contributors[x].n0; - int n1 = horizontal_contributors[x].n1; - - int in_x = x - filter_pixel_margin; - int in_pixel_index = in_x * 1; - int max_n = n1; - int coefficient_group = coefficient_width * x; - - for (k = n0; k <= max_n; k++) - { - int out_pixel_index = k * 1; - float coefficient = horizontal_coefficients[coefficient_group + k - n0]; - output_buffer[out_pixel_index + 0] += decode_buffer[in_pixel_index + 0] * coefficient; - } - } - break; - - case 2: - for (x = 0; x < max_x; x++) - { - int n0 = horizontal_contributors[x].n0; - int n1 = horizontal_contributors[x].n1; - - int in_x = x - filter_pixel_margin; - int in_pixel_index = in_x * 2; - int max_n = n1; - int coefficient_group = coefficient_width * x; - - for (k = n0; k <= max_n; k++) - { - int out_pixel_index = k * 2; - float coefficient = horizontal_coefficients[coefficient_group + k - n0]; - output_buffer[out_pixel_index + 0] += decode_buffer[in_pixel_index + 0] * coefficient; - output_buffer[out_pixel_index + 1] += decode_buffer[in_pixel_index + 1] * coefficient; - } - } - break; - - case 3: - for (x = 0; x < max_x; x++) - { - int n0 = horizontal_contributors[x].n0; - int n1 = horizontal_contributors[x].n1; - - int in_x = x - filter_pixel_margin; - int in_pixel_index = in_x * 3; - int max_n = n1; - int coefficient_group = coefficient_width * x; - - 
for (k = n0; k <= max_n; k++) - { - int out_pixel_index = k * 3; - float coefficient = horizontal_coefficients[coefficient_group + k - n0]; - output_buffer[out_pixel_index + 0] += decode_buffer[in_pixel_index + 0] * coefficient; - output_buffer[out_pixel_index + 1] += decode_buffer[in_pixel_index + 1] * coefficient; - output_buffer[out_pixel_index + 2] += decode_buffer[in_pixel_index + 2] * coefficient; - } - } - break; - - case 4: - for (x = 0; x < max_x; x++) - { - int n0 = horizontal_contributors[x].n0; - int n1 = horizontal_contributors[x].n1; - - int in_x = x - filter_pixel_margin; - int in_pixel_index = in_x * 4; - int max_n = n1; - int coefficient_group = coefficient_width * x; - - for (k = n0; k <= max_n; k++) - { - int out_pixel_index = k * 4; - float coefficient = horizontal_coefficients[coefficient_group + k - n0]; - output_buffer[out_pixel_index + 0] += decode_buffer[in_pixel_index + 0] * coefficient; - output_buffer[out_pixel_index + 1] += decode_buffer[in_pixel_index + 1] * coefficient; - output_buffer[out_pixel_index + 2] += decode_buffer[in_pixel_index + 2] * coefficient; - output_buffer[out_pixel_index + 3] += decode_buffer[in_pixel_index + 3] * coefficient; - } - } - break; - - default: - for (x = 0; x < max_x; x++) - { - int n0 = horizontal_contributors[x].n0; - int n1 = horizontal_contributors[x].n1; - - int in_x = x - filter_pixel_margin; - int in_pixel_index = in_x * channels; - int max_n = n1; - int coefficient_group = coefficient_width * x; - - for (k = n0; k <= max_n; k++) - { - int c; - int out_pixel_index = k * channels; - float coefficient = horizontal_coefficients[coefficient_group + k - n0]; - for (c = 0; c < channels; c++) - output_buffer[out_pixel_index + c] += decode_buffer[in_pixel_index + c] * coefficient; - } - } - break; - } -} - -static void stbir__decode_and_resample_upsample(stbir__info *stbir_info, int n) -{ - // Decode the nth scanline from the source image into the decode buffer. - stbir__decode_scanline(stbir_info, n); - - // Now resample it into the ring buffer. - if (stbir__use_width_upsampling(stbir_info)) - stbir__resample_horizontal_upsample(stbir_info, stbir__add_empty_ring_buffer_entry(stbir_info, n)); - else - stbir__resample_horizontal_downsample(stbir_info, stbir__add_empty_ring_buffer_entry(stbir_info, n)); - - // Now it's sitting in the ring buffer ready to be used as source for the vertical sampling. -} - -static void stbir__decode_and_resample_downsample(stbir__info *stbir_info, int n) -{ - // Decode the nth scanline from the source image into the decode buffer. - stbir__decode_scanline(stbir_info, n); - - memset(stbir_info->horizontal_buffer, 0, stbir_info->output_w * stbir_info->channels * sizeof(float)); - - // Now resample it into the horizontal buffer. - if (stbir__use_width_upsampling(stbir_info)) - stbir__resample_horizontal_upsample(stbir_info, stbir_info->horizontal_buffer); - else - stbir__resample_horizontal_downsample(stbir_info, stbir_info->horizontal_buffer); - - // Now it's sitting in the horizontal buffer ready to be distributed into the ring buffers. -} - -// Get the specified scan line from the ring buffer. 
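// ----------------------------------------------------------------------------
// Aside: an illustrative, standalone sketch (not part of the patch) of why
// the hunk below sums weights in STBIR_RENORM_TYPE, which defaults to double:
// with the very large tap counts an extreme downscale can generate
// (exaggerated here for effect), a float accumulator drifts, and that drift
// goes straight into the 1/total renormalization factor.

#include <stdio.h>

int main(void)
{
  int i, n = 1000000;        // pretend one output pixel has a million taps
  float w = 1.0f / (float)n; // idealized equal weights that should total 1.0
  float total_f = 0.0f;
  double total_d = 0.0;
  for (i = 0; i < n; i++)
  {
    total_f += w;         // float accumulator: rounding error piles up
    total_d += (double)w; // double accumulator: stays at ~1.0
  }
  printf("float  total: %.9f\n", total_f);
  printf("double total: %.9f\n", total_d);
  return 0;
}
// ----------------------------------------------------------------------------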
-static float *stbir__get_ring_buffer_scanline(int get_scanline, float *ring_buffer, int begin_index, int first_scanline, int ring_buffer_num_entries, int ring_buffer_length) -{ - int ring_buffer_index = (begin_index + (get_scanline - first_scanline)) % ring_buffer_num_entries; - return stbir__get_ring_buffer_entry(ring_buffer, ring_buffer_index, ring_buffer_length); -} - -static void stbir__encode_scanline(stbir__info *stbir_info, int num_pixels, void *output_buffer, float *encode_buffer, int channels, int alpha_channel, int decode) -{ - int x; - int n; - int num_nonalpha; - stbir_uint16 nonalpha[STBIR_MAX_CHANNELS]; - - if (!(stbir_info->flags & STBIR_FLAG_ALPHA_PREMULTIPLIED)) - { - for (x = 0; x < num_pixels; ++x) - { - int pixel_index = x * channels; - - float alpha = encode_buffer[pixel_index + alpha_channel]; - float reciprocal_alpha = alpha ? 1.0f / alpha : 0; - - // unrolling this produced a 1% slowdown upscaling a large RGBA linear-space image on my machine - stb - for (n = 0; n < channels; n++) - if (n != alpha_channel) - encode_buffer[pixel_index + n] *= reciprocal_alpha; - - // We added in a small epsilon to prevent the color channel from being deleted with zero alpha. - // Because we only add it for integer types, it will automatically be discarded on integer - // conversion, so we don't need to subtract it back out (which would be problematic for - // numeric precision reasons). - } - } - - // build a table of all channels that need colorspace correction, so - // we don't perform colorspace correction on channels that don't need it. - for (x = 0, num_nonalpha = 0; x < channels; ++x) - { - if (x != alpha_channel || (stbir_info->flags & STBIR_FLAG_ALPHA_USES_COLORSPACE)) - { - nonalpha[num_nonalpha++] = (stbir_uint16)x; - } - } - -#define STBIR__ROUND_INT(f) ((int)((f) + 0.5)) -#define STBIR__ROUND_UINT(f) ((stbir_uint32)((f) + 0.5)) - -#ifdef STBIR__SATURATE_INT -#define STBIR__ENCODE_LINEAR8(f) stbir__saturate8(STBIR__ROUND_INT((f)*stbir__max_uint8_as_float)) -#define STBIR__ENCODE_LINEAR16(f) stbir__saturate16(STBIR__ROUND_INT((f)*stbir__max_uint16_as_float)) +#ifdef STBIR_RENORMALIZE_IN_FLOAT +#define STBIR_RENORM_TYPE float #else -#define STBIR__ENCODE_LINEAR8(f) (unsigned char)STBIR__ROUND_INT(stbir__saturate(f) * stbir__max_uint8_as_float) -#define STBIR__ENCODE_LINEAR16(f) (unsigned short)STBIR__ROUND_INT(stbir__saturate(f) * stbir__max_uint16_as_float) +#define STBIR_RENORM_TYPE double #endif - switch (decode) - { - case STBIR__DECODE(STBIR_TYPE_UINT8, STBIR_COLORSPACE_LINEAR): - for (x = 0; x < num_pixels; ++x) - { - int pixel_index = x * channels; +static void stbir__cleanup_gathered_coefficients(stbir_edge edge, stbir__filter_extent_info *filter_info, stbir__scale_info *scale_info, int num_contributors, stbir__contributors *contributors, float *coefficient_group, int coefficient_width) +{ + int input_size = scale_info->input_full_size; + int input_last_n1 = input_size - 1; + int n, end; + int lowest = 0x7fffffff; + int highest = -0x7fffffff; + int widest = -1; + int numerator = scale_info->scale_numerator; + int denominator = scale_info->scale_denominator; + int polyphase = ((scale_info->scale_is_rational) && (numerator < num_contributors)); + float *coeffs; + stbir__contributors *contribs; - for (n = 0; n < channels; n++) - { - int index = pixel_index + n; - ((unsigned char *)output_buffer)[index] = STBIR__ENCODE_LINEAR8(encode_buffer[index]); - } - } - break; + // weight all the coeffs for each sample + coeffs = coefficient_group; + contribs = contributors; + end 
= num_contributors; + if (polyphase) + end = numerator; + for (n = 0; n < end; n++) + { + int i; + STBIR_RENORM_TYPE filter_scale, total_filter = 0; + int e; - case STBIR__DECODE(STBIR_TYPE_UINT8, STBIR_COLORSPACE_SRGB): - for (x = 0; x < num_pixels; ++x) - { - int pixel_index = x * channels; + // add all contribs + e = contribs->n1 - contribs->n0; + for (i = 0; i <= e; i++) + { + total_filter += (STBIR_RENORM_TYPE)coeffs[i]; + STBIR_ASSERT((coeffs[i] >= -2.0f) && (coeffs[i] <= 2.0f)); // check for wonky weights + } - for (n = 0; n < num_nonalpha; n++) - { - int index = pixel_index + nonalpha[n]; - ((unsigned char *)output_buffer)[index] = stbir__linear_to_srgb_uchar(encode_buffer[index]); - } + // rescale + if ((total_filter < stbir__small_float) && (total_filter > -stbir__small_float)) + { + // all coeffs are extremely small, just zero it + contribs->n1 = contribs->n0; + coeffs[0] = 0.0f; + } + else + { + // if the total isn't 1.0, rescale everything + if ((total_filter < (1.0f - stbir__small_float)) || (total_filter > (1.0f + stbir__small_float))) + { + filter_scale = ((STBIR_RENORM_TYPE)1.0) / total_filter; - if (!(stbir_info->flags & STBIR_FLAG_ALPHA_USES_COLORSPACE)) - ((unsigned char *)output_buffer)[pixel_index + alpha_channel] = STBIR__ENCODE_LINEAR8(encode_buffer[pixel_index + alpha_channel]); - } - break; + // scale them all + for (i = 0; i <= e; i++) + coeffs[i] = (float)(coeffs[i] * filter_scale); + } + } + ++contribs; + coeffs += coefficient_width; + } - case STBIR__DECODE(STBIR_TYPE_UINT16, STBIR_COLORSPACE_LINEAR): - for (x = 0; x < num_pixels; ++x) - { - int pixel_index = x * channels; + // if we have a rational for the scale, we can exploit the polyphaseness to not calculate + // most of the coefficients, so we copy them here + if (polyphase) + { + stbir__contributors *prev_contribs = contributors; + stbir__contributors *cur_contribs = contributors + numerator; - for (n = 0; n < channels; n++) - { - int index = pixel_index + n; - ((unsigned short *)output_buffer)[index] = STBIR__ENCODE_LINEAR16(encode_buffer[index]); - } - } - break; + for (n = numerator; n < num_contributors; n++) + { + cur_contribs->n0 = prev_contribs->n0 + denominator; + cur_contribs->n1 = prev_contribs->n1 + denominator; + ++cur_contribs; + ++prev_contribs; + } + stbir_overlapping_memcpy(coefficient_group + numerator * coefficient_width, coefficient_group, (num_contributors - numerator) * coefficient_width * sizeof(coeffs[0])); + } - case STBIR__DECODE(STBIR_TYPE_UINT16, STBIR_COLORSPACE_SRGB): - for (x = 0; x < num_pixels; ++x) - { - int pixel_index = x * channels; + coeffs = coefficient_group; + contribs = contributors; - for (n = 0; n < num_nonalpha; n++) - { - int index = pixel_index + nonalpha[n]; - ((unsigned short *)output_buffer)[index] = (unsigned short)STBIR__ROUND_INT(stbir__linear_to_srgb(stbir__saturate(encode_buffer[index])) * stbir__max_uint16_as_float); - } + for (n = 0; n < num_contributors; n++) + { + int i; - if (!(stbir_info->flags & STBIR_FLAG_ALPHA_USES_COLORSPACE)) - ((unsigned short *)output_buffer)[pixel_index + alpha_channel] = STBIR__ENCODE_LINEAR16(encode_buffer[pixel_index + alpha_channel]); - } + // in zero edge mode, just remove out of bounds contribs completely (since their weights are accounted for now) + if (edge == STBIR_EDGE_ZERO) + { + // shrink the right side if necessary + if (contribs->n1 > input_last_n1) + contribs->n1 = input_last_n1; - break; + // shrink the left side + if (contribs->n0 < 0) + { + int j, left, skips = 0; - case 
STBIR__DECODE(STBIR_TYPE_UINT32, STBIR_COLORSPACE_LINEAR): - for (x = 0; x < num_pixels; ++x) - { - int pixel_index = x * channels; + skips = -contribs->n0; + contribs->n0 = 0; - for (n = 0; n < channels; n++) - { - int index = pixel_index + n; - ((unsigned int *)output_buffer)[index] = (unsigned int)STBIR__ROUND_UINT(((double)stbir__saturate(encode_buffer[index])) * stbir__max_uint32_as_float); - } - } - break; + // now move down the weights + left = contribs->n1 - contribs->n0 + 1; + if (left > 0) + { + for (j = 0; j < left; j++) + coeffs[j] = coeffs[j + skips]; + } + } + } + else if ((edge == STBIR_EDGE_CLAMP) || (edge == STBIR_EDGE_REFLECT)) + { + // for clamp and reflect, calculate the true inbounds position (based on edge type) and just add that to the existing weight - case STBIR__DECODE(STBIR_TYPE_UINT32, STBIR_COLORSPACE_SRGB): - for (x = 0; x < num_pixels; ++x) - { - int pixel_index = x * channels; + // right hand side first + if (contribs->n1 > input_last_n1) + { + int start = contribs->n0; + int endi = contribs->n1; + contribs->n1 = input_last_n1; + for (i = input_size; i <= endi; i++) + stbir__insert_coeff(contribs, coeffs, stbir__edge_wrap_slow[edge](i, input_size), coeffs[i - start], coefficient_width); + } - for (n = 0; n < num_nonalpha; n++) - { - int index = pixel_index + nonalpha[n]; - ((unsigned int *)output_buffer)[index] = (unsigned int)STBIR__ROUND_UINT(((double)stbir__linear_to_srgb(stbir__saturate(encode_buffer[index]))) * stbir__max_uint32_as_float); - } + // now check left hand edge + if (contribs->n0 < 0) + { + int save_n0; + float save_n0_coeff; + float *c = coeffs - (contribs->n0 + 1); - if (!(stbir_info->flags & STBIR_FLAG_ALPHA_USES_COLORSPACE)) - ((unsigned int *)output_buffer)[pixel_index + alpha_channel] = (unsigned int)STBIR__ROUND_INT(((double)stbir__saturate(encode_buffer[pixel_index + alpha_channel])) * stbir__max_uint32_as_float); - } - break; + // reinsert the coeffs with it reflected or clamped (insert accumulates, if the coeffs exist) + for (i = -1; i > contribs->n0; i--) + stbir__insert_coeff(contribs, coeffs, stbir__edge_wrap_slow[edge](i, input_size), *c--, coefficient_width); + save_n0 = contribs->n0; + save_n0_coeff = c[0]; // save it, since we didn't do the final one (i==n0), because there might be too many coeffs to hold (before we resize)! 
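// ----------------------------------------------------------------------------
// Aside: an illustrative, standalone sketch (not part of the patch) of what
// the clamp folding around this point achieves. A contributor whose taps run
// off the left edge (here n0 = -2) ends up with n0 = 0, and the out-of-range
// weights are accumulated onto their clamped in-bounds slots -- the same
// "insert accumulates" behavior stbir__insert_coeff provides -- so no filter
// weight is ever dropped.

#include <stdio.h>

int main(void)
{
  float w[5] = {0.05f, 0.20f, 0.50f, 0.20f, 0.05f}; // weights for taps -2..2
  float folded[3] = {0.0f, 0.0f, 0.0f};             // taps 0..2 after folding
  int i, n0 = -2;
  for (i = 0; i < 5; i++)
  {
    int n = n0 + i;
    int clamped = (n < 0) ? 0 : n; // clamp rule for a left overhang
    folded[clamped] += w[i];       // accumulate, don't overwrite
  }
  for (i = 0; i < 3; i++) // prints 0.75, 0.20, 0.05 -- still sums to 1.0
    printf("tap %d: %.2f\n", i, folded[i]);
  return 0;
}
// ----------------------------------------------------------------------------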
- case STBIR__DECODE(STBIR_TYPE_FLOAT, STBIR_COLORSPACE_LINEAR): - for (x = 0; x < num_pixels; ++x) - { - int pixel_index = x * channels; + // now slide all the coeffs down (since we have accumulated them in the positive contribs) and reset the first contrib + contribs->n0 = 0; + for (i = 0; i <= contribs->n1; i++) + coeffs[i] = coeffs[i - save_n0]; - for (n = 0; n < channels; n++) - { - int index = pixel_index + n; - ((float *)output_buffer)[index] = encode_buffer[index]; - } - } - break; + // now that we have shrunk down the contribs, we insert the first one safely + stbir__insert_coeff(contribs, coeffs, stbir__edge_wrap_slow[edge](save_n0, input_size), save_n0_coeff, coefficient_width); + } + } - case STBIR__DECODE(STBIR_TYPE_FLOAT, STBIR_COLORSPACE_SRGB): - for (x = 0; x < num_pixels; ++x) - { - int pixel_index = x * channels; + if (contribs->n0 <= contribs->n1) + { + int diff = contribs->n1 - contribs->n0 + 1; + while (diff && (coeffs[diff - 1] == 0.0f)) + --diff; - for (n = 0; n < num_nonalpha; n++) - { - int index = pixel_index + nonalpha[n]; - ((float *)output_buffer)[index] = stbir__linear_to_srgb(encode_buffer[index]); - } + contribs->n1 = contribs->n0 + diff - 1; - if (!(stbir_info->flags & STBIR_FLAG_ALPHA_USES_COLORSPACE)) - ((float *)output_buffer)[pixel_index + alpha_channel] = encode_buffer[pixel_index + alpha_channel]; - } - break; + if (contribs->n0 <= contribs->n1) + { + if (contribs->n0 < lowest) + lowest = contribs->n0; + if (contribs->n1 > highest) + highest = contribs->n1; + if (diff > widest) + widest = diff; + } - default: - STBIR_ASSERT(!"Unknown type/colorspace/channels combination."); - break; - } + // re-zero out unused coefficients (if any) + for (i = diff; i < coefficient_width; i++) + coeffs[i] = 0.0f; + } + + ++contribs; + coeffs += coefficient_width; + } + filter_info->lowest = lowest; + filter_info->highest = highest; + filter_info->widest = widest; } -static void stbir__resample_vertical_upsample(stbir__info *stbir_info, int n) +#undef STBIR_RENORM_TYPE + +static int stbir__pack_coefficients(int num_contributors, stbir__contributors *contributors, float *coefficents, int coefficient_width, int widest, int row0, int row1) { - int x, k; - int output_w = stbir_info->output_w; - stbir__contributors *vertical_contributors = stbir_info->vertical_contributors; - float *vertical_coefficients = stbir_info->vertical_coefficients; - int channels = stbir_info->channels; - int alpha_channel = stbir_info->alpha_channel; - int type = stbir_info->type; - int colorspace = stbir_info->colorspace; - int ring_buffer_entries = stbir_info->ring_buffer_num_entries; - void *output_data = stbir_info->output_data; - float *encode_buffer = stbir_info->encode_buffer; - int decode = STBIR__DECODE(type, colorspace); - int coefficient_width = stbir_info->vertical_coefficient_width; - int coefficient_counter; - int contributor = n; - - float *ring_buffer = stbir_info->ring_buffer; - int ring_buffer_begin_index = stbir_info->ring_buffer_begin_index; - int ring_buffer_first_scanline = stbir_info->ring_buffer_first_scanline; - int ring_buffer_length = stbir_info->ring_buffer_length_bytes / sizeof(float); - - int n0, n1, output_row_start; - int coefficient_group = coefficient_width * contributor; - - n0 = vertical_contributors[contributor].n0; - n1 = vertical_contributors[contributor].n1; - - output_row_start = n * stbir_info->output_stride_bytes; - - STBIR_ASSERT(stbir__use_height_upsampling(stbir_info)); - - memset(encode_buffer, 0, output_w * sizeof(float) * channels); - - // I tried 
reblocking this for better cache usage of encode_buffer - // (using x_outer, k, x_inner), but it lost speed. -- stb - - coefficient_counter = 0; - switch (channels) - { - case 1: - for (k = n0; k <= n1; k++) - { - int coefficient_index = coefficient_counter++; - float *ring_buffer_entry = stbir__get_ring_buffer_scanline(k, ring_buffer, ring_buffer_begin_index, ring_buffer_first_scanline, ring_buffer_entries, ring_buffer_length); - float coefficient = vertical_coefficients[coefficient_group + coefficient_index]; - for (x = 0; x < output_w; ++x) - { - int in_pixel_index = x * 1; - encode_buffer[in_pixel_index + 0] += ring_buffer_entry[in_pixel_index + 0] * coefficient; - } - } - break; - case 2: - for (k = n0; k <= n1; k++) - { - int coefficient_index = coefficient_counter++; - float *ring_buffer_entry = stbir__get_ring_buffer_scanline(k, ring_buffer, ring_buffer_begin_index, ring_buffer_first_scanline, ring_buffer_entries, ring_buffer_length); - float coefficient = vertical_coefficients[coefficient_group + coefficient_index]; - for (x = 0; x < output_w; ++x) - { - int in_pixel_index = x * 2; - encode_buffer[in_pixel_index + 0] += ring_buffer_entry[in_pixel_index + 0] * coefficient; - encode_buffer[in_pixel_index + 1] += ring_buffer_entry[in_pixel_index + 1] * coefficient; - } - } - break; - case 3: - for (k = n0; k <= n1; k++) - { - int coefficient_index = coefficient_counter++; - float *ring_buffer_entry = stbir__get_ring_buffer_scanline(k, ring_buffer, ring_buffer_begin_index, ring_buffer_first_scanline, ring_buffer_entries, ring_buffer_length); - float coefficient = vertical_coefficients[coefficient_group + coefficient_index]; - for (x = 0; x < output_w; ++x) - { - int in_pixel_index = x * 3; - encode_buffer[in_pixel_index + 0] += ring_buffer_entry[in_pixel_index + 0] * coefficient; - encode_buffer[in_pixel_index + 1] += ring_buffer_entry[in_pixel_index + 1] * coefficient; - encode_buffer[in_pixel_index + 2] += ring_buffer_entry[in_pixel_index + 2] * coefficient; - } - } - break; - case 4: - for (k = n0; k <= n1; k++) - { - int coefficient_index = coefficient_counter++; - float *ring_buffer_entry = stbir__get_ring_buffer_scanline(k, ring_buffer, ring_buffer_begin_index, ring_buffer_first_scanline, ring_buffer_entries, ring_buffer_length); - float coefficient = vertical_coefficients[coefficient_group + coefficient_index]; - for (x = 0; x < output_w; ++x) - { - int in_pixel_index = x * 4; - encode_buffer[in_pixel_index + 0] += ring_buffer_entry[in_pixel_index + 0] * coefficient; - encode_buffer[in_pixel_index + 1] += ring_buffer_entry[in_pixel_index + 1] * coefficient; - encode_buffer[in_pixel_index + 2] += ring_buffer_entry[in_pixel_index + 2] * coefficient; - encode_buffer[in_pixel_index + 3] += ring_buffer_entry[in_pixel_index + 3] * coefficient; - } - } - break; - default: - for (k = n0; k <= n1; k++) - { - int coefficient_index = coefficient_counter++; - float *ring_buffer_entry = stbir__get_ring_buffer_scanline(k, ring_buffer, ring_buffer_begin_index, ring_buffer_first_scanline, ring_buffer_entries, ring_buffer_length); - float coefficient = vertical_coefficients[coefficient_group + coefficient_index]; - for (x = 0; x < output_w; ++x) - { - int in_pixel_index = x * channels; - int c; - for (c = 0; c < channels; c++) - encode_buffer[in_pixel_index + c] += ring_buffer_entry[in_pixel_index + c] * coefficient; - } - } - break; - } - stbir__encode_scanline(stbir_info, output_w, (char *)output_data + output_row_start, encode_buffer, channels, alpha_channel, decode); -} - -static void 
stbir__resample_vertical_downsample(stbir__info *stbir_info, int n) -{ - int x, k; - int output_w = stbir_info->output_w; - stbir__contributors *vertical_contributors = stbir_info->vertical_contributors; - float *vertical_coefficients = stbir_info->vertical_coefficients; - int channels = stbir_info->channels; - int ring_buffer_entries = stbir_info->ring_buffer_num_entries; - float *horizontal_buffer = stbir_info->horizontal_buffer; - int coefficient_width = stbir_info->vertical_coefficient_width; - int contributor = n + stbir_info->vertical_filter_pixel_margin; - - float *ring_buffer = stbir_info->ring_buffer; - int ring_buffer_begin_index = stbir_info->ring_buffer_begin_index; - int ring_buffer_first_scanline = stbir_info->ring_buffer_first_scanline; - int ring_buffer_length = stbir_info->ring_buffer_length_bytes / sizeof(float); - int n0, n1; - - n0 = vertical_contributors[contributor].n0; - n1 = vertical_contributors[contributor].n1; - - STBIR_ASSERT(!stbir__use_height_upsampling(stbir_info)); - - for (k = n0; k <= n1; k++) - { - int coefficient_index = k - n0; - int coefficient_group = coefficient_width * contributor; - float coefficient = vertical_coefficients[coefficient_group + coefficient_index]; - - float *ring_buffer_entry = stbir__get_ring_buffer_scanline(k, ring_buffer, ring_buffer_begin_index, ring_buffer_first_scanline, ring_buffer_entries, ring_buffer_length); - - switch (channels) - { - case 1: - for (x = 0; x < output_w; x++) - { - int in_pixel_index = x * 1; - ring_buffer_entry[in_pixel_index + 0] += horizontal_buffer[in_pixel_index + 0] * coefficient; - } - break; - case 2: - for (x = 0; x < output_w; x++) - { - int in_pixel_index = x * 2; - ring_buffer_entry[in_pixel_index + 0] += horizontal_buffer[in_pixel_index + 0] * coefficient; - ring_buffer_entry[in_pixel_index + 1] += horizontal_buffer[in_pixel_index + 1] * coefficient; - } - break; - case 3: - for (x = 0; x < output_w; x++) - { - int in_pixel_index = x * 3; - ring_buffer_entry[in_pixel_index + 0] += horizontal_buffer[in_pixel_index + 0] * coefficient; - ring_buffer_entry[in_pixel_index + 1] += horizontal_buffer[in_pixel_index + 1] * coefficient; - ring_buffer_entry[in_pixel_index + 2] += horizontal_buffer[in_pixel_index + 2] * coefficient; - } - break; - case 4: - for (x = 0; x < output_w; x++) - { - int in_pixel_index = x * 4; - ring_buffer_entry[in_pixel_index + 0] += horizontal_buffer[in_pixel_index + 0] * coefficient; - ring_buffer_entry[in_pixel_index + 1] += horizontal_buffer[in_pixel_index + 1] * coefficient; - ring_buffer_entry[in_pixel_index + 2] += horizontal_buffer[in_pixel_index + 2] * coefficient; - ring_buffer_entry[in_pixel_index + 3] += horizontal_buffer[in_pixel_index + 3] * coefficient; - } - break; - default: - for (x = 0; x < output_w; x++) - { - int in_pixel_index = x * channels; - - int c; - for (c = 0; c < channels; c++) - ring_buffer_entry[in_pixel_index + c] += horizontal_buffer[in_pixel_index + c] * coefficient; - } - break; - } - } -} - -static void stbir__buffer_loop_upsample(stbir__info *stbir_info) -{ - int y; - float scale_ratio = stbir_info->vertical_scale; - float out_scanlines_radius = stbir__filter_info_table[stbir_info->vertical_filter].support(1 / scale_ratio) * scale_ratio; - - STBIR_ASSERT(stbir__use_height_upsampling(stbir_info)); - - for (y = 0; y < stbir_info->output_h; y++) - { - float in_center_of_out = 0; // Center of the current out scanline in the in scanline space - int in_first_scanline = 0, in_last_scanline = 0; - - stbir__calculate_sample_range_upsample(y, 
out_scanlines_radius, scale_ratio, stbir_info->vertical_shift, &in_first_scanline, &in_last_scanline, &in_center_of_out); - - STBIR_ASSERT(in_last_scanline - in_first_scanline + 1 <= stbir_info->ring_buffer_num_entries); - - if (stbir_info->ring_buffer_begin_index >= 0) - { - // Get rid of whatever we don't need anymore. - while (in_first_scanline > stbir_info->ring_buffer_first_scanline) - { - if (stbir_info->ring_buffer_first_scanline == stbir_info->ring_buffer_last_scanline) - { - // We just popped the last scanline off the ring buffer. - // Reset it to the empty state. - stbir_info->ring_buffer_begin_index = -1; - stbir_info->ring_buffer_first_scanline = 0; - stbir_info->ring_buffer_last_scanline = 0; - break; - } - else - { - stbir_info->ring_buffer_first_scanline++; - stbir_info->ring_buffer_begin_index = (stbir_info->ring_buffer_begin_index + 1) % stbir_info->ring_buffer_num_entries; - } - } - } - - // Load in new ones. - if (stbir_info->ring_buffer_begin_index < 0) - stbir__decode_and_resample_upsample(stbir_info, in_first_scanline); - - while (in_last_scanline > stbir_info->ring_buffer_last_scanline) - stbir__decode_and_resample_upsample(stbir_info, stbir_info->ring_buffer_last_scanline + 1); - - // Now all buffers should be ready to write a row of vertical sampling. - stbir__resample_vertical_upsample(stbir_info, y); - - STBIR_PROGRESS_REPORT((float)y / stbir_info->output_h); - } -} - -static void stbir__empty_ring_buffer(stbir__info *stbir_info, int first_necessary_scanline) -{ - int output_stride_bytes = stbir_info->output_stride_bytes; - int channels = stbir_info->channels; - int alpha_channel = stbir_info->alpha_channel; - int type = stbir_info->type; - int colorspace = stbir_info->colorspace; - int output_w = stbir_info->output_w; - void *output_data = stbir_info->output_data; - int decode = STBIR__DECODE(type, colorspace); - - float *ring_buffer = stbir_info->ring_buffer; - int ring_buffer_length = stbir_info->ring_buffer_length_bytes / sizeof(float); - - if (stbir_info->ring_buffer_begin_index >= 0) - { - // Get rid of whatever we don't need anymore. - while (first_necessary_scanline > stbir_info->ring_buffer_first_scanline) - { - if (stbir_info->ring_buffer_first_scanline >= 0 && stbir_info->ring_buffer_first_scanline < stbir_info->output_h) - { - int output_row_start = stbir_info->ring_buffer_first_scanline * output_stride_bytes; - float *ring_buffer_entry = stbir__get_ring_buffer_entry(ring_buffer, stbir_info->ring_buffer_begin_index, ring_buffer_length); - stbir__encode_scanline(stbir_info, output_w, (char *)output_data + output_row_start, ring_buffer_entry, channels, alpha_channel, decode); - STBIR_PROGRESS_REPORT((float)stbir_info->ring_buffer_first_scanline / stbir_info->output_h); - } - - if (stbir_info->ring_buffer_first_scanline == stbir_info->ring_buffer_last_scanline) - { - // We just popped the last scanline off the ring buffer. - // Reset it to the empty state. 
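// ---------------------------------------------------------------------------
// [editor's illustration, not part of the patch] The v1 scanline ring buffer
// being removed here tracks a sliding window of decoded rows: a begin index
// of -1 marks the empty state, and dropping the oldest row advances both the
// first-scanline counter and the begin index modulo the entry count. A
// minimal model of that pop step, with hypothetical names:
typedef struct { int begin, first, last, entries; } ringbuf_sketch;
static void ringbuf_pop_sketch(ringbuf_sketch *r)
{
   if (r->first == r->last)
   {
      r->begin = -1;           // popped the final scanline: back to empty
      r->first = r->last = 0;
   }
   else
   {
      r->first++;              // slide the window forward one scanline
      r->begin = (r->begin + 1) % r->entries;
   }
}
// ---------------------------------------------------------------------------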
- stbir_info->ring_buffer_begin_index = -1; - stbir_info->ring_buffer_first_scanline = 0; - stbir_info->ring_buffer_last_scanline = 0; - break; - } - else - { - stbir_info->ring_buffer_first_scanline++; - stbir_info->ring_buffer_begin_index = (stbir_info->ring_buffer_begin_index + 1) % stbir_info->ring_buffer_num_entries; - } - } - } -} - -static void stbir__buffer_loop_downsample(stbir__info *stbir_info) -{ - int y; - float scale_ratio = stbir_info->vertical_scale; - int output_h = stbir_info->output_h; - float in_pixels_radius = stbir__filter_info_table[stbir_info->vertical_filter].support(scale_ratio) / scale_ratio; - int pixel_margin = stbir_info->vertical_filter_pixel_margin; - int max_y = stbir_info->input_h + pixel_margin; - - STBIR_ASSERT(!stbir__use_height_upsampling(stbir_info)); - - for (y = -pixel_margin; y < max_y; y++) - { - float out_center_of_in; // Center of the current out scanline in the in scanline space - int out_first_scanline, out_last_scanline; - - stbir__calculate_sample_range_downsample(y, in_pixels_radius, scale_ratio, stbir_info->vertical_shift, &out_first_scanline, &out_last_scanline, &out_center_of_in); - - STBIR_ASSERT(out_last_scanline - out_first_scanline + 1 <= stbir_info->ring_buffer_num_entries); - - if (out_last_scanline < 0 || out_first_scanline >= output_h) - continue; - - stbir__empty_ring_buffer(stbir_info, out_first_scanline); - - stbir__decode_and_resample_downsample(stbir_info, y); - - // Load in new ones. - if (stbir_info->ring_buffer_begin_index < 0) - stbir__add_empty_ring_buffer_entry(stbir_info, out_first_scanline); - - while (out_last_scanline > stbir_info->ring_buffer_last_scanline) - stbir__add_empty_ring_buffer_entry(stbir_info, stbir_info->ring_buffer_last_scanline + 1); - - // Now the horizontal buffer is ready to write to all ring buffer rows. - stbir__resample_vertical_downsample(stbir_info, y); - } - - stbir__empty_ring_buffer(stbir_info, stbir_info->output_h); -} - -static void stbir__setup(stbir__info *info, int input_w, int input_h, int output_w, int output_h, int channels) -{ - info->input_w = input_w; - info->input_h = input_h; - info->output_w = output_w; - info->output_h = output_h; - info->channels = channels; -} - -static void stbir__calculate_transform(stbir__info *info, float s0, float t0, float s1, float t1, float *transform) -{ - info->s0 = s0; - info->t0 = t0; - info->s1 = s1; - info->t1 = t1; - - if (transform) - { - info->horizontal_scale = transform[0]; - info->vertical_scale = transform[1]; - info->horizontal_shift = transform[2]; - info->vertical_shift = transform[3]; - } - else - { - info->horizontal_scale = ((float)info->output_w / info->input_w) / (s1 - s0); - info->vertical_scale = ((float)info->output_h / info->input_h) / (t1 - t0); - - info->horizontal_shift = s0 * info->output_w / (s1 - s0); - info->vertical_shift = t0 * info->output_h / (t1 - t0); - } -} - -static void stbir__choose_filter(stbir__info *info, stbir_filter h_filter, stbir_filter v_filter) -{ - if (h_filter == 0) - h_filter = stbir__use_upsampling(info->horizontal_scale) ? STBIR_DEFAULT_FILTER_UPSAMPLE : STBIR_DEFAULT_FILTER_DOWNSAMPLE; - if (v_filter == 0) - v_filter = stbir__use_upsampling(info->vertical_scale) ? 
STBIR_DEFAULT_FILTER_UPSAMPLE : STBIR_DEFAULT_FILTER_DOWNSAMPLE; - info->horizontal_filter = h_filter; - info->vertical_filter = v_filter; -} - -static stbir_uint32 stbir__calculate_memory(stbir__info *info) -{ - int pixel_margin = stbir__get_filter_pixel_margin(info->horizontal_filter, info->horizontal_scale); - int filter_height = stbir__get_filter_pixel_width(info->vertical_filter, info->vertical_scale); - - info->horizontal_num_contributors = stbir__get_contributors(info->horizontal_scale, info->horizontal_filter, info->input_w, info->output_w); - info->vertical_num_contributors = stbir__get_contributors(info->vertical_scale, info->vertical_filter, info->input_h, info->output_h); - - // One extra entry because floating point precision problems sometimes cause an extra to be necessary. - info->ring_buffer_num_entries = filter_height + 1; - - info->horizontal_contributors_size = info->horizontal_num_contributors * sizeof(stbir__contributors); - info->horizontal_coefficients_size = stbir__get_total_horizontal_coefficients(info) * sizeof(float); - info->vertical_contributors_size = info->vertical_num_contributors * sizeof(stbir__contributors); - info->vertical_coefficients_size = stbir__get_total_vertical_coefficients(info) * sizeof(float); - info->decode_buffer_size = (info->input_w + pixel_margin * 2) * info->channels * sizeof(float); - info->horizontal_buffer_size = info->output_w * info->channels * sizeof(float); - info->ring_buffer_size = info->output_w * info->channels * info->ring_buffer_num_entries * sizeof(float); - info->encode_buffer_size = info->output_w * info->channels * sizeof(float); - - STBIR_ASSERT(info->horizontal_filter != 0); - STBIR_ASSERT(info->horizontal_filter < STBIR__ARRAY_SIZE(stbir__filter_info_table)); // this now happens too late - STBIR_ASSERT(info->vertical_filter != 0); - STBIR_ASSERT(info->vertical_filter < STBIR__ARRAY_SIZE(stbir__filter_info_table)); // this now happens too late - - if (stbir__use_height_upsampling(info)) - // The horizontal buffer is for when we're downsampling the height and we - // can't output the result of sampling the decode buffer directly into the - // ring buffers. - info->horizontal_buffer_size = 0; - else - // The encode buffer is to retain precision in the height upsampling method - // and isn't used when height downsampling. - info->encode_buffer_size = 0; - - return info->horizontal_contributors_size + info->horizontal_coefficients_size + info->vertical_contributors_size + info->vertical_coefficients_size + info->decode_buffer_size + info->horizontal_buffer_size + info->ring_buffer_size + info->encode_buffer_size; -} - -static int stbir__resize_allocated(stbir__info *info, - const void *input_data, int input_stride_in_bytes, - void *output_data, int output_stride_in_bytes, - int alpha_channel, stbir_uint32 flags, stbir_datatype type, - stbir_edge edge_horizontal, stbir_edge edge_vertical, stbir_colorspace colorspace, - void *tempmem, size_t tempmem_size_in_bytes) -{ - size_t memory_required = stbir__calculate_memory(info); - - int width_stride_input = input_stride_in_bytes ? input_stride_in_bytes : info->channels * info->input_w * stbir__type_size[type]; - int width_stride_output = output_stride_in_bytes ? 
output_stride_in_bytes : info->channels * info->output_w * stbir__type_size[type]; - -#ifdef STBIR_DEBUG_OVERWRITE_TEST -#define OVERWRITE_ARRAY_SIZE 8 - unsigned char overwrite_output_before_pre[OVERWRITE_ARRAY_SIZE]; - unsigned char overwrite_tempmem_before_pre[OVERWRITE_ARRAY_SIZE]; - unsigned char overwrite_output_after_pre[OVERWRITE_ARRAY_SIZE]; - unsigned char overwrite_tempmem_after_pre[OVERWRITE_ARRAY_SIZE]; - - size_t begin_forbidden = width_stride_output * (info->output_h - 1) + info->output_w * info->channels * stbir__type_size[type]; - memcpy(overwrite_output_before_pre, &((unsigned char *)output_data)[-OVERWRITE_ARRAY_SIZE], OVERWRITE_ARRAY_SIZE); - memcpy(overwrite_output_after_pre, &((unsigned char *)output_data)[begin_forbidden], OVERWRITE_ARRAY_SIZE); - memcpy(overwrite_tempmem_before_pre, &((unsigned char *)tempmem)[-OVERWRITE_ARRAY_SIZE], OVERWRITE_ARRAY_SIZE); - memcpy(overwrite_tempmem_after_pre, &((unsigned char *)tempmem)[tempmem_size_in_bytes], OVERWRITE_ARRAY_SIZE); +#define STBIR_MOVE_1(dest, src) \ + { \ + STBIR_NO_UNROLL(dest); \ + ((stbir_uint32 *)(dest))[0] = ((stbir_uint32 *)(src))[0]; \ + } +#define STBIR_MOVE_2(dest, src) \ + { \ + STBIR_NO_UNROLL(dest); \ + ((stbir_uint64 *)(dest))[0] = ((stbir_uint64 *)(src))[0]; \ + } +#ifdef STBIR_SIMD +#define STBIR_MOVE_4(dest, src) \ + { \ + stbir__simdf t; \ + STBIR_NO_UNROLL(dest); \ + stbir__simdf_load(t, src); \ + stbir__simdf_store(dest, t); \ + } +#else +#define STBIR_MOVE_4(dest, src) \ + { \ + STBIR_NO_UNROLL(dest); \ + ((stbir_uint64 *)(dest))[0] = ((stbir_uint64 *)(src))[0]; \ + ((stbir_uint64 *)(dest))[1] = ((stbir_uint64 *)(src))[1]; \ + } #endif - STBIR_ASSERT(info->channels >= 0); - STBIR_ASSERT(info->channels <= STBIR_MAX_CHANNELS); + int row_end = row1 + 1; + STBIR__UNUSED(row0); // only used in an assert - if (info->channels < 0 || info->channels > STBIR_MAX_CHANNELS) - return 0; + if (coefficient_width != widest) + { + float *pc = coefficents; + float *coeffs = coefficents; + float *pc_end = coefficents + num_contributors * widest; + switch (widest) + { + case 1: + STBIR_NO_UNROLL_LOOP_START + do + { + STBIR_MOVE_1(pc, coeffs); + ++pc; + coeffs += coefficient_width; + } while (pc < pc_end); + break; + case 2: + STBIR_NO_UNROLL_LOOP_START + do + { + STBIR_MOVE_2(pc, coeffs); + pc += 2; + coeffs += coefficient_width; + } while (pc < pc_end); + break; + case 3: + STBIR_NO_UNROLL_LOOP_START + do + { + STBIR_MOVE_2(pc, coeffs); + STBIR_MOVE_1(pc + 2, coeffs + 2); + pc += 3; + coeffs += coefficient_width; + } while (pc < pc_end); + break; + case 4: + STBIR_NO_UNROLL_LOOP_START + do + { + STBIR_MOVE_4(pc, coeffs); + pc += 4; + coeffs += coefficient_width; + } while (pc < pc_end); + break; + case 5: + STBIR_NO_UNROLL_LOOP_START + do + { + STBIR_MOVE_4(pc, coeffs); + STBIR_MOVE_1(pc + 4, coeffs + 4); + pc += 5; + coeffs += coefficient_width; + } while (pc < pc_end); + break; + case 6: + STBIR_NO_UNROLL_LOOP_START + do + { + STBIR_MOVE_4(pc, coeffs); + STBIR_MOVE_2(pc + 4, coeffs + 4); + pc += 6; + coeffs += coefficient_width; + } while (pc < pc_end); + break; + case 7: + STBIR_NO_UNROLL_LOOP_START + do + { + STBIR_MOVE_4(pc, coeffs); + STBIR_MOVE_2(pc + 4, coeffs + 4); + STBIR_MOVE_1(pc + 6, coeffs + 6); + pc += 7; + coeffs += coefficient_width; + } while (pc < pc_end); + break; + case 8: + STBIR_NO_UNROLL_LOOP_START + do + { + STBIR_MOVE_4(pc, coeffs); + STBIR_MOVE_4(pc + 4, coeffs + 4); + pc += 8; + coeffs += coefficient_width; + } while (pc < pc_end); + break; + case 9: + STBIR_NO_UNROLL_LOOP_START + do + 
{
+        STBIR_MOVE_4(pc, coeffs);
+        STBIR_MOVE_4(pc + 4, coeffs + 4);
+        STBIR_MOVE_1(pc + 8, coeffs + 8);
+        pc += 9;
+        coeffs += coefficient_width;
+      } while (pc < pc_end);
+      break;
+    case 10:
+      STBIR_NO_UNROLL_LOOP_START
+      do
+      {
+        STBIR_MOVE_4(pc, coeffs);
+        STBIR_MOVE_4(pc + 4, coeffs + 4);
+        STBIR_MOVE_2(pc + 8, coeffs + 8);
+        pc += 10;
+        coeffs += coefficient_width;
+      } while (pc < pc_end);
+      break;
+    case 11:
+      STBIR_NO_UNROLL_LOOP_START
+      do
+      {
+        STBIR_MOVE_4(pc, coeffs);
+        STBIR_MOVE_4(pc + 4, coeffs + 4);
+        STBIR_MOVE_2(pc + 8, coeffs + 8);
+        STBIR_MOVE_1(pc + 10, coeffs + 10);
+        pc += 11;
+        coeffs += coefficient_width;
+      } while (pc < pc_end);
+      break;
+    case 12:
+      STBIR_NO_UNROLL_LOOP_START
+      do
+      {
+        STBIR_MOVE_4(pc, coeffs);
+        STBIR_MOVE_4(pc + 4, coeffs + 4);
+        STBIR_MOVE_4(pc + 8, coeffs + 8);
+        pc += 12;
+        coeffs += coefficient_width;
+      } while (pc < pc_end);
+      break;
+    default:
+      STBIR_NO_UNROLL_LOOP_START
+      do
+      {
+        float *copy_end = pc + widest - 4;
+        float *c = coeffs;
+        do
+        {
+          STBIR_NO_UNROLL(pc);
+          STBIR_MOVE_4(pc, c);
+          pc += 4;
+          c += 4;
+        } while (pc <= copy_end);
+        copy_end += 4;
+        STBIR_NO_UNROLL_LOOP_START
+        while (pc < copy_end)
+        {
+          STBIR_MOVE_1(pc, c);
+          ++pc;
+          ++c;
+        }
+        coeffs += coefficient_width;
+      } while (pc < pc_end);
+      break;
+    }
+  }
-  STBIR_ASSERT(info->horizontal_filter < STBIR__ARRAY_SIZE(stbir__filter_info_table));
-  STBIR_ASSERT(info->vertical_filter < STBIR__ARRAY_SIZE(stbir__filter_info_table));
+  // some horizontal routines read one float off the end (which is then masked off), so put in a sentinel so we don't read an sNaN or denormal
+  coefficents[widest * num_contributors] = 8888.0f;
-  if (info->horizontal_filter >= STBIR__ARRAY_SIZE(stbir__filter_info_table))
-    return 0;
-  if (info->vertical_filter >= STBIR__ARRAY_SIZE(stbir__filter_info_table))
-    return 0;
+  // the minimum we might read for unrolled filter widths is 12. So, we need to
+  // make sure we never read outside the decode buffer, by possibly moving
+  // the sample area back into the scanline, and putting zero weights first.
+  // we start on the right edge and check until we're well past the possible
+  // clip area (2*widest).
+  {
+    stbir__contributors *contribs = contributors + num_contributors - 1;
+    float *coeffs = coefficents + widest * (num_contributors - 1);
-  if (alpha_channel < 0)
-    flags |= STBIR_FLAG_ALPHA_USES_COLORSPACE | STBIR_FLAG_ALPHA_PREMULTIPLIED;
+    // go until no chance of clipping (this is usually less than 8 loops)
+    while ((contribs >= contributors) && ((contribs->n0 + widest * 2) >= row_end))
+    {
+      // might we clip??
+      if ((contribs->n0 + widest) > row_end)
+      {
+        int stop_range = widest;
-  if (!(flags & STBIR_FLAG_ALPHA_USES_COLORSPACE) || !(flags & STBIR_FLAG_ALPHA_PREMULTIPLIED))
-  {
-    STBIR_ASSERT(alpha_channel >= 0 && alpha_channel < info->channels);
-  }
+        // if range is larger than 12, it will be handled by generic loops that can terminate on the exact length
+        // of this contrib's n1, instead of a fixed widest amount - so calculate this
+        if (widest > 12)
+        {
+          int mod;
-  if (alpha_channel >= info->channels)
-    return 0;
+          // how far will be read in the n_coeff loop (which depends on the widest count mod4);
+          mod = widest & 3;
+          stop_range = (((contribs->n1 - contribs->n0 + 1) - mod + 3) & ~3) + mod;
-  STBIR_ASSERT(tempmem);
+          // the n_coeff loops do a minimum amount of coeffs, so factor that in!
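// ---------------------------------------------------------------------------
// [editor's illustration, not part of the patch] The unrolled switch above
// left-packs every contributor's row of coefficients in place, from the
// padded coefficient_width stride down to the widest stride actually in use.
// A generic form of that packing (pack_rows_sketch is a hypothetical helper):
static void pack_rows_sketch(float *co, int rows, int from_stride, int to_stride)
{
   float *dst = co;
   const float *src = co;
   int r, i;
   for (r = 0; r < rows; r++)
   {
      for (i = 0; i < to_stride; i++)
         dst[i] = src[i]; // forward copy is safe in place: dst never passes src while to_stride <= from_stride
      dst += to_stride;
      src += from_stride;
   }
}
// ---------------------------------------------------------------------------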
+ if (stop_range < (8 + mod)) + stop_range = 8 + mod; + } - if (!tempmem) - return 0; + // now see if we still clip with the refined range + if ((contribs->n0 + stop_range) > row_end) + { + int new_n0 = row_end - stop_range; + int num = contribs->n1 - contribs->n0 + 1; + int backup = contribs->n0 - new_n0; + float *from_co = coeffs + num - 1; + float *to_co = from_co + backup; - STBIR_ASSERT(tempmem_size_in_bytes >= memory_required); + STBIR_ASSERT((new_n0 >= row0) && (new_n0 < contribs->n0)); - if (tempmem_size_in_bytes < memory_required) - return 0; + // move the coeffs over + while (num) + { + *to_co-- = *from_co--; + --num; + } + // zero new positions + while (to_co >= coeffs) + *to_co-- = 0; + // set new start point + contribs->n0 = new_n0; + if (widest > 12) + { + int mod; - memset(tempmem, 0, tempmem_size_in_bytes); + // how far will be read in the n_coeff loop (which depends on the widest count mod4); + mod = widest & 3; + stop_range = (((contribs->n1 - contribs->n0 + 1) - mod + 3) & ~3) + mod; - info->input_data = input_data; - info->input_stride_bytes = width_stride_input; + // the n_coeff loops do a minimum amount of coeffs, so factor that in! + if (stop_range < (8 + mod)) + stop_range = 8 + mod; + } + } + } + --contribs; + coeffs -= widest; + } + } - info->output_data = output_data; - info->output_stride_bytes = width_stride_output; + return widest; +#undef STBIR_MOVE_1 +#undef STBIR_MOVE_2 +#undef STBIR_MOVE_4 +} - info->alpha_channel = alpha_channel; - info->flags = flags; - info->type = type; - info->edge_horizontal = edge_horizontal; - info->edge_vertical = edge_vertical; - info->colorspace = colorspace; +static void stbir__calculate_filters(stbir__sampler *samp, stbir__sampler *other_axis_for_pivot, void *user_data STBIR_ONLY_PROFILE_BUILD_GET_INFO) +{ + int n; + float scale = samp->scale_info.scale; + stbir__kernel_callback *kernel = samp->filter_kernel; + stbir__support_callback *support = samp->filter_support; + float inv_scale = samp->scale_info.inv_scale; + int input_full_size = samp->scale_info.input_full_size; + int gather_num_contributors = samp->num_contributors; + stbir__contributors *gather_contributors = samp->contributors; + float *gather_coeffs = samp->coefficients; + int gather_coefficient_width = samp->coefficient_width; - info->horizontal_coefficient_width = stbir__get_coefficient_width(info->horizontal_filter, info->horizontal_scale); - info->vertical_coefficient_width = stbir__get_coefficient_width(info->vertical_filter, info->vertical_scale); - info->horizontal_filter_pixel_width = stbir__get_filter_pixel_width(info->horizontal_filter, info->horizontal_scale); - info->vertical_filter_pixel_width = stbir__get_filter_pixel_width(info->vertical_filter, info->vertical_scale); - info->horizontal_filter_pixel_margin = stbir__get_filter_pixel_margin(info->horizontal_filter, info->horizontal_scale); - info->vertical_filter_pixel_margin = stbir__get_filter_pixel_margin(info->vertical_filter, info->vertical_scale); + switch (samp->is_gather) + { + case 1: // gather upsample + { + float out_pixels_radius = support(inv_scale, user_data) * scale; - info->ring_buffer_length_bytes = info->output_w * info->channels * sizeof(float); - info->decode_buffer_pixels = info->input_w + info->horizontal_filter_pixel_margin * 2; + stbir__calculate_coefficients_for_gather_upsample(out_pixels_radius, kernel, &samp->scale_info, gather_num_contributors, gather_contributors, gather_coeffs, gather_coefficient_width, samp->edge, user_data); -#define STBIR__NEXT_MEMPTR(current, newtype) 
(newtype *)(((unsigned char *)current) + current##_size) + STBIR_PROFILE_BUILD_START(cleanup); + stbir__cleanup_gathered_coefficients(samp->edge, &samp->extent_info, &samp->scale_info, gather_num_contributors, gather_contributors, gather_coeffs, gather_coefficient_width); + STBIR_PROFILE_BUILD_END(cleanup); + } + break; - info->horizontal_contributors = (stbir__contributors *)tempmem; - info->horizontal_coefficients = STBIR__NEXT_MEMPTR(info->horizontal_contributors, float); - info->vertical_contributors = STBIR__NEXT_MEMPTR(info->horizontal_coefficients, stbir__contributors); - info->vertical_coefficients = STBIR__NEXT_MEMPTR(info->vertical_contributors, float); - info->decode_buffer = STBIR__NEXT_MEMPTR(info->vertical_coefficients, float); + case 0: // scatter downsample (only on vertical) + case 2: // gather downsample + { + float in_pixels_radius = support(scale, user_data) * inv_scale; + int filter_pixel_margin = samp->filter_pixel_margin; + int input_end = input_full_size + filter_pixel_margin; - if (stbir__use_height_upsampling(info)) - { - info->horizontal_buffer = NULL; - info->ring_buffer = STBIR__NEXT_MEMPTR(info->decode_buffer, float); - info->encode_buffer = STBIR__NEXT_MEMPTR(info->ring_buffer, float); + // if this is a scatter, we do a downsample gather to get the coeffs, and then pivot after + if (!samp->is_gather) + { + // check if we are using the same gather downsample on the horizontal as this vertical, + // if so, then we don't have to generate them, we can just pivot from the horizontal. + if (other_axis_for_pivot) + { + gather_contributors = other_axis_for_pivot->contributors; + gather_coeffs = other_axis_for_pivot->coefficients; + gather_coefficient_width = other_axis_for_pivot->coefficient_width; + gather_num_contributors = other_axis_for_pivot->num_contributors; + samp->extent_info.lowest = other_axis_for_pivot->extent_info.lowest; + samp->extent_info.highest = other_axis_for_pivot->extent_info.highest; + samp->extent_info.widest = other_axis_for_pivot->extent_info.widest; + goto jump_right_to_pivot; + } - STBIR_ASSERT((size_t)STBIR__NEXT_MEMPTR(info->encode_buffer, unsigned char) == (size_t)tempmem + tempmem_size_in_bytes); - } - else - { - info->horizontal_buffer = STBIR__NEXT_MEMPTR(info->decode_buffer, float); - info->ring_buffer = STBIR__NEXT_MEMPTR(info->horizontal_buffer, float); - info->encode_buffer = NULL; + gather_contributors = samp->gather_prescatter_contributors; + gather_coeffs = samp->gather_prescatter_coefficients; + gather_coefficient_width = samp->gather_prescatter_coefficient_width; + gather_num_contributors = samp->gather_prescatter_num_contributors; + } - STBIR_ASSERT((size_t)STBIR__NEXT_MEMPTR(info->ring_buffer, unsigned char) == (size_t)tempmem + tempmem_size_in_bytes); - } + stbir__calculate_coefficients_for_gather_downsample(-filter_pixel_margin, input_end, in_pixels_radius, kernel, &samp->scale_info, gather_coefficient_width, gather_num_contributors, gather_contributors, gather_coeffs, user_data); -#undef STBIR__NEXT_MEMPTR + STBIR_PROFILE_BUILD_START(cleanup); + stbir__cleanup_gathered_coefficients(samp->edge, &samp->extent_info, &samp->scale_info, gather_num_contributors, gather_contributors, gather_coeffs, gather_coefficient_width); + STBIR_PROFILE_BUILD_END(cleanup); - // This signals that the ring buffer is empty - info->ring_buffer_begin_index = -1; + if (!samp->is_gather) + { + // if this is a scatter (vertical only), then we need to pivot the coeffs + stbir__contributors *scatter_contributors; + int highest_set; - 
stbir__calculate_filters(info->horizontal_contributors, info->horizontal_coefficients, info->horizontal_filter, info->horizontal_scale, info->horizontal_shift, info->input_w, info->output_w); - stbir__calculate_filters(info->vertical_contributors, info->vertical_coefficients, info->vertical_filter, info->vertical_scale, info->vertical_shift, info->input_h, info->output_h); + jump_right_to_pivot: - STBIR_PROGRESS_REPORT(0); + STBIR_PROFILE_BUILD_START(pivot); - if (stbir__use_height_upsampling(info)) - stbir__buffer_loop_upsample(info); - else - stbir__buffer_loop_downsample(info); + highest_set = (-filter_pixel_margin) - 1; + for (n = 0; n < gather_num_contributors; n++) + { + int k; + int gn0 = gather_contributors->n0, gn1 = gather_contributors->n1; + int scatter_coefficient_width = samp->coefficient_width; + float *scatter_coeffs = samp->coefficients + (gn0 + filter_pixel_margin) * scatter_coefficient_width; + float *g_coeffs = gather_coeffs; + scatter_contributors = samp->contributors + (gn0 + filter_pixel_margin); - STBIR_PROGRESS_REPORT(1); + for (k = gn0; k <= gn1; k++) + { + float gc = *g_coeffs++; -#ifdef STBIR_DEBUG_OVERWRITE_TEST - STBIR_ASSERT(memcmp(overwrite_output_before_pre, &((unsigned char *)output_data)[-OVERWRITE_ARRAY_SIZE], OVERWRITE_ARRAY_SIZE) == 0); - STBIR_ASSERT(memcmp(overwrite_output_after_pre, &((unsigned char *)output_data)[begin_forbidden], OVERWRITE_ARRAY_SIZE) == 0); - STBIR_ASSERT(memcmp(overwrite_tempmem_before_pre, &((unsigned char *)tempmem)[-OVERWRITE_ARRAY_SIZE], OVERWRITE_ARRAY_SIZE) == 0); - STBIR_ASSERT(memcmp(overwrite_tempmem_after_pre, &((unsigned char *)tempmem)[tempmem_size_in_bytes], OVERWRITE_ARRAY_SIZE) == 0); + // skip zero and denormals - must skip zeros to avoid adding coeffs beyond scatter_coefficient_width + // (which happens when pivoting from horizontal, which might have dummy zeros) + if (((gc >= stbir__small_float) || (gc <= -stbir__small_float))) + { + if ((k > highest_set) || (scatter_contributors->n0 > scatter_contributors->n1)) + { + { + // if we are skipping over several contributors, we need to clear the skipped ones + stbir__contributors *clear_contributors = samp->contributors + (highest_set + filter_pixel_margin + 1); + while (clear_contributors < scatter_contributors) + { + clear_contributors->n0 = 0; + clear_contributors->n1 = -1; + ++clear_contributors; + } + } + scatter_contributors->n0 = n; + scatter_contributors->n1 = n; + scatter_coeffs[0] = gc; + highest_set = k; + } + else + { + stbir__insert_coeff(scatter_contributors, scatter_coeffs, n, gc, scatter_coefficient_width); + } + STBIR_ASSERT((scatter_contributors->n1 - scatter_contributors->n0 + 1) <= scatter_coefficient_width); + } + ++scatter_contributors; + scatter_coeffs += scatter_coefficient_width; + } + + ++gather_contributors; + gather_coeffs += gather_coefficient_width; + } + + // now clear any unset contribs + { + stbir__contributors *clear_contributors = samp->contributors + (highest_set + filter_pixel_margin + 1); + stbir__contributors *end_contributors = samp->contributors + samp->num_contributors; + while (clear_contributors < end_contributors) + { + clear_contributors->n0 = 0; + clear_contributors->n1 = -1; + ++clear_contributors; + } + } + + STBIR_PROFILE_BUILD_END(pivot); + } + } + break; + } +} + +//======================================================================================================== +// scanline decoders and encoders + +#define stbir__coder_min_num 1 +#define STB_IMAGE_RESIZE_DO_CODERS +#include STBIR__HEADER_FILENAME + +#define 
stbir__decode_suffix BGRA
+#define stbir__decode_swizzle
+#define stbir__decode_order0 2
+#define stbir__decode_order1 1
+#define stbir__decode_order2 0
+#define stbir__decode_order3 3
+#define stbir__encode_order0 2
+#define stbir__encode_order1 1
+#define stbir__encode_order2 0
+#define stbir__encode_order3 3
+#define stbir__coder_min_num 4
+#define STB_IMAGE_RESIZE_DO_CODERS
+#include STBIR__HEADER_FILENAME
+
+#define stbir__decode_suffix ARGB
+#define stbir__decode_swizzle
+#define stbir__decode_order0 1
+#define stbir__decode_order1 2
+#define stbir__decode_order2 3
+#define stbir__decode_order3 0
+#define stbir__encode_order0 3
+#define stbir__encode_order1 0
+#define stbir__encode_order2 1
+#define stbir__encode_order3 2
+#define stbir__coder_min_num 4
+#define STB_IMAGE_RESIZE_DO_CODERS
+#include STBIR__HEADER_FILENAME
+
+#define stbir__decode_suffix ABGR
+#define stbir__decode_swizzle
+#define stbir__decode_order0 3
+#define stbir__decode_order1 2
+#define stbir__decode_order2 1
+#define stbir__decode_order3 0
+#define stbir__encode_order0 3
+#define stbir__encode_order1 2
+#define stbir__encode_order2 1
+#define stbir__encode_order3 0
+#define stbir__coder_min_num 4
+#define STB_IMAGE_RESIZE_DO_CODERS
+#include STBIR__HEADER_FILENAME
+
+#define stbir__decode_suffix AR
+#define stbir__decode_swizzle
+#define stbir__decode_order0 1
+#define stbir__decode_order1 0
+#define stbir__decode_order2 3
+#define stbir__decode_order3 2
+#define stbir__encode_order0 1
+#define stbir__encode_order1 0
+#define stbir__encode_order2 3
+#define stbir__encode_order3 2
+#define stbir__coder_min_num 2
+#define STB_IMAGE_RESIZE_DO_CODERS
+#include STBIR__HEADER_FILENAME
+
+// fancy alpha means we expand to keep both premultiplied and non-premultiplied color channels
+static void stbir__fancy_alpha_weight_4ch(float *out_buffer, int width_times_channels)
+{
+  float STBIR_STREAMOUT_PTR(*) out = out_buffer;
+  float const *end_decode = out_buffer + (width_times_channels / 4) * 7; // decode buffer aligned to end of out_buffer
+  float STBIR_STREAMOUT_PTR(*) decode = (float *)end_decode - width_times_channels;
+
+  // fancy alpha is stored internally as R G B A Rpm Gpm Bpm
+
+#ifdef STBIR_SIMD
+
+#ifdef STBIR_SIMD8
+  decode += 16;
+  STBIR_NO_UNROLL_LOOP_START
+  while (decode <= end_decode)
+  {
+    stbir__simdf8 d0, d1, a0, a1, p0, p1;
+    STBIR_NO_UNROLL(decode);
+    stbir__simdf8_load(d0, decode - 16);
+    stbir__simdf8_load(d1, decode - 16 + 8);
+    stbir__simdf8_0123to33333333(a0, d0);
+    stbir__simdf8_0123to33333333(a1, d1);
+    stbir__simdf8_mult(p0, a0, d0);
+    stbir__simdf8_mult(p1, a1, d1);
+    stbir__simdf8_bot4s(a0, d0, p0);
+    stbir__simdf8_bot4s(a1, d1, p1);
+    stbir__simdf8_top4s(d0, d0, p0);
+    stbir__simdf8_top4s(d1, d1, p1);
+    stbir__simdf8_store(out, a0);
+    stbir__simdf8_store(out + 7, d0);
+    stbir__simdf8_store(out + 14, a1);
+    stbir__simdf8_store(out + 21, d1);
+    decode += 16;
+    out += 28;
+  }
+  decode -= 16;
+#else
+  decode += 8;
+  STBIR_NO_UNROLL_LOOP_START
+  while (decode <= end_decode)
+  {
+    stbir__simdf d0, a0, d1, a1, p0, p1;
+    STBIR_NO_UNROLL(decode);
+    stbir__simdf_load(d0, decode - 8);
+    stbir__simdf_load(d1, decode - 8 + 4);
+    stbir__simdf_0123to3333(a0, d0);
+    stbir__simdf_0123to3333(a1, d1);
+    stbir__simdf_mult(p0, a0, d0);
+    stbir__simdf_mult(p1, a1, d1);
+    stbir__simdf_store(out, d0);
+    stbir__simdf_store(out + 4, p0);
+    stbir__simdf_store(out + 7, d1);
+    stbir__simdf_store(out + 7 + 4, p1);
+    decode += 8;
+    out += 14;
+  }
+  decode -= 8;
 #endif
-  return 1;
+// might be one last odd pixel
+#ifdef 
STBIR_SIMD8 + STBIR_NO_UNROLL_LOOP_START + while (decode < end_decode) +#else + if (decode < end_decode) +#endif + { + stbir__simdf d, a, p; + STBIR_NO_UNROLL(decode); + stbir__simdf_load(d, decode); + stbir__simdf_0123to3333(a, d); + stbir__simdf_mult(p, a, d); + stbir__simdf_store(out, d); + stbir__simdf_store(out + 4, p); + decode += 4; + out += 7; + } + +#else + + while (decode < end_decode) + { + float r = decode[0], g = decode[1], b = decode[2], alpha = decode[3]; + out[0] = r; + out[1] = g; + out[2] = b; + out[3] = alpha; + out[4] = r * alpha; + out[5] = g * alpha; + out[6] = b * alpha; + out += 7; + decode += 4; + } + +#endif } -static int stbir__resize_arbitrary( - void *alloc_context, - const void *input_data, int input_w, int input_h, int input_stride_in_bytes, - void *output_data, int output_w, int output_h, int output_stride_in_bytes, - float s0, float t0, float s1, float t1, float *transform, - int channels, int alpha_channel, stbir_uint32 flags, stbir_datatype type, - stbir_filter h_filter, stbir_filter v_filter, - stbir_edge edge_horizontal, stbir_edge edge_vertical, stbir_colorspace colorspace) +static void stbir__fancy_alpha_weight_2ch(float *out_buffer, int width_times_channels) { - stbir__info info; - int result; - size_t memory_required; - void *extra_memory; + float STBIR_STREAMOUT_PTR(*) out = out_buffer; + float const *end_decode = out_buffer + (width_times_channels / 2) * 3; + float STBIR_STREAMOUT_PTR(*) decode = (float *)end_decode - width_times_channels; - stbir__setup(&info, input_w, input_h, output_w, output_h, channels); - stbir__calculate_transform(&info, s0, t0, s1, t1, transform); - stbir__choose_filter(&info, h_filter, v_filter); - memory_required = stbir__calculate_memory(&info); - extra_memory = STBIR_MALLOC(memory_required, alloc_context); + // for fancy alpha, turns into: [X A Xpm][X A Xpm],etc - if (!extra_memory) - return 0; +#ifdef STBIR_SIMD - result = stbir__resize_allocated(&info, input_data, input_stride_in_bytes, - output_data, output_stride_in_bytes, - alpha_channel, flags, type, - edge_horizontal, edge_vertical, - colorspace, extra_memory, memory_required); + decode += 8; + if (decode <= end_decode) + { + STBIR_NO_UNROLL_LOOP_START + do + { +#ifdef STBIR_SIMD8 + stbir__simdf8 d0, a0, p0; + STBIR_NO_UNROLL(decode); + stbir__simdf8_load(d0, decode - 8); + stbir__simdf8_0123to11331133(p0, d0); + stbir__simdf8_0123to00220022(a0, d0); + stbir__simdf8_mult(p0, p0, a0); - STBIR_FREE(extra_memory, alloc_context); + stbir__simdf_store2(out, stbir__if_simdf8_cast_to_simdf4(d0)); + stbir__simdf_store(out + 2, stbir__if_simdf8_cast_to_simdf4(p0)); + stbir__simdf_store2h(out + 3, stbir__if_simdf8_cast_to_simdf4(d0)); - return result; + stbir__simdf_store2(out + 6, stbir__simdf8_gettop4(d0)); + stbir__simdf_store(out + 8, stbir__simdf8_gettop4(p0)); + stbir__simdf_store2h(out + 9, stbir__simdf8_gettop4(d0)); +#else + stbir__simdf d0, a0, d1, a1, p0, p1; + STBIR_NO_UNROLL(decode); + stbir__simdf_load(d0, decode - 8); + stbir__simdf_load(d1, decode - 8 + 4); + stbir__simdf_0123to1133(p0, d0); + stbir__simdf_0123to1133(p1, d1); + stbir__simdf_0123to0022(a0, d0); + stbir__simdf_0123to0022(a1, d1); + stbir__simdf_mult(p0, p0, a0); + stbir__simdf_mult(p1, p1, a1); + + stbir__simdf_store2(out, d0); + stbir__simdf_store(out + 2, p0); + stbir__simdf_store2h(out + 3, d0); + + stbir__simdf_store2(out + 6, d1); + stbir__simdf_store(out + 8, p1); + stbir__simdf_store2h(out + 9, d1); +#endif + decode += 8; + out += 12; + } while (decode <= end_decode); + } + decode -= 
8; +#endif + + STBIR_SIMD_NO_UNROLL_LOOP_START + while (decode < end_decode) + { + float x = decode[0], y = decode[1]; + STBIR_SIMD_NO_UNROLL(decode); + out[0] = x; + out[1] = y; + out[2] = x * y; + out += 3; + decode += 2; + } } -STBIRDEF int stbir_resize_uint8(const unsigned char *input_pixels, int input_w, int input_h, int input_stride_in_bytes, - unsigned char *output_pixels, int output_w, int output_h, int output_stride_in_bytes, - int num_channels) +static void stbir__fancy_alpha_unweight_4ch(float *encode_buffer, int width_times_channels) { - return stbir__resize_arbitrary(NULL, input_pixels, input_w, input_h, input_stride_in_bytes, - output_pixels, output_w, output_h, output_stride_in_bytes, - 0, 0, 1, 1, NULL, num_channels, -1, 0, STBIR_TYPE_UINT8, STBIR_FILTER_DEFAULT, STBIR_FILTER_DEFAULT, - STBIR_EDGE_CLAMP, STBIR_EDGE_CLAMP, STBIR_COLORSPACE_LINEAR); + float STBIR_SIMD_STREAMOUT_PTR(*) encode = encode_buffer; + float STBIR_SIMD_STREAMOUT_PTR(*) input = encode_buffer; + float const *end_output = encode_buffer + width_times_channels; + + // fancy RGBA is stored internally as R G B A Rpm Gpm Bpm + + STBIR_SIMD_NO_UNROLL_LOOP_START + do + { + float alpha = input[3]; +#ifdef STBIR_SIMD + stbir__simdf i, ia; + STBIR_SIMD_NO_UNROLL(encode); + if (alpha < stbir__small_float) + { + stbir__simdf_load(i, input); + stbir__simdf_store(encode, i); + } + else + { + stbir__simdf_load1frep4(ia, 1.0f / alpha); + stbir__simdf_load(i, input + 4); + stbir__simdf_mult(i, i, ia); + stbir__simdf_store(encode, i); + encode[3] = alpha; + } +#else + if (alpha < stbir__small_float) + { + encode[0] = input[0]; + encode[1] = input[1]; + encode[2] = input[2]; + } + else + { + float ialpha = 1.0f / alpha; + encode[0] = input[4] * ialpha; + encode[1] = input[5] * ialpha; + encode[2] = input[6] * ialpha; + } + encode[3] = alpha; +#endif + + input += 7; + encode += 4; + } while (encode < end_output); } -STBIRDEF int stbir_resize_float(const float *input_pixels, int input_w, int input_h, int input_stride_in_bytes, - float *output_pixels, int output_w, int output_h, int output_stride_in_bytes, - int num_channels) +// format: [X A Xpm][X A Xpm] etc +static void stbir__fancy_alpha_unweight_2ch(float *encode_buffer, int width_times_channels) { - return stbir__resize_arbitrary(NULL, input_pixels, input_w, input_h, input_stride_in_bytes, - output_pixels, output_w, output_h, output_stride_in_bytes, - 0, 0, 1, 1, NULL, num_channels, -1, 0, STBIR_TYPE_FLOAT, STBIR_FILTER_DEFAULT, STBIR_FILTER_DEFAULT, - STBIR_EDGE_CLAMP, STBIR_EDGE_CLAMP, STBIR_COLORSPACE_LINEAR); + float STBIR_SIMD_STREAMOUT_PTR(*) encode = encode_buffer; + float STBIR_SIMD_STREAMOUT_PTR(*) input = encode_buffer; + float const *end_output = encode_buffer + width_times_channels; + + do + { + float alpha = input[1]; + encode[0] = input[0]; + if (alpha >= stbir__small_float) + encode[0] = input[2] / alpha; + encode[1] = alpha; + + input += 3; + encode += 2; + } while (encode < end_output); } -STBIRDEF int stbir_resize_uint8_srgb(const unsigned char *input_pixels, int input_w, int input_h, int input_stride_in_bytes, - unsigned char *output_pixels, int output_w, int output_h, int output_stride_in_bytes, - int num_channels, int alpha_channel, int flags) +static void stbir__simple_alpha_weight_4ch(float *decode_buffer, int width_times_channels) { - return stbir__resize_arbitrary(NULL, input_pixels, input_w, input_h, input_stride_in_bytes, - output_pixels, output_w, output_h, output_stride_in_bytes, - 0, 0, 1, 1, NULL, num_channels, alpha_channel, flags, 
STBIR_TYPE_UINT8, STBIR_FILTER_DEFAULT, STBIR_FILTER_DEFAULT, - STBIR_EDGE_CLAMP, STBIR_EDGE_CLAMP, STBIR_COLORSPACE_SRGB); + float STBIR_STREAMOUT_PTR(*) decode = decode_buffer; + float const *end_decode = decode_buffer + width_times_channels; + +#ifdef STBIR_SIMD + { + decode += 2 * stbir__simdfX_float_count; + STBIR_NO_UNROLL_LOOP_START + while (decode <= end_decode) + { + stbir__simdfX d0, a0, d1, a1; + STBIR_NO_UNROLL(decode); + stbir__simdfX_load(d0, decode - 2 * stbir__simdfX_float_count); + stbir__simdfX_load(d1, decode - 2 * stbir__simdfX_float_count + stbir__simdfX_float_count); + stbir__simdfX_aaa1(a0, d0, STBIR_onesX); + stbir__simdfX_aaa1(a1, d1, STBIR_onesX); + stbir__simdfX_mult(d0, d0, a0); + stbir__simdfX_mult(d1, d1, a1); + stbir__simdfX_store(decode - 2 * stbir__simdfX_float_count, d0); + stbir__simdfX_store(decode - 2 * stbir__simdfX_float_count + stbir__simdfX_float_count, d1); + decode += 2 * stbir__simdfX_float_count; + } + decode -= 2 * stbir__simdfX_float_count; + +// few last pixels remnants +#ifdef STBIR_SIMD8 + STBIR_NO_UNROLL_LOOP_START + while (decode < end_decode) +#else + if (decode < end_decode) +#endif + { + stbir__simdf d, a; + stbir__simdf_load(d, decode); + stbir__simdf_aaa1(a, d, STBIR__CONSTF(STBIR_ones)); + stbir__simdf_mult(d, d, a); + stbir__simdf_store(decode, d); + decode += 4; + } + } + +#else + + while (decode < end_decode) + { + float alpha = decode[3]; + decode[0] *= alpha; + decode[1] *= alpha; + decode[2] *= alpha; + decode += 4; + } + +#endif } -STBIRDEF int stbir_resize_uint8_srgb_edgemode(const unsigned char *input_pixels, int input_w, int input_h, int input_stride_in_bytes, - unsigned char *output_pixels, int output_w, int output_h, int output_stride_in_bytes, - int num_channels, int alpha_channel, int flags, - stbir_edge edge_wrap_mode) +static void stbir__simple_alpha_weight_2ch(float *decode_buffer, int width_times_channels) { - return stbir__resize_arbitrary(NULL, input_pixels, input_w, input_h, input_stride_in_bytes, - output_pixels, output_w, output_h, output_stride_in_bytes, - 0, 0, 1, 1, NULL, num_channels, alpha_channel, flags, STBIR_TYPE_UINT8, STBIR_FILTER_DEFAULT, STBIR_FILTER_DEFAULT, - edge_wrap_mode, edge_wrap_mode, STBIR_COLORSPACE_SRGB); + float STBIR_STREAMOUT_PTR(*) decode = decode_buffer; + float const *end_decode = decode_buffer + width_times_channels; + +#ifdef STBIR_SIMD + decode += 2 * stbir__simdfX_float_count; + STBIR_NO_UNROLL_LOOP_START + while (decode <= end_decode) + { + stbir__simdfX d0, a0, d1, a1; + STBIR_NO_UNROLL(decode); + stbir__simdfX_load(d0, decode - 2 * stbir__simdfX_float_count); + stbir__simdfX_load(d1, decode - 2 * stbir__simdfX_float_count + stbir__simdfX_float_count); + stbir__simdfX_a1a1(a0, d0, STBIR_onesX); + stbir__simdfX_a1a1(a1, d1, STBIR_onesX); + stbir__simdfX_mult(d0, d0, a0); + stbir__simdfX_mult(d1, d1, a1); + stbir__simdfX_store(decode - 2 * stbir__simdfX_float_count, d0); + stbir__simdfX_store(decode - 2 * stbir__simdfX_float_count + stbir__simdfX_float_count, d1); + decode += 2 * stbir__simdfX_float_count; + } + decode -= 2 * stbir__simdfX_float_count; +#endif + + STBIR_SIMD_NO_UNROLL_LOOP_START + while (decode < end_decode) + { + float alpha = decode[1]; + STBIR_SIMD_NO_UNROLL(decode); + decode[0] *= alpha; + decode += 2; + } } -STBIRDEF int stbir_resize_uint8_generic(const unsigned char *input_pixels, int input_w, int input_h, int input_stride_in_bytes, - unsigned char *output_pixels, int output_w, int output_h, int output_stride_in_bytes, - int num_channels, int 
alpha_channel, int flags, - stbir_edge edge_wrap_mode, stbir_filter filter, stbir_colorspace space, - void *alloc_context) +static void stbir__simple_alpha_unweight_4ch(float *encode_buffer, int width_times_channels) { - return stbir__resize_arbitrary(alloc_context, input_pixels, input_w, input_h, input_stride_in_bytes, - output_pixels, output_w, output_h, output_stride_in_bytes, - 0, 0, 1, 1, NULL, num_channels, alpha_channel, flags, STBIR_TYPE_UINT8, filter, filter, - edge_wrap_mode, edge_wrap_mode, space); + float STBIR_SIMD_STREAMOUT_PTR(*) encode = encode_buffer; + float const *end_output = encode_buffer + width_times_channels; + + STBIR_SIMD_NO_UNROLL_LOOP_START + do + { + float alpha = encode[3]; + +#ifdef STBIR_SIMD + stbir__simdf i, ia; + STBIR_SIMD_NO_UNROLL(encode); + if (alpha >= stbir__small_float) + { + stbir__simdf_load1frep4(ia, 1.0f / alpha); + stbir__simdf_load(i, encode); + stbir__simdf_mult(i, i, ia); + stbir__simdf_store(encode, i); + encode[3] = alpha; + } +#else + if (alpha >= stbir__small_float) + { + float ialpha = 1.0f / alpha; + encode[0] *= ialpha; + encode[1] *= ialpha; + encode[2] *= ialpha; + } +#endif + encode += 4; + } while (encode < end_output); } -STBIRDEF int stbir_resize_uint16_generic(const stbir_uint16 *input_pixels, int input_w, int input_h, int input_stride_in_bytes, - stbir_uint16 *output_pixels, int output_w, int output_h, int output_stride_in_bytes, - int num_channels, int alpha_channel, int flags, - stbir_edge edge_wrap_mode, stbir_filter filter, stbir_colorspace space, - void *alloc_context) +static void stbir__simple_alpha_unweight_2ch(float *encode_buffer, int width_times_channels) { - return stbir__resize_arbitrary(alloc_context, input_pixels, input_w, input_h, input_stride_in_bytes, - output_pixels, output_w, output_h, output_stride_in_bytes, - 0, 0, 1, 1, NULL, num_channels, alpha_channel, flags, STBIR_TYPE_UINT16, filter, filter, - edge_wrap_mode, edge_wrap_mode, space); + float STBIR_SIMD_STREAMOUT_PTR(*) encode = encode_buffer; + float const *end_output = encode_buffer + width_times_channels; + + do + { + float alpha = encode[1]; + if (alpha >= stbir__small_float) + encode[0] /= alpha; + encode += 2; + } while (encode < end_output); } -STBIRDEF int stbir_resize_float_generic(const float *input_pixels, int input_w, int input_h, int input_stride_in_bytes, - float *output_pixels, int output_w, int output_h, int output_stride_in_bytes, - int num_channels, int alpha_channel, int flags, - stbir_edge edge_wrap_mode, stbir_filter filter, stbir_colorspace space, - void *alloc_context) +// only used in RGB->BGR or BGR->RGB +static void stbir__simple_flip_3ch(float *decode_buffer, int width_times_channels) { - return stbir__resize_arbitrary(alloc_context, input_pixels, input_w, input_h, input_stride_in_bytes, - output_pixels, output_w, output_h, output_stride_in_bytes, - 0, 0, 1, 1, NULL, num_channels, alpha_channel, flags, STBIR_TYPE_FLOAT, filter, filter, - edge_wrap_mode, edge_wrap_mode, space); + float STBIR_STREAMOUT_PTR(*) decode = decode_buffer; + float const *end_decode = decode_buffer + width_times_channels; + +#ifdef STBIR_SIMD +#ifdef stbir__simdf_swiz2 // do we have two argument swizzles? 
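// ---------------------------------------------------------------------------
// [editor's illustration, not part of the patch] Both branches of this #ifdef
// perform the same R<->B swap; they differ only in how many 3-float pixels
// they swizzle per iteration (4 pixels = 12 floats with two-argument
// swizzles, 8 pixels = 24 floats without) so the swap stays aligned to pixel
// boundaries. The plain scalar equivalent, which the function's own tail loop
// below also implements (flip_rb_3ch_sketch is a hypothetical helper):
static void flip_rb_3ch_sketch(float *p, int n_pixels)
{
   int i;
   for (i = 0; i < n_pixels; i++, p += 3)
   {
      float t = p[0]; // swap channel 0 and channel 2
      p[0] = p[2];
      p[2] = t;
   }
}
// ---------------------------------------------------------------------------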
+ end_decode -= 12; + STBIR_NO_UNROLL_LOOP_START + while (decode <= end_decode) + { + // on arm64 8 instructions, no overlapping stores + stbir__simdf a, b, c, na, nb; + STBIR_SIMD_NO_UNROLL(decode); + stbir__simdf_load(a, decode); + stbir__simdf_load(b, decode + 4); + stbir__simdf_load(c, decode + 8); + + na = stbir__simdf_swiz2(a, b, 2, 1, 0, 5); + b = stbir__simdf_swiz2(a, b, 4, 3, 6, 7); + nb = stbir__simdf_swiz2(b, c, 0, 1, 4, 3); + c = stbir__simdf_swiz2(b, c, 2, 7, 6, 5); + + stbir__simdf_store(decode, na); + stbir__simdf_store(decode + 4, nb); + stbir__simdf_store(decode + 8, c); + decode += 12; + } + end_decode += 12; +#else + end_decode -= 24; + STBIR_NO_UNROLL_LOOP_START + while (decode <= end_decode) + { + // 26 instructions on x64 + stbir__simdf a, b, c, d, e, f, g; + float i21, i23; + STBIR_SIMD_NO_UNROLL(decode); + stbir__simdf_load(a, decode); + stbir__simdf_load(b, decode + 3); + stbir__simdf_load(c, decode + 6); + stbir__simdf_load(d, decode + 9); + stbir__simdf_load(e, decode + 12); + stbir__simdf_load(f, decode + 15); + stbir__simdf_load(g, decode + 18); + + a = stbir__simdf_swiz(a, 2, 1, 0, 3); + b = stbir__simdf_swiz(b, 2, 1, 0, 3); + c = stbir__simdf_swiz(c, 2, 1, 0, 3); + d = stbir__simdf_swiz(d, 2, 1, 0, 3); + e = stbir__simdf_swiz(e, 2, 1, 0, 3); + f = stbir__simdf_swiz(f, 2, 1, 0, 3); + g = stbir__simdf_swiz(g, 2, 1, 0, 3); + + // stores overlap, need to be in order, + stbir__simdf_store(decode, a); + i21 = decode[21]; + stbir__simdf_store(decode + 3, b); + i23 = decode[23]; + stbir__simdf_store(decode + 6, c); + stbir__simdf_store(decode + 9, d); + stbir__simdf_store(decode + 12, e); + stbir__simdf_store(decode + 15, f); + stbir__simdf_store(decode + 18, g); + decode[21] = i23; + decode[23] = i21; + decode += 24; + } + end_decode += 24; +#endif +#else + end_decode -= 12; + STBIR_NO_UNROLL_LOOP_START + while (decode <= end_decode) + { + // 16 instructions + float t0, t1, t2, t3; + STBIR_NO_UNROLL(decode); + t0 = decode[0]; + t1 = decode[3]; + t2 = decode[6]; + t3 = decode[9]; + decode[0] = decode[2]; + decode[3] = decode[5]; + decode[6] = decode[8]; + decode[9] = decode[11]; + decode[2] = t0; + decode[5] = t1; + decode[8] = t2; + decode[11] = t3; + decode += 12; + } + end_decode += 12; +#endif + + STBIR_NO_UNROLL_LOOP_START + while (decode < end_decode) + { + float t = decode[0]; + STBIR_NO_UNROLL(decode); + decode[0] = decode[2]; + decode[2] = t; + decode += 3; + } } -STBIRDEF int stbir_resize(const void *input_pixels, int input_w, int input_h, int input_stride_in_bytes, - void *output_pixels, int output_w, int output_h, int output_stride_in_bytes, - stbir_datatype datatype, - int num_channels, int alpha_channel, int flags, - stbir_edge edge_mode_horizontal, stbir_edge edge_mode_vertical, - stbir_filter filter_horizontal, stbir_filter filter_vertical, - stbir_colorspace space, void *alloc_context) +static void stbir__decode_scanline(stbir__info const *stbir_info, int n, float *output_buffer STBIR_ONLY_PROFILE_GET_SPLIT_INFO) { - return stbir__resize_arbitrary(alloc_context, input_pixels, input_w, input_h, input_stride_in_bytes, - output_pixels, output_w, output_h, output_stride_in_bytes, - 0, 0, 1, 1, NULL, num_channels, alpha_channel, flags, datatype, filter_horizontal, filter_vertical, - edge_mode_horizontal, edge_mode_vertical, space); + int channels = stbir_info->channels; + int effective_channels = stbir_info->effective_channels; + int input_sample_in_bytes = stbir__type_size[stbir_info->input_type] * channels; + stbir_edge edge_horizontal = 
stbir_info->horizontal.edge; + stbir_edge edge_vertical = stbir_info->vertical.edge; + int row = stbir__edge_wrap(edge_vertical, n, stbir_info->vertical.scale_info.input_full_size); + const void *input_plane_data = ((char *)stbir_info->input_data) + (size_t)row * (size_t)stbir_info->input_stride_bytes; + stbir__span const *spans = stbir_info->scanline_extents.spans; + float *full_decode_buffer = output_buffer - stbir_info->scanline_extents.conservative.n0 * effective_channels; + float *last_decoded = 0; + + // if we are on edge_zero and we get in here with an out-of-bounds n, then the calculate-filters stage has failed + STBIR_ASSERT(!(edge_vertical == STBIR_EDGE_ZERO && (n < 0 || n >= stbir_info->vertical.scale_info.input_full_size))); + + do + { + float *decode_buffer; + void const *input_data; + float *end_decode; + int width_times_channels; + int width; + + if (spans->n1 < spans->n0) + break; + + width = spans->n1 + 1 - spans->n0; + decode_buffer = full_decode_buffer + spans->n0 * effective_channels; + end_decode = full_decode_buffer + (spans->n1 + 1) * effective_channels; + width_times_channels = width * channels; + + // read directly out of input plane by default + input_data = ((char *)input_plane_data) + spans->pixel_offset_for_input * input_sample_in_bytes; + + // if we have an input callback, call it to get the input data + if (stbir_info->in_pixels_cb) + { + // call the callback with a temp buffer (that they can choose to use or not). the temp is just right-aligned memory in the decode_buffer itself + input_data = stbir_info->in_pixels_cb(((char *)end_decode) - (width * input_sample_in_bytes) + sizeof(float) * STBIR_INPUT_CALLBACK_PADDING, input_plane_data, width, spans->pixel_offset_for_input, row, stbir_info->user_data); + } + + STBIR_PROFILE_START(decode); + // convert the pixels into the float decode_buffer (we index from end_decode, so that when channels < effective_channels, we are right justified in the buffer) + stbir_info->decode_pixels((float *)end_decode - width_times_channels, width_times_channels, input_data); + STBIR_PROFILE_END(decode); + + if (stbir_info->alpha_weight) + { + STBIR_PROFILE_START(alpha); + stbir_info->alpha_weight(decode_buffer, width_times_channels); + STBIR_PROFILE_END(alpha); + } + + last_decoded = end_decode; + ++spans; + } while (spans <= (&stbir_info->scanline_extents.spans[1])); + + // handle the edge_wrap filter (all other types are handled back out at the calculate_filter stage) + // basically the idea here is that if we have the whole scanline in memory, we don't redecode the + // wrapped edge pixels, and instead just memcpy them from the scanline into the edge positions + if ((edge_horizontal == STBIR_EDGE_WRAP) && (stbir_info->scanline_extents.edge_sizes[0] | stbir_info->scanline_extents.edge_sizes[1])) + { + // this code only runs if we're in edge_wrap, and we're doing the entire scanline + int e, start_x[2]; + int input_full_size = stbir_info->horizontal.scale_info.input_full_size; + + start_x[0] = -stbir_info->scanline_extents.edge_sizes[0]; // left edge start x + start_x[1] = input_full_size; // right edge + + for (e = 0; e < 2; e++) + { + // do each margin + int margin = stbir_info->scanline_extents.edge_sizes[e]; + if (margin) + { + int x = start_x[e]; + float *marg = full_decode_buffer + x * effective_channels; + float const *src = full_decode_buffer + stbir__edge_wrap(edge_horizontal, x, input_full_size) * effective_channels; + STBIR_MEMCPY(marg, src, margin * effective_channels * sizeof(float)); + if (e == 1) + last_decoded = marg + margin * effective_channels; + } + } + } + + // some of the horizontal gathers read one float off the
edge (which is masked out), but we force a zero here to make sure no NaNs leak in + // (we can't pre-zero it, because the input callback can use that area as padding) + last_decoded[0] = 0.0f; + + // we clear this extra float, because the final output pixel filter kernel might have used one less coeff than the max filter width + // when this happens, we do read that pixel from the input, so it too could be Nan, so just zero an extra one. + // this fits because each scanline is padded by three floats (STBIR_INPUT_CALLBACK_PADDING) + last_decoded[1] = 0.0f; } -STBIRDEF int stbir_resize_subpixel(const void *input_pixels, int input_w, int input_h, int input_stride_in_bytes, - void *output_pixels, int output_w, int output_h, int output_stride_in_bytes, - stbir_datatype datatype, - int num_channels, int alpha_channel, int flags, - stbir_edge edge_mode_horizontal, stbir_edge edge_mode_vertical, - stbir_filter filter_horizontal, stbir_filter filter_vertical, - stbir_colorspace space, void *alloc_context, - float x_scale, float y_scale, - float x_offset, float y_offset) +//================= +// Do 1 channel horizontal routines + +#ifdef STBIR_SIMD + +#define stbir__1_coeff_only() \ + stbir__simdf tot, c; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load1(c, hc); \ + stbir__simdf_mult1_mem(tot, c, decode); + +#define stbir__2_coeff_only() \ + stbir__simdf tot, c, d; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load2z(c, hc); \ + stbir__simdf_load2(d, decode); \ + stbir__simdf_mult(tot, c, d); \ + stbir__simdf_0123to1230(c, tot); \ + stbir__simdf_add1(tot, tot, c); + +#define stbir__3_coeff_only() \ + stbir__simdf tot, c, t; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load(c, hc); \ + stbir__simdf_mult_mem(tot, c, decode); \ + stbir__simdf_0123to1230(c, tot); \ + stbir__simdf_0123to2301(t, tot); \ + stbir__simdf_add1(tot, tot, c); \ + stbir__simdf_add1(tot, tot, t); + +#define stbir__store_output_tiny() \ + stbir__simdf_store1(output, tot); \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 1; + +#define stbir__4_coeff_start() \ + stbir__simdf tot, c; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load(c, hc); \ + stbir__simdf_mult_mem(tot, c, decode); + +#define stbir__4_coeff_continue_from_4(ofs) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load(c, hc + (ofs)); \ + stbir__simdf_madd_mem(tot, tot, c, decode + (ofs)); + +#define stbir__1_coeff_remnant(ofs) \ + { \ + stbir__simdf d; \ + stbir__simdf_load1z(c, hc + (ofs)); \ + stbir__simdf_load1(d, decode + (ofs)); \ + stbir__simdf_madd(tot, tot, d, c); \ + } + +#define stbir__2_coeff_remnant(ofs) \ + { \ + stbir__simdf d; \ + stbir__simdf_load2z(c, hc + (ofs)); \ + stbir__simdf_load2(d, decode + (ofs)); \ + stbir__simdf_madd(tot, tot, d, c); \ + } + +#define stbir__3_coeff_setup() \ + stbir__simdf mask; \ + stbir__simdf_load(mask, STBIR_mask + 3); + +#define stbir__3_coeff_remnant(ofs) \ + stbir__simdf_load(c, hc + (ofs)); \ + stbir__simdf_and(c, c, mask); \ + stbir__simdf_madd_mem(tot, tot, c, decode + (ofs)); + +#define stbir__store_output() \ + stbir__simdf_0123to2301(c, tot); \ + stbir__simdf_add(tot, tot, c); \ + stbir__simdf_0123to1230(c, tot); \ + stbir__simdf_add1(tot, tot, c); \ + stbir__simdf_store1(output, tot); \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 1; + +#else + +#define stbir__1_coeff_only() \ + float tot; \ + tot = decode[0] * hc[0]; + +#define stbir__2_coeff_only() \ + float tot; \ + tot = decode[0] * hc[0]; 
\ + tot += decode[1] * hc[1]; + +#define stbir__3_coeff_only() \ + float tot; \ + tot = decode[0] * hc[0]; \ + tot += decode[1] * hc[1]; \ + tot += decode[2] * hc[2]; + +#define stbir__store_output_tiny() \ + output[0] = tot; \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 1; + +#define stbir__4_coeff_start() \ + float tot0, tot1, tot2, tot3; \ + tot0 = decode[0] * hc[0]; \ + tot1 = decode[1] * hc[1]; \ + tot2 = decode[2] * hc[2]; \ + tot3 = decode[3] * hc[3]; + +#define stbir__4_coeff_continue_from_4(ofs) \ + tot0 += decode[0 + (ofs)] * hc[0 + (ofs)]; \ + tot1 += decode[1 + (ofs)] * hc[1 + (ofs)]; \ + tot2 += decode[2 + (ofs)] * hc[2 + (ofs)]; \ + tot3 += decode[3 + (ofs)] * hc[3 + (ofs)]; + +#define stbir__1_coeff_remnant(ofs) \ + tot0 += decode[0 + (ofs)] * hc[0 + (ofs)]; + +#define stbir__2_coeff_remnant(ofs) \ + tot0 += decode[0 + (ofs)] * hc[0 + (ofs)]; \ + tot1 += decode[1 + (ofs)] * hc[1 + (ofs)]; + +#define stbir__3_coeff_remnant(ofs) \ + tot0 += decode[0 + (ofs)] * hc[0 + (ofs)]; \ + tot1 += decode[1 + (ofs)] * hc[1 + (ofs)]; \ + tot2 += decode[2 + (ofs)] * hc[2 + (ofs)]; + +#define stbir__store_output() \ + output[0] = (tot0 + tot2) + (tot1 + tot3); \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 1; + +#endif + +#define STBIR__horizontal_channels 1 +#define STB_IMAGE_RESIZE_DO_HORIZONTALS +#include STBIR__HEADER_FILENAME + +//================= +// Do 2 channel horizontal routines + +#ifdef STBIR_SIMD + +#define stbir__1_coeff_only() \ + stbir__simdf tot, c, d; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load1z(c, hc); \ + stbir__simdf_0123to0011(c, c); \ + stbir__simdf_load2(d, decode); \ + stbir__simdf_mult(tot, d, c); + +#define stbir__2_coeff_only() \ + stbir__simdf tot, c; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load2(c, hc); \ + stbir__simdf_0123to0011(c, c); \ + stbir__simdf_mult_mem(tot, c, decode); + +#define stbir__3_coeff_only() \ + stbir__simdf tot, c, cs, d; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load(cs, hc); \ + stbir__simdf_0123to0011(c, cs); \ + stbir__simdf_mult_mem(tot, c, decode); \ + stbir__simdf_0123to2222(c, cs); \ + stbir__simdf_load2z(d, decode + 4); \ + stbir__simdf_madd(tot, tot, d, c); + +#define stbir__store_output_tiny() \ + stbir__simdf_0123to2301(c, tot); \ + stbir__simdf_add(tot, tot, c); \ + stbir__simdf_store2(output, tot); \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 2; + +#ifdef STBIR_SIMD8 + +#define stbir__4_coeff_start() \ + stbir__simdf8 tot0, c, cs; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf8_load4b(cs, hc); \ + stbir__simdf8_0123to00112233(c, cs); \ + stbir__simdf8_mult_mem(tot0, c, decode); + +#define stbir__4_coeff_continue_from_4(ofs) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf8_load4b(cs, hc + (ofs)); \ + stbir__simdf8_0123to00112233(c, cs); \ + stbir__simdf8_madd_mem(tot0, tot0, c, decode + (ofs) * 2); + +#define stbir__1_coeff_remnant(ofs) \ + { \ + stbir__simdf t, d; \ + stbir__simdf_load1z(t, hc + (ofs)); \ + stbir__simdf_load2(d, decode + (ofs) * 2); \ + stbir__simdf_0123to0011(t, t); \ + stbir__simdf_mult(t, t, d); \ + stbir__simdf8_add4(tot0, tot0, t); \ + } + +#define stbir__2_coeff_remnant(ofs) \ + { \ + stbir__simdf t; \ + stbir__simdf_load2(t, hc + (ofs)); \ + stbir__simdf_0123to0011(t, t); \ + stbir__simdf_mult_mem(t, t, decode + (ofs) * 2); \ + stbir__simdf8_add4(tot0, tot0, t); \ + } + +#define stbir__3_coeff_remnant(ofs) \ + { \ + 
stbir__simdf8 d; \ + stbir__simdf8_load4b(cs, hc + (ofs)); \ + stbir__simdf8_0123to00112233(c, cs); \ + stbir__simdf8_load6z(d, decode + (ofs) * 2); \ + stbir__simdf8_madd(tot0, tot0, c, d); \ + } + +#define stbir__store_output() \ + { \ + stbir__simdf t, d; \ + stbir__simdf8_add4halves(t, stbir__if_simdf8_cast_to_simdf4(tot0), tot0); \ + stbir__simdf_0123to2301(d, t); \ + stbir__simdf_add(t, t, d); \ + stbir__simdf_store2(output, t); \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 2; \ + } + +#else + +#define stbir__4_coeff_start() \ + stbir__simdf tot0, tot1, c, cs; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load(cs, hc); \ + stbir__simdf_0123to0011(c, cs); \ + stbir__simdf_mult_mem(tot0, c, decode); \ + stbir__simdf_0123to2233(c, cs); \ + stbir__simdf_mult_mem(tot1, c, decode + 4); + +#define stbir__4_coeff_continue_from_4(ofs) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load(cs, hc + (ofs)); \ + stbir__simdf_0123to0011(c, cs); \ + stbir__simdf_madd_mem(tot0, tot0, c, decode + (ofs) * 2); \ + stbir__simdf_0123to2233(c, cs); \ + stbir__simdf_madd_mem(tot1, tot1, c, decode + (ofs) * 2 + 4); + +#define stbir__1_coeff_remnant(ofs) \ + { \ + stbir__simdf d; \ + stbir__simdf_load1z(cs, hc + (ofs)); \ + stbir__simdf_0123to0011(c, cs); \ + stbir__simdf_load2(d, decode + (ofs) * 2); \ + stbir__simdf_madd(tot0, tot0, d, c); \ + } + +#define stbir__2_coeff_remnant(ofs) \ + stbir__simdf_load2(cs, hc + (ofs)); \ + stbir__simdf_0123to0011(c, cs); \ + stbir__simdf_madd_mem(tot0, tot0, c, decode + (ofs) * 2); + +#define stbir__3_coeff_remnant(ofs) \ + { \ + stbir__simdf d; \ + stbir__simdf_load(cs, hc + (ofs)); \ + stbir__simdf_0123to0011(c, cs); \ + stbir__simdf_madd_mem(tot0, tot0, c, decode + (ofs) * 2); \ + stbir__simdf_0123to2222(c, cs); \ + stbir__simdf_load2z(d, decode + (ofs) * 2 + 4); \ + stbir__simdf_madd(tot1, tot1, d, c); \ + } + +#define stbir__store_output() \ + stbir__simdf_add(tot0, tot0, tot1); \ + stbir__simdf_0123to2301(c, tot0); \ + stbir__simdf_add(tot0, tot0, c); \ + stbir__simdf_store2(output, tot0); \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 2; + +#endif + +#else + +#define stbir__1_coeff_only() \ + float tota, totb, c; \ + c = hc[0]; \ + tota = decode[0] * c; \ + totb = decode[1] * c; + +#define stbir__2_coeff_only() \ + float tota, totb, c; \ + c = hc[0]; \ + tota = decode[0] * c; \ + totb = decode[1] * c; \ + c = hc[1]; \ + tota += decode[2] * c; \ + totb += decode[3] * c; + +// this weird order of add matches the simd +#define stbir__3_coeff_only() \ + float tota, totb, c; \ + c = hc[0]; \ + tota = decode[0] * c; \ + totb = decode[1] * c; \ + c = hc[2]; \ + tota += decode[4] * c; \ + totb += decode[5] * c; \ + c = hc[1]; \ + tota += decode[2] * c; \ + totb += decode[3] * c; + +#define stbir__store_output_tiny() \ + output[0] = tota; \ + output[1] = totb; \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 2; + +#define stbir__4_coeff_start() \ + float tota0, tota1, tota2, tota3, totb0, totb1, totb2, totb3, c; \ + c = hc[0]; \ + tota0 = decode[0] * c; \ + totb0 = decode[1] * c; \ + c = hc[1]; \ + tota1 = decode[2] * c; \ + totb1 = decode[3] * c; \ + c = hc[2]; \ + tota2 = decode[4] * c; \ + totb2 = decode[5] * c; \ + c = hc[3]; \ + tota3 = decode[6] * c; \ + totb3 = decode[7] * c; + +#define stbir__4_coeff_continue_from_4(ofs) \ + c = hc[0 + (ofs)]; \ + tota0 += decode[0 + (ofs) * 2] * c; \ + totb0 += decode[1 + (ofs) * 
2] * c; \ + c = hc[1 + (ofs)]; \ + tota1 += decode[2 + (ofs) * 2] * c; \ + totb1 += decode[3 + (ofs) * 2] * c; \ + c = hc[2 + (ofs)]; \ + tota2 += decode[4 + (ofs) * 2] * c; \ + totb2 += decode[5 + (ofs) * 2] * c; \ + c = hc[3 + (ofs)]; \ + tota3 += decode[6 + (ofs) * 2] * c; \ + totb3 += decode[7 + (ofs) * 2] * c; + +#define stbir__1_coeff_remnant(ofs) \ + c = hc[0 + (ofs)]; \ + tota0 += decode[0 + (ofs) * 2] * c; \ + totb0 += decode[1 + (ofs) * 2] * c; + +#define stbir__2_coeff_remnant(ofs) \ + c = hc[0 + (ofs)]; \ + tota0 += decode[0 + (ofs) * 2] * c; \ + totb0 += decode[1 + (ofs) * 2] * c; \ + c = hc[1 + (ofs)]; \ + tota1 += decode[2 + (ofs) * 2] * c; \ + totb1 += decode[3 + (ofs) * 2] * c; + +#define stbir__3_coeff_remnant(ofs) \ + c = hc[0 + (ofs)]; \ + tota0 += decode[0 + (ofs) * 2] * c; \ + totb0 += decode[1 + (ofs) * 2] * c; \ + c = hc[1 + (ofs)]; \ + tota1 += decode[2 + (ofs) * 2] * c; \ + totb1 += decode[3 + (ofs) * 2] * c; \ + c = hc[2 + (ofs)]; \ + tota2 += decode[4 + (ofs) * 2] * c; \ + totb2 += decode[5 + (ofs) * 2] * c; + +#define stbir__store_output() \ + output[0] = (tota0 + tota2) + (tota1 + tota3); \ + output[1] = (totb0 + totb2) + (totb1 + totb3); \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 2; + +#endif + +#define STBIR__horizontal_channels 2 +#define STB_IMAGE_RESIZE_DO_HORIZONTALS +#include STBIR__HEADER_FILENAME + +//================= +// Do 3 channel horizontal routines + +#ifdef STBIR_SIMD + +#define stbir__1_coeff_only() \ + stbir__simdf tot, c, d; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load1z(c, hc); \ + stbir__simdf_0123to0001(c, c); \ + stbir__simdf_load(d, decode); \ + stbir__simdf_mult(tot, d, c); + +#define stbir__2_coeff_only() \ + stbir__simdf tot, c, cs, d; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load2(cs, hc); \ + stbir__simdf_0123to0000(c, cs); \ + stbir__simdf_load(d, decode); \ + stbir__simdf_mult(tot, d, c); \ + stbir__simdf_0123to1111(c, cs); \ + stbir__simdf_load(d, decode + 3); \ + stbir__simdf_madd(tot, tot, d, c); + +#define stbir__3_coeff_only() \ + stbir__simdf tot, c, d, cs; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load(cs, hc); \ + stbir__simdf_0123to0000(c, cs); \ + stbir__simdf_load(d, decode); \ + stbir__simdf_mult(tot, d, c); \ + stbir__simdf_0123to1111(c, cs); \ + stbir__simdf_load(d, decode + 3); \ + stbir__simdf_madd(tot, tot, d, c); \ + stbir__simdf_0123to2222(c, cs); \ + stbir__simdf_load(d, decode + 6); \ + stbir__simdf_madd(tot, tot, d, c); + +#define stbir__store_output_tiny() \ + stbir__simdf_store2(output, tot); \ + stbir__simdf_0123to2301(tot, tot); \ + stbir__simdf_store1(output + 2, tot); \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 3; + +#ifdef STBIR_SIMD8 + +// we're loading from the XXXYYY decode by -1 to get the XXXYYY into different halves of the AVX reg fyi +#define stbir__4_coeff_start() \ + stbir__simdf8 tot0, tot1, c, cs; \ + stbir__simdf t; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf8_load4b(cs, hc); \ + stbir__simdf8_0123to00001111(c, cs); \ + stbir__simdf8_mult_mem(tot0, c, decode - 1); \ + stbir__simdf8_0123to22223333(c, cs); \ + stbir__simdf8_mult_mem(tot1, c, decode + 6 - 1); + +#define stbir__4_coeff_continue_from_4(ofs) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf8_load4b(cs, hc + (ofs)); \ + stbir__simdf8_0123to00001111(c, cs); \ + stbir__simdf8_madd_mem(tot0, tot0, c, decode + (ofs) * 3 - 1); \ + stbir__simdf8_0123to22223333(c, cs); \ + 
stbir__simdf8_madd_mem(tot1, tot1, c, decode + (ofs) * 3 + 6 - 1); + +#define stbir__1_coeff_remnant(ofs) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load1rep4(t, hc + (ofs)); \ + stbir__simdf8_madd_mem4(tot0, tot0, t, decode + (ofs) * 3 - 1); + +#define stbir__2_coeff_remnant(ofs) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf8_load4b(cs, hc + (ofs) - 2); \ + stbir__simdf8_0123to22223333(c, cs); \ + stbir__simdf8_madd_mem(tot0, tot0, c, decode + (ofs) * 3 - 1); + +#define stbir__3_coeff_remnant(ofs) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf8_load4b(cs, hc + (ofs)); \ + stbir__simdf8_0123to00001111(c, cs); \ + stbir__simdf8_madd_mem(tot0, tot0, c, decode + (ofs) * 3 - 1); \ + stbir__simdf8_0123to2222(t, cs); \ + stbir__simdf8_madd_mem4(tot1, tot1, t, decode + (ofs) * 3 + 6 - 1); + +#define stbir__store_output() \ + stbir__simdf8_add(tot0, tot0, tot1); \ + stbir__simdf_0123to1230(t, stbir__if_simdf8_cast_to_simdf4(tot0)); \ + stbir__simdf8_add4halves(t, t, tot0); \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 3; \ + if (output < output_end) \ + { \ + stbir__simdf_store(output - 3, t); \ + continue; \ + } \ + { \ + stbir__simdf tt; \ + stbir__simdf_0123to2301(tt, t); \ + stbir__simdf_store2(output - 3, t); \ + stbir__simdf_store1(output + 2 - 3, tt); \ + } \ + break; + +#else + +#define stbir__4_coeff_start() \ + stbir__simdf tot0, tot1, tot2, c, cs; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load(cs, hc); \ + stbir__simdf_0123to0001(c, cs); \ + stbir__simdf_mult_mem(tot0, c, decode); \ + stbir__simdf_0123to1122(c, cs); \ + stbir__simdf_mult_mem(tot1, c, decode + 4); \ + stbir__simdf_0123to2333(c, cs); \ + stbir__simdf_mult_mem(tot2, c, decode + 8); + +#define stbir__4_coeff_continue_from_4(ofs) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load(cs, hc + (ofs)); \ + stbir__simdf_0123to0001(c, cs); \ + stbir__simdf_madd_mem(tot0, tot0, c, decode + (ofs) * 3); \ + stbir__simdf_0123to1122(c, cs); \ + stbir__simdf_madd_mem(tot1, tot1, c, decode + (ofs) * 3 + 4); \ + stbir__simdf_0123to2333(c, cs); \ + stbir__simdf_madd_mem(tot2, tot2, c, decode + (ofs) * 3 + 8); + +#define stbir__1_coeff_remnant(ofs) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load1z(c, hc + (ofs)); \ + stbir__simdf_0123to0001(c, c); \ + stbir__simdf_madd_mem(tot0, tot0, c, decode + (ofs) * 3); + +#define stbir__2_coeff_remnant(ofs) \ + { \ + stbir__simdf d; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load2z(cs, hc + (ofs)); \ + stbir__simdf_0123to0001(c, cs); \ + stbir__simdf_madd_mem(tot0, tot0, c, decode + (ofs) * 3); \ + stbir__simdf_0123to1122(c, cs); \ + stbir__simdf_load2z(d, decode + (ofs) * 3 + 4); \ + stbir__simdf_madd(tot1, tot1, c, d); \ + } + +#define stbir__3_coeff_remnant(ofs) \ + { \ + stbir__simdf d; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load(cs, hc + (ofs)); \ + stbir__simdf_0123to0001(c, cs); \ + stbir__simdf_madd_mem(tot0, tot0, c, decode + (ofs) * 3); \ + stbir__simdf_0123to1122(c, cs); \ + stbir__simdf_madd_mem(tot1, tot1, c, decode + (ofs) * 3 + 4); \ + stbir__simdf_0123to2222(c, cs); \ + stbir__simdf_load1z(d, decode + (ofs) * 3 + 8); \ + stbir__simdf_madd(tot2, tot2, c, d); \ + } + +#define stbir__store_output() \ + stbir__simdf_0123ABCDto3ABx(c, tot0, tot1); \ + stbir__simdf_0123ABCDto23Ax(cs, tot1, tot2); \ + stbir__simdf_0123to1230(tot2, tot2); \ + stbir__simdf_add(tot0, tot0, cs); \ + stbir__simdf_add(c, c, tot2); \ + stbir__simdf_add(tot0, tot0, c); \ + horizontal_coefficients += 
coefficient_width; \ + ++horizontal_contributors; \ + output += 3; \ + if (output < output_end) \ + { \ + stbir__simdf_store(output - 3, tot0); \ + continue; \ + } \ + stbir__simdf_0123to2301(tot1, tot0); \ + stbir__simdf_store2(output - 3, tot0); \ + stbir__simdf_store1(output + 2 - 3, tot1); \ + break; + +#endif + +#else + +#define stbir__1_coeff_only() \ + float tot0, tot1, tot2, c; \ + c = hc[0]; \ + tot0 = decode[0] * c; \ + tot1 = decode[1] * c; \ + tot2 = decode[2] * c; + +#define stbir__2_coeff_only() \ + float tot0, tot1, tot2, c; \ + c = hc[0]; \ + tot0 = decode[0] * c; \ + tot1 = decode[1] * c; \ + tot2 = decode[2] * c; \ + c = hc[1]; \ + tot0 += decode[3] * c; \ + tot1 += decode[4] * c; \ + tot2 += decode[5] * c; + +#define stbir__3_coeff_only() \ + float tot0, tot1, tot2, c; \ + c = hc[0]; \ + tot0 = decode[0] * c; \ + tot1 = decode[1] * c; \ + tot2 = decode[2] * c; \ + c = hc[1]; \ + tot0 += decode[3] * c; \ + tot1 += decode[4] * c; \ + tot2 += decode[5] * c; \ + c = hc[2]; \ + tot0 += decode[6] * c; \ + tot1 += decode[7] * c; \ + tot2 += decode[8] * c; + +#define stbir__store_output_tiny() \ + output[0] = tot0; \ + output[1] = tot1; \ + output[2] = tot2; \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 3; + +#define stbir__4_coeff_start() \ + float tota0, tota1, tota2, totb0, totb1, totb2, totc0, totc1, totc2, totd0, totd1, totd2, c; \ + c = hc[0]; \ + tota0 = decode[0] * c; \ + tota1 = decode[1] * c; \ + tota2 = decode[2] * c; \ + c = hc[1]; \ + totb0 = decode[3] * c; \ + totb1 = decode[4] * c; \ + totb2 = decode[5] * c; \ + c = hc[2]; \ + totc0 = decode[6] * c; \ + totc1 = decode[7] * c; \ + totc2 = decode[8] * c; \ + c = hc[3]; \ + totd0 = decode[9] * c; \ + totd1 = decode[10] * c; \ + totd2 = decode[11] * c; + +#define stbir__4_coeff_continue_from_4(ofs) \ + c = hc[0 + (ofs)]; \ + tota0 += decode[0 + (ofs) * 3] * c; \ + tota1 += decode[1 + (ofs) * 3] * c; \ + tota2 += decode[2 + (ofs) * 3] * c; \ + c = hc[1 + (ofs)]; \ + totb0 += decode[3 + (ofs) * 3] * c; \ + totb1 += decode[4 + (ofs) * 3] * c; \ + totb2 += decode[5 + (ofs) * 3] * c; \ + c = hc[2 + (ofs)]; \ + totc0 += decode[6 + (ofs) * 3] * c; \ + totc1 += decode[7 + (ofs) * 3] * c; \ + totc2 += decode[8 + (ofs) * 3] * c; \ + c = hc[3 + (ofs)]; \ + totd0 += decode[9 + (ofs) * 3] * c; \ + totd1 += decode[10 + (ofs) * 3] * c; \ + totd2 += decode[11 + (ofs) * 3] * c; + +#define stbir__1_coeff_remnant(ofs) \ + c = hc[0 + (ofs)]; \ + tota0 += decode[0 + (ofs) * 3] * c; \ + tota1 += decode[1 + (ofs) * 3] * c; \ + tota2 += decode[2 + (ofs) * 3] * c; + +#define stbir__2_coeff_remnant(ofs) \ + c = hc[0 + (ofs)]; \ + tota0 += decode[0 + (ofs) * 3] * c; \ + tota1 += decode[1 + (ofs) * 3] * c; \ + tota2 += decode[2 + (ofs) * 3] * c; \ + c = hc[1 + (ofs)]; \ + totb0 += decode[3 + (ofs) * 3] * c; \ + totb1 += decode[4 + (ofs) * 3] * c; \ + totb2 += decode[5 + (ofs) * 3] * c; + +#define stbir__3_coeff_remnant(ofs) \ + c = hc[0 + (ofs)]; \ + tota0 += decode[0 + (ofs) * 3] * c; \ + tota1 += decode[1 + (ofs) * 3] * c; \ + tota2 += decode[2 + (ofs) * 3] * c; \ + c = hc[1 + (ofs)]; \ + totb0 += decode[3 + (ofs) * 3] * c; \ + totb1 += decode[4 + (ofs) * 3] * c; \ + totb2 += decode[5 + (ofs) * 3] * c; \ + c = hc[2 + (ofs)]; \ + totc0 += decode[6 + (ofs) * 3] * c; \ + totc1 += decode[7 + (ofs) * 3] * c; \ + totc2 += decode[8 + (ofs) * 3] * c; + +#define stbir__store_output() \ + output[0] = (tota0 + totc0) + (totb0 + totd0); \ + output[1] = (tota1 + totc1) + (totb1 + totd1); \ + output[2] = 
(tota2 + totc2) + (totb2 + totd2); \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 3; + +#endif + +#define STBIR__horizontal_channels 3 +#define STB_IMAGE_RESIZE_DO_HORIZONTALS +#include STBIR__HEADER_FILENAME + +//================= +// Do 4 channel horizontal routines + +#ifdef STBIR_SIMD + +#define stbir__1_coeff_only() \ + stbir__simdf tot, c; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load1(c, hc); \ + stbir__simdf_0123to0000(c, c); \ + stbir__simdf_mult_mem(tot, c, decode); + +#define stbir__2_coeff_only() \ + stbir__simdf tot, c, cs; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load2(cs, hc); \ + stbir__simdf_0123to0000(c, cs); \ + stbir__simdf_mult_mem(tot, c, decode); \ + stbir__simdf_0123to1111(c, cs); \ + stbir__simdf_madd_mem(tot, tot, c, decode + 4); + +#define stbir__3_coeff_only() \ + stbir__simdf tot, c, cs; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load(cs, hc); \ + stbir__simdf_0123to0000(c, cs); \ + stbir__simdf_mult_mem(tot, c, decode); \ + stbir__simdf_0123to1111(c, cs); \ + stbir__simdf_madd_mem(tot, tot, c, decode + 4); \ + stbir__simdf_0123to2222(c, cs); \ + stbir__simdf_madd_mem(tot, tot, c, decode + 8); + +#define stbir__store_output_tiny() \ + stbir__simdf_store(output, tot); \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 4; + +#ifdef STBIR_SIMD8 + +#define stbir__4_coeff_start() \ + stbir__simdf8 tot0, c, cs; \ + stbir__simdf t; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf8_load4b(cs, hc); \ + stbir__simdf8_0123to00001111(c, cs); \ + stbir__simdf8_mult_mem(tot0, c, decode); \ + stbir__simdf8_0123to22223333(c, cs); \ + stbir__simdf8_madd_mem(tot0, tot0, c, decode + 8); + +#define stbir__4_coeff_continue_from_4(ofs) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf8_load4b(cs, hc + (ofs)); \ + stbir__simdf8_0123to00001111(c, cs); \ + stbir__simdf8_madd_mem(tot0, tot0, c, decode + (ofs) * 4); \ + stbir__simdf8_0123to22223333(c, cs); \ + stbir__simdf8_madd_mem(tot0, tot0, c, decode + (ofs) * 4 + 8); + +#define stbir__1_coeff_remnant(ofs) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load1rep4(t, hc + (ofs)); \ + stbir__simdf8_madd_mem4(tot0, tot0, t, decode + (ofs) * 4); + +#define stbir__2_coeff_remnant(ofs) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf8_load4b(cs, hc + (ofs) - 2); \ + stbir__simdf8_0123to22223333(c, cs); \ + stbir__simdf8_madd_mem(tot0, tot0, c, decode + (ofs) * 4); + +#define stbir__3_coeff_remnant(ofs) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf8_load4b(cs, hc + (ofs)); \ + stbir__simdf8_0123to00001111(c, cs); \ + stbir__simdf8_madd_mem(tot0, tot0, c, decode + (ofs) * 4); \ + stbir__simdf8_0123to2222(t, cs); \ + stbir__simdf8_madd_mem4(tot0, tot0, t, decode + (ofs) * 4 + 8); + +#define stbir__store_output() \ + stbir__simdf8_add4halves(t, stbir__if_simdf8_cast_to_simdf4(tot0), tot0); \ + stbir__simdf_store(output, t); \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 4; + +#else + +#define stbir__4_coeff_start() \ + stbir__simdf tot0, tot1, c, cs; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load(cs, hc); \ + stbir__simdf_0123to0000(c, cs); \ + stbir__simdf_mult_mem(tot0, c, decode); \ + stbir__simdf_0123to1111(c, cs); \ + stbir__simdf_mult_mem(tot1, c, decode + 4); \ + stbir__simdf_0123to2222(c, cs); \ + stbir__simdf_madd_mem(tot0, tot0, c, decode + 8); \ + stbir__simdf_0123to3333(c, cs); \ + stbir__simdf_madd_mem(tot1, tot1, c, decode + 12); + +#define 
stbir__4_coeff_continue_from_4(ofs) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load(cs, hc + (ofs)); \ + stbir__simdf_0123to0000(c, cs); \ + stbir__simdf_madd_mem(tot0, tot0, c, decode + (ofs) * 4); \ + stbir__simdf_0123to1111(c, cs); \ + stbir__simdf_madd_mem(tot1, tot1, c, decode + (ofs) * 4 + 4); \ + stbir__simdf_0123to2222(c, cs); \ + stbir__simdf_madd_mem(tot0, tot0, c, decode + (ofs) * 4 + 8); \ + stbir__simdf_0123to3333(c, cs); \ + stbir__simdf_madd_mem(tot1, tot1, c, decode + (ofs) * 4 + 12); + +#define stbir__1_coeff_remnant(ofs) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load1(c, hc + (ofs)); \ + stbir__simdf_0123to0000(c, c); \ + stbir__simdf_madd_mem(tot0, tot0, c, decode + (ofs) * 4); + +#define stbir__2_coeff_remnant(ofs) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load2(cs, hc + (ofs)); \ + stbir__simdf_0123to0000(c, cs); \ + stbir__simdf_madd_mem(tot0, tot0, c, decode + (ofs) * 4); \ + stbir__simdf_0123to1111(c, cs); \ + stbir__simdf_madd_mem(tot1, tot1, c, decode + (ofs) * 4 + 4); + +#define stbir__3_coeff_remnant(ofs) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load(cs, hc + (ofs)); \ + stbir__simdf_0123to0000(c, cs); \ + stbir__simdf_madd_mem(tot0, tot0, c, decode + (ofs) * 4); \ + stbir__simdf_0123to1111(c, cs); \ + stbir__simdf_madd_mem(tot1, tot1, c, decode + (ofs) * 4 + 4); \ + stbir__simdf_0123to2222(c, cs); \ + stbir__simdf_madd_mem(tot0, tot0, c, decode + (ofs) * 4 + 8); + +#define stbir__store_output() \ + stbir__simdf_add(tot0, tot0, tot1); \ + stbir__simdf_store(output, tot0); \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 4; + +#endif + +#else + +#define stbir__1_coeff_only() \ + float p0, p1, p2, p3, c; \ + STBIR_SIMD_NO_UNROLL(decode); \ + c = hc[0]; \ + p0 = decode[0] * c; \ + p1 = decode[1] * c; \ + p2 = decode[2] * c; \ + p3 = decode[3] * c; + +#define stbir__2_coeff_only() \ + float p0, p1, p2, p3, c; \ + STBIR_SIMD_NO_UNROLL(decode); \ + c = hc[0]; \ + p0 = decode[0] * c; \ + p1 = decode[1] * c; \ + p2 = decode[2] * c; \ + p3 = decode[3] * c; \ + c = hc[1]; \ + p0 += decode[4] * c; \ + p1 += decode[5] * c; \ + p2 += decode[6] * c; \ + p3 += decode[7] * c; + +#define stbir__3_coeff_only() \ + float p0, p1, p2, p3, c; \ + STBIR_SIMD_NO_UNROLL(decode); \ + c = hc[0]; \ + p0 = decode[0] * c; \ + p1 = decode[1] * c; \ + p2 = decode[2] * c; \ + p3 = decode[3] * c; \ + c = hc[1]; \ + p0 += decode[4] * c; \ + p1 += decode[5] * c; \ + p2 += decode[6] * c; \ + p3 += decode[7] * c; \ + c = hc[2]; \ + p0 += decode[8] * c; \ + p1 += decode[9] * c; \ + p2 += decode[10] * c; \ + p3 += decode[11] * c; + +#define stbir__store_output_tiny() \ + output[0] = p0; \ + output[1] = p1; \ + output[2] = p2; \ + output[3] = p3; \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 4; + +#define stbir__4_coeff_start() \ + float x0, x1, x2, x3, y0, y1, y2, y3, c; \ + STBIR_SIMD_NO_UNROLL(decode); \ + c = hc[0]; \ + x0 = decode[0] * c; \ + x1 = decode[1] * c; \ + x2 = decode[2] * c; \ + x3 = decode[3] * c; \ + c = hc[1]; \ + y0 = decode[4] * c; \ + y1 = decode[5] * c; \ + y2 = decode[6] * c; \ + y3 = decode[7] * c; \ + c = hc[2]; \ + x0 += decode[8] * c; \ + x1 += decode[9] * c; \ + x2 += decode[10] * c; \ + x3 += decode[11] * c; \ + c = hc[3]; \ + y0 += decode[12] * c; \ + y1 += decode[13] * c; \ + y2 += decode[14] * c; \ + y3 += decode[15] * c; + +#define stbir__4_coeff_continue_from_4(ofs) \ + STBIR_SIMD_NO_UNROLL(decode); \ + c = hc[0 + (ofs)]; \ + x0 += 
decode[0 + (ofs) * 4] * c; \ + x1 += decode[1 + (ofs) * 4] * c; \ + x2 += decode[2 + (ofs) * 4] * c; \ + x3 += decode[3 + (ofs) * 4] * c; \ + c = hc[1 + (ofs)]; \ + y0 += decode[4 + (ofs) * 4] * c; \ + y1 += decode[5 + (ofs) * 4] * c; \ + y2 += decode[6 + (ofs) * 4] * c; \ + y3 += decode[7 + (ofs) * 4] * c; \ + c = hc[2 + (ofs)]; \ + x0 += decode[8 + (ofs) * 4] * c; \ + x1 += decode[9 + (ofs) * 4] * c; \ + x2 += decode[10 + (ofs) * 4] * c; \ + x3 += decode[11 + (ofs) * 4] * c; \ + c = hc[3 + (ofs)]; \ + y0 += decode[12 + (ofs) * 4] * c; \ + y1 += decode[13 + (ofs) * 4] * c; \ + y2 += decode[14 + (ofs) * 4] * c; \ + y3 += decode[15 + (ofs) * 4] * c; + +#define stbir__1_coeff_remnant(ofs) \ + STBIR_SIMD_NO_UNROLL(decode); \ + c = hc[0 + (ofs)]; \ + x0 += decode[0 + (ofs) * 4] * c; \ + x1 += decode[1 + (ofs) * 4] * c; \ + x2 += decode[2 + (ofs) * 4] * c; \ + x3 += decode[3 + (ofs) * 4] * c; + +#define stbir__2_coeff_remnant(ofs) \ + STBIR_SIMD_NO_UNROLL(decode); \ + c = hc[0 + (ofs)]; \ + x0 += decode[0 + (ofs) * 4] * c; \ + x1 += decode[1 + (ofs) * 4] * c; \ + x2 += decode[2 + (ofs) * 4] * c; \ + x3 += decode[3 + (ofs) * 4] * c; \ + c = hc[1 + (ofs)]; \ + y0 += decode[4 + (ofs) * 4] * c; \ + y1 += decode[5 + (ofs) * 4] * c; \ + y2 += decode[6 + (ofs) * 4] * c; \ + y3 += decode[7 + (ofs) * 4] * c; + +#define stbir__3_coeff_remnant(ofs) \ + STBIR_SIMD_NO_UNROLL(decode); \ + c = hc[0 + (ofs)]; \ + x0 += decode[0 + (ofs) * 4] * c; \ + x1 += decode[1 + (ofs) * 4] * c; \ + x2 += decode[2 + (ofs) * 4] * c; \ + x3 += decode[3 + (ofs) * 4] * c; \ + c = hc[1 + (ofs)]; \ + y0 += decode[4 + (ofs) * 4] * c; \ + y1 += decode[5 + (ofs) * 4] * c; \ + y2 += decode[6 + (ofs) * 4] * c; \ + y3 += decode[7 + (ofs) * 4] * c; \ + c = hc[2 + (ofs)]; \ + x0 += decode[8 + (ofs) * 4] * c; \ + x1 += decode[9 + (ofs) * 4] * c; \ + x2 += decode[10 + (ofs) * 4] * c; \ + x3 += decode[11 + (ofs) * 4] * c; + +#define stbir__store_output() \ + output[0] = x0 + y0; \ + output[1] = x1 + y1; \ + output[2] = x2 + y2; \ + output[3] = x3 + y3; \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 4; + +#endif + +#define STBIR__horizontal_channels 4 +#define STB_IMAGE_RESIZE_DO_HORIZONTALS +#include STBIR__HEADER_FILENAME + +//================= +// Do 7 channel horizontal routines + +#ifdef STBIR_SIMD + +#define stbir__1_coeff_only() \ + stbir__simdf tot0, tot1, c; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load1(c, hc); \ + stbir__simdf_0123to0000(c, c); \ + stbir__simdf_mult_mem(tot0, c, decode); \ + stbir__simdf_mult_mem(tot1, c, decode + 3); + +#define stbir__2_coeff_only() \ + stbir__simdf tot0, tot1, c, cs; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load2(cs, hc); \ + stbir__simdf_0123to0000(c, cs); \ + stbir__simdf_mult_mem(tot0, c, decode); \ + stbir__simdf_mult_mem(tot1, c, decode + 3); \ + stbir__simdf_0123to1111(c, cs); \ + stbir__simdf_madd_mem(tot0, tot0, c, decode + 7); \ + stbir__simdf_madd_mem(tot1, tot1, c, decode + 10); + +#define stbir__3_coeff_only() \ + stbir__simdf tot0, tot1, c, cs; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load(cs, hc); \ + stbir__simdf_0123to0000(c, cs); \ + stbir__simdf_mult_mem(tot0, c, decode); \ + stbir__simdf_mult_mem(tot1, c, decode + 3); \ + stbir__simdf_0123to1111(c, cs); \ + stbir__simdf_madd_mem(tot0, tot0, c, decode + 7); \ + stbir__simdf_madd_mem(tot1, tot1, c, decode + 10); \ + stbir__simdf_0123to2222(c, cs); \ + stbir__simdf_madd_mem(tot0, tot0, c, decode + 14); \ + stbir__simdf_madd_mem(tot1, tot1, c, decode + 
17); + +#define stbir__store_output_tiny() \ + stbir__simdf_store(output + 3, tot1); \ + stbir__simdf_store(output, tot0); \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 7; + +#ifdef STBIR_SIMD8 + +#define stbir__4_coeff_start() \ + stbir__simdf8 tot0, tot1, c, cs; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf8_load4b(cs, hc); \ + stbir__simdf8_0123to00000000(c, cs); \ + stbir__simdf8_mult_mem(tot0, c, decode); \ + stbir__simdf8_0123to11111111(c, cs); \ + stbir__simdf8_mult_mem(tot1, c, decode + 7); \ + stbir__simdf8_0123to22222222(c, cs); \ + stbir__simdf8_madd_mem(tot0, tot0, c, decode + 14); \ + stbir__simdf8_0123to33333333(c, cs); \ + stbir__simdf8_madd_mem(tot1, tot1, c, decode + 21); + +#define stbir__4_coeff_continue_from_4(ofs) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf8_load4b(cs, hc + (ofs)); \ + stbir__simdf8_0123to00000000(c, cs); \ + stbir__simdf8_madd_mem(tot0, tot0, c, decode + (ofs) * 7); \ + stbir__simdf8_0123to11111111(c, cs); \ + stbir__simdf8_madd_mem(tot1, tot1, c, decode + (ofs) * 7 + 7); \ + stbir__simdf8_0123to22222222(c, cs); \ + stbir__simdf8_madd_mem(tot0, tot0, c, decode + (ofs) * 7 + 14); \ + stbir__simdf8_0123to33333333(c, cs); \ + stbir__simdf8_madd_mem(tot1, tot1, c, decode + (ofs) * 7 + 21); + +#define stbir__1_coeff_remnant(ofs) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf8_load1b(c, hc + (ofs)); \ + stbir__simdf8_madd_mem(tot0, tot0, c, decode + (ofs) * 7); + +#define stbir__2_coeff_remnant(ofs) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf8_load1b(c, hc + (ofs)); \ + stbir__simdf8_madd_mem(tot0, tot0, c, decode + (ofs) * 7); \ + stbir__simdf8_load1b(c, hc + (ofs) + 1); \ + stbir__simdf8_madd_mem(tot1, tot1, c, decode + (ofs) * 7 + 7); + +#define stbir__3_coeff_remnant(ofs) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf8_load4b(cs, hc + (ofs)); \ + stbir__simdf8_0123to00000000(c, cs); \ + stbir__simdf8_madd_mem(tot0, tot0, c, decode + (ofs) * 7); \ + stbir__simdf8_0123to11111111(c, cs); \ + stbir__simdf8_madd_mem(tot1, tot1, c, decode + (ofs) * 7 + 7); \ + stbir__simdf8_0123to22222222(c, cs); \ + stbir__simdf8_madd_mem(tot0, tot0, c, decode + (ofs) * 7 + 14); + +#define stbir__store_output() \ + stbir__simdf8_add(tot0, tot0, tot1); \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 7; \ + if (output < output_end) \ + { \ + stbir__simdf8_store(output - 7, tot0); \ + continue; \ + } \ + stbir__simdf_store(output - 7 + 3, stbir__simdf_swiz(stbir__simdf8_gettop4(tot0), 0, 0, 1, 2)); \ + stbir__simdf_store(output - 7, stbir__if_simdf8_cast_to_simdf4(tot0)); \ + break; + +#else + +#define stbir__4_coeff_start() \ + stbir__simdf tot0, tot1, tot2, tot3, c, cs; \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load(cs, hc); \ + stbir__simdf_0123to0000(c, cs); \ + stbir__simdf_mult_mem(tot0, c, decode); \ + stbir__simdf_mult_mem(tot1, c, decode + 3); \ + stbir__simdf_0123to1111(c, cs); \ + stbir__simdf_mult_mem(tot2, c, decode + 7); \ + stbir__simdf_mult_mem(tot3, c, decode + 10); \ + stbir__simdf_0123to2222(c, cs); \ + stbir__simdf_madd_mem(tot0, tot0, c, decode + 14); \ + stbir__simdf_madd_mem(tot1, tot1, c, decode + 17); \ + stbir__simdf_0123to3333(c, cs); \ + stbir__simdf_madd_mem(tot2, tot2, c, decode + 21); \ + stbir__simdf_madd_mem(tot3, tot3, c, decode + 24); + +#define stbir__4_coeff_continue_from_4(ofs) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load(cs, hc + (ofs)); \ + stbir__simdf_0123to0000(c, cs); \ + 
stbir__simdf_madd_mem(tot0, tot0, c, decode + (ofs) * 7); \ + stbir__simdf_madd_mem(tot1, tot1, c, decode + (ofs) * 7 + 3); \ + stbir__simdf_0123to1111(c, cs); \ + stbir__simdf_madd_mem(tot2, tot2, c, decode + (ofs) * 7 + 7); \ + stbir__simdf_madd_mem(tot3, tot3, c, decode + (ofs) * 7 + 10); \ + stbir__simdf_0123to2222(c, cs); \ + stbir__simdf_madd_mem(tot0, tot0, c, decode + (ofs) * 7 + 14); \ + stbir__simdf_madd_mem(tot1, tot1, c, decode + (ofs) * 7 + 17); \ + stbir__simdf_0123to3333(c, cs); \ + stbir__simdf_madd_mem(tot2, tot2, c, decode + (ofs) * 7 + 21); \ + stbir__simdf_madd_mem(tot3, tot3, c, decode + (ofs) * 7 + 24); + +#define stbir__1_coeff_remnant(ofs) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load1(c, hc + (ofs)); \ + stbir__simdf_0123to0000(c, c); \ + stbir__simdf_madd_mem(tot0, tot0, c, decode + (ofs) * 7); \ + stbir__simdf_madd_mem(tot1, tot1, c, decode + (ofs) * 7 + 3); + +#define stbir__2_coeff_remnant(ofs) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load2(cs, hc + (ofs)); \ + stbir__simdf_0123to0000(c, cs); \ + stbir__simdf_madd_mem(tot0, tot0, c, decode + (ofs) * 7); \ + stbir__simdf_madd_mem(tot1, tot1, c, decode + (ofs) * 7 + 3); \ + stbir__simdf_0123to1111(c, cs); \ + stbir__simdf_madd_mem(tot2, tot2, c, decode + (ofs) * 7 + 7); \ + stbir__simdf_madd_mem(tot3, tot3, c, decode + (ofs) * 7 + 10); + +#define stbir__3_coeff_remnant(ofs) \ + STBIR_SIMD_NO_UNROLL(decode); \ + stbir__simdf_load(cs, hc + (ofs)); \ + stbir__simdf_0123to0000(c, cs); \ + stbir__simdf_madd_mem(tot0, tot0, c, decode + (ofs) * 7); \ + stbir__simdf_madd_mem(tot1, tot1, c, decode + (ofs) * 7 + 3); \ + stbir__simdf_0123to1111(c, cs); \ + stbir__simdf_madd_mem(tot2, tot2, c, decode + (ofs) * 7 + 7); \ + stbir__simdf_madd_mem(tot3, tot3, c, decode + (ofs) * 7 + 10); \ + stbir__simdf_0123to2222(c, cs); \ + stbir__simdf_madd_mem(tot0, tot0, c, decode + (ofs) * 7 + 14); \ + stbir__simdf_madd_mem(tot1, tot1, c, decode + (ofs) * 7 + 17); + +#define stbir__store_output() \ + stbir__simdf_add(tot0, tot0, tot2); \ + stbir__simdf_add(tot1, tot1, tot3); \ + stbir__simdf_store(output + 3, tot1); \ + stbir__simdf_store(output, tot0); \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 7; + +#endif + +#else + +#define stbir__1_coeff_only() \ + float tot0, tot1, tot2, tot3, tot4, tot5, tot6, c; \ + c = hc[0]; \ + tot0 = decode[0] * c; \ + tot1 = decode[1] * c; \ + tot2 = decode[2] * c; \ + tot3 = decode[3] * c; \ + tot4 = decode[4] * c; \ + tot5 = decode[5] * c; \ + tot6 = decode[6] * c; + +#define stbir__2_coeff_only() \ + float tot0, tot1, tot2, tot3, tot4, tot5, tot6, c; \ + c = hc[0]; \ + tot0 = decode[0] * c; \ + tot1 = decode[1] * c; \ + tot2 = decode[2] * c; \ + tot3 = decode[3] * c; \ + tot4 = decode[4] * c; \ + tot5 = decode[5] * c; \ + tot6 = decode[6] * c; \ + c = hc[1]; \ + tot0 += decode[7] * c; \ + tot1 += decode[8] * c; \ + tot2 += decode[9] * c; \ + tot3 += decode[10] * c; \ + tot4 += decode[11] * c; \ + tot5 += decode[12] * c; \ + tot6 += decode[13] * c; + +#define stbir__3_coeff_only() \ + float tot0, tot1, tot2, tot3, tot4, tot5, tot6, c; \ + c = hc[0]; \ + tot0 = decode[0] * c; \ + tot1 = decode[1] * c; \ + tot2 = decode[2] * c; \ + tot3 = decode[3] * c; \ + tot4 = decode[4] * c; \ + tot5 = decode[5] * c; \ + tot6 = decode[6] * c; \ + c = hc[1]; \ + tot0 += decode[7] * c; \ + tot1 += decode[8] * c; \ + tot2 += decode[9] * c; \ + tot3 += decode[10] * c; \ + tot4 += decode[11] * c; \ + tot5 += decode[12] * c; \ + tot6 += decode[13] * c; \ + 
c = hc[2]; \ + tot0 += decode[14] * c; \ + tot1 += decode[15] * c; \ + tot2 += decode[16] * c; \ + tot3 += decode[17] * c; \ + tot4 += decode[18] * c; \ + tot5 += decode[19] * c; \ + tot6 += decode[20] * c; + +#define stbir__store_output_tiny() \ + output[0] = tot0; \ + output[1] = tot1; \ + output[2] = tot2; \ + output[3] = tot3; \ + output[4] = tot4; \ + output[5] = tot5; \ + output[6] = tot6; \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 7; + +#define stbir__4_coeff_start() \ + float x0, x1, x2, x3, x4, x5, x6, y0, y1, y2, y3, y4, y5, y6, c; \ + STBIR_SIMD_NO_UNROLL(decode); \ + c = hc[0]; \ + x0 = decode[0] * c; \ + x1 = decode[1] * c; \ + x2 = decode[2] * c; \ + x3 = decode[3] * c; \ + x4 = decode[4] * c; \ + x5 = decode[5] * c; \ + x6 = decode[6] * c; \ + c = hc[1]; \ + y0 = decode[7] * c; \ + y1 = decode[8] * c; \ + y2 = decode[9] * c; \ + y3 = decode[10] * c; \ + y4 = decode[11] * c; \ + y5 = decode[12] * c; \ + y6 = decode[13] * c; \ + c = hc[2]; \ + x0 += decode[14] * c; \ + x1 += decode[15] * c; \ + x2 += decode[16] * c; \ + x3 += decode[17] * c; \ + x4 += decode[18] * c; \ + x5 += decode[19] * c; \ + x6 += decode[20] * c; \ + c = hc[3]; \ + y0 += decode[21] * c; \ + y1 += decode[22] * c; \ + y2 += decode[23] * c; \ + y3 += decode[24] * c; \ + y4 += decode[25] * c; \ + y5 += decode[26] * c; \ + y6 += decode[27] * c; + +#define stbir__4_coeff_continue_from_4(ofs) \ + STBIR_SIMD_NO_UNROLL(decode); \ + c = hc[0 + (ofs)]; \ + x0 += decode[0 + (ofs) * 7] * c; \ + x1 += decode[1 + (ofs) * 7] * c; \ + x2 += decode[2 + (ofs) * 7] * c; \ + x3 += decode[3 + (ofs) * 7] * c; \ + x4 += decode[4 + (ofs) * 7] * c; \ + x5 += decode[5 + (ofs) * 7] * c; \ + x6 += decode[6 + (ofs) * 7] * c; \ + c = hc[1 + (ofs)]; \ + y0 += decode[7 + (ofs) * 7] * c; \ + y1 += decode[8 + (ofs) * 7] * c; \ + y2 += decode[9 + (ofs) * 7] * c; \ + y3 += decode[10 + (ofs) * 7] * c; \ + y4 += decode[11 + (ofs) * 7] * c; \ + y5 += decode[12 + (ofs) * 7] * c; \ + y6 += decode[13 + (ofs) * 7] * c; \ + c = hc[2 + (ofs)]; \ + x0 += decode[14 + (ofs) * 7] * c; \ + x1 += decode[15 + (ofs) * 7] * c; \ + x2 += decode[16 + (ofs) * 7] * c; \ + x3 += decode[17 + (ofs) * 7] * c; \ + x4 += decode[18 + (ofs) * 7] * c; \ + x5 += decode[19 + (ofs) * 7] * c; \ + x6 += decode[20 + (ofs) * 7] * c; \ + c = hc[3 + (ofs)]; \ + y0 += decode[21 + (ofs) * 7] * c; \ + y1 += decode[22 + (ofs) * 7] * c; \ + y2 += decode[23 + (ofs) * 7] * c; \ + y3 += decode[24 + (ofs) * 7] * c; \ + y4 += decode[25 + (ofs) * 7] * c; \ + y5 += decode[26 + (ofs) * 7] * c; \ + y6 += decode[27 + (ofs) * 7] * c; + +#define stbir__1_coeff_remnant(ofs) \ + STBIR_SIMD_NO_UNROLL(decode); \ + c = hc[0 + (ofs)]; \ + x0 += decode[0 + (ofs) * 7] * c; \ + x1 += decode[1 + (ofs) * 7] * c; \ + x2 += decode[2 + (ofs) * 7] * c; \ + x3 += decode[3 + (ofs) * 7] * c; \ + x4 += decode[4 + (ofs) * 7] * c; \ + x5 += decode[5 + (ofs) * 7] * c; \ + x6 += decode[6 + (ofs) * 7] * c; + +#define stbir__2_coeff_remnant(ofs) \ + STBIR_SIMD_NO_UNROLL(decode); \ + c = hc[0 + (ofs)]; \ + x0 += decode[0 + (ofs) * 7] * c; \ + x1 += decode[1 + (ofs) * 7] * c; \ + x2 += decode[2 + (ofs) * 7] * c; \ + x3 += decode[3 + (ofs) * 7] * c; \ + x4 += decode[4 + (ofs) * 7] * c; \ + x5 += decode[5 + (ofs) * 7] * c; \ + x6 += decode[6 + (ofs) * 7] * c; \ + c = hc[1 + (ofs)]; \ + y0 += decode[7 + (ofs) * 7] * c; \ + y1 += decode[8 + (ofs) * 7] * c; \ + y2 += decode[9 + (ofs) * 7] * c; \ + y3 += decode[10 + (ofs) * 7] * c; \ + y4 += decode[11 + (ofs) * 7] * c; \ + y5 += 
decode[12 + (ofs) * 7] * c; \ + y6 += decode[13 + (ofs) * 7] * c; + +#define stbir__3_coeff_remnant(ofs) \ + STBIR_SIMD_NO_UNROLL(decode); \ + c = hc[0 + (ofs)]; \ + x0 += decode[0 + (ofs) * 7] * c; \ + x1 += decode[1 + (ofs) * 7] * c; \ + x2 += decode[2 + (ofs) * 7] * c; \ + x3 += decode[3 + (ofs) * 7] * c; \ + x4 += decode[4 + (ofs) * 7] * c; \ + x5 += decode[5 + (ofs) * 7] * c; \ + x6 += decode[6 + (ofs) * 7] * c; \ + c = hc[1 + (ofs)]; \ + y0 += decode[7 + (ofs) * 7] * c; \ + y1 += decode[8 + (ofs) * 7] * c; \ + y2 += decode[9 + (ofs) * 7] * c; \ + y3 += decode[10 + (ofs) * 7] * c; \ + y4 += decode[11 + (ofs) * 7] * c; \ + y5 += decode[12 + (ofs) * 7] * c; \ + y6 += decode[13 + (ofs) * 7] * c; \ + c = hc[2 + (ofs)]; \ + x0 += decode[14 + (ofs) * 7] * c; \ + x1 += decode[15 + (ofs) * 7] * c; \ + x2 += decode[16 + (ofs) * 7] * c; \ + x3 += decode[17 + (ofs) * 7] * c; \ + x4 += decode[18 + (ofs) * 7] * c; \ + x5 += decode[19 + (ofs) * 7] * c; \ + x6 += decode[20 + (ofs) * 7] * c; + +#define stbir__store_output() \ + output[0] = x0 + y0; \ + output[1] = x1 + y1; \ + output[2] = x2 + y2; \ + output[3] = x3 + y3; \ + output[4] = x4 + y4; \ + output[5] = x5 + y5; \ + output[6] = x6 + y6; \ + horizontal_coefficients += coefficient_width; \ + ++horizontal_contributors; \ + output += 7; + +#endif + +#define STBIR__horizontal_channels 7 +#define STB_IMAGE_RESIZE_DO_HORIZONTALS +#include STBIR__HEADER_FILENAME + +// include all of the vertical resamplers (both scatter and gather versions) + +#define STBIR__vertical_channels 1 +#define STB_IMAGE_RESIZE_DO_VERTICALS +#include STBIR__HEADER_FILENAME + +#define STBIR__vertical_channels 1 +#define STB_IMAGE_RESIZE_DO_VERTICALS +#define STB_IMAGE_RESIZE_VERTICAL_CONTINUE +#include STBIR__HEADER_FILENAME + +#define STBIR__vertical_channels 2 +#define STB_IMAGE_RESIZE_DO_VERTICALS +#include STBIR__HEADER_FILENAME + +#define STBIR__vertical_channels 2 +#define STB_IMAGE_RESIZE_DO_VERTICALS +#define STB_IMAGE_RESIZE_VERTICAL_CONTINUE +#include STBIR__HEADER_FILENAME + +#define STBIR__vertical_channels 3 +#define STB_IMAGE_RESIZE_DO_VERTICALS +#include STBIR__HEADER_FILENAME + +#define STBIR__vertical_channels 3 +#define STB_IMAGE_RESIZE_DO_VERTICALS +#define STB_IMAGE_RESIZE_VERTICAL_CONTINUE +#include STBIR__HEADER_FILENAME + +#define STBIR__vertical_channels 4 +#define STB_IMAGE_RESIZE_DO_VERTICALS +#include STBIR__HEADER_FILENAME + +#define STBIR__vertical_channels 4 +#define STB_IMAGE_RESIZE_DO_VERTICALS +#define STB_IMAGE_RESIZE_VERTICAL_CONTINUE +#include STBIR__HEADER_FILENAME + +#define STBIR__vertical_channels 5 +#define STB_IMAGE_RESIZE_DO_VERTICALS +#include STBIR__HEADER_FILENAME + +#define STBIR__vertical_channels 5 +#define STB_IMAGE_RESIZE_DO_VERTICALS +#define STB_IMAGE_RESIZE_VERTICAL_CONTINUE +#include STBIR__HEADER_FILENAME + +#define STBIR__vertical_channels 6 +#define STB_IMAGE_RESIZE_DO_VERTICALS +#include STBIR__HEADER_FILENAME + +#define STBIR__vertical_channels 6 +#define STB_IMAGE_RESIZE_DO_VERTICALS +#define STB_IMAGE_RESIZE_VERTICAL_CONTINUE +#include STBIR__HEADER_FILENAME + +#define STBIR__vertical_channels 7 +#define STB_IMAGE_RESIZE_DO_VERTICALS +#include STBIR__HEADER_FILENAME + +#define STBIR__vertical_channels 7 +#define STB_IMAGE_RESIZE_DO_VERTICALS +#define STB_IMAGE_RESIZE_VERTICAL_CONTINUE +#include STBIR__HEADER_FILENAME + +#define STBIR__vertical_channels 8 +#define STB_IMAGE_RESIZE_DO_VERTICALS +#include STBIR__HEADER_FILENAME + +#define STBIR__vertical_channels 8 +#define STB_IMAGE_RESIZE_DO_VERTICALS +#define 
STB_IMAGE_RESIZE_VERTICAL_CONTINUE +#include STBIR__HEADER_FILENAME + +typedef void STBIR_VERTICAL_GATHERFUNC(float *output, float const *coeffs, float const **inputs, float const *input0_end); + +static STBIR_VERTICAL_GATHERFUNC *stbir__vertical_gathers[8] = + { + stbir__vertical_gather_with_1_coeffs, stbir__vertical_gather_with_2_coeffs, stbir__vertical_gather_with_3_coeffs, stbir__vertical_gather_with_4_coeffs, stbir__vertical_gather_with_5_coeffs, stbir__vertical_gather_with_6_coeffs, stbir__vertical_gather_with_7_coeffs, stbir__vertical_gather_with_8_coeffs}; + +static STBIR_VERTICAL_GATHERFUNC *stbir__vertical_gathers_continues[8] = + { + stbir__vertical_gather_with_1_coeffs_cont, stbir__vertical_gather_with_2_coeffs_cont, stbir__vertical_gather_with_3_coeffs_cont, stbir__vertical_gather_with_4_coeffs_cont, stbir__vertical_gather_with_5_coeffs_cont, stbir__vertical_gather_with_6_coeffs_cont, stbir__vertical_gather_with_7_coeffs_cont, stbir__vertical_gather_with_8_coeffs_cont}; + +typedef void STBIR_VERTICAL_SCATTERFUNC(float **outputs, float const *coeffs, float const *input, float const *input_end); + +static STBIR_VERTICAL_SCATTERFUNC *stbir__vertical_scatter_sets[8] = + { + stbir__vertical_scatter_with_1_coeffs, stbir__vertical_scatter_with_2_coeffs, stbir__vertical_scatter_with_3_coeffs, stbir__vertical_scatter_with_4_coeffs, stbir__vertical_scatter_with_5_coeffs, stbir__vertical_scatter_with_6_coeffs, stbir__vertical_scatter_with_7_coeffs, stbir__vertical_scatter_with_8_coeffs}; + +static STBIR_VERTICAL_SCATTERFUNC *stbir__vertical_scatter_blends[8] = + { + stbir__vertical_scatter_with_1_coeffs_cont, stbir__vertical_scatter_with_2_coeffs_cont, stbir__vertical_scatter_with_3_coeffs_cont, stbir__vertical_scatter_with_4_coeffs_cont, stbir__vertical_scatter_with_5_coeffs_cont, stbir__vertical_scatter_with_6_coeffs_cont, stbir__vertical_scatter_with_7_coeffs_cont, stbir__vertical_scatter_with_8_coeffs_cont}; + +static void stbir__encode_scanline(stbir__info const *stbir_info, void *output_buffer_data, float *encode_buffer, int row STBIR_ONLY_PROFILE_GET_SPLIT_INFO) { - float transform[4]; - transform[0] = x_scale; - transform[1] = y_scale; - transform[2] = x_offset; - transform[3] = y_offset; - return stbir__resize_arbitrary(alloc_context, input_pixels, input_w, input_h, input_stride_in_bytes, - output_pixels, output_w, output_h, output_stride_in_bytes, - 0, 0, 1, 1, transform, num_channels, alpha_channel, flags, datatype, filter_horizontal, filter_vertical, - edge_mode_horizontal, edge_mode_vertical, space); + int num_pixels = stbir_info->horizontal.scale_info.output_sub_size; + int channels = stbir_info->channels; + int width_times_channels = num_pixels * channels; + void *output_buffer; + + // un-alpha weight if we need to + if (stbir_info->alpha_unweight) + { + STBIR_PROFILE_START(unalpha); + stbir_info->alpha_unweight(encode_buffer, width_times_channels); + STBIR_PROFILE_END(unalpha); + } + + // write directly into output by default + output_buffer = output_buffer_data; + + // if we have an output callback, we first convert the decode buffer in place (and then hand that to the callback) + if (stbir_info->out_pixels_cb) + output_buffer = encode_buffer; + + STBIR_PROFILE_START(encode); + // convert into the output buffer + stbir_info->encode_pixels(output_buffer, width_times_channels, encode_buffer); + STBIR_PROFILE_END(encode); + + // if we have an output callback, call it to send the data + if (stbir_info->out_pixels_cb) + stbir_info->out_pixels_cb(output_buffer, num_pixels, 
row, stbir_info->user_data); } -STBIRDEF int stbir_resize_region(const void *input_pixels, int input_w, int input_h, int input_stride_in_bytes, - void *output_pixels, int output_w, int output_h, int output_stride_in_bytes, - stbir_datatype datatype, - int num_channels, int alpha_channel, int flags, - stbir_edge edge_mode_horizontal, stbir_edge edge_mode_vertical, - stbir_filter filter_horizontal, stbir_filter filter_vertical, - stbir_colorspace space, void *alloc_context, - float s0, float t0, float s1, float t1) +// Get the ring buffer pointer for an index +static float *stbir__get_ring_buffer_entry(stbir__info const *stbir_info, stbir__per_split_info const *split_info, int index) { - return stbir__resize_arbitrary(alloc_context, input_pixels, input_w, input_h, input_stride_in_bytes, - output_pixels, output_w, output_h, output_stride_in_bytes, - s0, t0, s1, t1, NULL, num_channels, alpha_channel, flags, datatype, filter_horizontal, filter_vertical, - edge_mode_horizontal, edge_mode_vertical, space); + STBIR_ASSERT(index < stbir_info->ring_buffer_num_entries); + +#ifdef STBIR__SEPARATE_ALLOCATIONS + return split_info->ring_buffers[index]; +#else + return (float *)(((char *)split_info->ring_buffer) + (index * stbir_info->ring_buffer_length_bytes)); +#endif } +// Get the specified scan line from the ring buffer +static float *stbir__get_ring_buffer_scanline(stbir__info const *stbir_info, stbir__per_split_info const *split_info, int get_scanline) +{ + int ring_buffer_index = (split_info->ring_buffer_begin_index + (get_scanline - split_info->ring_buffer_first_scanline)) % stbir_info->ring_buffer_num_entries; + return stbir__get_ring_buffer_entry(stbir_info, split_info, ring_buffer_index); +} + +static void stbir__resample_horizontal_gather(stbir__info const *stbir_info, float *output_buffer, float const *input_buffer STBIR_ONLY_PROFILE_GET_SPLIT_INFO) +{ + float const *decode_buffer = input_buffer - (stbir_info->scanline_extents.conservative.n0 * stbir_info->effective_channels); + + STBIR_PROFILE_START(horizontal); + if ((stbir_info->horizontal.filter_enum == STBIR_FILTER_POINT_SAMPLE) && (stbir_info->horizontal.scale_info.scale == 1.0f)) + STBIR_MEMCPY(output_buffer, input_buffer, stbir_info->horizontal.scale_info.output_sub_size * sizeof(float) * stbir_info->effective_channels); + else + stbir_info->horizontal_gather_channels(output_buffer, stbir_info->horizontal.scale_info.output_sub_size, decode_buffer, stbir_info->horizontal.contributors, stbir_info->horizontal.coefficients, stbir_info->horizontal.coefficient_width); + STBIR_PROFILE_END(horizontal); +} + +static void stbir__resample_vertical_gather(stbir__info const *stbir_info, stbir__per_split_info *split_info, int n, int contrib_n0, int contrib_n1, float const *vertical_coefficients) +{ + float *encode_buffer = split_info->vertical_buffer; + float *decode_buffer = split_info->decode_buffer; + int vertical_first = stbir_info->vertical_first; + int width = (vertical_first) ? 
(stbir_info->scanline_extents.conservative.n1 - stbir_info->scanline_extents.conservative.n0 + 1) : stbir_info->horizontal.scale_info.output_sub_size; + int width_times_channels = stbir_info->effective_channels * width; + + STBIR_ASSERT(stbir_info->vertical.is_gather); + + // loop over the contributing scanlines and scale into the buffer + STBIR_PROFILE_START(vertical); + { + int k = 0, total = contrib_n1 - contrib_n0 + 1; + STBIR_ASSERT(total > 0); + do + { + float const *inputs[8]; + int i, cnt = total; + if (cnt > 8) + cnt = 8; + for (i = 0; i < cnt; i++) + inputs[i] = stbir__get_ring_buffer_scanline(stbir_info, split_info, k + i + contrib_n0); + + // call the N scanlines at a time function (up to 8 scanlines of blending at once) + ((k == 0) ? stbir__vertical_gathers : stbir__vertical_gathers_continues)[cnt - 1]((vertical_first) ? decode_buffer : encode_buffer, vertical_coefficients + k, inputs, inputs[0] + width_times_channels); + k += cnt; + total -= cnt; + } while (total); + } + STBIR_PROFILE_END(vertical); + + if (vertical_first) + { + // Now resample the gathered vertical data in the horizontal axis into the encode buffer + decode_buffer[width_times_channels] = 0.0f; // clear two over for horizontals with a remnant of 3 + decode_buffer[width_times_channels + 1] = 0.0f; + stbir__resample_horizontal_gather(stbir_info, encode_buffer, decode_buffer STBIR_ONLY_PROFILE_SET_SPLIT_INFO); + } + + stbir__encode_scanline(stbir_info, ((char *)stbir_info->output_data) + ((size_t)n * (size_t)stbir_info->output_stride_bytes), + encode_buffer, n STBIR_ONLY_PROFILE_SET_SPLIT_INFO); +} + +static void stbir__decode_and_resample_for_vertical_gather_loop(stbir__info const *stbir_info, stbir__per_split_info *split_info, int n) +{ + int ring_buffer_index; + float *ring_buffer; + + // Decode the nth scanline from the source image into the decode buffer. + stbir__decode_scanline(stbir_info, n, split_info->decode_buffer STBIR_ONLY_PROFILE_SET_SPLIT_INFO); + + // update new end scanline + split_info->ring_buffer_last_scanline = n; + + // get ring buffer + ring_buffer_index = (split_info->ring_buffer_begin_index + (split_info->ring_buffer_last_scanline - split_info->ring_buffer_first_scanline)) % stbir_info->ring_buffer_num_entries; + ring_buffer = stbir__get_ring_buffer_entry(stbir_info, split_info, ring_buffer_index); + + // Now resample it into the ring buffer. + stbir__resample_horizontal_gather(stbir_info, ring_buffer, split_info->decode_buffer STBIR_ONLY_PROFILE_SET_SPLIT_INFO); + + // Now it's sitting in the ring buffer ready to be used as source for the vertical sampling. 
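The two ring-buffer getters above turn an absolute scanline number into a slot: scanlines are stored contiguously (modulo the entry count) starting at ring_buffer_begin_index, and the modulo at lookup time absorbs any wraparound. A standalone sketch of just that index math, with hypothetical names, not the library's API:

#include <assert.h>

/* map an absolute scanline number to its ring-buffer slot; begin_index may
   have been incremented past num_entries by evictions, so the modulo at
   lookup time handles the wrap */
static int ring_slot(int begin_index, int first_scanline, int scanline, int num_entries)
{
    int offset = scanline - first_scanline;
    assert(offset >= 0 && offset < num_entries); /* scanline must be resident */
    return (begin_index + offset) % num_entries;
}

Because first_scanline and begin_index advance in lockstep whenever the oldest entry is evicted, offset always stays inside the resident window.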
+}
+
+static void stbir__vertical_gather_loop(stbir__info const *stbir_info, stbir__per_split_info *split_info, int split_count)
+{
+  int y, start_output_y, end_output_y;
+  stbir__contributors *vertical_contributors = stbir_info->vertical.contributors;
+  float const *vertical_coefficients = stbir_info->vertical.coefficients;
+
+  STBIR_ASSERT(stbir_info->vertical.is_gather);
+
+  start_output_y = split_info->start_output_y;
+  end_output_y = split_info[split_count - 1].end_output_y;
+
+  vertical_contributors += start_output_y;
+  vertical_coefficients += start_output_y * stbir_info->vertical.coefficient_width;
+
+  // initialize the ring buffer for gathering
+  split_info->ring_buffer_begin_index = 0;
+  split_info->ring_buffer_first_scanline = vertical_contributors->n0;
+  split_info->ring_buffer_last_scanline = split_info->ring_buffer_first_scanline - 1; // means "empty"
+
+  for (y = start_output_y; y < end_output_y; y++)
+  {
+    int in_first_scanline, in_last_scanline;
+
+    in_first_scanline = vertical_contributors->n0;
+    in_last_scanline = vertical_contributors->n1;
+
+    // make sure the indexing hasn't broken
+    STBIR_ASSERT(in_first_scanline >= split_info->ring_buffer_first_scanline);
+
+    // Load in new scanlines
+    while (in_last_scanline > split_info->ring_buffer_last_scanline)
+    {
+      STBIR_ASSERT((split_info->ring_buffer_last_scanline - split_info->ring_buffer_first_scanline + 1) <= stbir_info->ring_buffer_num_entries);
+
+      // if the ring buffer is full, retire the oldest scanline to make room before we add new ones
+      if ((split_info->ring_buffer_last_scanline - split_info->ring_buffer_first_scanline + 1) == stbir_info->ring_buffer_num_entries)
+      {
+        split_info->ring_buffer_first_scanline++;
+        split_info->ring_buffer_begin_index++;
+      }
+
+      if (stbir_info->vertical_first)
+      {
+        float *ring_buffer = stbir__get_ring_buffer_scanline(stbir_info, split_info, ++split_info->ring_buffer_last_scanline);
+        // Decode the nth scanline from the source image directly into the ring buffer.
+        stbir__decode_scanline(stbir_info, split_info->ring_buffer_last_scanline, ring_buffer STBIR_ONLY_PROFILE_SET_SPLIT_INFO);
+      }
+      else
+      {
+        stbir__decode_and_resample_for_vertical_gather_loop(stbir_info, split_info, split_info->ring_buffer_last_scanline + 1);
+      }
+    }
+
+    // Now all buffers should be ready to write a row of vertical sampling, so do it.
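+    // (Illustrative note, not from the original source: the ring buffer entry for
+    //   input scanline s is (ring_buffer_begin_index + (s - ring_buffer_first_scanline))
+    //   % ring_buffer_num_entries - e.g. with 4 entries, first_scanline == 4 and
+    //   begin_index == 1, scanline 6 lives at entry (1 + (6 - 4)) % 4 == 3. The call
+    //   below then blends up to 8 contributing scanlines per batch, using the plain
+    //   gather variants for the first batch and the accumulating "continues" variants after.)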
+ stbir__resample_vertical_gather(stbir_info, split_info, y, in_first_scanline, in_last_scanline, vertical_coefficients); + + ++vertical_contributors; + vertical_coefficients += stbir_info->vertical.coefficient_width; + } +} + +#define STBIR__FLOAT_EMPTY_MARKER 3.0e+38F +#define STBIR__FLOAT_BUFFER_IS_EMPTY(ptr) ((ptr)[0] == STBIR__FLOAT_EMPTY_MARKER) + +static void stbir__encode_first_scanline_from_scatter(stbir__info const *stbir_info, stbir__per_split_info *split_info) +{ + // evict a scanline out into the output buffer + float *ring_buffer_entry = stbir__get_ring_buffer_entry(stbir_info, split_info, split_info->ring_buffer_begin_index); + + // dump the scanline out + stbir__encode_scanline(stbir_info, ((char *)stbir_info->output_data) + ((size_t)split_info->ring_buffer_first_scanline * (size_t)stbir_info->output_stride_bytes), ring_buffer_entry, split_info->ring_buffer_first_scanline STBIR_ONLY_PROFILE_SET_SPLIT_INFO); + + // mark it as empty + ring_buffer_entry[0] = STBIR__FLOAT_EMPTY_MARKER; + + // advance the first scanline + split_info->ring_buffer_first_scanline++; + if (++split_info->ring_buffer_begin_index == stbir_info->ring_buffer_num_entries) + split_info->ring_buffer_begin_index = 0; +} + +static void stbir__horizontal_resample_and_encode_first_scanline_from_scatter(stbir__info const *stbir_info, stbir__per_split_info *split_info) +{ + // evict a scanline out into the output buffer + + float *ring_buffer_entry = stbir__get_ring_buffer_entry(stbir_info, split_info, split_info->ring_buffer_begin_index); + + // Now resample it into the buffer. + stbir__resample_horizontal_gather(stbir_info, split_info->vertical_buffer, ring_buffer_entry STBIR_ONLY_PROFILE_SET_SPLIT_INFO); + + // dump the scanline out + stbir__encode_scanline(stbir_info, ((char *)stbir_info->output_data) + ((size_t)split_info->ring_buffer_first_scanline * (size_t)stbir_info->output_stride_bytes), split_info->vertical_buffer, split_info->ring_buffer_first_scanline STBIR_ONLY_PROFILE_SET_SPLIT_INFO); + + // mark it as empty + ring_buffer_entry[0] = STBIR__FLOAT_EMPTY_MARKER; + + // advance the first scanline + split_info->ring_buffer_first_scanline++; + if (++split_info->ring_buffer_begin_index == stbir_info->ring_buffer_num_entries) + split_info->ring_buffer_begin_index = 0; +} + +static void stbir__resample_vertical_scatter(stbir__info const *stbir_info, stbir__per_split_info *split_info, int n0, int n1, float const *vertical_coefficients, float const *vertical_buffer, float const *vertical_buffer_end) +{ + STBIR_ASSERT(!stbir_info->vertical.is_gather); + + STBIR_PROFILE_START(vertical); + { + int k = 0, total = n1 - n0 + 1; + STBIR_ASSERT(total > 0); + do + { + float *outputs[8]; + int i, n = total; + if (n > 8) + n = 8; + for (i = 0; i < n; i++) + { + outputs[i] = stbir__get_ring_buffer_scanline(stbir_info, split_info, k + i + n0); + if ((i) && (STBIR__FLOAT_BUFFER_IS_EMPTY(outputs[i]) != STBIR__FLOAT_BUFFER_IS_EMPTY(outputs[0]))) // make sure runs are of the same type + { + n = i; + break; + } + } + // call the scatter to N scanlines at a time function (up to 8 scanlines of scattering at once) + ((STBIR__FLOAT_BUFFER_IS_EMPTY(outputs[0])) ? 
stbir__vertical_scatter_sets : stbir__vertical_scatter_blends)[n - 1](outputs, vertical_coefficients + k, vertical_buffer, vertical_buffer_end); + k += n; + total -= n; + } while (total); + } + + STBIR_PROFILE_END(vertical); +} + +typedef void stbir__handle_scanline_for_scatter_func(stbir__info const *stbir_info, stbir__per_split_info *split_info); + +static void stbir__vertical_scatter_loop(stbir__info const *stbir_info, stbir__per_split_info *split_info, int split_count) +{ + int y, start_output_y, end_output_y, start_input_y, end_input_y; + stbir__contributors *vertical_contributors = stbir_info->vertical.contributors; + float const *vertical_coefficients = stbir_info->vertical.coefficients; + stbir__handle_scanline_for_scatter_func *handle_scanline_for_scatter; + void *scanline_scatter_buffer; + void *scanline_scatter_buffer_end; + int on_first_input_y, last_input_y; + int width = (stbir_info->vertical_first) ? (stbir_info->scanline_extents.conservative.n1 - stbir_info->scanline_extents.conservative.n0 + 1) : stbir_info->horizontal.scale_info.output_sub_size; + int width_times_channels = stbir_info->effective_channels * width; + + STBIR_ASSERT(!stbir_info->vertical.is_gather); + + start_output_y = split_info->start_output_y; + end_output_y = split_info[split_count - 1].end_output_y; // may do multiple split counts + + start_input_y = split_info->start_input_y; + end_input_y = split_info[split_count - 1].end_input_y; + + // adjust for starting offset start_input_y + y = start_input_y + stbir_info->vertical.filter_pixel_margin; + vertical_contributors += y; + vertical_coefficients += stbir_info->vertical.coefficient_width * y; + + if (stbir_info->vertical_first) + { + handle_scanline_for_scatter = stbir__horizontal_resample_and_encode_first_scanline_from_scatter; + scanline_scatter_buffer = split_info->decode_buffer; + scanline_scatter_buffer_end = ((char *)scanline_scatter_buffer) + sizeof(float) * stbir_info->effective_channels * (stbir_info->scanline_extents.conservative.n1 - stbir_info->scanline_extents.conservative.n0 + 1); + } + else + { + handle_scanline_for_scatter = stbir__encode_first_scanline_from_scatter; + scanline_scatter_buffer = split_info->vertical_buffer; + scanline_scatter_buffer_end = ((char *)scanline_scatter_buffer) + sizeof(float) * stbir_info->effective_channels * stbir_info->horizontal.scale_info.output_sub_size; + } + + // initialize the ring buffer for scattering + split_info->ring_buffer_first_scanline = start_output_y; + split_info->ring_buffer_last_scanline = -1; + split_info->ring_buffer_begin_index = -1; + + // mark all the buffers as empty to start + for (y = 0; y < stbir_info->ring_buffer_num_entries; y++) + { + float *decode_buffer = stbir__get_ring_buffer_entry(stbir_info, split_info, y); + decode_buffer[width_times_channels] = 0.0f; // clear two over for horizontals with a remnant of 3 + decode_buffer[width_times_channels + 1] = 0.0f; + decode_buffer[0] = STBIR__FLOAT_EMPTY_MARKER; // only used on scatter + } + + // do the loop in input space + on_first_input_y = 1; + last_input_y = start_input_y; + for (y = start_input_y; y < end_input_y; y++) + { + int out_first_scanline, out_last_scanline; + + out_first_scanline = vertical_contributors->n0; + out_last_scanline = vertical_contributors->n1; + + STBIR_ASSERT(out_last_scanline - out_first_scanline + 1 <= stbir_info->ring_buffer_num_entries); + + if ((out_last_scanline >= out_first_scanline) && (((out_first_scanline >= start_output_y) && (out_first_scanline < end_output_y)) || ((out_last_scanline >= 
start_output_y) && (out_last_scanline < end_output_y))))
+    {
+      float const *vc = vertical_coefficients;
+
+      // keep track of the range actually seen for the next resize
+      last_input_y = y;
+      if ((on_first_input_y) && (y > start_input_y))
+        split_info->start_input_y = y;
+      on_first_input_y = 0;
+
+      // clip the region
+      if (out_first_scanline < start_output_y)
+      {
+        vc += start_output_y - out_first_scanline;
+        out_first_scanline = start_output_y;
+      }
+
+      if (out_last_scanline >= end_output_y)
+        out_last_scanline = end_output_y - 1;
+
+      // if very first scanline, init the index
+      if (split_info->ring_buffer_begin_index < 0)
+        split_info->ring_buffer_begin_index = out_first_scanline - start_output_y;
+
+      STBIR_ASSERT(split_info->ring_buffer_begin_index <= out_first_scanline);
+
+      // Decode the nth scanline from the source image into the decode buffer.
+      stbir__decode_scanline(stbir_info, y, split_info->decode_buffer STBIR_ONLY_PROFILE_SET_SPLIT_INFO);
+
+      // When horizontal first, we resample horizontally into the vertical buffer before we scatter it out
+      if (!stbir_info->vertical_first)
+        stbir__resample_horizontal_gather(stbir_info, split_info->vertical_buffer, split_info->decode_buffer STBIR_ONLY_PROFILE_SET_SPLIT_INFO);
+
+      // Now it's sitting in the buffer ready to be distributed into the ring buffers.
+
+      // if the ring buffer is full, evict a scanline before we add new ones
+      if (((split_info->ring_buffer_last_scanline - split_info->ring_buffer_first_scanline + 1) == stbir_info->ring_buffer_num_entries) &&
+          (out_last_scanline > split_info->ring_buffer_last_scanline))
+        handle_scanline_for_scatter(stbir_info, split_info);
+
+      // Now the horizontal buffer is ready to write to all ring buffer rows, so do it.
+      stbir__resample_vertical_scatter(stbir_info, split_info, out_first_scanline, out_last_scanline, vc, (float *)scanline_scatter_buffer, (float *)scanline_scatter_buffer_end);
+
+      // update the end of the buffer
+      if (out_last_scanline > split_info->ring_buffer_last_scanline)
+        split_info->ring_buffer_last_scanline = out_last_scanline;
+    }
+    ++vertical_contributors;
+    vertical_coefficients += stbir_info->vertical.coefficient_width;
+  }
+
+  // now evict the scanlines that are left over in the ring buffer
+  while (split_info->ring_buffer_first_scanline < end_output_y)
+    handle_scanline_for_scatter(stbir_info, split_info);
+
+  // update the end_input_y if we do multiple resizes with the same data
+  ++last_input_y;
+  for (y = 0; y < split_count; y++)
+    if (split_info[y].end_input_y > last_input_y)
+      split_info[y].end_input_y = last_input_y;
+}
+
+static stbir__kernel_callback *stbir__builtin_kernels[] = {0, stbir__filter_trapezoid, stbir__filter_triangle, stbir__filter_cubic, stbir__filter_catmullrom, stbir__filter_mitchell, stbir__filter_point};
+static stbir__support_callback *stbir__builtin_supports[] = {0, stbir__support_trapezoid, stbir__support_one, stbir__support_two, stbir__support_two, stbir__support_two, stbir__support_zeropoint5};
+
+static void stbir__set_sampler(stbir__sampler *samp, stbir_filter filter, stbir__kernel_callback *kernel, stbir__support_callback *support, stbir_edge edge, stbir__scale_info *scale_info, int always_gather, void *user_data)
+{
+  // set filter
+  if (filter == 0)
+  {
+    filter = STBIR_DEFAULT_FILTER_DOWNSAMPLE; // default to downsample
+    if (scale_info->scale >= (1.0f - stbir__small_float))
+    {
+      if ((scale_info->scale <= (1.0f + stbir__small_float)) && (STBIR_CEILF(scale_info->pixel_shift) == scale_info->pixel_shift))
+        filter = STBIR_FILTER_POINT_SAMPLE;
+      else
+        filter
= STBIR_DEFAULT_FILTER_UPSAMPLE;
+    }
+  }
+  samp->filter_enum = filter;
+
+  STBIR_ASSERT(samp->filter_enum != 0);
+  STBIR_ASSERT((unsigned)samp->filter_enum < STBIR_FILTER_OTHER);
+  samp->filter_kernel = stbir__builtin_kernels[filter];
+  samp->filter_support = stbir__builtin_supports[filter];
+
+  if (kernel && support)
+  {
+    samp->filter_kernel = kernel;
+    samp->filter_support = support;
+    samp->filter_enum = STBIR_FILTER_OTHER;
+  }
+
+  samp->edge = edge;
+  samp->filter_pixel_width = stbir__get_filter_pixel_width(samp->filter_support, scale_info->scale, user_data);
+  // Gather is always better, but in extreme downsamples, you have to have most or all of the data in memory.
+  // For horizontal, we always have all the pixels, so we always use gather here (always_gather==1).
+  // For vertical, we use gather if scaling up (which means we will have samp->filter_pixel_width
+  //   scanlines in memory at once).
+  samp->is_gather = 0;
+  if (scale_info->scale >= (1.0f - stbir__small_float))
+    samp->is_gather = 1;
+  else if ((always_gather) || (samp->filter_pixel_width <= STBIR_FORCE_GATHER_FILTER_SCANLINES_AMOUNT))
+    samp->is_gather = 2;
+
+  // precalculate stuff based on the above
+  samp->coefficient_width = stbir__get_coefficient_width(samp, samp->is_gather, user_data);
+
+  // filter_pixel_width is the conservative size in pixels of input that affect an output pixel.
+  //   In rare cases (only with 2 pix to 1 pix with the default filters), it's possible that the
+  //   filter will extend before or after the scanline beyond just one extra entire copy of the
+  //   scanline (we would hit the edge twice). We don't let you do that, so we clamp the total
+  //   width to 3x the total of input pixels (once for the scanline, once for the left side
+  //   overhang, and once for the right side). We only do this for the wrap edge mode, since the
+  //   other modes can just re-edge clamp back in again.
+  if (edge == STBIR_EDGE_WRAP)
+    if (samp->filter_pixel_width > (scale_info->input_full_size * 3))
+      samp->filter_pixel_width = scale_info->input_full_size * 3;
+
+  // This is how much to expand buffers to account for filters seeking outside
+  // the image boundaries.
+  samp->filter_pixel_margin = samp->filter_pixel_width / 2;
+
+  // filter_pixel_margin is the amount that this filter can overhang on just one side of either
+  //   end of the scanline (left or the right). Since we only allow you to overhang 1 scanline's
+  //   worth of pixels, we clamp this one side of overhang to the input scanline size. Again,
+  //   this clamping only happens in rare cases with the default filters (2 pix to 1 pix).
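+  // (Worked example, not from the original source: with STBIR_EDGE_WRAP and an
+  //   input_full_size of 2, a computed filter_pixel_width of 12 would already have
+  //   been clamped to 6 (3 * 2) above, giving a margin of 6 / 2 == 3, which the
+  //   check below then clamps to 2.)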
+ if (edge == STBIR_EDGE_WRAP) + if (samp->filter_pixel_margin > scale_info->input_full_size) + samp->filter_pixel_margin = scale_info->input_full_size; + + samp->num_contributors = stbir__get_contributors(samp, samp->is_gather); + + samp->contributors_size = samp->num_contributors * sizeof(stbir__contributors); + samp->coefficients_size = samp->num_contributors * samp->coefficient_width * sizeof(float) + sizeof(float) * STBIR_INPUT_CALLBACK_PADDING; // extra sizeof(float) is padding + + samp->gather_prescatter_contributors = 0; + samp->gather_prescatter_coefficients = 0; + if (samp->is_gather == 0) + { + samp->gather_prescatter_coefficient_width = samp->filter_pixel_width; + samp->gather_prescatter_num_contributors = stbir__get_contributors(samp, 2); + samp->gather_prescatter_contributors_size = samp->gather_prescatter_num_contributors * sizeof(stbir__contributors); + samp->gather_prescatter_coefficients_size = samp->gather_prescatter_num_contributors * samp->gather_prescatter_coefficient_width * sizeof(float); + } +} + +static void stbir__get_conservative_extents(stbir__sampler *samp, stbir__contributors *range, void *user_data) +{ + float scale = samp->scale_info.scale; + float out_shift = samp->scale_info.pixel_shift; + stbir__support_callback *support = samp->filter_support; + int input_full_size = samp->scale_info.input_full_size; + stbir_edge edge = samp->edge; + float inv_scale = samp->scale_info.inv_scale; + + STBIR_ASSERT(samp->is_gather != 0); + + if (samp->is_gather == 1) + { + int in_first_pixel, in_last_pixel; + float out_filter_radius = support(inv_scale, user_data) * scale; + + stbir__calculate_in_pixel_range(&in_first_pixel, &in_last_pixel, 0.5, out_filter_radius, inv_scale, out_shift, input_full_size, edge); + range->n0 = in_first_pixel; + stbir__calculate_in_pixel_range(&in_first_pixel, &in_last_pixel, ((float)(samp->scale_info.output_sub_size - 1)) + 0.5f, out_filter_radius, inv_scale, out_shift, input_full_size, edge); + range->n1 = in_last_pixel; + } + else if (samp->is_gather == 2) // downsample gather, refine + { + float in_pixels_radius = support(scale, user_data) * inv_scale; + int filter_pixel_margin = samp->filter_pixel_margin; + int output_sub_size = samp->scale_info.output_sub_size; + int input_end; + int n; + int in_first_pixel, in_last_pixel; + + // get a conservative area of the input range + stbir__calculate_in_pixel_range(&in_first_pixel, &in_last_pixel, 0, 0, inv_scale, out_shift, input_full_size, edge); + range->n0 = in_first_pixel; + stbir__calculate_in_pixel_range(&in_first_pixel, &in_last_pixel, (float)output_sub_size, 0, inv_scale, out_shift, input_full_size, edge); + range->n1 = in_last_pixel; + + // now go through the margin to the start of area to find bottom + n = range->n0 + 1; + input_end = -filter_pixel_margin; + while (n >= input_end) + { + int out_first_pixel, out_last_pixel; + stbir__calculate_out_pixel_range(&out_first_pixel, &out_last_pixel, ((float)n) + 0.5f, in_pixels_radius, scale, out_shift, output_sub_size); + if (out_first_pixel > out_last_pixel) + break; + + if ((out_first_pixel < output_sub_size) || (out_last_pixel >= 0)) + range->n0 = n; + --n; + } + + // now go through the end of the area through the margin to find top + n = range->n1 - 1; + input_end = n + 1 + filter_pixel_margin; + while (n <= input_end) + { + int out_first_pixel, out_last_pixel; + stbir__calculate_out_pixel_range(&out_first_pixel, &out_last_pixel, ((float)n) + 0.5f, in_pixels_radius, scale, out_shift, output_sub_size); + if (out_first_pixel > out_last_pixel) 
+ break; + if ((out_first_pixel < output_sub_size) || (out_last_pixel >= 0)) + range->n1 = n; + ++n; + } + } + + if (samp->edge == STBIR_EDGE_WRAP) + { + // if we are wrapping, and we are very close to the image size (so the edges might merge), just use the scanline up to the edge + if ((range->n0 > 0) && (range->n1 >= input_full_size)) + { + int marg = range->n1 - input_full_size + 1; + if ((marg + STBIR__MERGE_RUNS_PIXEL_THRESHOLD) >= range->n0) + range->n0 = 0; + } + if ((range->n0 < 0) && (range->n1 < (input_full_size - 1))) + { + int marg = -range->n0; + if ((input_full_size - marg - STBIR__MERGE_RUNS_PIXEL_THRESHOLD - 1) <= range->n1) + range->n1 = input_full_size - 1; + } + } + else + { + // for non-edge-wrap modes, we never read over the edge, so clamp + if (range->n0 < 0) + range->n0 = 0; + if (range->n1 >= input_full_size) + range->n1 = input_full_size - 1; + } +} + +static void stbir__get_split_info(stbir__per_split_info *split_info, int splits, int output_height, int vertical_pixel_margin, int input_full_height) +{ + int i, cur; + int left = output_height; + + cur = 0; + for (i = 0; i < splits; i++) + { + int each; + split_info[i].start_output_y = cur; + each = left / (splits - i); + split_info[i].end_output_y = cur + each; + cur += each; + left -= each; + + // scatter range (updated to minimum as you run it) + split_info[i].start_input_y = -vertical_pixel_margin; + split_info[i].end_input_y = input_full_height + vertical_pixel_margin; + } +} + +static void stbir__free_internal_mem(stbir__info *info) +{ +#define STBIR__FREE_AND_CLEAR(ptr) \ + { \ + if (ptr) \ + { \ + void *p = (ptr); \ + (ptr) = 0; \ + STBIR_FREE(p, info->user_data); \ + } \ + } + + if (info) + { +#ifndef STBIR__SEPARATE_ALLOCATIONS + STBIR__FREE_AND_CLEAR(info->alloced_mem); +#else + int i, j; + + if ((info->vertical.gather_prescatter_contributors) && ((void *)info->vertical.gather_prescatter_contributors != (void *)info->split_info[0].decode_buffer)) + { + STBIR__FREE_AND_CLEAR(info->vertical.gather_prescatter_coefficients); + STBIR__FREE_AND_CLEAR(info->vertical.gather_prescatter_contributors); + } + for (i = 0; i < info->splits; i++) + { + for (j = 0; j < info->alloc_ring_buffer_num_entries; j++) + { +#ifdef STBIR_SIMD8 + if (info->effective_channels == 3) + --info->split_info[i].ring_buffers[j]; // avx in 3 channel mode needs one float at the start of the buffer +#endif + STBIR__FREE_AND_CLEAR(info->split_info[i].ring_buffers[j]); + } + +#ifdef STBIR_SIMD8 + if (info->effective_channels == 3) + --info->split_info[i].decode_buffer; // avx in 3 channel mode needs one float at the start of the buffer +#endif + STBIR__FREE_AND_CLEAR(info->split_info[i].decode_buffer); + STBIR__FREE_AND_CLEAR(info->split_info[i].ring_buffers); + STBIR__FREE_AND_CLEAR(info->split_info[i].vertical_buffer); + } + STBIR__FREE_AND_CLEAR(info->split_info); + if (info->vertical.coefficients != info->horizontal.coefficients) + { + STBIR__FREE_AND_CLEAR(info->vertical.coefficients); + STBIR__FREE_AND_CLEAR(info->vertical.contributors); + } + STBIR__FREE_AND_CLEAR(info->horizontal.coefficients); + STBIR__FREE_AND_CLEAR(info->horizontal.contributors); + STBIR__FREE_AND_CLEAR(info->alloced_mem); + STBIR_FREE(info, info->user_data); +#endif + } + +#undef STBIR__FREE_AND_CLEAR +} + +static int stbir__get_max_split(int splits, int height) +{ + int i; + int max = 0; + + for (i = 0; i < splits; i++) + { + int each = height / (splits - i); + if (each > max) + max = each; + height -= each; + } + return max; +} + +static 
stbir__horizontal_gather_channels_func **stbir__horizontal_gather_n_coeffs_funcs[8] =
+    {
+        0, stbir__horizontal_gather_1_channels_with_n_coeffs_funcs, stbir__horizontal_gather_2_channels_with_n_coeffs_funcs, stbir__horizontal_gather_3_channels_with_n_coeffs_funcs, stbir__horizontal_gather_4_channels_with_n_coeffs_funcs, 0, 0, stbir__horizontal_gather_7_channels_with_n_coeffs_funcs};
+
+static stbir__horizontal_gather_channels_func **stbir__horizontal_gather_channels_funcs[8] =
+    {
+        0, stbir__horizontal_gather_1_channels_funcs, stbir__horizontal_gather_2_channels_funcs, stbir__horizontal_gather_3_channels_funcs, stbir__horizontal_gather_4_channels_funcs, 0, 0, stbir__horizontal_gather_7_channels_funcs};
+
+// there are eight resize classification buckets (see stbir__should_do_vertical_first): 0 == vertical scatter, 1 == vertical gather <= 1x scale, 2 == vertical gather 1x-2x scale, 3 == vertical gather 2x-3x scale, 5 == vertical gather 3x-4x scale, 6 == vertical gather > 4x scale or <=4 pixel height, 7 == <=4 pixel wide column (bucket 4 is currently unused)
+#define STBIR_RESIZE_CLASSIFICATIONS 8
+
+static float stbir__compute_weights[5][STBIR_RESIZE_CLASSIFICATIONS][4] = // 5 = 0=1chan, 1=2chan, 2=3chan, 3=4chan, 4=7chan
+    {
+        {
+            {1.00000f, 1.00000f, 0.31250f, 1.00000f},
+            {0.56250f, 0.59375f, 0.00000f, 0.96875f},
+            {1.00000f, 0.06250f, 0.00000f, 1.00000f},
+            {0.00000f, 0.09375f, 1.00000f, 1.00000f},
+            {1.00000f, 1.00000f, 1.00000f, 1.00000f},
+            {0.03125f, 0.12500f, 1.00000f, 1.00000f},
+            {0.06250f, 0.12500f, 0.00000f, 1.00000f},
+            {0.00000f, 1.00000f, 0.00000f, 0.03125f},
+        },
+        {
+            {0.00000f, 0.84375f, 0.00000f, 0.03125f},
+            {0.09375f, 0.93750f, 0.00000f, 0.78125f},
+            {0.87500f, 0.21875f, 0.00000f, 0.96875f},
+            {0.09375f, 0.09375f, 1.00000f, 1.00000f},
+            {1.00000f, 1.00000f, 1.00000f, 1.00000f},
+            {0.03125f, 0.12500f, 1.00000f, 1.00000f},
+            {0.06250f, 0.12500f, 0.00000f, 1.00000f},
+            {0.00000f, 1.00000f, 0.00000f, 0.53125f},
+        },
+        {
+            {0.00000f, 0.53125f, 0.00000f, 0.03125f},
+            {0.06250f, 0.96875f, 0.00000f, 0.53125f},
+            {0.87500f, 0.18750f, 0.00000f, 0.93750f},
+            {0.00000f, 0.09375f, 1.00000f, 1.00000f},
+            {1.00000f, 1.00000f, 1.00000f, 1.00000f},
+            {0.03125f, 0.12500f, 1.00000f, 1.00000f},
+            {0.06250f, 0.12500f, 0.00000f, 1.00000f},
+            {0.00000f, 1.00000f, 0.00000f, 0.56250f},
+        },
+        {
+            {0.00000f, 0.50000f, 0.00000f, 0.71875f},
+            {0.06250f, 0.84375f, 0.00000f, 0.87500f},
+            {1.00000f, 0.50000f, 0.50000f, 0.96875f},
+            {1.00000f, 0.09375f, 0.31250f, 0.50000f},
+            {1.00000f, 1.00000f, 1.00000f, 1.00000f},
+            {1.00000f, 0.03125f, 0.03125f, 0.53125f},
+            {0.18750f, 0.12500f, 0.00000f, 1.00000f},
+            {0.00000f, 1.00000f, 0.03125f, 0.18750f},
+        },
+        {
+            {0.00000f, 0.59375f, 0.00000f, 0.96875f},
+            {0.06250f, 0.81250f, 0.06250f, 0.59375f},
+            {0.75000f, 0.43750f, 0.12500f, 0.96875f},
+            {0.87500f, 0.06250f, 0.18750f, 0.43750f},
+            {1.00000f, 1.00000f, 1.00000f, 1.00000f},
+            {0.15625f, 0.12500f, 1.00000f, 1.00000f},
+            {0.06250f, 0.12500f, 0.00000f, 1.00000f},
+            {0.00000f, 1.00000f, 0.03125f, 0.34375f},
+        }};
+
+// structure that allows us to query and override info for training the costs
+typedef struct STBIR__V_FIRST_INFO
+{
+  double v_cost, h_cost;
+  int control_v_first; // 0 = no control, 1 = force hori, 2 = force vert
+  int v_first;
+  int v_resize_classification;
+  int is_gather;
+} STBIR__V_FIRST_INFO;
+
+#ifdef STBIR__V_FIRST_INFO_BUFFER
+static STBIR__V_FIRST_INFO STBIR__V_FIRST_INFO_BUFFER = {0};
+#define STBIR__V_FIRST_INFO_POINTER &STBIR__V_FIRST_INFO_BUFFER
+#else
+#define STBIR__V_FIRST_INFO_POINTER 0
+#endif
+
+// Figure out whether to scale along the horizontal or vertical first.
+// This is only *super* important when you are scaling by a massively
+// different amount in the vertical vs the horizontal (for example, if
+// you are scaling by 2x in the width, and 0.5x in the height, then you
+// want to do the vertical scale first, because it's around 3x faster
+// in that order).
+//
+// In more normal circumstances, this makes a 20-40% difference, so
+// it's good to get right, but not critical. The normal way that you
+// decide which direction goes first is just figuring out which
+// direction does more multiplies. But with modern CPUs, with their
+// fancy caches and SIMD and high IPC abilities, there's just a lot
+// more that goes into it.
+//
+// My handwavy sort of solution is to have an app that does a whole
+// bunch of timing for both vertical and horizontal first modes,
+// and then another app that can read lots of these timing files
+// and try to search for the best weights to use. Dotimings.c
+// is the app that does a bunch of timings, and vf_train.c is the
+// app that solves for the best weights (and shows how well it
+// does currently).
+
+static int stbir__should_do_vertical_first(float weights_table[STBIR_RESIZE_CLASSIFICATIONS][4], int horizontal_filter_pixel_width, float horizontal_scale, int horizontal_output_size, int vertical_filter_pixel_width, float vertical_scale, int vertical_output_size, int is_gather, STBIR__V_FIRST_INFO *info)
+{
+  double v_cost, h_cost;
+  float *weights;
+  int vertical_first;
+  int v_classification;
+
+  // categorize the resize into buckets
+  if ((vertical_output_size <= 4) || (horizontal_output_size <= 4))
+    v_classification = (vertical_output_size < horizontal_output_size) ? 6 : 7;
+  else if (vertical_scale <= 1.0f)
+    v_classification = (is_gather) ? 1 : 0;
+  else if (vertical_scale <= 2.0f)
+    v_classification = 2;
+  else if (vertical_scale <= 3.0f)
+    v_classification = 3;
+  else if (vertical_scale <= 4.0f)
+    v_classification = 5;
+  else
+    v_classification = 6;
+
+  // use the right weights
+  weights = weights_table[v_classification];
+
+  // these are the costs when you don't take into account modern CPUs with high ipc and simd and caches - wish we had a better estimate
+  h_cost = (float)horizontal_filter_pixel_width * weights[0] + horizontal_scale * (float)vertical_filter_pixel_width * weights[1];
+  v_cost = (float)vertical_filter_pixel_width * weights[2] + vertical_scale * (float)horizontal_filter_pixel_width * weights[3];
+
+  // use computation estimate to decide vertical first or not
+  vertical_first = (v_cost <= h_cost) ? 1 : 0;
+
+  // save these, if requested
+  if (info)
+  {
+    info->h_cost = h_cost;
+    info->v_cost = v_cost;
+    info->v_resize_classification = v_classification;
+    info->v_first = vertical_first;
+    info->is_gather = is_gather;
+  }
+
+  // and this allows us to override everything for testing (see dotiming.c)
+  if ((info) && (info->control_v_first))
+    vertical_first = (info->control_v_first == 2) ?
1 : 0;
+
+  return vertical_first;
+}
+
+// layout lookups - must match stbir_internal_pixel_layout
+static unsigned char stbir__pixel_channels[] = {
+    1, 2, 3, 3, 4,    // 1ch, 2ch, rgb, bgr, 4ch
+    4, 4, 4, 4, 2, 2, // RGBA,BGRA,ARGB,ABGR,RA,AR
+    4, 4, 4, 4, 2, 2, // RGBA_PM,BGRA_PM,ARGB_PM,ABGR_PM,RA_PM,AR_PM
+};
+
+// the internal pixel layout enums are in a different order, so we can easily do range comparisons of types
+// the public pixel layout is ordered in a way that if you cast num_channels (1-4) to the enum, you get something sensible
+static stbir_internal_pixel_layout stbir__pixel_layout_convert_public_to_internal[] = {
+    STBIRI_BGR,
+    STBIRI_1CHANNEL,
+    STBIRI_2CHANNEL,
+    STBIRI_RGB,
+    STBIRI_RGBA,
+    STBIRI_4CHANNEL,
+    STBIRI_BGRA,
+    STBIRI_ARGB,
+    STBIRI_ABGR,
+    STBIRI_RA,
+    STBIRI_AR,
+    STBIRI_RGBA_PM,
+    STBIRI_BGRA_PM,
+    STBIRI_ARGB_PM,
+    STBIRI_ABGR_PM,
+    STBIRI_RA_PM,
+    STBIRI_AR_PM,
+};
+
+static stbir__info *stbir__alloc_internal_mem_and_build_samplers(stbir__sampler *horizontal, stbir__sampler *vertical, stbir__contributors *conservative, stbir_pixel_layout input_pixel_layout_public, stbir_pixel_layout output_pixel_layout_public, int splits, int new_x, int new_y, int fast_alpha, void *user_data STBIR_ONLY_PROFILE_BUILD_GET_INFO)
+{
+  static char stbir_channel_count_index[8] = {9, 0, 1, 2, 3, 9, 9, 4};
+
+  stbir__info *info = 0;
+  void *alloced = 0;
+  size_t alloced_total = 0;
+  int vertical_first;
+  size_t decode_buffer_size, ring_buffer_length_bytes, ring_buffer_size, vertical_buffer_size;
+  int alloc_ring_buffer_num_entries;
+
+  int alpha_weighting_type = 0; // 0=none, 1=simple weight in (output stays premultiplied), 2=fancy weight and unweight, 3=simple unweight out (input was premultiplied), 4=simple weight and unweight
+  int conservative_split_output_size = stbir__get_max_split(splits, vertical->scale_info.output_sub_size);
+  stbir_internal_pixel_layout input_pixel_layout = stbir__pixel_layout_convert_public_to_internal[input_pixel_layout_public];
+  stbir_internal_pixel_layout output_pixel_layout = stbir__pixel_layout_convert_public_to_internal[output_pixel_layout_public];
+  int channels = stbir__pixel_channels[input_pixel_layout];
+  int effective_channels = channels;
+
+  // first figure out what type of alpha weighting to use (if any)
+  if ((horizontal->filter_enum != STBIR_FILTER_POINT_SAMPLE) || (vertical->filter_enum != STBIR_FILTER_POINT_SAMPLE)) // no alpha weighting on point sampling
+  {
+    if ((input_pixel_layout >= STBIRI_RGBA) && (input_pixel_layout <= STBIRI_AR) && (output_pixel_layout >= STBIRI_RGBA) && (output_pixel_layout <= STBIRI_AR))
+    {
+      if (fast_alpha)
+      {
+        alpha_weighting_type = 4;
+      }
+      else
+      {
+        static int fancy_alpha_effective_cnts[6] = {7, 7, 7, 7, 3, 3};
+        alpha_weighting_type = 2;
+        effective_channels = fancy_alpha_effective_cnts[input_pixel_layout - STBIRI_RGBA];
+      }
+    }
+    else if ((input_pixel_layout >= STBIRI_RGBA_PM) && (input_pixel_layout <= STBIRI_AR_PM) && (output_pixel_layout >= STBIRI_RGBA) && (output_pixel_layout <= STBIRI_AR))
+    {
+      // input premult, output non-premult
+      alpha_weighting_type = 3;
+    }
+    else if ((input_pixel_layout >= STBIRI_RGBA) && (input_pixel_layout <= STBIRI_AR) && (output_pixel_layout >= STBIRI_RGBA_PM) && (output_pixel_layout <= STBIRI_AR_PM))
+    {
+      // input non-premult, output premult
+      alpha_weighting_type = 1;
+    }
+  }
+
+  // channel in and out count must match currently
+  if (channels != stbir__pixel_channels[output_pixel_layout])
+    return 0;
+
+  // get vertical first
+  vertical_first = stbir__should_do_vertical_first(stbir__compute_weights[(int)stbir_channel_count_index[effective_channels]], horizontal->filter_pixel_width,
horizontal->scale_info.scale, horizontal->scale_info.output_sub_size, vertical->filter_pixel_width, vertical->scale_info.scale, vertical->scale_info.output_sub_size, vertical->is_gather, STBIR__V_FIRST_INFO_POINTER);
+
+  // We sometimes read one float past the end in some of the unrolled loops (with a zero
+  //   coefficient weight, so it has no effect). We use a few extra floats instead of just 1,
+  //   so that the input callback buffer can overlap with the decode buffer without the
+  //   conversion routines overwriting the callback input data.
+  decode_buffer_size = (conservative->n1 - conservative->n0 + 1) * effective_channels * sizeof(float) + sizeof(float) * STBIR_INPUT_CALLBACK_PADDING; // extra floats for input callback stagger
+
+#if defined(STBIR__SEPARATE_ALLOCATIONS) && defined(STBIR_SIMD8)
+  if (effective_channels == 3)
+    decode_buffer_size += sizeof(float); // avx in 3 channel mode needs one float at the start of the buffer (only with separate allocations)
+#endif
+
+  ring_buffer_length_bytes = (size_t)horizontal->scale_info.output_sub_size * (size_t)effective_channels * sizeof(float) + sizeof(float) * STBIR_INPUT_CALLBACK_PADDING; // extra floats for padding
+
+  // if we do vertical first, the ring buffer holds a whole decoded line
+  if (vertical_first)
+    ring_buffer_length_bytes = (decode_buffer_size + 15) & ~15;
+
+  if ((ring_buffer_length_bytes & 4095) == 0)
+    ring_buffer_length_bytes += 64 * 3; // avoid 4k alias
+
+  // One extra entry because floating point precision problems sometimes cause an extra to be necessary.
+  alloc_ring_buffer_num_entries = vertical->filter_pixel_width + 1;
+
+  // we never need more ring buffer entries than the scanlines we're outputting when in scatter mode
+  if ((!vertical->is_gather) && (alloc_ring_buffer_num_entries > conservative_split_output_size))
+    alloc_ring_buffer_num_entries = conservative_split_output_size;
+
+  ring_buffer_size = (size_t)alloc_ring_buffer_num_entries * (size_t)ring_buffer_length_bytes;
+
+  // The vertical buffer is used differently, depending on whether we are scattering
+  //   the vertical scanlines, or gathering them.
+  //   If scattering, it's used as the temp buffer to accumulate each output.
+  //   If gathering, it's just the output buffer.
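+  // (Worked sizing example, not from the original source: for a conservative input
+  //   range of 0..1023 with 4 effective channels, decode_buffer_size is
+  //   1024 * 4 * sizeof(float) == 16384 bytes plus the callback padding floats; and
+  //   whenever ring_buffer_length_bytes lands on an exact multiple of 4096, the 192
+  //   bytes added above keep successive ring lines off the same 4k cache alias.)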
+ vertical_buffer_size = (size_t)horizontal->scale_info.output_sub_size * (size_t)effective_channels * sizeof(float) + sizeof(float); // extra float for padding + + // we make two passes through this loop, 1st to add everything up, 2nd to allocate and init + for (;;) + { + int i; + void *advance_mem = alloced; + int copy_horizontal = 0; + stbir__sampler *possibly_use_horizontal_for_pivot = 0; + +#ifdef STBIR__SEPARATE_ALLOCATIONS +#define STBIR__NEXT_PTR(ptr, size, ntype) \ + if (alloced) \ + { \ + void *p = STBIR_MALLOC(size, user_data); \ + if (p == 0) \ + { \ + stbir__free_internal_mem(info); \ + return 0; \ + } \ + (ptr) = (ntype *)p; \ + } +#else +#define STBIR__NEXT_PTR(ptr, size, ntype) \ + advance_mem = (void *)((((size_t)advance_mem) + 15) & ~15); \ + if (alloced) \ + ptr = (ntype *)advance_mem; \ + advance_mem = ((char *)advance_mem) + (size); +#endif + + STBIR__NEXT_PTR(info, sizeof(stbir__info), stbir__info); + + STBIR__NEXT_PTR(info->split_info, sizeof(stbir__per_split_info) * splits, stbir__per_split_info); + + if (info) + { + static stbir__alpha_weight_func *fancy_alpha_weights[6] = {stbir__fancy_alpha_weight_4ch, stbir__fancy_alpha_weight_4ch, stbir__fancy_alpha_weight_4ch, stbir__fancy_alpha_weight_4ch, stbir__fancy_alpha_weight_2ch, stbir__fancy_alpha_weight_2ch}; + static stbir__alpha_unweight_func *fancy_alpha_unweights[6] = {stbir__fancy_alpha_unweight_4ch, stbir__fancy_alpha_unweight_4ch, stbir__fancy_alpha_unweight_4ch, stbir__fancy_alpha_unweight_4ch, stbir__fancy_alpha_unweight_2ch, stbir__fancy_alpha_unweight_2ch}; + static stbir__alpha_weight_func *simple_alpha_weights[6] = {stbir__simple_alpha_weight_4ch, stbir__simple_alpha_weight_4ch, stbir__simple_alpha_weight_4ch, stbir__simple_alpha_weight_4ch, stbir__simple_alpha_weight_2ch, stbir__simple_alpha_weight_2ch}; + static stbir__alpha_unweight_func *simple_alpha_unweights[6] = {stbir__simple_alpha_unweight_4ch, stbir__simple_alpha_unweight_4ch, stbir__simple_alpha_unweight_4ch, stbir__simple_alpha_unweight_4ch, stbir__simple_alpha_unweight_2ch, stbir__simple_alpha_unweight_2ch}; + + // initialize info fields + info->alloced_mem = alloced; + info->alloced_total = alloced_total; + + info->channels = channels; + info->effective_channels = effective_channels; + + info->offset_x = new_x; + info->offset_y = new_y; + info->alloc_ring_buffer_num_entries = (int)alloc_ring_buffer_num_entries; + info->ring_buffer_num_entries = 0; + info->ring_buffer_length_bytes = (int)ring_buffer_length_bytes; + info->splits = splits; + info->vertical_first = vertical_first; + + info->input_pixel_layout_internal = input_pixel_layout; + info->output_pixel_layout_internal = output_pixel_layout; + + // setup alpha weight functions + info->alpha_weight = 0; + info->alpha_unweight = 0; + + // handle alpha weighting functions and overrides + if (alpha_weighting_type == 2) + { + // high quality alpha multiplying on the way in, dividing on the way out + info->alpha_weight = fancy_alpha_weights[input_pixel_layout - STBIRI_RGBA]; + info->alpha_unweight = fancy_alpha_unweights[output_pixel_layout - STBIRI_RGBA]; + } + else if (alpha_weighting_type == 4) + { + // fast alpha multiplying on the way in, dividing on the way out + info->alpha_weight = simple_alpha_weights[input_pixel_layout - STBIRI_RGBA]; + info->alpha_unweight = simple_alpha_unweights[output_pixel_layout - STBIRI_RGBA]; + } + else if (alpha_weighting_type == 1) + { + // fast alpha on the way in, leave in premultiplied form on way out + info->alpha_weight = 
simple_alpha_weights[input_pixel_layout - STBIRI_RGBA]; + } + else if (alpha_weighting_type == 3) + { + // incoming is premultiplied, fast alpha dividing on the way out - non-premultiplied output + info->alpha_unweight = simple_alpha_unweights[output_pixel_layout - STBIRI_RGBA]; + } + + // handle 3-chan color flipping, using the alpha weight path + if (((input_pixel_layout == STBIRI_RGB) && (output_pixel_layout == STBIRI_BGR)) || + ((input_pixel_layout == STBIRI_BGR) && (output_pixel_layout == STBIRI_RGB))) + { + // do the flipping on the smaller of the two ends + if (horizontal->scale_info.scale < 1.0f) + info->alpha_unweight = stbir__simple_flip_3ch; + else + info->alpha_weight = stbir__simple_flip_3ch; + } + } + + // get all the per-split buffers + for (i = 0; i < splits; i++) + { + STBIR__NEXT_PTR(info->split_info[i].decode_buffer, decode_buffer_size, float); + +#ifdef STBIR__SEPARATE_ALLOCATIONS + +#ifdef STBIR_SIMD8 + if ((info) && (effective_channels == 3)) + ++info->split_info[i].decode_buffer; // avx in 3 channel mode needs one float at the start of the buffer +#endif + + STBIR__NEXT_PTR(info->split_info[i].ring_buffers, alloc_ring_buffer_num_entries * sizeof(float *), float *); + { + int j; + for (j = 0; j < alloc_ring_buffer_num_entries; j++) + { + STBIR__NEXT_PTR(info->split_info[i].ring_buffers[j], ring_buffer_length_bytes, float); +#ifdef STBIR_SIMD8 + if ((info) && (effective_channels == 3)) + ++info->split_info[i].ring_buffers[j]; // avx in 3 channel mode needs one float at the start of the buffer +#endif + } + } +#else + STBIR__NEXT_PTR(info->split_info[i].ring_buffer, ring_buffer_size, float); +#endif + STBIR__NEXT_PTR(info->split_info[i].vertical_buffer, vertical_buffer_size, float); + } + + // alloc memory for to-be-pivoted coeffs (if necessary) + if (vertical->is_gather == 0) + { + size_t both; + size_t temp_mem_amt; + + // when in vertical scatter mode, we first build the coefficients in gather mode, and then pivot after, + // that means we need two buffers, so we try to use the decode buffer and ring buffer for this. if that + // is too small, we just allocate extra memory to use as this temp. + + both = (size_t)vertical->gather_prescatter_contributors_size + (size_t)vertical->gather_prescatter_coefficients_size; + +#ifdef STBIR__SEPARATE_ALLOCATIONS + temp_mem_amt = decode_buffer_size; + +#ifdef STBIR_SIMD8 + if (effective_channels == 3) + --temp_mem_amt; // avx in 3 channel mode needs one float at the start of the buffer +#endif +#else + temp_mem_amt = (size_t)(decode_buffer_size + ring_buffer_size + vertical_buffer_size) * (size_t)splits; +#endif + if (temp_mem_amt >= both) + { + if (info) + { + vertical->gather_prescatter_contributors = (stbir__contributors *)info->split_info[0].decode_buffer; + vertical->gather_prescatter_coefficients = (float *)(((char *)info->split_info[0].decode_buffer) + vertical->gather_prescatter_contributors_size); + } + } + else + { + // ring+decode memory is too small, so allocate temp memory + STBIR__NEXT_PTR(vertical->gather_prescatter_contributors, vertical->gather_prescatter_contributors_size, stbir__contributors); + STBIR__NEXT_PTR(vertical->gather_prescatter_coefficients, vertical->gather_prescatter_coefficients_size, float); + } + } + + STBIR__NEXT_PTR(horizontal->contributors, horizontal->contributors_size, stbir__contributors); + STBIR__NEXT_PTR(horizontal->coefficients, horizontal->coefficients_size, float); + + // are the two filters identical?? 
(happens a lot with mipmap generation) + if ((horizontal->filter_kernel == vertical->filter_kernel) && (horizontal->filter_support == vertical->filter_support) && (horizontal->edge == vertical->edge) && (horizontal->scale_info.output_sub_size == vertical->scale_info.output_sub_size)) + { + float diff_scale = horizontal->scale_info.scale - vertical->scale_info.scale; + float diff_shift = horizontal->scale_info.pixel_shift - vertical->scale_info.pixel_shift; + if (diff_scale < 0.0f) + diff_scale = -diff_scale; + if (diff_shift < 0.0f) + diff_shift = -diff_shift; + if ((diff_scale <= stbir__small_float) && (diff_shift <= stbir__small_float)) + { + if (horizontal->is_gather == vertical->is_gather) + { + copy_horizontal = 1; + goto no_vert_alloc; + } + // everything matches, but vertical is scatter, horizontal is gather, use horizontal coeffs for vertical pivot coeffs + possibly_use_horizontal_for_pivot = horizontal; + } + } + + STBIR__NEXT_PTR(vertical->contributors, vertical->contributors_size, stbir__contributors); + STBIR__NEXT_PTR(vertical->coefficients, vertical->coefficients_size, float); + + no_vert_alloc: + + if (info) + { + STBIR_PROFILE_BUILD_START(horizontal); + + stbir__calculate_filters(horizontal, 0, user_data STBIR_ONLY_PROFILE_BUILD_SET_INFO); + + // setup the horizontal gather functions + // start with defaulting to the n_coeffs functions (specialized on channels and remnant leftover) + info->horizontal_gather_channels = stbir__horizontal_gather_n_coeffs_funcs[effective_channels][horizontal->extent_info.widest & 3]; + // but if the number of coeffs <= 12, use another set of special cases. <=12 coeffs is any enlarging resize, or shrinking resize down to about 1/3 size + if (horizontal->extent_info.widest <= 12) + info->horizontal_gather_channels = stbir__horizontal_gather_channels_funcs[effective_channels][horizontal->extent_info.widest - 1]; + + info->scanline_extents.conservative.n0 = conservative->n0; + info->scanline_extents.conservative.n1 = conservative->n1; + + // get exact extents + stbir__get_extents(horizontal, &info->scanline_extents); + + // pack the horizontal coeffs + horizontal->coefficient_width = stbir__pack_coefficients(horizontal->num_contributors, horizontal->contributors, horizontal->coefficients, horizontal->coefficient_width, horizontal->extent_info.widest, info->scanline_extents.conservative.n0, info->scanline_extents.conservative.n1); + + STBIR_MEMCPY(&info->horizontal, horizontal, sizeof(stbir__sampler)); + + STBIR_PROFILE_BUILD_END(horizontal); + + if (copy_horizontal) + { + STBIR_MEMCPY(&info->vertical, horizontal, sizeof(stbir__sampler)); + } + else + { + STBIR_PROFILE_BUILD_START(vertical); + + stbir__calculate_filters(vertical, possibly_use_horizontal_for_pivot, user_data STBIR_ONLY_PROFILE_BUILD_SET_INFO); + STBIR_MEMCPY(&info->vertical, vertical, sizeof(stbir__sampler)); + + STBIR_PROFILE_BUILD_END(vertical); + } + + // setup the vertical split ranges + stbir__get_split_info(info->split_info, info->splits, info->vertical.scale_info.output_sub_size, info->vertical.filter_pixel_margin, info->vertical.scale_info.input_full_size); + + // now we know precisely how many entries we need + info->ring_buffer_num_entries = info->vertical.extent_info.widest; + + // we never need more ring buffer entries than the scanlines we're outputting + if ((!info->vertical.is_gather) && (info->ring_buffer_num_entries > conservative_split_output_size)) + info->ring_buffer_num_entries = conservative_split_output_size; + STBIR_ASSERT(info->ring_buffer_num_entries <= 
info->alloc_ring_buffer_num_entries);
+    }
+#undef STBIR__NEXT_PTR
+
+    // is this the first time through the loop?
+    if (info == 0)
+    {
+      alloced_total = (15 + (size_t)advance_mem);
+      alloced = STBIR_MALLOC(alloced_total, user_data);
+      if (alloced == 0)
+        return 0;
+    }
+    else
+      return info; // success
+  }
+}
+
+static int stbir__perform_resize(stbir__info const *info, int split_start, int split_count)
+{
+  stbir__per_split_info *split_info = info->split_info + split_start;
+
+  STBIR_PROFILE_CLEAR_EXTRAS();
+
+  STBIR_PROFILE_FIRST_START(looping);
+  if (info->vertical.is_gather)
+    stbir__vertical_gather_loop(info, split_info, split_count);
+  else
+    stbir__vertical_scatter_loop(info, split_info, split_count);
+  STBIR_PROFILE_END(looping);
+
+  return 1;
+}
+
+static void stbir__update_info_from_resize(stbir__info *info, STBIR_RESIZE *resize)
+{
+  static stbir__decode_pixels_func *decode_simple[STBIR_TYPE_HALF_FLOAT - STBIR_TYPE_UINT8_SRGB + 1] =
+      {
+          /* 1ch-4ch */ stbir__decode_uint8_srgb,
+          stbir__decode_uint8_srgb,
+          0,
+          stbir__decode_float_linear,
+          stbir__decode_half_float_linear,
+      };
+
+  static stbir__decode_pixels_func *decode_alphas[STBIRI_AR - STBIRI_RGBA + 1][STBIR_TYPE_HALF_FLOAT - STBIR_TYPE_UINT8_SRGB + 1] =
+      {
+          {/* RGBA */ stbir__decode_uint8_srgb4_linearalpha, stbir__decode_uint8_srgb, 0, stbir__decode_float_linear, stbir__decode_half_float_linear},
+          {/* BGRA */ stbir__decode_uint8_srgb4_linearalpha_BGRA, stbir__decode_uint8_srgb_BGRA, 0, stbir__decode_float_linear_BGRA, stbir__decode_half_float_linear_BGRA},
+          {/* ARGB */ stbir__decode_uint8_srgb4_linearalpha_ARGB, stbir__decode_uint8_srgb_ARGB, 0, stbir__decode_float_linear_ARGB, stbir__decode_half_float_linear_ARGB},
+          {/* ABGR */ stbir__decode_uint8_srgb4_linearalpha_ABGR, stbir__decode_uint8_srgb_ABGR, 0, stbir__decode_float_linear_ABGR, stbir__decode_half_float_linear_ABGR},
+          {/* RA */ stbir__decode_uint8_srgb2_linearalpha, stbir__decode_uint8_srgb, 0, stbir__decode_float_linear, stbir__decode_half_float_linear},
+          {/* AR */ stbir__decode_uint8_srgb2_linearalpha_AR, stbir__decode_uint8_srgb_AR, 0, stbir__decode_float_linear_AR, stbir__decode_half_float_linear_AR},
+      };
+
+  static stbir__decode_pixels_func *decode_simple_scaled_or_not[2][2] =
+      {
+          {stbir__decode_uint8_linear_scaled, stbir__decode_uint8_linear},
+          {stbir__decode_uint16_linear_scaled, stbir__decode_uint16_linear},
+      };
+
+  static stbir__decode_pixels_func *decode_alphas_scaled_or_not[STBIRI_AR - STBIRI_RGBA + 1][2][2] =
+      {
+          {/* RGBA */ {stbir__decode_uint8_linear_scaled, stbir__decode_uint8_linear}, {stbir__decode_uint16_linear_scaled, stbir__decode_uint16_linear}},
+          {/* BGRA */ {stbir__decode_uint8_linear_scaled_BGRA, stbir__decode_uint8_linear_BGRA}, {stbir__decode_uint16_linear_scaled_BGRA, stbir__decode_uint16_linear_BGRA}},
+          {/* ARGB */ {stbir__decode_uint8_linear_scaled_ARGB, stbir__decode_uint8_linear_ARGB}, {stbir__decode_uint16_linear_scaled_ARGB, stbir__decode_uint16_linear_ARGB}},
+          {/* ABGR */ {stbir__decode_uint8_linear_scaled_ABGR, stbir__decode_uint8_linear_ABGR}, {stbir__decode_uint16_linear_scaled_ABGR, stbir__decode_uint16_linear_ABGR}},
+          {/* RA */ {stbir__decode_uint8_linear_scaled, stbir__decode_uint8_linear}, {stbir__decode_uint16_linear_scaled, stbir__decode_uint16_linear}},
+          {/* AR */ {stbir__decode_uint8_linear_scaled_AR, stbir__decode_uint8_linear_AR}, {stbir__decode_uint16_linear_scaled_AR, stbir__decode_uint16_linear_AR}}};
+
+  static stbir__encode_pixels_func *encode_simple[STBIR_TYPE_HALF_FLOAT - STBIR_TYPE_UINT8_SRGB + 1]
= + { + /* 1ch-4ch */ stbir__encode_uint8_srgb, + stbir__encode_uint8_srgb, + 0, + stbir__encode_float_linear, + stbir__encode_half_float_linear, + }; + + static stbir__encode_pixels_func *encode_alphas[STBIRI_AR - STBIRI_RGBA + 1][STBIR_TYPE_HALF_FLOAT - STBIR_TYPE_UINT8_SRGB + 1] = + { + {/* RGBA */ stbir__encode_uint8_srgb4_linearalpha, stbir__encode_uint8_srgb, 0, stbir__encode_float_linear, stbir__encode_half_float_linear}, + {/* BGRA */ stbir__encode_uint8_srgb4_linearalpha_BGRA, stbir__encode_uint8_srgb_BGRA, 0, stbir__encode_float_linear_BGRA, stbir__encode_half_float_linear_BGRA}, + {/* ARGB */ stbir__encode_uint8_srgb4_linearalpha_ARGB, stbir__encode_uint8_srgb_ARGB, 0, stbir__encode_float_linear_ARGB, stbir__encode_half_float_linear_ARGB}, + {/* ABGR */ stbir__encode_uint8_srgb4_linearalpha_ABGR, stbir__encode_uint8_srgb_ABGR, 0, stbir__encode_float_linear_ABGR, stbir__encode_half_float_linear_ABGR}, + {/* RA */ stbir__encode_uint8_srgb2_linearalpha, stbir__encode_uint8_srgb, 0, stbir__encode_float_linear, stbir__encode_half_float_linear}, + {/* AR */ stbir__encode_uint8_srgb2_linearalpha_AR, stbir__encode_uint8_srgb_AR, 0, stbir__encode_float_linear_AR, stbir__encode_half_float_linear_AR}}; + + static stbir__encode_pixels_func *encode_simple_scaled_or_not[2][2] = + { + {stbir__encode_uint8_linear_scaled, stbir__encode_uint8_linear}, + {stbir__encode_uint16_linear_scaled, stbir__encode_uint16_linear}, + }; + + static stbir__encode_pixels_func *encode_alphas_scaled_or_not[STBIRI_AR - STBIRI_RGBA + 1][2][2] = + { + {/* RGBA */ {stbir__encode_uint8_linear_scaled, stbir__encode_uint8_linear}, {stbir__encode_uint16_linear_scaled, stbir__encode_uint16_linear}}, + {/* BGRA */ {stbir__encode_uint8_linear_scaled_BGRA, stbir__encode_uint8_linear_BGRA}, {stbir__encode_uint16_linear_scaled_BGRA, stbir__encode_uint16_linear_BGRA}}, + {/* ARGB */ {stbir__encode_uint8_linear_scaled_ARGB, stbir__encode_uint8_linear_ARGB}, {stbir__encode_uint16_linear_scaled_ARGB, stbir__encode_uint16_linear_ARGB}}, + {/* ABGR */ {stbir__encode_uint8_linear_scaled_ABGR, stbir__encode_uint8_linear_ABGR}, {stbir__encode_uint16_linear_scaled_ABGR, stbir__encode_uint16_linear_ABGR}}, + {/* RA */ {stbir__encode_uint8_linear_scaled, stbir__encode_uint8_linear}, {stbir__encode_uint16_linear_scaled, stbir__encode_uint16_linear}}, + {/* AR */ {stbir__encode_uint8_linear_scaled_AR, stbir__encode_uint8_linear_AR}, {stbir__encode_uint16_linear_scaled_AR, stbir__encode_uint16_linear_AR}}}; + + stbir__decode_pixels_func *decode_pixels = 0; + stbir__encode_pixels_func *encode_pixels = 0; + stbir_datatype input_type, output_type; + + input_type = resize->input_data_type; + output_type = resize->output_data_type; + info->input_data = resize->input_pixels; + info->input_stride_bytes = resize->input_stride_in_bytes; + info->output_stride_bytes = resize->output_stride_in_bytes; + + // if we're completely point sampling, then we can turn off SRGB + if ((info->horizontal.filter_enum == STBIR_FILTER_POINT_SAMPLE) && (info->vertical.filter_enum == STBIR_FILTER_POINT_SAMPLE)) + { + if (((input_type == STBIR_TYPE_UINT8_SRGB) || (input_type == STBIR_TYPE_UINT8_SRGB_ALPHA)) && + ((output_type == STBIR_TYPE_UINT8_SRGB) || (output_type == STBIR_TYPE_UINT8_SRGB_ALPHA))) + { + input_type = STBIR_TYPE_UINT8; + output_type = STBIR_TYPE_UINT8; + } + } + + // recalc the output and input strides + if (info->input_stride_bytes == 0) + info->input_stride_bytes = info->channels * info->horizontal.scale_info.input_full_size * 
stbir__type_size[input_type]; + + if (info->output_stride_bytes == 0) + info->output_stride_bytes = info->channels * info->horizontal.scale_info.output_sub_size * stbir__type_size[output_type]; + + // calc offset + info->output_data = ((char *)resize->output_pixels) + ((size_t)info->offset_y * (size_t)resize->output_stride_in_bytes) + (info->offset_x * info->channels * stbir__type_size[output_type]); + + info->in_pixels_cb = resize->input_cb; + info->user_data = resize->user_data; + info->out_pixels_cb = resize->output_cb; + + // setup the input format converters + if ((input_type == STBIR_TYPE_UINT8) || (input_type == STBIR_TYPE_UINT16)) + { + int non_scaled = 0; + + // check if we can run unscaled - 0-255.0/0-65535.0 instead of 0-1.0 (which is a tiny bit faster when doing linear 8->8 or 16->16) + if ((!info->alpha_weight) && (!info->alpha_unweight)) // don't short circuit when alpha weighting (get everything to 0-1.0 as usual) + if (((input_type == STBIR_TYPE_UINT8) && (output_type == STBIR_TYPE_UINT8)) || ((input_type == STBIR_TYPE_UINT16) && (output_type == STBIR_TYPE_UINT16))) + non_scaled = 1; + + if (info->input_pixel_layout_internal <= STBIRI_4CHANNEL) + decode_pixels = decode_simple_scaled_or_not[input_type == STBIR_TYPE_UINT16][non_scaled]; + else + decode_pixels = decode_alphas_scaled_or_not[(info->input_pixel_layout_internal - STBIRI_RGBA) % (STBIRI_AR - STBIRI_RGBA + 1)][input_type == STBIR_TYPE_UINT16][non_scaled]; + } + else + { + if (info->input_pixel_layout_internal <= STBIRI_4CHANNEL) + decode_pixels = decode_simple[input_type - STBIR_TYPE_UINT8_SRGB]; + else + decode_pixels = decode_alphas[(info->input_pixel_layout_internal - STBIRI_RGBA) % (STBIRI_AR - STBIRI_RGBA + 1)][input_type - STBIR_TYPE_UINT8_SRGB]; + } + + // setup the output format converters + if ((output_type == STBIR_TYPE_UINT8) || (output_type == STBIR_TYPE_UINT16)) + { + int non_scaled = 0; + + // check if we can run unscaled - 0-255.0/0-65535.0 instead of 0-1.0 (which is a tiny bit faster when doing linear 8->8 or 16->16) + if ((!info->alpha_weight) && (!info->alpha_unweight)) // don't short circuit when alpha weighting (get everything to 0-1.0 as usual) + if (((input_type == STBIR_TYPE_UINT8) && (output_type == STBIR_TYPE_UINT8)) || ((input_type == STBIR_TYPE_UINT16) && (output_type == STBIR_TYPE_UINT16))) + non_scaled = 1; + + if (info->output_pixel_layout_internal <= STBIRI_4CHANNEL) + encode_pixels = encode_simple_scaled_or_not[output_type == STBIR_TYPE_UINT16][non_scaled]; + else + encode_pixels = encode_alphas_scaled_or_not[(info->output_pixel_layout_internal - STBIRI_RGBA) % (STBIRI_AR - STBIRI_RGBA + 1)][output_type == STBIR_TYPE_UINT16][non_scaled]; + } + else + { + if (info->output_pixel_layout_internal <= STBIRI_4CHANNEL) + encode_pixels = encode_simple[output_type - STBIR_TYPE_UINT8_SRGB]; + else + encode_pixels = encode_alphas[(info->output_pixel_layout_internal - STBIRI_RGBA) % (STBIRI_AR - STBIRI_RGBA + 1)][output_type - STBIR_TYPE_UINT8_SRGB]; + } + + info->input_type = input_type; + info->output_type = output_type; + info->decode_pixels = decode_pixels; + info->encode_pixels = encode_pixels; +} + +static void stbir__clip(int *outx, int *outsubw, int outw, double *u0, double *u1) +{ + double per, adj; + int over; + + // do left/top edge + if (*outx < 0) + { + per = ((double)*outx) / ((double)*outsubw); // is negative + adj = per * (*u1 - *u0); + *u0 -= adj; // increases u0 + *outx = 0; + } + + // do right/bot edge + over = outw - (*outx + *outsubw); + if (over < 0) + { + per = 
((double)over) / ((double)*outsubw); // is negative
+    adj = per * (*u1 - *u0);
+    *u1 += adj; // decreases u1
+    *outsubw = outw - *outx;
+  }
+}
+
+// converts a double to a rational that has less than one float bit of error (returns 0 if unable to do so)
+static int stbir__double_to_rational(double f, stbir_uint32 limit, stbir_uint32 *numer, stbir_uint32 *denom, int limit_denom) // limit_denom (1) or limit numer (0)
+{
+  double err;
+  stbir_uint64 top, bot;
+  stbir_uint64 numer_last = 0;
+  stbir_uint64 denom_last = 1;
+  stbir_uint64 numer_estimate = 1;
+  stbir_uint64 denom_estimate = 0;
+
+  // scale to past float error range
+  top = (stbir_uint64)(f * (double)(1 << 25));
+  bot = 1 << 25;
+
+  // keep refining, but usually stops in a few loops - usually 5 for bad cases
+  for (;;)
+  {
+    stbir_uint64 est, temp;
+
+    // hit limit, break out and do best full range estimate
+    if (((limit_denom) ? denom_estimate : numer_estimate) >= limit)
+      break;
+
+    // is the current error less than 1 bit of a float? if so, we're done
+    if (denom_estimate)
+    {
+      err = ((double)numer_estimate / (double)denom_estimate) - f;
+      if (err < 0.0)
+        err = -err;
+      if (err < (1.0 / (double)(1 << 24)))
+      {
+        // yup, found it
+        *numer = (stbir_uint32)numer_estimate;
+        *denom = (stbir_uint32)denom_estimate;
+        return 1;
+      }
+    }
+
+    // no more refinement bits left? break out and do full range estimate
+    if (bot == 0)
+      break;
+
+    // gcd the estimate bits
+    est = top / bot;
+    temp = top % bot;
+    top = bot;
+    bot = temp;
+
+    // move the denominator remainders
+    temp = est * denom_estimate + denom_last;
+    denom_last = denom_estimate;
+    denom_estimate = temp;
+
+    // move the numerator remainders
+    temp = est * numer_estimate + numer_last;
+    numer_last = numer_estimate;
+    numer_estimate = temp;
+  }
+
+  // we didn't find anything good enough for float, use a full range estimate
+  if (limit_denom)
+  {
+    numer_estimate = (stbir_uint64)(f * (double)limit + 0.5);
+    denom_estimate = limit;
+  }
+  else
+  {
+    numer_estimate = limit;
+    denom_estimate = (stbir_uint64)(((double)limit / f) + 0.5);
+  }
+
+  *numer = (stbir_uint32)numer_estimate;
+  *denom = (stbir_uint32)denom_estimate;
+
+  err = (denom_estimate) ? (((double)(stbir_uint32)numer_estimate / (double)(stbir_uint32)denom_estimate) - f) : 1.0;
+  if (err < 0.0)
+    err = -err;
+  return (err < (1.0 / (double)(1 << 24))) ? 1 : 0;
+}
+
+static int stbir__calculate_region_transform(stbir__scale_info *scale_info, int output_full_range, int *output_offset, int output_sub_range, int input_full_range, double input_s0, double input_s1)
+{
+  double output_range, input_range, output_s, input_s, ratio, scale;
+
+  input_s = input_s1 - input_s0;
+
+  // null area
+  if ((output_full_range == 0) || (input_full_range == 0) ||
+      (output_sub_range == 0) || (input_s <= stbir__small_float))
+    return 0;
+
+  // are either of the ranges completely out of bounds?
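+  // (Illustrative note, not from the original source: input_s0/input_s1 are the
+  //   normalized 0..1 sub-region coordinates set by stbir_set_input_subrect (which
+  //   defaults them to 0 and 1), so input_s0 >= 1 or input_s1 <= 0 means the
+  //   requested source region lies entirely outside the image.)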
+ if ((*output_offset >= output_full_range) || ((*output_offset + output_sub_range) <= 0) || (input_s0 >= (1.0f - stbir__small_float)) || (input_s1 <= stbir__small_float)) + return 0; + + output_range = (double)output_full_range; + input_range = (double)input_full_range; + + output_s = ((double)output_sub_range) / output_range; + + // figure out the scaling to use + ratio = output_s / input_s; + + // save scale before clipping + scale = (output_range / input_range) * ratio; + scale_info->scale = (float)scale; + scale_info->inv_scale = (float)(1.0 / scale); + + // clip output area to left/right output edges (and adjust input area) + stbir__clip(output_offset, &output_sub_range, output_full_range, &input_s0, &input_s1); + + // recalc input area + input_s = input_s1 - input_s0; + + // after clipping do we have zero input area? + if (input_s <= stbir__small_float) + return 0; + + // calculate and store the starting source offsets in output pixel space + scale_info->pixel_shift = (float)(input_s0 * ratio * output_range); + + scale_info->scale_is_rational = stbir__double_to_rational(scale, (scale <= 1.0) ? output_full_range : input_full_range, &scale_info->scale_numerator, &scale_info->scale_denominator, (scale >= 1.0)); + + scale_info->input_full_size = input_full_range; + scale_info->output_sub_size = output_sub_range; + + return 1; +} + +static void stbir__init_and_set_layout(STBIR_RESIZE *resize, stbir_pixel_layout pixel_layout, stbir_datatype data_type) +{ + resize->input_cb = 0; + resize->output_cb = 0; + resize->user_data = resize; + resize->samplers = 0; + resize->called_alloc = 0; + resize->horizontal_filter = STBIR_FILTER_DEFAULT; + resize->horizontal_filter_kernel = 0; + resize->horizontal_filter_support = 0; + resize->vertical_filter = STBIR_FILTER_DEFAULT; + resize->vertical_filter_kernel = 0; + resize->vertical_filter_support = 0; + resize->horizontal_edge = STBIR_EDGE_CLAMP; + resize->vertical_edge = STBIR_EDGE_CLAMP; + resize->input_s0 = 0; + resize->input_t0 = 0; + resize->input_s1 = 1; + resize->input_t1 = 1; + resize->output_subx = 0; + resize->output_suby = 0; + resize->output_subw = resize->output_w; + resize->output_subh = resize->output_h; + resize->input_data_type = data_type; + resize->output_data_type = data_type; + resize->input_pixel_layout_public = pixel_layout; + resize->output_pixel_layout_public = pixel_layout; + resize->needs_rebuild = 1; +} + +STBIRDEF void stbir_resize_init(STBIR_RESIZE *resize, + const void *input_pixels, int input_w, int input_h, int input_stride_in_bytes, // stride can be zero + void *output_pixels, int output_w, int output_h, int output_stride_in_bytes, // stride can be zero + stbir_pixel_layout pixel_layout, stbir_datatype data_type) +{ + resize->input_pixels = input_pixels; + resize->input_w = input_w; + resize->input_h = input_h; + resize->input_stride_in_bytes = input_stride_in_bytes; + resize->output_pixels = output_pixels; + resize->output_w = output_w; + resize->output_h = output_h; + resize->output_stride_in_bytes = output_stride_in_bytes; + resize->fast_alpha = 0; + + stbir__init_and_set_layout(resize, pixel_layout, data_type); +} + +// You can update parameters any time after resize_init +STBIRDEF void stbir_set_datatypes(STBIR_RESIZE *resize, stbir_datatype input_type, stbir_datatype output_type) // by default, datatype from resize_init +{ + resize->input_data_type = input_type; + resize->output_data_type = output_type; + if ((resize->samplers) && (!resize->needs_rebuild)) + stbir__update_info_from_resize(resize->samplers, 
resize); +} + +STBIRDEF void stbir_set_pixel_callbacks(STBIR_RESIZE *resize, stbir_input_callback *input_cb, stbir_output_callback *output_cb) // no callbacks by default +{ + resize->input_cb = input_cb; + resize->output_cb = output_cb; + + if ((resize->samplers) && (!resize->needs_rebuild)) + { + resize->samplers->in_pixels_cb = input_cb; + resize->samplers->out_pixels_cb = output_cb; + } +} + +STBIRDEF void stbir_set_user_data(STBIR_RESIZE *resize, void *user_data) // pass back STBIR_RESIZE* by default +{ + resize->user_data = user_data; + if ((resize->samplers) && (!resize->needs_rebuild)) + resize->samplers->user_data = user_data; +} + +STBIRDEF void stbir_set_buffer_ptrs(STBIR_RESIZE *resize, const void *input_pixels, int input_stride_in_bytes, void *output_pixels, int output_stride_in_bytes) +{ + resize->input_pixels = input_pixels; + resize->input_stride_in_bytes = input_stride_in_bytes; + resize->output_pixels = output_pixels; + resize->output_stride_in_bytes = output_stride_in_bytes; + if ((resize->samplers) && (!resize->needs_rebuild)) + stbir__update_info_from_resize(resize->samplers, resize); +} + +STBIRDEF int stbir_set_edgemodes(STBIR_RESIZE *resize, stbir_edge horizontal_edge, stbir_edge vertical_edge) // CLAMP by default +{ + resize->horizontal_edge = horizontal_edge; + resize->vertical_edge = vertical_edge; + resize->needs_rebuild = 1; + return 1; +} + +STBIRDEF int stbir_set_filters(STBIR_RESIZE *resize, stbir_filter horizontal_filter, stbir_filter vertical_filter) // STBIR_DEFAULT_FILTER_UPSAMPLE/DOWNSAMPLE by default +{ + resize->horizontal_filter = horizontal_filter; + resize->vertical_filter = vertical_filter; + resize->needs_rebuild = 1; + return 1; +} + +STBIRDEF int stbir_set_filter_callbacks(STBIR_RESIZE *resize, stbir__kernel_callback *horizontal_filter, stbir__support_callback *horizontal_support, stbir__kernel_callback *vertical_filter, stbir__support_callback *vertical_support) +{ + resize->horizontal_filter_kernel = horizontal_filter; + resize->horizontal_filter_support = horizontal_support; + resize->vertical_filter_kernel = vertical_filter; + resize->vertical_filter_support = vertical_support; + resize->needs_rebuild = 1; + return 1; +} + +STBIRDEF int stbir_set_pixel_layouts(STBIR_RESIZE *resize, stbir_pixel_layout input_pixel_layout, stbir_pixel_layout output_pixel_layout) // sets new pixel layouts +{ + resize->input_pixel_layout_public = input_pixel_layout; + resize->output_pixel_layout_public = output_pixel_layout; + resize->needs_rebuild = 1; + return 1; +} + +STBIRDEF int stbir_set_non_pm_alpha_speed_over_quality(STBIR_RESIZE *resize, int non_pma_alpha_speed_over_quality) // sets alpha speed +{ + resize->fast_alpha = non_pma_alpha_speed_over_quality; + resize->needs_rebuild = 1; + return 1; +} + +STBIRDEF int stbir_set_input_subrect(STBIR_RESIZE *resize, double s0, double t0, double s1, double t1) // sets input region (full region by default) +{ + resize->input_s0 = s0; + resize->input_t0 = t0; + resize->input_s1 = s1; + resize->input_t1 = t1; + resize->needs_rebuild = 1; + + // are we inbounds? 
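+  /* usage sketch (the 0.25/0.75 values are hypothetical, not from the docs):
+     to sample just the middle half of the source in each axis, call
+
+        stbir_set_input_subrect( &resize, 0.25, 0.25, 0.75, 0.75 );
+
+     the values are recorded even when the rect is degenerate or entirely
+     outside [0,1] - only the return value below reports that, so check it */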
+  if ((s1 < stbir__small_float) || ((s1 - s0) < stbir__small_float) ||
+      (t1 < stbir__small_float) || ((t1 - t0) < stbir__small_float) ||
+      (s0 > (1.0f - stbir__small_float)) ||
+      (t0 > (1.0f - stbir__small_float)))
+    return 0;
+
+  return 1;
+}
+
+STBIRDEF int stbir_set_output_pixel_subrect(STBIR_RESIZE *resize, int subx, int suby, int subw, int subh) // sets output region (full region by default)
+{
+  resize->output_subx = subx;
+  resize->output_suby = suby;
+  resize->output_subw = subw;
+  resize->output_subh = subh;
+  resize->needs_rebuild = 1;
+
+  // are we inbounds?
+  if ((subx >= resize->output_w) || ((subx + subw) <= 0) || (suby >= resize->output_h) || ((suby + subh) <= 0) || (subw == 0) || (subh == 0))
+    return 0;
+
+  return 1;
+}
+
+STBIRDEF int stbir_set_pixel_subrect(STBIR_RESIZE *resize, int subx, int suby, int subw, int subh) // sets both regions (full regions by default)
+{
+  double s0, t0, s1, t1;
+
+  s0 = ((double)subx) / ((double)resize->output_w);
+  t0 = ((double)suby) / ((double)resize->output_h);
+  s1 = ((double)(subx + subw)) / ((double)resize->output_w);
+  t1 = ((double)(suby + subh)) / ((double)resize->output_h);
+
+  resize->input_s0 = s0;
+  resize->input_t0 = t0;
+  resize->input_s1 = s1;
+  resize->input_t1 = t1;
+  resize->output_subx = subx;
+  resize->output_suby = suby;
+  resize->output_subw = subw;
+  resize->output_subh = subh;
+  resize->needs_rebuild = 1;
+
+  // are we inbounds?
+  if ((subx >= resize->output_w) || ((subx + subw) <= 0) || (suby >= resize->output_h) || ((suby + subh) <= 0) || (subw == 0) || (subh == 0))
+    return 0;
+
+  return 1;
+}
+
+static int stbir__perform_build(STBIR_RESIZE *resize, int splits)
+{
+  stbir__contributors conservative = {0, 0};
+  stbir__sampler horizontal, vertical;
+  int new_output_subx, new_output_suby;
+  stbir__info *out_info;
+#ifdef STBIR_PROFILE
+  stbir__info profile_infod; // used to contain building profile info before everything is allocated
+  stbir__info *profile_info = &profile_infod;
+#endif
+
+  // have we already built the samplers?
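+/* stbir__perform_build is the engine behind stbir_build_samplers and
+   stbir_build_samplers_with_splits below. a minimal build-once/resize-many
+   sketch, assuming hypothetical in/out buffers of the sizes shown:
+
+      STBIR_RESIZE r;
+      stbir_resize_init( &r, in, 320, 240, 0, out, 640, 480, 0,
+                         STBIR_RGBA, STBIR_TYPE_UINT8 );
+      if ( stbir_build_samplers( &r ) )    // build once...
+      {
+        stbir_resize_extended( &r );       // ...resize as often as you like
+        stbir_free_samplers( &r );         // ...then free explicitly
+      }
+
+   note the check below: this function refuses to clobber an existing build,
+   and the public entry points free any stale build before calling here. */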
+  if (resize->samplers)
+    return 0;
+
+#define STBIR_RETURN_ERROR_AND_ASSERT(exp) \
+  STBIR_ASSERT(!(exp)); \
+  if (exp) \
+    return 0;
+  STBIR_RETURN_ERROR_AND_ASSERT((unsigned)resize->horizontal_filter >= STBIR_FILTER_OTHER)
+  STBIR_RETURN_ERROR_AND_ASSERT((unsigned)resize->vertical_filter >= STBIR_FILTER_OTHER)
+#undef STBIR_RETURN_ERROR_AND_ASSERT
+
+  if (splits <= 0)
+    return 0;
+
+  STBIR_PROFILE_BUILD_FIRST_START(build);
+
+  new_output_subx = resize->output_subx;
+  new_output_suby = resize->output_suby;
+
+  // do horizontal clip and scale calcs
+  if (!stbir__calculate_region_transform(&horizontal.scale_info, resize->output_w, &new_output_subx, resize->output_subw, resize->input_w, resize->input_s0, resize->input_s1))
+    return 0;
+
+  // do vertical clip and scale calcs
+  if (!stbir__calculate_region_transform(&vertical.scale_info, resize->output_h, &new_output_suby, resize->output_subh, resize->input_h, resize->input_t0, resize->input_t1))
+    return 0;
+
+  // if nothing to do, just return
+  if ((horizontal.scale_info.output_sub_size == 0) || (vertical.scale_info.output_sub_size == 0))
+    return 0;
+
+  stbir__set_sampler(&horizontal, resize->horizontal_filter, resize->horizontal_filter_kernel, resize->horizontal_filter_support, resize->horizontal_edge, &horizontal.scale_info, 1, resize->user_data);
+  stbir__get_conservative_extents(&horizontal, &conservative, resize->user_data);
+  stbir__set_sampler(&vertical, resize->vertical_filter, resize->vertical_filter_kernel, resize->vertical_filter_support, resize->vertical_edge, &vertical.scale_info, 0, resize->user_data);
+
+  if ((vertical.scale_info.output_sub_size / splits) < STBIR_FORCE_MINIMUM_SCANLINES_FOR_SPLITS) // each split should get at least STBIR_FORCE_MINIMUM_SCANLINES_FOR_SPLITS scanlines (a handwavey choice)
+  {
+    splits = vertical.scale_info.output_sub_size / STBIR_FORCE_MINIMUM_SCANLINES_FOR_SPLITS;
+    if (splits == 0)
+      splits = 1;
+  }
+
+  STBIR_PROFILE_BUILD_START(alloc);
+  out_info = stbir__alloc_internal_mem_and_build_samplers(&horizontal, &vertical, &conservative, resize->input_pixel_layout_public, resize->output_pixel_layout_public, splits, new_output_subx, new_output_suby, resize->fast_alpha, resize->user_data STBIR_ONLY_PROFILE_BUILD_SET_INFO);
+  STBIR_PROFILE_BUILD_END(alloc);
+  STBIR_PROFILE_BUILD_END(build);
+
+  if (out_info)
+  {
+    resize->splits = splits;
+    resize->samplers = out_info;
+    resize->needs_rebuild = 0;
+#ifdef STBIR_PROFILE
+    STBIR_MEMCPY(&out_info->profile, &profile_infod.profile, sizeof(out_info->profile));
+#endif
+
+    // update anything that can be changed without recalcing samplers
+    stbir__update_info_from_resize(out_info, resize);
+
+    return splits;
+  }
+
+  return 0;
+}
+
+STBIRDEF void stbir_free_samplers(STBIR_RESIZE *resize)
+{
+  if (resize->samplers)
+  {
+    stbir__free_internal_mem(resize->samplers);
+    resize->samplers = 0;
+    resize->called_alloc = 0;
+  }
+}
+
+STBIRDEF int stbir_build_samplers_with_splits(STBIR_RESIZE *resize, int splits)
+{
+  if ((resize->samplers == 0) || (resize->needs_rebuild))
+  {
+    if (resize->samplers)
+      stbir_free_samplers(resize);
+
+    resize->called_alloc = 1;
+    return stbir__perform_build(resize, splits);
+  }
+
+  STBIR_PROFILE_BUILD_CLEAR(resize->samplers);
+
+  return 1;
+}
+
+STBIRDEF int stbir_build_samplers(STBIR_RESIZE *resize)
+{
+  return stbir_build_samplers_with_splits(resize, 1);
+}
+
+STBIRDEF int stbir_resize_extended(STBIR_RESIZE *resize)
+{
+  int result;
+
+  if ((resize->samplers == 0) || (resize->needs_rebuild))
+  {
+    int alloc_state = resize->called_alloc; // remember allocated state
+
+    if
(resize->samplers) + { + stbir__free_internal_mem(resize->samplers); + resize->samplers = 0; + } + + if (!stbir_build_samplers(resize)) + return 0; + + resize->called_alloc = alloc_state; + + // if build_samplers succeeded (above), but there are no samplers set, then + // the area to stretch into was zero pixels, so don't do anything and return + // success + if (resize->samplers == 0) + return 1; + } + else + { + // didn't build anything - clear it + STBIR_PROFILE_BUILD_CLEAR(resize->samplers); + } + + // do resize + result = stbir__perform_resize(resize->samplers, 0, resize->splits); + + // if we alloced, then free + if (!resize->called_alloc) + { + stbir_free_samplers(resize); + resize->samplers = 0; + } + + return result; +} + +STBIRDEF int stbir_resize_extended_split(STBIR_RESIZE *resize, int split_start, int split_count) +{ + STBIR_ASSERT(resize->samplers); + + // if we're just doing the whole thing, call full + if ((split_start == -1) || ((split_start == 0) && (split_count == resize->splits))) + return stbir_resize_extended(resize); + + // you **must** build samplers first when using split resize + if ((resize->samplers == 0) || (resize->needs_rebuild)) + return 0; + + if ((split_start >= resize->splits) || (split_start < 0) || ((split_start + split_count) > resize->splits) || (split_count <= 0)) + return 0; + + // do resize + return stbir__perform_resize(resize->samplers, split_start, split_count); +} + +static int stbir__check_output_stuff(void **ret_ptr, int *ret_pitch, void *output_pixels, int type_size, int output_w, int output_h, int output_stride_in_bytes, stbir_internal_pixel_layout pixel_layout) +{ + size_t size; + int pitch; + void *ptr; + + pitch = output_w * type_size * stbir__pixel_channels[pixel_layout]; + if (pitch == 0) + return 0; + + if (output_stride_in_bytes == 0) + output_stride_in_bytes = pitch; + + if (output_stride_in_bytes < pitch) + return 0; + + size = (size_t)output_stride_in_bytes * (size_t)output_h; + if (size == 0) + return 0; + + *ret_ptr = 0; + *ret_pitch = output_stride_in_bytes; + + if (output_pixels == 0) + { + ptr = STBIR_MALLOC(size, 0); + if (ptr == 0) + return 0; + + *ret_ptr = ptr; + *ret_pitch = pitch; + } + + return 1; +} + +STBIRDEF unsigned char *stbir_resize_uint8_linear(const unsigned char *input_pixels, int input_w, int input_h, int input_stride_in_bytes, + unsigned char *output_pixels, int output_w, int output_h, int output_stride_in_bytes, + stbir_pixel_layout pixel_layout) +{ + STBIR_RESIZE resize; + unsigned char *optr; + int opitch; + + if (!stbir__check_output_stuff((void **)&optr, &opitch, output_pixels, sizeof(unsigned char), output_w, output_h, output_stride_in_bytes, stbir__pixel_layout_convert_public_to_internal[pixel_layout])) + return 0; + + stbir_resize_init(&resize, + input_pixels, input_w, input_h, input_stride_in_bytes, + (optr) ? optr : output_pixels, output_w, output_h, opitch, + pixel_layout, STBIR_TYPE_UINT8); + + if (!stbir_resize_extended(&resize)) + { + if (optr) + STBIR_FREE(optr, 0); + return 0; + } + + return (optr) ? 
optr : output_pixels;
+}
+
+STBIRDEF unsigned char *stbir_resize_uint8_srgb(const unsigned char *input_pixels, int input_w, int input_h, int input_stride_in_bytes,
+                                                unsigned char *output_pixels, int output_w, int output_h, int output_stride_in_bytes,
+                                                stbir_pixel_layout pixel_layout)
+{
+  STBIR_RESIZE resize;
+  unsigned char *optr;
+  int opitch;
+
+  if (!stbir__check_output_stuff((void **)&optr, &opitch, output_pixels, sizeof(unsigned char), output_w, output_h, output_stride_in_bytes, stbir__pixel_layout_convert_public_to_internal[pixel_layout]))
+    return 0;
+
+  stbir_resize_init(&resize,
+                    input_pixels, input_w, input_h, input_stride_in_bytes,
+                    (optr) ? optr : output_pixels, output_w, output_h, opitch,
+                    pixel_layout, STBIR_TYPE_UINT8_SRGB);
+
+  if (!stbir_resize_extended(&resize))
+  {
+    if (optr)
+      STBIR_FREE(optr, 0);
+    return 0;
+  }
+
+  return (optr) ? optr : output_pixels;
+}
+
+STBIRDEF float *stbir_resize_float_linear(const float *input_pixels, int input_w, int input_h, int input_stride_in_bytes,
+                                          float *output_pixels, int output_w, int output_h, int output_stride_in_bytes,
+                                          stbir_pixel_layout pixel_layout)
+{
+  STBIR_RESIZE resize;
+  float *optr;
+  int opitch;
+
+  if (!stbir__check_output_stuff((void **)&optr, &opitch, output_pixels, sizeof(float), output_w, output_h, output_stride_in_bytes, stbir__pixel_layout_convert_public_to_internal[pixel_layout]))
+    return 0;
+
+  stbir_resize_init(&resize,
+                    input_pixels, input_w, input_h, input_stride_in_bytes,
+                    (optr) ? optr : output_pixels, output_w, output_h, opitch,
+                    pixel_layout, STBIR_TYPE_FLOAT);
+
+  if (!stbir_resize_extended(&resize))
+  {
+    if (optr)
+      STBIR_FREE(optr, 0);
+    return 0;
+  }
+
+  return (optr) ? optr : output_pixels;
+}
+
+STBIRDEF void *stbir_resize(const void *input_pixels, int input_w, int input_h, int input_stride_in_bytes,
+                            void *output_pixels, int output_w, int output_h, int output_stride_in_bytes,
+                            stbir_pixel_layout pixel_layout, stbir_datatype data_type,
+                            stbir_edge edge, stbir_filter filter)
+{
+  STBIR_RESIZE resize;
+  float *optr;
+  int opitch;
+
+  if (!stbir__check_output_stuff((void **)&optr, &opitch, output_pixels, stbir__type_size[data_type], output_w, output_h, output_stride_in_bytes, stbir__pixel_layout_convert_public_to_internal[pixel_layout]))
+    return 0;
+
+  stbir_resize_init(&resize,
+                    input_pixels, input_w, input_h, input_stride_in_bytes,
+                    (optr) ? optr : output_pixels, output_w, output_h, opitch,
+                    pixel_layout, data_type);
+
+  resize.horizontal_edge = edge;
+  resize.vertical_edge = edge;
+  resize.horizontal_filter = filter;
+  resize.vertical_filter = filter;
+
+  if (!stbir_resize_extended(&resize))
+  {
+    if (optr)
+      STBIR_FREE(optr, 0);
+    return 0;
+  }
+
+  return (optr) ? optr : output_pixels;
+}
+
+#ifdef STBIR_PROFILE
+
+STBIRDEF void stbir_resize_build_profile_info(STBIR_PROFILE_INFO *info, STBIR_RESIZE const *resize)
+{
+  static char const *bdescriptions[6] = {"Building", "Allocating", "Horizontal sampler", "Vertical sampler", "Coefficient cleanup", "Coefficient pivot"};
+  stbir__info *samp = resize->samplers;
+  int i;
+
+  typedef int testa[(STBIR__ARRAY_SIZE(bdescriptions) == (STBIR__ARRAY_SIZE(samp->profile.array) - 1)) ? 1 : -1];
+  typedef int testb[(sizeof(samp->profile.array) == (sizeof(samp->profile.named))) ? 1 : -1];
+  typedef int testc[(sizeof(info->clocks) >= (sizeof(samp->profile.named))) ?
1 : -1]; + + for (i = 0; i < STBIR__ARRAY_SIZE(bdescriptions); i++) + info->clocks[i] = samp->profile.array[i + 1]; + + info->total_clocks = samp->profile.named.total; + info->descriptions = bdescriptions; + info->count = STBIR__ARRAY_SIZE(bdescriptions); +} + +STBIRDEF void stbir_resize_split_profile_info(STBIR_PROFILE_INFO *info, STBIR_RESIZE const *resize, int split_start, int split_count) +{ + static char const *descriptions[7] = {"Looping", "Vertical sampling", "Horizontal sampling", "Scanline input", "Scanline output", "Alpha weighting", "Alpha unweighting"}; + stbir__per_split_info *split_info; + int s, i; + + typedef int testa[(STBIR__ARRAY_SIZE(descriptions) == (STBIR__ARRAY_SIZE(split_info->profile.array) - 1)) ? 1 : -1]; + typedef int testb[(sizeof(split_info->profile.array) == (sizeof(split_info->profile.named))) ? 1 : -1]; + typedef int testc[(sizeof(info->clocks) >= (sizeof(split_info->profile.named))) ? 1 : -1]; + + if (split_start == -1) + { + split_start = 0; + split_count = resize->samplers->splits; + } + + if ((split_start >= resize->splits) || (split_start < 0) || ((split_start + split_count) > resize->splits) || (split_count <= 0)) + { + info->total_clocks = 0; + info->descriptions = 0; + info->count = 0; + return; + } + + split_info = resize->samplers->split_info + split_start; + + // sum up the profile from all the splits + for (i = 0; i < STBIR__ARRAY_SIZE(descriptions); i++) + { + stbir_uint64 sum = 0; + for (s = 0; s < split_count; s++) + sum += split_info[s].profile.array[i + 1]; + info->clocks[i] = sum; + } + + info->total_clocks = split_info->profile.named.total; + info->descriptions = descriptions; + info->count = STBIR__ARRAY_SIZE(descriptions); +} + +STBIRDEF void stbir_resize_extended_profile_info(STBIR_PROFILE_INFO *info, STBIR_RESIZE const *resize) +{ + stbir_resize_split_profile_info(info, resize, -1, 0); +} + +#endif // STBIR_PROFILE + +#undef STBIR_BGR +#undef STBIR_1CHANNEL +#undef STBIR_2CHANNEL +#undef STBIR_RGB +#undef STBIR_RGBA +#undef STBIR_4CHANNEL +#undef STBIR_BGRA +#undef STBIR_ARGB +#undef STBIR_ABGR +#undef STBIR_RA +#undef STBIR_AR +#undef STBIR_RGBA_PM +#undef STBIR_BGRA_PM +#undef STBIR_ARGB_PM +#undef STBIR_ABGR_PM +#undef STBIR_RA_PM +#undef STBIR_AR_PM + #endif // STB_IMAGE_RESIZE_IMPLEMENTATION +#else // STB_IMAGE_RESIZE_HORIZONTALS&STB_IMAGE_RESIZE_DO_VERTICALS + +// we reinclude the header file to define all the horizontal functions +// specializing each function for the number of coeffs is 20-40% faster *OVERALL* + +// by including the header file again this way, we can still debug the functions + +#define STBIR_strs_join2(start, mid, end) start##mid##end +#define STBIR_strs_join1(start, mid, end) STBIR_strs_join2(start, mid, end) + +#define STBIR_strs_join24(start, mid1, mid2, end) start##mid1##mid2##end +#define STBIR_strs_join14(start, mid1, mid2, end) STBIR_strs_join24(start, mid1, mid2, end) + +#ifdef STB_IMAGE_RESIZE_DO_CODERS + +#ifdef stbir__decode_suffix +#define STBIR__CODER_NAME(name) STBIR_strs_join1(name, _, stbir__decode_suffix) +#else +#define STBIR__CODER_NAME(name) name +#endif + +#ifdef stbir__decode_swizzle +#define stbir__decode_simdf8_flip(reg) STBIR_strs_join1(STBIR_strs_join1(STBIR_strs_join1(STBIR_strs_join1(stbir__simdf8_0123to, stbir__decode_order0, stbir__decode_order1), stbir__decode_order2, stbir__decode_order3), stbir__decode_order0, stbir__decode_order1), stbir__decode_order2, stbir__decode_order3)(reg, reg) +#define stbir__decode_simdf4_flip(reg) 
STBIR_strs_join1(STBIR_strs_join1(stbir__simdf_0123to, stbir__decode_order0, stbir__decode_order1), stbir__decode_order2, stbir__decode_order3)(reg, reg) +#define stbir__encode_simdf8_unflip(reg) STBIR_strs_join1(STBIR_strs_join1(STBIR_strs_join1(STBIR_strs_join1(stbir__simdf8_0123to, stbir__encode_order0, stbir__encode_order1), stbir__encode_order2, stbir__encode_order3), stbir__encode_order0, stbir__encode_order1), stbir__encode_order2, stbir__encode_order3)(reg, reg) +#define stbir__encode_simdf4_unflip(reg) STBIR_strs_join1(STBIR_strs_join1(stbir__simdf_0123to, stbir__encode_order0, stbir__encode_order1), stbir__encode_order2, stbir__encode_order3)(reg, reg) +#else +#define stbir__decode_order0 0 +#define stbir__decode_order1 1 +#define stbir__decode_order2 2 +#define stbir__decode_order3 3 +#define stbir__encode_order0 0 +#define stbir__encode_order1 1 +#define stbir__encode_order2 2 +#define stbir__encode_order3 3 +#define stbir__decode_simdf8_flip(reg) +#define stbir__decode_simdf4_flip(reg) +#define stbir__encode_simdf8_unflip(reg) +#define stbir__encode_simdf4_unflip(reg) +#endif + +#ifdef STBIR_SIMD8 +#define stbir__encode_simdfX_unflip stbir__encode_simdf8_unflip +#else +#define stbir__encode_simdfX_unflip stbir__encode_simdf4_unflip +#endif + +static float *STBIR__CODER_NAME(stbir__decode_uint8_linear_scaled)(float *decodep, int width_times_channels, void const *inputp) +{ + float STBIR_STREAMOUT_PTR(*) decode = decodep; + float *decode_end = (float *)decode + width_times_channels; + unsigned char const *input = (unsigned char const *)inputp; + +#ifdef STBIR_SIMD + unsigned char const *end_input_m16 = input + width_times_channels - 16; + if (width_times_channels >= 16) + { + decode_end -= 16; + STBIR_NO_UNROLL_LOOP_START_INF_FOR + for (;;) + { +#ifdef STBIR_SIMD8 + stbir__simdi i; + stbir__simdi8 o0, o1; + stbir__simdf8 of0, of1; + STBIR_NO_UNROLL(decode); + stbir__simdi_load(i, input); + stbir__simdi8_expand_u8_to_u32(o0, o1, i); + stbir__simdi8_convert_i32_to_float(of0, o0); + stbir__simdi8_convert_i32_to_float(of1, o1); + stbir__simdf8_mult(of0, of0, STBIR_max_uint8_as_float_inverted8); + stbir__simdf8_mult(of1, of1, STBIR_max_uint8_as_float_inverted8); + stbir__decode_simdf8_flip(of0); + stbir__decode_simdf8_flip(of1); + stbir__simdf8_store(decode + 0, of0); + stbir__simdf8_store(decode + 8, of1); +#else + stbir__simdi i, o0, o1, o2, o3; + stbir__simdf of0, of1, of2, of3; + STBIR_NO_UNROLL(decode); + stbir__simdi_load(i, input); + stbir__simdi_expand_u8_to_u32(o0, o1, o2, o3, i); + stbir__simdi_convert_i32_to_float(of0, o0); + stbir__simdi_convert_i32_to_float(of1, o1); + stbir__simdi_convert_i32_to_float(of2, o2); + stbir__simdi_convert_i32_to_float(of3, o3); + stbir__simdf_mult(of0, of0, STBIR__CONSTF(STBIR_max_uint8_as_float_inverted)); + stbir__simdf_mult(of1, of1, STBIR__CONSTF(STBIR_max_uint8_as_float_inverted)); + stbir__simdf_mult(of2, of2, STBIR__CONSTF(STBIR_max_uint8_as_float_inverted)); + stbir__simdf_mult(of3, of3, STBIR__CONSTF(STBIR_max_uint8_as_float_inverted)); + stbir__decode_simdf4_flip(of0); + stbir__decode_simdf4_flip(of1); + stbir__decode_simdf4_flip(of2); + stbir__decode_simdf4_flip(of3); + stbir__simdf_store(decode + 0, of0); + stbir__simdf_store(decode + 4, of1); + stbir__simdf_store(decode + 8, of2); + stbir__simdf_store(decode + 12, of3); +#endif + decode += 16; + input += 16; + if (decode <= decode_end) + continue; + if (decode == (decode_end + 16)) + break; + decode = decode_end; // backup and do last couple + input = end_input_m16; + } + return 
decode_end + 16; + } +#endif + +// try to do blocks of 4 when you can +#if stbir__coder_min_num != 3 // doesn't divide cleanly by four + decode += 4; + STBIR_SIMD_NO_UNROLL_LOOP_START + while (decode <= decode_end) + { + STBIR_SIMD_NO_UNROLL(decode); + decode[0 - 4] = ((float)(input[stbir__decode_order0])) * stbir__max_uint8_as_float_inverted; + decode[1 - 4] = ((float)(input[stbir__decode_order1])) * stbir__max_uint8_as_float_inverted; + decode[2 - 4] = ((float)(input[stbir__decode_order2])) * stbir__max_uint8_as_float_inverted; + decode[3 - 4] = ((float)(input[stbir__decode_order3])) * stbir__max_uint8_as_float_inverted; + decode += 4; + input += 4; + } + decode -= 4; +#endif + +// do the remnants +#if stbir__coder_min_num < 4 + STBIR_NO_UNROLL_LOOP_START + while (decode < decode_end) + { + STBIR_NO_UNROLL(decode); + decode[0] = ((float)(input[stbir__decode_order0])) * stbir__max_uint8_as_float_inverted; +#if stbir__coder_min_num >= 2 + decode[1] = ((float)(input[stbir__decode_order1])) * stbir__max_uint8_as_float_inverted; +#endif +#if stbir__coder_min_num >= 3 + decode[2] = ((float)(input[stbir__decode_order2])) * stbir__max_uint8_as_float_inverted; +#endif + decode += stbir__coder_min_num; + input += stbir__coder_min_num; + } +#endif + + return decode_end; +} + +static void STBIR__CODER_NAME(stbir__encode_uint8_linear_scaled)(void *outputp, int width_times_channels, float const *encode) +{ + unsigned char STBIR_SIMD_STREAMOUT_PTR(*) output = (unsigned char *)outputp; + unsigned char *end_output = ((unsigned char *)output) + width_times_channels; + +#ifdef STBIR_SIMD + if (width_times_channels >= stbir__simdfX_float_count * 2) + { + float const *end_encode_m8 = encode + width_times_channels - stbir__simdfX_float_count * 2; + end_output -= stbir__simdfX_float_count * 2; + STBIR_NO_UNROLL_LOOP_START_INF_FOR + for (;;) + { + stbir__simdfX e0, e1; + stbir__simdi i; + STBIR_SIMD_NO_UNROLL(encode); + stbir__simdfX_madd_mem(e0, STBIR_simd_point5X, STBIR_max_uint8_as_floatX, encode); + stbir__simdfX_madd_mem(e1, STBIR_simd_point5X, STBIR_max_uint8_as_floatX, encode + stbir__simdfX_float_count); + stbir__encode_simdfX_unflip(e0); + stbir__encode_simdfX_unflip(e1); +#ifdef STBIR_SIMD8 + stbir__simdf8_pack_to_16bytes(i, e0, e1); + stbir__simdi_store(output, i); +#else + stbir__simdf_pack_to_8bytes(i, e0, e1); + stbir__simdi_store2(output, i); +#endif + encode += stbir__simdfX_float_count * 2; + output += stbir__simdfX_float_count * 2; + if (output <= end_output) + continue; + if (output == (end_output + stbir__simdfX_float_count * 2)) + break; + output = end_output; // backup and do last couple + encode = end_encode_m8; + } + return; + } + +// try to do blocks of 4 when you can +#if stbir__coder_min_num != 3 // doesn't divide cleanly by four + output += 4; + STBIR_NO_UNROLL_LOOP_START + while (output <= end_output) + { + stbir__simdf e0; + stbir__simdi i0; + STBIR_NO_UNROLL(encode); + stbir__simdf_load(e0, encode); + stbir__simdf_madd(e0, STBIR__CONSTF(STBIR_simd_point5), STBIR__CONSTF(STBIR_max_uint8_as_float), e0); + stbir__encode_simdf4_unflip(e0); + stbir__simdf_pack_to_8bytes(i0, e0, e0); // only use first 4 + *(int *)(output - 4) = stbir__simdi_to_int(i0); + output += 4; + encode += 4; + } + output -= 4; +#endif + +// do the remnants +#if stbir__coder_min_num < 4 + STBIR_NO_UNROLL_LOOP_START + while (output < end_output) + { + stbir__simdf e0; + STBIR_NO_UNROLL(encode); + stbir__simdf_madd1_mem(e0, STBIR__CONSTF(STBIR_simd_point5), STBIR__CONSTF(STBIR_max_uint8_as_float), encode + 
stbir__encode_order0); + output[0] = stbir__simdf_convert_float_to_uint8(e0); +#if stbir__coder_min_num >= 2 + stbir__simdf_madd1_mem(e0, STBIR__CONSTF(STBIR_simd_point5), STBIR__CONSTF(STBIR_max_uint8_as_float), encode + stbir__encode_order1); + output[1] = stbir__simdf_convert_float_to_uint8(e0); +#endif +#if stbir__coder_min_num >= 3 + stbir__simdf_madd1_mem(e0, STBIR__CONSTF(STBIR_simd_point5), STBIR__CONSTF(STBIR_max_uint8_as_float), encode + stbir__encode_order2); + output[2] = stbir__simdf_convert_float_to_uint8(e0); +#endif + output += stbir__coder_min_num; + encode += stbir__coder_min_num; + } +#endif + +#else + +// try to do blocks of 4 when you can +#if stbir__coder_min_num != 3 // doesn't divide cleanly by four + output += 4; + while (output <= end_output) + { + float f; + f = encode[stbir__encode_order0] * stbir__max_uint8_as_float + 0.5f; + STBIR_CLAMP(f, 0, 255); + output[0 - 4] = (unsigned char)f; + f = encode[stbir__encode_order1] * stbir__max_uint8_as_float + 0.5f; + STBIR_CLAMP(f, 0, 255); + output[1 - 4] = (unsigned char)f; + f = encode[stbir__encode_order2] * stbir__max_uint8_as_float + 0.5f; + STBIR_CLAMP(f, 0, 255); + output[2 - 4] = (unsigned char)f; + f = encode[stbir__encode_order3] * stbir__max_uint8_as_float + 0.5f; + STBIR_CLAMP(f, 0, 255); + output[3 - 4] = (unsigned char)f; + output += 4; + encode += 4; + } + output -= 4; +#endif + +// do the remnants +#if stbir__coder_min_num < 4 + STBIR_NO_UNROLL_LOOP_START + while (output < end_output) + { + float f; + STBIR_NO_UNROLL(encode); + f = encode[stbir__encode_order0] * stbir__max_uint8_as_float + 0.5f; + STBIR_CLAMP(f, 0, 255); + output[0] = (unsigned char)f; +#if stbir__coder_min_num >= 2 + f = encode[stbir__encode_order1] * stbir__max_uint8_as_float + 0.5f; + STBIR_CLAMP(f, 0, 255); + output[1] = (unsigned char)f; +#endif +#if stbir__coder_min_num >= 3 + f = encode[stbir__encode_order2] * stbir__max_uint8_as_float + 0.5f; + STBIR_CLAMP(f, 0, 255); + output[2] = (unsigned char)f; +#endif + output += stbir__coder_min_num; + encode += stbir__coder_min_num; + } +#endif +#endif +} + +static float *STBIR__CODER_NAME(stbir__decode_uint8_linear)(float *decodep, int width_times_channels, void const *inputp) +{ + float STBIR_STREAMOUT_PTR(*) decode = decodep; + float *decode_end = (float *)decode + width_times_channels; + unsigned char const *input = (unsigned char const *)inputp; + +#ifdef STBIR_SIMD + unsigned char const *end_input_m16 = input + width_times_channels - 16; + if (width_times_channels >= 16) + { + decode_end -= 16; + STBIR_NO_UNROLL_LOOP_START_INF_FOR + for (;;) + { +#ifdef STBIR_SIMD8 + stbir__simdi i; + stbir__simdi8 o0, o1; + stbir__simdf8 of0, of1; + STBIR_NO_UNROLL(decode); + stbir__simdi_load(i, input); + stbir__simdi8_expand_u8_to_u32(o0, o1, i); + stbir__simdi8_convert_i32_to_float(of0, o0); + stbir__simdi8_convert_i32_to_float(of1, o1); + stbir__decode_simdf8_flip(of0); + stbir__decode_simdf8_flip(of1); + stbir__simdf8_store(decode + 0, of0); + stbir__simdf8_store(decode + 8, of1); +#else + stbir__simdi i, o0, o1, o2, o3; + stbir__simdf of0, of1, of2, of3; + STBIR_NO_UNROLL(decode); + stbir__simdi_load(i, input); + stbir__simdi_expand_u8_to_u32(o0, o1, o2, o3, i); + stbir__simdi_convert_i32_to_float(of0, o0); + stbir__simdi_convert_i32_to_float(of1, o1); + stbir__simdi_convert_i32_to_float(of2, o2); + stbir__simdi_convert_i32_to_float(of3, o3); + stbir__decode_simdf4_flip(of0); + stbir__decode_simdf4_flip(of1); + stbir__decode_simdf4_flip(of2); + stbir__decode_simdf4_flip(of3); + 
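+ /* the flip macros above un-swizzle channel order in-register; they expand to
+    nothing when stbir__decode_swizzle is not defined, so the stores below
+    write the converted floats straight through */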
stbir__simdf_store(decode + 0, of0); + stbir__simdf_store(decode + 4, of1); + stbir__simdf_store(decode + 8, of2); + stbir__simdf_store(decode + 12, of3); +#endif + decode += 16; + input += 16; + if (decode <= decode_end) + continue; + if (decode == (decode_end + 16)) + break; + decode = decode_end; // backup and do last couple + input = end_input_m16; + } + return decode_end + 16; + } +#endif + +// try to do blocks of 4 when you can +#if stbir__coder_min_num != 3 // doesn't divide cleanly by four + decode += 4; + STBIR_SIMD_NO_UNROLL_LOOP_START + while (decode <= decode_end) + { + STBIR_SIMD_NO_UNROLL(decode); + decode[0 - 4] = ((float)(input[stbir__decode_order0])); + decode[1 - 4] = ((float)(input[stbir__decode_order1])); + decode[2 - 4] = ((float)(input[stbir__decode_order2])); + decode[3 - 4] = ((float)(input[stbir__decode_order3])); + decode += 4; + input += 4; + } + decode -= 4; +#endif + +// do the remnants +#if stbir__coder_min_num < 4 + STBIR_NO_UNROLL_LOOP_START + while (decode < decode_end) + { + STBIR_NO_UNROLL(decode); + decode[0] = ((float)(input[stbir__decode_order0])); +#if stbir__coder_min_num >= 2 + decode[1] = ((float)(input[stbir__decode_order1])); +#endif +#if stbir__coder_min_num >= 3 + decode[2] = ((float)(input[stbir__decode_order2])); +#endif + decode += stbir__coder_min_num; + input += stbir__coder_min_num; + } +#endif + return decode_end; +} + +static void STBIR__CODER_NAME(stbir__encode_uint8_linear)(void *outputp, int width_times_channels, float const *encode) +{ + unsigned char STBIR_SIMD_STREAMOUT_PTR(*) output = (unsigned char *)outputp; + unsigned char *end_output = ((unsigned char *)output) + width_times_channels; + +#ifdef STBIR_SIMD + if (width_times_channels >= stbir__simdfX_float_count * 2) + { + float const *end_encode_m8 = encode + width_times_channels - stbir__simdfX_float_count * 2; + end_output -= stbir__simdfX_float_count * 2; + STBIR_SIMD_NO_UNROLL_LOOP_START_INF_FOR + for (;;) + { + stbir__simdfX e0, e1; + stbir__simdi i; + STBIR_SIMD_NO_UNROLL(encode); + stbir__simdfX_add_mem(e0, STBIR_simd_point5X, encode); + stbir__simdfX_add_mem(e1, STBIR_simd_point5X, encode + stbir__simdfX_float_count); + stbir__encode_simdfX_unflip(e0); + stbir__encode_simdfX_unflip(e1); +#ifdef STBIR_SIMD8 + stbir__simdf8_pack_to_16bytes(i, e0, e1); + stbir__simdi_store(output, i); +#else + stbir__simdf_pack_to_8bytes(i, e0, e1); + stbir__simdi_store2(output, i); +#endif + encode += stbir__simdfX_float_count * 2; + output += stbir__simdfX_float_count * 2; + if (output <= end_output) + continue; + if (output == (end_output + stbir__simdfX_float_count * 2)) + break; + output = end_output; // backup and do last couple + encode = end_encode_m8; + } + return; + } + +// try to do blocks of 4 when you can +#if stbir__coder_min_num != 3 // doesn't divide cleanly by four + output += 4; + STBIR_NO_UNROLL_LOOP_START + while (output <= end_output) + { + stbir__simdf e0; + stbir__simdi i0; + STBIR_NO_UNROLL(encode); + stbir__simdf_load(e0, encode); + stbir__simdf_add(e0, STBIR__CONSTF(STBIR_simd_point5), e0); + stbir__encode_simdf4_unflip(e0); + stbir__simdf_pack_to_8bytes(i0, e0, e0); // only use first 4 + *(int *)(output - 4) = stbir__simdi_to_int(i0); + output += 4; + encode += 4; + } + output -= 4; +#endif + +#else + +// try to do blocks of 4 when you can +#if stbir__coder_min_num != 3 // doesn't divide cleanly by four + output += 4; + while (output <= end_output) + { + float f; + f = encode[stbir__encode_order0] + 0.5f; + STBIR_CLAMP(f, 0, 255); + output[0 - 4] = (unsigned 
char)f; + f = encode[stbir__encode_order1] + 0.5f; + STBIR_CLAMP(f, 0, 255); + output[1 - 4] = (unsigned char)f; + f = encode[stbir__encode_order2] + 0.5f; + STBIR_CLAMP(f, 0, 255); + output[2 - 4] = (unsigned char)f; + f = encode[stbir__encode_order3] + 0.5f; + STBIR_CLAMP(f, 0, 255); + output[3 - 4] = (unsigned char)f; + output += 4; + encode += 4; + } + output -= 4; +#endif + +#endif + +// do the remnants +#if stbir__coder_min_num < 4 + STBIR_NO_UNROLL_LOOP_START + while (output < end_output) + { + float f; + STBIR_NO_UNROLL(encode); + f = encode[stbir__encode_order0] + 0.5f; + STBIR_CLAMP(f, 0, 255); + output[0] = (unsigned char)f; +#if stbir__coder_min_num >= 2 + f = encode[stbir__encode_order1] + 0.5f; + STBIR_CLAMP(f, 0, 255); + output[1] = (unsigned char)f; +#endif +#if stbir__coder_min_num >= 3 + f = encode[stbir__encode_order2] + 0.5f; + STBIR_CLAMP(f, 0, 255); + output[2] = (unsigned char)f; +#endif + output += stbir__coder_min_num; + encode += stbir__coder_min_num; + } +#endif +} + +static float *STBIR__CODER_NAME(stbir__decode_uint8_srgb)(float *decodep, int width_times_channels, void const *inputp) +{ + float STBIR_STREAMOUT_PTR(*) decode = decodep; + float *decode_end = (float *)decode + width_times_channels; + unsigned char const *input = (unsigned char const *)inputp; + +// try to do blocks of 4 when you can +#if stbir__coder_min_num != 3 // doesn't divide cleanly by four + decode += 4; + while (decode <= decode_end) + { + decode[0 - 4] = stbir__srgb_uchar_to_linear_float[input[stbir__decode_order0]]; + decode[1 - 4] = stbir__srgb_uchar_to_linear_float[input[stbir__decode_order1]]; + decode[2 - 4] = stbir__srgb_uchar_to_linear_float[input[stbir__decode_order2]]; + decode[3 - 4] = stbir__srgb_uchar_to_linear_float[input[stbir__decode_order3]]; + decode += 4; + input += 4; + } + decode -= 4; +#endif + +// do the remnants +#if stbir__coder_min_num < 4 + STBIR_NO_UNROLL_LOOP_START + while (decode < decode_end) + { + STBIR_NO_UNROLL(decode); + decode[0] = stbir__srgb_uchar_to_linear_float[input[stbir__decode_order0]]; +#if stbir__coder_min_num >= 2 + decode[1] = stbir__srgb_uchar_to_linear_float[input[stbir__decode_order1]]; +#endif +#if stbir__coder_min_num >= 3 + decode[2] = stbir__srgb_uchar_to_linear_float[input[stbir__decode_order2]]; +#endif + decode += stbir__coder_min_num; + input += stbir__coder_min_num; + } +#endif + return decode_end; +} + +#define stbir__min_max_shift20(i, f) \ + stbir__simdf_max(f, f, stbir_simdf_casti(STBIR__CONSTI(STBIR_almost_zero))); \ + stbir__simdf_min(f, f, stbir_simdf_casti(STBIR__CONSTI(STBIR_almost_one))); \ + stbir__simdi_32shr(i, stbir_simdi_castf(f), 20); + +#define stbir__scale_and_convert(i, f) \ + stbir__simdf_madd(f, STBIR__CONSTF(STBIR_simd_point5), STBIR__CONSTF(STBIR_max_uint8_as_float), f); \ + stbir__simdf_max(f, f, stbir__simdf_zeroP()); \ + stbir__simdf_min(f, f, STBIR__CONSTF(STBIR_max_uint8_as_float)); \ + stbir__simdf_convert_float_to_i32(i, f); + +#define stbir__linear_to_srgb_finish(i, f) \ + { \ + stbir__simdi temp; \ + stbir__simdi_32shr(temp, stbir_simdi_castf(f), 12); \ + stbir__simdi_and(temp, temp, STBIR__CONSTI(STBIR_mastissa_mask)); \ + stbir__simdi_or(temp, temp, STBIR__CONSTI(STBIR_topscale)); \ + stbir__simdi_16madd(i, i, temp); \ + stbir__simdi_32shr(i, i, 16); \ + } + +#define stbir__simdi_table_lookup2(v0, v1, table) \ + { \ + stbir__simdi_u32 temp0, temp1; \ + temp0.m128i_i128 = v0; \ + temp1.m128i_i128 = v1; \ + temp0.m128i_u32[0] = table[temp0.m128i_i32[0]]; \ + temp0.m128i_u32[1] = 
table[temp0.m128i_i32[1]]; \ + temp0.m128i_u32[2] = table[temp0.m128i_i32[2]]; \ + temp0.m128i_u32[3] = table[temp0.m128i_i32[3]]; \ + temp1.m128i_u32[0] = table[temp1.m128i_i32[0]]; \ + temp1.m128i_u32[1] = table[temp1.m128i_i32[1]]; \ + temp1.m128i_u32[2] = table[temp1.m128i_i32[2]]; \ + temp1.m128i_u32[3] = table[temp1.m128i_i32[3]]; \ + v0 = temp0.m128i_i128; \ + v1 = temp1.m128i_i128; \ + } + +#define stbir__simdi_table_lookup3(v0, v1, v2, table) \ + { \ + stbir__simdi_u32 temp0, temp1, temp2; \ + temp0.m128i_i128 = v0; \ + temp1.m128i_i128 = v1; \ + temp2.m128i_i128 = v2; \ + temp0.m128i_u32[0] = table[temp0.m128i_i32[0]]; \ + temp0.m128i_u32[1] = table[temp0.m128i_i32[1]]; \ + temp0.m128i_u32[2] = table[temp0.m128i_i32[2]]; \ + temp0.m128i_u32[3] = table[temp0.m128i_i32[3]]; \ + temp1.m128i_u32[0] = table[temp1.m128i_i32[0]]; \ + temp1.m128i_u32[1] = table[temp1.m128i_i32[1]]; \ + temp1.m128i_u32[2] = table[temp1.m128i_i32[2]]; \ + temp1.m128i_u32[3] = table[temp1.m128i_i32[3]]; \ + temp2.m128i_u32[0] = table[temp2.m128i_i32[0]]; \ + temp2.m128i_u32[1] = table[temp2.m128i_i32[1]]; \ + temp2.m128i_u32[2] = table[temp2.m128i_i32[2]]; \ + temp2.m128i_u32[3] = table[temp2.m128i_i32[3]]; \ + v0 = temp0.m128i_i128; \ + v1 = temp1.m128i_i128; \ + v2 = temp2.m128i_i128; \ + } + +#define stbir__simdi_table_lookup4(v0, v1, v2, v3, table) \ + { \ + stbir__simdi_u32 temp0, temp1, temp2, temp3; \ + temp0.m128i_i128 = v0; \ + temp1.m128i_i128 = v1; \ + temp2.m128i_i128 = v2; \ + temp3.m128i_i128 = v3; \ + temp0.m128i_u32[0] = table[temp0.m128i_i32[0]]; \ + temp0.m128i_u32[1] = table[temp0.m128i_i32[1]]; \ + temp0.m128i_u32[2] = table[temp0.m128i_i32[2]]; \ + temp0.m128i_u32[3] = table[temp0.m128i_i32[3]]; \ + temp1.m128i_u32[0] = table[temp1.m128i_i32[0]]; \ + temp1.m128i_u32[1] = table[temp1.m128i_i32[1]]; \ + temp1.m128i_u32[2] = table[temp1.m128i_i32[2]]; \ + temp1.m128i_u32[3] = table[temp1.m128i_i32[3]]; \ + temp2.m128i_u32[0] = table[temp2.m128i_i32[0]]; \ + temp2.m128i_u32[1] = table[temp2.m128i_i32[1]]; \ + temp2.m128i_u32[2] = table[temp2.m128i_i32[2]]; \ + temp2.m128i_u32[3] = table[temp2.m128i_i32[3]]; \ + temp3.m128i_u32[0] = table[temp3.m128i_i32[0]]; \ + temp3.m128i_u32[1] = table[temp3.m128i_i32[1]]; \ + temp3.m128i_u32[2] = table[temp3.m128i_i32[2]]; \ + temp3.m128i_u32[3] = table[temp3.m128i_i32[3]]; \ + v0 = temp0.m128i_i128; \ + v1 = temp1.m128i_i128; \ + v2 = temp2.m128i_i128; \ + v3 = temp3.m128i_i128; \ + } + +static void STBIR__CODER_NAME(stbir__encode_uint8_srgb)(void *outputp, int width_times_channels, float const *encode) +{ + unsigned char STBIR_SIMD_STREAMOUT_PTR(*) output = (unsigned char *)outputp; + unsigned char *end_output = ((unsigned char *)output) + width_times_channels; + +#ifdef STBIR_SIMD + + if (width_times_channels >= 16) + { + float const *end_encode_m16 = encode + width_times_channels - 16; + end_output -= 16; + STBIR_SIMD_NO_UNROLL_LOOP_START_INF_FOR + for (;;) + { + stbir__simdf f0, f1, f2, f3; + stbir__simdi i0, i1, i2, i3; + STBIR_SIMD_NO_UNROLL(encode); + + stbir__simdf_load4_transposed(f0, f1, f2, f3, encode); + + stbir__min_max_shift20(i0, f0); + stbir__min_max_shift20(i1, f1); + stbir__min_max_shift20(i2, f2); + stbir__min_max_shift20(i3, f3); + + stbir__simdi_table_lookup4(i0, i1, i2, i3, (fp32_to_srgb8_tab4 - (127 - 13) * 8)); + + stbir__linear_to_srgb_finish(i0, f0); + stbir__linear_to_srgb_finish(i1, f1); + stbir__linear_to_srgb_finish(i2, f2); + stbir__linear_to_srgb_finish(i3, f3); + + stbir__interleave_pack_and_store_16_u8(output, 
STBIR_strs_join1(i, , stbir__encode_order0), STBIR_strs_join1(i, , stbir__encode_order1), STBIR_strs_join1(i, , stbir__encode_order2), STBIR_strs_join1(i, , stbir__encode_order3)); + + encode += 16; + output += 16; + if (output <= end_output) + continue; + if (output == (end_output + 16)) + break; + output = end_output; // backup and do last couple + encode = end_encode_m16; + } + return; + } +#endif + +// try to do blocks of 4 when you can +#if stbir__coder_min_num != 3 // doesn't divide cleanly by four + output += 4; + STBIR_SIMD_NO_UNROLL_LOOP_START + while (output <= end_output) + { + STBIR_SIMD_NO_UNROLL(encode); + + output[0 - 4] = stbir__linear_to_srgb_uchar(encode[stbir__encode_order0]); + output[1 - 4] = stbir__linear_to_srgb_uchar(encode[stbir__encode_order1]); + output[2 - 4] = stbir__linear_to_srgb_uchar(encode[stbir__encode_order2]); + output[3 - 4] = stbir__linear_to_srgb_uchar(encode[stbir__encode_order3]); + + output += 4; + encode += 4; + } + output -= 4; +#endif + +// do the remnants +#if stbir__coder_min_num < 4 + STBIR_NO_UNROLL_LOOP_START + while (output < end_output) + { + STBIR_NO_UNROLL(encode); + output[0] = stbir__linear_to_srgb_uchar(encode[stbir__encode_order0]); +#if stbir__coder_min_num >= 2 + output[1] = stbir__linear_to_srgb_uchar(encode[stbir__encode_order1]); +#endif +#if stbir__coder_min_num >= 3 + output[2] = stbir__linear_to_srgb_uchar(encode[stbir__encode_order2]); +#endif + output += stbir__coder_min_num; + encode += stbir__coder_min_num; + } +#endif +} + +#if (stbir__coder_min_num == 4) || ((stbir__coder_min_num == 1) && (!defined(stbir__decode_swizzle))) + +static float *STBIR__CODER_NAME(stbir__decode_uint8_srgb4_linearalpha)(float *decodep, int width_times_channels, void const *inputp) +{ + float STBIR_STREAMOUT_PTR(*) decode = decodep; + float *decode_end = (float *)decode + width_times_channels; + unsigned char const *input = (unsigned char const *)inputp; + + do + { + decode[0] = stbir__srgb_uchar_to_linear_float[input[stbir__decode_order0]]; + decode[1] = stbir__srgb_uchar_to_linear_float[input[stbir__decode_order1]]; + decode[2] = stbir__srgb_uchar_to_linear_float[input[stbir__decode_order2]]; + decode[3] = ((float)input[stbir__decode_order3]) * stbir__max_uint8_as_float_inverted; + input += 4; + decode += 4; + } while (decode < decode_end); + return decode_end; +} + +static void STBIR__CODER_NAME(stbir__encode_uint8_srgb4_linearalpha)(void *outputp, int width_times_channels, float const *encode) +{ + unsigned char STBIR_SIMD_STREAMOUT_PTR(*) output = (unsigned char *)outputp; + unsigned char *end_output = ((unsigned char *)output) + width_times_channels; + +#ifdef STBIR_SIMD + + if (width_times_channels >= 16) + { + float const *end_encode_m16 = encode + width_times_channels - 16; + end_output -= 16; + STBIR_SIMD_NO_UNROLL_LOOP_START_INF_FOR + for (;;) + { + stbir__simdf f0, f1, f2, f3; + stbir__simdi i0, i1, i2, i3; + + STBIR_SIMD_NO_UNROLL(encode); + stbir__simdf_load4_transposed(f0, f1, f2, f3, encode); + + stbir__min_max_shift20(i0, f0); + stbir__min_max_shift20(i1, f1); + stbir__min_max_shift20(i2, f2); + stbir__scale_and_convert(i3, f3); + + stbir__simdi_table_lookup3(i0, i1, i2, (fp32_to_srgb8_tab4 - (127 - 13) * 8)); + + stbir__linear_to_srgb_finish(i0, f0); + stbir__linear_to_srgb_finish(i1, f1); + stbir__linear_to_srgb_finish(i2, f2); + + stbir__interleave_pack_and_store_16_u8(output, STBIR_strs_join1(i, , stbir__encode_order0), STBIR_strs_join1(i, , stbir__encode_order1), STBIR_strs_join1(i, , stbir__encode_order2), 
STBIR_strs_join1(i, , stbir__encode_order3));
+
+      output += 16;
+      encode += 16;
+
+      if (output <= end_output)
+        continue;
+      if (output == (end_output + 16))
+        break;
+      output = end_output; // backup and do last couple
+      encode = end_encode_m16;
+    }
+    return;
+  }
+#endif
+
+  STBIR_SIMD_NO_UNROLL_LOOP_START
+  do
+  {
+    float f;
+    STBIR_SIMD_NO_UNROLL(encode);
+
+    output[stbir__decode_order0] = stbir__linear_to_srgb_uchar(encode[0]);
+    output[stbir__decode_order1] = stbir__linear_to_srgb_uchar(encode[1]);
+    output[stbir__decode_order2] = stbir__linear_to_srgb_uchar(encode[2]);
+
+    f = encode[3] * stbir__max_uint8_as_float + 0.5f;
+    STBIR_CLAMP(f, 0, 255);
+    output[stbir__decode_order3] = (unsigned char)f;
+
+    output += 4;
+    encode += 4;
+  } while (output < end_output);
+}
+
+#endif
+
+#if (stbir__coder_min_num == 2) || ((stbir__coder_min_num == 1) && (!defined(stbir__decode_swizzle)))
+
+static float *STBIR__CODER_NAME(stbir__decode_uint8_srgb2_linearalpha)(float *decodep, int width_times_channels, void const *inputp)
+{
+  float STBIR_STREAMOUT_PTR(*) decode = decodep;
+  float *decode_end = (float *)decode + width_times_channels;
+  unsigned char const *input = (unsigned char const *)inputp;
+
+  decode += 4;
+  while (decode <= decode_end)
+  {
+    decode[0 - 4] = stbir__srgb_uchar_to_linear_float[input[stbir__decode_order0]];
+    decode[1 - 4] = ((float)input[stbir__decode_order1]) * stbir__max_uint8_as_float_inverted;
+    decode[2 - 4] = stbir__srgb_uchar_to_linear_float[input[stbir__decode_order0 + 2]];
+    decode[3 - 4] = ((float)input[stbir__decode_order1 + 2]) * stbir__max_uint8_as_float_inverted;
+    input += 4;
+    decode += 4;
+  }
+  decode -= 4;
+  if (decode < decode_end)
+  {
+    decode[0] = stbir__srgb_uchar_to_linear_float[input[stbir__decode_order0]];
+    decode[1] = ((float)input[stbir__decode_order1]) * stbir__max_uint8_as_float_inverted;
+  }
+  return decode_end;
+}
+
+static void STBIR__CODER_NAME(stbir__encode_uint8_srgb2_linearalpha)(void *outputp, int width_times_channels, float const *encode)
+{
+  unsigned char STBIR_SIMD_STREAMOUT_PTR(*) output = (unsigned char *)outputp;
+  unsigned char *end_output = ((unsigned char *)output) + width_times_channels;
+
+#ifdef STBIR_SIMD
+
+  if (width_times_channels >= 16)
+  {
+    float const *end_encode_m16 = encode + width_times_channels - 16;
+    end_output -= 16;
+    STBIR_SIMD_NO_UNROLL_LOOP_START_INF_FOR
+    for (;;)
+    {
+      stbir__simdf f0, f1, f2, f3;
+      stbir__simdi i0, i1, i2, i3;
+
+      STBIR_SIMD_NO_UNROLL(encode);
+      stbir__simdf_load4_transposed(f0, f1, f2, f3, encode);
+
+      stbir__min_max_shift20(i0, f0);
+      stbir__scale_and_convert(i1, f1);
+      stbir__min_max_shift20(i2, f2);
+      stbir__scale_and_convert(i3, f3);
+
+      stbir__simdi_table_lookup2(i0, i2, (fp32_to_srgb8_tab4 - (127 - 13) * 8));
+
+      stbir__linear_to_srgb_finish(i0, f0);
+      stbir__linear_to_srgb_finish(i2, f2);
+
+      stbir__interleave_pack_and_store_16_u8(output, STBIR_strs_join1(i, , stbir__encode_order0), STBIR_strs_join1(i, , stbir__encode_order1), STBIR_strs_join1(i, , stbir__encode_order2), STBIR_strs_join1(i, , stbir__encode_order3));
+
+      output += 16;
+      encode += 16;
+      if (output <= end_output)
+        continue;
+      if (output == (end_output + 16))
+        break;
+      output = end_output; // backup and do last couple
+      encode = end_encode_m16;
+    }
+    return;
+  }
+#endif
+
+  STBIR_SIMD_NO_UNROLL_LOOP_START
+  do
+  {
+    float f;
+    STBIR_SIMD_NO_UNROLL(encode);
+
+    output[stbir__decode_order0] = stbir__linear_to_srgb_uchar(encode[0]);
+
+    f = encode[1] * stbir__max_uint8_as_float + 0.5f;
+    STBIR_CLAMP(f, 0, 255); +
output[stbir__decode_order1] = (unsigned char)f; + + output += 2; + encode += 2; + } while (output < end_output); +} + +#endif + +static float *STBIR__CODER_NAME(stbir__decode_uint16_linear_scaled)(float *decodep, int width_times_channels, void const *inputp) +{ + float STBIR_STREAMOUT_PTR(*) decode = decodep; + float *decode_end = (float *)decode + width_times_channels; + unsigned short const *input = (unsigned short const *)inputp; + +#ifdef STBIR_SIMD + unsigned short const *end_input_m8 = input + width_times_channels - 8; + if (width_times_channels >= 8) + { + decode_end -= 8; + STBIR_NO_UNROLL_LOOP_START_INF_FOR + for (;;) + { +#ifdef STBIR_SIMD8 + stbir__simdi i; + stbir__simdi8 o; + stbir__simdf8 of; + STBIR_NO_UNROLL(decode); + stbir__simdi_load(i, input); + stbir__simdi8_expand_u16_to_u32(o, i); + stbir__simdi8_convert_i32_to_float(of, o); + stbir__simdf8_mult(of, of, STBIR_max_uint16_as_float_inverted8); + stbir__decode_simdf8_flip(of); + stbir__simdf8_store(decode + 0, of); +#else + stbir__simdi i, o0, o1; + stbir__simdf of0, of1; + STBIR_NO_UNROLL(decode); + stbir__simdi_load(i, input); + stbir__simdi_expand_u16_to_u32(o0, o1, i); + stbir__simdi_convert_i32_to_float(of0, o0); + stbir__simdi_convert_i32_to_float(of1, o1); + stbir__simdf_mult(of0, of0, STBIR__CONSTF(STBIR_max_uint16_as_float_inverted)); + stbir__simdf_mult(of1, of1, STBIR__CONSTF(STBIR_max_uint16_as_float_inverted)); + stbir__decode_simdf4_flip(of0); + stbir__decode_simdf4_flip(of1); + stbir__simdf_store(decode + 0, of0); + stbir__simdf_store(decode + 4, of1); +#endif + decode += 8; + input += 8; + if (decode <= decode_end) + continue; + if (decode == (decode_end + 8)) + break; + decode = decode_end; // backup and do last couple + input = end_input_m8; + } + return decode_end + 8; + } +#endif + +// try to do blocks of 4 when you can +#if stbir__coder_min_num != 3 // doesn't divide cleanly by four + decode += 4; + STBIR_SIMD_NO_UNROLL_LOOP_START + while (decode <= decode_end) + { + STBIR_SIMD_NO_UNROLL(decode); + decode[0 - 4] = ((float)(input[stbir__decode_order0])) * stbir__max_uint16_as_float_inverted; + decode[1 - 4] = ((float)(input[stbir__decode_order1])) * stbir__max_uint16_as_float_inverted; + decode[2 - 4] = ((float)(input[stbir__decode_order2])) * stbir__max_uint16_as_float_inverted; + decode[3 - 4] = ((float)(input[stbir__decode_order3])) * stbir__max_uint16_as_float_inverted; + decode += 4; + input += 4; + } + decode -= 4; +#endif + +// do the remnants +#if stbir__coder_min_num < 4 + STBIR_NO_UNROLL_LOOP_START + while (decode < decode_end) + { + STBIR_NO_UNROLL(decode); + decode[0] = ((float)(input[stbir__decode_order0])) * stbir__max_uint16_as_float_inverted; +#if stbir__coder_min_num >= 2 + decode[1] = ((float)(input[stbir__decode_order1])) * stbir__max_uint16_as_float_inverted; +#endif +#if stbir__coder_min_num >= 3 + decode[2] = ((float)(input[stbir__decode_order2])) * stbir__max_uint16_as_float_inverted; +#endif + decode += stbir__coder_min_num; + input += stbir__coder_min_num; + } +#endif + return decode_end; +} + +static void STBIR__CODER_NAME(stbir__encode_uint16_linear_scaled)(void *outputp, int width_times_channels, float const *encode) +{ + unsigned short STBIR_SIMD_STREAMOUT_PTR(*) output = (unsigned short *)outputp; + unsigned short *end_output = ((unsigned short *)output) + width_times_channels; + +#ifdef STBIR_SIMD + { + if (width_times_channels >= stbir__simdfX_float_count * 2) + { + float const *end_encode_m8 = encode + width_times_channels - stbir__simdfX_float_count * 2; + end_output 
+static void STBIR__CODER_NAME(stbir__encode_uint16_linear_scaled)(void *outputp, int width_times_channels, float const *encode)
+{
+  unsigned short STBIR_SIMD_STREAMOUT_PTR(*) output = (unsigned short *)outputp;
+  unsigned short *end_output = ((unsigned short *)output) + width_times_channels;
+
+#ifdef STBIR_SIMD
+  {
+    if (width_times_channels >= stbir__simdfX_float_count * 2)
+    {
+      float const *end_encode_m8 = encode + width_times_channels - stbir__simdfX_float_count * 2;
+      end_output -= stbir__simdfX_float_count * 2;
+      STBIR_SIMD_NO_UNROLL_LOOP_START_INF_FOR
+      for (;;)
+      {
+        stbir__simdfX e0, e1;
+        stbir__simdiX i;
+        STBIR_SIMD_NO_UNROLL(encode);
+        stbir__simdfX_madd_mem(e0, STBIR_simd_point5X, STBIR_max_uint16_as_floatX, encode);
+        stbir__simdfX_madd_mem(e1, STBIR_simd_point5X, STBIR_max_uint16_as_floatX, encode + stbir__simdfX_float_count);
+        stbir__encode_simdfX_unflip(e0);
+        stbir__encode_simdfX_unflip(e1);
+        stbir__simdfX_pack_to_words(i, e0, e1);
+        stbir__simdiX_store(output, i);
+        encode += stbir__simdfX_float_count * 2;
+        output += stbir__simdfX_float_count * 2;
+        if (output <= end_output)
+          continue;
+        if (output == (end_output + stbir__simdfX_float_count * 2))
+          break;
+        output = end_output; // backup and do last couple
+        encode = end_encode_m8;
+      }
+      return;
+    }
+  }
+
+// try to do blocks of 4 when you can
+#if stbir__coder_min_num != 3 // doesn't divide cleanly by four
+  output += 4;
+  STBIR_NO_UNROLL_LOOP_START
+  while (output <= end_output)
+  {
+    stbir__simdf e;
+    stbir__simdi i;
+    STBIR_NO_UNROLL(encode);
+    stbir__simdf_load(e, encode);
+    stbir__simdf_madd(e, STBIR__CONSTF(STBIR_simd_point5), STBIR__CONSTF(STBIR_max_uint16_as_float), e);
+    stbir__encode_simdf4_unflip(e);
+    stbir__simdf_pack_to_8words(i, e, e); // only use first 4
+    stbir__simdi_store2(output - 4, i);
+    output += 4;
+    encode += 4;
+  }
+  output -= 4;
+#endif
+
+// do the remnants
+#if stbir__coder_min_num < 4
+  STBIR_NO_UNROLL_LOOP_START
+  while (output < end_output)
+  {
+    stbir__simdf e;
+    STBIR_NO_UNROLL(encode);
+    stbir__simdf_madd1_mem(e, STBIR__CONSTF(STBIR_simd_point5), STBIR__CONSTF(STBIR_max_uint16_as_float), encode + stbir__encode_order0);
+    output[0] = stbir__simdf_convert_float_to_short(e);
+#if stbir__coder_min_num >= 2
+    stbir__simdf_madd1_mem(e, STBIR__CONSTF(STBIR_simd_point5), STBIR__CONSTF(STBIR_max_uint16_as_float), encode + stbir__encode_order1);
+    output[1] = stbir__simdf_convert_float_to_short(e);
+#endif
+#if stbir__coder_min_num >= 3
+    stbir__simdf_madd1_mem(e, STBIR__CONSTF(STBIR_simd_point5), STBIR__CONSTF(STBIR_max_uint16_as_float), encode + stbir__encode_order2);
+    output[2] = stbir__simdf_convert_float_to_short(e);
+#endif
+    output += stbir__coder_min_num;
+    encode += stbir__coder_min_num;
+  }
+#endif
+
+#else
+
+// try to do blocks of 4 when you can
+#if stbir__coder_min_num != 3 // doesn't divide cleanly by four
+  output += 4;
+  STBIR_SIMD_NO_UNROLL_LOOP_START
+  while (output <= end_output)
+  {
+    float f;
+    STBIR_SIMD_NO_UNROLL(encode);
+    f = encode[stbir__encode_order0] * stbir__max_uint16_as_float + 0.5f;
+    STBIR_CLAMP(f, 0, 65535);
+    output[0 - 4] = (unsigned short)f;
+    f = encode[stbir__encode_order1] * stbir__max_uint16_as_float + 0.5f;
+    STBIR_CLAMP(f, 0, 65535);
+    output[1 - 4] = (unsigned short)f;
+    f = encode[stbir__encode_order2] * stbir__max_uint16_as_float + 0.5f;
+    STBIR_CLAMP(f, 0, 65535);
+    output[2 - 4] = (unsigned short)f;
+    f = encode[stbir__encode_order3] * stbir__max_uint16_as_float + 0.5f;
+    STBIR_CLAMP(f, 0, 65535);
+    output[3 - 4] = (unsigned short)f;
+    output += 4;
+    encode += 4;
+  }
+  output -= 4;
+#endif
+
+// do the remnants
+#if stbir__coder_min_num < 4
+  STBIR_NO_UNROLL_LOOP_START
+  while (output < end_output)
+  {
+    float f;
+    STBIR_NO_UNROLL(encode);
+    f = encode[stbir__encode_order0] * stbir__max_uint16_as_float + 0.5f;
+    STBIR_CLAMP(f, 0, 65535);
+    output[0] = (unsigned short)f;
+#if stbir__coder_min_num >= 2
+    f = encode[stbir__encode_order1] * stbir__max_uint16_as_float + 0.5f;
+    STBIR_CLAMP(f, 0, 65535);
+    output[1] = (unsigned short)f;
+#endif
+#if stbir__coder_min_num >= 3
+    f = encode[stbir__encode_order2] * stbir__max_uint16_as_float + 0.5f;
+    STBIR_CLAMP(f, 0, 65535);
+    output[2] = (unsigned short)f;
+#endif
+    output += stbir__coder_min_num;
+    encode += stbir__coder_min_num;
+  }
+#endif
+#endif
+}
+
+static float *STBIR__CODER_NAME(stbir__decode_uint16_linear)(float *decodep, int width_times_channels, void const *inputp)
+{
+  float STBIR_STREAMOUT_PTR(*) decode = decodep;
+  float *decode_end = (float *)decode + width_times_channels;
+  unsigned short const *input = (unsigned short const *)inputp;
+
+#ifdef STBIR_SIMD
+  unsigned short const *end_input_m8 = input + width_times_channels - 8;
+  if (width_times_channels >= 8)
+  {
+    decode_end -= 8;
+    STBIR_NO_UNROLL_LOOP_START_INF_FOR
+    for (;;)
+    {
+#ifdef STBIR_SIMD8
+      stbir__simdi i;
+      stbir__simdi8 o;
+      stbir__simdf8 of;
+      STBIR_NO_UNROLL(decode);
+      stbir__simdi_load(i, input);
+      stbir__simdi8_expand_u16_to_u32(o, i);
+      stbir__simdi8_convert_i32_to_float(of, o);
+      stbir__decode_simdf8_flip(of);
+      stbir__simdf8_store(decode + 0, of);
+#else
+      stbir__simdi i, o0, o1;
+      stbir__simdf of0, of1;
+      STBIR_NO_UNROLL(decode);
+      stbir__simdi_load(i, input);
+      stbir__simdi_expand_u16_to_u32(o0, o1, i);
+      stbir__simdi_convert_i32_to_float(of0, o0);
+      stbir__simdi_convert_i32_to_float(of1, o1);
+      stbir__decode_simdf4_flip(of0);
+      stbir__decode_simdf4_flip(of1);
+      stbir__simdf_store(decode + 0, of0);
+      stbir__simdf_store(decode + 4, of1);
+#endif
+      decode += 8;
+      input += 8;
+      if (decode <= decode_end)
+        continue;
+      if (decode == (decode_end + 8))
+        break;
+      decode = decode_end; // backup and do last couple
+      input = end_input_m8;
+    }
+    return decode_end + 8;
+  }
+#endif
+
+// try to do blocks of 4 when you can
+#if stbir__coder_min_num != 3 // doesn't divide cleanly by four
+  decode += 4;
+  STBIR_SIMD_NO_UNROLL_LOOP_START
+  while (decode <= decode_end)
+  {
+    STBIR_SIMD_NO_UNROLL(decode);
+    decode[0 - 4] = ((float)(input[stbir__decode_order0]));
+    decode[1 - 4] = ((float)(input[stbir__decode_order1]));
+    decode[2 - 4] = ((float)(input[stbir__decode_order2]));
+    decode[3 - 4] = ((float)(input[stbir__decode_order3]));
+    decode += 4;
+    input += 4;
+  }
+  decode -= 4;
+#endif
+
+// do the remnants
+#if stbir__coder_min_num < 4
+  STBIR_NO_UNROLL_LOOP_START
+  while (decode < decode_end)
+  {
+    STBIR_NO_UNROLL(decode);
+    decode[0] = ((float)(input[stbir__decode_order0]));
+#if stbir__coder_min_num >= 2
+    decode[1] = ((float)(input[stbir__decode_order1]));
+#endif
+#if stbir__coder_min_num >= 3
+    decode[2] = ((float)(input[stbir__decode_order2]));
+#endif
+    decode += stbir__coder_min_num;
+    input += stbir__coder_min_num;
+  }
+#endif
+  return decode_end;
+}
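+
+// Encoding (the scaled version above and the unscaled one below) is the
+// inverse mapping: scale back to the integer range, round to nearest by
+// adding 0.5 before truncation, and clamp so filter over/undershoot cannot
+// wrap. A scalar sketch of the scaled encoder's per-sample math
+// (illustrative name; the real loops add swizzling and SIMD packing):
+//
+//   static unsigned short sketch_encode_u16_scaled(float v)
+//   {
+//     float f = v * 65535.0f + 0.5f;  // stbir__max_uint16_as_float, round
+//     if (f < 0.0f)     f = 0.0f;     // STBIR_CLAMP(f, 0, 65535)
+//     if (f > 65535.0f) f = 65535.0f;
+//     return (unsigned short)f;
+//   }
+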
+static void STBIR__CODER_NAME(stbir__encode_uint16_linear)(void *outputp, int width_times_channels, float const *encode)
+{
+  unsigned short STBIR_SIMD_STREAMOUT_PTR(*) output = (unsigned short *)outputp;
+  unsigned short *end_output = ((unsigned short *)output) + width_times_channels;
+
+#ifdef STBIR_SIMD
+  {
+    if (width_times_channels >= stbir__simdfX_float_count * 2)
+    {
+      float const *end_encode_m8 = encode + width_times_channels - stbir__simdfX_float_count * 2;
+      end_output -= stbir__simdfX_float_count * 2;
+      STBIR_SIMD_NO_UNROLL_LOOP_START_INF_FOR
+      for (;;)
+      {
+        stbir__simdfX e0, e1;
+        stbir__simdiX i;
+        STBIR_SIMD_NO_UNROLL(encode);
+        stbir__simdfX_add_mem(e0, STBIR_simd_point5X, encode);
+        stbir__simdfX_add_mem(e1, STBIR_simd_point5X, encode + stbir__simdfX_float_count);
+        stbir__encode_simdfX_unflip(e0);
+        stbir__encode_simdfX_unflip(e1);
+        stbir__simdfX_pack_to_words(i, e0, e1);
+        stbir__simdiX_store(output, i);
+        encode += stbir__simdfX_float_count * 2;
+        output += stbir__simdfX_float_count * 2;
+        if (output <= end_output)
+          continue;
+        if (output == (end_output + stbir__simdfX_float_count * 2))
+          break;
+        output = end_output; // backup and do last couple
+        encode = end_encode_m8;
+      }
+      return;
+    }
+  }
+
+// try to do blocks of 4 when you can
+#if stbir__coder_min_num != 3 // doesn't divide cleanly by four
+  output += 4;
+  STBIR_NO_UNROLL_LOOP_START
+  while (output <= end_output)
+  {
+    stbir__simdf e;
+    stbir__simdi i;
+    STBIR_NO_UNROLL(encode);
+    stbir__simdf_load(e, encode);
+    stbir__simdf_add(e, STBIR__CONSTF(STBIR_simd_point5), e);
+    stbir__encode_simdf4_unflip(e);
+    stbir__simdf_pack_to_8words(i, e, e); // only use first 4
+    stbir__simdi_store2(output - 4, i);
+    output += 4;
+    encode += 4;
+  }
+  output -= 4;
+#endif
+
+#else
+
+// try to do blocks of 4 when you can
+#if stbir__coder_min_num != 3 // doesn't divide cleanly by four
+  output += 4;
+  STBIR_SIMD_NO_UNROLL_LOOP_START
+  while (output <= end_output)
+  {
+    float f;
+    STBIR_SIMD_NO_UNROLL(encode);
+    f = encode[stbir__encode_order0] + 0.5f;
+    STBIR_CLAMP(f, 0, 65535);
+    output[0 - 4] = (unsigned short)f;
+    f = encode[stbir__encode_order1] + 0.5f;
+    STBIR_CLAMP(f, 0, 65535);
+    output[1 - 4] = (unsigned short)f;
+    f = encode[stbir__encode_order2] + 0.5f;
+    STBIR_CLAMP(f, 0, 65535);
+    output[2 - 4] = (unsigned short)f;
+    f = encode[stbir__encode_order3] + 0.5f;
+    STBIR_CLAMP(f, 0, 65535);
+    output[3 - 4] = (unsigned short)f;
+    output += 4;
+    encode += 4;
+  }
+  output -= 4;
+#endif
+
+#endif
+
+// do the remnants
+#if stbir__coder_min_num < 4
+  STBIR_NO_UNROLL_LOOP_START
+  while (output < end_output)
+  {
+    float f;
+    STBIR_NO_UNROLL(encode);
+    f = encode[stbir__encode_order0] + 0.5f;
+    STBIR_CLAMP(f, 0, 65535);
+    output[0] = (unsigned short)f;
+#if stbir__coder_min_num >= 2
+    f = encode[stbir__encode_order1] + 0.5f;
+    STBIR_CLAMP(f, 0, 65535);
+    output[1] = (unsigned short)f;
+#endif
+#if stbir__coder_min_num >= 3
+    f = encode[stbir__encode_order2] + 0.5f;
+    STBIR_CLAMP(f, 0, 65535);
+    output[2] = (unsigned short)f;
+#endif
+    output += stbir__coder_min_num;
+    encode += stbir__coder_min_num;
+  }
+#endif
+}
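+
+// All of the wide loops above share one tail-handling idiom: run full-width
+// iterations while at least one full vector remains, and when only a partial
+// tail is left, back the pointers up so the final vector ends exactly at the
+// buffer end, reprocessing a few elements instead of falling back to scalar
+// code. A standalone sketch of the idiom with a hypothetical 4-wide kernel
+// (structure differs slightly from the loops above, same behavior):
+//
+//   void sketch_tail(float *dst, const float *src, int n) // requires n >= 4
+//   {
+//     int i = 0;
+//     for (;;)
+//     {
+//       process4(dst + i, src + i);  // hypothetical 4-wide kernel
+//       i += 4;
+//       if (i + 4 <= n) continue;    // another full vector still fits
+//       if (i == n) break;           // ended exactly on the boundary
+//       i = n - 4;                   // backup and do last couple
+//     }
+//   }
+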
+static float *STBIR__CODER_NAME(stbir__decode_half_float_linear)(float *decodep, int width_times_channels, void const *inputp)
+{
+  float STBIR_STREAMOUT_PTR(*) decode = decodep;
+  float *decode_end = (float *)decode + width_times_channels;
+  stbir__FP16 const *input = (stbir__FP16 const *)inputp;
+
+#ifdef STBIR_SIMD
+  if (width_times_channels >= 8)
+  {
+    stbir__FP16 const *end_input_m8 = input + width_times_channels - 8;
+    decode_end -= 8;
+    STBIR_NO_UNROLL_LOOP_START_INF_FOR
+    for (;;)
+    {
+      STBIR_NO_UNROLL(decode);
+
+      stbir__half_to_float_SIMD(decode, input);
+#ifdef stbir__decode_swizzle
+#ifdef STBIR_SIMD8
+      {
+        stbir__simdf8 of;
+        stbir__simdf8_load(of, decode);
+        stbir__decode_simdf8_flip(of);
+        stbir__simdf8_store(decode, of);
+      }
+#else
+      {
+        stbir__simdf of0, of1;
+        stbir__simdf_load(of0, decode);
+        stbir__simdf_load(of1, decode + 4);
+        stbir__decode_simdf4_flip(of0);
+        stbir__decode_simdf4_flip(of1);
+        stbir__simdf_store(decode, of0);
+        stbir__simdf_store(decode + 4, of1);
+      }
+#endif
+#endif
+      decode += 8;
+      input += 8;
+      if (decode <= decode_end)
+        continue;
+      if (decode == (decode_end + 8))
+        break;
+      decode = decode_end; // backup and do last couple
+      input = end_input_m8;
+    }
+    return decode_end + 8;
+  }
+#endif
+
+// try to do blocks of 4 when you can
+#if stbir__coder_min_num != 3 // doesn't divide cleanly by four
+  decode += 4;
+  STBIR_SIMD_NO_UNROLL_LOOP_START
+  while (decode <= decode_end)
+  {
+    STBIR_SIMD_NO_UNROLL(decode);
+    decode[0 - 4] = stbir__half_to_float(input[stbir__decode_order0]);
+    decode[1 - 4] = stbir__half_to_float(input[stbir__decode_order1]);
+    decode[2 - 4] = stbir__half_to_float(input[stbir__decode_order2]);
+    decode[3 - 4] = stbir__half_to_float(input[stbir__decode_order3]);
+    decode += 4;
+    input += 4;
+  }
+  decode -= 4;
+#endif
+
+// do the remnants
+#if stbir__coder_min_num < 4
+  STBIR_NO_UNROLL_LOOP_START
+  while (decode < decode_end)
+  {
+    STBIR_NO_UNROLL(decode);
+    decode[0] = stbir__half_to_float(input[stbir__decode_order0]);
+#if stbir__coder_min_num >= 2
+    decode[1] = stbir__half_to_float(input[stbir__decode_order1]);
+#endif
+#if stbir__coder_min_num >= 3
+    decode[2] = stbir__half_to_float(input[stbir__decode_order2]);
+#endif
+    decode += stbir__coder_min_num;
+    input += stbir__coder_min_num;
+  }
+#endif
+  return decode_end;
+}
+
+static void STBIR__CODER_NAME(stbir__encode_half_float_linear)(void *outputp, int width_times_channels, float const *encode)
+{
+  stbir__FP16 STBIR_SIMD_STREAMOUT_PTR(*) output = (stbir__FP16 *)outputp;
+  stbir__FP16 *end_output = ((stbir__FP16 *)output) + width_times_channels;
+
+#ifdef STBIR_SIMD
+  if (width_times_channels >= 8)
+  {
+    float const *end_encode_m8 = encode + width_times_channels - 8;
+    end_output -= 8;
+    STBIR_SIMD_NO_UNROLL_LOOP_START_INF_FOR
+    for (;;)
+    {
+      STBIR_SIMD_NO_UNROLL(encode);
+#ifdef stbir__decode_swizzle
+#ifdef STBIR_SIMD8
+      {
+        stbir__simdf8 of;
+        stbir__simdf8_load(of, encode);
+        stbir__encode_simdf8_unflip(of);
+        stbir__float_to_half_SIMD(output, (float *)&of);
+      }
+#else
+      {
+        stbir__simdf of[2];
+        stbir__simdf_load(of[0], encode);
+        stbir__simdf_load(of[1], encode + 4);
+        stbir__encode_simdf4_unflip(of[0]);
+        stbir__encode_simdf4_unflip(of[1]);
+        stbir__float_to_half_SIMD(output, (float *)of);
+      }
+#endif
+#else
+      stbir__float_to_half_SIMD(output, encode);
+#endif
+      encode += 8;
+      output += 8;
+      if (output <= end_output)
+        continue;
+      if (output == (end_output + 8))
+        break;
+      output = end_output; // backup and do last couple
+      encode = end_encode_m8;
+    }
+    return;
+  }
+#endif
+
+// try to do blocks of 4 when you can
+#if stbir__coder_min_num != 3 // doesn't divide cleanly by four
+  output += 4;
+  STBIR_SIMD_NO_UNROLL_LOOP_START
+  while (output <= end_output)
+  {
+    STBIR_SIMD_NO_UNROLL(output);
+    output[0 - 4] = stbir__float_to_half(encode[stbir__encode_order0]);
+    output[1 - 4] = stbir__float_to_half(encode[stbir__encode_order1]);
+    output[2 - 4] = stbir__float_to_half(encode[stbir__encode_order2]);
+    output[3 - 4] = stbir__float_to_half(encode[stbir__encode_order3]);
+    output += 4;
+    encode += 4;
+  }
+  output -= 4;
+#endif
+
+// do the remnants
+#if stbir__coder_min_num < 4
+  STBIR_NO_UNROLL_LOOP_START
+  while (output < end_output)
+  {
+    STBIR_NO_UNROLL(output);
+    output[0] = stbir__float_to_half(encode[stbir__encode_order0]);
+#if stbir__coder_min_num >= 2
+    output[1] = stbir__float_to_half(encode[stbir__encode_order1]);
+#endif
+#if stbir__coder_min_num >= 3
+    output[2] = stbir__float_to_half(encode[stbir__encode_order2]);
+#endif
+    output += stbir__coder_min_num;
+    encode += stbir__coder_min_num;
+  }
+#endif
+}
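+
+// stbir__FP16 here is IEEE 754 binary16: 1 sign bit, 5 exponent bits
+// (bias 15), 10 mantissa bits. For a normal half, the decoded value is
+// sign * 2^(exp-15) * (1 + mantissa/1024). Worked example: 0x3C00 has
+// exp = 15, mantissa = 0, so it decodes to 1.0f; 0x4248 has exp = 16,
+// mantissa = 0x248 = 584, so it decodes to 2 * (1 + 584/1024) ~= 3.1406
+// (half-precision pi). The converters above implement this mapping, plus
+// the denormal/infinity/NaN cases.
+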
+static float *STBIR__CODER_NAME(stbir__decode_float_linear)(float *decodep, int width_times_channels, void const *inputp)
+{
+#ifdef stbir__decode_swizzle
+  float STBIR_STREAMOUT_PTR(*) decode = decodep;
+  float *decode_end = (float *)decode + width_times_channels;
+  float const *input = (float const *)inputp;
+
+#ifdef STBIR_SIMD
+  if (width_times_channels >= 16)
+  {
+    float const *end_input_m16 = input + width_times_channels - 16;
+    decode_end -= 16;
+    STBIR_NO_UNROLL_LOOP_START_INF_FOR
+    for (;;)
+    {
+      STBIR_NO_UNROLL(decode);
+#ifdef stbir__decode_swizzle
+#ifdef STBIR_SIMD8
+      {
+        stbir__simdf8 of0, of1;
+        stbir__simdf8_load(of0, input);
+        stbir__simdf8_load(of1, input + 8);
+        stbir__decode_simdf8_flip(of0);
+        stbir__decode_simdf8_flip(of1);
+        stbir__simdf8_store(decode, of0);
+        stbir__simdf8_store(decode + 8, of1);
+      }
+#else
+      {
+        stbir__simdf of0, of1, of2, of3;
+        stbir__simdf_load(of0, input);
+        stbir__simdf_load(of1, input + 4);
+        stbir__simdf_load(of2, input + 8);
+        stbir__simdf_load(of3, input + 12);
+        stbir__decode_simdf4_flip(of0);
+        stbir__decode_simdf4_flip(of1);
+        stbir__decode_simdf4_flip(of2);
+        stbir__decode_simdf4_flip(of3);
+        stbir__simdf_store(decode, of0);
+        stbir__simdf_store(decode + 4, of1);
+        stbir__simdf_store(decode + 8, of2);
+        stbir__simdf_store(decode + 12, of3);
+      }
+#endif
+#endif
+      decode += 16;
+      input += 16;
+      if (decode <= decode_end)
+        continue;
+      if (decode == (decode_end + 16))
+        break;
+      decode = decode_end; // backup and do last couple
+      input = end_input_m16;
+    }
+    return decode_end + 16;
+  }
+#endif
+
+// try to do blocks of 4 when you can
+#if stbir__coder_min_num != 3 // doesn't divide cleanly by four
+  decode += 4;
+  STBIR_SIMD_NO_UNROLL_LOOP_START
+  while (decode <= decode_end)
+  {
+    STBIR_SIMD_NO_UNROLL(decode);
+    decode[0 - 4] = input[stbir__decode_order0];
+    decode[1 - 4] = input[stbir__decode_order1];
+    decode[2 - 4] = input[stbir__decode_order2];
+    decode[3 - 4] = input[stbir__decode_order3];
+    decode += 4;
+    input += 4;
+  }
+  decode -= 4;
+#endif
+
+// do the remnants
+#if stbir__coder_min_num < 4
+  STBIR_NO_UNROLL_LOOP_START
+  while (decode < decode_end)
+  {
+    STBIR_NO_UNROLL(decode);
+    decode[0] = input[stbir__decode_order0];
+#if stbir__coder_min_num >= 2
+    decode[1] = input[stbir__decode_order1];
+#endif
+#if stbir__coder_min_num >= 3
+    decode[2] = input[stbir__decode_order2];
+#endif
+    decode += stbir__coder_min_num;
+    input += stbir__coder_min_num;
+  }
+#endif
+  return decode_end;
+
+#else
+
+  if ((void *)decodep != inputp)
+    STBIR_MEMCPY(decodep, inputp, width_times_channels * sizeof(float));
+
+  return decodep + width_times_channels;
+
+#endif
+}
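+
+// Note the float decoder's fast path: when no channel swizzle is compiled in,
+// float input is already in the working format, so "decoding" degenerates to
+// a single STBIR_MEMCPY, and even that is skipped when the source and
+// destination buffers alias.
+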
+static void STBIR__CODER_NAME(stbir__encode_float_linear)(void *outputp, int width_times_channels, float const *encode)
+{
+#if !defined(STBIR_FLOAT_HIGH_CLAMP) && !defined(STBIR_FLOAT_LOW_CLAMP) && !defined(stbir__decode_swizzle)
+
+  if ((void *)outputp != (void *)encode)
+    STBIR_MEMCPY(outputp, encode, width_times_channels * sizeof(float));
+
+#else
+
+  float STBIR_SIMD_STREAMOUT_PTR(*) output = (float *)outputp;
+  float *end_output = ((float *)output) + width_times_channels;
+
+#ifdef STBIR_FLOAT_HIGH_CLAMP
+#define stbir_scalar_hi_clamp(v) \
+  if (v > STBIR_FLOAT_HIGH_CLAMP) \
+    v = STBIR_FLOAT_HIGH_CLAMP;
+#else
+#define stbir_scalar_hi_clamp(v)
+#endif
+#ifdef STBIR_FLOAT_LOW_CLAMP
+#define stbir_scalar_lo_clamp(v) \
+  if (v < STBIR_FLOAT_LOW_CLAMP) \
+    v = STBIR_FLOAT_LOW_CLAMP;
+#else
+#define stbir_scalar_lo_clamp(v)
+#endif
+
+#ifdef STBIR_SIMD
+
+#ifdef STBIR_FLOAT_HIGH_CLAMP
+  const stbir__simdfX high_clamp = stbir__simdf_frepX(STBIR_FLOAT_HIGH_CLAMP);
+#endif
+#ifdef STBIR_FLOAT_LOW_CLAMP
+  const stbir__simdfX low_clamp = stbir__simdf_frepX(STBIR_FLOAT_LOW_CLAMP);
+#endif
+
+  if (width_times_channels >= (stbir__simdfX_float_count * 2))
+  {
+    float const *end_encode_m8 = encode + width_times_channels - (stbir__simdfX_float_count * 2);
+    end_output -= (stbir__simdfX_float_count * 2);
+    STBIR_SIMD_NO_UNROLL_LOOP_START_INF_FOR
+    for (;;)
+    {
+      stbir__simdfX e0, e1;
+      STBIR_SIMD_NO_UNROLL(encode);
+      stbir__simdfX_load(e0, encode);
+      stbir__simdfX_load(e1, encode + stbir__simdfX_float_count);
+#ifdef STBIR_FLOAT_HIGH_CLAMP
+      stbir__simdfX_min(e0, e0, high_clamp);
+      stbir__simdfX_min(e1, e1, high_clamp);
+#endif
+#ifdef STBIR_FLOAT_LOW_CLAMP
+      stbir__simdfX_max(e0, e0, low_clamp);
+      stbir__simdfX_max(e1, e1, low_clamp);
+#endif
+      stbir__encode_simdfX_unflip(e0);
+      stbir__encode_simdfX_unflip(e1);
+      stbir__simdfX_store(output, e0);
+      stbir__simdfX_store(output + stbir__simdfX_float_count, e1);
+      encode += stbir__simdfX_float_count * 2;
+      output += stbir__simdfX_float_count * 2;
+      if (output < end_output)
+        continue;
+      if (output == (end_output + (stbir__simdfX_float_count * 2)))
+        break;
+      output = end_output; // backup and do last couple
+      encode = end_encode_m8;
+    }
+    return;
+  }
+
+// try to do blocks of 4 when you can
+#if stbir__coder_min_num != 3 // doesn't divide cleanly by four
+  output += 4;
+  STBIR_NO_UNROLL_LOOP_START
+  while (output <= end_output)
+  {
+    stbir__simdf e0;
+    STBIR_NO_UNROLL(encode);
+    stbir__simdf_load(e0, encode);
+#ifdef STBIR_FLOAT_HIGH_CLAMP
+    stbir__simdf_min(e0, e0, high_clamp);
+#endif
+#ifdef STBIR_FLOAT_LOW_CLAMP
+    stbir__simdf_max(e0, e0, low_clamp);
+#endif
+    stbir__encode_simdf4_unflip(e0);
+    stbir__simdf_store(output - 4, e0);
+    output += 4;
+    encode += 4;
+  }
+  output -= 4;
+#endif
+
+#else
+
+// try to do blocks of 4 when you can
+#if stbir__coder_min_num != 3 // doesn't divide cleanly by four
+  output += 4;
+  STBIR_SIMD_NO_UNROLL_LOOP_START
+  while (output <= end_output)
+  {
+    float e;
+    STBIR_SIMD_NO_UNROLL(encode);
+    e = encode[stbir__encode_order0];
+    stbir_scalar_hi_clamp(e);
+    stbir_scalar_lo_clamp(e);
+    output[0 - 4] = e;
+    e = encode[stbir__encode_order1];
+    stbir_scalar_hi_clamp(e);
+    stbir_scalar_lo_clamp(e);
+    output[1 - 4] = e;
+    e = encode[stbir__encode_order2];
+    stbir_scalar_hi_clamp(e);
+    stbir_scalar_lo_clamp(e);
+    output[2 - 4] = e;
+    e = encode[stbir__encode_order3];
+    stbir_scalar_hi_clamp(e);
+    stbir_scalar_lo_clamp(e);
+    output[3 - 4] = e;
+    output += 4;
+    encode += 4;
+  }
+  output -= 4;
+
+#endif
+
+#endif
+
+// do the remnants
+#if stbir__coder_min_num < 4
+  STBIR_NO_UNROLL_LOOP_START
+  while (output < end_output)
+  {
+    float e;
+    STBIR_NO_UNROLL(encode);
+    e = encode[stbir__encode_order0];
+    stbir_scalar_hi_clamp(e);
+    stbir_scalar_lo_clamp(e);
+    output[0] = e;
+#if stbir__coder_min_num >= 2
+    e = encode[stbir__encode_order1];
+    stbir_scalar_hi_clamp(e);
+    stbir_scalar_lo_clamp(e);
+    output[1] = e;
+#endif
+#if stbir__coder_min_num >= 3
+    e = encode[stbir__encode_order2];
+    stbir_scalar_hi_clamp(e);
+    stbir_scalar_lo_clamp(e);
+    output[2] = e;
+#endif
+    output += stbir__coder_min_num;
+    encode += stbir__coder_min_num;
+  }
+#endif
+
+#endif
+}
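+
+// STBIR_FLOAT_HIGH_CLAMP / STBIR_FLOAT_LOW_CLAMP are optional user-set limits
+// applied when emitting raw floats; when neither is defined (and no swizzle
+// is needed) the encoder above is a plain memcpy. A sketch of how a build
+// might configure them (the values here are only an example):
+//
+//   #define STBIR_FLOAT_LOW_CLAMP  0.0f   // clamp filter undershoot
+//   #define STBIR_FLOAT_HIGH_CLAMP 1.0f   // clamp filter overshoot
+//   // ...then build the implementation of this header as usual.
+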
+#undef stbir__decode_suffix
+#undef stbir__decode_simdf8_flip
+#undef stbir__decode_simdf4_flip
+#undef stbir__decode_order0
+#undef stbir__decode_order1
+#undef stbir__decode_order2
+#undef stbir__decode_order3
+#undef stbir__encode_order0
+#undef stbir__encode_order1
+#undef stbir__encode_order2
+#undef stbir__encode_order3
+#undef stbir__encode_simdf8_unflip
+#undef stbir__encode_simdf4_unflip
+#undef stbir__encode_simdfX_unflip
+#undef STBIR__CODER_NAME
+#undef stbir__coder_min_num
+#undef stbir__decode_swizzle
+#undef stbir_scalar_hi_clamp
+#undef stbir_scalar_lo_clamp
+#undef STB_IMAGE_RESIZE_DO_CODERS
+
+#elif defined(STB_IMAGE_RESIZE_DO_VERTICALS)
+
+#ifdef STB_IMAGE_RESIZE_VERTICAL_CONTINUE
+#define STBIR_chans(start, end) STBIR_strs_join14(start, STBIR__vertical_channels, end, _cont)
+#else
+#define STBIR_chans(start, end) STBIR_strs_join1(start, STBIR__vertical_channels, end)
+#endif
+
+#if STBIR__vertical_channels >= 1
+#define stbIF0(code) code
+#else
+#define stbIF0(code)
+#endif
+#if STBIR__vertical_channels >= 2
+#define stbIF1(code) code
+#else
+#define stbIF1(code)
+#endif
+#if STBIR__vertical_channels >= 3
+#define stbIF2(code) code
+#else
+#define stbIF2(code)
+#endif
+#if STBIR__vertical_channels >= 4
+#define stbIF3(code) code
+#else
+#define stbIF3(code)
+#endif
+#if STBIR__vertical_channels >= 5
+#define stbIF4(code) code
+#else
+#define stbIF4(code)
+#endif
+#if STBIR__vertical_channels >= 6
+#define stbIF5(code) code
+#else
+#define stbIF5(code)
+#endif
+#if STBIR__vertical_channels >= 7
+#define stbIF6(code) code
+#else
+#define stbIF6(code)
+#endif
+#if STBIR__vertical_channels >= 8
+#define stbIF7(code) code
+#else
+#define stbIF7(code)
+#endif
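+
+// The stbIFn macros compile a statement in only when this file is being
+// instantiated for at least n+1 "channels" (input/output rows of the
+// vertical pass), so one function body serves every row count with no
+// runtime branching. A minimal sketch of the same trick outside this file
+// (illustrative names):
+//
+//   #define N 2
+//   #if N >= 1
+//   #define IF0(code) code
+//   #else
+//   #define IF0(code)
+//   #endif
+//   #if N >= 2
+//   #define IF1(code) code
+//   #else
+//   #define IF1(code)
+//   #endif
+//   // IF0(x += a;) IF1(x += b;)  expands to  x += a; x += b;  when N == 2,
+//   // and to just  x += a;       when N == 1.
+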
+static void STBIR_chans(stbir__vertical_scatter_with_, _coeffs)(float **outputs, float const *vertical_coefficients, float const *input, float const *input_end)
+{
+  stbIF0(float STBIR_SIMD_STREAMOUT_PTR(*) output0 = outputs[0]; float c0s = vertical_coefficients[0];)
+  stbIF1(float STBIR_SIMD_STREAMOUT_PTR(*) output1 = outputs[1]; float c1s = vertical_coefficients[1];)
+  stbIF2(float STBIR_SIMD_STREAMOUT_PTR(*) output2 = outputs[2]; float c2s = vertical_coefficients[2];)
+  stbIF3(float STBIR_SIMD_STREAMOUT_PTR(*) output3 = outputs[3]; float c3s = vertical_coefficients[3];)
+  stbIF4(float STBIR_SIMD_STREAMOUT_PTR(*) output4 = outputs[4]; float c4s = vertical_coefficients[4];)
+  stbIF5(float STBIR_SIMD_STREAMOUT_PTR(*) output5 = outputs[5]; float c5s = vertical_coefficients[5];)
+  stbIF6(float STBIR_SIMD_STREAMOUT_PTR(*) output6 = outputs[6]; float c6s = vertical_coefficients[6];)
+  stbIF7(float STBIR_SIMD_STREAMOUT_PTR(*) output7 = outputs[7]; float c7s = vertical_coefficients[7];)
+
+#ifdef STBIR_SIMD
+  {
+    stbIF0(stbir__simdfX c0 = stbir__simdf_frepX(c0s);)
+    stbIF1(stbir__simdfX c1 = stbir__simdf_frepX(c1s);)
+    stbIF2(stbir__simdfX c2 = stbir__simdf_frepX(c2s);)
+    stbIF3(stbir__simdfX c3 = stbir__simdf_frepX(c3s);)
+    stbIF4(stbir__simdfX c4 = stbir__simdf_frepX(c4s);)
+    stbIF5(stbir__simdfX c5 = stbir__simdf_frepX(c5s);)
+    stbIF6(stbir__simdfX c6 = stbir__simdf_frepX(c6s);)
+    stbIF7(stbir__simdfX c7 = stbir__simdf_frepX(c7s);)
+    STBIR_SIMD_NO_UNROLL_LOOP_START while (((char *)input_end - (char *)input) >= (16 * stbir__simdfX_float_count))
+    {
+      stbir__simdfX o0, o1, o2, o3, r0, r1, r2, r3;
+      STBIR_SIMD_NO_UNROLL(output0);
+
+      stbir__simdfX_load(r0, input);
+      stbir__simdfX_load(r1, input + stbir__simdfX_float_count);
+      stbir__simdfX_load(r2, input + (2 * stbir__simdfX_float_count));
+      stbir__simdfX_load(r3, input + (3 * stbir__simdfX_float_count));
+
+#ifdef STB_IMAGE_RESIZE_VERTICAL_CONTINUE
+      stbIF0(stbir__simdfX_load(o0, output0); stbir__simdfX_load(o1, output0 + stbir__simdfX_float_count); stbir__simdfX_load(o2, output0 + (2 * stbir__simdfX_float_count)); stbir__simdfX_load(o3, output0 + (3 * stbir__simdfX_float_count));
+             stbir__simdfX_madd(o0, o0, r0, c0); stbir__simdfX_madd(o1, o1, r1, c0); stbir__simdfX_madd(o2, o2, r2, c0); stbir__simdfX_madd(o3, o3, r3, c0);
+             stbir__simdfX_store(output0, o0); stbir__simdfX_store(output0 + stbir__simdfX_float_count, o1); stbir__simdfX_store(output0 + (2 * stbir__simdfX_float_count), o2); stbir__simdfX_store(output0 + (3 * stbir__simdfX_float_count), o3);)
+      stbIF1(stbir__simdfX_load(o0, output1); stbir__simdfX_load(o1, output1 + stbir__simdfX_float_count); stbir__simdfX_load(o2, output1 + (2 * stbir__simdfX_float_count)); stbir__simdfX_load(o3, output1 + (3 * stbir__simdfX_float_count));
+             stbir__simdfX_madd(o0, o0, r0, c1); stbir__simdfX_madd(o1, o1, r1, c1); stbir__simdfX_madd(o2, o2, r2, c1); stbir__simdfX_madd(o3, o3, r3, c1);
+             stbir__simdfX_store(output1, o0); stbir__simdfX_store(output1 + stbir__simdfX_float_count, o1); stbir__simdfX_store(output1 + (2 * stbir__simdfX_float_count), o2); stbir__simdfX_store(output1 + (3 * stbir__simdfX_float_count), o3);)
+      stbIF2(stbir__simdfX_load(o0, output2); stbir__simdfX_load(o1, output2 + stbir__simdfX_float_count); stbir__simdfX_load(o2, output2 + (2 * stbir__simdfX_float_count)); stbir__simdfX_load(o3, output2 + (3 * stbir__simdfX_float_count));
+             stbir__simdfX_madd(o0, o0, r0, c2); stbir__simdfX_madd(o1, o1, r1, c2); stbir__simdfX_madd(o2, o2, r2, c2); stbir__simdfX_madd(o3, o3, r3, c2);
+             stbir__simdfX_store(output2, o0); stbir__simdfX_store(output2 + stbir__simdfX_float_count, o1); stbir__simdfX_store(output2 + (2 * stbir__simdfX_float_count), o2); stbir__simdfX_store(output2 + (3 * stbir__simdfX_float_count), o3);)
+      stbIF3(stbir__simdfX_load(o0, output3); stbir__simdfX_load(o1, output3 + stbir__simdfX_float_count); stbir__simdfX_load(o2, output3 + (2 * stbir__simdfX_float_count)); stbir__simdfX_load(o3, output3 + (3 * stbir__simdfX_float_count));
+             stbir__simdfX_madd(o0, o0, r0, c3); stbir__simdfX_madd(o1, o1, r1, c3); stbir__simdfX_madd(o2, o2, r2, c3); stbir__simdfX_madd(o3, o3, r3, c3);
+             stbir__simdfX_store(output3, o0); stbir__simdfX_store(output3 + stbir__simdfX_float_count, o1); stbir__simdfX_store(output3 + (2 * stbir__simdfX_float_count), o2); stbir__simdfX_store(output3 + (3 * stbir__simdfX_float_count), o3);)
+      stbIF4(stbir__simdfX_load(o0, output4); stbir__simdfX_load(o1, output4 + stbir__simdfX_float_count); stbir__simdfX_load(o2, output4 + (2 * stbir__simdfX_float_count)); stbir__simdfX_load(o3, output4 + (3 * stbir__simdfX_float_count));
+             stbir__simdfX_madd(o0, o0, r0, c4); stbir__simdfX_madd(o1, o1, r1, c4); stbir__simdfX_madd(o2, o2, r2, c4); stbir__simdfX_madd(o3, o3, r3, c4);
+             stbir__simdfX_store(output4, o0); stbir__simdfX_store(output4 + stbir__simdfX_float_count, o1); stbir__simdfX_store(output4 + (2 * stbir__simdfX_float_count), o2); stbir__simdfX_store(output4 + (3 * stbir__simdfX_float_count), o3);)
+      stbIF5(stbir__simdfX_load(o0, output5); stbir__simdfX_load(o1, output5 + stbir__simdfX_float_count); stbir__simdfX_load(o2, output5 + (2 * stbir__simdfX_float_count)); stbir__simdfX_load(o3, output5 + (3 * stbir__simdfX_float_count));
+             stbir__simdfX_madd(o0, o0, r0, c5); stbir__simdfX_madd(o1, o1, r1, c5); stbir__simdfX_madd(o2, o2, r2, c5); stbir__simdfX_madd(o3, o3, r3, c5);
+             stbir__simdfX_store(output5, o0); stbir__simdfX_store(output5 + stbir__simdfX_float_count, o1); stbir__simdfX_store(output5 + (2 * stbir__simdfX_float_count), o2); stbir__simdfX_store(output5 + (3 * stbir__simdfX_float_count), o3);)
+      stbIF6(stbir__simdfX_load(o0, output6); stbir__simdfX_load(o1, output6 + stbir__simdfX_float_count); stbir__simdfX_load(o2, output6 + (2 * stbir__simdfX_float_count)); stbir__simdfX_load(o3, output6 + (3 * stbir__simdfX_float_count));
+             stbir__simdfX_madd(o0, o0, r0, c6); stbir__simdfX_madd(o1, o1, r1, c6); stbir__simdfX_madd(o2, o2, r2, c6); stbir__simdfX_madd(o3, o3, r3, c6);
+             stbir__simdfX_store(output6, o0); stbir__simdfX_store(output6 + stbir__simdfX_float_count, o1); stbir__simdfX_store(output6 + (2 * stbir__simdfX_float_count), o2); stbir__simdfX_store(output6 + (3 * stbir__simdfX_float_count), o3);)
+      stbIF7(stbir__simdfX_load(o0, output7); stbir__simdfX_load(o1, output7 + stbir__simdfX_float_count); stbir__simdfX_load(o2, output7 + (2 * stbir__simdfX_float_count)); stbir__simdfX_load(o3, output7 + (3 * stbir__simdfX_float_count));
+             stbir__simdfX_madd(o0, o0, r0, c7); stbir__simdfX_madd(o1, o1, r1, c7); stbir__simdfX_madd(o2, o2, r2, c7); stbir__simdfX_madd(o3, o3, r3, c7);
+             stbir__simdfX_store(output7, o0); stbir__simdfX_store(output7 + stbir__simdfX_float_count, o1); stbir__simdfX_store(output7 + (2 * stbir__simdfX_float_count), o2); stbir__simdfX_store(output7 + (3 * stbir__simdfX_float_count), o3);)
+#else
+      stbIF0(stbir__simdfX_mult(o0, r0, c0); stbir__simdfX_mult(o1, r1, c0); stbir__simdfX_mult(o2, r2, c0); stbir__simdfX_mult(o3, r3, c0);
+             stbir__simdfX_store(output0, o0); stbir__simdfX_store(output0 + stbir__simdfX_float_count, o1); stbir__simdfX_store(output0 + (2 * stbir__simdfX_float_count), o2); stbir__simdfX_store(output0 + (3 * stbir__simdfX_float_count), o3);)
+      stbIF1(stbir__simdfX_mult(o0, r0, c1); stbir__simdfX_mult(o1, r1, c1); stbir__simdfX_mult(o2, r2, c1); stbir__simdfX_mult(o3, r3, c1);
+             stbir__simdfX_store(output1, o0); stbir__simdfX_store(output1 + stbir__simdfX_float_count, o1); stbir__simdfX_store(output1 + (2 * stbir__simdfX_float_count), o2); stbir__simdfX_store(output1 + (3 * stbir__simdfX_float_count), o3);)
+      stbIF2(stbir__simdfX_mult(o0, r0, c2); stbir__simdfX_mult(o1, r1, c2); stbir__simdfX_mult(o2, r2, c2); stbir__simdfX_mult(o3, r3, c2);
+             stbir__simdfX_store(output2, o0); stbir__simdfX_store(output2 + stbir__simdfX_float_count, o1); stbir__simdfX_store(output2 + (2 * stbir__simdfX_float_count), o2); stbir__simdfX_store(output2 + (3 * stbir__simdfX_float_count), o3);)
+      stbIF3(stbir__simdfX_mult(o0, r0, c3); stbir__simdfX_mult(o1, r1, c3); stbir__simdfX_mult(o2, r2, c3); stbir__simdfX_mult(o3, r3, c3);
+             stbir__simdfX_store(output3, o0); stbir__simdfX_store(output3 + stbir__simdfX_float_count, o1); stbir__simdfX_store(output3 + (2 * stbir__simdfX_float_count), o2); stbir__simdfX_store(output3 + (3 * stbir__simdfX_float_count), o3);)
+      stbIF4(stbir__simdfX_mult(o0, r0, c4); stbir__simdfX_mult(o1, r1, c4); stbir__simdfX_mult(o2, r2, c4); stbir__simdfX_mult(o3, r3, c4);
+             stbir__simdfX_store(output4, o0); stbir__simdfX_store(output4 + stbir__simdfX_float_count, o1); stbir__simdfX_store(output4 + (2 * stbir__simdfX_float_count), o2); stbir__simdfX_store(output4 + (3 * stbir__simdfX_float_count), o3);)
+      stbIF5(stbir__simdfX_mult(o0, r0, c5); stbir__simdfX_mult(o1, r1, c5); stbir__simdfX_mult(o2, r2, c5); stbir__simdfX_mult(o3, r3, c5);
+             stbir__simdfX_store(output5, o0); stbir__simdfX_store(output5 + stbir__simdfX_float_count, o1); stbir__simdfX_store(output5 + (2 * stbir__simdfX_float_count), o2); stbir__simdfX_store(output5 + (3 * stbir__simdfX_float_count), o3);)
+      stbIF6(stbir__simdfX_mult(o0, r0, c6); stbir__simdfX_mult(o1, r1, c6); stbir__simdfX_mult(o2, r2, c6); stbir__simdfX_mult(o3, r3, c6);
+             stbir__simdfX_store(output6, o0); stbir__simdfX_store(output6 + stbir__simdfX_float_count, o1); stbir__simdfX_store(output6 + (2 * stbir__simdfX_float_count), o2); stbir__simdfX_store(output6 + (3 * stbir__simdfX_float_count), o3);)
+      stbIF7(stbir__simdfX_mult(o0, r0, c7); stbir__simdfX_mult(o1, r1, c7); stbir__simdfX_mult(o2, r2, c7); stbir__simdfX_mult(o3, r3, c7);
+             stbir__simdfX_store(output7, o0); stbir__simdfX_store(output7 + stbir__simdfX_float_count, o1); stbir__simdfX_store(output7 + (2 * stbir__simdfX_float_count), o2); stbir__simdfX_store(output7 + (3 * stbir__simdfX_float_count), o3);)
+#endif
+
+      input += (4 * stbir__simdfX_float_count);
+      stbIF0(output0 += (4 * stbir__simdfX_float_count);) stbIF1(output1 += (4 * stbir__simdfX_float_count);) stbIF2(output2 += (4 * stbir__simdfX_float_count);) stbIF3(output3 += (4 * stbir__simdfX_float_count);) stbIF4(output4 += (4 * stbir__simdfX_float_count);) stbIF5(output5 += (4 * stbir__simdfX_float_count);) stbIF6(output6 += (4 * stbir__simdfX_float_count);) stbIF7(output7 += (4 * stbir__simdfX_float_count);)
+    }
+    STBIR_SIMD_NO_UNROLL_LOOP_START
+    while (((char *)input_end - (char *)input) >= 16)
+    {
+      stbir__simdf o0, r0;
+      STBIR_SIMD_NO_UNROLL(output0);
+
+      stbir__simdf_load(r0, input);
+
+#ifdef STB_IMAGE_RESIZE_VERTICAL_CONTINUE
+      stbIF0(stbir__simdf_load(o0, output0); stbir__simdf_madd(o0, o0, r0, stbir__if_simdf8_cast_to_simdf4(c0)); stbir__simdf_store(output0, o0);)
+      stbIF1(stbir__simdf_load(o0, output1); stbir__simdf_madd(o0, o0, r0, stbir__if_simdf8_cast_to_simdf4(c1)); stbir__simdf_store(output1, o0);)
+      stbIF2(stbir__simdf_load(o0, output2); stbir__simdf_madd(o0, o0, r0, stbir__if_simdf8_cast_to_simdf4(c2)); stbir__simdf_store(output2, o0);)
+      stbIF3(stbir__simdf_load(o0, output3); stbir__simdf_madd(o0, o0, r0, stbir__if_simdf8_cast_to_simdf4(c3)); stbir__simdf_store(output3, o0);)
+      stbIF4(stbir__simdf_load(o0, output4); stbir__simdf_madd(o0, o0, r0, stbir__if_simdf8_cast_to_simdf4(c4)); stbir__simdf_store(output4, o0);)
+      stbIF5(stbir__simdf_load(o0, output5); stbir__simdf_madd(o0, o0, r0, stbir__if_simdf8_cast_to_simdf4(c5)); stbir__simdf_store(output5, o0);)
+      stbIF6(stbir__simdf_load(o0, output6); stbir__simdf_madd(o0, o0, r0, stbir__if_simdf8_cast_to_simdf4(c6)); stbir__simdf_store(output6, o0);)
+      stbIF7(stbir__simdf_load(o0, output7); stbir__simdf_madd(o0, o0, r0, stbir__if_simdf8_cast_to_simdf4(c7)); stbir__simdf_store(output7, o0);)
+#else
+      stbIF0(stbir__simdf_mult(o0, r0, stbir__if_simdf8_cast_to_simdf4(c0)); stbir__simdf_store(output0, o0);)
+      stbIF1(stbir__simdf_mult(o0, r0, stbir__if_simdf8_cast_to_simdf4(c1)); stbir__simdf_store(output1, o0);)
+      stbIF2(stbir__simdf_mult(o0, r0, stbir__if_simdf8_cast_to_simdf4(c2)); stbir__simdf_store(output2, o0);)
+      stbIF3(stbir__simdf_mult(o0, r0, stbir__if_simdf8_cast_to_simdf4(c3)); stbir__simdf_store(output3, o0);)
+      stbIF4(stbir__simdf_mult(o0, r0, stbir__if_simdf8_cast_to_simdf4(c4)); stbir__simdf_store(output4, o0);)
+      stbIF5(stbir__simdf_mult(o0, r0, stbir__if_simdf8_cast_to_simdf4(c5)); stbir__simdf_store(output5, o0);)
+      stbIF6(stbir__simdf_mult(o0, r0, stbir__if_simdf8_cast_to_simdf4(c6)); stbir__simdf_store(output6, o0);)
+      stbIF7(stbir__simdf_mult(o0, r0, stbir__if_simdf8_cast_to_simdf4(c7)); stbir__simdf_store(output7, o0);)
+#endif
+
+      input += 4;
+      stbIF0(output0 += 4;) stbIF1(output1 += 4;) stbIF2(output2 += 4;) stbIF3(output3 += 4;) stbIF4(output4 += 4;) stbIF5(output5 += 4;) stbIF6(output6 += 4;) stbIF7(output7 += 4;)
+    }
+  }
+#else
+  STBIR_NO_UNROLL_LOOP_START while (((char *)input_end - (char *)input) >= 16)
+  {
+    float r0, r1, r2, r3;
+    STBIR_NO_UNROLL(input);
+
+    r0 = input[0], r1 = input[1], r2 = input[2], r3 = input[3];
+
+#ifdef STB_IMAGE_RESIZE_VERTICAL_CONTINUE
+    stbIF0(output0[0] += (r0 * c0s); output0[1] += (r1 * c0s); output0[2] += (r2 * c0s); output0[3] += (r3 * c0s);)
+    stbIF1(output1[0] += (r0 * c1s); output1[1] += (r1 * c1s); output1[2] += (r2 * c1s); output1[3] += (r3 * c1s);)
+    stbIF2(output2[0] += (r0 * c2s); output2[1] += (r1 * c2s); output2[2] += (r2 * c2s); output2[3] += (r3 * c2s);)
+    stbIF3(output3[0] += (r0 * c3s); output3[1] += (r1 * c3s); output3[2] += (r2 * c3s); output3[3] += (r3 * c3s);)
+    stbIF4(output4[0] += (r0 * c4s); output4[1] += (r1 * c4s); output4[2] += (r2 * c4s); output4[3] += (r3 * c4s);)
+    stbIF5(output5[0] += (r0 * c5s); output5[1] += (r1 * c5s); output5[2] += (r2 * c5s); output5[3] += (r3 * c5s);)
+    stbIF6(output6[0] += (r0 * c6s); output6[1] += (r1 * c6s); output6[2] += (r2 * c6s); output6[3] += (r3 * c6s);)
+    stbIF7(output7[0] += (r0 * c7s); output7[1] += (r1 * c7s); output7[2] += (r2 * c7s); output7[3] += (r3 * c7s);)
+#else
+    stbIF0(output0[0] = (r0 * c0s); output0[1] = (r1 * c0s); output0[2] = (r2 * c0s); output0[3] = (r3 * c0s);)
+    stbIF1(output1[0] = (r0 * c1s); output1[1] = (r1 * c1s); output1[2] = (r2 * c1s); output1[3] = (r3 * c1s);)
+    stbIF2(output2[0] = (r0 * c2s); output2[1] = (r1 * c2s); output2[2] = (r2 * c2s); output2[3] = (r3 * c2s);)
+    stbIF3(output3[0] = (r0 * c3s); output3[1] = (r1 * c3s); output3[2] = (r2 * c3s); output3[3] = (r3 * c3s);)
+    stbIF4(output4[0] = (r0 * c4s); output4[1] = (r1 * c4s); output4[2] = (r2 * c4s); output4[3] = (r3 * c4s);)
+    stbIF5(output5[0] = (r0 * c5s); output5[1] = (r1 * c5s); output5[2] = (r2 * c5s); output5[3] = (r3 * c5s);)
+    stbIF6(output6[0] = (r0 * c6s); output6[1] = (r1 * c6s); output6[2] = (r2 * c6s); output6[3] = (r3 * c6s);)
+    stbIF7(output7[0] = (r0 * c7s); output7[1] = (r1 * c7s); output7[2] = (r2 * c7s); output7[3] = (r3 * c7s);)
+#endif
+
+    input += 4;
+    stbIF0(output0 += 4;) stbIF1(output1 += 4;) stbIF2(output2 += 4;) stbIF3(output3 += 4;) stbIF4(output4 += 4;) stbIF5(output5 += 4;) stbIF6(output6 += 4;) stbIF7(output7 += 4;)
+  }
+#endif
+  STBIR_NO_UNROLL_LOOP_START
+  while (input < input_end)
+  {
+    float r = input[0];
+    STBIR_NO_UNROLL(output0);
+
+#ifdef STB_IMAGE_RESIZE_VERTICAL_CONTINUE
+    stbIF0(output0[0] += (r * c0s);)
+    stbIF1(output1[0] += (r * c1s);)
+    stbIF2(output2[0] += (r * c2s);)
+    stbIF3(output3[0] += (r * c3s);)
+    stbIF4(output4[0] += (r * c4s);)
+    stbIF5(output5[0] += (r * c5s);)
+    stbIF6(output6[0] += (r * c6s);)
+    stbIF7(output7[0] += (r * c7s);)
+#else
+    stbIF0(output0[0] = (r * c0s);)
+    stbIF1(output1[0] = (r * c1s);)
+    stbIF2(output2[0] = (r * c2s);)
+    stbIF3(output3[0] = (r * c3s);)
+    stbIF4(output4[0] = (r * c4s);)
+    stbIF5(output5[0] = (r * c5s);)
+    stbIF6(output6[0] = (r * c6s);)
+    stbIF7(output7[0] = (r * c7s);)
+#endif
+
+    ++input;
+    stbIF0(++output0;) stbIF1(++output1;) stbIF2(++output2;) stbIF3(++output3;) stbIF4(++output4;) stbIF5(++output5;) stbIF6(++output6;) stbIF7(++output7;)
+  }
+}
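+
+// The scatter kernel walks one decoded input scanline and writes it, scaled
+// by a per-destination coefficient, into up to eight output scanlines at
+// once; the _cont variants accumulate into existing partial sums instead of
+// overwriting. A scalar sketch for two destinations (illustrative names):
+//
+//   void sketch_scatter2(float *out0, float *out1, float c0, float c1,
+//                        const float *in, int n, int continuing)
+//   {
+//     int i;
+//     for (i = 0; i < n; i++)
+//     {
+//       if (continuing) { out0[i] += in[i] * c0; out1[i] += in[i] * c1; }
+//       else            { out0[i]  = in[i] * c0; out1[i]  = in[i] * c1; }
+//     }
+//   }
+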
+static void STBIR_chans(stbir__vertical_gather_with_, _coeffs)(float *outputp, float const *vertical_coefficients, float const **inputs, float const *input0_end)
+{
+  float STBIR_SIMD_STREAMOUT_PTR(*) output = outputp;
+
+  stbIF0(float const *input0 = inputs[0]; float c0s = vertical_coefficients[0];)
+  stbIF1(float const *input1 = inputs[1]; float c1s = vertical_coefficients[1];)
+  stbIF2(float const *input2 = inputs[2]; float c2s = vertical_coefficients[2];)
+  stbIF3(float const *input3 = inputs[3]; float c3s = vertical_coefficients[3];)
+  stbIF4(float const *input4 = inputs[4]; float c4s = vertical_coefficients[4];)
+  stbIF5(float const *input5 = inputs[5]; float c5s = vertical_coefficients[5];)
+  stbIF6(float const *input6 = inputs[6]; float c6s = vertical_coefficients[6];)
+  stbIF7(float const *input7 = inputs[7]; float c7s = vertical_coefficients[7];)
+
+#if (STBIR__vertical_channels == 1) && !defined(STB_IMAGE_RESIZE_VERTICAL_CONTINUE)
+  // check single channel one weight
+  if ((c0s >= (1.0f - 0.000001f)) && (c0s <= (1.0f + 0.000001f)))
+  {
+    STBIR_MEMCPY(output, input0, (char *)input0_end - (char *)input0);
+    return;
+  }
+#endif
+
+#ifdef STBIR_SIMD
+  {
+    stbIF0(stbir__simdfX c0 = stbir__simdf_frepX(c0s);)
+    stbIF1(stbir__simdfX c1 = stbir__simdf_frepX(c1s);)
+    stbIF2(stbir__simdfX c2 = stbir__simdf_frepX(c2s);)
+    stbIF3(stbir__simdfX c3 = stbir__simdf_frepX(c3s);)
+    stbIF4(stbir__simdfX c4 = stbir__simdf_frepX(c4s);)
+    stbIF5(stbir__simdfX c5 = stbir__simdf_frepX(c5s);)
+    stbIF6(stbir__simdfX c6 = stbir__simdf_frepX(c6s);)
+    stbIF7(stbir__simdfX c7 = stbir__simdf_frepX(c7s);)
+
+    STBIR_SIMD_NO_UNROLL_LOOP_START while (((char *)input0_end - (char *)input0) >= (16 * stbir__simdfX_float_count))
+    {
+      stbir__simdfX o0, o1, o2, o3, r0, r1, r2, r3;
+      STBIR_SIMD_NO_UNROLL(output);
+
+      // prefetch four loop iterations ahead (doesn't affect much for small resizes, but helps with big ones)
+      stbIF0(stbir__prefetch(input0 + (16 * stbir__simdfX_float_count));)
+      stbIF1(stbir__prefetch(input1 + (16 * stbir__simdfX_float_count));)
+      stbIF2(stbir__prefetch(input2 + (16 * stbir__simdfX_float_count));)
+      stbIF3(stbir__prefetch(input3 + (16 * stbir__simdfX_float_count));)
+      stbIF4(stbir__prefetch(input4 + (16 * stbir__simdfX_float_count));)
+      stbIF5(stbir__prefetch(input5 + (16 * stbir__simdfX_float_count));)
+      stbIF6(stbir__prefetch(input6 + (16 * stbir__simdfX_float_count));)
+      stbIF7(stbir__prefetch(input7 + (16 * stbir__simdfX_float_count));)
+
+#ifdef STB_IMAGE_RESIZE_VERTICAL_CONTINUE
+      stbIF0(stbir__simdfX_load(o0, output); stbir__simdfX_load(o1, output + stbir__simdfX_float_count); stbir__simdfX_load(o2, output + (2 * stbir__simdfX_float_count)); stbir__simdfX_load(o3, output + (3 * stbir__simdfX_float_count));
+             stbir__simdfX_load(r0, input0); stbir__simdfX_load(r1, input0 + stbir__simdfX_float_count); stbir__simdfX_load(r2, input0 + (2 * stbir__simdfX_float_count)); stbir__simdfX_load(r3, input0 + (3 * stbir__simdfX_float_count));
+             stbir__simdfX_madd(o0, o0, r0, c0); stbir__simdfX_madd(o1, o1, r1, c0); stbir__simdfX_madd(o2, o2, r2, c0); stbir__simdfX_madd(o3, o3, r3, c0);)
+#else
+      stbIF0(stbir__simdfX_load(r0, input0); stbir__simdfX_load(r1, input0 + stbir__simdfX_float_count); stbir__simdfX_load(r2, input0 + (2 * stbir__simdfX_float_count)); stbir__simdfX_load(r3, input0 + (3 * stbir__simdfX_float_count));
+             stbir__simdfX_mult(o0, r0, c0); stbir__simdfX_mult(o1, r1, c0); stbir__simdfX_mult(o2, r2, c0); stbir__simdfX_mult(o3, r3, c0);)
+#endif
+
+      stbIF1(stbir__simdfX_load(r0, input1); stbir__simdfX_load(r1, input1 + stbir__simdfX_float_count); stbir__simdfX_load(r2, input1 + (2 * stbir__simdfX_float_count)); stbir__simdfX_load(r3, input1 + (3 * stbir__simdfX_float_count));
+             stbir__simdfX_madd(o0, o0, r0, c1); stbir__simdfX_madd(o1, o1, r1, c1); stbir__simdfX_madd(o2, o2, r2, c1); stbir__simdfX_madd(o3, o3, r3, c1);)
+      stbIF2(stbir__simdfX_load(r0, input2); stbir__simdfX_load(r1, input2 + stbir__simdfX_float_count); stbir__simdfX_load(r2, input2 + (2 * stbir__simdfX_float_count)); stbir__simdfX_load(r3, input2 + (3 * stbir__simdfX_float_count));
+             stbir__simdfX_madd(o0, o0, r0, c2); stbir__simdfX_madd(o1, o1, r1, c2); stbir__simdfX_madd(o2, o2, r2, c2); stbir__simdfX_madd(o3, o3, r3, c2);)
+      stbIF3(stbir__simdfX_load(r0, input3); stbir__simdfX_load(r1, input3 + stbir__simdfX_float_count); stbir__simdfX_load(r2, input3 + (2 * stbir__simdfX_float_count)); stbir__simdfX_load(r3, input3 + (3 * stbir__simdfX_float_count));
+             stbir__simdfX_madd(o0, o0, r0, c3); stbir__simdfX_madd(o1, o1, r1, c3); stbir__simdfX_madd(o2, o2, r2, c3); stbir__simdfX_madd(o3, o3, r3, c3);)
+      stbIF4(stbir__simdfX_load(r0, input4); stbir__simdfX_load(r1, input4 + stbir__simdfX_float_count); stbir__simdfX_load(r2, input4 + (2 * stbir__simdfX_float_count)); stbir__simdfX_load(r3, input4 + (3 * stbir__simdfX_float_count));
+             stbir__simdfX_madd(o0, o0, r0, c4); stbir__simdfX_madd(o1, o1, r1, c4); stbir__simdfX_madd(o2, o2, r2, c4); stbir__simdfX_madd(o3, o3, r3, c4);)
+      stbIF5(stbir__simdfX_load(r0, input5); stbir__simdfX_load(r1, input5 + stbir__simdfX_float_count); stbir__simdfX_load(r2, input5 + (2 * stbir__simdfX_float_count)); stbir__simdfX_load(r3, input5 + (3 * stbir__simdfX_float_count));
+             stbir__simdfX_madd(o0, o0, r0, c5); stbir__simdfX_madd(o1, o1, r1, c5); stbir__simdfX_madd(o2, o2, r2, c5); stbir__simdfX_madd(o3, o3, r3, c5);)
+      stbIF6(stbir__simdfX_load(r0, input6); stbir__simdfX_load(r1, input6 + stbir__simdfX_float_count); stbir__simdfX_load(r2, input6 + (2 * stbir__simdfX_float_count)); stbir__simdfX_load(r3, input6 + (3 * stbir__simdfX_float_count));
+             stbir__simdfX_madd(o0, o0, r0, c6); stbir__simdfX_madd(o1, o1, r1, c6); stbir__simdfX_madd(o2, o2, r2, c6); stbir__simdfX_madd(o3, o3, r3, c6);)
+      stbIF7(stbir__simdfX_load(r0, input7); stbir__simdfX_load(r1, input7 + stbir__simdfX_float_count); stbir__simdfX_load(r2, input7 + (2 * stbir__simdfX_float_count)); stbir__simdfX_load(r3, input7 + (3 * stbir__simdfX_float_count));
+             stbir__simdfX_madd(o0, o0, r0, c7); stbir__simdfX_madd(o1, o1, r1, c7); stbir__simdfX_madd(o2, o2, r2, c7); stbir__simdfX_madd(o3, o3, r3, c7);)
+
+      stbir__simdfX_store(output, o0);
+      stbir__simdfX_store(output + stbir__simdfX_float_count, o1);
+      stbir__simdfX_store(output + (2 * stbir__simdfX_float_count), o2);
+      stbir__simdfX_store(output + (3 * stbir__simdfX_float_count), o3);
+      output += (4 * stbir__simdfX_float_count);
+      stbIF0(input0 += (4 * stbir__simdfX_float_count);) stbIF1(input1 += (4 * stbir__simdfX_float_count);) stbIF2(input2 += (4 * stbir__simdfX_float_count);) stbIF3(input3 += (4 * stbir__simdfX_float_count);) stbIF4(input4 += (4 * stbir__simdfX_float_count);) stbIF5(input5 += (4 * stbir__simdfX_float_count);) stbIF6(input6 += (4 * stbir__simdfX_float_count);) stbIF7(input7 += (4 * stbir__simdfX_float_count);)
+    }
+
+    STBIR_SIMD_NO_UNROLL_LOOP_START
+    while (((char *)input0_end - (char *)input0) >= 16)
+    {
+      stbir__simdf o0, r0;
+      STBIR_SIMD_NO_UNROLL(output);
+
+#ifdef STB_IMAGE_RESIZE_VERTICAL_CONTINUE
+      stbIF0(stbir__simdf_load(o0, output); stbir__simdf_load(r0, input0); stbir__simdf_madd(o0, o0, r0, stbir__if_simdf8_cast_to_simdf4(c0));)
+#else
+      stbIF0(stbir__simdf_load(r0, input0); stbir__simdf_mult(o0, r0, stbir__if_simdf8_cast_to_simdf4(c0));)
+#endif
+      stbIF1(stbir__simdf_load(r0, input1); stbir__simdf_madd(o0, o0, r0, stbir__if_simdf8_cast_to_simdf4(c1));)
+      stbIF2(stbir__simdf_load(r0, input2); stbir__simdf_madd(o0, o0, r0, stbir__if_simdf8_cast_to_simdf4(c2));)
+      stbIF3(stbir__simdf_load(r0, input3); stbir__simdf_madd(o0, o0, r0, stbir__if_simdf8_cast_to_simdf4(c3));)
+      stbIF4(stbir__simdf_load(r0, input4); stbir__simdf_madd(o0, o0, r0, stbir__if_simdf8_cast_to_simdf4(c4));)
+      stbIF5(stbir__simdf_load(r0, input5); stbir__simdf_madd(o0, o0, r0, stbir__if_simdf8_cast_to_simdf4(c5));)
+      stbIF6(stbir__simdf_load(r0, input6); stbir__simdf_madd(o0, o0, r0, stbir__if_simdf8_cast_to_simdf4(c6));)
+      stbIF7(stbir__simdf_load(r0, input7); stbir__simdf_madd(o0, o0, r0, stbir__if_simdf8_cast_to_simdf4(c7));)
+
+      stbir__simdf_store(output, o0);
+      output += 4;
+      stbIF0(input0 += 4;) stbIF1(input1 += 4;) stbIF2(input2 += 4;) stbIF3(input3 += 4;) stbIF4(input4 += 4;) stbIF5(input5 += 4;) stbIF6(input6 += 4;) stbIF7(input7 += 4;)
+    }
+  }
+#else
+  STBIR_NO_UNROLL_LOOP_START while (((char *)input0_end - (char *)input0) >= 16)
+  {
+    float o0, o1, o2, o3;
+    STBIR_NO_UNROLL(output);
+#ifdef STB_IMAGE_RESIZE_VERTICAL_CONTINUE
+    stbIF0(o0 = output[0] + input0[0] * c0s; o1 = output[1] + input0[1] * c0s; o2 = output[2] + input0[2] * c0s; o3 = output[3] + input0[3] * c0s;)
+#else
+    stbIF0(o0 = input0[0] * c0s; o1 = input0[1] * c0s; o2 = input0[2] * c0s; o3 = input0[3] * c0s;)
+#endif
+    stbIF1(o0 += input1[0] * c1s; o1 += input1[1] * c1s; o2 += input1[2] * c1s; o3 += input1[3] * c1s;)
+    stbIF2(o0 += input2[0] * c2s; o1 += input2[1] * c2s; o2 += input2[2] * c2s; o3 += input2[3] * c2s;)
+    stbIF3(o0 += input3[0] * c3s; o1 += input3[1] * c3s; o2 += input3[2] * c3s; o3 += input3[3] * c3s;)
+    stbIF4(o0 += input4[0] * c4s; o1 += input4[1] * c4s; o2 += input4[2] * c4s; o3 += input4[3] * c4s;)
+    stbIF5(o0 += input5[0] * c5s; o1 += input5[1] * c5s; o2 += input5[2] * c5s; o3 += input5[3] * c5s;)
+    stbIF6(o0 += input6[0] * c6s; o1 += input6[1] * c6s; o2 += input6[2] * c6s; o3 += input6[3] * c6s;)
+    stbIF7(o0 += input7[0] * c7s; o1 += input7[1] * c7s; o2 += input7[2] * c7s; o3 += input7[3] * c7s;)
+    output[0] = o0;
+    output[1] = o1;
+    output[2] = o2;
+    output[3] = o3;
+    output += 4;
+    stbIF0(input0 += 4;) stbIF1(input1 += 4;) stbIF2(input2 += 4;) stbIF3(input3 += 4;) stbIF4(input4 += 4;) stbIF5(input5 += 4;) stbIF6(input6 += 4;) stbIF7(input7 += 4;)
+  }
+#endif
+  STBIR_NO_UNROLL_LOOP_START
+  while (input0 < input0_end)
+  {
+    float o0;
+    STBIR_NO_UNROLL(output);
+#ifdef STB_IMAGE_RESIZE_VERTICAL_CONTINUE
+    stbIF0(o0 = output[0] + input0[0] * c0s;)
+#else
+    stbIF0(o0 = input0[0] * c0s;)
+#endif
+    stbIF1(o0 += input1[0] * c1s;)
+    stbIF2(o0 += input2[0] * c2s;)
+    stbIF3(o0 += input3[0] * c3s;)
+    stbIF4(o0 += input4[0] * c4s;)
+    stbIF5(o0 += input5[0] * c5s;)
+    stbIF6(o0 += input6[0] * c6s;)
+    stbIF7(o0 += input7[0] * c7s;)
+    output[0] = o0;
+    ++output;
+    stbIF0(++input0;) stbIF1(++input1;) stbIF2(++input2;) stbIF3(++input3;) stbIF4(++input4;) stbIF5(++input5;) stbIF6(++input6;) stbIF7(++input7;)
+  }
+}
+#undef stbIF0
+#undef stbIF1
+#undef stbIF2
+#undef stbIF3
+#undef stbIF4
+#undef stbIF5
+#undef stbIF6
+#undef stbIF7
+#undef STB_IMAGE_RESIZE_DO_VERTICALS
+#undef STBIR__vertical_channels
+#undef STB_IMAGE_RESIZE_DO_HORIZONTALS
+#undef STBIR_strs_join24
+#undef STBIR_strs_join14
+#undef STBIR_chans
+#ifdef STB_IMAGE_RESIZE_VERTICAL_CONTINUE
+#undef STB_IMAGE_RESIZE_VERTICAL_CONTINUE
+#endif
+
+#else // !STB_IMAGE_RESIZE_DO_VERTICALS
+
+#define STBIR_chans(start, end) STBIR_strs_join1(start, STBIR__horizontal_channels, end)
+
+#ifndef stbir__2_coeff_only
+#define stbir__2_coeff_only() \
+  stbir__1_coeff_only();      \
+  stbir__1_coeff_remnant(1);
+#endif
+
+#ifndef stbir__2_coeff_remnant
+#define stbir__2_coeff_remnant(ofs) \
+  stbir__1_coeff_remnant(ofs);      \
+  stbir__1_coeff_remnant((ofs) + 1);
+#endif
+
+#ifndef stbir__3_coeff_only
+#define stbir__3_coeff_only() \
+  stbir__2_coeff_only();      \
+  stbir__1_coeff_remnant(2);
+#endif
+
+#ifndef stbir__3_coeff_remnant
+#define stbir__3_coeff_remnant(ofs) \
+  stbir__2_coeff_remnant(ofs);      \
+  stbir__1_coeff_remnant((ofs) + 2);
+#endif
+
+#ifndef stbir__3_coeff_setup
+#define stbir__3_coeff_setup()
+#endif
+
+#ifndef stbir__4_coeff_start
+#define stbir__4_coeff_start() \
+  stbir__2_coeff_only();       \
+  stbir__2_coeff_remnant(2);
+#endif
+
+#ifndef stbir__4_coeff_continue_from_4
+#define stbir__4_coeff_continue_from_4(ofs) \
+  stbir__2_coeff_remnant(ofs);              \
+  stbir__2_coeff_remnant((ofs) + 2);
+#endif
+
+#ifndef stbir__store_output_tiny
+#define stbir__store_output_tiny stbir__store_output
+#endif
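+
+// Wider horizontal kernels are composed from the 1- and 2-coefficient
+// primitives each channel-count instantiation supplies: a 7-tap row, for
+// example, is evaluated as stbir__4_coeff_start() followed by
+// stbir__3_coeff_remnant(4), and the 3-tap remnant itself decomposes into a
+// 2-tap remnant plus a 1-tap remnant. Expanding the defaults above for
+// 3 taps at offset 4 gives:
+//
+//   stbir__3_coeff_remnant(4)
+//     -> stbir__2_coeff_remnant(4); stbir__1_coeff_remnant(6);
+//     -> stbir__1_coeff_remnant(4); stbir__1_coeff_remnant(5); stbir__1_coeff_remnant(6);
+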
+static void STBIR_chans(stbir__horizontal_gather_, _channels_with_1_coeff)(float *output_buffer, unsigned int output_sub_size, float const *decode_buffer, stbir__contributors const *horizontal_contributors, float const *horizontal_coefficients, int coefficient_width)
+{
+  float const *output_end = output_buffer + output_sub_size * STBIR__horizontal_channels;
+  float STBIR_SIMD_STREAMOUT_PTR(*) output = output_buffer;
+  STBIR_SIMD_NO_UNROLL_LOOP_START
+  do
+  {
+    float const *decode = decode_buffer + horizontal_contributors->n0 * STBIR__horizontal_channels;
+    float const *hc = horizontal_coefficients;
+    stbir__1_coeff_only();
+    stbir__store_output_tiny();
+  } while (output < output_end);
+}
+
+static void STBIR_chans(stbir__horizontal_gather_, _channels_with_2_coeffs)(float *output_buffer, unsigned int output_sub_size, float const *decode_buffer, stbir__contributors const *horizontal_contributors, float const *horizontal_coefficients, int coefficient_width)
+{
+  float const *output_end = output_buffer + output_sub_size * STBIR__horizontal_channels;
+  float STBIR_SIMD_STREAMOUT_PTR(*) output = output_buffer;
+  STBIR_SIMD_NO_UNROLL_LOOP_START
+  do
+  {
+    float const *decode = decode_buffer + horizontal_contributors->n0 * STBIR__horizontal_channels;
+    float const *hc = horizontal_coefficients;
+    stbir__2_coeff_only();
+    stbir__store_output_tiny();
+  } while (output < output_end);
+}
+
+static void STBIR_chans(stbir__horizontal_gather_, _channels_with_3_coeffs)(float *output_buffer, unsigned int output_sub_size, float const *decode_buffer, stbir__contributors const *horizontal_contributors, float const *horizontal_coefficients, int coefficient_width)
+{
+  float const *output_end = output_buffer + output_sub_size * STBIR__horizontal_channels;
+  float STBIR_SIMD_STREAMOUT_PTR(*) output = output_buffer;
+  STBIR_SIMD_NO_UNROLL_LOOP_START
+  do
+  {
+    float const *decode = decode_buffer + horizontal_contributors->n0 * STBIR__horizontal_channels;
+    float const *hc = horizontal_coefficients;
+    stbir__3_coeff_only();
+    stbir__store_output_tiny();
+  } while (output < output_end);
+}
+
+static void STBIR_chans(stbir__horizontal_gather_, _channels_with_4_coeffs)(float *output_buffer, unsigned int output_sub_size, float const *decode_buffer, stbir__contributors const *horizontal_contributors, float const *horizontal_coefficients, int coefficient_width)
+{
+  float const *output_end = output_buffer + output_sub_size * STBIR__horizontal_channels;
+  float STBIR_SIMD_STREAMOUT_PTR(*) output = output_buffer;
+  STBIR_SIMD_NO_UNROLL_LOOP_START
+  do
+  {
+    float const *decode = decode_buffer + horizontal_contributors->n0 * STBIR__horizontal_channels;
+    float const *hc = horizontal_coefficients;
+    stbir__4_coeff_start();
+    stbir__store_output();
+  } while (output < output_end);
+}
+
+static void STBIR_chans(stbir__horizontal_gather_, _channels_with_5_coeffs)(float *output_buffer, unsigned int output_sub_size, float const *decode_buffer, stbir__contributors const *horizontal_contributors, float const *horizontal_coefficients, int coefficient_width)
+{
+  float const *output_end = output_buffer + output_sub_size * STBIR__horizontal_channels;
+  float STBIR_SIMD_STREAMOUT_PTR(*) output = output_buffer;
+  STBIR_SIMD_NO_UNROLL_LOOP_START
+  do
+  {
+    float const *decode = decode_buffer + horizontal_contributors->n0 * STBIR__horizontal_channels;
+    float const *hc = horizontal_coefficients;
+    stbir__4_coeff_start();
+    stbir__1_coeff_remnant(4);
+    stbir__store_output();
+  } while (output < output_end);
+}
+
+static void STBIR_chans(stbir__horizontal_gather_, _channels_with_6_coeffs)(float *output_buffer, unsigned int output_sub_size, float const *decode_buffer, stbir__contributors const *horizontal_contributors, float const *horizontal_coefficients, int coefficient_width)
+{
+  float const *output_end = output_buffer + output_sub_size * STBIR__horizontal_channels;
+  float STBIR_SIMD_STREAMOUT_PTR(*) output = output_buffer;
+  STBIR_SIMD_NO_UNROLL_LOOP_START
+  do
+  {
+    float const *decode = decode_buffer + horizontal_contributors->n0 * STBIR__horizontal_channels;
+    float const *hc = horizontal_coefficients;
+    stbir__4_coeff_start();
+    stbir__2_coeff_remnant(4);
+    stbir__store_output();
+  } while (output < output_end);
+}
+
+static void STBIR_chans(stbir__horizontal_gather_, _channels_with_7_coeffs)(float *output_buffer, unsigned int output_sub_size, float const *decode_buffer, stbir__contributors const *horizontal_contributors, float const *horizontal_coefficients, int coefficient_width)
+{
+  float const *output_end = output_buffer + output_sub_size * STBIR__horizontal_channels;
+  float STBIR_SIMD_STREAMOUT_PTR(*) output = output_buffer;
+  stbir__3_coeff_setup();
+  STBIR_SIMD_NO_UNROLL_LOOP_START
+  do
+  {
+    float const *decode = decode_buffer + horizontal_contributors->n0 * STBIR__horizontal_channels;
+    float const *hc = horizontal_coefficients;
+
+    stbir__4_coeff_start();
+    stbir__3_coeff_remnant(4);
+    stbir__store_output();
+  } while (output < output_end);
+}
+
+static void STBIR_chans(stbir__horizontal_gather_, _channels_with_8_coeffs)(float *output_buffer, unsigned int output_sub_size, float const *decode_buffer, stbir__contributors const *horizontal_contributors, float const *horizontal_coefficients, int coefficient_width)
+{
+  float const *output_end = output_buffer + output_sub_size * STBIR__horizontal_channels;
+  float STBIR_SIMD_STREAMOUT_PTR(*) output = output_buffer;
+  STBIR_SIMD_NO_UNROLL_LOOP_START
+  do
+  {
+    float const *decode = decode_buffer + horizontal_contributors->n0 * STBIR__horizontal_channels;
+    float const *hc = horizontal_coefficients;
+    stbir__4_coeff_start();
+    stbir__4_coeff_continue_from_4(4);
+    stbir__store_output();
+  } while (output < output_end);
+}
+
+static void STBIR_chans(stbir__horizontal_gather_, _channels_with_9_coeffs)(float *output_buffer, unsigned int output_sub_size, float const *decode_buffer, stbir__contributors const *horizontal_contributors, float const *horizontal_coefficients, int coefficient_width)
+{
+  float const *output_end = output_buffer + output_sub_size * STBIR__horizontal_channels;
+  float STBIR_SIMD_STREAMOUT_PTR(*) output = output_buffer;
+  STBIR_SIMD_NO_UNROLL_LOOP_START
+  do
+  {
+    float const *decode = decode_buffer + horizontal_contributors->n0 * STBIR__horizontal_channels;
+    float const *hc = horizontal_coefficients;
+    stbir__4_coeff_start();
+    stbir__4_coeff_continue_from_4(4);
+    stbir__1_coeff_remnant(8);
+    stbir__store_output();
+  } while (output < output_end);
+}
+
+static void STBIR_chans(stbir__horizontal_gather_, _channels_with_10_coeffs)(float *output_buffer, unsigned int output_sub_size, float const *decode_buffer, stbir__contributors const *horizontal_contributors, float const *horizontal_coefficients, int coefficient_width)
+{
+  float const *output_end = output_buffer + output_sub_size * STBIR__horizontal_channels;
+  float STBIR_SIMD_STREAMOUT_PTR(*) output = output_buffer;
+  STBIR_SIMD_NO_UNROLL_LOOP_START
+  do
+  {
+    float const *decode = decode_buffer + horizontal_contributors->n0 * STBIR__horizontal_channels;
+    float const *hc = horizontal_coefficients;
+    stbir__4_coeff_start();
+    stbir__4_coeff_continue_from_4(4);
+    stbir__2_coeff_remnant(8);
+    stbir__store_output();
+  } while (output < output_end);
+}
+
+static void STBIR_chans(stbir__horizontal_gather_, _channels_with_11_coeffs)(float *output_buffer, unsigned int output_sub_size, float const *decode_buffer, stbir__contributors const *horizontal_contributors, float const *horizontal_coefficients, int coefficient_width)
+{
+  float const *output_end = output_buffer + output_sub_size * STBIR__horizontal_channels;
+  float STBIR_SIMD_STREAMOUT_PTR(*) output = output_buffer;
+  stbir__3_coeff_setup();
+  STBIR_SIMD_NO_UNROLL_LOOP_START
+  do
+  {
+    float const *decode = decode_buffer + horizontal_contributors->n0 * STBIR__horizontal_channels;
+    float const *hc = horizontal_coefficients;
+    stbir__4_coeff_start();
+    stbir__4_coeff_continue_from_4(4);
+    stbir__3_coeff_remnant(8);
+    stbir__store_output();
+  } while (output < output_end);
+}
+
+static void STBIR_chans(stbir__horizontal_gather_, _channels_with_12_coeffs)(float *output_buffer, unsigned int output_sub_size, float const *decode_buffer, stbir__contributors const *horizontal_contributors, float const *horizontal_coefficients, int coefficient_width)
+{
+  float const *output_end = output_buffer + output_sub_size * STBIR__horizontal_channels;
+  float STBIR_SIMD_STREAMOUT_PTR(*) output = output_buffer;
+  STBIR_SIMD_NO_UNROLL_LOOP_START
+  do
+  {
+    float const *decode = decode_buffer + horizontal_contributors->n0 * STBIR__horizontal_channels;
+    float const *hc = horizontal_coefficients;
+    stbir__4_coeff_start();
+    stbir__4_coeff_continue_from_4(4);
+    stbir__4_coeff_continue_from_4(8);
+    stbir__store_output();
+  } while (output < output_end);
+}
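+
+// Kernels longer than 12 taps fall through to the _n_coeffs_mod{0..3}
+// variants below: a stbir__4_coeff_start() block, then n groups of four via
+// stbir__4_coeff_continue_from_4(), then a 1/2/3-tap remnant selected by the
+// kernel width mod 4. The loop count n = ((n1 - n0 + 1) - taps_done + 3) >> 2
+// rounds the remaining taps up to whole groups of four. Worked example: a
+// 14-tap kernel (14 mod 4 == 2) uses mod2, which computes
+// n = ((14 - 6) + 3) >> 2 = 2, giving 4 + 2*4 + 2 = 14 taps total.
+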
+static void STBIR_chans(stbir__horizontal_gather_, _channels_with_n_coeffs_mod0)(float *output_buffer, unsigned int output_sub_size, float const *decode_buffer, stbir__contributors const *horizontal_contributors, float const *horizontal_coefficients, int coefficient_width)
+{
+  float const *output_end = output_buffer + output_sub_size * STBIR__horizontal_channels;
+  float STBIR_SIMD_STREAMOUT_PTR(*) output = output_buffer;
+  STBIR_SIMD_NO_UNROLL_LOOP_START
+  do
+  {
+    float const *decode = decode_buffer + horizontal_contributors->n0 * STBIR__horizontal_channels;
+    int n = ((horizontal_contributors->n1 - horizontal_contributors->n0 + 1) - 4 + 3) >> 2;
+    float const *hc = horizontal_coefficients;
+
+    stbir__4_coeff_start();
+    STBIR_SIMD_NO_UNROLL_LOOP_START
+    do
+    {
+      hc += 4;
+      decode += STBIR__horizontal_channels * 4;
+      stbir__4_coeff_continue_from_4(0);
+      --n;
+    } while (n > 0);
+    stbir__store_output();
+  } while (output < output_end);
+}
+
+static void STBIR_chans(stbir__horizontal_gather_, _channels_with_n_coeffs_mod1)(float *output_buffer, unsigned int output_sub_size, float const *decode_buffer, stbir__contributors const *horizontal_contributors, float const *horizontal_coefficients, int coefficient_width)
+{
+  float const *output_end = output_buffer + output_sub_size * STBIR__horizontal_channels;
+  float STBIR_SIMD_STREAMOUT_PTR(*) output = output_buffer;
+  STBIR_SIMD_NO_UNROLL_LOOP_START
+  do
+  {
+    float const *decode = decode_buffer + horizontal_contributors->n0 * STBIR__horizontal_channels;
+    int n = ((horizontal_contributors->n1 - horizontal_contributors->n0 + 1) - 5 + 3) >> 2;
+    float const *hc = horizontal_coefficients;
+
+    stbir__4_coeff_start();
+    STBIR_SIMD_NO_UNROLL_LOOP_START
+    do
+    {
+      hc += 4;
+      decode += STBIR__horizontal_channels * 4;
+      stbir__4_coeff_continue_from_4(0);
+      --n;
+    } while (n > 0);
+    stbir__1_coeff_remnant(4);
+    stbir__store_output();
+  } while (output < output_end);
+}
+
+static void STBIR_chans(stbir__horizontal_gather_, _channels_with_n_coeffs_mod2)(float *output_buffer, unsigned int output_sub_size, float const *decode_buffer, stbir__contributors const *horizontal_contributors, float const *horizontal_coefficients, int coefficient_width)
+{
+  float const *output_end = output_buffer + output_sub_size * STBIR__horizontal_channels;
+  float STBIR_SIMD_STREAMOUT_PTR(*) output = output_buffer;
+  STBIR_SIMD_NO_UNROLL_LOOP_START
+  do
+  {
+    float const *decode = decode_buffer + horizontal_contributors->n0 * STBIR__horizontal_channels;
+    int n = ((horizontal_contributors->n1 - horizontal_contributors->n0 + 1) - 6 + 3) >> 2;
+    float const *hc = horizontal_coefficients;
+
+    stbir__4_coeff_start();
+    STBIR_SIMD_NO_UNROLL_LOOP_START
+    do
+    {
+      hc += 4;
+      decode += STBIR__horizontal_channels * 4;
+      stbir__4_coeff_continue_from_4(0);
+      --n;
+    } while (n > 0);
+    stbir__2_coeff_remnant(4);
+
+    stbir__store_output();
+  } while (output < output_end);
+}
+
+static void STBIR_chans(stbir__horizontal_gather_, _channels_with_n_coeffs_mod3)(float *output_buffer, unsigned int output_sub_size, float const *decode_buffer, stbir__contributors const *horizontal_contributors, float const *horizontal_coefficients, int coefficient_width)
+{
+  float const *output_end = output_buffer + output_sub_size * STBIR__horizontal_channels;
+  float STBIR_SIMD_STREAMOUT_PTR(*) output = output_buffer;
+  stbir__3_coeff_setup();
+  STBIR_SIMD_NO_UNROLL_LOOP_START
+  do
+  {
+    float const *decode = decode_buffer + horizontal_contributors->n0 * STBIR__horizontal_channels;
+    int n = ((horizontal_contributors->n1 - horizontal_contributors->n0 + 1) - 7 + 3) >> 2;
+    float const *hc = horizontal_coefficients;
+
+    stbir__4_coeff_start();
+    STBIR_SIMD_NO_UNROLL_LOOP_START
+    do
+    {
+      hc += 4;
+      decode += STBIR__horizontal_channels * 4;
+      stbir__4_coeff_continue_from_4(0);
+      --n;
+    } while (n > 0);
+    stbir__3_coeff_remnant(4);
+
+    stbir__store_output();
+  } while (output < output_end);
+}
+
+static stbir__horizontal_gather_channels_func *STBIR_chans(stbir__horizontal_gather_, _channels_with_n_coeffs_funcs)[4] =
+{
+  STBIR_chans(stbir__horizontal_gather_, _channels_with_n_coeffs_mod0),
+  STBIR_chans(stbir__horizontal_gather_, _channels_with_n_coeffs_mod1),
+  STBIR_chans(stbir__horizontal_gather_, _channels_with_n_coeffs_mod2),
+  STBIR_chans(stbir__horizontal_gather_, _channels_with_n_coeffs_mod3),
+};
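+
+// The table above maps (kernel width mod 4) to a long-kernel routine; the
+// table below maps widths 1..12 directly to a fixed-size routine. A sketch
+// of the selection logic (illustrative wrapper, with N standing for the
+// instantiated channel count; the actual dispatch lives in the
+// non-templated part of the file):
+//
+//   stbir__horizontal_gather_channels_func *sketch_select(int width)
+//   {
+//     if (width <= 12)
+//       return stbir__horizontal_gather_N_channels_funcs[width - 1];
+//     return stbir__horizontal_gather_N_channels_with_n_coeffs_funcs[width & 3];
+//   }
+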
+static stbir__horizontal_gather_channels_func *STBIR_chans(stbir__horizontal_gather_, _channels_funcs)[12] =
+{
+  STBIR_chans(stbir__horizontal_gather_, _channels_with_1_coeff),
+  STBIR_chans(stbir__horizontal_gather_, _channels_with_2_coeffs),
+  STBIR_chans(stbir__horizontal_gather_, _channels_with_3_coeffs),
+  STBIR_chans(stbir__horizontal_gather_, _channels_with_4_coeffs),
+  STBIR_chans(stbir__horizontal_gather_, _channels_with_5_coeffs),
+  STBIR_chans(stbir__horizontal_gather_, _channels_with_6_coeffs),
+  STBIR_chans(stbir__horizontal_gather_, _channels_with_7_coeffs),
+  STBIR_chans(stbir__horizontal_gather_, _channels_with_8_coeffs),
+  STBIR_chans(stbir__horizontal_gather_, _channels_with_9_coeffs),
+  STBIR_chans(stbir__horizontal_gather_, _channels_with_10_coeffs),
+  STBIR_chans(stbir__horizontal_gather_, _channels_with_11_coeffs),
+  STBIR_chans(stbir__horizontal_gather_, _channels_with_12_coeffs),
+};
+
+#undef STBIR__horizontal_channels
+#undef STB_IMAGE_RESIZE_DO_HORIZONTALS
+#undef stbir__1_coeff_only
+#undef stbir__1_coeff_remnant
+#undef stbir__2_coeff_only
+#undef stbir__2_coeff_remnant
+#undef stbir__3_coeff_only
+#undef stbir__3_coeff_remnant
+#undef stbir__3_coeff_setup
+#undef stbir__4_coeff_start
+#undef stbir__4_coeff_continue_from_4
+#undef stbir__store_output
+#undef stbir__store_output_tiny
+#undef STBIR_chans
+
+#endif // HORIZONTALS
+
+#undef STBIR_strs_join2
+#undef STBIR_strs_join1
+
+#endif // STB_IMAGE_RESIZE_DO_HORIZONTALS/VERTICALS/CODERS
+
+/*
+------------------------------------------------------------------------------
+This software is available under 2 licenses -- choose whichever you prefer.