🛠️🐜 Antkeeper superbuild with dependencies included https://antkeeper.com
  1. #ifndef TINYEXR_H_
  2. #define TINYEXR_H_
  3. /*
  4. Copyright (c) 2014 - 2021, Syoyo Fujita and many contributors.
  5. All rights reserved.
  6. Redistribution and use in source and binary forms, with or without
  7. modification, are permitted provided that the following conditions are met:
  8. * Redistributions of source code must retain the above copyright
  9. notice, this list of conditions and the following disclaimer.
  10. * Redistributions in binary form must reproduce the above copyright
  11. notice, this list of conditions and the following disclaimer in the
  12. documentation and/or other materials provided with the distribution.
  13. * Neither the name of the Syoyo Fujita nor the
  14. names of its contributors may be used to endorse or promote products
  15. derived from this software without specific prior written permission.
  16. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
  17. ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  18. WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  19. DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
  20. DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  21. (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  22. LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  23. ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  24. (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  25. SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  26. */
  27. // TinyEXR contains some OpenEXR code, which is licensed under ------------
  28. ///////////////////////////////////////////////////////////////////////////
  29. //
  30. // Copyright (c) 2002, Industrial Light & Magic, a division of Lucas
  31. // Digital Ltd. LLC
  32. //
  33. // All rights reserved.
  34. //
  35. // Redistribution and use in source and binary forms, with or without
  36. // modification, are permitted provided that the following conditions are
  37. // met:
  38. // * Redistributions of source code must retain the above copyright
  39. // notice, this list of conditions and the following disclaimer.
  40. // * Redistributions in binary form must reproduce the above
  41. // copyright notice, this list of conditions and the following disclaimer
  42. // in the documentation and/or other materials provided with the
  43. // distribution.
  44. // * Neither the name of Industrial Light & Magic nor the names of
  45. // its contributors may be used to endorse or promote products derived
  46. // from this software without specific prior written permission.
  47. //
  48. // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  49. // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  50. // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  51. // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  52. // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  53. // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  54. // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  55. // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  56. // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  57. // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  58. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  59. //
  60. ///////////////////////////////////////////////////////////////////////////
  61. // End of OpenEXR license -------------------------------------------------
  62. //
  63. //
  64. // Do this:
  65. // #define TINYEXR_IMPLEMENTATION
  66. // before you include this file in *one* C or C++ file to create the
  67. // implementation.
  68. //
  69. // // i.e. it should look like this:
  70. // #include ...
  71. // #include ...
  72. // #include ...
  73. // #define TINYEXR_IMPLEMENTATION
  74. // #include "tinyexr.h"
  75. //
  76. //
  77. #include <stddef.h> // for size_t
  78. #include <stdint.h> // assume stdint.h is available (C99)
  79. #ifdef __cplusplus
  80. extern "C" {
  81. #endif
  82. #if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
  83. defined(__i386) || defined(__i486__) || defined(__i486) || \
  84. defined(i386) || defined(__ia64__) || defined(__x86_64__)
  85. #define TINYEXR_X86_OR_X64_CPU 1
  86. #else
  87. #define TINYEXR_X86_OR_X64_CPU 0
  88. #endif
  89. #if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || TINYEXR_X86_OR_X64_CPU
  90. #define TINYEXR_LITTLE_ENDIAN 1
  91. #else
  92. #define TINYEXR_LITTLE_ENDIAN 0
  93. #endif
  94. // Use miniz to decode ZIP-compressed pixel data. Linking with zlib is
  95. // required if this flag is 0 and TINYEXR_USE_STB_ZLIB is 0.
  96. #ifndef TINYEXR_USE_MINIZ
  97. #define TINYEXR_USE_MINIZ (1)
  98. #endif
  99. // Use the ZIP implementation of stb_image.h and stb_image_write.h.
  100. #ifndef TINYEXR_USE_STB_ZLIB
  101. #define TINYEXR_USE_STB_ZLIB (0)
  102. #endif
  103. // Disable PIZ compression when applying cpplint.
  104. #ifndef TINYEXR_USE_PIZ
  105. #define TINYEXR_USE_PIZ (1)
  106. #endif
  107. #ifndef TINYEXR_USE_ZFP
  108. #define TINYEXR_USE_ZFP (0) // TinyEXR extension.
  109. // http://computation.llnl.gov/projects/floating-point-compression
  110. #endif
  111. #ifndef TINYEXR_USE_THREAD
  112. #define TINYEXR_USE_THREAD (0) // No threaded loading.
  114. #endif
  115. #ifndef TINYEXR_USE_OPENMP
  116. #ifdef _OPENMP
  117. #define TINYEXR_USE_OPENMP (1)
  118. #else
  119. #define TINYEXR_USE_OPENMP (0)
  120. #endif
  121. #endif
  122. #define TINYEXR_SUCCESS (0)
  123. #define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1)
  124. #define TINYEXR_ERROR_INVALID_EXR_VERSION (-2)
  125. #define TINYEXR_ERROR_INVALID_ARGUMENT (-3)
  126. #define TINYEXR_ERROR_INVALID_DATA (-4)
  127. #define TINYEXR_ERROR_INVALID_FILE (-5)
  128. #define TINYEXR_ERROR_INVALID_PARAMETER (-6)
  129. #define TINYEXR_ERROR_CANT_OPEN_FILE (-7)
  130. #define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-8)
  131. #define TINYEXR_ERROR_INVALID_HEADER (-9)
  132. #define TINYEXR_ERROR_UNSUPPORTED_FEATURE (-10)
  133. #define TINYEXR_ERROR_CANT_WRITE_FILE (-11)
  134. #define TINYEXR_ERROR_SERIALZATION_FAILED (-12)
  135. #define TINYEXR_ERROR_LAYER_NOT_FOUND (-13)
  136. // @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf }
  137. // pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2
  138. #define TINYEXR_PIXELTYPE_UINT (0)
  139. #define TINYEXR_PIXELTYPE_HALF (1)
  140. #define TINYEXR_PIXELTYPE_FLOAT (2)
  141. #define TINYEXR_MAX_HEADER_ATTRIBUTES (1024)
  142. #define TINYEXR_MAX_CUSTOM_ATTRIBUTES (128)
  143. #define TINYEXR_COMPRESSIONTYPE_NONE (0)
  144. #define TINYEXR_COMPRESSIONTYPE_RLE (1)
  145. #define TINYEXR_COMPRESSIONTYPE_ZIPS (2)
  146. #define TINYEXR_COMPRESSIONTYPE_ZIP (3)
  147. #define TINYEXR_COMPRESSIONTYPE_PIZ (4)
  148. #define TINYEXR_COMPRESSIONTYPE_ZFP (128) // TinyEXR extension
  149. #define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0)
  150. #define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1)
  151. #define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2)
  152. #define TINYEXR_TILE_ONE_LEVEL (0)
  153. #define TINYEXR_TILE_MIPMAP_LEVELS (1)
  154. #define TINYEXR_TILE_RIPMAP_LEVELS (2)
  155. #define TINYEXR_TILE_ROUND_DOWN (0)
  156. #define TINYEXR_TILE_ROUND_UP (1)
  157. typedef struct TEXRVersion {
  158. int version; // this must be 2
  159. // tile format image;
  160. // non-zero only for a single-part "normal" tiled file (according to spec.)
  161. int tiled;
  162. int long_name; // long name attribute
  163. // deep image(EXR 2.0);
  164. // for a multi-part file, indicates that at least one part is of type deep* (according to spec.)
  165. int non_image;
  166. int multipart; // multi-part(EXR 2.0)
  167. } EXRVersion;
  168. typedef struct TEXRAttribute {
  169. char name[256]; // name and type are up to 255 chars long.
  170. char type[256];
  171. unsigned char *value; // uint8_t*
  172. int size;
  173. int pad0;
  174. } EXRAttribute;
  175. typedef struct TEXRChannelInfo {
  176. char name[256]; // less than 255 bytes long
  177. int pixel_type;
  178. int x_sampling;
  179. int y_sampling;
  180. unsigned char p_linear;
  181. unsigned char pad[3];
  182. } EXRChannelInfo;
  183. typedef struct TEXRTile {
  184. int offset_x;
  185. int offset_y;
  186. int level_x;
  187. int level_y;
  188. int width; // actual width in a tile.
  189. int height; // actual height in a tile.
  190. unsigned char **images; // image[channels][pixels]
  191. } EXRTile;
  192. typedef struct TEXRBox2i {
  193. int min_x;
  194. int min_y;
  195. int max_x;
  196. int max_y;
  197. } EXRBox2i;
  198. typedef struct TEXRHeader {
  199. float pixel_aspect_ratio;
  200. int line_order;
  201. EXRBox2i data_window;
  202. EXRBox2i display_window;
  203. float screen_window_center[2];
  204. float screen_window_width;
  205. int chunk_count;
  206. // Properties for tiled format(`tiledesc`).
  207. int tiled;
  208. int tile_size_x;
  209. int tile_size_y;
  210. int tile_level_mode;
  211. int tile_rounding_mode;
  212. int long_name;
  213. // for a single-part file, agree with the version field bit 11
  214. // for a multi-part file, it is consistent with the type of part
  215. int non_image;
  216. int multipart;
  217. unsigned int header_len;
  218. // Custom attributes (excludes required attributes, e.g. `channels`,
  219. // `compression`, etc.)
  220. int num_custom_attributes;
  221. EXRAttribute *custom_attributes; // array of EXRAttribute. size =
  222. // `num_custom_attributes`.
  223. EXRChannelInfo *channels; // [num_channels]
  224. int *pixel_types; // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for
  225. // each channel. This is overwritten with `requested_pixel_types` when
  226. // loading.
  227. int num_channels;
  228. int compression_type; // compression type(TINYEXR_COMPRESSIONTYPE_*)
  229. int *requested_pixel_types; // Filled initially by
  230. // ParseEXRHeaderFrom(Memory|File), then users
  231. // can edit it(only valid for HALF pixel type
  232. // channel)
  233. // name attribute required for multipart files;
  234. // must be unique and non-empty (according to spec.);
  235. // use EXRSetNameAttr for setting value;
  236. // max 255 characters allowed - excluding terminating zero
  237. char name[256];
  238. } EXRHeader;
  239. typedef struct TEXRMultiPartHeader {
  240. int num_headers;
  241. EXRHeader *headers;
  242. } EXRMultiPartHeader;
  243. typedef struct TEXRImage {
  244. EXRTile *tiles; // Tiled pixel data. The application must reconstruct image
  245. // from tiles manually. NULL if scanline format.
  246. struct TEXRImage* next_level; // NULL if scanline format or image is the last level.
  247. int level_x; // x level index
  248. int level_y; // y level index
  249. unsigned char **images; // image[channels][pixels]. NULL if tiled format.
  250. int width;
  251. int height;
  252. int num_channels;
  253. // Properties for tile format.
  254. int num_tiles;
  255. } EXRImage;
  256. typedef struct TEXRMultiPartImage {
  257. int num_images;
  258. EXRImage *images;
  259. } EXRMultiPartImage;
  260. typedef struct TDeepImage {
  261. const char **channel_names;
  262. float ***image; // image[channels][scanlines][samples]
  263. int **offset_table; // offset_table[scanline][offsets]
  264. int num_channels;
  265. int width;
  266. int height;
  267. int pad0;
  268. } DeepImage;
  269. // @deprecated { For backward compatibility. Not recommended to use. }
  270. // Loads single-frame OpenEXR image. Assume EXR image contains A(single channel
  271. // alpha) or RGB(A) channels.
  272. // Application must free image data as returned by `out_rgba`.
  273. // Result image format is: float x RGBA x width x height.
  274. // Returns a negative value and may set an error string in `err` when there's
  275. // an error.
  276. extern int LoadEXR(float **out_rgba, int *width, int *height,
  277. const char *filename, const char **err);
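//
// Illustrative sketch (not part of the original header): minimal LoadEXR()
// usage based on the declarations above. "input.exr" is a placeholder file
// name; assumes <stdio.h>/<stdlib.h>.
//
//   float *rgba = NULL;
//   int w = 0, h = 0;
//   const char *err = NULL;
//   int ret = LoadEXR(&rgba, &w, &h, "input.exr", &err);
//   if (ret != TINYEXR_SUCCESS) {
//     if (err) {
//       fprintf(stderr, "EXR load error: %s\n", err);
//       FreeEXRErrorMessage(err); // free the error message allocated by tinyexr
//     }
//   } else {
//     // rgba holds w * h * 4 floats (RGBA); free it when done.
//     free(rgba);
//   }
//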
  278. // Loads single-frame OpenEXR image by specifying a layer name. Assume EXR image
  279. // contains A (single-channel alpha) or RGB(A) channels. Application must free
  280. // image data as returned by `out_rgba`. Result image format is: float x RGBA x
  281. // width x height. Returns a negative value and may set an error string in `err`
  282. // when there's an error. When the specified layer name is not found in the EXR
  283. // file, the function will return `TINYEXR_ERROR_LAYER_NOT_FOUND`.
  284. extern int LoadEXRWithLayer(float **out_rgba, int *width, int *height,
  285. const char *filename, const char *layer_name,
  286. const char **err);
  287. //
  288. // Get layer infos from EXR file.
  289. //
  290. // @param[out] layer_names List of layer names. Application must free memory
  291. // after using this.
  292. // @param[out] num_layers The number of layers
  293. // @param[out] err Error string(will be filled when the function returns error
  294. // code). Free it using FreeEXRErrorMessage after using this value.
  295. //
  296. // @return TINYEXR_SUCCESS upon success.
  297. //
  298. extern int EXRLayers(const char *filename, const char **layer_names[],
  299. int *num_layers, const char **err);
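//
// Illustrative sketch (added, hedged): enumerate layer names with EXRLayers()
// and load one layer with LoadEXRWithLayer(). "multi_layer.exr" is a
// placeholder; exact ownership of the name list follows the library docs.
//
//   const char **layer_names = NULL;
//   int num_layers = 0;
//   const char *err = NULL;
//   if (EXRLayers("multi_layer.exr", &layer_names, &num_layers, &err) ==
//       TINYEXR_SUCCESS) {
//     for (int i = 0; i < num_layers; i++) {
//       printf("layer %d: %s\n", i, layer_names[i]);
//     }
//     float *rgba = NULL;
//     int w = 0, h = 0;
//     if (LoadEXRWithLayer(&rgba, &w, &h, "multi_layer.exr", layer_names[0],
//                          &err) == TINYEXR_SUCCESS) {
//       free(rgba);
//     }
//     free(layer_names); // application frees the returned layer name list
//   } else if (err) {
//     FreeEXRErrorMessage(err);
//   }
//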
  300. // @deprecated { to be removed. }
  301. // Simple wrapper API for ParseEXRHeaderFromFile.
  302. // Checks whether the given file is an EXR file (by just looking at the header).
  303. // @return TINYEXR_SUCCESS for an EXR image, TINYEXR_ERROR_INVALID_HEADER for
  304. // others.
  305. extern int IsEXR(const char *filename);
  306. // @deprecated { to be removed. }
  307. // Saves single-frame OpenEXR image. Assume EXR image contains RGB(A) channels.
  308. // components must be 1(Grayscale), 3(RGB) or 4(RGBA).
  309. // Input image format is: `float x width x height`, or `float x RGB(A) x width x
  310. // height`.
  311. // Saves image as fp16 (HALF) format when `save_as_fp16` is a positive, non-zero
  312. // value.
  313. // Saves image as fp32 (FLOAT) format when `save_as_fp16` is 0.
  314. // Use ZIP compression by default.
  315. // Returns negative value and may set error string in `err` when there's an
  316. // error
  317. extern int SaveEXR(const float *data, const int width, const int height,
  318. const int components, const int save_as_fp16,
  319. const char *filename, const char **err);
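//
// Illustrative sketch (added): saving a 256x256 RGBA float buffer with the
// simple SaveEXR() API, stored as fp16. Buffer contents and "out.exr" are
// placeholders; assumes <vector> and <stdio.h>.
//
//   const int w = 256, h = 256, comps = 4;
//   std::vector<float> pixels(size_t(w) * h * comps, 0.5f);
//   const char *err = NULL;
//   int ret = SaveEXR(pixels.data(), w, h, comps, /*save_as_fp16=*/1,
//                     "out.exr", &err);
//   if (ret != TINYEXR_SUCCESS && err) {
//     fprintf(stderr, "EXR save error: %s\n", err);
//     FreeEXRErrorMessage(err);
//   }
//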
  320. // Returns the number of resolution levels of the image (including the base)
  321. extern int EXRNumLevels(const EXRImage* exr_image);
  322. // Initialize EXRHeader struct
  323. extern void InitEXRHeader(EXRHeader *exr_header);
  324. // Set name attribute of EXRHeader struct (it makes a copy)
  325. extern void EXRSetNameAttr(EXRHeader *exr_header, const char* name);
  326. // Initialize EXRImage struct
  327. extern void InitEXRImage(EXRImage *exr_image);
  328. // Frees internal data of EXRHeader struct
  329. extern int FreeEXRHeader(EXRHeader *exr_header);
  330. // Frees internal data of EXRImage struct
  331. extern int FreeEXRImage(EXRImage *exr_image);
  332. // Frees error message
  333. extern void FreeEXRErrorMessage(const char *msg);
  334. // Parse EXR version header of a file.
  335. extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename);
  336. // Parse EXR version header from memory-mapped EXR data.
  337. extern int ParseEXRVersionFromMemory(EXRVersion *version,
  338. const unsigned char *memory, size_t size);
  339. // Parse single-part OpenEXR header from a file and initialize `EXRHeader`.
  340. // When there was an error message, Application must free `err` with
  341. // FreeEXRErrorMessage()
  342. extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version,
  343. const char *filename, const char **err);
  344. // Parse single-part OpenEXR header from a memory and initialize `EXRHeader`.
  345. // When there was an error message, Application must free `err` with
  346. // FreeEXRErrorMessage()
  347. extern int ParseEXRHeaderFromMemory(EXRHeader *header,
  348. const EXRVersion *version,
  349. const unsigned char *memory, size_t size,
  350. const char **err);
  351. // Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*`
  352. // array.
  353. // When there was an error message, Application must free `err` with
  354. // FreeEXRErrorMessage()
  355. extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers,
  356. int *num_headers,
  357. const EXRVersion *version,
  358. const char *filename,
  359. const char **err);
  360. // Parse multi-part OpenEXR headers from a memory and initialize `EXRHeader*`
  361. // array
  362. // When there was an error message, Application must free `err` with
  363. // FreeEXRErrorMessage()
  364. extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers,
  365. int *num_headers,
  366. const EXRVersion *version,
  367. const unsigned char *memory,
  368. size_t size, const char **err);
  369. // Loads single-part OpenEXR image from a file.
  370. // Application must set up `EXRHeader` with `ParseEXRHeaderFromFile` before
  371. // calling this function.
  371. // Application can free EXRImage using `FreeEXRImage`
  372. // Returns negative value and may set error string in `err` when there's an
  373. // error
  374. // When there was an error message, Application must free `err` with
  375. // FreeEXRErrorMessage()
  376. extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header,
  377. const char *filename, const char **err);
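//
// Illustrative sketch of the full low-level load path (added, hedged): parse
// the version and header, optionally request FLOAT output for HALF channels,
// load the image, then free everything. "input.exr" is a placeholder and the
// snippet is assumed to live inside some function.
//
//   EXRVersion version;
//   if (ParseEXRVersionFromFile(&version, "input.exr") != TINYEXR_SUCCESS)
//     return;
//   EXRHeader header;
//   InitEXRHeader(&header);
//   const char *err = NULL;
//   if (ParseEXRHeaderFromFile(&header, &version, "input.exr", &err) !=
//       TINYEXR_SUCCESS) {
//     if (err) FreeEXRErrorMessage(err);
//     return;
//   }
//   for (int c = 0; c < header.num_channels; c++) {
//     if (header.requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF)
//       header.requested_pixel_types[c] = TINYEXR_PIXELTYPE_FLOAT;
//   }
//   EXRImage image;
//   InitEXRImage(&image);
//   if (LoadEXRImageFromFile(&image, &header, "input.exr", &err) ==
//       TINYEXR_SUCCESS) {
//     // image.images[c] holds image.width * image.height values per channel.
//     FreeEXRImage(&image);
//   } else if (err) {
//     FreeEXRErrorMessage(err);
//   }
//   FreeEXRHeader(&header);
//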
  378. // Loads single-part OpenEXR image from a memory.
  379. // Application must setup `EXRHeader` with
  380. // `ParseEXRHeaderFromMemory` before calling this function.
  381. // Application can free EXRImage using `FreeEXRImage`
  382. // Returns negative value and may set error string in `err` when there's an
  383. // error
  384. // When there was an error message, Application must free `err` with
  385. // FreeEXRErrorMessage()
  386. extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header,
  387. const unsigned char *memory,
  388. const size_t size, const char **err);
  389. // Loads multi-part OpenEXR image from a file.
  390. // Application must set up the `EXRHeader*` array with
  391. // `ParseEXRMultipartHeaderFromFile` before calling this function.
  392. // Application can free EXRImage using `FreeEXRImage`
  393. // Returns negative value and may set error string in `err` when there's an
  394. // error
  395. // When there was an error message, Application must free `err` with
  396. // FreeEXRErrorMessage()
  397. extern int LoadEXRMultipartImageFromFile(EXRImage *images,
  398. const EXRHeader **headers,
  399. unsigned int num_parts,
  400. const char *filename,
  401. const char **err);
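//
// Illustrative sketch of multi-part loading (added, hedged): parse all part
// headers, then load every part at once. "multipart.exr" is a placeholder;
// the freeing scheme for the header array is an assumption, see the library
// docs. Assumes <vector> and <stdlib.h>.
//
//   EXRVersion version;
//   if (ParseEXRVersionFromFile(&version, "multipart.exr") != TINYEXR_SUCCESS ||
//       !version.multipart)
//     return;
//   EXRHeader **headers = NULL; // allocated by the parser
//   int num_parts = 0;
//   const char *err = NULL;
//   if (ParseEXRMultipartHeaderFromFile(&headers, &num_parts, &version,
//                                       "multipart.exr", &err) != TINYEXR_SUCCESS) {
//     if (err) FreeEXRErrorMessage(err);
//     return;
//   }
//   std::vector<EXRImage> images(num_parts);
//   for (int i = 0; i < num_parts; i++) InitEXRImage(&images[i]);
//   if (LoadEXRMultipartImageFromFile(images.data(),
//                                     const_cast<const EXRHeader **>(headers),
//                                     (unsigned int)num_parts, "multipart.exr",
//                                     &err) == TINYEXR_SUCCESS) {
//     for (int i = 0; i < num_parts; i++) FreeEXRImage(&images[i]);
//   } else if (err) {
//     FreeEXRErrorMessage(err);
//   }
//   for (int i = 0; i < num_parts; i++) {
//     FreeEXRHeader(headers[i]);
//     free(headers[i]); // per-header and array allocation scheme assumed
//   }
//   free(headers);
//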
  402. // Loads multi-part OpenEXR image from a memory.
  403. // Application must setup `EXRHeader*` array with
  404. // `ParseEXRMultipartHeaderFromMemory` before calling this function.
  405. // Application can free EXRImage using `FreeEXRImage`
  406. // Returns negative value and may set error string in `err` when there's an
  407. // error
  408. // When there was an error message, Application must free `err` with
  409. // FreeEXRErrorMessage()
  410. extern int LoadEXRMultipartImageFromMemory(EXRImage *images,
  411. const EXRHeader **headers,
  412. unsigned int num_parts,
  413. const unsigned char *memory,
  414. const size_t size, const char **err);
  415. // Saves multi-channel, single-frame OpenEXR image to a file.
  416. // Returns negative value and may set error string in `err` when there's an
  417. // error
  418. // When there was an error message, Application must free `err` with
  419. // FreeEXRErrorMessage()
  420. extern int SaveEXRImageToFile(const EXRImage *image,
  421. const EXRHeader *exr_header, const char *filename,
  422. const char **err);
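//
// Illustrative sketch of the low-level save path (added, hedged): writes a
// single-channel float image with SaveEXRImageToFile(). The channel name "R",
// buffer size, and field setup are example choices based on the struct
// definitions above; assumes <vector> and <string.h>.
//
//   const int w = 16, h = 16;
//   std::vector<float> channel(size_t(w) * h, 0.0f);
//   EXRHeader header;
//   InitEXRHeader(&header);
//   EXRImage image;
//   InitEXRImage(&image);
//   image.num_channels = 1;
//   unsigned char *image_ptr[1] = {
//       reinterpret_cast<unsigned char *>(channel.data())};
//   image.images = image_ptr;
//   image.width = w;
//   image.height = h;
//   header.num_channels = 1;
//   EXRChannelInfo channel_info[1];
//   header.channels = channel_info;
//   strncpy(header.channels[0].name, "R", 255);
//   header.channels[0].x_sampling = 1;
//   header.channels[0].y_sampling = 1;
//   header.channels[0].p_linear = 0;
//   int pixel_types[1] = {TINYEXR_PIXELTYPE_FLOAT};    // type of in-memory data
//   int requested_types[1] = {TINYEXR_PIXELTYPE_HALF}; // stored as HALF
//   header.pixel_types = pixel_types;
//   header.requested_pixel_types = requested_types;
//   header.compression_type = TINYEXR_COMPRESSIONTYPE_ZIP;
//   const char *err = NULL;
//   if (SaveEXRImageToFile(&image, &header, "out.exr", &err) !=
//       TINYEXR_SUCCESS) {
//     if (err) FreeEXRErrorMessage(err);
//   }
//   // All buffers here are stack/vector owned, so FreeEXRHeader/FreeEXRImage
//   // are intentionally not called in this sketch.
//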
  423. // Saves multi-channel, single-frame OpenEXR image to memory.
  424. // Image is compressed using EXRImage.compression value.
  425. // Returns the number of bytes if successful.
  426. // Returns zero and will set an error string in `err` when there's an
  427. // error.
  428. // When there was an error message, Application must free `err` with
  429. // FreeEXRErrorMessage()
  430. extern size_t SaveEXRImageToMemory(const EXRImage *image,
  431. const EXRHeader *exr_header,
  432. unsigned char **memory, const char **err);
  433. // Saves multi-channel, multi-frame OpenEXR image to a file.
  434. // Image is compressed using EXRImage.compression value.
  435. // File global attributes (eg. display_window) must be set in the first header.
  436. // Returns negative value and may set error string in `err` when there's an
  437. // error
  438. // When there was an error message, Application must free `err` with
  439. // FreeEXRErrorMessage()
  440. extern int SaveEXRMultipartImageToFile(const EXRImage *images,
  441. const EXRHeader **exr_headers,
  442. unsigned int num_parts,
  443. const char *filename, const char **err);
  444. // Saves multi-channel, multi-frame OpenEXR image to memory.
  445. // Image is compressed using EXRImage.compression value.
  446. // File global attributes (eg. display_window) must be set in the first header.
  447. // Returns the number of bytes if successful.
  448. // Returns zero and will set an error string in `err` when there's an
  449. // error.
  450. // When there was an error message, Application must free `err` with
  451. // FreeEXRErrorMessage()
  452. extern size_t SaveEXRMultipartImageToMemory(const EXRImage *images,
  453. const EXRHeader **exr_headers,
  454. unsigned int num_parts,
  455. unsigned char **memory, const char **err);
  456. // Loads single-frame OpenEXR deep image.
  457. // Application must free memory of variables in DeepImage(image, offset_table)
  458. // Returns negative value and may set error string in `err` when there's an
  459. // error
  460. // When there was an error message, Application must free `err` with
  461. // FreeEXRErrorMessage()
  462. extern int LoadDeepEXR(DeepImage *out_image, const char *filename,
  463. const char **err);
  464. // NOT YET IMPLEMENTED:
  465. // Saves single-frame OpenEXR deep image.
  466. // Returns negative value and may set error string in `err` when there's an
  467. // error
  468. // extern int SaveDeepEXR(const DeepImage *in_image, const char *filename,
  469. // const char **err);
  470. // NOT YET IMPLEMENTED:
  471. // Loads multi-part OpenEXR deep image.
  472. // Application must free memory of variables in DeepImage(image, offset_table)
  473. // extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const
  474. // char *filename,
  475. // const char **err);
  476. // For emscripten.
  477. // Loads single-frame OpenEXR image from memory. Assume EXR image contains
  478. // RGB(A) channels.
  479. // Returns negative value and may set error string in `err` when there's an
  480. // error
  481. // When there was an error message, Application must free `err` with
  482. // FreeEXRErrorMessage()
  483. extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
  484. const unsigned char *memory, size_t size,
  485. const char **err);
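//
// Illustrative sketch (added): load an EXR that is already resident in
// memory, e.g. a buffer read from disk or downloaded. "buf" and "buf_size"
// are placeholders for application-provided data.
//
//   float *rgba = NULL;
//   int w = 0, h = 0;
//   const char *err = NULL;
//   if (LoadEXRFromMemory(&rgba, &w, &h, buf, buf_size, &err) ==
//       TINYEXR_SUCCESS) {
//     free(rgba);
//   } else if (err) {
//     FreeEXRErrorMessage(err);
//   }
//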
  486. #ifdef __cplusplus
  487. }
  488. #endif
  489. #endif // TINYEXR_H_
  490. #ifdef TINYEXR_IMPLEMENTATION
  491. #ifndef TINYEXR_IMPLEMENTATION_DEFINED
  492. #define TINYEXR_IMPLEMENTATION_DEFINED
  493. #ifdef _WIN32
  494. #ifndef WIN32_LEAN_AND_MEAN
  495. #define WIN32_LEAN_AND_MEAN
  496. #endif
  497. #ifndef NOMINMAX
  498. #define NOMINMAX
  499. #endif
  500. #include <windows.h> // for UTF-8
  501. #endif
  502. #include <algorithm>
  503. #include <cassert>
  504. #include <cstdio>
  505. #include <cstdlib>
  506. #include <cstring>
  507. #include <sstream>
  508. // #include <iostream> // debug
  509. #include <limits>
  510. #include <string>
  511. #include <vector>
  512. #include <set>
  513. // https://stackoverflow.com/questions/5047971/how-do-i-check-for-c11-support
  514. #if __cplusplus > 199711L || (defined(_MSC_VER) && _MSC_VER >= 1900)
  515. #define TINYEXR_HAS_CXX11 (1)
  516. // C++11
  517. #include <cstdint>
  518. #if TINYEXR_USE_THREAD
  519. #include <atomic>
  520. #include <thread>
  521. #endif
  522. #endif // __cplusplus > 199711L
  523. #if TINYEXR_USE_OPENMP
  524. #include <omp.h>
  525. #endif
  526. #if TINYEXR_USE_MINIZ
  527. #include <miniz.h>
  528. #else
  529. // Issue #46. Please include your own zlib-compatible API header before
  530. // including `tinyexr.h`
  531. //#include "zlib.h"
  532. #endif
  533. #if TINYEXR_USE_STB_ZLIB
  534. // Since we don't know where a project has stb_image.h and stb_image_write.h
  535. // and whether they are in the include path, we don't include them here, and
  536. // instead declare the two relevant functions manually.
  537. // from stb_image.h:
  538. extern "C" int stbi_zlib_decode_buffer(char *obuffer, int olen, const char *ibuffer, int ilen);
  539. // from stb_image_write.h:
  540. extern "C" unsigned char *stbi_zlib_compress(unsigned char *data, int data_len, int *out_len, int quality);
  541. #endif
  542. #if TINYEXR_USE_ZFP
  543. #ifdef __clang__
  544. #pragma clang diagnostic push
  545. #pragma clang diagnostic ignored "-Weverything"
  546. #endif
  547. #include "zfp.h"
  548. #ifdef __clang__
  549. #pragma clang diagnostic pop
  550. #endif
  551. #endif
  552. namespace tinyexr {
  553. #if __cplusplus > 199711L
  554. // C++11
  555. typedef uint64_t tinyexr_uint64;
  556. typedef int64_t tinyexr_int64;
  557. #else
  558. // Although `long long` is not a standard type pre C++11, assume it is defined
  559. // as a compiler's extension.
  560. #ifdef __clang__
  561. #pragma clang diagnostic push
  562. #pragma clang diagnostic ignored "-Wc++11-long-long"
  563. #endif
  564. typedef unsigned long long tinyexr_uint64;
  565. typedef long long tinyexr_int64;
  566. #ifdef __clang__
  567. #pragma clang diagnostic pop
  568. #endif
  569. #endif
  570. // static bool IsBigEndian(void) {
  571. // union {
  572. // unsigned int i;
  573. // char c[4];
  574. // } bint = {0x01020304};
  575. //
  576. // return bint.c[0] == 1;
  577. //}
  578. static void SetErrorMessage(const std::string &msg, const char **err) {
  579. if (err) {
  580. #ifdef _WIN32
  581. (*err) = _strdup(msg.c_str());
  582. #else
  583. (*err) = strdup(msg.c_str());
  584. #endif
  585. }
  586. }
  587. #if 0
  588. static void SetWarningMessage(const std::string &msg, const char **warn) {
  589. if (warn) {
  590. #ifdef _WIN32
  591. (*warn) = _strdup(msg.c_str());
  592. #else
  593. (*warn) = strdup(msg.c_str());
  594. #endif
  595. }
  596. }
  597. #endif
  598. static const int kEXRVersionSize = 8;
  599. static void cpy2(unsigned short *dst_val, const unsigned short *src_val) {
  600. unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
  601. const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
  602. dst[0] = src[0];
  603. dst[1] = src[1];
  604. }
  605. static void swap2(unsigned short *val) {
  606. #if TINYEXR_LITTLE_ENDIAN
  607. (void)val;
  608. #else
  609. unsigned short tmp = *val;
  610. unsigned char *dst = reinterpret_cast<unsigned char *>(val);
  611. unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
  612. dst[0] = src[1];
  613. dst[1] = src[0];
  614. #endif
  615. }
  616. #ifdef __clang__
  617. #pragma clang diagnostic push
  618. #pragma clang diagnostic ignored "-Wunused-function"
  619. #endif
  620. #ifdef __GNUC__
  621. #pragma GCC diagnostic push
  622. #pragma GCC diagnostic ignored "-Wunused-function"
  623. #endif
  624. static void cpy4(int *dst_val, const int *src_val) {
  625. unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
  626. const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
  627. dst[0] = src[0];
  628. dst[1] = src[1];
  629. dst[2] = src[2];
  630. dst[3] = src[3];
  631. }
  632. static void cpy4(unsigned int *dst_val, const unsigned int *src_val) {
  633. unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
  634. const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
  635. dst[0] = src[0];
  636. dst[1] = src[1];
  637. dst[2] = src[2];
  638. dst[3] = src[3];
  639. }
  640. static void cpy4(float *dst_val, const float *src_val) {
  641. unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
  642. const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
  643. dst[0] = src[0];
  644. dst[1] = src[1];
  645. dst[2] = src[2];
  646. dst[3] = src[3];
  647. }
  648. #ifdef __clang__
  649. #pragma clang diagnostic pop
  650. #endif
  651. #ifdef __GNUC__
  652. #pragma GCC diagnostic pop
  653. #endif
  654. static void swap4(unsigned int *val) {
  655. #if TINYEXR_LITTLE_ENDIAN
  656. (void)val;
  657. #else
  658. unsigned int tmp = *val;
  659. unsigned char *dst = reinterpret_cast<unsigned char *>(val);
  660. unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
  661. dst[0] = src[3];
  662. dst[1] = src[2];
  663. dst[2] = src[1];
  664. dst[3] = src[0];
  665. #endif
  666. }
  667. static void swap4(int *val) {
  668. #if TINYEXR_LITTLE_ENDIAN
  669. (void)val;
  670. #else
  671. int tmp = *val;
  672. unsigned char *dst = reinterpret_cast<unsigned char *>(val);
  673. unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
  674. dst[0] = src[3];
  675. dst[1] = src[2];
  676. dst[2] = src[1];
  677. dst[3] = src[0];
  678. #endif
  679. }
  680. static void swap4(float *val) {
  681. #if TINYEXR_LITTLE_ENDIAN
  682. (void)val;
  683. #else
  684. float tmp = *val;
  685. unsigned char *dst = reinterpret_cast<unsigned char *>(val);
  686. unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
  687. dst[0] = src[3];
  688. dst[1] = src[2];
  689. dst[2] = src[1];
  690. dst[3] = src[0];
  691. #endif
  692. }
  693. #if 0
  694. static void cpy8(tinyexr::tinyexr_uint64 *dst_val, const tinyexr::tinyexr_uint64 *src_val) {
  695. unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
  696. const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
  697. dst[0] = src[0];
  698. dst[1] = src[1];
  699. dst[2] = src[2];
  700. dst[3] = src[3];
  701. dst[4] = src[4];
  702. dst[5] = src[5];
  703. dst[6] = src[6];
  704. dst[7] = src[7];
  705. }
  706. #endif
  707. static void swap8(tinyexr::tinyexr_uint64 *val) {
  708. #if TINYEXR_LITTLE_ENDIAN
  709. (void)val;
  710. #else
  711. tinyexr::tinyexr_uint64 tmp = (*val);
  712. unsigned char *dst = reinterpret_cast<unsigned char *>(val);
  713. unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
  714. dst[0] = src[7];
  715. dst[1] = src[6];
  716. dst[2] = src[5];
  717. dst[3] = src[4];
  718. dst[4] = src[3];
  719. dst[5] = src[2];
  720. dst[6] = src[1];
  721. dst[7] = src[0];
  722. #endif
  723. }
  724. // https://gist.github.com/rygorous/2156668
  725. union FP32 {
  726. unsigned int u;
  727. float f;
  728. struct {
  729. #if TINYEXR_LITTLE_ENDIAN
  730. unsigned int Mantissa : 23;
  731. unsigned int Exponent : 8;
  732. unsigned int Sign : 1;
  733. #else
  734. unsigned int Sign : 1;
  735. unsigned int Exponent : 8;
  736. unsigned int Mantissa : 23;
  737. #endif
  738. } s;
  739. };
  740. #ifdef __clang__
  741. #pragma clang diagnostic push
  742. #pragma clang diagnostic ignored "-Wpadded"
  743. #endif
  744. union FP16 {
  745. unsigned short u;
  746. struct {
  747. #if TINYEXR_LITTLE_ENDIAN
  748. unsigned int Mantissa : 10;
  749. unsigned int Exponent : 5;
  750. unsigned int Sign : 1;
  751. #else
  752. unsigned int Sign : 1;
  753. unsigned int Exponent : 5;
  754. unsigned int Mantissa : 10;
  755. #endif
  756. } s;
  757. };
  758. #ifdef __clang__
  759. #pragma clang diagnostic pop
  760. #endif
  761. static FP32 half_to_float(FP16 h) {
  762. static const FP32 magic = {113 << 23};
  763. static const unsigned int shifted_exp = 0x7c00
  764. << 13; // exponent mask after shift
  765. FP32 o;
  766. o.u = (h.u & 0x7fffU) << 13U; // exponent/mantissa bits
  767. unsigned int exp_ = shifted_exp & o.u; // just the exponent
  768. o.u += (127 - 15) << 23; // exponent adjust
  769. // handle exponent special cases
  770. if (exp_ == shifted_exp) // Inf/NaN?
  771. o.u += (128 - 16) << 23; // extra exp adjust
  772. else if (exp_ == 0) // Zero/Denormal?
  773. {
  774. o.u += 1 << 23; // extra exp adjust
  775. o.f -= magic.f; // renormalize
  776. }
  777. o.u |= (h.u & 0x8000U) << 16U; // sign bit
  778. return o;
  779. }
  780. static FP16 float_to_half_full(FP32 f) {
  781. FP16 o = {0};
  782. // Based on ISPC reference code (with minor modifications)
  783. if (f.s.Exponent == 0) // Signed zero/denormal (which will underflow)
  784. o.s.Exponent = 0;
  785. else if (f.s.Exponent == 255) // Inf or NaN (all exponent bits set)
  786. {
  787. o.s.Exponent = 31;
  788. o.s.Mantissa = f.s.Mantissa ? 0x200 : 0; // NaN->qNaN and Inf->Inf
  789. } else // Normalized number
  790. {
  791. // Exponent unbias the single, then bias the halfp
  792. int newexp = f.s.Exponent - 127 + 15;
  793. if (newexp >= 31) // Overflow, return signed infinity
  794. o.s.Exponent = 31;
  795. else if (newexp <= 0) // Underflow
  796. {
  797. if ((14 - newexp) <= 24) // Mantissa might be non-zero
  798. {
  799. unsigned int mant = f.s.Mantissa | 0x800000; // Hidden 1 bit
  800. o.s.Mantissa = mant >> (14 - newexp);
  801. if ((mant >> (13 - newexp)) & 1) // Check for rounding
  802. o.u++; // Round, might overflow into exp bit, but this is OK
  803. }
  804. } else {
  805. o.s.Exponent = static_cast<unsigned int>(newexp);
  806. o.s.Mantissa = f.s.Mantissa >> 13;
  807. if (f.s.Mantissa & 0x1000) // Check for rounding
  808. o.u++; // Round, might overflow to inf, this is OK
  809. }
  810. }
  811. o.s.Sign = f.s.Sign;
  812. return o;
  813. }
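// Illustrative sketch (added comment, not original code): the two helpers
// above form a float <-> half conversion pair for values representable in
// half precision; exact bits depend on the rounding implemented above.
//
//   FP32 f;
//   f.f = 1.5f;
//   FP16 h = float_to_half_full(f); // 1.5f is exactly representable in half
//   FP32 r = half_to_float(h);      // r.f == 1.5f
//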
  814. // NOTE: From OpenEXR code
  815. // #define IMF_INCREASING_Y 0
  816. // #define IMF_DECREASING_Y 1
  817. // #define IMF_RANDOM_Y 2
  818. //
  819. // #define IMF_NO_COMPRESSION 0
  820. // #define IMF_RLE_COMPRESSION 1
  821. // #define IMF_ZIPS_COMPRESSION 2
  822. // #define IMF_ZIP_COMPRESSION 3
  823. // #define IMF_PIZ_COMPRESSION 4
  824. // #define IMF_PXR24_COMPRESSION 5
  825. // #define IMF_B44_COMPRESSION 6
  826. // #define IMF_B44A_COMPRESSION 7
  827. #ifdef __clang__
  828. #pragma clang diagnostic push
  829. #if __has_warning("-Wzero-as-null-pointer-constant")
  830. #pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
  831. #endif
  832. #endif
  833. static const char *ReadString(std::string *s, const char *ptr, size_t len) {
  834. // Read until NULL (\0).
  835. const char *p = ptr;
  836. const char *q = ptr;
  837. while ((size_t(q - ptr) < len) && (*q) != 0) {
  838. q++;
  839. }
  840. if (size_t(q - ptr) >= len) {
  841. (*s).clear();
  842. return NULL;
  843. }
  844. (*s) = std::string(p, q);
  845. return q + 1; // skip '\0'
  846. }
  847. static bool ReadAttribute(std::string *name, std::string *type,
  848. std::vector<unsigned char> *data, size_t *marker_size,
  849. const char *marker, size_t size) {
  850. size_t name_len = strnlen(marker, size);
  851. if (name_len == size) {
  852. // String does not have a terminating character.
  853. return false;
  854. }
  855. *name = std::string(marker, name_len);
  856. marker += name_len + 1;
  857. size -= name_len + 1;
  858. size_t type_len = strnlen(marker, size);
  859. if (type_len == size) {
  860. return false;
  861. }
  862. *type = std::string(marker, type_len);
  863. marker += type_len + 1;
  864. size -= type_len + 1;
  865. if (size < sizeof(uint32_t)) {
  866. return false;
  867. }
  868. uint32_t data_len;
  869. memcpy(&data_len, marker, sizeof(uint32_t));
  870. tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
  871. if (data_len == 0) {
  872. if ((*type).compare("string") == 0) {
  873. // Accept empty string attribute.
  874. marker += sizeof(uint32_t);
  875. size -= sizeof(uint32_t);
  876. *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t);
  877. data->resize(1);
  878. (*data)[0] = '\0';
  879. return true;
  880. } else {
  881. return false;
  882. }
  883. }
  884. marker += sizeof(uint32_t);
  885. size -= sizeof(uint32_t);
  886. if (size < data_len) {
  887. return false;
  888. }
  889. data->resize(static_cast<size_t>(data_len));
  890. memcpy(&data->at(0), marker, static_cast<size_t>(data_len));
  891. *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len;
  892. return true;
  893. }
  894. static void WriteAttributeToMemory(std::vector<unsigned char> *out,
  895. const char *name, const char *type,
  896. const unsigned char *data, int len) {
  897. out->insert(out->end(), name, name + strlen(name) + 1);
  898. out->insert(out->end(), type, type + strlen(type) + 1);
  899. int outLen = len;
  900. tinyexr::swap4(&outLen);
  901. out->insert(out->end(), reinterpret_cast<unsigned char *>(&outLen),
  902. reinterpret_cast<unsigned char *>(&outLen) + sizeof(int));
  903. out->insert(out->end(), data, data + len);
  904. }
  905. typedef struct TChannelInfo {
  906. std::string name; // less than 255 bytes long
  907. int pixel_type;
  908. int requested_pixel_type;
  909. int x_sampling;
  910. int y_sampling;
  911. unsigned char p_linear;
  912. unsigned char pad[3];
  913. } ChannelInfo;
  914. typedef struct {
  915. int min_x;
  916. int min_y;
  917. int max_x;
  918. int max_y;
  919. } Box2iInfo;
  920. struct HeaderInfo {
  921. std::vector<tinyexr::ChannelInfo> channels;
  922. std::vector<EXRAttribute> attributes;
  923. Box2iInfo data_window;
  924. int line_order;
  925. Box2iInfo display_window;
  926. float screen_window_center[2];
  927. float screen_window_width;
  928. float pixel_aspect_ratio;
  929. int chunk_count;
  930. // Tiled format
  931. int tiled; // Non-zero if the part is tiled.
  932. int tile_size_x;
  933. int tile_size_y;
  934. int tile_level_mode;
  935. int tile_rounding_mode;
  936. unsigned int header_len;
  937. int compression_type;
  938. // required for multi-part or non-image files
  939. std::string name;
  940. // required for multi-part or non-image files
  941. std::string type;
  942. void clear() {
  943. channels.clear();
  944. attributes.clear();
  945. data_window.min_x = 0;
  946. data_window.min_y = 0;
  947. data_window.max_x = 0;
  948. data_window.max_y = 0;
  949. line_order = 0;
  950. display_window.min_x = 0;
  951. display_window.min_y = 0;
  952. display_window.max_x = 0;
  953. display_window.max_y = 0;
  954. screen_window_center[0] = 0.0f;
  955. screen_window_center[1] = 0.0f;
  956. screen_window_width = 0.0f;
  957. pixel_aspect_ratio = 0.0f;
  958. chunk_count = 0;
  959. // Tiled format
  960. tiled = 0;
  961. tile_size_x = 0;
  962. tile_size_y = 0;
  963. tile_level_mode = 0;
  964. tile_rounding_mode = 0;
  965. header_len = 0;
  966. compression_type = 0;
  967. name.clear();
  968. type.clear();
  969. }
  970. };
  971. static bool ReadChannelInfo(std::vector<ChannelInfo> &channels,
  972. const std::vector<unsigned char> &data) {
  973. const char *p = reinterpret_cast<const char *>(&data.at(0));
  974. for (;;) {
  975. if ((*p) == 0) {
  976. break;
  977. }
  978. ChannelInfo info;
  979. tinyexr_int64 data_len = static_cast<tinyexr_int64>(data.size()) -
  980. (p - reinterpret_cast<const char *>(data.data()));
  981. if (data_len < 0) {
  982. return false;
  983. }
  984. p = ReadString(&info.name, p, size_t(data_len));
  985. if ((p == NULL) && (info.name.empty())) {
  986. // Buffer overrun. Issue #51.
  987. return false;
  988. }
  989. const unsigned char *data_end =
  990. reinterpret_cast<const unsigned char *>(p) + 16;
  991. if (data_end >= (data.data() + data.size())) {
  992. return false;
  993. }
  994. memcpy(&info.pixel_type, p, sizeof(int));
  995. p += 4;
  996. info.p_linear = static_cast<unsigned char>(p[0]); // uchar
  997. p += 1 + 3; // reserved: uchar[3]
  998. memcpy(&info.x_sampling, p, sizeof(int)); // int
  999. p += 4;
  1000. memcpy(&info.y_sampling, p, sizeof(int)); // int
  1001. p += 4;
  1002. tinyexr::swap4(&info.pixel_type);
  1003. tinyexr::swap4(&info.x_sampling);
  1004. tinyexr::swap4(&info.y_sampling);
  1005. channels.push_back(info);
  1006. }
  1007. return true;
  1008. }
  1009. static void WriteChannelInfo(std::vector<unsigned char> &data,
  1010. const std::vector<ChannelInfo> &channels) {
  1011. size_t sz = 0;
  1012. // Calculate total size.
  1013. for (size_t c = 0; c < channels.size(); c++) {
  1014. sz += channels[c].name.length() + 1; // +1 for \0
  1015. sz += 16; // 4 * int
  1016. }
  1017. data.resize(sz + 1);
  1018. unsigned char *p = &data.at(0);
  1019. for (size_t c = 0; c < channels.size(); c++) {
  1020. memcpy(p, channels[c].name.c_str(), channels[c].name.length());
  1021. p += channels[c].name.length();
  1022. (*p) = '\0';
  1023. p++;
  1024. int pixel_type = channels[c].requested_pixel_type;
  1025. int x_sampling = channels[c].x_sampling;
  1026. int y_sampling = channels[c].y_sampling;
  1027. tinyexr::swap4(&pixel_type);
  1028. tinyexr::swap4(&x_sampling);
  1029. tinyexr::swap4(&y_sampling);
  1030. memcpy(p, &pixel_type, sizeof(int));
  1031. p += sizeof(int);
  1032. (*p) = channels[c].p_linear;
  1033. p += 4;
  1034. memcpy(p, &x_sampling, sizeof(int));
  1035. p += sizeof(int);
  1036. memcpy(p, &y_sampling, sizeof(int));
  1037. p += sizeof(int);
  1038. }
  1039. (*p) = '\0';
  1040. }
  1041. static void CompressZip(unsigned char *dst,
  1042. tinyexr::tinyexr_uint64 &compressedSize,
  1043. const unsigned char *src, unsigned long src_size) {
  1044. std::vector<unsigned char> tmpBuf(src_size);
  1045. //
  1046. // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  1047. // ImfZipCompressor.cpp
  1048. //
  1049. //
  1050. // Reorder the pixel data.
  1051. //
  1052. const char *srcPtr = reinterpret_cast<const char *>(src);
  1053. {
  1054. char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
  1055. char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
  1056. const char *stop = srcPtr + src_size;
  1057. for (;;) {
  1058. if (srcPtr < stop)
  1059. *(t1++) = *(srcPtr++);
  1060. else
  1061. break;
  1062. if (srcPtr < stop)
  1063. *(t2++) = *(srcPtr++);
  1064. else
  1065. break;
  1066. }
  1067. }
  1068. //
  1069. // Predictor.
  1070. //
  1071. {
  1072. unsigned char *t = &tmpBuf.at(0) + 1;
  1073. unsigned char *stop = &tmpBuf.at(0) + src_size;
  1074. int p = t[-1];
  1075. while (t < stop) {
  1076. int d = int(t[0]) - p + (128 + 256);
  1077. p = t[0];
  1078. t[0] = static_cast<unsigned char>(d);
  1079. ++t;
  1080. }
  1081. }
  1082. #if TINYEXR_USE_MINIZ
  1083. //
  1084. // Compress the data using miniz
  1085. //
  1086. mz_ulong outSize = mz_compressBound(src_size);
  1087. int ret = mz_compress(
  1088. dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)),
  1089. src_size);
  1090. assert(ret == MZ_OK);
  1091. (void)ret;
  1092. compressedSize = outSize;
  1093. #elif TINYEXR_USE_STB_ZLIB
  1094. int outSize;
  1095. unsigned char* r = stbi_zlib_compress(const_cast<unsigned char*>(&tmpBuf.at(0)), src_size, &outSize, 8);
  1096. assert(r);
  1097. memcpy(dst, r, outSize);
  1098. free(r);
  1099. compressedSize = outSize;
  1100. #else
  1101. uLong outSize = compressBound(static_cast<uLong>(src_size));
  1102. int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)),
  1103. src_size);
  1104. assert(ret == Z_OK);
  1105. compressedSize = outSize;
  1106. #endif
  1107. // Use uncompressed data when compressed data is larger than uncompressed.
  1108. // (Issue 40)
  1109. if (compressedSize >= src_size) {
  1110. compressedSize = src_size;
  1111. memcpy(dst, src, src_size);
  1112. }
  1113. }
  1114. static bool DecompressZip(unsigned char *dst,
  1115. unsigned long *uncompressed_size /* inout */,
  1116. const unsigned char *src, unsigned long src_size) {
  1117. if ((*uncompressed_size) == src_size) {
  1118. // Data is not compressed(Issue 40).
  1119. memcpy(dst, src, src_size);
  1120. return true;
  1121. }
  1122. std::vector<unsigned char> tmpBuf(*uncompressed_size);
  1123. #if TINYEXR_USE_MINIZ
  1124. int ret =
  1125. mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  1126. if (MZ_OK != ret) {
  1127. return false;
  1128. }
  1129. #elif TINYEXR_USE_STB_ZLIB
  1130. int ret = stbi_zlib_decode_buffer(reinterpret_cast<char*>(&tmpBuf.at(0)),
  1131. *uncompressed_size, reinterpret_cast<const char*>(src), src_size);
  1132. if (ret < 0) {
  1133. return false;
  1134. }
  1135. #else
  1136. int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  1137. if (Z_OK != ret) {
  1138. return false;
  1139. }
  1140. #endif
  1141. //
  1142. // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  1143. // ImfZipCompressor.cpp
  1144. //
  1145. // Predictor.
  1146. {
  1147. unsigned char *t = &tmpBuf.at(0) + 1;
  1148. unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size);
  1149. while (t < stop) {
  1150. int d = int(t[-1]) + int(t[0]) - 128;
  1151. t[0] = static_cast<unsigned char>(d);
  1152. ++t;
  1153. }
  1154. }
  1155. // Reorder the pixel data.
  1156. {
  1157. const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
  1158. const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
  1159. (*uncompressed_size + 1) / 2;
  1160. char *s = reinterpret_cast<char *>(dst);
  1161. char *stop = s + (*uncompressed_size);
  1162. for (;;) {
  1163. if (s < stop)
  1164. *(s++) = *(t1++);
  1165. else
  1166. break;
  1167. if (s < stop)
  1168. *(s++) = *(t2++);
  1169. else
  1170. break;
  1171. }
  1172. }
  1173. return true;
  1174. }
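// Illustrative round-trip sketch (added comment): compress a small buffer
// with CompressZip and decompress it again. The destination buffer is
// generously oversized here; a production caller would size it with the
// backend's compressBound(). Assumes <vector>.
//
//   std::vector<unsigned char> src(4096, 7);
//   std::vector<unsigned char> packed(src.size() * 2 + 1024);
//   tinyexr::tinyexr_uint64 packed_len = 0;
//   CompressZip(packed.data(), packed_len, src.data(),
//               (unsigned long)src.size());
//   std::vector<unsigned char> unpacked(src.size());
//   unsigned long unpacked_len = (unsigned long)unpacked.size(); // in/out
//   bool ok = DecompressZip(unpacked.data(), &unpacked_len, packed.data(),
//                           (unsigned long)packed_len);
//   // ok == true and unpacked == src on success.
//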
  1175. // RLE code from OpenEXR --------------------------------------
  1176. #ifdef __clang__
  1177. #pragma clang diagnostic push
  1178. #pragma clang diagnostic ignored "-Wsign-conversion"
  1179. #if __has_warning("-Wextra-semi-stmt")
  1180. #pragma clang diagnostic ignored "-Wextra-semi-stmt"
  1181. #endif
  1182. #endif
  1183. #ifdef _MSC_VER
  1184. #pragma warning(push)
  1185. #pragma warning(disable : 4204) // nonstandard extension used : non-constant
  1186. // aggregate initializer (also supported by GNU
  1187. // C and C99, so no big deal)
  1188. #pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to
  1189. // 'int', possible loss of data
  1190. #pragma warning(disable : 4267) // 'argument': conversion from '__int64' to
  1191. // 'int', possible loss of data
  1192. #pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is
  1193. // deprecated. Instead, use the ISO C and C++
  1194. // conformant name: _strdup.
  1195. #endif
  1196. const int MIN_RUN_LENGTH = 3;
  1197. const int MAX_RUN_LENGTH = 127;
  1198. //
  1199. // Compress an array of bytes, using run-length encoding,
  1200. // and return the length of the compressed data.
  1201. //
  1202. static int rleCompress(int inLength, const char in[], signed char out[]) {
  1203. const char *inEnd = in + inLength;
  1204. const char *runStart = in;
  1205. const char *runEnd = in + 1;
  1206. signed char *outWrite = out;
  1207. while (runStart < inEnd) {
  1208. while (runEnd < inEnd && *runStart == *runEnd &&
  1209. runEnd - runStart - 1 < MAX_RUN_LENGTH) {
  1210. ++runEnd;
  1211. }
  1212. if (runEnd - runStart >= MIN_RUN_LENGTH) {
  1213. //
  1214. // Compressible run
  1215. //
  1216. *outWrite++ = static_cast<char>(runEnd - runStart) - 1;
  1217. *outWrite++ = *(reinterpret_cast<const signed char *>(runStart));
  1218. runStart = runEnd;
  1219. } else {
  1220. //
  1221. // Uncompressible run
  1222. //
  1223. while (runEnd < inEnd &&
  1224. ((runEnd + 1 >= inEnd || *runEnd != *(runEnd + 1)) ||
  1225. (runEnd + 2 >= inEnd || *(runEnd + 1) != *(runEnd + 2))) &&
  1226. runEnd - runStart < MAX_RUN_LENGTH) {
  1227. ++runEnd;
  1228. }
  1229. *outWrite++ = static_cast<char>(runStart - runEnd);
  1230. while (runStart < runEnd) {
  1231. *outWrite++ = *(reinterpret_cast<const signed char *>(runStart++));
  1232. }
  1233. }
  1234. ++runEnd;
  1235. }
  1236. return static_cast<int>(outWrite - out);
  1237. }
  1238. //
  1239. // Uncompress an array of bytes compressed with rleCompress().
  1240. // Returns the length of the uncompressed data, or 0 if the
  1241. // length of the uncompressed data would be more than maxLength.
  1242. //
  1243. static int rleUncompress(int inLength, int maxLength, const signed char in[],
  1244. char out[]) {
  1245. char *outStart = out;
  1246. while (inLength > 0) {
  1247. if (*in < 0) {
  1248. int count = -(static_cast<int>(*in++));
  1249. inLength -= count + 1;
  1250. // Fixes #116: Add bounds check to in buffer.
  1251. if ((0 > (maxLength -= count)) || (inLength < 0)) return 0;
  1252. memcpy(out, in, count);
  1253. out += count;
  1254. in += count;
  1255. } else {
  1256. int count = *in++;
  1257. inLength -= 2;
  1258. if (0 > (maxLength -= count + 1)) return 0;
  1259. memset(out, *reinterpret_cast<const char *>(in), count + 1);
  1260. out += count + 1;
  1261. in++;
  1262. }
  1263. }
  1264. return static_cast<int>(out - outStart);
  1265. }
  1266. #ifdef __clang__
  1267. #pragma clang diagnostic pop
  1268. #endif
  1269. // End of RLE code from OpenEXR -----------------------------------
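// Illustrative round-trip sketch (added comment): rleCompress output can be
// up to roughly 3/2 the input size in the worst case, so the destination
// buffer is oversized accordingly. Assumes <vector>.
//
//   std::vector<char> src(128, 42); // highly repetitive input
//   std::vector<signed char> packed(src.size() * 3 / 2 + 64);
//   int packed_len = rleCompress((int)src.size(), src.data(), packed.data());
//   std::vector<char> unpacked(src.size());
//   int n = rleUncompress(packed_len, (int)unpacked.size(), packed.data(),
//                         unpacked.data());
//   // n == (int)src.size() and unpacked == src on success.
//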
  1270. static void CompressRle(unsigned char *dst,
  1271. tinyexr::tinyexr_uint64 &compressedSize,
  1272. const unsigned char *src, unsigned long src_size) {
  1273. std::vector<unsigned char> tmpBuf(src_size);
  1274. //
  1275. // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  1276. // ImfRleCompressor.cpp
  1277. //
  1278. //
  1279. // Reorder the pixel data.
  1280. //
  1281. const char *srcPtr = reinterpret_cast<const char *>(src);
  1282. {
  1283. char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
  1284. char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
  1285. const char *stop = srcPtr + src_size;
  1286. for (;;) {
  1287. if (srcPtr < stop)
  1288. *(t1++) = *(srcPtr++);
  1289. else
  1290. break;
  1291. if (srcPtr < stop)
  1292. *(t2++) = *(srcPtr++);
  1293. else
  1294. break;
  1295. }
  1296. }
  1297. //
  1298. // Predictor.
  1299. //
  1300. {
  1301. unsigned char *t = &tmpBuf.at(0) + 1;
  1302. unsigned char *stop = &tmpBuf.at(0) + src_size;
  1303. int p = t[-1];
  1304. while (t < stop) {
  1305. int d = int(t[0]) - p + (128 + 256);
  1306. p = t[0];
  1307. t[0] = static_cast<unsigned char>(d);
  1308. ++t;
  1309. }
  1310. }
  1311. // outSize will be (src_size * 3) / 2 at max.
  1312. int outSize = rleCompress(static_cast<int>(src_size),
  1313. reinterpret_cast<const char *>(&tmpBuf.at(0)),
  1314. reinterpret_cast<signed char *>(dst));
  1315. assert(outSize > 0);
  1316. compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize);
  1317. // Use uncompressed data when compressed data is larger than uncompressed.
  1318. // (Issue 40)
  1319. if (compressedSize >= src_size) {
  1320. compressedSize = src_size;
  1321. memcpy(dst, src, src_size);
  1322. }
  1323. }
  1324. static bool DecompressRle(unsigned char *dst,
  1325. const unsigned long uncompressed_size,
  1326. const unsigned char *src, unsigned long src_size) {
  1327. if (uncompressed_size == src_size) {
  1328. // Data is not compressed(Issue 40).
  1329. memcpy(dst, src, src_size);
  1330. return true;
  1331. }
  1332. // Workaround for issue #112.
  1333. // TODO(syoyo): Add more robust out-of-bounds check in `rleUncompress`.
  1334. if (src_size <= 2) {
  1335. return false;
  1336. }
  1337. std::vector<unsigned char> tmpBuf(uncompressed_size);
  1338. int ret = rleUncompress(static_cast<int>(src_size),
  1339. static_cast<int>(uncompressed_size),
  1340. reinterpret_cast<const signed char *>(src),
  1341. reinterpret_cast<char *>(&tmpBuf.at(0)));
  1342. if (ret != static_cast<int>(uncompressed_size)) {
  1343. return false;
  1344. }
  1345. //
  1346. // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  1347. // ImfRleCompressor.cpp
  1348. //
  1349. // Predictor.
  1350. {
  1351. unsigned char *t = &tmpBuf.at(0) + 1;
  1352. unsigned char *stop = &tmpBuf.at(0) + uncompressed_size;
  1353. while (t < stop) {
  1354. int d = int(t[-1]) + int(t[0]) - 128;
  1355. t[0] = static_cast<unsigned char>(d);
  1356. ++t;
  1357. }
  1358. }
  1359. // Reorder the pixel data.
  1360. {
  1361. const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
  1362. const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
  1363. (uncompressed_size + 1) / 2;
  1364. char *s = reinterpret_cast<char *>(dst);
  1365. char *stop = s + uncompressed_size;
  1366. for (;;) {
  1367. if (s < stop)
  1368. *(s++) = *(t1++);
  1369. else
  1370. break;
  1371. if (s < stop)
  1372. *(s++) = *(t2++);
  1373. else
  1374. break;
  1375. }
  1376. }
  1377. return true;
  1378. }
  1379. #if TINYEXR_USE_PIZ
  1380. #ifdef __clang__
  1381. #pragma clang diagnostic push
  1382. #pragma clang diagnostic ignored "-Wc++11-long-long"
  1383. #pragma clang diagnostic ignored "-Wold-style-cast"
  1384. #pragma clang diagnostic ignored "-Wpadded"
  1385. #pragma clang diagnostic ignored "-Wsign-conversion"
  1386. #pragma clang diagnostic ignored "-Wc++11-extensions"
  1387. #pragma clang diagnostic ignored "-Wconversion"
  1388. #pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
  1389. #if __has_warning("-Wcast-qual")
  1390. #pragma clang diagnostic ignored "-Wcast-qual"
  1391. #endif
  1392. #if __has_warning("-Wextra-semi-stmt")
  1393. #pragma clang diagnostic ignored "-Wextra-semi-stmt"
  1394. #endif
  1395. #endif
  1396. //
  1397. // PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp
  1398. //
  1399. // -----------------------------------------------------------------
  1400. // Copyright (c) 2004, Industrial Light & Magic, a division of Lucas
  1401. // Digital Ltd. LLC)
  1402. // (3 clause BSD license)
  1403. //
  1404. struct PIZChannelData {
  1405. unsigned short *start;
  1406. unsigned short *end;
  1407. int nx;
  1408. int ny;
  1409. int ys;
  1410. int size;
  1411. };
  1412. //-----------------------------------------------------------------------------
  1413. //
  1414. // 16-bit Haar Wavelet encoding and decoding
  1415. //
  1416. // The source code in this file is derived from the encoding
  1417. // and decoding routines written by Christian Rouet for his
  1418. // PIZ image file format.
  1419. //
  1420. //-----------------------------------------------------------------------------
  1421. //
  1422. // Wavelet basis functions without modulo arithmetic; they produce
  1423. // the best compression ratios when the wavelet-transformed data are
  1424. // Huffman-encoded, but the wavelet transform works only for 14-bit
  1425. // data (untransformed data values must be less than (1 << 14)).
  1426. //
  1427. inline void wenc14(unsigned short a, unsigned short b, unsigned short &l,
  1428. unsigned short &h) {
  1429. short as = static_cast<short>(a);
  1430. short bs = static_cast<short>(b);
  1431. short ms = (as + bs) >> 1;
  1432. short ds = as - bs;
  1433. l = static_cast<unsigned short>(ms);
  1434. h = static_cast<unsigned short>(ds);
  1435. }
  1436. inline void wdec14(unsigned short l, unsigned short h, unsigned short &a,
  1437. unsigned short &b) {
  1438. short ls = static_cast<short>(l);
  1439. short hs = static_cast<short>(h);
  1440. int hi = hs;
  1441. int ai = ls + (hi & 1) + (hi >> 1);
  1442. short as = static_cast<short>(ai);
  1443. short bs = static_cast<short>(ai - hi);
  1444. a = static_cast<unsigned short>(as);
  1445. b = static_cast<unsigned short>(bs);
  1446. }
  1447. //
  1448. // Wavelet basis functions with modulo arithmetic; they work with full
  1449. // 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't
  1450. // compress the data quite as well.
  1451. //
  1452. const int NBITS = 16;
  1453. const int A_OFFSET = 1 << (NBITS - 1);
  1454. const int M_OFFSET = 1 << (NBITS - 1);
  1455. const int MOD_MASK = (1 << NBITS) - 1;
  1456. inline void wenc16(unsigned short a, unsigned short b, unsigned short &l,
  1457. unsigned short &h) {
  1458. int ao = (a + A_OFFSET) & MOD_MASK;
  1459. int m = ((ao + b) >> 1);
  1460. int d = ao - b;
  1461. if (d < 0) m = (m + M_OFFSET) & MOD_MASK;
  1462. d &= MOD_MASK;
  1463. l = static_cast<unsigned short>(m);
  1464. h = static_cast<unsigned short>(d);
  1465. }
  1466. inline void wdec16(unsigned short l, unsigned short h, unsigned short &a,
  1467. unsigned short &b) {
  1468. int m = l;
  1469. int d = h;
  1470. int bb = (m - (d >> 1)) & MOD_MASK;
  1471. int aa = (d + bb - A_OFFSET) & MOD_MASK;
  1472. b = static_cast<unsigned short>(bb);
  1473. a = static_cast<unsigned short>(aa);
  1474. }
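// A minimal, hypothetical round-trip sketch (kept disabled; not part of the
// original OpenEXR code): the modulo variant also inverts exactly, even for
// values at or above (1 << 14); it just Huffman-compresses slightly worse.
#if 0
inline bool Wenc16RoundTripSketch() {
  unsigned short a = 60000, b = 123;
  unsigned short l, h;
  wenc16(a, b, l, h);
  unsigned short a2, b2;
  wdec16(l, h, a2, b2);
  return (a2 == a) && (b2 == b);
}
#endif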
  1475. //
  1476. // 2D Wavelet encoding:
  1477. //
  1478. static void wav2Encode(
  1479. unsigned short *in, // io: values are transformed in place
  1480. int nx, // i : x size
  1481. int ox, // i : x offset
  1482. int ny, // i : y size
  1483. int oy, // i : y offset
  1484. unsigned short mx) // i : maximum in[x][y] value
  1485. {
  1486. bool w14 = (mx < (1 << 14));
  1487. int n = (nx > ny) ? ny : nx;
  1488. int p = 1; // == 1 << level
  1489. int p2 = 2; // == 1 << (level+1)
  1490. //
  1491. // Hierarchical loop on smaller dimension n
  1492. //
  1493. while (p2 <= n) {
  1494. unsigned short *py = in;
  1495. unsigned short *ey = in + oy * (ny - p2);
  1496. int oy1 = oy * p;
  1497. int oy2 = oy * p2;
  1498. int ox1 = ox * p;
  1499. int ox2 = ox * p2;
  1500. unsigned short i00, i01, i10, i11;
  1501. //
  1502. // Y loop
  1503. //
  1504. for (; py <= ey; py += oy2) {
  1505. unsigned short *px = py;
  1506. unsigned short *ex = py + ox * (nx - p2);
  1507. //
  1508. // X loop
  1509. //
  1510. for (; px <= ex; px += ox2) {
  1511. unsigned short *p01 = px + ox1;
  1512. unsigned short *p10 = px + oy1;
  1513. unsigned short *p11 = p10 + ox1;
  1514. //
  1515. // 2D wavelet encoding
  1516. //
  1517. if (w14) {
  1518. wenc14(*px, *p01, i00, i01);
  1519. wenc14(*p10, *p11, i10, i11);
  1520. wenc14(i00, i10, *px, *p10);
  1521. wenc14(i01, i11, *p01, *p11);
  1522. } else {
  1523. wenc16(*px, *p01, i00, i01);
  1524. wenc16(*p10, *p11, i10, i11);
  1525. wenc16(i00, i10, *px, *p10);
  1526. wenc16(i01, i11, *p01, *p11);
  1527. }
  1528. }
  1529. //
  1530. // Encode (1D) odd column (still in Y loop)
  1531. //
  1532. if (nx & p) {
  1533. unsigned short *p10 = px + oy1;
  1534. if (w14)
  1535. wenc14(*px, *p10, i00, *p10);
  1536. else
  1537. wenc16(*px, *p10, i00, *p10);
  1538. *px = i00;
  1539. }
  1540. }
  1541. //
  1542. // Encode (1D) odd line (must loop in X)
  1543. //
  1544. if (ny & p) {
  1545. unsigned short *px = py;
  1546. unsigned short *ex = py + ox * (nx - p2);
  1547. for (; px <= ex; px += ox2) {
  1548. unsigned short *p01 = px + ox1;
  1549. if (w14)
  1550. wenc14(*px, *p01, i00, *p01);
  1551. else
  1552. wenc16(*px, *p01, i00, *p01);
  1553. *px = i00;
  1554. }
  1555. }
  1556. //
  1557. // Next level
  1558. //
  1559. p = p2;
  1560. p2 <<= 1;
  1561. }
  1562. }
  1563. //
  1564. // 2D Wavelet decoding:
  1565. //
  1566. static void wav2Decode(
  1567. unsigned short *in, // io: values are transformed in place
  1568. int nx, // i : x size
  1569. int ox, // i : x offset
  1570. int ny, // i : y size
  1571. int oy, // i : y offset
  1572. unsigned short mx) // i : maximum in[x][y] value
  1573. {
  1574. bool w14 = (mx < (1 << 14));
  1575. int n = (nx > ny) ? ny : nx;
  1576. int p = 1;
  1577. int p2;
  1578. //
  1579. // Search max level
  1580. //
  1581. while (p <= n) p <<= 1;
  1582. p >>= 1;
  1583. p2 = p;
  1584. p >>= 1;
  1585. //
  1586. // Hierarchical loop on smaller dimension n
  1587. //
  1588. while (p >= 1) {
  1589. unsigned short *py = in;
  1590. unsigned short *ey = in + oy * (ny - p2);
  1591. int oy1 = oy * p;
  1592. int oy2 = oy * p2;
  1593. int ox1 = ox * p;
  1594. int ox2 = ox * p2;
  1595. unsigned short i00, i01, i10, i11;
  1596. //
  1597. // Y loop
  1598. //
  1599. for (; py <= ey; py += oy2) {
  1600. unsigned short *px = py;
  1601. unsigned short *ex = py + ox * (nx - p2);
  1602. //
  1603. // X loop
  1604. //
  1605. for (; px <= ex; px += ox2) {
  1606. unsigned short *p01 = px + ox1;
  1607. unsigned short *p10 = px + oy1;
  1608. unsigned short *p11 = p10 + ox1;
  1609. //
  1610. // 2D wavelet decoding
  1611. //
  1612. if (w14) {
  1613. wdec14(*px, *p10, i00, i10);
  1614. wdec14(*p01, *p11, i01, i11);
  1615. wdec14(i00, i01, *px, *p01);
  1616. wdec14(i10, i11, *p10, *p11);
  1617. } else {
  1618. wdec16(*px, *p10, i00, i10);
  1619. wdec16(*p01, *p11, i01, i11);
  1620. wdec16(i00, i01, *px, *p01);
  1621. wdec16(i10, i11, *p10, *p11);
  1622. }
  1623. }
  1624. //
  1625. // Decode (1D) odd column (still in Y loop)
  1626. //
  1627. if (nx & p) {
  1628. unsigned short *p10 = px + oy1;
  1629. if (w14)
  1630. wdec14(*px, *p10, i00, *p10);
  1631. else
  1632. wdec16(*px, *p10, i00, *p10);
  1633. *px = i00;
  1634. }
  1635. }
  1636. //
  1637. // Decode (1D) odd line (must loop in X)
  1638. //
  1639. if (ny & p) {
  1640. unsigned short *px = py;
  1641. unsigned short *ex = py + ox * (nx - p2);
  1642. for (; px <= ex; px += ox2) {
  1643. unsigned short *p01 = px + ox1;
  1644. if (w14)
  1645. wdec14(*px, *p01, i00, *p01);
  1646. else
  1647. wdec16(*px, *p01, i00, *p01);
  1648. *px = i00;
  1649. }
  1650. }
  1651. //
  1652. // Next level
  1653. //
  1654. p2 = p;
  1655. p >>= 1;
  1656. }
  1657. }
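// A minimal, hypothetical sketch (kept disabled; not part of the original
// OpenEXR code): encode/decode round trip of the 2D Haar wavelet transform on
// a contiguous 4x4 block. The strides mirror how CompressPiz()/DecompressPiz()
// call these functions for a single HALF channel (ox = cd.size = 1,
// oy = cd.nx * cd.size = nx).
#if 0
static bool Wav2RoundTripSketch() {
  const int nx = 4, ny = 4;
  unsigned short img[nx * ny];
  unsigned short ref[nx * ny];
  unsigned short maxValue = 0;
  for (int i = 0; i < nx * ny; i++) {
    img[i] = static_cast<unsigned short>(i * 100);  // all values < (1 << 14)
    ref[i] = img[i];
    if (img[i] > maxValue) maxValue = img[i];
  }
  wav2Encode(img, nx, /* ox */ 1, ny, /* oy */ nx, maxValue);
  wav2Decode(img, nx, /* ox */ 1, ny, /* oy */ nx, maxValue);
  return memcmp(img, ref, sizeof(img)) == 0;  // lossless round trip
}
#endif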
  1658. //-----------------------------------------------------------------------------
  1659. //
  1660. // 16-bit Huffman compression and decompression.
  1661. //
  1662. // The source code in this file is derived from the 8-bit
  1663. // Huffman compression and decompression routines written
  1664. // by Christian Rouet for his PIZ image file format.
  1665. //
  1666. //-----------------------------------------------------------------------------
1667. // Some modifications have been added for tinyexr.
  1668. const int HUF_ENCBITS = 16; // literal (value) bit length
  1669. const int HUF_DECBITS = 14; // decoding bit size (>= 8)
  1670. const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1; // encoding table size
  1671. const int HUF_DECSIZE = 1 << HUF_DECBITS; // decoding table size
  1672. const int HUF_DECMASK = HUF_DECSIZE - 1;
  1673. struct HufDec { // short code long code
  1674. //-------------------------------
  1675. unsigned int len : 8; // code length 0
  1676. unsigned int lit : 24; // lit p size
  1677. unsigned int *p; // 0 lits
  1678. };
  1679. inline long long hufLength(long long code) { return code & 63; }
  1680. inline long long hufCode(long long code) { return code >> 6; }
  1681. inline void outputBits(int nBits, long long bits, long long &c, int &lc,
  1682. char *&out) {
  1683. c <<= nBits;
  1684. lc += nBits;
  1685. c |= bits;
  1686. while (lc >= 8) *out++ = static_cast<char>((c >> (lc -= 8)));
  1687. }
  1688. inline long long getBits(int nBits, long long &c, int &lc, const char *&in) {
  1689. while (lc < nBits) {
  1690. c = (c << 8) | *(reinterpret_cast<const unsigned char *>(in++));
  1691. lc += 8;
  1692. }
  1693. lc -= nBits;
  1694. return (c >> lc) & ((1 << nBits) - 1);
  1695. }
  1696. //
  1697. // ENCODING TABLE BUILDING & (UN)PACKING
  1698. //
  1699. //
  1700. // Build a "canonical" Huffman code table:
  1701. // - for each (uncompressed) symbol, hcode contains the length
  1702. // of the corresponding code (in the compressed data)
  1703. // - canonical codes are computed and stored in hcode
  1704. // - the rules for constructing canonical codes are as follows:
  1705. // * shorter codes (if filled with zeroes to the right)
  1706. // have a numerically higher value than longer codes
  1707. // * for codes with the same length, numerical values
  1708. // increase with numerical symbol values
  1709. // - because the canonical code table can be constructed from
  1710. // symbol lengths alone, the code table can be transmitted
  1711. // without sending the actual code values
  1712. // - see http://www.compressconsult.com/huffman/
  1713. //
  1714. static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) {
  1715. long long n[59];
  1716. //
  1717. // For each i from 0 through 58, count the
  1718. // number of different codes of length i, and
  1719. // store the count in n[i].
  1720. //
  1721. for (int i = 0; i <= 58; ++i) n[i] = 0;
  1722. for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1;
  1723. //
  1724. // For each i from 58 through 1, compute the
  1725. // numerically lowest code with length i, and
  1726. // store that code in n[i].
  1727. //
  1728. long long c = 0;
  1729. for (int i = 58; i > 0; --i) {
  1730. long long nc = ((c + n[i]) >> 1);
  1731. n[i] = c;
  1732. c = nc;
  1733. }
  1734. //
  1735. // hcode[i] contains the length, l, of the
  1736. // code for symbol i. Assign the next available
  1737. // code of length l to the symbol and store both
  1738. // l and the code in hcode[i].
  1739. //
  1740. for (int i = 0; i < HUF_ENCSIZE; ++i) {
  1741. int l = static_cast<int>(hcode[i]);
  1742. if (l > 0) hcode[i] = l | (n[l]++ << 6);
  1743. }
  1744. }
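// A minimal, hypothetical sketch (kept disabled; not part of the original
// OpenEXR code): feed code lengths into hufCanonicalCodeTable() and read the
// assigned codes back with hufCode()/hufLength(). With the lengths below,
// symbol 2 receives the single 1-bit code and symbols 0 and 1 receive the two
// 2-bit codes, following the rules listed above.
#if 0
static void CanonicalCodeSketch() {
  std::vector<long long> hcode(HUF_ENCSIZE, 0);  // 0 == unused symbol
  hcode[0] = 2;  // symbol 0: 2-bit code
  hcode[1] = 2;  // symbol 1: 2-bit code
  hcode[2] = 1;  // symbol 2: 1-bit code
  hufCanonicalCodeTable(hcode.data());
  long long len2 = hufLength(hcode[2]);  // == 1
  long long code2 = hufCode(hcode[2]);   // canonical code assigned to symbol 2
  (void)len2;
  (void)code2;
}
#endif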
  1745. //
  1746. // Compute Huffman codes (based on frq input) and store them in frq:
  1747. // - code structure is : [63:lsb - 6:msb] | [5-0: bit length];
  1748. // - max code length is 58 bits;
  1749. // - codes outside the range [im-iM] have a null length (unused values);
  1750. // - original frequencies are destroyed;
  1751. // - encoding tables are used by hufEncode() and hufBuildDecTable();
  1752. //
  1753. struct FHeapCompare {
  1754. bool operator()(long long *a, long long *b) { return *a > *b; }
  1755. };
  1756. static void hufBuildEncTable(
  1757. long long *frq, // io: input frequencies [HUF_ENCSIZE], output table
  1758. int *im, // o: min frq index
  1759. int *iM) // o: max frq index
  1760. {
  1761. //
  1762. // This function assumes that when it is called, array frq
  1763. // indicates the frequency of all possible symbols in the data
  1764. // that are to be Huffman-encoded. (frq[i] contains the number
  1765. // of occurrences of symbol i in the data.)
  1766. //
  1767. // The loop below does three things:
  1768. //
  1769. // 1) Finds the minimum and maximum indices that point
  1770. // to non-zero entries in frq:
  1771. //
  1772. // frq[im] != 0, and frq[i] == 0 for all i < im
  1773. // frq[iM] != 0, and frq[i] == 0 for all i > iM
  1774. //
  1775. // 2) Fills array fHeap with pointers to all non-zero
  1776. // entries in frq.
  1777. //
  1778. // 3) Initializes array hlink such that hlink[i] == i
  1779. // for all array entries.
  1780. //
  1781. std::vector<int> hlink(HUF_ENCSIZE);
  1782. std::vector<long long *> fHeap(HUF_ENCSIZE);
  1783. *im = 0;
  1784. while (!frq[*im]) (*im)++;
  1785. int nf = 0;
  1786. for (int i = *im; i < HUF_ENCSIZE; i++) {
  1787. hlink[i] = i;
  1788. if (frq[i]) {
  1789. fHeap[nf] = &frq[i];
  1790. nf++;
  1791. *iM = i;
  1792. }
  1793. }
  1794. //
  1795. // Add a pseudo-symbol, with a frequency count of 1, to frq;
  1796. // adjust the fHeap and hlink array accordingly. Function
  1797. // hufEncode() uses the pseudo-symbol for run-length encoding.
  1798. //
  1799. (*iM)++;
  1800. frq[*iM] = 1;
  1801. fHeap[nf] = &frq[*iM];
  1802. nf++;
  1803. //
  1804. // Build an array, scode, such that scode[i] contains the number
  1805. // of bits assigned to symbol i. Conceptually this is done by
  1806. // constructing a tree whose leaves are the symbols with non-zero
  1807. // frequency:
  1808. //
  1809. // Make a heap that contains all symbols with a non-zero frequency,
  1810. // with the least frequent symbol on top.
  1811. //
  1812. // Repeat until only one symbol is left on the heap:
  1813. //
  1814. // Take the two least frequent symbols off the top of the heap.
  1815. // Create a new node that has first two nodes as children, and
  1816. // whose frequency is the sum of the frequencies of the first
  1817. // two nodes. Put the new node back into the heap.
  1818. //
  1819. // The last node left on the heap is the root of the tree. For each
  1820. // leaf node, the distance between the root and the leaf is the length
  1821. // of the code for the corresponding symbol.
  1822. //
  1823. // The loop below doesn't actually build the tree; instead we compute
  1824. // the distances of the leaves from the root on the fly. When a new
  1825. // node is added to the heap, then that node's descendants are linked
  1826. // into a single linear list that starts at the new node, and the code
  1827. // lengths of the descendants (that is, their distance from the root
  1828. // of the tree) are incremented by one.
  1829. //
  1830. std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
  1831. std::vector<long long> scode(HUF_ENCSIZE);
  1832. memset(scode.data(), 0, sizeof(long long) * HUF_ENCSIZE);
  1833. while (nf > 1) {
  1834. //
  1835. // Find the indices, mm and m, of the two smallest non-zero frq
  1836. // values in fHeap, add the smallest frq to the second-smallest
  1837. // frq, and remove the smallest frq value from fHeap.
  1838. //
  1839. int mm = fHeap[0] - frq;
  1840. std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
  1841. --nf;
  1842. int m = fHeap[0] - frq;
  1843. std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
  1844. frq[m] += frq[mm];
  1845. std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
  1846. //
  1847. // The entries in scode are linked into lists with the
  1848. // entries in hlink serving as "next" pointers and with
  1849. // the end of a list marked by hlink[j] == j.
  1850. //
  1851. // Traverse the lists that start at scode[m] and scode[mm].
  1852. // For each element visited, increment the length of the
  1853. // corresponding code by one bit. (If we visit scode[j]
  1854. // during the traversal, then the code for symbol j becomes
  1855. // one bit longer.)
  1856. //
  1857. // Merge the lists that start at scode[m] and scode[mm]
  1858. // into a single list that starts at scode[m].
  1859. //
  1860. //
  1861. // Add a bit to all codes in the first list.
  1862. //
  1863. for (int j = m;; j = hlink[j]) {
  1864. scode[j]++;
  1865. assert(scode[j] <= 58);
  1866. if (hlink[j] == j) {
  1867. //
  1868. // Merge the two lists.
  1869. //
  1870. hlink[j] = mm;
  1871. break;
  1872. }
  1873. }
  1874. //
  1875. // Add a bit to all codes in the second list
  1876. //
  1877. for (int j = mm;; j = hlink[j]) {
  1878. scode[j]++;
  1879. assert(scode[j] <= 58);
  1880. if (hlink[j] == j) break;
  1881. }
  1882. }
  1883. //
  1884. // Build a canonical Huffman code table, replacing the code
  1885. // lengths in scode with (code, code length) pairs. Copy the
  1886. // code table from scode into frq.
  1887. //
  1888. hufCanonicalCodeTable(scode.data());
  1889. memcpy(frq, scode.data(), sizeof(long long) * HUF_ENCSIZE);
  1890. }
  1891. //
  1892. // Pack an encoding table:
  1893. // - only code lengths, not actual codes, are stored
  1894. // - runs of zeroes are compressed as follows:
  1895. //
  1896. // unpacked packed
  1897. // --------------------------------
  1898. // 1 zero 0 (6 bits)
  1899. // 2 zeroes 59
  1900. // 3 zeroes 60
  1901. // 4 zeroes 61
  1902. // 5 zeroes 62
  1903. // n zeroes (6 or more) 63 n-6 (6 + 8 bits)
  1904. //
  1905. const int SHORT_ZEROCODE_RUN = 59;
  1906. const int LONG_ZEROCODE_RUN = 63;
  1907. const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN;
  1908. const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN;
  1909. static void hufPackEncTable(
  1910. const long long *hcode, // i : encoding table [HUF_ENCSIZE]
  1911. int im, // i : min hcode index
  1912. int iM, // i : max hcode index
  1913. char **pcode) // o: ptr to packed table (updated)
  1914. {
  1915. char *p = *pcode;
  1916. long long c = 0;
  1917. int lc = 0;
  1918. for (; im <= iM; im++) {
  1919. int l = hufLength(hcode[im]);
  1920. if (l == 0) {
  1921. int zerun = 1;
  1922. while ((im < iM) && (zerun < LONGEST_LONG_RUN)) {
  1923. if (hufLength(hcode[im + 1]) > 0) break;
  1924. im++;
  1925. zerun++;
  1926. }
  1927. if (zerun >= 2) {
  1928. if (zerun >= SHORTEST_LONG_RUN) {
  1929. outputBits(6, LONG_ZEROCODE_RUN, c, lc, p);
  1930. outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p);
  1931. } else {
  1932. outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p);
  1933. }
  1934. continue;
  1935. }
  1936. }
  1937. outputBits(6, l, c, lc, p);
  1938. }
  1939. if (lc > 0) *p++ = (unsigned char)(c << (8 - lc));
  1940. *pcode = p;
  1941. }
  1942. //
  1943. // Unpack an encoding table packed by hufPackEncTable():
  1944. //
  1945. static bool hufUnpackEncTable(
  1946. const char **pcode, // io: ptr to packed table (updated)
  1947. int ni, // i : input size (in bytes)
  1948. int im, // i : min hcode index
  1949. int iM, // i : max hcode index
  1950. long long *hcode) // o: encoding table [HUF_ENCSIZE]
  1951. {
  1952. memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE);
  1953. const char *p = *pcode;
  1954. long long c = 0;
  1955. int lc = 0;
  1956. for (; im <= iM; im++) {
  1957. if (p - *pcode >= ni) {
  1958. return false;
  1959. }
  1960. long long l = hcode[im] = getBits(6, c, lc, p); // code length
  1961. if (l == (long long)LONG_ZEROCODE_RUN) {
  1962. if (p - *pcode > ni) {
  1963. return false;
  1964. }
  1965. int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN;
  1966. if (im + zerun > iM + 1) {
  1967. return false;
  1968. }
  1969. while (zerun--) hcode[im++] = 0;
  1970. im--;
  1971. } else if (l >= (long long)SHORT_ZEROCODE_RUN) {
  1972. int zerun = l - SHORT_ZEROCODE_RUN + 2;
  1973. if (im + zerun > iM + 1) {
  1974. return false;
  1975. }
  1976. while (zerun--) hcode[im++] = 0;
  1977. im--;
  1978. }
  1979. }
  1980. *pcode = const_cast<char *>(p);
  1981. hufCanonicalCodeTable(hcode);
  1982. return true;
  1983. }
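// A minimal, hypothetical round-trip sketch (kept disabled; not part of the
// original OpenEXR code): pack a code-length table and unpack it again. Only
// the lengths travel; hufUnpackEncTable() rebuilds the canonical codes itself,
// and the two zero-length symbols (6 and 7) are transmitted as a short
// zero-run code as described in the table above.
#if 0
static bool PackEncTableSketch() {
  std::vector<long long> hcode(HUF_ENCSIZE, 0);
  hcode[5] = 2;
  hcode[8] = 2;
  hcode[9] = 2;
  hcode[10] = 2;
  hufCanonicalCodeTable(hcode.data());
  std::vector<char> packed(1024);  // generous for this tiny range
  char *p = packed.data();
  hufPackEncTable(hcode.data(), /* im */ 5, /* iM */ 10, &p);
  int packedLen = static_cast<int>(p - packed.data());
  std::vector<long long> unpacked(HUF_ENCSIZE, 0);
  const char *q = packed.data();
  if (!hufUnpackEncTable(&q, packedLen, 5, 10, unpacked.data())) return false;
  return memcmp(unpacked.data(), hcode.data(),
                sizeof(long long) * HUF_ENCSIZE) == 0;
}
#endif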
  1984. //
  1985. // DECODING TABLE BUILDING
  1986. //
  1987. //
  1988. // Clear a newly allocated decoding table so that it contains only zeroes.
  1989. //
  1990. static void hufClearDecTable(HufDec *hdecod) // io: (allocated by caller)
  1991. // decoding table [HUF_DECSIZE]
  1992. {
  1993. for (int i = 0; i < HUF_DECSIZE; i++) {
  1994. hdecod[i].len = 0;
  1995. hdecod[i].lit = 0;
  1996. hdecod[i].p = NULL;
  1997. }
  1998. // memset(hdecod, 0, sizeof(HufDec) * HUF_DECSIZE);
  1999. }
  2000. //
  2001. // Build a decoding hash table based on the encoding table hcode:
  2002. // - short codes (<= HUF_DECBITS) are resolved with a single table access;
  2003. // - long code entry allocations are not optimized, because long codes are
2004. // infrequent;
  2005. // - decoding tables are used by hufDecode();
  2006. //
  2007. static bool hufBuildDecTable(const long long *hcode, // i : encoding table
  2008. int im, // i : min index in hcode
  2009. int iM, // i : max index in hcode
  2010. HufDec *hdecod) // o: (allocated by caller)
  2011. // decoding table [HUF_DECSIZE]
  2012. {
  2013. //
  2014. // Init hashtable & loop on all codes.
  2015. // Assumes that hufClearDecTable(hdecod) has already been called.
  2016. //
  2017. for (; im <= iM; im++) {
  2018. long long c = hufCode(hcode[im]);
  2019. int l = hufLength(hcode[im]);
  2020. if (c >> l) {
  2021. //
  2022. // Error: c is supposed to be an l-bit code,
  2023. // but c contains a value that is greater
  2024. // than the largest l-bit number.
  2025. //
  2026. // invalidTableEntry();
  2027. return false;
  2028. }
  2029. if (l > HUF_DECBITS) {
  2030. //
  2031. // Long code: add a secondary entry
  2032. //
  2033. HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));
  2034. if (pl->len) {
  2035. //
  2036. // Error: a short code has already
  2037. // been stored in table entry *pl.
  2038. //
  2039. // invalidTableEntry();
  2040. return false;
  2041. }
  2042. pl->lit++;
  2043. if (pl->p) {
  2044. unsigned int *p = pl->p;
  2045. pl->p = new unsigned int[pl->lit];
  2046. for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i];
  2047. delete[] p;
  2048. } else {
  2049. pl->p = new unsigned int[1];
  2050. }
  2051. pl->p[pl->lit - 1] = im;
  2052. } else if (l) {
  2053. //
  2054. // Short code: init all primary entries
  2055. //
  2056. HufDec *pl = hdecod + (c << (HUF_DECBITS - l));
  2057. for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) {
  2058. if (pl->len || pl->p) {
  2059. //
  2060. // Error: a short code or a long code has
  2061. // already been stored in table entry *pl.
  2062. //
  2063. // invalidTableEntry();
  2064. return false;
  2065. }
  2066. pl->len = l;
  2067. pl->lit = im;
  2068. }
  2069. }
  2070. }
  2071. return true;
  2072. }
  2073. //
  2074. // Free the long code entries of a decoding table built by hufBuildDecTable()
  2075. //
  2076. static void hufFreeDecTable(HufDec *hdecod) // io: Decoding table
  2077. {
  2078. for (int i = 0; i < HUF_DECSIZE; i++) {
  2079. if (hdecod[i].p) {
  2080. delete[] hdecod[i].p;
  2081. hdecod[i].p = 0;
  2082. }
  2083. }
  2084. }
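// A minimal, hypothetical sketch (kept disabled; not part of the original
// OpenEXR code): build the encoding table for a tiny synthetic symbol
// distribution, derive the decoding hash table from it, then release the
// long-code entries again. This is roughly the sequence hufCompress() and
// hufUncompress() below go through internally.
#if 0
static void HufTableSketch() {
  std::vector<long long> freq(HUF_ENCSIZE, 0);
  freq[10] = 100;  // three symbols with non-zero frequency
  freq[20] = 50;
  freq[30] = 1;
  int im = 0, iM = 0;
  hufBuildEncTable(freq.data(), &im, &iM);  // freq now holds (code, length) pairs
  std::vector<HufDec> hdec(HUF_DECSIZE);
  hufClearDecTable(hdec.data());
  if (hufBuildDecTable(freq.data(), im, iM, hdec.data())) {
    // hdec could now be handed to hufDecode().
  }
  hufFreeDecTable(hdec.data());
}
#endif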
  2085. //
  2086. // ENCODING
  2087. //
  2088. inline void outputCode(long long code, long long &c, int &lc, char *&out) {
  2089. outputBits(hufLength(code), hufCode(code), c, lc, out);
  2090. }
  2091. inline void sendCode(long long sCode, int runCount, long long runCode,
  2092. long long &c, int &lc, char *&out) {
  2093. //
2094. // Output a run of runCount instances of the symbol sCode.
  2095. // Output the symbols explicitly, or if that is shorter, output
  2096. // the sCode symbol once followed by a runCode symbol and runCount
  2097. // expressed as an 8-bit number.
  2098. //
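// For example (hypothetical numbers): with a 10-bit code for sCode, a 6-bit
// code for runCode and runCount = 4, the run form costs 10 + 6 + 8 = 24 bits
// while writing the symbol explicitly costs 10 * (4 + 1) = 50 bits, so the
// run form below is chosen (the comparison uses 10 * 4 = 40 as the threshold).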
  2099. if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) {
  2100. outputCode(sCode, c, lc, out);
  2101. outputCode(runCode, c, lc, out);
  2102. outputBits(8, runCount, c, lc, out);
  2103. } else {
  2104. while (runCount-- >= 0) outputCode(sCode, c, lc, out);
  2105. }
  2106. }
  2107. //
  2108. // Encode (compress) ni values based on the Huffman encoding table hcode:
  2109. //
  2110. static int hufEncode // return: output size (in bits)
  2111. (const long long *hcode, // i : encoding table
  2112. const unsigned short *in, // i : uncompressed input buffer
2113. const int ni, // i : number of input values (unsigned shorts)
  2114. int rlc, // i : rl code
  2115. char *out) // o: compressed output buffer
  2116. {
  2117. char *outStart = out;
  2118. long long c = 0; // bits not yet written to out
  2119. int lc = 0; // number of valid bits in c (LSB)
  2120. int s = in[0];
  2121. int cs = 0;
  2122. //
  2123. // Loop on input values
  2124. //
  2125. for (int i = 1; i < ni; i++) {
  2126. //
  2127. // Count same values or send code
  2128. //
  2129. if (s == in[i] && cs < 255) {
  2130. cs++;
  2131. } else {
  2132. sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
  2133. cs = 0;
  2134. }
  2135. s = in[i];
  2136. }
  2137. //
  2138. // Send remaining code
  2139. //
  2140. sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
  2141. if (lc) *out = (c << (8 - lc)) & 0xff;
  2142. return (out - outStart) * 8 + lc;
  2143. }
  2144. //
  2145. // DECODING
  2146. //
  2147. //
  2148. // In order to force the compiler to inline them,
  2149. // getChar() and getCode() are implemented as macros
  2150. // instead of "inline" functions.
  2151. //
  2152. #define getChar(c, lc, in) \
  2153. { \
  2154. c = (c << 8) | *(unsigned char *)(in++); \
  2155. lc += 8; \
  2156. }
  2157. #if 0
  2158. #define getCode(po, rlc, c, lc, in, out, ob, oe) \
  2159. { \
  2160. if (po == rlc) { \
  2161. if (lc < 8) getChar(c, lc, in); \
  2162. \
  2163. lc -= 8; \
  2164. \
  2165. unsigned char cs = (c >> lc); \
  2166. \
  2167. if (out + cs > oe) return false; \
  2168. \
  2169. /* TinyEXR issue 78 */ \
  2170. unsigned short s = out[-1]; \
  2171. \
  2172. while (cs-- > 0) *out++ = s; \
  2173. } else if (out < oe) { \
  2174. *out++ = po; \
  2175. } else { \
  2176. return false; \
  2177. } \
  2178. }
  2179. #else
  2180. static bool getCode(int po, int rlc, long long &c, int &lc, const char *&in,
  2181. const char *in_end, unsigned short *&out,
  2182. const unsigned short *ob, const unsigned short *oe) {
  2183. (void)ob;
  2184. if (po == rlc) {
  2185. if (lc < 8) {
  2186. /* TinyEXR issue 78 */
  2187. /* TinyEXR issue 160. in + 1 -> in */
  2188. if (in >= in_end) {
  2189. return false;
  2190. }
  2191. getChar(c, lc, in);
  2192. }
  2193. lc -= 8;
  2194. unsigned char cs = (c >> lc);
  2195. if (out + cs > oe) return false;
  2196. // Bounds check for safety
  2197. // Issue 100.
  2198. if ((out - 1) < ob) return false;
  2199. unsigned short s = out[-1];
  2200. while (cs-- > 0) *out++ = s;
  2201. } else if (out < oe) {
  2202. *out++ = po;
  2203. } else {
  2204. return false;
  2205. }
  2206. return true;
  2207. }
  2208. #endif
  2209. //
  2210. // Decode (uncompress) ni bits based on encoding & decoding tables:
  2211. //
  2212. static bool hufDecode(const long long *hcode, // i : encoding table
  2213. const HufDec *hdecod, // i : decoding table
  2214. const char *in, // i : compressed input buffer
  2215. int ni, // i : input size (in bits)
  2216. int rlc, // i : run-length code
2217. int no, // i : expected number of output values
  2218. unsigned short *out) // o: uncompressed output buffer
  2219. {
  2220. long long c = 0;
  2221. int lc = 0;
  2222. unsigned short *outb = out; // begin
  2223. unsigned short *oe = out + no; // end
  2224. const char *ie = in + (ni + 7) / 8; // input byte size
  2225. //
  2226. // Loop on input bytes
  2227. //
  2228. while (in < ie) {
  2229. getChar(c, lc, in);
  2230. //
  2231. // Access decoding table
  2232. //
  2233. while (lc >= HUF_DECBITS) {
  2234. const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK];
  2235. if (pl.len) {
  2236. //
  2237. // Get short code
  2238. //
  2239. lc -= pl.len;
  2240. // std::cout << "lit = " << pl.lit << std::endl;
  2241. // std::cout << "rlc = " << rlc << std::endl;
  2242. // std::cout << "c = " << c << std::endl;
  2243. // std::cout << "lc = " << lc << std::endl;
  2244. // std::cout << "in = " << in << std::endl;
  2245. // std::cout << "out = " << out << std::endl;
  2246. // std::cout << "oe = " << oe << std::endl;
  2247. if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
  2248. return false;
  2249. }
  2250. } else {
  2251. if (!pl.p) {
  2252. return false;
  2253. }
  2254. // invalidCode(); // wrong code
  2255. //
  2256. // Search long code
  2257. //
  2258. int j;
  2259. for (j = 0; j < pl.lit; j++) {
  2260. int l = hufLength(hcode[pl.p[j]]);
  2261. while (lc < l && in < ie) // get more bits
  2262. getChar(c, lc, in);
  2263. if (lc >= l) {
  2264. if (hufCode(hcode[pl.p[j]]) ==
  2265. ((c >> (lc - l)) & (((long long)(1) << l) - 1))) {
  2266. //
  2267. // Found : get long code
  2268. //
  2269. lc -= l;
  2270. if (!getCode(pl.p[j], rlc, c, lc, in, ie, out, outb, oe)) {
  2271. return false;
  2272. }
  2273. break;
  2274. }
  2275. }
  2276. }
  2277. if (j == pl.lit) {
  2278. return false;
  2279. // invalidCode(); // Not found
  2280. }
  2281. }
  2282. }
  2283. }
  2284. //
  2285. // Get remaining (short) codes
  2286. //
  2287. int i = (8 - ni) & 7;
  2288. c >>= i;
  2289. lc -= i;
  2290. while (lc > 0) {
  2291. const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK];
  2292. if (pl.len) {
  2293. lc -= pl.len;
  2294. if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
  2295. return false;
  2296. }
  2297. } else {
  2298. return false;
  2299. // invalidCode(); // wrong (long) code
  2300. }
  2301. }
  2302. if (out - outb != no) {
  2303. return false;
  2304. }
  2305. // notEnoughData ();
  2306. return true;
  2307. }
  2308. static void countFrequencies(std::vector<long long> &freq,
  2309. const unsigned short data[/*n*/], int n) {
  2310. for (int i = 0; i < HUF_ENCSIZE; ++i) freq[i] = 0;
  2311. for (int i = 0; i < n; ++i) ++freq[data[i]];
  2312. }
  2313. static void writeUInt(char buf[4], unsigned int i) {
  2314. unsigned char *b = (unsigned char *)buf;
  2315. b[0] = i;
  2316. b[1] = i >> 8;
  2317. b[2] = i >> 16;
  2318. b[3] = i >> 24;
  2319. }
  2320. static unsigned int readUInt(const char buf[4]) {
  2321. const unsigned char *b = (const unsigned char *)buf;
  2322. return (b[0] & 0x000000ff) | ((b[1] << 8) & 0x0000ff00) |
  2323. ((b[2] << 16) & 0x00ff0000) | ((b[3] << 24) & 0xff000000);
  2324. }
  2325. //
  2326. // EXTERNAL INTERFACE
  2327. //
  2328. static int hufCompress(const unsigned short raw[], int nRaw,
  2329. char compressed[]) {
  2330. if (nRaw == 0) return 0;
  2331. std::vector<long long> freq(HUF_ENCSIZE);
  2332. countFrequencies(freq, raw, nRaw);
  2333. int im = 0;
  2334. int iM = 0;
  2335. hufBuildEncTable(freq.data(), &im, &iM);
  2336. char *tableStart = compressed + 20;
  2337. char *tableEnd = tableStart;
  2338. hufPackEncTable(freq.data(), im, iM, &tableEnd);
  2339. int tableLength = tableEnd - tableStart;
  2340. char *dataStart = tableEnd;
  2341. int nBits = hufEncode(freq.data(), raw, nRaw, iM, dataStart);
  2342. int data_length = (nBits + 7) / 8;
  2343. writeUInt(compressed, im);
  2344. writeUInt(compressed + 4, iM);
  2345. writeUInt(compressed + 8, tableLength);
  2346. writeUInt(compressed + 12, nBits);
  2347. writeUInt(compressed + 16, 0); // room for future extensions
  2348. return dataStart + data_length - compressed;
  2349. }
  2350. static bool hufUncompress(const char compressed[], int nCompressed,
  2351. std::vector<unsigned short> *raw) {
  2352. if (nCompressed == 0) {
  2353. if (raw->size() != 0) return false;
2354. return true; // no data to decode and none expected
  2355. }
  2356. int im = readUInt(compressed);
  2357. int iM = readUInt(compressed + 4);
  2358. // int tableLength = readUInt (compressed + 8);
  2359. int nBits = readUInt(compressed + 12);
  2360. if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false;
  2361. const char *ptr = compressed + 20;
  2362. //
  2363. // Fast decoder needs at least 2x64-bits of compressed data, and
  2364. // needs to be run-able on this platform. Otherwise, fall back
  2365. // to the original decoder
  2366. //
  2367. // if (FastHufDecoder::enabled() && nBits > 128)
  2368. //{
  2369. // FastHufDecoder fhd (ptr, nCompressed - (ptr - compressed), im, iM, iM);
  2370. // fhd.decode ((unsigned char*)ptr, nBits, raw, nRaw);
  2371. //}
  2372. // else
  2373. {
  2374. std::vector<long long> freq(HUF_ENCSIZE);
  2375. std::vector<HufDec> hdec(HUF_DECSIZE);
  2376. hufClearDecTable(&hdec.at(0));
2377. if (!hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM,
2378. &freq.at(0))) { return false; }
2379. {
2380. if (nBits > 8 * (nCompressed - (ptr - compressed))) {
2381. return false;
2382. }
2383. if (!hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0))) { hufFreeDecTable(&hdec.at(0)); return false; }
2384. if (!hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM, static_cast<int>(raw->size()),
2385. raw->data())) { hufFreeDecTable(&hdec.at(0)); return false; }
  2386. }
  2387. // catch (...)
  2388. //{
  2389. // hufFreeDecTable (hdec);
  2390. // throw;
  2391. //}
  2392. hufFreeDecTable(&hdec.at(0));
  2393. }
  2394. return true;
  2395. }
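// A minimal, hypothetical round-trip sketch (kept disabled; not part of the
// original OpenEXR code): hufUncompress() expects `raw` to be pre-sized to the
// number of output values, and the compressed buffer here is sized generously
// for this tiny example (20-byte header + packed length table + encoded bits).
#if 0
static bool HufRoundTripSketch() {
  std::vector<unsigned short> src(256);
  for (size_t i = 0; i < src.size(); i++) {
    src[i] = static_cast<unsigned short>(i & 15);  // 16 distinct symbols
  }
  std::vector<char> compressed(65536);
  int nCompressed =
      hufCompress(src.data(), static_cast<int>(src.size()), compressed.data());
  std::vector<unsigned short> dst(src.size());  // pre-sized output
  if (!hufUncompress(compressed.data(), nCompressed, &dst)) return false;
  return dst == src;
}
#endif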
  2396. //
  2397. // Functions to compress the range of values in the pixel data
  2398. //
  2399. const int USHORT_RANGE = (1 << 16);
  2400. const int BITMAP_SIZE = (USHORT_RANGE >> 3);
  2401. static void bitmapFromData(const unsigned short data[/*nData*/], int nData,
  2402. unsigned char bitmap[BITMAP_SIZE],
  2403. unsigned short &minNonZero,
  2404. unsigned short &maxNonZero) {
  2405. for (int i = 0; i < BITMAP_SIZE; ++i) bitmap[i] = 0;
  2406. for (int i = 0; i < nData; ++i) bitmap[data[i] >> 3] |= (1 << (data[i] & 7));
  2407. bitmap[0] &= ~1; // zero is not explicitly stored in
  2408. // the bitmap; we assume that the
  2409. // data always contain zeroes
  2410. minNonZero = BITMAP_SIZE - 1;
  2411. maxNonZero = 0;
  2412. for (int i = 0; i < BITMAP_SIZE; ++i) {
  2413. if (bitmap[i]) {
  2414. if (minNonZero > i) minNonZero = i;
  2415. if (maxNonZero < i) maxNonZero = i;
  2416. }
  2417. }
  2418. }
  2419. static unsigned short forwardLutFromBitmap(
  2420. const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
  2421. int k = 0;
  2422. for (int i = 0; i < USHORT_RANGE; ++i) {
  2423. if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7))))
  2424. lut[i] = k++;
  2425. else
  2426. lut[i] = 0;
  2427. }
  2428. return k - 1; // maximum value stored in lut[],
  2429. } // i.e. number of ones in bitmap minus 1
  2430. static unsigned short reverseLutFromBitmap(
  2431. const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
  2432. int k = 0;
  2433. for (int i = 0; i < USHORT_RANGE; ++i) {
  2434. if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[k++] = i;
  2435. }
  2436. int n = k - 1;
  2437. while (k < USHORT_RANGE) lut[k++] = 0;
  2438. return n; // maximum k where lut[k] is non-zero,
  2439. } // i.e. number of ones in bitmap minus 1
  2440. static void applyLut(const unsigned short lut[USHORT_RANGE],
  2441. unsigned short data[/*nData*/], int nData) {
  2442. for (int i = 0; i < nData; ++i) data[i] = lut[data[i]];
  2443. }
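// A minimal, hypothetical sketch (kept disabled; not part of the original
// OpenEXR code): the bitmap/LUT pair remaps the sparse set of 16-bit values
// that actually occur onto a dense range [0, maxValue], which is what makes
// the wavelet and Huffman stages effective; the reverse LUT restores the
// original values.
#if 0
static void LutSketch() {
  unsigned short data[4] = {0, 7, 7, 900};
  std::vector<unsigned char> bitmap(BITMAP_SIZE);
  unsigned short minNonZero, maxNonZero;
  bitmapFromData(data, 4, bitmap.data(), minNonZero, maxNonZero);
  std::vector<unsigned short> lut(USHORT_RANGE);
  unsigned short maxValue = forwardLutFromBitmap(bitmap.data(), lut.data());
  applyLut(lut.data(), data, 4);  // data is now {0, 1, 1, 2}, maxValue == 2
  std::vector<unsigned short> rlut(USHORT_RANGE);
  reverseLutFromBitmap(bitmap.data(), rlut.data());
  applyLut(rlut.data(), data, 4);  // back to {0, 7, 7, 900}
  (void)minNonZero;
  (void)maxNonZero;
  (void)maxValue;
}
#endif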
  2444. #ifdef __clang__
  2445. #pragma clang diagnostic pop
  2446. #endif // __clang__
  2447. #ifdef _MSC_VER
  2448. #pragma warning(pop)
  2449. #endif
  2450. static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize,
  2451. const unsigned char *inPtr, size_t inSize,
  2452. const std::vector<ChannelInfo> &channelInfo,
  2453. int data_width, int num_lines) {
  2454. std::vector<unsigned char> bitmap(BITMAP_SIZE);
  2455. unsigned short minNonZero;
  2456. unsigned short maxNonZero;
  2457. #if !TINYEXR_LITTLE_ENDIAN
  2458. // @todo { PIZ compression on BigEndian architecture. }
  2459. assert(0);
  2460. return false;
  2461. #endif
  2462. // Assume `inSize` is multiple of 2 or 4.
  2463. std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short));
  2464. std::vector<PIZChannelData> channelData(channelInfo.size());
  2465. unsigned short *tmpBufferEnd = &tmpBuffer.at(0);
  2466. for (size_t c = 0; c < channelData.size(); c++) {
  2467. PIZChannelData &cd = channelData[c];
  2468. cd.start = tmpBufferEnd;
  2469. cd.end = cd.start;
  2470. cd.nx = data_width;
  2471. cd.ny = num_lines;
  2472. // cd.ys = c.channel().ySampling;
  2473. size_t pixelSize = sizeof(int); // UINT and FLOAT
  2474. if (channelInfo[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) {
  2475. pixelSize = sizeof(short);
  2476. }
  2477. cd.size = static_cast<int>(pixelSize / sizeof(short));
  2478. tmpBufferEnd += cd.nx * cd.ny * cd.size;
  2479. }
  2480. const unsigned char *ptr = inPtr;
  2481. for (int y = 0; y < num_lines; ++y) {
  2482. for (size_t i = 0; i < channelData.size(); ++i) {
  2483. PIZChannelData &cd = channelData[i];
  2484. // if (modp (y, cd.ys) != 0)
  2485. // continue;
  2486. size_t n = static_cast<size_t>(cd.nx * cd.size);
  2487. memcpy(cd.end, ptr, n * sizeof(unsigned short));
  2488. ptr += n * sizeof(unsigned short);
  2489. cd.end += n;
  2490. }
  2491. }
  2492. bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()),
  2493. bitmap.data(), minNonZero, maxNonZero);
  2494. std::vector<unsigned short> lut(USHORT_RANGE);
  2495. unsigned short maxValue = forwardLutFromBitmap(bitmap.data(), lut.data());
  2496. applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()));
  2497. //
  2498. // Store range compression info in _outBuffer
  2499. //
  2500. char *buf = reinterpret_cast<char *>(outPtr);
  2501. memcpy(buf, &minNonZero, sizeof(unsigned short));
  2502. buf += sizeof(unsigned short);
  2503. memcpy(buf, &maxNonZero, sizeof(unsigned short));
  2504. buf += sizeof(unsigned short);
  2505. if (minNonZero <= maxNonZero) {
  2506. memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero),
  2507. maxNonZero - minNonZero + 1);
  2508. buf += maxNonZero - minNonZero + 1;
  2509. }
  2510. //
  2511. // Apply wavelet encoding
  2512. //
  2513. for (size_t i = 0; i < channelData.size(); ++i) {
  2514. PIZChannelData &cd = channelData[i];
  2515. for (int j = 0; j < cd.size; ++j) {
  2516. wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
  2517. maxValue);
  2518. }
  2519. }
  2520. //
  2521. // Apply Huffman encoding; append the result to _outBuffer
  2522. //
2523. // length header (4 bytes), then Huffman-coded data. Initialize the length
2524. // header with zero, then fill it in with `length` afterwards.
  2525. char *lengthPtr = buf;
  2526. int zero = 0;
  2527. memcpy(buf, &zero, sizeof(int));
  2528. buf += sizeof(int);
  2529. int length =
  2530. hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf);
  2531. memcpy(lengthPtr, &length, sizeof(int));
  2532. (*outSize) = static_cast<unsigned int>(
  2533. (reinterpret_cast<unsigned char *>(buf) - outPtr) +
  2534. static_cast<unsigned int>(length));
  2535. // Use uncompressed data when compressed data is larger than uncompressed.
  2536. // (Issue 40)
  2537. if ((*outSize) >= inSize) {
  2538. (*outSize) = static_cast<unsigned int>(inSize);
  2539. memcpy(outPtr, inPtr, inSize);
  2540. }
  2541. return true;
  2542. }
  2543. static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr,
  2544. size_t tmpBufSizeInBytes, size_t inLen, int num_channels,
  2545. const EXRChannelInfo *channels, int data_width,
  2546. int num_lines) {
  2547. if (inLen == tmpBufSizeInBytes) {
2548. // Data is not compressed (Issue 40).
  2549. memcpy(outPtr, inPtr, inLen);
  2550. return true;
  2551. }
  2552. std::vector<unsigned char> bitmap(BITMAP_SIZE);
  2553. unsigned short minNonZero;
  2554. unsigned short maxNonZero;
  2555. #if !TINYEXR_LITTLE_ENDIAN
  2556. // @todo { PIZ compression on BigEndian architecture. }
  2557. assert(0);
  2558. return false;
  2559. #endif
  2560. memset(bitmap.data(), 0, BITMAP_SIZE);
  2561. const unsigned char *ptr = inPtr;
  2562. // minNonZero = *(reinterpret_cast<const unsigned short *>(ptr));
  2563. tinyexr::cpy2(&minNonZero, reinterpret_cast<const unsigned short *>(ptr));
  2564. // maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2));
  2565. tinyexr::cpy2(&maxNonZero, reinterpret_cast<const unsigned short *>(ptr + 2));
  2566. ptr += 4;
  2567. if (maxNonZero >= BITMAP_SIZE) {
  2568. return false;
  2569. }
  2570. if (minNonZero <= maxNonZero) {
  2571. memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr,
  2572. maxNonZero - minNonZero + 1);
  2573. ptr += maxNonZero - minNonZero + 1;
  2574. }
  2575. std::vector<unsigned short> lut(USHORT_RANGE);
  2576. memset(lut.data(), 0, sizeof(unsigned short) * USHORT_RANGE);
  2577. unsigned short maxValue = reverseLutFromBitmap(bitmap.data(), lut.data());
  2578. //
  2579. // Huffman decoding
  2580. //
  2581. int length;
  2582. // length = *(reinterpret_cast<const int *>(ptr));
  2583. tinyexr::cpy4(&length, reinterpret_cast<const int *>(ptr));
  2584. ptr += sizeof(int);
  2585. if (size_t((ptr - inPtr) + length) > inLen) {
  2586. return false;
  2587. }
  2588. std::vector<unsigned short> tmpBuffer(tmpBufSizeInBytes / sizeof(unsigned short));
2589. if (!hufUncompress(reinterpret_cast<const char *>(ptr), length, &tmpBuffer)) { return false; }
  2590. //
  2591. // Wavelet decoding
  2592. //
  2593. std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels));
  2594. unsigned short *tmpBufferEnd = &tmpBuffer.at(0);
  2595. for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) {
  2596. const EXRChannelInfo &chan = channels[i];
  2597. size_t pixelSize = sizeof(int); // UINT and FLOAT
  2598. if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) {
  2599. pixelSize = sizeof(short);
  2600. }
  2601. channelData[i].start = tmpBufferEnd;
  2602. channelData[i].end = channelData[i].start;
  2603. channelData[i].nx = data_width;
  2604. channelData[i].ny = num_lines;
  2605. // channelData[i].ys = 1;
  2606. channelData[i].size = static_cast<int>(pixelSize / sizeof(short));
  2607. tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size;
  2608. }
  2609. for (size_t i = 0; i < channelData.size(); ++i) {
  2610. PIZChannelData &cd = channelData[i];
  2611. for (int j = 0; j < cd.size; ++j) {
  2612. wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
  2613. maxValue);
  2614. }
  2615. }
  2616. //
  2617. // Expand the pixel data to their original range
  2618. //
  2619. applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBufSizeInBytes / sizeof(unsigned short)));
  2620. for (int y = 0; y < num_lines; y++) {
  2621. for (size_t i = 0; i < channelData.size(); ++i) {
  2622. PIZChannelData &cd = channelData[i];
  2623. // if (modp (y, cd.ys) != 0)
  2624. // continue;
  2625. size_t n = static_cast<size_t>(cd.nx * cd.size);
  2626. memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short)));
  2627. outPtr += n * sizeof(unsigned short);
  2628. cd.end += n;
  2629. }
  2630. }
  2631. return true;
  2632. }
  2633. #endif // TINYEXR_USE_PIZ
  2634. #if TINYEXR_USE_ZFP
  2635. struct ZFPCompressionParam {
  2636. double rate;
  2637. unsigned int precision;
  2638. unsigned int __pad0;
  2639. double tolerance;
  2640. int type; // TINYEXR_ZFP_COMPRESSIONTYPE_*
  2641. unsigned int __pad1;
  2642. ZFPCompressionParam() {
  2643. type = TINYEXR_ZFP_COMPRESSIONTYPE_RATE;
  2644. rate = 2.0;
  2645. precision = 0;
  2646. tolerance = 0.0;
  2647. }
  2648. };
  2649. static bool FindZFPCompressionParam(ZFPCompressionParam *param,
  2650. const EXRAttribute *attributes,
  2651. int num_attributes, std::string *err) {
  2652. bool foundType = false;
  2653. for (int i = 0; i < num_attributes; i++) {
  2654. if ((strcmp(attributes[i].name, "zfpCompressionType") == 0)) {
  2655. if (attributes[i].size == 1) {
  2656. param->type = static_cast<int>(attributes[i].value[0]);
  2657. foundType = true;
  2658. break;
  2659. } else {
  2660. if (err) {
  2661. (*err) +=
  2662. "zfpCompressionType attribute must be uchar(1 byte) type.\n";
  2663. }
  2664. return false;
  2665. }
  2666. }
  2667. }
  2668. if (!foundType) {
  2669. if (err) {
  2670. (*err) += "`zfpCompressionType` attribute not found.\n";
  2671. }
  2672. return false;
  2673. }
  2674. if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
  2675. for (int i = 0; i < num_attributes; i++) {
  2676. if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) &&
  2677. (attributes[i].size == 8)) {
  2678. param->rate = *(reinterpret_cast<double *>(attributes[i].value));
  2679. return true;
  2680. }
  2681. }
  2682. if (err) {
  2683. (*err) += "`zfpCompressionRate` attribute not found.\n";
  2684. }
  2685. } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
  2686. for (int i = 0; i < num_attributes; i++) {
  2687. if ((strcmp(attributes[i].name, "zfpCompressionPrecision") == 0) &&
  2688. (attributes[i].size == 4)) {
2689. param->precision = static_cast<unsigned int>(*(reinterpret_cast<int *>(attributes[i].value))); // store into precision, not rate
  2690. return true;
  2691. }
  2692. }
  2693. if (err) {
  2694. (*err) += "`zfpCompressionPrecision` attribute not found.\n";
  2695. }
  2696. } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
  2697. for (int i = 0; i < num_attributes; i++) {
  2698. if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) &&
  2699. (attributes[i].size == 8)) {
  2700. param->tolerance = *(reinterpret_cast<double *>(attributes[i].value));
  2701. return true;
  2702. }
  2703. }
  2704. if (err) {
  2705. (*err) += "`zfpCompressionTolerance` attribute not found.\n";
  2706. }
  2707. } else {
  2708. if (err) {
  2709. (*err) += "Unknown value specified for `zfpCompressionType`.\n";
  2710. }
  2711. }
  2712. return false;
  2713. }
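// A minimal, hypothetical sketch (kept disabled; not an official TinyEXR API
// example): how the zfp-related attributes stored in an EXR header map onto
// ZFPCompressionParam. The attribute names and sizes follow the checks in
// FindZFPCompressionParam() above; the EXRAttribute fields used here are the
// ones declared earlier in this header.
#if 0
static bool ZfpParamSketch() {
  unsigned char type_value = TINYEXR_ZFP_COMPRESSIONTYPE_RATE;
  double rate_value = 4.0;
  EXRAttribute attrs[2];
  memset(attrs, 0, sizeof(attrs));
  strcpy(attrs[0].name, "zfpCompressionType");
  attrs[0].value = &type_value;
  attrs[0].size = 1;
  strcpy(attrs[1].name, "zfpCompressionRate");
  attrs[1].value = reinterpret_cast<unsigned char *>(&rate_value);
  attrs[1].size = 8;
  ZFPCompressionParam param;
  std::string err;
  return FindZFPCompressionParam(&param, attrs, 2, &err);  // param.rate == 4.0
}
#endif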
  2714. // Assume pixel format is FLOAT for all channels.
  2715. static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines,
  2716. size_t num_channels, const unsigned char *src,
  2717. unsigned long src_size,
  2718. const ZFPCompressionParam &param) {
  2719. size_t uncompressed_size =
  2720. size_t(dst_width) * size_t(dst_num_lines) * num_channels;
  2721. if (uncompressed_size == src_size) {
2722. // Data is not compressed (Issue 40).
2723. memcpy(dst, src, src_size); return true;
  2724. }
  2725. zfp_stream *zfp = NULL;
  2726. zfp_field *field = NULL;
  2727. assert((dst_width % 4) == 0);
  2728. assert((dst_num_lines % 4) == 0);
  2729. if ((size_t(dst_width) & 3U) || (size_t(dst_num_lines) & 3U)) {
  2730. return false;
  2731. }
  2732. field =
  2733. zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)),
  2734. zfp_type_float, static_cast<unsigned int>(dst_width),
  2735. static_cast<unsigned int>(dst_num_lines) *
  2736. static_cast<unsigned int>(num_channels));
  2737. zfp = zfp_stream_open(NULL);
  2738. if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
  2739. zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimension */ 2,
  2740. /* write random access */ 0);
  2741. } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
  2742. zfp_stream_set_precision(zfp, param.precision);
  2743. } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
  2744. zfp_stream_set_accuracy(zfp, param.tolerance);
  2745. } else {
  2746. assert(0);
  2747. }
  2748. size_t buf_size = zfp_stream_maximum_size(zfp, field);
  2749. std::vector<unsigned char> buf(buf_size);
  2750. memcpy(&buf.at(0), src, src_size);
  2751. bitstream *stream = stream_open(&buf.at(0), buf_size);
  2752. zfp_stream_set_bit_stream(zfp, stream);
  2753. zfp_stream_rewind(zfp);
  2754. size_t image_size = size_t(dst_width) * size_t(dst_num_lines);
  2755. for (size_t c = 0; c < size_t(num_channels); c++) {
  2756. // decompress 4x4 pixel block.
  2757. for (size_t y = 0; y < size_t(dst_num_lines); y += 4) {
  2758. for (size_t x = 0; x < size_t(dst_width); x += 4) {
  2759. float fblock[16];
  2760. zfp_decode_block_float_2(zfp, fblock);
  2761. for (size_t j = 0; j < 4; j++) {
  2762. for (size_t i = 0; i < 4; i++) {
  2763. dst[c * image_size + ((y + j) * size_t(dst_width) + (x + i))] =
  2764. fblock[j * 4 + i];
  2765. }
  2766. }
  2767. }
  2768. }
  2769. }
  2770. zfp_field_free(field);
  2771. zfp_stream_close(zfp);
  2772. stream_close(stream);
  2773. return true;
  2774. }
  2775. // Assume pixel format is FLOAT for all channels.
  2776. static bool CompressZfp(std::vector<unsigned char> *outBuf,
  2777. unsigned int *outSize, const float *inPtr, int width,
  2778. int num_lines, int num_channels,
  2779. const ZFPCompressionParam &param) {
  2780. zfp_stream *zfp = NULL;
  2781. zfp_field *field = NULL;
  2782. assert((width % 4) == 0);
  2783. assert((num_lines % 4) == 0);
  2784. if ((size_t(width) & 3U) || (size_t(num_lines) & 3U)) {
  2785. return false;
  2786. }
  2787. // create input array.
  2788. field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)),
  2789. zfp_type_float, static_cast<unsigned int>(width),
  2790. static_cast<unsigned int>(num_lines * num_channels));
  2791. zfp = zfp_stream_open(NULL);
  2792. if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
  2793. zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0);
  2794. } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
  2795. zfp_stream_set_precision(zfp, param.precision);
  2796. } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
  2797. zfp_stream_set_accuracy(zfp, param.tolerance);
  2798. } else {
  2799. assert(0);
  2800. }
  2801. size_t buf_size = zfp_stream_maximum_size(zfp, field);
  2802. outBuf->resize(buf_size);
  2803. bitstream *stream = stream_open(&outBuf->at(0), buf_size);
  2804. zfp_stream_set_bit_stream(zfp, stream);
  2805. zfp_field_free(field);
  2806. size_t image_size = size_t(width) * size_t(num_lines);
  2807. for (size_t c = 0; c < size_t(num_channels); c++) {
  2808. // compress 4x4 pixel block.
  2809. for (size_t y = 0; y < size_t(num_lines); y += 4) {
  2810. for (size_t x = 0; x < size_t(width); x += 4) {
  2811. float fblock[16];
  2812. for (size_t j = 0; j < 4; j++) {
  2813. for (size_t i = 0; i < 4; i++) {
  2814. fblock[j * 4 + i] =
  2815. inPtr[c * image_size + ((y + j) * size_t(width) + (x + i))];
  2816. }
  2817. }
  2818. zfp_encode_block_float_2(zfp, fblock);
  2819. }
  2820. }
  2821. }
  2822. zfp_stream_flush(zfp);
  2823. (*outSize) = static_cast<unsigned int>(zfp_stream_compressed_size(zfp));
  2824. zfp_stream_close(zfp);
  2825. return true;
  2826. }
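// A minimal, hypothetical round-trip sketch (kept disabled; not an official
// TinyEXR API example): ZFP compression and decompression of a single-channel
// 8x8 float image. Both helpers above require the image dimensions to be
// multiples of 4, and the default ZFPCompressionParam (rate mode, rate = 2.0)
// is lossy, so the round trip is approximate rather than exact.
#if 0
static bool ZfpRoundTripSketch() {
  const int w = 8, h = 8;
  std::vector<float> src(static_cast<size_t>(w * h));
  for (int i = 0; i < w * h; i++) {
    src[static_cast<size_t>(i)] = static_cast<float>(i) * 0.25f;
  }
  ZFPCompressionParam param;  // defaults: rate mode, rate = 2.0
  std::vector<unsigned char> compressed;
  unsigned int compressedSize = 0;
  if (!CompressZfp(&compressed, &compressedSize, src.data(), w, h,
                   /* num_channels */ 1, param)) {
    return false;
  }
  std::vector<float> dst(static_cast<size_t>(w * h));
  return DecompressZfp(dst.data(), w, h, /* num_channels */ 1,
                       compressed.data(), compressedSize, param);
}
#endif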
  2827. #endif
  2828. //
  2829. // -----------------------------------------------------------------
  2830. //
  2831. // heuristics
  2832. #define TINYEXR_DIMENSION_THRESHOLD (1024 * 8192)
  2833. // TODO(syoyo): Refactor function arguments.
  2834. static bool DecodePixelData(/* out */ unsigned char **out_images,
  2835. const int *requested_pixel_types,
  2836. const unsigned char *data_ptr, size_t data_len,
  2837. int compression_type, int line_order, int width,
  2838. int height, int x_stride, int y, int line_no,
  2839. int num_lines, size_t pixel_data_size,
  2840. size_t num_attributes,
  2841. const EXRAttribute *attributes, size_t num_channels,
  2842. const EXRChannelInfo *channels,
  2843. const std::vector<size_t> &channel_offset_list) {
  2844. if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { // PIZ
  2845. #if TINYEXR_USE_PIZ
  2846. if ((width == 0) || (num_lines == 0) || (pixel_data_size == 0)) {
  2847. // Invalid input #90
  2848. return false;
  2849. }
  2850. // Allocate original data size.
  2851. std::vector<unsigned char> outBuf(static_cast<size_t>(
  2852. static_cast<size_t>(width * num_lines) * pixel_data_size));
  2853. size_t tmpBufLen = outBuf.size();
  2854. bool ret = tinyexr::DecompressPiz(
  2855. reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen,
  2856. data_len, static_cast<int>(num_channels), channels, width, num_lines);
  2857. if (!ret) {
  2858. return false;
  2859. }
  2860. // For PIZ_COMPRESSION:
  2861. // pixel sample data for channel 0 for scanline 0
  2862. // pixel sample data for channel 1 for scanline 0
  2863. // pixel sample data for channel ... for scanline 0
  2864. // pixel sample data for channel n for scanline 0
  2865. // pixel sample data for channel 0 for scanline 1
  2866. // pixel sample data for channel 1 for scanline 1
  2867. // pixel sample data for channel ... for scanline 1
  2868. // pixel sample data for channel n for scanline 1
  2869. // ...
  2870. for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
  2871. if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
  2872. for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
  2873. const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
  2874. &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
  2875. channel_offset_list[c] * static_cast<size_t>(width)));
  2876. for (size_t u = 0; u < static_cast<size_t>(width); u++) {
  2877. FP16 hf;
  2878. // hf.u = line_ptr[u];
2879. // use `cpy` to avoid unaligned memory access when compiler
2880. // optimizations are enabled.
  2881. tinyexr::cpy2(&(hf.u), line_ptr + u);
  2882. tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
  2883. if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
  2884. unsigned short *image =
  2885. reinterpret_cast<unsigned short **>(out_images)[c];
  2886. if (line_order == 0) {
  2887. image += (static_cast<size_t>(line_no) + v) *
  2888. static_cast<size_t>(x_stride) +
  2889. u;
  2890. } else {
  2891. image += static_cast<size_t>(
  2892. (height - 1 - (line_no + static_cast<int>(v)))) *
  2893. static_cast<size_t>(x_stride) +
  2894. u;
  2895. }
  2896. *image = hf.u;
  2897. } else { // HALF -> FLOAT
  2898. FP32 f32 = half_to_float(hf);
  2899. float *image = reinterpret_cast<float **>(out_images)[c];
  2900. size_t offset = 0;
  2901. if (line_order == 0) {
  2902. offset = (static_cast<size_t>(line_no) + v) *
  2903. static_cast<size_t>(x_stride) +
  2904. u;
  2905. } else {
  2906. offset = static_cast<size_t>(
  2907. (height - 1 - (line_no + static_cast<int>(v)))) *
  2908. static_cast<size_t>(x_stride) +
  2909. u;
  2910. }
  2911. image += offset;
  2912. *image = f32.f;
  2913. }
  2914. }
  2915. }
  2916. } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
  2917. assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
  2918. for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
  2919. const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
  2920. &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
  2921. channel_offset_list[c] * static_cast<size_t>(width)));
  2922. for (size_t u = 0; u < static_cast<size_t>(width); u++) {
  2923. unsigned int val;
  2924. // val = line_ptr[u];
  2925. tinyexr::cpy4(&val, line_ptr + u);
  2926. tinyexr::swap4(&val);
  2927. unsigned int *image =
  2928. reinterpret_cast<unsigned int **>(out_images)[c];
  2929. if (line_order == 0) {
  2930. image += (static_cast<size_t>(line_no) + v) *
  2931. static_cast<size_t>(x_stride) +
  2932. u;
  2933. } else {
  2934. image += static_cast<size_t>(
  2935. (height - 1 - (line_no + static_cast<int>(v)))) *
  2936. static_cast<size_t>(x_stride) +
  2937. u;
  2938. }
  2939. *image = val;
  2940. }
  2941. }
  2942. } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
  2943. assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
  2944. for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
  2945. const float *line_ptr = reinterpret_cast<float *>(&outBuf.at(
  2946. v * pixel_data_size * static_cast<size_t>(x_stride) +
  2947. channel_offset_list[c] * static_cast<size_t>(x_stride)));
  2948. for (size_t u = 0; u < static_cast<size_t>(width); u++) {
  2949. float val;
  2950. // val = line_ptr[u];
  2951. tinyexr::cpy4(&val, line_ptr + u);
  2952. tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
  2953. float *image = reinterpret_cast<float **>(out_images)[c];
  2954. if (line_order == 0) {
  2955. image += (static_cast<size_t>(line_no) + v) *
  2956. static_cast<size_t>(x_stride) +
  2957. u;
  2958. } else {
  2959. image += static_cast<size_t>(
  2960. (height - 1 - (line_no + static_cast<int>(v)))) *
  2961. static_cast<size_t>(x_stride) +
  2962. u;
  2963. }
  2964. *image = val;
  2965. }
  2966. }
  2967. } else {
  2968. assert(0);
  2969. }
  2970. }
  2971. #else
  2972. assert(0 && "PIZ is enabled in this build");
  2973. return false;
  2974. #endif
  2975. } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS ||
  2976. compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
  2977. // Allocate original data size.
  2978. std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
  2979. static_cast<size_t>(num_lines) *
  2980. pixel_data_size);
  2981. unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
  2982. assert(dstLen > 0);
  2983. if (!tinyexr::DecompressZip(
  2984. reinterpret_cast<unsigned char *>(&outBuf.at(0)), &dstLen, data_ptr,
  2985. static_cast<unsigned long>(data_len))) {
  2986. return false;
  2987. }
  2988. // For ZIP_COMPRESSION:
  2989. // pixel sample data for channel 0 for scanline 0
  2990. // pixel sample data for channel 1 for scanline 0
  2991. // pixel sample data for channel ... for scanline 0
  2992. // pixel sample data for channel n for scanline 0
  2993. // pixel sample data for channel 0 for scanline 1
  2994. // pixel sample data for channel 1 for scanline 1
  2995. // pixel sample data for channel ... for scanline 1
  2996. // pixel sample data for channel n for scanline 1
  2997. // ...
  2998. for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
  2999. if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
  3000. for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
  3001. const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
  3002. &outBuf.at(v * static_cast<size_t>(pixel_data_size) *
  3003. static_cast<size_t>(width) +
  3004. channel_offset_list[c] * static_cast<size_t>(width)));
  3005. for (size_t u = 0; u < static_cast<size_t>(width); u++) {
  3006. tinyexr::FP16 hf;
  3007. // hf.u = line_ptr[u];
  3008. tinyexr::cpy2(&(hf.u), line_ptr + u);
  3009. tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
  3010. if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
  3011. unsigned short *image =
  3012. reinterpret_cast<unsigned short **>(out_images)[c];
  3013. if (line_order == 0) {
  3014. image += (static_cast<size_t>(line_no) + v) *
  3015. static_cast<size_t>(x_stride) +
  3016. u;
  3017. } else {
  3018. image += (static_cast<size_t>(height) - 1U -
  3019. (static_cast<size_t>(line_no) + v)) *
  3020. static_cast<size_t>(x_stride) +
  3021. u;
  3022. }
  3023. *image = hf.u;
  3024. } else { // HALF -> FLOAT
  3025. tinyexr::FP32 f32 = half_to_float(hf);
  3026. float *image = reinterpret_cast<float **>(out_images)[c];
  3027. size_t offset = 0;
  3028. if (line_order == 0) {
  3029. offset = (static_cast<size_t>(line_no) + v) *
  3030. static_cast<size_t>(x_stride) +
  3031. u;
  3032. } else {
  3033. offset = (static_cast<size_t>(height) - 1U -
  3034. (static_cast<size_t>(line_no) + v)) *
  3035. static_cast<size_t>(x_stride) +
  3036. u;
  3037. }
  3038. image += offset;
  3039. *image = f32.f;
  3040. }
  3041. }
  3042. }
  3043. } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
  3044. assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
  3045. for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
  3046. const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
  3047. &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
  3048. channel_offset_list[c] * static_cast<size_t>(width)));
  3049. for (size_t u = 0; u < static_cast<size_t>(width); u++) {
  3050. unsigned int val;
  3051. // val = line_ptr[u];
  3052. tinyexr::cpy4(&val, line_ptr + u);
  3053. tinyexr::swap4(&val);
  3054. unsigned int *image =
  3055. reinterpret_cast<unsigned int **>(out_images)[c];
  3056. if (line_order == 0) {
  3057. image += (static_cast<size_t>(line_no) + v) *
  3058. static_cast<size_t>(x_stride) +
  3059. u;
  3060. } else {
  3061. image += (static_cast<size_t>(height) - 1U -
  3062. (static_cast<size_t>(line_no) + v)) *
  3063. static_cast<size_t>(x_stride) +
  3064. u;
  3065. }
  3066. *image = val;
  3067. }
  3068. }
  3069. } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
  3070. assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
  3071. for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
  3072. const float *line_ptr = reinterpret_cast<float *>(
  3073. &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
  3074. channel_offset_list[c] * static_cast<size_t>(width)));
  3075. for (size_t u = 0; u < static_cast<size_t>(width); u++) {
  3076. float val;
  3077. // val = line_ptr[u];
  3078. tinyexr::cpy4(&val, line_ptr + u);
  3079. tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
  3080. float *image = reinterpret_cast<float **>(out_images)[c];
  3081. if (line_order == 0) {
  3082. image += (static_cast<size_t>(line_no) + v) *
  3083. static_cast<size_t>(x_stride) +
  3084. u;
  3085. } else {
  3086. image += (static_cast<size_t>(height) - 1U -
  3087. (static_cast<size_t>(line_no) + v)) *
  3088. static_cast<size_t>(x_stride) +
  3089. u;
  3090. }
  3091. *image = val;
  3092. }
  3093. }
  3094. } else {
  3095. assert(0);
  3096. return false;
  3097. }
  3098. }
  3099. } else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
  3100. // Allocate original data size.
  3101. std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
  3102. static_cast<size_t>(num_lines) *
  3103. pixel_data_size);
  3104. unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
  3105. if (dstLen == 0) {
  3106. return false;
  3107. }
  3108. if (!tinyexr::DecompressRle(
  3109. reinterpret_cast<unsigned char *>(&outBuf.at(0)), dstLen, data_ptr,
  3110. static_cast<unsigned long>(data_len))) {
  3111. return false;
  3112. }
  3113. // For RLE_COMPRESSION:
  3114. // pixel sample data for channel 0 for scanline 0
  3115. // pixel sample data for channel 1 for scanline 0
  3116. // pixel sample data for channel ... for scanline 0
  3117. // pixel sample data for channel n for scanline 0
  3118. // pixel sample data for channel 0 for scanline 1
  3119. // pixel sample data for channel 1 for scanline 1
  3120. // pixel sample data for channel ... for scanline 1
  3121. // pixel sample data for channel n for scanline 1
  3122. // ...
  3123. for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
  3124. if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
  3125. for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
  3126. const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
  3127. &outBuf.at(v * static_cast<size_t>(pixel_data_size) *
  3128. static_cast<size_t>(width) +
  3129. channel_offset_list[c] * static_cast<size_t>(width)));
  3130. for (size_t u = 0; u < static_cast<size_t>(width); u++) {
  3131. tinyexr::FP16 hf;
  3132. // hf.u = line_ptr[u];
  3133. tinyexr::cpy2(&(hf.u), line_ptr + u);
  3134. tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
  3135. if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
  3136. unsigned short *image =
  3137. reinterpret_cast<unsigned short **>(out_images)[c];
  3138. if (line_order == 0) {
  3139. image += (static_cast<size_t>(line_no) + v) *
  3140. static_cast<size_t>(x_stride) +
  3141. u;
  3142. } else {
  3143. image += (static_cast<size_t>(height) - 1U -
  3144. (static_cast<size_t>(line_no) + v)) *
  3145. static_cast<size_t>(x_stride) +
  3146. u;
  3147. }
  3148. *image = hf.u;
  3149. } else { // HALF -> FLOAT
  3150. tinyexr::FP32 f32 = half_to_float(hf);
  3151. float *image = reinterpret_cast<float **>(out_images)[c];
  3152. if (line_order == 0) {
  3153. image += (static_cast<size_t>(line_no) + v) *
  3154. static_cast<size_t>(x_stride) +
  3155. u;
  3156. } else {
  3157. image += (static_cast<size_t>(height) - 1U -
  3158. (static_cast<size_t>(line_no) + v)) *
  3159. static_cast<size_t>(x_stride) +
  3160. u;
  3161. }
  3162. *image = f32.f;
  3163. }
  3164. }
  3165. }
  3166. } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
  3167. assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
  3168. for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
  3169. const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
  3170. &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
  3171. channel_offset_list[c] * static_cast<size_t>(width)));
  3172. for (size_t u = 0; u < static_cast<size_t>(width); u++) {
  3173. unsigned int val;
  3174. // val = line_ptr[u];
  3175. tinyexr::cpy4(&val, line_ptr + u);
  3176. tinyexr::swap4(&val);
  3177. unsigned int *image =
  3178. reinterpret_cast<unsigned int **>(out_images)[c];
  3179. if (line_order == 0) {
  3180. image += (static_cast<size_t>(line_no) + v) *
  3181. static_cast<size_t>(x_stride) +
  3182. u;
  3183. } else {
  3184. image += (static_cast<size_t>(height) - 1U -
  3185. (static_cast<size_t>(line_no) + v)) *
  3186. static_cast<size_t>(x_stride) +
  3187. u;
  3188. }
  3189. *image = val;
  3190. }
  3191. }
  3192. } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
  3193. assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
  3194. for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
  3195. const float *line_ptr = reinterpret_cast<float *>(
  3196. &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
  3197. channel_offset_list[c] * static_cast<size_t>(width)));
  3198. for (size_t u = 0; u < static_cast<size_t>(width); u++) {
  3199. float val;
  3200. // val = line_ptr[u];
  3201. tinyexr::cpy4(&val, line_ptr + u);
  3202. tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
  3203. float *image = reinterpret_cast<float **>(out_images)[c];
  3204. if (line_order == 0) {
  3205. image += (static_cast<size_t>(line_no) + v) *
  3206. static_cast<size_t>(x_stride) +
  3207. u;
  3208. } else {
  3209. image += (static_cast<size_t>(height) - 1U -
  3210. (static_cast<size_t>(line_no) + v)) *
  3211. static_cast<size_t>(x_stride) +
  3212. u;
  3213. }
  3214. *image = val;
  3215. }
  3216. }
  3217. } else {
  3218. assert(0);
  3219. return false;
  3220. }
  3221. }
  3222. } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
  3223. #if TINYEXR_USE_ZFP
  3224. tinyexr::ZFPCompressionParam zfp_compression_param;
  3225. std::string e;
  3226. if (!tinyexr::FindZFPCompressionParam(&zfp_compression_param, attributes,
  3227. int(num_attributes), &e)) {
  3228. // This code path should not be reachable.
  3229. assert(0);
  3230. return false;
  3231. }
  3232. // Allocate original data size.
  3233. std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
  3234. static_cast<size_t>(num_lines) *
  3235. pixel_data_size);
  3236. unsigned long dstLen = outBuf.size();
  3237. assert(dstLen > 0);
  3238. tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)), width,
  3239. num_lines, num_channels, data_ptr,
  3240. static_cast<unsigned long>(data_len),
  3241. zfp_compression_param);
  3242. // For ZFP_COMPRESSION:
  3243. // pixel sample data for channel 0 for scanline 0
  3244. // pixel sample data for channel 1 for scanline 0
  3245. // pixel sample data for channel ... for scanline 0
  3246. // pixel sample data for channel n for scanline 0
  3247. // pixel sample data for channel 0 for scanline 1
  3248. // pixel sample data for channel 1 for scanline 1
  3249. // pixel sample data for channel ... for scanline 1
  3250. // pixel sample data for channel n for scanline 1
  3251. // ...
  3252. for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
  3253. assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT);
  3254. if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
  3255. assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
  3256. for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
  3257. const float *line_ptr = reinterpret_cast<float *>(
  3258. &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
  3259. channel_offset_list[c] * static_cast<size_t>(width)));
  3260. for (size_t u = 0; u < static_cast<size_t>(width); u++) {
  3261. float val;
  3262. tinyexr::cpy4(&val, line_ptr + u);
  3263. tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
  3264. float *image = reinterpret_cast<float **>(out_images)[c];
  3265. if (line_order == 0) {
  3266. image += (static_cast<size_t>(line_no) + v) *
  3267. static_cast<size_t>(x_stride) +
  3268. u;
  3269. } else {
  3270. image += (static_cast<size_t>(height) - 1U -
  3271. (static_cast<size_t>(line_no) + v)) *
  3272. static_cast<size_t>(x_stride) +
  3273. u;
  3274. }
  3275. *image = val;
  3276. }
  3277. }
  3278. } else {
  3279. assert(0);
  3280. return false;
  3281. }
  3282. }
  3283. #else
  3284. (void)attributes;
  3285. (void)num_attributes;
  3286. (void)num_channels;
  3287. assert(0);
  3288. return false;
  3289. #endif
  3290. } else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
  3291. for (size_t c = 0; c < num_channels; c++) {
  3292. for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
  3293. if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
  3294. const unsigned short *line_ptr =
  3295. reinterpret_cast<const unsigned short *>(
  3296. data_ptr + v * pixel_data_size * size_t(width) +
  3297. channel_offset_list[c] * static_cast<size_t>(width));
  3298. if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
  3299. unsigned short *outLine =
  3300. reinterpret_cast<unsigned short *>(out_images[c]);
  3301. if (line_order == 0) {
  3302. outLine += (size_t(y) + v) * size_t(x_stride);
  3303. } else {
  3304. outLine +=
  3305. (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
  3306. }
  3307. for (int u = 0; u < width; u++) {
  3308. tinyexr::FP16 hf;
  3309. // hf.u = line_ptr[u];
  3310. tinyexr::cpy2(&(hf.u), line_ptr + u);
  3311. tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
  3312. outLine[u] = hf.u;
  3313. }
  3314. } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
  3315. float *outLine = reinterpret_cast<float *>(out_images[c]);
  3316. if (line_order == 0) {
  3317. outLine += (size_t(y) + v) * size_t(x_stride);
  3318. } else {
  3319. outLine +=
  3320. (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
  3321. }
  3322. if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
  3323. (data_ptr + data_len)) {
  3324. // Insufficient data size
  3325. return false;
  3326. }
  3327. for (int u = 0; u < width; u++) {
  3328. tinyexr::FP16 hf;
3329. // address may not be aligned. use byte-wise copy for safety (see issue #76).
  3330. // hf.u = line_ptr[u];
  3331. tinyexr::cpy2(&(hf.u), line_ptr + u);
  3332. tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
  3333. tinyexr::FP32 f32 = half_to_float(hf);
  3334. outLine[u] = f32.f;
  3335. }
  3336. } else {
  3337. assert(0);
  3338. return false;
  3339. }
  3340. } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
  3341. const float *line_ptr = reinterpret_cast<const float *>(
  3342. data_ptr + v * pixel_data_size * size_t(width) +
  3343. channel_offset_list[c] * static_cast<size_t>(width));
  3344. float *outLine = reinterpret_cast<float *>(out_images[c]);
  3345. if (line_order == 0) {
  3346. outLine += (size_t(y) + v) * size_t(x_stride);
  3347. } else {
  3348. outLine +=
  3349. (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
  3350. }
  3351. if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
  3352. (data_ptr + data_len)) {
  3353. // Insufficient data size
  3354. return false;
  3355. }
  3356. for (int u = 0; u < width; u++) {
  3357. float val;
  3358. tinyexr::cpy4(&val, line_ptr + u);
  3359. tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
  3360. outLine[u] = val;
  3361. }
  3362. } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
  3363. const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>(
  3364. data_ptr + v * pixel_data_size * size_t(width) +
  3365. channel_offset_list[c] * static_cast<size_t>(width));
  3366. unsigned int *outLine =
  3367. reinterpret_cast<unsigned int *>(out_images[c]);
  3368. if (line_order == 0) {
  3369. outLine += (size_t(y) + v) * size_t(x_stride);
  3370. } else {
  3371. outLine +=
  3372. (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
  3373. }
  3374. for (int u = 0; u < width; u++) {
  3375. if (reinterpret_cast<const unsigned char *>(line_ptr + u) >=
  3376. (data_ptr + data_len)) {
3377. // Corrupted data?
  3378. return false;
  3379. }
  3380. unsigned int val;
  3381. tinyexr::cpy4(&val, line_ptr + u);
  3382. tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
  3383. outLine[u] = val;
  3384. }
  3385. }
  3386. }
  3387. }
  3388. }
  3389. return true;
  3390. }
  3391. static bool DecodeTiledPixelData(
  3392. unsigned char **out_images, int *width, int *height,
  3393. const int *requested_pixel_types, const unsigned char *data_ptr,
  3394. size_t data_len, int compression_type, int line_order, int data_width,
  3395. int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x,
  3396. int tile_size_y, size_t pixel_data_size, size_t num_attributes,
  3397. const EXRAttribute *attributes, size_t num_channels,
  3398. const EXRChannelInfo *channels,
  3399. const std::vector<size_t> &channel_offset_list) {
  3400. // Here, data_width and data_height are the dimensions of the current (sub)level.
  3401. if (tile_size_x * tile_offset_x > data_width ||
  3402. tile_size_y * tile_offset_y > data_height) {
  3403. return false;
  3404. }
  3405. // Compute actual image size in a tile.
  3406. if ((tile_offset_x + 1) * tile_size_x >= data_width) {
  3407. (*width) = data_width - (tile_offset_x * tile_size_x);
  3408. } else {
  3409. (*width) = tile_size_x;
  3410. }
  3411. if ((tile_offset_y + 1) * tile_size_y >= data_height) {
  3412. (*height) = data_height - (tile_offset_y * tile_size_y);
  3413. } else {
  3414. (*height) = tile_size_y;
  3415. }
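// e.g. data_width = 100 with tile_size_x = 32 gives tiles of width 32, 32, 32 and a
// final edge tile of width 100 - 3 * 32 = 4 (tile_offset_x = 3).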
3416. // Decode the tile: the computed tile image width and line count are passed, while tile_size_x is used as the row stride.
  3417. return DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len,
  3418. compression_type, line_order, (*width), tile_size_y,
  3419. /* stride */ tile_size_x, /* y */ 0, /* line_no */ 0,
  3420. (*height), pixel_data_size, num_attributes, attributes,
  3421. num_channels, channels, channel_offset_list);
  3422. }
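// Computes the per-pixel byte offset of each channel and the total bytes per pixel.
// e.g. channels {HALF, FLOAT, UINT} give channel offsets {0, 2, 6} and pixel_data_size = 10.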
  3423. static bool ComputeChannelLayout(std::vector<size_t> *channel_offset_list,
  3424. int *pixel_data_size, size_t *channel_offset,
  3425. int num_channels,
  3426. const EXRChannelInfo *channels) {
  3427. channel_offset_list->resize(static_cast<size_t>(num_channels));
  3428. (*pixel_data_size) = 0;
  3429. (*channel_offset) = 0;
  3430. for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
  3431. (*channel_offset_list)[c] = (*channel_offset);
  3432. if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
  3433. (*pixel_data_size) += sizeof(unsigned short);
  3434. (*channel_offset) += sizeof(unsigned short);
  3435. } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
  3436. (*pixel_data_size) += sizeof(float);
  3437. (*channel_offset) += sizeof(float);
  3438. } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
  3439. (*pixel_data_size) += sizeof(unsigned int);
  3440. (*channel_offset) += sizeof(unsigned int);
  3441. } else {
  3442. // ???
  3443. return false;
  3444. }
  3445. }
  3446. return true;
  3447. }
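// Allocates one data_width * data_height buffer per channel. HALF channels are stored
// as HALF or widened to FLOAT depending on requested_pixel_types[c].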
  3448. static unsigned char **AllocateImage(int num_channels,
  3449. const EXRChannelInfo *channels,
  3450. const int *requested_pixel_types,
  3451. int data_width, int data_height) {
  3452. unsigned char **images =
  3453. reinterpret_cast<unsigned char **>(static_cast<float **>(
  3454. malloc(sizeof(float *) * static_cast<size_t>(num_channels))));
  3455. for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
  3456. size_t data_len =
  3457. static_cast<size_t>(data_width) * static_cast<size_t>(data_height);
  3458. if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
  3459. // pixel_data_size += sizeof(unsigned short);
  3460. // channel_offset += sizeof(unsigned short);
  3461. // Alloc internal image for half type.
  3462. if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
  3463. images[c] =
  3464. reinterpret_cast<unsigned char *>(static_cast<unsigned short *>(
  3465. malloc(sizeof(unsigned short) * data_len)));
  3466. } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
  3467. images[c] = reinterpret_cast<unsigned char *>(
  3468. static_cast<float *>(malloc(sizeof(float) * data_len)));
  3469. } else {
  3470. assert(0);
  3471. }
  3472. } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
  3473. // pixel_data_size += sizeof(float);
  3474. // channel_offset += sizeof(float);
  3475. images[c] = reinterpret_cast<unsigned char *>(
  3476. static_cast<float *>(malloc(sizeof(float) * data_len)));
  3477. } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
  3478. // pixel_data_size += sizeof(unsigned int);
  3479. // channel_offset += sizeof(unsigned int);
  3480. images[c] = reinterpret_cast<unsigned char *>(
  3481. static_cast<unsigned int *>(malloc(sizeof(unsigned int) * data_len)));
  3482. } else {
  3483. assert(0);
  3484. }
  3485. }
  3486. return images;
  3487. }
  3488. #ifdef _WIN32
  3489. static inline std::wstring UTF8ToWchar(const std::string &str) {
  3490. int wstr_size =
  3491. MultiByteToWideChar(CP_UTF8, 0, str.data(), (int)str.size(), NULL, 0);
  3492. std::wstring wstr(wstr_size, 0);
  3493. MultiByteToWideChar(CP_UTF8, 0, str.data(), (int)str.size(), &wstr[0],
  3494. (int)wstr.size());
  3495. return wstr;
  3496. }
  3497. #endif
  3498. static int ParseEXRHeader(HeaderInfo *info, bool *empty_header,
  3499. const EXRVersion *version, std::string *err,
  3500. const unsigned char *buf, size_t size) {
  3501. const char *marker = reinterpret_cast<const char *>(&buf[0]);
  3502. if (empty_header) {
  3503. (*empty_header) = false;
  3504. }
  3505. if (version->multipart) {
  3506. if (size > 0 && marker[0] == '\0') {
  3507. // End of header list.
  3508. if (empty_header) {
  3509. (*empty_header) = true;
  3510. }
  3511. return TINYEXR_SUCCESS;
  3512. }
  3513. }
  3514. // According to the spec, the header of every OpenEXR file must contain at
  3515. // least the following attributes:
  3516. //
  3517. // channels chlist
  3518. // compression compression
  3519. // dataWindow box2i
  3520. // displayWindow box2i
  3521. // lineOrder lineOrder
  3522. // pixelAspectRatio float
  3523. // screenWindowCenter v2f
  3524. // screenWindowWidth float
  3525. bool has_channels = false;
  3526. bool has_compression = false;
  3527. bool has_data_window = false;
  3528. bool has_display_window = false;
  3529. bool has_line_order = false;
  3530. bool has_pixel_aspect_ratio = false;
  3531. bool has_screen_window_center = false;
  3532. bool has_screen_window_width = false;
  3533. bool has_name = false;
  3534. bool has_type = false;
  3535. info->name.clear();
  3536. info->type.clear();
  3537. info->data_window.min_x = 0;
  3538. info->data_window.min_y = 0;
  3539. info->data_window.max_x = 0;
  3540. info->data_window.max_y = 0;
  3541. info->line_order = 0; // @fixme
  3542. info->display_window.min_x = 0;
  3543. info->display_window.min_y = 0;
  3544. info->display_window.max_x = 0;
  3545. info->display_window.max_y = 0;
  3546. info->screen_window_center[0] = 0.0f;
  3547. info->screen_window_center[1] = 0.0f;
  3548. info->screen_window_width = -1.0f;
  3549. info->pixel_aspect_ratio = -1.0f;
  3550. info->tiled = 0;
  3551. info->tile_size_x = -1;
  3552. info->tile_size_y = -1;
  3553. info->tile_level_mode = -1;
  3554. info->tile_rounding_mode = -1;
  3555. info->attributes.clear();
  3556. // Read attributes
  3557. size_t orig_size = size;
  3558. for (size_t nattr = 0; nattr < TINYEXR_MAX_HEADER_ATTRIBUTES; nattr++) {
  3559. if (0 == size) {
  3560. if (err) {
  3561. (*err) += "Insufficient data size for attributes.\n";
  3562. }
  3563. return TINYEXR_ERROR_INVALID_DATA;
  3564. } else if (marker[0] == '\0') {
  3565. size--;
  3566. break;
  3567. }
  3568. std::string attr_name;
  3569. std::string attr_type;
  3570. std::vector<unsigned char> data;
  3571. size_t marker_size;
  3572. if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
  3573. marker, size)) {
  3574. if (err) {
  3575. (*err) += "Failed to read attribute.\n";
  3576. }
  3577. return TINYEXR_ERROR_INVALID_DATA;
  3578. }
  3579. marker += marker_size;
  3580. size -= marker_size;
3581. // For multipart and deep files the tiled bit (bit 9) of the version field is 0, so also accept the "tiles" attribute when the multipart or non_image flag is set.
  3582. if ((version->tiled || version->multipart || version->non_image) && attr_name.compare("tiles") == 0) {
  3583. unsigned int x_size, y_size;
  3584. unsigned char tile_mode;
  3585. if (data.size() != 9) {
  3586. if (err) {
  3587. (*err) += "(ParseEXRHeader) Invalid attribute data size. Attribute data size must be 9.\n";
  3588. }
  3589. return TINYEXR_ERROR_INVALID_DATA;
  3590. }
  3591. assert(data.size() == 9);
  3592. memcpy(&x_size, &data.at(0), sizeof(int));
  3593. memcpy(&y_size, &data.at(4), sizeof(int));
  3594. tile_mode = data[8];
  3595. tinyexr::swap4(&x_size);
  3596. tinyexr::swap4(&y_size);
  3597. if (x_size > static_cast<unsigned int>(std::numeric_limits<int>::max()) ||
  3598. y_size > static_cast<unsigned int>(std::numeric_limits<int>::max())) {
  3599. if (err) {
  3600. (*err) = "Tile sizes were invalid.";
  3601. }
  3602. return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
  3603. }
  3604. info->tile_size_x = static_cast<int>(x_size);
  3605. info->tile_size_y = static_cast<int>(y_size);
  3606. // mode = levelMode + roundingMode * 16
  3607. info->tile_level_mode = tile_mode & 0x3;
  3608. info->tile_rounding_mode = (tile_mode >> 4) & 0x1;
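// e.g. tile_mode = 0x11 decodes to level mode 1 (MIPMAP_LEVELS) with rounding mode 1 (ROUND_UP).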
  3609. info->tiled = 1;
  3610. } else if (attr_name.compare("compression") == 0) {
  3611. bool ok = false;
  3612. if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) {
  3613. ok = true;
  3614. }
  3615. if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) {
  3616. #if TINYEXR_USE_PIZ
  3617. ok = true;
  3618. #else
  3619. if (err) {
  3620. (*err) = "PIZ compression is not supported.";
  3621. }
  3622. return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
  3623. #endif
  3624. }
  3625. if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) {
  3626. #if TINYEXR_USE_ZFP
  3627. ok = true;
  3628. #else
  3629. if (err) {
  3630. (*err) = "ZFP compression is not supported.";
  3631. }
  3632. return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
  3633. #endif
  3634. }
  3635. if (!ok) {
  3636. if (err) {
  3637. (*err) = "Unknown compression type.";
  3638. }
  3639. return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
  3640. }
  3641. info->compression_type = static_cast<int>(data[0]);
  3642. has_compression = true;
  3643. } else if (attr_name.compare("channels") == 0) {
  3644. // name: zero-terminated string, from 1 to 255 bytes long
  3645. // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
  3646. // pLinear: unsigned char, possible values are 0 and 1
  3647. // reserved: three chars, should be zero
  3648. // xSampling: int
  3649. // ySampling: int
  3650. if (!ReadChannelInfo(info->channels, data)) {
  3651. if (err) {
  3652. (*err) += "Failed to parse channel info.\n";
  3653. }
  3654. return TINYEXR_ERROR_INVALID_DATA;
  3655. }
  3656. if (info->channels.size() < 1) {
  3657. if (err) {
  3658. (*err) += "# of channels is zero.\n";
  3659. }
  3660. return TINYEXR_ERROR_INVALID_DATA;
  3661. }
  3662. has_channels = true;
  3663. } else if (attr_name.compare("dataWindow") == 0) {
  3664. if (data.size() >= 16) {
  3665. memcpy(&info->data_window.min_x, &data.at(0), sizeof(int));
  3666. memcpy(&info->data_window.min_y, &data.at(4), sizeof(int));
  3667. memcpy(&info->data_window.max_x, &data.at(8), sizeof(int));
  3668. memcpy(&info->data_window.max_y, &data.at(12), sizeof(int));
  3669. tinyexr::swap4(&info->data_window.min_x);
  3670. tinyexr::swap4(&info->data_window.min_y);
  3671. tinyexr::swap4(&info->data_window.max_x);
  3672. tinyexr::swap4(&info->data_window.max_y);
  3673. has_data_window = true;
  3674. }
  3675. } else if (attr_name.compare("displayWindow") == 0) {
  3676. if (data.size() >= 16) {
  3677. memcpy(&info->display_window.min_x, &data.at(0), sizeof(int));
  3678. memcpy(&info->display_window.min_y, &data.at(4), sizeof(int));
  3679. memcpy(&info->display_window.max_x, &data.at(8), sizeof(int));
  3680. memcpy(&info->display_window.max_y, &data.at(12), sizeof(int));
  3681. tinyexr::swap4(&info->display_window.min_x);
  3682. tinyexr::swap4(&info->display_window.min_y);
  3683. tinyexr::swap4(&info->display_window.max_x);
  3684. tinyexr::swap4(&info->display_window.max_y);
  3685. has_display_window = true;
  3686. }
  3687. } else if (attr_name.compare("lineOrder") == 0) {
  3688. if (data.size() >= 1) {
  3689. info->line_order = static_cast<int>(data[0]);
  3690. has_line_order = true;
  3691. }
  3692. } else if (attr_name.compare("pixelAspectRatio") == 0) {
  3693. if (data.size() >= sizeof(float)) {
  3694. memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float));
  3695. tinyexr::swap4(&info->pixel_aspect_ratio);
  3696. has_pixel_aspect_ratio = true;
  3697. }
  3698. } else if (attr_name.compare("screenWindowCenter") == 0) {
  3699. if (data.size() >= 8) {
  3700. memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float));
  3701. memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float));
  3702. tinyexr::swap4(&info->screen_window_center[0]);
  3703. tinyexr::swap4(&info->screen_window_center[1]);
  3704. has_screen_window_center = true;
  3705. }
  3706. } else if (attr_name.compare("screenWindowWidth") == 0) {
  3707. if (data.size() >= sizeof(float)) {
  3708. memcpy(&info->screen_window_width, &data.at(0), sizeof(float));
  3709. tinyexr::swap4(&info->screen_window_width);
  3710. has_screen_window_width = true;
  3711. }
  3712. } else if (attr_name.compare("chunkCount") == 0) {
  3713. if (data.size() >= sizeof(int)) {
  3714. memcpy(&info->chunk_count, &data.at(0), sizeof(int));
  3715. tinyexr::swap4(&info->chunk_count);
  3716. }
  3717. } else if (attr_name.compare("name") == 0) {
  3718. if (!data.empty() && data[0]) {
  3719. data.push_back(0);
  3720. size_t len = strlen(reinterpret_cast<const char*>(&data[0]));
  3721. info->name.resize(len);
  3722. info->name.assign(reinterpret_cast<const char*>(&data[0]), len);
  3723. has_name = true;
  3724. }
  3725. } else if (attr_name.compare("type") == 0) {
  3726. if (!data.empty() && data[0]) {
  3727. data.push_back(0);
  3728. size_t len = strlen(reinterpret_cast<const char*>(&data[0]));
  3729. info->type.resize(len);
  3730. info->type.assign(reinterpret_cast<const char*>(&data[0]), len);
  3731. has_type = true;
  3732. }
  3733. } else {
  3734. // Custom attribute(up to TINYEXR_MAX_CUSTOM_ATTRIBUTES)
  3735. if (info->attributes.size() < TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
  3736. EXRAttribute attrib;
  3737. #ifdef _MSC_VER
  3738. strncpy_s(attrib.name, attr_name.c_str(), 255);
  3739. strncpy_s(attrib.type, attr_type.c_str(), 255);
  3740. #else
  3741. strncpy(attrib.name, attr_name.c_str(), 255);
  3742. strncpy(attrib.type, attr_type.c_str(), 255);
  3743. #endif
  3744. attrib.name[255] = '\0';
  3745. attrib.type[255] = '\0';
  3746. attrib.size = static_cast<int>(data.size());
  3747. attrib.value = static_cast<unsigned char *>(malloc(data.size()));
  3748. memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0),
  3749. data.size());
  3750. info->attributes.push_back(attrib);
  3751. }
  3752. }
  3753. }
  3754. // Check if required attributes exist
  3755. {
  3756. std::stringstream ss_err;
  3757. if (!has_compression) {
  3758. ss_err << "\"compression\" attribute not found in the header."
  3759. << std::endl;
  3760. }
  3761. if (!has_channels) {
  3762. ss_err << "\"channels\" attribute not found in the header." << std::endl;
  3763. }
  3764. if (!has_line_order) {
  3765. ss_err << "\"lineOrder\" attribute not found in the header." << std::endl;
  3766. }
  3767. if (!has_display_window) {
  3768. ss_err << "\"displayWindow\" attribute not found in the header."
  3769. << std::endl;
  3770. }
  3771. if (!has_data_window) {
  3772. ss_err << "\"dataWindow\" attribute not found in the header or invalid."
  3773. << std::endl;
  3774. }
  3775. if (!has_pixel_aspect_ratio) {
  3776. ss_err << "\"pixelAspectRatio\" attribute not found in the header."
  3777. << std::endl;
  3778. }
  3779. if (!has_screen_window_width) {
  3780. ss_err << "\"screenWindowWidth\" attribute not found in the header."
  3781. << std::endl;
  3782. }
  3783. if (!has_screen_window_center) {
  3784. ss_err << "\"screenWindowCenter\" attribute not found in the header."
  3785. << std::endl;
  3786. }
  3787. if (version->multipart || version->non_image) {
  3788. if (!has_name) {
  3789. ss_err << "\"name\" attribute not found in the header."
  3790. << std::endl;
  3791. }
  3792. if (!has_type) {
  3793. ss_err << "\"type\" attribute not found in the header."
  3794. << std::endl;
  3795. }
  3796. }
  3797. if (!(ss_err.str().empty())) {
  3798. if (err) {
  3799. (*err) += ss_err.str();
  3800. }
  3801. return TINYEXR_ERROR_INVALID_HEADER;
  3802. }
  3803. }
  3804. info->header_len = static_cast<unsigned int>(orig_size - size);
  3805. return TINYEXR_SUCCESS;
  3806. }
  3807. // C++ HeaderInfo to C EXRHeader conversion.
  3808. static bool ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info, std::string *warn, std::string *err) {
  3809. exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio;
  3810. exr_header->screen_window_center[0] = info.screen_window_center[0];
  3811. exr_header->screen_window_center[1] = info.screen_window_center[1];
  3812. exr_header->screen_window_width = info.screen_window_width;
  3813. exr_header->chunk_count = info.chunk_count;
  3814. exr_header->display_window.min_x = info.display_window.min_x;
  3815. exr_header->display_window.min_y = info.display_window.min_y;
  3816. exr_header->display_window.max_x = info.display_window.max_x;
  3817. exr_header->display_window.max_y = info.display_window.max_y;
  3818. exr_header->data_window.min_x = info.data_window.min_x;
  3819. exr_header->data_window.min_y = info.data_window.min_y;
  3820. exr_header->data_window.max_x = info.data_window.max_x;
  3821. exr_header->data_window.max_y = info.data_window.max_y;
  3822. exr_header->line_order = info.line_order;
  3823. exr_header->compression_type = info.compression_type;
  3824. exr_header->tiled = info.tiled;
  3825. exr_header->tile_size_x = info.tile_size_x;
  3826. exr_header->tile_size_y = info.tile_size_y;
  3827. exr_header->tile_level_mode = info.tile_level_mode;
  3828. exr_header->tile_rounding_mode = info.tile_rounding_mode;
  3829. EXRSetNameAttr(exr_header, info.name.c_str());
  3830. if (!info.type.empty()) {
  3831. bool valid = true;
  3832. if (info.type == "scanlineimage") {
  3833. if (exr_header->tiled) {
  3834. if (err) {
  3835. (*err) += "(ConvertHeader) tiled bit must be off for `scanlineimage` type.\n";
  3836. }
  3837. valid = false;
  3838. }
  3839. } else if (info.type == "tiledimage") {
  3840. if (!exr_header->tiled) {
  3841. if (err) {
  3842. (*err) += "(ConvertHeader) tiled bit must be on for `tiledimage` type.\n";
  3843. }
  3844. valid = false;
  3845. }
  3846. } else if (info.type == "deeptile") {
  3847. exr_header->non_image = 1;
  3848. if (!exr_header->tiled) {
  3849. if (err) {
  3850. (*err) += "(ConvertHeader) tiled bit must be on for `deeptile` type.\n";
  3851. }
  3852. valid = false;
  3853. }
  3854. } else if (info.type == "deepscanline") {
  3855. exr_header->non_image = 1;
  3856. if (exr_header->tiled) {
  3857. if (err) {
  3858. (*err) += "(ConvertHeader) tiled bit must be off for `deepscanline` type.\n";
  3859. }
  3860. //valid = false;
  3861. }
  3862. } else {
  3863. if (warn) {
  3864. std::stringstream ss;
  3865. ss << "(ConvertHeader) Unsupported or unknown info.type: " << info.type << "\n";
  3866. (*warn) += ss.str();
  3867. }
  3868. }
  3869. if (!valid) {
  3870. return false;
  3871. }
  3872. }
  3873. exr_header->num_channels = static_cast<int>(info.channels.size());
  3874. exr_header->channels = static_cast<EXRChannelInfo *>(malloc(
  3875. sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels)));
  3876. for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
  3877. #ifdef _MSC_VER
  3878. strncpy_s(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
  3879. #else
  3880. strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
  3881. #endif
  3882. // manually add '\0' for safety.
  3883. exr_header->channels[c].name[255] = '\0';
  3884. exr_header->channels[c].pixel_type = info.channels[c].pixel_type;
  3885. exr_header->channels[c].p_linear = info.channels[c].p_linear;
  3886. exr_header->channels[c].x_sampling = info.channels[c].x_sampling;
  3887. exr_header->channels[c].y_sampling = info.channels[c].y_sampling;
  3888. }
  3889. exr_header->pixel_types = static_cast<int *>(
  3890. malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
  3891. for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
  3892. exr_header->pixel_types[c] = info.channels[c].pixel_type;
  3893. }
  3894. // Initially fill with values of `pixel_types`
  3895. exr_header->requested_pixel_types = static_cast<int *>(
  3896. malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
  3897. for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
  3898. exr_header->requested_pixel_types[c] = info.channels[c].pixel_type;
  3899. }
  3900. exr_header->num_custom_attributes = static_cast<int>(info.attributes.size());
  3901. if (exr_header->num_custom_attributes > 0) {
  3902. // TODO(syoyo): Report warning when # of attributes exceeds
  3903. // `TINYEXR_MAX_CUSTOM_ATTRIBUTES`
  3904. if (exr_header->num_custom_attributes > TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
  3905. exr_header->num_custom_attributes = TINYEXR_MAX_CUSTOM_ATTRIBUTES;
  3906. }
  3907. exr_header->custom_attributes = static_cast<EXRAttribute *>(malloc(
  3908. sizeof(EXRAttribute) * size_t(exr_header->num_custom_attributes)));
  3909. for (size_t i = 0; i < info.attributes.size(); i++) {
  3910. memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name,
  3911. 256);
  3912. memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type,
  3913. 256);
  3914. exr_header->custom_attributes[i].size = info.attributes[i].size;
  3915. // Just copy pointer
  3916. exr_header->custom_attributes[i].value = info.attributes[i].value;
  3917. }
  3918. } else {
  3919. exr_header->custom_attributes = NULL;
  3920. }
  3921. exr_header->header_len = info.header_len;
  3922. return true;
  3923. }
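// Chunk offset table. For tiled images the offsets are indexed as
// offsets[level_index][tile_y][tile_x]; scanline images use offsets[0][0][block_index].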
  3924. struct OffsetData {
  3925. OffsetData() : num_x_levels(0), num_y_levels(0) {}
  3926. std::vector<std::vector<std::vector <tinyexr::tinyexr_uint64> > > offsets;
  3927. int num_x_levels;
  3928. int num_y_levels;
  3929. };
  3930. static int LevelIndex(int lx, int ly, int tile_level_mode, int num_x_levels) {
  3931. switch (tile_level_mode) {
  3932. case TINYEXR_TILE_ONE_LEVEL:
  3933. return 0;
  3934. case TINYEXR_TILE_MIPMAP_LEVELS:
  3935. return lx;
  3936. case TINYEXR_TILE_RIPMAP_LEVELS:
  3937. return lx + ly * num_x_levels;
  3938. default:
  3939. assert(false);
  3940. }
  3941. return 0;
  3942. }
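// Size of one dimension at a given mip/rip level.
// e.g. toplevel_size = 15, level = 2: ROUND_DOWN gives 15 / 4 = 3, ROUND_UP gives 4.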
  3943. static int LevelSize(int toplevel_size, int level, int tile_rounding_mode) {
  3944. assert(level >= 0);
  3945. int b = static_cast<int>(1u << static_cast<unsigned int>(level));
  3946. int level_size = toplevel_size / b;
  3947. if (tile_rounding_mode == TINYEXR_TILE_ROUND_UP && level_size * b < toplevel_size)
  3948. level_size += 1;
  3949. return std::max(level_size, 1);
  3950. }
  3951. static int DecodeTiledLevel(EXRImage* exr_image, const EXRHeader* exr_header,
  3952. const OffsetData& offset_data,
  3953. const std::vector<size_t>& channel_offset_list,
  3954. int pixel_data_size,
  3955. const unsigned char* head, const size_t size,
  3956. std::string* err) {
  3957. int num_channels = exr_header->num_channels;
  3958. int level_index = LevelIndex(exr_image->level_x, exr_image->level_y, exr_header->tile_level_mode, offset_data.num_x_levels);
  3959. int num_y_tiles = int(offset_data.offsets[size_t(level_index)].size());
  3960. assert(num_y_tiles);
  3961. int num_x_tiles = int(offset_data.offsets[size_t(level_index)][0].size());
  3962. assert(num_x_tiles);
  3963. int num_tiles = num_x_tiles * num_y_tiles;
  3964. int err_code = TINYEXR_SUCCESS;
  3965. enum {
  3966. EF_SUCCESS = 0,
  3967. EF_INVALID_DATA = 1,
  3968. EF_INSUFFICIENT_DATA = 2,
  3969. EF_FAILED_TO_DECODE = 4
  3970. };
  3971. #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
  3972. std::atomic<unsigned> error_flag(EF_SUCCESS);
  3973. #else
  3974. unsigned error_flag(EF_SUCCESS);
  3975. #endif
  3976. // Although the spec says : "...the data window is subdivided into an array of smaller rectangles...",
3977. // the IlmImf library allows the tile dimensions to be larger than (or equal to) the dimensions of the data window.
  3978. #if 0
  3979. if ((exr_header->tile_size_x > exr_image->width || exr_header->tile_size_y > exr_image->height) &&
  3980. exr_image->level_x == 0 && exr_image->level_y == 0) {
  3981. if (err) {
  3982. (*err) += "Failed to decode tile data.\n";
  3983. }
  3984. err_code = TINYEXR_ERROR_INVALID_DATA;
  3985. }
  3986. #endif
  3987. exr_image->tiles = static_cast<EXRTile*>(
3988. calloc(static_cast<size_t>(num_tiles), sizeof(EXRTile)));
  3989. #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
  3990. std::vector<std::thread> workers;
  3991. std::atomic<int> tile_count(0);
  3992. int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
  3993. if (num_threads > int(num_tiles)) {
  3994. num_threads = int(num_tiles);
  3995. }
  3996. for (int t = 0; t < num_threads; t++) {
  3997. workers.emplace_back(std::thread([&]()
  3998. {
  3999. int tile_idx = 0;
  4000. while ((tile_idx = tile_count++) < num_tiles) {
  4001. #else
  4002. #if TINYEXR_USE_OPENMP
  4003. #pragma omp parallel for
  4004. #endif
  4005. for (int tile_idx = 0; tile_idx < num_tiles; tile_idx++) {
  4006. #endif
  4007. // Allocate memory for each tile.
  4008. exr_image->tiles[tile_idx].images = tinyexr::AllocateImage(
  4009. num_channels, exr_header->channels,
  4010. exr_header->requested_pixel_types, exr_header->tile_size_x,
  4011. exr_header->tile_size_y);
  4012. int x_tile = tile_idx % num_x_tiles;
  4013. int y_tile = tile_idx / num_x_tiles;
  4014. // 16 byte: tile coordinates
  4015. // 4 byte : data size
  4016. // ~ : data(uncompressed or compressed)
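// The four tile coordinates are, in order: tile_x, tile_y, level_x, level_y
// (each a little-endian int), matching how tile_coordinates[] is used below.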
  4017. tinyexr::tinyexr_uint64 offset = offset_data.offsets[size_t(level_index)][size_t(y_tile)][size_t(x_tile)];
  4018. if (offset + sizeof(int) * 5 > size) {
  4019. // Insufficient data size.
  4020. error_flag |= EF_INSUFFICIENT_DATA;
  4021. continue;
  4022. }
  4023. size_t data_size =
  4024. size_t(size - (offset + sizeof(int) * 5));
  4025. const unsigned char* data_ptr =
  4026. reinterpret_cast<const unsigned char*>(head + offset);
  4027. int tile_coordinates[4];
  4028. memcpy(tile_coordinates, data_ptr, sizeof(int) * 4);
  4029. tinyexr::swap4(&tile_coordinates[0]);
  4030. tinyexr::swap4(&tile_coordinates[1]);
  4031. tinyexr::swap4(&tile_coordinates[2]);
  4032. tinyexr::swap4(&tile_coordinates[3]);
  4033. if (tile_coordinates[2] != exr_image->level_x) {
  4034. // Invalid data.
  4035. error_flag |= EF_INVALID_DATA;
  4036. continue;
  4037. }
  4038. if (tile_coordinates[3] != exr_image->level_y) {
  4039. // Invalid data.
  4040. error_flag |= EF_INVALID_DATA;
  4041. continue;
  4042. }
  4043. int data_len;
  4044. memcpy(&data_len, data_ptr + 16,
  4045. sizeof(int)); // 16 = sizeof(tile_coordinates)
  4046. tinyexr::swap4(&data_len);
  4047. if (data_len < 2 || size_t(data_len) > data_size) {
  4048. // Insufficient data size.
  4049. error_flag |= EF_INSUFFICIENT_DATA;
  4050. continue;
  4051. }
  4052. // Move to data addr: 20 = 16 + 4;
  4053. data_ptr += 20;
  4054. bool ret = tinyexr::DecodeTiledPixelData(
  4055. exr_image->tiles[tile_idx].images,
  4056. &(exr_image->tiles[tile_idx].width),
  4057. &(exr_image->tiles[tile_idx].height),
  4058. exr_header->requested_pixel_types, data_ptr,
  4059. static_cast<size_t>(data_len), exr_header->compression_type,
  4060. exr_header->line_order,
  4061. exr_image->width, exr_image->height,
  4062. tile_coordinates[0], tile_coordinates[1], exr_header->tile_size_x,
  4063. exr_header->tile_size_y, static_cast<size_t>(pixel_data_size),
  4064. static_cast<size_t>(exr_header->num_custom_attributes),
  4065. exr_header->custom_attributes,
  4066. static_cast<size_t>(exr_header->num_channels),
  4067. exr_header->channels, channel_offset_list);
  4068. if (!ret) {
  4069. // Failed to decode tile data.
  4070. error_flag |= EF_FAILED_TO_DECODE;
  4071. }
  4072. exr_image->tiles[tile_idx].offset_x = tile_coordinates[0];
  4073. exr_image->tiles[tile_idx].offset_y = tile_coordinates[1];
  4074. exr_image->tiles[tile_idx].level_x = tile_coordinates[2];
  4075. exr_image->tiles[tile_idx].level_y = tile_coordinates[3];
  4076. #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
  4077. }
  4078. }));
  4079. } // num_thread loop
  4080. for (auto& t : workers) {
  4081. t.join();
  4082. }
  4083. #else
  4084. } // parallel for
  4085. #endif
4086. // Set these even in the event of an error, so the caller can free the allocated memory.
  4087. exr_image->num_channels = num_channels;
  4088. exr_image->num_tiles = static_cast<int>(num_tiles);
  4089. if (error_flag) err_code = TINYEXR_ERROR_INVALID_DATA;
  4090. if (err) {
  4091. if (error_flag & EF_INSUFFICIENT_DATA) {
  4092. (*err) += "Insufficient data length.\n";
  4093. }
  4094. if (error_flag & EF_FAILED_TO_DECODE) {
  4095. (*err) += "Failed to decode tile data.\n";
  4096. }
  4097. }
  4098. return err_code;
  4099. }
  4100. static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header,
  4101. const OffsetData& offset_data,
  4102. const unsigned char *head, const size_t size,
  4103. std::string *err) {
  4104. int num_channels = exr_header->num_channels;
  4105. int num_scanline_blocks = 1;
  4106. if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
  4107. num_scanline_blocks = 16;
  4108. } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
  4109. num_scanline_blocks = 32;
  4110. } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
  4111. num_scanline_blocks = 16;
  4112. #if TINYEXR_USE_ZFP
  4113. tinyexr::ZFPCompressionParam zfp_compression_param;
  4114. if (!FindZFPCompressionParam(&zfp_compression_param,
  4115. exr_header->custom_attributes,
  4116. int(exr_header->num_custom_attributes), err)) {
  4117. return TINYEXR_ERROR_INVALID_HEADER;
  4118. }
  4119. #endif
  4120. }
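// NONE, RLE and ZIPS compression keep the default of one scanline per chunk.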
  4121. if (exr_header->data_window.max_x < exr_header->data_window.min_x ||
  4122. exr_header->data_window.max_y < exr_header->data_window.min_y) {
  4123. if (err) {
  4124. (*err) += "Invalid data window.\n";
  4125. }
  4126. return TINYEXR_ERROR_INVALID_DATA;
  4127. }
  4128. int data_width =
  4129. exr_header->data_window.max_x - exr_header->data_window.min_x + 1;
  4130. int data_height =
  4131. exr_header->data_window.max_y - exr_header->data_window.min_y + 1;
4132. // Do not allow too large data_width and data_height (likely an invalid header).
  4133. {
  4134. if ((data_width > TINYEXR_DIMENSION_THRESHOLD) || (data_height > TINYEXR_DIMENSION_THRESHOLD)) {
  4135. if (err) {
  4136. std::stringstream ss;
  4137. ss << "data_with or data_height too large. data_width: " << data_width
  4138. << ", "
  4139. << "data_height = " << data_height << std::endl;
  4140. (*err) += ss.str();
  4141. }
  4142. return TINYEXR_ERROR_INVALID_DATA;
  4143. }
  4144. if (exr_header->tiled) {
  4145. if ((exr_header->tile_size_x > TINYEXR_DIMENSION_THRESHOLD) || (exr_header->tile_size_y > TINYEXR_DIMENSION_THRESHOLD)) {
  4146. if (err) {
  4147. std::stringstream ss;
  4148. ss << "tile with or tile height too large. tile width: " << exr_header->tile_size_x
  4149. << ", "
  4150. << "tile height = " << exr_header->tile_size_y << std::endl;
  4151. (*err) += ss.str();
  4152. }
  4153. return TINYEXR_ERROR_INVALID_DATA;
  4154. }
  4155. }
  4156. }
  4157. const std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0];
  4158. size_t num_blocks = offsets.size();
  4159. std::vector<size_t> channel_offset_list;
  4160. int pixel_data_size = 0;
  4161. size_t channel_offset = 0;
  4162. if (!tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size,
  4163. &channel_offset, num_channels,
  4164. exr_header->channels)) {
  4165. if (err) {
  4166. (*err) += "Failed to compute channel layout.\n";
  4167. }
  4168. return TINYEXR_ERROR_INVALID_DATA;
  4169. }
  4170. #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
  4171. std::atomic<bool> invalid_data(false);
  4172. #else
  4173. bool invalid_data(false);
  4174. #endif
  4175. if (exr_header->tiled) {
  4176. // value check
  4177. if (exr_header->tile_size_x < 0) {
  4178. if (err) {
  4179. std::stringstream ss;
  4180. ss << "Invalid tile size x : " << exr_header->tile_size_x << "\n";
  4181. (*err) += ss.str();
  4182. }
  4183. return TINYEXR_ERROR_INVALID_HEADER;
  4184. }
  4185. if (exr_header->tile_size_y < 0) {
  4186. if (err) {
  4187. std::stringstream ss;
  4188. ss << "Invalid tile size y : " << exr_header->tile_size_y << "\n";
  4189. (*err) += ss.str();
  4190. }
  4191. return TINYEXR_ERROR_INVALID_HEADER;
  4192. }
  4193. if (exr_header->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) {
  4194. EXRImage* level_image = NULL;
  4195. for (int level = 0; level < offset_data.num_x_levels; ++level) {
  4196. if (!level_image) {
  4197. level_image = exr_image;
  4198. } else {
  4199. level_image->next_level = new EXRImage;
  4200. InitEXRImage(level_image->next_level);
  4201. level_image = level_image->next_level;
  4202. }
  4203. level_image->width =
  4204. LevelSize(exr_header->data_window.max_x - exr_header->data_window.min_x + 1, level, exr_header->tile_rounding_mode);
  4205. level_image->height =
  4206. LevelSize(exr_header->data_window.max_y - exr_header->data_window.min_y + 1, level, exr_header->tile_rounding_mode);
  4207. level_image->level_x = level;
  4208. level_image->level_y = level;
  4209. int ret = DecodeTiledLevel(level_image, exr_header,
  4210. offset_data,
  4211. channel_offset_list,
  4212. pixel_data_size,
  4213. head, size,
  4214. err);
  4215. if (ret != TINYEXR_SUCCESS) return ret;
  4216. }
  4217. } else {
  4218. EXRImage* level_image = NULL;
  4219. for (int level_y = 0; level_y < offset_data.num_y_levels; ++level_y)
  4220. for (int level_x = 0; level_x < offset_data.num_x_levels; ++level_x) {
  4221. if (!level_image) {
  4222. level_image = exr_image;
  4223. } else {
  4224. level_image->next_level = new EXRImage;
  4225. InitEXRImage(level_image->next_level);
  4226. level_image = level_image->next_level;
  4227. }
  4228. level_image->width =
  4229. LevelSize(exr_header->data_window.max_x - exr_header->data_window.min_x + 1, level_x, exr_header->tile_rounding_mode);
  4230. level_image->height =
  4231. LevelSize(exr_header->data_window.max_y - exr_header->data_window.min_y + 1, level_y, exr_header->tile_rounding_mode);
  4232. level_image->level_x = level_x;
  4233. level_image->level_y = level_y;
  4234. int ret = DecodeTiledLevel(level_image, exr_header,
  4235. offset_data,
  4236. channel_offset_list,
  4237. pixel_data_size,
  4238. head, size,
  4239. err);
  4240. if (ret != TINYEXR_SUCCESS) return ret;
  4241. }
  4242. }
  4243. } else { // scanline format
4244. // Don't allow too large an image (256G samples, i.e. 256GB * pixel_data_size bytes, or more).
4245. // Workaround for issue #104.
  4246. size_t total_data_len =
  4247. size_t(data_width) * size_t(data_height) * size_t(num_channels);
  4248. const bool total_data_len_overflown =
  4249. sizeof(void *) == 8 ? (total_data_len >= 0x4000000000) : false;
  4250. if ((total_data_len == 0) || total_data_len_overflown) {
  4251. if (err) {
  4252. std::stringstream ss;
  4253. ss << "Image data size is zero or too large: width = " << data_width
  4254. << ", height = " << data_height << ", channels = " << num_channels
  4255. << std::endl;
  4256. (*err) += ss.str();
  4257. }
  4258. return TINYEXR_ERROR_INVALID_DATA;
  4259. }
  4260. exr_image->images = tinyexr::AllocateImage(
  4261. num_channels, exr_header->channels, exr_header->requested_pixel_types,
  4262. data_width, data_height);
  4263. #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
  4264. std::vector<std::thread> workers;
  4265. std::atomic<int> y_count(0);
  4266. int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
  4267. if (num_threads > int(num_blocks)) {
  4268. num_threads = int(num_blocks);
  4269. }
  4270. for (int t = 0; t < num_threads; t++) {
  4271. workers.emplace_back(std::thread([&]() {
  4272. int y = 0;
  4273. while ((y = y_count++) < int(num_blocks)) {
  4274. #else
  4275. #if TINYEXR_USE_OPENMP
  4276. #pragma omp parallel for
  4277. #endif
  4278. for (int y = 0; y < static_cast<int>(num_blocks); y++) {
  4279. #endif
  4280. size_t y_idx = static_cast<size_t>(y);
  4281. if (offsets[y_idx] + sizeof(int) * 2 > size) {
  4282. invalid_data = true;
  4283. } else {
  4284. // 4 byte: scan line
  4285. // 4 byte: data size
  4286. // ~ : pixel data(uncompressed or compressed)
  4287. size_t data_size =
  4288. size_t(size - (offsets[y_idx] + sizeof(int) * 2));
  4289. const unsigned char *data_ptr =
  4290. reinterpret_cast<const unsigned char *>(head + offsets[y_idx]);
  4291. int line_no;
  4292. memcpy(&line_no, data_ptr, sizeof(int));
  4293. int data_len;
  4294. memcpy(&data_len, data_ptr + 4, sizeof(int));
  4295. tinyexr::swap4(&line_no);
  4296. tinyexr::swap4(&data_len);
  4297. if (size_t(data_len) > data_size) {
  4298. invalid_data = true;
  4299. } else if ((line_no > (2 << 20)) || (line_no < -(2 << 20))) {
4300. // Too large a value; assume the data is invalid.
4301. // (2 << 20) = 2097152 scanlines is used as a heuristic limit.
  4302. invalid_data = true;
  4303. } else if (data_len == 0) {
  4304. // TODO(syoyo): May be ok to raise the threshold for example
  4305. // `data_len < 4`
  4306. invalid_data = true;
  4307. } else {
  4308. // line_no may be negative.
  4309. int end_line_no = (std::min)(line_no + num_scanline_blocks,
  4310. (exr_header->data_window.max_y + 1));
  4311. int num_lines = end_line_no - line_no;
  4312. if (num_lines <= 0) {
  4313. invalid_data = true;
  4314. } else {
  4315. // Move to data addr: 8 = 4 + 4;
  4316. data_ptr += 8;
4317. // Adjust line_no with data_window.min_y
  4318. // overflow check
  4319. tinyexr_int64 lno =
  4320. static_cast<tinyexr_int64>(line_no) -
  4321. static_cast<tinyexr_int64>(exr_header->data_window.min_y);
  4322. if (lno > std::numeric_limits<int>::max()) {
  4323. line_no = -1; // invalid
  4324. } else if (lno < -std::numeric_limits<int>::max()) {
  4325. line_no = -1; // invalid
  4326. } else {
  4327. line_no -= exr_header->data_window.min_y;
  4328. }
  4329. if (line_no < 0) {
  4330. invalid_data = true;
  4331. } else {
  4332. if (!tinyexr::DecodePixelData(
  4333. exr_image->images, exr_header->requested_pixel_types,
  4334. data_ptr, static_cast<size_t>(data_len),
  4335. exr_header->compression_type, exr_header->line_order,
  4336. data_width, data_height, data_width, y, line_no,
  4337. num_lines, static_cast<size_t>(pixel_data_size),
  4338. static_cast<size_t>(
  4339. exr_header->num_custom_attributes),
  4340. exr_header->custom_attributes,
  4341. static_cast<size_t>(exr_header->num_channels),
  4342. exr_header->channels, channel_offset_list)) {
  4343. invalid_data = true;
  4344. }
  4345. }
  4346. }
  4347. }
  4348. }
  4349. #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
  4350. }
  4351. }));
  4352. }
  4353. for (auto &t : workers) {
  4354. t.join();
  4355. }
  4356. #else
  4357. } // omp parallel
  4358. #endif
  4359. }
  4360. if (invalid_data) {
  4361. if (err) {
  4362. (*err) += "Invalid data found when decoding pixels.\n";
  4363. }
  4364. return TINYEXR_ERROR_INVALID_DATA;
  4365. }
  4366. // Overwrite `pixel_type` with `requested_pixel_type`.
  4367. {
  4368. for (int c = 0; c < exr_header->num_channels; c++) {
  4369. exr_header->pixel_types[c] = exr_header->requested_pixel_types[c];
  4370. }
  4371. }
  4372. {
  4373. exr_image->num_channels = num_channels;
  4374. exr_image->width = data_width;
  4375. exr_image->height = data_height;
  4376. }
  4377. return TINYEXR_SUCCESS;
  4378. }
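// Rebuilds the scanline chunk offset table by walking the chunks sequentially:
// each chunk starts with a 4-byte scanline number and a 4-byte data length.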
  4379. static bool ReconstructLineOffsets(
  4380. std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n,
  4381. const unsigned char *head, const unsigned char *marker, const size_t size) {
  4382. assert(head < marker);
  4383. assert(offsets->size() == n);
  4384. for (size_t i = 0; i < n; i++) {
  4385. size_t offset = static_cast<size_t>(marker - head);
  4386. // Offset should not exceed whole EXR file/data size.
  4387. if ((offset + sizeof(tinyexr::tinyexr_uint64)) >= size) {
  4388. return false;
  4389. }
  4390. int y;
  4391. unsigned int data_len;
  4392. memcpy(&y, marker, sizeof(int));
  4393. memcpy(&data_len, marker + 4, sizeof(unsigned int));
  4394. if (data_len >= size) {
  4395. return false;
  4396. }
  4397. tinyexr::swap4(&y);
  4398. tinyexr::swap4(&data_len);
  4399. (*offsets)[i] = offset;
  4400. marker += data_len + 8; // 8 = 4 bytes(y) + 4 bytes(data_len)
  4401. }
  4402. return true;
  4403. }
  4404. static int FloorLog2(unsigned x) {
  4405. //
4406. // For x > 0, floorLog2(x) returns floor(log(x)/log(2)).
  4407. //
  4408. int y = 0;
  4409. while (x > 1) {
  4410. y += 1;
  4411. x >>= 1u;
  4412. }
  4413. return y;
  4414. }
  4415. static int CeilLog2(unsigned x) {
  4416. //
4417. // For x > 0, ceilLog2(x) returns ceil(log(x)/log(2)).
  4418. //
  4419. int y = 0;
  4420. int r = 0;
  4421. while (x > 1) {
  4422. if (x & 1)
  4423. r = 1;
  4424. y += 1;
  4425. x >>= 1u;
  4426. }
  4427. return y + r;
  4428. }
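// e.g. FloorLog2(12) = 3 and CeilLog2(12) = 4; for powers of two both return log2(x).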
  4429. static int RoundLog2(int x, int tile_rounding_mode) {
  4430. return (tile_rounding_mode == TINYEXR_TILE_ROUND_DOWN) ? FloorLog2(static_cast<unsigned>(x)) : CeilLog2(static_cast<unsigned>(x));
  4431. }
  4432. static int CalculateNumXLevels(const EXRHeader* exr_header) {
  4433. int min_x = exr_header->data_window.min_x;
  4434. int max_x = exr_header->data_window.max_x;
  4435. int min_y = exr_header->data_window.min_y;
  4436. int max_y = exr_header->data_window.max_y;
  4437. int num = 0;
  4438. switch (exr_header->tile_level_mode) {
  4439. case TINYEXR_TILE_ONE_LEVEL:
  4440. num = 1;
  4441. break;
  4442. case TINYEXR_TILE_MIPMAP_LEVELS:
  4443. {
  4444. int w = max_x - min_x + 1;
  4445. int h = max_y - min_y + 1;
  4446. num = RoundLog2(std::max(w, h), exr_header->tile_rounding_mode) + 1;
  4447. }
  4448. break;
  4449. case TINYEXR_TILE_RIPMAP_LEVELS:
  4450. {
  4451. int w = max_x - min_x + 1;
  4452. num = RoundLog2(w, exr_header->tile_rounding_mode) + 1;
  4453. }
  4454. break;
  4455. default:
  4456. assert(false);
  4457. }
  4458. return num;
  4459. }
  4460. static int CalculateNumYLevels(const EXRHeader* exr_header) {
  4461. int min_x = exr_header->data_window.min_x;
  4462. int max_x = exr_header->data_window.max_x;
  4463. int min_y = exr_header->data_window.min_y;
  4464. int max_y = exr_header->data_window.max_y;
  4465. int num = 0;
  4466. switch (exr_header->tile_level_mode) {
  4467. case TINYEXR_TILE_ONE_LEVEL:
  4468. num = 1;
  4469. break;
  4470. case TINYEXR_TILE_MIPMAP_LEVELS:
  4471. {
  4472. int w = max_x - min_x + 1;
  4473. int h = max_y - min_y + 1;
  4474. num = RoundLog2(std::max(w, h), exr_header->tile_rounding_mode) + 1;
  4475. }
  4476. break;
  4477. case TINYEXR_TILE_RIPMAP_LEVELS:
  4478. {
  4479. int h = max_y - min_y + 1;
  4480. num = RoundLog2(h, exr_header->tile_rounding_mode) + 1;
  4481. }
  4482. break;
  4483. default:
  4484. assert(false);
  4485. }
  4486. return num;
  4487. }
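// Tiles needed to cover a level: ceil(level_size / tile_size), computed below
// as (l + size - 1) / size; e.g. a 1000-pixel-wide level with 64-pixel tiles
// needs (1000 + 63) / 64 = 16 tiles.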
  4488. static void CalculateNumTiles(std::vector<int>& numTiles,
  4489. int toplevel_size,
  4490. int size,
  4491. int tile_rounding_mode) {
  4492. for (unsigned i = 0; i < numTiles.size(); i++) {
  4493. int l = LevelSize(toplevel_size, int(i), tile_rounding_mode);
  4494. assert(l <= std::numeric_limits<int>::max() - size + 1);
  4495. numTiles[i] = (l + size - 1) / size;
  4496. }
  4497. }
  4498. static void PrecalculateTileInfo(std::vector<int>& num_x_tiles,
  4499. std::vector<int>& num_y_tiles,
  4500. const EXRHeader* exr_header) {
  4501. int min_x = exr_header->data_window.min_x;
  4502. int max_x = exr_header->data_window.max_x;
  4503. int min_y = exr_header->data_window.min_y;
  4504. int max_y = exr_header->data_window.max_y;
  4505. int num_x_levels = CalculateNumXLevels(exr_header);
  4506. int num_y_levels = CalculateNumYLevels(exr_header);
  4507. num_x_tiles.resize(size_t(num_x_levels));
  4508. num_y_tiles.resize(size_t(num_y_levels));
  4509. CalculateNumTiles(num_x_tiles,
  4510. max_x - min_x + 1,
  4511. exr_header->tile_size_x,
  4512. exr_header->tile_rounding_mode);
  4513. CalculateNumTiles(num_y_tiles,
  4514. max_y - min_y + 1,
  4515. exr_header->tile_size_y,
  4516. exr_header->tile_rounding_mode);
  4517. }
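// offset_data.offsets is indexed as offsets[level][tile_y][tile_x]. Scan-line
// images use a single 1x1 "level" whose only row holds one offset per
// scan-line block.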
  4518. static void InitSingleResolutionOffsets(OffsetData& offset_data, size_t num_blocks) {
  4519. offset_data.offsets.resize(1);
  4520. offset_data.offsets[0].resize(1);
  4521. offset_data.offsets[0][0].resize(num_blocks);
  4522. offset_data.num_x_levels = 1;
  4523. offset_data.num_y_levels = 1;
  4524. }
4525. // Returns the total number of tile blocks summed over all levels.
  4526. static int InitTileOffsets(OffsetData& offset_data,
  4527. const EXRHeader* exr_header,
  4528. const std::vector<int>& num_x_tiles,
  4529. const std::vector<int>& num_y_tiles) {
  4530. int num_tile_blocks = 0;
  4531. offset_data.num_x_levels = static_cast<int>(num_x_tiles.size());
  4532. offset_data.num_y_levels = static_cast<int>(num_y_tiles.size());
  4533. switch (exr_header->tile_level_mode) {
  4534. case TINYEXR_TILE_ONE_LEVEL:
  4535. case TINYEXR_TILE_MIPMAP_LEVELS:
  4536. assert(offset_data.num_x_levels == offset_data.num_y_levels);
  4537. offset_data.offsets.resize(size_t(offset_data.num_x_levels));
  4538. for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
  4539. offset_data.offsets[l].resize(size_t(num_y_tiles[l]));
  4540. for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
  4541. offset_data.offsets[l][dy].resize(size_t(num_x_tiles[l]));
  4542. num_tile_blocks += num_x_tiles[l];
  4543. }
  4544. }
  4545. break;
  4546. case TINYEXR_TILE_RIPMAP_LEVELS:
  4547. offset_data.offsets.resize(static_cast<size_t>(offset_data.num_x_levels) * static_cast<size_t>(offset_data.num_y_levels));
  4548. for (int ly = 0; ly < offset_data.num_y_levels; ++ly) {
  4549. for (int lx = 0; lx < offset_data.num_x_levels; ++lx) {
  4550. int l = ly * offset_data.num_x_levels + lx;
  4551. offset_data.offsets[size_t(l)].resize(size_t(num_y_tiles[size_t(ly)]));
  4552. for (size_t dy = 0; dy < offset_data.offsets[size_t(l)].size(); ++dy) {
  4553. offset_data.offsets[size_t(l)][dy].resize(size_t(num_x_tiles[size_t(lx)]));
  4554. num_tile_blocks += num_x_tiles[size_t(lx)];
  4555. }
  4556. }
  4557. }
  4558. break;
  4559. default:
  4560. assert(false);
  4561. }
  4562. return num_tile_blocks;
  4563. }
  4564. static bool IsAnyOffsetsAreInvalid(const OffsetData& offset_data) {
  4565. for (unsigned int l = 0; l < offset_data.offsets.size(); ++l)
  4566. for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy)
  4567. for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx)
  4568. if (reinterpret_cast<const tinyexr::tinyexr_int64&>(offset_data.offsets[l][dy][dx]) <= 0)
  4569. return true;
  4570. return false;
  4571. }
  4572. static bool isValidTile(const EXRHeader* exr_header,
  4573. const OffsetData& offset_data,
  4574. int dx, int dy, int lx, int ly) {
  4575. if (lx < 0 || ly < 0 || dx < 0 || dy < 0) return false;
  4576. int num_x_levels = offset_data.num_x_levels;
  4577. int num_y_levels = offset_data.num_y_levels;
  4578. switch (exr_header->tile_level_mode) {
  4579. case TINYEXR_TILE_ONE_LEVEL:
  4580. if (lx == 0 &&
  4581. ly == 0 &&
  4582. offset_data.offsets.size() > 0 &&
  4583. offset_data.offsets[0].size() > static_cast<size_t>(dy) &&
  4584. offset_data.offsets[0][size_t(dy)].size() > static_cast<size_t>(dx)) {
  4585. return true;
  4586. }
  4587. break;
  4588. case TINYEXR_TILE_MIPMAP_LEVELS:
  4589. if (lx < num_x_levels &&
  4590. ly < num_y_levels &&
  4591. offset_data.offsets.size() > static_cast<size_t>(lx) &&
  4592. offset_data.offsets[size_t(lx)].size() > static_cast<size_t>(dy) &&
  4593. offset_data.offsets[size_t(lx)][size_t(dy)].size() > static_cast<size_t>(dx)) {
  4594. return true;
  4595. }
  4596. break;
  4597. case TINYEXR_TILE_RIPMAP_LEVELS:
  4598. {
  4599. size_t idx = static_cast<size_t>(lx) + static_cast<size_t>(ly)* static_cast<size_t>(num_x_levels);
  4600. if (lx < num_x_levels &&
  4601. ly < num_y_levels &&
  4602. (offset_data.offsets.size() > idx) &&
  4603. offset_data.offsets[idx].size() > static_cast<size_t>(dy) &&
  4604. offset_data.offsets[idx][size_t(dy)].size() > static_cast<size_t>(dx)) {
  4605. return true;
  4606. }
  4607. }
  4608. break;
  4609. default:
  4610. return false;
  4611. }
  4612. return false;
  4613. }
  4614. static void ReconstructTileOffsets(OffsetData& offset_data,
  4615. const EXRHeader* exr_header,
  4616. const unsigned char* head, const unsigned char* marker, const size_t /*size*/,
  4617. bool isMultiPartFile,
  4618. bool isDeep) {
  4619. int numXLevels = offset_data.num_x_levels;
  4620. for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
  4621. for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
  4622. for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
  4623. tinyexr::tinyexr_uint64 tileOffset = tinyexr::tinyexr_uint64(marker - head);
  4624. if (isMultiPartFile) {
  4625. //int partNumber;
  4626. marker += sizeof(int);
  4627. }
  4628. int tileX;
  4629. memcpy(&tileX, marker, sizeof(int));
  4630. tinyexr::swap4(&tileX);
  4631. marker += sizeof(int);
  4632. int tileY;
  4633. memcpy(&tileY, marker, sizeof(int));
  4634. tinyexr::swap4(&tileY);
  4635. marker += sizeof(int);
  4636. int levelX;
  4637. memcpy(&levelX, marker, sizeof(int));
  4638. tinyexr::swap4(&levelX);
  4639. marker += sizeof(int);
  4640. int levelY;
  4641. memcpy(&levelY, marker, sizeof(int));
  4642. tinyexr::swap4(&levelY);
  4643. marker += sizeof(int);
  4644. if (isDeep) {
  4645. tinyexr::tinyexr_int64 packed_offset_table_size;
  4646. memcpy(&packed_offset_table_size, marker, sizeof(tinyexr::tinyexr_int64));
  4647. tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64*>(&packed_offset_table_size));
  4648. marker += sizeof(tinyexr::tinyexr_int64);
  4649. tinyexr::tinyexr_int64 packed_sample_size;
  4650. memcpy(&packed_sample_size, marker, sizeof(tinyexr::tinyexr_int64));
  4651. tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64*>(&packed_sample_size));
  4652. marker += sizeof(tinyexr::tinyexr_int64);
  4653. // next Int64 is unpacked sample size - skip that too
  4654. marker += packed_offset_table_size + packed_sample_size + 8;
  4655. } else {
  4656. int dataSize;
  4657. memcpy(&dataSize, marker, sizeof(int));
  4658. tinyexr::swap4(&dataSize);
  4659. marker += sizeof(int);
  4660. marker += dataSize;
  4661. }
  4662. if (!isValidTile(exr_header, offset_data,
  4663. tileX, tileY, levelX, levelY))
  4664. return;
  4665. int level_idx = LevelIndex(levelX, levelY, exr_header->tile_level_mode, numXLevels);
  4666. offset_data.offsets[size_t(level_idx)][size_t(tileY)][size_t(tileX)] = tileOffset;
  4667. }
  4668. }
  4669. }
  4670. }
4671. // On success, `marker` is advanced past the offset table that was read.
  4672. static int ReadOffsets(OffsetData& offset_data,
  4673. const unsigned char* head,
  4674. const unsigned char*& marker,
  4675. const size_t size,
  4676. const char** err) {
  4677. for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
  4678. for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
  4679. for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
  4680. tinyexr::tinyexr_uint64 offset;
  4681. if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) {
  4682. tinyexr::SetErrorMessage("Insufficient data size in offset table.", err);
  4683. return TINYEXR_ERROR_INVALID_DATA;
  4684. }
  4685. memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
  4686. tinyexr::swap8(&offset);
  4687. if (offset >= size) {
  4688. tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err);
  4689. return TINYEXR_ERROR_INVALID_DATA;
  4690. }
  4691. marker += sizeof(tinyexr::tinyexr_uint64); // = 8
  4692. offset_data.offsets[l][dy][dx] = offset;
  4693. }
  4694. }
  4695. }
  4696. return TINYEXR_SUCCESS;
  4697. }
  4698. static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header,
  4699. const unsigned char *head,
  4700. const unsigned char *marker, const size_t size,
  4701. const char **err) {
  4702. if (exr_image == NULL || exr_header == NULL || head == NULL ||
  4703. marker == NULL || (size <= tinyexr::kEXRVersionSize)) {
  4704. tinyexr::SetErrorMessage("Invalid argument for DecodeEXRImage().", err);
  4705. return TINYEXR_ERROR_INVALID_ARGUMENT;
  4706. }
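// Scan lines per chunk depend on compression: NONE/RLE/ZIPS use 1,
// ZIP and ZFP use 16, PIZ uses 32.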
  4707. int num_scanline_blocks = 1;
  4708. if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
  4709. num_scanline_blocks = 16;
  4710. } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
  4711. num_scanline_blocks = 32;
  4712. } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
  4713. num_scanline_blocks = 16;
  4714. }
  4715. if (exr_header->data_window.max_x < exr_header->data_window.min_x ||
  4716. exr_header->data_window.max_x - exr_header->data_window.min_x ==
  4717. std::numeric_limits<int>::max()) {
  4718. // Issue 63
  4719. tinyexr::SetErrorMessage("Invalid data width value", err);
  4720. return TINYEXR_ERROR_INVALID_DATA;
  4721. }
  4722. int data_width =
  4723. exr_header->data_window.max_x - exr_header->data_window.min_x + 1;
  4724. if (exr_header->data_window.max_y < exr_header->data_window.min_y ||
  4725. exr_header->data_window.max_y - exr_header->data_window.min_y ==
  4726. std::numeric_limits<int>::max()) {
  4727. tinyexr::SetErrorMessage("Invalid data height value", err);
  4728. return TINYEXR_ERROR_INVALID_DATA;
  4729. }
  4730. int data_height =
  4731. exr_header->data_window.max_y - exr_header->data_window.min_y + 1;
4732. // Reject overly large data_width and data_height (likely an invalid header).
  4733. {
  4734. if (data_width > TINYEXR_DIMENSION_THRESHOLD) {
  4735. tinyexr::SetErrorMessage("data width too large.", err);
  4736. return TINYEXR_ERROR_INVALID_DATA;
  4737. }
  4738. if (data_height > TINYEXR_DIMENSION_THRESHOLD) {
  4739. tinyexr::SetErrorMessage("data height too large.", err);
  4740. return TINYEXR_ERROR_INVALID_DATA;
  4741. }
  4742. }
  4743. if (exr_header->tiled) {
  4744. if (exr_header->tile_size_x > TINYEXR_DIMENSION_THRESHOLD) {
  4745. tinyexr::SetErrorMessage("tile width too large.", err);
  4746. return TINYEXR_ERROR_INVALID_DATA;
  4747. }
  4748. if (exr_header->tile_size_y > TINYEXR_DIMENSION_THRESHOLD) {
  4749. tinyexr::SetErrorMessage("tile height too large.", err);
  4750. return TINYEXR_ERROR_INVALID_DATA;
  4751. }
  4752. }
  4753. // Read offset tables.
  4754. OffsetData offset_data;
  4755. size_t num_blocks = 0;
  4756. // For a multi-resolution image, the size of the offset table will be calculated from the other attributes of the header.
  4757. // If chunk_count > 0 then chunk_count must be equal to the calculated tile count.
  4758. if (exr_header->tiled) {
  4759. {
  4760. std::vector<int> num_x_tiles, num_y_tiles;
  4761. PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_header);
  4762. num_blocks = size_t(InitTileOffsets(offset_data, exr_header, num_x_tiles, num_y_tiles));
  4763. if (exr_header->chunk_count > 0) {
  4764. if (exr_header->chunk_count != static_cast<int>(num_blocks)) {
  4765. tinyexr::SetErrorMessage("Invalid offset table size.", err);
  4766. return TINYEXR_ERROR_INVALID_DATA;
  4767. }
  4768. }
  4769. }
  4770. int ret = ReadOffsets(offset_data, head, marker, size, err);
  4771. if (ret != TINYEXR_SUCCESS) return ret;
  4772. if (IsAnyOffsetsAreInvalid(offset_data)) {
  4773. ReconstructTileOffsets(offset_data, exr_header,
  4774. head, marker, size,
  4775. exr_header->multipart, exr_header->non_image);
  4776. }
  4777. } else if (exr_header->chunk_count > 0) {
  4778. // Use `chunkCount` attribute.
  4779. num_blocks = static_cast<size_t>(exr_header->chunk_count);
  4780. InitSingleResolutionOffsets(offset_data, num_blocks);
  4781. } else {
  4782. num_blocks = static_cast<size_t>(data_height) /
  4783. static_cast<size_t>(num_scanline_blocks);
  4784. if (num_blocks * static_cast<size_t>(num_scanline_blocks) <
  4785. static_cast<size_t>(data_height)) {
  4786. num_blocks++;
  4787. }
  4788. InitSingleResolutionOffsets(offset_data, num_blocks);
  4789. }
  4790. if (!exr_header->tiled) {
  4791. std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0];
  4792. for (size_t y = 0; y < num_blocks; y++) {
  4793. tinyexr::tinyexr_uint64 offset;
  4794. // Issue #81
  4795. if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) {
  4796. tinyexr::SetErrorMessage("Insufficient data size in offset table.", err);
  4797. return TINYEXR_ERROR_INVALID_DATA;
  4798. }
  4799. memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
  4800. tinyexr::swap8(&offset);
  4801. if (offset >= size) {
  4802. tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err);
  4803. return TINYEXR_ERROR_INVALID_DATA;
  4804. }
  4805. marker += sizeof(tinyexr::tinyexr_uint64); // = 8
  4806. offsets[y] = offset;
  4807. }
4808. // If the line offsets are invalid, try to reconstruct the table.
  4809. // See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for details.
  4810. for (size_t y = 0; y < num_blocks; y++) {
  4811. if (offsets[y] <= 0) {
  4812. // TODO(syoyo) Report as warning?
  4813. // if (err) {
  4814. // stringstream ss;
  4815. // ss << "Incomplete lineOffsets." << std::endl;
  4816. // (*err) += ss.str();
  4817. //}
  4818. bool ret =
  4819. ReconstructLineOffsets(&offsets, num_blocks, head, marker, size);
  4820. if (ret) {
  4821. // OK
  4822. break;
  4823. } else {
  4824. tinyexr::SetErrorMessage(
  4825. "Cannot reconstruct lineOffset table in DecodeEXRImage.", err);
  4826. return TINYEXR_ERROR_INVALID_DATA;
  4827. }
  4828. }
  4829. }
  4830. }
  4831. {
  4832. std::string e;
  4833. int ret = DecodeChunk(exr_image, exr_header, offset_data, head, size, &e);
  4834. if (ret != TINYEXR_SUCCESS) {
  4835. if (!e.empty()) {
  4836. tinyexr::SetErrorMessage(e, err);
  4837. }
  4838. #if 1
  4839. FreeEXRImage(exr_image);
  4840. #else
  4841. // release memory(if exists)
  4842. if ((exr_header->num_channels > 0) && exr_image && exr_image->images) {
  4843. for (size_t c = 0; c < size_t(exr_header->num_channels); c++) {
  4844. if (exr_image->images[c]) {
  4845. free(exr_image->images[c]);
  4846. exr_image->images[c] = NULL;
  4847. }
  4848. }
  4849. free(exr_image->images);
  4850. exr_image->images = NULL;
  4851. }
  4852. #endif
  4853. }
  4854. return ret;
  4855. }
  4856. }
  4857. static void GetLayers(const EXRHeader &exr_header,
  4858. std::vector<std::string> &layer_names) {
4859. // Naive implementation:
4860. // group channels by layer by walking all channel names,
4861. // splitting each on its last period, and collecting
4862. // the unique layer prefixes.
  4863. layer_names.clear();
  4864. for (int c = 0; c < exr_header.num_channels; c++) {
  4865. std::string full_name(exr_header.channels[c].name);
  4866. const size_t pos = full_name.find_last_of('.');
  4867. if (pos != std::string::npos && pos != 0 && pos + 1 < full_name.size()) {
  4868. full_name.erase(pos);
  4869. if (std::find(layer_names.begin(), layer_names.end(), full_name) ==
  4870. layer_names.end())
  4871. layer_names.push_back(full_name);
  4872. }
  4873. }
  4874. }
  4875. struct LayerChannel {
  4876. explicit LayerChannel(size_t i, std::string n) : index(i), name(n) {}
  4877. size_t index;
  4878. std::string name;
  4879. };
  4880. static void ChannelsInLayer(const EXRHeader &exr_header,
  4881. const std::string &layer_name,
  4882. std::vector<LayerChannel> &channels) {
  4883. channels.clear();
  4884. for (int c = 0; c < exr_header.num_channels; c++) {
  4885. std::string ch_name(exr_header.channels[c].name);
  4886. if (layer_name.empty()) {
  4887. const size_t pos = ch_name.find_last_of('.');
  4888. if (pos != std::string::npos && pos < ch_name.size()) {
  4889. ch_name = ch_name.substr(pos + 1);
  4890. }
  4891. } else {
  4892. const size_t pos = ch_name.find(layer_name + '.');
  4893. if (pos == std::string::npos) continue;
  4894. if (pos == 0) {
  4895. ch_name = ch_name.substr(layer_name.size() + 1);
  4896. }
  4897. }
  4898. LayerChannel ch(size_t(c), ch_name);
  4899. channels.push_back(ch);
  4900. }
  4901. }
  4902. } // namespace tinyexr
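// Minimal usage sketch for EXRLayers() (illustrative only; "input.exr" is a
// placeholder path):
//
//   const char **layer_names = NULL;
//   int num_layers = 0;
//   const char *err = NULL;
//   if (EXRLayers("input.exr", &layer_names, &num_layers, &err) == TINYEXR_SUCCESS) {
//     for (int i = 0; i < num_layers; i++) {
//       printf("layer %d: %s\n", i, layer_names[i]);
//       free((void *)layer_names[i]); // names are strdup()'ed below
//     }
//     free(layer_names);
//   } else if (err) {
//     fprintf(stderr, "%s\n", err);
//     FreeEXRErrorMessage(err);
//   }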
  4903. int EXRLayers(const char *filename, const char **layer_names[], int *num_layers,
  4904. const char **err) {
  4905. EXRVersion exr_version;
  4906. EXRHeader exr_header;
  4907. InitEXRHeader(&exr_header);
  4908. {
  4909. int ret = ParseEXRVersionFromFile(&exr_version, filename);
  4910. if (ret != TINYEXR_SUCCESS) {
  4911. tinyexr::SetErrorMessage("Invalid EXR header.", err);
  4912. return ret;
  4913. }
  4914. if (exr_version.multipart || exr_version.non_image) {
  4915. tinyexr::SetErrorMessage(
  4916. "Loading multipart or DeepImage is not supported in LoadEXR() API",
  4917. err);
  4918. return TINYEXR_ERROR_INVALID_DATA; // @fixme.
  4919. }
  4920. }
  4921. int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
  4922. if (ret != TINYEXR_SUCCESS) {
  4923. FreeEXRHeader(&exr_header);
  4924. return ret;
  4925. }
  4926. std::vector<std::string> layer_vec;
  4927. tinyexr::GetLayers(exr_header, layer_vec);
  4928. (*num_layers) = int(layer_vec.size());
  4929. (*layer_names) = static_cast<const char **>(
  4930. malloc(sizeof(const char *) * static_cast<size_t>(layer_vec.size())));
  4931. for (size_t c = 0; c < static_cast<size_t>(layer_vec.size()); c++) {
  4932. #ifdef _MSC_VER
  4933. (*layer_names)[c] = _strdup(layer_vec[c].c_str());
  4934. #else
  4935. (*layer_names)[c] = strdup(layer_vec[c].c_str());
  4936. #endif
  4937. }
  4938. FreeEXRHeader(&exr_header);
  4939. return TINYEXR_SUCCESS;
  4940. }
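// Minimal usage sketch for LoadEXR()/LoadEXRWithLayer() (illustrative only;
// "input.exr" and "diffuse" are placeholders):
//
//   float *rgba = NULL;
//   int width = 0, height = 0;
//   const char *err = NULL;
//   int ret = LoadEXR(&rgba, &width, &height, "input.exr", &err);
//   // or: LoadEXRWithLayer(&rgba, &width, &height, "input.exr", "diffuse", &err);
//   if (ret == TINYEXR_SUCCESS) {
//     // rgba holds width * height * 4 floats (RGBA; A = 1.0 when the file has
//     // no alpha channel). The caller releases it with free().
//     free(rgba);
//   } else if (err) {
//     fprintf(stderr, "%s\n", err);
//     FreeEXRErrorMessage(err);
//   }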
  4941. int LoadEXR(float **out_rgba, int *width, int *height, const char *filename,
  4942. const char **err) {
  4943. return LoadEXRWithLayer(out_rgba, width, height, filename,
  4944. /* layername */ NULL, err);
  4945. }
  4946. int LoadEXRWithLayer(float **out_rgba, int *width, int *height,
  4947. const char *filename, const char *layername,
  4948. const char **err) {
  4949. if (out_rgba == NULL) {
  4950. tinyexr::SetErrorMessage("Invalid argument for LoadEXR()", err);
  4951. return TINYEXR_ERROR_INVALID_ARGUMENT;
  4952. }
  4953. EXRVersion exr_version;
  4954. EXRImage exr_image;
  4955. EXRHeader exr_header;
  4956. InitEXRHeader(&exr_header);
  4957. InitEXRImage(&exr_image);
  4958. {
  4959. int ret = ParseEXRVersionFromFile(&exr_version, filename);
  4960. if (ret != TINYEXR_SUCCESS) {
  4961. std::stringstream ss;
  4962. ss << "Failed to open EXR file or read version info from EXR file. code("
  4963. << ret << ")";
  4964. tinyexr::SetErrorMessage(ss.str(), err);
  4965. return ret;
  4966. }
  4967. if (exr_version.multipart || exr_version.non_image) {
  4968. tinyexr::SetErrorMessage(
  4969. "Loading multipart or DeepImage is not supported in LoadEXR() API",
  4970. err);
  4971. return TINYEXR_ERROR_INVALID_DATA; // @fixme.
  4972. }
  4973. }
  4974. {
  4975. int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
  4976. if (ret != TINYEXR_SUCCESS) {
  4977. FreeEXRHeader(&exr_header);
  4978. return ret;
  4979. }
  4980. }
  4981. // Read HALF channel as FLOAT.
  4982. for (int i = 0; i < exr_header.num_channels; i++) {
  4983. if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
  4984. exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
  4985. }
  4986. }
  4987. // TODO: Probably limit loading to layers (channels) selected by layer index
  4988. {
  4989. int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err);
  4990. if (ret != TINYEXR_SUCCESS) {
  4991. FreeEXRHeader(&exr_header);
  4992. return ret;
  4993. }
  4994. }
  4995. // RGBA
  4996. int idxR = -1;
  4997. int idxG = -1;
  4998. int idxB = -1;
  4999. int idxA = -1;
  5000. std::vector<std::string> layer_names;
  5001. tinyexr::GetLayers(exr_header, layer_names);
  5002. std::vector<tinyexr::LayerChannel> channels;
  5003. tinyexr::ChannelsInLayer(
  5004. exr_header, layername == NULL ? "" : std::string(layername), channels);
  5005. if (channels.size() < 1) {
  5006. tinyexr::SetErrorMessage("Layer Not Found", err);
  5007. FreeEXRHeader(&exr_header);
  5008. FreeEXRImage(&exr_image);
  5009. return TINYEXR_ERROR_LAYER_NOT_FOUND;
  5010. }
  5011. size_t ch_count = channels.size() < 4 ? channels.size() : 4;
  5012. for (size_t c = 0; c < ch_count; c++) {
  5013. const tinyexr::LayerChannel &ch = channels[c];
  5014. if (ch.name == "R") {
  5015. idxR = int(ch.index);
  5016. } else if (ch.name == "G") {
  5017. idxG = int(ch.index);
  5018. } else if (ch.name == "B") {
  5019. idxB = int(ch.index);
  5020. } else if (ch.name == "A") {
  5021. idxA = int(ch.index);
  5022. }
  5023. }
  5024. if (channels.size() == 1) {
  5025. int chIdx = int(channels.front().index);
  5026. // Grayscale channel only.
  5027. (*out_rgba) = reinterpret_cast<float *>(
  5028. malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
  5029. static_cast<size_t>(exr_image.height)));
  5030. if (exr_header.tiled) {
  5031. for (int it = 0; it < exr_image.num_tiles; it++) {
  5032. for (int j = 0; j < exr_header.tile_size_y; j++) {
  5033. for (int i = 0; i < exr_header.tile_size_x; i++) {
  5034. const int ii = exr_image.tiles[it].offset_x *
  5035. static_cast<int>(exr_header.tile_size_x) +
  5036. i;
  5037. const int jj = exr_image.tiles[it].offset_y *
  5038. static_cast<int>(exr_header.tile_size_y) +
  5039. j;
  5040. const int idx = ii + jj * static_cast<int>(exr_image.width);
  5041. // out of region check.
  5042. if (ii >= exr_image.width) {
  5043. continue;
  5044. }
  5045. if (jj >= exr_image.height) {
  5046. continue;
  5047. }
  5048. const int srcIdx = i + j * exr_header.tile_size_x;
  5049. unsigned char **src = exr_image.tiles[it].images;
  5050. (*out_rgba)[4 * idx + 0] =
  5051. reinterpret_cast<float **>(src)[chIdx][srcIdx];
  5052. (*out_rgba)[4 * idx + 1] =
  5053. reinterpret_cast<float **>(src)[chIdx][srcIdx];
  5054. (*out_rgba)[4 * idx + 2] =
  5055. reinterpret_cast<float **>(src)[chIdx][srcIdx];
  5056. (*out_rgba)[4 * idx + 3] =
  5057. reinterpret_cast<float **>(src)[chIdx][srcIdx];
  5058. }
  5059. }
  5060. }
  5061. } else {
  5062. for (int i = 0; i < exr_image.width * exr_image.height; i++) {
  5063. const float val =
  5064. reinterpret_cast<float **>(exr_image.images)[chIdx][i];
  5065. (*out_rgba)[4 * i + 0] = val;
  5066. (*out_rgba)[4 * i + 1] = val;
  5067. (*out_rgba)[4 * i + 2] = val;
  5068. (*out_rgba)[4 * i + 3] = val;
  5069. }
  5070. }
  5071. } else {
  5072. // Assume RGB(A)
  5073. if (idxR == -1) {
  5074. tinyexr::SetErrorMessage("R channel not found", err);
  5075. FreeEXRHeader(&exr_header);
  5076. FreeEXRImage(&exr_image);
  5077. return TINYEXR_ERROR_INVALID_DATA;
  5078. }
  5079. if (idxG == -1) {
  5080. tinyexr::SetErrorMessage("G channel not found", err);
  5081. FreeEXRHeader(&exr_header);
  5082. FreeEXRImage(&exr_image);
  5083. return TINYEXR_ERROR_INVALID_DATA;
  5084. }
  5085. if (idxB == -1) {
  5086. tinyexr::SetErrorMessage("B channel not found", err);
  5087. FreeEXRHeader(&exr_header);
  5088. FreeEXRImage(&exr_image);
  5089. return TINYEXR_ERROR_INVALID_DATA;
  5090. }
  5091. (*out_rgba) = reinterpret_cast<float *>(
  5092. malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
  5093. static_cast<size_t>(exr_image.height)));
  5094. if (exr_header.tiled) {
  5095. for (int it = 0; it < exr_image.num_tiles; it++) {
  5096. for (int j = 0; j < exr_header.tile_size_y; j++) {
  5097. for (int i = 0; i < exr_header.tile_size_x; i++) {
  5098. const int ii =
  5099. exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
  5100. const int jj =
  5101. exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
  5102. const int idx = ii + jj * exr_image.width;
  5103. // out of region check.
  5104. if (ii >= exr_image.width) {
  5105. continue;
  5106. }
  5107. if (jj >= exr_image.height) {
  5108. continue;
  5109. }
  5110. const int srcIdx = i + j * exr_header.tile_size_x;
  5111. unsigned char **src = exr_image.tiles[it].images;
  5112. (*out_rgba)[4 * idx + 0] =
  5113. reinterpret_cast<float **>(src)[idxR][srcIdx];
  5114. (*out_rgba)[4 * idx + 1] =
  5115. reinterpret_cast<float **>(src)[idxG][srcIdx];
  5116. (*out_rgba)[4 * idx + 2] =
  5117. reinterpret_cast<float **>(src)[idxB][srcIdx];
  5118. if (idxA != -1) {
  5119. (*out_rgba)[4 * idx + 3] =
  5120. reinterpret_cast<float **>(src)[idxA][srcIdx];
  5121. } else {
  5122. (*out_rgba)[4 * idx + 3] = 1.0;
  5123. }
  5124. }
  5125. }
  5126. }
  5127. } else {
  5128. for (int i = 0; i < exr_image.width * exr_image.height; i++) {
  5129. (*out_rgba)[4 * i + 0] =
  5130. reinterpret_cast<float **>(exr_image.images)[idxR][i];
  5131. (*out_rgba)[4 * i + 1] =
  5132. reinterpret_cast<float **>(exr_image.images)[idxG][i];
  5133. (*out_rgba)[4 * i + 2] =
  5134. reinterpret_cast<float **>(exr_image.images)[idxB][i];
  5135. if (idxA != -1) {
  5136. (*out_rgba)[4 * i + 3] =
  5137. reinterpret_cast<float **>(exr_image.images)[idxA][i];
  5138. } else {
  5139. (*out_rgba)[4 * i + 3] = 1.0;
  5140. }
  5141. }
  5142. }
  5143. }
  5144. (*width) = exr_image.width;
  5145. (*height) = exr_image.height;
  5146. FreeEXRHeader(&exr_header);
  5147. FreeEXRImage(&exr_image);
  5148. return TINYEXR_SUCCESS;
  5149. }
  5150. int IsEXR(const char *filename) {
  5151. EXRVersion exr_version;
  5152. int ret = ParseEXRVersionFromFile(&exr_version, filename);
  5153. if (ret != TINYEXR_SUCCESS) {
  5154. return ret;
  5155. }
  5156. return TINYEXR_SUCCESS;
  5157. }
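// The lower-level memory API below is normally chained as
//   ParseEXRVersionFromMemory() -> ParseEXRHeaderFromMemory() ->
//   LoadEXRImageFromMemory() -> FreeEXRImage() / FreeEXRHeader();
// LoadEXRFromMemory() further down performs exactly this sequence.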
  5158. int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version,
  5159. const unsigned char *memory, size_t size,
  5160. const char **err) {
  5161. if (memory == NULL || exr_header == NULL) {
  5162. tinyexr::SetErrorMessage(
  5163. "Invalid argument. `memory` or `exr_header` argument is null in "
  5164. "ParseEXRHeaderFromMemory()",
  5165. err);
  5166. // Invalid argument
  5167. return TINYEXR_ERROR_INVALID_ARGUMENT;
  5168. }
  5169. if (size < tinyexr::kEXRVersionSize) {
  5170. tinyexr::SetErrorMessage("Insufficient header/data size.\n", err);
  5171. return TINYEXR_ERROR_INVALID_DATA;
  5172. }
  5173. const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
  5174. size_t marker_size = size - tinyexr::kEXRVersionSize;
  5175. tinyexr::HeaderInfo info;
  5176. info.clear();
  5177. int ret;
  5178. {
  5179. std::string err_str;
  5180. ret = ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size);
  5181. if (ret != TINYEXR_SUCCESS) {
  5182. if (err && !err_str.empty()) {
  5183. tinyexr::SetErrorMessage(err_str, err);
  5184. }
  5185. }
  5186. }
  5187. {
  5188. std::string warn;
  5189. std::string err_str;
  5190. if (!ConvertHeader(exr_header, info, &warn, &err_str)) {
  5191. if (err && !err_str.empty()) {
  5192. tinyexr::SetErrorMessage(err_str, err);
  5193. }
  5194. ret = TINYEXR_ERROR_INVALID_HEADER;
  5195. }
  5196. }
  5197. exr_header->multipart = version->multipart ? 1 : 0;
  5198. exr_header->non_image = version->non_image ? 1 : 0;
  5199. return ret;
  5200. }
  5201. int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
  5202. const unsigned char *memory, size_t size,
  5203. const char **err) {
  5204. if (out_rgba == NULL || memory == NULL) {
  5205. tinyexr::SetErrorMessage("Invalid argument for LoadEXRFromMemory", err);
  5206. return TINYEXR_ERROR_INVALID_ARGUMENT;
  5207. }
  5208. EXRVersion exr_version;
  5209. EXRImage exr_image;
  5210. EXRHeader exr_header;
  5211. InitEXRHeader(&exr_header);
  5212. int ret = ParseEXRVersionFromMemory(&exr_version, memory, size);
  5213. if (ret != TINYEXR_SUCCESS) {
  5214. std::stringstream ss;
  5215. ss << "Failed to parse EXR version. code(" << ret << ")";
  5216. tinyexr::SetErrorMessage(ss.str(), err);
  5217. return ret;
  5218. }
  5219. ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err);
  5220. if (ret != TINYEXR_SUCCESS) {
  5221. return ret;
  5222. }
  5223. // Read HALF channel as FLOAT.
  5224. for (int i = 0; i < exr_header.num_channels; i++) {
  5225. if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
  5226. exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
  5227. }
  5228. }
  5229. InitEXRImage(&exr_image);
  5230. ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err);
  5231. if (ret != TINYEXR_SUCCESS) {
  5232. return ret;
  5233. }
  5234. // RGBA
  5235. int idxR = -1;
  5236. int idxG = -1;
  5237. int idxB = -1;
  5238. int idxA = -1;
  5239. for (int c = 0; c < exr_header.num_channels; c++) {
  5240. if (strcmp(exr_header.channels[c].name, "R") == 0) {
  5241. idxR = c;
  5242. } else if (strcmp(exr_header.channels[c].name, "G") == 0) {
  5243. idxG = c;
  5244. } else if (strcmp(exr_header.channels[c].name, "B") == 0) {
  5245. idxB = c;
  5246. } else if (strcmp(exr_header.channels[c].name, "A") == 0) {
  5247. idxA = c;
  5248. }
  5249. }
5250. // TODO(syoyo): Refactor to remove the code duplicated from LoadEXR().
  5251. if (exr_header.num_channels == 1) {
  5252. // Grayscale channel only.
  5253. (*out_rgba) = reinterpret_cast<float *>(
  5254. malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
  5255. static_cast<size_t>(exr_image.height)));
  5256. if (exr_header.tiled) {
  5257. for (int it = 0; it < exr_image.num_tiles; it++) {
  5258. for (int j = 0; j < exr_header.tile_size_y; j++) {
  5259. for (int i = 0; i < exr_header.tile_size_x; i++) {
  5260. const int ii =
  5261. exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
  5262. const int jj =
  5263. exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
  5264. const int idx = ii + jj * exr_image.width;
  5265. // out of region check.
  5266. if (ii >= exr_image.width) {
  5267. continue;
  5268. }
  5269. if (jj >= exr_image.height) {
  5270. continue;
  5271. }
  5272. const int srcIdx = i + j * exr_header.tile_size_x;
  5273. unsigned char **src = exr_image.tiles[it].images;
  5274. (*out_rgba)[4 * idx + 0] =
  5275. reinterpret_cast<float **>(src)[0][srcIdx];
  5276. (*out_rgba)[4 * idx + 1] =
  5277. reinterpret_cast<float **>(src)[0][srcIdx];
  5278. (*out_rgba)[4 * idx + 2] =
  5279. reinterpret_cast<float **>(src)[0][srcIdx];
  5280. (*out_rgba)[4 * idx + 3] =
  5281. reinterpret_cast<float **>(src)[0][srcIdx];
  5282. }
  5283. }
  5284. }
  5285. } else {
  5286. for (int i = 0; i < exr_image.width * exr_image.height; i++) {
  5287. const float val = reinterpret_cast<float **>(exr_image.images)[0][i];
  5288. (*out_rgba)[4 * i + 0] = val;
  5289. (*out_rgba)[4 * i + 1] = val;
  5290. (*out_rgba)[4 * i + 2] = val;
  5291. (*out_rgba)[4 * i + 3] = val;
  5292. }
  5293. }
  5294. } else {
5295. // TODO(syoyo): Support non-RGBA images.
  5296. if (idxR == -1) {
  5297. tinyexr::SetErrorMessage("R channel not found", err);
  5298. // @todo { free exr_image }
  5299. return TINYEXR_ERROR_INVALID_DATA;
  5300. }
  5301. if (idxG == -1) {
  5302. tinyexr::SetErrorMessage("G channel not found", err);
  5303. // @todo { free exr_image }
  5304. return TINYEXR_ERROR_INVALID_DATA;
  5305. }
  5306. if (idxB == -1) {
  5307. tinyexr::SetErrorMessage("B channel not found", err);
  5308. // @todo { free exr_image }
  5309. return TINYEXR_ERROR_INVALID_DATA;
  5310. }
  5311. (*out_rgba) = reinterpret_cast<float *>(
  5312. malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
  5313. static_cast<size_t>(exr_image.height)));
  5314. if (exr_header.tiled) {
  5315. for (int it = 0; it < exr_image.num_tiles; it++) {
  5316. for (int j = 0; j < exr_header.tile_size_y; j++)
  5317. for (int i = 0; i < exr_header.tile_size_x; i++) {
  5318. const int ii =
  5319. exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
  5320. const int jj =
  5321. exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
  5322. const int idx = ii + jj * exr_image.width;
  5323. // out of region check.
  5324. if (ii >= exr_image.width) {
  5325. continue;
  5326. }
  5327. if (jj >= exr_image.height) {
  5328. continue;
  5329. }
  5330. const int srcIdx = i + j * exr_header.tile_size_x;
  5331. unsigned char **src = exr_image.tiles[it].images;
  5332. (*out_rgba)[4 * idx + 0] =
  5333. reinterpret_cast<float **>(src)[idxR][srcIdx];
  5334. (*out_rgba)[4 * idx + 1] =
  5335. reinterpret_cast<float **>(src)[idxG][srcIdx];
  5336. (*out_rgba)[4 * idx + 2] =
  5337. reinterpret_cast<float **>(src)[idxB][srcIdx];
  5338. if (idxA != -1) {
  5339. (*out_rgba)[4 * idx + 3] =
  5340. reinterpret_cast<float **>(src)[idxA][srcIdx];
  5341. } else {
  5342. (*out_rgba)[4 * idx + 3] = 1.0;
  5343. }
  5344. }
  5345. }
  5346. } else {
  5347. for (int i = 0; i < exr_image.width * exr_image.height; i++) {
  5348. (*out_rgba)[4 * i + 0] =
  5349. reinterpret_cast<float **>(exr_image.images)[idxR][i];
  5350. (*out_rgba)[4 * i + 1] =
  5351. reinterpret_cast<float **>(exr_image.images)[idxG][i];
  5352. (*out_rgba)[4 * i + 2] =
  5353. reinterpret_cast<float **>(exr_image.images)[idxB][i];
  5354. if (idxA != -1) {
  5355. (*out_rgba)[4 * i + 3] =
  5356. reinterpret_cast<float **>(exr_image.images)[idxA][i];
  5357. } else {
  5358. (*out_rgba)[4 * i + 3] = 1.0;
  5359. }
  5360. }
  5361. }
  5362. }
  5363. (*width) = exr_image.width;
  5364. (*height) = exr_image.height;
  5365. FreeEXRHeader(&exr_header);
  5366. FreeEXRImage(&exr_image);
  5367. return TINYEXR_SUCCESS;
  5368. }
  5369. int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header,
  5370. const char *filename, const char **err) {
  5371. if (exr_image == NULL) {
  5372. tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromFile", err);
  5373. return TINYEXR_ERROR_INVALID_ARGUMENT;
  5374. }
  5375. FILE *fp = NULL;
  5376. #ifdef _WIN32
  5377. #if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API) // MSVC, MinGW GCC, or Clang.
  5378. errno_t errcode =
  5379. _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  5380. if (errcode != 0) {
  5381. tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
5382. // TODO(syoyo): return the _wfopen_s error code
  5383. return TINYEXR_ERROR_CANT_OPEN_FILE;
  5384. }
  5385. #else
  5386. // Unknown compiler or MinGW without MINGW_HAS_SECURE_API.
  5387. fp = fopen(filename, "rb");
  5388. #endif
  5389. #else
  5390. fp = fopen(filename, "rb");
  5391. #endif
  5392. if (!fp) {
  5393. tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
  5394. return TINYEXR_ERROR_CANT_OPEN_FILE;
  5395. }
  5396. size_t filesize;
  5397. // Compute size
  5398. fseek(fp, 0, SEEK_END);
  5399. filesize = static_cast<size_t>(ftell(fp));
  5400. fseek(fp, 0, SEEK_SET);
  5401. if (filesize < 16) {
  5402. tinyexr::SetErrorMessage("File size too short " + std::string(filename),
  5403. err);
  5404. return TINYEXR_ERROR_INVALID_FILE;
  5405. }
  5406. std::vector<unsigned char> buf(filesize); // @todo { use mmap }
  5407. {
  5408. size_t ret;
  5409. ret = fread(&buf[0], 1, filesize, fp);
  5410. assert(ret == filesize);
  5411. fclose(fp);
  5412. (void)ret;
  5413. }
  5414. return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize,
  5415. err);
  5416. }
  5417. int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header,
  5418. const unsigned char *memory, const size_t size,
  5419. const char **err) {
  5420. if (exr_image == NULL || memory == NULL ||
  5421. (size < tinyexr::kEXRVersionSize)) {
  5422. tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromMemory",
  5423. err);
  5424. return TINYEXR_ERROR_INVALID_ARGUMENT;
  5425. }
  5426. if (exr_header->header_len == 0) {
  5427. tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
  5428. return TINYEXR_ERROR_INVALID_ARGUMENT;
  5429. }
  5430. const unsigned char *head = memory;
  5431. const unsigned char *marker = reinterpret_cast<const unsigned char *>(
  5432. memory + exr_header->header_len +
  5433. 8); // +8 for magic number + version header.
  5434. return tinyexr::DecodeEXRImage(exr_image, exr_header, head, marker, size,
  5435. err);
  5436. }
  5437. namespace tinyexr
  5438. {
  5439. #ifdef __clang__
  5440. #pragma clang diagnostic push
  5441. #pragma clang diagnostic ignored "-Wsign-conversion"
  5442. #endif
  5443. // out_data must be allocated initially with the block-header size
  5444. // of the current image(-part) type
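// For scan-line parts the caller reserves 2 * sizeof(int) (y, data size); for
// tiled parts 5 * sizeof(int) (tileX, tileY, levelX, levelY, data size).
// EncodePixelData() appends the (possibly compressed) pixel data after that
// reserved header space.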
  5445. static bool EncodePixelData(/* out */ std::vector<unsigned char>& out_data,
  5446. const unsigned char* const* images,
  5447. int compression_type,
  5448. int /*line_order*/,
  5449. int width, // for tiled : tile.width
  5450. int /*height*/, // for tiled : header.tile_size_y
  5451. int x_stride, // for tiled : header.tile_size_x
  5452. int line_no, // for tiled : 0
  5453. int num_lines, // for tiled : tile.height
  5454. size_t pixel_data_size,
  5455. const std::vector<ChannelInfo>& channels,
  5456. const std::vector<size_t>& channel_offset_list,
  5457. const void* compression_param = 0) // zfp compression param
  5458. {
  5459. size_t buf_size = static_cast<size_t>(width) *
  5460. static_cast<size_t>(num_lines) *
  5461. static_cast<size_t>(pixel_data_size);
  5462. //int last2bit = (buf_size & 3);
  5463. // buf_size must be multiple of four
  5464. //if(last2bit) buf_size += 4 - last2bit;
  5465. std::vector<unsigned char> buf(buf_size);
  5466. size_t start_y = static_cast<size_t>(line_no);
  5467. for (size_t c = 0; c < channels.size(); c++) {
  5468. if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
  5469. if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
  5470. for (int y = 0; y < num_lines; y++) {
  5471. // Assume increasing Y
  5472. float *line_ptr = reinterpret_cast<float *>(&buf.at(
  5473. static_cast<size_t>(pixel_data_size * size_t(y) * size_t(width)) +
  5474. channel_offset_list[c] *
  5475. static_cast<size_t>(width)));
  5476. for (int x = 0; x < width; x++) {
  5477. tinyexr::FP16 h16;
  5478. h16.u = reinterpret_cast<const unsigned short * const *>(
  5479. images)[c][(y + start_y) * size_t(x_stride) + size_t(x)];
  5480. tinyexr::FP32 f32 = half_to_float(h16);
  5481. tinyexr::swap4(&f32.f);
  5482. // line_ptr[x] = f32.f;
  5483. tinyexr::cpy4(line_ptr + x, &(f32.f));
  5484. }
  5485. }
  5486. } else if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) {
  5487. for (int y = 0; y < num_lines; y++) {
  5488. // Assume increasing Y
  5489. unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
  5490. &buf.at(static_cast<size_t>(pixel_data_size * y *
  5491. width) +
  5492. channel_offset_list[c] *
  5493. static_cast<size_t>(width)));
  5494. for (int x = 0; x < width; x++) {
  5495. unsigned short val = reinterpret_cast<const unsigned short * const *>(
  5496. images)[c][(y + start_y) * x_stride + x];
  5497. tinyexr::swap2(&val);
  5498. // line_ptr[x] = val;
  5499. tinyexr::cpy2(line_ptr + x, &val);
  5500. }
  5501. }
  5502. } else {
  5503. assert(0);
  5504. }
  5505. } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
  5506. if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) {
  5507. for (int y = 0; y < num_lines; y++) {
  5508. // Assume increasing Y
  5509. unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
  5510. &buf.at(static_cast<size_t>(pixel_data_size * y *
  5511. width) +
  5512. channel_offset_list[c] *
  5513. static_cast<size_t>(width)));
  5514. for (int x = 0; x < width; x++) {
  5515. tinyexr::FP32 f32;
  5516. f32.f = reinterpret_cast<const float * const *>(
  5517. images)[c][(y + start_y) * x_stride + x];
  5518. tinyexr::FP16 h16;
  5519. h16 = float_to_half_full(f32);
  5520. tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u));
  5521. // line_ptr[x] = h16.u;
  5522. tinyexr::cpy2(line_ptr + x, &(h16.u));
  5523. }
  5524. }
  5525. } else if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
  5526. for (int y = 0; y < num_lines; y++) {
  5527. // Assume increasing Y
  5528. float *line_ptr = reinterpret_cast<float *>(&buf.at(
  5529. static_cast<size_t>(pixel_data_size * y * width) +
  5530. channel_offset_list[c] *
  5531. static_cast<size_t>(width)));
  5532. for (int x = 0; x < width; x++) {
  5533. float val = reinterpret_cast<const float * const *>(
  5534. images)[c][(y + start_y) * x_stride + x];
  5535. tinyexr::swap4(&val);
  5536. // line_ptr[x] = val;
  5537. tinyexr::cpy4(line_ptr + x, &val);
  5538. }
  5539. }
  5540. } else {
  5541. assert(0);
  5542. }
  5543. } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
  5544. for (int y = 0; y < num_lines; y++) {
  5545. // Assume increasing Y
  5546. unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at(
  5547. static_cast<size_t>(pixel_data_size * y * width) +
  5548. channel_offset_list[c] * static_cast<size_t>(width)));
  5549. for (int x = 0; x < width; x++) {
  5550. unsigned int val = reinterpret_cast<const unsigned int * const *>(
  5551. images)[c][(y + start_y) * x_stride + x];
  5552. tinyexr::swap4(&val);
  5553. // line_ptr[x] = val;
  5554. tinyexr::cpy4(line_ptr + x, &val);
  5555. }
  5556. }
  5557. }
  5558. }
  5559. if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
  5560. // 4 byte: scan line
  5561. // 4 byte: data size
  5562. // ~ : pixel data(uncompressed)
  5563. out_data.insert(out_data.end(), buf.begin(), buf.end());
  5564. } else if ((compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
  5565. (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
  5566. #if TINYEXR_USE_MINIZ
  5567. std::vector<unsigned char> block(mz_compressBound(
  5568. static_cast<unsigned long>(buf.size())));
  5569. #elif TINYEXR_USE_STB_ZLIB
  5570. // there is no compressBound() function, so we use a value that
  5571. // is grossly overestimated, but should always work
  5572. std::vector<unsigned char> block(256 + 2 * buf.size());
  5573. #else
  5574. std::vector<unsigned char> block(
  5575. compressBound(static_cast<uLong>(buf.size())));
  5576. #endif
  5577. tinyexr::tinyexr_uint64 outSize = block.size();
  5578. tinyexr::CompressZip(&block.at(0), outSize,
  5579. reinterpret_cast<const unsigned char *>(&buf.at(0)),
  5580. static_cast<unsigned long>(buf.size()));
  5581. // 4 byte: scan line
  5582. // 4 byte: data size
  5583. // ~ : pixel data(compressed)
  5584. unsigned int data_len = static_cast<unsigned int>(outSize); // truncate
  5585. out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);
  5586. } else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
  5587. // (buf.size() * 3) / 2 would be enough.
  5588. std::vector<unsigned char> block((buf.size() * 3) / 2);
  5589. tinyexr::tinyexr_uint64 outSize = block.size();
  5590. tinyexr::CompressRle(&block.at(0), outSize,
  5591. reinterpret_cast<const unsigned char *>(&buf.at(0)),
  5592. static_cast<unsigned long>(buf.size()));
  5593. // 4 byte: scan line
  5594. // 4 byte: data size
  5595. // ~ : pixel data(compressed)
  5596. unsigned int data_len = static_cast<unsigned int>(outSize); // truncate
  5597. out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);
  5598. } else if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
  5599. #if TINYEXR_USE_PIZ
  5600. unsigned int bufLen =
  5601. 8192 + static_cast<unsigned int>(
  5602. 2 * static_cast<unsigned int>(
  5603. buf.size())); // @fixme { compute good bound. }
  5604. std::vector<unsigned char> block(bufLen);
  5605. unsigned int outSize = static_cast<unsigned int>(block.size());
  5606. CompressPiz(&block.at(0), &outSize,
  5607. reinterpret_cast<const unsigned char *>(&buf.at(0)),
  5608. buf.size(), channels, width, num_lines);
  5609. // 4 byte: scan line
  5610. // 4 byte: data size
  5611. // ~ : pixel data(compressed)
  5612. unsigned int data_len = outSize;
  5613. out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);
  5614. #else
  5615. assert(0);
  5616. #endif
  5617. } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
  5618. #if TINYEXR_USE_ZFP
  5619. const ZFPCompressionParam* zfp_compression_param = reinterpret_cast<const ZFPCompressionParam*>(compression_param);
  5620. std::vector<unsigned char> block;
  5621. unsigned int outSize;
  5622. tinyexr::CompressZfp(
  5623. &block, &outSize, reinterpret_cast<const float *>(&buf.at(0)),
  5624. width, num_lines, static_cast<int>(channels.size()), *zfp_compression_param);
  5625. // 4 byte: scan line
  5626. // 4 byte: data size
  5627. // ~ : pixel data(compressed)
  5628. unsigned int data_len = outSize;
  5629. out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);
  5630. #else
  5631. (void)compression_param;
  5632. assert(0);
  5633. #endif
  5634. } else {
  5635. assert(0);
  5636. return false;
  5637. }
  5638. return true;
  5639. }
  5640. static int EncodeTiledLevel(const EXRImage* level_image, const EXRHeader* exr_header,
  5641. const std::vector<tinyexr::ChannelInfo>& channels,
  5642. std::vector<std::vector<unsigned char> >& data_list,
  5643. size_t start_index, // for data_list
  5644. int num_x_tiles, int num_y_tiles,
  5645. const std::vector<size_t>& channel_offset_list,
  5646. int pixel_data_size,
  5647. const void* compression_param, // must be set if zfp compression is enabled
  5648. std::string* err) {
  5649. int num_tiles = num_x_tiles * num_y_tiles;
  5650. assert(num_tiles == level_image->num_tiles);
  5651. if ((exr_header->tile_size_x > level_image->width || exr_header->tile_size_y > level_image->height) &&
  5652. level_image->level_x == 0 && level_image->level_y == 0) {
  5653. if (err) {
  5654. (*err) += "Failed to encode tile data.\n";
  5655. }
  5656. return TINYEXR_ERROR_INVALID_DATA;
  5657. }
  5658. #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
  5659. std::atomic<bool> invalid_data(false);
  5660. #else
  5661. bool invalid_data(false);
  5662. #endif
  5663. #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
  5664. std::vector<std::thread> workers;
  5665. std::atomic<int> tile_count(0);
  5666. int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
  5667. if (num_threads > int(num_tiles)) {
  5668. num_threads = int(num_tiles);
  5669. }
  5670. for (int t = 0; t < num_threads; t++) {
  5671. workers.emplace_back(std::thread([&]() {
  5672. int i = 0;
  5673. while ((i = tile_count++) < num_tiles) {
  5674. #else
5675. // Use signed int since some OpenMP compilers don't allow an unsigned type for
5676. // `parallel for`
  5677. #if TINYEXR_USE_OPENMP
  5678. #pragma omp parallel for
  5679. #endif
  5680. for (int i = 0; i < num_tiles; i++) {
  5681. #endif
  5682. size_t tile_idx = static_cast<size_t>(i);
  5683. size_t data_idx = tile_idx + start_index;
  5684. int x_tile = i % num_x_tiles;
  5685. int y_tile = i / num_x_tiles;
  5686. EXRTile& tile = level_image->tiles[tile_idx];
  5687. const unsigned char* const* images =
  5688. static_cast<const unsigned char* const*>(tile.images);
  5689. data_list[data_idx].resize(5*sizeof(int));
  5690. size_t data_header_size = data_list[data_idx].size();
  5691. bool ret = EncodePixelData(data_list[data_idx],
  5692. images,
  5693. exr_header->compression_type,
  5694. 0, // increasing y
  5695. tile.width,
  5696. exr_header->tile_size_y,
  5697. exr_header->tile_size_x,
  5698. 0,
  5699. tile.height,
  5700. pixel_data_size,
  5701. channels,
  5702. channel_offset_list,
  5703. compression_param);
  5704. if (!ret) {
  5705. invalid_data = true;
  5706. continue;
  5707. }
  5708. assert(data_list[data_idx].size() > data_header_size);
  5709. int data_len = static_cast<int>(data_list[data_idx].size() - data_header_size);
5710. // Block header: tileX, tileY, levelX, levelY, then data size (five 4-byte ints)
  5711. memcpy(&data_list[data_idx][0], &x_tile, sizeof(int));
  5712. memcpy(&data_list[data_idx][4], &y_tile, sizeof(int));
  5713. memcpy(&data_list[data_idx][8], &level_image->level_x, sizeof(int));
  5714. memcpy(&data_list[data_idx][12], &level_image->level_y, sizeof(int));
  5715. memcpy(&data_list[data_idx][16], &data_len, sizeof(int));
  5716. swap4(reinterpret_cast<int*>(&data_list[data_idx][0]));
  5717. swap4(reinterpret_cast<int*>(&data_list[data_idx][4]));
  5718. swap4(reinterpret_cast<int*>(&data_list[data_idx][8]));
  5719. swap4(reinterpret_cast<int*>(&data_list[data_idx][12]));
  5720. swap4(reinterpret_cast<int*>(&data_list[data_idx][16]));
  5721. #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
  5722. }
  5723. }));
  5724. }
  5725. for (auto &t : workers) {
  5726. t.join();
  5727. }
  5728. #else
  5729. } // omp parallel
  5730. #endif
  5731. if (invalid_data) {
  5732. if (err) {
  5733. (*err) += "Failed to encode tile data.\n";
  5734. }
  5735. return TINYEXR_ERROR_INVALID_DATA;
  5736. }
  5737. return TINYEXR_SUCCESS;
  5738. }
  5739. static int NumScanlines(int compression_type) {
  5740. int num_scanlines = 1;
  5741. if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
  5742. num_scanlines = 16;
  5743. } else if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
  5744. num_scanlines = 32;
  5745. } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
  5746. num_scanlines = 16;
  5747. }
  5748. return num_scanlines;
  5749. }
  5750. static int EncodeChunk(const EXRImage* exr_image, const EXRHeader* exr_header,
  5751. const std::vector<ChannelInfo>& channels,
  5752. int num_blocks,
  5753. tinyexr_uint64 chunk_offset, // starting offset of current chunk
  5754. bool is_multipart,
  5755. OffsetData& offset_data, // output block offsets, must be initialized
  5756. std::vector<std::vector<unsigned char> >& data_list, // output
  5757. tinyexr_uint64& total_size, // output: ending offset of current chunk
  5758. std::string* err) {
  5759. int num_scanlines = NumScanlines(exr_header->compression_type);
  5760. data_list.resize(num_blocks);
  5761. std::vector<size_t> channel_offset_list(
  5762. static_cast<size_t>(exr_header->num_channels));
  5763. int pixel_data_size = 0;
  5764. {
  5765. size_t channel_offset = 0;
  5766. for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
  5767. channel_offset_list[c] = channel_offset;
  5768. if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) {
  5769. pixel_data_size += sizeof(unsigned short);
  5770. channel_offset += sizeof(unsigned short);
  5771. } else if (channels[c].requested_pixel_type ==
  5772. TINYEXR_PIXELTYPE_FLOAT) {
  5773. pixel_data_size += sizeof(float);
  5774. channel_offset += sizeof(float);
  5775. } else if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_UINT) {
  5776. pixel_data_size += sizeof(unsigned int);
  5777. channel_offset += sizeof(unsigned int);
  5778. } else {
  5779. assert(0);
  5780. }
  5781. }
  5782. }
  5783. const void* compression_param = 0;
  5784. #if TINYEXR_USE_ZFP
  5785. tinyexr::ZFPCompressionParam zfp_compression_param;
  5786. // Use ZFP compression parameter from custom attributes(if such a parameter
  5787. // exists)
  5788. {
  5789. std::string e;
  5790. bool ret = tinyexr::FindZFPCompressionParam(
  5791. &zfp_compression_param, exr_header->custom_attributes,
  5792. exr_header->num_custom_attributes, &e);
  5793. if (!ret) {
  5794. // Use predefined compression parameter.
  5795. zfp_compression_param.type = 0;
  5796. zfp_compression_param.rate = 2;
  5797. }
  5798. compression_param = &zfp_compression_param;
  5799. }
  5800. #endif
  5801. tinyexr_uint64 offset = chunk_offset;
  5802. tinyexr_uint64 doffset = is_multipart ? 4u : 0u;
  5803. if (exr_image->tiles) {
  5804. const EXRImage* level_image = exr_image;
  5805. size_t block_idx = 0;
  5806. //tinyexr::tinyexr_uint64 block_data_size = 0;
  5807. int num_levels = (exr_header->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) ?
  5808. offset_data.num_x_levels : (offset_data.num_x_levels * offset_data.num_y_levels);
  5809. for (int level_index = 0; level_index < num_levels; ++level_index) {
  5810. if (!level_image) {
  5811. if (err) {
  5812. (*err) += "Invalid number of tiled levels for EncodeChunk\n";
  5813. }
  5814. return TINYEXR_ERROR_INVALID_DATA;
  5815. }
  5816. int level_index_from_image = LevelIndex(level_image->level_x, level_image->level_y,
  5817. exr_header->tile_level_mode, offset_data.num_x_levels);
  5818. if (level_index_from_image != level_index) {
  5819. if (err) {
  5820. (*err) += "Incorrect level ordering in tiled image\n";
  5821. }
  5822. return TINYEXR_ERROR_INVALID_DATA;
  5823. }
  5824. int num_y_tiles = int(offset_data.offsets[level_index].size());
  5825. assert(num_y_tiles);
  5826. int num_x_tiles = int(offset_data.offsets[level_index][0].size());
  5827. assert(num_x_tiles);
  5828. std::string e;
  5829. int ret = EncodeTiledLevel(level_image,
  5830. exr_header,
  5831. channels,
  5832. data_list,
  5833. block_idx,
  5834. num_x_tiles,
  5835. num_y_tiles,
  5836. channel_offset_list,
  5837. pixel_data_size,
  5838. compression_param,
  5839. &e);
  5840. if (ret != TINYEXR_SUCCESS) {
  5841. if (!e.empty() && err) {
  5842. (*err) += e;
  5843. }
  5844. return ret;
  5845. }
  5846. for (size_t j = 0; j < static_cast<size_t>(num_y_tiles); ++j)
  5847. for (size_t i = 0; i < static_cast<size_t>(num_x_tiles); ++i) {
  5848. offset_data.offsets[level_index][j][i] = offset;
  5849. swap8(reinterpret_cast<tinyexr_uint64*>(&offset_data.offsets[level_index][j][i]));
  5850. offset += data_list[block_idx].size() + doffset;
  5851. //block_data_size += data_list[block_idx].size();
  5852. ++block_idx;
  5853. }
  5854. level_image = level_image->next_level;
  5855. }
  5856. assert(static_cast<int>(block_idx) == num_blocks);
  5857. total_size = offset;
  5858. } else { // scanlines
  5859. std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0];
  5860. #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
  5861. std::atomic<bool> invalid_data(false);
  5862. std::vector<std::thread> workers;
  5863. std::atomic<int> block_count(0);
  5864. int num_threads = std::min(std::max(1, int(std::thread::hardware_concurrency())), num_blocks);
  5865. for (int t = 0; t < num_threads; t++) {
  5866. workers.emplace_back(std::thread([&]() {
  5867. int i = 0;
  5868. while ((i = block_count++) < num_blocks) {
  5869. #else
  5870. bool invalid_data(false);
  5871. #if TINYEXR_USE_OPENMP
  5872. #pragma omp parallel for
  5873. #endif
  5874. for (int i = 0; i < num_blocks; i++) {
  5875. #endif
  5876. int start_y = num_scanlines * i;
  5877. int end_Y = (std::min)(num_scanlines * (i + 1), exr_image->height);
  5878. int num_lines = end_Y - start_y;
  5879. const unsigned char* const* images =
  5880. static_cast<const unsigned char* const*>(exr_image->images);
  5881. data_list[i].resize(2*sizeof(int));
  5882. size_t data_header_size = data_list[i].size();
  5883. bool ret = EncodePixelData(data_list[i],
  5884. images,
  5885. exr_header->compression_type,
  5886. 0, // increasing y
  5887. exr_image->width,
  5888. exr_image->height,
  5889. exr_image->width,
  5890. start_y,
  5891. num_lines,
  5892. pixel_data_size,
  5893. channels,
  5894. channel_offset_list,
  5895. compression_param);
  5896. if (!ret) {
  5897. invalid_data = true;
  5898. continue; // "break" cannot be used with OpenMP
  5899. }
  5900. assert(data_list[i].size() > data_header_size);
  5901. int data_len = static_cast<int>(data_list[i].size() - data_header_size);
  5902. memcpy(&data_list[i][0], &start_y, sizeof(int));
  5903. memcpy(&data_list[i][4], &data_len, sizeof(int));
  5904. swap4(reinterpret_cast<int*>(&data_list[i][0]));
  5905. swap4(reinterpret_cast<int*>(&data_list[i][4]));
  5906. #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
  5907. }
  5908. }));
  5909. }
  5910. for (auto &t : workers) {
  5911. t.join();
  5912. }
  5913. #else
  5914. } // omp parallel
  5915. #endif
  5916. if (invalid_data) {
  5917. if (err) {
  5918. (*err) += "Failed to encode scanline data.\n";
  5919. }
  5920. return TINYEXR_ERROR_INVALID_DATA;
  5921. }
  5922. for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) {
  5923. offsets[i] = offset;
  5924. tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i]));
  5925. offset += data_list[i].size() + doffset;
  5926. }
  5927. total_size = static_cast<size_t>(offset);
  5928. }
  5929. return TINYEXR_SUCCESS;
  5930. }
5931. // Can save a single-part or multi-part image (deep images are not supported).
  5932. static size_t SaveEXRNPartImageToMemory(const EXRImage* exr_images,
  5933. const EXRHeader** exr_headers,
  5934. unsigned int num_parts,
  5935. unsigned char** memory_out, const char** err) {
  5936. if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
  5937. memory_out == NULL) {
  5938. SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory",
  5939. err);
  5940. return 0;
  5941. }
  5942. {
  5943. for (unsigned int i = 0; i < num_parts; ++i) {
  5944. if (exr_headers[i]->compression_type < 0) {
  5945. SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory",
  5946. err);
  5947. return 0;
  5948. }
  5949. #if !TINYEXR_USE_PIZ
  5950. if (exr_headers[i]->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
  5951. SetErrorMessage("PIZ compression is not supported in this build",
  5952. err);
  5953. return 0;
  5954. }
  5955. #endif
  5956. #if !TINYEXR_USE_ZFP
  5957. if (exr_headers[i]->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
  5958. SetErrorMessage("ZFP compression is not supported in this build",
  5959. err);
  5960. return 0;
  5961. }
  5962. #else
5963. for (int c = 0; c < exr_headers[i]->num_channels; ++c) {
  5964. if (exr_headers[i]->requested_pixel_types[c] != TINYEXR_PIXELTYPE_FLOAT) {
  5965. SetErrorMessage("Pixel type must be FLOAT for ZFP compression",
  5966. err);
  5967. return 0;
  5968. }
  5969. }
  5970. #endif
  5971. }
  5972. }
  5973. std::vector<unsigned char> memory;
  5974. // Header
  5975. {
  5976. const char header[] = { 0x76, 0x2f, 0x31, 0x01 };
  5977. memory.insert(memory.end(), header, header + 4);
  5978. }
  5979. // Version
  5980. // using value from the first header
  5981. int long_name = exr_headers[0]->long_name;
  5982. {
  5983. char marker[] = { 2, 0, 0, 0 };
  5984. /* @todo
  5985. if (exr_header->non_image) {
  5986. marker[1] |= 0x8;
  5987. }
  5988. */
  5989. // tiled
  5990. if (num_parts == 1 && exr_images[0].tiles) {
  5991. marker[1] |= 0x2;
  5992. }
  5993. // long_name
  5994. if (long_name) {
  5995. marker[1] |= 0x4;
  5996. }
  5997. // multipart
  5998. if (num_parts > 1) {
  5999. marker[1] |= 0x10;
  6000. }
  6001. memory.insert(memory.end(), marker, marker + 4);
  6002. }
  6003. int total_chunk_count = 0;
  6004. std::vector<int> chunk_count(num_parts);
  6005. std::vector<OffsetData> offset_data(num_parts);
  6006. for (unsigned int i = 0; i < num_parts; ++i) {
  6007. if (!exr_images[i].tiles) {
  6008. int num_scanlines = NumScanlines(exr_headers[i]->compression_type);
  6009. chunk_count[i] =
  6010. (exr_images[i].height + num_scanlines - 1) / num_scanlines;
  6011. InitSingleResolutionOffsets(offset_data[i], chunk_count[i]);
  6012. total_chunk_count += chunk_count[i];
  6013. } else {
  6014. {
  6015. std::vector<int> num_x_tiles, num_y_tiles;
  6016. PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_headers[i]);
  6017. chunk_count[i] =
  6018. InitTileOffsets(offset_data[i], exr_headers[i], num_x_tiles, num_y_tiles);
  6019. total_chunk_count += chunk_count[i];
  6020. }
  6021. }
  6022. }
  6023. // Write attributes to memory buffer.
  6024. std::vector< std::vector<tinyexr::ChannelInfo> > channels(num_parts);
  6025. {
  6026. std::set<std::string> partnames;
  6027. for (unsigned int i = 0; i < num_parts; ++i) {
  6028. //channels
  6029. {
  6030. std::vector<unsigned char> data;
  6031. for (int c = 0; c < exr_headers[i]->num_channels; c++) {
  6032. tinyexr::ChannelInfo info;
  6033. info.p_linear = 0;
  6034. info.pixel_type = exr_headers[i]->pixel_types[c];
  6035. info.requested_pixel_type = exr_headers[i]->requested_pixel_types[c];
  6036. info.x_sampling = 1;
  6037. info.y_sampling = 1;
  6038. info.name = std::string(exr_headers[i]->channels[c].name);
  6039. channels[i].push_back(info);
  6040. }
  6041. tinyexr::WriteChannelInfo(data, channels[i]);
  6042. tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0),
  6043. static_cast<int>(data.size()));
  6044. }
  6045. {
  6046. int comp = exr_headers[i]->compression_type;
  6047. swap4(&comp);
  6048. WriteAttributeToMemory(
  6049. &memory, "compression", "compression",
  6050. reinterpret_cast<const unsigned char*>(&comp), 1);
  6051. }
  6052. {
  6053. int data[4] = { 0, 0, exr_images[i].width - 1, exr_images[i].height - 1 };
  6054. swap4(&data[0]);
  6055. swap4(&data[1]);
  6056. swap4(&data[2]);
  6057. swap4(&data[3]);
  6058. WriteAttributeToMemory(
  6059. &memory, "dataWindow", "box2i",
  6060. reinterpret_cast<const unsigned char*>(data), sizeof(int) * 4);
  6061. int data0[4] = { 0, 0, exr_images[0].width - 1, exr_images[0].height - 1 };
  6062. swap4(&data0[0]);
  6063. swap4(&data0[1]);
  6064. swap4(&data0[2]);
  6065. swap4(&data0[3]);
  6066. // Note: must be the same across parts (currently, using value from the first header)
  6067. WriteAttributeToMemory(
  6068. &memory, "displayWindow", "box2i",
  6069. reinterpret_cast<const unsigned char*>(data0), sizeof(int) * 4);
  6070. }
  6071. {
  6072. unsigned char line_order = 0; // @fixme { read line_order from EXRHeader }
  6073. WriteAttributeToMemory(&memory, "lineOrder", "lineOrder",
  6074. &line_order, 1);
  6075. }
  6076. {
  6077. // Note: must be the same across parts
  6078. float aspectRatio = 1.0f;
  6079. swap4(&aspectRatio);
  6080. WriteAttributeToMemory(
  6081. &memory, "pixelAspectRatio", "float",
  6082. reinterpret_cast<const unsigned char*>(&aspectRatio), sizeof(float));
  6083. }
  6084. {
  6085. float center[2] = { 0.0f, 0.0f };
  6086. swap4(&center[0]);
  6087. swap4(&center[1]);
  6088. WriteAttributeToMemory(
  6089. &memory, "screenWindowCenter", "v2f",
  6090. reinterpret_cast<const unsigned char*>(center), 2 * sizeof(float));
  6091. }
  6092. {
  6093. float w = 1.0f;
  6094. swap4(&w);
  6095. WriteAttributeToMemory(&memory, "screenWindowWidth", "float",
  6096. reinterpret_cast<const unsigned char*>(&w),
  6097. sizeof(float));
  6098. }
  6099. if (exr_images[i].tiles) {
  6100. unsigned char tile_mode = static_cast<unsigned char>(exr_headers[i]->tile_level_mode & 0x3);
  6101. if (exr_headers[i]->tile_rounding_mode) tile_mode |= (1u << 4u);
  6102. //unsigned char data[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
  6103. unsigned int datai[3] = { 0, 0, 0 };
  6104. unsigned char* data = reinterpret_cast<unsigned char*>(&datai[0]);
  6105. datai[0] = static_cast<unsigned int>(exr_headers[i]->tile_size_x);
  6106. datai[1] = static_cast<unsigned int>(exr_headers[i]->tile_size_y);
  6107. data[8] = tile_mode;
  6108. swap4(reinterpret_cast<unsigned int*>(&data[0]));
  6109. swap4(reinterpret_cast<unsigned int*>(&data[4]));
  6110. WriteAttributeToMemory(
  6111. &memory, "tiles", "tiledesc",
  6112. reinterpret_cast<const unsigned char*>(data), 9);
  6113. }
  6114. // must be present for multi-part files - according to spec.
  6115. if (num_parts > 1) {
  6116. // name
  6117. {
  6118. size_t len = 0;
  6119. if ((len = strlen(exr_headers[i]->name)) > 0) {
  6120. partnames.emplace(exr_headers[i]->name);
  6121. if (partnames.size() != i + 1) {
  6122. SetErrorMessage("'name' attributes must be unique for a multi-part file", err);
  6123. return 0;
  6124. }
  6125. WriteAttributeToMemory(
  6126. &memory, "name", "string",
  6127. reinterpret_cast<const unsigned char*>(exr_headers[i]->name),
  6128. static_cast<int>(len));
  6129. } else {
  6130. SetErrorMessage("Invalid 'name' attribute for a multi-part file", err);
  6131. return 0;
  6132. }
  6133. }
  6134. // type
  6135. {
  6136. const char* type = "scanlineimage";
  6137. if (exr_images[i].tiles) type = "tiledimage";
  6138. WriteAttributeToMemory(
  6139. &memory, "type", "string",
  6140. reinterpret_cast<const unsigned char*>(type),
  6141. static_cast<int>(strlen(type)));
  6142. }
  6143. // chunkCount
  6144. {
  6145. WriteAttributeToMemory(
  6146. &memory, "chunkCount", "int",
  6147. reinterpret_cast<const unsigned char*>(&chunk_count[i]),
  6148. 4);
  6149. }
  6150. }
  6151. // Custom attributes
  6152. if (exr_headers[i]->num_custom_attributes > 0) {
  6153. for (int j = 0; j < exr_headers[i]->num_custom_attributes; j++) {
  6154. tinyexr::WriteAttributeToMemory(
  6155. &memory, exr_headers[i]->custom_attributes[j].name,
  6156. exr_headers[i]->custom_attributes[j].type,
  6157. reinterpret_cast<const unsigned char*>(
  6158. exr_headers[i]->custom_attributes[j].value),
  6159. exr_headers[i]->custom_attributes[j].size);
  6160. }
  6161. }
  6162. { // end of header
  6163. memory.push_back(0);
  6164. }
  6165. }
  6166. }
  6167. if (num_parts > 1) {
  6168. // end of header list
  6169. memory.push_back(0);
  6170. }
  6171. tinyexr_uint64 chunk_offset = memory.size() + size_t(total_chunk_count) * sizeof(tinyexr_uint64);
  6172. tinyexr_uint64 total_size = 0;
  6173. std::vector< std::vector< std::vector<unsigned char> > > data_lists(num_parts);
  6174. for (unsigned int i = 0; i < num_parts; ++i) {
  6175. std::string e;
  6176. int ret = EncodeChunk(&exr_images[i], exr_headers[i],
  6177. channels[i],
  6178. chunk_count[i],
  6179. // starting offset of current chunk after part-number
  6180. chunk_offset,
  6181. num_parts > 1,
  6182. offset_data[i], // output: block offsets, must be initialized
  6183. data_lists[i], // output
  6184. total_size, // output
  6185. &e);
  6186. if (ret != TINYEXR_SUCCESS) {
  6187. if (!e.empty()) {
  6188. tinyexr::SetErrorMessage(e, err);
  6189. }
  6190. return 0;
  6191. }
  6192. chunk_offset = total_size;
  6193. }
  6194. // Allocating required memory
  6195. if (total_size == 0) { // something went wrong
  6196. tinyexr::SetErrorMessage("Output memory size is zero", err);
  6197. return 0;
  6198. }
6199. (*memory_out) = static_cast<unsigned char*>(malloc(total_size));
if ((*memory_out) == NULL) {
  tinyexr::SetErrorMessage("Failed to allocate memory for EXR output", err);
  return 0;
}
  6200. // Writing header
  6201. memcpy((*memory_out), &memory[0], memory.size());
  6202. unsigned char* memory_ptr = *memory_out + memory.size();
  6203. size_t sum = memory.size();
  6204. // Writing offset data for chunks
  6205. for (unsigned int i = 0; i < num_parts; ++i) {
  6206. if (exr_images[i].tiles) {
  6207. const EXRImage* level_image = &exr_images[i];
  6208. int num_levels = (exr_headers[i]->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) ?
  6209. offset_data[i].num_x_levels : (offset_data[i].num_x_levels * offset_data[i].num_y_levels);
  6210. for (int level_index = 0; level_index < num_levels; ++level_index) {
  6211. for (size_t j = 0; j < offset_data[i].offsets[level_index].size(); ++j) {
  6212. size_t num_bytes = sizeof(tinyexr_uint64) * offset_data[i].offsets[level_index][j].size();
  6213. sum += num_bytes;
  6214. assert(sum <= total_size);
  6215. memcpy(memory_ptr,
  6216. reinterpret_cast<unsigned char*>(&offset_data[i].offsets[level_index][j][0]),
  6217. num_bytes);
  6218. memory_ptr += num_bytes;
  6219. }
  6220. level_image = level_image->next_level;
  6221. }
  6222. } else {
  6223. size_t num_bytes = sizeof(tinyexr::tinyexr_uint64) * static_cast<size_t>(chunk_count[i]);
  6224. sum += num_bytes;
  6225. assert(sum <= total_size);
  6226. std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data[i].offsets[0][0];
  6227. memcpy(memory_ptr, reinterpret_cast<unsigned char*>(&offsets[0]), num_bytes);
  6228. memory_ptr += num_bytes;
  6229. }
  6230. }
  6231. // Writing chunk data
  6232. for (unsigned int i = 0; i < num_parts; ++i) {
  6233. for (size_t j = 0; j < static_cast<size_t>(chunk_count[i]); ++j) {
  6234. if (num_parts > 1) {
  6235. sum += 4;
  6236. assert(sum <= total_size);
  6237. unsigned int part_number = i;
  6238. swap4(&part_number);
  6239. memcpy(memory_ptr, &part_number, 4);
  6240. memory_ptr += 4;
  6241. }
  6242. sum += data_lists[i][j].size();
  6243. assert(sum <= total_size);
  6244. memcpy(memory_ptr, &data_lists[i][j][0], data_lists[i][j].size());
  6245. memory_ptr += data_lists[i][j].size();
  6246. }
  6247. }
  6248. assert(sum == total_size);
  6249. return total_size; // OK
  6250. }
  6251. #ifdef __clang__
  6252. #pragma clang diagnostic pop
  6253. #endif
  6254. } // tinyexr
  6255. size_t SaveEXRImageToMemory(const EXRImage* exr_image,
  6256. const EXRHeader* exr_header,
  6257. unsigned char** memory_out, const char** err) {
  6258. return tinyexr::SaveEXRNPartImageToMemory(exr_image, &exr_header, 1, memory_out, err);
  6259. }
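// Usage sketch for SaveEXRImageToMemory() (illustrative only, not part of the
// library; assumes `header` and `image` were already initialized and filled in,
// e.g. as done in SaveEXR() near the end of this file):
//
//   unsigned char *mem = NULL;
//   const char *err = NULL;
//   size_t mem_size = SaveEXRImageToMemory(&image, &header, &mem, &err);
//   if (mem_size == 0) {
//     fprintf(stderr, "Serialization failed: %s\n", err ? err : "unknown");
//     FreeEXRErrorMessage(err);
//   } else {
//     // `mem` now holds a complete .exr byte stream of `mem_size` bytes.
//     free(mem);  // the caller owns the buffer
//   }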
  6260. int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header,
  6261. const char *filename, const char **err) {
  6262. if (exr_image == NULL || filename == NULL ||
  6263. exr_header->compression_type < 0) {
  6264. tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToFile", err);
  6265. return TINYEXR_ERROR_INVALID_ARGUMENT;
  6266. }
  6267. #if !TINYEXR_USE_PIZ
  6268. if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
  6269. tinyexr::SetErrorMessage("PIZ compression is not supported in this build",
  6270. err);
  6271. return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
  6272. }
  6273. #endif
  6274. #if !TINYEXR_USE_ZFP
  6275. if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
  6276. tinyexr::SetErrorMessage("ZFP compression is not supported in this build",
  6277. err);
  6278. return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
  6279. }
  6280. #endif
  6281. FILE *fp = NULL;
  6282. #ifdef _WIN32
  6283. #if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API) // MSVC, MinGW GCC, or Clang
  6284. errno_t errcode =
  6285. _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"wb");
  6286. if (errcode != 0) {
  6287. tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
  6288. err);
  6289. return TINYEXR_ERROR_CANT_WRITE_FILE;
  6290. }
  6291. #else
  6292. // Unknown compiler or MinGW without MINGW_HAS_SECURE_API.
  6293. fp = fopen(filename, "wb");
  6294. #endif
  6295. #else
  6296. fp = fopen(filename, "wb");
  6297. #endif
  6298. if (!fp) {
  6299. tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
  6300. err);
  6301. return TINYEXR_ERROR_CANT_WRITE_FILE;
  6302. }
  6303. unsigned char *mem = NULL;
  6304. size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err);
  6305. if (mem_size == 0) {
fclose(fp);  // do not leak the file handle when serialization fails
6306. return TINYEXR_ERROR_SERIALZATION_FAILED;
  6307. }
  6308. size_t written_size = 0;
  6309. if ((mem_size > 0) && mem) {
  6310. written_size = fwrite(mem, 1, mem_size, fp);
  6311. }
  6312. free(mem);
  6313. fclose(fp);
  6314. if (written_size != mem_size) {
  6315. tinyexr::SetErrorMessage("Cannot write a file", err);
  6316. return TINYEXR_ERROR_CANT_WRITE_FILE;
  6317. }
  6318. return TINYEXR_SUCCESS;
  6319. }
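// Usage sketch for SaveEXRImageToFile() (illustrative only; SaveEXR() near the
// end of this file shows the full header/image setup, including the (A)BGR
// channel ordering):
//
//   const char *err = NULL;
//   int ret = SaveEXRImageToFile(&image, &header, "out.exr", &err);
//   if (ret != TINYEXR_SUCCESS) {
//     fprintf(stderr, "Save failed (%d): %s\n", ret, err ? err : "unknown");
//     FreeEXRErrorMessage(err);
//   }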
  6320. size_t SaveEXRMultipartImageToMemory(const EXRImage* exr_images,
  6321. const EXRHeader** exr_headers,
  6322. unsigned int num_parts,
  6323. unsigned char** memory_out, const char** err) {
  6324. if (exr_images == NULL || exr_headers == NULL || num_parts < 2 ||
  6325. memory_out == NULL) {
  6326. tinyexr::SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory",
  6327. err);
  6328. return 0;
  6329. }
  6330. return tinyexr::SaveEXRNPartImageToMemory(exr_images, exr_headers, num_parts, memory_out, err);
  6331. }
  6332. int SaveEXRMultipartImageToFile(const EXRImage* exr_images,
  6333. const EXRHeader** exr_headers,
  6334. unsigned int num_parts,
  6335. const char* filename,
  6336. const char** err) {
  6337. if (exr_images == NULL || exr_headers == NULL || num_parts < 2) {
  6338. tinyexr::SetErrorMessage("Invalid argument for SaveEXRMultipartImageToFile",
  6339. err);
  6340. return TINYEXR_ERROR_INVALID_ARGUMENT;
  6341. }
  6342. FILE *fp = NULL;
  6343. #ifdef _WIN32
  6344. #if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API) // MSVC, MinGW GCC, or Clang.
  6345. errno_t errcode =
  6346. _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"wb");
  6347. if (errcode != 0) {
  6348. tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
  6349. err);
  6350. return TINYEXR_ERROR_CANT_WRITE_FILE;
  6351. }
  6352. #else
  6353. // Unknown compiler or MinGW without MINGW_HAS_SECURE_API.
  6354. fp = fopen(filename, "wb");
  6355. #endif
  6356. #else
  6357. fp = fopen(filename, "wb");
  6358. #endif
  6359. if (!fp) {
  6360. tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
  6361. err);
  6362. return TINYEXR_ERROR_CANT_WRITE_FILE;
  6363. }
  6364. unsigned char *mem = NULL;
  6365. size_t mem_size = SaveEXRMultipartImageToMemory(exr_images, exr_headers, num_parts, &mem, err);
  6366. if (mem_size == 0) {
fclose(fp);  // do not leak the file handle when serialization fails
6367. return TINYEXR_ERROR_SERIALZATION_FAILED;
  6368. }
  6369. size_t written_size = 0;
  6370. if ((mem_size > 0) && mem) {
  6371. written_size = fwrite(mem, 1, mem_size, fp);
  6372. }
  6373. free(mem);
  6374. fclose(fp);
  6375. if (written_size != mem_size) {
  6376. tinyexr::SetErrorMessage("Cannot write a file", err);
  6377. return TINYEXR_ERROR_CANT_WRITE_FILE;
  6378. }
  6379. return TINYEXR_SUCCESS;
  6380. }
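// Usage sketch for the multi-part writer (illustrative only; `header0`/`header1`
// and `image0`/`image1` are assumed to be fully initialized single-part
// header/image pairs):
//
//   EXRSetNameAttr(&header0, "left");   // 'name' must be unique per part
//   EXRSetNameAttr(&header1, "right");
//   const EXRHeader *headers[2] = { &header0, &header1 };
//   EXRImage images[2] = { image0, image1 };
//   const char *err = NULL;
//   if (SaveEXRMultipartImageToFile(images, headers, 2, "stereo.exr", &err) !=
//       TINYEXR_SUCCESS) {
//     fprintf(stderr, "%s\n", err ? err : "save failed");
//     FreeEXRErrorMessage(err);
//   }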
  6381. int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) {
  6382. if (deep_image == NULL) {
  6383. tinyexr::SetErrorMessage("Invalid argument for LoadDeepEXR", err);
  6384. return TINYEXR_ERROR_INVALID_ARGUMENT;
  6385. }
  6386. #ifdef _WIN32
  6387. FILE *fp = NULL;
  6388. #if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API) // MSVC, MinGW GCC, or Clang.
  6389. errno_t errcode =
  6390. _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  6391. if (errcode != 0) {
  6392. tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
  6393. err);
  6394. return TINYEXR_ERROR_CANT_OPEN_FILE;
  6395. }
  6396. #else
  6397. // Unknown compiler or MinGW without MINGW_HAS_SECURE_API.
  6398. fp = fopen(filename, "rb");
  6399. #endif
  6400. if (!fp) {
  6401. tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
  6402. err);
  6403. return TINYEXR_ERROR_CANT_OPEN_FILE;
  6404. }
  6405. #else
  6406. FILE *fp = fopen(filename, "rb");
  6407. if (!fp) {
  6408. tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
  6409. err);
  6410. return TINYEXR_ERROR_CANT_OPEN_FILE;
  6411. }
  6412. #endif
  6413. size_t filesize;
  6414. // Compute size
  6415. fseek(fp, 0, SEEK_END);
  6416. filesize = static_cast<size_t>(ftell(fp));
  6417. fseek(fp, 0, SEEK_SET);
  6418. if (filesize == 0) {
  6419. fclose(fp);
  6420. tinyexr::SetErrorMessage("File size is zero : " + std::string(filename),
  6421. err);
  6422. return TINYEXR_ERROR_INVALID_FILE;
  6423. }
  6424. std::vector<char> buf(filesize); // @todo { use mmap }
  6425. {
  6426. size_t ret;
  6427. ret = fread(&buf[0], 1, filesize, fp);
  6428. assert(ret == filesize);
  6429. (void)ret;
  6430. }
  6431. fclose(fp);
  6432. const char *head = &buf[0];
  6433. const char *marker = &buf[0];
  6434. // Header check.
  6435. {
  6436. const char header[] = {0x76, 0x2f, 0x31, 0x01};
  6437. if (memcmp(marker, header, 4) != 0) {
  6438. tinyexr::SetErrorMessage("Invalid magic number", err);
  6439. return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
  6440. }
  6441. marker += 4;
  6442. }
  6443. // Version, scanline.
  6444. {
  6445. // ver 2.0, scanline, deep bit on(0x800)
6446. // must be [2, 8, 0, 0] (the deep-data bit is set in the second byte)
  6447. if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) {
  6448. tinyexr::SetErrorMessage("Unsupported version or scanline", err);
  6449. return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
  6450. }
  6451. marker += 4;
  6452. }
  6453. int dx = -1;
  6454. int dy = -1;
  6455. int dw = -1;
  6456. int dh = -1;
  6457. int num_scanline_blocks = 1; // 16 for ZIP compression.
  6458. int compression_type = -1;
  6459. int num_channels = -1;
  6460. std::vector<tinyexr::ChannelInfo> channels;
  6461. // Read attributes
  6462. size_t size = filesize - tinyexr::kEXRVersionSize;
  6463. for (;;) {
  6464. if (0 == size) {
  6465. return TINYEXR_ERROR_INVALID_DATA;
  6466. } else if (marker[0] == '\0') {
  6467. marker++;
  6468. size--;
  6469. break;
  6470. }
  6471. std::string attr_name;
  6472. std::string attr_type;
  6473. std::vector<unsigned char> data;
  6474. size_t marker_size;
  6475. if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
  6476. marker, size)) {
  6477. std::stringstream ss;
  6478. ss << "Failed to parse attribute\n";
  6479. tinyexr::SetErrorMessage(ss.str(), err);
  6480. return TINYEXR_ERROR_INVALID_DATA;
  6481. }
  6482. marker += marker_size;
  6483. size -= marker_size;
  6484. if (attr_name.compare("compression") == 0) {
  6485. compression_type = data[0];
  6486. if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) {
  6487. std::stringstream ss;
  6488. ss << "Unsupported compression type : " << compression_type;
  6489. tinyexr::SetErrorMessage(ss.str(), err);
  6490. return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
  6491. }
  6492. if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
  6493. num_scanline_blocks = 16;
  6494. }
  6495. } else if (attr_name.compare("channels") == 0) {
  6496. // name: zero-terminated string, from 1 to 255 bytes long
  6497. // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
  6498. // pLinear: unsigned char, possible values are 0 and 1
  6499. // reserved: three chars, should be zero
  6500. // xSampling: int
  6501. // ySampling: int
  6502. if (!tinyexr::ReadChannelInfo(channels, data)) {
  6503. tinyexr::SetErrorMessage("Failed to parse channel info", err);
  6504. return TINYEXR_ERROR_INVALID_DATA;
  6505. }
  6506. num_channels = static_cast<int>(channels.size());
  6507. if (num_channels < 1) {
  6508. tinyexr::SetErrorMessage("Invalid channels format", err);
  6509. return TINYEXR_ERROR_INVALID_DATA;
  6510. }
  6511. } else if (attr_name.compare("dataWindow") == 0) {
  6512. memcpy(&dx, &data.at(0), sizeof(int));
  6513. memcpy(&dy, &data.at(4), sizeof(int));
  6514. memcpy(&dw, &data.at(8), sizeof(int));
  6515. memcpy(&dh, &data.at(12), sizeof(int));
  6516. tinyexr::swap4(&dx);
  6517. tinyexr::swap4(&dy);
  6518. tinyexr::swap4(&dw);
  6519. tinyexr::swap4(&dh);
  6520. } else if (attr_name.compare("displayWindow") == 0) {
  6521. int x;
  6522. int y;
  6523. int w;
  6524. int h;
  6525. memcpy(&x, &data.at(0), sizeof(int));
  6526. memcpy(&y, &data.at(4), sizeof(int));
  6527. memcpy(&w, &data.at(8), sizeof(int));
  6528. memcpy(&h, &data.at(12), sizeof(int));
  6529. tinyexr::swap4(&x);
  6530. tinyexr::swap4(&y);
  6531. tinyexr::swap4(&w);
  6532. tinyexr::swap4(&h);
  6533. }
  6534. }
  6535. assert(dx >= 0);
  6536. assert(dy >= 0);
  6537. assert(dw >= 0);
  6538. assert(dh >= 0);
  6539. assert(num_channels >= 1);
  6540. int data_width = dw - dx + 1;
  6541. int data_height = dh - dy + 1;
  6542. // Read offset tables.
  6543. int num_blocks = data_height / num_scanline_blocks;
  6544. if (num_blocks * num_scanline_blocks < data_height) {
  6545. num_blocks++;
  6546. }
  6547. std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks));
  6548. for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
  6549. tinyexr::tinyexr_int64 offset;
  6550. memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64));
  6551. tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset));
  6552. marker += sizeof(tinyexr::tinyexr_int64); // = 8
  6553. offsets[y] = offset;
  6554. }
  6555. #if TINYEXR_USE_PIZ
  6556. if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
  6557. (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
  6558. (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
  6559. (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) ||
  6560. (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) {
  6561. #else
  6562. if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
  6563. (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
  6564. (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
  6565. (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
  6566. #endif
  6567. // OK
  6568. } else {
  6569. tinyexr::SetErrorMessage("Unsupported compression format", err);
  6570. return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
  6571. }
  6572. deep_image->image = static_cast<float ***>(
  6573. malloc(sizeof(float **) * static_cast<size_t>(num_channels)));
  6574. for (int c = 0; c < num_channels; c++) {
  6575. deep_image->image[c] = static_cast<float **>(
  6576. malloc(sizeof(float *) * static_cast<size_t>(data_height)));
6577. // Per-scanline sample buffers (deep_image->image[c][y]) are allocated
6578. // later, while decoding each scanline block.
  6579. }
  6580. deep_image->offset_table = static_cast<int **>(
  6581. malloc(sizeof(int *) * static_cast<size_t>(data_height)));
  6582. for (int y = 0; y < data_height; y++) {
  6583. deep_image->offset_table[y] = static_cast<int *>(
  6584. malloc(sizeof(int) * static_cast<size_t>(data_width)));
  6585. }
  6586. for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
  6587. const unsigned char *data_ptr =
  6588. reinterpret_cast<const unsigned char *>(head + offsets[y]);
  6589. // int: y coordinate
  6590. // int64: packed size of pixel offset table
  6591. // int64: packed size of sample data
  6592. // int64: unpacked size of sample data
  6593. // compressed pixel offset table
  6594. // compressed sample data
  6595. int line_no;
  6596. tinyexr::tinyexr_int64 packedOffsetTableSize;
  6597. tinyexr::tinyexr_int64 packedSampleDataSize;
  6598. tinyexr::tinyexr_int64 unpackedSampleDataSize;
  6599. memcpy(&line_no, data_ptr, sizeof(int));
  6600. memcpy(&packedOffsetTableSize, data_ptr + 4,
  6601. sizeof(tinyexr::tinyexr_int64));
  6602. memcpy(&packedSampleDataSize, data_ptr + 12,
  6603. sizeof(tinyexr::tinyexr_int64));
  6604. memcpy(&unpackedSampleDataSize, data_ptr + 20,
  6605. sizeof(tinyexr::tinyexr_int64));
  6606. tinyexr::swap4(&line_no);
  6607. tinyexr::swap8(
  6608. reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize));
  6609. tinyexr::swap8(
  6610. reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize));
  6611. tinyexr::swap8(
  6612. reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize));
  6613. std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width));
  6614. // decode pixel offset table.
  6615. {
  6616. unsigned long dstLen =
  6617. static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int));
  6618. if (!tinyexr::DecompressZip(
  6619. reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)),
  6620. &dstLen, data_ptr + 28,
  6621. static_cast<unsigned long>(packedOffsetTableSize))) {
6622. return TINYEXR_ERROR_INVALID_DATA;
  6623. }
  6624. assert(dstLen == pixelOffsetTable.size() * sizeof(int));
  6625. for (size_t i = 0; i < static_cast<size_t>(data_width); i++) {
  6626. deep_image->offset_table[y][i] = pixelOffsetTable[i];
  6627. }
  6628. }
  6629. std::vector<unsigned char> sample_data(
  6630. static_cast<size_t>(unpackedSampleDataSize));
  6631. // decode sample data.
  6632. {
  6633. unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize);
  6634. if (dstLen) {
  6635. if (!tinyexr::DecompressZip(
  6636. reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen,
  6637. data_ptr + 28 + packedOffsetTableSize,
  6638. static_cast<unsigned long>(packedSampleDataSize))) {
6639. return TINYEXR_ERROR_INVALID_DATA;
  6640. }
  6641. assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize));
  6642. }
  6643. }
  6644. // decode sample
  6645. int sampleSize = -1;
  6646. std::vector<int> channel_offset_list(static_cast<size_t>(num_channels));
  6647. {
  6648. int channel_offset = 0;
  6649. for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) {
  6650. channel_offset_list[i] = channel_offset;
  6651. if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) { // UINT
  6652. channel_offset += 4;
  6653. } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) { // half
  6654. channel_offset += 2;
  6655. } else if (channels[i].pixel_type ==
  6656. TINYEXR_PIXELTYPE_FLOAT) { // float
  6657. channel_offset += 4;
  6658. } else {
  6659. assert(0);
  6660. }
  6661. }
  6662. sampleSize = channel_offset;
  6663. }
  6664. assert(sampleSize >= 2);
  6665. assert(static_cast<size_t>(
  6666. pixelOffsetTable[static_cast<size_t>(data_width - 1)] *
  6667. sampleSize) == sample_data.size());
  6668. int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize;
  6669. //
  6670. // Alloc memory
  6671. //
  6672. //
  6673. // pixel data is stored as image[channels][pixel_samples]
  6674. //
  6675. {
  6676. tinyexr::tinyexr_uint64 data_offset = 0;
  6677. for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
  6678. deep_image->image[c][y] = static_cast<float *>(
  6679. malloc(sizeof(float) * static_cast<size_t>(samples_per_line)));
  6680. if (channels[c].pixel_type == 0) { // UINT
  6681. for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
  6682. unsigned int ui;
  6683. unsigned int *src_ptr = reinterpret_cast<unsigned int *>(
  6684. &sample_data.at(size_t(data_offset) + x * sizeof(int)));
  6685. tinyexr::cpy4(&ui, src_ptr);
  6686. deep_image->image[c][y][x] = static_cast<float>(ui); // @fixme
  6687. }
  6688. data_offset +=
  6689. sizeof(unsigned int) * static_cast<size_t>(samples_per_line);
  6690. } else if (channels[c].pixel_type == 1) { // half
  6691. for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
  6692. tinyexr::FP16 f16;
  6693. const unsigned short *src_ptr = reinterpret_cast<unsigned short *>(
  6694. &sample_data.at(size_t(data_offset) + x * sizeof(short)));
  6695. tinyexr::cpy2(&(f16.u), src_ptr);
  6696. tinyexr::FP32 f32 = half_to_float(f16);
  6697. deep_image->image[c][y][x] = f32.f;
  6698. }
  6699. data_offset += sizeof(short) * static_cast<size_t>(samples_per_line);
  6700. } else { // float
  6701. for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
  6702. float f;
  6703. const float *src_ptr = reinterpret_cast<float *>(
  6704. &sample_data.at(size_t(data_offset) + x * sizeof(float)));
  6705. tinyexr::cpy4(&f, src_ptr);
  6706. deep_image->image[c][y][x] = f;
  6707. }
  6708. data_offset += sizeof(float) * static_cast<size_t>(samples_per_line);
  6709. }
  6710. }
  6711. }
  6712. } // y
  6713. deep_image->width = data_width;
  6714. deep_image->height = data_height;
  6715. deep_image->channel_names = static_cast<const char **>(
  6716. malloc(sizeof(const char *) * static_cast<size_t>(num_channels)));
  6717. for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
  6718. #ifdef _WIN32
  6719. deep_image->channel_names[c] = _strdup(channels[c].name.c_str());
  6720. #else
  6721. deep_image->channel_names[c] = strdup(channels[c].name.c_str());
  6722. #endif
  6723. }
  6724. deep_image->num_channels = num_channels;
  6725. return TINYEXR_SUCCESS;
  6726. }
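// Usage sketch for LoadDeepEXR() (illustrative only): decoded samples are laid
// out as image[channel][scanline][sample]; offset_table[y][x] appears to hold
// the cumulative sample count within scanline y (see the decoder above).
//
//   DeepImage deep;
//   memset(&deep, 0, sizeof(deep));
//   const char *err = NULL;
//   if (LoadDeepEXR(&deep, "deep.exr", &err) == TINYEXR_SUCCESS) {
//     for (int c = 0; c < deep.num_channels; c++) {
//       printf("channel %d: %s\n", c, deep.channel_names[c]);
//     }
//   } else {
//     fprintf(stderr, "%s\n", err ? err : "load failed");
//     FreeEXRErrorMessage(err);
//   }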
  6727. void InitEXRImage(EXRImage *exr_image) {
  6728. if (exr_image == NULL) {
  6729. return;
  6730. }
  6731. exr_image->width = 0;
  6732. exr_image->height = 0;
  6733. exr_image->num_channels = 0;
  6734. exr_image->images = NULL;
  6735. exr_image->tiles = NULL;
  6736. exr_image->next_level = NULL;
  6737. exr_image->level_x = 0;
  6738. exr_image->level_y = 0;
  6739. exr_image->num_tiles = 0;
  6740. }
  6741. void FreeEXRErrorMessage(const char *msg) {
  6742. if (msg) {
  6743. free(reinterpret_cast<void *>(const_cast<char *>(msg)));
  6744. }
  6745. return;
  6746. }
  6747. void InitEXRHeader(EXRHeader *exr_header) {
  6748. if (exr_header == NULL) {
  6749. return;
  6750. }
  6751. memset(exr_header, 0, sizeof(EXRHeader));
  6752. }
  6753. int FreeEXRHeader(EXRHeader *exr_header) {
  6754. if (exr_header == NULL) {
  6755. return TINYEXR_ERROR_INVALID_ARGUMENT;
  6756. }
  6757. if (exr_header->channels) {
  6758. free(exr_header->channels);
  6759. }
  6760. if (exr_header->pixel_types) {
  6761. free(exr_header->pixel_types);
  6762. }
  6763. if (exr_header->requested_pixel_types) {
  6764. free(exr_header->requested_pixel_types);
  6765. }
  6766. for (int i = 0; i < exr_header->num_custom_attributes; i++) {
  6767. if (exr_header->custom_attributes[i].value) {
  6768. free(exr_header->custom_attributes[i].value);
  6769. }
  6770. }
  6771. if (exr_header->custom_attributes) {
  6772. free(exr_header->custom_attributes);
  6773. }
  6774. EXRSetNameAttr(exr_header, NULL);
  6775. return TINYEXR_SUCCESS;
  6776. }
  6777. void EXRSetNameAttr(EXRHeader* exr_header, const char* name) {
  6778. if (exr_header == NULL) {
  6779. return;
  6780. }
  6781. memset(exr_header->name, 0, 256);
  6782. if (name != NULL) {
  6783. size_t len = std::min(strlen(name), size_t(255));
  6784. if (len) {
  6785. memcpy(exr_header->name, name, len);
  6786. }
  6787. }
  6788. }
  6789. int EXRNumLevels(const EXRImage* exr_image) {
  6790. if (exr_image == NULL) return 0;
  6791. if(exr_image->images) return 1; // scanlines
  6792. int levels = 1;
  6793. const EXRImage* level_image = exr_image;
  6794. while((level_image = level_image->next_level)) ++levels;
  6795. return levels;
  6796. }
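// Usage sketch (illustrative only): walking the mip/rip chain of a decoded
// tiled image through next_level, which is what EXRNumLevels() counts.
//
//   for (const EXRImage *lv = &exr_image; lv; lv = lv->next_level) {
//     printf("level (%d, %d): %d x %d, %d tiles\n",
//            lv->level_x, lv->level_y, lv->width, lv->height, lv->num_tiles);
//   }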
  6797. int FreeEXRImage(EXRImage *exr_image) {
  6798. if (exr_image == NULL) {
  6799. return TINYEXR_ERROR_INVALID_ARGUMENT;
  6800. }
  6801. if (exr_image->next_level) {
  6802. FreeEXRImage(exr_image->next_level);
  6803. delete exr_image->next_level;
  6804. }
  6805. for (int i = 0; i < exr_image->num_channels; i++) {
  6806. if (exr_image->images && exr_image->images[i]) {
  6807. free(exr_image->images[i]);
  6808. }
  6809. }
  6810. if (exr_image->images) {
  6811. free(exr_image->images);
  6812. }
  6813. if (exr_image->tiles) {
  6814. for (int tid = 0; tid < exr_image->num_tiles; tid++) {
  6815. for (int i = 0; i < exr_image->num_channels; i++) {
  6816. if (exr_image->tiles[tid].images && exr_image->tiles[tid].images[i]) {
  6817. free(exr_image->tiles[tid].images[i]);
  6818. }
  6819. }
  6820. if (exr_image->tiles[tid].images) {
  6821. free(exr_image->tiles[tid].images);
  6822. }
  6823. }
  6824. free(exr_image->tiles);
  6825. }
  6826. return TINYEXR_SUCCESS;
  6827. }
  6828. int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version,
  6829. const char *filename, const char **err) {
  6830. if (exr_header == NULL || exr_version == NULL || filename == NULL) {
  6831. tinyexr::SetErrorMessage("Invalid argument for ParseEXRHeaderFromFile",
  6832. err);
  6833. return TINYEXR_ERROR_INVALID_ARGUMENT;
  6834. }
  6835. FILE *fp = NULL;
  6836. #ifdef _WIN32
  6837. #if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API) // MSVC, MinGW GCC, or Clang.
  6838. errno_t errcode =
  6839. _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  6840. if (errcode != 0) {
  6841. tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
  6842. return TINYEXR_ERROR_INVALID_FILE;
  6843. }
  6844. #else
  6845. // Unknown compiler or MinGW without MINGW_HAS_SECURE_API.
  6846. fp = fopen(filename, "rb");
  6847. #endif
  6848. #else
  6849. fp = fopen(filename, "rb");
  6850. #endif
  6851. if (!fp) {
  6852. tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
  6853. return TINYEXR_ERROR_CANT_OPEN_FILE;
  6854. }
  6855. size_t filesize;
  6856. // Compute size
  6857. fseek(fp, 0, SEEK_END);
  6858. filesize = static_cast<size_t>(ftell(fp));
  6859. fseek(fp, 0, SEEK_SET);
  6860. std::vector<unsigned char> buf(filesize); // @todo { use mmap }
  6861. {
  6862. size_t ret;
  6863. ret = fread(&buf[0], 1, filesize, fp);
  6864. assert(ret == filesize);
  6865. fclose(fp);
  6866. if (ret != filesize) {
  6867. tinyexr::SetErrorMessage("fread() error on " + std::string(filename),
  6868. err);
  6869. return TINYEXR_ERROR_INVALID_FILE;
  6870. }
  6871. }
  6872. return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize,
  6873. err);
  6874. }
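// Usage sketch for the single-part parse-then-load flow (illustrative only;
// LoadEXRImageFromFile() is declared elsewhere in this header):
//
//   EXRVersion version;
//   EXRHeader header;
//   EXRImage image;
//   const char *err = NULL;
//   InitEXRHeader(&header);
//   InitEXRImage(&image);
//   if (ParseEXRVersionFromFile(&version, "input.exr") == TINYEXR_SUCCESS &&
//       ParseEXRHeaderFromFile(&header, &version, "input.exr", &err) ==
//           TINYEXR_SUCCESS &&
//       LoadEXRImageFromFile(&image, &header, "input.exr", &err) ==
//           TINYEXR_SUCCESS) {
//     // image.images[c] holds per-channel pixel data (see header.channels).
//     FreeEXRImage(&image);
//   }
//   FreeEXRHeader(&header);
//   FreeEXRErrorMessage(err);  // safe to call with NULL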
  6875. int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers,
  6876. int *num_headers,
  6877. const EXRVersion *exr_version,
  6878. const unsigned char *memory, size_t size,
  6879. const char **err) {
  6880. if (memory == NULL || exr_headers == NULL || num_headers == NULL ||
  6881. exr_version == NULL) {
  6882. // Invalid argument
  6883. tinyexr::SetErrorMessage(
  6884. "Invalid argument for ParseEXRMultipartHeaderFromMemory", err);
  6885. return TINYEXR_ERROR_INVALID_ARGUMENT;
  6886. }
  6887. if (size < tinyexr::kEXRVersionSize) {
  6888. tinyexr::SetErrorMessage("Data size too short", err);
  6889. return TINYEXR_ERROR_INVALID_DATA;
  6890. }
  6891. const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
  6892. size_t marker_size = size - tinyexr::kEXRVersionSize;
  6893. std::vector<tinyexr::HeaderInfo> infos;
  6894. for (;;) {
  6895. tinyexr::HeaderInfo info;
  6896. info.clear();
  6897. std::string err_str;
  6898. bool empty_header = false;
  6899. int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str,
  6900. marker, marker_size);
  6901. if (ret != TINYEXR_SUCCESS) {
  6902. tinyexr::SetErrorMessage(err_str, err);
  6903. return ret;
  6904. }
  6905. if (empty_header) {
  6906. marker += 1; // skip '\0'
  6907. break;
  6908. }
  6909. // `chunkCount` must exist in the header.
  6910. if (info.chunk_count == 0) {
  6911. tinyexr::SetErrorMessage(
  6912. "`chunkCount' attribute is not found in the header.", err);
  6913. return TINYEXR_ERROR_INVALID_DATA;
  6914. }
  6915. infos.push_back(info);
  6916. // move to next header.
  6917. marker += info.header_len;
  6918. size -= info.header_len;
  6919. }
  6920. // allocate memory for EXRHeader and create array of EXRHeader pointers.
  6921. (*exr_headers) =
  6922. static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size()));
  6923. int retcode = TINYEXR_SUCCESS;
  6924. for (size_t i = 0; i < infos.size(); i++) {
  6925. EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader)));
  6926. memset(exr_header, 0, sizeof(EXRHeader));
  6927. std::string warn;
  6928. std::string _err;
  6929. if (!ConvertHeader(exr_header, infos[i], &warn, &_err)) {
  6930. if (!_err.empty()) {
  6931. tinyexr::SetErrorMessage(
  6932. _err, err);
  6933. }
  6934. // continue to converting headers
  6935. retcode = TINYEXR_ERROR_INVALID_HEADER;
  6936. }
  6937. exr_header->multipart = exr_version->multipart ? 1 : 0;
  6938. (*exr_headers)[i] = exr_header;
  6939. }
  6940. (*num_headers) = static_cast<int>(infos.size());
  6941. return retcode;
  6942. }
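// Usage note (illustrative only): the headers returned by the multipart header
// parsers are individually malloc'ed, as is the pointer array itself, so a
// caller is expected to release them roughly like this:
//
//   for (int i = 0; i < num_headers; i++) {
//     FreeEXRHeader(headers[i]);
//     free(headers[i]);
//   }
//   free(headers);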
  6943. int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers,
  6944. const EXRVersion *exr_version,
  6945. const char *filename, const char **err) {
  6946. if (exr_headers == NULL || num_headers == NULL || exr_version == NULL ||
  6947. filename == NULL) {
  6948. tinyexr::SetErrorMessage(
  6949. "Invalid argument for ParseEXRMultipartHeaderFromFile()", err);
  6950. return TINYEXR_ERROR_INVALID_ARGUMENT;
  6951. }
  6952. FILE *fp = NULL;
  6953. #ifdef _WIN32
  6954. #if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API) // MSVC, MinGW GCC, or Clang.
  6955. errno_t errcode =
  6956. _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  6957. if (errcode != 0) {
  6958. tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
  6959. return TINYEXR_ERROR_INVALID_FILE;
  6960. }
  6961. #else
  6962. // Unknown compiler or MinGW without MINGW_HAS_SECURE_API.
  6963. fp = fopen(filename, "rb");
  6964. #endif
  6965. #else
  6966. fp = fopen(filename, "rb");
  6967. #endif
  6968. if (!fp) {
  6969. tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
  6970. return TINYEXR_ERROR_CANT_OPEN_FILE;
  6971. }
  6972. size_t filesize;
  6973. // Compute size
  6974. fseek(fp, 0, SEEK_END);
  6975. filesize = static_cast<size_t>(ftell(fp));
  6976. fseek(fp, 0, SEEK_SET);
  6977. std::vector<unsigned char> buf(filesize); // @todo { use mmap }
  6978. {
  6979. size_t ret;
  6980. ret = fread(&buf[0], 1, filesize, fp);
  6981. assert(ret == filesize);
  6982. fclose(fp);
  6983. if (ret != filesize) {
  6984. tinyexr::SetErrorMessage("`fread' error. file may be corrupted.", err);
  6985. return TINYEXR_ERROR_INVALID_FILE;
  6986. }
  6987. }
  6988. return ParseEXRMultipartHeaderFromMemory(
  6989. exr_headers, num_headers, exr_version, &buf.at(0), filesize, err);
  6990. }
  6991. int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory,
  6992. size_t size) {
  6993. if (version == NULL || memory == NULL) {
  6994. return TINYEXR_ERROR_INVALID_ARGUMENT;
  6995. }
  6996. if (size < tinyexr::kEXRVersionSize) {
  6997. return TINYEXR_ERROR_INVALID_DATA;
  6998. }
  6999. const unsigned char *marker = memory;
  7000. // Header check.
  7001. {
  7002. const char header[] = {0x76, 0x2f, 0x31, 0x01};
  7003. if (memcmp(marker, header, 4) != 0) {
  7004. return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
  7005. }
  7006. marker += 4;
  7007. }
  7008. version->tiled = false;
  7009. version->long_name = false;
  7010. version->non_image = false;
  7011. version->multipart = false;
  7012. // Parse version header.
  7013. {
  7014. // must be 2
  7015. if (marker[0] != 2) {
  7016. return TINYEXR_ERROR_INVALID_EXR_VERSION;
  7017. }
  7018. if (version == NULL) {
7019. return TINYEXR_SUCCESS; // unreachable: `version' was validated at function entry
  7020. }
  7021. version->version = 2;
  7022. if (marker[1] & 0x2) { // 9th bit
  7023. version->tiled = true;
  7024. }
  7025. if (marker[1] & 0x4) { // 10th bit
  7026. version->long_name = true;
  7027. }
  7028. if (marker[1] & 0x8) { // 11th bit
  7029. version->non_image = true; // (deep image)
  7030. }
  7031. if (marker[1] & 0x10) { // 12th bit
  7032. version->multipart = true;
  7033. }
  7034. }
  7035. return TINYEXR_SUCCESS;
  7036. }
  7037. int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) {
  7038. if (filename == NULL) {
  7039. return TINYEXR_ERROR_INVALID_ARGUMENT;
  7040. }
  7041. FILE *fp = NULL;
  7042. #ifdef _WIN32
  7043. #if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API) // MSVC, MinGW GCC, or Clang.
  7044. errno_t err = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  7045. if (err != 0) {
7046. // TODO(syoyo): return the _wfopen_s error code
  7047. return TINYEXR_ERROR_CANT_OPEN_FILE;
  7048. }
  7049. #else
  7050. // Unknown compiler or MinGW without MINGW_HAS_SECURE_API.
  7051. fp = fopen(filename, "rb");
  7052. #endif
  7053. #else
  7054. fp = fopen(filename, "rb");
  7055. #endif
  7056. if (!fp) {
  7057. return TINYEXR_ERROR_CANT_OPEN_FILE;
  7058. }
  7059. size_t file_size;
  7060. // Compute size
  7061. fseek(fp, 0, SEEK_END);
  7062. file_size = static_cast<size_t>(ftell(fp));
  7063. fseek(fp, 0, SEEK_SET);
  7064. if (file_size < tinyexr::kEXRVersionSize) {
  7065. return TINYEXR_ERROR_INVALID_FILE;
  7066. }
  7067. unsigned char buf[tinyexr::kEXRVersionSize];
  7068. size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp);
  7069. fclose(fp);
  7070. if (ret != tinyexr::kEXRVersionSize) {
  7071. return TINYEXR_ERROR_INVALID_FILE;
  7072. }
  7073. return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize);
  7074. }
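// Usage sketch for ParseEXRVersionFromFile() (illustrative only):
//
//   EXRVersion version;
//   if (ParseEXRVersionFromFile(&version, "input.exr") == TINYEXR_SUCCESS) {
//     printf("tiled=%d long_name=%d deep=%d multipart=%d\n",
//            version.tiled, version.long_name, version.non_image,
//            version.multipart);
//   }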
  7075. int LoadEXRMultipartImageFromMemory(EXRImage *exr_images,
  7076. const EXRHeader **exr_headers,
  7077. unsigned int num_parts,
  7078. const unsigned char *memory,
  7079. const size_t size, const char **err) {
  7080. if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
  7081. memory == NULL || (size <= tinyexr::kEXRVersionSize)) {
  7082. tinyexr::SetErrorMessage(
  7083. "Invalid argument for LoadEXRMultipartImageFromMemory()", err);
  7084. return TINYEXR_ERROR_INVALID_ARGUMENT;
  7085. }
  7086. // compute total header size.
  7087. size_t total_header_size = 0;
  7088. for (unsigned int i = 0; i < num_parts; i++) {
  7089. if (exr_headers[i]->header_len == 0) {
  7090. tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
  7091. return TINYEXR_ERROR_INVALID_ARGUMENT;
  7092. }
  7093. total_header_size += exr_headers[i]->header_len;
  7094. }
  7095. const char *marker = reinterpret_cast<const char *>(
  7096. memory + total_header_size + 4 +
  7097. 4); // +8 for magic number and version header.
  7098. marker += 1; // Skip empty header.
  7099. // NOTE 1:
7100. // In a multipart image, a 'part number' precedes each chunk's data:
  7101. // 4 byte : part number
  7102. // 4+ : chunk
  7103. //
  7104. // NOTE 2:
7105. // The EXR spec says 'part number' is 'unsigned long', but it is actually an
7106. // 'unsigned int' (4 bytes) in the OpenEXR implementation:
  7107. // http://www.openexr.com/openexrfilelayout.pdf
  7108. // Load chunk offset table.
  7109. std::vector<tinyexr::OffsetData> chunk_offset_table_list;
  7110. chunk_offset_table_list.reserve(num_parts);
  7111. for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
  7112. chunk_offset_table_list.resize(chunk_offset_table_list.size() + 1);
  7113. tinyexr::OffsetData& offset_data = chunk_offset_table_list.back();
  7114. if (!exr_headers[i]->tiled || exr_headers[i]->tile_level_mode == TINYEXR_TILE_ONE_LEVEL) {
  7115. tinyexr::InitSingleResolutionOffsets(offset_data, size_t(exr_headers[i]->chunk_count));
  7116. std::vector<tinyexr::tinyexr_uint64>& offset_table = offset_data.offsets[0][0];
  7117. for (size_t c = 0; c < offset_table.size(); c++) {
  7118. tinyexr::tinyexr_uint64 offset;
  7119. memcpy(&offset, marker, 8);
  7120. tinyexr::swap8(&offset);
  7121. if (offset >= size) {
  7122. tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.",
  7123. err);
  7124. return TINYEXR_ERROR_INVALID_DATA;
  7125. }
  7126. offset_table[c] = offset + 4; // +4 to skip 'part number'
  7127. marker += 8;
  7128. }
  7129. } else {
  7130. {
  7131. std::vector<int> num_x_tiles, num_y_tiles;
  7132. tinyexr::PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_headers[i]);
  7133. int num_blocks = InitTileOffsets(offset_data, exr_headers[i], num_x_tiles, num_y_tiles);
  7134. if (num_blocks != exr_headers[i]->chunk_count) {
  7135. tinyexr::SetErrorMessage("Invalid offset table size.", err);
  7136. return TINYEXR_ERROR_INVALID_DATA;
  7137. }
  7138. }
  7139. for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
  7140. for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
  7141. for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
  7142. tinyexr::tinyexr_uint64 offset;
  7143. memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
  7144. tinyexr::swap8(&offset);
  7145. if (offset >= size) {
  7146. tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.",
  7147. err);
  7148. return TINYEXR_ERROR_INVALID_DATA;
  7149. }
  7150. offset_data.offsets[l][dy][dx] = offset + 4; // +4 to skip 'part number'
  7151. marker += sizeof(tinyexr::tinyexr_uint64); // = 8
  7152. }
  7153. }
  7154. }
  7155. }
  7156. }
  7157. // Decode image.
  7158. for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
  7159. tinyexr::OffsetData &offset_data = chunk_offset_table_list[i];
7160. // First check that the 'part number' of every chunk is identical to 'i'.
  7161. for (unsigned int l = 0; l < offset_data.offsets.size(); ++l)
  7162. for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy)
  7163. for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
  7164. const unsigned char *part_number_addr =
  7165. memory + offset_data.offsets[l][dy][dx] - 4; // -4 to move to 'part number' field.
  7166. unsigned int part_no;
  7167. memcpy(&part_no, part_number_addr, sizeof(unsigned int)); // 4
  7168. tinyexr::swap4(&part_no);
  7169. if (part_no != i) {
  7170. tinyexr::SetErrorMessage("Invalid `part number' in EXR header chunks.",
  7171. err);
  7172. return TINYEXR_ERROR_INVALID_DATA;
  7173. }
  7174. }
  7175. std::string e;
  7176. int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i], offset_data,
  7177. memory, size, &e);
  7178. if (ret != TINYEXR_SUCCESS) {
  7179. if (!e.empty()) {
  7180. tinyexr::SetErrorMessage(e, err);
  7181. }
  7182. return ret;
  7183. }
  7184. }
  7185. return TINYEXR_SUCCESS;
  7186. }
  7187. int LoadEXRMultipartImageFromFile(EXRImage *exr_images,
  7188. const EXRHeader **exr_headers,
  7189. unsigned int num_parts, const char *filename,
  7190. const char **err) {
  7191. if (exr_images == NULL || exr_headers == NULL || num_parts == 0) {
  7192. tinyexr::SetErrorMessage(
  7193. "Invalid argument for LoadEXRMultipartImageFromFile", err);
  7194. return TINYEXR_ERROR_INVALID_ARGUMENT;
  7195. }
  7196. FILE *fp = NULL;
  7197. #ifdef _WIN32
  7198. #if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API) // MSVC, MinGW GCC, or Clang.
  7199. errno_t errcode =
  7200. _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  7201. if (errcode != 0) {
  7202. tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
  7203. return TINYEXR_ERROR_CANT_OPEN_FILE;
  7204. }
  7205. #else
  7206. // Unknown compiler or MinGW without MINGW_HAS_SECURE_API.
  7207. fp = fopen(filename, "rb");
  7208. #endif
  7209. #else
  7210. fp = fopen(filename, "rb");
  7211. #endif
  7212. if (!fp) {
  7213. tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
  7214. return TINYEXR_ERROR_CANT_OPEN_FILE;
  7215. }
  7216. size_t filesize;
  7217. // Compute size
  7218. fseek(fp, 0, SEEK_END);
  7219. filesize = static_cast<size_t>(ftell(fp));
  7220. fseek(fp, 0, SEEK_SET);
  7221. std::vector<unsigned char> buf(filesize); // @todo { use mmap }
  7222. {
  7223. size_t ret;
  7224. ret = fread(&buf[0], 1, filesize, fp);
  7225. assert(ret == filesize);
  7226. fclose(fp);
  7227. (void)ret;
  7228. }
  7229. return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts,
  7230. &buf.at(0), filesize, err);
  7231. }
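// Usage sketch for the multi-part reader (illustrative only; error handling
// abbreviated, and the headers/images must be released by the caller as noted
// above):
//
//   EXRVersion version;
//   if (ParseEXRVersionFromFile(&version, "multi.exr") != TINYEXR_SUCCESS ||
//       !version.multipart) { /* not a multi-part file */ }
//
//   EXRHeader **headers = NULL;  // allocated by the parser
//   int num_parts = 0;
//   const char *err = NULL;
//   ParseEXRMultipartHeaderFromFile(&headers, &num_parts, &version,
//                                   "multi.exr", &err);
//
//   std::vector<EXRImage> images(num_parts);
//   for (int i = 0; i < num_parts; i++) InitEXRImage(&images[i]);
//   LoadEXRMultipartImageFromFile(&images[0],
//                                 const_cast<const EXRHeader **>(headers),
//                                 static_cast<unsigned int>(num_parts),
//                                 "multi.exr", &err);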
  7232. int SaveEXR(const float *data, int width, int height, int components,
  7233. const int save_as_fp16, const char *outfilename, const char **err) {
  7234. if ((components == 1) || components == 3 || components == 4) {
  7235. // OK
  7236. } else {
  7237. std::stringstream ss;
  7238. ss << "Unsupported component value : " << components << std::endl;
  7239. tinyexr::SetErrorMessage(ss.str(), err);
  7240. return TINYEXR_ERROR_INVALID_ARGUMENT;
  7241. }
  7242. EXRHeader header;
  7243. InitEXRHeader(&header);
  7244. if ((width < 16) && (height < 16)) {
  7245. // No compression for small image.
  7246. header.compression_type = TINYEXR_COMPRESSIONTYPE_NONE;
  7247. } else {
  7248. header.compression_type = TINYEXR_COMPRESSIONTYPE_ZIP;
  7249. }
  7250. EXRImage image;
  7251. InitEXRImage(&image);
  7252. image.num_channels = components;
  7253. std::vector<float> images[4];
  7254. if (components == 1) {
  7255. images[0].resize(static_cast<size_t>(width * height));
  7256. memcpy(images[0].data(), data, sizeof(float) * size_t(width * height));
  7257. } else {
  7258. images[0].resize(static_cast<size_t>(width * height));
  7259. images[1].resize(static_cast<size_t>(width * height));
  7260. images[2].resize(static_cast<size_t>(width * height));
  7261. images[3].resize(static_cast<size_t>(width * height));
  7262. // Split RGB(A)RGB(A)RGB(A)... into R, G and B(and A) layers
  7263. for (size_t i = 0; i < static_cast<size_t>(width * height); i++) {
  7264. images[0][i] = data[static_cast<size_t>(components) * i + 0];
  7265. images[1][i] = data[static_cast<size_t>(components) * i + 1];
  7266. images[2][i] = data[static_cast<size_t>(components) * i + 2];
  7267. if (components == 4) {
  7268. images[3][i] = data[static_cast<size_t>(components) * i + 3];
  7269. }
  7270. }
  7271. }
  7272. float *image_ptr[4] = {0, 0, 0, 0};
  7273. if (components == 4) {
  7274. image_ptr[0] = &(images[3].at(0)); // A
  7275. image_ptr[1] = &(images[2].at(0)); // B
  7276. image_ptr[2] = &(images[1].at(0)); // G
  7277. image_ptr[3] = &(images[0].at(0)); // R
  7278. } else if (components == 3) {
  7279. image_ptr[0] = &(images[2].at(0)); // B
  7280. image_ptr[1] = &(images[1].at(0)); // G
  7281. image_ptr[2] = &(images[0].at(0)); // R
  7282. } else if (components == 1) {
  7283. image_ptr[0] = &(images[0].at(0)); // A
  7284. }
  7285. image.images = reinterpret_cast<unsigned char **>(image_ptr);
  7286. image.width = width;
  7287. image.height = height;
  7288. header.num_channels = components;
  7289. header.channels = static_cast<EXRChannelInfo *>(malloc(
  7290. sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels)));
  7291. // Must be (A)BGR order, since most of EXR viewers expect this channel order.
  7292. if (components == 4) {
  7293. #ifdef _MSC_VER
  7294. strncpy_s(header.channels[0].name, "A", 255);
  7295. strncpy_s(header.channels[1].name, "B", 255);
  7296. strncpy_s(header.channels[2].name, "G", 255);
  7297. strncpy_s(header.channels[3].name, "R", 255);
  7298. #else
  7299. strncpy(header.channels[0].name, "A", 255);
  7300. strncpy(header.channels[1].name, "B", 255);
  7301. strncpy(header.channels[2].name, "G", 255);
  7302. strncpy(header.channels[3].name, "R", 255);
  7303. #endif
  7304. header.channels[0].name[strlen("A")] = '\0';
  7305. header.channels[1].name[strlen("B")] = '\0';
  7306. header.channels[2].name[strlen("G")] = '\0';
  7307. header.channels[3].name[strlen("R")] = '\0';
  7308. } else if (components == 3) {
  7309. #ifdef _MSC_VER
  7310. strncpy_s(header.channels[0].name, "B", 255);
  7311. strncpy_s(header.channels[1].name, "G", 255);
  7312. strncpy_s(header.channels[2].name, "R", 255);
  7313. #else
  7314. strncpy(header.channels[0].name, "B", 255);
  7315. strncpy(header.channels[1].name, "G", 255);
  7316. strncpy(header.channels[2].name, "R", 255);
  7317. #endif
  7318. header.channels[0].name[strlen("B")] = '\0';
  7319. header.channels[1].name[strlen("G")] = '\0';
  7320. header.channels[2].name[strlen("R")] = '\0';
  7321. } else {
  7322. #ifdef _MSC_VER
  7323. strncpy_s(header.channels[0].name, "A", 255);
  7324. #else
  7325. strncpy(header.channels[0].name, "A", 255);
  7326. #endif
  7327. header.channels[0].name[strlen("A")] = '\0';
  7328. }
  7329. header.pixel_types = static_cast<int *>(
  7330. malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
  7331. header.requested_pixel_types = static_cast<int *>(
  7332. malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
  7333. for (int i = 0; i < header.num_channels; i++) {
  7334. header.pixel_types[i] =
  7335. TINYEXR_PIXELTYPE_FLOAT; // pixel type of input image
  7336. if (save_as_fp16 > 0) {
  7337. header.requested_pixel_types[i] =
  7338. TINYEXR_PIXELTYPE_HALF; // save with half(fp16) pixel format
  7339. } else {
  7340. header.requested_pixel_types[i] =
  7341. TINYEXR_PIXELTYPE_FLOAT; // save with float(fp32) pixel format(i.e.
  7342. // no precision reduction)
  7343. }
  7344. }
  7345. int ret = SaveEXRImageToFile(&image, &header, outfilename, err);
  7346. if (ret != TINYEXR_SUCCESS) {
  7347. return ret;
  7348. }
  7349. free(header.channels);
  7350. free(header.pixel_types);
  7351. free(header.requested_pixel_types);
  7352. return ret;
  7353. }
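// Usage sketch for SaveEXR() (illustrative only): write an RGBA float buffer
// as a half-float, ZIP-compressed .exr (`rgba`, `width`, and `height` are the
// caller's data).
//
//   // `rgba` points to width * height * 4 floats in RGBARGBA... order.
//   const char *err = NULL;
//   int ret = SaveEXR(rgba, width, height, 4, /* save_as_fp16 */ 1,
//                     "out.exr", &err);
//   if (ret != TINYEXR_SUCCESS) {
//     fprintf(stderr, "SaveEXR failed (%d): %s\n", ret, err ? err : "unknown");
//     FreeEXRErrorMessage(err);
//   }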
  7354. #ifdef __clang__
7355. // zero-as-null-pointer-constant
  7356. #pragma clang diagnostic pop
  7357. #endif
  7358. #endif // TINYEXR_IMPLEMENTATION_DEFINED
  7359. #endif // TINYEXR_IMPLEMENTATION