From 9cebc3926026b5df82b9ed8d0d2eb0c805e2f2b5 Mon Sep 17 00:00:00 2001 From: Adrien Kaiser Date: Thu, 7 Nov 2019 14:43:46 +0100 Subject: [PATCH 001/139] Python bindings: Expose material_ids of mesh_t. --- python/bindings.cc | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/python/bindings.cc b/python/bindings.cc index f066a8b9..c0c3ddce 100644 --- a/python/bindings.cc +++ b/python/bindings.cc @@ -134,6 +134,13 @@ PYBIND11_MODULE(tinyobjloader, tobj_module) py::buffer_info buf = ret.request(); memcpy(buf.ptr, instance.indices.data(), instance.indices.size() * 3 * sizeof(int)); return ret; + }) + .def_readonly("material_ids", &mesh_t::material_ids) + .def("numpy_material_ids", [] (mesh_t &instance) { + auto ret = py::array_t(instance.material_ids.size()); + py::buffer_info buf = ret.request(); + memcpy(buf.ptr, instance.material_ids.data(), instance.material_ids.size() * sizeof(int)); + return ret; }); py::class_(tobj_module, "lines_t") From 059248ffe1b5b8c0c8959b71b29e81fc6271e21e Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Sun, 24 Nov 2019 17:44:46 +0900 Subject: [PATCH 002/139] Support multiple search paths(separated by ';' in Win32, ':' in Posix) for .mtl(v1 API). Fixes #244 --- models/issue-244-mtl-searchpaths.obj | 22 +++++++++ tests/assets/issue-244.mtl | 11 +++++ tests/tester.cc | 37 ++++++++++++++ tiny_obj_loader.h | 73 +++++++++++++++++++++++----- 4 files changed, 131 insertions(+), 12 deletions(-) create mode 100644 models/issue-244-mtl-searchpaths.obj create mode 100644 tests/assets/issue-244.mtl diff --git a/models/issue-244-mtl-searchpaths.obj b/models/issue-244-mtl-searchpaths.obj new file mode 100644 index 00000000..58abea1c --- /dev/null +++ b/models/issue-244-mtl-searchpaths.obj @@ -0,0 +1,22 @@ +# .mtl is located at tests/assets +mtllib issue-244.mtl + +v -1.000000 1.202466 1.000000 +v 1.000000 1.202466 1.000000 +v -1.000000 1.202466 -1.000000 +v 1.000000 1.202466 -1.000000 +vn 0.0000 1.0000 0.0000 +v -1.000000 0.000000 1.000000 +v 1.000000 0.000000 1.000000 +v -1.000000 0.000000 -1.000000 +v 1.000000 0.000000 -1.000000 +vn 0.0000 1.0000 0.0000 + +usemtl None +o Plane.001 +f 1//1 2//1 4//1 + +# Following geometry is ignored without fix for #235 +usemtl None1 +o Plane +f 5//2 6//2 8//2 diff --git a/tests/assets/issue-244.mtl b/tests/assets/issue-244.mtl new file mode 100644 index 00000000..63c46a43 --- /dev/null +++ b/tests/assets/issue-244.mtl @@ -0,0 +1,11 @@ +newmtl None +Ka 0 0 0 +Kd 0 0 1 +Ks 0 0 0 + +newmtl None1 +Ka 0 0 0 +Kd 1 0 0 +Ks 0 0 0 + + diff --git a/tests/tester.cc b/tests/tester.cc index 0cd1bf2f..e2896391 100644 --- a/tests/tester.cc +++ b/tests/tester.cc @@ -1217,6 +1217,41 @@ void test_usemtl_then_o_issue235() { TEST_CHECK(4 == shapes[1].mesh.indices[0].vertex_index); } +void test_mtl_searchpaths_issue244() { + tinyobj::attrib_t attrib; + std::vector shapes; + std::vector materials; + + // .mtl is located at ./assets/issue-244.mtl +#if _WIN32 + std::string search_paths("../;../models;./assets"); +#else + std::string search_paths("../:../models:./assets"); +#endif + + std::string warn; + std::string err; + bool ret = tinyobj::LoadObj( + &attrib, &shapes, &materials, &warn, &err, + "../models/issue-244-mtl-searchpaths.obj", + search_paths.c_str()); + + TEST_CHECK(warn.empty()); + + if (!warn.empty()) { + std::cout << "WARN: " << warn << std::endl; + } + + if (!err.empty()) { + std::cerr << "ERR: " << err << std::endl; + } + + TEST_CHECK(true == ret); + TEST_CHECK(2 == shapes.size()); + TEST_CHECK(2 == materials.size()); + TEST_CHECK(4 == 
shapes[1].mesh.indices[0].vertex_index); +} + // Fuzzer test. // Just check if it does not crash. // Disable by default since Windows filesystem can't create filename of afl @@ -1310,4 +1345,6 @@ TEST_LIST = { test_leading_zero_in_exponent_notation_issue210}, {"usemtl_then_o_issue235", test_usemtl_then_o_issue235}, + {"mtl_searchpaths_issue244", + test_mtl_searchpaths_issue244}, {NULL, NULL}}; diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h index 08edd0ca..c406d151 100644 --- a/tiny_obj_loader.h +++ b/tiny_obj_loader.h @@ -26,6 +26,7 @@ THE SOFTWARE. // version 2.0.0 : Add new object oriented API. 1.x API is still provided. // * Support line primitive. // * Support points primitive. +// * Support multiple search path for .mtl(v1 API). // version 1.4.0 : Modifed ParseTextureNameAndOption API // version 1.3.1 : Make ParseTextureNameAndOption API public // version 1.3.0 : Separate warning and error message(breaking API of LoadObj) @@ -436,6 +437,7 @@ class MaterialReader { /// class MaterialFileReader : public MaterialReader { public: + // Path could contain separator(';' in Windows, ':' in Posix) explicit MaterialFileReader(const std::string &mtl_basedir) : m_mtlBaseDir(mtl_basedir) {} virtual ~MaterialFileReader() {} @@ -1634,6 +1636,21 @@ static void SplitString(const std::string &s, char delim, } } +static std::string JoinPath(const std::string &dir, + const std::string &filename) { + if (dir.empty()) { + return filename; + } else { + // check '/' + char lastChar = *dir.rbegin(); + if (lastChar != '/') { + return dir + std::string("/") + filename; + } else { + return dir + filename; + } + } +} + void LoadMtl(std::map *material_map, std::vector *materials, std::istream *inStream, std::string *warning, std::string *err) { @@ -2015,27 +2032,59 @@ bool MaterialFileReader::operator()(const std::string &matId, std::vector *materials, std::map *matMap, std::string *warn, std::string *err) { - std::string filepath; - if (!m_mtlBaseDir.empty()) { - filepath = std::string(m_mtlBaseDir) + matId; - } else { - filepath = matId; - } +#if _WIN32 + char sep = ';'; +#else + char sep = ':'; +#endif + + // https://stackoverflow.com/questions/5167625/splitting-a-c-stdstring-using-tokens-e-g + std::vector paths; + std::istringstream f(m_mtlBaseDir); + + std::string s; + while (getline(f, s, sep)) { + paths.push_back(s); + } + + for (size_t i = 0; i < paths.size(); i++) { + std::string filepath = JoinPath(paths[i], matId); + + std::ifstream matIStream(filepath.c_str()); + if (matIStream) { + LoadMtl(matMap, materials, &matIStream, warn, err); + + return true; + } + } - std::ifstream matIStream(filepath.c_str()); - if (!matIStream) { std::stringstream ss; - ss << "Material file [ " << filepath << " ] not found." 
<< std::endl;
+    ss << "Material file [ " << matId
+       << " ] not found in a path : " << m_mtlBaseDir << std::endl;
     if (warn) {
       (*warn) += ss.str();
     }
     return false;
-  }
-  LoadMtl(matMap, materials, &matIStream, warn, err);
+  } else {
+    std::string filepath = matId;
+    std::ifstream matIStream(filepath.c_str());
+    if (matIStream) {
+      LoadMtl(matMap, materials, &matIStream, warn, err);
-  return true;
+      return true;
+    }
+
+    std::stringstream ss;
+    ss << "Material file [ " << filepath
+       << " ] not found in a path : " << m_mtlBaseDir << std::endl;
+    if (warn) {
+      (*warn) += ss.str();
+    }
+
+    return false;
+  }
 }

 bool MaterialStreamReader::operator()(const std::string &matId,

From 2cc39e3c8c49a163a1fc452727c8160927a53bc4 Mon Sep 17 00:00:00 2001
From: Syoyo Fujita
Date: Sun, 24 Nov 2019 18:12:17 +0900
Subject: [PATCH 003/139] if -> ifdef

---
 tiny_obj_loader.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h
index c406d151..c324df24 100644
--- a/tiny_obj_loader.h
+++ b/tiny_obj_loader.h
@@ -2033,7 +2033,7 @@ bool MaterialFileReader::operator()(const std::string &matId,
                                     std::map<std::string, int> *matMap, std::string *warn,
                                     std::string *err) {
   if (!m_mtlBaseDir.empty()) {
-#if _WIN32
+#ifdef _WIN32
     char sep = ';';
 #else
     char sep = ':';
 #endif

From bfdb443510e2e6612eedd596822c922d2b7175dc Mon Sep 17 00:00:00 2001
From: Syoyo Fujita
Date: Mon, 25 Nov 2019 20:55:38 +0900
Subject: [PATCH 004/139] Report unknown material name in `usemtl` as a warning.

---
 tiny_obj_loader.h | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h
index c324df24..23916b61 100644
--- a/tiny_obj_loader.h
+++ b/tiny_obj_loader.h
@@ -2364,6 +2364,9 @@ bool LoadObj(attrib_t *attrib, std::vector<shape_t> *shapes,
       newMaterialId = material_map[namebuf];
     } else {
       // { error!! material not found }
+      if (warn) {
+        (*warn) += "material [ " + namebuf + " ] not found in .mtl\n";
+      }
     }

     if (newMaterialId != material) {

From 4b62a197a8d092ac01b16b28a7887c2dbde27dc3 Mon Sep 17 00:00:00 2001
From: Syoyo Fujita
Date: Thu, 28 Nov 2019 16:16:09 +0900
Subject: [PATCH 005/139] Skip whitespaces between `usemtl` and name.
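
With this change the material name is read via parseString(), which skips any run of
spaces or tabs after the keyword, so a line such as `usemtl \t  Material01` now resolves
to `Material01`; previously the leading blanks were kept in the name and the material-map
lookup failed. A minimal, self-contained sketch of the relaxed tokenization (the helper
name and the sample line are illustrative only, not part of the library):

```cpp
#include <cstring>
#include <string>

// Illustrative helper: tolerate any run of blanks between `usemtl` and the name.
static std::string ParseUsemtlName(const char *line) {
  if (0 != strncmp(line, "usemtl", 6)) return std::string();
  const char *p = line + 6;
  p += strspn(p, " \t");                         // skip spaces and tabs after the keyword
  return std::string(p, strcspn(p, " \t\r\n"));  // name ends at the next blank or EOL
}

// ParseUsemtlName("usemtl \t  Material01") == "Material01"
```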
Fixes #246 --- tests/tester.cc | 30 ++++++++++++++++++++++++++++++ tiny_obj_loader.h | 10 ++++------ 2 files changed, 34 insertions(+), 6 deletions(-) diff --git a/tests/tester.cc b/tests/tester.cc index e2896391..4bb773a1 100644 --- a/tests/tester.cc +++ b/tests/tester.cc @@ -1252,6 +1252,34 @@ void test_mtl_searchpaths_issue244() { TEST_CHECK(4 == shapes[1].mesh.indices[0].vertex_index); } +void test_usemtl_whitespace_issue246() { + tinyobj::attrib_t attrib; + std::vector shapes; + std::vector materials; + + std::string warn; + std::string err; + bool ret = tinyobj::LoadObj( + &attrib, &shapes, &materials, &warn, &err, + "../models/issue-246-usemtl-whitespace.obj", + gMtlBasePath); + + TEST_CHECK(warn.empty()); + + if (!warn.empty()) { + std::cout << "WARN: " << warn << std::endl; + } + + if (!err.empty()) { + std::cerr << "ERR: " << err << std::endl; + } + + TEST_CHECK(true == ret); + TEST_CHECK(1 == shapes.size()); + TEST_CHECK(1 == materials.size()); + TEST_CHECK(0 == shapes[0].mesh.material_ids[0]); +} + // Fuzzer test. // Just check if it does not crash. // Disable by default since Windows filesystem can't create filename of afl @@ -1347,4 +1375,6 @@ TEST_LIST = { test_usemtl_then_o_issue235}, {"mtl_searchpaths_issue244", test_mtl_searchpaths_issue244}, + {"usemtl_whitespece_issue246", + test_usemtl_whitespace_issue246}, {NULL, NULL}}; diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h index 23916b61..5da8b198 100644 --- a/tiny_obj_loader.h +++ b/tiny_obj_loader.h @@ -2353,11 +2353,9 @@ bool LoadObj(attrib_t *attrib, std::vector *shapes, } // use mtl - if ((0 == strncmp(token, "usemtl", 6)) && IS_SPACE((token[6]))) { - token += 7; - std::stringstream ss; - ss << token; - std::string namebuf = ss.str(); + if ((0 == strncmp(token, "usemtl", 6))) { + token += 6; + std::string namebuf = parseString(&token); int newMaterialId = -1; if (material_map.find(namebuf) != material_map.end()) { @@ -2365,7 +2363,7 @@ bool LoadObj(attrib_t *attrib, std::vector *shapes, } else { // { error!! material not found } if (warn) { - (*warn) += "material [ " + namebuf + " ] not found in .mtl\n"; + (*warn) += "material [ '" + namebuf + "' ] not found in .mtl\n"; } } From e52dfdbecdf8893b161a65e508bc009746fa6042 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Thu, 28 Nov 2019 16:17:03 +0900 Subject: [PATCH 006/139] Test scene for issue 246. 
--- models/issue-246-usemtl-whitespace.mtl | 4 ++++ models/issue-246-usemtl-whitespace.obj | 17 +++++++++++++++++ 2 files changed, 21 insertions(+) create mode 100644 models/issue-246-usemtl-whitespace.mtl create mode 100644 models/issue-246-usemtl-whitespace.obj diff --git a/models/issue-246-usemtl-whitespace.mtl b/models/issue-246-usemtl-whitespace.mtl new file mode 100644 index 00000000..e7c91e33 --- /dev/null +++ b/models/issue-246-usemtl-whitespace.mtl @@ -0,0 +1,4 @@ +newmtl 1 +Ka 0 0 0 +Kd 1 1 1 +Ks 0 0 0 diff --git a/models/issue-246-usemtl-whitespace.obj b/models/issue-246-usemtl-whitespace.obj new file mode 100644 index 00000000..f159349b --- /dev/null +++ b/models/issue-246-usemtl-whitespace.obj @@ -0,0 +1,17 @@ +# .mtl is located at tests/assets +mtllib issue-246-usemtl-whitespace.mtl + +v -1.000000 1.202466 1.000000 +v 1.000000 1.202466 1.000000 +v -1.000000 1.202466 -1.000000 +v 1.000000 1.202466 -1.000000 +vn 0.0000 1.0000 0.0000 +v -1.000000 0.000000 1.000000 +v 1.000000 0.000000 1.000000 +v -1.000000 0.000000 -1.000000 +v 1.000000 0.000000 -1.000000 +vn 0.0000 1.0000 0.0000 + +usemtl 1 +o Plane.001 +f 1//1 2//1 4//1 From 9cc429b4af0e36932b81c6e48abdc803fc540d19 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Mon, 23 Dec 2019 17:18:24 +0900 Subject: [PATCH 007/139] Support `-texres resolution` texture option. Fixes #248 --- models/issue-248-texres-texopt.mtl | 25 +++++++++++++++++++++++ models/issue-248-texres-texopt.obj | 32 ++++++++++++++++++++++++++++++ tests/tester.cc | 30 ++++++++++++++++++++++++++++ tiny_obj_loader.h | 9 +++++++-- 4 files changed, 94 insertions(+), 2 deletions(-) create mode 100644 models/issue-248-texres-texopt.mtl create mode 100644 models/issue-248-texres-texopt.obj diff --git a/models/issue-248-texres-texopt.mtl b/models/issue-248-texres-texopt.mtl new file mode 100644 index 00000000..537fa15b --- /dev/null +++ b/models/issue-248-texres-texopt.mtl @@ -0,0 +1,25 @@ +newmtl white +Ka 0 0 0 +Kd 1 1 1 +Ks 0 0 0 +map_Kd -texres 512 input.jpg + +newmtl red +Ka 0 0 0 +Kd 1 0 0 +Ks 0 0 0 + +newmtl green +Ka 0 0 0 +Kd 0 1 0 +Ks 0 0 0 + +newmtl blue +Ka 0 0 0 +Kd 0 0 1 +Ks 0 0 0 + +newmtl light +Ka 20 20 20 +Kd 1 1 1 +Ks 0 0 0 diff --git a/models/issue-248-texres-texopt.obj b/models/issue-248-texres-texopt.obj new file mode 100644 index 00000000..43058a91 --- /dev/null +++ b/models/issue-248-texres-texopt.obj @@ -0,0 +1,32 @@ +mtllib issue-248-texres-texopt.mtl + +v 0.000000 2.000000 2.000000 +v 0.000000 0.000000 2.000000 +v 2.000000 0.000000 2.000000 +v 2.000000 2.000000 2.000000 +v 0.000000 2.000000 0.000000 +v 0.000000 0.000000 0.000000 +v 2.000000 0.000000 0.000000 +v 2.000000 2.000000 0.000000 +# 8 vertices + +g front cube +usemtl white +f 1 2 3 4 +# two white spaces between 'back' and 'cube' +g back cube +# expects white material +f 8 7 6 5 +g right cube +usemtl red +f 4 3 7 8 +g top cube +usemtl white +f 5 1 4 8 +g left cube +usemtl green +f 5 6 2 1 +g bottom cube +usemtl white +f 2 6 7 3 +# 6 elements diff --git a/tests/tester.cc b/tests/tester.cc index 4bb773a1..7538c740 100644 --- a/tests/tester.cc +++ b/tests/tester.cc @@ -1280,6 +1280,34 @@ void test_usemtl_whitespace_issue246() { TEST_CHECK(0 == shapes[0].mesh.material_ids[0]); } +void test_texres_texopt_issue248() { + tinyobj::attrib_t attrib; + std::vector shapes; + std::vector materials; + + std::string warn; + std::string err; + bool ret = tinyobj::LoadObj( + &attrib, &shapes, &materials, &warn, &err, + "../models/issue-248-texres-texopt.obj", + gMtlBasePath); + + 
TEST_CHECK(warn.empty()); + + if (!warn.empty()) { + std::cout << "WARN: " << warn << std::endl; + } + + if (!err.empty()) { + std::cerr << "ERR: " << err << std::endl; + } + + TEST_CHECK(true == ret); + TEST_CHECK(1 < materials.size()); + TEST_CHECK(512 == materials[0].diffuse_texopt.texture_resolution); + TEST_CHECK("input.jpg" == materials[0].diffuse_texname); +} + // Fuzzer test. // Just check if it does not crash. // Disable by default since Windows filesystem can't create filename of afl @@ -1377,4 +1405,6 @@ TEST_LIST = { test_mtl_searchpaths_issue244}, {"usemtl_whitespece_issue246", test_usemtl_whitespace_issue246}, + {"texres_texopt_issue248", + test_texres_texopt_issue248}, {NULL, NULL}}; diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h index 5da8b198..f9e3c649 100644 --- a/tiny_obj_loader.h +++ b/tiny_obj_loader.h @@ -155,7 +155,7 @@ typedef struct { real_t origin_offset[3]; // -o u [v [w]] (default 0 0 0) real_t scale[3]; // -s u [v [w]] (default 1 1 1) real_t turbulence[3]; // -t u [v [w]] (default 0 0 0) - // int texture_resolution; // -texres resolution (default = ?) TODO + int texture_resolution; // -texres resolution (No default value in the spec. We'll use -1) bool clamp; // -clamp (default false) char imfchan; // -imfchan (the default for bump is 'l' and for decal is 'm') bool blendu; // -blendu (default on) @@ -1194,6 +1194,10 @@ bool ParseTextureNameAndOption(std::string *texname, texture_option_t *texopt, } else if ((0 == strncmp(token, "-type", 5)) && IS_SPACE((token[5]))) { token += 5; texopt->type = parseTextureType((&token), TEXTURE_TYPE_NONE); + } else if ((0 == strncmp(token, "-texres", 7)) && IS_SPACE((token[7]))) { + token += 7; + // TODO(syoyo): Check if arg is int type. + texopt->texture_resolution = parseInt(&token); } else if ((0 == strncmp(token, "-imfchan", 8)) && IS_SPACE((token[8]))) { token += 9; token += strspn(token, " \t"); @@ -1258,6 +1262,7 @@ static void InitTexOpt(texture_option_t *texopt, const bool is_bump) { texopt->turbulence[0] = static_cast(0.0); texopt->turbulence[1] = static_cast(0.0); texopt->turbulence[2] = static_cast(0.0); + texopt->texture_resolution = -1; texopt->type = TEXTURE_TYPE_NONE; } @@ -2354,7 +2359,7 @@ bool LoadObj(attrib_t *attrib, std::vector *shapes, // use mtl if ((0 == strncmp(token, "usemtl", 6))) { - token += 6; + token += 6; std::string namebuf = parseString(&token); int newMaterialId = -1; From 2e282bc3b26214d7e26d05bbea41651940649795 Mon Sep 17 00:00:00 2001 From: Nate Koenig Date: Wed, 8 Jan 2020 10:42:07 -0800 Subject: [PATCH 008/139] Set a decent diffuse value when map_Kd is present and Kd is not specified --- tiny_obj_loader.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h index f9e3c649..e1424d0a 100644 --- a/tiny_obj_loader.h +++ b/tiny_obj_loader.h @@ -1669,6 +1669,10 @@ void LoadMtl(std::map *material_map, bool has_d = false; bool has_tr = false; + // has_kd is used to set a default diffuse value when map_Kd is present + // and Kd is not. + bool has_kd = false; + std::stringstream warn_ss; size_t line_no = 0; @@ -1750,6 +1754,7 @@ void LoadMtl(std::map *material_map, material.diffuse[0] = r; material.diffuse[1] = g; material.diffuse[2] = b; + has_kd = true; continue; } @@ -1902,6 +1907,16 @@ void LoadMtl(std::map *material_map, token += 7; ParseTextureNameAndOption(&(material.diffuse_texname), &(material.diffuse_texopt), token); + + // Set a decent diffuse default value if a diffuse texture is specified + // without a matching Kd value. 
+ if (!has_kd) + { + material.diffuse[0] = 0.6; + material.diffuse[1] = 0.6; + material.diffuse[2] = 0.6; + } + continue; } From 7e165407129cb3d177b3d2ea6e8254d481dae468 Mon Sep 17 00:00:00 2001 From: Nate Koenig Date: Fri, 10 Jan 2020 14:04:25 -0800 Subject: [PATCH 009/139] Fix windows compiler warnings --- tiny_obj_loader.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h index e1424d0a..41547cb4 100644 --- a/tiny_obj_loader.h +++ b/tiny_obj_loader.h @@ -1912,9 +1912,9 @@ void LoadMtl(std::map *material_map, // without a matching Kd value. if (!has_kd) { - material.diffuse[0] = 0.6; - material.diffuse[1] = 0.6; - material.diffuse[2] = 0.6; + material.diffuse[0] = static_cast(0.6); + material.diffuse[1] = static_cast(0.6); + material.diffuse[2] = static_cast(0.6); } continue; From d2cb6d2e3466af4cbcd139816a973ba749910025 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Fri, 24 Jan 2020 20:05:46 +0900 Subject: [PATCH 010/139] Cosmetics. --- experimental/viewer.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/experimental/viewer.cc b/experimental/viewer.cc index ba77d27a..4886b784 100644 --- a/experimental/viewer.cc +++ b/experimental/viewer.cc @@ -677,7 +677,7 @@ int main(int argc, char **argv) return -1; } - std::cout << "GLFW OK." << std::endl; + std::cout << "GLFW Init OK." << std::endl; window = glfwCreateWindow(width, height, "Obj viewer", NULL, NULL); From c4d1ec780a8dea34d5001ca18769eb7a7995d0d6 Mon Sep 17 00:00:00 2001 From: Murray Cumming Date: Sun, 26 Jan 2020 21:07:39 +0100 Subject: [PATCH 011/139] operator(): Add override keywords To avoid compiler warnings, for instance with clang's -Werror=suggest-override . --- tiny_obj_loader.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h index 41547cb4..02bd9668 100644 --- a/tiny_obj_loader.h +++ b/tiny_obj_loader.h @@ -444,7 +444,7 @@ class MaterialFileReader : public MaterialReader { virtual bool operator()(const std::string &matId, std::vector *materials, std::map *matMap, std::string *warn, - std::string *err); + std::string *err) override; private: std::string m_mtlBaseDir; @@ -461,7 +461,7 @@ class MaterialStreamReader : public MaterialReader { virtual bool operator()(const std::string &matId, std::vector *materials, std::map *matMap, std::string *warn, - std::string *err); + std::string *err) override; private: std::istream &m_inStream; From cc320259995118c003844bcc2da18753f1cbe701 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Sun, 9 Feb 2020 14:41:22 +0900 Subject: [PATCH 012/139] Update python-binding README. --- python/README.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/python/README.md b/python/README.md index f7b3d232..cd1e24dc 100644 --- a/python/README.md +++ b/python/README.md @@ -3,6 +3,14 @@ `tinyobjloader` is a python wrapper for C++ wavefront .obj loader. `tinyobjloader` is rather fast and feature rich than other pure python version of .obj loader. +## Install + +You can install `tinyobjloader` with pip. + +``` +$ pip install tinyobjloader +``` + ## Quick tutorial ```py From 2636244e73fa424bb68da6c0ae38f22d15d2f7c6 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Mon, 17 Feb 2020 13:43:06 +0900 Subject: [PATCH 013/139] `override` is a C++11 keyword. 
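
Guarding the keyword keeps the header compilable as C++03. One caveat (a note about
default compiler settings, not something this patch changes): MSVC keeps `__cplusplus`
at `199711L` unless `/Zc:__cplusplus` is passed, so a guard of this shape silently falls
back to the empty definition there, which is why the header carries a TODO about better
C++11 detection. A minimal sketch of the pattern, with made-up class names standing in
for the real reader classes:

```cpp
// Expand to `override` only when the compiler advertises C++11 or newer.
#if __cplusplus > 199711L
#define TINYOBJ_OVERRIDE override
#else
#define TINYOBJ_OVERRIDE
#endif

struct ReaderBase {
  virtual ~ReaderBase() {}
  virtual bool Load() = 0;
};

struct FileReader : ReaderBase {
  // With a C++11 compiler this is checked as an override; with C++03 the
  // macro vanishes and the code still compiles.
  virtual bool Load() TINYOBJ_OVERRIDE { return true; }
};
```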
--- tiny_obj_loader.h | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h index 02bd9668..5e27022c 100644 --- a/tiny_obj_loader.h +++ b/tiny_obj_loader.h @@ -63,6 +63,13 @@ THE SOFTWARE. namespace tinyobj { +// TODO(syoyo): Better C++11 detection for older compiler +#if __cplusplus > 199711L +#define TINYOBJ_OVERRIDE override +#else +#define TINYOBJ_OVERRIDE +#endif + #ifdef __clang__ #pragma clang diagnostic push #if __has_warning("-Wzero-as-null-pointer-constant") @@ -444,7 +451,7 @@ class MaterialFileReader : public MaterialReader { virtual bool operator()(const std::string &matId, std::vector *materials, std::map *matMap, std::string *warn, - std::string *err) override; + std::string *err) TINYOBJ_OVERRIDE; private: std::string m_mtlBaseDir; @@ -461,7 +468,7 @@ class MaterialStreamReader : public MaterialReader { virtual bool operator()(const std::string &matId, std::vector *materials, std::map *matMap, std::string *warn, - std::string *err) override; + std::string *err) TINYOBJ_OVERRIDE; private: std::istream &m_inStream; From a4426518c65b5b2a4da0b70c15f341f0bfab11d1 Mon Sep 17 00:00:00 2001 From: Paul Melnikow Date: Mon, 17 Feb 2020 12:37:10 -0500 Subject: [PATCH 014/139] Bump cibuildwheel to 1.1.0 Add support for Python 3.8. --- azure-pipelines.yml | 65 +++++++++++++-------------------------------- 1 file changed, 18 insertions(+), 47 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 213a90ff..0ba9f74f 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -11,86 +11,57 @@ jobs: make && ./tester displayName: Run unit tests - - job: python_linux - pool: { vmImage: "Ubuntu-16.04" } + - job: linux + pool: {vmImage: "Ubuntu-16.04"} steps: - task: UsePythonVersion@0 - bash: | + python -m pip install --upgrade pip + pip install cibuildwheel==1.1.0 # Make the header files available to the build. cp *.h python - python -m pip install --upgrade pip - pip install cibuildwheel==0.12.0 cd python cibuildwheel --output-dir wheelhouse . - task: PublishBuildArtifacts@1 - inputs: { pathtoPublish: "python/wheelhouse" } + inputs: {pathtoPublish: 'python/wheelhouse'} - script: | pip install black==19.10b0 black --check python/ displayName: Check Python code format - - job: python_macos - pool: { vmImage: "macOS-10.13" } + - job: macos + pool: {vmImage: 'macOS-10.13'} variables: # Support C++11: https://github.com/joerick/cibuildwheel/pull/156 MACOSX_DEPLOYMENT_TARGET: 10.9 steps: - task: UsePythonVersion@0 - bash: | + python -m pip install --upgrade pip + pip install cibuildwheel==1.1.0 # Make the header files available to the build. cp *.h python - python -m pip install --upgrade pip - pip install cibuildwheel==0.12.0 cd python cibuildwheel --output-dir wheelhouse . 
- task: PublishBuildArtifacts@1 - inputs: { pathtoPublish: "python/wheelhouse" } + inputs: {pathtoPublish: 'python/wheelhouse'} - - job: python_windows - pool: { vmImage: "vs2017-win2016" } + - job: windows + pool: {vmImage: 'vs2017-win2016'} steps: - - { - task: UsePythonVersion@0, - inputs: { versionSpec: "2.7", architecture: x86 }, - } - - { - task: UsePythonVersion@0, - inputs: { versionSpec: "2.7", architecture: x64 }, - } - - { - task: UsePythonVersion@0, - inputs: { versionSpec: "3.5", architecture: x86 }, - } - - { - task: UsePythonVersion@0, - inputs: { versionSpec: "3.5", architecture: x64 }, - } - - { - task: UsePythonVersion@0, - inputs: { versionSpec: "3.6", architecture: x86 }, - } - - { - task: UsePythonVersion@0, - inputs: { versionSpec: "3.6", architecture: x64 }, - } - - { - task: UsePythonVersion@0, - inputs: { versionSpec: "3.7", architecture: x86 }, - } - - { - task: UsePythonVersion@0, - inputs: { versionSpec: "3.7", architecture: x64 }, - } + - task: UsePythonVersion@0 - script: choco install vcpython27 -f -y displayName: Install Visual C++ for Python 2.7 - bash: | - cp *.h python python -m pip install --upgrade pip - pip install cibuildwheel==0.12.0 + pip install cibuildwheel==1.1.0 + # Make the header files available to the build. + cp *.h python cd python cibuildwheel --output-dir wheelhouse . - task: PublishBuildArtifacts@1 - inputs: { pathtoPublish: "python/wheelhouse" } + inputs: {pathtoPublish: 'python/wheelhouse'} + trigger: - master From 2ad6bb27eb3c8f6a66a9bf4b124a25c008542ded Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Wed, 19 Feb 2020 17:15:27 +0900 Subject: [PATCH 015/139] Describe the transfer of the repository. --- README.md | 1 + examples/viewer/README.md | 1 + 2 files changed, 2 insertions(+) diff --git a/README.md b/README.md index 6160babd..d1ea07e2 100644 --- a/README.md +++ b/README.md @@ -26,6 +26,7 @@ Old version is available as `v0.9.x` branch https://github.com/syoyo/tinyobjload ## What's new +* 19 Feb, 2020 : The repository has been moved to https://github.com/tinyobjloader/tinyobjloader ! * 18 May, 2019 : Python binding!(See `python` folder. Also see https://pypi.org/project/tinyobjloader/) * 14 Apr, 2019 : Bump version v2.0.0 rc0. New C++ API and python bindings!(1.x API still exists for backward compatibility) * 20 Aug, 2016 : Bump version v1.0.0. New data structure and API! diff --git a/examples/viewer/README.md b/examples/viewer/README.md index 9cb032c7..83392f01 100644 --- a/examples/viewer/README.md +++ b/examples/viewer/README.md @@ -37,6 +37,7 @@ Then, ## TODO +* [ ] Alpha texturing. * [ ] Support per-face material. * [ ] Use shader-based GL rendering. * [ ] PBR shader support. From 50a9758c4fa8115ce5c05dabebcf09f1b1a11252 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Thu, 20 Feb 2020 23:56:51 +0900 Subject: [PATCH 016/139] Changed Travis CI URL. 
--- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index d1ea07e2..7683e58d 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ [![Join the chat at https://gitter.im/syoyo/tinyobjloader](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/syoyo/tinyobjloader?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -[![Build Status](https://travis-ci.org/syoyo/tinyobjloader.svg)](https://travis-ci.org/syoyo/tinyobjloader) +[![Build Status](https://travis-ci.org/tinyobjloader/tinyobjloader.svg?branch=master)](https://travis-ci.org/tinyobjloader/tinyobjloader) [![AZ Build Status](https://dev.azure.com/syoyo/lte%20oss/_apis/build/status/syoyo.tinyobjloader?branchName=master)](https://dev.azure.com/syoyo/lte%20oss/_build/latest?definitionId=2&branchName=master) From fd47de452c5e44319d97192d6b91e91b66d9d766 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Thu, 27 Feb 2020 01:07:27 +0900 Subject: [PATCH 017/139] Deprecate gitter chat. --- README.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 7683e58d..29ad1a5f 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,5 @@ # tinyobjloader -[![Join the chat at https://gitter.im/syoyo/tinyobjloader](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/syoyo/tinyobjloader?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) - [![Build Status](https://travis-ci.org/tinyobjloader/tinyobjloader.svg?branch=master)](https://travis-ci.org/tinyobjloader/tinyobjloader) [![AZ Build Status](https://dev.azure.com/syoyo/lte%20oss/_apis/build/status/syoyo.tinyobjloader?branchName=master)](https://dev.azure.com/syoyo/lte%20oss/_build/latest?definitionId=2&branchName=master) @@ -12,6 +10,9 @@ [![Download](https://api.bintray.com/packages/conan/conan-center/tinyobjloader%3A_/images/download.svg)](https://bintray.com/conan/conan-center/tinyobjloader%3A_/_latestVersion) +[![Join the chat at https://gitter.im/syoyo/tinyobjloader](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/syoyo/tinyobjloader?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) (inactive. gitter chat will be removed in the future. Please use github issue if you have questions and got issues) + + Tiny but powerful single file wavefront obj loader written in C++03. No dependency except for C++ STL. It can parse over 10M polygons with moderate memory and time. 
`tinyobjloader` is good for embedding .obj loader to your (global illumination) renderer ;-) From d7d53217c30a7970596d1b93f97a5fd97b7cf1a5 Mon Sep 17 00:00:00 2001 From: Paul Melnikow Date: Wed, 26 Feb 2020 17:16:59 -0500 Subject: [PATCH 018/139] RELEASE 2.0.0rc5 (#256) --- python/setup.py | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/python/setup.py b/python/setup.py index bf1b8410..d7aa0ca2 100644 --- a/python/setup.py +++ b/python/setup.py @@ -82,14 +82,28 @@ def __str__(self): setuptools.setup( name="tinyobjloader", - version="0.1", - description="Python module for tinyobjloader", + version="2.0.0rc5", + description="Tiny but powerful Wavefront OBJ loader", long_description=long_description, long_description_content_type="text/markdown", author="Syoyo Fujita", author_email="syoyo@lighttransport.com", - url="https://github.com/syoyo/tinyobjloader", - classifiers=["License :: OSI Approved :: MIT License"], + url="https://github.com/tinyobjloader/tinyobjloader", + project_urls={ + "Issue Tracker": "https://github.com/tinyobjloader/tinyobjloader/issues", + }, + classifiers=[ + "Development Status :: 5 - Production/Stable", + "Intended Audience :: Developers", + "Intended Audience :: Science/Research", + "Intended Audience :: Manufacturing", + "Topic :: Artistic Software", + "Topic :: Multimedia :: Graphics :: 3D Modeling", + "Topic :: Scientific/Engineering :: Visualization", + "License :: OSI Approved :: MIT License", + "Operating System :: OS Independent", + "Programming Language :: Python :: 3", + ], packages=setuptools.find_packages(), ext_modules=[m], ) From 1b6389443db94aab097c209576242a2e5b6624f6 Mon Sep 17 00:00:00 2001 From: Adrien Kaiser Date: Thu, 27 Feb 2020 13:03:34 +0100 Subject: [PATCH 019/139] Python: Add numpy_material_ids to sample. --- python/sample.py | 1 + 1 file changed, 1 insertion(+) diff --git a/python/sample.py b/python/sample.py index 5a74972d..2f30c006 100644 --- a/python/sample.py +++ b/python/sample.py @@ -75,3 +75,4 @@ print("[{}] vt_idx {}".format(i, idx.texcoord_index)) print("numpy_indices = {}".format(shape.mesh.numpy_indices())) print("numpy_num_face_vertices = {}".format(shape.mesh.numpy_num_face_vertices())) + print("numpy_material_ids = {}".format(shape.mesh.numpy_material_ids())) From 13b634d2af1a5cac7070e6dbe2c6a8327bdd5ab6 Mon Sep 17 00:00:00 2001 From: Paul Melnikow Date: Thu, 27 Feb 2020 12:58:52 -0500 Subject: [PATCH 020/139] Move Python format check into separate CI job (#261) --- azure-pipelines.yml | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 0ba9f74f..042ad9fc 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -11,6 +11,15 @@ jobs: make && ./tester displayName: Run unit tests + - job: python_format + pool: { vmImage: "ubuntu-latest" } + steps: + - task: UsePythonVersion@0 + - script: | + pip install black==19.10b0 + black --check python/ + displayName: Check Python code format + - job: linux pool: {vmImage: "Ubuntu-16.04"} steps: @@ -24,10 +33,6 @@ jobs: cibuildwheel --output-dir wheelhouse . 
- task: PublishBuildArtifacts@1 inputs: {pathtoPublish: 'python/wheelhouse'} - - script: | - pip install black==19.10b0 - black --check python/ - displayName: Check Python code format - job: macos pool: {vmImage: 'macOS-10.13'} From 1421a10d6ed9742f5b2c1766d22faa6cfbc56248 Mon Sep 17 00:00:00 2001 From: Paul Melnikow Date: Thu, 27 Feb 2020 12:59:05 -0500 Subject: [PATCH 021/139] Bump MacOS image for Azure Pipeline (#260) The 10.13 image is being removed on 3/23. https://devblogs.microsoft.com/devops/removing-older-images-in-azure-pipelines-hosted-pools/ --- azure-pipelines.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 042ad9fc..00541f51 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -35,7 +35,7 @@ jobs: inputs: {pathtoPublish: 'python/wheelhouse'} - job: macos - pool: {vmImage: 'macOS-10.13'} + pool: {vmImage: 'macOS-10.15'} variables: # Support C++11: https://github.com/joerick/cibuildwheel/pull/156 MACOSX_DEPLOYMENT_TARGET: 10.9 From c255ab333f7a8e0c8ab5c96468cab2f665bc2ee9 Mon Sep 17 00:00:00 2001 From: Benjamin Adamson Date: Wed, 4 Mar 2020 07:48:15 -0800 Subject: [PATCH 022/139] Resolve issue #259 (#263) * Resolve issue #259 * adding missing typename (oops) * callback_t_ -> callback_t --- tiny_obj_loader.h | 42 +++++++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h index 5e27022c..7c978728 100644 --- a/tiny_obj_loader.h +++ b/tiny_obj_loader.h @@ -154,7 +154,7 @@ typedef enum { TEXTURE_TYPE_CUBE_RIGHT } texture_type_t; -typedef struct { +struct texture_option_t { texture_type_t type; // -type (default TEXTURE_TYPE_NONE) real_t sharpness; // -boost (default 1.0?) real_t brightness; // base_value in -mm option (default 0) @@ -172,9 +172,9 @@ typedef struct { // extension std::string colorspace; // Explicitly specify color space of stored texel // value. Usually `sRGB` or `linear` (default empty). -} texture_option_t; +}; -typedef struct _material_t { +struct material_t { std::string name; real_t ambient[3]; @@ -317,25 +317,25 @@ typedef struct _material_t { #endif -} material_t; +}; -typedef struct { +struct tag_t { std::string name; std::vector intValues; std::vector floatValues; std::vector stringValues; -} tag_t; +}; // Index struct to support different indices for vtx/normal/texcoord. // -1 means not used. -typedef struct { +struct index_t { int vertex_index; int normal_index; int texcoord_index; -} index_t; +}; -typedef struct { +struct mesh_t { std::vector indices; std::vector num_face_vertices; // The number of vertices per @@ -346,28 +346,28 @@ typedef struct { // ID(0 = off. positive value // = group id) std::vector tags; // SubD tag -} mesh_t; +}; -// typedef struct { +// struct path_t { // std::vector indices; // pairs of indices for lines -//} path_t; +//}; -typedef struct { +struct lines_t { // Linear flattened indices. std::vector indices; // indices for vertices(poly lines) std::vector num_line_vertices; // The number of vertices per line. 
-} lines_t; +}; -typedef struct { +struct points_t { std::vector indices; // indices for points -} points_t; +}; -typedef struct { +struct shape_t { std::string name; mesh_t mesh; lines_t lines; points_t points; -} shape_t; +}; // Vertex attributes struct attrib_t { @@ -393,7 +393,7 @@ struct attrib_t { const std::vector &GetVertexWeights() const { return vertex_weights; } }; -typedef struct callback_t_ { +struct callback_t { // W is optional and set to 1 if there is no `w` item in `v` line void (*vertex_cb)(void *user_data, real_t x, real_t y, real_t z, real_t w); void (*normal_cb)(void *user_data, real_t x, real_t y, real_t z); @@ -417,7 +417,7 @@ typedef struct callback_t_ { void (*group_cb)(void *user_data, const char **names, int num_names); void (*object_cb)(void *user_data, const char *name); - callback_t_() + callback_t() : vertex_cb(NULL), normal_cb(NULL), texcoord_cb(NULL), @@ -426,7 +426,7 @@ typedef struct callback_t_ { mtllib_cb(NULL), group_cb(NULL), object_cb(NULL) {} -} callback_t; +}; class MaterialReader { public: From 19c3ec9e2efbf02447ee93ea2f3ea48b9c3f168b Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Mon, 9 Mar 2020 14:39:48 +0900 Subject: [PATCH 023/139] Add skin weight support(`vw` tag. tinyobjloader extension) --- examples/skin_weight/Makefile | 2 + examples/skin_weight/README.md | 7 +++ examples/skin_weight/main.cc | 103 +++++++++++++++++++++++++++++++++ models/skin-weight.obj | 43 ++++++++++++++ tiny_obj_loader.h | 86 ++++++++++++++++++++++++--- 5 files changed, 233 insertions(+), 8 deletions(-) create mode 100644 examples/skin_weight/Makefile create mode 100644 examples/skin_weight/README.md create mode 100644 examples/skin_weight/main.cc create mode 100644 models/skin-weight.obj diff --git a/examples/skin_weight/Makefile b/examples/skin_weight/Makefile new file mode 100644 index 00000000..59e4e3c3 --- /dev/null +++ b/examples/skin_weight/Makefile @@ -0,0 +1,2 @@ +all: + clang++ -std=c++11 -o skin_weight -I../../ -g main.cc diff --git a/examples/skin_weight/README.md b/examples/skin_weight/README.md new file mode 100644 index 00000000..800d8472 --- /dev/null +++ b/examples/skin_weight/README.md @@ -0,0 +1,7 @@ +This example printf skin weight of vertex(`vw`). TinyObjLoader extension. 
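+
+Each `vw` line pairs a vertex index with one or more (joint index, weight) tuples; for
+example `vw 1 0 0.5 1 0.5` gives vertex 1 a weight of 0.5 for joint 0 and 0.5 for joint 1
+(see `models/skin-weight.obj`).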
+ +## Run example + +``` +$ ./skin_weight ../../models/skin-weight.obj +``` diff --git a/examples/skin_weight/main.cc b/examples/skin_weight/main.cc new file mode 100644 index 00000000..a3770afe --- /dev/null +++ b/examples/skin_weight/main.cc @@ -0,0 +1,103 @@ +// +// g++ -g -std=c++11 main.cc +// +#define TINYOBJLOADER_IMPLEMENTATION +#include "tiny_obj_loader.h" + +#include +#include +#include +#include +#include +#include + +#include // C++11 + +#ifdef __clang__ +#pragma clang diagnostic push +#if __has_warning("-Wzero-as-null-pointer-constant") +#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant" +#endif +#endif + +static void ConstructVertexWeight( + const std::vector &vertices, + const std::vector &skin_weights, + std::vector *vertex_skin_weights) +{ + size_t num_vertices = vertices.size() / 3; + + vertex_skin_weights->resize(num_vertices); + + for (size_t i = 0; i < skin_weights.size(); i++) { + const tinyobj::skin_weight_t &skin = skin_weights[i]; + + assert(skin.vertex_id >= 0); + assert(skin.vertex_id < num_vertices); + + (*vertex_skin_weights)[skin.vertex_id] = skin; + } + + // now you can lookup i'th vertex skin weight by `vertex_skin_weights[i]` + + +} + +static bool TestLoadObj(const char* filename, const char* basepath = nullptr, + bool triangulate = true) { + std::cout << "Loading " << filename << std::endl; + + tinyobj::attrib_t attrib; + std::vector shapes; + std::vector materials; + + std::string warn; + std::string err; + bool ret = tinyobj::LoadObj(&attrib, &shapes, &materials, &warn, &err, filename, + basepath, triangulate); + if (!warn.empty()) { + std::cout << "WARN: " << warn << std::endl; + } + + if (!err.empty()) { + std::cerr << "ERR: " << err << std::endl; + } + + if (!ret) { + printf("Failed to load/parse .obj.\n"); + return false; + } + + std::vector vertex_skin_weights; + + ConstructVertexWeight( + attrib.vertices, + attrib.skin_weights, + &vertex_skin_weights); + + for (size_t v = 0; v < vertex_skin_weights.size(); v++) { + std::cout << "vertex[" << v << "] num_weights = " << vertex_skin_weights[v].weightValues.size() << "\n"; + for (size_t w = 0; w < vertex_skin_weights[v].weightValues.size(); w++) { + std::cout << " w[" << w << "] joint = " << vertex_skin_weights[v].weightValues[w].joint_id + << ", weight = " << vertex_skin_weights[v].weightValues[w].weight << "\n"; + } + } + + return true; +} + + +int main(int argc, char** argv) { + if (argc < 2) { + std::cerr << "Need input.obj\n"; + return EXIT_FAILURE; + } + + const char* basepath = nullptr; + if (argc > 2) { + basepath = argv[2]; + } + assert(true == TestLoadObj(argv[1], basepath)); + + return 0; +} diff --git a/models/skin-weight.obj b/models/skin-weight.obj new file mode 100644 index 00000000..41f182f5 --- /dev/null +++ b/models/skin-weight.obj @@ -0,0 +1,43 @@ +mtllib cube.mtl + +v 0.000000 2.000000 2.000000 +v 0.000000 0.000000 2.000000 +v 2.000000 0.000000 2.000000 +v 2.000000 2.000000 2.000000 +v 0.000000 2.000000 0.000000 +v 0.000000 0.000000 0.000000 +v 2.000000 0.000000 0.000000 +v 2.000000 2.000000 0.000000 +# 8 vertices + +vw 0 0 1.0 +vw 1 0 0.5 1 0.5 +vw 2 1 1.0 +vw 3 2 1.0 +vw 4 3 1.0 +vw 5 0 0.25 1 0.25 2 0.25 3 0.25 +# No weight for 6th vertex +# vw 6 0 1.0 +vw 7 0 1.0 +# max 4 joints + +g front cube +usemtl white +f 1 2 3 4 +# two white spaces between 'back' and 'cube' +g back cube +# expects white material +f 8 7 6 5 +g right cube +usemtl red +f 4 3 7 8 +g top cube +usemtl white +f 5 1 4 8 +g left cube +usemtl green +f 5 6 2 1 +g bottom cube +usemtl white +f 2 6 7 3 
+# 6 elements diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h index 7c978728..e4dba982 100644 --- a/tiny_obj_loader.h +++ b/tiny_obj_loader.h @@ -27,6 +27,7 @@ THE SOFTWARE. // * Support line primitive. // * Support points primitive. // * Support multiple search path for .mtl(v1 API). +// * Support vertex weight `vw`(as an tinyobj extension) // version 1.4.0 : Modifed ParseTextureNameAndOption API // version 1.3.1 : Make ParseTextureNameAndOption API public // version 1.3.0 : Separate warning and error message(breaking API of LoadObj) @@ -162,8 +163,9 @@ struct texture_option_t { real_t origin_offset[3]; // -o u [v [w]] (default 0 0 0) real_t scale[3]; // -s u [v [w]] (default 1 1 1) real_t turbulence[3]; // -t u [v [w]] (default 0 0 0) - int texture_resolution; // -texres resolution (No default value in the spec. We'll use -1) - bool clamp; // -clamp (default false) + int texture_resolution; // -texres resolution (No default value in the spec. + // We'll use -1) + bool clamp; // -clamp (default false) char imfchan; // -imfchan (the default for bump is 'l' and for decal is 'm') bool blendu; // -blendu (default on) bool blendv; // -blendv (default on) @@ -316,7 +318,6 @@ struct material_t { } #endif - }; struct tag_t { @@ -327,6 +328,18 @@ struct tag_t { std::vector stringValues; }; +struct joint_and_weight_t { + int joint_id; + real_t weight; +}; + +struct skin_weight_t { + int vertex_id; // Corresponding vertex index in `attrib_t::vertices`. + // Compared to `index_t`, this index must be positive and + // start with 0(does not allow relative indexing) + std::vector weightValues; +}; + // Index struct to support different indices for vtx/normal/texcoord. // -1 means not used. struct index_t { @@ -383,6 +396,16 @@ struct attrib_t { std::vector texcoord_ws; // 'vt'(w) std::vector colors; // extension: vertex colors + // + // TinyObj extension. + // + + // NOTE(syoyo): array index is based on the appearance order. + // To get a corresponding skin weight for a specific vertex id `vid`, + // Need to reconstruct a look up table: `skin_weight_t::vertex_id` == `vid` + // (e.g. using std::map, std::unordered_map) + std::vector skin_weights; + attrib_t() {} // @@ -625,11 +648,10 @@ bool ParseTextureNameAndOption(std::string *texname, texture_option_t *texopt, #include #include #include -#include -#include - #include +#include #include +#include namespace tinyobj { @@ -1917,8 +1939,7 @@ void LoadMtl(std::map *material_map, // Set a decent diffuse default value if a diffuse texture is specified // without a matching Kd value. - if (!has_kd) - { + if (!has_kd) { material.diffuse[0] = static_cast(0.6); material.diffuse[1] = static_cast(0.6); material.diffuse[2] = static_cast(0.6); @@ -2181,6 +2202,7 @@ bool LoadObj(attrib_t *attrib, std::vector *shapes, std::vector vn; std::vector vt; std::vector vc; + std::vector vw; std::vector tags; PrimGroup prim_group; std::string name; @@ -2274,6 +2296,53 @@ bool LoadObj(attrib_t *attrib, std::vector *shapes, continue; } + // skin weight. tinyobj extension + if (token[0] == 'v' && token[1] == 'w' && IS_SPACE((token[2]))) { + token += 3; + + // vw ... 
+ // example: + // vw 0 0 0.25 1 0.25 2 0.5 + + // TODO(syoyo): Add syntax check + int vid = 0; + vid = parseInt(&token); + + skin_weight_t sw; + + sw.vertex_id = vid; + + while (!IS_NEW_LINE(token[0])) { + real_t j, w; + // joint_id should not be negative, weight may be negative + // TODO(syoyo): # of elements check + parseReal2(&j, &w, &token, -1.0); + + if (j < 0.0) { + if (err) { + std::stringstream ss; + ss << "Failed parse `vw' line. joint_id is negative. " + "line " + << line_num << ".)\n"; + (*err) += ss.str(); + } + return false; + } + + joint_and_weight_t jw; + + jw.joint_id = int(j); + jw.weight = w; + + sw.weightValues.push_back(jw); + + size_t n = strspn(token, " \t\r"); + token += n; + } + + vw.push_back(sw); + } + // line if (token[0] == 'l' && IS_SPACE((token[1]))) { token += 2; @@ -2675,6 +2744,7 @@ bool LoadObj(attrib_t *attrib, std::vector *shapes, attrib->texcoords.swap(vt); attrib->texcoord_ws.swap(vt); attrib->colors.swap(vc); + attrib->skin_weights.swap(vw); return true; } From 46d47686132bc221aa884d02f8310d75eb967b87 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Mon, 9 Mar 2020 21:08:14 +0900 Subject: [PATCH 024/139] Add `override` keyword to fix clang compilation with c++11 + stricter compile setting(-std=c++11 -Werror -Weverything -Wno-c++98-compat) --- tiny_obj_loader.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h index 7c978728..b35ae5af 100644 --- a/tiny_obj_loader.h +++ b/tiny_obj_loader.h @@ -447,7 +447,7 @@ class MaterialFileReader : public MaterialReader { // Path could contain separator(';' in Windows, ':' in Posix) explicit MaterialFileReader(const std::string &mtl_basedir) : m_mtlBaseDir(mtl_basedir) {} - virtual ~MaterialFileReader() {} + virtual ~MaterialFileReader() TINYOBJ_OVERRIDE {} virtual bool operator()(const std::string &matId, std::vector *materials, std::map *matMap, std::string *warn, @@ -464,7 +464,7 @@ class MaterialStreamReader : public MaterialReader { public: explicit MaterialStreamReader(std::istream &inStream) : m_inStream(inStream) {} - virtual ~MaterialStreamReader() {} + virtual ~MaterialStreamReader() TINYOBJ_OVERRIDE {} virtual bool operator()(const std::string &matId, std::vector *materials, std::map *matMap, std::string *warn, From 700459d4159ed6f2b9ebb7f4f08dc8ae51dc60e0 Mon Sep 17 00:00:00 2001 From: Lily <47812810+LilyWangL@users.noreply.github.com> Date: Thu, 19 Mar 2020 23:11:17 +0800 Subject: [PATCH 025/139] Add vcpkg installation instructions (#267) * Add vcpkg installation instructions * Add vcpkg installation instructions --- README.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/README.md b/README.md index 29ad1a5f..cf26fb82 100644 --- a/README.md +++ b/README.md @@ -147,6 +147,18 @@ One option is to simply copy the header file into your project and to make sure Tinyobjlaoder is also available as a [conan package](https://bintray.com/conan/conan-center/tinyobjloader%3A_/_latestVersion). Conan integrates with many build systems and lets you avoid manual dependency installation. Their [documentation](https://docs.conan.io/en/latest/getting_started.html) is a great starting point. 
+### Building tinyobjloader - Using vcpkg + +You can download and install tinyobjloader using the [vcpkg](https://github.com/Microsoft/vcpkg) dependency manager: + + git clone https://github.com/Microsoft/vcpkg.git + cd vcpkg + ./bootstrap-vcpkg.sh + ./vcpkg integrate install + ./vcpkg install tinyobjloader + +The tinyobjloader port in vcpkg is kept up to date by Microsoft team members and community contributors. If the version is out of date, please [create an issue or pull request](https://github.com/Microsoft/vcpkg) on the vcpkg repository. + ### Data format `attrib_t` contains single and linear array of vertex data(position, normal and texcoord). From cbf733c0146988df83e2b87f0d3dfe436737ba6c Mon Sep 17 00:00:00 2001 From: domgho Date: Mon, 30 Mar 2020 17:33:09 +0200 Subject: [PATCH 026/139] Fix calculating geometric normals (#270) --- examples/viewer/viewer.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/viewer/viewer.cc b/examples/viewer/viewer.cc index 9ab1142c..145a140d 100644 --- a/examples/viewer/viewer.cc +++ b/examples/viewer/viewer.cc @@ -181,9 +181,9 @@ static void CalcNormal(float N[3], float v0[3], float v1[3], float v2[3]) { v20[1] = v2[1] - v0[1]; v20[2] = v2[2] - v0[2]; - N[0] = v20[1] * v10[2] - v20[2] * v10[1]; - N[1] = v20[2] * v10[0] - v20[0] * v10[2]; - N[2] = v20[0] * v10[1] - v20[1] * v10[0]; + N[0] = v10[1] * v20[2] - v10[2] * v20[1]; + N[1] = v10[2] * v20[0] - v10[0] * v20[2]; + N[2] = v10[0] * v20[1] - v10[1] * v20[0]; float len2 = N[0] * N[0] + N[1] * N[1] + N[2] * N[2]; if (len2 > 0.0f) { From e39c1737bc61c8dce28be7932cfe839d408e7838 Mon Sep 17 00:00:00 2001 From: domgho Date: Mon, 30 Mar 2020 17:35:55 +0200 Subject: [PATCH 027/139] Remove duplicate lookups (#271) See PR #213 --- tiny_obj_loader.h | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h index b35ae5af..7fa95c90 100644 --- a/tiny_obj_loader.h +++ b/tiny_obj_loader.h @@ -2385,8 +2385,9 @@ bool LoadObj(attrib_t *attrib, std::vector *shapes, std::string namebuf = parseString(&token); int newMaterialId = -1; - if (material_map.find(namebuf) != material_map.end()) { - newMaterialId = material_map[namebuf]; + std::map::const_iterator it = material_map.find(namebuf); + if (it != material_map.end()) { + newMaterialId = it->second; } else { // { error!! material not found } if (warn) { @@ -2793,8 +2794,9 @@ bool LoadObjWithCallback(std::istream &inStream, const callback_t &callback, std::string namebuf = ss.str(); int newMaterialId = -1; - if (material_map.find(namebuf) != material_map.end()) { - newMaterialId = material_map[namebuf]; + std::map::const_iterator it = material_map.find(namebuf); + if (it != material_map.end()) { + newMaterialId = it->second; } else { // { warn!! 
material not found } if (warn && (!callback.usemtl_cb)) { @@ -2970,8 +2972,9 @@ bool ObjReader::ParseFromFile(const std::string &filename, // split at last '/'(for unixish system) or '\\'(for windows) to get // the base directory of .obj file // - if (filename.find_last_of("/\\") != std::string::npos) { - mtl_search_path = filename.substr(0, filename.find_last_of("/\\")); + size_t pos = filename.find_last_of("/\\"); + if (pos != std::string::npos) { + mtl_search_path = filename.substr(0, pos); } } else { mtl_search_path = config.mtl_search_path; From df61facbd7e69981268787a969f583cacdda7d0f Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Wed, 29 Apr 2020 19:13:47 +0900 Subject: [PATCH 028/139] Bump version 2.0.0-rc6 --- python/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/setup.py b/python/setup.py index d7aa0ca2..41324487 100644 --- a/python/setup.py +++ b/python/setup.py @@ -82,7 +82,7 @@ def __str__(self): setuptools.setup( name="tinyobjloader", - version="2.0.0rc5", + version="2.0.0rc6", description="Tiny but powerful Wavefront OBJ loader", long_description=long_description, long_description_content_type="text/markdown", From 4cb4af6ecea1345cd73d00594ed5d4e3cc52d680 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Wed, 6 May 2020 12:43:17 +0900 Subject: [PATCH 029/139] Bump SOVERSION in cmake. Fixes #274 --- CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index bbd7633b..e0076eaa 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -3,8 +3,8 @@ #on the platform and configuration it is set to build in. project(tinyobjloader) cmake_minimum_required(VERSION 2.8.11) -set(TINYOBJLOADER_SOVERSION 1) -set(TINYOBJLOADER_VERSION 1.0.4) +set(TINYOBJLOADER_SOVERSION 2) +set(TINYOBJLOADER_VERSION 2.0.0-rc.6) #optional double precision support option(TINYOBJLOADER_USE_DOUBLE "Build library with double precision instead of single (float)" OFF) From 93377859892898beefdb663bc29c069f8f98280a Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Wed, 27 May 2020 22:28:28 +0900 Subject: [PATCH 030/139] auto PyPi twine upload experiment. --- azure-pipelines.yml | 32 +++++++++++++++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 00541f51..149d6653 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -1,6 +1,9 @@ variables: CIBW_BEFORE_BUILD: pip install pybind11==2.4.3 - CIBW_SKIP: cp27-win* + #CIBW_TEST_COMMAND: TODO "python -c \"import tinyobjloader; tinyobjloader.test()\"" + CIBW_BUILD_VERBOSITY: "2" + CIBW_MANYLINUX_X86_64_IMAGE: manylinux2014 + CIBW_MANYLINUX_I686_IMAGE: manylinux2014 jobs: - job: unit_linux @@ -33,6 +36,7 @@ jobs: cibuildwheel --output-dir wheelhouse . - task: PublishBuildArtifacts@1 inputs: {pathtoPublish: 'python/wheelhouse'} + artifact: tinyobjDeployLinux - job: macos pool: {vmImage: 'macOS-10.15'} @@ -50,6 +54,7 @@ jobs: cibuildwheel --output-dir wheelhouse . - task: PublishBuildArtifacts@1 inputs: {pathtoPublish: 'python/wheelhouse'} + artifact: tinyobjDeployMacOS - job: windows pool: {vmImage: 'vs2017-win2016'} @@ -66,6 +71,31 @@ jobs: cibuildwheel --output-dir wheelhouse . 
- task: PublishBuildArtifacts@1 inputs: {pathtoPublish: 'python/wheelhouse'} + artifact: tinyobjDeployWindows + +- job: deployPyPI + # Based on vispy: https://github.com/vispy/vispy/blob/master/azure-pipelines.yml + pool: {vmImage: 'Ubuntu-16.04'} + condition: and(succeeded(), startsWith(variables['Build.SourceBranch'], 'refs/tags/v')) + dependsOn: + - linux + - macos + - windows + steps: + - task: UsePythonVersion@0 + - task: DownloadPipelineArtifact@2 + inputs: + patterns: | + tinyobjDeployLinux/* + tinyobjDeployMacOS/*.whl + tinyobjDeployWindows/*.whl + - bash: | + cd $(Pipeline.Workspace)/python + python -m pip install --upgrade pip + pip install twine + twine upload -u "__token__" --skip-existing vispyDeployLinux/* vispyDeployMacOS/* vispyDeployWindows/* + env: + TWINE_PASSWORD: $(pypiToken2) trigger: From 6e633f3d7215b91563468488db2902a7819be054 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Wed, 27 May 2020 22:42:16 +0900 Subject: [PATCH 031/139] Update azure-pipelines.yml for Azure Pipelines --- azure-pipelines.yml | 47 ++++++++++++++++++++++----------------------- 1 file changed, 23 insertions(+), 24 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 149d6653..a41493b1 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -73,30 +73,29 @@ jobs: inputs: {pathtoPublish: 'python/wheelhouse'} artifact: tinyobjDeployWindows -- job: deployPyPI - # Based on vispy: https://github.com/vispy/vispy/blob/master/azure-pipelines.yml - pool: {vmImage: 'Ubuntu-16.04'} - condition: and(succeeded(), startsWith(variables['Build.SourceBranch'], 'refs/tags/v')) - dependsOn: - - linux - - macos - - windows - steps: - - task: UsePythonVersion@0 - - task: DownloadPipelineArtifact@2 - inputs: - patterns: | - tinyobjDeployLinux/* - tinyobjDeployMacOS/*.whl - tinyobjDeployWindows/*.whl - - bash: | - cd $(Pipeline.Workspace)/python - python -m pip install --upgrade pip - pip install twine - twine upload -u "__token__" --skip-existing vispyDeployLinux/* vispyDeployMacOS/* vispyDeployWindows/* - env: - TWINE_PASSWORD: $(pypiToken2) - + - job: deployPyPI + # Based on vispy: https://github.com/vispy/vispy/blob/master/azure-pipelines.yml + pool: {vmImage: 'Ubuntu-16.04'} + condition: and(succeeded(), startsWith(variables['Build.SourceBranch'], 'refs/tags/v')) + dependsOn: + - linux + - macos + - windows + steps: + - task: UsePythonVersion@0 + - task: DownloadPipelineArtifact@2 + inputs: + patterns: | + tinyobjDeployLinux/* + tinyobjDeployMacOS/*.whl + tinyobjDeployWindows/*.whl + - bash: | + cd $(Pipeline.Workspace)/python + python -m pip install --upgrade pip + pip install twine + twine upload -u "__token__" --skip-existing vispyDeployLinux/* vispyDeployMacOS/* vispyDeployWindows/* + env: + TWINE_PASSWORD: $(pypiToken2) trigger: - master From e4164d49c15cf50312eae5e1307994da8e9a474d Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Wed, 27 May 2020 22:44:08 +0900 Subject: [PATCH 032/139] Update azure-pipelines.yml for Azure Pipelines --- azure-pipelines.yml | 46 ++++++++++++++++++++++----------------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index a41493b1..00c0c011 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -73,29 +73,29 @@ jobs: inputs: {pathtoPublish: 'python/wheelhouse'} artifact: tinyobjDeployWindows - - job: deployPyPI - # Based on vispy: https://github.com/vispy/vispy/blob/master/azure-pipelines.yml - pool: {vmImage: 'Ubuntu-16.04'} - condition: and(succeeded(), 
startsWith(variables['Build.SourceBranch'], 'refs/tags/v')) - dependsOn: - - linux - - macos - - windows - steps: - - task: UsePythonVersion@0 - - task: DownloadPipelineArtifact@2 - inputs: - patterns: | - tinyobjDeployLinux/* - tinyobjDeployMacOS/*.whl - tinyobjDeployWindows/*.whl - - bash: | - cd $(Pipeline.Workspace)/python - python -m pip install --upgrade pip - pip install twine - twine upload -u "__token__" --skip-existing vispyDeployLinux/* vispyDeployMacOS/* vispyDeployWindows/* - env: - TWINE_PASSWORD: $(pypiToken2) + - job: deployPyPI + # Based on vispy: https://github.com/vispy/vispy/blob/master/azure-pipelines.yml + pool: {vmImage: 'Ubuntu-16.04'} + condition: and(succeeded(), startsWith(variables['Build.SourceBranch'], 'refs/tags/v')) + dependsOn: + - linux + - macos + - windows + steps: + - task: UsePythonVersion@0 + - task: DownloadPipelineArtifact@2 + inputs: + patterns: | + tinyobjDeployLinux/* + tinyobjDeployMacOS/*.whl + tinyobjDeployWindows/*.whl + - bash: | + cd $(Pipeline.Workspace)/python + python -m pip install --upgrade pip + pip install twine + twine upload -u "__token__" --skip-existing vispyDeployLinux/* vispyDeployMacOS/* vispyDeployWindows/* + env: + TWINE_PASSWORD: $(pypiToken2) trigger: - master From 6bcf8a737e350d8cd58881463a08ec8b594dc8ed Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Wed, 27 May 2020 22:46:54 +0900 Subject: [PATCH 033/139] Update URL for Azure Pipeline badge. --- README.md | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index cf26fb82..88526896 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ [![Build Status](https://travis-ci.org/tinyobjloader/tinyobjloader.svg?branch=master)](https://travis-ci.org/tinyobjloader/tinyobjloader) -[![AZ Build Status](https://dev.azure.com/syoyo/lte%20oss/_apis/build/status/syoyo.tinyobjloader?branchName=master)](https://dev.azure.com/syoyo/lte%20oss/_build/latest?definitionId=2&branchName=master) +[![AZ Build Status](https://dev.azure.com/tinyobjloader/tinyobjloader/_apis/build/status/tinyobjloader.tinyobjloader?branchName=master)](https://dev.azure.com/tinyobjloader/tinyobjloader/_build/latest?definitionId=1&branchName=master) [![AppVeyor Build status](https://ci.appveyor.com/api/projects/status/tlb421q3t2oyobcn/branch/master?svg=true)](https://ci.appveyor.com/project/syoyo/tinyobjloader/branch/master) @@ -10,7 +10,7 @@ [![Download](https://api.bintray.com/packages/conan/conan-center/tinyobjloader%3A_/images/download.svg)](https://bintray.com/conan/conan-center/tinyobjloader%3A_/_latestVersion) -[![Join the chat at https://gitter.im/syoyo/tinyobjloader](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/syoyo/tinyobjloader?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) (inactive. gitter chat will be removed in the future. Please use github issue if you have questions and got issues) +[![Join the chat at https://gitter.im/syoyo/tinyobjloader](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/syoyo/tinyobjloader?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) (inactive. gitter chat will be removed in the future. Please use github issue if you have questions and got issues) Tiny but powerful single file wavefront obj loader written in C++03. No dependency except for C++ STL. It can parse over 10M polygons with moderate memory and time. @@ -314,6 +314,11 @@ Here is some benchmark result. 
Time are measured on MacBook 12(Early 2016, Core * baseline(v1.0.x): 6800 msecs(2.3x faster than old version) * optimised: 1500 msecs(10x faster than old version, 4.5x faster than baseline) +## Python binding + +### CI + PyPI upload + +cibuildwheels + twine upload for each git tagging event is handled in Azure Pipeline. ## Tests From f585a63738161992fafc7869c4cbe7906944a9d8 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Wed, 27 May 2020 22:59:06 +0900 Subject: [PATCH 034/139] Update azure-pipelines.yml for Azure Pipelines --- azure-pipelines.yml | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 00c0c011..5f1dcfd5 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -34,9 +34,10 @@ jobs: cp *.h python cd python cibuildwheel --output-dir wheelhouse . - - task: PublishBuildArtifacts@1 - inputs: {pathtoPublish: 'python/wheelhouse'} - artifact: tinyobjDeployLinux + - task: PublishPipelineArtifact@1 + inputs: + path: $(Pipeline.Workspace)/python/wheelhouse + artifact: tinyobjDeployLinux - job: macos pool: {vmImage: 'macOS-10.15'} @@ -52,9 +53,10 @@ jobs: cp *.h python cd python cibuildwheel --output-dir wheelhouse . - - task: PublishBuildArtifacts@1 - inputs: {pathtoPublish: 'python/wheelhouse'} - artifact: tinyobjDeployMacOS + - task: PublishPipelineArtifact@1 + inputs: + path: $(Pipeline.Workspace)/python/wheelhouse + artifact: tinyobjDeployMacOS - job: windows pool: {vmImage: 'vs2017-win2016'} @@ -69,9 +71,10 @@ jobs: cp *.h python cd python cibuildwheel --output-dir wheelhouse . - - task: PublishBuildArtifacts@1 - inputs: {pathtoPublish: 'python/wheelhouse'} - artifact: tinyobjDeployWindows + - task: PublishPipelineArtifact@1 + inputs: + path: $(Pipeline.Workspace)/python/wheelhouse + artifact: tinyobjDeployWindows - job: deployPyPI # Based on vispy: https://github.com/vispy/vispy/blob/master/azure-pipelines.yml @@ -85,10 +88,10 @@ jobs: - task: UsePythonVersion@0 - task: DownloadPipelineArtifact@2 inputs: - patterns: | - tinyobjDeployLinux/* - tinyobjDeployMacOS/*.whl - tinyobjDeployWindows/*.whl + patterns: | + tinyobjDeployLinux/* + tinyobjDeployMacOS/*.whl + tinyobjDeployWindows/*.whl - bash: | cd $(Pipeline.Workspace)/python python -m pip install --upgrade pip From 5925923f34e836374b1e4f584e672103a5e099a0 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Thu, 28 May 2020 02:05:12 +0900 Subject: [PATCH 035/139] Update azure-pipelines.yml for Azure Pipelines --- azure-pipelines.yml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 5f1dcfd5..89cb6076 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -1,5 +1,6 @@ variables: - CIBW_BEFORE_BUILD: pip install pybind11==2.4.3 + # It seems pybind11 is already installed in CIBW docker image + #CIBW_BEFORE_BUILD: pip install pybind11==2.4.3 #CIBW_TEST_COMMAND: TODO "python -c \"import tinyobjloader; tinyobjloader.test()\"" CIBW_BUILD_VERBOSITY: "2" CIBW_MANYLINUX_X86_64_IMAGE: manylinux2014 @@ -36,7 +37,7 @@ jobs: cibuildwheel --output-dir wheelhouse . - task: PublishPipelineArtifact@1 inputs: - path: $(Pipeline.Workspace)/python/wheelhouse + path: $(System.DefaultWorkingDirectory)/python/wheelhouse artifact: tinyobjDeployLinux - job: macos @@ -55,7 +56,7 @@ jobs: cibuildwheel --output-dir wheelhouse . 
- task: PublishPipelineArtifact@1 inputs: - path: $(Pipeline.Workspace)/python/wheelhouse + path: $(System.DefaultWorkingDirectory)/python/wheelhouse artifact: tinyobjDeployMacOS - job: windows @@ -73,7 +74,7 @@ jobs: cibuildwheel --output-dir wheelhouse . - task: PublishPipelineArtifact@1 inputs: - path: $(Pipeline.Workspace)/python/wheelhouse + path: $(System.DefaultWorkingDirectory)/python/wheelhouse artifact: tinyobjDeployWindows - job: deployPyPI From e28bccc9f9b0f9e5d3ee4d4afa5524152adb37ef Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Thu, 28 May 2020 02:14:05 +0900 Subject: [PATCH 036/139] Update azure-pipelines.yml for Azure Pipelines --- azure-pipelines.yml | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 89cb6076..850389fc 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -1,6 +1,7 @@ variables: - # It seems pybind11 is already installed in CIBW docker image - #CIBW_BEFORE_BUILD: pip install pybind11==2.4.3 + CIBW_BEFORE_BUILD: "pip install -U numpy" + CIBW_BEFORE_BUILD_MACOS: "pip install -U pip setuptools" + CIBW_BEFORE_BUILD_LINUX: "pip install -U pip setuptools" #CIBW_TEST_COMMAND: TODO "python -c \"import tinyobjloader; tinyobjloader.test()\"" CIBW_BUILD_VERBOSITY: "2" CIBW_MANYLINUX_X86_64_IMAGE: manylinux2014 @@ -30,7 +31,7 @@ jobs: - task: UsePythonVersion@0 - bash: | python -m pip install --upgrade pip - pip install cibuildwheel==1.1.0 + pip install cibuildwheel twine # Make the header files available to the build. cp *.h python cd python @@ -49,7 +50,7 @@ jobs: - task: UsePythonVersion@0 - bash: | python -m pip install --upgrade pip - pip install cibuildwheel==1.1.0 + pip install cibuildwheel # Make the header files available to the build. cp *.h python cd python @@ -67,7 +68,7 @@ jobs: displayName: Install Visual C++ for Python 2.7 - bash: | python -m pip install --upgrade pip - pip install cibuildwheel==1.1.0 + pip install cibuildwheel # Make the header files available to the build. cp *.h python cd python @@ -102,7 +103,12 @@ jobs: TWINE_PASSWORD: $(pypiToken2) trigger: - - master + branches: + include: + - '*' + tags: + include: + - 'v*' pr: branches: From aa9668362ec9e7399db0ddcc904bf7bd812ce8bf Mon Sep 17 00:00:00 2001 From: Syoyo Date: Thu, 28 May 2020 02:41:51 +0900 Subject: [PATCH 037/139] Skip py27 build for windows. Use python3 for linux and macOS --- azure-pipelines.yml | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 850389fc..08caa8b1 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -1,7 +1,10 @@ variables: - CIBW_BEFORE_BUILD: "pip install -U numpy" - CIBW_BEFORE_BUILD_MACOS: "pip install -U pip setuptools" - CIBW_BEFORE_BUILD_LINUX: "pip install -U pip setuptools" + # https://cibuildwheel.readthedocs.io/en/stable/cpp_standards/ + # python2.7 + C++11(pybind11) is not available. 
+ CIBW_SKIP: "cp27-win*" + #CIBW_BEFORE_BUILD: "pip install -U numpy" + #CIBW_BEFORE_BUILD_MACOS: "pip install -U pip setuptools" + #CIBW_BEFORE_BUILD_LINUX: "pip install -U pip setuptools" #CIBW_TEST_COMMAND: TODO "python -c \"import tinyobjloader; tinyobjloader.test()\"" CIBW_BUILD_VERBOSITY: "2" CIBW_MANYLINUX_X86_64_IMAGE: manylinux2014 @@ -30,8 +33,8 @@ jobs: steps: - task: UsePythonVersion@0 - bash: | - python -m pip install --upgrade pip - pip install cibuildwheel twine + python3 -m pip install --upgrade pip + pip3 install cibuildwheel twine # Make the header files available to the build. cp *.h python cd python @@ -49,8 +52,8 @@ jobs: steps: - task: UsePythonVersion@0 - bash: | - python -m pip install --upgrade pip - pip install cibuildwheel + python3 -m pip install --upgrade pip + pip3 install cibuildwheel # Make the header files available to the build. cp *.h python cd python From ac3675cb358c7af18e996fe928beabff8497ad33 Mon Sep 17 00:00:00 2001 From: Syoyo Date: Thu, 28 May 2020 03:00:27 +0900 Subject: [PATCH 038/139] Use manylinux1 --- azure-pipelines.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 08caa8b1..0566384b 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -2,13 +2,13 @@ variables: # https://cibuildwheel.readthedocs.io/en/stable/cpp_standards/ # python2.7 + C++11(pybind11) is not available. CIBW_SKIP: "cp27-win*" - #CIBW_BEFORE_BUILD: "pip install -U numpy" + CIBW_BEFORE_BUILD: "pip install pybind11" #CIBW_BEFORE_BUILD_MACOS: "pip install -U pip setuptools" #CIBW_BEFORE_BUILD_LINUX: "pip install -U pip setuptools" #CIBW_TEST_COMMAND: TODO "python -c \"import tinyobjloader; tinyobjloader.test()\"" CIBW_BUILD_VERBOSITY: "2" - CIBW_MANYLINUX_X86_64_IMAGE: manylinux2014 - CIBW_MANYLINUX_I686_IMAGE: manylinux2014 + #CIBW_MANYLINUX_X86_64_IMAGE: manylinux2014 + #CIBW_MANYLINUX_I686_IMAGE: manylinux2014 jobs: - job: unit_linux From 60d0cdd52b099f9407cfc0b7f2f2a0548b9e4d44 Mon Sep 17 00:00:00 2001 From: Syoyo Date: Thu, 28 May 2020 03:22:59 +0900 Subject: [PATCH 039/139] Skip pp27-win32 --- azure-pipelines.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 0566384b..6ab7bcf6 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -1,7 +1,7 @@ variables: # https://cibuildwheel.readthedocs.io/en/stable/cpp_standards/ # python2.7 + C++11(pybind11) is not available. - CIBW_SKIP: "cp27-win*" + CIBW_SKIP: "cp27-win* pp27-win32" CIBW_BEFORE_BUILD: "pip install pybind11" #CIBW_BEFORE_BUILD_MACOS: "pip install -U pip setuptools" #CIBW_BEFORE_BUILD_LINUX: "pip install -U pip setuptools" From fb8e0317be4d4b27fa69240769a59db4be380058 Mon Sep 17 00:00:00 2001 From: Syoyo Date: Thu, 28 May 2020 03:45:26 +0900 Subject: [PATCH 040/139] Disable pp36-win32 --- azure-pipelines.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 6ab7bcf6..f693a5e6 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -1,7 +1,7 @@ variables: # https://cibuildwheel.readthedocs.io/en/stable/cpp_standards/ # python2.7 + C++11(pybind11) is not available. 
- CIBW_SKIP: "cp27-win* pp27-win32" + CIBW_SKIP: "cp27-win* pp27-win32 pp36-win32" CIBW_BEFORE_BUILD: "pip install pybind11" #CIBW_BEFORE_BUILD_MACOS: "pip install -U pip setuptools" #CIBW_BEFORE_BUILD_LINUX: "pip install -U pip setuptools" From e11207bc34e851604cbcd482aa2781ce5669a33e Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Thu, 28 May 2020 13:32:17 +0900 Subject: [PATCH 041/139] Use PublishBuildArtifacts: https://stackoverflow.com/questions/58841733/how-to-debug-azure-devops-task-publishpipelineartifact-when-one-or-more-errors-o --- azure-pipelines.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index f693a5e6..73126e92 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -39,10 +39,10 @@ jobs: cp *.h python cd python cibuildwheel --output-dir wheelhouse . - - task: PublishPipelineArtifact@1 - inputs: + - task: PublishBuildArtifacts@1 + inputs: path: $(System.DefaultWorkingDirectory)/python/wheelhouse - artifact: tinyobjDeployLinux + artifactName: tinyobjDeployLinux - job: macos pool: {vmImage: 'macOS-10.15'} @@ -58,10 +58,10 @@ jobs: cp *.h python cd python cibuildwheel --output-dir wheelhouse . - - task: PublishPipelineArtifact@1 + - task: PublishBuildArtifacts@1 inputs: path: $(System.DefaultWorkingDirectory)/python/wheelhouse - artifact: tinyobjDeployMacOS + artifactName: tinyobjDeployMacOS - job: windows pool: {vmImage: 'vs2017-win2016'} @@ -76,10 +76,10 @@ jobs: cp *.h python cd python cibuildwheel --output-dir wheelhouse . - - task: PublishPipelineArtifact@1 + - task: PublishBuildArtifacts@1 inputs: path: $(System.DefaultWorkingDirectory)/python/wheelhouse - artifact: tinyobjDeployWindows + artifactName: tinyobjDeployWindows - job: deployPyPI # Based on vispy: https://github.com/vispy/vispy/blob/master/azure-pipelines.yml From 4cadd9132db8323b0d7684a11333153891580097 Mon Sep 17 00:00:00 2001 From: Syoyo Date: Thu, 28 May 2020 14:08:16 +0900 Subject: [PATCH 042/139] Describe versioning(for developer) --- README.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/README.md b/README.md index 88526896..3e9b47c8 100644 --- a/README.md +++ b/README.md @@ -320,6 +320,14 @@ Here is some benchmark result. Time are measured on MacBook 12(Early 2016, Core cibuildwheels + twine upload for each git tagging event is handled in Azure Pipeline. +#### How to bump version(For developer) + +* Bump version in CMakeLists.txt +* Update version in `python/setup.py` +* Commit with tag name starging with `v`(e.g. `v2.1.0`) +* `git push --tags` + * cibuildwheels + pypi upload(through twine) will be automatically triggered in Azure Pipeline. + ## Tests Unit tests are provided in `tests` directory. See `tests/README.md` for details. From 9844ffd0c360667072c538d3c547fd21b2b4d7a2 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Thu, 28 May 2020 14:31:15 +0900 Subject: [PATCH 043/139] Fix deployPyPI script. 
--- azure-pipelines.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 73126e92..4ef3f778 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -98,10 +98,10 @@ jobs: tinyobjDeployMacOS/*.whl tinyobjDeployWindows/*.whl - bash: | - cd $(Pipeline.Workspace)/python + cd $(Pipeline.Workspace) python -m pip install --upgrade pip pip install twine - twine upload -u "__token__" --skip-existing vispyDeployLinux/* vispyDeployMacOS/* vispyDeployWindows/* + twine upload -u "__token__" --skip-existing tinyobjDeployLinux/* tinyobjDeployMacOS/* tinyobjDeployWindows/* env: TWINE_PASSWORD: $(pypiToken2) From 5bc550d9f3f3108971360bbf09dabc95283936e1 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Thu, 28 May 2020 15:05:23 +0900 Subject: [PATCH 044/139] Copy artifact to staging directory. --- azure-pipelines.yml | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 4ef3f778..454882e8 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -39,9 +39,13 @@ jobs: cp *.h python cd python cibuildwheel --output-dir wheelhouse . + - task: CopyFiles@2 + inputs: + contents: 'python/wheelhouse/**' + targetFolder: $(Build.ArtifactStagingDirectory) - task: PublishBuildArtifacts@1 inputs: - path: $(System.DefaultWorkingDirectory)/python/wheelhouse + path: $(Build.ArtifactStagingDirectory) artifactName: tinyobjDeployLinux - job: macos @@ -58,9 +62,13 @@ jobs: cp *.h python cd python cibuildwheel --output-dir wheelhouse . + - task: CopyFiles@2 + inputs: + contents: 'python/wheelhouse/*.whl' + targetFolder: $(Build.ArtifactStagingDirectory) - task: PublishBuildArtifacts@1 inputs: - path: $(System.DefaultWorkingDirectory)/python/wheelhouse + path: $(Build.ArtifactStagingDirectory) artifactName: tinyobjDeployMacOS - job: windows @@ -76,9 +84,13 @@ jobs: cp *.h python cd python cibuildwheel --output-dir wheelhouse . 
+ - task: CopyFiles@2 + inputs: + contents: 'python/wheelhouse/*.whl' + targetFolder: $(Build.ArtifactStagingDirectory) - task: PublishBuildArtifacts@1 inputs: - path: $(System.DefaultWorkingDirectory)/python/wheelhouse + path: $(Build.ArtifactStagingDirectory) artifactName: tinyobjDeployWindows - job: deployPyPI @@ -92,11 +104,6 @@ jobs: steps: - task: UsePythonVersion@0 - task: DownloadPipelineArtifact@2 - inputs: - patterns: | - tinyobjDeployLinux/* - tinyobjDeployMacOS/*.whl - tinyobjDeployWindows/*.whl - bash: | cd $(Pipeline.Workspace) python -m pip install --upgrade pip From 15c179d7982b5ff567fd27c3f91f06a3ebed2f82 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Thu, 28 May 2020 15:48:08 +0900 Subject: [PATCH 045/139] Downlaod artifacts --- azure-pipelines.yml | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 454882e8..3f3de6b5 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -103,9 +103,27 @@ jobs: - windows steps: - task: UsePythonVersion@0 + + # It looks we need to run Download task for each job - task: DownloadPipelineArtifact@2 + inputs: + artifact: 'tinyobjDeployLinux' + path: $(Pipeline.Workspace)/tinyobjDeployLinux + + - task: DownloadPipelineArtifact@2 + inputs: + artifact: 'tinyobjDeployMacOS' + path: $(Pipeline.Workspace)/tinyobjDeployMacOS + + - task: DownloadPipelineArtifact@2 + inputs: + artifact: 'tinyobjDeployWindows' + path: $(Pipeline.Workspace)/tinyobjDeployWindows + + # Publish to PyPI through twine - bash: | cd $(Pipeline.Workspace) + tree python -m pip install --upgrade pip pip install twine twine upload -u "__token__" --skip-existing tinyobjDeployLinux/* tinyobjDeployMacOS/* tinyobjDeployWindows/* From 25136da5efa5340f9e7899a52671266b4a12b011 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Thu, 28 May 2020 16:24:05 +0900 Subject: [PATCH 046/139] Use DownloadBuildArtifact@0 --- azure-pipelines.yml | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 3f3de6b5..4ef3bb38 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -104,29 +104,30 @@ jobs: steps: - task: UsePythonVersion@0 - # It looks we need to run Download task for each job - - task: DownloadPipelineArtifact@2 + # TODO(syoyo): Use buildType: specific to download multiple artifacts at once? + - task: DownloadBuildArtifact@0 inputs: - artifact: 'tinyobjDeployLinux' - path: $(Pipeline.Workspace)/tinyobjDeployLinux + artifactName: 'tinyobjDeployLinux' + downloadPath: $(Pipeline.Workspace)/tinyobjDeployLinux - - task: DownloadPipelineArtifact@2 + - task: DownloadBuildArtifact@0 inputs: - artifact: 'tinyobjDeployMacOS' - path: $(Pipeline.Workspace)/tinyobjDeployMacOS + artifactName: 'tinyobjDeployMacOS' + downloadPath: $(Pipeline.Workspace)/tinyobjDeployMacOS - - task: DownloadPipelineArtifact@2 + - task: DownloadBuildArtifact@0 inputs: - artifact: 'tinyobjDeployWindows' - path: $(Pipeline.Workspace)/tinyobjDeployWindows + artifactName: 'tinyobjDeployWindows' + downloadPath: $(Pipeline.Workspace)/tinyobjDeployWindows # Publish to PyPI through twine - bash: | cd $(Pipeline.Workspace) - tree + find . 
python -m pip install --upgrade pip pip install twine - twine upload -u "__token__" --skip-existing tinyobjDeployLinux/* tinyobjDeployMacOS/* tinyobjDeployWindows/* + echo tinyobjDeployLinux/python/wheelhouse/* tinyobjDeployMacOS/python/wheelhouse/* tinyobjDeployWindows/python/wheelhouse/* + twine upload -u "__token__" --skip-existing tinyobjDeployLinux/python/wheelhouse/* tinyobjDeployMacOS/python/wheelhouse/* tinyobjDeployWindows/python/wheelhouse/* env: TWINE_PASSWORD: $(pypiToken2) From 60160f788062a915fb9ced9c4053736b7017ec2d Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Thu, 28 May 2020 16:27:11 +0900 Subject: [PATCH 047/139] Task name typo. --- azure-pipelines.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 4ef3bb38..f8c3f323 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -105,17 +105,17 @@ jobs: - task: UsePythonVersion@0 # TODO(syoyo): Use buildType: specific to download multiple artifacts at once? - - task: DownloadBuildArtifact@0 + - task: DownloadBuildArtifacts@0 inputs: artifactName: 'tinyobjDeployLinux' downloadPath: $(Pipeline.Workspace)/tinyobjDeployLinux - - task: DownloadBuildArtifact@0 + - task: DownloadBuildArtifacts@0 inputs: artifactName: 'tinyobjDeployMacOS' downloadPath: $(Pipeline.Workspace)/tinyobjDeployMacOS - - task: DownloadBuildArtifact@0 + - task: DownloadBuildArtifacts@0 inputs: artifactName: 'tinyobjDeployWindows' downloadPath: $(Pipeline.Workspace)/tinyobjDeployWindows From 04f5410494c9041d5a4b492526b3167fca9d453e Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Thu, 28 May 2020 16:49:16 +0900 Subject: [PATCH 048/139] Build source dist Fix upload artifact dir. --- azure-pipelines.yml | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index f8c3f323..28563665 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -38,11 +38,23 @@ jobs: # Make the header files available to the build. cp *.h python cd python + + # Source dist + python3 setup.py sdist + + # build binary wheels cibuildwheel --output-dir wheelhouse . + - task: CopyFiles@2 inputs: contents: 'python/wheelhouse/**' targetFolder: $(Build.ArtifactStagingDirectory) + + - task: CopyFiles@2 + inputs: + contents: 'python/sdist/**' + targetFolder: $(Build.ArtifactStagingDirectory) + - task: PublishBuildArtifacts@1 inputs: path: $(Build.ArtifactStagingDirectory) @@ -108,17 +120,17 @@ jobs: - task: DownloadBuildArtifacts@0 inputs: artifactName: 'tinyobjDeployLinux' - downloadPath: $(Pipeline.Workspace)/tinyobjDeployLinux + downloadPath: $(Pipeline.Workspace) - task: DownloadBuildArtifacts@0 inputs: artifactName: 'tinyobjDeployMacOS' - downloadPath: $(Pipeline.Workspace)/tinyobjDeployMacOS + downloadPath: $(Pipeline.Workspace) - task: DownloadBuildArtifacts@0 inputs: artifactName: 'tinyobjDeployWindows' - downloadPath: $(Pipeline.Workspace)/tinyobjDeployWindows + downloadPath: $(Pipeline.Workspace) # Publish to PyPI through twine - bash: | @@ -126,8 +138,9 @@ jobs: find . 
python -m pip install --upgrade pip pip install twine + echo tinyobjDeployLinux/python/sdist/* echo tinyobjDeployLinux/python/wheelhouse/* tinyobjDeployMacOS/python/wheelhouse/* tinyobjDeployWindows/python/wheelhouse/* - twine upload -u "__token__" --skip-existing tinyobjDeployLinux/python/wheelhouse/* tinyobjDeployMacOS/python/wheelhouse/* tinyobjDeployWindows/python/wheelhouse/* + twine upload -u "__token__" --skip-existing tinyobjDeployLinux/python/sdist/* tinyobjDeployLinux/python/wheelhouse/* tinyobjDeployMacOS/python/wheelhouse/* tinyobjDeployWindows/python/wheelhouse/* env: TWINE_PASSWORD: $(pypiToken2) From 566dc740682de9867ac2c42095733617478f0cb9 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Thu, 28 May 2020 17:05:31 +0900 Subject: [PATCH 049/139] Fix dist dir --- azure-pipelines.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 28563665..69f3b8ec 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -41,6 +41,7 @@ jobs: # Source dist python3 setup.py sdist + ls -la dist/* # build binary wheels cibuildwheel --output-dir wheelhouse . @@ -52,7 +53,7 @@ jobs: - task: CopyFiles@2 inputs: - contents: 'python/sdist/**' + contents: 'python/dist/**' targetFolder: $(Build.ArtifactStagingDirectory) - task: PublishBuildArtifacts@1 From db37801c98dbcf7ca2d719fbfa8fae9bc7b32b0a Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Thu, 28 May 2020 17:30:33 +0900 Subject: [PATCH 050/139] sdist -> dist fix --- azure-pipelines.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 69f3b8ec..9463f4c5 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -139,9 +139,9 @@ jobs: find . python -m pip install --upgrade pip pip install twine - echo tinyobjDeployLinux/python/sdist/* + echo tinyobjDeployLinux/python/dist/* echo tinyobjDeployLinux/python/wheelhouse/* tinyobjDeployMacOS/python/wheelhouse/* tinyobjDeployWindows/python/wheelhouse/* - twine upload -u "__token__" --skip-existing tinyobjDeployLinux/python/sdist/* tinyobjDeployLinux/python/wheelhouse/* tinyobjDeployMacOS/python/wheelhouse/* tinyobjDeployWindows/python/wheelhouse/* + twine upload -u "__token__" --skip-existing tinyobjDeployLinux/python/dist/* tinyobjDeployLinux/python/wheelhouse/* tinyobjDeployMacOS/python/wheelhouse/* tinyobjDeployWindows/python/wheelhouse/* env: TWINE_PASSWORD: $(pypiToken2) From 0b66d7743ffe25c9d9f8cb9892e4d8de3c68f7cd Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Thu, 28 May 2020 18:36:41 +0900 Subject: [PATCH 051/139] Update AppVeyor URL --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 3e9b47c8..e7799a6c 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ [![AZ Build Status](https://dev.azure.com/tinyobjloader/tinyobjloader/_apis/build/status/tinyobjloader.tinyobjloader?branchName=master)](https://dev.azure.com/tinyobjloader/tinyobjloader/_build/latest?definitionId=1&branchName=master) -[![AppVeyor Build status](https://ci.appveyor.com/api/projects/status/tlb421q3t2oyobcn/branch/master?svg=true)](https://ci.appveyor.com/project/syoyo/tinyobjloader/branch/master) +[![AppVeyor Build status](https://ci.appveyor.com/api/projects/status/m6wfkvket7gth8wn/branch/master?svg=true)](https://ci.appveyor.com/project/syoyo/tinyobjloader-6e4qf/branch/master) [![Coverage 
Status](https://coveralls.io/repos/github/syoyo/tinyobjloader/badge.svg?branch=master)](https://coveralls.io/github/syoyo/tinyobjloader?branch=master) From b200acfe63c1ccbd67948eea4de7ad8f769561b2 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Thu, 28 May 2020 20:26:03 +0900 Subject: [PATCH 052/139] Update ninja to 1.10.0 --- appveyor.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/appveyor.yml b/appveyor.yml index 89fd1007..93f691ce 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -11,7 +11,7 @@ install: ####################################################################################### # Install Ninja ####################################################################################### - - set NINJA_URL="https://github.com/ninja-build/ninja/releases/download/v1.6.0/ninja-win.zip" + - set NINJA_URL="https://github.com/ninja-build/ninja/releases/download/v1.10.0/ninja-win.zip" - appveyor DownloadFile %NINJA_URL% -FileName ninja.zip - 7z x ninja.zip -oC:\projects\deps\ninja > nul - set PATH=C:\projects\deps\ninja;%PATH% From bec38e3bbfe56e6cf218d30375d3990ffbd9e362 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Tue, 2 Jun 2020 14:08:53 +0900 Subject: [PATCH 053/139] Add URL to rtxON --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index e7799a6c..c284d0d7 100644 --- a/README.md +++ b/README.md @@ -70,6 +70,7 @@ TinyObjLoader is successfully used in ... * Lighthouse2: https://github.com/jbikker/lighthouse2 * rayrender(an open source R package for raytracing scenes in created in R): https://github.com/tylermorganwall/rayrender * liblava - A modern C++ and easy-to-use framework for the Vulkan API. [MIT]: https://github.com/liblava/liblava +* rtxON - Simple Vulkan raytracing tutorials https://github.com/iOrange/rtxON * Your project here! (Letting us know via github issue is welcome!) ### Old version(v0.9.x) From cff52cd9eafac54cf8bbe3ca7da8219122acbf5d Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Thu, 16 Jul 2020 15:50:59 +0900 Subject: [PATCH 054/139] Add metal ray tracer to the list. --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index c284d0d7..f5b2a0a3 100644 --- a/README.md +++ b/README.md @@ -71,6 +71,7 @@ TinyObjLoader is successfully used in ... * rayrender(an open source R package for raytracing scenes in created in R): https://github.com/tylermorganwall/rayrender * liblava - A modern C++ and easy-to-use framework for the Vulkan API. [MIT]: https://github.com/liblava/liblava * rtxON - Simple Vulkan raytracing tutorials https://github.com/iOrange/rtxON +* metal-ray-tracer - Writing ray-tracer using Metal Performance Shaders https://github.com/sergeyreznik/metal-ray-tracer https://sergeyreznik.github.io/metal-ray-tracer/index.html * Your project here! (Letting us know via github issue is welcome!) 
### Old version(v0.9.x) From ebd177f4317f243a4a27ba61d6bda11184311a54 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Sat, 1 Aug 2020 14:09:50 +0900 Subject: [PATCH 055/139] Update issue templates --- .github/ISSUE_TEMPLATE/issue-report.md | 28 ++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/issue-report.md diff --git a/.github/ISSUE_TEMPLATE/issue-report.md b/.github/ISSUE_TEMPLATE/issue-report.md new file mode 100644 index 00000000..1361a329 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/issue-report.md @@ -0,0 +1,28 @@ +--- +name: Issue report +about: Create a report to help us improve +title: '' +labels: '' +assignees: '' + +--- + +**Describe the issue** +A clear and concise description of what the issue is. + +**To Reproduce** +Steps to reproduce the behavior: +1. Compile TinyObjLoader with '...' +2. Load .obj file '...' +3. See error + +Please attach minimal and reproducible files(source codes, .obj/.mtl files, etc) + +**Expected behavior** +A clear and concise description of what you expected to happen. + +**Environment** + - TinyObjLoader version + - OS: [e.g. Linux] + - Compiler [e.g. gcc 7.3] + - Other environment [e.g. Python version if you use python binding] From 53f17f2fc84913995b938a101fdb95882b465f38 Mon Sep 17 00:00:00 2001 From: Alberto Oporto Ames Date: Thu, 6 Aug 2020 23:04:06 -0500 Subject: [PATCH 056/139] Btw add aur badge (#280) --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index f5b2a0a3..4a657a9f 100644 --- a/README.md +++ b/README.md @@ -8,6 +8,8 @@ [![Coverage Status](https://coveralls.io/repos/github/syoyo/tinyobjloader/badge.svg?branch=master)](https://coveralls.io/github/syoyo/tinyobjloader?branch=master) +[![AUR version](https://img.shields.io/aur/version/tinyobjloader?logo=arch-linux)](https://aur.archlinux.org/packages/tinyobjloader) + [![Download](https://api.bintray.com/packages/conan/conan-center/tinyobjloader%3A_/images/download.svg)](https://bintray.com/conan/conan-center/tinyobjloader%3A_/_latestVersion) [![Join the chat at https://gitter.im/syoyo/tinyobjloader](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/syoyo/tinyobjloader?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) (inactive. gitter chat will be removed in the future. 
Please use github issue if you have questions and got issues) From 2b75981a3a20af72ec642f631f67e40380799006 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Sat, 15 Aug 2020 22:00:02 +0900 Subject: [PATCH 057/139] Bump version v2.0.0rc7 --- CMakeLists.txt | 2 +- python/setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index e0076eaa..0a43d294 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -4,7 +4,7 @@ project(tinyobjloader) cmake_minimum_required(VERSION 2.8.11) set(TINYOBJLOADER_SOVERSION 2) -set(TINYOBJLOADER_VERSION 2.0.0-rc.6) +set(TINYOBJLOADER_VERSION 2.0.0-rc.7) #optional double precision support option(TINYOBJLOADER_USE_DOUBLE "Build library with double precision instead of single (float)" OFF) diff --git a/python/setup.py b/python/setup.py index 41324487..d189a019 100644 --- a/python/setup.py +++ b/python/setup.py @@ -82,7 +82,7 @@ def __str__(self): setuptools.setup( name="tinyobjloader", - version="2.0.0rc6", + version="2.0.0rc7", description="Tiny but powerful Wavefront OBJ loader", long_description=long_description, long_description_content_type="text/markdown", From bd4d94494924a37eb57adf68167c047af75224b8 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Fri, 28 Aug 2020 18:54:17 +0900 Subject: [PATCH 058/139] Remove gitter link. --- README.md | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/README.md b/README.md index 4a657a9f..3a6f3367 100644 --- a/README.md +++ b/README.md @@ -10,10 +10,7 @@ [![AUR version](https://img.shields.io/aur/version/tinyobjloader?logo=arch-linux)](https://aur.archlinux.org/packages/tinyobjloader) -[![Download](https://api.bintray.com/packages/conan/conan-center/tinyobjloader%3A_/images/download.svg)](https://bintray.com/conan/conan-center/tinyobjloader%3A_/_latestVersion) - -[![Join the chat at https://gitter.im/syoyo/tinyobjloader](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/syoyo/tinyobjloader?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) (inactive. gitter chat will be removed in the future. Please use github issue if you have questions and got issues) - +[![Download](https://api.bintray.com/packages/conan/conan-center/tinyobjloader%3A_/images/download.svg)](https://bintray.com/conan/conan-center/tinyobjloader%3A_/_latestVersion) (not recommended) Tiny but powerful single file wavefront obj loader written in C++03. No dependency except for C++ STL. It can parse over 10M polygons with moderate memory and time. From a40e9c2fa2b592993005a94d432a37601c5390db Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Sun, 30 Aug 2020 17:04:07 +0900 Subject: [PATCH 059/139] Disable blank issue creation. 
--- .github/ISSUE_TEMPLATE/config.yml | 1 + 1 file changed, 1 insertion(+) create mode 100644 .github/ISSUE_TEMPLATE/config.yml diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 00000000..3ba13e0c --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1 @@ +blank_issues_enabled: false From b46cd4685edb126dc1cd754dc61b247e5e773d0d Mon Sep 17 00:00:00 2001 From: Louise Poubel Date: Tue, 17 Nov 2020 14:31:19 -0800 Subject: [PATCH 060/139] Support MTL files with escaped whitespace Signed-off-by: Louise Poubel --- examples/viewer/README.md | 2 ++ .../mtl filename with whitespace issue46.mtl | 4 +++ .../mtl filename with whitespace issue46.obj | 31 +++++++++++++++++ tests/tester.cc | 26 ++++++++++++++ tiny_obj_loader.h | 34 +++++++++++++------ 5 files changed, 87 insertions(+), 10 deletions(-) create mode 100644 models/mtl filename with whitespace issue46.mtl create mode 100644 models/mtl filename with whitespace issue46.obj diff --git a/examples/viewer/README.md b/examples/viewer/README.md index 83392f01..76207bfa 100644 --- a/examples/viewer/README.md +++ b/examples/viewer/README.md @@ -5,6 +5,8 @@ * premake5 * glfw3 * glew +* xcursor +* xinerama ## Build on MaCOSX diff --git a/models/mtl filename with whitespace issue46.mtl b/models/mtl filename with whitespace issue46.mtl new file mode 100644 index 00000000..b79d99b0 --- /dev/null +++ b/models/mtl filename with whitespace issue46.mtl @@ -0,0 +1,4 @@ +newmtl green +Ka 0 0 0 +Kd 0 1 0 +Ks 0 0 0 diff --git a/models/mtl filename with whitespace issue46.obj b/models/mtl filename with whitespace issue46.obj new file mode 100644 index 00000000..72d1dc9e --- /dev/null +++ b/models/mtl filename with whitespace issue46.obj @@ -0,0 +1,31 @@ +mtllib invalid-file-without-spaces.mtl invalid\ file\ with\ spaces.mtl mtl\ filename\ with\ whitespace\ issue46.mtl + +v 0.000000 2.000000 2.000000 +v 0.000000 0.000000 2.000000 +v 2.000000 0.000000 2.000000 +v 2.000000 2.000000 2.000000 +v 0.000000 2.000000 0.000000 +v 0.000000 0.000000 0.000000 +v 2.000000 0.000000 0.000000 +v 2.000000 2.000000 0.000000 +# 8 vertices + +g front cube +usemtl green +f 1 2 3 4 +g back cube +usemtl green +f 8 7 6 5 +g right cube +usemtl green +f 4 3 7 8 +g left cube +usemtl green +f 5 6 2 1 +g top cube +usemtl green +f 5 1 4 8 +g bottom cube +usemtl green +f 2 6 7 3 +# 6 elements diff --git a/tests/tester.cc b/tests/tester.cc index 7538c740..0fd0dbc1 100644 --- a/tests/tester.cc +++ b/tests/tester.cc @@ -1308,6 +1308,30 @@ void test_texres_texopt_issue248() { TEST_CHECK("input.jpg" == materials[0].diffuse_texname); } +void test_mtl_filename_with_whitespace_issue46() { + tinyobj::attrib_t attrib; + std::vector shapes; + std::vector materials; + + std::string warn; + std::string err; + bool ret = + tinyobj::LoadObj(&attrib, &shapes, &materials, &warn, &err, + "../models/mtl filename with whitespace issue46.obj", + gMtlBasePath); + + if (!warn.empty()) { + std::cout << "WARN: " << warn << std::endl; + } + + if (!err.empty()) { + std::cerr << "ERR: " << err << std::endl; + } + TEST_CHECK(true == ret); + TEST_CHECK(1 == materials.size()); + TEST_CHECK("green" == materials[0].name); +} + // Fuzzer test. // Just check if it does not crash. 
// Disable by default since Windows filesystem can't create filename of afl @@ -1407,4 +1431,6 @@ TEST_LIST = { test_usemtl_whitespace_issue246}, {"texres_texopt_issue248", test_texres_texopt_issue248}, + {"test_mtl_filename_with_whitespace_issue46", + test_mtl_filename_with_whitespace_issue46}, {NULL, NULL}}; diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h index edd18f58..c8489598 100644 --- a/tiny_obj_loader.h +++ b/tiny_obj_loader.h @@ -1658,16 +1658,30 @@ static bool exportGroupsToShape(shape_t *shape, const PrimGroup &prim_group, return true; } -// Split a string with specified delimiter character. -// http://stackoverflow.com/questions/236129/split-a-string-in-c -static void SplitString(const std::string &s, char delim, +// Split a string with specified delimiter character and escape character. +// https://rosettacode.org/wiki/Tokenize_a_string_with_escaping#C.2B.2B +static void SplitString(const std::string &s, char delim, char escape, std::vector &elems) { - std::stringstream ss; - ss.str(s); - std::string item; - while (std::getline(ss, item, delim)) { - elems.push_back(item); + std::string token; + + bool escaping = false; + for (char ch : s) { + if (escaping) { + escaping = false; + } else if (ch == escape) { + escaping = true; + continue; + } else if (ch == delim) { + if (!token.empty()) { + elems.push_back(token); + } + token.clear(); + continue; + } + token += ch; } + + elems.push_back(token); } static std::string JoinPath(const std::string &dir, @@ -2483,7 +2497,7 @@ bool LoadObj(attrib_t *attrib, std::vector *shapes, token += 7; std::vector filenames; - SplitString(std::string(token), ' ', filenames); + SplitString(std::string(token), ' ', '\\', filenames); if (filenames.empty()) { if (warn) { @@ -2891,7 +2905,7 @@ bool LoadObjWithCallback(std::istream &inStream, const callback_t &callback, token += 7; std::vector filenames; - SplitString(std::string(token), ' ', filenames); + SplitString(std::string(token), ' ', '\\', filenames); if (filenames.empty()) { if (warn) { From 350d32a5739e567d700dde2326673f134676b7d1 Mon Sep 17 00:00:00 2001 From: Louise Poubel Date: Wed, 18 Nov 2020 09:53:15 -0800 Subject: [PATCH 061/139] Remove C++11 Signed-off-by: Louise Poubel --- tiny_obj_loader.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h index c8489598..bbb35ca9 100644 --- a/tiny_obj_loader.h +++ b/tiny_obj_loader.h @@ -1665,7 +1665,8 @@ static void SplitString(const std::string &s, char delim, char escape, std::string token; bool escaping = false; - for (char ch : s) { + for (int i = 0; i < s.size(); ++i) { + char ch = s[i]; if (escaping) { escaping = false; } else if (ch == escape) { From 4f1099a341baa31a0e562d6e098c0014a0b2ee19 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Thu, 19 Nov 2020 16:25:10 +0900 Subject: [PATCH 062/139] Make C++03 default for unit test build(for Travis CI) --- tests/Makefile | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/Makefile b/tests/Makefile index c6708232..83d297d2 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -1,10 +1,11 @@ .PHONY: clean -CXX = clang++ +CXX ?= clang++ CXXFLAGS ?= -g -O1 +EXTRA_CXXFLAGS ?= -std=c++03 -fsanitize=address tester: tester.cc ../tiny_obj_loader.h - $(CXX) $(CXXFLAGS) -fsanitize=address -o tester tester.cc + $(CXX) $(CXXFLAGS) $(EXTRA_CXXFLAGS) -o tester tester.cc all: tester From a8b535f8eece5000dcbab5b30aebb48d769e80d9 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Thu, 19 Nov 2020 17:49:01 +0900 Subject: [PATCH 
063/139] Add change log. --- tiny_obj_loader.h | 1 + 1 file changed, 1 insertion(+) diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h index bbb35ca9..14447b56 100644 --- a/tiny_obj_loader.h +++ b/tiny_obj_loader.h @@ -28,6 +28,7 @@ THE SOFTWARE. // * Support points primitive. // * Support multiple search path for .mtl(v1 API). // * Support vertex weight `vw`(as an tinyobj extension) +// * Support escaped whitespece in mtllib // version 1.4.0 : Modifed ParseTextureNameAndOption API // version 1.3.1 : Make ParseTextureNameAndOption API public // version 1.3.0 : Separate warning and error message(breaking API of LoadObj) From 94d2f7fe1f7742818dbcd0917d11679d055a33de Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Thu, 19 Nov 2020 17:50:12 +0900 Subject: [PATCH 064/139] Bump version. --- CMakeLists.txt | 2 +- python/setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 0a43d294..3d6febac 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -4,7 +4,7 @@ project(tinyobjloader) cmake_minimum_required(VERSION 2.8.11) set(TINYOBJLOADER_SOVERSION 2) -set(TINYOBJLOADER_VERSION 2.0.0-rc.7) +set(TINYOBJLOADER_VERSION 2.0.0-rc.8) #optional double precision support option(TINYOBJLOADER_USE_DOUBLE "Build library with double precision instead of single (float)" OFF) diff --git a/python/setup.py b/python/setup.py index d189a019..8a9b69c0 100644 --- a/python/setup.py +++ b/python/setup.py @@ -82,7 +82,7 @@ def __str__(self): setuptools.setup( name="tinyobjloader", - version="2.0.0rc7", + version="2.0.0rc8", description="Tiny but powerful Wavefront OBJ loader", long_description=long_description, long_description_content_type="text/markdown", From 53d772573b5b855b62aa0a5b3664f192c95270ca Mon Sep 17 00:00:00 2001 From: Nam Vu Date: Fri, 4 Dec 2020 00:50:48 -0600 Subject: [PATCH 065/139] Updated README to include new API doc. (#289) Co-authored-by: Nam Vu --- README.md | 65 ++++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 64 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 3a6f3367..1fda8aeb 100644 --- a/README.md +++ b/README.md @@ -241,7 +241,7 @@ TinyObjLoader now use `real_t` for floating point data type. Default is `float(32bit)`. You can enable `double(64bit)` precision by using `TINYOBJLOADER_USE_DOUBLE` define. -#### Example code +#### Example code (Deprecated API) ```c++ #define TINYOBJLOADER_IMPLEMENTATION // define this in only *one* .cc @@ -302,6 +302,69 @@ for (size_t s = 0; s < shapes.size(); s++) { ``` +#### Example code (New Object Oriented API) + +```c++ +#define TINYOBJLOADER_IMPLEMENTATION // define this in only *one* .cc +#include "tiny_obj_loader.h" + + +std::string inputfile = "cornell_box.obj"; +tinyobj::ObjReaderConfig reader_config; +reader_config.mtl_search_path = "./"; // Path to material files + +tinyobj::ObjReader reader; + +if (!reader.ParseFromFile(inputfile, reader_config)) { + if (!reader.Error().empty()) { + std::cerr << "TinyObjReader: " << reader.Error(); + } + exit(1); +} + +if (!reader.Warning().empty()) { + std::cout << "TinyObjReader: " << reader.Warning(); +} + +auto& attrib = reader.GetAttrib(); +auto& shapes = reader.GetShapes(); +auto& materials = reader.GetMaterials(); + +// Loop over shapes +for (size_t s = 0; s < shapes.size(); s++) { + // Loop over faces(polygon) + size_t index_offset = 0; + for (size_t f = 0; f < shapes[s].mesh.num_face_vertices.size(); f++) { + int fv = shapes[s].mesh.num_face_vertices[f]; + + // Loop over vertices in the face. 
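+      // Note: `normal_index` and `texcoord_index` are -1 when the face has no
+      // normal / texcoord data, so check that they are >= 0 before indexing
+      // `attrib.normals` and `attrib.texcoords` below.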
+ for (size_t v = 0; v < fv; v++) { + // access to vertex + tinyobj::index_t idx = shapes[s].mesh.indices[index_offset + v]; + tinyobj::real_t vx = attrib.vertices[3*idx.vertex_index+0]; + tinyobj::real_t vy = attrib.vertices[3*idx.vertex_index+1]; + tinyobj::real_t vz = attrib.vertices[3*idx.vertex_index+2]; + tinyobj::real_t nx = attrib.normals[3*idx.normal_index+0]; + tinyobj::real_t ny = attrib.normals[3*idx.normal_index+1]; + tinyobj::real_t nz = attrib.normals[3*idx.normal_index+2]; + tinyobj::real_t tx = attrib.texcoords[2*idx.texcoord_index+0]; + tinyobj::real_t ty = attrib.texcoords[2*idx.texcoord_index+1]; + // Optional: vertex colors + // tinyobj::real_t red = attrib.colors[3*idx.vertex_index+0]; + // tinyobj::real_t green = attrib.colors[3*idx.vertex_index+1]; + // tinyobj::real_t blue = attrib.colors[3*idx.vertex_index+2]; + } + index_offset += fv; + + // per-face material + shapes[s].mesh.material_ids[f]; + } +} + +``` + + + ## Optimized loader Optimized multi-threaded .obj loader is available at `experimental/` directory. From 9173980d1de273b17eba5e10eb189e8b4be89425 Mon Sep 17 00:00:00 2001 From: Hector Dearman Date: Tue, 22 Dec 2020 08:03:24 +0000 Subject: [PATCH 066/139] Use size_t rather than int in SplitString. (#290) This prevents a warning when tiny_obj_loader is compiled with -Wsign-compare. --- tiny_obj_loader.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h index 14447b56..99213f6d 100644 --- a/tiny_obj_loader.h +++ b/tiny_obj_loader.h @@ -1666,7 +1666,7 @@ static void SplitString(const std::string &s, char delim, char escape, std::string token; bool escaping = false; - for (int i = 0; i < s.size(); ++i) { + for (size_t i = 0; i < s.size(); ++i) { char ch = s[i]; if (escaping) { escaping = false; From 7ba4b652ee0c5175ec8abf66199e84d88adf11f1 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Mon, 8 Mar 2021 15:32:47 +0900 Subject: [PATCH 067/139] Use simple triangulation rule for the quad face when triangulation. This partially solves issue no. 295. 
(#296) --- models/issue-295-trianguation-failure.obj | 38 ++ tests/tester.cc | 34 ++ tiny_obj_loader.h | 475 ++++++++++++++-------- 3 files changed, 368 insertions(+), 179 deletions(-) create mode 100644 models/issue-295-trianguation-failure.obj diff --git a/models/issue-295-trianguation-failure.obj b/models/issue-295-trianguation-failure.obj new file mode 100644 index 00000000..f3b2649c --- /dev/null +++ b/models/issue-295-trianguation-failure.obj @@ -0,0 +1,38 @@ +#mtllib invalid.mtl +v 14678.0 0.0 9605.0 +v 14678.0 1.0 9605.0 +v 14678.0 0.0 9606.0 +v 14678.0 1.0 9606.0 +v 14678.0 0.0 9607.0 +v 14678.0 1.0 9607.0 +v 14678.0 0.0 9608.0 +v 14678.0 1.0 9608.0 +v 14679.0 0.0 9605.0 +v 14679.0 1.0 9605.0 +v 14679.0 0.0 9606.0 +v 14679.0 1.0 9606.0 +v 14679.0 0.0 9607.0 +v 14679.0 1.0 9607.0 +v 14679.0 0.0 9608.0 +v 14679.0 1.0 9608.0 +# UV +vt 0.0 0.0 +vt 1.0 0.0 +vt 1.0 1.0 +vt 0.0 1.0 +#usemtl invalid +o invalid +f 9/4 11/1 3/2 1/3 +f 4/1 12/2 10/3 2/4 +f 2/3 10/4 9/1 1/2 +f 3/2 4/3 2/4 1/1 +f 10/3 12/4 11/1 9/2 +f 11/4 13/1 5/2 3/3 +f 6/1 14/2 12/3 4/4 +f 5/2 6/3 4/4 3/1 +f 12/3 14/4 13/1 11/2 +f 13/4 15/1 7/2 5/3 +f 8/1 16/2 14/3 6/4 +f 15/2 16/3 8/4 7/1 +f 7/2 8/3 6/4 5/1 +f 14/3 16/4 15/1 13/2 diff --git a/tests/tester.cc b/tests/tester.cc index 0fd0dbc1..ebb5290c 100644 --- a/tests/tester.cc +++ b/tests/tester.cc @@ -1332,6 +1332,38 @@ void test_mtl_filename_with_whitespace_issue46() { TEST_CHECK("green" == materials[0].name); } +void test_face_missing_issue295() { + tinyobj::attrib_t attrib; + std::vector shapes; + std::vector materials; + + std::string warn; + std::string err; + bool ret = tinyobj::LoadObj( + &attrib, &shapes, &materials, &warn, &err, + "../models/issue-295-trianguation-failure.obj", + gMtlBasePath, /* triangualte */true); + + TEST_CHECK(warn.empty()); + + if (!warn.empty()) { + std::cout << "WARN: " << warn << std::endl; + } + + if (!err.empty()) { + std::cerr << "ERR: " << err << std::endl; + } + + TEST_CHECK(true == ret); + TEST_CHECK(1 == shapes.size()); + + // 14 quad faces are triangulated into 28 triangles. + TEST_CHECK(28 == shapes[0].mesh.num_face_vertices.size()); + TEST_CHECK(28 == shapes[0].mesh.smoothing_group_ids.size()); + TEST_CHECK(28 == shapes[0].mesh.material_ids.size()); + TEST_CHECK((3 * 28) == shapes[0].mesh.indices.size()); // 28 triangle faces x 3 +} + // Fuzzer test. // Just check if it does not crash. // Disable by default since Windows filesystem can't create filename of afl @@ -1433,4 +1465,6 @@ TEST_LIST = { test_texres_texopt_issue248}, {"test_mtl_filename_with_whitespace_issue46", test_mtl_filename_with_whitespace_issue46}, + {"test_face_missing_issue295", + test_face_missing_issue295}, {NULL, NULL}}; diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h index 99213f6d..4bfd7b75 100644 --- a/tiny_obj_loader.h +++ b/tiny_obj_loader.h @@ -1,7 +1,7 @@ /* The MIT License (MIT) -Copyright (c) 2012-2018 Syoyo Fujita and many contributors. +Copyright (c) 2012-Present, Syoyo Fujita and many contributors. 
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -1366,8 +1366,8 @@ static int pnpoly(int nvert, T *vertx, T *verty, T testx, T testy) { static bool exportGroupsToShape(shape_t *shape, const PrimGroup &prim_group, const std::vector &tags, const int material_id, const std::string &name, - bool triangulate, - const std::vector &v) { + bool triangulate, const std::vector &v, + std::string *warn) { if (prim_group.IsEmpty()) { return false; } @@ -1384,30 +1384,34 @@ static bool exportGroupsToShape(shape_t *shape, const PrimGroup &prim_group, if (npolys < 3) { // Face must have 3+ vertices. + if (warn) { + (*warn) += "Degenerated face found\n."; + } continue; } - vertex_index_t i0 = face.vertex_indices[0]; - vertex_index_t i1(-1); - vertex_index_t i2 = face.vertex_indices[1]; - if (triangulate) { - // find the two axes to work in - size_t axes[2] = {1, 2}; - for (size_t k = 0; k < npolys; ++k) { - i0 = face.vertex_indices[(k + 0) % npolys]; - i1 = face.vertex_indices[(k + 1) % npolys]; - i2 = face.vertex_indices[(k + 2) % npolys]; + if (npolys == 4) { + vertex_index_t i0 = face.vertex_indices[0]; + vertex_index_t i1 = face.vertex_indices[1]; + vertex_index_t i2 = face.vertex_indices[2]; + vertex_index_t i3 = face.vertex_indices[3]; + size_t vi0 = size_t(i0.v_idx); size_t vi1 = size_t(i1.v_idx); size_t vi2 = size_t(i2.v_idx); + size_t vi3 = size_t(i3.v_idx); if (((3 * vi0 + 2) >= v.size()) || ((3 * vi1 + 2) >= v.size()) || - ((3 * vi2 + 2) >= v.size())) { + ((3 * vi2 + 2) >= v.size()) || ((3 * vi3 + 2) >= v.size())) { // Invalid triangle. // FIXME(syoyo): Is it ok to simply skip this invalid triangle? + if (warn) { + (*warn) += "Face with invalid vertex index found.\n"; + } continue; } + real_t v0x = v[vi0 * 3 + 0]; real_t v0y = v[vi0 * 3 + 1]; real_t v0z = v[vi0 * 3 + 2]; @@ -1417,186 +1421,297 @@ static bool exportGroupsToShape(shape_t *shape, const PrimGroup &prim_group, real_t v2x = v[vi2 * 3 + 0]; real_t v2y = v[vi2 * 3 + 1]; real_t v2z = v[vi2 * 3 + 2]; - real_t e0x = v1x - v0x; - real_t e0y = v1y - v0y; - real_t e0z = v1z - v0z; - real_t e1x = v2x - v1x; - real_t e1y = v2y - v1y; - real_t e1z = v2z - v1z; - real_t cx = std::fabs(e0y * e1z - e0z * e1y); - real_t cy = std::fabs(e0z * e1x - e0x * e1z); - real_t cz = std::fabs(e0x * e1y - e0y * e1x); - const real_t epsilon = std::numeric_limits::epsilon(); - if (cx > epsilon || cy > epsilon || cz > epsilon) { - // found a corner - if (cx > cy && cx > cz) { - } else { - axes[0] = 0; - if (cz > cx && cz > cy) axes[1] = 1; - } - break; - } - } - - real_t area = 0; - for (size_t k = 0; k < npolys; ++k) { - i0 = face.vertex_indices[(k + 0) % npolys]; - i1 = face.vertex_indices[(k + 1) % npolys]; - size_t vi0 = size_t(i0.v_idx); - size_t vi1 = size_t(i1.v_idx); - if (((vi0 * 3 + axes[0]) >= v.size()) || - ((vi0 * 3 + axes[1]) >= v.size()) || - ((vi1 * 3 + axes[0]) >= v.size()) || - ((vi1 * 3 + axes[1]) >= v.size())) { - // Invalid index. - continue; - } - real_t v0x = v[vi0 * 3 + axes[0]]; - real_t v0y = v[vi0 * 3 + axes[1]]; - real_t v1x = v[vi1 * 3 + axes[0]]; - real_t v1y = v[vi1 * 3 + axes[1]]; - area += (v0x * v1y - v0y * v1x) * static_cast(0.5); - } - - face_t remainingFace = face; // copy - size_t guess_vert = 0; - vertex_index_t ind[3]; - real_t vx[3]; - real_t vy[3]; - - // How many iterations can we do without decreasing the remaining - // vertices. 
- size_t remainingIterations = face.vertex_indices.size(); - size_t previousRemainingVertices = remainingFace.vertex_indices.size(); - - while (remainingFace.vertex_indices.size() > 3 && - remainingIterations > 0) { - npolys = remainingFace.vertex_indices.size(); - if (guess_vert >= npolys) { - guess_vert -= npolys; - } + real_t v3x = v[vi3 * 3 + 0]; + real_t v3y = v[vi3 * 3 + 1]; + real_t v3z = v[vi3 * 3 + 2]; + + // There are two candidates to split the quad into two triangles. + // + // Choose the shortest edge. + // TODO: Is it better to determine the edge to split by calculating + // the area of each triangle? + // + // +---+ + // |\ | + // | \ | + // | \| + // +---+ + // + // +---+ + // | /| + // | / | + // |/ | + // +---+ + + real_t e02x = v2x - v0x; + real_t e02y = v2y - v0y; + real_t e02z = v2z - v0z; + real_t e13x = v3x - v1x; + real_t e13y = v3y - v1y; + real_t e13z = v3z - v1z; + + real_t sqr02 = e02x * e02x + e02y * e02y + e02z * e02z; + real_t sqr13 = e13x * e13x + e13y * e13y + e13z * e13z; + + index_t idx0, idx1, idx2, idx3; + + idx0.vertex_index = i0.v_idx; + idx0.normal_index = i0.vn_idx; + idx0.texcoord_index = i0.vt_idx; + idx1.vertex_index = i1.v_idx; + idx1.normal_index = i1.vn_idx; + idx1.texcoord_index = i1.vt_idx; + idx2.vertex_index = i2.v_idx; + idx2.normal_index = i2.vn_idx; + idx2.texcoord_index = i2.vt_idx; + idx3.vertex_index = i3.v_idx; + idx3.normal_index = i3.vn_idx; + idx3.texcoord_index = i3.vt_idx; + + if (sqr02 < sqr13) { + // [0, 1, 2], [0, 2, 3] + shape->mesh.indices.push_back(idx0); + shape->mesh.indices.push_back(idx1); + shape->mesh.indices.push_back(idx2); - if (previousRemainingVertices != npolys) { - // The number of remaining vertices decreased. Reset counters. - previousRemainingVertices = npolys; - remainingIterations = npolys; + shape->mesh.indices.push_back(idx0); + shape->mesh.indices.push_back(idx2); + shape->mesh.indices.push_back(idx3); } else { - // We didn't consume a vertex on previous iteration, reduce the - // available iterations. - remainingIterations--; - } + // [0, 1, 3], [1, 2, 3] + shape->mesh.indices.push_back(idx0); + shape->mesh.indices.push_back(idx1); + shape->mesh.indices.push_back(idx3); - for (size_t k = 0; k < 3; k++) { - ind[k] = remainingFace.vertex_indices[(guess_vert + k) % npolys]; - size_t vi = size_t(ind[k].v_idx); - if (((vi * 3 + axes[0]) >= v.size()) || - ((vi * 3 + axes[1]) >= v.size())) { - // ??? - vx[k] = static_cast(0.0); - vy[k] = static_cast(0.0); - } else { - vx[k] = v[vi * 3 + axes[0]]; - vy[k] = v[vi * 3 + axes[1]]; - } - } - real_t e0x = vx[1] - vx[0]; - real_t e0y = vy[1] - vy[0]; - real_t e1x = vx[2] - vx[1]; - real_t e1y = vy[2] - vy[1]; - real_t cross = e0x * e1y - e0y * e1x; - // if an internal angle - if (cross * area < static_cast(0.0)) { - guess_vert += 1; - continue; + shape->mesh.indices.push_back(idx1); + shape->mesh.indices.push_back(idx2); + shape->mesh.indices.push_back(idx3); } - // check all other verts in case they are inside this triangle - bool overlap = false; - for (size_t otherVert = 3; otherVert < npolys; ++otherVert) { - size_t idx = (guess_vert + otherVert) % npolys; + // Two triangle faces + shape->mesh.num_face_vertices.push_back(3); + shape->mesh.num_face_vertices.push_back(3); - if (idx >= remainingFace.vertex_indices.size()) { - // ??? 
- continue; - } + shape->mesh.material_ids.push_back(material_id); + shape->mesh.material_ids.push_back(material_id); - size_t ovi = size_t(remainingFace.vertex_indices[idx].v_idx); + shape->mesh.smoothing_group_ids.push_back(face.smoothing_group_id); + shape->mesh.smoothing_group_ids.push_back(face.smoothing_group_id); - if (((ovi * 3 + axes[0]) >= v.size()) || - ((ovi * 3 + axes[1]) >= v.size())) { - // ??? + } else { + vertex_index_t i0 = face.vertex_indices[0]; + vertex_index_t i1(-1); + vertex_index_t i2 = face.vertex_indices[1]; + + // find the two axes to work in + size_t axes[2] = {1, 2}; + for (size_t k = 0; k < npolys; ++k) { + i0 = face.vertex_indices[(k + 0) % npolys]; + i1 = face.vertex_indices[(k + 1) % npolys]; + i2 = face.vertex_indices[(k + 2) % npolys]; + size_t vi0 = size_t(i0.v_idx); + size_t vi1 = size_t(i1.v_idx); + size_t vi2 = size_t(i2.v_idx); + + if (((3 * vi0 + 2) >= v.size()) || ((3 * vi1 + 2) >= v.size()) || + ((3 * vi2 + 2) >= v.size())) { + // Invalid triangle. + // FIXME(syoyo): Is it ok to simply skip this invalid triangle? continue; } - real_t tx = v[ovi * 3 + axes[0]]; - real_t ty = v[ovi * 3 + axes[1]]; - if (pnpoly(3, vx, vy, tx, ty)) { - overlap = true; + real_t v0x = v[vi0 * 3 + 0]; + real_t v0y = v[vi0 * 3 + 1]; + real_t v0z = v[vi0 * 3 + 2]; + real_t v1x = v[vi1 * 3 + 0]; + real_t v1y = v[vi1 * 3 + 1]; + real_t v1z = v[vi1 * 3 + 2]; + real_t v2x = v[vi2 * 3 + 0]; + real_t v2y = v[vi2 * 3 + 1]; + real_t v2z = v[vi2 * 3 + 2]; + real_t e0x = v1x - v0x; + real_t e0y = v1y - v0y; + real_t e0z = v1z - v0z; + real_t e1x = v2x - v1x; + real_t e1y = v2y - v1y; + real_t e1z = v2z - v1z; + real_t cx = std::fabs(e0y * e1z - e0z * e1y); + real_t cy = std::fabs(e0z * e1x - e0x * e1z); + real_t cz = std::fabs(e0x * e1y - e0y * e1x); + const real_t epsilon = std::numeric_limits::epsilon(); + if (cx > epsilon || cy > epsilon || cz > epsilon) { + // found a corner + if (cx > cy && cx > cz) { + } else { + axes[0] = 0; + if (cz > cx && cz > cy) axes[1] = 1; + } break; } } - if (overlap) { - guess_vert += 1; - continue; + real_t area = 0; + for (size_t k = 0; k < npolys; ++k) { + i0 = face.vertex_indices[(k + 0) % npolys]; + i1 = face.vertex_indices[(k + 1) % npolys]; + size_t vi0 = size_t(i0.v_idx); + size_t vi1 = size_t(i1.v_idx); + if (((vi0 * 3 + axes[0]) >= v.size()) || + ((vi0 * 3 + axes[1]) >= v.size()) || + ((vi1 * 3 + axes[0]) >= v.size()) || + ((vi1 * 3 + axes[1]) >= v.size())) { + // Invalid index. + continue; + } + real_t v0x = v[vi0 * 3 + axes[0]]; + real_t v0y = v[vi0 * 3 + axes[1]]; + real_t v1x = v[vi1 * 3 + axes[0]]; + real_t v1y = v[vi1 * 3 + axes[1]]; + area += (v0x * v1y - v0y * v1x) * static_cast(0.5); } - // this triangle is an ear - { - index_t idx0, idx1, idx2; - idx0.vertex_index = ind[0].v_idx; - idx0.normal_index = ind[0].vn_idx; - idx0.texcoord_index = ind[0].vt_idx; - idx1.vertex_index = ind[1].v_idx; - idx1.normal_index = ind[1].vn_idx; - idx1.texcoord_index = ind[1].vt_idx; - idx2.vertex_index = ind[2].v_idx; - idx2.normal_index = ind[2].vn_idx; - idx2.texcoord_index = ind[2].vt_idx; + face_t remainingFace = face; // copy + size_t guess_vert = 0; + vertex_index_t ind[3]; + real_t vx[3]; + real_t vy[3]; + + // How many iterations can we do without decreasing the remaining + // vertices. 
+ size_t remainingIterations = face.vertex_indices.size(); + size_t previousRemainingVertices = + remainingFace.vertex_indices.size(); + + while (remainingFace.vertex_indices.size() > 3 && + remainingIterations > 0) { + npolys = remainingFace.vertex_indices.size(); + if (guess_vert >= npolys) { + guess_vert -= npolys; + } - shape->mesh.indices.push_back(idx0); - shape->mesh.indices.push_back(idx1); - shape->mesh.indices.push_back(idx2); + if (previousRemainingVertices != npolys) { + // The number of remaining vertices decreased. Reset counters. + previousRemainingVertices = npolys; + remainingIterations = npolys; + } else { + // We didn't consume a vertex on previous iteration, reduce the + // available iterations. + remainingIterations--; + } - shape->mesh.num_face_vertices.push_back(3); - shape->mesh.material_ids.push_back(material_id); - shape->mesh.smoothing_group_ids.push_back(face.smoothing_group_id); - } + for (size_t k = 0; k < 3; k++) { + ind[k] = remainingFace.vertex_indices[(guess_vert + k) % npolys]; + size_t vi = size_t(ind[k].v_idx); + if (((vi * 3 + axes[0]) >= v.size()) || + ((vi * 3 + axes[1]) >= v.size())) { + // ??? + vx[k] = static_cast(0.0); + vy[k] = static_cast(0.0); + } else { + vx[k] = v[vi * 3 + axes[0]]; + vy[k] = v[vi * 3 + axes[1]]; + } + } + real_t e0x = vx[1] - vx[0]; + real_t e0y = vy[1] - vy[0]; + real_t e1x = vx[2] - vx[1]; + real_t e1y = vy[2] - vy[1]; + real_t cross = e0x * e1y - e0y * e1x; + // if an internal angle + if (cross * area < static_cast(0.0)) { + guess_vert += 1; + continue; + } - // remove v1 from the list - size_t removed_vert_index = (guess_vert + 1) % npolys; - while (removed_vert_index + 1 < npolys) { - remainingFace.vertex_indices[removed_vert_index] = - remainingFace.vertex_indices[removed_vert_index + 1]; - removed_vert_index += 1; - } - remainingFace.vertex_indices.pop_back(); - } + // check all other verts in case they are inside this triangle + bool overlap = false; + for (size_t otherVert = 3; otherVert < npolys; ++otherVert) { + size_t idx = (guess_vert + otherVert) % npolys; + + if (idx >= remainingFace.vertex_indices.size()) { + // ??? + continue; + } + + size_t ovi = size_t(remainingFace.vertex_indices[idx].v_idx); + + if (((ovi * 3 + axes[0]) >= v.size()) || + ((ovi * 3 + axes[1]) >= v.size())) { + // ??? 
+ continue; + } + real_t tx = v[ovi * 3 + axes[0]]; + real_t ty = v[ovi * 3 + axes[1]]; + if (pnpoly(3, vx, vy, tx, ty)) { + overlap = true; + break; + } + } - if (remainingFace.vertex_indices.size() == 3) { - i0 = remainingFace.vertex_indices[0]; - i1 = remainingFace.vertex_indices[1]; - i2 = remainingFace.vertex_indices[2]; - { - index_t idx0, idx1, idx2; - idx0.vertex_index = i0.v_idx; - idx0.normal_index = i0.vn_idx; - idx0.texcoord_index = i0.vt_idx; - idx1.vertex_index = i1.v_idx; - idx1.normal_index = i1.vn_idx; - idx1.texcoord_index = i1.vt_idx; - idx2.vertex_index = i2.v_idx; - idx2.normal_index = i2.vn_idx; - idx2.texcoord_index = i2.vt_idx; + if (overlap) { + guess_vert += 1; + continue; + } - shape->mesh.indices.push_back(idx0); - shape->mesh.indices.push_back(idx1); - shape->mesh.indices.push_back(idx2); + // this triangle is an ear + { + index_t idx0, idx1, idx2; + idx0.vertex_index = ind[0].v_idx; + idx0.normal_index = ind[0].vn_idx; + idx0.texcoord_index = ind[0].vt_idx; + idx1.vertex_index = ind[1].v_idx; + idx1.normal_index = ind[1].vn_idx; + idx1.texcoord_index = ind[1].vt_idx; + idx2.vertex_index = ind[2].v_idx; + idx2.normal_index = ind[2].vn_idx; + idx2.texcoord_index = ind[2].vt_idx; + + shape->mesh.indices.push_back(idx0); + shape->mesh.indices.push_back(idx1); + shape->mesh.indices.push_back(idx2); + + shape->mesh.num_face_vertices.push_back(3); + shape->mesh.material_ids.push_back(material_id); + shape->mesh.smoothing_group_ids.push_back( + face.smoothing_group_id); + } - shape->mesh.num_face_vertices.push_back(3); - shape->mesh.material_ids.push_back(material_id); - shape->mesh.smoothing_group_ids.push_back(face.smoothing_group_id); + // remove v1 from the list + size_t removed_vert_index = (guess_vert + 1) % npolys; + while (removed_vert_index + 1 < npolys) { + remainingFace.vertex_indices[removed_vert_index] = + remainingFace.vertex_indices[removed_vert_index + 1]; + removed_vert_index += 1; + } + remainingFace.vertex_indices.pop_back(); } - } + + if (remainingFace.vertex_indices.size() == 3) { + i0 = remainingFace.vertex_indices[0]; + i1 = remainingFace.vertex_indices[1]; + i2 = remainingFace.vertex_indices[2]; + { + index_t idx0, idx1, idx2; + idx0.vertex_index = i0.v_idx; + idx0.normal_index = i0.vn_idx; + idx0.texcoord_index = i0.vt_idx; + idx1.vertex_index = i1.v_idx; + idx1.normal_index = i1.vn_idx; + idx1.texcoord_index = i1.vt_idx; + idx2.vertex_index = i2.v_idx; + idx2.normal_index = i2.vn_idx; + idx2.texcoord_index = i2.vt_idx; + + shape->mesh.indices.push_back(idx0); + shape->mesh.indices.push_back(idx1); + shape->mesh.indices.push_back(idx2); + + shape->mesh.num_face_vertices.push_back(3); + shape->mesh.material_ids.push_back(material_id); + shape->mesh.smoothing_group_ids.push_back( + face.smoothing_group_id); + } + } + } // npolys } else { for (size_t k = 0; k < npolys; k++) { index_t idx; @@ -2174,7 +2289,7 @@ bool MaterialStreamReader::operator()(const std::string &matId, bool LoadObj(attrib_t *attrib, std::vector *shapes, std::vector *materials, std::string *warn, std::string *err, const char *filename, const char *mtl_basedir, - bool trianglulate, bool default_vcols_fallback) { + bool triangulate, bool default_vcols_fallback) { attrib->vertices.clear(); attrib->normals.clear(); attrib->texcoords.clear(); @@ -2204,7 +2319,7 @@ bool LoadObj(attrib_t *attrib, std::vector *shapes, MaterialFileReader matFileReader(baseDir); return LoadObj(attrib, shapes, materials, warn, err, &ifs, &matFileReader, - trianglulate, default_vcols_fallback); + 
triangulate, default_vcols_fallback); } bool LoadObj(attrib_t *attrib, std::vector *shapes, @@ -2470,7 +2585,8 @@ bool LoadObj(attrib_t *attrib, std::vector *shapes, std::string namebuf = parseString(&token); int newMaterialId = -1; - std::map::const_iterator it = material_map.find(namebuf); + std::map::const_iterator it = + material_map.find(namebuf); if (it != material_map.end()) { newMaterialId = it->second; } else { @@ -2485,7 +2601,7 @@ bool LoadObj(attrib_t *attrib, std::vector *shapes, // this time. // just clear `faceGroup` after `exportGroupsToShape()` call. exportGroupsToShape(&shape, prim_group, tags, material, name, - triangulate, v); + triangulate, v, warn); prim_group.faceGroup.clear(); material = newMaterialId; } @@ -2548,7 +2664,7 @@ bool LoadObj(attrib_t *attrib, std::vector *shapes, if (token[0] == 'g' && IS_SPACE((token[1]))) { // flush previous face group. bool ret = exportGroupsToShape(&shape, prim_group, tags, material, name, - triangulate, v); + triangulate, v, warn); (void)ret; // return value not used. if (shape.mesh.indices.size() > 0) { @@ -2600,7 +2716,7 @@ bool LoadObj(attrib_t *attrib, std::vector *shapes, if (token[0] == 'o' && IS_SPACE((token[1]))) { // flush previous face group. bool ret = exportGroupsToShape(&shape, prim_group, tags, material, name, - triangulate, v); + triangulate, v, warn); (void)ret; // return value not used. if (shape.mesh.indices.size() > 0 || shape.lines.indices.size() > 0 || @@ -2740,7 +2856,7 @@ bool LoadObj(attrib_t *attrib, std::vector *shapes, } bool ret = exportGroupsToShape(&shape, prim_group, tags, material, name, - triangulate, v); + triangulate, v, warn); // exportGroupsToShape return false when `usemtl` is called in the last // line. // we also add `shape` to `shapes` when `shape.mesh` has already some @@ -2880,7 +2996,8 @@ bool LoadObjWithCallback(std::istream &inStream, const callback_t &callback, std::string namebuf = ss.str(); int newMaterialId = -1; - std::map::const_iterator it = material_map.find(namebuf); + std::map::const_iterator it = + material_map.find(namebuf); if (it != material_map.end()) { newMaterialId = it->second; } else { From f760a8a0356a4372a27ac81f28f6afed78b5e63d Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Fri, 19 Mar 2021 14:38:58 +0900 Subject: [PATCH 068/139] Update example code in README. (#301) --- README.md | 66 +++++++++++++++++++++++++++++++++++-------------------- 1 file changed, 42 insertions(+), 24 deletions(-) diff --git a/README.md b/README.md index 1fda8aeb..ce5ab1b0 100644 --- a/README.md +++ b/README.md @@ -274,24 +274,33 @@ for (size_t s = 0; s < shapes.size(); s++) { // Loop over faces(polygon) size_t index_offset = 0; for (size_t f = 0; f < shapes[s].mesh.num_face_vertices.size(); f++) { - int fv = shapes[s].mesh.num_face_vertices[f]; + size_t fv = size_t(shapes[s].mesh.num_face_vertices[f]); // Loop over vertices in the face. 
for (size_t v = 0; v < fv; v++) { // access to vertex tinyobj::index_t idx = shapes[s].mesh.indices[index_offset + v]; - tinyobj::real_t vx = attrib.vertices[3*idx.vertex_index+0]; - tinyobj::real_t vy = attrib.vertices[3*idx.vertex_index+1]; - tinyobj::real_t vz = attrib.vertices[3*idx.vertex_index+2]; - tinyobj::real_t nx = attrib.normals[3*idx.normal_index+0]; - tinyobj::real_t ny = attrib.normals[3*idx.normal_index+1]; - tinyobj::real_t nz = attrib.normals[3*idx.normal_index+2]; - tinyobj::real_t tx = attrib.texcoords[2*idx.texcoord_index+0]; - tinyobj::real_t ty = attrib.texcoords[2*idx.texcoord_index+1]; + + tinyobj::real_t vx = attrib.vertices[3*size_t(idx.vertex_index)+0]; + tinyobj::real_t vy = attrib.vertices[3*size_t(idx.vertex_index)+1]; + tinyobj::real_t vz = attrib.vertices[3*size_t(idx.vertex_index)+2]; + + // Check if `normal_index` is zero or positive. negative = no normal data + if (idx.normal_index >= 0) { + tinyobj::real_t nx = attrib.normals[3*size_t(idx.normal_index)+0]; + tinyobj::real_t ny = attrib.normals[3*size_t(idx.normal_index)+1]; + tinyobj::real_t nz = attrib.normals[3*size_t(idx.normal_index)+2]; + } + + // Check if `texcoord_index` is zero or positive. negative = no texcoord data + if (idx.texcoord_index >= 0) { + tinyobj::real_t tx = attrib.texcoords[2*size_t(idx.texcoord_index)+0]; + tinyobj::real_t ty = attrib.texcoords[2*size_t(idx.texcoord_index)+1]; + } // Optional: vertex colors - // tinyobj::real_t red = attrib.colors[3*idx.vertex_index+0]; - // tinyobj::real_t green = attrib.colors[3*idx.vertex_index+1]; - // tinyobj::real_t blue = attrib.colors[3*idx.vertex_index+2]; + // tinyobj::real_t red = attrib.colors[3*size_t(idx.vertex_index)+0]; + // tinyobj::real_t green = attrib.colors[3*size_t(idx.vertex_index)+1]; + // tinyobj::real_t blue = attrib.colors[3*size_t(idx.vertex_index)+2]; } index_offset += fv; @@ -335,24 +344,33 @@ for (size_t s = 0; s < shapes.size(); s++) { // Loop over faces(polygon) size_t index_offset = 0; for (size_t f = 0; f < shapes[s].mesh.num_face_vertices.size(); f++) { - int fv = shapes[s].mesh.num_face_vertices[f]; + size_t fv = size_t(shapes[s].mesh.num_face_vertices[f]); // Loop over vertices in the face. for (size_t v = 0; v < fv; v++) { // access to vertex tinyobj::index_t idx = shapes[s].mesh.indices[index_offset + v]; - tinyobj::real_t vx = attrib.vertices[3*idx.vertex_index+0]; - tinyobj::real_t vy = attrib.vertices[3*idx.vertex_index+1]; - tinyobj::real_t vz = attrib.vertices[3*idx.vertex_index+2]; - tinyobj::real_t nx = attrib.normals[3*idx.normal_index+0]; - tinyobj::real_t ny = attrib.normals[3*idx.normal_index+1]; - tinyobj::real_t nz = attrib.normals[3*idx.normal_index+2]; - tinyobj::real_t tx = attrib.texcoords[2*idx.texcoord_index+0]; - tinyobj::real_t ty = attrib.texcoords[2*idx.texcoord_index+1]; + tinyobj::real_t vx = attrib.vertices[3*size_t(idx.vertex_index)+0]; + tinyobj::real_t vy = attrib.vertices[3*size_t(idx.vertex_index)+1]; + tinyobj::real_t vz = attrib.vertices[3*size_t(idx.vertex_index)+2]; + + // Check if `normal_index` is zero or positive. negative = no normal data + if (idx.normal_index >= 0) { + tinyobj::real_t nx = attrib.normals[3*size_t(idx.normal_index)+0]; + tinyobj::real_t ny = attrib.normals[3*size_t(idx.normal_index)+1]; + tinyobj::real_t nz = attrib.normals[3*size_t(idx.normal_index)+2]; + } + + // Check if `texcoord_index` is zero or positive. 
negative = no texcoord data + if (idx.texcoord_index >= 0) { + tinyobj::real_t tx = attrib.texcoords[2*size_t(idx.texcoord_index)+0]; + tinyobj::real_t ty = attrib.texcoords[2*size_t(idx.texcoord_index)+1]; + } + // Optional: vertex colors - // tinyobj::real_t red = attrib.colors[3*idx.vertex_index+0]; - // tinyobj::real_t green = attrib.colors[3*idx.vertex_index+1]; - // tinyobj::real_t blue = attrib.colors[3*idx.vertex_index+2]; + // tinyobj::real_t red = attrib.colors[3*size_t(idx.vertex_index)+0]; + // tinyobj::real_t green = attrib.colors[3*size_t(idx.vertex_index)+1]; + // tinyobj::real_t blue = attrib.colors[3*size_t(idx.vertex_index)+2]; } index_offset += fv; From 79d44219e74836e9151898de8debb2b26e8e7c29 Mon Sep 17 00:00:00 2001 From: xantares Date: Tue, 23 Mar 2021 07:19:26 +0100 Subject: [PATCH 069/139] Bump required cmake version (#299) Avoids a warning when cmake is too old (2.8.11 is from 2013, 3.2 is from 2015) --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 3d6febac..4a82c922 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -2,7 +2,7 @@ #This configures the Cmake system with multiple properties, depending #on the platform and configuration it is set to build in. project(tinyobjloader) -cmake_minimum_required(VERSION 2.8.11) +cmake_minimum_required(VERSION 3.2) set(TINYOBJLOADER_SOVERSION 2) set(TINYOBJLOADER_VERSION 2.0.0-rc.8) From 15bc2685b51612748bcfdb820bb4d42087a7dce1 Mon Sep 17 00:00:00 2001 From: Catena cyber <35799796+catenacyber@users.noreply.github.com> Date: Wed, 31 Mar 2021 18:34:58 +0200 Subject: [PATCH 070/139] Fuzz target for oss-fuzz integration (#302) * Fuzz target for oss-fuzz integration * README for fuzzing --- CMakeLists.txt | 5 ++++ fuzzer/README.md | 47 ++++++++++++++++++++++++++++++++++ fuzzer/fuzz_ParseFromString.cc | 26 +++++++++++++++++++ 3 files changed, 78 insertions(+) create mode 100644 fuzzer/README.md create mode 100644 fuzzer/fuzz_ParseFromString.cc diff --git a/CMakeLists.txt b/CMakeLists.txt index 4a82c922..8f1eb797 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -109,6 +109,11 @@ write_basic_package_version_file(${PROJECT_NAME}-config-version.cmake #pkg-config file configure_file(${PROJECT_NAME}.pc.in ${LIBRARY_NAME}.pc @ONLY) +if(DEFINED ENV{LIB_FUZZING_ENGINE}) + add_executable(fuzz_ParseFromString fuzzer/fuzz_ParseFromString.cc) + target_link_libraries(fuzz_ParseFromString ${LIBRARY_NAME} $ENV{LIB_FUZZING_ENGINE}) +endif() + #Installation install(TARGETS ${LIBRARY_NAME} diff --git a/fuzzer/README.md b/fuzzer/README.md new file mode 100644 index 00000000..a30cd673 --- /dev/null +++ b/fuzzer/README.md @@ -0,0 +1,47 @@ +# Fuzzing test + +Do fuzzing test for tinyobjloader + +## Supported API + +* [x] ParseFromString + +## Requirements + +* clang with fuzzer support(`-fsanitize=fuzzer`. 
at least clang 8.0 should work) + +## Setup + +### Ubuntu 18.04 + +``` +$ sudo apt install clang++-8 +$ sudo apt install libfuzzer-8-dev +``` + +Optionally, if you didn't set `update-alternatives` you can set `clang++` to point to `clang++8` + +``` +$ sudo update-alternatives --install /usr/bin/clang clang /usr/bin/clang-8 10 +$ sudo update-alternatives --install /usr/bin/clang++ clang++ /usr/bin/clang++-8 10 +``` + +## How to compile + +Fuzz target is compiled with the rest of the project when environment variable `LIB_FUZZING_ENGINE` is defined when running cmake +With clang, you can compile with +``` +$ export LIB_FUZZING_ENGINE=-fsanitize=fuzzer +$ mkdir build && cd build +$ cmake .. -DBUILD_SHARED_LIBS=OFF +$ make -j $(nproc) +``` + +## How to run + +Increase memory limit. e.g. `-rss_limit_mb=2000` +cf libfuzzer.info for all options + +``` +$ ./fuzz_ParseFromString -rss_limit_mb=2000 +``` diff --git a/fuzzer/fuzz_ParseFromString.cc b/fuzzer/fuzz_ParseFromString.cc new file mode 100644 index 00000000..aa45f89a --- /dev/null +++ b/fuzzer/fuzz_ParseFromString.cc @@ -0,0 +1,26 @@ +#include +#include +#include +#include +#include + +#define TINYOBJLOADER_IMPLEMENTATION // define this in only *one* .cc +#include "tiny_obj_loader.h" + +extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { + tinyobj::ObjReaderConfig reader_config; + tinyobj::ObjReader reader; + if (Size < 2) { + return 0; + } + for (size_t i = 0; i < Size-1; i++) { + if (Data[i] == 0) { + std::string obj_text (reinterpret_cast(Data), i); + std::string mtl_text (reinterpret_cast(Data+i+1), Size-i-1); + reader.ParseFromString(obj_text, mtl_text,reader_config); + return 0; + } + } + return 0; +} + From 0ed6c38f20c63b996fbb9fa949569b2acb213a3d Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Thu, 8 Apr 2021 18:57:52 +0900 Subject: [PATCH 071/139] Catenacyber iofix (#304) * Prevent integer overflow in tryParseDouble * Add regression test data to be run by fuzz target * Add simple regression test runner. Co-authored-by: Philippe Antoine --- fuzzer/README.md | 4 ++++ fuzzer/regression_runner/Makefile | 2 ++ fuzzer/regression_runner/README.md | 11 +++++++++++ ...minimized-fuzz_ParseFromString-4877060179886080 | Bin 0 -> 16 bytes tiny_obj_loader.h | 4 ++++ 5 files changed, 21 insertions(+) create mode 100644 fuzzer/regression_runner/Makefile create mode 100644 fuzzer/regression_runner/README.md create mode 100644 fuzzer/regressions/clusterfuzz-testcase-minimized-fuzz_ParseFromString-4877060179886080 diff --git a/fuzzer/README.md b/fuzzer/README.md index a30cd673..1cd63a29 100644 --- a/fuzzer/README.md +++ b/fuzzer/README.md @@ -45,3 +45,7 @@ cf libfuzzer.info for all options ``` $ ./fuzz_ParseFromString -rss_limit_mb=2000 ``` + +## Regression tests + +See `regression_runner/` diff --git a/fuzzer/regression_runner/Makefile b/fuzzer/regression_runner/Makefile new file mode 100644 index 00000000..f2c38a0d --- /dev/null +++ b/fuzzer/regression_runner/Makefile @@ -0,0 +1,2 @@ +all: + clang++ -fsanitize=address,undefined ../../loader_example.cc diff --git a/fuzzer/regression_runner/README.md b/fuzzer/regression_runner/README.md new file mode 100644 index 00000000..f59b9f6a --- /dev/null +++ b/fuzzer/regression_runner/README.md @@ -0,0 +1,11 @@ +# Run fuzzer regression tests + +Currently we only support Linux + clang. 
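The runner is nothing more than the example loader built with sanitizers enabled; the `Makefile` above boils down to

```
$ clang++ -fsanitize=address,undefined ../../loader_example.cc
```

so pointing the resulting `a.out` at the regression inputs (see the commands below) turns any reintroduced crash or undefined behaviour into an AddressSanitizer/UBSan report.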
+ +## How to run + +``` +$ make +$ ./a.out ../regressions/ +``` + diff --git a/fuzzer/regressions/clusterfuzz-testcase-minimized-fuzz_ParseFromString-4877060179886080 b/fuzzer/regressions/clusterfuzz-testcase-minimized-fuzz_ParseFromString-4877060179886080 new file mode 100644 index 0000000000000000000000000000000000000000..e5094497f11c155cd4bfaedf32fcb377051f8885 GIT binary patch literal 16 XcmXR@Fm^RDH8(IfG&C}^U{C-69`^${ literal 0 HcmV?d00001 diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h index 4bfd7b75..bc5b1887 100644 --- a/tiny_obj_loader.h +++ b/tiny_obj_loader.h @@ -941,6 +941,10 @@ static bool tryParseDouble(const char *s, const char *s_end, double *result) { read = 0; end_not_reached = (curr != s_end); while (end_not_reached && IS_DIGIT(*curr)) { + if (exponent > std::numeric_limits::max()/10) { + // Integer overflow + goto fail; + } exponent *= 10; exponent += static_cast(*curr - 0x30); curr++; From 0b6a0b5fc0d39f36a69a82d29383cf1bda88e999 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Tue, 1 Jun 2021 17:22:20 +0900 Subject: [PATCH 072/139] Suppress clang warning: -Wdouble-promotion (#308) --- tiny_obj_loader.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h index bc5b1887..706eaaf2 100644 --- a/tiny_obj_loader.h +++ b/tiny_obj_loader.h @@ -2453,7 +2453,7 @@ bool LoadObj(attrib_t *attrib, std::vector *shapes, // TODO(syoyo): # of elements check parseReal2(&j, &w, &token, -1.0); - if (j < 0.0) { + if (j < static_cast(0)) { if (err) { std::stringstream ss; ss << "Failed parse `vw' line. joint_id is negative. " From dd88876017100fc3f10b994f5189c350cc087d93 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Thu, 10 Jun 2021 01:49:01 +0900 Subject: [PATCH 073/139] Bump windows vm image. (#310) Describe python2.7 is not supported. --- azure-pipelines.yml | 4 +--- python/README.md | 5 +++++ 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 9463f4c5..0e835ae1 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -85,11 +85,9 @@ jobs: artifactName: tinyobjDeployMacOS - job: windows - pool: {vmImage: 'vs2017-win2016'} + pool: {vmImage: 'windows-2019'} steps: - task: UsePythonVersion@0 - - script: choco install vcpython27 -f -y - displayName: Install Visual C++ for Python 2.7 - bash: | python -m pip install --upgrade pip pip install cibuildwheel diff --git a/python/README.md b/python/README.md index cd1e24dc..b6e2872d 100644 --- a/python/README.md +++ b/python/README.md @@ -3,6 +3,11 @@ `tinyobjloader` is a python wrapper for C++ wavefront .obj loader. `tinyobjloader` is rather fast and feature rich than other pure python version of .obj loader. +## Requirements + +* python 3.x(3.6+ recommended) + * python 2.7 may work, but not officially supported. + ## Install You can install `tinyobjloader` with pip. From 662d5e54f466f4af09de31b5bff802506c81fe2a Mon Sep 17 00:00:00 2001 From: Jeremy Nimmer Date: Thu, 10 Jun 2021 03:28:59 -0700 Subject: [PATCH 074/139] Remove library uses of std::endl on std::stringstream (#311) There is no need to flush a stringstream. 
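The changes are mechanical; taking one of the touched diagnostics as an example:

```c++
#include <ostream>
#include <sstream>

int main() {
  std::stringstream ss;
  // Old form: std::endl appends '\n' and then flushes -- a flush buys nothing on an in-memory stringstream.
  ss << "Material stream in error state. " << std::endl;
  // New form: a plain newline carries the same text with no flush.
  ss << "Material stream in error state. \n";
}
```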
https://github.com/isocpp/CppCoreGuidelines/blob/master/CppCoreGuidelines.md#Rio-endl --- tiny_obj_loader.h | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h index 706eaaf2..6dc73717 100644 --- a/tiny_obj_loader.h +++ b/tiny_obj_loader.h @@ -1986,7 +1986,7 @@ void LoadMtl(std::map *material_map, warn_ss << "Both `d` and `Tr` parameters defined for \"" << material.name << "\". Use the value of `d` for dissolve (line " << line_no - << " in .mtl.)" << std::endl; + << " in .mtl.)\n"; } has_d = true; continue; @@ -1998,7 +1998,7 @@ void LoadMtl(std::map *material_map, warn_ss << "Both `d` and `Tr` parameters defined for \"" << material.name << "\". Use the value of `d` for dissolve (line " << line_no - << " in .mtl.)" << std::endl; + << " in .mtl.)\n"; } else { // We invert value of Tr(assume Tr is in range [0, 1]) // NOTE: Interpretation of Tr is application(exporter) dependent. For @@ -2244,7 +2244,7 @@ bool MaterialFileReader::operator()(const std::string &matId, std::stringstream ss; ss << "Material file [ " << matId - << " ] not found in a path : " << m_mtlBaseDir << std::endl; + << " ] not found in a path : " << m_mtlBaseDir << "\n"; if (warn) { (*warn) += ss.str(); } @@ -2261,7 +2261,7 @@ bool MaterialFileReader::operator()(const std::string &matId, std::stringstream ss; ss << "Material file [ " << filepath - << " ] not found in a path : " << m_mtlBaseDir << std::endl; + << " ] not found in a path : " << m_mtlBaseDir << "\n"; if (warn) { (*warn) += ss.str(); } @@ -2278,7 +2278,7 @@ bool MaterialStreamReader::operator()(const std::string &matId, (void)matId; if (!m_inStream) { std::stringstream ss; - ss << "Material stream in error state. " << std::endl; + ss << "Material stream in error state. \n"; if (warn) { (*warn) += ss.str(); } @@ -2304,7 +2304,7 @@ bool LoadObj(attrib_t *attrib, std::vector *shapes, std::ifstream ifs(filename); if (!ifs) { - errss << "Cannot open file [" << filename << "]" << std::endl; + errss << "Cannot open file [" << filename << "]\n"; if (err) { (*err) = errss.str(); } @@ -2837,24 +2837,21 @@ bool LoadObj(attrib_t *attrib, std::vector *shapes, if (greatest_v_idx >= static_cast(v.size() / 3)) { if (warn) { std::stringstream ss; - ss << "Vertex indices out of bounds (line " << line_num << ".)\n" - << std::endl; + ss << "Vertex indices out of bounds (line " << line_num << ".)\n\n"; (*warn) += ss.str(); } } if (greatest_vn_idx >= static_cast(vn.size() / 3)) { if (warn) { std::stringstream ss; - ss << "Vertex normal indices out of bounds (line " << line_num << ".)\n" - << std::endl; + ss << "Vertex normal indices out of bounds (line " << line_num << ".)\n\n"; (*warn) += ss.str(); } } if (greatest_vt_idx >= static_cast(vt.size() / 2)) { if (warn) { std::stringstream ss; - ss << "Vertex texcoord indices out of bounds (line " << line_num << ".)\n" - << std::endl; + ss << "Vertex texcoord indices out of bounds (line " << line_num << ".)\n\n"; (*warn) += ss.str(); } } From db7454cbcee913fa132c191ab52b920998c2d796 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Thu, 29 Jul 2021 16:55:42 +0900 Subject: [PATCH 075/139] Integrate Mapbox earcut.hpp for robust triangluation (#298) * Use simple triangulation rule for the quad face when triangulation. This partially solves issue no. 295. * Embed mapbox/earcut.hpp code(for robust triangulation) * Use mapbox/earcut.hpp for the polygon tessellation(for a polygon with 5 or more vertices). 
* Use Mapbox earcut(robust triangulation) by default for python binding. * Fix compile of Mapbox earcut code path. --- README.md | 18 +- examples/viewer/viewer.cc | 11 + python/tiny_obj_loader.cc | 3 + tiny_obj_loader.h | 978 +++++++++++++++++++++++++++++++++++++- 4 files changed, 992 insertions(+), 18 deletions(-) diff --git a/README.md b/README.md index ce5ab1b0..bfd902cb 100644 --- a/README.md +++ b/README.md @@ -26,6 +26,7 @@ Old version is available as `v0.9.x` branch https://github.com/syoyo/tinyobjload ## What's new +* 29 Jul, 2021 : Added Mapbox's earcut for robust triangulation. Also fixes triangulation bug. * 19 Feb, 2020 : The repository has been moved to https://github.com/tinyobjloader/tinyobjloader ! * 18 May, 2019 : Python binding!(See `python` folder. Also see https://pypi.org/project/tinyobjloader/) * 14 Apr, 2019 : Bump version v2.0.0 rc0. New C++ API and python bindings!(1.x API still exists for backward compatibility) @@ -139,6 +140,7 @@ TinyObjLoader is licensed under MIT license. ### Third party licenses. * pybind11 : BSD-style license. +* mapbox earcut.hpp: ISC License. ## Usage @@ -241,10 +243,22 @@ TinyObjLoader now use `real_t` for floating point data type. Default is `float(32bit)`. You can enable `double(64bit)` precision by using `TINYOBJLOADER_USE_DOUBLE` define. +### Robust triangulation + +When you enable `triangulation`(default is enabled), +TinyObjLoader triangulate polygons(faces with 4 or more vertices). + +Built-in trinagulation code may not work well in some polygon shape. + +You can define `TINYOBJLOADER_USE_MAPBOX_EARCUT` for robust triangulation using `mapbox/earcut.hpp`. +This requires C++11 compiler though. + #### Example code (Deprecated API) ```c++ #define TINYOBJLOADER_IMPLEMENTATION // define this in only *one* .cc +// Optional. define TINYOBJLOADER_USE_MAPBOX_EARCUT gives robust trinagulation. Requires C++11 +//#define TINYOBJLOADER_USE_MAPBOX_EARCUT #include "tiny_obj_loader.h" std::string inputfile = "cornell_box.obj"; @@ -315,6 +329,8 @@ for (size_t s = 0; s < shapes.size(); s++) { ```c++ #define TINYOBJLOADER_IMPLEMENTATION // define this in only *one* .cc +// Optional. define TINYOBJLOADER_USE_MAPBOX_EARCUT gives robust trinagulation. Requires C++11 +//#define TINYOBJLOADER_USE_MAPBOX_EARCUT #include "tiny_obj_loader.h" @@ -353,7 +369,7 @@ for (size_t s = 0; s < shapes.size(); s++) { tinyobj::real_t vx = attrib.vertices[3*size_t(idx.vertex_index)+0]; tinyobj::real_t vy = attrib.vertices[3*size_t(idx.vertex_index)+1]; tinyobj::real_t vz = attrib.vertices[3*size_t(idx.vertex_index)+2]; - + // Check if `normal_index` is zero or positive. negative = no normal data if (idx.normal_index >= 0) { tinyobj::real_t nx = attrib.normals[3*size_t(idx.normal_index)+0]; diff --git a/examples/viewer/viewer.cc b/examples/viewer/viewer.cc index 145a140d..bc959c94 100644 --- a/examples/viewer/viewer.cc +++ b/examples/viewer/viewer.cc @@ -23,13 +23,24 @@ #include #define TINYOBJLOADER_IMPLEMENTATION +// TINYOBJLOADER_USE_MAPBOX_EARCUT: Enable better triangulation. 
Requires C++11 +//#define TINYOBJLOADER_USE_MAPBOX_EARCUT #include "../../tiny_obj_loader.h" #include "trackball.h" +#ifdef __clang__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Weverything" +#endif + #define STB_IMAGE_IMPLEMENTATION #include "stb_image.h" +#ifdef __clang__ +#pragma clang diagnostic pop +#endif + #ifdef _WIN32 #ifdef __cplusplus extern "C" { diff --git a/python/tiny_obj_loader.cc b/python/tiny_obj_loader.cc index 11d49865..a0b8bc60 100644 --- a/python/tiny_obj_loader.cc +++ b/python/tiny_obj_loader.cc @@ -2,5 +2,8 @@ // Need also define this in `binding.cc`(and all compilation units) #define TINYOBJLOADER_USE_DOUBLE +// Use robust triangulation by using Mapbox earcut. +#define TINYOBJLOADER_USE_MAPBOX_EARCUT + #define TINYOBJLOADER_IMPLEMENTATION #include "tiny_obj_loader.h" diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h index 6dc73717..1a15e1d4 100644 --- a/tiny_obj_loader.h +++ b/tiny_obj_loader.h @@ -29,6 +29,7 @@ THE SOFTWARE. // * Support multiple search path for .mtl(v1 API). // * Support vertex weight `vw`(as an tinyobj extension) // * Support escaped whitespece in mtllib +// * Add robust triangulation using Mapbox earcut(TINYOBJLOADER_USE_MAPBOX_EARCUT). // version 1.4.0 : Modifed ParseTextureNameAndOption API // version 1.3.1 : Make ParseTextureNameAndOption API public // version 1.3.0 : Separate warning and error message(breaking API of LoadObj) @@ -502,6 +503,11 @@ class MaterialStreamReader : public MaterialReader { struct ObjReaderConfig { bool triangulate; // triangulate polygon? + // Currently not used. + // "simple" or empty: Create triangle fan + // "earcut": Use the algorithm based on Ear clipping + std::string triangulation_method; + /// Parse vertex color. /// If vertex color is not present, its filled with default value. /// false = no vertex color @@ -515,7 +521,8 @@ struct ObjReaderConfig { /// std::string mtl_search_path; - ObjReaderConfig() : triangulate(true), vertex_color(true) {} + ObjReaderConfig() + : triangulate(true), triangulation_method("simple"), vertex_color(true) {} }; /// @@ -654,6 +661,871 @@ bool ParseTextureNameAndOption(std::string *texname, texture_option_t *texopt, #include #include +#ifdef TINYOBJLOADER_USE_MAPBOX_EARCUT + +#include +#include + +/* +ISC License + +Copyright (c) 2015, Mapbox + +Permission to use, copy, modify, and/or distribute this software for any purpose +with or without fee is hereby granted, provided that the above copyright notice +and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF +THIS SOFTWARE. 
+*/ + +namespace mapbox { + +namespace util { + +template +struct nth { + inline static typename std::tuple_element::type get(const T &t) { + return std::get(t); + }; +}; + +} // namespace util + +namespace detail { + +template +class Earcut { + public: + std::vector indices; + std::size_t vertices = 0; + + template + void operator()(const Polygon &points); + + private: + struct Node { + Node(N index, double x_, double y_) : i(index), x(x_), y(y_) {} + Node(const Node &) = delete; + Node &operator=(const Node &) = delete; + Node(Node &&) = delete; + Node &operator=(Node &&) = delete; + + const N i; + const double x; + const double y; + + // previous and next vertice nodes in a polygon ring + Node *prev = nullptr; + Node *next = nullptr; + + // z-order curve value + int32_t z = 0; + + // previous and next nodes in z-order + Node *prevZ = nullptr; + Node *nextZ = nullptr; + + // indicates whether this is a steiner point + bool steiner = false; + }; + + template + Node *linkedList(const Ring &points, const bool clockwise); + Node *filterPoints(Node *start, Node *end = nullptr); + void earcutLinked(Node *ear, int pass = 0); + bool isEar(Node *ear); + bool isEarHashed(Node *ear); + Node *cureLocalIntersections(Node *start); + void splitEarcut(Node *start); + template + Node *eliminateHoles(const Polygon &points, Node *outerNode); + void eliminateHole(Node *hole, Node *outerNode); + Node *findHoleBridge(Node *hole, Node *outerNode); + bool sectorContainsSector(const Node *m, const Node *p); + void indexCurve(Node *start); + Node *sortLinked(Node *list); + int32_t zOrder(const double x_, const double y_); + Node *getLeftmost(Node *start); + bool pointInTriangle(double ax, double ay, double bx, double by, double cx, + double cy, double px, double py) const; + bool isValidDiagonal(Node *a, Node *b); + double area(const Node *p, const Node *q, const Node *r) const; + bool equals(const Node *p1, const Node *p2); + bool intersects(const Node *p1, const Node *q1, const Node *p2, + const Node *q2); + bool onSegment(const Node *p, const Node *q, const Node *r); + int sign(double val); + bool intersectsPolygon(const Node *a, const Node *b); + bool locallyInside(const Node *a, const Node *b); + bool middleInside(const Node *a, const Node *b); + Node *splitPolygon(Node *a, Node *b); + template + Node *insertNode(std::size_t i, const Point &p, Node *last); + void removeNode(Node *p); + + bool hashing; + double minX, maxX; + double minY, maxY; + double inv_size = 0; + + template > + class ObjectPool { + public: + ObjectPool() {} + ObjectPool(std::size_t blockSize_) { reset(blockSize_); } + ~ObjectPool() { clear(); } + template + T *construct(Args &&... 
args) { + if (currentIndex >= blockSize) { + currentBlock = alloc_traits::allocate(alloc, blockSize); + allocations.emplace_back(currentBlock); + currentIndex = 0; + } + T *object = ¤tBlock[currentIndex++]; + alloc_traits::construct(alloc, object, std::forward(args)...); + return object; + } + void reset(std::size_t newBlockSize) { + for (auto allocation : allocations) { + alloc_traits::deallocate(alloc, allocation, blockSize); + } + allocations.clear(); + blockSize = std::max(1, newBlockSize); + currentBlock = nullptr; + currentIndex = blockSize; + } + void clear() { reset(blockSize); } + + private: + T *currentBlock = nullptr; + std::size_t currentIndex = 1; + std::size_t blockSize = 1; + std::vector allocations; + Alloc alloc; + typedef typename std::allocator_traits alloc_traits; + }; + ObjectPool nodes; +}; + +template +template +void Earcut::operator()(const Polygon &points) { + // reset + indices.clear(); + vertices = 0; + + if (points.empty()) return; + + double x; + double y; + int threshold = 80; + std::size_t len = 0; + + for (size_t i = 0; threshold >= 0 && i < points.size(); i++) { + threshold -= static_cast(points[i].size()); + len += points[i].size(); + } + + // estimate size of nodes and indices + nodes.reset(len * 3 / 2); + indices.reserve(len + points[0].size()); + + Node *outerNode = linkedList(points[0], true); + if (!outerNode || outerNode->prev == outerNode->next) return; + + if (points.size() > 1) outerNode = eliminateHoles(points, outerNode); + + // if the shape is not too simple, we'll use z-order curve hash later; + // calculate polygon bbox + hashing = threshold < 0; + if (hashing) { + Node *p = outerNode->next; + minX = maxX = outerNode->x; + minY = maxY = outerNode->y; + do { + x = p->x; + y = p->y; + minX = std::min(minX, x); + minY = std::min(minY, y); + maxX = std::max(maxX, x); + maxY = std::max(maxY, y); + p = p->next; + } while (p != outerNode); + + // minX, minY and size are later used to transform coords into integers for + // z-order calculation + inv_size = std::max(maxX - minX, maxY - minY); + inv_size = inv_size != .0 ? (1. / inv_size) : .0; + } + + earcutLinked(outerNode); + + nodes.clear(); +} + +// create a circular doubly linked list from polygon points in the specified +// winding order +template +template +typename Earcut::Node *Earcut::linkedList(const Ring &points, + const bool clockwise) { + using Point = typename Ring::value_type; + double sum = 0; + const std::size_t len = points.size(); + std::size_t i, j; + Node *last = nullptr; + + // calculate original winding order of a polygon ring + for (i = 0, j = len > 0 ? 
len - 1 : 0; i < len; j = i++) { + const auto &p1 = points[i]; + const auto &p2 = points[j]; + const double p20 = util::nth<0, Point>::get(p2); + const double p10 = util::nth<0, Point>::get(p1); + const double p11 = util::nth<1, Point>::get(p1); + const double p21 = util::nth<1, Point>::get(p2); + sum += (p20 - p10) * (p11 + p21); + } + + // link points into circular doubly-linked list in the specified winding order + if (clockwise == (sum > 0)) { + for (i = 0; i < len; i++) last = insertNode(vertices + i, points[i], last); + } else { + for (i = len; i-- > 0;) last = insertNode(vertices + i, points[i], last); + } + + if (last && equals(last, last->next)) { + removeNode(last); + last = last->next; + } + + vertices += len; + + return last; +} + +// eliminate colinear or duplicate points +template +typename Earcut::Node *Earcut::filterPoints(Node *start, Node *end) { + if (!end) end = start; + + Node *p = start; + bool again; + do { + again = false; + + if (!p->steiner && (equals(p, p->next) || area(p->prev, p, p->next) == 0)) { + removeNode(p); + p = end = p->prev; + + if (p == p->next) break; + again = true; + + } else { + p = p->next; + } + } while (again || p != end); + + return end; +} + +// main ear slicing loop which triangulates a polygon (given as a linked list) +template +void Earcut::earcutLinked(Node *ear, int pass) { + if (!ear) return; + + // interlink polygon nodes in z-order + if (!pass && hashing) indexCurve(ear); + + Node *stop = ear; + Node *prev; + Node *next; + + int iterations = 0; + + // iterate through ears, slicing them one by one + while (ear->prev != ear->next) { + iterations++; + prev = ear->prev; + next = ear->next; + + if (hashing ? isEarHashed(ear) : isEar(ear)) { + // cut off the triangle + indices.emplace_back(prev->i); + indices.emplace_back(ear->i); + indices.emplace_back(next->i); + + removeNode(ear); + + // skipping the next vertice leads to less sliver triangles + ear = next->next; + stop = next->next; + + continue; + } + + ear = next; + + // if we looped through the whole remaining polygon and can't find any more + // ears + if (ear == stop) { + // try filtering points and slicing again + if (!pass) earcutLinked(filterPoints(ear), 1); + + // if this didn't work, try curing all small self-intersections locally + else if (pass == 1) { + ear = cureLocalIntersections(filterPoints(ear)); + earcutLinked(ear, 2); + + // as a last resort, try splitting the remaining polygon into two + } else if (pass == 2) + splitEarcut(ear); + + break; + } + } +} + +// check whether a polygon node forms a valid ear with adjacent nodes +template +bool Earcut::isEar(Node *ear) { + const Node *a = ear->prev; + const Node *b = ear; + const Node *c = ear->next; + + if (area(a, b, c) >= 0) return false; // reflex, can't be an ear + + // now make sure we don't have other points inside the potential ear + Node *p = ear->next->next; + + while (p != ear->prev) { + if (pointInTriangle(a->x, a->y, b->x, b->y, c->x, c->y, p->x, p->y) && + area(p->prev, p, p->next) >= 0) + return false; + p = p->next; + } + + return true; +} + +template +bool Earcut::isEarHashed(Node *ear) { + const Node *a = ear->prev; + const Node *b = ear; + const Node *c = ear->next; + + if (area(a, b, c) >= 0) return false; // reflex, can't be an ear + + // triangle bbox; min & max are calculated like this for speed + const double minTX = std::min(a->x, std::min(b->x, c->x)); + const double minTY = std::min(a->y, std::min(b->y, c->y)); + const double maxTX = std::max(a->x, std::max(b->x, c->x)); + const double maxTY = 
std::max(a->y, std::max(b->y, c->y)); + + // z-order range for the current triangle bbox; + const int32_t minZ = zOrder(minTX, minTY); + const int32_t maxZ = zOrder(maxTX, maxTY); + + // first look for points inside the triangle in increasing z-order + Node *p = ear->nextZ; + + while (p && p->z <= maxZ) { + if (p != ear->prev && p != ear->next && + pointInTriangle(a->x, a->y, b->x, b->y, c->x, c->y, p->x, p->y) && + area(p->prev, p, p->next) >= 0) + return false; + p = p->nextZ; + } + + // then look for points in decreasing z-order + p = ear->prevZ; + + while (p && p->z >= minZ) { + if (p != ear->prev && p != ear->next && + pointInTriangle(a->x, a->y, b->x, b->y, c->x, c->y, p->x, p->y) && + area(p->prev, p, p->next) >= 0) + return false; + p = p->prevZ; + } + + return true; +} + +// go through all polygon nodes and cure small local self-intersections +template +typename Earcut::Node *Earcut::cureLocalIntersections(Node *start) { + Node *p = start; + do { + Node *a = p->prev; + Node *b = p->next->next; + + // a self-intersection where edge (v[i-1],v[i]) intersects (v[i+1],v[i+2]) + if (!equals(a, b) && intersects(a, p, p->next, b) && locallyInside(a, b) && + locallyInside(b, a)) { + indices.emplace_back(a->i); + indices.emplace_back(p->i); + indices.emplace_back(b->i); + + // remove two nodes involved + removeNode(p); + removeNode(p->next); + + p = start = b; + } + p = p->next; + } while (p != start); + + return filterPoints(p); +} + +// try splitting polygon into two and triangulate them independently +template +void Earcut::splitEarcut(Node *start) { + // look for a valid diagonal that divides the polygon into two + Node *a = start; + do { + Node *b = a->next->next; + while (b != a->prev) { + if (a->i != b->i && isValidDiagonal(a, b)) { + // split the polygon in two by the diagonal + Node *c = splitPolygon(a, b); + + // filter colinear points around the cuts + a = filterPoints(a, a->next); + c = filterPoints(c, c->next); + + // run earcut on each half + earcutLinked(a); + earcutLinked(c); + return; + } + b = b->next; + } + a = a->next; + } while (a != start); +} + +// link every hole into the outer loop, producing a single-ring polygon without +// holes +template +template +typename Earcut::Node *Earcut::eliminateHoles(const Polygon &points, + Node *outerNode) { + const size_t len = points.size(); + + std::vector queue; + for (size_t i = 1; i < len; i++) { + Node *list = linkedList(points[i], false); + if (list) { + if (list == list->next) list->steiner = true; + queue.push_back(getLeftmost(list)); + } + } + std::sort(queue.begin(), queue.end(), + [](const Node *a, const Node *b) { return a->x < b->x; }); + + // process holes from left to right + for (size_t i = 0; i < queue.size(); i++) { + eliminateHole(queue[i], outerNode); + outerNode = filterPoints(outerNode, outerNode->next); + } + + return outerNode; +} + +// find a bridge between vertices that connects hole with an outer ring and and +// link it +template +void Earcut::eliminateHole(Node *hole, Node *outerNode) { + outerNode = findHoleBridge(hole, outerNode); + if (outerNode) { + Node *b = splitPolygon(outerNode, hole); + + // filter out colinear points around cuts + filterPoints(outerNode, outerNode->next); + filterPoints(b, b->next); + } +} + +// David Eberly's algorithm for finding a bridge between hole and outer polygon +template +typename Earcut::Node *Earcut::findHoleBridge(Node *hole, + Node *outerNode) { + Node *p = outerNode; + double hx = hole->x; + double hy = hole->y; + double qx = -std::numeric_limits::infinity(); + 
Node *m = nullptr; + + // find a segment intersected by a ray from the hole's leftmost Vertex to the + // left; segment's endpoint with lesser x will be potential connection Vertex + do { + if (hy <= p->y && hy >= p->next->y && p->next->y != p->y) { + double x = p->x + (hy - p->y) * (p->next->x - p->x) / (p->next->y - p->y); + if (x <= hx && x > qx) { + qx = x; + if (x == hx) { + if (hy == p->y) return p; + if (hy == p->next->y) return p->next; + } + m = p->x < p->next->x ? p : p->next; + } + } + p = p->next; + } while (p != outerNode); + + if (!m) return 0; + + if (hx == qx) return m; // hole touches outer segment; pick leftmost endpoint + + // look for points inside the triangle of hole Vertex, segment intersection + // and endpoint; if there are no points found, we have a valid connection; + // otherwise choose the Vertex of the minimum angle with the ray as connection + // Vertex + + const Node *stop = m; + double tanMin = std::numeric_limits::infinity(); + double tanCur = 0; + + p = m; + double mx = m->x; + double my = m->y; + + do { + if (hx >= p->x && p->x >= mx && hx != p->x && + pointInTriangle(hy < my ? hx : qx, hy, mx, my, hy < my ? qx : hx, hy, + p->x, p->y)) { + tanCur = std::abs(hy - p->y) / (hx - p->x); // tangential + + if (locallyInside(p, hole) && + (tanCur < tanMin || + (tanCur == tanMin && (p->x > m->x || sectorContainsSector(m, p))))) { + m = p; + tanMin = tanCur; + } + } + + p = p->next; + } while (p != stop); + + return m; +} + +// whether sector in vertex m contains sector in vertex p in the same +// coordinates +template +bool Earcut::sectorContainsSector(const Node *m, const Node *p) { + return area(m->prev, m, p->prev) < 0 && area(p->next, m, m->next) < 0; +} + +// interlink polygon nodes in z-order +template +void Earcut::indexCurve(Node *start) { + assert(start); + Node *p = start; + + do { + p->z = p->z ? 
p->z : zOrder(p->x, p->y); + p->prevZ = p->prev; + p->nextZ = p->next; + p = p->next; + } while (p != start); + + p->prevZ->nextZ = nullptr; + p->prevZ = nullptr; + + sortLinked(p); +} + +// Simon Tatham's linked list merge sort algorithm +// http://www.chiark.greenend.org.uk/~sgtatham/algorithms/listsort.html +template +typename Earcut::Node *Earcut::sortLinked(Node *list) { + assert(list); + Node *p; + Node *q; + Node *e; + Node *tail; + int i, numMerges, pSize, qSize; + int inSize = 1; + + for (;;) { + p = list; + list = nullptr; + tail = nullptr; + numMerges = 0; + + while (p) { + numMerges++; + q = p; + pSize = 0; + for (i = 0; i < inSize; i++) { + pSize++; + q = q->nextZ; + if (!q) break; + } + + qSize = inSize; + + while (pSize > 0 || (qSize > 0 && q)) { + if (pSize == 0) { + e = q; + q = q->nextZ; + qSize--; + } else if (qSize == 0 || !q) { + e = p; + p = p->nextZ; + pSize--; + } else if (p->z <= q->z) { + e = p; + p = p->nextZ; + pSize--; + } else { + e = q; + q = q->nextZ; + qSize--; + } + + if (tail) + tail->nextZ = e; + else + list = e; + + e->prevZ = tail; + tail = e; + } + + p = q; + } + + tail->nextZ = nullptr; + + if (numMerges <= 1) return list; + + inSize *= 2; + } +} + +// z-order of a Vertex given coords and size of the data bounding box +template +int32_t Earcut::zOrder(const double x_, const double y_) { + // coords are transformed into non-negative 15-bit integer range + int32_t x = static_cast(32767.0 * (x_ - minX) * inv_size); + int32_t y = static_cast(32767.0 * (y_ - minY) * inv_size); + + x = (x | (x << 8)) & 0x00FF00FF; + x = (x | (x << 4)) & 0x0F0F0F0F; + x = (x | (x << 2)) & 0x33333333; + x = (x | (x << 1)) & 0x55555555; + + y = (y | (y << 8)) & 0x00FF00FF; + y = (y | (y << 4)) & 0x0F0F0F0F; + y = (y | (y << 2)) & 0x33333333; + y = (y | (y << 1)) & 0x55555555; + + return x | (y << 1); +} + +// find the leftmost node of a polygon ring +template +typename Earcut::Node *Earcut::getLeftmost(Node *start) { + Node *p = start; + Node *leftmost = start; + do { + if (p->x < leftmost->x || (p->x == leftmost->x && p->y < leftmost->y)) + leftmost = p; + p = p->next; + } while (p != start); + + return leftmost; +} + +// check if a point lies within a convex triangle +template +bool Earcut::pointInTriangle(double ax, double ay, double bx, double by, + double cx, double cy, double px, + double py) const { + return (cx - px) * (ay - py) - (ax - px) * (cy - py) >= 0 && + (ax - px) * (by - py) - (bx - px) * (ay - py) >= 0 && + (bx - px) * (cy - py) - (cx - px) * (by - py) >= 0; +} + +// check if a diagonal between two polygon nodes is valid (lies in polygon +// interior) +template +bool Earcut::isValidDiagonal(Node *a, Node *b) { + return a->next->i != b->i && a->prev->i != b->i && + !intersectsPolygon(a, b) && // dones't intersect other edges + ((locallyInside(a, b) && locallyInside(b, a) && + middleInside(a, b) && // locally visible + (area(a->prev, a, b->prev) != 0.0 || + area(a, b->prev, b) != + 0.0)) || // does not create opposite-facing sectors + (equals(a, b) && area(a->prev, a, a->next) > 0 && + area(b->prev, b, b->next) > 0)); // special zero-length case +} + +// signed area of a triangle +template +double Earcut::area(const Node *p, const Node *q, const Node *r) const { + return (q->y - p->y) * (r->x - q->x) - (q->x - p->x) * (r->y - q->y); +} + +// check if two points are equal +template +bool Earcut::equals(const Node *p1, const Node *p2) { + return p1->x == p2->x && p1->y == p2->y; +} + +// check if two segments intersect +template +bool Earcut::intersects(const 
Node *p1, const Node *q1, const Node *p2, + const Node *q2) { + int o1 = sign(area(p1, q1, p2)); + int o2 = sign(area(p1, q1, q2)); + int o3 = sign(area(p2, q2, p1)); + int o4 = sign(area(p2, q2, q1)); + + if (o1 != o2 && o3 != o4) return true; // general case + + if (o1 == 0 && onSegment(p1, p2, q1)) + return true; // p1, q1 and p2 are collinear and p2 lies on p1q1 + if (o2 == 0 && onSegment(p1, q2, q1)) + return true; // p1, q1 and q2 are collinear and q2 lies on p1q1 + if (o3 == 0 && onSegment(p2, p1, q2)) + return true; // p2, q2 and p1 are collinear and p1 lies on p2q2 + if (o4 == 0 && onSegment(p2, q1, q2)) + return true; // p2, q2 and q1 are collinear and q1 lies on p2q2 + + return false; +} + +// for collinear points p, q, r, check if point q lies on segment pr +template +bool Earcut::onSegment(const Node *p, const Node *q, const Node *r) { + return q->x <= std::max(p->x, r->x) && + q->x >= std::min(p->x, r->x) && + q->y <= std::max(p->y, r->y) && + q->y >= std::min(p->y, r->y); +} + +template +int Earcut::sign(double val) { + return (0.0 < val) - (val < 0.0); +} + +// check if a polygon diagonal intersects any polygon segments +template +bool Earcut::intersectsPolygon(const Node *a, const Node *b) { + const Node *p = a; + do { + if (p->i != a->i && p->next->i != a->i && p->i != b->i && + p->next->i != b->i && intersects(p, p->next, a, b)) + return true; + p = p->next; + } while (p != a); + + return false; +} + +// check if a polygon diagonal is locally inside the polygon +template +bool Earcut::locallyInside(const Node *a, const Node *b) { + return area(a->prev, a, a->next) < 0 + ? area(a, b, a->next) >= 0 && area(a, a->prev, b) >= 0 + : area(a, b, a->prev) < 0 || area(a, a->next, b) < 0; +} + +// check if the middle Vertex of a polygon diagonal is inside the polygon +template +bool Earcut::middleInside(const Node *a, const Node *b) { + const Node *p = a; + bool inside = false; + double px = (a->x + b->x) / 2; + double py = (a->y + b->y) / 2; + do { + if (((p->y > py) != (p->next->y > py)) && p->next->y != p->y && + (px < (p->next->x - p->x) * (py - p->y) / (p->next->y - p->y) + p->x)) + inside = !inside; + p = p->next; + } while (p != a); + + return inside; +} + +// link two polygon vertices with a bridge; if the vertices belong to the same +// ring, it splits polygon into two; if one belongs to the outer ring and +// another to a hole, it merges it into a single ring +template +typename Earcut::Node *Earcut::splitPolygon(Node *a, Node *b) { + Node *a2 = nodes.construct(a->i, a->x, a->y); + Node *b2 = nodes.construct(b->i, b->x, b->y); + Node *an = a->next; + Node *bp = b->prev; + + a->next = b; + b->prev = a; + + a2->next = an; + an->prev = a2; + + b2->next = a2; + a2->prev = b2; + + bp->next = b2; + b2->prev = bp; + + return b2; +} + +// create a node and util::optionally link it with previous one (in a circular +// doubly linked list) +template +template +typename Earcut::Node *Earcut::insertNode(std::size_t i, const Point &pt, + Node *last) { + Node *p = nodes.construct(static_cast(i), util::nth<0, Point>::get(pt), + util::nth<1, Point>::get(pt)); + + if (!last) { + p->prev = p; + p->next = p; + + } else { + assert(last); + p->next = last->next; + p->prev = last; + last->next->prev = p; + last->next = p; + } + return p; +} + +template +void Earcut::removeNode(Node *p) { + p->next->prev = p->prev; + p->prev->next = p->next; + + if (p->prevZ) p->prevZ->nextZ = p->nextZ; + if (p->nextZ) p->nextZ->prevZ = p->prevZ; +} +} // namespace detail + +template +std::vector earcut(const 
Polygon &poly) { + mapbox::detail::Earcut earcut; + earcut(poly); + return std::move(earcut.indices); +} +} // namespace mapbox + +#endif // TINYOBJLOADER_USE_MAPBOX_EARCUT + namespace tinyobj { MaterialReader::~MaterialReader() {} @@ -1542,37 +2414,87 @@ static bool exportGroupsToShape(shape_t *shape, const PrimGroup &prim_group, real_t cy = std::fabs(e0z * e1x - e0x * e1z); real_t cz = std::fabs(e0x * e1y - e0y * e1x); const real_t epsilon = std::numeric_limits::epsilon(); + // std::cout << "cx " << cx << ", cy " << cy << ", cz " << cz << + // "\n"; if (cx > epsilon || cy > epsilon || cz > epsilon) { + // std::cout << "corner\n"; // found a corner if (cx > cy && cx > cz) { + // std::cout << "pattern0\n"; } else { + // std::cout << "axes[0] = 0\n"; axes[0] = 0; - if (cz > cx && cz > cy) axes[1] = 1; + if (cz > cx && cz > cy) { + // std::cout << "axes[1] = 1\n"; + axes[1] = 1; + } } break; } } - real_t area = 0; - for (size_t k = 0; k < npolys; ++k) { - i0 = face.vertex_indices[(k + 0) % npolys]; - i1 = face.vertex_indices[(k + 1) % npolys]; +#ifdef TINYOBJLOADER_USE_MAPBOX_EARCUT + using Point = std::array; + + // first polyline define the main polygon. + // following polylines define holes(not used in tinyobj). + std::vector > polygon; + + std::vector polyline; + + // Fill polygon data(facevarying vertices). + for (size_t k = 0; k < npolys; k++) { + i0 = face.vertex_indices[k]; size_t vi0 = size_t(i0.v_idx); - size_t vi1 = size_t(i1.v_idx); - if (((vi0 * 3 + axes[0]) >= v.size()) || - ((vi0 * 3 + axes[1]) >= v.size()) || - ((vi1 * 3 + axes[0]) >= v.size()) || - ((vi1 * 3 + axes[1]) >= v.size())) { - // Invalid index. - continue; - } + + assert(((3 * vi0 + 2) < v.size())); + real_t v0x = v[vi0 * 3 + axes[0]]; real_t v0y = v[vi0 * 3 + axes[1]]; - real_t v1x = v[vi1 * 3 + axes[0]]; - real_t v1y = v[vi1 * 3 + axes[1]]; - area += (v0x * v1y - v0y * v1x) * static_cast(0.5); + + polyline.push_back({v0x, v0y}); } + polygon.push_back(polyline); + std::vector indices = mapbox::earcut(polygon); + // => result = 3 * faces, clockwise + + assert(indices.size() % 3 == 0); + + // Reconstruct vertex_index_t + for (size_t k = 0; k < indices.size() / 3; k++) { + { + index_t idx0, idx1, idx2; + idx0.vertex_index = face.vertex_indices[indices[3 * k + 0]].v_idx; + idx0.normal_index = + face.vertex_indices[indices[3 * k + 0]].vn_idx; + idx0.texcoord_index = + face.vertex_indices[indices[3 * k + 0]].vt_idx; + idx1.vertex_index = face.vertex_indices[indices[3 * k + 1]].v_idx; + idx1.normal_index = + face.vertex_indices[indices[3 * k + 1]].vn_idx; + idx1.texcoord_index = + face.vertex_indices[indices[3 * k + 1]].vt_idx; + idx2.vertex_index = face.vertex_indices[indices[3 * k + 2]].v_idx; + idx2.normal_index = + face.vertex_indices[indices[3 * k + 2]].vn_idx; + idx2.texcoord_index = + face.vertex_indices[indices[3 * k + 2]].vt_idx; + + shape->mesh.indices.push_back(idx0); + shape->mesh.indices.push_back(idx1); + shape->mesh.indices.push_back(idx2); + + shape->mesh.num_face_vertices.push_back(3); + shape->mesh.material_ids.push_back(material_id); + shape->mesh.smoothing_group_ids.push_back( + face.smoothing_group_id); + } + } + +#else // Built-in ear clipping triangulation + + face_t remainingFace = face; // copy size_t guess_vert = 0; vertex_index_t ind[3]; @@ -1587,6 +2509,9 @@ static bool exportGroupsToShape(shape_t *shape, const PrimGroup &prim_group, while (remainingFace.vertex_indices.size() > 3 && remainingIterations > 0) { + // std::cout << "remainingIterations " << remainingIterations << + // "\n"; + 
npolys = remainingFace.vertex_indices.size(); if (guess_vert >= npolys) { guess_vert -= npolys; @@ -1615,14 +2540,26 @@ static bool exportGroupsToShape(shape_t *shape, const PrimGroup &prim_group, vy[k] = v[vi * 3 + axes[1]]; } } + + // + // area is calculated per face + // real_t e0x = vx[1] - vx[0]; real_t e0y = vy[1] - vy[0]; real_t e1x = vx[2] - vx[1]; real_t e1y = vy[2] - vy[1]; real_t cross = e0x * e1y - e0y * e1x; + // std::cout << "axes = " << axes[0] << ", " << axes[1] << "\n"; + // std::cout << "e0x, e0y, e1x, e1y " << e0x << ", " << e0y << ", " + // << e1x << ", " << e1y << "\n"; + + real_t area = (vx[0] * vy[1] - vy[0] * vx[1]) * static_cast(0.5); + // std::cout << "cross " << cross << ", area " << area << "\n"; // if an internal angle if (cross * area < static_cast(0.0)) { + // std::cout << "internal \n"; guess_vert += 1; + // std::cout << "guess vert : " << guess_vert << "\n"; continue; } @@ -1632,6 +2569,7 @@ static bool exportGroupsToShape(shape_t *shape, const PrimGroup &prim_group, size_t idx = (guess_vert + otherVert) % npolys; if (idx >= remainingFace.vertex_indices.size()) { + // std::cout << "???0\n"; // ??? continue; } @@ -1640,18 +2578,21 @@ static bool exportGroupsToShape(shape_t *shape, const PrimGroup &prim_group, if (((ovi * 3 + axes[0]) >= v.size()) || ((ovi * 3 + axes[1]) >= v.size())) { + // std::cout << "???1\n"; // ??? continue; } real_t tx = v[ovi * 3 + axes[0]]; real_t ty = v[ovi * 3 + axes[1]]; if (pnpoly(3, vx, vy, tx, ty)) { + // std::cout << "overlap\n"; overlap = true; break; } } if (overlap) { + // std::cout << "overlap2\n"; guess_vert += 1; continue; } @@ -1689,6 +2630,8 @@ static bool exportGroupsToShape(shape_t *shape, const PrimGroup &prim_group, remainingFace.vertex_indices.pop_back(); } + // std::cout << "remainingFace.vi.size = " << + // remainingFace.vertex_indices.size() << "\n"; if (remainingFace.vertex_indices.size() == 3) { i0 = remainingFace.vertex_indices[0]; i1 = remainingFace.vertex_indices[1]; @@ -1715,6 +2658,7 @@ static bool exportGroupsToShape(shape_t *shape, const PrimGroup &prim_group, face.smoothing_group_id); } } +#endif } // npolys } else { for (size_t k = 0; k < npolys; k++) { From 51908fb967baad5b98973796b32c57989915d910 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Thu, 5 Aug 2021 18:31:25 +0900 Subject: [PATCH 076/139] Donot embed earcut.hpp (#313) * Do not embed mapbox/earcut.hpp. * Fix python binding build. * Fix python binding build on Azure pipeline * Include . Use compiler-specific compile flags in python module build. * format python code. --- LICENSE | 21 ++ README.md | 3 +- azure-pipelines.yml | 6 + loader_example.cc | 4 +- mapbox/LICENSE | 15 + mapbox/earcut.hpp | 820 +++++++++++++++++++++++++++++++++++++++++ python/README.md | 2 +- python/setup.py | 31 +- tiny_obj_loader.h | 869 +------------------------------------------- 9 files changed, 906 insertions(+), 865 deletions(-) create mode 100644 mapbox/LICENSE create mode 100644 mapbox/earcut.hpp diff --git a/LICENSE b/LICENSE index 3af18aba..e9fbe447 100644 --- a/LICENSE +++ b/LICENSE @@ -19,3 +19,24 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +---------------------------------- + +mapbox/earcut.hpp + +ISC License + +Copyright (c) 2015, Mapbox + +Permission to use, copy, modify, and/or distribute this software for any purpose +with or without fee is hereby granted, provided that the above copyright notice +and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF +THIS SOFTWARE. + diff --git a/README.md b/README.md index bfd902cb..94c7a138 100644 --- a/README.md +++ b/README.md @@ -251,7 +251,8 @@ TinyObjLoader triangulate polygons(faces with 4 or more vertices). Built-in trinagulation code may not work well in some polygon shape. You can define `TINYOBJLOADER_USE_MAPBOX_EARCUT` for robust triangulation using `mapbox/earcut.hpp`. -This requires C++11 compiler though. +This requires C++11 compiler though. And you need to copy `mapbox/earcut.hpp` to your project. +If you have your own `mapbox/earcut.hpp` file incuded in your project, you can define `TINYOBJLOADER_DONOT_INCLUDE_MAPBOX_EARCUT` so that `mapbox/earcut.hpp` is not included inside of `tiny_obj_loader.h`. #### Example code (Deprecated API) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 0e835ae1..a71c43b3 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -37,6 +37,8 @@ jobs: pip3 install cibuildwheel twine # Make the header files available to the build. cp *.h python + mkdir python/mapbox + cp mapbox/earcut.hpp python/mapbox/ cd python # Source dist @@ -73,6 +75,8 @@ jobs: pip3 install cibuildwheel # Make the header files available to the build. cp *.h python + mkdir python/mapbox + cp mapbox/earcut.hpp python/mapbox/earcut.hpp cd python cibuildwheel --output-dir wheelhouse . - task: CopyFiles@2 @@ -93,6 +97,8 @@ jobs: pip install cibuildwheel # Make the header files available to the build. cp *.h python + mkdir python/mapbox + cp mapbox/earcut.hpp python/mapbox/ cd python cibuildwheel --output-dir wheelhouse . - task: CopyFiles@2 diff --git a/loader_example.cc b/loader_example.cc index 69e55976..8143bb79 100644 --- a/loader_example.cc +++ b/loader_example.cc @@ -379,12 +379,12 @@ static bool TestStreamLoadObj() { public: MaterialStringStreamReader(const std::string& matSStream) : m_matSStream(matSStream) {} - virtual ~MaterialStringStreamReader() {} + virtual ~MaterialStringStreamReader() TINYOBJ_OVERRIDE {} virtual bool operator()(const std::string& matId, std::vector* materials, std::map* matMap, std::string* warn, - std::string* err) { + std::string* err) TINYOBJ_OVERRIDE { (void)err; (void)matId; LoadMtl(matMap, materials, &m_matSStream, warn, err); diff --git a/mapbox/LICENSE b/mapbox/LICENSE new file mode 100644 index 00000000..8bafb577 --- /dev/null +++ b/mapbox/LICENSE @@ -0,0 +1,15 @@ +ISC License + +Copyright (c) 2015, Mapbox + +Permission to use, copy, modify, and/or distribute this software for any purpose +with or without fee is hereby granted, provided that the above copyright notice +and this permission notice appear in all copies. 
+ +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF +THIS SOFTWARE. diff --git a/mapbox/earcut.hpp b/mapbox/earcut.hpp new file mode 100644 index 00000000..01bd7e96 --- /dev/null +++ b/mapbox/earcut.hpp @@ -0,0 +1,820 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace mapbox { + +namespace util { + +template struct nth { + inline static typename std::tuple_element::type + get(const T& t) { return std::get(t); }; +}; + +} + +namespace detail { + +template +class Earcut { +public: + std::vector indices; + std::size_t vertices = 0; + + template + void operator()(const Polygon& points); + +private: + struct Node { + Node(N index, double x_, double y_) : i(index), x(x_), y(y_) {} + Node(const Node&) = delete; + Node& operator=(const Node&) = delete; + Node(Node&&) = delete; + Node& operator=(Node&&) = delete; + + const N i; + const double x; + const double y; + + // previous and next vertice nodes in a polygon ring + Node* prev = nullptr; + Node* next = nullptr; + + // z-order curve value + int32_t z = 0; + + // previous and next nodes in z-order + Node* prevZ = nullptr; + Node* nextZ = nullptr; + + // indicates whether this is a steiner point + bool steiner = false; + }; + + template Node* linkedList(const Ring& points, const bool clockwise); + Node* filterPoints(Node* start, Node* end = nullptr); + void earcutLinked(Node* ear, int pass = 0); + bool isEar(Node* ear); + bool isEarHashed(Node* ear); + Node* cureLocalIntersections(Node* start); + void splitEarcut(Node* start); + template Node* eliminateHoles(const Polygon& points, Node* outerNode); + Node* eliminateHole(Node* hole, Node* outerNode); + Node* findHoleBridge(Node* hole, Node* outerNode); + bool sectorContainsSector(const Node* m, const Node* p); + void indexCurve(Node* start); + Node* sortLinked(Node* list); + int32_t zOrder(const double x_, const double y_); + Node* getLeftmost(Node* start); + bool pointInTriangle(double ax, double ay, double bx, double by, double cx, double cy, double px, double py) const; + bool isValidDiagonal(Node* a, Node* b); + double area(const Node* p, const Node* q, const Node* r) const; + bool equals(const Node* p1, const Node* p2); + bool intersects(const Node* p1, const Node* q1, const Node* p2, const Node* q2); + bool onSegment(const Node* p, const Node* q, const Node* r); + int sign(double val); + bool intersectsPolygon(const Node* a, const Node* b); + bool locallyInside(const Node* a, const Node* b); + bool middleInside(const Node* a, const Node* b); + Node* splitPolygon(Node* a, Node* b); + template Node* insertNode(std::size_t i, const Point& p, Node* last); + void removeNode(Node* p); + + bool hashing; + double minX, maxX; + double minY, maxY; + double inv_size = 0; + + template > + class ObjectPool { + public: + ObjectPool() { } + ObjectPool(std::size_t blockSize_) { + reset(blockSize_); + } + ~ObjectPool() { + clear(); + } + template + T* construct(Args&&... 
args) { + if (currentIndex >= blockSize) { + currentBlock = alloc_traits::allocate(alloc, blockSize); + allocations.emplace_back(currentBlock); + currentIndex = 0; + } + T* object = ¤tBlock[currentIndex++]; + alloc_traits::construct(alloc, object, std::forward(args)...); + return object; + } + void reset(std::size_t newBlockSize) { + for (auto allocation : allocations) { + alloc_traits::deallocate(alloc, allocation, blockSize); + } + allocations.clear(); + blockSize = std::max(1, newBlockSize); + currentBlock = nullptr; + currentIndex = blockSize; + } + void clear() { reset(blockSize); } + private: + T* currentBlock = nullptr; + std::size_t currentIndex = 1; + std::size_t blockSize = 1; + std::vector allocations; + Alloc alloc; + typedef typename std::allocator_traits alloc_traits; + }; + ObjectPool nodes; +}; + +template template +void Earcut::operator()(const Polygon& points) { + // reset + indices.clear(); + vertices = 0; + + if (points.empty()) return; + + double x; + double y; + int threshold = 80; + std::size_t len = 0; + + for (size_t i = 0; threshold >= 0 && i < points.size(); i++) { + threshold -= static_cast(points[i].size()); + len += points[i].size(); + } + + //estimate size of nodes and indices + nodes.reset(len * 3 / 2); + indices.reserve(len + points[0].size()); + + Node* outerNode = linkedList(points[0], true); + if (!outerNode || outerNode->prev == outerNode->next) return; + + if (points.size() > 1) outerNode = eliminateHoles(points, outerNode); + + // if the shape is not too simple, we'll use z-order curve hash later; calculate polygon bbox + hashing = threshold < 0; + if (hashing) { + Node* p = outerNode->next; + minX = maxX = outerNode->x; + minY = maxY = outerNode->y; + do { + x = p->x; + y = p->y; + minX = std::min(minX, x); + minY = std::min(minY, y); + maxX = std::max(maxX, x); + maxY = std::max(maxY, y); + p = p->next; + } while (p != outerNode); + + // minX, minY and size are later used to transform coords into integers for z-order calculation + inv_size = std::max(maxX - minX, maxY - minY); + inv_size = inv_size != .0 ? (1. / inv_size) : .0; + } + + earcutLinked(outerNode); + + nodes.clear(); +} + +// create a circular doubly linked list from polygon points in the specified winding order +template template +typename Earcut::Node* +Earcut::linkedList(const Ring& points, const bool clockwise) { + using Point = typename Ring::value_type; + double sum = 0; + const std::size_t len = points.size(); + std::size_t i, j; + Node* last = nullptr; + + // calculate original winding order of a polygon ring + for (i = 0, j = len > 0 ? 
len - 1 : 0; i < len; j = i++) { + const auto& p1 = points[i]; + const auto& p2 = points[j]; + const double p20 = util::nth<0, Point>::get(p2); + const double p10 = util::nth<0, Point>::get(p1); + const double p11 = util::nth<1, Point>::get(p1); + const double p21 = util::nth<1, Point>::get(p2); + sum += (p20 - p10) * (p11 + p21); + } + + // link points into circular doubly-linked list in the specified winding order + if (clockwise == (sum > 0)) { + for (i = 0; i < len; i++) last = insertNode(vertices + i, points[i], last); + } else { + for (i = len; i-- > 0;) last = insertNode(vertices + i, points[i], last); + } + + if (last && equals(last, last->next)) { + removeNode(last); + last = last->next; + } + + vertices += len; + + return last; +} + +// eliminate colinear or duplicate points +template +typename Earcut::Node* +Earcut::filterPoints(Node* start, Node* end) { + if (!end) end = start; + + Node* p = start; + bool again; + do { + again = false; + + if (!p->steiner && (equals(p, p->next) || area(p->prev, p, p->next) == 0)) { + removeNode(p); + p = end = p->prev; + + if (p == p->next) break; + again = true; + + } else { + p = p->next; + } + } while (again || p != end); + + return end; +} + +// main ear slicing loop which triangulates a polygon (given as a linked list) +template +void Earcut::earcutLinked(Node* ear, int pass) { + if (!ear) return; + + // interlink polygon nodes in z-order + if (!pass && hashing) indexCurve(ear); + + Node* stop = ear; + Node* prev; + Node* next; + + int iterations = 0; + + // iterate through ears, slicing them one by one + while (ear->prev != ear->next) { + iterations++; + prev = ear->prev; + next = ear->next; + + if (hashing ? isEarHashed(ear) : isEar(ear)) { + // cut off the triangle + indices.emplace_back(prev->i); + indices.emplace_back(ear->i); + indices.emplace_back(next->i); + + removeNode(ear); + + // skipping the next vertice leads to less sliver triangles + ear = next->next; + stop = next->next; + + continue; + } + + ear = next; + + // if we looped through the whole remaining polygon and can't find any more ears + if (ear == stop) { + // try filtering points and slicing again + if (!pass) earcutLinked(filterPoints(ear), 1); + + // if this didn't work, try curing all small self-intersections locally + else if (pass == 1) { + ear = cureLocalIntersections(filterPoints(ear)); + earcutLinked(ear, 2); + + // as a last resort, try splitting the remaining polygon into two + } else if (pass == 2) splitEarcut(ear); + + break; + } + } +} + +// check whether a polygon node forms a valid ear with adjacent nodes +template +bool Earcut::isEar(Node* ear) { + const Node* a = ear->prev; + const Node* b = ear; + const Node* c = ear->next; + + if (area(a, b, c) >= 0) return false; // reflex, can't be an ear + + // now make sure we don't have other points inside the potential ear + Node* p = ear->next->next; + + while (p != ear->prev) { + if (pointInTriangle(a->x, a->y, b->x, b->y, c->x, c->y, p->x, p->y) && + area(p->prev, p, p->next) >= 0) return false; + p = p->next; + } + + return true; +} + +template +bool Earcut::isEarHashed(Node* ear) { + const Node* a = ear->prev; + const Node* b = ear; + const Node* c = ear->next; + + if (area(a, b, c) >= 0) return false; // reflex, can't be an ear + + // triangle bbox; min & max are calculated like this for speed + const double minTX = std::min(a->x, std::min(b->x, c->x)); + const double minTY = std::min(a->y, std::min(b->y, c->y)); + const double maxTX = std::max(a->x, std::max(b->x, c->x)); + const double maxTY = 
std::max(a->y, std::max(b->y, c->y)); + + // z-order range for the current triangle bbox; + const int32_t minZ = zOrder(minTX, minTY); + const int32_t maxZ = zOrder(maxTX, maxTY); + + // first look for points inside the triangle in increasing z-order + Node* p = ear->nextZ; + + while (p && p->z <= maxZ) { + if (p != ear->prev && p != ear->next && + pointInTriangle(a->x, a->y, b->x, b->y, c->x, c->y, p->x, p->y) && + area(p->prev, p, p->next) >= 0) return false; + p = p->nextZ; + } + + // then look for points in decreasing z-order + p = ear->prevZ; + + while (p && p->z >= minZ) { + if (p != ear->prev && p != ear->next && + pointInTriangle(a->x, a->y, b->x, b->y, c->x, c->y, p->x, p->y) && + area(p->prev, p, p->next) >= 0) return false; + p = p->prevZ; + } + + return true; +} + +// go through all polygon nodes and cure small local self-intersections +template +typename Earcut::Node* +Earcut::cureLocalIntersections(Node* start) { + Node* p = start; + do { + Node* a = p->prev; + Node* b = p->next->next; + + // a self-intersection where edge (v[i-1],v[i]) intersects (v[i+1],v[i+2]) + if (!equals(a, b) && intersects(a, p, p->next, b) && locallyInside(a, b) && locallyInside(b, a)) { + indices.emplace_back(a->i); + indices.emplace_back(p->i); + indices.emplace_back(b->i); + + // remove two nodes involved + removeNode(p); + removeNode(p->next); + + p = start = b; + } + p = p->next; + } while (p != start); + + return filterPoints(p); +} + +// try splitting polygon into two and triangulate them independently +template +void Earcut::splitEarcut(Node* start) { + // look for a valid diagonal that divides the polygon into two + Node* a = start; + do { + Node* b = a->next->next; + while (b != a->prev) { + if (a->i != b->i && isValidDiagonal(a, b)) { + // split the polygon in two by the diagonal + Node* c = splitPolygon(a, b); + + // filter colinear points around the cuts + a = filterPoints(a, a->next); + c = filterPoints(c, c->next); + + // run earcut on each half + earcutLinked(a); + earcutLinked(c); + return; + } + b = b->next; + } + a = a->next; + } while (a != start); +} + +// link every hole into the outer loop, producing a single-ring polygon without holes +template template +typename Earcut::Node* +Earcut::eliminateHoles(const Polygon& points, Node* outerNode) { + const size_t len = points.size(); + + std::vector queue; + for (size_t i = 1; i < len; i++) { + Node* list = linkedList(points[i], false); + if (list) { + if (list == list->next) list->steiner = true; + queue.push_back(getLeftmost(list)); + } + } + std::sort(queue.begin(), queue.end(), [](const Node* a, const Node* b) { + return a->x < b->x; + }); + + // process holes from left to right + for (size_t i = 0; i < queue.size(); i++) { + outerNode = eliminateHole(queue[i], outerNode); + outerNode = filterPoints(outerNode, outerNode->next); + } + + return outerNode; +} + +// find a bridge between vertices that connects hole with an outer ring and and link it +template +typename Earcut::Node* +Earcut::eliminateHole(Node* hole, Node* outerNode) { + Node* bridge = findHoleBridge(hole, outerNode); + if (!bridge) { + return outerNode; + } + + Node* bridgeReverse = splitPolygon(bridge, hole); + + // filter collinear points around the cuts + Node* filteredBridge = filterPoints(bridge, bridge->next); + filterPoints(bridgeReverse, bridgeReverse->next); + + // Check if input node was removed by the filtering + return outerNode == bridge ? 
filteredBridge : outerNode; +} + +// David Eberly's algorithm for finding a bridge between hole and outer polygon +template +typename Earcut::Node* +Earcut::findHoleBridge(Node* hole, Node* outerNode) { + Node* p = outerNode; + double hx = hole->x; + double hy = hole->y; + double qx = -std::numeric_limits::infinity(); + Node* m = nullptr; + + // find a segment intersected by a ray from the hole's leftmost Vertex to the left; + // segment's endpoint with lesser x will be potential connection Vertex + do { + if (hy <= p->y && hy >= p->next->y && p->next->y != p->y) { + double x = p->x + (hy - p->y) * (p->next->x - p->x) / (p->next->y - p->y); + if (x <= hx && x > qx) { + qx = x; + if (x == hx) { + if (hy == p->y) return p; + if (hy == p->next->y) return p->next; + } + m = p->x < p->next->x ? p : p->next; + } + } + p = p->next; + } while (p != outerNode); + + if (!m) return 0; + + if (hx == qx) return m; // hole touches outer segment; pick leftmost endpoint + + // look for points inside the triangle of hole Vertex, segment intersection and endpoint; + // if there are no points found, we have a valid connection; + // otherwise choose the Vertex of the minimum angle with the ray as connection Vertex + + const Node* stop = m; + double tanMin = std::numeric_limits::infinity(); + double tanCur = 0; + + p = m; + double mx = m->x; + double my = m->y; + + do { + if (hx >= p->x && p->x >= mx && hx != p->x && + pointInTriangle(hy < my ? hx : qx, hy, mx, my, hy < my ? qx : hx, hy, p->x, p->y)) { + + tanCur = std::abs(hy - p->y) / (hx - p->x); // tangential + + if (locallyInside(p, hole) && + (tanCur < tanMin || (tanCur == tanMin && (p->x > m->x || sectorContainsSector(m, p))))) { + m = p; + tanMin = tanCur; + } + } + + p = p->next; + } while (p != stop); + + return m; +} + +// whether sector in vertex m contains sector in vertex p in the same coordinates +template +bool Earcut::sectorContainsSector(const Node* m, const Node* p) { + return area(m->prev, m, p->prev) < 0 && area(p->next, m, m->next) < 0; +} + +// interlink polygon nodes in z-order +template +void Earcut::indexCurve(Node* start) { + assert(start); + Node* p = start; + + do { + p->z = p->z ? 
p->z : zOrder(p->x, p->y); + p->prevZ = p->prev; + p->nextZ = p->next; + p = p->next; + } while (p != start); + + p->prevZ->nextZ = nullptr; + p->prevZ = nullptr; + + sortLinked(p); +} + +// Simon Tatham's linked list merge sort algorithm +// http://www.chiark.greenend.org.uk/~sgtatham/algorithms/listsort.html +template +typename Earcut::Node* +Earcut::sortLinked(Node* list) { + assert(list); + Node* p; + Node* q; + Node* e; + Node* tail; + int i, numMerges, pSize, qSize; + int inSize = 1; + + for (;;) { + p = list; + list = nullptr; + tail = nullptr; + numMerges = 0; + + while (p) { + numMerges++; + q = p; + pSize = 0; + for (i = 0; i < inSize; i++) { + pSize++; + q = q->nextZ; + if (!q) break; + } + + qSize = inSize; + + while (pSize > 0 || (qSize > 0 && q)) { + + if (pSize == 0) { + e = q; + q = q->nextZ; + qSize--; + } else if (qSize == 0 || !q) { + e = p; + p = p->nextZ; + pSize--; + } else if (p->z <= q->z) { + e = p; + p = p->nextZ; + pSize--; + } else { + e = q; + q = q->nextZ; + qSize--; + } + + if (tail) tail->nextZ = e; + else list = e; + + e->prevZ = tail; + tail = e; + } + + p = q; + } + + tail->nextZ = nullptr; + + if (numMerges <= 1) return list; + + inSize *= 2; + } +} + +// z-order of a Vertex given coords and size of the data bounding box +template +int32_t Earcut::zOrder(const double x_, const double y_) { + // coords are transformed into non-negative 15-bit integer range + int32_t x = static_cast(32767.0 * (x_ - minX) * inv_size); + int32_t y = static_cast(32767.0 * (y_ - minY) * inv_size); + + x = (x | (x << 8)) & 0x00FF00FF; + x = (x | (x << 4)) & 0x0F0F0F0F; + x = (x | (x << 2)) & 0x33333333; + x = (x | (x << 1)) & 0x55555555; + + y = (y | (y << 8)) & 0x00FF00FF; + y = (y | (y << 4)) & 0x0F0F0F0F; + y = (y | (y << 2)) & 0x33333333; + y = (y | (y << 1)) & 0x55555555; + + return x | (y << 1); +} + +// find the leftmost node of a polygon ring +template +typename Earcut::Node* +Earcut::getLeftmost(Node* start) { + Node* p = start; + Node* leftmost = start; + do { + if (p->x < leftmost->x || (p->x == leftmost->x && p->y < leftmost->y)) + leftmost = p; + p = p->next; + } while (p != start); + + return leftmost; +} + +// check if a point lies within a convex triangle +template +bool Earcut::pointInTriangle(double ax, double ay, double bx, double by, double cx, double cy, double px, double py) const { + return (cx - px) * (ay - py) - (ax - px) * (cy - py) >= 0 && + (ax - px) * (by - py) - (bx - px) * (ay - py) >= 0 && + (bx - px) * (cy - py) - (cx - px) * (by - py) >= 0; +} + +// check if a diagonal between two polygon nodes is valid (lies in polygon interior) +template +bool Earcut::isValidDiagonal(Node* a, Node* b) { + return a->next->i != b->i && a->prev->i != b->i && !intersectsPolygon(a, b) && // dones't intersect other edges + ((locallyInside(a, b) && locallyInside(b, a) && middleInside(a, b) && // locally visible + (area(a->prev, a, b->prev) != 0.0 || area(a, b->prev, b) != 0.0)) || // does not create opposite-facing sectors + (equals(a, b) && area(a->prev, a, a->next) > 0 && area(b->prev, b, b->next) > 0)); // special zero-length case +} + +// signed area of a triangle +template +double Earcut::area(const Node* p, const Node* q, const Node* r) const { + return (q->y - p->y) * (r->x - q->x) - (q->x - p->x) * (r->y - q->y); +} + +// check if two points are equal +template +bool Earcut::equals(const Node* p1, const Node* p2) { + return p1->x == p2->x && p1->y == p2->y; +} + +// check if two segments intersect +template +bool Earcut::intersects(const Node* p1, const 
Node* q1, const Node* p2, const Node* q2) { + int o1 = sign(area(p1, q1, p2)); + int o2 = sign(area(p1, q1, q2)); + int o3 = sign(area(p2, q2, p1)); + int o4 = sign(area(p2, q2, q1)); + + if (o1 != o2 && o3 != o4) return true; // general case + + if (o1 == 0 && onSegment(p1, p2, q1)) return true; // p1, q1 and p2 are collinear and p2 lies on p1q1 + if (o2 == 0 && onSegment(p1, q2, q1)) return true; // p1, q1 and q2 are collinear and q2 lies on p1q1 + if (o3 == 0 && onSegment(p2, p1, q2)) return true; // p2, q2 and p1 are collinear and p1 lies on p2q2 + if (o4 == 0 && onSegment(p2, q1, q2)) return true; // p2, q2 and q1 are collinear and q1 lies on p2q2 + + return false; +} + +// for collinear points p, q, r, check if point q lies on segment pr +template +bool Earcut::onSegment(const Node* p, const Node* q, const Node* r) { + return q->x <= std::max(p->x, r->x) && + q->x >= std::min(p->x, r->x) && + q->y <= std::max(p->y, r->y) && + q->y >= std::min(p->y, r->y); +} + +template +int Earcut::sign(double val) { + return (0.0 < val) - (val < 0.0); +} + +// check if a polygon diagonal intersects any polygon segments +template +bool Earcut::intersectsPolygon(const Node* a, const Node* b) { + const Node* p = a; + do { + if (p->i != a->i && p->next->i != a->i && p->i != b->i && p->next->i != b->i && + intersects(p, p->next, a, b)) return true; + p = p->next; + } while (p != a); + + return false; +} + +// check if a polygon diagonal is locally inside the polygon +template +bool Earcut::locallyInside(const Node* a, const Node* b) { + return area(a->prev, a, a->next) < 0 ? + area(a, b, a->next) >= 0 && area(a, a->prev, b) >= 0 : + area(a, b, a->prev) < 0 || area(a, a->next, b) < 0; +} + +// check if the middle Vertex of a polygon diagonal is inside the polygon +template +bool Earcut::middleInside(const Node* a, const Node* b) { + const Node* p = a; + bool inside = false; + double px = (a->x + b->x) / 2; + double py = (a->y + b->y) / 2; + do { + if (((p->y > py) != (p->next->y > py)) && p->next->y != p->y && + (px < (p->next->x - p->x) * (py - p->y) / (p->next->y - p->y) + p->x)) + inside = !inside; + p = p->next; + } while (p != a); + + return inside; +} + +// link two polygon vertices with a bridge; if the vertices belong to the same ring, it splits +// polygon into two; if one belongs to the outer ring and another to a hole, it merges it into a +// single ring +template +typename Earcut::Node* +Earcut::splitPolygon(Node* a, Node* b) { + Node* a2 = nodes.construct(a->i, a->x, a->y); + Node* b2 = nodes.construct(b->i, b->x, b->y); + Node* an = a->next; + Node* bp = b->prev; + + a->next = b; + b->prev = a; + + a2->next = an; + an->prev = a2; + + b2->next = a2; + a2->prev = b2; + + bp->next = b2; + b2->prev = bp; + + return b2; +} + +// create a node and util::optionally link it with previous one (in a circular doubly linked list) +template template +typename Earcut::Node* +Earcut::insertNode(std::size_t i, const Point& pt, Node* last) { + Node* p = nodes.construct(static_cast(i), util::nth<0, Point>::get(pt), util::nth<1, Point>::get(pt)); + + if (!last) { + p->prev = p; + p->next = p; + + } else { + assert(last); + p->next = last->next; + p->prev = last; + last->next->prev = p; + last->next = p; + } + return p; +} + +template +void Earcut::removeNode(Node* p) { + p->next->prev = p->prev; + p->prev->next = p->next; + + if (p->prevZ) p->prevZ->nextZ = p->nextZ; + if (p->nextZ) p->nextZ->prevZ = p->prevZ; +} +} + +template +std::vector earcut(const Polygon& poly) { + mapbox::detail::Earcut earcut; + 
earcut(poly); + return std::move(earcut.indices); +} +} diff --git a/python/README.md b/python/README.md index b6e2872d..913668f6 100644 --- a/python/README.md +++ b/python/README.md @@ -83,7 +83,7 @@ $ python setup.py build ## License -MIT license. +MIT(tinyobjloader) and ISC(mapbox earcut) license. ## TODO * [ ] Writer saver diff --git a/python/setup.py b/python/setup.py index 8a9b69c0..df53136f 100644 --- a/python/setup.py +++ b/python/setup.py @@ -1,6 +1,8 @@ import setuptools import platform +from distutils.command.build_ext import build_ext + with open("README.md", "r") as fh: long_description = fh.read() @@ -45,8 +47,26 @@ def __str__(self): return interpreter_include_path -ext_compile_args = ["-std=c++11"] -ext_link_args = [] +# unix = default compiler name? +copt = {"unix": ["-std=c++11"], "gcc": ["-std=c++11"], "clang": ["std=c++11"]} +# TODO: set C++ version for msvc? {'msvc': ["/std:c++14"] } + +# ext_compile_args = ["-std=c++11"] +# ext_link_args = [] + +# https://stackoverflow.com/questions/724664/python-distutils-how-to-get-a-compiler-that-is-going-to-be-used +class build_ext_subclass(build_ext): + def build_extensions(self): + c = self.compiler.compiler_type + if c in copt: + for e in self.extensions: + e.extra_compile_args = copt[c] + + # if lopt.has_key(c): + # for e in self.extensions: + # e.extra_link_args = lopt[ c ] + build_ext.build_extensions(self) + # Developer option # @@ -61,8 +81,8 @@ def __str__(self): # `tiny_obj_loader.cc` contains implementation of tiny_obj_loader. m = setuptools.Extension( "tinyobjloader", - extra_compile_args=ext_compile_args, - extra_link_args=ext_link_args, + # extra_compile_args=ext_compile_args, + # extra_link_args=ext_link_args, sources=["bindings.cc", "tiny_obj_loader.cc"], include_dirs=[ # Support `build_ext` finding tinyobjloader (without first running @@ -100,10 +120,11 @@ def __str__(self): "Topic :: Artistic Software", "Topic :: Multimedia :: Graphics :: 3D Modeling", "Topic :: Scientific/Engineering :: Visualization", - "License :: OSI Approved :: MIT License", + "License :: OSI Approved :: MIT License(ISC License for mapbox earcut.hpp)", "Operating System :: OS Independent", "Programming Language :: Python :: 3", ], packages=setuptools.find_packages(), ext_modules=[m], + cmdclass={"build_ext": build_ext_subclass}, ) diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h index 1a15e1d4..77f86b2d 100644 --- a/tiny_obj_loader.h +++ b/tiny_obj_loader.h @@ -663,866 +663,23 @@ bool ParseTextureNameAndOption(std::string *texname, texture_option_t *texopt, #ifdef TINYOBJLOADER_USE_MAPBOX_EARCUT -#include -#include - -/* -ISC License - -Copyright (c) 2015, Mapbox - -Permission to use, copy, modify, and/or distribute this software for any purpose -with or without fee is hereby granted, provided that the above copyright notice -and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH -REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND -FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, -INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS -OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER -TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF -THIS SOFTWARE. 
-*/ - -namespace mapbox { - -namespace util { - -template -struct nth { - inline static typename std::tuple_element::type get(const T &t) { - return std::get(t); - }; -}; - -} // namespace util - -namespace detail { - -template -class Earcut { - public: - std::vector indices; - std::size_t vertices = 0; - - template - void operator()(const Polygon &points); - - private: - struct Node { - Node(N index, double x_, double y_) : i(index), x(x_), y(y_) {} - Node(const Node &) = delete; - Node &operator=(const Node &) = delete; - Node(Node &&) = delete; - Node &operator=(Node &&) = delete; - - const N i; - const double x; - const double y; - - // previous and next vertice nodes in a polygon ring - Node *prev = nullptr; - Node *next = nullptr; - - // z-order curve value - int32_t z = 0; - - // previous and next nodes in z-order - Node *prevZ = nullptr; - Node *nextZ = nullptr; - - // indicates whether this is a steiner point - bool steiner = false; - }; - - template - Node *linkedList(const Ring &points, const bool clockwise); - Node *filterPoints(Node *start, Node *end = nullptr); - void earcutLinked(Node *ear, int pass = 0); - bool isEar(Node *ear); - bool isEarHashed(Node *ear); - Node *cureLocalIntersections(Node *start); - void splitEarcut(Node *start); - template - Node *eliminateHoles(const Polygon &points, Node *outerNode); - void eliminateHole(Node *hole, Node *outerNode); - Node *findHoleBridge(Node *hole, Node *outerNode); - bool sectorContainsSector(const Node *m, const Node *p); - void indexCurve(Node *start); - Node *sortLinked(Node *list); - int32_t zOrder(const double x_, const double y_); - Node *getLeftmost(Node *start); - bool pointInTriangle(double ax, double ay, double bx, double by, double cx, - double cy, double px, double py) const; - bool isValidDiagonal(Node *a, Node *b); - double area(const Node *p, const Node *q, const Node *r) const; - bool equals(const Node *p1, const Node *p2); - bool intersects(const Node *p1, const Node *q1, const Node *p2, - const Node *q2); - bool onSegment(const Node *p, const Node *q, const Node *r); - int sign(double val); - bool intersectsPolygon(const Node *a, const Node *b); - bool locallyInside(const Node *a, const Node *b); - bool middleInside(const Node *a, const Node *b); - Node *splitPolygon(Node *a, Node *b); - template - Node *insertNode(std::size_t i, const Point &p, Node *last); - void removeNode(Node *p); - - bool hashing; - double minX, maxX; - double minY, maxY; - double inv_size = 0; - - template > - class ObjectPool { - public: - ObjectPool() {} - ObjectPool(std::size_t blockSize_) { reset(blockSize_); } - ~ObjectPool() { clear(); } - template - T *construct(Args &&... 
args) { - if (currentIndex >= blockSize) { - currentBlock = alloc_traits::allocate(alloc, blockSize); - allocations.emplace_back(currentBlock); - currentIndex = 0; - } - T *object = ¤tBlock[currentIndex++]; - alloc_traits::construct(alloc, object, std::forward(args)...); - return object; - } - void reset(std::size_t newBlockSize) { - for (auto allocation : allocations) { - alloc_traits::deallocate(alloc, allocation, blockSize); - } - allocations.clear(); - blockSize = std::max(1, newBlockSize); - currentBlock = nullptr; - currentIndex = blockSize; - } - void clear() { reset(blockSize); } - - private: - T *currentBlock = nullptr; - std::size_t currentIndex = 1; - std::size_t blockSize = 1; - std::vector allocations; - Alloc alloc; - typedef typename std::allocator_traits alloc_traits; - }; - ObjectPool nodes; -}; - -template -template -void Earcut::operator()(const Polygon &points) { - // reset - indices.clear(); - vertices = 0; - - if (points.empty()) return; - - double x; - double y; - int threshold = 80; - std::size_t len = 0; - - for (size_t i = 0; threshold >= 0 && i < points.size(); i++) { - threshold -= static_cast(points[i].size()); - len += points[i].size(); - } - - // estimate size of nodes and indices - nodes.reset(len * 3 / 2); - indices.reserve(len + points[0].size()); - - Node *outerNode = linkedList(points[0], true); - if (!outerNode || outerNode->prev == outerNode->next) return; - - if (points.size() > 1) outerNode = eliminateHoles(points, outerNode); - - // if the shape is not too simple, we'll use z-order curve hash later; - // calculate polygon bbox - hashing = threshold < 0; - if (hashing) { - Node *p = outerNode->next; - minX = maxX = outerNode->x; - minY = maxY = outerNode->y; - do { - x = p->x; - y = p->y; - minX = std::min(minX, x); - minY = std::min(minY, y); - maxX = std::max(maxX, x); - maxY = std::max(maxY, y); - p = p->next; - } while (p != outerNode); - - // minX, minY and size are later used to transform coords into integers for - // z-order calculation - inv_size = std::max(maxX - minX, maxY - minY); - inv_size = inv_size != .0 ? (1. / inv_size) : .0; - } - - earcutLinked(outerNode); - - nodes.clear(); -} - -// create a circular doubly linked list from polygon points in the specified -// winding order -template -template -typename Earcut::Node *Earcut::linkedList(const Ring &points, - const bool clockwise) { - using Point = typename Ring::value_type; - double sum = 0; - const std::size_t len = points.size(); - std::size_t i, j; - Node *last = nullptr; - - // calculate original winding order of a polygon ring - for (i = 0, j = len > 0 ? 
len - 1 : 0; i < len; j = i++) { - const auto &p1 = points[i]; - const auto &p2 = points[j]; - const double p20 = util::nth<0, Point>::get(p2); - const double p10 = util::nth<0, Point>::get(p1); - const double p11 = util::nth<1, Point>::get(p1); - const double p21 = util::nth<1, Point>::get(p2); - sum += (p20 - p10) * (p11 + p21); - } - - // link points into circular doubly-linked list in the specified winding order - if (clockwise == (sum > 0)) { - for (i = 0; i < len; i++) last = insertNode(vertices + i, points[i], last); - } else { - for (i = len; i-- > 0;) last = insertNode(vertices + i, points[i], last); - } - - if (last && equals(last, last->next)) { - removeNode(last); - last = last->next; - } - - vertices += len; - - return last; -} - -// eliminate colinear or duplicate points -template -typename Earcut::Node *Earcut::filterPoints(Node *start, Node *end) { - if (!end) end = start; - - Node *p = start; - bool again; - do { - again = false; - - if (!p->steiner && (equals(p, p->next) || area(p->prev, p, p->next) == 0)) { - removeNode(p); - p = end = p->prev; - - if (p == p->next) break; - again = true; - - } else { - p = p->next; - } - } while (again || p != end); - - return end; -} - -// main ear slicing loop which triangulates a polygon (given as a linked list) -template -void Earcut::earcutLinked(Node *ear, int pass) { - if (!ear) return; - - // interlink polygon nodes in z-order - if (!pass && hashing) indexCurve(ear); - - Node *stop = ear; - Node *prev; - Node *next; - - int iterations = 0; - - // iterate through ears, slicing them one by one - while (ear->prev != ear->next) { - iterations++; - prev = ear->prev; - next = ear->next; - - if (hashing ? isEarHashed(ear) : isEar(ear)) { - // cut off the triangle - indices.emplace_back(prev->i); - indices.emplace_back(ear->i); - indices.emplace_back(next->i); - - removeNode(ear); - - // skipping the next vertice leads to less sliver triangles - ear = next->next; - stop = next->next; - - continue; - } - - ear = next; - - // if we looped through the whole remaining polygon and can't find any more - // ears - if (ear == stop) { - // try filtering points and slicing again - if (!pass) earcutLinked(filterPoints(ear), 1); - - // if this didn't work, try curing all small self-intersections locally - else if (pass == 1) { - ear = cureLocalIntersections(filterPoints(ear)); - earcutLinked(ear, 2); - - // as a last resort, try splitting the remaining polygon into two - } else if (pass == 2) - splitEarcut(ear); - - break; - } - } -} - -// check whether a polygon node forms a valid ear with adjacent nodes -template -bool Earcut::isEar(Node *ear) { - const Node *a = ear->prev; - const Node *b = ear; - const Node *c = ear->next; - - if (area(a, b, c) >= 0) return false; // reflex, can't be an ear - - // now make sure we don't have other points inside the potential ear - Node *p = ear->next->next; - - while (p != ear->prev) { - if (pointInTriangle(a->x, a->y, b->x, b->y, c->x, c->y, p->x, p->y) && - area(p->prev, p, p->next) >= 0) - return false; - p = p->next; - } - - return true; -} - -template -bool Earcut::isEarHashed(Node *ear) { - const Node *a = ear->prev; - const Node *b = ear; - const Node *c = ear->next; - - if (area(a, b, c) >= 0) return false; // reflex, can't be an ear - - // triangle bbox; min & max are calculated like this for speed - const double minTX = std::min(a->x, std::min(b->x, c->x)); - const double minTY = std::min(a->y, std::min(b->y, c->y)); - const double maxTX = std::max(a->x, std::max(b->x, c->x)); - const double maxTY = 
std::max(a->y, std::max(b->y, c->y)); - - // z-order range for the current triangle bbox; - const int32_t minZ = zOrder(minTX, minTY); - const int32_t maxZ = zOrder(maxTX, maxTY); - - // first look for points inside the triangle in increasing z-order - Node *p = ear->nextZ; - - while (p && p->z <= maxZ) { - if (p != ear->prev && p != ear->next && - pointInTriangle(a->x, a->y, b->x, b->y, c->x, c->y, p->x, p->y) && - area(p->prev, p, p->next) >= 0) - return false; - p = p->nextZ; - } - - // then look for points in decreasing z-order - p = ear->prevZ; - - while (p && p->z >= minZ) { - if (p != ear->prev && p != ear->next && - pointInTriangle(a->x, a->y, b->x, b->y, c->x, c->y, p->x, p->y) && - area(p->prev, p, p->next) >= 0) - return false; - p = p->prevZ; - } - - return true; -} - -// go through all polygon nodes and cure small local self-intersections -template -typename Earcut::Node *Earcut::cureLocalIntersections(Node *start) { - Node *p = start; - do { - Node *a = p->prev; - Node *b = p->next->next; - - // a self-intersection where edge (v[i-1],v[i]) intersects (v[i+1],v[i+2]) - if (!equals(a, b) && intersects(a, p, p->next, b) && locallyInside(a, b) && - locallyInside(b, a)) { - indices.emplace_back(a->i); - indices.emplace_back(p->i); - indices.emplace_back(b->i); - - // remove two nodes involved - removeNode(p); - removeNode(p->next); - - p = start = b; - } - p = p->next; - } while (p != start); - - return filterPoints(p); -} - -// try splitting polygon into two and triangulate them independently -template -void Earcut::splitEarcut(Node *start) { - // look for a valid diagonal that divides the polygon into two - Node *a = start; - do { - Node *b = a->next->next; - while (b != a->prev) { - if (a->i != b->i && isValidDiagonal(a, b)) { - // split the polygon in two by the diagonal - Node *c = splitPolygon(a, b); - - // filter colinear points around the cuts - a = filterPoints(a, a->next); - c = filterPoints(c, c->next); - - // run earcut on each half - earcutLinked(a); - earcutLinked(c); - return; - } - b = b->next; - } - a = a->next; - } while (a != start); -} - -// link every hole into the outer loop, producing a single-ring polygon without -// holes -template -template -typename Earcut::Node *Earcut::eliminateHoles(const Polygon &points, - Node *outerNode) { - const size_t len = points.size(); - - std::vector queue; - for (size_t i = 1; i < len; i++) { - Node *list = linkedList(points[i], false); - if (list) { - if (list == list->next) list->steiner = true; - queue.push_back(getLeftmost(list)); - } - } - std::sort(queue.begin(), queue.end(), - [](const Node *a, const Node *b) { return a->x < b->x; }); - - // process holes from left to right - for (size_t i = 0; i < queue.size(); i++) { - eliminateHole(queue[i], outerNode); - outerNode = filterPoints(outerNode, outerNode->next); - } - - return outerNode; -} - -// find a bridge between vertices that connects hole with an outer ring and and -// link it -template -void Earcut::eliminateHole(Node *hole, Node *outerNode) { - outerNode = findHoleBridge(hole, outerNode); - if (outerNode) { - Node *b = splitPolygon(outerNode, hole); - - // filter out colinear points around cuts - filterPoints(outerNode, outerNode->next); - filterPoints(b, b->next); - } -} - -// David Eberly's algorithm for finding a bridge between hole and outer polygon -template -typename Earcut::Node *Earcut::findHoleBridge(Node *hole, - Node *outerNode) { - Node *p = outerNode; - double hx = hole->x; - double hy = hole->y; - double qx = -std::numeric_limits::infinity(); - 
Node *m = nullptr; - - // find a segment intersected by a ray from the hole's leftmost Vertex to the - // left; segment's endpoint with lesser x will be potential connection Vertex - do { - if (hy <= p->y && hy >= p->next->y && p->next->y != p->y) { - double x = p->x + (hy - p->y) * (p->next->x - p->x) / (p->next->y - p->y); - if (x <= hx && x > qx) { - qx = x; - if (x == hx) { - if (hy == p->y) return p; - if (hy == p->next->y) return p->next; - } - m = p->x < p->next->x ? p : p->next; - } - } - p = p->next; - } while (p != outerNode); - - if (!m) return 0; - - if (hx == qx) return m; // hole touches outer segment; pick leftmost endpoint - - // look for points inside the triangle of hole Vertex, segment intersection - // and endpoint; if there are no points found, we have a valid connection; - // otherwise choose the Vertex of the minimum angle with the ray as connection - // Vertex - - const Node *stop = m; - double tanMin = std::numeric_limits::infinity(); - double tanCur = 0; - - p = m; - double mx = m->x; - double my = m->y; - - do { - if (hx >= p->x && p->x >= mx && hx != p->x && - pointInTriangle(hy < my ? hx : qx, hy, mx, my, hy < my ? qx : hx, hy, - p->x, p->y)) { - tanCur = std::abs(hy - p->y) / (hx - p->x); // tangential - - if (locallyInside(p, hole) && - (tanCur < tanMin || - (tanCur == tanMin && (p->x > m->x || sectorContainsSector(m, p))))) { - m = p; - tanMin = tanCur; - } - } - - p = p->next; - } while (p != stop); - - return m; -} - -// whether sector in vertex m contains sector in vertex p in the same -// coordinates -template -bool Earcut::sectorContainsSector(const Node *m, const Node *p) { - return area(m->prev, m, p->prev) < 0 && area(p->next, m, m->next) < 0; -} - -// interlink polygon nodes in z-order -template -void Earcut::indexCurve(Node *start) { - assert(start); - Node *p = start; - - do { - p->z = p->z ? 
p->z : zOrder(p->x, p->y); - p->prevZ = p->prev; - p->nextZ = p->next; - p = p->next; - } while (p != start); - - p->prevZ->nextZ = nullptr; - p->prevZ = nullptr; - - sortLinked(p); -} - -// Simon Tatham's linked list merge sort algorithm -// http://www.chiark.greenend.org.uk/~sgtatham/algorithms/listsort.html -template -typename Earcut::Node *Earcut::sortLinked(Node *list) { - assert(list); - Node *p; - Node *q; - Node *e; - Node *tail; - int i, numMerges, pSize, qSize; - int inSize = 1; - - for (;;) { - p = list; - list = nullptr; - tail = nullptr; - numMerges = 0; - - while (p) { - numMerges++; - q = p; - pSize = 0; - for (i = 0; i < inSize; i++) { - pSize++; - q = q->nextZ; - if (!q) break; - } - - qSize = inSize; - - while (pSize > 0 || (qSize > 0 && q)) { - if (pSize == 0) { - e = q; - q = q->nextZ; - qSize--; - } else if (qSize == 0 || !q) { - e = p; - p = p->nextZ; - pSize--; - } else if (p->z <= q->z) { - e = p; - p = p->nextZ; - pSize--; - } else { - e = q; - q = q->nextZ; - qSize--; - } - - if (tail) - tail->nextZ = e; - else - list = e; - - e->prevZ = tail; - tail = e; - } - - p = q; - } - - tail->nextZ = nullptr; - - if (numMerges <= 1) return list; - - inSize *= 2; - } -} - -// z-order of a Vertex given coords and size of the data bounding box -template -int32_t Earcut::zOrder(const double x_, const double y_) { - // coords are transformed into non-negative 15-bit integer range - int32_t x = static_cast(32767.0 * (x_ - minX) * inv_size); - int32_t y = static_cast(32767.0 * (y_ - minY) * inv_size); - - x = (x | (x << 8)) & 0x00FF00FF; - x = (x | (x << 4)) & 0x0F0F0F0F; - x = (x | (x << 2)) & 0x33333333; - x = (x | (x << 1)) & 0x55555555; - - y = (y | (y << 8)) & 0x00FF00FF; - y = (y | (y << 4)) & 0x0F0F0F0F; - y = (y | (y << 2)) & 0x33333333; - y = (y | (y << 1)) & 0x55555555; - - return x | (y << 1); -} - -// find the leftmost node of a polygon ring -template -typename Earcut::Node *Earcut::getLeftmost(Node *start) { - Node *p = start; - Node *leftmost = start; - do { - if (p->x < leftmost->x || (p->x == leftmost->x && p->y < leftmost->y)) - leftmost = p; - p = p->next; - } while (p != start); - - return leftmost; -} - -// check if a point lies within a convex triangle -template -bool Earcut::pointInTriangle(double ax, double ay, double bx, double by, - double cx, double cy, double px, - double py) const { - return (cx - px) * (ay - py) - (ax - px) * (cy - py) >= 0 && - (ax - px) * (by - py) - (bx - px) * (ay - py) >= 0 && - (bx - px) * (cy - py) - (cx - px) * (by - py) >= 0; -} - -// check if a diagonal between two polygon nodes is valid (lies in polygon -// interior) -template -bool Earcut::isValidDiagonal(Node *a, Node *b) { - return a->next->i != b->i && a->prev->i != b->i && - !intersectsPolygon(a, b) && // dones't intersect other edges - ((locallyInside(a, b) && locallyInside(b, a) && - middleInside(a, b) && // locally visible - (area(a->prev, a, b->prev) != 0.0 || - area(a, b->prev, b) != - 0.0)) || // does not create opposite-facing sectors - (equals(a, b) && area(a->prev, a, a->next) > 0 && - area(b->prev, b, b->next) > 0)); // special zero-length case -} - -// signed area of a triangle -template -double Earcut::area(const Node *p, const Node *q, const Node *r) const { - return (q->y - p->y) * (r->x - q->x) - (q->x - p->x) * (r->y - q->y); -} - -// check if two points are equal -template -bool Earcut::equals(const Node *p1, const Node *p2) { - return p1->x == p2->x && p1->y == p2->y; -} - -// check if two segments intersect -template -bool Earcut::intersects(const 
Node *p1, const Node *q1, const Node *p2, - const Node *q2) { - int o1 = sign(area(p1, q1, p2)); - int o2 = sign(area(p1, q1, q2)); - int o3 = sign(area(p2, q2, p1)); - int o4 = sign(area(p2, q2, q1)); - - if (o1 != o2 && o3 != o4) return true; // general case - - if (o1 == 0 && onSegment(p1, p2, q1)) - return true; // p1, q1 and p2 are collinear and p2 lies on p1q1 - if (o2 == 0 && onSegment(p1, q2, q1)) - return true; // p1, q1 and q2 are collinear and q2 lies on p1q1 - if (o3 == 0 && onSegment(p2, p1, q2)) - return true; // p2, q2 and p1 are collinear and p1 lies on p2q2 - if (o4 == 0 && onSegment(p2, q1, q2)) - return true; // p2, q2 and q1 are collinear and q1 lies on p2q2 - - return false; -} - -// for collinear points p, q, r, check if point q lies on segment pr -template -bool Earcut::onSegment(const Node *p, const Node *q, const Node *r) { - return q->x <= std::max(p->x, r->x) && - q->x >= std::min(p->x, r->x) && - q->y <= std::max(p->y, r->y) && - q->y >= std::min(p->y, r->y); -} - -template -int Earcut::sign(double val) { - return (0.0 < val) - (val < 0.0); -} - -// check if a polygon diagonal intersects any polygon segments -template -bool Earcut::intersectsPolygon(const Node *a, const Node *b) { - const Node *p = a; - do { - if (p->i != a->i && p->next->i != a->i && p->i != b->i && - p->next->i != b->i && intersects(p, p->next, a, b)) - return true; - p = p->next; - } while (p != a); - - return false; -} - -// check if a polygon diagonal is locally inside the polygon -template -bool Earcut::locallyInside(const Node *a, const Node *b) { - return area(a->prev, a, a->next) < 0 - ? area(a, b, a->next) >= 0 && area(a, a->prev, b) >= 0 - : area(a, b, a->prev) < 0 || area(a, a->next, b) < 0; -} - -// check if the middle Vertex of a polygon diagonal is inside the polygon -template -bool Earcut::middleInside(const Node *a, const Node *b) { - const Node *p = a; - bool inside = false; - double px = (a->x + b->x) / 2; - double py = (a->y + b->y) / 2; - do { - if (((p->y > py) != (p->next->y > py)) && p->next->y != p->y && - (px < (p->next->x - p->x) * (py - p->y) / (p->next->y - p->y) + p->x)) - inside = !inside; - p = p->next; - } while (p != a); - - return inside; -} - -// link two polygon vertices with a bridge; if the vertices belong to the same -// ring, it splits polygon into two; if one belongs to the outer ring and -// another to a hole, it merges it into a single ring -template -typename Earcut::Node *Earcut::splitPolygon(Node *a, Node *b) { - Node *a2 = nodes.construct(a->i, a->x, a->y); - Node *b2 = nodes.construct(b->i, b->x, b->y); - Node *an = a->next; - Node *bp = b->prev; - - a->next = b; - b->prev = a; - - a2->next = an; - an->prev = a2; - - b2->next = a2; - a2->prev = b2; - - bp->next = b2; - b2->prev = bp; - - return b2; -} - -// create a node and util::optionally link it with previous one (in a circular -// doubly linked list) -template -template -typename Earcut::Node *Earcut::insertNode(std::size_t i, const Point &pt, - Node *last) { - Node *p = nodes.construct(static_cast(i), util::nth<0, Point>::get(pt), - util::nth<1, Point>::get(pt)); - - if (!last) { - p->prev = p; - p->next = p; +#ifdef TINYOBJLOADER_DONOT_INCLUDE_MAPBOX_EARCUT +// Assume earcut.hpp is included outside of tiny_obj_loader.h +#else - } else { - assert(last); - p->next = last->next; - p->prev = last; - last->next->prev = p; - last->next = p; - } - return p; -} +#ifdef __clang__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Weverything" +#endif -template -void 
Earcut<N>::removeNode(Node *p) {
-  p->next->prev = p->prev;
-  p->prev->next = p->next;
+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Weverything"
+#endif
-  if (p->prevZ) p->prevZ->nextZ = p->nextZ;
-  if (p->nextZ) p->nextZ->prevZ = p->prevZ;
-}
-}  // namespace detail
+#include <array>
+#include "mapbox/earcut.hpp"
-template <typename N = uint32_t, typename Polygon>
-std::vector<N> earcut(const Polygon &poly) {
-  mapbox::detail::Earcut<N> earcut;
-  earcut(poly);
-  return std::move(earcut.indices);
-}
-}  // namespace mapbox
+#ifdef __clang__
+#pragma clang diagnostic pop
+#endif
+#endif
 #endif  // TINYOBJLOADER_USE_MAPBOX_EARCUT

From b86c49fb1319a97c7960083affc74c7bdc571749 Mon Sep 17 00:00:00 2001
From: Syoyo Fujita
Date: Fri, 20 Aug 2021 17:30:54 +0900
Subject: [PATCH 077/139] To avoid annoying compilation error caused by MSVC's
 min/max define, use (#316) hard-coded int max value instead of
 std::numeric_limits<int>::max().

---
 tiny_obj_loader.h | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h
index 77f86b2d..c4bb4546 100644
--- a/tiny_obj_loader.h
+++ b/tiny_obj_loader.h
@@ -970,7 +970,9 @@ static bool tryParseDouble(const char *s, const char *s_end, double *result) {
     read = 0;
     end_not_reached = (curr != s_end);
     while (end_not_reached && IS_DIGIT(*curr)) {
-      if (exponent > std::numeric_limits<int>::max()/10) {
+      // To avoid annoying MSVC's min/max macro definition,
+      // use the hardcoded int max value
+      if (exponent > (2147483647/10)) {  // 2147483647 = std::numeric_limits<int>::max()
        // Integer overflow
        goto fail;
      }

From cda807746092692fee68b241b4fa40dfcacaa164 Mon Sep 17 00:00:00 2001
From: Syoyo Fujita
Date: Sat, 21 Aug 2021 18:43:20 +0900
Subject: [PATCH 078/139] Remove bintray and conan build since these are not
 maintained anymore. (#317)

Also remove the GitHub release deploy in Travis (this will be transitioned to
GitHub Actions)

---
 .bintray.in               | 37 -------------------------------------
 .travis.yml               | 25 -------------------------
 README.md                 |  8 ++------
 tools/travis_postbuild.sh | 12 ------------
 4 files changed, 2 insertions(+), 80 deletions(-)
 delete mode 100644 .bintray.in
 delete mode 100755 tools/travis_postbuild.sh

diff --git a/.bintray.in b/.bintray.in
deleted file mode 100644
index 4336d65c..00000000
--- a/.bintray.in
+++ /dev/null
@@ -1,37 +0,0 @@
-{
-  /* Bintray package information.
-     In case the package already exists on Bintray, only the name, repo and subject
-     fields are mandatory. */
-
-  "package": {
-    "name": "releases", // Bintray package name
-    "repo": "tinyobjloader", // Bintray repository name
-    "subject": "syoyo" // Bintray subject (user or organization)
-  },
-
-  /* Package version information.
-     In case the version already exists on Bintray, only the name fields is mandatory. */
-
-  "version": {
-    "name": "@VERSION@",
-    "desc": "@VERSION@",
-    "released": "@DATE@",
-    "vcs_tag": "@VERSION@",
-    "gpgSign": false
-  },
-
-  /* Configure the files you would like to upload to Bintray and their upload path.
-     You can define one or more groups of patterns.
-     Each group contains three patterns:
-
-     includePattern: Pattern in the form of Ruby regular expression, indicating the path of files to be uploaded to Bintray.
-     excludePattern: Optional. Pattern in the form of Ruby regular expression, indicating the path of files to be removed from the list of files specified by the includePattern.
-     uploadPattern: Upload path on Bintray. The path can contain symbols in the form of $1, $2,... that are replaced with capturing groups defined in the include pattern.
-
-     Note: Regular expressions defined as part of the includePattern property must be wrapped with brackets.
*/ - - "files": - [ {"includePattern": "dist/(.*)", "uploadPattern": "$1"} ], - "publish": true -} - diff --git a/.travis.yml b/.travis.yml index 06b2d758..12b67f28 100644 --- a/.travis.yml +++ b/.travis.yml @@ -54,28 +54,3 @@ script: - mkdir dist - cp tiny_obj_loader.h dist/ -before_deploy: - - echo "Creating description file for bintray." - - ./tools/travis_postbuild.sh - -deploy: - - provider: bintray - file: ".bintray.json" - user: "syoyo" - key: - secure: W4F1VZcDcVOMe8Ymvo0bHery/JSmVhadl1NgAnGus6o7zVw7ChElKA1ho/NtqUbtoW8o1qUKMJdLQeh786jolocZJEJlns9JZ5FCet6H2b3kITfUa4GR5T11V/ZYwL3SajW8vZ1xu5UrpP5HHgFMYtxb1MFrNLDI60sh0RnyV/qFFBnCJGZPagF/M1mzbJeDml5xK5lShH0r8QpH+7MeQ1J8ungEyJ7UCyr1ao8gY9eq1/05IpHR9vri/d48EXQWHbqtI8EwCc7064oCYQGyYcLsD4yPEokwrdelkCvDquSpJLmbJENfZCc4vZGXsykjnQ8+gltJomBAivQFB9vc06ETEJssMzitbrfEZUrqFwZj/HZM7CYGXfGQWltL828SppCjsuWrgQ/VYXM5UgRpmhlxbqnuyxnYvKZ9EDW4+EnMkOmIl7WSDovp8E/4CZ0ghs+YyFS4SrgeqFCXS8bvxrkDUUPSipHuGBOt02fRnccKzU+3zU6Q5fghyLczz4ZtnOdk+Niz/njyF0SZfPYTUgb3GzAJ8Su6kvWJCAGdedON3n1F/TtybCE2dIdATxaO2uFQbwYjSOCiq209oCJ7MrsQZibRsa5a9YXyjlLkPxwOeVwo8wJyJclqWswIkhdSO8xvTnkwESv4yLzLutEOlBVlQbJzpyuS6vx0yHOYkwc= - all_branches: true - on: - repo: syoyo/tinyobjloader - condition: -n "$DEPLOY_BUILD" - tags: true - skip_cleanup: true - - provider: releases - api_key: - secure: AsXameK4GJn6h6wMmDrKTr7q/o9EI7hX7zWg1W6VaFBQKfkBvOmjJolWimjl6HMoRZ1NpMmK5GDm3zBlTUeABtgVBIyNWgE9vWS39ff6D5iQKcgScFsJkyILt0GikBqbN2pLGQ2t/M1Qh6n1sEIfzqekiCcF5Qvy5yYlYvHtaRGV02QeYAej/xx15/9SMuKTncHhjf63ClYPu8ODid7QUegJUvlQUeXoPsBDbaXMH2uDWoBWF7etX7G2Iob4NE8GX+ZP6dj+Ogi7p4HXThK650mzLL/pUl584EjjY/vePqx0cFhtpiRwvrW8SNPI1aJ1Phwa1enLRUgfS3bnkwQAMw/SCXSK2lnCvkUAXyTgpG03HWrZURj4vhEPXc7qHooO+dsfmi+JanYLaSDyrGpgQznLGjCMnVATimry0KxSufUY8Wt72Wh+nf7N0IgTUCjl32sWnQd/MRZPkxFuaf1h7r9RoH9KZY0yIOV09gABEFCGrOIZA2FcyhC2G26Bc4zyNrfMFpZ2DI76qdcWNdJGkRkpxtH9sGU8JgZu6Em2f1e6+SLgkBsPxbhRk5PwdhA9AXE2p9PmQqhO3jJKusGBZSoHAF7TlwagRY2J01yJxF7ge6zG9U8QuBqs1bB1zdnE34fHWOgs4st3inC+oBDOhvnEg1Nm/qeYVWMBzpwclSg= - file: tiny_obj_loader.h - all_branches: true - on: - repo: syoyo/tinyobjloader - tags: true - skip_cleanup: true diff --git a/README.md b/README.md index 94c7a138..e547e2dc 100644 --- a/README.md +++ b/README.md @@ -10,8 +10,6 @@ [![AUR version](https://img.shields.io/aur/version/tinyobjloader?logo=arch-linux)](https://aur.archlinux.org/packages/tinyobjloader) -[![Download](https://api.bintray.com/packages/conan/conan-center/tinyobjloader%3A_/images/download.svg)](https://bintray.com/conan/conan-center/tinyobjloader%3A_/_latestVersion) (not recommended) - Tiny but powerful single file wavefront obj loader written in C++03. No dependency except for C++ STL. It can parse over 10M polygons with moderate memory and time. `tinyobjloader` is good for embedding .obj loader to your (global illumination) renderer ;-) @@ -148,11 +146,9 @@ TinyObjLoader is licensed under MIT license. One option is to simply copy the header file into your project and to make sure that `TINYOBJLOADER_IMPLEMENTATION` is defined exactly once. -Tinyobjlaoder is also available as a [conan package](https://bintray.com/conan/conan-center/tinyobjloader%3A_/_latestVersion). Conan integrates with many build systems and lets you avoid manual dependency installation. Their [documentation](https://docs.conan.io/en/latest/getting_started.html) is a great starting point. 
-
-### Building tinyobjloader - Using vcpkg
+### Building tinyobjloader - Using vcpkg (not recommended)

-You can download and install tinyobjloader using the [vcpkg](https://github.com/Microsoft/vcpkg) dependency manager:
+Although it is not the recommended way, you can download and install tinyobjloader using the [vcpkg](https://github.com/Microsoft/vcpkg) dependency manager:

     git clone https://github.com/Microsoft/vcpkg.git
     cd vcpkg
diff --git a/tools/travis_postbuild.sh b/tools/travis_postbuild.sh
deleted file mode 100755
index 00c5d498..00000000
--- a/tools/travis_postbuild.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-
-DATEVAL=`date +%Y-%m-%d`
-VERSIONVAL=master
-
-# Use tag as version
-if [ $TRAVIS_TAG ]; then
-  VERSIONVAL=$TRAVIS_TAG
-fi
-
-sed -e s%@DATE@%${DATEVAL}% .bintray.in > .bintray.tmp
-sed -e s%@VERSION@%${VERSIONVAL}% .bintray.tmp > .bintray.json

From 1e794e9c641ffbf46d7e6e3b445e4b1c2bda10bb Mon Sep 17 00:00:00 2001
From: Syoyo Fujita
Date: Sat, 21 Aug 2021 18:48:59 +0900
Subject: [PATCH 079/139] Add note on v2.0 version.

---
 README.md | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index e547e2dc..91e9a92c 100644
--- a/README.md
+++ b/README.md
@@ -16,8 +16,10 @@ Tiny but powerful single file wavefront obj loader written in C++03. No dependen
 
 If you are looking for C89 version, please see https://github.com/syoyo/tinyobjloader-c .
 
-Notice!
--------
+Version notice
+--------------
+
+We recommend using the `master` (`main`) branch. It is the v2.0 release candidate. Most features are now nearly robust and stable (the remaining task for the v2.0 release is polishing the C++ and Python APIs).
 
 We have released new version v1.0.0 on 20 Aug, 2016. Old version is available as `v0.9.x` branch https://github.com/syoyo/tinyobjloader/tree/v0.9.x

From 95fe4ef41de28ce6b0aef00bfdd7b788641a15a6 Mon Sep 17 00:00:00 2001
From: Syoyo Fujita
Date: Fri, 27 Aug 2021 19:45:16 +0900
Subject: [PATCH 080/139] v2.0.0-rc9

---
 CMakeLists.txt  | 2 +-
 README.md       | 2 +-
 python/setup.py | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 8f1eb797..f64b42ce 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -4,7 +4,7 @@ project(tinyobjloader)
 cmake_minimum_required(VERSION 3.2)
 
 set(TINYOBJLOADER_SOVERSION 2)
-set(TINYOBJLOADER_VERSION 2.0.0-rc.8)
+set(TINYOBJLOADER_VERSION 2.0.0-rc.9)
 
 #optional double precision support
 option(TINYOBJLOADER_USE_DOUBLE "Build library with double precision instead of single (float)" OFF)

diff --git a/README.md b/README.md
index 91e9a92c..038e36c0 100644
--- a/README.md
+++ b/README.md
@@ -421,7 +421,7 @@ cibuildwheels + twine upload for each git tagging event is handled in Azure Pipe
 
 * Bump version in CMakeLists.txt
 * Update version in `python/setup.py`
-* Commit with tag name starging with `v`(e.g. `v2.1.0`)
+* Commit with tag name starting with `v`(e.g. `v2.1.0`)
 * `git push --tags`
 * cibuildwheels + pypi upload(through twine) will be automatically triggered in Azure Pipeline.
diff --git a/python/setup.py b/python/setup.py
index df53136f..6c63fdfb 100644
--- a/python/setup.py
+++ b/python/setup.py
@@ -102,7 +102,7 @@ def build_extensions(self):
 
 setuptools.setup(
     name="tinyobjloader",
-    version="2.0.0rc8",
+    version="2.0.0rc9",
     description="Tiny but powerful Wavefront OBJ loader",
     long_description=long_description,
     long_description_content_type="text/markdown",

From 8c089916e761858482df228ebdd2303a8a8b5671 Mon Sep 17 00:00:00 2001
From: Syoyo Fujita
Date: Fri, 27 Aug 2021 20:11:00 +0900
Subject: [PATCH 081/139] Use approved classifiers.

---
 python/setup.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/python/setup.py b/python/setup.py
index 6c63fdfb..578cd1ef 100644
--- a/python/setup.py
+++ b/python/setup.py
@@ -120,7 +120,8 @@ def build_extensions(self):
         "Topic :: Artistic Software",
         "Topic :: Multimedia :: Graphics :: 3D Modeling",
         "Topic :: Scientific/Engineering :: Visualization",
-        "License :: OSI Approved :: MIT License(ISC License for mapbox earcut.hpp)",
+        "License :: OSI Approved :: MIT License",
+        "License :: OSI Approved :: ISC License (ISCL)",
         "Operating System :: OS Independent",
         "Programming Language :: Python :: 3",
     ],

From 3e401b55a4a9ebfc6344a8a2a698245e7d3fc09e Mon Sep 17 00:00:00 2001
From: Syoyo Fujita
Date: Fri, 27 Aug 2021 20:11:30 +0900
Subject: [PATCH 082/139] Update version tagging procedure for developers.

---
 README.md | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 038e36c0..143e0426 100644
--- a/README.md
+++ b/README.md
@@ -421,7 +421,8 @@ cibuildwheels + twine upload for each git tagging event is handled in Azure Pipe
 
 * Bump version in CMakeLists.txt
 * Update version in `python/setup.py`
-* Commit with tag name starting with `v`(e.g. `v2.1.0`)
+* Commit and push `master`. Confirm C.I. build is OK.
+* Create tag starting with `v`(e.g. `v2.1.0`)
 * `git push --tags`
 * cibuildwheels + pypi upload(through twine) will be automatically triggered in Azure Pipeline.

From 097ad6a826336a76e42a27e6e531da7dbdf3fd01 Mon Sep 17 00:00:00 2001
From: Elnar Dakeshov
Date: Fri, 1 Oct 2021 04:46:26 -0700
Subject: [PATCH 083/139] Remove unnecessary ObjReader destructor (#322)

The destructor serves no purpose and is a pessimization in case the
`ObjReader` class is ever returned or moved around, as it inhibits
move-constructor generation in C++11 and above.
---
 tiny_obj_loader.h | 1 -
 1 file changed, 1 deletion(-)

diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h
index c4bb4546..38c8bd7c 100644
--- a/tiny_obj_loader.h
+++ b/tiny_obj_loader.h
@@ -531,7 +531,6 @@ struct ObjReaderConfig {
 
 class ObjReader {
  public:
   ObjReader() : valid_(false) {}
-  ~ObjReader() {}
 
   ///
   /// Load .obj and .mtl from a file.

From dec0ff050085f897822d2d369aa6c04617bf2eb2 Mon Sep 17 00:00:00 2001
From: Syoyo Fujita
Date: Sun, 5 Dec 2021 19:24:04 +0900
Subject: [PATCH 084/139] Bc nine respect smoothing groups (#325)

* New vertex normal generation functions in viewer.cc that handle
  multiple smoothing groups correctly (a general sketch of the idea
  follows this list).

* Fix compilation on gcc.

* Update stb_image.h

* Add extra comment.

* Ubuntu 16.04 no longer seems to be available on Azure Pipelines,
  so use `ubuntu-latest`.
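For reference, the viewer.cc changes themselves are not included in this
excerpt, so the following is only a minimal sketch of the general technique,
not the code added by this patch. It assumes the v2.0 API with triangulated
faces; ComputeSmoothingGroupNormals is a hypothetical helper name.

#include <array>
#include <cmath>
#include <map>
#include <utility>
#include <vector>

#include "tiny_obj_loader.h"

// Hypothetical helper: accumulate area-weighted face normals per
// (smoothing group id, vertex index) pair, then normalize the sums.
// A vertex shared by faces in different smoothing groups therefore
// gets one normal per group instead of a single averaged normal.
static std::map<std::pair<unsigned int, int>, std::array<float, 3> >
ComputeSmoothingGroupNormals(const tinyobj::attrib_t &attrib,
                             const tinyobj::shape_t &shape) {
  std::map<std::pair<unsigned int, int>, std::array<float, 3> > acc;
  const std::vector<tinyobj::real_t> &v = attrib.vertices;

  for (size_t f = 0; f < shape.mesh.indices.size() / 3; f++) {
    unsigned int sg = shape.mesh.smoothing_group_ids.empty()
                          ? 0
                          : shape.mesh.smoothing_group_ids[f];
    int idx[3];
    for (int c = 0; c < 3; c++) {
      idx[c] = shape.mesh.indices[3 * f + c].vertex_index;
    }
    // Face normal = cross(p1 - p0, p2 - p0). Its length is twice the
    // triangle area, which gives a simple area weighting when summed.
    float e1[3], e2[3], n[3];
    for (int k = 0; k < 3; k++) {
      e1[k] = float(v[3 * idx[1] + k] - v[3 * idx[0] + k]);
      e2[k] = float(v[3 * idx[2] + k] - v[3 * idx[0] + k]);
    }
    n[0] = e1[1] * e2[2] - e1[2] * e2[1];
    n[1] = e1[2] * e2[0] - e1[0] * e2[2];
    n[2] = e1[0] * e2[1] - e1[1] * e2[0];
    for (int c = 0; c < 3; c++) {
      std::array<float, 3> &sum = acc[std::make_pair(sg, idx[c])];
      for (int k = 0; k < 3; k++) sum[k] += n[k];
    }
  }

  // Normalize the accumulated sums.
  for (auto &kv : acc) {
    std::array<float, 3> &nrm = kv.second;
    float len = std::sqrt(nrm[0] * nrm[0] + nrm[1] * nrm[1] + nrm[2] * nrm[2]);
    if (len > 0.0f) {
      for (int k = 0; k < 3; k++) nrm[k] /= len;
    }
  }
  return acc;
}

Keying the accumulation on the (smoothing group, vertex index) pair is what
preserves hard edges: faces in different groups never contribute to the same
accumulated normal, so only faces within one group get smoothed together.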
Co-authored-by: Brian Collins --- azure-pipelines.yml | 4 +- examples/viewer/stb_image.h | 2734 +++++++++++++++++++++++++---------- examples/viewer/viewer.cc | 185 ++- 3 files changed, 2118 insertions(+), 805 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index a71c43b3..b9c4fcd5 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -28,8 +28,10 @@ jobs: black --check python/ displayName: Check Python code format + # Ubuntu16.04 seems now deprecated(as of 2021/12/01), + # so use `ubuntu-latest` - job: linux - pool: {vmImage: "Ubuntu-16.04"} + pool: {vmImage: "ubuntu-latest"} steps: - task: UsePythonVersion@0 - bash: | diff --git a/examples/viewer/stb_image.h b/examples/viewer/stb_image.h index a3c11299..d60371b9 100644 --- a/examples/viewer/stb_image.h +++ b/examples/viewer/stb_image.h @@ -1,5 +1,5 @@ -/* stb_image - v2.12 - public domain image loader - http://nothings.org/stb_image.h - no warranty implied; use at your own risk +/* stb_image - v2.27 - public domain image loader - http://nothings.org/stb + no warranty implied; use at your own risk Do this: #define STB_IMAGE_IMPLEMENTATION @@ -21,7 +21,7 @@ avoid problematic images and only need the trivial interface JPEG baseline & progressive (12 bpc/arithmetic not supported, same as stock IJG lib) - PNG 1/2/4/8-bit-per-channel (16 bpc not supported) + PNG 1/2/4/8/16-bit-per-channel TGA (not sure what subset, if a subset) BMP non-1bpp, non-RLE @@ -42,136 +42,34 @@ Full documentation under "DOCUMENTATION" below. - Revision 2.00 release notes: - - - Progressive JPEG is now supported. - - - PPM and PGM binary formats are now supported, thanks to Ken Miller. - - - x86 platforms now make use of SSE2 SIMD instructions for - JPEG decoding, and ARM platforms can use NEON SIMD if requested. - This work was done by Fabian "ryg" Giesen. SSE2 is used by - default, but NEON must be enabled explicitly; see docs. - - With other JPEG optimizations included in this version, we see - 2x speedup on a JPEG on an x86 machine, and a 1.5x speedup - on a JPEG on an ARM machine, relative to previous versions of this - library. The same results will not obtain for all JPGs and for all - x86/ARM machines. (Note that progressive JPEGs are significantly - slower to decode than regular JPEGs.) This doesn't mean that this - is the fastest JPEG decoder in the land; rather, it brings it - closer to parity with standard libraries. If you want the fastest - decode, look elsewhere. (See "Philosophy" section of docs below.) - - See final bullet items below for more info on SIMD. - - - Added STBI_MALLOC, STBI_REALLOC, and STBI_FREE macros for replacing - the memory allocator. Unlike other STBI libraries, these macros don't - support a context parameter, so if you need to pass a context in to - the allocator, you'll have to store it in a global or a thread-local - variable. - - - Split existing STBI_NO_HDR flag into two flags, STBI_NO_HDR and - STBI_NO_LINEAR. - STBI_NO_HDR: suppress implementation of .hdr reader format - STBI_NO_LINEAR: suppress high-dynamic-range light-linear float API - - - You can suppress implementation of any of the decoders to reduce - your code footprint by #defining one or more of the following - symbols before creating the implementation. 
- - STBI_NO_JPEG - STBI_NO_PNG - STBI_NO_BMP - STBI_NO_PSD - STBI_NO_TGA - STBI_NO_GIF - STBI_NO_HDR - STBI_NO_PIC - STBI_NO_PNM (.ppm and .pgm) - - - You can request *only* certain decoders and suppress all other ones - (this will be more forward-compatible, as addition of new decoders - doesn't require you to disable them explicitly): - - STBI_ONLY_JPEG - STBI_ONLY_PNG - STBI_ONLY_BMP - STBI_ONLY_PSD - STBI_ONLY_TGA - STBI_ONLY_GIF - STBI_ONLY_HDR - STBI_ONLY_PIC - STBI_ONLY_PNM (.ppm and .pgm) - - Note that you can define multiples of these, and you will get all - of them ("only x" and "only y" is interpreted to mean "only x&y"). - - - If you use STBI_NO_PNG (or _ONLY_ without PNG), and you still - want the zlib decoder to be available, #define STBI_SUPPORT_ZLIB - - - Compilation of all SIMD code can be suppressed with - #define STBI_NO_SIMD - It should not be necessary to disable SIMD unless you have issues - compiling (e.g. using an x86 compiler which doesn't support SSE - intrinsics or that doesn't support the method used to detect - SSE2 support at run-time), and even those can be reported as - bugs so I can refine the built-in compile-time checking to be - smarter. - - - The old STBI_SIMD system which allowed installing a user-defined - IDCT etc. has been removed. If you need this, don't upgrade. My - assumption is that almost nobody was doing this, and those who - were will find the built-in SIMD more satisfactory anyway. - - - RGB values computed for JPEG images are slightly different from - previous versions of stb_image. (This is due to using less - integer precision in SIMD.) The C code has been adjusted so - that the same RGB values will be computed regardless of whether - SIMD support is available, so your app should always produce - consistent results. But these results are slightly different from - previous versions. (Specifically, about 3% of available YCbCr values - will compute different RGB results from pre-1.49 versions by +-1; - most of the deviating values are one smaller in the G channel.) - - - If you must produce consistent results with previous versions of - stb_image, #define STBI_JPEG_OLD and you will get the same results - you used to; however, you will not get the SIMD speedups for - the YCbCr-to-RGB conversion step (although you should still see - significant JPEG speedup from the other changes). - - Please note that STBI_JPEG_OLD is a temporary feature; it will be - removed in future versions of the library. It is only intended for - near-term back-compatibility use. - - - Latest revision history: +LICENSE + + See end of file for license information. 
+ +RECENT REVISION HISTORY: + + 2.27 (2021-07-11) document stbi_info better, 16-bit PNM support, bug fixes + 2.26 (2020-07-13) many minor fixes + 2.25 (2020-02-02) fix warnings + 2.24 (2020-02-02) fix warnings; thread-local failure_reason and flip_vertically + 2.23 (2019-08-11) fix clang static analysis warning + 2.22 (2019-03-04) gif fixes, fix warnings + 2.21 (2019-02-25) fix typo in comment + 2.20 (2019-02-07) support utf8 filenames in Windows; fix warnings and platform ifdefs + 2.19 (2018-02-11) fix warning + 2.18 (2018-01-30) fix warnings + 2.17 (2018-01-29) bugfix, 1-bit BMP, 16-bitness query, fix warnings + 2.16 (2017-07-23) all functions have 16-bit variants; optimizations; bugfixes + 2.15 (2017-03-18) fix png-1,2,4; all Imagenet JPGs; no runtime SSE detection on GCC + 2.14 (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs + 2.13 (2016-12-04) experimental 16-bit API, only for PNG so far; fixes 2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes 2.11 (2016-04-02) 16-bit PNGS; enable SSE2 in non-gcc x64 RGB-format JPEG; remove white matting in PSD; - allocate large structures on the stack; + allocate large structures on the stack; correct channel count for PNG & BMP 2.10 (2016-01-22) avoid warning introduced in 2.09 2.09 (2016-01-16) 16-bit TGA; comments in PNM files; STBI_REALLOC_SIZED - 2.08 (2015-09-13) fix to 2.07 cleanup, reading RGB PSD as RGBA - 2.07 (2015-09-13) partial animated GIF support - limited 16-bit PSD support - minor bugs, code cleanup, and compiler warnings - 2.06 (2015-04-19) fix bug where PSD returns wrong '*comp' value - 2.05 (2015-04-19) fix bug in progressive JPEG handling, fix warning - 2.04 (2015-04-15) try to re-enable SIMD on MinGW 64-bit - 2.03 (2015-04-12) additional corruption checking - stbi_set_flip_vertically_on_load - fix NEON support; fix mingw support - 2.02 (2015-01-19) fix incorrect assert, fix warning - 2.01 (2015-01-17) fix various warnings - 2.00b (2014-12-25) fix STBI_MALLOC in progressive JPEG - 2.00 (2014-12-25) optimize JPEG, including x86 SSE2 & ARM NEON SIMD - progressive JPEG - PGM/PPM support - STBI_MALLOC,STBI_REALLOC,STBI_FREE - STBI_NO_*, STBI_ONLY_* - GIF bugfix See end of file for full revision history. 
@@ -186,34 +84,43 @@ Tom Seddon (pic) Omar Cornut (1/2/4-bit PNG) Thatcher Ulrich (psd) Nicolas Guillemot (vertical flip) Ken Miller (pgm, ppm) Richard Mitton (16-bit PSD) - urraka@github (animated gif) Junggon Kim (PNM comments) - Daniel Gibson (16-bit TGA) - - Optimizations & bugfixes - Fabian "ryg" Giesen - Arseny Kapoulkine + github:urraka (animated gif) Junggon Kim (PNM comments) + Christopher Forseth (animated gif) Daniel Gibson (16-bit TGA) + socks-the-fox (16-bit PNG) + Jeremy Sawicki (handle all ImageNet JPGs) + Optimizations & bugfixes Mikhail Morozov (1-bit BMP) + Fabian "ryg" Giesen Anael Seghezzi (is-16-bit query) + Arseny Kapoulkine Simon Breuss (16-bit PNM) + John-Mark Allen + Carmelo J Fdez-Aguera Bug & warning fixes - Marc LeBlanc David Woo Guillaume George Martins Mozeiko - Christpher Lloyd Martin Golini Jerry Jansson Joseph Thomson - Dave Moore Roy Eltham Hayaki Saito Phil Jordan - Won Chun Luke Graham Johan Duparc Nathan Reed - the Horde3D community Thomas Ruf Ronny Chevalier Nick Verigakis - Janez Zemva John Bartholomew Michal Cichon svdijk@github - Jonathan Blow Ken Hamada Tero Hanninen Baldur Karlsson - Laurent Gomila Cort Stratton Sergio Gonzalez romigrou@github - Aruelien Pocheville Thibault Reuille Cass Everitt Matthew Gregan - Ryamond Barbiero Paul Du Bois Engin Manap snagar@github - Michaelangel007@github Oriol Ferrer Mesia socks-the-fox - Blazej Dariusz Roszkowski - - -LICENSE - -This software is dual-licensed to the public domain and under the following -license: you are granted a perpetual, irrevocable license to copy, modify, -publish, and distribute this file as you see fit. - + Marc LeBlanc David Woo Guillaume George Martins Mozeiko + Christpher Lloyd Jerry Jansson Joseph Thomson Blazej Dariusz Roszkowski + Phil Jordan Dave Moore Roy Eltham + Hayaki Saito Nathan Reed Won Chun + Luke Graham Johan Duparc Nick Verigakis the Horde3D community + Thomas Ruf Ronny Chevalier github:rlyeh + Janez Zemva John Bartholomew Michal Cichon github:romigrou + Jonathan Blow Ken Hamada Tero Hanninen github:svdijk + Eugene Golushkov Laurent Gomila Cort Stratton github:snagar + Aruelien Pocheville Sergio Gonzalez Thibault Reuille github:Zelex + Cass Everitt Ryamond Barbiero github:grim210 + Paul Du Bois Engin Manap Aldo Culquicondor github:sammyhw + Philipp Wiesemann Dale Weiler Oriol Ferrer Mesia github:phprus + Josh Tobin Matthew Gregan github:poppolopoppo + Julian Raschke Gregory Mullen Christian Floisand github:darealshinji + Baldur Karlsson Kevin Schmidt JR Smith github:Michaelangel007 + Brad Weinberger Matvey Cherevko github:mosra + Luca Sas Alexander Veselov Zack Middleton [reserved] + Ryan C. Gordon [reserved] [reserved] + DO NOT ADD YOUR NAME HERE + + Jacko Dirks + + To add your name to the credits, pick a random blank space in the middle and fill it. + 80% of merge conflicts on stb PRs are due to people adding their name at the end + of the credits. */ #ifndef STBI_INCLUDE_STB_IMAGE_H @@ -222,10 +129,8 @@ publish, and distribute this file as you see fit. // DOCUMENTATION // // Limitations: -// - no 16-bit-per-channel PNG // - no 12-bit-per-channel JPEG // - no JPEGs with arithmetic coding -// - no 1-bit BMP // - GIF always returns *comp=4 // // Basic usage (see HDR discussion below for HDR usage): @@ -238,10 +143,10 @@ publish, and distribute this file as you see fit. 
// stbi_image_free(data) // // Standard parameters: -// int *x -- outputs image width in pixels -// int *y -- outputs image height in pixels -// int *comp -- outputs # of image components in image file -// int req_comp -- if non-zero, # of image components requested in result +// int *x -- outputs image width in pixels +// int *y -- outputs image height in pixels +// int *channels_in_file -- outputs # of image components in image file +// int desired_channels -- if non-zero, # of image components requested in result // // The return value from an image loader is an 'unsigned char *' which points // to the pixel data, or NULL on an allocation failure or if the image is @@ -249,11 +154,12 @@ publish, and distribute this file as you see fit. // with each pixel consisting of N interleaved 8-bit components; the first // pixel pointed to is top-left-most in the image. There is no padding between // image scanlines or between pixels, regardless of format. The number of -// components N is 'req_comp' if req_comp is non-zero, or *comp otherwise. -// If req_comp is non-zero, *comp has the number of components that _would_ -// have been output otherwise. E.g. if you set req_comp to 4, you will always -// get RGBA output, but you can check *comp to see if it's trivially opaque -// because e.g. there were only 3 channels in the source image. +// components N is 'desired_channels' if desired_channels is non-zero, or +// *channels_in_file otherwise. If desired_channels is non-zero, +// *channels_in_file has the number of components that _would_ have been +// output otherwise. E.g. if you set desired_channels to 4, you will always +// get RGBA output, but you can check *channels_in_file to see if it's trivially +// opaque because e.g. there were only 3 channels in the source image. // // An output image with N components has the following components interleaved // in this order in each pixel: @@ -265,14 +171,50 @@ publish, and distribute this file as you see fit. // 4 red, green, blue, alpha // // If image loading fails for any reason, the return value will be NULL, -// and *x, *y, *comp will be unchanged. The function stbi_failure_reason() -// can be queried for an extremely brief, end-user unfriendly explanation -// of why the load failed. Define STBI_NO_FAILURE_STRINGS to avoid -// compiling these strings at all, and STBI_FAILURE_USERMSG to get slightly +// and *x, *y, *channels_in_file will be unchanged. The function +// stbi_failure_reason() can be queried for an extremely brief, end-user +// unfriendly explanation of why the load failed. Define STBI_NO_FAILURE_STRINGS +// to avoid compiling these strings at all, and STBI_FAILURE_USERMSG to get slightly // more user-friendly ones. // // Paletted PNG, BMP, GIF, and PIC images are automatically depalettized. // +// To query the width, height and component count of an image without having to +// decode the full file, you can use the stbi_info family of functions: +// +// int x,y,n,ok; +// ok = stbi_info(filename, &x, &y, &n); +// // returns ok=1 and sets x, y, n if image is a supported format, +// // 0 otherwise. +// +// Note that stb_image pervasively uses ints in its public API for sizes, +// including sizes of memory buffers. This is now part of the API and thus +// hard to change without causing breakage. As a result, the various image +// loaders all have certain limits on image size; these differ somewhat +// by format but generally boil down to either just under 2GB or just under +// 1GB. 
When the decoded image would be larger than this, stb_image decoding +// will fail. +// +// Additionally, stb_image will reject image files that have any of their +// dimensions set to a larger value than the configurable STBI_MAX_DIMENSIONS, +// which defaults to 2**24 = 16777216 pixels. Due to the above memory limit, +// the only way to have an image with such dimensions load correctly +// is for it to have a rather extreme aspect ratio. Either way, the +// assumption here is that such larger images are likely to be malformed +// or malicious. If you do need to load an image with individual dimensions +// larger than that, and it still fits in the overall size limit, you can +// #define STBI_MAX_DIMENSIONS on your own to be something larger. +// +// =========================================================================== +// +// UNICODE: +// +// If compiling for Windows and you wish to use Unicode filenames, compile +// with +// #define STBI_WINDOWS_UTF8 +// and pass utf8-encoded filenames. Call stbi_convert_wchar_to_utf8 to convert +// Windows wchar_t filenames to utf8. +// // =========================================================================== // // Philosophy @@ -285,15 +227,15 @@ publish, and distribute this file as you see fit. // // Sometimes I let "good performance" creep up in priority over "easy to maintain", // and for best performance I may provide less-easy-to-use APIs that give higher -// performance, in addition to the easy to use ones. Nevertheless, it's important +// performance, in addition to the easy-to-use ones. Nevertheless, it's important // to keep in mind that from the standpoint of you, a client of this library, -// all you care about is #1 and #3, and stb libraries do not emphasize #3 above all. +// all you care about is #1 and #3, and stb libraries DO NOT emphasize #3 above all. // // Some secondary priorities arise directly from the first two, some of which -// make more explicit reasons why performance can't be emphasized. +// provide more explicit reasons why performance can't be emphasized. // // - Portable ("ease of use") -// - Small footprint ("easy to maintain") +// - Small source code footprint ("easy to maintain") // - No dependencies ("ease of use") // // =========================================================================== @@ -325,13 +267,6 @@ publish, and distribute this file as you see fit. // (at least this is true for iOS and Android). Therefore, the NEON support is // toggled by a build flag: define STBI_NEON to get NEON loops. // -// The output of the JPEG decoder is slightly different from versions where -// SIMD support was introduced (that is, for versions before 1.49). The -// difference is only +-1 in the 8-bit RGB channels, and only on a small -// fraction of pixels. You can force the pre-1.49 behavior by defining -// STBI_JPEG_OLD, but this will disable some of the SIMD decoding path -// and hence cost some performance. -// // If for some reason you do not want to use any of SIMD code, or if // you have issues compiling it, you can disable it entirely by // defining STBI_NO_SIMD. @@ -340,11 +275,10 @@ publish, and distribute this file as you see fit. // // HDR image support (disable by defining STBI_NO_HDR) // -// stb_image now supports loading HDR images in general, and currently -// the Radiance .HDR file format, although the support is provided -// generically. 
You can still load any file through the existing interface; -// if you attempt to load an HDR file, it will be automatically remapped to -// LDR, assuming gamma 2.2 and an arbitrary scale factor defaulting to 1; +// stb_image supports loading HDR images in general, and currently the Radiance +// .HDR file format specifically. You can still load any file through the existing +// interface; if you attempt to load an HDR file, it will be automatically remapped +// to LDR, assuming gamma 2.2 and an arbitrary scale factor defaulting to 1; // both of these constants can be reconfigured through this interface: // // stbi_hdr_to_ldr_gamma(2.2f); @@ -376,18 +310,59 @@ publish, and distribute this file as you see fit. // // iPhone PNG support: // -// By default we convert iphone-formatted PNGs back to RGB, even though -// they are internally encoded differently. You can disable this conversion -// by by calling stbi_convert_iphone_png_to_rgb(0), in which case -// you will always just get the native iphone "format" through (which -// is BGR stored in RGB). +// We optionally support converting iPhone-formatted PNGs (which store +// premultiplied BGRA) back to RGB, even though they're internally encoded +// differently. To enable this conversion, call +// stbi_convert_iphone_png_to_rgb(1). // // Call stbi_set_unpremultiply_on_load(1) as well to force a divide per // pixel to remove any premultiplied alpha *only* if the image file explicitly // says there's premultiplied data (currently only happens in iPhone images, // and only if iPhone convert-to-rgb processing is on). // - +// =========================================================================== +// +// ADDITIONAL CONFIGURATION +// +// - You can suppress implementation of any of the decoders to reduce +// your code footprint by #defining one or more of the following +// symbols before creating the implementation. +// +// STBI_NO_JPEG +// STBI_NO_PNG +// STBI_NO_BMP +// STBI_NO_PSD +// STBI_NO_TGA +// STBI_NO_GIF +// STBI_NO_HDR +// STBI_NO_PIC +// STBI_NO_PNM (.ppm and .pgm) +// +// - You can request *only* certain decoders and suppress all other ones +// (this will be more forward-compatible, as addition of new decoders +// doesn't require you to disable them explicitly): +// +// STBI_ONLY_JPEG +// STBI_ONLY_PNG +// STBI_ONLY_BMP +// STBI_ONLY_PSD +// STBI_ONLY_TGA +// STBI_ONLY_GIF +// STBI_ONLY_HDR +// STBI_ONLY_PIC +// STBI_ONLY_PNM (.ppm and .pgm) +// +// - If you use STBI_NO_PNG (or _ONLY_ without PNG), and you still +// want the zlib decoder to be available, #define STBI_SUPPORT_ZLIB +// +// - If you define STBI_MAX_DIMENSIONS, stb_image will reject images greater +// than that size (in either width or height) without further processing. +// This is to let programs in the wild set an upper bound to prevent +// denial-of-service attacks on untrusted data, as one could generate a +// valid image of gigantic dimensions and force stb_image to allocate a +// huge block of memory and spend disproportionate time decoding it. By +// default this is set to (1 << 24), which is 16777216, but that's still +// very big. #ifndef STBI_NO_STDIO #include @@ -397,7 +372,7 @@ publish, and distribute this file as you see fit. 
enum { - STBI_default = 0, // only used for req_comp + STBI_default = 0, // only used for desired_channels STBI_grey = 1, STBI_grey_alpha = 2, @@ -405,17 +380,21 @@ enum STBI_rgb_alpha = 4 }; +#include typedef unsigned char stbi_uc; +typedef unsigned short stbi_us; #ifdef __cplusplus extern "C" { #endif +#ifndef STBIDEF #ifdef STB_IMAGE_STATIC #define STBIDEF static #else #define STBIDEF extern #endif +#endif ////////////////////////////////////////////////////////////////////////////// // @@ -433,22 +412,52 @@ typedef struct int (*eof) (void *user); // returns nonzero if we are at end of file/data } stbi_io_callbacks; -STBIDEF stbi_uc *stbi_load (char const *filename, int *x, int *y, int *comp, int req_comp); -STBIDEF stbi_uc *stbi_load_from_memory (stbi_uc const *buffer, int len , int *x, int *y, int *comp, int req_comp); -STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk , void *user, int *x, int *y, int *comp, int req_comp); +//////////////////////////////////// +// +// 8-bits-per-channel interface +// + +STBIDEF stbi_uc *stbi_load_from_memory (stbi_uc const *buffer, int len , int *x, int *y, int *channels_in_file, int desired_channels); +STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk , void *user, int *x, int *y, int *channels_in_file, int desired_channels); #ifndef STBI_NO_STDIO -STBIDEF stbi_uc *stbi_load_from_file (FILE *f, int *x, int *y, int *comp, int req_comp); +STBIDEF stbi_uc *stbi_load (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); +STBIDEF stbi_uc *stbi_load_from_file (FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); // for stbi_load_from_file, file pointer is left pointing immediately after image #endif +#ifndef STBI_NO_GIF +STBIDEF stbi_uc *stbi_load_gif_from_memory(stbi_uc const *buffer, int len, int **delays, int *x, int *y, int *z, int *comp, int req_comp); +#endif + +#ifdef STBI_WINDOWS_UTF8 +STBIDEF int stbi_convert_wchar_to_utf8(char *buffer, size_t bufferlen, const wchar_t* input); +#endif + +//////////////////////////////////// +// +// 16-bits-per-channel interface +// + +STBIDEF stbi_us *stbi_load_16_from_memory (stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels); +STBIDEF stbi_us *stbi_load_16_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels); + +#ifndef STBI_NO_STDIO +STBIDEF stbi_us *stbi_load_16 (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); +STBIDEF stbi_us *stbi_load_from_file_16(FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); +#endif + +//////////////////////////////////// +// +// float-per-channel interface +// #ifndef STBI_NO_LINEAR - STBIDEF float *stbi_loadf (char const *filename, int *x, int *y, int *comp, int req_comp); - STBIDEF float *stbi_loadf_from_memory (stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp); - STBIDEF float *stbi_loadf_from_callbacks (stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp); + STBIDEF float *stbi_loadf_from_memory (stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels); + STBIDEF float *stbi_loadf_from_callbacks (stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels); #ifndef STBI_NO_STDIO - STBIDEF float *stbi_loadf_from_file (FILE *f, int *x, int *y, int *comp, int req_comp); + STBIDEF float *stbi_loadf 
(char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); + STBIDEF float *stbi_loadf_from_file (FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); #endif #endif @@ -472,7 +481,7 @@ STBIDEF int stbi_is_hdr_from_file(FILE *f); // get a VERY brief reason for failure -// NOT THREADSAFE +// on most compilers (and ALL modern mainstream compilers) this is threadsafe STBIDEF const char *stbi_failure_reason (void); // free the loaded image -- this is just free() @@ -481,11 +490,14 @@ STBIDEF void stbi_image_free (void *retval_from_stbi_load); // get image dimensions & components without fully decoding STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp); STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp); +STBIDEF int stbi_is_16_bit_from_memory(stbi_uc const *buffer, int len); +STBIDEF int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const *clbk, void *user); #ifndef STBI_NO_STDIO -STBIDEF int stbi_info (char const *filename, int *x, int *y, int *comp); -STBIDEF int stbi_info_from_file (FILE *f, int *x, int *y, int *comp); - +STBIDEF int stbi_info (char const *filename, int *x, int *y, int *comp); +STBIDEF int stbi_info_from_file (FILE *f, int *x, int *y, int *comp); +STBIDEF int stbi_is_16_bit (char const *filename); +STBIDEF int stbi_is_16_bit_from_file(FILE *f); #endif @@ -502,6 +514,13 @@ STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert); // flip the image vertically, so the first pixel in the output array is the bottom left STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip); +// as above, but only applies to images loaded on the thread that calls the function +// this function is only available if your compiler supports thread-local variables; +// calling it will fail to link if your compiler doesn't +STBIDEF void stbi_set_unpremultiply_on_load_thread(int flag_true_if_should_unpremultiply); +STBIDEF void stbi_convert_iphone_png_to_rgb_thread(int flag_true_if_should_convert); +STBIDEF void stbi_set_flip_vertically_on_load_thread(int flag_true_if_should_flip); + // ZLIB client - used by PNG, available for other purposes STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen); @@ -566,9 +585,10 @@ STBIDEF int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const ch #include // ptrdiff_t on osx #include #include +#include #if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) -#include // ldexp +#include // ldexp, pow #endif #ifndef STBI_NO_STDIO @@ -580,6 +600,12 @@ STBIDEF int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const ch #define STBI_ASSERT(x) assert(x) #endif +#ifdef __cplusplus +#define STBI_EXTERN extern "C" +#else +#define STBI_EXTERN extern +#endif + #ifndef _MSC_VER #ifdef __cplusplus @@ -591,6 +617,23 @@ STBIDEF int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const ch #define stbi_inline __forceinline #endif +#ifndef STBI_NO_THREAD_LOCALS + #if defined(__cplusplus) && __cplusplus >= 201103L + #define STBI_THREAD_LOCAL thread_local + #elif defined(__GNUC__) && __GNUC__ < 5 + #define STBI_THREAD_LOCAL __thread + #elif defined(_MSC_VER) + #define STBI_THREAD_LOCAL __declspec(thread) + #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_THREADS__) + #define STBI_THREAD_LOCAL _Thread_local + #endif + + #ifndef STBI_THREAD_LOCAL + #if defined(__GNUC__) + #define 
STBI_THREAD_LOCAL __thread + #endif + #endif +#endif #ifdef _MSC_VER typedef unsigned short stbi__uint16; @@ -621,7 +664,7 @@ typedef unsigned char validate_uint32[sizeof(stbi__uint32)==4 ? 1 : -1]; #ifdef STBI_HAS_LROTL #define stbi_lrot(x,y) _lrotl(x,y) #else - #define stbi_lrot(x,y) (((x) << (y)) | ((x) >> (32 - (y)))) + #define stbi_lrot(x,y) (((x) << (y)) | ((x) >> (-(y) & 31))) #endif #if defined(STBI_MALLOC) && defined(STBI_FREE) && (defined(STBI_REALLOC) || defined(STBI_REALLOC_SIZED)) @@ -649,12 +692,14 @@ typedef unsigned char validate_uint32[sizeof(stbi__uint32)==4 ? 1 : -1]; #define STBI__X86_TARGET #endif -#if defined(__GNUC__) && (defined(STBI__X86_TARGET) || defined(STBI__X64_TARGET)) && !defined(__SSE2__) && !defined(STBI_NO_SIMD) -// NOTE: not clear do we actually need this for the 64-bit path? +#if defined(__GNUC__) && defined(STBI__X86_TARGET) && !defined(__SSE2__) && !defined(STBI_NO_SIMD) // gcc doesn't support sse2 intrinsics unless you compile with -msse2, -// (but compiling with -msse2 allows the compiler to use SSE2 everywhere; -// this is just broken and gcc are jerks for not fixing it properly -// http://www.virtualdub.org/blog/pivot/entry.php?id=363 ) +// which in turn means it gets to use SSE2 everywhere. This is unfortunate, +// but previous attempts to provide the SSE2 functions with runtime +// detection caused numerous issues. The way architecture extensions are +// exposed in GCC/Clang is, sadly, not really suited for one-file libs. +// New behavior: if compiled with -msse2, we use SSE2 without any +// detection; if not, we don't use it at all. #define STBI_NO_SIMD #endif @@ -702,25 +747,27 @@ static int stbi__cpuid3(void) #define STBI_SIMD_ALIGN(type, name) __declspec(align(16)) type name -static int stbi__sse2_available() +#if !defined(STBI_NO_JPEG) && defined(STBI_SSE2) +static int stbi__sse2_available(void) { int info3 = stbi__cpuid3(); return ((info3 >> 26) & 1) != 0; } +#endif + #else // assume GCC-style if not VC++ #define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16))) -static int stbi__sse2_available() +#if !defined(STBI_NO_JPEG) && defined(STBI_SSE2) +static int stbi__sse2_available(void) { -#if defined(__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__) >= 408 // GCC 4.8 or later - // GCC 4.8+ has a nice way to do this - return __builtin_cpu_supports("sse2"); -#else - // portable way to do this, preferably without using GCC inline ASM? - // just bail for now. - return 0; -#endif + // If we're even attempting to compile this on GCC/Clang, that means + // -msse2 is on, which means the compiler is allowed to use SSE2 + // instructions at will, and so are we. 
+ return 1; } +#endif + #endif #endif @@ -731,14 +778,21 @@ static int stbi__sse2_available() #ifdef STBI_NEON #include -// assume GCC or Clang on ARM targets +#ifdef _MSC_VER +#define STBI_SIMD_ALIGN(type, name) __declspec(align(16)) type name +#else #define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16))) #endif +#endif #ifndef STBI_SIMD_ALIGN #define STBI_SIMD_ALIGN(type, name) type name #endif +#ifndef STBI_MAX_DIMENSIONS +#define STBI_MAX_DIMENSIONS (1 << 24) +#endif + /////////////////////////////////////////////// // // stbi__context struct and start_xxx functions @@ -756,6 +810,7 @@ typedef struct int read_from_callbacks; int buflen; stbi_uc buffer_start[128]; + int callback_already_read; stbi_uc *img_buffer, *img_buffer_end; stbi_uc *img_buffer_original, *img_buffer_original_end; @@ -769,6 +824,7 @@ static void stbi__start_mem(stbi__context *s, stbi_uc const *buffer, int len) { s->io.read = NULL; s->read_from_callbacks = 0; + s->callback_already_read = 0; s->img_buffer = s->img_buffer_original = (stbi_uc *) buffer; s->img_buffer_end = s->img_buffer_original_end = (stbi_uc *) buffer+len; } @@ -780,7 +836,8 @@ static void stbi__start_callbacks(stbi__context *s, stbi_io_callbacks *c, void * s->io_user_data = user; s->buflen = sizeof(s->buffer_start); s->read_from_callbacks = 1; - s->img_buffer_original = s->buffer_start; + s->callback_already_read = 0; + s->img_buffer = s->img_buffer_original = s->buffer_start; stbi__refill_buffer(s); s->img_buffer_original_end = s->img_buffer_end; } @@ -794,12 +851,17 @@ static int stbi__stdio_read(void *user, char *data, int size) static void stbi__stdio_skip(void *user, int n) { + int ch; fseek((FILE*) user, n, SEEK_CUR); + ch = fgetc((FILE*) user); /* have to read a byte to reset feof()'s flag */ + if (ch != EOF) { + ungetc(ch, (FILE *) user); /* push byte back onto stream if valid. 
*/ + } } static int stbi__stdio_eof(void *user) { - return feof((FILE*) user); + return feof((FILE*) user) || ferror((FILE *) user); } static stbi_io_callbacks stbi__stdio_callbacks = @@ -827,79 +889,180 @@ static void stbi__rewind(stbi__context *s) s->img_buffer_end = s->img_buffer_original_end; } +enum +{ + STBI_ORDER_RGB, + STBI_ORDER_BGR +}; + +typedef struct +{ + int bits_per_channel; + int num_channels; + int channel_order; +} stbi__result_info; + #ifndef STBI_NO_JPEG static int stbi__jpeg_test(stbi__context *s); -static stbi_uc *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp); +static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp); #endif #ifndef STBI_NO_PNG static int stbi__png_test(stbi__context *s); -static stbi_uc *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp); +static void *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp); +static int stbi__png_is16(stbi__context *s); #endif #ifndef STBI_NO_BMP static int stbi__bmp_test(stbi__context *s); -static stbi_uc *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp); +static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp); #endif #ifndef STBI_NO_TGA static int stbi__tga_test(stbi__context *s); -static stbi_uc *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp); +static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp); #endif #ifndef STBI_NO_PSD static int stbi__psd_test(stbi__context *s); -static stbi_uc *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp); +static void *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc); static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp); +static int stbi__psd_is16(stbi__context *s); #endif #ifndef STBI_NO_HDR static int stbi__hdr_test(stbi__context *s); -static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp); +static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp); #endif #ifndef STBI_NO_PIC static int stbi__pic_test(stbi__context *s); -static stbi_uc *stbi__pic_load(stbi__context *s, int *x, int *y, int *comp, int req_comp); +static void *stbi__pic_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp); #endif #ifndef STBI_NO_GIF static int stbi__gif_test(stbi__context *s); -static stbi_uc *stbi__gif_load(stbi__context *s, int *x, int *y, int *comp, int req_comp); +static void *stbi__gif_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static void *stbi__load_gif_main(stbi__context *s, int **delays, int *x, int *y, int *z, int *comp, int req_comp); static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp); #endif #ifndef STBI_NO_PNM static int stbi__pnm_test(stbi__context *s); -static stbi_uc 
*stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp); +static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp); +static int stbi__pnm_is16(stbi__context *s); #endif -// this is not threadsafe -static const char *stbi__g_failure_reason; +static +#ifdef STBI_THREAD_LOCAL +STBI_THREAD_LOCAL +#endif +const char *stbi__g_failure_reason; STBIDEF const char *stbi_failure_reason(void) { return stbi__g_failure_reason; } +#ifndef STBI_NO_FAILURE_STRINGS static int stbi__err(const char *str) { stbi__g_failure_reason = str; return 0; } +#endif static void *stbi__malloc(size_t size) { return STBI_MALLOC(size); } +// stb_image uses ints pervasively, including for offset calculations. +// therefore the largest decoded image size we can support with the +// current code, even on 64-bit targets, is INT_MAX. this is not a +// significant limitation for the intended use case. +// +// we do, however, need to make sure our size calculations don't +// overflow. hence a few helper functions for size calculations that +// multiply integers together, making sure that they're non-negative +// and no overflow occurs. + +// return 1 if the sum is valid, 0 on overflow. +// negative terms are considered invalid. +static int stbi__addsizes_valid(int a, int b) +{ + if (b < 0) return 0; + // now 0 <= b <= INT_MAX, hence also + // 0 <= INT_MAX - b <= INTMAX. + // And "a + b <= INT_MAX" (which might overflow) is the + // same as a <= INT_MAX - b (no overflow) + return a <= INT_MAX - b; +} + +// returns 1 if the product is valid, 0 on overflow. +// negative factors are considered invalid. +static int stbi__mul2sizes_valid(int a, int b) +{ + if (a < 0 || b < 0) return 0; + if (b == 0) return 1; // mul-by-0 is always safe + // portable way to check for no overflows in a*b + return a <= INT_MAX/b; +} + +#if !defined(STBI_NO_JPEG) || !defined(STBI_NO_PNG) || !defined(STBI_NO_TGA) || !defined(STBI_NO_HDR) +// returns 1 if "a*b + add" has no negative terms/factors and doesn't overflow +static int stbi__mad2sizes_valid(int a, int b, int add) +{ + return stbi__mul2sizes_valid(a, b) && stbi__addsizes_valid(a*b, add); +} +#endif + +// returns 1 if "a*b*c + add" has no negative terms/factors and doesn't overflow +static int stbi__mad3sizes_valid(int a, int b, int c, int add) +{ + return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a*b, c) && + stbi__addsizes_valid(a*b*c, add); +} + +// returns 1 if "a*b*c*d + add" has no negative terms/factors and doesn't overflow +#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) || !defined(STBI_NO_PNM) +static int stbi__mad4sizes_valid(int a, int b, int c, int d, int add) +{ + return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a*b, c) && + stbi__mul2sizes_valid(a*b*c, d) && stbi__addsizes_valid(a*b*c*d, add); +} +#endif + +#if !defined(STBI_NO_JPEG) || !defined(STBI_NO_PNG) || !defined(STBI_NO_TGA) || !defined(STBI_NO_HDR) +// mallocs with size overflow checking +static void *stbi__malloc_mad2(int a, int b, int add) +{ + if (!stbi__mad2sizes_valid(a, b, add)) return NULL; + return stbi__malloc(a*b + add); +} +#endif + +static void *stbi__malloc_mad3(int a, int b, int c, int add) +{ + if (!stbi__mad3sizes_valid(a, b, c, add)) return NULL; + return stbi__malloc(a*b*c + add); +} + +#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) || !defined(STBI_NO_PNM) +static void *stbi__malloc_mad4(int a, int b, int c, int d, int add) +{ + if 
(!stbi__mad4sizes_valid(a, b, c, d, add)) return NULL; + return stbi__malloc(a*b*c*d + add); +} +#endif + // stbi__err - error // stbi__errpf - error returning pointer to float // stbi__errpuc - error returning pointer to unsigned char @@ -928,40 +1091,69 @@ static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp); static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp); #endif -static int stbi__vertically_flip_on_load = 0; +static int stbi__vertically_flip_on_load_global = 0; STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip) { - stbi__vertically_flip_on_load = flag_true_if_should_flip; + stbi__vertically_flip_on_load_global = flag_true_if_should_flip; } -static unsigned char *stbi__load_main(stbi__context *s, int *x, int *y, int *comp, int req_comp) +#ifndef STBI_THREAD_LOCAL +#define stbi__vertically_flip_on_load stbi__vertically_flip_on_load_global +#else +static STBI_THREAD_LOCAL int stbi__vertically_flip_on_load_local, stbi__vertically_flip_on_load_set; + +STBIDEF void stbi_set_flip_vertically_on_load_thread(int flag_true_if_should_flip) { - #ifndef STBI_NO_JPEG - if (stbi__jpeg_test(s)) return stbi__jpeg_load(s,x,y,comp,req_comp); - #endif + stbi__vertically_flip_on_load_local = flag_true_if_should_flip; + stbi__vertically_flip_on_load_set = 1; +} + +#define stbi__vertically_flip_on_load (stbi__vertically_flip_on_load_set \ + ? stbi__vertically_flip_on_load_local \ + : stbi__vertically_flip_on_load_global) +#endif // STBI_THREAD_LOCAL + +static void *stbi__load_main(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc) +{ + memset(ri, 0, sizeof(*ri)); // make sure it's initialized if we add new fields + ri->bits_per_channel = 8; // default is 8 so most paths don't have to be changed + ri->channel_order = STBI_ORDER_RGB; // all current input & output are this, but this is here so we can add BGR order + ri->num_channels = 0; + + // test the formats with a very explicit header first (at least a FOURCC + // or distinctive magic number first) #ifndef STBI_NO_PNG - if (stbi__png_test(s)) return stbi__png_load(s,x,y,comp,req_comp); + if (stbi__png_test(s)) return stbi__png_load(s,x,y,comp,req_comp, ri); #endif #ifndef STBI_NO_BMP - if (stbi__bmp_test(s)) return stbi__bmp_load(s,x,y,comp,req_comp); + if (stbi__bmp_test(s)) return stbi__bmp_load(s,x,y,comp,req_comp, ri); #endif #ifndef STBI_NO_GIF - if (stbi__gif_test(s)) return stbi__gif_load(s,x,y,comp,req_comp); + if (stbi__gif_test(s)) return stbi__gif_load(s,x,y,comp,req_comp, ri); #endif #ifndef STBI_NO_PSD - if (stbi__psd_test(s)) return stbi__psd_load(s,x,y,comp,req_comp); + if (stbi__psd_test(s)) return stbi__psd_load(s,x,y,comp,req_comp, ri, bpc); + #else + STBI_NOTUSED(bpc); #endif #ifndef STBI_NO_PIC - if (stbi__pic_test(s)) return stbi__pic_load(s,x,y,comp,req_comp); + if (stbi__pic_test(s)) return stbi__pic_load(s,x,y,comp,req_comp, ri); + #endif + + // then the formats that can end up attempting to load with just 1 or 2 + // bytes matching expectations; these are prone to false positives, so + // try them later + #ifndef STBI_NO_JPEG + if (stbi__jpeg_test(s)) return stbi__jpeg_load(s,x,y,comp,req_comp, ri); #endif #ifndef STBI_NO_PNM - if (stbi__pnm_test(s)) return stbi__pnm_load(s,x,y,comp,req_comp); + if (stbi__pnm_test(s)) return stbi__pnm_load(s,x,y,comp,req_comp, ri); #endif #ifndef STBI_NO_HDR if (stbi__hdr_test(s)) { - float *hdr = stbi__hdr_load(s, x,y,comp,req_comp); + float *hdr = stbi__hdr_load(s, x,y,comp,req_comp, ri); return 
stbi__hdr_to_ldr(hdr, *x, *y, req_comp ? req_comp : *comp); } #endif @@ -969,66 +1161,179 @@ static unsigned char *stbi__load_main(stbi__context *s, int *x, int *y, int *com #ifndef STBI_NO_TGA // test tga last because it's a crappy test! if (stbi__tga_test(s)) - return stbi__tga_load(s,x,y,comp,req_comp); + return stbi__tga_load(s,x,y,comp,req_comp, ri); #endif return stbi__errpuc("unknown image type", "Image not of any known type, or corrupt"); } -static unsigned char *stbi__load_flip(stbi__context *s, int *x, int *y, int *comp, int req_comp) +static stbi_uc *stbi__convert_16_to_8(stbi__uint16 *orig, int w, int h, int channels) { - unsigned char *result = stbi__load_main(s, x, y, comp, req_comp); + int i; + int img_len = w * h * channels; + stbi_uc *reduced; - if (stbi__vertically_flip_on_load && result != NULL) { - int w = *x, h = *y; - int depth = req_comp ? req_comp : *comp; - int row,col,z; - stbi_uc temp; - - // @OPTIMIZE: use a bigger temp buffer and memcpy multiple pixels at once - for (row = 0; row < (h>>1); row++) { - for (col = 0; col < w; col++) { - for (z = 0; z < depth; z++) { - temp = result[(row * w + col) * depth + z]; - result[(row * w + col) * depth + z] = result[((h - row - 1) * w + col) * depth + z]; - result[((h - row - 1) * w + col) * depth + z] = temp; - } - } + reduced = (stbi_uc *) stbi__malloc(img_len); + if (reduced == NULL) return stbi__errpuc("outofmem", "Out of memory"); + + for (i = 0; i < img_len; ++i) + reduced[i] = (stbi_uc)((orig[i] >> 8) & 0xFF); // top half of each byte is sufficient approx of 16->8 bit scaling + + STBI_FREE(orig); + return reduced; +} + +static stbi__uint16 *stbi__convert_8_to_16(stbi_uc *orig, int w, int h, int channels) +{ + int i; + int img_len = w * h * channels; + stbi__uint16 *enlarged; + + enlarged = (stbi__uint16 *) stbi__malloc(img_len*2); + if (enlarged == NULL) return (stbi__uint16 *) stbi__errpuc("outofmem", "Out of memory"); + + for (i = 0; i < img_len; ++i) + enlarged[i] = (stbi__uint16)((orig[i] << 8) + orig[i]); // replicate to high and low byte, maps 0->0, 255->0xffff + + STBI_FREE(orig); + return enlarged; +} + +static void stbi__vertical_flip(void *image, int w, int h, int bytes_per_pixel) +{ + int row; + size_t bytes_per_row = (size_t)w * bytes_per_pixel; + stbi_uc temp[2048]; + stbi_uc *bytes = (stbi_uc *)image; + + for (row = 0; row < (h>>1); row++) { + stbi_uc *row0 = bytes + row*bytes_per_row; + stbi_uc *row1 = bytes + (h - row - 1)*bytes_per_row; + // swap row0 with row1 + size_t bytes_left = bytes_per_row; + while (bytes_left) { + size_t bytes_copy = (bytes_left < sizeof(temp)) ? bytes_left : sizeof(temp); + memcpy(temp, row0, bytes_copy); + memcpy(row0, row1, bytes_copy); + memcpy(row1, temp, bytes_copy); + row0 += bytes_copy; + row1 += bytes_copy; + bytes_left -= bytes_copy; } } +} - return result; +#ifndef STBI_NO_GIF +static void stbi__vertical_flip_slices(void *image, int w, int h, int z, int bytes_per_pixel) +{ + int slice; + int slice_size = w * h * bytes_per_pixel; + + stbi_uc *bytes = (stbi_uc *)image; + for (slice = 0; slice < z; ++slice) { + stbi__vertical_flip(bytes, w, h, bytes_per_pixel); + bytes += slice_size; + } } +#endif -#ifndef STBI_NO_HDR +static unsigned char *stbi__load_and_postprocess_8bit(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + stbi__result_info ri; + void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 8); + + if (result == NULL) + return NULL; + + // it is the responsibility of the loaders to make sure we get either 8 or 16 bit. 
+ STBI_ASSERT(ri.bits_per_channel == 8 || ri.bits_per_channel == 16); + + if (ri.bits_per_channel != 8) { + result = stbi__convert_16_to_8((stbi__uint16 *) result, *x, *y, req_comp == 0 ? *comp : req_comp); + ri.bits_per_channel = 8; + } + + // @TODO: move stbi__convert_format to here + + if (stbi__vertically_flip_on_load) { + int channels = req_comp ? req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(stbi_uc)); + } + + return (unsigned char *) result; +} + +static stbi__uint16 *stbi__load_and_postprocess_16bit(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + stbi__result_info ri; + void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 16); + + if (result == NULL) + return NULL; + + // it is the responsibility of the loaders to make sure we get either 8 or 16 bit. + STBI_ASSERT(ri.bits_per_channel == 8 || ri.bits_per_channel == 16); + + if (ri.bits_per_channel != 16) { + result = stbi__convert_8_to_16((stbi_uc *) result, *x, *y, req_comp == 0 ? *comp : req_comp); + ri.bits_per_channel = 16; + } + + // @TODO: move stbi__convert_format16 to here + // @TODO: special case RGB-to-Y (and RGBA-to-YA) for 8-bit-to-16-bit case to keep more precision + + if (stbi__vertically_flip_on_load) { + int channels = req_comp ? req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(stbi__uint16)); + } + + return (stbi__uint16 *) result; +} + +#if !defined(STBI_NO_HDR) && !defined(STBI_NO_LINEAR) static void stbi__float_postprocess(float *result, int *x, int *y, int *comp, int req_comp) { if (stbi__vertically_flip_on_load && result != NULL) { - int w = *x, h = *y; - int depth = req_comp ? req_comp : *comp; - int row,col,z; - float temp; - - // @OPTIMIZE: use a bigger temp buffer and memcpy multiple pixels at once - for (row = 0; row < (h>>1); row++) { - for (col = 0; col < w; col++) { - for (z = 0; z < depth; z++) { - temp = result[(row * w + col) * depth + z]; - result[(row * w + col) * depth + z] = result[((h - row - 1) * w + col) * depth + z]; - result[((h - row - 1) * w + col) * depth + z] = temp; - } - } - } + int channels = req_comp ? 
req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(float)); } } #endif #ifndef STBI_NO_STDIO +#if defined(_WIN32) && defined(STBI_WINDOWS_UTF8) +STBI_EXTERN __declspec(dllimport) int __stdcall MultiByteToWideChar(unsigned int cp, unsigned long flags, const char *str, int cbmb, wchar_t *widestr, int cchwide); +STBI_EXTERN __declspec(dllimport) int __stdcall WideCharToMultiByte(unsigned int cp, unsigned long flags, const wchar_t *widestr, int cchwide, char *str, int cbmb, const char *defchar, int *used_default); +#endif + +#if defined(_WIN32) && defined(STBI_WINDOWS_UTF8) +STBIDEF int stbi_convert_wchar_to_utf8(char *buffer, size_t bufferlen, const wchar_t* input) +{ + return WideCharToMultiByte(65001 /* UTF8 */, 0, input, -1, buffer, (int) bufferlen, NULL, NULL); +} +#endif + static FILE *stbi__fopen(char const *filename, char const *mode) { FILE *f; +#if defined(_WIN32) && defined(STBI_WINDOWS_UTF8) + wchar_t wMode[64]; + wchar_t wFilename[1024]; + if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, filename, -1, wFilename, sizeof(wFilename)/sizeof(*wFilename))) + return 0; + + if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, mode, -1, wMode, sizeof(wMode)/sizeof(*wMode))) + return 0; + #if defined(_MSC_VER) && _MSC_VER >= 1400 + if (0 != _wfopen_s(&f, wFilename, wMode)) + f = 0; +#else + f = _wfopen(wFilename, wMode); +#endif + +#elif defined(_MSC_VER) && _MSC_VER >= 1400 if (0 != fopen_s(&f, filename, mode)) f=0; #else @@ -1053,42 +1358,98 @@ STBIDEF stbi_uc *stbi_load_from_file(FILE *f, int *x, int *y, int *comp, int req unsigned char *result; stbi__context s; stbi__start_file(&s,f); - result = stbi__load_flip(&s,x,y,comp,req_comp); + result = stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp); + if (result) { + // need to 'unget' all the characters in the IO buffer + fseek(f, - (int) (s.img_buffer_end - s.img_buffer), SEEK_CUR); + } + return result; +} + +STBIDEF stbi__uint16 *stbi_load_from_file_16(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + stbi__uint16 *result; + stbi__context s; + stbi__start_file(&s,f); + result = stbi__load_and_postprocess_16bit(&s,x,y,comp,req_comp); if (result) { // need to 'unget' all the characters in the IO buffer fseek(f, - (int) (s.img_buffer_end - s.img_buffer), SEEK_CUR); } return result; } + +STBIDEF stbi_us *stbi_load_16(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + stbi__uint16 *result; + if (!f) return (stbi_us *) stbi__errpuc("can't fopen", "Unable to open file"); + result = stbi_load_from_file_16(f,x,y,comp,req_comp); + fclose(f); + return result; +} + + #endif //!STBI_NO_STDIO +STBIDEF stbi_us *stbi_load_16_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__load_and_postprocess_16bit(&s,x,y,channels_in_file,desired_channels); +} + +STBIDEF stbi_us *stbi_load_16_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *)clbk, user); + return stbi__load_and_postprocess_16bit(&s,x,y,channels_in_file,desired_channels); +} + STBIDEF stbi_uc *stbi_load_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp) { stbi__context s; stbi__start_mem(&s,buffer,len); - return stbi__load_flip(&s,x,y,comp,req_comp); + return stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp); } STBIDEF 
stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp) { stbi__context s; stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user); - return stbi__load_flip(&s,x,y,comp,req_comp); + return stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp); } +#ifndef STBI_NO_GIF +STBIDEF stbi_uc *stbi_load_gif_from_memory(stbi_uc const *buffer, int len, int **delays, int *x, int *y, int *z, int *comp, int req_comp) +{ + unsigned char *result; + stbi__context s; + stbi__start_mem(&s,buffer,len); + + result = (unsigned char*) stbi__load_gif_main(&s, delays, x, y, z, comp, req_comp); + if (stbi__vertically_flip_on_load) { + stbi__vertical_flip_slices( result, *x, *y, *z, *comp ); + } + + return result; +} +#endif + #ifndef STBI_NO_LINEAR static float *stbi__loadf_main(stbi__context *s, int *x, int *y, int *comp, int req_comp) { unsigned char *data; #ifndef STBI_NO_HDR if (stbi__hdr_test(s)) { - float *hdr_data = stbi__hdr_load(s,x,y,comp,req_comp); + stbi__result_info ri; + float *hdr_data = stbi__hdr_load(s,x,y,comp,req_comp, &ri); if (hdr_data) stbi__float_postprocess(hdr_data,x,y,comp,req_comp); return hdr_data; } #endif - data = stbi__load_flip(s, x, y, comp, req_comp); + data = stbi__load_and_postprocess_8bit(s, x, y, comp, req_comp); if (data) return stbi__ldr_to_hdr(data, *x, *y, req_comp ? req_comp : *comp); return stbi__errpf("unknown image type", "Image not of any known type, or corrupt"); @@ -1158,12 +1519,16 @@ STBIDEF int stbi_is_hdr (char const *filename) return result; } -STBIDEF int stbi_is_hdr_from_file(FILE *f) +STBIDEF int stbi_is_hdr_from_file(FILE *f) { #ifndef STBI_NO_HDR + long pos = ftell(f); + int res; stbi__context s; stbi__start_file(&s,f); - return stbi__hdr_test(&s); + res = stbi__hdr_test(&s); + fseek(f, pos, SEEK_SET); + return res; #else STBI_NOTUSED(f); return 0; @@ -1212,6 +1577,7 @@ enum static void stbi__refill_buffer(stbi__context *s) { int n = (s->io.read)(s->io_user_data,(char*)s->buffer_start,s->buflen); + s->callback_already_read += (int) (s->img_buffer - s->img_buffer_original); if (n == 0) { // at end of file, treat same as if from memory, but need to handle case // where s->img_buffer isn't pointing to safe memory, e.g. 0-byte file @@ -1236,6 +1602,9 @@ stbi_inline static stbi_uc stbi__get8(stbi__context *s) return 0; } +#if defined(STBI_NO_JPEG) && defined(STBI_NO_HDR) && defined(STBI_NO_PIC) && defined(STBI_NO_PNM) +// nothing +#else stbi_inline static int stbi__at_eof(stbi__context *s) { if (s->io.read) { @@ -1247,9 +1616,14 @@ stbi_inline static int stbi__at_eof(stbi__context *s) return s->img_buffer >= s->img_buffer_end; } +#endif +#if defined(STBI_NO_JPEG) && defined(STBI_NO_PNG) && defined(STBI_NO_BMP) && defined(STBI_NO_PSD) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) && defined(STBI_NO_PIC) +// nothing +#else static void stbi__skip(stbi__context *s, int n) { + if (n == 0) return; // already there! 
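/*
 * Usage sketch for the stbi_load_gif_from_memory API added above: all frames
 * are decoded into one contiguous buffer (z frames of x*y pixels) and, if
 * requested, a per-frame delay array is returned. gif_bytes/gif_len are
 * placeholders for a .gif file already read into memory; the other calls are
 * public stb_image functions.
 */
#include <stdio.h>
#include "stb_image.h"

static void demo_dump_gif_frames(const unsigned char *gif_bytes, int gif_len)
{
    int *delays = NULL;
    int w = 0, h = 0, frames = 0, comp = 0;
    unsigned char *pixels = stbi_load_gif_from_memory(gif_bytes, gif_len, &delays,
                                                      &w, &h, &frames, &comp, 4);
    if (!pixels) {
        fprintf(stderr, "gif decode failed: %s\n", stbi_failure_reason());
        return;
    }
    for (int i = 0; i < frames; ++i)
        printf("frame %d: %dx%d, delay value %d\n", i, w, h, delays ? delays[i] : 0);
    stbi_image_free(pixels);
    stbi_image_free(delays);   /* delays comes from the same allocator */
}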
if (n < 0) { s->img_buffer = s->img_buffer_end; return; @@ -1264,7 +1638,11 @@ static void stbi__skip(stbi__context *s, int n) } s->img_buffer += n; } +#endif +#if defined(STBI_NO_PNG) && defined(STBI_NO_TGA) && defined(STBI_NO_HDR) && defined(STBI_NO_PNM) +// nothing +#else static int stbi__getn(stbi__context *s, stbi_uc *buffer, int n) { if (s->io.read) { @@ -1288,18 +1666,27 @@ static int stbi__getn(stbi__context *s, stbi_uc *buffer, int n) } else return 0; } +#endif +#if defined(STBI_NO_JPEG) && defined(STBI_NO_PNG) && defined(STBI_NO_PSD) && defined(STBI_NO_PIC) +// nothing +#else static int stbi__get16be(stbi__context *s) { int z = stbi__get8(s); return (z << 8) + stbi__get8(s); } +#endif +#if defined(STBI_NO_PNG) && defined(STBI_NO_PSD) && defined(STBI_NO_PIC) +// nothing +#else static stbi__uint32 stbi__get32be(stbi__context *s) { stbi__uint32 z = stbi__get16be(s); return (z << 16) + stbi__get16be(s); } +#endif #if defined(STBI_NO_BMP) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) // nothing @@ -1315,13 +1702,16 @@ static int stbi__get16le(stbi__context *s) static stbi__uint32 stbi__get32le(stbi__context *s) { stbi__uint32 z = stbi__get16le(s); - return z + (stbi__get16le(s) << 16); + z += (stbi__uint32)stbi__get16le(s) << 16; + return z; } #endif #define STBI__BYTECAST(x) ((stbi_uc) ((x) & 255)) // truncate int to byte without warnings - +#if defined(STBI_NO_JPEG) && defined(STBI_NO_PNG) && defined(STBI_NO_BMP) && defined(STBI_NO_PSD) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) && defined(STBI_NO_PIC) && defined(STBI_NO_PNM) +// nothing +#else ////////////////////////////////////////////////////////////////////////////// // // generic converter from built-in img_n to req_comp @@ -1337,7 +1727,11 @@ static stbi_uc stbi__compute_y(int r, int g, int b) { return (stbi_uc) (((r*77) + (g*150) + (29*b)) >> 8); } +#endif +#if defined(STBI_NO_PNG) && defined(STBI_NO_BMP) && defined(STBI_NO_PSD) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) && defined(STBI_NO_PIC) && defined(STBI_NO_PNM) +// nothing +#else static unsigned char *stbi__convert_format(unsigned char *data, int img_n, int req_comp, unsigned int x, unsigned int y) { int i,j; @@ -1346,7 +1740,7 @@ static unsigned char *stbi__convert_format(unsigned char *data, int img_n, int r if (req_comp == img_n) return data; STBI_ASSERT(req_comp >= 1 && req_comp <= 4); - good = (unsigned char *) stbi__malloc(req_comp * x * y); + good = (unsigned char *) stbi__malloc_mad3(req_comp, x, y, 0); if (good == NULL) { STBI_FREE(data); return stbi__errpuc("outofmem", "Out of memory"); @@ -1356,37 +1750,97 @@ static unsigned char *stbi__convert_format(unsigned char *data, int img_n, int r unsigned char *src = data + j * x * img_n ; unsigned char *dest = good + j * x * req_comp; - #define COMBO(a,b) ((a)*8+(b)) - #define CASE(a,b) case COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b) + #define STBI__COMBO(a,b) ((a)*8+(b)) + #define STBI__CASE(a,b) case STBI__COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b) // convert source image with img_n components to one with req_comp components; // avoid switch per pixel, so use switch per scanline and massive macros - switch (COMBO(img_n, req_comp)) { - CASE(1,2) dest[0]=src[0], dest[1]=255; break; - CASE(1,3) dest[0]=dest[1]=dest[2]=src[0]; break; - CASE(1,4) dest[0]=dest[1]=dest[2]=src[0], dest[3]=255; break; - CASE(2,1) dest[0]=src[0]; break; - CASE(2,3) dest[0]=dest[1]=dest[2]=src[0]; break; - CASE(2,4) dest[0]=dest[1]=dest[2]=src[0], dest[3]=src[1]; break; - CASE(3,4) 
dest[0]=src[0],dest[1]=src[1],dest[2]=src[2],dest[3]=255; break; - CASE(3,1) dest[0]=stbi__compute_y(src[0],src[1],src[2]); break; - CASE(3,2) dest[0]=stbi__compute_y(src[0],src[1],src[2]), dest[1] = 255; break; - CASE(4,1) dest[0]=stbi__compute_y(src[0],src[1],src[2]); break; - CASE(4,2) dest[0]=stbi__compute_y(src[0],src[1],src[2]), dest[1] = src[3]; break; - CASE(4,3) dest[0]=src[0],dest[1]=src[1],dest[2]=src[2]; break; - default: STBI_ASSERT(0); + switch (STBI__COMBO(img_n, req_comp)) { + STBI__CASE(1,2) { dest[0]=src[0]; dest[1]=255; } break; + STBI__CASE(1,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(1,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=255; } break; + STBI__CASE(2,1) { dest[0]=src[0]; } break; + STBI__CASE(2,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(2,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=src[1]; } break; + STBI__CASE(3,4) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2];dest[3]=255; } break; + STBI__CASE(3,1) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); } break; + STBI__CASE(3,2) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); dest[1] = 255; } break; + STBI__CASE(4,1) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); } break; + STBI__CASE(4,2) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); dest[1] = src[3]; } break; + STBI__CASE(4,3) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2]; } break; + default: STBI_ASSERT(0); STBI_FREE(data); STBI_FREE(good); return stbi__errpuc("unsupported", "Unsupported format conversion"); } - #undef CASE + #undef STBI__CASE } STBI_FREE(data); return good; } +#endif + +#if defined(STBI_NO_PNG) && defined(STBI_NO_PSD) +// nothing +#else +static stbi__uint16 stbi__compute_y_16(int r, int g, int b) +{ + return (stbi__uint16) (((r*77) + (g*150) + (29*b)) >> 8); +} +#endif + +#if defined(STBI_NO_PNG) && defined(STBI_NO_PSD) +// nothing +#else +static stbi__uint16 *stbi__convert_format16(stbi__uint16 *data, int img_n, int req_comp, unsigned int x, unsigned int y) +{ + int i,j; + stbi__uint16 *good; + + if (req_comp == img_n) return data; + STBI_ASSERT(req_comp >= 1 && req_comp <= 4); + + good = (stbi__uint16 *) stbi__malloc(req_comp * x * y * 2); + if (good == NULL) { + STBI_FREE(data); + return (stbi__uint16 *) stbi__errpuc("outofmem", "Out of memory"); + } + + for (j=0; j < (int) y; ++j) { + stbi__uint16 *src = data + j * x * img_n ; + stbi__uint16 *dest = good + j * x * req_comp; + + #define STBI__COMBO(a,b) ((a)*8+(b)) + #define STBI__CASE(a,b) case STBI__COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b) + // convert source image with img_n components to one with req_comp components; + // avoid switch per pixel, so use switch per scanline and massive macros + switch (STBI__COMBO(img_n, req_comp)) { + STBI__CASE(1,2) { dest[0]=src[0]; dest[1]=0xffff; } break; + STBI__CASE(1,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(1,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=0xffff; } break; + STBI__CASE(2,1) { dest[0]=src[0]; } break; + STBI__CASE(2,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(2,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=src[1]; } break; + STBI__CASE(3,4) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2];dest[3]=0xffff; } break; + STBI__CASE(3,1) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); } break; + STBI__CASE(3,2) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); dest[1] = 0xffff; } break; + STBI__CASE(4,1) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); } break; + STBI__CASE(4,2) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); dest[1] = src[3]; 
} break; + STBI__CASE(4,3) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2]; } break; + default: STBI_ASSERT(0); STBI_FREE(data); STBI_FREE(good); return (stbi__uint16*) stbi__errpuc("unsupported", "Unsupported format conversion"); + } + #undef STBI__CASE + } + + STBI_FREE(data); + return good; +} +#endif #ifndef STBI_NO_LINEAR static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp) { int i,k,n; - float *output = (float *) stbi__malloc(x * y * comp * sizeof(float)); + float *output; + if (!data) return NULL; + output = (float *) stbi__malloc_mad4(x, y, comp, sizeof(float), 0); if (output == NULL) { STBI_FREE(data); return stbi__errpf("outofmem", "Out of memory"); } // compute number of non-alpha components if (comp & 1) n = comp; else n = comp-1; @@ -1394,7 +1848,11 @@ static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp) for (k=0; k < n; ++k) { output[i*comp + k] = (float) (pow(data[i*comp+k]/255.0f, stbi__l2h_gamma) * stbi__l2h_scale); } - if (k < comp) output[i*comp + k] = data[i*comp+k]/255.0f; + } + if (n < comp) { + for (i=0; i < x*y; ++i) { + output[i*comp + n] = data[i*comp + n]/255.0f; + } } STBI_FREE(data); return output; @@ -1406,7 +1864,9 @@ static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp) static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp) { int i,k,n; - stbi_uc *output = (stbi_uc *) stbi__malloc(x * y * comp); + stbi_uc *output; + if (!data) return NULL; + output = (stbi_uc *) stbi__malloc_mad3(x, y, comp, 0); if (output == NULL) { STBI_FREE(data); return stbi__errpuc("outofmem", "Out of memory"); } // compute number of non-alpha components if (comp & 1) n = comp; else n = comp-1; @@ -1471,7 +1931,7 @@ typedef struct stbi__context *s; stbi__huffman huff_dc[4]; stbi__huffman huff_ac[4]; - stbi_uc dequant[4][64]; + stbi__uint16 dequant[4][64]; stbi__int16 fast_ac[4][1 << FAST_BITS]; // sizes for components, interleaved MCUs @@ -1507,6 +1967,8 @@ typedef struct int succ_high; int succ_low; int eob_run; + int jfif; + int app14_color_transform; // Adobe APP14 tag int rgb; int scan_n, order[4]; @@ -1520,7 +1982,8 @@ typedef struct static int stbi__build_huffman(stbi__huffman *h, int *count) { - int i,j,k=0,code; + int i,j,k=0; + unsigned int code; // build size list for each symbol (from JPEG spec) for (i=0; i < 16; ++i) for (j=0; j < count[i]; ++j) @@ -1536,7 +1999,7 @@ static int stbi__build_huffman(stbi__huffman *h, int *count) if (h->size[k] == j) { while (h->size[k] == j) h->code[k++] = (stbi__uint16) (code++); - if (code-1 >= (1 << j)) return stbi__err("bad code lengths","Corrupt JPEG"); + if (code-1 >= (1u << j)) return stbi__err("bad code lengths","Corrupt JPEG"); } // compute largest code + 1 for this size, preshifted as needed later h->maxcode[j] = code << (16-j); @@ -1577,10 +2040,10 @@ static void stbi__build_fast_ac(stbi__int16 *fast_ac, stbi__huffman *h) // magnitude code followed by receive_extend code int k = ((i << len) & ((1 << FAST_BITS) - 1)) >> (FAST_BITS - magbits); int m = 1 << (magbits - 1); - if (k < m) k += (-1 << magbits) + 1; + if (k < m) k += (~0U << magbits) + 1; // if the result is small enough, we can fit it in fast_ac table if (k >= -128 && k <= 127) - fast_ac[i] = (stbi__int16) ((k << 8) + (run << 4) + (len + magbits)); + fast_ac[i] = (stbi__int16) ((k * 256) + (run * 16) + (len + magbits)); } } } @@ -1589,9 +2052,10 @@ static void stbi__build_fast_ac(stbi__int16 *fast_ac, stbi__huffman *h) static void stbi__grow_buffer_unsafe(stbi__jpeg *j) { do { - int b = j->nomore ? 
0 : stbi__get8(j->s); + unsigned int b = j->nomore ? 0 : stbi__get8(j->s); if (b == 0xff) { int c = stbi__get8(j->s); + while (c == 0xff) c = stbi__get8(j->s); // consume fill bytes if (c != 0) { j->marker = (unsigned char) c; j->nomore = 1; @@ -1604,7 +2068,7 @@ static void stbi__grow_buffer_unsafe(stbi__jpeg *j) } // (1 << n) - 1 -static stbi__uint32 stbi__bmask[17]={0,1,3,7,15,31,63,127,255,511,1023,2047,4095,8191,16383,32767,65535}; +static const stbi__uint32 stbi__bmask[17]={0,1,3,7,15,31,63,127,255,511,1023,2047,4095,8191,16383,32767,65535}; // decode a jpeg huffman value from the bitstream stbi_inline static int stbi__jpeg_huff_decode(stbi__jpeg *j, stbi__huffman *h) @@ -1657,7 +2121,7 @@ stbi_inline static int stbi__jpeg_huff_decode(stbi__jpeg *j, stbi__huffman *h) } // bias[n] = (-1<code_bits < n) stbi__grow_buffer_unsafe(j); - sgn = (stbi__int32)j->code_buffer >> 31; // sign bit is always in MSB + sgn = j->code_buffer >> 31; // sign bit always in MSB; 0 if MSB clear (positive), 1 if MSB set (negative) k = stbi_lrot(j->code_buffer, n); - STBI_ASSERT(n >= 0 && n < (int) (sizeof(stbi__bmask)/sizeof(*stbi__bmask))); j->code_buffer = k & ~stbi__bmask[n]; k &= stbi__bmask[n]; j->code_bits -= n; - return k + (stbi__jbias[n] & ~sgn); + return k + (stbi__jbias[n] & (sgn - 1)); } // get some unsigned bits @@ -1700,7 +2163,7 @@ stbi_inline static int stbi__jpeg_get_bit(stbi__jpeg *j) // given a value that's at position X in the zigzag stream, // where does it appear in the 8x8 matrix coded as row-major? -static stbi_uc stbi__jpeg_dezigzag[64+15] = +static const stbi_uc stbi__jpeg_dezigzag[64+15] = { 0, 1, 8, 16, 9, 2, 3, 10, 17, 24, 32, 25, 18, 11, 4, 5, @@ -1716,14 +2179,14 @@ static stbi_uc stbi__jpeg_dezigzag[64+15] = }; // decode one 64-entry block-- -static int stbi__jpeg_decode_block(stbi__jpeg *j, short data[64], stbi__huffman *hdc, stbi__huffman *hac, stbi__int16 *fac, int b, stbi_uc *dequant) +static int stbi__jpeg_decode_block(stbi__jpeg *j, short data[64], stbi__huffman *hdc, stbi__huffman *hac, stbi__int16 *fac, int b, stbi__uint16 *dequant) { int diff,dc,k; int t; if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); t = stbi__jpeg_huff_decode(j, hdc); - if (t < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + if (t < 0 || t > 15) return stbi__err("bad huffman code","Corrupt JPEG"); // 0 all the ac values now so we can do it 32-bits at a time memset(data,0,64*sizeof(data[0])); @@ -1780,11 +2243,12 @@ static int stbi__jpeg_decode_block_prog_dc(stbi__jpeg *j, short data[64], stbi__ // first scan for DC coefficient, must be first memset(data,0,64*sizeof(data[0])); // 0 all the ac values now t = stbi__jpeg_huff_decode(j, hdc); + if (t < 0 || t > 15) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); diff = t ? 
stbi__extend_receive(j, t) : 0; dc = j->img_comp[b].dc_pred + diff; j->img_comp[b].dc_pred = dc; - data[0] = (short) (dc << j->succ_low); + data[0] = (short) (dc * (1 << j->succ_low)); } else { // refinement scan for DC coefficient if (stbi__jpeg_get_bit(j)) @@ -1821,7 +2285,7 @@ static int stbi__jpeg_decode_block_prog_ac(stbi__jpeg *j, short data[64], stbi__ j->code_buffer <<= s; j->code_bits -= s; zig = stbi__jpeg_dezigzag[k++]; - data[zig] = (short) ((r >> 8) << shift); + data[zig] = (short) ((r >> 8) * (1 << shift)); } else { int rs = stbi__jpeg_huff_decode(j, hac); if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); @@ -1839,7 +2303,7 @@ static int stbi__jpeg_decode_block_prog_ac(stbi__jpeg *j, short data[64], stbi__ } else { k += r; zig = stbi__jpeg_dezigzag[k++]; - data[zig] = (short) (stbi__extend_receive(j,s) << shift); + data[zig] = (short) (stbi__extend_receive(j,s) * (1 << shift)); } } } while (k <= j->spec_end); @@ -1926,7 +2390,7 @@ stbi_inline static stbi_uc stbi__clamp(int x) } #define stbi__f2f(x) ((int) (((x) * 4096 + 0.5))) -#define stbi__fsh(x) ((x) << 12) +#define stbi__fsh(x) ((x) * 4096) // derived from jidctint -- DCT_ISLOW #define STBI__IDCT_1D(s0,s1,s2,s3,s4,s5,s6,s7) \ @@ -1981,7 +2445,7 @@ static void stbi__idct_block(stbi_uc *out, int out_stride, short data[64]) // (1|2|3|4|5|6|7)==0 0 seconds // all separate -0.047 seconds // 1 && 2|3 && 4|5 && 6|7: -0.047 seconds - int dcterm = d[0] << 2; + int dcterm = d[0]*4; v[0] = v[8] = v[16] = v[24] = v[32] = v[40] = v[48] = v[56] = dcterm; } else { STBI__IDCT_1D(d[ 0],d[ 8],d[16],d[24],d[32],d[40],d[48],d[56]) @@ -2425,7 +2889,7 @@ static stbi_uc stbi__get_marker(stbi__jpeg *j) x = stbi__get8(j->s); if (x != 0xff) return STBI__MARKER_none; while (x == 0xff) - x = stbi__get8(j->s); + x = stbi__get8(j->s); // consume repeated 0xff fill bytes return x; } @@ -2440,7 +2904,7 @@ static void stbi__jpeg_reset(stbi__jpeg *j) j->code_bits = 0; j->code_buffer = 0; j->nomore = 0; - j->img_comp[0].dc_pred = j->img_comp[1].dc_pred = j->img_comp[2].dc_pred = 0; + j->img_comp[0].dc_pred = j->img_comp[1].dc_pred = j->img_comp[2].dc_pred = j->img_comp[3].dc_pred = 0; j->marker = STBI__MARKER_none; j->todo = j->restart_interval ? j->restart_interval : 0x7fffffff; j->eob_run = 0; @@ -2572,7 +3036,7 @@ static int stbi__parse_entropy_coded_data(stbi__jpeg *z) } } -static void stbi__jpeg_dequantize(short *data, stbi_uc *dequant) +static void stbi__jpeg_dequantize(short *data, stbi__uint16 *dequant) { int i; for (i=0; i < 64; ++i) @@ -2614,13 +3078,14 @@ static int stbi__process_marker(stbi__jpeg *z, int m) L = stbi__get16be(z->s)-2; while (L > 0) { int q = stbi__get8(z->s); - int p = q >> 4; + int p = q >> 4, sixteen = (p != 0); int t = q & 15,i; - if (p != 0) return stbi__err("bad DQT type","Corrupt JPEG"); + if (p != 0 && p != 1) return stbi__err("bad DQT type","Corrupt JPEG"); if (t > 3) return stbi__err("bad DQT table","Corrupt JPEG"); + for (i=0; i < 64; ++i) - z->dequant[t][stbi__jpeg_dezigzag[i]] = stbi__get8(z->s); - L -= 65; + z->dequant[t][stbi__jpeg_dezigzag[i]] = (stbi__uint16)(sixteen ? stbi__get16be(z->s) : stbi__get8(z->s)); + L -= (sixteen ? 
129 : 65); } return L==0; @@ -2653,12 +3118,50 @@ static int stbi__process_marker(stbi__jpeg *z, int m) } return L==0; } + // check for comment block or APP blocks if ((m >= 0xE0 && m <= 0xEF) || m == 0xFE) { - stbi__skip(z->s, stbi__get16be(z->s)-2); + L = stbi__get16be(z->s); + if (L < 2) { + if (m == 0xFE) + return stbi__err("bad COM len","Corrupt JPEG"); + else + return stbi__err("bad APP len","Corrupt JPEG"); + } + L -= 2; + + if (m == 0xE0 && L >= 5) { // JFIF APP0 segment + static const unsigned char tag[5] = {'J','F','I','F','\0'}; + int ok = 1; + int i; + for (i=0; i < 5; ++i) + if (stbi__get8(z->s) != tag[i]) + ok = 0; + L -= 5; + if (ok) + z->jfif = 1; + } else if (m == 0xEE && L >= 12) { // Adobe APP14 segment + static const unsigned char tag[6] = {'A','d','o','b','e','\0'}; + int ok = 1; + int i; + for (i=0; i < 6; ++i) + if (stbi__get8(z->s) != tag[i]) + ok = 0; + L -= 6; + if (ok) { + stbi__get8(z->s); // version + stbi__get16be(z->s); // flags0 + stbi__get16be(z->s); // flags1 + z->app14_color_transform = stbi__get8(z->s); // color transform + L -= 6; + } + } + + stbi__skip(z->s, L); return 1; } - return 0; + + return stbi__err("unknown marker","Corrupt JPEG"); } // after we see SOS @@ -2701,6 +3204,28 @@ static int stbi__process_scan_header(stbi__jpeg *z) return 1; } +static int stbi__free_jpeg_components(stbi__jpeg *z, int ncomp, int why) +{ + int i; + for (i=0; i < ncomp; ++i) { + if (z->img_comp[i].raw_data) { + STBI_FREE(z->img_comp[i].raw_data); + z->img_comp[i].raw_data = NULL; + z->img_comp[i].data = NULL; + } + if (z->img_comp[i].raw_coeff) { + STBI_FREE(z->img_comp[i].raw_coeff); + z->img_comp[i].raw_coeff = 0; + z->img_comp[i].coeff = 0; + } + if (z->img_comp[i].linebuf) { + STBI_FREE(z->img_comp[i].linebuf); + z->img_comp[i].linebuf = NULL; + } + } + return why; +} + static int stbi__process_frame_header(stbi__jpeg *z, int scan) { stbi__context *s = z->s; @@ -2709,8 +3234,10 @@ static int stbi__process_frame_header(stbi__jpeg *z, int scan) p = stbi__get8(s); if (p != 8) return stbi__err("only 8-bit","JPEG format not supported: 8-bit only"); // JPEG baseline s->img_y = stbi__get16be(s); if (s->img_y == 0) return stbi__err("no header height", "JPEG format not supported: delayed height"); // Legal, but we don't handle it--but neither does IJG s->img_x = stbi__get16be(s); if (s->img_x == 0) return stbi__err("0 width","Corrupt JPEG"); // JPEG requires + if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); + if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); c = stbi__get8(s); - if (c != 3 && c != 1) return stbi__err("bad component count","Corrupt JPEG"); // JFIF requires + if (c != 3 && c != 1 && c != 4) return stbi__err("bad component count","Corrupt JPEG"); s->img_n = c; for (i=0; i < c; ++i) { z->img_comp[i].data = NULL; @@ -2721,15 +3248,10 @@ static int stbi__process_frame_header(stbi__jpeg *z, int scan) z->rgb = 0; for (i=0; i < s->img_n; ++i) { - static unsigned char rgb[3] = { 'R', 'G', 'B' }; + static const unsigned char rgb[3] = { 'R', 'G', 'B' }; z->img_comp[i].id = stbi__get8(s); - if (z->img_comp[i].id != i+1) // JFIF requires - if (z->img_comp[i].id != i) { // some version of jpegtran outputs non-JFIF-compliant files! 
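/*
 * stbi__free_jpeg_components above doubles as the JPEG error path: it frees
 * whatever was allocated for the first ncomp components and passes the
 * caller-supplied return value straight through, which is why later call
 * sites can be written as
 *     return stbi__free_jpeg_components(z, i+1, stbi__err("outofmem", "Out of memory"));
 * A minimal sketch of the same "free partial work, forward the result" idiom,
 * with assumed demo_* names (illustration only):
 */
#include <stdlib.h>

typedef struct { void *buf; } demo_part;

static int demo_free_parts(demo_part *parts, int n, int result)
{
    for (int i = 0; i < n; ++i) {
        free(parts[i].buf);
        parts[i].buf = NULL;
    }
    return result;   /* success or error code, forwarded unchanged */
}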
- // somethings output this (see http://fileformats.archiveteam.org/wiki/JPEG#Color_format) - if (z->img_comp[i].id != rgb[i]) - return stbi__err("bad component ID","Corrupt JPEG"); - ++z->rgb; - } + if (s->img_n == 3 && z->img_comp[i].id == rgb[i]) + ++z->rgb; q = stbi__get8(s); z->img_comp[i].h = (q >> 4); if (!z->img_comp[i].h || z->img_comp[i].h > 4) return stbi__err("bad H","Corrupt JPEG"); z->img_comp[i].v = q & 15; if (!z->img_comp[i].v || z->img_comp[i].v > 4) return stbi__err("bad V","Corrupt JPEG"); @@ -2738,18 +3260,26 @@ static int stbi__process_frame_header(stbi__jpeg *z, int scan) if (scan != STBI__SCAN_load) return 1; - if ((1 << 30) / s->img_x / s->img_n < s->img_y) return stbi__err("too large", "Image too large to decode"); + if (!stbi__mad3sizes_valid(s->img_x, s->img_y, s->img_n, 0)) return stbi__err("too large", "Image too large to decode"); for (i=0; i < s->img_n; ++i) { if (z->img_comp[i].h > h_max) h_max = z->img_comp[i].h; if (z->img_comp[i].v > v_max) v_max = z->img_comp[i].v; } + // check that plane subsampling factors are integer ratios; our resamplers can't deal with fractional ratios + // and I've never seen a non-corrupted JPEG file actually use them + for (i=0; i < s->img_n; ++i) { + if (h_max % z->img_comp[i].h != 0) return stbi__err("bad H","Corrupt JPEG"); + if (v_max % z->img_comp[i].v != 0) return stbi__err("bad V","Corrupt JPEG"); + } + // compute interleaved mcu info z->img_h_max = h_max; z->img_v_max = v_max; z->img_mcu_w = h_max * 8; z->img_mcu_h = v_max * 8; + // these sizes can't be more than 17 bits z->img_mcu_x = (s->img_x + z->img_mcu_w-1) / z->img_mcu_w; z->img_mcu_y = (s->img_y + z->img_mcu_h-1) / z->img_mcu_h; @@ -2761,28 +3291,27 @@ static int stbi__process_frame_header(stbi__jpeg *z, int scan) // the bogus oversized data from using interleaved MCUs and their // big blocks (e.g. 
a 16x16 iMCU on an image of width 33); we won't // discard the extra data until colorspace conversion + // + // img_mcu_x, img_mcu_y: <=17 bits; comp[i].h and .v are <=4 (checked earlier) + // so these muls can't overflow with 32-bit ints (which we require) z->img_comp[i].w2 = z->img_mcu_x * z->img_comp[i].h * 8; z->img_comp[i].h2 = z->img_mcu_y * z->img_comp[i].v * 8; - z->img_comp[i].raw_data = stbi__malloc(z->img_comp[i].w2 * z->img_comp[i].h2+15); - - if (z->img_comp[i].raw_data == NULL) { - for(--i; i >= 0; --i) { - STBI_FREE(z->img_comp[i].raw_data); - z->img_comp[i].raw_data = NULL; - } - return stbi__err("outofmem", "Out of memory"); - } + z->img_comp[i].coeff = 0; + z->img_comp[i].raw_coeff = 0; + z->img_comp[i].linebuf = NULL; + z->img_comp[i].raw_data = stbi__malloc_mad2(z->img_comp[i].w2, z->img_comp[i].h2, 15); + if (z->img_comp[i].raw_data == NULL) + return stbi__free_jpeg_components(z, i+1, stbi__err("outofmem", "Out of memory")); // align blocks for idct using mmx/sse z->img_comp[i].data = (stbi_uc*) (((size_t) z->img_comp[i].raw_data + 15) & ~15); - z->img_comp[i].linebuf = NULL; if (z->progressive) { - z->img_comp[i].coeff_w = (z->img_comp[i].w2 + 7) >> 3; - z->img_comp[i].coeff_h = (z->img_comp[i].h2 + 7) >> 3; - z->img_comp[i].raw_coeff = STBI_MALLOC(z->img_comp[i].coeff_w * z->img_comp[i].coeff_h * 64 * sizeof(short) + 15); + // w2, h2 are multiples of 8 (see above) + z->img_comp[i].coeff_w = z->img_comp[i].w2 / 8; + z->img_comp[i].coeff_h = z->img_comp[i].h2 / 8; + z->img_comp[i].raw_coeff = stbi__malloc_mad3(z->img_comp[i].w2, z->img_comp[i].h2, sizeof(short), 15); + if (z->img_comp[i].raw_coeff == NULL) + return stbi__free_jpeg_components(z, i+1, stbi__err("outofmem", "Out of memory")); z->img_comp[i].coeff = (short*) (((size_t) z->img_comp[i].raw_coeff + 15) & ~15); - } else { - z->img_comp[i].coeff = 0; - z->img_comp[i].raw_coeff = 0; } } @@ -2801,6 +3330,8 @@ static int stbi__process_frame_header(stbi__jpeg *z, int scan) static int stbi__decode_jpeg_header(stbi__jpeg *z, int scan) { int m; + z->jfif = 0; + z->app14_color_transform = -1; // valid values are 0,1,2 z->marker = STBI__MARKER_none; // initialize cached marker to empty m = stbi__get_marker(z); if (!stbi__SOI(m)) return stbi__err("no SOI","Corrupt JPEG"); @@ -2842,12 +3373,15 @@ static int stbi__decode_jpeg_image(stbi__jpeg *j) if (x == 255) { j->marker = stbi__get8(j->s); break; - } else if (x != 0) { - return stbi__err("junk before marker", "Corrupt JPEG"); } } // if we reach eof without hitting a marker, stbi__get_marker() below will fail and we'll eventually return 0 } + } else if (stbi__DNL(m)) { + int Ld = stbi__get16be(j->s); + stbi__uint32 NL = stbi__get16be(j->s); + if (Ld != 4) return stbi__err("bad DNL len", "Corrupt JPEG"); + if (NL != j->s->img_y) return stbi__err("bad DNL height", "Corrupt JPEG"); } else { if (!stbi__process_marker(j, m)) return 0; } @@ -3066,38 +3600,9 @@ static stbi_uc *stbi__resample_row_generic(stbi_uc *out, stbi_uc *in_near, stbi_ return out; } -#ifdef STBI_JPEG_OLD -// this is the same YCbCr-to-RGB calculation that stb_image has used -// historically before the algorithm changes in 1.49 -#define float2fixed(x) ((int) ((x) * 65536 + 0.5)) -static void stbi__YCbCr_to_RGB_row(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step) -{ - int i; - for (i=0; i < count; ++i) { - int y_fixed = (y[i] << 16) + 32768; // rounding - int r,g,b; - int cr = pcr[i] - 128; - int cb = pcb[i] - 128; - r = y_fixed + cr*float2fixed(1.40200f); - g = 
y_fixed - cr*float2fixed(0.71414f) - cb*float2fixed(0.34414f); - b = y_fixed + cb*float2fixed(1.77200f); - r >>= 16; - g >>= 16; - b >>= 16; - if ((unsigned) r > 255) { if (r < 0) r = 0; else r = 255; } - if ((unsigned) g > 255) { if (g < 0) g = 0; else g = 255; } - if ((unsigned) b > 255) { if (b < 0) b = 0; else b = 255; } - out[0] = (stbi_uc)r; - out[1] = (stbi_uc)g; - out[2] = (stbi_uc)b; - out[3] = 255; - out += step; - } -} -#else // this is a reduced-precision calculation of YCbCr-to-RGB introduced // to make sure the code produces the same results in both SIMD and scalar -#define float2fixed(x) (((int) ((x) * 4096.0f + 0.5f)) << 8) +#define stbi__float2fixed(x) (((int) ((x) * 4096.0f + 0.5f)) << 8) static void stbi__YCbCr_to_RGB_row(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step) { int i; @@ -3106,9 +3611,9 @@ static void stbi__YCbCr_to_RGB_row(stbi_uc *out, const stbi_uc *y, const stbi_uc int r,g,b; int cr = pcr[i] - 128; int cb = pcb[i] - 128; - r = y_fixed + cr* float2fixed(1.40200f); - g = y_fixed + (cr*-float2fixed(0.71414f)) + ((cb*-float2fixed(0.34414f)) & 0xffff0000); - b = y_fixed + cb* float2fixed(1.77200f); + r = y_fixed + cr* stbi__float2fixed(1.40200f); + g = y_fixed + (cr*-stbi__float2fixed(0.71414f)) + ((cb*-stbi__float2fixed(0.34414f)) & 0xffff0000); + b = y_fixed + cb* stbi__float2fixed(1.77200f); r >>= 20; g >>= 20; b >>= 20; @@ -3122,7 +3627,6 @@ static void stbi__YCbCr_to_RGB_row(stbi_uc *out, const stbi_uc *y, const stbi_uc out += step; } } -#endif #if defined(STBI_SSE2) || defined(STBI_NEON) static void stbi__YCbCr_to_RGB_simd(stbi_uc *out, stbi_uc const *y, stbi_uc const *pcb, stbi_uc const *pcr, int count, int step) @@ -3241,9 +3745,9 @@ static void stbi__YCbCr_to_RGB_simd(stbi_uc *out, stbi_uc const *y, stbi_uc cons int r,g,b; int cr = pcr[i] - 128; int cb = pcb[i] - 128; - r = y_fixed + cr* float2fixed(1.40200f); - g = y_fixed + cr*-float2fixed(0.71414f) + ((cb*-float2fixed(0.34414f)) & 0xffff0000); - b = y_fixed + cb* float2fixed(1.77200f); + r = y_fixed + cr* stbi__float2fixed(1.40200f); + g = y_fixed + cr*-stbi__float2fixed(0.71414f) + ((cb*-stbi__float2fixed(0.34414f)) & 0xffff0000); + b = y_fixed + cb* stbi__float2fixed(1.77200f); r >>= 20; g >>= 20; b >>= 20; @@ -3269,18 +3773,14 @@ static void stbi__setup_jpeg(stbi__jpeg *j) #ifdef STBI_SSE2 if (stbi__sse2_available()) { j->idct_block_kernel = stbi__idct_simd; - #ifndef STBI_JPEG_OLD j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd; - #endif j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd; } #endif #ifdef STBI_NEON j->idct_block_kernel = stbi__idct_simd; - #ifndef STBI_JPEG_OLD j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd; - #endif j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd; #endif } @@ -3288,23 +3788,7 @@ static void stbi__setup_jpeg(stbi__jpeg *j) // clean up the temporary component buffers static void stbi__cleanup_jpeg(stbi__jpeg *j) { - int i; - for (i=0; i < j->s->img_n; ++i) { - if (j->img_comp[i].raw_data) { - STBI_FREE(j->img_comp[i].raw_data); - j->img_comp[i].raw_data = NULL; - j->img_comp[i].data = NULL; - } - if (j->img_comp[i].raw_coeff) { - STBI_FREE(j->img_comp[i].raw_coeff); - j->img_comp[i].raw_coeff = 0; - j->img_comp[i].coeff = 0; - } - if (j->img_comp[i].linebuf) { - STBI_FREE(j->img_comp[i].linebuf); - j->img_comp[i].linebuf = NULL; - } - } + stbi__free_jpeg_components(j, j->s->img_n, 0); } typedef struct @@ -3317,9 +3801,16 @@ typedef struct int ypos; // which pre-expansion row we're on } 
stbi__resample; +// fast 0..255 * 0..255 => 0..255 rounded multiplication +static stbi_uc stbi__blinn_8x8(stbi_uc x, stbi_uc y) +{ + unsigned int t = x*y + 128; + return (stbi_uc) ((t + (t >>8)) >> 8); +} + static stbi_uc *load_jpeg_image(stbi__jpeg *z, int *out_x, int *out_y, int *comp, int req_comp) { - int n, decode_n; + int n, decode_n, is_rgb; z->s->img_n = 0; // make stbi__cleanup_jpeg safe // validate req_comp @@ -3329,19 +3820,25 @@ static stbi_uc *load_jpeg_image(stbi__jpeg *z, int *out_x, int *out_y, int *comp if (!stbi__decode_jpeg_image(z)) { stbi__cleanup_jpeg(z); return NULL; } // determine actual number of components to generate - n = req_comp ? req_comp : z->s->img_n; + n = req_comp ? req_comp : z->s->img_n >= 3 ? 3 : 1; - if (z->s->img_n == 3 && n < 3) + is_rgb = z->s->img_n == 3 && (z->rgb == 3 || (z->app14_color_transform == 0 && !z->jfif)); + + if (z->s->img_n == 3 && n < 3 && !is_rgb) decode_n = 1; else decode_n = z->s->img_n; + // nothing to do if no components requested; check this now to avoid + // accessing uninitialized coutput[0] later + if (decode_n <= 0) { stbi__cleanup_jpeg(z); return NULL; } + // resample and color-convert { int k; unsigned int i,j; stbi_uc *output; - stbi_uc *coutput[4]; + stbi_uc *coutput[4] = { NULL, NULL, NULL, NULL }; stbi__resample res_comp[4]; @@ -3368,7 +3865,7 @@ static stbi_uc *load_jpeg_image(stbi__jpeg *z, int *out_x, int *out_y, int *comp } // can't error after this so, this is safe - output = (stbi_uc *) stbi__malloc(n * z->s->img_x * z->s->img_y + 1); + output = (stbi_uc *) stbi__malloc_mad3(n, z->s->img_x, z->s->img_y, 1); if (!output) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); } // now go ahead and resample @@ -3391,7 +3888,7 @@ static stbi_uc *load_jpeg_image(stbi__jpeg *z, int *out_x, int *out_y, int *comp if (n >= 3) { stbi_uc *y = coutput[0]; if (z->s->img_n == 3) { - if (z->rgb == 3) { + if (is_rgb) { for (i=0; i < z->s->img_x; ++i) { out[0] = y[i]; out[1] = coutput[1][i]; @@ -3402,6 +3899,28 @@ static stbi_uc *load_jpeg_image(stbi__jpeg *z, int *out_x, int *out_y, int *comp } else { z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); } + } else if (z->s->img_n == 4) { + if (z->app14_color_transform == 0) { // CMYK + for (i=0; i < z->s->img_x; ++i) { + stbi_uc m = coutput[3][i]; + out[0] = stbi__blinn_8x8(coutput[0][i], m); + out[1] = stbi__blinn_8x8(coutput[1][i], m); + out[2] = stbi__blinn_8x8(coutput[2][i], m); + out[3] = 255; + out += n; + } + } else if (z->app14_color_transform == 2) { // YCCK + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + for (i=0; i < z->s->img_x; ++i) { + stbi_uc m = coutput[3][i]; + out[0] = stbi__blinn_8x8(255 - out[0], m); + out[1] = stbi__blinn_8x8(255 - out[1], m); + out[2] = stbi__blinn_8x8(255 - out[2], m); + out += n; + } + } else { // YCbCr + alpha? 
Ignore the fourth channel for now + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + } } else for (i=0; i < z->s->img_x; ++i) { out[0] = out[1] = out[2] = y[i]; @@ -3409,25 +3928,55 @@ static stbi_uc *load_jpeg_image(stbi__jpeg *z, int *out_x, int *out_y, int *comp out += n; } } else { - stbi_uc *y = coutput[0]; - if (n == 1) - for (i=0; i < z->s->img_x; ++i) out[i] = y[i]; - else - for (i=0; i < z->s->img_x; ++i) *out++ = y[i], *out++ = 255; + if (is_rgb) { + if (n == 1) + for (i=0; i < z->s->img_x; ++i) + *out++ = stbi__compute_y(coutput[0][i], coutput[1][i], coutput[2][i]); + else { + for (i=0; i < z->s->img_x; ++i, out += 2) { + out[0] = stbi__compute_y(coutput[0][i], coutput[1][i], coutput[2][i]); + out[1] = 255; + } + } + } else if (z->s->img_n == 4 && z->app14_color_transform == 0) { + for (i=0; i < z->s->img_x; ++i) { + stbi_uc m = coutput[3][i]; + stbi_uc r = stbi__blinn_8x8(coutput[0][i], m); + stbi_uc g = stbi__blinn_8x8(coutput[1][i], m); + stbi_uc b = stbi__blinn_8x8(coutput[2][i], m); + out[0] = stbi__compute_y(r, g, b); + out[1] = 255; + out += n; + } + } else if (z->s->img_n == 4 && z->app14_color_transform == 2) { + for (i=0; i < z->s->img_x; ++i) { + out[0] = stbi__blinn_8x8(255 - coutput[0][i], coutput[3][i]); + out[1] = 255; + out += n; + } + } else { + stbi_uc *y = coutput[0]; + if (n == 1) + for (i=0; i < z->s->img_x; ++i) out[i] = y[i]; + else + for (i=0; i < z->s->img_x; ++i) { *out++ = y[i]; *out++ = 255; } + } } } stbi__cleanup_jpeg(z); *out_x = z->s->img_x; *out_y = z->s->img_y; - if (comp) *comp = z->s->img_n; // report original components, not output + if (comp) *comp = z->s->img_n >= 3 ? 3 : 1; // report original components, not output return output; } } -static unsigned char *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp) +static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) { unsigned char* result; stbi__jpeg* j = (stbi__jpeg*) stbi__malloc(sizeof(stbi__jpeg)); + if (!j) return stbi__errpuc("outofmem", "Out of memory"); + STBI_NOTUSED(ri); j->s = s; stbi__setup_jpeg(j); result = load_jpeg_image(j, x,y,comp,req_comp); @@ -3438,11 +3987,13 @@ static unsigned char *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *com static int stbi__jpeg_test(stbi__context *s) { int r; - stbi__jpeg j; - j.s = s; - stbi__setup_jpeg(&j); - r = stbi__decode_jpeg_header(&j, STBI__SCAN_type); + stbi__jpeg* j = (stbi__jpeg*)stbi__malloc(sizeof(stbi__jpeg)); + if (!j) return stbi__err("outofmem", "Out of memory"); + j->s = s; + stbi__setup_jpeg(j); + r = stbi__decode_jpeg_header(j, STBI__SCAN_type); stbi__rewind(s); + STBI_FREE(j); return r; } @@ -3454,7 +4005,7 @@ static int stbi__jpeg_info_raw(stbi__jpeg *j, int *x, int *y, int *comp) } if (x) *x = j->s->img_x; if (y) *y = j->s->img_y; - if (comp) *comp = j->s->img_n; + if (comp) *comp = j->s->img_n >= 3 ? 
3 : 1; return 1; } @@ -3462,6 +4013,7 @@ static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp) { int result; stbi__jpeg* j = (stbi__jpeg*) (stbi__malloc(sizeof(stbi__jpeg))); + if (!j) return stbi__err("outofmem", "Out of memory"); j->s = s; result = stbi__jpeg_info_raw(j, x, y, comp); STBI_FREE(j); @@ -3481,6 +4033,7 @@ static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp) // fast-way is faster to check than jpeg huffman, but slow way is slower #define STBI__ZFAST_BITS 9 // accelerate all cases in default tables #define STBI__ZFAST_MASK ((1 << STBI__ZFAST_BITS) - 1) +#define STBI__ZNSYMS 288 // number of symbols in literal/length alphabet // zlib-style huffman encoding // (jpegs packs from left, zlib from right, so can't share code) @@ -3490,8 +4043,8 @@ typedef struct stbi__uint16 firstcode[16]; int maxcode[17]; stbi__uint16 firstsymbol[16]; - stbi_uc size[288]; - stbi__uint16 value[288]; + stbi_uc size[STBI__ZNSYMS]; + stbi__uint16 value[STBI__ZNSYMS]; } stbi__zhuffman; stbi_inline static int stbi__bitreverse16(int n) @@ -3511,7 +4064,7 @@ stbi_inline static int stbi__bit_reverse(int v, int bits) return stbi__bitreverse16(v) >> (16-bits); } -static int stbi__zbuild_huffman(stbi__zhuffman *z, stbi_uc *sizelist, int num) +static int stbi__zbuild_huffman(stbi__zhuffman *z, const stbi_uc *sizelist, int num) { int i,k=0; int code, next_code[16], sizes[17]; @@ -3578,16 +4131,23 @@ typedef struct stbi__zhuffman z_length, z_distance; } stbi__zbuf; +stbi_inline static int stbi__zeof(stbi__zbuf *z) +{ + return (z->zbuffer >= z->zbuffer_end); +} + stbi_inline static stbi_uc stbi__zget8(stbi__zbuf *z) { - if (z->zbuffer >= z->zbuffer_end) return 0; - return *z->zbuffer++; + return stbi__zeof(z) ? 0 : *z->zbuffer++; } static void stbi__fill_bits(stbi__zbuf *z) { do { - STBI_ASSERT(z->code_buffer < (1U << z->num_bits)); + if (z->code_buffer >= (1U << z->num_bits)) { + z->zbuffer = z->zbuffer_end; /* treat this as EOF so we fail. */ + return; + } z->code_buffer |= (unsigned int) stbi__zget8(z) << z->num_bits; z->num_bits += 8; } while (z->num_bits <= 24); @@ -3612,10 +4172,11 @@ static int stbi__zhuffman_decode_slowpath(stbi__zbuf *a, stbi__zhuffman *z) for (s=STBI__ZFAST_BITS+1; ; ++s) if (k < z->maxcode[s]) break; - if (s == 16) return -1; // invalid code! + if (s >= 16) return -1; // invalid code! // code size is s, so: b = (k >> (16-s)) - z->firstcode[s] + z->firstsymbol[s]; - STBI_ASSERT(z->size[b] == s); + if (b >= STBI__ZNSYMS) return -1; // some data was corrupt somewhere! + if (z->size[b] != s) return -1; // was originally an assert, but report failure instead. a->code_buffer >>= s; a->num_bits -= s; return z->value[b]; @@ -3624,7 +4185,12 @@ static int stbi__zhuffman_decode_slowpath(stbi__zbuf *a, stbi__zhuffman *z) stbi_inline static int stbi__zhuffman_decode(stbi__zbuf *a, stbi__zhuffman *z) { int b,s; - if (a->num_bits < 16) stbi__fill_bits(a); + if (a->num_bits < 16) { + if (stbi__zeof(a)) { + return -1; /* report error for unexpected end of data. 
*/ + } + stbi__fill_bits(a); + } b = z->fast[a->code_buffer & STBI__ZFAST_MASK]; if (b) { s = b >> 9; @@ -3638,13 +4204,16 @@ stbi_inline static int stbi__zhuffman_decode(stbi__zbuf *a, stbi__zhuffman *z) static int stbi__zexpand(stbi__zbuf *z, char *zout, int n) // need to make room for n bytes { char *q; - int cur, limit, old_limit; + unsigned int cur, limit, old_limit; z->zout = zout; if (!z->z_expandable) return stbi__err("output buffer limit","Corrupt PNG"); - cur = (int) (z->zout - z->zout_start); - limit = old_limit = (int) (z->zout_end - z->zout_start); - while (cur + n > limit) + cur = (unsigned int) (z->zout - z->zout_start); + limit = old_limit = (unsigned) (z->zout_end - z->zout_start); + if (UINT_MAX - cur < (unsigned) n) return stbi__err("outofmem", "Out of memory"); + while (cur + n > limit) { + if(limit > UINT_MAX / 2) return stbi__err("outofmem", "Out of memory"); limit *= 2; + } q = (char *) STBI_REALLOC_SIZED(z->zout_start, old_limit, limit); STBI_NOTUSED(old_limit); if (q == NULL) return stbi__err("outofmem", "Out of memory"); @@ -3654,18 +4223,18 @@ static int stbi__zexpand(stbi__zbuf *z, char *zout, int n) // need to make room return 1; } -static int stbi__zlength_base[31] = { +static const int stbi__zlength_base[31] = { 3,4,5,6,7,8,9,10,11,13, 15,17,19,23,27,31,35,43,51,59, 67,83,99,115,131,163,195,227,258,0,0 }; -static int stbi__zlength_extra[31]= +static const int stbi__zlength_extra[31]= { 0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0,0,0 }; -static int stbi__zdist_base[32] = { 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193, +static const int stbi__zdist_base[32] = { 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193, 257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0}; -static int stbi__zdist_extra[32] = +static const int stbi__zdist_extra[32] = { 0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13}; static int stbi__parse_huffman_block(stbi__zbuf *a) @@ -3712,7 +4281,7 @@ static int stbi__parse_huffman_block(stbi__zbuf *a) static int stbi__compute_huffman_codes(stbi__zbuf *a) { - static stbi_uc length_dezigzag[19] = { 16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15 }; + static const stbi_uc length_dezigzag[19] = { 16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15 }; stbi__zhuffman z_codelength; stbi_uc lencodes[286+32+137];//padding for maximum single op stbi_uc codelength_sizes[19]; @@ -3721,6 +4290,7 @@ static int stbi__compute_huffman_codes(stbi__zbuf *a) int hlit = stbi__zreceive(a,5) + 257; int hdist = stbi__zreceive(a,5) + 1; int hclen = stbi__zreceive(a,4) + 4; + int ntot = hlit + hdist; memset(codelength_sizes, 0, sizeof(codelength_sizes)); for (i=0; i < hclen; ++i) { @@ -3730,27 +4300,30 @@ static int stbi__compute_huffman_codes(stbi__zbuf *a) if (!stbi__zbuild_huffman(&z_codelength, codelength_sizes, 19)) return 0; n = 0; - while (n < hlit + hdist) { + while (n < ntot) { int c = stbi__zhuffman_decode(a, &z_codelength); if (c < 0 || c >= 19) return stbi__err("bad codelengths", "Corrupt PNG"); if (c < 16) lencodes[n++] = (stbi_uc) c; - else if (c == 16) { - c = stbi__zreceive(a,2)+3; - memset(lencodes+n, lencodes[n-1], c); - n += c; - } else if (c == 17) { - c = stbi__zreceive(a,3)+3; - memset(lencodes+n, 0, c); - n += c; - } else { - STBI_ASSERT(c == 18); - c = stbi__zreceive(a,7)+11; - memset(lencodes+n, 0, c); + else { + stbi_uc fill = 0; + if (c == 16) { + c = stbi__zreceive(a,2)+3; + if (n == 0) return stbi__err("bad codelengths", "Corrupt PNG"); + fill = lencodes[n-1]; + } else if (c == 17) { + c = 
stbi__zreceive(a,3)+3; + } else if (c == 18) { + c = stbi__zreceive(a,7)+11; + } else { + return stbi__err("bad codelengths", "Corrupt PNG"); + } + if (ntot - n < c) return stbi__err("bad codelengths", "Corrupt PNG"); + memset(lencodes+n, fill, c); n += c; } } - if (n != hlit+hdist) return stbi__err("bad codelengths","Corrupt PNG"); + if (n != ntot) return stbi__err("bad codelengths","Corrupt PNG"); if (!stbi__zbuild_huffman(&a->z_length, lencodes, hlit)) return 0; if (!stbi__zbuild_huffman(&a->z_distance, lencodes+hlit, hdist)) return 0; return 1; @@ -3769,7 +4342,7 @@ static int stbi__parse_uncompressed_block(stbi__zbuf *a) a->code_buffer >>= 8; a->num_bits -= 8; } - STBI_ASSERT(a->num_bits == 0); + if (a->num_bits < 0) return stbi__err("zlib corrupt","Corrupt PNG"); // now fill header the normal way while (k < 4) header[k++] = stbi__zget8(a); @@ -3791,6 +4364,7 @@ static int stbi__parse_zlib_header(stbi__zbuf *a) int cm = cmf & 15; /* int cinfo = cmf >> 4; */ int flg = stbi__zget8(a); + if (stbi__zeof(a)) return stbi__err("bad zlib header","Corrupt PNG"); // zlib spec if ((cmf*256+flg) % 31 != 0) return stbi__err("bad zlib header","Corrupt PNG"); // zlib spec if (flg & 32) return stbi__err("no preset dict","Corrupt PNG"); // preset dictionary not allowed in png if (cm != 8) return stbi__err("bad compression","Corrupt PNG"); // DEFLATE required for png @@ -3798,9 +4372,24 @@ static int stbi__parse_zlib_header(stbi__zbuf *a) return 1; } -// @TODO: should statically initialize these for optimal thread safety -static stbi_uc stbi__zdefault_length[288], stbi__zdefault_distance[32]; -static void stbi__init_zdefaults(void) +static const stbi_uc stbi__zdefault_length[STBI__ZNSYMS] = +{ + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8 +}; +static const stbi_uc stbi__zdefault_distance[32] = +{ + 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5 +}; +/* +Init algorithm: { int i; // use <= to match clearly with spec for (i=0; i <= 143; ++i) stbi__zdefault_length[i] = 8; @@ -3810,6 +4399,7 @@ static void stbi__init_zdefaults(void) for (i=0; i <= 31; ++i) stbi__zdefault_distance[i] = 5; } +*/ static int stbi__parse_zlib(stbi__zbuf *a, int parse_header) { @@ -3828,8 +4418,7 @@ static int stbi__parse_zlib(stbi__zbuf *a, int parse_header) } else { if (type == 1) { // use fixed code lengths - if (!stbi__zdefault_distance[31]) stbi__init_zdefaults(); - if (!stbi__zbuild_huffman(&a->z_length , stbi__zdefault_length , 288)) return 0; + if (!stbi__zbuild_huffman(&a->z_length , stbi__zdefault_length , STBI__ZNSYMS)) return 0; if (!stbi__zbuild_huffman(&a->z_distance, stbi__zdefault_distance, 32)) return 0; } else { if (!stbi__compute_huffman_codes(a)) return 0; @@ -3953,7 +4542,7 @@ static stbi__pngchunk stbi__get_chunk_header(stbi__context *s) static int stbi__check_png_header(stbi__context *s) { - static stbi_uc png_sig[8] = { 137,80,78,71,13,10,26,10 }; + static const stbi_uc png_sig[8] = { 137,80,78,71,13,10,26,10 }; int i; for (i=0; i < 8; 
++i) if (stbi__get8(s) != png_sig[i]) return stbi__err("bad png sig","Not a PNG"); @@ -3999,7 +4588,7 @@ static int stbi__paeth(int a, int b, int c) return c; } -static stbi_uc stbi__depth_scale_table[9] = { 0, 0xff, 0x55, 0, 0x11, 0,0,0, 0x01 }; +static const stbi_uc stbi__depth_scale_table[9] = { 0, 0xff, 0x55, 0, 0x11, 0,0,0, 0x01 }; // create the png data from post-deflated data static int stbi__create_png_image_raw(stbi__png *a, stbi_uc *raw, stbi__uint32 raw_len, int out_n, stbi__uint32 x, stbi__uint32 y, int depth, int color) @@ -4016,31 +4605,33 @@ static int stbi__create_png_image_raw(stbi__png *a, stbi_uc *raw, stbi__uint32 r int width = x; STBI_ASSERT(out_n == s->img_n || out_n == s->img_n+1); - a->out = (stbi_uc *) stbi__malloc(x * y * output_bytes); // extra bytes to write off the end into + a->out = (stbi_uc *) stbi__malloc_mad3(x, y, output_bytes, 0); // extra bytes to write off the end into if (!a->out) return stbi__err("outofmem", "Out of memory"); + if (!stbi__mad3sizes_valid(img_n, x, depth, 7)) return stbi__err("too large", "Corrupt PNG"); img_width_bytes = (((img_n * x * depth) + 7) >> 3); img_len = (img_width_bytes + 1) * y; - if (s->img_x == x && s->img_y == y) { - if (raw_len != img_len) return stbi__err("not enough pixels","Corrupt PNG"); - } else { // interlaced: - if (raw_len < img_len) return stbi__err("not enough pixels","Corrupt PNG"); - } + + // we used to check for exact match between raw_len and img_len on non-interlaced PNGs, + // but issue #276 reported a PNG in the wild that had extra data at the end (all zeros), + // so just check for raw_len < img_len always. + if (raw_len < img_len) return stbi__err("not enough pixels","Corrupt PNG"); for (j=0; j < y; ++j) { stbi_uc *cur = a->out + stride*j; - stbi_uc *prior = cur - stride; + stbi_uc *prior; int filter = *raw++; if (filter > 4) return stbi__err("invalid filter","Corrupt PNG"); if (depth < 8) { - STBI_ASSERT(img_width_bytes <= x); + if (img_width_bytes > x) return stbi__err("invalid width","Corrupt PNG"); cur += x*out_n - img_width_bytes; // store output to the rightmost img_len bytes, so we can decode in place filter_bytes = 1; width = img_width_bytes; } + prior = cur - stride; // bugfix: need to compute this after 'cur +=' computation above // if first row, use special filter that doesn't sample previous row if (j == 0) filter = first_row_filter[filter]; @@ -4081,37 +4672,37 @@ static int stbi__create_png_image_raw(stbi__png *a, stbi_uc *raw, stbi__uint32 r // this is a little gross, so that we don't switch per-pixel or per-component if (depth < 8 || img_n == out_n) { int nk = (width - 1)*filter_bytes; - #define CASE(f) \ + #define STBI__CASE(f) \ case f: \ for (k=0; k < nk; ++k) switch (filter) { // "none" filter turns into a memcpy here; make that explicit. 
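/*
 * For reference, each STBI__CASE(...) arm below is one whole-scanline loop:
 * given the #define above, STBI__CASE(STBI__F_sub) { ... } break; expands to
 * roughly
 *
 *   case STBI__F_sub:
 *       for (k = 0; k < nk; ++k) { cur[k] = STBI__BYTECAST(raw[k] + cur[k - filter_bytes]); }
 *       break;
 *
 * so the filter type is switched on once per row rather than once per byte,
 * and the "none" filter is special-cased as a single memcpy.
 */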
case STBI__F_none: memcpy(cur, raw, nk); break; - CASE(STBI__F_sub) cur[k] = STBI__BYTECAST(raw[k] + cur[k-filter_bytes]); break; - CASE(STBI__F_up) cur[k] = STBI__BYTECAST(raw[k] + prior[k]); break; - CASE(STBI__F_avg) cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k-filter_bytes])>>1)); break; - CASE(STBI__F_paeth) cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],prior[k],prior[k-filter_bytes])); break; - CASE(STBI__F_avg_first) cur[k] = STBI__BYTECAST(raw[k] + (cur[k-filter_bytes] >> 1)); break; - CASE(STBI__F_paeth_first) cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],0,0)); break; + STBI__CASE(STBI__F_sub) { cur[k] = STBI__BYTECAST(raw[k] + cur[k-filter_bytes]); } break; + STBI__CASE(STBI__F_up) { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break; + STBI__CASE(STBI__F_avg) { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k-filter_bytes])>>1)); } break; + STBI__CASE(STBI__F_paeth) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],prior[k],prior[k-filter_bytes])); } break; + STBI__CASE(STBI__F_avg_first) { cur[k] = STBI__BYTECAST(raw[k] + (cur[k-filter_bytes] >> 1)); } break; + STBI__CASE(STBI__F_paeth_first) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],0,0)); } break; } - #undef CASE + #undef STBI__CASE raw += nk; } else { STBI_ASSERT(img_n+1 == out_n); - #define CASE(f) \ + #define STBI__CASE(f) \ case f: \ for (i=x-1; i >= 1; --i, cur[filter_bytes]=255,raw+=filter_bytes,cur+=output_bytes,prior+=output_bytes) \ for (k=0; k < filter_bytes; ++k) switch (filter) { - CASE(STBI__F_none) cur[k] = raw[k]; break; - CASE(STBI__F_sub) cur[k] = STBI__BYTECAST(raw[k] + cur[k- output_bytes]); break; - CASE(STBI__F_up) cur[k] = STBI__BYTECAST(raw[k] + prior[k]); break; - CASE(STBI__F_avg) cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k- output_bytes])>>1)); break; - CASE(STBI__F_paeth) cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k- output_bytes],prior[k],prior[k- output_bytes])); break; - CASE(STBI__F_avg_first) cur[k] = STBI__BYTECAST(raw[k] + (cur[k- output_bytes] >> 1)); break; - CASE(STBI__F_paeth_first) cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k- output_bytes],0,0)); break; + STBI__CASE(STBI__F_none) { cur[k] = raw[k]; } break; + STBI__CASE(STBI__F_sub) { cur[k] = STBI__BYTECAST(raw[k] + cur[k- output_bytes]); } break; + STBI__CASE(STBI__F_up) { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break; + STBI__CASE(STBI__F_avg) { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k- output_bytes])>>1)); } break; + STBI__CASE(STBI__F_paeth) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k- output_bytes],prior[k],prior[k- output_bytes])); } break; + STBI__CASE(STBI__F_avg_first) { cur[k] = STBI__BYTECAST(raw[k] + (cur[k- output_bytes] >> 1)); } break; + STBI__CASE(STBI__F_paeth_first) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k- output_bytes],0,0)); } break; } - #undef CASE + #undef STBI__CASE // the loop above sets the high byte of the pixels' alpha, but for // 16 bit png files we also need the low byte set. we'll do that here. @@ -4214,13 +4805,16 @@ static int stbi__create_png_image_raw(stbi__png *a, stbi_uc *raw, stbi__uint32 r static int stbi__create_png_image(stbi__png *a, stbi_uc *image_data, stbi__uint32 image_data_len, int out_n, int depth, int color, int interlaced) { + int bytes = (depth == 16 ? 
2 : 1); + int out_bytes = out_n * bytes; stbi_uc *final; int p; if (!interlaced) return stbi__create_png_image_raw(a, image_data, image_data_len, out_n, a->s->img_x, a->s->img_y, depth, color); // de-interlacing - final = (stbi_uc *) stbi__malloc(a->s->img_x * a->s->img_y * out_n); + final = (stbi_uc *) stbi__malloc_mad3(a->s->img_x, a->s->img_y, out_bytes, 0); + if (!final) return stbi__err("outofmem", "Out of memory"); for (p=0; p < 7; ++p) { int xorig[] = { 0,4,0,2,0,1,0 }; int yorig[] = { 0,0,4,0,2,0,1 }; @@ -4240,8 +4834,8 @@ static int stbi__create_png_image(stbi__png *a, stbi_uc *image_data, stbi__uint3 for (i=0; i < x; ++i) { int out_y = j*yspc[p]+yorig[p]; int out_x = i*xspc[p]+xorig[p]; - memcpy(final + out_y*a->s->img_x*out_n + out_x*out_n, - a->out + (j*x+i)*out_n, out_n); + memcpy(final + out_y*a->s->img_x*out_bytes + out_x*out_bytes, + a->out + (j*x+i)*out_bytes, out_bytes); } } STBI_FREE(a->out); @@ -4309,7 +4903,7 @@ static int stbi__expand_png_palette(stbi__png *a, stbi_uc *palette, int len, int stbi__uint32 i, pixel_count = a->s->img_x * a->s->img_y; stbi_uc *p, *temp_out, *orig = a->out; - p = (stbi_uc *) stbi__malloc(pixel_count * pal_img_n); + p = (stbi_uc *) stbi__malloc_mad2(pixel_count, pal_img_n, 0); if (p == NULL) return stbi__err("outofmem", "Out of memory"); // between here and free(out) below, exitting would leak @@ -4341,39 +4935,46 @@ static int stbi__expand_png_palette(stbi__png *a, stbi_uc *palette, int len, int return 1; } -static int stbi__reduce_png(stbi__png *p) -{ - int i; - int img_len = p->s->img_x * p->s->img_y * p->s->img_out_n; - stbi_uc *reduced; - stbi__uint16 *orig = (stbi__uint16*)p->out; - - if (p->depth != 16) return 1; // don't need to do anything if not 16-bit data - - reduced = (stbi_uc *)stbi__malloc(img_len); - if (p == NULL) return stbi__err("outofmem", "Out of memory"); - - for (i = 0; i < img_len; ++i) reduced[i] = (stbi_uc)((orig[i] >> 8) & 0xFF); // top half of each byte is a decent approx of 16->8 bit scaling +static int stbi__unpremultiply_on_load_global = 0; +static int stbi__de_iphone_flag_global = 0; - p->out = reduced; - STBI_FREE(orig); +STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply) +{ + stbi__unpremultiply_on_load_global = flag_true_if_should_unpremultiply; +} - return 1; +STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert) +{ + stbi__de_iphone_flag_global = flag_true_if_should_convert; } -static int stbi__unpremultiply_on_load = 0; -static int stbi__de_iphone_flag = 0; +#ifndef STBI_THREAD_LOCAL +#define stbi__unpremultiply_on_load stbi__unpremultiply_on_load_global +#define stbi__de_iphone_flag stbi__de_iphone_flag_global +#else +static STBI_THREAD_LOCAL int stbi__unpremultiply_on_load_local, stbi__unpremultiply_on_load_set; +static STBI_THREAD_LOCAL int stbi__de_iphone_flag_local, stbi__de_iphone_flag_set; -STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply) +STBIDEF void stbi__unpremultiply_on_load_thread(int flag_true_if_should_unpremultiply) { - stbi__unpremultiply_on_load = flag_true_if_should_unpremultiply; + stbi__unpremultiply_on_load_local = flag_true_if_should_unpremultiply; + stbi__unpremultiply_on_load_set = 1; } -STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert) +STBIDEF void stbi_convert_iphone_png_to_rgb_thread(int flag_true_if_should_convert) { - stbi__de_iphone_flag = flag_true_if_should_convert; + stbi__de_iphone_flag_local = flag_true_if_should_convert; + stbi__de_iphone_flag_set = 1; 
} +#define stbi__unpremultiply_on_load (stbi__unpremultiply_on_load_set \ + ? stbi__unpremultiply_on_load_local \ + : stbi__unpremultiply_on_load_global) +#define stbi__de_iphone_flag (stbi__de_iphone_flag_set \ + ? stbi__de_iphone_flag_local \ + : stbi__de_iphone_flag_global) +#endif // STBI_THREAD_LOCAL + static void stbi__de_iphone(stbi__png *z) { stbi__context *s = z->s; @@ -4395,9 +4996,10 @@ static void stbi__de_iphone(stbi__png *z) stbi_uc a = p[3]; stbi_uc t = p[0]; if (a) { - p[0] = p[2] * 255 / a; - p[1] = p[1] * 255 / a; - p[2] = t * 255 / a; + stbi_uc half = a / 2; + p[0] = (p[2] * 255 + half) / a; + p[1] = (p[1] * 255 + half) / a; + p[2] = ( t * 255 + half) / a; } else { p[0] = p[2]; p[2] = t; @@ -4416,12 +5018,12 @@ static void stbi__de_iphone(stbi__png *z) } } -#define STBI__PNG_TYPE(a,b,c,d) (((a) << 24) + ((b) << 16) + ((c) << 8) + (d)) +#define STBI__PNG_TYPE(a,b,c,d) (((unsigned) (a) << 24) + ((unsigned) (b) << 16) + ((unsigned) (c) << 8) + (unsigned) (d)) static int stbi__parse_png_file(stbi__png *z, int scan, int req_comp) { stbi_uc palette[1024], pal_img_n=0; - stbi_uc has_trans=0, tc[3]; + stbi_uc has_trans=0, tc[3]={0}; stbi__uint16 tc16[3]; stbi__uint32 ioff=0, idata_limit=0, i, pal_len=0; int first=1,k,interlace=0, color=0, is_iphone=0; @@ -4447,11 +5049,13 @@ static int stbi__parse_png_file(stbi__png *z, int scan, int req_comp) if (!first) return stbi__err("multiple IHDR","Corrupt PNG"); first = 0; if (c.length != 13) return stbi__err("bad IHDR len","Corrupt PNG"); - s->img_x = stbi__get32be(s); if (s->img_x > (1 << 24)) return stbi__err("too large","Very large image (corrupt?)"); - s->img_y = stbi__get32be(s); if (s->img_y > (1 << 24)) return stbi__err("too large","Very large image (corrupt?)"); + s->img_x = stbi__get32be(s); + s->img_y = stbi__get32be(s); + if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); + if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); z->depth = stbi__get8(s); if (z->depth != 1 && z->depth != 2 && z->depth != 4 && z->depth != 8 && z->depth != 16) return stbi__err("1/2/4/8/16-bit only","PNG not supported: 1/2/4/8/16-bit only"); color = stbi__get8(s); if (color > 6) return stbi__err("bad ctype","Corrupt PNG"); - if (color == 3 && z->depth == 16) return stbi__err("bad ctype","Corrupt PNG"); + if (color == 3 && z->depth == 16) return stbi__err("bad ctype","Corrupt PNG"); if (color == 3) pal_img_n = 3; else if (color & 1) return stbi__err("bad ctype","Corrupt PNG"); comp = stbi__get8(s); if (comp) return stbi__err("bad comp method","Corrupt PNG"); filter= stbi__get8(s); if (filter) return stbi__err("bad filter method","Corrupt PNG"); @@ -4500,7 +5104,7 @@ static int stbi__parse_png_file(stbi__png *z, int scan, int req_comp) if (c.length != (stbi__uint32) s->img_n*2) return stbi__err("bad tRNS len","Corrupt PNG"); has_trans = 1; if (z->depth == 16) { - for (k = 0; k < s->img_n; ++k) tc16[k] = stbi__get16be(s); // copy the values as-is + for (k = 0; k < s->img_n; ++k) tc16[k] = (stbi__uint16)stbi__get16be(s); // copy the values as-is } else { for (k = 0; k < s->img_n; ++k) tc[k] = (stbi_uc)(stbi__get16be(s) & 255) * stbi__depth_scale_table[z->depth]; // non 8-bit images will be larger } @@ -4560,8 +5164,13 @@ static int stbi__parse_png_file(stbi__png *z, int scan, int req_comp) if (req_comp >= 3) s->img_out_n = req_comp; if (!stbi__expand_png_palette(z, palette, pal_len, s->img_out_n)) return 0; + } else if (has_trans) { + // non-paletted image with 
tRNS -> source image has (constant) alpha + ++s->img_n; } STBI_FREE(z->expanded); z->expanded = NULL; + // end of PNG chunk, read and skip CRC + stbi__get32be(s); return 1; } @@ -4587,20 +5196,24 @@ static int stbi__parse_png_file(stbi__png *z, int scan, int req_comp) } } -static unsigned char *stbi__do_png(stbi__png *p, int *x, int *y, int *n, int req_comp) +static void *stbi__do_png(stbi__png *p, int *x, int *y, int *n, int req_comp, stbi__result_info *ri) { - unsigned char *result=NULL; + void *result=NULL; if (req_comp < 0 || req_comp > 4) return stbi__errpuc("bad req_comp", "Internal error"); if (stbi__parse_png_file(p, STBI__SCAN_load, req_comp)) { - if (p->depth == 16) { - if (!stbi__reduce_png(p)) { - return result; - } - } + if (p->depth <= 8) + ri->bits_per_channel = 8; + else if (p->depth == 16) + ri->bits_per_channel = 16; + else + return stbi__errpuc("bad bits_per_channel", "PNG not supported: unsupported color depth"); result = p->out; p->out = NULL; if (req_comp && req_comp != p->s->img_out_n) { - result = stbi__convert_format(result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y); + if (ri->bits_per_channel == 8) + result = stbi__convert_format((unsigned char *) result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y); + else + result = stbi__convert_format16((stbi__uint16 *) result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y); p->s->img_out_n = req_comp; if (result == NULL) return result; } @@ -4615,11 +5228,11 @@ static unsigned char *stbi__do_png(stbi__png *p, int *x, int *y, int *n, int req return result; } -static unsigned char *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp) +static void *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) { stbi__png p; p.s = s; - return stbi__do_png(&p, x,y,comp,req_comp); + return stbi__do_png(&p, x,y,comp,req_comp, ri); } static int stbi__png_test(stbi__context *s) @@ -4648,6 +5261,19 @@ static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp) p.s = s; return stbi__png_info_raw(&p, x, y, comp); } + +static int stbi__png_is16(stbi__context *s) +{ + stbi__png p; + p.s = s; + if (!stbi__png_info_raw(&p, NULL, NULL, NULL)) + return 0; + if (p.depth != 16) { + stbi__rewind(p.s); + return 0; + } + return 1; +} #endif // Microsoft/Windows BMP image @@ -4681,11 +5307,11 @@ static int stbi__high_bit(unsigned int z) { int n=0; if (z == 0) return -1; - if (z >= 0x10000) n += 16, z >>= 16; - if (z >= 0x00100) n += 8, z >>= 8; - if (z >= 0x00010) n += 4, z >>= 4; - if (z >= 0x00004) n += 2, z >>= 2; - if (z >= 0x00002) n += 1, z >>= 1; + if (z >= 0x10000) { n += 16; z >>= 16; } + if (z >= 0x00100) { n += 8; z >>= 8; } + if (z >= 0x00010) { n += 4; z >>= 4; } + if (z >= 0x00004) { n += 2; z >>= 2; } + if (z >= 0x00002) { n += 1;/* >>= 1;*/ } return n; } @@ -4699,29 +5325,62 @@ static int stbi__bitcount(unsigned int a) return a & 0xff; } -static int stbi__shiftsigned(int v, int shift, int bits) -{ - int result; - int z=0; - - if (shift < 0) v <<= -shift; - else v >>= shift; - result = v; - - z = bits; - while (z < 8) { - result += v >> z; - z += bits; - } - return result; +// extract an arbitrarily-aligned N-bit value (N=bits) +// from v, and then make it 8-bits long and fractionally +// extend it to full full range. 
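// A minimal standalone sketch of the scaling idea the comment above describes
// (widen an n-bit field to 8 bits so that 0 maps to 0 and the maximum n-bit
// value maps to 255, i.e. bit replication); the names below are illustrative
// only and not part of stb_image:
#include <stdio.h>

// widen an n-bit value to 8 bits by repeating its bit pattern
static unsigned scale_nbit_to_8bit(unsigned v, int bits)
{
   unsigned out = 0;
   int filled = 0;
   while (filled < 8) {                    // replicate until at least 8 bits are filled
      out = (out << bits) | v;
      filled += bits;
   }
   return (out >> (filled - 8)) & 0xff;    // keep the top 8 bits
}

int main(void)
{
   printf("%u\n", scale_nbit_to_8bit(16, 5));   // 0b10000 -> 0b10000100 = 132
   printf("%u\n", scale_nbit_to_8bit(31, 5));   // full-scale 5-bit value -> 255
   return 0;
}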
+static int stbi__shiftsigned(unsigned int v, int shift, int bits) +{ + static unsigned int mul_table[9] = { + 0, + 0xff/*0b11111111*/, 0x55/*0b01010101*/, 0x49/*0b01001001*/, 0x11/*0b00010001*/, + 0x21/*0b00100001*/, 0x41/*0b01000001*/, 0x81/*0b10000001*/, 0x01/*0b00000001*/, + }; + static unsigned int shift_table[9] = { + 0, 0,0,1,0,2,4,6,0, + }; + if (shift < 0) + v <<= -shift; + else + v >>= shift; + STBI_ASSERT(v < 256); + v >>= (8-bits); + STBI_ASSERT(bits >= 0 && bits <= 8); + return (int) ((unsigned) v * mul_table[bits]) >> shift_table[bits]; } typedef struct { int bpp, offset, hsz; unsigned int mr,mg,mb,ma, all_a; + int extra_read; } stbi__bmp_data; +static int stbi__bmp_set_mask_defaults(stbi__bmp_data *info, int compress) +{ + // BI_BITFIELDS specifies masks explicitly, don't override + if (compress == 3) + return 1; + + if (compress == 0) { + if (info->bpp == 16) { + info->mr = 31u << 10; + info->mg = 31u << 5; + info->mb = 31u << 0; + } else if (info->bpp == 32) { + info->mr = 0xffu << 16; + info->mg = 0xffu << 8; + info->mb = 0xffu << 0; + info->ma = 0xffu << 24; + info->all_a = 0; // if all_a is 0 at end, then we loaded alpha channel but it was all 0 + } else { + // otherwise, use defaults, which is all-0 + info->mr = info->mg = info->mb = info->ma = 0; + } + return 1; + } + return 0; // error +} + static void *stbi__bmp_parse_header(stbi__context *s, stbi__bmp_data *info) { int hsz; @@ -4732,7 +5391,10 @@ static void *stbi__bmp_parse_header(stbi__context *s, stbi__bmp_data *info) info->offset = stbi__get32le(s); info->hsz = hsz = stbi__get32le(s); info->mr = info->mg = info->mb = info->ma = 0; - + info->extra_read = 14; + + if (info->offset < 0) return stbi__errpuc("bad BMP", "bad BMP"); + if (hsz != 12 && hsz != 40 && hsz != 56 && hsz != 108 && hsz != 124) return stbi__errpuc("unknown BMP", "BMP type not supported: unknown"); if (hsz == 12) { s->img_x = stbi__get16le(s); @@ -4743,10 +5405,11 @@ static void *stbi__bmp_parse_header(stbi__context *s, stbi__bmp_data *info) } if (stbi__get16le(s) != 1) return stbi__errpuc("bad BMP", "bad BMP"); info->bpp = stbi__get16le(s); - if (info->bpp == 1) return stbi__errpuc("monochrome", "BMP type not supported: 1-bit"); if (hsz != 12) { int compress = stbi__get32le(s); if (compress == 1 || compress == 2) return stbi__errpuc("BMP RLE", "BMP type not supported: RLE"); + if (compress >= 4) return stbi__errpuc("BMP JPEG/PNG", "BMP type not supported: unsupported compression"); // this includes PNG/JPEG modes + if (compress == 3 && info->bpp != 16 && info->bpp != 32) return stbi__errpuc("bad BMP", "bad BMP"); // bitfields requires 16 or 32 bits/pixel stbi__get32le(s); // discard sizeof stbi__get32le(s); // discard hres stbi__get32le(s); // discard vres @@ -4761,21 +5424,12 @@ static void *stbi__bmp_parse_header(stbi__context *s, stbi__bmp_data *info) } if (info->bpp == 16 || info->bpp == 32) { if (compress == 0) { - if (info->bpp == 32) { - info->mr = 0xffu << 16; - info->mg = 0xffu << 8; - info->mb = 0xffu << 0; - info->ma = 0xffu << 24; - info->all_a = 0; // if all_a is 0 at end, then we loaded alpha channel but it was all 0 - } else { - info->mr = 31u << 10; - info->mg = 31u << 5; - info->mb = 31u << 0; - } + stbi__bmp_set_mask_defaults(info, compress); } else if (compress == 3) { info->mr = stbi__get32le(s); info->mg = stbi__get32le(s); info->mb = stbi__get32le(s); + info->extra_read += 12; // not documented, but generated by photoshop and handled by mspaint if (info->mr == info->mg && info->mg == info->mb) { // ?!?!? 
@@ -4785,6 +5439,7 @@ static void *stbi__bmp_parse_header(stbi__context *s, stbi__bmp_data *info) return stbi__errpuc("bad BMP", "bad BMP"); } } else { + // V4/V5 header int i; if (hsz != 108 && hsz != 124) return stbi__errpuc("bad BMP", "bad BMP"); @@ -4792,6 +5447,8 @@ static void *stbi__bmp_parse_header(stbi__context *s, stbi__bmp_data *info) info->mg = stbi__get32le(s); info->mb = stbi__get32le(s); info->ma = stbi__get32le(s); + if (compress != 3) // override mr/mg/mb unless in BI_BITFIELDS mode, as per docs + stbi__bmp_set_mask_defaults(info, compress); stbi__get32le(s); // discard color space for (i=0; i < 12; ++i) stbi__get32le(s); // discard color space parameters @@ -4807,7 +5464,7 @@ static void *stbi__bmp_parse_header(stbi__context *s, stbi__bmp_data *info) } -static stbi_uc *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp) +static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) { stbi_uc *out; unsigned int mr=0,mg=0,mb=0,ma=0, all_a; @@ -4815,14 +5472,18 @@ static stbi_uc *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int int psize=0,i,j,width; int flip_vertically, pad, target; stbi__bmp_data info; + STBI_NOTUSED(ri); - info.all_a = 255; + info.all_a = 255; if (stbi__bmp_parse_header(s, &info) == NULL) return NULL; // error code already set flip_vertically = ((int) s->img_y) > 0; s->img_y = abs((int) s->img_y); + if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + mr = info.mr; mg = info.mg; mb = info.mb; @@ -4831,19 +5492,31 @@ static stbi_uc *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int if (info.hsz == 12) { if (info.bpp < 24) - psize = (info.offset - 14 - 24) / 3; + psize = (info.offset - info.extra_read - 24) / 3; } else { if (info.bpp < 16) - psize = (info.offset - 14 - info.hsz) >> 2; + psize = (info.offset - info.extra_read - info.hsz) >> 2; + } + if (psize == 0) { + if (info.offset != s->callback_already_read + (s->img_buffer - s->img_buffer_original)) { + return stbi__errpuc("bad offset", "Corrupt BMP"); + } } - s->img_n = ma ? 4 : 3; + if (info.bpp == 24 && ma == 0xff000000) + s->img_n = 3; + else + s->img_n = ma ? 4 : 3; if (req_comp && req_comp >= 3) // we can directly decode 3 or 4 target = req_comp; else target = s->img_n; // if they want monochrome, we'll post-convert - out = (stbi_uc *) stbi__malloc(target * s->img_x * s->img_y); + // sanity-check size + if (!stbi__mad3sizes_valid(target, s->img_x, s->img_y, 0)) + return stbi__errpuc("too large", "Corrupt BMP"); + + out = (stbi_uc *) stbi__malloc_mad3(target, s->img_x, s->img_y, 0); if (!out) return stbi__errpuc("outofmem", "Out of memory"); if (info.bpp < 16) { int z=0; @@ -4855,36 +5528,56 @@ static stbi_uc *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int if (info.hsz != 12) stbi__get8(s); pal[i][3] = 255; } - stbi__skip(s, info.offset - 14 - info.hsz - psize * (info.hsz == 12 ? 3 : 4)); - if (info.bpp == 4) width = (s->img_x + 1) >> 1; + stbi__skip(s, info.offset - info.extra_read - info.hsz - psize * (info.hsz == 12 ? 
3 : 4)); + if (info.bpp == 1) width = (s->img_x + 7) >> 3; + else if (info.bpp == 4) width = (s->img_x + 1) >> 1; else if (info.bpp == 8) width = s->img_x; else { STBI_FREE(out); return stbi__errpuc("bad bpp", "Corrupt BMP"); } pad = (-width)&3; - for (j=0; j < (int) s->img_y; ++j) { - for (i=0; i < (int) s->img_x; i += 2) { - int v=stbi__get8(s),v2=0; - if (info.bpp == 4) { - v2 = v & 15; - v >>= 4; + if (info.bpp == 1) { + for (j=0; j < (int) s->img_y; ++j) { + int bit_offset = 7, v = stbi__get8(s); + for (i=0; i < (int) s->img_x; ++i) { + int color = (v>>bit_offset)&0x1; + out[z++] = pal[color][0]; + out[z++] = pal[color][1]; + out[z++] = pal[color][2]; + if (target == 4) out[z++] = 255; + if (i+1 == (int) s->img_x) break; + if((--bit_offset) < 0) { + bit_offset = 7; + v = stbi__get8(s); + } } - out[z++] = pal[v][0]; - out[z++] = pal[v][1]; - out[z++] = pal[v][2]; - if (target == 4) out[z++] = 255; - if (i+1 == (int) s->img_x) break; - v = (info.bpp == 8) ? stbi__get8(s) : v2; - out[z++] = pal[v][0]; - out[z++] = pal[v][1]; - out[z++] = pal[v][2]; - if (target == 4) out[z++] = 255; + stbi__skip(s, pad); + } + } else { + for (j=0; j < (int) s->img_y; ++j) { + for (i=0; i < (int) s->img_x; i += 2) { + int v=stbi__get8(s),v2=0; + if (info.bpp == 4) { + v2 = v & 15; + v >>= 4; + } + out[z++] = pal[v][0]; + out[z++] = pal[v][1]; + out[z++] = pal[v][2]; + if (target == 4) out[z++] = 255; + if (i+1 == (int) s->img_x) break; + v = (info.bpp == 8) ? stbi__get8(s) : v2; + out[z++] = pal[v][0]; + out[z++] = pal[v][1]; + out[z++] = pal[v][2]; + if (target == 4) out[z++] = 255; + } + stbi__skip(s, pad); } - stbi__skip(s, pad); } } else { int rshift=0,gshift=0,bshift=0,ashift=0,rcount=0,gcount=0,bcount=0,acount=0; int z = 0; int easy=0; - stbi__skip(s, info.offset - 14 - info.hsz); + stbi__skip(s, info.offset - info.extra_read - info.hsz); if (info.bpp == 24) width = 3 * s->img_x; else if (info.bpp == 16) width = 2*s->img_x; else /* bpp = 32 and pad = 0 */ width=0; @@ -4902,6 +5595,7 @@ static stbi_uc *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int gshift = stbi__high_bit(mg)-7; gcount = stbi__bitcount(mg); bshift = stbi__high_bit(mb)-7; bcount = stbi__bitcount(mb); ashift = stbi__high_bit(ma)-7; acount = stbi__bitcount(ma); + if (rcount > 8 || gcount > 8 || bcount > 8 || acount > 8) { STBI_FREE(out); return stbi__errpuc("bad masks", "Corrupt BMP"); } } for (j=0; j < (int) s->img_y; ++j) { if (easy) { @@ -4919,7 +5613,7 @@ static stbi_uc *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int int bpp = info.bpp; for (i=0; i < (int) s->img_x; ++i) { stbi__uint32 v = (bpp == 16 ? 
(stbi__uint32) stbi__get16le(s) : stbi__get32le(s)); - int a; + unsigned int a; out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mr, rshift, rcount)); out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mg, gshift, gcount)); out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mb, bshift, bcount)); @@ -4931,7 +5625,7 @@ static stbi_uc *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int stbi__skip(s, pad); } } - + // if alpha channel is all 0s, replace with all 255s if (target == 4 && all_a == 0) for (i=4*s->img_x*s->img_y-1; i >= 0; i -= 4) @@ -4943,7 +5637,7 @@ static stbi_uc *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int stbi_uc *p1 = out + j *s->img_x*target; stbi_uc *p2 = out + (s->img_y-1-j)*s->img_x*target; for (i=0; i < (int) s->img_x*target; ++i) { - t = p1[i], p1[i] = p2[i], p2[i] = t; + t = p1[i]; p1[i] = p2[i]; p2[i] = t; } } } @@ -4967,14 +5661,14 @@ static stbi_uc *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int static int stbi__tga_get_comp(int bits_per_pixel, int is_grey, int* is_rgb16) { // only RGB or RGBA (incl. 16bit) or grey allowed - if(is_rgb16) *is_rgb16 = 0; + if (is_rgb16) *is_rgb16 = 0; switch(bits_per_pixel) { case 8: return STBI_grey; case 16: if(is_grey) return STBI_grey_alpha; - // else: fall-through + // fallthrough case 15: if(is_rgb16) *is_rgb16 = 1; - return STBI_rgb; - case 24: // fall-through + return STBI_rgb; + case 24: // fallthrough case 32: return bits_per_pixel/8; default: return 0; } @@ -5077,18 +5771,18 @@ static int stbi__tga_test(stbi__context *s) } // read 16bit value and convert to 24bit RGB -void stbi__tga_read_rgb16(stbi__context *s, stbi_uc* out) +static void stbi__tga_read_rgb16(stbi__context *s, stbi_uc* out) { - stbi__uint16 px = stbi__get16le(s); + stbi__uint16 px = (stbi__uint16)stbi__get16le(s); stbi__uint16 fiveBitMask = 31; // we have 3 channels with 5bits each int r = (px >> 10) & fiveBitMask; int g = (px >> 5) & fiveBitMask; int b = px & fiveBitMask; // Note that this saves the data in RGB(A) order, so it doesn't need to be swapped later - out[0] = (r * 255)/31; - out[1] = (g * 255)/31; - out[2] = (b * 255)/31; + out[0] = (stbi_uc)((r * 255)/31); + out[1] = (stbi_uc)((g * 255)/31); + out[2] = (stbi_uc)((b * 255)/31); // some people claim that the most significant bit might be used for alpha // (possibly if an alpha-bit is set in the "image descriptor byte") @@ -5096,7 +5790,7 @@ void stbi__tga_read_rgb16(stbi__context *s, stbi_uc* out) // so let's treat all 15 and 16bit TGAs as RGB with no alpha. 
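// As a standalone sketch of the same 15/16-bit unpacking shown above
// (A1R5G5B5 layout, each 5-bit channel expanded with c*255/31); the helper
// name and sample value below are illustrative only:
#include <stdio.h>

static void unpack_argb1555(unsigned short px, unsigned char rgb[3])
{
    unsigned r = (px >> 10) & 31;              // bits 10..14
    unsigned g = (px >>  5) & 31;              // bits 5..9
    unsigned b =  px        & 31;              // bits 0..4
    rgb[0] = (unsigned char)((r * 255) / 31);
    rgb[1] = (unsigned char)((g * 255) / 31);
    rgb[2] = (unsigned char)((b * 255) / 31);
}

int main(void)
{
    unsigned char rgb[3];
    unpack_argb1555(0x7C1F, rgb);                  // max red, no green, max blue
    printf("%u %u %u\n", rgb[0], rgb[1], rgb[2]);  // prints 255 0 255
    return 0;
}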
} -static stbi_uc *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp) +static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) { // read in the TGA header stuff int tga_offset = stbi__get8(s); @@ -5118,10 +5812,16 @@ static stbi_uc *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int unsigned char *tga_data; unsigned char *tga_palette = NULL; int i, j; - unsigned char raw_data[4]; + unsigned char raw_data[4] = {0}; int RLE_count = 0; int RLE_repeating = 0; int read_next_pixel = 1; + STBI_NOTUSED(ri); + STBI_NOTUSED(tga_x_origin); // @TODO + STBI_NOTUSED(tga_y_origin); // @TODO + + if (tga_height > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + if (tga_width > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); // do a tiny bit of precessing if ( tga_image_type >= 8 ) @@ -5143,7 +5843,10 @@ static stbi_uc *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int *y = tga_height; if (comp) *comp = tga_comp; - tga_data = (unsigned char*)stbi__malloc( (size_t)tga_width * tga_height * tga_comp ); + if (!stbi__mad3sizes_valid(tga_width, tga_height, tga_comp, 0)) + return stbi__errpuc("too large", "Corrupt TGA"); + + tga_data = (unsigned char*)stbi__malloc_mad3(tga_width, tga_height, tga_comp, 0); if (!tga_data) return stbi__errpuc("outofmem", "Out of memory"); // skip to the data's starting position (offset usually = 0) @@ -5159,10 +5862,15 @@ static stbi_uc *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int // do I need to load a palette? if ( tga_indexed) { + if (tga_palette_len == 0) { /* you have to have at least one entry! */ + STBI_FREE(tga_data); + return stbi__errpuc("bad palette", "Corrupt TGA"); + } + // any data to skip? (offset usually = 0) stbi__skip(s, tga_palette_start ); // load the palette - tga_palette = (unsigned char*)stbi__malloc( tga_palette_len * tga_comp ); + tga_palette = (unsigned char*)stbi__malloc_mad2(tga_palette_len, tga_comp, 0); if (!tga_palette) { STBI_FREE(tga_data); return stbi__errpuc("outofmem", "Out of memory"); @@ -5282,6 +5990,7 @@ static stbi_uc *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int // Microsoft's C compilers happy... [8^( tga_palette_start = tga_palette_len = tga_palette_bits = tga_x_origin = tga_y_origin = 0; + STBI_NOTUSED(tga_palette_start); // OK, done return tga_data; } @@ -5298,14 +6007,53 @@ static int stbi__psd_test(stbi__context *s) return r; } -static stbi_uc *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp) +static int stbi__psd_decode_rle(stbi__context *s, stbi_uc *p, int pixelCount) +{ + int count, nleft, len; + + count = 0; + while ((nleft = pixelCount - count) > 0) { + len = stbi__get8(s); + if (len == 128) { + // No-op. + } else if (len < 128) { + // Copy next len+1 bytes literally. + len++; + if (len > nleft) return 0; // corrupt data + count += len; + while (len) { + *p = stbi__get8(s); + p += 4; + len--; + } + } else if (len > 128) { + stbi_uc val; + // Next -len+1 bytes in the dest are replicated from next source byte. + // (Interpret len as a negative 8-bit int.) 
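// In other words: for a control byte n > 128, the PackBits-style rule used
// here gives a run length of 1 - (signed char)n, and 257 - n computes the
// same value without a signed cast. A tiny standalone check of that identity
// (illustrative only, assumes the usual two's-complement targets):
#include <stdio.h>

int main(void)
{
    for (int n = 129; n < 256; ++n) {
        int run_signed   = 1 - (signed char)n;   // run length per the RLE definition
        int run_unsigned = 257 - n;              // form used in the decoder above
        if (run_signed != run_unsigned) { printf("mismatch at %d\n", n); return 1; }
    }
    printf("257 - n matches 1 - (signed char)n for all n in 129..255\n");
    return 0;
}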
+ len = 257 - len; + if (len > nleft) return 0; // corrupt data + val = stbi__get8(s); + count += len; + while (len) { + *p = val; + p += 4; + len--; + } + } + } + + return 1; +} + +static void *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc) { - int pixelCount; + int pixelCount; int channelCount, compression; - int channel, i, count, len; + int channel, i; int bitdepth; int w,h; stbi_uc *out; + STBI_NOTUSED(ri); // Check identifier if (stbi__get32be(s) != 0x38425053) // "8BPS" @@ -5327,6 +6075,9 @@ static stbi_uc *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int h = stbi__get32be(s); w = stbi__get32be(s); + if (h > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + if (w > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + // Make sure the depth is 8 bits. bitdepth = stbi__get16be(s); if (bitdepth != 8 && bitdepth != 16) @@ -5362,8 +6113,18 @@ static stbi_uc *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int if (compression > 1) return stbi__errpuc("bad compression", "PSD has an unknown compression format"); + // Check size + if (!stbi__mad3sizes_valid(4, w, h, 0)) + return stbi__errpuc("too large", "Corrupt PSD"); + // Create the destination image. - out = (stbi_uc *) stbi__malloc(4 * w*h); + + if (!compression && bitdepth == 16 && bpc == 16) { + out = (stbi_uc *) stbi__malloc_mad3(8, w, h, 0); + ri->bits_per_channel = 16; + } else + out = (stbi_uc *) stbi__malloc(4 * w*h); + if (!out) return stbi__errpuc("outofmem", "Out of memory"); pixelCount = w*h; @@ -5380,7 +6141,7 @@ static stbi_uc *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int // Else if n is 128, noop. // Endloop - // The RLE-compressed data is preceeded by a 2-byte data count for each row in the data, + // The RLE-compressed data is preceded by a 2-byte data count for each row in the data, // which we're going to just skip. stbi__skip(s, h * channelCount * 2 ); @@ -5395,82 +6156,86 @@ static stbi_uc *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int *p = (channel == 3 ? 255 : 0); } else { // Read the RLE data. - count = 0; - while (count < pixelCount) { - len = stbi__get8(s); - if (len == 128) { - // No-op. - } else if (len < 128) { - // Copy next len+1 bytes literally. - len++; - count += len; - while (len) { - *p = stbi__get8(s); - p += 4; - len--; - } - } else if (len > 128) { - stbi_uc val; - // Next -len+1 bytes in the dest are replicated from next source byte. - // (Interpret len as a negative 8-bit int.) - len ^= 0x0FF; - len += 2; - val = stbi__get8(s); - count += len; - while (len) { - *p = val; - p += 4; - len--; - } - } + if (!stbi__psd_decode_rle(s, p, pixelCount)) { + STBI_FREE(out); + return stbi__errpuc("corrupt", "bad RLE data"); } } } } else { // We're at the raw image data. It's each channel in order (Red, Green, Blue, Alpha, ...) - // where each channel consists of an 8-bit value for each pixel in the image. + // where each channel consists of an 8-bit (or 16-bit) value for each pixel in the image. // Read the data by channel. for (channel = 0; channel < 4; channel++) { - stbi_uc *p; - - p = out + channel; if (channel >= channelCount) { // Fill this channel with default data. - stbi_uc val = channel == 3 ? 255 : 0; - for (i = 0; i < pixelCount; i++, p += 4) - *p = val; - } else { - // Read the data. 
- if (bitdepth == 16) { - for (i = 0; i < pixelCount; i++, p += 4) - *p = (stbi_uc) (stbi__get16be(s) >> 8); + if (bitdepth == 16 && bpc == 16) { + stbi__uint16 *q = ((stbi__uint16 *) out) + channel; + stbi__uint16 val = channel == 3 ? 65535 : 0; + for (i = 0; i < pixelCount; i++, q += 4) + *q = val; } else { + stbi_uc *p = out+channel; + stbi_uc val = channel == 3 ? 255 : 0; for (i = 0; i < pixelCount; i++, p += 4) - *p = stbi__get8(s); + *p = val; + } + } else { + if (ri->bits_per_channel == 16) { // output bpc + stbi__uint16 *q = ((stbi__uint16 *) out) + channel; + for (i = 0; i < pixelCount; i++, q += 4) + *q = (stbi__uint16) stbi__get16be(s); + } else { + stbi_uc *p = out+channel; + if (bitdepth == 16) { // input bpc + for (i = 0; i < pixelCount; i++, p += 4) + *p = (stbi_uc) (stbi__get16be(s) >> 8); + } else { + for (i = 0; i < pixelCount; i++, p += 4) + *p = stbi__get8(s); + } } } } } + // remove weird white matte from PSD if (channelCount >= 4) { - for (i=0; i < w*h; ++i) { - unsigned char *pixel = out + 4*i; - if (pixel[3] != 0 && pixel[3] != 255) { - // remove weird white matte from PSD - float a = pixel[3] / 255.0f; - float ra = 1.0f / a; - float inv_a = 255.0f * (1 - ra); - pixel[0] = (unsigned char) (pixel[0]*ra + inv_a); - pixel[1] = (unsigned char) (pixel[1]*ra + inv_a); - pixel[2] = (unsigned char) (pixel[2]*ra + inv_a); + if (ri->bits_per_channel == 16) { + for (i=0; i < w*h; ++i) { + stbi__uint16 *pixel = (stbi__uint16 *) out + 4*i; + if (pixel[3] != 0 && pixel[3] != 65535) { + float a = pixel[3] / 65535.0f; + float ra = 1.0f / a; + float inv_a = 65535.0f * (1 - ra); + pixel[0] = (stbi__uint16) (pixel[0]*ra + inv_a); + pixel[1] = (stbi__uint16) (pixel[1]*ra + inv_a); + pixel[2] = (stbi__uint16) (pixel[2]*ra + inv_a); + } + } + } else { + for (i=0; i < w*h; ++i) { + unsigned char *pixel = out + 4*i; + if (pixel[3] != 0 && pixel[3] != 255) { + float a = pixel[3] / 255.0f; + float ra = 1.0f / a; + float inv_a = 255.0f * (1 - ra); + pixel[0] = (unsigned char) (pixel[0]*ra + inv_a); + pixel[1] = (unsigned char) (pixel[1]*ra + inv_a); + pixel[2] = (unsigned char) (pixel[2]*ra + inv_a); + } } } } + // convert to desired output format if (req_comp && req_comp != 4) { - out = stbi__convert_format(out, 4, req_comp, w, h); + if (ri->bits_per_channel == 16) + out = (stbi_uc *) stbi__convert_format16((stbi__uint16 *) out, 4, req_comp, w, h); + else + out = stbi__convert_format(out, 4, req_comp, w, h); if (out == NULL) return out; // stbi__convert_format frees input on failure } @@ -5654,25 +6419,33 @@ static stbi_uc *stbi__pic_load_core(stbi__context *s,int width,int height,int *c return result; } -static stbi_uc *stbi__pic_load(stbi__context *s,int *px,int *py,int *comp,int req_comp) +static void *stbi__pic_load(stbi__context *s,int *px,int *py,int *comp,int req_comp, stbi__result_info *ri) { stbi_uc *result; - int i, x,y; + int i, x,y, internal_comp; + STBI_NOTUSED(ri); + + if (!comp) comp = &internal_comp; for (i=0; i<92; ++i) stbi__get8(s); x = stbi__get16be(s); y = stbi__get16be(s); + + if (y > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + if (x > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (pic header)"); - if ((1 << 28) / x < y) return stbi__errpuc("too large", "Image too large to decode"); + if (!stbi__mad3sizes_valid(x, y, 4, 0)) return stbi__errpuc("too large", "PIC image too large to decode"); stbi__get32be(s); //skip 
`ratio' stbi__get16be(s); //skip `fields' stbi__get16be(s); //skip `pad' // intermediate buffer is RGBA - result = (stbi_uc *) stbi__malloc(x*y*4); + result = (stbi_uc *) stbi__malloc_mad3(x, y, 4, 0); + if (!result) return stbi__errpuc("outofmem", "Out of memory"); memset(result, 0xff, x*y*4); if (!stbi__pic_load_core(s,x,y,comp, result)) { @@ -5709,11 +6482,13 @@ typedef struct typedef struct { int w,h; - stbi_uc *out, *old_out; // output buffer (always 4 components) - int flags, bgindex, ratio, transparent, eflags, delay; + stbi_uc *out; // output buffer (always 4 components) + stbi_uc *background; // The current "background" as far as a gif is concerned + stbi_uc *history; + int flags, bgindex, ratio, transparent, eflags; stbi_uc pal[256][4]; stbi_uc lpal[256][4]; - stbi__gif_lzw codes[4096]; + stbi__gif_lzw codes[8192]; stbi_uc *color_table; int parse, step; int lflags; @@ -5721,6 +6496,7 @@ typedef struct int max_x, max_y; int cur_x, cur_y; int line_size; + int delay; } stbi__gif; static int stbi__gif_test_raw(stbi__context *s) @@ -5769,6 +6545,9 @@ static int stbi__gif_header(stbi__context *s, stbi__gif *g, int *comp, int is_in g->ratio = stbi__get8(s); g->transparent = -1; + if (g->w > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); + if (g->h > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); + if (comp != 0) *comp = 4; // can't actually tell whether it's 3 or 4 until we parse the comments if (is_info) return 1; @@ -5782,6 +6561,7 @@ static int stbi__gif_header(stbi__context *s, stbi__gif *g, int *comp, int is_in static int stbi__gif_info_raw(stbi__context *s, int *x, int *y, int *comp) { stbi__gif* g = (stbi__gif*) stbi__malloc(sizeof(stbi__gif)); + if (!g) return stbi__err("outofmem", "Out of memory"); if (!stbi__gif_header(s, g, comp, 1)) { STBI_FREE(g); stbi__rewind( s ); @@ -5796,6 +6576,7 @@ static int stbi__gif_info_raw(stbi__context *s, int *x, int *y, int *comp) static void stbi__out_gif_code(stbi__gif *g, stbi__uint16 code) { stbi_uc *p, *c; + int idx; // recurse to decode the prefixes, since the linked-list is backwards, // and working backwards through an interleaved image would be nasty @@ -5804,10 +6585,12 @@ static void stbi__out_gif_code(stbi__gif *g, stbi__uint16 code) if (g->cur_y >= g->max_y) return; - p = &g->out[g->cur_x + g->cur_y]; - c = &g->color_table[g->codes[code].suffix * 4]; + idx = g->cur_x + g->cur_y; + p = &g->out[idx]; + g->history[idx / 4] = 1; - if (c[3] >= 128) { + c = &g->color_table[g->codes[code].suffix * 4]; + if (c[3] > 128) { // don't render transparent pixels; p[0] = c[2]; p[1] = c[1]; p[2] = c[0]; @@ -5881,11 +6664,16 @@ static stbi_uc *stbi__process_gif_raster(stbi__context *s, stbi__gif *g) stbi__skip(s,len); return g->out; } else if (code <= avail) { - if (first) return stbi__errpuc("no clear code", "Corrupt GIF"); + if (first) { + return stbi__errpuc("no clear code", "Corrupt GIF"); + } if (oldcode >= 0) { p = &g->codes[avail++]; - if (avail > 4096) return stbi__errpuc("too many codes", "Corrupt GIF"); + if (avail > 8192) { + return stbi__errpuc("too many codes", "Corrupt GIF"); + } + p->prefix = (stbi__int16) oldcode; p->first = g->codes[oldcode].first; p->suffix = (code == avail) ? 
p->first : g->codes[code].first; @@ -5907,59 +6695,77 @@ static stbi_uc *stbi__process_gif_raster(stbi__context *s, stbi__gif *g) } } -static void stbi__fill_gif_background(stbi__gif *g, int x0, int y0, int x1, int y1) -{ - int x, y; - stbi_uc *c = g->pal[g->bgindex]; - for (y = y0; y < y1; y += 4 * g->w) { - for (x = x0; x < x1; x += 4) { - stbi_uc *p = &g->out[y + x]; - p[0] = c[2]; - p[1] = c[1]; - p[2] = c[0]; - p[3] = 0; - } - } -} - // this function is designed to support animated gifs, although stb_image doesn't support it -static stbi_uc *stbi__gif_load_next(stbi__context *s, stbi__gif *g, int *comp, int req_comp) +// two back is the image from two frames ago, used for a very specific disposal format +static stbi_uc *stbi__gif_load_next(stbi__context *s, stbi__gif *g, int *comp, int req_comp, stbi_uc *two_back) { - int i; - stbi_uc *prev_out = 0; + int dispose; + int first_frame; + int pi; + int pcount; + STBI_NOTUSED(req_comp); - if (g->out == 0 && !stbi__gif_header(s, g, comp,0)) - return 0; // stbi__g_failure_reason set by stbi__gif_header + // on first frame, any non-written pixels get the background colour (non-transparent) + first_frame = 0; + if (g->out == 0) { + if (!stbi__gif_header(s, g, comp,0)) return 0; // stbi__g_failure_reason set by stbi__gif_header + if (!stbi__mad3sizes_valid(4, g->w, g->h, 0)) + return stbi__errpuc("too large", "GIF image is too large"); + pcount = g->w * g->h; + g->out = (stbi_uc *) stbi__malloc(4 * pcount); + g->background = (stbi_uc *) stbi__malloc(4 * pcount); + g->history = (stbi_uc *) stbi__malloc(pcount); + if (!g->out || !g->background || !g->history) + return stbi__errpuc("outofmem", "Out of memory"); + + // image is treated as "transparent" at the start - ie, nothing overwrites the current background; + // background colour is only used for pixels that are not rendered first frame, after that "background" + // color refers to the color that was there the previous frame. + memset(g->out, 0x00, 4 * pcount); + memset(g->background, 0x00, 4 * pcount); // state of the background (starts transparent) + memset(g->history, 0x00, pcount); // pixels that were affected previous frame + first_frame = 1; + } else { + // second frame - how do we dispose of the previous one? 
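// For reference, the disposal method comes from the Graphic Control Extension
// "packed fields" byte that is parsed further below; a small standalone sketch
// of that layout (the sample byte here is made up for illustration):
#include <stdio.h>

int main(void)
{
    unsigned char eflags = 0x09;                // example GCE packed fields: 0000 1001
    int dispose     = (eflags & 0x1C) >> 2;     // bits 2..4: disposal method
    int transparent =  eflags & 0x01;           // bit 0: transparent-color flag
    static const char *names[] = {
        "unspecified", "do not dispose",
        "restore to background", "restore to previous"
    };
    printf("dispose=%d (%s), transparent=%d\n",
           dispose, dispose <= 3 ? names[dispose] : "reserved", transparent);
    return 0;
}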
+ dispose = (g->eflags & 0x1C) >> 2; + pcount = g->w * g->h; - prev_out = g->out; - g->out = (stbi_uc *) stbi__malloc(4 * g->w * g->h); - if (g->out == 0) return stbi__errpuc("outofmem", "Out of memory"); + if ((dispose == 3) && (two_back == 0)) { + dispose = 2; // if I don't have an image to revert back to, default to the old background + } - switch ((g->eflags & 0x1C) >> 2) { - case 0: // unspecified (also always used on 1st frame) - stbi__fill_gif_background(g, 0, 0, 4 * g->w, 4 * g->w * g->h); - break; - case 1: // do not dispose - if (prev_out) memcpy(g->out, prev_out, 4 * g->w * g->h); - g->old_out = prev_out; - break; - case 2: // dispose to background - if (prev_out) memcpy(g->out, prev_out, 4 * g->w * g->h); - stbi__fill_gif_background(g, g->start_x, g->start_y, g->max_x, g->max_y); - break; - case 3: // dispose to previous - if (g->old_out) { - for (i = g->start_y; i < g->max_y; i += 4 * g->w) - memcpy(&g->out[i + g->start_x], &g->old_out[i + g->start_x], g->max_x - g->start_x); + if (dispose == 3) { // use previous graphic + for (pi = 0; pi < pcount; ++pi) { + if (g->history[pi]) { + memcpy( &g->out[pi * 4], &two_back[pi * 4], 4 ); + } } - break; + } else if (dispose == 2) { + // restore what was changed last frame to background before that frame; + for (pi = 0; pi < pcount; ++pi) { + if (g->history[pi]) { + memcpy( &g->out[pi * 4], &g->background[pi * 4], 4 ); + } + } + } else { + // This is a non-disposal case eithe way, so just + // leave the pixels as is, and they will become the new background + // 1: do not dispose + // 0: not specified. + } + + // background is what out is after the undoing of the previou frame; + memcpy( g->background, g->out, 4 * g->w * g->h ); } + // clear my history; + memset( g->history, 0x00, g->w * g->h ); // pixels that were affected previous frame + for (;;) { - switch (stbi__get8(s)) { + int tag = stbi__get8(s); + switch (tag) { case 0x2C: /* Image Descriptor */ { - int prev_trans = -1; stbi__int32 x, y, w, h; stbi_uc *o; @@ -5978,6 +6784,13 @@ static stbi_uc *stbi__gif_load_next(stbi__context *s, stbi__gif *g, int *comp, i g->cur_x = g->start_x; g->cur_y = g->start_y; + // if the width of the specified rectangle is 0, that means + // we may not see *any* pixels or the image is malformed; + // to make sure this is caught, move the current y down to + // max_y (which is what out_gif_code checks). + if (w == 0) + g->cur_y = g->max_y; + g->lflags = stbi__get8(s); if (g->lflags & 0x40) { @@ -5992,19 +6805,24 @@ static stbi_uc *stbi__gif_load_next(stbi__context *s, stbi__gif *g, int *comp, i stbi__gif_parse_colortable(s,g->lpal, 2 << (g->lflags & 7), g->eflags & 0x01 ? 
g->transparent : -1); g->color_table = (stbi_uc *) g->lpal; } else if (g->flags & 0x80) { - if (g->transparent >= 0 && (g->eflags & 0x01)) { - prev_trans = g->pal[g->transparent][3]; - g->pal[g->transparent][3] = 0; - } g->color_table = (stbi_uc *) g->pal; } else return stbi__errpuc("missing color table", "Corrupt GIF"); o = stbi__process_gif_raster(s, g); - if (o == NULL) return NULL; - - if (prev_trans != -1) - g->pal[g->transparent][3] = (stbi_uc) prev_trans; + if (!o) return NULL; + + // if this was the first frame, + pcount = g->w * g->h; + if (first_frame && (g->bgindex > 0)) { + // if first frame, any pixel not drawn to gets the background color + for (pi = 0; pi < pcount; ++pi) { + if (g->history[pi] == 0) { + g->pal[g->bgindex][3] = 255; // just in case it was made transparent, undo that; It will be reset next frame if need be; + memcpy( &g->out[pi * 4], &g->pal[g->bgindex], 4 ); + } + } + } return o; } @@ -6012,19 +6830,35 @@ static stbi_uc *stbi__gif_load_next(stbi__context *s, stbi__gif *g, int *comp, i case 0x21: // Comment Extension. { int len; - if (stbi__get8(s) == 0xF9) { // Graphic Control Extension. + int ext = stbi__get8(s); + if (ext == 0xF9) { // Graphic Control Extension. len = stbi__get8(s); if (len == 4) { g->eflags = stbi__get8(s); - g->delay = stbi__get16le(s); - g->transparent = stbi__get8(s); + g->delay = 10 * stbi__get16le(s); // delay - 1/100th of a second, saving as 1/1000ths. + + // unset old transparent + if (g->transparent >= 0) { + g->pal[g->transparent][3] = 255; + } + if (g->eflags & 0x01) { + g->transparent = stbi__get8(s); + if (g->transparent >= 0) { + g->pal[g->transparent][3] = 0; + } + } else { + // don't need transparent + stbi__skip(s, 1); + g->transparent = -1; + } } else { stbi__skip(s, len); break; } } - while ((len = stbi__get8(s)) != 0) + while ((len = stbi__get8(s)) != 0) { stbi__skip(s, len); + } break; } @@ -6035,27 +6869,130 @@ static stbi_uc *stbi__gif_load_next(stbi__context *s, stbi__gif *g, int *comp, i return stbi__errpuc("unknown code", "Corrupt GIF"); } } +} - STBI_NOTUSED(req_comp); +static void *stbi__load_gif_main_outofmem(stbi__gif *g, stbi_uc *out, int **delays) +{ + STBI_FREE(g->out); + STBI_FREE(g->history); + STBI_FREE(g->background); + + if (out) STBI_FREE(out); + if (delays && *delays) STBI_FREE(*delays); + return stbi__errpuc("outofmem", "Out of memory"); +} + +static void *stbi__load_gif_main(stbi__context *s, int **delays, int *x, int *y, int *z, int *comp, int req_comp) +{ + if (stbi__gif_test(s)) { + int layers = 0; + stbi_uc *u = 0; + stbi_uc *out = 0; + stbi_uc *two_back = 0; + stbi__gif g; + int stride; + int out_size = 0; + int delays_size = 0; + + STBI_NOTUSED(out_size); + STBI_NOTUSED(delays_size); + + memset(&g, 0, sizeof(g)); + if (delays) { + *delays = 0; + } + + do { + u = stbi__gif_load_next(s, &g, comp, req_comp, two_back); + if (u == (stbi_uc *) s) u = 0; // end of animated gif marker + + if (u) { + *x = g.w; + *y = g.h; + ++layers; + stride = g.w * g.h * 4; + + if (out) { + void *tmp = (stbi_uc*) STBI_REALLOC_SIZED( out, out_size, layers * stride ); + if (!tmp) + return stbi__load_gif_main_outofmem(&g, out, delays); + else { + out = (stbi_uc*) tmp; + out_size = layers * stride; + } + + if (delays) { + int *new_delays = (int*) STBI_REALLOC_SIZED( *delays, delays_size, sizeof(int) * layers ); + if (!new_delays) + return stbi__load_gif_main_outofmem(&g, out, delays); + *delays = new_delays; + delays_size = layers * sizeof(int); + } + } else { + out = (stbi_uc*)stbi__malloc( layers * stride ); + if 
(!out) + return stbi__load_gif_main_outofmem(&g, out, delays); + out_size = layers * stride; + if (delays) { + *delays = (int*) stbi__malloc( layers * sizeof(int) ); + if (!*delays) + return stbi__load_gif_main_outofmem(&g, out, delays); + delays_size = layers * sizeof(int); + } + } + memcpy( out + ((layers - 1) * stride), u, stride ); + if (layers >= 2) { + two_back = out - 2 * stride; + } + + if (delays) { + (*delays)[layers - 1U] = g.delay; + } + } + } while (u != 0); + + // free temp buffer; + STBI_FREE(g.out); + STBI_FREE(g.history); + STBI_FREE(g.background); + + // do the final conversion after loading everything; + if (req_comp && req_comp != 4) + out = stbi__convert_format(out, 4, req_comp, layers * g.w, g.h); + + *z = layers; + return out; + } else { + return stbi__errpuc("not GIF", "Image was not as a gif type."); + } } -static stbi_uc *stbi__gif_load(stbi__context *s, int *x, int *y, int *comp, int req_comp) +static void *stbi__gif_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) { stbi_uc *u = 0; - stbi__gif* g = (stbi__gif*) stbi__malloc(sizeof(stbi__gif)); - memset(g, 0, sizeof(*g)); + stbi__gif g; + memset(&g, 0, sizeof(g)); + STBI_NOTUSED(ri); - u = stbi__gif_load_next(s, g, comp, req_comp); + u = stbi__gif_load_next(s, &g, comp, req_comp, 0); if (u == (stbi_uc *) s) u = 0; // end of animated gif marker if (u) { - *x = g->w; - *y = g->h; + *x = g.w; + *y = g.h; + + // moved conversion to after successful load so that the same + // can be done for multiple frames. if (req_comp && req_comp != 4) - u = stbi__convert_format(u, 4, req_comp, g->w, g->h); + u = stbi__convert_format(u, 4, req_comp, g.w, g.h); + } else if (g.out) { + // if there was an error and we allocated an image buffer, free it! + STBI_FREE(g.out); } - else if (g->out) - STBI_FREE(g->out); - STBI_FREE(g); + + // free buffers needed for multiple frame loading; + STBI_FREE(g.history); + STBI_FREE(g.background); + return u; } @@ -6069,20 +7006,24 @@ static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp) // Radiance RGBE HDR loader // originally by Nicolas Schulz #ifndef STBI_NO_HDR -static int stbi__hdr_test_core(stbi__context *s) +static int stbi__hdr_test_core(stbi__context *s, const char *signature) { - const char *signature = "#?RADIANCE\n"; int i; for (i=0; signature[i]; ++i) if (stbi__get8(s) != signature[i]) - return 0; + return 0; + stbi__rewind(s); return 1; } static int stbi__hdr_test(stbi__context* s) { - int r = stbi__hdr_test_core(s); + int r = stbi__hdr_test_core(s, "#?RADIANCE\n"); stbi__rewind(s); + if(!r) { + r = stbi__hdr_test_core(s, "#?RGBE\n"); + stbi__rewind(s); + } return r; } @@ -6136,7 +7077,7 @@ static void stbi__hdr_convert(float *output, stbi_uc *input, int req_comp) } } -static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp) +static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) { char buffer[STBI__HDR_BUFLEN]; char *token; @@ -6147,10 +7088,12 @@ static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int re int len; unsigned char count, value; int i, j, k, c1,c2, z; - + const char *headerToken; + STBI_NOTUSED(ri); // Check identifier - if (strcmp(stbi__hdr_gettoken(s,buffer), "#?RADIANCE") != 0) + headerToken = stbi__hdr_gettoken(s,buffer); + if (strcmp(headerToken, "#?RADIANCE") != 0 && strcmp(headerToken, "#?RGBE") != 0) return stbi__errpf("not HDR", "Corrupt HDR image"); // Parse header @@ -6173,14 +7116,22 @@ static 
float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int re token += 3; width = (int) strtol(token, NULL, 10); + if (height > STBI_MAX_DIMENSIONS) return stbi__errpf("too large","Very large image (corrupt?)"); + if (width > STBI_MAX_DIMENSIONS) return stbi__errpf("too large","Very large image (corrupt?)"); + *x = width; *y = height; if (comp) *comp = 3; if (req_comp == 0) req_comp = 3; + if (!stbi__mad4sizes_valid(width, height, req_comp, sizeof(float), 0)) + return stbi__errpf("too large", "HDR image is too large"); + // Read data - hdr_data = (float *) stbi__malloc(height * width * req_comp * sizeof(float)); + hdr_data = (float *) stbi__malloc_mad4(width, height, req_comp, sizeof(float), 0); + if (!hdr_data) + return stbi__errpf("outofmem", "Out of memory"); // Load image data // image data is stored as some number of sca @@ -6219,20 +7170,29 @@ static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int re len <<= 8; len |= stbi__get8(s); if (len != width) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("invalid decoded scanline length", "corrupt HDR"); } - if (scanline == NULL) scanline = (stbi_uc *) stbi__malloc(width * 4); + if (scanline == NULL) { + scanline = (stbi_uc *) stbi__malloc_mad2(width, 4, 0); + if (!scanline) { + STBI_FREE(hdr_data); + return stbi__errpf("outofmem", "Out of memory"); + } + } for (k = 0; k < 4; ++k) { + int nleft; i = 0; - while (i < width) { + while ((nleft = width - i) > 0) { count = stbi__get8(s); if (count > 128) { // Run value = stbi__get8(s); count -= 128; + if (count > nleft) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); } for (z = 0; z < count; ++z) scanline[i++ * 4 + k] = value; } else { // Dump + if (count > nleft) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); } for (z = 0; z < count; ++z) scanline[i++ * 4 + k] = stbi__get8(s); } @@ -6241,7 +7201,8 @@ static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int re for (i=0; i < width; ++i) stbi__hdr_convert(hdr_data+(j*width + i)*req_comp, scanline + i*4, req_comp); } - STBI_FREE(scanline); + if (scanline) + STBI_FREE(scanline); } return hdr_data; @@ -6252,6 +7213,11 @@ static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp) char buffer[STBI__HDR_BUFLEN]; char *token; int valid = 0; + int dummy; + + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; if (stbi__hdr_test(s) == 0) { stbi__rewind( s ); @@ -6293,14 +7259,20 @@ static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp) void *p; stbi__bmp_data info; - info.all_a = 255; + info.all_a = 255; p = stbi__bmp_parse_header(s, &info); - stbi__rewind( s ); - if (p == NULL) + if (p == NULL) { + stbi__rewind( s ); return 0; - *x = s->img_x; - *y = s->img_y; - *comp = info.ma ? 4 : 3; + } + if (x) *x = s->img_x; + if (y) *y = s->img_y; + if (comp) { + if (info.bpp == 24 && info.ma == 0xff000000) + *comp = 3; + else + *comp = info.ma ? 
4 : 3; + } return 1; } #endif @@ -6308,7 +7280,10 @@ static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp) #ifndef STBI_NO_PSD static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp) { - int channelCount; + int channelCount, dummy, depth; + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; if (stbi__get32be(s) != 0x38425053) { stbi__rewind( s ); return 0; @@ -6325,7 +7300,8 @@ static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp) } *y = stbi__get32be(s); *x = stbi__get32be(s); - if (stbi__get16be(s) != 8) { + depth = stbi__get16be(s); + if (depth != 8 && depth != 16) { stbi__rewind( s ); return 0; } @@ -6336,14 +7312,45 @@ static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp) *comp = 4; return 1; } + +static int stbi__psd_is16(stbi__context *s) +{ + int channelCount, depth; + if (stbi__get32be(s) != 0x38425053) { + stbi__rewind( s ); + return 0; + } + if (stbi__get16be(s) != 1) { + stbi__rewind( s ); + return 0; + } + stbi__skip(s, 6); + channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) { + stbi__rewind( s ); + return 0; + } + STBI_NOTUSED(stbi__get32be(s)); + STBI_NOTUSED(stbi__get32be(s)); + depth = stbi__get16be(s); + if (depth != 16) { + stbi__rewind( s ); + return 0; + } + return 1; +} #endif #ifndef STBI_NO_PIC static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp) { - int act_comp=0,num_packets=0,chained; + int act_comp=0,num_packets=0,chained,dummy; stbi__pic_packet packets[10]; + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + if (!stbi__pic_is4(s,"\x53\x80\xF6\x34")) { stbi__rewind(s); return 0; @@ -6403,7 +7410,6 @@ static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp) // Known limitations: // Does not support comments in the header section // Does not support ASCII image data (formats P2 and P3) -// Does not support 16-bit-per-channel #ifndef STBI_NO_PNM @@ -6419,18 +7425,28 @@ static int stbi__pnm_test(stbi__context *s) return 1; } -static stbi_uc *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp) +static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) { stbi_uc *out; - if (!stbi__pnm_info(s, (int *)&s->img_x, (int *)&s->img_y, (int *)&s->img_n)) + STBI_NOTUSED(ri); + + ri->bits_per_channel = stbi__pnm_info(s, (int *)&s->img_x, (int *)&s->img_y, (int *)&s->img_n); + if (ri->bits_per_channel == 0) return 0; + + if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + *x = s->img_x; *y = s->img_y; - *comp = s->img_n; + if (comp) *comp = s->img_n; + + if (!stbi__mad4sizes_valid(s->img_n, s->img_x, s->img_y, ri->bits_per_channel / 8, 0)) + return stbi__errpuc("too large", "PNM too large"); - out = (stbi_uc *) stbi__malloc(s->img_n * s->img_x * s->img_y); + out = (stbi_uc *) stbi__malloc_mad4(s->img_n, s->img_x, s->img_y, ri->bits_per_channel / 8, 0); if (!out) return stbi__errpuc("outofmem", "Out of memory"); - stbi__getn(s, out, s->img_n * s->img_x * s->img_y); + stbi__getn(s, out, s->img_n * s->img_x * s->img_y * (ri->bits_per_channel / 8)); if (req_comp && req_comp != s->img_n) { out = stbi__convert_format(out, s->img_n, req_comp, s->img_x, s->img_y); @@ -6477,16 +7493,20 @@ static int stbi__pnm_getinteger(stbi__context *s, char *c) static int stbi__pnm_info(stbi__context *s, int *x, int 
*y, int *comp) { - int maxv; + int maxv, dummy; char c, p, t; - stbi__rewind( s ); + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + + stbi__rewind(s); // Get identifier p = (char) stbi__get8(s); t = (char) stbi__get8(s); if (p != 'P' || (t != '5' && t != '6')) { - stbi__rewind( s ); + stbi__rewind(s); return 0; } @@ -6502,11 +7522,19 @@ static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp) stbi__pnm_skip_whitespace(s, &c); maxv = stbi__pnm_getinteger(s, &c); // read max value - - if (maxv > 255) - return stbi__err("max value > 255", "PPM image not 8-bit"); + if (maxv > 65535) + return stbi__err("max value > 65535", "PPM image supports only 8-bit and 16-bit images"); + else if (maxv > 255) + return 16; else - return 1; + return 8; +} + +static int stbi__pnm_is16(stbi__context *s) +{ + if (stbi__pnm_info(s, NULL, NULL, NULL) == 16) + return 1; + return 0; } #endif @@ -6552,6 +7580,22 @@ static int stbi__info_main(stbi__context *s, int *x, int *y, int *comp) return stbi__err("unknown image type", "Image not of any known type, or corrupt"); } +static int stbi__is_16_main(stbi__context *s) +{ + #ifndef STBI_NO_PNG + if (stbi__png_is16(s)) return 1; + #endif + + #ifndef STBI_NO_PSD + if (stbi__psd_is16(s)) return 1; + #endif + + #ifndef STBI_NO_PNM + if (stbi__pnm_is16(s)) return 1; + #endif + return 0; +} + #ifndef STBI_NO_STDIO STBIDEF int stbi_info(char const *filename, int *x, int *y, int *comp) { @@ -6573,6 +7617,27 @@ STBIDEF int stbi_info_from_file(FILE *f, int *x, int *y, int *comp) fseek(f,pos,SEEK_SET); return r; } + +STBIDEF int stbi_is_16_bit(char const *filename) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result; + if (!f) return stbi__err("can't fopen", "Unable to open file"); + result = stbi_is_16_bit_from_file(f); + fclose(f); + return result; +} + +STBIDEF int stbi_is_16_bit_from_file(FILE *f) +{ + int r; + stbi__context s; + long pos = ftell(f); + stbi__start_file(&s, f); + r = stbi__is_16_main(&s); + fseek(f,pos,SEEK_SET); + return r; +} #endif // !STBI_NO_STDIO STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp) @@ -6589,10 +7654,44 @@ STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *c, void *user, int return stbi__info_main(&s,x,y,comp); } +STBIDEF int stbi_is_16_bit_from_memory(stbi_uc const *buffer, int len) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__is_16_main(&s); +} + +STBIDEF int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const *c, void *user) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) c, user); + return stbi__is_16_main(&s); +} + #endif // STB_IMAGE_IMPLEMENTATION /* revision history: + 2.20 (2019-02-07) support utf8 filenames in Windows; fix warnings and platform ifdefs + 2.19 (2018-02-11) fix warning + 2.18 (2018-01-30) fix warnings + 2.17 (2018-01-29) change sbti__shiftsigned to avoid clang -O2 bug + 1-bit BMP + *_is_16_bit api + avoid warnings + 2.16 (2017-07-23) all functions have 16-bit variants; + STBI_NO_STDIO works again; + compilation fixes; + fix rounding in unpremultiply; + optimize vertical flip; + disable raw_len validation; + documentation fixes + 2.15 (2017-03-18) fix png-1,2,4 bug; now all Imagenet JPGs decode; + warning fixes; disable run-time SSE detection on gcc; + uniform handling of optional "return" values; + thread-safe initialization of zlib tables + 2.14 (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs + 2.13 (2016-11-29) add 16-bit API, only supported for PNG 
right now 2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes 2.11 (2016-04-02) allocate large structures on the stack remove white matting for transparent PSD @@ -6753,3 +7852,46 @@ STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *c, void *user, int 0.50 (2006-11-19) first released version */ + + +/* +------------------------------------------------------------------------------ +This software is available under 2 licenses -- choose whichever you prefer. +------------------------------------------------------------------------------ +ALTERNATIVE A - MIT License +Copyright (c) 2017 Sean Barrett +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +------------------------------------------------------------------------------ +ALTERNATIVE B - Public Domain (www.unlicense.org) +This is free and unencumbered software released into the public domain. +Anyone is free to copy, modify, publish, use, compile, sell, or distribute this +software, either in source code form or as a compiled binary, for any purpose, +commercial or non-commercial, and by any means. +In jurisdictions that recognize copyright laws, the author or authors of this +software dedicate any and all copyright interest in the software to the public +domain. We make this dedication for the benefit of the public at large and to +the detriment of our heirs and successors. We intend this dedication to be an +overt act of relinquishment in perpetuity of all present and future rights to +this software under copyright law. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +------------------------------------------------------------------------------ +*/ diff --git a/examples/viewer/viewer.cc b/examples/viewer/viewer.cc index bc959c94..c523ecdf 100644 --- a/examples/viewer/viewer.cc +++ b/examples/viewer/viewer.cc @@ -11,6 +11,7 @@ #include #include #include +#include #include @@ -228,6 +229,31 @@ void normalizeVector(vec3 &v) { } } +/* + There are 2 approaches here to automatically generating vertex normals. 
The + old approach (computeSmoothingNormals) doesn't handle multiple smoothing + groups properly, as it effectively merges all smoothing groups present in the + OBJ file into a single group. However, it can be useful when the OBJ file + contains vertex normals which you want to use, but is missing some, as it + will attempt to fill in the missing normals without generating new shapes. + + The new approach (computeSmoothingShapes, computeAllSmoothingNormals) handles + multiple smoothing groups but is a bit more complicated, as handling this + correctly requires potentially generating new vertices (and hence shapes). + In general, the new approach is most useful if your OBJ file is missing + vertex normals entirely, and instead relies on smoothing groups to correctly + generate them as a pre-process. That said, it can be used to reliably + generate vertex normals in the general case. If you want to always generate + normals in this way, simply force set regen_all_normals to true below. By + default, it's only true when there are no vertex normals present. One other + thing to keep in mind is that the statistics printed apply to the model + *prior* to shape regeneration, so you'd need to print them again if you want + to see the new statistics. + + TODO(syoyo): import computeSmoothingShapes and computeAllSmoothingNormals to + tinyobjloader as utility functions. +*/ + // Check if `mesh_t` contains smoothing group id. bool hasSmoothingGroup(const tinyobj::shape_t& shape) { @@ -295,6 +321,138 @@ void computeSmoothingNormals(const tinyobj::attrib_t& attrib, const tinyobj::sha } } // computeSmoothingNormals + +static void computeAllSmoothingNormals(tinyobj::attrib_t& attrib, + std::vector& shapes) { + vec3 p[3]; + for (size_t s = 0, slen = shapes.size(); s < slen; ++s) { + const tinyobj::shape_t& shape(shapes[s]); + size_t facecount = shape.mesh.num_face_vertices.size(); + assert(shape.mesh.smoothing_group_ids.size()); + + for (size_t f = 0, flen = facecount; f < flen; ++f) { + for (unsigned int v = 0; v < 3; ++v) { + tinyobj::index_t idx = shape.mesh.indices[3*f + v]; + assert(idx.vertex_index != -1); + p[v].v[0] = attrib.vertices[3*idx.vertex_index ]; + p[v].v[1] = attrib.vertices[3*idx.vertex_index+1]; + p[v].v[2] = attrib.vertices[3*idx.vertex_index+2]; + } + + // cross(p[1] - p[0], p[2] - p[0]) + float nx = (p[1].v[1] - p[0].v[1]) * (p[2].v[2] - p[0].v[2]) - + (p[1].v[2] - p[0].v[2]) * (p[2].v[1] - p[0].v[1]); + float ny = (p[1].v[2] - p[0].v[2]) * (p[2].v[0] - p[0].v[0]) - + (p[1].v[0] - p[0].v[0]) * (p[2].v[2] - p[0].v[2]); + float nz = (p[1].v[0] - p[0].v[0]) * (p[2].v[1] - p[0].v[1]) - + (p[1].v[1] - p[0].v[1]) * (p[2].v[0] - p[0].v[0]); + + // Don't normalize here. + for (unsigned int v = 0; v < 3; ++v) { + tinyobj::index_t idx = shape.mesh.indices[3*f + v]; + attrib.normals[3*idx.normal_index ] += nx; + attrib.normals[3*idx.normal_index+1] += ny; + attrib.normals[3*idx.normal_index+2] += nz; + } + } + } + + assert(attrib.normals.size() % 3 == 0); + for (size_t i = 0, nlen = attrib.normals.size() / 3; i < nlen; ++i) { + tinyobj::real_t& nx = attrib.normals[3*i ]; + tinyobj::real_t& ny = attrib.normals[3*i+1]; + tinyobj::real_t& nz = attrib.normals[3*i+2]; + tinyobj::real_t len = sqrtf(nx*nx + ny*ny + nz*nz); + tinyobj::real_t scale = len == 0 ? 
0 : 1 / len; + nx *= scale; + ny *= scale; + nz *= scale; + } +} + +static void computeSmoothingShape(tinyobj::attrib_t& inattrib, tinyobj::shape_t& inshape, + std::vector>& sortedids, + unsigned int idbegin, unsigned int idend, + std::vector& outshapes, + tinyobj::attrib_t& outattrib) { + unsigned int sgroupid = sortedids[idbegin].first; + bool hasmaterials = inshape.mesh.material_ids.size(); + // Make a new shape from the set of faces in the range [idbegin, idend). + outshapes.emplace_back(); + tinyobj::shape_t& outshape = outshapes.back(); + outshape.name = inshape.name; + // Skip lines and points. + + std::unordered_map remap; + for (unsigned int id = idbegin; id < idend; ++id) { + unsigned int face = sortedids[id].second; + + outshape.mesh.num_face_vertices.push_back(3); // always triangles + if (hasmaterials) + outshape.mesh.material_ids.push_back(inshape.mesh.material_ids[face]); + outshape.mesh.smoothing_group_ids.push_back(sgroupid); + // Skip tags. + + for (unsigned int v = 0; v < 3; ++v) { + tinyobj::index_t inidx = inshape.mesh.indices[3*face + v], outidx; + assert(inidx.vertex_index != -1); + auto iter = remap.find(inidx.vertex_index); + // Smooth group 0 disables smoothing so no shared vertices in that case. + if (sgroupid && iter != remap.end()) { + outidx.vertex_index = (*iter).second; + outidx.normal_index = outidx.vertex_index; + outidx.texcoord_index = (inidx.texcoord_index == -1) ? -1 : outidx.vertex_index; + } + else { + assert(outattrib.vertices.size() % 3 == 0); + unsigned int offset = static_cast(outattrib.vertices.size() / 3); + outidx.vertex_index = outidx.normal_index = offset; + outidx.texcoord_index = (inidx.texcoord_index == -1) ? -1 : offset; + outattrib.vertices.push_back(inattrib.vertices[3*inidx.vertex_index ]); + outattrib.vertices.push_back(inattrib.vertices[3*inidx.vertex_index+1]); + outattrib.vertices.push_back(inattrib.vertices[3*inidx.vertex_index+2]); + outattrib.normals.push_back(0.0f); + outattrib.normals.push_back(0.0f); + outattrib.normals.push_back(0.0f); + if (inidx.texcoord_index != -1) { + outattrib.texcoords.push_back(inattrib.texcoords[2*inidx.texcoord_index ]); + outattrib.texcoords.push_back(inattrib.texcoords[2*inidx.texcoord_index+1]); + } + remap[inidx.vertex_index] = offset; + } + outshape.mesh.indices.push_back(outidx); + } + } +} + +static void computeSmoothingShapes(tinyobj::attrib_t &inattrib, + std::vector& inshapes, + std::vector& outshapes, + tinyobj::attrib_t& outattrib) { + for (size_t s = 0, slen = inshapes.size() ; s < slen; ++s) { + tinyobj::shape_t& inshape = inshapes[s]; + + unsigned int numfaces = static_cast(inshape.mesh.smoothing_group_ids.size()); + assert(numfaces); + std::vector> sortedids(numfaces); + for (unsigned int i = 0; i < numfaces; ++i) + sortedids[i] = std::make_pair(inshape.mesh.smoothing_group_ids[i], i); + sort(sortedids.begin(), sortedids.end()); + + unsigned int activeid = sortedids[0].first; + unsigned int id = activeid, idbegin = 0, idend = 0; + // Faces are now bundled by smoothing group id, create shapes from these. 
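+      // The outer loop below sweeps [idbegin, idend) over one run of faces at a
+      // time: the inner loop advances idend until the smoothing group id changes,
+      // computeSmoothingShape() emits that run as its own shape, and the sweep
+      // then restarts at idend with the next group id.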
+ while (idbegin < numfaces) { + while (activeid == id && ++idend < numfaces) + id = sortedids[idend].first; + computeSmoothingShape(inattrib, inshape, sortedids, idbegin, idend, + outshapes, outattrib); + activeid = id; + idbegin = idend; + } + } +} + } // namespace static bool LoadObjAndConvert(float bmin[3], float bmax[3], @@ -302,8 +460,8 @@ static bool LoadObjAndConvert(float bmin[3], float bmax[3], std::vector& materials, std::map& textures, const char* filename) { - tinyobj::attrib_t attrib; - std::vector shapes; + tinyobj::attrib_t inattrib; + std::vector inshapes; timerutil tm; @@ -321,7 +479,7 @@ static bool LoadObjAndConvert(float bmin[3], float bmax[3], std::string warn; std::string err; - bool ret = tinyobj::LoadObj(&attrib, &shapes, &materials, &warn, &err, filename, + bool ret = tinyobj::LoadObj(&inattrib, &inshapes, &materials, &warn, &err, filename, base_dir.c_str()); if (!warn.empty()) { std::cout << "WARN: " << warn << std::endl; @@ -339,11 +497,11 @@ static bool LoadObjAndConvert(float bmin[3], float bmax[3], printf("Parsing time: %d [ms]\n", (int)tm.msec()); - printf("# of vertices = %d\n", (int)(attrib.vertices.size()) / 3); - printf("# of normals = %d\n", (int)(attrib.normals.size()) / 3); - printf("# of texcoords = %d\n", (int)(attrib.texcoords.size()) / 2); + printf("# of vertices = %d\n", (int)(inattrib.vertices.size()) / 3); + printf("# of normals = %d\n", (int)(inattrib.normals.size()) / 3); + printf("# of texcoords = %d\n", (int)(inattrib.texcoords.size()) / 2); printf("# of materials = %d\n", (int)materials.size()); - printf("# of shapes = %d\n", (int)shapes.size()); + printf("# of shapes = %d\n", (int)inshapes.size()); // Append `default` material materials.push_back(tinyobj::material_t()); @@ -410,6 +568,17 @@ static bool LoadObjAndConvert(float bmin[3], float bmax[3], bmin[0] = bmin[1] = bmin[2] = std::numeric_limits::max(); bmax[0] = bmax[1] = bmax[2] = -std::numeric_limits::max(); + bool regen_all_normals = inattrib.normals.size() == 0; + tinyobj::attrib_t outattrib; + std::vector outshapes; + if (regen_all_normals) { + computeSmoothingShapes(inattrib, inshapes, outshapes, outattrib); + computeAllSmoothingNormals(outattrib, outshapes); + } + + std::vector& shapes = regen_all_normals ? outshapes : inshapes; + tinyobj::attrib_t& attrib = regen_all_normals ? 
outattrib : inattrib; + { for (size_t s = 0; s < shapes.size(); s++) { DrawObject o; @@ -417,7 +586,7 @@ static bool LoadObjAndConvert(float bmin[3], float bmax[3], // Check for smoothing group and compute smoothing normals std::map smoothVertexNormals; - if (hasSmoothingGroup(shapes[s]) > 0) { + if (!regen_all_normals && (hasSmoothingGroup(shapes[s]) > 0)) { std::cout << "Compute smoothingNormal for shape [" << s << "]" << std::endl; computeSmoothingNormals(attrib, shapes[s], smoothVertexNormals); } From 8322e00ae685ea623ab6ac5a6cebcfa2d22fbf93 Mon Sep 17 00:00:00 2001 From: Jiahao Li Date: Mon, 27 Dec 2021 19:18:14 +0800 Subject: [PATCH 085/139] Fix mtllib reloading: load an mtl file only once (#327) --- tiny_obj_loader.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h index 38c8bd7c..7d0c3844 100644 --- a/tiny_obj_loader.h +++ b/tiny_obj_loader.h @@ -657,6 +657,7 @@ bool ParseTextureNameAndOption(std::string *texname, texture_option_t *texopt, #include #include #include +#include #include #include @@ -2445,6 +2446,7 @@ bool LoadObj(attrib_t *attrib, std::vector *shapes, std::string name; // material + std::set material_filenames; std::map material_map; int material = -1; @@ -2735,6 +2737,11 @@ bool LoadObj(attrib_t *attrib, std::vector *shapes, } else { bool found = false; for (size_t s = 0; s < filenames.size(); s++) { + if (material_filenames.count(filenames[s]) > 0) { + found = true; + continue; + } + std::string warn_mtl; std::string err_mtl; bool ok = (*readMatFn)(filenames[s].c_str(), materials, @@ -2749,6 +2756,7 @@ bool LoadObj(attrib_t *attrib, std::vector *shapes, if (ok) { found = true; + material_filenames.insert(filenames[s]); break; } } @@ -2993,6 +3001,7 @@ bool LoadObjWithCallback(std::istream &inStream, const callback_t &callback, std::stringstream errss; // material + std::set material_filenames; std::map material_map; int material_id = -1; // -1 = invalid @@ -3138,6 +3147,11 @@ bool LoadObjWithCallback(std::istream &inStream, const callback_t &callback, } else { bool found = false; for (size_t s = 0; s < filenames.size(); s++) { + if (material_filenames.count(filenames[s]) > 0) { + found = true; + continue; + } + std::string warn_mtl; std::string err_mtl; bool ok = (*readMatFn)(filenames[s].c_str(), &materials, @@ -3153,6 +3167,7 @@ bool LoadObjWithCallback(std::istream &inStream, const callback_t &callback, if (ok) { found = true; + material_filenames.insert(filenames[s]); break; } } From 03a2f3a46615da7da641c170b8823b3e0fb63110 Mon Sep 17 00:00:00 2001 From: jordanozang <38968035+jordanozang@users.noreply.github.com> Date: Sun, 19 Jun 2022 11:48:37 -0400 Subject: [PATCH 086/139] Fix typo (#338) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 143e0426..107ab8cb 100644 --- a/README.md +++ b/README.md @@ -150,7 +150,7 @@ One option is to simply copy the header file into your project and to make sure ### Building tinyobjloader - Using vcpkg(not recommended though) -Alghouth it is not a recommended way, you can download and install tinyobjloader using the [vcpkg](https://github.com/Microsoft/vcpkg) dependency manager: +Although it is not a recommended way, you can download and install tinyobjloader using the [vcpkg](https://github.com/Microsoft/vcpkg) dependency manager: git clone https://github.com/Microsoft/vcpkg.git cd vcpkg From a1e8bad32e1ccd26a7936c5354ecf856aec2cf59 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Tue, 5 Jul 2022 19:37:55 +0900 
Subject: [PATCH 087/139] Update README. --- README.md | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 107ab8cb..c0f641be 100644 --- a/README.md +++ b/README.md @@ -19,14 +19,14 @@ If you are looking for C89 version, please see https://github.com/syoyo/tinyobjl Version notice -------------- -We recommend to use `master`(`main`) branch. Its v2.0 release candidate. Most features are now nearly robust and stable(Remaining task for release v2.0 is polishing C++ and Python API). +We recommend to use `master`(`main`) branch. Its v2.0 release candidate. Most features are now nearly robust and stable(Remaining task for release v2.0 is polishing C++ and Python API, and fix built-in triangulation code). We have released new version v1.0.0 on 20 Aug, 2016. Old version is available as `v0.9.x` branch https://github.com/syoyo/tinyobjloader/tree/v0.9.x ## What's new -* 29 Jul, 2021 : Added Mapbox's earcut for robust triangulation. Also fixes triangulation bug. +* 29 Jul, 2021 : Added Mapbox's earcut for robust triangulation. Also fixes triangulation bug(still there is some issue in built-in triangulation algorithm: https://github.com/tinyobjloader/tinyobjloader/issues/319). * 19 Feb, 2020 : The repository has been moved to https://github.com/tinyobjloader/tinyobjloader ! * 18 May, 2019 : Python binding!(See `python` folder. Also see https://pypi.org/project/tinyobjloader/) * 14 Apr, 2019 : Bump version v2.0.0 rc0. New C++ API and python bindings!(1.x API still exists for backward compatibility) @@ -106,10 +106,7 @@ TinyObjLoader is successfully used in ... * Vertex color(as an extension: https://blender.stackexchange.com/questions/31997/how-can-i-get-vertex-painted-obj-files-to-import-into-blender) * Texcoord * Normal -* Material - * Unknown material attributes are returned as key-value(value is string) map. * Crease tag('t'). This is OpenSubdiv specific(not in wavefront .obj specification) -* PBR material extension for .MTL. Its proposed here: http://exocortex.com/blog/extending_wavefront_mtl_to_support_pbr * Callback API for custom loading. * Double precision support(for HPC application). * Smoothing group @@ -126,12 +123,16 @@ TinyObjLoader is successfully used in ... * [ ] surface. * [ ] Free form curve/surfaces +### Material + +* PBR material extension for .MTL. Its proposed here: http://exocortex.com/blog/extending_wavefront_mtl_to_support_pbr +* Texture options +* Unknown material attributes are returned as key-value(value is string) map. ## TODO * [ ] Fix obj_sticker example. * [ ] More unit test codes. 
-* [x] Texture options ## License From be02c9cb978b68481e8b157fd3f7e190968ab14b Mon Sep 17 00:00:00 2001 From: Trider12 <47952308+Trider12@users.noreply.github.com> Date: Thu, 6 Oct 2022 14:25:55 +0600 Subject: [PATCH 088/139] Add alternative aliases for displacement maps during mtl parsing (#343) --- tiny_obj_loader.h | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h index 7d0c3844..0307d533 100644 --- a/tiny_obj_loader.h +++ b/tiny_obj_loader.h @@ -2203,15 +2203,8 @@ void LoadMtl(std::map *material_map, } // bump texture - if ((0 == strncmp(token, "map_bump", 8)) && IS_SPACE(token[8])) { - token += 9; - ParseTextureNameAndOption(&(material.bump_texname), - &(material.bump_texopt), token); - continue; - } - - // bump texture - if ((0 == strncmp(token, "map_Bump", 8)) && IS_SPACE(token[8])) { + if ((0 == strncmp(token, "map_bump", 8)) && IS_SPACE(token[8]) || + (0 == strncmp(token, "map_Bump", 8)) && IS_SPACE(token[8])) { token += 9; ParseTextureNameAndOption(&(material.bump_texname), &(material.bump_texopt), token); @@ -2235,6 +2228,15 @@ void LoadMtl(std::map *material_map, continue; } + // displacement texture + if ((0 == strncmp(token, "map_disp", 8)) && IS_SPACE(token[8]) || + (0 == strncmp(token, "map_Disp", 8)) && IS_SPACE(token[8])) { + token += 9; + ParseTextureNameAndOption(&(material.displacement_texname), + &(material.displacement_texopt), token); + continue; + } + // displacement texture if ((0 == strncmp(token, "disp", 4)) && IS_SPACE(token[4])) { token += 5; From 6db06ee8d959860e92c9252f3dad33bab8e8c454 Mon Sep 17 00:00:00 2001 From: Trider12 <47952308+Trider12@users.noreply.github.com> Date: Thu, 6 Oct 2022 23:15:13 +0600 Subject: [PATCH 089/139] Fix a Clang warning (#344) --- tiny_obj_loader.h | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h index 0307d533..8d829b19 100644 --- a/tiny_obj_loader.h +++ b/tiny_obj_loader.h @@ -2203,8 +2203,9 @@ void LoadMtl(std::map *material_map, } // bump texture - if ((0 == strncmp(token, "map_bump", 8)) && IS_SPACE(token[8]) || - (0 == strncmp(token, "map_Bump", 8)) && IS_SPACE(token[8])) { + if (((0 == strncmp(token, "map_bump", 8)) || + (0 == strncmp(token, "map_Bump", 8))) && + IS_SPACE(token[8])) { token += 9; ParseTextureNameAndOption(&(material.bump_texname), &(material.bump_texopt), token); @@ -2229,8 +2230,9 @@ void LoadMtl(std::map *material_map, } // displacement texture - if ((0 == strncmp(token, "map_disp", 8)) && IS_SPACE(token[8]) || - (0 == strncmp(token, "map_Disp", 8)) && IS_SPACE(token[8])) { + if (((0 == strncmp(token, "map_disp", 8)) || + (0 == strncmp(token, "map_Disp", 8))) && + IS_SPACE(token[8])) { token += 9; ParseTextureNameAndOption(&(material.displacement_texname), &(material.displacement_texopt), token); From 910907829465dc72401aa0edcbb8ab51058a28b0 Mon Sep 17 00:00:00 2001 From: tylermorganwall Date: Sat, 8 Oct 2022 06:19:37 -0400 Subject: [PATCH 090/139] Fix normal flipping during triangulation (#340) * Fix normal flipping during triangulation -Update triangulation routine to use Newell's method to ensure polygons aren't flipped when triangulated -Don't triangulate when npolys == 3 * Fix compile. 
* Update tiny_obj_loader.h -Remove dependency on C++11 by using a custom struct instead of an std::array * Update tiny_obj_loader.h -Fix compilation by adding default constructor and remove array access * Update tiny_obj_loader.h -Fix struct constructor * Update tiny_obj_loader.h -Change array access to struct member Co-authored-by: Syoyo Fujita --- tiny_obj_loader.h | 212 +++++++++++++++++++++++++++++++++------------- 1 file changed, 152 insertions(+), 60 deletions(-) diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h index 8d829b19..5afac115 100644 --- a/tiny_obj_loader.h +++ b/tiny_obj_loader.h @@ -1397,6 +1397,41 @@ static int pnpoly(int nvert, T *vertx, T *verty, T testx, T testy) { return c; } +struct TinyObjPoint { + real_t x, y, z; + TinyObjPoint() : x(0), y(0), z(0) {} + TinyObjPoint(real_t x_, real_t y_, real_t z_) : + x(x_), y(y_), z(z_) {} +}; + +inline TinyObjPoint cross(const TinyObjPoint &v1, const TinyObjPoint &v2) { + return TinyObjPoint(v1.y * v2.z - v1.z * v2.y, + v1.z * v2.x - v1.x * v2.z, + v1.x * v2.y - v1.y * v2.x); +} + +inline real_t dot(const TinyObjPoint &v1, const TinyObjPoint &v2) { + return (v1.x * v2.x + v1.y * v2.y + v1.z * v2.z); +} + +inline real_t GetLength(TinyObjPoint &e) { + return std::sqrt(e.x*e.x + e.y*e.y + e.z*e.z); +} + +inline TinyObjPoint Normalize(TinyObjPoint e) { + real_t inv_length = 1.0 / GetLength(e); + return TinyObjPoint(e.x * inv_length, e.y * inv_length, e.z * inv_length ); +} + + +inline TinyObjPoint WorldToLocal(const TinyObjPoint& a, + const TinyObjPoint& u, + const TinyObjPoint& v, + const TinyObjPoint& w) { + return TinyObjPoint(dot(a,u),dot(a,v),dot(a,w)); +} + + // TODO(syoyo): refactor function. static bool exportGroupsToShape(shape_t *shape, const PrimGroup &prim_group, const std::vector &tags, @@ -1425,7 +1460,7 @@ static bool exportGroupsToShape(shape_t *shape, const PrimGroup &prim_group, continue; } - if (triangulate) { + if (triangulate && npolys != 3) { if (npolys == 4) { vertex_index_t i0 = face.vertex_indices[0]; vertex_index_t i1 = face.vertex_indices[1]; @@ -1534,65 +1569,59 @@ static bool exportGroupsToShape(shape_t *shape, const PrimGroup &prim_group, shape->mesh.smoothing_group_ids.push_back(face.smoothing_group_id); } else { +#ifdef TINYOBJLOADER_USE_MAPBOX_EARCUT vertex_index_t i0 = face.vertex_indices[0]; - vertex_index_t i1(-1); - vertex_index_t i2 = face.vertex_indices[1]; + vertex_index_t i0_2 = i0; - // find the two axes to work in - size_t axes[2] = {1, 2}; + // TMW change: Find the normal axis of the polygon using Newell's method + TinyObjPoint n; for (size_t k = 0; k < npolys; ++k) { - i0 = face.vertex_indices[(k + 0) % npolys]; - i1 = face.vertex_indices[(k + 1) % npolys]; - i2 = face.vertex_indices[(k + 2) % npolys]; + i0 = face.vertex_indices[k % npolys]; size_t vi0 = size_t(i0.v_idx); - size_t vi1 = size_t(i1.v_idx); - size_t vi2 = size_t(i2.v_idx); - if (((3 * vi0 + 2) >= v.size()) || ((3 * vi1 + 2) >= v.size()) || - ((3 * vi2 + 2) >= v.size())) { - // Invalid triangle. - // FIXME(syoyo): Is it ok to simply skip this invalid triangle? 
- continue; - } + size_t j = (k + 1) % npolys; + i0_2 = face.vertex_indices[j]; + size_t vi0_2 = size_t(i0_2.v_idx); + real_t v0x = v[vi0 * 3 + 0]; real_t v0y = v[vi0 * 3 + 1]; real_t v0z = v[vi0 * 3 + 2]; - real_t v1x = v[vi1 * 3 + 0]; - real_t v1y = v[vi1 * 3 + 1]; - real_t v1z = v[vi1 * 3 + 2]; - real_t v2x = v[vi2 * 3 + 0]; - real_t v2y = v[vi2 * 3 + 1]; - real_t v2z = v[vi2 * 3 + 2]; - real_t e0x = v1x - v0x; - real_t e0y = v1y - v0y; - real_t e0z = v1z - v0z; - real_t e1x = v2x - v1x; - real_t e1y = v2y - v1y; - real_t e1z = v2z - v1z; - real_t cx = std::fabs(e0y * e1z - e0z * e1y); - real_t cy = std::fabs(e0z * e1x - e0x * e1z); - real_t cz = std::fabs(e0x * e1y - e0y * e1x); - const real_t epsilon = std::numeric_limits::epsilon(); - // std::cout << "cx " << cx << ", cy " << cy << ", cz " << cz << - // "\n"; - if (cx > epsilon || cy > epsilon || cz > epsilon) { - // std::cout << "corner\n"; - // found a corner - if (cx > cy && cx > cz) { - // std::cout << "pattern0\n"; - } else { - // std::cout << "axes[0] = 0\n"; - axes[0] = 0; - if (cz > cx && cz > cy) { - // std::cout << "axes[1] = 1\n"; - axes[1] = 1; - } - } - break; - } - } -#ifdef TINYOBJLOADER_USE_MAPBOX_EARCUT + real_t v0x_2 = v[vi0_2 * 3 + 0]; + real_t v0y_2 = v[vi0_2 * 3 + 1]; + real_t v0z_2 = v[vi0_2 * 3 + 2]; + + const TinyObjPoint point1(v0x,v0y,v0z); + const TinyObjPoint point2(v0x_2,v0y_2,v0z_2); + + TinyObjPoint a(point1.x - point2.x, point1.y - point2.y, point1.z - point2.z); + TinyObjPoint b(point1.x + point2.x, point1.y + point2.y, point1.z + point2.z); + + n.x += (a.x * b.z); + n.y += (a.z * b.x); + n.z += (a.x * b.y); + } + real_t length_n = GetLength(n); + //Check if zero length normal + if(length_n <= 0) { + continue; + } + //Negative is to flip the normal to the correct direction + real_t inv_length = -1.0f / length_n; + n.x *= inv_length; + n.y *= inv_length; + n.z *= inv_length; + + TinyObjPoint axis_w, axis_v, axis_u; + axis_w = n; + TinyObjPoint a; + if(abs(axis_w.x) > 0.9999999) { + a = TinyObjPoint(0,1,0); + } else { + a = TinyObjPoint(1,0,0); + } + axis_v = Normalize(cross(axis_w, a)); + axis_u = cross(axis_w, axis_v); using Point = std::array; // first polyline define the main polygon. @@ -1601,6 +1630,9 @@ static bool exportGroupsToShape(shape_t *shape, const PrimGroup &prim_group, std::vector polyline; + //TMW change: Find best normal and project v0x and v0y to those coordinates, instead of + //picking a plane aligned with an axis (which can flip polygons). + // Fill polygon data(facevarying vertices). 
for (size_t k = 0; k < npolys; k++) { i0 = face.vertex_indices[k]; @@ -1608,10 +1640,14 @@ static bool exportGroupsToShape(shape_t *shape, const PrimGroup &prim_group, assert(((3 * vi0 + 2) < v.size())); - real_t v0x = v[vi0 * 3 + axes[0]]; - real_t v0y = v[vi0 * 3 + axes[1]]; + real_t v0x = v[vi0 * 3 + 0]; + real_t v0y = v[vi0 * 3 + 1]; + real_t v0z = v[vi0 * 3 + 2]; + + TinyObjPoint polypoint(v0x,v0y,v0z); + TinyObjPoint loc = WorldToLocal(polypoint, axis_u, axis_v, axis_w); - polyline.push_back({v0x, v0y}); + polyline.push_back({loc.x, loc.y}); } polygon.push_back(polyline); @@ -1626,19 +1662,19 @@ static bool exportGroupsToShape(shape_t *shape, const PrimGroup &prim_group, index_t idx0, idx1, idx2; idx0.vertex_index = face.vertex_indices[indices[3 * k + 0]].v_idx; idx0.normal_index = - face.vertex_indices[indices[3 * k + 0]].vn_idx; + face.vertex_indices[indices[3 * k + 0]].vn_idx; idx0.texcoord_index = - face.vertex_indices[indices[3 * k + 0]].vt_idx; + face.vertex_indices[indices[3 * k + 0]].vt_idx; idx1.vertex_index = face.vertex_indices[indices[3 * k + 1]].v_idx; idx1.normal_index = - face.vertex_indices[indices[3 * k + 1]].vn_idx; + face.vertex_indices[indices[3 * k + 1]].vn_idx; idx1.texcoord_index = - face.vertex_indices[indices[3 * k + 1]].vt_idx; + face.vertex_indices[indices[3 * k + 1]].vt_idx; idx2.vertex_index = face.vertex_indices[indices[3 * k + 2]].v_idx; idx2.normal_index = - face.vertex_indices[indices[3 * k + 2]].vn_idx; + face.vertex_indices[indices[3 * k + 2]].vn_idx; idx2.texcoord_index = - face.vertex_indices[indices[3 * k + 2]].vt_idx; + face.vertex_indices[indices[3 * k + 2]].vt_idx; shape->mesh.indices.push_back(idx0); shape->mesh.indices.push_back(idx1); @@ -1652,7 +1688,63 @@ static bool exportGroupsToShape(shape_t *shape, const PrimGroup &prim_group, } #else // Built-in ear clipping triangulation + vertex_index_t i0 = face.vertex_indices[0]; + vertex_index_t i1(-1); + vertex_index_t i2 = face.vertex_indices[1]; + + // find the two axes to work in + size_t axes[2] = {1, 2}; + for (size_t k = 0; k < npolys; ++k) { + i0 = face.vertex_indices[(k + 0) % npolys]; + i1 = face.vertex_indices[(k + 1) % npolys]; + i2 = face.vertex_indices[(k + 2) % npolys]; + size_t vi0 = size_t(i0.v_idx); + size_t vi1 = size_t(i1.v_idx); + size_t vi2 = size_t(i2.v_idx); + if (((3 * vi0 + 2) >= v.size()) || ((3 * vi1 + 2) >= v.size()) || + ((3 * vi2 + 2) >= v.size())) { + // Invalid triangle. + // FIXME(syoyo): Is it ok to simply skip this invalid triangle? 
+ continue; + } + real_t v0x = v[vi0 * 3 + 0]; + real_t v0y = v[vi0 * 3 + 1]; + real_t v0z = v[vi0 * 3 + 2]; + real_t v1x = v[vi1 * 3 + 0]; + real_t v1y = v[vi1 * 3 + 1]; + real_t v1z = v[vi1 * 3 + 2]; + real_t v2x = v[vi2 * 3 + 0]; + real_t v2y = v[vi2 * 3 + 1]; + real_t v2z = v[vi2 * 3 + 2]; + real_t e0x = v1x - v0x; + real_t e0y = v1y - v0y; + real_t e0z = v1z - v0z; + real_t e1x = v2x - v1x; + real_t e1y = v2y - v1y; + real_t e1z = v2z - v1z; + real_t cx = std::fabs(e0y * e1z - e0z * e1y); + real_t cy = std::fabs(e0z * e1x - e0x * e1z); + real_t cz = std::fabs(e0x * e1y - e0y * e1x); + const real_t epsilon = std::numeric_limits::epsilon(); + // std::cout << "cx " << cx << ", cy " << cy << ", cz " << cz << + // "\n"; + if (cx > epsilon || cy > epsilon || cz > epsilon) { + // std::cout << "corner\n"; + // found a corner + if (cx > cy && cx > cz) { + // std::cout << "pattern0\n"; + } else { + // std::cout << "axes[0] = 0\n"; + axes[0] = 0; + if (cz > cx && cz > cy) { + // std::cout << "axes[1] = 1\n"; + axes[1] = 1; + } + } + break; + } + } face_t remainingFace = face; // copy size_t guess_vert = 0; From 28a27f7006a7aa22ac8ee2997e2bf639c56b4ebd Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Sun, 9 Oct 2022 03:52:37 +0900 Subject: [PATCH 091/139] Bfut python new setup (#345) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Delete MANIFEST.in * Update setup.py per current pybind11 example; set relative include paths to library header * Move setup.py and pyproject.toml to root, delete MANIFEST.in * Update Python build per current pybind11 example, setting binding relative include paths to library header * Update azure pipeline build. Use pipx to build source dist of Python package. * Fix black(python format checker) install * Use 'latest' OS image. * Disable aarch64 linux build support in Azure pipeline build since it takes too much time to build. Co-authored-by: Benjamin Futász <34510704+bfut@users.noreply.github.com> --- azure-pipelines.yml | 61 +++++++++--------- pyproject.toml | 3 + python/MANIFEST.in | 6 -- python/Makefile | 2 +- python/README.md | 13 ++-- python/bindings.cc | 4 +- python/pyproject.toml | 2 - python/setup.py | 131 -------------------------------------- python/tiny_obj_loader.cc | 2 +- setup.py | 63 ++++++++++++++++++ 10 files changed, 104 insertions(+), 183 deletions(-) create mode 100644 pyproject.toml delete mode 100644 python/MANIFEST.in delete mode 100644 python/pyproject.toml delete mode 100644 python/setup.py create mode 100644 setup.py diff --git a/azure-pipelines.yml b/azure-pipelines.yml index b9c4fcd5..a9202d44 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -1,8 +1,13 @@ variables: # https://cibuildwheel.readthedocs.io/en/stable/cpp_standards/ - # python2.7 + C++11(pybind11) is not available. 
- CIBW_SKIP: "cp27-win* pp27-win32 pp36-win32" + # cibuildwheel now supports python 3.6+(as of 2022 Oct) + #CIBW_SKIP: "pp*" CIBW_BEFORE_BUILD: "pip install pybind11" + CIBW_ARCHS_LINUXBEFORE_BUILD: "pip install pybind11" + # disable aarch64 build for a while since it(pulling docker aarch64 image) exceeds Azure's 60 min limit + # NOTE: aarch64 linux support in Azure pipeline is not yet officially supported(as of 2022 Oct) https://github.com/microsoft/azure-pipelines-agent/issues/3935 + #CIBW_ARCHS_LINUX: auto aarch64 + CIBW_ARCHS_MACOS: x86_64 universal2 arm64 #CIBW_BEFORE_BUILD_MACOS: "pip install -U pip setuptools" #CIBW_BEFORE_BUILD_LINUX: "pip install -U pip setuptools" #CIBW_TEST_COMMAND: TODO "python -c \"import tinyobjloader; tinyobjloader.test()\"" @@ -24,7 +29,11 @@ jobs: steps: - task: UsePythonVersion@0 - script: | - pip install black==19.10b0 + # 19.10b0 triggers 'cannot import name '_unicodefun' from 'click'' error. + # https://stackoverflow.com/questions/71673404/importerror-cannot-import-name-unicodefun-from-click + #pip install black==19.10b0 + pip install black==22.3.0 + black --check python/ displayName: Check Python code format @@ -37,27 +46,25 @@ jobs: - bash: | python3 -m pip install --upgrade pip pip3 install cibuildwheel twine - # Make the header files available to the build. - cp *.h python - mkdir python/mapbox - cp mapbox/earcut.hpp python/mapbox/ - cd python + + # Use pipx to build source dist + pip3 install pipx # Source dist - python3 setup.py sdist + pipx run build --sdist ls -la dist/* # build binary wheels - cibuildwheel --output-dir wheelhouse . + cibuildwheel --platform linux --output-dir wheelhouse . - task: CopyFiles@2 inputs: - contents: 'python/wheelhouse/**' + contents: 'wheelhouse/**' targetFolder: $(Build.ArtifactStagingDirectory) - task: CopyFiles@2 inputs: - contents: 'python/dist/**' + contents: 'dist/**' targetFolder: $(Build.ArtifactStagingDirectory) - task: PublishBuildArtifacts@1 @@ -66,7 +73,7 @@ jobs: artifactName: tinyobjDeployLinux - job: macos - pool: {vmImage: 'macOS-10.15'} + pool: {vmImage: 'macOS-latest'} variables: # Support C++11: https://github.com/joerick/cibuildwheel/pull/156 MACOSX_DEPLOYMENT_TARGET: 10.9 @@ -75,15 +82,10 @@ jobs: - bash: | python3 -m pip install --upgrade pip pip3 install cibuildwheel - # Make the header files available to the build. - cp *.h python - mkdir python/mapbox - cp mapbox/earcut.hpp python/mapbox/earcut.hpp - cd python - cibuildwheel --output-dir wheelhouse . + cibuildwheel --platform macos --output-dir wheelhouse . - task: CopyFiles@2 inputs: - contents: 'python/wheelhouse/*.whl' + contents: 'wheelhouse/*.whl' targetFolder: $(Build.ArtifactStagingDirectory) - task: PublishBuildArtifacts@1 inputs: @@ -91,21 +93,16 @@ jobs: artifactName: tinyobjDeployMacOS - job: windows - pool: {vmImage: 'windows-2019'} + pool: {vmImage: 'windows-latest'} steps: - task: UsePythonVersion@0 - bash: | python -m pip install --upgrade pip pip install cibuildwheel - # Make the header files available to the build. - cp *.h python - mkdir python/mapbox - cp mapbox/earcut.hpp python/mapbox/ - cd python - cibuildwheel --output-dir wheelhouse . + cibuildwheel --platform windows --output-dir wheelhouse . 
- task: CopyFiles@2 inputs: - contents: 'python/wheelhouse/*.whl' + contents: 'wheelhouse/*.whl' targetFolder: $(Build.ArtifactStagingDirectory) - task: PublishBuildArtifacts@1 inputs: @@ -114,7 +111,7 @@ jobs: - job: deployPyPI # Based on vispy: https://github.com/vispy/vispy/blob/master/azure-pipelines.yml - pool: {vmImage: 'Ubuntu-16.04'} + pool: {vmImage: 'ubuntu-latest'} condition: and(succeeded(), startsWith(variables['Build.SourceBranch'], 'refs/tags/v')) dependsOn: - linux @@ -145,9 +142,9 @@ jobs: find . python -m pip install --upgrade pip pip install twine - echo tinyobjDeployLinux/python/dist/* - echo tinyobjDeployLinux/python/wheelhouse/* tinyobjDeployMacOS/python/wheelhouse/* tinyobjDeployWindows/python/wheelhouse/* - twine upload -u "__token__" --skip-existing tinyobjDeployLinux/python/dist/* tinyobjDeployLinux/python/wheelhouse/* tinyobjDeployMacOS/python/wheelhouse/* tinyobjDeployWindows/python/wheelhouse/* + echo tinyobjDeployLinux/dist/* + echo tinyobjDeployLinux/wheelhouse/* tinyobjDeployMacOS/wheelhouse/* tinyobjDeployWindows/wheelhouse/* + twine upload -u "__token__" --skip-existing tinyobjDeployLinux/dist/* tinyobjDeployLinux/wheelhouse/* tinyobjDeployMacOS/wheelhouse/* tinyobjDeployWindows/wheelhouse/* env: TWINE_PASSWORD: $(pypiToken2) diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..ac831e23 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,3 @@ +[build-system] +requires = ["setuptools>=42", "wheel", "pip>=19.2", "pybind11>=2.8.0"] +build-backend = "setuptools.build_meta" diff --git a/python/MANIFEST.in b/python/MANIFEST.in deleted file mode 100644 index 90ef93d6..00000000 --- a/python/MANIFEST.in +++ /dev/null @@ -1,6 +0,0 @@ -# Copy the header file into the python/ folder. -include ../tiny_obj_loader.h -# Include it in the source distribution. -include tiny_obj_loader.h - -include pyproject.toml diff --git a/python/Makefile b/python/Makefile index 06352eda..ede9c2d2 100644 --- a/python/Makefile +++ b/python/Makefile @@ -1,5 +1,5 @@ all: - python setup.py build + cd .. && python -m pip install . t: python sample.py diff --git a/python/README.md b/python/README.md index 913668f6..8f9aa5d2 100644 --- a/python/README.md +++ b/python/README.md @@ -5,8 +5,7 @@ ## Requirements -* python 3.x(3.6+ recommended) - * python 2.7 may work, but not officially supported. +* python 3.6+ ## Install @@ -67,18 +66,17 @@ https://github.com/syoyo/tinyobjloader/blob/master/python/sample.py ## How to build -Using `cibuildwheel` is an recommended way to build a python module. +Using `cibuildwheel` is a recommended way to build a python module. See $tinyobjloader/azure-pipelines.yml for details. ### Developer build -Edit `setup.py` and uncomment `Developer option` lines - Assume pip is installed. ``` -$ pip install pybind11 -$ python setup.py build +$ git clone https://github.com/tinyobjloader/tinyobjloader +$ cd tinyobjloader +$ python -m pip install . ``` ## License @@ -87,4 +85,3 @@ MIT(tinyobjloader) and ISC(mapbox earcut) license. ## TODO * [ ] Writer saver - diff --git a/python/bindings.cc b/python/bindings.cc index 08592d70..1e98ac7e 100644 --- a/python/bindings.cc +++ b/python/bindings.cc @@ -8,7 +8,7 @@ // define some helper functions for pybind11 #define TINY_OBJ_LOADER_PYTHON_BINDING -#include "tiny_obj_loader.h" +#include "../tiny_obj_loader.h" namespace py = pybind11; @@ -132,7 +132,7 @@ PYBIND11_MODULE(tinyobjloader, tobj_module) .def("numpy_indices", [] (mesh_t &instance) { // Flatten indexes. 
index_t is composed of 3 ints(vertex_index, normal_index, texcoord_index). // numpy_indices = [0, -1, -1, 1, -1, -1, ...] - // C++11 or later should pack POD struct tightly and does not reorder variables, + // C++11 or later should pack POD struct tightly and does not reorder variables, // so we can memcpy to copy data. // Still, we check the size of struct and byte offsets of each variable just for sure. static_assert(sizeof(index_t) == 12, "sizeof(index_t) must be 12"); diff --git a/python/pyproject.toml b/python/pyproject.toml deleted file mode 100644 index 51e27c42..00000000 --- a/python/pyproject.toml +++ /dev/null @@ -1,2 +0,0 @@ -[build-system] -requires = ["setuptools", "wheel", "pybind11>=2.3"] diff --git a/python/setup.py b/python/setup.py deleted file mode 100644 index 578cd1ef..00000000 --- a/python/setup.py +++ /dev/null @@ -1,131 +0,0 @@ -import setuptools -import platform - -from distutils.command.build_ext import build_ext - -with open("README.md", "r") as fh: - long_description = fh.read() - -# Adapted from https://github.com/pybind/python_example/blob/master/setup.py -class get_pybind_include(object): - """Helper class to determine the pybind11 include path - The purpose of this class is to postpone importing pybind11 - until it is actually installed, so that the ``get_include()`` - method can be invoked. """ - - def __init__(self, user=False, pep517=False): - self.user = user - self.pep517 = pep517 - - def __str__(self): - import os - import pybind11 - - interpreter_include_path = pybind11.get_include(self.user) - - if self.pep517: - # When pybind11 is installed permanently in site packages, the headers - # will be in the interpreter include path above. PEP 517 provides an - # experimental feature for build system dependencies. When installing - # a package from a source distribvution, first its build dependencies - # are installed in a temporary location. pybind11 does not return the - # correct path for this condition, so we glom together a second path, - # and ultimately specify them _both_ in the include search path. - # https://github.com/pybind/pybind11/issues/1067 - return os.path.abspath( - os.path.join( - os.path.dirname(pybind11.__file__), - "..", - "..", - "..", - "..", - "include", - os.path.basename(interpreter_include_path), - ) - ) - else: - return interpreter_include_path - - -# unix = default compiler name? -copt = {"unix": ["-std=c++11"], "gcc": ["-std=c++11"], "clang": ["std=c++11"]} -# TODO: set C++ version for msvc? {'msvc': ["/std:c++14"] } - -# ext_compile_args = ["-std=c++11"] -# ext_link_args = [] - -# https://stackoverflow.com/questions/724664/python-distutils-how-to-get-a-compiler-that-is-going-to-be-used -class build_ext_subclass(build_ext): - def build_extensions(self): - c = self.compiler.compiler_type - if c in copt: - for e in self.extensions: - e.extra_compile_args = copt[c] - - # if lopt.has_key(c): - # for e in self.extensions: - # e.extra_link_args = lopt[ c ] - build_ext.build_extensions(self) - - -# Developer option -# -# if platform.system() == "Darwin": -# # XCode10 or later does not support libstdc++, so we need to use libc++. -# # macosx-version 10.6 does not support libc++, so we require min macosx version 10.9. -# ext_compile_args.append("-stdlib=libc++") -# ext_compile_args.append("-mmacosx-version-min=10.9") -# ext_link_args.append("-stdlib=libc++") -# ext_link_args.append("-mmacosx-version-min=10.9") - -# `tiny_obj_loader.cc` contains implementation of tiny_obj_loader. 
-m = setuptools.Extension( - "tinyobjloader", - # extra_compile_args=ext_compile_args, - # extra_link_args=ext_link_args, - sources=["bindings.cc", "tiny_obj_loader.cc"], - include_dirs=[ - # Support `build_ext` finding tinyobjloader (without first running - # `sdist`). - "..", - # Support `build_ext` finding pybind 11 (provided it's permanently - # installed). - get_pybind_include(), - get_pybind_include(user=True), - # Support building from a source distribution finding pybind11 from - # a PEP 517 temporary install. - get_pybind_include(pep517=True), - ], - language="c++", -) - - -setuptools.setup( - name="tinyobjloader", - version="2.0.0rc9", - description="Tiny but powerful Wavefront OBJ loader", - long_description=long_description, - long_description_content_type="text/markdown", - author="Syoyo Fujita", - author_email="syoyo@lighttransport.com", - url="https://github.com/tinyobjloader/tinyobjloader", - project_urls={ - "Issue Tracker": "https://github.com/tinyobjloader/tinyobjloader/issues", - }, - classifiers=[ - "Development Status :: 5 - Production/Stable", - "Intended Audience :: Developers", - "Intended Audience :: Science/Research", - "Intended Audience :: Manufacturing", - "Topic :: Artistic Software", - "Topic :: Multimedia :: Graphics :: 3D Modeling", - "Topic :: Scientific/Engineering :: Visualization", - "License :: OSI Approved :: MIT License", - "License :: OSI Approved :: ISC License (ISCL)", - "Operating System :: OS Independent", - "Programming Language :: Python :: 3", - ], - packages=setuptools.find_packages(), - ext_modules=[m], - cmdclass={"build_ext": build_ext_subclass}, -) diff --git a/python/tiny_obj_loader.cc b/python/tiny_obj_loader.cc index a0b8bc60..821542e7 100644 --- a/python/tiny_obj_loader.cc +++ b/python/tiny_obj_loader.cc @@ -6,4 +6,4 @@ #define TINYOBJLOADER_USE_MAPBOX_EARCUT #define TINYOBJLOADER_IMPLEMENTATION -#include "tiny_obj_loader.h" +#include "../tiny_obj_loader.h" diff --git a/setup.py b/setup.py new file mode 100644 index 00000000..123490dd --- /dev/null +++ b/setup.py @@ -0,0 +1,63 @@ +# Adapted from https://github.com/pybind/python_example/blob/master/setup.py +import sys + +#from pybind11 import get_cmake_dir +# Available at setup time due to pyproject.toml +from pybind11.setup_helpers import Pybind11Extension#, build_ext +from setuptools import setup + +__version__ = "2.0.0rc9" + +with open("README.md", "r", encoding="utf8") as fh: + long_description = fh.read() + +# The main interface is through Pybind11Extension. +# * You can add cxx_std=11/14/17, and then build_ext can be removed. +# * You can set include_pybind11=false to add the include directory yourself, +# say from a submodule. 
+# +# Note: +# Sort input source files if you glob sources to ensure bit-for-bit +# reproducible builds (https://github.com/pybind/python_example/pull/53) + +ext_modules = [ + Pybind11Extension("tinyobjloader", + sorted(["python/bindings.cc", "python/tiny_obj_loader.cc"]), + # Example: passing in the version to the compiled code + define_macros = [('VERSION_INFO', __version__)], + cxx_std=11, + ), +] + +setup( + name="tinyobjloader", + version=__version__, + author="Syoyo Fujita", + author_email="syoyo@lighttransport.com", + url="https://github.com/tinyobjloader/tinyobjloader", + project_urls={ + "Issue Tracker": "https://github.com/tinyobjloader/tinyobjloader/issues", + }, + description="Tiny but powerful Wavefront OBJ loader", + long_description=long_description, + classifiers=[ + "Development Status :: 5 - Production/Stable", + "Intended Audience :: Developers", + "Intended Audience :: Science/Research", + "Intended Audience :: Manufacturing", + "Topic :: Artistic Software", + "Topic :: Multimedia :: Graphics :: 3D Modeling", + "Topic :: Scientific/Engineering :: Visualization", + "License :: OSI Approved :: MIT License", + "License :: OSI Approved :: ISC License (ISCL)", + "Operating System :: OS Independent", + "Programming Language :: Python :: 3", + ], + ext_modules=ext_modules, + extras_require={"test": "pytest"}, + # Currently, build_ext only provides an optional "highest supported C++ + # level" feature, but in the future it may provide more features. + # cmdclass={"build_ext": build_ext}, + zip_safe=False, + python_requires=">=3.6", +) From 5a41a782beaaa67434c6f8630ce07bde86629ddb Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Sun, 9 Oct 2022 21:14:45 +0900 Subject: [PATCH 092/139] [Viewer example] Add wireframe onoff feature. Add backface cull mode. 
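For context, the new keys reuse the viewer's existing two-pass draw: the filled pass is rendered with a polygon offset so the line pass drawn on top does not z-fight with it. The new g_show_wire flag gates that second pass, and g_cull_face switches how back faces are drawn in the first one. A condensed sketch of the pattern follows (draw calls elided; the helper name is illustrative, not the viewer's actual function):

    #include <GLFW/glfw3.h>  // pulls in the legacy GL declarations used below

    static void drawWithOverlay(bool show_wire, bool back_as_lines) {
      glEnable(GL_POLYGON_OFFSET_FILL);
      glPolygonOffset(1.0f, 1.0f);  // push filled fragments slightly back in depth
      glPolygonMode(GL_FRONT, GL_FILL);
      glPolygonMode(GL_BACK, back_as_lines ? GL_LINE : GL_FILL);
      // ... issue the triangle draw calls for the filled pass ...

      if (show_wire) {
        glDisable(GL_POLYGON_OFFSET_FILL);
        glPolygonMode(GL_FRONT, GL_LINE);
        glPolygonMode(GL_BACK, GL_LINE);
        glColor3f(0.0f, 0.0f, 0.4f);
        // ... issue the same draw calls again; drawn without the offset, the
        // lines pass the depth test and appear as an outline over the fill ...
      }
    }
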
--- examples/viewer/viewer.cc | 70 ++++++++++++++++++++++++++------------- models/issue-319-002.obj | 39 ++++++++++++++++++++++ models/issue-319-003.obj | 27 +++++++++++++++ models/issue-330.obj | 12 +++++++ 4 files changed, 125 insertions(+), 23 deletions(-) create mode 100644 models/issue-319-002.obj create mode 100644 models/issue-319-003.obj create mode 100644 models/issue-330.obj diff --git a/examples/viewer/viewer.cc b/examples/viewer/viewer.cc index c523ecdf..875cf181 100644 --- a/examples/viewer/viewer.cc +++ b/examples/viewer/viewer.cc @@ -152,6 +152,8 @@ bool mouseRightPressed; float curr_quat[4]; float prev_quat[4]; float eye[3], lookat[3], up[3]; +bool g_show_wire = true; +bool g_cull_face = false; GLFWwindow* window; @@ -833,8 +835,19 @@ static void keyboardFunc(GLFWwindow* window, int key, int scancode, int action, mv_z += -1; // camera.move(mv_x * 0.05, mv_y * 0.05, mv_z * 0.05); // Close window - if (key == GLFW_KEY_Q || key == GLFW_KEY_ESCAPE) + if (key == GLFW_KEY_Q || key == GLFW_KEY_ESCAPE) { glfwSetWindowShouldClose(window, GL_TRUE); + } + + if (key == GLFW_KEY_W) { + // toggle wireframe + g_show_wire = !g_show_wire; + } + + if (key == GLFW_KEY_C) { + // cull option + g_cull_face = !g_cull_face; + } // init_frame = true; } @@ -898,7 +911,11 @@ static void Draw(const std::vector& drawObjects, std::vector& materials, std::map& textures) { glPolygonMode(GL_FRONT, GL_FILL); - glPolygonMode(GL_BACK, GL_FILL); + if (g_cull_face) { + glPolygonMode(GL_BACK, GL_LINE); + } else { + glPolygonMode(GL_BACK, GL_FILL); + } glEnable(GL_POLYGON_OFFSET_FILL); glPolygonOffset(1.0, 1.0); @@ -933,29 +950,31 @@ static void Draw(const std::vector& drawObjects, } // draw wireframe - glDisable(GL_POLYGON_OFFSET_FILL); - glPolygonMode(GL_FRONT, GL_LINE); - glPolygonMode(GL_BACK, GL_LINE); + if (g_show_wire) { + glDisable(GL_POLYGON_OFFSET_FILL); + glPolygonMode(GL_FRONT, GL_LINE); + glPolygonMode(GL_BACK, GL_LINE); + + glColor3f(0.0f, 0.0f, 0.4f); + for (size_t i = 0; i < drawObjects.size(); i++) { + DrawObject o = drawObjects[i]; + if (o.vb_id < 1) { + continue; + } - glColor3f(0.0f, 0.0f, 0.4f); - for (size_t i = 0; i < drawObjects.size(); i++) { - DrawObject o = drawObjects[i]; - if (o.vb_id < 1) { - continue; + glBindBuffer(GL_ARRAY_BUFFER, o.vb_id); + glEnableClientState(GL_VERTEX_ARRAY); + glEnableClientState(GL_NORMAL_ARRAY); + glDisableClientState(GL_COLOR_ARRAY); + glDisableClientState(GL_TEXTURE_COORD_ARRAY); + glVertexPointer(3, GL_FLOAT, stride, (const void*)0); + glNormalPointer(GL_FLOAT, stride, (const void*)(sizeof(float) * 3)); + glColorPointer(3, GL_FLOAT, stride, (const void*)(sizeof(float) * 6)); + glTexCoordPointer(2, GL_FLOAT, stride, (const void*)(sizeof(float) * 9)); + + glDrawArrays(GL_TRIANGLES, 0, 3 * o.numTriangles); + CheckErrors("drawarrays"); } - - glBindBuffer(GL_ARRAY_BUFFER, o.vb_id); - glEnableClientState(GL_VERTEX_ARRAY); - glEnableClientState(GL_NORMAL_ARRAY); - glDisableClientState(GL_COLOR_ARRAY); - glDisableClientState(GL_TEXTURE_COORD_ARRAY); - glVertexPointer(3, GL_FLOAT, stride, (const void*)0); - glNormalPointer(GL_FLOAT, stride, (const void*)(sizeof(float) * 3)); - glColorPointer(3, GL_FLOAT, stride, (const void*)(sizeof(float) * 6)); - glTexCoordPointer(2, GL_FLOAT, stride, (const void*)(sizeof(float) * 9)); - - glDrawArrays(GL_TRIANGLES, 0, 3 * o.numTriangles); - CheckErrors("drawarrays"); } } @@ -995,6 +1014,11 @@ int main(int argc, char** argv) { return 1; } + std::cout << "W : Toggle wireframe\n"; + std::cout << "C : Toggle face culling\n"; + 
//std::cout << "K, J, H, L, P, N : Move camera\n"; + std::cout << "Q, Esc : quit\n"; + glfwMakeContextCurrent(window); glfwSwapInterval(1); diff --git a/models/issue-319-002.obj b/models/issue-319-002.obj new file mode 100644 index 00000000..8e056fa0 --- /dev/null +++ b/models/issue-319-002.obj @@ -0,0 +1,39 @@ +### +# +# OBJ File Generated by Meshlab +# +#### +# Object ZH2_001.obj +# +# Vertices: 19 +# Faces: 3 +# +#### +v 8219.830078 6406.934082 9.603000 +v 8219.632812 6406.582031 9.603000 +v 8219.632812 6406.582031 9.139000 +v 8219.973633 6405.420898 9.139000 +v 8211.128906 6404.090820 9.139000 +v 8211.128906 6404.090820 9.603000 +v 8211.469727 6402.930176 9.139000 +v 8211.469727 6402.930176 9.603000 +v 8211.133789 6402.831055 9.603000 +v 8210.793945 6403.992188 9.603000 +v 8210.713867 6404.264160 9.603000 +v 8211.840820 6403.038086 9.139000 +v 8219.899414 6404.861816 9.139000 +v 8219.755859 6405.352051 9.139000 +v 8211.985352 6402.544922 9.139000 +v 8232.911133 6378.534180 55.848999 +v 8226.281250 6376.591797 55.848999 +v 8226.341797 6376.384766 55.848999 +v 8233.450195 6378.466797 55.852001 +v 8233.450195 6378.466797 55.852001 +# 19 vertices, 0 vertices normals + +f 2 1 11 10 9 8 6 +f 5 7 12 15 13 14 4 3 +f 18 19 20 16 17 +# 3 faces, 0 coords texture + +# End of File diff --git a/models/issue-319-003.obj b/models/issue-319-003.obj new file mode 100644 index 00000000..882a25c1 --- /dev/null +++ b/models/issue-319-003.obj @@ -0,0 +1,27 @@ +#### +# +# OBJ File Generated by Meshlab +# +#### +# Object new 1.obj +# +# Vertices: 10 +# Faces: 1 +# +#### +v 8434.808594 6083.654785 2.387000 +v 8434.808594 6083.654785 71.633003 +v 8432.309570 6092.206055 71.633003 +v 8432.309570 6092.206055 63.955002 +v 8432.309570 6092.206055 2.387000 +v 8433.083984 6089.560059 71.633003 +v 8433.161133 6089.293945 71.633003 +v 8432.309570 6092.206055 64.323997 +v 8432.309570 6092.206055 67.152000 +v 8432.309570 6092.206055 68.078003 +# 10 vertices, 0 vertices normals + +f 6 7 2 1 5 4 8 9 10 3 +# 1 faces, 0 coords texture + +# End of File diff --git a/models/issue-330.obj b/models/issue-330.obj new file mode 100644 index 00000000..aa46631c --- /dev/null +++ b/models/issue-330.obj @@ -0,0 +1,12 @@ +v -105.342712 40.184242 -16.056709 +v -105.463989 40.202003 -16.003181 +v -105.564941 40.207558 -15.934708 +v -105.722252 40.151146 -16.112091 +v -105.610237 40.191372 -16.176643 +v -105.667282 40.189800 -15.864197 +v -105.751717 40.125790 -15.794304 +# 7 vertices, 0 vertices normals + +f 2 5 4 3 +f 4 6 3 +# 2 faces, 0 coords texture From f48bd0bfb9b00ed1f5b13dbcdbd7909ca8bd49b5 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Sun, 9 Oct 2022 21:36:35 +0900 Subject: [PATCH 093/139] Bump version to v2.0.0rc10 --- CMakeLists.txt | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index f64b42ce..b1ae4334 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -4,7 +4,7 @@ project(tinyobjloader) cmake_minimum_required(VERSION 3.2) set(TINYOBJLOADER_SOVERSION 2) -set(TINYOBJLOADER_VERSION 2.0.0-rc.9) +set(TINYOBJLOADER_VERSION 2.0.0-rc.10) #optional double precision support option(TINYOBJLOADER_USE_DOUBLE "Build library with double precision instead of single (float)" OFF) diff --git a/setup.py b/setup.py index 123490dd..b8bde4d4 100644 --- a/setup.py +++ b/setup.py @@ -6,7 +6,7 @@ from pybind11.setup_helpers import Pybind11Extension#, build_ext from setuptools import setup -__version__ = "2.0.0rc9" +__version__ = "2.0.0rc10" with open("README.md", "r", 
encoding="utf8") as fh: long_description = fh.read() From 69cb4cdcf81d1dcb9e748c7bd76b6a1c4e781ee5 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Sun, 9 Oct 2022 21:37:39 +0900 Subject: [PATCH 094/139] Update README. --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index c0f641be..4f48f333 100644 --- a/README.md +++ b/README.md @@ -421,7 +421,7 @@ cibuildwheels + twine upload for each git tagging event is handled in Azure Pipe #### How to bump version(For developer) * Bump version in CMakeLists.txt -* Update version in `python/setup.py` +* Update version in `setup.py` * Commit and push `master`. Confirm C.I. build is OK. * Create tag starting with `v`(e.g. `v2.1.0`) * `git push --tags` From 764cf751847b3e569d92ac400de7ec8c62be9ecd Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Sun, 9 Oct 2022 22:29:56 +0900 Subject: [PATCH 095/139] Fix twine upload. long_description_content_type is missing in setup.py --- setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.py b/setup.py index b8bde4d4..4f2ecc04 100644 --- a/setup.py +++ b/setup.py @@ -40,6 +40,7 @@ }, description="Tiny but powerful Wavefront OBJ loader", long_description=long_description, + long_description_content_type='text/markdown', classifiers=[ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", From f856161bb6843e4b8346ffbaa4b9b4f0fa709b10 Mon Sep 17 00:00:00 2001 From: tylermorganwall Date: Fri, 21 Oct 2022 04:26:51 -0400 Subject: [PATCH 096/139] Update tiny_obj_loader.h (#346) Fix wrong axis selected in Newell's method --- tiny_obj_loader.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h index 5afac115..57989301 100644 --- a/tiny_obj_loader.h +++ b/tiny_obj_loader.h @@ -1597,7 +1597,7 @@ static bool exportGroupsToShape(shape_t *shape, const PrimGroup &prim_group, TinyObjPoint a(point1.x - point2.x, point1.y - point2.y, point1.z - point2.z); TinyObjPoint b(point1.x + point2.x, point1.y + point2.y, point1.z + point2.z); - n.x += (a.x * b.z); + n.x += (a.y * b.z); n.y += (a.z * b.x); n.z += (a.x * b.y); } From d0fe1d366c337fc4ced012b76aece3e24fd4124b Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Fri, 21 Oct 2022 17:36:23 +0900 Subject: [PATCH 097/139] Suppress some clang warnings. 
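The kinds of warnings this touches, shown in isolation below (a standalone sketch with made-up names, not code from the header): unqualified abs() applied to a floating-point value, and plain floating-point literals mixed into real_t arithmetic.

    #include <cmath>

    typedef float real_t;  // tinyobjloader's default; double only with TINYOBJLOADER_USE_DOUBLE

    real_t flipped_inverse(real_t len) {
      // Unqualified abs() can bind to the C integer overload and truncate the
      // argument (clang's -Wabsolute-value); std::abs() selects the
      // floating-point overload instead.
      if (std::abs(len) <= real_t(0)) return real_t(0);

      // Literals such as 1.0 or 0.9999999 can promote the expression to double
      // when real_t is float (clang's -Wdouble-promotion); wrapping them as
      // real_t(...) keeps the arithmetic in the configured precision.
      return -real_t(1.0) / len;
    }
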
--- tiny_obj_loader.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h index 57989301..d109a0bd 100644 --- a/tiny_obj_loader.h +++ b/tiny_obj_loader.h @@ -1607,7 +1607,7 @@ static bool exportGroupsToShape(shape_t *shape, const PrimGroup &prim_group, continue; } //Negative is to flip the normal to the correct direction - real_t inv_length = -1.0f / length_n; + real_t inv_length = -real_t(1.0) / length_n; n.x *= inv_length; n.y *= inv_length; n.z *= inv_length; @@ -1615,7 +1615,7 @@ static bool exportGroupsToShape(shape_t *shape, const PrimGroup &prim_group, TinyObjPoint axis_w, axis_v, axis_u; axis_w = n; TinyObjPoint a; - if(abs(axis_w.x) > 0.9999999) { + if(std::abs(axis_w.x) > real_t(0.9999999)) { a = TinyObjPoint(0,1,0); } else { a = TinyObjPoint(1,0,0); From 5790ebd9eb466bc1ebe8ed5c5a55ee642b6f24bc Mon Sep 17 00:00:00 2001 From: SpaceIm <30052553+SpaceIm@users.noreply.github.com> Date: Sat, 22 Oct 2022 21:27:52 +0200 Subject: [PATCH 098/139] move cmake_minimum_required before project (#347) --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index b1ae4334..d7fd667c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,8 +1,8 @@ #Tiny Object Loader Cmake configuration file. #This configures the Cmake system with multiple properties, depending #on the platform and configuration it is set to build in. -project(tinyobjloader) cmake_minimum_required(VERSION 3.2) +project(tinyobjloader) set(TINYOBJLOADER_SOVERSION 2) set(TINYOBJLOADER_VERSION 2.0.0-rc.10) From f026a1e0b86979cb5a7d7f6f2432d02590e61dc9 Mon Sep 17 00:00:00 2001 From: Trider12 <47952308+Trider12@users.noreply.github.com> Date: Thu, 3 Nov 2022 22:26:00 +0600 Subject: [PATCH 099/139] Allow normal and tex indices to be zero + fix a warning (#349) * Allow normal and tex indices to be zero (cherry picked from commit 4428e7e7e82a4aea1f49ff748525c688d0bd2649) * Fixed a warning * Fixed constness * Fixed a warning * Fixed the absence of std::to_string() * Fixed a quirky old compiler error * Fixed a c++03 warning --- tiny_obj_loader.h | 65 +++++++++++++++++++++++++++++------------------ 1 file changed, 40 insertions(+), 25 deletions(-) diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h index d109a0bd..20e97001 100644 --- a/tiny_obj_loader.h +++ b/tiny_obj_loader.h @@ -796,8 +796,21 @@ static std::istream &safeGetline(std::istream &is, std::string &t) { (static_cast((x) - '0') < static_cast(10)) #define IS_NEW_LINE(x) (((x) == '\r') || ((x) == '\n') || ((x) == '\0')) +template +static inline std::string toString(const T &t) { + std::stringstream ss; + ss << t; + return ss.str(); +} + +struct warning_context +{ + std::string *warn; + size_t line_number; +}; + // Make index zero-base, and also support relative index. -static inline bool fixIndex(int idx, int n, int *ret) { +static inline bool fixIndex(int idx, int n, int *ret, bool allow_zero, const warning_context &context) { if (!ret) { return false; } @@ -809,7 +822,13 @@ static inline bool fixIndex(int idx, int n, int *ret) { if (idx == 0) { // zero is not allowed according to the spec. - return false; + if (context.warn) { + (*context.warn) += "A zero value index found (will have a value of -1 for normal and tex indices. 
Line " + + toString(context.line_number) + ").\n"; + } + + (*ret) = idx - 1; + return allow_zero; } if (idx < 0) { @@ -1134,14 +1153,14 @@ static tag_sizes parseTagTriple(const char **token) { // Parse triples with index offsets: i, i/j/k, i//k, i/j static bool parseTriple(const char **token, int vsize, int vnsize, int vtsize, - vertex_index_t *ret) { + vertex_index_t *ret, const warning_context &context) { if (!ret) { return false; } vertex_index_t vi(-1); - if (!fixIndex(atoi((*token)), vsize, &(vi.v_idx))) { + if (!fixIndex(atoi((*token)), vsize, &vi.v_idx, false, context)) { return false; } @@ -1155,7 +1174,7 @@ static bool parseTriple(const char **token, int vsize, int vnsize, int vtsize, // i//k if ((*token)[0] == '/') { (*token)++; - if (!fixIndex(atoi((*token)), vnsize, &(vi.vn_idx))) { + if (!fixIndex(atoi((*token)), vnsize, &vi.vn_idx, true, context)) { return false; } (*token) += strcspn((*token), "/ \t\r"); @@ -1164,7 +1183,7 @@ static bool parseTriple(const char **token, int vsize, int vnsize, int vtsize, } // i/j/k or i/j - if (!fixIndex(atoi((*token)), vtsize, &(vi.vt_idx))) { + if (!fixIndex(atoi((*token)), vtsize, &vi.vt_idx, true, context)) { return false; } @@ -1176,7 +1195,7 @@ static bool parseTriple(const char **token, int vsize, int vnsize, int vtsize, // i/j/k (*token)++; // skip '/' - if (!fixIndex(atoi((*token)), vnsize, &(vi.vn_idx))) { + if (!fixIndex(atoi((*token)), vnsize, &vi.vn_idx, true, context)) { return false; } (*token) += strcspn((*token), "/ \t\r"); @@ -1419,7 +1438,7 @@ inline real_t GetLength(TinyObjPoint &e) { } inline TinyObjPoint Normalize(TinyObjPoint e) { - real_t inv_length = 1.0 / GetLength(e); + real_t inv_length = real_t(1) / GetLength(e); return TinyObjPoint(e.x * inv_length, e.y * inv_length, e.z * inv_length ); } @@ -2678,6 +2697,10 @@ bool LoadObj(attrib_t *attrib, std::vector *shapes, vw.push_back(sw); } + warning_context context; + context.warn = warn; + context.line_number = line_num; + // line if (token[0] == 'l' && IS_SPACE((token[1]))) { token += 2; @@ -2688,13 +2711,10 @@ bool LoadObj(attrib_t *attrib, std::vector *shapes, vertex_index_t vi; if (!parseTriple(&token, static_cast(v.size() / 3), static_cast(vn.size() / 3), - static_cast(vt.size() / 2), &vi)) { + static_cast(vt.size() / 2), &vi, context)) { if (err) { - std::stringstream ss; - ss << "Failed parse `l' line(e.g. zero value for vertex index. " - "line " - << line_num << ".)\n"; - (*err) += ss.str(); + (*err) += "Failed to parse `l' line (e.g. a zero value for vertex index. Line " + + toString(line_num) + ").\n"; } return false; } @@ -2720,13 +2740,10 @@ bool LoadObj(attrib_t *attrib, std::vector *shapes, vertex_index_t vi; if (!parseTriple(&token, static_cast(v.size() / 3), static_cast(vn.size() / 3), - static_cast(vt.size() / 2), &vi)) { + static_cast(vt.size() / 2), &vi, context)) { if (err) { - std::stringstream ss; - ss << "Failed parse `p' line(e.g. zero value for vertex index. " - "line " - << line_num << ".)\n"; - (*err) += ss.str(); + (*err) += "Failed to parse `p' line (e.g. a zero value for vertex index. Line " + + toString(line_num) + ").\n"; } return false; } @@ -2756,12 +2773,10 @@ bool LoadObj(attrib_t *attrib, std::vector *shapes, vertex_index_t vi; if (!parseTriple(&token, static_cast(v.size() / 3), static_cast(vn.size() / 3), - static_cast(vt.size() / 2), &vi)) { + static_cast(vt.size() / 2), &vi, context)) { if (err) { - std::stringstream ss; - ss << "Failed parse `f' line(e.g. zero value for face index. 
line " - << line_num << ".)\n"; - (*err) += ss.str(); + (*err) += "Failed to parse `f' line (e.g. a zero value for vertex index. Line " + + toString(line_num) + ").\n"; } return false; } From 4a02d274ae1836b40adaa603fbafa97a76c2abc7 Mon Sep 17 00:00:00 2001 From: Eduardo Doria Date: Mon, 7 Nov 2022 08:40:38 -0300 Subject: [PATCH 100/139] Added Supernova Engine to Use case (#350) * Added Supernova Engine to Use case * More details about Supernova Engine --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 4f48f333..072f1573 100644 --- a/README.md +++ b/README.md @@ -72,6 +72,7 @@ TinyObjLoader is successfully used in ... * liblava - A modern C++ and easy-to-use framework for the Vulkan API. [MIT]: https://github.com/liblava/liblava * rtxON - Simple Vulkan raytracing tutorials https://github.com/iOrange/rtxON * metal-ray-tracer - Writing ray-tracer using Metal Performance Shaders https://github.com/sergeyreznik/metal-ray-tracer https://sergeyreznik.github.io/metal-ray-tracer/index.html +* Supernova Engine - 2D and 3D projects with Lua or C++ in data oriented design: https://github.com/supernovaengine/supernova * Your project here! (Letting us know via github issue is welcome!) ### Old version(v0.9.x) From 0a67aaa9d94bdeb96b44eb54dfff6c51197e7df9 Mon Sep 17 00:00:00 2001 From: Wojciech Jarosz Date: Thu, 17 Nov 2022 04:53:50 -0500 Subject: [PATCH 101/139] adding vertex color support to callback API (#352) --- tiny_obj_loader.h | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h index 20e97001..d8e7c419 100644 --- a/tiny_obj_loader.h +++ b/tiny_obj_loader.h @@ -421,6 +421,8 @@ struct attrib_t { struct callback_t { // W is optional and set to 1 if there is no `w` item in `v` line void (*vertex_cb)(void *user_data, real_t x, real_t y, real_t z, real_t w); + void (*vertex_color_cb)(void *user_data, real_t x, real_t y, real_t z, + real_t r, real_t g, real_t b, bool has_color); void (*normal_cb)(void *user_data, real_t x, real_t y, real_t z); // y and z are optional and set to 0 if there is no `y` and/or `z` item(s) in @@ -3153,11 +3155,15 @@ bool LoadObjWithCallback(std::istream &inStream, const callback_t &callback, // vertex if (token[0] == 'v' && IS_SPACE((token[1]))) { token += 2; - // TODO(syoyo): Support parsing vertex color extension. - real_t x, y, z, w; // w is optional. default = 1.0 - parseV(&x, &y, &z, &w, &token); + real_t x, y, z; + real_t r, g, b; + + bool found_color = parseVertexWithColor(&x, &y, &z, &r, &g, &b, &token); if (callback.vertex_cb) { - callback.vertex_cb(user_data, x, y, z, w); + callback.vertex_cb(user_data, x, y, z, r); // r=w is optional + } + if (callback.vertex_color_cb) { + callback.vertex_color_cb(user_data, x, y, z, r, g, b, found_color); } continue; } From 0fd8b20bc2c071010ce26a2adfaee16ec0875049 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Fri, 18 Nov 2022 03:43:39 +0900 Subject: [PATCH 102/139] Make python binding sample work without numpy installed. --- python/sample.py | 40 +++++++++++++++++++++++++++++++++------- 1 file changed, 33 insertions(+), 7 deletions(-) diff --git a/python/sample.py b/python/sample.py index 2f30c006..ed2d961b 100644 --- a/python/sample.py +++ b/python/sample.py @@ -1,6 +1,13 @@ import sys import tinyobjloader +is_numpy_available = False +try: + import numpy + is_numpy_available = True +except: + print("NumPy not installed. Do not use numpy_*** API. 
If you encounter slow performance, see a performance tips for non-numpy API https://github.com/tinyobjloader/tinyobjloader/issues/275") + filename = "../models/cornell_box.obj" @@ -24,9 +31,9 @@ print("Warn:", reader.Warning()) attrib = reader.GetAttrib() -print("attrib.vertices = ", len(attrib.vertices)) -print("attrib.normals = ", len(attrib.normals)) -print("attrib.texcoords = ", len(attrib.texcoords)) +print("len(attrib.vertices) = ", len(attrib.vertices)) +print("len(attrib.normals) = ", len(attrib.normals)) +print("len(attrib.texcoords) = ", len(attrib.texcoords)) # vertex data must be `xyzxyzxyz...` assert len(attrib.vertices) % 3 == 0 @@ -37,6 +44,19 @@ # texcoords data must be `uvuvuv...` assert len(attrib.texcoords) % 2 == 0 +# Performance note +# (direct?) array access through member variable is quite slow. +# https://github.com/tinyobjloader/tinyobjloader/issues/275#issuecomment-753465833 +# +# We encourage first copy(?) varible to Python world: +# +# vertices = attrib.vertices +# +# for i in range(...) +# v = vertices[i] +# +# Or please consider using numpy_*** interface(e.g. numpy_vertices()) + for (i, v) in enumerate(attrib.vertices): print("v[{}] = {}".format(i, v)) @@ -46,7 +66,10 @@ for (i, v) in enumerate(attrib.texcoords): print("vt[{}] = {}".format(i, t)) -print("numpy_vertices = {}".format(attrib.numpy_vertices())) +if is_numpy_available: + print("numpy_v = {}".format(attrib.numpy_vertices())) + print("numpy_vn = {}".format(attrib.numpy_normals())) + print("numpy_vt = {}".format(attrib.numpy_texcoords())) materials = reader.GetMaterials() print("Num materials: ", len(materials)) @@ -73,6 +96,9 @@ print("[{}] v_idx {}".format(i, idx.vertex_index)) print("[{}] vn_idx {}".format(i, idx.normal_index)) print("[{}] vt_idx {}".format(i, idx.texcoord_index)) - print("numpy_indices = {}".format(shape.mesh.numpy_indices())) - print("numpy_num_face_vertices = {}".format(shape.mesh.numpy_num_face_vertices())) - print("numpy_material_ids = {}".format(shape.mesh.numpy_material_ids())) + print("material_ids = {}".format(shape.mesh.material_ids)) + + if is_numpy_available: + print("numpy_indices = {}".format(shape.mesh.numpy_indices())) + print("numpy_num_face_vertices = {}".format(shape.mesh.numpy_num_face_vertices())) + print("numpy_material_ids = {}".format(shape.mesh.numpy_material_ids())) From 83c96d2294c803fe55b736529581a04c59fd9180 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Sat, 19 Nov 2022 19:17:15 +0900 Subject: [PATCH 103/139] Bump black version --- azure-pipelines.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index a9202d44..fdc128fa 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -32,7 +32,8 @@ jobs: # 19.10b0 triggers 'cannot import name '_unicodefun' from 'click'' error. # https://stackoverflow.com/questions/71673404/importerror-cannot-import-name-unicodefun-from-click #pip install black==19.10b0 - pip install black==22.3.0 + #pip install black==22.3.0 + pip install black==22.10.0 black --check python/ displayName: Check Python code format From 640df370865bbd12bb7ec165478ea6d02d892f47 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Sat, 19 Nov 2022 19:18:27 +0900 Subject: [PATCH 104/139] Format code. 
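A short usage sketch for the vertex-color callback added to the v1 callback API in PATCH 101 above. The callback_t member names and signatures follow the diff shown there; the trailing LoadObjWithCallback arguments assume the API's usual defaulted user_data/material-reader/warn/err parameters, and the file name is illustrative:

    #define TINYOBJLOADER_IMPLEMENTATION  // in exactly one translation unit
    #include "tiny_obj_loader.h"

    #include <fstream>
    #include <iostream>
    #include <string>

    static void OnVertex(void *user_data, tinyobj::real_t x, tinyobj::real_t y,
                         tinyobj::real_t z, tinyobj::real_t w) {
      (void)user_data;
      std::cout << "v " << x << " " << y << " " << z << " (w = " << w << ")\n";
    }

    // r/g/b are the optional vertex colors; has_color tells whether the
    // `v` line actually carried them.
    static void OnVertexColor(void *user_data, tinyobj::real_t x,
                              tinyobj::real_t y, tinyobj::real_t z,
                              tinyobj::real_t r, tinyobj::real_t g,
                              tinyobj::real_t b, bool has_color) {
      (void)user_data; (void)x; (void)y; (void)z;
      if (has_color) std::cout << "  color " << r << " " << g << " " << b << "\n";
    }

    int main() {
      tinyobj::callback_t cb;
      cb.vertex_cb = OnVertex;
      cb.vertex_color_cb = OnVertexColor;

      std::ifstream ifs("model.obj");
      std::string warn, err;
      bool ok = tinyobj::LoadObjWithCallback(ifs, cb, /*user_data=*/NULL,
                                             /*readMatFn=*/NULL, &warn, &err);
      if (!warn.empty()) std::cerr << "WARN: " << warn << "\n";
      if (!err.empty()) std::cerr << "ERR: " << err << "\n";
      return ok ? 0 : 1;
    }

Note that at this point in the series callback_t's constructor does not yet initialize vertex_color_cb; PATCH 108 later adds the missing NULL default, so set the member explicitly (as above) or to NULL.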
--- python/sample.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/python/sample.py b/python/sample.py index ed2d961b..7c57bd14 100644 --- a/python/sample.py +++ b/python/sample.py @@ -3,10 +3,13 @@ is_numpy_available = False try: - import numpy - is_numpy_available = True + import numpy + + is_numpy_available = True except: - print("NumPy not installed. Do not use numpy_*** API. If you encounter slow performance, see a performance tips for non-numpy API https://github.com/tinyobjloader/tinyobjloader/issues/275") + print( + "NumPy not installed. Do not use numpy_*** API. If you encounter slow performance, see a performance tips for non-numpy API https://github.com/tinyobjloader/tinyobjloader/issues/275" + ) filename = "../models/cornell_box.obj" @@ -47,7 +50,7 @@ # Performance note # (direct?) array access through member variable is quite slow. # https://github.com/tinyobjloader/tinyobjloader/issues/275#issuecomment-753465833 -# +# # We encourage first copy(?) varible to Python world: # # vertices = attrib.vertices @@ -100,5 +103,7 @@ if is_numpy_available: print("numpy_indices = {}".format(shape.mesh.numpy_indices())) - print("numpy_num_face_vertices = {}".format(shape.mesh.numpy_num_face_vertices())) + print( + "numpy_num_face_vertices = {}".format(shape.mesh.numpy_num_face_vertices()) + ) print("numpy_material_ids = {}".format(shape.mesh.numpy_material_ids())) From 2f947710aeedea2089ade2b62ef669521d90f2ef Mon Sep 17 00:00:00 2001 From: Mohit Sethi Date: Tue, 29 Nov 2022 14:44:00 +0530 Subject: [PATCH 105/139] Added Arc Game Engine to README.md (#354) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 072f1573..efb8732e 100644 --- a/README.md +++ b/README.md @@ -73,6 +73,7 @@ TinyObjLoader is successfully used in ... * rtxON - Simple Vulkan raytracing tutorials https://github.com/iOrange/rtxON * metal-ray-tracer - Writing ray-tracer using Metal Performance Shaders https://github.com/sergeyreznik/metal-ray-tracer https://sergeyreznik.github.io/metal-ray-tracer/index.html * Supernova Engine - 2D and 3D projects with Lua or C++ in data oriented design: https://github.com/supernovaengine/supernova +* AGE (Arc Game Engine) - An open-source engine for building 2D & 3D real-time rendering and interactive contents: https://github.com/MohitSethi99/ArcGameEngine * Your project here! (Letting us know via github issue is welcome!) ### Old version(v0.9.x) From 45f683cb3122fafdc3b126b406fc8ecab61b9033 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Tue, 13 Dec 2022 20:11:55 +0900 Subject: [PATCH 106/139] Ignore leading whitespaces in newmtl. 
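For context on what this patch changes: `newmtl` previously copied the rest of the line verbatim into material.name through a stringstream, so extra whitespace around the name ended up inside the name; the tiny_obj_loader.h hunk below switches to the whitespace-skipping string parser instead. A minimal sketch of that style of tokenizing, with an illustrative helper name (the library's own helper is parseString):

    #include <cstring>
    #include <string>

    // Skip leading blanks, then read one whitespace-delimited token.
    static std::string ParseToken(const char **token) {
      (*token) += strspn(*token, " \t");        // ignore leading spaces/tabs
      size_t len = strcspn(*token, " \t\r\n");  // stop at the next whitespace
      std::string s(*token, len);
      (*token) += len;
      return s;
    }

    // Given the remainder of a "newmtl    aaa" line this yields "aaa",
    // whereas the old stringstream copy kept the leading blanks.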
Fixes #356 --- models/issue-356-leading-spaces-newmtl.mtl | 2 ++ models/issue-356-leading-spaces-newmtl.obj | 2 ++ tiny_obj_loader.h | 11 ++++++++--- 3 files changed, 12 insertions(+), 3 deletions(-) create mode 100644 models/issue-356-leading-spaces-newmtl.mtl create mode 100644 models/issue-356-leading-spaces-newmtl.obj diff --git a/models/issue-356-leading-spaces-newmtl.mtl b/models/issue-356-leading-spaces-newmtl.mtl new file mode 100644 index 00000000..f5a388e9 --- /dev/null +++ b/models/issue-356-leading-spaces-newmtl.mtl @@ -0,0 +1,2 @@ +newmtl aaa +Ka 1.000000 1.000000 1.000000 diff --git a/models/issue-356-leading-spaces-newmtl.obj b/models/issue-356-leading-spaces-newmtl.obj new file mode 100644 index 00000000..b41984a8 --- /dev/null +++ b/models/issue-356-leading-spaces-newmtl.obj @@ -0,0 +1,2 @@ +mtllib issue-356-leading-spaces-newmtl.mtl +usemtl aaa diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h index d8e7c419..3d86b903 100644 --- a/tiny_obj_loader.h +++ b/tiny_obj_loader.h @@ -2108,9 +2108,14 @@ void LoadMtl(std::map *material_map, // set new mtl name token += 7; { - std::stringstream sstr; - sstr << token; - material.name = sstr.str(); + std::string namebuf = parseString(&token); + // TODO: empty name check? + if (namebuf.empty()) { + if (warning) { + (*warning) += "empty material name in `newmtl`\n"; + } + } + material.name = namebuf; } continue; } From 0fc802cf468d23b9d205890b76b268f61b948e6d Mon Sep 17 00:00:00 2001 From: Paul Melnikow Date: Fri, 6 Jan 2023 13:36:15 -0500 Subject: [PATCH 107/139] Fix TALOS-2020-1212 (#358) Co-authored-by: Jacob Beard --- .gitignore | 2 + models/invalid-relative-texture-index.obj | 2 + models/invalid-relative-vertex-index.obj | 1 + tests/tester.cc | 48 +++++++++++++++++++++++ tiny_obj_loader.h | 5 ++- 5 files changed, 57 insertions(+), 1 deletion(-) create mode 100644 models/invalid-relative-texture-index.obj create mode 100644 models/invalid-relative-vertex-index.obj diff --git a/.gitignore b/.gitignore index cd219d8e..f9b4d691 100644 --- a/.gitignore +++ b/.gitignore @@ -4,3 +4,5 @@ build/ /python/*.egg-info /python/.eggs /python/tiny_obj_loader.h +/tests/tester +/tests/tester.dSYM diff --git a/models/invalid-relative-texture-index.obj b/models/invalid-relative-texture-index.obj new file mode 100644 index 00000000..ed3a571a --- /dev/null +++ b/models/invalid-relative-texture-index.obj @@ -0,0 +1,2 @@ +vt 0 0 +f 1/-1 1/-1 1/-2 \ No newline at end of file diff --git a/models/invalid-relative-vertex-index.obj b/models/invalid-relative-vertex-index.obj new file mode 100644 index 00000000..bddc54a9 --- /dev/null +++ b/models/invalid-relative-vertex-index.obj @@ -0,0 +1 @@ +f -4 -3 -2 \ No newline at end of file diff --git a/tests/tester.cc b/tests/tester.cc index ebb5290c..2f336c76 100644 --- a/tests/tester.cc +++ b/tests/tester.cc @@ -858,6 +858,50 @@ void test_zero_face_idx_value_issue140() { TEST_CHECK(!err.empty()); } +void test_invalid_relative_vertex_index() { + tinyobj::attrib_t attrib; + std::vector shapes; + std::vector materials; + + std::string warn; + std::string err; + bool ret = + tinyobj::LoadObj(&attrib, &shapes, &materials, &warn, &err, + "../models/invalid-relative-vertex-index.obj", gMtlBasePath); + + if (!warn.empty()) { + std::cout << "WARN: " << warn << std::endl; + } + + if (!err.empty()) { + std::cerr << "ERR: " << err << std::endl; + } + TEST_CHECK(false == ret); + TEST_CHECK(!err.empty()); +} + +void test_invalid_texture_vertex_index() { + tinyobj::attrib_t attrib; + std::vector shapes; + std::vector materials; + 
+ std::string warn; + std::string err; + bool ret = + tinyobj::LoadObj(&attrib, &shapes, &materials, &warn, &err, + "../models/invalid-relative-texture-vertex-index.obj", gMtlBasePath); + + if (!warn.empty()) { + std::cout << "WARN: " << warn << std::endl; + } + + if (!err.empty()) { + std::cerr << "ERR: " << err << std::endl; + } + TEST_CHECK(false == ret); + TEST_CHECK(!err.empty()); +} + void test_texture_name_whitespace_issue145() { tinyobj::attrib_t attrib; std::vector shapes; @@ -1467,4 +1511,8 @@ TEST_LIST = { test_mtl_filename_with_whitespace_issue46}, {"test_face_missing_issue295", test_face_missing_issue295}, + {"test_invalid_relative_vertex_index", + test_invalid_relative_vertex_index}, + {"test_invalid_texture_vertex_index", + test_invalid_texture_vertex_index}, {NULL, NULL}}; diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h index 3d86b903..d91f7620 100644 --- a/tiny_obj_loader.h +++ b/tiny_obj_loader.h @@ -835,6 +835,9 @@ static inline bool fixIndex(int idx, int n, int *ret, bool allow_zero, const war if (idx < 0) { (*ret) = n + idx; // negative value = relative + if((*ret) < 0){ + return false; // invalid relative index + } return true; } @@ -2782,7 +2785,7 @@ bool LoadObj(attrib_t *attrib, std::vector *shapes, static_cast(vn.size() / 3), static_cast(vt.size() / 2), &vi, context)) { if (err) { - (*err) += "Failed to parse `f' line (e.g. a zero value for vertex index. Line " + + (*err) += "Failed to parse `f' line (e.g. a zero value for vertex index or invalid relative vertex index). Line " + toString(line_num) + ").\n"; } return false; From ed48fa93268ef2c3e930370fd2fa760de30aee18 Mon Sep 17 00:00:00 2001 From: Sean Curtis Date: Tue, 14 Feb 2023 01:09:41 -0800 Subject: [PATCH 108/139] Add initialization to missing member of callback_t (#361) --- tiny_obj_loader.h | 1 + 1 file changed, 1 insertion(+) diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h index d91f7620..f32f8a9f 100644 --- a/tiny_obj_loader.h +++ b/tiny_obj_loader.h @@ -446,6 +446,7 @@ struct callback_t { callback_t() : vertex_cb(NULL), + vertex_color_cb(NULL), normal_cb(NULL), texcoord_cb(NULL), index_cb(NULL), From 54684096e4ab1fcff9e7571888489e48d018c7fb Mon Sep 17 00:00:00 2001 From: Hai Nguyen <379079+chaoticbob@users.noreply.github.com> Date: Mon, 20 Feb 2023 08:29:01 +0000 Subject: [PATCH 109/139] Added support for ambient occlusion texture (#362) * Added support for ambient occlusion texture - Added parsing for ambient occlsuion texture using map_Po or map_ao * Revert "Added support for ambient occlusion texture" This reverts commit af1784dc22611b61105c5c76eb69021f3c079d8f. * Additional details for map_Ka - Added comment to to indicate that map_Ka can be used for both ambient and ambient occlusion textures. --- tiny_obj_loader.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h index f32f8a9f..edd65b11 100644 --- a/tiny_obj_loader.h +++ b/tiny_obj_loader.h @@ -194,7 +194,7 @@ struct material_t { int dummy; // Suppress padding warning. - std::string ambient_texname; // map_Ka + std::string ambient_texname; // map_Ka. For ambient or ambient occlusion. 
std::string diffuse_texname; // map_Kd std::string specular_texname; // map_Ks std::string specular_highlight_texname; // map_Ns @@ -2283,7 +2283,7 @@ void LoadMtl(std::map *material_map, continue; } - // ambient texture + // ambient or ambient occlusion texture if ((0 == strncmp(token, "map_Ka", 6)) && IS_SPACE(token[6])) { token += 7; ParseTextureNameAndOption(&(material.ambient_texname), From bca2719a11e688b85ce9af21dcb156f3d8b918bc Mon Sep 17 00:00:00 2001 From: NCJ Date: Mon, 20 Mar 2023 02:29:37 +0800 Subject: [PATCH 110/139] Fix example writer transmittance (#363) --- examples/obj_sticher/obj_writer.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/obj_sticher/obj_writer.cc b/examples/obj_sticher/obj_writer.cc index 9ea8d7c4..31a2c895 100644 --- a/examples/obj_sticher/obj_writer.cc +++ b/examples/obj_sticher/obj_writer.cc @@ -26,7 +26,7 @@ bool WriteMat(const std::string& filename, const std::vector Date: Sat, 3 Jun 2023 19:33:46 +0900 Subject: [PATCH 111/139] Fix material.Pcr value print. Fixes #367 --- loader_example.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/loader_example.cc b/loader_example.cc index 8143bb79..21feb684 100644 --- a/loader_example.cc +++ b/loader_example.cc @@ -257,7 +257,7 @@ static void PrintInfo(const tinyobj::attrib_t& attrib, printf(" material.Pm = %f\n", static_cast(materials[i].metallic)); printf(" material.Ps = %f\n", static_cast(materials[i].sheen)); printf(" material.Pc = %f\n", static_cast(materials[i].clearcoat_thickness)); - printf(" material.Pcr = %f\n", static_cast(materials[i].clearcoat_thickness)); + printf(" material.Pcr = %f\n", static_cast(materials[i].clearcoat_roughness)); printf(" material.aniso = %f\n", static_cast(materials[i].anisotropy)); printf(" material.anisor = %f\n", static_cast(materials[i].anisotropy_rotation)); printf(" material.map_Ke = %s\n", materials[i].emissive_texname.c_str()); From 22441b15f129f0dcda8a472ce615ad7fdfd6f714 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Tue, 20 Jun 2023 21:05:20 +0900 Subject: [PATCH 112/139] Add PBR MTL spec document. Fixes #364 --- README.md | 2 +- pbr-mtl.md | 25 +++++++++++++++++++++++++ 2 files changed, 26 insertions(+), 1 deletion(-) create mode 100644 pbr-mtl.md diff --git a/README.md b/README.md index efb8732e..df061724 100644 --- a/README.md +++ b/README.md @@ -127,7 +127,7 @@ TinyObjLoader is successfully used in ... ### Material -* PBR material extension for .MTL. Its proposed here: http://exocortex.com/blog/extending_wavefront_mtl_to_support_pbr +* PBR material extension for .MTL. Please see [pbr-mtl.md](pbr-mtl.md) for details. * Texture options * Unknown material attributes are returned as key-value(value is string) map. diff --git a/pbr-mtl.md b/pbr-mtl.md new file mode 100644 index 00000000..36c3e86b --- /dev/null +++ b/pbr-mtl.md @@ -0,0 +1,25 @@ +## PBR material extension. + +The spec can be found in internet archive: https://web.archive.org/web/20230210121526/http://exocortex.com/blog/extending_wavefront_mtl_to_support_pbr + +* Kd/map_Kd (base/diffuse) // reuse +* Ks/map_Ks (specular) // reuse +* d or Tr (opacity) // reuse +* map_d/map_Tr (opacitymap) // reuse +* Tf (translucency) // reuse +* bump/-bm (bump map) // reuse +* disp (displacement map) // reuse + +PBR material parameters as defined by the Disney PBR. 
+ +* Pr/map_Pr (roughness) // new +* Pm/map_Pm (metallic) // new +* Ps/map_Ps (sheen) // new +* Pc (clearcoat thickness) // new +* Pcr (clearcoat roughness) // new +* Ke/map_Ke (emissive) // new +* aniso (anisotropy) // new +* anisor (anisotropy rotation) // new +* norm (normal map) // new + +EoL. From f5569db1ffb3b0222663ba38a7a9b3f6a461c469 Mon Sep 17 00:00:00 2001 From: Dario Cangialosi Date: Fri, 7 Jul 2023 13:43:27 +0200 Subject: [PATCH 113/139] [build in CMake for viewer] adding it. compiles and works on Windows MSYS/MINGW and Debian MX. (#368) --- examples/viewer/.gitignore | 1 + examples/viewer/CMakeLists.txt | 29 +++++++++++++++++++++++++++++ 2 files changed, 30 insertions(+) create mode 100644 examples/viewer/.gitignore create mode 100644 examples/viewer/CMakeLists.txt diff --git a/examples/viewer/.gitignore b/examples/viewer/.gitignore new file mode 100644 index 00000000..378eac25 --- /dev/null +++ b/examples/viewer/.gitignore @@ -0,0 +1 @@ +build diff --git a/examples/viewer/CMakeLists.txt b/examples/viewer/CMakeLists.txt new file mode 100644 index 00000000..df3af268 --- /dev/null +++ b/examples/viewer/CMakeLists.txt @@ -0,0 +1,29 @@ +# cmake -S . -B build && cmake --build build && build/app + +# cmake -S . -B build -G "Ninja" && cmake --build build && build/app +# cmake -S . -B build -G "CodeBlocks - Ninja" && cmake --build build && build/app +# cmake -S . -B build -G "Visual Studio 17 2022" && cmake --build build && build/app + +cmake_minimum_required(VERSION 3.18) +project( app VERSION 0.1 ) + +file(GLOB SOURCE_FILES "*.c*" ) +add_executable(app ${SOURCE_FILES}) + +find_package( OpenGL REQUIRED ) +find_package( glfw3 REQUIRED ) + +set(ADDITIONAL_LIBRARIES "") +if(WIN32) +set(ADDITIONAL_LIBRARIES winmm) +endif() + +set(GLEW_LIBRARY "") +if(UNIX) +set(GLEW_LIBRARY GLEW) +else() +find_package( glew REQUIRED ) +set(GLEW_LIBRARY GLEW::glew) +endif() + +target_link_libraries(${PROJECT_NAME} OpenGL::GL OpenGL::GLU glfw ${ADDITIONAL_LIBRARIES} ${GLEW_LIBRARY} ) From 804be4e5e7616635a3960516db7254b87213398c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tur=C3=A1nszki=20J=C3=A1nos?= Date: Mon, 7 Aug 2023 11:33:54 +0200 Subject: [PATCH 114/139] added Wicked Engine to projects list (#369) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index df061724..b63c9853 100644 --- a/README.md +++ b/README.md @@ -74,6 +74,7 @@ TinyObjLoader is successfully used in ... * metal-ray-tracer - Writing ray-tracer using Metal Performance Shaders https://github.com/sergeyreznik/metal-ray-tracer https://sergeyreznik.github.io/metal-ray-tracer/index.html * Supernova Engine - 2D and 3D projects with Lua or C++ in data oriented design: https://github.com/supernovaengine/supernova * AGE (Arc Game Engine) - An open-source engine for building 2D & 3D real-time rendering and interactive contents: https://github.com/MohitSethi99/ArcGameEngine +* [Wicked Engine](https://github.com/turanszkij/WickedEngine) - 3D engine with modern graphics * Your project here! (Letting us know via github issue is welcome!) 
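Returning to the PBR extension documented in pbr-mtl.md (PATCH 112 above): the reused and new parameters are exposed as fields of tinyobj::material_t. A small dump helper as a sketch; most of these field names appear verbatim in the loader_example.cc hunk of PATCH 111, the roughness and normal-map fields follow the same naming pattern, and the helper itself is illustrative:

    #include <cstdio>
    #include <vector>
    #include "tiny_obj_loader.h"

    // Print the PBR-extension values of each parsed material.
    static void PrintPbrParams(const std::vector<tinyobj::material_t> &materials) {
      for (size_t i = 0; i < materials.size(); i++) {
        const tinyobj::material_t &m = materials[i];
        std::printf("%s: Pr=%f Pm=%f Ps=%f Pc=%f Pcr=%f aniso=%f anisor=%f\n",
                    m.name.c_str(),
                    static_cast<double>(m.roughness),            // Pr
                    static_cast<double>(m.metallic),             // Pm
                    static_cast<double>(m.sheen),                // Ps
                    static_cast<double>(m.clearcoat_thickness),  // Pc
                    static_cast<double>(m.clearcoat_roughness),  // Pcr
                    static_cast<double>(m.anisotropy),           // aniso
                    static_cast<double>(m.anisotropy_rotation)); // anisor
        if (!m.emissive_texname.empty())
          std::printf("  map_Ke = %s\n", m.emissive_texname.c_str());
        if (!m.normal_texname.empty())
          std::printf("  norm   = %s\n", m.normal_texname.c_str());
      }
    }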
### Old version(v0.9.x) From 853f059d778058a43c954850e561a231934b33a7 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Tue, 15 Aug 2023 05:48:58 +0900 Subject: [PATCH 115/139] use std::fabs for float type(std::abs is only available for integer type) --- tiny_obj_loader.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h index edd65b11..28d5d849 100644 --- a/tiny_obj_loader.h +++ b/tiny_obj_loader.h @@ -1640,7 +1640,7 @@ static bool exportGroupsToShape(shape_t *shape, const PrimGroup &prim_group, TinyObjPoint axis_w, axis_v, axis_u; axis_w = n; TinyObjPoint a; - if(std::abs(axis_w.x) > real_t(0.9999999)) { + if(std::fabs(axis_w.x) > real_t(0.9999999)) { a = TinyObjPoint(0,1,0); } else { a = TinyObjPoint(1,0,0); From ee45fb41db95bf9563f2a41bc63adfa18475c2ee Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Thu, 24 Aug 2023 01:33:34 +0900 Subject: [PATCH 116/139] Update pbr-mtl.md --- pbr-mtl.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pbr-mtl.md b/pbr-mtl.md index 36c3e86b..b5856216 100644 --- a/pbr-mtl.md +++ b/pbr-mtl.md @@ -1,6 +1,10 @@ ## PBR material extension. -The spec can be found in internet archive: https://web.archive.org/web/20230210121526/http://exocortex.com/blog/extending_wavefront_mtl_to_support_pbr +The spec can be found in either + +https://benhouston3d.com/blog/extended-wavefront-obj-mtl-for-pbr/ + +or Internet Archive: https://web.archive.org/web/20230210121526/http://exocortex.com/blog/extending_wavefront_mtl_to_support_pbr * Kd/map_Kd (base/diffuse) // reuse * Ks/map_Ks (specular) // reuse From cc327eecf7f8f4139932aec8d75db2d091f412ef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A9r=C3=A9mie=20Dumas?= Date: Fri, 22 Dec 2023 09:01:19 -0800 Subject: [PATCH 117/139] Support facets with > 255 vertices. (#375) --- tiny_obj_loader.h | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h index 28d5d849..dbb64c3c 100644 --- a/tiny_obj_loader.h +++ b/tiny_obj_loader.h @@ -352,10 +352,9 @@ struct index_t { struct mesh_t { std::vector indices; - std::vector + std::vector num_face_vertices; // The number of vertices per - // face. 3 = triangle, 4 = quad, - // ... Up to 255 vertices per face. + // face. 3 = triangle, 4 = quad, ... std::vector material_ids; // per-face material ID std::vector smoothing_group_ids; // per-face smoothing group // ID(0 = off. positive value @@ -1946,7 +1945,7 @@ static bool exportGroupsToShape(shape_t *shape, const PrimGroup &prim_group, } shape->mesh.num_face_vertices.push_back( - static_cast(npolys)); + static_cast(npolys)); shape->mesh.material_ids.push_back(material_id); // per face shape->mesh.smoothing_group_ids.push_back( face.smoothing_group_id); // per face From e0f1ba05e2c9456ceca6839c65c4ef0e4e81140c Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Fri, 26 Jan 2024 04:00:19 +0900 Subject: [PATCH 118/139] Update python-binding project files. Add Github Actions and Cirrus CI task for Python wheel build. 
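Before the Python packaging changes, one more note on PATCH 117 above: mesh_t::num_face_vertices is widened from unsigned char to unsigned int, so a single facet may now carry more than 255 corners. Face-walking code that keeps a running index offset needs no change; a minimal sketch (the LoadObj arguments mirror the test code earlier in this series, the trailing mtl_basedir/triangulate arguments assume the v1 API defaults, and the file name is illustrative):

    #define TINYOBJLOADER_IMPLEMENTATION  // in exactly one translation unit
    #include "tiny_obj_loader.h"

    #include <cstdio>
    #include <string>
    #include <vector>

    int main() {
      tinyobj::attrib_t attrib;
      std::vector<tinyobj::shape_t> shapes;
      std::vector<tinyobj::material_t> materials;
      std::string warn, err;

      // Keep polygons un-triangulated so large facets stay visible.
      bool ok = tinyobj::LoadObj(&attrib, &shapes, &materials, &warn, &err,
                                 "model.obj", /*mtl_basedir=*/NULL,
                                 /*triangulate=*/false);
      if (!ok) {
        std::fprintf(stderr, "%s%s", warn.c_str(), err.c_str());
        return 1;
      }

      for (size_t s = 0; s < shapes.size(); s++) {
        const tinyobj::mesh_t &mesh = shapes[s].mesh;
        size_t index_offset = 0;
        for (size_t f = 0; f < mesh.num_face_vertices.size(); f++) {
          unsigned int fv = mesh.num_face_vertices[f];  // may now exceed 255
          for (unsigned int v = 0; v < fv; v++) {
            const tinyobj::index_t &idx = mesh.indices[index_offset + v];
            std::printf("shape %zu face %zu corner %u -> v_idx %d\n",
                        s, f, v, idx.vertex_index);
          }
          index_offset += fv;
        }
      }
      return 0;
    }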
--- .cirrus.yml | 30 ++++++++++ .github/workflow/wheels.yml | 115 ++++++++++++++++++++++++++++++++++++ MANIFEST.in | 8 +++ pyproject.toml | 38 +++++++++++- setup.py | 21 ++++--- 5 files changed, 203 insertions(+), 9 deletions(-) create mode 100644 .cirrus.yml create mode 100644 .github/workflow/wheels.yml create mode 100644 MANIFEST.in diff --git a/.cirrus.yml b/.cirrus.yml new file mode 100644 index 00000000..d6adddd9 --- /dev/null +++ b/.cirrus.yml @@ -0,0 +1,30 @@ +build_and_store_wheels: &BUILD_AND_STORE_WHEELS + install_cibuildwheel_script: + - python -m pip install cibuildwheel==2.16.2 + run_cibuildwheel_script: + - cibuildwheel + wheels_artifacts: + path: "wheelhouse/*" + + # Upload only for tagged commit + only_if: $CIRRUS_TAG != '' + publish_script: + - python -m pip install twine + - python -m twine upload --repository-url https://upload.pypi.org/legacy/ --username __token__ wheelhouse/*.whl + + +linux_aarch64_task: + name: Build Linux aarch64 wheels. + compute_engine_instance: + image_project: cirrus-images + image: family/docker-builder-arm64 + architecture: arm64 + platform: linux + cpu: 4 + memory: 4G + environment: + TWINE_PASSWORD: ENCRYPTED[88f22b6fab51dc9306c5690f2ad999f4114320fecbcff933864d3f225bbd03037ca511742e8572fa2b63dfa9cebee365] + + install_pre_requirements_script: + - apt install -y python3-venv python-is-python3 + <<: *BUILD_AND_STORE_WHEELS diff --git a/.github/workflow/wheels.yml b/.github/workflow/wheels.yml new file mode 100644 index 00000000..91082540 --- /dev/null +++ b/.github/workflow/wheels.yml @@ -0,0 +1,115 @@ +name: Build and upload to PyPI + +# Build on every branch push, tag push, and pull request change: +on: [push, pull_request] + +jobs: + + build_wheels: + name: Build wheels on ${{ matrix.os }} + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest, windows-latest, macos-latest] + + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + fetch-tags: true # Optional, use if you use setuptools_scm + + - name: Build wheels + uses: pypa/cibuildwheel@v2.16.2 + # to supply options, put them in 'env', like: + # env: + # CIBW_SOME_OPTION: value + # Disable building PyPy wheels on all platforms + env: + CIBW_ARCHS_MACOS: "x86_64 universal2 arm64" + CIBW_ARCHS_WINDOWS: "AMD64 x86" + # disable aarm64 build since its too slow to build(docker + qemu) + CIBW_ARCHS_LINUX: "x86_64 i686" + # it looks cibuildwheel fails to add version string to wheel file for python 3.6, so skip it + CIBW_SKIP: pp* + + - uses: actions/upload-artifact@v4 + with: + name: cibw-wheels-${{ matrix.os }}-${{ strategy.job-index }} + path: ./wheelhouse/*.whl + + # It looks cibuildwheels did not clean build folder(CMake), and it results to Windows arm64 build failure(trying to reuse x86 build of .obj) + # So supply separated build job for Windows ARM64 build + # TODO: clean build folder using CIBW_BEFORE_ALL? + build_win_arm64_wheels: + name: Build ARM64 wheels on Windows. 
+ runs-on: windows-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + fetch-tags: true # Optional, use if you use setuptools_scm + + - name: Build wheels + uses: pypa/cibuildwheel@v2.16.2 + # to supply options, put them in 'env', like: + # env: + # CIBW_SOME_OPTION: value + # Disable building PyPy wheels on all platforms + env: + CIBW_ARCHS_WINDOWS: "ARM64" + CIBW_SKIP: pp* + + - uses: actions/upload-artifact@v4 + with: + name: cibw-wheels-${{ matrix.os }}-${{ strategy.job-index }} + path: ./wheelhouse/*.whl + + make_sdist: + name: Make SDist + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 # Optional, use if you use setuptools_scm + fetch-tags: true # Optional, use if you use setuptools_scm + + - name: Build SDist + run: pipx run build --sdist + + - uses: actions/upload-artifact@v4 + with: + name: cibw-sdist + path: dist/*.tar.gz + + upload_all: + needs: [build_wheels, build_wheels, make_sdist] + runs-on: ubuntu-latest + environment: release + permissions: + # IMPORTANT: this permission is mandatory for trusted publishing + id-token: write + # upload to PyPI on every tag starting with 'v' + # NOTE: Without github.event_name & githug.ref check, `upload_all` task is still triggered on 'main' branch push. + # (then get 'Branch "main" is not allowed to deploy to release due to environment protection rules.' error) + # So still do event_name and github.ref check. + # TODO: Make it work only using Github `environment` feature. + if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') + # alternatively, to publish when a GitHub Release is created, use the following rule: + # if: github.event_name == 'push' && github.event.action == 'published' + steps: + - uses: actions/download-artifact@v4 + with: + pattern: cibw-* + path: dist + merge-multiple: true + + - uses: pypa/gh-action-pypi-publish@release/v1 + with: + # Use Trusted Publisher feature: + # https://docs.pypi.org/trusted-publishers/ + # so no use of PYPI_API_TOKEN + #password: ${{ secrets.PYPI_API_TOKEN }} + # + # Avoid race condition when using multiple CIs + skip-existing: true + verbose: true diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 00000000..d2632da5 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,8 @@ +include pyproject.toml +include setup.py +include README.md +include LICENSE +include python/sample.py +include python/bindings.cc +include python/tiny_obj_loader.cc +include tiny_obj_loader.h diff --git a/pyproject.toml b/pyproject.toml index ac831e23..d3ba7cf3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,3 +1,39 @@ [build-system] -requires = ["setuptools>=42", "wheel", "pip>=19.2", "pybind11>=2.8.0"] + +requires = [ + # NOTE: setuptools_scm>=8 is not supported in py3.6 cibuildwheel env. + # so use older setuptools_scm for a while + #"setuptools>=64", + #"setuptools_scm>=8", + "setuptools>=45", + "setuptools_scm[toml]<8", + "wheel", + "pybind11>=2.10.0", +] build-backend = "setuptools.build_meta" + +[tool.black] +line-length = 140 + +[project] +name = "tinyobjloader" + +# version: Use setuptools_scm +dynamic = ["version", "classifiers", "authors", "description"] + + +readme = {file = "README.md", content-type = "text/markdown"} + +# Project URLs in pyproject.toml is not mature. 
+# so write it to setup.py +# https://github.com/pypa/packaging-problems/issues/606 +# +# [project.urils] + + +[tool.setuptools_scm] +# setuptools_scm>=8 +#version_file = "python/_version.py" + +# setuptools_scm<8 +write_to = "python/_version.py" diff --git a/setup.py b/setup.py index 4f2ecc04..cb950871 100644 --- a/setup.py +++ b/setup.py @@ -6,7 +6,11 @@ from pybind11.setup_helpers import Pybind11Extension#, build_ext from setuptools import setup -__version__ = "2.0.0rc10" +try: + # try to read setuptools_scm generated _version.py + from .python import _version +except: + __version__ = "2.0.0rc10" with open("README.md", "r", encoding="utf8") as fh: long_description = fh.read() @@ -31,13 +35,14 @@ setup( name="tinyobjloader", - version=__version__, + packages=['python'], + #version=__version__, author="Syoyo Fujita", author_email="syoyo@lighttransport.com", url="https://github.com/tinyobjloader/tinyobjloader", - project_urls={ - "Issue Tracker": "https://github.com/tinyobjloader/tinyobjloader/issues", - }, + #project_urls={ + # "Issue Tracker": "https://github.com/tinyobjloader/tinyobjloader/issues", + #}, description="Tiny but powerful Wavefront OBJ loader", long_description=long_description, long_description_content_type='text/markdown', @@ -55,10 +60,10 @@ "Programming Language :: Python :: 3", ], ext_modules=ext_modules, - extras_require={"test": "pytest"}, + #extras_require={"test": "pytest"}, # Currently, build_ext only provides an optional "highest supported C++ # level" feature, but in the future it may provide more features. # cmdclass={"build_ext": build_ext}, - zip_safe=False, - python_requires=">=3.6", + #zip_safe=False, + #python_requires=">=3.6", ) From 121d8c765f124b9b158483364972f38295a485f6 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Fri, 26 Jan 2024 04:06:53 +0900 Subject: [PATCH 119/139] Disable python wheels build and pypi upload in Azure pipelines. --- README.md | 8 +- azure-pipelines.yml | 227 +++++++++++++++++++++++--------------------- 2 files changed, 121 insertions(+), 114 deletions(-) diff --git a/README.md b/README.md index b63c9853..f75721e4 100644 --- a/README.md +++ b/README.md @@ -419,16 +419,16 @@ Here is some benchmark result. Time are measured on MacBook 12(Early 2016, Core ### CI + PyPI upload -cibuildwheels + twine upload for each git tagging event is handled in Azure Pipeline. +cibuildwheels + twine upload for each git tagging event is handled in Github Actions and Cirrus CI. #### How to bump version(For developer) * Bump version in CMakeLists.txt -* Update version in `setup.py` -* Commit and push `master`. Confirm C.I. build is OK. +* Commit and push `release`. Confirm C.I. build is OK. * Create tag starting with `v`(e.g. `v2.1.0`) * `git push --tags` - * cibuildwheels + pypi upload(through twine) will be automatically triggered in Azure Pipeline. + * version settings is automatically handled in python binding through setuptools_scm. + * cibuildwheels + pypi upload(through twine) will be automatically triggered in Github Actions + Cirrus CI. ## Tests diff --git a/azure-pipelines.yml b/azure-pipelines.yml index fdc128fa..2580f172 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -1,3 +1,8 @@ +# +# Python wheels build is now done in Github Actions + Cirrus CI(for arm build) +# so python build is disabled in Azure pipelines. 
+# + variables: # https://cibuildwheel.readthedocs.io/en/stable/cpp_standards/ # cibuildwheel now supports python 3.6+(as of 2022 Oct) @@ -38,116 +43,118 @@ jobs: black --check python/ displayName: Check Python code format - # Ubuntu16.04 seems now deprecated(as of 2021/12/01), - # so use `ubuntu-latest` - - job: linux - pool: {vmImage: "ubuntu-latest"} - steps: - - task: UsePythonVersion@0 - - bash: | - python3 -m pip install --upgrade pip - pip3 install cibuildwheel twine - - # Use pipx to build source dist - pip3 install pipx - - # Source dist - pipx run build --sdist - ls -la dist/* - - # build binary wheels - cibuildwheel --platform linux --output-dir wheelhouse . - - - task: CopyFiles@2 - inputs: - contents: 'wheelhouse/**' - targetFolder: $(Build.ArtifactStagingDirectory) - - - task: CopyFiles@2 - inputs: - contents: 'dist/**' - targetFolder: $(Build.ArtifactStagingDirectory) - - - task: PublishBuildArtifacts@1 - inputs: - path: $(Build.ArtifactStagingDirectory) - artifactName: tinyobjDeployLinux - - - job: macos - pool: {vmImage: 'macOS-latest'} - variables: - # Support C++11: https://github.com/joerick/cibuildwheel/pull/156 - MACOSX_DEPLOYMENT_TARGET: 10.9 - steps: - - task: UsePythonVersion@0 - - bash: | - python3 -m pip install --upgrade pip - pip3 install cibuildwheel - cibuildwheel --platform macos --output-dir wheelhouse . - - task: CopyFiles@2 - inputs: - contents: 'wheelhouse/*.whl' - targetFolder: $(Build.ArtifactStagingDirectory) - - task: PublishBuildArtifacts@1 - inputs: - path: $(Build.ArtifactStagingDirectory) - artifactName: tinyobjDeployMacOS - - - job: windows - pool: {vmImage: 'windows-latest'} - steps: - - task: UsePythonVersion@0 - - bash: | - python -m pip install --upgrade pip - pip install cibuildwheel - cibuildwheel --platform windows --output-dir wheelhouse . - - task: CopyFiles@2 - inputs: - contents: 'wheelhouse/*.whl' - targetFolder: $(Build.ArtifactStagingDirectory) - - task: PublishBuildArtifacts@1 - inputs: - path: $(Build.ArtifactStagingDirectory) - artifactName: tinyobjDeployWindows - - - job: deployPyPI - # Based on vispy: https://github.com/vispy/vispy/blob/master/azure-pipelines.yml - pool: {vmImage: 'ubuntu-latest'} - condition: and(succeeded(), startsWith(variables['Build.SourceBranch'], 'refs/tags/v')) - dependsOn: - - linux - - macos - - windows - steps: - - task: UsePythonVersion@0 - - # TODO(syoyo): Use buildType: specific to download multiple artifacts at once? - - task: DownloadBuildArtifacts@0 - inputs: - artifactName: 'tinyobjDeployLinux' - downloadPath: $(Pipeline.Workspace) - - - task: DownloadBuildArtifacts@0 - inputs: - artifactName: 'tinyobjDeployMacOS' - downloadPath: $(Pipeline.Workspace) - - - task: DownloadBuildArtifacts@0 - inputs: - artifactName: 'tinyobjDeployWindows' - downloadPath: $(Pipeline.Workspace) - - # Publish to PyPI through twine - - bash: | - cd $(Pipeline.Workspace) - find . 
- python -m pip install --upgrade pip - pip install twine - echo tinyobjDeployLinux/dist/* - echo tinyobjDeployLinux/wheelhouse/* tinyobjDeployMacOS/wheelhouse/* tinyobjDeployWindows/wheelhouse/* - twine upload -u "__token__" --skip-existing tinyobjDeployLinux/dist/* tinyobjDeployLinux/wheelhouse/* tinyobjDeployMacOS/wheelhouse/* tinyobjDeployWindows/wheelhouse/* - env: - TWINE_PASSWORD: $(pypiToken2) + # Disabled: python build + ## + ## Ubuntu16.04 seems now deprecated(as of 2021/12/01), + ## so use `ubuntu-latest` + #- job: linux + # pool: {vmImage: "ubuntu-latest"} + # steps: + # - task: UsePythonVersion@0 + # - bash: | + # python3 -m pip install --upgrade pip + # pip3 install cibuildwheel twine + + # # Use pipx to build source dist + # pip3 install pipx + + # # Source dist + # pipx run build --sdist + # ls -la dist/* + + # # build binary wheels + # cibuildwheel --platform linux --output-dir wheelhouse . + + # - task: CopyFiles@2 + # inputs: + # contents: 'wheelhouse/**' + # targetFolder: $(Build.ArtifactStagingDirectory) + + # - task: CopyFiles@2 + # inputs: + # contents: 'dist/**' + # targetFolder: $(Build.ArtifactStagingDirectory) + + # - task: PublishBuildArtifacts@1 + # inputs: + # path: $(Build.ArtifactStagingDirectory) + # artifactName: tinyobjDeployLinux + + #- job: macos + # pool: {vmImage: 'macOS-latest'} + # variables: + # # Support C++11: https://github.com/joerick/cibuildwheel/pull/156 + # MACOSX_DEPLOYMENT_TARGET: 10.9 + # steps: + # - task: UsePythonVersion@0 + # - bash: | + # python3 -m pip install --upgrade pip + # pip3 install cibuildwheel + # cibuildwheel --platform macos --output-dir wheelhouse . + # - task: CopyFiles@2 + # inputs: + # contents: 'wheelhouse/*.whl' + # targetFolder: $(Build.ArtifactStagingDirectory) + # - task: PublishBuildArtifacts@1 + # inputs: + # path: $(Build.ArtifactStagingDirectory) + # artifactName: tinyobjDeployMacOS + + #- job: windows + # pool: {vmImage: 'windows-latest'} + # steps: + # - task: UsePythonVersion@0 + # - bash: | + # python -m pip install --upgrade pip + # pip install cibuildwheel + # cibuildwheel --platform windows --output-dir wheelhouse . + # - task: CopyFiles@2 + # inputs: + # contents: 'wheelhouse/*.whl' + # targetFolder: $(Build.ArtifactStagingDirectory) + # - task: PublishBuildArtifacts@1 + # inputs: + # path: $(Build.ArtifactStagingDirectory) + # artifactName: tinyobjDeployWindows + + #- job: deployPyPI + # # Based on vispy: https://github.com/vispy/vispy/blob/master/azure-pipelines.yml + # pool: {vmImage: 'ubuntu-latest'} + # condition: and(succeeded(), startsWith(variables['Build.SourceBranch'], 'refs/tags/v')) + # dependsOn: + # - linux + # - macos + # - windows + # steps: + # - task: UsePythonVersion@0 + + # # TODO(syoyo): Use buildType: specific to download multiple artifacts at once? + # - task: DownloadBuildArtifacts@0 + # inputs: + # artifactName: 'tinyobjDeployLinux' + # downloadPath: $(Pipeline.Workspace) + + # - task: DownloadBuildArtifacts@0 + # inputs: + # artifactName: 'tinyobjDeployMacOS' + # downloadPath: $(Pipeline.Workspace) + + # - task: DownloadBuildArtifacts@0 + # inputs: + # artifactName: 'tinyobjDeployWindows' + # downloadPath: $(Pipeline.Workspace) + + # # Publish to PyPI through twine + # - bash: | + # cd $(Pipeline.Workspace) + # find . 
+ # python -m pip install --upgrade pip + # pip install twine + # echo tinyobjDeployLinux/dist/* + # echo tinyobjDeployLinux/wheelhouse/* tinyobjDeployMacOS/wheelhouse/* tinyobjDeployWindows/wheelhouse/* + # twine upload -u "__token__" --skip-existing tinyobjDeployLinux/dist/* tinyobjDeployLinux/wheelhouse/* tinyobjDeployMacOS/wheelhouse/* tinyobjDeployWindows/wheelhouse/* + # env: + # TWINE_PASSWORD: $(pypiToken2) trigger: branches: From 9d227d450da2c9cc06a5229c654b8fd7e1604f34 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Fri, 26 Jan 2024 04:13:33 +0900 Subject: [PATCH 120/139] Add pip install procedure. --- README.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index f75721e4..5e3ee798 100644 --- a/README.md +++ b/README.md @@ -417,9 +417,15 @@ Here is some benchmark result. Time are measured on MacBook 12(Early 2016, Core ## Python binding +``` +$ python -m pip install tinyobjloader +``` + +See [python/sample.py](python/sample.py) for example use of Python binding of tinyobjloader. + ### CI + PyPI upload -cibuildwheels + twine upload for each git tagging event is handled in Github Actions and Cirrus CI. +cibuildwheels + twine upload for each git tagging event is handled in Github Actions and Cirrus CI(arm builds). #### How to bump version(For developer) From 5ba59a3bc1410aef14aea7d8915acf92816a6cab Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Fri, 26 Jan 2024 04:14:40 +0900 Subject: [PATCH 121/139] Update encrypted token for twine in Cirrus CI build. --- .cirrus.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.cirrus.yml b/.cirrus.yml index d6adddd9..0aff9456 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -23,7 +23,7 @@ linux_aarch64_task: cpu: 4 memory: 4G environment: - TWINE_PASSWORD: ENCRYPTED[88f22b6fab51dc9306c5690f2ad999f4114320fecbcff933864d3f225bbd03037ca511742e8572fa2b63dfa9cebee365] + TWINE_PASSWORD: ENCRYPTED[ade2037764e68fea251152f7585f3f77cdd748af06dc0f06942c45a8a8770fff19032c985f8dc193229c8adb2c0fecb9] install_pre_requirements_script: - apt install -y python3-venv python-is-python3 From 52b4f668a4773011fa089a01b16fd0d914b9457f Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Fri, 26 Jan 2024 04:15:46 +0900 Subject: [PATCH 122/139] Fix dirname --- .github/{workflow => workflows}/wheels.yml | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename .github/{workflow => workflows}/wheels.yml (100%) diff --git a/.github/workflow/wheels.yml b/.github/workflows/wheels.yml similarity index 100% rename from .github/workflow/wheels.yml rename to .github/workflows/wheels.yml From 6213f60b8cd25269858134f9c433ce0ab49b021f Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Fri, 26 Jan 2024 07:10:44 +0900 Subject: [PATCH 123/139] Add support building python module in CMake. Bump required minimal cmake version for pybind11. 
--- CMakeLists.txt | 62 ++++- bootstrap-cmake-linux-with-pyhthon.sh | 20 ++ cmake/ClangClCMakeCompileRules.cmake | 9 + cmake/aarch64-linux-gnu.toolchain | 14 + cmake/clang-cl-msvc-windows.cmake | 327 ++++++++++++++++++++++++ cmake/clang-cl-msvc-wsl.cmake | 327 ++++++++++++++++++++++++ cmake/linux_i386.toolchain.cmake | 17 ++ cmake/llvm-mingw-cross.cmake | 24 ++ cmake/llvm-mingw-win64.cmake | 20 ++ cmake/mingw64-cross.cmake | 20 ++ cmake/sanitizers/FindASan.cmake | 59 +++++ cmake/sanitizers/FindMSan.cmake | 57 +++++ cmake/sanitizers/FindSanitizers.cmake | 94 +++++++ cmake/sanitizers/FindTSan.cmake | 65 +++++ cmake/sanitizers/FindUBSan.cmake | 46 ++++ cmake/sanitizers/asan-wrapper | 55 ++++ cmake/sanitizers/sanitize-helpers.cmake | 177 +++++++++++++ 17 files changed, 1390 insertions(+), 3 deletions(-) create mode 100755 bootstrap-cmake-linux-with-pyhthon.sh create mode 100644 cmake/ClangClCMakeCompileRules.cmake create mode 100644 cmake/aarch64-linux-gnu.toolchain create mode 100644 cmake/clang-cl-msvc-windows.cmake create mode 100644 cmake/clang-cl-msvc-wsl.cmake create mode 100644 cmake/linux_i386.toolchain.cmake create mode 100644 cmake/llvm-mingw-cross.cmake create mode 100644 cmake/llvm-mingw-win64.cmake create mode 100644 cmake/mingw64-cross.cmake create mode 100644 cmake/sanitizers/FindASan.cmake create mode 100644 cmake/sanitizers/FindMSan.cmake create mode 100755 cmake/sanitizers/FindSanitizers.cmake create mode 100644 cmake/sanitizers/FindTSan.cmake create mode 100644 cmake/sanitizers/FindUBSan.cmake create mode 100755 cmake/sanitizers/asan-wrapper create mode 100755 cmake/sanitizers/sanitize-helpers.cmake diff --git a/CMakeLists.txt b/CMakeLists.txt index d7fd667c..13a36ea4 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,13 +1,23 @@ #Tiny Object Loader Cmake configuration file. #This configures the Cmake system with multiple properties, depending #on the platform and configuration it is set to build in. -cmake_minimum_required(VERSION 3.2) -project(tinyobjloader) +cmake_minimum_required(VERSION 3.16) +project(tinyobjloader CXX) set(TINYOBJLOADER_SOVERSION 2) set(TINYOBJLOADER_VERSION 2.0.0-rc.10) +set(PY_TARGET "aaa") #optional double precision support option(TINYOBJLOADER_USE_DOUBLE "Build library with double precision instead of single (float)" OFF) +option(TINYOBJLOADER_WITH_PYTHON "Build Python module(for developer). Use pyproject.toml/setup.py to build Python module for end-users" OFF) +option(TINYOBJLOADER_PREFER_LOCAL_PYTHON_INSTALLATION + "Prefer locally-installed Python interpreter than system or conda/brew installed Python. Please specify your Python interpreter with `Python3_EXECUTABLE` cmake option if you enable this option." 
+ OFF) + +list(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake) +list(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake/sanitizers) +find_package(Sanitizers) # Address sanitizer (-DSANITIZE_ADDRESS=ON) + if(TINYOBJLOADER_USE_DOUBLE) set(LIBRARY_NAME ${PROJECT_NAME}_double) @@ -15,7 +25,6 @@ else() set(LIBRARY_NAME ${PROJECT_NAME}) endif() - #Folder Shortcuts set(TINYOBJLOADEREXAMPLES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/examples) @@ -46,7 +55,36 @@ set(TINYOBJLOADER_RUNTIME_DIR ${CMAKE_INSTALL_BINDIR}) option(TINYOBJLOADER_BUILD_TEST_LOADER "Build Example Loader Application" OFF) +set(CMAKE_CXX_STANDARD 11) +set(CMAKE_CXX_STANDARD_REQUIRED ON) +set(CMAKE_CXX_EXTENSIONS OFF) + +# Build standalone .so for Python binding(for developer) +if (TINYOBJLOADER_WITH_PYTHON) + + if(TINYOBJLOADER_PREFER_LOCAL_PYTHON_INSTALLATION) + #message(STATUS "Local Python") + set(Python3_FIND_FRAMEWORK NEVER) # Do not search framework python + set(Python3_FIND_STRATEGY LOCATION) + set(Python3_FIND_REGISTRY NEVER) # Windows only + else() + set(Python3_FIND_FRAMEWORK LAST + )# Prefer Brew/Conda to Apple framework python + endif() + + find_package( + Python3 + COMPONENTS Interpreter Development + REQUIRED) + + find_package(pybind11 CONFIG REQUIRED) + +endif() + + + add_library(${LIBRARY_NAME} ${tinyobjloader-Source}) +add_sanitizers(${LIBRARY_NAME}) if(BUILD_SHARED_LIBS) set_target_properties(${LIBRARY_NAME} PROPERTIES @@ -85,6 +123,24 @@ if(TINYOBJLOADER_BUILD_OBJ_STICHER) ) endif() +if (TINYOBJLOADER_WITH_PYTHON) + # pybind11 method: + pybind11_add_module(${PY_TARGET} ${CMAKE_SOURCE_DIR}/python/bindings.cc ) + + add_sanitizers(${PY_TARGET}) + set_target_properties(${PY_TARGET} PROPERTIES OUTPUT_NAME "tinyobjloader") + + # copy .so to jdepp/ + add_custom_command( + TARGET ${PY_TARGET} + POST_BUILD + COMMAND "${CMAKE_COMMAND}" -E copy "$" + "${CMAKE_SOURCE_DIR}/python/$" + COMMENT "copying tinyobjloader native python module file to python/" + VERBATIM) + +endif() + #Write CMake package config files include(CMakePackageConfigHelpers) diff --git a/bootstrap-cmake-linux-with-pyhthon.sh b/bootstrap-cmake-linux-with-pyhthon.sh new file mode 100755 index 00000000..96cf4bf6 --- /dev/null +++ b/bootstrap-cmake-linux-with-pyhthon.sh @@ -0,0 +1,20 @@ +curdir=`pwd` + +builddir=${curdir}/build_python_module + +rm -rf ${builddir} +mkdir ${builddir} + +# set path to pybind11 +# If you install pybind11 through pip, its usually installed to /pybind11. +pybind11_path=`python -c "import site; print (site.getsitepackages()[0])"` +echo ${pybind11_path} + +CC=clang CXX=clang++ \ + pybind11_DIR=${pybind11_path}/pybind11 \ + cmake \ + -B${builddir} \ + -DCMAKE_VERBOSE_MAKEFILE=1 \ + -DTINYOBJLOADER_WITH_PYTHON=1 + +cd ${curdir} diff --git a/cmake/ClangClCMakeCompileRules.cmake b/cmake/ClangClCMakeCompileRules.cmake new file mode 100644 index 00000000..a3bcf1c2 --- /dev/null +++ b/cmake/ClangClCMakeCompileRules.cmake @@ -0,0 +1,9 @@ +# macOS paths usually start with /Users/*. Unfortunately, clang-cl interprets +# paths starting with /U as macro undefines, so we need to put a -- before the +# input file path to force it to be treated as a path. CMake's compilation rules +# should be tweaked accordingly, but until that's done, and to support older +# CMake versions, overriding compilation rules works well enough. This file will +# be included by cmake after the default compilation rules have already been set +# up, so we can just modify them instead of duplicating them entirely. 
+string(REPLACE "-c " "-c -- " CMAKE_C_COMPILE_OBJECT "${CMAKE_C_COMPILE_OBJECT}") +string(REPLACE "-c " "-c -- " CMAKE_CXX_COMPILE_OBJECT "${CMAKE_CXX_COMPILE_OBJECT}") diff --git a/cmake/aarch64-linux-gnu.toolchain b/cmake/aarch64-linux-gnu.toolchain new file mode 100644 index 00000000..cdcdaf25 --- /dev/null +++ b/cmake/aarch64-linux-gnu.toolchain @@ -0,0 +1,14 @@ +set(CMAKE_SYSTEM_NAME Linux) +set(CMAKE_SYSTEM_PROCESSOR aarch64) +set(CMAKE_C_COMPILER_TARGET aarch64-linux-gnu) + +set(CMAKE_FIND_ROOT_PATH /usr/aarch64-linux-gnu/) + +# Sync with GitHub Actions config +set(CMAKE_C_COMPILER aarch64-linux-gnu-gcc) +set(CMAKE_CXX_COMPILER aarch64-linux-gnu-g++) + +set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) +set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) +set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) +set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY) diff --git a/cmake/clang-cl-msvc-windows.cmake b/cmake/clang-cl-msvc-windows.cmake new file mode 100644 index 00000000..e2eac142 --- /dev/null +++ b/cmake/clang-cl-msvc-windows.cmake @@ -0,0 +1,327 @@ +# From llvm/cmake/platforms/WinMsvc.cmake +# Modified to use clang-cl on native Windows. + +# Cross toolchain configuration for using clang-cl on non-Windows hosts to +# target MSVC. +# +# Usage: +# cmake -G Ninja +# -DCMAKE_TOOLCHAIN_FILE=/path/to/this/file +# -DHOST_ARCH=[aarch64|arm64|armv7|arm|i686|x86|x86_64|x64] +# -DLLVM_NATIVE_TOOLCHAIN=/path/to/llvm/installation +# -DMSVC_BASE=/path/to/MSVC/system/libraries/and/includes +# -DWINSDK_BASE=/path/to/windows-sdk +# -DWINSDK_VER=windows sdk version folder name +# +# HOST_ARCH: +# The architecture to build for. +# +# LLVM_NATIVE_TOOLCHAIN: +# *Absolute path* to a folder containing the toolchain which will be used to +# build. At a minimum, this folder should have a bin directory with a +# copy of clang-cl, clang, clang++, and lld-link, as well as a lib directory +# containing clang's system resource directory. +# +# MSVC_BASE: +# *Absolute path* to the folder containing MSVC headers and system libraries. +# The layout of the folder matches that which is intalled by MSVC 2017 on +# Windows, and should look like this: +# +# ${MSVC_BASE} +# include +# vector +# stdint.h +# etc... +# lib +# x64 +# libcmt.lib +# msvcrt.lib +# etc... +# x86 +# libcmt.lib +# msvcrt.lib +# etc... +# +# For versions of MSVC < 2017, or where you have a hermetic toolchain in a +# custom format, you must use symlinks or restructure it to look like the above. +# +# WINSDK_BASE: +# Together with WINSDK_VER, determines the location of Windows SDK headers +# and libraries. +# +# WINSDK_VER: +# Together with WINSDK_BASE, determines the locations of Windows SDK headers +# and libraries. +# +# WINSDK_BASE and WINSDK_VER work together to define a folder layout that matches +# that of the Windows SDK installation on a standard Windows machine. It should +# match the layout described below. +# +# Note that if you install Windows SDK to a windows machine and simply copy the +# files, it will already be in the correct layout. +# +# ${WINSDK_BASE} +# Include +# ${WINSDK_VER} +# shared +# ucrt +# um +# windows.h +# etc... +# Lib +# ${WINSDK_VER} +# ucrt +# x64 +# x86 +# ucrt.lib +# etc... +# um +# x64 +# x86 +# kernel32.lib +# etc +# +# IMPORTANT: In order for this to work, you will need a valid copy of the Windows +# SDK and C++ STL headers and libraries on your host. 
Additionally, since the +# Windows libraries and headers are not case-correct, this toolchain file sets +# up a VFS overlay for the SDK headers and case-correcting symlinks for the +# libraries when running on a case-sensitive filesystem. + + +# When configuring CMake with a toolchain file against a top-level CMakeLists.txt, +# it will actually run CMake many times, once for each small test program used to +# determine what features a compiler supports. Unfortunately, none of these +# invocations share a CMakeCache.txt with the top-level invocation, meaning they +# won't see the value of any arguments the user passed via -D. Since these are +# necessary to properly configure MSVC in both the top-level configuration as well as +# all feature-test invocations, we set environment variables with the values so that +# these environments get inherited by child invocations. We can switch to +# CMAKE_TRY_COMPILE_PLATFORM_VARIABLES once our minimum supported CMake version +# is 3.6 or greater. +function(init_user_prop prop) + if(${prop}) + set(ENV{_${prop}} "${${prop}}") + else() + set(${prop} "$ENV{_${prop}}" PARENT_SCOPE) + endif() +endfunction() + +function(generate_winsdk_vfs_overlay winsdk_include_dir output_path) + set(include_dirs) + file(GLOB_RECURSE entries LIST_DIRECTORIES true "${winsdk_include_dir}/*") + foreach(entry ${entries}) + if(IS_DIRECTORY "${entry}") + list(APPEND include_dirs "${entry}") + endif() + endforeach() + + file(WRITE "${output_path}" "version: 0\n") + file(APPEND "${output_path}" "case-sensitive: false\n") + file(APPEND "${output_path}" "roots:\n") + + foreach(dir ${include_dirs}) + file(GLOB headers RELATIVE "${dir}" "${dir}/*.h") + if(NOT headers) + continue() + endif() + + file(APPEND "${output_path}" " - name: \"${dir}\"\n") + file(APPEND "${output_path}" " type: directory\n") + file(APPEND "${output_path}" " contents:\n") + + foreach(header ${headers}) + file(APPEND "${output_path}" " - name: \"${header}\"\n") + file(APPEND "${output_path}" " type: file\n") + file(APPEND "${output_path}" " external-contents: \"${dir}/${header}\"\n") + endforeach() + endforeach() +endfunction() + +function(generate_winsdk_lib_symlinks winsdk_um_lib_dir output_dir) + execute_process(COMMAND "${CMAKE_COMMAND}" -E make_directory "${output_dir}") + file(GLOB libraries RELATIVE "${winsdk_um_lib_dir}" "${winsdk_um_lib_dir}/*") + foreach(library ${libraries}) + string(TOLOWER "${library}" all_lowercase_symlink_name) + if(NOT library STREQUAL all_lowercase_symlink_name) + execute_process(COMMAND "${CMAKE_COMMAND}" + -E create_symlink + "${winsdk_um_lib_dir}/${library}" + "${output_dir}/${all_lowercase_symlink_name}") + endif() + + get_filename_component(name_we "${library}" NAME_WE) + get_filename_component(ext "${library}" EXT) + string(TOLOWER "${ext}" lowercase_ext) + set(lowercase_ext_symlink_name "${name_we}${lowercase_ext}") + if(NOT library STREQUAL lowercase_ext_symlink_name AND + NOT all_lowercase_symlink_name STREQUAL lowercase_ext_symlink_name) + execute_process(COMMAND "${CMAKE_COMMAND}" + -E create_symlink + "${winsdk_um_lib_dir}/${library}" + "${output_dir}/${lowercase_ext_symlink_name}") + endif() + endforeach() +endfunction() + +set(CMAKE_SYSTEM_NAME Windows) +set(CMAKE_SYSTEM_VERSION 10.0) +set(CMAKE_SYSTEM_PROCESSOR AMD64) + +init_user_prop(HOST_ARCH) +init_user_prop(LLVM_NATIVE_TOOLCHAIN) +init_user_prop(MSVC_BASE) +init_user_prop(WINSDK_BASE) +init_user_prop(WINSDK_VER) + +if(NOT HOST_ARCH) + set(HOST_ARCH x86_64) +endif() +if(HOST_ARCH STREQUAL "aarch64" OR HOST_ARCH 
STREQUAL "arm64") + set(TRIPLE_ARCH "aarch64") + set(WINSDK_ARCH "arm64") +elseif(HOST_ARCH STREQUAL "armv7" OR HOST_ARCH STREQUAL "arm") + set(TRIPLE_ARCH "armv7") + set(WINSDK_ARCH "arm") +elseif(HOST_ARCH STREQUAL "i686" OR HOST_ARCH STREQUAL "x86") + set(TRIPLE_ARCH "i686") + set(WINSDK_ARCH "x86") +elseif(HOST_ARCH STREQUAL "x86_64" OR HOST_ARCH STREQUAL "x64") + set(TRIPLE_ARCH "x86_64") + set(WINSDK_ARCH "x64") +else() + message(SEND_ERROR "Unknown host architecture ${HOST_ARCH}. Must be aarch64 (or arm64), armv7 (or arm), i686 (or x86), or x86_64 (or x64).") +endif() + +set(MSVC_INCLUDE "${MSVC_BASE}/include") +set(ATLMFC_INCLUDE "${MSVC_BASE}/atlmfc/include") +set(MSVC_LIB "${MSVC_BASE}/lib") +set(ATLMFC_LIB "${MSVC_BASE}/atlmfc/lib") +set(WINSDK_INCLUDE "${WINSDK_BASE}/Include/${WINSDK_VER}") +set(WINSDK_LIB "${WINSDK_BASE}/Lib/${WINSDK_VER}") + +# Do some sanity checking to make sure we can find a native toolchain and +# that the Windows SDK / MSVC STL directories look kosher. +if(NOT EXISTS "${LLVM_NATIVE_TOOLCHAIN}/bin/clang-cl.exe" OR + NOT EXISTS "${LLVM_NATIVE_TOOLCHAIN}/bin/lld-link.exe") + message(SEND_ERROR + "LLVM_NATIVE_TOOLCHAIN folder '${LLVM_NATIVE_TOOLCHAIN}' does not " + "point to a valid directory containing bin/clang-cl.exe and bin/lld-link.exe " + "binaries") +endif() + +if(NOT EXISTS "${MSVC_BASE}" OR + NOT EXISTS "${MSVC_INCLUDE}" OR + NOT EXISTS "${MSVC_LIB}") + message(SEND_ERROR + "CMake variable MSVC_BASE must point to a folder containing MSVC " + "system headers and libraries") +endif() + +if(NOT EXISTS "${WINSDK_BASE}" OR + NOT EXISTS "${WINSDK_INCLUDE}" OR + NOT EXISTS "${WINSDK_LIB}") + message(SEND_ERROR + "CMake variable WINSDK_BASE and WINSDK_VER must resolve to a valid " + "Windows SDK installation") +endif() + +if(NOT EXISTS "${WINSDK_INCLUDE}/um/Windows.h") + message(SEND_ERROR "Cannot find Windows.h") +endif() +if(NOT EXISTS "${WINSDK_INCLUDE}/um/WINDOWS.H") + set(case_sensitive_filesystem TRUE) +endif() + +set(CMAKE_C_COMPILER "${LLVM_NATIVE_TOOLCHAIN}/bin/clang-cl.exe" CACHE FILEPATH "") +set(CMAKE_CXX_COMPILER "${LLVM_NATIVE_TOOLCHAIN}/bin/clang-cl.exe" CACHE FILEPATH "") +set(CMAKE_LINKER "${LLVM_NATIVE_TOOLCHAIN}/bin/lld-link.exe" CACHE FILEPATH "") + +# Even though we're cross-compiling, we need some native tools (e.g. llvm-tblgen), and those +# native tools have to be built before we can start doing the cross-build. LLVM supports +# a CROSS_TOOLCHAIN_FLAGS_NATIVE argument which consists of a list of flags to pass to CMake +# when configuring the NATIVE portion of the cross-build. By default we construct this so +# that it points to the tools in the same location as the native clang-cl that we're using. +list(APPEND _CTF_NATIVE_DEFAULT "-DCMAKE_ASM_COMPILER=${LLVM_NATIVE_TOOLCHAIN}/bin/clang") +list(APPEND _CTF_NATIVE_DEFAULT "-DCMAKE_C_COMPILER=${LLVM_NATIVE_TOOLCHAIN}/bin/clang") +list(APPEND _CTF_NATIVE_DEFAULT "-DCMAKE_CXX_COMPILER=${LLVM_NATIVE_TOOLCHAIN}/bin/clang++") + +set(CROSS_TOOLCHAIN_FLAGS_NATIVE "${_CTF_NATIVE_DEFAULT}" CACHE STRING "") + +set(COMPILE_FLAGS + -D_CRT_SECURE_NO_WARNINGS + --target=${TRIPLE_ARCH}-windows-msvc + -fms-compatibility-version=19.11 + -imsvc "\"${ATLMFC_INCLUDE}\"" + -imsvc "\"${MSVC_INCLUDE}\"" + -imsvc "\"${WINSDK_INCLUDE}/ucrt\"" + -imsvc "\"${WINSDK_INCLUDE}/shared\"" + -imsvc "\"${WINSDK_INCLUDE}/um\"" + -imsvc "\"${WINSDK_INCLUDE}/winrt\"") + +if(case_sensitive_filesystem) + # Ensure all sub-configures use the top-level VFS overlay instead of generating their own. 
+ init_user_prop(winsdk_vfs_overlay_path) + if(NOT winsdk_vfs_overlay_path) + set(winsdk_vfs_overlay_path "${CMAKE_BINARY_DIR}/winsdk_vfs_overlay.yaml") + generate_winsdk_vfs_overlay("${WINSDK_BASE}/Include/${WINSDK_VER}" "${winsdk_vfs_overlay_path}") + init_user_prop(winsdk_vfs_overlay_path) + endif() + list(APPEND COMPILE_FLAGS + -Xclang -ivfsoverlay -Xclang "${winsdk_vfs_overlay_path}") +endif() + +string(REPLACE ";" " " COMPILE_FLAGS "${COMPILE_FLAGS}") + +# We need to preserve any flags that were passed in by the user. However, we +# can't append to CMAKE_C_FLAGS and friends directly, because toolchain files +# will be re-invoked on each reconfigure and therefore need to be idempotent. +# The assignments to the _INITIAL cache variables don't use FORCE, so they'll +# only be populated on the initial configure, and their values won't change +# afterward. +set(_CMAKE_C_FLAGS_INITIAL "${CMAKE_C_FLAGS}" CACHE STRING "") +set(CMAKE_C_FLAGS "${_CMAKE_C_FLAGS_INITIAL} ${COMPILE_FLAGS}" CACHE STRING "" FORCE) + +set(_CMAKE_CXX_FLAGS_INITIAL "${CMAKE_CXX_FLAGS}" CACHE STRING "") +set(CMAKE_CXX_FLAGS "${_CMAKE_CXX_FLAGS_INITIAL} ${COMPILE_FLAGS}" CACHE STRING "" FORCE) + +set(LINK_FLAGS + # Prevent CMake from attempting to invoke mt.exe. It only recognizes the slashed form and not the dashed form. + /manifest:no + + -libpath:"${ATLMFC_LIB}/${WINSDK_ARCH}" + -libpath:"${MSVC_LIB}/${WINSDK_ARCH}" + -libpath:"${WINSDK_LIB}/ucrt/${WINSDK_ARCH}" + -libpath:"${WINSDK_LIB}/um/${WINSDK_ARCH}") + +if(case_sensitive_filesystem) + # Ensure all sub-configures use the top-level symlinks dir instead of generating their own. + init_user_prop(winsdk_lib_symlinks_dir) + if(NOT winsdk_lib_symlinks_dir) + set(winsdk_lib_symlinks_dir "${CMAKE_BINARY_DIR}/winsdk_lib_symlinks") + generate_winsdk_lib_symlinks("${WINSDK_BASE}/Lib/${WINSDK_VER}/um/${WINSDK_ARCH}" "${winsdk_lib_symlinks_dir}") + init_user_prop(winsdk_lib_symlinks_dir) + endif() + list(APPEND LINK_FLAGS + -libpath:"${winsdk_lib_symlinks_dir}") +endif() + +string(REPLACE ";" " " LINK_FLAGS "${LINK_FLAGS}") + +# See explanation for compiler flags above for the _INITIAL variables. +set(_CMAKE_EXE_LINKER_FLAGS_INITIAL "${CMAKE_EXE_LINKER_FLAGS}" CACHE STRING "") +set(CMAKE_EXE_LINKER_FLAGS "${_CMAKE_EXE_LINKER_FLAGS_INITIAL} ${LINK_FLAGS}" CACHE STRING "" FORCE) + +set(_CMAKE_MODULE_LINKER_FLAGS_INITIAL "${CMAKE_MODULE_LINKER_FLAGS}" CACHE STRING "") +set(CMAKE_MODULE_LINKER_FLAGS "${_CMAKE_MODULE_LINKER_FLAGS_INITIAL} ${LINK_FLAGS}" CACHE STRING "" FORCE) + +set(_CMAKE_SHARED_LINKER_FLAGS_INITIAL "${CMAKE_SHARED_LINKER_FLAGS}" CACHE STRING "") +set(CMAKE_SHARED_LINKER_FLAGS "${_CMAKE_SHARED_LINKER_FLAGS_INITIAL} ${LINK_FLAGS}" CACHE STRING "" FORCE) + +# CMake populates these with a bunch of unnecessary libraries, which requires +# extra case-correcting symlinks and what not. Instead, let projects explicitly +# control which libraries they require. +set(CMAKE_C_STANDARD_LIBRARIES "" CACHE STRING "" FORCE) +set(CMAKE_CXX_STANDARD_LIBRARIES "" CACHE STRING "" FORCE) + +# Allow clang-cl to work with macOS paths. +set(CMAKE_USER_MAKE_RULES_OVERRIDE "${CMAKE_CURRENT_LIST_DIR}/ClangClCMakeCompileRules.cmake") diff --git a/cmake/clang-cl-msvc-wsl.cmake b/cmake/clang-cl-msvc-wsl.cmake new file mode 100644 index 00000000..ffe21314 --- /dev/null +++ b/cmake/clang-cl-msvc-wsl.cmake @@ -0,0 +1,327 @@ +# From llvm/cmake/platforms/WinMsvc.cmake +# Modified to use clang-cl on native Windows. 
+ +# Cross toolchain configuration for using clang-cl on non-Windows hosts to +# target MSVC. +# +# Usage: +# cmake -G Ninja +# -DCMAKE_TOOLCHAIN_FILE=/path/to/this/file +# -DHOST_ARCH=[aarch64|arm64|armv7|arm|i686|x86|x86_64|x64] +# -DLLVM_NATIVE_TOOLCHAIN=/path/to/llvm/installation +# -DMSVC_BASE=/path/to/MSVC/system/libraries/and/includes +# -DWINSDK_BASE=/path/to/windows-sdk +# -DWINSDK_VER=windows sdk version folder name +# +# HOST_ARCH: +# The architecture to build for. +# +# LLVM_NATIVE_TOOLCHAIN: +# *Absolute path* to a folder containing the toolchain which will be used to +# build. At a minimum, this folder should have a bin directory with a +# copy of clang-cl, clang, clang++, and lld-link, as well as a lib directory +# containing clang's system resource directory. +# +# MSVC_BASE: +# *Absolute path* to the folder containing MSVC headers and system libraries. +# The layout of the folder matches that which is intalled by MSVC 2017 on +# Windows, and should look like this: +# +# ${MSVC_BASE} +# include +# vector +# stdint.h +# etc... +# lib +# x64 +# libcmt.lib +# msvcrt.lib +# etc... +# x86 +# libcmt.lib +# msvcrt.lib +# etc... +# +# For versions of MSVC < 2017, or where you have a hermetic toolchain in a +# custom format, you must use symlinks or restructure it to look like the above. +# +# WINSDK_BASE: +# Together with WINSDK_VER, determines the location of Windows SDK headers +# and libraries. +# +# WINSDK_VER: +# Together with WINSDK_BASE, determines the locations of Windows SDK headers +# and libraries. +# +# WINSDK_BASE and WINSDK_VER work together to define a folder layout that matches +# that of the Windows SDK installation on a standard Windows machine. It should +# match the layout described below. +# +# Note that if you install Windows SDK to a windows machine and simply copy the +# files, it will already be in the correct layout. +# +# ${WINSDK_BASE} +# Include +# ${WINSDK_VER} +# shared +# ucrt +# um +# windows.h +# etc... +# Lib +# ${WINSDK_VER} +# ucrt +# x64 +# x86 +# ucrt.lib +# etc... +# um +# x64 +# x86 +# kernel32.lib +# etc +# +# IMPORTANT: In order for this to work, you will need a valid copy of the Windows +# SDK and C++ STL headers and libraries on your host. Additionally, since the +# Windows libraries and headers are not case-correct, this toolchain file sets +# up a VFS overlay for the SDK headers and case-correcting symlinks for the +# libraries when running on a case-sensitive filesystem. + + +# When configuring CMake with a toolchain file against a top-level CMakeLists.txt, +# it will actually run CMake many times, once for each small test program used to +# determine what features a compiler supports. Unfortunately, none of these +# invocations share a CMakeCache.txt with the top-level invocation, meaning they +# won't see the value of any arguments the user passed via -D. Since these are +# necessary to properly configure MSVC in both the top-level configuration as well as +# all feature-test invocations, we set environment variables with the values so that +# these environments get inherited by child invocations. We can switch to +# CMAKE_TRY_COMPILE_PLATFORM_VARIABLES once our minimum supported CMake version +# is 3.6 or greater. 
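+#
+# A rough sketch of the flow (illustrative only; the value /opt/msvc is an
+# assumed example): when the top-level configure is run with
+# -DMSVC_BASE=/opt/msvc, init_user_prop(MSVC_BASE) below sees the variable set
+# and mirrors it into the environment as $ENV{_MSVC_BASE}. A try_compile()
+# child configure starts with an empty cache, so the same call then finds
+# MSVC_BASE unset and restores it from $ENV{_MSVC_BASE} instead:
+#
+#   init_user_prop(MSVC_BASE)   # top level: copies MSVC_BASE -> ENV{_MSVC_BASE}
+#   init_user_prop(MSVC_BASE)   # child:     copies ENV{_MSVC_BASE} -> MSVC_BASE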
+function(init_user_prop prop) + if(${prop}) + set(ENV{_${prop}} "${${prop}}") + else() + set(${prop} "$ENV{_${prop}}" PARENT_SCOPE) + endif() +endfunction() + +function(generate_winsdk_vfs_overlay winsdk_include_dir output_path) + set(include_dirs) + file(GLOB_RECURSE entries LIST_DIRECTORIES true "${winsdk_include_dir}/*") + foreach(entry ${entries}) + if(IS_DIRECTORY "${entry}") + list(APPEND include_dirs "${entry}") + endif() + endforeach() + + file(WRITE "${output_path}" "version: 0\n") + file(APPEND "${output_path}" "case-sensitive: false\n") + file(APPEND "${output_path}" "roots:\n") + + foreach(dir ${include_dirs}) + file(GLOB headers RELATIVE "${dir}" "${dir}/*.h") + if(NOT headers) + continue() + endif() + + file(APPEND "${output_path}" " - name: \"${dir}\"\n") + file(APPEND "${output_path}" " type: directory\n") + file(APPEND "${output_path}" " contents:\n") + + foreach(header ${headers}) + file(APPEND "${output_path}" " - name: \"${header}\"\n") + file(APPEND "${output_path}" " type: file\n") + file(APPEND "${output_path}" " external-contents: \"${dir}/${header}\"\n") + endforeach() + endforeach() +endfunction() + +function(generate_winsdk_lib_symlinks winsdk_um_lib_dir output_dir) + execute_process(COMMAND "${CMAKE_COMMAND}" -E make_directory "${output_dir}") + file(GLOB libraries RELATIVE "${winsdk_um_lib_dir}" "${winsdk_um_lib_dir}/*") + foreach(library ${libraries}) + string(TOLOWER "${library}" all_lowercase_symlink_name) + if(NOT library STREQUAL all_lowercase_symlink_name) + execute_process(COMMAND "${CMAKE_COMMAND}" + -E create_symlink + "${winsdk_um_lib_dir}/${library}" + "${output_dir}/${all_lowercase_symlink_name}") + endif() + + get_filename_component(name_we "${library}" NAME_WE) + get_filename_component(ext "${library}" EXT) + string(TOLOWER "${ext}" lowercase_ext) + set(lowercase_ext_symlink_name "${name_we}${lowercase_ext}") + if(NOT library STREQUAL lowercase_ext_symlink_name AND + NOT all_lowercase_symlink_name STREQUAL lowercase_ext_symlink_name) + execute_process(COMMAND "${CMAKE_COMMAND}" + -E create_symlink + "${winsdk_um_lib_dir}/${library}" + "${output_dir}/${lowercase_ext_symlink_name}") + endif() + endforeach() +endfunction() + +set(CMAKE_SYSTEM_NAME Windows) +set(CMAKE_SYSTEM_VERSION 10.0) +set(CMAKE_SYSTEM_PROCESSOR AMD64) + +init_user_prop(HOST_ARCH) +init_user_prop(LLVM_NATIVE_TOOLCHAIN) +init_user_prop(MSVC_BASE) +init_user_prop(WINSDK_BASE) +init_user_prop(WINSDK_VER) + +if(NOT HOST_ARCH) + set(HOST_ARCH x86_64) +endif() +if(HOST_ARCH STREQUAL "aarch64" OR HOST_ARCH STREQUAL "arm64") + set(TRIPLE_ARCH "aarch64") + set(WINSDK_ARCH "arm64") +elseif(HOST_ARCH STREQUAL "armv7" OR HOST_ARCH STREQUAL "arm") + set(TRIPLE_ARCH "armv7") + set(WINSDK_ARCH "arm") +elseif(HOST_ARCH STREQUAL "i686" OR HOST_ARCH STREQUAL "x86") + set(TRIPLE_ARCH "i686") + set(WINSDK_ARCH "x86") +elseif(HOST_ARCH STREQUAL "x86_64" OR HOST_ARCH STREQUAL "x64") + set(TRIPLE_ARCH "x86_64") + set(WINSDK_ARCH "x64") +else() + message(SEND_ERROR "Unknown host architecture ${HOST_ARCH}. 
Must be aarch64 (or arm64), armv7 (or arm), i686 (or x86), or x86_64 (or x64).") +endif() + +set(MSVC_INCLUDE "${MSVC_BASE}/include") +set(ATLMFC_INCLUDE "${MSVC_BASE}/atlmfc/include") +set(MSVC_LIB "${MSVC_BASE}/lib") +set(ATLMFC_LIB "${MSVC_BASE}/atlmfc/lib") +set(WINSDK_INCLUDE "${WINSDK_BASE}/Include/${WINSDK_VER}") +set(WINSDK_LIB "${WINSDK_BASE}/Lib/${WINSDK_VER}") + +# Do some sanity checking to make sure we can find a native toolchain and +# that the Windows SDK / MSVC STL directories look kosher. +if(NOT EXISTS "${LLVM_NATIVE_TOOLCHAIN}/bin/clang-cl" OR + NOT EXISTS "${LLVM_NATIVE_TOOLCHAIN}/bin/lld-link") + message(SEND_ERROR + "LLVM_NATIVE_TOOLCHAIN folder '${LLVM_NATIVE_TOOLCHAIN}' does not " + "point to a valid directory containing bin/clang-cl and bin/lld-link " + "binaries") +endif() + +if(NOT EXISTS "${MSVC_BASE}" OR + NOT EXISTS "${MSVC_INCLUDE}" OR + NOT EXISTS "${MSVC_LIB}") + message(SEND_ERROR + "CMake variable MSVC_BASE must point to a folder containing MSVC " + "system headers and libraries") +endif() + +if(NOT EXISTS "${WINSDK_BASE}" OR + NOT EXISTS "${WINSDK_INCLUDE}" OR + NOT EXISTS "${WINSDK_LIB}") + message(SEND_ERROR + "CMake variable WINSDK_BASE and WINSDK_VER must resolve to a valid " + "Windows SDK installation") +endif() + +if(NOT EXISTS "${WINSDK_INCLUDE}/um/Windows.h") + message(SEND_ERROR "Cannot find Windows.h") +endif() +if(NOT EXISTS "${WINSDK_INCLUDE}/um/WINDOWS.H") + set(case_sensitive_filesystem TRUE) +endif() + +set(CMAKE_C_COMPILER "${LLVM_NATIVE_TOOLCHAIN}/bin/clang-cl" CACHE FILEPATH "") +set(CMAKE_CXX_COMPILER "${LLVM_NATIVE_TOOLCHAIN}/bin/clang-cl" CACHE FILEPATH "") +set(CMAKE_LINKER "${LLVM_NATIVE_TOOLCHAIN}/bin/lld-link" CACHE FILEPATH "") + +# Even though we're cross-compiling, we need some native tools (e.g. llvm-tblgen), and those +# native tools have to be built before we can start doing the cross-build. LLVM supports +# a CROSS_TOOLCHAIN_FLAGS_NATIVE argument which consists of a list of flags to pass to CMake +# when configuring the NATIVE portion of the cross-build. By default we construct this so +# that it points to the tools in the same location as the native clang-cl that we're using. +list(APPEND _CTF_NATIVE_DEFAULT "-DCMAKE_ASM_COMPILER=${LLVM_NATIVE_TOOLCHAIN}/bin/clang") +list(APPEND _CTF_NATIVE_DEFAULT "-DCMAKE_C_COMPILER=${LLVM_NATIVE_TOOLCHAIN}/bin/clang") +list(APPEND _CTF_NATIVE_DEFAULT "-DCMAKE_CXX_COMPILER=${LLVM_NATIVE_TOOLCHAIN}/bin/clang++") + +set(CROSS_TOOLCHAIN_FLAGS_NATIVE "${_CTF_NATIVE_DEFAULT}" CACHE STRING "") + +set(COMPILE_FLAGS + -D_CRT_SECURE_NO_WARNINGS + --target=${TRIPLE_ARCH}-windows-msvc + -fms-compatibility-version=19.11 + -imsvc "\"${ATLMFC_INCLUDE}\"" + -imsvc "\"${MSVC_INCLUDE}\"" + -imsvc "\"${WINSDK_INCLUDE}/ucrt\"" + -imsvc "\"${WINSDK_INCLUDE}/shared\"" + -imsvc "\"${WINSDK_INCLUDE}/um\"" + -imsvc "\"${WINSDK_INCLUDE}/winrt\"") + +if(case_sensitive_filesystem) + # Ensure all sub-configures use the top-level VFS overlay instead of generating their own. + init_user_prop(winsdk_vfs_overlay_path) + if(NOT winsdk_vfs_overlay_path) + set(winsdk_vfs_overlay_path "${CMAKE_BINARY_DIR}/winsdk_vfs_overlay.yaml") + generate_winsdk_vfs_overlay("${WINSDK_BASE}/Include/${WINSDK_VER}" "${winsdk_vfs_overlay_path}") + init_user_prop(winsdk_vfs_overlay_path) + endif() + list(APPEND COMPILE_FLAGS + -Xclang -ivfsoverlay -Xclang "${winsdk_vfs_overlay_path}") +endif() + +string(REPLACE ";" " " COMPILE_FLAGS "${COMPILE_FLAGS}") + +# We need to preserve any flags that were passed in by the user. 
However, we +# can't append to CMAKE_C_FLAGS and friends directly, because toolchain files +# will be re-invoked on each reconfigure and therefore need to be idempotent. +# The assignments to the _INITIAL cache variables don't use FORCE, so they'll +# only be populated on the initial configure, and their values won't change +# afterward. +set(_CMAKE_C_FLAGS_INITIAL "${CMAKE_C_FLAGS}" CACHE STRING "") +set(CMAKE_C_FLAGS "${_CMAKE_C_FLAGS_INITIAL} ${COMPILE_FLAGS}" CACHE STRING "" FORCE) + +set(_CMAKE_CXX_FLAGS_INITIAL "${CMAKE_CXX_FLAGS}" CACHE STRING "") +set(CMAKE_CXX_FLAGS "${_CMAKE_CXX_FLAGS_INITIAL} ${COMPILE_FLAGS}" CACHE STRING "" FORCE) + +set(LINK_FLAGS + # Prevent CMake from attempting to invoke mt.exe. It only recognizes the slashed form and not the dashed form. + /manifest:no + + -libpath:"${ATLMFC_LIB}/${WINSDK_ARCH}" + -libpath:"${MSVC_LIB}/${WINSDK_ARCH}" + -libpath:"${WINSDK_LIB}/ucrt/${WINSDK_ARCH}" + -libpath:"${WINSDK_LIB}/um/${WINSDK_ARCH}") + +if(case_sensitive_filesystem) + # Ensure all sub-configures use the top-level symlinks dir instead of generating their own. + init_user_prop(winsdk_lib_symlinks_dir) + if(NOT winsdk_lib_symlinks_dir) + set(winsdk_lib_symlinks_dir "${CMAKE_BINARY_DIR}/winsdk_lib_symlinks") + generate_winsdk_lib_symlinks("${WINSDK_BASE}/Lib/${WINSDK_VER}/um/${WINSDK_ARCH}" "${winsdk_lib_symlinks_dir}") + init_user_prop(winsdk_lib_symlinks_dir) + endif() + list(APPEND LINK_FLAGS + -libpath:"${winsdk_lib_symlinks_dir}") +endif() + +string(REPLACE ";" " " LINK_FLAGS "${LINK_FLAGS}") + +# See explanation for compiler flags above for the _INITIAL variables. +set(_CMAKE_EXE_LINKER_FLAGS_INITIAL "${CMAKE_EXE_LINKER_FLAGS}" CACHE STRING "") +set(CMAKE_EXE_LINKER_FLAGS "${_CMAKE_EXE_LINKER_FLAGS_INITIAL} ${LINK_FLAGS}" CACHE STRING "" FORCE) + +set(_CMAKE_MODULE_LINKER_FLAGS_INITIAL "${CMAKE_MODULE_LINKER_FLAGS}" CACHE STRING "") +set(CMAKE_MODULE_LINKER_FLAGS "${_CMAKE_MODULE_LINKER_FLAGS_INITIAL} ${LINK_FLAGS}" CACHE STRING "" FORCE) + +set(_CMAKE_SHARED_LINKER_FLAGS_INITIAL "${CMAKE_SHARED_LINKER_FLAGS}" CACHE STRING "") +set(CMAKE_SHARED_LINKER_FLAGS "${_CMAKE_SHARED_LINKER_FLAGS_INITIAL} ${LINK_FLAGS}" CACHE STRING "" FORCE) + +# CMake populates these with a bunch of unnecessary libraries, which requires +# extra case-correcting symlinks and what not. Instead, let projects explicitly +# control which libraries they require. +set(CMAKE_C_STANDARD_LIBRARIES "" CACHE STRING "" FORCE) +set(CMAKE_CXX_STANDARD_LIBRARIES "" CACHE STRING "" FORCE) + +# Allow clang-cl to work with macOS paths. 
+set(CMAKE_USER_MAKE_RULES_OVERRIDE "${CMAKE_CURRENT_LIST_DIR}/ClangClCMakeCompileRules.cmake") diff --git a/cmake/linux_i386.toolchain.cmake b/cmake/linux_i386.toolchain.cmake new file mode 100644 index 00000000..9c4a5094 --- /dev/null +++ b/cmake/linux_i386.toolchain.cmake @@ -0,0 +1,17 @@ +set(CMAKE_SYSTEM_NAME Linux) +set(CMAKE_SYSTEM_PROCESSOR "i386") +set(CMAKE_C_COMPILER_TARGET i386-linux-gnu) + +# Assume debian/ubuntu +#set(CMAKE_FIND_ROOT_PATH /usr/lib/i386-linux-gnu/) + +set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -m32") +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -m32") + +set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) +set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) +set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) +set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY) + +# https://stackoverflow.com/questions/41557927/using-usr-lib-i386-linux-gnu-instead-of-usr-lib-x86-64-linux-gnu-to-find-libra +set(FIND_LIBRARY_USE_LIB64_PATHS OFF) diff --git a/cmake/llvm-mingw-cross.cmake b/cmake/llvm-mingw-cross.cmake new file mode 100644 index 00000000..f7e1759c --- /dev/null +++ b/cmake/llvm-mingw-cross.cmake @@ -0,0 +1,24 @@ +SET(CMAKE_SYSTEM_NAME Windows) + +IF (DEFINED ENV{LLVM_MINGW_DIR}) + SET(LLVM_MINGW_ROOT "$ENV{LLVM_MINGW_DIR}") +ELSE () + SET(LLVM_MINGW_ROOT "/mnt/data/local/llvm-mingw-20200325-ubuntu-18.04") +ENDIF() + + +SET(CMAKE_C_COMPILER ${LLVM_MINGW_ROOT}/bin/x86_64-w64-mingw32-clang) +SET(CMAKE_CXX_COMPILER ${LLVM_MINGW_ROOT}/bin/x86_64-w64-mingw32-clang++) +SET(CMAKE_RC_COMPILER ${LLVM_MINGW_ROOT}/bin/x86_64-w64-mingw32-windres) + +#SET(CMAKE_C_LINK_EXECUTABLE x86_64-w64-mingw32-gcc) +#SET(CMAKE_CXX_LINK_EXECUTABLE x86_64-w64-mingw32-g++) + +SET(CMAKE_FIND_ROOT_PATH ${LLVM_MINGW_ROOT}/x86_64-w64-mingw32) + +# We may need some advanced thread APIs to compile, so enable 0x601(Win7) if required. +# SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D_WIN32_WINNT=0x601") + +SET(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) +SET(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) +SET(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) diff --git a/cmake/llvm-mingw-win64.cmake b/cmake/llvm-mingw-win64.cmake new file mode 100644 index 00000000..439b5240 --- /dev/null +++ b/cmake/llvm-mingw-win64.cmake @@ -0,0 +1,20 @@ +SET(CMAKE_SYSTEM_NAME Windows) + +IF (DEFINED ENV{LLVM_MINGW_DIR}) + SET(LLVM_MINGW_ROOT "$ENV{LLVM_MINGW_DIR}") +ELSE () + SET(LLVM_MINGW_ROOT "C:/ProgramData/llvm-mingw") +ENDIF() + +SET(CMAKE_C_COMPILER ${LLVM_MINGW_ROOT}/bin/x86_64-w64-mingw32-clang.exe) +SET(CMAKE_CXX_COMPILER ${LLVM_MINGW_ROOT}/bin/x86_64-w64-mingw32-clang++.exe) +SET(CMAKE_RC_COMPILER ${LLVM_MINGW_ROOT}/bin/x86_64-w64-mingw32-windres.exe) + +SET(CMAKE_FIND_ROOT_PATH ${LLVM_MINGW_ROOT}/x86_64-w64-mingw32) + +# We may need some advanced thread APIs to compile tinyusz. use 0x601(Win7) if required +# SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D_WIN32_WINNT=0x601") + +SET(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) +SET(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) +SET(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) diff --git a/cmake/mingw64-cross.cmake b/cmake/mingw64-cross.cmake new file mode 100644 index 00000000..479cdd4d --- /dev/null +++ b/cmake/mingw64-cross.cmake @@ -0,0 +1,20 @@ +SET(CMAKE_SYSTEM_NAME Windows) + +IF (DEFINED ENV{MINGW_GCC_DIR}) + SET(MINGW_GCC_ROOT "$ENV{MINGW_GCC_DIR}") +ELSE () + # Assume mingw cross compiler is installed in your system + SET(MINGW_GCC_ROOT "/usr") +ENDIF() + +# win32 may fail to compile with C++11 threads. 
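+# The *-posix driver variants selected below use MinGW-w64's posix thread
+# model (winpthreads), which GCC needs for std::thread and friends; the
+# *-win32 variants may lack it. A typical invocation of this toolchain file
+# might look like the following sketch (paths are assumed examples):
+#
+#   MINGW_GCC_DIR=/usr cmake -B build_mingw \
+#     -DCMAKE_TOOLCHAIN_FILE=cmake/mingw64-cross.cmake .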
+ +SET(CMAKE_C_COMPILER ${MINGW_GCC_ROOT}/bin/x86_64-w64-mingw32-gcc-posix) +SET(CMAKE_CXX_COMPILER ${MINGW_GCC_ROOT}/bin/x86_64-w64-mingw32-g++-posix) +SET(CMAKE_RC_COMPILER ${MINGW_GCC_ROOT}/bin/x86_64-w64-mingw32-windres) + +SET(CMAKE_FIND_ROOT_PATH ${MINGW_GCC_ROOT}/x86_64-w64-mingw32) + +SET(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) +SET(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) +SET(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) diff --git a/cmake/sanitizers/FindASan.cmake b/cmake/sanitizers/FindASan.cmake new file mode 100644 index 00000000..98ea7cb3 --- /dev/null +++ b/cmake/sanitizers/FindASan.cmake @@ -0,0 +1,59 @@ +# The MIT License (MIT) +# +# Copyright (c) +# 2013 Matthew Arsenault +# 2015-2016 RWTH Aachen University, Federal Republic of Germany +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +option(SANITIZE_ADDRESS "Enable AddressSanitizer for sanitized targets." Off) + +set(FLAG_CANDIDATES + # Clang 3.2+ use this version. The no-omit-frame-pointer option is optional. 
+ "-g -fsanitize=address -fno-omit-frame-pointer" + "-g -fsanitize=address" + + # Older deprecated flag for ASan + "-g -faddress-sanitizer" +) + + +if (SANITIZE_ADDRESS AND (SANITIZE_THREAD OR SANITIZE_MEMORY)) + message(FATAL_ERROR "AddressSanitizer is not compatible with " + "ThreadSanitizer or MemorySanitizer.") +endif () + + +include(sanitize-helpers) + +if (SANITIZE_ADDRESS) + sanitizer_check_compiler_flags("${FLAG_CANDIDATES}" "AddressSanitizer" + "ASan") + + find_program(ASan_WRAPPER "asan-wrapper" PATHS ${CMAKE_MODULE_PATH}) + mark_as_advanced(ASan_WRAPPER) +endif () + +function (add_sanitize_address TARGET) + if (NOT SANITIZE_ADDRESS) + return() + endif () + + sanitizer_add_flags(${TARGET} "AddressSanitizer" "ASan") +endfunction () diff --git a/cmake/sanitizers/FindMSan.cmake b/cmake/sanitizers/FindMSan.cmake new file mode 100644 index 00000000..22d0050e --- /dev/null +++ b/cmake/sanitizers/FindMSan.cmake @@ -0,0 +1,57 @@ +# The MIT License (MIT) +# +# Copyright (c) +# 2013 Matthew Arsenault +# 2015-2016 RWTH Aachen University, Federal Republic of Germany +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +option(SANITIZE_MEMORY "Enable MemorySanitizer for sanitized targets." Off) + +set(FLAG_CANDIDATES + "-g -fsanitize=memory" +) + + +include(sanitize-helpers) + +if (SANITIZE_MEMORY) + if (NOT ${CMAKE_SYSTEM_NAME} STREQUAL "Linux") + message(WARNING "MemorySanitizer disabled for target ${TARGET} because " + "MemorySanitizer is supported for Linux systems only.") + set(SANITIZE_MEMORY Off CACHE BOOL + "Enable MemorySanitizer for sanitized targets." FORCE) + elseif (NOT ${CMAKE_SIZEOF_VOID_P} EQUAL 8) + message(WARNING "MemorySanitizer disabled for target ${TARGET} because " + "MemorySanitizer is supported for 64bit systems only.") + set(SANITIZE_MEMORY Off CACHE BOOL + "Enable MemorySanitizer for sanitized targets." 
FORCE) + else () + sanitizer_check_compiler_flags("${FLAG_CANDIDATES}" "MemorySanitizer" + "MSan") + endif () +endif () + +function (add_sanitize_memory TARGET) + if (NOT SANITIZE_MEMORY) + return() + endif () + + sanitizer_add_flags(${TARGET} "MemorySanitizer" "MSan") +endfunction () diff --git a/cmake/sanitizers/FindSanitizers.cmake b/cmake/sanitizers/FindSanitizers.cmake new file mode 100755 index 00000000..101bab84 --- /dev/null +++ b/cmake/sanitizers/FindSanitizers.cmake @@ -0,0 +1,94 @@ +# The MIT License (MIT) +# +# Copyright (c) +# 2013 Matthew Arsenault +# 2015-2016 RWTH Aachen University, Federal Republic of Germany +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# If any of the used compiler is a GNU compiler, add a second option to static +# link against the sanitizers. +option(SANITIZE_LINK_STATIC "Try to link static against sanitizers." Off) + + + + +set(FIND_QUIETLY_FLAG "") +if (DEFINED Sanitizers_FIND_QUIETLY) + set(FIND_QUIETLY_FLAG "QUIET") +endif () + +find_package(ASan ${FIND_QUIETLY_FLAG}) +find_package(TSan ${FIND_QUIETLY_FLAG}) +find_package(MSan ${FIND_QUIETLY_FLAG}) +find_package(UBSan ${FIND_QUIETLY_FLAG}) + + + + +function(sanitizer_add_blacklist_file FILE) + if(NOT IS_ABSOLUTE ${FILE}) + set(FILE "${CMAKE_CURRENT_SOURCE_DIR}/${FILE}") + endif() + get_filename_component(FILE "${FILE}" REALPATH) + + sanitizer_check_compiler_flags("-fsanitize-blacklist=${FILE}" + "SanitizerBlacklist" "SanBlist") +endfunction() + +function(add_sanitizers ...) + # If no sanitizer is enabled, return immediately. + if (NOT (SANITIZE_ADDRESS OR SANITIZE_MEMORY OR SANITIZE_THREAD OR + SANITIZE_UNDEFINED)) + return() + endif () + + foreach (TARGET ${ARGV}) + # Check if this target will be compiled by exactly one compiler. Other- + # wise sanitizers can't be used and a warning should be printed once. + get_target_property(TARGET_TYPE ${TARGET} TYPE) + if (TARGET_TYPE STREQUAL "INTERFACE_LIBRARY") + message(WARNING "Can't use any sanitizers for target ${TARGET}, " + "because it is an interface library and cannot be " + "compiled directly.") + return() + endif () + sanitizer_target_compilers(${TARGET} TARGET_COMPILER) + list(LENGTH TARGET_COMPILER NUM_COMPILERS) + if (NUM_COMPILERS GREATER 1) + message(WARNING "Can't use any sanitizers for target ${TARGET}, " + "because it will be compiled by incompatible compilers. " + "Target will be compiled without sanitizers.") + return() + + # If the target is compiled by no or no known compiler, give a warning. 
+ elseif (NUM_COMPILERS EQUAL 0) + message(WARNING "Sanitizers for target ${TARGET} may not be" + " usable, because it uses no or an unknown compiler. " + "This is a false warning for targets using only " + "object lib(s) as input.") + endif () + + # Add sanitizers for target. + add_sanitize_address(${TARGET}) + add_sanitize_thread(${TARGET}) + add_sanitize_memory(${TARGET}) + add_sanitize_undefined(${TARGET}) + endforeach () +endfunction(add_sanitizers) diff --git a/cmake/sanitizers/FindTSan.cmake b/cmake/sanitizers/FindTSan.cmake new file mode 100644 index 00000000..3cba3c03 --- /dev/null +++ b/cmake/sanitizers/FindTSan.cmake @@ -0,0 +1,65 @@ +# The MIT License (MIT) +# +# Copyright (c) +# 2013 Matthew Arsenault +# 2015-2016 RWTH Aachen University, Federal Republic of Germany +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +option(SANITIZE_THREAD "Enable ThreadSanitizer for sanitized targets." Off) + +set(FLAG_CANDIDATES + "-g -fsanitize=thread" +) + + +# ThreadSanitizer is not compatible with MemorySanitizer. +if (SANITIZE_THREAD AND SANITIZE_MEMORY) + message(FATAL_ERROR "ThreadSanitizer is not compatible with " + "MemorySanitizer.") +endif () + + +include(sanitize-helpers) + +if (SANITIZE_THREAD) + if (NOT ${CMAKE_SYSTEM_NAME} STREQUAL "Linux" AND + NOT ${CMAKE_SYSTEM_NAME} STREQUAL "Darwin") + message(WARNING "ThreadSanitizer disabled for target ${TARGET} because " + "ThreadSanitizer is supported for Linux systems and macOS only.") + set(SANITIZE_THREAD Off CACHE BOOL + "Enable ThreadSanitizer for sanitized targets." FORCE) + elseif (NOT ${CMAKE_SIZEOF_VOID_P} EQUAL 8) + message(WARNING "ThreadSanitizer disabled for target ${TARGET} because " + "ThreadSanitizer is supported for 64bit systems only.") + set(SANITIZE_THREAD Off CACHE BOOL + "Enable ThreadSanitizer for sanitized targets." 
FORCE) + else () + sanitizer_check_compiler_flags("${FLAG_CANDIDATES}" "ThreadSanitizer" + "TSan") + endif () +endif () + +function (add_sanitize_thread TARGET) + if (NOT SANITIZE_THREAD) + return() + endif () + + sanitizer_add_flags(${TARGET} "ThreadSanitizer" "TSan") +endfunction () diff --git a/cmake/sanitizers/FindUBSan.cmake b/cmake/sanitizers/FindUBSan.cmake new file mode 100644 index 00000000..ae103f71 --- /dev/null +++ b/cmake/sanitizers/FindUBSan.cmake @@ -0,0 +1,46 @@ +# The MIT License (MIT) +# +# Copyright (c) +# 2013 Matthew Arsenault +# 2015-2016 RWTH Aachen University, Federal Republic of Germany +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +option(SANITIZE_UNDEFINED + "Enable UndefinedBehaviorSanitizer for sanitized targets." Off) + +set(FLAG_CANDIDATES + "-g -fsanitize=undefined" +) + + +include(sanitize-helpers) + +if (SANITIZE_UNDEFINED) + sanitizer_check_compiler_flags("${FLAG_CANDIDATES}" + "UndefinedBehaviorSanitizer" "UBSan") +endif () + +function (add_sanitize_undefined TARGET) + if (NOT SANITIZE_UNDEFINED) + return() + endif () + + sanitizer_add_flags(${TARGET} "UndefinedBehaviorSanitizer" "UBSan") +endfunction () diff --git a/cmake/sanitizers/asan-wrapper b/cmake/sanitizers/asan-wrapper new file mode 100755 index 00000000..5d541033 --- /dev/null +++ b/cmake/sanitizers/asan-wrapper @@ -0,0 +1,55 @@ +#!/bin/sh + +# The MIT License (MIT) +# +# Copyright (c) +# 2013 Matthew Arsenault +# 2015-2016 RWTH Aachen University, Federal Republic of Germany +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# This script is a wrapper for AddressSanitizer. In some special cases you need +# to preload AddressSanitizer to avoid error messages - e.g. if you're +# preloading another library to your application. At the moment this script will +# only do something, if we're running on a Linux platform. OSX might not be +# affected. + + +# Exit immediately, if platform is not Linux. +if [ "$(uname)" != "Linux" ] +then + exec $@ +fi + + +# Get the used libasan of the application ($1). If a libasan was found, it will +# be prepended to LD_PRELOAD. +libasan=$(ldd $1 | grep libasan | sed "s/^[[:space:]]//" | cut -d' ' -f1) +if [ -n "$libasan" ] +then + if [ -n "$LD_PRELOAD" ] + then + export LD_PRELOAD="$libasan:$LD_PRELOAD" + else + export LD_PRELOAD="$libasan" + fi +fi + +# Execute the application. +exec $@ diff --git a/cmake/sanitizers/sanitize-helpers.cmake b/cmake/sanitizers/sanitize-helpers.cmake new file mode 100755 index 00000000..3649b074 --- /dev/null +++ b/cmake/sanitizers/sanitize-helpers.cmake @@ -0,0 +1,177 @@ +# The MIT License (MIT) +# +# Copyright (c) +# 2013 Matthew Arsenault +# 2015-2016 RWTH Aachen University, Federal Republic of Germany +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# Helper function to get the language of a source file. +function (sanitizer_lang_of_source FILE RETURN_VAR) + get_filename_component(LONGEST_EXT "${FILE}" EXT) + # If extension is empty return. This can happen for extensionless headers + if("${LONGEST_EXT}" STREQUAL "") + set(${RETURN_VAR} "" PARENT_SCOPE) + return() + endif() + # Get shortest extension as some files can have dot in their names + string(REGEX REPLACE "^.*(\\.[^.]+)$" "\\1" FILE_EXT ${LONGEST_EXT}) + string(TOLOWER "${FILE_EXT}" FILE_EXT) + string(SUBSTRING "${FILE_EXT}" 1 -1 FILE_EXT) + + get_property(ENABLED_LANGUAGES GLOBAL PROPERTY ENABLED_LANGUAGES) + foreach (LANG ${ENABLED_LANGUAGES}) + list(FIND CMAKE_${LANG}_SOURCE_FILE_EXTENSIONS "${FILE_EXT}" TEMP) + if (NOT ${TEMP} EQUAL -1) + set(${RETURN_VAR} "${LANG}" PARENT_SCOPE) + return() + endif () + endforeach() + + set(${RETURN_VAR} "" PARENT_SCOPE) +endfunction () + + +# Helper function to get compilers used by a target. 
+function (sanitizer_target_compilers TARGET RETURN_VAR) + # Check if all sources for target use the same compiler. If a target uses + # e.g. C and Fortran mixed and uses different compilers (e.g. clang and + # gfortran) this can trigger huge problems, because different compilers may + # use different implementations for sanitizers. + set(BUFFER "") + get_target_property(TSOURCES ${TARGET} SOURCES) + foreach (FILE ${TSOURCES}) + # If expression was found, FILE is a generator-expression for an object + # library. Object libraries will be ignored. + string(REGEX MATCH "TARGET_OBJECTS:([^ >]+)" _file ${FILE}) + if ("${_file}" STREQUAL "") + sanitizer_lang_of_source(${FILE} LANG) + if (LANG) + list(APPEND BUFFER ${CMAKE_${LANG}_COMPILER_ID}) + endif () + endif () + endforeach () + + list(REMOVE_DUPLICATES BUFFER) + set(${RETURN_VAR} "${BUFFER}" PARENT_SCOPE) +endfunction () + + +# Helper function to check compiler flags for language compiler. +function (sanitizer_check_compiler_flag FLAG LANG VARIABLE) + if (${LANG} STREQUAL "C") + include(CheckCCompilerFlag) + check_c_compiler_flag("${FLAG}" ${VARIABLE}) + + elseif (${LANG} STREQUAL "CXX") + include(CheckCXXCompilerFlag) + check_cxx_compiler_flag("${FLAG}" ${VARIABLE}) + + elseif (${LANG} STREQUAL "Fortran") + # CheckFortranCompilerFlag was introduced in CMake 3.x. To be compatible + # with older Cmake versions, we will check if this module is present + # before we use it. Otherwise we will define Fortran coverage support as + # not available. + include(CheckFortranCompilerFlag OPTIONAL RESULT_VARIABLE INCLUDED) + if (INCLUDED) + check_fortran_compiler_flag("${FLAG}" ${VARIABLE}) + elseif (NOT CMAKE_REQUIRED_QUIET) + message(STATUS "Performing Test ${VARIABLE}") + message(STATUS "Performing Test ${VARIABLE}" + " - Failed (Check not supported)") + endif () + endif() +endfunction () + + +# Helper function to test compiler flags. +function (sanitizer_check_compiler_flags FLAG_CANDIDATES NAME PREFIX) + set(CMAKE_REQUIRED_QUIET ${${PREFIX}_FIND_QUIETLY}) + + get_property(ENABLED_LANGUAGES GLOBAL PROPERTY ENABLED_LANGUAGES) + foreach (LANG ${ENABLED_LANGUAGES}) + # Sanitizer flags are not dependend on language, but the used compiler. + # So instead of searching flags foreach language, search flags foreach + # compiler used. + set(COMPILER ${CMAKE_${LANG}_COMPILER_ID}) + if (NOT DEFINED ${PREFIX}_${COMPILER}_FLAGS) + foreach (FLAG ${FLAG_CANDIDATES}) + if(NOT CMAKE_REQUIRED_QUIET) + message(STATUS "Try ${COMPILER} ${NAME} flag = [${FLAG}]") + endif() + + set(CMAKE_REQUIRED_FLAGS "${FLAG}") + unset(${PREFIX}_FLAG_DETECTED CACHE) + sanitizer_check_compiler_flag("${FLAG}" ${LANG} + ${PREFIX}_FLAG_DETECTED) + + if (${PREFIX}_FLAG_DETECTED) + # If compiler is a GNU compiler, search for static flag, if + # SANITIZE_LINK_STATIC is enabled. 
+ if (SANITIZE_LINK_STATIC AND (${COMPILER} STREQUAL "GNU")) + string(TOLOWER ${PREFIX} PREFIX_lower) + sanitizer_check_compiler_flag( + "-static-lib${PREFIX_lower}" ${LANG} + ${PREFIX}_STATIC_FLAG_DETECTED) + + if (${PREFIX}_STATIC_FLAG_DETECTED) + set(FLAG "-static-lib${PREFIX_lower} ${FLAG}") + endif () + endif () + + set(${PREFIX}_${COMPILER}_FLAGS "${FLAG}" CACHE STRING + "${NAME} flags for ${COMPILER} compiler.") + mark_as_advanced(${PREFIX}_${COMPILER}_FLAGS) + break() + endif () + endforeach () + + if (NOT ${PREFIX}_FLAG_DETECTED) + set(${PREFIX}_${COMPILER}_FLAGS "" CACHE STRING + "${NAME} flags for ${COMPILER} compiler.") + mark_as_advanced(${PREFIX}_${COMPILER}_FLAGS) + + message(WARNING "${NAME} is not available for ${COMPILER} " + "compiler. Targets using this compiler will be " + "compiled without ${NAME}.") + endif () + endif () + endforeach () +endfunction () + + +# Helper to assign sanitizer flags for TARGET. +function (sanitizer_add_flags TARGET NAME PREFIX) + # Get list of compilers used by target and check, if sanitizer is available + # for this target. Other compiler checks like check for conflicting + # compilers will be done in add_sanitizers function. + sanitizer_target_compilers(${TARGET} TARGET_COMPILER) + list(LENGTH TARGET_COMPILER NUM_COMPILERS) + if ("${${PREFIX}_${TARGET_COMPILER}_FLAGS}" STREQUAL "") + return() + endif() + + # Set compile- and link-flags for target. + set_property(TARGET ${TARGET} APPEND_STRING + PROPERTY COMPILE_FLAGS " ${${PREFIX}_${TARGET_COMPILER}_FLAGS}") + set_property(TARGET ${TARGET} APPEND_STRING + PROPERTY COMPILE_FLAGS " ${SanBlist_${TARGET_COMPILER}_FLAGS}") + set_property(TARGET ${TARGET} APPEND_STRING + PROPERTY LINK_FLAGS " ${${PREFIX}_${TARGET_COMPILER}_FLAGS}") +endfunction () From 6dd187d4856a2f4baf9d50d70298499b0b5ffa0a Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Fri, 26 Jan 2024 07:56:54 +0900 Subject: [PATCH 124/139] format with black. --- python/sample.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/python/sample.py b/python/sample.py index 7c57bd14..4b80dba6 100644 --- a/python/sample.py +++ b/python/sample.py @@ -60,13 +60,13 @@ # # Or please consider using numpy_*** interface(e.g. numpy_vertices()) -for (i, v) in enumerate(attrib.vertices): +for i, v in enumerate(attrib.vertices): print("v[{}] = {}".format(i, v)) -for (i, v) in enumerate(attrib.normals): +for i, v in enumerate(attrib.normals): print("vn[{}] = {}".format(i, v)) -for (i, v) in enumerate(attrib.texcoords): +for i, v in enumerate(attrib.texcoords): print("vt[{}] = {}".format(i, t)) if is_numpy_available: @@ -95,7 +95,7 @@ for shape in shapes: print(shape.name) print("len(num_indices) = {}".format(len(shape.mesh.indices))) - for (i, idx) in enumerate(shape.mesh.indices): + for i, idx in enumerate(shape.mesh.indices): print("[{}] v_idx {}".format(i, idx.vertex_index)) print("[{}] vn_idx {}".format(i, idx.normal_index)) print("[{}] vt_idx {}".format(i, idx.texcoord_index)) @@ -103,7 +103,5 @@ if is_numpy_available: print("numpy_indices = {}".format(shape.mesh.numpy_indices())) - print( - "numpy_num_face_vertices = {}".format(shape.mesh.numpy_num_face_vertices()) - ) + print("numpy_num_face_vertices = {}".format(shape.mesh.numpy_num_face_vertices())) print("numpy_material_ids = {}".format(shape.mesh.numpy_material_ids())) From 6ac677b958a146b956635c95b40ac05d2502f564 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Fri, 26 Jan 2024 08:04:33 +0900 Subject: [PATCH 125/139] Remove travis ci badge. Add pypi badge. 
--- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 5e3ee798..9a3a8896 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # tinyobjloader -[![Build Status](https://travis-ci.org/tinyobjloader/tinyobjloader.svg?branch=master)](https://travis-ci.org/tinyobjloader/tinyobjloader) +[![PyPI version](https://badge.fury.io/py/tinyobjloader.svg)](https://badge.fury.io/py/tinyobjloader) [![AZ Build Status](https://dev.azure.com/tinyobjloader/tinyobjloader/_apis/build/status/tinyobjloader.tinyobjloader?branchName=master)](https://dev.azure.com/tinyobjloader/tinyobjloader/_build/latest?definitionId=1&branchName=master) From b2f045736b6322593e6e805155f871db5b0e21fd Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Fri, 26 Jan 2024 08:13:36 +0900 Subject: [PATCH 126/139] v2.0.0rc11 --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 13a36ea4..b46c7ce5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -4,7 +4,7 @@ cmake_minimum_required(VERSION 3.16) project(tinyobjloader CXX) set(TINYOBJLOADER_SOVERSION 2) -set(TINYOBJLOADER_VERSION 2.0.0-rc.10) +set(TINYOBJLOADER_VERSION 2.0.0-rc.11) set(PY_TARGET "aaa") #optional double precision support From 7b3ba0be7146b0414463203c31bdf6a9376accb2 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Sat, 27 Jan 2024 06:57:18 +0900 Subject: [PATCH 127/139] Fix parsing [w] component in `v` line. Improve python binding. - Expose more attrib to Python. --- CMakeLists.txt | 2 +- models/cube-vertex-w-component.obj | 31 +++++ python/bindings.cc | 97 +++++++++++++++- python/sample.py | 33 +++++- tiny_obj_loader.h | 181 ++++++++++++++++++----------- 5 files changed, 266 insertions(+), 78 deletions(-) create mode 100644 models/cube-vertex-w-component.obj diff --git a/CMakeLists.txt b/CMakeLists.txt index b46c7ce5..9d9d0efe 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -125,7 +125,7 @@ endif() if (TINYOBJLOADER_WITH_PYTHON) # pybind11 method: - pybind11_add_module(${PY_TARGET} ${CMAKE_SOURCE_DIR}/python/bindings.cc ) + pybind11_add_module(${PY_TARGET} ${CMAKE_SOURCE_DIR}/python/bindings.cc ${CMAKE_SOURCE_DIR}/python/tiny_obj_loader.cc) add_sanitizers(${PY_TARGET}) set_target_properties(${PY_TARGET} PROPERTIES OUTPUT_NAME "tinyobjloader") diff --git a/models/cube-vertex-w-component.obj b/models/cube-vertex-w-component.obj new file mode 100644 index 00000000..b909f26d --- /dev/null +++ b/models/cube-vertex-w-component.obj @@ -0,0 +1,31 @@ +mtllib cube.mtl + +v 0.000000 2.000000 2.000000 0.1 +v 0.000000 0.000000 2.000000 0.2 +v 2.000000 0.000000 2.000000 0.3 +v 2.000000 2.000000 2.000000 0.4 +v 0.000000 2.000000 0.000000 0.5 +v 0.000000 0.000000 0.000000 0.6 +v 2.000000 0.000000 0.000000 0.7 +v 2.000000 2.000000 0.000000 0.8 +# 8 vertices + +g front cube +usemtl white +f 1 2 3 4 +g back cube +# expects white material +f 8 7 6 5 +g right cube +usemtl red +f 4 3 7 8 +g top cube +usemtl white +f 5 1 4 8 +g left cube +usemtl green +f 5 6 2 1 +g bottom cube +usemtl white +f 2 6 7 3 +# 6 elements diff --git a/python/bindings.cc b/python/bindings.cc index 1e98ac7e..ce327e18 100644 --- a/python/bindings.cc +++ b/python/bindings.cc @@ -38,15 +38,41 @@ PYBIND11_MODULE(tinyobjloader, tobj_module) py::class_(tobj_module, "attrib_t") .def(py::init<>()) .def_readonly("vertices", &attrib_t::vertices) + .def_readonly("vertex_weights", &attrib_t::vertex_weights) + .def_readonly("skin_weights", &attrib_t::skin_weights) + .def_readonly("normals", 
&attrib_t::normals)
+      .def_readonly("texcoords", &attrib_t::texcoords)
+      .def_readonly("colors", &attrib_t::colors)
       .def("numpy_vertices", [] (attrib_t &instance) {
          auto ret = py::array_t<real_t>(instance.vertices.size());
          py::buffer_info buf = ret.request();
          memcpy(buf.ptr, instance.vertices.data(), instance.vertices.size() * sizeof(real_t));
          return ret;
       })
-      .def_readonly("normals", &attrib_t::normals)
-      .def_readonly("texcoords", &attrib_t::texcoords)
-      .def_readonly("colors", &attrib_t::colors)
+      .def("numpy_vertex_weights", [] (attrib_t &instance) {
+         auto ret = py::array_t<real_t>(instance.vertex_weights.size());
+         py::buffer_info buf = ret.request();
+         memcpy(buf.ptr, instance.vertex_weights.data(), instance.vertex_weights.size() * sizeof(real_t));
+         return ret;
+      })
+      .def("numpy_normals", [] (attrib_t &instance) {
+         auto ret = py::array_t<real_t>(instance.normals.size());
+         py::buffer_info buf = ret.request();
+         memcpy(buf.ptr, instance.normals.data(), instance.normals.size() * sizeof(real_t));
+         return ret;
+      })
+      .def("numpy_texcoords", [] (attrib_t &instance) {
+         auto ret = py::array_t<real_t>(instance.texcoords.size());
+         py::buffer_info buf = ret.request();
+         memcpy(buf.ptr, instance.texcoords.data(), instance.texcoords.size() * sizeof(real_t));
+         return ret;
+      })
+      .def("numpy_colors", [] (attrib_t &instance) {
+         auto ret = py::array_t<real_t>(instance.colors.size());
+         py::buffer_info buf = ret.request();
+         memcpy(buf.ptr, instance.colors.data(), instance.colors.size() * sizeof(real_t));
+         return ret;
+      })
       ;

   py::class_<shape_t>(tobj_module, "shape_t")
@@ -119,7 +145,7 @@ PYBIND11_MODULE(tinyobjloader, tobj_module)
     .def("GetCustomParameter", &material_t::GetCustomParameter)
     ;

-  py::class_<mesh_t>(tobj_module, "mesh_t")
+  py::class_<mesh_t>(tobj_module, "mesh_t", py::buffer_protocol())
     .def(py::init<>())
     .def_readonly("num_face_vertices", &mesh_t::num_face_vertices)
     .def("numpy_num_face_vertices", [] (mesh_t &instance) {
@@ -128,6 +154,41 @@
       memcpy(buf.ptr, instance.num_face_vertices.data(), instance.num_face_vertices.size() * sizeof(unsigned char));
       return ret;
     })
+    .def("vertex_indices", [](mesh_t &self) {
+      // NOTE: we cannot use py::buffer_info and py::buffer as a return type.
+      // py::memoryview is not suited for the vertex-indices use case either, since the indices
+      // data may still be used on the Python side after the C++ mesh_t object has been deleted.
+      //
+      // So create a dedicated Python object(std::vector<int>)
+
+      std::vector<int> indices;
+      indices.resize(self.indices.size());
+      for (size_t i = 0; i < self.indices.size(); i++) {
+        indices[i] = self.indices[i].vertex_index;
+      }
+
+      return indices;
+    })
+    .def("normal_indices", [](mesh_t &self) {
+
+      std::vector<int> indices;
+      indices.resize(self.indices.size());
+      for (size_t i = 0; i < self.indices.size(); i++) {
+        indices[i] = self.indices[i].normal_index;
+      }
+
+      return indices;
+    })
+    .def("texcoord_indices", [](mesh_t &self) {
+
+      std::vector<int> indices;
+      indices.resize(self.indices.size());
+      for (size_t i = 0; i < self.indices.size(); i++) {
+        indices[i] = self.indices[i].texcoord_index;
+      }
+
+      return indices;
+    })
     .def_readonly("indices", &mesh_t::indices)
     .def("numpy_indices", [] (mesh_t &instance) {
       // Flatten indexes. index_t is composed of 3 ints(vertex_index, normal_index, texcoord_index).
@@ -154,10 +215,34 @@ PYBIND11_MODULE(tinyobjloader, tobj_module) }); py::class_(tobj_module, "lines_t") - .def(py::init<>()); + .def(py::init<>()) + .def_readonly("indices", &lines_t::indices) + .def_readonly("num_line_vertices", &lines_t::num_line_vertices) + ; py::class_(tobj_module, "points_t") - .def(py::init<>()); + .def(py::init<>()) + .def_readonly("indices", &points_t::indices) + ; + py::class_(tobj_module, "joint_and_weight_t") + .def(py::init<>()) + .def_readonly("joint_id", &joint_and_weight_t::joint_id, "Joint index(NOTE: Joint info is provided externally, not from .obj") + .def_readonly("weight", &joint_and_weight_t::weight, "Weight value(NOTE: weight is not normalized)") + ; + + py::class_(tobj_module, "skin_weight_t") + .def(py::init<>()) + .def_readonly("vertex_id", &skin_weight_t::vertex_id) + .def_readonly("weightValues", &skin_weight_t::weightValues) + ; + + py::class_(tobj_module, "tag_t") + .def(py::init<>()) + .def_readonly("name", &tag_t::name) + .def_readonly("intValues", &tag_t::intValues) + .def_readonly("floatValues", &tag_t::floatValues) + .def_readonly("stringValues", &tag_t::stringValues) + ; } diff --git a/python/sample.py b/python/sample.py index 4b80dba6..8b7fc5ed 100644 --- a/python/sample.py +++ b/python/sample.py @@ -13,6 +13,9 @@ filename = "../models/cornell_box.obj" +if len(sys.argv) > 1: + filename = sys.argv[1] + reader = tinyobjloader.ObjReader() @@ -35,8 +38,11 @@ attrib = reader.GetAttrib() print("len(attrib.vertices) = ", len(attrib.vertices)) +print("len(attrib.vertex_weights) = ", len(attrib.vertex_weights)) print("len(attrib.normals) = ", len(attrib.normals)) print("len(attrib.texcoords) = ", len(attrib.texcoords)) +print("len(attrib.colors) = ", len(attrib.colors)) +print("len(attrib.skin_weights) = ", len(attrib.skin_weights)) # vertex data must be `xyzxyzxyz...` assert len(attrib.vertices) % 3 == 0 @@ -47,6 +53,9 @@ # texcoords data must be `uvuvuv...` assert len(attrib.texcoords) % 2 == 0 +# colors data must be `rgbrgbrgb...` +assert len(attrib.texcoords) % 3 == 0 + # Performance note # (direct?) array access through member variable is quite slow. 
# https://github.com/tinyobjloader/tinyobjloader/issues/275#issuecomment-753465833 @@ -63,16 +72,34 @@ for i, v in enumerate(attrib.vertices): print("v[{}] = {}".format(i, v)) +# vw is filled with 1.0 if [w] component is not present in `v` line in .obj +for i, w in enumerate(attrib.vertex_weights): + print("vweight[{}] = {}".format(i, w)) + for i, v in enumerate(attrib.normals): print("vn[{}] = {}".format(i, v)) for i, v in enumerate(attrib.texcoords): - print("vt[{}] = {}".format(i, t)) + print("vt[{}] = {}".format(i, v)) + +for i, v in enumerate(attrib.colors): + print("vcol[{}] = {}".format(i, v)) + +if len(attrib.skin_weights): + print("num skin weights", len(attrib.skin_weights)) + + for i, skin in enumerate(attrib.skin_weights): + print("skin_weight[{}]".format(i)) + print(" vertex_id = ", skin.vertex_id) + print(" len(weights) = ", len(skin.weightValues)) + for k, w in enumerate(skin.weightValues): + print(" [{}] joint_id: {}, weight: {}".format(k, w.joint_id, w.weight)) if is_numpy_available: print("numpy_v = {}".format(attrib.numpy_vertices())) print("numpy_vn = {}".format(attrib.numpy_normals())) print("numpy_vt = {}".format(attrib.numpy_texcoords())) + print("numpy_vcol = {}".format(attrib.numpy_colors())) materials = reader.GetMaterials() print("Num materials: ", len(materials)) @@ -101,6 +128,10 @@ print("[{}] vt_idx {}".format(i, idx.texcoord_index)) print("material_ids = {}".format(shape.mesh.material_ids)) + # faster access to indices + a = shape.mesh.vertex_indices() + print("vertex_indices", shape.mesh.vertex_indices()) + if is_numpy_available: print("numpy_indices = {}".format(shape.mesh.numpy_indices())) print("numpy_num_face_vertices = {}".format(shape.mesh.numpy_num_face_vertices())) diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h index dbb64c3c..c23acc0d 100644 --- a/tiny_obj_loader.h +++ b/tiny_obj_loader.h @@ -24,12 +24,16 @@ THE SOFTWARE. // // version 2.0.0 : Add new object oriented API. 1.x API is still provided. +// * Add python binding. // * Support line primitive. // * Support points primitive. // * Support multiple search path for .mtl(v1 API). -// * Support vertex weight `vw`(as an tinyobj extension) +// * Support vertex skinning weight `vw`(as an tinyobj +// extension). Note that this differs vertex weight([w] +// component in `v` line) // * Support escaped whitespece in mtllib -// * Add robust triangulation using Mapbox earcut(TINYOBJLOADER_USE_MAPBOX_EARCUT). +// * Add robust triangulation using Mapbox +// earcut(TINYOBJLOADER_USE_MAPBOX_EARCUT). // version 1.4.0 : Modifed ParseTextureNameAndOption API // version 1.3.1 : Make ParseTextureNameAndOption API public // version 1.3.0 : Separate warning and error message(breaking API of LoadObj) @@ -194,9 +198,9 @@ struct material_t { int dummy; // Suppress padding warning. - std::string ambient_texname; // map_Ka. For ambient or ambient occlusion. - std::string diffuse_texname; // map_Kd - std::string specular_texname; // map_Ks + std::string ambient_texname; // map_Ka. For ambient or ambient occlusion. 
+ std::string diffuse_texname; // map_Kd + std::string specular_texname; // map_Ks std::string specular_highlight_texname; // map_Ns std::string bump_texname; // map_bump, map_Bump, bump std::string displacement_texname; // disp @@ -675,6 +679,7 @@ bool ParseTextureNameAndOption(std::string *texname, texture_option_t *texopt, #endif #include + #include "mapbox/earcut.hpp" #ifdef __clang__ @@ -805,14 +810,14 @@ static inline std::string toString(const T &t) { return ss.str(); } -struct warning_context -{ - std::string *warn; - size_t line_number; +struct warning_context { + std::string *warn; + size_t line_number; }; // Make index zero-base, and also support relative index. -static inline bool fixIndex(int idx, int n, int *ret, bool allow_zero, const warning_context &context) { +static inline bool fixIndex(int idx, int n, int *ret, bool allow_zero, + const warning_context &context) { if (!ret) { return false; } @@ -825,8 +830,10 @@ static inline bool fixIndex(int idx, int n, int *ret, bool allow_zero, const war if (idx == 0) { // zero is not allowed according to the spec. if (context.warn) { - (*context.warn) += "A zero value index found (will have a value of -1 for normal and tex indices. Line " - + toString(context.line_number) + ").\n"; + (*context.warn) += + "A zero value index found (will have a value of -1 for normal and " + "tex indices. Line " + + toString(context.line_number) + ").\n"; } (*ret) = idx - 1; @@ -835,7 +842,7 @@ static inline bool fixIndex(int idx, int n, int *ret, bool allow_zero, const war if (idx < 0) { (*ret) = n + idx; // negative value = relative - if((*ret) < 0){ + if ((*ret) < 0) { return false; // invalid relative index } return true; @@ -996,7 +1003,8 @@ static bool tryParseDouble(const char *s, const char *s_end, double *result) { while (end_not_reached && IS_DIGIT(*curr)) { // To avoid annoying MSVC's min/max macro definiton, // Use hardcoded int max value - if (exponent > (2147483647/10)) { // 2147483647 = std::numeric_limits::max() + if (exponent > + (2147483647 / 10)) { // 2147483647 = std::numeric_limits::max() // Integer overflow goto fail; } @@ -1058,6 +1066,7 @@ static inline void parseReal3(real_t *x, real_t *y, real_t *z, (*z) = parseReal(token, default_z); } +#if 0 // not used static inline void parseV(real_t *x, real_t *y, real_t *z, real_t *w, const char **token, const double default_x = 0.0, const double default_y = 0.0, @@ -1068,26 +1077,45 @@ static inline void parseV(real_t *x, real_t *y, real_t *z, real_t *w, (*z) = parseReal(token, default_z); (*w) = parseReal(token, default_w); } +#endif // Extension: parse vertex with colors(6 items) -static inline bool parseVertexWithColor(real_t *x, real_t *y, real_t *z, - real_t *r, real_t *g, real_t *b, - const char **token, - const double default_x = 0.0, - const double default_y = 0.0, - const double default_z = 0.0) { +// Return 3: xyz, 4: xyzw, 6: xyzrgb +// `r`: red(case 6) or [w](case 4) +static inline int parseVertexWithColor(real_t *x, real_t *y, real_t *z, + real_t *r, real_t *g, real_t *b, + const char **token, + const double default_x = 0.0, + const double default_y = 0.0, + const double default_z = 0.0) { + // TODO: Check error (*x) = parseReal(token, default_x); (*y) = parseReal(token, default_y); (*z) = parseReal(token, default_z); - const bool found_color = - parseReal(token, r) && parseReal(token, g) && parseReal(token, b); + // - 4 components(x, y, z, w) ot 6 components + bool has_r = parseReal(token, r); + + if (!has_r) { + (*r) = (*g) = (*b) = 1.0; + return 3; + } + + bool has_g = 
parseReal(token, g); - if (!found_color) { + if (!has_g) { + (*g) = (*b) = 1.0; + return 4; + } + + bool has_b = parseReal(token, b); + + if (!has_b) { (*r) = (*g) = (*b) = 1.0; + return 3; // treated as xyz } - return found_color; + return 6; } static inline bool parseOnOff(const char **token, bool default_value = true) { @@ -1424,13 +1452,11 @@ static int pnpoly(int nvert, T *vertx, T *verty, T testx, T testy) { struct TinyObjPoint { real_t x, y, z; TinyObjPoint() : x(0), y(0), z(0) {} - TinyObjPoint(real_t x_, real_t y_, real_t z_) : - x(x_), y(y_), z(z_) {} + TinyObjPoint(real_t x_, real_t y_, real_t z_) : x(x_), y(y_), z(z_) {} }; inline TinyObjPoint cross(const TinyObjPoint &v1, const TinyObjPoint &v2) { - return TinyObjPoint(v1.y * v2.z - v1.z * v2.y, - v1.z * v2.x - v1.x * v2.z, + return TinyObjPoint(v1.y * v2.z - v1.z * v2.y, v1.z * v2.x - v1.x * v2.z, v1.x * v2.y - v1.y * v2.x); } @@ -1439,23 +1465,19 @@ inline real_t dot(const TinyObjPoint &v1, const TinyObjPoint &v2) { } inline real_t GetLength(TinyObjPoint &e) { - return std::sqrt(e.x*e.x + e.y*e.y + e.z*e.z); + return std::sqrt(e.x * e.x + e.y * e.y + e.z * e.z); } inline TinyObjPoint Normalize(TinyObjPoint e) { - real_t inv_length = real_t(1) / GetLength(e); - return TinyObjPoint(e.x * inv_length, e.y * inv_length, e.z * inv_length ); + real_t inv_length = real_t(1) / GetLength(e); + return TinyObjPoint(e.x * inv_length, e.y * inv_length, e.z * inv_length); } - -inline TinyObjPoint WorldToLocal(const TinyObjPoint& a, - const TinyObjPoint& u, - const TinyObjPoint& v, - const TinyObjPoint& w) { - return TinyObjPoint(dot(a,u),dot(a,v),dot(a,w)); +inline TinyObjPoint WorldToLocal(const TinyObjPoint &a, const TinyObjPoint &u, + const TinyObjPoint &v, const TinyObjPoint &w) { + return TinyObjPoint(dot(a, u), dot(a, v), dot(a, w)); } - // TODO(syoyo): refactor function. 
static bool exportGroupsToShape(shape_t *shape, const PrimGroup &prim_group, const std::vector &tags, @@ -1597,7 +1619,8 @@ static bool exportGroupsToShape(shape_t *shape, const PrimGroup &prim_group, vertex_index_t i0 = face.vertex_indices[0]; vertex_index_t i0_2 = i0; - // TMW change: Find the normal axis of the polygon using Newell's method + // TMW change: Find the normal axis of the polygon using Newell's + // method TinyObjPoint n; for (size_t k = 0; k < npolys; ++k) { i0 = face.vertex_indices[k % npolys]; @@ -1615,22 +1638,24 @@ static bool exportGroupsToShape(shape_t *shape, const PrimGroup &prim_group, real_t v0y_2 = v[vi0_2 * 3 + 1]; real_t v0z_2 = v[vi0_2 * 3 + 2]; - const TinyObjPoint point1(v0x,v0y,v0z); - const TinyObjPoint point2(v0x_2,v0y_2,v0z_2); + const TinyObjPoint point1(v0x, v0y, v0z); + const TinyObjPoint point2(v0x_2, v0y_2, v0z_2); - TinyObjPoint a(point1.x - point2.x, point1.y - point2.y, point1.z - point2.z); - TinyObjPoint b(point1.x + point2.x, point1.y + point2.y, point1.z + point2.z); + TinyObjPoint a(point1.x - point2.x, point1.y - point2.y, + point1.z - point2.z); + TinyObjPoint b(point1.x + point2.x, point1.y + point2.y, + point1.z + point2.z); n.x += (a.y * b.z); n.y += (a.z * b.x); n.z += (a.x * b.y); } real_t length_n = GetLength(n); - //Check if zero length normal - if(length_n <= 0) { + // Check if zero length normal + if (length_n <= 0) { continue; } - //Negative is to flip the normal to the correct direction + // Negative is to flip the normal to the correct direction real_t inv_length = -real_t(1.0) / length_n; n.x *= inv_length; n.y *= inv_length; @@ -1639,10 +1664,10 @@ static bool exportGroupsToShape(shape_t *shape, const PrimGroup &prim_group, TinyObjPoint axis_w, axis_v, axis_u; axis_w = n; TinyObjPoint a; - if(std::fabs(axis_w.x) > real_t(0.9999999)) { - a = TinyObjPoint(0,1,0); + if (std::fabs(axis_w.x) > real_t(0.9999999)) { + a = TinyObjPoint(0, 1, 0); } else { - a = TinyObjPoint(1,0,0); + a = TinyObjPoint(1, 0, 0); } axis_v = Normalize(cross(axis_w, a)); axis_u = cross(axis_w, axis_v); @@ -1654,8 +1679,9 @@ static bool exportGroupsToShape(shape_t *shape, const PrimGroup &prim_group, std::vector polyline; - //TMW change: Find best normal and project v0x and v0y to those coordinates, instead of - //picking a plane aligned with an axis (which can flip polygons). + // TMW change: Find best normal and project v0x and v0y to those + // coordinates, instead of picking a plane aligned with an axis (which + // can flip polygons). // Fill polygon data(facevarying vertices). 
for (size_t k = 0; k < npolys; k++) { @@ -1668,7 +1694,7 @@ static bool exportGroupsToShape(shape_t *shape, const PrimGroup &prim_group, real_t v0y = v[vi0 * 3 + 1]; real_t v0z = v[vi0 * 3 + 2]; - TinyObjPoint polypoint(v0x,v0y,v0z); + TinyObjPoint polypoint(v0x, v0y, v0z); TinyObjPoint loc = WorldToLocal(polypoint, axis_u, axis_v, axis_w); polyline.push_back({loc.x, loc.y}); @@ -1686,19 +1712,19 @@ static bool exportGroupsToShape(shape_t *shape, const PrimGroup &prim_group, index_t idx0, idx1, idx2; idx0.vertex_index = face.vertex_indices[indices[3 * k + 0]].v_idx; idx0.normal_index = - face.vertex_indices[indices[3 * k + 0]].vn_idx; + face.vertex_indices[indices[3 * k + 0]].vn_idx; idx0.texcoord_index = - face.vertex_indices[indices[3 * k + 0]].vt_idx; + face.vertex_indices[indices[3 * k + 0]].vt_idx; idx1.vertex_index = face.vertex_indices[indices[3 * k + 1]].v_idx; idx1.normal_index = - face.vertex_indices[indices[3 * k + 1]].vn_idx; + face.vertex_indices[indices[3 * k + 1]].vn_idx; idx1.texcoord_index = - face.vertex_indices[indices[3 * k + 1]].vt_idx; + face.vertex_indices[indices[3 * k + 1]].vt_idx; idx2.vertex_index = face.vertex_indices[indices[3 * k + 2]].v_idx; idx2.normal_index = - face.vertex_indices[indices[3 * k + 2]].vn_idx; + face.vertex_indices[indices[3 * k + 2]].vn_idx; idx2.texcoord_index = - face.vertex_indices[indices[3 * k + 2]].vt_idx; + face.vertex_indices[indices[3 * k + 2]].vt_idx; shape->mesh.indices.push_back(idx0); shape->mesh.indices.push_back(idx1); @@ -1828,7 +1854,8 @@ static bool exportGroupsToShape(shape_t *shape, const PrimGroup &prim_group, // std::cout << "e0x, e0y, e1x, e1y " << e0x << ", " << e0y << ", " // << e1x << ", " << e1y << "\n"; - real_t area = (vx[0] * vy[1] - vy[0] * vx[1]) * static_cast(0.5); + real_t area = + (vx[0] * vy[1] - vy[0] * vx[1]) * static_cast(0.5); // std::cout << "cross " << cross << ", area " << area << "\n"; // if an internal angle if (cross * area < static_cast(0.0)) { @@ -2562,10 +2589,11 @@ bool LoadObj(attrib_t *attrib, std::vector *shapes, std::stringstream errss; std::vector v; + std::vector vertex_weights; // optional [w] component in `v` std::vector vn; std::vector vt; std::vector vc; - std::vector vw; + std::vector vw; // tinyobj extension: vertex skin weights std::vector tags; PrimGroup prim_group; std::string name; @@ -2585,7 +2613,7 @@ bool LoadObj(attrib_t *attrib, std::vector *shapes, shape_t shape; - bool found_all_colors = true; + bool found_all_colors = true; // check if all 'v' line has color info size_t line_num = 0; std::string linebuf; @@ -2624,13 +2652,17 @@ bool LoadObj(attrib_t *attrib, std::vector *shapes, real_t x, y, z; real_t r, g, b; - found_all_colors &= parseVertexWithColor(&x, &y, &z, &r, &g, &b, &token); + int num_components = parseVertexWithColor(&x, &y, &z, &r, &g, &b, &token); + found_all_colors &= (num_components == 6); v.push_back(x); v.push_back(y); v.push_back(z); - if (found_all_colors || default_vcols_fallback) { + vertex_weights.push_back( + r); // r = w, and initialized to 1.0 when `w` component is not found. + + if ((num_components == 6) || default_vcols_fallback) { vc.push_back(r); vc.push_back(g); vc.push_back(b); @@ -2723,7 +2755,9 @@ bool LoadObj(attrib_t *attrib, std::vector *shapes, static_cast(vn.size() / 3), static_cast(vt.size() / 2), &vi, context)) { if (err) { - (*err) += "Failed to parse `l' line (e.g. a zero value for vertex index. Line " + + (*err) += + "Failed to parse `l' line (e.g. a zero value for vertex index. 
" + "Line " + toString(line_num) + ").\n"; } return false; @@ -2752,7 +2786,9 @@ bool LoadObj(attrib_t *attrib, std::vector *shapes, static_cast(vn.size() / 3), static_cast(vt.size() / 2), &vi, context)) { if (err) { - (*err) += "Failed to parse `p' line (e.g. a zero value for vertex index. Line " + + (*err) += + "Failed to parse `p' line (e.g. a zero value for vertex index. " + "Line " + toString(line_num) + ").\n"; } return false; @@ -2785,7 +2821,9 @@ bool LoadObj(attrib_t *attrib, std::vector *shapes, static_cast(vn.size() / 3), static_cast(vt.size() / 2), &vi, context)) { if (err) { - (*err) += "Failed to parse `f' line (e.g. a zero value for vertex index or invalid relative vertex index). Line " + + (*err) += + "Failed to parse `f' line (e.g. a zero value for vertex index " + "or invalid relative vertex index). Line " + toString(line_num) + ").\n"; } return false; @@ -3075,14 +3113,16 @@ bool LoadObj(attrib_t *attrib, std::vector *shapes, if (greatest_vn_idx >= static_cast(vn.size() / 3)) { if (warn) { std::stringstream ss; - ss << "Vertex normal indices out of bounds (line " << line_num << ".)\n\n"; + ss << "Vertex normal indices out of bounds (line " << line_num + << ".)\n\n"; (*warn) += ss.str(); } } if (greatest_vt_idx >= static_cast(vt.size() / 2)) { if (warn) { std::stringstream ss; - ss << "Vertex texcoord indices out of bounds (line " << line_num << ".)\n\n"; + ss << "Vertex texcoord indices out of bounds (line " << line_num + << ".)\n\n"; (*warn) += ss.str(); } } @@ -3104,7 +3144,7 @@ bool LoadObj(attrib_t *attrib, std::vector *shapes, } attrib->vertices.swap(v); - attrib->vertex_weights.swap(v); + attrib->vertex_weights.swap(vertex_weights); attrib->normals.swap(vn); attrib->texcoords.swap(vt); attrib->texcoord_ws.swap(vt); @@ -3166,11 +3206,12 @@ bool LoadObjWithCallback(std::istream &inStream, const callback_t &callback, real_t x, y, z; real_t r, g, b; - bool found_color = parseVertexWithColor(&x, &y, &z, &r, &g, &b, &token); + int num_components = parseVertexWithColor(&x, &y, &z, &r, &g, &b, &token); if (callback.vertex_cb) { callback.vertex_cb(user_data, x, y, z, r); // r=w is optional } if (callback.vertex_color_cb) { + bool found_color = (num_components == 6); callback.vertex_color_cb(user_data, x, y, z, r, g, b, found_color); } continue; From f767ddfde2ee2853a65f5209a991bbfc1bb2c727 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Sat, 27 Jan 2024 07:10:07 +0900 Subject: [PATCH 128/139] fix python binding build. --- python/bindings.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/bindings.cc b/python/bindings.cc index ce327e18..e7e6c951 100644 --- a/python/bindings.cc +++ b/python/bindings.cc @@ -179,7 +179,7 @@ PYBIND11_MODULE(tinyobjloader, tobj_module) return indices; }) - .def("texcoord_indices", [](mesh_t &self) -> py::buffer_info { + .def("texcoord_indices", [](mesh_t &self) { std::vector indices; indices.resize(self.indices.size()); From b510ef8786ad8d7ff3d61584795edf0306410aa1 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Sat, 27 Jan 2024 07:47:08 +0900 Subject: [PATCH 129/139] bump version in cmake. 
--- CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 9d9d0efe..d7d642a2 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -4,8 +4,8 @@ cmake_minimum_required(VERSION 3.16) project(tinyobjloader CXX) set(TINYOBJLOADER_SOVERSION 2) -set(TINYOBJLOADER_VERSION 2.0.0-rc.11) -set(PY_TARGET "aaa") +set(TINYOBJLOADER_VERSION 2.0.0-rc.12) +set(PY_TARGET "pytinyobjloader") #optional double precision support option(TINYOBJLOADER_USE_DOUBLE "Build library with double precision instead of single (float)" OFF) From 2945a967c5303b2c8c14174117c45f3302591150 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Sat, 27 Jan 2024 08:34:12 +0900 Subject: [PATCH 130/139] apply python formatter. --- CMakeLists.txt | 2 +- README.md | 1 + python/sample.py | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index d7d642a2..9aea91fa 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -4,7 +4,7 @@ cmake_minimum_required(VERSION 3.16) project(tinyobjloader CXX) set(TINYOBJLOADER_SOVERSION 2) -set(TINYOBJLOADER_VERSION 2.0.0-rc.12) +set(TINYOBJLOADER_VERSION 2.0.0-rc.13) set(PY_TARGET "pytinyobjloader") #optional double precision support diff --git a/README.md b/README.md index 9a3a8896..2bf40db1 100644 --- a/README.md +++ b/README.md @@ -429,6 +429,7 @@ cibuildwheels + twine upload for each git tagging event is handled in Github Act #### How to bump version(For developer) +* Apply `black` to python files(`python/sample.py`) * Bump version in CMakeLists.txt * Commit and push `release`. Confirm C.I. build is OK. * Create tag starting with `v`(e.g. `v2.1.0`) diff --git a/python/sample.py b/python/sample.py index 8b7fc5ed..45c97612 100644 --- a/python/sample.py +++ b/python/sample.py @@ -93,7 +93,7 @@ print(" vertex_id = ", skin.vertex_id) print(" len(weights) = ", len(skin.weightValues)) for k, w in enumerate(skin.weightValues): - print(" [{}] joint_id: {}, weight: {}".format(k, w.joint_id, w.weight)) + print(" [{}] joint_id: {}, weight: {}".format(k, w.joint_id, w.weight)) if is_numpy_available: print("numpy_v = {}".format(attrib.numpy_vertices())) From 9b87dc00fd4c87d29f39deb9dceb597cbde366b2 Mon Sep 17 00:00:00 2001 From: Vertexwahn Date: Wed, 31 Jan 2024 21:35:23 +0100 Subject: [PATCH 131/139] Add Bazel build support (#376) --- BUILD.bazel | 9 +++++++++ MODULE.bazel | 9 +++++++++ 2 files changed, 18 insertions(+) create mode 100644 BUILD.bazel create mode 100644 MODULE.bazel diff --git a/BUILD.bazel b/BUILD.bazel new file mode 100644 index 00000000..dda60c76 --- /dev/null +++ b/BUILD.bazel @@ -0,0 +1,9 @@ +cc_library( + name = "tinyobjloader", + hdrs = ["tiny_obj_loader.h"], + copts = select({ + "@platforms//os:windows": [], + "//conditions:default": ["-Wno-maybe-uninitialized"], + }), + visibility = ["//visibility:public"], +) diff --git a/MODULE.bazel b/MODULE.bazel new file mode 100644 index 00000000..f8859286 --- /dev/null +++ b/MODULE.bazel @@ -0,0 +1,9 @@ +module( + name = "tinyobjloader", + compatibility_level = 1, +) + +bazel_dep( + name = "platforms", + version = "0.0.8", +) From cab4ad7254cbf7eaaafdb73d272f99e92f166df8 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Thu, 1 Feb 2024 05:41:42 +0900 Subject: [PATCH 132/139] bump cibuildwheels version to fix CI build. 
--- .github/workflows/wheels.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 91082540..b37104a2 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -19,7 +19,7 @@ jobs: fetch-tags: true # Optional, use if you use setuptools_scm - name: Build wheels - uses: pypa/cibuildwheel@v2.16.2 + uses: pypa/cibuildwheel@v2.16.5 # to supply options, put them in 'env', like: # env: # CIBW_SOME_OPTION: value @@ -50,7 +50,7 @@ jobs: fetch-tags: true # Optional, use if you use setuptools_scm - name: Build wheels - uses: pypa/cibuildwheel@v2.16.2 + uses: pypa/cibuildwheel@v2.16.5 # to supply options, put them in 'env', like: # env: # CIBW_SOME_OPTION: value From 50461d0e0a77c178bb478e9319d7de82f469a848 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Sat, 24 Aug 2024 02:06:11 +0900 Subject: [PATCH 133/139] Create FUNDING.yml --- .github/FUNDING.yml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 .github/FUNDING.yml diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml new file mode 100644 index 00000000..0fd988d9 --- /dev/null +++ b/.github/FUNDING.yml @@ -0,0 +1,15 @@ +# These are supported funding model platforms + +github: syoyo # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] +patreon: # Replace with a single Patreon username +open_collective: # Replace with a single Open Collective username +ko_fi: # Replace with a single Ko-fi username +tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel +community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry +liberapay: # Replace with a single Liberapay username +issuehunt: # Replace with a single IssueHunt username +lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry +polar: # Replace with a single Polar username +buy_me_a_coffee: # Replace with a single Buy Me a Coffee username +thanks_dev: # Replace with a single thanks.dev username +custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] From b5346fac929c8a862a1c1d4d89db5477d30b7e6f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?El=C3=ADas=20Mart=C3=ADnez?= Date: Wed, 23 Oct 2024 19:27:25 +0200 Subject: [PATCH 134/139] Update README.md (#387) Fix wrong C standard --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 2bf40db1..5ed88291 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@ Tiny but powerful single file wavefront obj loader written in C++03. No dependen `tinyobjloader` is good for embedding .obj loader to your (global illumination) renderer ;-) -If you are looking for C89 version, please see https://github.com/syoyo/tinyobjloader-c . +If you are looking for C99 version, please see https://github.com/syoyo/tinyobjloader-c . 
Version notice -------------- From 5cd3842fdca3b06cc993801cff1825fc6d999068 Mon Sep 17 00:00:00 2001 From: Diego Sanz <35377545+ElDigoXD@users.noreply.github.com> Date: Wed, 6 Nov 2024 16:13:01 +0100 Subject: [PATCH 135/139] Update README.md (fix typo) (#388) tri*na*gulation -> tri*an*gulation --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 5ed88291..8569a352 100644 --- a/README.md +++ b/README.md @@ -250,7 +250,7 @@ You can enable `double(64bit)` precision by using `TINYOBJLOADER_USE_DOUBLE` def When you enable `triangulation`(default is enabled), TinyObjLoader triangulate polygons(faces with 4 or more vertices). -Built-in trinagulation code may not work well in some polygon shape. +Built-in triangulation code may not work well in some polygon shape. You can define `TINYOBJLOADER_USE_MAPBOX_EARCUT` for robust triangulation using `mapbox/earcut.hpp`. This requires C++11 compiler though. And you need to copy `mapbox/earcut.hpp` to your project. @@ -260,7 +260,7 @@ If you have your own `mapbox/earcut.hpp` file incuded in your project, you can d ```c++ #define TINYOBJLOADER_IMPLEMENTATION // define this in only *one* .cc -// Optional. define TINYOBJLOADER_USE_MAPBOX_EARCUT gives robust trinagulation. Requires C++11 +// Optional. define TINYOBJLOADER_USE_MAPBOX_EARCUT gives robust triangulation. Requires C++11 //#define TINYOBJLOADER_USE_MAPBOX_EARCUT #include "tiny_obj_loader.h" @@ -332,7 +332,7 @@ for (size_t s = 0; s < shapes.size(); s++) { ```c++ #define TINYOBJLOADER_IMPLEMENTATION // define this in only *one* .cc -// Optional. define TINYOBJLOADER_USE_MAPBOX_EARCUT gives robust trinagulation. Requires C++11 +// Optional. define TINYOBJLOADER_USE_MAPBOX_EARCUT gives robust triangulation. Requires C++11 //#define TINYOBJLOADER_USE_MAPBOX_EARCUT #include "tiny_obj_loader.h" From 3201329ba83c244e85a4933f5a08f813740761a3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sebastian=20Sch=C3=A4fer?= <38749758+scschaefer@users.noreply.github.com> Date: Sat, 23 Nov 2024 23:20:00 +0100 Subject: [PATCH 136/139] Fix parsing of comments at the end of lines for tokens with variable number of elements. 
(#389) (#390) --- tiny_obj_loader.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h index c23acc0d..829ea6de 100644 --- a/tiny_obj_loader.h +++ b/tiny_obj_loader.h @@ -2708,7 +2708,7 @@ bool LoadObj(attrib_t *attrib, std::vector *shapes, sw.vertex_id = vid; - while (!IS_NEW_LINE(token[0])) { + while (!IS_NEW_LINE(token[0]) && token[0] != '#') { real_t j, w; // joint_id should not be negative, weight may be negative // TODO(syoyo): # of elements check @@ -2749,7 +2749,7 @@ bool LoadObj(attrib_t *attrib, std::vector *shapes, __line_t line; - while (!IS_NEW_LINE(token[0])) { + while (!IS_NEW_LINE(token[0]) && token[0] != '#') { vertex_index_t vi; if (!parseTriple(&token, static_cast(v.size() / 3), static_cast(vn.size() / 3), @@ -2780,7 +2780,7 @@ bool LoadObj(attrib_t *attrib, std::vector *shapes, __points_t pts; - while (!IS_NEW_LINE(token[0])) { + while (!IS_NEW_LINE(token[0]) && token[0] != '#') { vertex_index_t vi; if (!parseTriple(&token, static_cast(v.size() / 3), static_cast(vn.size() / 3), @@ -2815,7 +2815,7 @@ bool LoadObj(attrib_t *attrib, std::vector *shapes, face.smoothing_group_id = current_smoothing_id; face.vertex_indices.reserve(3); - while (!IS_NEW_LINE(token[0])) { + while (!IS_NEW_LINE(token[0]) && token[0] != '#') { vertex_index_t vi; if (!parseTriple(&token, static_cast(v.size() / 3), static_cast(vn.size() / 3), @@ -2951,7 +2951,7 @@ bool LoadObj(attrib_t *attrib, std::vector *shapes, std::vector names; - while (!IS_NEW_LINE(token[0])) { + while (!IS_NEW_LINE(token[0]) && token[0] != '#') { std::string str = parseString(&token); names.push_back(str); token += strspn(token, " \t\r"); // skip tag @@ -3245,7 +3245,7 @@ bool LoadObjWithCallback(std::istream &inStream, const callback_t &callback, token += strspn(token, " \t"); indices.clear(); - while (!IS_NEW_LINE(token[0])) { + while (!IS_NEW_LINE(token[0]) && token[0] != '#') { vertex_index_t vi = parseRawTriple(&token); index_t idx; @@ -3360,7 +3360,7 @@ bool LoadObjWithCallback(std::istream &inStream, const callback_t &callback, if (token[0] == 'g' && IS_SPACE((token[1]))) { names.clear(); - while (!IS_NEW_LINE(token[0])) { + while (!IS_NEW_LINE(token[0]) && token[0] != '#') { std::string str = parseString(&token); names.push_back(str); token += strspn(token, " \t\r"); // skip tag From fe9e7130a0eee720a28f39b33852108217114076 Mon Sep 17 00:00:00 2001 From: Syoyo Fujita Date: Sun, 24 Nov 2024 07:20:56 +0900 Subject: [PATCH 137/139] add regression test for #389 --- models/issue-389-comment.obj | 44 ++++++++++++++++++++++++++++++++++++ tests/tester.cc | 27 ++++++++++++++++++++++ 2 files changed, 71 insertions(+) create mode 100644 models/issue-389-comment.obj diff --git a/models/issue-389-comment.obj b/models/issue-389-comment.obj new file mode 100644 index 00000000..cf16d926 --- /dev/null +++ b/models/issue-389-comment.obj @@ -0,0 +1,44 @@ +g Part 1 +v 0.0576127 0.0488792 0.0423 +v 0.0576127 0.0488792 0 +v -0.0483158 0.0488792 0 +v -0.0483158 0.0488792 0.0423 +v -0.0483158 -0.0139454 0 +v -0.0483158 -0.0139454 0.0423 +v 0.0576127 -0.0139454 0 +v 0.0576127 -0.0139454 0.0423 +vn 0 1 0 +vn -1 0 0 +vn 0 -1 0 +vn 1 0 0 +vn 0 0 1 +vn 0 0 -1 +o mesh0 +f 1//1 2//1 3//1 +f 3//1 4//1 1//1 +o mesh1 +f 4//2 3//2 5//2 +f 5//2 6//2 4//2 +o mesh2 +f 6//3 5//3 7//3 +f 7//3 8//3 6//3 +o mesh3 +f 8//4 7//4 2//4 +f 2//4 1//4 8//4 +o mesh4 +f 8//5 1//5 4//5 +f 4//5 6//5 8//5 +o mesh5 +f 5//6 3//6 2//6 +f 2//6 7//6 5//6 + +# Zusätzliche Linien (aus der Oberseite) +o lines +v 
0.0576127 0.0488792 0.0423 # Startpunkt Linie 1 (Ecke 1 Oberseite) +v 0.0576127 0.0488792 0.2423 # Endpunkt Linie 1 (2m Höhe) +v -0.0483158 -0.0139454 0.0423 # Startpunkt Linie 2 (Ecke 6 Oberseite) +v -0.0483158 -0.0139454 0.2423 # Endpunkt Linie 2 (2m Höhe) + +# Linien +l 1 9 # Linie 1 +l 6 10 # Linie 2 diff --git a/tests/tester.cc b/tests/tester.cc index 2f336c76..780d9755 100644 --- a/tests/tester.cc +++ b/tests/tester.cc @@ -1408,6 +1408,31 @@ void test_face_missing_issue295() { TEST_CHECK((3 * 28) == shapes[0].mesh.indices.size()); // 28 triangle faces x 3 } +void test_comment_issue389() { + tinyobj::attrib_t attrib; + std::vector shapes; + std::vector materials; + + std::string warn; + std::string err; + bool ret = tinyobj::LoadObj( + &attrib, &shapes, &materials, &warn, &err, + "../models/issue-389-comment.obj", + gMtlBasePath, /* triangualte */false); + + TEST_CHECK(warn.empty()); + + if (!warn.empty()) { + std::cout << "WARN: " << warn << std::endl; + } + + if (!err.empty()) { + std::cerr << "ERR: " << err << std::endl; + } + + TEST_CHECK(true == ret); +} + // Fuzzer test. // Just check if it does not crash. // Disable by default since Windows filesystem can't create filename of afl @@ -1511,6 +1536,8 @@ TEST_LIST = { test_mtl_filename_with_whitespace_issue46}, {"test_face_missing_issue295", test_face_missing_issue295}, + {"test_comment_issue389", + test_comment_issue389}, {"test_invalid_relative_vertex_index", test_invalid_relative_vertex_index}, {"test_invalid_texture_vertex_index", From 3bb554cf74428d7db13418b4aca1b9752a1d2be8 Mon Sep 17 00:00:00 2001 From: Sean Curtis Date: Wed, 29 Jan 2025 05:47:59 -0800 Subject: [PATCH 138/139] Correct multi-material parsing (#392) Each material gets is state of having specified a diffuse color (kd) reset so that one material doesn't infect another. 
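
A short Python sketch of the behavior this fixes, using the issue-391 files added below. It assumes material_t.name and material_t.diffuse are exposed by the Python binding as used in python/sample.py; the C++ regression test added in this patch checks the same expectations.

import tinyobjloader

reader = tinyobjloader.ObjReader()

if not reader.ParseFromFile("../models/issue-391.obj"):
    raise SystemExit(reader.Error())

for m in reader.GetMaterials():
    # "has_map" sets only map_Kd, so its diffuse keeps the 0.6 grey default;
    # "has_kd" keeps the red Kd it declares. Before this fix, the "Kd was set"
    # flag leaked from one newmtl block into the next.
    print(m.name, m.diffuse)  # expected: has_kd -> red, has_map -> grey
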
--- models/issue-391.mtl | 4 ++++ models/issue-391.obj | 9 +++++++++ tests/tester.cc | 37 +++++++++++++++++++++++++++++++++++++ tiny_obj_loader.h | 1 + 4 files changed, 51 insertions(+) create mode 100644 models/issue-391.mtl create mode 100644 models/issue-391.obj diff --git a/models/issue-391.mtl b/models/issue-391.mtl new file mode 100644 index 00000000..c23ced4b --- /dev/null +++ b/models/issue-391.mtl @@ -0,0 +1,4 @@ +newmtl has_kd +Kd 1 0 0 +newmtl has_map +map_Kd test.png \ No newline at end of file diff --git a/models/issue-391.obj b/models/issue-391.obj new file mode 100644 index 00000000..06d8774b --- /dev/null +++ b/models/issue-391.obj @@ -0,0 +1,9 @@ +mtllib issue-391.mtl +v 0 0 0 +v 1 0 0 +v 0 1 0 +vn 0 0 1 +usemtl has_map +f 1//1 2//1 3//1 +usemtl has_kd +f 1//1 2//1 3//1 \ No newline at end of file diff --git a/tests/tester.cc b/tests/tester.cc index 780d9755..41d02ed2 100644 --- a/tests/tester.cc +++ b/tests/tester.cc @@ -1433,6 +1433,41 @@ void test_comment_issue389() { TEST_CHECK(true == ret); } +void test_default_kd_for_multiple_materials_issue391() { + tinyobj::attrib_t attrib; + std::vector shapes; + std::vector materials; + + std::string warn; + std::string err; + bool ret = tinyobj::LoadObj(&attrib, &shapes, &materials, &warn, &err, + "../models/issue-391.obj", gMtlBasePath); + if (!warn.empty()) { + std::cout << "WARN: " << warn << std::endl; + } + + if (!err.empty()) { + std::cerr << "ERR: " << err << std::endl; + } + + const tinyobj::real_t kGrey[] = {0.6, 0.6, 0.6}; + const tinyobj::real_t kRed[] = {1.0, 0.0, 0.0}; + + TEST_CHECK(true == ret); + TEST_CHECK(2 == materials.size()); + for (size_t i = 0; i < materials.size(); ++i) { + const tinyobj::material_t& material = materials[i]; + if (material.name == "has_map") { + for (int i = 0; i < 3; ++i) TEST_CHECK(material.diffuse[i] == kGrey[i]); + } else if (material.name == "has_kd") { + for (int i = 0; i < 3; ++i) TEST_CHECK(material.diffuse[i] == kRed[i]); + } else { + std::cerr << "Unexpected material found!" << std::endl; + TEST_CHECK(false); + } + } +} + // Fuzzer test. // Just check if it does not crash. 
// Disable by default since Windows filesystem can't create filename of afl @@ -1542,4 +1577,6 @@ TEST_LIST = { test_invalid_relative_vertex_index}, {"test_invalid_texture_vertex_index", test_invalid_texture_vertex_index}, + {"default_kd_for_multiple_materials_issue391", + test_default_kd_for_multiple_materials_issue391}, {NULL, NULL}}; diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h index 829ea6de..e5ea9470 100644 --- a/tiny_obj_loader.h +++ b/tiny_obj_loader.h @@ -2134,6 +2134,7 @@ void LoadMtl(std::map *material_map, has_d = false; has_tr = false; + has_kd = false; // set new mtl name token += 7; From a4e519b0a0f29c790464fcfeadfe25a7f9fa15ff Mon Sep 17 00:00:00 2001 From: Paul Bauriegel Date: Tue, 15 Jul 2025 19:00:37 +0200 Subject: [PATCH 139/139] Add remove Utf8 BOM (#394) --- models/cube_w_BOM.mtl | 24 +++++++++++++++++++ models/cube_w_BOM.obj | 32 +++++++++++++++++++++++++ tests/tester.cc | 54 +++++++++++++++++++++++++++++++++++++++++++ tiny_obj_loader.h | 17 ++++++++++++++ 4 files changed, 127 insertions(+) create mode 100644 models/cube_w_BOM.mtl create mode 100644 models/cube_w_BOM.obj diff --git a/models/cube_w_BOM.mtl b/models/cube_w_BOM.mtl new file mode 100644 index 00000000..96255b54 --- /dev/null +++ b/models/cube_w_BOM.mtl @@ -0,0 +1,24 @@ +newmtl white +Ka 0 0 0 +Kd 1 1 1 +Ks 0 0 0 + +newmtl red +Ka 0 0 0 +Kd 1 0 0 +Ks 0 0 0 + +newmtl green +Ka 0 0 0 +Kd 0 1 0 +Ks 0 0 0 + +newmtl blue +Ka 0 0 0 +Kd 0 0 1 +Ks 0 0 0 + +newmtl light +Ka 20 20 20 +Kd 1 1 1 +Ks 0 0 0 diff --git a/models/cube_w_BOM.obj b/models/cube_w_BOM.obj new file mode 100644 index 00000000..3c395f04 --- /dev/null +++ b/models/cube_w_BOM.obj @@ -0,0 +1,32 @@ +mtllib cube_w_BOM.mtl + +v 0.000000 2.000000 2.000000 +v 0.000000 0.000000 2.000000 +v 2.000000 0.000000 2.000000 +v 2.000000 2.000000 2.000000 +v 0.000000 2.000000 0.000000 +v 0.000000 0.000000 0.000000 +v 2.000000 0.000000 0.000000 +v 2.000000 2.000000 0.000000 +# 8 vertices + +g front cube +usemtl white +f 1 2 3 4 +# two white spaces between 'back' and 'cube' +g back cube +# expects white material +f 8 7 6 5 +g right cube +usemtl red +f 4 3 7 8 +g top cube +usemtl white +f 5 1 4 8 +g left cube +usemtl green +f 5 6 2 1 +g bottom cube +usemtl white +f 2 6 7 3 +# 6 elements diff --git a/tests/tester.cc b/tests/tester.cc index 41d02ed2..b5c4d0db 100644 --- a/tests/tester.cc +++ b/tests/tester.cc @@ -1465,9 +1465,61 @@ void test_default_kd_for_multiple_materials_issue391() { std::cerr << "Unexpected material found!" 
<< std::endl; TEST_CHECK(false); } + } +} + +void test_removeUtf8Bom() { + // Basic input with BOM + std::string withBOM = "\xEF\xBB\xBFhello world"; + TEST_CHECK(tinyobj::removeUtf8Bom(withBOM) == "hello world"); + + // Input without BOM + std::string noBOM = "hello world"; + TEST_CHECK(tinyobj::removeUtf8Bom(noBOM) == "hello world"); + + // Leaves short string unchanged + std::string shortStr = "\xEF"; + TEST_CHECK(tinyobj::removeUtf8Bom(shortStr) == shortStr); + + std::string shortStr2 = "\xEF\xBB"; + TEST_CHECK(tinyobj::removeUtf8Bom(shortStr2) == shortStr2); + + // BOM only returns empty string + std::string justBom = "\xEF\xBB\xBF"; + TEST_CHECK(tinyobj::removeUtf8Bom(justBom) == ""); + + // Empty string + std::string emptyStr = ""; + TEST_CHECK(tinyobj::removeUtf8Bom(emptyStr) == ""); +} + +void test_loadObj_with_BOM() { + tinyobj::attrib_t attrib; + std::vector shapes; + std::vector materials; + + std::string warn; + std::string err; + bool ret = tinyobj::LoadObj(&attrib, &shapes, &materials, &warn, &err, + "../models/cube_w_BOM.obj", gMtlBasePath); + + if (!warn.empty()) { + std::cout << "WARN: " << warn << std::endl; + } + + if (!err.empty()) { + std::cerr << "ERR: " << err << std::endl; } + + TEST_CHECK(true == ret); + TEST_CHECK(6 == shapes.size()); + TEST_CHECK(0 == shapes[0].name.compare("front cube")); + TEST_CHECK(0 == shapes[1].name.compare("back cube")); // multiple whitespaces + // are aggregated as + // single white space. } + // Fuzzer test. // Just check if it does not crash. // Disable by default since Windows filesystem can't create filename of afl @@ -1579,4 +1631,6 @@ TEST_LIST = { test_invalid_texture_vertex_index}, {"default_kd_for_multiple_materials_issue391", test_default_kd_for_multiple_materials_issue391}, + {"test_removeUtf8Bom", test_removeUtf8Bom}, + {"test_loadObj_with_BOM", test_loadObj_with_BOM}, {NULL, NULL}}; diff --git a/tiny_obj_loader.h b/tiny_obj_loader.h index e5ea9470..a927864e 100644 --- a/tiny_obj_loader.h +++ b/tiny_obj_loader.h @@ -810,6 +810,17 @@ static inline std::string toString(const T &t) { return ss.str(); } +static inline std::string removeUtf8Bom(const std::string& input) { + // UTF-8 BOM = 0xEF,0xBB,0xBF + if (input.size() >= 3 && + static_cast(input[0]) == 0xEF && + static_cast(input[1]) == 0xBB && + static_cast(input[2]) == 0xBF) { + return input.substr(3); // Skip BOM + } + return input; +} + struct warning_context { std::string *warn; size_t line_number; @@ -2110,6 +2121,9 @@ void LoadMtl(std::map *material_map, if (linebuf.empty()) { continue; } + if (line_no == 1) { + linebuf = removeUtf8Bom(linebuf); + } // Skip leading space. const char *token = linebuf.c_str(); @@ -2637,6 +2651,9 @@ bool LoadObj(attrib_t *attrib, std::vector *shapes, if (linebuf.empty()) { continue; } + if (line_num == 1) { + linebuf = removeUtf8Bom(linebuf); + } // Skip leading space. const char *token = linebuf.c_str();