diff --git a/.clang-format b/.clang-format index 7da2b4389e5..bb2e3440d42 100644 --- a/.clang-format +++ b/.clang-format @@ -1,13 +1,5 @@ --- Language: Cpp BasedOnStyle: Google -DerivePointerAlignment: false -PointerAlignment: Right -IndentPPDirectives: AfterHash -Cpp11BracedListStyle: false -AlwaysBreakTemplateDeclarations: false -AllowShortCaseLabelsOnASingleLine: true -SpaceAfterTemplateKeyword: false -AllowShortBlocksOnASingleLine: true ... diff --git a/goldens/swift/basic_generated.swift b/goldens/swift/basic_generated.swift index 16ef66363b4..b4cb23488e5 100644 --- a/goldens/swift/basic_generated.swift +++ b/goldens/swift/basic_generated.swift @@ -2,6 +2,10 @@ // swiftlint:disable all // swiftformat:disable all +#if canImport(Common) +import Common +#endif + import FlatBuffers public struct flatbuffers_goldens_Galaxy: FlatBufferObject, Verifiable { diff --git a/grpc/src/compiler/schema_interface.h b/grpc/src/compiler/schema_interface.h index f89288d7560..483d5ac6af3 100644 --- a/grpc/src/compiler/schema_interface.h +++ b/grpc/src/compiler/schema_interface.h @@ -24,8 +24,8 @@ #include #ifndef GRPC_CUSTOM_STRING -# include -# define GRPC_CUSTOM_STRING std::string +#include +#define GRPC_CUSTOM_STRING std::string #endif namespace grpc { @@ -55,10 +55,10 @@ struct Method : public CommentHolder { virtual grpc::string output_type_name() const = 0; virtual bool get_module_and_message_path_input( - grpc::string *str, grpc::string generator_file_name, + grpc::string* str, grpc::string generator_file_name, bool generate_in_pb2_grpc, grpc::string import_prefix) const = 0; virtual bool get_module_and_message_path_output( - grpc::string *str, grpc::string generator_file_name, + grpc::string* str, grpc::string generator_file_name, bool generate_in_pb2_grpc, grpc::string import_prefix) const = 0; virtual std::vector get_input_namespace_parts() const = 0; @@ -89,9 +89,9 @@ struct Service : public CommentHolder { struct Printer { virtual ~Printer() {} - virtual void Print(const std::map &vars, - const char *template_string) = 0; - virtual void Print(const char *string) = 0; + virtual void Print(const std::map& vars, + const char* template_string) = 0; + virtual void Print(const char* string) = 0; virtual void SetIndentationSize(const size_t size) = 0; virtual void Indent() = 0; virtual void Outdent() = 0; @@ -112,7 +112,7 @@ struct File : public CommentHolder { virtual std::unique_ptr service(int i) const = 0; virtual std::unique_ptr CreatePrinter( - grpc::string *str, const char indentation_type = ' ') const = 0; + grpc::string* str, const char indentation_type = ' ') const = 0; }; } // namespace grpc_generator diff --git a/grpc/tests/grpctest.cpp b/grpc/tests/grpctest.cpp index 6991b7ebb87..f4798c9b5f0 100644 --- a/grpc/tests/grpctest.cpp +++ b/grpc/tests/grpctest.cpp @@ -33,9 +33,9 @@ void message_builder_tests(); // code. It implements all rpcs specified in the FlatBuffers schema. class ServiceImpl final : public MyGame::Example::MonsterStorage::Service { virtual ::grpc::Status Store( - ::grpc::ServerContext *context, - const flatbuffers::grpc::Message *request, - flatbuffers::grpc::Message *response) override { + ::grpc::ServerContext* context, + const flatbuffers::grpc::Message* request, + flatbuffers::grpc::Message* response) override { // Create a response from the incoming request name. 
fbb_.Clear(); auto stat_offset = CreateStat( @@ -46,9 +46,9 @@ class ServiceImpl final : public MyGame::Example::MonsterStorage::Service { return grpc::Status::OK; } virtual ::grpc::Status Retrieve( - ::grpc::ServerContext *context, - const flatbuffers::grpc::Message *request, - ::grpc::ServerWriter> *writer) + ::grpc::ServerContext* context, + const flatbuffers::grpc::Message* request, + ::grpc::ServerWriter>* writer) override { for (int i = 0; i < 5; i++) { fbb_.Clear(); @@ -73,7 +73,7 @@ class ServiceImpl final : public MyGame::Example::MonsterStorage::Service { }; // Track the server instance, so we can terminate it later. -grpc::Server *server_instance = nullptr; +grpc::Server* server_instance = nullptr; // Mutex to protec this variable. std::mutex wait_for_server; std::condition_variable server_instance_cv; @@ -98,7 +98,8 @@ void RunServer() { server_instance->Wait(); } -template void StoreRPC(MonsterStorage::Stub *stub) { +template +void StoreRPC(MonsterStorage::Stub* stub) { Builder fbb; grpc::ClientContext context; // Build a request with the name set. @@ -119,7 +120,8 @@ template void StoreRPC(MonsterStorage::Stub *stub) { } } -template void RetrieveRPC(MonsterStorage::Stub *stub) { +template +void RetrieveRPC(MonsterStorage::Stub* stub) { Builder fbb; grpc::ClientContext context; fbb.Clear(); @@ -179,7 +181,7 @@ int grpc_server_test() { return 0; } -int main(int /*argc*/, const char * /*argv*/[]) { +int main(int /*argc*/, const char* /*argv*/[]) { message_builder_tests(); grpc_server_test(); diff --git a/grpc/tests/grpctest_callback_client_compile.cpp b/grpc/tests/grpctest_callback_client_compile.cpp index 8057cc5b6e0..b3947ee974e 100644 --- a/grpc/tests/grpctest_callback_client_compile.cpp +++ b/grpc/tests/grpctest_callback_client_compile.cpp @@ -15,44 +15,43 @@ using namespace MyGame::Example; // NOLINT // Unary async overloads static_assert(std::is_member_function_pointer< decltype(static_cast &, - flatbuffers::grpc::Message *, + ::grpc::ClientContext*, + const flatbuffers::grpc::Message&, + flatbuffers::grpc::Message*, std::function)>( &Stub::async_Store))>::value, "Function-form unary async_Store missing"); -static_assert(std::is_member_function_pointer< - decltype(static_cast &, - flatbuffers::grpc::Message *, - ::grpc::ClientUnaryReactor *)>( - &Stub::async_Store))>::value, - "Reactor-form unary async_Store missing"); +static_assert( + std::is_member_function_pointer< + decltype(static_cast&, + flatbuffers::grpc::Message*, + ::grpc::ClientUnaryReactor*)>(&Stub::async_Store))>::value, + "Reactor-form unary async_Store missing"); // Streaming reactor entry points static_assert( std::is_member_function_pointer< decltype(static_cast &, + ::grpc::ClientContext*, + const flatbuffers::grpc::Message&, ::grpc::ClientReadReactor > *)>(&Stub::async_Retrieve))>::value, + Monster> >*)>(&Stub::async_Retrieve))>::value, "Server streaming reactor async_Retrieve missing"); static_assert( std::is_member_function_pointer< decltype(static_cast *, + ::grpc::ClientContext*, flatbuffers::grpc::Message*, ::grpc::ClientWriteReactor > *)>(&Stub::async_GetMaxHitPoint))>::value, + Monster> >*)>(&Stub::async_GetMaxHitPoint))>::value, "Client streaming reactor async_GetMaxHitPoint missing"); static_assert(std::is_member_function_pointer< decltype(static_cast, - flatbuffers::grpc::Message > *)>( + flatbuffers::grpc::Message >*)>( &Stub::async_GetMinMaxHitPoints))>::value, "Bidi streaming reactor async_GetMinMaxHitPoints missing"); #endif // FLATBUFFERS_GENERATED_GRPC_CALLBACK_API && diff --git 
a/grpc/tests/message_builder_test.cpp b/grpc/tests/message_builder_test.cpp index 8af0c2141b9..cdaf7cb7da5 100644 --- a/grpc/tests/message_builder_test.cpp +++ b/grpc/tests/message_builder_test.cpp @@ -7,9 +7,9 @@ using MyGame::Example::Any_NONE; using MyGame::Example::CreateStat; using MyGame::Example::Vec3; -bool verify(flatbuffers::grpc::Message &msg, - const std::string &expected_name, Color expected_color) { - const Monster *monster = msg.GetRoot(); +bool verify(flatbuffers::grpc::Message& msg, + const std::string& expected_name, Color expected_color) { + const Monster* monster = msg.GetRoot(); const auto name = monster->name()->str(); const auto color = monster->color(); TEST_EQ(name, expected_name); @@ -17,8 +17,8 @@ bool verify(flatbuffers::grpc::Message &msg, return (name == expected_name) && (color == expected_color); } -bool release_n_verify(flatbuffers::grpc::MessageBuilder &mbb, - const std::string &expected_name, Color expected_color) { +bool release_n_verify(flatbuffers::grpc::MessageBuilder& mbb, + const std::string& expected_name, Color expected_color) { flatbuffers::grpc::Message msg = mbb.ReleaseMessage(); return verify(msg, expected_name, expected_color); } @@ -41,11 +41,13 @@ void builder_move_assign_after_releaseraw_test( TEST_EQ(src.GetSize(), 0); } -template +template struct BuilderReuseTests { static void builder_reusable_after_release_message_test( TestSelector selector) { - if (!selector.count(REUSABLE_AFTER_RELEASE_MESSAGE)) { return; } + if (!selector.count(REUSABLE_AFTER_RELEASE_MESSAGE)) { + return; + } flatbuffers::grpc::MessageBuilder mb; std::vector> buffers; @@ -58,7 +60,9 @@ struct BuilderReuseTests { } static void builder_reusable_after_release_test(TestSelector selector) { - if (!selector.count(REUSABLE_AFTER_RELEASE)) { return; } + if (!selector.count(REUSABLE_AFTER_RELEASE)) { + return; + } // FIXME: Populate-Release loop fails assert(GRPC_SLICE_IS_EMPTY(slice_)) in // SliceAllocator::allocate in the second iteration. 
@@ -74,7 +78,9 @@ struct BuilderReuseTests { } static void builder_reusable_after_releaseraw_test(TestSelector selector) { - if (!selector.count(REUSABLE_AFTER_RELEASE_RAW)) { return; } + if (!selector.count(REUSABLE_AFTER_RELEASE_RAW)) { + return; + } flatbuffers::grpc::MessageBuilder mb; for (int i = 0; i < 5; ++i) { @@ -82,14 +88,16 @@ struct BuilderReuseTests { mb.Finish(root_offset1); size_t size, offset; ::grpc::Slice slice; - const uint8_t *buf = mb.ReleaseRaw(size, offset, slice); + const uint8_t* buf = mb.ReleaseRaw(size, offset, slice); TEST_ASSERT_FUNC(verify(buf, offset, m1_name(), m1_color())); } } static void builder_reusable_after_release_and_move_assign_test( TestSelector selector) { - if (!selector.count(REUSABLE_AFTER_RELEASE_AND_MOVE_ASSIGN)) { return; } + if (!selector.count(REUSABLE_AFTER_RELEASE_AND_MOVE_ASSIGN)) { + return; + } // FIXME: Release-move_assign loop fails assert(p == // GRPC_SLICE_START_PTR(slice_)) in DetachedBuffer destructor after all the @@ -137,7 +145,9 @@ struct BuilderReuseTests { static void builder_reusable_after_releaseraw_and_move_assign_test( TestSelector selector) { - if (!selector.count(REUSABLE_AFTER_RELEASE_RAW_AND_MOVE_ASSIGN)) { return; } + if (!selector.count(REUSABLE_AFTER_RELEASE_RAW_AND_MOVE_ASSIGN)) { + return; + } flatbuffers::grpc::MessageBuilder dst; for (int i = 0; i < 5; ++i) { @@ -145,7 +155,7 @@ struct BuilderReuseTests { dst.Finish(root_offset1); size_t size, offset; ::grpc::Slice slice; - const uint8_t *buf = dst.ReleaseRaw(size, offset, slice); + const uint8_t* buf = dst.ReleaseRaw(size, offset, slice); TEST_ASSERT_FUNC(verify(buf, offset, m1_name(), m1_color())); SrcBuilder src; @@ -170,7 +180,7 @@ void slice_allocator_tests() { { size_t size = 2048; flatbuffers::grpc::SliceAllocator sa1; - uint8_t *buf = sa1.allocate(size); + uint8_t* buf = sa1.allocate(size); TEST_ASSERT_FUNC(buf != 0); buf[0] = 100; buf[size - 1] = 200; @@ -184,7 +194,7 @@ void slice_allocator_tests() { // move-assign test { flatbuffers::grpc::SliceAllocator sa1, sa2; - uint8_t *buf = sa1.allocate(2048); + uint8_t* buf = sa1.allocate(2048); sa1 = std::move(sa2); // sa1 deletes previously allocated memory in move-assign. // So buf is no longer usable here. @@ -194,7 +204,7 @@ void slice_allocator_tests() { /// This function does not populate exactly the first half of the table. But it /// could. -void populate_first_half(MyGame::Example::MonsterBuilder &wrapper, +void populate_first_half(MyGame::Example::MonsterBuilder& wrapper, flatbuffers::Offset name_offset) { wrapper.add_name(name_offset); wrapper.add_color(m1_color()); @@ -202,7 +212,7 @@ void populate_first_half(MyGame::Example::MonsterBuilder &wrapper, /// This function does not populate exactly the second half of the table. But it /// could. -void populate_second_half(MyGame::Example::MonsterBuilder &wrapper) { +void populate_second_half(MyGame::Example::MonsterBuilder& wrapper) { wrapper.add_hp(77); wrapper.add_mana(88); Vec3 vec3; @@ -216,9 +226,9 @@ void populate_second_half(MyGame::Example::MonsterBuilder &wrapper) { /// between FlatBufferBuilders. If MonsterBuilder had a fbb_ pointer, this hack /// would be unnecessary. That involves a code-generator change though. 
void test_only_hack_update_fbb_reference( - MyGame::Example::MonsterBuilder &monsterBuilder, - flatbuffers::grpc::MessageBuilder &mb) { - *reinterpret_cast(&monsterBuilder) = &mb; + MyGame::Example::MonsterBuilder& monsterBuilder, + flatbuffers::grpc::MessageBuilder& mb) { + *reinterpret_cast(&monsterBuilder) = &mb; } /// This test validates correctness of move conversion of FlatBufferBuilder to a @@ -351,15 +361,14 @@ void message_builder_tests() { BuilderTests::all_tests(); BuilderReuseTestSelector tests[6] = { - // REUSABLE_AFTER_RELEASE, // Assertion failed: - // (GRPC_SLICE_IS_EMPTY(slice_)) - // REUSABLE_AFTER_RELEASE_AND_MOVE_ASSIGN, // Assertion failed: (p == - // GRPC_SLICE_START_PTR(slice_) - - REUSABLE_AFTER_RELEASE_RAW, REUSABLE_AFTER_RELEASE_MESSAGE, - REUSABLE_AFTER_RELEASE_MESSAGE_AND_MOVE_ASSIGN, - REUSABLE_AFTER_RELEASE_RAW_AND_MOVE_ASSIGN - }; + // REUSABLE_AFTER_RELEASE, // Assertion failed: + // (GRPC_SLICE_IS_EMPTY(slice_)) + // REUSABLE_AFTER_RELEASE_AND_MOVE_ASSIGN, // Assertion failed: (p == + // GRPC_SLICE_START_PTR(slice_) + + REUSABLE_AFTER_RELEASE_RAW, REUSABLE_AFTER_RELEASE_MESSAGE, + REUSABLE_AFTER_RELEASE_MESSAGE_AND_MOVE_ASSIGN, + REUSABLE_AFTER_RELEASE_RAW_AND_MOVE_ASSIGN}; BuilderReuseTests::run_tests( TestSelector(tests, tests + 6)); diff --git a/include/flatbuffers/allocator.h b/include/flatbuffers/allocator.h index 30427190b6c..d451818568a 100644 --- a/include/flatbuffers/allocator.h +++ b/include/flatbuffers/allocator.h @@ -28,21 +28,21 @@ class Allocator { virtual ~Allocator() {} // Allocate `size` bytes of memory. - virtual uint8_t *allocate(size_t size) = 0; + virtual uint8_t* allocate(size_t size) = 0; // Deallocate `size` bytes of memory at `p` allocated by this allocator. - virtual void deallocate(uint8_t *p, size_t size) = 0; + virtual void deallocate(uint8_t* p, size_t size) = 0; // Reallocate `new_size` bytes of memory, replacing the old region of size // `old_size` at `p`. In contrast to a normal realloc, this grows downwards, // and is intended specifcally for `vector_downward` use. // `in_use_back` and `in_use_front` indicate how much of `old_size` is // actually in use at each end, and needs to be copied. - virtual uint8_t *reallocate_downward(uint8_t *old_p, size_t old_size, + virtual uint8_t* reallocate_downward(uint8_t* old_p, size_t old_size, size_t new_size, size_t in_use_back, size_t in_use_front) { FLATBUFFERS_ASSERT(new_size > old_size); // vector_downward only grows - uint8_t *new_p = allocate(new_size); + uint8_t* new_p = allocate(new_size); memcpy_downward(old_p, old_size, new_p, new_size, in_use_back, in_use_front); deallocate(old_p, old_size); @@ -54,7 +54,7 @@ class Allocator { // to `new_p` of `new_size`. Only memory of size `in_use_front` and // `in_use_back` will be copied from the front and back of the old memory // allocation. - void memcpy_downward(uint8_t *old_p, size_t old_size, uint8_t *new_p, + void memcpy_downward(uint8_t* old_p, size_t old_size, uint8_t* new_p, size_t new_size, size_t in_use_back, size_t in_use_front) { memcpy(new_p + new_size - in_use_back, old_p + old_size - in_use_back, diff --git a/include/flatbuffers/array.h b/include/flatbuffers/array.h index 68c245de85f..914f479aab7 100644 --- a/include/flatbuffers/array.h +++ b/include/flatbuffers/array.h @@ -27,7 +27,8 @@ namespace flatbuffers { // This is used as a helper type for accessing arrays. -template class Array { +template +class Array { // Array can carry only POD data types (scalars or structs). 
typedef typename flatbuffers::bool_constant::value> scalar_tag; @@ -55,7 +56,8 @@ template class Array { // If this is a Vector of enums, T will be its storage type, not the enum // type. This function makes it convenient to retrieve value with enum // type E. - template E GetEnum(uoffset_t i) const { + template + E GetEnum(uoffset_t i) const { return static_cast(Get(i)); } @@ -80,28 +82,28 @@ template class Array { // operation. For primitive types use @p Mutate directly. // @warning Assignments and reads to/from the dereferenced pointer are not // automatically converted to the correct endianness. - typename flatbuffers::conditional::type + typename flatbuffers::conditional::type GetMutablePointer(uoffset_t i) const { FLATBUFFERS_ASSERT(i < size()); - return const_cast(&data()[i]); + return const_cast(&data()[i]); } // Change elements if you have a non-const pointer to this object. - void Mutate(uoffset_t i, const T &val) { MutateImpl(scalar_tag(), i, val); } + void Mutate(uoffset_t i, const T& val) { MutateImpl(scalar_tag(), i, val); } // The raw data in little endian format. Use with care. - const uint8_t *Data() const { return data_; } + const uint8_t* Data() const { return data_; } - uint8_t *Data() { return data_; } + uint8_t* Data() { return data_; } // Similarly, but typed, much like std::vector::data - const T *data() const { return reinterpret_cast(Data()); } - T *data() { return reinterpret_cast(Data()); } + const T* data() const { return reinterpret_cast(Data()); } + T* data() { return reinterpret_cast(Data()); } // Copy data from a span with endian conversion. // If this Array and the span overlap, the behavior is undefined. void CopyFromSpan(flatbuffers::span src) { - const auto p1 = reinterpret_cast(src.data()); + const auto p1 = reinterpret_cast(src.data()); const auto p2 = Data(); FLATBUFFERS_ASSERT(!(p1 >= p2 && p1 < (p2 + length)) && !(p2 >= p1 && p2 < (p1 + length))); @@ -111,12 +113,12 @@ template class Array { } protected: - void MutateImpl(flatbuffers::true_type, uoffset_t i, const T &val) { + void MutateImpl(flatbuffers::true_type, uoffset_t i, const T& val) { FLATBUFFERS_ASSERT(i < size()); WriteScalar(data() + i, val); } - void MutateImpl(flatbuffers::false_type, uoffset_t i, const T &val) { + void MutateImpl(flatbuffers::false_type, uoffset_t i, const T& val) { *(GetMutablePointer(i)) = val; } @@ -131,7 +133,9 @@ template class Array { // Copy data from flatbuffers::span with endian conversion. void CopyFromSpanImpl(flatbuffers::false_type, flatbuffers::span src) { - for (size_type k = 0; k < length; k++) { Mutate(k, src[k]); } + for (size_type k = 0; k < length; k++) { + Mutate(k, src[k]); + } } // This class is only used to access pre-existing data. Don't ever @@ -150,21 +154,21 @@ template class Array { private: // This class is a pointer. Copying will therefore create an invalid object. // Private and unimplemented copy constructor. - Array(const Array &); - Array &operator=(const Array &); + Array(const Array&); + Array& operator=(const Array&); }; // Specialization for Array[struct] with access using Offset pointer. // This specialization used by idl_gen_text.cpp. -template class OffsetT> +template class OffsetT> class Array, length> { static_assert(flatbuffers::is_same::value, "unexpected type T"); public: - typedef const void *return_type; + typedef const void* return_type; typedef uint16_t size_type; - const uint8_t *Data() const { return data_; } + const uint8_t* Data() const { return data_; } // Make idl_gen_text.cpp::PrintContainer happy. 
return_type operator[](uoffset_t) const { @@ -175,14 +179,14 @@ class Array, length> { private: // This class is only used to access pre-existing data. Array(); - Array(const Array &); - Array &operator=(const Array &); + Array(const Array&); + Array& operator=(const Array&); uint8_t data_[1]; }; -template -FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span make_span(Array &arr) +template +FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span make_span(Array& arr) FLATBUFFERS_NOEXCEPT { static_assert( Array::is_span_observable, @@ -190,26 +194,26 @@ FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span make_span(Array &arr) return span(arr.data(), N); } -template +template FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span make_span( - const Array &arr) FLATBUFFERS_NOEXCEPT { + const Array& arr) FLATBUFFERS_NOEXCEPT { static_assert( Array::is_span_observable, "wrong type U, only plain struct, LE-scalar, or byte types are allowed"); return span(arr.data(), N); } -template +template FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span -make_bytes_span(Array &arr) FLATBUFFERS_NOEXCEPT { +make_bytes_span(Array& arr) FLATBUFFERS_NOEXCEPT { static_assert(Array::is_span_observable, "internal error, Array might hold only scalars or structs"); return span(arr.Data(), sizeof(U) * N); } -template +template FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span -make_bytes_span(const Array &arr) FLATBUFFERS_NOEXCEPT { +make_bytes_span(const Array& arr) FLATBUFFERS_NOEXCEPT { static_assert(Array::is_span_observable, "internal error, Array might hold only scalars or structs"); return span(arr.Data(), sizeof(U) * N); @@ -218,31 +222,31 @@ make_bytes_span(const Array &arr) FLATBUFFERS_NOEXCEPT { // Cast a raw T[length] to a raw flatbuffers::Array // without endian conversion. Use with care. // TODO: move these Cast-methods to `internal` namespace. -template -Array &CastToArray(T (&arr)[length]) { - return *reinterpret_cast *>(arr); +template +Array& CastToArray(T (&arr)[length]) { + return *reinterpret_cast*>(arr); } -template -const Array &CastToArray(const T (&arr)[length]) { - return *reinterpret_cast *>(arr); +template +const Array& CastToArray(const T (&arr)[length]) { + return *reinterpret_cast*>(arr); } -template -Array &CastToArrayOfEnum(T (&arr)[length]) { +template +Array& CastToArrayOfEnum(T (&arr)[length]) { static_assert(sizeof(E) == sizeof(T), "invalid enum type E"); - return *reinterpret_cast *>(arr); + return *reinterpret_cast*>(arr); } -template -const Array &CastToArrayOfEnum(const T (&arr)[length]) { +template +const Array& CastToArrayOfEnum(const T (&arr)[length]) { static_assert(sizeof(E) == sizeof(T), "invalid enum type E"); - return *reinterpret_cast *>(arr); + return *reinterpret_cast*>(arr); } -template -bool operator==(const Array &lhs, - const Array &rhs) noexcept { +template +bool operator==(const Array& lhs, + const Array& rhs) noexcept { return std::addressof(lhs) == std::addressof(rhs) || (lhs.size() == rhs.size() && std::memcmp(lhs.Data(), rhs.Data(), rhs.size() * sizeof(T)) == 0); diff --git a/include/flatbuffers/buffer.h b/include/flatbuffers/buffer.h index 00c07c736d8..154d187ab75 100644 --- a/include/flatbuffers/buffer.h +++ b/include/flatbuffers/buffer.h @@ -26,7 +26,8 @@ namespace flatbuffers { // Wrapper for uoffset_t to allow safe template specialization. // Value is allowed to be 0 to indicate a null object (see e.g. AddOffset). -template struct Offset { +template +struct Offset { // The type of offset to use. 
typedef uoffset_t offset_type; @@ -37,12 +38,14 @@ template struct Offset { bool IsNull() const { return !o; } }; -template struct is_specialisation_of_Offset : false_type {}; -template +template +struct is_specialisation_of_Offset : false_type {}; +template struct is_specialisation_of_Offset> : true_type {}; // Wrapper for uoffset64_t Offsets. -template struct Offset64 { +template +struct Offset64 { // The type of offset to use. typedef uoffset64_t offset_type; @@ -53,8 +56,9 @@ template struct Offset64 { bool IsNull() const { return !o; } }; -template struct is_specialisation_of_Offset64 : false_type {}; -template +template +struct is_specialisation_of_Offset64 : false_type {}; +template struct is_specialisation_of_Offset64> : true_type {}; // Litmus check for ensuring the Offsets are the expected size. @@ -64,12 +68,13 @@ static_assert(sizeof(Offset64<>) == 8, "Offset64 has wrong size"); inline void EndianCheck() { int endiantest = 1; // If this fails, see FLATBUFFERS_LITTLEENDIAN above. - FLATBUFFERS_ASSERT(*reinterpret_cast(&endiantest) == + FLATBUFFERS_ASSERT(*reinterpret_cast(&endiantest) == FLATBUFFERS_LITTLEENDIAN); (void)endiantest; } -template FLATBUFFERS_CONSTEXPR size_t AlignOf() { +template +FLATBUFFERS_CONSTEXPR size_t AlignOf() { // clang-format off #ifdef _MSC_VER return __alignof(T); @@ -85,8 +90,8 @@ template FLATBUFFERS_CONSTEXPR size_t AlignOf() { // Lexicographically compare two strings (possibly containing nulls), and // return true if the first is less than the second. -static inline bool StringLessThan(const char *a_data, uoffset_t a_size, - const char *b_data, uoffset_t b_size) { +static inline bool StringLessThan(const char* a_data, uoffset_t a_size, + const char* b_data, uoffset_t b_size) { const auto cmp = memcmp(a_data, b_data, (std::min)(a_size, b_size)); return cmp == 0 ? a_size < b_size : cmp < 0; } @@ -99,42 +104,43 @@ static inline bool StringLessThan(const char *a_data, uoffset_t a_size, // return type like this. // The typedef is for the convenience of callers of this function // (avoiding the need for a trailing return decltype) -template struct IndirectHelper { +template +struct IndirectHelper { typedef T return_type; typedef T mutable_return_type; static const size_t element_stride = sizeof(T); - static return_type Read(const uint8_t *p, const size_t i) { - return EndianScalar((reinterpret_cast(p))[i]); + static return_type Read(const uint8_t* p, const size_t i) { + return EndianScalar((reinterpret_cast(p))[i]); } - static mutable_return_type Read(uint8_t *p, const size_t i) { + static mutable_return_type Read(uint8_t* p, const size_t i) { return reinterpret_cast( - Read(const_cast(p), i)); + Read(const_cast(p), i)); } }; // For vector of Offsets. -template class OffsetT> +template class OffsetT> struct IndirectHelper> { - typedef const T *return_type; - typedef T *mutable_return_type; + typedef const T* return_type; + typedef T* mutable_return_type; typedef typename OffsetT::offset_type offset_type; static const offset_type element_stride = sizeof(offset_type); - static return_type Read(const uint8_t *const p, const offset_type i) { + static return_type Read(const uint8_t* const p, const offset_type i) { // Offsets are relative to themselves, so first update the pointer to // point to the offset location. 
- const uint8_t *const offset_location = p + i * element_stride; + const uint8_t* const offset_location = p + i * element_stride; // Then read the scalar value of the offset (which may be 32 or 64-bits) and // then determine the relative location from the offset location. return reinterpret_cast( offset_location + ReadScalar(offset_location)); } - static mutable_return_type Read(uint8_t *const p, const offset_type i) { + static mutable_return_type Read(uint8_t* const p, const offset_type i) { // Offsets are relative to themselves, so first update the pointer to // point to the offset location. - uint8_t *const offset_location = p + i * element_stride; + uint8_t* const offset_location = p + i * element_stride; // Then read the scalar value of the offset (which may be 32 or 64-bits) and // then determine the relative location from the offset location. @@ -144,7 +150,7 @@ struct IndirectHelper> { }; // For vector of structs. -template +template struct IndirectHelper< T, typename std::enable_if< !std::is_scalar::type>::value && @@ -155,15 +161,15 @@ struct IndirectHelper< pointee_type; public: - typedef const pointee_type *return_type; - typedef pointee_type *mutable_return_type; + typedef const pointee_type* return_type; + typedef pointee_type* mutable_return_type; static const size_t element_stride = sizeof(pointee_type); - static return_type Read(const uint8_t *const p, const size_t i) { + static return_type Read(const uint8_t* const p, const size_t i) { // Structs are stored inline, relative to the first struct pointer. return reinterpret_cast(p + i * element_stride); } - static mutable_return_type Read(uint8_t *const p, const size_t i) { + static mutable_return_type Read(uint8_t* const p, const size_t i) { // Structs are stored inline, relative to the first struct pointer. return reinterpret_cast(p + i * element_stride); } @@ -176,14 +182,14 @@ struct IndirectHelper< /// This function is UNDEFINED for FlatBuffers whose schema does not include /// a file_identifier (likely points at padding or the start of a the root /// vtable). -inline const char *GetBufferIdentifier(const void *buf, +inline const char* GetBufferIdentifier(const void* buf, bool size_prefixed = false) { - return reinterpret_cast(buf) + + return reinterpret_cast(buf) + ((size_prefixed) ? 2 * sizeof(uoffset_t) : sizeof(uoffset_t)); } // Helper to see if the identifier in a buffer has the expected value. -inline bool BufferHasIdentifier(const void *buf, const char *identifier, +inline bool BufferHasIdentifier(const void* buf, const char* identifier, bool size_prefixed = false) { return strncmp(GetBufferIdentifier(buf, size_prefixed), identifier, flatbuffers::kFileIdentifierLength) == 0; @@ -191,26 +197,27 @@ inline bool BufferHasIdentifier(const void *buf, const char *identifier, /// @cond FLATBUFFERS_INTERNAL // Helpers to get a typed pointer to the root object contained in the buffer. 
-template T *GetMutableRoot(void *buf) { +template +T* GetMutableRoot(void* buf) { if (!buf) return nullptr; EndianCheck(); - return reinterpret_cast( - reinterpret_cast(buf) + - EndianScalar(*reinterpret_cast(buf))); + return reinterpret_cast(reinterpret_cast(buf) + + EndianScalar(*reinterpret_cast(buf))); } -template -T *GetMutableSizePrefixedRoot(void *buf) { - return GetMutableRoot(reinterpret_cast(buf) + sizeof(SizeT)); +template +T* GetMutableSizePrefixedRoot(void* buf) { + return GetMutableRoot(reinterpret_cast(buf) + sizeof(SizeT)); } -template const T *GetRoot(const void *buf) { - return GetMutableRoot(const_cast(buf)); +template +const T* GetRoot(const void* buf) { + return GetMutableRoot(const_cast(buf)); } -template -const T *GetSizePrefixedRoot(const void *buf) { - return GetRoot(reinterpret_cast(buf) + sizeof(SizeT)); +template +const T* GetSizePrefixedRoot(const void* buf) { + return GetRoot(reinterpret_cast(buf) + sizeof(SizeT)); } } // namespace flatbuffers diff --git a/include/flatbuffers/buffer_ref.h b/include/flatbuffers/buffer_ref.h index f70941fc64d..746903eb972 100644 --- a/include/flatbuffers/buffer_ref.h +++ b/include/flatbuffers/buffer_ref.h @@ -27,23 +27,24 @@ namespace flatbuffers { // A BufferRef does not own its buffer. struct BufferRefBase {}; // for std::is_base_of -template struct BufferRef : BufferRefBase { +template +struct BufferRef : BufferRefBase { BufferRef() : buf(nullptr), len(0), must_free(false) {} - BufferRef(uint8_t *_buf, uoffset_t _len) + BufferRef(uint8_t* _buf, uoffset_t _len) : buf(_buf), len(_len), must_free(false) {} ~BufferRef() { if (must_free) free(buf); } - const T *GetRoot() const { return flatbuffers::GetRoot(buf); } + const T* GetRoot() const { return flatbuffers::GetRoot(buf); } bool Verify() { Verifier verifier(buf, len); return verifier.VerifyBuffer(nullptr); } - uint8_t *buf; + uint8_t* buf; uoffset_t len; bool must_free; }; diff --git a/include/flatbuffers/code_generator.h b/include/flatbuffers/code_generator.h index 2971e556eec..cc4df7f1c8b 100644 --- a/include/flatbuffers/code_generator.h +++ b/include/flatbuffers/code_generator.h @@ -45,13 +45,13 @@ class CodeGenerator { // Generate code from the provided `parser`. // // DEPRECATED: prefer using the other overload of GenerateCode for bfbs. - virtual Status GenerateCode(const Parser &parser, const std::string &path, - const std::string &filename) = 0; + virtual Status GenerateCode(const Parser& parser, const std::string& path, + const std::string& filename) = 0; // Generate code from the provided `parser` and place it in the output. - virtual Status GenerateCodeString(const Parser &parser, - const std::string &filename, - std::string &output) { + virtual Status GenerateCodeString(const Parser& parser, + const std::string& filename, + std::string& output) { (void)parser; (void)filename; (void)output; @@ -60,18 +60,18 @@ class CodeGenerator { // Generate code from the provided `buffer` of given `length`. The buffer is a // serialized reflection.fbs. 
- virtual Status GenerateCode(const uint8_t *buffer, int64_t length, - const CodeGenOptions &options) = 0; + virtual Status GenerateCode(const uint8_t* buffer, int64_t length, + const CodeGenOptions& options) = 0; - virtual Status GenerateMakeRule(const Parser &parser, const std::string &path, - const std::string &filename, - std::string &output) = 0; + virtual Status GenerateMakeRule(const Parser& parser, const std::string& path, + const std::string& filename, + std::string& output) = 0; - virtual Status GenerateGrpcCode(const Parser &parser, const std::string &path, - const std::string &filename) = 0; + virtual Status GenerateGrpcCode(const Parser& parser, const std::string& path, + const std::string& filename) = 0; - virtual Status GenerateRootFile(const Parser &parser, - const std::string &path) = 0; + virtual Status GenerateRootFile(const Parser& parser, + const std::string& path) = 0; virtual bool IsSchemaOnly() const = 0; @@ -88,8 +88,8 @@ class CodeGenerator { private: // Copying is not supported. - CodeGenerator(const CodeGenerator &) = delete; - CodeGenerator &operator=(const CodeGenerator &) = delete; + CodeGenerator(const CodeGenerator&) = delete; + CodeGenerator& operator=(const CodeGenerator&) = delete; }; } // namespace flatbuffers diff --git a/include/flatbuffers/code_generators.h b/include/flatbuffers/code_generators.h index fc030d43943..d284ac5a683 100644 --- a/include/flatbuffers/code_generators.h +++ b/include/flatbuffers/code_generators.h @@ -51,11 +51,11 @@ class CodeWriter { // Associates a key with a value. All subsequent calls to operator+=, where // the specified key is contained in {{ and }} delimiters will be replaced by // the given value. - void SetValue(const std::string &key, const std::string &value) { + void SetValue(const std::string& key, const std::string& value) { value_map_[key] = value; } - std::string GetValue(const std::string &key) const { + std::string GetValue(const std::string& key) const { const auto it = value_map_.find(key); return it == value_map_.end() ? "" : it->second; } @@ -76,7 +76,7 @@ class CodeWriter { if (cur_ident_lvl_) cur_ident_lvl_--; } - void SetPadding(const std::string &padding) { pad_ = padding; } + void SetPadding(const std::string& padding) { pad_ = padding; } private: std::map value_map_; @@ -86,24 +86,24 @@ class CodeWriter { bool ignore_ident_; // Add ident padding (tab or space) based on ident level - void AppendIdent(std::stringstream &stream); + void AppendIdent(std::stringstream& stream); }; class BaseGenerator { public: virtual bool generate() = 0; - static std::string NamespaceDir(const Parser &parser, const std::string &path, - const Namespace &ns, + static std::string NamespaceDir(const Parser& parser, const std::string& path, + const Namespace& ns, const bool dasherize = false); - std::string GeneratedFileName(const std::string &path, - const std::string &file_name, - const IDLOptions &options) const; + std::string GeneratedFileName(const std::string& path, + const std::string& file_name, + const IDLOptions& options) const; protected: - BaseGenerator(const Parser &parser, const std::string &path, - const std::string &file_name, std::string qualifying_start, + BaseGenerator(const Parser& parser, const std::string& path, + const std::string& file_name, std::string qualifying_start, std::string qualifying_separator, std::string default_extension) : parser_(parser), path_(path), @@ -114,84 +114,84 @@ class BaseGenerator { virtual ~BaseGenerator() {} // No copy/assign. 
- BaseGenerator &operator=(const BaseGenerator &); - BaseGenerator(const BaseGenerator &); + BaseGenerator& operator=(const BaseGenerator&); + BaseGenerator(const BaseGenerator&); - std::string NamespaceDir(const Namespace &ns, + std::string NamespaceDir(const Namespace& ns, const bool dasherize = false) const; - static const char *FlatBuffersGeneratedWarning(); + static const char* FlatBuffersGeneratedWarning(); - static std::string FullNamespace(const char *separator, const Namespace &ns); + static std::string FullNamespace(const char* separator, const Namespace& ns); - static std::string LastNamespacePart(const Namespace &ns); + static std::string LastNamespacePart(const Namespace& ns); // tracks the current namespace for early exit in WrapInNameSpace // c++, java and csharp returns a different namespace from // the following default (no early exit, always fully qualify), // which works for js and php - virtual const Namespace *CurrentNameSpace() const { return nullptr; } + virtual const Namespace* CurrentNameSpace() const { return nullptr; } // Ensure that a type is prefixed with its namespace even within // its own namespace to avoid conflict between generated method // names and similarly named classes or structs - std::string WrapInNameSpace(const Namespace *ns, - const std::string &name) const; + std::string WrapInNameSpace(const Namespace* ns, + const std::string& name) const; - std::string WrapInNameSpace(const Definition &def, - const std::string &suffix = "") const; + std::string WrapInNameSpace(const Definition& def, + const std::string& suffix = "") const; - std::string GetNameSpace(const Definition &def) const; + std::string GetNameSpace(const Definition& def) const; - const Parser &parser_; - const std::string &path_; - const std::string &file_name_; + const Parser& parser_; + const std::string& path_; + const std::string& file_name_; const std::string qualifying_start_; const std::string qualifying_separator_; const std::string default_extension_; }; struct CommentConfig { - const char *first_line; - const char *content_line_prefix; - const char *last_line; + const char* first_line; + const char* content_line_prefix; + const char* last_line; }; -extern void GenComment(const std::vector &dc, - std::string *code_ptr, const CommentConfig *config, - const char *prefix = ""); +extern void GenComment(const std::vector& dc, + std::string* code_ptr, const CommentConfig* config, + const char* prefix = ""); class FloatConstantGenerator { public: virtual ~FloatConstantGenerator() {} - std::string GenFloatConstant(const FieldDef &field) const; + std::string GenFloatConstant(const FieldDef& field) const; private: - virtual std::string Value(double v, const std::string &src) const = 0; + virtual std::string Value(double v, const std::string& src) const = 0; virtual std::string Inf(double v) const = 0; virtual std::string NaN(double v) const = 0; - virtual std::string Value(float v, const std::string &src) const = 0; + virtual std::string Value(float v, const std::string& src) const = 0; virtual std::string Inf(float v) const = 0; virtual std::string NaN(float v) const = 0; - template - std::string GenFloatConstantImpl(const FieldDef &field) const; + template + std::string GenFloatConstantImpl(const FieldDef& field) const; }; class SimpleFloatConstantGenerator : public FloatConstantGenerator { public: - SimpleFloatConstantGenerator(const char *nan_number, - const char *pos_inf_number, - const char *neg_inf_number); + SimpleFloatConstantGenerator(const char* nan_number, + const char* 
pos_inf_number, + const char* neg_inf_number); private: std::string Value(double v, - const std::string &src) const FLATBUFFERS_OVERRIDE; + const std::string& src) const FLATBUFFERS_OVERRIDE; std::string Inf(double v) const FLATBUFFERS_OVERRIDE; std::string NaN(double v) const FLATBUFFERS_OVERRIDE; - std::string Value(float v, const std::string &src) const FLATBUFFERS_OVERRIDE; + std::string Value(float v, const std::string& src) const FLATBUFFERS_OVERRIDE; std::string Inf(float v) const FLATBUFFERS_OVERRIDE; std::string NaN(float v) const FLATBUFFERS_OVERRIDE; @@ -203,24 +203,24 @@ class SimpleFloatConstantGenerator : public FloatConstantGenerator { // C++, C#, Java like generator. class TypedFloatConstantGenerator : public FloatConstantGenerator { public: - TypedFloatConstantGenerator(const char *double_prefix, - const char *single_prefix, const char *nan_number, - const char *pos_inf_number, - const char *neg_inf_number = ""); + TypedFloatConstantGenerator(const char* double_prefix, + const char* single_prefix, const char* nan_number, + const char* pos_inf_number, + const char* neg_inf_number = ""); private: std::string Value(double v, - const std::string &src) const FLATBUFFERS_OVERRIDE; + const std::string& src) const FLATBUFFERS_OVERRIDE; std::string Inf(double v) const FLATBUFFERS_OVERRIDE; std::string NaN(double v) const FLATBUFFERS_OVERRIDE; - std::string Value(float v, const std::string &src) const FLATBUFFERS_OVERRIDE; + std::string Value(float v, const std::string& src) const FLATBUFFERS_OVERRIDE; std::string Inf(float v) const FLATBUFFERS_OVERRIDE; std::string NaN(float v) const FLATBUFFERS_OVERRIDE; - std::string MakeNaN(const std::string &prefix) const; - std::string MakeInf(bool neg, const std::string &prefix) const; + std::string MakeNaN(const std::string& prefix) const; + std::string MakeInf(bool neg, const std::string& prefix) const; const std::string double_prefix_; const std::string single_prefix_; @@ -229,9 +229,9 @@ class TypedFloatConstantGenerator : public FloatConstantGenerator { const std::string neg_inf_number_; }; -std::string JavaCSharpMakeRule(const bool java, const Parser &parser, - const std::string &path, - const std::string &file_name); +std::string JavaCSharpMakeRule(const bool java, const Parser& parser, + const std::string& path, + const std::string& file_name); } // namespace flatbuffers diff --git a/include/flatbuffers/default_allocator.h b/include/flatbuffers/default_allocator.h index d4724122cb5..d1cab08d743 100644 --- a/include/flatbuffers/default_allocator.h +++ b/include/flatbuffers/default_allocator.h @@ -25,32 +25,32 @@ namespace flatbuffers { // DefaultAllocator uses new/delete to allocate memory regions class DefaultAllocator : public Allocator { public: - uint8_t *allocate(size_t size) FLATBUFFERS_OVERRIDE { + uint8_t* allocate(size_t size) FLATBUFFERS_OVERRIDE { return new uint8_t[size]; } - void deallocate(uint8_t *p, size_t) FLATBUFFERS_OVERRIDE { delete[] p; } + void deallocate(uint8_t* p, size_t) FLATBUFFERS_OVERRIDE { delete[] p; } - static void dealloc(void *p, size_t) { delete[] static_cast(p); } + static void dealloc(void* p, size_t) { delete[] static_cast(p); } }; // These functions allow for a null allocator to mean use the default allocator, // as used by DetachedBuffer and vector_downward below. // This is to avoid having a statically or dynamically allocated default // allocator, or having to move it between the classes that may own it. 
-inline uint8_t *Allocate(Allocator *allocator, size_t size) { +inline uint8_t* Allocate(Allocator* allocator, size_t size) { return allocator ? allocator->allocate(size) : DefaultAllocator().allocate(size); } -inline void Deallocate(Allocator *allocator, uint8_t *p, size_t size) { +inline void Deallocate(Allocator* allocator, uint8_t* p, size_t size) { if (allocator) allocator->deallocate(p, size); else DefaultAllocator().deallocate(p, size); } -inline uint8_t *ReallocateDownward(Allocator *allocator, uint8_t *old_p, +inline uint8_t* ReallocateDownward(Allocator* allocator, uint8_t* old_p, size_t old_size, size_t new_size, size_t in_use_back, size_t in_use_front) { return allocator ? allocator->reallocate_downward(old_p, old_size, new_size, diff --git a/include/flatbuffers/detached_buffer.h b/include/flatbuffers/detached_buffer.h index 36d3f6d6deb..0577a42d969 100644 --- a/include/flatbuffers/detached_buffer.h +++ b/include/flatbuffers/detached_buffer.h @@ -36,8 +36,8 @@ class DetachedBuffer { cur_(nullptr), size_(0) {} - DetachedBuffer(Allocator *allocator, bool own_allocator, uint8_t *buf, - size_t reserved, uint8_t *cur, size_t sz) + DetachedBuffer(Allocator* allocator, bool own_allocator, uint8_t* buf, + size_t reserved, uint8_t* cur, size_t sz) : allocator_(allocator), own_allocator_(own_allocator), buf_(buf), @@ -45,7 +45,7 @@ class DetachedBuffer { cur_(cur), size_(sz) {} - DetachedBuffer(DetachedBuffer &&other) noexcept + DetachedBuffer(DetachedBuffer&& other) noexcept : allocator_(other.allocator_), own_allocator_(other.own_allocator_), buf_(other.buf_), @@ -55,7 +55,7 @@ class DetachedBuffer { other.reset(); } - DetachedBuffer &operator=(DetachedBuffer &&other) noexcept { + DetachedBuffer& operator=(DetachedBuffer&& other) noexcept { if (this == &other) return *this; destroy(); @@ -74,33 +74,35 @@ class DetachedBuffer { ~DetachedBuffer() { destroy(); } - const uint8_t *data() const { return cur_; } + const uint8_t* data() const { return cur_; } - uint8_t *data() { return cur_; } + uint8_t* data() { return cur_; } size_t size() const { return size_; } - uint8_t *begin() { return data(); } - const uint8_t *begin() const { return data(); } - uint8_t *end() { return data() + size(); } - const uint8_t *end() const { return data() + size(); } + uint8_t* begin() { return data(); } + const uint8_t* begin() const { return data(); } + uint8_t* end() { return data() + size(); } + const uint8_t* end() const { return data() + size(); } // These may change access mode, leave these at end of public section - FLATBUFFERS_DELETE_FUNC(DetachedBuffer(const DetachedBuffer &other)); + FLATBUFFERS_DELETE_FUNC(DetachedBuffer(const DetachedBuffer& other)); FLATBUFFERS_DELETE_FUNC( - DetachedBuffer &operator=(const DetachedBuffer &other)); + DetachedBuffer& operator=(const DetachedBuffer& other)); protected: - Allocator *allocator_; + Allocator* allocator_; bool own_allocator_; - uint8_t *buf_; + uint8_t* buf_; size_t reserved_; - uint8_t *cur_; + uint8_t* cur_; size_t size_; inline void destroy() { if (buf_) Deallocate(allocator_, buf_, reserved_); - if (own_allocator_ && allocator_) { delete allocator_; } + if (own_allocator_ && allocator_) { + delete allocator_; + } reset(); } diff --git a/include/flatbuffers/file_manager.h b/include/flatbuffers/file_manager.h index 069df5b8842..0941faef662 100644 --- a/include/flatbuffers/file_manager.h +++ b/include/flatbuffers/file_manager.h @@ -31,16 +31,16 @@ class FileManager { FileManager() = default; virtual ~FileManager() = default; - virtual bool 
SaveFile(const std::string &absolute_file_name, - const std::string &content) = 0; + virtual bool SaveFile(const std::string& absolute_file_name, + const std::string& content) = 0; - virtual bool LoadFile(const std::string &absolute_file_name, - std::string *buf) = 0; + virtual bool LoadFile(const std::string& absolute_file_name, + std::string* buf) = 0; private: // Copying is not supported. - FileManager(const FileManager &) = delete; - FileManager &operator=(const FileManager &) = delete; + FileManager(const FileManager&) = delete; + FileManager& operator=(const FileManager&) = delete; }; } // namespace flatbuffers diff --git a/include/flatbuffers/flatbuffer_builder.h b/include/flatbuffers/flatbuffer_builder.h index 9ceca8207b6..9eea6bab0c5 100644 --- a/include/flatbuffers/flatbuffer_builder.h +++ b/include/flatbuffers/flatbuffer_builder.h @@ -50,19 +50,19 @@ inline voffset_t FieldIndexToOffset(voffset_t field_id) { return static_cast(offset); } -template> -const T *data(const std::vector &v) { +template > +const T* data(const std::vector& v) { // Eventually the returned pointer gets passed down to memcpy, so // we need it to be non-null to avoid undefined behavior. static uint8_t t; - return v.empty() ? reinterpret_cast(&t) : &v.front(); + return v.empty() ? reinterpret_cast(&t) : &v.front(); } -template> -T *data(std::vector &v) { +template > +T* data(std::vector& v) { // Eventually the returned pointer gets passed down to memcpy, so // we need it to be non-null to avoid undefined behavior. static uint8_t t; - return v.empty() ? reinterpret_cast(&t) : &v.front(); + return v.empty() ? reinterpret_cast(&t) : &v.front(); } /// @addtogroup flatbuffers_cpp_api @@ -74,7 +74,8 @@ T *data(std::vector &v) { /// `PushElement`/`AddElement`/`EndTable`, or the builtin `CreateString`/ /// `CreateVector` functions. Do this is depth-first order to build up a tree to /// the root. `Finish()` wraps up the buffer ready for transport. -template class FlatBufferBuilderImpl { +template +class FlatBufferBuilderImpl { public: // This switches the size type of the builder, based on if its 64-bit aware // (uoffset64_t) or not (uoffset_t). @@ -93,7 +94,7 @@ template class FlatBufferBuilderImpl { /// types with custom alignment AND you wish to read the buffer in-place /// directly after creation. explicit FlatBufferBuilderImpl( - size_t initial_size = 1024, Allocator *allocator = nullptr, + size_t initial_size = 1024, Allocator* allocator = nullptr, bool own_allocator = false, size_t buffer_minalign = AlignOf()) : buf_(initial_size, allocator, own_allocator, buffer_minalign, @@ -112,7 +113,7 @@ template class FlatBufferBuilderImpl { } /// @brief Move constructor for FlatBufferBuilder. - FlatBufferBuilderImpl(FlatBufferBuilderImpl &&other) noexcept + FlatBufferBuilderImpl(FlatBufferBuilderImpl&& other) noexcept : buf_(1024, nullptr, false, AlignOf(), static_cast(Is64Aware ? FLATBUFFERS_MAX_64_BUFFER_SIZE : FLATBUFFERS_MAX_BUFFER_SIZE)), @@ -133,14 +134,14 @@ template class FlatBufferBuilderImpl { } /// @brief Move assignment operator for FlatBufferBuilder. 
- FlatBufferBuilderImpl &operator=(FlatBufferBuilderImpl &&other) noexcept { + FlatBufferBuilderImpl& operator=(FlatBufferBuilderImpl&& other) noexcept { // Move construct a temporary and swap idiom FlatBufferBuilderImpl temp(std::move(other)); Swap(temp); return *this; } - void Swap(FlatBufferBuilderImpl &other) { + void Swap(FlatBufferBuilderImpl& other) { using std::swap; buf_.swap(other.buf_); swap(num_field_loc, other.num_field_loc); @@ -182,7 +183,7 @@ template class FlatBufferBuilderImpl { /// @brief The current size of the serialized buffer relative to the end of /// the 32-bit region. /// @return Returns an `uoffset_t` with the current size of the buffer. - template + template // Only enable this method for the 64-bit builder, as only that builder is // concerned with the 32/64-bit boundary, and should be the one to bare any // run time costs. @@ -195,7 +196,7 @@ template class FlatBufferBuilderImpl { return static_cast(GetSize() - length_of_64_bit_region_); } - template + template // Only enable this method for the 32-bit builder. typename std::enable_if::type GetSizeRelative32BitRegion() const { @@ -205,7 +206,7 @@ template class FlatBufferBuilderImpl { /// @brief Get the serialized buffer (after you call `Finish()`). /// @return Returns an `uint8_t` pointer to the FlatBuffer data inside the /// buffer. - uint8_t *GetBufferPointer() const { + uint8_t* GetBufferPointer() const { Finished(); return buf_.data(); } @@ -220,7 +221,7 @@ template class FlatBufferBuilderImpl { /// @brief Get a pointer to an unfinished buffer. /// @return Returns a `uint8_t` pointer to the unfinished buffer. - uint8_t *GetCurrentBufferPointer() const { return buf_.data(); } + uint8_t* GetCurrentBufferPointer() const { return buf_.data(); } /// @brief Get the released DetachedBuffer. /// @return A `DetachedBuffer` that owns the buffer and its allocator. @@ -240,9 +241,9 @@ template class FlatBufferBuilderImpl { /// the serialized `FlatBuffer`. /// @remark If the allocator is owned, it gets deleted when the destructor is /// called. - uint8_t *ReleaseRaw(size_t &size, size_t &offset) { + uint8_t* ReleaseRaw(size_t& size, size_t& offset) { Finished(); - uint8_t *raw = buf_.release_raw(size, offset); + uint8_t* raw = buf_.release_raw(size, offset); Clear(); return raw; } @@ -291,22 +292,23 @@ template class FlatBufferBuilderImpl { buf_.fill(PaddingBytes(buf_.size(), elem_size)); } - void PushFlatBuffer(const uint8_t *bytes, size_t size) { + void PushFlatBuffer(const uint8_t* bytes, size_t size) { PushBytes(bytes, size); finished = true; } - void PushBytes(const uint8_t *bytes, size_t size) { buf_.push(bytes, size); } + void PushBytes(const uint8_t* bytes, size_t size) { buf_.push(bytes, size); } void PopBytes(size_t amount) { buf_.pop(amount); } - template void AssertScalarT() { + template + void AssertScalarT() { // The code assumes power of 2 sizes and endian-swap-ability. static_assert(flatbuffers::is_scalar::value, "T must be a scalar type"); } // Write a single aligned scalar to the buffer - template + template ReturnT PushElement(T element) { AssertScalarT(); Align(sizeof(T)); @@ -314,7 +316,7 @@ template class FlatBufferBuilderImpl { return CalculateOffset(); } - template class OffsetT = Offset> + template class OffsetT = Offset> uoffset_t PushElement(OffsetT off) { // Special case for offsets: see ReferTo below. return PushElement(ReferTo(off.o)); @@ -323,34 +325,41 @@ template class FlatBufferBuilderImpl { // When writing fields, we track where they are, so we can create correct // vtables later. 
void TrackField(voffset_t field, uoffset_t off) { - FieldLoc fl = { off, field }; + FieldLoc fl = {off, field}; buf_.scratch_push_small(fl); num_field_loc++; - if (field > max_voffset_) { max_voffset_ = field; } + if (field > max_voffset_) { + max_voffset_ = field; + } } // Like PushElement, but additionally tracks the field this represents. - template void AddElement(voffset_t field, T e, T def) { + template + void AddElement(voffset_t field, T e, T def) { // We don't serialize values equal to the default. if (IsTheSameAs(e, def) && !force_defaults_) return; TrackField(field, PushElement(e)); } - template void AddElement(voffset_t field, T e) { + template + void AddElement(voffset_t field, T e) { TrackField(field, PushElement(e)); } - template void AddOffset(voffset_t field, Offset off) { + template + void AddOffset(voffset_t field, Offset off) { if (off.IsNull()) return; // Don't store. AddElement(field, ReferTo(off.o), static_cast(0)); } - template void AddOffset(voffset_t field, Offset64 off) { + template + void AddOffset(voffset_t field, Offset64 off) { if (off.IsNull()) return; // Don't store. AddElement(field, ReferTo(off.o), static_cast(0)); } - template void AddStruct(voffset_t field, const T *structptr) { + template + void AddStruct(voffset_t field, const T* structptr) { if (!structptr) return; // Default, don't store. Align(AlignOf()); buf_.push_small(*structptr); @@ -380,12 +389,14 @@ template class FlatBufferBuilderImpl { return ReferTo(off, GetSize()); } - template T ReferTo(const T off, const T2 size) { + template + T ReferTo(const T off, const T2 size) { FLATBUFFERS_ASSERT(off && off <= size); return size - off + static_cast(sizeof(T)); } - template T ReferTo(const T off, const T size) { + template + T ReferTo(const T off, const T size) { FLATBUFFERS_ASSERT(off && off <= size); return size - off + static_cast(sizeof(T)); } @@ -441,7 +452,7 @@ template class FlatBufferBuilderImpl { // Write the offsets into the table for (auto it = buf_.scratch_end() - num_field_loc * sizeof(FieldLoc); it < buf_.scratch_end(); it += sizeof(FieldLoc)) { - auto field_location = reinterpret_cast(it); + auto field_location = reinterpret_cast(it); const voffset_t pos = static_cast(vtable_offset_loc - field_location->off); // If this asserts, it means you've set a field twice. @@ -450,7 +461,7 @@ template class FlatBufferBuilderImpl { WriteScalar(buf_.data() + field_location->id, pos); } ClearOffsets(); - auto vt1 = reinterpret_cast(buf_.data()); + auto vt1 = reinterpret_cast(buf_.data()); auto vt1_size = ReadScalar(vt1); auto vt_use = GetSizeRelative32BitRegion(); // See if we already have generated a vtable with this exact same @@ -458,8 +469,8 @@ template class FlatBufferBuilderImpl { if (dedup_vtables_) { for (auto it = buf_.scratch_data(); it < buf_.scratch_end(); it += sizeof(uoffset_t)) { - auto vt_offset_ptr = reinterpret_cast(it); - auto vt2 = reinterpret_cast(buf_.data_at(*vt_offset_ptr)); + auto vt_offset_ptr = reinterpret_cast(it); + auto vt2 = reinterpret_cast(buf_.data_at(*vt_offset_ptr)); auto vt2_size = ReadScalar(vt2); if (vt1_size != vt2_size || 0 != memcmp(vt2, vt1, vt1_size)) continue; vt_use = *vt_offset_ptr; @@ -490,8 +501,9 @@ template class FlatBufferBuilderImpl { // This checks a required field has been set in a given table that has // just been constructed. 
- template void Required(Offset table, voffset_t field) { - auto table_ptr = reinterpret_cast(buf_.data_at(table.o)); + template + void Required(Offset table, voffset_t field) { + auto table_ptr = reinterpret_cast(buf_.data_at(table.o)); bool ok = table_ptr->GetOptionalFieldOffset(field) != 0; // If this fails, the caller will show what field needs to be set. FLATBUFFERS_ASSERT(ok); @@ -521,7 +533,8 @@ template class FlatBufferBuilderImpl { // Aligns such than when "len" bytes are written, an object of type `AlignT` // can be written after it (forward in the buffer) without padding. - template void PreAlign(size_t len) { + template + void PreAlign(size_t len) { AssertScalarT(); PreAlign(len, AlignOf()); } @@ -531,8 +544,8 @@ template class FlatBufferBuilderImpl { /// @param[in] str A const char pointer to the data to be stored as a string. /// @param[in] len The number of bytes that should be stored from `str`. /// @return Returns the offset in the buffer where the string starts. - template class OffsetT = Offset> - OffsetT CreateString(const char *str, size_t len) { + template
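The .clang-format change at the top of this diff removes every local override (right-aligned pointers, `# `-indented preprocessor directives, single-line short blocks and case labels, unbroken template declarations, no space after `template`, spaces inside braced initializer lists) and falls back to stock `BasedOnStyle: Google`; the mechanical churn in the remaining files is simply the result of re-running clang-format under the new configuration. Below is a minimal sketch of the effect on a hypothetical `Span` class (not code from this repository), assuming the stock Google defaults of left pointer alignment, a line break after the template parameter list, and expanded short blocks:

    // Formatted under the old local configuration (illustrative):
    //   PointerAlignment: Right, SpaceAfterTemplateKeyword: false,
    //   AlwaysBreakTemplateDeclarations: false, AllowShortBlocksOnASingleLine: true
    namespace old_style {
    template<typename T> class Span {
     public:
      const T *data() const { return data_; }
      void reset(T *p) { if (p) { data_ = p; } }

     private:
      T *data_ = nullptr;
    };
    }  // namespace old_style

    // The same class after re-running clang-format with plain BasedOnStyle: Google:
    // pointers bind left, the template parameter list gets its own line, and the
    // short if-block is expanded onto multiple lines.
    namespace new_style {
    template <typename T>
    class Span {
     public:
      const T* data() const { return data_; }
      void reset(T* p) {
        if (p) {
          data_ = p;
        }
      }

     private:
      T* data_ = nullptr;
    };
    }  // namespace new_style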