From e1570268077b2b063f7397a804fe17690b2ddc9e Mon Sep 17 00:00:00 2001 From: Kern Handa Date: Fri, 25 Sep 2015 00:42:38 -0700 Subject: [PATCH 1/3] Rank and dimensions should be size_t. --- include/array_view.h | 132 +++++++++++++++++++++---------------------- 1 file changed, 66 insertions(+), 66 deletions(-) diff --git a/include/array_view.h b/include/array_view.h index 006f18c..f9eadc4 100644 --- a/include/array_view.h +++ b/include/array_view.h @@ -71,20 +71,20 @@ namespace details }; - template + template class coordinate_facade { static_assert(std::is_integral::value && sizeof(ValueType) <= sizeof(size_t), "ValueType must be unsigned integral type!"); static_assert(Rank > 0, "Rank must be greater than 0!"); - template + template friend class coordinate_facade; public: using reference = ValueType&; using const_reference = const ValueType&; using value_type = ValueType; - static const unsigned int rank = Rank; + static const size_t rank = Rank; _CONSTEXPR coordinate_facade() _NOEXCEPT { static_assert(std::is_base_of::value, "ConcreteType must be derived from coordinate_facade."); @@ -92,7 +92,7 @@ namespace details _CONSTEXPR coordinate_facade(const value_type(&values)[rank]) _NOEXCEPT { static_assert(std::is_base_of::value, "ConcreteType must be derived from coordinate_facade."); - for (unsigned int i = 0; i < rank; ++i) + for (size_t i = 0; i < rank; ++i) elems[i] = values[i]; } _CONSTEXPR coordinate_facade(value_type e0) _NOEXCEPT @@ -106,7 +106,7 @@ namespace details { static_assert(std::is_base_of::value, "ConcreteType must be derived from coordinate_facade."); fail_fast_assert(il.size() == rank, "The size of the initializer list must match the rank of the array"); - for (unsigned int i = 0; i < rank; ++i) + for (size_t i = 0; i < rank; ++i) { elems[i] = begin(il)[i]; } @@ -117,7 +117,7 @@ namespace details template _CONSTEXPR coordinate_facade(const coordinate_facade & other) { - for (unsigned int i = 0; i < rank; ++i) + for (size_t i = 0; i < rank; ++i) { fail_fast_assert(static_cast(other.elems[i]) <= SizeTypeTraits::max_value); elems[i] = static_cast(other.elems[i]); @@ -126,20 +126,20 @@ namespace details protected: coordinate_facade& operator=(const coordinate_facade& rhs) = default; // Preconditions: component_idx < rank - _CONSTEXPR reference operator[](unsigned int component_idx) + _CONSTEXPR reference operator[](size_t component_idx) { fail_fast_assert(component_idx < rank, "Component index must be less than rank"); return elems[component_idx]; } // Preconditions: component_idx < rank - _CONSTEXPR const_reference operator[](unsigned int component_idx) const + _CONSTEXPR const_reference operator[](size_t component_idx) const { fail_fast_assert(component_idx < rank, "Component index must be less than rank"); return elems[component_idx]; } _CONSTEXPR bool operator==(const ConcreteType& rhs) const _NOEXCEPT { - for (unsigned int i = 0; i < rank; ++i) + for (size_t i = 0; i < rank; ++i) { if (elems[i] != rhs.elems[i]) return false; @@ -157,7 +157,7 @@ namespace details _CONSTEXPR ConcreteType operator-() const { ConcreteType ret = to_concrete(); - for (unsigned int i = 0; i < rank; ++i) + for (size_t i = 0; i < rank; ++i) ret.elems[i] = -ret.elems[i]; return ret; } @@ -175,13 +175,13 @@ namespace details } _CONSTEXPR ConcreteType& operator+=(const ConcreteType& rhs) { - for (unsigned int i = 0; i < rank; ++i) + for (size_t i = 0; i < rank; ++i) elems[i] += rhs.elems[i]; return to_concrete(); } _CONSTEXPR ConcreteType& operator-=(const ConcreteType& rhs) { - for (unsigned int i 
= 0; i < rank; ++i) + for (size_t i = 0; i < rank; ++i) elems[i] -= rhs.elems[i]; return to_concrete(); } @@ -229,13 +229,13 @@ namespace details } _CONSTEXPR ConcreteType& operator*=(value_type v) { - for (unsigned int i = 0; i < rank; ++i) + for (size_t i = 0; i < rank; ++i) elems[i] *= v; return to_concrete(); } _CONSTEXPR ConcreteType& operator/=(value_type v) { - for (unsigned int i = 0; i < rank; ++i) + for (size_t i = 0; i < rank; ++i) elems[i] /= v; return to_concrete(); } @@ -270,12 +270,12 @@ namespace details }; } -template +template class index : private details::coordinate_facade, ValueType, Rank> { using Base = details::coordinate_facade, ValueType, Rank>; friend Base; - template + template friend class index; public: using Base::rank; @@ -317,10 +317,10 @@ public: template class index<1, ValueType> { - template + template friend class index; public: - static const unsigned int rank = 1; + static const size_t rank = 1; using reference = ValueType&; using const_reference = const ValueType&; using size_type = ValueType; @@ -537,8 +537,8 @@ namespace details template struct BoundsRanges { - static const unsigned int Depth = 0; - static const unsigned int DynamicNum = 0; + static const size_t Depth = 0; + static const size_t DynamicNum = 0; static const SizeType CurrentRange = 1; static const SizeType TotalSize = 1; @@ -551,14 +551,14 @@ namespace details BoundsRanges() = default; - template + template void serialize(T &) const { } - template + template SizeType linearize(const T &) const { return 0; } - template + template ptrdiff_t contains(const T &) const { return 0; } @@ -576,8 +576,8 @@ namespace details template struct BoundsRanges : BoundsRanges{ using Base = BoundsRanges ; - static const unsigned int Depth = Base::Depth + 1; - static const unsigned int DynamicNum = Base::DynamicNum + 1; + static const size_t Depth = Base::Depth + 1; + static const size_t DynamicNum = Base::DynamicNum + 1; static const SizeType CurrentRange = dynamic_range; static const SizeType TotalSize = dynamic_range; const SizeType m_bound; @@ -596,19 +596,19 @@ namespace details { } - template + template void serialize(T & arr) const { arr[Dim] = elementNum(); this->Base::template serialize(arr); } - template + template SizeType linearize(const T & arr) const { const size_t index = this->Base::totalSize() * arr[Dim]; fail_fast_assert(index < static_cast(m_bound)); return static_cast(index) + this->Base::template linearize(arr); } - template + template ptrdiff_t contains(const T & arr) const { const ptrdiff_t last = this->Base::template contains(arr); if (last == -1) @@ -625,7 +625,7 @@ namespace details return static_cast(totalSize() / this->Base::totalSize()); } - SizeType elementNum(unsigned int dim) const _NOEXCEPT{ + SizeType elementNum(size_t dim) const _NOEXCEPT{ if (dim > 0) return this->Base::elementNum(dim - 1); else @@ -641,8 +641,8 @@ namespace details template struct BoundsRanges : BoundsRanges{ using Base = BoundsRanges ; - static const unsigned int Depth = Base::Depth + 1; - static const unsigned int DynamicNum = Base::DynamicNum; + static const size_t Depth = Base::Depth + 1; + static const size_t DynamicNum = Base::DynamicNum; static const SizeType CurrentRange = static_cast(CurRange); static const SizeType TotalSize = StaticSizeHelper::value; static_assert (CurRange <= SizeTypeTraits::max_value, "CurRange must be smaller than SizeType limits"); @@ -657,19 +657,19 @@ namespace details fail_fast_assert((firstLevel && totalSize() <= other.totalSize()) || totalSize() == 
other.totalSize()); } - template + template void serialize(T & arr) const { arr[Dim] = elementNum(); this->Base::template serialize(arr); } - template + template SizeType linearize(const T & arr) const { fail_fast_assert(arr[Dim] < CurrentRange, "Index is out of range"); return static_cast(this->Base::totalSize()) * arr[Dim] + this->Base::template linearize(arr); } - template + template ptrdiff_t contains(const T & arr) const { if (static_cast(arr[Dim]) >= CurrentRange) return -1; @@ -687,7 +687,7 @@ namespace details return CurrentRange; } - SizeType elementNum(unsigned int dim) const _NOEXCEPT{ + SizeType elementNum(size_t dim) const _NOEXCEPT{ if (dim > 0) return this->Base::elementNum(dim - 1); else @@ -732,17 +732,17 @@ namespace details { const TypeChain & obj; TypeListIndexer(const TypeChain & obj) :obj(obj){} - template + template const TypeChain & getObj(std::true_type) { return obj; } - template + template auto getObj(std::false_type) -> decltype(TypeListIndexer(static_cast(obj)).template get()) { return TypeListIndexer(static_cast(obj)).template get(); } - template + template auto get() -> decltype(getObj(std::integral_constant())) { return getObj(std::integral_constant()); @@ -779,8 +779,8 @@ class static_bounds template friend class static_bounds; public: - static const unsigned int rank = MyRanges::Depth; - static const unsigned int dynamic_rank = MyRanges::DynamicNum; + static const size_t rank = MyRanges::Depth; + static const size_t dynamic_rank = MyRanges::DynamicNum; static const SizeType static_size = static_cast(MyRanges::TotalSize); using size_type = SizeType; @@ -844,12 +844,12 @@ public: return m_ranges.contains(idx) != -1; } - _CONSTEXPR size_type operator[](unsigned int index) const _NOEXCEPT + _CONSTEXPR size_type operator[](size_t index) const _NOEXCEPT { return m_ranges.elementNum(index); } - template + template _CONSTEXPR size_type extent() const _NOEXCEPT { static_assert(Dim < rank, "dimension should be less than rank (dimension count starts from 0)"); @@ -888,12 +888,12 @@ public: } }; -template +template class strided_bounds : private details::coordinate_facade, SizeType, Rank> { using Base = details::coordinate_facade, SizeType, Rank>; friend Base; - template + template friend class strided_bounds; public: @@ -921,7 +921,7 @@ public: _CONSTEXPR strided_bounds(const index_type &extents, const index_type &strides) : m_strides(strides) { - for (unsigned int i = 0; i < rank; i++) + for (size_t i = 0; i < rank; i++) Base::elems[i] = extents[i]; } _CONSTEXPR strided_bounds(const value_type(&values)[rank], index_type strides) @@ -935,20 +935,20 @@ public: _CONSTEXPR size_type total_size() const _NOEXCEPT { size_type ret = 0; - for (unsigned int i = 0; i < rank; ++i) + for (size_t i = 0; i < rank; ++i) ret += (Base::elems[i] - 1) * m_strides[i]; return ret + 1; } _CONSTEXPR size_type size() const _NOEXCEPT { size_type ret = 1; - for (unsigned int i = 0; i < rank; ++i) + for (size_t i = 0; i < rank; ++i) ret *= Base::elems[i]; return ret; } _CONSTEXPR bool contains(const index_type& idx) const _NOEXCEPT { - for (unsigned int i = 0; i < rank; ++i) + for (size_t i = 0; i < rank; ++i) { if (idx[i] < 0 || idx[i] >= Base::elems[i]) return false; @@ -958,7 +958,7 @@ public: _CONSTEXPR size_type linearize(const index_type & idx) const { size_type ret = 0; - for (unsigned int i = 0; i < rank; i++) + for (size_t i = 0; i < rank; i++) { fail_fast_assert(idx[i] < Base::elems[i], "index is out of bounds of the array"); ret += idx[i] * m_strides[i]; @@ -974,7 +974,7 @@ public: 
{ return{ (value_type(&)[rank - 1])Base::elems[1], sliced_type::index_type::shift_left(m_strides) }; } - template + template _CONSTEXPR size_type extent() const _NOEXCEPT { static_assert(Dim < Rank, "dimension should be less than rank (dimension count starts from 0)"); @@ -1000,7 +1000,7 @@ template struct is_bounds : std::integral_constant {}; template struct is_bounds> : std::integral_constant {}; -template +template struct is_bounds> : std::integral_constant {}; template @@ -1014,7 +1014,7 @@ class bounds_iterator private: using Base = std::iterator , const IndexType>; public: - static const unsigned int rank = IndexType::rank; + static const size_t rank = IndexType::rank; using typename Base::reference; using typename Base::pointer; using typename Base::difference_type; @@ -1038,7 +1038,7 @@ public: } bounds_iterator& operator++() _NOEXCEPT { - for (unsigned int i = rank; i-- > 0;) + for (size_t i = rank; i-- > 0;) { if (++curr[i] < boundary[i]) { @@ -1050,7 +1050,7 @@ public: } } // If we're here we've wrapped over - set to past-the-end. - for (unsigned int i = 0; i < rank; ++i) + for (size_t i = 0; i < rank; ++i) { curr[i] = boundary[i]; } @@ -1096,11 +1096,11 @@ public: auto linear_idx = linearize(curr) + n; value_type stride; stride[rank - 1] = 1; - for (unsigned int i = rank - 1; i-- > 0;) + for (size_t i = rank - 1; i-- > 0;) { stride[i] = stride[i + 1] * boundary[i + 1]; } - for (unsigned int i = 0; i < rank; ++i) + for (size_t i = 0; i < rank; ++i) { curr[i] = linear_idx / stride[i]; linear_idx = linear_idx % stride[i]; @@ -1134,7 +1134,7 @@ public: } bool operator<(const bounds_iterator& rhs) const _NOEXCEPT { - for (unsigned int i = 0; i < rank; ++i) + for (size_t i = 0; i < rank; ++i) { if (curr[i] < rhs.curr[i]) return true; @@ -1164,7 +1164,7 @@ private: // TODO: Smarter impl. 
// Check if past-the-end bool pte = true; - for (unsigned int i = 0; i < rank; ++i) + for (size_t i = 0; i < rank; ++i) { if (idx[i] != boundary[i]) { @@ -1177,7 +1177,7 @@ private: if (pte) { res = 1; - for (unsigned int i = rank; i-- > 0;) + for (size_t i = rank; i-- > 0;) { res += (idx[i] - 1) * multiplier; multiplier *= boundary[i]; @@ -1185,7 +1185,7 @@ private: } else { - for (unsigned int i = rank; i-- > 0;) + for (size_t i = rank; i-- > 0;) { res += idx[i] * multiplier; multiplier *= boundary[i]; @@ -1359,7 +1359,7 @@ template class basic_array_view { public: - static const unsigned int rank = BoundsType::rank; + static const size_t rank = BoundsType::rank; using bounds_type = BoundsType; using size_type = typename bounds_type::size_type; using index_type = typename bounds_type::index_type; @@ -1381,7 +1381,7 @@ public: { return m_bounds; } - template + template _CONSTEXPR size_type extent() const _NOEXCEPT { static_assert(Dim < rank, "dimension should be less than rank (dimension count starts from 0)"); @@ -1537,7 +1537,7 @@ struct dim template class array_view; -template +template class strided_array_view; namespace details @@ -1616,7 +1616,7 @@ namespace details template struct is_array_view_oracle> : std::true_type {}; - template + template struct is_array_view_oracle> : std::true_type {}; template @@ -1930,12 +1930,12 @@ template _CONSTEXPR auto as_array_view(Cont &&arr) -> std::enable_if_t>::value, array_view, dynamic_range>> = delete; -template +template class strided_array_view : public basic_array_view::value_type, strided_bounds::size_type>> { using Base = basic_array_view::value_type, strided_bounds::size_type>>; - template + template friend class strided_array_view; public: using Base::rank;

From 437791e504462b2585808a717b61e084692638be Mon Sep 17 00:00:00 2001
From: saurabh singh
Date: Sun, 27 Sep 2015 16:11:12 +0530
Subject: [PATCH 2/3] GSL::finally can make use of move semantics

For example, consider this case:

[code]
string value = "someVeryLongErrorMessageIAm";
finally([value] { PrintErrorMessage(value); });
[/code]

With the current code there will be 3 calls to the copy constructor for the string before the call to PrintErrorMessage: one when the value is captured in the closure, a second when finally is called, and a third when the closure is passed to Final_act. With my patch there will be 1 call to the copy constructor and 2 calls to the move constructor for this scenario, so 2 potential deep copies are saved for objects that support move semantics.

Validated that the code builds from the root and that all tests pass after my change. Also validated that copy constructor calls are indeed saved for objects that support move semantics.
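The copy/move counts above can be checked with a small self-contained probe. The sketch below is illustrative only: scope_guard, make_guard and Probe are hypothetical stand-ins for Final_act, finally and the captured string, but they use the same technique as the patch (move the callable into the stored member and perfect-forward it through the factory function).

[code]
// Sketch of the technique in this patch; scope_guard, make_guard and Probe
// are hypothetical stand-ins, not the GSL names.
#include <cstdio>
#include <type_traits>
#include <utility>

struct Probe {
    Probe() = default;
    Probe(const Probe&) { std::puts("copy"); }
    Probe(Probe&&) noexcept { std::puts("move"); }
};

template <class F>
class scope_guard {
public:
    // Move, rather than copy, the callable into the stored member.
    explicit scope_guard(F f) : f_(std::move(f)), active_(true) {}
    scope_guard(scope_guard&& other) noexcept
        : f_(std::move(other.f_)), active_(other.active_)
    {
        other.active_ = false;  // ensure the callable runs exactly once
    }
    scope_guard(const scope_guard&) = delete;
    ~scope_guard() { if (active_) f_(); }
private:
    F f_;
    bool active_;
};

// Take the callable by forwarding reference so a temporary closure (and the
// state it captured) is moved, not copied, on its way into the guard.
template <class F>
scope_guard<typename std::decay<F>::type> make_guard(F&& f)
{
    return scope_guard<typename std::decay<F>::type>(std::forward<F>(f));
}

int main()
{
    Probe p;
    // One "copy" when p is captured by the closure; the remaining transfers
    // print "move" because the closure is forwarded and then moved.
    auto g = make_guard([p] { /* runs when g goes out of scope */ });
}
[/code]

Run as written (and assuming the usual return-value elision) this prints one copy followed by two moves; with the pre-patch approach of taking the callable by value and copying it into the member, the same probe reports three copies, which is the difference described above.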
--- include/gsl.h | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/include/gsl.h b/include/gsl.h index 09a46c7..23b62a9 100644 --- a/include/gsl.h +++ b/include/gsl.h @@ -50,7 +50,7 @@ template class Final_act { public: - explicit Final_act(F f) : f_(f) {} + explicit Final_act(F f) : f_(std::move(f)) {} Final_act(const Final_act&& other) : f_(other.f_) {} Final_act(const Final_act&) = delete; @@ -64,7 +64,10 @@ private: // finally() - convenience function to generate a Final_act template -Final_act finally(F f) { return Final_act(f); } +Final_act finally(const F &f) { return Final_act(f); } + +template +Final_act finally(F &&f) { return Final_act(std::forward(f)); } // narrow_cast(): a searchable way to do narrowing casts of values template From 2b6d90436f3f0b0444efc918203663367e823b65 Mon Sep 17 00:00:00 2001 From: Kern Handa Date: Fri, 25 Sep 2015 09:41:40 -0700 Subject: [PATCH 3/3] not_null and maybe_null variants should only work on nullptr-assignable types. This is in accordance with the GSL.View guidance on not_null and maybe_null types in the CppCoreGuidelines document. --- include/gsl.h | 3 +++ tests/maybenull_tests.cpp | 13 +++++++++++++ tests/notnull_tests.cpp | 8 ++++++++ 3 files changed, 24 insertions(+) diff --git a/include/gsl.h b/include/gsl.h index 23b62a9..fed3b6c 100644 --- a/include/gsl.h +++ b/include/gsl.h @@ -108,6 +108,7 @@ typename Cont::value_type& at(Cont& cont, size_t index) { fail_fast_assert(index template class not_null { + static_assert(std::is_assignable::value, "T cannot be assigned nullptr."); public: not_null(T t) : ptr_(t) { ensure_invariant(); } @@ -168,6 +169,7 @@ private: template class maybe_null_dbg { + static_assert(std::is_assignable::value, "T cannot be assigned nullptr."); public: maybe_null_dbg() : ptr_(nullptr), tested_(false) {} @@ -243,6 +245,7 @@ private: template class maybe_null_ret { + static_assert(std::is_assignable::value, "T cannot be assigned nullptr."); public: maybe_null_ret() : ptr_(nullptr) {} maybe_null_ret(std::nullptr_t) : ptr_(nullptr) {} diff --git a/tests/maybenull_tests.cpp b/tests/maybenull_tests.cpp index 0a9d891..4b74114 100644 --- a/tests/maybenull_tests.cpp +++ b/tests/maybenull_tests.cpp @@ -16,6 +16,7 @@ #include #include +#include using namespace Guide; @@ -27,12 +28,24 @@ SUITE(MaybeNullTests) { TEST(TestMaybeNull1) { +#ifdef CONFIRM_COMPILATION_ERRORS + // Forbid non-nullptr assignable types + maybe_null_ret> f_ret(std::vector{1}); + maybe_null_ret> f_ret(std::vector{1}); + maybe_null_ret z_ret(10); + maybe_null_dbg> y_dbg({1,2}); + maybe_null_dbg z_dbg(10); + maybe_null_dbg> y_dbg({1,2}); +#endif int n = 5; maybe_null_dbg opt_n(&n); int result = 0; bool threw = false; CHECK_THROW(result = *opt_n, fail_fast); + + maybe_null_ret> x_ret(std::make_shared(10)); // shared_ptr is nullptr assignable + maybe_null_dbg> x_dbg(std::make_shared(10)); // shared_ptr is nullptr assignable } TEST(TestMaybeNull2) diff --git a/tests/notnull_tests.cpp b/tests/notnull_tests.cpp index 008cbb3..7232840 100644 --- a/tests/notnull_tests.cpp +++ b/tests/notnull_tests.cpp @@ -16,6 +16,7 @@ #include #include +#include using namespace Guide; @@ -48,11 +49,18 @@ SUITE(NotNullTests) not_null p; // yay...does not compile! 
std::unique_ptr up = std::make_unique(120); not_null p = up; + + // Forbid non-nullptr assignable types + not_null> f(std::vector{1}); + not_null z(10); + not_null> y({1,2}); #endif int i = 12; auto rp = RefCounted(&i); not_null p(rp); CHECK(p.get() == &i); + + not_null> x(std::make_shared(10)); // shared_ptr is nullptr assignable } TEST(TestNotNullCasting)
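For reference, the effect of the new static_assert can be reduced to a few lines. This is a sketch rather than part of the patch; it assumes the patched gsl.h is on the include path and reuses the namespace Guide and the CONFIRM_COMPILATION_ERRORS switch that the tests above already use.

[code]
#include <memory>
#include <vector>
#include <gsl.h>

using namespace Guide;

int main()
{
    // Accepted: shared_ptr can be assigned nullptr, so the wrappers instantiate.
    not_null<std::shared_ptr<int>>       nn(std::make_shared<int>(10));
    maybe_null_ret<std::shared_ptr<int>> mr(std::make_shared<int>(10));

#ifdef CONFIRM_COMPILATION_ERRORS
    // Rejected at compile time: neither int nor std::vector<int> can be
    // assigned nullptr, so the new static_assert fires with
    // "T cannot be assigned nullptr."
    not_null<int>              bad_int(10);
    not_null<std::vector<int>> bad_vec(std::vector<int>{1});
#endif
}
[/code]

The check is the std::is_assignable-based static_assert added above, so raw and smart pointers pass while plain value types such as int or std::vector<int> are rejected; the test additions in this patch exercise the same cases inside the existing MaybeNullTests and NotNullTests suites.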