diff --git a/stl/inc/memory b/stl/inc/memory
index a651cb96c83..f8c567ef3d3 100644
--- a/stl/inc/memory
+++ b/stl/inc/memory
@@ -1269,11 +1269,47 @@ private:
         _Owner._Call_deleter = false;
     }
 
+#if _HAS_CXX20
+    template <class _Ty0, class... _Types>
+    friend enable_if_t<!is_array_v<_Ty0>, shared_ptr<_Ty0>> make_shared(_Types&&... _Args);
+
+    template <class _Ty0, class _Alloc, class... _Types>
+    friend enable_if_t<!is_array_v<_Ty0>, shared_ptr<_Ty0>> allocate_shared(const _Alloc& _Al_arg, _Types&&... _Args);
+
+    template <class _Ty0>
+    friend enable_if_t<is_unbounded_array_v<_Ty0>, shared_ptr<_Ty0>> make_shared(size_t _Count);
+
+    template <class _Ty0, class _Alloc>
+    friend enable_if_t<is_unbounded_array_v<_Ty0>, shared_ptr<_Ty0>> allocate_shared(
+        const _Alloc& _Al_arg, size_t _Count);
+
+    template <class _Ty0>
+    friend enable_if_t<is_bounded_array_v<_Ty0>, shared_ptr<_Ty0>> make_shared();
+
+    template <class _Ty0, class _Alloc>
+    friend enable_if_t<is_bounded_array_v<_Ty0>, shared_ptr<_Ty0>> allocate_shared(const _Alloc& _Al_arg);
+
+    template <class _Ty0>
+    friend enable_if_t<is_unbounded_array_v<_Ty0>, shared_ptr<_Ty0>> make_shared(
+        size_t _Count, const remove_extent_t<_Ty0>& _Val);
+
+    template <class _Ty0, class _Alloc>
+    friend enable_if_t<is_unbounded_array_v<_Ty0>, shared_ptr<_Ty0>> allocate_shared(
+        const _Alloc& _Al_arg, size_t _Count, const remove_extent_t<_Ty0>& _Val);
+
+    template <class _Ty0>
+    friend enable_if_t<is_bounded_array_v<_Ty0>, shared_ptr<_Ty0>> make_shared(const remove_extent_t<_Ty0>& _Val);
+
+    template <class _Ty0, class _Alloc>
+    friend enable_if_t<is_bounded_array_v<_Ty0>, shared_ptr<_Ty0>> allocate_shared(
+        const _Alloc& _Al_arg, const remove_extent_t<_Ty0>& _Val);
+#else // ^^^ _HAS_CXX20 / !_HAS_CXX20 vvv
     template <class _Ty0, class... _Types>
     friend shared_ptr<_Ty0> make_shared(_Types&&... _Args);
 
     template <class _Ty0, class _Alloc, class... _Types>
     friend shared_ptr<_Ty0> allocate_shared(const _Alloc& _Al_arg, _Types&&... _Args);
+#endif // !_HAS_CXX20
 
     template <class _Ux>
     void _Set_ptr_rep_and_enable_shared(_Ux* const _Px, _Ref_count_base* const _Rx) noexcept { // take ownership of _Px
@@ -1506,6 +1542,10 @@ public:
 
     ~_Ref_count_obj2() {
         // nothing to do, _Storage._Value was already destroyed in _Destroy
+
+        // N4849 [class.dtor]/7:
+        // "A defaulted destructor for a class X is defined as deleted if:
+        // X is a union-like class that has a variant member with a non-trivial destructor"
     }
 
     union {
@@ -1522,6 +1562,303 @@ private:
     }
 };
 
+#if _HAS_CXX20
+template <size_t _Align>
+struct _Alignas_storage_unit {
+    alignas(_Align) char _Space[_Align];
+};
+
+enum class _Check_overflow : bool { _No, _Yes };
+
+template <class _Refc, _Check_overflow _Check>
+_NODISCARD size_t _Calculate_bytes_for_flexible_array(const size_t _Count) noexcept(_Check == _Check_overflow::_No) {
+    constexpr size_t _Align = alignof(_Refc);
+
+    size_t _Bytes = sizeof(_Refc); // contains storage for one element
+
+    if (_Count > 1) {
+        constexpr size_t _Element_size = sizeof(typename _Refc::_Element_type);
+
+        size_t _Extra_bytes;
+
+        if constexpr (_Check == _Check_overflow::_Yes) {
+            _Extra_bytes = _Get_size_of_n<_Element_size>(_Count - 1); // check multiplication overflow
+
+            if (_Extra_bytes > static_cast<size_t>(-1) - _Bytes - (_Align - 1)) { // assume worst case adjustment
+                _Throw_bad_array_new_length(); // addition overflow
+            }
+        } else {
+            _Extra_bytes = _Element_size * (_Count - 1);
+        }
+
+        _Bytes += _Extra_bytes;
+
+        _Bytes = (_Bytes + _Align - 1) & ~(_Align - 1);
+    }
+
+#ifdef _ENABLE_STL_INTERNAL_CHECK
+    using _Storage = _Alignas_storage_unit<_Align>;
+    _STL_INTERNAL_CHECK(_Bytes % sizeof(_Storage) == 0);
+#endif // _ENABLE_STL_INTERNAL_CHECK
+
+    return _Bytes;
+}
+
+template <class _Refc>
+_NODISCARD _Refc* _Allocate_flexible_array(const size_t _Count) {
+    const size_t _Bytes     = _Calculate_bytes_for_flexible_array<_Refc, _Check_overflow::_Yes>(_Count);
+    constexpr size_t _Align = alignof(_Refc);
+    if constexpr (_Align <= __STDCPP_DEFAULT_NEW_ALIGNMENT__) {
+        return static_cast<_Refc*>(::operator new(_Bytes));
+    } else {
+        return static_cast<_Refc*>(::operator new (_Bytes, align_val_t{_Align}));
+    }
+}
+
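[Editorial note: the size computation above reserves the control block (which already embeds storage for one element), adds element_size * (count - 1) for the remaining elements, and rounds up to a multiple of the block's alignment. A minimal standalone sketch of the same arithmetic, using hypothetical numbers (a 24-byte header, 8-byte elements, 8-byte alignment), not part of the diff:

    #include <cstddef>
    #include <cstdio>

    int main() {
        constexpr std::size_t align        = 8;  // alignof(control block), assumed
        constexpr std::size_t header_bytes = 24; // sizeof(control block), includes one element
        constexpr std::size_t element_size = 8;  // e.g. sizeof(double)

        const std::size_t count = 5; // as if for make_shared<double[]>(5)
        std::size_t bytes = header_bytes;
        if (count > 1) {
            bytes += element_size * (count - 1);        // 24 + 32 == 56
            bytes = (bytes + align - 1) & ~(align - 1); // round up; 56 is already a multiple of 8
        }
        std::printf("%zu\n", bytes); // prints 56
        return 0;
    }
]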
+template <class _Refc>
+void _Deallocate_flexible_array(_Refc* const _Ptr) noexcept {
+    constexpr size_t _Align = alignof(_Refc);
+    if constexpr (_Align <= __STDCPP_DEFAULT_NEW_ALIGNMENT__) {
+        ::operator delete(static_cast<void*>(_Ptr));
+    } else {
+        ::operator delete (static_cast<void*>(_Ptr), align_val_t{_Align});
+    }
+}
+
+template <class _NoThrowIt>
+struct _Uninitialized_rev_destroying_backout {
+    // struct to undo partially constructed ranges in _Uninitialized_xxx algorithms
+    _NoThrowIt _First;
+    _NoThrowIt _Last;
+
+    explicit _Uninitialized_rev_destroying_backout(_NoThrowIt _Dest) noexcept
+        : _First(_Dest), _Last(_Dest) {} // TRANSITION, P1771R1 [[nodiscard]] For Constructors
+
+    _Uninitialized_rev_destroying_backout(const _Uninitialized_rev_destroying_backout&) = delete;
+    _Uninitialized_rev_destroying_backout& operator=(const _Uninitialized_rev_destroying_backout&) = delete;
+
+    ~_Uninitialized_rev_destroying_backout() {
+        while (_Last != _First) {
+            --_Last;
+            _STD destroy_at(_STD addressof(*_Last));
+        }
+    }
+
+    template <class... _Types>
+    void _Emplace_back(_Types&&... _Vals) { // construct a new element at *_Last and increment
+        _Construct_in_place(*_Last, _STD forward<_Types>(_Vals)...);
+        ++_Last;
+    }
+
+    _NoThrowIt _Release() noexcept { // suppress any exception handling backout and return _Last
+        _First = _Last;
+        return _Last;
+    }
+};
+
+template <class _Ty>
+void _Reverse_destroy_multidimensional_n(_Ty* const _Arr, size_t _Size) noexcept {
+    while (_Size > 0) {
+        --_Size;
+        if constexpr (is_array_v<_Ty>) {
+            _Reverse_destroy_multidimensional_n(_Arr[_Size], extent_v<_Ty>);
+        } else {
+            _Destroy_in_place(_Arr[_Size]);
+        }
+    }
+}
+
+template <class _Ty>
+struct _Reverse_destroy_multidimensional_n_guard {
+    _Ty* _Target;
+    size_t _Index;
+
+    ~_Reverse_destroy_multidimensional_n_guard() {
+        if (_Target) {
+            _Reverse_destroy_multidimensional_n(_Target, _Index);
+        }
+    }
+};
+
+template <class _Ty, size_t _Size>
+void _Uninitialized_copy_multidimensional(const _Ty (&_In)[_Size], _Ty (&_Out)[_Size]) {
+    if constexpr (is_trivial_v<_Ty>) {
+        _Copy_memmove(_In, _In + _Size, _Out);
+    } else if constexpr (is_array_v<_Ty>) {
+        _Reverse_destroy_multidimensional_n_guard<_Ty> _Guard{_Out, 0};
+        for (size_t& _Idx = _Guard._Index; _Idx < _Size; ++_Idx) {
+            _Uninitialized_copy_multidimensional(_In[_Idx], _Out[_Idx]);
+        }
+        _Guard._Target = nullptr;
+    } else {
+        _Uninitialized_rev_destroying_backout _Backout{_Out};
+        for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
+            _Backout._Emplace_back(_In[_Idx]);
+        }
+        _Backout._Release();
+    }
+}
+
+template <class _Ty>
+void _Uninitialized_value_construct_multidimensional_n(_Ty* const _Out, const size_t _Size) {
+    using _Item = remove_all_extents_t<_Ty>;
+    if constexpr (_Use_memset_value_construct_v<_Item*>) {
+        _Zero_range(_Out, _Out + _Size);
+    } else if constexpr (is_array_v<_Ty>) {
+        _Reverse_destroy_multidimensional_n_guard<_Ty> _Guard{_Out, 0};
+        for (size_t& _Idx = _Guard._Index; _Idx < _Size; ++_Idx) {
+            _Uninitialized_value_construct_multidimensional_n(_Out[_Idx], extent_v<_Ty>);
+        }
+        _Guard._Target = nullptr;
+    } else {
+        _Uninitialized_rev_destroying_backout _Backout{_Out};
+        for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
+            _Backout._Emplace_back();
+        }
+        _Backout._Release();
+    }
+}
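[Editorial note: the backout and guard types above implement the usual strong-exception-safety idiom for bulk construction — build elements left to right, and if any constructor throws, destroy the already-constructed prefix in reverse order before rethrowing. A simplified standalone sketch of the idea (plain placement new, no multidimensional handling), not part of the diff:

    #include <cstddef>
    #include <new>

    template <class T>
    void construct_n(T* first, std::size_t n) { // value-initialize n elements, strong guarantee
        std::size_t built = 0;
        try {
            for (; built < n; ++built) {
                ::new (static_cast<void*>(first + built)) T();
            }
        } catch (...) {
            while (built > 0) { // reverse destruction, mirroring the backout above
                --built;
                first[built].~T();
            }
            throw;
        }
    }
]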
+
+template <class _Ty>
+void _Uninitialized_fill_multidimensional_n(_Ty* const _Out, const size_t _Size, const _Ty& _Val) {
+    if constexpr (is_array_v<_Ty>) {
+        _Reverse_destroy_multidimensional_n_guard<_Ty> _Guard{_Out, 0};
+        for (size_t& _Idx = _Guard._Index; _Idx < _Size; ++_Idx) {
+            _Uninitialized_copy_multidimensional(_Val, _Out[_Idx]); // intentionally copy, not fill
+        }
+        _Guard._Target = nullptr;
+    } else if constexpr (_Fill_memset_is_safe<_Ty*, _Ty>) {
+        _CSTD memset(_Out, static_cast<unsigned char>(_Val), _Size);
+    } else {
+        _Uninitialized_rev_destroying_backout _Backout{_Out};
+        for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
+            _Backout._Emplace_back(_Val);
+        }
+        _Backout._Release();
+    }
+}
+
+// CLASS TEMPLATE _Ref_count_unbounded_array
+template <class _Ty, bool = is_trivially_destructible_v<remove_extent_t<_Ty>>>
+class _Ref_count_unbounded_array : public _Ref_count_base {
+    // handle reference counting for unbounded array with trivial destruction in control block, no allocator
+public:
+    static_assert(is_unbounded_array_v<_Ty>);
+
+    using _Element_type = remove_extent_t<_Ty>;
+
+    explicit _Ref_count_unbounded_array(const size_t _Count) : _Ref_count_base() {
+        _Uninitialized_value_construct_multidimensional_n(_Get_ptr(), _Count);
+    }
+
+    explicit _Ref_count_unbounded_array(const size_t _Count, const _Element_type& _Val) : _Ref_count_base() {
+        _Uninitialized_fill_multidimensional_n(_Get_ptr(), _Count, _Val);
+    }
+
+    _NODISCARD auto _Get_ptr() noexcept {
+        return _STD addressof(_Storage._Value);
+    }
+
+private:
+    union {
+        _Wrap<_Element_type> _Storage; // flexible array must be last member
+    };
+
+    ~_Ref_count_unbounded_array() {
+        // nothing to do, _Ty is trivially destructible
+
+        // See N4849 [class.dtor]/7.
+    }
+
+    virtual void _Destroy() noexcept override { // destroy managed resource
+        // nothing to do, _Ty is trivially destructible
+    }
+
+    virtual void _Delete_this() noexcept override { // destroy self
+        this->~_Ref_count_unbounded_array();
+        _Deallocate_flexible_array(this);
+    }
+};
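[Editorial note: the anonymous union around _Storage is what lets the control block defer element destruction to _Destroy — a variant member is never destroyed implicitly, which is also why each control block must define its own destructor per the N4849 [class.dtor]/7 rule quoted earlier. A minimal illustration of that rule, outside the diff:

    #include <string>

    struct Holder {
        union {
            std::string value; // variant member with a non-trivial destructor
        };
        Holder() : value("lives in a union") {}
        // "~Holder() = default;" would be implicitly deleted; destruction must be manual:
        ~Holder() { value.~basic_string(); }
    };
]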
+
+template <class _Ty>
+class _Ref_count_unbounded_array<_Ty, false> : public _Ref_count_base {
+    // handle reference counting for unbounded array with non-trivial destruction in control block, no allocator
+public:
+    static_assert(is_unbounded_array_v<_Ty>);
+
+    using _Element_type = remove_extent_t<_Ty>;
+
+    explicit _Ref_count_unbounded_array(const size_t _Count) : _Ref_count_base(), _Size(_Count) {
+        _Uninitialized_value_construct_multidimensional_n(_Get_ptr(), _Size);
+    }
+
+    explicit _Ref_count_unbounded_array(const size_t _Count, const _Element_type& _Val)
+        : _Ref_count_base(), _Size(_Count) {
+        _Uninitialized_fill_multidimensional_n(_Get_ptr(), _Size, _Val);
+    }
+
+    _NODISCARD auto _Get_ptr() noexcept {
+        return _STD addressof(_Storage._Value);
+    }
+
+private:
+    size_t _Size;
+
+    union {
+        _Wrap<_Element_type> _Storage; // flexible array must be last member
+    };
+
+    ~_Ref_count_unbounded_array() {
+        // nothing to do, _Storage was already destroyed in _Destroy
+
+        // See N4849 [class.dtor]/7.
+    }
+
+    virtual void _Destroy() noexcept override { // destroy managed resource
+        _Reverse_destroy_multidimensional_n(_Get_ptr(), _Size);
+    }
+
+    virtual void _Delete_this() noexcept override { // destroy self
+        this->~_Ref_count_unbounded_array();
+        _Deallocate_flexible_array(this);
+    }
+};
+
+// CLASS TEMPLATE _Ref_count_bounded_array
+template <class _Ty>
+class _Ref_count_bounded_array : public _Ref_count_base {
+    // handle reference counting for bounded array in control block, no allocator
+public:
+    static_assert(is_bounded_array_v<_Ty>);
+
+    _Ref_count_bounded_array() : _Ref_count_base(), _Storage() {} // value-initializing _Storage is necessary here
+
+    explicit _Ref_count_bounded_array(const remove_extent_t<_Ty>& _Val)
+        : _Ref_count_base() { // don't value-initialize _Storage
+        _Uninitialized_fill_multidimensional_n(_Storage._Value, extent_v<_Ty>, _Val);
+    }
+
+    union {
+        _Wrap<_Ty> _Storage;
+    };
+
+private:
+    ~_Ref_count_bounded_array() {
+        // nothing to do, _Storage was already destroyed in _Destroy
+
+        // See N4849 [class.dtor]/7.
+    }
+
+    virtual void _Destroy() noexcept override { // destroy managed resource
+        _Destroy_in_place(_Storage); // not _Storage._Value, see N4849 [expr.prim.id.dtor]
+    }
+
+    virtual void _Delete_this() noexcept override { // destroy self
+        delete this;
+    }
+};
+#endif // _HAS_CXX20
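[Editorial note: _Destroy above destroys the wrapper _Storage rather than _Storage._Value because _Ty is an array type here, and an explicit destructor call cannot name an array (N4849 [expr.prim.id.dtor]); destroying the wrapper destroys its array member as part of ordinary member destruction. A small illustration, outside the diff, assuming the pointed-to object's lifetime is managed manually:

    #include <string>

    struct Wrap {
        std::string value[3];
    };

    void destroy(Wrap* w) {
        w->~Wrap(); // OK: destroys all three strings as members of Wrap
        // w->value.~...(); // ill-formed: there is no destructor syntax that names an array
    }
]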
+
 // CLASS TEMPLATE _Ebco_base
 template <class _Ty, bool = is_empty_v<_Ty> && !is_final_v<_Ty>>
@@ -1562,17 +1899,22 @@ protected:
     }
 };
 
-// CLASS TEMPLATE _Ref_count_obj_alloc2
+// CLASS TEMPLATE _Ref_count_obj_alloc3
 template <class _Ty, class _Alloc>
-class __declspec(empty_bases) _Ref_count_obj_alloc2 : public _Ebco_base<_Alloc>, public _Ref_count_base {
+class __declspec(empty_bases) _Ref_count_obj_alloc3 : public _Ebco_base<_Rebind_alloc_t<_Alloc, _Ty>>,
+                                                      public _Ref_count_base {
     // handle reference counting for object in control block, allocator
+private:
+    static_assert(is_same_v<_Ty, remove_cv_t<_Ty>>, "allocate_shared should remove_cv_t");
+
+    using _Rebound = _Rebind_alloc_t<_Alloc, _Ty>;
+
 public:
     template <class... _Types>
-    explicit _Ref_count_obj_alloc2(const _Alloc& _Al_arg, _Types&&... _Args)
-        : _Ebco_base<_Alloc>(_Al_arg), _Ref_count_base() {
-        _Maybe_rebind_alloc_t<_Alloc, _Ty> _Alty(this->_Get_val());
-        allocator_traits<_Rebind_alloc_t<_Alloc, _Ty>>::construct(
-            _Alty, _STD addressof(_Storage._Value), _STD forward<_Types>(_Args)...);
+    explicit _Ref_count_obj_alloc3(const _Alloc& _Al_arg, _Types&&... _Args)
+        : _Ebco_base<_Rebound>(_Al_arg), _Ref_count_base() {
+        allocator_traits<_Rebound>::construct(
+            this->_Get_val(), _STD addressof(_Storage._Value), _STD forward<_Types>(_Args)...);
     }
 
     union {
@@ -1580,37 +1922,350 @@ public:
     };
 
 private:
-    ~_Ref_count_obj_alloc2() {
+    ~_Ref_count_obj_alloc3() {
         // nothing to do; _Storage._Value already destroyed by _Destroy()
+
+        // See N4849 [class.dtor]/7.
     }
 
     virtual void _Destroy() noexcept override { // destroy managed resource
-        _Maybe_rebind_alloc_t<_Alloc, _Ty> _Alty(this->_Get_val());
-        allocator_traits<_Rebind_alloc_t<_Alloc, _Ty>>::destroy(_Alty, _STD addressof(_Storage._Value));
+        allocator_traits<_Rebound>::destroy(this->_Get_val(), _STD addressof(_Storage._Value));
     }
 
     virtual void _Delete_this() noexcept override { // destroy self
-        _Rebind_alloc_t<_Alloc, _Ref_count_obj_alloc2> _Al(this->_Get_val());
-        this->~_Ref_count_obj_alloc2();
+        _Rebind_alloc_t<_Alloc, _Ref_count_obj_alloc3> _Al(this->_Get_val());
+        this->~_Ref_count_obj_alloc3();
         _Deallocate_plain(_Al, this);
     }
 };
 
+#if _HAS_CXX20
+template <class _Alloc>
+class _Uninitialized_rev_destroying_backout_al {
+    // class to undo partially constructed ranges in _Uninitialized_xxx_al algorithms
+
+private:
+    using pointer = _Alloc_ptr_t<_Alloc>;
+
+public:
+    _Uninitialized_rev_destroying_backout_al(pointer _Dest, _Alloc& _Al_) noexcept
+        : _First(_Dest), _Last(_Dest), _Al(_Al_) {} // TRANSITION, P1771R1 [[nodiscard]] For Constructors
+
+    _Uninitialized_rev_destroying_backout_al(const _Uninitialized_rev_destroying_backout_al&) = delete;
+    _Uninitialized_rev_destroying_backout_al& operator=(const _Uninitialized_rev_destroying_backout_al&) = delete;
+
+    ~_Uninitialized_rev_destroying_backout_al() {
+        while (_Last != _First) {
+            --_Last;
+            allocator_traits<_Alloc>::destroy(_Al, _Last);
+        }
+    }
+
+    template <class... _Types>
+    void _Emplace_back(_Types&&... _Vals) { // construct a new element at *_Last and increment
+        allocator_traits<_Alloc>::construct(_Al, _Unfancy(_Last), _STD forward<_Types>(_Vals)...);
+        ++_Last;
+    }
+
+    pointer _Release() noexcept { // suppress any exception handling backout and return _Last
+        _First = _Last;
+        return _Last;
+    }
+
+private:
+    pointer _First;
+    pointer _Last;
+    _Alloc& _Al;
+};
+
+template <class _Ty, class _Alloc>
+void _Reverse_destroy_multidimensional_n_al(_Ty* const _Arr, size_t _Size, _Alloc& _Al) noexcept {
+    while (_Size > 0) {
+        --_Size;
+        if constexpr (is_array_v<_Ty>) {
+            _Reverse_destroy_multidimensional_n_al(_Arr[_Size], extent_v<_Ty>, _Al);
+        } else {
+            allocator_traits<_Alloc>::destroy(_Al, _Arr + _Size);
+        }
+    }
+}
+
+template <class _Ty, class _Alloc>
+struct _Reverse_destroy_multidimensional_n_al_guard {
+    _Ty* _Target;
+    size_t _Index;
+    _Alloc& _Al;
+
+    ~_Reverse_destroy_multidimensional_n_al_guard() {
+        if (_Target) {
+            _Reverse_destroy_multidimensional_n_al(_Target, _Index, _Al);
+        }
+    }
+};
+
+template <class _Ty, size_t _Size, class _Alloc>
+void _Uninitialized_copy_multidimensional_al(const _Ty (&_In)[_Size], _Ty (&_Out)[_Size], _Alloc& _Al) {
+    using _Item = remove_all_extents_t<_Ty>;
+    if constexpr (conjunction_v<is_trivial<_Ty>, _Uses_default_construct<_Alloc, _Item*, const _Item&>>) {
+        (void) _Al;
+        _Copy_memmove(_In, _In + _Size, _Out);
+    } else if constexpr (is_array_v<_Ty>) {
+        _Reverse_destroy_multidimensional_n_al_guard<_Ty, _Alloc> _Guard{_Out, 0, _Al};
+        for (size_t& _Idx = _Guard._Index; _Idx < _Size; ++_Idx) {
+            _Uninitialized_copy_multidimensional_al(_In[_Idx], _Out[_Idx], _Al);
+        }
+        _Guard._Target = nullptr;
+    } else {
+        _Uninitialized_rev_destroying_backout_al _Backout{_Out, _Al};
+        for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
+            _Backout._Emplace_back(_In[_Idx]);
+        }
+        _Backout._Release();
+    }
+}
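[Editorial note: the _al helpers mirror the plain ones, but route every construction and destruction through allocator_traits, so a user allocator's construct/destroy customizations are observed for each array element. A simplified standalone sketch of that idea, not part of the diff:

    #include <cstddef>
    #include <memory>

    template <class Alloc>
    void construct_n_al(typename std::allocator_traits<Alloc>::value_type* first,
                        std::size_t n, Alloc& al) {
        using traits = std::allocator_traits<Alloc>;
        std::size_t built = 0;
        try {
            for (; built < n; ++built) {
                traits::construct(al, first + built); // honors allocator customizations
            }
        } catch (...) {
            while (built > 0) { // reverse order, through the allocator as well
                --built;
                traits::destroy(al, first + built);
            }
            throw;
        }
    }
]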
+
+template <class _Ty, class _Alloc>
+void _Uninitialized_value_construct_multidimensional_n_al(_Ty* const _Out, const size_t _Size, _Alloc& _Al) {
+    using _Item = remove_all_extents_t<_Ty>;
+    if constexpr (_Use_memset_value_construct_v<_Item*> && _Uses_default_construct<_Alloc, _Item*>::value) {
+        (void) _Al;
+        _Zero_range(_Out, _Out + _Size);
+    } else if constexpr (is_array_v<_Ty>) {
+        _Reverse_destroy_multidimensional_n_al_guard<_Ty, _Alloc> _Guard{_Out, 0, _Al};
+        for (size_t& _Idx = _Guard._Index; _Idx < _Size; ++_Idx) {
+            _Uninitialized_value_construct_multidimensional_n_al(_Out[_Idx], extent_v<_Ty>, _Al);
+        }
+        _Guard._Target = nullptr;
+    } else {
+        _Uninitialized_rev_destroying_backout_al _Backout{_Out, _Al};
+        for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
+            _Backout._Emplace_back();
+        }
+        _Backout._Release();
+    }
+}
+
+template <class _Ty, class _Alloc>
+void _Uninitialized_fill_multidimensional_n_al(_Ty* const _Out, const size_t _Size, const _Ty& _Val, _Alloc& _Al) {
+    if constexpr (is_array_v<_Ty>) {
+        _Reverse_destroy_multidimensional_n_al_guard<_Ty, _Alloc> _Guard{_Out, 0, _Al};
+        for (size_t& _Idx = _Guard._Index; _Idx < _Size; ++_Idx) {
+            _Uninitialized_copy_multidimensional_al(_Val, _Out[_Idx], _Al); // intentionally copy, not fill
+        }
+        _Guard._Target = nullptr;
+    } else if constexpr (_Fill_memset_is_safe<_Ty*, _Ty> && _Uses_default_construct<_Alloc, _Ty*, const _Ty&>::value) {
+        (void) _Al;
+        _CSTD memset(_Out, static_cast<unsigned char>(_Val), _Size);
+    } else {
+        _Uninitialized_rev_destroying_backout_al _Backout{_Out, _Al};
+        for (size_t _Idx = 0; _Idx < _Size; ++_Idx) {
+            _Backout._Emplace_back(_Val);
+        }
+        _Backout._Release();
+    }
+}
+
+// CLASS TEMPLATE _Ref_count_unbounded_array_alloc
+template <class _Ty, class _Alloc>
+class __declspec(empty_bases) _Ref_count_unbounded_array_alloc
+    : public _Ebco_base<_Rebind_alloc_t<_Alloc, remove_all_extents_t<_Ty>>>,
+      public _Ref_count_base {
+    // handle reference counting for unbounded array in control block, allocator
+private:
+    static_assert(is_unbounded_array_v<_Ty>);
+    static_assert(is_same_v<_Ty, remove_cv_t<_Ty>>, "allocate_shared should remove_cv_t");
+
+    using _Item    = remove_all_extents_t<_Ty>;
+    using _Rebound = _Rebind_alloc_t<_Alloc, _Item>;
+
+public:
+    using _Element_type = remove_extent_t<_Ty>;
+
+    explicit _Ref_count_unbounded_array_alloc(const _Alloc& _Al_arg, const size_t _Count)
+        : _Ebco_base<_Rebound>(_Al_arg), _Ref_count_base(), _Size(_Count) {
+        _Uninitialized_value_construct_multidimensional_n_al(_Get_ptr(), _Size, this->_Get_val());
+    }
+
+    explicit _Ref_count_unbounded_array_alloc(const _Alloc& _Al_arg, const size_t _Count, const _Element_type& _Val)
+        : _Ebco_base<_Rebound>(_Al_arg), _Ref_count_base(), _Size(_Count) {
+        _Uninitialized_fill_multidimensional_n_al(_Get_ptr(), _Size, _Val, this->_Get_val());
+    }
+
+    _NODISCARD auto _Get_ptr() noexcept {
+        return _STD addressof(_Storage._Value);
+    }
+
+private:
+    size_t _Size;
+
+    union {
+        _Wrap<_Element_type> _Storage; // flexible array must be last member
+    };
+
+    ~_Ref_count_unbounded_array_alloc() {
+        // nothing to do; _Storage._Value already destroyed by _Destroy()
+
+        // See N4849 [class.dtor]/7.
+    }
+
+    virtual void _Destroy() noexcept override { // destroy managed resource
+        if constexpr (!conjunction_v<is_trivially_destructible<_Item>, _Uses_default_destroy<_Rebound, _Item*>>) {
+            _Reverse_destroy_multidimensional_n_al(_Get_ptr(), _Size, this->_Get_val());
+        }
+    }
+
+    virtual void _Delete_this() noexcept override { // destroy self
+        constexpr size_t _Align = alignof(_Ref_count_unbounded_array_alloc);
+        using _Storage          = _Alignas_storage_unit<_Align>;
+
+        _Rebind_alloc_t<_Alloc, _Storage> _Al(this->_Get_val());
+        const size_t _Bytes =
+            _Calculate_bytes_for_flexible_array<_Ref_count_unbounded_array_alloc, _Check_overflow::_No>(_Size);
+        const size_t _Storage_units = _Bytes / sizeof(_Storage);
+
+        this->~_Ref_count_unbounded_array_alloc();
+
+        _Al.deallocate(reinterpret_cast<_Storage*>(this), _Storage_units);
+    }
+};
+
+// CLASS TEMPLATE _Ref_count_bounded_array_alloc
+template <class _Ty, class _Alloc>
+class __declspec(empty_bases) _Ref_count_bounded_array_alloc
+    : public _Ebco_base<_Rebind_alloc_t<_Alloc, remove_all_extents_t<_Ty>>>,
+      public _Ref_count_base {
+    // handle reference counting for bounded array in control block, allocator
+private:
+    static_assert(is_bounded_array_v<_Ty>);
+    static_assert(is_same_v<_Ty, remove_cv_t<_Ty>>, "allocate_shared should remove_cv_t");
+
+    using _Item    = remove_all_extents_t<_Ty>;
+    using _Rebound = _Rebind_alloc_t<_Alloc, _Item>;
+
+public:
+    explicit _Ref_count_bounded_array_alloc(const _Alloc& _Al_arg)
+        : _Ebco_base<_Rebound>(_Al_arg), _Ref_count_base() { // don't value-initialize _Storage
+        _Uninitialized_value_construct_multidimensional_n_al(_Storage._Value, extent_v<_Ty>, this->_Get_val());
+    }
+
+    explicit _Ref_count_bounded_array_alloc(const _Alloc& _Al_arg, const remove_extent_t<_Ty>& _Val)
+        : _Ebco_base<_Rebound>(_Al_arg), _Ref_count_base() { // don't value-initialize _Storage
+        _Uninitialized_fill_multidimensional_n_al(_Storage._Value, extent_v<_Ty>, _Val, this->_Get_val());
+    }
+
+    union {
+        _Wrap<_Ty> _Storage;
+    };
+
+private:
+    ~_Ref_count_bounded_array_alloc() {
+        // nothing to do; _Storage._Value already destroyed by _Destroy()
+
+        // See N4849 [class.dtor]/7.
+    }
+
+    virtual void _Destroy() noexcept override { // destroy managed resource
+        if constexpr (!conjunction_v<is_trivially_destructible<_Item>, _Uses_default_destroy<_Rebound, _Item*>>) {
+            _Reverse_destroy_multidimensional_n_al(_Storage._Value, extent_v<_Ty>, this->_Get_val());
+        }
+    }
+
+    virtual void _Delete_this() noexcept override { // destroy self
+        _Rebind_alloc_t<_Alloc, _Ref_count_bounded_array_alloc> _Al(this->_Get_val());
+        this->~_Ref_count_bounded_array_alloc();
+        _Deallocate_plain(_Al, this);
+    }
+};
+#endif // _HAS_CXX20
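[Editorial note: _Delete_this above cannot hand raw bytes back to the allocator — allocators deallocate objects of their value_type — so the control block rebinds to _Alignas_storage_unit<_Align> (an aligned chunk exactly _Align bytes wide) and converts the rounded byte count into a whole number of chunks. A sketch of that conversion with hypothetical numbers, not part of the diff:

    #include <cstddef>

    struct alignas(32) Chunk { // stand-in for _Alignas_storage_unit<32>
        char space[32];
    };

    int main() {
        constexpr std::size_t bytes         = 96;                    // a rounded control-block size
        constexpr std::size_t storage_units = bytes / sizeof(Chunk); // 3 chunks
        static_assert(storage_units * sizeof(Chunk) == bytes,
            "exact because the byte count was rounded up to a multiple of the alignment");
        return 0;
    }
]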
+
 // FUNCTION TEMPLATE make_shared
 template <class _Ty, class... _Types>
-_NODISCARD shared_ptr<_Ty> make_shared(_Types&&... _Args) { // make a shared_ptr
+_NODISCARD
+#if _HAS_CXX20
+    enable_if_t<!is_array_v<_Ty>, shared_ptr<_Ty>>
+#else // _HAS_CXX20
+    shared_ptr<_Ty>
+#endif // _HAS_CXX20
+    make_shared(_Types&&... _Args) { // make a shared_ptr to non-array object
     const auto _Rx = new _Ref_count_obj2<_Ty>(_STD forward<_Types>(_Args)...);
     shared_ptr<_Ty> _Ret;
     _Ret._Set_ptr_rep_and_enable_shared(_STD addressof(_Rx->_Storage._Value), _Rx);
     return _Ret;
 }
 
+#if _HAS_CXX20
+template <class _Refc>
+struct _Global_delete_guard {
+    _Refc* _Target;
+
+    ~_Global_delete_guard() {
+        // While this branch is technically unnecessary because N4849 [new.delete.single]/17 requires
+        // `::operator delete(nullptr)` to be a no-op, it's here to help optimizers see that after
+        // `_Guard._Target = nullptr;`, this destructor can be eliminated.
+        if (_Target) {
+            _Deallocate_flexible_array(_Target);
+        }
+    }
+};
+
+template <class _Ty>
+_NODISCARD enable_if_t<is_unbounded_array_v<_Ty>, shared_ptr<_Ty>> make_shared(const size_t _Count) {
+    // make a shared_ptr to an unbounded array
+    using _Refc    = _Ref_count_unbounded_array<_Ty>;
+    const auto _Rx = _Allocate_flexible_array<_Refc>(_Count);
+    _Global_delete_guard<_Refc> _Guard{_Rx};
+    ::new (static_cast<void*>(_Rx)) _Refc(_Count);
+    _Guard._Target = nullptr;
+    shared_ptr<_Ty> _Ret;
+    _Ret._Set_ptr_rep_and_enable_shared(_Rx->_Get_ptr(), _Rx);
+    return _Ret;
+}
+
+template <class _Ty>
+_NODISCARD enable_if_t<is_unbounded_array_v<_Ty>, shared_ptr<_Ty>> make_shared(
+    const size_t _Count, const remove_extent_t<_Ty>& _Val) {
+    // make a shared_ptr to an unbounded array
+    using _Refc    = _Ref_count_unbounded_array<_Ty>;
+    const auto _Rx = _Allocate_flexible_array<_Refc>(_Count);
+    _Global_delete_guard<_Refc> _Guard{_Rx};
+    ::new (static_cast<void*>(_Rx)) _Refc(_Count, _Val);
+    _Guard._Target = nullptr;
+    shared_ptr<_Ty> _Ret;
+    _Ret._Set_ptr_rep_and_enable_shared(_Rx->_Get_ptr(), _Rx);
+    return _Ret;
+}
+
+template <class _Ty>
+_NODISCARD enable_if_t<is_bounded_array_v<_Ty>, shared_ptr<_Ty>> make_shared() {
+    // make a shared_ptr to a bounded array
+    const auto _Rx = new _Ref_count_bounded_array<_Ty>();
+    shared_ptr<_Ty> _Ret;
+    _Ret._Set_ptr_rep_and_enable_shared(_Rx->_Storage._Value, _Rx);
+    return _Ret;
+}
+
+template <class _Ty>
+_NODISCARD enable_if_t<is_bounded_array_v<_Ty>, shared_ptr<_Ty>> make_shared(const remove_extent_t<_Ty>& _Val) {
+    // make a shared_ptr to a bounded array
+    const auto _Rx = new _Ref_count_bounded_array<_Ty>(_Val);
+    shared_ptr<_Ty> _Ret;
+    _Ret._Set_ptr_rep_and_enable_shared(_Rx->_Storage._Value, _Rx);
+    return _Ret;
+}
+#endif // _HAS_CXX20
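[Editorial note: usage of the new overloads in C++20 mode, for reference — bounded arrays take no count, unbounded arrays take one, and each form has an optional fill value:

    #include <cassert>
    #include <memory>

    void demo() {
        std::shared_ptr<int[]> a  = std::make_shared<int[]>(10);     // 10 value-initialized ints
        std::shared_ptr<int[]> b  = std::make_shared<int[]>(10, 42); // 10 copies of 42
        std::shared_ptr<int[5]> c = std::make_shared<int[5]>();      // value-initialized
        std::shared_ptr<int[5]> d = std::make_shared<int[5]>(7);     // five copies of 7
        assert(a[9] == 0 && b[9] == 42 && c[4] == 0 && d[4] == 7);
    }
]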
+
 // FUNCTION TEMPLATE allocate_shared
 template <class _Ty, class _Alloc, class... _Types>
-_NODISCARD shared_ptr<_Ty> allocate_shared(const _Alloc& _Al, _Types&&... _Args) { // make a shared_ptr
+_NODISCARD
+#if _HAS_CXX20
+    enable_if_t<!is_array_v<_Ty>, shared_ptr<_Ty>>
+#else // _HAS_CXX20
+    shared_ptr<_Ty>
+#endif // _HAS_CXX20
+    allocate_shared(const _Alloc& _Al, _Types&&... _Args) { // make a shared_ptr to non-array object
     // Note: As of 2019-05-28, this implements the proposed resolution of LWG-3210 (which controls whether
     // allocator::construct sees T or const T when _Ty is const qualified)
-    using _Refoa   = _Ref_count_obj_alloc2<remove_cv_t<_Ty>, _Alloc>;
+    using _Refoa   = _Ref_count_obj_alloc3<remove_cv_t<_Ty>, _Alloc>;
     using _Alblock = _Rebind_alloc_t<_Alloc, _Refoa>;
     _Alblock _Rebound(_Al);
     _Alloc_construct_ptr<_Alblock> _Constructor{_Rebound};
@@ -1622,6 +2277,95 @@ _NODISCARD shared_ptr<_Ty> allocate_shared(const _Alloc& _Al, _Types&&... _Args)
     return _Ret;
 }
 
+#if _HAS_CXX20
+template <class _Alloc>
+struct _Allocate_n_ptr {
+    _Alloc& _Al;
+    _Alloc_ptr_t<_Alloc> _Ptr;
+    size_t _Nx;
+
+    _Allocate_n_ptr(_Alloc& _Al_, const size_t _Nx_) : _Al(_Al_), _Ptr(_Al_.allocate(_Nx_)), _Nx(_Nx_) {}
+
+    ~_Allocate_n_ptr() {
+        if (_Ptr) {
+            _Al.deallocate(_Ptr, _Nx);
+        }
+    }
+
+    _Allocate_n_ptr(const _Allocate_n_ptr&) = delete;
+    _Allocate_n_ptr& operator=(const _Allocate_n_ptr&) = delete;
+};
+
+template <class _Ty, class _Alloc>
+_NODISCARD enable_if_t<is_unbounded_array_v<_Ty>, shared_ptr<_Ty>> allocate_shared(
+    const _Alloc& _Al, const size_t _Count) {
+    // make a shared_ptr to an unbounded array
+    using _Refc             = _Ref_count_unbounded_array_alloc<remove_cv_t<_Ty>, _Alloc>;
+    constexpr size_t _Align = alignof(_Refc);
+    using _Storage          = _Alignas_storage_unit<_Align>;
+    _Rebind_alloc_t<_Alloc, _Storage> _Rebound(_Al);
+    const size_t _Bytes         = _Calculate_bytes_for_flexible_array<_Refc, _Check_overflow::_Yes>(_Count);
+    const size_t _Storage_units = _Bytes / sizeof(_Storage);
+    _Allocate_n_ptr _Guard{_Rebound, _Storage_units};
+    const auto _Rx = reinterpret_cast<_Refc*>(_Unfancy(_Guard._Ptr));
+    ::new (static_cast<void*>(_Rx)) _Refc(_Al, _Count);
+    _Guard._Ptr = nullptr;
+    shared_ptr<_Ty> _Ret;
+    _Ret._Set_ptr_rep_and_enable_shared(_Rx->_Get_ptr(), _Rx);
+    return _Ret;
+}
+
+template <class _Ty, class _Alloc>
+_NODISCARD enable_if_t<is_unbounded_array_v<_Ty>, shared_ptr<_Ty>> allocate_shared(
+    const _Alloc& _Al, const size_t _Count, const remove_extent_t<_Ty>& _Val) {
+    // make a shared_ptr to an unbounded array
+    using _Refc             = _Ref_count_unbounded_array_alloc<remove_cv_t<_Ty>, _Alloc>;
+    constexpr size_t _Align = alignof(_Refc);
+    using _Storage          = _Alignas_storage_unit<_Align>;
+    _Rebind_alloc_t<_Alloc, _Storage> _Rebound(_Al);
+    const size_t _Bytes         = _Calculate_bytes_for_flexible_array<_Refc, _Check_overflow::_Yes>(_Count);
+    const size_t _Storage_units = _Bytes / sizeof(_Storage);
+    _Allocate_n_ptr _Guard{_Rebound, _Storage_units};
+    const auto _Rx = reinterpret_cast<_Refc*>(_Unfancy(_Guard._Ptr));
+    ::new (static_cast<void*>(_Rx)) _Refc(_Al, _Count, _Val);
+    _Guard._Ptr = nullptr;
+    shared_ptr<_Ty> _Ret;
+    _Ret._Set_ptr_rep_and_enable_shared(_Rx->_Get_ptr(), _Rx);
+    return _Ret;
+}
+
+template <class _Ty, class _Alloc>
+_NODISCARD enable_if_t<is_bounded_array_v<_Ty>, shared_ptr<_Ty>> allocate_shared(const _Alloc& _Al) {
+    // make a shared_ptr to a bounded array
+    using _Refc    = _Ref_count_bounded_array_alloc<remove_cv_t<_Ty>, _Alloc>;
+    using _Alblock = _Rebind_alloc_t<_Alloc, _Refc>;
+    _Alblock _Rebound(_Al);
+    _Alloc_construct_ptr _Constructor{_Rebound};
+    _Constructor._Allocate();
+    ::new (static_cast<void*>(_Unfancy(_Constructor._Ptr))) _Refc(_Al);
+    shared_ptr<_Ty> _Ret;
+    const auto _Ptr = static_cast<remove_extent_t<_Ty>*>(_Constructor._Ptr->_Storage._Value);
+    _Ret._Set_ptr_rep_and_enable_shared(_Ptr, _Unfancy(_Constructor._Release()));
+    return _Ret;
+}
+
+template <class _Ty, class _Alloc>
+_NODISCARD enable_if_t<is_bounded_array_v<_Ty>, shared_ptr<_Ty>> allocate_shared(
+    const _Alloc& _Al, const remove_extent_t<_Ty>& _Val) {
+    // make a shared_ptr to a bounded array
+    using _Refc    = _Ref_count_bounded_array_alloc<remove_cv_t<_Ty>, _Alloc>;
+    using _Alblock = _Rebind_alloc_t<_Alloc, _Refc>;
+    _Alblock _Rebound(_Al);
+    _Alloc_construct_ptr _Constructor{_Rebound};
+    _Constructor._Allocate();
+    ::new (static_cast<void*>(_Unfancy(_Constructor._Ptr))) _Refc(_Al, _Val);
+    shared_ptr<_Ty> _Ret;
+    const auto _Ptr = static_cast<remove_extent_t<_Ty>*>(_Constructor._Ptr->_Storage._Value);
+    _Ret._Set_ptr_rep_and_enable_shared(_Ptr, _Unfancy(_Constructor._Release()));
+    return _Ret;
+}
+#endif // _HAS_CXX20
+
 // CLASS TEMPLATE weak_ptr
 template <class _Ty>
 class weak_ptr : public _Ptr_base<_Ty> { // class for pointer to reference counted resource
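[Editorial note: the allocator flavor follows the same pattern as the make_shared overloads, with the allocator as the leading argument:

    #include <cassert>
    #include <memory>

    void demo_alloc() {
        std::allocator<int> al;
        std::shared_ptr<int[]> p  = std::allocate_shared<int[]>(al, 8, -1); // 8 elements, all -1
        std::shared_ptr<int[4]> q = std::allocate_shared<int[4]>(al);       // value-initialized
        assert(p[7] == -1 && q[3] == 0);
    }
]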
diff --git a/stl/inc/yvals_core.h b/stl/inc/yvals_core.h
index af8b115c858..5a10d0579b7 100644
--- a/stl/inc/yvals_core.h
+++ b/stl/inc/yvals_core.h
@@ -155,6 +155,7 @@
 // P0646R1 list/forward_list remove()/remove_if()/unique() Return size_type
 // P0653R2 to_address()
 // P0655R1 visit<R>()
+// P0674R1 make_shared() For Arrays
 // P0758R1 is_nothrow_convertible
 // P0768R1 Library Support For The Spaceship Comparison Operator <=>
 //     (partially implemented)
@@ -1029,7 +1030,6 @@
 #define __cpp_lib_map_try_emplace 201411L
 #define __cpp_lib_nonmember_container_access 201411L
 #define __cpp_lib_shared_mutex 201505L
-#define __cpp_lib_shared_ptr_arrays 201611L
 #define __cpp_lib_transparent_operators 201510L
 #define __cpp_lib_type_trait_variable_templates 201510L
 #define __cpp_lib_uncaught_exceptions 201411L
@@ -1044,8 +1044,7 @@
 #if _HAS_STD_BYTE
 #define __cpp_lib_byte 201603L
 #endif // _HAS_STD_BYTE
-#define __cpp_lib_chrono 201611L
-#define __cpp_lib_clamp  201603L
+#define __cpp_lib_clamp 201603L
 #ifndef _M_CEE
 #define __cpp_lib_execution 201603L
 #endif // _M_CEE
@@ -1074,8 +1073,12 @@
 #define __cpp_lib_string_view 201803L
 #define __cpp_lib_to_chars 201611L
 #define __cpp_lib_variant 201606L
+#endif // _HAS_CXX17
+
+#if _HAS_CXX17
+#define __cpp_lib_chrono 201611L // P0505R0 constexpr For <chrono> (Again)
 #else // _HAS_CXX17
-#define __cpp_lib_chrono 201510L
+#define __cpp_lib_chrono 201510L // P0092R1 <chrono> floor(), ceil(), round(), abs()
 #endif // _HAS_CXX17
 
 // C++20
@@ -1134,6 +1137,12 @@
 #define __cpp_lib_array_constexpr 201803L
 #endif // _HAS_CXX17
 
+#if _HAS_CXX20
+#define __cpp_lib_shared_ptr_arrays 201707L // P0674R1 make_shared() For Arrays
+#else // _HAS_CXX20
+#define __cpp_lib_shared_ptr_arrays 201611L // P0497R0 Fixing shared_ptr For Arrays
+#endif // _HAS_CXX20
+
 // EXPERIMENTAL
 #define __cpp_lib_experimental_erase_if 201411L
 #define __cpp_lib_experimental_filesystem 201406L
diff --git a/tests/std/tests/P0674R1_make_shared_for_arrays/env.lst b/tests/std/tests/P0674R1_make_shared_for_arrays/env.lst
index 19f025bd0e6..642f530ffad 100644
--- a/tests/std/tests/P0674R1_make_shared_for_arrays/env.lst
+++ b/tests/std/tests/P0674R1_make_shared_for_arrays/env.lst
@@ -1,4 +1,4 @@
 # Copyright (c) Microsoft Corporation.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 
-RUNALL_INCLUDE ..\usual_matrix.lst
+RUNALL_INCLUDE ..\usual_latest_matrix.lst
diff --git a/tests/std/tests/P0674R1_make_shared_for_arrays/test.cpp b/tests/std/tests/P0674R1_make_shared_for_arrays/test.cpp
index 785bb84adb2..3a3304571c7 100644
--- a/tests/std/tests/P0674R1_make_shared_for_arrays/test.cpp
+++ b/tests/std/tests/P0674R1_make_shared_for_arrays/test.cpp
@@ -1,12 +1,300 @@
 // Copyright (c) Microsoft Corporation.
 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 
+#include <cassert>
+#include <cstdint>
+#include <cstdlib>
 #include <memory>
+#include <new>
+#include <stdexcept>
+#include <string>
+#include <string_view>
 #include <type_traits>
 #include <utility>
+#include <vector>
 
 using namespace std;
 
+#pragma warning(disable : 28251) // Inconsistent annotation for 'new': this instance has no annotations.
+
+int allocationCount = 0;
+int canCreate       = 10; // Counter to force an exception when constructing a
+                          // sufficiently large ReportAddress array
+
+struct ReportAddress;
+vector<ReportAddress*> ascendingAddressBuffer;
+vector<ReportAddress*> descendingAddressBuffer;
+
+// According to N4849, the default behavior of operator new[](size) is to return
+// operator new(size), so only the latter needs to be replaced.
+void* operator new(size_t size) {
+    void* const p = ::operator new(size, nothrow);
+
+    if (p) {
+        return p;
+    } else {
+        throw bad_alloc();
+    }
+}
+
+void* operator new(size_t size, const nothrow_t&) noexcept {
+    void* const result = malloc(size == 0 ? 1 : size);
+    ++allocationCount;
+
+    return result;
+}
+
+struct InitialValue {
+    int value = 106;
+
+    InitialValue() = default;
+
+    InitialValue(int a, int b) : value(a + b) {}
+};
+
+struct ThreeIntWrap {
+    int v1;
+    int v2;
+    int v3;
+};
+
+struct alignas(32) HighlyAligned {
+    uint64_t a;
+    uint64_t b;
+    uint64_t c;
+    uint64_t d;
+};
+
+struct ReportAddress {
+    ReportAddress() {
+        if (canCreate > 0) {
+            ascendingAddressBuffer.push_back(this);
+            --canCreate;
+        } else {
+            throw runtime_error("Can't create more ReportAddress objects.");
+        }
+    }
+
+    ~ReportAddress() {
+        ++canCreate;
+        descendingAddressBuffer.push_back(this);
+    }
+};
+
+void assert_ascending_init() {
+    for (size_t i = 1; i < ascendingAddressBuffer.size(); ++i) {
+        assert(ascendingAddressBuffer[i - 1] < ascendingAddressBuffer[i]);
+    }
+
+    ascendingAddressBuffer.clear();
+}
+
+void assert_descending_destruct() {
+    for (size_t i = 1; i < descendingAddressBuffer.size(); ++i) {
+        assert(descendingAddressBuffer[i - 1] > descendingAddressBuffer[i]);
+    }
+
+    descendingAddressBuffer.clear();
+}
+
+template <class T>
+void assert_shared_use_get(const shared_ptr<T>& sp) {
+    assert(sp.use_count() == 1);
+    assert(sp.get() != nullptr);
+}
+
+template <class T, class... Args>
+shared_ptr<T> make_shared_assert(Args&&... vals) {
+    int count        = allocationCount;
+    shared_ptr<T> sp = make_shared<T>(forward<Args>(vals)...);
+    assert_shared_use_get(sp);
+    assert(count + 1 == allocationCount);
+    return sp;
+}
+
+template <class T, enable_if_t<extent_v<T> != 0, int> = 0>
+shared_ptr<T> make_shared_init_assert(const remove_extent_t<T>& val) {
+    return make_shared_assert<T>(val);
+}
+
+template <class T, enable_if_t<is_array_v<T> && extent_v<T> == 0, int> = 0>
+shared_ptr<T> make_shared_init_assert(size_t size, const remove_extent_t<T>& val) {
+    return make_shared_assert<T>(size, val);
+}
+
+template <class T, class... Args>
+void test_make_init_destruct_order(Args&&... vals) {
+    try {
+        shared_ptr<T> sp = make_shared<T>(forward<Args>(vals)...);
+        assert_shared_use_get(sp);
+    } catch (const runtime_error& exc) {
+        assert(exc.what() == "Can't create more ReportAddress objects."sv);
+    }
+
+    assert_ascending_init();
+    assert_descending_destruct();
+}
+
+void test_make_shared_not_array() {
+    shared_ptr<vector<int>> p0 = make_shared<vector<int>>();
+    assert_shared_use_get(p0);
+    assert(p0->empty());
+
+    shared_ptr<InitialValue> p1 = make_shared_assert<InitialValue>();
+    assert(p1->value == 106);
+
+    shared_ptr<string> p2 = make_shared<string>("Meow!", 2u, 3u);
+    assert_shared_use_get(p2);
+    assert(p2->compare("ow!") == 0);
+
+    shared_ptr<InitialValue> p3 = make_shared_assert<InitialValue>(40, 2);
+    assert(p3->value == 42);
+
+    shared_ptr<int> p4 = make_shared<int>();
+    assert_shared_use_get(p4);
+    assert(*p4 == 0);
+
+    shared_ptr<HighlyAligned> p5 = make_shared<HighlyAligned>();
+    assert_shared_use_get(p5);
+    assert(reinterpret_cast<uintptr_t>(p5.get()) % alignof(HighlyAligned) == 0);
+    assert(p5->a == 0 && p5->b == 0 && p5->c == 0 && p5->d == 0);
+}
+
+void test_make_shared_array_known_bounds() {
+    shared_ptr<string[100]> p0 = make_shared<string[100]>();
+    assert_shared_use_get(p0);
+    for (int i = 0; i < 100; ++i) {
+        assert(p0[i].empty());
+    }
+
+    shared_ptr<InitialValue[2][8][9]> p1 = make_shared_assert<InitialValue[2][8][9]>();
+    for (int i = 0; i < 2; ++i) {
+        for (int j = 0; j < 8; ++j) {
+            for (int k = 0; k < 9; ++k) {
+                assert(p1[i][j][k].value == 106);
+            }
+        }
+    }
+
+    shared_ptr<string[10][2]> p2 = make_shared<string[10][2]>({"Meow!", "Purr"});
+    assert_shared_use_get(p2);
+    for (int i = 0; i < 10; ++i) {
+        assert(p2[i][0].compare("Meow!") == 0);
+        assert(p2[i][1].compare("Purr") == 0);
+    }
+
+    shared_ptr<vector<int>[3]> p3 = make_shared<vector<int>[3]>({9, 9, 9});
+    assert_shared_use_get(p3);
+    for (int i = 0; i < 3; ++i) {
+        assert(p3[i].size() == 3);
+        for (const auto& val : p3[i]) {
+            assert(val == 9);
+        }
+    }
+
+    shared_ptr<ThreeIntWrap[5]> p4 = make_shared_init_assert<ThreeIntWrap[5]>({2, 8, 9});
+    for (int i = 0; i < 5; ++i) {
+        assert(p4[i].v1 == 2 && p4[i].v2 == 8 && p4[i].v3 == 9);
+    }
+
+    shared_ptr<int[1][7][2][9]> p5 = make_shared<int[1][7][2][9]>();
+    assert_shared_use_get(p5);
+    for (int i = 0; i < 7; ++i) {
+        for (int j = 0; j < 2; ++j) {
+            for (int k = 0; k < 9; ++k) {
+                assert(p5[0][i][j][k] == 0);
+            }
+        }
+    }
+
+    shared_ptr<HighlyAligned[6]> p6 = make_shared<HighlyAligned[6]>();
+    assert_shared_use_get(p6);
+    assert(reinterpret_cast<uintptr_t>(p6.get()) % alignof(HighlyAligned) == 0);
+    for (int i = 0; i < 6; ++i) {
+        assert(p6[i].a == 0 && p6[i].b == 0 && p6[i].c == 0 && p6[i].d == 0);
+    }
+
+    test_make_init_destruct_order<ReportAddress[10]>(); // success one dimensional
+
+    test_make_init_destruct_order<ReportAddress[20]>(); // failure one dimensional
+
+    test_make_init_destruct_order<ReportAddress[2][5]>(); // success multidimensional
+
+    test_make_init_destruct_order<ReportAddress[3][5]>(); // failure multidimensional
+}
+
+void test_make_shared_array_unknown_bounds() {
+    shared_ptr<string[]> p0 = make_shared<string[]>(100);
+    assert_shared_use_get(p0);
+    for (int i = 0; i < 100; ++i) {
+        assert(p0[i].empty());
+    }
+
+    shared_ptr<InitialValue[][8][9]> p1 = make_shared_assert<InitialValue[][8][9]>(2u);
+    for (int i = 0; i < 2; ++i) {
+        for (int j = 0; j < 8; ++j) {
+            for (int k = 0; k < 9; ++k) {
+                assert(p1[i][j][k].value == 106);
+            }
+        }
+    }
+
+    shared_ptr<string[][2]> p2 = make_shared<string[][2]>(10, {"Meow!", "Purr"});
+    assert_shared_use_get(p2);
+    for (int i = 0; i < 10; ++i) {
+        assert(p2[i][0].compare("Meow!") == 0);
+        assert(p2[i][1].compare("Purr") == 0);
+    }
+
+    shared_ptr<vector<int>[]> p3 = make_shared<vector<int>[]>(3, {9, 9, 9});
+    assert_shared_use_get(p3);
+    for (int i = 0; i < 3; ++i) {
+        assert(p3[i].size() == 3);
+        for (const auto& val : p3[i]) {
+            assert(val == 9);
+        }
+    }
+
+    shared_ptr<ThreeIntWrap[]> p4 = make_shared_init_assert<ThreeIntWrap[]>(5, {2, 8, 9});
+    for (int i = 0; i < 5; ++i) {
+        assert(p4[i].v1 == 2 && p4[i].v2 == 8 && p4[i].v3 == 9);
+    }
+
+    shared_ptr<int[]> p5 = make_shared_assert<int[]>(0u); // p5 cannot be dereferenced
+
+    shared_ptr<int[][5][6]> p6 = make_shared<int[][5][6]>(4u);
+    assert_shared_use_get(p6);
+    for (int i = 0; i < 4; ++i) {
+        for (int j = 0; j < 5; ++j) {
+            for (int k = 0; k < 6; ++k) {
+                assert(p6[i][j][k] == 0);
+            }
+        }
+    }
+
+    shared_ptr<HighlyAligned[]> p7 = make_shared<HighlyAligned[]>(7u);
+    assert_shared_use_get(p7);
+    assert(reinterpret_cast<uintptr_t>(p7.get()) % alignof(HighlyAligned) == 0);
+    for (int i = 0; i < 7; ++i) {
+        assert(p7[i].a == 0 && p7[i].b == 0 && p7[i].c == 0 && p7[i].d == 0);
+    }
+
+    test_make_init_destruct_order<ReportAddress[]>(5u); // success one dimensional
+
+    test_make_init_destruct_order<ReportAddress[]>(20u); // failure one dimensional
+
+    test_make_init_destruct_order<ReportAddress[][5]>(2u); // success multidimensional
+
+    test_make_init_destruct_order<ReportAddress[][5]>(3u); // failure multidimensional
+}
+
+int constructCount = 0;
+int destroyCount   = 0;
+
+inline void assert_construct_destruct_equal() {
+    assert(constructCount == destroyCount);
+}
+
 template <class T, class Base>
 struct ConstructConstrainingAllocator {
     using value_type = T;
@@ -30,6 +318,7 @@ struct ConstructConstrainingAllocator {
         allocator<U> a;
         static_assert(is_same_v<T, U> && is_same_v<Base, U>, "incorrect construct call");
         allocator_traits<allocator<U>>::construct(a, p, forward<Args>(vals)...);
+        ++constructCount;
     }
 
     template <class U>
@@ -37,11 +326,287 @@ struct ConstructConstrainingAllocator {
         allocator<U> a;
         static_assert(is_same_v<T, U> && is_same_v<Base, U>, "incorrect destroy call");
         allocator_traits<allocator<U>>::destroy(a, p);
+        ++destroyCount;
     }
 };
 
+template <class T>
+using CustomAlloc = ConstructConstrainingAllocator<T, T>;
+
+template <class T, class... Args>
+shared_ptr<T> allocate_shared_assert(int elemCount, Args&&... vals) {
+    int aCount       = allocationCount;
+    int cCount       = constructCount;
+    shared_ptr<T> sp = allocate_shared<T>(forward<Args>(vals)...);
+    assert_shared_use_get(sp);
+    assert(aCount + 1 == allocationCount);
+    assert(cCount + elemCount == constructCount);
+    return sp;
+}
+
+template <class T, class A, enable_if_t<extent_v<T> != 0, int> = 0>
+shared_ptr<T> allocate_shared_init_assert(int elemCount, const A& a, const remove_extent_t<T>& val) {
+    return allocate_shared_assert<T>(elemCount, a, val);
+}
+
+template <class T, class A, enable_if_t<is_array_v<T> && extent_v<T> == 0, int> = 0>
+shared_ptr<T> allocate_shared_init_assert(int elemCount, const A& a, size_t size, const remove_extent_t<T>& val) {
+    return allocate_shared_assert<T>(elemCount, a, size, val);
+}
+
+template <class T, class... Args>
+void test_allocate_init_destruct_order(Args&&... vals) {
+    CustomAlloc<remove_all_extents_t<T>> a{};
+
+    try {
+        shared_ptr<T> sp = allocate_shared<T>(a, forward<Args>(vals)...);
+        assert_shared_use_get(sp);
+    } catch (const runtime_error& exc) {
+        assert(exc.what() == "Can't create more ReportAddress objects."sv);
+    }
+
+    assert_construct_destruct_equal();
+    assert_ascending_init();
+    assert_descending_destruct();
+}
+
+void test_allocate_shared_not_array() {
+    CustomAlloc<vector<int>> a0{};
+    {
+        shared_ptr<vector<int>> p0 = allocate_shared<vector<int>>(a0);
+        assert_shared_use_get(p0);
+        assert(p0->empty());
+    }
+    assert_construct_destruct_equal();
+
+    CustomAlloc<InitialValue> a1{};
+    {
+        shared_ptr<InitialValue> p1 = allocate_shared_assert<InitialValue>(1, a1);
+        assert(p1->value == 106);
+    }
+    assert_construct_destruct_equal();
+
+    CustomAlloc<string> a2{};
+    {
+        shared_ptr<string> p2 = allocate_shared<string>(a2, "Meow!", 2u, 3u);
+        assert_shared_use_get(p2);
+        assert(p2->compare("ow!") == 0);
+    }
+    assert_construct_destruct_equal();
+
+    {
+        shared_ptr<InitialValue> p3 = allocate_shared_assert<InitialValue>(1, a1, 40, 2);
+        assert(p3->value == 42);
+    }
+    assert_construct_destruct_equal();
+
+    CustomAlloc<int> a4{};
+    {
+        shared_ptr<int> p4 = allocate_shared<int>(a4);
+        assert_shared_use_get(p4);
+        assert(*p4 == 0);
+    }
+    assert_construct_destruct_equal();
+
+    CustomAlloc<HighlyAligned> a5{};
+    {
+        shared_ptr<HighlyAligned> p5 = allocate_shared<HighlyAligned>(a5);
+        assert_shared_use_get(p5);
+        assert(reinterpret_cast<uintptr_t>(p5.get()) % alignof(HighlyAligned) == 0);
+        assert(p5->a == 0 && p5->b == 0 && p5->c == 0 && p5->d == 0);
+    }
+    assert_construct_destruct_equal();
+}
+
+void test_allocate_shared_array_known_bounds() {
+    CustomAlloc<string> a0{};
+    {
+        shared_ptr<string[100]> p0 = allocate_shared<string[100]>(a0);
+        assert_shared_use_get(p0);
+        for (int i = 0; i < 100; ++i) {
+            assert(p0[i].empty());
+        }
+    }
+    assert_construct_destruct_equal();
+
+    CustomAlloc<InitialValue> a1{};
+    {
+        shared_ptr<InitialValue[2][8][9]> p1 = allocate_shared_assert<InitialValue[2][8][9]>(144, a1);
+        for (int i = 0; i < 2; ++i) {
+            for (int j = 0; j < 8; ++j) {
+                for (int k = 0; k < 9; ++k) {
+                    assert(p1[i][j][k].value == 106);
+                }
+            }
+        }
+    }
+    assert_construct_destruct_equal();
+
+    {
+        shared_ptr<string[10][2]> p2 = allocate_shared<string[10][2]>(a0, {"Meow!", "Purr"});
+        assert_shared_use_get(p2);
+        for (int i = 0; i < 10; ++i) {
+            assert(p2[i][0].compare("Meow!") == 0);
+            assert(p2[i][1].compare("Purr") == 0);
+        }
+    }
+    assert_construct_destruct_equal();
+
+    CustomAlloc<vector<int>> a3{};
+    {
+        shared_ptr<vector<int>[3]> p3 = allocate_shared<vector<int>[3]>(a3, {9, 9, 9});
+        assert_shared_use_get(p3);
+        for (int i = 0; i < 3; ++i) {
+            assert(p3[i].size() == 3);
+            for (const auto& val : p3[i]) {
+                assert(val == 9);
+            }
+        }
+    }
+    assert_construct_destruct_equal();
+
+    CustomAlloc<ThreeIntWrap> a4{};
+    {
+        shared_ptr<ThreeIntWrap[5]> p4 = allocate_shared_init_assert<ThreeIntWrap[5]>(5, a4, {2, 8, 9});
+        for (int i = 0; i < 5; ++i) {
+            assert(p4[i].v1 == 2 && p4[i].v2 == 8 && p4[i].v3 == 9);
+        }
+    }
+    assert_construct_destruct_equal();
+
+    CustomAlloc<int> a5{};
+    {
+        shared_ptr<int[1][7][2][9]> p5 = allocate_shared<int[1][7][2][9]>(a5);
+        assert_shared_use_get(p5);
+        for (int i = 0; i < 7; ++i) {
+            for (int j = 0; j < 2; ++j) {
+                for (int k = 0; k < 9; ++k) {
+                    assert(p5[0][i][j][k] == 0);
+                }
+            }
+        }
+    }
+    assert_construct_destruct_equal();
+
+    CustomAlloc<HighlyAligned> a6{};
+    {
+        shared_ptr<HighlyAligned[6]> p6 = allocate_shared<HighlyAligned[6]>(a6);
+        assert_shared_use_get(p6);
+        assert(reinterpret_cast<uintptr_t>(p6.get()) % alignof(HighlyAligned) == 0);
+        for (int i = 0; i < 6; ++i) {
+            assert(p6[i].a == 0 && p6[i].b == 0 && p6[i].c == 0 && p6[i].d == 0);
+        }
+    }
+    assert_construct_destruct_equal();
+
+    test_allocate_init_destruct_order<ReportAddress[10]>(); // success one dimensional
+
+    test_allocate_init_destruct_order<ReportAddress[20]>(); // failure one dimensional
+
+    test_allocate_init_destruct_order<ReportAddress[2][5]>(); // success multidimensional
+
+    test_allocate_init_destruct_order<ReportAddress[3][5]>(); // failure multidimensional
+}
+
+void test_allocate_shared_array_unknown_bounds() {
+    CustomAlloc<string> a0{};
+    {
+        shared_ptr<string[]> p0 = allocate_shared<string[]>(a0, 100);
+        assert_shared_use_get(p0);
+        for (int i = 0; i < 100; ++i) {
+            assert(p0[i].empty());
+        }
+    }
+    assert_construct_destruct_equal();
+
+    CustomAlloc<InitialValue> a1{};
+    {
+        shared_ptr<InitialValue[][8][9]> p1 = allocate_shared_assert<InitialValue[][8][9]>(144, a1, 2u);
+        for (int i = 0; i < 2; ++i) {
+            for (int j = 0; j < 8; ++j) {
+                for (int k = 0; k < 9; ++k) {
+                    assert(p1[i][j][k].value == 106);
+                }
+            }
+        }
+    }
+    assert_construct_destruct_equal();
+
+    {
+        shared_ptr<string[][2]> p2 = allocate_shared<string[][2]>(a0, 10, {"Meow!", "Purr"});
+        assert_shared_use_get(p2);
+        for (int i = 0; i < 10; ++i) {
+            assert(p2[i][0].compare("Meow!") == 0);
+            assert(p2[i][1].compare("Purr") == 0);
+        }
+    }
+    assert_construct_destruct_equal();
+
+    CustomAlloc<vector<int>> a3{};
+    {
+        shared_ptr<vector<int>[]> p3 = allocate_shared<vector<int>[]>(a3, 3, {9, 9, 9});
+        assert_shared_use_get(p3);
+        for (int i = 0; i < 3; ++i) {
+            assert(p3[i].size() == 3);
+            for (const auto& val : p3[i]) {
+                assert(val == 9);
+            }
+        }
+    }
+    assert_construct_destruct_equal();
+
+    CustomAlloc<ThreeIntWrap> a4{};
+    {
+        shared_ptr<ThreeIntWrap[]> p4 = allocate_shared_init_assert<ThreeIntWrap[]>(5, a4, 5, {2, 8, 9});
+        for (int i = 0; i < 5; ++i) {
+            assert(p4[i].v1 == 2 && p4[i].v2 == 8 && p4[i].v3 == 9);
+        }
+    }
+    assert_construct_destruct_equal();
+
+    CustomAlloc<int> a5{};
+    { shared_ptr<int[]> p5 = allocate_shared_assert<int[]>(0, a5, 0u); } // p5 cannot be dereferenced
+    assert_construct_destruct_equal();
+
+    {
+        shared_ptr<int[][5][6]> p6 = allocate_shared<int[][5][6]>(a5, 4u);
+        assert_shared_use_get(p6);
+        for (int i = 0; i < 4; ++i) {
+            for (int j = 0; j < 5; ++j) {
+                for (int k = 0; k < 6; ++k) {
+                    assert(p6[i][j][k] == 0);
+                }
+            }
+        }
+    }
+    assert_construct_destruct_equal();
+
+    CustomAlloc<HighlyAligned> a7{};
+    {
+        shared_ptr<HighlyAligned[]> p7 = allocate_shared<HighlyAligned[]>(a7, 7u);
+        assert_shared_use_get(p7);
+        assert(reinterpret_cast<uintptr_t>(p7.get()) % alignof(HighlyAligned) == 0);
+        for (int i = 0; i < 7; ++i) {
+            assert(p7[i].a == 0 && p7[i].b == 0 && p7[i].c == 0 && p7[i].d == 0);
+        }
+    }
+    assert_construct_destruct_equal();
+
+    test_allocate_init_destruct_order<ReportAddress[]>(5u); // success one dimensional
+
+    test_allocate_init_destruct_order<ReportAddress[]>(20u); // failure one dimensional
+
+    test_allocate_init_destruct_order<ReportAddress[][5]>(2u); // success multidimensional
+
+    test_allocate_init_destruct_order<ReportAddress[][5]>(3u); // failure multidimensional
+}
+
 int main() {
-    ConstructConstrainingAllocator<void, int> a{};
-    (void) allocate_shared<int>(a, 42);
-    (void) allocate_shared<const int>(a, 42);
+    test_make_shared_not_array();
+    test_make_shared_array_known_bounds();
+    test_make_shared_array_unknown_bounds();
+
+    test_allocate_shared_not_array();
+    test_allocate_shared_array_known_bounds();
+    test_allocate_shared_array_unknown_bounds();
 }
diff --git a/tests/std/tests/VSO_0157762_feature_test_macros/test.cpp b/tests/std/tests/VSO_0157762_feature_test_macros/test.cpp
index 2a7a4f5afc8..3f65efd6f09 100644
--- a/tests/std/tests/VSO_0157762_feature_test_macros/test.cpp
+++ b/tests/std/tests/VSO_0157762_feature_test_macros/test.cpp
@@ -1568,11 +1568,19 @@ STATIC_ASSERT(__cpp_lib_shared_mutex == 201505L);
 
 #ifndef __cpp_lib_shared_ptr_arrays
 #error __cpp_lib_shared_ptr_arrays is not defined
-#elif __cpp_lib_shared_ptr_arrays != 201611L
+#elif _HAS_CXX20
+#if __cpp_lib_shared_ptr_arrays != 201707L
+#error __cpp_lib_shared_ptr_arrays is not 201707L
+#else
+STATIC_ASSERT(__cpp_lib_shared_ptr_arrays == 201707L);
+#endif
+#else
+#if __cpp_lib_shared_ptr_arrays != 201611L
 #error __cpp_lib_shared_ptr_arrays is not 201611L
 #else
 STATIC_ASSERT(__cpp_lib_shared_ptr_arrays == 201611L);
 #endif
+#endif
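[Editorial note: downstream code can key off the bumped macro to select the array overloads; a minimal sketch, assuming the macro is visible through the standard headers (as it is via yvals_core.h in this implementation):

    #include <cstddef>
    #include <memory>

    #if defined(__cpp_lib_shared_ptr_arrays) && __cpp_lib_shared_ptr_arrays >= 201707L
    inline std::shared_ptr<double[]> make_buffer(std::size_t n) {
        return std::make_shared<double[]>(n); // P0674R1 path
    }
    #else
    inline std::shared_ptr<double[]> make_buffer(std::size_t n) {
        return std::shared_ptr<double[]>(new double[n]()); // P0497R0-era fallback
    }
    #endif
]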
 
 #if _HAS_CXX17
 #ifndef __cpp_lib_shared_ptr_weak_type
diff --git a/tests/tr1/test.lst b/tests/tr1/test.lst
index 27319a45473..e97d6e55731 100644
--- a/tests/tr1/test.lst
+++ b/tests/tr1/test.lst
@@ -179,7 +179,10 @@ tests\string1
 tests\string2
 tests\strstream
 tests\system_error
-tests\thread
+
+# TRANSITION, flaky test
+# tests\thread
+
 tests\tuple
 tests\type_traits1
 tests\type_traits2