Fixup No.3 after #10779

This commit is contained in:
Eladash 2021-09-01 22:52:50 +03:00 committed by Ivan
parent 1cbcf7e1ad
commit fafefb2cf5
6 changed files with 80 additions and 52 deletions

View file

@ -1050,7 +1050,7 @@ namespace vm
// Mapped regions: addr -> shm handle
constexpr auto block_map = &auto_typemap<block_t>::get<std::map<u32, std::pair<u32, std::shared_ptr<utils::shm>>>>;
bool block_t::try_alloc(u32 addr, u8 flags, u32 size, std::shared_ptr<utils::shm>&& shm) const
bool block_t::try_alloc(u32 addr, u64 bflags, u32 size, std::shared_ptr<utils::shm>&& shm) const
{
// Check if memory area is already mapped
for (u32 i = addr / 4096; i <= (addr + size - 1) / 4096; i++)
@ -1064,6 +1064,34 @@ namespace vm
const u32 page_addr = addr + (this->flags & stack_guarded ? 0x1000 : 0);
const u32 page_size = size - (this->flags & stack_guarded ? 0x2000 : 0);
// With no flags set, memory defaults to readable/writable
// Explicit (un...) flags are used to protect from such access
u8 flags = 0;
if (~bflags & alloc_hidden)
{
flags |= page_readable;
if (~bflags & alloc_unwritable)
{
flags |= page_writable;
}
}
if (bflags & alloc_executable)
{
flags |= page_executable;
}
if ((flags & page_size_mask) == page_size_64k)
{
flags |= page_64k_size;
}
else if (!(flags & (page_size_mask & ~page_size_1m)))
{
flags |= page_1m_size;
}
if (this->flags & stack_guarded)
{
// Mark overflow/underflow guard pages as allocated
@ -1122,12 +1150,31 @@ namespace vm
return true;
}
// Normalize block creation flags: fill in the default page size and
// derive the attributes that follow from it.
static constexpr u64 process_block_flags(u64 flags)
{
	// No page size requested: default to 1MB pages
	if (!(flags & page_size_mask))
	{
		flags |= page_size_1m;
	}

	// 4k granularity implies a fully preallocated area; any other page
	// size cannot carry stack guard pages, so strip that flag instead
	flags = (flags & page_size_4k) ? (flags | preallocated) : (flags & ~stack_guarded);

	return flags;
}
block_t::block_t(u32 addr, u32 size, u64 flags)
: addr(addr)
, size(size)
, flags(flags)
, flags(process_block_flags(flags))
{
if (flags & page_size_4k || flags & preallocated)
if (this->flags & preallocated)
{
// Special path for whole-allocated areas allowing 4k granularity
m_common = std::make_shared<utils::shm>(size);
@ -1166,7 +1213,7 @@ namespace vm
if (!src)
{
// Use the block's flags (except for protection)
flags = (this->flags & ~page_prot_mask) | (flags & page_prot_mask);
flags = (this->flags & ~alloc_prot_mask) | (flags & alloc_prot_mask);
}
// Determine minimal alignment
@ -1187,17 +1234,6 @@ namespace vm
return 0;
}
u8 pflags = flags & page_hidden ? 0 : (~flags & (page_readable | page_writable));
if ((flags & page_size_64k) == page_size_64k)
{
pflags |= page_64k_size;
}
else if (!(flags & (page_size_mask & ~page_size_1m)))
{
pflags |= page_1m_size;
}
// Create or import shared memory object
std::shared_ptr<utils::shm> shm;
@ -1224,7 +1260,7 @@ namespace vm
// Search for an appropriate place (unoptimized)
for (;; addr += align)
{
if (try_alloc(addr, pflags, size, std::move(shm)))
if (try_alloc(addr, flags, size, std::move(shm)))
{
return addr + (flags & stack_guarded ? 0x1000 : 0);
}
@ -1243,7 +1279,7 @@ namespace vm
if (!src)
{
// Use the block's flags (except for protection)
flags = (this->flags & ~page_prot_mask) | (flags & page_prot_mask);
flags = (this->flags & ~alloc_prot_mask) | (flags & alloc_prot_mask);
}
// Determine minimal alignment
@ -1271,17 +1307,6 @@ namespace vm
// Force aligned address
addr -= addr % min_page_size;
u8 pflags = flags & page_hidden ? 0 : (~flags & (page_readable | page_writable));
if ((flags & page_size_64k) == page_size_64k)
{
pflags |= page_64k_size;
}
else if (!(flags & (page_size_mask & ~page_size_1m)))
{
pflags |= page_1m_size;
}
// Create or import shared memory object
std::shared_ptr<utils::shm> shm;
@ -1296,7 +1321,7 @@ namespace vm
vm::writer_lock lock(0);
if (!try_alloc(addr, pflags, size, std::move(shm)))
if (!try_alloc(addr, flags, size, std::move(shm)))
{
return 0;
}