diff --git a/src/libpmemobj/heap.c b/src/libpmemobj/heap.c
index 6e6ec9c6dff26c5522605bffcad6ef9fe0557480..123b3a1e8b23f11c5aa08a73e5c83f0400ef19c4 100644
--- a/src/libpmemobj/heap.c
+++ b/src/libpmemobj/heap.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2015-2018, Intel Corporation
+ * Copyright 2015-2020, Intel Corporation
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -103,7 +103,6 @@ heap_alloc_classes(struct palloc_heap *heap)
 	return heap->rt->alloc_classes;
 }
 
-
 /*
  * heap_arena_init -- (internal) initializes arena instance
  */
@@ -280,9 +279,11 @@ zone_calc_size_idx(uint32_t zone_id, unsigned max_zone, size_t heap_size)
 	size_t zone_raw_size = heap_size - zone_id * ZONE_MAX_SIZE;
 
 	ASSERT(zone_raw_size >= (sizeof(struct zone_header) +
-			sizeof(struct chunk_header) * MAX_CHUNK));
+			sizeof(struct chunk_header) * MAX_CHUNK) +
+			sizeof(struct heap_header));
 	zone_raw_size -= sizeof(struct zone_header) +
-		sizeof(struct chunk_header) * MAX_CHUNK;
+		sizeof(struct chunk_header) * MAX_CHUNK +
+		sizeof(struct heap_header);
 
 	size_t zone_size_idx = zone_raw_size / CHUNKSIZE;
 	ASSERT(zone_size_idx <= UINT32_MAX);
@@ -301,8 +302,7 @@ heap_zone_init(struct palloc_heap *heap, uint32_t zone_id,
 	uint32_t size_idx = zone_calc_size_idx(zone_id, heap->rt->nzones,
 			*heap->sizep);
 
-	ASSERT(size_idx - first_chunk_id > 0);
-
+	ASSERT(size_idx > first_chunk_id);
 	memblock_huge_init(heap, first_chunk_id, zone_id,
 		size_idx - first_chunk_id);
 
@@ -1062,6 +1062,7 @@ heap_zone_update_if_needed(struct palloc_heap *heap)
 
 		size_t size_idx = zone_calc_size_idx(i, heap->rt->nzones,
 			*heap->sizep);
+
 		if (size_idx == z->header.size_idx)
 			continue;
 
diff --git a/src/test/obj_heap/obj_heap.c b/src/test/obj_heap/obj_heap.c
index c670946bae67413744d2e3bf8e83a0c1795ea1e4..2eee9c4a04dd8183ee2c8d91c665a0048d40ffd0 100644
--- a/src/test/obj_heap/obj_heap.c
+++ b/src/test/obj_heap/obj_heap.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2015-2018, Intel Corporation
+ * Copyright 2015-2020, Intel Corporation
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -300,6 +300,77 @@ test_heap(void)
 	MUNMAP_ANON_ALIGNED(mpop, MOCK_POOL_SIZE);
 }
 
+/*
+ * test_heap_with_size -- tests scenarios with not-nicely aligned sizes
+ */
+static void
+test_heap_with_size(void)
+{
+	/*
+	 * To trigger the bug caused by incorrect metadata alignment, the
+	 * pool size must match exactly the size that the buggy zone size
+	 * calculation accounted for.
+	 */
+	size_t size = PMEMOBJ_MIN_POOL + sizeof(struct zone_header) +
+		sizeof(struct chunk_header) * MAX_CHUNK +
+		sizeof(PMEMobjpool);
+
+	struct mock_pop *mpop = MMAP_ANON_ALIGNED(size,
+		Ut_mmap_align);
+	PMEMobjpool *pop = &mpop->p;
+	memset(pop, 0, size);
+	pop->heap_offset = (uint64_t)((uint64_t)&mpop->heap - (uint64_t)mpop);
+	pop->p_ops.persist = obj_heap_persist;
+	pop->p_ops.flush = obj_heap_flush;
+	pop->p_ops.drain = obj_heap_drain;
+	pop->p_ops.memset = obj_heap_memset;
+	pop->p_ops.base = pop;
+	pop->set = MALLOC(sizeof(*(pop->set)));
+	pop->set->options = 0;
+	pop->set->directory_based = 0;
+
+	void *heap_start = (char *)pop + pop->heap_offset;
+	uint64_t heap_size = size - sizeof(PMEMobjpool);
+	struct palloc_heap *heap = &pop->heap;
+	struct pmem_ops *p_ops = &pop->p_ops;
+
+	UT_ASSERT(heap_check(heap_start, heap_size) != 0);
+	UT_ASSERT(heap_init(heap_start, heap_size,
+		&pop->heap_size, p_ops) == 0);
+	UT_ASSERT(heap_boot(heap, heap_start, heap_size,
+		&pop->heap_size,
+		pop, p_ops, NULL, pop->set) == 0);
+	UT_ASSERT(heap_buckets_init(heap) == 0);
+	UT_ASSERT(pop->heap.rt != NULL);
+
+	struct bucket *b_def = heap_bucket_acquire_by_id(heap,
+		DEFAULT_ALLOC_CLASS_ID);
+
+	struct memory_block mb;
+	mb.size_idx = 1;
+	while (heap_get_bestfit_block(heap, b_def, &mb) == 0)
+		;
+
+	/* mb should now be the last chunk in the heap */
+	char *ptr = mb.m_ops->get_real_data(&mb);
+	size_t s = mb.m_ops->get_real_size(&mb);
+
+	/* last chunk should be within the heap and accessible */
+	UT_ASSERT((size_t)ptr + s <= (size_t)mpop + size);
+
+	VALGRIND_DO_MAKE_MEM_DEFINED(ptr, s);
+	memset(ptr, 0xc, s);
+
+	heap_bucket_release(heap, b_def);
+
+	UT_ASSERT(heap_check(heap_start, heap_size) == 0);
+	heap_cleanup(heap);
+	UT_ASSERT(heap->rt == NULL);
+
+	FREE(pop->set);
+	MUNMAP_ANON_ALIGNED(mpop, size);
+}
+
 static void
 test_recycler(void)
 {
@@ -456,6 +527,7 @@ main(int argc, char *argv[])
 	START(argc, argv, "obj_heap");
 
 	test_heap();
+	test_heap_with_size();
 	test_recycler();
 
 	DONE(NULL);