diff --git a/src/libpmemobj/tx.c b/src/libpmemobj/tx.c
index 05ef35e60b82b371983f6c7a83698e5514306491..f9eb57f47ca40d1e9c42b1a0086ba694eea1e686 100644
--- a/src/libpmemobj/tx.c
+++ b/src/libpmemobj/tx.c
@@ -1088,9 +1088,26 @@ pmemobj_tx_add_snapshot(struct tx *tx, struct tx_range_def *snapshot)
 		ULOG_OPERATION_BUF_CPY);
 }
 
+/*
+ * pmemobj_tx_merge_flags -- (internal) common code for merging the flags of
+ * two overlapping ranges so that the resulting range behaves correctly
+ */
+static void
+pmemobj_tx_merge_flags(struct tx_range_def *dest, struct tx_range_def *merged)
+{
+	/*
+	 * POBJ_XADD_NO_FLUSH should stay set in the merged range only if it
+	 * was set in both ranges
+	 */
+	if ((dest->flags & POBJ_XADD_NO_FLUSH) &&
+				!(merged->flags & POBJ_XADD_NO_FLUSH)) {
+		dest->flags &= ~POBJ_XADD_NO_FLUSH;
+	}
+}
+
 /*
  * pmemobj_tx_add_common -- (internal) common code for adding persistent memory
- *				into the transaction
+ * into the transaction
  */
 static int
 pmemobj_tx_add_common(struct tx *tx, struct tx_range_def *args)
@@ -1196,6 +1213,7 @@ pmemobj_tx_add_common(struct tx *tx, struct tx_range_def *args)
 			size_t intersection = fend - MAX(f->offset, r.offset);
 			r.size -= intersection + snapshot.size;
 			f->size += snapshot.size;
+			pmemobj_tx_merge_flags(f, args);
 
 			if (snapshot.size != 0) {
 				ret = pmemobj_tx_add_snapshot(tx, &snapshot);
@@ -1211,6 +1229,7 @@ pmemobj_tx_add_common(struct tx *tx, struct tx_range_def *args)
 				struct tx_range_def *fprev = ravl_data(nprev);
 				ASSERTeq(rend, fprev->offset);
 				f->size += fprev->size;
+				pmemobj_tx_merge_flags(f, fprev);
 				ravl_remove(tx->ranges, nprev);
 			}
 		} else if (fend >= r.offset) {
@@ -1234,6 +1253,7 @@ pmemobj_tx_add_common(struct tx *tx, struct tx_range_def *args)
 			 */
 			size_t overlap = rend - MAX(f->offset, r.offset);
 			r.size -= overlap;
+			pmemobj_tx_merge_flags(f, args);
 		} else {
 			ASSERT(0);
 		}
@@ -1654,7 +1674,7 @@ pmemobj_tx_free(PMEMoid oid)
 	if (n != NULL) {
 		VEC_FOREACH_BY_PTR(action, &tx->actions) {
 			if (action->type == POBJ_ACTION_TYPE_HEAP &&
-			    action->heap.offset == oid.off) {
+				action->heap.offset == oid.off) {
 				struct tx_range_def *r = ravl_data(n);
 				void *ptr = OBJ_OFF_TO_PTR(pop, r->offset);
 				VALGRIND_SET_CLEAN(ptr, r->size);
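
The helper above clears POBJ_XADD_NO_FLUSH whenever a flagged range is merged
with an unflagged one, so the single range that remains keeps the stricter
flush-on-commit behavior. A minimal standalone sketch of that rule, using a
stand-in struct and flag value rather than the real libpmemobj definitions:

#include <assert.h>
#include <stdint.h>

#define XNO_FLUSH ((uint64_t)1)	/* stand-in for POBJ_XADD_NO_FLUSH */

struct range {
	uint64_t offset;
	uint64_t size;
	uint64_t flags;
};

/* keep the no-flush flag only if both ranges carried it */
static void
merge_flags(struct range *dest, const struct range *merged)
{
	if ((dest->flags & XNO_FLUSH) && !(merged->flags & XNO_FLUSH))
		dest->flags &= ~XNO_FLUSH;
}

int
main(void)
{
	struct range a = {0, 4, XNO_FLUSH};
	struct range b = {2, 8, 0};
	merge_flags(&a, &b);
	assert(!(a.flags & XNO_FLUSH));	/* flushing side wins */

	struct range c = {0, 4, XNO_FLUSH};
	struct range d = {2, 8, XNO_FLUSH};
	merge_flags(&c, &d);
	assert(c.flags & XNO_FLUSH);	/* both opted out of flushing */
	return 0;
}
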
diff --git a/src/test/obj_tx_add_range/obj_tx_add_range.c b/src/test/obj_tx_add_range/obj_tx_add_range.c
index 3e985b526db3affc3e266c29fc67de4db0a333ec..5d61ae5dc273fe425ea01694786224c60b67037d 100644
--- a/src/test/obj_tx_add_range/obj_tx_add_range.c
+++ b/src/test/obj_tx_add_range/obj_tx_add_range.c
@@ -574,6 +574,87 @@ do_tx_add_range_overlapping(PMEMobjpool *pop)
 	UT_ASSERT(util_is_zeroed(D_RO(obj)->data, OVERLAP_SIZE));
 }
 
+/*
+ * do_tx_add_range_flag_merge_right -- add a NO_FLUSH range, then overlap its
+ * right side with a range added without that flag
+ */
+static void
+do_tx_add_range_flag_merge_right(PMEMobjpool *pop)
+{
+	TOID(struct overlap_object) obj;
+	TOID_ASSIGN(obj, do_tx_zalloc(pop, 1));
+
+	/*
+	 * ++++--------
+	 * --++++++++--
+	 */
+	TX_BEGIN(pop) {
+		pmemobj_tx_xadd_range(obj.oid, 0, 4, POBJ_XADD_NO_FLUSH);
+		memset(D_RW(obj)->data, 1, 4);
+
+		pmemobj_tx_add_range(obj.oid, 2, 8);
+		memset(D_RW(obj)->data + 2, 3, 8);
+
+	} TX_ONABORT {
+		UT_ASSERT(0);
+	} TX_END
+}
+
+/*
+ * do_tx_add_range_flag_merge_left -- add a NO_FLUSH range, then overlap its
+ * left side with a range added without that flag
+ */
+static void
+do_tx_add_range_flag_merge_left(PMEMobjpool *pop)
+{
+	TOID(struct overlap_object) obj;
+	TOID_ASSIGN(obj, do_tx_zalloc(pop, 1));
+
+	/*
+	 * --------++++
+	 * --++++++++--
+	 */
+	TX_BEGIN(pop) {
+		pmemobj_tx_xadd_range(obj.oid, 8, 4, POBJ_XADD_NO_FLUSH);
+		memset(D_RW(obj)->data + 8, 2, 4);
+
+		pmemobj_tx_add_range(obj.oid, 2, 8);
+		memset(D_RW(obj)->data + 2, 3, 8);
+
+	} TX_ONABORT {
+		UT_ASSERT(0);
+	} TX_END
+}
+
+/*
+ * do_tx_add_range_flag_merge_middle -- add two NO_FLUSH ranges, then fill the
+ * gap between them with a range added without that flag
+ */
+static void
+do_tx_add_range_flag_merge_middle(PMEMobjpool *pop)
+{
+	TOID(struct overlap_object) obj;
+	TOID_ASSIGN(obj, do_tx_zalloc(pop, 1));
+
+	/*
+	 * ++++----++++
+	 * ----++++----
+	 */
+	TX_BEGIN(pop) {
+		pmemobj_tx_xadd_range(obj.oid, 0, 4, POBJ_XADD_NO_FLUSH);
+		memset(D_RW(obj)->data, 1, 4);
+
+		pmemobj_tx_xadd_range(obj.oid, 8, 4, POBJ_XADD_NO_FLUSH);
+		memset(D_RW(obj)->data + 8, 2, 4);
+
+		pmemobj_tx_add_range(obj.oid, 4, 4);
+		memset(D_RW(obj)->data + 4, 3, 4);
+
+	} TX_ONABORT {
+		UT_ASSERT(0);
+	} TX_END
+}
+
 /*
  * do_tx_add_range_reopen -- check for persistent memory leak in undo log set
  */
@@ -681,6 +762,12 @@ main(int argc, char *argv[])
 		VALGRIND_WRITE_STATS;
 		do_tx_add_range_zero(pop);
 		VALGRIND_WRITE_STATS;
+		do_tx_add_range_flag_merge_left(pop);
+		VALGRIND_WRITE_STATS;
+		do_tx_add_range_flag_merge_right(pop);
+		VALGRIND_WRITE_STATS;
+		do_tx_add_range_flag_merge_middle(pop);
+		VALGRIND_WRITE_STATS;
 		do_tx_xadd_range_commit(pop);
 		pmemobj_close(pop);
 	}
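
The three new tests drive these scenarios through the public API. A minimal
user-level sketch of the merge_right case (pool creation, error handling and
the function name are only for illustration; the root size and offsets mirror
the test):

#include <string.h>
#include <libpmemobj.h>

static void
overlap_with_mixed_flags(PMEMobjpool *pop)
{
	PMEMoid root = pmemobj_root(pop, 64);
	char *data = pmemobj_direct(root);

	TX_BEGIN(pop) {
		/* caller promises to flush this range itself */
		pmemobj_tx_xadd_range(root, 0, 4, POBJ_XADD_NO_FLUSH);
		memset(data, 1, 4);

		/*
		 * Overlapping add without the flag: once the two ranges are
		 * merged, NO_FLUSH is dropped and the whole range is flushed
		 * on commit.
		 */
		pmemobj_tx_add_range(root, 2, 8);
		memset(data + 2, 3, 8);
	} TX_END
}
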
diff --git a/src/test/obj_tx_add_range/pmemcheck1.log.match b/src/test/obj_tx_add_range/pmemcheck1.log.match
index e801c7dde3db338b012363c82db01b04db926cb2..0fa838a5c28a3808a594b138f5b0101a7018db83 100644
--- a/src/test/obj_tx_add_range/pmemcheck1.log.match
+++ b/src/test/obj_tx_add_range/pmemcheck1.log.match
@@ -43,6 +43,15 @@
 ==$(*)== Number of stores not made persistent: 0
 ==$(*)== ERROR SUMMARY: 0 errors
 ==$(*)== 
+==$(*)== Number of stores not made persistent: 0
+==$(*)== ERROR SUMMARY: 0 errors
+==$(*)== 
+==$(*)== Number of stores not made persistent: 0
+==$(*)== ERROR SUMMARY: 0 errors
+==$(*)== 
+==$(*)== Number of stores not made persistent: 0
+==$(*)== ERROR SUMMARY: 0 errors
+==$(*)== 
 ==$(*)== 
 ==$(*)== Number of stores not made persistent: 1
 ==$(*)== Stores not made persistent properly: