+/*
+ * Modify the MMU table for a region with updated PXN/UXN/memory type/valid
+ * bits. The process is break-before-make: the target region is marked
+ * invalid while its attributes are being changed.
+ */
+void mmu_change_region_attr(phys_addr_t addr, size_t siz, u64 attrs)
+{
+ int level;
+ u64 r, size, start;
+
+ start = addr;
+ size = siz;
+ /*
+ * Loop through the address range; at each step, find the largest page
+ * granule that fits our alignment constraints and set it to "invalid".
+ */
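+ /*
+ * set_one_region() returns the number of bytes it updated when start and
+ * size are aligned for the given level (1 GiB, 2 MiB or 4 KiB blocks with
+ * the 4 KiB translation granule), or 0 so that we retry at the next,
+ * finer-grained level.
+ */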
+ while (size > 0) {
+ for (level = 1; level < 4; level++) {
+ /* Set PTE to fault */
+ r = set_one_region(start, size, PTE_TYPE_FAULT, true,
+ level);
+ if (r) {
+ /* PTE successfully invalidated */
+ size -= r;
+ start += r;
+ break;
+ }
+ }
+ }
+
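+ /*
+ * The "break" step is complete: flush the modified page tables out of the
+ * data cache so the table walker observes the faulting entries, and
+ * invalidate the TLBs to drop any cached translations for the region.
+ */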
+ flush_dcache_range(gd->arch.tlb_addr,
+ gd->arch.tlb_addr + gd->arch.tlb_size);
+ __asm_invalidate_tlb_all();
+
+ /*
+ * Loop through the address range again; at each step, find the largest
+ * page granule that fits our alignment constraints and set it to the new
+ * attributes.
+ */
+ start = addr;
+ size = siz;
+ while (size > 0) {
+ for (level = 1; level < 4; level++) {
+ /* Set PTE to new attributes */
+ r = set_one_region(start, size, attrs, true, level);
+ if (r) {
+ /* PTE successfully updated */
+ size -= r;
+ start += r;
+ break;
+ }
+ }
+ }
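+ /*
+ * The "make" step is complete: flush the tables and invalidate the TLBs
+ * once more so the new attributes take effect.
+ */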
+ flush_dcache_range(gd->arch.tlb_addr,
+ gd->arch.tlb_addr + gd->arch.tlb_size);
+ __asm_invalidate_tlb_all();
+}
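+
+/*
+ * Illustrative usage (not part of this patch): a caller might restore a
+ * normal, cacheable mapping over DRAM once it is safe to do so. The base
+ * address and attribute mix below are assumptions that depend on the
+ * platform:
+ *
+ *	mmu_change_region_attr(CONFIG_SYS_SDRAM_BASE, gd->ram_size,
+ *			       PTE_BLOCK_MEMTYPE(MT_NORMAL) |
+ *			       PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS |
+ *			       PTE_TYPE_VALID);
+ */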
+
+#else /* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */