bcm53xx: update Disable MMU and Dcache during decompression

This replaces the old patch with the version from Florian Fainelli.

Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>

SVN-Revision: 46504
Hauke Mehrtens 2015-07-26 15:52:59 +00:00
parent 2e3af78447
commit ac96a1665a
1 changed file with 30 additions and 139 deletions

@@ -1,32 +1,26 @@
From 26023cdfacaf116545b1087b9d1fe50dc6fbda10 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <zajec5@gmail.com>
Date: Wed, 24 Sep 2014 22:14:07 +0200
Subject: [PATCH] ARM: BCM5301X: Disable MMU and Dcache for decompression
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
From: Florian Fainelli <f.fainelli@gmail.com>
Subject: [PATCH] ARM: BCM5301x: Disable MMU and Dcache during decompression
Date: Tue, 14 Jul 2015 16:12:08 -0700
Without this fix kernel was randomly hanging in ~25% of tries during
early init. Hangs used to happen at random places in the start_kernel.
Use the existing __armv7_mmu_cache_flush() to perform the cache flush
since this does what we are after.
Signed-off-by: Rafał Miłecki <zajec5@gmail.com>
Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
---
arch/arm/boot/compressed/Makefile | 5 +
arch/arm/boot/compressed/head-bcm_5301x-mpcore.S | 37 +++++++
arch/arm/boot/compressed/mpcore_cache.S | 118 +++++++++++++++++++++++
3 files changed, 160 insertions(+)
arch/arm/boot/compressed/Makefile | 4 +++
arch/arm/boot/compressed/head-bcm_5301x-mpcore.S | 37 ++++++++++++++++++++++++
arch/arm/boot/compressed/head.S | 2 ++
3 files changed, 43 insertions(+)
create mode 100644 arch/arm/boot/compressed/head-bcm_5301x-mpcore.S
create mode 100644 arch/arm/boot/compressed/mpcore_cache.S
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -31,6 +31,11 @@ ifeq ($(CONFIG_ARCH_ACORN),y)
@@ -31,6 +31,10 @@ ifeq ($(CONFIG_ARCH_ACORN),y)
OBJS += ll_char_wr.o font.o
endif
+ifeq ($(CONFIG_ARCH_BCM_5301X),y)
+OBJS += head-bcm_5301x-mpcore.o
+OBJS += mpcore_cache.o
+endif
+
ifeq ($(CONFIG_ARCH_SA1100),y)
@@ -63,7 +57,7 @@ Signed-off-by: Rafał Miłecki <zajec5@gmail.com>
+ nop
+
+ @ Call the cache invalidation routine
+ bl v7_all_dcache_invalidate
+ bl __armv7_mmu_cache_flush_fn
+ nop
+ mov r0,#0
+ ldr r3, =0x19022000 @ L2 cache controller, control reg
@@ -72,124 +66,21 @@ Signed-off-by: Rafał Miłecki <zajec5@gmail.com>
+
+ @ Restore
+ mov r8, r12
--- /dev/null
+++ b/arch/arm/boot/compressed/mpcore_cache.S
@@ -0,0 +1,118 @@
+/*****************************************************************************
+* Copyright 2003 - 2008 Broadcom Corporation. All rights reserved.
+*
+* Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed to you
+* under the terms of the GNU General Public License version 2, available at
+* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
+*
+* Notwithstanding the above, under no circumstances may you combine this
+* software in any way with any other Broadcom software provided under a
+* license other than the GPL, without Broadcom's express prior written
+* consent.
+*****************************************************************************/
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+ __INIT
+
+/*
+ * v7_l1_cache_invalidate
+ *
+ * Invalidate contents of L1 cache without flushing its contents
+ * into outer cache and memory. This is needed when the contents
+ * of the cache are unpredictable after power-up.
+ *
+ * corrupts r0-r6
+ */
+
+ENTRY(v7_l1_cache_invalidate)
+ mov r0, #0
+ mcr p15, 2, r0, c0, c0, 0 @ set cache level to 1
+ mrc p15, 1, r0, c0, c0, 0 @ read CLIDR
+
+ ldr r1, =0x7fff
+ and r2, r1, r0, lsr #13 @ get max # of index size
+
+ ldr r1, =0x3ff
+ and r3, r1, r0, lsr #3 @ NumWays - 1
+ add r2, r2, #1 @ NumSets
+
+ and r0, r0, #0x7
+ add r0, r0, #4 @ SetShift
+
+ clz r1, r3 @ WayShift
+ add r4, r3, #1 @ NumWays
+1: sub r2, r2, #1 @ NumSets--
+ mov r3, r4 @ Temp = NumWays
+2: subs r3, r3, #1 @ Temp--
+ mov r5, r3, lsl r1
+ mov r6, r2, lsl r0
+ orr r5, r5, r6 @ Reg = (Temp<<WayShift)|(NumSets<<SetShift)
+ mcr p15, 0, r5, c7, c6, 2 @ Invalidate line
+ bgt 2b
+ cmp r2, #0
+ bgt 1b
+ dsb
+ mov r0,#0
+ mcr p15,0,r0,c7,c5,0 /* Invalidate icache */
+ isb
+ mov pc, lr
+ENDPROC(v7_l1_cache_invalidate)
+
+/*
+ * v7_all_dcache_invalidate
+ *
+ * Invalidate without flushing the contents of all cache levels
+ * accesible by the current processor core.
+ * This is useful when the contents of cache memory are undetermined
+ * at power-up.
+ * Corrupted registers: r0-r7, r9-r11
+ *
+ * Based on cache-v7.S: v7_flush_dcache_all()
+ */
+
+ENTRY(v7_all_dcache_invalidate)
+ mrc p15, 1, r0, c0, c0, 1 @ read clidr
+ ands r3, r0, #0x7000000 @ extract loc from clidr
+ mov r3, r3, lsr #23 @ left align loc bit field
+ beq finished @ if loc is 0, then no need to clean
+ mov r10, #0 @ start clean at cache level 0
+loop1:
+ add r2, r10, r10, lsr #1 @ work out 3x current cache level
+ mov r1, r0, lsr r2 @ extract cache type bits from clidr
+ and r1, r1, #7 @ mask of bits for current cache only
+ cmp r1, #2 @ see what cache we have at this level
+ blt skip @ skip if no cache, or just i-cache
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ isb @ isb to sych the new cssr&csidr
+ mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
+ and r2, r1, #7 @ extract the length of the cache lines
+ add r2, r2, #4 @ add 4 (line length offset)
+ ldr r4, =0x3ff
+ ands r4, r4, r1, lsr #3 @ find maximum number on the way size
+ clz r5, r4 @ find bit pos of way size increment
+ ldr r7, =0x7fff
+ ands r7, r7, r1, lsr #13 @ extract max number of the index size
+loop2:
+ mov r9, r4 @ create working copy of max way size
+loop3:
+ orr r11, r10, r9, lsl r5 @ factor way and cache number into r11
+ orr r11, r11, r7, lsl r2 @ factor index number into r11
+ mcr p15, 0, r11, c7, c6, 2 @ Invalidate line
+ subs r9, r9, #1 @ decrement the way
+ bge loop3
+ subs r7, r7, #1 @ decrement the index
+ bge loop2
+skip:
+ add r10, r10, #2 @ increment cache number
+ cmp r3, r10
+ bgt loop1
+finished:
+ mov r10, #0 @ swith back to cache level 0
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ dsb
+ isb
+ mov pc, lr
+ENDPROC(v7_all_dcache_invalidate)
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -1152,6 +1152,7 @@ __armv7_mmu_cache_flush:
hierarchical:
mcr p15, 0, r10, c7, c10, 5 @ DMB
stmfd sp!, {r0-r7, r9-r11}
+ENTRY(__armv7_mmu_cache_flush_fn)
mrc p15, 1, r0, c0, c0, 1 @ read clidr
ands r3, r0, #0x7000000 @ extract loc from clidr
mov r3, r3, lsr #23 @ left align loc bit field
@@ -1201,6 +1202,7 @@ iflush:
mcr p15, 0, r10, c7, c10, 4 @ DSB
mcr p15, 0, r10, c7, c5, 4 @ ISB
mov pc, lr
+ENDPROC(__armv7_mmu_cache_flush_fn)
__armv5tej_mmu_cache_flush:
tst r4, #1