generic: import patch lowering bitflip_threshold on SPI-NAND
Reporting an unclean read from SPI-NAND only when the maximum number of
correctable bitflip errors has been hit seems a bit late. UBI LEB
scrubbing, which depends on the lower MTD device reporting correctable
bitflips, then only kicks in when it's almost too late.

Set bitflip_threshold to 75% of the ECC strength, which is also the
default for raw NAND.

Signed-off-by: Daniel Golle <daniel@makrotopia.org>
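For context: MTD drivers report the maximum number of bitflips corrected in
any single ECC step of a read, and the MTD core compares that count against
mtd->bitflip_threshold, returning -EUCLEAN once it is reached; that return
value is what triggers UBI scrubbing. A minimal stand-alone sketch of the
rounding the patch applies (DIV_ROUND_UP is reimplemented here for user
space; the example strength values are illustrative, not from the commit):

#include <stdio.h>

/* Same rounding as the kernel's DIV_ROUND_UP() macro. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Illustrative ECC strengths (correctable bits per step);
	 * 1, 4 and 8 are common for SPI-NAND parts. */
	static const unsigned int strength[] = { 1, 4, 8 };

	for (unsigned int i = 0; i < sizeof(strength) / sizeof(strength[0]); i++) {
		/* threshold = ceil(strength * 3 / 4), as in the patch */
		unsigned int t = DIV_ROUND_UP(strength[i] * 3, 4);

		/* prints: 1 -> 1, 4 -> 3, 8 -> 6 */
		printf("ecc_strength=%u -> bitflip_threshold=%u\n",
		       strength[i], t);
	}
	return 0;
}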
parent c22ba7544e
commit 1554af4c43

@@ -0,0 +1,63 @@
+From patchwork Mon Aug 12 01:56:41 2024
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+X-Patchwork-Submitter: Daniel Golle <daniel@makrotopia.org>
+X-Patchwork-Id: 1971406
+Return-Path:
+ <linux-mtd-bounces+incoming=patchwork.ozlabs.org@lists.infradead.org>
+X-Original-To: incoming@patchwork.ozlabs.org
+Delivered-To: patchwork-incoming@legolas.ozlabs.org
+Date: Mon, 12 Aug 2024 02:56:41 +0100
+From: Daniel Golle <daniel@makrotopia.org>
+To: Miquel Raynal <miquel.raynal@bootlin.com>,
+ Richard Weinberger <richard@nod.at>,
+ Vignesh Raghavendra <vigneshr@ti.com>,
+ Tudor Ambarus <tudor.ambarus@linaro.org>,
+ Daniel Golle <daniel@makrotopia.org>,
+ Mika Westerberg <mika.westerberg@linux.intel.com>,
+ Chia-Lin Kao <acelan.kao@canonical.com>,
+ Martin Kurbanov <mmkurbanov@salutedevices.com>,
+ linux-mtd@lists.infradead.org, linux-kernel@vger.kernel.org
+Subject: [PATCH] mtd: spinand: set bitflip_threshold to 75% of ECC strength
+Message-ID:
+ <2117e387260b0a96f95b8e1652ff79e0e2d71d53.1723427450.git.daniel@makrotopia.org>
+MIME-Version: 1.0
+Content-Disposition: inline
+X-BeenThere: linux-mtd@lists.infradead.org
+X-Mailman-Version: 2.1.34
+Precedence: list
+List-Id: Linux MTD discussion mailing list <linux-mtd.lists.infradead.org>
+List-Unsubscribe: <http://lists.infradead.org/mailman/options/linux-mtd>,
+ <mailto:linux-mtd-request@lists.infradead.org?subject=unsubscribe>
+List-Archive: <http://lists.infradead.org/pipermail/linux-mtd/>
+List-Post: <mailto:linux-mtd@lists.infradead.org>
+List-Help: <mailto:linux-mtd-request@lists.infradead.org?subject=help>
+List-Subscribe: <http://lists.infradead.org/mailman/listinfo/linux-mtd>,
+ <mailto:linux-mtd-request@lists.infradead.org?subject=subscribe>
+Sender: "linux-mtd" <linux-mtd-bounces@lists.infradead.org>
+Errors-To: linux-mtd-bounces+incoming=patchwork.ozlabs.org@lists.infradead.org
+
+Reporting an unclean read from SPI-NAND only when the maximum number
+of correctable bitflip errors has been hit seems a bit late.
+UBI LEB scrubbing, which depends on the lower MTD device reporting
+correctable bitflips, then only kicks in when it's almost too late.
+
+Set bitflip_threshold to 75% of the ECC strength, which is also the
+default for raw NAND.
+
+Signed-off-by: Daniel Golle <daniel@makrotopia.org>
+---
+ drivers/mtd/nand/spi/core.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/mtd/nand/spi/core.c
++++ b/drivers/mtd/nand/spi/core.c
+@@ -1286,6 +1286,7 @@ static int spinand_init(struct spinand_d
+ 	/* Propagate ECC information to mtd_info */
+ 	mtd->ecc_strength = nanddev_get_ecc_conf(nand)->strength;
+ 	mtd->ecc_step_size = nanddev_get_ecc_conf(nand)->step_size;
++	mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
+ 
+ 	ret = spinand_create_dirmaps(spinand);
+ 	if (ret) {
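The one added line above is the whole mechanism: spinand_init() now seeds
mtd->bitflip_threshold, where previously the MTD core's default of the full
ECC strength applied. A paraphrased sketch of how the core's read path
consumes the field (modelled on drivers/mtd/mtdcore.c; the helper name is
made up here for illustration, not a real kernel function):

#include <linux/mtd/mtd.h>

/* Sketch of the return-code handling in the MTD core read path. */
static int mtd_collapse_bitflips(struct mtd_info *mtd, int ret_code)
{
	if (ret_code < 0)	/* hard failure, e.g. -EBADMSG */
		return ret_code;

	/*
	 * ret_code is the maximum number of bitflips ECC corrected in
	 * any single step. At or above the threshold the read is
	 * reported as "unclean" (-EUCLEAN) even though the returned
	 * data itself is good.
	 */
	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}

With the default threshold equal to the ECC strength, -EUCLEAN only appears
once ECC is already at its limit; at 75% there is still headroom left when
the consumer is told to act.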
@@ -8,7 +8,7 @@
 
 static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
 {
-@@ -1345,6 +1346,7 @@ static int spinand_probe(struct spi_mem
+@@ -1346,6 +1347,7 @@ static int spinand_probe(struct spi_mem
 	if (ret)
 		return ret;
 
@@ -16,7 +16,7 @@
 	ret = mtd_device_register(mtd, NULL, 0);
 	if (ret)
 		goto err_spinand_cleanup;
-@@ -1352,6 +1354,7 @@ static int spinand_probe(struct spi_mem
+@@ -1353,6 +1355,7 @@ static int spinand_probe(struct spi_mem
 	return 0;
 
 err_spinand_cleanup:
@@ -24,7 +24,7 @@
 	spinand_cleanup(spinand);
 
 	return ret;
-@@ -1370,6 +1373,7 @@ static int spinand_remove(struct spi_mem
+@@ -1371,6 +1374,7 @@ static int spinand_remove(struct spi_mem
 	if (ret)
 		return ret;
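The hunks above are a plain quilt-style refresh: the new line added at
core.c changes the line numbering seen by a later patch in the series, so
only that patch's embedded hunk headers are renumbered; no code changes.

As for why the earlier -EUCLEAN matters downstream: when a LEB read reports
bitflips, UBI keeps the data (ECC corrected it) but schedules the physical
eraseblock for scrubbing. A simplified sketch of that reaction (the names
echo drivers/mtd/ubi, but everything here is illustrative stand-in code,
not the kernel implementation):

#include <errno.h>

#define UBI_IO_BITFLIPS	1	/* stand-in for UBI's internal constant */

/* read_peb() stands in for UBI's low-level read; it yields -EUCLEAN
 * when the MTD layer saw bitflip_threshold or more corrected flips. */
extern int read_peb(int pnum, void *buf, int offset, int len);
extern void schedule_scrub(int pnum);	/* stand-in for ubi_wl_scrub_peb() */

static int leb_read(int pnum, void *buf, int offset, int len)
{
	int err = read_peb(pnum, buf, offset, len);

	if (err == -EUCLEAN) {
		/*
		 * Data is intact, but the block is degrading: copy it
		 * to a fresh block before the flips exceed what ECC
		 * can still correct.
		 */
		schedule_scrub(pnum);
		return UBI_IO_BITFLIPS;	/* success, with bitflips noted */
	}
	return err;
}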