Mirror of https://github.com/Telecominfraproject/wlan-ap.git, synced 2025-10-29 09:32:34 +00:00
This fixes the NAND issue on the Actiontec WEB7200.

Signed-off-by: John Crispin <john@phrozen.org>
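Note: the driver added by this patch selects its bad-block mapping backend from properties on the flash controller's device-tree node (see mtk_bmt_attach() below). A minimal sketch of how a board opts in; the node reference and the range values are hypothetical, only the mediatek,bbt / mediatek,bmt-v2 / mediatek,nmbm booleans and the optional mediatek,bmt-remap-range list of <start end> byte-address pairs are actually read by the driver:

	&nand {
		/* enable exactly one backend: mediatek,bbt, mediatek,bmt-v2 or mediatek,nmbm */
		mediatek,nmbm;
		/* optional: restrict block remapping to these <start end> ranges (example values) */
		mediatek,bmt-remap-range = <0x0 0x580000>;
	};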
From ee31f71c832f27a1a58d1dd3c33704d077f389d3 Mon Sep 17 00:00:00 2001
From: Felix Fietkau <nbd@nbd.name>
Date: Thu, 10 Mar 2022 18:19:37 +0100
Subject: [PATCH 1/8] ramips: add mtk_bmt support (includes bbt, bmt v2, nmbm)

Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
 target/linux/generic/config-5.4 | 1 +
 .../ramips/files/drivers/mtd/nand/mtk_bmt.c | 465 ++++
 .../ramips/files/drivers/mtd/nand/mtk_bmt.h | 131 +
 .../files/drivers/mtd/nand/mtk_bmt_bbt.c | 203 ++
 .../files/drivers/mtd/nand/mtk_bmt_nmbm.c | 2348 +++++++++++++++++
 .../files/drivers/mtd/nand/mtk_bmt_v2.c | 513 ++++
 .../ramips/files/include/linux/mtd/mtk_bmt.h | 18 +
 target/linux/ramips/mt7621/config-5.4 | 1 +
 .../patches-5.4/430-mtk-bmt-support.patch | 23 +
 9 files changed, 3703 insertions(+)
 create mode 100644 target/linux/ramips/files/drivers/mtd/nand/mtk_bmt.c
 create mode 100644 target/linux/ramips/files/drivers/mtd/nand/mtk_bmt.h
 create mode 100644 target/linux/ramips/files/drivers/mtd/nand/mtk_bmt_bbt.c
 create mode 100644 target/linux/ramips/files/drivers/mtd/nand/mtk_bmt_nmbm.c
 create mode 100644 target/linux/ramips/files/drivers/mtd/nand/mtk_bmt_v2.c
 create mode 100644 target/linux/ramips/files/include/linux/mtd/mtk_bmt.h
 create mode 100644 target/linux/ramips/patches-5.4/430-mtk-bmt-support.patch

diff --git a/target/linux/generic/config-5.4 b/target/linux/generic/config-5.4
index a62b2cadcbca..a821a1de5f16 100644
--- a/target/linux/generic/config-5.4
+++ b/target/linux/generic/config-5.4
@@ -3234,6 +3234,7 @@ CONFIG_MTD_NAND_IDS=y
 # CONFIG_MTD_NAND_JZ4740 is not set
 # CONFIG_MTD_NAND_MPC5121_NFC is not set
 # CONFIG_MTD_NAND_MTK is not set
+# CONFIG_MTD_NAND_MTK_BMT is not set
 # CONFIG_MTD_NAND_MXC is not set
 # CONFIG_MTD_NAND_MXIC is not set
 # CONFIG_MTD_NAND_NANDSIM is not set
diff --git a/target/linux/ramips/files/drivers/mtd/nand/mtk_bmt.c b/target/linux/ramips/files/drivers/mtd/nand/mtk_bmt.c
new file mode 100644
index 000000000000..f1df4a11f12d
--- /dev/null
+++ b/target/linux/ramips/files/drivers/mtd/nand/mtk_bmt.c
@@ -0,0 +1,465 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
+ * Copyright (c) 2020-2022 Felix Fietkau <nbd@nbd.name>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/bits.h>
+#include "mtk_bmt.h"
+
+struct bmt_desc bmtd = {};
+
+/* -------- Nand operations wrapper -------- */
+int bbt_nand_copy(u16 dest_blk, u16 src_blk, loff_t max_offset)
+{
+	int pages = bmtd.blk_size >> bmtd.pg_shift;
+	loff_t src = (loff_t)src_blk << bmtd.blk_shift;
+	loff_t dest = (loff_t)dest_blk << bmtd.blk_shift;
+	loff_t offset = 0;
+	uint8_t oob[64];
+	int i, ret;
+
+	for (i = 0; i < pages; i++) {
+		struct mtd_oob_ops rd_ops = {
+			.mode = MTD_OPS_PLACE_OOB,
+			.oobbuf = oob,
+			.ooblen = min_t(int, bmtd.mtd->oobsize / pages, sizeof(oob)),
+			.datbuf = bmtd.data_buf,
+			.len = bmtd.pg_size,
+		};
+		struct mtd_oob_ops wr_ops = {
+			.mode = MTD_OPS_PLACE_OOB,
+			.oobbuf = oob,
+			.datbuf = bmtd.data_buf,
+			.len = bmtd.pg_size,
+		};
+
+		if (offset >= max_offset)
+			break;
+
+		ret = bmtd._read_oob(bmtd.mtd, src + offset, &rd_ops);
+		if (ret < 0 && !mtd_is_bitflip(ret))
+			return ret;
+
+		if (!rd_ops.retlen)
+			break;
+
+		ret = bmtd._write_oob(bmtd.mtd, dest + offset, &wr_ops);
+		if (ret < 0)
+			return ret;
+
+		wr_ops.ooblen = rd_ops.oobretlen;
+		offset += rd_ops.retlen;
+	}
+
+	return 0;
+}
+
+/* -------- Bad Blocks Management -------- */
+bool mapping_block_in_range(int block, int *start, int *end)
+{
+	const __be32 *cur = bmtd.remap_range;
+	u32 addr = block << bmtd.blk_shift;
+	int i;
+
+	if (!cur || !bmtd.remap_range_len) {
+		*start = 0;
+		*end = bmtd.total_blks;
+		return true;
+	}
+
+	for (i = 0; i < bmtd.remap_range_len; i++, cur += 2) {
+		if (addr < be32_to_cpu(cur[0]) || addr >= be32_to_cpu(cur[1]))
+			continue;
+
+		*start = be32_to_cpu(cur[0]);
+		*end = be32_to_cpu(cur[1]);
+		return true;
+	}
+
+	return false;
+}
+
+static bool
+mtk_bmt_remap_block(u32 block, u32 mapped_block, int copy_len)
+{
+	int start, end;
+
+	if (!mapping_block_in_range(block, &start, &end))
+		return false;
+
+	return bmtd.ops->remap_block(block, mapped_block, copy_len);
+}
+
+static int
+mtk_bmt_read(struct mtd_info *mtd, loff_t from,
+	     struct mtd_oob_ops *ops)
+{
+	struct mtd_oob_ops cur_ops = *ops;
+	int retry_count = 0;
+	loff_t cur_from;
+	int ret = 0;
+	int max_bitflips = 0;
+
+	ops->retlen = 0;
+	ops->oobretlen = 0;
+
+	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
+		int cur_ret;
+
+		u32 offset = from & (bmtd.blk_size - 1);
+		u32 block = from >> bmtd.blk_shift;
+		int cur_block;
+
+		cur_block = bmtd.ops->get_mapping_block(block);
+		if (cur_block < 0)
+			return -EIO;
+
+		cur_from = ((loff_t)cur_block << bmtd.blk_shift) + offset;
+
+		cur_ops.oobretlen = 0;
+		cur_ops.retlen = 0;
+		cur_ops.len = min_t(u32, mtd->erasesize - offset,
+				    ops->len - ops->retlen);
+		cur_ret = bmtd._read_oob(mtd, cur_from, &cur_ops);
+		if (cur_ret < 0)
+			ret = cur_ret;
+		else
+			max_bitflips = max_t(int, max_bitflips, cur_ret);
+		if (cur_ret < 0 && !mtd_is_bitflip(cur_ret)) {
+			if (mtk_bmt_remap_block(block, cur_block, mtd->erasesize) &&
+			    retry_count++ < 10)
+				continue;
+
+			goto out;
+		}
+
+		if (cur_ret >= mtd->bitflip_threshold)
+			mtk_bmt_remap_block(block, cur_block, mtd->erasesize);
+
+		ops->retlen += cur_ops.retlen;
+		ops->oobretlen += cur_ops.oobretlen;
+
+		cur_ops.ooboffs = 0;
+		cur_ops.datbuf += cur_ops.retlen;
+		cur_ops.oobbuf += cur_ops.oobretlen;
+		cur_ops.ooblen -= cur_ops.oobretlen;
+
+		if (!cur_ops.len)
+			cur_ops.len = mtd->erasesize - offset;
+
+		from += cur_ops.len;
+		retry_count = 0;
+	}
+
+out:
+	if (ret < 0)
+		return ret;
+
+	return max_bitflips;
+}
+
+static int
+mtk_bmt_write(struct mtd_info *mtd, loff_t to,
+	      struct mtd_oob_ops *ops)
+{
+	struct mtd_oob_ops cur_ops = *ops;
+	int retry_count = 0;
+	loff_t cur_to;
+	int ret;
+
+	ops->retlen = 0;
+	ops->oobretlen = 0;
+
+	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
+		u32 offset = to & (bmtd.blk_size - 1);
+		u32 block = to >> bmtd.blk_shift;
+		int cur_block;
+
+		cur_block = bmtd.ops->get_mapping_block(block);
+		if (cur_block < 0)
+			return -EIO;
+
+		cur_to = ((loff_t)cur_block << bmtd.blk_shift) + offset;
+
+		cur_ops.oobretlen = 0;
+		cur_ops.retlen = 0;
+		cur_ops.len = min_t(u32, bmtd.blk_size - offset,
+				    ops->len - ops->retlen);
+		ret = bmtd._write_oob(mtd, cur_to, &cur_ops);
+		if (ret < 0) {
+			if (mtk_bmt_remap_block(block, cur_block, offset) &&
+			    retry_count++ < 10)
+				continue;
+
+			return ret;
+		}
+
+		ops->retlen += cur_ops.retlen;
+		ops->oobretlen += cur_ops.oobretlen;
+
+		cur_ops.ooboffs = 0;
+		cur_ops.datbuf += cur_ops.retlen;
+		cur_ops.oobbuf += cur_ops.oobretlen;
+		cur_ops.ooblen -= cur_ops.oobretlen;
+
+		if (!cur_ops.len)
+			cur_ops.len = mtd->erasesize - offset;
+
+		to += cur_ops.len;
+		retry_count = 0;
+	}
+
+	return 0;
+}
+
+static int
+mtk_bmt_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+	struct erase_info mapped_instr = {
+		.len = bmtd.blk_size,
+	};
+	int retry_count = 0;
+	u64 start_addr, end_addr;
+	int ret;
+	u16 orig_block;
+	int block;
+
+	start_addr = instr->addr & (~mtd->erasesize_mask);
+	end_addr = instr->addr + instr->len;
+
+	while (start_addr < end_addr) {
+		orig_block = start_addr >> bmtd.blk_shift;
+		block = bmtd.ops->get_mapping_block(orig_block);
+		if (block < 0)
+			return -EIO;
+		mapped_instr.addr = (loff_t)block << bmtd.blk_shift;
+		ret = bmtd._erase(mtd, &mapped_instr);
+		if (ret) {
+			if (mtk_bmt_remap_block(orig_block, block, 0) &&
+			    retry_count++ < 10)
+				continue;
+			instr->fail_addr = start_addr;
+			break;
+		}
+		start_addr += mtd->erasesize;
+		retry_count = 0;
+	}
+
+	return ret;
+}
+static int
+mtk_bmt_block_isbad(struct mtd_info *mtd, loff_t ofs)
+{
+	int retry_count = 0;
+	u16 orig_block = ofs >> bmtd.blk_shift;
+	u16 block;
+	int ret;
+
+retry:
+	block = bmtd.ops->get_mapping_block(orig_block);
+	ret = bmtd._block_isbad(mtd, (loff_t)block << bmtd.blk_shift);
+	if (ret) {
+		if (mtk_bmt_remap_block(orig_block, block, bmtd.blk_size) &&
+		    retry_count++ < 10)
+			goto retry;
+	}
+	return ret;
+}
+
+static int
+mtk_bmt_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+	u16 orig_block = ofs >> bmtd.blk_shift;
+	int block;
+
+	block = bmtd.ops->get_mapping_block(orig_block);
+	if (block < 0)
+		return -EIO;
+
+	mtk_bmt_remap_block(orig_block, block, bmtd.blk_size);
+
+	return bmtd._block_markbad(mtd, (loff_t)block << bmtd.blk_shift);
+}
+
+static void
+mtk_bmt_replace_ops(struct mtd_info *mtd)
+{
+	bmtd._read_oob = mtd->_read_oob;
+	bmtd._write_oob = mtd->_write_oob;
+	bmtd._erase = mtd->_erase;
+	bmtd._block_isbad = mtd->_block_isbad;
+	bmtd._block_markbad = mtd->_block_markbad;
+
+	mtd->_read_oob = mtk_bmt_read;
+	mtd->_write_oob = mtk_bmt_write;
+	mtd->_erase = mtk_bmt_mtd_erase;
+	mtd->_block_isbad = mtk_bmt_block_isbad;
+	mtd->_block_markbad = mtk_bmt_block_markbad;
+}
+
+static int mtk_bmt_debug_repair(void *data, u64 val)
+{
+	int block = val >> bmtd.blk_shift;
+	int prev_block, new_block;
+
+	prev_block = bmtd.ops->get_mapping_block(block);
+	if (prev_block < 0)
+		return -EIO;
+
+	bmtd.ops->unmap_block(block);
+	new_block = bmtd.ops->get_mapping_block(block);
+	if (new_block < 0)
+		return -EIO;
+
+	if (prev_block == new_block)
+		return 0;
+
+	bbt_nand_erase(new_block);
+	bbt_nand_copy(new_block, prev_block, bmtd.blk_size);
+
+	return 0;
+}
+
+static int mtk_bmt_debug_mark_good(void *data, u64 val)
+{
+	bmtd.ops->unmap_block(val >> bmtd.blk_shift);
+
+	return 0;
+}
+
+static int mtk_bmt_debug_mark_bad(void *data, u64 val)
+{
+	u32 block = val >> bmtd.blk_shift;
+	int cur_block;
+
+	cur_block = bmtd.ops->get_mapping_block(block);
+	if (cur_block < 0)
+		return -EIO;
+
+	mtk_bmt_remap_block(block, cur_block, bmtd.blk_size);
+
+	return 0;
+}
+
+static int mtk_bmt_debug(void *data, u64 val)
+{
+	return bmtd.ops->debug(data, val);
+}
+
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_repair, NULL, mtk_bmt_debug_repair, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_good, NULL, mtk_bmt_debug_mark_good, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_bad, NULL, mtk_bmt_debug_mark_bad, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(fops_debug, NULL, mtk_bmt_debug, "%llu\n");
+
+static void
+mtk_bmt_add_debugfs(void)
+{
+	struct dentry *dir;
+
+	dir = bmtd.debugfs_dir = debugfs_create_dir("mtk-bmt", NULL);
+	if (!dir)
+		return;
+
+	debugfs_create_file_unsafe("repair", S_IWUSR, dir, NULL, &fops_repair);
+	debugfs_create_file_unsafe("mark_good", S_IWUSR, dir, NULL, &fops_mark_good);
+	debugfs_create_file_unsafe("mark_bad", S_IWUSR, dir, NULL, &fops_mark_bad);
+	debugfs_create_file_unsafe("debug", S_IWUSR, dir, NULL, &fops_debug);
+}
+
+void mtk_bmt_detach(struct mtd_info *mtd)
+{
+	if (bmtd.mtd != mtd)
+		return;
+
+	if (bmtd.debugfs_dir)
+		debugfs_remove_recursive(bmtd.debugfs_dir);
+	bmtd.debugfs_dir = NULL;
+
+	kfree(bmtd.bbt_buf);
+	kfree(bmtd.data_buf);
+
+	mtd->_read_oob = bmtd._read_oob;
+	mtd->_write_oob = bmtd._write_oob;
+	mtd->_erase = bmtd._erase;
+	mtd->_block_isbad = bmtd._block_isbad;
+	mtd->_block_markbad = bmtd._block_markbad;
+	mtd->size = bmtd.total_blks << bmtd.blk_shift;
+
+	memset(&bmtd, 0, sizeof(bmtd));
+}
+
+
+int mtk_bmt_attach(struct mtd_info *mtd)
+{
+	struct device_node *np;
+	int ret = 0;
+
+	if (bmtd.mtd)
+		return -ENOSPC;
+
+	np = mtd_get_of_node(mtd);
+	if (!np)
+		return 0;
+
+	if (of_property_read_bool(np, "mediatek,bmt-v2"))
+		bmtd.ops = &mtk_bmt_v2_ops;
+	else if (of_property_read_bool(np, "mediatek,nmbm"))
+		bmtd.ops = &mtk_bmt_nmbm_ops;
+	else if (of_property_read_bool(np, "mediatek,bbt"))
+		bmtd.ops = &mtk_bmt_bbt_ops;
+	else
+		return 0;
+
+	bmtd.remap_range = of_get_property(np, "mediatek,bmt-remap-range",
+					   &bmtd.remap_range_len);
+	bmtd.remap_range_len /= 8;
+
+	bmtd.mtd = mtd;
+	mtk_bmt_replace_ops(mtd);
+
+	bmtd.blk_size = mtd->erasesize;
+	bmtd.blk_shift = ffs(bmtd.blk_size) - 1;
+	bmtd.pg_size = mtd->writesize;
+	bmtd.pg_shift = ffs(bmtd.pg_size) - 1;
+	bmtd.total_blks = mtd->size >> bmtd.blk_shift;
+
+	bmtd.data_buf = kzalloc(bmtd.pg_size + bmtd.mtd->oobsize, GFP_KERNEL);
+	if (!bmtd.data_buf) {
+		pr_info("nand: FATAL ERR: allocate buffer failed!\n");
+		ret = -1;
+		goto error;
+	}
+
+	memset(bmtd.data_buf, 0xff, bmtd.pg_size + bmtd.mtd->oobsize);
+
+	ret = bmtd.ops->init(np);
+	if (ret)
+		goto error;
+
+	mtk_bmt_add_debugfs();
+	return 0;
+
+error:
+	mtk_bmt_detach(mtd);
+	return ret;
+}
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Xiangsheng Hou <xiangsheng.hou@mediatek.com>, Felix Fietkau <nbd@nbd.name>");
+MODULE_DESCRIPTION("Bad Block mapping management v2 for MediaTek NAND Flash Driver");
+
diff --git a/target/linux/ramips/files/drivers/mtd/nand/mtk_bmt.h b/target/linux/ramips/files/drivers/mtd/nand/mtk_bmt.h
new file mode 100644
index 000000000000..dff1f28c8171
--- /dev/null
+++ b/target/linux/ramips/files/drivers/mtd/nand/mtk_bmt.h
@@ -0,0 +1,131 @@
+#ifndef __MTK_BMT_PRIV_H
+#define __MTK_BMT_PRIV_H
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/mtk_bmt.h>
+#include <linux/debugfs.h>
+
+#define MAIN_SIGNATURE_OFFSET 0
+#define OOB_SIGNATURE_OFFSET 1
+
+#define BBT_LOG(fmt, ...) pr_debug("[BBT][%s|%d] "fmt"\n", __func__, __LINE__, ##__VA_ARGS__)
+
+struct mtk_bmt_ops {
+	char *sig;
+	unsigned int sig_len;
+	int (*init)(struct device_node *np);
+	bool (*remap_block)(u16 block, u16 mapped_block, int copy_len);
+	void (*unmap_block)(u16 block);
+	int (*get_mapping_block)(int block);
+	int (*debug)(void *data, u64 val);
+};
+
+struct bbbt;
+struct nmbm_instance;
+
+struct bmt_desc {
+	struct mtd_info *mtd;
+	unsigned char *bbt_buf;
+	unsigned char *data_buf;
+
+	int (*_read_oob) (struct mtd_info *mtd, loff_t from,
+			  struct mtd_oob_ops *ops);
+	int (*_write_oob) (struct mtd_info *mtd, loff_t to,
+			   struct mtd_oob_ops *ops);
+	int (*_erase) (struct mtd_info *mtd, struct erase_info *instr);
+	int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs);
+	int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs);
+
+	const struct mtk_bmt_ops *ops;
+
+	union {
+		struct bbbt *bbt;
+		struct nmbm_instance *ni;
+	};
+
+	struct dentry *debugfs_dir;
+
+	u32 table_size;
+	u32 pg_size;
+	u32 blk_size;
+	u16 pg_shift;
+	u16 blk_shift;
+	/* bbt logical address */
+	u16 pool_lba;
+	/* bbt physical address */
+	u16 pool_pba;
+	/* Maximum count of bad blocks that the vendor guaranteed */
+	u16 bb_max;
+	/* Total blocks of the Nand Chip */
+	u16 total_blks;
+	/* The block(n) BMT is located at (bmt_tbl[n]) */
+	u16 bmt_blk_idx;
+	/* How many pages are needed to store 'struct bbbt' */
+	u32 bmt_pgs;
+
+	const __be32 *remap_range;
+	int remap_range_len;
+
+	/* to compensate for driver level remapping */
+	u8 oob_offset;
+};
+
+extern struct bmt_desc bmtd;
+extern const struct mtk_bmt_ops mtk_bmt_v2_ops;
+extern const struct mtk_bmt_ops mtk_bmt_bbt_ops;
+extern const struct mtk_bmt_ops mtk_bmt_nmbm_ops;
+
+static inline u32 blk_pg(u16 block)
+{
+	return (u32)(block << (bmtd.blk_shift - bmtd.pg_shift));
+}
+
+static inline int
+bbt_nand_read(u32 page, unsigned char *dat, int dat_len,
+	      unsigned char *fdm, int fdm_len)
+{
+	struct mtd_oob_ops ops = {
+		.mode = MTD_OPS_PLACE_OOB,
+		.ooboffs = bmtd.oob_offset,
+		.oobbuf = fdm,
+		.ooblen = fdm_len,
+		.datbuf = dat,
+		.len = dat_len,
+	};
+
+	return bmtd._read_oob(bmtd.mtd, page << bmtd.pg_shift, &ops);
+}
+
+static inline int bbt_nand_erase(u16 block)
+{
+	struct mtd_info *mtd = bmtd.mtd;
+	struct erase_info instr = {
+		.addr = (loff_t)block << bmtd.blk_shift,
+		.len = bmtd.blk_size,
+	};
+
+	return bmtd._erase(mtd, &instr);
+}
+
+static inline int write_bmt(u16 block, unsigned char *dat)
+{
+	struct mtd_oob_ops ops = {
+		.mode = MTD_OPS_PLACE_OOB,
+		.ooboffs = OOB_SIGNATURE_OFFSET + bmtd.oob_offset,
+		.oobbuf = bmtd.ops->sig,
+		.ooblen = bmtd.ops->sig_len,
+		.datbuf = dat,
+		.len = bmtd.bmt_pgs << bmtd.pg_shift,
+	};
+	loff_t addr = (loff_t)block << bmtd.blk_shift;
+
+	return bmtd._write_oob(bmtd.mtd, addr, &ops);
+}
+
+int bbt_nand_copy(u16 dest_blk, u16 src_blk, loff_t max_offset);
+bool mapping_block_in_range(int block, int *start, int *end);
+
+#endif
diff --git a/target/linux/ramips/files/drivers/mtd/nand/mtk_bmt_bbt.c b/target/linux/ramips/files/drivers/mtd/nand/mtk_bmt_bbt.c
new file mode 100644
index 000000000000..519e1ed70c7b
--- /dev/null
+++ b/target/linux/ramips/files/drivers/mtd/nand/mtk_bmt_bbt.c
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
+ * Copyright (c) 2020-2022 Felix Fietkau <nbd@nbd.name>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include "mtk_bmt.h"
+
+static bool
+bbt_block_is_bad(u16 block)
+{
+	u8 cur = bmtd.bbt_buf[block / 4];
+
+	return cur & (3 << ((block % 4) * 2));
+}
+
+static void
+bbt_set_block_state(u16 block, bool bad)
+{
+	u8 mask = (3 << ((block % 4) * 2));
+
+	if (bad)
+		bmtd.bbt_buf[block / 4] |= mask;
+	else
+		bmtd.bbt_buf[block / 4] &= ~mask;
+
+	bbt_nand_erase(bmtd.bmt_blk_idx);
+	write_bmt(bmtd.bmt_blk_idx, bmtd.bbt_buf);
+}
+
+static int
+get_mapping_block_index_bbt(int block)
+{
+	int start, end, ofs;
+	int bad_blocks = 0;
+	int i;
+
+	if (!mapping_block_in_range(block, &start, &end))
+		return block;
+
+	start >>= bmtd.blk_shift;
+	end >>= bmtd.blk_shift;
+	/* skip bad blocks within the mapping range */
+	ofs = block - start;
+	for (i = start; i < end; i++) {
+		if (bbt_block_is_bad(i))
+			bad_blocks++;
+		else if (ofs)
+			ofs--;
+		else
+			break;
+	}
+
+	if (i < end)
+		return i;
+
+	/* when overflowing, remap remaining blocks to bad ones */
+	for (i = end - 1; bad_blocks > 0; i--) {
+		if (!bbt_block_is_bad(i))
+			continue;
+
+		bad_blocks--;
+		if (bad_blocks <= ofs)
+			return i;
+	}
+
+	return block;
+}
+
+static bool remap_block_bbt(u16 block, u16 mapped_blk, int copy_len)
+{
+	int start, end;
+	u16 new_blk;
+
+	if (!mapping_block_in_range(block, &start, &end))
+		return false;
+
+	bbt_set_block_state(mapped_blk, true);
+
+	new_blk = get_mapping_block_index_bbt(block);
+	bbt_nand_erase(new_blk);
+	if (copy_len > 0)
+		bbt_nand_copy(new_blk, mapped_blk, copy_len);
+
+	return true;
+}
+
+static void
+unmap_block_bbt(u16 block)
+{
+	bbt_set_block_state(block, false);
+}
+
+static int
+mtk_bmt_read_bbt(void)
+{
+	u8 oob_buf[8];
+	int i;
+
+	for (i = bmtd.total_blks - 1; i >= bmtd.total_blks - 5; i--) {
+		u32 page = i << (bmtd.blk_shift - bmtd.pg_shift);
+
+		if (bbt_nand_read(page, bmtd.bbt_buf, bmtd.pg_size,
+				  oob_buf, sizeof(oob_buf))) {
+			pr_info("read_bbt: could not read block %d\n", i);
+			continue;
+		}
+
+		if (oob_buf[0] != 0xff) {
+			pr_info("read_bbt: bad block at %d\n", i);
+			continue;
+		}
+
+		if (memcmp(&oob_buf[1], "mtknand", 7) != 0) {
+			pr_info("read_bbt: signature mismatch in block %d\n", i);
+			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, oob_buf, 8, 1);
+			continue;
+		}
+
+		pr_info("read_bbt: found bbt at block %d\n", i);
+		bmtd.bmt_blk_idx = i;
+		return 0;
+	}
+
+	return -EIO;
+}
+
+
+static int
+mtk_bmt_init_bbt(struct device_node *np)
+{
+	int buf_size = round_up(bmtd.total_blks >> 2, bmtd.blk_size);
+	int ret;
+
+	bmtd.bbt_buf = kmalloc(buf_size, GFP_KERNEL);
+	if (!bmtd.bbt_buf)
+		return -ENOMEM;
+
+	memset(bmtd.bbt_buf, 0xff, buf_size);
+	bmtd.mtd->size -= 4 * bmtd.mtd->erasesize;
+
+	ret = mtk_bmt_read_bbt();
+	if (ret)
+		return ret;
+
+	bmtd.bmt_pgs = buf_size / bmtd.pg_size;
+
+	return 0;
+}
+
+static int mtk_bmt_debug_bbt(void *data, u64 val)
+{
+	char buf[5];
+	int i, k;
+
+	switch (val) {
+	case 0:
+		for (i = 0; i < bmtd.total_blks; i += 4) {
+			u8 cur = bmtd.bbt_buf[i / 4];
+
+			for (k = 0; k < 4; k++, cur >>= 2)
+				buf[k] = (cur & 3) ? 'B' : '.';
+
+			buf[4] = 0;
+			printk("[%06x] %s\n", i * bmtd.blk_size, buf);
+		}
+		break;
+	case 100:
+#if 0
+		for (i = bmtd.bmt_blk_idx; i < bmtd.total_blks - 1; i++)
+			bbt_nand_erase(bmtd.bmt_blk_idx);
+#endif
+
+		bmtd.bmt_blk_idx = bmtd.total_blks - 1;
+		bbt_nand_erase(bmtd.bmt_blk_idx);
+		write_bmt(bmtd.bmt_blk_idx, bmtd.bbt_buf);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+const struct mtk_bmt_ops mtk_bmt_bbt_ops = {
+	.sig = "mtknand",
+	.sig_len = 7,
+	.init = mtk_bmt_init_bbt,
+	.remap_block = remap_block_bbt,
+	.unmap_block = unmap_block_bbt,
+	.get_mapping_block = get_mapping_block_index_bbt,
+	.debug = mtk_bmt_debug_bbt,
+};
diff --git a/target/linux/ramips/files/drivers/mtd/nand/mtk_bmt_nmbm.c b/target/linux/ramips/files/drivers/mtd/nand/mtk_bmt_nmbm.c
|
|
new file mode 100644
|
|
index 000000000000..a896e49ec047
|
|
--- /dev/null
|
|
+++ b/target/linux/ramips/files/drivers/mtd/nand/mtk_bmt_nmbm.c
|
|
@@ -0,0 +1,2348 @@
|
|
+#include <linux/crc32.h>
|
|
+#include <linux/slab.h>
|
|
+#include "mtk_bmt.h"
|
|
+
|
|
+#define nlog_err(ni, ...) printk(KERN_ERR __VA_ARGS__)
|
|
+#define nlog_info(ni, ...) printk(KERN_INFO __VA_ARGS__)
|
|
+#define nlog_debug(ni, ...) printk(KERN_INFO __VA_ARGS__)
|
|
+#define nlog_warn(ni, ...) printk(KERN_WARNING __VA_ARGS__)
|
|
+
|
|
+#define NMBM_MAGIC_SIGNATURE 0x304d4d4e /* NMM0 */
|
|
+#define NMBM_MAGIC_INFO_TABLE 0x314d4d4e /* NMM1 */
|
|
+
|
|
+#define NMBM_VERSION_MAJOR_S 0
|
|
+#define NMBM_VERSION_MAJOR_M 0xffff
|
|
+#define NMBM_VERSION_MINOR_S 16
|
|
+#define NMBM_VERSION_MINOR_M 0xffff
|
|
+#define NMBM_VERSION_MAKE(major, minor) (((major) & NMBM_VERSION_MAJOR_M) | \
|
|
+ (((minor) & NMBM_VERSION_MINOR_M) << \
|
|
+ NMBM_VERSION_MINOR_S))
|
|
+#define NMBM_VERSION_MAJOR_GET(ver) (((ver) >> NMBM_VERSION_MAJOR_S) & \
|
|
+ NMBM_VERSION_MAJOR_M)
|
|
+#define NMBM_VERSION_MINOR_GET(ver) (((ver) >> NMBM_VERSION_MINOR_S) & \
|
|
+ NMBM_VERSION_MINOR_M)
|
|
+
|
|
+#define NMBM_BITMAP_UNIT_SIZE (sizeof(u32))
|
|
+#define NMBM_BITMAP_BITS_PER_BLOCK 2
|
|
+#define NMBM_BITMAP_BITS_PER_UNIT (8 * sizeof(u32))
|
|
+#define NMBM_BITMAP_BLOCKS_PER_UNIT (NMBM_BITMAP_BITS_PER_UNIT / \
|
|
+ NMBM_BITMAP_BITS_PER_BLOCK)
|
|
+
|
|
+#define NMBM_SPARE_BLOCK_MULTI 1
|
|
+#define NMBM_SPARE_BLOCK_DIV 2
|
|
+#define NMBM_SPARE_BLOCK_MIN 2
|
|
+
|
|
+#define NMBM_MGMT_DIV 16
|
|
+#define NMBM_MGMT_BLOCKS_MIN 32
|
|
+
|
|
+#define NMBM_TRY_COUNT 3
|
|
+
|
|
+#define BLOCK_ST_BAD 0
|
|
+#define BLOCK_ST_NEED_REMAP 2
|
|
+#define BLOCK_ST_GOOD 3
|
|
+#define BLOCK_ST_MASK 3
|
|
+
|
|
+#define NMBM_VER_MAJOR 1
|
|
+#define NMBM_VER_MINOR 0
|
|
+#define NMBM_VER NMBM_VERSION_MAKE(NMBM_VER_MAJOR, \
|
|
+ NMBM_VER_MINOR)
|
|
+
|
|
+struct nmbm_header {
|
|
+ u32 magic;
|
|
+ u32 version;
|
|
+ u32 size;
|
|
+ u32 checksum;
|
|
+};
|
|
+
|
|
+struct nmbm_signature {
|
|
+ struct nmbm_header header;
|
|
+ uint64_t nand_size;
|
|
+ u32 block_size;
|
|
+ u32 page_size;
|
|
+ u32 spare_size;
|
|
+ u32 mgmt_start_pb;
|
|
+ u8 max_try_count;
|
|
+ u8 padding[3];
|
|
+};
|
|
+
|
|
+struct nmbm_info_table_header {
|
|
+ struct nmbm_header header;
|
|
+ u32 write_count;
|
|
+ u32 state_table_off;
|
|
+ u32 mapping_table_off;
|
|
+ u32 padding;
|
|
+};
|
|
+
|
|
+struct nmbm_instance {
|
|
+ u32 rawpage_size;
|
|
+ u32 rawblock_size;
|
|
+ u32 rawchip_size;
|
|
+
|
|
+ struct nmbm_signature signature;
|
|
+
|
|
+ u8 *info_table_cache;
|
|
+ u32 info_table_size;
|
|
+ u32 info_table_spare_blocks;
|
|
+ struct nmbm_info_table_header info_table;
|
|
+
|
|
+ u32 *block_state;
|
|
+ u32 block_state_changed;
|
|
+ u32 state_table_size;
|
|
+
|
|
+ int32_t *block_mapping;
|
|
+ u32 block_mapping_changed;
|
|
+ u32 mapping_table_size;
|
|
+
|
|
+ u8 *page_cache;
|
|
+
|
|
+ int protected;
|
|
+
|
|
+ u32 block_count;
|
|
+ u32 data_block_count;
|
|
+
|
|
+ u32 mgmt_start_ba;
|
|
+ u32 main_table_ba;
|
|
+ u32 backup_table_ba;
|
|
+ u32 mapping_blocks_ba;
|
|
+ u32 mapping_blocks_top_ba;
|
|
+ u32 signature_ba;
|
|
+
|
|
+ u32 max_ratio;
|
|
+ u32 max_reserved_blocks;
|
|
+ bool empty_page_ecc_ok;
|
|
+ bool force_create;
|
|
+};
|
|
+
|
|
+static inline u32 nmbm_crc32(u32 crcval, const void *buf, size_t size)
|
|
+{
|
|
+ unsigned int chksz;
|
|
+ const unsigned char *p = buf;
|
|
+
|
|
+ while (size) {
|
|
+ if (size > UINT_MAX)
|
|
+ chksz = UINT_MAX;
|
|
+ else
|
|
+ chksz = (uint)size;
|
|
+
|
|
+ crcval = crc32_le(crcval, p, chksz);
|
|
+ size -= chksz;
|
|
+ p += chksz;
|
|
+ }
|
|
+
|
|
+ return crcval;
|
|
+}
|
|
+/*
|
|
+ * nlog_table_creation - Print log of table creation event
|
|
+ * @ni: NMBM instance structure
|
|
+ * @main_table: whether the table is main info table
|
|
+ * @start_ba: start block address of the table
|
|
+ * @end_ba: block address after the end of the table
|
|
+ */
|
|
+static void nlog_table_creation(struct nmbm_instance *ni, bool main_table,
|
|
+ uint32_t start_ba, uint32_t end_ba)
|
|
+{
|
|
+ if (start_ba == end_ba - 1)
|
|
+ nlog_info(ni, "%s info table has been written to block %u\n",
|
|
+ main_table ? "Main" : "Backup", start_ba);
|
|
+ else
|
|
+ nlog_info(ni, "%s info table has been written to block %u-%u\n",
|
|
+ main_table ? "Main" : "Backup", start_ba, end_ba - 1);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * nlog_table_update - Print log of table update event
|
|
+ * @ni: NMBM instance structure
|
|
+ * @main_table: whether the table is main info table
|
|
+ * @start_ba: start block address of the table
|
|
+ * @end_ba: block address after the end of the table
|
|
+ */
|
|
+static void nlog_table_update(struct nmbm_instance *ni, bool main_table,
|
|
+ uint32_t start_ba, uint32_t end_ba)
|
|
+{
|
|
+ if (start_ba == end_ba - 1)
|
|
+ nlog_debug(ni, "%s info table has been updated in block %u\n",
|
|
+ main_table ? "Main" : "Backup", start_ba);
|
|
+ else
|
|
+ nlog_debug(ni, "%s info table has been updated in block %u-%u\n",
|
|
+ main_table ? "Main" : "Backup", start_ba, end_ba - 1);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * nlog_table_found - Print log of table found event
|
|
+ * @ni: NMBM instance structure
|
|
+ * @first_table: whether the table is first found info table
|
|
+ * @write_count: write count of the info table
|
|
+ * @start_ba: start block address of the table
|
|
+ * @end_ba: block address after the end of the table
|
|
+ */
|
|
+static void nlog_table_found(struct nmbm_instance *ni, bool first_table,
|
|
+ uint32_t write_count, uint32_t start_ba,
|
|
+ uint32_t end_ba)
|
|
+{
|
|
+ if (start_ba == end_ba - 1)
|
|
+ nlog_info(ni, "%s info table with writecount %u found in block %u\n",
|
|
+ first_table ? "First" : "Second", write_count,
|
|
+ start_ba);
|
|
+ else
|
|
+ nlog_info(ni, "%s info table with writecount %u found in block %u-%u\n",
|
|
+ first_table ? "First" : "Second", write_count,
|
|
+ start_ba, end_ba - 1);
|
|
+}
|
|
+
|
|
+/*****************************************************************************/
|
|
+/* Address conversion functions */
|
|
+/*****************************************************************************/
|
|
+
|
|
+/*
|
|
+ * ba2addr - Convert a block address to linear address
|
|
+ * @ni: NMBM instance structure
|
|
+ * @ba: Block address
|
|
+ */
|
|
+static uint64_t ba2addr(struct nmbm_instance *ni, uint32_t ba)
|
|
+{
|
|
+ return (uint64_t)ba << bmtd.blk_shift;
|
|
+}
|
|
+/*
|
|
+ * size2blk - Get minimum required blocks for storing specific size of data
|
|
+ * @ni: NMBM instance structure
|
|
+ * @size: size for storing
|
|
+ */
|
|
+static uint32_t size2blk(struct nmbm_instance *ni, uint64_t size)
|
|
+{
|
|
+ return (size + bmtd.blk_size - 1) >> bmtd.blk_shift;
|
|
+}
|
|
+
|
|
+/*****************************************************************************/
|
|
+/* High level NAND chip APIs */
|
|
+/*****************************************************************************/
|
|
+
|
|
+/*
|
|
+ * nmbm_read_phys_page - Read page with retry
|
|
+ * @ni: NMBM instance structure
|
|
+ * @addr: linear address where the data will be read from
|
|
+ * @data: the main data to be read
|
|
+ * @oob: the oob data to be read
|
|
+ *
|
|
+ * Read a page for at most NMBM_TRY_COUNT times.
|
|
+ *
|
|
+ * Return 0 for success, positive value for corrected bitflip count,
|
|
+ * -EBADMSG for ecc error, other negative values for other errors
|
|
+ */
|
|
+static int nmbm_read_phys_page(struct nmbm_instance *ni, uint64_t addr,
|
|
+ void *data, void *oob)
|
|
+{
|
|
+ int tries, ret;
|
|
+
|
|
+ for (tries = 0; tries < NMBM_TRY_COUNT; tries++) {
|
|
+ struct mtd_oob_ops ops = {
|
|
+ .mode = MTD_OPS_PLACE_OOB,
|
|
+ .oobbuf = oob,
|
|
+ .datbuf = data,
|
|
+ };
|
|
+
|
|
+ if (data)
|
|
+ ops.len = bmtd.pg_size;
|
|
+ if (oob)
|
|
+ ops.ooblen = mtd_oobavail(bmtd.mtd, &ops);
|
|
+
|
|
+ ret = bmtd._read_oob(bmtd.mtd, addr, &ops);
|
|
+ if (ret == -EUCLEAN)
|
|
+ return min_t(u32, bmtd.mtd->bitflip_threshold + 1,
|
|
+ bmtd.mtd->ecc_strength);
|
|
+ if (ret >= 0)
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ if (ret != -EBADMSG)
|
|
+ nlog_err(ni, "Page read failed at address 0x%08llx\n", addr);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * nmbm_write_phys_page - Write page with retry
|
|
+ * @ni: NMBM instance structure
|
|
+ * @addr: linear address where the data will be written to
|
|
+ * @data: the main data to be written
|
|
+ * @oob: the oob data to be written
|
|
+ *
|
|
+ * Write a page for at most NMBM_TRY_COUNT times.
|
|
+ */
|
|
+static bool nmbm_write_phys_page(struct nmbm_instance *ni, uint64_t addr,
|
|
+ const void *data, const void *oob)
|
|
+{
|
|
+ int tries, ret;
|
|
+
|
|
+ for (tries = 0; tries < NMBM_TRY_COUNT; tries++) {
|
|
+ struct mtd_oob_ops ops = {
|
|
+ .mode = MTD_OPS_PLACE_OOB,
|
|
+ .oobbuf = (void *)oob,
|
|
+ .datbuf = (void *)data,
|
|
+ };
|
|
+
|
|
+ if (data)
|
|
+ ops.len = bmtd.pg_size;
|
|
+ if (oob)
|
|
+ ops.ooblen = mtd_oobavail(bmtd.mtd, &ops);
|
|
+
|
|
+ ret = bmtd._write_oob(bmtd.mtd, addr, &ops);
|
|
+ if (!ret)
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ nlog_err(ni, "Page write failed at address 0x%08llx\n", addr);
|
|
+
|
|
+ return false;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * nmbm_erase_phys_block - Erase a block with retry
|
|
+ * @ni: NMBM instance structure
|
|
+ * @addr: Linear address
|
|
+ *
|
|
+ * Erase a block for at most NMBM_TRY_COUNT times.
|
|
+ */
|
|
+static bool nmbm_erase_phys_block(struct nmbm_instance *ni, uint64_t addr)
|
|
+{
|
|
+ int tries, ret;
|
|
+
|
|
+ for (tries = 0; tries < NMBM_TRY_COUNT; tries++) {
|
|
+ struct erase_info ei = {
|
|
+ .addr = addr,
|
|
+ .len = bmtd.mtd->erasesize,
|
|
+ };
|
|
+
|
|
+ ret = bmtd._erase(bmtd.mtd, &ei);
|
|
+ if (!ret)
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ nlog_err(ni, "Block erasure failed at address 0x%08llx\n", addr);
|
|
+
|
|
+ return false;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * nmbm_check_bad_phys_block - Check whether a block is marked bad in OOB
|
|
+ * @ni: NMBM instance structure
|
|
+ * @ba: block address
|
|
+ */
|
|
+static bool nmbm_check_bad_phys_block(struct nmbm_instance *ni, uint32_t ba)
|
|
+{
|
|
+ uint64_t addr = ba2addr(ni, ba);
|
|
+
|
|
+ return bmtd._block_isbad(bmtd.mtd, addr);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * nmbm_mark_phys_bad_block - Mark a block bad
|
|
+ * @ni: NMBM instance structure
|
|
+ * @addr: Linear address
|
|
+ */
|
|
+static int nmbm_mark_phys_bad_block(struct nmbm_instance *ni, uint32_t ba)
|
|
+{
|
|
+ uint64_t addr = ba2addr(ni, ba);
|
|
+
|
|
+ nlog_info(ni, "Block %u [0x%08llx] will be marked bad\n", ba, addr);
|
|
+
|
|
+ return bmtd._block_markbad(bmtd.mtd, addr);
|
|
+}
|
|
+
|
|
+/*****************************************************************************/
|
|
+/* NMBM related functions */
|
|
+/*****************************************************************************/
|
|
+
|
|
+/*
|
|
+ * nmbm_check_header - Check whether a NMBM structure is valid
|
|
+ * @data: pointer to a NMBM structure with a NMBM header at beginning
|
|
+ * @size: Size of the buffer pointed by @header
|
|
+ *
|
|
+ * The size of the NMBM structure may be larger than NMBM header,
|
|
+ * e.g. block mapping table and block state table.
|
|
+ */
|
|
+static bool nmbm_check_header(const void *data, uint32_t size)
|
|
+{
|
|
+ const struct nmbm_header *header = data;
|
|
+ struct nmbm_header nhdr;
|
|
+ uint32_t new_checksum;
|
|
+
|
|
+ /*
|
|
+ * Make sure expected structure size is equal or smaller than
|
|
+ * buffer size.
|
|
+ */
|
|
+ if (header->size > size)
|
|
+ return false;
|
|
+
|
|
+ memcpy(&nhdr, data, sizeof(nhdr));
|
|
+
|
|
+ nhdr.checksum = 0;
|
|
+ new_checksum = nmbm_crc32(0, &nhdr, sizeof(nhdr));
|
|
+ if (header->size > sizeof(nhdr))
|
|
+ new_checksum = nmbm_crc32(new_checksum,
|
|
+ (const uint8_t *)data + sizeof(nhdr),
|
|
+ header->size - sizeof(nhdr));
|
|
+
|
|
+ if (header->checksum != new_checksum)
|
|
+ return false;
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * nmbm_update_checksum - Update checksum of a NMBM structure
|
|
+ * @header: pointer to a NMBM structure with a NMBM header at beginning
|
|
+ *
|
|
+ * The size of the NMBM structure must be specified by @header->size
|
|
+ */
|
|
+static void nmbm_update_checksum(struct nmbm_header *header)
|
|
+{
|
|
+ header->checksum = 0;
|
|
+ header->checksum = nmbm_crc32(0, header, header->size);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * nmbm_get_spare_block_count - Calculate number of blocks should be reserved
|
|
+ * @block_count: number of blocks of data
|
|
+ *
|
|
+ * Calculate number of blocks should be reserved for data
|
|
+ */
|
|
+static uint32_t nmbm_get_spare_block_count(uint32_t block_count)
|
|
+{
|
|
+ uint32_t val;
|
|
+
|
|
+ val = (block_count + NMBM_SPARE_BLOCK_DIV / 2) / NMBM_SPARE_BLOCK_DIV;
|
|
+ val *= NMBM_SPARE_BLOCK_MULTI;
|
|
+
|
|
+ if (val < NMBM_SPARE_BLOCK_MIN)
|
|
+ val = NMBM_SPARE_BLOCK_MIN;
|
|
+
|
|
+ return val;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * nmbm_get_block_state_raw - Get state of a block from raw block state table
|
|
+ * @block_state: pointer to raw block state table (bitmap)
|
|
+ * @ba: block address
|
|
+ */
|
|
+static uint32_t nmbm_get_block_state_raw(u32 *block_state,
|
|
+ uint32_t ba)
|
|
+{
|
|
+ uint32_t unit, shift;
|
|
+
|
|
+ unit = ba / NMBM_BITMAP_BLOCKS_PER_UNIT;
|
|
+ shift = (ba % NMBM_BITMAP_BLOCKS_PER_UNIT) * NMBM_BITMAP_BITS_PER_BLOCK;
|
|
+
|
|
+ return (block_state[unit] >> shift) & BLOCK_ST_MASK;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * nmbm_get_block_state - Get state of a block from block state table
|
|
+ * @ni: NMBM instance structure
|
|
+ * @ba: block address
|
|
+ */
|
|
+static uint32_t nmbm_get_block_state(struct nmbm_instance *ni, uint32_t ba)
|
|
+{
|
|
+ return nmbm_get_block_state_raw(ni->block_state, ba);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * nmbm_set_block_state - Set state of a block to block state table
|
|
+ * @ni: NMBM instance structure
|
|
+ * @ba: block address
|
|
+ * @state: block state
|
|
+ *
|
|
+ * Set state of a block. If the block state changed, ni->block_state_changed
|
|
+ * will be increased.
|
|
+ */
|
|
+static bool nmbm_set_block_state(struct nmbm_instance *ni, uint32_t ba,
|
|
+ uint32_t state)
|
|
+{
|
|
+ uint32_t unit, shift, orig;
|
|
+ u32 uv;
|
|
+
|
|
+ unit = ba / NMBM_BITMAP_BLOCKS_PER_UNIT;
|
|
+ shift = (ba % NMBM_BITMAP_BLOCKS_PER_UNIT) * NMBM_BITMAP_BITS_PER_BLOCK;
|
|
+
|
|
+ orig = (ni->block_state[unit] >> shift) & BLOCK_ST_MASK;
|
|
+ state &= BLOCK_ST_MASK;
|
|
+
|
|
+ uv = ni->block_state[unit] & (~(BLOCK_ST_MASK << shift));
|
|
+ uv |= state << shift;
|
|
+ ni->block_state[unit] = uv;
|
|
+
|
|
+ if (orig != state) {
|
|
+ ni->block_state_changed++;
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ return false;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * nmbm_block_walk_asc - Skip specified number of good blocks, ascending addr.
|
|
+ * @ni: NMBM instance structure
|
|
+ * @ba: start physical block address
|
|
+ * @nba: return physical block address after walk
|
|
+ * @count: number of good blocks to be skipped
|
|
+ * @limit: highest block address allowed for walking
|
|
+ *
|
|
+ * Start from @ba, skipping any bad blocks, counting @count good blocks, and
|
|
+ * return the next good block address.
|
|
+ *
|
|
+ * If no enough good blocks counted while @limit reached, false will be returned.
|
|
+ *
|
|
+ * If @count == 0, nearest good block address will be returned.
|
|
+ * @limit is not counted in walking.
|
|
+ */
|
|
+static bool nmbm_block_walk_asc(struct nmbm_instance *ni, uint32_t ba,
|
|
+ uint32_t *nba, uint32_t count,
|
|
+ uint32_t limit)
|
|
+{
|
|
+ int32_t nblock = count;
|
|
+
|
|
+ if (limit >= ni->block_count)
|
|
+ limit = ni->block_count - 1;
|
|
+
|
|
+ while (ba < limit) {
|
|
+ if (nmbm_get_block_state(ni, ba) == BLOCK_ST_GOOD)
|
|
+ nblock--;
|
|
+
|
|
+ if (nblock < 0) {
|
|
+ *nba = ba;
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ ba++;
|
|
+ }
|
|
+
|
|
+ return false;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * nmbm_block_walk_desc - Skip specified number of good blocks, descending addr
|
|
+ * @ni: NMBM instance structure
|
|
+ * @ba: start physical block address
|
|
+ * @nba: return physical block address after walk
|
|
+ * @count: number of good blocks to be skipped
|
|
+ * @limit: lowest block address allowed for walking
|
|
+ *
|
|
+ * Start from @ba, skipping any bad blocks, counting @count good blocks, and
|
|
+ * return the next good block address.
|
|
+ *
|
|
+ * If no enough good blocks counted while @limit reached, false will be returned.
|
|
+ *
|
|
+ * If @count == 0, nearest good block address will be returned.
|
|
+ * @limit is not counted in walking.
|
|
+ */
|
|
+static bool nmbm_block_walk_desc(struct nmbm_instance *ni, uint32_t ba,
|
|
+ uint32_t *nba, uint32_t count, uint32_t limit)
|
|
+{
|
|
+ int32_t nblock = count;
|
|
+
|
|
+ if (limit >= ni->block_count)
|
|
+ limit = ni->block_count - 1;
|
|
+
|
|
+ while (ba > limit) {
|
|
+ if (nmbm_get_block_state(ni, ba) == BLOCK_ST_GOOD)
|
|
+ nblock--;
|
|
+
|
|
+ if (nblock < 0) {
|
|
+ *nba = ba;
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ ba--;
|
|
+ }
|
|
+
|
|
+ return false;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * nmbm_block_walk - Skip specified number of good blocks from curr. block addr
|
|
+ * @ni: NMBM instance structure
|
|
+ * @ascending: whether to walk ascending
|
|
+ * @ba: start physical block address
|
|
+ * @nba: return physical block address after walk
|
|
+ * @count: number of good blocks to be skipped
|
|
+ * @limit: highest/lowest block address allowed for walking
|
|
+ *
|
|
+ * Start from @ba, skipping any bad blocks, counting @count good blocks, and
|
|
+ * return the next good block address.
|
|
+ *
|
|
+ * If no enough good blocks counted while @limit reached, false will be returned.
|
|
+ *
|
|
+ * If @count == 0, nearest good block address will be returned.
|
|
+ * @limit can be set to negative if no limit required.
|
|
+ * @limit is not counted in walking.
|
|
+ */
|
|
+static bool nmbm_block_walk(struct nmbm_instance *ni, bool ascending,
|
|
+ uint32_t ba, uint32_t *nba, int32_t count,
|
|
+ int32_t limit)
|
|
+{
|
|
+ if (ascending)
|
|
+ return nmbm_block_walk_asc(ni, ba, nba, count, limit);
|
|
+
|
|
+ return nmbm_block_walk_desc(ni, ba, nba, count, limit);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * nmbm_scan_badblocks - Scan and record all bad blocks
|
|
+ * @ni: NMBM instance structure
|
|
+ *
|
|
+ * Scan the entire lower NAND chip and record all bad blocks in to block state
|
|
+ * table.
|
|
+ */
|
|
+static void nmbm_scan_badblocks(struct nmbm_instance *ni)
|
|
+{
|
|
+ uint32_t ba;
|
|
+
|
|
+ for (ba = 0; ba < ni->block_count; ba++) {
|
|
+ if (nmbm_check_bad_phys_block(ni, ba)) {
|
|
+ nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
|
|
+ nlog_info(ni, "Bad block %u [0x%08llx]\n", ba,
|
|
+ ba2addr(ni, ba));
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+/*
|
|
+ * nmbm_build_mapping_table - Build initial block mapping table
|
|
+ * @ni: NMBM instance structure
|
|
+ *
|
|
+ * The initial mapping table will be compatible with the stratage of
|
|
+ * factory production.
|
|
+ */
|
|
+static void nmbm_build_mapping_table(struct nmbm_instance *ni)
|
|
+{
|
|
+ uint32_t pb, lb;
|
|
+
|
|
+ for (pb = 0, lb = 0; pb < ni->mgmt_start_ba; pb++) {
|
|
+ if (nmbm_get_block_state(ni, pb) == BLOCK_ST_BAD)
|
|
+ continue;
|
|
+
|
|
+ /* Always map to the next good block */
|
|
+ ni->block_mapping[lb++] = pb;
|
|
+ }
|
|
+
|
|
+ ni->data_block_count = lb;
|
|
+
|
|
+ /* Unusable/Management blocks */
|
|
+ for (pb = lb; pb < ni->block_count; pb++)
|
|
+ ni->block_mapping[pb] = -1;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * nmbm_erase_block_and_check - Erase a block and check its usability
|
|
+ * @ni: NMBM instance structure
|
|
+ * @ba: block address to be erased
|
|
+ *
|
|
+ * Erase a block anc check its usability
|
|
+ *
|
|
+ * Return true if the block is usable, false if erasure failure or the block
|
|
+ * has too many bitflips.
|
|
+ */
|
|
+static bool nmbm_erase_block_and_check(struct nmbm_instance *ni, uint32_t ba)
|
|
+{
|
|
+ uint64_t addr, off;
|
|
+ bool success;
|
|
+ int ret;
|
|
+
|
|
+ success = nmbm_erase_phys_block(ni, ba2addr(ni, ba));
|
|
+ if (!success)
|
|
+ return false;
|
|
+
|
|
+ if (!ni->empty_page_ecc_ok)
|
|
+ return true;
|
|
+
|
|
+ /* Check every page to make sure there aren't too many bitflips */
|
|
+
|
|
+ addr = ba2addr(ni, ba);
|
|
+
|
|
+ for (off = 0; off < bmtd.blk_size; off += bmtd.pg_size) {
|
|
+ ret = nmbm_read_phys_page(ni, addr + off, ni->page_cache, NULL);
|
|
+ if (ret == -EBADMSG) {
|
|
+ /*
|
|
+ * empty_page_ecc_ok means the empty page is
|
|
+ * still protected by ECC. So reading pages with ECC
|
|
+ * enabled and -EBADMSG means there are too many
|
|
+ * bitflips that can't be recovered, and the block
|
|
+ * containing the page should be marked bad.
|
|
+ */
|
|
+ nlog_err(ni,
|
|
+ "Too many bitflips in empty page at 0x%llx\n",
|
|
+ addr + off);
|
|
+ return false;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * nmbm_erase_range - Erase a range of blocks
|
|
+ * @ni: NMBM instance structure
|
|
+ * @ba: block address where the erasure will start
|
|
+ * @limit: top block address allowed for erasure
|
|
+ *
|
|
+ * Erase blocks within the specific range. Newly-found bad blocks will be
|
|
+ * marked.
|
|
+ *
|
|
+ * @limit is not counted into the allowed erasure address.
|
|
+ */
|
|
+static void nmbm_erase_range(struct nmbm_instance *ni, uint32_t ba,
|
|
+ uint32_t limit)
|
|
+{
|
|
+ bool success;
|
|
+
|
|
+ while (ba < limit) {
|
|
+ if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
|
|
+ goto next_block;
|
|
+
|
|
+ /* Insurance to detect unexpected bad block marked by user */
|
|
+ if (nmbm_check_bad_phys_block(ni, ba)) {
|
|
+ nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
|
|
+ goto next_block;
|
|
+ }
|
|
+
|
|
+ success = nmbm_erase_block_and_check(ni, ba);
|
|
+ if (success)
|
|
+ goto next_block;
|
|
+
|
|
+ nmbm_mark_phys_bad_block(ni, ba);
|
|
+ nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
|
|
+
|
|
+ next_block:
|
|
+ ba++;
|
|
+ }
|
|
+}
|
|
+
|
|
+/*
|
|
+ * nmbm_write_repeated_data - Write critical data to a block with retry
|
|
+ * @ni: NMBM instance structure
|
|
+ * @ba: block address where the data will be written to
|
|
+ * @data: the data to be written
|
|
+ * @size: size of the data
|
|
+ *
|
|
+ * Write data to every page of the block. Success only if all pages within
|
|
+ * this block have been successfully written.
|
|
+ *
|
|
+ * Make sure data size is not bigger than one page.
|
|
+ *
|
|
+ * This function will write and verify every page for at most
|
|
+ * NMBM_TRY_COUNT times.
|
|
+ */
|
|
+static bool nmbm_write_repeated_data(struct nmbm_instance *ni, uint32_t ba,
|
|
+ const void *data, uint32_t size)
|
|
+{
|
|
+ uint64_t addr, off;
|
|
+ bool success;
|
|
+ int ret;
|
|
+
|
|
+ if (size > bmtd.pg_size)
|
|
+ return false;
|
|
+
|
|
+ addr = ba2addr(ni, ba);
|
|
+
|
|
+ for (off = 0; off < bmtd.blk_size; off += bmtd.pg_size) {
|
|
+ /* Prepare page data. fill 0xff to unused region */
|
|
+ memcpy(ni->page_cache, data, size);
|
|
+ memset(ni->page_cache + size, 0xff, ni->rawpage_size - size);
|
|
+
|
|
+ success = nmbm_write_phys_page(ni, addr + off, ni->page_cache, NULL);
|
|
+ if (!success)
|
|
+ return false;
|
|
+
|
|
+ /* Verify the data just written. ECC error indicates failure */
|
|
+ ret = nmbm_read_phys_page(ni, addr + off, ni->page_cache, NULL);
|
|
+ if (ret < 0)
|
|
+ return false;
|
|
+
|
|
+ if (memcmp(ni->page_cache, data, size))
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * nmbm_write_signature - Write signature to NAND chip
|
|
+ * @ni: NMBM instance structure
|
|
+ * @limit: top block address allowed for writing
|
|
+ * @signature: the signature to be written
|
|
+ * @signature_ba: the actual block address where signature is written to
|
|
+ *
|
|
+ * Write signature within a specific range, from chip bottom to limit.
|
|
+ * At most one block will be written.
|
|
+ *
|
|
+ * @limit is not counted into the allowed write address.
|
|
+ */
|
|
+static bool nmbm_write_signature(struct nmbm_instance *ni, uint32_t limit,
|
|
+ const struct nmbm_signature *signature,
|
|
+ uint32_t *signature_ba)
|
|
+{
|
|
+ uint32_t ba = ni->block_count - 1;
|
|
+ bool success;
|
|
+
|
|
+ while (ba > limit) {
|
|
+ if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
|
|
+ goto next_block;
|
|
+
|
|
+ /* Insurance to detect unexpected bad block marked by user */
|
|
+ if (nmbm_check_bad_phys_block(ni, ba)) {
|
|
+ nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
|
|
+ goto next_block;
|
|
+ }
|
|
+
|
|
+ success = nmbm_erase_block_and_check(ni, ba);
|
|
+ if (!success)
|
|
+ goto skip_bad_block;
|
|
+
|
|
+ success = nmbm_write_repeated_data(ni, ba, signature,
|
|
+ sizeof(*signature));
|
|
+ if (success) {
|
|
+ *signature_ba = ba;
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ skip_bad_block:
|
|
+ nmbm_mark_phys_bad_block(ni, ba);
|
|
+ nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
|
|
+
|
|
+ next_block:
|
|
+ ba--;
|
|
+ };
|
|
+
|
|
+ return false;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * nmbn_read_data - Read data
|
|
+ * @ni: NMBM instance structure
|
|
+ * @addr: linear address where the data will be read from
|
|
+ * @data: the data to be read
|
|
+ * @size: the size of data
|
|
+ *
|
|
+ * Read data range.
|
|
+ * Every page will be tried for at most NMBM_TRY_COUNT times.
|
|
+ *
|
|
+ * Return 0 for success, positive value for corrected bitflip count,
|
|
+ * -EBADMSG for ecc error, other negative values for other errors
|
|
+ */
|
|
+static int nmbn_read_data(struct nmbm_instance *ni, uint64_t addr, void *data,
|
|
+ uint32_t size)
|
|
+{
|
|
+ uint64_t off = addr;
|
|
+ uint8_t *ptr = data;
|
|
+ uint32_t sizeremain = size, chunksize, leading;
|
|
+ int ret;
|
|
+
|
|
+ while (sizeremain) {
|
|
+ leading = off & (bmtd.pg_size - 1);
|
|
+ chunksize = bmtd.pg_size - leading;
|
|
+ if (chunksize > sizeremain)
|
|
+ chunksize = sizeremain;
|
|
+
|
|
+ if (chunksize == bmtd.pg_size) {
|
|
+ ret = nmbm_read_phys_page(ni, off - leading, ptr, NULL);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+ } else {
|
|
+ ret = nmbm_read_phys_page(ni, off - leading,
|
|
+ ni->page_cache, NULL);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+
|
|
+ memcpy(ptr, ni->page_cache + leading, chunksize);
|
|
+ }
|
|
+
|
|
+ off += chunksize;
|
|
+ ptr += chunksize;
|
|
+ sizeremain -= chunksize;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * nmbn_write_verify_data - Write data with validation
|
|
+ * @ni: NMBM instance structure
|
|
+ * @addr: linear address where the data will be written to
|
|
+ * @data: the data to be written
|
|
+ * @size: the size of data
|
|
+ *
|
|
+ * Write data and verify.
|
|
+ * Every page will be tried for at most NMBM_TRY_COUNT times.
|
|
+ */
|
|
+static bool nmbn_write_verify_data(struct nmbm_instance *ni, uint64_t addr,
|
|
+ const void *data, uint32_t size)
|
|
+{
|
|
+ uint64_t off = addr;
|
|
+ const uint8_t *ptr = data;
|
|
+ uint32_t sizeremain = size, chunksize, leading;
|
|
+ bool success;
|
|
+ int ret;
|
|
+
|
|
+ while (sizeremain) {
|
|
+ leading = off & (bmtd.pg_size - 1);
|
|
+ chunksize = bmtd.pg_size - leading;
|
|
+ if (chunksize > sizeremain)
|
|
+ chunksize = sizeremain;
|
|
+
|
|
+ /* Prepare page data. fill 0xff to unused region */
|
|
+ memset(ni->page_cache, 0xff, ni->rawpage_size);
|
|
+ memcpy(ni->page_cache + leading, ptr, chunksize);
|
|
+
|
|
+ success = nmbm_write_phys_page(ni, off - leading,
|
|
+ ni->page_cache, NULL);
|
|
+ if (!success)
|
|
+ return false;
|
|
+
|
|
+ /* Verify the data just written. ECC error indicates failure */
|
|
+ ret = nmbm_read_phys_page(ni, off - leading, ni->page_cache, NULL);
|
|
+ if (ret < 0)
|
|
+ return false;
|
|
+
|
|
+ if (memcmp(ni->page_cache + leading, ptr, chunksize))
|
|
+ return false;
|
|
+
|
|
+ off += chunksize;
|
|
+ ptr += chunksize;
|
|
+ sizeremain -= chunksize;
|
|
+ }
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * nmbm_write_mgmt_range - Write management data into NAND within a range
|
|
+ * @ni: NMBM instance structure
|
|
+ * @addr: preferred start block address for writing
|
|
+ * @limit: highest block address allowed for writing
|
|
+ * @data: the data to be written
|
|
+ * @size: the size of data
|
|
+ * @actual_start_ba: actual start block address of data
|
|
+ * @actual_end_ba: block address after the end of data
|
|
+ *
|
|
+ * @limit is not counted into the allowed write address.
|
|
+ */
|
|
+static bool nmbm_write_mgmt_range(struct nmbm_instance *ni, uint32_t ba,
|
|
+ uint32_t limit, const void *data,
|
|
+ uint32_t size, uint32_t *actual_start_ba,
|
|
+ uint32_t *actual_end_ba)
|
|
+{
|
|
+ const uint8_t *ptr = data;
|
|
+ uint32_t sizeremain = size, chunksize;
|
|
+ bool success;
|
|
+
|
|
+ while (sizeremain && ba < limit) {
|
|
+ chunksize = sizeremain;
|
|
+ if (chunksize > bmtd.blk_size)
|
|
+ chunksize = bmtd.blk_size;
|
|
+
|
|
+ if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
|
|
+ goto next_block;
|
|
+
|
|
+ /* Insurance to detect unexpected bad block marked by user */
|
|
+ if (nmbm_check_bad_phys_block(ni, ba)) {
|
|
+ nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
|
|
+ goto next_block;
|
|
+ }
|
|
+
|
|
+ success = nmbm_erase_block_and_check(ni, ba);
|
|
+ if (!success)
|
|
+ goto skip_bad_block;
|
|
+
|
|
+ success = nmbn_write_verify_data(ni, ba2addr(ni, ba), ptr,
|
|
+ chunksize);
|
|
+ if (!success)
|
|
+ goto skip_bad_block;
|
|
+
|
|
+ if (sizeremain == size)
|
|
+ *actual_start_ba = ba;
|
|
+
|
|
+ ptr += chunksize;
|
|
+ sizeremain -= chunksize;
|
|
+
|
|
+ goto next_block;
|
|
+
|
|
+ skip_bad_block:
|
|
+ nmbm_mark_phys_bad_block(ni, ba);
|
|
+ nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
|
|
+
|
|
+ next_block:
|
|
+ ba++;
|
|
+ }
|
|
+
|
|
+ if (sizeremain)
|
|
+ return false;
|
|
+
|
|
+ *actual_end_ba = ba;
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * nmbm_generate_info_table_cache - Generate info table cache data
|
|
+ * @ni: NMBM instance structure
|
|
+ *
|
|
+ * Generate info table cache data to be written into flash.
|
|
+ */
|
|
+static bool nmbm_generate_info_table_cache(struct nmbm_instance *ni)
|
|
+{
|
|
+ bool changed = false;
|
|
+
|
|
+ memset(ni->info_table_cache, 0xff, ni->info_table_size);
|
|
+
|
|
+ memcpy(ni->info_table_cache + ni->info_table.state_table_off,
|
|
+ ni->block_state, ni->state_table_size);
|
|
+
|
|
+ memcpy(ni->info_table_cache + ni->info_table.mapping_table_off,
|
|
+ ni->block_mapping, ni->mapping_table_size);
|
|
+
|
|
+ ni->info_table.header.magic = NMBM_MAGIC_INFO_TABLE;
|
|
+ ni->info_table.header.version = NMBM_VER;
|
|
+ ni->info_table.header.size = ni->info_table_size;
|
|
+
|
|
+ if (ni->block_state_changed || ni->block_mapping_changed) {
|
|
+ ni->info_table.write_count++;
|
|
+ changed = true;
|
|
+ }
|
|
+
|
|
+ memcpy(ni->info_table_cache, &ni->info_table, sizeof(ni->info_table));
|
|
+
|
|
+ nmbm_update_checksum((struct nmbm_header *)ni->info_table_cache);
|
|
+
|
|
+ return changed;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * nmbm_write_info_table - Write info table into NAND within a range
|
|
+ * @ni: NMBM instance structure
|
|
+ * @ba: preferred start block address for writing
|
|
+ * @limit: highest block address allowed for writing
|
|
+ * @actual_start_ba: actual start block address of info table
|
|
+ * @actual_end_ba: block address after the end of info table
|
|
+ *
|
|
+ * @limit is counted into the allowed write address.
|
|
+ */
|
|
+static bool nmbm_write_info_table(struct nmbm_instance *ni, uint32_t ba,
|
|
+ uint32_t limit, uint32_t *actual_start_ba,
|
|
+ uint32_t *actual_end_ba)
|
|
+{
|
|
+ return nmbm_write_mgmt_range(ni, ba, limit, ni->info_table_cache,
|
|
+ ni->info_table_size, actual_start_ba,
|
|
+ actual_end_ba);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * nmbm_mark_tables_clean - Mark info table `clean'
|
|
+ * @ni: NMBM instance structure
|
|
+ */
|
|
+static void nmbm_mark_tables_clean(struct nmbm_instance *ni)
|
|
+{
|
|
+ ni->block_state_changed = 0;
|
|
+ ni->block_mapping_changed = 0;
|
|
+}
|
|
+
|
|
+/*
+ * nmbm_try_reserve_blocks - Reserve blocks, compromising on the count if needed
+ * @ni: NMBM instance structure
+ * @ba: start physical block address
+ * @nba: return physical block address after reservation
+ * @count: number of good blocks to be skipped
+ * @min_count: minimum number of good blocks to be skipped
+ * @limit: highest/lowest block address allowed for walking
+ *
+ * Reserve the requested number of blocks. If that fails, try to reserve as
+ * many as possible.
+ */
|
|
+static bool nmbm_try_reserve_blocks(struct nmbm_instance *ni, uint32_t ba,
|
|
+ uint32_t *nba, uint32_t count,
|
|
+ int32_t min_count, int32_t limit)
|
|
+{
|
|
+ int32_t nblocks = count;
|
|
+ bool success;
|
|
+
|
|
+ while (nblocks >= min_count) {
|
|
+ success = nmbm_block_walk(ni, true, ba, nba, nblocks, limit);
|
|
+ if (success)
|
|
+ return true;
|
|
+
|
|
+ nblocks--;
|
|
+ }
|
|
+
|
|
+ return false;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * nmbm_rebuild_info_table - Build main & backup info table from scratch
|
|
+ * @ni: NMBM instance structure
+ */
|
|
+static bool nmbm_rebuild_info_table(struct nmbm_instance *ni)
|
|
+{
|
|
+ uint32_t table_start_ba, table_end_ba, next_start_ba;
|
|
+ uint32_t main_table_end_ba;
|
|
+ bool success;
|
|
+
|
|
+ /* Set initial value */
|
|
+ ni->main_table_ba = 0;
|
|
+ ni->backup_table_ba = 0;
|
|
+ ni->mapping_blocks_ba = ni->mapping_blocks_top_ba;
|
|
+
|
|
+ /* Write main table */
|
|
+ success = nmbm_write_info_table(ni, ni->mgmt_start_ba,
|
|
+ ni->mapping_blocks_top_ba,
|
|
+ &table_start_ba, &table_end_ba);
|
|
+ if (!success) {
|
|
+ /* Failed to write main table, data will be lost */
|
|
+ nlog_err(ni, "Unable to write at least one info table!\n");
|
|
+ nlog_err(ni, "Please save your data before power off!\n");
|
|
+ ni->protected = 1;
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ /* Main info table is successfully written, record its offset */
|
|
+ ni->main_table_ba = table_start_ba;
|
|
+ main_table_end_ba = table_end_ba;
|
|
+
|
|
+ /* Adjust mapping_blocks_ba */
|
|
+ ni->mapping_blocks_ba = table_end_ba;
|
|
+
|
|
+ nmbm_mark_tables_clean(ni);
|
|
+
|
|
+ nlog_table_creation(ni, true, table_start_ba, table_end_ba);
|
|
+
|
|
+ /* Reserve spare blocks for main info table. */
|
|
+ success = nmbm_try_reserve_blocks(ni, table_end_ba,
|
|
+ &next_start_ba,
|
|
+ ni->info_table_spare_blocks, 0,
|
|
+ ni->mapping_blocks_top_ba -
|
|
+ size2blk(ni, ni->info_table_size));
|
|
+ if (!success) {
|
|
+ /* There is no spare block. */
|
|
+ nlog_debug(ni, "No room for backup info table\n");
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ /* Write backup info table. */
|
|
+ success = nmbm_write_info_table(ni, next_start_ba,
|
|
+ ni->mapping_blocks_top_ba,
|
|
+ &table_start_ba, &table_end_ba);
|
|
+ if (!success) {
|
|
+ /* There is no enough blocks for backup table. */
|
|
+ nlog_debug(ni, "No room for backup info table\n");
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ /* Backup table is successfully written, record its offset */
|
|
+ ni->backup_table_ba = table_start_ba;
|
|
+
|
|
+ /* Adjust mapping_blocks_off */
|
|
+ ni->mapping_blocks_ba = table_end_ba;
|
|
+
|
|
+ /* Erase spare blocks of main table to clean possible interference data */
|
|
+ nmbm_erase_range(ni, main_table_end_ba, ni->backup_table_ba);
|
|
+
|
|
+ nlog_table_creation(ni, false, table_start_ba, table_end_ba);
|
|
+
|
|
+ return true;
|
|
+}
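Taken together, the steps above leave the management area laid out as follows (a sketch assembled from the assignments in this function; block addresses grow to the right):

    mgmt_start_ba                                  mapping_blocks_top_ba  signature_ba
    | main table | spares | backup table | unmapped spare pool ......... | signature |
                                          ^
                                          mapping_blocks_ba (end of the last table)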
+
+/*
|
|
+ * nmbm_rescue_single_info_table - Rescue when there is only one info table
|
|
+ * @ni: NMBM instance structure
|
|
+ *
+ * This function is called when only one info table exists, and it may
+ * fail if a new info table cannot be written.
+ */
|
|
+static bool nmbm_rescue_single_info_table(struct nmbm_instance *ni)
|
|
+{
|
|
+ uint32_t table_start_ba, table_end_ba, write_ba;
|
|
+ bool success;
|
|
+
|
|
+ /* Try to write new info table in front of existing table */
|
|
+ success = nmbm_write_info_table(ni, ni->mgmt_start_ba,
|
|
+ ni->main_table_ba,
|
|
+ &table_start_ba,
|
|
+ &table_end_ba);
|
|
+ if (success) {
|
|
+ /*
|
|
+ * New table becomes the main table, existing table becomes
|
|
+ * the backup table.
|
|
+ */
|
|
+ ni->backup_table_ba = ni->main_table_ba;
|
|
+ ni->main_table_ba = table_start_ba;
|
|
+
|
|
+ nmbm_mark_tables_clean(ni);
|
|
+
|
|
+ /* Erase spare blocks of main table to clean possible interference data */
|
|
+ nmbm_erase_range(ni, table_end_ba, ni->backup_table_ba);
|
|
+
|
|
+ nlog_table_creation(ni, true, table_start_ba, table_end_ba);
|
|
+
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ /* Try to reserve spare blocks for existing table */
|
|
+ success = nmbm_try_reserve_blocks(ni, ni->mapping_blocks_ba, &write_ba,
|
|
+ ni->info_table_spare_blocks, 0,
|
|
+ ni->mapping_blocks_top_ba -
|
|
+ size2blk(ni, ni->info_table_size));
|
|
+ if (!success) {
|
|
+ nlog_warn(ni, "Failed to rescue single info table\n");
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ /* Try to write new info table next to the existing table */
|
|
+ while (write_ba >= ni->mapping_blocks_ba) {
|
|
+ success = nmbm_write_info_table(ni, write_ba,
|
|
+ ni->mapping_blocks_top_ba,
|
|
+ &table_start_ba,
|
|
+ &table_end_ba);
|
|
+ if (success)
|
|
+ break;
|
|
+
|
|
+ write_ba--;
|
|
+ }
|
|
+
|
|
+ if (success) {
|
|
+ /* Erase spare blocks of main table to clean possible interference data */
|
|
+ nmbm_erase_range(ni, ni->mapping_blocks_ba, table_start_ba);
|
|
+
|
|
+ /* New table becomes the backup table */
|
|
+ ni->backup_table_ba = table_start_ba;
|
|
+ ni->mapping_blocks_ba = table_end_ba;
|
|
+
|
|
+ nmbm_mark_tables_clean(ni);
|
|
+
|
|
+ nlog_table_creation(ni, false, table_start_ba, table_end_ba);
|
|
+
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ nlog_warn(ni, "Failed to rescue single info table\n");
|
|
+ return false;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * nmbm_update_single_info_table - Update one specific info table
|
|
+ * @ni: NMBM instance structure
|
|
+ */
|
|
+static bool nmbm_update_single_info_table(struct nmbm_instance *ni,
|
|
+ bool update_main_table)
|
|
+{
|
|
+ uint32_t write_start_ba, write_limit, table_start_ba, table_end_ba;
|
|
+ bool success;
|
|
+
|
|
+ /* Determine the write range */
|
|
+ if (update_main_table) {
|
|
+ write_start_ba = ni->main_table_ba;
|
|
+ write_limit = ni->backup_table_ba;
|
|
+ } else {
|
|
+ write_start_ba = ni->backup_table_ba;
|
|
+ write_limit = ni->mapping_blocks_top_ba;
|
|
+ }
|
|
+
|
|
+ success = nmbm_write_info_table(ni, write_start_ba, write_limit,
|
|
+ &table_start_ba, &table_end_ba);
|
|
+ if (success) {
|
|
+ if (update_main_table) {
|
|
+ ni->main_table_ba = table_start_ba;
|
|
+ } else {
|
|
+ ni->backup_table_ba = table_start_ba;
|
|
+ ni->mapping_blocks_ba = table_end_ba;
|
|
+ }
|
|
+
|
|
+ nmbm_mark_tables_clean(ni);
|
|
+
|
|
+ nlog_table_update(ni, update_main_table, table_start_ba,
|
|
+ table_end_ba);
|
|
+
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ if (update_main_table) {
|
|
+ /*
|
|
+ * If updating the main table failed, make the backup table the
+ * new main table, and call nmbm_rescue_single_info_table()
|
|
+ */
|
|
+ nlog_warn(ni, "Unable to update %s info table\n",
|
|
+ update_main_table ? "Main" : "Backup");
|
|
+
|
|
+ ni->main_table_ba = ni->backup_table_ba;
|
|
+ ni->backup_table_ba = 0;
|
|
+ return nmbm_rescue_single_info_table(ni);
|
|
+ }
|
|
+
|
|
+ /* Only one table left */
|
|
+ ni->mapping_blocks_ba = ni->backup_table_ba;
|
|
+ ni->backup_table_ba = 0;
|
|
+
|
|
+ return false;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * nmbm_rescue_main_info_table - Rescue when failed to write main info table
|
|
+ * @ni: NMBM instance structure
|
|
+ *
|
|
+ * This function is called when main info table failed to be written, and
|
|
+ * backup info table exists.
|
|
+ */
|
|
+static bool nmbm_rescue_main_info_table(struct nmbm_instance *ni)
|
|
+{
|
|
+ uint32_t tmp_table_start_ba, tmp_table_end_ba, main_table_start_ba;
|
|
+ uint32_t main_table_end_ba, write_ba;
|
|
+ uint32_t info_table_erasesize = size2blk(ni, ni->info_table_size);
|
|
+ bool success;
|
|
+
|
|
+ /* Try to reserve spare blocks for existing backup info table */
|
|
+ success = nmbm_try_reserve_blocks(ni, ni->mapping_blocks_ba, &write_ba,
|
|
+ ni->info_table_spare_blocks, 0,
|
|
+ ni->mapping_blocks_top_ba -
|
|
+ info_table_erasesize);
|
|
+ if (!success) {
|
|
+ /* There is no spare block. Backup info table becomes the main table. */
|
|
+ nlog_err(ni, "No room for temporary info table\n");
|
|
+ ni->main_table_ba = ni->backup_table_ba;
|
|
+ ni->backup_table_ba = 0;
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ /* Try to write temporary info table into spare unmapped blocks */
|
|
+ while (write_ba >= ni->mapping_blocks_ba) {
|
|
+ success = nmbm_write_info_table(ni, write_ba,
|
|
+ ni->mapping_blocks_top_ba,
|
|
+ &tmp_table_start_ba,
|
|
+ &tmp_table_end_ba);
|
|
+ if (success)
|
|
+ break;
|
|
+
|
|
+ write_ba--;
|
|
+ }
|
|
+
|
|
+ if (!success) {
|
|
+ /* Backup info table becomes the main table */
|
|
+ nlog_err(ni, "Failed to update main info table\n");
|
|
+ ni->main_table_ba = ni->backup_table_ba;
|
|
+ ni->backup_table_ba = 0;
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ /* Adjust mapping_blocks_off */
|
|
+ ni->mapping_blocks_ba = tmp_table_end_ba;
|
|
+
|
|
+ /*
|
|
+ * Now write main info table at the beginning of management area.
|
|
+ * This operation will generally destroy the original backup info
|
|
+ * table.
|
|
+ */
|
|
+ success = nmbm_write_info_table(ni, ni->mgmt_start_ba,
|
|
+ tmp_table_start_ba,
|
|
+ &main_table_start_ba,
|
|
+ &main_table_end_ba);
|
|
+ if (!success) {
|
|
+ /* Temporary info table becomes the main table */
|
|
+ ni->main_table_ba = tmp_table_start_ba;
|
|
+ ni->backup_table_ba = 0;
|
|
+
|
|
+ nmbm_mark_tables_clean(ni);
|
|
+
|
|
+ nlog_err(ni, "Failed to update main info table\n");
|
|
+
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ /* Main info table has been successfully written, record its offset */
|
|
+ ni->main_table_ba = main_table_start_ba;
|
|
+
|
|
+ nmbm_mark_tables_clean(ni);
|
|
+
|
|
+ nlog_table_creation(ni, true, main_table_start_ba, main_table_end_ba);
|
|
+
|
|
+ /*
|
|
+ * Temporary info table becomes the new backup info table if it's
|
|
+ * not overwritten.
|
|
+ */
|
|
+ if (main_table_end_ba <= tmp_table_start_ba) {
|
|
+ ni->backup_table_ba = tmp_table_start_ba;
|
|
+
|
|
+ nlog_table_creation(ni, false, tmp_table_start_ba,
|
|
+ tmp_table_end_ba);
|
|
+
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ /* Adjust mapping_blocks_off */
|
|
+ ni->mapping_blocks_ba = main_table_end_ba;
|
|
+
|
|
+ /* Try to reserve spare blocks for new main info table */
|
|
+ success = nmbm_try_reserve_blocks(ni, main_table_end_ba, &write_ba,
|
|
+ ni->info_table_spare_blocks, 0,
|
|
+ ni->mapping_blocks_top_ba -
|
|
+ info_table_erasesize);
|
|
+ if (!success) {
|
|
+ /* There is no spare block. Only main table exists. */
|
|
+ nlog_err(ni, "No room for backup info table\n");
|
|
+ ni->backup_table_ba = 0;
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ /* Write new backup info table. */
|
|
+ while (write_ba >= main_table_end_ba) {
|
|
+ success = nmbm_write_info_table(ni, write_ba,
|
|
+ ni->mapping_blocks_top_ba,
|
|
+ &tmp_table_start_ba,
|
|
+ &tmp_table_end_ba);
|
|
+ if (success)
|
|
+ break;
|
|
+
|
|
+ write_ba--;
|
|
+ }
|
|
+
|
|
+ if (!success) {
|
|
+ nlog_err(ni, "No room for backup info table\n");
|
|
+ ni->backup_table_ba = 0;
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ /* Backup info table has been successfully written, record its offset */
|
|
+ ni->backup_table_ba = tmp_table_start_ba;
|
|
+
|
|
+ /* Adjust mapping_blocks_off */
|
|
+ ni->mapping_blocks_ba = tmp_table_end_ba;
|
|
+
|
|
+ /* Erase spare blocks of main table to clean possible interference data */
|
|
+ nmbm_erase_range(ni, main_table_end_ba, ni->backup_table_ba);
|
|
+
|
|
+ nlog_table_creation(ni, false, tmp_table_start_ba, tmp_table_end_ba);
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * nmbm_update_info_table_once - Update info table once
|
|
+ * @ni: NMBM instance structure
|
|
+ * @force: force update
|
|
+ *
|
|
+ * Update both main and backup info table. Return true if at least one info
|
|
+ * table has been successfully written.
|
|
+ * This function only tries to update the info table once, regardless of the result.
|
|
+ */
|
|
+static bool nmbm_update_info_table_once(struct nmbm_instance *ni, bool force)
|
|
+{
|
|
+ uint32_t table_start_ba, table_end_ba;
|
|
+ uint32_t main_table_limit;
|
|
+ bool success;
|
|
+
|
|
+ /* Do nothing if there is no change */
|
|
+ if (!nmbm_generate_info_table_cache(ni) && !force)
|
|
+ return true;
|
|
+
|
|
+ /* Check whether both two tables exist */
|
|
+ if (!ni->backup_table_ba) {
|
|
+ main_table_limit = ni->mapping_blocks_top_ba;
|
|
+ goto write_main_table;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Write backup info table in its current range.
|
|
+ * Note that the limit is set to mapping_blocks_top_ba to provide as many
+ * spare blocks as possible for the backup table. If unmapped blocks end
+ * up being used by the backup table, mapping_blocks_ba will
+ * be adjusted.
|
|
+ */
|
|
+ success = nmbm_write_info_table(ni, ni->backup_table_ba,
|
|
+ ni->mapping_blocks_top_ba,
|
|
+ &table_start_ba, &table_end_ba);
|
|
+ if (!success) {
|
|
+ /*
|
|
+ * There is nothing to do if failed to write backup table.
|
|
+ * Write the main table now.
|
|
+ */
|
|
+ nlog_err(ni, "No room for backup table\n");
|
|
+ ni->mapping_blocks_ba = ni->backup_table_ba;
|
|
+ ni->backup_table_ba = 0;
|
|
+ main_table_limit = ni->mapping_blocks_top_ba;
|
|
+ goto write_main_table;
|
|
+ }
|
|
+
|
|
+ /* Backup table is successfully written, record its offset */
|
|
+ ni->backup_table_ba = table_start_ba;
|
|
+
|
|
+ /* Adjust mapping_blocks_off */
|
|
+ ni->mapping_blocks_ba = table_end_ba;
|
|
+
|
|
+ nmbm_mark_tables_clean(ni);
|
|
+
|
|
+ /* The normal limit of main table */
|
|
+ main_table_limit = ni->backup_table_ba;
|
|
+
|
|
+ nlog_table_update(ni, false, table_start_ba, table_end_ba);
|
|
+
|
|
+write_main_table:
|
|
+ if (!ni->main_table_ba)
|
|
+ goto rebuild_tables;
|
|
+
|
|
+ /* Write main info table in its current range */
|
|
+ success = nmbm_write_info_table(ni, ni->main_table_ba,
|
|
+ main_table_limit, &table_start_ba,
|
|
+ &table_end_ba);
|
|
+ if (!success) {
|
|
+ /* If failed to write main table, go rescue procedure */
|
|
+ if (!ni->backup_table_ba)
|
|
+ goto rebuild_tables;
|
|
+
|
|
+ return nmbm_rescue_main_info_table(ni);
|
|
+ }
|
|
+
|
|
+ /* Main info table is successfully written, record its offset */
|
|
+ ni->main_table_ba = table_start_ba;
|
|
+
|
|
+ /* Adjust mapping_blocks_off */
|
|
+ if (!ni->backup_table_ba)
|
|
+ ni->mapping_blocks_ba = table_end_ba;
|
|
+
|
|
+ nmbm_mark_tables_clean(ni);
|
|
+
|
|
+ nlog_table_update(ni, true, table_start_ba, table_end_ba);
|
|
+
|
|
+ return true;
|
|
+
|
|
+rebuild_tables:
|
|
+ return nmbm_rebuild_info_table(ni);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * nmbm_update_info_table - Update info table
|
|
+ * @ni: NMBM instance structure
|
|
+ *
|
|
+ * Update both main and backup info table. Return true if at least one table
|
|
+ * has been successfully written.
|
|
+ * This function will try to update the info table repeatedly until no new
+ * bad block is found during updating.
|
|
+ */
|
|
+static bool nmbm_update_info_table(struct nmbm_instance *ni)
|
|
+{
|
|
+ bool success;
|
|
+
|
|
+ if (ni->protected)
|
|
+ return true;
|
|
+
|
|
+ while (ni->block_state_changed || ni->block_mapping_changed) {
|
|
+ success = nmbm_update_info_table_once(ni, false);
|
|
+ if (!success) {
|
|
+ nlog_err(ni, "Failed to update info table\n");
|
|
+ return false;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
+/*
+ * nmbm_map_block - Map a bad block to an unused spare block
+ * @ni: NMBM instance structure
+ * @lb: logical block address to map
+ */
|
|
+static bool nmbm_map_block(struct nmbm_instance *ni, uint32_t lb)
|
|
+{
|
|
+ uint32_t pb;
|
|
+ bool success;
|
|
+
|
|
+ if (ni->mapping_blocks_ba == ni->mapping_blocks_top_ba) {
|
|
+ nlog_warn(ni, "No spare unmapped blocks.\n");
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ success = nmbm_block_walk(ni, false, ni->mapping_blocks_top_ba, &pb, 0,
|
|
+ ni->mapping_blocks_ba);
|
|
+ if (!success) {
|
|
+ nlog_warn(ni, "No spare unmapped blocks.\n");
|
|
+ nmbm_update_info_table(ni);
|
|
+ ni->mapping_blocks_top_ba = ni->mapping_blocks_ba;
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ ni->block_mapping[lb] = pb;
|
|
+ ni->mapping_blocks_top_ba--;
|
|
+ ni->block_mapping_changed++;
|
|
+
|
|
+ nlog_info(ni, "Logic block %u mapped to physical block %u\n", lb, pb);
|
|
+
|
|
+ return true;
|
|
+}
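nmbm_map_block() hands spares out from the top of the pool downward; the pool is exhausted once mapping_blocks_top_ba meets mapping_blocks_ba. A toy model with invented bounds (not driver code):

    #include <stdio.h>

    int main(void)
    {
        unsigned int top = 1019, bottom = 1016;  /* invented pool bounds */
        unsigned int bad_lb[3] = { 7, 12, 30 };  /* invented bad logical blocks */
        unsigned int i;

        /* each new mapping takes the current top block and moves down */
        for (i = 0; i < 3 && top != bottom; i++, top--)
            printf("logic block %u -> physical block %u\n", bad_lb[i], top);
        return 0;
    }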
+
+/*
|
|
+ * nmbm_create_info_table - Create info table(s)
|
|
+ * @ni: NMBM instance structure
|
|
+ *
|
|
+ * This function assumes that the chip has no existing info table(s)
|
|
+ */
|
|
+static bool nmbm_create_info_table(struct nmbm_instance *ni)
|
|
+{
|
|
+ uint32_t lb;
|
|
+ bool success;
|
|
+
|
|
+ /* Set initial mapping_blocks_top_off */
|
|
+ success = nmbm_block_walk(ni, false, ni->signature_ba,
|
|
+ &ni->mapping_blocks_top_ba, 1,
|
|
+ ni->mgmt_start_ba);
|
|
+ if (!success) {
|
|
+ nlog_err(ni, "No room for spare blocks\n");
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ /* Generate info table cache */
|
|
+ nmbm_generate_info_table_cache(ni);
|
|
+
|
|
+ /* Write info table */
|
|
+ success = nmbm_rebuild_info_table(ni);
|
|
+ if (!success) {
|
|
+ nlog_err(ni, "Failed to build info tables\n");
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ /* Remap bad block(s) at end of data area */
|
|
+ for (lb = ni->data_block_count; lb < ni->mgmt_start_ba; lb++) {
|
|
+ success = nmbm_map_block(ni, lb);
|
|
+ if (!success)
|
|
+ break;
|
|
+
|
|
+ ni->data_block_count++;
|
|
+ }
|
|
+
|
|
+ /* If state table and/or mapping table changed, update info table. */
|
|
+ success = nmbm_update_info_table(ni);
|
|
+ if (!success)
|
|
+ return false;
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * nmbm_create_new - Create NMBM on a new chip
|
|
+ * @ni: NMBM instance structure
|
|
+ */
|
|
+static bool nmbm_create_new(struct nmbm_instance *ni)
|
|
+{
|
|
+ bool success;
|
|
+
|
|
+ /* Determine the boundary of management blocks */
|
|
+ ni->mgmt_start_ba = ni->block_count * (NMBM_MGMT_DIV - ni->max_ratio) / NMBM_MGMT_DIV;
|
|
+
|
|
+ if (ni->max_reserved_blocks && ni->block_count - ni->mgmt_start_ba > ni->max_reserved_blocks)
|
|
+ ni->mgmt_start_ba = ni->block_count - ni->max_reserved_blocks;
|
|
+
|
|
+ nlog_info(ni, "NMBM management region starts at block %u [0x%08llx]\n",
|
|
+ ni->mgmt_start_ba, ba2addr(ni, ni->mgmt_start_ba));
|
|
+
|
|
+ /* Fill block state table & mapping table */
|
|
+ nmbm_scan_badblocks(ni);
|
|
+ nmbm_build_mapping_table(ni);
|
|
+
|
|
+ /* Write signature */
|
|
+ ni->signature.header.magic = NMBM_MAGIC_SIGNATURE;
|
|
+ ni->signature.header.version = NMBM_VER;
|
|
+ ni->signature.header.size = sizeof(ni->signature);
|
|
+ ni->signature.nand_size = bmtd.total_blks << bmtd.blk_shift;
|
|
+ ni->signature.block_size = bmtd.blk_size;
|
|
+ ni->signature.page_size = bmtd.pg_size;
|
|
+ ni->signature.spare_size = bmtd.mtd->oobsize;
|
|
+ ni->signature.mgmt_start_pb = ni->mgmt_start_ba;
|
|
+ ni->signature.max_try_count = NMBM_TRY_COUNT;
|
|
+ nmbm_update_checksum(&ni->signature.header);
|
|
+
|
|
+ success = nmbm_write_signature(ni, ni->mgmt_start_ba,
|
|
+ &ni->signature, &ni->signature_ba);
|
|
+ if (!success) {
|
|
+ nlog_err(ni, "Failed to write signature to a proper offset\n");
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ nlog_info(ni, "Signature has been written to block %u [0x%08llx]\n",
|
|
+ ni->signature_ba, ba2addr(ni, ni->signature_ba));
|
|
+
|
|
+ /* Write info table(s) */
|
|
+ success = nmbm_create_info_table(ni);
|
|
+ if (success) {
|
|
+ nlog_info(ni, "NMBM has been successfully created\n");
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ return false;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * nmbm_check_info_table_header - Check if a info table header is valid
|
|
+ * @ni: NMBM instance structure
|
|
+ * @data: pointer to the info table header
|
|
+ */
|
|
+static bool nmbm_check_info_table_header(struct nmbm_instance *ni, void *data)
|
|
+{
|
|
+ struct nmbm_info_table_header *ifthdr = data;
|
|
+
|
|
+ if (ifthdr->header.magic != NMBM_MAGIC_INFO_TABLE)
|
|
+ return false;
|
|
+
|
|
+ if (ifthdr->header.size != ni->info_table_size)
|
|
+ return false;
|
|
+
|
|
+ if (ifthdr->mapping_table_off - ifthdr->state_table_off < ni->state_table_size)
|
|
+ return false;
|
|
+
|
|
+ if (ni->info_table_size - ifthdr->mapping_table_off < ni->mapping_table_size)
|
|
+ return false;
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * nmbm_check_info_table - Check if a whole info table is valid
|
|
+ * @ni: NMBM instance structure
|
|
+ * @start_ba: start block address of this table
|
|
+ * @end_ba: end block address of this table
|
|
+ * @data: pointer to the info table header
|
|
+ * @mapping_blocks_top_ba: return the block address of top remapped block
|
|
+ */
|
|
+static bool nmbm_check_info_table(struct nmbm_instance *ni, uint32_t start_ba,
|
|
+ uint32_t end_ba, void *data,
|
|
+ uint32_t *mapping_blocks_top_ba)
|
|
+{
|
|
+ struct nmbm_info_table_header *ifthdr = data;
|
|
+ int32_t *block_mapping = (int32_t *)((uintptr_t)data + ifthdr->mapping_table_off);
|
|
+ u32 *block_state = (u32 *)((uintptr_t)data + ifthdr->state_table_off);
|
|
+ uint32_t minimum_mapping_pb = ni->signature_ba;
|
|
+ uint32_t ba;
|
|
+
|
|
+ for (ba = 0; ba < ni->data_block_count; ba++) {
|
|
+ if ((block_mapping[ba] >= ni->data_block_count && block_mapping[ba] < end_ba) ||
|
|
+ block_mapping[ba] == ni->signature_ba)
|
|
+ return false;
|
|
+
|
|
+ if (block_mapping[ba] >= end_ba && block_mapping[ba] < minimum_mapping_pb)
|
|
+ minimum_mapping_pb = block_mapping[ba];
|
|
+ }
|
|
+
|
|
+ for (ba = start_ba; ba < end_ba; ba++) {
|
|
+ if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
|
|
+ continue;
|
|
+
|
|
+ if (nmbm_get_block_state_raw(block_state, ba) != BLOCK_ST_GOOD)
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ *mapping_blocks_top_ba = minimum_mapping_pb - 1;
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * nmbm_try_load_info_table - Try to load info table from an address
|
|
+ * @ni: NMBM instance structure
|
|
+ * @ba: start block address of the info table
|
|
+ * @eba: return the block address after end of the table
|
|
+ * @write_count: return the write count of this table
|
|
+ * @mapping_blocks_top_ba: return the block address of top remapped block
|
|
+ * @table_loaded: used to record whether ni->info_table has valid data
|
|
+ */
|
|
+static bool nmbm_try_load_info_table(struct nmbm_instance *ni, uint32_t ba,
|
|
+ uint32_t *eba, uint32_t *write_count,
|
|
+ uint32_t *mapping_blocks_top_ba,
|
|
+ bool table_loaded)
|
|
+{
|
|
+ struct nmbm_info_table_header *ifthdr = (void *)ni->info_table_cache;
|
|
+ uint8_t *off = ni->info_table_cache;
|
|
+ uint32_t limit = ba + size2blk(ni, ni->info_table_size);
|
|
+ uint32_t start_ba = 0, chunksize, sizeremain = ni->info_table_size;
|
|
+ bool success, checkhdr = true;
|
|
+ int ret;
|
|
+
|
|
+ while (sizeremain && ba < limit) {
|
|
+ if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
|
|
+ goto next_block;
|
|
+
|
|
+ if (nmbm_check_bad_phys_block(ni, ba)) {
|
|
+ nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
|
|
+ goto next_block;
|
|
+ }
|
|
+
|
|
+ chunksize = sizeremain;
|
|
+ if (chunksize > bmtd.blk_size)
|
|
+ chunksize = bmtd.blk_size;
|
|
+
|
|
+ /* Assume block with ECC error has no info table data */
|
|
+ ret = nmbn_read_data(ni, ba2addr(ni, ba), off, chunksize);
|
|
+ if (ret < 0)
|
|
+ goto skip_bad_block;
|
|
+ else if (ret > 0)
|
|
+ return false;
|
|
+
|
|
+ if (checkhdr) {
|
|
+ success = nmbm_check_info_table_header(ni, off);
|
|
+ if (!success)
|
|
+ return false;
|
|
+
|
|
+ start_ba = ba;
|
|
+ checkhdr = false;
|
|
+ }
|
|
+
|
|
+ off += chunksize;
|
|
+ sizeremain -= chunksize;
|
|
+
|
|
+ goto next_block;
|
|
+
|
|
+ skip_bad_block:
|
|
+ /* Only mark bad in memory */
|
|
+ nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
|
|
+
|
|
+ next_block:
|
|
+ ba++;
|
|
+ }
|
|
+
|
|
+ if (sizeremain)
|
|
+ return false;
|
|
+
|
|
+ success = nmbm_check_header(ni->info_table_cache, ni->info_table_size);
|
|
+ if (!success)
|
|
+ return false;
|
|
+
|
|
+ *eba = ba;
|
|
+ *write_count = ifthdr->write_count;
|
|
+
|
|
+ success = nmbm_check_info_table(ni, start_ba, ba, ni->info_table_cache,
|
|
+ mapping_blocks_top_ba);
|
|
+ if (!success)
|
|
+ return false;
|
|
+
|
|
+ if (!table_loaded || ifthdr->write_count > ni->info_table.write_count) {
|
|
+ memcpy(&ni->info_table, ifthdr, sizeof(ni->info_table));
|
|
+ memcpy(ni->block_state,
|
|
+ (uint8_t *)ifthdr + ifthdr->state_table_off,
|
|
+ ni->state_table_size);
|
|
+ memcpy(ni->block_mapping,
|
|
+ (uint8_t *)ifthdr + ifthdr->mapping_table_off,
|
|
+ ni->mapping_table_size);
|
|
+ ni->info_table.write_count = ifthdr->write_count;
|
|
+ }
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * nmbm_search_info_table - Search info table from specific address
|
|
+ * @ni: NMBM instance structure
|
|
+ * @ba: start block address to search
|
|
+ * @limit: highest block address allowed for searching
|
|
+ * @table_start_ba: return the start block address of this table
|
|
+ * @table_end_ba: return the block address after end of this table
|
|
+ * @write_count: return the write count of this table
|
|
+ * @mapping_blocks_top_ba: return the block address of top remapped block
|
|
+ * @table_loaded: used to record whether ni->info_table has valid data
|
|
+ */
|
|
+static bool nmbm_search_info_table(struct nmbm_instance *ni, uint32_t ba,
|
|
+ uint32_t limit, uint32_t *table_start_ba,
|
|
+ uint32_t *table_end_ba,
|
|
+ uint32_t *write_count,
|
|
+ uint32_t *mapping_blocks_top_ba,
|
|
+ bool table_loaded)
|
|
+{
|
|
+ bool success;
|
|
+
|
|
+ while (ba < limit - size2blk(ni, ni->info_table_size)) {
|
|
+ success = nmbm_try_load_info_table(ni, ba, table_end_ba,
|
|
+ write_count,
|
|
+ mapping_blocks_top_ba,
|
|
+ table_loaded);
|
|
+ if (success) {
|
|
+ *table_start_ba = ba;
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ ba++;
|
|
+ }
|
|
+
|
|
+ return false;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * nmbm_load_info_table - Load info table(s) from a chip
|
|
+ * @ni: NMBM instance structure
|
|
+ * @ba: start block address to search info table
|
|
+ * @limit: highest block address allowed for searching
|
|
+ */
|
|
+static bool nmbm_load_info_table(struct nmbm_instance *ni, uint32_t ba,
|
|
+ uint32_t limit)
|
|
+{
|
|
+ uint32_t main_table_end_ba, backup_table_end_ba, table_end_ba;
|
|
+ uint32_t main_mapping_blocks_top_ba, backup_mapping_blocks_top_ba;
|
|
+ uint32_t main_table_write_count, backup_table_write_count;
|
|
+ uint32_t i;
|
|
+ bool success;
|
|
+
|
|
+ /* Set initial value */
|
|
+ ni->main_table_ba = 0;
|
|
+ ni->backup_table_ba = 0;
|
|
+ ni->info_table.write_count = 0;
|
|
+ ni->mapping_blocks_top_ba = ni->signature_ba - 1;
|
|
+ ni->data_block_count = ni->signature.mgmt_start_pb;
|
|
+
|
|
+ /* Find first info table */
|
|
+ success = nmbm_search_info_table(ni, ba, limit, &ni->main_table_ba,
|
|
+ &main_table_end_ba, &main_table_write_count,
|
|
+ &main_mapping_blocks_top_ba, false);
|
|
+ if (!success) {
|
|
+ nlog_warn(ni, "No valid info table found\n");
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ table_end_ba = main_table_end_ba;
|
|
+
|
|
+ nlog_table_found(ni, true, main_table_write_count, ni->main_table_ba,
|
|
+ main_table_end_ba);
|
|
+
|
|
+ /* Find second info table */
|
|
+ success = nmbm_search_info_table(ni, main_table_end_ba, limit,
|
|
+ &ni->backup_table_ba, &backup_table_end_ba,
|
|
+ &backup_table_write_count, &backup_mapping_blocks_top_ba, true);
|
|
+ if (!success) {
|
|
+ nlog_warn(ni, "Second info table not found\n");
|
|
+ } else {
|
|
+ table_end_ba = backup_table_end_ba;
|
|
+
|
|
+ nlog_table_found(ni, false, backup_table_write_count,
|
|
+ ni->backup_table_ba, backup_table_end_ba);
|
|
+ }
|
|
+
|
|
+ /* Pick mapping_blocks_top_ba */
|
|
+ if (!ni->backup_table_ba) {
|
|
+ ni->mapping_blocks_top_ba = main_mapping_blocks_top_ba;
|
|
+ } else {
|
|
+ if (main_table_write_count >= backup_table_write_count)
|
|
+ ni->mapping_blocks_top_ba = main_mapping_blocks_top_ba;
|
|
+ else
|
|
+ ni->mapping_blocks_top_ba = backup_mapping_blocks_top_ba;
|
|
+ }
|
|
+
|
|
+ /* Set final mapping_blocks_ba */
|
|
+ ni->mapping_blocks_ba = table_end_ba;
|
|
+
|
|
+ /* Set final data_block_count */
|
|
+ for (i = ni->signature.mgmt_start_pb; i > 0; i--) {
|
|
+ if (ni->block_mapping[i - 1] >= 0) {
|
|
+ ni->data_block_count = i;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* Regenerate the info table cache from the final selected info table */
|
|
+ nmbm_generate_info_table_cache(ni);
|
|
+
|
|
+ /*
|
|
+ * If only one table exists, try to write another table.
|
|
+ * If two tables have different write count, try to update info table
|
|
+ */
|
|
+ if (!ni->backup_table_ba) {
|
|
+ success = nmbm_rescue_single_info_table(ni);
|
|
+ } else if (main_table_write_count != backup_table_write_count) {
|
|
+ /* Mark state & mapping tables changed */
|
|
+ ni->block_state_changed = 1;
|
|
+ ni->block_mapping_changed = 1;
|
|
+
|
|
+ success = nmbm_update_single_info_table(ni,
|
|
+ main_table_write_count < backup_table_write_count);
|
|
+ } else {
|
|
+ success = true;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * If there is no spare unmapped blocks, or still only one table
|
|
+ * exists, set the chip to read-only
|
|
+ */
|
|
+ if (ni->mapping_blocks_ba == ni->mapping_blocks_top_ba) {
|
|
+ nlog_warn(ni, "No spare unmapped blocks. Device is now read-only\n");
|
|
+ ni->protected = 1;
|
|
+ } else if (!success) {
|
|
+ nlog_warn(ni, "Only one info table found. Device is now read-only\n");
|
|
+ ni->protected = 1;
|
|
+ }
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * nmbm_load_existing - Load NMBM from a new chip
|
|
+ * @ni: NMBM instance structure
|
|
+ */
|
|
+static bool nmbm_load_existing(struct nmbm_instance *ni)
|
|
+{
|
|
+ bool success;
|
|
+
|
|
+ /* Calculate the boundary of management blocks */
|
|
+ ni->mgmt_start_ba = ni->signature.mgmt_start_pb;
|
|
+
|
|
+ nlog_debug(ni, "NMBM management region starts at block %u [0x%08llx]\n",
|
|
+ ni->mgmt_start_ba, ba2addr(ni, ni->mgmt_start_ba));
|
|
+
|
|
+ /* Look for info table(s) */
|
|
+ success = nmbm_load_info_table(ni, ni->mgmt_start_ba,
|
|
+ ni->signature_ba);
|
|
+ if (success) {
|
|
+ nlog_info(ni, "NMBM has been successfully attached\n");
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ if (!ni->force_create) {
|
|
+ printk("not creating NMBM table\n");
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ /* Fill block state table & mapping table */
|
|
+ nmbm_scan_badblocks(ni);
|
|
+ nmbm_build_mapping_table(ni);
|
|
+
|
|
+ /* Write info table(s) */
|
|
+ success = nmbm_create_info_table(ni);
|
|
+ if (success) {
|
|
+ nlog_info(ni, "NMBM has been successfully created\n");
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ return false;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * nmbm_find_signature - Find signature in the lower NAND chip
|
|
+ * @ni: NMBM instance structure
|
|
+ * @signature: buffer for storing the signature data found
+ * @signature_ba: return the actual block address of signature block
|
|
+ *
|
|
+ * Find a valid signature from a specific range in the lower NAND chip,
|
|
+ * from bottom (highest address) to top (lowest address)
|
|
+ *
|
|
+ * Return true if found.
|
|
+ */
|
|
+static bool nmbm_find_signature(struct nmbm_instance *ni,
|
|
+ struct nmbm_signature *signature,
|
|
+ uint32_t *signature_ba)
|
|
+{
|
|
+ struct nmbm_signature sig;
|
|
+ uint64_t off, addr;
|
|
+ uint32_t block_count, ba, limit;
|
|
+ bool success;
|
|
+ int ret;
|
|
+
|
|
+ /* Calculate top and bottom block address */
|
|
+ block_count = bmtd.total_blks;
|
|
+ ba = block_count;
|
|
+ limit = (block_count / NMBM_MGMT_DIV) * (NMBM_MGMT_DIV - ni->max_ratio);
|
|
+ if (ni->max_reserved_blocks && block_count - limit > ni->max_reserved_blocks)
|
|
+ limit = block_count - ni->max_reserved_blocks;
|
|
+
|
|
+ while (ba >= limit) {
|
|
+ ba--;
|
|
+ addr = ba2addr(ni, ba);
|
|
+
|
|
+ if (nmbm_check_bad_phys_block(ni, ba))
|
|
+ continue;
|
|
+
|
|
+ /* Check every page.
+ * As long as at least one page contains a valid signature,
+ * the block is treated as a valid signature block.
+ */
|
|
+ for (off = 0; off < bmtd.blk_size;
|
|
+ off += bmtd.pg_size) {
|
|
+ ret = nmbn_read_data(ni, addr + off, &sig,
|
|
+ sizeof(sig));
|
|
+ if (ret)
|
|
+ continue;
|
|
+
|
|
+ /* Check for header size and checksum */
|
|
+ success = nmbm_check_header(&sig, sizeof(sig));
|
|
+ if (!success)
|
|
+ continue;
|
|
+
|
|
+ /* Check for header magic */
|
|
+ if (sig.header.magic == NMBM_MAGIC_SIGNATURE) {
|
|
+ /* Found it */
|
|
+ memcpy(signature, &sig, sizeof(sig));
|
|
+ *signature_ba = ba;
|
|
+ return true;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return false;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * nmbm_calc_structure_size - Calculate the instance structure size
+ */
|
|
+static size_t nmbm_calc_structure_size(void)
|
|
+{
|
|
+ uint32_t state_table_size, mapping_table_size, info_table_size;
|
|
+ uint32_t block_count;
|
|
+
|
|
+ block_count = bmtd.total_blks;
|
|
+
|
|
+ /* Calculate info table size */
|
|
+ state_table_size = ((block_count + NMBM_BITMAP_BLOCKS_PER_UNIT - 1) /
|
|
+ NMBM_BITMAP_BLOCKS_PER_UNIT) * NMBM_BITMAP_UNIT_SIZE;
|
|
+ mapping_table_size = block_count * sizeof(int32_t);
|
|
+
|
|
+ info_table_size = ALIGN(sizeof(struct nmbm_info_table_header),
|
|
+ bmtd.pg_size);
|
|
+ info_table_size += ALIGN(state_table_size, bmtd.pg_size);
|
|
+ info_table_size += ALIGN(mapping_table_size, bmtd.pg_size);
|
|
+
|
|
+ return info_table_size + state_table_size + mapping_table_size +
|
|
+ sizeof(struct nmbm_instance);
|
|
+}
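The same arithmetic, rerun standalone for a hypothetical 128 MiB chip with 128 KiB blocks and 2 KiB pages. The NMBM_BITMAP_* constants live in mtk_bmt.h and are assumed here to pack 2 state bits per block into 32-bit units (16 blocks per unit); the header is assumed to fit within one page:

    #include <stdio.h>
    #include <stdint.h>

    #define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

    int main(void)
    {
        uint32_t block_count = 1024;      /* 128 MiB / 128 KiB */
        uint32_t pg_size = 2048;
        uint32_t blocks_per_unit = 16;    /* assumed NMBM_BITMAP_BLOCKS_PER_UNIT */
        uint32_t unit_size = 4;           /* assumed NMBM_BITMAP_UNIT_SIZE */
        uint32_t hdr = 64;                /* assumed header size, <= one page */

        uint32_t state = (block_count + blocks_per_unit - 1) /
                         blocks_per_unit * unit_size;             /* 256 B */
        uint32_t mapping = block_count * (uint32_t)sizeof(int32_t); /* 4 KiB */
        uint32_t info = ALIGN_UP(hdr, pg_size) + ALIGN_UP(state, pg_size) +
                        ALIGN_UP(mapping, pg_size);               /* 8 KiB */

        printf("state=%u mapping=%u info=%u\n", state, mapping, info);
        return 0;
    }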
|
|
+
|
|
+/*
|
|
+ * nmbm_init_structure - Initialize members of instance structure
|
|
+ * @ni: NMBM instance structure
|
|
+ */
|
|
+static void nmbm_init_structure(struct nmbm_instance *ni)
|
|
+{
|
|
+ uint32_t pages_per_block, blocks_per_chip;
|
|
+ uintptr_t ptr;
|
|
+
|
|
+ pages_per_block = bmtd.blk_size / bmtd.pg_size;
|
|
+ blocks_per_chip = bmtd.total_blks;
|
|
+
|
|
+ ni->rawpage_size = bmtd.pg_size + bmtd.mtd->oobsize;
|
|
+ ni->rawblock_size = pages_per_block * ni->rawpage_size;
|
|
+ ni->rawchip_size = blocks_per_chip * ni->rawblock_size;
|
|
+
|
|
+ /* Calculate the number of blocks on this chip */
|
|
+ ni->block_count = blocks_per_chip;
|
|
+
|
|
+ /* Calculate info table size */
|
|
+ ni->state_table_size = ((ni->block_count + NMBM_BITMAP_BLOCKS_PER_UNIT - 1) /
|
|
+ NMBM_BITMAP_BLOCKS_PER_UNIT) * NMBM_BITMAP_UNIT_SIZE;
|
|
+ ni->mapping_table_size = ni->block_count * sizeof(*ni->block_mapping);
|
|
+
|
|
+ ni->info_table_size = ALIGN(sizeof(ni->info_table),
|
|
+ bmtd.pg_size);
|
|
+ ni->info_table.state_table_off = ni->info_table_size;
|
|
+
|
|
+ ni->info_table_size += ALIGN(ni->state_table_size,
|
|
+ bmtd.pg_size);
|
|
+ ni->info_table.mapping_table_off = ni->info_table_size;
|
|
+
|
|
+ ni->info_table_size += ALIGN(ni->mapping_table_size,
|
|
+ bmtd.pg_size);
|
|
+
|
|
+ ni->info_table_spare_blocks = nmbm_get_spare_block_count(
|
|
+ size2blk(ni, ni->info_table_size));
|
|
+
|
|
+ /* Assign memory to members */
|
|
+ ptr = (uintptr_t)ni + sizeof(*ni);
|
|
+
|
|
+ ni->info_table_cache = (void *)ptr;
|
|
+ ptr += ni->info_table_size;
|
|
+
|
|
+ ni->block_state = (void *)ptr;
|
|
+ ptr += ni->state_table_size;
|
|
+
|
|
+ ni->block_mapping = (void *)ptr;
|
|
+ ptr += ni->mapping_table_size;
|
|
+
|
|
+ ni->page_cache = bmtd.data_buf;
|
|
+
|
|
+ /* Initialize block state table */
|
|
+ ni->block_state_changed = 0;
|
|
+ memset(ni->block_state, 0xff, ni->state_table_size);
|
|
+
|
|
+ /* Initialize block mapping table */
|
|
+ ni->block_mapping_changed = 0;
|
|
+}
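The instance allocated in mtk_bmt_init_nmbm() below is a single kzalloc() region, and this function carves it into the cache and the two tables. The same carving pattern in miniature (stand-in sizes, plain C):

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    struct inst { int dummy; };  /* stands in for struct nmbm_instance */

    int main(void)
    {
        size_t cache_sz = 8192, state_sz = 256, map_sz = 4096;
        struct inst *ni = calloc(1, sizeof(*ni) + cache_sz + state_sz + map_sz);
        uintptr_t ptr;

        if (!ni)
            return 1;

        /* tables live back to back, right after the instance itself */
        ptr = (uintptr_t)ni + sizeof(*ni);
        printf("cache   at %p\n", (void *)ptr); ptr += cache_sz;
        printf("state   at %p\n", (void *)ptr); ptr += state_sz;
        printf("mapping at %p\n", (void *)ptr);
        free(ni);
        return 0;
    }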
|
|
+
|
|
+/*
|
|
+ * nmbm_attach - Attach to a lower device
|
|
+ * @ni: NMBM instance structure
|
|
+ */
|
|
+static int nmbm_attach(struct nmbm_instance *ni)
|
|
+{
|
|
+ bool success;
|
|
+
|
|
+ if (!ni)
|
|
+ return -EINVAL;
|
|
+
|
|
+ /* Initialize NMBM instance */
|
|
+ nmbm_init_structure(ni);
|
|
+
|
|
+ success = nmbm_find_signature(ni, &ni->signature, &ni->signature_ba);
|
|
+ if (!success) {
|
|
+ if (!ni->force_create) {
|
|
+ nlog_err(ni, "Signature not found\n");
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
+ success = nmbm_create_new(ni);
|
|
+ if (!success)
|
|
+ return -ENODEV;
|
|
+
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ nlog_info(ni, "Signature found at block %u [0x%08llx]\n",
|
|
+ ni->signature_ba, ba2addr(ni, ni->signature_ba));
|
|
+
|
|
+ if (ni->signature.header.version != NMBM_VER) {
|
|
+ nlog_err(ni, "NMBM version %u.%u is not supported\n",
|
|
+ NMBM_VERSION_MAJOR_GET(ni->signature.header.version),
|
|
+ NMBM_VERSION_MINOR_GET(ni->signature.header.version));
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (ni->signature.nand_size != bmtd.total_blks << bmtd.blk_shift ||
|
|
+ ni->signature.block_size != bmtd.blk_size ||
|
|
+ ni->signature.page_size != bmtd.pg_size ||
|
|
+ ni->signature.spare_size != bmtd.mtd->oobsize) {
|
|
+ nlog_err(ni, "NMBM configuration mismatch\n");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ success = nmbm_load_existing(ni);
|
|
+ if (!success)
|
|
+ return -ENODEV;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static bool remap_block_nmbm(u16 block, u16 mapped_block, int copy_len)
|
|
+{
|
|
+ struct nmbm_instance *ni = bmtd.ni;
|
|
+ int new_block;
|
|
+
|
|
+ if (block >= ni->data_block_count)
|
|
+ return false;
|
|
+
|
|
+ nmbm_set_block_state(ni, mapped_block, BLOCK_ST_BAD);
|
|
+ if (!nmbm_map_block(ni, block))
|
|
+ return false;
|
|
+
|
|
+ new_block = ni->block_mapping[block];
|
|
+ bbt_nand_erase(new_block);
|
|
+ if (copy_len > 0)
|
|
+ bbt_nand_copy(new_block, mapped_block, copy_len);
|
|
+ nmbm_update_info_table(ni);
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static int get_mapping_block_index_nmbm(int block)
|
|
+{
|
|
+ struct nmbm_instance *ni = bmtd.ni;
|
|
+
|
|
+ if (block >= ni->data_block_count)
|
|
+ return -1;
|
|
+
|
|
+ return ni->block_mapping[block];
|
|
+}
|
|
+
|
|
+static int mtk_bmt_init_nmbm(struct device_node *np)
|
|
+{
|
|
+ struct nmbm_instance *ni;
|
|
+ int ret;
|
|
+
|
|
+ ni = kzalloc(nmbm_calc_structure_size(), GFP_KERNEL);
|
|
+ if (!ni)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ bmtd.ni = ni;
|
|
+
|
|
+ if (of_property_read_u32(np, "mediatek,bmt-max-ratio", &ni->max_ratio))
|
|
+ ni->max_ratio = 1;
|
|
+ if (of_property_read_u32(np, "mediatek,bmt-max-reserved-blocks",
|
|
+ &ni->max_reserved_blocks))
|
|
+ ni->max_reserved_blocks = 256;
|
|
+ if (of_property_read_bool(np, "mediatek,empty-page-ecc-protected"))
|
|
+ ni->empty_page_ecc_ok = true;
|
|
+ if (of_property_read_bool(np, "mediatek,bmt-force-create"))
|
|
+ ni->force_create = true;
|
|
+
|
|
+ ret = nmbm_attach(ni);
|
|
+ if (ret)
|
|
+ goto out;
|
|
+
|
|
+ bmtd.mtd->size = ni->data_block_count << bmtd.blk_shift;
|
|
+
|
|
+ return 0;
|
|
+
|
|
+out:
|
|
+ kfree(ni);
|
|
+ bmtd.ni = NULL;
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int mtk_bmt_debug_nmbm(void *data, u64 val)
|
|
+{
|
|
+ struct nmbm_instance *ni = bmtd.ni;
|
|
+ int i;
|
|
+
|
|
+ switch (val) {
|
|
+ case 0:
|
|
+ for (i = 1; i < ni->data_block_count; i++) {
|
|
+ if (ni->block_mapping[i] < ni->mapping_blocks_ba)
|
|
+ continue;
|
|
+
|
|
+ printk("remap [%x->%x]\n", i, ni->block_mapping[i]);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void unmap_block_nmbm(u16 block)
|
|
+{
|
|
+ struct nmbm_instance *ni = bmtd.ni;
|
|
+ int start, offset;
|
|
+ int new_block;
|
|
+
|
|
+ if (block >= ni->data_block_count)
|
|
+ return;
|
|
+
|
|
+ start = block;
|
|
+ offset = 0;
|
|
+ while (ni->block_mapping[start] >= ni->mapping_blocks_ba) {
|
|
+ start--;
|
|
+ offset++;
|
|
+ if (start < 0)
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ if (!offset)
|
|
+ return;
|
|
+
|
|
+ new_block = ni->block_mapping[start] + offset;
|
|
+ nmbm_set_block_state(ni, new_block, BLOCK_ST_GOOD);
|
|
+ ni->block_mapping[block] = new_block;
|
|
+ ni->block_mapping_changed++;
|
|
+
|
|
+ new_block = ni->signature_ba - 1;
|
|
+ for (block = 0; block < ni->data_block_count; block++) {
|
|
+ int cur = ni->block_mapping[block];
|
|
+
|
|
+ if (cur < ni->mapping_blocks_ba)
|
|
+ continue;
|
|
+
|
|
+ if (cur <= new_block)
|
|
+ new_block = cur - 1;
|
|
+ }
|
|
+
|
|
+ ni->mapping_blocks_top_ba = new_block;
|
|
+
|
|
+ nmbm_update_info_table(ni);
|
|
+}
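The walk at the top of this function relies on remapped runs being contiguous: it steps back to the nearest logical block still mapped below mapping_blocks_ba and assumes the original physical blocks followed it consecutively. A worked example with invented numbers (mapping_blocks_ba = 900):

    logical block:    3    4    5
    block_mapping:   40  950  951    (>= 900 means remapped)

Unmapping block 5 walks back to start = 3 (offset = 2) and restores block_mapping[5] = 40 + 2 = 42.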
|
|
+
|
|
+const struct mtk_bmt_ops mtk_bmt_nmbm_ops = {
|
|
+ .init = mtk_bmt_init_nmbm,
|
|
+ .remap_block = remap_block_nmbm,
|
|
+ .unmap_block = unmap_block_nmbm,
|
|
+ .get_mapping_block = get_mapping_block_index_nmbm,
|
|
+ .debug = mtk_bmt_debug_nmbm,
|
|
+};
|
|
diff --git a/target/linux/ramips/files/drivers/mtd/nand/mtk_bmt_v2.c b/target/linux/ramips/files/drivers/mtd/nand/mtk_bmt_v2.c
|
|
new file mode 100644
|
|
index 000000000000..2770376e98bb
|
|
--- /dev/null
|
|
+++ b/target/linux/ramips/files/drivers/mtd/nand/mtk_bmt_v2.c
|
|
@@ -0,0 +1,513 @@
|
|
+/*
|
|
+ * Copyright (c) 2017 MediaTek Inc.
|
|
+ * Author: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
|
|
+ * Copyright (c) 2020-2022 Felix Fietkau <nbd@nbd.name>
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify
|
|
+ * it under the terms of the GNU General Public License version 2 as
|
|
+ * published by the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ */
|
|
+
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/slab.h>
|
|
+#include "mtk_bmt.h"
|
|
+
|
|
+struct bbbt {
|
|
+ char signature[3];
|
|
+ /* This version is used to distinguish the legacy and new algorithm */
|
|
+#define BBMT_VERSION 2
|
|
+ unsigned char version;
|
|
+ /* Below 2 tables will be written in SLC */
|
|
+ u16 bb_tbl[];
|
|
+};
|
|
+
|
|
+struct bbmt {
|
|
+ u16 block;
|
|
+#define NO_MAPPED 0
|
|
+#define NORMAL_MAPPED 1
|
|
+#define BMT_MAPPED 2
|
|
+ u16 mapped;
|
|
+};
|
|
+
|
|
+/* Maximum 8k blocks */
|
|
+#define BBPOOL_RATIO 2
|
|
+#define BB_TABLE_MAX bmtd.table_size
|
|
+#define BMT_TABLE_MAX (BB_TABLE_MAX * BBPOOL_RATIO / 100)
|
|
+#define BMT_TBL_DEF_VAL 0x0
|
|
+
|
|
+static inline struct bbmt *bmt_tbl(struct bbbt *bbbt)
|
|
+{
|
|
+ return (struct bbmt *)&bbbt->bb_tbl[bmtd.table_size];
|
|
+}
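bmt_tbl() works because both tables share one buffer: the struct bbbt header and bb_tbl[] come first, and the bbmt array begins right after entry table_size. Sketch of the buffer:

    | signature[3] | version | bb_tbl[0 .. table_size) | bmt_tbl(): {block, mapped} entries ... |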
|
|
+
|
|
+static u16 find_valid_block(u16 block)
|
|
+{
|
|
+ u8 fdm[4];
|
|
+ int ret;
|
|
+ int loop = 0;
|
|
+
|
|
+retry:
|
|
+ if (block >= bmtd.total_blks)
|
|
+ return 0;
|
|
+
|
|
+ ret = bbt_nand_read(blk_pg(block), bmtd.data_buf, bmtd.pg_size,
|
|
+ fdm, sizeof(fdm));
|
|
+ /* Read the 1st byte of FDM to judge whether the block
+ * is bad or not
+ */
|
|
+ if (ret || fdm[0] != 0xff) {
|
|
+ pr_info("nand: found bad block 0x%x\n", block);
|
|
+ if (loop >= bmtd.bb_max) {
|
|
+ pr_info("nand: FATAL ERR: too many bad blocks!!\n");
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ loop++;
|
|
+ block++;
|
|
+ goto retry;
|
|
+ }
|
|
+
|
|
+ return block;
|
|
+}
|
|
+
|
|
+/* Find out all bad blocks, and fill in the mapping table */
|
|
+static int scan_bad_blocks(struct bbbt *bbt)
|
|
+{
|
|
+ int i;
|
|
+ u16 block = 0;
|
|
+
|
|
+ /* On first-time download, block 0 MUST NOT be a bad block;
+ * this is guaranteed by the vendor
+ */
|
|
+ bbt->bb_tbl[0] = 0;
|
|
+
|
|
+ /*
|
|
+ * Construct the mapping table of Normal data area(non-PMT/BMTPOOL)
|
|
+ * G - Good block; B - Bad block
|
|
+ * ---------------------------
|
|
+ * physical |G|G|B|G|B|B|G|G|G|G|B|G|B|
|
|
+ * ---------------------------
|
|
+ * What bb_tbl[i] looks like:
|
|
+ * physical block(i):
|
|
+ * 0 1 2 3 4 5 6 7 8 9 a b c
|
|
+ * mapped block(bb_tbl[i]):
|
|
+ * 0 1 3 6 7 8 9 b ......
|
|
+ * ATTENTION:
|
|
+ * If a new bad block occurs at (n), search bmt_tbl to find
+ * an available block (x), and fill in bb_tbl[n] = x;
|
|
+ */
|
|
+ for (i = 1; i < bmtd.pool_lba; i++) {
|
|
+ bbt->bb_tbl[i] = find_valid_block(bbt->bb_tbl[i - 1] + 1);
|
|
+ BBT_LOG("bb_tbl[0x%x] = 0x%x", i, bbt->bb_tbl[i]);
|
|
+ if (bbt->bb_tbl[i] == 0)
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ /* Physical Block start Address of BMT pool */
|
|
+ bmtd.pool_pba = bbt->bb_tbl[i - 1] + 1;
|
|
+ if (bmtd.pool_pba >= bmtd.total_blks - 2) {
|
|
+ pr_info("nand: FATAL ERR: Too many bad blocks!!\n");
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ BBT_LOG("pool_pba=0x%x", bmtd.pool_pba);
|
|
+ i = 0;
|
|
+ block = bmtd.pool_pba;
|
|
+ /*
|
|
+ * The bmt table is used for runtime bad block mapping
|
|
+ * G - Good block; B - Bad block
|
|
+ * ---------------------------
|
|
+ * physical |G|G|B|G|B|B|G|G|G|G|B|G|B|
|
|
+ * ---------------------------
|
|
+ * block: 0 1 2 3 4 5 6 7 8 9 a b c
|
|
+ * What bmt_tbl[i] looks like in initial state:
|
|
+ * i:
|
|
+ * 0 1 2 3 4 5 6 7
|
|
+ * bmt_tbl[i].block:
|
|
+ * 0 1 3 6 7 8 9 b
|
|
+ * bmt_tbl[i].mapped:
|
|
+ * N N N N N N N B
|
|
+ * N - Not mapped(Available)
|
|
+ * M - Mapped
|
|
+ * B - BMT
|
|
+ * ATTENTION:
|
|
+ * BMT always in the last valid block in pool
|
|
+ */
|
|
+ while ((block = find_valid_block(block)) != 0) {
|
|
+ bmt_tbl(bbt)[i].block = block;
|
|
+ bmt_tbl(bbt)[i].mapped = NO_MAPPED;
|
|
+ BBT_LOG("bmt_tbl[%d].block = 0x%x", i, block);
|
|
+ block++;
|
|
+ i++;
|
|
+ }
|
|
+
|
|
+ /* Guard against an empty pool before indexing bmt_tbl with i - 1 */
+ if (i < 1) {
+ pr_info("nand: FATAL ERR: no space to store BMT!!\n");
+ return -1;
+ }
+
+ /* i - How many available blocks in pool, which is the length of bmt_tbl[]
+ * bmtd.bmt_blk_idx - bmt_tbl[bmtd.bmt_blk_idx].block => the BMT block
+ */
+ bmtd.bmt_blk_idx = i - 1;
+ bmt_tbl(bbt)[bmtd.bmt_blk_idx].mapped = BMT_MAPPED;
|
|
+
|
|
+ pr_info("[BBT] %d available blocks in BMT pool\n", i);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static bool is_valid_bmt(unsigned char *buf, unsigned char *fdm)
|
|
+{
|
|
+ struct bbbt *bbt = (struct bbbt *)buf;
|
|
+ u8 *sig = (u8 *)bbt->signature + MAIN_SIGNATURE_OFFSET;
+
|
|
+ if (memcmp(bbt->signature + MAIN_SIGNATURE_OFFSET, "BMT", 3) == 0 &&
|
|
+ memcmp(fdm + OOB_SIGNATURE_OFFSET, "bmt", 3) == 0) {
|
|
+ if (bbt->version == BBMT_VERSION)
|
|
+ return true;
|
|
+ }
|
|
+ BBT_LOG("[BBT] BMT Version not match,upgrage preloader and uboot please! sig=%02x%02x%02x, fdm=%02x%02x%02x",
|
|
+ sig[0], sig[1], sig[2],
|
|
+ fdm[1], fdm[2], fdm[3]);
|
|
+ return false;
|
|
+}
|
|
+
|
|
+static u16 get_bmt_index(struct bbmt *bmt)
|
|
+{
|
|
+ int i = 0;
|
|
+
|
|
+ while (bmt[i].block != BMT_TBL_DEF_VAL) {
|
|
+ if (bmt[i].mapped == BMT_MAPPED)
|
|
+ return i;
|
|
+ i++;
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int
|
|
+read_bmt(u16 block, unsigned char *dat, unsigned char *fdm, int fdm_len)
|
|
+{
|
|
+ u32 len = bmtd.bmt_pgs << bmtd.pg_shift;
|
|
+
|
|
+ return bbt_nand_read(blk_pg(block), dat, len, fdm, fdm_len);
|
|
+}
|
|
+
|
|
+static struct bbbt *scan_bmt(u16 block)
|
|
+{
|
|
+ u8 fdm[4];
|
|
+
|
|
+ if (block < bmtd.pool_lba)
|
|
+ return NULL;
|
|
+
|
|
+ if (read_bmt(block, bmtd.bbt_buf, fdm, sizeof(fdm)))
|
|
+ return scan_bmt(block - 1);
|
|
+
|
|
+ if (is_valid_bmt(bmtd.bbt_buf, fdm)) {
|
|
+ bmtd.bmt_blk_idx = get_bmt_index(bmt_tbl((struct bbbt *)bmtd.bbt_buf));
|
|
+ if (bmtd.bmt_blk_idx == 0) {
|
|
+ pr_info("[BBT] FATAL ERR: bmt block index is wrong!\n");
|
|
+ return NULL;
|
|
+ }
|
|
+ pr_info("[BBT] BMT.v2 is found at 0x%x\n", block);
|
|
+ return (struct bbbt *)bmtd.bbt_buf;
|
|
+ } else
|
|
+ return scan_bmt(block - 1);
|
|
+}
|
|
+
|
|
+/* Write the Burner Bad Block Table to NAND flash
|
|
+ * n - write BMT to bmt_tbl[n]
|
|
+ */
|
|
+static u16 upload_bmt(struct bbbt *bbt, int n)
|
|
+{
|
|
+ u16 block;
|
|
+
|
|
+retry:
|
|
+ if (n < 0 || bmt_tbl(bbt)[n].mapped == NORMAL_MAPPED) {
|
|
+ pr_info("nand: FATAL ERR: no space to store BMT!\n");
|
|
+ return (u16)-1;
|
|
+ }
|
|
+
|
|
+ block = bmt_tbl(bbt)[n].block;
|
|
+ BBT_LOG("n = 0x%x, block = 0x%x", n, block);
|
|
+ if (bbt_nand_erase(block)) {
|
|
+ bmt_tbl(bbt)[n].block = 0;
|
|
+ /* erase failed, try the previous block: bmt_tbl[n - 1].block */
|
|
+ n--;
|
|
+ goto retry;
|
|
+ }
|
|
+
|
|
+ /* The main signature offset is fixed at 0,
+ * the OOB signature offset is fixed at 1
+ */
|
|
+ memcpy(bbt->signature + MAIN_SIGNATURE_OFFSET, "BMT", 3);
|
|
+ bbt->version = BBMT_VERSION;
|
|
+
|
|
+ if (write_bmt(block, (unsigned char *)bbt)) {
|
|
+ bmt_tbl(bbt)[n].block = 0;
|
|
+
|
|
+ /* write failed, try the previous block in bmt_tbl[n - 1] */
|
|
+ n--;
|
|
+ goto retry;
|
|
+ }
|
|
+
|
|
+ /* Return the current index(n) of BMT pool (bmt_tbl[n]) */
|
|
+ return n;
|
|
+}
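On failure, upload_bmt() retries downward through the pool, zeroing the failed slot so later scans skip it. An illustrative trace (invented slot numbers):

    n = 5: erase ok, write fails -> bmt_tbl[5].block = 0, retry with n = 4
    n = 4: erase ok, write ok    -> the BMT now lives in bmt_tbl[4].block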
|
|
+
|
|
+static u16 find_valid_block_in_pool(struct bbbt *bbt)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ if (bmtd.bmt_blk_idx == 0)
|
|
+ goto error;
|
|
+
|
|
+ for (i = 0; i < bmtd.bmt_blk_idx; i++) {
|
|
+ if (bmt_tbl(bbt)[i].block != 0 && bmt_tbl(bbt)[i].mapped == NO_MAPPED) {
|
|
+ bmt_tbl(bbt)[i].mapped = NORMAL_MAPPED;
|
|
+ return bmt_tbl(bbt)[i].block;
|
|
+ }
|
|
+ }
|
|
+
|
|
+error:
|
|
+ pr_info("nand: FATAL ERR: BMT pool is run out!\n");
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/* We met a bad block: mark it as bad and map it to a valid block in the pool.
+ * If it was a write failure, the data must also be copied over to the mapped
+ * block
+ */
|
|
+static bool remap_block_v2(u16 block, u16 mapped_block, int copy_len)
|
|
+{
|
|
+ u16 new_block;
|
|
+ struct bbbt *bbt;
|
|
+
|
|
+ bbt = bmtd.bbt;
|
|
+ new_block = find_valid_block_in_pool(bbt);
|
|
+ if (new_block == 0)
|
|
+ return false;
|
|
+
|
|
+ /* Map new bad block to available block in pool */
|
|
+ bbt->bb_tbl[block] = new_block;
|
|
+
|
|
+ /* Erase new block */
|
|
+ bbt_nand_erase(new_block);
|
|
+ if (copy_len > 0)
|
|
+ bbt_nand_copy(new_block, mapped_block, copy_len);
|
|
+
|
|
+ bmtd.bmt_blk_idx = upload_bmt(bbt, bmtd.bmt_blk_idx);
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static int get_mapping_block_index_v2(int block)
|
|
+{
|
|
+ int start, end;
|
|
+
|
|
+ if (block >= bmtd.pool_lba)
|
|
+ return block;
|
|
+
|
|
+ if (!mapping_block_in_range(block, &start, &end))
|
|
+ return block;
|
|
+
|
|
+ return bmtd.bbt->bb_tbl[block];
|
|
+}
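The v2 lookup is a flat table indexed by logical block; blocks at or above pool_lba (the BMT pool itself) are identity-mapped. A standalone sketch with an invented table (the mapping_block_in_range() partition check is left out):

    #include <stdio.h>
    #include <stdint.h>

    static uint16_t bb_tbl[] = { 0, 1, 9, 3 };  /* block 2 remapped to 9 */

    static int get_mapping(int block, int pool_lba)
    {
        if (block >= pool_lba)
            return block;  /* pool blocks are not translated */
        return bb_tbl[block];
    }

    int main(void)
    {
        printf("block 2 -> %d\n", get_mapping(2, 4));  /* prints 9 */
        return 0;
    }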
|
|
+
|
|
+static void
|
|
+unmap_block_v2(u16 block)
|
|
+{
|
|
+ bmtd.bbt->bb_tbl[block] = block;
|
|
+ bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx);
|
|
+}
|
|
+
|
|
+static unsigned long *
|
|
+mtk_bmt_get_mapping_mask(void)
|
|
+{
|
|
+ struct bbmt *bbmt = bmt_tbl(bmtd.bbt);
|
|
+ int main_blocks = bmtd.mtd->size >> bmtd.blk_shift;
|
|
+ unsigned long *used;
|
|
+ int i, k;
|
|
+
|
|
+ used = kcalloc(BIT_WORD(bmtd.bmt_blk_idx) + 1, sizeof(unsigned long), GFP_KERNEL);
|
|
+ if (!used)
|
|
+ return NULL;
|
|
+
|
|
+ for (i = 1; i < main_blocks; i++) {
|
|
+ if (bmtd.bbt->bb_tbl[i] == i)
|
|
+ continue;
|
|
+
|
|
+ for (k = 0; k < bmtd.bmt_blk_idx; k++) {
|
|
+ if (bmtd.bbt->bb_tbl[i] != bbmt[k].block)
|
|
+ continue;
|
|
+
|
|
+ set_bit(k, used);
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return used;
|
|
+}
|
|
+
|
|
+static int mtk_bmt_debug_v2(void *data, u64 val)
|
|
+{
|
|
+ struct bbmt *bbmt = bmt_tbl(bmtd.bbt);
|
|
+ struct mtd_info *mtd = bmtd.mtd;
|
|
+ unsigned long *used;
|
|
+ int main_blocks = mtd->size >> bmtd.blk_shift;
|
|
+ int n_remap = 0;
|
|
+ int i;
|
|
+
|
|
+ used = mtk_bmt_get_mapping_mask();
|
|
+ if (!used)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ switch (val) {
|
|
+ case 0:
|
|
+ for (i = 1; i < main_blocks; i++) {
|
|
+ if (bmtd.bbt->bb_tbl[i] == i)
|
|
+ continue;
|
|
+
|
|
+ printk("remap [%x->%x]\n", i, bmtd.bbt->bb_tbl[i]);
|
|
+ n_remap++;
|
|
+ }
|
|
+ for (i = 0; i <= bmtd.bmt_blk_idx; i++) {
|
|
+ char c;
|
|
+
|
|
+ switch (bbmt[i].mapped) {
|
|
+ case NO_MAPPED:
|
|
+ continue;
|
|
+ case NORMAL_MAPPED:
|
|
+ c = 'm';
|
|
+ if (test_bit(i, used))
|
|
+ c = 'M';
|
|
+ break;
|
|
+ case BMT_MAPPED:
|
|
+ c = 'B';
|
|
+ break;
|
|
+ default:
|
|
+ c = 'X';
|
|
+ break;
|
|
+ }
|
|
+ printk("[%x:%c] = 0x%x\n", i, c, bbmt[i].block);
|
|
+ }
|
|
+ break;
|
|
+ case 100:
|
|
+ for (i = 0; i <= bmtd.bmt_blk_idx; i++) {
|
|
+ if (bbmt[i].mapped != NORMAL_MAPPED)
|
|
+ continue;
|
|
+
|
|
+ if (test_bit(i, used))
|
|
+ continue;
|
|
+
|
|
+ n_remap++;
|
|
+ bbmt[i].mapped = NO_MAPPED;
|
|
+ printk("free block [%d:%x]\n", i, bbmt[i].block);
|
|
+ }
|
|
+ if (n_remap)
|
|
+ bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx);
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ kfree(used);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mtk_bmt_init_v2(struct device_node *np)
|
|
+{
|
|
+ u32 bmt_pool_size, bmt_table_size;
|
|
+ u32 bufsz, block;
|
|
+ u16 pmt_block;
|
|
+
|
|
+ if (of_property_read_u32(np, "mediatek,bmt-pool-size",
|
|
+ &bmt_pool_size) != 0)
|
|
+ bmt_pool_size = 80;
|
|
+
|
|
+ if (of_property_read_u8(np, "mediatek,bmt-oob-offset",
|
|
+ &bmtd.oob_offset) != 0)
|
|
+ bmtd.oob_offset = 0;
|
|
+
|
|
+ if (of_property_read_u32(np, "mediatek,bmt-table-size",
|
|
+ &bmt_table_size) != 0)
|
|
+ bmt_table_size = 0x2000U;
|
|
+
|
|
+ bmtd.table_size = bmt_table_size;
|
|
+
|
|
+ pmt_block = bmtd.total_blks - bmt_pool_size - 2;
|
|
+
|
|
+ bmtd.mtd->size = pmt_block << bmtd.blk_shift;
|
|
+
|
|
+ /*
|
|
+ * ---------------------------------------
|
|
+ * | PMT(2blks) | BMT POOL(totalblks * 2%) |
|
|
+ * ---------------------------------------
|
|
+ * ^ ^
|
|
+ * | |
|
|
+ * pmt_block pmt_block + 2blocks(pool_lba)
|
|
+ *
|
|
+ * ATTENTION!
|
|
+ * The blocks ahead of the boundary block are stored in bb_tbl
|
|
+ * and blocks behind are stored in bmt_tbl
|
|
+ */
|
|
+
|
|
+ bmtd.pool_lba = (u16)(pmt_block + 2);
|
|
+ bmtd.bb_max = bmtd.total_blks * BBPOOL_RATIO / 100;
|
|
+
|
|
+ bufsz = round_up(sizeof(struct bbbt) +
|
|
+ bmt_table_size * sizeof(struct bbmt), bmtd.pg_size);
|
|
+ bmtd.bmt_pgs = bufsz >> bmtd.pg_shift;
|
|
+
|
|
+ bmtd.bbt_buf = kzalloc(bufsz, GFP_KERNEL);
|
|
+ if (!bmtd.bbt_buf)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ memset(bmtd.bbt_buf, 0xff, bufsz);
|
|
+
|
|
+ /* Scanning starts from the first page of the last block
+ * of the whole flash
+ */
|
|
+ bmtd.bbt = scan_bmt(bmtd.total_blks - 1);
|
|
+ if (!bmtd.bbt) {
|
|
+ /* BMT not found */
|
|
+ if (bmtd.total_blks > BB_TABLE_MAX + BMT_TABLE_MAX) {
|
|
+ pr_info("nand: FATAL: Too many blocks, can not support!\n");
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ bmtd.bbt = (struct bbbt *)bmtd.bbt_buf;
|
|
+ memset(bmt_tbl(bmtd.bbt), BMT_TBL_DEF_VAL,
|
|
+ bmtd.table_size * sizeof(struct bbmt));
|
|
+
|
|
+ if (scan_bad_blocks(bmtd.bbt))
|
|
+ return -1;
|
|
+
|
|
+ /* The BMT always lives in the last valid block of the pool */
|
|
+ bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx);
|
|
+ block = bmt_tbl(bmtd.bbt)[bmtd.bmt_blk_idx].block;
|
|
+ pr_notice("[BBT] BMT.v2 is written into PBA:0x%x\n", block);
|
|
+
|
|
+ if (bmtd.bmt_blk_idx == 0)
|
|
+ pr_info("nand: Warning: no available block in BMT pool!\n");
|
|
+ else if (bmtd.bmt_blk_idx == (u16)-1)
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+const struct mtk_bmt_ops mtk_bmt_v2_ops = {
|
|
+ .sig = "bmt",
|
|
+ .sig_len = 3,
|
|
+ .init = mtk_bmt_init_v2,
|
|
+ .remap_block = remap_block_v2,
|
|
+ .unmap_block = unmap_block_v2,
|
|
+ .get_mapping_block = get_mapping_block_index_v2,
|
|
+ .debug = mtk_bmt_debug_v2,
|
|
+};
|
|
diff --git a/target/linux/ramips/files/include/linux/mtd/mtk_bmt.h b/target/linux/ramips/files/include/linux/mtd/mtk_bmt.h
|
|
new file mode 100644
|
|
index 000000000000..cbb6d04d8952
|
|
--- /dev/null
|
|
+++ b/target/linux/ramips/files/include/linux/mtd/mtk_bmt.h
|
|
@@ -0,0 +1,18 @@
|
|
+#ifndef __MTK_BMT_H
|
|
+#define __MTK_BMT_H
|
|
+
|
|
+#ifdef CONFIG_MTD_NAND_MTK_BMT
|
|
+int mtk_bmt_attach(struct mtd_info *mtd);
|
|
+void mtk_bmt_detach(struct mtd_info *mtd);
|
|
+#else
|
|
+static inline int mtk_bmt_attach(struct mtd_info *mtd)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static inline void mtk_bmt_detach(struct mtd_info *mtd)
|
|
+{
|
|
+}
|
|
+#endif
|
|
+
|
|
+#endif
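How a NAND driver is expected to hook this in, sketched from the two entry points above only; the surrounding probe code is illustrative, not taken from this patch:

    #include <linux/mtd/mtd.h>
    #include <linux/mtd/mtk_bmt.h>

    static int example_nand_probe_tail(struct mtd_info *mtd)
    {
        int ret;

        /* remap bad blocks transparently before partitions are parsed */
        ret = mtk_bmt_attach(mtd);
        if (ret)
            return ret;

        ret = mtd_device_register(mtd, NULL, 0);
        if (ret)
            mtk_bmt_detach(mtd);

        return ret;
    }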
|
|
diff --git a/target/linux/ramips/mt7621/config-5.4 b/target/linux/ramips/mt7621/config-5.4
|
|
index 1484fc026944..ae4a0679bd76 100644
|
|
--- a/target/linux/ramips/mt7621/config-5.4
|
|
+++ b/target/linux/ramips/mt7621/config-5.4
|
|
@@ -158,6 +158,7 @@ CONFIG_MTD_CMDLINE_PARTS=y
|
|
CONFIG_MTD_NAND_CORE=y
|
|
CONFIG_MTD_NAND_ECC_SW_HAMMING=y
|
|
CONFIG_MTD_NAND_MT7621=y
|
|
+CONFIG_MTD_NAND_MTK_BMT=y
|
|
CONFIG_MTD_PHYSMAP=y
|
|
CONFIG_MTD_RAW_NAND=y
|
|
CONFIG_MTD_ROUTERBOOT_PARTS=y
|
|
diff --git a/target/linux/ramips/patches-5.4/430-mtk-bmt-support.patch b/target/linux/ramips/patches-5.4/430-mtk-bmt-support.patch
new file mode 100644
index 000000000000..9533e4a7ee43
--- /dev/null
+++ b/target/linux/ramips/patches-5.4/430-mtk-bmt-support.patch
@@ -0,0 +1,23 @@
+--- a/drivers/mtd/nand/Kconfig
++++ b/drivers/mtd/nand/Kconfig
+@@ -2,6 +2,10 @@
+ config MTD_NAND_CORE
+ 	tristate
+ 
++config MTD_NAND_MTK_BMT
++	bool "Support MediaTek NAND Bad-block Management Table"
++	default n
++
+ source "drivers/mtd/nand/onenand/Kconfig"
+ source "drivers/mtd/nand/raw/Kconfig"
+ source "drivers/mtd/nand/spi/Kconfig"
+--- a/drivers/mtd/nand/Makefile
++++ b/drivers/mtd/nand/Makefile
+@@ -2,6 +2,7 @@
+ 
+ nandcore-objs := core.o bbt.o
+ obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o
++obj-$(CONFIG_MTD_NAND_MTK_BMT) += mtk_bmt.o mtk_bmt_v2.o mtk_bmt_bbt.o mtk_bmt_nmbm.o
+ 
+ obj-y += onenand/
+ obj-y += raw/
-- 
2.32.0

From f25e41e05c139a8efa4582148a2d9ebacaf165a9 Mon Sep 17 00:00:00 2001
From: Stijn Tintel <stijn@linux-ipv6.be>
Date: Wed, 19 Jan 2022 15:44:58 +0200
Subject: [PATCH 2/8] ramips: move mt7621_nand driver to files

The patch was rejected upstream: the mtk_nand driver should be modified
to support the mt7621 flash controller instead. Since there is no newer
version to backport and no upstream version receiving bug fixes, move
the driver to the files dir under the ramips target. This makes it
easier to change the driver while waiting for mt7621 support to land in
mtk_nand.

Signed-off-by: Stijn Tintel <stijn@linux-ipv6.be>
---
 .../files/drivers/mtd/nand/raw/mt7621_nand.c  | 1350 ++++++++++++++++
 ...driver-support-for-MT7621-nand-flash.patch | 1354 +----------------
 2 files changed, 1351 insertions(+), 1353 deletions(-)
 create mode 100644 target/linux/ramips/files/drivers/mtd/nand/raw/mt7621_nand.c

diff --git a/target/linux/ramips/files/drivers/mtd/nand/raw/mt7621_nand.c b/target/linux/ramips/files/drivers/mtd/nand/raw/mt7621_nand.c
|
|
new file mode 100644
|
|
index 000000000000..678295a68db3
|
|
--- /dev/null
|
|
+++ b/target/linux/ramips/files/drivers/mtd/nand/raw/mt7621_nand.c
|
|
@@ -0,0 +1,1350 @@
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
+/*
|
|
+ * MediaTek MT7621 NAND Flash Controller driver
|
|
+ *
|
|
+ * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
|
|
+ *
|
|
+ * Author: Weijie Gao <weijie.gao@mediatek.com>
|
|
+ */
|
|
+
|
|
+#include <linux/io.h>
|
|
+#include <linux/clk.h>
|
|
+#include <linux/init.h>
|
|
+#include <linux/errno.h>
|
|
+#include <linux/sizes.h>
|
|
+#include <linux/iopoll.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/mtd/mtd.h>
|
|
+#include <linux/mtd/rawnand.h>
|
|
+#include <linux/mtd/partitions.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <asm/addrspace.h>
|
|
+
|
|
+/* NFI core registers */
|
|
+#define NFI_CNFG 0x000
|
|
+#define CNFG_OP_MODE_S 12
|
|
+#define CNFG_OP_MODE_M GENMASK(14, 12)
|
|
+#define CNFG_OP_CUSTOM 6
|
|
+#define CNFG_AUTO_FMT_EN BIT(9)
|
|
+#define CNFG_HW_ECC_EN BIT(8)
|
|
+#define CNFG_BYTE_RW BIT(6)
|
|
+#define CNFG_READ_MODE BIT(1)
|
|
+
|
|
+#define NFI_PAGEFMT 0x004
|
|
+#define PAGEFMT_FDM_ECC_S 12
|
|
+#define PAGEFMT_FDM_ECC_M GENMASK(15, 12)
|
|
+#define PAGEFMT_FDM_S 8
|
|
+#define PAGEFMT_FDM_M GENMASK(11, 8)
|
|
+#define PAGEFMT_SPARE_S 4
|
|
+#define PAGEFMT_SPARE_M GENMASK(5, 4)
|
|
+#define PAGEFMT_PAGE_S 0
|
|
+#define PAGEFMT_PAGE_M GENMASK(1, 0)
|
|
+
|
|
+#define NFI_CON 0x008
|
|
+#define CON_NFI_SEC_S 12
|
|
+#define CON_NFI_SEC_M GENMASK(15, 12)
|
|
+#define CON_NFI_BWR BIT(9)
|
|
+#define CON_NFI_BRD BIT(8)
|
|
+#define CON_NFI_RST BIT(1)
|
|
+#define CON_FIFO_FLUSH BIT(0)
|
|
+
|
|
+#define NFI_ACCCON 0x00c
|
|
+#define ACCCON_POECS_S 28
|
|
+#define ACCCON_POECS_MAX 0x0f
|
|
+#define ACCCON_POECS_DEF 3
|
|
+#define ACCCON_PRECS_S 22
|
|
+#define ACCCON_PRECS_MAX 0x3f
|
|
+#define ACCCON_PRECS_DEF 3
|
|
+#define ACCCON_C2R_S 16
|
|
+#define ACCCON_C2R_MAX 0x3f
|
|
+#define ACCCON_C2R_DEF 7
|
|
+#define ACCCON_W2R_S 12
|
|
+#define ACCCON_W2R_MAX 0x0f
|
|
+#define ACCCON_W2R_DEF 7
|
|
+#define ACCCON_WH_S 8
|
|
+#define ACCCON_WH_MAX 0x0f
|
|
+#define ACCCON_WH_DEF 15
|
|
+#define ACCCON_WST_S 4
|
|
+#define ACCCON_WST_MAX 0x0f
|
|
+#define ACCCON_WST_DEF 15
|
|
+#define ACCCON_WST_MIN 3
|
|
+#define ACCCON_RLT_S 0
|
|
+#define ACCCON_RLT_MAX 0x0f
|
|
+#define ACCCON_RLT_DEF 15
|
|
+#define ACCCON_RLT_MIN 3
|
|
+
|
|
+#define NFI_CMD 0x020
|
|
+
|
|
+#define NFI_ADDRNOB 0x030
|
|
+#define ADDR_ROW_NOB_S 4
|
|
+#define ADDR_ROW_NOB_M GENMASK(6, 4)
|
|
+#define ADDR_COL_NOB_S 0
|
|
+#define ADDR_COL_NOB_M GENMASK(2, 0)
|
|
+
|
|
+#define NFI_COLADDR 0x034
|
|
+#define NFI_ROWADDR 0x038
|
|
+
|
|
+#define NFI_STRDATA 0x040
|
|
+#define STR_DATA BIT(0)
|
|
+
|
|
+#define NFI_CNRNB 0x044
|
|
+#define CB2R_TIME_S 4
|
|
+#define CB2R_TIME_M GENMASK(7, 4)
|
|
+#define STR_CNRNB BIT(0)
|
|
+
|
|
+#define NFI_DATAW 0x050
|
|
+#define NFI_DATAR 0x054
|
|
+
|
|
+#define NFI_PIO_DIRDY 0x058
|
|
+#define PIO_DIRDY BIT(0)
|
|
+
|
|
+#define NFI_STA 0x060
|
|
+#define STA_NFI_FSM_S 16
|
|
+#define STA_NFI_FSM_M GENMASK(19, 16)
|
|
+#define STA_FSM_CUSTOM_DATA 14
|
|
+#define STA_BUSY BIT(8)
|
|
+#define STA_ADDR BIT(1)
|
|
+#define STA_CMD BIT(0)
|
|
+
|
|
+#define NFI_ADDRCNTR 0x070
|
|
+#define SEC_CNTR_S 12
|
|
+#define SEC_CNTR_M GENMASK(15, 12)
|
|
+#define SEC_ADDR_S 0
|
|
+#define SEC_ADDR_M GENMASK(9, 0)
|
|
+
|
|
+#define NFI_CSEL 0x090
|
|
+#define CSEL_S 0
|
|
+#define CSEL_M GENMASK(1, 0)
|
|
+
|
|
+#define NFI_FDM0L 0x0a0
|
|
+#define NFI_FDML(n) (0x0a0 + ((n) << 3))
|
|
+
|
|
+#define NFI_FDM0M 0x0a4
|
|
+#define NFI_FDMM(n) (0x0a4 + ((n) << 3))
|
|
+
|
|
+#define NFI_MASTER_STA 0x210
|
|
+#define MAS_ADDR GENMASK(11, 9)
|
|
+#define MAS_RD GENMASK(8, 6)
|
|
+#define MAS_WR GENMASK(5, 3)
|
|
+#define MAS_RDDLY GENMASK(2, 0)
|
|
+
|
|
+/* ECC engine registers */
|
|
+#define ECC_ENCCON 0x000
|
|
+#define ENC_EN BIT(0)
|
|
+
|
|
+#define ECC_ENCCNFG 0x004
|
|
+#define ENC_CNFG_MSG_S 16
|
|
+#define ENC_CNFG_MSG_M GENMASK(28, 16)
|
|
+#define ENC_MODE_S 4
|
|
+#define ENC_MODE_M GENMASK(5, 4)
|
|
+#define ENC_MODE_NFI 1
|
|
+#define ENC_TNUM_S 0
|
|
+#define ENC_TNUM_M GENMASK(2, 0)
|
|
+
|
|
+#define ECC_ENCIDLE 0x00c
|
|
+#define ENC_IDLE BIT(0)
|
|
+
|
|
+#define ECC_DECCON 0x100
|
|
+#define DEC_EN BIT(0)
|
|
+
|
|
+#define ECC_DECCNFG 0x104
|
|
+#define DEC_EMPTY_EN BIT(31)
|
|
+#define DEC_CS_S 16
|
|
+#define DEC_CS_M GENMASK(28, 16)
|
|
+#define DEC_CON_S 12
|
|
+#define DEC_CON_M GENMASK(13, 12)
|
|
+#define DEC_CON_EL 2
|
|
+#define DEC_MODE_S 4
|
|
+#define DEC_MODE_M GENMASK(5, 4)
|
|
+#define DEC_MODE_NFI 1
|
|
+#define DEC_TNUM_S 0
|
|
+#define DEC_TNUM_M GENMASK(2, 0)
|
|
+
|
|
+#define ECC_DECIDLE 0x10c
|
|
+#define DEC_IDLE BIT(1)
|
|
+
|
|
+#define ECC_DECENUM 0x114
|
|
+#define ERRNUM_S 2
|
|
+#define ERRNUM_M GENMASK(3, 0)
|
|
+
|
|
+#define ECC_DECDONE 0x118
|
|
+#define DEC_DONE7 BIT(7)
|
|
+#define DEC_DONE6 BIT(6)
|
|
+#define DEC_DONE5 BIT(5)
|
|
+#define DEC_DONE4 BIT(4)
|
|
+#define DEC_DONE3 BIT(3)
|
|
+#define DEC_DONE2 BIT(2)
|
|
+#define DEC_DONE1 BIT(1)
|
|
+#define DEC_DONE0 BIT(0)
|
|
+
|
|
+#define ECC_DECEL(n) (0x11c + (n) * 4)
|
|
+#define DEC_EL_ODD_S 16
|
|
+#define DEC_EL_EVEN_S 0
|
|
+#define DEC_EL_M 0x1fff
|
|
+#define DEC_EL_BYTE_POS_S 3
|
|
+#define DEC_EL_BIT_POS_M GENMASK(2, 0)
|
|
+
|
|
+#define ECC_FDMADDR 0x13c
|
|
+
|
|
+/* ENCIDLE and DECIDLE */
|
|
+#define ECC_IDLE BIT(0)
|
|
+
|
|
+#define ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt) \
|
|
+ ((tpoecs) << ACCCON_POECS_S | (tprecs) << ACCCON_PRECS_S | \
|
|
+ (tc2r) << ACCCON_C2R_S | (tw2r) << ACCCON_W2R_S | \
|
|
+ (twh) << ACCCON_WH_S | (twst) << ACCCON_WST_S | (trlt))
|
|
+
|
|
+#define MASTER_STA_MASK (MAS_ADDR | MAS_RD | MAS_WR | \
|
|
+ MAS_RDDLY)
|
|
+#define NFI_RESET_TIMEOUT 1000000
|
|
+#define NFI_CORE_TIMEOUT 500000
|
|
+#define ECC_ENGINE_TIMEOUT 500000
|
|
+
|
|
+#define ECC_SECTOR_SIZE 512
|
|
+#define ECC_PARITY_BITS 13
|
|
+
|
|
+#define NFI_FDM_SIZE 8
|
|
+
|
|
+#define MT7621_NFC_NAME "mt7621-nand"
|
|
+
|
|
+struct mt7621_nfc {
|
|
+ struct nand_controller controller;
|
|
+ struct nand_chip nand;
|
|
+ struct clk *nfi_clk;
|
|
+ struct device *dev;
|
|
+
|
|
+ void __iomem *nfi_regs;
|
|
+ void __iomem *ecc_regs;
|
|
+
|
|
+ u32 spare_per_sector;
|
|
+};
|
|
+
|
|
+static const u16 mt7621_nfi_page_size[] = { SZ_512, SZ_2K, SZ_4K };
|
|
+static const u8 mt7621_nfi_spare_size[] = { 16, 26, 27, 28 };
|
|
+static const u8 mt7621_ecc_strength[] = { 4, 6, 8, 10, 12 };
|
|
+
|
|
+static inline u32 nfi_read32(struct mt7621_nfc *nfc, u32 reg)
|
|
+{
|
|
+ return readl(nfc->nfi_regs + reg);
|
|
+}
|
|
+
|
|
+static inline void nfi_write32(struct mt7621_nfc *nfc, u32 reg, u32 val)
|
|
+{
|
|
+ writel(val, nfc->nfi_regs + reg);
|
|
+}
|
|
+
|
|
+static inline u16 nfi_read16(struct mt7621_nfc *nfc, u32 reg)
|
|
+{
|
|
+ return readw(nfc->nfi_regs + reg);
|
|
+}
|
|
+
|
|
+static inline void nfi_write16(struct mt7621_nfc *nfc, u32 reg, u16 val)
|
|
+{
|
|
+ writew(val, nfc->nfi_regs + reg);
|
|
+}
|
|
+
|
|
+static inline void ecc_write16(struct mt7621_nfc *nfc, u32 reg, u16 val)
|
|
+{
|
|
+ writew(val, nfc->ecc_regs + reg);
|
|
+}
|
|
+
|
|
+static inline u32 ecc_read32(struct mt7621_nfc *nfc, u32 reg)
|
|
+{
|
|
+ return readl(nfc->ecc_regs + reg);
|
|
+}
|
|
+
|
|
+static inline void ecc_write32(struct mt7621_nfc *nfc, u32 reg, u32 val)
|
|
+{
|
|
+ return writel(val, nfc->ecc_regs + reg);
|
|
+}
|
|
+
|
|
+static inline u8 *oob_fdm_ptr(struct nand_chip *nand, int sect)
|
|
+{
|
|
+ return nand->oob_poi + sect * NFI_FDM_SIZE;
|
|
+}
|
|
+
|
|
+static inline u8 *oob_ecc_ptr(struct mt7621_nfc *nfc, int sect)
|
|
+{
|
|
+ struct nand_chip *nand = &nfc->nand;
|
|
+
|
|
+ return nand->oob_poi + nand->ecc.steps * NFI_FDM_SIZE +
|
|
+ sect * (nfc->spare_per_sector - NFI_FDM_SIZE);
|
|
+}
|
|
+
|
|
+static inline u8 *page_data_ptr(struct nand_chip *nand, const u8 *buf,
|
|
+ int sect)
|
|
+{
|
|
+ return (u8 *)buf + sect * nand->ecc.size;
|
|
+}
|
|
+
|
|
+static int mt7621_ecc_wait_idle(struct mt7621_nfc *nfc, u32 reg)
|
|
+{
|
|
+ struct device *dev = nfc->dev;
|
|
+ u32 val;
|
|
+ int ret;
|
|
+
|
|
+ ret = readw_poll_timeout_atomic(nfc->ecc_regs + reg, val,
|
|
+ val & ECC_IDLE, 10,
|
|
+ ECC_ENGINE_TIMEOUT);
|
|
+ if (ret) {
|
|
+ dev_warn(dev, "ECC engine timed out entering idle mode\n");
|
|
+ return -EIO;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mt7621_ecc_decoder_wait_done(struct mt7621_nfc *nfc, u32 sect)
|
|
+{
|
|
+ struct device *dev = nfc->dev;
|
|
+ u32 val;
|
|
+ int ret;
|
|
+
|
|
+ ret = readw_poll_timeout_atomic(nfc->ecc_regs + ECC_DECDONE, val,
|
|
+ val & (1 << sect), 10,
|
|
+ ECC_ENGINE_TIMEOUT);
|
|
+
|
|
+ if (ret) {
|
|
+ dev_warn(dev, "ECC decoder for sector %d timed out\n",
|
|
+ sect);
|
|
+ return -ETIMEDOUT;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void mt7621_ecc_encoder_op(struct mt7621_nfc *nfc, bool enable)
|
|
+{
|
|
+ mt7621_ecc_wait_idle(nfc, ECC_ENCIDLE);
|
|
+ ecc_write16(nfc, ECC_ENCCON, enable ? ENC_EN : 0);
|
|
+}
|
|
+
|
|
+static void mt7621_ecc_decoder_op(struct mt7621_nfc *nfc, bool enable)
|
|
+{
|
|
+ mt7621_ecc_wait_idle(nfc, ECC_DECIDLE);
|
|
+ ecc_write16(nfc, ECC_DECCON, enable ? DEC_EN : 0);
|
|
+}
|
|
+
|
|
+static int mt7621_ecc_correct_check(struct mt7621_nfc *nfc, u8 *sector_buf,
|
|
+ u8 *fdm_buf, u32 sect)
|
|
+{
|
|
+ struct nand_chip *nand = &nfc->nand;
|
|
+ u32 decnum, num_error_bits, fdm_end_bits;
|
|
+ u32 error_locations, error_bit_loc;
|
|
+ u32 error_byte_pos, error_bit_pos;
|
|
+ int bitflips = 0;
|
|
+ u32 i;
|
|
+
|
|
+ decnum = ecc_read32(nfc, ECC_DECENUM);
|
|
+ num_error_bits = (decnum >> (sect << ERRNUM_S)) & ERRNUM_M;
|
|
+ fdm_end_bits = (nand->ecc.size + NFI_FDM_SIZE) << 3;
|
|
+
|
|
+ if (!num_error_bits)
|
|
+ return 0;
|
|
+
|
|
+ if (num_error_bits == ERRNUM_M)
|
|
+ return -1;
|
|
+
|
|
+ for (i = 0; i < num_error_bits; i++) {
|
|
+ error_locations = ecc_read32(nfc, ECC_DECEL(i / 2));
|
|
+ error_bit_loc = (error_locations >> ((i % 2) * DEC_EL_ODD_S)) &
|
|
+ DEC_EL_M;
|
|
+ error_byte_pos = error_bit_loc >> DEC_EL_BYTE_POS_S;
|
|
+ error_bit_pos = error_bit_loc & DEC_EL_BIT_POS_M;
|
|
+
|
|
+ if (error_bit_loc < (nand->ecc.size << 3)) {
|
|
+ if (sector_buf) {
|
|
+ sector_buf[error_byte_pos] ^=
|
|
+ (1 << error_bit_pos);
|
|
+ }
|
|
+ } else if (error_bit_loc < fdm_end_bits) {
|
|
+ if (fdm_buf) {
|
|
+ fdm_buf[error_byte_pos - nand->ecc.size] ^=
|
|
+ (1 << error_bit_pos);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ bitflips++;
|
|
+ }
|
|
+
|
|
+ return bitflips;
|
|
+}
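A note on the decode above: the ECC engine reports each error as a flat bit index across the 512-byte sector data plus the 8-byte FDM, so splitting it into a byte offset and a bit offset is a plain divide by eight. A minimal standalone sketch of that decode for one reported location (the 0x1a3 value is an arbitrary example, not hardware output):

#include <stdio.h>

#define ECC_SECTOR_SIZE	512
#define NFI_FDM_SIZE	8

int main(void)
{
	unsigned int loc = 0x1a3;		/* example error location, in bits */
	unsigned int byte_pos = loc >> 3;	/* 0x34 */
	unsigned int bit_pos = loc & 7;		/* 3 */

	if (loc < ECC_SECTOR_SIZE * 8)
		printf("flip data[0x%x] bit %u\n", byte_pos, bit_pos);
	else if (loc < (ECC_SECTOR_SIZE + NFI_FDM_SIZE) * 8)
		printf("flip fdm[0x%x] bit %u\n",
		       byte_pos - ECC_SECTOR_SIZE, bit_pos);
	return 0;
}
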
|
|
+
|
|
+static int mt7621_nfc_wait_write_completion(struct mt7621_nfc *nfc,
|
|
+ struct nand_chip *nand)
|
|
+{
|
|
+ struct device *dev = nfc->dev;
|
|
+ u16 val;
|
|
+ int ret;
|
|
+
|
|
+ ret = readw_poll_timeout_atomic(nfc->nfi_regs + NFI_ADDRCNTR, val,
|
|
+ ((val & SEC_CNTR_M) >> SEC_CNTR_S) >= nand->ecc.steps, 10,
|
|
+ NFI_CORE_TIMEOUT);
|
|
+
|
|
+ if (ret) {
|
|
+ dev_warn(dev, "NFI core write operation timed out\n");
|
|
+ return -ETIMEDOUT;
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static void mt7621_nfc_hw_reset(struct mt7621_nfc *nfc)
|
|
+{
|
|
+ u32 val;
|
|
+ int ret;
|
|
+
|
|
+ /* reset all registers and force the NFI master to terminate */
|
|
+ nfi_write16(nfc, NFI_CON, CON_FIFO_FLUSH | CON_NFI_RST);
|
|
+
|
|
+ /* wait for the master to finish the last transaction */
|
|
+ ret = readw_poll_timeout(nfc->nfi_regs + NFI_MASTER_STA, val,
|
|
+ !(val & MASTER_STA_MASK), 50,
|
|
+ NFI_RESET_TIMEOUT);
|
|
+ if (ret) {
|
|
+ dev_warn(nfc->dev, "Failed to reset NFI master in %dms\n",
|
|
+ NFI_RESET_TIMEOUT);
|
|
+ }
|
|
+
|
|
+ /* ensure any status register affected by the NFI master is reset */
|
|
+ nfi_write16(nfc, NFI_CON, CON_FIFO_FLUSH | CON_NFI_RST);
|
|
+ nfi_write16(nfc, NFI_STRDATA, 0);
|
|
+}
|
|
+
|
|
+static inline void mt7621_nfc_hw_init(struct mt7621_nfc *nfc)
|
|
+{
|
|
+ u32 acccon;
|
|
+
|
|
+ /*
|
|
+ * CNRNB: nand ready/busy register
|
|
+ * -------------------------------
|
|
+ * 7:4: timeout register for polling the NAND busy/ready signal
|
|
+ * 0 : poll the status of the busy/ready signal after [7:4]*16 cycles.
|
|
+ */
|
|
+ nfi_write16(nfc, NFI_CNRNB, CB2R_TIME_M | STR_CNRNB);
|
|
+
|
|
+ mt7621_nfc_hw_reset(nfc);
|
|
+
|
|
+ /* Apply default access timing */
|
|
+ acccon = ACCTIMING(ACCCON_POECS_DEF, ACCCON_PRECS_DEF, ACCCON_C2R_DEF,
|
|
+ ACCCON_W2R_DEF, ACCCON_WH_DEF, ACCCON_WST_DEF,
|
|
+ ACCCON_RLT_DEF);
|
|
+
|
|
+ nfi_write32(nfc, NFI_ACCCON, acccon);
|
|
+}
|
|
+
|
|
+static int mt7621_nfc_send_command(struct mt7621_nfc *nfc, u8 command)
|
|
+{
|
|
+ struct device *dev = nfc->dev;
|
|
+ u32 val;
|
|
+ int ret;
|
|
+
|
|
+ nfi_write32(nfc, NFI_CMD, command);
|
|
+
|
|
+ ret = readl_poll_timeout_atomic(nfc->nfi_regs + NFI_STA, val,
|
|
+ !(val & STA_CMD), 10,
|
|
+ NFI_CORE_TIMEOUT);
|
|
+ if (ret) {
|
|
+ dev_warn(dev, "NFI core timed out entering command mode\n");
|
|
+ return -EIO;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mt7621_nfc_send_address_byte(struct mt7621_nfc *nfc, int addr)
|
|
+{
|
|
+ struct device *dev = nfc->dev;
|
|
+ u32 val;
|
|
+ int ret;
|
|
+
|
|
+ nfi_write32(nfc, NFI_COLADDR, addr);
|
|
+ nfi_write32(nfc, NFI_ROWADDR, 0);
|
|
+ nfi_write16(nfc, NFI_ADDRNOB, 1);
|
|
+
|
|
+ ret = readl_poll_timeout_atomic(nfc->nfi_regs + NFI_STA, val,
|
|
+ !(val & STA_ADDR), 10,
|
|
+ NFI_CORE_TIMEOUT);
|
|
+ if (ret) {
|
|
+ dev_warn(dev, "NFI core timed out entering address mode\n");
|
|
+ return -EIO;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mt7621_nfc_send_address(struct mt7621_nfc *nfc, const u8 *addr,
|
|
+ unsigned int naddrs)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ while (naddrs) {
|
|
+ ret = mt7621_nfc_send_address_byte(nfc, *addr);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ addr++;
|
|
+ naddrs--;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void mt7621_nfc_wait_pio_ready(struct mt7621_nfc *nfc)
|
|
+{
|
|
+ struct device *dev = nfc->dev;
|
|
+ int ret;
|
|
+ u16 val;
|
|
+
|
|
+ ret = readw_poll_timeout_atomic(nfc->nfi_regs + NFI_PIO_DIRDY, val,
|
|
+ val & PIO_DIRDY, 10,
|
|
+ NFI_CORE_TIMEOUT);
|
|
+ if (ret < 0)
|
|
+ dev_err(dev, "NFI core PIO mode not ready\n");
|
|
+}
|
|
+
|
|
+static u32 mt7621_nfc_pio_read(struct mt7621_nfc *nfc, bool br)
|
|
+{
|
|
+ u32 reg;
|
|
+
|
|
+ /* after each byte read, the NFI_STA reg is reset by the hardware */
|
|
+ reg = (nfi_read32(nfc, NFI_STA) & STA_NFI_FSM_M) >> STA_NFI_FSM_S;
|
|
+ if (reg != STA_FSM_CUSTOM_DATA) {
|
|
+ reg = nfi_read16(nfc, NFI_CNFG);
|
|
+ reg |= CNFG_READ_MODE | CNFG_BYTE_RW;
|
|
+ if (!br)
|
|
+ reg &= ~CNFG_BYTE_RW;
|
|
+ nfi_write16(nfc, NFI_CNFG, reg);
|
|
+
|
|
+ /*
|
|
+ * set to max sector to allow the HW to continue reading over
|
|
+ * unaligned accesses
|
|
+ */
|
|
+ nfi_write16(nfc, NFI_CON, CON_NFI_SEC_M | CON_NFI_BRD);
|
|
+
|
|
+ /* trigger to fetch data */
|
|
+ nfi_write16(nfc, NFI_STRDATA, STR_DATA);
|
|
+ }
|
|
+
|
|
+ mt7621_nfc_wait_pio_ready(nfc);
|
|
+
|
|
+ return nfi_read32(nfc, NFI_DATAR);
|
|
+}
|
|
+
|
|
+static void mt7621_nfc_read_data(struct mt7621_nfc *nfc, u8 *buf, u32 len)
|
|
+{
|
|
+ while (((uintptr_t)buf & 3) && len) {
|
|
+ *buf = mt7621_nfc_pio_read(nfc, true);
|
|
+ buf++;
|
|
+ len--;
|
|
+ }
|
|
+
|
|
+ while (len >= 4) {
|
|
+ *(u32 *)buf = mt7621_nfc_pio_read(nfc, false);
|
|
+ buf += 4;
|
|
+ len -= 4;
|
|
+ }
|
|
+
|
|
+ while (len) {
|
|
+ *buf = mt7621_nfc_pio_read(nfc, true);
|
|
+ buf++;
|
|
+ len--;
|
|
+ }
|
|
+}
|
|
+
|
|
+static void mt7621_nfc_read_data_discard(struct mt7621_nfc *nfc, u32 len)
|
|
+{
|
|
+ while (len >= 4) {
|
|
+ mt7621_nfc_pio_read(nfc, false);
|
|
+ len -= 4;
|
|
+ }
|
|
+
|
|
+ while (len) {
|
|
+ mt7621_nfc_pio_read(nfc, true);
|
|
+ len--;
|
|
+ }
|
|
+}
|
|
+
|
|
+static void mt7621_nfc_pio_write(struct mt7621_nfc *nfc, u32 val, bool bw)
|
|
+{
|
|
+ u32 reg;
|
|
+
|
|
+ reg = (nfi_read32(nfc, NFI_STA) & STA_NFI_FSM_M) >> STA_NFI_FSM_S;
|
|
+ if (reg != STA_FSM_CUSTOM_DATA) {
|
|
+ reg = nfi_read16(nfc, NFI_CNFG);
|
|
+ reg &= ~(CNFG_READ_MODE | CNFG_BYTE_RW);
|
|
+ if (bw)
|
|
+ reg |= CNFG_BYTE_RW;
|
|
+ nfi_write16(nfc, NFI_CNFG, reg);
|
|
+
|
|
+ nfi_write16(nfc, NFI_CON, CON_NFI_SEC_M | CON_NFI_BWR);
|
|
+ nfi_write16(nfc, NFI_STRDATA, STR_DATA);
|
|
+ }
|
|
+
|
|
+ mt7621_nfc_wait_pio_ready(nfc);
|
|
+ nfi_write32(nfc, NFI_DATAW, val);
|
|
+}
|
|
+
|
|
+static void mt7621_nfc_write_data(struct mt7621_nfc *nfc, const u8 *buf,
|
|
+ u32 len)
|
|
+{
|
|
+ while (((uintptr_t)buf & 3) && len) {
|
|
+ mt7621_nfc_pio_write(nfc, *buf, true);
|
|
+ buf++;
|
|
+ len--;
|
|
+ }
|
|
+
|
|
+ while (len >= 4) {
|
|
+ mt7621_nfc_pio_write(nfc, *(const u32 *)buf, false);
|
|
+ buf += 4;
|
|
+ len -= 4;
|
|
+ }
|
|
+
|
|
+ while (len) {
|
|
+ mt7621_nfc_pio_write(nfc, *buf, true);
|
|
+ buf++;
|
|
+ len--;
|
|
+ }
|
|
+}
|
|
+
|
|
+static void mt7621_nfc_write_data_empty(struct mt7621_nfc *nfc, u32 len)
|
|
+{
|
|
+ while (len >= 4) {
|
|
+ mt7621_nfc_pio_write(nfc, 0xffffffff, false);
|
|
+ len -= 4;
|
|
+ }
|
|
+
|
|
+ while (len) {
|
|
+ mt7621_nfc_pio_write(nfc, 0xff, true);
|
|
+ len--;
|
|
+ }
|
|
+}
|
|
+
|
|
+static int mt7621_nfc_dev_ready(struct mt7621_nfc *nfc,
|
|
+ unsigned int timeout_ms)
|
|
+{
|
|
+ u32 val;
|
|
+
|
|
+ return readl_poll_timeout_atomic(nfc->nfi_regs + NFI_STA, val,
|
|
+ !(val & STA_BUSY), 10,
|
|
+ timeout_ms * 1000);
|
|
+}
|
|
+
|
|
+static int mt7621_nfc_exec_instr(struct nand_chip *nand,
|
|
+ const struct nand_op_instr *instr)
|
|
+{
|
|
+ struct mt7621_nfc *nfc = nand_get_controller_data(nand);
|
|
+
|
|
+ switch (instr->type) {
|
|
+ case NAND_OP_CMD_INSTR:
|
|
+ mt7621_nfc_hw_reset(nfc);
|
|
+ nfi_write16(nfc, NFI_CNFG, CNFG_OP_CUSTOM << CNFG_OP_MODE_S);
|
|
+ return mt7621_nfc_send_command(nfc, instr->ctx.cmd.opcode);
|
|
+ case NAND_OP_ADDR_INSTR:
|
|
+ return mt7621_nfc_send_address(nfc, instr->ctx.addr.addrs,
|
|
+ instr->ctx.addr.naddrs);
|
|
+ case NAND_OP_DATA_IN_INSTR:
|
|
+ mt7621_nfc_read_data(nfc, instr->ctx.data.buf.in,
|
|
+ instr->ctx.data.len);
|
|
+ return 0;
|
|
+ case NAND_OP_DATA_OUT_INSTR:
|
|
+ mt7621_nfc_write_data(nfc, instr->ctx.data.buf.out,
|
|
+ instr->ctx.data.len);
|
|
+ return 0;
|
|
+ case NAND_OP_WAITRDY_INSTR:
|
|
+ return mt7621_nfc_dev_ready(nfc,
|
|
+ instr->ctx.waitrdy.timeout_ms);
|
|
+ default:
|
|
+ WARN_ONCE(1, "unsupported NAND instruction type: %d\n",
|
|
+ instr->type);
|
|
+
|
|
+ return -EINVAL;
|
|
+ }
|
|
+}
|
|
+
|
|
+static int mt7621_nfc_exec_op(struct nand_chip *nand,
|
|
+ const struct nand_operation *op, bool check_only)
|
|
+{
|
|
+ struct mt7621_nfc *nfc = nand_get_controller_data(nand);
|
|
+ int i, ret;
|
|
+
|
|
+ if (check_only)
|
|
+ return 0;
|
|
+
|
|
+ /* Only CS0 available */
|
|
+ nfi_write16(nfc, NFI_CSEL, 0);
|
|
+
|
|
+ for (i = 0; i < op->ninstrs; i++) {
|
|
+ ret = mt7621_nfc_exec_instr(nand, &op->instrs[i]);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mt7621_nfc_setup_data_interface(struct nand_chip *nand, int csline,
|
|
+ const struct nand_data_interface *conf)
|
|
+{
|
|
+ struct mt7621_nfc *nfc = nand_get_controller_data(nand);
|
|
+ const struct nand_sdr_timings *timings;
|
|
+ u32 acccon, temp, rate, tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt;
|
|
+
|
|
+ if (!nfc->nfi_clk)
|
|
+ return -ENOTSUPP;
|
|
+
|
|
+ timings = nand_get_sdr_timings(conf);
|
|
+ if (IS_ERR(timings))
|
|
+ return -ENOTSUPP;
|
|
+
|
|
+ rate = clk_get_rate(nfc->nfi_clk);
|
|
+
|
|
+ /* turn clock rate into KHZ */
|
|
+ rate /= 1000;
|
|
+
|
|
+ tpoecs = max(timings->tALH_min, timings->tCLH_min) / 1000;
|
|
+ tpoecs = DIV_ROUND_UP(tpoecs * rate, 1000000);
|
|
+ tpoecs = min_t(u32, tpoecs, ACCCON_POECS_MAX);
|
|
+
|
|
+ tprecs = max(timings->tCLS_min, timings->tALS_min) / 1000;
|
|
+ tprecs = DIV_ROUND_UP(tprecs * rate, 1000000);
|
|
+ tprecs = min_t(u32, tprecs, ACCCON_PRECS_MAX);
|
|
+
|
|
+ /* sdr interface has no tCR which means CE# low to RE# low */
|
|
+ tc2r = 0;
|
|
+
|
|
+ tw2r = timings->tWHR_min / 1000;
|
|
+ tw2r = DIV_ROUND_UP(tw2r * rate, 1000000);
|
|
+ tw2r = DIV_ROUND_UP(tw2r - 1, 2);
|
|
+ tw2r = min_t(u32, tw2r, ACCCON_W2R_MAX);
|
|
+
|
|
+ twh = max(timings->tREH_min, timings->tWH_min) / 1000;
|
|
+ twh = DIV_ROUND_UP(twh * rate, 1000000) - 1;
|
|
+ twh = min_t(u32, twh, ACCCON_WH_MAX);
|
|
+
|
|
+ /* Calculate real WE#/RE# hold time in nanosecond */
|
|
+ temp = (twh + 1) * 1000000 / rate;
|
|
+ /* nanosecond to picosecond */
|
|
+ temp *= 1000;
|
|
+
|
|
+	/*
+	 * WE# low level time should be expanded to meet the WE# pulse time
+	 * and the WE# cycle time at the same time.
+	 */
|
|
+ if (temp < timings->tWC_min)
|
|
+ twst = timings->tWC_min - temp;
|
|
+ else
|
|
+ twst = 0;
|
|
+ twst = max(timings->tWP_min, twst) / 1000;
|
|
+ twst = DIV_ROUND_UP(twst * rate, 1000000) - 1;
|
|
+ twst = min_t(u32, twst, ACCCON_WST_MAX);
|
|
+
|
|
+	/*
+	 * RE# low level time should be expanded to meet the RE# pulse time
+	 * and the RE# cycle time at the same time.
+	 */
|
|
+ if (temp < timings->tRC_min)
|
|
+ trlt = timings->tRC_min - temp;
|
|
+ else
|
|
+ trlt = 0;
|
|
+ trlt = max(trlt, timings->tRP_min) / 1000;
|
|
+ trlt = DIV_ROUND_UP(trlt * rate, 1000000) - 1;
|
|
+ trlt = min_t(u32, trlt, ACCCON_RLT_MAX);
|
|
+
|
|
+ if (csline == NAND_DATA_IFACE_CHECK_ONLY) {
|
|
+ if (twst < ACCCON_WST_MIN || trlt < ACCCON_RLT_MIN)
|
|
+ return -ENOTSUPP;
|
|
+ }
|
|
+
|
|
+ acccon = ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt);
|
|
+
|
|
+ dev_info(nfc->dev, "Using programmed access timing: %08x\n", acccon);
|
|
+
|
|
+ nfi_write32(nfc, NFI_ACCCON, acccon);
|
|
+
|
|
+ return 0;
|
|
+}
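Every field computed above follows the same recipe: take a picosecond timing from the SDR table, convert to nanoseconds, convert to NFI clock cycles with the clock rate in kHz, subtract one for the fields the hardware counts from zero, and clamp to the field maximum. A standalone sketch for the WH field (the 125 MHz clock and 10 ns tWH are example inputs, not chip data):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define ACCCON_WH_MAX 0x0f

int main(void)
{
	unsigned int rate = 125000000 / 1000;	/* NFI clock, in kHz */
	unsigned int twh_ps = 10000;		/* tWH from SDR timings, in ps */
	unsigned int twh;

	twh = twh_ps / 1000;				/* -> 10 ns */
	twh = DIV_ROUND_UP(twh * rate, 1000000) - 1;	/* ceil(1.25) - 1 = 1 */
	if (twh > ACCCON_WH_MAX)
		twh = ACCCON_WH_MAX;

	printf("WH field = %u\n", twh);	/* i.e. two cycles of hold time */
	return 0;
}
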
|
|
+
|
|
+static int mt7621_nfc_calc_ecc_strength(struct mt7621_nfc *nfc,
|
|
+ u32 avail_ecc_bytes)
|
|
+{
|
|
+ struct nand_chip *nand = &nfc->nand;
|
|
+ struct mtd_info *mtd = nand_to_mtd(nand);
|
|
+ u32 strength;
|
|
+ int i;
|
|
+
|
|
+ strength = avail_ecc_bytes * 8 / ECC_PARITY_BITS;
|
|
+
|
|
+ /* Find the closest supported ecc strength */
|
|
+ for (i = ARRAY_SIZE(mt7621_ecc_strength) - 1; i >= 0; i--) {
|
|
+ if (mt7621_ecc_strength[i] <= strength)
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (unlikely(i < 0)) {
|
|
+ dev_err(nfc->dev, "OOB size (%u) is not supported\n",
|
|
+ mtd->oobsize);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ nand->ecc.strength = mt7621_ecc_strength[i];
|
|
+ nand->ecc.bytes =
|
|
+ DIV_ROUND_UP(nand->ecc.strength * ECC_PARITY_BITS, 8);
|
|
+
|
|
+ dev_info(nfc->dev, "ECC strength adjusted to %u bits\n",
|
|
+ nand->ecc.strength);
|
|
+
|
|
+ return i;
|
|
+}
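Concretely, a 2 KiB page with 64 bytes of OOB has four 512-byte sectors and 16 spare bytes per sector; after the 8-byte FDM, 8 bytes (64 bits) remain for parity, and 64 / 13 ≈ 4.9, so the loop above settles on 4-bit strength. The same walk-down as a standalone sketch (the geometry is an assumed example):

#include <stdio.h>

#define ECC_PARITY_BITS 13

static const unsigned char strengths[] = { 4, 6, 8, 10, 12 };

int main(void)
{
	unsigned int avail_ecc_bytes = 8;	/* 16-byte spare minus 8-byte FDM */
	unsigned int limit = avail_ecc_bytes * 8 / ECC_PARITY_BITS;	/* 4 */
	int i;

	/* Pick the largest supported strength that still fits. */
	for (i = sizeof(strengths) - 1; i >= 0; i--)
		if (strengths[i] <= limit)
			break;

	if (i >= 0)
		printf("strength=%u bytes=%u\n", strengths[i],
		       (strengths[i] * ECC_PARITY_BITS + 7) / 8);
	return 0;
}
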
|
|
+
|
|
+static int mt7621_nfc_set_spare_per_sector(struct mt7621_nfc *nfc)
|
|
+{
|
|
+ struct nand_chip *nand = &nfc->nand;
|
|
+ struct mtd_info *mtd = nand_to_mtd(nand);
|
|
+ u32 size;
|
|
+ int i;
|
|
+
|
|
+ size = nand->ecc.bytes + NFI_FDM_SIZE;
|
|
+
|
|
+ /* Find the closest supported spare size */
|
|
+ for (i = 0; i < ARRAY_SIZE(mt7621_nfi_spare_size); i++) {
|
|
+ if (mt7621_nfi_spare_size[i] >= size)
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (unlikely(i >= ARRAY_SIZE(mt7621_nfi_spare_size))) {
|
|
+ dev_err(nfc->dev, "OOB size (%u) is not supported\n",
|
|
+ mtd->oobsize);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ nfc->spare_per_sector = mt7621_nfi_spare_size[i];
|
|
+
|
|
+ return i;
|
|
+}
|
|
+
|
|
+static int mt7621_nfc_ecc_init(struct mt7621_nfc *nfc)
|
|
+{
|
|
+ struct nand_chip *nand = &nfc->nand;
|
|
+ struct mtd_info *mtd = nand_to_mtd(nand);
|
|
+ u32 spare_per_sector, encode_block_size, decode_block_size;
|
|
+ u32 ecc_enccfg, ecc_deccfg;
|
|
+ int ecc_cap;
|
|
+
|
|
+ /* Only hardware ECC mode is supported */
|
|
+ if (nand->ecc.mode != NAND_ECC_HW_SYNDROME) {
|
|
+ dev_err(nfc->dev, "Only hardware ECC mode is supported\n");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ nand->ecc.size = ECC_SECTOR_SIZE;
|
|
+ nand->ecc.steps = mtd->writesize / nand->ecc.size;
|
|
+
|
|
+ spare_per_sector = mtd->oobsize / nand->ecc.steps;
|
|
+
|
|
+ ecc_cap = mt7621_nfc_calc_ecc_strength(nfc,
|
|
+ spare_per_sector - NFI_FDM_SIZE);
|
|
+ if (ecc_cap < 0)
|
|
+ return ecc_cap;
|
|
+
|
|
+ /* Sector + FDM */
|
|
+ encode_block_size = (nand->ecc.size + NFI_FDM_SIZE) * 8;
|
|
+ ecc_enccfg = ecc_cap | (ENC_MODE_NFI << ENC_MODE_S) |
|
|
+ (encode_block_size << ENC_CNFG_MSG_S);
|
|
+
|
|
+ /* Sector + FDM + ECC parity bits */
|
|
+ decode_block_size = ((nand->ecc.size + NFI_FDM_SIZE) * 8) +
|
|
+ nand->ecc.strength * ECC_PARITY_BITS;
|
|
+ ecc_deccfg = ecc_cap | (DEC_MODE_NFI << DEC_MODE_S) |
|
|
+ (decode_block_size << DEC_CS_S) |
|
|
+ (DEC_CON_EL << DEC_CON_S) | DEC_EMPTY_EN;
|
|
+
|
|
+ mt7621_ecc_encoder_op(nfc, false);
|
|
+ ecc_write32(nfc, ECC_ENCCNFG, ecc_enccfg);
|
|
+
|
|
+ mt7621_ecc_decoder_op(nfc, false);
|
|
+ ecc_write32(nfc, ECC_DECCNFG, ecc_deccfg);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mt7621_nfc_set_page_format(struct mt7621_nfc *nfc)
|
|
+{
|
|
+ struct nand_chip *nand = &nfc->nand;
|
|
+ struct mtd_info *mtd = nand_to_mtd(nand);
|
|
+ int i, spare_size;
|
|
+ u32 pagefmt;
|
|
+
|
|
+ spare_size = mt7621_nfc_set_spare_per_sector(nfc);
|
|
+ if (spare_size < 0)
|
|
+ return spare_size;
|
|
+
|
|
+ for (i = 0; i < ARRAY_SIZE(mt7621_nfi_page_size); i++) {
|
|
+ if (mt7621_nfi_page_size[i] == mtd->writesize)
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (unlikely(i >= ARRAY_SIZE(mt7621_nfi_page_size))) {
|
|
+ dev_err(nfc->dev, "Page size (%u) is not supported\n",
|
|
+ mtd->writesize);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ pagefmt = i | (spare_size << PAGEFMT_SPARE_S) |
|
|
+ (NFI_FDM_SIZE << PAGEFMT_FDM_S) |
|
|
+ (NFI_FDM_SIZE << PAGEFMT_FDM_ECC_S);
|
|
+
|
|
+ nfi_write16(nfc, NFI_PAGEFMT, pagefmt);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mt7621_nfc_attach_chip(struct nand_chip *nand)
|
|
+{
|
|
+ struct mt7621_nfc *nfc = nand_get_controller_data(nand);
|
|
+ int ret;
|
|
+
|
|
+ if (nand->options & NAND_BUSWIDTH_16) {
|
|
+ dev_err(nfc->dev, "16-bit buswidth is not supported");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ ret = mt7621_nfc_ecc_init(nfc);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ return mt7621_nfc_set_page_format(nfc);
|
|
+}
|
|
+
|
|
+static const struct nand_controller_ops mt7621_nfc_controller_ops = {
|
|
+ .attach_chip = mt7621_nfc_attach_chip,
|
|
+ .exec_op = mt7621_nfc_exec_op,
|
|
+ .setup_data_interface = mt7621_nfc_setup_data_interface,
|
|
+};
|
|
+
|
|
+static int mt7621_nfc_ooblayout_free(struct mtd_info *mtd, int section,
|
|
+ struct mtd_oob_region *oob_region)
|
|
+{
|
|
+ struct nand_chip *nand = mtd_to_nand(mtd);
|
|
+
|
|
+ if (section >= nand->ecc.steps)
|
|
+ return -ERANGE;
|
|
+
|
|
+ oob_region->length = NFI_FDM_SIZE - 1;
|
|
+ oob_region->offset = section * NFI_FDM_SIZE + 1;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mt7621_nfc_ooblayout_ecc(struct mtd_info *mtd, int section,
|
|
+ struct mtd_oob_region *oob_region)
|
|
+{
|
|
+ struct nand_chip *nand = mtd_to_nand(mtd);
|
|
+
|
|
+ if (section)
|
|
+ return -ERANGE;
|
|
+
|
|
+ oob_region->offset = NFI_FDM_SIZE * nand->ecc.steps;
|
|
+ oob_region->length = mtd->oobsize - oob_region->offset;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static const struct mtd_ooblayout_ops mt7621_nfc_ooblayout_ops = {
|
|
+ .free = mt7621_nfc_ooblayout_free,
|
|
+ .ecc = mt7621_nfc_ooblayout_ecc,
|
|
+};
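For the same 2 KiB / 64-byte OOB geometry, this layout exposes the four FDM regions (minus byte 0 of each, where the first one typically carries the factory bad-block marker) as free OOB, and everything after them as one ECC region. A standalone sketch printing the regions the two callbacks above would report (the geometry is an assumed example, not read from hardware):

#include <stdio.h>

#define NFI_FDM_SIZE 8

int main(void)
{
	unsigned int steps = 4, oobsize = 64;	/* 2 KiB page example */
	unsigned int s;

	for (s = 0; s < steps; s++)		/* free (user) regions */
		printf("free[%u]: offset=%u len=%u\n",
		       s, s * NFI_FDM_SIZE + 1, NFI_FDM_SIZE - 1);

	printf("ecc: offset=%u len=%u\n",	/* single ECC region */
	       steps * NFI_FDM_SIZE, oobsize - steps * NFI_FDM_SIZE);
	return 0;
}
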
|
|
+
|
|
+static void mt7621_nfc_write_fdm(struct mt7621_nfc *nfc)
|
|
+{
|
|
+ struct nand_chip *nand = &nfc->nand;
|
|
+ u32 vall, valm;
|
|
+ u8 *oobptr;
|
|
+ int i, j;
|
|
+
|
|
+ for (i = 0; i < nand->ecc.steps; i++) {
|
|
+ vall = 0;
|
|
+ valm = 0;
|
|
+ oobptr = oob_fdm_ptr(nand, i);
|
|
+
|
|
+ for (j = 0; j < 4; j++)
|
|
+ vall |= (u32)oobptr[j] << (j * 8);
|
|
+
|
|
+ for (j = 0; j < 4; j++)
|
|
+ valm |= (u32)oobptr[j + 4] << (j * 8);
|
|
+
|
|
+ nfi_write32(nfc, NFI_FDML(i), vall);
|
|
+ nfi_write32(nfc, NFI_FDMM(i), valm);
|
|
+ }
|
|
+}
|
|
+
|
|
+static void mt7621_nfc_read_sector_fdm(struct mt7621_nfc *nfc, u32 sect)
|
|
+{
|
|
+ struct nand_chip *nand = &nfc->nand;
|
|
+ u32 vall, valm;
|
|
+ u8 *oobptr;
|
|
+ int i;
|
|
+
|
|
+ vall = nfi_read32(nfc, NFI_FDML(sect));
|
|
+ valm = nfi_read32(nfc, NFI_FDMM(sect));
|
|
+ oobptr = oob_fdm_ptr(nand, sect);
|
|
+
|
|
+ for (i = 0; i < 4; i++)
|
|
+ oobptr[i] = (vall >> (i * 8)) & 0xff;
|
|
+
|
|
+ for (i = 0; i < 4; i++)
|
|
+ oobptr[i + 4] = (valm >> (i * 8)) & 0xff;
|
|
+}
|
|
+
|
|
+static int mt7621_nfc_read_page_hwecc(struct nand_chip *nand, uint8_t *buf,
|
|
+ int oob_required, int page)
|
|
+{
|
|
+ struct mt7621_nfc *nfc = nand_get_controller_data(nand);
|
|
+ struct mtd_info *mtd = nand_to_mtd(nand);
|
|
+ int bitflips = 0;
|
|
+ int rc, i;
|
|
+
|
|
+ nand_read_page_op(nand, page, 0, NULL, 0);
|
|
+
|
|
+ nfi_write16(nfc, NFI_CNFG, (CNFG_OP_CUSTOM << CNFG_OP_MODE_S) |
|
|
+ CNFG_READ_MODE | CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
|
|
+
|
|
+ mt7621_ecc_decoder_op(nfc, true);
|
|
+
|
|
+ nfi_write16(nfc, NFI_CON,
|
|
+ CON_NFI_BRD | (nand->ecc.steps << CON_NFI_SEC_S));
|
|
+
|
|
+ for (i = 0; i < nand->ecc.steps; i++) {
|
|
+ if (buf)
|
|
+ mt7621_nfc_read_data(nfc, page_data_ptr(nand, buf, i),
|
|
+ nand->ecc.size);
|
|
+ else
|
|
+ mt7621_nfc_read_data_discard(nfc, nand->ecc.size);
|
|
+
|
|
+ rc = mt7621_ecc_decoder_wait_done(nfc, i);
|
|
+
|
|
+ mt7621_nfc_read_sector_fdm(nfc, i);
|
|
+
|
|
+ if (rc < 0) {
|
|
+ bitflips = -EIO;
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ rc = mt7621_ecc_correct_check(nfc,
|
|
+ buf ? page_data_ptr(nand, buf, i) : NULL,
|
|
+ oob_fdm_ptr(nand, i), i);
|
|
+
|
|
+ if (rc < 0) {
|
|
+ dev_dbg(nfc->dev,
|
|
+ "Uncorrectable ECC error at page %d.%d\n",
|
|
+ page, i);
|
|
+ bitflips = -EBADMSG;
|
|
+ mtd->ecc_stats.failed++;
|
|
+ } else if (bitflips >= 0) {
|
|
+ bitflips += rc;
|
|
+ mtd->ecc_stats.corrected += rc;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ mt7621_ecc_decoder_op(nfc, false);
|
|
+
|
|
+ nfi_write16(nfc, NFI_CON, 0);
|
|
+
|
|
+ return bitflips;
|
|
+}
|
|
+
|
|
+static int mt7621_nfc_read_page_raw(struct nand_chip *nand, uint8_t *buf,
|
|
+ int oob_required, int page)
|
|
+{
|
|
+ struct mt7621_nfc *nfc = nand_get_controller_data(nand);
|
|
+ int i;
|
|
+
|
|
+ nand_read_page_op(nand, page, 0, NULL, 0);
|
|
+
|
|
+ nfi_write16(nfc, NFI_CNFG, (CNFG_OP_CUSTOM << CNFG_OP_MODE_S) |
|
|
+ CNFG_READ_MODE);
|
|
+
|
|
+ nfi_write16(nfc, NFI_CON,
|
|
+ CON_NFI_BRD | (nand->ecc.steps << CON_NFI_SEC_S));
|
|
+
|
|
+ for (i = 0; i < nand->ecc.steps; i++) {
|
|
+ /* Read data */
|
|
+ if (buf)
|
|
+ mt7621_nfc_read_data(nfc, page_data_ptr(nand, buf, i),
|
|
+ nand->ecc.size);
|
|
+ else
|
|
+ mt7621_nfc_read_data_discard(nfc, nand->ecc.size);
|
|
+
|
|
+ /* Read FDM */
|
|
+ mt7621_nfc_read_data(nfc, oob_fdm_ptr(nand, i), NFI_FDM_SIZE);
|
|
+
|
|
+ /* Read ECC parity data */
|
|
+ mt7621_nfc_read_data(nfc, oob_ecc_ptr(nfc, i),
|
|
+ nfc->spare_per_sector - NFI_FDM_SIZE);
|
|
+ }
|
|
+
|
|
+ nfi_write16(nfc, NFI_CON, 0);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mt7621_nfc_read_oob_hwecc(struct nand_chip *nand, int page)
|
|
+{
|
|
+ return mt7621_nfc_read_page_hwecc(nand, NULL, 1, page);
|
|
+}
|
|
+
|
|
+static int mt7621_nfc_read_oob_raw(struct nand_chip *nand, int page)
|
|
+{
|
|
+ return mt7621_nfc_read_page_raw(nand, NULL, 1, page);
|
|
+}
|
|
+
|
|
+static int mt7621_nfc_check_empty_page(struct nand_chip *nand, const u8 *buf)
|
|
+{
|
|
+ struct mtd_info *mtd = nand_to_mtd(nand);
|
|
+ uint32_t i, j;
|
|
+ u8 *oobptr;
|
|
+
|
|
+ if (buf) {
|
|
+ for (i = 0; i < mtd->writesize; i++)
|
|
+ if (buf[i] != 0xff)
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ for (i = 0; i < nand->ecc.steps; i++) {
|
|
+ oobptr = oob_fdm_ptr(nand, i);
|
|
+ for (j = 0; j < NFI_FDM_SIZE; j++)
|
|
+ if (oobptr[j] != 0xff)
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+static int mt7621_nfc_write_page_hwecc(struct nand_chip *nand,
|
|
+ const uint8_t *buf, int oob_required,
|
|
+ int page)
|
|
+{
|
|
+ struct mt7621_nfc *nfc = nand_get_controller_data(nand);
|
|
+ struct mtd_info *mtd = nand_to_mtd(nand);
|
|
+
|
|
+ if (mt7621_nfc_check_empty_page(nand, buf)) {
|
|
+ /*
|
|
+ * MT7621 ECC engine always generates parity code for input
|
|
+ * pages, even for empty pages. Doing so will write back ECC
|
|
+ * parity code to the oob region, which means such pages will
|
|
+ * no longer be empty pages.
|
|
+ *
|
|
+ * To avoid this, stop write operation if current page is an
|
|
+ * empty page.
|
|
+ */
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ nand_prog_page_begin_op(nand, page, 0, NULL, 0);
|
|
+
|
|
+ nfi_write16(nfc, NFI_CNFG, (CNFG_OP_CUSTOM << CNFG_OP_MODE_S) |
|
|
+ CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
|
|
+
|
|
+ mt7621_ecc_encoder_op(nfc, true);
|
|
+
|
|
+ mt7621_nfc_write_fdm(nfc);
|
|
+
|
|
+ nfi_write16(nfc, NFI_CON,
|
|
+ CON_NFI_BWR | (nand->ecc.steps << CON_NFI_SEC_S));
|
|
+
|
|
+ if (buf)
|
|
+ mt7621_nfc_write_data(nfc, buf, mtd->writesize);
|
|
+ else
|
|
+ mt7621_nfc_write_data_empty(nfc, mtd->writesize);
|
|
+
|
|
+ mt7621_nfc_wait_write_completion(nfc, nand);
|
|
+
|
|
+ mt7621_ecc_encoder_op(nfc, false);
|
|
+
|
|
+ nfi_write16(nfc, NFI_CON, 0);
|
|
+
|
|
+ return nand_prog_page_end_op(nand);
|
|
+}
|
|
+
|
|
+static int mt7621_nfc_write_page_raw(struct nand_chip *nand,
|
|
+ const uint8_t *buf, int oob_required,
|
|
+ int page)
|
|
+{
|
|
+ struct mt7621_nfc *nfc = nand_get_controller_data(nand);
|
|
+ int i;
|
|
+
|
|
+ nand_prog_page_begin_op(nand, page, 0, NULL, 0);
|
|
+
|
|
+ nfi_write16(nfc, NFI_CNFG, (CNFG_OP_CUSTOM << CNFG_OP_MODE_S));
|
|
+
|
|
+ nfi_write16(nfc, NFI_CON,
|
|
+ CON_NFI_BWR | (nand->ecc.steps << CON_NFI_SEC_S));
|
|
+
|
|
+ for (i = 0; i < nand->ecc.steps; i++) {
|
|
+ /* Write data */
|
|
+ if (buf)
|
|
+ mt7621_nfc_write_data(nfc, page_data_ptr(nand, buf, i),
|
|
+ nand->ecc.size);
|
|
+ else
|
|
+ mt7621_nfc_write_data_empty(nfc, nand->ecc.size);
|
|
+
|
|
+ /* Write FDM */
|
|
+ mt7621_nfc_write_data(nfc, oob_fdm_ptr(nand, i),
|
|
+ NFI_FDM_SIZE);
|
|
+
|
|
+ /* Write dummy ECC parity data */
|
|
+ mt7621_nfc_write_data_empty(nfc, nfc->spare_per_sector -
|
|
+ NFI_FDM_SIZE);
|
|
+ }
|
|
+
|
|
+ mt7621_nfc_wait_write_completion(nfc, nand);
|
|
+
|
|
+ nfi_write16(nfc, NFI_CON, 0);
|
|
+
|
|
+ return nand_prog_page_end_op(nand);
|
|
+}
|
|
+
|
|
+static int mt7621_nfc_write_oob_hwecc(struct nand_chip *nand, int page)
|
|
+{
|
|
+ return mt7621_nfc_write_page_hwecc(nand, NULL, 1, page);
|
|
+}
|
|
+
|
|
+static int mt7621_nfc_write_oob_raw(struct nand_chip *nand, int page)
|
|
+{
|
|
+ return mt7621_nfc_write_page_raw(nand, NULL, 1, page);
|
|
+}
|
|
+
|
|
+static int mt7621_nfc_init_chip(struct mt7621_nfc *nfc)
|
|
+{
|
|
+ struct nand_chip *nand = &nfc->nand;
|
|
+ struct mtd_info *mtd;
|
|
+ int ret;
|
|
+
|
|
+ nand->controller = &nfc->controller;
|
|
+ nand_set_controller_data(nand, (void *)nfc);
|
|
+ nand_set_flash_node(nand, nfc->dev->of_node);
|
|
+
|
|
+ nand->options |= NAND_USE_BOUNCE_BUFFER | NAND_NO_SUBPAGE_WRITE;
|
|
+ if (!nfc->nfi_clk)
|
|
+ nand->options |= NAND_KEEP_TIMINGS;
|
|
+
|
|
+ nand->ecc.mode = NAND_ECC_HW_SYNDROME;
|
|
+ nand->ecc.read_page = mt7621_nfc_read_page_hwecc;
|
|
+ nand->ecc.read_page_raw = mt7621_nfc_read_page_raw;
|
|
+ nand->ecc.write_page = mt7621_nfc_write_page_hwecc;
|
|
+ nand->ecc.write_page_raw = mt7621_nfc_write_page_raw;
|
|
+ nand->ecc.read_oob = mt7621_nfc_read_oob_hwecc;
|
|
+ nand->ecc.read_oob_raw = mt7621_nfc_read_oob_raw;
|
|
+ nand->ecc.write_oob = mt7621_nfc_write_oob_hwecc;
|
|
+ nand->ecc.write_oob_raw = mt7621_nfc_write_oob_raw;
|
|
+
|
|
+ mtd = nand_to_mtd(nand);
|
|
+ mtd->owner = THIS_MODULE;
|
|
+ mtd->dev.parent = nfc->dev;
|
|
+ mtd->name = MT7621_NFC_NAME;
|
|
+ mtd_set_ooblayout(mtd, &mt7621_nfc_ooblayout_ops);
|
|
+
|
|
+ mt7621_nfc_hw_init(nfc);
|
|
+
|
|
+ ret = nand_scan(nand, 1);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ ret = mtd_device_register(mtd, NULL, 0);
|
|
+ if (ret) {
|
|
+ dev_err(nfc->dev, "Failed to register MTD: %d\n", ret);
|
|
+ nand_release(nand);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mt7621_nfc_probe(struct platform_device *pdev)
|
|
+{
|
|
+ struct device *dev = &pdev->dev;
|
|
+ struct mt7621_nfc *nfc;
|
|
+ struct resource *res;
|
|
+ int ret;
|
|
+
|
|
+ nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
|
|
+ if (!nfc)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ nand_controller_init(&nfc->controller);
|
|
+ nfc->controller.ops = &mt7621_nfc_controller_ops;
|
|
+ nfc->dev = dev;
|
|
+
|
|
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nfi");
|
|
+ nfc->nfi_regs = devm_ioremap_resource(dev, res);
|
|
+ if (IS_ERR(nfc->nfi_regs)) {
|
|
+ ret = PTR_ERR(nfc->nfi_regs);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ecc");
|
|
+ nfc->ecc_regs = devm_ioremap_resource(dev, res);
|
|
+ if (IS_ERR(nfc->ecc_regs)) {
|
|
+ ret = PTR_ERR(nfc->ecc_regs);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ nfc->nfi_clk = devm_clk_get(dev, "nfi_clk");
|
|
+ if (IS_ERR(nfc->nfi_clk)) {
|
|
+ dev_warn(dev, "nfi clk not provided\n");
|
|
+ nfc->nfi_clk = NULL;
|
|
+ } else {
|
|
+ ret = clk_prepare_enable(nfc->nfi_clk);
|
|
+ if (ret) {
|
|
+ dev_err(dev, "Failed to enable nfi core clock\n");
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ platform_set_drvdata(pdev, nfc);
|
|
+
|
|
+ ret = mt7621_nfc_init_chip(nfc);
|
|
+ if (ret) {
|
|
+ dev_err(dev, "Failed to initialize nand chip\n");
|
|
+ goto clk_disable;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+
|
|
+clk_disable:
|
|
+ clk_disable_unprepare(nfc->nfi_clk);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int mt7621_nfc_remove(struct platform_device *pdev)
|
|
+{
|
|
+ struct mt7621_nfc *nfc = platform_get_drvdata(pdev);
|
|
+
|
|
+ nand_release(&nfc->nand);
|
|
+ clk_disable_unprepare(nfc->nfi_clk);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static const struct of_device_id mt7621_nfc_id_table[] = {
+	{ .compatible = "mediatek,mt7621-nfc" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, mt7621_nfc_id_table);
|
|
+
|
|
+static struct platform_driver mt7621_nfc_driver = {
|
|
+ .probe = mt7621_nfc_probe,
|
|
+ .remove = mt7621_nfc_remove,
|
|
+ .driver = {
|
|
+ .name = MT7621_NFC_NAME,
|
|
+ .owner = THIS_MODULE,
|
|
+ .of_match_table = mt7621_nfc_id_table,
|
|
+ },
|
|
+};
|
|
+module_platform_driver(mt7621_nfc_driver);
|
|
+
|
|
+MODULE_LICENSE("GPL");
|
|
+MODULE_AUTHOR("Weijie Gao <weijie.gao@mediatek.com>");
|
|
+MODULE_DESCRIPTION("MediaTek MT7621 NAND Flash Controller driver");
|
|
diff --git a/target/linux/ramips/patches-5.4/0300-mtd-rawnand-add-driver-support-for-MT7621-nand-flash.patch b/target/linux/ramips/patches-5.4/0300-mtd-rawnand-add-driver-support-for-MT7621-nand-flash.patch
|
|
index ba844fed0f0e..586db208dbe6 100644
|
|
--- a/target/linux/ramips/patches-5.4/0300-mtd-rawnand-add-driver-support-for-MT7621-nand-flash.patch
|
|
+++ b/target/linux/ramips/patches-5.4/0300-mtd-rawnand-add-driver-support-for-MT7621-nand-flash.patch
|
|
@@ -45,1356 +45,4 @@ Signed-off-by: Weijie Gao <weijie.gao@mediatek.com>
|
|
obj-$(CONFIG_MTD_NAND_MTK) += mtk_ecc.o mtk_nand.o
|
|
obj-$(CONFIG_MTD_NAND_MXIC) += mxic_nand.o
|
|
obj-$(CONFIG_MTD_NAND_TEGRA) += tegra_nand.o
|
|
---- /dev/null
|
|
-+++ b/drivers/mtd/nand/raw/mt7621_nand.c
|
|
-@@ -0,0 +1,1350 @@
|
|
-+// SPDX-License-Identifier: GPL-2.0
|
|
-+/*
|
|
-+ * MediaTek MT7621 NAND Flash Controller driver
|
|
-+ *
|
|
-+ * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
|
|
-+ *
|
|
-+ * Author: Weijie Gao <weijie.gao@mediatek.com>
|
|
-+ */
|
|
-+
|
|
-+#include <linux/io.h>
|
|
-+#include <linux/clk.h>
|
|
-+#include <linux/init.h>
|
|
-+#include <linux/errno.h>
|
|
-+#include <linux/sizes.h>
|
|
-+#include <linux/iopoll.h>
|
|
-+#include <linux/kernel.h>
|
|
-+#include <linux/module.h>
|
|
-+#include <linux/mtd/mtd.h>
|
|
-+#include <linux/mtd/rawnand.h>
|
|
-+#include <linux/mtd/partitions.h>
|
|
-+#include <linux/platform_device.h>
|
|
-+#include <asm/addrspace.h>
|
|
-+
|
|
-+/* NFI core registers */
|
|
-+#define NFI_CNFG 0x000
|
|
-+#define CNFG_OP_MODE_S 12
|
|
-+#define CNFG_OP_MODE_M GENMASK(14, 12)
|
|
-+#define CNFG_OP_CUSTOM 6
|
|
-+#define CNFG_AUTO_FMT_EN BIT(9)
|
|
-+#define CNFG_HW_ECC_EN BIT(8)
|
|
-+#define CNFG_BYTE_RW BIT(6)
|
|
-+#define CNFG_READ_MODE BIT(1)
|
|
-+
|
|
-+#define NFI_PAGEFMT 0x004
|
|
-+#define PAGEFMT_FDM_ECC_S 12
|
|
-+#define PAGEFMT_FDM_ECC_M GENMASK(15, 12)
|
|
-+#define PAGEFMT_FDM_S 8
|
|
-+#define PAGEFMT_FDM_M GENMASK(11, 8)
|
|
-+#define PAGEFMT_SPARE_S 4
|
|
-+#define PAGEFMT_SPARE_M GENMASK(5, 4)
|
|
-+#define PAGEFMT_PAGE_S 0
|
|
-+#define PAGEFMT_PAGE_M GENMASK(1, 0)
|
|
-+
|
|
-+#define NFI_CON 0x008
|
|
-+#define CON_NFI_SEC_S 12
|
|
-+#define CON_NFI_SEC_M GENMASK(15, 12)
|
|
-+#define CON_NFI_BWR BIT(9)
|
|
-+#define CON_NFI_BRD BIT(8)
|
|
-+#define CON_NFI_RST BIT(1)
|
|
-+#define CON_FIFO_FLUSH BIT(0)
|
|
-+
|
|
-+#define NFI_ACCCON 0x00c
|
|
-+#define ACCCON_POECS_S 28
|
|
-+#define ACCCON_POECS_MAX 0x0f
|
|
-+#define ACCCON_POECS_DEF 3
|
|
-+#define ACCCON_PRECS_S 22
|
|
-+#define ACCCON_PRECS_MAX 0x3f
|
|
-+#define ACCCON_PRECS_DEF 3
|
|
-+#define ACCCON_C2R_S 16
|
|
-+#define ACCCON_C2R_MAX 0x3f
|
|
-+#define ACCCON_C2R_DEF 7
|
|
-+#define ACCCON_W2R_S 12
|
|
-+#define ACCCON_W2R_MAX 0x0f
|
|
-+#define ACCCON_W2R_DEF 7
|
|
-+#define ACCCON_WH_S 8
|
|
-+#define ACCCON_WH_MAX 0x0f
|
|
-+#define ACCCON_WH_DEF 15
|
|
-+#define ACCCON_WST_S 4
|
|
-+#define ACCCON_WST_MAX 0x0f
|
|
-+#define ACCCON_WST_DEF 15
|
|
-+#define ACCCON_WST_MIN 3
|
|
-+#define ACCCON_RLT_S 0
|
|
-+#define ACCCON_RLT_MAX 0x0f
|
|
-+#define ACCCON_RLT_DEF 15
|
|
-+#define ACCCON_RLT_MIN 3
|
|
-+
|
|
-+#define NFI_CMD 0x020
|
|
-+
|
|
-+#define NFI_ADDRNOB 0x030
|
|
-+#define ADDR_ROW_NOB_S 4
|
|
-+#define ADDR_ROW_NOB_M GENMASK(6, 4)
|
|
-+#define ADDR_COL_NOB_S 0
|
|
-+#define ADDR_COL_NOB_M GENMASK(2, 0)
|
|
-+
|
|
-+#define NFI_COLADDR 0x034
|
|
-+#define NFI_ROWADDR 0x038
|
|
-+
|
|
-+#define NFI_STRDATA 0x040
|
|
-+#define STR_DATA BIT(0)
|
|
-+
|
|
-+#define NFI_CNRNB 0x044
|
|
-+#define CB2R_TIME_S 4
|
|
-+#define CB2R_TIME_M GENMASK(7, 4)
|
|
-+#define STR_CNRNB BIT(0)
|
|
-+
|
|
-+#define NFI_DATAW 0x050
|
|
-+#define NFI_DATAR 0x054
|
|
-+
|
|
-+#define NFI_PIO_DIRDY 0x058
|
|
-+#define PIO_DIRDY BIT(0)
|
|
-+
|
|
-+#define NFI_STA 0x060
|
|
-+#define STA_NFI_FSM_S 16
|
|
-+#define STA_NFI_FSM_M GENMASK(19, 16)
|
|
-+#define STA_FSM_CUSTOM_DATA 14
|
|
-+#define STA_BUSY BIT(8)
|
|
-+#define STA_ADDR BIT(1)
|
|
-+#define STA_CMD BIT(0)
|
|
-+
|
|
-+#define NFI_ADDRCNTR 0x070
|
|
-+#define SEC_CNTR_S 12
|
|
-+#define SEC_CNTR_M GENMASK(15, 12)
|
|
-+#define SEC_ADDR_S 0
|
|
-+#define SEC_ADDR_M GENMASK(9, 0)
|
|
-+
|
|
-+#define NFI_CSEL 0x090
|
|
-+#define CSEL_S 0
|
|
-+#define CSEL_M GENMASK(1, 0)
|
|
-+
|
|
-+#define NFI_FDM0L 0x0a0
|
|
-+#define NFI_FDML(n) (0x0a0 + ((n) << 3))
|
|
-+
|
|
-+#define NFI_FDM0M 0x0a4
|
|
-+#define NFI_FDMM(n) (0x0a4 + ((n) << 3))
|
|
-+
|
|
-+#define NFI_MASTER_STA 0x210
|
|
-+#define MAS_ADDR GENMASK(11, 9)
|
|
-+#define MAS_RD GENMASK(8, 6)
|
|
-+#define MAS_WR GENMASK(5, 3)
|
|
-+#define MAS_RDDLY GENMASK(2, 0)
|
|
-+
|
|
-+/* ECC engine registers */
|
|
-+#define ECC_ENCCON 0x000
|
|
-+#define ENC_EN BIT(0)
|
|
-+
|
|
-+#define ECC_ENCCNFG 0x004
|
|
-+#define ENC_CNFG_MSG_S 16
|
|
-+#define ENC_CNFG_MSG_M GENMASK(28, 16)
|
|
-+#define ENC_MODE_S 4
|
|
-+#define ENC_MODE_M GENMASK(5, 4)
|
|
-+#define ENC_MODE_NFI 1
|
|
-+#define ENC_TNUM_S 0
|
|
-+#define ENC_TNUM_M GENMASK(2, 0)
|
|
-+
|
|
-+#define ECC_ENCIDLE 0x00c
|
|
-+#define ENC_IDLE BIT(0)
|
|
-+
|
|
-+#define ECC_DECCON 0x100
|
|
-+#define DEC_EN BIT(0)
|
|
-+
|
|
-+#define ECC_DECCNFG 0x104
|
|
-+#define DEC_EMPTY_EN BIT(31)
|
|
-+#define DEC_CS_S 16
|
|
-+#define DEC_CS_M GENMASK(28, 16)
|
|
-+#define DEC_CON_S 12
|
|
-+#define DEC_CON_M GENMASK(13, 12)
|
|
-+#define DEC_CON_EL 2
|
|
-+#define DEC_MODE_S 4
|
|
-+#define DEC_MODE_M GENMASK(5, 4)
|
|
-+#define DEC_MODE_NFI 1
|
|
-+#define DEC_TNUM_S 0
|
|
-+#define DEC_TNUM_M GENMASK(2, 0)
|
|
-+
|
|
-+#define ECC_DECIDLE 0x10c
|
|
-+#define DEC_IDLE BIT(1)
|
|
-+
|
|
-+#define ECC_DECENUM 0x114
|
|
-+#define ERRNUM_S 2
|
|
-+#define ERRNUM_M GENMASK(3, 0)
|
|
-+
|
|
-+#define ECC_DECDONE 0x118
|
|
-+#define DEC_DONE7 BIT(7)
|
|
-+#define DEC_DONE6 BIT(6)
|
|
-+#define DEC_DONE5 BIT(5)
|
|
-+#define DEC_DONE4 BIT(4)
|
|
-+#define DEC_DONE3 BIT(3)
|
|
-+#define DEC_DONE2 BIT(2)
|
|
-+#define DEC_DONE1 BIT(1)
|
|
-+#define DEC_DONE0 BIT(0)
|
|
-+
|
|
-+#define ECC_DECEL(n) (0x11c + (n) * 4)
|
|
-+#define DEC_EL_ODD_S 16
|
|
-+#define DEC_EL_EVEN_S 0
|
|
-+#define DEC_EL_M 0x1fff
|
|
-+#define DEC_EL_BYTE_POS_S 3
|
|
-+#define DEC_EL_BIT_POS_M GENMASK(3, 0)
|
|
-+
|
|
-+#define ECC_FDMADDR 0x13c
|
|
-+
|
|
-+/* ENCIDLE and DECIDLE */
|
|
-+#define ECC_IDLE BIT(0)
|
|
-+
|
|
-+#define ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt) \
|
|
-+ ((tpoecs) << ACCCON_POECS_S | (tprecs) << ACCCON_PRECS_S | \
|
|
-+ (tc2r) << ACCCON_C2R_S | (tw2r) << ACCCON_W2R_S | \
|
|
-+ (twh) << ACCCON_WH_S | (twst) << ACCCON_WST_S | (trlt))
|
|
-+
|
|
-+#define MASTER_STA_MASK (MAS_ADDR | MAS_RD | MAS_WR | \
|
|
-+ MAS_RDDLY)
|
|
-+#define NFI_RESET_TIMEOUT 1000000
|
|
-+#define NFI_CORE_TIMEOUT 500000
|
|
-+#define ECC_ENGINE_TIMEOUT 500000
|
|
-+
|
|
-+#define ECC_SECTOR_SIZE 512
|
|
-+#define ECC_PARITY_BITS 13
|
|
-+
|
|
-+#define NFI_FDM_SIZE 8
|
|
-+
|
|
-+#define MT7621_NFC_NAME "mt7621-nand"
|
|
-+
|
|
-+struct mt7621_nfc {
|
|
-+ struct nand_controller controller;
|
|
-+ struct nand_chip nand;
|
|
-+ struct clk *nfi_clk;
|
|
-+ struct device *dev;
|
|
-+
|
|
-+ void __iomem *nfi_regs;
|
|
-+ void __iomem *ecc_regs;
|
|
-+
|
|
-+ u32 spare_per_sector;
|
|
-+};
|
|
-+
|
|
-+static const u16 mt7621_nfi_page_size[] = { SZ_512, SZ_2K, SZ_4K };
|
|
-+static const u8 mt7621_nfi_spare_size[] = { 16, 26, 27, 28 };
|
|
-+static const u8 mt7621_ecc_strength[] = { 4, 6, 8, 10, 12 };
|
|
-+
|
|
-+static inline u32 nfi_read32(struct mt7621_nfc *nfc, u32 reg)
|
|
-+{
|
|
-+ return readl(nfc->nfi_regs + reg);
|
|
-+}
|
|
-+
|
|
-+static inline void nfi_write32(struct mt7621_nfc *nfc, u32 reg, u32 val)
|
|
-+{
|
|
-+ writel(val, nfc->nfi_regs + reg);
|
|
-+}
|
|
-+
|
|
-+static inline u16 nfi_read16(struct mt7621_nfc *nfc, u32 reg)
|
|
-+{
|
|
-+ return readw(nfc->nfi_regs + reg);
|
|
-+}
|
|
-+
|
|
-+static inline void nfi_write16(struct mt7621_nfc *nfc, u32 reg, u16 val)
|
|
-+{
|
|
-+ writew(val, nfc->nfi_regs + reg);
|
|
-+}
|
|
-+
|
|
-+static inline void ecc_write16(struct mt7621_nfc *nfc, u32 reg, u16 val)
|
|
-+{
|
|
-+ writew(val, nfc->ecc_regs + reg);
|
|
-+}
|
|
-+
|
|
-+static inline u32 ecc_read32(struct mt7621_nfc *nfc, u32 reg)
|
|
-+{
|
|
-+ return readl(nfc->ecc_regs + reg);
|
|
-+}
|
|
-+
|
|
-+static inline void ecc_write32(struct mt7621_nfc *nfc, u32 reg, u32 val)
|
|
-+{
|
|
-+ return writel(val, nfc->ecc_regs + reg);
|
|
-+}
|
|
-+
|
|
-+static inline u8 *oob_fdm_ptr(struct nand_chip *nand, int sect)
|
|
-+{
|
|
-+ return nand->oob_poi + sect * NFI_FDM_SIZE;
|
|
-+}
|
|
-+
|
|
-+static inline u8 *oob_ecc_ptr(struct mt7621_nfc *nfc, int sect)
|
|
-+{
|
|
-+ struct nand_chip *nand = &nfc->nand;
|
|
-+
|
|
-+ return nand->oob_poi + nand->ecc.steps * NFI_FDM_SIZE +
|
|
-+ sect * (nfc->spare_per_sector - NFI_FDM_SIZE);
|
|
-+}
|
|
-+
|
|
-+static inline u8 *page_data_ptr(struct nand_chip *nand, const u8 *buf,
|
|
-+ int sect)
|
|
-+{
|
|
-+ return (u8 *)buf + sect * nand->ecc.size;
|
|
-+}
|
|
-+
|
|
-+static int mt7621_ecc_wait_idle(struct mt7621_nfc *nfc, u32 reg)
|
|
-+{
|
|
-+ struct device *dev = nfc->dev;
|
|
-+ u32 val;
|
|
-+ int ret;
|
|
-+
|
|
-+ ret = readw_poll_timeout_atomic(nfc->ecc_regs + reg, val,
|
|
-+ val & ECC_IDLE, 10,
|
|
-+ ECC_ENGINE_TIMEOUT);
|
|
-+ if (ret) {
|
|
-+ dev_warn(dev, "ECC engine timed out entering idle mode\n");
|
|
-+ return -EIO;
|
|
-+ }
|
|
-+
|
|
-+ return 0;
|
|
-+}
|
|
-+
|
|
-+static int mt7621_ecc_decoder_wait_done(struct mt7621_nfc *nfc, u32 sect)
|
|
-+{
|
|
-+ struct device *dev = nfc->dev;
|
|
-+ u32 val;
|
|
-+ int ret;
|
|
-+
|
|
-+ ret = readw_poll_timeout_atomic(nfc->ecc_regs + ECC_DECDONE, val,
|
|
-+ val & (1 << sect), 10,
|
|
-+ ECC_ENGINE_TIMEOUT);
|
|
-+
|
|
-+ if (ret) {
|
|
-+ dev_warn(dev, "ECC decoder for sector %d timed out\n",
|
|
-+ sect);
|
|
-+ return -ETIMEDOUT;
|
|
-+ }
|
|
-+
|
|
-+ return 0;
|
|
-+}
|
|
-+
|
|
-+static void mt7621_ecc_encoder_op(struct mt7621_nfc *nfc, bool enable)
|
|
-+{
|
|
-+ mt7621_ecc_wait_idle(nfc, ECC_ENCIDLE);
|
|
-+ ecc_write16(nfc, ECC_ENCCON, enable ? ENC_EN : 0);
|
|
-+}
|
|
-+
|
|
-+static void mt7621_ecc_decoder_op(struct mt7621_nfc *nfc, bool enable)
|
|
-+{
|
|
-+ mt7621_ecc_wait_idle(nfc, ECC_DECIDLE);
|
|
-+ ecc_write16(nfc, ECC_DECCON, enable ? DEC_EN : 0);
|
|
-+}
|
|
-+
|
|
-+static int mt7621_ecc_correct_check(struct mt7621_nfc *nfc, u8 *sector_buf,
|
|
-+ u8 *fdm_buf, u32 sect)
|
|
-+{
|
|
-+ struct nand_chip *nand = &nfc->nand;
|
|
-+ u32 decnum, num_error_bits, fdm_end_bits;
|
|
-+ u32 error_locations, error_bit_loc;
|
|
-+ u32 error_byte_pos, error_bit_pos;
|
|
-+ int bitflips = 0;
|
|
-+ u32 i;
|
|
-+
|
|
-+ decnum = ecc_read32(nfc, ECC_DECENUM);
|
|
-+ num_error_bits = (decnum >> (sect << ERRNUM_S)) & ERRNUM_M;
|
|
-+ fdm_end_bits = (nand->ecc.size + NFI_FDM_SIZE) << 3;
|
|
-+
|
|
-+ if (!num_error_bits)
|
|
-+ return 0;
|
|
-+
|
|
-+ if (num_error_bits == ERRNUM_M)
|
|
-+ return -1;
|
|
-+
|
|
-+ for (i = 0; i < num_error_bits; i++) {
|
|
-+ error_locations = ecc_read32(nfc, ECC_DECEL(i / 2));
|
|
-+ error_bit_loc = (error_locations >> ((i % 2) * DEC_EL_ODD_S)) &
|
|
-+ DEC_EL_M;
|
|
-+ error_byte_pos = error_bit_loc >> DEC_EL_BYTE_POS_S;
|
|
-+ error_bit_pos = error_bit_loc & DEC_EL_BIT_POS_M;
|
|
-+
|
|
-+ if (error_bit_loc < (nand->ecc.size << 3)) {
|
|
-+ if (sector_buf) {
|
|
-+ sector_buf[error_byte_pos] ^=
|
|
-+ (1 << error_bit_pos);
|
|
-+ }
|
|
-+ } else if (error_bit_loc < fdm_end_bits) {
|
|
-+ if (fdm_buf) {
|
|
-+ fdm_buf[error_byte_pos - nand->ecc.size] ^=
|
|
-+ (1 << error_bit_pos);
|
|
-+ }
|
|
-+ }
|
|
-+
|
|
-+ bitflips++;
|
|
-+ }
|
|
-+
|
|
-+ return bitflips;
|
|
-+}
|
|
-+
|
|
-+static int mt7621_nfc_wait_write_completion(struct mt7621_nfc *nfc,
|
|
-+ struct nand_chip *nand)
|
|
-+{
|
|
-+ struct device *dev = nfc->dev;
|
|
-+ u16 val;
|
|
-+ int ret;
|
|
-+
|
|
-+ ret = readw_poll_timeout_atomic(nfc->nfi_regs + NFI_ADDRCNTR, val,
|
|
-+ ((val & SEC_CNTR_M) >> SEC_CNTR_S) >= nand->ecc.steps, 10,
|
|
-+ NFI_CORE_TIMEOUT);
|
|
-+
|
|
-+ if (ret) {
|
|
-+ dev_warn(dev, "NFI core write operation timed out\n");
|
|
-+ return -ETIMEDOUT;
|
|
-+ }
|
|
-+
|
|
-+ return ret;
|
|
-+}
|
|
-+
|
|
-+static void mt7621_nfc_hw_reset(struct mt7621_nfc *nfc)
|
|
-+{
|
|
-+ u32 val;
|
|
-+ int ret;
|
|
-+
|
|
-+ /* reset all registers and force the NFI master to terminate */
|
|
-+ nfi_write16(nfc, NFI_CON, CON_FIFO_FLUSH | CON_NFI_RST);
|
|
-+
|
|
-+ /* wait for the master to finish the last transaction */
|
|
-+ ret = readw_poll_timeout(nfc->nfi_regs + NFI_MASTER_STA, val,
|
|
-+ !(val & MASTER_STA_MASK), 50,
|
|
-+ NFI_RESET_TIMEOUT);
|
|
-+ if (ret) {
|
|
-+ dev_warn(nfc->dev, "Failed to reset NFI master in %dms\n",
|
|
-+ NFI_RESET_TIMEOUT);
|
|
-+ }
|
|
-+
|
|
-+ /* ensure any status register affected by the NFI master is reset */
|
|
-+ nfi_write16(nfc, NFI_CON, CON_FIFO_FLUSH | CON_NFI_RST);
-+ nfi_write16(nfc, NFI_STRDATA, 0);
-+}
-+
-+static inline void mt7621_nfc_hw_init(struct mt7621_nfc *nfc)
-+{
-+ u32 acccon;
-+
-+ /*
-+ * CNRNB: nand ready/busy register
-+ * -------------------------------
-+ * 7:4: timeout register for polling the NAND busy/ready signal
-+ * 0 : poll the status of the busy/ready signal after [7:4]*16 cycles.
-+ */
-+ nfi_write16(nfc, NFI_CNRNB, CB2R_TIME_M | STR_CNRNB);
-+
-+ mt7621_nfc_hw_reset(nfc);
-+
-+ /* Apply default access timing */
-+ acccon = ACCTIMING(ACCCON_POECS_DEF, ACCCON_PRECS_DEF, ACCCON_C2R_DEF,
-+ ACCCON_W2R_DEF, ACCCON_WH_DEF, ACCCON_WST_DEF,
-+ ACCCON_RLT_DEF);
-+
-+ nfi_write32(nfc, NFI_ACCCON, acccon);
-+}
-+
-+static int mt7621_nfc_send_command(struct mt7621_nfc *nfc, u8 command)
-+{
-+ struct device *dev = nfc->dev;
-+ u32 val;
-+ int ret;
-+
-+ nfi_write32(nfc, NFI_CMD, command);
-+
-+ ret = readl_poll_timeout_atomic(nfc->nfi_regs + NFI_STA, val,
-+ !(val & STA_CMD), 10,
-+ NFI_CORE_TIMEOUT);
-+ if (ret) {
-+ dev_warn(dev, "NFI core timed out entering command mode\n");
-+ return -EIO;
-+ }
-+
-+ return 0;
-+}
-+
-+static int mt7621_nfc_send_address_byte(struct mt7621_nfc *nfc, int addr)
-+{
-+ struct device *dev = nfc->dev;
-+ u32 val;
-+ int ret;
-+
-+ nfi_write32(nfc, NFI_COLADDR, addr);
-+ nfi_write32(nfc, NFI_ROWADDR, 0);
-+ nfi_write16(nfc, NFI_ADDRNOB, 1);
-+
-+ ret = readl_poll_timeout_atomic(nfc->nfi_regs + NFI_STA, val,
-+ !(val & STA_ADDR), 10,
-+ NFI_CORE_TIMEOUT);
-+ if (ret) {
-+ dev_warn(dev, "NFI core timed out entering address mode\n");
-+ return -EIO;
-+ }
-+
-+ return 0;
-+}
-+
-+static int mt7621_nfc_send_address(struct mt7621_nfc *nfc, const u8 *addr,
-+ unsigned int naddrs)
-+{
-+ int ret;
-+
-+ while (naddrs) {
-+ ret = mt7621_nfc_send_address_byte(nfc, *addr);
-+ if (ret)
-+ return ret;
-+
-+ addr++;
-+ naddrs--;
-+ }
-+
-+ return 0;
-+}
-+
-+static void mt7621_nfc_wait_pio_ready(struct mt7621_nfc *nfc)
-+{
-+ struct device *dev = nfc->dev;
-+ int ret;
-+ u16 val;
-+
-+ ret = readw_poll_timeout_atomic(nfc->nfi_regs + NFI_PIO_DIRDY, val,
-+ val & PIO_DIRDY, 10,
-+ NFI_CORE_TIMEOUT);
-+ if (ret < 0)
-+ dev_err(dev, "NFI core PIO mode not ready\n");
-+}
-+
-+static u32 mt7621_nfc_pio_read(struct mt7621_nfc *nfc, bool br)
-+{
-+ u32 reg;
-+
-+ /* after each byte read, the NFI_STA reg is reset by the hardware */
-+ reg = (nfi_read32(nfc, NFI_STA) & STA_NFI_FSM_M) >> STA_NFI_FSM_S;
-+ if (reg != STA_FSM_CUSTOM_DATA) {
-+ reg = nfi_read16(nfc, NFI_CNFG);
-+ reg |= CNFG_READ_MODE | CNFG_BYTE_RW;
-+ if (!br)
-+ reg &= ~CNFG_BYTE_RW;
-+ nfi_write16(nfc, NFI_CNFG, reg);
-+
-+ /*
-+ * set to max sector to allow the HW to continue reading over
-+ * unaligned accesses
-+ */
-+ nfi_write16(nfc, NFI_CON, CON_NFI_SEC_M | CON_NFI_BRD);
-+
-+ /* trigger to fetch data */
-+ nfi_write16(nfc, NFI_STRDATA, STR_DATA);
-+ }
-+
-+ mt7621_nfc_wait_pio_ready(nfc);
-+
-+ return nfi_read32(nfc, NFI_DATAR);
-+}
-+
-+static void mt7621_nfc_read_data(struct mt7621_nfc *nfc, u8 *buf, u32 len)
-+{
-+ while (((uintptr_t)buf & 3) && len) {
-+ *buf = mt7621_nfc_pio_read(nfc, true);
-+ buf++;
-+ len--;
-+ }
-+
-+ while (len >= 4) {
-+ *(u32 *)buf = mt7621_nfc_pio_read(nfc, false);
-+ buf += 4;
-+ len -= 4;
-+ }
-+
-+ while (len) {
-+ *buf = mt7621_nfc_pio_read(nfc, true);
-+ buf++;
-+ len--;
-+ }
-+}
-+
-+static void mt7621_nfc_read_data_discard(struct mt7621_nfc *nfc, u32 len)
-+{
-+ while (len >= 4) {
-+ mt7621_nfc_pio_read(nfc, false);
-+ len -= 4;
-+ }
-+
-+ while (len) {
-+ mt7621_nfc_pio_read(nfc, true);
-+ len--;
-+ }
-+}
-+
-+static void mt7621_nfc_pio_write(struct mt7621_nfc *nfc, u32 val, bool bw)
-+{
-+ u32 reg;
-+
-+ reg = (nfi_read32(nfc, NFI_STA) & STA_NFI_FSM_M) >> STA_NFI_FSM_S;
-+ if (reg != STA_FSM_CUSTOM_DATA) {
-+ reg = nfi_read16(nfc, NFI_CNFG);
-+ reg &= ~(CNFG_READ_MODE | CNFG_BYTE_RW);
-+ if (bw)
-+ reg |= CNFG_BYTE_RW;
-+ nfi_write16(nfc, NFI_CNFG, reg);
-+
-+ nfi_write16(nfc, NFI_CON, CON_NFI_SEC_M | CON_NFI_BWR);
-+ nfi_write16(nfc, NFI_STRDATA, STR_DATA);
-+ }
-+
-+ mt7621_nfc_wait_pio_ready(nfc);
-+ nfi_write32(nfc, NFI_DATAW, val);
-+}
-+
-+static void mt7621_nfc_write_data(struct mt7621_nfc *nfc, const u8 *buf,
-+ u32 len)
-+{
-+ while (((uintptr_t)buf & 3) && len) {
-+ mt7621_nfc_pio_write(nfc, *buf, true);
-+ buf++;
-+ len--;
-+ }
-+
-+ while (len >= 4) {
-+ mt7621_nfc_pio_write(nfc, *(const u32 *)buf, false);
-+ buf += 4;
-+ len -= 4;
-+ }
-+
-+ while (len) {
-+ mt7621_nfc_pio_write(nfc, *buf, true);
-+ buf++;
-+ len--;
-+ }
-+}
-+
-+static void mt7621_nfc_write_data_empty(struct mt7621_nfc *nfc, u32 len)
-+{
-+ while (len >= 4) {
-+ mt7621_nfc_pio_write(nfc, 0xffffffff, false);
-+ len -= 4;
-+ }
-+
-+ while (len) {
-+ mt7621_nfc_pio_write(nfc, 0xff, true);
-+ len--;
-+ }
-+}
-+
-+static int mt7621_nfc_dev_ready(struct mt7621_nfc *nfc,
-+ unsigned int timeout_ms)
-+{
-+ u32 val;
-+
-+ return readl_poll_timeout_atomic(nfc->nfi_regs + NFI_STA, val,
-+ !(val & STA_BUSY), 10,
-+ timeout_ms * 1000);
-+}
-+
-+static int mt7621_nfc_exec_instr(struct nand_chip *nand,
-+ const struct nand_op_instr *instr)
-+{
-+ struct mt7621_nfc *nfc = nand_get_controller_data(nand);
-+
-+ switch (instr->type) {
-+ case NAND_OP_CMD_INSTR:
-+ mt7621_nfc_hw_reset(nfc);
-+ nfi_write16(nfc, NFI_CNFG, CNFG_OP_CUSTOM << CNFG_OP_MODE_S);
-+ return mt7621_nfc_send_command(nfc, instr->ctx.cmd.opcode);
-+ case NAND_OP_ADDR_INSTR:
-+ return mt7621_nfc_send_address(nfc, instr->ctx.addr.addrs,
-+ instr->ctx.addr.naddrs);
-+ case NAND_OP_DATA_IN_INSTR:
-+ mt7621_nfc_read_data(nfc, instr->ctx.data.buf.in,
-+ instr->ctx.data.len);
-+ return 0;
-+ case NAND_OP_DATA_OUT_INSTR:
-+ mt7621_nfc_write_data(nfc, instr->ctx.data.buf.out,
-+ instr->ctx.data.len);
-+ return 0;
-+ case NAND_OP_WAITRDY_INSTR:
-+ return mt7621_nfc_dev_ready(nfc,
-+ instr->ctx.waitrdy.timeout_ms);
-+ default:
-+ WARN_ONCE(1, "unsupported NAND instruction type: %d\n",
-+ instr->type);
-+
-+ return -EINVAL;
-+ }
-+}
-+
-+static int mt7621_nfc_exec_op(struct nand_chip *nand,
-+ const struct nand_operation *op, bool check_only)
-+{
-+ struct mt7621_nfc *nfc = nand_get_controller_data(nand);
-+ int i, ret;
-+
-+ if (check_only)
-+ return 0;
-+
-+ /* Only CS0 available */
-+ nfi_write16(nfc, NFI_CSEL, 0);
-+
-+ for (i = 0; i < op->ninstrs; i++) {
-+ ret = mt7621_nfc_exec_instr(nand, &op->instrs[i]);
-+ if (ret)
-+ return ret;
-+ }
-+
-+ return 0;
-+}
-+
-+static int mt7621_nfc_setup_data_interface(struct nand_chip *nand, int csline,
-+ const struct nand_data_interface *conf)
-+{
-+ struct mt7621_nfc *nfc = nand_get_controller_data(nand);
-+ const struct nand_sdr_timings *timings;
-+ u32 acccon, temp, rate, tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt;
-+
-+ if (!nfc->nfi_clk)
-+ return -ENOTSUPP;
-+
-+ timings = nand_get_sdr_timings(conf);
-+ if (IS_ERR(timings))
-+ return -ENOTSUPP;
-+
-+ rate = clk_get_rate(nfc->nfi_clk);
-+
-+ /* turn clock rate into KHZ */
-+ rate /= 1000;
-+
-+ tpoecs = max(timings->tALH_min, timings->tCLH_min) / 1000;
-+ tpoecs = DIV_ROUND_UP(tpoecs * rate, 1000000);
-+ tpoecs = min_t(u32, tpoecs, ACCCON_POECS_MAX);
-+
-+ tprecs = max(timings->tCLS_min, timings->tALS_min) / 1000;
-+ tprecs = DIV_ROUND_UP(tprecs * rate, 1000000);
-+ tprecs = min_t(u32, tprecs, ACCCON_PRECS_MAX);
-+
-+ /* sdr interface has no tCR which means CE# low to RE# low */
-+ tc2r = 0;
-+
-+ tw2r = timings->tWHR_min / 1000;
-+ tw2r = DIV_ROUND_UP(tw2r * rate, 1000000);
-+ tw2r = DIV_ROUND_UP(tw2r - 1, 2);
-+ tw2r = min_t(u32, tw2r, ACCCON_W2R_MAX);
-+
-+ twh = max(timings->tREH_min, timings->tWH_min) / 1000;
-+ twh = DIV_ROUND_UP(twh * rate, 1000000) - 1;
-+ twh = min_t(u32, twh, ACCCON_WH_MAX);
-+
-+ /* Calculate real WE#/RE# hold time in nanosecond */
-+ temp = (twh + 1) * 1000000 / rate;
-+ /* nanosecond to picosecond */
-+ temp *= 1000;
-+
-+ /*
-+ * WE# low level time should be expaned to meet WE# pulse time
-+ * and WE# cycle time at the same time.
-+ */
-+ if (temp < timings->tWC_min)
-+ twst = timings->tWC_min - temp;
-+ else
-+ twst = 0;
-+ twst = max(timings->tWP_min, twst) / 1000;
-+ twst = DIV_ROUND_UP(twst * rate, 1000000) - 1;
-+ twst = min_t(u32, twst, ACCCON_WST_MAX);
-+
-+ /*
-+ * RE# low level time should be expaned to meet RE# pulse time
-+ * and RE# cycle time at the same time.
-+ */
-+ if (temp < timings->tRC_min)
-+ trlt = timings->tRC_min - temp;
-+ else
-+ trlt = 0;
-+ trlt = max(trlt, timings->tRP_min) / 1000;
-+ trlt = DIV_ROUND_UP(trlt * rate, 1000000) - 1;
-+ trlt = min_t(u32, trlt, ACCCON_RLT_MAX);
-+
-+ if (csline == NAND_DATA_IFACE_CHECK_ONLY) {
-+ if (twst < ACCCON_WST_MIN || trlt < ACCCON_RLT_MIN)
-+ return -ENOTSUPP;
-+ }
-+
-+ acccon = ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt);
-+
-+ dev_info(nfc->dev, "Using programmed access timing: %08x\n", acccon);
-+
-+ nfi_write32(nfc, NFI_ACCCON, acccon);
-+
-+ return 0;
-+}
-+
-+static int mt7621_nfc_calc_ecc_strength(struct mt7621_nfc *nfc,
-+ u32 avail_ecc_bytes)
-+{
-+ struct nand_chip *nand = &nfc->nand;
-+ struct mtd_info *mtd = nand_to_mtd(nand);
-+ u32 strength;
-+ int i;
-+
-+ strength = avail_ecc_bytes * 8 / ECC_PARITY_BITS;
-+
-+ /* Find the closest supported ecc strength */
-+ for (i = ARRAY_SIZE(mt7621_ecc_strength) - 1; i >= 0; i--) {
-+ if (mt7621_ecc_strength[i] <= strength)
-+ break;
-+ }
-+
-+ if (unlikely(i < 0)) {
-+ dev_err(nfc->dev, "OOB size (%u) is not supported\n",
-+ mtd->oobsize);
-+ return -EINVAL;
-+ }
-+
-+ nand->ecc.strength = mt7621_ecc_strength[i];
-+ nand->ecc.bytes =
-+ DIV_ROUND_UP(nand->ecc.strength * ECC_PARITY_BITS, 8);
-+
-+ dev_info(nfc->dev, "ECC strength adjusted to %u bits\n",
-+ nand->ecc.strength);
-+
-+ return i;
-+}
-+
-+static int mt7621_nfc_set_spare_per_sector(struct mt7621_nfc *nfc)
-+{
-+ struct nand_chip *nand = &nfc->nand;
-+ struct mtd_info *mtd = nand_to_mtd(nand);
-+ u32 size;
-+ int i;
-+
-+ size = nand->ecc.bytes + NFI_FDM_SIZE;
-+
-+ /* Find the closest supported spare size */
-+ for (i = 0; i < ARRAY_SIZE(mt7621_nfi_spare_size); i++) {
-+ if (mt7621_nfi_spare_size[i] >= size)
-+ break;
-+ }
-+
-+ if (unlikely(i >= ARRAY_SIZE(mt7621_nfi_spare_size))) {
-+ dev_err(nfc->dev, "OOB size (%u) is not supported\n",
-+ mtd->oobsize);
-+ return -EINVAL;
-+ }
-+
-+ nfc->spare_per_sector = mt7621_nfi_spare_size[i];
-+
-+ return i;
-+}
-+
-+static int mt7621_nfc_ecc_init(struct mt7621_nfc *nfc)
-+{
-+ struct nand_chip *nand = &nfc->nand;
-+ struct mtd_info *mtd = nand_to_mtd(nand);
-+ u32 spare_per_sector, encode_block_size, decode_block_size;
-+ u32 ecc_enccfg, ecc_deccfg;
-+ int ecc_cap;
-+
-+ /* Only hardware ECC mode is supported */
-+ if (nand->ecc.mode != NAND_ECC_HW_SYNDROME) {
-+ dev_err(nfc->dev, "Only hardware ECC mode is supported\n");
-+ return -EINVAL;
-+ }
-+
-+ nand->ecc.size = ECC_SECTOR_SIZE;
-+ nand->ecc.steps = mtd->writesize / nand->ecc.size;
-+
-+ spare_per_sector = mtd->oobsize / nand->ecc.steps;
-+
-+ ecc_cap = mt7621_nfc_calc_ecc_strength(nfc,
-+ spare_per_sector - NFI_FDM_SIZE);
-+ if (ecc_cap < 0)
-+ return ecc_cap;
-+
-+ /* Sector + FDM */
-+ encode_block_size = (nand->ecc.size + NFI_FDM_SIZE) * 8;
-+ ecc_enccfg = ecc_cap | (ENC_MODE_NFI << ENC_MODE_S) |
-+ (encode_block_size << ENC_CNFG_MSG_S);
-+
-+ /* Sector + FDM + ECC parity bits */
-+ decode_block_size = ((nand->ecc.size + NFI_FDM_SIZE) * 8) +
-+ nand->ecc.strength * ECC_PARITY_BITS;
-+ ecc_deccfg = ecc_cap | (DEC_MODE_NFI << DEC_MODE_S) |
-+ (decode_block_size << DEC_CS_S) |
-+ (DEC_CON_EL << DEC_CON_S) | DEC_EMPTY_EN;
-+
-+ mt7621_ecc_encoder_op(nfc, false);
-+ ecc_write32(nfc, ECC_ENCCNFG, ecc_enccfg);
-+
-+ mt7621_ecc_decoder_op(nfc, false);
-+ ecc_write32(nfc, ECC_DECCNFG, ecc_deccfg);
-+
-+ return 0;
-+}
-+
-+static int mt7621_nfc_set_page_format(struct mt7621_nfc *nfc)
-+{
-+ struct nand_chip *nand = &nfc->nand;
-+ struct mtd_info *mtd = nand_to_mtd(nand);
-+ int i, spare_size;
-+ u32 pagefmt;
-+
-+ spare_size = mt7621_nfc_set_spare_per_sector(nfc);
-+ if (spare_size < 0)
-+ return spare_size;
-+
-+ for (i = 0; i < ARRAY_SIZE(mt7621_nfi_page_size); i++) {
-+ if (mt7621_nfi_page_size[i] == mtd->writesize)
-+ break;
-+ }
-+
-+ if (unlikely(i >= ARRAY_SIZE(mt7621_nfi_page_size))) {
-+ dev_err(nfc->dev, "Page size (%u) is not supported\n",
-+ mtd->writesize);
-+ return -EINVAL;
-+ }
-+
-+ pagefmt = i | (spare_size << PAGEFMT_SPARE_S) |
-+ (NFI_FDM_SIZE << PAGEFMT_FDM_S) |
-+ (NFI_FDM_SIZE << PAGEFMT_FDM_ECC_S);
-+
-+ nfi_write16(nfc, NFI_PAGEFMT, pagefmt);
-+
-+ return 0;
-+}
-+
-+static int mt7621_nfc_attach_chip(struct nand_chip *nand)
-+{
-+ struct mt7621_nfc *nfc = nand_get_controller_data(nand);
-+ int ret;
-+
-+ if (nand->options & NAND_BUSWIDTH_16) {
-+ dev_err(nfc->dev, "16-bit buswidth is not supported");
-+ return -EINVAL;
-+ }
-+
-+ ret = mt7621_nfc_ecc_init(nfc);
-+ if (ret)
-+ return ret;
-+
-+ return mt7621_nfc_set_page_format(nfc);
-+}
-+
-+static const struct nand_controller_ops mt7621_nfc_controller_ops = {
-+ .attach_chip = mt7621_nfc_attach_chip,
-+ .exec_op = mt7621_nfc_exec_op,
-+ .setup_data_interface = mt7621_nfc_setup_data_interface,
-+};
-+
-+static int mt7621_nfc_ooblayout_free(struct mtd_info *mtd, int section,
-+ struct mtd_oob_region *oob_region)
-+{
-+ struct nand_chip *nand = mtd_to_nand(mtd);
-+
-+ if (section >= nand->ecc.steps)
-+ return -ERANGE;
-+
-+ oob_region->length = NFI_FDM_SIZE - 1;
-+ oob_region->offset = section * NFI_FDM_SIZE + 1;
-+
-+ return 0;
-+}
-+
-+static int mt7621_nfc_ooblayout_ecc(struct mtd_info *mtd, int section,
-+ struct mtd_oob_region *oob_region)
-+{
-+ struct nand_chip *nand = mtd_to_nand(mtd);
-+
-+ if (section)
-+ return -ERANGE;
-+
-+ oob_region->offset = NFI_FDM_SIZE * nand->ecc.steps;
-+ oob_region->length = mtd->oobsize - oob_region->offset;
-+
-+ return 0;
-+}
-+
-+static const struct mtd_ooblayout_ops mt7621_nfc_ooblayout_ops = {
-+ .free = mt7621_nfc_ooblayout_free,
-+ .ecc = mt7621_nfc_ooblayout_ecc,
-+};
-+
-+static void mt7621_nfc_write_fdm(struct mt7621_nfc *nfc)
-+{
-+ struct nand_chip *nand = &nfc->nand;
-+ u32 vall, valm;
-+ u8 *oobptr;
-+ int i, j;
-+
-+ for (i = 0; i < nand->ecc.steps; i++) {
-+ vall = 0;
-+ valm = 0;
-+ oobptr = oob_fdm_ptr(nand, i);
-+
-+ for (j = 0; j < 4; j++)
-+ vall |= (u32)oobptr[j] << (j * 8);
-+
-+ for (j = 0; j < 4; j++)
-+ valm |= (u32)oobptr[j + 4] << ((j - 4) * 8);
-+
-+ nfi_write32(nfc, NFI_FDML(i), vall);
-+ nfi_write32(nfc, NFI_FDMM(i), valm);
-+ }
-+}
-+
-+static void mt7621_nfc_read_sector_fdm(struct mt7621_nfc *nfc, u32 sect)
-+{
-+ struct nand_chip *nand = &nfc->nand;
-+ u32 vall, valm;
-+ u8 *oobptr;
-+ int i;
-+
-+ vall = nfi_read32(nfc, NFI_FDML(sect));
-+ valm = nfi_read32(nfc, NFI_FDMM(sect));
-+ oobptr = oob_fdm_ptr(nand, sect);
-+
-+ for (i = 0; i < 4; i++)
-+ oobptr[i] = (vall >> (i * 8)) & 0xff;
-+
-+ for (i = 0; i < 4; i++)
-+ oobptr[i + 4] = (valm >> (i * 8)) & 0xff;
-+}
-+
-+static int mt7621_nfc_read_page_hwecc(struct nand_chip *nand, uint8_t *buf,
-+ int oob_required, int page)
-+{
-+ struct mt7621_nfc *nfc = nand_get_controller_data(nand);
-+ struct mtd_info *mtd = nand_to_mtd(nand);
-+ int bitflips = 0;
-+ int rc, i;
-+
-+ nand_read_page_op(nand, page, 0, NULL, 0);
-+
-+ nfi_write16(nfc, NFI_CNFG, (CNFG_OP_CUSTOM << CNFG_OP_MODE_S) |
-+ CNFG_READ_MODE | CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
-+
-+ mt7621_ecc_decoder_op(nfc, true);
-+
-+ nfi_write16(nfc, NFI_CON,
-+ CON_NFI_BRD | (nand->ecc.steps << CON_NFI_SEC_S));
-+
-+ for (i = 0; i < nand->ecc.steps; i++) {
-+ if (buf)
-+ mt7621_nfc_read_data(nfc, page_data_ptr(nand, buf, i),
-+ nand->ecc.size);
-+ else
-+ mt7621_nfc_read_data_discard(nfc, nand->ecc.size);
-+
-+ rc = mt7621_ecc_decoder_wait_done(nfc, i);
-+
-+ mt7621_nfc_read_sector_fdm(nfc, i);
-+
-+ if (rc < 0) {
-+ bitflips = -EIO;
-+ continue;
-+ }
-+
-+ rc = mt7621_ecc_correct_check(nfc,
-+ buf ? page_data_ptr(nand, buf, i) : NULL,
-+ oob_fdm_ptr(nand, i), i);
-+
-+ if (rc < 0) {
-+ dev_warn(nfc->dev,
-+ "Uncorrectable ECC error at page %d.%d\n",
-+ page, i);
-+ bitflips = -EBADMSG;
-+ mtd->ecc_stats.failed++;
-+ } else if (bitflips >= 0) {
-+ bitflips += rc;
-+ mtd->ecc_stats.corrected += rc;
-+ }
-+ }
-+
-+ mt7621_ecc_decoder_op(nfc, false);
-+
-+ nfi_write16(nfc, NFI_CON, 0);
-+
-+ return bitflips;
-+}
-+
-+static int mt7621_nfc_read_page_raw(struct nand_chip *nand, uint8_t *buf,
-+ int oob_required, int page)
-+{
-+ struct mt7621_nfc *nfc = nand_get_controller_data(nand);
-+ int i;
-+
-+ nand_read_page_op(nand, page, 0, NULL, 0);
-+
-+ nfi_write16(nfc, NFI_CNFG, (CNFG_OP_CUSTOM << CNFG_OP_MODE_S) |
-+ CNFG_READ_MODE);
-+
-+ nfi_write16(nfc, NFI_CON,
-+ CON_NFI_BRD | (nand->ecc.steps << CON_NFI_SEC_S));
-+
-+ for (i = 0; i < nand->ecc.steps; i++) {
-+ /* Read data */
-+ if (buf)
-+ mt7621_nfc_read_data(nfc, page_data_ptr(nand, buf, i),
-+ nand->ecc.size);
-+ else
-+ mt7621_nfc_read_data_discard(nfc, nand->ecc.size);
-+
-+ /* Read FDM */
-+ mt7621_nfc_read_data(nfc, oob_fdm_ptr(nand, i), NFI_FDM_SIZE);
-+
-+ /* Read ECC parity data */
-+ mt7621_nfc_read_data(nfc, oob_ecc_ptr(nfc, i),
-+ nfc->spare_per_sector - NFI_FDM_SIZE);
-+ }
-+
-+ nfi_write16(nfc, NFI_CON, 0);
-+
-+ return 0;
-+}
-+
-+static int mt7621_nfc_read_oob_hwecc(struct nand_chip *nand, int page)
-+{
-+ return mt7621_nfc_read_page_hwecc(nand, NULL, 1, page);
-+}
-+
-+static int mt7621_nfc_read_oob_raw(struct nand_chip *nand, int page)
-+{
-+ return mt7621_nfc_read_page_raw(nand, NULL, 1, page);
-+}
-+
-+static int mt7621_nfc_check_empty_page(struct nand_chip *nand, const u8 *buf)
-+{
-+ struct mtd_info *mtd = nand_to_mtd(nand);
-+ uint32_t i, j;
-+ u8 *oobptr;
-+
-+ if (buf) {
-+ for (i = 0; i < mtd->writesize; i++)
-+ if (buf[i] != 0xff)
-+ return 0;
-+ }
-+
-+ for (i = 0; i < nand->ecc.steps; i++) {
-+ oobptr = oob_fdm_ptr(nand, i);
-+ for (j = 0; j < NFI_FDM_SIZE; j++)
-+ if (oobptr[j] != 0xff)
-+ return 0;
-+ }
-+
-+ return 1;
-+}
-+
-+static int mt7621_nfc_write_page_hwecc(struct nand_chip *nand,
-+ const uint8_t *buf, int oob_required,
-+ int page)
-+{
-+ struct mt7621_nfc *nfc = nand_get_controller_data(nand);
-+ struct mtd_info *mtd = nand_to_mtd(nand);
-+
-+ if (mt7621_nfc_check_empty_page(nand, buf)) {
-+ /*
-+ * MT7621 ECC engine always generates parity code for input
-+ * pages, even for empty pages. Doing so will write back ECC
-+ * parity code to the oob region, which means such pages will
-+ * no longer be empty pages.
-+ *
-+ * To avoid this, stop write operation if current page is an
-+ * empty page.
-+ */
-+ return 0;
-+ }
-+
-+ nand_prog_page_begin_op(nand, page, 0, NULL, 0);
-+
-+ nfi_write16(nfc, NFI_CNFG, (CNFG_OP_CUSTOM << CNFG_OP_MODE_S) |
-+ CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
-+
-+ mt7621_ecc_encoder_op(nfc, true);
-+
-+ mt7621_nfc_write_fdm(nfc);
-+
-+ nfi_write16(nfc, NFI_CON,
-+ CON_NFI_BWR | (nand->ecc.steps << CON_NFI_SEC_S));
-+
-+ if (buf)
-+ mt7621_nfc_write_data(nfc, buf, mtd->writesize);
-+ else
-+ mt7621_nfc_write_data_empty(nfc, mtd->writesize);
-+
-+ mt7621_nfc_wait_write_completion(nfc, nand);
-+
-+ mt7621_ecc_encoder_op(nfc, false);
-+
-+ nfi_write16(nfc, NFI_CON, 0);
-+
-+ return nand_prog_page_end_op(nand);
-+}
-+
-+static int mt7621_nfc_write_page_raw(struct nand_chip *nand,
-+ const uint8_t *buf, int oob_required,
-+ int page)
-+{
-+ struct mt7621_nfc *nfc = nand_get_controller_data(nand);
-+ int i;
-+
-+ nand_prog_page_begin_op(nand, page, 0, NULL, 0);
-+
-+ nfi_write16(nfc, NFI_CNFG, (CNFG_OP_CUSTOM << CNFG_OP_MODE_S));
-+
-+ nfi_write16(nfc, NFI_CON,
-+ CON_NFI_BWR | (nand->ecc.steps << CON_NFI_SEC_S));
-+
-+ for (i = 0; i < nand->ecc.steps; i++) {
-+ /* Write data */
-+ if (buf)
-+ mt7621_nfc_write_data(nfc, page_data_ptr(nand, buf, i),
-+ nand->ecc.size);
-+ else
-+ mt7621_nfc_write_data_empty(nfc, nand->ecc.size);
-+
-+ /* Write FDM */
-+ mt7621_nfc_write_data(nfc, oob_fdm_ptr(nand, i),
-+ NFI_FDM_SIZE);
-+
-+ /* Write dummy ECC parity data */
-+ mt7621_nfc_write_data_empty(nfc, nfc->spare_per_sector -
-+ NFI_FDM_SIZE);
-+ }
-+
-+ mt7621_nfc_wait_write_completion(nfc, nand);
-+
-+ nfi_write16(nfc, NFI_CON, 0);
-+
-+ return nand_prog_page_end_op(nand);
-+}
-+
-+static int mt7621_nfc_write_oob_hwecc(struct nand_chip *nand, int page)
-+{
-+ return mt7621_nfc_write_page_hwecc(nand, NULL, 1, page);
-+}
-+
-+static int mt7621_nfc_write_oob_raw(struct nand_chip *nand, int page)
-+{
-+ return mt7621_nfc_write_page_raw(nand, NULL, 1, page);
-+}
-+
-+static int mt7621_nfc_init_chip(struct mt7621_nfc *nfc)
-+{
-+ struct nand_chip *nand = &nfc->nand;
-+ struct mtd_info *mtd;
-+ int ret;
-+
-+ nand->controller = &nfc->controller;
-+ nand_set_controller_data(nand, (void *)nfc);
-+ nand_set_flash_node(nand, nfc->dev->of_node);
-+
-+ nand->options |= NAND_USE_BOUNCE_BUFFER | NAND_NO_SUBPAGE_WRITE;
-+ if (!nfc->nfi_clk)
-+ nand->options |= NAND_KEEP_TIMINGS;
-+
-+ nand->ecc.mode = NAND_ECC_HW_SYNDROME;
-+ nand->ecc.read_page = mt7621_nfc_read_page_hwecc;
-+ nand->ecc.read_page_raw = mt7621_nfc_read_page_raw;
-+ nand->ecc.write_page = mt7621_nfc_write_page_hwecc;
-+ nand->ecc.write_page_raw = mt7621_nfc_write_page_raw;
-+ nand->ecc.read_oob = mt7621_nfc_read_oob_hwecc;
-+ nand->ecc.read_oob_raw = mt7621_nfc_read_oob_raw;
-+ nand->ecc.write_oob = mt7621_nfc_write_oob_hwecc;
-+ nand->ecc.write_oob_raw = mt7621_nfc_write_oob_raw;
-+
-+ mtd = nand_to_mtd(nand);
-+ mtd->owner = THIS_MODULE;
-+ mtd->dev.parent = nfc->dev;
-+ mtd->name = MT7621_NFC_NAME;
-+ mtd_set_ooblayout(mtd, &mt7621_nfc_ooblayout_ops);
-+
-+ mt7621_nfc_hw_init(nfc);
-+
-+ ret = nand_scan(nand, 1);
-+ if (ret)
-+ return ret;
-+
-+ ret = mtd_device_register(mtd, NULL, 0);
-+ if (ret) {
-+ dev_err(nfc->dev, "Failed to register MTD: %d\n", ret);
-+ nand_release(nand);
-+ return ret;
-+ }
-+
-+ return 0;
-+}
-+
-+static int mt7621_nfc_probe(struct platform_device *pdev)
-+{
-+ struct device *dev = &pdev->dev;
-+ struct mt7621_nfc *nfc;
-+ struct resource *res;
-+ int ret;
-+
-+ nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
-+ if (!nfc)
-+ return -ENOMEM;
-+
-+ nand_controller_init(&nfc->controller);
-+ nfc->controller.ops = &mt7621_nfc_controller_ops;
-+ nfc->dev = dev;
-+
-+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nfi");
-+ nfc->nfi_regs = devm_ioremap_resource(dev, res);
-+ if (IS_ERR(nfc->nfi_regs)) {
-+ ret = PTR_ERR(nfc->nfi_regs);
-+ return ret;
-+ }
-+
-+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ecc");
-+ nfc->ecc_regs = devm_ioremap_resource(dev, res);
-+ if (IS_ERR(nfc->ecc_regs)) {
-+ ret = PTR_ERR(nfc->ecc_regs);
-+ return ret;
-+ }
-+
-+ nfc->nfi_clk = devm_clk_get(dev, "nfi_clk");
-+ if (IS_ERR(nfc->nfi_clk)) {
-+ dev_warn(dev, "nfi clk not provided\n");
-+ nfc->nfi_clk = NULL;
-+ } else {
-+ ret = clk_prepare_enable(nfc->nfi_clk);
-+ if (ret) {
-+ dev_err(dev, "Failed to enable nfi core clock\n");
-+ return ret;
-+ }
-+ }
-+
-+ platform_set_drvdata(pdev, nfc);
-+
-+ ret = mt7621_nfc_init_chip(nfc);
-+ if (ret) {
-+ dev_err(dev, "Failed to initialize nand chip\n");
-+ goto clk_disable;
-+ }
-+
-+ return 0;
-+
-+clk_disable:
-+ clk_disable_unprepare(nfc->nfi_clk);
-+
-+ return ret;
-+}
-+
-+static int mt7621_nfc_remove(struct platform_device *pdev)
-+{
-+ struct mt7621_nfc *nfc = platform_get_drvdata(pdev);
-+
-+ nand_release(&nfc->nand);
-+ clk_disable_unprepare(nfc->nfi_clk);
-+
-+ return 0;
-+}
-+
-+static const struct of_device_id mt7621_nfc_id_table[] = {
-+ { .compatible = "mediatek,mt7621-nfc" },
-+ { },
-+};
-+MODULE_DEVICE_TABLE(of, match);
-+
-+static struct platform_driver mt7621_nfc_driver = {
-+ .probe = mt7621_nfc_probe,
-+ .remove = mt7621_nfc_remove,
-+ .driver = {
-+ .name = MT7621_NFC_NAME,
-+ .owner = THIS_MODULE,
-+ .of_match_table = mt7621_nfc_id_table,
-+ },
-+};
-+module_platform_driver(mt7621_nfc_driver);
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Weijie Gao <weijie.gao@mediatek.com>");
-+MODULE_DESCRIPTION("MediaTek MT7621 NAND Flash Controller driver");
+
--
2.32.0


From e4726abccf94cd3e111c7d54fa5c66f9568f3a1a Mon Sep 17 00:00:00 2001
From: Stijn Tintel <stijn@linux-ipv6.be>
Date: Wed, 19 Jan 2022 17:59:51 +0200
Subject: [PATCH 3/8] ramips: mt7621_nand: reduce log verbosity

Avoid flooding the log with the message below by increasing the log
level to debug:

mt7621-nand 1e003000.nand: Using programmed access timing: 31c07388

Signed-off-by: Stijn Tintel <stijn@linux-ipv6.be>
---
target/linux/ramips/files/drivers/mtd/nand/raw/mt7621_nand.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/linux/ramips/files/drivers/mtd/nand/raw/mt7621_nand.c b/target/linux/ramips/files/drivers/mtd/nand/raw/mt7621_nand.c
index 678295a68db3..16493a2e974c 100644
--- a/target/linux/ramips/files/drivers/mtd/nand/raw/mt7621_nand.c
+++ b/target/linux/ramips/files/drivers/mtd/nand/raw/mt7621_nand.c
@@ -759,7 +759,7 @@ static int mt7621_nfc_setup_data_interface(struct nand_chip *nand, int csline,

 acccon = ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt);

- dev_info(nfc->dev, "Using programmed access timing: %08x\n", acccon);
+ dev_dbg(nfc->dev, "Using programmed access timing: %08x\n", acccon);

 nfi_write32(nfc, NFI_ACCCON, acccon);

--
2.32.0


From b7fcea8ff0ed783310ac72ad14c8281a6c05f6b5 Mon Sep 17 00:00:00 2001
From: Felix Fietkau <nbd@nbd.name>
Date: Wed, 9 Mar 2022 20:46:21 +0100
Subject: [PATCH 4/8] ramips: mt7621_nand: initialize ECC_FDMADDR

This is needed for the ECC controller to access FDM data

Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
target/linux/ramips/files/drivers/mtd/nand/raw/mt7621_nand.c | 4 ++++
1 file changed, 4 insertions(+)

diff --git a/target/linux/ramips/files/drivers/mtd/nand/raw/mt7621_nand.c b/target/linux/ramips/files/drivers/mtd/nand/raw/mt7621_nand.c
index 16493a2e974c..0751e59c3741 100644
--- a/target/linux/ramips/files/drivers/mtd/nand/raw/mt7621_nand.c
+++ b/target/linux/ramips/files/drivers/mtd/nand/raw/mt7621_nand.c
@@ -214,6 +214,7 @@ struct mt7621_nfc {
 struct clk *nfi_clk;
 struct device *dev;

+ u32 nfi_base;
 void __iomem *nfi_regs;
 void __iomem *ecc_regs;

@@ -860,6 +861,8 @@ static int mt7621_nfc_ecc_init(struct mt7621_nfc *nfc)
 (decode_block_size << DEC_CS_S) |
 (DEC_CON_EL << DEC_CON_S) | DEC_EMPTY_EN;

+ ecc_write32(nfc, ECC_FDMADDR, nfc->nfi_base + NFI_FDML(0));
+
 mt7621_ecc_encoder_op(nfc, false);
 ecc_write32(nfc, ECC_ENCCNFG, ecc_enccfg);

@@ -1277,6 +1280,7 @@ static int mt7621_nfc_probe(struct platform_device *pdev)
 nfc->dev = dev;

 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nfi");
+ nfc->nfi_base = res->start;
 nfc->nfi_regs = devm_ioremap_resource(dev, res);
 if (IS_ERR(nfc->nfi_regs)) {
 ret = PTR_ERR(nfc->nfi_regs);
--
2.32.0


From b848a7ab2db5e74aa4baed5842af72395e2c763e Mon Sep 17 00:00:00 2001
From: Felix Fietkau <nbd@nbd.name>
Date: Thu, 10 Mar 2022 11:45:00 +0100
Subject: [PATCH 5/8] ramips: enable support for mtk_bmt in the nand flash
driver

Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
.../linux/ramips/files/drivers/mtd/nand/raw/mt7621_nand.c | 8 ++++++++
1 file changed, 8 insertions(+)

diff --git a/target/linux/ramips/files/drivers/mtd/nand/raw/mt7621_nand.c b/target/linux/ramips/files/drivers/mtd/nand/raw/mt7621_nand.c
index 0751e59c3741..1072450898ab 100644
--- a/target/linux/ramips/files/drivers/mtd/nand/raw/mt7621_nand.c
+++ b/target/linux/ramips/files/drivers/mtd/nand/raw/mt7621_nand.c
@@ -18,6 +18,7 @@
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/rawnand.h>
 #include <linux/mtd/partitions.h>
+#include <linux/mtd/mtk_bmt.h>
 #include <linux/platform_device.h>
 #include <asm/addrspace.h>

@@ -1254,9 +1255,12 @@ static int mt7621_nfc_init_chip(struct mt7621_nfc *nfc)
 if (ret)
 return ret;

+ mtk_bmt_attach(mtd);
+
 ret = mtd_device_register(mtd, NULL, 0);
 if (ret) {
 dev_err(nfc->dev, "Failed to register MTD: %d\n", ret);
+ mtk_bmt_detach(mtd);
 nand_release(nand);
 return ret;
 }
@@ -1325,7 +1329,11 @@ clk_disable:
 static int mt7621_nfc_remove(struct platform_device *pdev)
 {
 struct mt7621_nfc *nfc = platform_get_drvdata(pdev);
+ struct nand_chip *nand = &nfc->nand;
+ struct mtd_info *mtd = nand_to_mtd(nand);

+ mtk_bmt_detach(mtd);
+ mtd_device_unregister(mtd);
 nand_release(&nfc->nand);
 clk_disable_unprepare(nfc->nfi_clk);

--
2.32.0


From 110ae2a34144df4dde224230a25d5243c16563b5 Mon Sep 17 00:00:00 2001
From: Felix Fietkau <nbd@nbd.name>
Date: Thu, 10 Mar 2022 15:20:29 +0100
Subject: [PATCH 6/8] ramips: skip bbt scan on mt7621

reduces unnecessary flash reads and speeds up boot time

Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
target/linux/ramips/files/drivers/mtd/nand/raw/mt7621_nand.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/target/linux/ramips/files/drivers/mtd/nand/raw/mt7621_nand.c b/target/linux/ramips/files/drivers/mtd/nand/raw/mt7621_nand.c
index 1072450898ab..f01e1e8a8ec4 100644
--- a/target/linux/ramips/files/drivers/mtd/nand/raw/mt7621_nand.c
+++ b/target/linux/ramips/files/drivers/mtd/nand/raw/mt7621_nand.c
@@ -1229,7 +1229,8 @@ static int mt7621_nfc_init_chip(struct mt7621_nfc *nfc)
 nand_set_controller_data(nand, (void *)nfc);
 nand_set_flash_node(nand, nfc->dev->of_node);

- nand->options |= NAND_USE_BOUNCE_BUFFER | NAND_NO_SUBPAGE_WRITE;
+ nand->options |= NAND_USE_BOUNCE_BUFFER | NAND_NO_SUBPAGE_WRITE |
+ NAND_SKIP_BBTSCAN;
 if (!nfc->nfi_clk)
 nand->options |= NAND_KEEP_TIMINGS;

--
2.32.0


From ef3812488bf110e05e42d7cf97e29d99248d364b Mon Sep 17 00:00:00 2001
From: Felix Fietkau <nbd@nbd.name>
Date: Thu, 10 Mar 2022 17:32:20 +0100
Subject: [PATCH 7/8] kernel: mtdsplit: support UBI after FIT images

Change the partition name accordingly. Same behavior as mtdsplit_uimage

Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
.../generic/files/drivers/mtd/mtdsplit/mtdsplit_fit.c | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/target/linux/generic/files/drivers/mtd/mtdsplit/mtdsplit_fit.c b/target/linux/generic/files/drivers/mtd/mtdsplit/mtdsplit_fit.c
index 5cc1658dbde6..f0434289494d 100644
--- a/target/linux/generic/files/drivers/mtd/mtdsplit/mtdsplit_fit.c
+++ b/target/linux/generic/files/drivers/mtd/mtdsplit/mtdsplit_fit.c
@@ -57,6 +57,7 @@ mtdsplit_fit_parse(struct mtd_info *mtd,
 size_t fit_offset, fit_size;
 size_t rootfs_offset, rootfs_size;
 struct mtd_partition *parts;
+ enum mtdsplit_part_type type;
 int ret;

 of_property_read_string(np, "openwrt,cmdline-match", &cmdline_match);
@@ -101,7 +102,7 @@ mtdsplit_fit_parse(struct mtd_info *mtd,

 /* Search for the rootfs partition after the FIT image */
 ret = mtd_find_rootfs_from(mtd, fit_offset + fit_size, mtd->size,
- &rootfs_offset, NULL);
+ &rootfs_offset, &type);
 if (ret) {
 pr_info("no rootfs found after FIT image in \"%s\"\n",
 mtd->name);
@@ -118,7 +119,10 @@ mtdsplit_fit_parse(struct mtd_info *mtd,
 parts[0].offset = fit_offset;
 parts[0].size = mtd_rounddown_to_eb(fit_size, mtd) + mtd->erasesize;

- parts[1].name = ROOTFS_PART_NAME;
+ if (type == MTDSPLIT_PART_TYPE_UBI)
+ parts[1].name = UBI_PART_NAME;
+ else
+ parts[1].name = ROOTFS_PART_NAME;
 parts[1].offset = rootfs_offset;
 parts[1].size = rootfs_size;

--
2.32.0


From 3b215d68e84ceb40979cc1db98623aaec2ec0e93 Mon Sep 17 00:00:00 2001
|
|
From: Felix Fietkau <nbd@nbd.name>
|
|
Date: Thu, 10 Mar 2022 18:09:15 +0100
|
|
Subject: [PATCH 8/8] update actiontec web7200 support
|
|
|
|
Signed-off-by: Felix Fietkau <nbd@nbd.name>
|
|
---
|
|
.../ramips/dts/mt7621_actiontec_web7200.dts | 98 +-
|
|
.../ramips/files/drivers/mtd/nmbm/Kconfig | 35 -
|
|
.../ramips/files/drivers/mtd/nmbm/Makefile | 6 -
|
|
.../ramips/files/drivers/mtd/nmbm/nmbm-core.c | 2936 -----------------
|
|
.../files/drivers/mtd/nmbm/nmbm-debug.h | 20 -
|
|
.../files/drivers/mtd/nmbm/nmbm-debug.inl | 0
|
|
.../ramips/files/drivers/mtd/nmbm/nmbm-mtd.c | 795 -----
|
|
.../files/drivers/mtd/nmbm/nmbm-private.h | 137 -
|
|
.../linux/ramips/files/include/nmbm/nmbm-os.h | 69 -
|
|
target/linux/ramips/files/include/nmbm/nmbm.h | 102 -
|
|
target/linux/ramips/image/mt7621.mk | 20 +-
|
|
.../mt7621/base-files/lib/upgrade/platform.sh | 1 +
|
|
target/linux/ramips/mt7621/config-5.4 | 8 -
|
|
.../499-mtd-add-nmbm-support.patch | 21 -
|
|
14 files changed, 65 insertions(+), 4183 deletions(-)
|
|
delete mode 100644 target/linux/ramips/files/drivers/mtd/nmbm/Kconfig
|
|
delete mode 100644 target/linux/ramips/files/drivers/mtd/nmbm/Makefile
|
|
delete mode 100644 target/linux/ramips/files/drivers/mtd/nmbm/nmbm-core.c
|
|
delete mode 100644 target/linux/ramips/files/drivers/mtd/nmbm/nmbm-debug.h
|
|
delete mode 100644 target/linux/ramips/files/drivers/mtd/nmbm/nmbm-debug.inl
|
|
delete mode 100644 target/linux/ramips/files/drivers/mtd/nmbm/nmbm-mtd.c
|
|
delete mode 100644 target/linux/ramips/files/drivers/mtd/nmbm/nmbm-private.h
|
|
delete mode 100644 target/linux/ramips/files/include/nmbm/nmbm-os.h
|
|
delete mode 100644 target/linux/ramips/files/include/nmbm/nmbm.h
|
|
delete mode 100644 target/linux/ramips/patches-5.4/499-mtd-add-nmbm-support.patch
|
|
|
|
diff --git a/target/linux/ramips/dts/mt7621_actiontec_web7200.dts b/target/linux/ramips/dts/mt7621_actiontec_web7200.dts
|
|
index f112354ec375..eb7dbdd31689 100644
|
|
--- a/target/linux/ramips/dts/mt7621_actiontec_web7200.dts
|
|
+++ b/target/linux/ramips/dts/mt7621_actiontec_web7200.dts
|
|
@@ -51,68 +51,60 @@
|
|
};
|
|
};
|
|
|
|
- nmbm {
|
|
- compatible = "generic,nmbm";
|
|
- lower-mtd-device = <&nand>;
|
|
- forced-create;
|
|
- #size-cells = <0x01>;
|
|
- #address-cells = <0x01>;
|
|
-
|
|
- partitions {
|
|
- compatible = "fixed-partitions";
|
|
- #size-cells = <0x01>;
|
|
- #address-cells = <0x01>;
|
|
-
|
|
- partition@0 {
|
|
- label = "u-boot";
|
|
- reg = <0x00 0x80000>;
|
|
- read-only;
|
|
- };
|
|
-
|
|
- partition@80000 {
|
|
- label = "u-boot-env";
|
|
- reg = <0x80000 0x80000>;
|
|
- };
|
|
-
|
|
- factory: partition@100000 {
|
|
- label = "factory";
|
|
- reg = <0x100000 0x80000>;
|
|
- };
|
|
-
|
|
- partition@180000 {
|
|
- compatible = "denx,fit";
|
|
- label = "firmware";
|
|
- openwrt,cmdline-match = "bootselect=0";
|
|
- reg = <0x180000 0x3000000>;
|
|
- };
|
|
-
|
|
- partition@3180000 {
|
|
- compatible = "denx,fit";
|
|
- label = "firmware2";
|
|
- openwrt,cmdline-match = "bootselect=1";
|
|
- reg = <0x3180000 0x3000000>;
|
|
- };
|
|
-
|
|
- partition@6980000 {
|
|
- label = "aei_data";
|
|
- reg = <0x6980000 0x400000>;
|
|
- };
|
|
-
|
|
- partition@6180000 {
|
|
- label = "aei_data_ext";
|
|
- reg = <0x6180000 0x800000>;
|
|
- };
|
|
- };
|
|
- };
|
|
};
|
|
|
|
&nand {
|
|
status = "okay";
|
|
|
|
+ mediatek,nmbm;
|
|
+ mediatek,bmt-remap-range =
|
|
+ <0x0 0x580000>,
|
|
+ <0x3180000 0x3580000>;
|
|
+
|
|
partitions {
|
|
compatible = "fixed-partitions";
|
|
#address-cells = <1>;
|
|
#size-cells = <1>;
|
|
+
|
|
+ partition@0 {
|
|
+ label = "u-boot";
|
|
+ reg = <0x00 0x80000>;
|
|
+ read-only;
|
|
+ };
|
|
+
|
|
+ partition@80000 {
|
|
+ label = "u-boot-env";
|
|
+ reg = <0x80000 0x80000>;
|
|
+ };
|
|
+
|
|
+ factory: partition@100000 {
|
|
+ label = "factory";
|
|
+ reg = <0x100000 0x80000>;
|
|
+ };
|
|
+
|
|
+ partition@180000 {
|
|
+ compatible = "denx,fit";
|
|
+ label = "firmware";
|
|
+ openwrt,cmdline-match = "bootselect=0";
|
|
+ reg = <0x180000 0x3000000>;
|
|
+ };
|
|
+
|
|
+ partition@3180000 {
|
|
+ compatible = "denx,fit";
|
|
+ label = "firmware2";
|
|
+ openwrt,cmdline-match = "bootselect=1";
|
|
+ reg = <0x3180000 0x3000000>;
|
|
+ };
|
|
+
|
|
+ partition@6980000 {
|
|
+ label = "aei_data";
|
|
+ reg = <0x6980000 0x400000>;
|
|
+ };
|
|
+
|
|
+ partition@6180000 {
|
|
+ label = "aei_data_ext";
|
|
+ reg = <0x6180000 0x800000>;
|
|
+ };
|
|
};
|
|
};
|
|
|
|
diff --git a/target/linux/ramips/files/drivers/mtd/nmbm/Kconfig b/target/linux/ramips/files/drivers/mtd/nmbm/Kconfig
|
|
deleted file mode 100644
|
|
index 98df305728bc..000000000000
|
|
--- a/target/linux/ramips/files/drivers/mtd/nmbm/Kconfig
|
|
+++ /dev/null
|
|
@@ -1,35 +0,0 @@
|
|
-
|
|
-config NMBM
|
|
- bool "Enable NAND mapping block management"
|
|
- default n
|
|
- select CRC32
|
|
-
|
|
-choice
|
|
- prompt "Default log level"
|
|
- depends on NMBM
|
|
- default NMBM_LOG_LEVEL_INFO
|
|
-
|
|
-config NMBM_LOG_LEVEL_DEBUG
|
|
- bool "0 - Debug"
|
|
-
|
|
-config NMBM_LOG_LEVEL_INFO
|
|
- bool "1 - Info"
|
|
-
|
|
-config NMBM_LOG_LEVEL_WARN
|
|
- bool "2 - Warn"
|
|
-
|
|
-config NMBM_LOG_LEVEL_ERR
|
|
- bool "3 - Error"
|
|
-
|
|
-config NMBM_LOG_LEVEL_EMERG
|
|
- bool "4 - Emergency"
|
|
-
|
|
-config NMBM_LOG_LEVEL_NONE
|
|
- bool "5 - None"
|
|
-
|
|
-endchoice
|
|
-
|
|
-config NMBM_MTD
|
|
- bool "Enable MTD based NAND mapping block management"
|
|
- default n
|
|
- depends on NMBM
|
|
diff --git a/target/linux/ramips/files/drivers/mtd/nmbm/Makefile b/target/linux/ramips/files/drivers/mtd/nmbm/Makefile
|
|
deleted file mode 100644
|
|
index 46e6d50a800f..000000000000
|
|
--- a/target/linux/ramips/files/drivers/mtd/nmbm/Makefile
|
|
+++ /dev/null
|
|
@@ -1,6 +0,0 @@
|
|
-# SPDX-License-Identifier: GPL-2.0
|
|
-#
|
|
-# (C) Copyright 2020 MediaTek Inc. All rights reserved.
|
|
-
|
|
-obj-$(CONFIG_NMBM) += nmbm-core.o
|
|
-obj-$(CONFIG_NMBM_MTD) += nmbm-mtd.o
|
|
diff --git a/target/linux/ramips/files/drivers/mtd/nmbm/nmbm-core.c b/target/linux/ramips/files/drivers/mtd/nmbm/nmbm-core.c
|
|
deleted file mode 100644
|
|
index 18dfb6adda06..000000000000
|
|
--- a/target/linux/ramips/files/drivers/mtd/nmbm/nmbm-core.c
|
|
+++ /dev/null
|
|
@@ -1,2936 +0,0 @@
|
|
-// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
|
|
-/*
|
|
- * Copyright (C) 2021 MediaTek Inc. All Rights Reserved.
|
|
- *
|
|
- * Author: Weijie Gao <weijie.gao@mediatek.com>
|
|
- */
|
|
-
|
|
-#include "nmbm-private.h"
|
|
-
|
|
-#include "nmbm-debug.h"
|
|
-
|
|
-#define NMBM_VER_MAJOR 1
|
|
-#define NMBM_VER_MINOR 0
|
|
-#define NMBM_VER NMBM_VERSION_MAKE(NMBM_VER_MAJOR, \
|
|
- NMBM_VER_MINOR)
|
|
-
|
|
-#define NMBM_ALIGN(v, a) (((v) + (a) - 1) & ~((a) - 1))
|
|
-
|
|
-/*****************************************************************************/
|
|
-/* Logging related functions */
|
|
-/*****************************************************************************/
|
|
-
|
|
-/*
|
|
- * nmbm_log_lower - Print log using OS specific routine
|
|
- * @nld: NMBM lower device structure
|
|
- * @level: log level
|
|
- * @fmt: format string
|
|
- */
|
|
-static void nmbm_log_lower(struct nmbm_lower_device *nld,
|
|
- enum nmbm_log_category level, const char *fmt, ...)
|
|
-{
|
|
- va_list ap;
|
|
-
|
|
- if (!nld->logprint)
|
|
- return;
|
|
-
|
|
- va_start(ap, fmt);
|
|
- nld->logprint(nld->arg, level, fmt, ap);
|
|
- va_end(ap);
|
|
-}
|
|
-
|
|
-/*
|
|
- * nmbm_log - Print log using OS specific routine
|
|
- * @ni: NMBM instance structure
|
|
- * @level: log level
|
|
- * @fmt: format string
|
|
- */
|
|
-static void nmbm_log(struct nmbm_instance *ni, enum nmbm_log_category level,
|
|
- const char *fmt, ...)
|
|
-{
|
|
- va_list ap;
|
|
-
|
|
- if (!ni)
|
|
- return;
|
|
-
|
|
- if (!ni->lower.logprint || level < ni->log_display_level)
|
|
- return;
|
|
-
|
|
- va_start(ap, fmt);
|
|
- ni->lower.logprint(ni->lower.arg, level, fmt, ap);
|
|
- va_end(ap);
|
|
-}
|
|
-
|
|
-/*
|
|
- * nmbm_set_log_level - Set log display level
|
|
- * @ni: NMBM instance structure
|
|
- * @level: log display level
|
|
- */
|
|
-enum nmbm_log_category nmbm_set_log_level(struct nmbm_instance *ni,
|
|
- enum nmbm_log_category level)
|
|
-{
|
|
- enum nmbm_log_category old;
|
|
-
|
|
- if (!ni)
|
|
- return __NMBM_LOG_MAX;
|
|
-
|
|
- old = ni->log_display_level;
|
|
- ni->log_display_level = level;
|
|
- return old;
|
|
-}
|
|
-
|
|
-/*
|
|
- * nlog_table_creation - Print log of table creation event
|
|
- * @ni: NMBM instance structure
|
|
- * @main_table: whether the table is main info table
|
|
- * @start_ba: start block address of the table
|
|
- * @end_ba: block address after the end of the table
|
|
- */
|
|
-static void nlog_table_creation(struct nmbm_instance *ni, bool main_table,
|
|
- uint32_t start_ba, uint32_t end_ba)
|
|
-{
|
|
- if (start_ba == end_ba - 1)
|
|
- nlog_info(ni, "%s info table has been written to block %u\n",
|
|
- main_table ? "Main" : "Backup", start_ba);
|
|
- else
|
|
- nlog_info(ni, "%s info table has been written to block %u-%u\n",
|
|
- main_table ? "Main" : "Backup", start_ba, end_ba - 1);
|
|
-
|
|
- nmbm_mark_block_color_info_table(ni, start_ba, end_ba - 1);
|
|
-}
|
|
-
|
|
-/*
|
|
- * nlog_table_update - Print log of table update event
|
|
- * @ni: NMBM instance structure
|
|
- * @main_table: whether the table is main info table
|
|
- * @start_ba: start block address of the table
|
|
- * @end_ba: block address after the end of the table
|
|
- */
|
|
-static void nlog_table_update(struct nmbm_instance *ni, bool main_table,
|
|
- uint32_t start_ba, uint32_t end_ba)
|
|
-{
|
|
- if (start_ba == end_ba - 1)
|
|
- nlog_debug(ni, "%s info table has been updated in block %u\n",
|
|
- main_table ? "Main" : "Backup", start_ba);
|
|
- else
|
|
- nlog_debug(ni, "%s info table has been updated in block %u-%u\n",
|
|
- main_table ? "Main" : "Backup", start_ba, end_ba - 1);
|
|
-
|
|
- nmbm_mark_block_color_info_table(ni, start_ba, end_ba - 1);
|
|
-}
|
|
-
|
|
-/*
|
|
- * nlog_table_found - Print log of table found event
|
|
- * @ni: NMBM instance structure
|
|
- * @first_table: whether the table is first found info table
|
|
- * @write_count: write count of the info table
|
|
- * @start_ba: start block address of the table
|
|
- * @end_ba: block address after the end of the table
|
|
- */
|
|
-static void nlog_table_found(struct nmbm_instance *ni, bool first_table,
|
|
- uint32_t write_count, uint32_t start_ba,
|
|
- uint32_t end_ba)
|
|
-{
|
|
- if (start_ba == end_ba - 1)
|
|
- nlog_info(ni, "%s info table with writecount %u found in block %u\n",
|
|
- first_table ? "First" : "Second", write_count,
|
|
- start_ba);
|
|
- else
|
|
- nlog_info(ni, "%s info table with writecount %u found in block %u-%u\n",
|
|
- first_table ? "First" : "Second", write_count,
|
|
- start_ba, end_ba - 1);
|
|
-
|
|
- nmbm_mark_block_color_info_table(ni, start_ba, end_ba - 1);
|
|
-}
|
|
-
|
|
-/*****************************************************************************/
|
|
-/* Address conversion functions */
|
|
-/*****************************************************************************/
|
|
-
|
|
-/*
|
|
- * addr2ba - Convert a linear address to block address
|
|
- * @ni: NMBM instance structure
|
|
- * @addr: Linear address
|
|
- */
|
|
-static uint32_t addr2ba(struct nmbm_instance *ni, uint64_t addr)
|
|
-{
|
|
- return addr >> ni->erasesize_shift;
|
|
-}
|
|
-
|
|
-/*
|
|
- * ba2addr - Convert a block address to linear address
|
|
- * @ni: NMBM instance structure
|
|
- * @ba: Block address
|
|
- */
|
|
-static uint64_t ba2addr(struct nmbm_instance *ni, uint32_t ba)
|
|
-{
|
|
- return (uint64_t)ba << ni->erasesize_shift;
|
|
-}
|
|
-/*
|
|
- * size2blk - Get minimum required blocks for storing specific size of data
|
|
- * @ni: NMBM instance structure
|
|
- * @size: size for storing
|
|
- */
|
|
-static uint32_t size2blk(struct nmbm_instance *ni, uint64_t size)
|
|
-{
|
|
- return (size + ni->lower.erasesize - 1) >> ni->erasesize_shift;
|
|
-}
|
|
-
|
|
-/*****************************************************************************/
|
|
-/* High level NAND chip APIs */
|
|
-/*****************************************************************************/
|
|
-
|
|
-/*
|
|
- * nmbm_reset_chip - Reset NAND device
|
|
- * @nld: Lower NAND chip structure
|
|
- */
|
|
-static void nmbm_reset_chip(struct nmbm_instance *ni)
|
|
-{
|
|
- if (ni->lower.reset_chip)
|
|
- ni->lower.reset_chip(ni->lower.arg);
|
|
-}
|
|
-
|
|
-/*
|
|
- * nmbm_read_phys_page - Read page with retry
|
|
- * @ni: NMBM instance structure
|
|
- * @addr: linear address where the data will be read from
|
|
- * @data: the main data to be read
|
|
- * @oob: the oob data to be read
|
|
- * @mode: mode for processing oob data
|
|
- *
|
|
- * Read a page for at most NMBM_TRY_COUNT times.
|
|
- *
|
|
- * Return 0 for success, positive value for corrected bitflip count,
|
|
- * -EBADMSG for ecc error, other negative values for other errors
|
|
- */
|
|
-static int nmbm_read_phys_page(struct nmbm_instance *ni, uint64_t addr,
|
|
- void *data, void *oob, enum nmbm_oob_mode mode)
|
|
-{
|
|
- int tries, ret;
|
|
-
|
|
- for (tries = 0; tries < NMBM_TRY_COUNT; tries++) {
|
|
- ret = ni->lower.read_page(ni->lower.arg, addr, data, oob, mode);
|
|
- if (ret >= 0)
|
|
- return ret;
|
|
-
|
|
- nmbm_reset_chip(ni);
|
|
- }
|
|
-
|
|
- if (ret != -EBADMSG)
|
|
- nlog_err(ni, "Page read failed at address 0x%08llx\n", addr);
|
|
-
|
|
- return ret;
|
|
-}
|
|
-
|
|
-/*
|
|
- * nmbm_write_phys_page - Write page with retry
|
|
- * @ni: NMBM instance structure
|
|
- * @addr: linear address where the data will be written to
|
|
- * @data: the main data to be written
|
|
- * @oob: the oob data to be written
|
|
- * @mode: mode for processing oob data
|
|
- *
|
|
- * Write a page for at most NMBM_TRY_COUNT times.
|
|
- */
|
|
-static bool nmbm_write_phys_page(struct nmbm_instance *ni, uint64_t addr,
|
|
- const void *data, const void *oob,
|
|
- enum nmbm_oob_mode mode)
|
|
-{
|
|
- int tries, ret;
|
|
-
|
|
- if (ni->lower.flags & NMBM_F_READ_ONLY) {
|
|
- nlog_err(ni, "%s called with NMBM_F_READ_ONLY set\n", addr);
|
|
- return false;
|
|
- }
|
|
-
|
|
- for (tries = 0; tries < NMBM_TRY_COUNT; tries++) {
|
|
- ret = ni->lower.write_page(ni->lower.arg, addr, data, oob, mode);
|
|
- if (!ret)
|
|
- return true;
|
|
-
|
|
- nmbm_reset_chip(ni);
|
|
- }
|
|
-
|
|
- nlog_err(ni, "Page write failed at address 0x%08llx\n", addr);
|
|
-
|
|
- return false;
|
|
-}
|
|
-
|
|
-/*
|
|
- * nmbm_erase_phys_block - Erase a block with retry
|
|
- * @ni: NMBM instance structure
|
|
- * @addr: Linear address
|
|
- *
|
|
- * Erase a block for at most NMBM_TRY_COUNT times.
|
|
- */
|
|
-static bool nmbm_erase_phys_block(struct nmbm_instance *ni, uint64_t addr)
|
|
-{
|
|
- int tries, ret;
|
|
-
|
|
- if (ni->lower.flags & NMBM_F_READ_ONLY) {
|
|
- nlog_err(ni, "%s called with NMBM_F_READ_ONLY set\n", addr);
|
|
- return false;
|
|
- }
|
|
-
|
|
- for (tries = 0; tries < NMBM_TRY_COUNT; tries++) {
|
|
- ret = ni->lower.erase_block(ni->lower.arg, addr);
|
|
- if (!ret)
|
|
- return true;
|
|
-
|
|
- nmbm_reset_chip(ni);
|
|
- }
|
|
-
|
|
- nlog_err(ni, "Block erasure failed at address 0x%08llx\n", addr);
|
|
-
|
|
- return false;
|
|
-}
|
|
-
|
|
-/*
|
|
- * nmbm_check_bad_phys_block - Check whether a block is marked bad in OOB
|
|
- * @ni: NMBM instance structure
|
|
- * @ba: block address
|
|
- */
|
|
-static bool nmbm_check_bad_phys_block(struct nmbm_instance *ni, uint32_t ba)
|
|
-{
|
|
- uint64_t addr = ba2addr(ni, ba);
|
|
- int ret;
|
|
-
|
|
- if (ni->lower.is_bad_block)
|
|
- return ni->lower.is_bad_block(ni->lower.arg, addr);
|
|
-
|
|
- /* Treat ECC error as read success */
|
|
- ret = nmbm_read_phys_page(ni, addr, NULL,
|
|
- ni->page_cache + ni->lower.writesize,
|
|
- NMBM_MODE_RAW);
|
|
- if (ret < 0 && ret != -EBADMSG)
|
|
- return true;
|
|
-
|
|
- return ni->page_cache[ni->lower.writesize] != 0xff;
|
|
-}
|
|
-
|
|
-/*
|
|
- * nmbm_mark_phys_bad_block - Mark a block bad
|
|
- * @ni: NMBM instance structure
|
|
- * @addr: Linear address
|
|
- */
|
|
-static int nmbm_mark_phys_bad_block(struct nmbm_instance *ni, uint32_t ba)
|
|
-{
|
|
- uint64_t addr = ba2addr(ni, ba);
|
|
- enum nmbm_log_category level;
|
|
- uint32_t off;
|
|
-
|
|
- if (ni->lower.flags & NMBM_F_READ_ONLY) {
|
|
- nlog_err(ni, "%s called with NMBM_F_READ_ONLY set\n", addr);
|
|
- return false;
|
|
- }
|
|
-
|
|
- nlog_info(ni, "Block %u [0x%08llx] will be marked bad\n", ba, addr);
|
|
-
|
|
- if (ni->lower.mark_bad_block)
|
|
- return ni->lower.mark_bad_block(ni->lower.arg, addr);
|
|
-
|
|
- /* Whole page set to 0x00 */
|
|
- memset(ni->page_cache, 0, ni->rawpage_size);
|
|
-
|
|
- /* Write to all pages within this block, disable all errors */
|
|
- level = nmbm_set_log_level(ni, __NMBM_LOG_MAX);
|
|
-
|
|
- for (off = 0; off < ni->lower.erasesize; off += ni->lower.writesize) {
|
|
- nmbm_write_phys_page(ni, addr + off, ni->page_cache,
|
|
- ni->page_cache + ni->lower.writesize,
|
|
- NMBM_MODE_RAW);
|
|
- }
|
|
-
|
|
- nmbm_set_log_level(ni, level);
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-/*****************************************************************************/
|
|
-/* NMBM related functions */
|
|
-/*****************************************************************************/
|
|
-
|
|
-/*
|
|
- * nmbm_check_header - Check whether a NMBM structure is valid
|
|
- * @data: pointer to a NMBM structure with a NMBM header at beginning
|
|
- * @size: Size of the buffer pointed by @header
|
|
- *
|
|
- * The size of the NMBM structure may be larger than NMBM header,
|
|
- * e.g. block mapping table and block state table.
|
|
- */
|
|
-static bool nmbm_check_header(const void *data, uint32_t size)
|
|
-{
|
|
- const struct nmbm_header *header = data;
|
|
- struct nmbm_header nhdr;
|
|
- uint32_t new_checksum;
|
|
-
|
|
- /*
|
|
- * Make sure expected structure size is equal or smaller than
|
|
- * buffer size.
|
|
- */
|
|
- if (header->size > size)
|
|
- return false;
|
|
-
|
|
- memcpy(&nhdr, data, sizeof(nhdr));
|
|
-
|
|
- nhdr.checksum = 0;
|
|
- new_checksum = nmbm_crc32(0, &nhdr, sizeof(nhdr));
|
|
- if (header->size > sizeof(nhdr))
|
|
- new_checksum = nmbm_crc32(new_checksum,
|
|
- (const uint8_t *)data + sizeof(nhdr),
|
|
- header->size - sizeof(nhdr));
|
|
-
|
|
- if (header->checksum != new_checksum)
|
|
- return false;
|
|
-
|
|
- return true;
|
|
-}
|
|
-
|
|
-/*
|
|
- * nmbm_update_checksum - Update checksum of a NMBM structure
|
|
- * @header: pointer to a NMBM structure with a NMBM header at beginning
|
|
- *
|
|
- * The size of the NMBM structure must be specified by @header->size
|
|
- */
|
|
-static void nmbm_update_checksum(struct nmbm_header *header)
|
|
-{
|
|
- header->checksum = 0;
|
|
- header->checksum = nmbm_crc32(0, header, header->size);
|
|
-}
|
|
-
|
|
-/*
|
|
- * nmbm_get_spare_block_count - Calculate number of blocks should be reserved
|
|
- * @block_count: number of blocks of data
|
|
- *
|
|
- * Calculate number of blocks should be reserved for data
|
|
- */
|
|
-static uint32_t nmbm_get_spare_block_count(uint32_t block_count)
|
|
-{
|
|
- uint32_t val;
|
|
-
|
|
- val = (block_count + NMBM_SPARE_BLOCK_DIV / 2) / NMBM_SPARE_BLOCK_DIV;
|
|
- val *= NMBM_SPARE_BLOCK_MULTI;
|
|
-
|
|
- if (val < NMBM_SPARE_BLOCK_MIN)
|
|
- val = NMBM_SPARE_BLOCK_MIN;
|
|
-
|
|
- return val;
|
|
-}
|
|
-
|
|
-/*
|
|
- * nmbm_get_block_state_raw - Get state of a block from raw block state table
|
|
- * @block_state: pointer to raw block state table (bitmap)
|
|
- * @ba: block address
|
|
- */
|
|
-static uint32_t nmbm_get_block_state_raw(nmbm_bitmap_t *block_state,
|
|
- uint32_t ba)
|
|
-{
|
|
- uint32_t unit, shift;
|
|
-
|
|
- unit = ba / NMBM_BITMAP_BLOCKS_PER_UNIT;
|
|
- shift = (ba % NMBM_BITMAP_BLOCKS_PER_UNIT) * NMBM_BITMAP_BITS_PER_BLOCK;
|
|
-
|
|
- return (block_state[unit] >> shift) & BLOCK_ST_MASK;
|
|
-}
|
|
-
|
|
-/*
|
|
- * nmbm_get_block_state - Get state of a block from block state table
|
|
- * @ni: NMBM instance structure
|
|
- * @ba: block address
|
|
- */
|
|
-static uint32_t nmbm_get_block_state(struct nmbm_instance *ni, uint32_t ba)
|
|
-{
|
|
- return nmbm_get_block_state_raw(ni->block_state, ba);
|
|
-}
|
|
-
|
|
-/*
|
|
- * nmbm_set_block_state - Set state of a block to block state table
|
|
- * @ni: NMBM instance structure
|
|
- * @ba: block address
|
|
- * @state: block state
|
|
- *
|
|
- * Set state of a block. If the block state changed, ni->block_state_changed
|
|
- * will be increased.
|
|
- */
|
|
-static bool nmbm_set_block_state(struct nmbm_instance *ni, uint32_t ba,
|
|
- uint32_t state)
|
|
-{
|
|
- uint32_t unit, shift, orig;
|
|
- nmbm_bitmap_t uv;
|
|
-
|
|
- unit = ba / NMBM_BITMAP_BLOCKS_PER_UNIT;
|
|
- shift = (ba % NMBM_BITMAP_BLOCKS_PER_UNIT) * NMBM_BITMAP_BITS_PER_BLOCK;
|
|
-
|
|
- orig = (ni->block_state[unit] >> shift) & BLOCK_ST_MASK;
|
|
- state &= BLOCK_ST_MASK;
|
|
-
|
|
- uv = ni->block_state[unit] & (~(BLOCK_ST_MASK << shift));
|
|
- uv |= state << shift;
|
|
- ni->block_state[unit] = uv;
|
|
-
|
|
- if (state == BLOCK_ST_BAD)
|
|
- nmbm_mark_block_color_bad(ni, ba);
|
|
-
|
|
- if (orig != state) {
|
|
- ni->block_state_changed++;
|
|
- return true;
|
|
- }
|
|
-
|
|
- return false;
|
|
-}
|
|
-
|
|
-/*
- * nmbm_block_walk_asc - Skip specified number of good blocks, ascending addr.
- * @ni: NMBM instance structure
- * @ba: start physical block address
- * @nba: return physical block address after walk
- * @count: number of good blocks to be skipped
- * @limit: highest block address allowed for walking
- *
- * Start from @ba, skipping any bad blocks, counting @count good blocks, and
- * return the next good block address.
- *
- * If no enough good blocks counted while @limit reached, false will be returned.
- *
- * If @count == 0, nearest good block address will be returned.
- * @limit is not counted in walking.
- */
-static bool nmbm_block_walk_asc(struct nmbm_instance *ni, uint32_t ba,
-				uint32_t *nba, uint32_t count,
-				uint32_t limit)
-{
-	int32_t nblock = count;
-
-	if (limit >= ni->block_count)
-		limit = ni->block_count - 1;
-
-	while (ba < limit) {
-		if (nmbm_get_block_state(ni, ba) == BLOCK_ST_GOOD)
-			nblock--;
-
-		if (nblock < 0) {
-			*nba = ba;
-			return true;
-		}
-
-		ba++;
-	}
-
-	return false;
-}
-
-/*
- * nmbm_block_walk_desc - Skip specified number of good blocks, descending addr
- * @ni: NMBM instance structure
- * @ba: start physical block address
- * @nba: return physical block address after walk
- * @count: number of good blocks to be skipped
- * @limit: lowest block address allowed for walking
- *
- * Start from @ba, skipping any bad blocks, counting @count good blocks, and
- * return the next good block address.
- *
- * If no enough good blocks counted while @limit reached, false will be returned.
- *
- * If @count == 0, nearest good block address will be returned.
- * @limit is not counted in walking.
- */
-static bool nmbm_block_walk_desc(struct nmbm_instance *ni, uint32_t ba,
-				 uint32_t *nba, uint32_t count, uint32_t limit)
-{
-	int32_t nblock = count;
-
-	if (limit >= ni->block_count)
-		limit = ni->block_count - 1;
-
-	while (ba > limit) {
-		if (nmbm_get_block_state(ni, ba) == BLOCK_ST_GOOD)
-			nblock--;
-
-		if (nblock < 0) {
-			*nba = ba;
-			return true;
-		}
-
-		ba--;
-	}
-
-	return false;
-}
-
-/*
- * nmbm_block_walk - Skip specified number of good blocks from curr. block addr
- * @ni: NMBM instance structure
- * @ascending: whether to walk ascending
- * @ba: start physical block address
- * @nba: return physical block address after walk
- * @count: number of good blocks to be skipped
- * @limit: highest/lowest block address allowed for walking
- *
- * Start from @ba, skipping any bad blocks, counting @count good blocks, and
- * return the next good block address.
- *
- * If no enough good blocks counted while @limit reached, false will be returned.
- *
- * If @count == 0, nearest good block address will be returned.
- * @limit can be set to negative if no limit required.
- * @limit is not counted in walking.
- */
-static bool nmbm_block_walk(struct nmbm_instance *ni, bool ascending,
-			    uint32_t ba, uint32_t *nba, int32_t count,
-			    int32_t limit)
-{
-	if (ascending)
-		return nmbm_block_walk_asc(ni, ba, nba, count, limit);
-
-	return nmbm_block_walk_desc(ni, ba, nba, count, limit);
-}
-
-/*
- * nmbm_scan_badblocks - Scan and record all bad blocks
- * @ni: NMBM instance structure
- *
- * Scan the entire lower NAND chip and record all bad blocks in to block state
- * table.
- */
-static void nmbm_scan_badblocks(struct nmbm_instance *ni)
-{
-	uint32_t ba;
-
-	for (ba = 0; ba < ni->block_count; ba++) {
-		if (nmbm_check_bad_phys_block(ni, ba)) {
-			nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
-			nlog_info(ni, "Bad block %u [0x%08llx]\n", ba,
-				  ba2addr(ni, ba));
-		}
-	}
-}
-
-/*
- * nmbm_build_mapping_table - Build initial block mapping table
- * @ni: NMBM instance structure
- *
- * The initial mapping table will be compatible with the stratage of
- * factory production.
- */
-static void nmbm_build_mapping_table(struct nmbm_instance *ni)
-{
-	uint32_t pb, lb;
-
-	for (pb = 0, lb = 0; pb < ni->mgmt_start_ba; pb++) {
-		if (nmbm_get_block_state(ni, pb) == BLOCK_ST_BAD)
-			continue;
-
-		/* Always map to the next good block */
-		ni->block_mapping[lb++] = pb;
-	}
-
-	ni->data_block_count = lb;
-
-	/* Unusable/Management blocks */
-	for (pb = lb; pb < ni->block_count; pb++)
-		ni->block_mapping[pb] = -1;
-}
-
-/*
- * nmbm_erase_block_and_check - Erase a block and check its usability
- * @ni: NMBM instance structure
- * @ba: block address to be erased
- *
- * Erase a block anc check its usability
- *
- * Return true if the block is usable, false if erasure failure or the block
- * has too many bitflips.
- */
-static bool nmbm_erase_block_and_check(struct nmbm_instance *ni, uint32_t ba)
-{
-	uint64_t addr, off;
-	bool success;
-	int ret;
-
-	success = nmbm_erase_phys_block(ni, ba2addr(ni, ba));
-	if (!success)
-		return false;
-
-	if (!(ni->lower.flags & NMBM_F_EMPTY_PAGE_ECC_OK))
-		return true;
-
-	/* Check every page to make sure there aren't too many bitflips */
-
-	addr = ba2addr(ni, ba);
-
-	for (off = 0; off < ni->lower.erasesize; off += ni->lower.writesize) {
-		WATCHDOG_RESET();
-
-		ret = nmbm_read_phys_page(ni, addr + off, ni->page_cache, NULL,
-					  NMBM_MODE_PLACE_OOB);
-		if (ret == -EBADMSG) {
-			/*
-			 * NMBM_F_EMPTY_PAGE_ECC_OK means the empty page is
-			 * still protected by ECC. So reading pages with ECC
-			 * enabled and -EBADMSG means there are too many
-			 * bitflips that can't be recovered, and the block
-			 * containing the page should be marked bad.
-			 */
-			nlog_err(ni,
-				 "Too many bitflips in empty page at 0x%llx\n",
-				 addr + off);
-			return false;
-		}
-	}
-
-	return true;
-}
-
-/*
- * nmbm_erase_range - Erase a range of blocks
- * @ni: NMBM instance structure
- * @ba: block address where the erasure will start
- * @limit: top block address allowed for erasure
- *
- * Erase blocks within the specific range. Newly-found bad blocks will be
- * marked.
- *
- * @limit is not counted into the allowed erasure address.
- */
-static void nmbm_erase_range(struct nmbm_instance *ni, uint32_t ba,
-			     uint32_t limit)
-{
-	bool success;
-
-	while (ba < limit) {
-		WATCHDOG_RESET();
-
-		if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
-			goto next_block;
-
-		/* Insurance to detect unexpected bad block marked by user */
-		if (nmbm_check_bad_phys_block(ni, ba)) {
-			nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
-			goto next_block;
-		}
-
-		success = nmbm_erase_block_and_check(ni, ba);
-		if (success)
-			goto next_block;
-
-		nmbm_mark_phys_bad_block(ni, ba);
-		nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
-
-	next_block:
-		ba++;
-	}
-}
-
-/*
- * nmbm_write_repeated_data - Write critical data to a block with retry
- * @ni: NMBM instance structure
- * @ba: block address where the data will be written to
- * @data: the data to be written
- * @size: size of the data
- *
- * Write data to every page of the block. Success only if all pages within
- * this block have been successfully written.
- *
- * Make sure data size is not bigger than one page.
- *
- * This function will write and verify every page for at most
- * NMBM_TRY_COUNT times.
- */
-static bool nmbm_write_repeated_data(struct nmbm_instance *ni, uint32_t ba,
-				     const void *data, uint32_t size)
-{
-	uint64_t addr, off;
-	bool success;
-	int ret;
-
-	if (size > ni->lower.writesize)
-		return false;
-
-	addr = ba2addr(ni, ba);
-
-	for (off = 0; off < ni->lower.erasesize; off += ni->lower.writesize) {
-		WATCHDOG_RESET();
-
-		/* Prepare page data. fill 0xff to unused region */
-		memcpy(ni->page_cache, data, size);
-		memset(ni->page_cache + size, 0xff, ni->rawpage_size - size);
-
-		success = nmbm_write_phys_page(ni, addr + off, ni->page_cache,
-					       NULL, NMBM_MODE_PLACE_OOB);
-		if (!success)
-			return false;
-
-		/* Verify the data just written. ECC error indicates failure */
-		ret = nmbm_read_phys_page(ni, addr + off, ni->page_cache, NULL,
-					  NMBM_MODE_PLACE_OOB);
-		if (ret < 0)
-			return false;
-
-		if (memcmp(ni->page_cache, data, size))
-			return false;
-	}
-
-	return true;
-}
-
-/*
- * nmbm_write_signature - Write signature to NAND chip
- * @ni: NMBM instance structure
- * @limit: top block address allowed for writing
- * @signature: the signature to be written
- * @signature_ba: the actual block address where signature is written to
- *
- * Write signature within a specific range, from chip bottom to limit.
- * At most one block will be written.
- *
- * @limit is not counted into the allowed write address.
- */
-static bool nmbm_write_signature(struct nmbm_instance *ni, uint32_t limit,
-				 const struct nmbm_signature *signature,
-				 uint32_t *signature_ba)
-{
-	uint32_t ba = ni->block_count - 1;
-	bool success;
-
-	while (ba > limit) {
-		WATCHDOG_RESET();
-
-		if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
-			goto next_block;
-
-		/* Insurance to detect unexpected bad block marked by user */
-		if (nmbm_check_bad_phys_block(ni, ba)) {
-			nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
-			goto next_block;
-		}
-
-		success = nmbm_erase_block_and_check(ni, ba);
-		if (!success)
-			goto skip_bad_block;
-
-		success = nmbm_write_repeated_data(ni, ba, signature,
-						   sizeof(*signature));
-		if (success) {
-			*signature_ba = ba;
-			return true;
-		}
-
-	skip_bad_block:
-		nmbm_mark_phys_bad_block(ni, ba);
-		nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
-
-	next_block:
-		ba--;
-	};
-
-	return false;
-}
-
-/*
- * nmbn_read_data - Read data
- * @ni: NMBM instance structure
- * @addr: linear address where the data will be read from
- * @data: the data to be read
- * @size: the size of data
- *
- * Read data range.
- * Every page will be tried for at most NMBM_TRY_COUNT times.
- *
- * Return 0 for success, positive value for corrected bitflip count,
- * -EBADMSG for ecc error, other negative values for other errors
- */
-static int nmbn_read_data(struct nmbm_instance *ni, uint64_t addr, void *data,
-			  uint32_t size)
-{
-	uint64_t off = addr;
-	uint8_t *ptr = data;
-	uint32_t sizeremain = size, chunksize, leading;
-	int ret;
-
-	while (sizeremain) {
-		WATCHDOG_RESET();
-
-		leading = off & ni->writesize_mask;
-		chunksize = ni->lower.writesize - leading;
-		if (chunksize > sizeremain)
-			chunksize = sizeremain;
-
-		if (chunksize == ni->lower.writesize) {
-			ret = nmbm_read_phys_page(ni, off - leading, ptr, NULL,
-						  NMBM_MODE_PLACE_OOB);
-			if (ret < 0)
-				return ret;
-		} else {
-			ret = nmbm_read_phys_page(ni, off - leading,
-						  ni->page_cache, NULL,
-						  NMBM_MODE_PLACE_OOB);
-			if (ret < 0)
-				return ret;
-
-			memcpy(ptr, ni->page_cache + leading, chunksize);
-		}
-
-		off += chunksize;
-		ptr += chunksize;
-		sizeremain -= chunksize;
-	}
-
-	return 0;
-}
-
-/*
- * nmbn_write_verify_data - Write data with validation
- * @ni: NMBM instance structure
- * @addr: linear address where the data will be written to
- * @data: the data to be written
- * @size: the size of data
- *
- * Write data and verify.
- * Every page will be tried for at most NMBM_TRY_COUNT times.
- */
-static bool nmbn_write_verify_data(struct nmbm_instance *ni, uint64_t addr,
-				   const void *data, uint32_t size)
-{
-	uint64_t off = addr;
-	const uint8_t *ptr = data;
-	uint32_t sizeremain = size, chunksize, leading;
-	bool success;
-	int ret;
-
-	while (sizeremain) {
-		WATCHDOG_RESET();
-
-		leading = off & ni->writesize_mask;
-		chunksize = ni->lower.writesize - leading;
-		if (chunksize > sizeremain)
-			chunksize = sizeremain;
-
-		/* Prepare page data. fill 0xff to unused region */
-		memset(ni->page_cache, 0xff, ni->rawpage_size);
-		memcpy(ni->page_cache + leading, ptr, chunksize);
-
-		success = nmbm_write_phys_page(ni, off - leading,
-					       ni->page_cache, NULL,
-					       NMBM_MODE_PLACE_OOB);
-		if (!success)
-			return false;
-
-		/* Verify the data just written. ECC error indicates failure */
-		ret = nmbm_read_phys_page(ni, off - leading, ni->page_cache,
-					  NULL, NMBM_MODE_PLACE_OOB);
-		if (ret < 0)
-			return false;
-
-		if (memcmp(ni->page_cache + leading, ptr, chunksize))
-			return false;
-
-		off += chunksize;
-		ptr += chunksize;
-		sizeremain -= chunksize;
-	}
-
-	return true;
-}
-
-/*
- * nmbm_write_mgmt_range - Write management data into NAND within a range
- * @ni: NMBM instance structure
- * @addr: preferred start block address for writing
- * @limit: highest block address allowed for writing
- * @data: the data to be written
- * @size: the size of data
- * @actual_start_ba: actual start block address of data
- * @actual_end_ba: block address after the end of data
- *
- * @limit is not counted into the allowed write address.
- */
-static bool nmbm_write_mgmt_range(struct nmbm_instance *ni, uint32_t ba,
-				  uint32_t limit, const void *data,
-				  uint32_t size, uint32_t *actual_start_ba,
-				  uint32_t *actual_end_ba)
-{
-	const uint8_t *ptr = data;
-	uint32_t sizeremain = size, chunksize;
-	bool success;
-
-	while (sizeremain && ba < limit) {
-		WATCHDOG_RESET();
-
-		chunksize = sizeremain;
-		if (chunksize > ni->lower.erasesize)
-			chunksize = ni->lower.erasesize;
-
-		if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
-			goto next_block;
-
-		/* Insurance to detect unexpected bad block marked by user */
-		if (nmbm_check_bad_phys_block(ni, ba)) {
-			nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
-			goto next_block;
-		}
-
-		success = nmbm_erase_block_and_check(ni, ba);
-		if (!success)
-			goto skip_bad_block;
-
-		success = nmbn_write_verify_data(ni, ba2addr(ni, ba), ptr,
-						 chunksize);
-		if (!success)
-			goto skip_bad_block;
-
-		if (sizeremain == size)
-			*actual_start_ba = ba;
-
-		ptr += chunksize;
-		sizeremain -= chunksize;
-
-		goto next_block;
-
-	skip_bad_block:
-		nmbm_mark_phys_bad_block(ni, ba);
-		nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
-
-	next_block:
-		ba++;
-	}
-
-	if (sizeremain)
-		return false;
-
-	*actual_end_ba = ba;
-
-	return true;
-}
-
-/*
- * nmbm_generate_info_table_cache - Generate info table cache data
- * @ni: NMBM instance structure
- *
- * Generate info table cache data to be written into flash.
- */
-static bool nmbm_generate_info_table_cache(struct nmbm_instance *ni)
-{
-	bool changed = false;
-
-	memset(ni->info_table_cache, 0xff, ni->info_table_size);
-
-	memcpy(ni->info_table_cache + ni->info_table.state_table_off,
-	       ni->block_state, ni->state_table_size);
-
-	memcpy(ni->info_table_cache + ni->info_table.mapping_table_off,
-	       ni->block_mapping, ni->mapping_table_size);
-
-	ni->info_table.header.magic = NMBM_MAGIC_INFO_TABLE;
-	ni->info_table.header.version = NMBM_VER;
-	ni->info_table.header.size = ni->info_table_size;
-
-	if (ni->block_state_changed || ni->block_mapping_changed) {
-		ni->info_table.write_count++;
-		changed = true;
-	}
-
-	memcpy(ni->info_table_cache, &ni->info_table, sizeof(ni->info_table));
-
-	nmbm_update_checksum((struct nmbm_header *)ni->info_table_cache);
-
-	return changed;
-}
-
-/*
- * nmbm_write_info_table - Write info table into NAND within a range
- * @ni: NMBM instance structure
- * @ba: preferred start block address for writing
- * @limit: highest block address allowed for writing
- * @actual_start_ba: actual start block address of info table
- * @actual_end_ba: block address after the end of info table
- *
- * @limit is counted into the allowed write address.
- */
-static bool nmbm_write_info_table(struct nmbm_instance *ni, uint32_t ba,
-				  uint32_t limit, uint32_t *actual_start_ba,
-				  uint32_t *actual_end_ba)
-{
-	return nmbm_write_mgmt_range(ni, ba, limit, ni->info_table_cache,
-				     ni->info_table_size, actual_start_ba,
-				     actual_end_ba);
-}
-
-/*
- * nmbm_mark_tables_clean - Mark info table `clean'
- * @ni: NMBM instance structure
- */
-static void nmbm_mark_tables_clean(struct nmbm_instance *ni)
-{
-	ni->block_state_changed = 0;
-	ni->block_mapping_changed = 0;
-}
-
-/*
- * nmbm_try_reserve_blocks - Reserve blocks with compromisation
- * @ni: NMBM instance structure
- * @ba: start physical block address
- * @nba: return physical block address after reservation
- * @count: number of good blocks to be skipped
- * @min_count: minimum number of good blocks to be skipped
- * @limit: highest/lowest block address allowed for walking
- *
- * Reserve specific blocks. If failed, try to reserve as many as possible.
- */
-static bool nmbm_try_reserve_blocks(struct nmbm_instance *ni, uint32_t ba,
-				    uint32_t *nba, uint32_t count,
-				    int32_t min_count, int32_t limit)
-{
-	int32_t nblocks = count;
-	bool success;
-
-	while (nblocks >= min_count) {
-		success = nmbm_block_walk(ni, true, ba, nba, nblocks, limit);
-		if (success)
-			return true;
-
-		nblocks--;
-	}
-
-	return false;
-}
-
-/*
- * nmbm_rebuild_info_table - Build main & backup info table from scratch
- * @ni: NMBM instance structure
- * @allow_no_gap: allow no spare blocks between two tables
- */
-static bool nmbm_rebuild_info_table(struct nmbm_instance *ni)
-{
-	uint32_t table_start_ba, table_end_ba, next_start_ba;
-	uint32_t main_table_end_ba;
-	bool success;
-
-	/* Set initial value */
-	ni->main_table_ba = 0;
-	ni->backup_table_ba = 0;
-	ni->mapping_blocks_ba = ni->mapping_blocks_top_ba;
-
-	/* Write main table */
-	success = nmbm_write_info_table(ni, ni->mgmt_start_ba,
-					ni->mapping_blocks_top_ba,
-					&table_start_ba, &table_end_ba);
-	if (!success) {
-		/* Failed to write main table, data will be lost */
-		nlog_emerg(ni, "Unable to write at least one info table!\n");
-		nlog_emerg(ni, "Please save your data before power off!\n");
-		ni->protected = 1;
-		return false;
-	}
-
-	/* Main info table is successfully written, record its offset */
-	ni->main_table_ba = table_start_ba;
-	main_table_end_ba = table_end_ba;
-
-	/* Adjust mapping_blocks_ba */
-	ni->mapping_blocks_ba = table_end_ba;
-
-	nmbm_mark_tables_clean(ni);
-
-	nlog_table_creation(ni, true, table_start_ba, table_end_ba);
-
-	/* Reserve spare blocks for main info table. */
-	success = nmbm_try_reserve_blocks(ni, table_end_ba,
-					  &next_start_ba,
-					  ni->info_table_spare_blocks, 0,
-					  ni->mapping_blocks_top_ba -
-					  size2blk(ni, ni->info_table_size));
-	if (!success) {
-		/* There is no spare block. */
-		nlog_debug(ni, "No room for backup info table\n");
-		return true;
-	}
-
-	/* Write backup info table. */
-	success = nmbm_write_info_table(ni, next_start_ba,
-					ni->mapping_blocks_top_ba,
-					&table_start_ba, &table_end_ba);
-	if (!success) {
-		/* There is no enough blocks for backup table. */
-		nlog_debug(ni, "No room for backup info table\n");
-		return true;
-	}
-
-	/* Backup table is successfully written, record its offset */
-	ni->backup_table_ba = table_start_ba;
-
-	/* Adjust mapping_blocks_off */
-	ni->mapping_blocks_ba = table_end_ba;
-
-	/* Erase spare blocks of main table to clean possible interference data */
-	nmbm_erase_range(ni, main_table_end_ba, ni->backup_table_ba);
-
-	nlog_table_creation(ni, false, table_start_ba, table_end_ba);
-
-	return true;
-}
-
-/*
- * nmbm_rescue_single_info_table - Rescue when there is only one info table
- * @ni: NMBM instance structure
- *
- * This function is called when there is only one info table exists.
- * This function may fail if we can't write new info table
- */
-static bool nmbm_rescue_single_info_table(struct nmbm_instance *ni)
-{
-	uint32_t table_start_ba, table_end_ba, write_ba;
-	bool success;
-
-	/* Try to write new info table in front of existing table */
-	success = nmbm_write_info_table(ni, ni->mgmt_start_ba,
-					ni->main_table_ba,
-					&table_start_ba,
-					&table_end_ba);
-	if (success) {
-		/*
-		 * New table becomes the main table, existing table becomes
-		 * the backup table.
-		 */
-		ni->backup_table_ba = ni->main_table_ba;
-		ni->main_table_ba = table_start_ba;
-
-		nmbm_mark_tables_clean(ni);
-
-		/* Erase spare blocks of main table to clean possible interference data */
-		nmbm_erase_range(ni, table_end_ba, ni->backup_table_ba);
-
-		nlog_table_creation(ni, true, table_start_ba, table_end_ba);
-
-		return true;
-	}
-
-	/* Try to reserve spare blocks for existing table */
-	success = nmbm_try_reserve_blocks(ni, ni->mapping_blocks_ba, &write_ba,
-					  ni->info_table_spare_blocks, 0,
-					  ni->mapping_blocks_top_ba -
-					  size2blk(ni, ni->info_table_size));
-	if (!success) {
-		nlog_warn(ni, "Failed to rescue single info table\n");
-		return false;
-	}
-
-	/* Try to write new info table next to the existing table */
-	while (write_ba >= ni->mapping_blocks_ba) {
-		WATCHDOG_RESET();
-
-		success = nmbm_write_info_table(ni, write_ba,
-						ni->mapping_blocks_top_ba,
-						&table_start_ba,
-						&table_end_ba);
-		if (success)
-			break;
-
-		write_ba--;
-	}
-
-	if (success) {
-		/* Erase spare blocks of main table to clean possible interference data */
-		nmbm_erase_range(ni, ni->mapping_blocks_ba, table_start_ba);
-
-		/* New table becomes the backup table */
-		ni->backup_table_ba = table_start_ba;
-		ni->mapping_blocks_ba = table_end_ba;
-
-		nmbm_mark_tables_clean(ni);
-
-		nlog_table_creation(ni, false, table_start_ba, table_end_ba);
-
-		return true;
-	}
-
-	nlog_warn(ni, "Failed to rescue single info table\n");
-	return false;
-}
-
-/*
- * nmbm_update_single_info_table - Update specific one info table
- * @ni: NMBM instance structure
- */
-static bool nmbm_update_single_info_table(struct nmbm_instance *ni,
-					  bool update_main_table)
-{
-	uint32_t write_start_ba, write_limit, table_start_ba, table_end_ba;
-	bool success;
-
-	/* Determine the write range */
-	if (update_main_table) {
-		write_start_ba = ni->main_table_ba;
-		write_limit = ni->backup_table_ba;
-	} else {
-		write_start_ba = ni->backup_table_ba;
-		write_limit = ni->mapping_blocks_top_ba;
-	}
-
-	nmbm_mark_block_color_mgmt(ni, write_start_ba, write_limit - 1);
-
-	success = nmbm_write_info_table(ni, write_start_ba, write_limit,
-					&table_start_ba, &table_end_ba);
-	if (success) {
-		if (update_main_table) {
-			ni->main_table_ba = table_start_ba;
-		} else {
-			ni->backup_table_ba = table_start_ba;
-			ni->mapping_blocks_ba = table_end_ba;
-		}
-
-		nmbm_mark_tables_clean(ni);
-
-		nlog_table_update(ni, update_main_table, table_start_ba,
-				  table_end_ba);
-
-		return true;
-	}
-
-	if (update_main_table) {
-		/*
-		 * If failed to update main table, make backup table the new
-		 * main table, and call nmbm_rescue_single_info_table()
-		 */
-		nlog_warn(ni, "Unable to update %s info table\n",
-			  update_main_table ? "Main" : "Backup");
-
-		ni->main_table_ba = ni->backup_table_ba;
-		ni->backup_table_ba = 0;
-		return nmbm_rescue_single_info_table(ni);
-	}
-
-	/* Only one table left */
-	ni->mapping_blocks_ba = ni->backup_table_ba;
-	ni->backup_table_ba = 0;
-
-	return false;
-}
-
-/*
- * nmbm_rescue_main_info_table - Rescue when failed to write main info table
- * @ni: NMBM instance structure
- *
- * This function is called when main info table failed to be written, and
- * backup info table exists.
- */
-static bool nmbm_rescue_main_info_table(struct nmbm_instance *ni)
-{
-	uint32_t tmp_table_start_ba, tmp_table_end_ba, main_table_start_ba;
-	uint32_t main_table_end_ba, write_ba;
-	uint32_t info_table_erasesize = size2blk(ni, ni->info_table_size);
-	bool success;
-
-	/* Try to reserve spare blocks for existing backup info table */
-	success = nmbm_try_reserve_blocks(ni, ni->mapping_blocks_ba, &write_ba,
-					  ni->info_table_spare_blocks, 0,
-					  ni->mapping_blocks_top_ba -
-					  info_table_erasesize);
-	if (!success) {
-		/* There is no spare block. Backup info table becomes the main table. */
-		nlog_err(ni, "No room for temporary info table\n");
-		ni->main_table_ba = ni->backup_table_ba;
-		ni->backup_table_ba = 0;
-		return true;
-	}
-
-	/* Try to write temporary info table into spare unmapped blocks */
-	while (write_ba >= ni->mapping_blocks_ba) {
-		WATCHDOG_RESET();
-
-		success = nmbm_write_info_table(ni, write_ba,
-						ni->mapping_blocks_top_ba,
-						&tmp_table_start_ba,
-						&tmp_table_end_ba);
-		if (success)
-			break;
-
-		write_ba--;
-	}
-
-	if (!success) {
-		/* Backup info table becomes the main table */
-		nlog_err(ni, "Failed to update main info table\n");
-		ni->main_table_ba = ni->backup_table_ba;
-		ni->backup_table_ba = 0;
-		return true;
-	}
-
-	/* Adjust mapping_blocks_off */
-	ni->mapping_blocks_ba = tmp_table_end_ba;
-
-	nmbm_mark_block_color_mgmt(ni, ni->backup_table_ba,
-				   tmp_table_end_ba - 1);
-
-	/*
-	 * Now write main info table at the beginning of management area.
-	 * This operation will generally destroy the original backup info
-	 * table.
-	 */
-	success = nmbm_write_info_table(ni, ni->mgmt_start_ba,
-					tmp_table_start_ba,
-					&main_table_start_ba,
-					&main_table_end_ba);
-	if (!success) {
-		/* Temporary info table becomes the main table */
-		ni->main_table_ba = tmp_table_start_ba;
-		ni->backup_table_ba = 0;
-
-		nmbm_mark_tables_clean(ni);
-
-		nlog_err(ni, "Failed to update main info table\n");
-		nmbm_mark_block_color_info_table(ni, tmp_table_start_ba,
-						 tmp_table_end_ba - 1);
-
-		return true;
-	}
-
-	/* Main info table has been successfully written, record its offset */
-	ni->main_table_ba = main_table_start_ba;
-
-	nmbm_mark_tables_clean(ni);
-
-	nlog_table_creation(ni, true, main_table_start_ba, main_table_end_ba);
-
-	/*
-	 * Temporary info table becomes the new backup info table if it's
-	 * not overwritten.
-	 */
-	if (main_table_end_ba <= tmp_table_start_ba) {
-		ni->backup_table_ba = tmp_table_start_ba;
-
-		nlog_table_creation(ni, false, tmp_table_start_ba,
-				    tmp_table_end_ba);
-
-		return true;
-	}
-
-	/* Adjust mapping_blocks_off */
-	ni->mapping_blocks_ba = main_table_end_ba;
-
-	/* Try to reserve spare blocks for new main info table */
-	success = nmbm_try_reserve_blocks(ni, main_table_end_ba, &write_ba,
-					  ni->info_table_spare_blocks, 0,
-					  ni->mapping_blocks_top_ba -
-					  info_table_erasesize);
-	if (!success) {
-		/* There is no spare block. Only main table exists. */
-		nlog_err(ni, "No room for backup info table\n");
-		ni->backup_table_ba = 0;
-		return true;
-	}
-
-	/* Write new backup info table. */
-	while (write_ba >= main_table_end_ba) {
-		WATCHDOG_RESET();
-
-		success = nmbm_write_info_table(ni, write_ba,
-						ni->mapping_blocks_top_ba,
-						&tmp_table_start_ba,
-						&tmp_table_end_ba);
-		if (success)
-			break;
-
-		write_ba--;
-	}
-
-	if (!success) {
-		nlog_err(ni, "No room for backup info table\n");
-		ni->backup_table_ba = 0;
-		return true;
-	}
-
-	/* Backup info table has been successfully written, record its offset */
-	ni->backup_table_ba = tmp_table_start_ba;
-
-	/* Adjust mapping_blocks_off */
-	ni->mapping_blocks_ba = tmp_table_end_ba;
-
-	/* Erase spare blocks of main table to clean possible interference data */
-	nmbm_erase_range(ni, main_table_end_ba, ni->backup_table_ba);
-
-	nlog_table_creation(ni, false, tmp_table_start_ba, tmp_table_end_ba);
-
-	return true;
-}
-
-/*
- * nmbm_update_info_table_once - Update info table once
- * @ni: NMBM instance structure
- * @force: force update
- *
- * Update both main and backup info table. Return true if at least one info
- * table has been successfully written.
- * This function only try to update info table once regard less of the result.
- */
-static bool nmbm_update_info_table_once(struct nmbm_instance *ni, bool force)
-{
-	uint32_t table_start_ba, table_end_ba;
-	uint32_t main_table_limit;
-	bool success;
-
-	/* Do nothing if there is no change */
-	if (!nmbm_generate_info_table_cache(ni) && !force)
-		return true;
-
-	/* Check whether both two tables exist */
-	if (!ni->backup_table_ba) {
-		main_table_limit = ni->mapping_blocks_top_ba;
-		goto write_main_table;
-	}
-
-	nmbm_mark_block_color_mgmt(ni, ni->backup_table_ba,
-				   ni->mapping_blocks_ba - 1);
-
-	/*
-	 * Write backup info table in its current range.
-	 * Note that limit is set to mapping_blocks_top_off to provide as many
-	 * spare blocks as possible for the backup table. If at last
-	 * unmapped blocks are used by backup table, mapping_blocks_off will
-	 * be adjusted.
-	 */
-	success = nmbm_write_info_table(ni, ni->backup_table_ba,
-					ni->mapping_blocks_top_ba,
-					&table_start_ba, &table_end_ba);
-	if (!success) {
-		/*
-		 * There is nothing to do if failed to write backup table.
-		 * Write the main table now.
-		 */
-		nlog_err(ni, "No room for backup table\n");
-		ni->mapping_blocks_ba = ni->backup_table_ba;
-		ni->backup_table_ba = 0;
-		main_table_limit = ni->mapping_blocks_top_ba;
-		goto write_main_table;
-	}
-
-	/* Backup table is successfully written, record its offset */
-	ni->backup_table_ba = table_start_ba;
-
-	/* Adjust mapping_blocks_off */
-	ni->mapping_blocks_ba = table_end_ba;
-
-	nmbm_mark_tables_clean(ni);
-
-	/* The normal limit of main table */
-	main_table_limit = ni->backup_table_ba;
-
-	nlog_table_update(ni, false, table_start_ba, table_end_ba);
-
-write_main_table:
-	if (!ni->main_table_ba)
-		goto rebuild_tables;
-
-	if (!ni->backup_table_ba)
-		nmbm_mark_block_color_mgmt(ni, ni->mgmt_start_ba,
-					   ni->mapping_blocks_ba - 1);
-	else
-		nmbm_mark_block_color_mgmt(ni, ni->mgmt_start_ba,
-					   ni->backup_table_ba - 1);
-
-	/* Write main info table in its current range */
-	success = nmbm_write_info_table(ni, ni->main_table_ba,
-					main_table_limit, &table_start_ba,
-					&table_end_ba);
-	if (!success) {
-		/* If failed to write main table, go rescue procedure */
-		if (!ni->backup_table_ba)
-			goto rebuild_tables;
-
-		return nmbm_rescue_main_info_table(ni);
-	}
-
-	/* Main info table is successfully written, record its offset */
-	ni->main_table_ba = table_start_ba;
-
-	/* Adjust mapping_blocks_off */
-	if (!ni->backup_table_ba)
-		ni->mapping_blocks_ba = table_end_ba;
-
-	nmbm_mark_tables_clean(ni);
-
-	nlog_table_update(ni, true, table_start_ba, table_end_ba);
-
-	return true;
-
-rebuild_tables:
-	return nmbm_rebuild_info_table(ni);
-}
-
-/*
- * nmbm_update_info_table - Update info table
- * @ni: NMBM instance structure
- *
- * Update both main and backup info table. Return true if at least one table
- * has been successfully written.
- * This function will try to update info table repeatedly until no new bad
- * block found during updating.
- */
-static bool nmbm_update_info_table(struct nmbm_instance *ni)
-{
-	bool success;
-
-	if (ni->protected)
-		return true;
-
-	while (ni->block_state_changed || ni->block_mapping_changed) {
-		success = nmbm_update_info_table_once(ni, false);
-		if (!success) {
-			nlog_err(ni, "Failed to update info table\n");
-			return false;
-		}
-	}
-
-	return true;
-}
-
-/*
- * nmbm_map_block - Map a bad block to a unused spare block
- * @ni: NMBM instance structure
- * @lb: logic block addr to map
- */
-static bool nmbm_map_block(struct nmbm_instance *ni, uint32_t lb)
-{
-	uint32_t pb;
-	bool success;
-
-	if (ni->mapping_blocks_ba == ni->mapping_blocks_top_ba) {
-		nlog_warn(ni, "No spare unmapped blocks.\n");
-		return false;
-	}
-
-	success = nmbm_block_walk(ni, false, ni->mapping_blocks_top_ba, &pb, 0,
-				  ni->mapping_blocks_ba);
-	if (!success) {
-		nlog_warn(ni, "No spare unmapped blocks.\n");
-		nmbm_update_info_table(ni);
-		ni->mapping_blocks_top_ba = ni->mapping_blocks_ba;
-		return false;
-	}
-
-	ni->block_mapping[lb] = pb;
-	ni->mapping_blocks_top_ba--;
-	ni->block_mapping_changed++;
-
-	nlog_info(ni, "Logic block %u mapped to physical blcok %u\n", lb, pb);
-	nmbm_mark_block_color_mapped(ni, pb);
-
-	return true;
-}
-
-/*
- * nmbm_create_info_table - Create info table(s)
- * @ni: NMBM instance structure
- *
- * This function assumes that the chip has no existing info table(s)
- */
-static bool nmbm_create_info_table(struct nmbm_instance *ni)
-{
-	uint32_t lb;
-	bool success;
-
-	/* Set initial mapping_blocks_top_off */
-	success = nmbm_block_walk(ni, false, ni->signature_ba,
-				  &ni->mapping_blocks_top_ba, 1,
-				  ni->mgmt_start_ba);
-	if (!success) {
-		nlog_err(ni, "No room for spare blocks\n");
-		return false;
-	}
-
-	/* Generate info table cache */
-	nmbm_generate_info_table_cache(ni);
-
-	/* Write info table */
-	success = nmbm_rebuild_info_table(ni);
-	if (!success) {
-		nlog_err(ni, "Failed to build info tables\n");
-		return false;
-	}
-
-	/* Remap bad block(s) at end of data area */
-	for (lb = ni->data_block_count; lb < ni->mgmt_start_ba; lb++) {
-		success = nmbm_map_block(ni, lb);
-		if (!success)
-			break;
-
-		ni->data_block_count++;
-	}
-
-	/* If state table and/or mapping table changed, update info table. */
-	success = nmbm_update_info_table(ni);
-	if (!success)
-		return false;
-
-	return true;
-}
-
-/*
- * nmbm_create_new - Create NMBM on a new chip
- * @ni: NMBM instance structure
- */
-static bool nmbm_create_new(struct nmbm_instance *ni)
-{
-	bool success;
-
-	/* Determine the boundary of management blocks */
-	ni->mgmt_start_ba = ni->block_count * (NMBM_MGMT_DIV - ni->lower.max_ratio) / NMBM_MGMT_DIV;
-
-	if (ni->lower.max_reserved_blocks && ni->block_count - ni->mgmt_start_ba > ni->lower.max_reserved_blocks)
-		ni->mgmt_start_ba = ni->block_count - ni->lower.max_reserved_blocks;
-
-	nlog_info(ni, "NMBM management region starts at block %u [0x%08llx]\n",
-		  ni->mgmt_start_ba, ba2addr(ni, ni->mgmt_start_ba));
-	nmbm_mark_block_color_mgmt(ni, ni->mgmt_start_ba, ni->block_count - 1);
-
-	/* Fill block state table & mapping table */
-	nmbm_scan_badblocks(ni);
-	nmbm_build_mapping_table(ni);
-
-	/* Write signature */
-	ni->signature.header.magic = NMBM_MAGIC_SIGNATURE;
-	ni->signature.header.version = NMBM_VER;
-	ni->signature.header.size = sizeof(ni->signature);
-	ni->signature.nand_size = ni->lower.size;
-	ni->signature.block_size = ni->lower.erasesize;
-	ni->signature.page_size = ni->lower.writesize;
-	ni->signature.spare_size = ni->lower.oobsize;
-	ni->signature.mgmt_start_pb = ni->mgmt_start_ba;
-	ni->signature.max_try_count = NMBM_TRY_COUNT;
-	nmbm_update_checksum(&ni->signature.header);
-
-	if (ni->lower.flags & NMBM_F_READ_ONLY) {
-		nlog_info(ni, "NMBM has been initialized in read-only mode\n");
-		return true;
-	}
-
-	success = nmbm_write_signature(ni, ni->mgmt_start_ba,
-				       &ni->signature, &ni->signature_ba);
-	if (!success) {
-		nlog_err(ni, "Failed to write signature to a proper offset\n");
-		return false;
-	}
-
-	nlog_info(ni, "Signature has been written to block %u [0x%08llx]\n",
-		  ni->signature_ba, ba2addr(ni, ni->signature_ba));
-	nmbm_mark_block_color_signature(ni, ni->signature_ba);
-
-	/* Write info table(s) */
-	success = nmbm_create_info_table(ni);
-	if (success) {
-		nlog_info(ni, "NMBM has been successfully created\n");
-		return true;
-	}
-
-	return false;
-}
-
-/*
- * nmbm_check_info_table_header - Check if a info table header is valid
- * @ni: NMBM instance structure
- * @data: pointer to the info table header
- */
-static bool nmbm_check_info_table_header(struct nmbm_instance *ni, void *data)
-{
-	struct nmbm_info_table_header *ifthdr = data;
-
-	if (ifthdr->header.magic != NMBM_MAGIC_INFO_TABLE)
-		return false;
-
-	if (ifthdr->header.size != ni->info_table_size)
-		return false;
-
-	if (ifthdr->mapping_table_off - ifthdr->state_table_off < ni->state_table_size)
-		return false;
-
-	if (ni->info_table_size - ifthdr->mapping_table_off < ni->mapping_table_size)
-		return false;
-
-	return true;
-}
-
-/*
- * nmbm_check_info_table - Check if a whole info table is valid
- * @ni: NMBM instance structure
- * @start_ba: start block address of this table
- * @end_ba: end block address of this table
- * @data: pointer to the info table header
- * @mapping_blocks_top_ba: return the block address of top remapped block
- */
-static bool nmbm_check_info_table(struct nmbm_instance *ni, uint32_t start_ba,
-				  uint32_t end_ba, void *data,
-				  uint32_t *mapping_blocks_top_ba)
-{
-	struct nmbm_info_table_header *ifthdr = data;
-	int32_t *block_mapping = (int32_t *)((uintptr_t)data + ifthdr->mapping_table_off);
-	nmbm_bitmap_t *block_state = (nmbm_bitmap_t *)((uintptr_t)data + ifthdr->state_table_off);
-	uint32_t minimum_mapping_pb = ni->signature_ba;
-	uint32_t ba;
-
-	for (ba = 0; ba < ni->data_block_count; ba++) {
-		if ((block_mapping[ba] >= ni->data_block_count && block_mapping[ba] < end_ba) ||
-		    block_mapping[ba] == ni->signature_ba)
-			return false;
-
-		if (block_mapping[ba] >= end_ba && block_mapping[ba] < minimum_mapping_pb)
-			minimum_mapping_pb = block_mapping[ba];
-	}
-
-	for (ba = start_ba; ba < end_ba; ba++) {
-		if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
-			continue;
-
-		if (nmbm_get_block_state_raw(block_state, ba) != BLOCK_ST_GOOD)
-			return false;
-	}
-
-	*mapping_blocks_top_ba = minimum_mapping_pb - 1;
-
-	return true;
-}
-
-/*
- * nmbm_try_load_info_table - Try to load info table from a address
- * @ni: NMBM instance structure
- * @ba: start block address of the info table
- * @eba: return the block address after end of the table
- * @write_count: return the write count of this table
- * @mapping_blocks_top_ba: return the block address of top remapped block
- * @table_loaded: used to record whether ni->info_table has valid data
- */
-static bool nmbm_try_load_info_table(struct nmbm_instance *ni, uint32_t ba,
-				     uint32_t *eba, uint32_t *write_count,
-				     uint32_t *mapping_blocks_top_ba,
-				     bool table_loaded)
-{
-	struct nmbm_info_table_header *ifthdr = (void *)ni->info_table_cache;
-	uint8_t *off = ni->info_table_cache;
-	uint32_t limit = ba + size2blk(ni, ni->info_table_size);
-	uint32_t start_ba = 0, chunksize, sizeremain = ni->info_table_size;
-	bool success, checkhdr = true;
-	int ret;
-
-	while (sizeremain && ba < limit) {
-		WATCHDOG_RESET();
-
-		if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
-			goto next_block;
-
-		if (nmbm_check_bad_phys_block(ni, ba)) {
-			nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
-			goto next_block;
-		}
-
-		chunksize = sizeremain;
-		if (chunksize > ni->lower.erasesize)
-			chunksize = ni->lower.erasesize;
-
-		/* Assume block with ECC error has no info table data */
-		ret = nmbn_read_data(ni, ba2addr(ni, ba), off, chunksize);
-		if (ret < 0)
-			goto skip_bad_block;
-		else if (ret > 0)
-			return false;
-
-		if (checkhdr) {
-			success = nmbm_check_info_table_header(ni, off);
-			if (!success)
-				return false;
-
-			start_ba = ba;
-			checkhdr = false;
-		}
-
-		off += chunksize;
-		sizeremain -= chunksize;
-
-		goto next_block;
-
-	skip_bad_block:
-		/* Only mark bad in memory */
-		nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
-
-	next_block:
-		ba++;
-	}
-
-	if (sizeremain)
-		return false;
-
-	success = nmbm_check_header(ni->info_table_cache, ni->info_table_size);
-	if (!success)
-		return false;
-
-	*eba = ba;
-	*write_count = ifthdr->write_count;
-
-	success = nmbm_check_info_table(ni, start_ba, ba, ni->info_table_cache,
-					mapping_blocks_top_ba);
-	if (!success)
-		return false;
-
-	if (!table_loaded || ifthdr->write_count > ni->info_table.write_count) {
-		memcpy(&ni->info_table, ifthdr, sizeof(ni->info_table));
-		memcpy(ni->block_state,
-		       (uint8_t *)ifthdr + ifthdr->state_table_off,
-		       ni->state_table_size);
-		memcpy(ni->block_mapping,
-		       (uint8_t *)ifthdr + ifthdr->mapping_table_off,
-		       ni->mapping_table_size);
-		ni->info_table.write_count = ifthdr->write_count;
-	}
-
-	return true;
-}
-
-/*
- * nmbm_search_info_table - Search info table from specific address
- * @ni: NMBM instance structure
- * @ba: start block address to search
- * @limit: highest block address allowed for searching
- * @table_start_ba: return the start block address of this table
- * @table_end_ba: return the block address after end of this table
- * @write_count: return the write count of this table
- * @mapping_blocks_top_ba: return the block address of top remapped block
- * @table_loaded: used to record whether ni->info_table has valid data
- */
-static bool nmbm_search_info_table(struct nmbm_instance *ni, uint32_t ba,
-				   uint32_t limit, uint32_t *table_start_ba,
-				   uint32_t *table_end_ba,
-				   uint32_t *write_count,
-				   uint32_t *mapping_blocks_top_ba,
-				   bool table_loaded)
-{
-	bool success;
-
-	while (ba < limit - size2blk(ni, ni->info_table_size)) {
-		WATCHDOG_RESET();
-
-		success = nmbm_try_load_info_table(ni, ba, table_end_ba,
-						   write_count,
-						   mapping_blocks_top_ba,
-						   table_loaded);
-		if (success) {
-			*table_start_ba = ba;
-			return true;
-		}
-
-		ba++;
-	}
-
-	return false;
-}
-
-/*
- * nmbm_load_info_table - Load info table(s) from a chip
- * @ni: NMBM instance structure
- * @ba: start block address to search info table
- * @limit: highest block address allowed for searching
- */
-static bool nmbm_load_info_table(struct nmbm_instance *ni, uint32_t ba,
-				 uint32_t limit)
-{
-	uint32_t main_table_end_ba, backup_table_end_ba, table_end_ba;
-	uint32_t main_mapping_blocks_top_ba, backup_mapping_blocks_top_ba;
-	uint32_t main_table_write_count, backup_table_write_count;
-	uint32_t i;
-	bool success;
-
-	/* Set initial value */
-	ni->main_table_ba = 0;
-	ni->backup_table_ba = 0;
-	ni->info_table.write_count = 0;
-	ni->mapping_blocks_top_ba = ni->signature_ba - 1;
-	ni->data_block_count = ni->signature.mgmt_start_pb;
-
-	/* Find first info table */
-	success = nmbm_search_info_table(ni, ba, limit, &ni->main_table_ba,
-		&main_table_end_ba, &main_table_write_count,
-		&main_mapping_blocks_top_ba, false);
-	if (!success) {
-		nlog_warn(ni, "No valid info table found\n");
-		return false;
-	}
-
-	table_end_ba = main_table_end_ba;
-
-	nlog_table_found(ni, true, main_table_write_count, ni->main_table_ba,
-			 main_table_end_ba);
-
-	/* Find second info table */
-	success = nmbm_search_info_table(ni, main_table_end_ba, limit,
-		&ni->backup_table_ba, &backup_table_end_ba,
-		&backup_table_write_count, &backup_mapping_blocks_top_ba, true);
-	if (!success) {
-		nlog_warn(ni, "Second info table not found\n");
-	} else {
-		table_end_ba = backup_table_end_ba;
-
-		nlog_table_found(ni, false, backup_table_write_count,
-				 ni->backup_table_ba, backup_table_end_ba);
-	}
-
-	/* Pick mapping_blocks_top_ba */
-	if (!ni->backup_table_ba) {
-		ni->mapping_blocks_top_ba= main_mapping_blocks_top_ba;
-	} else {
-		if (main_table_write_count >= backup_table_write_count)
-			ni->mapping_blocks_top_ba = main_mapping_blocks_top_ba;
-		else
-			ni->mapping_blocks_top_ba = backup_mapping_blocks_top_ba;
-	}
-
-	/* Set final mapping_blocks_ba */
-	ni->mapping_blocks_ba = table_end_ba;
-
-	/* Set final data_block_count */
-	for (i = ni->signature.mgmt_start_pb; i > 0; i--) {
-		if (ni->block_mapping[i - 1] >= 0) {
-			ni->data_block_count = i;
-			break;
-		}
-	}
-
-	/* Debug purpose: mark mapped blocks and bad blocks */
-	for (i = 0; i < ni->data_block_count; i++) {
-		if (ni->block_mapping[i] > ni->mapping_blocks_top_ba)
-			nmbm_mark_block_color_mapped(ni, ni->block_mapping[i]);
-	}
-
-	for (i = 0; i < ni->block_count; i++) {
-		if (nmbm_get_block_state(ni, i) == BLOCK_ST_BAD)
-			nmbm_mark_block_color_bad(ni, i);
-	}
-
-	/* Regenerate the info table cache from the final selected info table */
-	nmbm_generate_info_table_cache(ni);
-
-	if (ni->lower.flags & NMBM_F_READ_ONLY)
-		return true;
-
-	/*
-	 * If only one table exists, try to write another table.
-	 * If two tables have different write count, try to update info table
-	 */
-	if (!ni->backup_table_ba) {
-		success = nmbm_rescue_single_info_table(ni);
-	} else if (main_table_write_count != backup_table_write_count) {
-		/* Mark state & mapping tables changed */
-		ni->block_state_changed = 1;
-		ni->block_mapping_changed = 1;
-
-		success = nmbm_update_single_info_table(ni,
-			main_table_write_count < backup_table_write_count);
-	} else {
-		success = true;
-	}
-
-	/*
-	 * If there is no spare unmapped blocks, or still only one table
-	 * exists, set the chip to read-only
-	 */
-	if (ni->mapping_blocks_ba == ni->mapping_blocks_top_ba) {
-		nlog_warn(ni, "No spare unmapped blocks. Device is now read-only\n");
-		ni->protected = 1;
-	} else if (!success) {
-		nlog_warn(ni, "Only one info table found. Device is now read-only\n");
-		ni->protected = 1;
-	}
-
-	return true;
-}
-
-/*
- * nmbm_load_existing - Load NMBM from a new chip
- * @ni: NMBM instance structure
- */
-static bool nmbm_load_existing(struct nmbm_instance *ni)
-{
-	bool success;
-
-	/* Calculate the boundary of management blocks */
-	ni->mgmt_start_ba = ni->signature.mgmt_start_pb;
-
-	nlog_debug(ni, "NMBM management region starts at block %u [0x%08llx]\n",
-		   ni->mgmt_start_ba, ba2addr(ni, ni->mgmt_start_ba));
-	nmbm_mark_block_color_mgmt(ni, ni->mgmt_start_ba,
-				   ni->signature_ba - 1);
-
-	/* Look for info table(s) */
-	success = nmbm_load_info_table(ni, ni->mgmt_start_ba,
-				       ni->signature_ba);
-	if (success) {
-		nlog_info(ni, "NMBM has been successfully attached %s\n",
-			  (ni->lower.flags & NMBM_F_READ_ONLY) ? "in read-only mode" : "");
-		return true;
-	}
-
-	if (!(ni->lower.flags & NMBM_F_CREATE))
-		return false;
-
-	/* Fill block state table & mapping table */
-	nmbm_scan_badblocks(ni);
-	nmbm_build_mapping_table(ni);
-
-	if (ni->lower.flags & NMBM_F_READ_ONLY) {
-		nlog_info(ni, "NMBM has been initialized in read-only mode\n");
-		return true;
-	}
-
-	/* Write info table(s) */
-	success = nmbm_create_info_table(ni);
-	if (success) {
-		nlog_info(ni, "NMBM has been successfully created\n");
-		return true;
-	}
-
-	return false;
-}
-
-/*
- * nmbm_find_signature - Find signature in the lower NAND chip
- * @ni: NMBM instance structure
- * @signature_ba: used for storing block address of the signature
- * @signature_ba: return the actual block address of signature block
- *
- * Find a valid signature from a specific range in the lower NAND chip,
- * from bottom (highest address) to top (lowest address)
- *
- * Return true if found.
- */
-static bool nmbm_find_signature(struct nmbm_instance *ni,
-				struct nmbm_signature *signature,
-				uint32_t *signature_ba)
-{
-	struct nmbm_signature sig;
-	uint64_t off, addr;
-	uint32_t block_count, ba, limit;
-	bool success;
-	int ret;
-
-	/* Calculate top and bottom block address */
-	block_count = ni->lower.size >> ni->erasesize_shift;
-	ba = block_count;
-	limit = (block_count / NMBM_MGMT_DIV) * (NMBM_MGMT_DIV - ni->lower.max_ratio);
-	if (ni->lower.max_reserved_blocks && block_count - limit > ni->lower.max_reserved_blocks)
-		limit = block_count - ni->lower.max_reserved_blocks;
-
-	while (ba >= limit) {
-		WATCHDOG_RESET();
-
-		ba--;
-		addr = ba2addr(ni, ba);
-
-		if (nmbm_check_bad_phys_block(ni, ba))
-			continue;
-
-		/* Check every page.
-		 * As long as at leaset one page contains valid signature,
-		 * the block is treated as a valid signature block.
-		 */
-		for (off = 0; off < ni->lower.erasesize;
-		     off += ni->lower.writesize) {
-			WATCHDOG_RESET();
-
-			ret = nmbn_read_data(ni, addr + off, &sig,
-					     sizeof(sig));
-			if (ret)
-				continue;
-
-			/* Check for header size and checksum */
-			success = nmbm_check_header(&sig, sizeof(sig));
-			if (!success)
-				continue;
-
-			/* Check for header magic */
-			if (sig.header.magic == NMBM_MAGIC_SIGNATURE) {
-				/* Found it */
-				memcpy(signature, &sig, sizeof(sig));
-				*signature_ba = ba;
-				return true;
-			}
-		}
-	};
-
-	return false;
-}
-
-/*
- * is_power_of_2_u64 - Check whether a 64-bit integer is power of 2
- * @n: number to check
- */
-static bool is_power_of_2_u64(uint64_t n)
-{
-	return (n != 0 && ((n & (n - 1)) == 0));
-}
-
-/*
- * nmbm_check_lower_members - Validate the members of lower NAND device
- * @nld: Lower NAND chip structure
- */
-static bool nmbm_check_lower_members(struct nmbm_lower_device *nld)
-{
-
-	if (!nld->size || !is_power_of_2_u64(nld->size)) {
-		nmbm_log_lower(nld, NMBM_LOG_ERR,
-			       "Chip size %llu is not valid\n", nld->size);
-		return false;
-	}
-
-	if (!nld->erasesize || !is_power_of_2(nld->erasesize)) {
-		nmbm_log_lower(nld, NMBM_LOG_ERR,
-			       "Block size %u is not valid\n", nld->erasesize);
-		return false;
-	}
-
-	if (!nld->writesize || !is_power_of_2(nld->writesize)) {
-		nmbm_log_lower(nld, NMBM_LOG_ERR,
-			       "Page size %u is not valid\n", nld->writesize);
-		return false;
-	}
-
-	if (!nld->oobsize || !is_power_of_2(nld->oobsize)) {
-		nmbm_log_lower(nld, NMBM_LOG_ERR,
-			       "Page spare size %u is not valid\n", nld->oobsize);
-		return false;
-	}
-
-	if (!nld->read_page) {
-		nmbm_log_lower(nld, NMBM_LOG_ERR, "read_page() is required\n");
-		return false;
-	}
-
-	if (!(nld->flags & NMBM_F_READ_ONLY) && (!nld->write_page || !nld->erase_block)) {
-		nmbm_log_lower(nld, NMBM_LOG_ERR,
-			       "write_page() and erase_block() are required\n");
-		return false;
-	}
-
-	/* Data sanity check */
-	if (!nld->max_ratio)
-		nld->max_ratio = 1;
-
-	if (nld->max_ratio >= NMBM_MGMT_DIV - 1) {
-		nmbm_log_lower(nld, NMBM_LOG_ERR,
-			       "max ratio %u is invalid\n", nld->max_ratio);
-		return false;
-	}
-
-	if (nld->max_reserved_blocks && nld->max_reserved_blocks < NMBM_MGMT_BLOCKS_MIN) {
-		nmbm_log_lower(nld, NMBM_LOG_ERR,
-			       "max reserved blocks %u is too small\n", nld->max_reserved_blocks);
-		return false;
-	}
-
-	return true;
-}
-
-/*
- * nmbm_calc_structure_size - Calculate the instance structure size
- * @nld: NMBM lower device structure
- */
-size_t nmbm_calc_structure_size(struct nmbm_lower_device *nld)
-{
-	uint32_t state_table_size, mapping_table_size, info_table_size;
-	uint32_t block_count;
-
-	block_count = nmbm_lldiv(nld->size, nld->erasesize);
-
-	/* Calculate info table size */
-	state_table_size = ((block_count + NMBM_BITMAP_BLOCKS_PER_UNIT - 1) /
-			    NMBM_BITMAP_BLOCKS_PER_UNIT) * NMBM_BITMAP_UNIT_SIZE;
-	mapping_table_size = block_count * sizeof(int32_t);
-
-	info_table_size = NMBM_ALIGN(sizeof(struct nmbm_info_table_header),
-				     nld->writesize);
-	info_table_size += NMBM_ALIGN(state_table_size, nld->writesize);
-	info_table_size += NMBM_ALIGN(mapping_table_size, nld->writesize);
-
-	return info_table_size + state_table_size + mapping_table_size +
-	       nld->writesize + nld->oobsize + sizeof(struct nmbm_instance);
-}
-
-/*
- * nmbm_init_structure - Initialize members of instance structure
- * @ni: NMBM instance structure
- */
-static void nmbm_init_structure(struct nmbm_instance *ni)
-{
-	uint32_t pages_per_block, blocks_per_chip;
-	uintptr_t ptr;
-
-	pages_per_block = ni->lower.erasesize / ni->lower.writesize;
-	blocks_per_chip = nmbm_lldiv(ni->lower.size, ni->lower.erasesize);
-
-	ni->rawpage_size = ni->lower.writesize + ni->lower.oobsize;
-	ni->rawblock_size = pages_per_block * ni->rawpage_size;
-	ni->rawchip_size = blocks_per_chip * ni->rawblock_size;
-
-	ni->writesize_mask = ni->lower.writesize - 1;
-	ni->erasesize_mask = ni->lower.erasesize - 1;
-
-	ni->writesize_shift = ffs(ni->lower.writesize) - 1;
-	ni->erasesize_shift = ffs(ni->lower.erasesize) - 1;
-
-	/* Calculate number of block this chip */
-	ni->block_count = ni->lower.size >> ni->erasesize_shift;
-
-	/* Calculate info table size */
-	ni->state_table_size = ((ni->block_count + NMBM_BITMAP_BLOCKS_PER_UNIT - 1) /
-				NMBM_BITMAP_BLOCKS_PER_UNIT) * NMBM_BITMAP_UNIT_SIZE;
-	ni->mapping_table_size = ni->block_count * sizeof(*ni->block_mapping);
-
-	ni->info_table_size = NMBM_ALIGN(sizeof(ni->info_table),
-					 ni->lower.writesize);
-	ni->info_table.state_table_off = ni->info_table_size;
-
-	ni->info_table_size += NMBM_ALIGN(ni->state_table_size,
-					  ni->lower.writesize);
-	ni->info_table.mapping_table_off = ni->info_table_size;
-
-	ni->info_table_size += NMBM_ALIGN(ni->mapping_table_size,
-					  ni->lower.writesize);
-
-	ni->info_table_spare_blocks = nmbm_get_spare_block_count(
-		size2blk(ni, ni->info_table_size));
-
-	/* Assign memory to members */
-	ptr = (uintptr_t)ni + sizeof(*ni);
-
-	ni->info_table_cache = (void *)ptr;
-	ptr += ni->info_table_size;
-
-	ni->block_state = (void *)ptr;
-	ptr += ni->state_table_size;
-
-	ni->block_mapping = (void *)ptr;
-	ptr += ni->mapping_table_size;
-
-	ni->page_cache = (uint8_t *)ptr;
-
-	/* Initialize block state table */
-	ni->block_state_changed = 0;
-	memset(ni->block_state, 0xff, ni->state_table_size);
-
-	/* Initialize block mapping table */
-	ni->block_mapping_changed = 0;
-}
-
-/*
- * nmbm_attach - Attach to a lower device
- * @nld: NMBM lower device structure
- * @ni: NMBM instance structure
- */
-int nmbm_attach(struct nmbm_lower_device *nld, struct nmbm_instance *ni)
-{
-	bool success;
-
-	if (!nld || !ni)
-		return -EINVAL;
-
-	/* Set default log level */
-	ni->log_display_level = NMBM_DEFAULT_LOG_LEVEL;
-
-	/* Check lower members */
-	success = nmbm_check_lower_members(nld);
-	if (!success)
-		return -EINVAL;
-
-	/* Initialize NMBM instance */
-	memcpy(&ni->lower, nld, sizeof(struct nmbm_lower_device));
-	nmbm_init_structure(ni);
-
-	success = nmbm_find_signature(ni, &ni->signature, &ni->signature_ba);
-	if (!success) {
-		if (!(nld->flags & NMBM_F_CREATE)) {
-			nlog_err(ni, "Signature not found\n");
-			return -ENODEV;
-		}
-
-		success = nmbm_create_new(ni);
-		if (!success)
-			return -ENODEV;
-
-		return 0;
-	}
-
-	nlog_info(ni, "Signature found at block %u [0x%08llx]\n",
-		  ni->signature_ba, ba2addr(ni, ni->signature_ba));
-	nmbm_mark_block_color_signature(ni, ni->signature_ba);
-
-	if (ni->signature.header.version != NMBM_VER) {
-		nlog_err(ni, "NMBM version %u.%u is not supported\n",
-			 NMBM_VERSION_MAJOR_GET(ni->signature.header.version),
-			 NMBM_VERSION_MINOR_GET(ni->signature.header.version));
-		return -EINVAL;
-	}
-
-	if (ni->signature.nand_size != nld->size ||
-	    ni->signature.block_size != nld->erasesize ||
-	    ni->signature.page_size != nld->writesize ||
-	    ni->signature.spare_size != nld->oobsize) {
-		nlog_err(ni, "NMBM configuration mismatch\n");
-		return -EINVAL;
-	}
-
-	success = nmbm_load_existing(ni);
-	if (!success)
-		return -ENODEV;
-
-	return 0;
-}
-
-/*
- * nmbm_detach - Detach from a lower device, and save all tables
- * @ni: NMBM instance structure
- */
-int nmbm_detach(struct nmbm_instance *ni)
-{
-	if (!ni)
-		return -EINVAL;
-
-	if (!(ni->lower.flags & NMBM_F_READ_ONLY))
-		nmbm_update_info_table(ni);
-
-	nmbm_mark_block_color_normal(ni, 0, ni->block_count - 1);
-
-	return 0;
-}
-
-/*
|
|
- * nmbm_erase_logic_block - Erase a logic block
|
|
- * @ni: NMBM instance structure
|
|
- * @nmbm_erase_logic_block: logic block address
|
|
- *
|
|
- * Logic block will be mapped to physical block before erasing.
|
|
- * Bad block found during erasinh will be remapped to a good block if there is
|
|
- * still at least one good spare block available.
|
|
- */
|
|
-static int nmbm_erase_logic_block(struct nmbm_instance *ni, uint32_t block_addr)
|
|
-{
|
|
- uint32_t pb;
|
|
- bool success;
|
|
-
|
|
-retry:
|
|
- /* Map logic block to physical block */
|
|
- pb = ni->block_mapping[block_addr];
|
|
-
|
|
- /* Whether the logic block is good (has valid mapping) */
|
|
- if ((int32_t)pb < 0) {
|
|
- nlog_debug(ni, "Logic block %u is a bad block\n", block_addr);
|
|
- return -EIO;
|
|
- }
|
|
-
|
|
- /* Remap logic block if current physical block is a bad block */
|
|
- if (nmbm_get_block_state(ni, pb) == BLOCK_ST_BAD ||
|
|
- nmbm_get_block_state(ni, pb) == BLOCK_ST_NEED_REMAP)
|
|
- goto remap_logic_block;
|
|
-
|
|
- /* Insurance to detect unexpected bad block marked by user */
|
|
- if (nmbm_check_bad_phys_block(ni, pb)) {
|
|
- nlog_warn(ni, "Found unexpected bad block possibly marked by user\n");
|
|
- nmbm_set_block_state(ni, pb, BLOCK_ST_BAD);
|
|
- goto remap_logic_block;
|
|
- }
|
|
-
|
|
- success = nmbm_erase_block_and_check(ni, pb);
|
|
- if (success)
|
|
- return 0;
|
|
-
|
|
- /* Mark bad block */
|
|
- nmbm_mark_phys_bad_block(ni, pb);
|
|
- nmbm_set_block_state(ni, pb, BLOCK_ST_BAD);
|
|
-
|
|
-remap_logic_block:
|
|
- /* Try to assign a new block */
|
|
- success = nmbm_map_block(ni, block_addr);
|
|
- if (!success) {
|
|
- /* Mark logic block unusable, and update info table */
|
|
- ni->block_mapping[block_addr] = -1;
|
|
- if (nmbm_get_block_state(ni, pb) != BLOCK_ST_NEED_REMAP)
|
|
- nmbm_set_block_state(ni, pb, BLOCK_ST_BAD);
|
|
- nmbm_update_info_table(ni);
|
|
- return -EIO;
|
|
- }
|
|
-
|
|
- /* Update info table before erasing */
|
|
- if (nmbm_get_block_state(ni, pb) != BLOCK_ST_NEED_REMAP)
|
|
- nmbm_set_block_state(ni, pb, BLOCK_ST_BAD);
|
|
- nmbm_update_info_table(ni);
|
|
-
|
|
- goto retry;
|
|
-}
|
|
-
|
|
-/*
|
|
- * nmbm_erase_block_range - Erase logic blocks
|
|
- * @ni: NMBM instance structure
|
|
- * @addr: logic linear address
|
|
- * @size: erase range
|
|
- * @failed_addr: return failed block address if error occurs
|
|
- */
|
|
-int nmbm_erase_block_range(struct nmbm_instance *ni, uint64_t addr,
|
|
- uint64_t size, uint64_t *failed_addr)
|
|
-{
|
|
- uint32_t start_ba, end_ba;
|
|
- int ret;
|
|
-
|
|
- if (!ni)
|
|
- return -EINVAL;
|
|
-
|
|
- /* Sanity check */
|
|
- if (ni->protected || (ni->lower.flags & NMBM_F_READ_ONLY)) {
|
|
- nlog_debug(ni, "Device is forced read-only\n");
|
|
- return -EROFS;
|
|
- }
|
|
-
|
|
- if (addr >= ba2addr(ni, ni->data_block_count)) {
|
|
- nlog_err(ni, "Address 0x%llx is invalid\n", addr);
|
|
- return -EINVAL;
|
|
- }
|
|
-
|
|
- if (addr + size > ba2addr(ni, ni->data_block_count)) {
|
|
- nlog_err(ni, "Erase range 0xllxu is too large\n", size);
|
|
- return -EINVAL;
|
|
- }
|
|
-
|
|
- if (!size) {
|
|
- nlog_warn(ni, "No blocks to be erased\n");
|
|
- return 0;
|
|
- }
|
|
-
|
|
- start_ba = addr2ba(ni, addr);
|
|
- end_ba = addr2ba(ni, addr + size - 1);
|
|
-
|
|
- while (start_ba <= end_ba) {
|
|
- WATCHDOG_RESET();
|
|
-
|
|
- ret = nmbm_erase_logic_block(ni, start_ba);
|
|
- if (ret) {
|
|
- if (failed_addr)
|
|
- *failed_addr = ba2addr(ni, start_ba);
|
|
- return ret;
|
|
- }
|
|
-
|
|
- start_ba++;
|
|
- }
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-/*
|
|
- * nmbm_read_logic_page - Read page based on logic address
|
|
- * @ni: NMBM instance structure
|
|
- * @addr: logic linear address
|
|
- * @data: buffer to store main data. optional.
|
|
- * @oob: buffer to store oob data. optional.
|
|
- * @mode: read mode
|
|
- *
|
|
- * Return 0 for success, positive value for corrected bitflip count,
|
|
- * -EBADMSG for ecc error, other negative values for other errors
|
|
- */
|
|
-static int nmbm_read_logic_page(struct nmbm_instance *ni, uint64_t addr,
|
|
- void *data, void *oob, enum nmbm_oob_mode mode)
|
|
-{
|
|
- uint32_t lb, pb, offset;
|
|
- uint64_t paddr;
|
|
-
|
|
- /* Extract block address and in-block offset */
|
|
- lb = addr2ba(ni, addr);
|
|
- offset = addr & ni->erasesize_mask;
|
|
-
|
|
- /* Map logic block to physical block */
|
|
- pb = ni->block_mapping[lb];
|
|
-
|
|
- /* Whether the logic block is good (has valid mapping) */
|
|
- if ((int32_t)pb < 0) {
|
|
- nlog_debug(ni, "Logic block %u is a bad block\n", lb);
|
|
- return -EIO;
|
|
- }
|
|
-
|
|
- /* Fail if physical block is marked bad */
|
|
- if (nmbm_get_block_state(ni, pb) == BLOCK_ST_BAD)
|
|
- return -EIO;
|
|
-
|
|
- /* Assemble new address */
|
|
- paddr = ba2addr(ni, pb) + offset;
|
|
-
|
|
- return nmbm_read_phys_page(ni, paddr, data, oob, mode);
|
|
-}
|
|
-
|
|
-/*
|
|
- * nmbm_read_single_page - Read one page based on logic address
|
|
- * @ni: NMBM instance structure
|
|
- * @addr: logic linear address
|
|
- * @data: buffer to store main data. optional.
|
|
- * @oob: buffer to store oob data. optional.
|
|
- * @mode: read mode
|
|
- *
|
|
- * Return 0 for success, positive value for corrected bitflip count,
|
|
- * -EBADMSG for ecc error, other negative values for other errors
|
|
- */
|
|
-int nmbm_read_single_page(struct nmbm_instance *ni, uint64_t addr, void *data,
|
|
- void *oob, enum nmbm_oob_mode mode)
|
|
-{
|
|
- if (!ni)
|
|
- return -EINVAL;
|
|
-
|
|
- /* Sanity check */
|
|
- if (ni->protected) {
|
|
- nlog_debug(ni, "Device is forced read-only\n");
|
|
- return -EROFS;
|
|
- }
|
|
-
|
|
- if (addr >= ba2addr(ni, ni->data_block_count)) {
|
|
- nlog_err(ni, "Address 0x%llx is invalid\n", addr);
|
|
- return -EINVAL;
|
|
- }
|
|
-
|
|
- return nmbm_read_logic_page(ni, addr, data, oob, mode);
|
|
-}
|
|
-
|
|
-/*
|
|
- * nmbm_read_range - Read data without oob
|
|
- * @ni: NMBM instance structure
|
|
- * @addr: logic linear address
|
|
- * @size: data size to read
|
|
- * @data: buffer to store main data to be read
|
|
- * @mode: read mode
|
|
- * @retlen: return actual data size read
|
|
- *
|
|
- * Return 0 for success, positive value for corrected bitflip count,
|
|
- * -EBADMSG for ecc error, other negative values for other errors
|
|
- */
|
|
-int nmbm_read_range(struct nmbm_instance *ni, uint64_t addr, size_t size,
|
|
- void *data, enum nmbm_oob_mode mode, size_t *retlen)
|
|
-{
|
|
- uint64_t off = addr;
|
|
- uint8_t *ptr = data;
|
|
- size_t sizeremain = size, chunksize, leading;
|
|
- bool has_ecc_err = false;
|
|
- int ret, max_bitflips = 0;
|
|
-
|
|
- if (!ni)
|
|
- return -EINVAL;
|
|
-
|
|
- /* Sanity check */
|
|
- if (ni->protected) {
|
|
- nlog_debug(ni, "Device is forced read-only\n");
|
|
- return -EROFS;
|
|
- }
|
|
-
|
|
- if (addr >= ba2addr(ni, ni->data_block_count)) {
|
|
- nlog_err(ni, "Address 0x%llx is invalid\n", addr);
|
|
- return -EINVAL;
|
|
- }
|
|
-
|
|
- if (addr + size > ba2addr(ni, ni->data_block_count)) {
|
|
- nlog_err(ni, "Read range 0x%llx is too large\n", size);
|
|
- return -EINVAL;
|
|
- }
|
|
-
|
|
- if (!size) {
|
|
- nlog_warn(ni, "No data to be read\n");
|
|
- return 0;
|
|
- }
|
|
-
|
|
- while (sizeremain) {
|
|
- WATCHDOG_RESET();
|
|
-
|
|
- leading = off & ni->writesize_mask;
|
|
- chunksize = ni->lower.writesize - leading;
|
|
- if (chunksize > sizeremain)
|
|
- chunksize = sizeremain;
|
|
-
|
|
- if (chunksize == ni->lower.writesize) {
|
|
- ret = nmbm_read_logic_page(ni, off - leading, ptr,
|
|
- NULL, mode);
|
|
- if (ret < 0 && ret != -EBADMSG)
|
|
- break;
|
|
- } else {
|
|
- ret = nmbm_read_logic_page(ni, off - leading,
|
|
- ni->page_cache, NULL,
|
|
- mode);
|
|
- if (ret < 0 && ret != -EBADMSG)
|
|
- break;
|
|
-
|
|
- memcpy(ptr, ni->page_cache + leading, chunksize);
|
|
- }
|
|
-
|
|
- if (ret == -EBADMSG)
|
|
- has_ecc_err = true;
|
|
-
|
|
- if (ret > max_bitflips)
|
|
- max_bitflips = ret;
|
|
-
|
|
- off += chunksize;
|
|
- ptr += chunksize;
|
|
- sizeremain -= chunksize;
|
|
- }
|
|
-
|
|
- if (retlen)
|
|
- *retlen = size - sizeremain;
|
|
-
|
|
- if (ret < 0 && ret != -EBADMSG)
|
|
- return ret;
|
|
-
|
|
- if (has_ecc_err)
|
|
- return -EBADMSG;
|
|
-
|
|
- return max_bitflips;
|
|
-}
|
|
-
|
|
-/*
|
|
- * nmbm_write_logic_page - Read page based on logic address
|
|
- * @ni: NMBM instance structure
|
|
- * @addr: logic linear address
|
|
- * @data: buffer contains main data. optional.
|
|
- * @oob: buffer contains oob data. optional.
|
|
- * @mode: write mode
|
|
- */
|
|
-static int nmbm_write_logic_page(struct nmbm_instance *ni, uint64_t addr,
|
|
- const void *data, const void *oob,
|
|
- enum nmbm_oob_mode mode)
|
|
-{
|
|
- uint32_t lb, pb, offset;
|
|
- uint64_t paddr;
|
|
- bool success;
|
|
-
|
|
- /* Extract block address and in-block offset */
|
|
- lb = addr2ba(ni, addr);
|
|
- offset = addr & ni->erasesize_mask;
|
|
-
|
|
- /* Map logic block to physical block */
|
|
- pb = ni->block_mapping[lb];
|
|
-
|
|
- /* Whether the logic block is good (has valid mapping) */
|
|
- if ((int32_t)pb < 0) {
|
|
- nlog_debug(ni, "Logic block %u is a bad block\n", lb);
|
|
- return -EIO;
|
|
- }
|
|
-
|
|
- /* Fail if physical block is marked bad */
|
|
- if (nmbm_get_block_state(ni, pb) == BLOCK_ST_BAD)
|
|
- return -EIO;
|
|
-
|
|
- /* Assemble new address */
|
|
- paddr = ba2addr(ni, pb) + offset;
|
|
-
|
|
- success = nmbm_write_phys_page(ni, paddr, data, oob, mode);
|
|
- if (success)
|
|
- return 0;
|
|
-
|
|
- /*
|
|
- * Do not remap bad block here. Just mark this block in state table.
|
|
- * Remap this block on erasing.
|
|
- */
|
|
- nmbm_set_block_state(ni, pb, BLOCK_ST_NEED_REMAP);
|
|
- nmbm_update_info_table(ni);
|
|
-
|
|
- return -EIO;
|
|
-}
|
|
-
|
|
-/*
|
|
- * nmbm_write_single_page - Write one page based on logic address
|
|
- * @ni: NMBM instance structure
|
|
- * @addr: logic linear address
|
|
- * @data: buffer contains main data. optional.
|
|
- * @oob: buffer contains oob data. optional.
|
|
- * @mode: write mode
|
|
- */
|
|
-int nmbm_write_single_page(struct nmbm_instance *ni, uint64_t addr,
|
|
- const void *data, const void *oob,
|
|
- enum nmbm_oob_mode mode)
|
|
-{
|
|
- if (!ni)
|
|
- return -EINVAL;
|
|
-
|
|
- /* Sanity check */
|
|
- if (ni->protected || (ni->lower.flags & NMBM_F_READ_ONLY)) {
|
|
- nlog_debug(ni, "Device is forced read-only\n");
|
|
- return -EROFS;
|
|
- }
|
|
-
|
|
- if (addr >= ba2addr(ni, ni->data_block_count)) {
|
|
- nlog_err(ni, "Address 0x%llx is invalid\n", addr);
|
|
- return -EINVAL;
|
|
- }
|
|
-
|
|
- return nmbm_write_logic_page(ni, addr, data, oob, mode);
|
|
-}
|
|
-
|
|
-/*
|
|
- * nmbm_write_range - Write data without oob
|
|
- * @ni: NMBM instance structure
|
|
- * @addr: logic linear address
|
|
- * @size: data size to write
|
|
- * @data: buffer contains data to be written
|
|
- * @mode: write mode
|
|
- * @retlen: return actual data size written
|
|
- */
|
|
-int nmbm_write_range(struct nmbm_instance *ni, uint64_t addr, size_t size,
|
|
- const void *data, enum nmbm_oob_mode mode,
|
|
- size_t *retlen)
|
|
-{
|
|
- uint64_t off = addr;
|
|
- const uint8_t *ptr = data;
|
|
- size_t sizeremain = size, chunksize, leading;
|
|
- int ret;
|
|
-
|
|
- if (!ni)
|
|
- return -EINVAL;
|
|
-
|
|
- /* Sanity check */
|
|
- if (ni->protected || (ni->lower.flags & NMBM_F_READ_ONLY)) {
|
|
- nlog_debug(ni, "Device is forced read-only\n");
|
|
- return -EROFS;
|
|
- }
|
|
-
|
|
- if (addr >= ba2addr(ni, ni->data_block_count)) {
|
|
- nlog_err(ni, "Address 0x%llx is invalid\n", addr);
|
|
- return -EINVAL;
|
|
- }
|
|
-
|
|
- if (addr + size > ba2addr(ni, ni->data_block_count)) {
|
|
- nlog_err(ni, "Write size 0x%zx is too large\n", size);
|
|
- return -EINVAL;
|
|
- }
|
|
-
|
|
- if (!size) {
|
|
- nlog_warn(ni, "No data to be written\n");
|
|
- return 0;
|
|
- }
|
|
-
|
|
- while (sizeremain) {
|
|
- WATCHDOG_RESET();
|
|
-
|
|
- leading = off & ni->writesize_mask;
|
|
- chunksize = ni->lower.writesize - leading;
|
|
- if (chunksize > sizeremain)
|
|
- chunksize = sizeremain;
|
|
-
|
|
- if (chunksize == ni->lower.writesize) {
|
|
- ret = nmbm_write_logic_page(ni, off - leading, ptr,
|
|
- NULL, mode);
|
|
- if (ret)
|
|
- break;
|
|
- } else {
|
|
- memset(ni->page_cache, 0xff, leading);
|
|
- memcpy(ni->page_cache + leading, ptr, chunksize);
|
|
-
|
|
- ret = nmbm_write_logic_page(ni, off - leading,
|
|
- ni->page_cache, NULL,
|
|
- mode);
|
|
- if (ret)
|
|
- break;
|
|
- }
|
|
-
|
|
- off += chunksize;
|
|
- ptr += chunksize;
|
|
- sizeremain -= chunksize;
|
|
- }
|
|
-
|
|
- if (retlen)
|
|
- *retlen = size - sizeremain;
|
|
-
|
|
- return ret;
|
|
-}
|
|
-
|
|
-/*
|
|
- * nmbm_check_bad_block - Check whether a logic block is usable
|
|
- * @ni: NMBM instance structure
|
|
- * @addr: logic linear address
|
|
- */
|
|
-int nmbm_check_bad_block(struct nmbm_instance *ni, uint64_t addr)
|
|
-{
|
|
- uint32_t lb, pb;
|
|
-
|
|
- if (!ni)
|
|
- return -EINVAL;
|
|
-
|
|
- if (addr >= ba2addr(ni, ni->data_block_count)) {
|
|
- nlog_err(ni, "Address 0x%llx is invalid\n", addr);
|
|
- return -EINVAL;
|
|
- }
|
|
-
|
|
- lb = addr2ba(ni, addr);
|
|
-
|
|
- /* Map logic block to physical block */
|
|
- pb = ni->block_mapping[lb];
|
|
-
|
|
- if ((int32_t)pb < 0)
|
|
- return 1;
|
|
-
|
|
- if (nmbm_get_block_state(ni, pb) == BLOCK_ST_BAD)
|
|
- return 1;
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-/*
|
|
- * nmbm_mark_bad_block - Mark a logic block unusable
|
|
- * @ni: NMBM instance structure
|
|
- * @addr: logic linear address
|
|
- */
|
|
-int nmbm_mark_bad_block(struct nmbm_instance *ni, uint64_t addr)
|
|
-{
|
|
- uint32_t lb, pb;
|
|
-
|
|
- if (!ni)
|
|
- return -EINVAL;
|
|
-
|
|
- /* Sanity check */
|
|
- if (ni->protected || (ni->lower.flags & NMBM_F_READ_ONLY)) {
|
|
- nlog_debug(ni, "Device is forced read-only\n");
|
|
- return -EROFS;
|
|
- }
|
|
-
|
|
- if (addr >= ba2addr(ni, ni->data_block_count)) {
|
|
- nlog_err(ni, "Address 0x%llx is invalid\n", addr);
|
|
- return -EINVAL;
|
|
- }
|
|
-
|
|
- lb = addr2ba(ni, addr);
|
|
-
|
|
- /* Map logic block to physical block */
|
|
- pb = ni->block_mapping[lb];
|
|
-
|
|
- if ((int32_t)pb < 0)
|
|
- return 0;
|
|
-
|
|
- ni->block_mapping[lb] = -1;
|
|
- nmbm_mark_phys_bad_block(ni, pb);
|
|
- nmbm_set_block_state(ni, pb, BLOCK_ST_BAD);
|
|
- nmbm_update_info_table(ni);
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-/*
|
|
- * nmbm_get_avail_size - Get available user data size
|
|
- * @ni: NMBM instance structure
|
|
- */
|
|
-uint64_t nmbm_get_avail_size(struct nmbm_instance *ni)
|
|
-{
|
|
- if (!ni)
|
|
- return 0;
|
|
-
|
|
- return (uint64_t)ni->data_block_count << ni->erasesize_shift;
|
|
-}
|
|
-
|
|
-/*
|
|
- * nmbm_get_lower_device - Get lower device structure
|
|
- * @ni: NMBM instance structure
|
|
- * @nld: pointer to hold the data of lower device structure
|
|
- */
|
|
-int nmbm_get_lower_device(struct nmbm_instance *ni, struct nmbm_lower_device *nld)
|
|
-{
|
|
- if (!ni)
|
|
- return -EINVAL;
|
|
-
|
|
- if (nld)
|
|
- memcpy(nld, &ni->lower, sizeof(*nld));
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-#include "nmbm-debug.inl"
|
|
diff --git a/target/linux/ramips/files/drivers/mtd/nmbm/nmbm-debug.h b/target/linux/ramips/files/drivers/mtd/nmbm/nmbm-debug.h
deleted file mode 100644
index 7f9dfed9e188..000000000000
--- a/target/linux/ramips/files/drivers/mtd/nmbm/nmbm-debug.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
- *
- * Debug addons for NAND Mapped-block Management (NMBM)
- *
- * Author: Weijie Gao <weijie.gao@mediatek.com>
- */
-
-#ifndef _NMBM_DEBUG_H_
-#define _NMBM_DEBUG_H_
-
-#define nmbm_mark_block_color_normal(ni, start_ba, end_ba)
-#define nmbm_mark_block_color_bad(ni, ba)
-#define nmbm_mark_block_color_mgmt(ni, start_ba, end_ba)
-#define nmbm_mark_block_color_signature(ni, ba)
-#define nmbm_mark_block_color_info_table(ni, start_ba, end_ba)
-#define nmbm_mark_block_color_mapped(ni, ba)
-
-#endif /* _NMBM_DEBUG_H_ */
diff --git a/target/linux/ramips/files/drivers/mtd/nmbm/nmbm-debug.inl b/target/linux/ramips/files/drivers/mtd/nmbm/nmbm-debug.inl
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/target/linux/ramips/files/drivers/mtd/nmbm/nmbm-mtd.c b/target/linux/ramips/files/drivers/mtd/nmbm/nmbm-mtd.c
deleted file mode 100644
index a3e9e1832deb..000000000000
--- a/target/linux/ramips/files/drivers/mtd/nmbm/nmbm-mtd.c
+++ /dev/null
@@ -1,795 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * MTD layer for NAND Mapped-block Management (NMBM)
- *
- * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
- *
- * Author: Weijie Gao <weijie.gao@mediatek.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/flashchip.h>
-#include <linux/mtd/partitions.h>
-#include <linux/of_platform.h>
-#include <linux/kern_levels.h>
-
-#include "nmbm-private.h"
-#include "nmbm-debug.h"
-
-#define NMBM_MAX_RATIO_DEFAULT 1
-#define NMBM_MAX_BLOCKS_DEFAULT 256
-
-struct nmbm_mtd {
-	struct mtd_info upper;
-	struct mtd_info *lower;
-
-	struct nmbm_instance *ni;
-	uint8_t *page_cache;
-
-	flstate_t state;
-	spinlock_t lock;
-	wait_queue_head_t wq;
-
-	struct device *dev;
-	struct list_head node;
-};
-
-struct list_head nmbm_devs;
-static DEFINE_MUTEX(nmbm_devs_lock);
-
-static int nmbm_lower_read_page(void *arg, uint64_t addr, void *buf, void *oob,
-				enum nmbm_oob_mode mode)
-{
-	struct nmbm_mtd *nm = arg;
-	struct mtd_oob_ops ops;
-	int ret;
-
-	memset(&ops, 0, sizeof(ops));
-
-	switch (mode) {
-	case NMBM_MODE_PLACE_OOB:
-		ops.mode = MTD_OPS_PLACE_OOB;
-		break;
-	case NMBM_MODE_AUTO_OOB:
-		ops.mode = MTD_OPS_AUTO_OOB;
-		break;
-	case NMBM_MODE_RAW:
-		ops.mode = MTD_OPS_RAW;
-		break;
-	default:
-		pr_debug("%s: unsupported NMBM mode: %u\n", __func__, mode);
-		return -ENOTSUPP;
-	}
-
-	if (buf) {
-		ops.datbuf = buf;
-		ops.len = nm->lower->writesize;
-	}
-
-	if (oob) {
-		ops.oobbuf = oob;
-		ops.ooblen = mtd_oobavail(nm->lower, &ops);
-	}
-
-	ret = mtd_read_oob(nm->lower, addr, &ops);
-	nm->upper.ecc_stats.corrected = nm->lower->ecc_stats.corrected;
-	nm->upper.ecc_stats.failed = nm->lower->ecc_stats.failed;
-
-	/* Report error on failure (including ecc error) */
-	if (ret < 0 && ret != -EUCLEAN)
-		return ret;
-
-	/*
-	 * Since mtd_read_oob() won't report exact bitflips, what we can know
-	 * is whether bitflips exceeds the threshold.
-	 * We want the -EUCLEAN to be passed to the upper layer, but not the
-	 * error value itself. To achieve this, report bitflips above the
-	 * threshold.
-	 */
-
-	if (ret == -EUCLEAN) {
-		return min_t(u32, nm->lower->bitflip_threshold + 1,
-			     nm->lower->ecc_strength);
-	}
-
-	/* For bitflips less than the threshold, return 0 */
-	return 0;
-}
-
-static int nmbm_lower_write_page(void *arg, uint64_t addr, const void *buf,
-				 const void *oob, enum nmbm_oob_mode mode)
-{
-	struct nmbm_mtd *nm = arg;
-	struct mtd_oob_ops ops;
-
-	memset(&ops, 0, sizeof(ops));
-
-	switch (mode) {
-	case NMBM_MODE_PLACE_OOB:
-		ops.mode = MTD_OPS_PLACE_OOB;
-		break;
-	case NMBM_MODE_AUTO_OOB:
-		ops.mode = MTD_OPS_AUTO_OOB;
-		break;
-	case NMBM_MODE_RAW:
-		ops.mode = MTD_OPS_RAW;
-		break;
-	default:
-		pr_debug("%s: unsupported NMBM mode: %u\n", __func__, mode);
-		return -ENOTSUPP;
-	}
-
-	if (buf) {
-		ops.datbuf = (uint8_t *)buf;
-		ops.len = nm->lower->writesize;
-	}
-
-	if (oob) {
-		ops.oobbuf = (uint8_t *)oob;
-		ops.ooblen = mtd_oobavail(nm->lower, &ops);
-	}
-
-	return mtd_write_oob(nm->lower, addr, &ops);
-}
-
-static int nmbm_lower_erase_block(void *arg, uint64_t addr)
-{
-	struct nmbm_mtd *nm = arg;
-	struct erase_info ei;
-
-	memset(&ei, 0, sizeof(ei));
-
-	ei.addr = addr;
-	ei.len = nm->lower->erasesize;
-
-	return mtd_erase(nm->lower, &ei);
-}
-
-static int nmbm_lower_is_bad_block(void *arg, uint64_t addr)
-{
-	struct nmbm_mtd *nm = arg;
-
-	return mtd_block_isbad(nm->lower, addr);
-}
-
-static int nmbm_lower_mark_bad_block(void *arg, uint64_t addr)
-{
-	struct nmbm_mtd *nm = arg;
-
-	return mtd_block_markbad(nm->lower, addr);
-}
-
-static void nmbm_lower_log(void *arg, enum nmbm_log_category level,
-			   const char *fmt, va_list ap)
-{
-	struct nmbm_mtd *nm = arg;
-	char *msg;
-	char *kl;
-
-	msg = kvasprintf(GFP_KERNEL, fmt, ap);
-	if (!msg) {
-		dev_warn(nm->dev, "unable to print log\n");
-		return;
-	}
-
-	switch (level) {
-	case NMBM_LOG_DEBUG:
-		kl = KERN_DEBUG;
-		break;
-	case NMBM_LOG_WARN:
-		kl = KERN_WARNING;
-		break;
-	case NMBM_LOG_ERR:
-		kl = KERN_ERR;
-		break;
-	case NMBM_LOG_EMERG:
-		kl = KERN_EMERG;
-		break;
-	default:
-		kl = KERN_INFO ;
-	}
-
-	dev_printk(kl, nm->dev, "%s", msg);
-
-	kfree(msg);
-}
-
-static int nmbm_get_device(struct nmbm_mtd *nm, int new_state)
-{
-	DECLARE_WAITQUEUE(wait, current);
-
-retry:
-	spin_lock(&nm->lock);
-
-	if (nm->state == FL_READY) {
-		nm->state = new_state;
-		spin_unlock(&nm->lock);
-		return 0;
-	}
-
-	if (new_state == FL_PM_SUSPENDED) {
-		if (nm->state == FL_PM_SUSPENDED) {
-			spin_unlock(&nm->lock);
-			return 0;
-		}
-	}
-
-	set_current_state(TASK_UNINTERRUPTIBLE);
-	add_wait_queue(&nm->wq, &wait);
-	spin_unlock(&nm->lock);
-	schedule();
-	remove_wait_queue(&nm->wq, &wait);
-	goto retry;
-}
-
-static void nmbm_release_device(struct nmbm_mtd *nm)
-{
-	spin_lock(&nm->lock);
-	nm->state = FL_READY;
-	wake_up(&nm->wq);
-	spin_unlock(&nm->lock);
-}
-
-static int nmbm_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
-{
-	struct nmbm_mtd *nm = container_of(mtd, struct nmbm_mtd, upper);
-	int ret;
-
-	nmbm_get_device(nm, FL_ERASING);
-
-	ret = nmbm_erase_block_range(nm->ni, instr->addr, instr->len,
-				     &instr->fail_addr);
-
-	nmbm_release_device(nm);
-
-	if (!ret)
-		return 0;
-
-	return -EIO;
-}
-
-static int nmbm_mtd_read_data(struct nmbm_mtd *nm, uint64_t addr,
-			      struct mtd_oob_ops *ops, enum nmbm_oob_mode mode)
-{
-	size_t len, ooblen, maxooblen, chklen;
-	uint32_t col, ooboffs;
-	uint8_t *datcache, *oobcache;
-	bool has_ecc_err = false;
-	int ret, max_bitflips = 0;
-
-	col = addr & nm->lower->writesize_mask;
-	addr &= ~nm->lower->writesize_mask;
-	maxooblen = mtd_oobavail(nm->lower, ops);
-	ooboffs = ops->ooboffs;
-	ooblen = ops->ooblen;
-	len = ops->len;
-
-	datcache = len ? nm->page_cache : NULL;
-	oobcache = ooblen ? nm->page_cache + nm->lower->writesize : NULL;
-
-	ops->oobretlen = 0;
-	ops->retlen = 0;
-
-	while (len || ooblen) {
-		ret = nmbm_read_single_page(nm->ni, addr, datcache, oobcache,
-					    mode);
-		if (ret < 0 && ret != -EBADMSG)
-			return ret;
-
-		/* Continue reading on ecc error */
-		if (ret == -EBADMSG)
-			has_ecc_err = true;
-
-		/* Record the maximum bitflips between pages */
-		if (ret > max_bitflips)
-			max_bitflips = ret;
-
-		if (len) {
-			/* Move data */
-			chklen = nm->lower->writesize - col;
-			if (chklen > len)
-				chklen = len;
-
-			memcpy(ops->datbuf + ops->retlen, datcache + col,
-			       chklen);
-			len -= chklen;
-			col = 0; /* (col + chklen) % */
-			ops->retlen += chklen;
-		}
-
-		if (ooblen) {
-			/* Move oob */
-			chklen = maxooblen - ooboffs;
-			if (chklen > ooblen)
-				chklen = ooblen;
-
-			memcpy(ops->oobbuf + ops->oobretlen, oobcache + ooboffs,
-			       chklen);
-			ooblen -= chklen;
-			ooboffs = 0; /* (ooboffs + chklen) % maxooblen; */
-			ops->oobretlen += chklen;
-		}
-
-		addr += nm->lower->writesize;
-	}
-
-	if (has_ecc_err)
-		return -EBADMSG;
-
-	return max_bitflips;
-}
-
-static int nmbm_mtd_read_oob(struct mtd_info *mtd, loff_t from,
-			     struct mtd_oob_ops *ops)
-{
-	struct nmbm_mtd *nm = container_of(mtd, struct nmbm_mtd, upper);
-	uint32_t maxooblen;
-	enum nmbm_oob_mode mode;
-	int ret;
-
-	if (!ops->oobbuf && !ops->datbuf) {
-		if (ops->ooblen || ops->len)
-			return -EINVAL;
-
-		return 0;
-	}
-
-	switch (ops->mode) {
-	case MTD_OPS_PLACE_OOB:
-		mode = NMBM_MODE_PLACE_OOB;
-		break;
-	case MTD_OPS_AUTO_OOB:
-		mode = NMBM_MODE_AUTO_OOB;
-		break;
-	case MTD_OPS_RAW:
-		mode = NMBM_MODE_RAW;
-		break;
-	default:
-		pr_debug("%s: unsupported oob mode: %u\n", __func__, ops->mode);
-		return -ENOTSUPP;
-	}
-
-	maxooblen = mtd_oobavail(mtd, ops);
-
-	/* Do not allow read past end of device */
-	if (ops->datbuf && (from + ops->len) > mtd->size) {
-		pr_debug("%s: attempt to read beyond end of device\n",
-			 __func__);
-		return -EINVAL;
-	}
-
-	if (!ops->oobbuf) {
-		nmbm_get_device(nm, FL_READING);
-
-		/* Optimized for reading data only */
-		ret = nmbm_read_range(nm->ni, from, ops->len, ops->datbuf,
-				      mode, &ops->retlen);
-
-		nmbm_release_device(nm);
-
-		return ret;
-	}
-
-	if (unlikely(ops->ooboffs >= maxooblen)) {
-		pr_debug("%s: attempt to start read outside oob\n",
-			 __func__);
-		return -EINVAL;
-	}
-
-	if (unlikely(from >= mtd->size ||
-	    ops->ooboffs + ops->ooblen > ((mtd->size >> mtd->writesize_shift) -
-	    (from >> mtd->writesize_shift)) * maxooblen)) {
-		pr_debug("%s: attempt to read beyond end of device\n",
-			 __func__);
-		return -EINVAL;
-	}
-
-	nmbm_get_device(nm, FL_READING);
-	ret = nmbm_mtd_read_data(nm, from, ops, mode);
-	nmbm_release_device(nm);
-
-	return ret;
-}
-
-static int nmbm_mtd_write_data(struct nmbm_mtd *nm, uint64_t addr,
-			       struct mtd_oob_ops *ops, enum nmbm_oob_mode mode)
-{
-	size_t len, ooblen, maxooblen, chklen;
-	uint32_t col, ooboffs;
-	uint8_t *datcache, *oobcache;
-	int ret;
-
-	col = addr & nm->lower->writesize_mask;
-	addr &= ~nm->lower->writesize_mask;
-	maxooblen = mtd_oobavail(nm->lower, ops);
-	ooboffs = ops->ooboffs;
-	ooblen = ops->ooblen;
-	len = ops->len;
-
-	datcache = len ? nm->page_cache : NULL;
-	oobcache = ooblen ? nm->page_cache + nm->lower->writesize : NULL;
-
-	ops->oobretlen = 0;
-	ops->retlen = 0;
-
-	while (len || ooblen) {
-		if (len) {
-			/* Move data */
-			chklen = nm->lower->writesize - col;
-			if (chklen > len)
-				chklen = len;
-
-			memset(datcache, 0xff, col);
-			memcpy(datcache + col, ops->datbuf + ops->retlen,
-			       chklen);
-			memset(datcache + col + chklen, 0xff,
-			       nm->lower->writesize - col - chklen);
-			len -= chklen;
-			col = 0; /* (col + chklen) % */
-			ops->retlen += chklen;
-		}
-
-		if (ooblen) {
-			/* Move oob */
-			chklen = maxooblen - ooboffs;
-			if (chklen > ooblen)
-				chklen = ooblen;
-
-			memset(oobcache, 0xff, ooboffs);
-			memcpy(oobcache + ooboffs,
-			       ops->oobbuf + ops->oobretlen, chklen);
-			memset(oobcache + ooboffs + chklen, 0xff,
-			       nm->lower->oobsize - ooboffs - chklen);
-			ooblen -= chklen;
-			ooboffs = 0; /* (ooboffs + chklen) % maxooblen; */
-			ops->oobretlen += chklen;
-		}
-
-		ret = nmbm_write_single_page(nm->ni, addr, datcache, oobcache,
-					     mode);
-		if (ret)
-			return ret;
-
-		addr += nm->lower->writesize;
-	}
-
-	return 0;
-}
-
-static int nmbm_mtd_write_oob(struct mtd_info *mtd, loff_t to,
-			      struct mtd_oob_ops *ops)
-{
-	struct nmbm_mtd *nm = container_of(mtd, struct nmbm_mtd, upper);
-	enum nmbm_oob_mode mode;
-	uint32_t maxooblen;
-	int ret;
-
-	if (!ops->oobbuf && !ops->datbuf) {
-		if (ops->ooblen || ops->len)
-			return -EINVAL;
-
-		return 0;
-	}
-
-	switch (ops->mode) {
-	case MTD_OPS_PLACE_OOB:
-		mode = NMBM_MODE_PLACE_OOB;
-		break;
-	case MTD_OPS_AUTO_OOB:
-		mode = NMBM_MODE_AUTO_OOB;
-		break;
-	case MTD_OPS_RAW:
-		mode = NMBM_MODE_RAW;
-		break;
-	default:
-		pr_debug("%s: unsupported oob mode: %u\n", __func__,
-			 ops->mode);
-		return -ENOTSUPP;
-	}
-
-	maxooblen = mtd_oobavail(mtd, ops);
-
-	/* Do not allow write past end of device */
-	if (ops->datbuf && (to + ops->len) > mtd->size) {
-		pr_debug("%s: attempt to write beyond end of device\n",
-			 __func__);
-		return -EINVAL;
-	}
-
-	if (!ops->oobbuf) {
-		nmbm_get_device(nm, FL_WRITING);
-
-		/* Optimized for writing data only */
-		ret = nmbm_write_range(nm->ni, to, ops->len, ops->datbuf,
-				       mode, &ops->retlen);
-
-		nmbm_release_device(nm);
-
-		return ret;
-	}
-
-	if (unlikely(ops->ooboffs >= maxooblen)) {
-		pr_debug("%s: attempt to start write outside oob\n",
-			 __func__);
-		return -EINVAL;
-	}
-
-	if (unlikely(to >= mtd->size ||
-	    ops->ooboffs + ops->ooblen > ((mtd->size >> mtd->writesize_shift) -
-	    (to >> mtd->writesize_shift)) * maxooblen)) {
-		pr_debug("%s: attempt to write beyond end of device\n",
-			 __func__);
-		return -EINVAL;
-	}
-
-	nmbm_get_device(nm, FL_WRITING);
-	ret = nmbm_mtd_write_data(nm, to, ops, mode);
-	nmbm_release_device(nm);
-
-	return ret;
-}
-
-static int nmbm_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
-{
-	struct nmbm_mtd *nm = container_of(mtd, struct nmbm_mtd, upper);
-	int ret;
-
-	nmbm_get_device(nm, FL_READING);
-	ret = nmbm_check_bad_block(nm->ni, offs);
-	nmbm_release_device(nm);
-
-	return ret;
-}
-
-static int nmbm_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
-{
-	struct nmbm_mtd *nm = container_of(mtd, struct nmbm_mtd, upper);
-	int ret;
-
-	nmbm_get_device(nm, FL_WRITING);
-	ret = nmbm_mark_bad_block(nm->ni, offs);
-	nmbm_release_device(nm);
-
-	return ret;
-}
-
-static void nmbm_mtd_shutdown(struct mtd_info *mtd)
-{
-	struct nmbm_mtd *nm = container_of(mtd, struct nmbm_mtd, upper);
-
-	nmbm_get_device(nm, FL_PM_SUSPENDED);
-}
-
-static int nmbm_probe(struct platform_device *pdev)
-{
-	struct device_node *mtd_np, *np = pdev->dev.of_node;
-	uint32_t max_ratio, max_reserved_blocks, alloc_size;
-	bool forced_create, empty_page_ecc_ok;
-	struct nmbm_lower_device nld;
-	struct mtd_info *lower, *mtd;
-	struct nmbm_mtd *nm;
-	const char *mtdname;
-	int ret;
-
-	mtd_np = of_parse_phandle(np, "lower-mtd-device", 0);
-	if (mtd_np) {
-		lower = get_mtd_device_by_node(mtd_np);
-		if (!IS_ERR(lower))
-			goto do_attach_mtd;
-
-		dev_dbg(&pdev->dev, "failed to find mtd device by phandle\n");
-		return -EPROBE_DEFER;
-	}
-
-	ret = of_property_read_string(np, "lower-mtd-name", &mtdname);
-	if (!ret) {
-		lower = get_mtd_device_nm(mtdname);
-		if (!IS_ERR(lower))
-			goto do_attach_mtd;
-
-		dev_dbg(&pdev->dev, "failed to find mtd device by name '%s'\n",
-			mtdname);
-		return -EPROBE_DEFER;
-	}
-
-do_attach_mtd:
-	if (of_property_read_u32(np, "max-ratio", &max_ratio))
-		max_ratio = NMBM_MAX_RATIO_DEFAULT;
-
-	if (of_property_read_u32(np, "max-reserved-blocks",
-				 &max_reserved_blocks))
-		max_reserved_blocks = NMBM_MAX_BLOCKS_DEFAULT;
-
-	forced_create = of_property_read_bool(np, "forced-create");
-	empty_page_ecc_ok = of_property_read_bool(np,
-						  "empty-page-ecc-protected");
-
-	memset(&nld, 0, sizeof(nld));
-
-	nld.flags = 0;
-
-	if (forced_create)
-		nld.flags |= NMBM_F_CREATE;
-
-	if (empty_page_ecc_ok)
-		nld.flags |= NMBM_F_EMPTY_PAGE_ECC_OK;
-
-	nld.max_ratio = max_ratio;
-	nld.max_reserved_blocks = max_reserved_blocks;
-
-	nld.size = lower->size;
-	nld.erasesize = lower->erasesize;
-	nld.writesize = lower->writesize;
-	nld.oobsize = lower->oobsize;
-	nld.oobavail = lower->oobavail;
-
-	nld.read_page = nmbm_lower_read_page;
-	nld.write_page = nmbm_lower_write_page;
-	nld.erase_block = nmbm_lower_erase_block;
-	nld.is_bad_block = nmbm_lower_is_bad_block;
-	nld.mark_bad_block = nmbm_lower_mark_bad_block;
-
-	nld.logprint = nmbm_lower_log;
-
-	alloc_size = nmbm_calc_structure_size(&nld);
-
-	nm = devm_kzalloc(&pdev->dev, sizeof(*nm) + alloc_size +
-			  lower->writesize + lower->oobsize, GFP_KERNEL);
-	if (!nm) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	nm->ni = (void *)nm + sizeof(*nm);
-	nm->page_cache = (uint8_t *)nm->ni + alloc_size;
-	nm->lower = lower;
-	nm->dev = &pdev->dev;
-
-	INIT_LIST_HEAD(&nm->node);
-	spin_lock_init(&nm->lock);
-	init_waitqueue_head(&nm->wq);
-
-	nld.arg = nm;
-
-	ret = nmbm_attach(&nld, nm->ni);
-	if (ret)
-		goto out;
-
-	/* Initialize upper mtd */
-	mtd = &nm->upper;
-
-	mtd->owner = THIS_MODULE;
-	mtd->dev.parent = &pdev->dev;
-	mtd->type = lower->type;
-	mtd->flags = lower->flags;
-
-	mtd->size = (uint64_t)nm->ni->data_block_count * lower->erasesize;
-	mtd->erasesize = lower->erasesize;
-	mtd->writesize = lower->writesize;
-	mtd->writebufsize = lower->writesize;
-	mtd->oobsize = lower->oobsize;
-	mtd->oobavail = lower->oobavail;
-
-	mtd->erasesize_shift = lower->erasesize_shift;
-	mtd->writesize_shift = lower->writesize_shift;
-	mtd->erasesize_mask = lower->erasesize_mask;
-	mtd->writesize_mask = lower->writesize_mask;
-
-	mtd->bitflip_threshold = lower->bitflip_threshold;
-
-	mtd->ooblayout = lower->ooblayout;
-
-	mtd->ecc_step_size = lower->ecc_step_size;
-	mtd->ecc_strength = lower->ecc_strength;
-
-	mtd->numeraseregions = lower->numeraseregions;
-	mtd->eraseregions = lower->eraseregions;
-
-	mtd->_erase = nmbm_mtd_erase;
-	mtd->_read_oob = nmbm_mtd_read_oob;
-	mtd->_write_oob = nmbm_mtd_write_oob;
-	mtd->_block_isbad = nmbm_mtd_block_isbad;
-	mtd->_block_markbad = nmbm_mtd_block_markbad;
-	mtd->_reboot = nmbm_mtd_shutdown;
-
-	mtd_set_of_node(mtd, np);
-
-	ret = mtd_device_register(mtd, NULL, 0);
-	if (ret) {
-		dev_err(&pdev->dev, "failed to register mtd device\n");
-		nmbm_detach(nm->ni);
-		goto out;
-	}
-
-	platform_set_drvdata(pdev, nm);
-
-	mutex_lock(&nmbm_devs_lock);
-	list_add_tail(&nm->node, &nmbm_devs);
-	mutex_unlock(&nmbm_devs_lock);
-
-	return 0;
-
-out:
-	if (nm)
-		devm_kfree(&pdev->dev, nm);
-
-	put_mtd_device(lower);
-
-	return ret;
-}
-
-static int nmbm_remove(struct platform_device *pdev)
-{
-	struct nmbm_mtd *nm = platform_get_drvdata(pdev);
-	struct mtd_info *lower = nm->lower;
-	int ret;
-
-	ret = mtd_device_unregister(&nm->upper);
-	if (ret)
-		return ret;
-
-	nmbm_detach(nm->ni);
-
-	mutex_lock(&nmbm_devs_lock);
-	list_add_tail(&nm->node, &nmbm_devs);
-	mutex_unlock(&nmbm_devs_lock);
-
-	devm_kfree(&pdev->dev, nm);
-
-	put_mtd_device(lower);
-
-	platform_set_drvdata(pdev, NULL);
-
-	return 0;
-}
-
-static const struct of_device_id nmbm_ids[] = {
-	{ .compatible = "generic,nmbm" },
-	{ },
-};
-
-MODULE_DEVICE_TABLE(of, nmbm_ids);
-
-static struct platform_driver nmbm_driver = {
-	.probe = nmbm_probe,
-	.remove = nmbm_remove,
-	.driver = {
-		.name = "nmbm",
-		.of_match_table = nmbm_ids,
-	},
-};
-
-static int __init nmbm_init(void)
-{
-	int ret;
-
-	INIT_LIST_HEAD(&nmbm_devs);
-
-	ret = platform_driver_register(&nmbm_driver);
-	if (ret) {
-		pr_err("failed to register nmbm driver\n");
-		return ret;
-	}
-
-	return 0;
-}
-module_init(nmbm_init);
-
-static void __exit nmbm_exit(void)
-{
-	platform_driver_unregister(&nmbm_driver);
-}
-module_exit(nmbm_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Weijie Gao <weijie.gao@mediatek.com>");
-MODULE_DESCRIPTION("NAND mapping block management");
diff --git a/target/linux/ramips/files/drivers/mtd/nmbm/nmbm-private.h b/target/linux/ramips/files/drivers/mtd/nmbm/nmbm-private.h
deleted file mode 100644
index c285aeb9ddb0..000000000000
--- a/target/linux/ramips/files/drivers/mtd/nmbm/nmbm-private.h
+++ /dev/null
@@ -1,137 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/*
- * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
- *
- * Definitions for NAND Mapped-block Management (NMBM)
- *
- * Author: Weijie Gao <weijie.gao@mediatek.com>
- */
-
-#ifndef _NMBM_PRIVATE_H_
-#define _NMBM_PRIVATE_H_
-
-#include <nmbm/nmbm.h>
-
-#define NMBM_MAGIC_SIGNATURE 0x304d4d4e /* NMM0 */
-#define NMBM_MAGIC_INFO_TABLE 0x314d4d4e /* NMM1 */
-
-#define NMBM_VERSION_MAJOR_S 0
-#define NMBM_VERSION_MAJOR_M 0xffff
-#define NMBM_VERSION_MINOR_S 16
-#define NMBM_VERSION_MINOR_M 0xffff
-#define NMBM_VERSION_MAKE(major, minor) (((major) & NMBM_VERSION_MAJOR_M) | \
-					 (((minor) & NMBM_VERSION_MINOR_M) << \
-					  NMBM_VERSION_MINOR_S))
-#define NMBM_VERSION_MAJOR_GET(ver) (((ver) >> NMBM_VERSION_MAJOR_S) & \
-				     NMBM_VERSION_MAJOR_M)
-#define NMBM_VERSION_MINOR_GET(ver) (((ver) >> NMBM_VERSION_MINOR_S) & \
-				     NMBM_VERSION_MINOR_M)
-
-typedef uint32_t nmbm_bitmap_t;
-#define NMBM_BITMAP_UNIT_SIZE (sizeof(nmbm_bitmap_t))
-#define NMBM_BITMAP_BITS_PER_BLOCK 2
-#define NMBM_BITMAP_BITS_PER_UNIT (8 * sizeof(nmbm_bitmap_t))
-#define NMBM_BITMAP_BLOCKS_PER_UNIT (NMBM_BITMAP_BITS_PER_UNIT / \
-				     NMBM_BITMAP_BITS_PER_BLOCK)
-
-#define NMBM_SPARE_BLOCK_MULTI 1
-#define NMBM_SPARE_BLOCK_DIV 2
-#define NMBM_SPARE_BLOCK_MIN 2
-
-#define NMBM_MGMT_DIV 16
-#define NMBM_MGMT_BLOCKS_MIN 32
-
-#define NMBM_TRY_COUNT 3
-
-#define BLOCK_ST_BAD 0
-#define BLOCK_ST_NEED_REMAP 2
-#define BLOCK_ST_GOOD 3
-#define BLOCK_ST_MASK 3
-
-struct nmbm_header {
-	uint32_t magic;
-	uint32_t version;
-	uint32_t size;
-	uint32_t checksum;
-};
-
-struct nmbm_signature {
-	struct nmbm_header header;
-	uint64_t nand_size;
-	uint32_t block_size;
-	uint32_t page_size;
-	uint32_t spare_size;
-	uint32_t mgmt_start_pb;
-	uint8_t max_try_count;
-	uint8_t padding[3];
-};
-
-struct nmbm_info_table_header {
-	struct nmbm_header header;
-	uint32_t write_count;
-	uint32_t state_table_off;
-	uint32_t mapping_table_off;
-	uint32_t padding;
-};
-
-struct nmbm_instance {
-	struct nmbm_lower_device lower;
-
-	uint32_t rawpage_size;
-	uint32_t rawblock_size;
-	uint32_t rawchip_size;
-
-	uint32_t writesize_mask;
-	uint32_t erasesize_mask;
-	uint16_t writesize_shift;
-	uint16_t erasesize_shift;
-
-	struct nmbm_signature signature;
-
-	uint8_t *info_table_cache;
-	uint32_t info_table_size;
-	uint32_t info_table_spare_blocks;
-	struct nmbm_info_table_header info_table;
-
-	nmbm_bitmap_t *block_state;
-	uint32_t block_state_changed;
-	uint32_t state_table_size;
-
-	int32_t *block_mapping;
-	uint32_t block_mapping_changed;
-	uint32_t mapping_table_size;
-
-	uint8_t *page_cache;
-
-	int protected;
-
-	uint32_t block_count;
-	uint32_t data_block_count;
-
-	uint32_t mgmt_start_ba;
-	uint32_t main_table_ba;
-	uint32_t backup_table_ba;
-	uint32_t mapping_blocks_ba;
-	uint32_t mapping_blocks_top_ba;
-	uint32_t signature_ba;
-
-	enum nmbm_log_category log_display_level;
-};
-
-/* Log utilities */
-#define nlog_debug(ni, fmt, ...) \
-	nmbm_log(ni, NMBM_LOG_DEBUG, fmt, ##__VA_ARGS__)
-
-#define nlog_info(ni, fmt, ...) \
-	nmbm_log(ni, NMBM_LOG_INFO, fmt, ##__VA_ARGS__)
-
-#define nlog_warn(ni, fmt, ...) \
-	nmbm_log(ni, NMBM_LOG_WARN, fmt, ##__VA_ARGS__)
-
-#define nlog_err(ni, fmt, ...) \
-	nmbm_log(ni, NMBM_LOG_ERR, fmt, ##__VA_ARGS__)
-
-#define nlog_emerg(ni, fmt, ...) \
-	nmbm_log(ni, NMBM_LOG_EMERG, fmt, ##__VA_ARGS__)
-
-#endif /* _NMBM_PRIVATE_H_ */
diff --git a/target/linux/ramips/files/include/nmbm/nmbm-os.h b/target/linux/ramips/files/include/nmbm/nmbm-os.h
deleted file mode 100644
index 1cae854df159..000000000000
--- a/target/linux/ramips/files/include/nmbm/nmbm-os.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
- *
- * OS-dependent definitions for NAND Mapped-block Management (NMBM)
- *
- * Author: Weijie Gao <weijie.gao@mediatek.com>
- */
-
-#ifndef _NMBM_OS_H_
-#define _NMBM_OS_H_
-
-#include <linux/kernel.h>
-#include <linux/limits.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/crc32.h>
-#include <linux/log2.h>
-#include <asm/div64.h>
-
-static inline uint32_t nmbm_crc32(uint32_t crcval, const void *buf, size_t size)
-{
-	uint chksz;
-	const unsigned char *p = buf;
-
-	while (size) {
-		if (size > UINT_MAX)
-			chksz = UINT_MAX;
-		else
-			chksz = (uint)size;
-
-		crcval = crc32_le(crcval, p, chksz);
-		size -= chksz;
-		p += chksz;
-	}
-
-	return crcval;
-}
-
-static inline uint32_t nmbm_lldiv(uint64_t dividend, uint32_t divisor)
-{
-#if BITS_PER_LONG == 64
-	return dividend / divisor;
-#else
-	do_div(dividend, divisor);
-	return dividend;
-#endif
-}
-
-#define WATCHDOG_RESET()
-
-#ifdef CONFIG_NMBM_LOG_LEVEL_DEBUG
-#define NMBM_DEFAULT_LOG_LEVEL 0
-#elif defined(NMBM_LOG_LEVEL_INFO)
-#define NMBM_DEFAULT_LOG_LEVEL 1
-#elif defined(NMBM_LOG_LEVEL_WARN)
-#define NMBM_DEFAULT_LOG_LEVEL 2
-#elif defined(NMBM_LOG_LEVEL_ERR)
-#define NMBM_DEFAULT_LOG_LEVEL 3
-#elif defined(NMBM_LOG_LEVEL_EMERG)
-#define NMBM_DEFAULT_LOG_LEVEL 4
-#elif defined(NMBM_LOG_LEVEL_NONE)
-#define NMBM_DEFAULT_LOG_LEVEL 5
-#else
-#define NMBM_DEFAULT_LOG_LEVEL 1
-#endif
-
-#endif /* _NMBM_OS_H_ */
diff --git a/target/linux/ramips/files/include/nmbm/nmbm.h b/target/linux/ramips/files/include/nmbm/nmbm.h
deleted file mode 100644
index c0400988b1f6..000000000000
--- a/target/linux/ramips/files/include/nmbm/nmbm.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/*
- * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
- *
- * Definitions for NAND Mapped-block Management (NMBM)
- *
- * Author: Weijie Gao <weijie.gao@mediatek.com>
- */
-
-#ifndef _NMBM_H_
-#define _NMBM_H_
-
-#include <nmbm/nmbm-os.h>
-
-enum nmbm_log_category {
-	NMBM_LOG_DEBUG,
-	NMBM_LOG_INFO,
-	NMBM_LOG_WARN,
-	NMBM_LOG_ERR,
-	NMBM_LOG_EMERG,
-
-	__NMBM_LOG_MAX
-};
-
-enum nmbm_oob_mode {
-	NMBM_MODE_PLACE_OOB,
-	NMBM_MODE_AUTO_OOB,
-	NMBM_MODE_RAW,
-
-	__NMBM_MODE_MAX
-};
-
-struct nmbm_lower_device {
-	uint32_t max_ratio;
-	uint32_t max_reserved_blocks;
-	int flags;
-
-	uint64_t size;
-	uint32_t erasesize;
-	uint32_t writesize;
-	uint32_t oobsize;
-	uint32_t oobavail;
-
-	void *arg;
-	int (*reset_chip)(void *arg);
-
-	/*
-	 * read_page:
-	 * return 0 if succeeds
-	 * return positive number for ecc error
-	 * return negative number for other errors
-	 */
-	int (*read_page)(void *arg, uint64_t addr, void *buf, void *oob, enum nmbm_oob_mode mode);
-	int (*write_page)(void *arg, uint64_t addr, const void *buf, const void *oob, enum nmbm_oob_mode mode);
-	int (*erase_block)(void *arg, uint64_t addr);
-
-	int (*is_bad_block)(void *arg, uint64_t addr);
-	int (*mark_bad_block)(void *arg, uint64_t addr);
-
-	/* OS-dependent logging function */
-	void (*logprint)(void *arg, enum nmbm_log_category level, const char *fmt, va_list ap);
-};
-
-struct nmbm_instance;
-
-/* Create NMBM if management area not found, or not complete */
-#define NMBM_F_CREATE 0x01
-
-/* Empty page is also protected by ECC, and bitflip(s) can be corrected */
-#define NMBM_F_EMPTY_PAGE_ECC_OK 0x02
-
-/* Do not write anything back to flash */
-#define NMBM_F_READ_ONLY 0x04
-
-size_t nmbm_calc_structure_size(struct nmbm_lower_device *nld);
-int nmbm_attach(struct nmbm_lower_device *nld, struct nmbm_instance *ni);
-int nmbm_detach(struct nmbm_instance *ni);
-
-enum nmbm_log_category nmbm_set_log_level(struct nmbm_instance *ni,
-					  enum nmbm_log_category level);
-
-int nmbm_erase_block_range(struct nmbm_instance *ni, uint64_t addr,
-			   uint64_t size, uint64_t *failed_addr);
-int nmbm_read_single_page(struct nmbm_instance *ni, uint64_t addr, void *data,
-			  void *oob, enum nmbm_oob_mode mode);
-int nmbm_read_range(struct nmbm_instance *ni, uint64_t addr, size_t size,
-		    void *data, enum nmbm_oob_mode mode, size_t *retlen);
-int nmbm_write_single_page(struct nmbm_instance *ni, uint64_t addr,
-			   const void *data, const void *oob,
-			   enum nmbm_oob_mode mode);
-int nmbm_write_range(struct nmbm_instance *ni, uint64_t addr, size_t size,
-		     const void *data, enum nmbm_oob_mode mode,
-		     size_t *retlen);
-
-int nmbm_check_bad_block(struct nmbm_instance *ni, uint64_t addr);
-int nmbm_mark_bad_block(struct nmbm_instance *ni, uint64_t addr);
-
-uint64_t nmbm_get_avail_size(struct nmbm_instance *ni);
-
-int nmbm_get_lower_device(struct nmbm_instance *ni, struct nmbm_lower_device *nld);
-
-#endif /* _NMBM_H_ */
diff --git a/target/linux/ramips/image/mt7621.mk b/target/linux/ramips/image/mt7621.mk
index 57c3efcf2b92..81ef6bbd142e 100644
--- a/target/linux/ramips/image/mt7621.mk
+++ b/target/linux/ramips/image/mt7621.mk
@@ -111,11 +111,20 @@ define Device/dsa-migration
 endef
 
 define Device/actiontec_web7200
+  $(Device/dsa-migration)
   DEVICE_VENDOR := Actiontec
   DEVICE_MODEL := EB7200
   DEVICE_PACKAGES += kmod-mt7603 kmod-mt7915e kmod-usb3 uboot-envtools kmod-i2c-core
+  LOADER_TYPE := bin
+  KERNEL_SIZE := 4096k
+  BLOCKSIZE := 128k
+  PAGESIZE := 2048
+  UBINIZE_OPTS := -E 5
+  KERNEL_INITRAMFS := kernel-bin | lzma | loader-kernel | gzip | fit-relocate gzip $$(KDIR)/image-$$(firstword $$(DEVICE_DTS)).dtb
   KERNEL := kernel-bin | relocate-kernel | lzma | fit-relocate lzma $$(KDIR)/image-$$(firstword $$(DEVICE_DTS)).dtb
-  IMAGE_SIZE := 15552k
+  IMAGES += factory.bin
+  IMAGE/sysupgrade.bin := sysupgrade-tar | append-metadata
+  IMAGE/factory.bin := append-kernel | pad-to $$(KERNEL_SIZE) | append-ubi
 endef
 TARGET_DEVICES += actiontec_web7200
 
@@ -1404,6 +1413,15 @@ define Device/ubnt_unifi-6-lite
 endef
 TARGET_DEVICES += ubnt_unifi-6-lite
 
+define Device/ubnt_unifi-nanohd
+  $(Device/dsa-migration)
+  DEVICE_VENDOR := Ubiquiti
+  DEVICE_MODEL := UniFi nanoHD
+  DEVICE_PACKAGES += kmod-mt7603 kmod-mt7615e kmod-mt7615-firmware
+  IMAGE_SIZE := 15552k
+endef
+TARGET_DEVICES += ubnt_unifi-nanohd
+
 define Device/unielec_u7621-01-16m
   $(Device/dsa-migration)
   $(Device/uimage-lzma-loader)
diff --git a/target/linux/ramips/mt7621/base-files/lib/upgrade/platform.sh b/target/linux/ramips/mt7621/base-files/lib/upgrade/platform.sh
index 924f259e2563..033ec23425db 100755
--- a/target/linux/ramips/mt7621/base-files/lib/upgrade/platform.sh
+++ b/target/linux/ramips/mt7621/base-files/lib/upgrade/platform.sh
@@ -58,6 +58,7 @@ platform_do_upgrade() {
 	esac
 
 	case "$board" in
+	actiontec,web7200|\
 	ampedwireless,ally-00x19k|\
 	ampedwireless,ally-r1900k|\
 	asus,rt-ac65p|\
diff --git a/target/linux/ramips/mt7621/config-5.4 b/target/linux/ramips/mt7621/config-5.4
index ae4a0679bd76..eada0ff6021c 100644
--- a/target/linux/ramips/mt7621/config-5.4
+++ b/target/linux/ramips/mt7621/config-5.4
@@ -185,14 +185,6 @@ CONFIG_NET_MEDIATEK_SOC=y
 CONFIG_NET_SWITCHDEV=y
 CONFIG_NET_VENDOR_MEDIATEK=y
 # CONFIG_NET_VENDOR_RALINK is not set
-CONFIG_NMBM=y
-# CONFIG_NMBM_LOG_LEVEL_DEBUG is not set
-# CONFIG_NMBM_LOG_LEVEL_EMERG is not set
-# CONFIG_NMBM_LOG_LEVEL_ERR is not set
-CONFIG_NMBM_LOG_LEVEL_INFO=y
-# CONFIG_NMBM_LOG_LEVEL_NONE is not set
-# CONFIG_NMBM_LOG_LEVEL_WARN is not set
-CONFIG_NMBM_MTD=y
 CONFIG_NR_CPUS=4
 CONFIG_OF=y
 CONFIG_OF_ADDRESS=y
diff --git a/target/linux/ramips/patches-5.4/499-mtd-add-nmbm-support.patch b/target/linux/ramips/patches-5.4/499-mtd-add-nmbm-support.patch
deleted file mode 100644
index 5cbaae2b7bba..000000000000
--- a/target/linux/ramips/patches-5.4/499-mtd-add-nmbm-support.patch
+++ /dev/null
@@ -1,21 +0,0 @@
---- a/drivers/mtd/Kconfig
-+++ b/drivers/mtd/Kconfig
-@@ -228,6 +228,8 @@ source "drivers/mtd/ubi/Kconfig"
-
- source "drivers/mtd/hyperbus/Kconfig"
-
-+source "drivers/mtd/nmbm/Kconfig"
-+
- source "drivers/mtd/composite/Kconfig"
-
- endif # MTD
---- a/drivers/mtd/Makefile
-+++ b/drivers/mtd/Makefile
-@@ -33,5 +33,7 @@ obj-$(CONFIG_MTD_SPI_NOR) += spi-nor/
- obj-$(CONFIG_MTD_UBI) += ubi/
- obj-$(CONFIG_MTD_HYPERBUS) += hyperbus/
-
-+obj-y += nmbm/
-+
- # Composite drivers must be loaded last
- obj-y += composite/
--
2.32.0
