Mirror of https://github.com/littlefs-project/littlefs.git
Removed recursion from commit/relocate code path
lfs_dir_commit originally relied heavily on tail-recursion, though at least one path (through relocations) was not tail-recursive and could cause unbounded stack usage in extreme cases of bad blocks. (Keep in mind even extreme cases of bad blocks should be in scope for littlefs.)

In order to remove recursion from this code path, several changes were required:

- The lfs_dir_compact logic had to be somewhat inverted. Instead of first compacting and then resolving issues such as relocations and orphans, the overarching lfs_dir_commit now contains a state machine which, after committing or compacting, handles the extra changes to the filesystem in a single, non-recursive loop.

- Instead of fixing all relocations recursively, >1 relocation requires deferring to a full deorphan step. This step is unfortunately an additional n^2 process. It also required some changes to lfs_deorphan in order to ignore intentional orphans created as an intermediary in lfs_mkdir. Maybe in the future we should remove these.

- Tail recursion normally found in lfs_fs_deorphan had to be rewritten as a loop which restarts any time a new commit causes a relocation. This does show that the algorithm may not terminate, but only if every block is bad, which will eventually cause littlefs to run out of blocks to write to.
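To make the flattened control flow concrete, here is a minimal, self-contained C sketch of the loop shape described above. The types and helpers (mdir_t, commit_one, find_parent) are hypothetical stand-ins invented for illustration; in the actual diff below this role is played by lfs_dir_relocatingcommit, lfs_fs_parent, and lfs_dir_orphaningcommit, and a failed parent commit can itself land on a bad block, which is why termination ultimately relies on running out of free blocks.

#include <stdbool.h>
#include <stdio.h>

// Simplified stand-ins for littlefs internals -- illustration only.
typedef struct { int depth; } mdir_t;
enum { OK = 0, OK_RELOCATED = 1, OK_ORPHANED = 3 };

// Hypothetical commit helper: pretend every pair above depth 0 relocates,
// standing in for lfs_dir_relocatingcommit returning LFS_OK_RELOCATED.
static int commit_one(mdir_t *dir) {
    return (dir->depth > 0) ? OK_RELOCATED : OK;
}

// Hypothetical parent lookup standing in for lfs_fs_parent.
static bool find_parent(const mdir_t *dir, mdir_t *parent) {
    if (dir->depth == 0) {
        return false;  // already at the root
    }
    parent->depth = dir->depth - 1;
    return true;
}

// Shape of the flattened commit path: instead of recursing when a commit
// relocates a metadata pair, loop and re-commit the parent until nothing
// more relocates, remembering whether an intermediate orphan was created.
static int commit_flattened(mdir_t dir) {
    bool orphaned = false;
    int state = commit_one(&dir);

    while (state == OK_RELOCATED) {
        mdir_t parent;
        if (!find_parent(&dir, &parent)) {
            state = OK;  // relocated the root, nothing above us to patch
            break;
        }
        state = commit_one(&parent);  // may relocate (and orphan) again
        if (state == OK_RELOCATED) {
            orphaned = true;
        }
        dir = parent;
    }

    // more than one relocation defers cleanup to a full deorphan pass
    return orphaned ? OK_ORPHANED : state;
}

int main(void) {
    mdir_t dir = {.depth = 3};
    printf("result: %d\n", commit_flattened(dir));  // prints 3 (OK_ORPHANED)
    return 0;
}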
lfs.c
@@ -7,9 +7,24 @@
#include "lfs.h"
#include "lfs_util.h"

// some constants used throughout the code
#define LFS_BLOCK_NULL ((lfs_block_t)-1)
#define LFS_BLOCK_INLINE ((lfs_block_t)-2)

enum {
LFS_OK_RELOCATED = 1,
LFS_OK_DROPPED = 2,
LFS_OK_ORPHANED = 3,
};

enum {
LFS_CMP_EQ = 0,
LFS_CMP_LT = 1,
LFS_CMP_GT = 2,
};

/// Caching block device operations ///
static inline void lfs_cache_drop(lfs_t *lfs, lfs_cache_t *rcache) {
// do not zero, cheaper if cache is readonly or only going to be
@@ -107,12 +122,6 @@ static int lfs_bd_read(lfs_t *lfs,
return 0;
}

enum {
LFS_CMP_EQ = 0,
LFS_CMP_LT = 1,
LFS_CMP_GT = 2,
};

static int lfs_bd_cmp(lfs_t *lfs,
const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint,
lfs_block_t block, lfs_off_t off,
@@ -467,6 +476,7 @@ static int lfs_file_rawsync(lfs_t *lfs, lfs_file_t *file);
static int lfs_file_outline(lfs_t *lfs, lfs_file_t *file);
static int lfs_file_flush(lfs_t *lfs, lfs_file_t *file);

static int lfs_fs_deorphan(lfs_t *lfs, bool powerloss);
static int lfs_fs_preporphans(lfs_t *lfs, int8_t orphans);
static void lfs_fs_prepmove(lfs_t *lfs,
uint16_t id, const lfs_block_t pair[2]);
@@ -474,8 +484,6 @@ static int lfs_fs_pred(lfs_t *lfs, const lfs_block_t dir[2],
lfs_mdir_t *pdir);
static lfs_stag_t lfs_fs_parent(lfs_t *lfs, const lfs_block_t dir[2],
lfs_mdir_t *parent);
static int lfs_fs_relocate(lfs_t *lfs,
const lfs_block_t oldpair[2], lfs_block_t newpair[2]);
static int lfs_fs_forceconsistency(lfs_t *lfs);
#endif
@@ -1508,7 +1516,7 @@ static int lfs_dir_drop(lfs_t *lfs, lfs_mdir_t *dir, lfs_mdir_t *tail) {
static int lfs_dir_split(lfs_t *lfs,
lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount,
lfs_mdir_t *source, uint16_t split, uint16_t end) {
// create tail directory
// create tail metadata pair
lfs_alloc_ack(lfs);
lfs_mdir_t tail;
int err = lfs_dir_alloc(lfs, &tail);
@@ -1520,9 +1528,10 @@ static int lfs_dir_split(lfs_t *lfs,
tail.tail[0] = dir->tail[0];
tail.tail[1] = dir->tail[1];

err = lfs_dir_compact(lfs, &tail, attrs, attrcount, source, split, end);
if (err) {
return err;
// note we don't care about LFS_OK_RELOCATED
int res = lfs_dir_compact(lfs, &tail, attrs, attrcount, source, split, end);
if (res < 0) {
return res;
}

dir->tail[0] = tail.pair[0];
@@ -1564,106 +1573,44 @@ static int lfs_dir_commit_commit(void *p, lfs_tag_t tag, const void *buffer) {
#endif

#ifndef LFS_READONLY
static int lfs_dir_compact(lfs_t *lfs,
lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount,
lfs_mdir_t *source, uint16_t begin, uint16_t end) {
// save some state in case block is bad
const lfs_block_t oldpair[2] = {dir->pair[0], dir->pair[1]};
bool relocated = false;
bool tired = false;

// should we split?
while (end - begin > 1) {
// find size
lfs_size_t size = 0;
int err = lfs_dir_traverse(lfs,
source, 0, 0xffffffff, attrs, attrcount,
LFS_MKTAG(0x400, 0x3ff, 0),
LFS_MKTAG(LFS_TYPE_NAME, 0, 0),
begin, end, -begin,
lfs_dir_commit_size, &size);
if (err) {
return err;
}

// space is complicated, we need room for tail, crc, gstate,
// cleanup delete, and we cap at half a block to give room
// for metadata updates.
if (end - begin < 0xff &&
size <= lfs_min(lfs->cfg->block_size - 36,
lfs_alignup((lfs->cfg->metadata_max ?
lfs->cfg->metadata_max : lfs->cfg->block_size)/2,
lfs->cfg->prog_size))) {
break;
}

// can't fit, need to split, we should really be finding the
// largest size that fits with a small binary search, but right now
// it's not worth the code size
uint16_t split = (end - begin) / 2;
err = lfs_dir_split(lfs, dir, attrs, attrcount,
source, begin+split, end);
if (err) {
// if we fail to split, we may be able to overcompact, unless
// we're too big for even the full block, in which case our
// only option is to error
if (err == LFS_ERR_NOSPC && size <= lfs->cfg->block_size - 36) {
break;
}
return err;
}

end = begin + split;
}

// increment revision count
dir->rev += 1;
static bool lfs_dir_needsrelocation(lfs_t *lfs, lfs_mdir_t *dir) {
// If our revision count == n * block_cycles, we should force a relocation,
// this is how littlefs wear-levels at the metadata-pair level. Note that we
// actually use (block_cycles+1)|1, this is to avoid two corner cases:
// 1. block_cycles = 1, which would prevent relocations from terminating
// 2. block_cycles = 2n, which, due to aliasing, would only ever relocate
// one metadata block in the pair, effectively making this useless
if (lfs->cfg->block_cycles > 0 &&
(dir->rev % ((lfs->cfg->block_cycles+1)|1) == 0)) {
if (lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) == 0) {
// oh no! we're writing too much to the superblock,
// should we expand?
lfs_ssize_t res = lfs_fs_rawsize(lfs);
if (res < 0) {
return res;
return (lfs->cfg->block_cycles > 0
&& ((dir->rev + 1) % ((lfs->cfg->block_cycles+1)|1) == 0));
}
#endif
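(Editorial aside, not part of the patch: the ((block_cycles+1)|1) trick in the comment above can be sanity-checked in isolation. The standalone sketch below uses a hypothetical block_cycles of 100 and prints the revisions at which a metadata pair would be flagged for relocation.)

#include <inttypes.h>
#include <stdio.h>

int main(void) {
    // hypothetical configuration value, chosen only for illustration
    uint32_t block_cycles = 100;

    // (block_cycles+1)|1 forces an odd modulus: block_cycles=1 no longer
    // relocates on every revision, and an even block_cycles can no longer
    // alias with the alternating revisions of the two blocks in a pair
    uint32_t modulus = (block_cycles + 1) | 1;  // 101

    for (uint32_t rev = 1; rev <= 400; rev++) {
        if (rev % modulus == 0) {
            printf("relocate at rev %" PRIu32 "\n", rev);  // 101, 202, 303
        }
    }
    return 0;
}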

// do we have extra space? littlefs can't reclaim this space
// by itself, so expand cautiously
if ((lfs_size_t)res < lfs->cfg->block_count/2) {
LFS_DEBUG("Expanding superblock at rev %"PRIu32, dir->rev);
int err = lfs_dir_split(lfs, dir, attrs, attrcount,
source, begin, end);
if (err && err != LFS_ERR_NOSPC) {
return err;
}

#ifndef LFS_READONLY
static int lfs_dir_compact(lfs_t *lfs,
lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount,
lfs_mdir_t *source, uint16_t begin, uint16_t end) {
// save some state in case block is bad
bool relocated = false;
bool tired = lfs_dir_needsrelocation(lfs, dir);

// increment revision count
dir->rev += 1;

// welp, we tried, if we ran out of space there's not much
// we can do, we'll error later if we've become frozen
if (!err) {
end = begin;
}
}
#ifdef LFS_MIGRATE
} else if (lfs->lfs1) {
// do not proactively relocate blocks during migrations, this
// can cause a number of failure states such: clobbering the
// v1 superblock if we relocate root, and invalidating directory
// pointers if we relocate the head of a directory. On top of
// this, relocations increase the overall complexity of
// lfs_migration, which is already a delicate operation.
#endif
} else {
// we're writing too much, time to relocate
tired = true;
goto relocate;
#ifdef LFS_MIGRATE
if (lfs->lfs1) {
tired = false;
}
#endif

if (tired && lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) != 0) {
// we're writing too much, time to relocate
goto relocate;
}

// begin loop to commit compaction to blocks until a compact sticks
@@ -1807,44 +1754,113 @@ relocate:
continue;
}

if (relocated) {
// update references if we relocated
LFS_DEBUG("Relocating {0x%"PRIx32", 0x%"PRIx32"} "
"-> {0x%"PRIx32", 0x%"PRIx32"}",
oldpair[0], oldpair[1], dir->pair[0], dir->pair[1]);
int err = lfs_fs_relocate(lfs, oldpair, dir->pair);
if (err) {
return err;
}
}

return 0;
return relocated ? LFS_OK_RELOCATED : 0;
}
#endif

#ifndef LFS_READONLY
static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
const struct lfs_mattr *attrs, int attrcount) {
// check for any inline files that aren't RAM backed and
// forcefully evict them, needed for filesystem consistency
for (lfs_file_t *f = (lfs_file_t*)lfs->mlist; f; f = f->next) {
if (dir != &f->m && lfs_pair_cmp(f->m.pair, dir->pair) == 0 &&
f->type == LFS_TYPE_REG && (f->flags & LFS_F_INLINE) &&
f->ctz.size > lfs->cfg->cache_size) {
int err = lfs_file_outline(lfs, f);
static int lfs_dir_splittingcompact(lfs_t *lfs, lfs_mdir_t *dir,
const struct lfs_mattr *attrs, int attrcount,
lfs_mdir_t *source, uint16_t begin, uint16_t end) {
while (true) {
// find size of first split, we do this by halving the split until
// the metadata is guaranteed to fit
//
// Note that this isn't a true binary search, we never increase the
// split size. This may result in poorly distributed metadata but isn't
// worth the extra code size or performance hit to fix.
lfs_size_t split = begin;
while (end - split > 1) {
lfs_size_t size = 0;
int err = lfs_dir_traverse(lfs,
source, 0, 0xffffffff, attrs, attrcount,
LFS_MKTAG(0x400, 0x3ff, 0),
LFS_MKTAG(LFS_TYPE_NAME, 0, 0),
split, end, -split,
lfs_dir_commit_size, &size);
if (err) {
return err;
}

err = lfs_file_flush(lfs, f);
if (err) {
// space is complicated, we need room for tail, crc, gstate,
// cleanup delete, and we cap at half a block to give room
// for metadata updates.
if (end - split < 0xff
&& size <= lfs_min(lfs->cfg->block_size - 36,
lfs_alignup(
(lfs->cfg->metadata_max
? lfs->cfg->metadata_max
: lfs->cfg->block_size)/2,
lfs->cfg->prog_size))) {
break;
}

split = split + ((end - split) / 2);
}

if (split == begin) {
// no split needed
break;
}

// split into two metadata pairs and continue
int err = lfs_dir_split(lfs, dir, attrs, attrcount,
source, split, end);
if (err && err != LFS_ERR_NOSPC) {
return err;
}

if (err) {
// we can't allocate a new block, try to compact with degraded
// performance
LFS_WARN("Unable to split {0x%"PRIx32", 0x%"PRIx32"}",
dir->pair[0], dir->pair[1]);
} else {
end = split;
}
}

if (lfs_dir_needsrelocation(lfs, dir)
&& lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) == 0) {
// oh no! we're writing too much to the superblock,
// should we expand?
lfs_ssize_t size = lfs_fs_rawsize(lfs);
if (size < 0) {
return size;
}

// do we have extra space? littlefs can't reclaim this space
// by itself, so expand cautiously
if ((lfs_size_t)size < lfs->cfg->block_count/2) {
LFS_DEBUG("Expanding superblock at rev %"PRIu32, dir->rev);
int err = lfs_dir_split(lfs, dir, attrs, attrcount,
source, begin, end);
if (err && err != LFS_ERR_NOSPC) {
return err;
}

if (err) {
// welp, we tried, if we ran out of space there's not much
// we can do, we'll error later if we've become frozen
LFS_WARN("Unable to expand superblock");
} else {
end = begin;
}
}
}

return lfs_dir_compact(lfs, dir, attrs, attrcount, source, begin, end);
}
#endif
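(Editorial aside, not part of the patch: a toy sketch of the split-point search that lfs_dir_splittingcompact performs above. The per-entry size estimate and the numbers are hypothetical; the point is that the candidate split only ever moves forward, halving the remaining range until the tail fits, rather than doing a true binary search.)

#include <stdint.h>
#include <stdio.h>

// hypothetical per-entry size estimate, standing in for the real
// lfs_dir_traverse + lfs_dir_commit_size accounting
static uint32_t estimate_size(uint16_t begin, uint16_t end) {
    return (uint32_t)(end - begin) * 64;  // pretend each entry costs 64 bytes
}

int main(void) {
    uint16_t begin = 0, end = 100;
    uint32_t budget = 2048;  // pretend half-a-block budget

    // halve the candidate range from the front until the tail fits; this is
    // not a true binary search, the split point only ever moves forward
    uint16_t split = begin;
    while (end - split > 1) {
        if (end - split < 0xff && estimate_size(split, end) <= budget) {
            break;
        }
        split = split + ((end - split) / 2);
    }

    // with these numbers: split lands at 75, keeping 25 entries in the tail
    printf("split at %u, %u entries kept\n",
            (unsigned)split, (unsigned)(end - split));
    return 0;
}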

#ifndef LFS_READONLY
static int lfs_dir_relocatingcommit(lfs_t *lfs, lfs_mdir_t *dir,
const lfs_block_t pair[2],
const struct lfs_mattr *attrs, int attrcount,
lfs_mdir_t *pdir) {
int state = 0;

// calculate changes to the directory
lfs_mdir_t olddir = *dir;
bool hasdelete = false;
for (int i = 0; i < attrcount; i++) {
if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_CREATE) {
@@ -1863,23 +1879,19 @@ static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,

// should we actually drop the directory block?
if (hasdelete && dir->count == 0) {
lfs_mdir_t pdir;
int err = lfs_fs_pred(lfs, dir->pair, &pdir);
LFS_ASSERT(pdir);
int err = lfs_fs_pred(lfs, dir->pair, pdir);
if (err && err != LFS_ERR_NOENT) {
*dir = olddir;
return err;
}

if (err != LFS_ERR_NOENT && pdir.split) {
err = lfs_dir_drop(lfs, &pdir, dir);
if (err) {
*dir = olddir;
return err;
}
if (err != LFS_ERR_NOENT && pdir->split) {
state = LFS_OK_DROPPED;
goto fixmlist;
}
}

if (dir->erased || dir->count >= 0xff) {
if (dir->erased) {
// try to commit
struct lfs_commit commit = {
.block = dir->pair[0],
@@ -1904,7 +1916,6 @@ static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
goto compact;
}
*dir = olddir;
return err;
}

@@ -1917,7 +1928,6 @@ static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
if (!lfs_gstate_iszero(&delta)) {
err = lfs_dir_getgstate(lfs, dir, &delta);
if (err) {
*dir = olddir;
return err;
}

@@ -1929,7 +1939,6 @@ static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
goto compact;
}
*dir = olddir;
return err;
}
}
@@ -1940,7 +1949,6 @@ static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
goto compact;
}
*dir = olddir;
return err;
}

@@ -1951,19 +1959,23 @@ static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
// and update gstate
lfs->gdisk = lfs->gstate;
lfs->gdelta = (lfs_gstate_t){0};
} else {

goto fixmlist;
}

compact:
// fall back to compaction
lfs_cache_drop(lfs, &lfs->pcache);

int err = lfs_dir_compact(lfs, dir, attrs, attrcount,
state = lfs_dir_splittingcompact(lfs, dir, attrs, attrcount,
dir, 0, dir->count);
if (err) {
*dir = olddir;
return err;
}
if (state < 0) {
return state;
}

goto fixmlist;

fixmlist:;
// this complicated bit of logic is for fixing up any active
// metadata-pairs that we may have affected
//
@@ -1971,9 +1983,11 @@ compact:
// lfs_dir_commit could also be in this list, and even then
// we need to copy the pair so they don't get clobbered if we refetch
// our mdir.
lfs_block_t oldpair[2] = {pair[0], pair[1]};
for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) {
if (&d->m != dir && lfs_pair_cmp(d->m.pair, olddir.pair) == 0) {
if (lfs_pair_cmp(d->m.pair, oldpair) == 0) {
d->m = *dir;
if (d->m.pair != pair) {
for (int i = 0; i < attrcount; i++) {
if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE &&
d->id == lfs_tag_id(attrs[i].tag)) {
@@ -1994,10 +2008,7 @@ compact:
}
}
}
}

for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) {
if (lfs_pair_cmp(d->m.pair, olddir.pair) == 0) {
while (d->id >= d->m.count && d->m.split) {
// we split and id is on tail now
d->id -= d->m.count;
@@ -2009,6 +2020,221 @@ compact:
}
}

return state;
}
#endif

#ifndef LFS_READONLY
static int lfs_dir_orphaningcommit(lfs_t *lfs, lfs_mdir_t *dir,
const struct lfs_mattr *attrs, int attrcount) {
// check for any inline files that aren't RAM backed and
// forcefully evict them, needed for filesystem consistency
for (lfs_file_t *f = (lfs_file_t*)lfs->mlist; f; f = f->next) {
if (dir != &f->m && lfs_pair_cmp(f->m.pair, dir->pair) == 0 &&
f->type == LFS_TYPE_REG && (f->flags & LFS_F_INLINE) &&
f->ctz.size > lfs->cfg->cache_size) {
int err = lfs_file_outline(lfs, f);
if (err) {
return err;
}

err = lfs_file_flush(lfs, f);
if (err) {
return err;
}
}
}

lfs_block_t lpair[2] = {dir->pair[0], dir->pair[1]};
lfs_mdir_t ldir = *dir;
lfs_mdir_t pdir;
int state = lfs_dir_relocatingcommit(lfs, &ldir, dir->pair,
attrs, attrcount, &pdir);
if (state < 0) {
return state;
}

// update if we're not in mlist, note we may have already been
// updated if we are in mlist
if (lfs_pair_cmp(dir->pair, lpair) == 0) {
*dir = ldir;
}

// commit was successful, but may require other changes in the
// filesystem, these would normally be tail recursive, but we have
// flattened them here avoid unbounded stack usage

// need to drop?
if (state == LFS_OK_DROPPED) {
// steal state
int err = lfs_dir_getgstate(lfs, dir, &lfs->gdelta);
if (err) {
return err;
}

// steal tail, note that this can't create a recursive drop
lpair[0] = pdir.pair[0];
lpair[1] = pdir.pair[1];
lfs_pair_tole32(dir->tail);
state = lfs_dir_relocatingcommit(lfs, &pdir, lpair, LFS_MKATTRS(
{LFS_MKTAG(LFS_TYPE_TAIL + dir->split, 0x3ff, 8),
dir->tail}),
NULL);
lfs_pair_fromle32(dir->tail);
if (state < 0) {
return state;
}

ldir = pdir;
}

// need to relocate?
bool orphans = false;
while (state == LFS_OK_RELOCATED) {
LFS_DEBUG("Relocating {0x%"PRIx32", 0x%"PRIx32"} "
"-> {0x%"PRIx32", 0x%"PRIx32"}",
lpair[0], lpair[1], ldir.pair[0], ldir.pair[1]);
state = 0;

// update internal root
if (lfs_pair_cmp(lpair, lfs->root) == 0) {
lfs->root[0] = ldir.pair[0];
lfs->root[1] = ldir.pair[1];
}

// update internally tracked dirs
for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) {
if (lfs_pair_cmp(lpair, d->m.pair) == 0) {
d->m.pair[0] = ldir.pair[0];
d->m.pair[1] = ldir.pair[1];
}

if (d->type == LFS_TYPE_DIR &&
lfs_pair_cmp(lpair, ((lfs_dir_t*)d)->head) == 0) {
((lfs_dir_t*)d)->head[0] = ldir.pair[0];
((lfs_dir_t*)d)->head[1] = ldir.pair[1];
}
}

// find parent
lfs_stag_t tag = lfs_fs_parent(lfs, lpair, &pdir);
if (tag < 0 && tag != LFS_ERR_NOENT) {
return tag;
}

bool hasparent = (tag != LFS_ERR_NOENT);
if (tag != LFS_ERR_NOENT) {
// note that if we have a parent, we must have a pred, so this will
// always create an orphan
int err = lfs_fs_preporphans(lfs, +1);
if (err) {
return err;
}

// fix pending move in this pair? this looks like an optimization but
// is in fact _required_ since relocating may outdate the move.
uint16_t moveid = 0x3ff;
if (lfs_gstate_hasmovehere(&lfs->gstate, pdir.pair)) {
moveid = lfs_tag_id(lfs->gstate.tag);
LFS_DEBUG("Fixing move while relocating "
"{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n",
pdir.pair[0], pdir.pair[1], moveid);
lfs_fs_prepmove(lfs, 0x3ff, NULL);
if (moveid < lfs_tag_id(tag)) {
tag -= LFS_MKTAG(0, 1, 0);
}
}

lfs_block_t ppair[2] = {pdir.pair[0], pdir.pair[1]};
lfs_pair_tole32(ldir.pair);
state = lfs_dir_relocatingcommit(lfs, &pdir, ppair, LFS_MKATTRS(
{LFS_MKTAG_IF(moveid != 0x3ff,
LFS_TYPE_DELETE, moveid, 0), NULL},
{tag, ldir.pair}),
NULL);
lfs_pair_fromle32(ldir.pair);
if (state < 0) {
return state;
}

if (state == LFS_OK_RELOCATED) {
lpair[0] = ppair[0];
lpair[1] = ppair[1];
ldir = pdir;
orphans = true;
continue;
}
}

// find pred
int err = lfs_fs_pred(lfs, lpair, &pdir);
if (err && err != LFS_ERR_NOENT) {
return err;
}
LFS_ASSERT(!(hasparent && err == LFS_ERR_NOENT));

// if we can't find dir, it must be new
if (err != LFS_ERR_NOENT) {
if (lfs_gstate_hasorphans(&lfs->gstate)) {
// next step, clean up orphans
err = lfs_fs_preporphans(lfs, -hasparent);
if (err) {
return err;
}
}

// fix pending move in this pair? this looks like an optimization
// but is in fact _required_ since relocating may outdate the move.
uint16_t moveid = 0x3ff;
if (lfs_gstate_hasmovehere(&lfs->gstate, pdir.pair)) {
moveid = lfs_tag_id(lfs->gstate.tag);
LFS_DEBUG("Fixing move while relocating "
"{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n",
pdir.pair[0], pdir.pair[1], moveid);
lfs_fs_prepmove(lfs, 0x3ff, NULL);
}

// replace bad pair, either we clean up desync, or no desync occured
lpair[0] = pdir.pair[0];
lpair[1] = pdir.pair[1];
lfs_pair_tole32(ldir.pair);
state = lfs_dir_relocatingcommit(lfs, &pdir, lpair, LFS_MKATTRS(
{LFS_MKTAG_IF(moveid != 0x3ff,
LFS_TYPE_DELETE, moveid, 0), NULL},
{LFS_MKTAG(LFS_TYPE_TAIL + pdir.split, 0x3ff, 8),
ldir.pair}),
NULL);
lfs_pair_fromle32(ldir.pair);
if (state < 0) {
return state;
}

ldir = pdir;
}
}

return orphans ? LFS_OK_ORPHANED : 0;
}
#endif

#ifndef LFS_READONLY
static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
const struct lfs_mattr *attrs, int attrcount) {
int orphans = lfs_dir_orphaningcommit(lfs, dir, attrs, attrcount);
if (orphans < 0) {
return orphans;
}

if (orphans) {
// make sure we've removed all orphans, this is a noop if there
// are none, but if we had nested blocks failures we may have
// created some
int err = lfs_fs_deorphan(lfs, false);
if (err) {
return err;
}
}

return 0;
}
#endif
@@ -2063,7 +2289,7 @@ static int lfs_rawmkdir(lfs_t *lfs, const char *path) {
return err;
}

// current block end of list?
// current block not end of list?
if (cwd.m.split) {
// update tails, this creates a desync
err = lfs_fs_preporphans(lfs, +1);
@@ -3381,7 +3607,8 @@ static int lfs_rawrename(lfs_t *lfs, const char *oldpath, const char *newpath) {
}

lfs->mlist = prevdir.next;
if (prevtag != LFS_ERR_NOENT && lfs_tag_type3(prevtag) == LFS_TYPE_DIR) {
if (prevtag != LFS_ERR_NOENT
&& lfs_tag_type3(prevtag) == LFS_TYPE_DIR) {
// fix orphan
err = lfs_fs_preporphans(lfs, -1);
if (err) {
@@ -3987,109 +4214,6 @@ static lfs_stag_t lfs_fs_parent(lfs_t *lfs, const lfs_block_t pair[2],
}
#endif

#ifndef LFS_READONLY
static int lfs_fs_relocate(lfs_t *lfs,
const lfs_block_t oldpair[2], lfs_block_t newpair[2]) {
// update internal root
if (lfs_pair_cmp(oldpair, lfs->root) == 0) {
lfs->root[0] = newpair[0];
lfs->root[1] = newpair[1];
}

// update internally tracked dirs
for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) {
if (lfs_pair_cmp(oldpair, d->m.pair) == 0) {
d->m.pair[0] = newpair[0];
d->m.pair[1] = newpair[1];
}

if (d->type == LFS_TYPE_DIR &&
lfs_pair_cmp(oldpair, ((lfs_dir_t*)d)->head) == 0) {
((lfs_dir_t*)d)->head[0] = newpair[0];
((lfs_dir_t*)d)->head[1] = newpair[1];
}
}

// find parent
lfs_mdir_t parent;
lfs_stag_t tag = lfs_fs_parent(lfs, oldpair, &parent);
if (tag < 0 && tag != LFS_ERR_NOENT) {
return tag;
}

if (tag != LFS_ERR_NOENT) {
// update disk, this creates a desync
int err = lfs_fs_preporphans(lfs, +1);
if (err) {
return err;
}

// fix pending move in this pair? this looks like an optimization but
// is in fact _required_ since relocating may outdate the move.
uint16_t moveid = 0x3ff;
if (lfs_gstate_hasmovehere(&lfs->gstate, parent.pair)) {
moveid = lfs_tag_id(lfs->gstate.tag);
LFS_DEBUG("Fixing move while relocating "
"{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n",
parent.pair[0], parent.pair[1], moveid);
lfs_fs_prepmove(lfs, 0x3ff, NULL);
if (moveid < lfs_tag_id(tag)) {
tag -= LFS_MKTAG(0, 1, 0);
}
}

lfs_pair_tole32(newpair);
err = lfs_dir_commit(lfs, &parent, LFS_MKATTRS(
{LFS_MKTAG_IF(moveid != 0x3ff,
LFS_TYPE_DELETE, moveid, 0), NULL},
{tag, newpair}));
lfs_pair_fromle32(newpair);
if (err) {
return err;
}

// next step, clean up orphans
err = lfs_fs_preporphans(lfs, -1);
if (err) {
return err;
}
}

// find pred
int err = lfs_fs_pred(lfs, oldpair, &parent);
if (err && err != LFS_ERR_NOENT) {
return err;
}

// if we can't find dir, it must be new
if (err != LFS_ERR_NOENT) {
// fix pending move in this pair? this looks like an optimization but
// is in fact _required_ since relocating may outdate the move.
uint16_t moveid = 0x3ff;
if (lfs_gstate_hasmovehere(&lfs->gstate, parent.pair)) {
moveid = lfs_tag_id(lfs->gstate.tag);
LFS_DEBUG("Fixing move while relocating "
"{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n",
parent.pair[0], parent.pair[1], moveid);
lfs_fs_prepmove(lfs, 0x3ff, NULL);
}

// replace bad pair, either we clean up desync, or no desync occured
lfs_pair_tole32(newpair);
err = lfs_dir_commit(lfs, &parent, LFS_MKATTRS(
{LFS_MKTAG_IF(moveid != 0x3ff,
LFS_TYPE_DELETE, moveid, 0), NULL},
{LFS_MKTAG(LFS_TYPE_TAIL + parent.split, 0x3ff, 8), newpair}));
lfs_pair_fromle32(newpair);
if (err) {
return err;
}
}

return 0;
}
#endif

#ifndef LFS_READONLY
static int lfs_fs_preporphans(lfs_t *lfs, int8_t orphans) {
LFS_ASSERT(lfs_tag_size(lfs->gstate.tag) > 0 || orphans >= 0);
@@ -4144,11 +4268,14 @@ static int lfs_fs_demove(lfs_t *lfs) {
#endif

#ifndef LFS_READONLY
static int lfs_fs_deorphan(lfs_t *lfs) {
static int lfs_fs_deorphan(lfs_t *lfs, bool powerloss) {
if (!lfs_gstate_hasorphans(&lfs->gstate)) {
return 0;
}

int8_t found = 0;
restart:
{
// Fix any orphans
lfs_mdir_t pdir = {.split = true, .tail = {0, 1}};
lfs_mdir_t dir;
@@ -4169,52 +4296,101 @@ static int lfs_fs_deorphan(lfs_t *lfs) {
return tag;
}

if (tag == LFS_ERR_NOENT) {
// note we only check for full orphans if we may have had a
// power-loss, otherwise orphans are created intentionally
// during operations such as lfs_mkdir
if (tag == LFS_ERR_NOENT && powerloss) {
// we are an orphan
LFS_DEBUG("Fixing orphan {0x%"PRIx32", 0x%"PRIx32"}",
pdir.tail[0], pdir.tail[1]);

err = lfs_dir_drop(lfs, &pdir, &dir);
// steal state
err = lfs_dir_getgstate(lfs, &dir, &lfs->gdelta);
if (err) {
return err;
}

// steal tail
lfs_pair_tole32(dir.tail);
int state = lfs_dir_orphaningcommit(lfs, &pdir, LFS_MKATTRS(
{LFS_MKTAG(LFS_TYPE_TAIL + dir.split, 0x3ff, 8),
dir.tail}));
lfs_pair_fromle32(dir.tail);
if (state < 0) {
return state;
}

found += 1;

// did our commit create more orphans?
if (state == LFS_OK_ORPHANED) {
goto restart;
}

// refetch tail
continue;
}

if (tag != LFS_ERR_NOENT) {
lfs_block_t pair[2];
lfs_stag_t res = lfs_dir_get(lfs, &parent,
lfs_stag_t state = lfs_dir_get(lfs, &parent,
LFS_MKTAG(0x7ff, 0x3ff, 0), tag, pair);
if (res < 0) {
return res;
if (state < 0) {
return state;
}
lfs_pair_fromle32(pair);

if (!lfs_pair_sync(pair, pdir.tail)) {
// we have desynced
LFS_DEBUG("Fixing half-orphan {0x%"PRIx32", 0x%"PRIx32"} "
LFS_DEBUG("Fixing half-orphan "
"{0x%"PRIx32", 0x%"PRIx32"} "
"-> {0x%"PRIx32", 0x%"PRIx32"}",
pdir.tail[0], pdir.tail[1], pair[0], pair[1]);

// fix pending move in this pair? this looks like an
// optimization but is in fact _required_ since
// relocating may outdate the move.
uint16_t moveid = 0x3ff;
if (lfs_gstate_hasmovehere(&lfs->gstate, pdir.pair)) {
moveid = lfs_tag_id(lfs->gstate.tag);
LFS_DEBUG("Fixing move while fixing orphans "
"{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n",
pdir.pair[0], pdir.pair[1], moveid);
lfs_fs_prepmove(lfs, 0x3ff, NULL);
}

lfs_pair_tole32(pair);
err = lfs_dir_commit(lfs, &pdir, LFS_MKATTRS(
{LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), pair}));
state = lfs_dir_orphaningcommit(lfs, &pdir, LFS_MKATTRS(
{LFS_MKTAG_IF(moveid != 0x3ff,
LFS_TYPE_DELETE, moveid, 0), NULL},
{LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8),
pair}));
lfs_pair_fromle32(pair);
if (err) {
return err;
if (state < 0) {
return state;
}

found += 1;

// did our commit create more orphans?
if (state == LFS_OK_ORPHANED) {
goto restart;
}

// refetch tail
continue;
}
}
}

pdir = dir;
}
}

// mark orphans as fixed
return lfs_fs_preporphans(lfs, -lfs_gstate_getorphans(&lfs->gstate));
return lfs_fs_preporphans(lfs, -lfs_min(
lfs_gstate_getorphans(&lfs->gstate),
found));
}
#endif
@@ -4225,7 +4401,7 @@ static int lfs_fs_forceconsistency(lfs_t *lfs) {
return err;
}

err = lfs_fs_deorphan(lfs);
err = lfs_fs_deorphan(lfs, true);
if (err) {
return err;
}