
btrfs-progs: minor source sync with kernel 6.4

Sync a few more files on the source level with kernel 6.4, no functional
changes.

Signed-off-by: David Sterba <dsterba@suse.com>
David Sterba 2023-06-27 16:15:35 +02:00
parent 2bc4002583
commit 53b64cc366
6 changed files with 121 additions and 25 deletions

View File

@@ -12,6 +12,8 @@ struct btrfs_inode;
struct address_space;
struct inode;
struct bio;
/* Stub for kernel-user parity. */
struct btrfs_bio { };
/*
* We want to make sure that amount of RAM required to uncompress an extent is
@@ -58,6 +60,9 @@ struct compressed_bio {
struct btrfs_bio *orig_bbio;
struct work_struct write_end_work;
};
/* Must be last. */
struct btrfs_bio bbio;
};
static inline unsigned int btrfs_compress_type(unsigned int type_level)
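
The "Must be last" rule mirrors the kernel layout, where the embedded btrfs_bio ends in a bio whose inline vectors are allocated directly behind it, and the outer compressed_bio is then recovered with container_of(). A standalone sketch of that recovery pattern (to_compressed_bio() is a made-up helper name here):

/* Illustrative only; not part of this change. */
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct btrfs_bio { int placeholder; };

struct compressed_bio {
        unsigned int nr_pages;
        /* Must be last. */
        struct btrfs_bio bbio;
};

static inline struct compressed_bio *to_compressed_bio(struct btrfs_bio *bbio)
{
        return container_of(bbio, struct compressed_bio, bbio);
}
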

View File

@@ -233,13 +233,35 @@ noinline void btrfs_release_path(struct btrfs_path *p)
int i;
for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
p->slots[i] = 0;
if (!p->nodes[i])
continue;
if (p->locks[i]) {
btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
p->locks[i] = 0;
}
free_extent_buffer(p->nodes[i]);
p->nodes[i] = NULL;
}
memset(p, 0, sizeof(*p));
}
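
For context, the usual lifecycle around btrfs_release_path(): allocate a path, search, then release so the extent buffer references held in nodes[] are dropped and the path can be reused or freed. A minimal sketch assuming the userspace helpers keep their kernel-style prototypes; lookup_example() and the key values are made up:

static int lookup_example(struct btrfs_root *root, u64 objectid)
{
        struct btrfs_path *path;
        struct btrfs_key key;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = objectid;
        key.type = BTRFS_INODE_ITEM_KEY;
        key.offset = 0;

        /* Read-only search: no transaction, no COW. */
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);

        /* Drop the extent buffer references held in path->nodes[]. */
        btrfs_release_path(path);
        btrfs_free_path(path);
        return ret;
}
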
/*
* We want the transaction abort to print stack trace only for errors where the
* cause could be a bug, eg. due to ENOSPC, and not for common errors that are
* caused by external factors.
*/
bool __cold abort_should_print_stack(int errno)
{
switch (errno) {
case -EIO:
case -EROFS:
case -ENOMEM:
return false;
}
return true;
}
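
A hedged sketch of how a caller might consult abort_should_print_stack(); report_abort() is a hypothetical consumer, the real one being the transaction abort path:

#include <stdio.h>
#include <stdbool.h>

bool abort_should_print_stack(int errno_val);   /* defined above */

static void report_abort(int errno_val)
{
        if (abort_should_print_stack(errno_val))
                fprintf(stderr, "abort %d: possible bug, stack trace useful\n",
                        errno_val);
        else
                fprintf(stderr, "abort %d: external cause, skip the trace\n",
                        errno_val);
}
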
void add_root_to_dirty_list(struct btrfs_root *root)
{
if (test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state) &&
@@ -289,6 +311,7 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
btrfs_item_key(buf, &disk_key, 0);
else
btrfs_node_key(buf, &disk_key, 0);
cow = btrfs_alloc_tree_block(trans, new_root, buf->len,
new_root_objectid, &disk_key,
level, buf->start, 0,
@@ -451,10 +474,9 @@ static int btrfs_block_can_be_shared(struct btrfs_root *root,
struct extent_buffer *buf)
{
/*
* Tree blocks not in shareable trees and tree roots are never shared.
* If a block was allocated after the last snapshot and the block was
* not allocated by tree relocation, we know the block is not shared.
*/
if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
buf != root->node && buf != root->commit_root &&
@@ -462,6 +484,7 @@ static int btrfs_block_can_be_shared(struct btrfs_root *root,
btrfs_root_last_snapshot(&root->root_item) ||
btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
return 1;
return 0;
}
@@ -557,7 +580,19 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
return 0;
}
/*
* does the dirty work in cow of a single block. The parent block (if
* supplied) is updated to point to the new cow copy. The new buffer is marked
* dirty and returned locked. If you modify the block it needs to be marked
* dirty again.
*
* search_start -- an allocation hint for the new block
*
* empty_size -- a hint that you plan on doing more cow. This is the size in
* bytes the allocator should try to find free next to the block it returns.
* This is just a hint and may be ignored by the allocator.
*/
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *buf,
struct extent_buffer *parent, int parent_slot,
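
A minimal caller sketch for the COW contract described in the comment, assuming the public btrfs_cow_block() wrapper keeps the kernel-style argument order (trans, root, buf, parent, parent_slot, &cow); cow_root_node() is a made-up helper:

static int cow_root_node(struct btrfs_trans_handle *trans,
                         struct btrfs_root *root)
{
        struct extent_buffer *cow = NULL;
        int ret;

        /* No parent block: the tree root itself is being replaced. */
        ret = btrfs_cow_block(trans, root, root->node, NULL, 0, &cow);
        if (ret)
                return ret;

        /*
         * 'cow' is the writable copy, returned dirty; any further
         * modification must mark it dirty again, per the comment above.
         */
        return 0;
}
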
@@ -679,7 +714,24 @@ int btrfs_cow_block(struct btrfs_trans_handle *trans,
return ret;
}
/*
* helper function for defrag to decide if two blocks pointed to by a
* node are actually close by
*/
static __attribute__((unused)) int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
if (blocknr < other && other - (blocknr + blocksize) < 32768)
return 1;
if (blocknr > other && blocknr - (other + blocksize) < 32768)
return 1;
return 0;
}
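
The 32 KiB window in close_blocks() is easy to sanity-check with literal offsets; a self-contained check (the typedefs stand in for kerncompat.h, and the body is copied from above):

#include <assert.h>

typedef unsigned long long u64;
typedef unsigned int u32;

static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
        if (blocknr < other && other - (blocknr + blocksize) < 32768)
                return 1;
        if (blocknr > other && blocknr - (other + blocksize) < 32768)
                return 1;
        return 0;
}

int main(void)
{
        /* 16 KiB apart with 4 KiB nodes: gap of 12 KiB < 32 KiB -> close. */
        assert(close_blocks(0, 16384, 4096) == 1);
        /* 64 KiB apart: gap of 60 KiB >= 32 KiB -> not close. */
        assert(close_blocks(0, 65536, 4096) == 0);
        return 0;
}
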
/*
* same as comp_keys only with two btrfs_key's
*/
int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
{
if (k1->objectid > k2->objectid)
return 1;
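
Keys compare by objectid first, then type, then offset (the remaining comparisons fall outside this hunk, assuming the function follows the kernel order); an illustrative check, with example_key_order() being a made-up helper:

static int example_key_order(void)
{
        struct btrfs_key a = { .objectid = 256, .type = BTRFS_INODE_ITEM_KEY,
                               .offset = 0 };
        struct btrfs_key b = { .objectid = 256, .type = BTRFS_DIR_ITEM_KEY,
                               .offset = 0 };

        /* Same objectid, so the type decides: 1 < 84, a sorts first. */
        return btrfs_comp_cpu_keys(&a, &b);     /* -1 */
}
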
@@ -818,15 +870,20 @@ struct extent_buffer *read_node_slot(struct btrfs_fs_info *fs_info,
return ret;
}
/*
* node level balancing, used to make sure nodes are in proper order for
* item deletion. We balance from the top down, so we have to make sure
* that a deletion won't leave a node completely empty later on.
*/
static noinline int balance_level(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path, int level)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *right = NULL;
struct extent_buffer *mid;
struct extent_buffer *left = NULL;
struct extent_buffer *parent = NULL;
struct btrfs_fs_info *fs_info = root->fs_info;
int ret = 0;
int wret;
int pslot;
@@ -1013,16 +1070,19 @@ enospc:
return ret;
}
/* Node balancing for insertion. Here we only split or push nodes around
* when they are completely full. This is also done top down, so we
* have to be pessimistic.
*/
static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path, int level)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *right = NULL;
struct extent_buffer *mid;
struct extent_buffer *left = NULL;
struct extent_buffer *parent = NULL;
struct btrfs_fs_info *fs_info = root->fs_info;
int ret = 0;
int wret;
int pslot;
@@ -1128,11 +1188,12 @@ static int noinline push_nodes_for_insert(struct btrfs_trans_handle *trans,
}
/*
* readahead one full node of leaves, finding things that are close
* to the block in 'slot', and triggering ra on them.
*/
static void reada_for_search(struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
int level, int slot, u64 objectid)
{
struct extent_buffer *node;
struct btrfs_disk_key disk_key;
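
An illustrative sketch of the readahead heuristic only, not the real implementation: scan the parent node's slots and pick children whose block number lies near the target. The 64 KiB window and issue_readahead() are invented for the example; btrfs_node_blockptr() and btrfs_header_nritems() are the usual accessors:

static void reada_nearby_children(struct extent_buffer *node, int slot)
{
        u64 target = btrfs_node_blockptr(node, slot);
        u32 nritems = btrfs_header_nritems(node);
        u32 i;

        for (i = 0; i < nritems; i++) {
                u64 blocknr = btrfs_node_blockptr(node, i);
                u64 dist = blocknr > target ? blocknr - target
                                            : target - blocknr;

                if (i != (u32)slot && dist < 65536)
                        issue_readahead(blocknr);   /* made-up hook */
        }
}
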

View File

@@ -22,6 +22,7 @@
#include <stdbool.h>
#include "kernel-lib/list.h"
#include "kernel-lib/rbtree.h"
#include "kerncompat.h"
#include "common/extent-cache.h"
#include "kernel-shared/uapi/btrfs.h"
@@ -148,13 +149,20 @@ enum {
READA_FORWARD_ALWAYS,
};
/*
* btrfs_paths remember the path taken from the root down to the leaf.
* level 0 is always the leaf, and nodes[1...BTRFS_MAX_LEVEL] will point
* to any other levels that are present.
*
* The slots array records the index of the item or block pointer
* used while walking the tree.
*/
struct btrfs_path {
struct extent_buffer *nodes[BTRFS_MAX_LEVEL];
int slots[BTRFS_MAX_LEVEL];
/* The kernel locking scheme is not done in userspace. */
u8 locks[BTRFS_MAX_LEVEL];
u8 reada;
signed char reada;
/* keep some upper locks as we walk down */
u8 lowest_level;
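
A sketch of how nodes[] and slots[] are consumed after a search, assuming the usual btrfs_search_slot() and btrfs_item_key_to_cpu() helpers; dump_found_item() is a made-up function:

#include <stdio.h>

static void dump_found_item(struct btrfs_root *root, struct btrfs_key *key)
{
        struct btrfs_path path = { 0 };
        struct btrfs_key found;

        if (btrfs_search_slot(NULL, root, key, &path, 0, 0) == 0) {
                /* Level 0 is the leaf; slots[0] indexes the item in it. */
                struct extent_buffer *leaf = path.nodes[0];
                int slot = path.slots[0];

                btrfs_item_key_to_cpu(leaf, &found, slot);
                printf("found %llu %u %llu in slot %d\n",
                       (unsigned long long)found.objectid,
                       (unsigned int)found.type,
                       (unsigned long long)found.offset, slot);
        }
        btrfs_release_path(&path);
}
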
@@ -477,16 +485,21 @@ enum {
* and for the extent tree extent_root root.
*/
struct btrfs_root {
struct rb_node rb_node;
struct extent_buffer *node;
struct extent_buffer *commit_root;
struct btrfs_root *log_root;
struct btrfs_root *reloc_root;
unsigned long state;
struct btrfs_root_item root_item;
struct btrfs_key root_key;
struct btrfs_fs_info *fs_info;
u64 objectid;
u64 last_trans;
unsigned long state;
u32 type;
u64 last_inode_alloc;
@@ -494,7 +507,6 @@ struct btrfs_root {
/* the dirty list is only used by non-reference counted roots */
struct list_head dirty_list;
struct rb_node rb_node;
spinlock_t accounting_lock;
};

View File

@@ -138,10 +138,9 @@ static struct btrfs_delayed_ref_node* tree_insert(struct rb_root *root,
}
/*
* Find a head entry based on bytenr. This returns the delayed ref head if it
* was able to find one, or NULL if nothing was in that spot. If return_bigger
* is given, the next bigger entry is returned if no exact match is found.
*/
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
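
The generic shape of an exact-or-next-bigger rbtree lookup as described in the comment (illustrative only, not the actual implementation; the field names href_node and bytenr follow the kernel's delayed ref head and are assumptions here):

static struct btrfs_delayed_ref_head *
find_ref_head_sketch(struct rb_root *root, u64 bytenr, bool return_bigger)
{
        struct rb_node *n = root->rb_node;
        struct btrfs_delayed_ref_head *entry;
        struct btrfs_delayed_ref_head *bigger = NULL;

        while (n) {
                entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);
                if (bytenr < entry->bytenr) {
                        bigger = entry;          /* candidate "next bigger" */
                        n = n->rb_left;
                } else if (bytenr > entry->bytenr) {
                        n = n->rb_right;
                } else {
                        return entry;            /* exact match */
                }
        }
        return return_bigger ? bigger : NULL;
}
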

View File

@@ -21,6 +21,14 @@
#include "kernel-shared/disk-io.h"
#include "kernel-shared/transaction.h"
/*
* insert a name into a directory, doing overflow properly if there is a hash
* collision. data_size indicates how big the item inserted should be. On
* success a struct btrfs_dir_item pointer is returned, otherwise it is
* an ERR_PTR.
*
* The name is not copied into the dir item, you have to do that yourself.
*/
static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle
*trans,
struct btrfs_root *root,
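
A hypothetical caller fragment showing the ERR_PTR convention mentioned above; the trailing arguments follow the kernel prototype of insert_with_overflow() and are assumed here, and IS_ERR()/PTR_ERR() come from kerncompat.h:

static int insert_name_example(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root,
                               struct btrfs_path *path,
                               struct btrfs_key *key,
                               const char *name, int name_len, u32 data_size)
{
        struct btrfs_dir_item *di;

        di = insert_with_overflow(trans, root, path, key, data_size,
                                  name, name_len);
        if (IS_ERR(di))
                return PTR_ERR(di);      /* e.g. -EEXIST or -ENOMEM */
        /* di points into the leaf; the name bytes still have to be copied. */
        return 0;
}
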
@@ -53,6 +61,10 @@ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle
return (struct btrfs_dir_item *)ptr;
}
/*
* xattrs work a lot like directories, this inserts an xattr item
* into the tree
*/
int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root, const char *name,
u16 name_len, const void *data, u16 data_len,
@@ -103,6 +115,14 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
return ret;
}
/*
* insert a directory item in the tree, doing all the magic for
* both indexes. 'dir' indicates which objectid to insert it into,
* 'location' is the key to stuff into the directory item, 'type' is the
* type of the inode we're pointing to, and 'index' is the sequence number
* to use for the second index (if one is created).
* Will return 0 or -ENOMEM
*/
int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
*root, const char *name, int name_len, u64 dir,
struct btrfs_key *location, u8 type, u64 index)
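
A usage sketch built on the prototype above: link an inode into its parent directory under both indexes. The helper name and objectid values are made up; BTRFS_FT_REG_FILE marks the target as a regular file:

static int link_inode_example(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              u64 parent_ino, u64 ino, u64 index)
{
        struct btrfs_key location = {
                .objectid = ino,
                .type = BTRFS_INODE_ITEM_KEY,
                .offset = 0,
        };

        return btrfs_insert_dir_item(trans, root, "example", 7, parent_ino,
                                     &location, BTRFS_FT_REG_FILE, index);
}
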

View File

@@ -195,7 +195,6 @@ int btrfs_add_root_ref(struct btrfs_trans_handle *trans,
struct extent_buffer *leaf;
unsigned long ptr;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;