// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Alexander Block. All rights reserved.
 */

#include <linux/bsearch.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/sort.h>
#include <linux/mount.h>
#include <linux/xattr.h>
#include <linux/posix_acl_xattr.h>
#include <linux/radix-tree.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/compat.h>
#include <linux/crc32c.h>

#include "send.h"
#include "backref.h"
#include "locking.h"
#include "disk-io.h"
#include "btrfs_inode.h"
#include "transaction.h"
#include "compression.h"
#include "xattr.h"
#include "print-tree.h"

/*
 * Maximum number of references an extent can have in order for us to attempt to
 * issue clone operations instead of write operations. This currently exists to
 * avoid hitting limitations of the backreference walking code (taking a lot of
 * time and using too much memory for extents with a large number of references).
 */
#define SEND_MAX_EXTENT_REFS	64

/*
 * A fs_path is a helper to dynamically build path names with unknown size.
 * It reallocates the internal buffer on demand.
 * It allows fast adding of path elements on the right side (normal path) and
 * fast adding to the left side (reversed path). A reversed path can also be
 * unreversed if needed.
 */
struct fs_path {
	union {
		struct {
			char *start;
			char *end;

			char *buf;
			unsigned short buf_len:15;
			unsigned short reversed:1;
			char inline_buf[];
		};
		/*
		 * Average path length does not exceed 200 bytes, we'll have
		 * better packing in the slab and higher chance to satisfy
		 * an allocation later during send.
		 */
		char pad[256];
	};
};
#define FS_PATH_INLINE_SIZE \
	(sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))
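
/*
 * Example usage (hypothetical component names): get_cur_path() builds paths
 * bottom-up, so it works on a reversed fs_path and prepends one component per
 * iteration. Adding "c", "b" and "a" in that order:
 *
 *	struct fs_path *p = fs_path_alloc_reversed();
 *
 *	fs_path_add(p, "c", 1);		buffer now holds "c"
 *	fs_path_add(p, "b", 1);		buffer now holds "b/c"
 *	fs_path_add(p, "a", 1);		buffer now holds "a/b/c"
 *	fs_path_unreverse(p);		p->start points at "a/b/c"
 *	fs_path_free(p);
 *
 * Components are written right to left into the buffer, so prepending costs
 * O(component length) instead of O(path length).
 */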


/* reused for each extent */
struct clone_root {
	struct btrfs_root *root;
	u64 ino;
	u64 offset;

	u64 found_refs;
};

#define SEND_CTX_MAX_NAME_CACHE_SIZE 128
#define SEND_CTX_NAME_CACHE_CLEAN_SIZE (SEND_CTX_MAX_NAME_CACHE_SIZE * 2)

struct send_ctx {
	struct file *send_filp;
	loff_t send_off;
	char *send_buf;
	u32 send_size;
	u32 send_max_size;
	u64 total_send_size;
	u64 cmd_send_size[BTRFS_SEND_C_MAX + 1];
	u64 flags;	/* 'flags' member of btrfs_ioctl_send_args is u64 */
	/* Protocol version compatibility requested */
	u32 proto;

	struct btrfs_root *send_root;
	struct btrfs_root *parent_root;
	struct clone_root *clone_roots;
	int clone_roots_cnt;

	/* current state of the compare_tree call */
	struct btrfs_path *left_path;
	struct btrfs_path *right_path;
	struct btrfs_key *cmp_key;

	/*
	 * Keep track of the generation of the last transaction that was used
	 * for relocating a block group. This is periodically checked in order
	 * to detect if a relocation happened since the last check, so that we
	 * don't operate on stale extent buffers for nodes (level >= 1) or on
	 * stale disk_bytenr values of file extent items.
	 */
	u64 last_reloc_trans;

	/*
	 * Info about the currently processed inode. In case of deleted inodes,
	 * these are the values from the deleted inode.
	 */
	u64 cur_ino;
	u64 cur_inode_gen;
	int cur_inode_new;
	int cur_inode_new_gen;
	int cur_inode_deleted;
	u64 cur_inode_size;
	u64 cur_inode_mode;
	u64 cur_inode_rdev;
	u64 cur_inode_last_extent;
	u64 cur_inode_next_write_offset;
	bool ignore_cur_inode;

	u64 send_progress;

	struct list_head new_refs;
	struct list_head deleted_refs;

	struct radix_tree_root name_cache;
	struct list_head name_cache_list;
	int name_cache_size;

	struct file_ra_state ra;

	/*
	 * We process inodes in ascending order, so if before an incremental
	 * send we reversed the parent/child relationship of directories such
	 * that a directory with a lower inode number was the parent of a
	 * directory with a higher inode number, and the one becoming the new
	 * parent got renamed too, we can't rename/move the directory with the
	 * lower inode number when we finish processing it - we must process
	 * the directory with the higher inode number first, then rename/move
	 * it and then rename/move the directory with the lower inode number.
	 * Example follows.
	 *
	 * Tree state when the first send was performed:
	 *
	 * .
	 * |-- a                   (ino 257)
	 *     |-- b               (ino 258)
	 *         |
	 *         |
	 *         |-- c           (ino 259)
	 *         |   |-- d       (ino 260)
	 *         |
	 *         |-- c2          (ino 261)
	 *
	 * Tree state when the second (incremental) send is performed:
	 *
	 * .
	 * |-- a                   (ino 257)
	 *     |-- b               (ino 258)
	 *         |-- c2          (ino 261)
	 *             |-- d2      (ino 260)
	 *                 |-- cc  (ino 259)
	 *
	 * The sequence of steps that lead to the second state was:
	 *
	 * mv /a/b/c/d /a/b/c2/d2
	 * mv /a/b/c /a/b/c2/d2/cc
	 *
	 * "c" has the lower inode number, but we can't move it (2nd mv
	 * operation) before we move "d", which has the higher inode number.
	 *
	 * So we just memorize which move/rename operations must be performed
	 * later when their respective parent is processed and moved/renamed.
	 */

	/* Indexed by parent directory inode number. */
	struct rb_root pending_dir_moves;

	/*
	 * Reverse index, indexed by the inode number of a directory that
	 * is waiting for the move/rename of its immediate parent before its
	 * own move/rename can be performed.
	 */
	struct rb_root waiting_dir_moves;

	/*
	 * A directory that is going to be rm'ed might have a child directory
	 * which is in the pending directory moves index above. In this case,
	 * the directory can only be removed after the move/rename of its child
	 * is performed. Example:
	 *
	 * Parent snapshot:
	 *
	 * .                        (ino 256)
	 * |-- a/                   (ino 257)
	 *     |-- b/               (ino 258)
	 *         |-- c/           (ino 259)
	 *         |   |-- x/       (ino 260)
	 *         |
	 *         |-- y/           (ino 261)
	 *
	 * Send snapshot:
	 *
	 * .                        (ino 256)
	 * |-- a/                   (ino 257)
	 *     |-- b/               (ino 258)
	 *         |-- YY/          (ino 261)
	 *              |-- x/      (ino 260)
	 *
	 * Sequence of steps that lead to the send snapshot:
	 * rm -f /a/b/c/foo.txt
	 * mv /a/b/y /a/b/YY
	 * mv /a/b/c/x /a/b/YY
	 * rmdir /a/b/c
	 *
	 * When the child is processed, its move/rename is delayed until its
	 * parent is processed (as explained above), but all other operations
	 * like update utimes, chown, chgrp, etc, are performed and the paths
	 * that it uses for those operations must use the orphanized name of
	 * its parent (the directory we're going to rm later), so we need to
	 * memorize that name.
	 *
	 * Indexed by the inode number of the directory to be deleted.
	 */
	struct rb_root orphan_dirs;
};

struct pending_dir_move {
	struct rb_node node;
	struct list_head list;
	u64 parent_ino;
	u64 ino;
	u64 gen;
	struct list_head update_refs;
};

struct waiting_dir_move {
	struct rb_node node;
	u64 ino;
	/*
	 * There might be a directory that could not be removed because it was
	 * waiting for this directory inode to be moved first. Therefore after
	 * this directory is moved, we can try to rmdir the inode rmdir_ino.
	 */
	u64 rmdir_ino;
	u64 rmdir_gen;
	bool orphanized;
};

struct orphan_dir_info {
	struct rb_node node;
	u64 ino;
	u64 gen;
	u64 last_dir_index_offset;
};

struct name_cache_entry {
	struct list_head list;
	/*
	 * radix_tree has only 32bit entries but we need to handle 64bit inums.
	 * We use the lower 32bit of the 64bit inum to store it in the tree. If
	 * more than one inum would fall into the same entry, we use radix_list
	 * to store the additional entries. radix_list is also used to store
	 * entries where two entries have the same inum but different
	 * generations.
	 */
	struct list_head radix_list;
	u64 ino;
	u64 gen;
	u64 parent_ino;
	u64 parent_gen;
	int ret;
	int need_later_update;
	int name_len;
	char name[];
};

#define ADVANCE			1
#define ADVANCE_ONLY_NEXT	-1

enum btrfs_compare_tree_result {
	BTRFS_COMPARE_TREE_NEW,
	BTRFS_COMPARE_TREE_DELETED,
	BTRFS_COMPARE_TREE_CHANGED,
	BTRFS_COMPARE_TREE_SAME,
};

__cold
static void inconsistent_snapshot_error(struct send_ctx *sctx,
					enum btrfs_compare_tree_result result,
					const char *what)
{
	const char *result_string;

	switch (result) {
	case BTRFS_COMPARE_TREE_NEW:
		result_string = "new";
		break;
	case BTRFS_COMPARE_TREE_DELETED:
		result_string = "deleted";
		break;
	case BTRFS_COMPARE_TREE_CHANGED:
		result_string = "updated";
		break;
	case BTRFS_COMPARE_TREE_SAME:
		ASSERT(0);
		result_string = "unchanged";
		break;
	default:
		ASSERT(0);
		result_string = "unexpected";
	}

	btrfs_err(sctx->send_root->fs_info,
		  "Send: inconsistent snapshot, found %s %s for inode %llu without updated inode item, send root is %llu, parent root is %llu",
		  result_string, what, sctx->cmp_key->objectid,
		  sctx->send_root->root_key.objectid,
		  (sctx->parent_root ?
		   sctx->parent_root->root_key.objectid : 0));
}

__maybe_unused
static bool proto_cmd_ok(const struct send_ctx *sctx, int cmd)
{
	switch (sctx->proto) {
	case 1:	 return cmd < __BTRFS_SEND_C_MAX_V1;
	case 2:	 return cmd < __BTRFS_SEND_C_MAX_V2;
	default: return false;
	}
}

static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);

static struct waiting_dir_move *
get_waiting_dir_move(struct send_ctx *sctx, u64 ino);

static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen);

static int need_send_hole(struct send_ctx *sctx)
{
	return (sctx->parent_root && !sctx->cur_inode_new &&
		!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted &&
		S_ISREG(sctx->cur_inode_mode));
}

static void fs_path_reset(struct fs_path *p)
{
	if (p->reversed) {
		p->start = p->buf + p->buf_len - 1;
		p->end = p->start;
		*p->start = 0;
	} else {
		p->start = p->buf;
		p->end = p->start;
		*p->start = 0;
	}
}

static struct fs_path *fs_path_alloc(void)
{
	struct fs_path *p;

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return NULL;
	p->reversed = 0;
	p->buf = p->inline_buf;
	p->buf_len = FS_PATH_INLINE_SIZE;
	fs_path_reset(p);
	return p;
}

static struct fs_path *fs_path_alloc_reversed(void)
{
	struct fs_path *p;

	p = fs_path_alloc();
	if (!p)
		return NULL;
	p->reversed = 1;
	fs_path_reset(p);
	return p;
}

static void fs_path_free(struct fs_path *p)
{
	if (!p)
		return;
	if (p->buf != p->inline_buf)
		kfree(p->buf);
	kfree(p);
}

static int fs_path_len(struct fs_path *p)
{
	return p->end - p->start;
}

static int fs_path_ensure_buf(struct fs_path *p, int len)
{
	char *tmp_buf;
	int path_len;
	int old_buf_len;

	len++;

	if (p->buf_len >= len)
		return 0;

	if (len > PATH_MAX) {
		WARN_ON(1);
		return -ENOMEM;
	}

	path_len = p->end - p->start;
	old_buf_len = p->buf_len;

	/*
	 * First time the inline_buf does not suffice
	 */
	if (p->buf == p->inline_buf) {
		tmp_buf = kmalloc(len, GFP_KERNEL);
		if (tmp_buf)
			memcpy(tmp_buf, p->buf, old_buf_len);
	} else {
		tmp_buf = krealloc(p->buf, len, GFP_KERNEL);
	}
	if (!tmp_buf)
		return -ENOMEM;
	p->buf = tmp_buf;
	/*
	 * The real size of the buffer is bigger, this will let the fast path
	 * happen most of the time
	 */
	p->buf_len = ksize(p->buf);

	if (p->reversed) {
		/*
		 * A reversed path is stored at the end of the buffer, so move
		 * it (including the terminating NUL) to the end of the new,
		 * larger buffer.
		 */
		tmp_buf = p->buf + old_buf_len - path_len - 1;
		p->end = p->buf + p->buf_len - 1;
		p->start = p->end - path_len;
		memmove(p->start, tmp_buf, path_len + 1);
	} else {
		p->start = p->buf;
		p->end = p->start + path_len;
	}
	return 0;
}
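
/*
 * Example of growing a reversed path (hypothetical sizes): with buf_len 8 and
 * the path "a/bc" stored at the end of the buffer,
 *
 *	buf: [ ? ? ? a / b c \0 ]			start = buf + 3
 *
 * after fs_path_ensure_buf() grows the buffer to 16 bytes, the path (with its
 * terminating NUL) is moved so that it again ends at the last byte:
 *
 *	buf: [ ? ? ? ? ? ? ? ? ? ? ? a / b c \0 ]	start = buf + 11
 *
 * keeping the free space on the left, where further components get prepended.
 */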

static int fs_path_prepare_for_add(struct fs_path *p, int name_len,
				   char **prepared)
{
	int ret;
	int new_len;

	new_len = p->end - p->start + name_len;
	if (p->start != p->end)
		new_len++;
	ret = fs_path_ensure_buf(p, new_len);
	if (ret < 0)
		goto out;

	if (p->reversed) {
		if (p->start != p->end)
			*--p->start = '/';
		p->start -= name_len;
		*prepared = p->start;
	} else {
		if (p->start != p->end)
			*p->end++ = '/';
		*prepared = p->end;
		p->end += name_len;
		*p->end = 0;
	}

out:
	return ret;
}

static int fs_path_add(struct fs_path *p, const char *name, int name_len)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, name_len, &prepared);
	if (ret < 0)
		goto out;
	memcpy(prepared, name, name_len);

out:
	return ret;
}

static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, p2->end - p2->start, &prepared);
	if (ret < 0)
		goto out;
	memcpy(prepared, p2->start, p2->end - p2->start);

out:
	return ret;
}

static int fs_path_add_from_extent_buffer(struct fs_path *p,
					  struct extent_buffer *eb,
					  unsigned long off, int len)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, len, &prepared);
	if (ret < 0)
		goto out;

	read_extent_buffer(eb, prepared, off, len);

out:
	return ret;
}

static int fs_path_copy(struct fs_path *p, struct fs_path *from)
{
	int ret;

	p->reversed = from->reversed;
	fs_path_reset(p);

	ret = fs_path_add_path(p, from);

	return ret;
}


static void fs_path_unreverse(struct fs_path *p)
{
	char *tmp;
	int len;

	if (!p->reversed)
		return;

	tmp = p->start;
	len = p->end - p->start;
	p->start = p->buf;
	p->end = p->start + len;
	memmove(p->start, tmp, len + 1);
	p->reversed = 0;
}

static struct btrfs_path *alloc_path_for_send(void)
{
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return NULL;
	path->search_commit_root = 1;
	path->skip_locking = 1;
	path->need_commit_sem = 1;
	return path;
}

static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
{
	int ret;
	u32 pos = 0;

	while (pos < len) {
		ret = kernel_write(filp, buf + pos, len - pos, off);
		/* TODO handle that correctly */
		/*if (ret == -ERESTARTSYS) {
			continue;
		}*/
		if (ret < 0)
			return ret;
		if (ret == 0) {
			return -EIO;
		}
		pos += ret;
	}

	return 0;
}

static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
{
	struct btrfs_tlv_header *hdr;
	int total_len = sizeof(*hdr) + len;
	int left = sctx->send_max_size - sctx->send_size;

	if (unlikely(left < total_len))
		return -EOVERFLOW;

	hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size);
	put_unaligned_le16(attr, &hdr->tlv_type);
	put_unaligned_le16(len, &hdr->tlv_len);
	memcpy(hdr + 1, data, len);
	sctx->send_size += total_len;

	return 0;
}

#define TLV_PUT_DEFINE_INT(bits) \
	static int tlv_put_u##bits(struct send_ctx *sctx,		\
				   u##bits attr, u##bits value)		\
	{								\
		__le##bits __tmp = cpu_to_le##bits(value);		\
		return tlv_put(sctx, attr, &__tmp, sizeof(__tmp));	\
	}

TLV_PUT_DEFINE_INT(64)

static int tlv_put_string(struct send_ctx *sctx, u16 attr,
			  const char *str, int len)
{
	if (len == -1)
		len = strlen(str);
	return tlv_put(sctx, attr, str, len);
}

static int tlv_put_uuid(struct send_ctx *sctx, u16 attr,
			const u8 *uuid)
{
	return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE);
}

static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
				  struct extent_buffer *eb,
				  struct btrfs_timespec *ts)
{
	struct btrfs_timespec bts;
	read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts));
	return tlv_put(sctx, attr, &bts, sizeof(bts));
}


#define TLV_PUT(sctx, attrtype, data, attrlen) \
	do { \
		ret = tlv_put(sctx, attrtype, data, attrlen); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_INT(sctx, attrtype, bits, value) \
	do { \
		ret = tlv_put_u##bits(sctx, attrtype, value); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
#define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
#define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
#define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
#define TLV_PUT_STRING(sctx, attrtype, str, len) \
	do { \
		ret = tlv_put_string(sctx, attrtype, str, len); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_PATH(sctx, attrtype, p) \
	do { \
		ret = tlv_put_string(sctx, attrtype, p->start, \
				     p->end - p->start); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_UUID(sctx, attrtype, uuid) \
	do { \
		ret = tlv_put_uuid(sctx, attrtype, uuid); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
	do { \
		ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

static int send_header(struct send_ctx *sctx)
{
	struct btrfs_stream_header hdr;

	strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
	hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION);

	return write_buf(sctx->send_filp, &hdr, sizeof(hdr),
					&sctx->send_off);
}
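
/*
 * Resulting layout of the send stream, as produced by send_header() above and
 * begin_cmd()/send_cmd() below (sizes refer to the on-wire structures):
 *
 *	btrfs_stream_header	magic "btrfs-stream" + le32 version
 *	btrfs_cmd_header	le32 len, le16 cmd, le32 crc
 *	  TLVs			le16 tlv_type, le16 tlv_len, data ...
 *	btrfs_cmd_header	next command
 *	  TLVs			...
 *
 * begin_cmd() reserves room for the command header, the TLV_PUT* helpers
 * append attributes behind it, and send_cmd() fills in the final length and
 * the crc32c (computed over the whole command with the crc field set to 0).
 */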

/*
 * For each command/item we want to send to userspace, we call this function.
 */
static int begin_cmd(struct send_ctx *sctx, int cmd)
{
	struct btrfs_cmd_header *hdr;

	if (WARN_ON(!sctx->send_buf))
		return -EINVAL;

	BUG_ON(sctx->send_size);

	sctx->send_size += sizeof(*hdr);
	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	put_unaligned_le16(cmd, &hdr->cmd);

	return 0;
}

static int send_cmd(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_cmd_header *hdr;
	u32 crc;

	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	put_unaligned_le32(sctx->send_size - sizeof(*hdr), &hdr->len);
	put_unaligned_le32(0, &hdr->crc);

	crc = btrfs_crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
	put_unaligned_le32(crc, &hdr->crc);

	ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
					&sctx->send_off);

	sctx->total_send_size += sctx->send_size;
	sctx->cmd_send_size[get_unaligned_le16(&hdr->cmd)] += sctx->send_size;
	sctx->send_size = 0;

	return ret;
}

/*
 * Sends a move instruction to user space
 */
static int send_rename(struct send_ctx *sctx,
		       struct fs_path *from, struct fs_path *to)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret;

	btrfs_debug(fs_info, "send_rename %s -> %s", from->start, to->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Sends a link instruction to user space
 */
static int send_link(struct send_ctx *sctx,
		     struct fs_path *path, struct fs_path *lnk)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret;

	btrfs_debug(fs_info, "send_link %s -> %s", path->start, lnk->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Sends an unlink instruction to user space
 */
static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret;

	btrfs_debug(fs_info, "send_unlink %s", path->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Sends a rmdir instruction to user space
 */
static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret;

	btrfs_debug(fs_info, "send_rmdir %s", path->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Helper function to retrieve some fields from an inode item.
 */
static int __get_inode_info(struct btrfs_root *root, struct btrfs_path *path,
			    u64 ino, u64 *size, u64 *gen, u64 *mode, u64 *uid,
			    u64 *gid, u64 *rdev)
{
	int ret;
	struct btrfs_inode_item *ii;
	struct btrfs_key key;

	key.objectid = ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		return ret;
	}

	ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
			struct btrfs_inode_item);
	if (size)
		*size = btrfs_inode_size(path->nodes[0], ii);
	if (gen)
		*gen = btrfs_inode_generation(path->nodes[0], ii);
	if (mode)
		*mode = btrfs_inode_mode(path->nodes[0], ii);
	if (uid)
		*uid = btrfs_inode_uid(path->nodes[0], ii);
	if (gid)
		*gid = btrfs_inode_gid(path->nodes[0], ii);
	if (rdev)
		*rdev = btrfs_inode_rdev(path->nodes[0], ii);

	return ret;
}

static int get_inode_info(struct btrfs_root *root,
			  u64 ino, u64 *size, u64 *gen,
			  u64 *mode, u64 *uid, u64 *gid,
			  u64 *rdev)
{
	struct btrfs_path *path;
	int ret;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;
	ret = __get_inode_info(root, path, ino, size, gen, mode, uid, gid,
			       rdev);
	btrfs_free_path(path);
	return ret;
}

typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
				   struct fs_path *p,
				   void *ctx);
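
/*
 * Layout walked by iterate_inode_ref() below (illustration): one INODE_REF or
 * INODE_EXTREF item may pack several back references, each a fixed-size header
 * immediately followed by its name:
 *
 *	INODE_REF item:    [btrfs_inode_ref][name][btrfs_inode_ref][name]...
 *	INODE_EXTREF item: [btrfs_inode_extref][name][btrfs_inode_extref]...
 *
 * For INODE_REF the parent directory is the item key's offset, while each
 * extref element carries its own parent, which is why the loop below reads
 * btrfs_inode_extref_parent() only in the extref case.
 */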

/*
 * Helper function to iterate the entries in ONE btrfs_inode_ref or
 * btrfs_inode_extref.
 * The iterate callback may return a non-zero value to stop iteration. This can
 * be a negative value for error codes or 1 to simply stop it.
 *
 * path must point to the INODE_REF or INODE_EXTREF when called.
 */
static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
			     struct btrfs_key *found_key, int resolve,
			     iterate_inode_ref_t iterate, void *ctx)
{
	struct extent_buffer *eb = path->nodes[0];
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_inode_extref *extref;
	struct btrfs_path *tmp_path;
	struct fs_path *p;
	u32 cur = 0;
	u32 total;
	int slot = path->slots[0];
	u32 name_len;
	char *start;
	int ret = 0;
	int num = 0;
	int index;
	u64 dir;
	unsigned long name_off;
	unsigned long elem_size;
	unsigned long ptr;

	p = fs_path_alloc_reversed();
	if (!p)
		return -ENOMEM;

	tmp_path = alloc_path_for_send();
	if (!tmp_path) {
		fs_path_free(p);
		return -ENOMEM;
	}


	if (found_key->type == BTRFS_INODE_REF_KEY) {
		ptr = (unsigned long)btrfs_item_ptr(eb, slot,
						    struct btrfs_inode_ref);
		item = btrfs_item_nr(slot);
		total = btrfs_item_size(eb, item);
		elem_size = sizeof(*iref);
	} else {
		ptr = btrfs_item_ptr_offset(eb, slot);
		total = btrfs_item_size_nr(eb, slot);
		elem_size = sizeof(*extref);
	}

	while (cur < total) {
		fs_path_reset(p);

		if (found_key->type == BTRFS_INODE_REF_KEY) {
			iref = (struct btrfs_inode_ref *)(ptr + cur);
			name_len = btrfs_inode_ref_name_len(eb, iref);
			name_off = (unsigned long)(iref + 1);
			index = btrfs_inode_ref_index(eb, iref);
			dir = found_key->offset;
		} else {
			extref = (struct btrfs_inode_extref *)(ptr + cur);
			name_len = btrfs_inode_extref_name_len(eb, extref);
			name_off = (unsigned long)&extref->name;
			index = btrfs_inode_extref_index(eb, extref);
			dir = btrfs_inode_extref_parent(eb, extref);
		}

		if (resolve) {
			start = btrfs_ref_to_path(root, tmp_path, name_len,
						  name_off, eb, dir,
						  p->buf, p->buf_len);
			if (IS_ERR(start)) {
				ret = PTR_ERR(start);
				goto out;
			}
			if (start < p->buf) {
				/* overflow, try again with a larger buffer */
				ret = fs_path_ensure_buf(p,
						p->buf_len + p->buf - start);
				if (ret < 0)
					goto out;
				start = btrfs_ref_to_path(root, tmp_path,
							  name_len, name_off,
							  eb, dir,
							  p->buf, p->buf_len);
				if (IS_ERR(start)) {
					ret = PTR_ERR(start);
					goto out;
				}
				BUG_ON(start < p->buf);
			}
			p->start = start;
		} else {
			ret = fs_path_add_from_extent_buffer(p, eb, name_off,
							     name_len);
			if (ret < 0)
				goto out;
		}

		cur += elem_size + name_len;
		ret = iterate(num, dir, index, p, ctx);
		if (ret)
			goto out;
		num++;
	}

out:
	btrfs_free_path(tmp_path);
	fs_path_free(p);
	return ret;
}

typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key,
				  const char *name, int name_len,
				  const char *data, int data_len,
				  u8 type, void *ctx);
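
/*
 * Layout walked by iterate_dir_item() below (illustration): one DIR_ITEM or
 * XATTR_ITEM item can hold several entries, each a btrfs_dir_item header
 * followed by the name and, for xattrs, the value:
 *
 *	[btrfs_dir_item][name][data][btrfs_dir_item][name][data]...
 *
 * This is why the loop below advances by sizeof(*di) + name_len + data_len
 * and copies name and data out with a single read_extent_buffer() call.
 */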

/*
 * Helper function to iterate the entries in ONE btrfs_dir_item.
 * The iterate callback may return a non-zero value to stop iteration. This can
 * be a negative value for error codes or 1 to simply stop it.
 *
 * path must point to the dir item when called.
 */
static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
			    iterate_dir_item_t iterate, void *ctx)
{
	int ret = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_dir_item *di;
	struct btrfs_key di_key;
	char *buf = NULL;
	int buf_len;
	u32 name_len;
	u32 data_len;
	u32 cur;
	u32 len;
	u32 total;
	int slot;
	int num;
	u8 type;

	/*
	 * Start with a small buffer (1 page). If later we end up needing more
	 * space, which can happen for xattrs on a fs with a leaf size greater
	 * than the page size, attempt to increase the buffer. Typically xattr
	 * values are small.
	 */
	buf_len = PATH_MAX;
	buf = kmalloc(buf_len, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out;
	}

	eb = path->nodes[0];
	slot = path->slots[0];
	item = btrfs_item_nr(slot);
	di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
	cur = 0;
	len = 0;
	total = btrfs_item_size(eb, item);

	num = 0;
	while (cur < total) {
		name_len = btrfs_dir_name_len(eb, di);
		data_len = btrfs_dir_data_len(eb, di);
		type = btrfs_dir_type(eb, di);
		btrfs_dir_item_key_to_cpu(eb, di, &di_key);

		if (type == BTRFS_FT_XATTR) {
			if (name_len > XATTR_NAME_MAX) {
				ret = -ENAMETOOLONG;
				goto out;
			}
			if (name_len + data_len >
					BTRFS_MAX_XATTR_SIZE(root->fs_info)) {
				ret = -E2BIG;
				goto out;
			}
		} else {
			/*
			 * Path too long
			 */
			if (name_len + data_len > PATH_MAX) {
				ret = -ENAMETOOLONG;
				goto out;
			}
		}

		if (name_len + data_len > buf_len) {
			buf_len = name_len + data_len;
			if (is_vmalloc_addr(buf)) {
				vfree(buf);
				buf = NULL;
			} else {
				char *tmp = krealloc(buf, buf_len,
						GFP_KERNEL | __GFP_NOWARN);

				if (!tmp)
					kfree(buf);
				buf = tmp;
			}
			if (!buf) {
				buf = kvmalloc(buf_len, GFP_KERNEL);
				if (!buf) {
					ret = -ENOMEM;
					goto out;
				}
			}
		}

		read_extent_buffer(eb, buf, (unsigned long)(di + 1),
				name_len + data_len);

		len = sizeof(*di) + name_len + data_len;
		di = (struct btrfs_dir_item *)((char *)di + len);
		cur += len;

		ret = iterate(num, &di_key, buf, name_len, buf + name_len,
			      data_len, type, ctx);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 0;
			goto out;
		}

		num++;
	}

out:
	kvfree(buf);
	return ret;
}

static int __copy_first_ref(int num, u64 dir, int index,
			    struct fs_path *p, void *ctx)
{
	int ret;
	struct fs_path *pt = ctx;

	ret = fs_path_copy(pt, p);
	if (ret < 0)
		return ret;

	/* we want the first only */
	return 1;
}

/*
 * Retrieve the first path of an inode. If an inode has more than one
 * ref/hardlink, this is ignored.
 */
static int get_inode_path(struct btrfs_root *root,
			  u64 ino, struct fs_path *path)
{
	int ret;
	struct btrfs_key key, found_key;
	struct btrfs_path *p;

	p = alloc_path_for_send();
	if (!p)
		return -ENOMEM;

	fs_path_reset(path);

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(root, &key, p, 1, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		ret = 1;
		goto out;
	}
	btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
	if (found_key.objectid != ino ||
	    (found_key.type != BTRFS_INODE_REF_KEY &&
	     found_key.type != BTRFS_INODE_EXTREF_KEY)) {
		ret = -ENOENT;
		goto out;
	}

	ret = iterate_inode_ref(root, p, &found_key, 1,
				__copy_first_ref, path);
	if (ret < 0)
		goto out;
	ret = 0;

out:
	btrfs_free_path(p);
	return ret;
}

struct backref_ctx {
	struct send_ctx *sctx;

	/* number of total found references */
	u64 found;

	/*
	 * used for clones found in send_root. clones found behind cur_objectid
	 * and cur_offset are not considered as allowed clones.
	 */
	u64 cur_objectid;
	u64 cur_offset;

	/* may be truncated in case it's the last extent in a file */
	u64 extent_len;

	/* Just to check for bugs in backref resolving */
	int found_itself;
};

static int __clone_root_cmp_bsearch(const void *key, const void *elt)
{
	u64 root = (u64)(uintptr_t)key;
	const struct clone_root *cr = elt;

	if (root < cr->root->root_key.objectid)
		return -1;
	if (root > cr->root->root_key.objectid)
		return 1;
	return 0;
}

static int __clone_root_cmp_sort(const void *e1, const void *e2)
{
	const struct clone_root *cr1 = e1;
	const struct clone_root *cr2 = e2;

	if (cr1->root->root_key.objectid < cr2->root->root_key.objectid)
		return -1;
	if (cr1->root->root_key.objectid > cr2->root->root_key.objectid)
		return 1;
	return 0;
}

/*
 * Called for every backref that is found for the current extent.
 * Results are collected in sctx->clone_roots->ino/offset/found_refs
 */
static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
{
	struct backref_ctx *bctx = ctx_;
	struct clone_root *found;

	/* First check if the root is in the list of accepted clone sources */
	found = bsearch((void *)(uintptr_t)root, bctx->sctx->clone_roots,
			bctx->sctx->clone_roots_cnt,
			sizeof(struct clone_root),
			__clone_root_cmp_bsearch);
	if (!found)
		return 0;

	if (found->root == bctx->sctx->send_root &&
	    ino == bctx->cur_objectid &&
	    offset == bctx->cur_offset) {
		bctx->found_itself = 1;
	}

	/*
	 * Make sure we don't consider clones from send_root that are
	 * behind the current inode/offset.
	 */
	if (found->root == bctx->sctx->send_root) {
		/*
		 * If the source inode was not yet processed we can't issue a
		 * clone operation, as the source extent does not exist yet at
		 * the destination of the stream.
		 */
		if (ino > bctx->cur_objectid)
			return 0;
		/*
		 * We clone from the inode currently being sent as long as the
		 * source extent is already processed, otherwise we could try
		 * to clone from an extent that does not exist yet at the
		 * destination of the stream.
		 */
		if (ino == bctx->cur_objectid &&
		    offset + bctx->extent_len >
		    bctx->sctx->cur_inode_next_write_offset)
			return 0;
	}

	bctx->found++;
	found->found_refs++;
	if (ino < found->ino) {
		found->ino = ino;
		found->offset = offset;
	} else if (found->ino == ino) {
		/*
		 * same extent found more than once in the same file.
		 */
		if (found->offset > offset + bctx->extent_len)
			found->offset = offset;
	}

	return 0;
}

/*
 * Given an inode, offset and extent item, it finds a good clone for a clone
 * instruction. Returns -ENOENT when none could be found. The function makes
 * sure that the returned clone is usable at the point where sending is at the
 * moment. This means that no clones are accepted which lie behind the current
 * inode+offset.
 *
 * path must point to the extent item when called.
 */
static int find_extent_clone(struct send_ctx *sctx,
			     struct btrfs_path *path,
			     u64 ino, u64 data_offset,
			     u64 ino_size,
			     struct clone_root **found)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret;
	int extent_type;
	u64 logical;
	u64 disk_byte;
	u64 num_bytes;
	u64 extent_item_pos;
	u64 flags = 0;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *eb = path->nodes[0];
	struct backref_ctx backref_ctx = {0};
	struct clone_root *cur_clone_root;
	struct btrfs_key found_key;
	struct btrfs_path *tmp_path;
	struct btrfs_extent_item *ei;
	int compressed;
	u32 i;

	tmp_path = alloc_path_for_send();
	if (!tmp_path)
		return -ENOMEM;

	/* We only use this path under the commit sem */
	tmp_path->need_commit_sem = 0;

	if (data_offset >= ino_size) {
		/*
		 * There may be extents that lie behind the file's size.
		 * I at least had this in combination with snapshotting while
		 * writing large files.
		 */
		ret = 0;
		goto out;
	}

	fi = btrfs_item_ptr(eb, path->slots[0],
			    struct btrfs_file_extent_item);
	extent_type = btrfs_file_extent_type(eb, fi);
	if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
		ret = -ENOENT;
		goto out;
	}
	compressed = btrfs_file_extent_compression(eb, fi);

	num_bytes = btrfs_file_extent_num_bytes(eb, fi);
	disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
	if (disk_byte == 0) {
		ret = -ENOENT;
		goto out;
	}
	logical = disk_byte + btrfs_file_extent_offset(eb, fi);

	down_read(&fs_info->commit_root_sem);
	ret = extent_from_logical(fs_info, disk_byte, tmp_path,
				  &found_key, &flags);
	up_read(&fs_info->commit_root_sem);

	if (ret < 0)
		goto out;
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		ret = -EIO;
		goto out;
	}

	ei = btrfs_item_ptr(tmp_path->nodes[0], tmp_path->slots[0],
			    struct btrfs_extent_item);
	/*
	 * Backreference walking (iterate_extent_inodes() below) is currently
	 * too expensive when an extent has a large number of references, both
	 * in time spent and used memory. So for now just fall back to write
	 * operations instead of clone operations when an extent has more than
	 * a certain amount of references.
	 */
	if (btrfs_extent_refs(tmp_path->nodes[0], ei) > SEND_MAX_EXTENT_REFS) {
		ret = -ENOENT;
		goto out;
	}
	btrfs_release_path(tmp_path);

	/*
	 * Setup the clone roots.
	 */
	for (i = 0; i < sctx->clone_roots_cnt; i++) {
		cur_clone_root = sctx->clone_roots + i;
		cur_clone_root->ino = (u64)-1;
		cur_clone_root->offset = 0;
		cur_clone_root->found_refs = 0;
	}

	backref_ctx.sctx = sctx;
	backref_ctx.found = 0;
	backref_ctx.cur_objectid = ino;
	backref_ctx.cur_offset = data_offset;
	backref_ctx.found_itself = 0;
	backref_ctx.extent_len = num_bytes;

	/*
	 * The last extent of a file may be too large due to page alignment.
	 * We need to adjust extent_len in this case so that the checks in
	 * __iterate_backrefs work.
	 */
	if (data_offset + num_bytes >= ino_size)
		backref_ctx.extent_len = ino_size - data_offset;

	/*
	 * Now collect all backrefs.
	 */
	if (compressed == BTRFS_COMPRESS_NONE)
		extent_item_pos = logical - found_key.objectid;
	else
		extent_item_pos = 0;
	ret = iterate_extent_inodes(fs_info, found_key.objectid,
				    extent_item_pos, 1, __iterate_backrefs,
				    &backref_ctx, false);

	if (ret < 0)
		goto out;

	down_read(&fs_info->commit_root_sem);
	if (fs_info->last_reloc_trans > sctx->last_reloc_trans) {
		/*
		 * A transaction commit for a transaction in which block group
		 * relocation was done just happened.
		 * The disk_bytenr of the file extent item we processed is
		 * possibly stale, referring to the extent's location before
		 * relocation. So act as if we haven't found any clone sources
		 * and fall back to write commands, which will read the correct
		 * data from the new extent location. Otherwise we will fail
		 * below because we haven't found our own back reference or we
		 * could be getting incorrect sources in case the old extent
		 * was already reallocated after the relocation.
		 */
		up_read(&fs_info->commit_root_sem);
		ret = -ENOENT;
		goto out;
	}
	up_read(&fs_info->commit_root_sem);

	if (!backref_ctx.found_itself) {
		/* found a bug in backref code? */
		ret = -EIO;
		btrfs_err(fs_info,
			  "did not find backref in send_root. inode=%llu, offset=%llu, disk_byte=%llu found extent=%llu",
			  ino, data_offset, disk_byte, found_key.objectid);
		goto out;
	}

	btrfs_debug(fs_info,
		    "find_extent_clone: data_offset=%llu, ino=%llu, num_bytes=%llu, logical=%llu",
		    data_offset, ino, num_bytes, logical);

	if (!backref_ctx.found)
		btrfs_debug(fs_info, "no clones found");

	cur_clone_root = NULL;
	for (i = 0; i < sctx->clone_roots_cnt; i++) {
		if (sctx->clone_roots[i].found_refs) {
			if (!cur_clone_root)
				cur_clone_root = sctx->clone_roots + i;
			else if (sctx->clone_roots[i].root == sctx->send_root)
				/* prefer clones from send_root over others */
				cur_clone_root = sctx->clone_roots + i;
		}

	}

	if (cur_clone_root) {
		*found = cur_clone_root;
		ret = 0;
	} else {
		ret = -ENOENT;
	}

out:
	btrfs_free_path(tmp_path);
	return ret;
}

static int read_symlink(struct btrfs_root *root,
			u64 ino,
			struct fs_path *dest)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_file_extent_item *ei;
	u8 type;
	u8 compression;
	unsigned long off;
	int len;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		/*
		 * An empty symlink inode. Can happen in rare error paths when
		 * creating a symlink (transaction committed before the inode
		 * eviction handler removed the symlink inode items and a crash
		 * happened in between or the subvolume was snapshotted in
		 * between).
		 * Print an informative message to dmesg/syslog so that the user
		 * can delete the symlink.
		 */
		btrfs_err(root->fs_info,
			  "Found empty symlink inode %llu at root %llu",
			  ino, root->root_key.objectid);
		ret = -EIO;
		goto out;
	}

	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
			struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], ei);
	compression = btrfs_file_extent_compression(path->nodes[0], ei);
	BUG_ON(type != BTRFS_FILE_EXTENT_INLINE);
	BUG_ON(compression);

	off = btrfs_file_extent_inline_start(ei);
	len = btrfs_file_extent_ram_bytes(path->nodes[0], ei);

	ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);

out:
	btrfs_free_path(path);
	return ret;
}
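
/*
 * Orphan names generated by gen_unique_name() below have the form
 * "o<ino>-<gen>-<idx>", e.g. "o261-7-0" for inode 261 with generation 7
 * (hypothetical values); idx is incremented until the name is free in the
 * top level directory of both the send root and the parent root.
 */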

/*
 * Helper function to generate a file name that is unique in the root of
 * send_root and parent_root. This is used to generate names for orphan inodes.
 */
static int gen_unique_name(struct send_ctx *sctx,
			   u64 ino, u64 gen,
			   struct fs_path *dest)
{
	int ret = 0;
	struct btrfs_path *path;
	struct btrfs_dir_item *di;
	char tmp[64];
	int len;
	u64 idx = 0;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	while (1) {
		len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu",
				ino, gen, idx);
		ASSERT(len < sizeof(tmp));

		di = btrfs_lookup_dir_item(NULL, sctx->send_root,
				path, BTRFS_FIRST_FREE_OBJECTID,
				tmp, strlen(tmp), 0);
		btrfs_release_path(path);
		if (IS_ERR(di)) {
			ret = PTR_ERR(di);
			goto out;
		}
		if (di) {
			/* not unique, try again */
			idx++;
			continue;
		}

		if (!sctx->parent_root) {
			/* unique */
			ret = 0;
			break;
		}

		di = btrfs_lookup_dir_item(NULL, sctx->parent_root,
				path, BTRFS_FIRST_FREE_OBJECTID,
				tmp, strlen(tmp), 0);
		btrfs_release_path(path);
		if (IS_ERR(di)) {
			ret = PTR_ERR(di);
			goto out;
		}
		if (di) {
			/* not unique, try again */
			idx++;
			continue;
		}
		/* unique */
		break;
	}

	ret = fs_path_add(dest, tmp, strlen(tmp));

out:
	btrfs_free_path(path);
	return ret;
}

enum inode_state {
	inode_state_no_change,
	inode_state_will_create,
	inode_state_did_create,
	inode_state_will_delete,
	inode_state_did_delete,
};

static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret;
	int left_ret;
	int right_ret;
	u64 left_gen;
	u64 right_gen;

	ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL,
			NULL, NULL);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	left_ret = ret;

	if (!sctx->parent_root) {
		right_ret = -ENOENT;
	} else {
		ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen,
				NULL, NULL, NULL, NULL);
		if (ret < 0 && ret != -ENOENT)
			goto out;
		right_ret = ret;
	}

	if (!left_ret && !right_ret) {
		if (left_gen == gen && right_gen == gen) {
			ret = inode_state_no_change;
		} else if (left_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_create;
			else
				ret = inode_state_will_create;
		} else if (right_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_delete;
			else
				ret = inode_state_will_delete;
		} else {
			ret = -ENOENT;
		}
	} else if (!left_ret) {
		if (left_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_create;
			else
				ret = inode_state_will_create;
		} else {
			ret = -ENOENT;
		}
	} else if (!right_ret) {
		if (right_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_delete;
			else
				ret = inode_state_will_delete;
		} else {
			ret = -ENOENT;
		}
	} else {
		ret = -ENOENT;
	}

out:
	return ret;
}

static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret;

	if (ino == BTRFS_FIRST_FREE_OBJECTID)
		return 1;

	ret = get_cur_inode_state(sctx, ino, gen);
	if (ret < 0)
		goto out;

	if (ret == inode_state_no_change ||
	    ret == inode_state_did_create ||
	    ret == inode_state_will_delete)
		ret = 1;
	else
		ret = 0;

out:
	return ret;
}
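
/*
 * Summary of get_cur_inode_state() above, where "left" means the inode item
 * exists in send_root with a matching generation, "right" means the same for
 * parent_root, and "processed" means ino < sctx->send_progress:
 *
 *	gen matches in:		left + right	left only	right only
 *	not yet processed	no_change	will_create	will_delete
 *	already processed	no_change	did_create	did_delete
 *
 * Every other combination resolves to -ENOENT.
 */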

/*
 * Helper function to lookup a dir item in a dir.
 */
static int lookup_dir_item_inode(struct btrfs_root *root,
				 u64 dir, const char *name, int name_len,
				 u64 *found_inode,
				 u8 *found_type)
{
	int ret = 0;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	di = btrfs_lookup_dir_item(NULL, root, path,
			dir, name, name_len, 0);
	if (IS_ERR_OR_NULL(di)) {
		ret = di ? PTR_ERR(di) : -ENOENT;
		goto out;
	}
	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
	if (key.type == BTRFS_ROOT_ITEM_KEY) {
		ret = -ENOENT;
		goto out;
	}
	*found_inode = key.objectid;
	*found_type = btrfs_dir_type(path->nodes[0], di);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Looks up the first btrfs_inode_ref of a given ino. It returns the parent
 * dir, the generation of the parent dir and the name of the dir entry.
 */
static int get_first_ref(struct btrfs_root *root, u64 ino,
			 u64 *dir, u64 *dir_gen, struct fs_path *name)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	int len;
	u64 parent_dir;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
	if (ret < 0)
		goto out;
	if (!ret)
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				path->slots[0]);
	if (ret || found_key.objectid != ino ||
	    (found_key.type != BTRFS_INODE_REF_KEY &&
	     found_key.type != BTRFS_INODE_EXTREF_KEY)) {
		ret = -ENOENT;
		goto out;
	}

	if (found_key.type == BTRFS_INODE_REF_KEY) {
		struct btrfs_inode_ref *iref;
		iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_inode_ref);
		len = btrfs_inode_ref_name_len(path->nodes[0], iref);
		ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
						     (unsigned long)(iref + 1),
						     len);
		parent_dir = found_key.offset;
	} else {
		struct btrfs_inode_extref *extref;
		extref = btrfs_item_ptr(path->nodes[0], path->slots[0],
					struct btrfs_inode_extref);
		len = btrfs_inode_extref_name_len(path->nodes[0], extref);
		ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
					(unsigned long)&extref->name, len);
		parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref);
	}
	if (ret < 0)
		goto out;
	btrfs_release_path(path);

	if (dir_gen) {
		ret = get_inode_info(root, parent_dir, NULL, dir_gen, NULL,
				     NULL, NULL, NULL);
		if (ret < 0)
			goto out;
	}

	*dir = parent_dir;

out:
	btrfs_free_path(path);
	return ret;
}

static int is_first_ref(struct btrfs_root *root,
			u64 ino, u64 dir,
			const char *name, int name_len)
{
	int ret;
	struct fs_path *tmp_name;
	u64 tmp_dir;

	tmp_name = fs_path_alloc();
	if (!tmp_name)
		return -ENOMEM;

	ret = get_first_ref(root, ino, &tmp_dir, NULL, tmp_name);
	if (ret < 0)
		goto out;

	if (dir != tmp_dir || name_len != fs_path_len(tmp_name)) {
		ret = 0;
		goto out;
	}

	ret = !memcmp(tmp_name->start, name, name_len);

out:
	fs_path_free(tmp_name);
	return ret;
}

/*
 * Used by process_recorded_refs to determine if a new ref would overwrite an
 * already existing ref. In case it detects an overwrite, it returns the
 * inode/gen in who_ino/who_gen.
 * When an overwrite is detected, process_recorded_refs does proper orphanizing
 * to make sure later references to the overwritten inode are possible.
 * Orphanizing is however only required for the first ref of an inode.
 * process_recorded_refs does an additional is_first_ref check to see if
 * orphanizing is really required.
 */
static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
			      const char *name, int name_len,
			      u64 *who_ino, u64 *who_gen, u64 *who_mode)
{
	int ret = 0;
	u64 gen;
	u64 other_inode = 0;
	u8 other_type = 0;

	if (!sctx->parent_root)
		goto out;

	ret = is_inode_existent(sctx, dir, dir_gen);
	if (ret <= 0)
		goto out;

	/*
	 * If we have a parent root we need to verify that the parent dir was
	 * not deleted and then re-created, if it was then we have no overwrite
	 * and we can just unlink this entry.
	 */
	if (sctx->parent_root && dir != BTRFS_FIRST_FREE_OBJECTID) {
		ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL,
				     NULL, NULL, NULL);
		if (ret < 0 && ret != -ENOENT)
			goto out;
		if (ret) {
			ret = 0;
			goto out;
		}
		if (gen != dir_gen)
			goto out;
	}

	ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len,
				    &other_inode, &other_type);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	if (ret) {
		ret = 0;
		goto out;
	}

	/*
	 * Check if the overwritten ref was already processed. If yes, the ref
	 * was already unlinked/moved, so we can safely assume that we will not
	 * overwrite anything at this point in time.
	 */
	if (other_inode > sctx->send_progress ||
	    is_waiting_for_move(sctx, other_inode)) {
		ret = get_inode_info(sctx->parent_root, other_inode, NULL,
				who_gen, who_mode, NULL, NULL, NULL);
		if (ret < 0)
			goto out;

		ret = 1;
		*who_ino = other_inode;
	} else {
		ret = 0;
	}

out:
	return ret;
}
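
/*
 * Example of an overwrite that will_overwrite_ref() above detects (made-up
 * inode numbers): the parent snapshot has "a" (ino 258) and "b" (ino 259),
 * and in the send snapshot the old "b" was deleted and 258 was renamed to
 * "b". While processing inode 258 we want to create the ref "b", but in the
 * parent root "b" still points to the not yet processed inode 259, so 259 is
 * returned in who_ino and gets orphanized before the name can be reused.
 */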

/*
 * Checks if the ref was overwritten by an already processed inode. This is
 * used by __get_cur_name_and_parent to find out if the ref was orphanized and
 * thus the orphan name needs to be used.
 * process_recorded_refs also uses it to avoid unlinking of refs that were
 * overwritten.
 */
static int did_overwrite_ref(struct send_ctx *sctx,
			     u64 dir, u64 dir_gen,
			     u64 ino, u64 ino_gen,
			     const char *name, int name_len)
{
	int ret = 0;
	u64 gen;
	u64 ow_inode;
	u8 other_type;

	if (!sctx->parent_root)
		goto out;

	ret = is_inode_existent(sctx, dir, dir_gen);
	if (ret <= 0)
		goto out;

	if (dir != BTRFS_FIRST_FREE_OBJECTID) {
		ret = get_inode_info(sctx->send_root, dir, NULL, &gen, NULL,
				     NULL, NULL, NULL);
		if (ret < 0 && ret != -ENOENT)
			goto out;
		if (ret) {
			ret = 0;
			goto out;
		}
		if (gen != dir_gen)
			goto out;
	}

	/* check if the ref was overwritten by another ref */
	ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len,
				    &ow_inode, &other_type);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	if (ret) {
		/* was never and will never be overwritten */
		ret = 0;
		goto out;
	}

	ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL,
			NULL, NULL);
	if (ret < 0)
		goto out;

	if (ow_inode == ino && gen == ino_gen) {
		ret = 0;
		goto out;
	}

	/*
	 * We know that it is or will be overwritten. Check this now.
	 * The current inode being processed might have been the one that caused
	 * inode 'ino' to be orphanized, therefore check if ow_inode matches
	 * the current inode being processed.
	 */
	if ((ow_inode < sctx->send_progress) ||
	    (ino != sctx->cur_ino && ow_inode == sctx->cur_ino &&
	     gen == sctx->cur_inode_gen))
		ret = 1;
	else
		ret = 0;

out:
	return ret;
}

/*
 * Same as did_overwrite_ref, but also checks if it is the first ref of an
 * inode that got overwritten. This is used by process_recorded_refs to
 * determine if it has to use the path as returned by get_cur_path or the
 * orphan name.
 */
static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret = 0;
	struct fs_path *name = NULL;
	u64 dir;
	u64 dir_gen;

	if (!sctx->parent_root)
		goto out;

	name = fs_path_alloc();
	if (!name)
		return -ENOMEM;

	ret = get_first_ref(sctx->parent_root, ino, &dir, &dir_gen, name);
	if (ret < 0)
		goto out;

	ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen,
				name->start, fs_path_len(name));

out:
	fs_path_free(name);
	return ret;
}

/*
 * Insert a name cache entry. On 32bit kernels the radix tree index is 32bit,
 * so we need to do some special handling in case we have clashes. This function
 * takes care of this with the help of name_cache_entry::radix_list.
 * In case of error, nce is kfreed.
 */
static int name_cache_insert(struct send_ctx *sctx,
			     struct name_cache_entry *nce)
{
	int ret = 0;
	struct list_head *nce_head;

	nce_head = radix_tree_lookup(&sctx->name_cache,
			(unsigned long)nce->ino);
	if (!nce_head) {
		nce_head = kmalloc(sizeof(*nce_head), GFP_KERNEL);
		if (!nce_head) {
			kfree(nce);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(nce_head);

		ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head);
		if (ret < 0) {
			kfree(nce_head);
			kfree(nce);
			return ret;
		}
	}
	list_add_tail(&nce->radix_list, nce_head);
	list_add_tail(&nce->list, &sctx->name_cache_list);
	sctx->name_cache_size++;

	return ret;
}

static void name_cache_delete(struct send_ctx *sctx,
			      struct name_cache_entry *nce)
{
	struct list_head *nce_head;

	nce_head = radix_tree_lookup(&sctx->name_cache,
			(unsigned long)nce->ino);
	if (!nce_head) {
		btrfs_err(sctx->send_root->fs_info,
	      "name_cache_delete lookup failed ino %llu cache size %d, leaking memory",
			  nce->ino, sctx->name_cache_size);
	}

	list_del(&nce->radix_list);
	list_del(&nce->list);
	sctx->name_cache_size--;

	/*
	 * We may not get to the final release of nce_head if the lookup fails
	 */
	if (nce_head && list_empty(nce_head)) {
		radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino);
		kfree(nce_head);
	}
}

static struct name_cache_entry *name_cache_search(struct send_ctx *sctx,
						  u64 ino, u64 gen)
{
	struct list_head *nce_head;
	struct name_cache_entry *cur;

	nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino);
	if (!nce_head)
		return NULL;

	list_for_each_entry(cur, nce_head, radix_list) {
		if (cur->ino == ino && cur->gen == gen)
			return cur;
	}
	return NULL;
}

/*
 * Remove some entries from the beginning of name_cache_list.
 */
static void name_cache_clean_unused(struct send_ctx *sctx)
{
	struct name_cache_entry *nce;

	if (sctx->name_cache_size < SEND_CTX_NAME_CACHE_CLEAN_SIZE)
		return;

	while (sctx->name_cache_size > SEND_CTX_MAX_NAME_CACHE_SIZE) {
		nce = list_entry(sctx->name_cache_list.next,
				struct name_cache_entry, list);
		name_cache_delete(sctx, nce);
		kfree(nce);
	}
}

static void name_cache_free(struct send_ctx *sctx)
{
	struct name_cache_entry *nce;

	while (!list_empty(&sctx->name_cache_list)) {
		nce = list_entry(sctx->name_cache_list.next,
				struct name_cache_entry, list);
		name_cache_delete(sctx, nce);
		kfree(nce);
	}
}
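
/*
 * Example of the clash handling described above (hypothetical inode numbers):
 * on a 32bit kernel, inodes 0x100000001 and 0x200000001 both truncate to
 * radix tree index 1, so their entries end up on the same nce_head list and
 * name_cache_search() disambiguates by comparing the full 64bit ino and gen
 * of each list member.
 */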
If yes, check if the cache entry is still up-to-date. If so
2166 * return the cached result.
2167 */
2168 nce = name_cache_search(sctx, ino, gen);
2169 if (nce) {
2170 if (ino < sctx->send_progress && nce->need_later_update) {
2171 name_cache_delete(sctx, nce);
2172 kfree(nce);
2173 nce = NULL;
2174 } else {
2175 /*
2176 * Removes the entry from the list and adds it back to
2177 * the end. This marks the entry as recently used so
2178 * that name_cache_clean_unused does not remove it.
2179 */
2180 list_move_tail(&nce->list, &sctx->name_cache_list);
2181
2182 *parent_ino = nce->parent_ino;
2183 *parent_gen = nce->parent_gen;
2184 ret = fs_path_add(dest, nce->name, nce->name_len);
2185 if (ret < 0)
2186 goto out;
2187 ret = nce->ret;
2188 goto out;
2189 }
2190 }
2191
2192 /*
2193 * If the inode is not existent yet, add the orphan name and return 1.
2194 * This should only happen for the parent dir that we determine in
2195 * __record_new_ref.
2196 */
2197 ret = is_inode_existent(sctx, ino, gen);
2198 if (ret < 0)
2199 goto out;
2200
2201 if (!ret) {
2202 ret = gen_unique_name(sctx, ino, gen, dest);
2203 if (ret < 0)
2204 goto out;
2205 ret = 1;
2206 goto out_cache;
2207 }
2208
2209 /*
2210 * Depending on whether the inode was already processed or not, use
2211 * send_root or parent_root for ref lookup.
2212 */
2213 if (ino < sctx->send_progress)
2214 ret = get_first_ref(sctx->send_root, ino,
2215 parent_ino, parent_gen, dest);
2216 else
2217 ret = get_first_ref(sctx->parent_root, ino,
2218 parent_ino, parent_gen, dest);
2219 if (ret < 0)
2220 goto out;
2221
2222 /*
2223 * Check if the ref was overwritten by an inode's ref that was processed
2224 * earlier. If yes, treat as orphan and return 1.
2225 */
2226 ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen,
2227 dest->start, dest->end - dest->start);
2228 if (ret < 0)
2229 goto out;
2230 if (ret) {
2231 fs_path_reset(dest);
2232 ret = gen_unique_name(sctx, ino, gen, dest);
2233 if (ret < 0)
2234 goto out;
2235 ret = 1;
2236 }
2237
2238 out_cache:
2239 /*
2240 * Store the result of the lookup in the name cache.
2241 */
2242 nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_KERNEL);
2243 if (!nce) {
2244 ret = -ENOMEM;
2245 goto out;
2246 }
2247
2248 nce->ino = ino;
2249 nce->gen = gen;
2250 nce->parent_ino = *parent_ino;
2251 nce->parent_gen = *parent_gen;
2252 nce->name_len = fs_path_len(dest);
2253 nce->ret = ret;
2254 strcpy(nce->name, dest->start);
2255
2256 if (ino < sctx->send_progress)
2257 nce->need_later_update = 0;
2258 else
2259 nce->need_later_update = 1;
2260
2261 nce_ret = name_cache_insert(sctx, nce);
2262 if (nce_ret < 0)
2263 ret = nce_ret;
2264 name_cache_clean_unused(sctx);
2265
2266 out:
2267 return ret;
2268 }
2269
2270 /*
2271 * Magic happens here. This function returns the first ref to an inode as it
2272 * would look like while receiving the stream at this point in time.
2273 * We walk the path up to the root. For every inode in between, we check if it
2274 * was already processed/sent. If yes, we continue with the parent as found
2275 * in send_root. If not, we continue with the parent as found in parent_root.
2276 * If we encounter an inode that was deleted at this point in time, we use the
2277 * inode's "orphan" name instead of the real name and stop. Same with new inodes
2278 * that were not created yet and overwritten inodes/refs.
2279 *
2280 * When do we have orphan inodes:
2281 * 1. When an inode is freshly created and thus no valid refs are available yet
2282 * 2.
When a directory lost all its refs (deleted) but still has dir items
2283 * inside which were not processed yet (pending for move/delete). If anyone
2284 * tried to get the path to the dir items, it would get a path inside that
2285 * orphan directory.
2286 * 3. When an inode is moved around or gets new links, it may overwrite the ref
2287 * of an unprocessed inode. If in that case the first ref would be
2288 * overwritten, the overwritten inode gets "orphanized". Later when we
2289 * process this overwritten inode, it is restored at a new place by moving
2290 * the orphan inode.
2291 *
2292 * sctx->send_progress tells this function at which point in time receiving
2293 * would be.
2294 */
2295 static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
2296 struct fs_path *dest)
2297 {
2298 int ret = 0;
2299 struct fs_path *name = NULL;
2300 u64 parent_inode = 0;
2301 u64 parent_gen = 0;
2302 int stop = 0;
2303
2304 name = fs_path_alloc();
2305 if (!name) {
2306 ret = -ENOMEM;
2307 goto out;
2308 }
2309
2310 dest->reversed = 1;
2311 fs_path_reset(dest);
2312
2313 while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) {
2314 struct waiting_dir_move *wdm;
2315
2316 fs_path_reset(name);
2317
2318 if (is_waiting_for_rm(sctx, ino, gen)) {
2319 ret = gen_unique_name(sctx, ino, gen, name);
2320 if (ret < 0)
2321 goto out;
2322 ret = fs_path_add_path(dest, name);
2323 break;
2324 }
2325
2326 wdm = get_waiting_dir_move(sctx, ino);
2327 if (wdm && wdm->orphanized) {
2328 ret = gen_unique_name(sctx, ino, gen, name);
2329 stop = 1;
2330 } else if (wdm) {
2331 ret = get_first_ref(sctx->parent_root, ino,
2332 &parent_inode, &parent_gen, name);
2333 } else {
2334 ret = __get_cur_name_and_parent(sctx, ino, gen,
2335 &parent_inode,
2336 &parent_gen, name);
2337 if (ret)
2338 stop = 1;
2339 }
2340
2341 if (ret < 0)
2342 goto out;
2343
2344 ret = fs_path_add_path(dest, name);
2345 if (ret < 0)
2346 goto out;
2347
2348 ino = parent_inode;
2349 gen = parent_gen;
2350 }
2351
2352 out:
2353 fs_path_free(name);
2354 if (!ret)
2355 fs_path_unreverse(dest);
2356 return ret;
2357 }
2358
2359 /*
2360 * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace
2361 */
2362 static int send_subvol_begin(struct send_ctx *sctx)
2363 {
2364 int ret;
2365 struct btrfs_root *send_root = sctx->send_root;
2366 struct btrfs_root *parent_root = sctx->parent_root;
2367 struct btrfs_path *path;
2368 struct btrfs_key key;
2369 struct btrfs_root_ref *ref;
2370 struct extent_buffer *leaf;
2371 char *name = NULL;
2372 int namelen;
2373
2374 path = btrfs_alloc_path();
2375 if (!path)
2376 return -ENOMEM;
2377
2378 name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL);
2379 if (!name) {
2380 btrfs_free_path(path);
2381 return -ENOMEM;
2382 }
2383
2384 key.objectid = send_root->root_key.objectid;
2385 key.type = BTRFS_ROOT_BACKREF_KEY;
2386 key.offset = 0;
2387
2388 ret = btrfs_search_slot_for_read(send_root->fs_info->tree_root,
2389 &key, path, 1, 0);
2390 if (ret < 0)
2391 goto out;
2392 if (ret) {
2393 ret = -ENOENT;
2394 goto out;
2395 }
2396
2397 leaf = path->nodes[0];
2398 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2399 if (key.type != BTRFS_ROOT_BACKREF_KEY ||
2400 key.objectid != send_root->root_key.objectid) {
2401 ret = -ENOENT;
2402 goto out;
2403 }
2404 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
2405 namelen = btrfs_root_ref_name_len(leaf, ref);
2406 read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen);
2407 btrfs_release_path(path);
2408
2409 if (parent_root) {
2410 ret = begin_cmd(sctx,
BTRFS_SEND_C_SNAPSHOT); 2411 if (ret < 0) 2412 goto out; 2413 } else { 2414 ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL); 2415 if (ret < 0) 2416 goto out; 2417 } 2418 2419 TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen); 2420 2421 if (!btrfs_is_empty_uuid(sctx->send_root->root_item.received_uuid)) 2422 TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID, 2423 sctx->send_root->root_item.received_uuid); 2424 else 2425 TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID, 2426 sctx->send_root->root_item.uuid); 2427 2428 TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID, 2429 btrfs_root_ctransid(&sctx->send_root->root_item)); 2430 if (parent_root) { 2431 if (!btrfs_is_empty_uuid(parent_root->root_item.received_uuid)) 2432 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID, 2433 parent_root->root_item.received_uuid); 2434 else 2435 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID, 2436 parent_root->root_item.uuid); 2437 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID, 2438 btrfs_root_ctransid(&sctx->parent_root->root_item)); 2439 } 2440 2441 ret = send_cmd(sctx); 2442 2443 tlv_put_failure: 2444 out: 2445 btrfs_free_path(path); 2446 kfree(name); 2447 return ret; 2448 } 2449 2450 static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size) 2451 { 2452 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 2453 int ret = 0; 2454 struct fs_path *p; 2455 2456 btrfs_debug(fs_info, "send_truncate %llu size=%llu", ino, size); 2457 2458 p = fs_path_alloc(); 2459 if (!p) 2460 return -ENOMEM; 2461 2462 ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE); 2463 if (ret < 0) 2464 goto out; 2465 2466 ret = get_cur_path(sctx, ino, gen, p); 2467 if (ret < 0) 2468 goto out; 2469 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 2470 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size); 2471 2472 ret = send_cmd(sctx); 2473 2474 tlv_put_failure: 2475 out: 2476 fs_path_free(p); 2477 return ret; 2478 } 2479 2480 static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode) 2481 { 2482 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 2483 int ret = 0; 2484 struct fs_path *p; 2485 2486 btrfs_debug(fs_info, "send_chmod %llu mode=%llu", ino, mode); 2487 2488 p = fs_path_alloc(); 2489 if (!p) 2490 return -ENOMEM; 2491 2492 ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD); 2493 if (ret < 0) 2494 goto out; 2495 2496 ret = get_cur_path(sctx, ino, gen, p); 2497 if (ret < 0) 2498 goto out; 2499 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 2500 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777); 2501 2502 ret = send_cmd(sctx); 2503 2504 tlv_put_failure: 2505 out: 2506 fs_path_free(p); 2507 return ret; 2508 } 2509 2510 static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid) 2511 { 2512 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 2513 int ret = 0; 2514 struct fs_path *p; 2515 2516 btrfs_debug(fs_info, "send_chown %llu uid=%llu, gid=%llu", 2517 ino, uid, gid); 2518 2519 p = fs_path_alloc(); 2520 if (!p) 2521 return -ENOMEM; 2522 2523 ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN); 2524 if (ret < 0) 2525 goto out; 2526 2527 ret = get_cur_path(sctx, ino, gen, p); 2528 if (ret < 0) 2529 goto out; 2530 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 2531 TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid); 2532 TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid); 2533 2534 ret = send_cmd(sctx); 2535 2536 tlv_put_failure: 2537 out: 2538 fs_path_free(p); 2539 return ret; 2540 } 2541 2542 static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen) 2543 { 2544 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 2545 int ret = 0; 2546 struct fs_path *p = NULL; 2547 
struct btrfs_inode_item *ii; 2548 struct btrfs_path *path = NULL; 2549 struct extent_buffer *eb; 2550 struct btrfs_key key; 2551 int slot; 2552 2553 btrfs_debug(fs_info, "send_utimes %llu", ino); 2554 2555 p = fs_path_alloc(); 2556 if (!p) 2557 return -ENOMEM; 2558 2559 path = alloc_path_for_send(); 2560 if (!path) { 2561 ret = -ENOMEM; 2562 goto out; 2563 } 2564 2565 key.objectid = ino; 2566 key.type = BTRFS_INODE_ITEM_KEY; 2567 key.offset = 0; 2568 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0); 2569 if (ret > 0) 2570 ret = -ENOENT; 2571 if (ret < 0) 2572 goto out; 2573 2574 eb = path->nodes[0]; 2575 slot = path->slots[0]; 2576 ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item); 2577 2578 ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES); 2579 if (ret < 0) 2580 goto out; 2581 2582 ret = get_cur_path(sctx, ino, gen, p); 2583 if (ret < 0) 2584 goto out; 2585 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 2586 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime); 2587 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime); 2588 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, &ii->ctime); 2589 /* TODO Add otime support when the otime patches get into upstream */ 2590 2591 ret = send_cmd(sctx); 2592 2593 tlv_put_failure: 2594 out: 2595 fs_path_free(p); 2596 btrfs_free_path(path); 2597 return ret; 2598 } 2599 2600 /* 2601 * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have 2602 * a valid path yet because we did not process the refs yet. So, the inode 2603 * is created as orphan. 2604 */ 2605 static int send_create_inode(struct send_ctx *sctx, u64 ino) 2606 { 2607 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 2608 int ret = 0; 2609 struct fs_path *p; 2610 int cmd; 2611 u64 gen; 2612 u64 mode; 2613 u64 rdev; 2614 2615 btrfs_debug(fs_info, "send_create_inode %llu", ino); 2616 2617 p = fs_path_alloc(); 2618 if (!p) 2619 return -ENOMEM; 2620 2621 if (ino != sctx->cur_ino) { 2622 ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode, 2623 NULL, NULL, &rdev); 2624 if (ret < 0) 2625 goto out; 2626 } else { 2627 gen = sctx->cur_inode_gen; 2628 mode = sctx->cur_inode_mode; 2629 rdev = sctx->cur_inode_rdev; 2630 } 2631 2632 if (S_ISREG(mode)) { 2633 cmd = BTRFS_SEND_C_MKFILE; 2634 } else if (S_ISDIR(mode)) { 2635 cmd = BTRFS_SEND_C_MKDIR; 2636 } else if (S_ISLNK(mode)) { 2637 cmd = BTRFS_SEND_C_SYMLINK; 2638 } else if (S_ISCHR(mode) || S_ISBLK(mode)) { 2639 cmd = BTRFS_SEND_C_MKNOD; 2640 } else if (S_ISFIFO(mode)) { 2641 cmd = BTRFS_SEND_C_MKFIFO; 2642 } else if (S_ISSOCK(mode)) { 2643 cmd = BTRFS_SEND_C_MKSOCK; 2644 } else { 2645 btrfs_warn(sctx->send_root->fs_info, "unexpected inode type %o", 2646 (int)(mode & S_IFMT)); 2647 ret = -EOPNOTSUPP; 2648 goto out; 2649 } 2650 2651 ret = begin_cmd(sctx, cmd); 2652 if (ret < 0) 2653 goto out; 2654 2655 ret = gen_unique_name(sctx, ino, gen, p); 2656 if (ret < 0) 2657 goto out; 2658 2659 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 2660 TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, ino); 2661 2662 if (S_ISLNK(mode)) { 2663 fs_path_reset(p); 2664 ret = read_symlink(sctx->send_root, ino, p); 2665 if (ret < 0) 2666 goto out; 2667 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p); 2668 } else if (S_ISCHR(mode) || S_ISBLK(mode) || 2669 S_ISFIFO(mode) || S_ISSOCK(mode)) { 2670 TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev)); 2671 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode); 2672 } 2673 2674 ret = send_cmd(sctx); 2675 if (ret < 0) 2676 goto out; 2677 2678 2679 tlv_put_failure: 2680 out: 2681 
fs_path_free(p);
2682 return ret;
2683 }
2684
2685 /*
2686 * We need some special handling for inodes that get processed before the parent
2687 * directory got created. See process_recorded_refs for details.
2688 * This function checks if we already created the dir out of order.
2689 */
2690 static int did_create_dir(struct send_ctx *sctx, u64 dir)
2691 {
2692 int ret = 0;
2693 struct btrfs_path *path = NULL;
2694 struct btrfs_key key;
2695 struct btrfs_key found_key;
2696 struct btrfs_key di_key;
2697 struct extent_buffer *eb;
2698 struct btrfs_dir_item *di;
2699 int slot;
2700
2701 path = alloc_path_for_send();
2702 if (!path) {
2703 ret = -ENOMEM;
2704 goto out;
2705 }
2706
2707 key.objectid = dir;
2708 key.type = BTRFS_DIR_INDEX_KEY;
2709 key.offset = 0;
2710 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
2711 if (ret < 0)
2712 goto out;
2713
2714 while (1) {
2715 eb = path->nodes[0];
2716 slot = path->slots[0];
2717 if (slot >= btrfs_header_nritems(eb)) {
2718 ret = btrfs_next_leaf(sctx->send_root, path);
2719 if (ret < 0) {
2720 goto out;
2721 } else if (ret > 0) {
2722 ret = 0;
2723 break;
2724 }
2725 continue;
2726 }
2727
2728 btrfs_item_key_to_cpu(eb, &found_key, slot);
2729 if (found_key.objectid != key.objectid ||
2730 found_key.type != key.type) {
2731 ret = 0;
2732 goto out;
2733 }
2734
2735 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
2736 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
2737
2738 if (di_key.type != BTRFS_ROOT_ITEM_KEY &&
2739 di_key.objectid < sctx->send_progress) {
2740 ret = 1;
2741 goto out;
2742 }
2743
2744 path->slots[0]++;
2745 }
2746
2747 out:
2748 btrfs_free_path(path);
2749 return ret;
2750 }
2751
2752 /*
2753 * Only creates the inode if it is:
2754 * 1. Not a directory
2755 * 2. Or a directory which was not yet created due to out of order
2756 * directories. See did_create_dir and process_recorded_refs for details.
2757 */
2758 static int send_create_inode_if_needed(struct send_ctx *sctx)
2759 {
2760 int ret;
2761
2762 if (S_ISDIR(sctx->cur_inode_mode)) {
2763 ret = did_create_dir(sctx, sctx->cur_ino);
2764 if (ret < 0)
2765 return ret;
2766 else if (ret > 0)
2767 return 0;
2768 }
2769
2770 return send_create_inode(sctx, sctx->cur_ino);
2771 }
2772
2773 struct recorded_ref {
2774 struct list_head list;
2775 char *name;
2776 struct fs_path *full_path;
2777 u64 dir;
2778 u64 dir_gen;
2779 int name_len;
2780 };
2781
2782 static void set_ref_path(struct recorded_ref *ref, struct fs_path *path)
2783 {
2784 ref->full_path = path;
2785 ref->name = (char *)kbasename(ref->full_path->start);
2786 ref->name_len = ref->full_path->end - ref->name;
2787 }
2788
2789 /*
2790 * We need to process new refs before deleted refs, but compare_tree gives us
2791 * everything mixed. So we first record all refs and later process them.
2792 * This function is a helper to record one ref.
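 * The fs_path passed in is owned by the recorded_ref from this point on
 * and is released together with the ref by __free_recorded_refs().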
2793 */ 2794 static int __record_ref(struct list_head *head, u64 dir, 2795 u64 dir_gen, struct fs_path *path) 2796 { 2797 struct recorded_ref *ref; 2798 2799 ref = kmalloc(sizeof(*ref), GFP_KERNEL); 2800 if (!ref) 2801 return -ENOMEM; 2802 2803 ref->dir = dir; 2804 ref->dir_gen = dir_gen; 2805 set_ref_path(ref, path); 2806 list_add_tail(&ref->list, head); 2807 return 0; 2808 } 2809 2810 static int dup_ref(struct recorded_ref *ref, struct list_head *list) 2811 { 2812 struct recorded_ref *new; 2813 2814 new = kmalloc(sizeof(*ref), GFP_KERNEL); 2815 if (!new) 2816 return -ENOMEM; 2817 2818 new->dir = ref->dir; 2819 new->dir_gen = ref->dir_gen; 2820 new->full_path = NULL; 2821 INIT_LIST_HEAD(&new->list); 2822 list_add_tail(&new->list, list); 2823 return 0; 2824 } 2825 2826 static void __free_recorded_refs(struct list_head *head) 2827 { 2828 struct recorded_ref *cur; 2829 2830 while (!list_empty(head)) { 2831 cur = list_entry(head->next, struct recorded_ref, list); 2832 fs_path_free(cur->full_path); 2833 list_del(&cur->list); 2834 kfree(cur); 2835 } 2836 } 2837 2838 static void free_recorded_refs(struct send_ctx *sctx) 2839 { 2840 __free_recorded_refs(&sctx->new_refs); 2841 __free_recorded_refs(&sctx->deleted_refs); 2842 } 2843 2844 /* 2845 * Renames/moves a file/dir to its orphan name. Used when the first 2846 * ref of an unprocessed inode gets overwritten and for all non empty 2847 * directories. 2848 */ 2849 static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen, 2850 struct fs_path *path) 2851 { 2852 int ret; 2853 struct fs_path *orphan; 2854 2855 orphan = fs_path_alloc(); 2856 if (!orphan) 2857 return -ENOMEM; 2858 2859 ret = gen_unique_name(sctx, ino, gen, orphan); 2860 if (ret < 0) 2861 goto out; 2862 2863 ret = send_rename(sctx, path, orphan); 2864 2865 out: 2866 fs_path_free(orphan); 2867 return ret; 2868 } 2869 2870 static struct orphan_dir_info *add_orphan_dir_info(struct send_ctx *sctx, 2871 u64 dir_ino, u64 dir_gen) 2872 { 2873 struct rb_node **p = &sctx->orphan_dirs.rb_node; 2874 struct rb_node *parent = NULL; 2875 struct orphan_dir_info *entry, *odi; 2876 2877 while (*p) { 2878 parent = *p; 2879 entry = rb_entry(parent, struct orphan_dir_info, node); 2880 if (dir_ino < entry->ino) 2881 p = &(*p)->rb_left; 2882 else if (dir_ino > entry->ino) 2883 p = &(*p)->rb_right; 2884 else if (dir_gen < entry->gen) 2885 p = &(*p)->rb_left; 2886 else if (dir_gen > entry->gen) 2887 p = &(*p)->rb_right; 2888 else 2889 return entry; 2890 } 2891 2892 odi = kmalloc(sizeof(*odi), GFP_KERNEL); 2893 if (!odi) 2894 return ERR_PTR(-ENOMEM); 2895 odi->ino = dir_ino; 2896 odi->gen = dir_gen; 2897 odi->last_dir_index_offset = 0; 2898 2899 rb_link_node(&odi->node, parent, p); 2900 rb_insert_color(&odi->node, &sctx->orphan_dirs); 2901 return odi; 2902 } 2903 2904 static struct orphan_dir_info *get_orphan_dir_info(struct send_ctx *sctx, 2905 u64 dir_ino, u64 gen) 2906 { 2907 struct rb_node *n = sctx->orphan_dirs.rb_node; 2908 struct orphan_dir_info *entry; 2909 2910 while (n) { 2911 entry = rb_entry(n, struct orphan_dir_info, node); 2912 if (dir_ino < entry->ino) 2913 n = n->rb_left; 2914 else if (dir_ino > entry->ino) 2915 n = n->rb_right; 2916 else if (gen < entry->gen) 2917 n = n->rb_left; 2918 else if (gen > entry->gen) 2919 n = n->rb_right; 2920 else 2921 return entry; 2922 } 2923 return NULL; 2924 } 2925 2926 static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen) 2927 { 2928 struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino, gen); 2929 2930 return odi != NULL; 
2931 } 2932 2933 static void free_orphan_dir_info(struct send_ctx *sctx, 2934 struct orphan_dir_info *odi) 2935 { 2936 if (!odi) 2937 return; 2938 rb_erase(&odi->node, &sctx->orphan_dirs); 2939 kfree(odi); 2940 } 2941 2942 /* 2943 * Returns 1 if a directory can be removed at this point in time. 2944 * We check this by iterating all dir items and checking if the inode behind 2945 * the dir item was already processed. 2946 */ 2947 static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen, 2948 u64 send_progress) 2949 { 2950 int ret = 0; 2951 struct btrfs_root *root = sctx->parent_root; 2952 struct btrfs_path *path; 2953 struct btrfs_key key; 2954 struct btrfs_key found_key; 2955 struct btrfs_key loc; 2956 struct btrfs_dir_item *di; 2957 struct orphan_dir_info *odi = NULL; 2958 2959 /* 2960 * Don't try to rmdir the top/root subvolume dir. 2961 */ 2962 if (dir == BTRFS_FIRST_FREE_OBJECTID) 2963 return 0; 2964 2965 path = alloc_path_for_send(); 2966 if (!path) 2967 return -ENOMEM; 2968 2969 key.objectid = dir; 2970 key.type = BTRFS_DIR_INDEX_KEY; 2971 key.offset = 0; 2972 2973 odi = get_orphan_dir_info(sctx, dir, dir_gen); 2974 if (odi) 2975 key.offset = odi->last_dir_index_offset; 2976 2977 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 2978 if (ret < 0) 2979 goto out; 2980 2981 while (1) { 2982 struct waiting_dir_move *dm; 2983 2984 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { 2985 ret = btrfs_next_leaf(root, path); 2986 if (ret < 0) 2987 goto out; 2988 else if (ret > 0) 2989 break; 2990 continue; 2991 } 2992 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 2993 path->slots[0]); 2994 if (found_key.objectid != key.objectid || 2995 found_key.type != key.type) 2996 break; 2997 2998 di = btrfs_item_ptr(path->nodes[0], path->slots[0], 2999 struct btrfs_dir_item); 3000 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc); 3001 3002 dm = get_waiting_dir_move(sctx, loc.objectid); 3003 if (dm) { 3004 odi = add_orphan_dir_info(sctx, dir, dir_gen); 3005 if (IS_ERR(odi)) { 3006 ret = PTR_ERR(odi); 3007 goto out; 3008 } 3009 odi->gen = dir_gen; 3010 odi->last_dir_index_offset = found_key.offset; 3011 dm->rmdir_ino = dir; 3012 dm->rmdir_gen = dir_gen; 3013 ret = 0; 3014 goto out; 3015 } 3016 3017 if (loc.objectid > send_progress) { 3018 odi = add_orphan_dir_info(sctx, dir, dir_gen); 3019 if (IS_ERR(odi)) { 3020 ret = PTR_ERR(odi); 3021 goto out; 3022 } 3023 odi->gen = dir_gen; 3024 odi->last_dir_index_offset = found_key.offset; 3025 ret = 0; 3026 goto out; 3027 } 3028 3029 path->slots[0]++; 3030 } 3031 free_orphan_dir_info(sctx, odi); 3032 3033 ret = 1; 3034 3035 out: 3036 btrfs_free_path(path); 3037 return ret; 3038 } 3039 3040 static int is_waiting_for_move(struct send_ctx *sctx, u64 ino) 3041 { 3042 struct waiting_dir_move *entry = get_waiting_dir_move(sctx, ino); 3043 3044 return entry != NULL; 3045 } 3046 3047 static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized) 3048 { 3049 struct rb_node **p = &sctx->waiting_dir_moves.rb_node; 3050 struct rb_node *parent = NULL; 3051 struct waiting_dir_move *entry, *dm; 3052 3053 dm = kmalloc(sizeof(*dm), GFP_KERNEL); 3054 if (!dm) 3055 return -ENOMEM; 3056 dm->ino = ino; 3057 dm->rmdir_ino = 0; 3058 dm->rmdir_gen = 0; 3059 dm->orphanized = orphanized; 3060 3061 while (*p) { 3062 parent = *p; 3063 entry = rb_entry(parent, struct waiting_dir_move, node); 3064 if (ino < entry->ino) { 3065 p = &(*p)->rb_left; 3066 } else if (ino > entry->ino) { 3067 p = &(*p)->rb_right; 3068 } else { 3069 kfree(dm); 3070 return 
-EEXIST; 3071 } 3072 } 3073 3074 rb_link_node(&dm->node, parent, p); 3075 rb_insert_color(&dm->node, &sctx->waiting_dir_moves); 3076 return 0; 3077 } 3078 3079 static struct waiting_dir_move * 3080 get_waiting_dir_move(struct send_ctx *sctx, u64 ino) 3081 { 3082 struct rb_node *n = sctx->waiting_dir_moves.rb_node; 3083 struct waiting_dir_move *entry; 3084 3085 while (n) { 3086 entry = rb_entry(n, struct waiting_dir_move, node); 3087 if (ino < entry->ino) 3088 n = n->rb_left; 3089 else if (ino > entry->ino) 3090 n = n->rb_right; 3091 else 3092 return entry; 3093 } 3094 return NULL; 3095 } 3096 3097 static void free_waiting_dir_move(struct send_ctx *sctx, 3098 struct waiting_dir_move *dm) 3099 { 3100 if (!dm) 3101 return; 3102 rb_erase(&dm->node, &sctx->waiting_dir_moves); 3103 kfree(dm); 3104 } 3105 3106 static int add_pending_dir_move(struct send_ctx *sctx, 3107 u64 ino, 3108 u64 ino_gen, 3109 u64 parent_ino, 3110 struct list_head *new_refs, 3111 struct list_head *deleted_refs, 3112 const bool is_orphan) 3113 { 3114 struct rb_node **p = &sctx->pending_dir_moves.rb_node; 3115 struct rb_node *parent = NULL; 3116 struct pending_dir_move *entry = NULL, *pm; 3117 struct recorded_ref *cur; 3118 int exists = 0; 3119 int ret; 3120 3121 pm = kmalloc(sizeof(*pm), GFP_KERNEL); 3122 if (!pm) 3123 return -ENOMEM; 3124 pm->parent_ino = parent_ino; 3125 pm->ino = ino; 3126 pm->gen = ino_gen; 3127 INIT_LIST_HEAD(&pm->list); 3128 INIT_LIST_HEAD(&pm->update_refs); 3129 RB_CLEAR_NODE(&pm->node); 3130 3131 while (*p) { 3132 parent = *p; 3133 entry = rb_entry(parent, struct pending_dir_move, node); 3134 if (parent_ino < entry->parent_ino) { 3135 p = &(*p)->rb_left; 3136 } else if (parent_ino > entry->parent_ino) { 3137 p = &(*p)->rb_right; 3138 } else { 3139 exists = 1; 3140 break; 3141 } 3142 } 3143 3144 list_for_each_entry(cur, deleted_refs, list) { 3145 ret = dup_ref(cur, &pm->update_refs); 3146 if (ret < 0) 3147 goto out; 3148 } 3149 list_for_each_entry(cur, new_refs, list) { 3150 ret = dup_ref(cur, &pm->update_refs); 3151 if (ret < 0) 3152 goto out; 3153 } 3154 3155 ret = add_waiting_dir_move(sctx, pm->ino, is_orphan); 3156 if (ret) 3157 goto out; 3158 3159 if (exists) { 3160 list_add_tail(&pm->list, &entry->list); 3161 } else { 3162 rb_link_node(&pm->node, parent, p); 3163 rb_insert_color(&pm->node, &sctx->pending_dir_moves); 3164 } 3165 ret = 0; 3166 out: 3167 if (ret) { 3168 __free_recorded_refs(&pm->update_refs); 3169 kfree(pm); 3170 } 3171 return ret; 3172 } 3173 3174 static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx, 3175 u64 parent_ino) 3176 { 3177 struct rb_node *n = sctx->pending_dir_moves.rb_node; 3178 struct pending_dir_move *entry; 3179 3180 while (n) { 3181 entry = rb_entry(n, struct pending_dir_move, node); 3182 if (parent_ino < entry->parent_ino) 3183 n = n->rb_left; 3184 else if (parent_ino > entry->parent_ino) 3185 n = n->rb_right; 3186 else 3187 return entry; 3188 } 3189 return NULL; 3190 } 3191 3192 static int path_loop(struct send_ctx *sctx, struct fs_path *name, 3193 u64 ino, u64 gen, u64 *ancestor_ino) 3194 { 3195 int ret = 0; 3196 u64 parent_inode = 0; 3197 u64 parent_gen = 0; 3198 u64 start_ino = ino; 3199 3200 *ancestor_ino = 0; 3201 while (ino != BTRFS_FIRST_FREE_OBJECTID) { 3202 fs_path_reset(name); 3203 3204 if (is_waiting_for_rm(sctx, ino, gen)) 3205 break; 3206 if (is_waiting_for_move(sctx, ino)) { 3207 if (*ancestor_ino == 0) 3208 *ancestor_ino = ino; 3209 ret = get_first_ref(sctx->parent_root, ino, 3210 &parent_inode, &parent_gen, name); 3211 } 
else { 3212 ret = __get_cur_name_and_parent(sctx, ino, gen, 3213 &parent_inode, 3214 &parent_gen, name); 3215 if (ret > 0) { 3216 ret = 0; 3217 break; 3218 } 3219 } 3220 if (ret < 0) 3221 break; 3222 if (parent_inode == start_ino) { 3223 ret = 1; 3224 if (*ancestor_ino == 0) 3225 *ancestor_ino = ino; 3226 break; 3227 } 3228 ino = parent_inode; 3229 gen = parent_gen; 3230 } 3231 return ret; 3232 } 3233 3234 static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm) 3235 { 3236 struct fs_path *from_path = NULL; 3237 struct fs_path *to_path = NULL; 3238 struct fs_path *name = NULL; 3239 u64 orig_progress = sctx->send_progress; 3240 struct recorded_ref *cur; 3241 u64 parent_ino, parent_gen; 3242 struct waiting_dir_move *dm = NULL; 3243 u64 rmdir_ino = 0; 3244 u64 rmdir_gen; 3245 u64 ancestor; 3246 bool is_orphan; 3247 int ret; 3248 3249 name = fs_path_alloc(); 3250 from_path = fs_path_alloc(); 3251 if (!name || !from_path) { 3252 ret = -ENOMEM; 3253 goto out; 3254 } 3255 3256 dm = get_waiting_dir_move(sctx, pm->ino); 3257 ASSERT(dm); 3258 rmdir_ino = dm->rmdir_ino; 3259 rmdir_gen = dm->rmdir_gen; 3260 is_orphan = dm->orphanized; 3261 free_waiting_dir_move(sctx, dm); 3262 3263 if (is_orphan) { 3264 ret = gen_unique_name(sctx, pm->ino, 3265 pm->gen, from_path); 3266 } else { 3267 ret = get_first_ref(sctx->parent_root, pm->ino, 3268 &parent_ino, &parent_gen, name); 3269 if (ret < 0) 3270 goto out; 3271 ret = get_cur_path(sctx, parent_ino, parent_gen, 3272 from_path); 3273 if (ret < 0) 3274 goto out; 3275 ret = fs_path_add_path(from_path, name); 3276 } 3277 if (ret < 0) 3278 goto out; 3279 3280 sctx->send_progress = sctx->cur_ino + 1; 3281 ret = path_loop(sctx, name, pm->ino, pm->gen, &ancestor); 3282 if (ret < 0) 3283 goto out; 3284 if (ret) { 3285 LIST_HEAD(deleted_refs); 3286 ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID); 3287 ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor, 3288 &pm->update_refs, &deleted_refs, 3289 is_orphan); 3290 if (ret < 0) 3291 goto out; 3292 if (rmdir_ino) { 3293 dm = get_waiting_dir_move(sctx, pm->ino); 3294 ASSERT(dm); 3295 dm->rmdir_ino = rmdir_ino; 3296 dm->rmdir_gen = rmdir_gen; 3297 } 3298 goto out; 3299 } 3300 fs_path_reset(name); 3301 to_path = name; 3302 name = NULL; 3303 ret = get_cur_path(sctx, pm->ino, pm->gen, to_path); 3304 if (ret < 0) 3305 goto out; 3306 3307 ret = send_rename(sctx, from_path, to_path); 3308 if (ret < 0) 3309 goto out; 3310 3311 if (rmdir_ino) { 3312 struct orphan_dir_info *odi; 3313 u64 gen; 3314 3315 odi = get_orphan_dir_info(sctx, rmdir_ino, rmdir_gen); 3316 if (!odi) { 3317 /* already deleted */ 3318 goto finish; 3319 } 3320 gen = odi->gen; 3321 3322 ret = can_rmdir(sctx, rmdir_ino, gen, sctx->cur_ino); 3323 if (ret < 0) 3324 goto out; 3325 if (!ret) 3326 goto finish; 3327 3328 name = fs_path_alloc(); 3329 if (!name) { 3330 ret = -ENOMEM; 3331 goto out; 3332 } 3333 ret = get_cur_path(sctx, rmdir_ino, gen, name); 3334 if (ret < 0) 3335 goto out; 3336 ret = send_rmdir(sctx, name); 3337 if (ret < 0) 3338 goto out; 3339 } 3340 3341 finish: 3342 ret = send_utimes(sctx, pm->ino, pm->gen); 3343 if (ret < 0) 3344 goto out; 3345 3346 /* 3347 * After rename/move, need to update the utimes of both new parent(s) 3348 * and old parent(s). 
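 * (pm->update_refs was filled from both the new and the deleted refs of
 * the moved directory when the move was delayed, so the loop below
 * visits the old parent(s) as well as the new one(s).)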
3349 */ 3350 list_for_each_entry(cur, &pm->update_refs, list) { 3351 /* 3352 * The parent inode might have been deleted in the send snapshot 3353 */ 3354 ret = get_inode_info(sctx->send_root, cur->dir, NULL, 3355 NULL, NULL, NULL, NULL, NULL); 3356 if (ret == -ENOENT) { 3357 ret = 0; 3358 continue; 3359 } 3360 if (ret < 0) 3361 goto out; 3362 3363 ret = send_utimes(sctx, cur->dir, cur->dir_gen); 3364 if (ret < 0) 3365 goto out; 3366 } 3367 3368 out: 3369 fs_path_free(name); 3370 fs_path_free(from_path); 3371 fs_path_free(to_path); 3372 sctx->send_progress = orig_progress; 3373 3374 return ret; 3375 } 3376 3377 static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m) 3378 { 3379 if (!list_empty(&m->list)) 3380 list_del(&m->list); 3381 if (!RB_EMPTY_NODE(&m->node)) 3382 rb_erase(&m->node, &sctx->pending_dir_moves); 3383 __free_recorded_refs(&m->update_refs); 3384 kfree(m); 3385 } 3386 3387 static void tail_append_pending_moves(struct send_ctx *sctx, 3388 struct pending_dir_move *moves, 3389 struct list_head *stack) 3390 { 3391 if (list_empty(&moves->list)) { 3392 list_add_tail(&moves->list, stack); 3393 } else { 3394 LIST_HEAD(list); 3395 list_splice_init(&moves->list, &list); 3396 list_add_tail(&moves->list, stack); 3397 list_splice_tail(&list, stack); 3398 } 3399 if (!RB_EMPTY_NODE(&moves->node)) { 3400 rb_erase(&moves->node, &sctx->pending_dir_moves); 3401 RB_CLEAR_NODE(&moves->node); 3402 } 3403 } 3404 3405 static int apply_children_dir_moves(struct send_ctx *sctx) 3406 { 3407 struct pending_dir_move *pm; 3408 struct list_head stack; 3409 u64 parent_ino = sctx->cur_ino; 3410 int ret = 0; 3411 3412 pm = get_pending_dir_moves(sctx, parent_ino); 3413 if (!pm) 3414 return 0; 3415 3416 INIT_LIST_HEAD(&stack); 3417 tail_append_pending_moves(sctx, pm, &stack); 3418 3419 while (!list_empty(&stack)) { 3420 pm = list_first_entry(&stack, struct pending_dir_move, list); 3421 parent_ino = pm->ino; 3422 ret = apply_dir_move(sctx, pm); 3423 free_pending_move(sctx, pm); 3424 if (ret) 3425 goto out; 3426 pm = get_pending_dir_moves(sctx, parent_ino); 3427 if (pm) 3428 tail_append_pending_moves(sctx, pm, &stack); 3429 } 3430 return 0; 3431 3432 out: 3433 while (!list_empty(&stack)) { 3434 pm = list_first_entry(&stack, struct pending_dir_move, list); 3435 free_pending_move(sctx, pm); 3436 } 3437 return ret; 3438 } 3439 3440 /* 3441 * We might need to delay a directory rename even when no ancestor directory 3442 * (in the send root) with a higher inode number than ours (sctx->cur_ino) was 3443 * renamed. This happens when we rename a directory to the old name (the name 3444 * in the parent root) of some other unrelated directory that got its rename 3445 * delayed due to some ancestor with higher number that got renamed. 3446 * 3447 * Example: 3448 * 3449 * Parent snapshot: 3450 * . (ino 256) 3451 * |---- a/ (ino 257) 3452 * | |---- file (ino 260) 3453 * | 3454 * |---- b/ (ino 258) 3455 * |---- c/ (ino 259) 3456 * 3457 * Send snapshot: 3458 * . (ino 256) 3459 * |---- a/ (ino 258) 3460 * |---- x/ (ino 259) 3461 * |---- y/ (ino 257) 3462 * |----- file (ino 260) 3463 * 3464 * Here we can not rename 258 from 'b' to 'a' without the rename of inode 257 3465 * from 'a' to 'x/y' happening first, which in turn depends on the rename of 3466 * inode 259 from 'c' to 'x'. 
So the order of rename commands the send stream 3467 * must issue is: 3468 * 3469 * 1 - rename 259 from 'c' to 'x' 3470 * 2 - rename 257 from 'a' to 'x/y' 3471 * 3 - rename 258 from 'b' to 'a' 3472 * 3473 * Returns 1 if the rename of sctx->cur_ino needs to be delayed, 0 if it can 3474 * be done right away and < 0 on error. 3475 */ 3476 static int wait_for_dest_dir_move(struct send_ctx *sctx, 3477 struct recorded_ref *parent_ref, 3478 const bool is_orphan) 3479 { 3480 struct btrfs_fs_info *fs_info = sctx->parent_root->fs_info; 3481 struct btrfs_path *path; 3482 struct btrfs_key key; 3483 struct btrfs_key di_key; 3484 struct btrfs_dir_item *di; 3485 u64 left_gen; 3486 u64 right_gen; 3487 int ret = 0; 3488 struct waiting_dir_move *wdm; 3489 3490 if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) 3491 return 0; 3492 3493 path = alloc_path_for_send(); 3494 if (!path) 3495 return -ENOMEM; 3496 3497 key.objectid = parent_ref->dir; 3498 key.type = BTRFS_DIR_ITEM_KEY; 3499 key.offset = btrfs_name_hash(parent_ref->name, parent_ref->name_len); 3500 3501 ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0); 3502 if (ret < 0) { 3503 goto out; 3504 } else if (ret > 0) { 3505 ret = 0; 3506 goto out; 3507 } 3508 3509 di = btrfs_match_dir_item_name(fs_info, path, parent_ref->name, 3510 parent_ref->name_len); 3511 if (!di) { 3512 ret = 0; 3513 goto out; 3514 } 3515 /* 3516 * di_key.objectid has the number of the inode that has a dentry in the 3517 * parent directory with the same name that sctx->cur_ino is being 3518 * renamed to. We need to check if that inode is in the send root as 3519 * well and if it is currently marked as an inode with a pending rename, 3520 * if it is, we need to delay the rename of sctx->cur_ino as well, so 3521 * that it happens after that other inode is renamed. 3522 */ 3523 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &di_key); 3524 if (di_key.type != BTRFS_INODE_ITEM_KEY) { 3525 ret = 0; 3526 goto out; 3527 } 3528 3529 ret = get_inode_info(sctx->parent_root, di_key.objectid, NULL, 3530 &left_gen, NULL, NULL, NULL, NULL); 3531 if (ret < 0) 3532 goto out; 3533 ret = get_inode_info(sctx->send_root, di_key.objectid, NULL, 3534 &right_gen, NULL, NULL, NULL, NULL); 3535 if (ret < 0) { 3536 if (ret == -ENOENT) 3537 ret = 0; 3538 goto out; 3539 } 3540 3541 /* Different inode, no need to delay the rename of sctx->cur_ino */ 3542 if (right_gen != left_gen) { 3543 ret = 0; 3544 goto out; 3545 } 3546 3547 wdm = get_waiting_dir_move(sctx, di_key.objectid); 3548 if (wdm && !wdm->orphanized) { 3549 ret = add_pending_dir_move(sctx, 3550 sctx->cur_ino, 3551 sctx->cur_inode_gen, 3552 di_key.objectid, 3553 &sctx->new_refs, 3554 &sctx->deleted_refs, 3555 is_orphan); 3556 if (!ret) 3557 ret = 1; 3558 } 3559 out: 3560 btrfs_free_path(path); 3561 return ret; 3562 } 3563 3564 /* 3565 * Check if inode ino2, or any of its ancestors, is inode ino1. 3566 * Return 1 if true, 0 if false and < 0 on error. 
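 *
 * Illustrative example:
 *
 * . (ino 256)
 * |-- a/ (ino 259)
 *     |-- b/ (ino 260)
 *         |-- c (ino 261)
 *
 * Called with ino1 = 259 and ino2 = 261, the walk below visits the
 * first refs 261 -> 260 -> 259 and returns 1, provided the generation
 * of inode 259 matches ino1_gen.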
3567 */ 3568 static int check_ino_in_path(struct btrfs_root *root, 3569 const u64 ino1, 3570 const u64 ino1_gen, 3571 const u64 ino2, 3572 const u64 ino2_gen, 3573 struct fs_path *fs_path) 3574 { 3575 u64 ino = ino2; 3576 3577 if (ino1 == ino2) 3578 return ino1_gen == ino2_gen; 3579 3580 while (ino > BTRFS_FIRST_FREE_OBJECTID) { 3581 u64 parent; 3582 u64 parent_gen; 3583 int ret; 3584 3585 fs_path_reset(fs_path); 3586 ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path); 3587 if (ret < 0) 3588 return ret; 3589 if (parent == ino1) 3590 return parent_gen == ino1_gen; 3591 ino = parent; 3592 } 3593 return 0; 3594 } 3595 3596 /* 3597 * Check if ino ino1 is an ancestor of inode ino2 in the given root for any 3598 * possible path (in case ino2 is not a directory and has multiple hard links). 3599 * Return 1 if true, 0 if false and < 0 on error. 3600 */ 3601 static int is_ancestor(struct btrfs_root *root, 3602 const u64 ino1, 3603 const u64 ino1_gen, 3604 const u64 ino2, 3605 struct fs_path *fs_path) 3606 { 3607 bool free_fs_path = false; 3608 int ret = 0; 3609 struct btrfs_path *path = NULL; 3610 struct btrfs_key key; 3611 3612 if (!fs_path) { 3613 fs_path = fs_path_alloc(); 3614 if (!fs_path) 3615 return -ENOMEM; 3616 free_fs_path = true; 3617 } 3618 3619 path = alloc_path_for_send(); 3620 if (!path) { 3621 ret = -ENOMEM; 3622 goto out; 3623 } 3624 3625 key.objectid = ino2; 3626 key.type = BTRFS_INODE_REF_KEY; 3627 key.offset = 0; 3628 3629 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 3630 if (ret < 0) 3631 goto out; 3632 3633 while (true) { 3634 struct extent_buffer *leaf = path->nodes[0]; 3635 int slot = path->slots[0]; 3636 u32 cur_offset = 0; 3637 u32 item_size; 3638 3639 if (slot >= btrfs_header_nritems(leaf)) { 3640 ret = btrfs_next_leaf(root, path); 3641 if (ret < 0) 3642 goto out; 3643 if (ret > 0) 3644 break; 3645 continue; 3646 } 3647 3648 btrfs_item_key_to_cpu(leaf, &key, slot); 3649 if (key.objectid != ino2) 3650 break; 3651 if (key.type != BTRFS_INODE_REF_KEY && 3652 key.type != BTRFS_INODE_EXTREF_KEY) 3653 break; 3654 3655 item_size = btrfs_item_size_nr(leaf, slot); 3656 while (cur_offset < item_size) { 3657 u64 parent; 3658 u64 parent_gen; 3659 3660 if (key.type == BTRFS_INODE_EXTREF_KEY) { 3661 unsigned long ptr; 3662 struct btrfs_inode_extref *extref; 3663 3664 ptr = btrfs_item_ptr_offset(leaf, slot); 3665 extref = (struct btrfs_inode_extref *) 3666 (ptr + cur_offset); 3667 parent = btrfs_inode_extref_parent(leaf, 3668 extref); 3669 cur_offset += sizeof(*extref); 3670 cur_offset += btrfs_inode_extref_name_len(leaf, 3671 extref); 3672 } else { 3673 parent = key.offset; 3674 cur_offset = item_size; 3675 } 3676 3677 ret = get_inode_info(root, parent, NULL, &parent_gen, 3678 NULL, NULL, NULL, NULL); 3679 if (ret < 0) 3680 goto out; 3681 ret = check_ino_in_path(root, ino1, ino1_gen, 3682 parent, parent_gen, fs_path); 3683 if (ret) 3684 goto out; 3685 } 3686 path->slots[0]++; 3687 } 3688 ret = 0; 3689 out: 3690 btrfs_free_path(path); 3691 if (free_fs_path) 3692 fs_path_free(fs_path); 3693 return ret; 3694 } 3695 3696 static int wait_for_parent_move(struct send_ctx *sctx, 3697 struct recorded_ref *parent_ref, 3698 const bool is_orphan) 3699 { 3700 int ret = 0; 3701 u64 ino = parent_ref->dir; 3702 u64 ino_gen = parent_ref->dir_gen; 3703 u64 parent_ino_before, parent_ino_after; 3704 struct fs_path *path_before = NULL; 3705 struct fs_path *path_after = NULL; 3706 int len1, len2; 3707 3708 path_after = fs_path_alloc(); 3709 path_before = fs_path_alloc(); 3710 if 
(!path_after || !path_before) {
3711 ret = -ENOMEM;
3712 goto out;
3713 }
3714
3715 /*
3716 * Our current directory inode may not yet be renamed/moved because some
3717 * ancestor (immediate or not) has to be renamed/moved first. So check if
3718 * such an ancestor exists and make sure our own rename/move happens after
3719 * that ancestor is processed to avoid path build infinite loops (done
3720 * at get_cur_path()).
3721 */
3722 while (ino > BTRFS_FIRST_FREE_OBJECTID) {
3723 u64 parent_ino_after_gen;
3724
3725 if (is_waiting_for_move(sctx, ino)) {
3726 /*
3727 * If the current inode is an ancestor of ino in the
3728 * parent root, we need to delay the rename of the
3729 * current inode, otherwise don't delay the rename
3730 * because we can end up with a circular dependency
3731 * of renames, resulting in some directories never
3732 * getting the respective rename operations issued in
3733 * the send stream or getting into infinite path build
3734 * loops.
3735 */
3736 ret = is_ancestor(sctx->parent_root,
3737 sctx->cur_ino, sctx->cur_inode_gen,
3738 ino, path_before);
3739 if (ret)
3740 break;
3741 }
3742
3743 fs_path_reset(path_before);
3744 fs_path_reset(path_after);
3745
3746 ret = get_first_ref(sctx->send_root, ino, &parent_ino_after,
3747 &parent_ino_after_gen, path_after);
3748 if (ret < 0)
3749 goto out;
3750 ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before,
3751 NULL, path_before);
3752 if (ret < 0 && ret != -ENOENT) {
3753 goto out;
3754 } else if (ret == -ENOENT) {
3755 ret = 0;
3756 break;
3757 }
3758
3759 len1 = fs_path_len(path_before);
3760 len2 = fs_path_len(path_after);
3761 if (ino > sctx->cur_ino &&
3762 (parent_ino_before != parent_ino_after || len1 != len2 ||
3763 memcmp(path_before->start, path_after->start, len1))) {
3764 u64 parent_ino_gen;
3765
3766 ret = get_inode_info(sctx->parent_root, ino, NULL,
3767 &parent_ino_gen, NULL, NULL, NULL,
3768 NULL);
3769 if (ret < 0)
3770 goto out;
3771 if (ino_gen == parent_ino_gen) {
3772 ret = 1;
3773 break;
3774 }
3775 }
3776 ino = parent_ino_after;
3777 ino_gen = parent_ino_after_gen;
3778 }
3779
3780 out:
3781 fs_path_free(path_before);
3782 fs_path_free(path_after);
3783
3784 if (ret == 1) {
3785 ret = add_pending_dir_move(sctx,
3786 sctx->cur_ino,
3787 sctx->cur_inode_gen,
3788 ino,
3789 &sctx->new_refs,
3790 &sctx->deleted_refs,
3791 is_orphan);
3792 if (!ret)
3793 ret = 1;
3794 }
3795
3796 return ret;
3797 }
3798
3799 static int update_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
3800 {
3801 int ret;
3802 struct fs_path *new_path;
3803
3804 /*
3805 * Our reference's name member points to its full_path member string, so
3806 * we use a new path here.
3807 */
3808 new_path = fs_path_alloc();
3809 if (!new_path)
3810 return -ENOMEM;
3811
3812 ret = get_cur_path(sctx, ref->dir, ref->dir_gen, new_path);
3813 if (ret < 0) {
3814 fs_path_free(new_path);
3815 return ret;
3816 }
3817 ret = fs_path_add(new_path, ref->name, ref->name_len);
3818 if (ret < 0) {
3819 fs_path_free(new_path);
3820 return ret;
3821 }
3822
3823 fs_path_free(ref->full_path);
3824 set_ref_path(ref, new_path);
3825
3826 return 0;
3827 }
3828
3829 /*
3830 * When processing the new references for an inode we may orphanize an existing
3831 * directory inode because its old name conflicts with one of the new references
3832 * of the current inode.
Later, when processing another new reference of our
3833 * inode, we might need to orphanize another inode, but the path we have in the
3834 * reference reflects the pre-orphanization name of the directory we previously
3835 * orphanized. For example:
3836 *
3837 * parent snapshot looks like:
3838 *
3839 * . (ino 256)
3840 * |----- f1 (ino 257)
3841 * |----- f2 (ino 258)
3842 * |----- d1/ (ino 259)
3843 * |----- d2/ (ino 260)
3844 *
3845 * send snapshot looks like:
3846 *
3847 * . (ino 256)
3848 * |----- d1 (ino 258)
3849 * |----- f2/ (ino 259)
3850 * |----- f2_link/ (ino 260)
3851 * | |----- f1 (ino 257)
3852 * |
3853 * |----- d2 (ino 258)
3854 *
3855 * When processing inode 257 we compute the name for inode 259 as "d1", and we
3856 * cache it in the name cache. Later when we start processing inode 258, when
3857 * collecting all its new references we set a full path of "d1/d2" for its new
3858 * reference with name "d2". When we start processing the new references we
3859 * start by processing the new reference with name "d1", and this results in
3860 * orphanizing inode 259, since its old reference causes a conflict. Then we
3861 * move on to the next new reference, with name "d2", and we find out we must
3862 * orphanize inode 260, as its old reference conflicts with ours - but for the
3863 * orphanization we use a source path corresponding to the path we stored in the
3864 * new reference, which is "d1/d2" and not "o259-6-0/d2" - this makes the
3865 * receiver fail since the path component "d1/" no longer exists; it was renamed
3866 * to "o259-6-0/" when processing the previous new reference. So in this case we
3867 * must recompute the path in the new reference and use it for the new
3868 * orphanization operation.
3869 */
3870 static int refresh_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
3871 {
3872 char *name;
3873 int ret;
3874
3875 name = kmemdup(ref->name, ref->name_len, GFP_KERNEL);
3876 if (!name)
3877 return -ENOMEM;
3878
3879 fs_path_reset(ref->full_path);
3880 ret = get_cur_path(sctx, ref->dir, ref->dir_gen, ref->full_path);
3881 if (ret < 0)
3882 goto out;
3883
3884 ret = fs_path_add(ref->full_path, name, ref->name_len);
3885 if (ret < 0)
3886 goto out;
3887
3888 /* Update the reference's base name pointer. */
3889 set_ref_path(ref, ref->full_path);
3890 out:
3891 kfree(name);
3892 return ret;
3893 }
3894
3895 /*
3896 * This does all the move/link/unlink/rmdir magic.
3897 */
3898 static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
3899 {
3900 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
3901 int ret = 0;
3902 struct recorded_ref *cur;
3903 struct recorded_ref *cur2;
3904 struct list_head check_dirs;
3905 struct fs_path *valid_path = NULL;
3906 u64 ow_inode = 0;
3907 u64 ow_gen;
3908 u64 ow_mode;
3909 int did_overwrite = 0;
3910 int is_orphan = 0;
3911 u64 last_dir_ino_rm = 0;
3912 bool can_rename = true;
3913 bool orphanized_dir = false;
3914 bool orphanized_ancestor = false;
3915
3916 btrfs_debug(fs_info, "process_recorded_refs %llu", sctx->cur_ino);
3917
3918 /*
3919 * This should never happen as the root dir always has the same ref
3920 * which is always '..'
3921 */
3922 BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID);
3923 INIT_LIST_HEAD(&check_dirs);
3924
3925 valid_path = fs_path_alloc();
3926 if (!valid_path) {
3927 ret = -ENOMEM;
3928 goto out;
3929 }
3930
3931 /*
3932 * First, check if the first ref of the current inode was overwritten
3933 * before.
If yes, we know that the current inode was already orphanized
3934 * and thus use the orphan name. If not, we can use get_cur_path to
3935 * get the path of the first ref as it would look like while receiving at
3936 * this point in time.
3937 * New inodes are always orphan at the beginning, so we force the use of
3938 * the orphan name in this case.
3939 * The first ref is stored in valid_path and will be updated if it
3940 * gets moved around.
3941 */
3942 if (!sctx->cur_inode_new) {
3943 ret = did_overwrite_first_ref(sctx, sctx->cur_ino,
3944 sctx->cur_inode_gen);
3945 if (ret < 0)
3946 goto out;
3947 if (ret)
3948 did_overwrite = 1;
3949 }
3950 if (sctx->cur_inode_new || did_overwrite) {
3951 ret = gen_unique_name(sctx, sctx->cur_ino,
3952 sctx->cur_inode_gen, valid_path);
3953 if (ret < 0)
3954 goto out;
3955 is_orphan = 1;
3956 } else {
3957 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
3958 valid_path);
3959 if (ret < 0)
3960 goto out;
3961 }
3962
3963 /*
3964 * Before doing any rename and link operations, do a first pass on the
3965 * new references to orphanize any unprocessed inodes that may have a
3966 * reference that conflicts with one of the new references of the current
3967 * inode. This needs to happen first because a new reference may conflict
3968 * with the old reference of a parent directory, so we must make sure
3969 * that the paths used for link and rename commands don't use an
3970 * orphanized name when an ancestor was not yet orphanized.
3971 *
3972 * Example:
3973 *
3974 * Parent snapshot:
3975 *
3976 * . (ino 256)
3977 * |----- testdir/ (ino 259)
3978 * | |----- a (ino 257)
3979 * |
3980 * |----- b (ino 258)
3981 *
3982 * Send snapshot:
3983 *
3984 * . (ino 256)
3985 * |----- testdir_2/ (ino 259)
3986 * | |----- a (ino 260)
3987 * |
3988 * |----- testdir (ino 257)
3989 * |----- b (ino 257)
3990 * |----- b2 (ino 258)
3991 *
3992 * Processing the new reference for inode 257 with name "b" may happen
3993 * before processing the new reference with name "testdir". If so, we
3994 * must make sure that by the time we send a link command to create the
3995 * hard link "b", inode 259 was already orphanized, since the generated
3996 * path in "valid_path" already contains the orphanized name for 259.
3997 * We are processing inode 257, so only later when processing 259 do we
3998 * perform the rename operation to change its temporary (orphanized) name to
3999 * "testdir_2".
4000 */
4001 list_for_each_entry(cur, &sctx->new_refs, list) {
4002 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
4003 if (ret < 0)
4004 goto out;
4005 if (ret == inode_state_will_create)
4006 continue;
4007
4008 /*
4009 * Check if this new ref would overwrite the first ref of another
4010 * unprocessed inode. If yes, orphanize the overwritten inode.
4011 * If we find an overwritten ref that is not the first ref,
4012 * simply unlink it.
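 * (Unlinking a non-first ref is safe because the overwritten inode is
 * still reachable through its first ref, and any extra link names are
 * recreated once that inode is processed.)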
4013 */
4014 ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen,
4015 cur->name, cur->name_len,
4016 &ow_inode, &ow_gen, &ow_mode);
4017 if (ret < 0)
4018 goto out;
4019 if (ret) {
4020 ret = is_first_ref(sctx->parent_root,
4021 ow_inode, cur->dir, cur->name,
4022 cur->name_len);
4023 if (ret < 0)
4024 goto out;
4025 if (ret) {
4026 struct name_cache_entry *nce;
4027 struct waiting_dir_move *wdm;
4028
4029 if (orphanized_dir) {
4030 ret = refresh_ref_path(sctx, cur);
4031 if (ret < 0)
4032 goto out;
4033 }
4034
4035 ret = orphanize_inode(sctx, ow_inode, ow_gen,
4036 cur->full_path);
4037 if (ret < 0)
4038 goto out;
4039 if (S_ISDIR(ow_mode))
4040 orphanized_dir = true;
4041
4042 /*
4043 * If ow_inode has its rename operation delayed
4044 * make sure that its orphanized name is used in
4045 * the source path when performing its rename
4046 * operation.
4047 */
4048 if (is_waiting_for_move(sctx, ow_inode)) {
4049 wdm = get_waiting_dir_move(sctx,
4050 ow_inode);
4051 ASSERT(wdm);
4052 wdm->orphanized = true;
4053 }
4054
4055 /*
4056 * Make sure we clear our orphanized inode's
4057 * name from the name cache. This is because the
4058 * inode ow_inode might be an ancestor of some
4059 * other inode that will be orphanized as well
4060 * later and has an inode number greater than
4061 * sctx->send_progress. We need to prevent
4062 * future name lookups from using the old name
4063 * and get the orphan name instead.
4064 */
4065 nce = name_cache_search(sctx, ow_inode, ow_gen);
4066 if (nce) {
4067 name_cache_delete(sctx, nce);
4068 kfree(nce);
4069 }
4070
4071 /*
4072 * ow_inode might currently be an ancestor of
4073 * cur_ino, therefore compute valid_path (the
4074 * current path of cur_ino) again because it
4075 * might contain the pre-orphanization name of
4076 * ow_inode, which is no longer valid.
4077 */
4078 ret = is_ancestor(sctx->parent_root,
4079 ow_inode, ow_gen,
4080 sctx->cur_ino, NULL);
4081 if (ret > 0) {
4082 orphanized_ancestor = true;
4083 fs_path_reset(valid_path);
4084 ret = get_cur_path(sctx, sctx->cur_ino,
4085 sctx->cur_inode_gen,
4086 valid_path);
4087 }
4088 if (ret < 0)
4089 goto out;
4090 } else {
4091 /*
4092 * If we previously orphanized a directory that
4093 * collided with a new reference that we already
4094 * processed, recompute the current path because
4095 * that directory may be part of the path.
4096 */
4097 if (orphanized_dir) {
4098 ret = refresh_ref_path(sctx, cur);
4099 if (ret < 0)
4100 goto out;
4101 }
4102 ret = send_unlink(sctx, cur->full_path);
4103 if (ret < 0)
4104 goto out;
4105 }
4106 }
4107
4108 }
4109
4110 list_for_each_entry(cur, &sctx->new_refs, list) {
4111 /*
4112 * We may have refs where the parent directory does not exist
4113 * yet. This happens if the parent directory's inum is higher
4114 * than the current inum. To handle this case, we create the
4115 * parent directory out of order. But we need to check if this
4116 * did already happen before due to other refs in the same dir.
4117 */
4118 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
4119 if (ret < 0)
4120 goto out;
4121 if (ret == inode_state_will_create) {
4122 ret = 0;
4123 /*
4124 * First check if any of the current inode's refs did
4125 * already create the dir.
4126 */
4127 list_for_each_entry(cur2, &sctx->new_refs, list) {
4128 if (cur == cur2)
4129 break;
4130 if (cur2->dir == cur->dir) {
4131 ret = 1;
4132 break;
4133 }
4134 }
4135
4136 /*
4137 * If that did not happen, check if a previous inode
4138 * did already create the dir.
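 * (did_create_dir() detects this by scanning the dir index items of
 * cur->dir for an entry whose inode was already processed, i.e. one
 * with an inode number lower than sctx->send_progress.)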
4139 */ 4140 if (!ret) 4141 ret = did_create_dir(sctx, cur->dir); 4142 if (ret < 0) 4143 goto out; 4144 if (!ret) { 4145 ret = send_create_inode(sctx, cur->dir); 4146 if (ret < 0) 4147 goto out; 4148 } 4149 } 4150 4151 if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root) { 4152 ret = wait_for_dest_dir_move(sctx, cur, is_orphan); 4153 if (ret < 0) 4154 goto out; 4155 if (ret == 1) { 4156 can_rename = false; 4157 *pending_move = 1; 4158 } 4159 } 4160 4161 if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root && 4162 can_rename) { 4163 ret = wait_for_parent_move(sctx, cur, is_orphan); 4164 if (ret < 0) 4165 goto out; 4166 if (ret == 1) { 4167 can_rename = false; 4168 *pending_move = 1; 4169 } 4170 } 4171 4172 /* 4173 * link/move the ref to the new place. If we have an orphan 4174 * inode, move it and update valid_path. If not, link or move 4175 * it depending on the inode mode. 4176 */ 4177 if (is_orphan && can_rename) { 4178 ret = send_rename(sctx, valid_path, cur->full_path); 4179 if (ret < 0) 4180 goto out; 4181 is_orphan = 0; 4182 ret = fs_path_copy(valid_path, cur->full_path); 4183 if (ret < 0) 4184 goto out; 4185 } else if (can_rename) { 4186 if (S_ISDIR(sctx->cur_inode_mode)) { 4187 /* 4188 * Dirs can't be linked, so move it. For moved 4189 * dirs, we always have one new and one deleted 4190 * ref. The deleted ref is ignored later. 4191 */ 4192 ret = send_rename(sctx, valid_path, 4193 cur->full_path); 4194 if (!ret) 4195 ret = fs_path_copy(valid_path, 4196 cur->full_path); 4197 if (ret < 0) 4198 goto out; 4199 } else { 4200 /* 4201 * We might have previously orphanized an inode 4202 * which is an ancestor of our current inode, 4203 * so our reference's full path, which was 4204 * computed before any such orphanizations, must 4205 * be updated. 4206 */ 4207 if (orphanized_dir) { 4208 ret = update_ref_path(sctx, cur); 4209 if (ret < 0) 4210 goto out; 4211 } 4212 ret = send_link(sctx, cur->full_path, 4213 valid_path); 4214 if (ret < 0) 4215 goto out; 4216 } 4217 } 4218 ret = dup_ref(cur, &check_dirs); 4219 if (ret < 0) 4220 goto out; 4221 } 4222 4223 if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) { 4224 /* 4225 * Check if we can already rmdir the directory. If not, 4226 * orphanize it. For every dir item inside that gets deleted 4227 * later, we do this check again and rmdir it then if possible. 4228 * See the use of check_dirs for more details. 4229 */ 4230 ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_inode_gen, 4231 sctx->cur_ino); 4232 if (ret < 0) 4233 goto out; 4234 if (ret) { 4235 ret = send_rmdir(sctx, valid_path); 4236 if (ret < 0) 4237 goto out; 4238 } else if (!is_orphan) { 4239 ret = orphanize_inode(sctx, sctx->cur_ino, 4240 sctx->cur_inode_gen, valid_path); 4241 if (ret < 0) 4242 goto out; 4243 is_orphan = 1; 4244 } 4245 4246 list_for_each_entry(cur, &sctx->deleted_refs, list) { 4247 ret = dup_ref(cur, &check_dirs); 4248 if (ret < 0) 4249 goto out; 4250 } 4251 } else if (S_ISDIR(sctx->cur_inode_mode) && 4252 !list_empty(&sctx->deleted_refs)) { 4253 /* 4254 * We have a moved dir. Add the old parent to check_dirs 4255 */ 4256 cur = list_entry(sctx->deleted_refs.next, struct recorded_ref, 4257 list); 4258 ret = dup_ref(cur, &check_dirs); 4259 if (ret < 0) 4260 goto out; 4261 } else if (!S_ISDIR(sctx->cur_inode_mode)) { 4262 /* 4263 * We have a non dir inode. Go through all deleted refs and 4264 * unlink them if they were not already overwritten by other 4265 * inodes. 
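 * A deleted ref whose name was meanwhile overwritten by another inode
 * must not be unlinked, since the name now belongs to that other inode;
 * did_overwrite_ref() makes exactly this distinction below.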
		list_for_each_entry(cur, &sctx->deleted_refs, list) {
			ret = dup_ref(cur, &check_dirs);
			if (ret < 0)
				goto out;
		}
	} else if (S_ISDIR(sctx->cur_inode_mode) &&
		   !list_empty(&sctx->deleted_refs)) {
		/*
		 * We have a moved dir. Add the old parent to check_dirs.
		 */
		cur = list_entry(sctx->deleted_refs.next, struct recorded_ref,
				 list);
		ret = dup_ref(cur, &check_dirs);
		if (ret < 0)
			goto out;
	} else if (!S_ISDIR(sctx->cur_inode_mode)) {
		/*
		 * We have a non-dir inode. Go through all deleted refs and
		 * unlink them if they were not already overwritten by other
		 * inodes.
		 */
		list_for_each_entry(cur, &sctx->deleted_refs, list) {
			ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen,
					sctx->cur_ino, sctx->cur_inode_gen,
					cur->name, cur->name_len);
			if (ret < 0)
				goto out;
			if (!ret) {
				/*
				 * If we orphanized any ancestor before, we need
				 * to recompute the full path for deleted names,
				 * since any such path was computed before we
				 * processed any references and orphanized any
				 * ancestor inode.
				 */
				if (orphanized_ancestor) {
					ret = update_ref_path(sctx, cur);
					if (ret < 0)
						goto out;
				}
				ret = send_unlink(sctx, cur->full_path);
				if (ret < 0)
					goto out;
			}
			ret = dup_ref(cur, &check_dirs);
			if (ret < 0)
				goto out;
		}
		/*
		 * If the inode is still orphan, unlink the orphan. This may
		 * happen when a previous inode did overwrite the first ref
		 * of this inode and no new refs were added for the current
		 * inode. Unlinking does not mean that the inode is deleted in
		 * all cases. There may still be links to this inode in other
		 * places.
		 */
		if (is_orphan) {
			ret = send_unlink(sctx, valid_path);
			if (ret < 0)
				goto out;
		}
	}

	/*
	 * We have collected all parent dirs where cur_inode was once located.
	 * Now go through all these dirs and check if they are pending for
	 * deletion and if it's finally possible to perform the rmdir now.
	 * We also update the inode stats of the parent dirs here.
	 */
	list_for_each_entry(cur, &check_dirs, list) {
		/*
		 * In case we had refs into dirs that were not processed yet,
		 * we don't need to do the utime and rmdir logic for these dirs.
		 * The dir will be processed later.
		 */
		if (cur->dir > sctx->cur_ino)
			continue;

		ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
		if (ret < 0)
			goto out;

		if (ret == inode_state_did_create ||
		    ret == inode_state_no_change) {
			/* TODO delayed utimes */
			ret = send_utimes(sctx, cur->dir, cur->dir_gen);
			if (ret < 0)
				goto out;
		} else if (ret == inode_state_did_delete &&
			   cur->dir != last_dir_ino_rm) {
			ret = can_rmdir(sctx, cur->dir, cur->dir_gen,
					sctx->cur_ino);
			if (ret < 0)
				goto out;
			if (ret) {
				ret = get_cur_path(sctx, cur->dir,
						   cur->dir_gen, valid_path);
				if (ret < 0)
					goto out;
				ret = send_rmdir(sctx, valid_path);
				if (ret < 0)
					goto out;
				last_dir_ino_rm = cur->dir;
			}
		}
	}

	ret = 0;

out:
	__free_recorded_refs(&check_dirs);
	free_recorded_refs(sctx);
	fs_path_free(valid_path);
	return ret;
}

static int record_ref(struct btrfs_root *root, u64 dir, struct fs_path *name,
		      void *ctx, struct list_head *refs)
{
	int ret = 0;
	struct send_ctx *sctx = ctx;
	struct fs_path *p;
	u64 gen;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = get_inode_info(root, dir, NULL, &gen, NULL, NULL,
			NULL, NULL);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, dir, gen, p);
	if (ret < 0)
		goto out;
	ret = fs_path_add_path(p, name);
	if (ret < 0)
		goto out;

	ret = __record_ref(refs, dir, gen, p);

out:
	if (ret)
		fs_path_free(p);
	return ret;
}

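/*
 * Callbacks for iterate_inode_ref() that record the refs of the currently
 * processed inode in sctx->new_refs and sctx->deleted_refs, respectively.
 */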
static int __record_new_ref(int num, u64 dir, int index,
			    struct fs_path *name,
			    void *ctx)
{
	struct send_ctx *sctx = ctx;
	return record_ref(sctx->send_root, dir, name, ctx, &sctx->new_refs);
}

static int __record_deleted_ref(int num, u64 dir, int index,
				struct fs_path *name,
				void *ctx)
{
	struct send_ctx *sctx = ctx;
	return record_ref(sctx->parent_root, dir, name, ctx,
			  &sctx->deleted_refs);
}

static int record_new_ref(struct send_ctx *sctx)
{
	int ret;

	ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
				sctx->cmp_key, 0, __record_new_ref, sctx);
	if (ret < 0)
		goto out;
	ret = 0;

out:
	return ret;
}

static int record_deleted_ref(struct send_ctx *sctx)
{
	int ret;

	ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
				sctx->cmp_key, 0, __record_deleted_ref, sctx);
	if (ret < 0)
		goto out;
	ret = 0;

out:
	return ret;
}

struct find_ref_ctx {
	u64 dir;
	u64 dir_gen;
	struct btrfs_root *root;
	struct fs_path *name;
	int found_idx;
};

static int __find_iref(int num, u64 dir, int index,
		       struct fs_path *name,
		       void *ctx_)
{
	struct find_ref_ctx *ctx = ctx_;
	u64 dir_gen;
	int ret;

	if (dir == ctx->dir && fs_path_len(name) == fs_path_len(ctx->name) &&
	    strncmp(name->start, ctx->name->start, fs_path_len(name)) == 0) {
		/*
		 * To avoid doing extra lookups we'll only do this if everything
		 * else matches.
		 */
		ret = get_inode_info(ctx->root, dir, NULL, &dir_gen, NULL,
				     NULL, NULL, NULL);
		if (ret)
			return ret;
		if (dir_gen != ctx->dir_gen)
			return 0;
		ctx->found_idx = num;
		return 1;
	}
	return 0;
}

static int find_iref(struct btrfs_root *root,
		     struct btrfs_path *path,
		     struct btrfs_key *key,
		     u64 dir, u64 dir_gen, struct fs_path *name)
{
	int ret;
	struct find_ref_ctx ctx;

	ctx.dir = dir;
	ctx.name = name;
	ctx.dir_gen = dir_gen;
	ctx.found_idx = -1;
	ctx.root = root;

	ret = iterate_inode_ref(root, path, key, 0, __find_iref, &ctx);
	if (ret < 0)
		return ret;

	if (ctx.found_idx == -1)
		return -ENOENT;

	return ctx.found_idx;
}

static int __record_changed_new_ref(int num, u64 dir, int index,
				    struct fs_path *name,
				    void *ctx)
{
	u64 dir_gen;
	int ret;
	struct send_ctx *sctx = ctx;

	ret = get_inode_info(sctx->send_root, dir, NULL, &dir_gen, NULL,
			     NULL, NULL, NULL);
	if (ret)
		return ret;

	ret = find_iref(sctx->parent_root, sctx->right_path,
			sctx->cmp_key, dir, dir_gen, name);
	if (ret == -ENOENT)
		ret = __record_new_ref(num, dir, index, name, sctx);
	else if (ret > 0)
		ret = 0;

	return ret;
}

static int __record_changed_deleted_ref(int num, u64 dir, int index,
					struct fs_path *name,
					void *ctx)
{
	u64 dir_gen;
	int ret;
	struct send_ctx *sctx = ctx;

	ret = get_inode_info(sctx->parent_root, dir, NULL, &dir_gen, NULL,
			     NULL, NULL, NULL);
	if (ret)
		return ret;

	ret = find_iref(sctx->send_root, sctx->left_path, sctx->cmp_key,
			dir, dir_gen, name);
	if (ret == -ENOENT)
		ret = __record_deleted_ref(num, dir, index, name, sctx);
	else if (ret > 0)
		ret = 0;

	return ret;
}

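/*
 * Record the refs that changed between the parent and send snapshots: refs
 * present only in the send snapshot are recorded as new, refs present only
 * in the parent snapshot are recorded as deleted.
 */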
static int record_changed_ref(struct send_ctx *sctx)
{
	int ret = 0;

	ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
			sctx->cmp_key, 0, __record_changed_new_ref, sctx);
	if (ret < 0)
		goto out;
	ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
			sctx->cmp_key, 0, __record_changed_deleted_ref, sctx);
	if (ret < 0)
		goto out;
	ret = 0;

out:
	return ret;
}

/*
 * Record and process all refs at once. Needed when an inode changes the
 * generation number, which means that it was deleted and recreated.
 */
static int process_all_refs(struct send_ctx *sctx,
			    enum btrfs_compare_tree_result cmd)
{
	int ret;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	int slot;
	iterate_inode_ref_t cb;
	int pending_move = 0;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	if (cmd == BTRFS_COMPARE_TREE_NEW) {
		root = sctx->send_root;
		cb = __record_new_ref;
	} else if (cmd == BTRFS_COMPARE_TREE_DELETED) {
		root = sctx->parent_root;
		cb = __record_deleted_ref;
	} else {
		btrfs_err(sctx->send_root->fs_info,
				"Wrong command %d in process_all_refs", cmd);
		ret = -EINVAL;
		goto out;
	}

	key.objectid = sctx->cmp_key->objectid;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		eb = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(eb, &found_key, slot);

		if (found_key.objectid != key.objectid ||
		    (found_key.type != BTRFS_INODE_REF_KEY &&
		     found_key.type != BTRFS_INODE_EXTREF_KEY))
			break;

		ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx);
		if (ret < 0)
			goto out;

		path->slots[0]++;
	}
	btrfs_release_path(path);

	/*
	 * We don't actually care about pending_move as we are simply
	 * re-creating this inode and will be renaming it into place once we
	 * rename the parent directory.
	 */
	ret = process_recorded_refs(sctx, &pending_move);
out:
	btrfs_free_path(path);
	return ret;
}

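/*
 * Emit a SET_XATTR command for the given path. The attributes are sent in
 * the stream's TLV format: path, xattr name and xattr data.
 */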
static int send_set_xattr(struct send_ctx *sctx,
			  struct fs_path *path,
			  const char *name, int name_len,
			  const char *data, int data_len)
{
	int ret = 0;

	ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
	TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

static int send_remove_xattr(struct send_ctx *sctx,
			     struct fs_path *path,
			     const char *name, int name_len)
{
	int ret = 0;

	ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

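/*
 * iterate_dir_item() callback that emits a SET_XATTR command for each xattr
 * found in the send snapshot.
 */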
static int __process_new_xattr(int num, struct btrfs_key *di_key,
			       const char *name, int name_len,
			       const char *data, int data_len,
			       u8 type, void *ctx)
{
	int ret;
	struct send_ctx *sctx = ctx;
	struct fs_path *p;
	struct posix_acl_xattr_header dummy_acl;

	/* Capabilities are emitted by finish_inode_if_needed */
	if (!strncmp(name, XATTR_NAME_CAPS, name_len))
		return 0;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	/*
	 * This hack is needed because empty acls are stored as zero byte
	 * data in xattrs. The problem with that is that receiving these zero
	 * byte acls will fail later. To fix this, we send a dummy acl list
	 * that only contains the version number and no entries.
	 */
	if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) ||
	    !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) {
		if (data_len == 0) {
			dummy_acl.a_version =
					cpu_to_le32(POSIX_ACL_XATTR_VERSION);
			data = (char *)&dummy_acl;
			data_len = sizeof(dummy_acl);
		}
	}

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	ret = send_set_xattr(sctx, p, name, name_len, data, data_len);

out:
	fs_path_free(p);
	return ret;
}

static int __process_deleted_xattr(int num, struct btrfs_key *di_key,
				   const char *name, int name_len,
				   const char *data, int data_len,
				   u8 type, void *ctx)
{
	int ret;
	struct send_ctx *sctx = ctx;
	struct fs_path *p;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	ret = send_remove_xattr(sctx, p, name, name_len);

out:
	fs_path_free(p);
	return ret;
}

static int process_new_xattr(struct send_ctx *sctx)
{
	int ret = 0;

	ret = iterate_dir_item(sctx->send_root, sctx->left_path,
			       __process_new_xattr, sctx);

	return ret;
}

static int process_deleted_xattr(struct send_ctx *sctx)
{
	return iterate_dir_item(sctx->parent_root, sctx->right_path,
				__process_deleted_xattr, sctx);
}

struct find_xattr_ctx {
	const char *name;
	int name_len;
	int found_idx;
	char *found_data;
	int found_data_len;
};

static int __find_xattr(int num, struct btrfs_key *di_key,
			const char *name, int name_len,
			const char *data, int data_len,
			u8 type, void *vctx)
{
	struct find_xattr_ctx *ctx = vctx;

	if (name_len == ctx->name_len &&
	    strncmp(name, ctx->name, name_len) == 0) {
		ctx->found_idx = num;
		ctx->found_data_len = data_len;
		ctx->found_data = kmemdup(data, data_len, GFP_KERNEL);
		if (!ctx->found_data)
			return -ENOMEM;
		return 1;
	}
	return 0;
}

static int find_xattr(struct btrfs_root *root,
		      struct btrfs_path *path,
		      struct btrfs_key *key,
		      const char *name, int name_len,
		      char **data, int *data_len)
{
	int ret;
	struct find_xattr_ctx ctx;

	ctx.name = name;
	ctx.name_len = name_len;
	ctx.found_idx = -1;
	ctx.found_data = NULL;
	ctx.found_data_len = 0;

	ret = iterate_dir_item(root, path, __find_xattr, &ctx);
	if (ret < 0)
		return ret;

	if (ctx.found_idx == -1)
		return -ENOENT;
	if (data) {
		*data = ctx.found_data;
		*data_len = ctx.found_data_len;
	} else {
		kfree(ctx.found_data);
	}
	return ctx.found_idx;
}

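/*
 * Process a changed xattr from the send snapshot: emit a SET_XATTR command
 * only if the xattr is new or its data differs from the parent snapshot.
 */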
static int __process_changed_new_xattr(int num, struct btrfs_key *di_key,
				       const char *name, int name_len,
				       const char *data, int data_len,
				       u8 type, void *ctx)
{
	int ret;
	struct send_ctx *sctx = ctx;
	char *found_data = NULL;
	int found_data_len = 0;

	ret = find_xattr(sctx->parent_root, sctx->right_path,
			 sctx->cmp_key, name, name_len, &found_data,
			 &found_data_len);
	if (ret == -ENOENT) {
		ret = __process_new_xattr(num, di_key, name, name_len, data,
				data_len, type, ctx);
	} else if (ret >= 0) {
		if (data_len != found_data_len ||
		    memcmp(data, found_data, data_len)) {
			ret = __process_new_xattr(num, di_key, name, name_len,
					data, data_len, type, ctx);
		} else {
			ret = 0;
		}
	}

	kfree(found_data);
	return ret;
}

static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key,
					   const char *name, int name_len,
					   const char *data, int data_len,
					   u8 type, void *ctx)
{
	int ret;
	struct send_ctx *sctx = ctx;

	ret = find_xattr(sctx->send_root, sctx->left_path, sctx->cmp_key,
			 name, name_len, NULL, NULL);
	if (ret == -ENOENT)
		ret = __process_deleted_xattr(num, di_key, name, name_len, data,
					      data_len, type, ctx);
	else if (ret >= 0)
		ret = 0;

	return ret;
}

static int process_changed_xattr(struct send_ctx *sctx)
{
	int ret = 0;

	ret = iterate_dir_item(sctx->send_root, sctx->left_path,
			       __process_changed_new_xattr, sctx);
	if (ret < 0)
		goto out;
	ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
			       __process_changed_deleted_xattr, sctx);

out:
	return ret;
}

static int process_all_new_xattrs(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	int slot;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	root = sctx->send_root;

	key.objectid = sctx->cmp_key->objectid;
	key.type = BTRFS_XATTR_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		eb = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				goto out;
			} else if (ret > 0) {
				ret = 0;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(eb, &found_key, slot);
		if (found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			ret = 0;
			goto out;
		}

		ret = iterate_dir_item(root, path, __process_new_xattr, sctx);
		if (ret < 0)
			goto out;

		path->slots[0]++;
	}

out:
	btrfs_free_path(path);
	return ret;
}

static inline u64 max_send_read_size(const struct send_ctx *sctx)
{
	return sctx->send_max_size - SZ_16K;
}

static int put_data_header(struct send_ctx *sctx, u32 len)
{
	struct btrfs_tlv_header *hdr;

	if (sctx->send_max_size - sctx->send_size < sizeof(*hdr) + len)
		return -EOVERFLOW;
	hdr = (struct btrfs_tlv_header *)(sctx->send_buf + sctx->send_size);
	put_unaligned_le16(BTRFS_SEND_A_DATA, &hdr->tlv_type);
	put_unaligned_le16(len, &hdr->tlv_len);
	sctx->send_size += sizeof(*hdr);
	return 0;
}

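/*
 * Copy file data from the page cache into the send buffer, after writing the
 * data TLV header. Readahead is used to speed up sequential reads of the
 * file's contents.
 */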
static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len)
{
	struct btrfs_root *root = sctx->send_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct inode *inode;
	struct page *page;
	pgoff_t index = offset >> PAGE_SHIFT;
	pgoff_t last_index;
	unsigned pg_offset = offset_in_page(offset);
	int ret;

	ret = put_data_header(sctx, len);
	if (ret)
		return ret;

	inode = btrfs_iget(fs_info->sb, sctx->cur_ino, root);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	last_index = (offset + len - 1) >> PAGE_SHIFT;

	/* initial readahead */
	memset(&sctx->ra, 0, sizeof(struct file_ra_state));
	file_ra_state_init(&sctx->ra, inode->i_mapping);

	while (index <= last_index) {
		unsigned cur_len = min_t(unsigned, len,
					 PAGE_SIZE - pg_offset);

		page = find_lock_page(inode->i_mapping, index);
		if (!page) {
			page_cache_sync_readahead(inode->i_mapping, &sctx->ra,
				NULL, index, last_index + 1 - index);

			page = find_or_create_page(inode->i_mapping, index,
					GFP_KERNEL);
			if (!page) {
				ret = -ENOMEM;
				break;
			}
		}

		if (PageReadahead(page)) {
			page_cache_async_readahead(inode->i_mapping, &sctx->ra,
				NULL, page, index, last_index + 1 - index);
		}

		if (!PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				unlock_page(page);
				btrfs_err(fs_info,
			"send: IO error at offset %llu for inode %llu root %llu",
					page_offset(page), sctx->cur_ino,
					sctx->send_root->root_key.objectid);
				put_page(page);
				ret = -EIO;
				break;
			}
		}

		memcpy_from_page(sctx->send_buf + sctx->send_size, page,
				 pg_offset, cur_len);
		unlock_page(page);
		put_page(page);
		index++;
		pg_offset = 0;
		len -= cur_len;
		sctx->send_size += cur_len;
	}
	iput(inode);
	return ret;
}

/*
 * Read some bytes from the current inode/file and send a write command to
 * user space.
 */
static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret = 0;
	struct fs_path *p;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	btrfs_debug(fs_info, "send_write offset=%llu, len=%d", offset, len);

	ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
	ret = put_file_data(sctx, offset, len);
	if (ret < 0)
		goto out;

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}

/*
 * Send a clone command to user space.
 */
static int send_clone(struct send_ctx *sctx,
		      u64 offset, u32 len,
		      struct clone_root *clone_root)
{
	int ret = 0;
	struct fs_path *p;
	u64 gen;

	btrfs_debug(sctx->send_root->fs_info,
		    "send_clone offset=%llu, len=%d, clone_root=%llu, clone_inode=%llu, clone_offset=%llu",
		    offset, len, clone_root->root->root_key.objectid,
		    clone_root->ino, clone_root->offset);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);

	if (clone_root->root == sctx->send_root) {
		ret = get_inode_info(sctx->send_root, clone_root->ino, NULL,
				&gen, NULL, NULL, NULL, NULL);
		if (ret < 0)
			goto out;
		ret = get_cur_path(sctx, clone_root->ino, gen, p);
	} else {
		ret = get_inode_path(clone_root->root, clone_root->ino, p);
	}
	if (ret < 0)
		goto out;

	/*
	 * If the parent we're using has a received_uuid set then use that as
	 * our clone source as that is what we will look for when doing a
	 * receive.
	 *
	 * This covers the case that we create a snapshot off of a received
	 * subvolume and then use that as the parent and try to receive on a
	 * different host.
	 */
	if (!btrfs_is_empty_uuid(clone_root->root->root_item.received_uuid))
		TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
			     clone_root->root->root_item.received_uuid);
	else
		TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
			     clone_root->root->root_item.uuid);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
		    btrfs_root_ctransid(&clone_root->root->root_item));
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET,
		    clone_root->offset);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}

/*
 * Send an update extent command to user space.
 */
static int send_update_extent(struct send_ctx *sctx,
			      u64 offset, u32 len)
{
	int ret = 0;
	struct fs_path *p;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}

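/*
 * Send zero-filled WRITE commands (or a single UPDATE_EXTENT command when
 * file data is not sent) covering the hole between the end of the last
 * processed extent and the given end offset, capped at the inode's size.
 */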
static int send_hole(struct send_ctx *sctx, u64 end)
{
	struct fs_path *p = NULL;
	u64 read_size = max_send_read_size(sctx);
	u64 offset = sctx->cur_inode_last_extent;
	int ret = 0;

	/*
	 * A hole that starts at EOF or beyond it. Since we do not yet support
	 * fallocate (for extent preallocation and hole punching), sending a
	 * write of zeroes starting at EOF or beyond would later require issuing
	 * a truncate operation which would undo the write and achieve nothing.
	 */
	if (offset >= sctx->cur_inode_size)
		return 0;

	/*
	 * Don't go beyond the inode's i_size due to prealloc extents that start
	 * after the i_size.
	 */
	end = min_t(u64, end, sctx->cur_inode_size);

	if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
		return send_update_extent(sctx, offset, end - offset);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;
	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto tlv_put_failure;
	while (offset < end) {
		u64 len = min(end - offset, read_size);

		ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
		if (ret < 0)
			break;
		TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
		TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
		ret = put_data_header(sctx, len);
		if (ret < 0)
			break;
		memset(sctx->send_buf + sctx->send_size, 0, len);
		sctx->send_size += len;
		ret = send_cmd(sctx);
		if (ret < 0)
			break;
		offset += len;
	}
	sctx->cur_inode_next_write_offset = offset;
tlv_put_failure:
	fs_path_free(p);
	return ret;
}

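/*
 * Send the data of a file range as a series of WRITE commands, each carrying
 * at most max_send_read_size() bytes, or as a single UPDATE_EXTENT command
 * when file data is not sent.
 */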
static int send_extent_data(struct send_ctx *sctx,
			    const u64 offset,
			    const u64 len)
{
	u64 read_size = max_send_read_size(sctx);
	u64 sent = 0;

	if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
		return send_update_extent(sctx, offset, len);

	while (sent < len) {
		u64 size = min(len - sent, read_size);
		int ret;

		ret = send_write(sctx, offset + sent, size);
		if (ret < 0)
			return ret;
		sent += size;
	}
	return 0;
}

/*
 * Search for a capability xattr related to sctx->cur_ino. If the capability is
 * found, call send_set_xattr to emit it.
 *
 * Return 0 if there isn't a capability, or when the capability was emitted
 * successfully, or < 0 if an error occurred.
 */
static int send_capabilities(struct send_ctx *sctx)
{
	struct fs_path *fspath = NULL;
	struct btrfs_path *path;
	struct btrfs_dir_item *di;
	struct extent_buffer *leaf;
	unsigned long data_ptr;
	char *buf = NULL;
	int buf_len;
	int ret = 0;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	di = btrfs_lookup_xattr(NULL, sctx->send_root, path, sctx->cur_ino,
				XATTR_NAME_CAPS, strlen(XATTR_NAME_CAPS), 0);
	if (!di) {
		/* There is no xattr for this inode */
		goto out;
	} else if (IS_ERR(di)) {
		ret = PTR_ERR(di);
		goto out;
	}

	leaf = path->nodes[0];
	buf_len = btrfs_dir_data_len(leaf, di);

	fspath = fs_path_alloc();
	buf = kmalloc(buf_len, GFP_KERNEL);
	if (!fspath || !buf) {
		ret = -ENOMEM;
		goto out;
	}

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath);
	if (ret < 0)
		goto out;

	data_ptr = (unsigned long)(di + 1) + btrfs_dir_name_len(leaf, di);
	read_extent_buffer(leaf, buf, data_ptr, buf_len);

	ret = send_set_xattr(sctx, fspath, XATTR_NAME_CAPS,
			strlen(XATTR_NAME_CAPS), buf, buf_len);
out:
	kfree(buf);
	fs_path_free(fspath);
	btrfs_free_path(path);
	return ret;
}

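/*
 * Clone as much as possible of the given file range from the given clone
 * source, falling back to regular WRITE commands for any part of the range
 * that can not be cloned safely.
 */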
5369 * 5370 * Example: 5371 * 5372 * mkfs.btrfs -f /dev/sda 5373 * mount /dev/sda /mnt 5374 * xfs_io -f -c "pwrite -S 0xaa 0K 100K" /mnt/foo 5375 * cp --reflink=always /mnt/foo /mnt/bar 5376 * xfs_io -c "pwrite -S 0xbb 50K 50K" /mnt/foo 5377 * btrfs subvolume snapshot -r /mnt /mnt/snap 5378 * 5379 * If when we send the snapshot and we are processing file bar (which 5380 * has a higher inode number than foo) we blindly send a clone operation 5381 * for the [0, 100K[ range from foo to bar, the receiver ends up getting 5382 * a file bar that matches the content of file foo - iow, doesn't match 5383 * the content from bar in the original filesystem. 5384 */ 5385 key.objectid = clone_root->ino; 5386 key.type = BTRFS_EXTENT_DATA_KEY; 5387 key.offset = clone_root->offset; 5388 ret = btrfs_search_slot(NULL, clone_root->root, &key, path, 0, 0); 5389 if (ret < 0) 5390 goto out; 5391 if (ret > 0 && path->slots[0] > 0) { 5392 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1); 5393 if (key.objectid == clone_root->ino && 5394 key.type == BTRFS_EXTENT_DATA_KEY) 5395 path->slots[0]--; 5396 } 5397 5398 while (true) { 5399 struct extent_buffer *leaf = path->nodes[0]; 5400 int slot = path->slots[0]; 5401 struct btrfs_file_extent_item *ei; 5402 u8 type; 5403 u64 ext_len; 5404 u64 clone_len; 5405 u64 clone_data_offset; 5406 5407 if (slot >= btrfs_header_nritems(leaf)) { 5408 ret = btrfs_next_leaf(clone_root->root, path); 5409 if (ret < 0) 5410 goto out; 5411 else if (ret > 0) 5412 break; 5413 continue; 5414 } 5415 5416 btrfs_item_key_to_cpu(leaf, &key, slot); 5417 5418 /* 5419 * We might have an implicit trailing hole (NO_HOLES feature 5420 * enabled). We deal with it after leaving this loop. 5421 */ 5422 if (key.objectid != clone_root->ino || 5423 key.type != BTRFS_EXTENT_DATA_KEY) 5424 break; 5425 5426 ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); 5427 type = btrfs_file_extent_type(leaf, ei); 5428 if (type == BTRFS_FILE_EXTENT_INLINE) { 5429 ext_len = btrfs_file_extent_ram_bytes(leaf, ei); 5430 ext_len = PAGE_ALIGN(ext_len); 5431 } else { 5432 ext_len = btrfs_file_extent_num_bytes(leaf, ei); 5433 } 5434 5435 if (key.offset + ext_len <= clone_root->offset) 5436 goto next; 5437 5438 if (key.offset > clone_root->offset) { 5439 /* Implicit hole, NO_HOLES feature enabled. 
		if (key.offset >= clone_root->offset + len)
			break;

		if (key.offset >= clone_src_i_size)
			break;

		if (key.offset + ext_len > clone_src_i_size)
			ext_len = clone_src_i_size - key.offset;

		clone_data_offset = btrfs_file_extent_offset(leaf, ei);
		if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte) {
			clone_root->offset = key.offset;
			if (clone_data_offset < data_offset &&
			    clone_data_offset + ext_len > data_offset) {
				u64 extent_offset;

				extent_offset = data_offset - clone_data_offset;
				ext_len -= extent_offset;
				clone_data_offset += extent_offset;
				clone_root->offset += extent_offset;
			}
		}

		clone_len = min_t(u64, ext_len, len);

		if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte &&
		    clone_data_offset == data_offset) {
			const u64 src_end = clone_root->offset + clone_len;
			const u64 sectorsize = SZ_64K;

			/*
			 * We can't clone the last block, when its size is not
			 * sector size aligned, into the middle of a file. If we
			 * do so, the receiver will get a failure (-EINVAL) when
			 * trying to clone or will silently corrupt the data in
			 * the destination file if it's on a kernel without the
			 * fix introduced by commit ac765f83f1397646
			 * ("Btrfs: fix data corruption due to cloning of eof
			 * block").
			 *
			 * So issue a clone of the aligned down range plus a
			 * regular write for the eof block, if we hit that case.
			 *
			 * Also, we use the maximum possible sector size, 64K,
			 * because we don't know the sector size of the
			 * filesystem that receives the stream, so we have to
			 * assume the largest possible sector size.
			 */
			if (src_end == clone_src_i_size &&
			    !IS_ALIGNED(src_end, sectorsize) &&
			    offset + clone_len < sctx->cur_inode_size) {
				u64 slen;

				slen = ALIGN_DOWN(src_end - clone_root->offset,
						  sectorsize);
				if (slen > 0) {
					ret = send_clone(sctx, offset, slen,
							 clone_root);
					if (ret < 0)
						goto out;
				}
				ret = send_extent_data(sctx, offset + slen,
						       clone_len - slen);
			} else {
				ret = send_clone(sctx, offset, clone_len,
						 clone_root);
			}
		} else {
			ret = send_extent_data(sctx, offset, clone_len);
		}

		if (ret < 0)
			goto out;

		len -= clone_len;
		if (len == 0)
			break;
		offset += clone_len;
		clone_root->offset += clone_len;

		/*
		 * If we are cloning from the file we are currently processing,
		 * and using the send root as the clone root, we must stop once
		 * the current clone offset reaches the current eof of the file
		 * at the receiver, otherwise we would issue an invalid clone
		 * operation (source range going beyond eof) and cause the
		 * receiver to fail. So if we reach the current eof, bail out
		 * and fallback to a regular write.
		 */
		if (clone_root->root == sctx->send_root &&
		    clone_root->ino == sctx->cur_ino &&
		    clone_root->offset >= sctx->cur_inode_next_write_offset)
			break;

		data_offset += clone_len;
next:
		path->slots[0]++;
	}

	if (len > 0)
		ret = send_extent_data(sctx, offset, len);
	else
		ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

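/*
 * Send the data of the file extent item the tree comparison is currently
 * positioned on, either by cloning from a suitable source or by emitting
 * regular WRITE commands.
 */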
static int send_write_or_clone(struct send_ctx *sctx,
			       struct btrfs_path *path,
			       struct btrfs_key *key,
			       struct clone_root *clone_root)
{
	int ret = 0;
	u64 offset = key->offset;
	u64 end;
	u64 bs = sctx->send_root->fs_info->sb->s_blocksize;

	end = min_t(u64, btrfs_file_extent_end(path), sctx->cur_inode_size);
	if (offset >= end)
		return 0;

	if (clone_root && IS_ALIGNED(end, bs)) {
		struct btrfs_file_extent_item *ei;
		u64 disk_byte;
		u64 data_offset;

		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_file_extent_item);
		disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei);
		data_offset = btrfs_file_extent_offset(path->nodes[0], ei);
		ret = clone_range(sctx, clone_root, disk_byte, data_offset,
				  offset, end - offset);
	} else {
		ret = send_extent_data(sctx, offset, end - offset);
	}
	sctx->cur_inode_next_write_offset = end;
	return ret;
}

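/*
 * Check if a file extent item of the send snapshot covers the exact same
 * data as the matching extents in the parent snapshot, in which case there
 * is nothing to send for it. Returns 1 if the extent is unchanged, 0 if it
 * changed and < 0 on error.
 */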
static int is_extent_unchanged(struct send_ctx *sctx,
			       struct btrfs_path *left_path,
			       struct btrfs_key *ekey)
{
	int ret = 0;
	struct btrfs_key key;
	struct btrfs_path *path = NULL;
	struct extent_buffer *eb;
	int slot;
	struct btrfs_key found_key;
	struct btrfs_file_extent_item *ei;
	u64 left_disknr;
	u64 right_disknr;
	u64 left_offset;
	u64 right_offset;
	u64 left_offset_fixed;
	u64 left_len;
	u64 right_len;
	u64 left_gen;
	u64 right_gen;
	u8 left_type;
	u8 right_type;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	eb = left_path->nodes[0];
	slot = left_path->slots[0];
	ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	left_type = btrfs_file_extent_type(eb, ei);

	if (left_type != BTRFS_FILE_EXTENT_REG) {
		ret = 0;
		goto out;
	}
	left_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
	left_len = btrfs_file_extent_num_bytes(eb, ei);
	left_offset = btrfs_file_extent_offset(eb, ei);
	left_gen = btrfs_file_extent_generation(eb, ei);

	/*
	 * Following comments will refer to these graphics. L is the left
	 * extent which we are checking at the moment. 1-8 are the right
	 * extents that we iterate.
	 *
	 * |-----L-----|
	 * |-1-|-2a-|-3-|-4-|-5-|-6-|
	 *
	 * |-----L-----|
	 * |--1--|-2b-|...(same as above)
	 *
	 * Alternative situation. Happens on files where extents got split.
	 * |-----L-----|
	 * |-----------7-----------|-6-|
	 *
	 * Alternative situation. Happens on files which got larger.
	 * |-----L-----|
	 * |-8-|
	 * Nothing follows after 8.
	 */

	key.objectid = ekey->objectid;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = ekey->offset;
	ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		ret = 0;
		goto out;
	}

	/*
	 * Handle special case where the right side has no extents at all.
	 */
	eb = path->nodes[0];
	slot = path->slots[0];
	btrfs_item_key_to_cpu(eb, &found_key, slot);
	if (found_key.objectid != key.objectid ||
	    found_key.type != key.type) {
		/* If we're a hole then just pretend nothing changed */
		ret = (left_disknr) ? 0 : 1;
		goto out;
	}

	/*
	 * We're now on 2a, 2b or 7.
	 */
	key = found_key;
	while (key.offset < ekey->offset + left_len) {
		ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		right_type = btrfs_file_extent_type(eb, ei);
		if (right_type != BTRFS_FILE_EXTENT_REG &&
		    right_type != BTRFS_FILE_EXTENT_INLINE) {
			ret = 0;
			goto out;
		}

		if (right_type == BTRFS_FILE_EXTENT_INLINE) {
			right_len = btrfs_file_extent_ram_bytes(eb, ei);
			right_len = PAGE_ALIGN(right_len);
		} else {
			right_len = btrfs_file_extent_num_bytes(eb, ei);
		}

		/*
		 * Are we at extent 8? If yes, we know the extent is changed.
		 * This may only happen on the first iteration.
		 */
		if (found_key.offset + right_len <= ekey->offset) {
			/* If we're a hole just pretend nothing changed */
			ret = (left_disknr) ? 0 : 1;
			goto out;
		}

		/*
		 * We just wanted to see if the extent that follows an inline
		 * extent is a regular extent (i.e. check the above condition
		 * for inline extents too). This should normally not happen,
		 * but it's possible for example when we have an inline
		 * compressed extent representing data with a size matching
		 * the page size (currently the same as sector size).
		 */
		if (right_type == BTRFS_FILE_EXTENT_INLINE) {
			ret = 0;
			goto out;
		}

		right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
		right_offset = btrfs_file_extent_offset(eb, ei);
		right_gen = btrfs_file_extent_generation(eb, ei);

		left_offset_fixed = left_offset;
		if (key.offset < ekey->offset) {
			/* Fix the right offset for 2a and 7. */
			right_offset += ekey->offset - key.offset;
		} else {
			/* Fix the left offset for all behind 2a and 2b */
			left_offset_fixed += key.offset - ekey->offset;
		}

		/*
		 * Check if we have the same extent.
		 */
		if (left_disknr != right_disknr ||
		    left_offset_fixed != right_offset ||
		    left_gen != right_gen) {
			ret = 0;
			goto out;
		}

		/*
		 * Go to the next extent.
		 */
		ret = btrfs_next_item(sctx->parent_root, path);
		if (ret < 0)
			goto out;
		if (!ret) {
			eb = path->nodes[0];
			slot = path->slots[0];
			btrfs_item_key_to_cpu(eb, &found_key, slot);
		}
		if (ret || found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			key.offset += right_len;
			break;
		}
		if (found_key.offset != key.offset + right_len) {
			ret = 0;
			goto out;
		}
		key = found_key;
	}

	/*
	 * We're now behind the left extent (treat as unchanged) or at the end
	 * of the right side (treat as changed).
	 */
	if (key.offset >= ekey->offset + left_len)
		ret = 1;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	return ret;
}

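/*
 * Update sctx->cur_inode_last_extent to the end offset of the file extent
 * item found at or before the given offset in the send snapshot.
 */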
static int get_last_extent(struct send_ctx *sctx, u64 offset)
{
	struct btrfs_path *path;
	struct btrfs_root *root = sctx->send_root;
	struct btrfs_key key;
	int ret;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	sctx->cur_inode_last_extent = 0;

	key.objectid = sctx->cur_ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = offset;
	ret = btrfs_search_slot_for_read(root, &key, path, 0, 1);
	if (ret < 0)
		goto out;
	ret = 0;
	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY)
		goto out;

	sctx->cur_inode_last_extent = btrfs_file_extent_end(path);
out:
	btrfs_free_path(path);
	return ret;
}

static int range_is_hole_in_parent(struct send_ctx *sctx,
				   const u64 start,
				   const u64 end)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_root *root = sctx->parent_root;
	u64 search_start = start;
	int ret;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = sctx->cur_ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = search_start;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	while (search_start < end) {
		struct extent_buffer *leaf = path->nodes[0];
		int slot = path->slots[0];
		struct btrfs_file_extent_item *fi;
		u64 extent_end;

		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid < sctx->cur_ino ||
		    key.type < BTRFS_EXTENT_DATA_KEY)
			goto next;
		if (key.objectid > sctx->cur_ino ||
		    key.type > BTRFS_EXTENT_DATA_KEY ||
		    key.offset >= end)
			break;

		fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
		extent_end = btrfs_file_extent_end(path);
		if (extent_end <= start)
			goto next;
		if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) {
			search_start = extent_end;
			goto next;
		}
		ret = 0;
		goto out;
next:
		path->slots[0]++;
	}
	ret = 1;
out:
	btrfs_free_path(path);
	return ret;
}

static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
			   struct btrfs_key *key)
{
	int ret = 0;

	if (sctx->cur_ino != key->objectid || !need_send_hole(sctx))
		return 0;

	if (sctx->cur_inode_last_extent == (u64)-1) {
		ret = get_last_extent(sctx, key->offset - 1);
		if (ret)
			return ret;
	}

	if (path->slots[0] == 0 &&
	    sctx->cur_inode_last_extent < key->offset) {
		/*
		 * We might have skipped entire leaves that contained only
		 * file extent items for our current inode. These leaves have
		 * a generation number smaller (older) than the one in the
		 * current leaf and the leaf our last extent came from, and
		 * are located between these 2 leaves.
		 */
		ret = get_last_extent(sctx, key->offset - 1);
		if (ret)
			return ret;