#include "cache.h"
#include "lockfile.h"
#include "tree.h"
#include "tree-walk.h"
#include "cache-tree.h"
#include "object-store.h"
#include "replace-object.h"
#include "promisor-remote.h"

#ifndef DEBUG_CACHE_TREE
#define DEBUG_CACHE_TREE 0
#endif
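
/*
 * Allocate an empty cache_tree; an entry_count of -1 marks it as
 * invalid (not yet computed).
 */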
struct cache_tree *cache_tree(void)
{
	struct cache_tree *it = xcalloc(1, sizeof(struct cache_tree));
	it->entry_count = -1;
	return it;
}
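
/*
 * Recursively free a cache_tree and all of its subtrees, then clear
 * the caller's pointer.
 */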
void cache_tree_free(struct cache_tree **it_p)
{
	int i;
	struct cache_tree *it = *it_p;

	if (!it)
		return;
	for (i = 0; i < it->subtree_nr; i++)
		if (it->down[i]) {
			cache_tree_free(&it->down[i]->cache_tree);
			free(it->down[i]);
		}
	free(it->down);
	free(it);
	*it_p = NULL;
}

static int subtree_name_cmp(const char *one, int onelen,
			    const char *two, int twolen)
{
	if (onelen < twolen)
		return -1;
	if (twolen < onelen)
		return 1;
	return memcmp(one, two, onelen);
}
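
/*
 * Binary-search it->down for the subtree named "path"; return its
 * index when found, or the insertion point encoded as -pos-1 when
 * not.
 */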
static int subtree_pos(struct cache_tree *it, const char *path, int pathlen)
{
	struct cache_tree_sub **down = it->down;
	int lo, hi;

	lo = 0;
	hi = it->subtree_nr;
	while (lo < hi) {
		int mi = lo + (hi - lo) / 2;
		struct cache_tree_sub *mdl = down[mi];
		int cmp = subtree_name_cmp(path, pathlen,
					   mdl->name, mdl->namelen);
		if (!cmp)
			return mi;
		if (cmp < 0)
			hi = mi;
		else
			lo = mi + 1;
	}
	return -lo-1;
}
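
/*
 * Look up the immediate subtree "path" (of length "pathlen") of "it";
 * when "create" is set, a missing subtree is inserted at its sorted
 * position instead of returning NULL.
 */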
static struct cache_tree_sub *find_subtree(struct cache_tree *it,
					   const char *path,
					   int pathlen,
					   int create)
{
	struct cache_tree_sub *down;
	int pos = subtree_pos(it, path, pathlen);

	if (0 <= pos)
		return it->down[pos];
	if (!create)
		return NULL;

	pos = -pos-1;
	ALLOC_GROW(it->down, it->subtree_nr + 1, it->subtree_alloc);
	it->subtree_nr++;

	FLEX_ALLOC_MEM(down, name, path, pathlen);
	down->cache_tree = NULL;
	down->namelen = pathlen;

	if (pos < it->subtree_nr)
		MOVE_ARRAY(it->down + pos + 1, it->down + pos,
			   it->subtree_nr - pos - 1);
	it->down[pos] = down;
	return down;
}

struct cache_tree_sub *cache_tree_sub(struct cache_tree *it, const char *path)
{
	int pathlen = strlen(path);
	return find_subtree(it, path, pathlen, 1);
}

static int do_invalidate_path(struct cache_tree *it, const char *path)
{
	/* a/b/c
	 * ==> invalidate self
	 * ==> find "a", have it invalidate "b/c"
	 * a
	 * ==> invalidate self
	 * ==> if "a" exists as a subtree, remove it.
	 */
	const char *slash;
	int namelen;
	struct cache_tree_sub *down;

#if DEBUG_CACHE_TREE
	fprintf(stderr, "cache-tree invalidate <%s>\n", path);
#endif

	if (!it)
		return 0;
	slash = strchrnul(path, '/');
	namelen = slash - path;
	it->entry_count = -1;
	if (!*slash) {
		int pos;
		pos = subtree_pos(it, path, namelen);
		if (0 <= pos) {
			cache_tree_free(&it->down[pos]->cache_tree);
			free(it->down[pos]);
			/* 0 1 2 3 4 5
			 *       ^     ^subtree_nr = 6
			 *       pos
			 * move 4 and 5 up one place (2 entries)
			 * 2 = 6 - 3 - 1 = subtree_nr - pos - 1
			 */
			MOVE_ARRAY(it->down + pos, it->down + pos + 1,
				   it->subtree_nr - pos - 1);
			it->subtree_nr--;
		}
		return 1;
	}
	down = find_subtree(it, path, namelen, 0);
	if (down)
		do_invalidate_path(down->cache_tree, slash + 1);
	return 1;
}

void cache_tree_invalidate_path(struct index_state *istate, const char *path)
{
	if (do_invalidate_path(istate->cache_tree, path))
		istate->cache_changed |= CACHE_TREE_CHANGED;
}
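
/*
 * Check that the index is in a state that can be written out as
 * trees: it must not have unmerged entries, nor both "path" and
 * "path/file" at the same time.  Returns 0 when OK, -1 otherwise.
 */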
static int verify_cache(struct cache_entry **cache,
			int entries, int flags)
{
	int i, funny;
	int silent = flags & WRITE_TREE_SILENT;

	/* Verify that the tree is merged */
	funny = 0;
	for (i = 0; i < entries; i++) {
		const struct cache_entry *ce = cache[i];
		if (ce_stage(ce)) {
			if (silent)
				return -1;
			if (10 < ++funny) {
				fprintf(stderr, "...\n");
				break;
			}
			fprintf(stderr, "%s: unmerged (%s)\n",
				ce->name, oid_to_hex(&ce->oid));
		}
	}
	if (funny)
		return -1;

	/* Also verify that the cache does not have path and path/file
	 * at the same time.  At this point we know the cache has only
	 * stage 0 entries.
	 */
	funny = 0;
	for (i = 0; i < entries - 1; i++) {
		/* path/file always comes after path because of the way
		 * the cache is sorted.  Also path can appear only once,
		 * which means a conflicting one would immediately follow.
		 */
		const char *this_name = cache[i]->name;
		const char *next_name = cache[i+1]->name;
		int this_len = strlen(this_name);
		if (this_len < strlen(next_name) &&
		    strncmp(this_name, next_name, this_len) == 0 &&
		    next_name[this_len] == '/') {
			if (10 < ++funny) {
				fprintf(stderr, "...\n");
				break;
			}
			fprintf(stderr, "You have both %s and %s\n",
				this_name, next_name);
		}
	}
	if (funny)
		return -1;
	return 0;
}
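
/*
 * Discard the subtrees that were not marked "used" by the caller;
 * they no longer correspond to directories in the index.
 */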
static void discard_unused_subtrees(struct cache_tree *it)
{
	struct cache_tree_sub **down = it->down;
	int nr = it->subtree_nr;
	int dst, src;

	for (dst = src = 0; src < nr; src++) {
		struct cache_tree_sub *s = down[src];
		if (s->used)
			down[dst++] = s;
		else {
			cache_tree_free(&s->cache_tree);
			free(s);
			it->subtree_nr--;
		}
	}
}
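
/*
 * A cache-tree is fully valid when its entry_count has been computed,
 * the named tree object exists in the object database, and the same
 * holds recursively for every subtree.
 */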
int cache_tree_fully_valid(struct cache_tree *it)
{
	int i;

	if (!it)
		return 0;
	if (it->entry_count < 0 || !has_object_file(&it->oid))
		return 0;
	for (i = 0; i < it->subtree_nr; i++) {
		if (!cache_tree_fully_valid(it->down[i]->cache_tree))
			return 0;
	}
	return 1;
}
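
/*
 * Compute and write out the tree object for the index entries that
 * live under "base" (the first "baselen" bytes of their paths),
 * recursing into subdirectories.  On success, return the number of
 * index entries consumed at this level and record the resulting tree
 * in it->oid; a negative return signals an error.  Entries skipped
 * because of CE_REMOVE are counted in *skip_count.
 */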
static int update_one(struct cache_tree *it,
		      struct cache_entry **cache,
		      int entries,
		      const char *base,
		      int baselen,
		      int *skip_count,
		      int flags)
{
	struct strbuf buffer;
	int missing_ok = flags & WRITE_TREE_MISSING_OK;
	int dryrun = flags & WRITE_TREE_DRY_RUN;
	int repair = flags & WRITE_TREE_REPAIR;
	int to_invalidate = 0;
	int i;

	assert(!(dryrun && repair));

	*skip_count = 0;

	if (0 <= it->entry_count && has_object_file(&it->oid))
		return it->entry_count;

	/*
	 * We first scan for subtrees and update them; we start by
	 * marking existing subtrees -- the ones that are unmarked
	 * should not be in the result.
	 */
	for (i = 0; i < it->subtree_nr; i++)
		it->down[i]->used = 0;

	/*
	 * Find the subtrees and update them.
	 */
	i = 0;
	while (i < entries) {
		const struct cache_entry *ce = cache[i];
		struct cache_tree_sub *sub;
		const char *path, *slash;
		int pathlen, sublen, subcnt, subskip;

		path = ce->name;
		pathlen = ce_namelen(ce);
		if (pathlen <= baselen || memcmp(base, path, baselen))
			break; /* at the end of this level */

		slash = strchr(path + baselen, '/');
		if (!slash) {
			i++;
			continue;
		}
		/*
		 * a/bbb/c (base = a/, slash = /c)
		 * ==>
		 * path+baselen = bbb/c, sublen = 3
		 */
		sublen = slash - (path + baselen);
		sub = find_subtree(it, path + baselen, sublen, 1);
		if (!sub->cache_tree)
			sub->cache_tree = cache_tree();
		subcnt = update_one(sub->cache_tree,
				    cache + i, entries - i,
				    path,
				    baselen + sublen + 1,
				    &subskip,
				    flags);
		if (subcnt < 0)
			return subcnt;
		if (!subcnt)
			die("index cache-tree records empty sub-tree");
		i += subcnt;
		sub->count = subcnt; /* to be used in the next loop */
		*skip_count += subskip;
		sub->used = 1;
	}

	discard_unused_subtrees(it);

	/*
	 * Then write out the tree object for this level.
	 */
	strbuf_init(&buffer, 8192);

	i = 0;
	while (i < entries) {
		const struct cache_entry *ce = cache[i];
		struct cache_tree_sub *sub = NULL;
		const char *path, *slash;
		int pathlen, entlen;
		const struct object_id *oid;
		unsigned mode;
		int expected_missing = 0;
		int contains_ita = 0;
		int ce_missing_ok;

		path = ce->name;
		pathlen = ce_namelen(ce);
		if (pathlen <= baselen || memcmp(base, path, baselen))
			break; /* at the end of this level */

		slash = strchr(path + baselen, '/');
		if (slash) {
			entlen = slash - (path + baselen);
			sub = find_subtree(it, path + baselen, entlen, 0);
			if (!sub)
				die("cache-tree.c: '%.*s' in '%s' not found",
				    entlen, path + baselen, path);
			i += sub->count;
			oid = &sub->cache_tree->oid;
			mode = S_IFDIR;
			contains_ita = sub->cache_tree->entry_count < 0;
			if (contains_ita) {
				to_invalidate = 1;
				expected_missing = 1;
			}
		}
		else {
			oid = &ce->oid;
			mode = ce->ce_mode;
			entlen = pathlen - baselen;
			i++;
		}

		ce_missing_ok = mode == S_IFGITLINK || missing_ok ||
			(has_promisor_remote() &&
			 ce_skip_worktree(ce));
		if (is_null_oid(oid) ||
		    (!ce_missing_ok && !has_object_file(oid))) {
			strbuf_release(&buffer);
			if (expected_missing)
				return -1;
			return error("invalid object %06o %s for '%.*s'",
				mode, oid_to_hex(oid), entlen+baselen, path);
		}

		/*
		 * CE_REMOVE entries are removed before the index is
		 * written to disk.  Skip them to remain consistent
		 * with the future on-disk index.
		 */
		if (ce->ce_flags & CE_REMOVE) {
			*skip_count = *skip_count + 1;
			continue;
		}

		/*
		 * CE_INTENT_TO_ADD entries exist in the on-disk index
		 * but are not part of generated trees.  Invalidate up
		 * to the root to force cache-tree users to read
		 * elsewhere.
		 */
		if (!sub && ce_intent_to_add(ce)) {
			to_invalidate = 1;
			continue;
		}

		/*
		 * "sub" can be an empty tree if all subentries are i-t-a.
		 */
		if (contains_ita && is_empty_tree_oid(oid))
			continue;

		strbuf_grow(&buffer, entlen + 100);
		strbuf_addf(&buffer, "%o %.*s%c", mode, entlen, path + baselen, '\0');
		strbuf_add(&buffer, oid->hash, the_hash_algo->rawsz);

#if DEBUG_CACHE_TREE
		fprintf(stderr, "cache-tree update-one %o %.*s\n",
			mode, entlen, path + baselen);
#endif
	}

	if (repair) {
		struct object_id oid;
		hash_object_file(the_hash_algo, buffer.buf, buffer.len,
				 tree_type, &oid);
		if (has_object_file_with_flags(&oid, OBJECT_INFO_SKIP_FETCH_OBJECT))
			oidcpy(&it->oid, &oid);
		else
			to_invalidate = 1;
	} else if (dryrun) {
		hash_object_file(the_hash_algo, buffer.buf, buffer.len,
				 tree_type, &it->oid);
	} else if (write_object_file(buffer.buf, buffer.len, tree_type,
				     &it->oid)) {
		strbuf_release(&buffer);
		return -1;
	}

	strbuf_release(&buffer);
	it->entry_count = to_invalidate ? -1 : i - *skip_count;
#if DEBUG_CACHE_TREE
	fprintf(stderr, "cache-tree update-one (%d ent, %d subtree) %s\n",
		it->entry_count, it->subtree_nr,
		oid_to_hex(&it->oid));
#endif
	return i;
}

int cache_tree_update(struct index_state *istate, int flags)
{
	struct cache_tree *it = istate->cache_tree;
	struct cache_entry **cache = istate->cache;
	int entries = istate->cache_nr;
	int skip, i = verify_cache(cache, entries, flags);

	if (i)
		return i;
	trace_performance_enter();
	i = update_one(it, cache, entries, "", 0, &skip, flags);
	trace_performance_leave("cache_tree_update");
	if (i < 0)
		return i;
	istate->cache_changed |= CACHE_TREE_CHANGED;
	return 0;
}

static void write_one(struct strbuf *buffer, struct cache_tree *it,
		      const char *path, int pathlen)
{
	int i;

	/* One "cache-tree" entry consists of the following:
	 * path (NUL terminated)
	 * entry_count, subtree_nr ("%d %d\n")
	 * tree-sha1 (missing if invalid)
	 * subtree_nr "cache-tree" entries for subtrees.
	 */
	strbuf_grow(buffer, pathlen + 100);
	strbuf_add(buffer, path, pathlen);
	strbuf_addf(buffer, "%c%d %d\n", 0, it->entry_count, it->subtree_nr);

#if DEBUG_CACHE_TREE
	if (0 <= it->entry_count)
		fprintf(stderr, "cache-tree <%.*s> (%d ent, %d subtree) %s\n",
			pathlen, path, it->entry_count, it->subtree_nr,
			oid_to_hex(&it->oid));
	else
		fprintf(stderr, "cache-tree <%.*s> (%d subtree) invalid\n",
			pathlen, path, it->subtree_nr);
#endif

	if (0 <= it->entry_count) {
		strbuf_add(buffer, it->oid.hash, the_hash_algo->rawsz);
	}
	for (i = 0; i < it->subtree_nr; i++) {
		struct cache_tree_sub *down = it->down[i];
		if (i) {
			struct cache_tree_sub *prev = it->down[i-1];
			if (subtree_name_cmp(down->name, down->namelen,
					     prev->name, prev->namelen) <= 0)
				die("fatal - unsorted cache subtree");
		}
		write_one(buffer, down->cache_tree, down->name, down->namelen);
	}
}

void cache_tree_write(struct strbuf *sb, struct cache_tree *root)
{
	write_one(sb, root, "", 0);
}
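
/*
 * Read one "cache-tree" entry in the format produced by write_one()
 * above, i.e.
 *
 *   <path> NUL <entry_count> SP <subtree_nr> LF <raw object name>
 *
 * (the object name is omitted for an invalid entry), followed by
 * subtree_nr subtree entries, recursively.  Advances *buffer and
 * *size_p past what was consumed; returns NULL if the data is
 * truncated or malformed.
 */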
static struct cache_tree *read_one(const char **buffer, unsigned long *size_p)
{
	const char *buf = *buffer;
	unsigned long size = *size_p;
	const char *cp;
	char *ep;
	struct cache_tree *it;
	int i, subtree_nr;
	const unsigned rawsz = the_hash_algo->rawsz;

	it = NULL;
	/* skip name, but make sure name exists */
	while (size && *buf) {
		size--;
		buf++;
	}
	if (!size)
		goto free_return;
	buf++; size--;
	it = cache_tree();

	cp = buf;
	it->entry_count = strtol(cp, &ep, 10);
	if (cp == ep)
		goto free_return;
	cp = ep;
	subtree_nr = strtol(cp, &ep, 10);
	if (cp == ep)
		goto free_return;
	while (size && *buf && *buf != '\n') {
		size--;
		buf++;
	}
	if (!size)
		goto free_return;
	buf++; size--;
	if (0 <= it->entry_count) {
		if (size < rawsz)
			goto free_return;
		oidread(&it->oid, (const unsigned char *)buf);
		buf += rawsz;
		size -= rawsz;
	}

#if DEBUG_CACHE_TREE
	if (0 <= it->entry_count)
		fprintf(stderr, "cache-tree <%s> (%d ent, %d subtree) %s\n",
			*buffer, it->entry_count, subtree_nr,
			oid_to_hex(&it->oid));
	else
		fprintf(stderr, "cache-tree <%s> (%d subtrees) invalid\n",
			*buffer, subtree_nr);
#endif

	/*
	 * Just a heuristic -- we do not add directories that often but
	 * we do not want to have to extend it immediately when we do,
	 * hence +2.
	 */
	it->subtree_alloc = subtree_nr + 2;
	it->down = xcalloc(it->subtree_alloc, sizeof(struct cache_tree_sub *));
	for (i = 0; i < subtree_nr; i++) {
		/* read each subtree */
		struct cache_tree *sub;
		struct cache_tree_sub *subtree;
		const char *name = buf;

		sub = read_one(&buf, &size);
		if (!sub)
			goto free_return;
		subtree = cache_tree_sub(it, name);
		subtree->cache_tree = sub;
	}
	if (subtree_nr != it->subtree_nr)
		die("cache-tree: internal error");
	*buffer = buf;
	*size_p = size;
	return it;

 free_return:
	cache_tree_free(&it);
	return NULL;
}

struct cache_tree *cache_tree_read(const char *buffer, unsigned long size)
{
	if (buffer[0])
		return NULL; /* not the whole tree */
	return read_one(&buffer, &size);
}
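
/*
 * Descend from "it" one path component at a time; return the
 * cache_tree at "path", or NULL when any component is missing.
 */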
static struct cache_tree *cache_tree_find(struct cache_tree *it, const char *path)
{
	if (!it)
		return NULL;
	while (*path) {
		const char *slash;
		struct cache_tree_sub *sub;

		slash = strchrnul(path, '/');
		/*
		 * Between path and slash is the name of the subtree
		 * to look for.
		 */
		sub = find_subtree(it, path, slash - path, 0);
		if (!sub)
			return NULL;
		it = sub->cache_tree;

		path = slash;
		while (*path == '/')
			path++;
	}
	return it;
}

static int write_index_as_tree_internal(struct object_id *oid,
					struct index_state *index_state,
					int cache_tree_valid,
					int flags,
					const char *prefix)
{
	if (flags & WRITE_TREE_IGNORE_CACHE_TREE) {
		cache_tree_free(&index_state->cache_tree);
		cache_tree_valid = 0;
	}

	if (!index_state->cache_tree)
		index_state->cache_tree = cache_tree();

	if (!cache_tree_valid && cache_tree_update(index_state, flags) < 0)
		return WRITE_TREE_UNMERGED_INDEX;

	if (prefix) {
		struct cache_tree *subtree;
		subtree = cache_tree_find(index_state->cache_tree, prefix);
		if (!subtree)
			return WRITE_TREE_PREFIX_ERROR;
		oidcpy(oid, &subtree->oid);
	}
	else
		oidcpy(oid, &index_state->cache_tree->oid);

	return 0;
}

struct tree *write_in_core_index_as_tree(struct repository *repo)
{
	struct object_id o;
	int was_valid, ret;

	struct index_state *index_state = repo->index;
	was_valid = index_state->cache_tree &&
		    cache_tree_fully_valid(index_state->cache_tree);

	ret = write_index_as_tree_internal(&o, index_state, was_valid, 0, NULL);
	if (ret == WRITE_TREE_UNMERGED_INDEX) {
		int i;
		fprintf(stderr, "BUG: There are unmerged index entries:\n");
		for (i = 0; i < index_state->cache_nr; i++) {
			const struct cache_entry *ce = index_state->cache[i];
			if (ce_stage(ce))
				fprintf(stderr, "BUG: %d %.*s\n", ce_stage(ce),
					(int)ce_namelen(ce), ce->name);
		}
		BUG("unmerged index entries when writing in-memory index");
	}

	return lookup_tree(repo, &index_state->cache_tree->oid);
}

int write_index_as_tree(struct object_id *oid, struct index_state *index_state, const char *index_path, int flags, const char *prefix)
{
	int entries, was_valid;
	struct lock_file lock_file = LOCK_INIT;
	int ret;

	hold_lock_file_for_update(&lock_file, index_path, LOCK_DIE_ON_ERROR);

	entries = read_index_from(index_state, index_path, get_git_dir());
	if (entries < 0) {
		ret = WRITE_TREE_UNREADABLE_INDEX;
		goto out;
	}

	was_valid = !(flags & WRITE_TREE_IGNORE_CACHE_TREE) &&
		    index_state->cache_tree &&
		    cache_tree_fully_valid(index_state->cache_tree);

	ret = write_index_as_tree_internal(oid, index_state, was_valid, flags,
					   prefix);
	if (!ret && !was_valid) {
		write_locked_index(index_state, &lock_file, COMMIT_LOCK);
		/* Not being able to write is fine -- we are only interested
		 * in updating the cache-tree part, and if the next caller
		 * ends up using the old index with an unupdated cache-tree
		 * part it misses the work we did here, but that is just a
		 * performance penalty and not a big deal.
		 */
	}

out:
	rollback_lock_file(&lock_file);
	return ret;
}
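
/*
 * Populate a cache_tree from an already-written tree object,
 * recursively parsing and priming its subtrees.
 */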
static void prime_cache_tree_rec(struct repository *r,
				 struct cache_tree *it,
				 struct tree *tree)
{
	struct tree_desc desc;
	struct name_entry entry;
	int cnt;

	oidcpy(&it->oid, &tree->object.oid);
	init_tree_desc(&desc, tree->buffer, tree->size);
	cnt = 0;
	while (tree_entry(&desc, &entry)) {
		if (!S_ISDIR(entry.mode))
			cnt++;
		else {
			struct cache_tree_sub *sub;
			struct tree *subtree = lookup_tree(r, &entry.oid);
			if (!subtree->object.parsed)
				parse_tree(subtree);
			sub = cache_tree_sub(it, entry.path);
			sub->cache_tree = cache_tree();
			prime_cache_tree_rec(r, sub->cache_tree, subtree);
			cnt += sub->cache_tree->entry_count;
		}
	}
	it->entry_count = cnt;
}

void prime_cache_tree(struct repository *r,
		      struct index_state *istate,
		      struct tree *tree)
{
	cache_tree_free(&istate->cache_tree);
	istate->cache_tree = cache_tree();
	prime_cache_tree_rec(r, istate->cache_tree, tree);
	istate->cache_changed |= CACHE_TREE_CHANGED;
}

/*
 * find the cache_tree that corresponds to the current level without
 * exploding the full path into textual form.  The root of the
 * cache tree is given as "root", and our current level is "info".
 * (1) When at root level, info->prev is NULL, so it is "root" itself.
 * (2) Otherwise, find the cache_tree that corresponds to one level
 *     above us, and find ourselves in there.
 */
static struct cache_tree *find_cache_tree_from_traversal(struct cache_tree *root,
							 struct traverse_info *info)
{
	struct cache_tree *our_parent;

	if (!info->prev)
		return root;
	our_parent = find_cache_tree_from_traversal(root, info->prev);
	return cache_tree_find(our_parent, info->name);
}

int cache_tree_matches_traversal(struct cache_tree *root,
				 struct name_entry *ent,
				 struct traverse_info *info)
{
	struct cache_tree *it;

	it = find_cache_tree_from_traversal(root, info);
	it = cache_tree_find(it, ent->path);
	if (it && it->entry_count > 0 && oideq(&ent->oid, &it->oid))
		return it->entry_count;
	return 0;
}
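
/*
 * Recompute the tree object for "it" from the index entries it claims
 * to cover, and BUG() out if the result does not hash to the recorded
 * object name.
 */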
static void verify_one(struct repository *r,
		       struct index_state *istate,
		       struct cache_tree *it,
		       struct strbuf *path)
{
	int i, pos, len = path->len;
	struct strbuf tree_buf = STRBUF_INIT;
	struct object_id new_oid;

	for (i = 0; i < it->subtree_nr; i++) {
		strbuf_addf(path, "%s/", it->down[i]->name);
		verify_one(r, istate, it->down[i]->cache_tree, path);
		strbuf_setlen(path, len);
	}

	if (it->entry_count < 0 ||
	    /* no verification on tests (t7003) that replace trees */
	    lookup_replace_object(r, &it->oid) != &it->oid)
		return;

	if (path->len) {
		pos = index_name_pos(istate, path->buf, path->len);
		pos = -pos - 1;
	} else {
		pos = 0;
	}

	i = 0;
	while (i < it->entry_count) {
		struct cache_entry *ce = istate->cache[pos + i];
		const char *slash;
		struct cache_tree_sub *sub = NULL;
		const struct object_id *oid;
		const char *name;
		unsigned mode;
		int entlen;

		if (ce->ce_flags & (CE_STAGEMASK | CE_INTENT_TO_ADD | CE_REMOVE))
			BUG("%s with flags 0x%x should not be in cache-tree",
			    ce->name, ce->ce_flags);
		name = ce->name + path->len;
		slash = strchr(name, '/');
		if (slash) {
			entlen = slash - name;
			sub = find_subtree(it, ce->name + path->len, entlen, 0);
			if (!sub || sub->cache_tree->entry_count < 0)
				BUG("bad subtree '%.*s'", entlen, name);
			oid = &sub->cache_tree->oid;
			mode = S_IFDIR;
			i += sub->cache_tree->entry_count;
		} else {
			oid = &ce->oid;
			mode = ce->ce_mode;
			entlen = ce_namelen(ce) - path->len;
			i++;
		}
		strbuf_addf(&tree_buf, "%o %.*s%c", mode, entlen, name, '\0');
		strbuf_add(&tree_buf, oid->hash, r->hash_algo->rawsz);
	}
	hash_object_file(r->hash_algo, tree_buf.buf, tree_buf.len, tree_type,
			 &new_oid);
	if (!oideq(&new_oid, &it->oid))
		BUG("cache-tree for path %.*s does not match. "
		    "Expected %s got %s", len, path->buf,
		    oid_to_hex(&new_oid), oid_to_hex(&it->oid));
	strbuf_setlen(path, len);
	strbuf_release(&tree_buf);
}

void cache_tree_verify(struct repository *r, struct index_state *istate)
{
	struct strbuf path = STRBUF_INIT;

	if (!istate->cache_tree)
		return;
	verify_one(r, istate, istate->cache_tree, &path);
	strbuf_release(&path);
}