shallow.c

#include "cache.h"
#include "repository.h"
#include "tempfile.h"
#include "lockfile.h"
#include "object-store.h"
#include "commit.h"
#include "tag.h"
#include "pkt-line.h"
#include "remote.h"
#include "refs.h"
#include "oid-array.h"
#include "diff.h"
#include "revision.h"
#include "commit-slab.h"
#include "list-objects.h"
#include "commit-reach.h"
#include "shallow.h"
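
/*
 * Point this repository at an alternate shallow file (e.g. a temporary
 * one prepared by fetch-pack) instead of .git/shallow. This must happen
 * before the shallow state is first read; see the BUG() check below.
 */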
void set_alternate_shallow_file(struct repository *r, const char *path, int override)
{
	if (r->parsed_objects->is_shallow != -1)
		BUG("is_repository_shallow must not be called before set_alternate_shallow_file");
	if (r->parsed_objects->alternate_shallow_file && !override)
		return;
	free(r->parsed_objects->alternate_shallow_file);
	r->parsed_objects->alternate_shallow_file = xstrdup_or_null(path);
}
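
/*
 * Record a shallow boundary: the commit is registered as a graft with
 * nr_parent == -1, and any parents already parsed in-core are dropped.
 */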
int register_shallow(struct repository *r, const struct object_id *oid)
{
	struct commit_graft *graft =
		xmalloc(sizeof(struct commit_graft));
	struct commit *commit = lookup_commit(r, oid);

	oidcpy(&graft->oid, oid);
	graft->nr_parent = -1;
	if (commit && commit->object.parsed)
		commit->parents = NULL;
	return register_commit_graft(r, graft, 0);
}
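
/*
 * Remove the graft entry for this object ID, if any; returns 0 on
 * success and -1 if no such graft exists.
 */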
int unregister_shallow(const struct object_id *oid)
{
	int pos = commit_graft_pos(the_repository, oid->hash);

	if (pos < 0)
		return -1;
	if (pos + 1 < the_repository->parsed_objects->grafts_nr)
		MOVE_ARRAY(the_repository->parsed_objects->grafts + pos,
			   the_repository->parsed_objects->grafts + pos + 1,
			   the_repository->parsed_objects->grafts_nr - pos - 1);
	the_repository->parsed_objects->grafts_nr--;
	return 0;
}
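
/*
 * Lazily read the shallow file (or the configured alternate) and cache
 * the answer in r->parsed_objects->is_shallow. Each line of the file is
 * an object ID that gets registered as a shallow graft.
 */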
int is_repository_shallow(struct repository *r)
{
	FILE *fp;
	char buf[1024];
	const char *path = r->parsed_objects->alternate_shallow_file;

	if (r->parsed_objects->is_shallow >= 0)
		return r->parsed_objects->is_shallow;

	if (!path)
		path = git_path_shallow(r);
	/*
	 * fetch-pack sets '--shallow-file ""' as an indicator that no
	 * shallow file should be used. We could just open it and it
	 * will likely fail. But let's do an explicit check instead.
	 */
	if (!*path || (fp = fopen(path, "r")) == NULL) {
		stat_validity_clear(r->parsed_objects->shallow_stat);
		r->parsed_objects->is_shallow = 0;
		return r->parsed_objects->is_shallow;
	}
	stat_validity_update(r->parsed_objects->shallow_stat, fileno(fp));
	r->parsed_objects->is_shallow = 1;

	while (fgets(buf, sizeof(buf), fp)) {
		struct object_id oid;
		if (get_oid_hex(buf, &oid))
			die("bad shallow line: %s", buf);
		register_shallow(r, &oid);
	}
	fclose(fp);
	return r->parsed_objects->is_shallow;
}
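
/*
 * Helpers for updating .git/shallow under a lock. Both committing and
 * rolling back invalidate the cached in-core shallow state, so it is
 * re-read on the next query.
 */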
static void reset_repository_shallow(struct repository *r)
{
	r->parsed_objects->is_shallow = -1;
	stat_validity_clear(r->parsed_objects->shallow_stat);
}

int commit_shallow_file(struct repository *r, struct shallow_lock *lk)
{
	int res = commit_lock_file(&lk->lock);
	reset_repository_shallow(r);
	return res;
}

void rollback_shallow_file(struct repository *r, struct shallow_lock *lk)
{
	rollback_lock_file(&lk->lock);
	reset_repository_shallow(r);
}
/*
 * TODO: use "int" elemtype instead of "int *" when/if commit-slab
 * supports a "valid" flag.
 */
define_commit_slab(commit_depth, int *);

static void free_depth_in_slab(int **ptr)
{
	FREE_AND_NULL(*ptr);
}
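
/*
 * Depth-first walk from each head, counting depth along the way. A
 * commit becomes a boundary (and gets shallow_flag) when the walk
 * reaches "depth", or when the commit is already a shallow graft in
 * this repository; everything above it is marked with not_shallow_flag.
 */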
struct commit_list *get_shallow_commits(struct object_array *heads, int depth,
					int shallow_flag, int not_shallow_flag)
{
	int i = 0, cur_depth = 0;
	struct commit_list *result = NULL;
	struct object_array stack = OBJECT_ARRAY_INIT;
	struct commit *commit = NULL;
	struct commit_graft *graft;
	struct commit_depth depths;

	init_commit_depth(&depths);
	while (commit || i < heads->nr || stack.nr) {
		struct commit_list *p;
		if (!commit) {
			if (i < heads->nr) {
				int **depth_slot;
				commit = (struct commit *)
					deref_tag(the_repository,
						  heads->objects[i++].item,
						  NULL, 0);
				if (!commit || commit->object.type != OBJ_COMMIT) {
					commit = NULL;
					continue;
				}
				depth_slot = commit_depth_at(&depths, commit);
				if (!*depth_slot)
					*depth_slot = xmalloc(sizeof(int));
				**depth_slot = 0;
				cur_depth = 0;
			} else {
				commit = (struct commit *)
					object_array_pop(&stack);
				cur_depth = **commit_depth_at(&depths, commit);
			}
		}
		parse_commit_or_die(commit);
		cur_depth++;
		if ((depth != INFINITE_DEPTH && cur_depth >= depth) ||
		    (is_repository_shallow(the_repository) && !commit->parents &&
		     (graft = lookup_commit_graft(the_repository, &commit->object.oid)) != NULL &&
		     graft->nr_parent < 0)) {
			commit_list_insert(commit, &result);
			commit->object.flags |= shallow_flag;
			commit = NULL;
			continue;
		}
		commit->object.flags |= not_shallow_flag;
		for (p = commit->parents, commit = NULL; p; p = p->next) {
			int **depth_slot = commit_depth_at(&depths, p->item);
			if (!*depth_slot) {
				*depth_slot = xmalloc(sizeof(int));
				**depth_slot = cur_depth;
			} else {
				if (cur_depth >= **depth_slot)
					continue;
				**depth_slot = cur_depth;
			}
			if (p->next)
				add_object_array(&p->item->object,
						 NULL, &stack);
			else {
				commit = p->item;
				cur_depth = **commit_depth_at(&depths, commit);
			}
		}
	}
	deep_clear_commit_depth(&depths, free_depth_in_slab);
	return result;
}
static void show_commit(struct commit *commit, void *data)
{
	commit_list_insert(commit, data);
}
/*
 * Given rev-list arguments, run rev-list. All reachable commits
 * except border ones are marked with not_shallow_flag. Border commits
 * are marked with shallow_flag. The list of border/shallow commits
 * is also returned.
 */
struct commit_list *get_shallow_commits_by_rev_list(int ac, const char **av,
						    int shallow_flag,
						    int not_shallow_flag)
{
	struct commit_list *result = NULL, *p;
	struct commit_list *not_shallow_list = NULL;
	struct rev_info revs;
	int both_flags = shallow_flag | not_shallow_flag;

	/*
	 * SHALLOW (excluded) and NOT_SHALLOW (included) should not be
	 * set at this point. But better be safe than sorry.
	 */
	clear_object_flags(both_flags);

	is_repository_shallow(the_repository); /* make sure shallows are read */

	repo_init_revisions(the_repository, &revs, NULL);
	save_commit_buffer = 0;
	setup_revisions(ac, av, &revs, NULL);

	if (prepare_revision_walk(&revs))
		die("revision walk setup failed");
	traverse_commit_list(&revs, show_commit, NULL, &not_shallow_list);

	if (!not_shallow_list)
		die("no commits selected for shallow requests");

	/* Mark all reachable commits as NOT_SHALLOW */
	for (p = not_shallow_list; p; p = p->next)
		p->item->object.flags |= not_shallow_flag;

	/*
	 * mark border commits SHALLOW + NOT_SHALLOW.
	 * We cannot clear NOT_SHALLOW right now. Imagine border
	 * commit A is processed first, then commit B, whose parent is
	 * A, later. If NOT_SHALLOW on A is cleared at step 1, B
	 * itself is considered border at step 2, which is incorrect.
	 */
	for (p = not_shallow_list; p; p = p->next) {
		struct commit *c = p->item;
		struct commit_list *parent;

		if (parse_commit(c))
			die("unable to parse commit %s",
			    oid_to_hex(&c->object.oid));

		for (parent = c->parents; parent; parent = parent->next)
			if (!(parent->item->object.flags & not_shallow_flag)) {
				c->object.flags |= shallow_flag;
				commit_list_insert(c, &result);
				break;
			}
	}
	free_commit_list(not_shallow_list);

	/*
	 * Now we can clean up NOT_SHALLOW on border commits. Having
	 * both flags set can confuse the caller.
	 */
	for (p = result; p; p = p->next) {
		struct object *o = &p->item->object;
		if ((o->flags & both_flags) == both_flags)
			o->flags &= ~not_shallow_flag;
	}
	return result;
}
static void check_shallow_file_for_update(struct repository *r)
{
	if (r->parsed_objects->is_shallow == -1)
		BUG("shallow must be initialized by now");

	if (!stat_validity_check(r->parsed_objects->shallow_stat,
				 git_path_shallow(r)))
		die("shallow file has changed since we read it");
}
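
/*
 * Flags for write_shallow_commits_1(): SEEN_ONLY keeps only grafts
 * whose commits carry the SEEN flag, VERBOSE reports removals, and
 * QUICK keeps only grafts whose objects still exist.
 */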
#define SEEN_ONLY 1
#define VERBOSE   2
#define QUICK     4

struct write_shallow_data {
	struct strbuf *out;
	int use_pack_protocol;
	int count;
	unsigned flags;
};
static int write_one_shallow(const struct commit_graft *graft, void *cb_data)
{
	struct write_shallow_data *data = cb_data;
	const char *hex = oid_to_hex(&graft->oid);

	if (graft->nr_parent != -1)
		return 0;
	if (data->flags & QUICK) {
		if (!has_object_file(&graft->oid))
			return 0;
	} else if (data->flags & SEEN_ONLY) {
		struct commit *c = lookup_commit(the_repository, &graft->oid);
		if (!c || !(c->object.flags & SEEN)) {
			/* use "hex"; "c" may be NULL here */
			if (data->flags & VERBOSE)
				printf("Removing %s from .git/shallow\n",
				       hex);
			return 0;
		}
	}
	data->count++;
	if (data->use_pack_protocol)
		packet_buf_write(data->out, "shallow %s", hex);
	else {
		strbuf_addstr(data->out, hex);
		strbuf_addch(data->out, '\n');
	}
	return 0;
}
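
/*
 * Write one line per shallow graft to "out" (pkt-line or plain hex,
 * per use_pack_protocol), append any "extra" object IDs as plain hex
 * lines, and return the number of entries written.
 */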
static int write_shallow_commits_1(struct strbuf *out, int use_pack_protocol,
				   const struct oid_array *extra,
				   unsigned flags)
{
	struct write_shallow_data data;
	int i;

	data.out = out;
	data.use_pack_protocol = use_pack_protocol;
	data.count = 0;
	data.flags = flags;
	for_each_commit_graft(write_one_shallow, &data);
	if (!extra)
		return data.count;
	for (i = 0; i < extra->nr; i++) {
		strbuf_addstr(out, oid_to_hex(extra->oid + i));
		strbuf_addch(out, '\n');
		data.count++;
	}
	return data.count;
}
int write_shallow_commits(struct strbuf *out, int use_pack_protocol,
			  const struct oid_array *extra)
{
	return write_shallow_commits_1(out, use_pack_protocol, extra, 0);
}
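
/*
 * Write the current shallow list (plus "extra") to a temporary file
 * and return its path, or the empty string if there is nothing to
 * write.
 */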
const char *setup_temporary_shallow(const struct oid_array *extra)
{
	struct tempfile *temp;
	struct strbuf sb = STRBUF_INIT;

	if (write_shallow_commits(&sb, 0, extra)) {
		temp = xmks_tempfile(git_path("shallow_XXXXXX"));

		if (write_in_full(temp->fd, sb.buf, sb.len) < 0 ||
		    close_tempfile_gently(temp) < 0)
			die_errno("failed to write to %s",
				  get_tempfile_path(temp));
		strbuf_release(&sb);
		return get_tempfile_path(temp);
	}
	/*
	 * is_repository_shallow() sees empty string as "no shallow
	 * file".
	 */
	return "";
}
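
/*
 * Take the lock on .git/shallow, write the current shallow list (plus
 * "extra") to the lockfile, and point *alternate_shallow_file at it.
 * The lock stays held; the caller eventually commits or rolls it back.
 */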
void setup_alternate_shallow(struct shallow_lock *shallow_lock,
			     const char **alternate_shallow_file,
			     const struct oid_array *extra)
{
	struct strbuf sb = STRBUF_INIT;
	int fd;

	fd = hold_lock_file_for_update(&shallow_lock->lock,
				       git_path_shallow(the_repository),
				       LOCK_DIE_ON_ERROR);
	check_shallow_file_for_update(the_repository);
	if (write_shallow_commits(&sb, 0, extra)) {
		if (write_in_full(fd, sb.buf, sb.len) < 0)
			die_errno("failed to write to %s",
				  get_lock_file_path(&shallow_lock->lock));
		*alternate_shallow_file = get_lock_file_path(&shallow_lock->lock);
	} else
		/*
		 * is_repository_shallow() sees empty string as "no
		 * shallow file".
		 */
		*alternate_shallow_file = "";
	strbuf_release(&sb);
}
static int advertise_shallow_grafts_cb(const struct commit_graft *graft, void *cb)
{
	int fd = *(int *)cb;
	if (graft->nr_parent == -1)
		packet_write_fmt(fd, "shallow %s\n", oid_to_hex(&graft->oid));
	return 0;
}

void advertise_shallow_grafts(int fd)
{
	if (!is_repository_shallow(the_repository))
		return;
	for_each_commit_graft(advertise_shallow_grafts_cb, &fd);
}
/*
 * mark_reachable_objects() should have been run prior to this and all
 * reachable commits marked as "SEEN", except when quick_prune is non-zero,
 * in which case lines are excised from the shallow file if they refer to
 * commits that do not exist (any longer).
 */
void prune_shallow(unsigned options)
{
	struct shallow_lock shallow_lock = SHALLOW_LOCK_INIT;
	struct strbuf sb = STRBUF_INIT;
	unsigned flags = SEEN_ONLY;
	int fd;

	if (options & PRUNE_QUICK)
		flags |= QUICK;

	if (options & PRUNE_SHOW_ONLY) {
		flags |= VERBOSE;
		write_shallow_commits_1(&sb, 0, NULL, flags);
		strbuf_release(&sb);
		return;
	}
	fd = hold_lock_file_for_update(&shallow_lock.lock,
				       git_path_shallow(the_repository),
				       LOCK_DIE_ON_ERROR);
	check_shallow_file_for_update(the_repository);
	if (write_shallow_commits_1(&sb, 0, NULL, flags)) {
		if (write_in_full(fd, sb.buf, sb.len) < 0)
			die_errno("failed to write to %s",
				  get_lock_file_path(&shallow_lock.lock));
		commit_shallow_file(the_repository, &shallow_lock);
	} else {
		unlink(git_path_shallow(the_repository));
		rollback_shallow_file(the_repository, &shallow_lock);
	}
	strbuf_release(&sb);
}
struct trace_key trace_shallow = TRACE_KEY_INIT(SHALLOW);

/*
 * Step 1, split sender shallow commits into "ours" and "theirs"
 * Step 2, clean "ours" based on .git/shallow
 */
void prepare_shallow_info(struct shallow_info *info, struct oid_array *sa)
{
	int i;

	trace_printf_key(&trace_shallow, "shallow: prepare_shallow_info\n");
	memset(info, 0, sizeof(*info));
	info->shallow = sa;
	if (!sa)
		return;
	ALLOC_ARRAY(info->ours, sa->nr);
	ALLOC_ARRAY(info->theirs, sa->nr);
	for (i = 0; i < sa->nr; i++) {
		if (has_object_file(sa->oid + i)) {
			struct commit_graft *graft;
			graft = lookup_commit_graft(the_repository,
						    &sa->oid[i]);
			if (graft && graft->nr_parent < 0)
				continue;
			info->ours[info->nr_ours++] = i;
		} else
			info->theirs[info->nr_theirs++] = i;
	}
}
void clear_shallow_info(struct shallow_info *info)
{
	free(info->ours);
	free(info->theirs);
}
/* Step 4, remove non-existent ones in "theirs" after getting the pack */
void remove_nonexistent_theirs_shallow(struct shallow_info *info)
{
	struct object_id *oid = info->shallow->oid;
	int i, dst;

	trace_printf_key(&trace_shallow, "shallow: remove_nonexistent_theirs_shallow\n");
	for (i = dst = 0; i < info->nr_theirs; i++) {
		if (i != dst)
			info->theirs[dst] = info->theirs[i];
		if (has_object_file(oid + info->theirs[i]))
			dst++;
	}
	info->nr_theirs = dst;
}
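
/*
 * One bitmap per commit, with one bit per ref: bit n is set if ref n
 * can reach the commit. Bitmaps are carved out of large pools to avoid
 * one allocation per commit.
 */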
define_commit_slab(ref_bitmap, uint32_t *);

#define POOL_SIZE (512 * 1024)

struct paint_info {
	struct ref_bitmap ref_bitmap;
	unsigned nr_bits;
	char **pools;
	char *free, *end;
	unsigned pool_count;
};
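
/*
 * Hand out one bitmap's worth of memory from the current pool,
 * allocating a new pool when the current one runs out.
 */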
static uint32_t *paint_alloc(struct paint_info *info)
{
	unsigned nr = DIV_ROUND_UP(info->nr_bits, 32);
	unsigned size = nr * sizeof(uint32_t);
	void *p;

	if (!info->pool_count || size > info->end - info->free) {
		if (size > POOL_SIZE)
			BUG("pool size too small for %d in paint_alloc()",
			    size);
		info->pool_count++;
		REALLOC_ARRAY(info->pools, info->pool_count);
		info->free = xmalloc(POOL_SIZE);
		info->pools[info->pool_count - 1] = info->free;
		info->end = info->free + POOL_SIZE;
	}
	p = info->free;
	info->free += size;
	return p;
}
/*
 * Given a commit's object ID, walk down to parents until either SEEN,
 * UNINTERESTING or BOTTOM is hit. Set the id-th bit in ref_bitmap for
 * all walked commits.
 */
static void paint_down(struct paint_info *info, const struct object_id *oid,
		       unsigned int id)
{
	unsigned int i, nr;
	struct commit_list *head = NULL;
	int bitmap_nr = DIV_ROUND_UP(info->nr_bits, 32);
	size_t bitmap_size = st_mult(sizeof(uint32_t), bitmap_nr);
	struct commit *c = lookup_commit_reference_gently(the_repository, oid,
							  1);
	uint32_t *tmp; /* to be freed before return */
	uint32_t *bitmap;

	if (!c)
		return;

	tmp = xmalloc(bitmap_size);
	bitmap = paint_alloc(info);
	memset(bitmap, 0, bitmap_size);
	bitmap[id / 32] |= (1U << (id % 32));
	commit_list_insert(c, &head);
	while (head) {
		struct commit_list *p;
		struct commit *c = pop_commit(&head);
		uint32_t **refs = ref_bitmap_at(&info->ref_bitmap, c);

		/* XXX check "UNINTERESTING" from pack bitmaps if available */
		if (c->object.flags & (SEEN | UNINTERESTING))
			continue;
		else
			c->object.flags |= SEEN;

		if (*refs == NULL)
			*refs = bitmap;
		else {
			memcpy(tmp, *refs, bitmap_size);
			for (i = 0; i < bitmap_nr; i++)
				tmp[i] |= bitmap[i];
			if (memcmp(tmp, *refs, bitmap_size)) {
				*refs = paint_alloc(info);
				memcpy(*refs, tmp, bitmap_size);
			}
		}

		if (c->object.flags & BOTTOM)
			continue;

		if (parse_commit(c))
			die("unable to parse commit %s",
			    oid_to_hex(&c->object.oid));

		for (p = c->parents; p; p = p->next) {
			if (p->item->object.flags & SEEN)
				continue;
			commit_list_insert(p->item, &head);
		}
	}

	nr = get_max_object_index();
	for (i = 0; i < nr; i++) {
		struct object *o = get_indexed_object(i);
		if (o && o->type == OBJ_COMMIT)
			o->flags &= ~SEEN;
	}

	free(tmp);
}
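
/*
 * each_ref_fn callback: mark the commit at the ref tip, and its
 * parents, UNINTERESTING.
 */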
static int mark_uninteresting(const char *refname, const struct object_id *oid,
			      int flags, void *cb_data)
{
	struct commit *commit = lookup_commit_reference_gently(the_repository,
							       oid, 1);
	if (!commit)
		return 0;
	commit->object.flags |= UNINTERESTING;
	mark_parents_uninteresting(commit);
	return 0;
}
static void post_assign_shallow(struct shallow_info *info,
				struct ref_bitmap *ref_bitmap,
				int *ref_status);
/*
 * Step 6(+7), associate shallow commits with new refs
 *
 * info->ref must be initialized before calling this function.
 *
 * If used is not NULL, it's an array of info->shallow->nr
 * bitmaps. The n-th bit is set in the m-th bitmap if ref[n] needs the
 * m-th shallow commit from info->shallow.
 *
 * If used is NULL, "ours" and "theirs" are updated. And if ref_status
 * is not NULL it's an array of ref->nr ints. ref_status[i] is true if
 * the ref needs some shallow commits from either info->ours or
 * info->theirs.
 */
void assign_shallow_commits_to_refs(struct shallow_info *info,
				    uint32_t **used, int *ref_status)
{
	struct object_id *oid = info->shallow->oid;
	struct oid_array *ref = info->ref;
	unsigned int i, nr;
	int *shallow, nr_shallow = 0;
	struct paint_info pi;

	trace_printf_key(&trace_shallow, "shallow: assign_shallow_commits_to_refs\n");
	ALLOC_ARRAY(shallow, info->nr_ours + info->nr_theirs);
	for (i = 0; i < info->nr_ours; i++)
		shallow[nr_shallow++] = info->ours[i];
	for (i = 0; i < info->nr_theirs; i++)
		shallow[nr_shallow++] = info->theirs[i];

	/*
	 * Prepare the commit graph to track what refs can reach what
	 * (new) shallow commits.
	 */
	nr = get_max_object_index();
	for (i = 0; i < nr; i++) {
		struct object *o = get_indexed_object(i);
		if (!o || o->type != OBJ_COMMIT)
			continue;
		o->flags &= ~(UNINTERESTING | BOTTOM | SEEN);
	}

	memset(&pi, 0, sizeof(pi));
	init_ref_bitmap(&pi.ref_bitmap);
	pi.nr_bits = ref->nr;

	/*
	 * "--not --all" to cut short the traversal if new refs
	 * connect to old refs. If not (e.g. force ref updates) it'll
	 * have to go down to the current shallow commits.
	 */
	head_ref(mark_uninteresting, NULL);
	for_each_ref(mark_uninteresting, NULL);

	/* Mark potential bottoms so we won't go out of bound */
	for (i = 0; i < nr_shallow; i++) {
		struct commit *c = lookup_commit(the_repository,
						 &oid[shallow[i]]);
		c->object.flags |= BOTTOM;
	}

	for (i = 0; i < ref->nr; i++)
		paint_down(&pi, ref->oid + i, i);

	if (used) {
		int bitmap_size = DIV_ROUND_UP(pi.nr_bits, 32) * sizeof(uint32_t);
		memset(used, 0, sizeof(*used) * info->shallow->nr);
		for (i = 0; i < nr_shallow; i++) {
			const struct commit *c = lookup_commit(the_repository,
							       &oid[shallow[i]]);
			uint32_t **map = ref_bitmap_at(&pi.ref_bitmap, c);
			if (*map)
				used[shallow[i]] = xmemdupz(*map, bitmap_size);
		}
		/*
		 * unreachable shallow commits are not removed from
		 * "ours" and "theirs". The user is supposed to run
		 * step 7 on every ref separately and not trust "ours"
		 * and "theirs" any more.
		 */
	} else
		post_assign_shallow(info, &pi.ref_bitmap, ref_status);

	clear_ref_bitmap(&pi.ref_bitmap);
	for (i = 0; i < pi.pool_count; i++)
		free(pi.pools[i]);
	free(pi.pools);
	free(shallow);
}
struct commit_array {
	struct commit **commits;
	int nr, alloc;
};

static int add_ref(const char *refname, const struct object_id *oid,
		   int flags, void *cb_data)
{
	struct commit_array *ca = cb_data;
	ALLOC_GROW(ca->commits, ca->nr + 1, ca->alloc);
	ca->commits[ca->nr] = lookup_commit_reference_gently(the_repository,
							     oid, 1);
	if (ca->commits[ca->nr])
		ca->nr++;
	return 0;
}
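
/* Increment ref_status[i] for every ref whose bit is set in "bitmap" */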
static void update_refstatus(int *ref_status, int nr, uint32_t *bitmap)
{
	unsigned int i;
	if (!ref_status)
		return;
	for (i = 0; i < nr; i++)
		if (bitmap[i / 32] & (1U << (i % 32)))
			ref_status[i]++;
}
/*
 * Step 7, reachability test on "ours" at commit level
 */
static void post_assign_shallow(struct shallow_info *info,
				struct ref_bitmap *ref_bitmap,
				int *ref_status)
{
	struct object_id *oid = info->shallow->oid;
	struct commit *c;
	uint32_t **bitmap;
	int dst, i, j;
	int bitmap_nr = DIV_ROUND_UP(info->ref->nr, 32);
	struct commit_array ca;

	trace_printf_key(&trace_shallow, "shallow: post_assign_shallow\n");
	if (ref_status)
		memset(ref_status, 0, sizeof(*ref_status) * info->ref->nr);

	/* Remove unreachable shallow commits from "theirs" */
	for (i = dst = 0; i < info->nr_theirs; i++) {
		if (i != dst)
			info->theirs[dst] = info->theirs[i];
		c = lookup_commit(the_repository, &oid[info->theirs[i]]);
		bitmap = ref_bitmap_at(ref_bitmap, c);
		if (!*bitmap)
			continue;
		for (j = 0; j < bitmap_nr; j++)
			if (bitmap[0][j]) {
				update_refstatus(ref_status, info->ref->nr, *bitmap);
				dst++;
				break;
			}
	}
	info->nr_theirs = dst;

	memset(&ca, 0, sizeof(ca));
	head_ref(add_ref, &ca);
	for_each_ref(add_ref, &ca);

	/* Remove unreachable shallow commits from "ours" */
	for (i = dst = 0; i < info->nr_ours; i++) {
		if (i != dst)
			info->ours[dst] = info->ours[i];
		c = lookup_commit(the_repository, &oid[info->ours[i]]);
		bitmap = ref_bitmap_at(ref_bitmap, c);
		if (!*bitmap)
			continue;
		for (j = 0; j < bitmap_nr; j++)
			if (bitmap[0][j] &&
			    /* Step 7, reachability test at commit level */
			    !in_merge_bases_many(c, ca.nr, ca.commits)) {
				update_refstatus(ref_status, info->ref->nr, *bitmap);
				dst++;
				break;
			}
	}
	info->nr_ours = dst;

	free(ca.commits);
}
/* (Delayed) step 7, reachability test at commit level */
int delayed_reachability_test(struct shallow_info *si, int c)
{
	if (si->need_reachability_test[c]) {
		struct commit *commit = lookup_commit(the_repository,
						      &si->shallow->oid[c]);

		if (!si->commits) {
			struct commit_array ca;

			memset(&ca, 0, sizeof(ca));
			head_ref(add_ref, &ca);
			for_each_ref(add_ref, &ca);
			si->commits = ca.commits;
			si->nr_commits = ca.nr;
		}

		si->reachable[c] = in_merge_bases_many(commit,
						       si->nr_commits,
						       si->commits);
		si->need_reachability_test[c] = 0;
	}
	return si->reachable[c];
}