  1. /*
  2. * ***** BEGIN GPL LICENSE BLOCK *****
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License
  6. * as published by the Free Software Foundation; either version 2
  7. * of the License, or (at your option) any later version.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write to the Free Software Foundation,
  16. * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  17. *
  18. * Contributor(s): Campbell Barton.
  19. *
  20. * ***** END GPL LICENSE BLOCK *****
  21. */
  22. /** \file blender/bmesh/operators/bmo_fill_grid.c
  23. * \ingroup bmesh
  24. *
  25. * Fill 2 isolated, open edge loops with a grid of quads.
  26. */
  27. #include "MEM_guardedalloc.h"
  28. #include "BLI_listbase.h"
  29. #include "BLI_math.h"
  30. #include "BKE_customdata.h"
  31. #include "bmesh.h"
  32. #include "intern/bmesh_operators_private.h" /* own include */
  33. #include "BLI_strict_flags.h"
  34. #define EDGE_MARK 4
  35. #define FACE_OUT 16
  36. #define BARYCENTRIC_INTERP
  37. #ifdef BARYCENTRIC_INTERP
  38. /**
  39. * 2 edge vectors to normal.
  40. */
  41. static void quad_edges_to_normal(
  42. float no[3],
  43. const float co_a1[3], const float co_a2[3],
  44. const float co_b1[3], const float co_b2[3])
  45. {
  46. float diff_a[3];
  47. float diff_b[3];
  48. sub_v3_v3v3(diff_a, co_a2, co_a1);
  49. sub_v3_v3v3(diff_b, co_b2, co_b1);
  50. normalize_v3(diff_a);
  51. normalize_v3(diff_b);
  52. add_v3_v3v3(no, diff_a, diff_b);
  53. normalize_v3(no);
  54. }
  55. static void quad_verts_to_barycentric_tri(
  56. float tri[3][3],
  57. const float co_a[3],
  58. const float co_b[3],
  59. const float co_a_next[3],
  60. const float co_b_next[3],
  61. const float co_a_prev[3],
  62. const float co_b_prev[3],
  63. const bool is_flip
  64. )
  65. {
  66. float no[3];
  67. copy_v3_v3(tri[0], co_a);
  68. copy_v3_v3(tri[1], co_b);
  69. quad_edges_to_normal(no,
  70. co_a, co_a_next,
  71. co_b, co_b_next);
  72. if (co_a_prev) {
  73. float no_t[3];
  74. quad_edges_to_normal(no_t,
  75. co_a_prev, co_a,
  76. co_b_prev, co_b);
  77. add_v3_v3(no, no_t);
  78. normalize_v3(no);
  79. }
  80. if (is_flip) negate_v3(no);
  81. mul_v3_fl(no, len_v3v3(tri[0], tri[1]));
  82. mid_v3_v3v3(tri[2], tri[0], tri[1]);
  83. add_v3_v3(tri[2], no);
  84. }
  85. #endif
  86. /* -------------------------------------------------------------------- */
  87. /* Handle Loop Pairs */
  88. /** \name Loop Pairs
  89. * \{ */
  90. /**
  91. * Assign a loop pair from 2 verts (which _must_ share an edge)
  92. */
  93. static void bm_loop_pair_from_verts(
  94. BMVert *v_a, BMVert *v_b,
  95. BMLoop *l_pair[2])
  96. {
  97. BMEdge *e = BM_edge_exists(v_a, v_b);
  98. if (e->l) {
  99. if (e->l->v == v_a) {
  100. l_pair[0] = e->l;
  101. l_pair[1] = e->l->next;
  102. }
  103. else {
  104. l_pair[0] = e->l->next;
  105. l_pair[1] = e->l;
  106. }
  107. }
  108. else {
  109. l_pair[0] = NULL;
  110. l_pair[1] = NULL;
  111. }
  112. }
  113. /**
  114. * Copy loop pair from one side to the other if either is missing,
  115. * this simplifies interpolation code so we only need to check if x/y are missing,
  116. * rather then checking each loop.
  117. */
  118. static void bm_loop_pair_test_copy(BMLoop *l_pair_a[2], BMLoop *l_pair_b[2])
  119. {
  120. /* if the first one is set, we know the second is too */
  121. if (l_pair_a[0] && l_pair_b[0] == NULL) {
  122. l_pair_b[0] = l_pair_a[1];
  123. l_pair_b[1] = l_pair_a[0];
  124. }
  125. else if (l_pair_b[0] && l_pair_a[0] == NULL) {
  126. l_pair_a[0] = l_pair_b[1];
  127. l_pair_a[1] = l_pair_b[0];
  128. }
  129. }
  130. /**
  131. * Interpolate from boundary loops.
  132. *
  133. * \note These weights will be calculated multiple times per vertex.
  134. */
  135. static void bm_loop_interp_from_grid_boundary_4(BMesh *bm, BMLoop *l, BMLoop *l_bound[4], const float w[4])
  136. {
  137. const void *l_cdata[4] = {
  138. l_bound[0]->head.data,
  139. l_bound[1]->head.data,
  140. l_bound[2]->head.data,
  141. l_bound[3]->head.data};
  142. CustomData_bmesh_interp(&bm->ldata, l_cdata, w, NULL, 4, l->head.data);
  143. }
  144. static void bm_loop_interp_from_grid_boundary_2(BMesh *bm, BMLoop *l, BMLoop *l_bound[2], const float t)
  145. {
  146. const void *l_cdata[2] = {
  147. l_bound[0]->head.data,
  148. l_bound[1]->head.data};
  149. const float w[2] = {1.0f - t, t};
  150. CustomData_bmesh_interp(&bm->ldata, l_cdata, w, NULL, 2, l->head.data);
  151. }
  152. /** \} */
  153. /**
  154. * Avoids calling #barycentric_weights_v2_quad often by caching weights into an array.
  155. */
  156. static void barycentric_weights_v2_grid_cache(
  157. const uint xtot, const uint ytot,
  158. float (*weight_table)[4])
  159. {
  160. float x_step = 1.0f / (float)(xtot - 1);
  161. float y_step = 1.0f / (float)(ytot - 1);
  162. uint i = 0;
  163. float xy_fl[2];
  164. uint x, y;
  165. for (y = 0; y < ytot; y++) {
  166. xy_fl[1] = y_step * (float)y;
  167. for (x = 0; x < xtot; x++) {
  168. xy_fl[0] = x_step * (float)x;
  169. {
  170. const float cos[4][2] = {
  171. {xy_fl[0], 0.0f},
  172. {0.0f, xy_fl[1]},
  173. {xy_fl[0], 1.0f},
  174. {1.0f, xy_fl[1]}};
  175. barycentric_weights_v2_quad(UNPACK4(cos), xy_fl, weight_table[i++]);
  176. }
  177. }
  178. }
  179. }
/**
 * This may be useful outside the bmesh operator.
 *
 * Fill the interior of a vert grid whose boundary is already set:
 * create the interior verts (barycentric or simple weighted placement),
 * then create a quad face per grid cell, optionally interpolating
 * vert and loop custom-data from the boundary.
 *
 * \param v_grid 2d array of verts, all boundary verts must be set, we fill in the middle.
 * \param xtot, ytot: Grid dimensions (verts per row / column), must each be >= 2.
 * \param mat_nr: Material index assigned to all new faces.
 * \param use_smooth: Tag new faces smooth-shaded.
 * \param use_flip: Reverse face winding.
 * \param use_interp_simple: Use cached barycentric weights instead of
 * the triangle-transform placement.
 */
static void bm_grid_fill_array(
        BMesh *bm, BMVert **v_grid, const uint xtot, unsigned const int ytot,
        const short mat_nr, const bool use_smooth,
        const bool use_flip, const bool use_interp_simple)
{
    const bool use_vert_interp = CustomData_has_interp(&bm->vdata);
    const bool use_loop_interp = CustomData_has_interp(&bm->ldata);
    uint x, y;

    /* for use_loop_interp: boundary loop pairs along the bottom/top rows (x)
     * and left/right columns (y); only allocated when loop interp runs */
    BMLoop *((*larr_x_a)[2]), *((*larr_x_b)[2]), *((*larr_y_a)[2]), *((*larr_y_b)[2]);

    float (*weight_table)[4];

/* row-major index into v_grid / weight_table */
#define XY(_x, _y)  ((_x) + ((_y) * (xtot)))

#ifdef BARYCENTRIC_INTERP
    float tri_a[3][3];
    float tri_b[3][3];
    float tri_t[3][3];  /* temp */

    /* reference triangle for the bottom boundary row (y == 0) */
    quad_verts_to_barycentric_tri(
            tri_a,
            v_grid[XY(0, 0)]->co,
            v_grid[XY(xtot - 1, 0)]->co,
            v_grid[XY(0, 1)]->co,
            v_grid[XY(xtot - 1, 1)]->co,
            NULL, NULL,
            false);

    /* reference triangle for the top boundary row (y == ytot - 1), flipped */
    quad_verts_to_barycentric_tri(
            tri_b,
            v_grid[XY(0, (ytot - 1))]->co,
            v_grid[XY(xtot - 1, (ytot - 1))]->co,
            v_grid[XY(0, (ytot - 2))]->co,
            v_grid[XY(xtot - 1, (ytot - 2))]->co,
            NULL, NULL,
            true);
#endif

    /* weights are needed for simple vert placement and any custom-data interp */
    if (use_interp_simple || use_vert_interp || use_loop_interp) {
        weight_table = MEM_mallocN(sizeof(*weight_table) * (size_t)(xtot * ytot), __func__);
        barycentric_weights_v2_grid_cache(xtot, ytot, weight_table);
    }
    else {
        weight_table = NULL;
    }

    /* Store loops */
    if (use_loop_interp) {
        /* x2 because each edge connects 2 loops */
        larr_x_a = MEM_mallocN(sizeof(*larr_x_a) * (xtot - 1), __func__);
        larr_x_b = MEM_mallocN(sizeof(*larr_x_b) * (xtot - 1), __func__);

        larr_y_a = MEM_mallocN(sizeof(*larr_y_a) * (ytot - 1), __func__);
        larr_y_b = MEM_mallocN(sizeof(*larr_y_b) * (ytot - 1), __func__);

        /* fill in the loops from the boundary edges;
         * if only one side has loops, mirror it onto the other */
        for (x = 0; x < xtot - 1; x++) {
            bm_loop_pair_from_verts(v_grid[XY(x, 0)], v_grid[XY(x + 1, 0)], larr_x_a[x]);
            bm_loop_pair_from_verts(v_grid[XY(x, ytot - 1)], v_grid[XY(x + 1, ytot - 1)], larr_x_b[x]);
            bm_loop_pair_test_copy(larr_x_a[x], larr_x_b[x]);
        }

        for (y = 0; y < ytot - 1; y++) {
            bm_loop_pair_from_verts(v_grid[XY(0, y)], v_grid[XY(0, y + 1)], larr_y_a[y]);
            bm_loop_pair_from_verts(v_grid[XY(xtot - 1, y)], v_grid[XY(xtot - 1, y + 1)], larr_y_b[y]);
            bm_loop_pair_test_copy(larr_y_a[y], larr_y_b[y]);
        }
    }

    /* Build Verts */
    for (y = 1; y < ytot - 1; y++) {
#ifdef BARYCENTRIC_INTERP
        /* per-row triangle, blending in the previous row's edge directions */
        quad_verts_to_barycentric_tri(
                tri_t,
                v_grid[XY(0, y + 0)]->co,
                v_grid[XY(xtot - 1, y + 0)]->co,
                v_grid[XY(0, y + 1)]->co,
                v_grid[XY(xtot - 1, y + 1)]->co,
                v_grid[XY(0, y - 1)]->co,
                v_grid[XY(xtot - 1, y - 1)]->co,
                false);
#endif
        for (x = 1; x < xtot - 1; x++) {
            float co[3];
            BMVert *v;

            /* we may want to allow sparse filled arrays, but for now, ensure its empty */
            BLI_assert(v_grid[(y * xtot) + x] == NULL);

            /* place the vertex */
#ifdef BARYCENTRIC_INTERP
            if (use_interp_simple == false) {
                float co_a[3], co_b[3];

                /* map the bottom-row vert (v_grid[x], i.e. XY(x, 0))
                 * into this row's frame */
                transform_point_by_tri_v3(
                        co_a,
                        v_grid[x]->co,
                        tri_t[0], tri_t[1], tri_t[2],
                        tri_a[0], tri_a[1], tri_a[2]);
                /* same for the matching top-row vert (XY(x, ytot - 1)) */
                transform_point_by_tri_v3(
                        co_b,
                        v_grid[(xtot * ytot) + (x - xtot)]->co,
                        tri_t[0], tri_t[1], tri_t[2],
                        tri_b[0], tri_b[1], tri_b[2]);

                /* blend between the two projections by row position */
                interp_v3_v3v3(co, co_a, co_b, (float)y / ((float)ytot - 1));
            }
            else
#endif
            {
                /* weighted combination of the 4 boundary verts this
                 * grid position projects onto (B/L/T/R) */
                const float *w = weight_table[XY(x, y)];

                zero_v3(co);
                madd_v3_v3fl(co, v_grid[XY(x, 0)]->co, w[0]);
                madd_v3_v3fl(co, v_grid[XY(0, y)]->co, w[1]);
                madd_v3_v3fl(co, v_grid[XY(x, ytot - 1)]->co, w[2]);
                madd_v3_v3fl(co, v_grid[XY(xtot - 1, y)]->co, w[3]);
            }

            v = BM_vert_create(bm, co, NULL, BM_CREATE_NOP);
            v_grid[(y * xtot) + x] = v;

            /* interpolate only along one axis, this could be changed
             * but from user pov gives predictable results since these are selected loop */
            if (use_vert_interp) {
                const float *w = weight_table[XY(x, y)];

                const void *v_cdata[4] = {
                    v_grid[XY(x, 0)]->head.data,
                    v_grid[XY(0, y)]->head.data,
                    v_grid[XY(x, ytot - 1)]->head.data,
                    v_grid[XY(xtot - 1, y)]->head.data,
                };

                CustomData_bmesh_interp(&bm->vdata, v_cdata, w, NULL, 4, v->head.data);
            }
        }
    }

    /* Build Faces */
    for (x = 0; x < xtot - 1; x++) {
        for (y = 0; y < ytot - 1; y++) {
            BMFace *f;

            if (use_flip) {
                f = BM_face_create_quad_tri(
                        bm,
                        v_grid[XY(x, y + 0)],  /* BL */
                        v_grid[XY(x, y + 1)],  /* TL */
                        v_grid[XY(x + 1, y + 1)],  /* TR */
                        v_grid[XY(x + 1, y + 0)],  /* BR */
                        NULL,
                        BM_CREATE_NOP);
            }
            else {
                f = BM_face_create_quad_tri(
                        bm,
                        v_grid[XY(x + 1, y + 0)],  /* BR */
                        v_grid[XY(x + 1, y + 1)],  /* TR */
                        v_grid[XY(x, y + 1)],  /* TL */
                        v_grid[XY(x, y + 0)],  /* BL */
                        NULL,
                        BM_CREATE_NOP);
            }

            /* interpolate loop custom-data (UVs, vcols...) from the boundary */
            if (use_loop_interp && (larr_x_a[x][0] || larr_y_a[y][0])) {
                /* bottom/left/top/right */
                BMLoop *l_quad[4];
                BMLoop *l_bound[4];
                BMLoop *l_tmp;
                uint x_side, y_side, i;
                char interp_from;

                /* pick which axes have usable boundary loops */
                if (larr_x_a[x][0] && larr_y_a[y][0]) {
                    interp_from = 'B';  /* B == both */
                    l_tmp = larr_x_a[x][0];
                }
                else if (larr_x_a[x][0]) {
                    interp_from = 'X';
                    l_tmp = larr_x_a[x][0];
                }
                else {
                    interp_from = 'Y';
                    l_tmp = larr_y_a[y][0];
                }

                /* copy face attributes from a boundary face */
                BM_elem_attrs_copy(bm, bm, l_tmp->f, f);

                BM_face_as_array_loop_quad(f, l_quad);

                /* re-order the face loops to match the grid corners
                 * (order depends on winding) */
                l_tmp = BM_FACE_FIRST_LOOP(f);

                if (use_flip) {
                    l_quad[0] = l_tmp; l_tmp = l_tmp->next;
                    l_quad[1] = l_tmp; l_tmp = l_tmp->next;
                    l_quad[3] = l_tmp; l_tmp = l_tmp->next;
                    l_quad[2] = l_tmp;
                }
                else {
                    l_quad[2] = l_tmp; l_tmp = l_tmp->next;
                    l_quad[3] = l_tmp; l_tmp = l_tmp->next;
                    l_quad[1] = l_tmp; l_tmp = l_tmp->next;
                    l_quad[0] = l_tmp;
                }

                i = 0;

                /* each of the face's 4 corners interpolates from the boundary */
                for (x_side = 0; x_side < 2; x_side++) {
                    for (y_side = 0; y_side < 2; y_side++) {
                        if (interp_from == 'B') {
                            const float *w = weight_table[XY(x + x_side, y + y_side)];
                            l_bound[0] = larr_x_a[x][x_side];  /* B */
                            l_bound[1] = larr_y_a[y][y_side];  /* L */
                            l_bound[2] = larr_x_b[x][x_side];  /* T */
                            l_bound[3] = larr_y_b[y][y_side];  /* R */

                            bm_loop_interp_from_grid_boundary_4(bm, l_quad[i++], l_bound, w);
                        }
                        else if (interp_from == 'X') {
                            const float t = (float)(y + y_side) / (float)(ytot - 1);
                            l_bound[0] = larr_x_a[x][x_side];  /* B */
                            l_bound[1] = larr_x_b[x][x_side];  /* T */

                            bm_loop_interp_from_grid_boundary_2(bm, l_quad[i++], l_bound, t);
                        }
                        else if (interp_from == 'Y') {
                            const float t = (float)(x + x_side) / (float)(xtot - 1);
                            l_bound[0] = larr_y_a[y][y_side];  /* L */
                            l_bound[1] = larr_y_b[y][y_side];  /* R */

                            bm_loop_interp_from_grid_boundary_2(bm, l_quad[i++], l_bound, t);
                        }
                        else {
                            BLI_assert(0);
                        }
                    }
                }
            }
            /* end interp */

            BMO_face_flag_enable(bm, f, FACE_OUT);
            f->mat_nr = mat_nr;
            if (use_smooth) {
                BM_elem_flag_enable(f, BM_ELEM_SMOOTH);
            }
        }
    }

    if (use_loop_interp) {
        MEM_freeN(larr_x_a);
        MEM_freeN(larr_y_a);
        MEM_freeN(larr_x_b);
        MEM_freeN(larr_y_b);
    }

    if (weight_table) {
        MEM_freeN(weight_table);
    }

#undef XY
}
/**
 * Lay the 4 edge loops out into a 2d vert grid (boundary only),
 * detect the winding to use, then hand off to #bm_grid_fill_array
 * to create the interior verts and faces.
 *
 * The 4 loops must share corner verts as asserted below.
 */
static void bm_grid_fill(
        BMesh *bm,
        struct BMEdgeLoopStore *estore_a, struct BMEdgeLoopStore *estore_b,
        struct BMEdgeLoopStore *estore_rail_a, struct BMEdgeLoopStore *estore_rail_b,
        const short mat_nr, const bool use_smooth, const bool use_interp_simple)
{
#define USE_FLIP_DETECT
    const uint xtot = (uint)BM_edgeloop_length_get(estore_a);
    const uint ytot = (uint)BM_edgeloop_length_get(estore_rail_a);
    uint i;
#ifdef DEBUG
    uint x, y;
#endif
    LinkData *el;
    bool use_flip = false;

    ListBase *lb_a = BM_edgeloop_verts_get(estore_a);
    ListBase *lb_b = BM_edgeloop_verts_get(estore_b);

    ListBase *lb_rail_a = BM_edgeloop_verts_get(estore_rail_a);
    ListBase *lb_rail_b = BM_edgeloop_verts_get(estore_rail_b);

    BMVert **v_grid = MEM_callocN(sizeof(BMVert *) * (size_t)(xtot * ytot), __func__);
    /**
     * <pre>
     *           estore_b
     *          +------------------+
     *       ^  |                  |
     *   end |  |                  |
     *       |  |                  |
     *       |  |estore_rail_a     |estore_rail_b
     *       |  |                  |
     * start |  |                  |
     *          |estore_a          |
     *          +------------------+
     *                  --->
     *             start -> end
     * </pre>
     */

    /* the corners must be shared between the loops and their rails */
    BLI_assert(((LinkData *)lb_a->first)->data == ((LinkData *)lb_rail_a->first)->data);  /* BL */
    BLI_assert(((LinkData *)lb_b->first)->data == ((LinkData *)lb_rail_a->last)->data);   /* TL */
    BLI_assert(((LinkData *)lb_b->last)->data == ((LinkData *)lb_rail_b->last)->data);    /* TR */
    BLI_assert(((LinkData *)lb_a->last)->data == ((LinkData *)lb_rail_b->first)->data);   /* BR */

    /* fill the grid boundary: bottom row, top row, left column, right column */
    for (el = lb_a->first, i = 0; el; el = el->next, i++) { v_grid[i] = el->data; }
    for (el = lb_b->first, i = 0; el; el = el->next, i++) { v_grid[(ytot * xtot) + (i - xtot)] = el->data; }
    for (el = lb_rail_a->first, i = 0; el; el = el->next, i++) { v_grid[xtot * i] = el->data; }
    for (el = lb_rail_b->first, i = 0; el; el = el->next, i++) { v_grid[(xtot * i) + (xtot - 1)] = el->data; }
#ifdef DEBUG
    /* the interior must still be empty */
    for (x = 1; x < xtot - 1; x++) { for (y = 1; y < ytot - 1; y++) { BLI_assert(v_grid[(y * xtot) + x] == NULL); }}
#endif

#ifdef USE_FLIP_DETECT
    {
        /* vote on winding: walk each boundary loop's edges and compare
         * the direction of existing boundary faces against the loop direction */
        ListBase *lb_iter[4] = {lb_a, lb_b, lb_rail_a, lb_rail_b};
        const int lb_iter_dir[4] = {-1, 1, 1, -1};
        int winding_votes = 0;

        for (i = 0; i < 4; i++) {
            LinkData *el_next;
            for (el = lb_iter[i]->first; el && (el_next = el->next); el = el->next) {
                BMEdge *e = BM_edge_exists(el->data, el_next->data);
                if (BM_edge_is_boundary(e)) {
                    winding_votes += (e->l->v == el->data) ? lb_iter_dir[i] : -lb_iter_dir[i];
                }
            }
        }
        use_flip = (winding_votes < 0);
    }
#endif

    bm_grid_fill_array(bm, v_grid, xtot, ytot, mat_nr, use_smooth, use_flip, use_interp_simple);
    MEM_freeN(v_grid);
#undef USE_FLIP_DETECT
}
  479. static void bm_edgeloop_flag_set(struct BMEdgeLoopStore *estore, char hflag, bool set)
  480. {
  481. /* only handle closed loops in this case */
  482. LinkData *link = BM_edgeloop_verts_get(estore)->first;
  483. link = link->next;
  484. while (link) {
  485. BMEdge *e = BM_edge_exists(link->data, link->prev->data);
  486. if (e) {
  487. BM_elem_flag_set(e, hflag, set);
  488. }
  489. link = link->next;
  490. }
  491. }
  492. static bool bm_edge_test_cb(BMEdge *e, void *bm_v)
  493. {
  494. return BMO_edge_flag_test_bool((BMesh *)bm_v, e, EDGE_MARK);
  495. }
  496. static bool bm_edge_test_rail_cb(BMEdge *e, void *UNUSED(bm_v))
  497. {
  498. /* normally operators dont check for hidden state
  499. * but alternative would be to pass slot of rail edges */
  500. if (BM_elem_flag_test(e, BM_ELEM_HIDDEN)) {
  501. return false;
  502. }
  503. return BM_edge_is_wire(e) || BM_edge_is_boundary(e);
  504. }
/**
 * Operator entry point: find the 2 selected edge loops, find (or search for)
 * the 2 rail loops connecting them, equalize loop lengths by splitting edges
 * if needed, fill the grid, then collapse any temporary split edges.
 *
 * Raises #BMERR_INVALID_SELECTION on any unusable selection; all paths
 * free the edge-loop lists via the `cleanup` label.
 */
void bmo_grid_fill_exec(BMesh *bm, BMOperator *op)
{
    ListBase eloops = {NULL, NULL};
    ListBase eloops_rail = {NULL, NULL};
    struct BMEdgeLoopStore *estore_a, *estore_b;
    struct BMEdgeLoopStore *estore_rail_a, *estore_rail_b;
    BMVert *v_a_first, *v_a_last;
    BMVert *v_b_first, *v_b_last;
    const short mat_nr = (short)BMO_slot_int_get(op->slots_in, "mat_nr");
    const bool use_smooth = BMO_slot_bool_get(op->slots_in, "use_smooth");
    const bool use_interp_simple = BMO_slot_bool_get(op->slots_in, "use_interp_simple");
    GSet *split_edges = NULL;  /* edges added by BM_edgeloop_expand, collapsed again at the end */

    int count;
    bool changed = false;

    BMO_slot_buffer_flag_enable(bm, op->slots_in, "edges", BM_EDGE, EDGE_MARK);

    count = BM_mesh_edgeloops_find(bm, &eloops, bm_edge_test_cb, (void *)bm);

    if (count != 2) {
        BMO_error_raise(bm, op, BMERR_INVALID_SELECTION,
                        "Select two edge loops");
        goto cleanup;
    }

    estore_a = eloops.first;
    estore_b = eloops.last;

    v_a_first = ((LinkData *)BM_edgeloop_verts_get(estore_a)->first)->data;
    v_a_last = ((LinkData *)BM_edgeloop_verts_get(estore_a)->last)->data;
    v_b_first = ((LinkData *)BM_edgeloop_verts_get(estore_b)->first)->data;
    v_b_last = ((LinkData *)BM_edgeloop_verts_get(estore_b)->last)->data;

    if (BM_edgeloop_is_closed(estore_a) || BM_edgeloop_is_closed(estore_b)) {
        BMO_error_raise(bm, op, BMERR_INVALID_SELECTION,
                        "Closed loops unsupported");
        goto cleanup;
    }

    /* ok. all error checking done, now we can find the rail edges */

    /* cheat here, temp hide all edges so they won't be included in rails;
     * this puts the mesh in an invalid state for a short time. */
    bm_edgeloop_flag_set(estore_a, BM_ELEM_HIDDEN, true);
    bm_edgeloop_flag_set(estore_b, BM_ELEM_HIDDEN, true);

    /* try connecting first->first / last->last; if that fails,
     * try the crossed pairing and flip loop b to compensate */
    if ((BM_mesh_edgeloops_find_path(bm, &eloops_rail, bm_edge_test_rail_cb, bm, v_a_first, v_b_first)) &&
        (BM_mesh_edgeloops_find_path(bm, &eloops_rail, bm_edge_test_rail_cb, bm, v_a_last, v_b_last)))
    {
        estore_rail_a = eloops_rail.first;
        estore_rail_b = eloops_rail.last;
    }
    else {
        BM_mesh_edgeloops_free(&eloops_rail);

        if ((BM_mesh_edgeloops_find_path(bm, &eloops_rail, bm_edge_test_rail_cb, bm, v_a_first, v_b_last)) &&
            (BM_mesh_edgeloops_find_path(bm, &eloops_rail, bm_edge_test_rail_cb, bm, v_a_last, v_b_first)))
        {
            estore_rail_a = eloops_rail.first;
            estore_rail_b = eloops_rail.last;
            BM_edgeloop_flip(bm, estore_b);
        }
        else {
            BM_mesh_edgeloops_free(&eloops_rail);
        }
    }

    /* un-hide the loops again */
    bm_edgeloop_flag_set(estore_a, BM_ELEM_HIDDEN, false);
    bm_edgeloop_flag_set(estore_b, BM_ELEM_HIDDEN, false);

    if (BLI_listbase_is_empty(&eloops_rail)) {
        BMO_error_raise(bm, op, BMERR_INVALID_SELECTION,
                        "Loops are not connected by wire/boundary edges");
        goto cleanup;
    }

    BLI_assert(estore_a != estore_b);
    BLI_assert(v_a_last != v_b_last);

    if (BM_edgeloop_overlap_check(estore_rail_a, estore_rail_b)) {
        BMO_error_raise(bm, op, BMERR_INVALID_SELECTION,
                        "Connecting edge loops overlap");
        goto cleanup;
    }

    /* add vertices if needed: expand the shorter loop of each pair so
     * opposite loops have equal length, remembering the split edges */
    {
        struct BMEdgeLoopStore *estore_pairs[2][2] = {{estore_a, estore_b}, {estore_rail_a, estore_rail_b}};
        int i;

        for (i = 0; i < 2; i++) {
            const int len_a = BM_edgeloop_length_get(estore_pairs[i][0]);
            const int len_b = BM_edgeloop_length_get(estore_pairs[i][1]);
            if (len_a != len_b) {
                if (split_edges == NULL) {
                    split_edges = BLI_gset_ptr_new(__func__);
                }

                if (len_a < len_b) {
                    BM_edgeloop_expand(bm, estore_pairs[i][0], len_b, true, split_edges);
                }
                else {
                    BM_edgeloop_expand(bm, estore_pairs[i][1], len_a, true, split_edges);
                }
            }
        }
    }

    /* finally we have all edge loops needed */
    bm_grid_fill(bm, estore_a, estore_b, estore_rail_a, estore_rail_b,
                 mat_nr, use_smooth, use_interp_simple);

    changed = true;

    /* collapse the temporary split edges added above */
    if (split_edges) {
        GSetIterator gs_iter;
        GSET_ITER (gs_iter, split_edges) {
            BMEdge *e = BLI_gsetIterator_getKey(&gs_iter);
            BM_edge_collapse(bm, e, e->v2, true, true);
        }
        BLI_gset_free(split_edges, NULL);
    }

cleanup:
    BM_mesh_edgeloops_free(&eloops);
    BM_mesh_edgeloops_free(&eloops_rail);

    if (changed) {
        BMO_slot_buffer_from_enabled_flag(bm, op, op->slots_out, "faces.out", BM_FACE, FACE_OUT);
    }
}