particle_distribute.c

  1. /*
  2. * ***** BEGIN GPL LICENSE BLOCK *****
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License
  6. * as published by the Free Software Foundation; either version 2
  7. * of the License, or (at your option) any later version.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write to the Free Software Foundation,
  16. * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  17. *
  18. * The Original Code is Copyright (C) 2007 by Janne Karhu.
  19. * All rights reserved.
  20. *
  21. * The Original Code is: all of this file.
  22. *
  23. * Contributor(s): Raul Fernandez Hernandez (Farsthary),
  24. * Stephen Swhitehorn,
  25. * Lukas Toenne
  26. *
  27. * ***** END GPL LICENSE BLOCK *****
  28. */
  29. /** \file blender/blenkernel/intern/particle_distribute.c
  30. * \ingroup bke
  31. */
  32. #include <string.h>
  33. #include "MEM_guardedalloc.h"
  34. #include "BLI_utildefines.h"
  35. #include "BLI_jitter.h"
  36. #include "BLI_kdtree.h"
  37. #include "BLI_math.h"
  38. #include "BLI_math_geom.h"
  39. #include "BLI_rand.h"
  40. #include "BLI_sort.h"
  41. #include "BLI_task.h"
  42. #include "DNA_mesh_types.h"
  43. #include "DNA_meshdata_types.h"
  44. #include "DNA_modifier_types.h"
  45. #include "DNA_particle_types.h"
  46. #include "DNA_scene_types.h"
  47. #include "BKE_cdderivedmesh.h"
  48. #include "BKE_DerivedMesh.h"
  49. #include "BKE_global.h"
  50. #include "BKE_mesh.h"
  51. #include "BKE_object.h"
  52. #include "BKE_particle.h"
  53. static int psys_render_simplify_distribution(ParticleThreadContext *ctx, int tot);
  54. static void alloc_child_particles(ParticleSystem *psys, int tot)
  55. {
  56. if (psys->child) {
  57. /* only re-allocate if we have to */
  58. if (psys->part->childtype && psys->totchild == tot) {
  59. memset(psys->child, 0, tot*sizeof(ChildParticle));
  60. return;
  61. }
  62. MEM_freeN(psys->child);
  63. psys->child=NULL;
  64. psys->totchild=0;
  65. }
  66. if (psys->part->childtype) {
  67. psys->totchild= tot;
  68. if (psys->totchild)
  69. psys->child= MEM_callocN(psys->totchild*sizeof(ChildParticle), "child_particles");
  70. }
  71. }
  72. static void distribute_simple_children(Scene *scene, Object *ob, DerivedMesh *finaldm, DerivedMesh *deformdm, ParticleSystem *psys)
  73. {
  74. ChildParticle *cpa = NULL;
  75. int i, p;
  76. int child_nbr= psys_get_child_number(scene, psys);
  77. int totpart= psys_get_tot_child(scene, psys);
  78. alloc_child_particles(psys, totpart);
  79. cpa = psys->child;
  80. for (i=0; i<child_nbr; i++) {
  81. for (p=0; p<psys->totpart; p++,cpa++) {
  82. float length=2.0;
  83. cpa->parent=p;
  84. /* create even spherical distribution inside unit sphere */
  85. while (length>=1.0f) {
  86. cpa->fuv[0]=2.0f*BLI_frand()-1.0f;
  87. cpa->fuv[1]=2.0f*BLI_frand()-1.0f;
  88. cpa->fuv[2]=2.0f*BLI_frand()-1.0f;
  89. length=len_v3(cpa->fuv);
  90. }
  91. cpa->num=-1;
  92. }
  93. }
  94. /* dmcache must be updated for parent particles if children from faces are used */
  95. psys_calc_dmcache(ob, finaldm, deformdm, psys);
  96. }
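/* Note: the child loop above is plain rejection sampling. A point is drawn
 * uniformly from the cube [-1, 1]^3 and kept only if it lies inside the unit
 * sphere, which leaves the accepted points uniformly distributed within the
 * sphere. The acceptance rate is the volume ratio (4/3 * pi) / 8 ~= 0.52, so
 * on average roughly two draws are needed per child. A minimal illustrative
 * sketch of the same idea (the helper name is not part of this file):
 *
 *   static void sample_unit_sphere(float r[3])
 *   {
 *       do {
 *           r[0] = 2.0f * BLI_frand() - 1.0f;
 *           r[1] = 2.0f * BLI_frand() - 1.0f;
 *           r[2] = 2.0f * BLI_frand() - 1.0f;
 *       } while (len_v3(r) >= 1.0f);
 *   }
 */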
  97. static void distribute_grid(DerivedMesh *dm, ParticleSystem *psys)
  98. {
  99. ParticleData *pa=NULL;
  100. float min[3], max[3], delta[3], d;
  101. MVert *mv, *mvert = dm->getVertDataArray(dm,0);
  102. int totvert=dm->getNumVerts(dm), from=psys->part->from;
  103. int i, j, k, p, res=psys->part->grid_res, size[3], axis;
  104. /* find bounding box of dm */
  105. if (totvert > 0) {
  106. mv=mvert;
  107. copy_v3_v3(min, mv->co);
  108. copy_v3_v3(max, mv->co);
  109. mv++;
  110. for (i = 1; i < totvert; i++, mv++) {
  111. minmax_v3v3_v3(min, max, mv->co);
  112. }
  113. }
  114. else {
  115. zero_v3(min);
  116. zero_v3(max);
  117. }
  118. sub_v3_v3v3(delta, max, min);
  119. /* determine major axis */
  120. axis = axis_dominant_v3_single(delta);
  121. d = delta[axis]/(float)res;
  122. size[axis] = res;
  123. size[(axis+1)%3] = (int)ceil(delta[(axis+1)%3]/d);
  124. size[(axis+2)%3] = (int)ceil(delta[(axis+2)%3]/d);
  125. /* float errors grrr.. */
  126. size[(axis+1)%3] = MIN2(size[(axis+1)%3],res);
  127. size[(axis+2)%3] = MIN2(size[(axis+2)%3],res);
  128. size[0] = MAX2(size[0], 1);
  129. size[1] = MAX2(size[1], 1);
  130. size[2] = MAX2(size[2], 1);
  131. /* no full offset for flat/thin objects */
  132. min[0]+= d < delta[0] ? d/2.f : delta[0]/2.f;
  133. min[1]+= d < delta[1] ? d/2.f : delta[1]/2.f;
  134. min[2]+= d < delta[2] ? d/2.f : delta[2]/2.f;
  135. for (i=0,p=0,pa=psys->particles; i<res; i++) {
  136. for (j=0; j<res; j++) {
  137. for (k=0; k<res; k++,p++,pa++) {
  138. pa->fuv[0] = min[0] + (float)i*d;
  139. pa->fuv[1] = min[1] + (float)j*d;
  140. pa->fuv[2] = min[2] + (float)k*d;
  141. pa->flag |= PARS_UNEXIST;
  142. pa->hair_index = 0; /* abused in volume calculation */
  143. }
  144. }
  145. }
  146. /* enable particles near verts/edges/faces/inside surface */
  147. if (from==PART_FROM_VERT) {
  148. float vec[3];
  149. pa=psys->particles;
  150. min[0] -= d/2.0f;
  151. min[1] -= d/2.0f;
  152. min[2] -= d/2.0f;
  153. for (i=0,mv=mvert; i<totvert; i++,mv++) {
  154. sub_v3_v3v3(vec,mv->co,min);
  155. vec[0]/=delta[0];
  156. vec[1]/=delta[1];
  157. vec[2]/=delta[2];
  158. pa[((int)(vec[0] * (size[0] - 1)) * res +
  159. (int)(vec[1] * (size[1] - 1))) * res +
  160. (int)(vec[2] * (size[2] - 1))].flag &= ~PARS_UNEXIST;
  161. }
  162. }
  163. else if (ELEM(from,PART_FROM_FACE,PART_FROM_VOLUME)) {
  164. float co1[3], co2[3];
  165. MFace *mface= NULL, *mface_array;
  166. float v1[3], v2[3], v3[3], v4[4], lambda;
  167. int a, a1, a2, a0mul, a1mul, a2mul, totface;
  168. int amax= from==PART_FROM_FACE ? 3 : 1;
  169. totface=dm->getNumTessFaces(dm);
  170. mface=mface_array=dm->getTessFaceDataArray(dm,CD_MFACE);
  171. for (a=0; a<amax; a++) {
  172. if (a==0) { a0mul=res*res; a1mul=res; a2mul=1; }
  173. else if (a==1) { a0mul=res; a1mul=1; a2mul=res*res; }
  174. else { a0mul=1; a1mul=res*res; a2mul=res; }
  175. for (a1=0; a1<size[(a+1)%3]; a1++) {
  176. for (a2=0; a2<size[(a+2)%3]; a2++) {
  177. mface= mface_array;
  178. pa = psys->particles + a1*a1mul + a2*a2mul;
  179. copy_v3_v3(co1, pa->fuv);
  180. co1[a] -= d < delta[a] ? d/2.f : delta[a]/2.f;
  181. copy_v3_v3(co2, co1);
  182. co2[a] += delta[a] + 0.001f*d;
  183. co1[a] -= 0.001f*d;
  184. struct IsectRayPrecalc isect_precalc;
  185. float ray_direction[3];
  186. sub_v3_v3v3(ray_direction, co2, co1);
  187. isect_ray_tri_watertight_v3_precalc(&isect_precalc, ray_direction);
  188. /* let's intersect the faces */
  189. for (i=0; i<totface; i++,mface++) {
  190. copy_v3_v3(v1, mvert[mface->v1].co);
  191. copy_v3_v3(v2, mvert[mface->v2].co);
  192. copy_v3_v3(v3, mvert[mface->v3].co);
  193. bool intersects_tri = isect_ray_tri_watertight_v3(co1,
  194. &isect_precalc,
  195. v1, v2, v3,
  196. &lambda, NULL);
  197. if (intersects_tri) {
  198. if (from==PART_FROM_FACE)
  199. (pa+(int)(lambda*size[a])*a0mul)->flag &= ~PARS_UNEXIST;
  200. else /* store number of intersections */
  201. (pa+(int)(lambda*size[a])*a0mul)->hair_index++;
  202. }
  203. if (mface->v4 && (!intersects_tri || from==PART_FROM_VOLUME)) {
  204. copy_v3_v3(v4, mvert[mface->v4].co);
  205. if (isect_ray_tri_watertight_v3(co1,
  206. &isect_precalc,
  207. v1, v3, v4,
  208. &lambda, NULL)) {
  209. if (from==PART_FROM_FACE)
  210. (pa+(int)(lambda*size[a])*a0mul)->flag &= ~PARS_UNEXIST;
  211. else
  212. (pa+(int)(lambda*size[a])*a0mul)->hair_index++;
  213. }
  214. }
  215. }
  216. if (from==PART_FROM_VOLUME) {
  217. int in=pa->hair_index%2;
  218. if (in) pa->hair_index++;
  219. for (i=0; i<size[0]; i++) {
  220. if (in || (pa+i*a0mul)->hair_index%2)
  221. (pa+i*a0mul)->flag &= ~PARS_UNEXIST;
  222. /* odd intersections == in->out / out->in */
  223. /* even intersections -> in stays same */
  224. in=(in + (pa+i*a0mul)->hair_index) % 2;
  225. }
  226. }
  227. }
  228. }
  229. }
  230. }
  231. if (psys->part->flag & PART_GRID_HEXAGONAL) {
  232. for (i=0,p=0,pa=psys->particles; i<res; i++) {
  233. for (j=0; j<res; j++) {
  234. for (k=0; k<res; k++,p++,pa++) {
  235. if (j%2)
  236. pa->fuv[0] += d/2.f;
  237. if (k%2) {
  238. pa->fuv[0] += d/2.f;
  239. pa->fuv[1] += d/2.f;
  240. }
  241. }
  242. }
  243. }
  244. }
  245. if (psys->part->flag & PART_GRID_INVERT) {
  246. for (i=0; i<size[0]; i++) {
  247. for (j=0; j<size[1]; j++) {
  248. pa=psys->particles + res*(i*res + j);
  249. for (k=0; k<size[2]; k++, pa++) {
  250. pa->flag ^= PARS_UNEXIST;
  251. }
  252. }
  253. }
  254. }
  255. if (psys->part->grid_rand > 0.f) {
  256. float rfac = d * psys->part->grid_rand;
  257. for (p=0,pa=psys->particles; p<psys->totpart; p++,pa++) {
  258. if (pa->flag & PARS_UNEXIST)
  259. continue;
  260. pa->fuv[0] += rfac * (psys_frand(psys, p + 31) - 0.5f);
  261. pa->fuv[1] += rfac * (psys_frand(psys, p + 32) - 0.5f);
  262. pa->fuv[2] += rfac * (psys_frand(psys, p + 33) - 0.5f);
  263. }
  264. }
  265. }
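/* Note: distribute_grid() above works in passes. It first lays out a lattice
 * of candidate points over the bounding box (the cell size d is taken from
 * the dominant axis) with every particle flagged PARS_UNEXIST. It then
 * "turns on" points near the emitter: for PART_FROM_VERT each vertex clears
 * the flag of the grid cell it falls into, while for faces/volume rays are
 * cast along grid rows (all three axes for faces, one axis for volume) and
 * every triangle crossing is recorded. For PART_FROM_VOLUME the usual
 * even/odd crossing-parity rule decides which cells are inside: after an odd
 * number of crossings the row is inside the surface, after an even number it
 * is outside again. Hexagonal offsetting, inversion and random jitter are
 * applied as optional post-passes on the surviving points. */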
  266. /* modified copy from rayshade.c */
  267. static void hammersley_create(float *out, int n, int seed, float amount)
  268. {
  269. RNG *rng;
  270. double p, t, offs[2];
  271. int k, kk;
  272. rng = BLI_rng_new(31415926 + n + seed);
  273. offs[0] = BLI_rng_get_double(rng) + (double)amount;
  274. offs[1] = BLI_rng_get_double(rng) + (double)amount;
  275. BLI_rng_free(rng);
  276. for (k = 0; k < n; k++) {
  277. t = 0;
  278. for (p = 0.5, kk = k; kk; p *= 0.5, kk >>= 1)
  279. if (kk & 1) /* kk mod 2 = 1 */
  280. t += p;
  281. out[2*k + 0] = fmod((double)k/(double)n + offs[0], 1.0);
  282. out[2*k + 1] = fmod(t + offs[1], 1.0);
  283. }
  284. }
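/* Note: hammersley_create() fills "out" with a 2D Hammersley point set.
 * Roughly, for sample k of n the coordinates are
 *
 *   x_k = k / n
 *   y_k = radical inverse of k in base 2 (bit-reversed fraction)
 *
 * both shifted by a random per-seed offset and wrapped back into [0, 1) with
 * fmod. The inner loop computes the radical inverse: each set bit of k
 * contributes p = 2^-(bit position + 1) to t. */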
  285. /* almost exact copy of BLI_jitter_init */
  286. static void init_mv_jit(float *jit, int num, int seed2, float amount)
  287. {
  288. RNG *rng;
  289. float *jit2, x, rad1, rad2, rad3;
  290. int i, num2;
  291. if (num==0) return;
  292. rad1= (float)(1.0f/sqrtf((float)num));
  293. rad2= (float)(1.0f/((float)num));
  294. rad3= (float)sqrtf((float)num)/((float)num);
  295. rng = BLI_rng_new(31415926 + num + seed2);
  296. x= 0;
  297. num2 = 2 * num;
  298. for (i=0; i<num2; i+=2) {
  299. jit[i] = x + amount*rad1*(0.5f - BLI_rng_get_float(rng));
  300. jit[i+1] = i/(2.0f*num) + amount*rad1*(0.5f - BLI_rng_get_float(rng));
  301. jit[i]-= (float)floor(jit[i]);
  302. jit[i+1]-= (float)floor(jit[i+1]);
  303. x+= rad3;
  304. x -= (float)floor(x);
  305. }
  306. jit2= MEM_mallocN(12 + 2*sizeof(float)*num, "initjit");
  307. for (i=0 ; i<4 ; i++) {
  308. BLI_jitterate1((float (*)[2])jit, (float (*)[2])jit2, num, rad1);
  309. BLI_jitterate1((float (*)[2])jit, (float (*)[2])jit2, num, rad1);
  310. BLI_jitterate2((float (*)[2])jit, (float (*)[2])jit2, num, rad2);
  311. }
  312. MEM_freeN(jit2);
  313. BLI_rng_free(rng);
  314. }
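/* Note: init_mv_jit() seeds the jitter table with a randomized but roughly
 * even progression (x advances by sqrt(num)/num each step while y walks
 * through i/(2*num)), then runs a few BLI_jitterate1/BLI_jitterate2
 * relaxation passes to push points apart, mirroring what BLI_jitter_init
 * does. "amount" scales how far points may stray from the regular pattern. */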
  315. static void psys_uv_to_w(float u, float v, int quad, float *w)
  316. {
  317. float vert[4][3], co[3];
  318. if (!quad) {
  319. if (u+v > 1.0f)
  320. v= 1.0f-v;
  321. else
  322. u= 1.0f-u;
  323. }
  324. vert[0][0] = 0.0f; vert[0][1] = 0.0f; vert[0][2] = 0.0f;
  325. vert[1][0] = 1.0f; vert[1][1] = 0.0f; vert[1][2] = 0.0f;
  326. vert[2][0] = 1.0f; vert[2][1] = 1.0f; vert[2][2] = 0.0f;
  327. co[0] = u;
  328. co[1] = v;
  329. co[2] = 0.0f;
  330. if (quad) {
  331. vert[3][0] = 0.0f; vert[3][1] = 1.0f; vert[3][2] = 0.0f;
  332. interp_weights_poly_v3( w,vert, 4, co);
  333. }
  334. else {
  335. interp_weights_poly_v3( w,vert, 3, co);
  336. w[3] = 0.0f;
  337. }
  338. }
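/* Note: psys_uv_to_w() converts a (u, v) sample from the unit square into the
 * 4 face-corner weights "w" via interp_weights_poly_v3 on a canonical quad
 * (0,0)-(1,0)-(1,1)-(0,1), or on the triangle (0,0)-(1,0)-(1,1) when
 * quad == 0. For triangles the sample is first reflected (either u or v is
 * flipped to 1 minus itself) so it always lands in the v <= u half of the
 * square, i.e. inside that canonical triangle, and w[3] is zeroed. */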
  339. /* Find the index in "sum" array before "value" is crossed. */
  340. static int distribute_binary_search(float *sum, int n, float value)
  341. {
  342. int mid, low = 0, high = n - 1;
  343. if (high == low)
  344. return low;
  345. if (sum[low] >= value)
  346. return low;
  347. if (sum[high - 1] < value)
  348. return high;
  349. while (low < high) {
  350. mid = (low + high) / 2;
  351. if ((sum[mid] >= value) && (sum[mid - 1] < value))
  352. return mid;
  353. if (sum[mid] > value) {
  354. high = mid - 1;
  355. }
  356. else {
  357. low = mid + 1;
  358. }
  359. }
  360. return low;
  361. }
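/* Note: distribute_binary_search() expects "sum" to be a cumulative
 * (monotonically increasing) weight array and returns the first index whose
 * running total reaches "value". An illustrative example of the intended use:
 *
 *   float sum[4] = {0.25f, 0.50f, 0.75f, 1.00f};
 *   int idx = distribute_binary_search(sum, 4, 0.6f);  // idx == 2
 *
 * so a uniformly random "value" in [0, 1] picks element i with probability
 * proportional to its weight (sum[i] - sum[i - 1]). */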
  362. /* the max number of calls to rng_* funcs within psys_thread_distribute_particle,
  363. * be sure to keep it up to date if this changes */
  364. #define PSYS_RND_DIST_SKIP 2
  365. /* note: this function must be thread safe, for from == PART_FROM_CHILD */
  366. #define ONLY_WORKING_WITH_PA_VERTS 0
  367. static void distribute_from_verts_exec(ParticleTask *thread, ParticleData *pa, int p)
  368. {
  369. ParticleThreadContext *ctx= thread->ctx;
  370. MFace *mface;
  371. mface = ctx->dm->getTessFaceDataArray(ctx->dm, CD_MFACE);
  372. int rng_skip_tot = PSYS_RND_DIST_SKIP; /* count how many rng_* calls won't need skipping */
  373. /* TODO_PARTICLE - use original index */
  374. pa->num = ctx->index[p];
  375. zero_v4(pa->fuv);
  376. if (pa->num != DMCACHE_NOTFOUND && pa->num < ctx->dm->getNumVerts(ctx->dm)) {
  377. /* This finds the first face to contain the emitting vertex,
  378. * this is not ideal, but is mostly fine as UV seams generally
  379. * map to equal-colored parts of a texture */
  380. for (int i = 0; i < ctx->dm->getNumTessFaces(ctx->dm); i++, mface++) {
  381. if (ELEM(pa->num, mface->v1, mface->v2, mface->v3, mface->v4)) {
  382. unsigned int *vert = &mface->v1;
  383. for (int j = 0; j < 4; j++, vert++) {
  384. if (*vert == pa->num) {
  385. pa->fuv[j] = 1.0f;
  386. break;
  387. }
  388. }
  389. break;
  390. }
  391. }
  392. }
  393. #if ONLY_WORKING_WITH_PA_VERTS
  394. if (ctx->tree) {
  395. KDTreeNearest ptn[3];
  396. int w, maxw;
  397. psys_particle_on_dm(ctx->dm,from,pa->num,pa->num_dmcache,pa->fuv,pa->foffset,co1,0,0,0,orco1,0);
  398. BKE_mesh_orco_verts_transform((Mesh*)ob->data, &orco1, 1, 1);
  399. maxw = BLI_kdtree_find_nearest_n(ctx->tree,orco1,ptn,3);
  400. for (w=0; w<maxw; w++) {
  401. pa->verts[w]=ptn->num;
  402. }
  403. }
  404. #endif
  405. if (rng_skip_tot > 0) /* should never be below zero */
  406. BLI_rng_skip(thread->rng, rng_skip_tot);
  407. }
  408. static void distribute_from_faces_exec(ParticleTask *thread, ParticleData *pa, int p) {
  409. ParticleThreadContext *ctx= thread->ctx;
  410. DerivedMesh *dm= ctx->dm;
  411. float randu, randv;
  412. int distr= ctx->distr;
  413. int i;
  414. int rng_skip_tot= PSYS_RND_DIST_SKIP; /* count how many rng_* calls won't need skipping */
  415. MFace *mface;
  416. pa->num = i = ctx->index[p];
  417. mface = dm->getTessFaceData(dm,i,CD_MFACE);
  418. switch (distr) {
  419. case PART_DISTR_JIT:
  420. if (ctx->jitlevel == 1) {
  421. if (mface->v4)
  422. psys_uv_to_w(0.5f, 0.5f, mface->v4, pa->fuv);
  423. else
  424. psys_uv_to_w(1.0f / 3.0f, 1.0f / 3.0f, mface->v4, pa->fuv);
  425. }
  426. else {
  427. float offset = fmod(ctx->jitoff[i] + (float)p, (float)ctx->jitlevel);
  428. if (!isnan(offset)) {
  429. psys_uv_to_w(ctx->jit[2*(int)offset], ctx->jit[2*(int)offset+1], mface->v4, pa->fuv);
  430. }
  431. }
  432. break;
  433. case PART_DISTR_RAND:
  434. randu= BLI_rng_get_float(thread->rng);
  435. randv= BLI_rng_get_float(thread->rng);
  436. rng_skip_tot -= 2;
  437. psys_uv_to_w(randu, randv, mface->v4, pa->fuv);
  438. break;
  439. }
  440. pa->foffset= 0.0f;
  441. if (rng_skip_tot > 0) /* should never be below zero */
  442. BLI_rng_skip(thread->rng, rng_skip_tot);
  443. }
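/* Note: for PART_DISTR_JIT the face's jitter offset (ctx->jitoff[i]) plus the
 * particle index selects one of the jitlevel precomputed 2D jitter samples,
 * wrapping with fmod, so particles emitted from the same face walk through
 * the jitter table instead of stacking on one point; with jitlevel 1 every
 * particle simply gets the face center (0.5, 0.5 on quads, 1/3, 1/3 on
 * triangles). For PART_DISTR_RAND two fresh RNG floats are used, and the
 * rng_skip_tot bookkeeping keeps the per-particle RNG stream aligned. */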
  444. static void distribute_from_volume_exec(ParticleTask *thread, ParticleData *pa, int p) {
  445. ParticleThreadContext *ctx= thread->ctx;
  446. DerivedMesh *dm= ctx->dm;
  447. float *v1, *v2, *v3, *v4, nor[3], co[3];
  448. float cur_d, min_d, randu, randv;
  449. int distr= ctx->distr;
  450. int i, intersect, tot;
  451. int rng_skip_tot= PSYS_RND_DIST_SKIP; /* count how many rng_* calls won't need skipping */
  452. MFace *mface;
  453. MVert *mvert=dm->getVertDataArray(dm,CD_MVERT);
  454. pa->num = i = ctx->index[p];
  455. mface = dm->getTessFaceData(dm,i,CD_MFACE);
  456. switch (distr) {
  457. case PART_DISTR_JIT:
  458. if (ctx->jitlevel == 1) {
  459. if (mface->v4)
  460. psys_uv_to_w(0.5f, 0.5f, mface->v4, pa->fuv);
  461. else
  462. psys_uv_to_w(1.0f / 3.0f, 1.0f / 3.0f, mface->v4, pa->fuv);
  463. }
  464. else {
  465. float offset = fmod(ctx->jitoff[i] + (float)p, (float)ctx->jitlevel);
  466. if (!isnan(offset)) {
  467. psys_uv_to_w(ctx->jit[2*(int)offset], ctx->jit[2*(int)offset+1], mface->v4, pa->fuv);
  468. }
  469. }
  470. break;
  471. case PART_DISTR_RAND:
  472. randu= BLI_rng_get_float(thread->rng);
  473. randv= BLI_rng_get_float(thread->rng);
  474. rng_skip_tot -= 2;
  475. psys_uv_to_w(randu, randv, mface->v4, pa->fuv);
  476. break;
  477. }
  478. pa->foffset= 0.0f;
  479. /* experimental */
  480. tot=dm->getNumTessFaces(dm);
  481. psys_interpolate_face(mvert,mface,0,0,pa->fuv,co,nor,0,0,0,0);
  482. normalize_v3(nor);
  483. negate_v3(nor);
  484. min_d=FLT_MAX;
  485. intersect=0;
  486. for (i=0,mface=dm->getTessFaceDataArray(dm,CD_MFACE); i<tot; i++,mface++) {
  487. if (i==pa->num) continue;
  488. v1=mvert[mface->v1].co;
  489. v2=mvert[mface->v2].co;
  490. v3=mvert[mface->v3].co;
  491. if (isect_ray_tri_v3(co, nor, v2, v3, v1, &cur_d, NULL)) {
  492. if (cur_d<min_d) {
  493. min_d=cur_d;
  494. pa->foffset=cur_d*0.5f; /* to the middle of volume */
  495. intersect=1;
  496. }
  497. }
  498. if (mface->v4) {
  499. v4=mvert[mface->v4].co;
  500. if (isect_ray_tri_v3(co, nor, v4, v1, v3, &cur_d, NULL)) {
  501. if (cur_d<min_d) {
  502. min_d=cur_d;
  503. pa->foffset=cur_d*0.5f; /* to the middle of volume */
  504. intersect=1;
  505. }
  506. }
  507. }
  508. }
  509. if (intersect==0)
  510. pa->foffset=0.0;
  511. else {
  512. switch (distr) {
  513. case PART_DISTR_JIT:
  514. pa->foffset *= ctx->jit[p % (2 * ctx->jitlevel)];
  515. break;
  516. case PART_DISTR_RAND:
  517. pa->foffset *= BLI_frand();
  518. break;
  519. }
  520. }
  521. if (rng_skip_tot > 0) /* should never be below zero */
  522. BLI_rng_skip(thread->rng, rng_skip_tot);
  523. }
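/* Note: the volume variant starts from the same surface sample as the face
 * case, then shoots a ray from that point along the negated face normal and
 * looks for the nearest other face it hits. Half of that hit distance becomes
 * pa->foffset, i.e. the particle is pushed roughly to the middle of the
 * object's interior, and the offset is further randomized (jitter table or
 * BLI_frand) so particles do not all sit on the same inner surface. If
 * nothing is hit the particle stays on the surface (foffset = 0). */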
  524. static void distribute_children_exec(ParticleTask *thread, ChildParticle *cpa, int p) {
  525. ParticleThreadContext *ctx= thread->ctx;
  526. Object *ob= ctx->sim.ob;
  527. DerivedMesh *dm= ctx->dm;
  528. float orco1[3], co1[3], nor1[3];
  529. float randu, randv;
  530. int cfrom= ctx->cfrom;
  531. int i;
  532. int rng_skip_tot= PSYS_RND_DIST_SKIP; /* count how many rng_* calls won't need skipping */
  533. MFace *mf;
  534. if (ctx->index[p] < 0) {
  535. cpa->num=0;
  536. cpa->fuv[0]=cpa->fuv[1]=cpa->fuv[2]=cpa->fuv[3]=0.0f;
  537. cpa->pa[0]=cpa->pa[1]=cpa->pa[2]=cpa->pa[3]=0;
  538. return;
  539. }
  540. mf= dm->getTessFaceData(dm, ctx->index[p], CD_MFACE);
  541. randu= BLI_rng_get_float(thread->rng);
  542. randv= BLI_rng_get_float(thread->rng);
  543. rng_skip_tot -= 2;
  544. psys_uv_to_w(randu, randv, mf->v4, cpa->fuv);
  545. cpa->num = ctx->index[p];
  546. if (ctx->tree) {
  547. KDTreeNearest ptn[10];
  548. int w,maxw;//, do_seams;
  549. float maxd /*, mind,dd */, totw= 0.0f;
  550. int parent[10];
  551. float pweight[10];
  552. psys_particle_on_dm(dm,cfrom,cpa->num,DMCACHE_ISCHILD,cpa->fuv,cpa->foffset,co1,nor1,NULL,NULL,orco1,NULL);
  553. BKE_mesh_orco_verts_transform((Mesh*)ob->data, &orco1, 1, 1);
  554. maxw = BLI_kdtree_find_nearest_n(ctx->tree,orco1,ptn,3);
  555. maxd=ptn[maxw-1].dist;
  556. /* mind=ptn[0].dist; */ /* UNUSED */
  557. /* the weights here could be done better */
  558. for (w=0; w<maxw; w++) {
  559. parent[w]=ptn[w].index;
  560. pweight[w]=(float)pow(2.0,(double)(-6.0f*ptn[w].dist/maxd));
  561. }
  562. for (;w<10; w++) {
  563. parent[w]=-1;
  564. pweight[w]=0.0f;
  565. }
  566. for (w=0,i=0; w<maxw && i<4; w++) {
  567. if (parent[w]>=0) {
  568. cpa->pa[i]=parent[w];
  569. cpa->w[i]=pweight[w];
  570. totw+=pweight[w];
  571. i++;
  572. }
  573. }
  574. for (;i<4; i++) {
  575. cpa->pa[i]=-1;
  576. cpa->w[i]=0.0f;
  577. }
  578. if (totw > 0.0f) {
  579. for (w = 0; w < 4; w++) {
  580. cpa->w[w] /= totw;
  581. }
  582. }
  583. cpa->parent=cpa->pa[0];
  584. }
  585. if (rng_skip_tot > 0) /* should never be below zero */
  586. BLI_rng_skip(thread->rng, rng_skip_tot);
  587. }
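/* Note: child particles interpolate between up to 4 parents. The kd-tree
 * lookup returns the nearest parents in orco space and each gets a weight
 * that decays exponentially with distance,
 *
 *   w = pow(2.0, -6.0 * dist / maxdist)
 *
 * where maxdist is the distance of the farthest returned neighbour; the
 * weights are then normalized to sum to 1 and the closest parent is also
 * stored as cpa->parent. */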
  588. static void exec_distribute_parent(TaskPool * __restrict UNUSED(pool), void *taskdata, int UNUSED(threadid))
  589. {
  590. ParticleTask *task = taskdata;
  591. ParticleSystem *psys= task->ctx->sim.psys;
  592. ParticleData *pa;
  593. int p;
  594. BLI_rng_skip(task->rng, PSYS_RND_DIST_SKIP * task->begin);
  595. pa= psys->particles + task->begin;
  596. switch (psys->part->from) {
  597. case PART_FROM_FACE:
  598. for (p = task->begin; p < task->end; ++p, ++pa)
  599. distribute_from_faces_exec(task, pa, p);
  600. break;
  601. case PART_FROM_VOLUME:
  602. for (p = task->begin; p < task->end; ++p, ++pa)
  603. distribute_from_volume_exec(task, pa, p);
  604. break;
  605. case PART_FROM_VERT:
  606. for (p = task->begin; p < task->end; ++p, ++pa)
  607. distribute_from_verts_exec(task, pa, p);
  608. break;
  609. }
  610. }
  611. static void exec_distribute_child(TaskPool * __restrict UNUSED(pool), void *taskdata, int UNUSED(threadid))
  612. {
  613. ParticleTask *task = taskdata;
  614. ParticleSystem *psys = task->ctx->sim.psys;
  615. ChildParticle *cpa;
  616. int p;
  617. /* RNG skipping at the beginning */
  618. cpa = psys->child;
  619. for (p = 0; p < task->begin; ++p, ++cpa) {
  620. if (task->ctx->skip) /* simplification skip */
  621. BLI_rng_skip(task->rng, PSYS_RND_DIST_SKIP * task->ctx->skip[p]);
  622. BLI_rng_skip(task->rng, PSYS_RND_DIST_SKIP);
  623. }
  624. for (; p < task->end; ++p, ++cpa) {
  625. if (task->ctx->skip) /* simplification skip */
  626. BLI_rng_skip(task->rng, PSYS_RND_DIST_SKIP * task->ctx->skip[p]);
  627. distribute_children_exec(task, cpa, p);
  628. }
  629. }
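/* Note: both exec_distribute_parent() and exec_distribute_child() advance the
 * task's RNG over all particles that come before task->begin
 * (PSYS_RND_DIST_SKIP draws per particle, plus the simplification skips for
 * children) before doing any work. That way every particle consumes the same
 * RNG values no matter how the range is split across threads, which keeps the
 * distribution deterministic and thread-safe. */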
  630. static int distribute_compare_orig_index(const void *p1, const void *p2, void *user_data)
  631. {
  632. int *orig_index = (int *) user_data;
  633. int index1 = orig_index[*(const int *)p1];
  634. int index2 = orig_index[*(const int *)p2];
  635. if (index1 < index2)
  636. return -1;
  637. else if (index1 == index2) {
  638. /* this pointer comparison appears to make qsort stable for glibc,
  639. * and apparently on solaris too, makes the renders reproducible */
  640. if (p1 < p2)
  641. return -1;
  642. else if (p1 == p2)
  643. return 0;
  644. else
  645. return 1;
  646. }
  647. else
  648. return 1;
  649. }
  650. static void distribute_invalid(Scene *scene, ParticleSystem *psys, int from)
  651. {
  652. if (from == PART_FROM_CHILD) {
  653. ChildParticle *cpa;
  654. int p, totchild = psys_get_tot_child(scene, psys);
  655. if (psys->child && totchild) {
  656. for (p=0,cpa=psys->child; p<totchild; p++,cpa++) {
  657. cpa->fuv[0]=cpa->fuv[1]=cpa->fuv[2]=cpa->fuv[3] = 0.0;
  658. cpa->foffset= 0.0f;
  659. cpa->parent=0;
  660. cpa->pa[0]=cpa->pa[1]=cpa->pa[2]=cpa->pa[3]=0;
  661. cpa->num= -1;
  662. }
  663. }
  664. }
  665. else {
  666. PARTICLE_P;
  667. LOOP_PARTICLES {
  668. pa->fuv[0] = pa->fuv[1] = pa->fuv[2] = pa->fuv[3] = 0.0;
  669. pa->foffset= 0.0f;
  670. pa->num= -1;
  671. }
  672. }
  673. }
  674. /* Creates a distribution of coordinates on a DerivedMesh */
  675. /* This is to denote functionality that does not yet work with mesh - only derived mesh */
  676. static int psys_thread_context_init_distribute(ParticleThreadContext *ctx, ParticleSimulationData *sim, int from)
  677. {
  678. Scene *scene = sim->scene;
  679. DerivedMesh *finaldm = sim->psmd->dm_final;
  680. Object *ob = sim->ob;
  681. ParticleSystem *psys= sim->psys;
  682. ParticleData *pa=0, *tpars= 0;
  683. ParticleSettings *part;
  684. ParticleSeam *seams= 0;
  685. KDTree *tree=0;
  686. DerivedMesh *dm= NULL;
  687. float *jit= NULL;
  688. int i, p=0;
  689. int cfrom=0;
  690. int totelem=0, totpart, *particle_element=0, children=0, totseam=0;
  691. int jitlevel= 1, distr;
  692. float *element_weight=NULL,*jitter_offset=NULL, *vweight=NULL;
  693. float cur, maxweight=0.0, tweight, totweight, inv_totweight, co[3], nor[3], orco[3];
  694. if (ELEM(NULL, ob, psys, psys->part))
  695. return 0;
  696. part=psys->part;
  697. totpart=psys->totpart;
  698. if (totpart==0)
  699. return 0;
  700. if (!finaldm->deformedOnly && !finaldm->getTessFaceDataArray(finaldm, CD_ORIGINDEX)) {
  701. printf("Can't create particles with the current modifier stack, disable destructive modifiers\n");
  702. // XXX error("Can't paint with the current modifier stack, disable destructive modifiers");
  703. return 0;
  704. }
  705. /* XXX This distribution code is totally broken in case from == PART_FROM_CHILD, it's always using finaldm
  706. * even if use_modifier_stack is unset... But making things consistent here would break all existing edited
  707. * hair systems, so better to wait for a complete rewrite.
  708. */
  709. psys_thread_context_init(ctx, sim);
  710. /* First handle special cases */
  711. if (from == PART_FROM_CHILD) {
  712. /* Simple children */
  713. if (part->childtype != PART_CHILD_FACES) {
  714. BLI_srandom(31415926 + psys->seed + psys->child_seed);
  715. distribute_simple_children(scene, ob, finaldm, sim->psmd->dm_deformed, psys);
  716. return 0;
  717. }
  718. }
  719. else {
  720. /* Grid distribution */
  721. if (part->distr==PART_DISTR_GRID && from != PART_FROM_VERT) {
  722. BLI_srandom(31415926 + psys->seed);
  723. if (psys->part->use_modifier_stack) {
  724. dm = finaldm;
  725. }
  726. else {
  727. dm = CDDM_from_mesh((Mesh*)ob->data);
  728. }
  729. DM_ensure_tessface(dm);
  730. distribute_grid(dm,psys);
  731. if (dm != finaldm) {
  732. dm->release(dm);
  733. }
  734. return 0;
  735. }
  736. }
  737. /* Create trees and original coordinates if needed */
  738. if (from == PART_FROM_CHILD) {
  739. distr=PART_DISTR_RAND;
  740. BLI_srandom(31415926 + psys->seed + psys->child_seed);
  741. dm= finaldm;
  742. /* BMESH ONLY */
  743. DM_ensure_tessface(dm);
  744. children=1;
  745. tree=BLI_kdtree_new(totpart);
  746. for (p=0,pa=psys->particles; p<totpart; p++,pa++) {
  747. psys_particle_on_dm(dm,part->from,pa->num,pa->num_dmcache,pa->fuv,pa->foffset,co,nor,0,0,orco,NULL);
  748. BKE_mesh_orco_verts_transform((Mesh*)ob->data, &orco, 1, 1);
  749. BLI_kdtree_insert(tree, p, orco);
  750. }
  751. BLI_kdtree_balance(tree);
  752. totpart = psys_get_tot_child(scene, psys);
  753. cfrom = from = PART_FROM_FACE;
  754. }
  755. else {
  756. distr = part->distr;
  757. BLI_srandom(31415926 + psys->seed);
  758. if (psys->part->use_modifier_stack)
  759. dm = finaldm;
  760. else
  761. dm= CDDM_from_mesh((Mesh*)ob->data);
  762. DM_ensure_tessface(dm);
  763. /* we need orco for consistent distributions */
  764. if (!CustomData_has_layer(&dm->vertData, CD_ORCO))
  765. DM_add_vert_layer(dm, CD_ORCO, CD_ASSIGN, BKE_mesh_orco_verts_get(ob));
  766. if (from == PART_FROM_VERT) {
  767. MVert *mv= dm->getVertDataArray(dm, CD_MVERT);
  768. float (*orcodata)[3] = dm->getVertDataArray(dm, CD_ORCO);
  769. int totvert = dm->getNumVerts(dm);
  770. tree=BLI_kdtree_new(totvert);
  771. for (p=0; p<totvert; p++) {
  772. if (orcodata) {
  773. copy_v3_v3(co,orcodata[p]);
  774. BKE_mesh_orco_verts_transform((Mesh*)ob->data, &co, 1, 1);
  775. }
  776. else
  777. copy_v3_v3(co,mv[p].co);
  778. BLI_kdtree_insert(tree, p, co);
  779. }
  780. BLI_kdtree_balance(tree);
  781. }
  782. }
  783. /* Get total number of emission elements and allocate needed arrays */
  784. totelem = (from == PART_FROM_VERT) ? dm->getNumVerts(dm) : dm->getNumTessFaces(dm);
  785. if (totelem == 0) {
  786. distribute_invalid(scene, psys, children ? PART_FROM_CHILD : 0);
  787. if (G.debug & G_DEBUG)
  788. fprintf(stderr,"Particle distribution error: Nothing to emit from!\n");
  789. if (dm != finaldm) dm->release(dm);
  790. BLI_kdtree_free(tree);
  791. return 0;
  792. }
  793. element_weight = MEM_callocN(sizeof(float)*totelem, "particle_distribution_weights");
  794. particle_element= MEM_callocN(sizeof(int)*totpart, "particle_distribution_indexes");
  795. jitter_offset = MEM_callocN(sizeof(float)*totelem, "particle_distribution_jitoff");
  796. /* Calculate weights from face areas */
  797. if ((part->flag&PART_EDISTR || children) && from != PART_FROM_VERT) {
  798. MVert *v1, *v2, *v3, *v4;
  799. float totarea=0.f, co1[3], co2[3], co3[3], co4[3];
  800. float (*orcodata)[3];
  801. orcodata= dm->getVertDataArray(dm, CD_ORCO);
  802. for (i=0; i<totelem; i++) {
  803. MFace *mf=dm->getTessFaceData(dm,i,CD_MFACE);
  804. if (orcodata) {
  805. copy_v3_v3(co1, orcodata[mf->v1]);
  806. copy_v3_v3(co2, orcodata[mf->v2]);
  807. copy_v3_v3(co3, orcodata[mf->v3]);
  808. BKE_mesh_orco_verts_transform((Mesh*)ob->data, &co1, 1, 1);
  809. BKE_mesh_orco_verts_transform((Mesh*)ob->data, &co2, 1, 1);
  810. BKE_mesh_orco_verts_transform((Mesh*)ob->data, &co3, 1, 1);
  811. if (mf->v4) {
  812. copy_v3_v3(co4, orcodata[mf->v4]);
  813. BKE_mesh_orco_verts_transform((Mesh*)ob->data, &co4, 1, 1);
  814. }
  815. }
  816. else {
  817. v1= (MVert*)dm->getVertData(dm,mf->v1,CD_MVERT);
  818. v2= (MVert*)dm->getVertData(dm,mf->v2,CD_MVERT);
  819. v3= (MVert*)dm->getVertData(dm,mf->v3,CD_MVERT);
  820. copy_v3_v3(co1, v1->co);
  821. copy_v3_v3(co2, v2->co);
  822. copy_v3_v3(co3, v3->co);
  823. if (mf->v4) {
  824. v4= (MVert*)dm->getVertData(dm,mf->v4,CD_MVERT);
  825. copy_v3_v3(co4, v4->co);
  826. }
  827. }
  828. cur = mf->v4 ? area_quad_v3(co1, co2, co3, co4) : area_tri_v3(co1, co2, co3);
  829. if (cur > maxweight)
  830. maxweight = cur;
  831. element_weight[i] = cur;
  832. totarea += cur;
  833. }
  834. for (i=0; i<totelem; i++)
  835. element_weight[i] /= totarea;
  836. maxweight /= totarea;
  837. }
  838. else {
  839. float min=1.0f/(float)(MIN2(totelem,totpart));
  840. for (i=0; i<totelem; i++)
  841. element_weight[i]=min;
  842. maxweight=min;
  843. }
  844. /* Calculate weights from vgroup */
  845. vweight = psys_cache_vgroup(dm,psys,PSYS_VG_DENSITY);
  846. if (vweight) {
  847. if (from==PART_FROM_VERT) {
  848. for (i=0;i<totelem; i++)
  849. element_weight[i]*=vweight[i];
  850. }
  851. else { /* PART_FROM_FACE / PART_FROM_VOLUME */
  852. for (i=0;i<totelem; i++) {
  853. MFace *mf=dm->getTessFaceData(dm,i,CD_MFACE);
  854. tweight = vweight[mf->v1] + vweight[mf->v2] + vweight[mf->v3];
  855. if (mf->v4) {
  856. tweight += vweight[mf->v4];
  857. tweight /= 4.0f;
  858. }
  859. else {
  860. tweight /= 3.0f;
  861. }
  862. element_weight[i]*=tweight;
  863. }
  864. }
  865. MEM_freeN(vweight);
  866. }
  867. /* Calculate total weight of all elements */
  868. int totmapped = 0;
  869. totweight = 0.0f;
  870. for (i = 0; i < totelem; i++) {
  871. if (element_weight[i] > 0.0f) {
  872. totmapped++;
  873. totweight += element_weight[i];
  874. }
  875. }
  876. if (totmapped == 0) {
  877. /* We are not allowed to distribute particles anywhere... */
  878. return 0;
  879. }
  880. inv_totweight = 1.0f / totweight;
  881. /* Calculate cumulative weights.
  882. * We remove all null-weighted elements from element_sum, and create a new mapping
  883. * 'active'_elem_index -> orig_elem_index.
  884. * This greatly simplifies the filtering of zero-weighted items, and can be much more efficient,
  885. * especially in the random case (it reduces the size of the binary-searched array a lot)...
  886. */
  887. float *element_sum = MEM_mallocN(sizeof(*element_sum) * totmapped, __func__);
  888. int *element_map = MEM_mallocN(sizeof(*element_map) * totmapped, __func__);
  889. int i_mapped = 0;
  890. for (i = 0; i < totelem && element_weight[i] == 0.0f; i++);
  891. element_sum[i_mapped] = element_weight[i] * inv_totweight;
  892. element_map[i_mapped] = i;
  893. i_mapped++;
  894. for (i++; i < totelem; i++) {
  895. if (element_weight[i] > 0.0f) {
  896. element_sum[i_mapped] = element_sum[i_mapped - 1] + element_weight[i] * inv_totweight;
  897. /* Skip elements which weight is so small that it does not affect the sum. */
  898. if (element_sum[i_mapped] > element_sum[i_mapped - 1]) {
  899. element_map[i_mapped] = i;
  900. i_mapped++;
  901. }
  902. }
  903. }
  904. totmapped = i_mapped;
  905. /* Finally assign elements to particles */
  906. if ((part->flag & PART_TRAND) || (part->simplify_flag & PART_SIMPLIFY_ENABLE)) {
  907. for (p = 0; p < totpart; p++) {
  908. /* In theory element_sum[totmapped - 1] should be 1.0,
  909. * but due to float errors this is not necessarily always true, so scale pos accordingly. */
  910. const float pos = BLI_frand() * element_sum[totmapped - 1];
  911. const int eidx = distribute_binary_search(element_sum, totmapped, pos);
  912. particle_element[p] = element_map[eidx];
  913. BLI_assert(pos <= element_sum[eidx]);
  914. BLI_assert(eidx ? (pos > element_sum[eidx - 1]) : (pos >= 0.0f));
  915. jitter_offset[particle_element[p]] = pos;
  916. }
  917. }
  918. else {
  919. double step, pos;
  920. step = (totpart < 2) ? 0.5 : 1.0 / (double)totpart;
  921. /* This is to address tricky issues with vertex-emitting when the user tries (and expects) an exact 1:1
  922. * vert/particle distribution (see T47983 and its two example files). It allows us to consider pos as
  923. * the 'midpoint between v and v+1' (or 'p and p+1', depending on whether we have more vertices than particles or not),
  924. * and avoid stumbling over float imprecision in element_sum. */
  925. if (from == PART_FROM_VERT) {
  926. pos = (totpart < totmapped) ? 0.5 / (double)totmapped : step * 0.5; /* We choose the smaller step. */
  927. }
  928. else {
  929. pos = 0.0;
  930. }
  931. for (i = 0, p = 0; p < totpart; p++, pos += step) {
  932. for ( ; (i < totmapped - 1) && (pos > (double)element_sum[i]); i++);
  933. particle_element[p] = element_map[i];
  934. jitter_offset[particle_element[p]] = pos;
  935. }
  936. }
  937. MEM_freeN(element_sum);
  938. MEM_freeN(element_map);
  939. /* For hair, sort by origindex (allows optimizations in rendering), */
  940. /* however with virtual parents the children need to be in random order. */
  941. if (part->type == PART_HAIR && !(part->childtype==PART_CHILD_FACES && part->parents != 0.0f)) {
  942. int *orig_index = NULL;
  943. if (from == PART_FROM_VERT) {
  944. if (dm->numVertData)
  945. orig_index = dm->getVertDataArray(dm, CD_ORIGINDEX);
  946. }
  947. else {
  948. if (dm->numTessFaceData)
  949. orig_index = dm->getTessFaceDataArray(dm, CD_ORIGINDEX);
  950. }
  951. if (orig_index) {
  952. BLI_qsort_r(particle_element, totpart, sizeof(int), distribute_compare_orig_index, orig_index);
  953. }
  954. }
  955. /* Create jittering if needed */
  956. if (distr==PART_DISTR_JIT && ELEM(from,PART_FROM_FACE,PART_FROM_VOLUME)) {
  957. jitlevel= part->userjit;
  958. if (jitlevel == 0) {
  959. jitlevel= totpart/totelem;
  960. if (part->flag & PART_EDISTR) jitlevel*= 2; /* looks better in general, not very scientific */
  961. if (jitlevel<3) jitlevel= 3;
  962. }
  963. jit= MEM_callocN((2+ jitlevel*2)*sizeof(float), "jit");
  964. /* for small numbers of particles we use regular jitter since it looks
  965. * a bit better, for larger numbers we switch to the Hammersley sequence
  966. * because it is much faster */
  967. if (jitlevel < 25)
  968. init_mv_jit(jit, jitlevel, psys->seed, part->jitfac);
  969. else
  970. hammersley_create(jit, jitlevel+1, psys->seed, part->jitfac);
  971. BLI_array_randomize(jit, 2*sizeof(float), jitlevel, psys->seed); /* for custom jit or even distribution */
  972. }
  973. /* Setup things for threaded distribution */
  974. ctx->tree= tree;
  975. ctx->seams= seams;
  976. ctx->totseam= totseam;
  977. ctx->sim.psys= psys;
  978. ctx->index= particle_element;
  979. ctx->jit= jit;
  980. ctx->jitlevel= jitlevel;
  981. ctx->jitoff= jitter_offset;
  982. ctx->weight= element_weight;
  983. ctx->maxweight= maxweight;
  984. ctx->cfrom= cfrom;
  985. ctx->distr= distr;
  986. ctx->dm= dm;
  987. ctx->tpars= tpars;
  988. if (children) {
  989. totpart= psys_render_simplify_distribution(ctx, totpart);
  990. alloc_child_particles(psys, totpart);
  991. }
  992. return 1;
  993. }
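/* Note: psys_thread_context_init_distribute() essentially builds a discrete
 * probability distribution over the emission elements and samples it once per
 * particle. Roughly, under the names used above:
 *
 *   element_weight[i]    face area (or a constant) times vertex-group density
 *   element_sum[j]       cumulative, normalized weights of non-zero elements
 *   element_map[j]       maps the compacted index j back to the element index
 *   particle_element[p]  element chosen for particle p, either by binary
 *                        search on a random value (PART_TRAND / simplify) or
 *                        by stepping evenly through the cumulative sum
 *
 * The resulting indices, jitter table and kd-tree are stored in ctx for the
 * threaded *_exec functions to consume. */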
  994. static void psys_task_init_distribute(ParticleTask *task, ParticleSimulationData *sim)
  995. {
  996. /* init random number generator */
  997. int seed = 31415926 + sim->psys->seed;
  998. task->rng = BLI_rng_new(seed);
  999. }
  1000. static void distribute_particles_on_dm(ParticleSimulationData *sim, int from)
  1001. {
  1002. TaskScheduler *task_scheduler;
  1003. TaskPool *task_pool;
  1004. ParticleThreadContext ctx;
  1005. ParticleTask *tasks;
  1006. DerivedMesh *finaldm = sim->psmd->dm_final;
  1007. int i, totpart, numtasks;
  1008. /* create a task pool for distribution tasks */
  1009. if (!psys_thread_context_init_distribute(&ctx, sim, from))
  1010. return;
  1011. task_scheduler = BLI_task_scheduler_get();
  1012. task_pool = BLI_task_pool_create(task_scheduler, &ctx);
  1013. totpart = (from == PART_FROM_CHILD ? sim->psys->totchild : sim->psys->totpart);
  1014. psys_tasks_create(&ctx, 0, totpart, &tasks, &numtasks);
  1015. for (i = 0; i < numtasks; ++i) {
  1016. ParticleTask *task = &tasks[i];
  1017. psys_task_init_distribute(task, sim);
  1018. if (from == PART_FROM_CHILD)
  1019. BLI_task_pool_push(task_pool, exec_distribute_child, task, false, TASK_PRIORITY_LOW);
  1020. else
  1021. BLI_task_pool_push(task_pool, exec_distribute_parent, task, false, TASK_PRIORITY_LOW);
  1022. }
  1023. BLI_task_pool_work_and_wait(task_pool);
  1024. BLI_task_pool_free(task_pool);
  1025. psys_calc_dmcache(sim->ob, finaldm, sim->psmd->dm_deformed, sim->psys);
  1026. if (ctx.dm != finaldm)
  1027. ctx.dm->release(ctx.dm);
  1028. psys_tasks_free(tasks, numtasks);
  1029. psys_thread_context_free(&ctx);
  1030. }
  1031. /* ready for future use, to emit particles without geometry */
  1032. static void distribute_particles_on_shape(ParticleSimulationData *sim, int UNUSED(from))
  1033. {
  1034. distribute_invalid(sim->scene, sim->psys, 0);
  1035. fprintf(stderr,"Shape emission not yet possible!\n");
  1036. }
  1037. void distribute_particles(ParticleSimulationData *sim, int from)
  1038. {
  1039. PARTICLE_PSMD;
  1040. int distr_error=0;
  1041. if (psmd) {
  1042. if (psmd->dm_final)
  1043. distribute_particles_on_dm(sim, from);
  1044. else
  1045. distr_error=1;
  1046. }
  1047. else
  1048. distribute_particles_on_shape(sim, from);
  1049. if (distr_error) {
  1050. distribute_invalid(sim->scene, sim->psys, from);
  1051. fprintf(stderr,"Particle distribution error!\n");
  1052. }
  1053. }
  1054. /* ======== Simplify ======== */
  1055. static float psys_render_viewport_falloff(double rate, float dist, float width)
  1056. {
  1057. return pow(rate, dist / width);
  1058. }
  1059. static float psys_render_projected_area(ParticleSystem *psys, const float center[3], float area, double vprate, float *viewport)
  1060. {
  1061. ParticleRenderData *data = psys->renderdata;
  1062. float co[4], view[3], ortho1[3], ortho2[3], w, dx, dy, radius;
  1063. /* transform to view space */
  1064. copy_v3_v3(co, center);
  1065. co[3] = 1.0f;
  1066. mul_m4_v4(data->viewmat, co);
  1067. /* compute two vectors orthogonal to view vector */
  1068. normalize_v3_v3(view, co);
  1069. ortho_basis_v3v3_v3(ortho1, ortho2, view);
  1070. /* compute on screen minification */
  1071. w = co[2] * data->winmat[2][3] + data->winmat[3][3];
  1072. dx = data->winx * ortho2[0] * data->winmat[0][0];
  1073. dy = data->winy * ortho2[1] * data->winmat[1][1];
  1074. w = sqrtf(dx * dx + dy * dy) / w;
  1075. /* w squared because we are working with area */
  1076. area = area * w * w;
  1077. /* viewport of the screen test */
  1078. /* project point on screen */
  1079. mul_m4_v4(data->winmat, co);
  1080. if (co[3] != 0.0f) {
  1081. co[0] = 0.5f * data->winx * (1.0f + co[0] / co[3]);
  1082. co[1] = 0.5f * data->winy * (1.0f + co[1] / co[3]);
  1083. }
  1084. /* screen space radius */
  1085. radius = sqrtf(area / (float)M_PI);
  1086. /* make smaller using falloff once over screen edge */
  1087. *viewport = 1.0f;
  1088. if (co[0] + radius < 0.0f)
  1089. *viewport *= psys_render_viewport_falloff(vprate, -(co[0] + radius), data->winx);
  1090. else if (co[0] - radius > data->winx)
  1091. *viewport *= psys_render_viewport_falloff(vprate, (co[0] - radius) - data->winx, data->winx);
  1092. if (co[1] + radius < 0.0f)
  1093. *viewport *= psys_render_viewport_falloff(vprate, -(co[1] + radius), data->winy);
  1094. else if (co[1] - radius > data->winy)
  1095. *viewport *= psys_render_viewport_falloff(vprate, (co[1] - radius) - data->winy, data->winy);
  1096. return area;
  1097. }
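/* Note: psys_render_projected_area() estimates how large "area" (given in
 * object-space units around "center") appears on screen. It measures the
 * screen-space scale of a direction orthogonal to the view vector, roughly
 *
 *   w = sqrt(dx*dx + dy*dy) / (co[2] * winmat[2][3] + winmat[3][3])
 *
 * and scales the area by w*w. The *viewport factor then fades the result with
 * psys_render_viewport_falloff() once the projected circle of that area falls
 * outside the window, so off-screen faces can be simplified more heavily. */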
  1098. /* BMESH_TODO, for orig face data, we need to use MPoly */
  1099. static int psys_render_simplify_distribution(ParticleThreadContext *ctx, int tot)
  1100. {
  1101. DerivedMesh *dm = ctx->dm;
  1102. Mesh *me = (Mesh *)(ctx->sim.ob->data);
  1103. MFace *mf, *mface;
  1104. MVert *mvert;
  1105. ParticleRenderData *data;
  1106. ParticleRenderElem *elems, *elem;
  1107. ParticleSettings *part = ctx->sim.psys->part;
  1108. float *facearea, (*facecenter)[3], size[3], fac, powrate, scaleclamp;
  1109. float co1[3], co2[3], co3[3], co4[3], lambda, arearatio, t, area, viewport;
  1110. double vprate;
  1111. int *facetotvert;
  1112. int a, b, totorigface, totface, newtot, skipped;
  1113. /* double lookup */
  1114. const int *index_mf_to_mpoly;
  1115. const int *index_mp_to_orig;
  1116. if (part->ren_as != PART_DRAW_PATH || !(part->draw & PART_DRAW_REN_STRAND))
  1117. return tot;
  1118. if (!ctx->sim.psys->renderdata)
  1119. return tot;
  1120. data = ctx->sim.psys->renderdata;
  1121. if (data->timeoffset)
  1122. return 0;
  1123. if (!(part->simplify_flag & PART_SIMPLIFY_ENABLE))
  1124. return tot;
  1125. mvert = dm->getVertArray(dm);
  1126. mface = dm->getTessFaceArray(dm);
  1127. totface = dm->getNumTessFaces(dm);
  1128. totorigface = me->totpoly;
  1129. if (totface == 0 || totorigface == 0)
  1130. return tot;
  1131. index_mf_to_mpoly = dm->getTessFaceDataArray(dm, CD_ORIGINDEX);
  1132. index_mp_to_orig = dm->getPolyDataArray(dm, CD_ORIGINDEX);
  1133. if (index_mf_to_mpoly == NULL) {
  1134. index_mp_to_orig = NULL;
  1135. }
  1136. facearea = MEM_callocN(sizeof(float) * totorigface, "SimplifyFaceArea");
  1137. facecenter = MEM_callocN(sizeof(float[3]) * totorigface, "SimplifyFaceCenter");
  1138. facetotvert = MEM_callocN(sizeof(int) * totorigface, "SimplifyFaceArea");
  1139. elems = MEM_callocN(sizeof(ParticleRenderElem) * totorigface, "SimplifyFaceElem");
  1140. if (data->elems)
  1141. MEM_freeN(data->elems);
  1142. data->do_simplify = true;
  1143. data->elems = elems;
  1144. data->index_mf_to_mpoly = index_mf_to_mpoly;
  1145. data->index_mp_to_orig = index_mp_to_orig;
  1146. /* compute number of children per original face */
  1147. for (a = 0; a < tot; a++) {
  1148. b = (index_mf_to_mpoly) ? DM_origindex_mface_mpoly(index_mf_to_mpoly, index_mp_to_orig, ctx->index[a]) : ctx->index[a];
  1149. if (b != ORIGINDEX_NONE) {
  1150. elems[b].totchild++;
  1151. }
  1152. }
  1153. /* compute areas and centers of original faces */
  1154. for (mf = mface, a = 0; a < totface; a++, mf++) {
  1155. b = (index_mf_to_mpoly) ? DM_origindex_mface_mpoly(index_mf_to_mpoly, index_mp_to_orig, a) : a;
  1156. if (b != ORIGINDEX_NONE) {
  1157. copy_v3_v3(co1, mvert[mf->v1].co);
  1158. copy_v3_v3(co2, mvert[mf->v2].co);
  1159. copy_v3_v3(co3, mvert[mf->v3].co);
  1160. add_v3_v3(facecenter[b], co1);
  1161. add_v3_v3(facecenter[b], co2);
  1162. add_v3_v3(facecenter[b], co3);
  1163. if (mf->v4) {
  1164. copy_v3_v3(co4, mvert[mf->v4].co);
  1165. add_v3_v3(facecenter[b], co4);
  1166. facearea[b] += area_quad_v3(co1, co2, co3, co4);
  1167. facetotvert[b] += 4;
  1168. }
  1169. else {
  1170. facearea[b] += area_tri_v3(co1, co2, co3);
  1171. facetotvert[b] += 3;
  1172. }
  1173. }
  1174. }
  1175. for (a = 0; a < totorigface; a++)
  1176. if (facetotvert[a] > 0)
  1177. mul_v3_fl(facecenter[a], 1.0f / facetotvert[a]);
  1178. /* for conversion from BU area / pixel area to reference screen size */
  1179. BKE_mesh_texspace_get(me, 0, 0, size);
  1180. fac = ((size[0] + size[1] + size[2]) / 3.0f) / part->simplify_refsize;
  1181. fac = fac * fac;
  1182. powrate = log(0.5f) / log(part->simplify_rate * 0.5f);
  1183. if (part->simplify_flag & PART_SIMPLIFY_VIEWPORT)
  1184. vprate = pow(1.0f - part->simplify_viewport, 5.0);
  1185. else
  1186. vprate = 1.0;
  1187. /* set simplification parameters per original face */
  1188. for (a = 0, elem = elems; a < totorigface; a++, elem++) {
  1189. area = psys_render_projected_area(ctx->sim.psys, facecenter[a], facearea[a], vprate, &viewport);
  1190. arearatio = fac * area / facearea[a];
  1191. if ((arearatio < 1.0f || viewport < 1.0f) && elem->totchild) {
  1192. /* lambda is percentage of elements to keep */
  1193. lambda = (arearatio < 1.0f) ? powf(arearatio, powrate) : 1.0f;
  1194. lambda *= viewport;
  1195. lambda = MAX2(lambda, 1.0f / elem->totchild);
  1196. /* compute transition region */
  1197. t = part->simplify_transition;
  1198. elem->t = (lambda - t < 0.0f) ? lambda : (lambda + t > 1.0f) ? 1.0f - lambda : t;
  1199. elem->reduce = 1;
  1200. /* scale at end and beginning of the transition region */
  1201. elem->scalemax = (lambda + t < 1.0f) ? 1.0f / lambda : 1.0f / (1.0f - elem->t * elem->t / t);
  1202. elem->scalemin = (lambda + t < 1.0f) ? 0.0f : elem->scalemax * (1.0f - elem->t / t);
  1203. elem->scalemin = sqrtf(elem->scalemin);
  1204. elem->scalemax = sqrtf(elem->scalemax);
  1205. /* clamp scaling */
  1206. scaleclamp = (float)min_ii(elem->totchild, 10);
  1207. elem->scalemin = MIN2(scaleclamp, elem->scalemin);
  1208. elem->scalemax = MIN2(scaleclamp, elem->scalemax);
  1209. /* extend lambda to include transition */
  1210. lambda = lambda + elem->t;
  1211. if (lambda > 1.0f)
  1212. lambda = 1.0f;
  1213. }
  1214. else {
  1215. lambda = arearatio;
  1216. elem->scalemax = 1.0f; //sqrt(lambda);
  1217. elem->scalemin = 1.0f; //sqrt(lambda);
  1218. elem->reduce = 0;
  1219. }
  1220. elem->lambda = lambda;
  1221. elem->scalemin = sqrtf(elem->scalemin);
  1222. elem->scalemax = sqrtf(elem->scalemax);
  1223. elem->curchild = 0;
  1224. }
  1225. MEM_freeN(facearea);
  1226. MEM_freeN(facecenter);
  1227. MEM_freeN(facetotvert);
  1228. /* move indices and set random number skipping */
  1229. ctx->skip = MEM_callocN(sizeof(int) * tot, "SimplificationSkip");
  1230. skipped = 0;
  1231. for (a = 0, newtot = 0; a < tot; a++) {
  1232. b = (index_mf_to_mpoly) ? DM_origindex_mface_mpoly(index_mf_to_mpoly, index_mp_to_orig, ctx->index[a]) : ctx->index[a];
  1233. if (b != ORIGINDEX_NONE) {
  1234. if (elems[b].curchild++ < ceil(elems[b].lambda * elems[b].totchild)) {
  1235. ctx->index[newtot] = ctx->index[a];
  1236. ctx->skip[newtot] = skipped;
  1237. skipped = 0;
  1238. newtot++;
  1239. }
  1240. else skipped++;
  1241. }
  1242. else skipped++;
  1243. }
  1244. for (a = 0, elem = elems; a < totorigface; a++, elem++)
  1245. elem->curchild = 0;
  1246. return newtot;
  1247. }