NUM2.cpp 79 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
727782779278027812782278327842785278627872788278927902791279227932794279527962797279827992800280128022803280428052806280728082809281028112812281328142815281628172818281928202821282228232824282528262827282828292830283128322833283428352836283728382839284028412842284328442845284628472848284928502851285228532854285528562857285828592860286128622863286428652866286728682869287028712872287328742875287628772878287928802881288228832884288528862887288828892890289128922893289428952896289728982899290029012902290329042905290629072908290929102911291229132914291529162917291829192920292129222923292429252926292729282929293029312932293329342935293629372938293929402941294229432944294529462947294829492950295129522953295429552956295729582959296029612962296329642965296629672968296929702971297229732974297529762977297829792980
  1. /* NUM2.cpp
  2. *
  3. * Copyright (C) 1993-2018 David Weenink, Paul Boersma 2017
  4. *
  5. * This code is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License as published by
  7. * the Free Software Foundation; either version 2 of the License, or (at
  8. * your option) any later version.
  9. *
  10. * This code is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  13. * General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this work. If not, see <http://www.gnu.org/licenses/>.
  17. */
  18. /*
  19. djmw 20020819 GPL header
  20. djmw 20020819 Split nonGPL part off.
  21. djmw 20001109 Changed stop criteria in NUMsvdcmp and NUMtqli.
  22. djmw 20020819 Split into GPL and nonGPL part.
  23. djmw 20021008 Removed SVD_sort.
  24. djmw 20030619 Removed calls to NRC svd-routines.
  25. djmw 20030623 Removed tqli en tred calls.
  26. djmw 20030703 Replaced NUMincompleteBeta with gsl_sf_beta_inc.
  27. djmw 20030710 NUMminimize_brent now also returns the minimum function value.
  28. djmw 20030731 NUMridders: better approximation for small d.
  29. NUMinvFisherQ better approximation for p < 0.5
  30. djmw 20030813 Added NUMmad and NUMstatistics_huber.
  31. djmw 20030825 Replaced gsl_sf_beta_inc with NUMincompleteBeta
  32. pb 20030828 Improvements for invFisherQ, ridders, studentP, studentQ,
  33. invStudentQ, invChiSquareQ: modifications for 'undefined' return values.
  34. djmw 20030830 Corrected a bug in NUMtriangularfilter_amplitude
  35. djmw 20031111 Added NUMdmatrix_transpose, NUMdmatrix_printMatlabForm
  36. djmw 20040105 Added NUMmahalanobisDistance_chi
  37. djmw 20040303 Added NUMstring_containsPrintableCharacter.
  38. djmw 20050406 NUMprocrutus->NUMprocrustes
  39. djmw 20060319 NUMinverse_cholesky: calculation of determinant is made optional
  40. djmw 20060517 Added NUMregexp_compile
  41. djmw 20060518 Treat NULL string as empty string in strs_replace_regexp/literal. Don't accept empty search in replace_regexStr
  42. djmw 20060626 Extra NULL argument for ExecRE.
  43. djmw 20070302 NUMclipLineWithinRectangle
  44. djmw 20070614 updated to version 1.30 of regular expressions.
  45. djmw 20071022 Removed function NUMfvector_moment2.
  46. djmw 20071201 Melder_warning<n>
  47. djmw 20080107 Changed assertion to "npoints > 0" in NUMcosinesTable
  48. djmw 20080110 Corrected some bugs in replace_regexStr
  49. djmw 20080122 Bug in replace_regexStr
  50. djmw 20080317 +NUMsinc
  51. pb 20080410 FisherQ from gsl
  52. djmw 20090630 NUMlogNormalP/Q from gsl
  53. djmw 20090707 Rename NUMinverse_cholesky to NUMlowerCholeskyInverse,
  54. +NUMcovarianceFromColumnCentredMatrix, +NUMmultivariateKurtosis
  55. djmw 20100311 +NUMsolveQuadraticEquation
  56. djmw 20100426 replace wcstok by Melder_wcstok
  57. djmw 20101209 removed NUMwcscmp is Melder_wcscmp now
  58. djmw 20110304 Thing_new
  59. djmw 20111110 use autostringvector
  60. */
  61. #include "SVD.h"
  62. #include "Eigen.h"
  63. #include "NUMclapack.h"
  64. #include "NUM2.h"
  65. #include "NUMmachar.h"
  66. #include "melder.h"
  67. #include "gsl_randist.h"
  68. #include "gsl_errno.h"
  69. #include "gsl_sf_bessel.h"
  70. #include "gsl_sf_gamma.h"
  71. #include "gsl_sf_erf.h"
  72. #include "gsl_sf_trig.h"
  73. #include "gsl_poly.h"
  74. #include "gsl_cdf.h"
  75. #define SIGN(a,b) ((b < 0) ? -fabs(a) : fabs(a))
/* Parameter record for a one-degree-of-freedom distribution function.
   NOTE(review): presumably passed through a void* to a root finder when
   inverting a cumulative distribution (e.g. Student t) — confirm at the use site. */
struct pdf1_struct {
	double p;    // probability value to match
	double df;   // degrees of freedom
};
/* Parameter record for a two-degrees-of-freedom distribution function.
   NOTE(review): presumably used when inverting an F-like distribution
   (two df parameters) via a root finder — confirm at the use site. */
struct pdf2_struct {
	double p;     // probability value to match
	double df1;   // numerator degrees of freedom
	double df2;   // denominator degrees of freedom
};
  85. void MATprintMatlabForm (constMAT m, conststring32 name) {
  86. integer npc = 5;
  87. ldiv_t n = ldiv (m.ncol, npc);
  88. MelderInfo_open ();
  89. MelderInfo_write (name, U"= [");
  90. for (integer i = 1; i <= m.nrow; i ++) {
  91. for (integer j = 1; j <= n.quot; j ++) {
  92. for (integer k = 1; k <= npc; k ++) {
  93. MelderInfo_write (m [i] [(j - 1) * npc + k], (k < npc ? U", " : U""));
  94. }
  95. MelderInfo_write (j < n.quot ? U",\n" : U"");
  96. }
  97. for (integer k = 1; k <= n.rem; k ++) {
  98. MelderInfo_write (m [i] [n.quot * npc + k], (k < n.rem ? U", " : U""));
  99. }
  100. MelderInfo_write (i < m.nrow ? U";\n" : U"];\n");
  101. }
  102. MelderInfo_close ();
  103. }
  104. void MATnormalizeColumns_inplace (MAT a, double power, double norm) {
  105. Melder_assert (norm > 0.0);
  106. autoVEC column = VECraw (a.nrow);
  107. for (integer icol = 1; icol <= a.ncol; icol ++) {
  108. for (integer irow = 1; irow <= column.size; irow ++)
  109. column [irow] = a [irow] [icol];
  110. VEC_normalize_inplace (column.get(), power, norm);
  111. for (integer irow = 1; irow <= column.size; irow ++)
  112. a [irow] [icol] = column [irow];
  113. }
  114. }
  115. void NUMaverageColumns (double **a, integer rb, integer re, integer cb, integer ce) {
  116. integer n = re - rb + 1;
  117. if (n < 2) {
  118. return;
  119. }
  120. for (integer j = cb; j <= ce; j ++) {
  121. longdouble ave = 0.0;
  122. for (integer i = rb; i <= re; i ++) {
  123. ave += a [i] [j];
  124. }
  125. ave /= n;
  126. for (integer i = rb; i <= re; i ++) {
  127. a [i] [j] = (double) ave;
  128. }
  129. }
  130. }
  131. void VECsmoothByMovingAverage_preallocated (VEC out, constVEC in, integer window) {
  132. Melder_assert (out.size == in.size);
  133. for (integer i = 1; i <= out.size; i ++) {
  134. integer jfrom = i - window / 2, jto = i + window / 2;
  135. if (window % 2 == 0) {
  136. jto --;
  137. }
  138. jfrom = jfrom < 1 ? 1 : jfrom;
  139. jto = jto > out.size ? out.size : jto;
  140. out [i] = 0.0;
  141. for (integer j = jfrom; j <= jto; j ++) {
  142. out [i] += in [j];
  143. }
  144. out [i] /= jto - jfrom + 1;
  145. }
  146. }
  147. autoMAT MATcovarianceFromColumnCentredMatrix (constMAT x, integer ndf) {
  148. Melder_require (ndf >= 0 && x.nrow - ndf > 0, U"Invalid arguments.");
  149. autoMAT covar = MATmul_tn (x, x);
  150. MATmultiply_inplace (covar.get(), 1.0 / (x.nrow - ndf));
  151. return covar;
  152. }
double NUMmultivariateKurtosis (constMAT m, int method) {
	// Multivariate kurtosis of the rows of m; returns `undefined` when there are
	// fewer than 5 rows or when `method` is not 1 (only method 1 is implemented).
	double kurt = undefined;
	if (m.nrow < 5) {
		return kurt;
	}
	autoMAT x = MATcopy (m);
	autoVEC mean = VECraw (x.ncol);
	VECcolumnMeans_preallocated (mean.get(), x.get());
	MATsubtract_inplace (x.get(), mean.get());   // column-centre the copy
	autoMAT covar = MATcovarianceFromColumnCentredMatrix (x.get(), 1);
	if (method == 1) { // Schott (2001, page 33)
		kurt = 0.0;
		for (integer l = 1; l <= x.ncol; l ++) {
			double zl = 0.0, wl, sll2 = covar [l] [l] * covar [l] [l];   // sll2 = variance squared
			for (integer j = 1; j <= x.nrow; j ++) {
				// NOTE(review): x was already column-centred above, so subtracting
				// mean [l] again looks like double centring — verify against Schott (2001).
				double d = x [j] [l] - mean [l], d2 = d * d;
				zl += d2 * d2;   // sum of fourth powers of deviations
			}
			zl = (zl - 6.0 * sll2) / (x.nrow - 4);
			wl = (sll2 - zl / x.nrow) * x.nrow / (x.nrow - 1);
			kurt += zl / wl;
		}
		kurt = kurt / (3 * x.ncol) - 1.0;
	}
	return kurt;
}
  179. void eigenSort (double d [], double **v, integer n, int sort) {
  180. if (sort == 0) {
  181. return;
  182. }
  183. for (integer i = 1; i < n; i ++) {
  184. integer k;
  185. double temp = d [k = i];
  186. if (sort > 0) {
  187. for (integer j = i + 1; j <= n; j ++) {
  188. if (d [j] > temp) {
  189. temp = d [k = j];
  190. }
  191. }
  192. } else {
  193. for (integer j = i + 1; j <= n; j ++) {
  194. if (d [j] < temp) {
  195. temp = d [k = j];
  196. }
  197. }
  198. }
  199. if (k != i) {
  200. d [k] = d [i];
  201. d [i] = temp;
  202. if (v) {
  203. for (integer j = 1; j <= n; j ++) {
  204. temp = v [j] [i];
  205. v [j] [i] = v [j] [k];
  206. v [j] [k] = temp;
  207. }
  208. }
  209. }
  210. }
  211. }
  212. /*
  213. The following algorithm for monotone regression is on the average
  214. 3.5 times faster than
  215. Kruskal's algorithm for monotone regression (and much simpler).
  216. Regression is ascending
  217. */
autoVEC VECmonotoneRegression (constVEC x) {
	// Ascending monotone (isotonic) regression by pooling adjacent violators:
	// whenever fit [i] drops below its predecessor, it is pooled with the
	// preceding values and the whole pooled block is replaced by its mean.
	autoVEC fit = VECcopy (x);
	double xt = undefined; // only to stop gcc from complaining "may be used uninitialized"
	for (integer i = 2; i <= x.size; i ++) {
		if (fit [i] >= fit [i - 1])
			continue;   // monotone so far: nothing to pool
		longdouble sum = fit [i];
		integer nt = 1;   // size of the pooled block ending at i
		for (integer j = 1; j <= i - 1; j ++) {
			sum += fit [i - j];
			nt ++;
			xt = (double) sum / nt; // i >= 2 -> xt always gets a value
			if (j < i - 1 && xt >= fit [i - j - 1])
				break;   // pooled mean no longer violates the value before the block
		}
		for (integer j = i - nt + 1; j <= i; j ++)
			fit [j] = xt;   // flatten the block to its mean
	}
	return fit;
}
  238. #undef TINY
  239. void NUMcholeskySolve (double **a, integer n, double d [], double b [], double x []) {
  240. for (integer i = 1; i <= n; i++) { /* Solve L.y=b */
  241. longdouble sum = b [i];
  242. for (integer k = i - 1; k >= 1; k--) {
  243. sum -= a [i] [k] * x [k];
  244. }
  245. x [i] = (double) sum / d [i];
  246. }
  247. for (integer i = n; i >= 1; i --) { /* Solve L^T.x=y */
  248. longdouble sum = x [i];
  249. for (integer k = i + 1; k <= n; k ++) {
  250. sum -= a [k] [i] * x [k];
  251. }
  252. x [i] = (double) sum / d [i];
  253. }
  254. }
double MATdeterminant_fromSymmetricMatrix (constMAT m) {
	// Determinant of a symmetric positive-definite matrix via Cholesky decomposition.
	// NOTE(review): the returned value is ln (determinant) — 2 * sum (log L [i][i]) —
	// and no exp() is taken, despite the function name; confirm callers expect the log.
	Melder_assert (m.nrow == m.ncol);
	autoMAT a = MATcopy (m);
	// Cholesky decomposition in lower, leave upper intact
	char uplo = 'U';   // LAPACK sees the transpose (Fortran storage), so 'U' addresses our lower triangle
	integer lda = m.nrow, info;
	NUMlapack_dpotf2 (& uplo, & a.nrow, & a [1] [1], & lda, & info);
	Melder_require (info == 0, U"dpotf2 cannot determine Cholesky decomposition.");
	longdouble lnd = 0.0;
	for (integer i = 1; i <= a.nrow; i ++) {
		lnd += log (a [i] [i]);   // diagonal now holds L [i] [i]
	}
	lnd *= 2.0; // because A = L . L' TODO
	return (double) lnd;
}
  270. void MATlowerCholeskyInverse_inplace (MAT a, double *out_lnd) {
  271. Melder_assert (a.nrow == a.ncol);
  272. char uplo = 'U', diag = 'N';
  273. integer info;
  274. // Cholesky decomposition in lower, leave upper intact
  275. // Fortran storage -> use uplo='U' to get 'L'.
  276. (void) NUMlapack_dpotf2 (& uplo, & a.nrow, & a [1] [1], & a.nrow, & info);
  277. Melder_require (info == 0, U"dpotf2 fails.");
  278. // Determinant from diagonal, diagonal is now sqrt (a [i] [i]) !
  279. if (out_lnd) {
  280. longdouble lnd = 0.0;
  281. for (integer i = 1; i <= a.nrow; i ++) {
  282. lnd += log (a [i] [i]);
  283. }
  284. *out_lnd *= 2.0 * lnd; /* because A = L . L' */
  285. }
  286. // Get the inverse */
  287. (void) NUMlapack_dtrtri (& uplo, & diag, & a.nrow, & a [1] [1], & a.nrow, & info);
  288. Melder_require (info == 0, U"dtrtri fails.");
  289. }
  290. autoMAT MATinverse_fromLowerCholeskyInverse (constMAT m) {
  291. Melder_assert (m.nrow == m.ncol);
  292. autoMAT r = MATraw (m.nrow, m.nrow);
  293. for (integer i = 1; i <= m.nrow; i ++) {
  294. for (integer j = 1; j <= i; j ++) {
  295. longdouble sum = 0.0;
  296. for (integer k = i; k <= m.nrow; k ++) {
  297. sum += m [k] [i] * m [k] [j];
  298. }
  299. r [i] [j] = r [j] [i] = (double) sum;
  300. }
  301. }
  302. return r;
  303. }
  304. double NUMmahalanobisDistance (constMAT lowerInverse, constVEC v, constVEC m) {
  305. Melder_assert (lowerInverse.ncol == v.size && v.size == m.size);
  306. longdouble chisq = 0.0;
  307. if (lowerInverse.nrow == 1) { // 1xn matrix
  308. for (integer j = 1; j <= v.size; j ++) {
  309. double t = lowerInverse [1] [j] * (v [j] - m [j]);
  310. chisq += t * t;
  311. }
  312. } else { // nxn matrix
  313. for (integer i = v.size; i > 0; i --) {
  314. double t = 0.0;
  315. for (integer j = 1; j <= i; j ++) {
  316. t += lowerInverse [i] [j] * (v [j] - m [j]);
  317. }
  318. chisq += t * t;
  319. }
  320. }
  321. return (double) chisq;
  322. }
  323. double NUMmahalanobisDistance_chi (double **linv, double *v, double *m, integer nr, integer n) {
  324. longdouble chisq = 0.0;
  325. if (nr == 1) { // 1xn matrix
  326. for (integer j = 1; j <= n; j ++) {
  327. double t = linv [1] [j] * (v [j] - m [j]);
  328. chisq += t * t;
  329. }
  330. } else { // nxn matrix
  331. for (integer i = n; i > 0; i --) {
  332. double t = 0.0;
  333. for (integer j = 1; j <= i; j ++) {
  334. t += linv [i] [j] * (v [j] - m [j]);
  335. }
  336. chisq += t * t;
  337. }
  338. }
  339. return (double) chisq;
  340. }
  341. void NUMdominantEigenvector (constMAT m, VEC q, double *out_lambda, double tolerance) {
  342. Melder_assert (m.nrow == m.ncol && q.size == m.nrow);
  343. longdouble lambda = 0.0, lambda0;
  344. for (integer k = 1; k <= q.size; k ++) {
  345. for (integer l = 1; l <= q.size; l ++) {
  346. lambda += q [k] * m [k] [l] * q [l];
  347. }
  348. }
  349. Melder_require (lambda > 0.0, U"Zero matrices ??");
  350. autoVEC z = VECraw (m.nrow);
  351. integer iter = 0;
  352. do {
  353. longdouble znorm2 = 0.0;
  354. for (integer l = 1; l <= q.size; l ++) {
  355. z [l] = 0.0;
  356. for (integer k = 1; k <= q.size; k ++) {
  357. z [l] += m [l] [k] * q [k];
  358. }
  359. }
  360. for (integer k = 1; k <= q.size; k ++) {
  361. znorm2 += z [k] * z [k];
  362. }
  363. znorm2 = sqrt (znorm2);
  364. for (integer k = 1; k <= q.size; k ++) {
  365. q [k] = z [k] / znorm2;
  366. }
  367. lambda0 = lambda;
  368. lambda = 0.0;
  369. for (integer k = 1; k <= q.size; k ++) {
  370. for (integer l = 1; l <= q.size; l ++) {
  371. lambda += q [k] * m [k] [l] * q [l];
  372. }
  373. }
  374. } while (fabs (lambda - lambda0) > tolerance || ++ iter < 30);
  375. if (out_lambda) {
  376. *out_lambda = (double) lambda;
  377. }
  378. }
  379. /* Input:
  380. * data [numberOfRows, from_col - 1 + my dimension]
  381. * contains the 'numberOfRows' vectors to be projected on the eigenspace.
  382. * eigenvectors [numberOfEigenvectors] [dimension]
  383. * the eigenvectors stored as rows
  384. * Input/Output
  385. * projection [numberOfRows, to_colbegin - 1 + numberOfEigenvectors]
  386. * the projected vectors from 'data'
  387. *
  388. * Project (part of) the vectors in matrix 'data' along the 'numberOfEigenvectors' eigenvectors into the matrix 'projection'.
  389. */
  390. void MATprojectRowsOnEigenspace_preallocated (MAT projection, integer toColumn, constMAT data, integer fromColumn, constMAT eigenvectors) {
  391. Melder_assert (projection.nrow = data.nrow);
  392. fromColumn = fromColumn <= 0 ? 1 : fromColumn;
  393. toColumn = toColumn <= 0 ? 1 : toColumn;
  394. Melder_assert (fromColumn + eigenvectors.ncol - 1 <= data.ncol);
  395. Melder_assert (toColumn + eigenvectors.ncol - 1 <= projection.ncol);
  396. for (integer irow = 1; irow <= data.nrow; irow ++)
  397. for (integer icol = 1; icol <= eigenvectors.nrow; icol ++) {
  398. longdouble r = 0.0;
  399. for (integer k = 1; k <= eigenvectors.ncol; k ++) {
  400. r += eigenvectors [icol] [k] * data [irow] [fromColumn + k - 1];
  401. }
  402. projection [irow] [toColumn + icol - 1] = (double) r;
  403. }
  404. }
  405. void MATprojectColumnsOnEigenspace_preallocated (MAT projection, constMAT data, constMAT eigenvectors) {
  406. Melder_assert (data.nrow == eigenvectors.ncol && projection.nrow == eigenvectors.nrow);
  407. for (integer icol = 1; icol <= data.ncol; icol ++)
  408. for (integer irow = 1; irow <= eigenvectors.nrow; irow ++) {
  409. longdouble r = 0.0;
  410. for (integer k = 1; k <= eigenvectors.ncol; k ++) {
  411. r += eigenvectors [irow] [k] * data [k] [icol];
  412. }
  413. projection [irow] [icol] = (double) r;
  414. }
  415. // MATmul_tt (data.get(), eigenvectors.get()) ??
  416. }
  417. void NUMdmatrix_into_principalComponents (double **m, integer nrows, integer ncols, integer numberOfComponents, double **pc) {
  418. Melder_assert (numberOfComponents > 0 && numberOfComponents <= ncols);
  419. autoMAT mc = MATcopy (MAT (m, nrows, ncols));
  420. /*MATcentreEachColumn_inplace (mc.get());*/
  421. autoSVD svd = SVD_createFromGeneralMatrix (mc.get());
  422. for (integer i = 1; i <= nrows; i ++) {
  423. for (integer j = 1; j <= numberOfComponents; j ++) {
  424. longdouble sum = 0.0;
  425. for (integer k = 1; k <= ncols; k ++) {
  426. sum += svd -> v [k] [j] * m [i] [k];
  427. }
  428. pc [i] [j] = (double) sum;
  429. }
  430. }
  431. // MATmul_tt (svd->v.get(), m.get()); ??
  432. }
  433. void NUMpseudoInverse (double **y, integer nr, integer nc, double **yinv, double tolerance) {
  434. autoSVD me = SVD_createFromGeneralMatrix ({y, nr, nc});
  435. (void) SVD_zeroSmallSingularValues (me.get(), tolerance);
  436. for (integer i = 1; i <= nc; i ++) {
  437. for (integer j = 1; j <= nr; j ++) {
  438. longdouble s = 0.0;
  439. for (integer k = 1; k <= nc; k ++) {
  440. if (my d [k] != 0.0) {
  441. s += my v [i] [k] * my u [j] [k] / my d [k];
  442. }
  443. }
  444. yinv [i] [j] = (double) s;
  445. }
  446. }
  447. }
  448. integer NUMsolveQuadraticEquation (double a, double b, double c, double *x1, double *x2) {
  449. return gsl_poly_solve_quadratic (a, b, c, x1, x2);
  450. }
  451. autoVEC NUMsolveEquation (constMAT a, constVEC b, double tolerance) {
  452. Melder_assert (a.nrow == b.size);
  453. autoSVD me = SVD_createFromGeneralMatrix (a);
  454. SVD_zeroSmallSingularValues (me.get(), tolerance);
  455. autoVEC x = SVD_solve (me.get(), b);
  456. return x;
  457. }
  458. /*void NUMsolveEquation (double **a, integer nr, integer nc, double *b, double tolerance, double *result) {
  459. double tol = tolerance > 0 ? tolerance : NUMfpp -> eps * nr;
  460. Melder_require (nr > 0 && nc > 0, U"The number of rows and the number of columns should at least be 1.");
  461. autoSVD me = SVD_createFromGeneralMatrix ({a, nr, nc});
  462. SVD_zeroSmallSingularValues (me.get(), tol);
  463. SVD_solve (me.get(), b, result);
  464. }*/
  465. autoMAT NUMsolveEquations (constMAT a, constMAT b, double tolerance) {
  466. Melder_assert (a.nrow == b.nrow);
  467. double tol = tolerance > 0 ? tolerance : NUMfpp -> eps * a.nrow;
  468. autoSVD me = SVD_createFromGeneralMatrix (a);
  469. autoMAT x = MATraw (b.nrow, b.ncol);
  470. autoVEC bt = VECraw (b.nrow);
  471. SVD_zeroSmallSingularValues (me.get(), tol);
  472. for (integer k = 1; k <= b.ncol; k ++) {
  473. for (integer j = 1; j <= b.nrow; j ++) { // copy b[.][k]
  474. bt [j] = b [j] [k];
  475. }
  476. autoVEC xt = SVD_solve (me.get(), bt.get());
  477. for (integer j = 1; j <= b.nrow; j ++) {
  478. x [j] [k] = xt [j];
  479. }
  480. }
  481. return x;
  482. }
  483. autoVEC NUMsolveNonNegativeLeastSquaresRegression (constMAT m, constVEC d, double tol, integer itermax) {
  484. Melder_assert (m.nrow == d.size);
  485. long nr = m.nrow, nc = m.ncol;
  486. autoVEC b = VECzero (nc);
  487. for (integer iter = 1; iter <= itermax; iter ++) {
  488. // Fix all weights except b [j]
  489. for (integer j = 1; j <= nc; j ++) {
  490. longdouble mjr = 0.0, mjmj = 0.0;
  491. for (integer i = 1; i <= nr; i ++) {
  492. double ri = d [i], mij = m [i] [j];
  493. for (integer l = 1; l <= nc; l ++) {
  494. if (l != j) {
  495. ri -= b [l] * m [i] [l];
  496. }
  497. }
  498. mjr += mij * ri;
  499. mjmj += mij * mij;
  500. }
  501. b [j] = mjr / mjmj;
  502. if (b [j] < 0.0) {
  503. b [j] = 0.0;
  504. }
  505. }
  506. // Calculate t(b) and compare with previous result.
  507. longdouble difsq = 0.0, difsqp = 0.0;
  508. for (integer i = 1; i <= nr; i ++) {
  509. double dmb = d [i];
  510. for (integer j = 1; j <= nc; j ++) {
  511. dmb -= m [i] [j] * b [j];
  512. }
  513. difsq += dmb * dmb;
  514. }
  515. if (fabs (difsq - difsqp) / difsq < tol) {
  516. break;
  517. }
  518. difsqp = difsq;
  519. }
  520. return b;
  521. }
// Data handed (via void *) to nr_func below: the vectors y [1..3] and
// delta [1..3] that parameterize f (lambda) in the constrained
// quadratic-regression equations.
struct nr_struct {
	double *y, *delta;
};
  525. /*
  526. f (lambda) = sum (y [i]^2 delta [i] / (delta [i]-lambda)^2, i=1..3)
  527. f'(lambda) = 2 * sum (y [i]^2 delta [i] / (delta [i]-lambda)^3, i=1..3)
  528. */
  529. static void nr_func (double x, double *f, double *df, void *data) {
  530. struct nr_struct *me = (struct nr_struct *) data;
  531. *f = *df = 0.0;
  532. for (integer i = 1; i <= 3; i ++) {
  533. double t1 = (my delta [i] - x);
  534. double t2 = my y [i] / t1;
  535. double t3 = t2 * t2 * my delta [i];
  536. *f += t3;
  537. *df += t3 * 2.0 / t1;
  538. }
  539. }
/*
	Constrained least-squares quadratic regression (Ten Berge's method; the
	page/equation numbers in the comments refer to that article).
	Solves for the 3-vector chi given the 3x3 design matrix o and data d;
	on return *out_alpha = chi [1] and *out_gamma = chi [3] (if requested).
	NOTE(review): in "Case 2" below it is possible that neither sub-branch
	assigns 'chi'; chi [1] would then dereference an empty autoVEC — verify.
*/
void NUMsolveConstrainedLSQuadraticRegression (constMAT o, constVEC d, double *out_alpha, double *out_gamma) {
Melder_assert (o.ncol == o.nrow && d.size == o.ncol && d.size == 3);
integer n3 = 3, info;
double eps = 1e-5, t1, t2, t3;
autoMAT g = MATzero (n3, n3);
autoMAT ptfinv = MATzero (n3, n3);
// Construct O'.O [1..3] [1..3].
autoMAT ftinv = MATmul_tn (o, o);
// Get lower triangular decomposition from O'.O and
// get F'^-1 from it (eq. (2)) (F^-1 not done ????)
char uplo = 'U';
(void) NUMlapack_dpotf2 (& uplo, & n3, & ftinv [1] [1], & n3, & info);
Melder_require (info == 0, U"dpotf2 fails.");
// Zero the strict upper triangle left over by the in-place factorization.
ftinv [1] [2] = ftinv [1] [3] = ftinv [2] [3] = 0.0;
// Construct G and its eigen-decomposition (eq. (4,5))
// Sort eigenvalues (& eigenvectors) ascending.
autoMAT b = MATzero (n3, n3);
b [3] [1] = b [1] [3] = -0.5;
b [2] [2] = 1.0;
// G = F^-1 B (F')^-1 (eq. 4)
for (integer i = 1; i <= 3; i ++) {
for (integer j = 1; j <= 3; j ++) {
for (integer k = 1; k <= 3; k ++) {
if (ftinv [k] [i] != 0.0) {
for (integer l = 1; l <= 3; l ++) {
g [i] [j] += ftinv [k] [i] * b [k] [l] * ftinv [l] [j];
}
}
}
}
}
// G's eigen-decomposition with eigenvalues (assumed ascending). (eq. 5)
autoMAT p;
autoVEC delta;
MAT_getEigenSystemFromSymmetricMatrix (g.get(), & p, & delta, true);
// Construct y = P'.F'.O'.d ==> Solve (F')^-1 . P .y = (O'.d) (page 632)
// Get P'F^-1 from the transpose of (F')^-1 . P
autoVEC otd (n3, kTensorInitializationType::ZERO);
autoMAT ftinvp (n3, n3, kTensorInitializationType::ZERO);
for (integer i = 1; i <= 3; i ++) {
for (integer j = 1; j <= 3; j ++) {
if (ftinv [i] [j] != 0.0) {
for (integer k = 1; k <= 3; k ++) {
ftinvp [i] [k] += ftinv [i] [j] * p [j] [k];
}
}
}
// otd = O'.d, accumulated in the same outer loop.
for (integer k = 1; k <= n3; k ++) {
otd [i] += o [k] [i] * d [k];
}
}
// Keep a copy of P'F^-1 so we can retry with the sign-flipped w below.
autoMAT ptfinvc (n3, n3, kTensorInitializationType::ZERO);
for (integer i = 1; i <= 3; i ++) {
for (integer j = 1; j <= 3; j ++) {
ptfinvc [j] [i] = ptfinv [j] [i] = ftinvp [i] [j];
}
}
autoVEC y = NUMsolveEquation (ftinvp.get(), otd.get(), 1e-6);
// The solution (3 cases)
autoVEC w (n3, kTensorInitializationType::ZERO);
autoVEC chi;
autoVEC diag (n3, kTensorInitializationType::ZERO);
if (fabs (y [1]) < eps) {
// Case 1: page 633
t2 = y [2] / (delta [2] - delta [1]);
t3 = y [3] / (delta [3] - delta [1]);
/* +- */
w [1] = sqrt (- delta [1] * (t2 * t2 * delta [2] + t3 * t3 * delta [3]));
w [2] = t2 * delta [2];
w [3] = t3 * delta [3];
chi = NUMsolveEquation (ptfinv.get(), w.get(), 1e-6);
// Try the other sign of w [1] when the first solution is degenerate.
w [1] = -w [1];
if (fabs (chi [3] / chi [1]) < eps) chi = NUMsolveEquation (ptfinvc.get(), w.get(), 1e-6);
} else if (fabs (y [2]) < eps) {
// Case 2: page 633
t1 = y [1] / (delta [1] - delta [2]);
t3 = y [3] / (delta [3] - delta [2]);
w [1] = t1 * delta [1];
if ( (delta [2] < delta [3] && (t2 = (t1 * t1 * delta [1] + t3 * t3 * delta [3])) < eps)) {
w [2] = sqrt (- delta [2] * t2); /* +- */
w [3] = t3 * delta [3];
chi = NUMsolveEquation (ptfinv.get(), w.get(), 1e-6);
w [2] = -w [2];
if (fabs (chi [3] / chi [1]) < eps) chi = NUMsolveEquation (ptfinvc.get(), w.get(), 1e-6);
// NOTE(review): the next condition '(delta [2] < delta [3] + eps) || (delta [2] > delta [3] - eps)'
// is true for every delta [2]; an approximate-equality test would use '&&' — confirm intent.
} else if (((delta [2] < delta [3] + eps) || (delta [2] > delta [3] - eps)) && fabs (y [3]) < eps) {
// choose one value for w [2] from an infinite number
w [2] = w [1];
w [3] = sqrt (- t1 * t1 * delta [1] * delta [2] - w [2] * w [2]);
chi = NUMsolveEquation (ptfinv.get(), w.get(), 1e-6);
}
} else {
// Case 3: page 634 use Newton-Raphson root finder
struct nr_struct me;
double xlambda, eps2 = (delta [2] - delta [1]) * 1e-6;
me.y = y.at;
me.delta = delta.at;
// Search for lambda in (delta [1], delta [2]), slightly inside the interval.
NUMnrbis (nr_func, delta [1] + eps, delta [2] - eps2, & me, & xlambda);
for (integer i = 1; i <= 3; i++) {
w [i] = y [i] / (1.0 - xlambda / delta [i]);
}
chi = NUMsolveEquation (ptfinv.get(), w.get(), 1e-6);
}
if (out_alpha) *out_alpha = chi [1];
if (out_gamma) *out_gamma = chi [3];
}
  645. /*
  646. f (b) = delta - b / (2 alpha) - sum (x [i]^2 / (c [i] - b)^2, i=1..n)
  647. f'(b) = - 1 / (2 alpha) + 2 * sum (x [i]^2 / (c [i] - b)^3, i=1..n)
  648. */
// Closure passed (via void *data) to nr2_func: number of terms m, the scalars
// delta and alpha, and the vectors x [1..m], c [1..m] of the formula above.
struct nr2_struct {
integer m;
double delta, alpha, *x, *c;
};
  653. static void nr2_func (double b, double *f, double *df, void *data) {
  654. struct nr2_struct *me = (struct nr2_struct *) data;
  655. *df = - 0.5 / my alpha;
  656. *f = my delta + *df * b;
  657. for (integer i = 1; i <= my m; i ++) {
  658. double c1 = (my c [i] - b);
  659. double c2 = my x [i] / c1;
  660. double c2sq = c2 * c2;
  661. *f -= c2sq;
  662. *df += 2 * c2sq / c1;
  663. }
  664. }
  665. autoVEC NUMsolveWeaklyConstrainedLinearRegression (constMAT f, constVEC phi, double alpha, double delta) {
  666. // n = f.nrow m=f.ncol
  667. autoMAT u = MATzero (f.ncol, f.ncol);
  668. autoVEC c = VECzero (f.ncol);
  669. autoVEC x = VECzero (f.nrow);
  670. autoVEC t;
  671. autoSVD svd = SVD_createFromGeneralMatrix (f);
  672. if (alpha == 0.0) {
  673. t = SVD_solve (svd.get(), phi); // standard least squares
  674. }
  675. // Step 1: Compute U and C from the eigendecomposition F'F = UCU'
  676. // Evaluate q, the multiplicity of the smallest eigenvalue in C
  677. autoINTVEC indx = NUMindexx (svd -> d.get());
  678. for (integer j = f.ncol; j > 0; j --) {
  679. double tmp = svd -> d [indx [j]];
  680. c [f.ncol - j + 1] = tmp * tmp;
  681. for (integer k = 1; k <= f.ncol; k ++) {
  682. u [f.ncol - j + 1] [k] = svd -> v [indx [j]] [k];
  683. }
  684. }
  685. integer q = 1;
  686. double tol = 1e-6;
  687. while (q < f.ncol && (c [f.ncol - q] - c [f.ncol]) < tol) {
  688. q ++;
  689. }
  690. // step 2: x = U'F'phi
  691. for (integer i = 1; i <= f.ncol; i ++) {
  692. for (integer j = 1; j <= f.ncol; j ++) {
  693. for (integer k = 1; k <= f.nrow; k ++) {
  694. x [i] += u [j] [i] * f [k] [j] * phi [k];
  695. }
  696. }
  697. }
  698. // step 3:
  699. struct nr2_struct me;
  700. me.m = f.ncol;
  701. me.delta = delta;
  702. me.alpha = alpha;
  703. me.x = x.at;
  704. me.c = c.at;
  705. double xqsq = 0.0;
  706. for (integer j = f.ncol - q + 1; j <= f.ncol; j ++) {
  707. xqsq += x [j] * x [j];
  708. }
  709. integer r = f.ncol;
  710. if (xqsq < tol) { /* xqsq == 0 */
  711. double fm, df;
  712. r = f.ncol - q;
  713. me.m = r;
  714. nr2_func (c [f.ncol], &fm, &df, & me);
  715. if (fm >= 0.0) { /* step 3.b1 */
  716. x [r + 1] = sqrt (fm);
  717. for (integer j = 1; j <= r; j ++) {
  718. x [j] /= c [j] - c [f.ncol];
  719. }
  720. for (integer j = 1; j <= r + 1; j ++) {
  721. for (integer k = 1; k <= r + 1; k ++) {
  722. t [j] += u [j] [k] * x [k];
  723. }
  724. }
  725. return t;
  726. }
  727. // else continue with r = m - q
  728. }
  729. // step 3a & 3b2, determine interval lower bound for Newton-Raphson root finder
  730. double xCx = 0.0;
  731. for (integer j = 1; j <= r; j ++) {
  732. xCx += x [j] * x [j] / c [j];
  733. }
  734. double b0, bmin = delta > 0.0 ? - xCx / delta : -2.0 * sqrt (alpha * xCx);
  735. double eps = (c [f.ncol] - bmin) * tol;
  736. // find the root of d(psi(b)/db in interval (bmin, c [m])
  737. NUMnrbis (nr2_func, bmin + eps, c [f.ncol] - eps, & me, & b0);
  738. for (integer j = 1; j <= r; j ++) {
  739. for (integer k = 1; k <= r; k ++) {
  740. t [j] += u [j] [k] * x [k] / (c [k] - b0);
  741. }
  742. }
  743. return t;
  744. }
/*
	Procrustes analysis: find the rotation (and, for a similarity transform,
	the scale and translation) that best maps y onto x.
	The transform is orthogonal-only when either out_translation or out_scale
	is not requested; otherwise a full similarity transform is computed.
	NOTE(review): on the orthogonal path, *out_scale and *out_translation are
	never written even if the caller passed only one of them — verify callers.
*/
void NUMprocrustes (constMAT x, constMAT y, autoMAT *out_rotation, autoVEC *out_translation, double *out_scale) {
Melder_assert (x.nrow == y.nrow && x.ncol == y.ncol);
Melder_assert (x.nrow >= x.ncol);
bool orthogonal = ! out_translation || ! out_scale; // else similarity transform
/*
Reference: Borg & Groenen (1997), Modern multidimensional scaling,
Springer
1. Calculate C = X'JY (page 346) for similarity transform
else X'Y for orthogonal (page 341)
JY amounts to centering the columns of Y.
*/
autoMAT yc = MATcopy (y);
if (! orthogonal)
MATcentreEachColumn_inplace (yc.get());
autoMAT c = MATmul_tn (x, yc.get()); // X'(JY)
// 2. Decompose C by SVD: C = UDV' (our SVD has eigenvectors stored row-wise V!)
autoSVD svd = SVD_createFromGeneralMatrix (c.get());
double trace = NUMsum (svd -> d.get());
Melder_require (trace > 0.0, U"NUMprocrustes: degenerate configuration(s).");
// 3. T = VU'
autoMAT rotation = MATmul_nt (svd->v.get(), svd->u.get());
if (! orthogonal) {
// 4. Dilation factor s = (tr X'JYT) / (tr Y'JY)
// First we need YT.
autoMAT yt = MATmul (y, rotation.get());
// X'J = (JX)' centering the columns of X
autoMAT xc = MATcopy (x);
MATcentreEachColumn_inplace (xc.get());
// tr X'J YT == tr xc' yt
double traceXtJYT = NUMtrace2_tn (xc.get(), yt.get()); // trace (Xc'.(YT))
double traceYtJY = NUMtrace2_tn (y, yc.get()); // trace (Y'.Yc)
longdouble scale = traceXtJYT / traceYtJY;
// 5. Translation vector tr = (X - sYT)'1 / x.nrow
if (out_translation) {
autoVEC translation = VECzero (x.ncol);
for (integer i = 1; i <= x.ncol; i ++) {
longdouble productsum = 0.0;
for (integer j = 1; j <= x.nrow; j ++)
productsum += x [j] [i] - scale * yt [j] [i];
translation [i] = productsum / x.nrow;
}
*out_translation = translation.move();
}
if (out_scale) *out_scale = (double) scale;
}
if (out_rotation) *out_rotation = rotation.move();
}
/*
	Evaluate the i-th M-spline of the given order on the knot sequence at x
	(Ramsay-style monotone spline basis; 'eq. 2' below refers to that scheme).
	Returns 0.0 when x lies outside the support of spline i.
*/
double NUMmspline (constVEC knot, integer order, integer i, double x) {
integer jj, nSplines = knot.size - order;
double y = 0.0;
Melder_require (nSplines > 0, U"No splines.");
Melder_require (order > 0 && i <= nSplines, U"Combination of order and index not correct.");
/*
Find the interval where x is located.
M-splines of order k have degree k-1.
M-splines are zero outside interval [ knot [i], knot [i+order] ).
First and last 'order' knots are equal, i.e.,
knot [1] = ... = knot [order] && knot [knot.size-order+1] = ... knot [knot.size].
*/
for (jj = order; jj <= knot.size - order + 1; jj ++) {
if (x < knot [jj]) {
break;
}
}
// Outside the support of spline i (or of the whole basis): the value is 0.
if (jj < i || (jj > i + order) || jj == order || jj > (knot.size - order + 1)) {
return y;
}
// Calculate M [i](x|1,t) according to eq.2.
integer ito = i + order - 1;
autoNUMvector<double> m (i, ito);
for (integer j = i; j <= ito; j ++) {
if (x >= knot [j] && x < knot [j + 1]) {
m [j] = 1 / (knot [j + 1] - knot [j]);
}
}
// Iterate to get M [i](x|k,t)
for (integer k = 2; k <= order; k ++) {
for (integer j = i; j <= i + order - k; j ++) {
double kj = knot [j], kjpk = knot [j + k];
if (kjpk > kj) {
m [j] = k * ((x - kj) * m [j] + (kjpk - x) * m [j + 1]) / ((k - 1) * (kjpk - kj));
}
}
}
y = m [i];
return y;
}
/*
	Evaluate the i-th I-spline (integrated M-spline) of the given order on the
	knot sequence aknot at x. Returns 0.0 left of the support and 1.0 right of it.
*/
double NUMispline (constVEC aknot, integer order, integer i, double x) {
integer j, orderp1 = order + 1;
double y = 0.0;
// Locate the knot interval containing x.
for (j = orderp1; j <= aknot.size - order; j ++) {
if (x < aknot [j]) {
break;
}
}
if (-- j < i) {
return y;
}
if (j > i + order || (j == aknot.size - order && x == aknot [j])) {
return 1.0;
}
/*
Equation 5 in Ramsay's article contains some errors!!!
1. the interval selection should be 'j-k <= i <= j' instead of
j-k+1 <= i <= j'
2. the summation index m starts at 'i+1' instead of 'i'
*/
for (integer m = i + 1; m <= j; m ++) {
double r = NUMmspline (aknot, orderp1, m, x);
y += (aknot [m + orderp1] - aknot [m]) * r;
}
y /= orderp1;
return y;
}
  859. double NUMwilksLambda (double *lambda, integer from, integer to) {
  860. double result = 1.0;
  861. for (integer i = from; i <= to; i ++) {
  862. result /= (1.0 + lambda [i]);
  863. }
  864. return result;
  865. }
  866. double NUMfactln (int n) {
  867. static double table [101];
  868. if (n < 0) {
  869. return undefined;
  870. }
  871. if (n <= 1) {
  872. return 0.0;
  873. }
  874. return n > 100 ? NUMlnGamma (n + 1.0) : table [n] != 0.0 ? table [n] : (table [n] = NUMlnGamma (n + 1.0));
  875. }
/*
	Safeguarded Newton-Raphson root finder on the bracket [xmin, xmax]:
	takes a Newton step when it stays inside the bracket and converges fast
	enough, otherwise bisects. f must fill in both the function value and its
	derivative. Sets *root to undefined when f (xmin) and f (xmax) have the
	same sign (no bracketed root).
*/
void NUMnrbis (void (*f) (double x, double *fx, double *dfx, void *closure), double xmin, double xmax, void *closure, double *root) {
double df, fx, fh, fl, tmp, xh, xl, tol;
integer itermax = 1000; // 80 or so could be enough; 60 is too small
(*f) (xmin, &fl, &df, closure);
if (fl == 0.0) {
*root = xmin;
return;
}
(*f) (xmax, &fh, &df, closure);
if (fh == 0.0) {
*root = xmax;
return;
}
if ((fl > 0.0 && fh > 0.0) || (fl < 0.0 && fh < 0.0)) {
*root = undefined;
return;
}
// Orient the search so that f (xl) < 0 < f (xh).
if (fl < 0.0) {
xl = xmin;
xh = xmax;
} else {
xh = xmin;
xl = xmax;
}
double dxold = fabs (xmax - xmin);
double dx = dxold;
*root = 0.5 * (xmin + xmax);
(*f) (*root, &fx, &df, closure);
for (integer iter = 1; iter <= itermax; iter ++) {
// Bisect when the Newton step would leave the bracket or is not
// shrinking fast enough; otherwise take the Newton step.
if ((((*root - xh) * df - fx) * ((*root - xl) * df - fx) >= 0.0) || (fabs (2.0 * fx) > fabs (dxold * df))) {
dxold = dx;
dx = 0.5 * (xh - xl);
*root = xl + dx;
if (xl == *root) {
return;
}
} else {
dxold = dx;
dx = fx / df;
tmp = *root;
*root -= dx;
if (tmp == *root) {
return;
}
}
// Converged when the step is below the floating-point tolerance at *root.
tol = NUMfpp -> eps * (*root == 0.0 ? 1.0 : fabs (*root));
if (fabs (dx) < tol) {
return;
}
(*f) (*root, &fx, &df, closure);
// Maintain the bracket.
if (fx < 0.0) {
xl = *root;
} else {
xh = *root;
}
}
Melder_warning (U"NUMnrbis: maximum number of iterations (", itermax, U") exceeded.");
}
/*
	Ridders' root finder on the bracket [x1, x2]: returns the root of f, or
	undefined when the root is not bracketed, f returns undefined, or the
	discriminant becomes negative. Degenerate iterations (discriminant zero
	or sqrt underflow) fall back to plain bisection.
*/
double NUMridders (double (*f) (double x, void *closure), double x1, double x2, void *closure) {
/* There is still a problem with this implementation:
tol may be zero;
*/
double x3, x4, d, root = undefined, tol;
integer itermax = 100;
double f1 = f (x1, closure);
if (f1 == 0.0) {
return x1;
}
if (isundef (f1)) {
return undefined;
}
double f2 = f (x2, closure);
if (f2 == 0.0) {
return x2;
}
if (isundef (f2)) {
return undefined;
}
// Require a sign change over [x1, x2].
if ((f1 < 0.0 && f2 < 0.0) || (f1 > 0.0 && f2 > 0.0)) {
return undefined;
}
for (integer iter = 1; iter <= itermax; iter ++) {
x3 = 0.5 * (x1 + x2);
double f3 = f (x3, closure);
if (f3 == 0.0) {
return x3;
}
if (isundef (f3)) {
return undefined;
}
// New guess: x4 = x3 + (x3 - x1) * sign(f1 - f2) * f3 / sqrt(f3^2 - f1*f2)
d = f3 * f3 - f1 * f2;
if (d < 0.0) {
Melder_warning (U"d < 0 in ridders (iter = ", iter, U").");
return undefined;
}
if (d == 0.0) {
// pb test added because f1 f2 f3 may be 1e-170 or so
tol = NUMfpp -> eps * (x3 == 0.0 ? 1.0 : fabs (x3));
if (iter > 1 && fabs (x3 - root) < tol) {
return root;
}
root = x3;
// Perform bisection.
if (f1 > 0.0) {
// falling curve: f1 > 0, f2 < 0
if (f3 > 0.0) {
x1 = x3; f1 = f3; // retain invariant: f1 > 0, f2 < 0
} else {
// f3 <= 0.0
x2 = x3; f2 = f3; // retain invariant: f1 > 0, f2 < 0
}
} else {
// rising curve: f1 < 0, f2 > 0
if (f3 > 0.0) {
x2 = x3; f2 = f3; // retain invariant: f1 < 0, f2 > 0
} else {
// f3 < 0.0
x1 = x3; f1 = f3; // retain invariant: f1 < 0, f2 > 0
}
}
} else {
d = sqrt (d);
if (isnan (d)) {
// pb: square root of denormalized small number fails on some computers
tol = NUMfpp -> eps * (x3 == 0.0 ? 1.0 : fabs (x3));
if (iter > 1 && fabs (x3 - root) < tol) {
return root;
}
root = x3;
// Perform bisection.
if (f1 > 0.0) {
// falling curve: f1 > 0, f2 < 0
if (f3 > 0.0) {
x1 = x3; f1 = f3; // retain invariant: f1 > 0, f2 < 0
} else {
// f3 <= 0.0
x2 = x3; f2 = f3; // retain invariant: f1 > 0, f2 < 0
}
} else {
// rising curve: f1 < 0, f2 > 0
if (f3 > 0.0) {
x2 = x3; f2 = f3; // retain invariant: f1 < 0, f2 > 0
} else {
// f3 < 0.0
x1 = x3; f1 = f3; // retain invariant: f1 < 0, f2 > 0 */
}
}
} else {
// Regular Ridders update, then re-bracket using x3/x4.
d = (x3 - x1) * f3 / d;
x4 = f1 - f2 < 0 ? x3 - d : x3 + d;
tol = NUMfpp -> eps * (x4 == 0.0 ? 1.0 : fabs (x4));
if (iter > 1 && fabs (x4 - root) < tol) {
return root;
}
root = x4;
double f4 = f (x4, closure);
if (f4 == 0.0) {
return root;
}
if (isundef (f4)) {
return undefined;
}
if ((f1 > f2) == (d > 0.0) /* pb: instead of x3 < x4 */) {
if (SIGN (f3, f4) != f3) {
x1 = x3; f1 = f3;
x2 = x4; f2 = f4;
} else {
x1 = x4; f1 = f4;
}
} else {
if (SIGN (f3, f4) != f3) {
x1 = x4; f1 = f4;
x2 = x3; f2 = f3;
} else {
x2 = x4; f2 = f4;
}
}
}
}
if (fabs (x1 - x2) < tol) {
return root;
}
}
{
static integer nwarnings = 0;
nwarnings ++;
Melder_warning (U"NUMridders: maximum number of iterations (", itermax, U") exceeded.");
}
return root;
}
// Lognormal cumulative distribution function P (X <= x); thin GSL wrapper.
double NUMlogNormalP (double x, double zeta, double sigma) {
return gsl_cdf_lognormal_P (x, zeta, sigma);
}
// Lognormal survival function Q (X > x) = 1 - P; thin GSL wrapper.
double NUMlogNormalQ (double x, double zeta, double sigma) {
return gsl_cdf_lognormal_Q (x, zeta, sigma);
}
  1073. double NUMstudentP (double t, double df) {
  1074. if (df < 1.0) {
  1075. return undefined;
  1076. }
  1077. double ib = NUMincompleteBeta (0.5 * df, 0.5, df / (df + t * t));
  1078. if (isundef (ib)) {
  1079. return undefined;
  1080. }
  1081. ib *= 0.5;
  1082. return t < 0.0 ? ib : 1.0 - ib;
  1083. }
  1084. double NUMstudentQ (double t, double df) {
  1085. if (df < 1) {
  1086. return undefined;
  1087. }
  1088. double ib = NUMincompleteBeta (0.5 * df, 0.5, df / (df + t * t));
  1089. if (isundef (ib)) {
  1090. return undefined;
  1091. }
  1092. ib *= 0.5;
  1093. return t > 0.0 ? ib : 1.0 - ib;
  1094. }
  1095. double NUMfisherP (double f, double df1, double df2) {
  1096. if (f < 0.0 || df1 < 1.0 || df2 < 1.0) {
  1097. return undefined;
  1098. }
  1099. double ib = NUMincompleteBeta (0.5 * df2, 0.5 * df1, df2 / (df2 + f * df1));
  1100. if (isundef (ib)) {
  1101. return undefined;
  1102. }
  1103. return 1.0 - ib;
  1104. }
  1105. double NUMfisherQ (double f, double df1, double df2) {
  1106. if (f < 0.0 || df1 < 1.0 || df2 < 1.0) {
  1107. return undefined;
  1108. }
  1109. if (Melder_debug == 28) {
  1110. return NUMincompleteBeta (0.5 * df2, 0.5 * df1, df2 / (df2 + f * df1));
  1111. } else {
  1112. double result = gsl_cdf_fdist_Q (f, df1, df2);
  1113. if (isnan (result)) {
  1114. return undefined;
  1115. }
  1116. return result;
  1117. }
  1118. }
  1119. double NUMinvGaussQ (double p) {
  1120. double pc = p;
  1121. if (p <= 0.0 || p >= 1.0) {
  1122. return undefined;
  1123. }
  1124. if (p > 0.5) {
  1125. pc = 1.0 - p;
  1126. }
  1127. double t = sqrt (- 2.0 * log (pc));
  1128. t -= (2.515517 + (0.802853 + 0.010328 * t) * t) /
  1129. (1.0 + (1.432788 + (0.189269 + 0.001308 * t) * t) * t);
  1130. return p > 0.5 ? -t : t;
  1131. }
  1132. static double studentQ_func (double x, void *voidParams) {
  1133. struct pdf1_struct *params = (struct pdf1_struct *) voidParams;
  1134. double q = NUMstudentQ (x, params -> df);
  1135. return ( isundef (q) ? undefined : q - params -> p );
  1136. }
  1137. double NUMinvStudentQ (double p, double df) {
  1138. struct pdf1_struct params;
  1139. double pc = ( p > 0.5 ? 1.0 - p : p ), xmin, xmax = 1.0, x;
  1140. if (p < 0.0 || p >= 1.0) {
  1141. return undefined;
  1142. }
  1143. // Bracket the function f(x) = NUMstudentQ (x, df) - p.
  1144. for (;;) {
  1145. double q = NUMstudentQ (xmax, df);
  1146. if (isundef (q)) {
  1147. return undefined;
  1148. }
  1149. if (q < pc) {
  1150. break;
  1151. }
  1152. xmax *= 2.0;
  1153. }
  1154. xmin = ( xmax > 1.0 ? xmax / 2.0 : 0.0 );
  1155. // Find zero of f(x) with Ridders' method.
  1156. params. df = df;
  1157. params. p = pc;
  1158. x = NUMridders (studentQ_func, xmin, xmax, & params);
  1159. if (isundef (x)) {
  1160. return undefined;
  1161. }
  1162. return ( p > 0.5 ? -x : x );
  1163. }
  1164. static double chiSquareQ_func (double x, void *voidParams) {
  1165. struct pdf1_struct *params = (struct pdf1_struct *) voidParams;
  1166. double q = NUMchiSquareQ (x, params -> df);
  1167. return ( isundef (q) ? undefined : q - params -> p );
  1168. }
  1169. double NUMinvChiSquareQ (double p, double df) {
  1170. struct pdf1_struct params;
  1171. double xmin, xmax = 1;
  1172. if (p < 0.0 || p >= 1.0) {
  1173. return undefined;
  1174. }
  1175. // Bracket the function f(x) = NUMchiSquareQ (x, df) - p.
  1176. for (;;) {
  1177. double q = NUMchiSquareQ (xmax, df);
  1178. if (isundef (q)) {
  1179. return undefined;
  1180. }
  1181. if (q < p) {
  1182. break;
  1183. }
  1184. xmax *= 2.0;
  1185. }
  1186. xmin = ( xmax > 1.0 ? xmax / 2.0 : 0.0 );
  1187. // Find zero of f(x) with Ridders' method.
  1188. params. df = df;
  1189. params. p = p;
  1190. return NUMridders (chiSquareQ_func, xmin, xmax, & params);
  1191. }
  1192. static double fisherQ_func (double x, void *voidParams) {
  1193. struct pdf2_struct *params = (struct pdf2_struct *) voidParams;
  1194. double q = NUMfisherQ (x, params -> df1, params -> df2);
  1195. return ( isundef (q) ? undefined : q - params -> p );
  1196. }
  1197. double NUMinvFisherQ (double p, double df1, double df2) {
  1198. if (p <= 0.0 || p > 1.0 || df1 < 1.0 || df2 < 1.0) {
  1199. return undefined;
  1200. }
  1201. if (Melder_debug == 29) {
  1202. //if (p == 1.0) return 0.0;
  1203. return gsl_cdf_fdist_Qinv (p, df1, df2);
  1204. } else {
  1205. struct pdf2_struct params;
  1206. double top = 1000.0;
  1207. if (p == 1.0) {
  1208. return 0.0;
  1209. }
  1210. params. p = p;
  1211. params. df1 = df1;
  1212. params. df2 = df2;
  1213. for (;;) {
  1214. double q = NUMfisherQ (top, df1, df2);
  1215. if (isundef (q)) {
  1216. return undefined;
  1217. }
  1218. if (q < p) {
  1219. break;
  1220. }
  1221. if (top > 0.9e300) {
  1222. return undefined;
  1223. }
  1224. top *= 1e9;
  1225. }
  1226. return NUMridders (fisherQ_func, 0.0, p > 0.5 ? 2.2 : top, & params);
  1227. }
  1228. }
  1229. double NUMbeta2 (double z, double w) {
  1230. gsl_sf_result result;
  1231. int status = gsl_sf_beta_e (z, w, &result);
  1232. return status == GSL_SUCCESS ? result.val : undefined;
  1233. }
  1234. double NUMlnBeta (double a, double b) {
  1235. gsl_sf_result result;
  1236. int status = gsl_sf_lnbeta_e (a, b, &result);
  1237. return status == GSL_SUCCESS ? result.val : undefined;
  1238. }
/*
	Henze-Zirkler test for multivariate normality of the rows of 'data'
	(n observations of dimension p). On return, *beta holds the smoothing
	parameter (auto-chosen when the caller passes a non-positive value),
	*tnb the test statistic, and *lnmu / *lnvar the parameters of its
	approximating lognormal distribution. Returns the p-value (probability
	of normality), or undefined for n < 2 or p < 1.
	Reference cited below: Heinze & Wagner (1997).
*/
double NUMnormalityTest_HenzeZirkler (constMAT data, double *beta, double *tnb, double *lnmu, double *lnvar) {
const integer n = data.nrow, p = data.ncol;
if (*beta <= 0)
*beta = (1.0 / sqrt (2.0)) * pow ((1.0 + 2 * p) / 4.0, 1.0 / (p + 4)) * pow (n, 1.0 / (p + 4));
const double p2 = p / 2.0;
const double beta2 = *beta * *beta, beta4 = beta2 * beta2, beta8 = beta4 * beta4;
const double gamma = 1.0 + 2.0 * beta2, gamma2 = gamma * gamma, gamma4 = gamma2 * gamma2;
const double delta = 1.0 + beta2 * (4.0 + 3.0 * beta2), delta2 = delta * delta;
double prob = undefined;
*tnb = *lnmu = *lnvar = undefined;
if (n < 2 || p < 1)
return prob;
autoVEC zero = VECzero (p);
autoMAT x = MATcopy (data);
MATcentreEachColumn_inplace (x.get()); // x - xmean
autoMAT covar = MATcovarianceFromColumnCentredMatrix (x.get(), 0);
try {
MATlowerCholeskyInverse_inplace (covar.get(), nullptr);
longdouble sumjk = 0.0, sumj = 0.0;
const double b1 = beta2 / 2.0, b2 = b1 / (1.0 + beta2);
/* Heinze & Wagner (1997), page 3
We use d [j] [k] = ||Y [j]-Y [k]||^2 = (Y [j]-Y [k])'S^(-1)(Y [j]-Y [k])
So d [j] [k]= d [k] [j] and d [j] [j] = 0
*/
for (integer j = 1; j <= n; j ++) {
for (integer k = 1; k < j; k ++) {
const double djk = NUMmahalanobisDistance (covar.get(), x.row (j), x.row(k));
sumjk += 2.0 * exp (-b1 * djk); // factor 2 because d [j] [k] == d [k] [j]
}
sumjk += 1.0; // for k == j
const double djj = NUMmahalanobisDistance (covar.get(), x.row(j), zero.get());
sumj += exp (-b2 * djj);
}
*tnb = (1.0 / n) * (double) sumjk - 2.0 * pow (1.0 + beta2, - p2) * (double) sumj + n * pow (gamma, - p2); // n *
} catch (MelderError) {
// A singular covariance matrix: treat as maximally non-normal.
Melder_clearError ();
*tnb = 4.0 * n;
}
// Moments of the statistic under the null hypothesis, then its lognormal fit.
double mu = 1.0 - pow (gamma, -p2) * (1.0 + p * beta2 / gamma + p * (p + 2) * beta4 / (2.0 * gamma2));
double var = 2.0 * pow (1.0 + 4.0 * beta2, -p2)
+ 2.0 * pow (gamma, -p) * (1.0 + 2.0 * p * beta4 / gamma2 + 3.0 * p * (p + 2) * beta8 / (4.0 * gamma4))
- 4.0 * pow (delta, -p2) * (1.0 + 3.0 * p * beta4 / (2.0 * delta) + p * (p + 2) * beta8 / (2.0 * delta2));
double mu2 = mu * mu;
*lnmu = log (sqrt (mu2 * mu2 / (mu2 + var)));
*lnvar = sqrt (log ( (mu2 + var) / mu2));
prob = NUMlogNormalQ (*tnb, *lnmu, *lnvar);
return prob;
}
  1287. /*************** Hz <--> other freq reps *********************/
  1288. double NUMmelToHertz3 (double mel) {
  1289. if (mel < 0.0) {
  1290. return undefined;
  1291. }
  1292. return mel < 1000.0 ? mel : 1000.0 * (exp (mel * log10 (2.0) / 1000.0) - 1.0);
  1293. }
  1294. double NUMhertzToMel3 (double hz) {
  1295. if (hz < 0.0) {
  1296. return undefined;
  1297. }
  1298. return hz < 1000.0 ? hz : 1000.0 * log10 (1.0 + hz / 1000.0) / log10 (2.0);
  1299. }
  1300. double NUMmelToHertz2 (double mel) {
  1301. if (mel < 0.0) {
  1302. return undefined;
  1303. }
  1304. return 700.0 * (pow (10.0, mel / 2595.0) - 1.0);
  1305. }
  1306. double NUMhertzToMel2 (double hz) {
  1307. if (hz < 0.0) {
  1308. return undefined;
  1309. }
  1310. return 2595.0 * log10 (1.0 + hz / 700.0);
  1311. }
  1312. double NUMhertzToBark_traunmueller (double hz) {
  1313. if (hz < 0.0) {
  1314. return undefined;
  1315. }
  1316. return 26.81 * hz / (1960.0 + hz) - 0.53;
  1317. }
  1318. double NUMbarkToHertz_traunmueller (double bark) {
  1319. if (bark < 0.0 || bark > 26.28) {
  1320. return undefined;
  1321. }
  1322. return 1960.0 * (bark + 0.53) / (26.28 - bark);
  1323. }
  1324. double NUMbarkToHertz_schroeder (double bark) {
  1325. return 650.0 * sinh (bark / 7.0);
  1326. }
  1327. double NUMbarkToHertz_zwickerterhardt (double hz) {
  1328. if (hz < 0.0) {
  1329. return undefined;
  1330. }
  1331. return 13.0 * atan (0.00076 * hz) + 3.5 * atan (hz / 7500.0);
  1332. }
  1333. double NUMhertzToBark_schroeder (double hz) {
  1334. if (hz < 0.0) {
  1335. return undefined;
  1336. }
  1337. double h650 = hz / 650.0;
  1338. return 7.0 * log (h650 + sqrt (1.0 + h650 * h650));
  1339. }
  1340. double NUMbarkToHertz2 (double bark) {
  1341. if (bark < 0.0) {
  1342. return undefined;
  1343. }
  1344. return 650.0 * sinh (bark / 7.0);
  1345. }
  1346. double NUMhertzToBark2 (double hz) {
  1347. if (hz < 0) {
  1348. return undefined;
  1349. }
  1350. double h650 = hz / 650.0;
  1351. return 7.0 * log (h650 + sqrt (1.0 + h650 * h650));
  1352. }
  1353. double NUMbladonlindblomfilter_amplitude (double zc, double z) {
  1354. double dz = zc - z + 0.474;
  1355. return pow (10.0, 1.581 + 0.75 * dz - 1.75 * sqrt (1.0 + dz * dz));
  1356. }
  1357. double NUMsekeyhansonfilter_amplitude (double zc, double z) {
  1358. double dz = zc - z - 0.215;
  1359. return pow (10.0, 0.7 - 0.75 * dz - 1.75 * sqrt (0.196 + dz * dz));
  1360. }
  1361. double NUMtriangularfilter_amplitude (double fl, double fc, double fh, double f) {
  1362. double a = 0.0;
  1363. if (f > fl && f < fh) {
  1364. a = f < fc ? (f - fl) / (fc - fl) : (fh - f) / (fh - fc);
  1365. /* Normalize such that area under the filter is always 1. ???
  1366. a /= 2 * (fh - fl);*/
  1367. }
  1368. return a;
  1369. }
  1370. double NUMformantfilter_amplitude (double fc, double bw, double f) {
  1371. double dq = (fc * fc - f * f) / (bw * f);
  1372. return 1.0 / (dq * dq + 1.0);
  1373. }
  1374. /* Childers (1978), Modern Spectrum analysis, IEEE Press, 252-255) */
  1375. /* work [1..n+n+n];
  1376. b1 = & work [1];
  1377. b2 = & work [n+1];
  1378. aa = & work [n+n+1];
  1379. for (i=1; i<=n+n+n; i ++) work [i]=0;
  1380. */
/*
	Burg's method for linear-prediction coefficients: fills a [1..a.size]
	with the LPC coefficients of the signal x [1..x.size] and returns the
	remaining mean-square error. The equation numbers in the comments refer
	to Childers (1978), Modern Spectrum Analysis (see note above).
	Returns 0.0 when the recursion becomes ill-conditioned, and the (zero)
	signal power when the input is all-zero.
*/
double NUMburg_preallocated (VEC a, constVEC x) {
integer n = x.size, m = a.size;
for (integer j = 1; j <= m; j ++) {
a [j] = 0.0;
}
// b1, b2: forward/backward prediction error buffers; aa: previous-order coefficients.
autoVEC b1 = VECzero (n), b2 = VECzero (n), aa = VECzero (m);
// (3)
longdouble p = 0.0;
for (integer j = 1; j <= n; j ++) {
p += x [j] * x [j];
}
longdouble xms = p / n;
if (xms <= 0.0) {
return xms; // warning empty
}
// (9)
b1 [1] = x [1];
b2 [n - 1] = x [n];
for (integer j = 2; j <= n - 1; j ++) {
b1 [j] = b2 [j - 1] = x [j];
}
for (integer i = 1; i <= m; i ++) {
// (7)
longdouble num = 0.0, denum = 0.0;
for (integer j = 1; j <= n - i; j ++) {
num += b1 [j] * b2 [j];
denum += b1 [j] * b1 [j] + b2 [j] * b2 [j];
}
if (denum <= 0.0) {
return 0.0; // warning ill-conditioned
}
a [i] = 2.0 * num / denum;
// (10)
xms *= 1.0 - a [i] * a [i];
// (5)
for (integer j = 1; j <= i - 1; j ++) {
a [j] = aa [j] - a [i] * aa [i - j];
}
if (i < m) {
// (8) Watch out: i -> i+1
for (integer j = 1; j <= i; j ++) {
aa [j] = a [j];
}
for (integer j = 1; j <= n - i - 1; j ++) {
b1 [j] -= aa [i] * b2 [j];
b2 [j] = b2 [j + 1] - aa [i] * b1 [j + 1];
}
}
}
return xms;
}
  1432. autoVEC NUMburg (constVEC x, integer numberOfPredictionCoefficients, double *out_xms) {
  1433. autoVEC a = VECraw (numberOfPredictionCoefficients);
  1434. double xms = NUMburg_preallocated (a.get(), x);
  1435. if (out_xms) *out_xms = xms;
  1436. return a;
  1437. }
/*
	Convert the matrix elements m [rb..re] [cb..ce] in place to dB re 'ref':
	m [i] [j] := factor * 10 * log10 (m [i] [j] / ref), clipped below at 'floor'.
	All elements must be non-negative; zero elements map to 'floor'.
	NOTE(review): the error message says "positive" but the check accepts 0 —
	zero values are handled by the floor clamp below; confirm wording.
*/
void NUMdmatrix_to_dBs (double **m, integer rb, integer re, integer cb, integer ce, double ref, double factor, double floor) {
double ref_db, factor10 = factor * 10.0;
double max = m [rb] [cb], min = max;
Melder_assert (ref > 0 && factor > 0 && rb <= re && cb <= ce);
// Scan for the extrema to validate the input range.
for (integer i = rb; i <= re; i ++) {
for (integer j = cb; j <= ce; j ++) {
if (m [i] [j] > max) {
max = m [i] [j];
} else if (m [i] [j] < min) {
min = m [i] [j];
}
}
}
Melder_require (min >= 0.0 && max >= 0.0, U"All matrix elements should be positive.");
ref_db = factor10 * log10 (ref);
for (integer i = rb; i <= re; i ++) {
for (integer j = cb; j <= ce; j ++) {
double mij = floor;
if (m [i] [j] > 0.0) {
mij = factor10 * log10 (m [i] [j]) - ref_db;
if (mij < floor) {
mij = floor;
}
}
m [i] [j] = mij;
}
}
}
  1466. double **NUMcosinesTable (integer first, integer last, integer npoints) {
  1467. Melder_assert (0 < first && first <= last && npoints > 0);
  1468. autoNUMmatrix<double> m (first, last, 1, npoints);
  1469. for (integer i = first; i <= last; i ++) {
  1470. double f = i * NUMpi / npoints;
  1471. for (integer j = 1; j <= npoints; j ++) {
  1472. m [i] [j] = cos (f * (j - 0.5));
  1473. }
  1474. }
  1475. return m.transfer();
  1476. }
/*
	Compute the second derivatives y2 [1..n] of the interpolating cubic spline
	through the points (x [i], y [i]), i = 1..n.
	yp1 and ypn prescribe the first derivative at the first and last point;
	a value larger than 0.99e30 signals a "natural" boundary (second derivative zero).
	After the classic tridiagonal "spline" routine (cf. Numerical Recipes).
*/
void NUMcubicSplineInterpolation_getSecondDerivatives (double x [], double y [], integer n, double yp1, double ypn, double y2 []) {
	autoNUMvector<double> u (1, n - 1);   // scratch storage for the decomposition
	if (yp1 > 0.99e30) {
		y2 [1] = u [1] = 0.0;   // natural lower boundary condition
	} else {
		// clamped lower boundary: first derivative prescribed as yp1
		y2 [1] = -0.5;
		u [1] = (3.0 / (x [2] - x [1])) * ( (y [2] - y [1]) / (x [2] - x [1]) - yp1);
	}
	// forward elimination sweep of the tridiagonal system
	for (integer i = 2; i <= n - 1; i ++) {
		double sig = (x [i] - x [i - 1]) / (x [i + 1] - x [i - 1]);
		double p = sig * y2 [i - 1] + 2.0;
		y2 [i] = (sig - 1.0) / p;
		u [i] = (y [i + 1] - y [i]) / (x [i + 1] - x [i]) - (y [i] - y [i - 1]) / (x [i] - x [i - 1]);
		u [i] = (6.0 * u [i] / (x [i + 1] - x [i - 1]) - sig * u [i - 1]) / p;
	}
	double qn, un;
	if (ypn > 0.99e30) {
		qn = un = 0.0;   // natural upper boundary condition
	} else {
		// clamped upper boundary: first derivative prescribed as ypn
		qn = 0.5;
		un = (3.0 / (x [n] - x [n - 1])) * (ypn - (y [n] - y [n - 1]) / (x [n] - x [n - 1]));
	}
	y2 [n] = (un - qn * u [n - 1]) / (qn * y2 [n - 1] + 1.0);
	// back-substitution
	for (integer k = n - 1; k >= 1; k--) {
		y2 [k] = y2 [k] * y2 [k + 1] + u [k];
	}
}
  1504. double NUMcubicSplineInterpolation (double xa [], double ya [], double y2a [], integer n, double x) {
  1505. integer klo = 1, khi = n;
  1506. while (khi - klo > 1) {
  1507. integer k = (khi + klo) >> 1;
  1508. if (xa [k] > x) {
  1509. khi = k;
  1510. } else {
  1511. klo = k;
  1512. }
  1513. }
  1514. double h = xa [khi] - xa [klo];
  1515. Melder_require (h != 0.0, U"NUMcubicSplineInterpolation: bad input value.");
  1516. double a = (xa [khi] - x) / h;
  1517. double b = (x - xa [klo]) / h;
  1518. double y = a * ya [klo] + b * ya [khi] + ( (a * a * a - a) * y2a [klo] + (b * b * b - b) * y2a [khi]) * (h * h) / 6.0;
  1519. return y;
  1520. }
  1521. double NUMsinc (const double x) {
  1522. struct gsl_sf_result_struct result;
  1523. int status = gsl_sf_sinc_e (x / NUMpi, &result);
  1524. return status == GSL_SUCCESS ? result. val : undefined;
  1525. }
  1526. double NUMsincpi (const double x) {
  1527. struct gsl_sf_result_struct result;
  1528. int status = gsl_sf_sinc_e (x, &result);
  1529. return status == GSL_SUCCESS ? result. val : undefined;
  1530. }
  1531. /* Does the line segment from (x1,y1) to (x2,y2) intersect with the line segment from (x3,y3) to (x4,y4)? */
  1532. int NUMdoLineSegmentsIntersect (double x1, double y1, double x2, double y2, double x3, double y3, double x4, double y4) {
  1533. int o11 = NUMgetOrientationOfPoints (x1, y1, x2, y2, x3, y3);
  1534. int o12 = NUMgetOrientationOfPoints (x1, y1, x2, y2, x4, y4);
  1535. int o21 = NUMgetOrientationOfPoints (x3, y3, x4, y4, x1, y1);
  1536. int o22 = NUMgetOrientationOfPoints (x3, y3, x4, y4, x2, y2);
  1537. return ((o11 * o12 < 0) && (o21 * o22 < 0)) || (o11 *o12 *o21 *o22 == 0);
  1538. }
  1539. int NUMgetOrientationOfPoints (double x1, double y1, double x2, double y2, double x3, double y3) {
  1540. int orientation;
  1541. double dx2 = x2 - x1, dy2 = y2 - y1;
  1542. double dx3 = x3 - x1, dy3 = y3 - y1;
  1543. if (dx2 * dy3 > dy2 * dx3) {
  1544. orientation = 1;
  1545. } else if (dx2 * dy3 < dy2 * dx3) {
  1546. orientation = -1;
  1547. } else {
  1548. if ((dx2 * dx3 < 0) || (dy2 * dy3 < 0)) {
  1549. orientation = -1;
  1550. } else if ((dx2 * dx2 + dy2 * dy2) >= (dx3 * dx3 + dy3 * dy3)) {
  1551. orientation = 0;
  1552. } else {
  1553. orientation = 1;
  1554. }
  1555. }
  1556. return orientation;
  1557. }
/*
	Collect the intersections of the line segment from (x1,y1) to (x2,y2) with the four
	sides of the rectangle [xmin, xmax] x [ymin, ymax].
	The intersection points are stored in xi [1..ni] and yi [1..ni] (the caller must
	provide room for at least 3 points); the number of intersections ni is returned.
*/
int NUMgetIntersectionsWithRectangle (double x1, double y1, double x2, double y2, double xmin, double ymin, double xmax, double ymax, double *xi, double *yi) {
	double x [6], y [6];
	integer ni = 0;
	// The rectangle's corners; side i runs from (x [i], y [i]) to (x [i+1], y [i+1]), i = 1..4.
	x [1] = x [4] = x [5] = xmin;
	x [2] = x [3] = xmax;
	y [1] = y [2] = y [5] = ymin;
	y [3] = y [4] = ymax;
	/*
		Calculate intersection of line segment through p1=(x1,y1) to p2(x2,y2) with line segment
		through p3=(x3,y3) to p4=(x4,y4).
		Parametrisation of the lines:
		l1 = p1 + s (p2 - p1), s in (-inf,+inf)
		l2 = p3 + t (p4 - p3), t in (-inf,+inf).
		When s and t are in [0,1] we have line segments between the points.
		At the intersection l1 == l2. We get for the x and y coordinates:
		x1 + s (x2 - x1) = x3 + t (x4 - x3).............(1)
		y1 + s (y2 - y1) = y3 + t (y4 - y3).............(2)
		Multiply (1)*(y2 - y1) and (2)*(x2 - x1):
		x1 (y2 - y1) + s (x2 - x1)(y2 - y1) = x3 (y2 - y1) + t (x4 - x3)(y2 - y1).......(3)
		y1 (x2 - x1) + s (y2 - y1)(x2 - x1) = y3 (x2 - x1) + t (y4 - y3)(x2 - x1).......(4)
		(3)-(4) with y21 = y2 -y1, x21 = x2 - x1, x43 = x4 - x3, ...
		x1 y21 - y1 x21 = x3 y21 - y3 x21 +t (x43 y21 - y43 x21)
		Combining:
		y31 x21 - x31 y21 = t (x43 y21 - y43 x21)
		Therefore at the intersection we have:
		t = (y31 x21 - x31 y21) / (x43 y21 - y43 x21)
		If (x43 y21 - y43 x21) == 0
		There is no intersection.
		If (t < 0 || t >= 1)
		No intersection in the segment l2
		To count intersections in a corner only once we have t < 0 instead of t <= 0!
	*/
	for (integer i = 1; i <= 4; i ++) {
		double denom = (x [i + 1] - x [i]) * (y2 - y1) - (y [i + 1] - y [i]) * (x2 - x1);
		double s, t, x3, y3;
		if (denom == 0.0) {
			continue;   // segment is parallel to this rectangle side: no intersection
		}
		/* We have an intersection. */
		t = ((y [i] - y1) * (x2 - x1) - (x [i] - x1) * (y2 - y1)) / denom;
		if (t < 0 || t >= 1) {
			continue;
		}
		/* Intersection is within rectangle side. */
		x3 = x [i] + t * (x [i + 1] - x [i]);
		y3 = y [i] + t * (y [i + 1] - y [i]);
		/* s must also be valid */
		if (x1 != x2) {
			s = (x3 - x1) / (x2 - x1);
		} else {
			s = (y3 - y1) / (y2 - y1);
		}
		if (s < 0 || s >= 1) {
			continue;   // intersection lies outside the given segment
		}
		ni ++;
		Melder_require (ni <= 3, U"Too many intersections.");
		xi [ni] = x3;
		yi [ni] = y3;
	}
	return ni;
}
/*
	Clip the line segment from (xl1,yl1) to (xl2,yl2) to the part that lies within the
	rectangle with corners (xr1,yr1) and (xr2,yr2).
	The (possibly clipped) segment is returned in (*xo1,*yo1)-(*xo2,*yo2), in the same
	direction as the input segment.
	Returns true if (part of) the segment lies within the rectangle, false otherwise.
	NOTE(review): on the "touching" path (one crossing, both end points outside) the
	outputs keep the unclipped input values — confirm that callers expect this.
*/
bool NUMclipLineWithinRectangle (double xl1, double yl1, double xl2, double yl2, double xr1, double yr1, double xr2, double yr2, double *xo1, double *yo1, double *xo2, double *yo2) {
	int ncrossings = 0;
	bool xswap, yswap;
	double xc [5], yc [5], xmin, xmax, ymin, ymax;
	*xo1 = xl1; *yo1 = yl1; *xo2 = xl2; *yo2 = yl2;
	// This test first because we expect the majority of the tested segments to be
	// within the rectangle
	if (xl1 >= xr1 && xl1 <= xr2 && yl1 >= yr1 && yl1 <= yr2 &&
		xl2 >= xr1 && xl2 <= xr2 && yl2 >= yr1 && yl2 <= yr2) {
		return true;
	}
	// All lines that are completely outside the rectangle
	if ( (xl1 <= xr1 && xl2 <= xr1) || (xl1 >= xr2 && xl2 >= xr2) ||
		(yl1 <= yr1 && yl2 <= yr1) || (yl1 >= yr2 && yl2 >= yr2)) {
		return false;
	}
	// At least line spans (part of) the rectangle.
	// Get extremes in x and y of the line for easy testing further on.
	if (xl1 < xl2) {
		xmin = xl1; xmax = xl2; xswap = false;
	} else {
		xmin = xl2; xmax = xl1; xswap = true;
	}
	if (yl1 < yl2) {
		ymin = yl1; ymax = yl2; yswap = false;
	} else {
		ymin = yl2; ymax = yl1; yswap = true;
	}
	bool hline = yl1 == yl2, vline = xl1 == xl2;
	if (hline) {
		// Horizontal segment: only the x range needs clamping.
		if (xmin < xr1) {
			*xo1 = xr1;
		}
		if (xmax > xr2) {
			*xo2 = xr2;
		}
		if (xswap) {
			std::swap (*xo1, *xo2);
		}
		return true;
	}
	if (vline) {
		// Vertical segment: only the y range needs clamping.
		if (ymin < yr1) {
			*yo1 = yr1;
		}
		if (ymax > yr2) {
			*yo2 = yr2;
		}
		if (yswap) {
			std::swap (*yo1, *yo2);
		}
		return true;
	}
	// Now we know that the line from (x1,y1) to (x2,y2) is neither horizontal nor vertical.
	// Parametrize it as y = ax + b
	double a = (yl1 - yl2) / (xl1 - xl2);
	double b = yl1 - a * xl1;
	// To determine the crossings we have to avoid counting the crossings in a corner twice.
	// Therefore we test the corners inclusive (..<=..<=..) on the vertical borders of the rectangle
	// and exclusive (..<..<) at the horizontal borders.
	double y = a * xr1 + b; // Crossing at y with left border: x = xr1
	if (y >= yr1 && y <= yr2 && xmin < xr1) { // Within vertical range?
		ncrossings ++;
		xc [ncrossings] = xr1; yc [ncrossings] = y;
		// Provisional second end point: the input end point at the larger x.
		xc [2] = xmax;
		yc [2] = xl1 > xl2 ? yl1 : yl2;
	}
	double x = (yr2 - b) / a; // Crossing at x with top border: y = yr2
	if (x > xr1 && x < xr2 && ymax > yr2) { // Within horizontal range?
		ncrossings ++;
		xc [ncrossings] = x; yc [ncrossings] = yr2;
		if (ncrossings == 1) {
			yc [2] = ymin;
			xc [2] = yl1 < yl2 ? xl1 : xl2;
		}
	}
	y = a * xr2 + b; // Crossing at y with right border: x = xr2
	if (y >= yr1 && y <= yr2 && xmax > xr2) { // Within vertical range?
		ncrossings ++;
		xc [ncrossings] = xr2; yc [ncrossings] = y;
		if (ncrossings == 1) {
			xc [2] = xmin;
			yc [2] = xl1 < xl2 ? yl1 : yl2;
		}
	}
	x = (yr1 - b) / a; // Crossing at x with bottom border: y = yr1
	if (x > xr1 && x < xr2 && ymin < yr1) {
		ncrossings ++;
		xc [ncrossings] = x; yc [ncrossings] = yr1;
		if (ncrossings == 1) {
			yc [2] = ymax;
			xc [2] = yl1 > yl2 ? xl1 : xl2;
		}
	}
	if (ncrossings == 0) {
		return false;
	}
	Melder_require (ncrossings <= 2, U"Too many crossings found.");
	/*
		if start and endpoint of line are outside rectangle and ncrossings == 1, then the line only touches.
	*/
	if (ncrossings == 1 && (xl1 < xr1 || xl1 > xr2 || yl1 < yr1 || yl1 > yr2) &&
		(xl2 < xr1 || xl2 > xr2 || yl2 < yr1 || yl2 > yr2)) {
		return true;
	}
	// Order the two output points along the direction of the input segment.
	if ((xc [1] > xc [2] && ! xswap) || (xc [1] < xc [2] && xswap)) {
		std::swap (xc [1], xc [2]);
		std::swap (yc [1], yc [2]);
	}
	*xo1 = xc [1]; *yo1 = yc [1]; *xo2 = xc [2]; *yo2 = yc [2];
	return true;
}
  1732. void NUMgetEllipseBoundingBox (double a, double b, double cospsi, double *width, double *height) {
  1733. Melder_assert (cospsi >= -1 && cospsi <= 1);
  1734. if (cospsi == 1) {
  1735. // a-axis along x-axis
  1736. *width = a;
  1737. *height = b;
  1738. } else if (cospsi == 0) {
  1739. // a-axis along y-axis
  1740. *width = b;
  1741. *height = a;
  1742. } else {
  1743. double psi = acos (cospsi), sn = sin (psi);
  1744. double phi = atan2 (-b * sn, a * cospsi);
  1745. *width = fabs (a * cospsi * cos (phi) - b * sn * sin (phi));
  1746. phi = atan2 (b * cospsi , a * sn);
  1747. *height = fabs (a * sn * cos (phi) + b * cospsi * sin (phi));
  1748. }
  1749. }
  1750. /*
  1751. Closely modeled after the netlib code by Oleg Keselyov.
  1752. */
  1753. double NUMminimize_brent (double (*f) (double x, void *closure), double a, double b, void *closure, double tol, double *fx) {
  1754. double x, v, fv, w, fw;
  1755. const double golden = 1 - NUM_goldenSection;
  1756. const double sqrt_epsilon = sqrt (NUMfpp -> eps);
  1757. integer itermax = 60;
  1758. Melder_assert (tol > 0 && a < b);
  1759. /* First step - golden section */
  1760. v = a + golden * (b - a);
  1761. fv = (*f) (v, closure);
  1762. x = v; w = v;
  1763. *fx = fv; fw = fv;
  1764. for (integer iter = 1; iter <= itermax; iter ++) {
  1765. double range = b - a;
  1766. double middle_range = (a + b) / 2.0;
  1767. double tol_act = sqrt_epsilon * fabs (x) + tol / 3.0;
  1768. double new_step; /* Step at this iteration */
  1769. if (fabs (x - middle_range) + range / 2.0 <= 2.0 * tol_act) {
  1770. return x;
  1771. }
  1772. // Obtain the golden section step
  1773. new_step = golden * (x < middle_range ? b - x : a - x);
  1774. // Decide if the parabolic interpolation can be tried
  1775. if (fabs (x - w) >= tol_act) {
  1776. /*
  1777. Interpolation step is calculated as p/q;
  1778. division operation is delayed until last moment.
  1779. */
  1780. double t = (x - w) * (*fx - fv);
  1781. double q = (x - v) * (*fx - fw);
  1782. double p = (x - v) * q - (x - w) * t;
  1783. q = 2.0 * (q - t);
  1784. if (q > 0.0) {
  1785. p = -p;
  1786. } else {
  1787. q = -q;
  1788. }
  1789. /*
  1790. If x+p/q falls in [a,b], not too close to a and b,
  1791. and isn't too large, it is accepted.
  1792. If p/q is too large then the golden section procedure can
  1793. reduce [a,b] range.
  1794. */
  1795. if (fabs (p) < fabs (new_step * q) &&
  1796. p > q * (a - x + 2.0 * tol_act) &&
  1797. p < q * (b - x - 2.0 * tol_act)) {
  1798. new_step = p / q;
  1799. }
  1800. }
  1801. // Adjust the step to be not less than tolerance.
  1802. if (fabs (new_step) < tol_act) {
  1803. new_step = new_step > 0.0 ? tol_act : - tol_act;
  1804. }
  1805. // Obtain the next approximation to min and reduce the enveloping range
  1806. {
  1807. double t = x + new_step; // Tentative point for the min
  1808. double ft = (*f) (t, closure);
  1809. /*
  1810. If t is a better approximation, reduce the range so that
  1811. t would fall within it. If x remains the best, reduce the range
  1812. so that x falls within it.
  1813. */
  1814. if (ft <= *fx) {
  1815. if (t < x) {
  1816. b = x;
  1817. } else {
  1818. a = x;
  1819. }
  1820. v = w; w = x; x = t;
  1821. fv = fw; fw = *fx; *fx = ft;
  1822. } else {
  1823. if (t < x) {
  1824. a = t;
  1825. } else {
  1826. b = t;
  1827. }
  1828. if (ft <= fw || w == x) {
  1829. v = w; w = t;
  1830. fv = fw; fw = ft;
  1831. } else if (ft <= fv || v == x || v == w) {
  1832. v = t;
  1833. fv = ft;
  1834. }
  1835. }
  1836. }
  1837. }
  1838. Melder_warning (U"NUMminimize_brent: maximum number of iterations (", itermax, U") exceeded.");
  1839. return x;
  1840. }
  1841. /*
  1842. probs is probability vector, i.e. all 0 <= probs [i] <= 1 and sum(i=1;i=nprobs, probs [i])= 1
  1843. p is a probability
  1844. */
  1845. integer NUMgetIndexFromProbability (double *probs, integer nprobs, double p) {
  1846. integer index = 1;
  1847. double psum = probs [index];
  1848. while (p > psum && index < nprobs) {
  1849. psum += probs [++ index];
  1850. }
  1851. return index;
  1852. }
  1853. // straight line fitting
/*
	Robust straight-line fit y = m * x + intercept through the points (x [i], y [i]),
	i = 1..numberOfPoints, by Theil's method (median of pairwise slopes).
	incompleteMethod == true : use only the numberOfPoints/2 pairs (i, i + n2);
	incompleteMethod == false: use all numberOfPoints*(numberOfPoints-1)/2 pairs.
	out_m and out_intercept may be null when the caller does not need that value.
*/
void NUMlineFit_theil (double *x, double *y, integer numberOfPoints,
	double *out_m, double *out_intercept, bool incompleteMethod)
{
	try {
		/* Theil's incomplete method:
			Split (x [i],y [i]) as
			(x [i],y [i]), (x [N+i],y [N+i]), i=1..numberOfPoints/2
			m [i] = (y [N+i]-y [i])/(x [N+i]-x [i])
			m = median (m [i])
			b = median(y [i]-m*x [i])
		*/
		double m, intercept;
		if (numberOfPoints <= 0) {
			m = intercept = undefined;   // no data: no fit
		} else if (numberOfPoints == 1) {
			// one point: horizontal line through it
			intercept = y [1];
			m = 0.0;
		} else if (numberOfPoints == 2) {
			// two points: the line through both
			m = (y [2] - y [1]) / (x [2] - x [1]);
			intercept = y [1] - m * x [1];
		} else {
			integer numberOfCombinations;
			autoVEC mbs;   // scratch: pairwise slopes first, intercept candidates later
			if (incompleteMethod) { // incomplete method
				numberOfCombinations = numberOfPoints / 2;
				mbs = VECzero (numberOfPoints); // sized numberOfPoints: reused for the intercepts below
				integer n2 = numberOfPoints % 2 == 1 ? numberOfCombinations + 1 : numberOfCombinations;
				for (integer i = 1; i <= numberOfCombinations; i ++)
					mbs [i] = (y [n2 + i] - y [i]) / (x [n2 + i] - x [i]);
			} else { // use all combinations
				numberOfCombinations = (numberOfPoints - 1) * numberOfPoints / 2;
				mbs = VECzero (numberOfCombinations);
				integer index = 0;
				for (integer i = 1; i < numberOfPoints; i ++)
					for (integer j = i + 1; j <= numberOfPoints; j ++)
						mbs [++ index] = (y [j] - y [i]) / (x [j] - x [i]);
			}
			VECsort_inplace (mbs.subview (1, numberOfCombinations));
			m = NUMquantile (mbs.subview (1, numberOfCombinations), 0.5);   // median slope
			for (integer i = 1; i <= numberOfPoints; i ++)
				mbs [i] = y [i] - m * x [i];
			VECsort_inplace (mbs.subview (1, numberOfPoints));
			intercept = NUMquantile (mbs.subview (1, numberOfPoints), 0.5);   // median intercept
		}
		if (out_m)
			*out_m = m;
		if (out_intercept)
			*out_intercept = intercept;
	} catch (MelderError) {
		Melder_throw (U"No line fit (Theil's method).");
	}
}
  1906. void NUMlineFit_LS (double *x, double *y, integer numberOfPoints, double *p_m, double *intercept) {
  1907. double sx = 0.0, sy = 0.0;
  1908. for (integer i = 1; i <= numberOfPoints; i ++) {
  1909. sx += x [i];
  1910. sy += y [i];
  1911. }
  1912. double xmean = sx / numberOfPoints;
  1913. double st2 = 0.0, m = 0.0;
  1914. for (integer i = 1; i <= numberOfPoints; i ++) {
  1915. double t = x [i] - xmean;
  1916. st2 += t * t;
  1917. m += t * y [i];
  1918. }
  1919. // y = m*x + b
  1920. m /= st2;
  1921. if (intercept) {
  1922. *intercept = (sy - m * sx) / numberOfPoints;
  1923. }
  1924. if (p_m) {
  1925. *p_m = m;
  1926. }
  1927. }
  1928. void NUMlineFit (double *x, double *y, integer numberOfPoints, double *m, double *intercept, int method) {
  1929. if (method == 1) {
  1930. NUMlineFit_LS (x, y, numberOfPoints, m, intercept);
  1931. } else if (method == 3) {
  1932. NUMlineFit_theil (x, y, numberOfPoints, m, intercept, false);
  1933. } else {
  1934. NUMlineFit_theil (x, y, numberOfPoints, m, intercept, true);
  1935. }
  1936. }
  1937. // IEEE: Programs for digital signal processing section 4.3 LPTRN
  1938. // lpc [1..n] to rc [1..n]
  1939. void NUMlpc_lpc_to_rc (double *lpc, integer p, double *rc) {
  1940. autoNUMvector<double> b (1, p);
  1941. autoNUMvector<double> a (NUMvector_copy<double> (lpc, 1, p), 1);
  1942. for (integer m = p; m > 0; m--) {
  1943. rc [m] = a [m];
  1944. Melder_require (fabs (rc [m]) <= 1.0, U"Relection coefficient [", m, U"] larger than 1.");
  1945. for (integer i = 1; i < m; i ++) {
  1946. b [i] = a [i];
  1947. }
  1948. for (integer i = 1; i < m; i ++) {
  1949. a [i] = (b [i] - rc [m] * b [m - i]) / (1.0 - rc [m] * rc [m]);
  1950. }
  1951. }
  1952. }
  1953. void NUMlpc_rc_to_area2 (double *rc, integer n, double *area);
  1954. void NUMlpc_rc_to_area2 (double *rc, integer n, double *area) {
  1955. double s = 0.0001; /* 1.0 cm^2 at glottis */
  1956. for (integer i = n; i > 0; i--) {
  1957. s *= (1.0 + rc [i]) / (1.0 - rc [i]);
  1958. area [i] = s;
  1959. }
  1960. }
void NUMlpc_area_to_lpc2 (double *area, integer n, double *lpc);
/*
	Convert the area function to linear prediction coefficients lpc [1..n].
	NOTE(review): reads area [n+1], so the caller must supply n+1 areas — confirm.
*/
void NUMlpc_area_to_lpc2 (double *area, integer n, double *lpc) {
	// from area to reflection coefficients
	autoNUMvector<double> rc (1, n);
	// normalisation: area [n+1] = 0.0001
	for (integer j = n; j > 0; j--) {
		double ar = area [j+1] / area [j];
		rc [j] = (1 - ar) / (1 + ar);
	}
	// LPTRAN works from mouth to lips:
	// reverse the reflection coefficients into lpc before the in-place update
	for (integer j = 1; j <= n; j ++) {
		lpc [j] = rc [n - j + 1];
	}
	// in-place update; the statement order inside the k loop is essential
	for (integer j = 2; j <= n; j ++) {
		integer nh = j / 2;
		double q = rc [j];
		for (integer k = 1; k <= nh; k ++) {
			double at = lpc [k] + q * lpc [j - k];
			lpc [j - k] += q * lpc [k];
			lpc [k] = at;
		}
	}
}
void NUMlpc_lpc_to_rc2 (double *lpc, integer m, double *rc);
// NOTE: not correct yet (original author's Dutch comment: "klopt nog niet");
// this lpc -> reflection-coefficient conversion is known to be unfinished.
void NUMlpc_lpc_to_rc2 (double *lpc, integer m, double *rc) {
	NUMvector_copyElements<double> (lpc, rc, 1, m);
	for (integer j = 2; j <= m; j ++) {
		integer jb = m + 1 - j;
		integer mh = (jb + 1) / 2;
		double rct = rc [jb+1];
		double d = 1.0 - rct * rct;
		for (integer k = 1; k <= mh; k ++) {
			rc [k] *= (1 - rct) / d;
		}
	}
}
  1997. // area [1] at lips generates n+1 areas from n rc's
  1998. void NUMlpc_rc_to_area (double *rc, integer m, double *area) {
  1999. area [m+1] = 0.0001; /* 1.0 cm^2 */
  2000. for (integer j = 1; j <= m; j ++) {
  2001. double ar = (1.0 - rc [m+1-j]) / (1.0 + rc [m+1-j]);
  2002. area [m+1-j] = area [m+2-j] / ar;
  2003. }
  2004. }
  2005. // returns m-1 reflection coefficients from m areas
  2006. void NUMlpc_area_to_rc (double *area, integer m, double *rc) {
  2007. for (integer j = 1; j <= m - 1; j ++) {
  2008. double ar = area [j+1] / area [j];
  2009. rc [j] = (1.0 - ar) / (1.0 + ar);
  2010. }
  2011. }
  2012. void NUMlpc_rc_to_lpc (double *rc, integer m, double *lpc);
  2013. void NUMlpc_rc_to_lpc (double *rc, integer m, double *lpc) {
  2014. NUMvector_copyElements<double> (rc, lpc, 1, m);
  2015. for (integer j = 2; j <= m; j ++) {
  2016. for (integer k = 1; k <= j / 2; k ++) {
  2017. double at = lpc [k] + rc [j] * lpc [j - k];
  2018. lpc [j - k] += rc [j] * lpc [k];
  2019. lpc [k] = at;
  2020. }
  2021. }
  2022. }
  2023. void NUMlpc_area_to_lpc (double *area, integer m, double *lpc) {
  2024. // from area to reflection coefficients
  2025. autoNUMvector<double> rc (1, m);
  2026. // normalisation: area [n+1] = 0.0001
  2027. NUMlpc_area_to_rc (area, m, rc.peek());
  2028. NUMlpc_rc_to_lpc (rc.peek(), m - 1, lpc);
  2029. }
  2030. void NUMlpc_lpc_to_area (double *lpc, integer m, double *area) {
  2031. autoNUMvector<double> rc (1, m);
  2032. NUMlpc_lpc_to_rc (lpc, m, rc.peek());
  2033. NUMlpc_rc_to_area (rc.peek(), m, area);
  2034. }
#undef SIGN
/*
	Tuning constants for NUMrandomBinomial below (after the GSL binomial sampler).
*/
#define SMALL_MEAN 14
/* If n*p < SMALL_MEAN then use BINV algorithm. The ranlib implementation used cutoff=30;
 * but on my (Brian Gough) computer 14 works better
 */
#define BINV_CUTOFF 110
/* In BINV, do not permit ix too large */
#define FAR_FROM_MEAN 20
/* If ix-n*p is larger than this, then use the "squeeze" algorithm.
 * Ranlib used 20, and this seems to be the best choice on my (Brian Gough) machine as well.
 */
#define LNFACT(x) gsl_sf_lnfact(x)
  2047. inline static double Stirling (double y1)
  2048. {
  2049. double y2 = y1 * y1;
  2050. double s = (13860.0 - (462.0 - (132.0 - (99.0 - 140.0 / y2) / y2) / y2) / y2) / y1 / 166320.0;
  2051. return s;
  2052. }
  2053. // djmw 20121211 replaced calls to gsl_rng_uniform with NUMrandomUniform (0,1)
/*
	Draw a pseudo-random deviate from the binomial distribution with parameters p and n.
	Returns -100000000 when the arguments are invalid (p outside [0, 1] or n < 0).
	Adapted from the GSL binomial sampler: inverse-cdf (BINV) for small means,
	otherwise the BTPE algorithm of Kachitvichyanukul & Schmeiser.
*/
integer NUMrandomBinomial (double p, integer n) {
	if (p < 0.0 || p > 1.0 || n < 0) {
		return -100000000;   // error signal: invalid arguments
	}
	integer ix; // return value
	int flipped = 0;
	if (n == 0) {
		return 0;
	}
	if (p > 0.5) {
		p = 1.0 - p; // work with small p
		flipped = 1;   // remember to mirror the result at the end
	}
	double q = 1.0 - p;
	double s = p / q;
	double np = n * p;   // the mean of the distribution
	/*
		Inverse cdf logic for small mean (BINV in K+S)
	*/
	if (np < SMALL_MEAN) {
		double f0 = pow (q, n); // djmw gsl_pow_int (q, n); f(x), starting with x=0
		while (1) {
			/*
				This while(1) loop will almost certainly only loop once; but
				if u=1 to within a few epsilons of machine precision, then it
				is possible for roundoff to prevent the main loop over ix to
				achieve its proper value. Following the ranlib implementation,
				we introduce a check for that situation, and when it occurs,
				we just try again.
			*/
			double f = f0;
			double u = NUMrandomUniform (0.0, 1.0); // djmw gsl_rng_uniform (rng);
			for (ix = 0; ix <= BINV_CUTOFF; ++ ix) {
				if (u < f) {
					goto Finish;
				}
				u -= f;
				// Use recursion f(x+1) = f(x)* [(n-x)/(x+1)]* [p/(1-p)]
				f *= s * (n - ix) / (ix + 1.0);
			}
			/*
				It should be the case that the 'goto Finish' was encountered
				before this point was ever reached. But if we have reached
				this point, then roundoff has prevented u from decreasing
				all the way to zero. This can happen only if the initial u
				was very nearly equal to 1, which is a rare situation. In
				that rare situation, we just try again.
				Note, following the ranlib implementation, we loop ix only to
				a hardcoded value of SMALL_MEAN_LARGE_N=110; we could have
				looped to n, and 99.99...% of the time it won't matter. This
				choice, I think is a little more robust against the rare
				roundoff error. If n>LARGE_N, then it is technically
				possible for ix>LARGE_N, but it is astronomically rare, and
				if ix is that large, it is more likely due to roundoff than
				probability, so better to nip it at LARGE_N than to take a
				chance that roundoff will somehow conspire to produce an even
				larger (and more improbable) ix. If n<LARGE_N, then once
				ix=n, f=0, and the loop will continue until ix=LARGE_N.
			*/
		}
	} else {
		/*
			For n >= SMALL_MEAN, we invoke the BTPE algorithm
		*/
		double ffm = np + p; // ffm = n*p+p
		integer m = (integer) ffm; // m = int floor [n*p+p]
		double fm = m; // fm = double m
		double xm = fm + 0.5; // xm = half integer mean (tip of triangle)
		double npq = np * q; // npq = n*p*q
		/*
			Compute cumulative area of tri, para, exp tails
			p1: radius of triangle region; since height=1, also: area of region
			p2: p1 + area of parallelogram region
			p3: p2 + area of left tail
			p4: p3 + area of right tail
			pi/p4: probability of i'th area (i=1,2,3,4)
			Note: magic numbers 2.195, 4.6, 0.134, 20.5, 15.3
			These magic numbers are not adjustable...at least not easily!
		*/
		double p1 = Melder_roundDown (2.195 * sqrt (npq) - 4.6 * q) + 0.5;
		// xl, xr: left and right edges of triangle
		double xl = xm - p1;
		double xr = xm + p1;
		/*
			Parameter of exponential tails
			Left tail: t(x) = c*exp(-lambda_l* [xl - (x+0.5)])
			Right tail: t(x) = c*exp(-lambda_r* [(x+0.5) - xr])
		*/
		double c = 0.134 + 20.5 / (15.3 + fm);
		double p2 = p1 * (1.0 + c + c);
		double al = (ffm - xl) / (ffm - xl * p);
		double lambda_l = al * (1.0 + 0.5 * al);
		double ar = (xr - ffm) / (xr * q);
		double lambda_r = ar * (1.0 + 0.5 * ar);
		double p3 = p2 + c / lambda_l;
		double p4 = p3 + c / lambda_r;
		double var, accept;
		double u, v; /* random variates */
	TryAgain:
		/*
			Generate random variates, u specifies which region: Tri, Par, Tail
		*/
		u = p4 * NUMrandomUniform (0.0, 1.0); // djmw gsl_rng_uniform (rng) * p4;
		v = NUMrandomUniform (0.0, 1.0); // djmw gsl_rng_uniform (rng);
		if (u <= p1) {
			// Triangular region
			ix = (integer) (xm - p1 * v + u);
			goto Finish;
		} else if (u <= p2) {
			// Parallelogram region
			double x = xl + (u - p1) / c;
			v = v * c + 1.0 - fabs (x - xm) / p1;
			if (v > 1.0 || v <= 0.0) {
				goto TryAgain;
			}
			ix = (integer) x;
		} else if (u <= p3) {
			// Left tail
			ix = (integer) (xl + log (v) / lambda_l);
			if (ix < 0) {
				goto TryAgain;
			}
			v *= ((u - p2) * lambda_l);
		} else {
			// Right tail
			ix = (integer) (xr - log (v) / lambda_r);
			if (ix > (double) n) {
				goto TryAgain;
			}
			v *= ((u - p3) * lambda_r);
		}
		/*
			At this point, the goal is to test whether v <= f(x)/f(m)
			v <= f(x)/f(m) = (m!(n-m)! / (x!(n-x)!)) * (p/q)^{x-m}
			Here is a direct test using logarithms. It is a little
			slower than the various "squeezing" computations below, but
			if things are working, it should give exactly the same answer
			(given the same random number seed).
		*/
#ifdef DIRECT
		var = log (v);
		accept = LNFACT (m) + LNFACT (n - m) - LNFACT (ix) - LNFACT (n - ix) + (ix - m) * log (p / q);
#else // SQUEEZE METHOD
		/*
			More efficient determination of whether v < f(x)/f(M)
		*/
		integer k = labs (ix - m);
		if (k <= FAR_FROM_MEAN) {
			/*
				If ix near m (ie, |ix-m|<FAR_FROM_MEAN), then do
				explicit evaluation using recursion relation for f(x)
			*/
			double g = (n + 1) * s;
			double f = 1.0;
			var = v;
			if (m < ix) {
				for (integer i = m + 1; i <= ix; i ++) {
					f *= (g / i - s);
				}
			} else if (m > ix) {
				for (integer i = ix + 1; i <= m; i ++) {
					f /= (g / i - s);
				}
			}
			accept = f;
		} else {
			// If ix is far from the mean m: k=ABS(ix-m) large
			var = log (v);
			if (k < npq / 2 - 1) {
				/*
					"Squeeze" using upper and lower bounds on
					log(f(x)) The squeeze condition was derived
					under the condition k < npq/2-1
				*/
				double amaxp = k / npq * ((k * (k / 3.0 + 0.625) + (1.0 / 6.0)) / npq + 0.5);
				double ynorm = -(k * k / (2.0 * npq));
				if (var < ynorm - amaxp) {
					goto Finish;
				}
				if (var > ynorm + amaxp) {
					goto TryAgain;
				}
			}
			/*
				Now, again: do the test log(v) vs. log f(x)/f(M)
			*/
#if USE_EXACT
			/*
				This is equivalent to the above, but is a little (~20%) slower
				There are five log's vs three above, maybe that's it?
			*/
			accept = LNFACT (m) + LNFACT (n - m) - LNFACT (ix) - LNFACT (n - ix) + (ix - m) * log (p / q);
#else
			/* USE STIRLING:
				The "#define Stirling" above corresponds to the first five
				terms in asymptotic formula for
				log Gamma (y) - (y-0.5)log(y) + y - 0.5 log(2*pi);
				See Abramowitz and Stegun, eq 6.1.40
				Note below: two Stirling's are added, and two are
				subtracted. In both K+S, and in the ranlib
				implementation, all four are added. I (jt) believe that
				is a mistake -- this has been confirmed by personal
				correspondence w/ Dr. Kachitvichyanukul. Note, however,
				the corrections are so small, that I couldn't find an
				example where it made a difference that could be
				observed, let alone tested. In fact, define'ing Stirling
				to be zero gave identical results!! In practice, alv is
				O(1), ranging 0 to -10 or so, while the Stirling
				correction is typically O(10^{-5}) ...setting the
				correction to zero gives about a 2% performance boost;
				might as well keep it just to be pedantic.
			*/
			{
				double x1 = ix + 1.0;
				double w1 = n - ix + 1.0;
				double f1 = fm + 1.0;
				double z1 = n + 1.0 - fm;
				accept = xm * log (f1 / x1) + (n - m + 0.5) * log (z1 / w1) + (ix - m) * log (w1 * p / (x1 * q))
					+ Stirling (f1) + Stirling (z1) - Stirling (x1) - Stirling (w1);
			}
#endif
#endif
		}
		if (var <= accept) {
			goto Finish;
		} else {
			goto TryAgain;
		}
	}
Finish:
	// Undo the p > 0.5 mirroring, if any.
	return (flipped) ? (n - ix) : ix;
}
  2286. double NUMrandomBinomial_real (double p, integer n) {
  2287. if (p < 0.0 || p > 1.0 || n < 0) {
  2288. return undefined;
  2289. } else {
  2290. return (double) NUMrandomBinomial (p, n);
  2291. }
  2292. }
  2293. void NUMlngamma_complex (double zr, double zi, double *lnr, double *arg) {
  2294. double ln_re = undefined, ln_arg = undefined;
  2295. gsl_sf_result gsl_lnr, gsl_arg;
  2296. if (gsl_sf_lngamma_complex_e (zr, zi, & gsl_lnr, & gsl_arg)) {
  2297. ln_re = gsl_lnr.val;
  2298. ln_arg = gsl_arg.val;
  2299. }
  2300. if (lnr) {
  2301. *lnr = ln_re;
  2302. }
  2303. if (arg) {
  2304. *arg = ln_arg;
  2305. }
  2306. }
  2307. void NUMdmatrix_diagnoseCells (double **m, integer rb, integer re, integer cb, integer ce, integer maximumNumberOfPositionsToReport) {
  2308. integer numberOfInvalids = 0;
  2309. bool firstTime = true;
  2310. for (integer i = rb; i <= re; i ++) {
  2311. for (integer j = cb; j <= ce; j ++) {
  2312. if (isundef (m [i] [j])) {
  2313. numberOfInvalids ++;
  2314. if (firstTime) {
  2315. MelderInfo_writeLine (U"Invalid data at the following [row] [column] positions:");
  2316. firstTime = false;
  2317. }
  2318. if (numberOfInvalids <= maximumNumberOfPositionsToReport) {
  2319. if (numberOfInvalids % 10 != 0) {
  2320. MelderInfo_write (U" [", i, U"] [", j, U"] ");
  2321. } else {
  2322. MelderInfo_writeLine (U" [", i, U"] [", j, U"]");
  2323. }
  2324. } else {
  2325. return;
  2326. }
  2327. }
  2328. }
  2329. }
  2330. if (numberOfInvalids == 0) {
  2331. MelderInfo_writeLine (U"All cells have valid data.");
  2332. }
  2333. }
  2334. autoVEC NUMbiharmonic2DSplineInterpolation_getWeights (constVEC x, constVEC y, constVEC z) {
  2335. Melder_assert (x.size == y.size && x.size == z.size);
  2336. autoMAT g (x.size, x.size, kTensorInitializationType :: RAW);
  2337. /*
  2338. 1. Calculate the Green matrix G = |point [i]-point [j]|^2 (ln (|point [i]-point [j]|) - 1.0)
  2339. 2. Solve z = G.w for w
  2340. */
  2341. for (integer i = 1; i <= x.size; i ++) {
  2342. for (integer j = i + 1; j <= x.size; j ++) {
  2343. double dx = x [i] - x [j], dy = y [i] - y [j];
  2344. double distanceSquared = dx * dx + dy * dy;
  2345. g [i] [j] = g [j] [i] = distanceSquared * (0.5 * log (distanceSquared) - 1.0); // Green's function
  2346. }
  2347. g [i] [i] = 0.0;
  2348. }
  2349. autoVEC w = NUMsolveEquation (g.get(), z, 0.0);
  2350. return w;
  2351. }
  2352. double NUMbiharmonic2DSplineInterpolation (constVEC x, constVEC y, constVEC w, double xp, double yp) {
  2353. Melder_assert (x.size == y.size && x.size == w.size);
  2354. longdouble result = 0.0;
  2355. for (integer i = 1; i <= x.size; i ++) {
  2356. double dx = xp - x [i], dy = yp - y [i];
  2357. double d = dx * dx + dy * dy;
  2358. result += w [i] * d * (0.5 * log (d) - 1.0);
  2359. }
  2360. return (double) result;
  2361. }
  2362. void NUMfixIndicesInRange (integer lowerLimit, integer upperLimit, integer *lowIndex, integer *highIndex) {
  2363. Melder_require (lowerLimit <= upperLimit, U"The lower limit should not exceed the upper limit.");
  2364. if (*highIndex < *lowIndex) {
  2365. *lowIndex = lowerLimit; *highIndex = upperLimit;
  2366. } else if (*highIndex == *lowIndex) {
  2367. Melder_require (*lowIndex >= lowerLimit && *highIndex <= upperLimit, U"Both lower and upper indices are out of range.");
  2368. } else { // low < high
  2369. Melder_require (*lowIndex < upperLimit && *highIndex > lowerLimit, U"Both lower and upper indices are out of range.");
  2370. if (*lowIndex < lowerLimit) {
  2371. *lowIndex = lowerLimit;
  2372. }
  2373. if (*highIndex > upperLimit) {
  2374. *highIndex = upperLimit;
  2375. }
  2376. }
  2377. }
  2378. void MAT_getEntropies (constMAT m, double *out_h, double *out_hx,
  2379. double *out_hy, double *out_hygx, double *out_hxgy, double *out_uygx, double *out_uxgy, double *out_uxy) {
  2380. double h = undefined, hx = undefined, hy = undefined;
  2381. double hxgy = undefined, hygx = undefined, uygx = undefined, uxgy = undefined, uxy = undefined;
  2382. // Get total sum and check if all elements are not negative.
  2383. longdouble totalSum = 0.0;
  2384. for (integer i = 1; i <= m.nrow; i ++) {
  2385. for (integer j = 1; j <= m.ncol; j++) {
  2386. Melder_require (m [i][j] >= 0, U"Matrix elements should not be negative.");
  2387. totalSum += m [i] [j];
  2388. }
  2389. }
  2390. if (totalSum > 0.0) {
  2391. longdouble hy_t = 0.0;
  2392. for (integer i = 1; i <= m.nrow; i ++) {
  2393. longdouble rowsum = 0.0;
  2394. for (integer j = 1; j <= m.ncol; j++) rowsum += m [i] [j];
  2395. if (rowsum > 0.0) {
  2396. longdouble p = rowsum / totalSum;
  2397. hy_t -= p * NUMlog2 (p);
  2398. }
  2399. }
  2400. hy = (double) hy_t;
  2401. longdouble hx_t = 0.0;
  2402. for (integer j = 1; j <= m.ncol; j ++) {
  2403. longdouble colsum = 0.0;
  2404. for (integer i = 1; i <= m.nrow; i++) colsum += m [i] [j];
  2405. if (colsum > 0.0) {
  2406. longdouble p = colsum / totalSum;
  2407. hx_t -= p * NUMlog2 (p);
  2408. }
  2409. }
  2410. hx = (double) hx_t;
  2411. // Total entropy
  2412. longdouble h_t = 0.0;
  2413. for (integer i = 1; i <= m.nrow; i ++) {
  2414. for (integer j = 1; j <= m.ncol; j ++) {
  2415. if (m [i] [j] > 0.0) {
  2416. double p = m [i] [j] / totalSum;
  2417. h_t -= p * NUMlog2 (p);
  2418. }
  2419. }
  2420. }
  2421. h = (double) h_t;
  2422. hygx = h - hx;
  2423. hxgy = h - hy;
  2424. uygx = (hy - hygx) / hy;
  2425. uxgy = (hx - hxgy) / hx;
  2426. uxy = 2.0 * (hx + hy - h) / (hx + hy);
  2427. }
  2428. if (out_h) *out_h = h;
  2429. if (out_hx) *out_hx = hx;
  2430. if (out_hy) *out_hy = hy;
  2431. // Conditional entropies
  2432. if (out_hygx) *out_hygx = hygx;
  2433. if (*out_hxgy) *out_hxgy = hxgy;
  2434. if (*out_uygx) *out_uygx = uygx;
  2435. if (*out_uxgy) *out_uxgy = uxgy;
  2436. if (*out_uxy) *out_uxy = uxy;
  2437. }
  2438. #undef TINY
  2439. double NUMfrobeniusnorm (constMAT x) {
  2440. longdouble scale = 0.0;
  2441. longdouble ssq = 1.0;
  2442. for (integer i = 1; i <= x.nrow; i ++) {
  2443. for (integer j = 1; j <= x.ncol; j ++) {
  2444. if (x [i] [j] != 0.0) {
  2445. longdouble absxi = fabs (x [i] [j]);
  2446. if (scale < absxi) {
  2447. longdouble t = scale / absxi;
  2448. ssq = 1 + ssq * t * t;
  2449. scale = absxi;
  2450. } else {
  2451. longdouble t = absxi / scale;
  2452. ssq += t * t;
  2453. }
  2454. }
  2455. }
  2456. }
  2457. return scale * sqrt ((double) ssq);
  2458. }
  2459. double NUMtrace (constMAT a) {
  2460. Melder_assert (a.nrow == a.ncol);
  2461. longdouble trace = 0.0;
  2462. for (integer i = 1; i <= a.nrow; i ++) {
  2463. trace += a [i] [i];
  2464. }
  2465. return (double) trace;
  2466. }
  2467. double NUMtrace2_nn (constMAT x, constMAT y) {
  2468. Melder_assert (x.ncol == y.nrow && x.nrow == y.ncol);
  2469. longdouble trace = 0.0;
  2470. for (integer irow = 1; irow <= x.nrow; irow ++) {
  2471. for (integer k = 1; k <= x.ncol; k ++) {
  2472. trace += x [irow] [k] * y [k] [irow];
  2473. }
  2474. }
  2475. return (double) trace;
  2476. }
  2477. double NUMtrace2_tn (constMAT x, constMAT y) {
  2478. Melder_assert (x.ncol == y.ncol && x.nrow == y.nrow);
  2479. longdouble trace = 0.0;
  2480. for (integer irow = 1; irow <= x.ncol; irow ++) {
  2481. for (integer k = 1; k <= x.nrow; k ++) {
  2482. trace += x [k] [irow] * y [k] [irow];
  2483. }
  2484. }
  2485. return (double) trace;
  2486. }
  2487. double NUMtrace2_nt (constMAT x, constMAT y) {
  2488. return NUMtrace2_tn (y, x);
  2489. }
  2490. double NUMtrace2_tt (constMAT x, constMAT y) {
  2491. return NUMtrace2_nn (y, x);
  2492. }
/*
	Eigendecomposition of the real symmetric 2x2 matrix
		| a  b |
		| b  c |
	modelled after LAPACK's dlaev2: *out_rt1 receives the eigenvalue of larger
	absolute value, *out_rt2 the one of smaller absolute value, and
	(*out_cs1, *out_sn1) the unit eigenvector belonging to rt1.
	Any output pointer may be null; intermediate work is done in long double.
*/
void NUMeigencmp22 (double a, double b, double c, double *out_rt1, double *out_rt2, double *out_cs1, double *out_sn1) {
	// sm = trace, df = a - c; rt below becomes sqrt (df^2 + 4 b^2).
	longdouble sm = a + c, df = a - c, adf = fabs (df);
	longdouble tb = b + b, ab = fabs (tb);
	// acmx/acmn: the one of a, c with the larger/smaller absolute value.
	longdouble acmx = c, acmn = a;
	if (fabs (a) > fabs (c)) {
		acmx = a;
		acmn = c;
	}
	// Compute rt = sqrt (adf^2 + ab^2) without overflow, by factoring out the
	// larger of the two terms (hypot-style scaling).
	longdouble rt, tn;
	if (adf > ab) {
		tn = ab / adf;
		rt = adf * sqrt (1.0 + tn * tn);
	} else if (adf < ab) {
		tn = adf / ab;
		rt = ab * sqrt (1.0 + tn * tn);
	} else {
		rt = ab * sqrt (2.0);
	}
	longdouble rt1, rt2;
	integer sgn1, sgn2;   // sgn1: sign of rt1; sgn2: sign chosen for the eigenvector formula
	if (sm < 0) {
		// Trace negative: the eigenvalue of larger magnitude is (sm - rt) / 2.
		rt1 = 0.5 * (sm - rt);
		sgn1 = -1;
		/*
		Order of execution important.
		To get fully accurate smaller eigenvalue,
		next line needs to be executed in higher precision.
		*/
		// rt2 from the determinant: rt1 * rt2 == a*c - b^2, so rt2 == det / rt1,
		// evaluated as two scaled divisions to avoid overflow/cancellation.
		rt2 = (acmx / rt1) * acmn - (b / rt1) * b;
	} else if (sm > 0) {
		rt1 = 0.5 * (sm + rt);
		sgn1 = 1;
		/*
		Order of execution important.
		To get fully accurate smaller eigenvalue,
		next line needs to be executed in higher precision.
		*/
		rt2 = (acmx / rt1) * acmn - (b / rt1) * b;
	} else {
		// Zero trace: eigenvalues are +-rt/2; includes the case rt == 0 (a = b = c = 0).
		rt1 = 0.5 * rt;
		rt2 = -0.5 * rt;
		sgn1 = 1;
	}
	// Compute the eigenvector
	// cs is proportional to the first component before normalization:
	// pick the sign that avoids cancellation in df +- rt.
	longdouble cs;
	if (df >= 0) {
		cs = df + rt;
		sgn2 = 1;
	} else {
		cs = df - rt;
		sgn2 = -1;
	}
	// Normalize (cs, tb) into a unit vector, dividing by the larger component
	// so the ratio passed to sqrt stays <= 1 in magnitude.
	longdouble acs = fabs (cs), cs1, sn1;
	if (acs > ab) {
		longdouble ct = -tb / cs;
		sn1 = 1 / sqrt (1 + ct * ct);
		cs1 = ct * sn1;
	} else {
		if (ab == 0) {
			// b == 0 and cs == 0: the matrix is already diagonal.
			cs1 = 1;
			sn1 = 0;
		} else {
			tn = -cs / tb;
			cs1 = 1 / sqrt (1 + tn * tn);
			sn1 = tn * cs1;
		}
	}
	// When both sign choices agree, the vector computed above belongs to rt2;
	// rotate by 90 degrees to obtain the eigenvector of rt1.
	if (sgn1 == sgn2) {
		tn = cs1;
		cs1 = -sn1;
		sn1 = tn;
	}
	if (out_rt1) *out_rt1 = (double) rt1;
	if (out_rt2) *out_rt2 = (double) rt2;
	if (out_cs1) *out_cs1 = (double) cs1;
	if (out_sn1) *out_sn1 = (double) sn1;
}
  2570. /* End of file NUM2.cpp */