/*
 * MP3 quantization
 *
 * Copyright (c) 1999-2000 Mark Taylor
 * Copyright (c) 2000-2012 Robert Hegemann
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

/* $Id: vbrquantize.c,v 1.142 2012/02/07 13:36:35 robert Exp $ */

#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include "lame.h"
#include "machine.h"
#include "encoder.h"
#include "util.h"
#include "vbrquantize.h"
#include "quantize_pvt.h"

struct algo_s;
typedef struct algo_s algo_t;

typedef void (*alloc_sf_f) (const algo_t *, const int *, const int *, int);
typedef uint8_t (*find_sf_f) (const FLOAT *, const FLOAT *, FLOAT, unsigned int, uint8_t);

struct algo_s {
    alloc_sf_f alloc;
    find_sf_f find;
    const FLOAT *xr34orig;
    lame_internal_flags *gfc;
    gr_info *cod_info;
    int mingain_l;
    int mingain_s[3];
};
/* Remarks on optimizing compilers:
 *
 * The MSVC compiler may run into aliasing problems when accessing
 * memory through the fi_union; declaring it volatile does the trick here.
 *
 * The calc_sfb_noise_* functions are not inlined because Intel-compiler
 * optimized executables won't work as expected anymore.
 */
#ifdef _MSC_VER
# if _MSC_VER < 1400
#  define VOLATILE volatile
# else
#  define VOLATILE
# endif
#else
# define VOLATILE
#endif
typedef VOLATILE union {
    float f;
    int i;
} fi_union;

#ifdef TAKEHIRO_IEEE754_HACK
#define DOUBLEX double
#else
#define DOUBLEX FLOAT
#endif

#define MAGIC_FLOAT_def (65536*(128))
#define MAGIC_INT_def 0x4b000000

#ifdef TAKEHIRO_IEEE754_HACK
#else
/*********************************************************************
 * XRPOW_FTOI is a macro to convert floats to ints.
 * if XRPOW_FTOI(x) = nearest_int(x), then QUANTFAC(x) = adj43asm[x]
 *                    ROUNDFAC = -0.0946
 *
 * if XRPOW_FTOI(x) = floor(x), then QUANTFAC(x) = adj43[x]
 *                    ROUNDFAC = 0.4054
 *********************************************************************/
# define QUANTFAC(rx)  adj43[rx]
# define ROUNDFAC_def 0.4054f
# define XRPOW_FTOI(src,dest) ((dest) = (int)(src))
#endif
static int const MAGIC_INT = MAGIC_INT_def;
#ifndef TAKEHIRO_IEEE754_HACK
static DOUBLEX const ROUNDFAC = ROUNDFAC_def;
#endif
static DOUBLEX const MAGIC_FLOAT = MAGIC_FLOAT_def;
inline static float
vec_max_c(const float *xr34, unsigned int bw)
{
    float xfsf = 0;
    unsigned int i = bw >> 2u;
    unsigned int const remaining = (bw & 0x03u);

    while (i-- > 0) {
        if (xfsf < xr34[0]) {
            xfsf = xr34[0];
        }
        if (xfsf < xr34[1]) {
            xfsf = xr34[1];
        }
        if (xfsf < xr34[2]) {
            xfsf = xr34[2];
        }
        if (xfsf < xr34[3]) {
            xfsf = xr34[3];
        }
        xr34 += 4;
    }
    switch (remaining) {
    case 3: if (xfsf < xr34[2]) xfsf = xr34[2];
    case 2: if (xfsf < xr34[1]) xfsf = xr34[1];
    case 1: if (xfsf < xr34[0]) xfsf = xr34[0];
    default: break;
    }
    return xfsf;
}
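/*
 * find_lowest_scalefac:
 * 8-step binary search over the scalefactor range 0..255 for the smallest
 * gain sf at which ipow20[sf] * xr34 still fits into the quantizer range,
 * i.e. does not exceed IXMAX_VAL.
 */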
inline static uint8_t
find_lowest_scalefac(const FLOAT xr34)
{
    uint8_t sf_ok = 255;
    uint8_t sf = 128, delsf = 64;
    uint8_t i;
    FLOAT const ixmax_val = IXMAX_VAL;
    for (i = 0; i < 8; ++i) {
        FLOAT const xfsf = ipow20[sf] * xr34;
        if (xfsf <= ixmax_val) {
            sf_ok = sf;
            sf -= delsf;
        }
        else {
            sf += delsf;
        }
        delsf >>= 1;
    }
    return sf_ok;
}
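/*
 * k_34_4:
 * rounds four scaled magnitudes x[] to integer quantization values l3[],
 * using the adj43/adj43asm offset tables of LAME's x^(3/4) quantizer.
 * With TAKEHIRO_IEEE754_HACK the float-to-int conversion relies on the
 * IEEE-754 bias trick: adding MAGIC_FLOAT (2^23) to a small non-negative
 * float puts its integer part into the low mantissa bits, so reading the
 * int view and subtracting MAGIC_INT yields the rounded value. For example
 * 3.2f + 8388608.0f is stored as 8388611.0f and gives 3 (assuming the
 * default round-to-nearest float mode).
 */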
inline static void
k_34_4(DOUBLEX x[4], int l3[4])
{
#ifdef TAKEHIRO_IEEE754_HACK
    fi_union fi[4];

    assert(x[0] <= IXMAX_VAL && x[1] <= IXMAX_VAL && x[2] <= IXMAX_VAL && x[3] <= IXMAX_VAL);
    x[0] += MAGIC_FLOAT;
    fi[0].f = x[0];
    x[1] += MAGIC_FLOAT;
    fi[1].f = x[1];
    x[2] += MAGIC_FLOAT;
    fi[2].f = x[2];
    x[3] += MAGIC_FLOAT;
    fi[3].f = x[3];
    fi[0].f = x[0] + adj43asm[fi[0].i - MAGIC_INT];
    fi[1].f = x[1] + adj43asm[fi[1].i - MAGIC_INT];
    fi[2].f = x[2] + adj43asm[fi[2].i - MAGIC_INT];
    fi[3].f = x[3] + adj43asm[fi[3].i - MAGIC_INT];
    l3[0] = fi[0].i - MAGIC_INT;
    l3[1] = fi[1].i - MAGIC_INT;
    l3[2] = fi[2].i - MAGIC_INT;
    l3[3] = fi[3].i - MAGIC_INT;
#else
    assert(x[0] <= IXMAX_VAL && x[1] <= IXMAX_VAL && x[2] <= IXMAX_VAL && x[3] <= IXMAX_VAL);
    XRPOW_FTOI(x[0], l3[0]);
    XRPOW_FTOI(x[1], l3[1]);
    XRPOW_FTOI(x[2], l3[2]);
    XRPOW_FTOI(x[3], l3[3]);
    x[0] += QUANTFAC(l3[0]);
    x[1] += QUANTFAC(l3[1]);
    x[2] += QUANTFAC(l3[2]);
    x[3] += QUANTFAC(l3[3]);
    XRPOW_FTOI(x[0], l3[0]);
    XRPOW_FTOI(x[1], l3[1]);
    XRPOW_FTOI(x[2], l3[2]);
    XRPOW_FTOI(x[3], l3[3]);
#endif
}
/* Call the calc_sfb_noise_* functions only with sf values for which
 * sfpow34 * xr34 <= IXMAX_VAL holds.
 */
static FLOAT
calc_sfb_noise_x34(const FLOAT *xr, const FLOAT *xr34, unsigned int bw, uint8_t sf)
{
    DOUBLEX x[4];
    int l3[4];
    const FLOAT sfpow = pow20[sf + Q_MAX2];   /* pow(2.0, sf/4.0) */
    const FLOAT sfpow34 = ipow20[sf];         /* pow(sfpow, -3.0/4.0) */

    FLOAT xfsf = 0;
    unsigned int i = bw >> 2u;
    unsigned int const remaining = (bw & 0x03u);

    while (i-- > 0) {
        x[0] = sfpow34 * xr34[0];
        x[1] = sfpow34 * xr34[1];
        x[2] = sfpow34 * xr34[2];
        x[3] = sfpow34 * xr34[3];

        k_34_4(x, l3);

        x[0] = fabsf(xr[0]) - sfpow * pow43[l3[0]];
        x[1] = fabsf(xr[1]) - sfpow * pow43[l3[1]];
        x[2] = fabsf(xr[2]) - sfpow * pow43[l3[2]];
        x[3] = fabsf(xr[3]) - sfpow * pow43[l3[3]];
        xfsf += (x[0] * x[0] + x[1] * x[1]) + (x[2] * x[2] + x[3] * x[3]);

        xr += 4;
        xr34 += 4;
    }
    if (remaining) {
        x[0] = x[1] = x[2] = x[3] = 0;
        switch (remaining) {
        case 3: x[2] = sfpow34 * xr34[2];
        case 2: x[1] = sfpow34 * xr34[1];
        case 1: x[0] = sfpow34 * xr34[0];
        }

        k_34_4(x, l3);

        x[0] = x[1] = x[2] = x[3] = 0;
        switch (remaining) {
        case 3: x[2] = fabsf(xr[2]) - sfpow * pow43[l3[2]];
        case 2: x[1] = fabsf(xr[1]) - sfpow * pow43[l3[1]];
        case 1: x[0] = fabsf(xr[0]) - sfpow * pow43[l3[0]];
        }
        xfsf += (x[0] * x[0] + x[1] * x[1]) + (x[2] * x[2] + x[3] * x[3]);
    }
    return xfsf;
}
struct calc_noise_cache {
    int valid;
    FLOAT value;
};

typedef struct calc_noise_cache calc_noise_cache_t;
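/*
 * tri_calc_sfb_noise_x34:
 * returns 1 ("bad") if quantizing this band with scalefactor sf, sf+1 or
 * sf-1 would produce more noise than the allowed threshold l3_xmin.
 * Noise values are cached per scalefactor in did_it[], so repeated probes
 * during the binary search do not recompute them.
 */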
static uint8_t
tri_calc_sfb_noise_x34(const FLOAT *xr, const FLOAT *xr34, FLOAT l3_xmin, unsigned int bw,
                       uint8_t sf, calc_noise_cache_t *did_it)
{
    if (did_it[sf].valid == 0) {
        did_it[sf].valid = 1;
        did_it[sf].value = calc_sfb_noise_x34(xr, xr34, bw, sf);
    }
    if (l3_xmin < did_it[sf].value) {
        return 1;
    }
    if (sf < 255) {
        uint8_t const sf_x = sf + 1;
        if (did_it[sf_x].valid == 0) {
            did_it[sf_x].valid = 1;
            did_it[sf_x].value = calc_sfb_noise_x34(xr, xr34, bw, sf_x);
        }
        if (l3_xmin < did_it[sf_x].value) {
            return 1;
        }
    }
    if (sf > 0) {
        uint8_t const sf_x = sf - 1;
        if (did_it[sf_x].valid == 0) {
            did_it[sf_x].valid = 1;
            did_it[sf_x].value = calc_sfb_noise_x34(xr, xr34, bw, sf_x);
        }
        if (l3_xmin < did_it[sf_x].value) {
            return 1;
        }
    }
    return 0;
}
/**
 * Robert Hegemann 2001-05-01
 * calculates quantization step size determined by allowed masking
 */
static int
calc_scalefac(FLOAT l3_xmin, int bw)
{
    FLOAT const c = 5.799142446; /* 10 * 10^(2/3) * log10(4/3) */
    return 210 + (int) (c * log10f(l3_xmin / bw) - .5f);
}
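/*
 * guess_scalefac_x34:
 * uses the analytic estimate from calc_scalefac() instead of searching;
 * the guess is only clipped into the valid range [sf_min, 255]. For
 * example, when the allowed noise per line equals one (l3_xmin == bw) the
 * log term vanishes and the guess is the neutral step size 210.
 */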
static uint8_t
guess_scalefac_x34(const FLOAT *xr, const FLOAT *xr34, FLOAT l3_xmin, unsigned int bw,
                   uint8_t sf_min)
{
    int const guess = calc_scalefac(l3_xmin, bw);
    if (guess < sf_min) return sf_min;
    if (guess >= 255) return 255;
    (void) xr;
    (void) xr34;
    return guess;
}
/* the find_scalefac* routines calculate a quantization step size which
 * would introduce as much noise as is allowed. The larger the step size
 * the more quantization noise we'll get. The scalefactors are there to
 * lower the global step size, allowing limited differences in
 * quantization step sizes per band (shaping the noise).
 */
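/*
 * find_scalefac_x34:
 * 8-step binary search over the scalefactor range. A step size is "good"
 * when tri_calc_sfb_noise_x34() reports no distortion; the largest good
 * step size found is returned, but never less than sf_min (the quantizer
 * range limit of this band).
 */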
static uint8_t
find_scalefac_x34(const FLOAT *xr, const FLOAT *xr34, FLOAT l3_xmin, unsigned int bw,
                  uint8_t sf_min)
{
    calc_noise_cache_t did_it[256];
    uint8_t sf = 128, sf_ok = 255, delsf = 128, seen_good_one = 0, i;
    memset(did_it, 0, sizeof(did_it));
    for (i = 0; i < 8; ++i) {
        delsf >>= 1;
        if (sf <= sf_min) {
            sf += delsf;
        }
        else {
            uint8_t const bad = tri_calc_sfb_noise_x34(xr, xr34, l3_xmin, bw, sf, did_it);
            if (bad) {  /* distortion. try a smaller scalefactor */
                sf -= delsf;
            }
            else {
                sf_ok = sf;
                sf += delsf;
                seen_good_one = 1;
            }
        }
    }
    /* returning a scalefac without distortion, if possible
     */
    if (seen_good_one > 0) {
        sf = sf_ok;
    }
    if (sf <= sf_min) {
        sf = sf_min;
    }
    return sf;
}
/***********************************************************************
 *
 *      calc_short_block_vbr_sf()
 *      calc_long_block_vbr_sf()
 *
 *  Mark Taylor 2000-??-??
 *  Robert Hegemann 2000-10-25 made functions of it
 *
 ***********************************************************************/

/* a variation for vbr-mtrh */
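/*
 * block_sf:
 * walks all scalefactor bands up to max_nonzero_coeff and, per band,
 * records in vbrsfmin[] the minimum gain needed to keep the largest
 * coefficient inside the quantizer range, and in vbrsf[] the gain the
 * ->find callback reports as acceptable for the allowed noise l3_xmin[]
 * (bands without relevant energy just reuse the current maximum).
 * The returned maximum gain is the first candidate for global_gain.
 */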
static int
block_sf(algo_t *that, const FLOAT l3_xmin[SFBMAX], int vbrsf[SFBMAX], int vbrsfmin[SFBMAX])
{
    FLOAT max_xr34;
    const FLOAT *const xr = &that->cod_info->xr[0];
    const FLOAT *const xr34_orig = &that->xr34orig[0];
    const int *const width = &that->cod_info->width[0];
    const char *const energy_above_cutoff = &that->cod_info->energy_above_cutoff[0];
    unsigned int const max_nonzero_coeff = (unsigned int) that->cod_info->max_nonzero_coeff;
    uint8_t maxsf = 0;
    int sfb = 0, m_o = -1;
    unsigned int j = 0, i = 0;
    int const psymax = that->cod_info->psymax;

    assert(that->cod_info->max_nonzero_coeff >= 0);

    that->mingain_l = 0;
    that->mingain_s[0] = 0;
    that->mingain_s[1] = 0;
    that->mingain_s[2] = 0;
    while (j <= max_nonzero_coeff) {
        unsigned int const w = (unsigned int) width[sfb];
        unsigned int const m = (unsigned int) (max_nonzero_coeff - j + 1);
        unsigned int l = w;
        uint8_t m1, m2;
        if (l > m) {
            l = m;
        }
        max_xr34 = vec_max_c(&xr34_orig[j], l);

        m1 = find_lowest_scalefac(max_xr34);
        vbrsfmin[sfb] = m1;
        if (that->mingain_l < m1) {
            that->mingain_l = m1;
        }
        if (that->mingain_s[i] < m1) {
            that->mingain_s[i] = m1;
        }
        if (++i > 2) {
            i = 0;
        }
        if (sfb < psymax && w > 2) { /* mpeg2.5 at 8 kHz doesn't use all scalefactors, unused have width 2 */
            if (energy_above_cutoff[sfb]) {
                m2 = that->find(&xr[j], &xr34_orig[j], l3_xmin[sfb], l, m1);
#if 0
                if (0) {
                    /** Robert Hegemann 2007-09-29:
                     * It seems here is some more potential for speed improvements.
                     * Current find method does 11-18 quantization calculations.
                     * Using a "good guess" may help to reduce this amount.
                     */
                    uint8_t guess = calc_scalefac(l3_xmin[sfb], l);
                    DEBUGF(that->gfc, "sfb=%3d guess=%3d found=%3d diff=%3d\n", sfb, guess, m2,
                           m2 - guess);
                }
#endif
                if (maxsf < m2) {
                    maxsf = m2;
                }
                if (m_o < m2 && m2 < 255) {
                    m_o = m2;
                }
            }
            else {
                m2 = 255;
                maxsf = 255;
            }
        }
        else {
            if (maxsf < m1) {
                maxsf = m1;
            }
            m2 = maxsf;
        }
        vbrsf[sfb] = m2;
        ++sfb;
        j += w;
    }
    for (; sfb < SFBMAX; ++sfb) {
        vbrsf[sfb] = maxsf;
        vbrsfmin[sfb] = 0;
    }
    if (m_o > -1) {
        maxsf = m_o;
        for (sfb = 0; sfb < SFBMAX; ++sfb) {
            if (vbrsf[sfb] == 255) {
                vbrsf[sfb] = m_o;
            }
        }
    }
    return maxsf;
}
/***********************************************************************
 *
 *  quantize xr34 based on scalefactors
 *
 *  block_xr34
 *
 *  Mark Taylor 2000-??-??
 *  Robert Hegemann 2000-10-20 made functions of them
 *
 ***********************************************************************/
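/*
 * quantize_x34:
 * derives each band's effective step size from global_gain, the band's
 * scalefactor (plus pretab when preflag is set, scaled by scalefac_scale)
 * and the window's subblock gain, then quantizes the xr34 values of that
 * band four at a time with k_34_4() into cod_info->l3_enc.
 */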
static void
quantize_x34(const algo_t *that)
{
    DOUBLEX x[4];
    const FLOAT *xr34_orig = that->xr34orig;
    gr_info *const cod_info = that->cod_info;
    int const ifqstep = (cod_info->scalefac_scale == 0) ? 2 : 4;
    int *l3 = cod_info->l3_enc;
    unsigned int j = 0, sfb = 0;
    unsigned int const max_nonzero_coeff = (unsigned int) cod_info->max_nonzero_coeff;

    assert(cod_info->max_nonzero_coeff >= 0);
    assert(cod_info->max_nonzero_coeff < 576);

    while (j <= max_nonzero_coeff) {
        int const s =
            (cod_info->scalefac[sfb] + (cod_info->preflag ? pretab[sfb] : 0)) * ifqstep
            + cod_info->subblock_gain[cod_info->window[sfb]] * 8;
        uint8_t const sfac = (uint8_t) (cod_info->global_gain - s);
        FLOAT const sfpow34 = ipow20[sfac];
        unsigned int const w = (unsigned int) cod_info->width[sfb];
        unsigned int const m = (unsigned int) (max_nonzero_coeff - j + 1);
        unsigned int i, remaining;

        assert((cod_info->global_gain - s) >= 0);
        assert(cod_info->width[sfb] >= 0);
        j += w;
        ++sfb;

        i = (w <= m) ? w : m;
        remaining = (i & 0x03u);
        i >>= 2u;

        while (i-- > 0) {
            x[0] = sfpow34 * xr34_orig[0];
            x[1] = sfpow34 * xr34_orig[1];
            x[2] = sfpow34 * xr34_orig[2];
            x[3] = sfpow34 * xr34_orig[3];
            k_34_4(x, l3);
            l3 += 4;
            xr34_orig += 4;
        }
        if (remaining) {
            int tmp_l3[4];
            x[0] = x[1] = x[2] = x[3] = 0;
            switch (remaining) {
            case 3: x[2] = sfpow34 * xr34_orig[2];
            case 2: x[1] = sfpow34 * xr34_orig[1];
            case 1: x[0] = sfpow34 * xr34_orig[0];
            }
            k_34_4(x, tmp_l3);
            switch (remaining) {
            case 3: l3[2] = tmp_l3[2];
            case 2: l3[1] = tmp_l3[1];
            case 1: l3[0] = tmp_l3[0];
            }
            l3 += remaining;
            xr34_orig += remaining;
        }
    }
}
static const uint8_t max_range_short[SBMAX_s * 3] = {
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    0, 0, 0
};

static const uint8_t max_range_long[SBMAX_l] = {
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 0
};

static const uint8_t max_range_long_lsf_pretab[SBMAX_l] = {
    7, 7, 7, 7, 7, 7, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
/*
    sfb=0..5  scalefac < 16
    sfb>5     scalefac < 8

    ifqstep = ( cod_info->scalefac_scale == 0 ) ? 2 : 4;
    ol_sf  = (cod_info->global_gain-210.0);
    ol_sf -= 8*cod_info->subblock_gain[i];
    ol_sf -= ifqstep*scalefac[gr][ch].s[sfb][i];
*/
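/*
 * set_subblock_gain:
 * chooses a subblock gain (0..7) per short-block window so that the largest
 * per-band correction still fits into the scalefactor ranges (15 for the
 * first six short bands, 7 above), without dropping below the minimum gain
 * mingain_s[] the quantizer needs. The chosen gains are folded back into
 * sf[] as multiples of 8, and any gain common to all three windows is moved
 * into global_gain.
 */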
static void
set_subblock_gain(gr_info *cod_info, const int mingain_s[3], int sf[])
{
    const int maxrange1 = 15, maxrange2 = 7;
    const int ifqstepShift = (cod_info->scalefac_scale == 0) ? 1 : 2;
    int *const sbg = cod_info->subblock_gain;
    unsigned int const psymax = (unsigned int) cod_info->psymax;
    unsigned int psydiv = 18;
    int sbg0, sbg1, sbg2;
    unsigned int sfb, i;
    int min_sbg = 7;

    if (psydiv > psymax) {
        psydiv = psymax;
    }
    for (i = 0; i < 3; ++i) {
        int maxsf1 = 0, maxsf2 = 0, minsf = 1000;
        /* see if we should use subblock gain */
        for (sfb = i; sfb < psydiv; sfb += 3) { /* part 1 */
            int const v = -sf[sfb];
            if (maxsf1 < v) {
                maxsf1 = v;
            }
            if (minsf > v) {
                minsf = v;
            }
        }
        for (; sfb < SFBMAX; sfb += 3) { /* part 2 */
            int const v = -sf[sfb];
            if (maxsf2 < v) {
                maxsf2 = v;
            }
            if (minsf > v) {
                minsf = v;
            }
        }
        /* boost subblock gain as little as possible so we can
         * reach maxsf1 with scalefactors
         * 8*sbg >= maxsf1
         */
        {
            int const m1 = maxsf1 - (maxrange1 << ifqstepShift);
            int const m2 = maxsf2 - (maxrange2 << ifqstepShift);
            maxsf1 = Max(m1, m2);
        }
        if (minsf > 0) {
            sbg[i] = minsf >> 3;
        }
        else {
            sbg[i] = 0;
        }
        if (maxsf1 > 0) {
            int const m1 = sbg[i];
            int const m2 = (maxsf1 + 7) >> 3;
            sbg[i] = Max(m1, m2);
        }
        if (sbg[i] > 0 && mingain_s[i] > (cod_info->global_gain - sbg[i] * 8)) {
            sbg[i] = (cod_info->global_gain - mingain_s[i]) >> 3;
        }
        if (sbg[i] > 7) {
            sbg[i] = 7;
        }
        if (min_sbg > sbg[i]) {
            min_sbg = sbg[i];
        }
    }
    sbg0 = sbg[0] * 8;
    sbg1 = sbg[1] * 8;
    sbg2 = sbg[2] * 8;
    for (sfb = 0; sfb < SFBMAX; sfb += 3) {
        sf[sfb + 0] += sbg0;
        sf[sfb + 1] += sbg1;
        sf[sfb + 2] += sbg2;
    }
    if (min_sbg > 0) {
        for (i = 0; i < 3; ++i) {
            sbg[i] -= min_sbg;
        }
        cod_info->global_gain -= min_sbg * 8;
    }
}
/*
    ifqstep = ( cod_info->scalefac_scale == 0 ) ? 2 : 4;
    ol_sf  = (cod_info->global_gain-210.0);
    ol_sf -= ifqstep*scalefac[gr][ch].l[sfb];
    if (cod_info->preflag && sfb>=11)
        ol_sf -= ifqstep*pretab[sfb];
*/
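/*
 * set_scalefacs:
 * turns the remaining negative sf[] offsets into scalefactor entries,
 * rounding up so that ifqstep * scalefac >= -sf[sfb], and clipping both to
 * the transmittable range max_range[] and to the headroom left over the
 * band's minimum gain vbrsfmin[]. Bands above sfbmax (sfb21) get 0.
 */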
static void
set_scalefacs(gr_info *cod_info, const int *vbrsfmin, int sf[], const uint8_t *max_range)
{
    const int ifqstep = (cod_info->scalefac_scale == 0) ? 2 : 4;
    const int ifqstepShift = (cod_info->scalefac_scale == 0) ? 1 : 2;
    int *const scalefac = cod_info->scalefac;
    int const sfbmax = cod_info->sfbmax;
    int sfb;
    int const *const sbg = cod_info->subblock_gain;
    int const *const window = cod_info->window;
    int const preflag = cod_info->preflag;

    if (preflag) {
        for (sfb = 11; sfb < sfbmax; ++sfb) {
            sf[sfb] += pretab[sfb] * ifqstep;
        }
    }
    for (sfb = 0; sfb < sfbmax; ++sfb) {
        int const gain = cod_info->global_gain - (sbg[window[sfb]] * 8)
            - ((preflag ? pretab[sfb] : 0) * ifqstep);

        if (sf[sfb] < 0) {
            int const m = gain - vbrsfmin[sfb];
            /* ifqstep*scalefac >= -sf[sfb], so round UP */
            scalefac[sfb] = (ifqstep - 1 - sf[sfb]) >> ifqstepShift;

            if (scalefac[sfb] > max_range[sfb]) {
                scalefac[sfb] = max_range[sfb];
            }
            if (scalefac[sfb] > 0 && (scalefac[sfb] << ifqstepShift) > m) {
                scalefac[sfb] = m >> ifqstepShift;
            }
        }
        else {
            scalefac[sfb] = 0;
        }
    }
    for (; sfb < SFBMAX; ++sfb) {
        scalefac[sfb] = 0; /* sfb21 */
    }
}
#ifndef NDEBUG
static int
checkScalefactor(const gr_info *cod_info, const int vbrsfmin[SFBMAX])
{
    int const ifqstep = cod_info->scalefac_scale == 0 ? 2 : 4;
    int sfb;
    for (sfb = 0; sfb < cod_info->psymax; ++sfb) {
        const int s =
            ((cod_info->scalefac[sfb] +
              (cod_info->preflag ? pretab[sfb] : 0)) * ifqstep) +
            cod_info->subblock_gain[cod_info->window[sfb]] * 8;
        if ((cod_info->global_gain - s) < vbrsfmin[sfb]) {
            /*
               fprintf( stdout, "sf %d\n", sfb );
               fprintf( stdout, "min %d\n", vbrsfmin[sfb] );
               fprintf( stdout, "ggain %d\n", cod_info->global_gain );
               fprintf( stdout, "scalefac %d\n", cod_info->scalefac[sfb] );
               fprintf( stdout, "pretab %d\n", (cod_info->preflag ? pretab[sfb] : 0) );
               fprintf( stdout, "scale %d\n", (cod_info->scalefac_scale + 1) );
               fprintf( stdout, "subgain %d\n", cod_info->subblock_gain[cod_info->window[sfb]] * 8 );
               fflush( stdout );
               exit(-1);
             */
            return 0;
        }
    }
    return 1;
}
#endif
/******************************************************************
 *
 *  short block scalefacs
 *
 ******************************************************************/
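/*
 * short_block_constrain:
 * given the per-band gains vbrsf[] and minimum gains vbrsfmin[], picks a
 * global_gain, a scalefac_scale setting and subblock gains so that every
 * per-band difference can still be expressed with the available
 * scalefactor range; whatever cannot be expressed is absorbed by shifting
 * global_gain (the delta/mover bookkeeping below).
 */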
static void
short_block_constrain(const algo_t *that, const int vbrsf[SFBMAX],
                      const int vbrsfmin[SFBMAX], int vbrmax)
{
    gr_info *const cod_info = that->cod_info;
    lame_internal_flags const *const gfc = that->gfc;
    SessionConfig_t const *const cfg = &gfc->cfg;
    int const maxminsfb = that->mingain_l;
    int mover, maxover0 = 0, maxover1 = 0, delta = 0;
    int v, v0, v1;
    int sfb;
    int const psymax = cod_info->psymax;

    for (sfb = 0; sfb < psymax; ++sfb) {
        assert(vbrsf[sfb] >= vbrsfmin[sfb]);
        v = vbrmax - vbrsf[sfb];
        if (delta < v) {
            delta = v;
        }
        v0 = v - (4 * 14 + 2 * max_range_short[sfb]);
        v1 = v - (4 * 14 + 4 * max_range_short[sfb]);
        if (maxover0 < v0) {
            maxover0 = v0;
        }
        if (maxover1 < v1) {
            maxover1 = v1;
        }
    }
    if (cfg->noise_shaping == 2) {
        /* allow scalefac_scale=1 */
        mover = Min(maxover0, maxover1);
    }
    else {
        mover = maxover0;
    }
    if (delta > mover) {
        delta = mover;
    }
    vbrmax -= delta;
    maxover0 -= mover;
    maxover1 -= mover;

    if (maxover0 == 0) {
        cod_info->scalefac_scale = 0;
    }
    else if (maxover1 == 0) {
        cod_info->scalefac_scale = 1;
    }
    if (vbrmax < maxminsfb) {
        vbrmax = maxminsfb;
    }
    cod_info->global_gain = vbrmax;

    if (cod_info->global_gain < 0) {
        cod_info->global_gain = 0;
    }
    else if (cod_info->global_gain > 255) {
        cod_info->global_gain = 255;
    }
    {
        int sf_temp[SFBMAX];
        for (sfb = 0; sfb < SFBMAX; ++sfb) {
            sf_temp[sfb] = vbrsf[sfb] - vbrmax;
        }
        set_subblock_gain(cod_info, &that->mingain_s[0], sf_temp);
        set_scalefacs(cod_info, vbrsfmin, sf_temp, max_range_short);
    }
    assert(checkScalefactor(cod_info, vbrsfmin));
}
/******************************************************************
 *
 *  long block scalefacs
 *
 ******************************************************************/
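/*
 * long_block_constrain:
 * same idea as short_block_constrain, but for long blocks the extra
 * degrees of freedom are preflag (the pretab[] offsets) and
 * scalefac_scale; the four maxover* values track how much range each
 * combination of the two flags is missing, and the cheapest combination
 * (the one whose maxover reaches zero) decides the flag settings.
 */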
static void
long_block_constrain(const algo_t *that, const int vbrsf[SFBMAX], const int vbrsfmin[SFBMAX],
                     int vbrmax)
{
    gr_info *const cod_info = that->cod_info;
    lame_internal_flags const *const gfc = that->gfc;
    SessionConfig_t const *const cfg = &gfc->cfg;
    uint8_t const *max_rangep;
    int const maxminsfb = that->mingain_l;
    int sfb;
    int maxover0, maxover1, maxover0p, maxover1p, mover, delta = 0;
    int v, v0, v1, v0p, v1p, vm0p = 1, vm1p = 1;
    int const psymax = cod_info->psymax;

    max_rangep = cfg->mode_gr == 2 ? max_range_long : max_range_long_lsf_pretab;

    maxover0 = 0;
    maxover1 = 0;
    maxover0p = 0;      /* pretab */
    maxover1p = 0;      /* pretab */

    for (sfb = 0; sfb < psymax; ++sfb) {
        assert(vbrsf[sfb] >= vbrsfmin[sfb]);
        v = vbrmax - vbrsf[sfb];
        if (delta < v) {
            delta = v;
        }
        v0 = v - 2 * max_range_long[sfb];
        v1 = v - 4 * max_range_long[sfb];
        v0p = v - 2 * (max_rangep[sfb] + pretab[sfb]);
        v1p = v - 4 * (max_rangep[sfb] + pretab[sfb]);
        if (maxover0 < v0) {
            maxover0 = v0;
        }
        if (maxover1 < v1) {
            maxover1 = v1;
        }
        if (maxover0p < v0p) {
            maxover0p = v0p;
        }
        if (maxover1p < v1p) {
            maxover1p = v1p;
        }
    }
    if (vm0p == 1) {
        int gain = vbrmax - maxover0p;
        if (gain < maxminsfb) {
            gain = maxminsfb;
        }
        for (sfb = 0; sfb < psymax; ++sfb) {
            int const a = (gain - vbrsfmin[sfb]) - 2 * pretab[sfb];
            if (a <= 0) {
                vm0p = 0;
                vm1p = 0;
                break;
            }
        }
    }
    if (vm1p == 1) {
        int gain = vbrmax - maxover1p;
        if (gain < maxminsfb) {
            gain = maxminsfb;
        }
        for (sfb = 0; sfb < psymax; ++sfb) {
            int const b = (gain - vbrsfmin[sfb]) - 4 * pretab[sfb];
            if (b <= 0) {
                vm1p = 0;
                break;
            }
        }
    }
    if (vm0p == 0) {
        maxover0p = maxover0;
    }
    if (vm1p == 0) {
        maxover1p = maxover1;
    }
    if (cfg->noise_shaping != 2) {
        maxover1 = maxover0;
        maxover1p = maxover0p;
    }
    mover = Min(maxover0, maxover0p);
    mover = Min(mover, maxover1);
    mover = Min(mover, maxover1p);

    if (delta > mover) {
        delta = mover;
    }
    vbrmax -= delta;
    if (vbrmax < maxminsfb) {
        vbrmax = maxminsfb;
    }
    maxover0 -= mover;
    maxover0p -= mover;
    maxover1 -= mover;
    maxover1p -= mover;

    if (maxover0 == 0) {
        cod_info->scalefac_scale = 0;
        cod_info->preflag = 0;
        max_rangep = max_range_long;
    }
    else if (maxover0p == 0) {
        cod_info->scalefac_scale = 0;
        cod_info->preflag = 1;
    }
    else if (maxover1 == 0) {
        cod_info->scalefac_scale = 1;
        cod_info->preflag = 0;
        max_rangep = max_range_long;
    }
    else if (maxover1p == 0) {
        cod_info->scalefac_scale = 1;
        cod_info->preflag = 1;
    }
    else {
        assert(0);      /* this should not happen */
    }
    cod_info->global_gain = vbrmax;
    if (cod_info->global_gain < 0) {
        cod_info->global_gain = 0;
    }
    else if (cod_info->global_gain > 255) {
        cod_info->global_gain = 255;
    }
    {
        int sf_temp[SFBMAX];
        for (sfb = 0; sfb < SFBMAX; ++sfb) {
            sf_temp[sfb] = vbrsf[sfb] - vbrmax;
        }
        set_scalefacs(cod_info, vbrsfmin, sf_temp, max_rangep);
    }
    assert(checkScalefactor(cod_info, vbrsfmin));
}
static void
bitcount(const algo_t *that)
{
    int rc = scale_bitcount(that->gfc, that->cod_info);
    if (rc == 0) {
        return;
    }
    /* this should not happen due to the way the scalefactors are selected */
    ERRORF(that->gfc, "INTERNAL ERROR IN VBR NEW CODE (986), please send bug report\n");
    exit(-1);
}

static int
quantizeAndCountBits(const algo_t *that)
{
    quantize_x34(that);
    that->cod_info->part2_3_length = noquant_count_bits(that->gfc, that->cod_info, 0);
    return that->cod_info->part2_3_length;
}
static int
tryGlobalStepsize(const algo_t *that, const int sfwork[SFBMAX],
                  const int vbrsfmin[SFBMAX], int delta)
{
    FLOAT const xrpow_max = that->cod_info->xrpow_max;
    int sftemp[SFBMAX], i, nbits;
    int gain, vbrmax = 0;
    for (i = 0; i < SFBMAX; ++i) {
        gain = sfwork[i] + delta;
        if (gain < vbrsfmin[i]) {
            gain = vbrsfmin[i];
        }
        if (gain > 255) {
            gain = 255;
        }
        if (vbrmax < gain) {
            vbrmax = gain;
        }
        sftemp[i] = gain;
    }
    that->alloc(that, sftemp, vbrsfmin, vbrmax);
    bitcount(that);
    nbits = quantizeAndCountBits(that);
    that->cod_info->xrpow_max = xrpow_max;
    return nbits;
}
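/*
 * searchGlobalStepsizeMax:
 * binary search on a global increase of all scalefactors (a coarser step
 * size) until the quantized granule, together with its scalefactor side
 * info (part2_length), fits into the target bit count; the smallest
 * working increase is kept.
 */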
static void
searchGlobalStepsizeMax(const algo_t *that, const int sfwork[SFBMAX],
                        const int vbrsfmin[SFBMAX], int target)
{
    gr_info const *const cod_info = that->cod_info;
    const int gain = cod_info->global_gain;
    int curr = gain;
    int gain_ok = 1024;
    int nbits = LARGE_BITS;
    int l = gain, r = 512;

    assert(gain >= 0);
    while (l <= r) {
        curr = (l + r) >> 1;
        nbits = tryGlobalStepsize(that, sfwork, vbrsfmin, curr - gain);
        if (nbits == 0 || (nbits + cod_info->part2_length) < target) {
            r = curr - 1;
            gain_ok = curr;
        }
        else {
            l = curr + 1;
            if (gain_ok == 1024) {
                gain_ok = curr;
            }
        }
    }
    if (gain_ok != curr) {
        curr = gain_ok;
        nbits = tryGlobalStepsize(that, sfwork, vbrsfmin, curr - gain);
    }
}
static int
sfDepth(const int sfwork[SFBMAX])
{
    int m = 0;
    unsigned int i, j;
    for (j = SFBMAX, i = 0; j > 0; --j, ++i) {
        int const di = 255 - sfwork[i];
        if (m < di) {
            m = di;
        }
        assert(sfwork[i] >= 0);
        assert(sfwork[i] <= 255);
    }
    assert(m >= 0);
    assert(m <= 255);
    return m;
}

static void
cutDistribution(const int sfwork[SFBMAX], int sf_out[SFBMAX], int cut)
{
    unsigned int i, j;
    for (j = SFBMAX, i = 0; j > 0; --j, ++i) {
        int const x = sfwork[i];
        sf_out[i] = x < cut ? x : cut;
    }
}
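/*
 * flattenDistribution:
 * moves every scalefactor a fraction k/dm of its distance to the reference
 * value p (for k == dm the distribution becomes completely flat at p),
 * clips the result to 0..255, and returns the new maximum, which the
 * caller uses as vbrmax.
 */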
static int
flattenDistribution(const int sfwork[SFBMAX], int sf_out[SFBMAX], int dm, int k, int p)
{
    unsigned int i, j;
    int x, sfmax = 0;
    if (dm > 0) {
        for (j = SFBMAX, i = 0; j > 0; --j, ++i) {
            int const di = p - sfwork[i];
            x = sfwork[i] + (k * di) / dm;
            if (x < 0) {
                x = 0;
            }
            else {
                if (x > 255) {
                    x = 255;
                }
            }
            sf_out[i] = x;
            if (sfmax < x) {
                sfmax = x;
            }
        }
    }
    else {
        for (j = SFBMAX, i = 0; j > 0u; --j, ++i) {
            x = sfwork[i];
            sf_out[i] = x;
            if (sfmax < x) {
                sfmax = x;
            }
        }
    }
    return sfmax;
}
static int
tryThatOne(algo_t const *that, const int sftemp[SFBMAX], const int vbrsfmin[SFBMAX], int vbrmax)
{
    FLOAT const xrpow_max = that->cod_info->xrpow_max;
    int nbits = LARGE_BITS;
    that->alloc(that, sftemp, vbrsfmin, vbrmax);
    bitcount(that);
    nbits = quantizeAndCountBits(that);
    nbits += that->cod_info->part2_length;
    that->cod_info->xrpow_max = xrpow_max;
    return nbits;
}
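/*
 * outOfBitsStrategy:
 * called when the distortion-free quantization needs more bits than this
 * granule/channel may use. PART 1 binary-searches how far the scalefactor
 * distribution has to be flattened towards the current global_gain to meet
 * the target; if even a fully flat distribution is too expensive, PART 2
 * binary-searches a larger (coarser) global step size between global_gain
 * and 255. The old global-stepsize search is kept only as a last resort.
 */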
static void
outOfBitsStrategy(algo_t const *that, const int sfwork[SFBMAX], const int vbrsfmin[SFBMAX],
                  int target)
{
    int wrk[SFBMAX];
    int const dm = sfDepth(sfwork);
    int const p = that->cod_info->global_gain;
    int nbits;

    /* PART 1 */
    {
        int bi = dm / 2;
        int bi_ok = -1;
        int bu = 0;
        int bo = dm;
        for (;;) {
            int const sfmax = flattenDistribution(sfwork, wrk, dm, bi, p);
            nbits = tryThatOne(that, wrk, vbrsfmin, sfmax);
            if (nbits <= target) {
                bi_ok = bi;
                bo = bi - 1;
            }
            else {
                bu = bi + 1;
            }
            if (bu <= bo) {
                bi = (bu + bo) / 2;
            }
            else {
                break;
            }
        }
        if (bi_ok >= 0) {
            if (bi != bi_ok) {
                int const sfmax = flattenDistribution(sfwork, wrk, dm, bi_ok, p);
                nbits = tryThatOne(that, wrk, vbrsfmin, sfmax);
            }
            return;
        }
    }
    /* PART 2: */
    {
        int bi = (255 + p) / 2;
        int bi_ok = -1;
        int bu = p;
        int bo = 255;
        for (;;) {
            int const sfmax = flattenDistribution(sfwork, wrk, dm, dm, bi);
            nbits = tryThatOne(that, wrk, vbrsfmin, sfmax);
            if (nbits <= target) {
                bi_ok = bi;
                bo = bi - 1;
            }
            else {
                bu = bi + 1;
            }
            if (bu <= bo) {
                bi = (bu + bo) / 2;
            }
            else {
                break;
            }
        }
        if (bi_ok >= 0) {
            if (bi != bi_ok) {
                int const sfmax = flattenDistribution(sfwork, wrk, dm, dm, bi_ok);
                nbits = tryThatOne(that, wrk, vbrsfmin, sfmax);
            }
            return;
        }
    }
    /* fall back to old code, likely never called */
    searchGlobalStepsizeMax(that, wrk, vbrsfmin, target);
}
static int
reduce_bit_usage(lame_internal_flags *gfc, int gr, int ch
#if 0
                 , const FLOAT xr34orig[576], const FLOAT l3_xmin[SFBMAX], int maxbits
#endif
    )
{
    SessionConfig_t const *const cfg = &gfc->cfg;
    gr_info *const cod_info = &gfc->l3_side.tt[gr][ch];
    /* try some better scalefac storage
     */
    best_scalefac_store(gfc, gr, ch, &gfc->l3_side);

    /* best huffman_divide may save some bits too
     */
    if (cfg->use_best_huffman == 1)
        best_huffman_divide(gfc, cod_info);
    return cod_info->part2_3_length + cod_info->part2_length;
}
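/*
 * VBR_encode_frame:
 * 1. per granule/channel, search the scalefactors that just meet the
 *    allowed noise (block_sf + alloc) and quantize 'as is';
 * 2. if the frame, a granule or a channel exceeds its bit limit,
 *    redistribute the bit budget between channels and granules
 *    (proportional to roots of their current usage, capped near what was
 *    actually used plus a small margin);
 * 3. rerun the quantization through outOfBitsStrategy() until everything
 *    fits; a remaining overrun is treated as an internal error.
 */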
int
VBR_encode_frame(lame_internal_flags *gfc, const FLOAT xr34orig[2][2][576],
                 const FLOAT l3_xmin[2][2][SFBMAX], const int max_bits[2][2])
{
    SessionConfig_t const *const cfg = &gfc->cfg;
    int sfwork_[2][2][SFBMAX];
    int vbrsfmin_[2][2][SFBMAX];
    algo_t that_[2][2];
    int const ngr = cfg->mode_gr;
    int const nch = cfg->channels_out;
    int max_nbits_ch[2][2] = {{0, 0}, {0, 0}};
    int max_nbits_gr[2] = {0, 0};
    int max_nbits_fr = 0;
    int use_nbits_ch[2][2] = {{MAX_BITS_PER_CHANNEL+1, MAX_BITS_PER_CHANNEL+1}
                             ,{MAX_BITS_PER_CHANNEL+1, MAX_BITS_PER_CHANNEL+1}};
    int use_nbits_gr[2] = { MAX_BITS_PER_GRANULE+1, MAX_BITS_PER_GRANULE+1 };
    int use_nbits_fr = MAX_BITS_PER_GRANULE+MAX_BITS_PER_GRANULE;
    int gr, ch;
    int ok, sum_fr;

    /* set up some encoding parameters
     */
    for (gr = 0; gr < ngr; ++gr) {
        max_nbits_gr[gr] = 0;
        for (ch = 0; ch < nch; ++ch) {
            max_nbits_ch[gr][ch] = max_bits[gr][ch];
            use_nbits_ch[gr][ch] = 0;
            max_nbits_gr[gr] += max_bits[gr][ch];
            max_nbits_fr += max_bits[gr][ch];
            that_[gr][ch].find = (cfg->full_outer_loop < 0) ? guess_scalefac_x34 : find_scalefac_x34;
            that_[gr][ch].gfc = gfc;
            that_[gr][ch].cod_info = &gfc->l3_side.tt[gr][ch];
            that_[gr][ch].xr34orig = xr34orig[gr][ch];
            if (that_[gr][ch].cod_info->block_type == SHORT_TYPE) {
                that_[gr][ch].alloc = short_block_constrain;
            }
            else {
                that_[gr][ch].alloc = long_block_constrain;
            }
        }               /* for ch */
    }
    /* searches scalefactors
     */
    for (gr = 0; gr < ngr; ++gr) {
        for (ch = 0; ch < nch; ++ch) {
            if (max_bits[gr][ch] > 0) {
                algo_t *that = &that_[gr][ch];
                int *sfwork = sfwork_[gr][ch];
                int *vbrsfmin = vbrsfmin_[gr][ch];
                int vbrmax;

                vbrmax = block_sf(that, l3_xmin[gr][ch], sfwork, vbrsfmin);
                that->alloc(that, sfwork, vbrsfmin, vbrmax);
                bitcount(that);
            }
            else {
                /* xr contains no energy
                 * l3_enc, our encoding data, will be quantized to zero
                 * continue with next channel
                 */
            }
        }               /* for ch */
    }
    /* encode 'as is'
     */
    use_nbits_fr = 0;
    for (gr = 0; gr < ngr; ++gr) {
        use_nbits_gr[gr] = 0;
        for (ch = 0; ch < nch; ++ch) {
            algo_t const *that = &that_[gr][ch];
            if (max_bits[gr][ch] > 0) {
                memset(&that->cod_info->l3_enc[0], 0, sizeof(that->cod_info->l3_enc));
                (void) quantizeAndCountBits(that);
            }
            else {
                /* xr contains no energy
                 * l3_enc, our encoding data, will be quantized to zero
                 * continue with next channel
                 */
            }
            use_nbits_ch[gr][ch] = reduce_bit_usage(gfc, gr, ch);
            use_nbits_gr[gr] += use_nbits_ch[gr][ch];
        }               /* for ch */
        use_nbits_fr += use_nbits_gr[gr];
    }
    /* check bit constraints
     */
    if (use_nbits_fr <= max_nbits_fr) {
        ok = 1;
        for (gr = 0; gr < ngr; ++gr) {
            if (use_nbits_gr[gr] > MAX_BITS_PER_GRANULE) {
                /* violates the rule that every granule has to use no more
                 * bits than MAX_BITS_PER_GRANULE
                 */
                ok = 0;
            }
            for (ch = 0; ch < nch; ++ch) {
                if (use_nbits_ch[gr][ch] > MAX_BITS_PER_CHANNEL) {
                    /* violates the rule that every gr_ch has to use no more
                     * bits than MAX_BITS_PER_CHANNEL
                     *
                     * This isn't explicitly stated in the ISO docs, but the
                     * part2_3_length field has only 12 bits, that makes it
                     * up to a maximum size of 4095 bits!!!
                     */
                    ok = 0;
                }
            }
        }
        if (ok) {
            return use_nbits_fr;
        }
    }
    /* OK, we are in trouble and have to define how many bits are
     * to be used for each granule
     */
    {
        ok = 1;
        sum_fr = 0;
        for (gr = 0; gr < ngr; ++gr) {
            max_nbits_gr[gr] = 0;
            for (ch = 0; ch < nch; ++ch) {
                if (use_nbits_ch[gr][ch] > MAX_BITS_PER_CHANNEL) {
                    max_nbits_ch[gr][ch] = MAX_BITS_PER_CHANNEL;
                }
                else {
                    max_nbits_ch[gr][ch] = use_nbits_ch[gr][ch];
                }
                max_nbits_gr[gr] += max_nbits_ch[gr][ch];
            }
            if (max_nbits_gr[gr] > MAX_BITS_PER_GRANULE) {
                float f[2] = {0.0f, 0.0f}, s = 0.0f;
                for (ch = 0; ch < nch; ++ch) {
                    if (max_nbits_ch[gr][ch] > 0) {
                        f[ch] = sqrt(sqrt(max_nbits_ch[gr][ch]));
                        s += f[ch];
                    }
                    else {
                        f[ch] = 0;
                    }
                }
                for (ch = 0; ch < nch; ++ch) {
                    if (s > 0) {
                        max_nbits_ch[gr][ch] = MAX_BITS_PER_GRANULE * f[ch] / s;
                    }
                    else {
                        max_nbits_ch[gr][ch] = 0;
                    }
                }
                if (nch > 1) {
                    if (max_nbits_ch[gr][0] > use_nbits_ch[gr][0] + 32) {
                        max_nbits_ch[gr][1] += max_nbits_ch[gr][0];
                        max_nbits_ch[gr][1] -= use_nbits_ch[gr][0] + 32;
                        max_nbits_ch[gr][0] = use_nbits_ch[gr][0] + 32;
                    }
                    if (max_nbits_ch[gr][1] > use_nbits_ch[gr][1] + 32) {
                        max_nbits_ch[gr][0] += max_nbits_ch[gr][1];
                        max_nbits_ch[gr][0] -= use_nbits_ch[gr][1] + 32;
                        max_nbits_ch[gr][1] = use_nbits_ch[gr][1] + 32;
                    }
                    if (max_nbits_ch[gr][0] > MAX_BITS_PER_CHANNEL) {
                        max_nbits_ch[gr][0] = MAX_BITS_PER_CHANNEL;
                    }
                    if (max_nbits_ch[gr][1] > MAX_BITS_PER_CHANNEL) {
                        max_nbits_ch[gr][1] = MAX_BITS_PER_CHANNEL;
                    }
                }
                max_nbits_gr[gr] = 0;
                for (ch = 0; ch < nch; ++ch) {
                    max_nbits_gr[gr] += max_nbits_ch[gr][ch];
                }
            }
            sum_fr += max_nbits_gr[gr];
        }
        if (sum_fr > max_nbits_fr) {
            {
                float f[2] = {0.0f, 0.0f}, s = 0.0f;
                for (gr = 0; gr < ngr; ++gr) {
                    if (max_nbits_gr[gr] > 0) {
                        f[gr] = sqrt(max_nbits_gr[gr]);
                        s += f[gr];
                    }
                    else {
                        f[gr] = 0;
                    }
                }
                for (gr = 0; gr < ngr; ++gr) {
                    if (s > 0) {
                        max_nbits_gr[gr] = max_nbits_fr * f[gr] / s;
                    }
                    else {
                        max_nbits_gr[gr] = 0;
                    }
                }
            }
            if (ngr > 1) {
                if (max_nbits_gr[0] > use_nbits_gr[0] + 125) {
                    max_nbits_gr[1] += max_nbits_gr[0];
                    max_nbits_gr[1] -= use_nbits_gr[0] + 125;
                    max_nbits_gr[0] = use_nbits_gr[0] + 125;
                }
                if (max_nbits_gr[1] > use_nbits_gr[1] + 125) {
                    max_nbits_gr[0] += max_nbits_gr[1];
                    max_nbits_gr[0] -= use_nbits_gr[1] + 125;
                    max_nbits_gr[1] = use_nbits_gr[1] + 125;
                }
                for (gr = 0; gr < ngr; ++gr) {
                    if (max_nbits_gr[gr] > MAX_BITS_PER_GRANULE) {
                        max_nbits_gr[gr] = MAX_BITS_PER_GRANULE;
                    }
                }
            }
            for (gr = 0; gr < ngr; ++gr) {
                float f[2] = {0.0f, 0.0f}, s = 0.0f;
                for (ch = 0; ch < nch; ++ch) {
                    if (max_nbits_ch[gr][ch] > 0) {
                        f[ch] = sqrt(max_nbits_ch[gr][ch]);
                        s += f[ch];
                    }
                    else {
                        f[ch] = 0;
                    }
                }
                for (ch = 0; ch < nch; ++ch) {
                    if (s > 0) {
                        max_nbits_ch[gr][ch] = max_nbits_gr[gr] * f[ch] / s;
                    }
                    else {
                        max_nbits_ch[gr][ch] = 0;
                    }
                }
                if (nch > 1) {
                    if (max_nbits_ch[gr][0] > use_nbits_ch[gr][0] + 32) {
                        max_nbits_ch[gr][1] += max_nbits_ch[gr][0];
                        max_nbits_ch[gr][1] -= use_nbits_ch[gr][0] + 32;
                        max_nbits_ch[gr][0] = use_nbits_ch[gr][0] + 32;
                    }
                    if (max_nbits_ch[gr][1] > use_nbits_ch[gr][1] + 32) {
                        max_nbits_ch[gr][0] += max_nbits_ch[gr][1];
                        max_nbits_ch[gr][0] -= use_nbits_ch[gr][1] + 32;
                        max_nbits_ch[gr][1] = use_nbits_ch[gr][1] + 32;
                    }
                    for (ch = 0; ch < nch; ++ch) {
                        if (max_nbits_ch[gr][ch] > MAX_BITS_PER_CHANNEL) {
                            max_nbits_ch[gr][ch] = MAX_BITS_PER_CHANNEL;
                        }
                    }
                }
            }
        }
        /* sanity check */
        sum_fr = 0;
        for (gr = 0; gr < ngr; ++gr) {
            int sum_gr = 0;
            for (ch = 0; ch < nch; ++ch) {
                sum_gr += max_nbits_ch[gr][ch];
                if (max_nbits_ch[gr][ch] > MAX_BITS_PER_CHANNEL) {
                    ok = 0;
                }
            }
            sum_fr += sum_gr;
            if (sum_gr > MAX_BITS_PER_GRANULE) {
                ok = 0;
            }
        }
        if (sum_fr > max_nbits_fr) {
            ok = 0;
        }
        if (!ok) {
            /* we must have done something wrong, fall back to the 'on_pe' based constraint */
            for (gr = 0; gr < ngr; ++gr) {
                for (ch = 0; ch < nch; ++ch) {
                    max_nbits_ch[gr][ch] = max_bits[gr][ch];
                }
            }
        }
    }
    /* we already called the 'best_scalefac_store' function, so we need to reset some
     * variables before we can do it again.
     */
    for (ch = 0; ch < nch; ++ch) {
        gfc->l3_side.scfsi[ch][0] = 0;
        gfc->l3_side.scfsi[ch][1] = 0;
        gfc->l3_side.scfsi[ch][2] = 0;
        gfc->l3_side.scfsi[ch][3] = 0;
    }
    for (gr = 0; gr < ngr; ++gr) {
        for (ch = 0; ch < nch; ++ch) {
            gfc->l3_side.tt[gr][ch].scalefac_compress = 0;
        }
    }
    /* alter our encoded data, until it fits into the target bitrate
     */
    use_nbits_fr = 0;
    for (gr = 0; gr < ngr; ++gr) {
        use_nbits_gr[gr] = 0;
        for (ch = 0; ch < nch; ++ch) {
            algo_t const *that = &that_[gr][ch];
            use_nbits_ch[gr][ch] = 0;
            if (max_bits[gr][ch] > 0) {
                int *sfwork = sfwork_[gr][ch];
                int const *vbrsfmin = vbrsfmin_[gr][ch];
                cutDistribution(sfwork, sfwork, that->cod_info->global_gain);
                outOfBitsStrategy(that, sfwork, vbrsfmin, max_nbits_ch[gr][ch]);
            }
            use_nbits_ch[gr][ch] = reduce_bit_usage(gfc, gr, ch);
            assert(use_nbits_ch[gr][ch] <= max_nbits_ch[gr][ch]);
            use_nbits_gr[gr] += use_nbits_ch[gr][ch];
        }               /* for ch */
        use_nbits_fr += use_nbits_gr[gr];
    }
    /* check bit constraints; this should always succeed if there are no bugs ;-)
     */
    if (use_nbits_fr <= max_nbits_fr) {
        return use_nbits_fr;
    }
    ERRORF(gfc, "INTERNAL ERROR IN VBR NEW CODE (1313), please send bug report\n"
           "maxbits=%d usedbits=%d\n", max_nbits_fr, use_nbits_fr);
    exit(-1);
}