;;;; benchmark-suite/lib.scm --- generic support for benchmarking
;;;; Copyright (C) 2002, 2006 Free Software Foundation, Inc.
;;;;
;;;; This program is free software; you can redistribute it and/or modify
;;;; it under the terms of the GNU General Public License as published by
;;;; the Free Software Foundation; either version 2, or (at your option)
;;;; any later version.
;;;;
;;;; This program is distributed in the hope that it will be useful,
;;;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;;;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;;;; GNU General Public License for more details.
;;;;
;;;; You should have received a copy of the GNU General Public License
;;;; along with this software; see the file COPYING. If not, write to
;;;; the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
;;;; Boston, MA 02110-1301 USA

(define-module (benchmark-suite lib)
  :export (;; Controlling the execution.
           iteration-factor
           scale-iterations
           ;; Running benchmarks.
           run-benchmark
           benchmark
           ;; Naming groups of benchmarks in a regular fashion.
           with-benchmark-prefix with-benchmark-prefix* current-benchmark-prefix
           format-benchmark-name
           ;; Computing timing results
           benchmark-time-base
           benchmark-total-time benchmark-user-time benchmark-system-time
           benchmark-frame-time benchmark-core-time
           benchmark-user-time\interpreter benchmark-core-time\interpreter
           ;; Reporting results in various ways.
           register-reporter unregister-reporter reporter-registered?
           make-log-reporter
           full-reporter
           user-reporter))

;;;; If you're using Emacs's Scheme mode:
;;;;   (put 'with-benchmark-prefix 'scheme-indent-function 1)
;;;;   (put 'benchmark 'scheme-indent-function 1)

;;;; CORE FUNCTIONS
;;;;
;;;; The function (run-benchmark name iterations thunk) is the heart of the
;;;; benchmarking environment. The first parameter NAME is a unique name for
;;;; the benchmark to be executed (for an explanation of this parameter, see
;;;; below under NAMES). The second parameter ITERATIONS is a positive
;;;; integer value that indicates how often the thunk shall be executed (for
;;;; an explanation of how iteration counts should be used, see below under
;;;; ITERATION COUNTS). For example:
;;;;
;;;;   (run-benchmark "small integer addition" 100000 (lambda () (+ 1 1)))
;;;;
;;;; This will run the function (lambda () (+ 1 1)) 100000 times (the
;;;; iteration count can, however, be scaled; see below for details).
;;;; Several timing measurements for running the thunk the given number of
;;;; iterations are taken and reported.
;;;;
;;;; Convenience macro
;;;;
;;;; * (benchmark name iterations body) is a short form for
;;;;   (run-benchmark name iterations (lambda () body))
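;;;;
;;;; For instance (an illustrative benchmark, not part of the suite), the
;;;; form
;;;;
;;;;   (benchmark "small multiplication" 10000 (* 3 4))
;;;;
;;;; is equivalent to
;;;;
;;;;   (run-benchmark "small multiplication" 10000 (lambda () (* 3 4)))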

;;;; NAMES
;;;;
;;;; Every benchmark in the benchmark suite has a unique name, so that the
;;;; results of individual benchmarks can be compared across several runs of
;;;; the benchmark suite.
;;;;
;;;; A benchmark name is a list of printable objects. For example:
;;;;   ("ports.scm" "file" "read and write back list of strings")
;;;;   ("ports.scm" "pipe" "read")
;;;;
;;;; Benchmark names may contain arbitrary objects, but they always have
;;;; the following properties:
;;;; - Benchmark names can be compared with EQUAL?.
;;;; - Benchmark names can be reliably stored and retrieved with the standard
;;;;   WRITE and READ procedures; doing so preserves their identity.
;;;;
;;;; For example:
;;;;
;;;;   (benchmark "simple addition" 100000 (+ 2 2))
;;;;
;;;; In that case, the benchmark name is the list ("simple addition").
;;;;
;;;; The WITH-BENCHMARK-PREFIX syntax and WITH-BENCHMARK-PREFIX* procedure
;;;; establish a prefix for the names of all benchmarks whose results are
;;;; reported within their dynamic scope. For example:
;;;;
;;;;   (begin
;;;;     (with-benchmark-prefix "basic arithmetic"
;;;;       (benchmark "addition" 100000 (+ 2 2))
;;;;       (benchmark "subtraction" 100000 (- 4 2)))
;;;;     (benchmark "multiplication" 100000 (* 2 2)))
;;;;
;;;; In that example, the three benchmark names are:
;;;;   ("basic arithmetic" "addition"),
;;;;   ("basic arithmetic" "subtraction"), and
;;;;   ("multiplication").
;;;;
;;;; WITH-BENCHMARK-PREFIX can be nested. Each WITH-BENCHMARK-PREFIX
;;;; appends a new element to the current prefix:
;;;;
;;;;   (with-benchmark-prefix "arithmetic"
;;;;     (with-benchmark-prefix "addition"
;;;;       (benchmark "integer" 100000 (+ 2 2))
;;;;       (benchmark "complex" 100000 (+ 2+3i 4+5i)))
;;;;     (with-benchmark-prefix "subtraction"
;;;;       (benchmark "integer" 100000 (- 2 2))
;;;;       (benchmark "complex" 100000 (- 2+3i 1+2i))))
;;;;
;;;; The four benchmark names here are:
;;;;   ("arithmetic" "addition" "integer")
;;;;   ("arithmetic" "addition" "complex")
;;;;   ("arithmetic" "subtraction" "integer")
;;;;   ("arithmetic" "subtraction" "complex")
;;;;
;;;; To print a name for a human reader, we DISPLAY its elements,
;;;; separated by ": ". So, the last set of benchmark names would be
;;;; reported as:
;;;;
;;;;   arithmetic: addition: integer
;;;;   arithmetic: addition: complex
;;;;   arithmetic: subtraction: integer
;;;;   arithmetic: subtraction: complex
;;;;
;;;; The Guile benchmarks use with-benchmark-prefix to include the name of
;;;; the source file containing the benchmark in the benchmark name, to
;;;; provide each file with its own namespace.

;;;; ITERATION COUNTS
;;;;
;;;; Every benchmark has to be given an iteration count that indicates how
;;;; often it should be executed. The reason is that in most cases a single
;;;; execution of the benchmark code would not deliver usable timing results:
;;;; the resolution of the system time is not arbitrarily fine. Thus, some
;;;; benchmarks would be executed too quickly to be measured at all. As a
;;;; rule of thumb, the longer a benchmark runs, the more accurate the
;;;; timing information.
;;;;
;;;; However, execution time depends on several influences: first, the
;;;; machine you are running the benchmark on; second, the compiler you use;
;;;; third, which compiler options you use; fourth, which version of Guile
;;;; you are using; fifth, which Guile options you are using (for example,
;;;; whether or not you are using the debugging evaluator). There are even
;;;; more influences.
;;;;
;;;; For this reason, the same number of iterations for a single benchmark
;;;; may lead to completely different execution times in different
;;;; constellations. For someone working on a slow machine, the default
;;;; iteration counts may lead to an unacceptable execution time of the
;;;; benchmark suite. For someone on a very fast machine, however, it may be
;;;; desirable to increase the number of iterations in order to increase the
;;;; accuracy of the time data.
;;;;
;;;; For this reason, the benchmark suite allows scaling the number of
;;;; executions by a global factor, stored in the exported variable
;;;; iteration-factor. The default for iteration-factor is 1. A value of 2
;;;; means that all benchmarks are executed twice as often, which will also
;;;; roughly double the execution time for the benchmark suite. Similarly, if
;;;; iteration-factor holds a value of 0.5, only about half the execution
;;;; time will be required.
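;;;;
;;;; To illustrate the scaling (with made-up numbers):
;;;;
;;;;   (set! iteration-factor 0.5)
;;;;   (scale-iterations 100000) ;; => 50000
;;;;
;;;; that is, a benchmark declared with an iteration count of 100000 would
;;;; actually be run only 50000 times.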
;;;;
;;;; It is probably a good idea to choose the iteration count for each
;;;; benchmark such that all benchmarks take about the same time, for
;;;; example one second. To achieve this, the benchmark suite holds an empty
;;;; benchmark in the file 0-reference.bm named "reference benchmark for
;;;; iteration counts". Its iteration count is calibrated to make the
;;;; benchmark run about one second on Dirk's laptop :-) If you are adding
;;;; benchmarks to the suite, it would be nice if you could calibrate the
;;;; number of iterations such that each of your added benchmarks takes about
;;;; as long to run as the reference benchmark. But don't try too hard to
;;;; figure out the exact iteration count.

;;;; REPORTERS
;;;;
;;;; A reporter is a function which we apply to each benchmark outcome.
;;;; Reporters can log results, print interesting results to the standard
;;;; output, collect statistics, etc.
;;;;
;;;; A reporter function takes the following arguments: NAME, ITERATIONS,
;;;; BEFORE, AFTER and GC-TIME. The argument NAME holds the name of the
;;;; benchmark, and ITERATIONS holds the actual number of iterations that
;;;; were performed. BEFORE holds the result of the function (times) at the
;;;; very beginning of the execution of the benchmark, and AFTER holds the
;;;; result of the function (times) after the execution of the benchmark.
;;;; GC-TIME, finally, holds the difference of calls to (gc-run-time) before
;;;; and after the execution of the benchmark.
;;;;
;;;; This library provides some standard reporters for logging results
;;;; to a file, reporting interesting results to the user, (FIXME: and
;;;; collecting totals).
;;;;
;;;; You can use the REGISTER-REPORTER function and friends to add whatever
;;;; reporting functions you like. See below under TIME CALCULATION for how
;;;; the library helps you to extract relevant timing information from the
;;;; values ITERATIONS, BEFORE, AFTER and GC-TIME. If you don't register any
;;;; reporters, the library uses USER-REPORTER, which writes the most
;;;; interesting results to the standard output.
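;;;;
;;;; As a sketch, a minimal custom reporter that merely counts how many
;;;; benchmarks have been run could look like this (COUNT-REPORTER is a
;;;; made-up name, not part of this library):
;;;;
;;;;   (define benchmarks-run 0)
;;;;   (define (count-reporter name iterations before after gc-time)
;;;;     (set! benchmarks-run (+ benchmarks-run 1)))
;;;;   (register-reporter count-reporter)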

;;;; TIME CALCULATION
;;;;
;;;; The library uses the Guile functions (times) and (gc-run-time) to
;;;; determine the execution time for a single benchmark. Based on these
;;;; functions, the values of BEFORE, AFTER and GC-TIME are computed, which
;;;; are then passed to the reporter functions. All three values BEFORE,
;;;; AFTER and GC-TIME include not only the time needed to execute the
;;;; benchmark code itself, but also the time for the surrounding code that
;;;; implements the loop running the benchmark code the given number of
;;;; times. This is undesirable, since one would prefer to get the timing
;;;; data for the benchmarked code only.
;;;;
;;;; To cope with this, the benchmarking framework uses a trick: during
;;;; initialization of the library, the time for executing an empty benchmark
;;;; is measured and stored. This is an estimate of the time needed by the
;;;; benchmarking framework itself. For later benchmarks, this time can then
;;;; be subtracted from the measured execution times.
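;;;;
;;;; Schematically, the correction works as follows:
;;;;
;;;;   core-time = user-time - iterations * frame-time/iteration
;;;;
;;;; where frame-time/iteration is the per-iteration overhead of the
;;;; framework, as determined during calibration.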
;;;;
;;;; In order to simplify the time calculation for users who want to write
;;;; their own reporters, the benchmarking framework provides the following
;;;; definitions:
;;;;
;;;; benchmark-time-base : This variable holds the number of time units that
;;;;      make up a second. By dividing the results of each of the functions
;;;;      below by this value, you get the corresponding time in seconds. For
;;;;      example, (/ (benchmark-total-time before after) benchmark-time-base)
;;;;      will give you the total time in seconds.
;;;; benchmark-total-time : This function takes two arguments BEFORE and AFTER
;;;;      and computes the total time between the two timestamps. The result
;;;;      of this function is what the Unix time command would report as real
;;;;      time.
;;;; benchmark-user-time : This function takes two arguments BEFORE and AFTER
;;;;      and computes the time spent in the benchmarking process between the
;;;;      two timestamps. That is, time consumed by other processes running
;;;;      on the same machine is not part of the resulting time, and neither
;;;;      is time spent within the operating system. The result of this
;;;;      function is what the Unix time command would report as user time.
;;;; benchmark-system-time : Similar to benchmark-user-time, but here the time
;;;;      spent within the operating system is given. The result of this
;;;;      function is what the Unix time command would report as system time.
;;;; benchmark-frame-time : This function takes the argument ITERATIONS. It
;;;;      reports the part of the user time that is consumed by the
;;;;      benchmarking framework itself to run some benchmark for the given
;;;;      number of iterations. You can think of this as the time that would
;;;;      still be consumed even if the benchmarking code itself were empty.
;;;;      This value does not include any time for garbage collection, even
;;;;      if it is the benchmarking framework which is responsible for
;;;;      causing a garbage collection.
;;;; benchmark-core-time : This function takes three arguments ITERATIONS,
;;;;      BEFORE and AFTER. It reports the part of the user time that is
;;;;      actually spent within the benchmarking code. That is, the time
;;;;      needed by the benchmarking framework is subtracted from the user
;;;;      time. This value, however, includes all garbage collection times,
;;;;      even if some part of the GC time actually has to be attributed to
;;;;      the benchmarking framework.
;;;; benchmark-user-time\interpreter : This function takes three arguments
;;;;      BEFORE, AFTER and GC-TIME. It reports the part of the user time
;;;;      that is spent in the interpreter (and not in garbage collection).
;;;; benchmark-core-time\interpreter : This function takes four arguments
;;;;      ITERATIONS, BEFORE, AFTER and GC-TIME. It reports the part of the
;;;;      benchmark-core-time that is spent in the interpreter (and not in
;;;;      garbage collection). This value is most probably the one you are
;;;;      interested in, unless you are doing some garbage collection
;;;;      checks.
;;;;
;;;; There is no function to calculate the garbage collection time, since the
;;;; garbage collection time is already passed as the argument GC-TIME to the
;;;; reporter functions.
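;;;;
;;;; As an example, a reporter that prints just the benchmark name and the
;;;; interpreter core time in seconds could be written like this (a sketch;
;;;; CORE-SECONDS-REPORTER is an invented name):
;;;;
;;;;   (define (core-seconds-reporter name iterations before after gc-time)
;;;;     (display (format-benchmark-name name))
;;;;     (display ": ")
;;;;     (display (exact->inexact
;;;;               (/ (benchmark-core-time\interpreter iterations
;;;;                                                   before after gc-time)
;;;;                  benchmark-time-base)))
;;;;     (newline))
;;;;   (register-reporter core-seconds-reporter)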

;;;; MISCELLANEOUS
;;;;

;;; Perform a division and convert the result to inexact.
(define (i/ a b)
  (exact->inexact (/ a b)))

;;; Scale the number of iterations by the global iteration factor.
(define iteration-factor 1)
(define (scale-iterations iterations)
  (let* ((i (inexact->exact (round (* iterations iteration-factor)))))
    (if (< i 1) 1 i)))

;;;; CORE FUNCTIONS
;;;;

;;; The central routine for executing benchmarks.
;;; The idea is taken from Greg, the GNUstep regression test environment.
(define run-benchmark #f)
(let ((benchmark-running #f))
  (define (local-run-benchmark name iterations thunk)
    (if benchmark-running
        (error "Nested calls to run-benchmark are not permitted.")
        (let ((benchmark-name (full-name name))
              (iterations (scale-iterations iterations)))
          (set! benchmark-running #t)
          (let ((before #f) (after #f) (gc-time #f))
            ;; Collect garbage before timing, so that leftover garbage from
            ;; earlier benchmarks does not distort the measurement.
            (gc)
            (set! gc-time (gc-run-time))
            (set! before (times))
            ;; Run the thunk ITERATIONS times.
            (do ((i 0 (+ i 1)))
                ((= i iterations))
              (thunk))
            (set! after (times))
            (set! gc-time (- (gc-run-time) gc-time))
            (report benchmark-name iterations before after gc-time))
          (set! benchmark-running #f))))
  (set! run-benchmark local-run-benchmark))

;;; A short form for benchmarks.
(defmacro benchmark (name iterations body . rest)
  `(,run-benchmark ,name ,iterations (lambda () ,body ,@rest)))

;;;; BENCHMARK NAMES
;;;;

;;; Turn a benchmark name into a nice human-readable string.
(define (format-benchmark-name name)
  (call-with-output-string
   (lambda (port)
     (let loop ((name name)
                (separator ""))
       (if (pair? name)
           (begin
             (display separator port)
             (display (car name) port)
             (loop (cdr name) ": ")))))))
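
;;; For example:
;;;   (format-benchmark-name '("arithmetic" "addition" "integer"))
;;;   ;; => "arithmetic: addition: integer"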

;;; For a given benchmark name, deliver the full name including all prefixes.
(define (full-name name)
  (append (current-benchmark-prefix) (list name)))

;;; A fluid containing the current benchmark prefix, as a list.
(define prefix-fluid (make-fluid))
(fluid-set! prefix-fluid '())
(define (current-benchmark-prefix)
  (fluid-ref prefix-fluid))

;;; Append PREFIX to the current name prefix while evaluating THUNK.
;;; The name prefix is only changed within the dynamic scope of the
;;; call to with-benchmark-prefix*. Return the value returned by THUNK.
(define (with-benchmark-prefix* prefix thunk)
  (with-fluids ((prefix-fluid
                 (append (fluid-ref prefix-fluid) (list prefix))))
    (thunk)))

;;; (with-benchmark-prefix PREFIX BODY ...)
;;; Append PREFIX to the current name prefix while evaluating BODY ...
;;; The name prefix is only changed within the dynamic scope of the
;;; with-benchmark-prefix expression. Return the value returned by the last
;;; BODY expression.
(defmacro with-benchmark-prefix (prefix . body)
  `(with-benchmark-prefix* ,prefix (lambda () ,@body)))

;;;; TIME CALCULATION
;;;;

(define benchmark-time-base
  internal-time-units-per-second)

(define time-base ;; short-cut, not exported
  benchmark-time-base)

(define frame-time/iteration
  "<will be set during initialization>")

(define (benchmark-total-time before after)
  (- (tms:clock after) (tms:clock before)))

(define (benchmark-user-time before after)
  (- (tms:utime after) (tms:utime before)))

(define (benchmark-system-time before after)
  (- (tms:stime after) (tms:stime before)))

(define (benchmark-frame-time iterations)
  (* iterations frame-time/iteration))

(define (benchmark-core-time iterations before after)
  (- (benchmark-user-time before after) (benchmark-frame-time iterations)))

(define (benchmark-user-time\interpreter before after gc-time)
  (- (benchmark-user-time before after) gc-time))

(define (benchmark-core-time\interpreter iterations before after gc-time)
  (- (benchmark-core-time iterations before after) gc-time))

;;;; REPORTERS
;;;;

;;; The global list of reporters.
(define reporters '())

;;; The default reporter, to be used only if no others exist.
(define default-reporter #f)

;;; Add the procedure REPORTER to the current set of reporter functions.
;;; Signal an error if that reporter procedure object is already registered.
(define (register-reporter reporter)
  (if (memq reporter reporters)
      (error "register-reporter: reporter already registered: " reporter))
  (set! reporters (cons reporter reporters)))

;;; Remove the procedure REPORTER from the current set of reporter
;;; functions. Signal an error if REPORTER is not currently registered.
(define (unregister-reporter reporter)
  (if (memq reporter reporters)
      (set! reporters (delq! reporter reporters))
      (error "unregister-reporter: reporter not registered: " reporter)))

;;; Return true iff REPORTER is in the current set of reporter functions.
(define (reporter-registered? reporter)
  (if (memq reporter reporters) #t #f))

;;; Send a benchmark result to all currently registered reporter functions.
(define (report . args)
  (if (pair? reporters)
      (for-each (lambda (reporter) (apply reporter args))
                reporters)
      (apply default-reporter args)))

;;;; Some useful standard reporters:
;;;;   Log reporters write all benchmark results to a given log file.
;;;;   Full reporters write all benchmark results to the standard output.
;;;;   User reporters write some interesting results to the standard output.

;;; Display a single benchmark result to the given port.
(define (print-result port name iterations before after gc-time)
  (let* ((name (format-benchmark-name name))
         (total-time (benchmark-total-time before after))
         (user-time (benchmark-user-time before after))
         (system-time (benchmark-system-time before after))
         (frame-time (benchmark-frame-time iterations))
         (benchmark-time (benchmark-core-time iterations before after))
         (user-time\interpreter
          (benchmark-user-time\interpreter before after gc-time))
         (benchmark-core-time\interpreter
          (benchmark-core-time\interpreter iterations before after gc-time)))
    (write (list name iterations
                 'total (i/ total-time time-base)
                 'user (i/ user-time time-base)
                 'system (i/ system-time time-base)
                 'frame (i/ frame-time time-base)
                 'benchmark (i/ benchmark-time time-base)
                 'user/interp (i/ user-time\interpreter time-base)
                 'bench/interp (i/ benchmark-core-time\interpreter time-base)
                 'gc (i/ gc-time time-base))
           port)
    (newline port)))

;;; Return a reporter procedure which prints all results to the file
;;; FILE, in human-readable form. FILE may be a filename, or a port.
(define (make-log-reporter file)
  (let ((port (if (output-port? file) file
                  (open-output-file file))))
    (lambda args
      (apply print-result port args)
      (force-output port))))
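
;;; For example, to log all results to a file (the file name here is
;;; arbitrary):
;;;   (register-reporter (make-log-reporter "benchmarks.log"))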

;;; A reporter that reports all results to the user.
(define (full-reporter . args)
  (apply print-result (current-output-port) args))

;;; Display interesting results of a single benchmark to the given port.
(define (print-user-result port name iterations before after gc-time)
  (let* ((name (format-benchmark-name name))
         (user-time (benchmark-user-time before after))
         (benchmark-time (benchmark-core-time iterations before after))
         (benchmark-core-time\interpreter
          (benchmark-core-time\interpreter iterations before after gc-time)))
    (write (list name iterations
                 'user (i/ user-time time-base)
                 'benchmark (i/ benchmark-time time-base)
                 'bench/interp (i/ benchmark-core-time\interpreter time-base)
                 'gc (i/ gc-time time-base))
           port)
    (newline port)))

;;; A reporter that reports interesting results to the user.
(define (user-reporter . args)
  (apply print-user-result (current-output-port) args))

;;;; Initialize the benchmarking system:
;;;;

;;; First, display version information.
(display ";; running guile version " (current-output-port))
(display (version) (current-output-port))
(newline (current-output-port))

;;; Second, make sure the benchmarking routines are compiled.
(define (null-reporter . args) #t)
(set! default-reporter null-reporter)
(benchmark "empty initialization benchmark" 2 #t)

;;; Third, initialize the system constants.
(display ";; calibrating the benchmarking framework..." (current-output-port))
(newline (current-output-port))
(define (initialization-reporter name iterations before after gc-time)
  (let* ((frame-time (- (tms:utime after) (tms:utime before) gc-time 3)))
    (set! frame-time/iteration (/ frame-time iterations))
    (display ";; framework time per iteration: " (current-output-port))
    (display (i/ frame-time/iteration time-base) (current-output-port))
    (newline (current-output-port))))
(set! default-reporter initialization-reporter)
(benchmark "empty initialization benchmark" 524288 #t)

;;; Finally, set the default reporter.
(set! default-reporter user-reporter)