refman.txt 64 KB

  1. SCMUTILS Reference Manual
  2. This is a description of the Scmutils system, an integrated library of
  3. procedures, embedded in the programming language Scheme, and intended
  4. to support teaching and research in mathematical physics and electrical
  5. engineering. Scmutils and Scheme are particularly effective in work
  6. where the almost-functional nature of Scheme is advantageous, such as
  7. classical mechanics, where many of the procedures are most easily
  8. formulated as quite abstract manipulations of functions.
  9. Many people contributed to the development of the Scmutils library
  10. over many years, so we may miss some of them. The principal
  11. contributors have been:
  12. Gerald Jay Sussman, Harold Abelson, Jack Wisdom, Jacob Katzenelson,
  13. Hardy Mayer, Christopher P. Hanson, Matthew Halfant, Bill Siebert,
  14. Guillermo Juan Rozas, Panayotis Skordos, Kleanthes Koniaris, Kevin
  15. Lin, Dan Zuras
  16. Scheme and Functional Programming
  17. Scheme is a simple computer language in the Lisp family of languages,
  18. with important structural features derived from Algol-60. We will not
  19. attempt to document Scheme here, as it is adequately documented in the
  20. IEEE standard (IEEE-1178) and in numerous articles and books. The
  21. R^4RS is a terse document describing Scheme in detail. It is included
  22. with this document. We assume that a reader of this document is
  23. familiar with Scheme and has read a book such as
  24. Harold Abelson, Gerald Jay Sussman and Julie Sussman
  25. Structure and Interpretation of Computer Programs
  26. MIT Press and McGraw-Hill (1985, 1996)
  27. As a reminder, Scheme is an expression-oriented language. Expressions
  28. have values. The value of an expression is constructed from the
  29. values of the constituent parts of the expression and the way the
  30. expression is constructed. Thus, the value of
  31. (+ (* :pi (square r)) 1)
  32. is constructed from the values of the symbols
  33. +, *, :pi, square, r, 1
  34. and by the parenthetical organization.
  35. In any Lisp-based language, an expression is constructed from parts
  36. with parentheses. The first subexpression always denotes a procedure
  37. and the rest of the subexpressions denote arguments. So in the case
  38. of the expression above, "square" is a symbol whose value is a
  39. procedure that is applied to a thing (probably a number) denoted by
  40. "r". That value of the application of square to r is then combined
  41. with the number denoted by ":pi" using the procedure denoted by "*"
  42. to make an object (again probably a number) that is combined with the
  43. number denoted by "1" using the procedure denoted by "+". Indeed, if
  44. the symbols have the values we expect from their (hopefully mnemonic)
  45. names,
  46. + = a means of addition
  47. * = a means of multiplication
  48. :pi = a number approximately equal to 3.14159265358979
  49. square = a means of squaring
  50. 1 = the multiplicative identity in the reals
  51. r = some number, say for example 4
  52. then this expression would have the approximate value denoted by the
  53. numeral 51.26548245743669.
  54. We can write expressions denoting procedures. For example the
  55. procedure for squaring can be written
  56. (lambda (x) (* x x)) ,
  57. which may be read
  58. "the procedure of one argument x that multiplies x by x" .
  59. We may bind a symbol to a value by definition
  60. (define square
  61. (lambda (x) (* x x))) ,
  62. or equivalently, by
  63. (define (square x) (* x x)) .
  64. The application of a defined procedure to operands will bind the
  65. symbols that are the formal parameters to the actual arguments that
  66. are the values of the operands:
  67. (+ (square 3) (square 4)) => 25
  68. This concludes the reminders about Scheme. You must consult an
  69. alternate source for more information.
  70. One caveat: unlike the Scheme standard the Scmutils system is case
  71. sensitive.
  72. Generic Arithmetic
  73. In the Scmutils library arithmetic operators are generic over a
  74. variety of mathematical datatypes. For example, addition makes sense
  75. when applied to such diverse data as numbers, vectors, matrices, and
  76. functions. Additionally, many operations can be given a meaning when
  77. applied to different datatypes. For example, multiplication makes
  78. sense when applied to a number and a vector.
  79. The traditional operator symbols, such as "+" and "*" are bound to
  80. procedures that implement the generic operations. The details of
  81. which operations are defined for which datatypes are described below,
  82. organized by the datatype.
  83. In addition to the standard operations, every piece of mathematical
  84. data, x, can give answers to the following questions:
  85. (type x)
  86. Returns a symbol describing the type of x. For example,
  87. (type 3.14) => *number*
  88. (type (vector 1 2 3)) => *vector*
  89. (type-predicate x)
  90. Returns a predicate that is true on objects that
  91. are the same type as x
  92. (arity p)
  93. Returns a description of the number of arguments that p,
  94. interpreted as a procedure, accepts, compatible with the MIT
  95. Scheme procedure-arity procedure, except that it is extended for
  96. datatypes that are not usually interpreted as procedures. A
  97. structured object, like a vector, may be applied as a vector of
  98. procedures, and its arity is the intersection of the arities of
  99. the components.
  100. An arity is a newly allocated pair whose car field is the minimum
  101. number of arguments, and whose cdr field is the maximum number of
  102. arguments. The minimum is an exact non-negative integer. The
  103. maximum is either an exact non-negative integer, or `#f' meaning
  104. that the procedure has no maximum number of arguments. In our
  105. version of Scheme #f is the same as the empty list, and a pair
  106. with the empty list in the cdr field is a singleton list, so the
  107. arity will print as shown in the second column.
  108. (arity (lambda () 3)) => (0 . 0) = (0 . 0)
  109. (arity (lambda (x) x)) => (1 . 1) = (1 . 1)
  110. (arity car) => (1 . 1) = (1 . 1)
  111. (arity (lambda x x)) => (0 . #f) = (0)
  112. (arity (lambda (x . y) x)) => (1 . #f) = (1)
  113. (arity (lambda (x #!optional y) x)) => (1 . 2) = (1 . 2)
  114. (arity (vector cos sin)) => (1 . 1) = (1 . 1)
  115. We will now describe each of the generic operations. These operations
  116. are defined for many but not all of the mathematical datatypes. For
  117. particular datatypes we will list and discuss the operations that only
  118. make sense for them.
  119. (inexact? x)
  120. This procedure is a predicate -- a boolean-valued procedure.
  121. See the R^4RS for an explanation of exactness of numbers.
  122. A compound object, such as a vector or a matrix, is
  123. inexact if it has inexact components.
  124. (zero-like x)
  125. In general, this procedure returns the additive identity of the
  126. type of its argument, if it exists. For numbers this is 0.
  127. (one-like x)
  128. In general, this procedure returns the multiplicative identity of
  129. the type of its argument, if it exists. For numbers this is 1.
  130. (zero? x)
  131. Is true if x is an additive identity.
  132. (one? x)
  133. Is true if x is a multiplicative identity.
  134. (negate x) = (- (zero-like x) x)
  135. Gives an object that when added to x yields zero.
  136. (invert x) = (/ (one-like x) x)
  137. Gives an object that when multiplied by x yields one.
  138. Most of the numerical functions have been generalized to many of the
  139. datatypes, but the meaning may depend upon the particular datatype.
  140. Some are defined for numerical data only.
  141. (= x1 x2 ...) ==> <boolean>
  142. (+ x1 x2 ...)
  143. (* x1 x2 ...)
  144. (- x1 x2 ...)
  145. (/ x1 x2 ...)
  146. (expt x1 x2)
  147. (gcd n1 n2 ...)
  148. (sqrt x) Gives a square root of x, or an approximation to it.
  149. (exp x) = :e^x
  150. (exp10 x) = 10^x
  151. (exp2 x) = 2^x
  152. (log x)
  153. (log10 x) = (/ (log x) (log 10))
  154. (log2 x) = (/ (log x) (log 2))
  155. (sin x), (cos x), (tan x)
  156. (sec x), (csc x)
  157. (asin x), (acos x), (atan x)
  158. (atan x1 x2) = (atan (/ x1 x2)) but retains quadrant information
  159. (sinh x), (cosh x), (tanh x)
  160. (sech x), (csch x)
  161. (make-rectangular a1 a2) = a1+ia2
  162. (make-polar a1 a2) = a1*:e^(* +i a2)
  163. (real-part z)
  164. (imag-part z)
  165. (magnitude z)
  166. (angle z)
  167. (conjugate z)
  168. If M is a quantity that can be interpreted as a square matrix,
  169. (determinant M)
  170. (trace M)
  171. Scheme Numbers
  172. Operations on the Scheme Number datatype that are part of standard
  173. Scheme are listed here without comment; those that are not part of
  174. standard Scheme are described. In the following <n> is (any
  175. expression that denotes) an integer. <a> is any real number, <z> is
  176. any complex number, and <x> and <y> are any kind of number.
  177. (type <x>) = *number*
  178. (inexact? <x>) ==> <boolean>
  179. (zero-like <x>) = 0
  180. (one-like <x>) = 1
  181. (zero? <x>) ==> <boolean>
  182. (one? <x>) ==> <boolean>
  183. (negate <x>), (invert <x>), (sqrt <x>)
  184. (exp <x>), (exp10 <x>), (exp2 <x>)
  185. (log <x>), (log10 <x>), (log2 <x>)
  186. (sin <x>), (cos <x>), (tan <x>), (sec <x>), (csc <x>)
  187. (asin <x>), (acos <x>), (atan <x>)
  188. (atan <x1> <x2>)
  189. (sinh <x>), (cosh <x>), (tanh <x>), (sech <x>), (csch <x>)
  190. (= <x1> <x2> ...) ==> <boolean>
  191. (+ <x1> <x2> ...)
  192. (* <x1> <x2> ...)
  193. (- <x1> <x2> ...)
  194. (/ <x1> <x2> ...)
  195. (expt <x1> <x2>)
  196. (gcd <n1> <n2> ...)
  197. (make-rectangular <a1> <a2>) = <a1>+i<a2>
  198. (make-polar <a1> <a2>) = <a1>*:e^(* +i <a2>)
  199. (real-part <z>)
  200. (imag-part <z>)
  201. (magnitude <z>)
  202. (angle <z>)
  203. (conjugate <z>)
  204. Structured Objects
  205. Scmutils supports a variety of structured object types, such as
  206. vectors, up and down tuples, matrices, and power series.
  207. The explicit constructor for a structured object is a procedure whose
  208. name is what we call objects of that type. For example, we make
  209. explicit vectors with the procedure named "vector", and explicit lists
  210. with the procedure named "list". For example
  211. (list 1 2 3 4 5) a list of the first five positive integers
  212. (vector 1 2 3 4 5) a vector of the first five positive integers
  213. (down 10 3 4) a down tuple with three components
  214. There is no natural way to notate a matrix, except by giving its rows
  215. (or columns). To make a matrix with three rows and five columns:
  216. (define M
  217. (matrix-by-rows (list 1 2 3 4 5)
  218. (list 6 7 8 9 10)
  219. (list 11 12 13 14 15)))
  220. A power series may be constructed from an explicit set of coefficients
  221. (series 1 2 3 4 5)
  222. is the power series whose first five coefficients are the first five
  223. positive integers and all of the rest of the coefficients are zero.
  224. Although each datatype has its own specialized procedures, there are a
  225. variety of generic procedures for selecting the components from
  226. structured objects. To get the n-th component from a linear data
  227. structure, v, such as a vector or a list, one may in general use the
  228. generic selector, "ref":
  229. (ref x n)
  230. All structured objects are accessed by zero-based indexing, as is the
  231. custom in Scheme programs and in relativity. For example, to get the
  232. third element (index = 2) of a vector or a list we can use
  233. (ref (vector 1 2 3 4 5) 2) = 3
  234. (ref (list 1 2 3 4 5) 2) = 3
  235. If M is a matrix, then the component in the i-th row and j-th column
  236. can be obtained by (ref M i j). For the matrix given above
  237. (ref M 1 3) = 9
  238. Other structured objects are more magical
  239. (ref cos-series 6) = -1/720
  240. The number of components of a structured object can be found with the
  241. "size" generic operator:
  242. (size (vector 1 2 3 4 5)) = 5
  243. Besides the extensional constructors, most structured-object datatypes
  244. can be intensionally constructed by giving a procedure whose values
  245. are the components of the object. These "generate" procedures are
  246. (list:generate n proc)
  247. (vector:generate n proc)
  248. (matrix:generate m n proc)
  249. (series:generate proc)
  250. For example, one may make a 6 component vector each of whose
  251. components is :pi times the index of that component, as follows:
  252. (vector:generate 6 (lambda (i) (* :pi i)))
  253. Or a 3X5 matrix whose components are the sum of :pi times the row
  254. number and the speed of light times the column number:
  255. (matrix:generate 3 5 (lambda (i j) (+ (* :pi i) (* :c j))))
  256. Also, it is commonly useful to deal with a structured object in an
  257. elementwise fashion. We provide special combinators for many
  258. structured datatypes that allow one to make a new structure, of the
  259. same type and size of the given ones, where the components of the new
  260. structure are the result of applying the given procedure to the
  261. corresponding components of the given structures.
  262. ((list:elementwise proc) <l1> ... <ln>)
  263. ((vector:elementwise proc) <v1> ... <vn>)
  264. ((structure:elementwise proc) <s1> ... <sn>)
  265. ((matrix:elementwise proc) <M1> ... <Mn>)
  266. ((series:elementwise proc) <p1> ... <pn>)
  267. Thus, vector addition is equivalent to (vector:elementwise +).
  268. Scheme Vectors
  269. We identify the Scheme vector data type with mathematical
  270. n-dimensional vectors. These are interpreted as up tuples when a
  271. distinction between up tuples and down tuples is made.
  272. We inherit from Scheme the constructors VECTOR and MAKE-VECTOR, the
  273. selectors VECTOR-LENGTH and VECTOR-REF, and zero-based indexing. We
  274. also get the iterator MAKE-INITIALIZED-VECTOR, and the type predicate
  275. VECTOR? In the documentation that follows, <v> will stand for a
  276. vector-valued expression.
  277. (vector? <any>) ==> <boolean>
  278. (type <v>) ==> *vector*
  279. (inexact? <v>) ==> <boolean>
  280. Is true if any component of <v> is inexact, otherwise it is false.
  281. (vector-length <v>) ==> <+integer>
  282. gets the number of components of <v>
  283. (vector-ref <v> <i>)
  284. gets the <i>th (zero-based) component of vector <v>
  285. (make-initialized-vector <n> <procedure>)
  286. this is also called (v:generate <n> <procedure>)
  287. and (vector:generate <n> <procedure>)
  288. generates an <n>-dimensional vector whose <i>th component is the
  289. result of the application of the <procedure> to the number <i>.
  290. (zero-like <v>) ==> <vector>
  291. Gives the zero vector of the dimension of vector <v>.
  292. (zero? <v>) ==> <boolean>
  293. (negate <v>) ==> <vector>
  294. (conjugate <v>) ==> <vector>
  295. Elementwise complex-conjugate of <v>
  296. Simple arithmetic on vectors is componentwise
  297. (= <v1> <v2> ...) ==> <boolean>
  298. (+ <v1> <v2> ...) ==> <vector>
  299. (- <v1> <v2> ...) ==> <vector>
  300. There are a variety of products defined on vectors.
  301. (dot-product <v1> <v2>) ==> <x>
  302. (cross-product <v1> <v2>)
  303. Cross product only makes sense for 3-dimensional vectors.
  304. (* <x> <v>) = (scalar*vector <x> <v>) ==> <vector>
  305. (* <v> <x>) = (vector*scalar <v> <x>) ==> <vector>
  306. (/ <v> <x>) = (vector*scalar <v> (/ 1 <x>)) ==> <vector>
  307. The product of two vectors makes an outer product structure.
  308. (* <v> <v>) = (outer-product <v> <v>) ==> <structure>
  309. (euclidean-norm <v>) = (sqrt (dot-product <v> <v>))
  310. (abs <v>) = (euclidean-norm <v>)
  311. (v:inner-product <v1> <v2>) = (dot-product (conjugate <v1>) <v2>)
  312. (complex-norm <v>) = (sqrt (v:inner-product <v> <v>))
  313. (magnitude <v>) = (complex-norm <v>)
  314. (maxnorm <v>)
  315. gives the maximum of the magnitudes of the components of <v>
  316. (v:make-unit <v>) = (/ <v> (euclidean-norm <v>))
  317. (v:unit? <v>) = (one? (euclidean-norm <v>))
  318. (v:make-basis-unit <n> <i>)
  319. Makes the n-dimensional basis unit vector with zero in all
  320. components except for the ith component, which is one.
  321. (v:basis-unit? <v>)
  322. Is true if and only if <v> is a basis unit vector.
  323. Up Tuples and Down Tuples
  324. Sometimes it is advantageous to distinguish down tuples and up tuples.
  325. If the elements of up tuples are interpreted to be the components of
  326. vectors in a particular coordinate system, the elements of the down
  327. tuples may be thought of as the components of the dual vectors in that
  328. coordinate system. The union of the up tuple and the down tuple data
  329. types is the data type we call "structures."
  330. Structures may be recursive and they need not be uniform. Thus it is
  331. possible to have an up structure with three components: the first is a
  332. number, the second is an up structure with two numerical components,
  333. and the third is a down structure with two numerical components. Such
  334. a structure has size (or length) 3, but it has five dimensions.
  335. In Scmutils, the Scheme vectors are interpreted as up tuples, and
  336. the down tuples are distinguished. The predicate "structure?" is true
  337. of any down or up tuple, but the two can be distinguished by the
  338. predicates "up?" and "down?".
  339. (up? <any>) ==> <boolean>
  340. (down? <any>) ==> <boolean>
  341. (structure? <any>) = (or (down? <any>) (up? <any>))
  342. In the following, <s> stands for any structure-valued expression; <up>
  343. and <down> will be used if necessary to make the distinction.
  344. The generic type operation distinguishes the types:
  345. (type <s>) ==> *vector* or *down*
  346. We reserve the right to change this implementation to distinguish
  347. Scheme vectors from up tuples. Thus, we provide (null) conversions
  348. between vectors and up tuples.
  349. (vector->up <scheme-vector>) ==> <up>
  350. (vector->down <scheme-vector>) ==> <down>
  351. (up->vector <up>) ==> <scheme-vector>
  352. (down->vector <down>) ==> <scheme-vector>
  353. Constructors are provided for these types, analogous to list and
  354. vector.
  355. (up . args) ==> <up>
  356. (down . args) ==> <down>
  357. The dimension of a structure is the number of entries, adding up the
  358. numbers of entries from substructures. The dimension of any structure
  359. can be determined by
  360. (s:dimension <s>) ==> <+integer>
  361. Processes that need to traverse a structure need to know the number of
  362. components at the top level. This is the length of the structure,
  363. (s:length <s>) ==> <+integer>
  364. The ith component (zero-based) can be accessed by
  365. (s:ref <s> i)
  366. For example,
  367. (s:ref (up 3 (up 5 6) (down 2 4)) 1)
  368. (up 5 6)
  369. As usual, the generic ref procedure can recursively access
  370. substructure
  371. (ref (up 3 (up 5 6) (down 2 4)) 1 0)
  372. 5
  373. Given a structure <s> we can make a new structure of the same type with
  374. <x> substituted for the <n>th component of the given structure using
  375. (s:with-substituted-coord <s> <n> <x>)
  376. We can construct an entirely new structure of length <n> whose
  377. components are the values of a procedure using s:generate:
  378. (s:generate <n> up/down <procedure>)
  379. The up/down argument may be either up or down.
  380. The following generic arithmetic operations are defined for
  381. structures.
  382. (zero? <s>) ==> <boolean>
  383. is true if all of the components of the structure are zero.
  384. (zero-like <s>) ==> <s>
  385. produces a new structure with the same shape as the given structure
  386. but with all components being zero-like the corresponding component in
  387. the given structure.
  388. (negate <s>) ==> <s>
  389. (magnitude <s>) ==> <s>
  390. (abs <s>) ==> <s>
  391. (conjugate <s>) ==> <s>
  392. produce new structures which are the result of applying the generic
  393. procedure elementwise to the given structure.
  394. (= <s1> ... <sn>) ==> <boolean>
  395. is true only when the corresponding components are =.
  396. (+ <s1> ... <sn>) ==> <s>
  397. (- <s1> ... <sn>) ==> <s>
  398. These are componentwise addition and subtraction.
  399. (* <s1> <s2>) ==> <s> or <x> , a structure or a number
  400. magically does what you want: If the structures are compatible for
  401. contraction the product is the contraction (the sum of the products of
  402. the corresponding components.) If the structures are not compatible
  403. for contraction the product is the structure of the shape and length
  404. of <s2> whose components are the products of <s1> with the
  405. corresponding components of <s2>.
  406. Structures are compatible for contraction if they are of the same
  407. length, of opposite type, and if their corresponding elements are
  408. compatible for contraction.
  409. It is not obvious why this is what you want, but try it, you'll like
  410. it!
  411. For example, the following are compatible for contraction:
  412. (print-expression (* (up (up 2 3) (down 5 7 11))
  413. (down (down 13 17) (up 19 23 29))))
  414. 652
  415. Two up tuples are not compatible for contraction.
  416. Their product is an outer product:
  417. (print-expression (* (up 2 3) (up 5 7 11)))
  418. (up (up 10 15) (up 14 21) (up 22 33))
  419. (print-expression (* (up 5 7 11) (up 2 3)))
  420. (up (up 10 14 22) (up 15 21 33))
  421. This product is not generally associative or commutative. It is
  422. commutative for structures that contract, and it is associative for
  423. structures that represent linear transformations.
  424. To yield additional flavor, the definition of square for structures is
  425. inconsistent with the definition of product. It is possible to square
  426. an up tuple or a down tuple. The result is the sum of the squares of
  427. the components. This makes it convenient to write such things as
  428. (/ (square p) (* 2 m)), but it is sometimes confusing.
  429. Some structures, such as the ones that represent inertia tensors, must
  430. be inverted. (The "m" above may be an inertia tensor!) Division is
  431. arranged to make this work, when possible. The details are too hairy
  432. to explain in this short document. We probably need to write a book
  433. about this!
  434. Matrices
  435. There is an extensive set of operations for manipulating matrices.
  436. Let <M>, <N> be matrix-valued expressions. The following operations
  437. are provided
  438. (matrix? <any>) ==> <boolean>
  439. (type <M>) ==> *matrix*
  440. (inexact? <M>) ==> <boolean>
  441. (m:num-rows <M>) ==> <n>,
  442. the number of rows in matrix M.
  443. (m:num-cols <M>) ==> <n>,
  444. the number of columns in matrix M.
  445. (m:dimension <M>) ==> <n>
  446. the number of rows (or columns) in a square matrix M.
  447. It is an error to try to get the dimension of a matrix
  448. that is not square.
  449. (column-matrix? <M>)
  450. is true if M is a matrix with one column.
  451. Note: neither a tuple nor a scheme vector is a column matrix.
  452. (row-matrix? <M>)
  453. is true if M is a matrix with one row.
  454. Note: neither a tuple nor a scheme vector is a row matrix.
  455. There are general constructors for matrices:
  456. (matrix-by-rows <row-list-1> ... <row-list-n>)
  457. where the row lists are lists of elements that are to appear in the
  458. corresponding row of the matrix
  459. (matrix-by-cols <col-list-1> ... <col-list-n>)
  460. where the column lists are lists of elements that are to appear in
  461. the corresponding column of the matrix
  462. (column-matrix <x1> ... <xn>)
  463. returns a column matrix with the given elements
  464. (row-matrix <x1> ... <xn>)
  465. returns a row matrix with the given elements
  466. And a standard selector for the elements of a matrix
  467. (matrix-ref <M> <n> <m>)
  468. returns the element in the m-th column and the n-th row of matrix M.
  469. Remember, this is zero-based indexing.
  470. We can access various parts of a matrix
  471. (m:nth-col <M> <n>) ==> <scheme-vector>
  472. returns a Scheme vector with the elements of the n-th column of M.
  473. (m:nth-row <M> <n>) ==> <scheme-vector>
  474. returns a Scheme vector with the elements of the n-th row of M.
  475. (m:diagonal <M>) ==> <scheme-vector>
  476. returns a Scheme vector with the elements of the diagonal of the
  477. square matrix M.
  478. (m:submatrix <M> <from-row> <upto-row> <from-col> <upto-col>)
  479. extracts a submatrix from M, as in the following example
  480. (print-expression
  481. (m:submatrix
  482. (m:generate 3 4
  483. (lambda (i j)
  484. (* (square i) (cube j))))
  485. 1 3 1 4))
  486. (matrix-by-rows (list 1 8 27)
  487. (list 4 32 108))
  488. (m:generate <n> <m> <procedure>) ==> <M>
  489. returns the nXm (n rows by m columns) matrix whose ij-th element is
  490. the value of the procedure when applied to arguments i, j.
  491. (simplify
  492. (m:generate 3 4
  493. (lambda (i j)
  494. (* (square i) (cube j)))))
  495. => (matrix-by-rows (list 0 0 0 0)
  496. (list 0 1 8 27)
  497. (list 0 4 32 108))
  498. (matrix-with-substituted-row <M> <n> <scheme-vector>)
  499. returns a new matrix constructed from M by substituting the Scheme
  500. vector v for the n-th row in M.
  501. We can transpose a matrix (producing a new matrix whose columns are
  502. the rows of the given matrix and whose rows are the columns of the
  503. given matrix) with:
  504. (m:transpose <M>)
  505. There are coercions between Scheme vectors and matrices:
  506. (vector->column-matrix <scheme-vector>) ==> <M>
  507. (column-matrix->vector <M>) ==> <scheme-vector>
  508. (vector->row-matrix <scheme-vector>) ==> <M>
  509. (row-matrix->vector <M>) ==> <scheme-vector>
  510. And similarly for up and down tuples:
  511. (up->column-matrix <up>) ==> <M>
  512. (column-matrix->up <M>) ==> <up>
  513. (down->row-matrix <down>) ==> <M>
  514. (row-matrix->down <M>) ==> <down>
  515. Matrices can be tested with the usual tests:
  516. (zero? <M>)
  517. (identity? <M>)
  518. (diagonal? <M>)
  519. (m:make-zero <n>) ==> <M>
  520. returns an nXn (square) matrix of zeros
  521. (m:make-zero <n> <m>) ==> <M>
  522. returns an nXm matrix of zeros
  523. Useful matrices can be made easily
  524. (zero-like <M>) ==> <N>
  525. returns a zero matrix of the same dimensions as the given matrix
  526. (m:make-identity <n>) ==> <M>
  527. returns an identity matrix of dimension n
  528. (m:make-diagonal <scheme-vector>) ==> <M>
  529. returns a square matrix with the given vector elements on the
  530. diagonal and zeros everywhere else.
  531. Matrices have the usual unary generic operators:
  532. negate, invert, conjugate,
  533. However the generic operators
  534. exp, sin, cos,
  535. yield a power series in the given matrix.
  536. Square matrices may be exponentiated to any exact positive integer
  537. power:
  538. (expt <M> <n>)
  539. We may also get the determinant and the trace of a square matrix:
  540. (determinant <M>)
  541. (trace <M>)
  542. The usual binary generic operators make sense when applied to
  543. matrices. However they have been extended to interact with other
  544. datatypes in a few useful ways. The componentwise operators
  545. =, +, -
  546. are extended so that if one argument is a square matrix, M, and the
  547. other is a scalar, x, then the scalar is promoted to a diagonal matrix
  548. of the correct dimension and then the operation is done on those:
  549. (= <M> <x>) and (= <x> <M>) tests if M = xI
  550. (+ <M> <x>) and (+ <x> <M>) = M+xI
  551. (- <M> <x>) = M-xI and (- <x> <M>) = xI-M
  552. Multiplication, *, is extended to allow a matrix to be multiplied on
  553. either side by a scalar. Additionally, a matrix may be multiplied on
  554. the left by a conforming down tuple, or on the right by a conforming
  555. up tuple.
  556. Division is interpreted to mean a number of different things depending
  557. on the types of the arguments. For any matrix M and scalar x
  558. (/ <M> <x>) = (* <M> (/ 1 <x>))
  559. If M is a square matrix then it is possible that it is invertible, so
  560. if <x> is either a scalar or a matrix, then
  561. (/ <x> <M>) = (* <x> <N>), where N is the matrix inverse of M.
  562. In general, if M is a square matrix and v is either an up tuple or a
  563. column matrix, then
  564. (/ <v> <M>) = <w>, where w is of the same type as v and where v=Mw.
  565. Similarly, for v a down tuple
  566. (/ <v> <M>) = <w>, where w is a down tuple and where v=wM.
  567. Functions
  568. In Scmutils, functions are data just like other mathematical objects,
  569. and the generic arithmetic system is extended to include them. If
  570. <f> is an expression denoting a function then
  571. (function? <any>) ==> <boolean>
  572. (type <f>) ==> *function*
  573. Operations on functions generally construct new functions that are the
  574. composition of the operation with its arguments, thus applying the
  575. operation to the value of the functions: if U is a unary operation, if
  576. f is a function, and if x is arguments appropriate to f, then
  577. ((U f) x) = (U (f x))
  578. If B is a binary operation, if f and g are functions, and if x is
  579. arguments appropriate to both f and g, then
  580. ((B f g) x) = (B (f x) (g x))
  581. All of the usual unary operations are available. So if <f> is an
  582. expression representing a function, and if <x> is any kind of argument
  583. for <f> then, for example,
  584. ((negate <f>) <x>) = (negate (f <x>))
  585. ((invert <f>) <x>) = (invert (f <x>))
  586. ((sqrt <f>) <x>) = (sqrt (f <x>))
  587. The other operations that behave this way are:
  588. exp, log, sin, cos, asin, acos, sinh, cosh, abs,
  589. real-part, imag-part, magnitude, angle, conjugate, atan
  590. The binary operations are similar, with the exception that
  591. mathematical objects that may not be normally viewed as functions are
  592. coerced to constant functions for combination with functions.
  593. ((+ <f> <g>) <x>) = (+ (f <x>) (g <x>))
  594. ((- <f> <g>) <x>) = (- (f <x>) (g <x>))
  595. For example:
  596. ((+ sin 1) x) = (+ (sin x) 1)
  597. The other operations that behave in this way are:
  598. *, /, expt, gcd, make-rectangular, make-polar
  599. Operators
  600. Operators are a special class of functions that manipulate functions.
  601. They differ from other functions in that multiplication of operators
  602. is understood as their composition, rather than the product of their
  603. values for each input. The prototypical operator is the derivative,
  604. D. For an ordinary function, such as "sin"
  605. ((expt sin 2) x) = (expt (sin x) 2),
  606. but derivative is treated differently:
  607. ((expt D 2) f) = (D (D f))
  608. New operators can be made by combining others. So, for example,
  609. (expt D 2) is an operator, as is (+ (expt D 2) (* 2 D) 3).
  610. We start with a few primitive operators, the total and partial
  611. derivatives, which will be explained in detail later.
  612. o:identity
  613. derivative (also named D)
  614. (partial <component-selectors>)
  615. If <O> is an expression representing an operator then
  616. (operator? <any>) ==> <boolean>
  617. (type <O>) ==> *operator*
  618. Operators can be added, subtracted, multiplied, and scaled. If they
  619. are combined with an object that is not an operator, the non-operator
  620. is coerced to an operator that multiplies its input by the
  621. non-operator.
  622. The transcendental functions exp, sin, and cos are extended to take
  623. operator arguments. The resulting operators are expanded as power
  624. series.
  625. Power Series
  626. Power series are often needed in mathematical computations.
  627. There are a few primitive power series, and new power series can be
  628. formed by operations on existing power series. If <p> is an
  629. expression denoting a power series then:
  630. (series? <any>) ==> <boolean>
  631. (type <p>) ==> *series*
  632. Series can be constructed in a variety of ways. If one has a
  633. procedure that implements the general form of a coefficient then this
  634. gives the most direct method:
  635. For example, the n-th coefficient of the power series for the
  636. exponential function is 1/n!. We can write this as
  637. (series:generate (lambda (n) (/ 1 (factorial n))))
  638. Sometimes we have a finite number of coefficients and we want to make
  639. a series with those given coefficients (assuming zeros for all
  640. higher-order coefficients). We can do this with the extensional
  641. constructor. Thus
  642. (series 1 2 3 4 5)
  643. is the series whose first coefficients are the arguments given.
  644. There are some nice initial series:
  645. series:zero
  646. is the series of all zero coefficients
  647. series:one
  648. is the series of all zero coefficients except for the first
  649. (constant), which is one.
  650. (constant-series c)
  651. is the series of all zero coefficients except for the first
  652. (constant), which is the given constant.
  653. ((binomial-series a) x) = (1+x)^a
  654. In addition, we provide the following initial series:
  655. exp-series, cos-series, sin-series, tan-series,
  656. cosh-series, sinh-series, atan-series
  657. Series can also be formed by processes such as exponentiation of an
  658. operator or a square matrix. For example, if f is any function of one
  659. argument, and if x and dx are numerical expressions, then this
  660. expression denotes the Taylor expansion of f around x.
  661. (((exp (* dx D)) f) x)
  662. = (+ (f x) (* dx ((D f) x)) (* 1/2 (expt dx 2) (((expt D 2) f) x)) ...)
  663. We often want to show a few (n) terms of a series:
  664. (series:print <p> <n>)
  665. For example, to show eight coefficients of the cosine series we might
  666. write:
  667. (series:print (((exp D) cos) 0) 8)
  668. 1
  669. 0
  670. -1/2
  671. 0
  672. 1/24
  673. 0
  674. -1/720
  675. 0
  676. ;Value: ...
  677. We can make the sequence of partial sums of a series.
  678. The sequence is a stream, not a series.
  679. (stream:for-each write-line (partial-sums (((exp D) cos) 0.)) 10)
  680. 1.
  681. 1.
  682. .5
  683. .5
  684. .5416666666666666
  685. .5416666666666666
  686. .5402777777777777
  687. .5402777777777777
  688. .5403025793650793
  689. .5403025793650793
  690. ;Value: ...
  691. Note that the sequence of partial sums approaches (cos 1).
  692. (cos 1)
  693. ;Value: .5403023058681398
  694. In addition to the special operations for series, the following
  695. generic operations are defined for series
  696. negate, invert, +, -, *, /, expt
  697. Generic extensions
  698. In addition to ordinary generic operations, there are a few important
  699. generic extensions. These are operations that apply to a whole class
  700. of datatypes, because they are defined in terms of more primitive
  701. generic operations.
  702. (identity x) = x
  703. (square x) = (* x x)
  704. (cube x) = (* x x x)
  705. (arg-shift <f> <k1> ... <kn>)
  706. (arg-scale <f> <k1> ... <kn>)
  707. Takes a function, f, of n arguments and returns a new function of
  708. n arguments that is the old function with arguments shifted or scaled
  709. by the given offsets or factors:
  710. ((arg-shift square 3) 4) ==> 49
  711. ((arg-scale square 3) 4) ==> 144
  712. (sigma <f> <lo> <hi>)
  713. Produces the sum of the values of the function f when called with
  714. the numbers between lo and hi inclusive.
  715. (sigma square 1 5) ==> 55
  716. (sigma identity 1 100) ==> 5050
  717. (compose <f1> ... <fn>)
  718. Produces a procedure that computes composition of the functions
  719. represented by the procedures that are its arguments.
  720. ((compose square sin) 3) ==> .01991485667481699
  721. (square (sin 3)) ==> .01991485667481699
  722. Differentiation
  723. In this system we work in terms of functions; the derivative of a
  724. function is a function. The procedure for producing the derivative of
  725. a function is named "derivative", though we also use the single-letter
  726. symbol "D" to denote this operator.
  727. We start with functions of a real variable to a real variable:
  728. ((D cube) 5) ==> 75
  729. It is possible to compute the derivative of any composition of
  730. functions,
  731. ((D (+ (square sin) (square cos))) 3) ==> 0
  732. (define (unity1 x)
  733. (+ (square (sin x)) (square (cos x))))
  734. ((D unity1) 4) ==> 0
  735. (define unity2
  736. (+ (compose square sin) (compose square cos)))
  737. ((D unity2) 4) ==> 0
  738. except that the computation of the value of the function may not
  739. require evaluating a conditional.
  740. These derivatives are not numerical approximations estimated by some
  741. limiting process. However, as usual, some of the procedures that are
  742. used to compute the derivative may be numerical approximations.
  743. ((D sin) 3) ==> -.9899924966004454
  744. (cos 3) ==> -.9899924966004454
  745. Of course, not all functions are simple compositions of univariate
  746. real-valued functions of real arguments. Some functions have multiple
  747. arguments, and some have structured values.
  748. First we consider the case of multiple arguments. If a function maps
  749. several real arguments to a real value, then its derivative is a
  750. representation of the gradient of that function -- we must be able to
  751. multiply the derivative by an incremental up tuple to get a linear
  752. approximation to an increment of the function, if we take a step
  753. described by the incremental up tuple. Thus the derivative must be a
  754. down tuple of partial derivatives. We will talk about computing
  755. partial derivatives later.
  756. Let's understand this in a simple case. Let f(x,y) = x^3 y^5.
  757. (define (f x y)
  758. (* (expt x 3) (expt y 5)))
  759. Then Df(x,y) is a down tuple with components [3 x^2 y^5, 5 x^3 y^4].
  760. (simplify ((D f) 2 3)) ==> (down 2916 3240)
  761. And the inner product with an incremental up tuple is the appropriate
  762. increment.
  763. (* ((D f) 2 3) (up .1 .2)) ==> 939.6
  764. This is exactly the same as if we had a function of one up-tuple
  765. argument. Of course, we must supply an up-tuple to the derivative in
  766. this case:
  767. (define (g v)
  768. (let ((x (ref v 0))
  769. (y (ref v 1)))
  770. (* (expt x 3) (expt y 5))))
  771. (simplify ((D g) (up 2 3))) ==> (down 2916 3240)
  772. (* ((D g) (up 2 3)) (up .1 .2)) ==> 939.6
  773. Things get somewhat more complicated when we have functions with
  774. multiple structured arguments. Consider a function whose first
  775. argument is an up tuple and whose second argument is a number, which adds
  776. the cube of the number to the dot product of the up tuple with itself.
  777. (define (h v x)
  778. (+ (cube x) (square v)))
  779. What is its derivative? Well, it had better be something that can
  780. multiply an increment in the arguments, to get an increment in the
  781. function. The increment in the first argument is an incremental up
  782. tuple. The increment in the second argument is a small number. Thus
  783. we need a down-tuple of two parts, a row of the values of the partial
  784. derivatives with respect to each component of the first argument and
  785. the value of the partial derivative with respect to the second
  786. argument. This is easier to see symbolically:
  787. (simplify ((derivative h) (up 'a 'b) 'c))
  788. ==> (down (down (* 2 a) (* 2 b)) (* 3 (expt c 2)))
  789. The idea generalizes.
  790. Partial derivatives are just the components of the derivative of a
  791. function that takes multiple arguments or structured arguments or both.
  792. Thus, a partial derivative of a function is a composition of a
  793. component selector and the derivative of that function.
  794. The procedure that makes a partial derivative operator given a
  795. selection chain is named "partial".
  796. (simplify (((partial 0) h) (up 'a 'b) 'c))
  797. ==> (down (* 2 a) (* 2 b))
  798. (simplify (((partial 1) h) (up 'a 'b) 'c))
  799. ==> (* 3 (expt c 2))
  800. (simplify (((partial 0 0) h) (up 'a 'b) 'c))
  801. ==> (* 2 a)
  802. (simplify (((partial 0 1) h) (up 'a 'b) 'c))
  803. ==> (* 2 b)
  804. This naming scheme is consistent, except for one special case. If a
  805. function takes exactly one up-tuple argument then one level of the
  806. hierarchy is eliminated, allowing one to naturally write:
  807. (simplify ((D g) (up 'a 'b)))
  808. ==> (down (* 3 (expt a 2) (expt b 5))
  809. (* 5 (expt a 3) (expt b 4)))
  810. (simplify (((partial 0) g) (up 'a 'b)))
  811. ==> (* 3 (expt a 2) (expt b 5))
  812. (simplify (((partial 1) g) (up 'a 'b)))
  813. ==> (* 5 (expt a 3) (expt b 4))
  814. Symbolic Extensions
  815. All primitive mathematical procedures are extended to be generic over
  816. symbolic arguments. When given symbolic arguments these procedures
  817. construct a symbolic representation of the required answer. There are
  818. primitive literal numbers. We can make a literal number that is
  819. represented as an expression by the symbol "a" as follows:
  820. (literal-number 'a) ==> (*number* (expression a))
  821. The literal number is an object that has the type of a number, but its
  822. representation as an expression is the symbol "a".
  823. (type (literal-number 'a)) ==> *number*
  824. (expression (literal-number 'a)) ==> a
  825. Literal numbers may be manipulated, using the generic operators.
  826. (sin (+ (literal-number 'a) 3))
  827. ==> (*number* (expression (sin (+ 3 a))))
  828. To make it easy to work with literal numbers, Scheme symbols are
  829. interpreted by the generic operations as literal numbers.
  830. (sin (+ 'a 3)) ==> (*number* (expression (sin (+ 3 a))))
  831. We can extract the numerical expression from its type-tagged
  832. representation with the "expression" procedure
  833. (expression (sin (+ 'a 3))) ==> (sin (+ 3 a))
  834. but usually we really don't want to look at raw expressions
  835. (expression ((D cube) 'x)) ==> (+ (* x (+ x x)) (* x x))
  836. because they are unsimplified. We will talk about simplification
  837. later, but "simplify" will usually give a better form,
  838. (simplify ((D cube) 'x)) ==> (* 3 (expt x 2))
  839. and "print-expression", which incorporates "simplify", will attempt to
  840. format the expression nicely.
  841. Besides literal numbers, there are other literal mathematical objects,
  842. such as vectors and matrices, that can be constructed with appropriate
  843. constructors:
  844. (literal-vector <name>)
  845. (literal-down-tuple <name>)
  846. (literal-up-tuple <name>)
  847. (literal-matrix <name>)
  848. (literal-function <name>)
  849. There are currently no simplifiers that can manipulate literal objects
  850. of these types into a nice form.
  851. We often need literal functions in our computations. The object
  852. produced by "(literal-function 'f)" acts as a function of one real
  853. variable that produces a real result. The name (expression
  854. representation) of this function is the symbol "f". This literal
  855. function has a derivative, which is the literal function with
  856. expression representation "(D f)". Thus, we may make up and
  857. manipulate expressions involving literal functions:
  858. (expression ((literal-function 'f) 3)) ==> (f 3)
  859. (simplify ((D (* (literal-function 'f) cos)) 'a))
  860. ==> (+ (* (cos a) ((D f) a)) (* -1 (f a) (sin a)))
  861. (simplify ((compose (D (* (literal-function 'f) cos))
  862. (literal-function 'g))
  863. 'a))
  864. ==> (+ (* (cos (g a)) ((D f) (g a)))
  865. (* -1 (f (g a)) (sin (g a))))
  866. We may use such a literal function anywhere that an explicit function
  867. of the same type may be used.
  868. The Literal function descriptor language.
  869. We can also specify literal functions with multiple arguments and with
  870. structured arguments and results. For example, to denote a
  871. literal function named g that takes two real arguments and
  872. returns a real value ( g:RXR -> R ) we may write:
  873. (define g (literal-function 'g (-> (X Real Real) Real)))
  874. (print-expression (g 'x 'y))
  875. (g x y)
  876. The descriptors for literal functions look like prefix versions of
  877. the standard function types. Thus, we write:
  878. (literal-function 'g (-> (X Real Real) Real))
  879. The base types are the real numbers, designated by "Real". We
  880. will later extend the system to include complex numbers,
  881. designated by "Complex".
  882. Types can be combined in several ways. The cartesian product of
  883. types is designated by:
  884. (X <type1> <type2> ...)
  885. We use this to specify an argument tuple of objects of the given
  886. types arranged in the given order.
  887. Similarly, we can specify an up tuple or a down tuple with:
  888. (UP <type1> <type2> ...)
  889. (DOWN <type1> <type2> ...)
  890. We can also specify a uniform tuple of a number of elements of the
  891. same type using:
  892. (UP* <type> [n])
  893. (DOWN* <type> [n])
  894. So we can write specifications of more general functions:
  895. (define H
  896. (literal-function 'H
  897. (-> (UP Real (UP Real Real) (DOWN Real Real)) Real)))
  898. (define s (up 't (up 'x 'y) (down 'p_x 'p_y)))
  899. (print-expression (H s))
  900. (H (up t (up x y) (down p_x p_y)))
  901. (print-expression ((D H) s))
  902. (down
  903. (((partial 0) H) (up t (up x y) (down p_x p_y)))
  904. (down (((partial 1 0) H) (up t (up x y) (down p_x p_y)))
  905. (((partial 1 1) H) (up t (up x y) (down p_x p_y))))
  906. (up (((partial 2 0) H) (up t (up x y) (down p_x p_y)))
  907. (((partial 2 1) H) (up t (up x y) (down p_x p_y)))))
  908. Numerical Methods
  909. There are a great variety of numerical methods that are coded in
  910. Scheme and are available in the Scmutils system. Here we give a
  911. short description of a few that are needed in the Mechanics course.
  912. Univariate Minimization
  913. One may search for local minima of a univariate function in a number
  914. of ways. The procedure "minimize", used as follows,
  915. (minimize f lowx highx)
  916. is the default minimizer. It searches for a minimum of the univariate
  917. function f in the region of the argument delimited by the values lowx
  918. and highx. Our univariate optimization programs typically return a
  919. list (x fx ...) where x is the argument at which the extremal value
  920. fx is achieved. The following helps destructure this list.
  921. (define extremal-arg car)
  922. (define extremal-value cadr)
  923. The procedure minimize uses Brent's method (don't ask how it works!).
  924. The actual procedure in the system is:
  925. (define (minimize f lowx highx)
  926. (brent-min f lowx highx brent-error))
  927. (define brent-error 1.0e-5)
  928. We personally like Brent's algorithm for univariate minimization, as
  929. found on pages 79-80 of his book "Algorithms for Minimization Without
  930. Derivatives". It is pretty reliable and pretty fast, but we cannot
  931. explain how it works. The parameter "eps" is a measure of the error
  932. to be tolerated.
  933. (brent-min f a b eps)
  934. (brent-max f a b eps)
  935. Thus, for example, if we make a function that is a quadratic
  936. polynomial with a minimum of 1 at 3,
  937. (define foo (Lagrange-interpolation-function '(2 1 2) '(2 3 4)))
  938. we can find the minimum quickly (in five iterations) with Brent's
  939. method:
  940. (brent-min foo 0 5 1e-2) ==> (3. 1. 5)
  941. Pretty good, eh?
  942. Golden Section search is sometimes an effective method, but it must be
  943. supplied with a convergence-test procedure, called good-enuf?.
  944. (golden-section-min f lowx highx good-enuf?)
  945. (golden-section-max f lowx highx good-enuf?)
  946. The predicate good-enuf? takes seven arguments. It is true if
  947. convergence has occurred. The arguments to good-enuf? are
  948. lowx, minx, highx, flowx, fminx, fhighx, count
  949. where lowx and highx are values of the argument that the minimum has
  950. been localized to be between, and minx is the argument currently being
  951. tendered. The values flowx, fminx, and fhighx are the values of the
  952. function at the corresponding points; count is the number of
  953. iterations of the search. For example, suppose we want to squeeze the
  954. minimum of the polynomial function foo to a difference of argument
  955. positions of .001.
  956. (golden-section-min foo 0 5
  957. (lambda (lowx minx highx flowx fminx fhighx count)
  958. (< (abs (- highx lowx)) .001)))
  959. ==> (3.0001322139227034 1.0000000174805213 17)
  960. This is not so nice. It took 17 iterations and we didn't get anywhere
  961. near as good an answer as we got with Brent. On the other hand, we
  962. understand how this works!
  963. We can find a number of local minima of a multimodal function using a
  964. search that divides the initial interval up into a number of
  965. subintervals and then does Golden Section search in each interval.
  966. For example, we may make a quartic polynomial:
  967. (define bar
  968. (Lagrange-interpolation-function '(2 1 2 0 3) '(2 3 4 5 6)))
  969. Now we can look for local minima of this function in the range -10 to
  970. +10, breaking the region up into 15 intervals as follows:
  971. (local-minima bar -10 10 15 .0000001)
  972. ==> ((5.303446964995252 -.32916549541536905 18)
  973. (2.5312725379910592 .42583263999526233 18))
  974. The search has found two local minima, each requiring 18 iterations to
  975. localize. The local maxima are also worth chasing:
  976. (local-maxima bar -10 10 15 .0000001)
  977. ==> ((3.8192274368217713 2.067961961032311 17)
  978. (10 680 31)
  979. (-10 19735 29))
  980. Here we found three maxima, but two are at the endpoints of the
  981. search.
  982. Multivariate minimization
  983. The default multivariate minimizer is multidimensional-minimize, which
  984. is a heavily sugared call to the Nelder-Mead minimizer. The function
  985. f being minimized is a function of a Scheme list. The search starts
  986. at the given initial point, and proceeds to search for a point that is
  987. a local minimum of f. When the process terminates, the continuation
  988. function is called with three arguments. The first is true if the
  989. process converged and false if the minimizer gave up. The second is
  990. the actual point that the minimizer has found, and the third is the
  991. value of the function at that point.
  992. (multidimensional-minimize f initial-point continuation)
  993. Thus, for example, to find a minimum of the function
  994. (define (baz v)
  995. (* (foo (ref v 0)) (bar (ref v 1))))
  996. made from the two polynomials we constructed before, near the point
  997. (4 3), we can try:
  998. (multidimensional-minimize baz '(4 3) list)
  999. ==> (#t #(2.9999927603607803 2.5311967755369285) .4258326193383596)
  1000. Indeed, a minimum was found, at about #(3 2.53) with value .4258...
  1001. Of course, we usually need to have more control of the minimizer when
  1002. searching a large space. Without the sugar, the minimizers act on
  1003. functions of Scheme vectors (not lists, as above). The simplest
  1004. minimizer is the Nelder Mead downhill simplex method, a slow but
  1005. reasonably reliable method.
  1006. (nelder-mead f start-pt start-step epsilon maxiter)
  1007. We give it a function, a starting point, a starting step, a measure of
  1008. the acceptable error, and a maximum number of iterations we want it to
  1009. try before giving up. It returns a message telling whether it found a
  1010. minimum, the place and value of the purported minimum, and the number
  1011. of iterations it performed. For example, we can allow the algorithm
  1012. an initial step of 1, and it will find the minimum after 21 steps
  1013. (nelder-mead baz #(4 3) 1 .00001 100)
  1014. ==> (ok (#(2.9955235887900926 2.5310866303625517) . .4258412014077558) 21)
  1015. or we can let it take steps of size 3, which will allow it to wander
  1016. off into oblivion
  1017. (nelder-mead baz #(4 3) 3 .00001 100)
  1018. ==> (maxcount
  1019. (#(-1219939968107.8127 5.118445485647498) . -2.908404414767431e23)
  1020. 100)
  1021. The default minimizer uses the values:
  1022. (define nelder-start-step .01)
  1023. (define nelder-epsilon 1.0e-10)
  1024. (define nelder-maxiter 1000)
  1025. If we know more than just the function to minimize we can use that
  1026. information to obtain a better minimum faster than with the
  1027. Nelder-Mead algorithm.
  1028. In the Davidon-Fletcher-Powell algorithm, f is a function of a single
  1029. vector argument that returns a real value to be minimized, g is the
  1030. vector-valued gradient of f, x0 is a (vector) starting point, and
  1031. estimate is an estimate of the minimum function value. ftol is the
  1032. convergence criterion: the search is stopped when the relative change
  1033. in f falls below ftol or when the maximum number of iterations is
  1034. exceeded.
  1035. The procedure dfp uses Davidon's line search algorithm, which is
  1036. efficient and would be the normal choice, but dfp-brent uses Brent's
  1037. line search, which is less efficient but more reliable. The procedure
  1038. bfgs, due to Broyden, Fletcher, Goldfarb, and Shanno, is said to be
  1039. more immune than dfp to imprecise line search.
  1040. (dfp f g x0 estimate ftol maxiter)
  1041. (dfp-brent f g x0 estimate ftol maxiter)
  1042. (bfgs f g x0 estimate ftol maxiter)
  1043. These are all used in the same way:
  1044. (dfp baz (compose down->vector (D baz)) #(4 3) .4 .00001 100)
  1045. ==> (ok (#(2.9999717563962305 2.5312137271310036) . .4258326204265246) 4)
  1046. They all converge very fast, four iterations in this case.
  1047. Quadrature
  1048. Quadrature is the process of computing definite integrals of
  1049. functions. A sugared default procedure for quadrature is provided,
  1050. and we hope that it is adequate for most purposes.
  1051. (definite-integral <integrand>
  1052. <lower-limit> <upper-limit>
  1053. [compile? #t/#f])
  1054. The integrand must be a real-valued function of a real argument. The
  1055. limits of integration are specified as additional arguments. There is
  1056. an optional fourth argument that can be used to suppress compilation
  1057. of the integrand, thus forcing it to be interpreted. This is usually
  1058. to be ignored.
  1059. Because of the many additional features of numerical quadrature that
  1060. can be independently controlled we provide a special uniform interface
  1061. for a variety of mechanisms for computing the definite integrals of
  1062. functions. The quadrature interface is organized around
  1063. definite-integrators. A definite integrator is a message-acceptor
  1064. that organizes the options and defaults that are necessary to specify
  1065. an integration plan.
  1066. To make an integrator, and to give it the name I, do:
  1067. (define I (make-definite-integrator))
  1068. You may have as many definite integrators outstanding as you like. A
  1069. definite integrator can be given the following commands:
  1070. (I 'integrand)
  1071. returns the integrand assigned to the integrator I.
  1072. (I 'set-integrand! <f>)
  1073. sets the integrand of I to the procedure <f>.
  1074. The integrand must be a real-valued function of one real argument.
  1075. (I 'lower-limit)
  1076. returns the lower integration limit.
  1077. (I 'set-lower-limit! <ll>)
  1078. sets the lower integration limit of the integrator to <ll>.
  1079. (I 'upper-limit)
  1080. returns the upper integration limit.
  1081. (I 'set-upper-limit! <ul>)
  1082. sets the upper integration limit of the integrator to <ul>.
  1083. The limits of integration may be numbers, but they may also be the
  1084. special values :+infinity or :-infinity.
  1085. (I 'integral)
  1086. performs the integral specified and returns its value.
  1087. (I 'error)
  1088. returns the value of the allowable error of integration.
  1089. (I 'set-error! <epsilon>)
  1090. sets the allowable error of integration to <epsilon>.
  1091. The default value of the error is 1.0e-11.
  1092. (I 'method)
  1093. returns the integration method to be used.
  1094. (I 'set-method! <method>)
  1095. sets the integration method to be used to <method>.
  1096. The default method is open-open.
  1097. Other methods are open-closed, closed-open, closed-closed,
  1098. romberg, and bulirsch-stoer.
  1099. The quadrature methods are all based on extrapolation. The Romberg
  1100. method is a Richardson extrapolation of the trapezoid rule. It is
  1101. usually worse than the other methods, which are adaptive rational
  1102. function extrapolations of trapezoid and Euler-MacLaurin formulas.
  1103. Closed integrators are best if we can include the endpoints of
  1104. integration. This cannot be done if the endpoint is singular: thus
  1105. the open formulas. Also, open formulas are forced when we have
  1106. infinite limits.
  1107. Let's do an example; it is as easy as pi!
  1108. (define witch
  1109. (lambda (x)
  1110. (/ 4.0 (+ 1.0 (* x x)))))
  1111. (define integrator (make-definite-integrator))
  1112. (integrator 'set-method! 'romberg)
  1113. (integrator 'set-error! 1e-12)
  1114. (integrator 'set-integrand! witch)
  1115. (integrator 'set-lower-limit! 0.0)
  1116. (integrator 'set-upper-limit! 1.0)
  1117. (integrator 'integral)
  1118. ;Value: 3.141592653589793
  1119. Programming with optional arguments
  1120. Definite integrators are so common and important that, to make
  1121. the programming a bit easier, we allow one to be set up slightly
  1122. differently. In particular, we can specify the important parameters
  1123. as optional arguments to the maker. The specification of the maker
  1124. is:
  1125. (make-definite-integrator #!optional integrand
  1126. lower-limit upper-limit
  1127. allowable-error
  1128. method)
  1129. So, for example, we can investigate the following integral easily:
  1130. (define (foo n)
  1131. ((make-definite-integrator
  1132. (lambda (x) (expt (log (/ 1 x)) n))
  1133. 0.0 1.0
  1134. 1e-12 'open-closed)
  1135. 'integral))
  1136. (foo 0)
  1137. ;Value: 1.
  1138. (foo 1)
  1139. ;Value: .9999999999979357
  1140. (foo 2)
  1141. ;Value: 1.9999999999979101
  1142. (foo 3)
  1143. ;Value: 5.99999999999799
  1144. (foo 4)
  1145. ;Value: 23.999999999997893
  1146. (foo 5)
  1147. ;Value: 119.99999999999828
  1148. Do you recognize this function? What is (foo 6)?
  1149. ODE Initial Value Problems
  1150. Initial-value problems for ordinary differential equations can be
  1151. attacked by a great many specialized methods. Numerical analysts
  1152. agree that there is no best method. Each has situations where it
  1153. works best and other situations where it fails or is not very good.
  1154. Also, each technique has numerous parameters, options and defaults.
  1155. The default integration method is Bulirsch-Stoer. Usually, the
  1156. Bulirsch-Stoer algorithm will give better and faster results than
  1157. others, but there are applications where a quality-controlled
  1158. trapezoidal method or a quality-controlled 4th order Runge-Kutta
  1159. method is appropriate. The algorithm used can be set by the user:
  1160. (set-ode-integration-method! 'qcrk4)
  1161. (set-ode-integration-method! 'bulirsch-stoer)
  1162. (set-ode-integration-method! 'qcctrap2)
  1163. (set-ode-integration-method! 'explicit-gear)
  1164. The integration methods all automatically select the step sizes to
  1165. maintain the error tolerances. But if we have an exceptionally stiff
  1166. system, or a bad discontinuity, for most integrators the step size
  1167. will go down to zero and the integrator will make no progress. If you
  1168. encounter such a disaster try explicit-gear.
  1169. We have programs that implement other methods of integration, such as
  1170. an implicit version of Gear's stiff solver, and we have a whole
  1171. language for describing error control, but these features are not
  1172. available through this interface.
  1173. The two main interfaces are "evolve" and "state-advancer".
  1174. The procedure "state-advancer" is used to advance the state of a
  1175. system according to a system of first order ordinary differential
  1176. equations for a specified interval of the independent variable. The
  1177. state may have arbitrary structure, however we require that the first
  1178. component of the state is the independent variable.
  1179. The procedure "evolve" uses "state-advancer" to repeatedly advance the
  1180. state of the system by a specified interval, examining aspects of the
  1181. state as the evolution proceeds.
  1182. In the following descriptions we assume that "sysder" is a user
  1183. provided procedure that gives the parametric system derivative. The
  1184. parametric system derivative takes parameters, such as a mass or
  1185. length, and produces a procedure that takes a state and returns the
  1186. derivative of the state. Thus, the system derivative takes arguments
  1187. in the following way:
  1188. ((sysder parameter-1 ... parameter-n) state)
  1189. There may be no parameters, but then the system derivative procedure
  1190. must still be called with no arguments to produce the procedure that
  1191. takes states to the derivative of the state.
  1192. For example, if we have the differential equations for an ellipse
  1193. centered on the origin and aligned with the coordinate axes:
  1194. Dx(t) = -a y(t)
  1195. Dy(t) = +b x(t)
  1196. We can make a parametric system derivative for this system as follows:
  1197. (define ((ellipse-sysder a b) state)
  1198. (let ((t (ref state 0))
  1199. (x (ref state 1))
  1200. (y (ref state 2)))
  1201. (up 1 ; dt/dt
  1202. (* -1 a y) ; dx/dt
  1203. (* b x)))) ; dy/dt
  1204. The procedure "evolve" is invoked as follows:
  1205. ((evolve sysder . parameters) initial-state monitor dt final-t eps)
  1206. The user provides a procedure (here named "monitor") that takes the
  1207. state as an argument. The monitor is passed successive states of the
  1208. system as the evolution proceeds. For example it might be used to
  1209. print the state or to plot some interesting function of the state.
  1210. The interval between calls to the monitor is the argument "dt". The
  1211. evolution stops when the independent variable is larger than
  1212. "final-t". The parameter "eps" specifies the allowable error.
  1213. For example, we can draw our ellipse in a plotting window:
  1214. (define win (frame -2 2 -2 2 500 500))
  1215. (define ((monitor-xy win) state)
  1216. (plot-point win (ref state 1) (ref state 2)))
  1217. ((evolve ellipse-sysder 0.5 2.)
  1218. (up 0. .5 .5) ; initial state
  1219. (monitor-xy win) ; the monitor
  1220. 0.01 ; plotting step
  1221. 10.) ; final value of t
  1222. To take more control of the integration one may use the state advancer
  1223. directly.
  1224. The procedure "state-advancer" is invoked as follows:
  1225. ((state-advancer sysder . parameters) start-state dt eps)
  1226. The state advancer will give a new state resulting from evolving the
  1227. start state by the increment dt of the independent variable. The
  1228. allowed local truncation error is eps.
  1229. For example,
  1230. ((state-advancer ellipse-sysder 0.5 2.0) (up 0. .5 .5) 3.0 1e-10)
  1231. ;Value: #(3. -.5302762503146702 -.3538762402420404)
  1232. For a more complex example that shows the use of substructure in the
  1233. state, consider a two-dimensional harmonic oscillator:
  1234. (define ((harmonic-sysder m k) state)
  1235. (let ((q (coordinate state)) (p (momentum state)))
  1236. (let ((x (ref q 0)) (y (ref q 1))
  1237. (px (ref p 0)) (py (ref p 1)))
  1238. (up 1 ;dt/dt
  1239. (up (/ px m) (/ py m)) ;dq/dt
  1240. (down (* -1 k x) (* -1 k y)) ;dp/dt
  1241. ))))
  1242. We could monitor the energy (the Hamiltonian):
  1243. (define ((H m k) state)
  1244. (+ (/ (square (momentum state))
  1245. (* 2 m))
  1246. (* 1/2 k
  1247. (square (coordinate state)))))
  1248. (let ((m 0.5) (k 2.0))
  1249. ((evolve harmonic-sysder m k)
  1250. (up 0. ; initial state
  1251. (up .5 .5)
  1252. (down 0.1 0.0))
  1253. (lambda (state) ; the monitor
  1254. (write-line
  1255. (list (time state) ((H m k) state))))
  1256. 1.0 ; output step
  1257. 10.))
  1258. (0. .51)
  1259. (1. .5099999999999999)
  1260. (2. .5099999999999997)
  1261. (3. .5099999999999992)
  1262. (4. .509999999999997)
  1263. (5. .509999999999997)
  1264. (6. .5099999999999973)
  1265. (7. .5099999999999975)
  1266. (8. .5100000000000032)
  1267. (9. .5100000000000036)
  1268. (10. .5100000000000033)
  1269. Constants
  1270. There are a few constants that we find useful, and are thus provided
  1271. in Scmutils. Many constants have multiple names.
  1272. There are purely mathematical constants:
  1273. (define zero 0) (define :zero zero)
  1274. (define one 1) (define :one one)
  1275. (define -one -1) (define :-one -one)
  1276. (define two 2) (define :two two)
  1277. (define three 3) (define :three three)
  1278. (define pi (* 4 (atan 1 1))) (define :pi pi)
  1279. (define -pi (- pi)) (define :+pi pi)
  1280. (define :-pi -pi)
  1281. (define pi/6 (/ pi 6)) (define :pi/6 pi/6)
  1282. (define -pi/6 (- pi/6)) (define :+pi/6 pi/6)
  1283. (define :-pi/6 -pi/6)
  1284. (define pi/4 (/ pi 4)) (define :pi/4 pi/4)
  1285. (define -pi/4 (- pi/4)) (define :+pi/4 pi/4)
  1286. (define :-pi/4 -pi/4)
  1287. (define pi/3 (/ pi 3)) (define :pi/3 pi/3)
  1288. (define -pi/3 (- pi/3)) (define :+pi/3 pi/3)
  1289. (define :-pi/3 -pi/3)
  1290. (define pi/2 (/ pi 2)) (define :pi/2 pi/2)
  1291. (define -pi/2 (- pi/2)) (define :+pi/2 pi/2)
  1292. (define :-pi/2 -pi/2)
  1293. (define 2pi (+ pi pi)) (define :2pi 2pi)
  1294. (define -2pi (- 2pi)) (define :+2pi 2pi)
  1295. (define :-2pi -2pi)
  1296. For numerical analysis, we provide the smallest number that, when added
  1297. to 1.0, makes a difference.
  1298. (define *machine-epsilon*
  1299. (let loop ((e 1.0))
  1300. (if (= 1.0 (+ e 1.0))
  1301. (* 2 e)
  1302. (loop (/ e 2)))))
  1303. (define *sqrt-machine-epsilon*
  1304. (sqrt *machine-epsilon*))
  1305. Useful units conversions
  1306. (define arcsec/radian
  1307. (/ (* 60 60 360) :+2pi))
  1308. ;;; Physical Units
  1309. (define kg/amu
  1310. 1.661e-27)
  1311. (define joule/eV
  1312. 1.602e-19)
  1313. (define joule/cal
  1314. 4.1840)
  1315. ;;; Universal Physical constants
  1316. (define light-speed ;c
  1317. 2.99792458e8) ;meter/sec
  1318. (define :c light-speed)
  1319. (define esu/coul
  1320. (* 10 light-speed))
  1321. (define gravitation ;G
  1322. 6.6732e-11) ;(Newton*meter^2)/kg^2
  1323. (define :G gravitation)
  1324. (define electron-charge ;e
  1325. 1.602189e-19) ;Coulomb
  1326. ;;=4.80324e-10 esu
  1327. (define :e electron-charge)
  1328. (define electron-mass ;m_e
  1329. 9.10953e-31) ;kg
  1330. (define :m_e electron-mass)
  1331. (define proton-mass ;m_p
  1332. 1.672649e-27) ;kg
  1333. (define :m_p proton-mass)
  1334. (define neutron-mass ;m_n
  1335. 1.67492e-27) ;kg
  1336. (define :m_n neutron-mass)
  1337. (define planck ;h
  1338. 6.62618e-34) ;Joule*sec
  1339. (define :h planck)
  1340. (define h-bar ;\bar{h}
  1341. (/ planck :+2pi)) ;Joule*sec
  1342. (define :h-bar h-bar)
  1343. (define permittivity ;\epsilon_0
  1344. 8.85419e-12) ;Coulomb/(volt*meter)
  1345. (define :epsilon_0 permittivity)
  1346. (define boltzman ;k
  1347. 1.38066e-23) ;Joule/Kelvin
  1348. (define :k boltzman)
  1349. (define avogadaro ;N_A
  1350. 6.02217e23) ;1/mol
  1351. (define :N_A avogadaro)
  1352. (define faraday ;F
  1353. 9.64867e4) ;Coulomb/mol
  1354. (define gas ;R
  1355. 8.31434) ;Joule/(mol*Kelvin)
  1356. (define :R gas)
  1357. (define gas-atm ;R
  1358. 8.2054e-2) ;(liter*atm)/(mol*Kelvin)
  1359. (define radiation
  1360. (/ (* 8 (expt pi 5) (expt boltzman 4))
  1361. (* 15 (expt light-speed 3) (expt planck 3))))
  1362. (define stefan-boltzman
  1363. (/ (* light-speed radiation) 4))
  1364. (define thomson-cross-section
  1365. 6.652440539306967e-29) ;m^2
  1366. ;;; (define thomson-cross-section
  1367. ;;; (/ (* 8 pi (expt electron-charge 4))
  1368. ;;; (* 3 (expt electron-mass 2) (expt light-speed 4)))
  1369. ;;; in the SI version of ESU.
  1370. ;;; Observed and measured
  1371. (define background-temperature ;Cobe 1994
  1372. 2.726) ;+-.005 Kelvin
  1373. ;;; Thermodynamic
  1374. (define water-freezing-temperature
  1375. 273.15) ;Kelvin
  1376. (define room-temperature
  1377. 300.00) ;Kelvin
  1378. (define water-boiling-temperature
  1379. 373.15) ;Kelvin
  1380. ;;; Astronomical
  1381. (define m/AU
  1382. 1.49597892e11)
  1383. (define m/pc
  1384. (/ m/AU (tan (/ 1 arcsec/radian))))
  1385. (define AU/pc
  1386. (/ 648000 pi)) ;=(/ m/pc m/AU)
  1387. (define sec/sidereal-yr ;1900
  1388. 3.1558149984e7)
  1389. (define sec/tropical-yr ;1900
  1390. 31556925.9747)
  1391. (define m/lyr
  1392. (* light-speed sec/tropical-yr))
  1393. (define AU/lyr (/ m/lyr m/AU))
  1394. (define lyr/pc (/ m/pc m/lyr))
  1395. (define m/parsec
  1396. 3.084e16)
  1397. (define m/light-year
  1398. 9.46e15)
  1399. (define sec/day
  1400. 86400)
  1401. ;;; Earth
  1402. (define earth-orbital-velocity
  1403. 29.8e3) ;meter/sec
  1404. (define earth-mass
  1405. 5.976e24) ;kg
  1406. (define earth-radius
  1407. 6371e3) ;meters
  1408. (define earth-surface-area
  1409. 5.101e14) ;meter^2
  1410. (define earth-escape-velocity
  1411. 11.2e3) ;meter/sec
  1412. (define earth-grav-accel ;g
  1413. 9.80665) ;meter/sec^2
  1414. (define earth-mean-density
  1415. 5.52e3) ;kg/m^3
  1416. ;;; This is the average amount of
  1417. ;;; sunlight available at Earth on an
  1418. ;;; element of surface normal to a
  1419. ;;; radius from the sun. The actual
  1420. ;;; power at the surface of the earth,
  1421. ;;; for a panel in full sunlight, is
  1422. ;;; not very different, because, in
  1423. ;;; absence of clouds the atmosphere
  1424. ;;; is quite transparent. The number
  1425. ;;; differs from the obvious geometric
  1426. ;;; number
  1427. ;;; (/ sun-luminosity (* 4 :pi (square m/AU)))
  1428. ;;; ;Value: 1360.454914748201
  1429. ;;; because of the eccentricity of the
  1430. ;;; Earth's orbit.
  1431. (define earth-incident-sunlight
  1432. 1370.) ;watts/meter^2
  1433. (define vol@stp
  1434. 2.24136e-2) ;(meter^3)/mol
  1435. (define sound-speed@stp ;c_s
  1436. 331.45) ;meter/sec
  1437. (define pressure@stp
  1438. 101.325) ;kPa
  1439. (define earth-surface-temperature
  1440. (+ 15 water-freezing-temperature))
  1441. ;;; Sun
  1442. (define sun-mass
  1443. 1.989e30) ;kg
  1444. (define :m_sun sun-mass)
  1445. (define sun-radius
  1446. 6.9599e8) ;meter
  1447. (define :r_sun sun-radius)
  1448. (define sun-luminosity
  1449. 3.826e26) ;watts
  1450. (define :l_sun sun-luminosity)
  1451. (define sun-surface-temperature
  1452. 5770.0) ;Kelvin
  1453. (define sun-rotation-period
  1454. 2.14e6) ;sec
  1455. ;;; The Gaussian constant
  1456. (define GMsun ;=(* gravitation sun-mass)
  1457. 1.32712497e20)
  1458. MORE TO BE DOCUMENTED
  1459. Solutions of Equations
  1460. Linear Equations (lu, gauss-jordan, full-pivot)
  1461. Linear Least Squares (svd)
  1462. Roots of Polynomials
  1463. Searching for roots of other nonlinear disasters
  1464. Matrices
  1465. Eigenvalues and Eigenvectors
  1466. Series and Sequence Extrapolation
  1467. Special Functions
  1468. Displaying results
  1469. Lots of other stuff that we cannot remember.