/* manual_gram.cpp
 *
 * Copyright (C) 1997-2011,2013-2017 Paul Boersma
 *
 * This code is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This code is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this work. If not, see <http://www.gnu.org/licenses/>.
 */
#include "ManPagesM.h"
#include "OTGrammar.h"
static void draw_NoCoda_pat (Graphics g) {
	autoOTGrammar ot = OTGrammar_create_NoCoda_grammar ();
	OTGrammar_drawTableau (ot.get(), g, false, U"pat");
}
static void draw_NoCoda_pa (Graphics g) {
	autoOTGrammar ot = OTGrammar_create_NoCoda_grammar ();
	OTGrammar_drawTableau (ot.get(), g, false, U"pa");
}
static void draw_NoCoda_reverse (Graphics g) {
	autoOTGrammar ot = OTGrammar_create_NoCoda_grammar ();
	ot -> index [1] = 2;
	ot -> index [2] = 1;
	OTGrammar_drawTableau (ot.get(), g, false, U"pat");
}
static void draw_NPA_assimilate_anpa (Graphics g) {
	autoOTGrammar ot = OTGrammar_create_NPA_grammar ();
	ot -> index [1] = 3;
	ot -> index [2] = 1;
	ot -> index [3] = 2;
	OTGrammar_drawTableau (ot.get(), g, false, U"an+pa");
}
static void draw_NPA_assimilate_atma (Graphics g) {
	autoOTGrammar ot = OTGrammar_create_NPA_grammar ();
	ot -> index [1] = 3;
	ot -> index [2] = 1;
	ot -> index [3] = 2;
	OTGrammar_drawTableau (ot.get(), g, false, U"at+ma");
}
static void draw_NPA_faithful_anpa (Graphics g) {
	autoOTGrammar ot = OTGrammar_create_NPA_grammar ();
	ot -> index [1] = 3;
	ot -> index [2] = 2;
	ot -> index [3] = 1;
	OTGrammar_drawTableau (ot.get(), g, false, U"an+pa");
}
static void draw_NPA_faithful_atma (Graphics g) {
	autoOTGrammar ot = OTGrammar_create_NPA_grammar ();
	ot -> index [1] = 3;
	ot -> index [2] = 2;
	ot -> index [3] = 1;
	OTGrammar_drawTableau (ot.get(), g, false, U"at+ma");
}
static void draw_Wolof_ItI (Graphics g) {
	autoOTGrammar ot = OTGrammar_create_tongueRoot_grammar
		(kOTGrammar_createTongueRootGrammar_constraintSet::FIVE, kOTGrammar_createTongueRootGrammar_ranking::WOLOF);
	OTGrammar_drawTableau (ot.get(), g, false, U"\\ict\\ic");
}
static void draw_Wolof_itE (Graphics g) {
	autoOTGrammar ot = OTGrammar_create_tongueRoot_grammar
		(kOTGrammar_createTongueRootGrammar_constraintSet::FIVE, kOTGrammar_createTongueRootGrammar_ranking::WOLOF);
	OTGrammar_drawTableau (ot.get(), g, false, U"it\\ef");
}
static void draw_Wolof_etE (Graphics g) {
	autoOTGrammar ot = OTGrammar_create_tongueRoot_grammar
		(kOTGrammar_createTongueRootGrammar_constraintSet::FIVE, kOTGrammar_createTongueRootGrammar_ranking::WOLOF);
	OTGrammar_drawTableau (ot.get(), g, false, U"et\\ef");
}
static void draw_Wolof_schwatschwa (Graphics g) {
	autoOTGrammar ot = OTGrammar_create_tongueRoot_grammar
		(kOTGrammar_createTongueRootGrammar_constraintSet::FIVE, kOTGrammar_createTongueRootGrammar_ranking::WOLOF);
	OTGrammar_drawTableau (ot.get(), g, false, U"\\swt\\sw");
}
static void draw_Infant_swtI (Graphics g) {
	autoOTGrammar ot = OTGrammar_create_tongueRoot_grammar
		(kOTGrammar_createTongueRootGrammar_constraintSet::FIVE, kOTGrammar_createTongueRootGrammar_ranking::INFANT);
	ot -> constraints [1]. disharmony = 3;
	ot -> constraints [2]. disharmony = 4;
	ot -> constraints [3]. disharmony = 2;
	ot -> constraints [4]. disharmony = 1;
	ot -> constraints [5]. disharmony = 5;
	OTGrammar_sort (ot.get());
	OTGrammar_drawTableau (ot.get(), g, false, U"\\swt\\ic");
}
void manual_gram_init (ManPages me);
void manual_gram_init (ManPages me) {
MAN_BEGIN (U"constraints", U"ppgb", 20021105)
INTRO (U"In @@Optimality Theory@, the `rules' that an output form has to satisfy. Since there can be many constraints "
"and these constraints can conflict with each other, the constraints are %violable and the highest-ranked constraints "
"have the largest say in determining the optimal output.")
NORMAL (U"See the @@OT learning@ tutorial for many examples.")
MAN_END
MAN_BEGIN (U"Create tongue-root grammar...", U"ppgb", 20141001)
INTRO (U"A command in the @@New menu@ for creating an @OTGrammar object with a tongue-root-harmony grammar.")
NORMAL (U"These OTGrammar grammars only accept inputs of the form V__1_tV__2_, where V__1_ and V__2_ are "
"chosen from the six front vowels i, ɪ, e, ɛ, ə, and a.")
NORMAL (U"The following phonological features are relevant:")
LIST_ITEM (U"\t\tATR\tRTR")
LIST_ITEM (U"\thigh\ti\tɪ")
LIST_ITEM (U"\tmid\te\tɛ")
LIST_ITEM (U"\tlow\tə\ta")
ENTRY (U"Constraints")
NORMAL (U"The resulting OTGrammar will usually contain at least the following five constraints:")
TAG (U"*[rtr / hi]")
DEFINITION (U"\"do not implement [retracted tongue root] if the vowel is high.\"")
TAG (U"*[atr / lo]")
DEFINITION (U"\"do not implement [advanced tongue root] if the vowel is low.\"")
TAG (U"P\\s{ARSE} (rtr)")
DEFINITION (U"\"make an underlying [retracted tongue root] specification surface.\"")
TAG (U"P\\s{ARSE} (atr)")
DEFINITION (U"\"make an underlying [advanced tongue root] specification surface.\"")
TAG (U"*G\\s{ESTURE} (contour)")
DEFINITION (U"\"do not go from advanced to retracted tongue root, nor the other way around, within a word.\"")
NORMAL (U"This set of constraints thus comprises:")
LIST_ITEM (U"• two %%##grounding conditions#% (@@Archangeli & Pulleyblank (1994)@), "
"which we can see as gestural constraints;")
LIST_ITEM (U"• two %%##faithfulness constraints#%, which favour the similarity between input and output, "
"and can be seen as implementing the principle of maximization of perceptual contrast;")
LIST_ITEM (U"• a %%##harmony constraint#%, which, if crucially ranked higher than at least one faithfulness constraint, "
"forces %%##tongue-root harmony#%.")
NORMAL (U"In addition, there may be the following four constraints:")
TAG (U"*[rtr / mid]")
DEFINITION (U"\"do not implement [retracted tongue root] if the vowel is mid; universally ranked lower "
"than *[rtr / hi].\"")
TAG (U"*[rtr / lo]")
DEFINITION (U"\"do not implement [retracted tongue root] if the vowel is low; universally ranked lower "
"than *[rtr / mid].\"")
TAG (U"*[atr / mid]")
DEFINITION (U"\"do not implement [advanced tongue root] if the vowel is mid; universally ranked lower "
"than *[atr / lo].\"")
TAG (U"*[atr / hi]")
DEFINITION (U"\"do not implement [advanced tongue root] if the vowel is high; universally ranked lower "
"than *[atr / mid].\"")
NORMAL (U"The universal rankings referred to are due to the %%##local-ranking principle#% (@@Boersma (1998)@). "
"A learning algorithm may enforce this principle, e.g., if *[rtr / hi] falls down the ranking scale, "
"*[rtr / mid] may be pushed along.")
NORMAL (U"For information on learning these tongue-root grammars, see @@OT learning@ "
"and @@Boersma (2000)@.")
MAN_END
MAN_BEGIN (U"Optimality Theory", U"ppgb", 20021105)
INTRO (U"A framework for transferring one linguistic representation into another, "
"e.g. transferring an underlying form into a surface form. Before @@Prince & Smolensky (1993)@, "
"phonologists tended to do this with a sequentially ordered set of rules, each of which transferred a representation "
"into another. With @OT (that's the abbreviation), there are no intermediate steps in the derivation, but a set of ranked "
"@constraints chooses the optimal output form from a set of candidates.")
NORMAL (U"In Praat, you can draw Optimality-Theoretic tableaus and simulate Optimality-Theoretic learning. "
"See the @@OT learning@ tutorial.")
MAN_END
MAN_BEGIN (U"OT", U"ppgb", 20021105)
INTRO (U"An abbreviation for @@Optimality Theory@.")
MAN_END
MAN_BEGIN (U"OT learning", U"ppgb", 20070423)
INTRO (U"This tutorial describes how you can draw Optimality-Theoretic and Harmonic-Grammar tableaus and "
"simulate Optimality-Theoretic and Harmonic-Grammar learning with Praat.")
NORMAL (U"You can read this tutorial sequentially with the help of the \"< 1\" and \"1 >\" buttons.")
LIST_ITEM (U"1. @@OT learning 1. Kinds of grammars|Kinds of grammars@ (ordinal and stochastic, @OTGrammar)")
LIST_ITEM (U"2. @@OT learning 2. The grammar|The grammar@")
LIST_ITEM1 (U"2.1. @@OT learning 2.1. Viewing a grammar|Viewing a grammar@ (N\\s{O}C\\s{ODA} example, @OTGrammarEditor)")
LIST_ITEM1 (U"2.2. @@OT learning 2.2. Inside the grammar|Inside the grammar@ (saving, inspecting)")
LIST_ITEM1 (U"2.3. @@OT learning 2.3. Defining your own grammar|Defining your own grammar@")
LIST_ITEM1 (U"2.4. @@OT learning 2.4. Evaluation|Evaluation@ (noise)")
LIST_ITEM1 (U"2.5. @@OT learning 2.5. Editing a grammar|Editing a grammar@")
LIST_ITEM1 (U"2.6. @@OT learning 2.6. Variable output|Variable output@ (place assimilation example)")
LIST_ITEM1 (U"2.7. @@OT learning 2.7. Tableau pictures|Tableau pictures@ (printing, EPS)")
LIST_ITEM1 (U"2.8. @@OT learning 2.8. Asking for one output|Asking for one output@")
LIST_ITEM1 (U"2.9. @@OT learning 2.9. Output distributions|Output distributions@")
LIST_ITEM (U"3. @@OT learning 3. Generating language data|Generating language data@")
LIST_ITEM1 (U"3.1. @@OT learning 3.1. Data from a pair distribution|Data from a pair distribution@")
LIST_ITEM1 (U"3.2. @@OT learning 3.2. Data from another grammar|Data from another grammar@ (tongue-root-harmony example)")
LIST_ITEM (U"4. @@OT learning 4. Learning an ordinal grammar|Learning an ordinal grammar@")
LIST_ITEM (U"5. @@OT learning 5. Learning a stochastic grammar|Learning a stochastic grammar@")
LIST_ITEM (U"6. @@OT learning 6. Shortcut to grammar learning|Shortcut to grammar learning@")
LIST_ITEM (U"7. @@OT learning 7. Learning from overt forms|Learning from overt forms@")
MAN_END
MAN_BEGIN (U"OT learning 1. Kinds of grammars", U"ppgb", 20100330)
INTRO (U"This is chapter 1 of the @@OT learning@ tutorial.")
NORMAL (U"According to @@Prince & Smolensky (1993)@, an @@Optimality Theory|Optimality-Theoretic@ (@OT) grammar "
"consists of a number of ranked @constraints. "
"For every possible input (usually an underlying form), GEN (the generator) generates a (possibly very large) number of "
"%%output candidates%, and the ranking order of the constraints determines the winning candidate, "
"which becomes the single optimal output.")
NORMAL (U"According to @@Prince & Smolensky (1993)@ and @@Smolensky & Legendre (2006)@, a Harmonic Grammar (HG) "
"consists of a number of weighted @constraints. "
"The winning candidate, which becomes the single optimal output, is the one with the greatest %harmony, which "
"is a measure of goodness determined by the weights of the constraints violated by each candidate.")
NORMAL (U"In OT, ranking is %strict, i.e., if a constraint %A is ranked higher than the constraints %B, %C, and %D, "
"a candidate that violates only constraint %A will always be beaten by any candidate that respects %A "
"(and any higher constraints), even if it violates %B, %C, and %D.")
NORMAL (U"In HG, weighting is %additive, i.e., a candidate that only violates a constraint %A with a weight of 100 "
"has a harmony of -100 and will therefore beat a candidate that violates both a constraint %B with a weight of 70 "
"and a constraint %C with a weight of 40 and therefore has a harmony of only -110. Also, two violations of constraint %B "
"(harmony 2 * -70 = -140) are worse than one violation of constraint %A (harmony -100).")
  201. ENTRY (U"1. Ordinal OT grammars")
  202. NORMAL (U"Because only the ranking order of the constraints plays a role in evaluating the output candidates, "
  203. "Prince & Smolensky took an OT grammar to contain no absolute ranking values, i.e., they accepted only an ordinal relation "
  204. "between the constraint rankings. For such a grammar, @@Tesar & Smolensky (1998)@ devised an on-line learning algorithm "
  205. "(Error-Driven Constraint Demotion, EDCD) that changes the ranking order "
  206. "whenever the form produced by the learner is different from the adult form "
  207. "(a corrected version of the algorithm can be found in @@Boersma (2009b)@). Such a learning step "
  208. "can sometimes lead to a large change in the behaviour of the grammar.")
  209. ENTRY (U"2. Stochastic OT grammars")
  210. NORMAL (U"The EDCD algorithm is fast and convergent. As a model of language acquisition, however, its drawbacks are that it "
  211. "is extremely sensitive to errors in the learning data and that it does not show realistic gradual learning curves. "
  212. "For these reasons, @@Boersma (1997)@ "
  213. "proposed stochastic OT grammars in which every constraint has a %%ranking value% along a continuous ranking scale, "
  214. "and a small amount of %noise is added to this ranking value at evaluation time. "
  215. "The associated error-driven on-line learning algorithm (Gradual Learning Algorithm, GLA) effects small changes in the "
  216. "ranking values of the constraints with every learning step. An added virtue of the GLA is that it can learn "
  217. "languages with optionality and variation, which was something that EDCD could not do. "
  218. "For how this algorithm works on some traditional phonological problems, see @@Boersma & Hayes (2001)@.")
  219. NORMAL (U"Ordinal OT grammars can be seen as a special case of the more general stochastic OT grammars: "
  220. "they have integer ranking values (%strata) and zero evaluation noise. "
  221. "In Praat, therefore, every constraint is taken to have a ranking value, "
  222. "so that you can do stochastic as well as ordinal OT.")
  223. ENTRY (U"3. Categorical Harmonic Grammars")
  224. NORMAL (U"@@Jäger (2003)@ and @@Soderstrom, Mathis & Smolensky (2006)@ devised an on-line learning algorithm "
  225. "for Harmonic Grammars (stochastic gradient ascent). As proven by @@Fischer (2005)@, "
  226. "this algorithm is guaranteed to converge upon a correct grammar, if there exists one that handles the data.")
  227. ENTRY (U"4. Stochastic Harmonic Grammars")
  228. NORMAL (U"There are two kinds of stochastic models of HG, namely MaxEnt (= Maximum Entropy) grammars "
  229. "(@@Smolensky (1986)@, @@Jäger (2003)@), in which the probablity of a candidate winning depends on its harmony, "
  230. "and Noisy HG (@@Boersma & Escudero (2008)@, @@Boersma & Pater (2008)@), in which noise is added to constraint weights "
  231. "at evaluation time, as in Stochastic OT.")
  232. NORMAL (U"The algorithm by @@Jäger (2003)@ and @@Soderstrom, Mathis & Smolensky (2006)@ "
  233. "can learn languages with optionality and variation (@@Boersma & Pater (2008)@).")
  234. ENTRY (U"The OTGrammar object")
  235. NORMAL (U"An OT grammar is implemented as an @OTGrammar object. "
  236. "In an OTGrammar object, you specify all the constraints, all the possible inputs and all their possible outputs.")
  237. MAN_END
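/*
	A rough standalone C++ sketch of the data that an OTGrammar object holds, as just
	described and as spelled out in the text-file format of §2.2 below. This mirrors
	the structure only; the real class is declared in OTGrammar.h, and all names here
	are made up for illustration.

	#include <string>
	#include <vector>
	struct SketchConstraint {
		std::u32string name;     // e.g. U"NOCODA"
		double ranking;          // the long-term ranking value
		double disharmony;       // ranking value plus evaluation noise (see §2.4)
		double plasticity;       // per-constraint multiplier for learning steps
	};
	struct SketchCandidate {
		std::u32string output;          // e.g. U"pa"
		std::vector <int> violations;   // one violation count per constraint
	};
	struct SketchTableau {
		std::u32string input;           // e.g. U"pat"
		std::vector <SketchCandidate> candidates;
	};
	struct SketchGrammar {
		std::vector <SketchConstraint> constraints;
		std::vector <SketchTableau> tableaus;
	};
*/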
  238. MAN_BEGIN (U"OT learning 2. The grammar", U"ppgb", 20000122)
  239. INTRO (U"This is chapter 2 of the @@OT learning@ tutorial.")
  240. NORMAL (U"We can ask the grammar to produce an output form for any input form that is in its list of tableaus.")
  241. LIST_ITEM (U"2.1. @@OT learning 2.1. Viewing a grammar|Viewing a grammar@ (N\\s{O}C\\s{ODA} example, @OTGrammarEditor)")
  242. LIST_ITEM (U"2.2. @@OT learning 2.2. Inside the grammar|Inside the grammar@ (saving, inspecting)")
  243. LIST_ITEM (U"2.3. @@OT learning 2.3. Defining your own grammar|Defining your own grammar@")
  244. LIST_ITEM (U"2.4. @@OT learning 2.4. Evaluation|Evaluation@ (noise)")
  245. LIST_ITEM (U"2.5. @@OT learning 2.5. Editing a grammar|Editing a grammar@")
  246. LIST_ITEM (U"2.6. @@OT learning 2.6. Variable output|Variable output@ (place assimilation example)")
  247. LIST_ITEM (U"2.7. @@OT learning 2.7. Tableau pictures|Tableau pictures@ (printing, EPS files)")
  248. LIST_ITEM (U"2.8. @@OT learning 2.8. Asking for one output|Asking for one output@")
  249. LIST_ITEM (U"2.9. @@OT learning 2.9. Output distributions|Output distributions@")
  250. MAN_END
  251. MAN_BEGIN (U"OT learning 2.1. Viewing a grammar", U"ppgb", 20070725)
  252. NORMAL (U"Consider a language where the underlying form /pat/ leads to the surface form [pa], "
  253. "presumably because the structural constraint N\\s{O}C\\s{ODA} outranks the faithfulness constraint P\\s{ARSE}.")
  254. NORMAL (U"To create such a grammar in Praat, choose ##Create NoCoda grammar# from the Optimality Theory submenu of the @@New menu@. "
  255. "An @OTGrammar object will then appear in the list of objects. "
  256. "If you click ##View & Edit#, an @OTGrammarEditor will show up, containing:")
  257. LIST_ITEM (U"1. the constraint list, sorted by %#disharmony (= ranking value + noise):")
  258. LIST_ITEM1 (U" ")
  259. LIST_ITEM1 (U"\t\t %%ranking value%\t %disharmony\t %plasticity")
  260. LIST_ITEM1 (U"\t##N\\s{O}C\\s{ODA}#\t 100.000\t 100.000\t 1.000")
  261. LIST_ITEM1 (U"\t##P\\s{ARSE}#\t 90.000\t 90.000\t 1.000")
  262. LIST_ITEM1 (U" ")
  263. LIST_ITEM (U"2. the tableaus for the two possible inputs /pat/ and /pa/:")
  264. PICTURE (3.0, 1.0, draw_NoCoda_pat)
  265. PICTURE (3.0, 0.7, draw_NoCoda_pa)
  266. NORMAL (U"From the first tableau, we see that the underlying form /pat/ will surface as [pa], "
  267. "because the alternative [pat] violates a constraint (namely, N\\s{O}C\\s{ODA}) with a higher disharmony than does [pa], "
  268. "which only violates P\\s{ARSE}, which has a lower disharmony.")
  269. NORMAL (U"Note the standard OT tableau layout: asterisks (*) showing violations, exclamation marks (!) showing crucial violations, "
  270. "greying of cells that do not contribute to determining the winning candidate, and a finger (☞) pointing to the winner "
  271. "(this may look like a plus sign (+) if you don't have the Zapf Dingbats font installed on your computer or printer). "
  272. "An HG tableau contains asterisks and a pointing finger, but no exclamation marks or grey cells.")
  273. NORMAL (U"The second tableau shows that /pa/ always surfaces as [pa], which is no wonder since this is "
  274. "the only candidate. All cells are grey because none of them contributes to the determination of the winner.")
  275. MAN_END
  276. MAN_BEGIN (U"OT learning 2.2. Inside the grammar", U"ppgb", 20110129)
  277. NORMAL (U"You can save an @OTGrammar grammar as a text file by choosing @@Save as text file...@ from the #Save menu "
  278. "of the Objects window. For the N\\s{O}C\\s{ODA} example, the contents of the file will look like:")
  279. CODE (U"File type = \"ooTextFile\"")
  280. CODE (U"Object class = \"OTGrammar 2\"")
  281. CODE (U"decisionStrategy = <OptimalityTheory>")
  282. CODE (U"leak = 0")
  283. CODE (U"2 constraints")
  284. CODE (U"constraint [1]: \"N\\bss{O}C\\bss{ODA}\" 100 100 1 ! NOCODA")
  285. CODE (U"constraint [2]: \"P\\bss{ARSE}\" 90 90 1 ! PARSE")
  286. CODE (U" ")
  287. CODE (U"0 fixed rankings")
  288. CODE (U" ")
  289. CODE (U"2 tableaus")
  290. CODE (U"input [1]: \"pat\" 2")
  291. CODE1 (U"candidate [1]: \"pa\" 0 1")
  292. CODE1 (U"candidate [2]: \"pat\" 1 0")
  293. CODE (U"input [2]: \"pa\" 1")
  294. CODE1 (U"candidate [1]: \"pa\" 0 0")
  295. NORMAL (U"To understand more about this data structure, consult the @OTGrammar class description "
  296. "or click #Inspect after selecting the OTGrammar object. The $$\"\\bss{...}\"$ braces ensure that "
  297. "the constraint names show up with their traditional small capitals (see @@Text styles@).")
  298. NORMAL (U"You can read this text file into Praat again with @@Read from file...@ from the #Open menu in the Objects window.")
  299. MAN_END
  300. MAN_BEGIN (U"OT learning 2.3. Defining your own grammar", U"ppgb", 20110129)
  301. NORMAL (U"By editing a text file created from an example in the @@New menu@, you can define your own OT grammars.")
  302. NORMAL (U"As explained at @@Save as text file...@, Praat is quite resilient about its text file formats. "
  303. "As long as the strings and numbers appear in the correct order, you can redistribute the data "
  304. "across the lines, add all kinds of comments, or leave the comments out. "
  305. "For the N\\s{O}C\\s{ODA} example, the text file could also have looked like:")
  306. CODE (U"\"ooTextFile\"")
  307. CODE (U"\"OTGrammar 2\"")
  308. CODE (U"<OptimalityTheory>")
  309. CODE (U"0.0 ! leak")
  310. CODE (U"2 ! number of constraints")
  311. CODE (U"\"N\\bss{O}C\\bss{ODA}\" 100 100 1")
  312. CODE (U"\"P\\bss{ARSE}\" 90 90 1")
  313. CODE (U"0 ! number of fixed rankings")
  314. CODE (U"2 ! number of accepted inputs")
  315. CODE (U"\"pat\" 2 ! input form with number of output candidates")
  316. CODE1 (U"\"pa\" 0 1 ! first candidate with violations")
  317. CODE1 (U"\"pat\" 1 0 ! second candidate with violations")
  318. CODE (U"\"pa\" 1 ! input form with number of candidates")
  319. CODE1 (U"\"pa\" 0 0")
  320. NORMAL (U"To define your own grammar, you just provide a number of constraints and their rankings, "
  321. "and all the possible input forms with all their output candidates, and all the constraint violations "
  322. "for each candidate. The order in which you specify the constraints is free (you don't have to specify "
  323. "the highest-ranked first), as long as the violations are in the same order; you could also have reversed "
  324. "the order of the two input forms, as long as the corresponding candidates follow them; "
  325. "and, you could also have reversed the order of the candidates within the /pat/ tableau, "
  326. "as long as the violations follow the output forms. Thus, you could just as well have written:")
  327. CODE (U"\"ooTextFile\"")
  328. CODE (U"\"OTGrammar 2\"")
  329. CODE (U"<OptimalityTheory> 0.0")
  330. CODE (U"2")
  331. CODE (U"\"P\\bss{ARSE}\" 90 90 1.0")
  332. CODE (U"\"N\\bss{O}C\\bss{ODA}\" 100 100 1.0")
  333. CODE (U"0")
  334. CODE (U"2")
  335. CODE (U"\"pa\" 1")
  336. CODE1 (U"\"pa\" 0 0")
  337. CODE (U"\"pat\" 2")
  338. CODE1 (U"\"pat\" 0 1")
  339. CODE1 (U"\"pa\" 1 0")
  340. NORMAL (U"The $$<OptimalityTheory>$ thing in the above refers to the %%decision strategy%. "
  341. "In this tutorial I mostly assume OT's strict ranking, "
  342. "but you can experiment with Smolensky's $$<HarmonicGrammar>$ (where the constraint disharmonies represent addable, "
  343. "possibly negative weights), or with Frank Keller's $$<LinearOT>$ (like Harmonic Grammar, but with the restriction "
  344. "that negative disharmonies do not count), or with $$<PositiveHG>$ (like Harmonic Grammar, but with the restriction "
  345. "that disharmonies below 1.0 have weight 1.0), or with $$<ExponentialHG>$ (where the weights are exp(disharmony), somewhere "
  346. "between Harmonic Grammar and Linear OT), or with a $$<MaximumEntropy>$ grammar "
  347. "(where the probability that a candidate is chosen is proportional to exp(-disharmony)).")
  348. NORMAL (U"The $$leak$ thing in the above refers to the amount to which constraint weights (especially in Harmonic Grammar) "
  349. "can leak while learning.")
  350. MAN_END
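/*
	A standalone C++ sketch (illustrative only; this is not Praat's code, and whether it
	matches Praat's exact definition of a candidate's disharmony is an assumption) of the
	<MaximumEntropy> decision strategy mentioned above: each candidate receives a penalty
	equal to its violation counts weighted by the constraint disharmonies, and is chosen
	with probability proportional to exp (-penalty).

	#include <cmath>
	#include <vector>
	static std::vector <double> maxEntProbabilities (
		const std::vector <std::vector <int>>& violations,   // [candidate] [constraint]
		const std::vector <double>& disharmonies)
	{
		// For numerical stability with very large disharmonies, one would
		// subtract the smallest penalty before exponentiating.
		std::vector <double> p;
		double sum = 0.0;
		for (const auto& v : violations) {
			double penalty = 0.0;
			for (size_t i = 0; i < v.size (); i ++)
				penalty += disharmonies [i] * v [i];
			const double weight = std::exp (- penalty);
			p.push_back (weight);
			sum += weight;
		}
		for (double& q : p)
			q /= sum;
		return p;
	}
*/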
  351. MAN_BEGIN (U"OT learning 2.4. Evaluation", U"ppgb", 20070725)
  352. NORMAL (U"In an Optimality-Theoretic model of grammar, %#evaluation refers to the determination "
  353. "of the winning candidate on the basis of the constraint ranking.")
  354. NORMAL (U"In an ordinal OT model of grammar, repeated evaluations will yield the same winner again and again. "
  355. "We can simulate this behaviour with our N\\s{O}C\\s{ODA} example. "
  356. "In the editor, you can choose ##Evaluate (zero noise)# or use its keyboard shortcut Command-0 (= Command-zero). "
  357. "Repeated evaluations (keep Command-0 pressed) will always yield the following grammar:")
  358. LIST_ITEM1 (U"\t\t %%ranking value\t %disharmony\t %plasticity")
  359. LIST_ITEM1 (U"\t##N\\s{O}C\\s{ODA}#\t 100.000\t 100.000\t 1.000")
  360. LIST_ITEM1 (U"\t##P\\s{ARSE}#\t 90.000\t 90.000\t 1.000")
  361. NORMAL (U"In a stochastic OT model of grammar, repeated evaluations will yield different disharmonies each time. "
  362. "To see this, choose ##Evaluate (noise 2.0)# or use its keyboard shortcut Command-2. "
  363. "Repeated evaluations will yield grammars like the following:")
  364. LIST_ITEM1 (U"\t\t %%ranking value\t %disharmony\t %plasticity")
  365. LIST_ITEM1 (U"\t##N\\s{O}C\\s{ODA}#\t 100.000\t 100.427\t 1.000")
  366. LIST_ITEM1 (U"\t##P\\s{ARSE}#\t 90.000\t 87.502\t 1.000")
  367. NORMAL (U"and")
  368. LIST_ITEM1 (U"\t\t %%ranking value\t %disharmony\t %plasticity")
  369. LIST_ITEM1 (U"\t##N\\s{O}C\\s{ODA}#\t 100.000\t 101.041\t 1.000")
  370. LIST_ITEM1 (U"\t##P\\s{ARSE}#\t 90.000\t 90.930\t 1.000")
  371. NORMAL (U"and")
  372. LIST_ITEM1 (U"\t\t %%ranking value\t %disharmony\t %plasticity")
  373. LIST_ITEM1 (U"\t##N\\s{O}C\\s{ODA}#\t 100.000\t 96.398\t 1.000")
  374. LIST_ITEM1 (U"\t##P\\s{ARSE}#\t 90.000\t 89.482\t 1.000")
  375. NORMAL (U"The disharmonies vary around the ranking values, "
  376. "according to a Gaussian distribution with a standard deviation of 2.0. "
  377. "The winner will still be [pa] in almost all cases, because the probability of bridging "
  378. "the gap between the two ranking values is very low, namely 0.02 per cent according "
  379. "to @@Boersma (1998)@, page 332.")
  380. NORMAL (U"With a noise much higher than 2.0, the chances of P\\s{ARSE} outranking N\\s{O}C\\s{ODA} will rise. "
  381. "To see this, choose ##Evaluate...# and supply 5.0 for the noise. Typical outcomes are:")
  382. LIST_ITEM1 (U"\t\t %%ranking value\t %disharmony\t %plasticity")
  383. LIST_ITEM1 (U"\t##N\\s{O}C\\s{ODA}#\t 100.000\t 92.634\t 1.000")
  384. LIST_ITEM1 (U"\t##P\\s{ARSE}#\t 90.000\t 86.931\t 1.000")
  385. NORMAL (U"and")
  386. LIST_ITEM1 (U"\t\t %%ranking value\t %disharmony\t %plasticity")
  387. LIST_ITEM1 (U"\t##N\\s{O}C\\s{ODA}#\t 100.000\t 101.162\t 1.000")
  388. LIST_ITEM1 (U"\t##P\\s{ARSE}#\t 90.000\t 85.311\t 1.000")
  389. NORMAL (U"and")
  390. LIST_ITEM1 (U"\t\t %%ranking value\t %disharmony\t %plasticity")
  391. LIST_ITEM1 (U"\t##P\\s{ARSE}#\t 90.000\t 99.778\t 1.000")
  392. LIST_ITEM1 (U"\t##N\\s{O}C\\s{ODA}#\t 100.000\t 98.711\t 1.000")
  393. NORMAL (U"In the last case, the order of the constraints has been reversed. "
  394. "You will see that [pat] has become the winning candidate:")
  395. PICTURE (3.0, 1.0, draw_NoCoda_reverse)
  396. NORMAL (U"However, in the remaining part of this tutorial, we will stick with a noise "
  397. "with a standard deviation of 2.0. This specific number ensures that we can "
  398. "model fairly rigid rankings by giving the constraints a ranking difference of 10, a nice round number. "
  399. "Also, the learning algorithm will separate many constraints in such a way that "
  400. "the differences between their ranking values are in the vicinity of 10.")
  401. MAN_END
  402. MAN_BEGIN (U"OT learning 2.5. Editing a grammar", U"ppgb", 20161028)
  403. NORMAL (U"In the N\\s{O}C\\s{ODA} example, the winning candidate for the input /pat/ was always [pa].")
  404. NORMAL (U"To make [pat] the winner instead, N\\s{O}C\\s{ODA} should be ranked lower than P\\s{ARSE}. "
  405. "To achieve this even with zero noise, "
  406. "go to the OTGrammar window and select the N\\s{O}C\\s{ODA} constraint by clicking on it "
  407. "(a spade symbol ♠︎ will mark the selected constraint), "
  408. "and choose ##Edit ranking...# from the #Edit menu, or use the keyboard shortcut Command-E.")
  409. NORMAL (U"In the resulting command window, we lower the ranking of the constraint from 100 to 80, and click OK. "
  410. "This is what you will then see in the OTGrammar window:")
  411. LIST_ITEM1 (U"\t\t %%ranking value\t %disharmony\t %plasticity")
  412. LIST_ITEM1 (U"\t♠︎ ##N\\s{O}C\\s{ODA}#\t 80.000\t 103.429\t 1.000")
  413. LIST_ITEM1 (U"\t##P\\s{ARSE}#\t 90.000\t 88.083\t 1.000")
  414. PICTURE (3.0, 1.0, draw_NoCoda_pat)
  415. NORMAL (U"Nothing has happened to the tableau, because the disharmonies still have their old values. So choose "
  416. "##Evaluate (noise 2.0)# (Command-2) or ##Evaluate (zero noise)# (Command-0). The new disharmonies "
  417. "will centre around the new ranking values, and we see that [pat] becomes the new winner:")
  418. LIST_ITEM1 (U"\t\t %%ranking value\t %disharmony\t %plasticity")
  419. LIST_ITEM1 (U"\t##P\\s{ARSE}#\t 90.000\t 90.743\t 1.000")
  420. LIST_ITEM1 (U"\t##N\\s{O}C\\s{ODA}#\t 80.000\t 81.581\t 1.000")
  421. PICTURE (3.0, 1.0, draw_NoCoda_reverse)
  422. MAN_END
  423. MAN_BEGIN (U"OT learning 2.6. Variable output", U"ppgb", 20070725)
  424. NORMAL (U"Each time you press Command-2, which invokes the command ##Evaluate (noise 2.0)# from the #Edit menu, "
  425. "you will see the disharmonies changing. If the distance between the constraint rankings is 10, however, "
  426. "the winning candidates will very probably stay the same.")
  427. NORMAL (U"So starting from the N\\s{O}C\\s{ODA} example, we edit the rankings of the constraints again, "
  428. "setting the ranking value of P\\s{ARSE} to 88 and that of N\\s{O}C\\s{ODA} to 85. If we now press Command-2 "
  429. "repeatedly, we will get [pat] in most of the cases, "
  430. "but we will see the finger pointing at [pa] in 14 percent of the cases:")
  431. LIST_ITEM1 (U"\t\t %%ranking value\t %disharmony\t %plasticity")
  432. LIST_ITEM1 (U"\t##P\\s{ARSE}#\t 88.000\t 87.421\t 1.000")
  433. LIST_ITEM1 (U"\t##N\\s{O}C\\s{ODA}#\t 85.000\t 85.585\t 1.000")
  434. PICTURE (3.0, 1.0, draw_NoCoda_reverse)
  435. NORMAL (U"but")
  436. LIST_ITEM1 (U"\t\t %%ranking value\t %disharmony\t %plasticity")
  437. LIST_ITEM1 (U"\t##N\\s{O}C\\s{ODA}#\t 85.000\t 87.128\t 1.000")
  438. LIST_ITEM1 (U"\t##P\\s{ARSE}#\t 88.000\t 85.076\t 1.000")
  439. PICTURE (3.0, 1.0, draw_NoCoda_pat)
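/*
	The 14 percent can be reproduced with a small Monte Carlo sketch in standalone C++
	(illustrative only): draw Gaussian noise for each constraint and count how often
	NOCODA's disharmony ends up above PARSE's, which is exactly when [pa] wins.

	#include <cstdio>
	#include <random>
	int main () {
		std::mt19937 rng (1);
		std::normal_distribution <double> noise (0.0, 2.0);
		const double parse = 88.0, noCoda = 85.0;
		const int numberOfTrials = 1000000;
		int paWins = 0;
		for (int i = 0; i < numberOfTrials; i ++)
			if (noCoda + noise (rng) > parse + noise (rng))
				paWins ++;
		std::printf ("[pa] wins in %.1f%% of evaluations\n",
			100.0 * paWins / numberOfTrials);   // prints approximately 14.4%
	}
*/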
  440. NORMAL (U"As a more functionally oriented example, we consider nasal place assimilation. "
  441. "Suppose that the underlying sequence /an+pa/ surfaces as the assimilated [ampa] "
  442. "in 80 percent of the cases, and as the faithful [anpa] in the remaining 20 percent, "
  443. "while the non-nasal stop /t/ never assimilates. "
  444. "This can be achieved by having the articulatory constraint *G\\s{ESTURE} "
  445. "ranked at a short distance above *R\\s{EPLACE} (n, m):")
  446. CODE (U"\"ooTextFile\"")
  447. CODE (U"\"OTGrammar 2\"")
  448. CODE (U"decisionStrategy = <OptimalityTheory>")
  449. CODE (U"leak = 0.0")
  450. CODE (U"3 constraints")
  451. CODE (U"\"*G\\bss{ESTURE}\" 102.7 0 1")
  452. CODE (U"\"*R\\bss{EPLACE} (n, m)\" 100.0 0 1")
  453. CODE (U"\"*R\\bss{EPLACE} (t, p)\" 112.0 0 1")
  454. CODE (U"0 fixed rankings")
  455. CODE (U"2 tableaus")
  456. CODE (U"\"an+pa\" 2")
  457. CODE1 (U"\"anpa\" 1 0 0")
  458. CODE1 (U"\"ampa\" 0 1 0")
  459. CODE (U"\"at+ma\" 2")
  460. CODE1 (U"\"atma\" 1 0 0")
  461. CODE1 (U"\"apma\" 0 0 1")
  462. NORMAL (U"You can create this grammar with ##Create place assimilation grammar# from the @@New menu@. "
  463. "In the editor, it will often look like follows:")
  464. LIST_ITEM1 (U"\t\t %%ranking value\t %disharmony\t %plasticity")
  465. LIST_ITEM1 (U"\t##*R\\s{EPLACE} (t, p)#\t 112.000\t 109.806\t 1.000")
  466. LIST_ITEM1 (U"\t##*G\\s{ESTURE}#\t 102.700\t 102.742\t 1.000")
  467. LIST_ITEM1 (U"\t##*R\\s{EPLACE} (n, m)#\t 100.000\t 101.044\t 1.000")
  468. PICTURE (4.0, 1.0, draw_NPA_assimilate_anpa)
  469. PICTURE (4.0, 1.0, draw_NPA_assimilate_atma)
  470. NORMAL (U"If you keep the Command-2 keys pressed, however, you will see that the tableaus change "
  471. "into something like the following in approximately 20 percent of the cases:")
  472. LIST_ITEM1 (U"\t\t %%ranking value\t %disharmony\t %plasticity")
  473. LIST_ITEM1 (U"\t##*R\\s{EPLACE} (t, p)#\t 112.000\t 113.395\t 1.000")
  474. LIST_ITEM1 (U"\t##*R\\s{EPLACE} (n, m)#\t 100.000\t 103.324\t 1.000")
  475. LIST_ITEM1 (U"\t##*G\\s{ESTURE}#\t 102.700\t 101.722\t 1.000")
  476. PICTURE (4.0, 1.0, draw_NPA_faithful_anpa)
  477. PICTURE (4.0, 1.0, draw_NPA_faithful_atma)
  478. NORMAL (U"We see that /at+ma/ always surfaces at [atma], because *R\\s{EPLACE} (t, p) is ranked much higher "
  479. "than the other two, and that the output of /an+pa/ is variable because of the close rankings "
  480. "of *G\\s{ESTURE} and *R\\s{EPLACE} (n, m).")
  481. NORMAL (U"If you try this with a Harmonic Grammar or in Linear OT, you will see the same kind of variation. "
  482. "Although in HG, e.g. in @@Smolensky & Legendre (2006)@, the variation is usually obtained at the candidate level, "
  483. "namely by giving each candidate a probability proportional to exp(%harmony/%temperature), "
  484. "in our version of HG the variation comes about at the constraint level, "
  485. "namely by the noise that is temporarily added to the ranking of each constraint at evaluation time.")
  486. MAN_END
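/*
	A standalone C++ sketch (illustrative only) of the two sources of variation just
	contrasted for HG: candidate-level sampling, where a candidate is drawn with
	probability proportional to exp (harmony / temperature), versus constraint-level
	sampling, where noise is added to the weights and the single best candidate then
	wins deterministically.

	#include <cmath>
	#include <random>
	#include <vector>
	static std::mt19937 rng (1);

	// Candidate level: Boltzmann sampling over the candidates' harmonies.
	static int sampleCandidate (const std::vector <double>& harmonies, double temperature) {
		std::vector <double> weights;
		for (double h : harmonies)
			weights.push_back (std::exp (h / temperature));
		std::discrete_distribution <int> draw (weights.begin (), weights.end ());
		return draw (rng);
	}

	// Constraint level (Noisy HG): perturb a copy of the constraint weights,
	// then pick the candidate with the greatest harmony.
	static int noisyHgWinner (const std::vector <std::vector <int>>& violations,
		std::vector <double> weights, double noiseStdDev)
	{
		std::normal_distribution <double> noise (0.0, noiseStdDev);
		for (double& w : weights)
			w += noise (rng);
		int best = 0;
		double bestHarmony = -1e308;
		for (size_t c = 0; c < violations.size (); c ++) {
			double h = 0.0;
			for (size_t i = 0; i < violations [c]. size (); i ++)
				h -= weights [i] * violations [c] [i];
			if (h > bestHarmony) { bestHarmony = h; best = (int) c; }
		}
		return best;
	}
*/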
  487. MAN_BEGIN (U"OT learning 2.7. Tableau pictures", U"ppgb", 20110129)
  488. NORMAL (U"To show a tableau in the @@Picture window@ instead of in the editor, "
  489. "you select an @OTGrammar object and click ##Draw tableau...#. "
  490. "After you specify the input form, a tableau is drawn with the current font and size "
  491. "at the location of the current selection (%viewport) in the Picture window. The top left corner of the tableau "
  492. "is aligned with the top left corner of the selection. You can draw more than one object into the Picture "
  493. "window, whose menus also allow you to add a lot of graphics of your own design.")
  494. NORMAL (U"Besides printing the entire picture (with @@Print...@), you can save a part of it to an EPS file "
  495. "for inclusion into your favourite word processor (with @@Save as EPS file...@). "
  496. "For the latter to succeed, make sure that the selection includes at least your tableau; "
  497. "otherwise, some part of your tableau may end up truncated.")
  498. MAN_END
  499. MAN_BEGIN (U"OT learning 2.8. Asking for one output", U"ppgb", 20110808)
  500. NORMAL (U"To ask the grammar to produce a single output for a specified input form, "
  501. "you can choose @@OTGrammar: Input to output...@. The command window will ask you to provide "
  502. "an input form and the strength of the noise (the standard value is 2.0 again). "
  503. "This will perform an evaluation and write the result into the Info window.")
  504. NORMAL (U"If you are viewing the grammar in the @OTGrammarEditor, you will see the disharmonies change, "
  505. "and if the grammar allows variation, you will see that the winner in the tableau in the editor "
  506. "varies with the winner shown in the Info window.")
  507. NORMAL (U"Since the editor shows more information than the Info window, "
  508. "this command is not very useful except for purposes of scripting. "
  509. "See the following page for some related but more useful commands.")
  510. MAN_END
  511. MAN_BEGIN (U"OT learning 2.9. Output distributions", U"ppgb", 20110808)
  512. NORMAL (U"To ask the grammar to produce %many outputs for a specified input form, "
  513. "and collect them in a @Strings object, "
  514. "you select an @OTGrammar and choose @@OTGrammar: Input to outputs...|Input to outputs...@.")
  515. NORMAL (U"For example, select the object \"OTGrammar assimilation\" from our place assimilation example "
  516. "(@@OT learning 2.6. Variable output|§2.6@), and click ##Input to outputs...#. "
  517. "In the resulting command window, you specify 1000 trials, a noise strength of 2.0, and \"an+pa\" for the input form.")
  518. NORMAL (U"After you click OK, a @Strings object will appear in the list. "
  519. "If you click Info, you will see that it contains 1000 strings. "
  520. "If you click Inspect, you will see that most of the strings are \"ampa\", "
  521. "but some of them are \"anpa\". These are the output forms computed from 1000 evaluations "
  522. "for the input /an+pa/.")
  523. NORMAL (U"To count how many instances of [ampa] and [anpa] were generated, you select the @Strings object "
  524. "and click @@Strings: To Distributions|To Distributions@. You will see a new @Distributions object appear in the list. "
  525. "If you draw this to the Picture window (with ##Draw as numbers...#), you will see something like:")
  526. LIST_ITEM (U"\tampa\t815")
  527. LIST_ITEM (U"\tanpa\t185")
  528. NORMAL (U"which means that our grammar, when fed with 1000 /an+pa/ inputs, produced [ampa] 815 times, "
  529. "and [anpa] 185 times, which is consistent with our initial guess that a ranking difference of 2.7 "
  530. "would cause approximately an 80\\% - 20\\% distribution of [ampa] and [anpa].")
  531. ENTRY (U"Checking the distribution hypothesis")
  532. NORMAL (U"To see whether the guess of a 2.7 ranking difference is correct, we perform 1,000,000 trials instead of 1000. "
  533. "The output distribution (if you have enough memory in your computer) becomes something like "
  534. "(set the %Precision to 7 in the #Draw command window):")
  535. LIST_ITEM (U"\tampa\t830080")
  536. LIST_ITEM (U"\tanpa\t169920")
  537. NORMAL (U"The expected values under the 80\\% - 20\\% distribution hypothesis are:")
  538. LIST_ITEM (U"\tampa\t800000")
  539. LIST_ITEM (U"\tanpa\t200000")
  540. NORMAL (U"We compute (e.g. with @@Calculator...@) a %χ^2 of 30080^2/800000 + 30080^2/200000 = 5655.04, "
  541. "which, of course, is much too high for a distribution with a single degree of freedom. "
  542. "So the ranking difference must be smaller. If it is 2.4 (change the ranking of *G\\s{ESTURE} to 102.4), "
  543. "the numbers become something like")
  544. LIST_ITEM (U"\tampa\t801974")
  545. LIST_ITEM (U"\tanpa\t198026")
  546. NORMAL (U"which gives a %χ^2 of 24.35. By using the Calculator with the formula $$chiSquareQ (24.35, 1)$, "
  547. "we find that values larger than this have a probability of 8·10^^-7^ "
  548. "under the 80\\% - 20\\% distribution hypothesis, which must therefore be rejected again.")
  549. NORMAL (U"Rather than continuing this iterative procedure to find the correct ranking values for an "
  550. "80\\% - 20\\% grammar, we will use the Gradual Learning Algorithm "
  551. "(@@OT learning 5. Learning a stochastic grammar|§5@) to determine the rankings automatically, "
  552. "without any memory of past events other than the memory associated with maintaining the ranking values.")
  553. ENTRY (U"Measuring all inputs")
  554. NORMAL (U"To measure the outcomes of all the possible inputs at once, you select an @OTGrammar "
  555. "and choose @@OTGrammar: To output Distributions...|To output Distributions...@. "
  556. "As an example, try this on our place assimilation grammar. You can supply 1000000 for the number of trials, "
  557. "and the usual 2.0 for the standard deviation of the noise. "
  558. "After you click OK, a @Distributions object will appear in the list. "
  559. "If you draw this to the Picture window, the result will look like:")
  560. LIST_ITEM (U"\t/an+pa/ \\-> anpa\t169855")
  561. LIST_ITEM (U"\t/an+pa/ \\-> ampa\t830145")
  562. LIST_ITEM (U"\t/at+ma/ \\-> atma\t999492")
  563. LIST_ITEM (U"\t/at+ma/ \\-> apma\t508")
  564. NORMAL (U"We see that the number of [apma] outputs is not zero. This is due to the difference of 9.3 "
  565. "between the rankings of *R\\s{EPLACE} (t, p) and *G\\s{ESTURE}. If you rank "
  566. "*R\\s{EPLACE} (t, p) at 116.0, the number of produced [apma] reduces to about one in a million, "
  567. "as you can easily check with some patience.")
  568. MAN_END
  569. MAN_BEGIN (U"OT learning 3. Generating language data", U"ppgb", 20021204)
  570. NORMAL (U"A learner needs two things: a grammar that she can adjust (@@OT learning 2. The grammar|\\SS2@), and language data.")
  571. LIST_ITEM (U"3.1. @@OT learning 3.1. Data from a pair distribution|Data from a pair distribution@")
  572. LIST_ITEM (U"3.2. @@OT learning 3.2. Data from another grammar|Data from another grammar@ (tongue-root-harmony example)")
  573. MAN_END
  574. MAN_BEGIN (U"OT learning 3.1. Data from a pair distribution", U"ppgb", 20110131)
  575. NORMAL (U"If the grammar contains faithfulness constraints, the learner needs pairs of "
  576. "underlying and adult surface forms. For our place assimilation example, she needs a lot of "
  577. "/at+ma/ - [atma] pairs, and four times as many /an+pa/ - [ampa] pairs as /an+pa/ - [anpa] pairs. "
  578. "We can specify this language-data distribution in a @PairDistribution object, "
  579. "which we could simply save as a text file:")
  580. CODE (U"\"ooTextFile\"")
  581. CODE (U"\"PairDistribution\"")
  582. CODE (U"4 pairs")
  583. CODE (U"\"at+ma\" \"atma\" 100")
  584. CODE (U"\"at+ma\" \"apma\" 0")
  585. CODE (U"\"an+pa\" \"anpa\" 20")
  586. CODE (U"\"an+pa\" \"ampa\" 80")
  587. NORMAL (U"The values appear to represent percentages, but they could also have been 1.0, 0.0, 0.2, and 0.8, "
  588. "or any other values with the same proportions. We could also have left out the second pair "
  589. "and specified \"3 pairs\" instead of \"4 pairs\" in the third line.")
  590. NORMAL (U"We can create this pair distribution with ##Create place assimilation distribution# from the "
  591. "Optimality Theory submenu of the @@New menu@ in the Objects window. To see that it really contains "
  592. "the above data, you can draw it to the Picture window. To change the values, use Inspect "
  593. "(in which case you should remember to click Change after any change).")
  594. NORMAL (U"To generate input-output pairs from the above distribution, select the @PairDistribution and click "
  595. "@@PairDistribution: To Stringses...|To Stringses...@. "
  596. "If you then just click OK, there will appear two @Strings objects in the list, called \"input\" "
  597. "(underlying forms) and \"output\" (surface forms). Both contain 1000 strings. If you Inspect them both, "
  598. "you can see that e.g. the 377th string in \"input\" corresponds to the 377th string in \"output\", "
  599. "i.e., the two series of strings are aligned. See also the example at @@PairDistribution: To Stringses...@.")
  600. NORMAL (U"These two Strings objects are sufficient to help an @OTGrammar grammar to change its constraint rankings "
  601. "in such a way that the output distributions generated by the grammar match the output distributions "
  602. "in the language data. See @@OT learning 5. Learning a stochastic grammar|§5@.")
  603. MAN_END
  604. MAN_BEGIN (U"OT learning 3.2. Data from another grammar", U"ppgb", 20110128)
  605. NORMAL (U"Instead of generating input-output pairs directly from a @PairDistribution object, "
  606. "you can also generate input forms and their winning outputs from an @OTGrammar grammar. Of course, "
  607. "that's what the language data presented to real children comes from. Our example will be "
  608. "a tongue-root harmony grammar.")
  609. NORMAL (U"Choose @@Create tongue-root grammar...@ from the Optimality Theory submenu of the @@New menu@. "
  610. "Set %%Constraint set% to \"Five\", and %Ranking to \"Wolof\". Click OK. An object called "
  611. "\"OTGrammar Wolof\" will appear in the list. Click ##View & Edit#. You will see the following grammar "
  612. "appear in the @OTGrammarEditor:")
  613. LIST_ITEM1 (U"\t\t %%ranking value\t %disharmony\t %plasticity")
  614. LIST_ITEM1 (U"\t##*[rtr / hi]#\t 100.000\t 100.000\t 1.000")
  615. LIST_ITEM1 (U"\t##P\\s{ARSE} (rtr)#\t 50.000\t 50.000\t 1.000")
  616. LIST_ITEM1 (U"\t##*G\\s{ESTURE} (contour)#\t 30.000\t 30.000\t 1.000")
  617. LIST_ITEM1 (U"\t##P\\s{ARSE} (atr)#\t 20.000\t 20.000\t 1.000")
  618. LIST_ITEM1 (U"\t##*[atr / lo]#\t 10.000\t 10.000\t 1.000")
  619. NORMAL (U"This simplified Wolof grammar, with five constraints with clearly different rankings, is equivalent "
  620. "to the traditional OT ranking")
  621. FORMULA (U"*[rtr / hi] >> P\\s{ARSE} (rtr) >> *G\\s{ESTURE} (contour) >> P\\s{ARSE} (atr) >> *[atr / lo]")
  622. NORMAL (U"These constraints are based on a description of Wolof by "
  623. "@@Archangeli & Pulleyblank (1994)|Archangeli & Pulleyblank (1994: 225–239)@. "
  624. "For the meaning of these constraints, see @@Boersma (1998)|Boersma (1998: 295)@, "
  625. "or the @@Create tongue-root grammar...@ manual page.")
  626. NORMAL (U"For each input, there are four output candidates: "
  627. "the vowel heights will be the same as those in the input, but the tongue-root values of V__1_ and V__2_ are varied. "
  628. "For example, for the input [ita] we will have the four candidates "
  629. "[ita], [itə], [ɪta], and [ɪtə].")
  630. NORMAL (U"With this way of generating candidates, we see that the five constraints are completely ranked. "
  631. "First, the absolute prohibition on surface [ɪ] shows that *[rtr / hi] outranks RTR faithfulness "
  632. "(otherwise, [ɪtɪ] would have been the winner):")
  633. PICTURE (4.0, 1.5, draw_Wolof_ItI)
  634. NORMAL (U"Second, the faithful surfacing of the disharmonic /itɛ/ shows that RTR faithfulness must outrank "
  635. "the harmony (anti-contour) constraint (otherwise, [ite] would have been the winner):")
  636. PICTURE (4.0, 1.5, draw_Wolof_itE)
  637. NORMAL (U"Third, the RTR-dominant harmonicization of underlying disharmonic /etɛ/ shows that harmony must outrank ATR faithfulness "
  638. "(otherwise, [etɛ] would have won):")
  639. PICTURE (4.0, 1.5, draw_Wolof_etE)
  640. NORMAL (U"Finally, the faithful surfacing of the low ATR vowel /ə/ even if not forced by harmony, shows that "
  641. "ATR faithfulness outranks *[atr / lo] (otherwise, [ata] would have been the winning candidate):")
  642. PICTURE (4.0, 1.5, draw_Wolof_schwatschwa)
  643. NORMAL (U"These four ranking arguments clearly establish the crucial rankings of all five constraints.")
  644. ENTRY (U"Generating inputs from the grammar")
  645. NORMAL (U"According to @@Prince & Smolensky (1993)@, the input to an OT grammar can be %anything. "
  646. "This is the idea of %%##richness of the base%#. "
  647. "When doing a practical investigation, however, we are only interested in the inputs "
  648. "that will illustrate the properties of our partial grammars. "
  649. "In the case of simplified Wolof, this means the 36 possible V__1_tV__2_ sequences "
  650. "where V__1_ and V__2_ are any of the six front vowels i, ɪ, e, ɛ, ə, and a "
  651. "(see @@Create tongue-root grammar...@).")
  652. NORMAL (U"A set of inputs can be generated from an @OTGrammar object by inspecting the list of tableaus. "
  653. "So select the Wolof tongue-root grammar and choose @@OTGrammar: Generate inputs...|Generate inputs...@. "
  654. "Set %%Number of trials% to 100, and click OK. A @Strings object named \"Wolof_in\" "
  655. "will appear in the list. Click Inspect and examine the 100 input strings. "
  656. "You will see that they have been randomly chosen from the 36 possible V__1_tV__2_ sequences "
  657. "as described at @@Create tongue-root grammar...@:")
  658. FORMULA (U"ɛta, etɛ, ɛti, itɛ, ɛtɛ, iti, ɛtɪ, itɪ, ɪti, etɛ, ...")
  659. NORMAL (U"Thus, when asked to generate a random input, these grammars produce any of the 36 possible V__1_tV__2_ "
  660. "sequences, all with equal probability.")
  661. ENTRY (U"Generating outputs from the grammar")
  662. NORMAL (U"To compute the outputs for the above set of input forms, select %both the @OTGrammar object "
  663. "%and the input @Strings object, and choose @@OTGrammar & Strings: Inputs to outputs...|Inputs to outputs...@, "
  664. "perhaps specifying zero evaluation noise. "
  665. "A new Strings objects called \"Wolof_out\" will appear in the list. "
  666. "If you Inspect it, you will see that it contains a string sequence aligned with the original input strings:")
  667. FORMULA (U"ɛta, ɛtɛ, ɛti, itɛ, ɛtɛ, iti, ɛti, iti, iti, ɛtɛ, ...")
  668. NORMAL (U"In this way, we have created two Strings objects, which together form a series of input-output pairs "
  669. "needed for learning a grammar that contains faithfulness constraints.")
  670. MAN_END
  671. MAN_BEGIN (U"OT learning 4. Learning an ordinal grammar", U"ppgb", 20100331)
  672. NORMAL (U"With the data from a tongue-root-harmony language with five completely ranked constraints, "
  673. "we can have a throw at learning this language, starting with a grammar in which all the constraints "
  674. "are ranked at the same height, or randomly ranked, or with articulatory constraints outranking "
  675. "faithfulness constraints.")
  676. NORMAL (U"Let's try the third of these. Create an infant tongue-root grammar by choosing "
  677. "@@Create tongue-root grammar...@ and specifying \"Five\" for the constraint set "
  678. "and \"Infant\" for the ranking. The result after a single evaluation will be like:")
  679. LIST_ITEM1 (U"\t\t %%ranking value\t %disharmony\t %plasticity")
  680. LIST_ITEM1 (U"\t##*G\\s{ESTURE} (contour)#\t 100.000\t 100.631\t 1.000")
  681. LIST_ITEM1 (U"\t##*[atr / lo]#\t 100.000\t 100.244\t 1.000")
  682. LIST_ITEM1 (U"\t##*[rtr / hi]#\t 100.000\t 97.086\t 1.000")
  683. LIST_ITEM1 (U"\t##P\\s{ARSE} (rtr)#\t 50.000\t 51.736\t 1.000")
  684. LIST_ITEM1 (U"\t##P\\s{ARSE} (atr)#\t 50.000\t 46.959\t 1.000")
  685. NORMAL (U"Such a grammar produces all kinds of non-adult results. For instance, the input /ətɪ/ "
  686. "will surface as [atɪ]:")
  687. PICTURE (4.0, 1.5, draw_Infant_swtI)
  688. NORMAL (U"The adult form is very different: [əti]. The cause of the discrepancy is in the order of "
  689. "the constraints *[atr / lo] and *[rtr / hi], which militate against [ə] and [ɪ], respectively. "
  690. "Simply reversing the rankings of these two constraints would solve the problem in this case. "
  691. "More generally, @@Tesar & Smolensky (1998)@ claim that demoting all the constraints that cause the "
  692. "adult form to lose into the stratum just below the highest-ranked constraint "
  693. "violated in the learner's form (here, moving *[atr / lo] just below *[rtr / hi] "
  694. "into the same stratum as P\\s{ARSE} (rtr)), "
  695. "will guarantee convergence to the target grammar, "
  696. "%%if there is no variation in the data% (Tesar & Smolensky's algorithm is actually incorrect, "
  697. "but can be repaired easily, as shown by @@Boersma (2009b)@).")
  698. NORMAL (U"But Tesar & Smolensky's algorithm cannot be used for variable data, since all constraints would be "
  699. "tumbling down, exchanging places and producing wildly different grammars at each learning step. "
  700. "Since language data do tend to be variable, we need a gradual and balanced learning algorithm, "
  701. "and the following algorithm is guaranteed to converge "
  702. "to the target language, if that language can be described by a stochastic OT grammar.")
  703. NORMAL (U"The reaction of the learner to hearing the mismatch between the adult [əti] and her own [atɪ], "
  704. "is simply:")
  705. LIST_ITEM (U"1. to move the constraints violated in her own form, i.e. *[rtr / hi] and P\\s{ARSE} (atr), "
  706. "up by a small step along the ranking scale, thus decreasing the probability that her form will be the winner "
  707. "at the next evaluation of the same input;")
  708. LIST_ITEM (U"2. and to move the constraints violated in the adult form, namely *[atr / lo] and P\\s{ARSE} (rtr), "
  709. "down along the ranking scale, thus increasing the probability that the adult form will be the learner's "
  710. "winner the next time.")
  711. NORMAL (U"If the small reranking step (the %#plasticity) is 0.1, the grammar will become:")
  712. LIST_ITEM1 (U"\t\t %%ranking value\t %disharmony\t %plasticity")
  713. LIST_ITEM1 (U"\t##*G\\s{ESTURE} (contour)#\t 100.000\t 100.631\t 1.000")
  714. LIST_ITEM1 (U"\t##*[atr / lo]#\t 99.900\t 100.244\t 1.000")
  715. LIST_ITEM1 (U"\t##*[rtr / hi]#\t 100.100\t 97.086\t 1.000")
  716. LIST_ITEM1 (U"\t##P\\s{ARSE} (rtr)#\t 49.900\t 51.736\t 1.000")
  717. LIST_ITEM1 (U"\t##P\\s{ARSE} (atr)#\t 50.100\t 46.959\t 1.000")
  718. NORMAL (U"The disharmonies, of course, will be different at the next evaluation, with a probability slightly higher "
  719. "than 50\\% that *[rtr / hi] will outrank *[atr / lo]. Thus the relative rankings of these two grounding "
  720. "constraints have moved into the direction of the adult grammar, in which they are ranked at opposite "
  721. "ends of the grammar.")
  722. NORMAL (U"Note that the relative rankings of P\\s{ARSE} (atr) and P\\s{ARSE} (rtr) are "
  723. "now moving in a direction opposite to where they will have to end up in this RTR-dominant language. "
  724. "This does not matter: the procedure will converge nevertheless.")
  725. NORMAL (U"We are now going to simulate the infant who learns simplified Wolof. Take an adult Wolof grammar "
  726. "and generate 1000 input strings and the corresponding 1000 output strings "
  727. "following the procedure described in @@OT learning 3.2. Data from another grammar|§3.2@. "
  728. "Now select the infant @OTGrammar and both @Strings objects, and choose @@OTGrammar & 2 Strings: Learn...|Learn...@. "
  729. "After you click OK, the learner processes each of the 1000 input-output pairs in succession, "
  730. "gradually changing the constraint ranking in case of a mismatch. The resulting grammar may look like:")
  731. LIST_ITEM1 (U"\t\t %%ranking value\t %disharmony\t %plasticity")
  732. LIST_ITEM1 (U"\t##*[rtr / hi]#\t 100.800\t 98.644\t 1.000")
  733. LIST_ITEM1 (U"\t##*G\\s{ESTURE} (contour)#\t 89.728\t 94.774\t 1.000")
  734. LIST_ITEM1 (U"\t##*[atr / lo]#\t 89.544\t 86.442\t 1.000")
  735. LIST_ITEM1 (U"\t##P\\s{ARSE} (rtr)#\t 66.123\t 65.010\t 1.000")
  736. LIST_ITEM1 (U"\t##P\\s{ARSE} (atr)#\t 63.553\t 64.622\t 1.000")
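NORMAL (U"These steps can also be run as a script, along the following lines. This is a sketch: the ranking name "
"\"Wolof\" and the ##Learn...# argument list (evaluation noise, reranking strategy, plasticity, relative plasticity "
"spreading, honour local rankings, number of chews) are assumptions that you should check against the settings "
"windows in your version of Praat:")
CODE (U"adult = Create tongue-root grammar: \"Five\", \"Wolof\"")
CODE (U"inputs = Generate inputs: 1000")
CODE (U"selectObject: adult, inputs")
CODE (U"outputs = Inputs to outputs: 2.0")
CODE (U"infant = Create tongue-root grammar: \"Five\", \"Infant\"")
CODE (U"selectObject: infant, inputs, outputs")
CODE (U"Learn: 2.0, \"Symmetric all\", 0.1, 0.1, \"yes\", 1")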
  737. NORMAL (U"We already see some features of the target grammar, namely the top ranking of *[rtr / hi] "
  738. "and RTR dominance (the mutual ranking of the P\\s{ARSE} constraints). The steps have not been exactly 0.1, "
  739. "because we also specified a relative plasticity spreading of 0.1, thus giving steps typically in the range of 0.7 to 1.3. "
  740. "The step is also multiplied by the %%constraint plasticity%, which is simply 1.000 in all examples in this tutorial; "
  741. "you could set it to 0.0 to prevent a constraint from moving up or down at all. "
  742. "The %leak is the part of the constraint weight (especially in Harmonic Grammar) that is thrown away whenever a constraint is reranked; "
  743. "e.g if the leak is 0.01 and the step is 0.11, the constraint weight is multiplied by (1 – 0.01·0.11) = 0.9989 before "
  744. "the learning step is taken; in this way you could implement forgetful learning of correlations.")
  745. NORMAL (U"After learning once more with the same data, the result is:")
  746. LIST_ITEM1 (U"\t\t %%ranking value\t %disharmony\t %plasticity")
  747. LIST_ITEM1 (U"\t##*[rtr / hi]#\t 100.800\t 104.320\t 1.000")
  748. LIST_ITEM1 (U"\t##P\\s{ARSE} (rtr)#\t 81.429\t 82.684\t 1.000")
  749. LIST_ITEM1 (U"\t##*[atr / lo]#\t 79.966\t 78.764\t 1.000")
  750. LIST_ITEM1 (U"\t##*G\\s{ESTURE} (contour)#\t 81.316\t 78.166\t 1.000")
  751. LIST_ITEM1 (U"\t##P\\s{ARSE} (atr)#\t 77.991\t 77.875\t 1.000")
  752. NORMAL (U"This grammar now sometimes produces faithful disharmonic utterances, because the P\\s{ARSE} now often "
  753. "outrank the gestural constraints at evaluation time. But there is still a lot of variation produced. "
  754. "Learning once more with the same data gives:")
  755. LIST_ITEM1 (U"\t\t %%ranking value\t %disharmony\t %plasticity")
  756. LIST_ITEM1 (U"\t##*[rtr / hi]#\t 100.800\t 100.835\t 1.000")
  757. LIST_ITEM1 (U"\t##P\\s{ARSE} (rtr)#\t 86.392\t 82.937\t 1.000")
  758. LIST_ITEM1 (U"\t##*G\\s{ESTURE} (contour)#\t 81.855\t 81.018\t 1.000")
  759. LIST_ITEM1 (U"\t##*[atr / lo]#\t 78.447\t 78.457\t 1.000")
  760. LIST_ITEM1 (U"\t##P\\s{ARSE} (atr)#\t 79.409\t 76.853\t 1.000")
  761. NORMAL (U"By inspecting the first column, you can see that the ranking values are already in the same order as in the target grammar, "
  762. "so that the learner will produce 100 percent correct adult utterances if her evaluation noise is zero. However, "
  763. "with a noise of 2.0, there will still be variation. For instance, the disharmonies above will "
  764. "produce [ata] instead of [ətə] for underlying /ətə/. Learning seven times more "
  765. "with the same data gives a reasonable proficiency:")
  766. LIST_ITEM1 (U"\t\t %%ranking value\t %disharmony\t %plasticity")
  767. LIST_ITEM1 (U"\t##*[rtr / hi]#\t 100.800\t 99.167\t 1.000")
  768. LIST_ITEM1 (U"\t##P\\s{ARSE} (rtr)#\t 91.580\t 93.388\t 1.000")
  769. LIST_ITEM1 (U"\t##*G\\s{ESTURE} (contour)#\t 85.487\t 86.925\t 1.000")
  770. LIST_ITEM1 (U"\t##P\\s{ARSE} (atr)#\t 80.369\t 78.290\t 1.000")
  771. LIST_ITEM1 (U"\t##*[atr / lo]#\t 75.407\t 74.594\t 1.000")
  772. NORMAL (U"No input forms have error rates above 4 percent now, so the child has learned a lot with only 10,000 data, "
  773. "which may be on the order of the number of input data she receives every day.")
  774. NORMAL (U"We could have sped up the learning process appreciably by using a plasticity of 1.0 instead of 0.1. "
  775. "This would have given a comparable grammar after only 1000 data. After 10,000 data, we would have")
  776. LIST_ITEM1 (U"\t\t %%ranking value\t %disharmony\t %plasticity")
  777. LIST_ITEM1 (U"\t##*[rtr / hi]#\t 107.013\t 104.362\t 1.000")
  778. LIST_ITEM1 (U"\t##P\\s{ARSE} (rtr)#\t 97.924\t 99.984\t 1.000")
  779. LIST_ITEM1 (U"\t##*G\\s{ESTURE} (contour)#\t 89.679\t 89.473\t 1.000")
  780. LIST_ITEM1 (U"\t##P\\s{ARSE} (atr)#\t 81.479\t 83.510\t 1.000")
  781. LIST_ITEM1 (U"\t##*[atr / lo]#\t 73.067\t 72.633\t 1.000")
  782. NORMAL (U"With this grammar, all the error rates are below 0.2 percent. We see that crucially ranked constraints "
  783. "will become separated after a while by a gap of about 10 along the ranking scale.")
  784. NORMAL (U"If we have three constraints obligatorily ranked as A >> B >> C in the adult grammar, with ranking differences of 8 between "
  785. "A and B and between B and C in the learner's grammar (giving an error rate of 0.2\\% ), the ranking A >> C has a chance of less than 1 in 100 million "
  786. "to be reversed at evaluation time. This relativity of error rates is an empirical prediction of our stochastic OT grammar model.")
  787. NORMAL (U"Our Harmonic Grammars with constraint noise (Noisy HG) are slightly different in that respect, "
  788. "but are capable of learning a constraint ranking for any language that can be generated from an ordinal ranking. "
  789. "As proved by @@Boersma & Pater (2008)@, the same learning rule as was devised for MaxEnt grammars by @@Jäger (2003)@ "
  790. "is able to learn all languages generated by %nonnoisy HG grammars as well; "
  791. "the GLA, by contrast, failed to converge on 0.4 percent of randomly generated OT languages "
  792. "(failures of the GLA on ordinal grammars were discovered first by @@Pater (2008)@). "
  793. "This learning rule for HG and MaxEnt is the same as the GLA described above, "
  794. "except that the learning step of each constraint is multiplied by "
  795. "the difference of the number of violations of this constraint between the correct form and the incorrect winner; "
  796. "this multiplication is crucial (without it, stochastic gradient ascent is not guaranteed to converge), "
  797. "as was noted by Jäger for MaxEnt. The same procedure for updating weights occurs "
  798. "in @@Soderstrom, Mathis & Smolensky (2006)@, who propose "
  799. "an incremental version (formulas 21 and 35d) of the harmony version (formulas 14 and 18) "
  800. "of the learning equation for Boltzmann machines (formula 13). "
  801. "The differences between the three implementations is that in Stochastic OT and Noisy HG the evaluation noise (or %temperature) is in the constraint rankings, "
  802. "in MaxEnt it is in the candidate probabilities, and in Boltzmann machines it is in the activities (i.e. the constraint violations). "
  803. "The upate procedure is also similar to that of the %perceptron, a neural network invented by @@Rosenblatt (1962)@ "
  804. "for classifying continuous inputs.")
  805. MAN_END
  806. MAN_BEGIN (U"OT learning 5. Learning a stochastic grammar", U"ppgb", 20070725)
  807. NORMAL (U"Having shown that the algorithm can learn deep obligatory rankings, we will now see "
  808. "that it also performs well in replicating the variation in the language environment.")
  809. NORMAL (U"Create a place assimilation grammar as described in @@OT learning 2.6. Variable output|§2.6@, "
  810. "and set all its rankings to 100.000:")
  811. LIST_ITEM1 (U"\t\t %%ranking value\t %disharmony\t %plasticity")
  812. LIST_ITEM1 (U"\t##*G\\s{ESTURE}#\t 100.000\t 100.000\t 1.000")
  813. LIST_ITEM1 (U"\t##*R\\s{EPLACE} (t, p)#\t 100.000\t 100.000\t 1.000")
  814. LIST_ITEM1 (U"\t##*R\\s{EPLACE} (n, m)#\t 100.000\t 100.000\t 1.000")
  815. NORMAL (U"Create a place assimilation distribution and generate 1000 string pairs (@@OT learning 3.1. Data from a pair distribution|§3.1@). "
  816. "Select the grammar and the two @Strings objects, and learn with a plasticity of 0.1:")
  817. LIST_ITEM1 (U"\t\t %%ranking value\t %disharmony\t %plasticity")
  818. LIST_ITEM1 (U"\t##*R\\s{EPLACE} (t, p)#\t 104.540\t 103.140\t 1.000")
  819. LIST_ITEM1 (U"\t##*R\\s{EPLACE} (n, m)#\t 96.214\t 99.321\t 1.000")
  820. LIST_ITEM1 (U"\t##*G\\s{ESTURE}#\t 99.246\t 97.861")
  821. NORMAL (U"The output distributions are now (using @@OTGrammar: To output Distributions...@, see @@OT learning 2.9. Output distributions|§2.9@):")
  822. LIST_ITEM1 (U"\t/an+pa/ \\-> anpa\t14.3\\% ")
  823. LIST_ITEM1 (U"\t/an+pa/ \\-> ampa\t85.7\\% ")
  824. LIST_ITEM1 (U"\t/at+ma/ \\-> atma\t96.9\\% ")
  825. LIST_ITEM1 (U"\t/at+ma/ \\-> apma\t3.1\\% ")
  826. NORMAL (U"After another 10,000 new string pairs, we have:")
  827. LIST_ITEM1 (U"\t\t %%ranking value\t %disharmony\t %plasticity")
  828. LIST_ITEM1 (U"\t##*R\\s{EPLACE} (t, p)#\t 106.764\t 107.154\t 1.000")
  829. LIST_ITEM1 (U"\t##*G\\s{ESTURE}#\t 97.899\t 97.161\t 1.000")
  830. LIST_ITEM1 (U"\t##*R\\s{EPLACE} (n, m)#\t 95.337\t 96.848\t 1.000")
  831. NORMAL (U"With the following output distributions (measured with a million draws):")
  832. LIST_ITEM1 (U"\t/an+pa/ \\-> anpa\t18.31\\% ")
  833. LIST_ITEM1 (U"\t/an+pa/ \\-> ampa\t81.69\\% ")
  834. LIST_ITEM1 (U"\t/at+ma/ \\-> atma\t99.91\\% ")
  835. LIST_ITEM1 (U"\t/at+ma/ \\-> apma\t0.09\\% ")
  836. NORMAL (U"The error rate is acceptably low, but the accuracy in reproducing the 80\\% - 20\\% "
  837. "distribution could be better. This is because the relatively high plasticity of 0.1 "
  838. "can only give a coarse approximation. So we lower the plasticity to 0.001, "
  839. "and supply 100,000 new data:")
  840. LIST_ITEM1 (U"\t\t %%ranking value\t %disharmony\t %plasticity")
  841. LIST_ITEM1 (U"\t##*R\\s{EPLACE} (t, p)#\t 106.810\t 107.184\t 1.000")
  842. LIST_ITEM1 (U"\t##*G\\s{ESTURE}#\t 97.782\t 99.682\t 1.000")
  843. LIST_ITEM1 (U"\t##*R\\s{EPLACE} (n, m)#\t 95.407\t 98.760\t 1.000")
  844. NORMAL (U"With the following output distributions:")
  845. LIST_ITEM1 (U"\t/an+pa/ \\-> anpa\t20.08\\% ")
  846. LIST_ITEM1 (U"\t/an+pa/ \\-> ampa\t79.92\\% ")
  847. LIST_ITEM1 (U"\t/at+ma/ \\-> atma\t99.94\\% ")
  848. LIST_ITEM1 (U"\t/at+ma/ \\-> apma\t0.06\\% ")
  849. NORMAL (U"So besides learning obligatory rankings like a child does, "
  850. "the algorithm can also replicate very well the probabilities of the environment. "
  851. "This means that a GLA learner can learn stochastic grammars.")
  852. MAN_END
  853. MAN_BEGIN (U"OT learning 6. Shortcut to grammar learning", U"ppgb", 20070523)
  854. INTRO (U"Once you have mastered the tedious procedures of making Praat learn stochastic grammars, "
  855. "as described in the previous chapters of this tutorial, you can try a faster procedure, "
  856. "which simply involves selecting an @OTGrammar object together with a @PairDistribution object, "
  857. "and clicking ##Learn...#. Once you click OK, Praat will feed the selected grammar with input/output "
  858. "pairs drawn from the selected distribution, and the grammar will be modified every time its output "
  859. "is different from the given output. Here is the meaning of the arguments:")
  860. TAG (U"%%Evaluation noise% (standard value: 2.0)")
  861. DEFINITION (U"the standard deviation of the noise added to the ranking of each constraint at evaluation time.")
  862. TAG (U"%%Strategy% (standard value: Symmetric all)")
  863. DEFINITION (U"what to do when the learner's output is different from the given output. Possibilities:")
  864. LIST_ITEM1 (U"Demotion only: lower the ranking of every constraint that is violated more in the correct output "
  865. "than in the learner's output. This algorithm crashes if there is variation in the data, i.e. if some inputs "
  866. "can have more than one possible adult outputs.")
  867. LIST_ITEM1 (U"Symmetric one: lower the ranking of the highest-ranked constraint that is violated more in the adult output "
  868. "than in the learner's output, and raise the ranking of the highest-ranked constraint that is violated more "
  869. "in the learner's output than in the adult output. This is the \"minimal\" algorithm described and refuted in "
  870. "@@Boersma (1998)@, chapters 14-15.")
  871. LIST_ITEM1 (U"Symmetric all: lower the ranking of all constraints that are violated more in the adult output "
  872. "than in the learner's output, and raise the ranking of all constraints that are violated more "
  873. "in the learner's output than in the adult output. This is the algorithm described in @@Boersma & Hayes (2001)@.")
  874. LIST_ITEM1 (U"Weighted uncancelled: the same as \"Symmetric all\", but the size of the learning step "
  875. "is divided by the number of moving constraints. This makes sure that the average ranking of all the constraints "
  876. "is constant.")
  877. LIST_ITEM1 (U"Weighted all: the \"Symmetric all\" strategy can reworded as follows: \"lower the ranking of all constraints "
  878. "that are violated in the adult output, and raise the ranking of all constraints that are violated in the learner's output\". "
  879. "Do that, but divide the size of the learning step by the number of moving constraints.")
  880. LIST_ITEM1 (U"EDCD: Error-Driven Constraint Demotion, the algorithm described by @@Tesar & Smolensky (1998)@. "
  881. "All constraints that prefer the adult form and are ranked above the highest-ranked constraint that prefers the learner's form, "
  882. "are demoted to the ranking of that last constraint minus 1.0.")
  883. TAG (U"%%Initial plasticity% (standard value: 1.0)")
  884. TAG (U"%%Replications per plasticity% (standard value: 100000)")
  885. TAG (U"%%Plasticity decrement% (standard value: 0.1)")
  886. TAG (U"%%Number of plasticities% (standard value: 4)")
  887. DEFINITION (U"these four arguments determine the %%learning scheme%, i.e. the number of times the grammar will "
  888. "receive data at a certain plasticity. With the standard values, there will be 100000 data while the plasticity is 1.0 "
  889. "(the initial plasticity), 100000 data while the plasticity is 0.1, 100000 data while the plasticity is 0.01, "
  890. "and 100000 data while the plasticity is 0.001. If you want learning at a constant plasticity, set the "
  891. "%%number of plasticities% to 1. Note that for the decision strategies of HarmonicGrammar, LinearOT, PositiveHG or MaximumEntropy "
  892. "the learning step for a constraint equals the plasticity multiplied by the difference between the "
  893. "numbers of violations of this constraint in the adult output and in the learner's output.")
  894. TAG (U"%%Rel. plasticity spreading% (standard value: 0.1)")
  895. DEFINITION (U"if this is not 0, the size of the learning step will vary randomly. For instance, if the plasticity is set to 0.01, "
  896. "and the relative plasticity spreading is 0.1, you will get actual learning steps that could be anywhere between 0.007 "
  897. "and 0.013, according to a Gaussian distribution with mean 0.01 and standard deviation 0.001.")
  898. TAG (U"%%Honour local rankings% (standard value: on)")
  899. DEFINITION (U"if this is on, the fixed rankings that you supplied in the grammar will be maintained during learning: "
  900. "if a constraint falls below a constraint that is supposed to be universally lower-ranked, this second constraint "
  901. "will be demoted as well.")
  902. TAG (U"%%Number of chews% (standard value: 1)")
  903. DEFINITION (U"the number of times that each input-output pair is fed to the grammar. Setting this number to 20 "
  904. "will give a slightly different (perhaps more accurate) result than simply raising the plasticity by a factor of 20.")
  905. MAN_END
  906. MAN_BEGIN (U"OT learning 7. Learning from overt forms", U"ppgb", 20031220)
  907. INTRO (U"In order to be able to learn phonological production, both EDCD and GLA require pairs of underlying form "
  908. "and surface form. However, the language-learning child hears neither of these forms: she only hears ##%%overt forms%#, "
  909. "with less structural information than the underlying and surface forms contain.")
  910. ENTRY (U"Interpretive parsing")
  911. NORMAL (U"The language-learning child has to construct both the surface form and the underlying form from the overt form "
  912. "that she hears. @@Tesar & Smolensky (1998)@ proposed that the child computes a surface form from the "
  913. "overt form by using the same constraint ranking as in production. For instance, the overt form [σ σ σ], which "
  914. "is a sequence of three syllables with stress on the second syllable, will be interpreted as the surface form "
  915. "/(σ σˈ) σ/ in iambic left-aligning languages (I\\s{AMBIC} >> T\\s{ROCHAIC}, and A\\s{LL}F\\s{EET}L\\s{EFT} "
  916. ">> A\\s{LL}F\\s{EET}R\\s{IGHT}), but as the surface form /σ (σˈ σ)/ in trochaic right-aligning languages. "
  917. "Tesar & Smolensky call this procedure @@Robust Interpretive Parsing@, because it works even if the listener's grammar "
  918. "would never produce such a form. For instance, if I\\s{AMBIC} >> A\\s{LL}F\\s{EET}R\\s{IGHT} >> T\\s{ROCHAIC} >> "
  919. "A\\s{LL}F\\s{EET}L\\s{EFT}, the listener herself would produce the iambic right-aligned /σ (σ σˈ)/ "
  920. "for any trisyllabic underlying form, though she will still interpret [σ σˈ σ] as /(σ σˈ) σ/, "
  921. "which is illegal in her own grammar. Hearing forms that are illegal in one's own grammar is of course a common "
  922. "situation for language-learning children.")
  923. NORMAL (U"In Tesar & Smolensky's view, the underlying form can be trivially computed from the surface form, "
  924. "since the surface form %contains enough information. For instance, the surface form /(σ σˈ) σ/ must "
  925. "lead to the underlying form |σ σ σ| if all parentheses and stress marks are removed. Since "
  926. "@@McCarthy & Prince (1995)@, this %containment view of surface representations has been abandoned. "
  927. "In Praat, therefore, the underlying form is not trivially computed from the surface form, "
  928. "but all the tableaus are scanned for the surface form that violates the least high-ranked constraints (in the usual "
  929. "OT sense), as long as it contains the given overt form. For instance, if I\\s{AMBIC} >> A\\s{LL}F\\s{EET}R\\s{IGHT} "
  930. ">> T\\s{ROCHAIC} >> A\\s{LL}F\\s{EET}L\\s{EFT}, the overt form [σ σˈ σ] occurs in two candidates: "
  931. "the surface form /(σ σˈ) σ/ in the tableau for the underlying form |σ σ σ|, and "
  932. "the surface form /σ (σˈ σ)/ in the tableau for the underlying form |σ σ σ|. The best candidate "
  933. "is the surface form /(σ σˈ) σ/ in the tableau for the underlying form |σ σ σ|. Hence, "
  934. "Praat's version of Robust Interpretive Parsing will map the overt form [σ σˈ σ] to the underlying form "
  935. "|σ σ σ| (the ‘winning tableau’) and to the surface form /(σ σˈ) σ/ (to be sure, "
  936. "this is the same result as in Tesar & Smolensky's "
  937. "version, but crucial differences between the two versions will appear when faithfulness constraints are involved).")
  938. NORMAL (U"In Praat, you can do interpretive parsing. For example, create a grammar with ##Create metrics grammar...# "
  939. "from the @@New menu@. Then choose ##Get interpretive parse...# from the #Query submenu and supply \"[L1 L L]\" for the "
  940. "overt form, which means a sequence of three light syllables with a main stress on the first. The Info window will show you "
  941. "the optimal underlying and surface forms, given the current constraint ranking.")
  942. ENTRY (U"Learning from partial forms")
  943. NORMAL (U"Now that the learning child can convert an overt form to an underlying-surface pair, she can compare this "
  944. "surface form to the surface form that she herself would have derived from this underlying form. For instance, "
  945. "If I\\s{AMBIC} >> A\\s{LL}F\\s{EET}R\\s{IGHT} >> T\\s{ROCHAIC} >> A\\s{LL}F\\s{EET}L\\s{EFT}, the winning "
  946. "tableau is |σ σ σ|, and the perceived adult surface form is /(σ σˈ) σ/. "
  947. "But from the underlying form |σ σ σ|, the learner will derive /σ (σ σˈ)/ as her own surface form. "
  948. "The two surface forms are different, so that the learner can take action by reranking one or more constraints, "
  949. "perhaps with EDCD or GLA.")
  950. NORMAL (U"In Praat, you can learn from partial forms. Select the metrics grammar and choose ##Learn from one partial output...#, "
  951. "and supply \"[L1 L L]\". If you do this several times, you will see that the winner for the tableau \"|L L L|\" will become one of "
  952. "the two forms with overt part \"[L1 L L]\".")
  953. NORMAL (U"To run a whole simulation, you supply a @Distributions object with one column, perhaps from a text file. The following "
  954. "text file shows the overt forms for Latin, with the bisyllabic forms occurring more often than the trisyllabic forms:")
  955. CODE (U"\"ooTextFile\"")
  956. CODE (U"\"Distributions\"")
  957. CODE (U"1 column with numeric data")
  958. CODE (U" \"Latin\"")
  959. CODE (U"28 rows")
  960. CODE (U"\"[L1 L]\" 25")
  961. CODE (U"\"[L1 H]\" 25")
  962. CODE (U"\"[H1 L]\" 25")
  963. CODE (U"\"[H1 H]\" 25")
  964. CODE (U"\"[L1 L L]\" 5")
  965. CODE (U"\"[H1 L L]\" 5")
  966. CODE (U"\"[L H1 L]\" 5")
  967. CODE (U"\"[H H1 L]\" 5")
  968. CODE (U"\"[L1 L H]\" 5")
  969. CODE (U"\"[H1 L H]\" 5")
  970. CODE (U"\"[L H1 H]\" 5")
  971. CODE (U"\"[H H1 H]\" 5")
  972. CODE (U"\"[L L1 L L]\" 1")
  973. CODE (U"\"[L H1 L L]\" 1")
  974. CODE (U"\"[L L H1 L]\" 1")
  975. CODE (U"\"[L H H1 L]\" 1")
  976. CODE (U"\"[L L1 L H]\" 1")
  977. CODE (U"\"[L H1 L H]\" 1")
  978. CODE (U"\"[L L H1 H]\" 1")
  979. CODE (U"\"[L H H1 H]\" 1")
  980. CODE (U"\"[H L1 L L]\" 1")
  981. CODE (U"\"[H H1 L L]\" 1")
  982. CODE (U"\"[H L H1 L]\" 1")
  983. CODE (U"\"[H H H1 L]\" 1")
  984. CODE (U"\"[H L1 L H]\" 1")
  985. CODE (U"\"[H H1 L H]\" 1")
  986. CODE (U"\"[H L H1 H]\" 1")
  987. CODE (U"\"[H H H1 H]\" 1")
  988. NORMAL (U"Read this file into Praat with @@Read from file...@. A @Distributions object then appears in the object list. "
  989. "Click @@Distributions: To Strings...|To Strings...@, then OK. A @Strings object containing 1000 strings, drawn randomly "
  990. "from the distribution, with relative frequencies as in the text file, will appear in the list. Click @Inspect to check the contents.")
  991. NORMAL (U"You can now select the @OTGrammar together with the @Strings and choose ##Learn from partial outputs...#. "
  992. "A thousand times, Praat will construct a surface form from the overt form by interpretive parsing, "
  993. "and also construct the underlying form in the same way, from which it will construct another surface form by evaluating the "
  994. "tableau. Whenever the two surface forms are not identical, some constraints will be reranked. In the current implementation, "
  995. "the disharmonies for interpretive parsing and for production are the same, i.e., "
  996. "if the evaluation noise is not zero, the disharmonies are randomly renewed before each interpretive parsing "
  997. "but stay the same for the subsequent virtual production.")
  998. MAN_END
  999. /*
  1000. MAN_BEGIN (U"OTGrammar examples", U"ppgb", 20001027)
  1001. ENTRY (U"Safety margin and stochastic evaluation")
  1002. NORMAL (U"What is a %small demotion step? This must be taken relative to another quantity. "
  1003. "This quantity is the evaluation noise.")
  1004. NORMAL (U"If the %%ranking spreading% were zero, the demotions in the GLA would immediately stop once that "
  1005. "a constraint has fallen below its competitors. If the data contain an error, the grammar "
  1006. "will change to an incorrect state, and the learner has to make an error to correct it. "
  1007. "Though the error ratio is now one (because of the constant demotion step), "
  1008. "the learner's behaviour can still be described as a `tit-for-tat' strategy, "
  1009. "which is an unknown phenomenon in human speech variation.")
  1010. NORMAL (U"The solution is to have a finite ranking spreading: in this way, the learner will continue making "
  1011. "errors (though less than 50\\% ) after the constraint has fallen below its competitors. "
  1012. "For instance, with a ranking spreading of 2.0, the distance between the constraints will be about "
  1013. "10 after a few thousand relevant data: the %%safety margin%. "
  1014. "If the mean demotion step is 0.1, there is no chance "
  1015. "that a modest amount of erroneous data will reverse the ranking. It is true, however, that "
  1016. "one erroneous datum will decrease the constraint distance by 0.1, so that the learner will have to "
  1017. "make one mistake herself to restore the original distance. But this is only natural, "
  1018. "as she may well wait a long time before doing this: "
  1019. "on the average, the same number of thousands of data. This is the %%patient error-matching learner%.")
  1020. ENTRY (U"Learning from surface data alone")
  1021. NORMAL (U"Many a young learner will take the adult surface forms, as perceived by herself, "
  1022. "as her underlying forms. In other words, the input to her grammar will equal "
  1023. "the output of the adult grammar.")
  1024. NORMAL (U"We can simulate what happens here by taking the adult output as the input to the learning algorithms:")
  1025. LIST_ITEM (U"1. Create a five-constraint Wolof grammar.")
  1026. LIST_ITEM (U"2. Generate 1000 input strings.")
  1027. LIST_ITEM (U"3. Compute the 1000 output strings.")
  1028. LIST_ITEM (U"4. Create a five-constraint \"equal\" grammar.")
  1029. LIST_ITEM (U"5. Select this monostratal grammar and the output strings only, "
  1030. "and click ##Learn output (T&S)#. See @@OTAnyGrammar & Strings: Learn output (T&S)@.")
  1031. NORMAL (U"The result will be a grammar where the faithfulness constraints outrank all the "
  1032. "gestural constraints that can be violated in any output string:")
  1033. LIST_ITEM (U"##*[rtr / hi]# 100.000 100.000")
  1034. LIST_ITEM (U"##P\\s{ARSE} (atr)# 100.000 100.000")
  1035. LIST_ITEM (U"##P\\s{ARSE} (rtr)# 100.000 100.000")
  1036. LIST_ITEM (U"##*G\\s{ESTURE} (contour)# 99.000 99.000")
  1037. LIST_ITEM (U"##*[atr / lo]# 99.000 99.000")
  1038. NORMAL (U"You will get a comparable result with @@OTAnyGrammar & Strings: Learn output (GLA)...@.")
  1039. NORMAL (U"The resulting grammar represents the learner's state after the acquisition of "
  1040. "all the relevant gestures. The learner will now faithfully reproduce /etɛ/ "
  1041. "if that were in her lexicon. Before being able to render such an underlying form as [ɛtɛ], "
  1042. "she must learn that faithfulness can be violated.")
  1043. ENTRY (U"Example 2: When underlying forms are irrelevant")
  1044. NORMAL (U"Underlying forms are relevant only if faithfulness constraints are involved. "
  1045. "If a grammar only contains constraints that evaluate the output, "
  1046. "we need no input strings for our simulations. However, "
  1047. "if the relevant constraint had fixed rankings, there would only be a single possible output, "
  1048. "which seems uninteresting. An interesting output-only grammar, therefore, necessarily "
  1049. "features stochastic evaluation, and at least some of the constraints wil have rankings that "
  1050. "are close to each other.")
  1051. NORMAL (U"#%Example. @@Hayes & MacEachern (1998)@ identify 11 output-oriented constraints "
  1052. "for the forms of quatrains in English folk verse.")
  1053. LIST_ITEM (U"1. Create a folk-verse grammar with equal constraint rankings (all 100). "
  1054. "You may find it in the file ##folkVerse.OTGrammar# in the #demo directory of your #Praat distribution, "
  1055. "or get it from ##http://www.fon.hum.uva.nl/praat/folkVerse.OTGrammar#.")
  1056. LIST_ITEM (U"2. Generate 1000 input strings. They will all be empty strings.")
  1057. LIST_ITEM (U"3. Read the file that contains the surface distribution of the possible outputs. "
  1058. "It is in the #demo folder or at ##http://www.fon.hum.uva.nl/praat/folkVerse.Distributions#. "
  1059. "A @Distributions object will appear in the list. "
  1060. "Column \"Actual\" is the last column of table (10) in Hayes & MacEachern (1998).")
  1061. LIST_ITEM (U"4. From this surface distribution, create a list of 1000 output strings, "
  1062. "using @@Distributions: To Strings...@ (set %column to 1).")
  1063. LIST_ITEM (U"5. Select the grammar, the \"input strings\", and the output strings, "
  1064. "and learn in the usual way. After learning, you can see that some constraints have risen above 100, "
  1065. "and some have fallen below 100.")
  1066. NORMAL (U"With each of the 1000 outputs, the learner can be regarded as having generated a quatrain herself "
  1067. "and compared it to a quatrain in her folk-verse environment. If these quatrains are equal "
  1068. "(a 10\\% chance or so), nothing happens. Otherwise, the learner will demote the highest "
  1069. "violated constraint (i.e., the one that is most disharmonic during her stochastic evaluation) "
  1070. "in the heard quatrain that is not (or less) violated in the winner (the quatrain that she generated herself). "
  1071. "She will also promote the highest violated constraint in the winner "
  1072. "that is not (or less) violated in the heard quatrain.")
  1073. NORMAL (U"We are next going to generate a set of 589 quatrains, in order to be able to compare "
  1074. "the behaviours of our folk-verse grammar and the English folk-verse writers:")
  1075. LIST_ITEM (U"1. Select the learned grammar and generate 589 (empty) input strings.")
  1076. LIST_ITEM (U"2. Select the learned grammar and the so-called input strings, "
  1077. "and generate the output strings.")
  1078. LIST_ITEM (U"3. To see the distribution of the output strings, choose @@Strings: To Distributions@, "
  1079. "and draw the resulting @Distributions object to your Picture window.")
  1080. LIST_ITEM (U"4. You can now compare the two distributions.")
  1081. NORMAL (U"Instead of generating the data from a @Distributions, you could have generated them from "
  1082. "the target grammar in table (9) of Hayes & MacEachern (1998). "
  1083. "Such a grammar is in your #demo folder (##folkVerse59.OTGrammar#) "
  1084. "or at ##http://www.fon.hum.uva.nl/praat/folkVerse59.OTGrammar#. "
  1085. "Because of the loosening of the tie between two of the constraints (see H & McE, fn. 43), "
  1086. "this grammar will give different distributions from the \"actual\" values, "
  1087. "but our algorithm will learn them correctly, provided you choose ##Symmetric all# "
  1088. "or ##weighted uncancelled# for the learning strategy.")
  1089. MAN_END
  1090. */
  1091. MAN_BEGIN (U"OTGrammar", U"ppgb", 20141001)
  1092. INTRO (U"One of the @@types of objects@ in Praat. See the @@OT learning@ tutorial.")
  1093. ENTRY (U"Inside an OTGrammar")
  1094. NORMAL (U"With @Inspect, you will see the following attributes:")
  1095. TAG (U"%constraints")
  1096. DEFINITION (U"a list of constraints. Each constraint contains the following attributes:")
  1097. TAG1 (U"%name")
  1098. DEFINITION1 (U"the fixed name of the constraint, for instance \"P\\s{ARSE}\".")
  1099. TAG1 (U"%ranking")
  1100. DEFINITION1 (U"the continuous ranking value; will change during learning.")
  1101. TAG1 (U"%disharmony")
  1102. DEFINITION1 (U"the effective ranking value during stochastic evaluation; with a non-zero evaluation noise, "
  1103. "this will be different from %ranking.")
  1104. TAG (U"%fixedRankings")
  1105. DEFINITION (U"an often empty list of locally ranked pairs of constraints. "
  1106. "Each local-ranking pair contains the following attributes:")
  1107. TAG1 (U"%higher")
  1108. DEFINITION1 (U"the index of the universally higher-ranked of the two constraints, "
  1109. "a number between 1 and the number of constraints.")
  1110. TAG1 (U"%lower")
  1111. DEFINITION1 (U"the index of the universally lower-ranked of the two constraints.")
  1112. TAG (U"%tableaus")
  1113. DEFINITION (U"a list of tableaus. Each tableau contains the following attributes:")
  1114. TAG1 (U"%input")
  1115. DEFINITION1 (U"the input string of the tableau. For production grammaras, the underlying form of the utterance, for example "
  1116. "|an+pa| or |bɹɪŋ + \\s{PAST}|.")
  1117. TAG1 (U"%candidates")
  1118. DEFINITION1 (U"a list of output candidates. Each output candidate consists of:")
  1119. TAG2 (U"%output")
  1120. DEFINITION2 (U"the output string of the tableau. In two-level phonology: the surface form of the utterance, for example "
  1121. "[anpa] or [ampa] or [bɹɔːt] or [bɹæŋ]. In multi-level phonology: the combination of phonological surface "
  1122. "and phonetic results, for example /anpa/[anpa] or /ampa/[ampa] or /bɹɔːt/[bɹɔːt] or "
  1123. "/bɹæŋ/[bɹæŋ].")
  1124. TAG2 (U"%marks")
  1125. DEFINITION2 (U"a list of the number of violations of each constraint for this output form. If there are 13 constraints, "
  1126. "this list will contain 13 integer numbers for each candidate.")
  1127. ENTRY (U"OTGrammar creation")
  1128. NORMAL (U"You can easily create some #OTGrammar examples from the @@New menu@, "
  1129. "or type your own grammars into a text file and read the file into Praat. See the @@OT learning@ tutorial.")
  1130. ENTRY (U"OTGrammar actions")
  1131. NORMAL (U"You can perform the following actions on selected #OTGrammar objects:")
  1132. LIST_ITEM (U"@@OTGrammar: Generate inputs...@")
  1133. /*LIST_ITEM (U"@@OTGrammar: Sort...@")*/
  1134. LIST_ITEM (U"@@OTGrammar: Input to output...@")
  1135. LIST_ITEM (U"@@OTGrammar: Input to outputs...@ (compute the output distribution for a given input)")
  1136. LIST_ITEM (U"@@OTGrammar: To output Distributions...@")
  1137. LIST_ITEM (U"@@OTGrammar & Strings: Inputs to outputs...@")
  1138. LIST_ITEM (U"@@OTGrammar: Learn one...@")
  1139. LIST_ITEM (U"@@OTGrammar & 2 Strings: Learn...@")
  1140. NORMAL (U"You can view an #OTGrammar in an @OTGrammarEditor.")
  1141. MAN_END
  1142. MAN_BEGIN (U"OTGrammar: Generate inputs...", U"ppgb", 19981230)
  1143. INTRO (U"A command to create a @Strings object from a selected @OTGrammar.")
  1144. NORMAL (U"A practical grammar-specific implementation of the %%richness of the base%: "
  1145. "the inputs are drawn at random with equal probabilities from the inputs associated with the tableaus. "
  1146. "For an example, see @@OT learning 3.2. Data from another grammar@.")
  1147. ENTRY (U"Setting")
  1148. TAG (U"##Number of trials")
  1149. DEFINITION (U"the number of times a string will be drawn from the possible inputs to the grammar.")
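NORMAL (U"A minimal script example, drawing 1000 inputs from a selected grammar:")
CODE (U"inputs = Generate inputs: 1000")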
  1150. MAN_END
  1151. MAN_BEGIN (U"OTGrammar: Input to output...", U"ppgb", 20030916)
  1152. INTRO (U"A command to ask the selected @OTGrammar object to evaluate the candidates associated with a specified input form.")
  1153. NORMAL (U"See @@OT learning 2.8. Asking for one output@ for tutorial information.")
  1154. ENTRY (U"Settings")
  1155. TAG (U"##Input form")
  1156. DEFINITION (U"the input form whose surface form you want to know. If this string is not in the list "
  1157. "of the possible inputs of the selected OTGrammar, you will get an error message.")
  1158. TAG (U"##Noise# (standard value: 2.0)")
  1159. DEFINITION (U"the standard deviation of the noise added to the ranking value of every constraint during evaluation. "
  1160. "See @@OT learning 2.4. Evaluation@.")
  1161. MAN_END
  1162. MAN_BEGIN (U"OTGrammar: Input to outputs...", U"ppgb", 20030916)
  1163. INTRO (U"A command to ask the selected @OTGrammar object to evaluate a number of times the candidates associated "
  1164. "with a specified input form. The result is a @Distributions object.")
  1165. NORMAL (U"See @@OT learning 2.9. Output distributions@ for tutorial information and examples.")
  1166. ENTRY (U"Settings")
  1167. TAG (U"##Trials# (standard value: 1000)")
  1168. DEFINITION (U"the number of evaluations that you want to perform.")
  1169. TAG (U"##Noise# (standard value: 2.0)")
  1170. DEFINITION (U"the standard deviation of the noise added to the ranking value of every constraint during the evaluations. "
  1171. "See @@OT learning 2.4. Evaluation@.")
  1172. TAG (U"##Input form")
  1173. DEFINITION (U"the input form whose surface forms you want to measure. If this string is not in the list "
  1174. "of the possible inputs of the selected OTGrammar, you will get an error message.")
  1175. MAN_END
  1176. MAN_BEGIN (U"OTGrammar: Learn one...", U"ppgb", 20011120)
  1177. INTRO (U"Causes every selected @OTGrammar object to process one input/output pair "
  1178. "according to the Gradual Learning Algorithm "
  1179. "by @@Boersma (1998)@ and @@Boersma (2000)@. See @@OT learning 4. Learning an ordinal grammar@ "
  1180. "and @@OT learning 5. Learning a stochastic grammar@.")
  1181. MAN_END
  1182. MAN_BEGIN (U"OTGrammar: To output Distributions...", U"ppgb", 19981230)
  1183. INTRO (U"A command to ask the selected @OTGrammar object to evaluate a number of times the candidates associated "
  1184. "with every input form. The result is a @Distributions object. See @@OT learning 2.9. Output distributions@.")
  1185. MAN_END
  1186. MAN_BEGIN (U"OTGrammar & PairDistribution: Find positive weights...", U"ppgb", 20080331)
  1187. INTRO (U"A command to change the weights of the selected @OTGrammar on the basis of the language data "
  1188. "in the selected @PairDistribution.")
  1189. NORMAL (U"This command works only if the decision strategy of the selected OTGrammar is "
  1190. "HarmonicGrammar, LinearOT, PositiveHG, or ExponentialHG. Also, "
  1191. "the selected PairDistribution must contain exactly one non-zero-probability output for every possible input of the grammar.")
  1192. NORMAL (U"The procedure follows the linear programming method by @@Pater, Potts & Bhatt (2007)@. "
  1193. "This method tries to find a special correct weighting of the constraints, "
  1194. "namely one that minimizes the sum of the constraint weights.")
  1195. ENTRY (U"Settings")
  1196. TAG (U"##Weight floor# (standard value: 1.0)")
  1197. DEFINITION (U"After the command finishes, every weight will have at least this value.")
  1198. TAG (U"##Margin of separation# (standard value: 1.0)")
  1199. DEFINITION (U"After the command finishes, the harmony of every optimal (and correct) output candidate "
  1200. "will be at least this much greater than the harmony of any competitor in the same tableau.")
  1201. MAN_END
  1202. MAN_BEGIN (U"OTGrammar & Strings: Inputs to outputs...", U"ppgb", 19981230)
  1203. INTRO (U"An action that creates a @Strings object from a selected @OTGrammar and a selected @Strings.")
  1204. NORMAL (U"The selected Strings object is considered as a list of inputs to the OTGrammar grammar.")
  1205. ENTRY (U"Settings")
  1206. TAG (U"##Noise")
  1207. DEFINITION (U"the standard deviation of the noise that will be temporarily added to the ranking value at each evaluation.")
  1208. NORMAL (U"The resulting Strings object will contain the output string of the grammar for each of the input strings.")
  1209. NORMAL (U"See @@OT learning 3.2. Data from another grammar@.")
  1210. MAN_END
  1211. MAN_BEGIN (U"OTGrammar & 2 Strings: Learn...", U"ppgb", 20100331)
  1212. INTRO (U"Causes the selected @OTGrammar object to process a number of input/output pairs "
  1213. "according to the Gradual Learning Algorithm by @@Boersma (1997)@ and @@Boersma & Hayes (2001)@ "
  1214. "or with any other reranking strategies. See @@OT learning 4. Learning an ordinal grammar@ "
  1215. "and @@OT learning 5. Learning a stochastic grammar@.")
  1216. MAN_END
  1217. MAN_BEGIN (U"OTGrammarEditor", U"ppgb", 20030316)
  1218. INTRO (U"One of the @editors in Praat, for viewing and editing the grammar in an @OTGrammar object.")
  1219. NORMAL (U"See the @@OT learning@ tutorial for examples.")
  1220. ENTRY (U"Usage")
  1221. NORMAL (U"The menu command that you will probably use most often if you investigate variation, "
  1222. "is the ##Evaluate (noise 2.0)# command, which you can invoke from the #Edit menu or by pressing Command-2.")
  1223. NORMAL (U"This command performs a new evaluation with the current ranking values. Some noise is added to the "
  1224. "ranking values, so that the %#disharmonies of the constraint will change. This may cause a change in the "
  1225. "ranking order of the constraints, which in its turn may cause a different candidate to win in some tableaus.")
  1226. MAN_END
  1227. MAN_BEGIN (U"Robust Interpretive Parsing", U"ppgb", 20021105)
  1228. INTRO (U"The mapping from overt forms to surface forms in the acquisition model by @@Tesar & Smolensky (1998)@.")
  1229. NORMAL (U"In Praat, you can do robust interpretive parsing on any @OTGrammar object. "
  1230. "See @@OT learning 7. Learning from overt forms@.")
  1231. MAN_END
  1232. }
  1233. /* End of file manual_gram.cpp */