verbs.c

/* ------------------------------------------------------------------------- */
/*   "verbs" :  Manages actions and grammar tables; parses the directives    */
/*              Verb and Extend.                                              */
/*                                                                            */
/*   Part of Inform 6.33                                                      */
/*   copyright (c) Graham Nelson 1993 - 2014                                  */
/*                                                                            */
/* ------------------------------------------------------------------------- */

#include "header.h"

int grammar_version_number;          /* 1 for pre-Inform 6.06 table format */
int32 grammar_version_symbol;        /* Index of "Grammar__Version"
                                        within symbols table */

/* ------------------------------------------------------------------------- */
/*   Actions.                                                                 */
/* ------------------------------------------------------------------------- */
/*   Array defined below:                                                     */
/*                                                                            */
/*    int32 action_byte_offset[n]      The (byte) offset in the Z-machine     */
/*                                     code area of the ...Sub routine        */
/*                                     for action n.  (NB: This is left       */
/*                                     blank until the end of the             */
/*                                     compilation pass.)                     */
/*    int32 action_symbol[n]           The symbol table index of the n-th     */
/*                                     action's name.                         */
/* ------------------------------------------------------------------------- */

int no_actions,                      /* Number of actions made so far */
    no_fake_actions;                 /* Number of fake actions made so far */

/* ------------------------------------------------------------------------- */
/*   Adjectives.  (The term "adjective" is traditional; they are mainly      */
/*                 prepositions, such as "onto".)                             */
/* ------------------------------------------------------------------------- */
/*   Arrays defined below:                                                    */
/*                                                                            */
/*    int32 adjectives[n]               Byte address of dictionary entry      */
/*                                      for the nth adjective                 */
/*    dict_word adjective_sort_code[n]  Dictionary sort code of nth adj       */
/* ------------------------------------------------------------------------- */

int no_adjectives;                   /* Number of adjectives made so far */

/* ------------------------------------------------------------------------- */
/*   Verbs.  Note that Inform-verbs are not quite the same as English verbs: */
/*           for example the English verbs "take" and "drop" both normally   */
/*           correspond in a game's dictionary to the same Inform verb.  An  */
/*           Inform verb is essentially a list of grammar lines.             */
/* ------------------------------------------------------------------------- */
/*   Arrays defined below:                                                    */
/*                                                                            */
/*    verbt Inform_verbs[n]             The n-th grammar line sequence:       */
/*                                      see "header.h" for the definition     */
/*                                      of the typedef struct verbt           */
/*    int32 grammar_token_routine[n]    The byte offset from start of code    */
/*                                      area of the n-th one                  */
/* ------------------------------------------------------------------------- */

int no_Inform_verbs,                 /* Number of Inform-verbs made so far */
    no_grammar_token_routines;       /* Number of routines given in tokens */

/* ------------------------------------------------------------------------- */
/*   We keep a list of English verb-words known (e.g. "take" or "eat") and   */
/*   which Inform-verbs they correspond to.  (This list is needed for some   */
/*   of the grammar extension operations.)                                    */
/*   The format of this list is a sequence of variable-length records:       */
/*                                                                            */
/*     Byte offset to start of next record  (1 byte)                          */
/*     Inform verb number this word corresponds to  (2 bytes)                 */
/*     The English verb-word (reduced to lower case), null-terminated         */
/* ------------------------------------------------------------------------- */
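/*   For example, if the English verb-word "take" referred to Inform-verb    */
/*   number 3, its record would occupy 4 + strlen("take") = 8 bytes:          */
/*                                                                            */
/*     08  00 03  't' 'a' 'k' 'e'  00                                         */
/*                                                                            */
/*   (an illustrative sketch following register_verb() below, not a dump      */
/*   taken from an actual compilation).                                       */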
static char *English_verb_list,      /* First byte of first record */
            *English_verb_list_top;  /* Next byte free for new record */
static int English_verb_list_size;   /* Size of the list in bytes
                                        (redundant but convenient) */

/* ------------------------------------------------------------------------- */
/*   Arrays used by this file                                                 */
/* ------------------------------------------------------------------------- */

verbt   *Inform_verbs;
uchar   *grammar_lines;
int32    grammar_lines_top;
int      no_grammar_lines, no_grammar_tokens;

int32   *action_byte_offset,
        *action_symbol,
        *grammar_token_routine,
        *adjectives;
static uchar *adjective_sort_code;

/* ------------------------------------------------------------------------- */
/*   Tracing for compiler maintenance                                         */
/* ------------------------------------------------------------------------- */

extern void list_verb_table(void)
{   int i;
    for (i=0; i<no_Inform_verbs; i++)
        printf("Verb %2d has %d lines\n", i, Inform_verbs[i].lines);
}

/* ------------------------------------------------------------------------- */
/*   Actions.                                                                 */
/* ------------------------------------------------------------------------- */

static void new_action(char *b, int c)
{
    /*  Called whenever a new action (or fake action) is created (either
        by using make_action above, or the Fake_Action directive, or by
        the linker).  At present just a hook for some tracing code.  */

    if (printprops_switch)
        printf("Action '%s' is numbered %d\n",b,c);
}

/* Note that fake actions are numbered from a high base point upwards;
   real actions are numbered from 0 upward in GV2.  */
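/* Illustration (following the assignments in the code here, not an
   authoritative table): under GV2 the first Fake_Action declared is given
   the value 4096, the next 4097, and so on, while real actions receive
   0, 1, 2, ... in order of creation.  Under GV1 fake actions begin at 256
   instead.  */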
extern void make_fake_action(void)
{   int i;
    char action_sub[MAX_IDENTIFIER_LENGTH+4];
    debug_location_beginning beginning_debug_location =
        get_token_location_beginning();

    get_next_token();
    if (token_type != SYMBOL_TT)
    {   discard_token_location(beginning_debug_location);
        ebf_error("new fake action name", token_text);
        panic_mode_error_recovery(); return;
    }

    sprintf(action_sub, "%s__A", token_text);
    i = symbol_index(action_sub, -1);

    if (!(sflags[i] & UNKNOWN_SFLAG))
    {   discard_token_location(beginning_debug_location);
        ebf_error("new fake action name", token_text);
        panic_mode_error_recovery(); return;
    }

    assign_symbol(i, ((grammar_version_number==1)?256:4096)+no_fake_actions++,
        FAKE_ACTION_T);

    new_action(token_text, i);

    if (debugfile_switch)
    {   debug_file_printf("<fake-action>");
        debug_file_printf("<identifier>##%s</identifier>", token_text);
        debug_file_printf("<value>%d</value>", svals[i]);
        get_next_token();
        write_debug_locations
            (get_token_location_end(beginning_debug_location));
        put_token_back();
        debug_file_printf("</fake-action>");
    }

    return;
}

extern assembly_operand action_of_name(char *name)
{
    /*  Returns the action number of the given name, creating it as a new
        action name if it isn't already known as such.  */

    char action_sub[MAX_IDENTIFIER_LENGTH+4];
    int j;
    assembly_operand AO;

    sprintf(action_sub, "%s__A", name);
    j = symbol_index(action_sub, -1);

    if (stypes[j] == FAKE_ACTION_T)
    {   AO.value = svals[j];
        AO.marker = 0;
        if (!glulx_mode)
            AO.type = LONG_CONSTANT_OT;
        else
            set_constant_ot(&AO);
        sflags[j] |= USED_SFLAG;
        return AO;
    }

    if (sflags[j] & UNKNOWN_SFLAG)
    {
        if (no_actions>=MAX_ACTIONS) memoryerror("MAX_ACTIONS",MAX_ACTIONS);
        new_action(name, no_actions);
        action_symbol[no_actions] = j;
        assign_symbol(j, no_actions++, CONSTANT_T);
        sflags[j] |= ACTION_SFLAG;
    }

    sflags[j] |= USED_SFLAG;

    AO.value = svals[j];
    AO.marker = ACTION_MV;
    if (!glulx_mode) {
        AO.type = (module_switch)?LONG_CONSTANT_OT:SHORT_CONSTANT_OT;
        if (svals[j] >= 256) AO.type = LONG_CONSTANT_OT;
    }
    else {
        AO.type = CONSTANT_OT;
    }

    return AO;
}

extern void find_the_actions(void)
{   int i; int32 j;
    char action_name[MAX_IDENTIFIER_LENGTH];
    char action_sub[MAX_IDENTIFIER_LENGTH+4];

    if (module_switch)
        for (i=0; i<no_actions; i++) action_byte_offset[i] = 0;
    else
    for (i=0; i<no_actions; i++)
    {   strcpy(action_name, (char *) symbs[action_symbol[i]]);
        action_name[strlen(action_name) - 3] = '\0'; /* remove "__A" */
        strcpy(action_sub, action_name);
        strcat(action_sub, "Sub");
        j = symbol_index(action_sub, -1);
        if (sflags[j] & UNKNOWN_SFLAG)
        {
            error_named_at("No ...Sub action routine found for action:",
                action_name, slines[action_symbol[i]]);
        }
        else
        if (stypes[j] != ROUTINE_T)
        {
            error_named_at("No ...Sub action routine found for action:",
                action_name, slines[action_symbol[i]]);
            error_named_at("-- ...Sub symbol found, but not a routine:",
                action_sub, slines[j]);
        }
        else
        {   action_byte_offset[i] = svals[j];
            sflags[j] |= USED_SFLAG;
        }
    }
}

/* ------------------------------------------------------------------------- */
/*   Adjectives.                                                              */
/* ------------------------------------------------------------------------- */

static int make_adjective(char *English_word)
{
    /*  Returns adjective number of the English word supplied, creating
        a new adjective number if need be.

        Note that (partly for historical reasons) adjectives are numbered
        from 0xff downwards.  (And partly to make them stand out as tokens.)

        This routine is used only in grammar version 1: the corresponding
        table is left empty in GV2.  */

    int i;
    uchar new_sort_code[MAX_DICT_WORD_BYTES];

    if (no_adjectives >= MAX_ADJECTIVES)
        memoryerror("MAX_ADJECTIVES", MAX_ADJECTIVES);

    dictionary_prepare(English_word, new_sort_code);
    for (i=0; i<no_adjectives; i++)
        if (compare_sorts(new_sort_code,
                adjective_sort_code+i*DICT_WORD_BYTES) == 0)
            return(0xff-i);

    adjectives[no_adjectives]
        = dictionary_add(English_word,8,0,0xff-no_adjectives);
    copy_sorts(adjective_sort_code+no_adjectives*DICT_WORD_BYTES,
        new_sort_code);

    return(0xff-no_adjectives++);
}
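/* For instance (a sketch of the numbering above, not compiler output):
   the first adjective made, say "onto", is numbered 0xff, the next 0xfe,
   and so on downwards; asking for "onto" again simply returns its
   existing number 0xff.  */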
/* ------------------------------------------------------------------------- */
/*   Parsing routines.                                                        */
/* ------------------------------------------------------------------------- */

static int make_parsing_routine(int32 routine_address)
{
    /*  This routine is used only in grammar version 1: the corresponding
        table is left empty in GV2.  */

    int l;
    for (l=0; l<no_grammar_token_routines; l++)
        if (grammar_token_routine[l] == routine_address)
            return l;

    grammar_token_routine[l] = routine_address;
    return(no_grammar_token_routines++);
}

/* ------------------------------------------------------------------------- */
/*   The English-verb list.                                                   */
/* ------------------------------------------------------------------------- */

static int find_or_renumber_verb(char *English_verb, int *new_number)
{
    /*  If new_number is null, returns the Inform-verb number which the
        given English verb causes, or -1 if the given verb is not in the
        dictionary.

        If new_number is non-null, renumbers the Inform-verb number which
        English_verb matches in English_verb_list to account for the case
        when we are extending a verb.  Returns 0 if successful, or -1 if
        the given verb is not in the dictionary (which shouldn't happen as
        get_verb has already run).  */

    char *p;
    p=English_verb_list;
    while (p < English_verb_list_top)
    {   if (strcmp(English_verb, p+3) == 0)
        {   if (new_number)
            {   p[1] = (*new_number)/256;
                p[2] = (*new_number)%256;
                return 0;
            }
            return(256*((uchar)p[1]))+((uchar)p[2]);
        }
        p=p+(uchar)p[0];
    }
    return(-1);
}

static void register_verb(char *English_verb, int number)
{
    /*  Registers a new English verb as referring to the given Inform-verb
        number.  (See comments above for format of the list.)  */

    if (find_or_renumber_verb(English_verb, NULL) != -1)
    {   error_named("Two different verb definitions refer to", English_verb);
        return;
    }

    English_verb_list_size += strlen(English_verb)+4;
    if (English_verb_list_size >= MAX_VERBSPACE)
        memoryerror("MAX_VERBSPACE", MAX_VERBSPACE);

    English_verb_list_top[0] = 4+strlen(English_verb);
    English_verb_list_top[1] = number/256;
    English_verb_list_top[2] = number%256;
    strcpy(English_verb_list_top+3, English_verb);
    English_verb_list_top += English_verb_list_top[0];
}

static int get_verb(void)
{
    /*  Look at the last-read token: if it's the name of an English verb
        understood by Inform, in double-quotes, then return the Inform-verb
        that word refers to: otherwise give an error and return -1.  */

    int j;

    if ((token_type == DQ_TT) || (token_type == SQ_TT))
    {   j = find_or_renumber_verb(token_text, NULL);
        if (j==-1)
            error_named("There is no previous grammar for the verb",
                token_text);
        return j;
    }

    ebf_error("an English verb in quotes", token_text);

    return -1;
}

/* ------------------------------------------------------------------------- */
/*   Grammar lines for Verb/Extend directives.                                */
/* ------------------------------------------------------------------------- */

static int grammar_line(int verbnum, int line)
{
    /*  Parse a grammar line, to be written into grammar_lines[mark] onward.

        Syntax: * <token1> ... <token-n> -> <action>

        is compiled to a table in the form:

            <action number : word>
            <token 1> ... <token n> <ENDIT>

        where <ENDIT> is the byte 15, and each <token> is 3 bytes long.

        If grammar_version_number is 1, the token holds

            <bytecode> 00 00

        and otherwise a GV2 token.

        Return TRUE if grammar continues after the line, FALSE if the
        directive comes to an end.  */
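    /*  Worked illustration (a sketch from the code below, not compiler
        output): under GV2 on the Z-machine, and assuming the action ##Take
        has been assigned action number 5, the line

            * multi -> Take

        is laid down as the six bytes  00 05  01 00 02  0F  -- that is,
        the action word 5, a single 'multi' token (bytecode 1, wordcode 2),
        and the ENDIT byte 15.  */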
    int j, bytecode, mark; int32 wordcode;
    int grammar_token, slash_mode, last_was_slash;
    int reverse_action, TOKEN_SIZE;
    debug_location_beginning beginning_debug_location =
        get_token_location_beginning();

    get_next_token();
    if ((token_type == SEP_TT) && (token_value == SEMICOLON_SEP))
    {   discard_token_location(beginning_debug_location);
        return FALSE;
    }
    if (!((token_type == SEP_TT) && (token_value == TIMES_SEP)))
    {   discard_token_location(beginning_debug_location);
        ebf_error("'*' divider", token_text);
        panic_mode_error_recovery();
        return FALSE;
    }

    /*  Have we run out of lines or token space?  */

    if (line >= MAX_LINES_PER_VERB)
    {   discard_token_location(beginning_debug_location);
        error("Too many lines of grammar for verb. This maximum is built \
into Inform, so suggest rewriting grammar using general parsing routines");
        return(FALSE);
    }

    /*  Internally, a line can be up to 3*32 + 1 + 2 = 99 bytes long  */
    /*  In Glulx, that's 5*32 + 4 = 164 bytes  */

    mark = grammar_lines_top;
    if (!glulx_mode) {
        if (mark + 100 >= MAX_LINESPACE)
        {   discard_token_location(beginning_debug_location);
            memoryerror("MAX_LINESPACE", MAX_LINESPACE);
        }
    }
    else {
        if (mark + 165 >= MAX_LINESPACE)
        {   discard_token_location(beginning_debug_location);
            memoryerror("MAX_LINESPACE", MAX_LINESPACE);
        }
    }

    Inform_verbs[verbnum].l[line] = mark;
    if (!glulx_mode) {
        mark = mark + 2;
        TOKEN_SIZE = 3;
    }
    else {
        mark = mark + 3;
        TOKEN_SIZE = 5;
    }

    grammar_token = 0; last_was_slash = TRUE; slash_mode = FALSE;
    no_grammar_lines++;

    do
    {   get_next_token();
        bytecode = 0; wordcode = 0;
        if ((token_type == SEP_TT) && (token_value == SEMICOLON_SEP))
        {   discard_token_location(beginning_debug_location);
            ebf_error("'->' clause", token_text);
            return FALSE;
        }
        if ((token_type == SEP_TT) && (token_value == ARROW_SEP))
        {   if (last_was_slash && (grammar_token>0))
                ebf_error("grammar token", token_text);
            break;
        }

        if (!last_was_slash) slash_mode = FALSE;
        if ((token_type == SEP_TT) && (token_value == DIVIDE_SEP))
        {   if (grammar_version_number == 1)
                error("'/' can only be used with Library 6/3 or later");
            if (last_was_slash)
                ebf_error("grammar token or '->'", token_text);
            else
            {   last_was_slash = TRUE;
                slash_mode = TRUE;
                if (((grammar_lines[mark-TOKEN_SIZE]) & 0x0f) != 2)
                    error("'/' can only be applied to prepositions");
                grammar_lines[mark-TOKEN_SIZE] |= 0x20;
                continue;
            }
        }
        else last_was_slash = FALSE;

        if ((token_type == DQ_TT) || (token_type == SQ_TT))
        {   if (grammar_version_number == 1)
                bytecode = make_adjective(token_text);
            else
            {   bytecode = 0x42;
                wordcode = dictionary_add(token_text, 8, 0, 0);
            }
        }
        else if ((token_type==DIR_KEYWORD_TT)&&(token_value==NOUN_DK))
        {   get_next_token();
            if ((token_type == SEP_TT) && (token_value == SETEQUALS_SEP))
            {
                /*  noun = <routine>  */

                get_next_token();
                if ((token_type != SYMBOL_TT)
                    || (stypes[token_value] != ROUTINE_T))
                {   discard_token_location(beginning_debug_location);
                    ebf_error("routine name after 'noun='", token_text);
                    panic_mode_error_recovery();
                    return FALSE;
                }
                if (grammar_version_number == 1)
                    bytecode
                        = 16 + make_parsing_routine(svals[token_value]);
                else
                {   bytecode = 0x83;
                    wordcode = svals[token_value];
                }
                sflags[token_value] |= USED_SFLAG;
            }
            else
            {   put_token_back();
                if (grammar_version_number == 1) bytecode=0;
                else { bytecode = 1; wordcode = 0; }
            }
        }
        else if ((token_type==DIR_KEYWORD_TT)&&(token_value==HELD_DK))
        {   if (grammar_version_number==1) bytecode=1;
            else { bytecode=1; wordcode=1; } }
        else if ((token_type==DIR_KEYWORD_TT)&&(token_value==MULTI_DK))
        {   if (grammar_version_number==1) bytecode=2;
            else { bytecode=1; wordcode=2; } }
        else if ((token_type==DIR_KEYWORD_TT)&&(token_value==MULTIHELD_DK))
        {   if (grammar_version_number==1) bytecode=3;
            else { bytecode=1; wordcode=3; } }
        else if ((token_type==DIR_KEYWORD_TT)&&(token_value==MULTIEXCEPT_DK))
        {   if (grammar_version_number==1) bytecode=4;
            else { bytecode=1; wordcode=4; } }
        else if ((token_type==DIR_KEYWORD_TT)&&(token_value==MULTIINSIDE_DK))
        {   if (grammar_version_number==1) bytecode=5;
            else { bytecode=1; wordcode=5; } }
        else if ((token_type==DIR_KEYWORD_TT)&&(token_value==CREATURE_DK))
        {   if (grammar_version_number==1) bytecode=6;
            else { bytecode=1; wordcode=6; } }
        else if ((token_type==DIR_KEYWORD_TT)&&(token_value==SPECIAL_DK))
        {   if (grammar_version_number==1) bytecode=7;
            else { bytecode=1; wordcode=7; } }
        else if ((token_type==DIR_KEYWORD_TT)&&(token_value==NUMBER_DK))
        {   if (grammar_version_number==1) bytecode=8;
            else { bytecode=1; wordcode=8; } }
        else if ((token_type==DIR_KEYWORD_TT)&&(token_value==TOPIC_DK))
        {   if (grammar_version_number==1)
                error("The 'topic' token is only available if you \
are using Library 6/3 or later");
            else { bytecode=1; wordcode=9; } }
        else if ((token_type==DIR_KEYWORD_TT)&&(token_value==SCOPE_DK))
        {
            /*  scope = <routine>  */

            get_next_token();
            if (!((token_type==SEP_TT)&&(token_value==SETEQUALS_SEP)))
            {   discard_token_location(beginning_debug_location);
                ebf_error("'=' after 'scope'", token_text);
                panic_mode_error_recovery();
                return FALSE;
            }

            get_next_token();
            if ((token_type != SYMBOL_TT)
                || (stypes[token_value] != ROUTINE_T))
            {   discard_token_location(beginning_debug_location);
                ebf_error("routine name after 'scope='", token_text);
                panic_mode_error_recovery();
                return FALSE;
            }

            if (grammar_version_number == 1)
                bytecode = 80 +
                    make_parsing_routine(svals[token_value]);
            else { bytecode = 0x85; wordcode = svals[token_value]; }
            sflags[token_value] |= USED_SFLAG;
        }
        else if ((token_type == SEP_TT) && (token_value == SETEQUALS_SEP))
        {   discard_token_location(beginning_debug_location);
            error("'=' is only legal here as 'noun=Routine'");
            panic_mode_error_recovery();
            return FALSE;
        }
        else {   /*  <attribute> or <general-parsing-routine> tokens  */

            if ((token_type != SYMBOL_TT)
                || ((stypes[token_value] != ATTRIBUTE_T)
                    && (stypes[token_value] != ROUTINE_T)))
            {   discard_token_location(beginning_debug_location);
                error_named("No such grammar token as", token_text);
                panic_mode_error_recovery();
                return FALSE;
            }
            if (stypes[token_value]==ATTRIBUTE_T)
            {   if (grammar_version_number == 1)
                    bytecode = 128 + svals[token_value];
                else { bytecode = 4; wordcode = svals[token_value]; }
            }
            else
            {   if (grammar_version_number == 1)
                    bytecode = 48 +
                        make_parsing_routine(svals[token_value]);
                else { bytecode = 0x86; wordcode = svals[token_value]; }
            }
            sflags[token_value] |= USED_SFLAG;
        }

        grammar_token++; no_grammar_tokens++;
        if ((grammar_version_number == 1) && (grammar_token > 6))
        {   if (grammar_token == 7)
                warning("Grammar line cut short: you can only have up to 6 \
tokens in any line (unless you're compiling with library 6/3 or later)");
        }
        else
        {   if (slash_mode)
            {   if (bytecode != 0x42)
                    error("'/' can only be applied to prepositions");
                bytecode |= 0x10;
            }
            grammar_lines[mark++] = bytecode;
            if (!glulx_mode) {
                grammar_lines[mark++] = wordcode/256;
                grammar_lines[mark++] = wordcode%256;
            }
            else {
                grammar_lines[mark++] = ((wordcode >> 24) & 0xFF);
                grammar_lines[mark++] = ((wordcode >> 16) & 0xFF);
                grammar_lines[mark++] = ((wordcode >> 8) & 0xFF);
                grammar_lines[mark++] = ((wordcode) & 0xFF);
            }
        }

    } while (TRUE);

    grammar_lines[mark++] = 15;
    grammar_lines_top = mark;

    dont_enter_into_symbol_table = TRUE;
    get_next_token();
    dont_enter_into_symbol_table = FALSE;

    if (token_type != DQ_TT)
    {   discard_token_location(beginning_debug_location);
        ebf_error("name of new or existing action", token_text);
        panic_mode_error_recovery();
        return FALSE;
    }

    {   assembly_operand AO = action_of_name(token_text);
        j = AO.value;
        if (j >= ((grammar_version_number==1)?256:4096))
            error_named("This is a fake action, not a real one:", token_text);
    }

    reverse_action = FALSE;

    get_next_token();
    if ((token_type == DIR_KEYWORD_TT) && (token_value == REVERSE_DK))
    {   if (grammar_version_number == 1)
            error("'reverse' actions can only be used with \
Library 6/3 or later");
        reverse_action = TRUE;
    }
    else put_token_back();

    mark = Inform_verbs[verbnum].l[line];

    if (debugfile_switch)
    {   debug_file_printf("<table-entry>");
        debug_file_printf("<type>grammar line</type>");
        debug_file_printf("<address>");
        write_debug_grammar_backpatch(mark);
        debug_file_printf("</address>");
        debug_file_printf("<end-address>");
        write_debug_grammar_backpatch(grammar_lines_top);
        debug_file_printf("</end-address>");
        write_debug_locations
            (get_token_location_end(beginning_debug_location));
        debug_file_printf("</table-entry>");
    }

    if (!glulx_mode) {
        if (reverse_action)
            j = j + 0x400;
        grammar_lines[mark++] = j/256;
        grammar_lines[mark++] = j%256;
    }
    else {
        grammar_lines[mark++] = ((j >> 8) & 0xFF);
        grammar_lines[mark++] = ((j) & 0xFF);
        grammar_lines[mark++] = (reverse_action ? 1 : 0);
    }

    return TRUE;
}

/* ------------------------------------------------------------------------- */
/*   The Verb directive:                                                      */
/*                                                                            */
/*       Verb [meta] "word-1" ... "word-n" | = "existing-English-verb"        */
/*                                         | <grammar-line-1> ... <g-line-n>  */
/*                                                                            */
/* ------------------------------------------------------------------------- */
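/*   For example (an illustrative directive, not quoted from any particular  */
/*   library file):                                                           */
/*                                                                            */
/*       Verb meta "pray"                                                     */
/*           * -> Pray;                                                       */
/*       Verb "take" "get"                                                    */
/*           * multi -> Take;                                                 */
/*                                                                            */
/*   "pray", "take" and "get" go into the dictionary as verbs, while Pray     */
/*   and Take name the actions reached by the grammar lines.                  */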
extern void make_verb(void)
{
    /*  Parse an entire Verb ... directive.  */

    int Inform_verb, meta_verb_flag=FALSE, verb_equals_form=FALSE;

    char *English_verbs_given[32]; int no_given = 0, i;

    directive_keywords.enabled = TRUE;

    get_next_token();

    if ((token_type == DIR_KEYWORD_TT) && (token_value == META_DK))
    {   meta_verb_flag = TRUE;
        get_next_token();
    }

    while ((token_type == DQ_TT) || (token_type == SQ_TT))
    {   English_verbs_given[no_given++] = token_text;
        get_next_token();
    }

    if (no_given == 0)
    {   ebf_error("English verb in quotes", token_text);
        panic_mode_error_recovery(); return;
    }

    if ((token_type == SEP_TT) && (token_value == SETEQUALS_SEP))
    {   verb_equals_form = TRUE;
        get_next_token();
        Inform_verb = get_verb();
        if (Inform_verb == -1) return;
        get_next_token();
        if (!((token_type == SEP_TT) && (token_value == SEMICOLON_SEP)))
            ebf_error("';' after English verb", token_text);
    }
    else
    {   Inform_verb = no_Inform_verbs;
        if (no_Inform_verbs == MAX_VERBS)
            memoryerror("MAX_VERBS",MAX_VERBS);
    }

    for (i=0; i<no_given; i++)
    {   dictionary_add(English_verbs_given[i],
            0x41 + ((meta_verb_flag)?0x02:0x00),
            (glulx_mode)?(0xffff-Inform_verb):(0xff-Inform_verb), 0);
        register_verb(English_verbs_given[i], Inform_verb);
    }

    if (!verb_equals_form)
    {   int lines = 0;
        put_token_back();
        while (grammar_line(no_Inform_verbs, lines++)) ;
        Inform_verbs[no_Inform_verbs++].lines = --lines;
    }

    directive_keywords.enabled = FALSE;
}

/* ------------------------------------------------------------------------- */
/*   The Extend directive:                                                    */
/*                                                                            */
/*      Extend | only "verb-1" ... "verb-n" |             <grammar-lines>     */
/*             | "verb"                     | "replace"                       */
/*                                          | "first"                        */
/*                                          | "last"                         */
/*                                                                            */
/* ------------------------------------------------------------------------- */
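/*   For example (illustrative only, not quoted from any library file):      */
/*                                                                            */
/*       Extend "take" first                                                  */
/*           * "everything" -> Take;                                          */
/*       Extend only "get"                                                    */
/*           * "going" -> Exit;                                               */
/*                                                                            */
/*   The first form adds a grammar line before "take"'s existing lines;       */
/*   the second splits "get" off onto a fresh copy of its Inform-verb (so     */
/*   any other synonyms keep the old grammar) and then adds to that copy.     */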
#define EXTEND_REPLACE 1
#define EXTEND_FIRST   2
#define EXTEND_LAST    3

extern void extend_verb(void)
{
    /*  Parse an entire Extend ... directive.  */

    int Inform_verb, k, l, lines, extend_mode;

    directive_keywords.enabled = TRUE;
    directives.enabled = FALSE;

    get_next_token();
    if ((token_type == DIR_KEYWORD_TT) && (token_value == ONLY_DK))
    {   l = -1;
        if (no_Inform_verbs == MAX_VERBS)
            memoryerror("MAX_VERBS", MAX_VERBS);
        while (get_next_token(),
               ((token_type == DQ_TT) || (token_type == SQ_TT)))
        {   Inform_verb = get_verb();
            if (Inform_verb == -1) return;
            if ((l!=-1) && (Inform_verb!=l))
                warning_named("Verb disagrees with previous verbs:",
                    token_text);
            l = Inform_verb;
            dictionary_set_verb_number(token_text,
                (glulx_mode)?(0xffff-no_Inform_verbs):(0xff-no_Inform_verbs));
            /* make call to renumber verb in English_verb_list too */
            if (find_or_renumber_verb(token_text, &no_Inform_verbs) == -1)
                warning_named("Verb to extend not found in English_verb_list:",
                    token_text);
        }

        /*  Copy the old Inform-verb into a new one which the list of
            English-verbs given have had their dictionary entries modified
            to point to  */

        Inform_verbs[no_Inform_verbs] = Inform_verbs[Inform_verb];
        Inform_verb = no_Inform_verbs++;
    }
    else
    {   Inform_verb = get_verb();
        if (Inform_verb == -1) return;
        get_next_token();
    }

    /*  Inform_verb now contains the number of the Inform-verb to extend...  */

    extend_mode = EXTEND_LAST;
    if ((token_type == SEP_TT) && (token_value == TIMES_SEP))
        put_token_back();
    else
    {   extend_mode = 0;
        if ((token_type == DIR_KEYWORD_TT) && (token_value == REPLACE_DK))
            extend_mode = EXTEND_REPLACE;
        if ((token_type == DIR_KEYWORD_TT) && (token_value == FIRST_DK))
            extend_mode = EXTEND_FIRST;
        if ((token_type == DIR_KEYWORD_TT) && (token_value == LAST_DK))
            extend_mode = EXTEND_LAST;

        if (extend_mode==0)
        {   ebf_error("'replace', 'last', 'first' or '*'", token_text);
            extend_mode = EXTEND_LAST;
        }
    }

    l = Inform_verbs[Inform_verb].lines;
    lines = 0;
    if (extend_mode == EXTEND_LAST) lines=l;

    do
    {   if (extend_mode == EXTEND_FIRST)
            for (k=l; k>0; k--)
                 Inform_verbs[Inform_verb].l[k+lines]
                     = Inform_verbs[Inform_verb].l[k-1+lines];
    } while (grammar_line(Inform_verb, lines++));

    if (extend_mode == EXTEND_FIRST)
    {   Inform_verbs[Inform_verb].lines = l+lines-1;
        for (k=0; k<l; k++)
            Inform_verbs[Inform_verb].l[k+lines-1]
                = Inform_verbs[Inform_verb].l[k+lines];
    }
    else Inform_verbs[Inform_verb].lines = --lines;

    directive_keywords.enabled = FALSE;
    directives.enabled = TRUE;
}

/* ========================================================================= */
/*   Data structure management routines                                      */
/* ------------------------------------------------------------------------- */

extern void init_verbs_vars(void)
{
    no_fake_actions = 0;
    no_actions = 0;
    no_grammar_lines = 0;
    no_grammar_tokens = 0;
    English_verb_list_size = 0;

    Inform_verbs = NULL;
    action_byte_offset = NULL;
    grammar_token_routine = NULL;
    adjectives = NULL;
    adjective_sort_code = NULL;
    English_verb_list = NULL;

    if (!glulx_mode)
        grammar_version_number = 1;
    else
        grammar_version_number = 2;
}

extern void verbs_begin_pass(void)
{
    no_Inform_verbs=0; no_adjectives=0;
    no_grammar_token_routines=0;
    no_actions=0;

    no_fake_actions=0;
    grammar_lines_top = 0;
}

extern void verbs_allocate_arrays(void)
{
    Inform_verbs          = my_calloc(sizeof(verbt), MAX_VERBS, "verbs");
    grammar_lines         = my_malloc(MAX_LINESPACE, "grammar lines");
    action_byte_offset    = my_calloc(sizeof(int32), MAX_ACTIONS, "actions");
    action_symbol         = my_calloc(sizeof(int32), MAX_ACTIONS,
                                "action symbols");
    grammar_token_routine = my_calloc(sizeof(int32), MAX_ACTIONS,
                                "grammar token routines");
    adjectives            = my_calloc(sizeof(int32), MAX_ADJECTIVES,
                                "adjectives");
    adjective_sort_code   = my_calloc(DICT_WORD_BYTES, MAX_ADJECTIVES,
                                "adjective sort codes");

    English_verb_list     = my_malloc(MAX_VERBSPACE, "register of verbs");
    English_verb_list_top = English_verb_list;
}

extern void verbs_free_arrays(void)
{
    my_free(&Inform_verbs, "verbs");
    my_free(&grammar_lines, "grammar lines");
    my_free(&action_byte_offset, "actions");
    my_free(&action_symbol, "action symbols");
    my_free(&grammar_token_routine, "grammar token routines");
    my_free(&adjectives, "adjectives");
    my_free(&adjective_sort_code, "adjective sort codes");
    my_free(&English_verb_list, "register of verbs");
}

/* ========================================================================= */