// Copyright 2016 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto2";

// TODO(taku): Needs to use LITE RUNTIME in OSS release.
option optimize_for = LITE_RUNTIME;
option go_package = "./sentencepiece";

package sentencepiece;

// TrainerSpec encodes the various parameters for SentencePiece training.
// Next id: 55
message TrainerSpec {
  ///////////////////////////////////////////////////////////////////
  // General parameters
  //
  // Input corpus files.
  //  The trainer accepts the following two formats:
  //  A) Monolingual: plain text, one sentence per line.
  //  B) Bilingual:   TSV, source sentence <tab> target sentence
  //  When bilingual data is passed, a shared vocabulary model is built.
  //  Note that the input file must be a raw corpus, not a preprocessed corpus.
  //  The trainer only loads the first `input_sentence_size` sentences.
  repeated string input = 1;

  // Input corpus format:
  // "text": one-sentence-per-line text format (default)
  // "tsv":  sentence <tab> freq
  optional string input_format = 7;

  // Output model file prefix.
  // <model_prefix>.model and <model_prefix>.vocab are generated.
  optional string model_prefix = 2;

  // Model type. UNIGRAM is the default.
  enum ModelType {
    UNIGRAM = 1;  // Unigram language model with dynamic algorithm
    BPE = 2;      // Byte Pair Encoding
    WORD = 3;     // Delimited by whitespace
    CHAR = 4;     // Tokenizes into character sequence
  }
  optional ModelType model_type = 3 [default = UNIGRAM];

  // Vocabulary size. 8k is the default size.
  optional int32 vocab_size = 4 [default = 8000];
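
  // A minimal training sketch (not part of the schema): in the Python
  // bindings, TrainerSpec fields are passed as keyword arguments of the
  // same name. `corpus.txt` is a hypothetical path.
  //
  //   import sentencepiece as spm
  //
  //   spm.SentencePieceTrainer.train(
  //       input='corpus.txt',    # `input` field: one sentence per line
  //       model_prefix='m',      # writes m.model and m.vocab
  //       model_type='unigram',  # `model_type` field (unigram/bpe/word/char)
  //       vocab_size=8000)       # `vocab_size` field
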
  // List of the languages this model can accept.
  // Since the model is language-agnostic, this field is used as a reference.
  repeated string accept_language = 5;

  // Size of self-test samples, which are encoded in the model file.
  optional int32 self_test_sample_size = 6 [default = 0];

  // Whether to use the DP version of sentencepiece. Use it with the TSV
  // input format (requires precomputed `word <tab> count` rows to work).
  optional bool enable_differential_privacy = 50 [default = false];
  // Set the following parameters if you need the DP version of sentencepiece.
  // Standard deviation of the noise to add.
  optional float differential_privacy_noise_level = 51 [default = 0.0];
  // Clipping threshold to apply after adding noise. All words with a
  // frequency less than this value are dropped.
  optional uint64 differential_privacy_clipping_threshold = 52 [default = 0];
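
  // A hedged sketch (not part of the schema): enabling the DP variant from
  // Python, assuming the kwarg names mirror the field names above.
  // `counts.tsv` is a hypothetical `word <tab> count` file, and the
  // noise/clipping values are illustrative only.
  //
  //   import sentencepiece as spm
  //
  //   spm.SentencePieceTrainer.train(
  //       input='counts.tsv', input_format='tsv',
  //       model_prefix='m_dp', vocab_size=8000,
  //       enable_differential_privacy=True,
  //       differential_privacy_noise_level=1.0,
  //       differential_privacy_clipping_threshold=10)
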
  ///////////////////////////////////////////////////////////////////
  // Training parameters.
  //
  // Uses characters that cover the corpus with the ratio of
  // `character_coverage`. This parameter determines the basic alphabet of
  // sentence pieces. The remaining (1.0 - `character_coverage`) fraction of
  // characters is treated as UNK.
  // See also the required_chars field.
  optional float character_coverage = 10 [default = 0.9995];

  // Maximum number of sentences the trainer loads from the `input` files.
  // The trainer simply loads the `input` files in sequence, so
  // it is better to shuffle the input corpus randomly.
  optional uint64 input_sentence_size = 11 [default = 0];
  optional bool shuffle_input_sentence = 19 [default = true];

  // Maximum number of sentences used to make seed sentence pieces.
  // An extended suffix array is constructed to extract frequent
  // sub-strings from the corpus. This uses 20N of working space,
  // where N is the size of the corpus.
  optional int32 mining_sentence_size = 12 [deprecated = true];

  // Maximum number of sentences used to train sentence pieces.
  optional int32 training_sentence_size = 13 [deprecated = true];

  // The number of seed sentencepieces.
  // `seed_sentencepiece_size` must be larger than `vocab_size`.
  optional int32 seed_sentencepiece_size = 14 [default = 1000000];

  // In every EM sub-iteration, keeps the top
  // `shrinking_factor` * `current sentencepiece size` pieces with respect to
  // the loss of each sentence piece. This value must be smaller than 1.0.
  optional float shrinking_factor = 15 [default = 0.75];
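
  // Worked example of the pruning schedule with the defaults above: starting
  // from seed_sentencepiece_size = 1,000,000 candidates, each pruning step
  // keeps 75% of the pieces (shrinking_factor = 0.75), so the candidate set
  // shrinks roughly as 1,000,000 -> 750,000 -> 562,500 -> 421,875 -> ...
  // until it approaches the requested vocab_size.
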
  // The maximum sentence length in bytes. Sentences longer than
  // `max_sentence_length` are simply ignored.
  // Longer input tends to bring the following risks:
  //  * Overflow during EM training (unigram language model only)
  //  * Performance drop because of the O(n log n) cost of BPE
  optional int32 max_sentence_length = 18 [default = 4192];

  // Number of threads in the training.
  optional int32 num_threads = 16 [default = 16];

  // Number of EM sub-iterations.
  optional int32 num_sub_iterations = 17 [default = 2];

  ///////////////////////////////////////////////////////////////////
  // SentencePiece parameters which control the shapes of sentence pieces.
  //
  // Maximum length of a sentencepiece.
  optional int32 max_sentencepiece_length = 20 [default = 16];

  // Uses Unicode script to split sentence pieces.
  // When `split_by_unicode_script` is true, we do not allow a sentence piece
  // to include multiple Unicode scripts, e.g., "F1" is not a valid piece.
  // Exception: CJ characters (Hiragana/Katakana/Han) are all handled
  // as one script type, since a Japanese word can consist of multiple scripts.
  // This exception is always applied regardless of the accept-language
  // parameter.
  optional bool split_by_unicode_script = 21 [default = true];

  // When `split_by_number` is true, puts a boundary at number/non-number
  // transitions. If we want to treat "F1" as one token, set this flag
  // to false.
  optional bool split_by_number = 23 [default = true];

  // Uses whitespace to split sentence pieces.
  // When `split_by_whitespace` is false, we may have pieces containing
  // whitespace in the middle, e.g., "in_the".
  optional bool split_by_whitespace = 22 [default = true];

  // Adds the whitespace symbol (_) as a suffix instead of a prefix,
  // e.g., _hello => hello_. When `treat_whitespace_as_suffix` is true,
  // NormalizerSpec::add_dummy_prefix will add the dummy whitespace to the end
  // of the sentence.
  optional bool treat_whitespace_as_suffix = 24 [default = false];

  // Allows pieces that contain only whitespace, instead of whitespace
  // appearing only as a prefix or suffix of other pieces.
  optional bool allow_whitespace_only_pieces = 26 [default = false];

  // Splits all digits (0-9) into separate pieces.
  optional bool split_digits = 25 [default = false];
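
  // Illustrative effect of `split_digits` (a sketch, assuming a model `m`
  // trained with split_digits=True):
  //
  //   import sentencepiece as spm
  //
  //   sp = spm.SentencePieceProcessor(model_file='m.model')
  //   sp.encode('year 2023', out_type=str)
  //   # digits come out as separate pieces, e.g. [..., '2', '0', '2', '3']
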
  // Defines the pre-tokenization delimiter.
  // When specified, pieces crossing this delimiter are not included
  // in the vocab, and the delimiter string is virtually ignored
  // during training. This field allows constraints on the vocabulary
  // selection. Note that this field is only available in unigram mode.
  optional string pretokenization_delimiter = 53 [default = ""];

  ///////////////////////////////////////////////////////////////////
  // Vocabulary management
  //
  // Defines control symbols used as indicators to
  // change the behavior of the decoder. <s> and </s> are pre-defined.
  // We can use this field to encode various meta information,
  // including a language indicator in a multilingual model.
  // These symbols are not visible to users, but visible to
  // the decoder. Note that when the input sentence contains control symbols,
  // they are not treated as one token, but segmented into normal pieces.
  // Control symbols must be inserted independently from the segmentation.
  repeated string control_symbols = 30;

  // Defines user-defined symbols.
  // These symbols are added with extremely high scores
  // so they are always treated as one unique symbol in any context.
  // A typical usage of user_defined_symbols is as placeholders for
  // named entities.
  repeated string user_defined_symbols = 31;
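
  // A sketch of user-defined symbols (the symbol names are hypothetical):
  // pass them at training time; they then always surface as single pieces.
  //
  //   import sentencepiece as spm
  //
  //   spm.SentencePieceTrainer.train(
  //       input='corpus.txt', model_prefix='m', vocab_size=8000,
  //       user_defined_symbols=['<sep>', '<mask>'])
  //
  //   sp = spm.SentencePieceProcessor(model_file='m.model')
  //   sp.encode('a<sep>b', out_type=str)  # '<sep>' stays one piece
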
  // Defines required characters. Each UTF-8 character in this string is
  // included in the character set regardless of the character_coverage value.
  // Unlike user_defined_symbols, these characters have scores based on their
  // frequency in the input sentences, and the model can form subwords using
  // characters in this field.
  optional string required_chars = 36;

  // Decomposes unknown pieces into UTF-8 bytes.
  optional bool byte_fallback = 35 [default = false];
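
  // Illustrative behavior of `byte_fallback` (a sketch, not normative):
  // with byte_fallback=True, a character outside the vocabulary decomposes
  // into byte pieces such as '<0xE3>' instead of mapping to <unk>, so any
  // input remains losslessly encodable.
  //
  //   spm.SentencePieceTrainer.train(
  //       input='corpus.txt', model_prefix='m', vocab_size=8000,
  //       byte_fallback=True)
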
  // When creating the vocabulary file, defines whether or not to additionally
  // output the score for each piece.
  optional bool vocabulary_output_piece_score = 32 [default = true];

  // When `hard_vocab_limit` is true, `vocab_size` is treated as a hard limit,
  // and training crashes if the model cannot produce a vocab of exactly
  // `vocab_size` pieces. When `hard_vocab_limit` is false, `vocab_size` is
  // treated as a soft limit. Note that when model_type=char,
  // hard_vocab_limit = false is always assumed.
  optional bool hard_vocab_limit = 33 [default = true];

  // Uses all symbols for vocab extraction. This flag is valid only
  // if the model type is either CHAR or WORD.
  optional bool use_all_vocab = 34 [default = false];

  ///////////////////////////////////////////////////////////////////
  // Reserved special meta tokens.
  //  * An id of -1 means the token is not used.
  //  * unk_id must not be -1.
  //  * Ids must start with 0 and be contiguous.
  optional int32 unk_id = 40 [default = 0];   // <unk>
  optional int32 bos_id = 41 [default = 1];   // <s>
  optional int32 eos_id = 42 [default = 2];   // </s>
  optional int32 pad_id = 43 [default = -1];  // <pad> (padding)
  optional string unk_piece = 45 [default = "<unk>"];
  optional string bos_piece = 46 [default = "<s>"];
  optional string eos_piece = 47 [default = "</s>"];
  optional string pad_piece = 48 [default = "<pad>"];
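
  // A common configuration sketch (values are illustrative): frameworks that
  // batch with padding usually need a real pad id, which is disabled (-1) by
  // default.
  //
  //   import sentencepiece as spm
  //
  //   spm.SentencePieceTrainer.train(
  //       input='corpus.txt', model_prefix='m', vocab_size=8000,
  //       pad_id=3, pad_piece='<pad>')  # ids 0-2 keep <unk>, <s>, </s>
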
  // Encodes <unk> as U+2047 (DOUBLE QUESTION MARK),
  // since this character can be useful both for users and
  // developers: we can easily figure out that <unk> was emitted.
  optional string unk_surface = 44 [default = " \xE2\x81\x87 "];

  // Increases bit depth to allow unigram model training on large
  // (>10M sentences) corpora. A side-effect of enabling this flag
  // is increased memory usage.
  optional bool train_extremely_large_corpus = 49 [default = false];

  // Path to a seed sentencepieces file, with one tab-separated
  // seed sentencepiece <tab> frequency per line.
  optional string seed_sentencepieces_file = 54 [default = ""];

  // Customized extensions: the range of field numbers
  // is open to third-party extensions.
  extensions 200 to max;
}

// NormalizerSpec encodes the various parameters for string normalization.
message NormalizerSpec {
  // Name of the normalization rule.
  optional string name = 1;

  // Pre-compiled normalization rule created by the
  // Builder::GetPrecompiledCharsMap() or Builder::CompileCharsMap() methods.
  // Usually this field is set by the Builder::GetNormalizerSpec() method.
  optional bytes precompiled_charsmap = 2;

  // Adds a dummy whitespace at the beginning of the text in order to
  // treat "world" in "world" and "hello world" in the same way.
  optional bool add_dummy_prefix = 3 [default = true];

  // Removes leading, trailing, and duplicate internal whitespace.
  optional bool remove_extra_whitespaces = 4 [default = true];

  // Replaces whitespace with the meta symbol.
  // This field must be true to train a sentence piece model.
  optional bool escape_whitespaces = 5 [default = true];

  // Custom normalization rule file in TSV format.
  // https://github.com/google/sentencepiece/blob/master/doc/normalization.md
  // This field is only used in the SentencePieceTrainer::Train() method, which
  // compiles the rule into the binary rule stored in `precompiled_charsmap`.
  optional string normalization_rule_tsv = 6;
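
  // Worked example of the three flags above with their defaults: the input
  // "Hello   world " first has extra whitespace removed ("Hello world"),
  // gets a dummy prefix space added (" Hello world"), and has each space
  // escaped to U+2581, yielding "▁Hello▁world" before segmentation.
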
  // Customized extensions: the range of field numbers
  // is open to third-party extensions.
  extensions 200 to max;
}

// Proto to store samples for self-testing.
message SelfTestData {
  message Sample {
    optional string input = 1;
    optional string expected = 2;
  }
  repeated Sample samples = 1;
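
  // Illustrative text-format example of one sample (the piece strings are
  // hypothetical): `expected` holds the expected segmentation of `input`.
  //
  //   samples { input: "hello world" expected: "▁hello ▁world" }
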
  // Customized extensions: the range of field numbers
  // is open to third-party extensions.
  extensions 200 to max;
}

// ModelProto stores model parameters.
// SentencePieceProcessor is supposed to be self-contained.
// All settings/parameters which may change the behavior must be encoded
// in ModelProto.
message ModelProto {
  message SentencePiece {
    enum Type {
      NORMAL = 1;        // Normal symbol.
      UNKNOWN = 2;       // Unknown symbol. Only <unk> for now.
      CONTROL = 3;       // Control symbols: </s>, <s>, <2ja>, etc.
      USER_DEFINED = 4;  // User-defined symbols.
                         // A typical usage of USER_DEFINED symbols
                         // is as placeholders.
      BYTE = 6;          // Byte symbols. Used when `byte_fallback` is true.
      UNUSED = 5;        // This piece is not used.
    }
    optional string piece = 1;  // piece must not be empty.
    optional float score = 2;
    optional Type type = 3 [default = NORMAL];

    // Customized extensions: the range of field numbers
    // is open to third-party extensions.
    extensions 200 to max;
  }

  // Sentence pieces with scores.
  repeated SentencePiece pieces = 1;

  // Spec used to generate this model file.
  optional TrainerSpec trainer_spec = 2;

  // Spec for text normalization.
  optional NormalizerSpec normalizer_spec = 3;

  // Stores sample input and its expected segmentation to verify the model.
  optional SelfTestData self_test_data = 4;

  // Spec for text de-normalization.
  optional NormalizerSpec denormalizer_spec = 5;

  // Customized extensions: the range of field numbers
  // is open to third-party extensions.
  extensions 200 to max;
}
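
// A reading sketch (assuming the pip package ships the generated bindings as
// `sentencepiece.sentencepiece_model_pb2`): a trained .model file is a
// serialized ModelProto, so it can be inspected directly. `m.model` is a
// hypothetical path.
//
//   from sentencepiece import sentencepiece_model_pb2 as model_pb2
//
//   m = model_pb2.ModelProto()
//   with open('m.model', 'rb') as f:
//       m.ParseFromString(f.read())          # decode the serialized proto
//   print(m.trainer_spec.vocab_size)         # spec used at training time
//   print(m.pieces[0].piece, m.pieces[0].score)  # first piece and its score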