phystokens.py

# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt

"""Better tokenizing for coverage.py."""

import codecs
import keyword
import re
import sys
import token
import tokenize

from coverage import env
from coverage.backward import iternext
from coverage.misc import contract


def phys_tokens(toks):
    """Return all physical tokens, even line continuations.

    tokenize.generate_tokens() doesn't return a token for the backslash that
    continues lines. This wrapper provides those tokens so that we can
    re-create a faithful representation of the original source.

    Returns the same values as generate_tokens()

    """
    last_line = None
    last_lineno = -1
    last_ttype = None
    for ttype, ttext, (slineno, scol), (elineno, ecol), ltext in toks:
        if last_lineno != elineno:
            if last_line and last_line.endswith("\\\n"):
                # We are at the beginning of a new line, and the last line
                # ended with a backslash. We probably have to inject a
                # backslash token into the stream. Unfortunately, there's more
                # to figure out. This code::
                #
                #   usage = """\
                #   HEY THERE
                #   """
                #
                # triggers this condition, but the token text is::
                #
                #   '"""\\\nHEY THERE\n"""'
                #
                # so we need to figure out if the backslash is already in the
                # string token or not.
                inject_backslash = True
                if last_ttype == tokenize.COMMENT:
                    # Comments like this \
                    # should never result in a new token.
                    inject_backslash = False
                elif ttype == token.STRING:
                    if "\n" in ttext and ttext.split('\n', 1)[0][-1] == '\\':
                        # It's a multi-line string and the first line ends with
                        # a backslash, so we don't need to inject another.
                        inject_backslash = False
                if inject_backslash:
                    # Figure out what column the backslash is in.
                    ccol = len(last_line.split("\n")[-2]) - 1
                    # Yield the token, with a fake token type.
                    yield (
                        99999, "\\\n",
                        (slineno, ccol), (slineno, ccol+2),
                        last_line
                    )
        last_line = ltext
        last_ttype = ttype
        yield ttype, ttext, (slineno, scol), (elineno, ecol), ltext
        last_lineno = elineno
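
# Illustrative sketch (not part of the original module): a trailing backslash
# continuation, which tokenize.generate_tokens() swallows, comes back from
# phys_tokens() as a synthetic token with the fake type 99999.
#
#     >>> toks = generate_tokens(u"x = 1 + \\\n    2\n")
#     >>> any(ttype == 99999 for ttype, _, _, _, _ in phys_tokens(toks))
#     True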


@contract(source='unicode')
def source_token_lines(source):
    """Generate a series of lines, one for each line in `source`.

    Each line is a list of pairs, each pair is a token::

        [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ... ]

    Each pair has a token class, and the token text.

    If you concatenate all the token texts, and then join them with newlines,
    you should have your original `source` back, with two differences:
    trailing whitespace is not preserved, and a final line with no newline
    is indistinguishable from a final line with a newline.

    """
    ws_tokens = set([token.INDENT, token.DEDENT, token.NEWLINE, tokenize.NL])
    line = []
    col = 0

    source = source.expandtabs(8).replace('\r\n', '\n')
    tokgen = generate_tokens(source)

    for ttype, ttext, (_, scol), (_, ecol), _ in phys_tokens(tokgen):
        mark_start = True
        for part in re.split('(\n)', ttext):
            if part == '\n':
                yield line
                line = []
                col = 0
                mark_end = False
            elif part == '':
                mark_end = False
            elif ttype in ws_tokens:
                mark_end = False
            else:
                if mark_start and scol > col:
                    line.append(("ws", u" " * (scol - col)))
                    mark_start = False
                tok_class = tokenize.tok_name.get(ttype, 'xx').lower()[:3]
                if ttype == token.NAME and keyword.iskeyword(ttext):
                    tok_class = "key"
                line.append((tok_class, part))
                mark_end = True
            scol = 0
        if mark_end:
            col = ecol

    if line:
        yield line
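
# Illustrative sketch (not part of the original module): on Python 3, each
# physical line of source comes back as a list of (class, text) pairs, with
# keywords classed as 'key' and indentation as 'ws'.
#
#     >>> for line in source_token_lines(u"def hello():\n    return 1\n"):
#     ...     print(line)
#     [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ('op', ')'), ('op', ':')]
#     [('ws', '    '), ('key', 'return'), ('ws', ' '), ('num', '1')]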


class CachedTokenizer(object):
    """A one-element cache around tokenize.generate_tokens.

    When reporting, coverage.py tokenizes files twice, once to find the
    structure of the file, and once to syntax-color it. Tokenizing is
    expensive, and easily cached.

    This is a one-element cache so that our twice-in-a-row tokenizing doesn't
    actually tokenize twice.

    """
    def __init__(self):
        self.last_text = None
        self.last_tokens = None

    @contract(text='unicode')
    def generate_tokens(self, text):
        """A stand-in for `tokenize.generate_tokens`."""
        if text != self.last_text:
            self.last_text = text
            readline = iternext(text.splitlines(True))
            self.last_tokens = list(tokenize.generate_tokens(readline))
        return self.last_tokens

# Create our generate_tokens cache as a callable replacement function.
generate_tokens = CachedTokenizer().generate_tokens
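
# Illustrative sketch (not part of the original module): the cache is keyed on
# the exact text, so two consecutive calls with the same string return the very
# same list object instead of tokenizing again.
#
#     >>> first = generate_tokens(u"a = 1\n")
#     >>> second = generate_tokens(u"a = 1\n")
#     >>> first is second
#     True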


COOKIE_RE = re.compile(r"^[ \t]*#.*coding[:=][ \t]*([-\w.]+)", flags=re.MULTILINE)

@contract(source='bytes')
def _source_encoding_py2(source):
    """Determine the encoding for `source`, according to PEP 263.

    `source` is a byte string, the text of the program.

    Returns a string, the name of the encoding.

    """
    assert isinstance(source, bytes)

    # Do this so the detect_encode code we copied will work.
    readline = iternext(source.splitlines(True))

    # This is mostly code adapted from Py3.2's tokenize module.

    def _get_normal_name(orig_enc):
        """Imitates get_normal_name in tokenizer.c."""
        # Only care about the first 12 characters.
        enc = orig_enc[:12].lower().replace("_", "-")
        if re.match(r"^utf-8($|-)", enc):
            return "utf-8"
        if re.match(r"^(latin-1|iso-8859-1|iso-latin-1)($|-)", enc):
            return "iso-8859-1"
        return orig_enc

    # From detect_encode():
    # It detects the encoding from the presence of a UTF-8 BOM or an encoding
    # cookie as specified in PEP-0263. If both a BOM and a cookie are present,
    # but disagree, a SyntaxError will be raised. If the encoding cookie is an
    # invalid charset, raise a SyntaxError. Note that if a UTF-8 BOM is found,
    # 'utf-8-sig' is returned.

    # If no encoding is specified, then the default will be returned.
    default = 'ascii'

    bom_found = False
    encoding = None

    def read_or_stop():
        """Get the next source line, or ''."""
        try:
            return readline()
        except StopIteration:
            return ''

    def find_cookie(line):
        """Find an encoding cookie in `line`."""
        try:
            line_string = line.decode('ascii')
        except UnicodeDecodeError:
            return None

        matches = COOKIE_RE.findall(line_string)
        if not matches:
            return None
        encoding = _get_normal_name(matches[0])
        try:
            codec = codecs.lookup(encoding)
        except LookupError:
            # This behavior mimics the Python interpreter
            raise SyntaxError("unknown encoding: " + encoding)

        if bom_found:
            # codecs in 2.3 were raw tuples of functions, assume the best.
            codec_name = getattr(codec, 'name', encoding)
            if codec_name != 'utf-8':
                # This behavior mimics the Python interpreter
                raise SyntaxError('encoding problem: utf-8')
            encoding += '-sig'
        return encoding

    first = read_or_stop()
    if first.startswith(codecs.BOM_UTF8):
        bom_found = True
        first = first[3:]
        default = 'utf-8-sig'
    if not first:
        return default

    encoding = find_cookie(first)
    if encoding:
        return encoding

    second = read_or_stop()
    if not second:
        return default

    encoding = find_cookie(second)
    if encoding:
        return encoding

    return default


@contract(source='bytes')
def _source_encoding_py3(source):
    """Determine the encoding for `source`, according to PEP 263.

    `source` is a byte string: the text of the program.

    Returns a string, the name of the encoding.

    """
    readline = iternext(source.splitlines(True))
    return tokenize.detect_encoding(readline)[0]


if env.PY3:
    source_encoding = _source_encoding_py3
else:
    source_encoding = _source_encoding_py2
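
# Illustrative sketch (not part of the original module): a PEP 263 cookie in
# the first two lines decides the encoding; without one, the default applies
# ('ascii' on Python 2, 'utf-8' on Python 3).
#
#     >>> source_encoding(b"# -*- coding: iso-8859-1 -*-\nx = 1\n")
#     'iso-8859-1'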


@contract(source='unicode')
def compile_unicode(source, filename, mode):
    """Just like the `compile` builtin, but works on any Unicode string.

    Python 2's compile() builtin has a stupid restriction: if the source string
    is Unicode, then it may not have an encoding declaration in it. Why not?
    Who knows! It also decodes to utf8, and then tries to interpret those utf8
    bytes according to the encoding declaration. Why? Who knows!

    This function neuters the coding declaration, and compiles it.

    """
    source = neuter_encoding_declaration(source)
    if env.PY2 and isinstance(filename, unicode):
        filename = filename.encode(sys.getfilesystemencoding(), "replace")
    code = compile(source, filename, mode)
    return code


@contract(source='unicode', returns='unicode')
def neuter_encoding_declaration(source):
    """Return `source`, with any encoding declaration neutered."""
    source = COOKIE_RE.sub("# (deleted declaration)", source, count=2)
    return source
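
# Illustrative sketch (not part of the original module): neutering keeps the
# line layout intact but disarms the cookie, so Python 2's compile() accepts
# the Unicode source passed to compile_unicode().
#
#     >>> print(neuter_encoding_declaration(u"# coding: utf-8\nx = 1"))
#     # (deleted declaration)
#     x = 1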