media/webrtc/trunk/testing/gtest/scripts/pump.py

changeset 0
6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/media/webrtc/trunk/testing/gtest/scripts/pump.py	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,855 @@
     1.4 +#!/usr/bin/env python
     1.5 +#
     1.6 +# Copyright 2008, Google Inc.
     1.7 +# All rights reserved.
     1.8 +#
     1.9 +# Redistribution and use in source and binary forms, with or without
    1.10 +# modification, are permitted provided that the following conditions are
    1.11 +# met:
    1.12 +#
    1.13 +#     * Redistributions of source code must retain the above copyright
    1.14 +# notice, this list of conditions and the following disclaimer.
    1.15 +#     * Redistributions in binary form must reproduce the above
    1.16 +# copyright notice, this list of conditions and the following disclaimer
    1.17 +# in the documentation and/or other materials provided with the
    1.18 +# distribution.
    1.19 +#     * Neither the name of Google Inc. nor the names of its
    1.20 +# contributors may be used to endorse or promote products derived from
    1.21 +# this software without specific prior written permission.
    1.22 +#
    1.23 +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    1.24 +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
    1.25 +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
    1.26 +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
    1.27 +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
    1.28 +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
    1.29 +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    1.30 +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    1.31 +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    1.32 +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    1.33 +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    1.34 +
    1.35 +"""pump v0.2.0 - Pretty Useful for Meta Programming.
    1.36 +
    1.37 +A tool for preprocessor meta programming.  Useful for generating
    1.38 +repetitive boilerplate code.  Especially useful for writing C++
    1.39 +classes, functions, macros, and templates that need to work with
    1.40 +various number of arguments.
    1.41 +
    1.42 +USAGE:
    1.43 +       pump.py SOURCE_FILE
    1.44 +
    1.45 +EXAMPLES:
    1.46 +       pump.py foo.cc.pump
    1.47 +         Converts foo.cc.pump to foo.cc.
    1.48 +
    1.49 +GRAMMAR:
    1.50 +       CODE ::= ATOMIC_CODE*
    1.51 +       ATOMIC_CODE ::= $var ID = EXPRESSION
    1.52 +           | $var ID = [[ CODE ]]
    1.53 +           | $range ID EXPRESSION..EXPRESSION
    1.54 +           | $for ID SEPARATOR [[ CODE ]]
    1.55 +           | $($)
    1.56 +           | $ID
    1.57 +           | $(EXPRESSION)
    1.58 +           | $if EXPRESSION [[ CODE ]] ELSE_BRANCH
    1.59 +           | [[ CODE ]]
    1.60 +           | RAW_CODE
    1.61 +       SEPARATOR ::= RAW_CODE | EMPTY
    1.62 +       ELSE_BRANCH ::= $else [[ CODE ]]
    1.63 +           | $elif EXPRESSION [[ CODE ]] ELSE_BRANCH
    1.64 +           | EMPTY
    1.65 +       EXPRESSION has Python syntax.
    1.66 +"""
    1.67 +
    1.68 +__author__ = 'wan@google.com (Zhanyong Wan)'
    1.69 +
    1.70 +import os
    1.71 +import re
    1.72 +import sys
    1.73 +
    1.74 +
# Table of (compiled regex, token type) pairs used by the tokenizer.
# Order matters: when two patterns match at the same column,
# FindFirstInLine keeps the entry listed first, so the keyword patterns
# ('$var', '$for', ...) must precede the generic '$id' and bare '$'
# patterns.
TOKEN_TABLE = [
    (re.compile(r'\$var\s+'), '$var'),
    (re.compile(r'\$elif\s+'), '$elif'),
    (re.compile(r'\$else\s+'), '$else'),
    (re.compile(r'\$for\s+'), '$for'),
    (re.compile(r'\$if\s+'), '$if'),
    (re.compile(r'\$range\s+'), '$range'),
    (re.compile(r'\$[_A-Za-z]\w*'), '$id'),
    (re.compile(r'\$\(\$\)'), '$($)'),
    (re.compile(r'\$'), '$'),
    (re.compile(r'\[\[\n?'), '[['),
    (re.compile(r'\]\]\n?'), ']]'),
    ]
    1.88 +
    1.89 +
    1.90 +class Cursor:
    1.91 +  """Represents a position (line and column) in a text file."""
    1.92 +
    1.93 +  def __init__(self, line=-1, column=-1):
    1.94 +    self.line = line
    1.95 +    self.column = column
    1.96 +
    1.97 +  def __eq__(self, rhs):
    1.98 +    return self.line == rhs.line and self.column == rhs.column
    1.99 +
   1.100 +  def __ne__(self, rhs):
   1.101 +    return not self == rhs
   1.102 +
   1.103 +  def __lt__(self, rhs):
   1.104 +    return self.line < rhs.line or (
   1.105 +        self.line == rhs.line and self.column < rhs.column)
   1.106 +
   1.107 +  def __le__(self, rhs):
   1.108 +    return self < rhs or self == rhs
   1.109 +
   1.110 +  def __gt__(self, rhs):
   1.111 +    return rhs < self
   1.112 +
   1.113 +  def __ge__(self, rhs):
   1.114 +    return rhs <= self
   1.115 +
   1.116 +  def __str__(self):
   1.117 +    if self == Eof():
   1.118 +      return 'EOF'
   1.119 +    else:
   1.120 +      return '%s(%s)' % (self.line + 1, self.column)
   1.121 +
   1.122 +  def __add__(self, offset):
   1.123 +    return Cursor(self.line, self.column + offset)
   1.124 +
   1.125 +  def __sub__(self, offset):
   1.126 +    return Cursor(self.line, self.column - offset)
   1.127 +
   1.128 +  def Clone(self):
   1.129 +    """Returns a copy of self."""
   1.130 +
   1.131 +    return Cursor(self.line, self.column)
   1.132 +
   1.133 +
# Special cursor to indicate the end-of-file.
def Eof():
  """Returns the special cursor to denote the end-of-file."""
  # (-1, -1) is also what a default-constructed Cursor holds, so
  # Cursor() == Eof() is true by construction.
  return Cursor(-1, -1)
   1.138 +
   1.139 +
   1.140 +class Token:
   1.141 +  """Represents a token in a Pump source file."""
   1.142 +
   1.143 +  def __init__(self, start=None, end=None, value=None, token_type=None):
   1.144 +    if start is None:
   1.145 +      self.start = Eof()
   1.146 +    else:
   1.147 +      self.start = start
   1.148 +    if end is None:
   1.149 +      self.end = Eof()
   1.150 +    else:
   1.151 +      self.end = end
   1.152 +    self.value = value
   1.153 +    self.token_type = token_type
   1.154 +
   1.155 +  def __str__(self):
   1.156 +    return 'Token @%s: \'%s\' type=%s' % (
   1.157 +        self.start, self.value, self.token_type)
   1.158 +
   1.159 +  def Clone(self):
   1.160 +    """Returns a copy of self."""
   1.161 +
   1.162 +    return Token(self.start.Clone(), self.end.Clone(), self.value,
   1.163 +                 self.token_type)
   1.164 +
   1.165 +
   1.166 +def StartsWith(lines, pos, string):
   1.167 +  """Returns True iff the given position in lines starts with 'string'."""
   1.168 +
   1.169 +  return lines[pos.line][pos.column:].startswith(string)
   1.170 +
   1.171 +
   1.172 +def FindFirstInLine(line, token_table):
   1.173 +  best_match_start = -1
   1.174 +  for (regex, token_type) in token_table:
   1.175 +    m = regex.search(line)
   1.176 +    if m:
   1.177 +      # We found regex in lines
   1.178 +      if best_match_start < 0 or m.start() < best_match_start:
   1.179 +        best_match_start = m.start()
   1.180 +        best_match_length = m.end() - m.start()
   1.181 +        best_match_token_type = token_type
   1.182 +
   1.183 +  if best_match_start < 0:
   1.184 +    return None
   1.185 +
   1.186 +  return (best_match_start, best_match_length, best_match_token_type)
   1.187 +
   1.188 +
def FindFirst(lines, token_table, cursor):
  """Finds the first occurrence of any string in strings in lines.

  Scans from cursor to the end of lines and returns a Token for the
  earliest match of any regex in token_table, or None if none match.
  """

  start = cursor.Clone()
  cur_line_number = cursor.line
  for line in lines[start.line:]:
    if cur_line_number == start.line:
      # On the first line, only search from the cursor's column onward.
      line = line[start.column:]
    m = FindFirstInLine(line, token_table)
    if m:
      # We found a regex in line.
      (start_column, length, token_type) = m
      if cur_line_number == start.line:
        # Compensate for the slice above so the column is file-relative.
        start_column += start.column
      found_start = Cursor(cur_line_number, start_column)
      found_end = found_start + length
      return MakeToken(lines, found_start, found_end, token_type)
    cur_line_number += 1
  # We failed to find str in lines
  return None
   1.209 +
   1.210 +
   1.211 +def SubString(lines, start, end):
   1.212 +  """Returns a substring in lines."""
   1.213 +
   1.214 +  if end == Eof():
   1.215 +    end = Cursor(len(lines) - 1, len(lines[-1]))
   1.216 +
   1.217 +  if start >= end:
   1.218 +    return ''
   1.219 +
   1.220 +  if start.line == end.line:
   1.221 +    return lines[start.line][start.column:end.column]
   1.222 +
   1.223 +  result_lines = ([lines[start.line][start.column:]] +
   1.224 +                  lines[start.line + 1:end.line] +
   1.225 +                  [lines[end.line][:end.column]])
   1.226 +  return ''.join(result_lines)
   1.227 +
   1.228 +
   1.229 +def StripMetaComments(str):
   1.230 +  """Strip meta comments from each line in the given string."""
   1.231 +
   1.232 +  # First, completely remove lines containing nothing but a meta
   1.233 +  # comment, including the trailing \n.
   1.234 +  str = re.sub(r'^\s*\$\$.*\n', '', str)
   1.235 +
   1.236 +  # Then, remove meta comments from contentful lines.
   1.237 +  return re.sub(r'\s*\$\$.*', '', str)
   1.238 +
   1.239 +
def MakeToken(lines, start, end, token_type):
  """Creates a new instance of Token."""

  # The token's value is the text of lines between the two cursors.
  return Token(start, end, SubString(lines, start, end), token_type)
   1.244 +
   1.245 +
   1.246 +def ParseToken(lines, pos, regex, token_type):
   1.247 +  line = lines[pos.line][pos.column:]
   1.248 +  m = regex.search(line)
   1.249 +  if m and not m.start():
   1.250 +    return MakeToken(lines, pos, pos + m.end(), token_type)
   1.251 +  else:
   1.252 +    print 'ERROR: %s expected at %s.' % (token_type, pos)
   1.253 +    sys.exit(1)
   1.254 +
   1.255 +
# Regexes used when parsing directive arguments.
ID_REGEX = re.compile(r'[_A-Za-z]\w*')  # a meta identifier
EQ_REGEX = re.compile(r'=')
# The rest of the line, up to (but excluding) a '$$' meta comment.
REST_OF_LINE_REGEX = re.compile(r'.*?(?=$|\$\$)')
OPTIONAL_WHITE_SPACES_REGEX = re.compile(r'\s*')
WHITE_SPACE_REGEX = re.compile(r'\s')
DOT_DOT_REGEX = re.compile(r'\.\.')  # the '..' in '$range ID 1..N'
   1.262 +
   1.263 +
   1.264 +def Skip(lines, pos, regex):
   1.265 +  line = lines[pos.line][pos.column:]
   1.266 +  m = re.search(regex, line)
   1.267 +  if m and not m.start():
   1.268 +    return pos + m.end()
   1.269 +  else:
   1.270 +    return pos
   1.271 +
   1.272 +
   1.273 +def SkipUntil(lines, pos, regex, token_type):
   1.274 +  line = lines[pos.line][pos.column:]
   1.275 +  m = re.search(regex, line)
   1.276 +  if m:
   1.277 +    return pos + m.start()
   1.278 +  else:
   1.279 +    print ('ERROR: %s expected on line %s after column %s.' %
   1.280 +           (token_type, pos.line + 1, pos.column))
   1.281 +    sys.exit(1)
   1.282 +
   1.283 +
def ParseExpTokenInParens(lines, pos):
  """Parses a parenthesized expression at pos into an 'exp' Token.

  Nested parentheses are handled by the mutually recursive helpers; the
  returned token spans the whole '(...)' group, closing paren included.
  """
  def ParseInParens(pos):
    pos = Skip(lines, pos, OPTIONAL_WHITE_SPACES_REGEX)
    pos = Skip(lines, pos, r'\(')
    pos = Parse(pos)
    pos = Skip(lines, pos, r'\)')
    return pos

  def Parse(pos):
    # Advance to the next paren; recurse into each '(' pair until the
    # matching ')' of the current group is reached.
    pos = SkipUntil(lines, pos, r'\(|\)', ')')
    if SubString(lines, pos, pos + 1) == '(':
      pos = Parse(pos + 1)
      pos = Skip(lines, pos, r'\)')
      return Parse(pos)
    else:
      return pos

  start = pos.Clone()
  pos = ParseInParens(pos)
  return MakeToken(lines, start, pos, 'exp')
   1.304 +
   1.305 +
   1.306 +def RStripNewLineFromToken(token):
   1.307 +  if token.value.endswith('\n'):
   1.308 +    return Token(token.start, token.end, token.value[:-1], token.token_type)
   1.309 +  else:
   1.310 +    return token
   1.311 +
   1.312 +
def TokenizeLines(lines, pos):
  """Generator yielding the tokens of lines, starting at cursor pos.

  Raw text between directives is yielded as 'code' tokens.  For
  directives that end a line of markup ($var/$for/$range/$if/$elif/
  $else/']]'), the newline at the end of the preceding code token is
  stripped so the generated output has no spurious blank lines.
  """
  while True:
    found = FindFirst(lines, TOKEN_TABLE, pos)
    if not found:
      # No more directives: the remainder of the input is one code token.
      yield MakeToken(lines, pos, Eof(), 'code')
      return

    if found.start == pos:
      prev_token = None
      prev_token_rstripped = None
    else:
      prev_token = MakeToken(lines, pos, found.start, 'code')
      prev_token_rstripped = RStripNewLineFromToken(prev_token)

    if found.token_type == '$var':
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
      yield id_token
      pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)

      eq_token = ParseToken(lines, pos, EQ_REGEX, '=')
      yield eq_token
      pos = Skip(lines, eq_token.end, r'\s*')

      if SubString(lines, pos, pos + 2) != '[[':
        # '$var ID = EXPRESSION': the expression runs to end of line.
        exp_token = ParseToken(lines, pos, REST_OF_LINE_REGEX, 'exp')
        yield exp_token
        pos = Cursor(exp_token.end.line + 1, 0)
    elif found.token_type == '$for':
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
      yield id_token
      pos = Skip(lines, id_token.end, WHITE_SPACE_REGEX)
    elif found.token_type == '$range':
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
      yield id_token
      pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)

      # '$range ID exp1..exp2': split the rest of the line at '..'.
      dots_pos = SkipUntil(lines, pos, DOT_DOT_REGEX, '..')
      yield MakeToken(lines, pos, dots_pos, 'exp')
      yield MakeToken(lines, dots_pos, dots_pos + 2, '..')
      pos = dots_pos + 2
      new_pos = Cursor(pos.line + 1, 0)
      yield MakeToken(lines, pos, new_pos, 'exp')
      pos = new_pos
    elif found.token_type == '$':
      # '$(EXPRESSION)': keep the preceding code token intact (no rstrip).
      if prev_token:
        yield prev_token
      yield found
      exp_token = ParseExpTokenInParens(lines, found.end)
      yield exp_token
      pos = exp_token.end
    elif (found.token_type == ']]' or found.token_type == '$if' or
          found.token_type == '$elif' or found.token_type == '$else'):
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      pos = found.end
    else:
      if prev_token:
        yield prev_token
      yield found
      pos = found.end
   1.383 +
   1.384 +
   1.385 +def Tokenize(s):
   1.386 +  """A generator that yields the tokens in the given string."""
   1.387 +  if s != '':
   1.388 +    lines = s.splitlines(True)
   1.389 +    for token in TokenizeLines(lines, Cursor(0, 0)):
   1.390 +      yield token
   1.391 +
   1.392 +
class CodeNode:
  """AST node: a sequence of atomic code nodes, run in order."""
  def __init__(self, atomic_code_list=None):
    # List of child AST nodes; iterated by RunCode.
    self.atomic_code = atomic_code_list
   1.396 +
   1.397 +
class VarNode:
  """AST node: a '$var ID = ...' definition."""
  def __init__(self, identifier=None, atomic_code=None):
    self.identifier = identifier
    # Either an ExpNode or a CodeNode whose output becomes the value.
    self.atomic_code = atomic_code
   1.402 +
   1.403 +
class RangeNode:
  """AST node: a '$range ID exp1..exp2' declaration."""
  def __init__(self, identifier=None, exp1=None, exp2=None):
    self.identifier = identifier
    self.exp1 = exp1  # lower bound expression
    self.exp2 = exp2  # upper bound expression (inclusive; see RunAtomicCode)
   1.409 +
   1.410 +
class ForNode:
  """AST node: a '$for ID SEPARATOR [[ code ]]' loop."""
  def __init__(self, identifier=None, sep=None, code=None):
    self.identifier = identifier
    self.sep = sep  # separator token emitted between iterations, or None
    self.code = code
   1.416 +
   1.417 +
class ElseNode:
  """AST node for an else branch.

  NOTE(review): not instantiated anywhere in this file — ParseElseNode
  returns a CodeNode (or a CodeNode wrapping an IfNode) instead; this
  class appears to be vestigial.
  """
  def __init__(self, else_branch=None):
    self.else_branch = else_branch
   1.421 +
   1.422 +
class IfNode:
  """AST node: a '$if EXPRESSION [[ code ]] ELSE_BRANCH' conditional."""
  def __init__(self, exp=None, then_branch=None, else_branch=None):
    self.exp = exp
    self.then_branch = then_branch
    self.else_branch = else_branch  # CodeNode or None
   1.428 +
   1.429 +
class RawCodeNode:
  """AST node: literal output text, copied verbatim to the output."""
  def __init__(self, token=None):
    self.raw_code = token
   1.433 +
   1.434 +
class LiteralDollarNode:
  """AST node: the '$($)' escape, which emits a literal '$'."""
  def __init__(self, token):
    self.token = token
   1.438 +
   1.439 +
class ExpNode:
  """AST node: a meta expression to be evaluated by Env.EvalExp."""
  def __init__(self, token, python_exp):
    self.token = token
    # Python source string with identifiers rewritten to env lookups
    # (see ParseExpNode).
    self.python_exp = python_exp
   1.444 +
   1.445 +
   1.446 +def PopFront(a_list):
   1.447 +  head = a_list[0]
   1.448 +  a_list[:1] = []
   1.449 +  return head
   1.450 +
   1.451 +
   1.452 +def PushFront(a_list, elem):
   1.453 +  a_list[:0] = [elem]
   1.454 +
   1.455 +
   1.456 +def PopToken(a_list, token_type=None):
   1.457 +  token = PopFront(a_list)
   1.458 +  if token_type is not None and token.token_type != token_type:
   1.459 +    print 'ERROR: %s expected at %s' % (token_type, token.start)
   1.460 +    print 'ERROR: %s found instead' % (token,)
   1.461 +    sys.exit(1)
   1.462 +
   1.463 +  return token
   1.464 +
   1.465 +
   1.466 +def PeekToken(a_list):
   1.467 +  if not a_list:
   1.468 +    return None
   1.469 +
   1.470 +  return a_list[0]
   1.471 +
   1.472 +
def ParseExpNode(token):
  """Wraps token in an ExpNode, rewriting identifiers to env lookups.

  Every identifier in the expression text is replaced with
  self.GetValue("<id>") so the string can later be eval'd inside
  Env.EvalExp, where 'self' is the Env instance.
  """
  python_exp = re.sub(r'([_A-Za-z]\w*)', r'self.GetValue("\1")', token.value)
  return ExpNode(token, python_exp)
   1.476 +
   1.477 +
def ParseElseNode(tokens):
  """Parses an optional $else/$elif continuation.

  Returns the CodeNode of the else branch, or None when no else branch
  follows.  Consumes the tokens it uses.
  """
  def Pop(token_type=None):
    return PopToken(tokens, token_type)

  next = PeekToken(tokens)
  if not next:
    return None
  if next.token_type == '$else':
    Pop('$else')
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return code_node
  elif next.token_type == '$elif':
    Pop('$elif')
    exp = Pop('code')
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    inner_else_node = ParseElseNode(tokens)
    # $elif desugars into a nested if placed in the else branch.
    return CodeNode([IfNode(ParseExpNode(exp), code_node, inner_else_node)])
  elif not next.value.strip():
    # Skip whitespace-only code between ']]' and a following $else/$elif.
    Pop('code')
    return ParseElseNode(tokens)
  else:
    return None
   1.504 +
   1.505 +
def ParseAtomicCodeNode(tokens):
  """Parses one ATOMIC_CODE production (see the module grammar) into an
  AST node, consuming the tokens it uses.

  Returns None — after pushing the lookahead token back — when the next
  token cannot start an atomic code node (e.g. a ']]' closing a block).
  """
  def Pop(token_type=None):
    return PopToken(tokens, token_type)

  head = PopFront(tokens)
  t = head.token_type
  if t == 'code':
    return RawCodeNode(head)
  elif t == '$var':
    id_token = Pop('id')
    Pop('=')
    next = PeekToken(tokens)
    if next.token_type == 'exp':
      # $var ID = EXPRESSION
      exp_token = Pop()
      return VarNode(id_token, ParseExpNode(exp_token))
    # $var ID = [[ CODE ]]
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return VarNode(id_token, code_node)
  elif t == '$for':
    id_token = Pop('id')
    next_token = PeekToken(tokens)
    if next_token.token_type == 'code':
      # Optional separator emitted between loop iterations.
      sep_token = next_token
      Pop('code')
    else:
      sep_token = None
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return ForNode(id_token, sep_token, code_node)
  elif t == '$if':
    exp_token = Pop('code')
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    else_node = ParseElseNode(tokens)
    return IfNode(ParseExpNode(exp_token), code_node, else_node)
  elif t == '$range':
    id_token = Pop('id')
    exp1_token = Pop('exp')
    Pop('..')
    exp2_token = Pop('exp')
    return RangeNode(id_token, ParseExpNode(exp1_token),
                     ParseExpNode(exp2_token))
  elif t == '$id':
    # $ID: strip the leading '$' and treat the identifier as an expression.
    return ParseExpNode(Token(head.start + 1, head.end, head.value[1:], 'id'))
  elif t == '$($)':
    return LiteralDollarNode(head)
  elif t == '$':
    exp_token = Pop('exp')
    return ParseExpNode(exp_token)
  elif t == '[[':
    # Bare [[ CODE ]] grouping.
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return code_node
  else:
    PushFront(tokens, head)
    return None
   1.565 +
   1.566 +
   1.567 +def ParseCodeNode(tokens):
   1.568 +  atomic_code_list = []
   1.569 +  while True:
   1.570 +    if not tokens:
   1.571 +      break
   1.572 +    atomic_code_node = ParseAtomicCodeNode(tokens)
   1.573 +    if atomic_code_node:
   1.574 +      atomic_code_list.append(atomic_code_node)
   1.575 +    else:
   1.576 +      break
   1.577 +  return CodeNode(atomic_code_list)
   1.578 +
   1.579 +
   1.580 +def ParseToAST(pump_src_text):
   1.581 +  """Convert the given Pump source text into an AST."""
   1.582 +  tokens = list(Tokenize(pump_src_text))
   1.583 +  code_node = ParseCodeNode(tokens)
   1.584 +  return code_node
   1.585 +
   1.586 +
class Env:
  """Evaluation environment: stacks of meta variables and ranges."""

  def __init__(self):
    # Innermost bindings are at index 0 of both stacks.
    self.variables = []
    self.ranges = []

  def Clone(self):
    """Returns a shallow copy; pushes on the clone don't affect self."""
    clone = Env()
    clone.variables = self.variables[:]
    clone.ranges = self.ranges[:]
    return clone

  def PushVariable(self, var, value):
    # If value looks like an int, store it as an int.
    try:
      int_value = int(value)
      if ('%s' % int_value) == value:
        value = int_value
    except Exception:
      pass
    self.variables[:0] = [(var, value)]

  def PopVariable(self):
    self.variables[:1] = []

  def PushRange(self, var, lower, upper):
    self.ranges[:0] = [(var, lower, upper)]

  def PopRange(self):
    self.ranges[:1] = []

  def GetValue(self, identifier):
    """Returns the innermost value bound to identifier; exits if unbound."""
    for (var, value) in self.variables:
      if identifier == var:
        return value

    print 'ERROR: meta variable %s is undefined.' % (identifier,)
    sys.exit(1)

  def EvalExp(self, exp):
    """Evaluates exp.python_exp and returns the result.

    The expression string references 'self' (see ParseExpNode), so it
    must be eval'd inside this method where 'self' is the Env.
    """
    try:
      result = eval(exp.python_exp)
    except Exception, e:
      print 'ERROR: caught exception %s: %s' % (e.__class__.__name__, e)
      print ('ERROR: failed to evaluate meta expression %s at %s' %
             (exp.python_exp, exp.token.start))
      sys.exit(1)
    return result

  def GetRange(self, identifier):
    """Returns the innermost (lower, upper) range bound to identifier;
    exits if undefined."""
    for (var, lower, upper) in self.ranges:
      if identifier == var:
        return (lower, upper)

    print 'ERROR: range %s is undefined.' % (identifier,)
    sys.exit(1)
   1.642 +
   1.643 +
   1.644 +class Output:
   1.645 +  def __init__(self):
   1.646 +    self.string = ''
   1.647 +
   1.648 +  def GetLastLine(self):
   1.649 +    index = self.string.rfind('\n')
   1.650 +    if index < 0:
   1.651 +      return ''
   1.652 +
   1.653 +    return self.string[index + 1:]
   1.654 +
   1.655 +  def Append(self, s):
   1.656 +    self.string += s
   1.657 +
   1.658 +
def RunAtomicCode(env, node, output):
  """Interprets one AST node, mutating env and appending text to output."""
  if isinstance(node, VarNode):
    # $var: run the body into a scratch Output and bind the resulting
    # string (PushVariable converts int-looking strings to ints).
    identifier = node.identifier.value.strip()
    result = Output()
    RunAtomicCode(env.Clone(), node.atomic_code, result)
    value = result.string
    env.PushVariable(identifier, value)
  elif isinstance(node, RangeNode):
    identifier = node.identifier.value.strip()
    lower = int(env.EvalExp(node.exp1))
    upper = int(env.EvalExp(node.exp2))
    env.PushRange(identifier, lower, upper)
  elif isinstance(node, ForNode):
    identifier = node.identifier.value.strip()
    if node.sep is None:
      sep = ''
    else:
      sep = node.sep.value
    # The loop variable iterates over the inclusive range previously
    # declared by a matching $range.
    (lower, upper) = env.GetRange(identifier)
    for i in range(lower, upper + 1):
      new_env = env.Clone()
      new_env.PushVariable(identifier, i)
      RunCode(new_env, node.code, output)
      if i != upper:
        # Separator goes between iterations, not after the last one.
        output.Append(sep)
  elif isinstance(node, RawCodeNode):
    output.Append(node.raw_code.value)
  elif isinstance(node, IfNode):
    cond = env.EvalExp(node.exp)
    if cond:
      RunCode(env.Clone(), node.then_branch, output)
    elif node.else_branch is not None:
      RunCode(env.Clone(), node.else_branch, output)
  elif isinstance(node, ExpNode):
    value = env.EvalExp(node)
    output.Append('%s' % (value,))
  elif isinstance(node, LiteralDollarNode):
    output.Append('$')
  elif isinstance(node, CodeNode):
    RunCode(env.Clone(), node, output)
  else:
    print 'BAD'
    print node
    sys.exit(1)
   1.703 +
   1.704 +
   1.705 +def RunCode(env, code_node, output):
   1.706 +  for atomic_code in code_node.atomic_code:
   1.707 +    RunAtomicCode(env, atomic_code, output)
   1.708 +
   1.709 +
   1.710 +def IsSingleLineComment(cur_line):
   1.711 +  return '//' in cur_line
   1.712 +
   1.713 +
   1.714 +def IsInPreprocessorDirective(prev_lines, cur_line):
   1.715 +  if cur_line.lstrip().startswith('#'):
   1.716 +    return True
   1.717 +  return prev_lines and prev_lines[-1].endswith('\\')
   1.718 +
   1.719 +
def WrapComment(line, output):
  """Wraps a long '//' comment line to 80 columns, appending to output."""
  loc = line.find('//')
  before_comment = line[:loc].rstrip()
  if before_comment == '':
    # Comment-only line: keep the comment at its original column.
    indent = loc
  else:
    # Code followed by a comment: emit the code as-is and move the
    # comment to its own line(s) at the code's indentation.
    output.append(before_comment)
    indent = len(before_comment) - len(before_comment.lstrip())
  prefix = indent*' ' + '// '
  max_len = 80 - len(prefix)
  comment = line[loc + 2:].strip()
  # Split into word-plus-trailing-punctuation chunks so wrapping never
  # breaks inside a word.
  segs = [seg for seg in re.split(r'(\w+\W*)', comment) if seg != '']
  cur_line = ''
  for seg in segs:
    if len((cur_line + seg).rstrip()) < max_len:
      cur_line += seg
    else:
      if cur_line.strip() != '':
        output.append(prefix + cur_line.rstrip())
      cur_line = seg.lstrip()
  if cur_line.strip() != '':
    output.append(prefix + cur_line.strip())
   1.742 +
   1.743 +
def WrapCode(line, line_concat, output):
  """Wraps a long code line at 80 columns, appending the pieces to output.

  line_concat is appended at every break point (' \\' for preprocessor
  directives, '' for plain code); continuation lines are indented 4
  extra spaces relative to the original line.
  """
  indent = len(line) - len(line.lstrip())
  prefix = indent*' '  # Prefix of the current line
  max_len = 80 - indent - len(line_concat)  # Maximum length of the current line
  new_prefix = prefix + 4*' '  # Prefix of a continuation line
  new_max_len = max_len - 4  # Maximum length of a continuation line
  # Prefers to wrap a line after a ',' or ';'.
  segs = [seg for seg in re.split(r'([^,;]+[,;]?)', line.strip()) if seg != '']
  cur_line = ''  # The current line without leading spaces.
  for seg in segs:
    # If the line is still too long, wrap at a space.
    while cur_line == '' and len(seg.strip()) > max_len:
      seg = seg.lstrip()
      split_at = seg.rfind(' ', 0, max_len)
      output.append(prefix + seg[:split_at].strip() + line_concat)
      seg = seg[split_at + 1:]
      prefix = new_prefix
      max_len = new_max_len

    if len((cur_line + seg).rstrip()) < max_len:
      cur_line = (cur_line + seg).lstrip()
    else:
      output.append(prefix + cur_line.rstrip() + line_concat)
      prefix = new_prefix
      max_len = new_max_len
      cur_line = seg.lstrip()
  if cur_line.strip() != '':
    output.append(prefix + cur_line.strip())
   1.772 +
   1.773 +
def WrapPreprocessorDirective(line, output):
  """Wraps a long preprocessor line, ending continued lines with ' \\'."""
  WrapCode(line, ' \\', output)
   1.776 +
   1.777 +
def WrapPlainCode(line, output):
  """Wraps a long non-preprocessor code line (no continuation marker)."""
  WrapCode(line, '', output)
   1.780 +
   1.781 +
def IsMultiLineIWYUPragma(line):
  """Returns a truthy match iff line contains a '/* IWYU pragma: ' comment."""
  return re.search(r'/\* IWYU pragma: ', line)
   1.784 +
   1.785 +
def IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
  """Returns truthy iff line is exempt from the 80-column wrapping rule:
  a header-guard directive, an #include, or a one-line IWYU pragma."""
  return (re.match(r'^#(ifndef|define|endif\s*//)\s*[\w_]+\s*$', line) or
          re.match(r'^#include\s', line) or
          # Don't break IWYU pragmas, either; that causes iwyu.py problems.
          re.search(r'// IWYU pragma: ', line))
   1.791 +
   1.792 +
def WrapLongLine(line, output):
  """Appends line to output, wrapping it at 80 columns if needed.

  Dispatches to the comment / preprocessor / plain-code wrapper based on
  the line's content; header guards, includes and IWYU pragmas are kept
  on one line regardless of length.
  """
  line = line.rstrip()
  if len(line) <= 80:
    output.append(line)
  elif IsSingleLineComment(line):
    if IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
      # The style guide made an exception to allow long header guard lines,
      # includes and IWYU pragmas.
      output.append(line)
    else:
      WrapComment(line, output)
  elif IsInPreprocessorDirective(output, line):
    if IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
      # The style guide made an exception to allow long header guard lines,
      # includes and IWYU pragmas.
      output.append(line)
    else:
      WrapPreprocessorDirective(line, output)
  elif IsMultiLineIWYUPragma(line):
    output.append(line)
  else:
    WrapPlainCode(line, output)
   1.815 +
   1.816 +
   1.817 +def BeautifyCode(string):
   1.818 +  lines = string.splitlines()
   1.819 +  output = []
   1.820 +  for line in lines:
   1.821 +    WrapLongLine(line, output)
   1.822 +  output2 = [line.rstrip() for line in output]
   1.823 +  return '\n'.join(output2) + '\n'
   1.824 +
   1.825 +
def ConvertFromPumpSource(src_text):
  """Return the text generated from the given Pump source text."""
  # Pipeline: strip $$ comments -> tokenize/parse -> interpret -> wrap
  # long lines for the generated C++ output.
  ast = ParseToAST(StripMetaComments(src_text))
  output = Output()
  RunCode(Env(), ast, output)
  return BeautifyCode(output.string)
   1.832 +
   1.833 +
   1.834 +def main(argv):
   1.835 +  if len(argv) == 1:
   1.836 +    print __doc__
   1.837 +    sys.exit(1)
   1.838 +
   1.839 +  file_path = argv[-1]
   1.840 +  output_str = ConvertFromPumpSource(file(file_path, 'r').read())
   1.841 +  if file_path.endswith('.pump'):
   1.842 +    output_file_path = file_path[:-5]
   1.843 +  else:
   1.844 +    output_file_path = '-'
   1.845 +  if output_file_path == '-':
   1.846 +    print output_str,
   1.847 +  else:
   1.848 +    output_file = file(output_file_path, 'w')
   1.849 +    output_file.write('// This file was GENERATED by command:\n')
   1.850 +    output_file.write('//     %s %s\n' %
   1.851 +                      (os.path.basename(__file__), os.path.basename(file_path)))
   1.852 +    output_file.write('// DO NOT EDIT BY HAND!!!\n\n')
   1.853 +    output_file.write(output_str)
   1.854 +    output_file.close()
   1.855 +
   1.856 +
# Script entry point: the last command-line argument names the input file.
if __name__ == '__main__':
  main(sys.argv)

mercurial