diff --git a/src/etc/check-summary.py b/src/etc/check-summary.py index 55428a6fcc4..917e1970a36 100755 --- a/src/etc/check-summary.py +++ b/src/etc/check-summary.py @@ -15,6 +15,7 @@ import sys if __name__ == '__main__': summaries = [] + def summarise(fname): summary = {} with open(fname) as fd: @@ -27,12 +28,14 @@ if __name__ == '__main__': # track bench runs if splitline[1] == 'ns/iter': status = 'bench' - if not summary.has_key(status): + if status not in summary: summary[status] = [] summary[status].append(test) summaries.append((fname, summary)) + def count(t): return sum(map(lambda (f, s): len(s.get(t, [])), summaries)) + logfiles = sys.argv[1:] for files in map(glob.glob, logfiles): map(summarise, files) @@ -41,8 +44,9 @@ if __name__ == '__main__': ignored = count('ignored') measured = count('bench') print "summary of %d test runs: %d passed; %d failed; %d ignored; %d measured" % \ - (len(logfiles), ok, failed, ignored, measured) + (len(logfiles), ok, failed, ignored, measured) print "" + if failed > 0: print "failed tests:" for f, s in summaries: diff --git a/src/etc/errorck.py b/src/etc/errorck.py index 952e299265d..c940359abc1 100644 --- a/src/etc/errorck.py +++ b/src/etc/errorck.py @@ -11,7 +11,9 @@ # Digs error codes out of files named 'diagnostics.rs' across # the tree, and ensures thare are no duplicates. -import sys, os, re +import sys +import os +import re src_dir = sys.argv[1] errcode_map = {} diff --git a/src/etc/extract_grammar.py b/src/etc/extract_grammar.py index 53781652902..a12c3298cb3 100755 --- a/src/etc/extract_grammar.py +++ b/src/etc/extract_grammar.py @@ -14,11 +14,11 @@ import fileinput -collections = { "gram": [], - "keyword": [], - "reserved": [], - "binop": [], - "unop": [] } +collections = {"gram": [], + "keyword": [], + "reserved": [], + "binop": [], + "unop": []} in_coll = False @@ -47,66 +47,66 @@ for line in fileinput.input(openhook=fileinput.hook_encoded("utf-8")): # Define operator symbol-names here tokens = ["non_star", "non_slash", "non_eol", - "non_single_quote", "non_double_quote", "ident" ] + "non_single_quote", "non_double_quote", "ident"] symnames = { -".": "dot", -"+": "plus", -"-": "minus", -"/": "slash", -"*": "star", -"%": "percent", + ".": "dot", + "+": "plus", + "-": "minus", + "/": "slash", + "*": "star", + "%": "percent", -"~": "tilde", -"@": "at", + "~": "tilde", + "@": "at", -"!": "not", -"&": "and", -"|": "or", -"^": "xor", + "!": "not", + "&": "and", + "|": "or", + "^": "xor", -"<<": "lsl", -">>": "lsr", -">>>": "asr", + "<<": "lsl", + ">>": "lsr", + ">>>": "asr", -"&&": "andand", -"||": "oror", + "&&": "andand", + "||": "oror", -"<" : "lt", -"<=" : "le", -"==" : "eqeq", -">=" : "ge", -">" : "gt", + "<": "lt", + "<=": "le", + "==": "eqeq", + ">=": "ge", + ">": "gt", -"=": "eq", + "=": "eq", -"+=": "plusequal", -"-=": "minusequal", -"/=": "divequal", -"*=": "starequal", -"%=": "percentequal", + "+=": "plusequal", + "-=": "minusequal", + "/=": "divequal", + "*=": "starequal", + "%=": "percentequal", -"&=": "andequal", -"|=": "orequal", -"^=": "xorequal", + "&=": "andequal", + "|=": "orequal", + "^=": "xorequal", -">>=": "lsrequal", -">>>=": "asrequal", -"<<=": "lslequal", + ">>=": "lsrequal", + ">>>=": "asrequal", + "<<=": "lslequal", -"::": "coloncolon", + "::": "coloncolon", -"->": "rightarrow", -"<-": "leftarrow", -"<->": "swaparrow", + "->": "rightarrow", + "<-": "leftarrow", + "<->": "swaparrow", -"//": "linecomment", -"/*": "openblockcomment", -"*/": "closeblockcomment", -"macro_rules": "macro_rules", -"=>" : "eg", -".." 
: "dotdot", -"," : "comma" + "//": "linecomment", + "/*": "openblockcomment", + "*/": "closeblockcomment", + "macro_rules": "macro_rules", + "=>": "eg", + "..": "dotdot", + ",": "comma" } lines = [] @@ -126,8 +126,8 @@ for line in collections["gram"]: + word) if word not in tokens: if (word in collections["keyword"] or - word in collections["reserved"]): - tokens.append(word) + word in collections["reserved"]): + tokens.append(word) else: raise Exception("unknown keyword/reserved word: " + word) @@ -149,8 +149,8 @@ for sym in collections["unop"] + collections["binop"] + symnames.keys(): print("%start parser, token;") print("%%token %s ;" % ("\n\t, ".join(tokens))) for coll in ["keyword", "reserved"]: - print("%s: %s ; " % (coll, "\n\t| ".join(collections[coll]))); + print("%s: %s ; " % (coll, "\n\t| ".join(collections[coll]))) for coll in ["binop", "unop"]: print("%s: %s ; " % (coll, "\n\t| ".join([symnames[x] - for x in collections[coll]]))); -print("\n".join(lines)); + for x in collections[coll]]))) +print("\n".join(lines)) diff --git a/src/etc/gdb_rust_pretty_printing.py b/src/etc/gdb_rust_pretty_printing.py index b6770c99975..c5587bb10d1 100755 --- a/src/etc/gdb_rust_pretty_printing.py +++ b/src/etc/gdb_rust_pretty_printing.py @@ -14,181 +14,189 @@ import gdb # GDB Pretty Printing Module for Rust #=============================================================================== + def register_printers(objfile): - "Registers Rust pretty printers for the given objfile" - objfile.pretty_printers.append(rust_pretty_printer_lookup_function) + "Registers Rust pretty printers for the given objfile" + objfile.pretty_printers.append(rust_pretty_printer_lookup_function) + def rust_pretty_printer_lookup_function(val): - "Returns the correct Rust pretty printer for the given value if there is one" - type_code = val.type.code + "Returns the correct Rust pretty printer for the given value if there is one" + type_code = val.type.code - if type_code == gdb.TYPE_CODE_STRUCT: - struct_kind = classify_struct(val.type) + if type_code == gdb.TYPE_CODE_STRUCT: + struct_kind = classify_struct(val.type) - if struct_kind == STRUCT_KIND_STR_SLICE: - return RustStringSlicePrinter(val) + if struct_kind == STRUCT_KIND_STR_SLICE: + return RustStringSlicePrinter(val) - if struct_kind == STRUCT_KIND_TUPLE: - return RustTuplePrinter(val) + if struct_kind == STRUCT_KIND_TUPLE: + return RustTuplePrinter(val) - if struct_kind == STRUCT_KIND_TUPLE_STRUCT: - return RustTupleStructPrinter(val, False) + if struct_kind == STRUCT_KIND_TUPLE_STRUCT: + return RustTupleStructPrinter(val, False) - if struct_kind == STRUCT_KIND_CSTYLE_VARIANT: - return RustCStyleEnumPrinter(val[get_field_at_index(val, 0)]) + if struct_kind == STRUCT_KIND_CSTYLE_VARIANT: + return RustCStyleEnumPrinter(val[get_field_at_index(val, 0)]) - if struct_kind == STRUCT_KIND_TUPLE_VARIANT: - return RustTupleStructPrinter(val, True) + if struct_kind == STRUCT_KIND_TUPLE_VARIANT: + return RustTupleStructPrinter(val, True) - if struct_kind == STRUCT_KIND_STRUCT_VARIANT: - return RustStructPrinter(val, True) + if struct_kind == STRUCT_KIND_STRUCT_VARIANT: + return RustStructPrinter(val, True) - return RustStructPrinter(val, False) + return RustStructPrinter(val, False) - # Enum handling - if type_code == gdb.TYPE_CODE_UNION: - enum_members = list(val.type.fields()) - enum_member_count = len(enum_members) + # Enum handling + if type_code == gdb.TYPE_CODE_UNION: + enum_members = list(val.type.fields()) + enum_member_count = len(enum_members) - if enum_member_count == 
0: - return RustStructPrinter(val, False) + if enum_member_count == 0: + return RustStructPrinter(val, False) - if enum_member_count == 1: - first_variant_name = enum_members[0].name - if first_variant_name == None: - # This is a singleton enum - return rust_pretty_printer_lookup_function(val[enum_members[0]]) - else: - assert first_variant_name.startswith("RUST$ENCODED$ENUM$") - # This is a space-optimized enum. - # This means this enum has only two states, and Rust uses one of the - # fields somewhere in the struct to determine which of the two states - # it's in. The location of the field is encoded in the name as something - # like RUST$ENCODED$ENUM$(num$)*name_of_zero_state - last_separator_index = first_variant_name.rfind("$") - start_index = len("RUST$ENCODED$ENUM$") - disr_field_indices = first_variant_name[start_index : - last_separator_index].split("$") - disr_field_indices = [int(index) for index in disr_field_indices] + if enum_member_count == 1: + first_variant_name = enum_members[0].name + if first_variant_name is None: + # This is a singleton enum + return rust_pretty_printer_lookup_function(val[enum_members[0]]) + else: + assert first_variant_name.startswith("RUST$ENCODED$ENUM$") + # This is a space-optimized enum. + # This means this enum has only two states, and Rust uses one + # of the fields somewhere in the struct to determine which of + # the two states it's in. The location of the field is encoded + # in the name as something like + # RUST$ENCODED$ENUM$(num$)*name_of_zero_state + last_separator_index = first_variant_name.rfind("$") + start_index = len("RUST$ENCODED$ENUM$") + disr_field_indices = first_variant_name[start_index:last_separator_index].split("$") + disr_field_indices = [int(index) for index in disr_field_indices] - sole_variant_val = val[enum_members[0]] - discriminant = sole_variant_val - for disr_field_index in disr_field_indices: - disr_field = get_field_at_index(discriminant, disr_field_index) - discriminant = discriminant[disr_field] + sole_variant_val = val[enum_members[0]] + discriminant = sole_variant_val + for disr_field_index in disr_field_indices: + disr_field = get_field_at_index(discriminant, disr_field_index) + discriminant = discriminant[disr_field] - # If the discriminant field is a fat pointer we have to consider the - # first word as the true discriminant - if discriminant.type.code == gdb.TYPE_CODE_STRUCT: - discriminant = discriminant[get_field_at_index(discriminant, 0)] + # If the discriminant field is a fat pointer we have to consider the + # first word as the true discriminant + if discriminant.type.code == gdb.TYPE_CODE_STRUCT: + discriminant = discriminant[get_field_at_index(discriminant, 0)] - if discriminant == 0: - null_variant_name = first_variant_name[last_separator_index + 1:] - return IdentityPrinter(null_variant_name) + if discriminant == 0: + null_variant_name = first_variant_name[last_separator_index + 1:] + return IdentityPrinter(null_variant_name) - return rust_pretty_printer_lookup_function(sole_variant_val) + return rust_pretty_printer_lookup_function(sole_variant_val) - # This is a regular enum, extract the discriminant - discriminant_name, discriminant_val = extract_discriminant_value(val) - return rust_pretty_printer_lookup_function(val[enum_members[discriminant_val]]) + # This is a regular enum, extract the discriminant + discriminant_name, discriminant_val = extract_discriminant_value(val) + return rust_pretty_printer_lookup_function(val[enum_members[discriminant_val]]) - # No pretty printer has been found - 
return None + # No pretty printer has been found + return None #=------------------------------------------------------------------------------ # Pretty Printer Classes #=------------------------------------------------------------------------------ + class RustStructPrinter: - def __init__(self, val, hide_first_field): - self.val = val - self.hide_first_field = hide_first_field + def __init__(self, val, hide_first_field): + self.val = val + self.hide_first_field = hide_first_field - def to_string(self): - return self.val.type.tag + def to_string(self): + return self.val.type.tag - def children(self): - cs = [] - for field in self.val.type.fields(): - field_name = field.name - # Normally the field name is used as a key to access the field value, - # because that's also supported in older versions of GDB... - field_key = field_name - if field_name == None: - field_name = "" - # ... but for fields without a name (as in tuples), we have to fall back - # to the newer method of using the field object directly as key. In - # older versions of GDB, this will just fail. - field_key = field - name_value_tuple = ( field_name, self.val[field_key] ) - cs.append( name_value_tuple ) + def children(self): + cs = [] + for field in self.val.type.fields(): + field_name = field.name + # Normally the field name is used as a key to access the field + # value, because that's also supported in older versions of GDB... + field_key = field_name + if field_name is None: + field_name = "" + # ... but for fields without a name (as in tuples), we have to + # fall back to the newer method of using the field object + # directly as key. In older versions of GDB, this will just + # fail. + field_key = field + name_value_tuple = (field_name, self.val[field_key]) + cs.append(name_value_tuple) - if self.hide_first_field: - cs = cs[1:] + if self.hide_first_field: + cs = cs[1:] + + return cs - return cs class RustTuplePrinter: - def __init__(self, val): - self.val = val + def __init__(self, val): + self.val = val - def to_string(self): - return None + def to_string(self): + return None - def children(self): - cs = [] - for field in self.val.type.fields(): - cs.append( ("", self.val[field]) ) + def children(self): + cs = [] + for field in self.val.type.fields(): + cs.append(("", self.val[field])) - return cs + return cs + + def display_hint(self): + return "array" - def display_hint(self): - return "array" class RustTupleStructPrinter: - def __init__(self, val, hide_first_field): - self.val = val - self.hide_first_field = hide_first_field + def __init__(self, val, hide_first_field): + self.val = val + self.hide_first_field = hide_first_field - def to_string(self): - return self.val.type.tag + def to_string(self): + return self.val.type.tag - def children(self): - cs = [] - for field in self.val.type.fields(): - cs.append( ("", self.val[field]) ) + def children(self): + cs = [] + for field in self.val.type.fields(): + cs.append(("", self.val[field])) - if self.hide_first_field: - cs = cs[1:] + if self.hide_first_field: + cs = cs[1:] - return cs + return cs + + def display_hint(self): + return "array" - def display_hint(self): - return "array" class RustStringSlicePrinter: - def __init__(self, val): - self.val = val + def __init__(self, val): + self.val = val + + def to_string(self): + slice_byte_len = self.val["length"] + return '"%s"' % self.val["data_ptr"].string(encoding="utf-8", length=slice_byte_len) - def to_string(self): - slice_byte_len = self.val["length"] - return '"%s"' % self.val["data_ptr"].string(encoding = 
"utf-8", - length = slice_byte_len) class RustCStyleEnumPrinter: - def __init__(self, val): - assert val.type.code == gdb.TYPE_CODE_ENUM - self.val = val + def __init__(self, val): + assert val.type.code == gdb.TYPE_CODE_ENUM + self.val = val + + def to_string(self): + return str(self.val) - def to_string(self): - return str(self.val) class IdentityPrinter: - def __init__(self, string): - self.string = string + def __init__(self, string): + self.string = string - def to_string(self): - return self.string + def to_string(self): + return self.string STRUCT_KIND_REGULAR_STRUCT = 0 STRUCT_KIND_TUPLE_STRUCT = 1 @@ -198,47 +206,51 @@ STRUCT_KIND_STRUCT_VARIANT = 4 STRUCT_KIND_CSTYLE_VARIANT = 5 STRUCT_KIND_STR_SLICE = 6 + def classify_struct(type): - if type.tag == "&str": - return STRUCT_KIND_STR_SLICE + if type.tag == "&str": + return STRUCT_KIND_STR_SLICE - fields = list(type.fields()) - field_count = len(fields) + fields = list(type.fields()) + field_count = len(fields) + + if field_count == 0: + return STRUCT_KIND_REGULAR_STRUCT + + if fields[0].name == "RUST$ENUM$DISR": + if field_count == 1: + return STRUCT_KIND_CSTYLE_VARIANT + elif fields[1].name is None: + return STRUCT_KIND_TUPLE_VARIANT + else: + return STRUCT_KIND_STRUCT_VARIANT + + if fields[0].name is None: + if type.tag.startswith("("): + return STRUCT_KIND_TUPLE + else: + return STRUCT_KIND_TUPLE_STRUCT - if field_count == 0: return STRUCT_KIND_REGULAR_STRUCT - if fields[0].name == "RUST$ENUM$DISR": - if field_count == 1: - return STRUCT_KIND_CSTYLE_VARIANT - elif fields[1].name == None: - return STRUCT_KIND_TUPLE_VARIANT - else: - return STRUCT_KIND_STRUCT_VARIANT - - if fields[0].name == None: - if type.tag.startswith("("): - return STRUCT_KIND_TUPLE - else: - return STRUCT_KIND_TUPLE_STRUCT - - return STRUCT_KIND_REGULAR_STRUCT def extract_discriminant_value(enum_val): - assert enum_val.type.code == gdb.TYPE_CODE_UNION - for variant_descriptor in enum_val.type.fields(): - variant_val = enum_val[variant_descriptor] - for field in variant_val.type.fields(): - return (field.name, int(variant_val[field])) + assert enum_val.type.code == gdb.TYPE_CODE_UNION + for variant_descriptor in enum_val.type.fields(): + variant_val = enum_val[variant_descriptor] + for field in variant_val.type.fields(): + return (field.name, int(variant_val[field])) + def first_field(val): - for field in val.type.fields(): - return field + for field in val.type.fields(): + return field + def get_field_at_index(val, index): - i = 0 - for field in val.type.fields(): - if i == index: - return field - i += 1 - return None + i = 0 + for field in val.type.fields(): + if i == index: + return field + i += 1 + return None diff --git a/src/etc/generate-keyword-tests.py b/src/etc/generate-keyword-tests.py index bf421513cba..937c231a473 100755 --- a/src/etc/generate-keyword-tests.py +++ b/src/etc/generate-keyword-tests.py @@ -56,4 +56,4 @@ for kw in sys.argv[1:]: f.write(template % (datetime.datetime.now().year, kw, kw)) # mark file read-only - os.chmod(test_file, stat.S_IRUSR|stat.S_IRGRP|stat.S_IROTH) + os.chmod(test_file, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH) diff --git a/src/etc/get-snapshot.py b/src/etc/get-snapshot.py index 886a84bd819..26246bd2c32 100755 --- a/src/etc/get-snapshot.py +++ b/src/etc/get-snapshot.py @@ -10,36 +10,40 @@ # option. This file may not be copied, modified, or distributed # except according to those terms. 
-import os, tarfile, re, shutil, sys +import os +import tarfile +import shutil +import sys from snapshot import * + def unpack_snapshot(triple, dl_path): - print("opening snapshot " + dl_path) - tar = tarfile.open(dl_path) - kernel = get_kernel(triple) + print("opening snapshot " + dl_path) + tar = tarfile.open(dl_path) + kernel = get_kernel(triple) - stagep = os.path.join(triple, "stage0") + stagep = os.path.join(triple, "stage0") - # Remove files from prior unpackings, since snapshot rustc may not - # be able to disambiguate between multiple candidate libraries. - # (Leave dirs in place since extracting step still needs them.) - for root, _, files in os.walk(stagep): - for f in files: - print("removing " + os.path.join(root, f)) - os.unlink(os.path.join(root, f)) + # Remove files from prior unpackings, since snapshot rustc may not + # be able to disambiguate between multiple candidate libraries. + # (Leave dirs in place since extracting step still needs them.) + for root, _, files in os.walk(stagep): + for f in files: + print("removing " + os.path.join(root, f)) + os.unlink(os.path.join(root, f)) - for p in tar.getnames(): - name = p.replace("rust-stage0/", "", 1); + for p in tar.getnames(): + name = p.replace("rust-stage0/", "", 1) - fp = os.path.join(stagep, name) - print("extracting " + p) - tar.extract(p, download_unpack_base) - tp = os.path.join(download_unpack_base, p) - if os.path.isdir(tp) and os.path.exists(fp): - continue - shutil.move(tp, fp) - tar.close() - shutil.rmtree(download_unpack_base) + fp = os.path.join(stagep, name) + print("extracting " + p) + tar.extract(p, download_unpack_base) + tp = os.path.join(download_unpack_base, p) + if os.path.isdir(tp) and os.path.exists(fp): + continue + shutil.move(tp, fp) + tar.close() + shutil.rmtree(download_unpack_base) # Main @@ -48,23 +52,27 @@ def unpack_snapshot(triple, dl_path): # The first is the O/S triple. # The second is an optional path to the snapshot to use. -triple = sys.argv[1] -if len(sys.argv) == 3: - dl_path = sys.argv[2] -else: - snap = determine_curr_snapshot(triple) - dl = os.path.join(download_dir_base, snap) - url = download_url_base + "/" + snap - print("determined most recent snapshot: " + snap) +def main(argv): + triple = argv[1] + if len(argv) == 3: + dl_path = argv[2] + else: + snap = determine_curr_snapshot(triple) + dl = os.path.join(download_dir_base, snap) + url = download_url_base + "/" + snap + print("determined most recent snapshot: " + snap) - if (not os.path.exists(dl)): - get_url_to_file(url, dl) + if (not os.path.exists(dl)): + get_url_to_file(url, dl) - if (snap_filename_hash_part(snap) == hash_file(dl)): - print("got download with ok hash") - else: - raise Exception("bad hash on download") + if (snap_filename_hash_part(snap) == hash_file(dl)): + print("got download with ok hash") + else: + raise Exception("bad hash on download") - dl_path = os.path.join(download_dir_base, snap) + dl_path = os.path.join(download_dir_base, snap) -unpack_snapshot(triple, dl_path) + unpack_snapshot(triple, dl_path) + +if __name__ == '__main__': + main(sys.argv) diff --git a/src/etc/htmldocck.py b/src/etc/htmldocck.py index ad78e13ca25..22792ff7635 100644 --- a/src/etc/htmldocck.py +++ b/src/etc/htmldocck.py @@ -118,40 +118,54 @@ entitydefs['rarrb'] = u'\u21e5' VOID_ELEMENTS = set(['area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input', 'keygen', 'link', 'menuitem', 'meta', 'param', 'source', 'track', 'wbr']) -# simplified HTML parser. 
-# this is possible because we are dealing with very regular HTML from rustdoc; -# we only have to deal with i) void elements and ii) empty attributes. + class CustomHTMLParser(HTMLParser): + """simplified HTML parser. + + this is possible because we are dealing with very regular HTML from + rustdoc; we only have to deal with i) void elements and ii) empty + attributes.""" def __init__(self, target=None): HTMLParser.__init__(self) self.__builder = target or ET.TreeBuilder() + def handle_starttag(self, tag, attrs): attrs = dict((k, v or '') for k, v in attrs) self.__builder.start(tag, attrs) - if tag in VOID_ELEMENTS: self.__builder.end(tag) + if tag in VOID_ELEMENTS: + self.__builder.end(tag) + def handle_endtag(self, tag): self.__builder.end(tag) + def handle_startendtag(self, tag, attrs): attrs = dict((k, v or '') for k, v in attrs) self.__builder.start(tag, attrs) self.__builder.end(tag) + def handle_data(self, data): self.__builder.data(data) + def handle_entityref(self, name): self.__builder.data(entitydefs[name]) + def handle_charref(self, name): code = int(name[1:], 16) if name.startswith(('x', 'X')) else int(name, 10) self.__builder.data(unichr(code).encode('utf-8')) + def close(self): HTMLParser.close(self) return self.__builder.close() Command = namedtuple('Command', 'negated cmd args lineno') -# returns a generator out of the file object, which -# - removes `\\` then `\n` then a shared prefix with the previous line then optional whitespace; -# - keeps a line number (starting from 0) of the first line being concatenated. + def concat_multi_lines(f): + """returns a generator out of the file object, which + - removes `\\` then `\n` then a shared prefix with the previous line then + optional whitespace; + - keeps a line number (starting from 0) of the first line being + concatenated.""" lastline = None # set to the last line when the last line has a backslash firstlineno = None catenated = '' @@ -162,7 +176,8 @@ def concat_multi_lines(f): if lastline is not None: maxprefix = 0 for i in xrange(min(len(line), len(lastline))): - if line[i] != lastline[i]: break + if line[i] != lastline[i]: + break maxprefix += 1 line = line[maxprefix:].lstrip() @@ -184,11 +199,14 @@ LINE_PATTERN = re.compile(r''' (?P[A-Za-z]+(?:-[A-Za-z]+)*) (?P.*)$ ''', re.X) + + def get_commands(template): with open(template, 'rUb') as f: for lineno, line in concat_multi_lines(f): m = LINE_PATTERN.search(line) - if not m: continue + if not m: + continue negated = (m.group('negated') == '!') cmd = m.group('cmd') @@ -198,17 +216,22 @@ def get_commands(template): args = shlex.split(args) yield Command(negated=negated, cmd=cmd, args=args, lineno=lineno+1) + def _flatten(node, acc): - if node.text: acc.append(node.text) + if node.text: + acc.append(node.text) for e in node: _flatten(e, acc) - if e.tail: acc.append(e.tail) + if e.tail: + acc.append(e.tail) + def flatten(node): acc = [] _flatten(node, acc) return ''.join(acc) + def normalize_xpath(path): if path.startswith('//'): return '.' 
+ path # avoid warnings @@ -218,6 +241,7 @@ def normalize_xpath(path): raise RuntimeError('Non-absolute XPath is not supported due to \ the implementation issue.') + class CachedFiles(object): def __init__(self, root): self.root = root @@ -267,6 +291,7 @@ class CachedFiles(object): self.trees[path] = tree return self.trees[path] + def check_string(data, pat, regexp): if not pat: return True # special case a presence testing @@ -277,6 +302,7 @@ def check_string(data, pat, regexp): pat = ' '.join(pat.split()) return pat in data + def check_tree_attr(tree, path, attr, pat, regexp): path = normalize_xpath(path) ret = False @@ -287,9 +313,11 @@ def check_tree_attr(tree, path, attr, pat, regexp): continue else: ret = check_string(value, pat, regexp) - if ret: break + if ret: + break return ret + def check_tree_text(tree, path, pat, regexp): path = normalize_xpath(path) ret = False @@ -300,9 +328,11 @@ def check_tree_text(tree, path, pat, regexp): continue else: ret = check_string(value, pat, regexp) - if ret: break + if ret: + break return ret + def check(target, commands): cache = CachedFiles(target) for c in commands: @@ -323,7 +353,8 @@ def check(target, commands): ret = check_tree_attr(cache.get_tree(c.args[0]), pat, attr, c.args[2], regexp) else: # normalized text pat = c.args[1] - if pat.endswith('/text()'): pat = pat[:-7] + if pat.endswith('/text()'): + pat = pat[:-7] ret = check_tree_text(cache.get_tree(c.args[0]), pat, c.args[2], regexp) else: raise RuntimeError('Invalid number of @{} arguments \ @@ -348,4 +379,3 @@ if __name__ == '__main__': raise SystemExit(1) else: check(sys.argv[1], get_commands(sys.argv[2])) - diff --git a/src/etc/latest-unix-snaps.py b/src/etc/latest-unix-snaps.py index 32e9691f815..6c93bf23f90 100755 --- a/src/etc/latest-unix-snaps.py +++ b/src/etc/latest-unix-snaps.py @@ -10,7 +10,8 @@ # option. This file may not be copied, modified, or distributed # except according to those terms. 
-import os, tarfile, hashlib, re, shutil, sys +import os +import re from snapshot import * f = open(snapshotfile) @@ -26,7 +27,8 @@ newestSet = {} for line in f.readlines(): i += 1 parsed = parse_line(i, line) - if (not parsed): continue + if not parsed: + continue if parsed["type"] == "snapshot": if (len(newestSet) == 0 or parsed["date"] > newestSet["date"]): @@ -37,16 +39,16 @@ for line in f.readlines(): else: addingMode = False - elif addingMode == True and parsed["type"] == "file": + elif addingMode is True and parsed["type"] == "file": tux = re.compile("linux", re.IGNORECASE) - if (tux.match(parsed["platform"]) != None): - ff = {} - ff["platform"] = parsed["platform"] - ff["hash"] = parsed["hash"] - newestSet["files"] += [ff] + if (tux.match(parsed["platform"]) is not None): + ff = {} + ff["platform"] = parsed["platform"] + ff["hash"] = parsed["hash"] + newestSet["files"] += [ff] -def download_new_file (date, rev, platform, hsh): +def download_new_file(date, rev, platform, hsh): snap = full_snapshot_name(date, rev, platform, hsh) dl = os.path.join(download_dir_base, snap) url = download_url_base + "/" + snap @@ -59,5 +61,5 @@ def download_new_file (date, rev, platform, hsh): raise Exception("bad hash on download") for ff in newestSet["files"]: - download_new_file (newestSet["date"], newestSet["rev"], + download_new_file(newestSet["date"], newestSet["rev"], ff["platform"], ff["hash"]) diff --git a/src/etc/lldb_batchmode.py b/src/etc/lldb_batchmode.py index 25e5661ca49..b1506285b3a 100644 --- a/src/etc/lldb_batchmode.py +++ b/src/etc/lldb_batchmode.py @@ -30,36 +30,35 @@ import sys import threading import thread import re -import atexit import time # Set this to True for additional output DEBUG_OUTPUT = False + def print_debug(s): - "Print something if DEBUG_OUTPUT is True" - global DEBUG_OUTPUT - if DEBUG_OUTPUT: - print("DEBUG: " + str(s)) + "Print something if DEBUG_OUTPUT is True" + global DEBUG_OUTPUT + if DEBUG_OUTPUT: + print("DEBUG: " + str(s)) def normalize_whitespace(s): - "Replace newlines, tabs, multiple spaces, etc with exactly one space" - return re.sub("\s+", " ", s) + "Replace newlines, tabs, multiple spaces, etc with exactly one space" + return re.sub("\s+", " ", s) -# This callback is registered with every breakpoint and makes sure that the frame containing the -# breakpoint location is selected def breakpoint_callback(frame, bp_loc, dict): - "Called whenever a breakpoint is hit" - print("Hit breakpoint " + str(bp_loc)) + """This callback is registered with every breakpoint and makes sure that the + frame containing the breakpoint location is selected""" + print("Hit breakpoint " + str(bp_loc)) - # Select the frame and the thread containing it - frame.thread.process.SetSelectedThread(frame.thread) - frame.thread.SetSelectedFrame(frame.idx) + # Select the frame and the thread containing it + frame.thread.process.SetSelectedThread(frame.thread) + frame.thread.SetSelectedFrame(frame.idx) - # Returning True means that we actually want to stop at this breakpoint - return True + # Returning True means that we actually want to stop at this breakpoint + return True # This is a list of breakpoints that are not registered with the breakpoint callback. 
The list is @@ -70,91 +69,99 @@ new_breakpoints = [] # used to avoid hooking callbacks into breakpoints more than once registered_breakpoints = set() + def execute_command(command_interpreter, command): - "Executes a single CLI command" - global new_breakpoints - global registered_breakpoints + "Executes a single CLI command" + global new_breakpoints + global registered_breakpoints - res = lldb.SBCommandReturnObject() - print(command) - command_interpreter.HandleCommand(command, res) + res = lldb.SBCommandReturnObject() + print(command) + command_interpreter.HandleCommand(command, res) - if res.Succeeded(): - if res.HasResult(): - print(normalize_whitespace(res.GetOutput()), end = '\n') + if res.Succeeded(): + if res.HasResult(): + print(normalize_whitespace(res.GetOutput()), end='\n') - # If the command introduced any breakpoints, make sure to register them with the breakpoint - # callback - while len(new_breakpoints) > 0: - res.Clear() - breakpoint_id = new_breakpoints.pop() + # If the command introduced any breakpoints, make sure to register + # them with the breakpoint + # callback + while len(new_breakpoints) > 0: + res.Clear() + breakpoint_id = new_breakpoints.pop() - if breakpoint_id in registered_breakpoints: - print_debug("breakpoint with id %s is already registered. Ignoring." % str(breakpoint_id)) - else: - print_debug("registering breakpoint callback, id = " + str(breakpoint_id)) - callback_command = "breakpoint command add -F breakpoint_callback " + str(breakpoint_id) - command_interpreter.HandleCommand(callback_command, res) - if res.Succeeded(): - print_debug("successfully registered breakpoint callback, id = " + str(breakpoint_id)) - registered_breakpoints.add(breakpoint_id) - else: - print("Error while trying to register breakpoint callback, id = " + str(breakpoint_id)) - else: - print(res.GetError()) + if breakpoint_id in registered_breakpoints: + print_debug("breakpoint with id %s is already registered. Ignoring." 
% + str(breakpoint_id)) + else: + print_debug("registering breakpoint callback, id = " + str(breakpoint_id)) + callback_command = ("breakpoint command add -F breakpoint_callback " + + str(breakpoint_id)) + command_interpreter.HandleCommand(callback_command, res) + if res.Succeeded(): + print_debug("successfully registered breakpoint callback, id = " + + str(breakpoint_id)) + registered_breakpoints.add(breakpoint_id) + else: + print("Error while trying to register breakpoint callback, id = " + + str(breakpoint_id)) + else: + print(res.GetError()) def start_breakpoint_listener(target): - "Listens for breakpoints being added and adds new ones to the callback registration list" - listener = lldb.SBListener("breakpoint listener") + """Listens for breakpoints being added and adds new ones to the callback + registration list""" + listener = lldb.SBListener("breakpoint listener") - def listen(): - event = lldb.SBEvent() - try: - while True: - if listener.WaitForEvent(120, event): - if lldb.SBBreakpoint.EventIsBreakpointEvent(event) and \ - lldb.SBBreakpoint.GetBreakpointEventTypeFromEvent(event) == \ - lldb.eBreakpointEventTypeAdded: - global new_breakpoints - breakpoint = lldb.SBBreakpoint.GetBreakpointFromEvent(event) - print_debug("breakpoint added, id = " + str(breakpoint.id)) - new_breakpoints.append(breakpoint.id) - except: - print_debug("breakpoint listener shutting down") + def listen(): + event = lldb.SBEvent() + try: + while True: + if listener.WaitForEvent(120, event): + if lldb.SBBreakpoint.EventIsBreakpointEvent(event) and \ + lldb.SBBreakpoint.GetBreakpointEventTypeFromEvent(event) == \ + lldb.eBreakpointEventTypeAdded: + global new_breakpoints + breakpoint = lldb.SBBreakpoint.GetBreakpointFromEvent(event) + print_debug("breakpoint added, id = " + str(breakpoint.id)) + new_breakpoints.append(breakpoint.id) + except: + print_debug("breakpoint listener shutting down") - # Start the listener and let it run as a daemon - listener_thread = threading.Thread(target = listen) - listener_thread.daemon = True - listener_thread.start() + # Start the listener and let it run as a daemon + listener_thread = threading.Thread(target=listen) + listener_thread.daemon = True + listener_thread.start() - # Register the listener with the target - target.GetBroadcaster().AddListener(listener, lldb.SBTarget.eBroadcastBitBreakpointChanged) + # Register the listener with the target + target.GetBroadcaster().AddListener(listener, lldb.SBTarget.eBroadcastBitBreakpointChanged) def start_watchdog(): - "Starts a watchdog thread that will terminate the process after a certain period of time" - watchdog_start_time = time.clock() - watchdog_max_time = watchdog_start_time + 30 + """Starts a watchdog thread that will terminate the process after a certain + period of time""" + watchdog_start_time = time.clock() + watchdog_max_time = watchdog_start_time + 30 - def watchdog(): - while time.clock() < watchdog_max_time: - time.sleep(1) - print("TIMEOUT: lldb_batchmode.py has been running for too long. Aborting!") - thread.interrupt_main() + def watchdog(): + while time.clock() < watchdog_max_time: + time.sleep(1) + print("TIMEOUT: lldb_batchmode.py has been running for too long. 
Aborting!") + thread.interrupt_main() - # Start the listener and let it run as a daemon - watchdog_thread = threading.Thread(target = watchdog) - watchdog_thread.daemon = True - watchdog_thread.start() + # Start the listener and let it run as a daemon + watchdog_thread = threading.Thread(target=watchdog) + watchdog_thread.daemon = True + watchdog_thread.start() #################################################################################################### # ~main #################################################################################################### if len(sys.argv) != 3: - print("usage: python lldb_batchmode.py target-path script-path") - sys.exit(1) + print("usage: python lldb_batchmode.py target-path script-path") + sys.exit(1) target_path = sys.argv[1] script_path = sys.argv[2] @@ -181,9 +188,9 @@ target_error = lldb.SBError() target = debugger.CreateTarget(target_path, None, None, True, target_error) if not target: - print("Could not create debugging target '" + target_path + "': " + str(target_error) + - ". Aborting.", file=sys.stderr) - sys.exit(1) + print("Could not create debugging target '" + target_path + "': " + + str(target_error) + ". Aborting.", file=sys.stderr) + sys.exit(1) # Register the breakpoint callback for every breakpoint @@ -192,22 +199,21 @@ start_breakpoint_listener(target) command_interpreter = debugger.GetCommandInterpreter() try: - script_file = open(script_path, 'r') + script_file = open(script_path, 'r') - for line in script_file: - command = line.strip() - if command == "run" or command == "r" or re.match("^process\s+launch.*", command): - # Before starting to run the program, let the thread sleep a bit, so all - # breakpoint added events can be processed - time.sleep(0.5) - if command != '': - execute_command(command_interpreter, command) + for line in script_file: + command = line.strip() + if command == "run" or command == "r" or re.match("^process\s+launch.*", command): + # Before starting to run the program, let the thread sleep a bit, so all + # breakpoint added events can be processed + time.sleep(0.5) + if command != '': + execute_command(command_interpreter, command) except IOError as e: - print("Could not read debugging script '%s'." % script_path, file = sys.stderr) - print(e, file = sys.stderr) - print("Aborting.", file = sys.stderr) - sys.exit(1) + print("Could not read debugging script '%s'." 
% script_path, file=sys.stderr) + print(e, file=sys.stderr) + print("Aborting.", file=sys.stderr) + sys.exit(1) finally: - script_file.close() - + script_file.close() diff --git a/src/etc/lldb_rust_formatters.py b/src/etc/lldb_rust_formatters.py index 05d71902904..42c83b6a42e 100644 --- a/src/etc/lldb_rust_formatters.py +++ b/src/etc/lldb_rust_formatters.py @@ -10,23 +10,24 @@ import lldb + def print_val(val, internal_dict): - '''Prints the given value with Rust syntax''' - type_class = val.GetType().GetTypeClass() + '''Prints the given value with Rust syntax''' + type_class = val.GetType().GetTypeClass() - if type_class == lldb.eTypeClassStruct: - return print_struct_val(val, internal_dict) + if type_class == lldb.eTypeClassStruct: + return print_struct_val(val, internal_dict) - if type_class == lldb.eTypeClassUnion: - return print_enum_val(val, internal_dict) + if type_class == lldb.eTypeClassUnion: + return print_enum_val(val, internal_dict) - if type_class == lldb.eTypeClassPointer: - return print_pointer_val(val, internal_dict) + if type_class == lldb.eTypeClassPointer: + return print_pointer_val(val, internal_dict) - if type_class == lldb.eTypeClassArray: - return print_fixed_size_vec_val(val, internal_dict) + if type_class == lldb.eTypeClassArray: + return print_fixed_size_vec_val(val, internal_dict) - return val.GetValue() + return val.GetValue() #=-------------------------------------------------------------------------------------------------- @@ -34,162 +35,164 @@ def print_val(val, internal_dict): #=-------------------------------------------------------------------------------------------------- def print_struct_val(val, internal_dict): - '''Prints a struct, tuple, or tuple struct value with Rust syntax''' - assert val.GetType().GetTypeClass() == lldb.eTypeClassStruct + '''Prints a struct, tuple, or tuple struct value with Rust syntax''' + assert val.GetType().GetTypeClass() == lldb.eTypeClassStruct + + if is_vec_slice(val): + return print_vec_slice_val(val, internal_dict) + else: + return print_struct_val_starting_from(0, val, internal_dict) - if is_vec_slice(val): - return print_vec_slice_val(val, internal_dict) - else: - return print_struct_val_starting_from(0, val, internal_dict) def print_vec_slice_val(val, internal_dict): - length = val.GetChildAtIndex(1).GetValueAsUnsigned() + length = val.GetChildAtIndex(1).GetValueAsUnsigned() - data_ptr_val = val.GetChildAtIndex(0) - data_ptr_type = data_ptr_val.GetType() - assert data_ptr_type.IsPointerType() + data_ptr_val = val.GetChildAtIndex(0) + data_ptr_type = data_ptr_val.GetType() + assert data_ptr_type.IsPointerType() - element_type = data_ptr_type.GetPointeeType() - element_type_size = element_type.GetByteSize() + element_type = data_ptr_type.GetPointeeType() + element_type_size = element_type.GetByteSize() - start_address = data_ptr_val.GetValueAsUnsigned() + start_address = data_ptr_val.GetValueAsUnsigned() - def render_element(i): - address = start_address + i * element_type_size - element_val = val.CreateValueFromAddress( val.GetName() + ("[%s]" % i), address, element_type) - return print_val(element_val, internal_dict) + def render_element(i): + address = start_address + i * element_type_size + element_val = val.CreateValueFromAddress(val.GetName() + + ("[%s]" % i), address, element_type) + return print_val(element_val, internal_dict) + + return "&[%s]" % (', '.join([render_element(i) for i in range(length)])) - return "&[%s]" % (', '.join([render_element(i) for i in range(length)])) def 
print_struct_val_starting_from(field_start_index, val, internal_dict): - ''' - Prints a struct, tuple, or tuple struct value with Rust syntax. - Ignores any fields before field_start_index. - ''' - assert val.GetType().GetTypeClass() == lldb.eTypeClassStruct + ''' + Prints a struct, tuple, or tuple struct value with Rust syntax. + Ignores any fields before field_start_index. + ''' + assert val.GetType().GetTypeClass() == lldb.eTypeClassStruct - t = val.GetType() - type_name = extract_type_name(t.GetName()) - num_children = val.num_children + t = val.GetType() + type_name = extract_type_name(t.GetName()) + num_children = val.num_children - if (num_children - field_start_index) == 0: - # The only field of this struct is the enum discriminant - return type_name + if (num_children - field_start_index) == 0: + # The only field of this struct is the enum discriminant + return type_name - has_field_names = type_has_field_names(t) + has_field_names = type_has_field_names(t) - if has_field_names: - template = "%(type_name)s {\n%(body)s\n}" - separator = ", \n" - else: - template = "%(type_name)s(%(body)s)" - separator = ", " - - if type_name.startswith("("): - # this is a tuple, so don't print the type name - type_name = "" - - def render_child(child_index): - this = "" if has_field_names: - field_name = t.GetFieldAtIndex(child_index).GetName() - this += field_name + ": " + template = "%(type_name)s {\n%(body)s\n}" + separator = ", \n" + else: + template = "%(type_name)s(%(body)s)" + separator = ", " - field_val = val.GetChildAtIndex(child_index) - return this + print_val(field_val, internal_dict) + if type_name.startswith("("): + # this is a tuple, so don't print the type name + type_name = "" - body = separator.join([render_child(idx) for idx in range(field_start_index, num_children)]) + def render_child(child_index): + this = "" + if has_field_names: + field_name = t.GetFieldAtIndex(child_index).GetName() + this += field_name + ": " - return template % {"type_name": type_name, - "body": body} + field_val = val.GetChildAtIndex(child_index) + return this + print_val(field_val, internal_dict) + + body = separator.join([render_child(idx) for idx in range(field_start_index, num_children)]) + + return template % {"type_name": type_name, + "body": body} def print_enum_val(val, internal_dict): - '''Prints an enum value with Rust syntax''' + '''Prints an enum value with Rust syntax''' - assert val.GetType().GetTypeClass() == lldb.eTypeClassUnion + assert val.GetType().GetTypeClass() == lldb.eTypeClassUnion - if val.num_children == 1: - # This is either an enum with just one variant, or it is an Option-like enum - # where the discriminant is encoded in a non-nullable pointer field. We find - # out which one it is by looking at the member name of the sole union - # variant. If it starts with "RUST$ENCODED$ENUM$" then we have an - # Option-like enum. - first_variant_name = val.GetChildAtIndex(0).GetName() - if first_variant_name and first_variant_name.startswith("RUST$ENCODED$ENUM$"): + if val.num_children == 1: + # This is either an enum with just one variant, or it is an Option-like + # enum where the discriminant is encoded in a non-nullable pointer + # field. We find out which one it is by looking at the member name of + # the sole union variant. If it starts with "RUST$ENCODED$ENUM$" then + # we have an Option-like enum. + first_variant_name = val.GetChildAtIndex(0).GetName() + if first_variant_name and first_variant_name.startswith("RUST$ENCODED$ENUM$"): - # This is an Option-like enum. 
The position of the discriminator field is - # encoded in the name which has the format: - # RUST$ENCODED$ENUM$$ - last_separator_index = first_variant_name.rfind("$") - if last_separator_index == -1: - return "" % first_variant_name + # This is an Option-like enum. The position of the discriminator field is + # encoded in the name which has the format: + # RUST$ENCODED$ENUM$$ + last_separator_index = first_variant_name.rfind("$") + if last_separator_index == -1: + return "" % first_variant_name - start_index = len("RUST$ENCODED$ENUM$") + start_index = len("RUST$ENCODED$ENUM$") - # Extract indices of the discriminator field - try: - disr_field_indices = first_variant_name[start_index : - last_separator_index].split("$") - disr_field_indices = [int(index) for index in disr_field_indices] - except: - return "" % first_variant_name + # Extract indices of the discriminator field + try: + disr_field_indices = first_variant_name[start_index:last_separator_index].split("$") + disr_field_indices = [int(index) for index in disr_field_indices] + except: + return "" % first_variant_name - # Read the discriminant - disr_val = val.GetChildAtIndex(0) - for index in disr_field_indices: - disr_val = disr_val.GetChildAtIndex(index) + # Read the discriminant + disr_val = val.GetChildAtIndex(0) + for index in disr_field_indices: + disr_val = disr_val.GetChildAtIndex(index) - # If the discriminant field is a fat pointer we have to consider the - # first word as the true discriminant - if disr_val.GetType().GetTypeClass() == lldb.eTypeClassStruct: - disr_val = disr_val.GetChildAtIndex(0) + # If the discriminant field is a fat pointer we have to consider the + # first word as the true discriminant + if disr_val.GetType().GetTypeClass() == lldb.eTypeClassStruct: + disr_val = disr_val.GetChildAtIndex(0) - if disr_val.GetValueAsUnsigned() == 0: - # Null case: Print the name of the null-variant - null_variant_name = first_variant_name[last_separator_index + 1:] - return null_variant_name - else: - # Non-null case: Interpret the data as a value of the non-null variant type - return print_struct_val_starting_from(0, val.GetChildAtIndex(0), internal_dict) - else: - # This is just a regular uni-variant enum without discriminator field - return print_struct_val_starting_from(0, val.GetChildAtIndex(0), internal_dict) + if disr_val.GetValueAsUnsigned() == 0: + # Null case: Print the name of the null-variant + null_variant_name = first_variant_name[last_separator_index + 1:] + return null_variant_name + else: + # Non-null case: Interpret the data as a value of the non-null variant type + return print_struct_val_starting_from(0, val.GetChildAtIndex(0), internal_dict) + else: + # This is just a regular uni-variant enum without discriminator field + return print_struct_val_starting_from(0, val.GetChildAtIndex(0), internal_dict) - # If we are here, this is a regular enum with more than one variant - disr_val = val.GetChildAtIndex(0).GetChildMemberWithName("RUST$ENUM$DISR") - disr_type = disr_val.GetType() + # If we are here, this is a regular enum with more than one variant + disr_val = val.GetChildAtIndex(0).GetChildMemberWithName("RUST$ENUM$DISR") + disr_type = disr_val.GetType() - if disr_type.GetTypeClass() != lldb.eTypeClassEnumeration: - return "" + if disr_type.GetTypeClass() != lldb.eTypeClassEnumeration: + return "" - variant_index = disr_val.GetValueAsUnsigned() - return print_struct_val_starting_from(1, val.GetChildAtIndex(variant_index), internal_dict) + variant_index = disr_val.GetValueAsUnsigned() + return 
print_struct_val_starting_from(1, val.GetChildAtIndex(variant_index), internal_dict) def print_pointer_val(val, internal_dict): - '''Prints a pointer value with Rust syntax''' - assert val.GetType().IsPointerType() - sigil = "&" - type_name = extract_type_name(val.GetType().GetName()) - if type_name and type_name[0:1] in ["&", "~", "*"]: - sigil = type_name[0:1] + '''Prints a pointer value with Rust syntax''' + assert val.GetType().IsPointerType() + sigil = "&" + type_name = extract_type_name(val.GetType().GetName()) + if type_name and type_name[0:1] in ["&", "~", "*"]: + sigil = type_name[0:1] - return sigil + hex(val.GetValueAsUnsigned()) #print_val(val.Dereference(), internal_dict) + return sigil + hex(val.GetValueAsUnsigned()) #print_val(val.Dereference(), internal_dict) def print_fixed_size_vec_val(val, internal_dict): - assert val.GetType().GetTypeClass() == lldb.eTypeClassArray + assert val.GetType().GetTypeClass() == lldb.eTypeClassArray - output = "[" + output = "[" - for i in range(val.num_children): - output += print_val(val.GetChildAtIndex(i), internal_dict) - if i != val.num_children - 1: - output += ", " + for i in range(val.num_children): + output += print_val(val.GetChildAtIndex(i), internal_dict) + if i != val.num_children - 1: + output += ", " - output += "]" - return output + output += "]" + return output #=-------------------------------------------------------------------------------------------------- @@ -198,46 +201,45 @@ def print_fixed_size_vec_val(val, internal_dict): unqualified_type_markers = frozenset(["(", "[", "&", "*"]) + def extract_type_name(qualified_type_name): - '''Extracts the type name from a fully qualified path''' - if qualified_type_name[0] in unqualified_type_markers: - return qualified_type_name + '''Extracts the type name from a fully qualified path''' + if qualified_type_name[0] in unqualified_type_markers: + return qualified_type_name - end_of_search = qualified_type_name.find("<") - if end_of_search < 0: - end_of_search = len(qualified_type_name) + end_of_search = qualified_type_name.find("<") + if end_of_search < 0: + end_of_search = len(qualified_type_name) - index = qualified_type_name.rfind("::", 0, end_of_search) - if index < 0: - return qualified_type_name - else: - return qualified_type_name[index + 2:] + index = qualified_type_name.rfind("::", 0, end_of_search) + if index < 0: + return qualified_type_name + else: + return qualified_type_name[index + 2:] def type_has_field_names(ty): - '''Returns true of this is a type with field names (struct, struct-like enum variant)''' - # This may also be an enum variant where the first field doesn't have a name but the rest has - if ty.GetNumberOfFields() > 1: - return ty.GetFieldAtIndex(1).GetName() != None - else: - return ty.GetFieldAtIndex(0).GetName() != None + '''Returns true of this is a type with field names (struct, struct-like enum variant)''' + # This may also be an enum variant where the first field doesn't have a name but the rest has + if ty.GetNumberOfFields() > 1: + return ty.GetFieldAtIndex(1).GetName() is not None + else: + return ty.GetFieldAtIndex(0).GetName() is not None def is_vec_slice(val): - ty = val.GetType() - if ty.GetTypeClass() != lldb.eTypeClassStruct: - return False + ty = val.GetType() + if ty.GetTypeClass() != lldb.eTypeClassStruct: + return False - if ty.GetNumberOfFields() != 2: - return False + if ty.GetNumberOfFields() != 2: + return False - if ty.GetFieldAtIndex(0).GetName() != "data_ptr": - return False + if ty.GetFieldAtIndex(0).GetName() != "data_ptr": + 
return False - if ty.GetFieldAtIndex(1).GetName() != "length": - return False + if ty.GetFieldAtIndex(1).GetName() != "length": + return False - type_name = extract_type_name(ty.GetName()).replace("&'static", "&").replace(" ", "") - return type_name.startswith("&[") and type_name.endswith("]") - -# vi: sw=2:ts=2 + type_name = extract_type_name(ty.GetName()).replace("&'static", "&").replace(" ", "") + return type_name.startswith("&[") and type_name.endswith("]") diff --git a/src/etc/make-win-dist.py b/src/etc/make-win-dist.py index ea2a98db2dc..13d05135024 100644 --- a/src/etc/make-win-dist.py +++ b/src/etc/make-win-dist.py @@ -14,7 +14,11 @@ # argv[3] = target triple # The first two correspond to the two installable components defined in the setup script. -import sys, os, shutil, subprocess +import sys +import os +import shutil +import subprocess + def find_files(files, path): found = [] @@ -28,6 +32,7 @@ def find_files(files, path): raise Exception("Could not find '%s' in %s" % (fname, path)) return found + def make_win_dist(rust_root, gcc_root, target_triple): # Ask gcc where it keeps its stuff gcc_out = subprocess.check_output(["gcc.exe", "-print-search-dirs"]) @@ -114,5 +119,5 @@ def make_win_dist(rust_root, gcc_root, target_triple): for src in target_libs: shutil.copy(src, target_lib_dir) -if __name__=="__main__": +if __name__ == "__main__": make_win_dist(sys.argv[1], sys.argv[2], sys.argv[3]) diff --git a/src/etc/maketest.py b/src/etc/maketest.py index 3f29c0b2f12..f500de5e15d 100644 --- a/src/etc/maketest.py +++ b/src/etc/maketest.py @@ -12,13 +12,14 @@ import subprocess import os import sys -# msys1/msys2 automatically converts `/abs/path1:/abs/path2` into -# `c:\real\abs\path1;c:\real\abs\path2` (semicolons) if shell thinks -# the value is list of paths. -# (if there is only one path, it becomes `c:/real/abs/path`.) -# this causes great confusion and error: shell and Makefile doesn't like -# windows paths so it is really error-prone. revert it for peace. + def normalize_path(v): + """msys1/msys2 automatically converts `/abs/path1:/abs/path2` into + `c:\real\abs\path1;c:\real\abs\path2` (semicolons) if shell thinks + the value is list of paths. + (if there is only one path, it becomes `c:/real/abs/path`.) + this causes great confusion and error: shell and Makefile doesn't like + windows paths so it is really error-prone. 
revert it for peace."""
    v = v.replace('\\', '/')
    # c:/path -> /c/path
    if ':/' in v:
@@ -31,6 +32,7 @@ def putenv(name, value):
         value = normalize_path(value)
     os.putenv(name, value)
 
+
 def convert_path_spec(name, value):
     if os.name == 'nt' and name != 'PATH':
         value = ":".join(normalize_path(v) for v in value.split(";"))
@@ -42,14 +44,14 @@ putenv('TMPDIR', os.path.abspath(sys.argv[4]))
 putenv('CC', sys.argv[5])
 putenv('RUSTDOC', os.path.abspath(sys.argv[6]))
 filt = sys.argv[7]
-putenv('LD_LIB_PATH_ENVVAR', sys.argv[8]);
-putenv('HOST_RPATH_DIR', os.path.abspath(sys.argv[9]));
-putenv('TARGET_RPATH_DIR', os.path.abspath(sys.argv[10]));
+putenv('LD_LIB_PATH_ENVVAR', sys.argv[8])
+putenv('HOST_RPATH_DIR', os.path.abspath(sys.argv[9]))
+putenv('TARGET_RPATH_DIR', os.path.abspath(sys.argv[10]))
 putenv('RUST_BUILD_STAGE', sys.argv[11])
 putenv('S', os.path.abspath(sys.argv[12]))
 putenv('PYTHON', sys.executable)
 
-if not filt in sys.argv[1]:
+if filt not in sys.argv[1]:
     sys.exit(0)
 
 print('maketest: ' + os.path.basename(os.path.dirname(sys.argv[1])))
@@ -63,19 +65,19 @@ if path[-1] == '/':
     path = path[:-1]
 
 proc = subprocess.Popen([make, '-C', path],
-                        stdout = subprocess.PIPE,
-                        stderr = subprocess.PIPE)
+                        stdout=subprocess.PIPE,
+                        stderr=subprocess.PIPE)
 out, err = proc.communicate()
 i = proc.wait()
 
 if i != 0:
-
-    print '----- ' + sys.argv[1] + """ --------------------
+    print """\
+----- %s --------------------
 ------ stdout ---------------------------------------------
-""" + out + """
+%s
 ------ stderr ---------------------------------------------
-""" + err + """
+%s
 ------  ---------------------------------------------
-"""
-    sys.exit(i)
+""" % (sys.argv[1], out, err)
+    sys.exit(i)
diff --git a/src/etc/mirror-all-snapshots.py b/src/etc/mirror-all-snapshots.py
index 3934c235e8c..cd77f882140 100644
--- a/src/etc/mirror-all-snapshots.py
+++ b/src/etc/mirror-all-snapshots.py
@@ -10,7 +10,7 @@
 # option. This file may not be copied, modified, or distributed
 # except according to those terms.
 
-import os, tarfile, hashlib, re, shutil
+import os
 from snapshot import *
 
 f = open(snapshotfile)
@@ -23,13 +23,14 @@ i = 0
 for line in f.readlines():
     i += 1
     parsed = parse_line(i, line)
-    if (not parsed): continue
+    if not parsed:
+        continue
 
     if parsed["type"] == "snapshot":
         date = parsed["date"]
         rev = parsed["rev"]
 
-    elif rev != None and parsed["type"] == "file":
+    elif rev is not None and parsed["type"] == "file":
         platform = parsed["platform"]
         hsh = parsed["hash"]
         snap = full_snapshot_name(date, rev, platform, hsh)
diff --git a/src/etc/mklldeps.py b/src/etc/mklldeps.py
index 834ba074c62..a4234159cb8 100644
--- a/src/etc/mklldeps.py
+++ b/src/etc/mklldeps.py
@@ -11,8 +11,6 @@
 import os
 import sys
 import subprocess
-import itertools
-from os import path
 
 f = open(sys.argv[1], 'wb')
 
@@ -35,6 +33,7 @@ f.write("""// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
 // take a look at src/etc/mklldeps.py if you're interested
 """)
 
+
 def run(args):
     proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
     out, err = proc.communicate()
diff --git a/src/etc/snapshot.py b/src/etc/snapshot.py
index 8f45f7f1af2..698c3a551e2 100644
--- a/src/etc/snapshot.py
+++ b/src/etc/snapshot.py
@@ -8,24 +8,32 @@
 # option. This file may not be copied, modified, or distributed
 # except according to those terms.
 
-import re, os, sys, glob, tarfile, shutil, subprocess, tempfile, distutils.spawn
+import re
+import os
+import sys
+import glob
+import tarfile
+import shutil
+import subprocess
+import distutils.spawn
 
 try:
-  import hashlib
-  sha_func = hashlib.sha1
+    import hashlib
+    sha_func = hashlib.sha1
 except ImportError:
-  import sha
-  sha_func = sha.new
+    import sha
+    sha_func = sha.new
 
+
 def scrub(b):
-  if sys.version_info >= (3,) and type(b) == bytes:
-    return b.decode('ascii')
-  else:
-    return b
+    if sys.version_info >= (3,) and type(b) == bytes:
+        return b.decode('ascii')
+    else:
+        return b
 
 src_dir = scrub(os.getenv("CFG_SRC_DIR"))
 if not src_dir:
-  raise Exception("missing env var CFG_SRC_DIR")
+    raise Exception("missing env var CFG_SRC_DIR")
 
 snapshotfile = os.path.join(src_dir, "src", "snapshots.txt")
 download_url_base = "https://static.rust-lang.org/stage0-snapshots"
@@ -33,54 +41,57 @@ download_dir_base = "dl"
 download_unpack_base = os.path.join(download_dir_base, "unpack")
 
 snapshot_files = {
-  "linux": ["bin/rustc"],
-  "macos": ["bin/rustc"],
-  "winnt": ["bin/rustc.exe"],
-  "freebsd": ["bin/rustc"],
-  "dragonfly": ["bin/rustc"],
-  }
+    "linux": ["bin/rustc"],
+    "macos": ["bin/rustc"],
+    "winnt": ["bin/rustc.exe"],
+    "freebsd": ["bin/rustc"],
+    "dragonfly": ["bin/rustc"],
+    }
+
+winnt_runtime_deps_32 = ["libgcc_s_dw2-1.dll", "libstdc++-6.dll"]
+winnt_runtime_deps_64 = ["libgcc_s_seh-1.dll", "libstdc++-6.dll"]
 
-winnt_runtime_deps_32 = ["libgcc_s_dw2-1.dll",
-                         "libstdc++-6.dll"]
-winnt_runtime_deps_64 = ["libgcc_s_seh-1.dll",
-                         "libstdc++-6.dll"]
 
 def parse_line(n, line):
-  global snapshotfile
+    global snapshotfile
 
-  if re.match(r"\s*$", line): return None
+    if re.match(r"\s*$", line):
+        return None
 
-  if re.match(r"^T\s*$", line): return None
+    if re.match(r"^T\s*$", line):
+        return None
 
-  match = re.match(r"\s+([\w_-]+) ([a-fA-F\d]{40})\s*$", line)
-  if match:
-    return { "type": "file",
-             "platform": match.group(1),
-             "hash": match.group(2).lower() }
+    match = re.match(r"\s+([\w_-]+) ([a-fA-F\d]{40})\s*$", line)
+    if match:
+        return {"type": "file",
+                "platform": match.group(1),
+                "hash": match.group(2).lower()}
 
-  match = re.match(r"([ST]) (\d{4}-\d{2}-\d{2}) ([a-fA-F\d]+)\s*$", line);
-  if (not match):
-    raise Exception("%s:%d:E syntax error: " % (snapshotfile, n))
-  return {"type": "snapshot",
-          "date": match.group(2),
-          "rev": match.group(3)}
+    match = re.match(r"([ST]) (\d{4}-\d{2}-\d{2}) ([a-fA-F\d]+)\s*$", line)
+    if not match:
+        raise Exception("%s:%d:E syntax error: " % (snapshotfile, n))
+    return {"type": "snapshot",
+            "date": match.group(2),
+            "rev": match.group(3)}
 
 
 def partial_snapshot_name(date, rev, platform):
-  return ("rust-stage0-%s-%s-%s.tar.bz2"
-          % (date, rev, platform))
+    return ("rust-stage0-%s-%s-%s.tar.bz2" %
+            (date, rev, platform))
+
 
 def full_snapshot_name(date, rev, platform, hsh):
-  return ("rust-stage0-%s-%s-%s-%s.tar.bz2"
-          % (date, rev, platform, hsh))
+    return ("rust-stage0-%s-%s-%s-%s.tar.bz2" %
+            (date, rev, platform, hsh))
 
 
 def get_kernel(triple):
     t = triple.split('-')
     if len(t) == 2:
-      os_name = t[1]
+        os_name = t[1]
     else:
-      os_name = t[2]
+        os_name = t[2]
+
     if os_name == "windows":
         return "winnt"
     if os_name == "darwin":
@@ -91,19 +102,20 @@ def get_kernel(triple):
         return "dragonfly"
     return "linux"
 
+
 def get_cpu(triple):
     arch = triple.split('-')[0]
     if arch == "i686":
-      return "i386"
+        return "i386"
     return arch
 
+
 def get_platform(triple):
-  return "%s-%s" % (get_kernel(triple), get_cpu(triple))
+    return "%s-%s" % (get_kernel(triple), get_cpu(triple))
 
 
 def cmd_out(cmdline):
-    p = subprocess.Popen(cmdline,
-                         stdout=subprocess.PIPE)
+    p = subprocess.Popen(cmdline, stdout=subprocess.PIPE)
     return scrub(p.communicate()[0].strip())
 
 
@@ -124,7 +136,8 @@ def local_rev_short_sha():
 
 def local_rev_committer_date():
     return local_rev_info("ci")
 
-def get_url_to_file(u,f):
+
+def get_url_to_file(u, f):
     # no security issue, just to stop partial download leaving a stale file
     tmpf = f + '.tmp'
@@ -137,40 +150,44 @@ def get_url_to_file(u,f):
     if returncode != 0:
         try:
             os.unlink(tmpf)
-        except OSError as e:
+        except OSError:
             pass
         raise Exception("failed to fetch url")
     os.rename(tmpf, f)
 
+
 def snap_filename_hash_part(snap):
-  match = re.match(r".*([a-fA-F\d]{40}).tar.bz2$", snap)
-  if not match:
-    raise Exception("unable to find hash in filename: " + snap)
-  return match.group(1)
+    match = re.match(r".*([a-fA-F\d]{40}).tar.bz2$", snap)
+    if not match:
+        raise Exception("unable to find hash in filename: " + snap)
+    return match.group(1)
+
 
 def hash_file(x):
     h = sha_func()
     h.update(open(x, "rb").read())
     return scrub(h.hexdigest())
 
-# Returns a list of paths of Rust's system runtime dependencies
+
 def get_winnt_runtime_deps(platform):
+    """Returns a list of paths of Rust's system runtime dependencies"""
     if platform == "winnt-x86_64":
-      deps = winnt_runtime_deps_64
+        deps = winnt_runtime_deps_64
     else:
-      deps = winnt_runtime_deps_32
+        deps = winnt_runtime_deps_32
     runtime_deps = []
     path_dirs = os.environ["PATH"].split(os.pathsep)
     for name in deps:
-      for dir in path_dirs:
-        filepath = os.path.join(dir, name)
-        if os.path.isfile(filepath):
-          runtime_deps.append(filepath)
-          break
-      else:
-        raise Exception("Could not find runtime dependency: %s" % name)
+        for dir in path_dirs:
+            filepath = os.path.join(dir, name)
+            if os.path.isfile(filepath):
+                runtime_deps.append(filepath)
+                break
+        else:
+            raise Exception("Could not find runtime dependency: %s" % name)
     return runtime_deps
 
+
 def make_snapshot(stage, triple):
     kernel = get_kernel(triple)
     platform = get_platform(triple)
@@ -180,31 +197,31 @@ def make_snapshot(stage, triple):
     file0 = partial_snapshot_name(date, rev, platform)
 
     def in_tar_name(fn):
-      cs = re.split(r"[\\/]", fn)
-      if len(cs) >= 2:
-        return os.sep.join(cs[-2:])
+        cs = re.split(r"[\\/]", fn)
+        if len(cs) >= 2:
+            return os.sep.join(cs[-2:])
 
     tar = tarfile.open(file0, "w:bz2")
 
     for name in snapshot_files[kernel]:
-      dir = stage
-      if stage == "stage1" and re.match(r"^lib/(lib)?std.*", name):
-        dir = "stage0"
-      fn_glob = os.path.join(triple, dir, name)
-      matches = glob.glob(fn_glob)
-      if not matches:
-        raise Exception("Not found file with name like " + fn_glob)
-      if len(matches) == 1:
-        tar.add(matches[0], "rust-stage0/" + in_tar_name(matches[0]))
-      else:
-        raise Exception("Found stale files: \n %s\n"
-                        "Please make a clean build." % "\n ".join(matches))
+        dir = stage
+        if stage == "stage1" and re.match(r"^lib/(lib)?std.*", name):
+            dir = "stage0"
+        fn_glob = os.path.join(triple, dir, name)
+        matches = glob.glob(fn_glob)
+        if not matches:
+            raise Exception("Not found file with name like " + fn_glob)
+        if len(matches) == 1:
+            tar.add(matches[0], "rust-stage0/" + in_tar_name(matches[0]))
+        else:
+            raise Exception("Found stale files: \n %s\n"
+                            "Please make a clean build." % "\n ".join(matches))
 
-    if kernel=="winnt":
-      for path in get_winnt_runtime_deps(platform):
-        tar.add(path, "rust-stage0/bin/" + os.path.basename(path))
-      tar.add(os.path.join(os.path.dirname(__file__), "third-party"),
-              "rust-stage0/bin/third-party")
+    if kernel == "winnt":
+        for path in get_winnt_runtime_deps(platform):
+            tar.add(path, "rust-stage0/bin/" + os.path.basename(path))
+        tar.add(os.path.join(os.path.dirname(__file__), "third-party"),
+                "rust-stage0/bin/third-party")
 
     tar.close()
 
@@ -215,60 +232,64 @@ def make_snapshot(stage, triple):
     return file1
 
+
 def curr_snapshot_rev():
-  i = 0
-  found_snap = False
-  date = None
-  rev = None
+    i = 0
+    found_snap = False
+    date = None
+    rev = None
 
-  f = open(snapshotfile)
-  for line in f.readlines():
-    i += 1
-    parsed = parse_line(i, line)
-    if (not parsed): continue
+    f = open(snapshotfile)
+    for line in f.readlines():
+        i += 1
+        parsed = parse_line(i, line)
+        if not parsed:
+            continue
 
-    if parsed["type"] == "snapshot":
-      date = parsed["date"]
-      rev = parsed["rev"]
-      found_snap = True
-      break
+        if parsed["type"] == "snapshot":
+            date = parsed["date"]
+            rev = parsed["rev"]
+            found_snap = True
+            break
 
-  if not found_snap:
-    raise Exception("no snapshot entries in file")
+    if not found_snap:
+        raise Exception("no snapshot entries in file")
+
+    return (date, rev)
 
-  return (date, rev)
 
 def determine_curr_snapshot(triple):
-  i = 0
-  platform = get_platform(triple)
+    i = 0
+    platform = get_platform(triple)
 
-  found_file = False
-  found_snap = False
-  hsh = None
-  date = None
-  rev = None
+    found_file = False
+    found_snap = False
+    hsh = None
+    date = None
+    rev = None
 
-  f = open(snapshotfile)
-  for line in f.readlines():
-    i += 1
-    parsed = parse_line(i, line)
-    if (not parsed): continue
+    f = open(snapshotfile)
+    for line in f.readlines():
+        i += 1
+        parsed = parse_line(i, line)
+        if not parsed:
+            continue
 
-    if found_snap and parsed["type"] == "file":
-      if parsed["platform"] == platform:
-        hsh = parsed["hash"]
-        found_file = True
-        break;
-    elif parsed["type"] == "snapshot":
-      date = parsed["date"]
-      rev = parsed["rev"]
-      found_snap = True
+        if found_snap and parsed["type"] == "file":
+            if parsed["platform"] == platform:
+                hsh = parsed["hash"]
+                found_file = True
+                break
+        elif parsed["type"] == "snapshot":
+            date = parsed["date"]
+            rev = parsed["rev"]
+            found_snap = True
 
-  if not found_snap:
-    raise Exception("no snapshot entries in file")
+    if not found_snap:
+        raise Exception("no snapshot entries in file")
 
-  if not found_file:
-    raise Exception("no snapshot file found for platform %s, rev %s" %
-                    (platform, rev))
+    if not found_file:
+        raise Exception("no snapshot file found for platform %s, rev %s" %
+                        (platform, rev))
 
-  return full_snapshot_name(date, rev, platform, hsh)
+    return full_snapshot_name(date, rev, platform, hsh)
diff --git a/src/etc/sugarise-doc-comments.py b/src/etc/sugarise-doc-comments.py
index 7d4ad749fe3..62870f3ed47 100755
--- a/src/etc/sugarise-doc-comments.py
+++ b/src/etc/sugarise-doc-comments.py
@@ -17,7 +17,10 @@
 # it sugarises all .rs/.rc files underneath the working directory
 #
 
-import sys, os, fnmatch, re
+import sys
+import os
+import fnmatch
+import re
 
 
 DOC_PATTERN = '^(?P<indent>[\\t ]*)#\\[(\\s*)doc(\\s*)=' + \
@@ -85,7 +88,6 @@ def sugarise_file(path):
     if s != ns:
         open(path, 'w').write(ns)
 
-
 for (dirpath, dirnames, filenames) in os.walk('.'):
     for name in fnmatch.filter(filenames, '*.r[sc]'):
         sugarise_file(os.path.join(dirpath, name))
diff --git a/src/etc/tidy.py b/src/etc/tidy.py
index c65b762e517..f5172feb5b6 100644
--- a/src/etc/tidy.py
+++ b/src/etc/tidy.py
@@ -8,37 +8,45 @@
 # option. This file may not be copied, modified, or distributed
 # except according to those terms.
 
-import sys, fileinput, subprocess, re, os
+import sys
+import fileinput
+import subprocess
+import re
+import os
 from licenseck import *
 import snapshot
 
-err=0
-cols=100
-cr_flag="ignore-tidy-cr"
-tab_flag="ignore-tidy-tab"
-linelength_flag="ignore-tidy-linelength"
+err = 0
+cols = 100
+cr_flag = "ignore-tidy-cr"
+tab_flag = "ignore-tidy-tab"
+linelength_flag = "ignore-tidy-linelength"
 
 # Be careful to support Python 2.4, 2.6, and 3.x here!
-config_proc=subprocess.Popen([ "git", "config", "core.autocrlf" ],
-                             stdout=subprocess.PIPE)
-result=config_proc.communicate()[0]
+config_proc = subprocess.Popen(["git", "config", "core.autocrlf"],
+                               stdout=subprocess.PIPE)
+result = config_proc.communicate()[0]
+
+true = "true".encode('utf8')
+autocrlf = result.strip() == true if result is not None else False
 
-true="true".encode('utf8')
-autocrlf=result.strip() == true if result is not None else False
 
 def report_error_name_no(name, no, s):
     global err
     print("%s:%d: %s" % (name, no, s))
-    err=1
+    err = 1
+
 
 def report_err(s):
     report_error_name_no(fileinput.filename(), fileinput.filelineno(), s)
 
+
 def report_warn(s):
     print("%s:%d: %s" % (fileinput.filename(),
                          fileinput.filelineno(),
                          s))
 
+
 def do_license_check(name, contents):
     if not check_license(name, contents):
         report_error_name_no(name, 1, "incorrect license")
@@ -81,13 +89,13 @@ try:
             date, rev = snapshot.curr_snapshot_rev()
             if not hsh.startswith(rev):
                 report_err("snapshot out of date (" + date
-                    + "): " + line)
+                           + "): " + line)
         else:
             if "SNAP" in line:
                 report_warn("unmatched SNAP line: " + line)
 
     if check_tab and ('\t' in line and
-        "Makefile" not in fileinput.filename()):
+                      "Makefile" not in fileinput.filename()):
         report_err("tab character")
     if check_cr and not autocrlf and '\r' in line:
         report_err("CR character")