Diffstat (limited to 'packages/Python/lldbsuite/test/benchmarks')
12 files changed, 190 insertions, 95 deletions
diff --git a/packages/Python/lldbsuite/test/benchmarks/continue/TestBenchmarkContinue.py b/packages/Python/lldbsuite/test/benchmarks/continue/TestBenchmarkContinue.py
index f7c274522f9b2..74336693bcb29 100644
--- a/packages/Python/lldbsuite/test/benchmarks/continue/TestBenchmarkContinue.py
+++ b/packages/Python/lldbsuite/test/benchmarks/continue/TestBenchmarkContinue.py
@@ -5,14 +5,15 @@ Test lldb data formatter subsystem.
 from __future__ import print_function
 
-
-import os, time
+import os
+import time
 import lldb
 from lldbsuite.test.decorators import *
 from lldbsuite.test.lldbbench import *
 from lldbsuite.test.lldbtest import *
 from lldbsuite.test import lldbutil
 
+
 class TestBenchmarkContinue(BenchBase):
 
     mydir = TestBase.compute_mydir(__file__)
@@ -31,14 +32,16 @@ class TestBenchmarkContinue(BenchBase):
         """Benchmark different ways to continue a process"""
         self.runCmd("file a.out", CURRENT_EXECUTABLE_SET)
 
-        bkpt = self.target().FindBreakpointByID(lldbutil.run_break_set_by_source_regexp (self, "// break here"))
+        bkpt = self.target().FindBreakpointByID(
+            lldbutil.run_break_set_by_source_regexp(
+                self, "// break here"))
 
         self.runCmd("run", RUN_SUCCEEDED)
 
         # The stop reason of the thread should be breakpoint.
         self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
-            substrs = ['stopped',
-                       'stop reason = breakpoint'])
+                    substrs=['stopped',
+                             'stop reason = breakpoint'])
 
         # This is the function to remove the custom formats in order to have a
         # clean slate for the next test case.
@@ -47,22 +50,24 @@ class TestBenchmarkContinue(BenchBase):
             self.runCmd('type summary clear', check=False)
             self.runCmd('type filter clear', check=False)
             self.runCmd('type synth clear', check=False)
-            self.runCmd("settings set target.max-children-count 256", check=False)
+            self.runCmd(
+                "settings set target.max-children-count 256",
+                check=False)
 
         # Execute the cleanup function during test case tear down.
         self.addTearDownHook(cleanup)
-        
+
         runCmd_sw = Stopwatch()
         lldbutil_sw = Stopwatch()
-        for i in range(0,15):
+        for i in range(0, 15):
             runCmd_sw.start()
             self.runCmd("continue")
             runCmd_sw.stop()
-        
-        for i in range(0,15):
+
+        for i in range(0, 15):
             lldbutil_sw.start()
             lldbutil.continue_to_breakpoint(self.process(), bkpt)
             lldbutil_sw.stop()
-            
-        print("runCmd: %s\nlldbutil: %s" % (runCmd_sw,lldbutil_sw))
+
+        print("runCmd: %s\nlldbutil: %s" % (runCmd_sw, lldbutil_sw))
diff --git a/packages/Python/lldbsuite/test/benchmarks/disassembly/TestDisassembly.py b/packages/Python/lldbsuite/test/benchmarks/disassembly/TestDisassembly.py
index 8a0c044147a00..8bce4815894d5 100644
--- a/packages/Python/lldbsuite/test/benchmarks/disassembly/TestDisassembly.py
+++ b/packages/Python/lldbsuite/test/benchmarks/disassembly/TestDisassembly.py
@@ -3,18 +3,20 @@
 
 from __future__ import print_function
 
-
-import os, sys
+import os
+import sys
 import lldb
 from lldbsuite.test.decorators import *
 from lldbsuite.test.lldbbench import *
 from lldbsuite.test.lldbtest import *
 from lldbsuite.test import lldbutil
 
+
 def is_exe(fpath):
     """Returns true if fpath is an executable."""
     return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
 
+
 class DisassembleDriverMainLoop(BenchBase):
 
     mydir = TestBase.compute_mydir(__file__)
@@ -42,7 +44,9 @@ class DisassembleDriverMainLoop(BenchBase):
     @benchmarks_test
     @no_debug_info_test
-    @expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr22274: need a pexpect replacement for windows")
+    @expectedFailureAll(
+        oslist=["windows"],
+        bugnumber="llvm.org/pr22274: need a pexpect replacement for windows")
     def test_run_lldb_then_gdb(self):
         """Test disassembly on a large function with lldb vs. gdb."""
         print()
@@ -54,11 +58,13 @@ class DisassembleDriverMainLoop(BenchBase):
         print("lldb benchmark:", self.stopwatch)
         self.run_gdb_disassembly(self.exe, self.function, self.count)
         print("gdb benchmark:", self.stopwatch)
-        print("lldb_avg/gdb_avg: %f" % (self.lldb_avg/self.gdb_avg))
+        print("lldb_avg/gdb_avg: %f" % (self.lldb_avg / self.gdb_avg))
 
     @benchmarks_test
     @no_debug_info_test
-    @expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr22274: need a pexpect replacement for windows")
+    @expectedFailureAll(
+        oslist=["windows"],
+        bugnumber="llvm.org/pr22274: need a pexpect replacement for windows")
     def test_run_gdb_then_lldb(self):
         """Test disassembly on a large function with lldb vs. gdb."""
         print()
@@ -70,7 +76,7 @@ class DisassembleDriverMainLoop(BenchBase):
         print("gdb benchmark:", self.stopwatch)
         self.run_lldb_disassembly(self.exe, self.function, self.count)
         print("lldb benchmark:", self.stopwatch)
-        print("lldb_avg/gdb_avg: %f" % (self.lldb_avg/self.gdb_avg))
+        print("lldb_avg/gdb_avg: %f" % (self.lldb_avg / self.gdb_avg))
 
     def run_lldb_disassembly(self, exe, function, count):
         import pexpect
@@ -79,7 +85,9 @@ class DisassembleDriverMainLoop(BenchBase):
         prompt = self.child_prompt
 
         # So that the child gets torn down after the test.
-        self.child = pexpect.spawn('%s %s %s' % (lldbtest_config.lldbExec, self.lldbOption, exe))
+        self.child = pexpect.spawn(
+            '%s %s %s' %
+            (lldbtest_config.lldbExec, self.lldbOption, exe))
         child = self.child
 
         # Turn on logging for what the child sends back.
diff --git a/packages/Python/lldbsuite/test/benchmarks/disassembly/TestDoAttachThenDisassembly.py b/packages/Python/lldbsuite/test/benchmarks/disassembly/TestDoAttachThenDisassembly.py
index f8e3d94b65775..36f23572648d3 100644
--- a/packages/Python/lldbsuite/test/benchmarks/disassembly/TestDoAttachThenDisassembly.py
+++ b/packages/Python/lldbsuite/test/benchmarks/disassembly/TestDoAttachThenDisassembly.py
@@ -5,13 +5,14 @@ inferior and traverses the stack for thread0 to arrive at frame with function
 
 from __future__ import print_function
 
-
-import os, sys
+import os
+import sys
 import lldb
 from lldbsuite.test.decorators import *
 from lldbsuite.test.lldbbench import *
 from lldbsuite.test.lldbtest import *
 
+
 class AttachThenDisassemblyBench(BenchBase):
 
     mydir = TestBase.compute_mydir(__file__)
@@ -32,10 +33,11 @@ class AttachThenDisassemblyBench(BenchBase):
     def run_lldb_attach_then_disassembly(self, exe, count):
         target = self.dbg.CreateTarget(exe)
-        # Spawn a new process and don't display the stdout if not in TraceOn() mode.
+        # Spawn a new process and don't display the stdout if not in TraceOn()
+        # mode.
         import subprocess
-        popen = subprocess.Popen([exe, self.lldbOption],
-                                 stdout = open(os.devnull, 'w') if not self.TraceOn() else None)
+        popen = subprocess.Popen([exe, self.lldbOption], stdout=open(
+            os.devnull, 'w') if not self.TraceOn() else None)
         if self.TraceOn():
             print("pid of spawned process: %d" % popen.pid)
@@ -51,7 +53,7 @@ class AttachThenDisassemblyBench(BenchBase):
         i = 0
         found = False
         for f in thread0:
-            #print("frame#%d %s" % (i, f.GetFunctionName()))
+            # print("frame#%d %s" % (i, f.GetFunctionName()))
             if "MainLoop" in f.GetFunctionName():
                 found = True
                 thread0.SetSelectedFrame(i)
@@ -59,7 +61,7 @@ class AttachThenDisassemblyBench(BenchBase):
                     print("Found frame#%d for function 'MainLoop'" % i)
                 break
             i += 1
-            
+
         # Reset the stopwatch now.
         self.stopwatch.reset()
         for i in range(count):
diff --git a/packages/Python/lldbsuite/test/benchmarks/disassembly/TestXcode41Vs42GDBDisassembly.py b/packages/Python/lldbsuite/test/benchmarks/disassembly/TestXcode41Vs42GDBDisassembly.py
index 618aac7eafcdb..fd91bb441d9fc 100644
--- a/packages/Python/lldbsuite/test/benchmarks/disassembly/TestXcode41Vs42GDBDisassembly.py
+++ b/packages/Python/lldbsuite/test/benchmarks/disassembly/TestXcode41Vs42GDBDisassembly.py
@@ -3,8 +3,8 @@
 
 from __future__ import print_function
 
-
-import os, sys
+import os
+import sys
 import lldb
 from lldbsuite.test.decorators import *
 from lldbsuite.test.lldbbench import *
@@ -12,6 +12,7 @@ from lldbsuite.test.lldbtest import *
 from lldbsuite.test import configuration
 from lldbsuite.test import lldbutil
 
+
 class XCode41Vs42GDBDisassembly(BenchBase):
 
     mydir = TestBase.compute_mydir(__file__)
@@ -28,31 +29,53 @@ class XCode41Vs42GDBDisassembly(BenchBase):
     @benchmarks_test
     @no_debug_info_test
-    @expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr22274: need a pexpect replacement for windows")
+    @expectedFailureAll(
+        oslist=["windows"],
+        bugnumber="llvm.org/pr22274: need a pexpect replacement for windows")
     def test_run_41_then_42(self):
         """Test disassembly on a large function with 4.1 vs. 4.2's gdb."""
         print()
-        self.run_gdb_disassembly(self.gdb_41_exe, self.exe, self.function, self.count)
+        self.run_gdb_disassembly(
+            self.gdb_41_exe,
+            self.exe,
+            self.function,
+            self.count)
         print("4.1 gdb benchmark:", self.stopwatch)
         self.gdb_41_avg = self.stopwatch.avg()
-        self.run_gdb_disassembly(self.gdb_42_exe, self.exe, self.function, self.count)
+        self.run_gdb_disassembly(
+            self.gdb_42_exe,
+            self.exe,
+            self.function,
+            self.count)
         print("4.2 gdb benchmark:", self.stopwatch)
         self.gdb_42_avg = self.stopwatch.avg()
-        print("gdb_42_avg/gdb_41_avg: %f" % (self.gdb_42_avg/self.gdb_41_avg))
+        print("gdb_42_avg/gdb_41_avg: %f" %
+              (self.gdb_42_avg / self.gdb_41_avg))
 
     @benchmarks_test
     @no_debug_info_test
-    @expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr22274: need a pexpect replacement for windows")
+    @expectedFailureAll(
+        oslist=["windows"],
+        bugnumber="llvm.org/pr22274: need a pexpect replacement for windows")
     def test_run_42_then_41(self):
         """Test disassembly on a large function with 4.1 vs. 4.2's gdb."""
         print()
-        self.run_gdb_disassembly(self.gdb_42_exe, self.exe, self.function, self.count)
+        self.run_gdb_disassembly(
+            self.gdb_42_exe,
+            self.exe,
+            self.function,
+            self.count)
         print("4.2 gdb benchmark:", self.stopwatch)
         self.gdb_42_avg = self.stopwatch.avg()
-        self.run_gdb_disassembly(self.gdb_41_exe, self.exe, self.function, self.count)
+        self.run_gdb_disassembly(
+            self.gdb_41_exe,
+            self.exe,
+            self.function,
+            self.count)
         print("4.1 gdb benchmark:", self.stopwatch)
         self.gdb_41_avg = self.stopwatch.avg()
-        print("gdb_42_avg/gdb_41_avg: %f" % (self.gdb_42_avg/self.gdb_41_avg))
+        print("gdb_42_avg/gdb_41_avg: %f" %
+              (self.gdb_42_avg / self.gdb_41_avg))
 
     def run_gdb_disassembly(self, gdb_exe_path, exe, function, count):
         import pexpect
diff --git a/packages/Python/lldbsuite/test/benchmarks/expression/TestExpressionCmd.py b/packages/Python/lldbsuite/test/benchmarks/expression/TestExpressionCmd.py
index 68d2bd9793e1f..a9899d93bf030 100644
--- a/packages/Python/lldbsuite/test/benchmarks/expression/TestExpressionCmd.py
+++ b/packages/Python/lldbsuite/test/benchmarks/expression/TestExpressionCmd.py
@@ -3,8 +3,8 @@
 
 from __future__ import print_function
 
-
-import os, sys
+import os
+import sys
 import lldb
 from lldbsuite.test.decorators import *
 from lldbsuite.test.lldbbench import *
@@ -12,6 +12,7 @@ from lldbsuite.test.lldbtest import *
 from lldbsuite.test import configuration
 from lldbsuite.test import lldbutil
 
+
 class ExpressionEvaluationCase(BenchBase):
 
     mydir = TestBase.compute_mydir(__file__)
@@ -19,11 +20,14 @@ class ExpressionEvaluationCase(BenchBase):
     def setUp(self):
         BenchBase.setUp(self)
         self.source = 'main.cpp'
-        self.line_to_break = line_number(self.source, '// Set breakpoint here.')
+        self.line_to_break = line_number(
+            self.source, '// Set breakpoint here.')
         self.count = 25
 
     @benchmarks_test
-    @expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr22274: need a pexpect replacement for windows")
+    @expectedFailureAll(
+        oslist=["windows"],
+        bugnumber="llvm.org/pr22274: need a pexpect replacement for windows")
     def test_expr_cmd(self):
         """Test lldb's expression commands and collect statistics."""
         self.build()
@@ -45,7 +49,9 @@ class ExpressionEvaluationCase(BenchBase):
         self.stopwatch.reset()
         for i in range(count):
             # So that the child gets torn down after the test.
-            self.child = pexpect.spawn('%s %s %s' % (lldbtest_config.lldbExec, self.lldbOption, exe))
+            self.child = pexpect.spawn(
+                '%s %s %s' %
+                (lldbtest_config.lldbExec, self.lldbOption, exe))
             child = self.child
 
             # Turn on logging for what the child sends back.
@@ -53,7 +59,9 @@ class ExpressionEvaluationCase(BenchBase):
                 child.logfile_read = sys.stdout
 
             child.expect_exact(prompt)
-            child.sendline('breakpoint set -f %s -l %d' % (self.source, self.line_to_break))
+            child.sendline(
+                'breakpoint set -f %s -l %d' %
+                (self.source, self.line_to_break))
             child.expect_exact(prompt)
             child.sendline('run')
             child.expect_exact(prompt)
diff --git a/packages/Python/lldbsuite/test/benchmarks/expression/TestRepeatedExprs.py b/packages/Python/lldbsuite/test/benchmarks/expression/TestRepeatedExprs.py
index 2ad409e53b075..a223d2cf1fa7d 100644
--- a/packages/Python/lldbsuite/test/benchmarks/expression/TestRepeatedExprs.py
+++ b/packages/Python/lldbsuite/test/benchmarks/expression/TestRepeatedExprs.py
@@ -3,8 +3,8 @@
 
 from __future__ import print_function
 
-
-import os, sys
+import os
+import sys
 import lldb
 from lldbsuite.test.lldbbench import BenchBase
 from lldbsuite.test.decorators import *
@@ -12,6 +12,7 @@ from lldbsuite.test.lldbtest import *
 from lldbsuite.test import configuration
 from lldbsuite.test import lldbutil
 
+
 class RepeatedExprsCase(BenchBase):
 
     mydir = TestBase.compute_mydir(__file__)
@@ -19,13 +20,16 @@ class RepeatedExprsCase(BenchBase):
     def setUp(self):
         BenchBase.setUp(self)
         self.source = 'main.cpp'
-        self.line_to_break = line_number(self.source, '// Set breakpoint here.')
+        self.line_to_break = line_number(
+            self.source, '// Set breakpoint here.')
         self.lldb_avg = None
         self.gdb_avg = None
         self.count = 100
 
     @benchmarks_test
-    @expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr22274: need a pexpect replacement for windows")
+    @expectedFailureAll(
+        oslist=["windows"],
+        bugnumber="llvm.org/pr22274: need a pexpect replacement for windows")
     def test_compare_lldb_to_gdb(self):
         """Test repeated expressions with lldb vs. gdb."""
         self.build()
@@ -36,7 +40,7 @@ class RepeatedExprsCase(BenchBase):
         print("lldb benchmark:", self.stopwatch)
         self.run_gdb_repeated_exprs(self.exe_name, self.count)
         print("gdb benchmark:", self.stopwatch)
-        print("lldb_avg/gdb_avg: %f" % (self.lldb_avg/self.gdb_avg))
+        print("lldb_avg/gdb_avg: %f" % (self.lldb_avg / self.gdb_avg))
 
     def run_lldb_repeated_exprs(self, exe_name, count):
         import pexpect
@@ -47,7 +51,9 @@ class RepeatedExprsCase(BenchBase):
         prompt = self.child_prompt
 
         # So that the child gets torn down after the test.
-        self.child = pexpect.spawn('%s %s %s' % (lldbtest_config.lldbExec, self.lldbOption, exe))
+        self.child = pexpect.spawn(
+            '%s %s %s' %
+            (lldbtest_config.lldbExec, self.lldbOption, exe))
         child = self.child
 
         # Turn on logging for what the child sends back.
@@ -55,7 +61,9 @@ class RepeatedExprsCase(BenchBase):
             child.logfile_read = sys.stdout
 
         child.expect_exact(prompt)
-        child.sendline('breakpoint set -f %s -l %d' % (self.source, self.line_to_break))
+        child.sendline(
+            'breakpoint set -f %s -l %d' %
+            (self.source, self.line_to_break))
         child.expect_exact(prompt)
         child.sendline('run')
         child.expect_exact(prompt)
@@ -71,7 +79,7 @@ class RepeatedExprsCase(BenchBase):
                 child.sendline(expr_cmd2)
                 child.expect_exact(prompt)
             child.sendline('process continue')
-            child.expect_exact(prompt)        
+            child.expect_exact(prompt)
 
         child.sendline('quit')
         try:
@@ -117,7 +125,7 @@ class RepeatedExprsCase(BenchBase):
                 child.sendline(expr_cmd2)
                 child.expect_exact(prompt)
             child.sendline('continue')
-            child.expect_exact(prompt)        
+            child.expect_exact(prompt)
 
         child.sendline('quit')
         child.expect_exact('The program is running.  Exit anyway?')
diff --git a/packages/Python/lldbsuite/test/benchmarks/frame_variable/TestFrameVariableResponse.py b/packages/Python/lldbsuite/test/benchmarks/frame_variable/TestFrameVariableResponse.py
index 9f58352973799..3ed23e615409b 100644
--- a/packages/Python/lldbsuite/test/benchmarks/frame_variable/TestFrameVariableResponse.py
+++ b/packages/Python/lldbsuite/test/benchmarks/frame_variable/TestFrameVariableResponse.py
@@ -3,14 +3,15 @@
 
 from __future__ import print_function
 
-
-import os, sys
+import os
+import sys
 import lldb
 from lldbsuite.test import configuration
 from lldbsuite.test import lldbtest_config
 from lldbsuite.test.decorators import *
 from lldbsuite.test.lldbbench import *
 
+
 class FrameVariableResponseBench(BenchBase):
 
     mydir = TestBase.compute_mydir(__file__)
@@ -23,7 +24,9 @@ class FrameVariableResponseBench(BenchBase):
     @benchmarks_test
     @no_debug_info_test
-    @expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr22274: need a pexpect replacement for windows")
+    @expectedFailureAll(
+        oslist=["windows"],
+        bugnumber="llvm.org/pr22274: need a pexpect replacement for windows")
     def test_startup_delay(self):
         """Test response time for the 'frame variable' command."""
         print()
@@ -40,7 +43,9 @@ class FrameVariableResponseBench(BenchBase):
         self.stopwatch.reset()
         for i in range(count):
             # So that the child gets torn down after the test.
-            self.child = pexpect.spawn('%s %s %s' % (lldbtest_config.lldbExec, self.lldbOption, exe))
+            self.child = pexpect.spawn(
+                '%s %s %s' %
+                (lldbtest_config.lldbExec, self.lldbOption, exe))
             child = self.child
 
             # Turn on logging for what the child sends back.
@@ -52,9 +57,9 @@ class FrameVariableResponseBench(BenchBase):
             child.expect_exact(prompt)
 
             # Run the target and expect it to be stopped due to breakpoint.
-            child.sendline('run') # Aka 'process launch'.
+            child.sendline('run')  # Aka 'process launch'.
             child.expect_exact(prompt)
-        
+
             with self.stopwatch:
                 # Measure the 'frame variable' response time.
                child.sendline('frame variable')
diff --git a/packages/Python/lldbsuite/test/benchmarks/libcxxlist/TestBenchmarkLibcxxList.py b/packages/Python/lldbsuite/test/benchmarks/libcxxlist/TestBenchmarkLibcxxList.py
index 12e23e9569435..659382e7311bb 100644
--- a/packages/Python/lldbsuite/test/benchmarks/libcxxlist/TestBenchmarkLibcxxList.py
+++ b/packages/Python/lldbsuite/test/benchmarks/libcxxlist/TestBenchmarkLibcxxList.py
@@ -5,14 +5,15 @@ Test lldb data formatter subsystem.
 from __future__ import print_function
 
-
-import os, time
+import os
+import time
 import lldb
 from lldbsuite.test.decorators import *
 from lldbsuite.test.lldbbench import *
 from lldbsuite.test.lldbtest import *
 from lldbsuite.test import lldbutil
 
+
 class TestBenchmarkLibcxxList(BenchBase):
 
     mydir = TestBase.compute_mydir(__file__)
@@ -31,14 +32,16 @@ class TestBenchmarkLibcxxList(BenchBase):
         """Benchmark the std::list data formatter (libc++)"""
         self.runCmd("file a.out", CURRENT_EXECUTABLE_SET)
 
-        bkpt = self.target().FindBreakpointByID(lldbutil.run_break_set_by_source_regexp (self, "break here"))
+        bkpt = self.target().FindBreakpointByID(
+            lldbutil.run_break_set_by_source_regexp(
+                self, "break here"))
 
         self.runCmd("run", RUN_SUCCEEDED)
 
         # The stop reason of the thread should be breakpoint.
         self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
-            substrs = ['stopped',
-                       'stop reason = breakpoint'])
+                    substrs=['stopped',
+                             'stop reason = breakpoint'])
 
         # This is the function to remove the custom formats in order to have a
         # clean slate for the next test case.
@@ -47,15 +50,17 @@ class TestBenchmarkLibcxxList(BenchBase):
             self.runCmd('type summary clear', check=False)
             self.runCmd('type filter clear', check=False)
             self.runCmd('type synth clear', check=False)
-            self.runCmd("settings set target.max-children-count 256", check=False)
+            self.runCmd(
+                "settings set target.max-children-count 256",
+                check=False)
 
         # Execute the cleanup function during test case tear down.
         self.addTearDownHook(cleanup)
-        
+
         sw = Stopwatch()
-        
+
         sw.start()
         self.expect('frame variable -A list', substrs=['[300]', '300'])
         sw.stop()
-            
+
         print("time to print: %s" % (sw))
diff --git a/packages/Python/lldbsuite/test/benchmarks/libcxxmap/TestBenchmarkLibcxxMap.py b/packages/Python/lldbsuite/test/benchmarks/libcxxmap/TestBenchmarkLibcxxMap.py
index 4466cd083ca3a..343f93d95c4e2 100644
--- a/packages/Python/lldbsuite/test/benchmarks/libcxxmap/TestBenchmarkLibcxxMap.py
+++ b/packages/Python/lldbsuite/test/benchmarks/libcxxmap/TestBenchmarkLibcxxMap.py
@@ -5,14 +5,15 @@ Test lldb data formatter subsystem.
 from __future__ import print_function
 
-
-import os, time
+import os
+import time
 import lldb
 from lldbsuite.test.lldbbench import *
 from lldbsuite.test.decorators import *
 from lldbsuite.test.lldbtest import *
 from lldbsuite.test import lldbutil
 
+
 class TestBenchmarkLibcxxMap(BenchBase):
 
     mydir = TestBase.compute_mydir(__file__)
@@ -31,14 +32,16 @@ class TestBenchmarkLibcxxMap(BenchBase):
         """Benchmark the std::map data formatter (libc++)"""
         self.runCmd("file a.out", CURRENT_EXECUTABLE_SET)
 
-        bkpt = self.target().FindBreakpointByID(lldbutil.run_break_set_by_source_regexp (self, "break here"))
+        bkpt = self.target().FindBreakpointByID(
+            lldbutil.run_break_set_by_source_regexp(
+                self, "break here"))
 
         self.runCmd("run", RUN_SUCCEEDED)
 
         # The stop reason of the thread should be breakpoint.
         self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
-            substrs = ['stopped',
-                       'stop reason = breakpoint'])
+                    substrs=['stopped',
+                             'stop reason = breakpoint'])
 
         # This is the function to remove the custom formats in order to have a
         # clean slate for the next test case.
@@ -47,15 +50,17 @@ class TestBenchmarkLibcxxMap(BenchBase):
             self.runCmd('type summary clear', check=False)
             self.runCmd('type filter clear', check=False)
             self.runCmd('type synth clear', check=False)
-            self.runCmd("settings set target.max-children-count 256", check=False)
+            self.runCmd(
+                "settings set target.max-children-count 256",
+                check=False)
 
         # Execute the cleanup function during test case tear down.
         self.addTearDownHook(cleanup)
-        
+
         sw = Stopwatch()
-        
+
         sw.start()
         self.expect('frame variable -A map', substrs=['[300]', '300'])
         sw.stop()
-            
+
         print("time to print: %s" % (sw))
diff --git a/packages/Python/lldbsuite/test/benchmarks/startup/TestStartupDelays.py b/packages/Python/lldbsuite/test/benchmarks/startup/TestStartupDelays.py
index 9d2356ab54f9c..baacdc81e2371 100644
--- a/packages/Python/lldbsuite/test/benchmarks/startup/TestStartupDelays.py
+++ b/packages/Python/lldbsuite/test/benchmarks/startup/TestStartupDelays.py
@@ -3,14 +3,15 @@
 
 from __future__ import print_function
 
-
-import os, sys
+import os
+import sys
 import lldb
 from lldbsuite.test import configuration
 from lldbsuite.test import lldbtest_config
 from lldbsuite.test.decorators import *
 from lldbsuite.test.lldbbench import *
 
+
 class StartupDelaysBench(BenchBase):
 
     mydir = TestBase.compute_mydir(__file__)
@@ -28,14 +29,22 @@ class StartupDelaysBench(BenchBase):
     @benchmarks_test
     @no_debug_info_test
-    @expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr22274: need a pexpect replacement for windows")
+    @expectedFailureAll(
+        oslist=["windows"],
+        bugnumber="llvm.org/pr22274: need a pexpect replacement for windows")
     def test_startup_delay(self):
         """Test start up delays creating a target, setting a breakpoint, and run to breakpoint stop."""
         print()
         self.run_startup_delays_bench(self.exe, self.break_spec, self.count)
-        print("lldb startup delay (create fresh target) benchmark:", self.stopwatch)
-        print("lldb startup delay (set first breakpoint) benchmark:", self.stopwatch2)
-        print("lldb startup delay (run to breakpoint) benchmark:", self.stopwatch3)
+        print(
+            "lldb startup delay (create fresh target) benchmark:",
+            self.stopwatch)
+        print(
+            "lldb startup delay (set first breakpoint) benchmark:",
+            self.stopwatch2)
+        print(
+            "lldb startup delay (run to breakpoint) benchmark:",
+            self.stopwatch3)
 
     def run_startup_delays_bench(self, exe, break_spec, count):
         import pexpect
@@ -48,7 +57,9 @@ class StartupDelaysBench(BenchBase):
         self.stopwatch2.reset()
         for i in range(count):
             # So that the child gets torn down after the test.
-            self.child = pexpect.spawn('%s %s' % (lldbtest_config.lldbExec, self.lldbOption))
+            self.child = pexpect.spawn(
+                '%s %s' %
+                (lldbtest_config.lldbExec, self.lldbOption))
             child = self.child
 
             # Turn on logging for what the child sends back.
@@ -57,7 +68,7 @@ class StartupDelaysBench(BenchBase):
             with self.stopwatch:
                 # Create a fresh target.
-                child.sendline('file %s' % exe) # Aka 'target create'.
+                child.sendline('file %s' % exe)  # Aka 'target create'.
                 child.expect_exact(prompt)
 
             with self.stopwatch2:
diff --git a/packages/Python/lldbsuite/test/benchmarks/stepping/TestSteppingSpeed.py b/packages/Python/lldbsuite/test/benchmarks/stepping/TestSteppingSpeed.py
index 3ab760d4abe82..2a2a8ef000a75 100644
--- a/packages/Python/lldbsuite/test/benchmarks/stepping/TestSteppingSpeed.py
+++ b/packages/Python/lldbsuite/test/benchmarks/stepping/TestSteppingSpeed.py
@@ -2,7 +2,8 @@
 
 from __future__ import print_function
 
-import os, sys
+import os
+import sys
 import lldb
 from lldbsuite.test import configuration
 from lldbsuite.test import lldbtest_config
@@ -11,6 +12,7 @@ from lldbsuite.test.decorators import *
 from lldbsuite.test.lldbtest import *
 from lldbsuite.test import lldbutil
 
+
 class SteppingSpeedBench(BenchBase):
 
     mydir = TestBase.compute_mydir(__file__)
@@ -26,7 +28,9 @@ class SteppingSpeedBench(BenchBase):
     @benchmarks_test
     @no_debug_info_test
-    @expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr22274: need a pexpect replacement for windows")
+    @expectedFailureAll(
+        oslist=["windows"],
+        bugnumber="llvm.org/pr22274: need a pexpect replacement for windows")
     def test_run_lldb_steppings(self):
         """Test lldb steppings on a large executable."""
         print()
@@ -40,7 +44,9 @@ class SteppingSpeedBench(BenchBase):
         prompt = self.child_prompt
 
         # So that the child gets torn down after the test.
-        self.child = pexpect.spawn('%s %s %s' % (lldbtest_config.lldbExec, self.lldbOption, exe))
+        self.child = pexpect.spawn(
+            '%s %s %s' %
+            (lldbtest_config.lldbExec, self.lldbOption, exe))
         child = self.child
 
         # Turn on logging for what the child sends back.
@@ -58,7 +64,7 @@ class SteppingSpeedBench(BenchBase):
         for i in range(count):
             with self.stopwatch:
                 # Disassemble the function.
-                child.sendline('next') # Aka 'thread step-over'.
+                child.sendline('next')  # Aka 'thread step-over'.
                child.expect_exact(prompt)
 
         child.sendline('quit')
diff --git a/packages/Python/lldbsuite/test/benchmarks/turnaround/TestCompileRunToBreakpointTurnaround.py b/packages/Python/lldbsuite/test/benchmarks/turnaround/TestCompileRunToBreakpointTurnaround.py
index 3106c4511f586..ab2b2004fc5da 100644
--- a/packages/Python/lldbsuite/test/benchmarks/turnaround/TestCompileRunToBreakpointTurnaround.py
+++ b/packages/Python/lldbsuite/test/benchmarks/turnaround/TestCompileRunToBreakpointTurnaround.py
@@ -3,8 +3,8 @@
 
 from __future__ import print_function
 
-
-import os, sys
+import os
+import sys
 import lldb
 from lldbsuite.test.lldbbench import *
 from lldbsuite.test.decorators import *
@@ -12,6 +12,7 @@ from lldbsuite.test.lldbtest import *
 from lldbsuite.test import configuration
 from lldbsuite.test import lldbutil
 
+
 class CompileRunToBreakpointBench(BenchBase):
 
     mydir = TestBase.compute_mydir(__file__)
@@ -27,7 +28,9 @@ class CompileRunToBreakpointBench(BenchBase):
     @benchmarks_test
     @no_debug_info_test
-    @expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr22274: need a pexpect replacement for windows")
+    @expectedFailureAll(
+        oslist=["windows"],
+        bugnumber="llvm.org/pr22274: need a pexpect replacement for windows")
     def test_run_lldb_then_gdb(self):
         """Benchmark turnaround time with lldb vs. gdb."""
         print()
@@ -35,15 +38,18 @@ class CompileRunToBreakpointBench(BenchBase):
         print("lldb turnaround benchmark:", self.stopwatch)
         self.run_gdb_turnaround(self.exe, self.function, self.count)
         print("gdb turnaround benchmark:", self.stopwatch)
-        print("lldb_avg/gdb_avg: %f" % (self.lldb_avg/self.gdb_avg))
+        print("lldb_avg/gdb_avg: %f" % (self.lldb_avg / self.gdb_avg))
 
     def run_lldb_turnaround(self, exe, function, count):
         import pexpect
+
         def run_one_round():
             prompt = self.child_prompt
 
             # So that the child gets torn down after the test.
-            self.child = pexpect.spawn('%s %s %s' % (lldbtest_config.lldbExec, self.lldbOption, exe))
+            self.child = pexpect.spawn(
+                '%s %s %s' %
+                (lldbtest_config.lldbExec, self.lldbOption, exe))
             child = self.child
 
             # Turn on logging for what the child sends back.
@@ -62,7 +68,8 @@ class CompileRunToBreakpointBench(BenchBase):
         self.stopwatch.reset()
         for i in range(count + 1):
-            # Ignore the first invoke lldb and run to the breakpoint turnaround time.
+            # Ignore the first invoke lldb and run to the breakpoint turnaround
+            # time.
             if i == 0:
                 run_one_round()
             else:
@@ -80,6 +87,7 @@ class CompileRunToBreakpointBench(BenchBase):
     def run_gdb_turnaround(self, exe, function, count):
         import pexpect
+
         def run_one_round():
             prompt = self.child_prompt
@@ -102,8 +110,9 @@ class CompileRunToBreakpointBench(BenchBase):
         # Reset the stopwatch now.
         self.stopwatch.reset()
-        for i in range(count+1):
-            # Ignore the first invoke lldb and run to the breakpoint turnaround time.
+        for i in range(count + 1):
+            # Ignore the first invoke lldb and run to the breakpoint turnaround
+            # time.
             if i == 0:
                 run_one_round()
             else:
