From 3c24edf4307d743953cb929ad39b0037da6a06b3 Mon Sep 17 00:00:00 2001 From: Petr Mladek Date: Tue, 14 Nov 2017 15:07:19 +0100 Subject: [PATCH 001/367] crash.command.dmesg: Fix to work again The "charp" type is 32-bit from some reason. A cast to this type might cause losing the higher 32 bits of the address. It helps to use the delayed lookup for the type. This patch also removes two debug messages that printed "msg" and "textval". I guess that someone used them to debug this problem. Signed-off-by: Petr Mladek --- crash/commands/dmesg.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/crash/commands/dmesg.py b/crash/commands/dmesg.py index f483e04474c..3784257e38e 100644 --- a/crash/commands/dmesg.py +++ b/crash/commands/dmesg.py @@ -147,7 +147,6 @@ class LogCommand(CrashCommand): """ def __init__(self, name): - self.charp = gdb.lookup_type('char').pointer() parser = CrashCommandParser(prog=name) parser.add_argument('-t', action='store_true', default=False) @@ -157,7 +156,7 @@ def __init__(self, name): parser.format_usage = lambda: 'log [-tdm]\n' CrashCommand.__init__(self, name, parser) - __types__ = [ 'struct printk_log *' ] + __types__ = [ 'struct printk_log *' , 'char *' ] __symvals__ = [ 'log_buf', 'log_buf_len', 'log_first_idx', 'log_next_idx', 'clear_seq', 'log_first_seq', 'log_next_seq' ] @@ -180,10 +179,8 @@ def log_from_idx(self, logbuf, idx, dict_needed=False): msg = (logbuf + idx).cast(self.printk_log_p_type) try: - print(msg) - textval = (msg.cast(self.charp) + + textval = (msg.cast(self.char_p_type) + self.printk_log_p_type.target().sizeof) - print(textval) text = textval.string(length=msg['text_len']) except UnicodeDecodeError as e: print(e) @@ -208,7 +205,7 @@ def log_from_idx(self, logbuf, idx, dict_needed=False): if dict_needed: dict_len = int(msg['dict_len']) - d = (msg.cast(self.charp) + + d = (msg.cast(self.char_p_type) + self.printk_log_p_type.target().sizeof + textlen) s = '' From 5ac9092fcf5a1f8c8a651497ce5fe1e982e3d87a Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Wed, 15 Nov 2017 17:25:22 -0500 Subject: [PATCH 002/367] crash.subsystem.storage: fix type checking The type checking needs to use an unqualified type or the const will cause the comparison to fail. Also fix exception using DelayedAttributeError. 
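For illustration, the mismatch looks like this in gdb's Python API
(assuming the symbol is const-qualified in the kernel being debugged,
which is what makes the bare comparison fail):

    val = gdb.parse_and_eval('part_type')
    val.type                                   # const struct device_type
    gdb.lookup_type('struct device_type')      # struct device_type
    val.type == gdb.lookup_type('struct device_type')                # False
    val.type.unqualified() == gdb.lookup_type('struct device_type')  # True

The except clause also needs the fully-qualified
crash.exceptions.DelayedAttributeError, since the bare name apparently
was never imported into this module.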
Signed-off-by: Jeff Mahoney --- crash/subsystem/storage/__init__.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/crash/subsystem/storage/__init__.py b/crash/subsystem/storage/__init__.py index c1a37b921d7..7e28bc0bbf1 100644 --- a/crash/subsystem/storage/__init__.py +++ b/crash/subsystem/storage/__init__.py @@ -14,6 +14,7 @@ from crash.util import container_of from crash.infra import CrashBaseClass, export from crash.types.classdev import for_each_class_device +import crash.exceptions class Storage(CrashBaseClass): __types__ = [ 'struct gendisk', @@ -35,17 +36,17 @@ class Storage(CrashBaseClass): @classmethod def check_types(cls, result): try: - if cls.part_type.type != cls.device_type_type: + if cls.part_type.type.unqualified() != cls.device_type_type: raise TypeError("part_type expected to be {} not {}" .format(cls.device_type_type, cls.part_type.type)) - if cls.disk_type.type != cls.device_type_type: + if cls.disk_type.type.unqualified() != cls.device_type_type: raise TypeError("disk_type expected to be {} not {}" .format(cls.device_type_type, cls.disk_type.type)) cls.types_checked = True - except DelayedAttributeError: + except crash.exceptions.DelayedAttributeError: pass @export From a656f97fd0d45018a79e456b1f214cfaa36d84cf Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Tue, 24 Jul 2018 22:29:48 -0400 Subject: [PATCH 003/367] crash: sanity check gdb features before startup We don't want to make it partway through initialization before throwing a weird error. By checking gdb features on initial import, we can catch those early. Signed-off-by: Jeff Mahoney --- crash/__init__.py | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/crash/__init__.py b/crash/__init__.py index 9430d0c2456..1079c80e093 100644 --- a/crash/__init__.py +++ b/crash/__init__.py @@ -4,3 +4,43 @@ from __future__ import absolute_import from __future__ import division from __future__ import print_function + +# Perform some sanity checks to ensure that we can actually work +import gdb + +try: + x = gdb.Target +except AttributeError, e: + raise RuntimeError("the installed gdb doesn't provide gdb.Target") + +try: + x = gdb.lookup_symbol('x', None) +except TypeError, e: + raise RuntimeError("the installed gdb doesn't support looking up symbols without a gdb.Block") + +try: + x = gdb.MinSymbol +except AttributeError, e: + raise RuntimeError("the installed gdb doesn't provide gdb.MinSymbol") + +try: + x = gdb.Register +except AttributeError, e: + raise RuntimeError("the installed gdb doesn't provide gdb.Register") + +try: + x = gdb.Symbol.section +except AttributeError, e: + raise RuntimeError("the installed gdb doesn't provide gdb.Symbol.section") + +try: + x = gdb.Inferior.new_thread +except AttributeError, e: + raise RuntimeError("the installed gdb doesn't provide gdb.Inferior.new_thread") + +try: + x = gdb.Objfile.architecture +except AttributeError, e: + raise RuntimeError("the installed gdb doesn't provide gdb.Objfile.architecture") + +del x From 3f7280e569cc87097c67ac31f3cc46ce19f43741 Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Tue, 24 Jul 2018 22:31:11 -0400 Subject: [PATCH 004/367] crash.sh: support for separate gdb installation At this stage of development, unpatched gdb doesn't support the features that crash-python requires. Rather than expect the user to replace their gdb with a patched on, we can install a separate gdb and use that. We'll check for crash-python-gdb first and then fall back to gdb. 
We also will now throw an error if no gdb is found. Signed-off-by: Jeff Mahoney --- crash.sh | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/crash.sh b/crash.sh index 0f273c2fe88..c21dea237ad 100755 --- a/crash.sh +++ b/crash.sh @@ -78,6 +78,19 @@ GDBINIT="$TMPDIR/gdbinit" set -e +GDB= +for gdb in crash-python-gdb gdb; do + if $gdb -v > /dev/null 2> /dev/null; then + GDB=$gdb + break + fi +done + +if [ -z "$GDB" ]; then + echo "ERROR: gdb is not available." >&2 + exit 1 +fi + # If we're using crash.sh from the git repo, use the modules from the git repo DIR="$(dirname $0)" if [ -e "$DIR/setup.py" ]; then @@ -121,9 +134,9 @@ if [ "$DEBUGMODE" = "gdb" ]; then RUN="run -nh -q -x $GDBINIT" echo $RUN > /tmp/gdbinit - gdb gdb -nh -q -x /tmp/gdbinit + gdb $GDB -nh -q -x /tmp/gdbinit elif [ "$DEBUGMODE" = "valgrind" ]; then - valgrind --keep-stacktraces=alloc-and-free gdb -nh -q -x $GDBINIT + valgrind --keep-stacktraces=alloc-and-free $GDB -nh -q -x $GDBINIT else - gdb -nh -q -x $GDBINIT + $GDB -nh -q -x $GDBINIT fi From 2cea420454591b5586a70fda93df67d81d641d4d Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Tue, 24 Jul 2018 22:33:36 -0400 Subject: [PATCH 005/367] crash.sh: improve error reporting during startup Now that the python code does feature detection, we'll should report that in a readable way rather than just throwing an exception. This catches the exception and prints it to stderr before exiting with a nonzero exit code. Signed-off-by: Jeff Mahoney --- crash.sh | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/crash.sh b/crash.sh index c21dea237ad..eb00abe25c2 100755 --- a/crash.sh +++ b/crash.sh @@ -119,14 +119,24 @@ set height 0 set print pretty on python -import crash.session +from __future__ import print_function +import sys +try: + import crash.session +except RuntimeError, e: + print("crash-python: {}, exiting".format(str(e)), file=sys.stderr) + sys.exit(1) path = "$SEARCHDIRS".split(' ') try: x = crash.session.Session("$KERNEL", "$VMCORE", "$ZKERNEL", path) + print("The 'pyhelp' command will list the command extensions.") except gdb.error as e: - print(str(e)) + print("crash-python: {}, exiting".format(str(e)), file=sys.stderr) + sys.exit(1) except RuntimeError as e: - print("Failed to open {}: {}".format("$VMCORE", str(e))) + print("crash-python: Failed to open {}. {}".format("$VMCORE", str(e)), + file=sys.stderr) + sys.exit(1) EOF # This is how we debug gdb problems when running crash From b72b246d4c39b37625065a7073d380f13a5eb3ef Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Mon, 25 Jun 2018 16:37:27 -0400 Subject: [PATCH 006/367] crash.types.task: fix typo while checking task_struct type The variable is 'task' not 'task_struct' Signed-off-by: Jeff Mahoney --- crash/types/task.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crash/types/task.py b/crash/types/task.py index f7e66984523..5576b0c7861 100644 --- a/crash/types/task.py +++ b/crash/types/task.py @@ -136,7 +136,7 @@ def init_task_types(cls, task): if not cls.valid: t = gdb.lookup_type('struct task_struct') if task.type != t: - raise BadTaskError(task_struct) + raise BadTaskError(task) # Using a type within the same context makes things a *lot* faster # This works around a shortcoming in gdb. 
A type lookup and From d3e7626ccc6d5e81122fb2c66478dbe7e3443f4b Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Fri, 13 Jul 2018 12:20:23 -0400 Subject: [PATCH 007/367] tests: add test for type callbacks The type lookup callbacks didn't have testcases. Signed-off-by: Jeff Mahoney --- tests/test_infra_lookup.py | 56 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/tests/test_infra_lookup.py b/tests/test_infra_lookup.py index f748879b077..5173c2c0892 100644 --- a/tests/test_infra_lookup.py +++ b/tests/test_infra_lookup.py @@ -527,3 +527,59 @@ def test_p_type_available_at_start(self): x = test() y = x.test_p_type self.assertTrue(isinstance(y, gdb.Type)) + + def type_callback_test(self): + class Test(CrashBaseClass): + __type_callbacks__ = [ + ('unsigned long', 'check_ulong') + ] + ulong_valid = False + @classmethod + def check_ulong(cls, gdbtype): + cls.ulong_valid = True + + return Test + + def test_type_callback_nofile(self): + test = self.type_callback_test() + x = test() + self.assertFalse(test.ulong_valid) + with self.assertRaises(AttributeError): + y = x.unsigned_long_type + + def test_type_callback(self): + test = self.type_callback_test() + x = test() + self.load_file() + self.assertTrue(test.ulong_valid) + with self.assertRaises(AttributeError): + y = x.unsigned_long_type + + def type_callback_test_multi(self): + class Test(CrashBaseClass): + __types__ = [ 'unsigned long' ] + __type_callbacks__ = [ + ('unsigned long', 'check_ulong') + ] + ulong_valid = False + @classmethod + def check_ulong(cls, gdbtype): + cls.ulong_valid = True + + return Test + + def test_type_callback_nofile_multi(self): + test = self.type_callback_test_multi() + x = test() + self.assertFalse(test.ulong_valid) + with self.assertRaises(DelayedAttributeError): + y = x.unsigned_long_type + + def test_type_callback_multi(self): + test = self.type_callback_test_multi() + x = test() + self.load_file() + self.assertTrue(test.ulong_valid) + y = x.unsigned_long_type + self.assertTrue(isinstance(y, gdb.Type)) + self.assertTrue(y.sizeof > 4) From 91998da71c22a10ed70edc35e188bda44a76f5b7 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Tue, 14 Nov 2017 11:57:44 -0500 Subject: [PATCH 008/367] crash.arch.x86_64: add support for inactive_task_frame With Linux 4.9, thread_return was eliminated and we need to use the inactive_task_frame structure instead. 
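Roughly, for a task that is scheduled out the saved frame sits at the
task's saved stack pointer, so the blocked PC can be recovered with
something like the following sketch (field names as in the v4.9+
struct inactive_task_frame):

    ulong_type = gdb.lookup_type('unsigned long')
    frame_type = gdb.lookup_type('struct inactive_task_frame')
    sp = task['thread']['sp'].cast(ulong_type.pointer())
    frame = sp.cast(frame_type.pointer()).dereference()
    rip = frame['ret_addr']    # PC of the blocked thread

When the type isn't present we fall back to the old thread_return
minimal symbol, as before.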
Signed-off-by: Jeff Mahoney --- crash/arch/x86_64.py | 45 ++++++++++++++++++++++++++++++++++---------- 1 file changed, 35 insertions(+), 10 deletions(-) diff --git a/crash/arch/x86_64.py b/crash/arch/x86_64.py index 11ac099328c..90814ec7393 100644 --- a/crash/arch/x86_64.py +++ b/crash/arch/x86_64.py @@ -21,11 +21,21 @@ def __init__(self): super(x86_64Architecture, self).__init__() # PC for blocked threads try: - thread_return = gdb.lookup_minimal_symbol("thread_return") - self.rip = thread_return.value().address - except Exception: - raise RuntimeError("{} requires symbol 'thread_return'" - .format(self.__class__.__name__)) + inactive = gdb.lookup_type('struct inactive_task_frame') + self.fetch_register_scheduled = \ + self.fetch_register_scheduled_inactive + self.inactive_task_frame_type = inactive +# self.__switch_to_asm = gdb.lookup_symbol + print("Using __switch_to_asm") + except gdb.error as e: + try: + thread_return = gdb.lookup_minimal_symbol("thread_return") + self.thread_return = thread_return.value().address + self.fetch_register_scheduled = \ + self.fetch_register_scheduled_thread_return + except Exception: + raise RuntimeError("{} requires symbol 'thread_return'" + .format(self.__class__.__name__)) self.ulong_type = gdb.lookup_type('unsigned long') thread_info_type = gdb.lookup_type('struct thread_info') self.thread_info_p_type = thread_info_type.pointer() @@ -40,20 +50,35 @@ def setup_thread_info(self, thread): def fetch_register_active(self, thread, register): task = thread.info + print(task.regs) for reg in task.regs: if reg == "rip" and (register != 16 and register != -1): continue - if reg in ["gs_base", "orig_ax", "rflags", "fs_base"]: - continue - thread.registers[reg].value = task.regs[reg] + try: + thread.registers[reg].value = task.regs[reg] + except KeyError, e: + pass + + def fetch_register_scheduled_inactive(self, thread, register): + ulong_type = self.ulong_type + task = thread.info.task_struct + + # Only write rip when requested; It resets the frame cache + if register == 16 or register == -1: + thread.registers['rip'].value = self.thread_return + if register == 16: + return True + + print("ok") + rsp = task['thread']['sp'].cast(ulong_type.pointer()) - def fetch_register_scheduled(self, thread, register): + def fetch_register_scheduled_thread_return(self, thread, register): ulong_type = self.ulong_type task = thread.info.task_struct # Only write rip when requested; It resets the frame cache if register == 16 or register == -1: - thread.registers['rip'].value = self.rip + thread.registers['rip'].value = self.thread_return if register == 16: return True From 9404633d5cfcd6ab374880fec1720a694b181863 Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Tue, 24 Jul 2018 23:38:57 -0400 Subject: [PATCH 009/367] crash.commands.mount: fix mount command for newer kernels The mount command was still trying to access ->mnt_root directly as if it were a vfsmount. This adds a mount_root helper to handle the lookup without needing special handling at each call site. 
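On kernels where the mount list holds struct mount, the old vfsmount
fields live in the embedded 'mnt' member, so mnt['mnt_root'] no longer
resolves directly.  The helper added below amounts to (sketch):

    def mount_root(mnt):
        try:
            mnt = mnt['mnt']     # struct mount -> embedded struct vfsmount
        except gdb.error:
            pass                 # already a plain struct vfsmount
        return mnt['mnt_root']

and call sites simply use d_path(mnt, mount_root(mnt)).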
Signed-off-by: Jeff Mahoney --- crash/commands/mount.py | 3 ++- crash/subsystem/filesystem/mount.py | 15 +++++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/crash/commands/mount.py b/crash/commands/mount.py index e619ec36ae6..1309483c525 100644 --- a/crash/commands/mount.py +++ b/crash/commands/mount.py @@ -20,6 +20,7 @@ from crash.subsystem.filesystem.mount import d_path, for_each_mount from crash.subsystem.filesystem.mount import mount_device, mount_fstype from crash.subsystem.filesystem.mount import mount_super, mount_flags +from crash.subsystem.filesystem.mount import mount_root class MountCommand(CrashCommand): """display mounted file systems @@ -63,7 +64,7 @@ def show_one_mount(self, mnt, args, task=None): flags = "" if args.f: flags = " ({})".format(mount_flags(mnt)) - path = d_path(mnt, mnt['mnt_root']) + path = d_path(mnt, mount_root(mnt)) if args.v: print("{:016x} {:016x} {:<10} {:<16} {}" .format(long(mnt.address), long(mount_super(mnt)), diff --git a/crash/subsystem/filesystem/mount.py b/crash/subsystem/filesystem/mount.py index da8ececccd3..be13de2245b 100644 --- a/crash/subsystem/filesystem/mount.py +++ b/crash/subsystem/filesystem/mount.py @@ -125,6 +125,16 @@ def mount_super(mnt): sb = mnt['mnt_sb'] return sb + @export + @staticmethod + def mount_root(mnt): + try: + mnt = mnt['mnt'] + except gdb.error: + pass + + return mnt['mnt_root'] + @export @classmethod def mount_fstype(cls, mnt): @@ -154,6 +164,11 @@ def d_path(cls, mnt, dentry, root=None): if mount.type.code != gdb.TYPE_CODE_PTR: mount = mount.address + try: + mnt = mnt['mnt'].address + except gdb.error: + pass + name = "" # Gone are the days where finding the root was as simple as From 2d1e6c7b777e7ca4d2120dac67f642299fc42edb Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Wed, 25 Jul 2018 17:41:45 -0400 Subject: [PATCH 010/367] crash.arch.x86_64: fix inactive_task_frame Commit 91998da71c2 (crash.arch.x86_64: add support for inactive_task_frame) was premature and was tested against a kernel that didn't have the complete implementation. This patch completes it. 
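One detail worth calling out in the diff: the hard-coded segment
values written for cs and ss follow the x86_64 GDT layout, i.e.
(assuming the standard kernel GDT)

    thread.registers['cs'].value = 2*8    # GDT entry 2: __KERNEL_CS
    thread.registers['ss'].value = 3*8    # GDT entry 3: __KERNEL_DS

since a task blocked in the kernel was running with the kernel code
and data selectors loaded.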
Signed-off-by: Jeff Mahoney --- crash/arch/x86_64.py | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/crash/arch/x86_64.py b/crash/arch/x86_64.py index 90814ec7393..62c396a5839 100644 --- a/crash/arch/x86_64.py +++ b/crash/arch/x86_64.py @@ -25,8 +25,6 @@ def __init__(self): self.fetch_register_scheduled = \ self.fetch_register_scheduled_inactive self.inactive_task_frame_type = inactive -# self.__switch_to_asm = gdb.lookup_symbol - print("Using __switch_to_asm") except gdb.error as e: try: thread_return = gdb.lookup_minimal_symbol("thread_return") @@ -50,7 +48,6 @@ def setup_thread_info(self, thread): def fetch_register_active(self, thread, register): task = thread.info - print(task.regs) for reg in task.regs: if reg == "rip" and (register != 16 and register != -1): continue @@ -63,14 +60,28 @@ def fetch_register_scheduled_inactive(self, thread, register): ulong_type = self.ulong_type task = thread.info.task_struct + rsp = task['thread']['sp'].cast(ulong_type.pointer()) + + frame = rsp.cast(self.inactive_task_frame_type.pointer()).dereference() + # Only write rip when requested; It resets the frame cache if register == 16 or register == -1: - thread.registers['rip'].value = self.thread_return + thread.registers['rip'].value = frame['ret_addr'] if register == 16: return True - print("ok") - rsp = task['thread']['sp'].cast(ulong_type.pointer()) + thread.registers['rsp'].value = rsp + thread.registers['rbp'].value = frame['bp'] + thread.registers['rbx'].value = frame['bx'] + thread.registers['r12'].value = frame['r12'] + thread.registers['r13'].value = frame['r13'] + thread.registers['r14'].value = frame['r14'] + thread.registers['r15'].value = frame['r15'] + thread.registers['cs'].value = 2*8 + thread.registers['ss'].value = 3*8 + + thread.info.stack_pointer = rsp + thread.info.valid_stack = True def fetch_register_scheduled_thread_return(self, thread, register): ulong_type = self.ulong_type From cc374ae993ad776169da9b358f051ad1d0e6eaff Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Wed, 25 Jul 2018 17:41:50 -0400 Subject: [PATCH 011/367] crash.commands.ps: fix exception handling ps still used the old CrashCommandError exception when it should've been using CrashCommandLineError. Signed-off-by: Jeff Mahoney --- crash/commands/ps.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crash/commands/ps.py b/crash/commands/ps.py index 6874fd13bf9..2e72aa1fac0 100755 --- a/crash/commands/ps.py +++ b/crash/commands/ps.py @@ -13,6 +13,7 @@ long = int from crash.commands import CrashCommand, CrashCommandParser +from crash.commands import CrashCommandLineError from crash.types.task import LinuxTask, TaskStateFlags as TF class PSCommand(CrashCommand): @@ -548,7 +549,7 @@ def execute(self, argv): try: self.setup_task_states() except AttributeError: - raise CrashCommandError("The task subsystem is not available.") + raise CrashCommandLineError("The task subsystem is not available.") sort_by = sort_by_pid if argv.l: From 87345f1ecaaa67630085378ba1eaa834aa035c59 Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Wed, 25 Jul 2018 17:41:54 -0400 Subject: [PATCH 012/367] crash.types.task: add get_last_cpu() helper Linux v4.9 allowed architectures to move much of thread_info into task_struct. This change adds a task.get_last_cpu() helper to return the cpu in either case. 
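Call sites then become uniform regardless of kernel version, e.g.
(sketch):

    cpu = task.get_last_cpu()    # 'cpu' read from task_struct on v4.9+,
                                 # from thread_info on older kernels

instead of reaching into task.get_thread_info()['cpu'] directly.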
Signed-off-by: Jeff Mahoney --- crash/commands/ps.py | 4 ++-- crash/types/task.py | 6 ++++++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/crash/commands/ps.py b/crash/commands/ps.py index 2e72aa1fac0..ee842744265 100755 --- a/crash/commands/ps.py +++ b/crash/commands/ps.py @@ -461,7 +461,7 @@ def task_state_string(self, task): def task_header(cls, task): task_struct = task.task_struct template = "PID: {0:-5d} TASK: {1:x} CPU: {2:>2d} COMMAND: \"{3}\"" - cpu = int(task.get_thread_info()['cpu']) + cpu = task.get_last_cpu() if task.active: cpu = task.cpu return template.format(int(task_struct['pid']), @@ -515,7 +515,7 @@ def print_one(self, argv, thread): width = 7 print(line.format(active, int(task_struct['pid']), int(parent_pid), - int(task.get_thread_info()['cpu']), long(pointer), + int(task.get_last_cpu()), long(pointer), width, self.task_state_string(task), 0, task.total_vm * 4096 // 1024, task.rss * 4096 // 1024, diff --git a/crash/types/task.py b/crash/types/task.py index 5576b0c7861..9662363c9fe 100644 --- a/crash/types/task.py +++ b/crash/types/task.py @@ -163,6 +163,12 @@ def set_thread_info(self, thread_info): def get_thread_info(self): return self.thread_info + def get_last_cpu(self): + try: + return self.task_struct['cpu'] + except gdb.error as e: + return self.thread_info['cpu'] + def task_state(self): state = long(self.task_struct['state']) if self.task_state_has_exit_state: From 1b04cc4e8bb2c4703cfbf923e86b658d4ee4e677 Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Wed, 25 Jul 2018 17:44:01 -0400 Subject: [PATCH 013/367] crash.session: move most of setup to crash.kernel The initialization is needed to work with the kernel and should be outside of the session. Signed-off-by: Jeff Mahoney --- crash/infra/__init__.py | 7 +++ crash/kdump/target.py | 72 ++------------------------- crash/kernel.py | 105 ++++++++++++++++++++++++++++++++++------ crash/session.py | 22 ++------- 4 files changed, 107 insertions(+), 99 deletions(-) diff --git a/crash/infra/__init__.py b/crash/infra/__init__.py index c71a73867c2..5014da41805 100644 --- a/crash/infra/__init__.py +++ b/crash/infra/__init__.py @@ -38,6 +38,13 @@ def __call__(self, *args, **kwargs): else: return self.func(obj, *args, **kwargs) +def register_singleton(mod, obj): + if not hasattr(mod, '_export_wrapper_singleton_dict'): + raise RuntimeError("Class {} has no exported members." + .format(obj.__class__.__name__)) + + mod._export_wrapper_singleton_dict[obj.__class__] = obj + def export(func): """This marks the function for export to the module namespace. 
The class must inherit from CrashBaseClass.""" diff --git a/crash/kdump/target.py b/crash/kdump/target.py index 808242a501e..5befa2dc7f1 100644 --- a/crash/kdump/target.py +++ b/crash/kdump/target.py @@ -10,18 +10,12 @@ from kdumpfile import kdumpfile, KDUMP_KVADDR from kdumpfile.exceptions import * import addrxlat -from crash.types.list import list_for_each_entry -from crash.types.percpu import get_percpu_var -from crash.types.task import LinuxTask -import crash.cache.tasks import crash.arch import crash.arch.x86_64 if sys.version_info.major >= 3: long = int -LINUX_KERNEL_PID = 1 - class SymbolCallback(object): "addrxlat symbolic callback" @@ -44,20 +38,16 @@ def __call__(self, symtype, *args): raise addrxlat.NoDataError() class Target(gdb.Target): - def __init__(self, filename, debug=False): - self.filename = filename + def __init__(self, vmcore, debug=False): + if not isinstance(vmcore, kdumpfile): + raise TypeError("vmcore must be of type kdumpfile") self.arch = None self.debug = debug - try: - self.kdump = kdumpfile(filename) - except OSErrorException as e: - raise RuntimeError(str(e)) + self.kdump = vmcore ctx = self.kdump.get_addrxlat_ctx() ctx.cb_sym = SymbolCallback(ctx) self.kdump.attr['addrxlat.ostype'] = 'linux' - gdb.execute('set print thread-events 0') - self.setup_arch() # So far we've read from the kernel image, now that we've setup @@ -65,9 +55,6 @@ def __init__(self, filename, debug=False): # infrastructure. super(Target, self).__init__() - # Now we're reading from the dump file - self.setup_tasks() - def setup_arch(self): archname = self.kdump.attr.arch.name archclass = crash.arch.get_architecture(archname) @@ -86,57 +73,6 @@ def setup_arch(self): self.arch = archclass() - def setup_tasks(self): - init_task = gdb.lookup_global_symbol('init_task') - task_list = init_task.value()['tasks'] - runqueues = gdb.lookup_global_symbol('runqueues') - - rqs = get_percpu_var(runqueues) - rqscurrs = {long(x["curr"]) : k for (k, x) in rqs.items()} - - self.pid_to_task_struct = {} - - print("Loading tasks...", end='') - sys.stdout.flush() - - task_count = 0 - tasks = [] - for taskg in list_for_each_entry(task_list, init_task.type, 'tasks'): - tasks.append(taskg) - for task in list_for_each_entry(taskg['thread_group'], init_task.type, 'thread_group'): - tasks.append(task) - - for task in tasks: - cpu = None - regs = None - active = long(task.address) in rqscurrs - if active: - cpu = rqscurrs[long(task.address)] - regs = self.kdump.attr.cpu[cpu].reg - - ltask = LinuxTask(task, active, cpu, regs) - ptid = (LINUX_KERNEL_PID, task['pid'], 0) - try: - thread = gdb.selected_inferior().new_thread(ptid, ltask) - except gdb.error as e: - print("Failed to setup task @{:#x}".format(long(task.address))) - continue - thread.name = task['comm'].string() - - self.arch.setup_thread_info(thread) - ltask.attach_thread(thread) - ltask.set_get_stack_pointer(self.arch.get_stack_pointer) - - crash.cache.tasks.cache_task(ltask) - - task_count += 1 - if task_count % 100 == 0: - print(".", end='') - sys.stdout.flush() - print(" done. 
({} tasks total)".format(task_count)) - - gdb.selected_inferior().executing = False - @classmethod def report_error(cls, addr, length, error): print("Error while reading {:d} bytes from {:#x}: {}" diff --git a/crash/kernel.py b/crash/kernel.py index 4ab86a6b20a..24d5eef8bda 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -10,26 +10,52 @@ import os.path from crash.infra import CrashBaseClass, export from crash.types.list import list_for_each_entry +from crash.types.percpu import get_percpu_var +from crash.types.list import list_for_each_entry +import crash.cache.tasks +from crash.types.task import LinuxTask +import crash.kdump +import crash.kdump.target +from kdumpfile import kdumpfile if sys.version_info.major >= 3: long = int +LINUX_KERNEL_PID = 1 + + class CrashKernel(CrashBaseClass): __types__ = [ 'struct module' ] __symvals__ = [ 'modules' ] - def __init__(self): + def __init__(self, vmlinux_filename, searchpath=None): self.findmap = {} + self.vmlinux_filename = vmlinux_filename + self.searchpath = searchpath + + error = gdb.execute("file {}".format(vmlinux_filename), to_string=True) + + try: + list_type = gdb.lookup_type('struct list_head') + except gdb.error as e: + self.load_debuginfo(gdb.objfiles()[0], None) + try: + list_type = gdb.lookup_type('struct list_head') + except gdb.error as e: + raise RuntimeError("Couldn't locate debuginfo for {}" + .format(vmlinux_filename)) + + def attach_vmcore(self, vmcore_filename, debug=False): + self.vmcore_filename = vmcore_filename + self.vmcore = kdumpfile(vmcore_filename) + self.target = crash.kdump.target.Target(self.vmcore, debug) - @export def for_each_module(self): for module in list_for_each_entry(self.modules, self.module_type, 'list'): yield module - @export - @staticmethod - def get_module_sections(module): + def get_module_sections(self, module): attrs = module['sect_attrs'] out = [] for sec in range(0, attrs['nsections']): @@ -41,17 +67,16 @@ def get_module_sections(module): return " ".join(out) - @export - def load_modules(self, searchpath, verbose=False): + def load_modules(self, verbose=False): print("Loading modules...", end='') sys.stdout.flush() failed = 0 loaded = 0 - for module in for_each_module(): + for module in self.for_each_module(): modname = "{}".format(module['name'].string()) modfname = "{}.ko".format(modname) found = False - for path in searchpath: + for path in self.searchpath: modpath = self.find_module_file(modfname, path) if not modpath: continue @@ -61,14 +86,14 @@ def load_modules(self, searchpath, verbose=False): if verbose: print("Loading {} at {}" .format(modname, module['module_core'])) - sections = get_module_sections(module) + sections = self.get_module_sections(module) gdb.execute("add-symbol-file {} {} {}" .format(modpath, module['module_core'], sections), to_string=True) sal = gdb.find_pc_line(long(module['module_core'])) if sal.symtab is None: objfile = gdb.lookup_objfile(modpath) - load_debuginfo(searchpath, objfile, modpath) + self.load_debuginfo(objfile, modpath) # We really should check the version, but GDB doesn't export # a way to lookup sections. 
@@ -108,8 +133,7 @@ def find_module_file(self, name, path): except KeyError: return None - @export - def load_debuginfo(self, searchpath, objfile, name=None, verbose=False): + def load_debuginfo(self, objfile, name=None, verbose=False): if name is None: name = objfile.filename if ".gz" in name: @@ -121,7 +145,7 @@ def load_debuginfo(self, searchpath, objfile, name=None, verbose=False): if os.path.exists(filename): filepath = filename else: - for path in searchpath: + for path in self.searchpath: filepath = self.find_module_file(filename, path) if filepath: break @@ -130,3 +154,56 @@ def load_debuginfo(self, searchpath, objfile, name=None, verbose=False): objfile.add_separate_debug_file(filepath) else: print("Could not locate debuginfo for {}".format(name)) + + def setup_tasks(self): + gdb.execute('set print thread-events 0') + + init_task = gdb.lookup_global_symbol('init_task') + task_list = init_task.value()['tasks'] + runqueues = gdb.lookup_global_symbol('runqueues') + + rqs = get_percpu_var(runqueues) + rqscurrs = {long(x["curr"]) : k for (k, x) in rqs.items()} + + self.pid_to_task_struct = {} + + print("Loading tasks...", end='') + sys.stdout.flush() + + task_count = 0 + tasks = [] + for taskg in list_for_each_entry(task_list, init_task.type, 'tasks'): + tasks.append(taskg) + for task in list_for_each_entry(taskg['thread_group'], init_task.type, 'thread_group'): + tasks.append(task) + + for task in tasks: + cpu = None + regs = None + active = long(task.address) in rqscurrs + if active: + cpu = rqscurrs[long(task.address)] + regs = self.vmcore.attr.cpu[cpu].reg + + ltask = LinuxTask(task, active, cpu, regs) + ptid = (LINUX_KERNEL_PID, task['pid'], 0) + try: + thread = gdb.selected_inferior().new_thread(ptid, ltask) + except gdb.error as e: + print("Failed to setup task @{:#x}".format(long(task.address))) + continue + thread.name = task['comm'].string() + + self.target.arch.setup_thread_info(thread) + ltask.attach_thread(thread) + ltask.set_get_stack_pointer(self.target.arch.get_stack_pointer) + + crash.cache.tasks.cache_task(ltask) + + task_count += 1 + if task_count % 100 == 0: + print(".", end='') + sys.stdout.flush() + print(" done. 
({} tasks total)".format(task_count)) + + gdb.selected_inferior().executing = False diff --git a/crash/session.py b/crash/session.py index 9ee4cd19125..2cf142e157c 100644 --- a/crash/session.py +++ b/crash/session.py @@ -9,8 +9,7 @@ import sys from crash.infra import autoload_submodules -from crash.kernel import load_debuginfo, load_modules -import crash.kdump.target +import crash.kernel class Session(object): """crash.Session is the main driver component for crash-python""" @@ -24,21 +23,10 @@ def __init__(self, kernel_exec=None, vmcore=None, kernelpath=None, autoload_submodules('crash.subsystem') autoload_submodules('crash.commands') - self.searchpath = searchpath - if not kernel_exec: return - error = gdb.execute("file {}".format(kernel_exec), to_string=True) - - try: - list_type = gdb.lookup_type('struct list_head') - except gdb.error as e: - load_debuginfo(searchpath, gdb.objfiles()[0], kernelpath) - try: - list_type = gdb.lookup_type('struct list_head') - except gdb.error as e: - raise RuntimeError("Couldn't locate debuginfo for {}".format(kernel_exec)) - - self.target = crash.kdump.target.Target(vmcore, debug) - load_modules(self.searchpath) + self.kernel = crash.kernel.CrashKernel(kernel_exec, searchpath) + self.kernel.attach_vmcore(vmcore, debug) + self.kernel.setup_tasks() + self.kernel.load_modules(searchpath) From 59ed5a51bbb5878d9c9242cf37b18d129fb548c5 Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Wed, 25 Jul 2018 17:42:00 -0400 Subject: [PATCH 014/367] crash: add support for kASLR This patch enables kASLR support for crash-python. libkdumpfile does the heavy lifting for address translation but the devil is in the details. There are a few ordering issues that needed to be sorted out: - The base address comes from the vmcore - If we load the kernel without the base address, all of the objfile callbacks for delayed lookups will be called with the unrelocated addresses - add-symbol-file doesn't set the architecture, so we need to do that ourselves. Petr Tesarik's work on gdb to support an objfile-wide offset makes this work quite a bit easier. One hiccup is that, internally, gdb takes the section offset into account when generating addresses for minimal symbols. This means that providing an address for .text that already has the offset applied doesn't work because the minimal symbols will not be properly relocated. If it's omitted, the -o functionality does the right thing. Signed-off-by: Jeff Mahoney --- crash/kdump/target.py | 3 -- crash/kernel.py | 76 +++++++++++++++++++++++++++++++++++++++++-- crash/session.py | 18 ++++++---- setup.py | 2 +- 4 files changed, 86 insertions(+), 13 deletions(-) diff --git a/crash/kdump/target.py b/crash/kdump/target.py index 5befa2dc7f1..70f1c228e8b 100644 --- a/crash/kdump/target.py +++ b/crash/kdump/target.py @@ -34,7 +34,6 @@ def __call__(self, symtype, *args): ms = gdb.lookup_minimal_symbol(args[0]) if ms is not None: return long(ms.value().address) - raise addrxlat.NoDataError() class Target(gdb.Target): @@ -48,8 +47,6 @@ def __init__(self, vmcore, debug=False): ctx.cb_sym = SymbolCallback(ctx) self.kdump.attr['addrxlat.ostype'] = 'linux' - self.setup_arch() - # So far we've read from the kernel image, now that we've setup # the architecture, we're ready to plumb into the target # infrastructure. 
diff --git a/crash/kernel.py b/crash/kernel.py index 24d5eef8bda..098bde6af5e 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -17,13 +17,13 @@ import crash.kdump import crash.kdump.target from kdumpfile import kdumpfile +from elftools.elf.elffile import ELFFile if sys.version_info.major >= 3: long = int LINUX_KERNEL_PID = 1 - class CrashKernel(CrashBaseClass): __types__ = [ 'struct module' ] __symvals__ = [ 'modules' ] @@ -33,7 +33,32 @@ def __init__(self, vmlinux_filename, searchpath=None): self.vmlinux_filename = vmlinux_filename self.searchpath = searchpath - error = gdb.execute("file {}".format(vmlinux_filename), to_string=True) + f = open(self.vmlinux_filename, 'rb') + self.elffile = ELFFile(f) + + self.set_gdb_arch() + + def set_gdb_arch(self): + mach = self.elffile['e_machine'] + e_class = self.elffile['e_ident']['EI_CLASS'] + + elf_to_gdb = { + ('EM_X86_64', 'ELFCLASS64') : 'i386:x86-64', + ('EM_386', 'ELFCLASS32') : 'i386', + ('EM_S390', 'ELFCLASS64') : 's390:64-bit' + } + + try: + gdbarch = elf_to_gdb[(mach, e_class)] + except KeyError as e: + raise RuntimeError("no mapping for {}:{} to gdb architecture found.".format(mach, e_class)) + gdb.execute("set arch {}".format(gdbarch), to_string=True) + + def open_kernel(self): + if self.base_offset is None: + raise RuntimeError("Base offset is unconfigured.") + + self.load_sections() try: list_type = gdb.lookup_type('struct list_head') @@ -43,13 +68,58 @@ def __init__(self, vmlinux_filename, searchpath=None): list_type = gdb.lookup_type('struct list_head') except gdb.error as e: raise RuntimeError("Couldn't locate debuginfo for {}" - .format(vmlinux_filename)) + .format(self.vmlinux_filename)) + + self.target.setup_arch() + + def get_sections(self): + sections = {} + + text = self.elffile.get_section_by_name('.text') + + for section in self.elffile.iter_sections(): + if (section['sh_addr'] < text['sh_addr'] and + section.name != '.data..percpu'): + continue + sections[section.name] = section['sh_addr'] + + return sections + + def load_sections(self): + sections = self.get_sections() + + line = "" + + # .data..percpu shouldn't have relocation applied but it does. + # Perhaps it's due to the address being 0 and it being handled + # as unspecified in the parameter list. +# for section, addr in sections.items(): +# if addr == 0: +# line += " -s {} {:#x}".format(section, addr) + + # The gdb internals are subtle WRT how symbols are mapped. + # Minimal symbols are mapped using the offset for the section + # that contains them. That means that using providing an address + # for .text here gives a base address with no offset and minimal + # symbols in .text (like __switch_to_asm) will not have the correct + # addresses after relocation. 
+ cmd = "add-symbol-file {} -o {:#x} {} ".format(self.vmlinux_filename, + self.base_offset, line) + gdb.execute(cmd, to_string=True) def attach_vmcore(self, vmcore_filename, debug=False): self.vmcore_filename = vmcore_filename self.vmcore = kdumpfile(vmcore_filename) self.target = crash.kdump.target.Target(self.vmcore, debug) + self.base_offset = 0 + try: + KERNELOFFSET = "linux.vmcoreinfo.lines.KERNELOFFSET" + attr = self.vmcore.attr.get(KERNELOFFSET, "0") + self.base_offset = long(attr, base=16) + except Exception as e: + print(e) + def for_each_module(self): for module in list_for_each_entry(self.modules, self.module_type, 'list'): diff --git a/crash/session.py b/crash/session.py index 2cf142e157c..cf4ef2cddc0 100644 --- a/crash/session.py +++ b/crash/session.py @@ -10,23 +10,29 @@ from crash.infra import autoload_submodules import crash.kernel +from kdumpfile import kdumpfile class Session(object): """crash.Session is the main driver component for crash-python""" def __init__(self, kernel_exec=None, vmcore=None, kernelpath=None, searchpath=None, debug=False): + self.vmcore_filename = vmcore + print("crash-python initializing...") if searchpath is None: searchpath = [] + if kernel_exec: + self.kernel = crash.kernel.CrashKernel(kernel_exec, searchpath) + self.kernel.attach_vmcore(vmcore, debug) + self.kernel.open_kernel() + autoload_submodules('crash.cache') autoload_submodules('crash.subsystem') autoload_submodules('crash.commands') - if not kernel_exec: - return + if kernel_exec: + self.kernel.setup_tasks() + self.kernel.load_modules(searchpath) + - self.kernel = crash.kernel.CrashKernel(kernel_exec, searchpath) - self.kernel.attach_vmcore(vmcore, debug) - self.kernel.setup_tasks() - self.kernel.load_modules(searchpath) diff --git a/setup.py b/setup.py index f2dbb705542..e06b693f522 100644 --- a/setup.py +++ b/setup.py @@ -14,7 +14,7 @@ '' : [ "*.dist" "*.txt" ], }, - install_requires = [ 'future' ], + install_requires = [ 'future', 'pyelftools' ], author = "Jeff Mahoney", author_email = "jeffm@suse.com", From 349dc9ce23396a21edb397a752911314a8451091 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 7 Sep 2018 19:20:33 +0200 Subject: [PATCH 015/367] crash.commands.dmesg: cast message length to long Signed-off-by: Jeff Mahoney --- crash/commands/dmesg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crash/commands/dmesg.py b/crash/commands/dmesg.py index 3784257e38e..1680d9eb5ce 100644 --- a/crash/commands/dmesg.py +++ b/crash/commands/dmesg.py @@ -181,7 +181,7 @@ def log_from_idx(self, logbuf, idx, dict_needed=False): try: textval = (msg.cast(self.char_p_type) + self.printk_log_p_type.target().sizeof) - text = textval.string(length=msg['text_len']) + text = textval.string(length=long(msg['text_len'])) except UnicodeDecodeError as e: print(e) From 803a8326e9f6cb4039271736649327d7a0915285 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 7 Sep 2018 19:22:07 +0200 Subject: [PATCH 016/367] crash.util: add get_typed_pointer This commit adds a helper to get a typed pointer from a value or address. 
Signed-off-by: Jeff Mahoney --- crash/util.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/crash/util.py b/crash/util.py index e2478598e8c..c015911dd93 100644 --- a/crash/util.py +++ b/crash/util.py @@ -214,3 +214,25 @@ def safe_lookup_type(name, block=None): @staticmethod def array_size(value): return value.type.sizeof // value[0].type.sizeof + + @export + @staticmethod + def get_typed_pointer(val, gdbtype): + if gdbtype.code != gdb.TYPE_CODE_PTR: + gdbtype = gdbtype.pointer() + if isinstance(val, gdb.Value): + if (val.type != gdbtype and + val.type != gdbtype.target()): + raise TypeError("gdb.Value must refer to {} not {}" + .format(gdbtype, val.type)) + elif isinstance(val, str): + try: + val = long(val, 16) + except TypeError as e: + print(e) + raise TypeError("string must describe hex address: ".format(e)) + if isinstance(val, long): + val = gdb.Value(val).cast(gdbtype).dereference() + + return val + From fd8306018054ee9c9ef8a38f729e95fcfefed70b Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 7 Sep 2018 19:23:52 +0200 Subject: [PATCH 017/367] crash.kernel: handle module layout change Kernel v4.5 introduced a structure to encapsulate module layout. This patch accounts for that. Signed-off-by: Jeff Mahoney --- crash/kernel.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/crash/kernel.py b/crash/kernel.py index 098bde6af5e..9214d17430f 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -153,14 +153,18 @@ def load_modules(self, verbose=False): found = True + if 'module_core' in module.type: + addr = long(module['module_core']) + else: + addr = long(module['core_layout']['base']) + if verbose: - print("Loading {} at {}" - .format(modname, module['module_core'])) + print("Loading {} at {:#x}".format(modname, addr)) sections = self.get_module_sections(module) - gdb.execute("add-symbol-file {} {} {}" - .format(modpath, module['module_core'], sections), + gdb.execute("add-symbol-file {} {:#x} {}" + .format(modpath, addr, sections), to_string=True) - sal = gdb.find_pc_line(long(module['module_core'])) + sal = gdb.find_pc_line(addr) if sal.symtab is None: objfile = gdb.lookup_objfile(modpath) self.load_debuginfo(objfile, modpath) From 7a861ca3ea1bf6f28b2ed687b16617208b1e5c2e Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 7 Sep 2018 19:24:50 +0200 Subject: [PATCH 018/367] crash.subsystem.storage: use unqualified types when passed by value When a type is passed via a value or symbol, it can be const or volatile. While the latter is rare in the kernel, the former is commonplace and is one of the things that many subsystems add to operations structures and the like over time. This patch ignores the qualifications when doing a type comparison. 
Signed-off-by: Jeff Mahoney --- crash/subsystem/storage/__init__.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/crash/subsystem/storage/__init__.py b/crash/subsystem/storage/__init__.py index 7e28bc0bbf1..9e126bfa3c3 100644 --- a/crash/subsystem/storage/__init__.py +++ b/crash/subsystem/storage/__init__.py @@ -99,12 +99,12 @@ def part_to_dev(self, part): @export def for_each_block_device(self, subtype=None): if subtype: - if subtype.type == self.device_type_type: + if subtype.type.unqualified() == self.device_type_type: subtype = subtype.address - elif subtype.type != self.device_type_type.pointer(): + elif subtype.type.unqualified() != self.device_type_type.pointer(): raise TypeError("subtype must be {} not {}" .format(self.device_type_type.pointer(), - subtype.type)) + subtype.type.unqualified())) for dev in for_each_class_device(self.block_class, subtype): if dev['type'] == self.disk_type.address: yield self.dev_to_gendisk(dev) @@ -123,16 +123,16 @@ def gendisk_name(self, gendisk): if gendisk.type.code == gdb.TYPE_CODE_PTR: gendisk = gendisk.dereference() - if gendisk.type == self.gendisk_type: + if gendisk.type.unqualified() == self.gendisk_type: return gendisk['disk_name'].string() - elif gendisk.type == self.hd_struct_type: + elif gendisk.type.unqualified() == self.hd_struct_type: parent = self.dev_to_gendisk(self.part_to_dev(gendisk)['parent']) return "{}{:d}".format(self.gendisk_name(parent), int(gendisk['partno'])) else: raise TypeError("expected {} or {}, not {}" .format(self.gendisk_type, self.hd_struct_type, - gendisk.type)) + gendisk.type.unqualified())) @export def block_device_name(self, bdev): From af5ec50ec9aeabcb936337b12b70e31ca6deb711 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 7 Sep 2018 19:26:29 +0200 Subject: [PATCH 019/367] crash:subsystem:storage: print endio callback when a bio can't be decoded When a bio can't be decoded, we print a generic message. Including the name of the endio may help in identifying its source easily. Signed-off-by: Jeff Mahoney --- crash/subsystem/storage/__init__.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/crash/subsystem/storage/__init__.py b/crash/subsystem/storage/__init__.py index 9e126bfa3c3..5edcc9b180b 100644 --- a/crash/subsystem/storage/__init__.py +++ b/crash/subsystem/storage/__init__.py @@ -75,8 +75,9 @@ def decode_bio(cls, bio): return cls.bio_decoders[long(bio['bi_end_io'])](bio) except KeyError: chain = { - 'description' : "{:x} bio: undecoded bio on {}".format( - long(bio), block_device_name(bio['bi_bdev'])), + 'description' : "{:x} bio: undecoded bio on {} ({})".format( + long(bio), block_device_name(bio['bi_bdev']), + bio['bi_end_io']), } return chain From db367a8aabeff663db1f177e43bb5b02b20b9adf Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 17 Sep 2018 07:38:33 -0400 Subject: [PATCH 020/367] README.md: Update with correct gdb branch and startup instructions The current gdb branch is gdb-8.1-suse-target. Also add startup instructions. Signed-off-by: Jeff Mahoney --- README.md | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 5170686d50f..122cfdd9139 100644 --- a/README.md +++ b/README.md @@ -2,5 +2,18 @@ This repository contains the python extensions for interacting with Linux kernel crash dumps. 
You'll need: -* libkdumpfile: https://github.com/ptesarik/libkdumpfile -* gdb-python: https://github.com/jeffmahoney/gdb-python/tree/python-working-target +* [libkdumpfile](https://github.com/ptesarik/libkdumpfile) +* [gdb-python](https://github.com/jeffmahoney/gdb-python/tree/gdb-8.1-suse-target) + +Packages for SUSE-created releases are available on the [Open Build Service](https://download.opensuse.org/repositories/home:/jeff_mahoney:/crash-python/). + +Crash-python requires the following to run properly: +- The complete debuginfo for the kernel to be debug, including modules +- The ELF images for the kernel and all modules +- The vmcore dump image from the crashed system + +To start: +`pycrash -d ` + +The `-d` option may be specified multiple times if multiple directories are +required. From ecf3678649f0e94ed3ab75922a87bb1bdaddb8ce Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Sun, 16 Sep 2018 08:33:18 -0400 Subject: [PATCH 021/367] crash.subsystem.storage.device-mapper: fix bio assignment for rq based dm We were assigning bio to next bio and then returning the next bio as both 'bio' and 'next'. Signed-off-by: Jeff Mahoney --- crash/subsystem/storage/device_mapper.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/crash/subsystem/storage/device_mapper.py b/crash/subsystem/storage/device_mapper.py index ae06161811a..5ff67cc31a5 100644 --- a/crash/subsystem/storage/device_mapper.py +++ b/crash/subsystem/storage/device_mapper.py @@ -40,8 +40,6 @@ def decode_clone_bio_rq(cls, bio): # while long(b) != 0: # b = b['bi_next'] - bio = info['orig'] - chain = { 'bio' : bio, 'next' : info['orig'], From 5c8a7dd703bcd460c292fc5d58a62708851a520e Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Sat, 15 Sep 2018 16:23:35 -0400 Subject: [PATCH 022/367] crash.subsystem.storage: add documentation Add documentation for public methods. Rename type registration callbacks to make it obvious they are intended to be private. Signed-off-by: Jeff Mahoney --- crash/subsystem/storage/__init__.py | 234 ++++++++++++++++++++++- crash/subsystem/storage/blocksq.py | 28 +++ crash/subsystem/storage/device_mapper.py | 49 ++++- 3 files changed, 303 insertions(+), 8 deletions(-) diff --git a/crash/subsystem/storage/__init__.py b/crash/subsystem/storage/__init__.py index 5edcc9b180b..4fb00f89ba5 100644 --- a/crash/subsystem/storage/__init__.py +++ b/crash/subsystem/storage/__init__.py @@ -27,14 +27,14 @@ class Storage(CrashBaseClass): 'disk_type', 'part_type' ] __symbol_callbacks = [ - ( 'disk_type', 'check_types' ), - ( 'part_type', 'check_types' ) ] - __type_callbacks__ = [ ('struct device_type', 'check_types' ) ] + ( 'disk_type', '_check_types' ), + ( 'part_type', '_check_types' ) ] + __type_callbacks__ = [ ('struct device_type', '_check_types' ) ] bio_decoders = {} @classmethod - def check_types(cls, result): + def _check_types(cls, result): try: if cls.part_type.type.unqualified() != cls.device_type_type: raise TypeError("part_type expected to be {} not {}" @@ -52,6 +52,35 @@ def check_types(cls, result): @export @classmethod def register_bio_decoder(cls, sym, decoder): + """ + Registers a bio decoder with the storage subsystem. + + A bio decoder is a method that accepts a bio, potentially + interprets the private members of the bio, and returns + a dictionary. The only mandatory member of the dictionary + is 'description' which contains a human-readable description + of the purpose of this bio. + + If the bio is part of a stack, the 'next' item should contain + the next object in the stack. 
It does not necessarily need + to be a bio. It does need to have a 'decoder' item declared + that will accept the given object. The decoder does not + need to be registered unless it will be a top-level decoder. + + Other items can be added as-needed to allow informed callers + to obtain direct information. + + Args: + sym (gdb.Symbol or gdb.Value): + The Symbol or Value describing a kernel function used as + a bio->b_end_io callback + decoder (method): A Python method that accepts a + gdb.Value(struct bio) + + Raises: + TypeError: sym is not a gdb.Symbol or gdb.Value + """ + if isinstance(sym, gdb.Symbol): sym = sym.value().address elif not isinstance(sym, gdb.Value): @@ -61,6 +90,26 @@ def register_bio_decoder(cls, sym, decoder): @export @classmethod def for_each_bio_in_stack(cls, bio): + """ + Iterates and decodes each bio involved in a stacked storage environment + + This method will return a dictionary describing each object + in the storage stack, starting with the provided bio, as + processed by each level's decoder. The stack will be interrupted + if an encountered object doesn't have a decoder specified. + + See register_bio_decoder for more detail. + + Args: + bio (gdb.Value): The initial struct bio to start + decoding + + Yields: + dict : Contains, minimally, the following item. + - description (str): A human-readable description of the bio. + Additional items may be available based on the + implmentation-specific decoder. + """ first = cls.bio_decoders[long(bio['bi_end_io'])](bio) if first: yield first @@ -71,6 +120,25 @@ def for_each_bio_in_stack(cls, bio): @export @classmethod def decode_bio(cls, bio): + """ + Decodes a single bio, if possible + + This method will return a dictionary describing a single bio + after decoding it using a registered decoder, if available. + + If no decoder is registered, a generic description will be + returned in the dictionary's 'description' field. + + Args: + bio (gdb.Value): The bio to decode + + Returns: + dict: Contains, minimally, the following item. + - description (str): A human-readable description of the bio. + Additional items may be available based on the + implmentation-specific decoder. + """ + try: return cls.bio_decoders[long(bio['bi_end_io'])](bio) except KeyError: @@ -83,22 +151,97 @@ def decode_bio(cls, bio): @export def dev_to_gendisk(self, dev): + """ + Converts a struct device that is embedded in a struct gendisk + back to the struct gendisk. + + Args: + dev (gdb.Value) : A struct device contained within + a struct gendisk. No checking is performed. Results + if other structures are provided are undefined. + + Returns: + gdb.Value : The converted struct hd_struct + """ return container_of(dev, self.gendisk_type, 'part0.__dev') @export def dev_to_part(self, dev): + """ + Converts a struct device that is embedded in a struct hd_struct + back to the struct hd_struct. + + Args: + dev (gdb.Value): A struct device embedded within a + struct hd_struct. No checking is performed. Results if other + structures are provided are undefined. + + Returns: + gdb.Value(struct hd_struct): The converted struct hd_struct + + """ return container_of(dev, self.hd_struct_type, '__dev') @export def gendisk_to_dev(self, gendisk): + """ + Converts a struct gendisk that embeds a struct device to + the struct device. + + Args: + dev (gdb.Value): A struct gendisk that embeds + a struct device. No checking is performed. Results + if other structures are provided are undefined. 
+ + Returns: + gdb.Value: The converted struct device + """ + return gendisk['part0']['__dev'].address @export def part_to_dev(self, part): + """ + Converts a struct hd_struct that embeds a struct device to + the struct device. + + Args: + dev (gdb.Value): A struct hd_struct that embeds + a struct device. No checking is performed. Results if + other structures are provided are undefined. + + Returns: + gdb.Value: The converted struct device + """ return part['__dev'].address @export def for_each_block_device(self, subtype=None): + """ + Iterates over each block device registered with the block class. + + This method iterates over the block_class klist and yields every + member found. The members are either struct gendisk or + struct hd_struct, depending on whether it describes an entire + disk or a partition, respectively. + + The members can be filtered by providing a subtype, which + corresponds to a the the type field of the struct device. + + Args: + subtype (gdb.Value, optional): The struct + device_type that will be used to match and filter. Typically + 'disk_type' or 'device_type' + + Yields: + gdb.Value - A struct gendisk + or struct hd_struct that meets the filter criteria. + + Raises: + RuntimeError: An unknown device type was encountered during + iteration. + """ + if subtype: if subtype.type.unqualified() == self.device_type_type: subtype = subtype.address @@ -117,10 +260,35 @@ def for_each_block_device(self, subtype=None): @export def for_each_disk(self): + """ + Iterates over each block device registered with the block class + that corresponds to an entire disk. + + This is an alias for for_each_block_device(disk_type) + """ + return self.for_each_block_device(self.disk_type) @export def gendisk_name(self, gendisk): + """ + Returns the name of the provided block device. + + This method evaluates the block device and returns the name, + including partition number, if applicable. + + Args: + gendisk(gdb.Value): + A struct gendisk or struct hd_struct for which to return + the name + + Returns: + str: the name of the block device + + Raises: + TypeError: gdb.Value does not describe a struct gendisk or + struct hd_struct + """ if gendisk.type.code == gdb.TYPE_CODE_PTR: gendisk = gendisk.dereference() @@ -137,20 +305,78 @@ def gendisk_name(self, gendisk): @export def block_device_name(self, bdev): + """ + Returns the name of the provided block device. + + This method evaluates the block device and returns the name, + including partition number, if applicable. + + Args: + bdev(gdb.Value): A struct block_device for + which to return the name + + Returns: + str: the name of the block device + """ return self.gendisk_name(bdev['bd_disk']) @export def is_bdev_inode(self, inode): + """ + Tests whether the provided struct inode describes a block device + + This method evaluates the inode and returns a True or False, + depending on whether the inode describes a block device. + + Args: + bdev(gdb.Value): The struct inode to test whether + it describes a block device. + + Returns: + bool: True if the inode describes a block device, False otherwise. + """ return inode['i_sb'] == self.blockdev_superblock @export def inode_to_block_device(self, inode): + """ + Returns the block device associated with this inode. + + If the inode describes a block device, return that block device. + Otherwise, raise TypeError. 
+ + Args: + inode(gdb.Value): The struct inode for which to + return the associated block device + + Returns: + gdb.Value: The struct block_device associated + with the provided struct inode + + Raises: + TypeError: inode does not describe a block device + """ if inode['i_sb'] != self.blockdev_superblock: raise TypeError("inode does not correspond to block device") return container_of(inode, self.bdev_inode_type, 'vfs_inode')['bdev'] @export def inode_on_bdev(self, inode): + """ + Returns the block device associated with this inode. + + If the inode describes a block device, return that block device. + Otherwise, return the block device, if any, associated + with the inode's super block. + + Args: + inode(gdb.Value): The struct inode for which to + return the associated block device + + Returns: + gdb.Value: The struct block_device associated + with the provided struct inode + """ if self.is_bdev_inode(inode): return self.inode_to_block_device(inode) else: diff --git a/crash/subsystem/storage/blocksq.py b/crash/subsystem/storage/blocksq.py index efe87a39b31..27785117280 100644 --- a/crash/subsystem/storage/blocksq.py +++ b/crash/subsystem/storage/blocksq.py @@ -23,6 +23,20 @@ class SingleQueueBlock(CrashBaseClass): @export def for_each_request_in_queue(self, queue): + """ + Iterates over each struct request in request_queue + + This method iterates over the request_queue's queuelist and + returns a request for each member. + + Args: + queue(gdb.Value): The struct request_queue + used to iterate + + Yields: + gdb.Value: Each struct request contained within + the request_queue's queuelist + """ if long(queue) == 0: raise NoQueueError("Queue is NULL") return list_for_each_entry(queue['queue_head'], self.request_type, @@ -31,4 +45,18 @@ def for_each_request_in_queue(self, queue): @export @classmethod def request_age_ms(cls, request): + """ + Returns the age of the request in milliseconds + + This method returns the difference between the current time + (jiffies) and the request's start_time, in milliseconds. + + Args: + request(gdb.Value): The struct request used + to determine age + + Returns: + long: Difference between the request's start_time and + current jiffies in milliseconds. + """ return kernel.jiffies_to_msec(kernel.jiffies - request['start_time']) diff --git a/crash/subsystem/storage/device_mapper.py b/crash/subsystem/storage/device_mapper.py index 5ff67cc31a5..a63ca3727bb 100644 --- a/crash/subsystem/storage/device_mapper.py +++ b/crash/subsystem/storage/device_mapper.py @@ -19,19 +19,39 @@ class DeviceMapper(CrashBaseClass): __types__ = [ 'struct dm_rq_clone_bio_info *', 'struct dm_target_io *' ] __symbol_callbacks__ = [ - ('end_clone_bio', 'register_end_clone_bio'), - ('clone_endio', 'register_clone_endio') ] + ('end_clone_bio', '_register_end_clone_bio'), + ('clone_endio', '_register_clone_endio') ] @classmethod - def register_end_clone_bio(cls, sym): + def _register_end_clone_bio(cls, sym): block.register_bio_decoder(sym, cls.decode_clone_bio_rq) @classmethod - def register_clone_endio(cls, sym): + def _register_clone_endio(cls, sym): block.register_bio_decoder(sym, cls.decode_clone_bio) @classmethod def decode_clone_bio_rq(cls, bio): + """ + Decodes a request-based device mapper cloned bio + + This method decodes a cloned bio generated by request-based + device mapper targets. 
+ + Args: + bio(gdb.Value): A struct bio generated by a + request-based device mapper target + + Returns: + dict: Contains the following items: + - description (str): Human-readable description of the bio + - bio (gdb.Value): The provided bio + - next (gdb.Value): The original bio that was + the source of this one + - decoder (method(gdb.Value)): The decoder for + the original bio + """ + info = bio['bi_private'].cast(cls.dm_rq_clone_bio_info_p_type) count = bio['bi_cnt']['counter'] @@ -53,6 +73,27 @@ def decode_clone_bio_rq(cls, bio): @classmethod def decode_clone_bio(cls, bio): + """ + Decodes a bio-based device mapper cloned bio + + This method decodes a cloned bio generated by request-based + device mapper targets. + + Args: + bio(gdb.Value): A struct bio generated by a + bio-based device mapper target + + Returns: + dict: Contains the following items: + - description (str): Human-readable description of the bio + - bio (gdb.Value): The provided bio + - tio (gdb.Value): The struct + dm_target_tio for this bio + - next (gdb.Value): The original bio that was + the source of this one + - decoder (method(gdb.Value)): The decoder for the + original bio + """ tio = bio['bi_private'].cast(cls.dm_target_io_p_type) next_bio = tio['io']['bio'] From b06e290786b0c570bb0116cbd68045a0149e6960 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Sun, 16 Sep 2018 08:30:39 -0400 Subject: [PATCH 023/367] crash.subsystem.filesystems: add documentation Add documentation for public methods. Rename type registration callbacks to make it obvious they are intended to be private. Signed-off-by: Jeff Mahoney --- crash/subsystem/filesystem/__init__.py | 136 +++++++++++++++++++++++-- crash/subsystem/filesystem/btrfs.py | 25 +++++ crash/subsystem/filesystem/ext3.py | 26 ++++- 3 files changed, 179 insertions(+), 8 deletions(-) diff --git a/crash/subsystem/filesystem/__init__.py b/crash/subsystem/filesystem/__init__.py index 50306740db2..bdcf694946f 100644 --- a/crash/subsystem/filesystem/__init__.py +++ b/crash/subsystem/filesystem/__init__.py @@ -16,36 +16,93 @@ class FileSystem(CrashBaseClass): __types__ = [ 'struct dio *', 'struct buffer_head *' ] __symbol_callbacks__ = [ - ('dio_bio_end', 'register_dio_bio_end'), - ('dio_bio_end_aio', 'register_dio_bio_end'), - ('mpage_end_io', 'register_mpage_end_io') ] + ('dio_bio_end', '_register_dio_bio_end'), + ('dio_bio_end_aio', '_register_dio_bio_end'), + ('mpage_end_io', '_register_mpage_end_io') ] buffer_head_decoders = {} @classmethod - def register_dio_bio(cls, symval): + def _register_dio_bio(cls, symval): block.register_bio_decoder(cls.dio_bio_end, cls.decode_dio_bio) @classmethod - def register_dio_bio_end(cls, sym): + def _register_dio_bio_end(cls, sym): block.register_bio_decoder(sym, cls.decode_dio_bio) @classmethod - def register_mpage_end_io(cls, sym): + def _register_mpage_end_io(cls, sym): block.register_bio_decoder(sym, cls.decode_mpage) @export @staticmethod def super_fstype(sb): + """ + Returns the file system type's name for a given superblock. + + Args: + sb (gdb.Value): The struct super_block for + which to return the file system type's name + + Returns: + str: The file system type's name + """ return sb['s_type']['name'].string() @export @classmethod def register_buffer_head_decoder(cls, sym, decoder): + """ + Registers a buffer_head decoder with the filesystem subsystem. + + A buffer_head decoder is a method thats acepts a buffer_head, + potentially interprets the private members of the buffer_head, + and returns a dictionary. 
The only mandatory member of the + dictionary is 'description' which contains a human-readable + description of the purpose of this buffer_head. + + If the buffer_head is part of a stack, the 'next' item should contain + the next object in the stack. It does not necessarily need to be + a buffer_head. It does need to have a 'decoder' item declared + that will accept the given object. The decoder does not need to + be registered unless it will be a top-level decoder. + + Other items can be added as-needed to allow informed callers + to obtain direct informatiom. + + Args: + sym (gdb.Value): + The kernel function used as buffer_head->b_h_end_io callback + """ + cls.buffer_head_decoders[sym] = decoder @classmethod def decode_dio_bio(cls, bio): + """ + Decodes a bio used for direct i/o. + + This method decodes a bio generated by the direct-io component of + the file system subsystem. The bio can either have been submitted + directly or asynchronously. + + Args: + bio(gdb.Value): The struct bio to be decoded, generated + by the direct i/o component + + Returns: + dict: Contains the following items: + - description (str): Human-readable description of the bio + - bio (gdb.Value): The struct bio being decoded + - dio (gdb.Value): The direct i/o component of + the bio + - fstype (str): The name of the file system which submitted + this bio + - inode (gdb.Value): The struct inode, if any, + that owns the file associated with this bio + - offset (long): The offset within the file, in bytes + - devname (str): The device name associated with this bio + """ dio = bio['bi_private'].cast(cls.dio_p_type) fstype = cls.super_fstype(dio['inode']['i_sb']) dev = block_device_name(dio['inode']['i_sb']['s_bdev']) @@ -65,6 +122,25 @@ def decode_dio_bio(cls, bio): @classmethod def decode_mpage(cls, bio): + """ + Decodes a bio used for multipage i/o. + + This method decodes a bio generated by the mpage component of + the file system subsystem. + + Args: + bio(gdb.Value): The struct bio to be decoded, generated + by the mpage component + + Returns: + dict: Contains the following items: + - description (str): Human-readable description of the bio + - bio (gdb.Value): The struct bio being decoded + - fstype (str): The name of the file system which submitted + this bio + - inode (gdb.Value): The struct inode, if any, + that owns the file associated with this bio + """ inode = bio['bi_io_vec'][0]['bv_page']['mapping']['host'] fstype = cls.super_fstype(inode['i_sb']) chain = { @@ -80,6 +156,24 @@ def decode_mpage(cls, bio): @classmethod def decode_bio_buffer_head(cls, bio): + """ + Decodes a bio used to perform i/o for buffer_heads + + This method decodes a bio generated by buffer head submission. + + Args: + bio(gdb.Value): The struct bio to be decoded, generated + by buffer head submission + + Returns: + dict: Contains the following items: + - description (str): Human-readable description of the bio + - bio (gdb.Value): The struct bio being decoded + - next (gdb.Value): The buffer_head that + initiated this bio. + - decoder (gdb.Value): + A decoder for the buffer head + """ bh = bio['bi_private'].cast(cls.buffer_head_p_type) chain = { 'description' : @@ -93,6 +187,23 @@ def decode_bio_buffer_head(cls, bio): @classmethod def decode_buffer_head(cls, bh): + """ + Decodes a struct buffer_head + + This method decodes a struct buffer_head, using an + implementation-specific decoder, if available + + Args: + bio(gdb.Value): The struct buffer_head to be + decoded. + + Returns: + dict: Minimally contains the following items. 
+ - description (str): Human-readable description of the bio + - bh (gdb.Value): The struct buffer_head + Additional items may be available based on the + implmentation-specific decoder. + """ endio = bh['b_end_io'] try: return cls.buffer_head_decoders[endio](bh) @@ -109,6 +220,19 @@ def decode_buffer_head(cls, bh): @classmethod def decode_end_buffer_write_sync(cls, bh): + """ + Decodes a struct buffer_head submitted by file systems for routine + synchronous writeback. + + Args: + bio(gdb.Value): The struct buffer_head to be + decoded. + + Returns: + dict: Minimally contains the following items. + - description (str): Human-readable description of the bio + - bh (gdb.Value): The struct buffer_head + """ desc = ("{:x} buffer_head: for dev {}, block {}, size {} (unassociated)" .format(block_device_name(bh['b_bdev']), bh['b_blocknr'], bh['b_size'])) diff --git a/crash/subsystem/filesystem/btrfs.py b/crash/subsystem/filesystem/btrfs.py index bd0e32c96dc..3fd60bdf219 100644 --- a/crash/subsystem/filesystem/btrfs.py +++ b/crash/subsystem/filesystem/btrfs.py @@ -14,8 +14,33 @@ class BtrfsFileSystem(CrashBaseClass): @classmethod def btrfs_inode(cls, vfs_inode): + """ + Converts a VFS inode to a btrfs inode + + This method converts a struct inode to a struct btrfs_inode. + + Args: + vfs_inode (gdb.Value): The struct inode to convert + to a struct btrfs_inode + + Returns: + gdb.Value: The converted struct btrfs_inode + """ return container_of(vfs_inode, cls.btrfs_inode_type, 'vfs_inode') @classmethod def btrfs_sb_info(cls, super_block): + """ + Converts a VFS superblock to a btrfs fs_info + + This method converts a struct super_block to a struct btrfs_fs_info + + Args: + super_block (gdb.Value): The struct super_block + to convert to a struct btrfs_fs_info. + + Returns: + gdb.Value: The converted struct + btrfs_fs_info + """ return super_block['s_fs_info'].cast(cls.btrfs_fs_info_p_type) diff --git a/crash/subsystem/filesystem/ext3.py b/crash/subsystem/filesystem/ext3.py index 5f45ca1c1f0..b6d99dbdbdb 100644 --- a/crash/subsystem/filesystem/ext3.py +++ b/crash/subsystem/filesystem/ext3.py @@ -17,10 +17,10 @@ class Ext3(CrashBaseClass): __symbol_callbacks__ = [ - ('journal_end_buffer_io_sync', 'register_journal_buffer_io_sync') ] + ('journal_end_buffer_io_sync', '_register_journal_buffer_io_sync') ] @classmethod - def register_journal_buffer_io_sync(cls, sym): + def _register_journal_buffer_io_sync(cls, sym): # ext3/ext4 and jbd/jbd2 share names but not implementations b = gdb.block_for_pc(long(sym.value().address)) sym = get_symbol_value('journal_end_buffer_io_sync', b) @@ -29,6 +29,28 @@ def register_journal_buffer_io_sync(cls, sym): @classmethod def decode_journal_buffer_io_sync(cls, bh): + """ + Decodes an ext3 journal buffer + + This method decodes a struct buffer_head with and end_io callback + of journal_end_buffer_io_sync. 
+ + Args: + bh (gdb.Value): The struct buffer_head to + decode + + Returns: + dict: Contains the following items: + - description (str): Human-readable description of + the buffer head + - bh (gdb.Value): The buffer head being + decoded + - fstype (str): The name of the file system type being decoded + - devname (str): The name of the device the file system uses + - offset (long): The offset, in bytes, of the block described + - length (long): The length of the block described + """ + fstype = "journal on ext3" devname = block_device_name(bh['b_bdev']) chain = { From 271312fa69c9531e300222476038045e6d1db060 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 17 Sep 2018 07:59:22 -0400 Subject: [PATCH 024/367] contrib: add xfs-dump-ail.py This script dumps the AIL for an XFS file system. Signed-off-by: Jeff Mahoney --- contrib/xfs-dump-ail.py | 76 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 76 insertions(+) create mode 100644 contrib/xfs-dump-ail.py diff --git a/contrib/xfs-dump-ail.py b/contrib/xfs-dump-ail.py new file mode 100644 index 00000000000..049801f3673 --- /dev/null +++ b/contrib/xfs-dump-ail.py @@ -0,0 +1,76 @@ +#!/usr/bin/env python +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +from crash.types.list import list_for_each_entry +from crash.util import container_of +import gdb + +# This script dumps the inodes and buffers in the XFS AIL. The mount +# address is hard-coded and would need to be replaced for use. + +XFS_LI_EFI = 0x1236 +XFS_LI_EFD = 0x1237 +XFS_LI_IUNLINK = 0x1238 +XFS_LI_INODE = 0x123b # aligned ino chunks, var-size ibufs +XFS_LI_BUF = 0x123c # v2 bufs, variable sized inode bufs +XFS_LI_DQUOT = 0x123d +XFS_LI_QUOTAOFF = 0x123e + +XFS_LI_TYPES = { + XFS_LI_EFI : "XFS_LI_EFI", + XFS_LI_EFD : "XFS_LI_EFD", + XFS_LI_IUNLINK : "XFS_LI_IUNLINK", + XFS_LI_INODE : "XFS_LI_INODE", + XFS_LI_BUF : "XFS_LI_BUF", + XFS_LI_EFI : "XFS_LI_EFI", + XFS_LI_DQUOT : "XFS_LI_DQUOT", + XFS_LI_QUOTAOFF : "XFS_LI_QUOTAOFF", +} + +def xfs_for_each_ail_entry(ail): + xfs_log_item_type = gdb.lookup_type('struct xfs_log_item') + for item in list_for_each_entry(ail['xa_ail'], xfs_log_item_type, 'li_ail'): + yield item + +def xfs_for_each_ail_log_item(mp): + for item in for_each_ail_entry(mp['m_ail']): + yield item + +xfs_buf_log_item_type = gdb.lookup_type('struct xfs_buf_log_item') +xfs_inode_log_item_type = gdb.lookup_type('struct xfs_inode_log_item') +xfs_efi_log_item_type = gdb.lookup_type('struct xfs_efi_log_item') +xfs_efd_log_item_type = gdb.lookup_type('struct xfs_efd_log_item') +xfs_dq_logitem_type = gdb.lookup_type('struct xfs_dq_logitem') +xfs_qoff_logitem_type = gdb.lookup_type('struct xfs_qoff_logitem') + +def xfs_for_each_ail_log_item_typed(mp): + for item in for_each_xfs_ail_item(mp): + li_type = long(item['li_type']) + if li_type == XFS_LI_BUF: + yield container_of(item, xfs_buf_log_item_type, 'bli_item') + elif li_type == XFS_LI_INODE: + yield container_of(item, xfs_inode_log_item_type, 'ili_item') + elif li_type == XFS_LI_EFI: + yield container_of(item, xfs_efi_log_item_type, 'efi_item') + elif li_type == XFS_LI_EFD: + yield container_of(item, xfs_efd_log_item_type, 'efd_item') + elif li_type == XFS_LI_IUNLINK: + yield li_type + elif li_type == XFS_LI_DQUOT: + yield container_of(item, xfs_dq_logitem, 'qli_item') + elif li_type == XFS_LI_QUOTAOFF: + yield container_of(item, xfs_qoff_logitem, 'qql_item') + else: + print XFS_LI_TYPES[li_type] + +xfs_mount = gdb.lookup_type('struct xfs_mount').pointer() +mp = 
gdb.Value(0xffff880bf34a1800).cast(xfs_mount).dereference() + +for item in xfs_for_each_ail_log_item_typed(mp): + if item.type == xfs_buf_log_item_type: + buf = item['bli_buf'] + print "xfs_buf @ {:x} blockno={}".format(long(buf), buf['b_bn']) + elif item.type == xfs_inode_log_item_type: + xfs_inode = item['ili_inode'] + print "inode @ {:x}".format(long(xfs_inode['i_vnode'].address)) + else: + print "{} @ {:x}".format(item.type, long(item.address)) From 30ae8cee13bb3a2ca20f8738d69aa8b06dc31323 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 17 Sep 2018 08:02:43 -0400 Subject: [PATCH 025/367] contrib: add stuck-requests.py This script dumps the queued requests for every disk on the system. Signed-off-by: Jeff Mahoney --- contrib/stuck-requests.py | 47 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 contrib/stuck-requests.py diff --git a/contrib/stuck-requests.py b/contrib/stuck-requests.py new file mode 100644 index 00000000000..17c5eaaf75e --- /dev/null +++ b/contrib/stuck-requests.py @@ -0,0 +1,47 @@ +#!/usr/bin/env python +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +# bsc#1031358 + +# This script dumps stuck requests for every disk on the system + +from crash.subsystem.storage import for_each_disk +from crash.subsystem.storage import for_each_bio_in_stack +from crash.subsystem.storage import gendisk_name +from crash.subsystem.storage.blocksq import for_each_request_in_queue +from crash.types.list import list_for_each_entry +from crash.util import get_symbol_value +from crash.cache.syscache import kernel, jiffies_to_msec + +empty = [] + +flush_end_io = get_symbol_value('flush_end_io') + +for b in for_each_disk(): + name = gendisk_name(b) + count = 0 + for r in for_each_request_in_queue(b['queue']): + age_in_jiffies = kernel.jiffies - r['start_time'] + age = float(long(kernel.jiffies_to_msec(age_in_jiffies))) / 1000 + if count == 0: + print name + if r['bio']: + print "{}: {:x} request: age={}s, bio chain".format( + count, long(r.address), age, long(r['bio'])) + n=0 + for entry in for_each_bio_in_stack(r['bio']): + print " {}: {}".format(n, entry['description']) + n += 1 + else: + if r['end_io'] == flush_end_io: + print "{}: {:x} request: age={}s, pending flush request".format( + count, long(r.address), age) + else: + print "{}: {:x} request: start={}, undecoded".format( + count, long(r.address), age) + count += 1 + print + + if count == 0: + empty.append(name) + +#print "Queues for the following devices were empty: {}".format(", ".join(empty)) From 29e79588e5490ad6981651744e28882490ab1d76 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 17 Sep 2018 08:04:50 -0400 Subject: [PATCH 026/367] contrib: add xfs-analyze.py This script cross references items in the the XFS AIL with locked buffers and inodes in every task's stack. 
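The stack-walking half of that cross reference is ordinary gdb Python: switch
to each thread, walk its frames, and look for a function of interest. A rough,
self-contained sketch of the pattern ('vfs_create' is only an example; the
script below also matches __fput and then checks the inodes it finds against
the AIL):

    import gdb

    def tasks_in_function(function_name):
        """Yield threads whose stack currently contains the named function."""
        for thread in gdb.selected_inferior().threads():
            thread.switch()
            try:
                frame = gdb.selected_frame()
                while frame is not None:
                    fn = frame.function()
                    if fn is not None and fn.name == function_name:
                        yield thread
                        break
                    frame = frame.older()
            except gdb.error:
                # unwinding can fail partway up a stack; skip that task
                pass

    for thread in tasks_in_function('vfs_create'):
        print("PID {} is in vfs_create".format(thread.ptid))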
Signed-off-by: Jeff Mahoney --- contrib/xfs-analyze.py | 168 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 168 insertions(+) create mode 100644 contrib/xfs-analyze.py diff --git a/contrib/xfs-analyze.py b/contrib/xfs-analyze.py new file mode 100644 index 00000000000..cc87c15d8f1 --- /dev/null +++ b/contrib/xfs-analyze.py @@ -0,0 +1,168 @@ +#!/usr/bin/env python +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +# bsc#1025860 + +# This script cross references items in the AIL with buffers and inodes +# locked in every task's stack + +from crash.types.list import list_for_each_entry +from crash.util import container_of +import gdb + +dentry_type = gdb.lookup_type('struct dentry') +ail_type = gdb.lookup_type('struct xfs_ail') +xfs_log_item_type = gdb.lookup_type('struct xfs_log_item') +xfs_inode_log_item_type = gdb.lookup_type('struct xfs_inode_log_item') +ail = gdb.Value(0xffff885e3b9e3a40).cast(ail_type.pointer()).dereference() +print ail + +# This should go into a crash.types.rwsem +RWSEM_ACTIVE_MASK = 0xffffffffL +RWSEM_UNLOCKED_VALUE = 0 +RWSEM_ACTIVE_BIAS = 1 +RWSEM_WAITING_BIAS = 0xffffffff00000000L +RWSEM_ACTIVE_READ_BIAS = 1 +RWSEM_ACTIVE_WRITE_BIAS = 0xffffffff00000001L + +def inode_paths(inode): + for dentry in list_for_each_entry(inode['i_dentry'], dentry_type, ''): + names = [dentry['d_name']['name'].string()] + parent = dentry['d_parent'] + while parent.address != parent['d_parent'].address: + names.insert(0, parent['d_name']['name'].string()) + parent = parent['d_parent'] + + yield '/'.join(names) + +def rwsem_read_trylock(rwsem): + count = long(rwsem['count']) & 0xffffffffffffffffL + if count == 0: + return True + if count & RWSEM_ACTIVE_WRITE_BIAS: + return False + if count >= 0: + return True + +locked_inodes = {} + +def check_item(item): + if item['li_type'] == 0x123b: # inode + iitem = container_of(item, xfs_inode_log_item_type, 'ili_item') + if iitem['ili_inode']['i_pincount']['counter'] > 0: +# print "".format(iitem['ili_inode'].address) + return 1 + if not rwsem_read_trylock(iitem['ili_inode']['i_lock']['mr_lock']): + inode = iitem['ili_inode']['i_vnode'].address +# print "".format(inode) + print oct(int(inode['i_mode'])) + if long(inode) in locked_inodes: + print "in AIL multiple times" + else: + locked_inodes[long(inode)] = iitem['ili_inode'] +# for path in inode_paths(inode): +# print path + return 2 +# print "" + elif item['li_type'] == 0x123c: # buffer + pass + else: + print "*** Odd type {}".format(item['li_type']) + return 0 + +# superblock ffff885e2ec11000 +# fs_info 0xffff885e33f7e000 +# m_ail 0xffff885e3b9e3a40 + +last_pushed = ail['xa_last_pushed_lsn'] +target = ail['xa_target'] + +found = None +count = 0 +last_lsn = 0 +total = 0 +for item in list_for_each_entry(ail['xa_ail'], xfs_log_item_type, 'li_ail'): + + # xfsaild_push fast forwards to the last pushed before starting + # pushes are two (three, kind of) stages for inodes, which most of + # the ail list is for this report + # 1) attempt to push the inode item, which writes it back to its buffer + # 2) upon success, attempt to push the buffer + # 3) when the buffer is successfully written, the callback is called + # which removes the item from the list + # The list prior to last_pushed contains the items for which we're + # waiting on writeback. 
+ if item['li_lsn'] < last_pushed: + count += 1 + continue + if last_lsn == 0: + print "Skipped {} items before last_pushed ({})".format(count, last_pushed) + count = 0 + elif item['li_lsn'] > target: + print "** Target LSN reached: {}".format(target) + break + + total += 1 + + if last_lsn != item['li_lsn']: + if last_lsn != 0: + print "*** {:<4} total items for LSN {} ({} ready, {} pinned, {} locked)".format(count, last_lsn, ready, pinned, locked) + count = 0 +# print "*** Processing LSN {}".format(item['li_lsn']) + pinned = 0 + locked = 0 + ready = 0 + + ret = check_item(item) + if ret == 1: + pinned += 1 + elif ret == 2: + locked += 1 + else: + if locked and ready == 0: + print "<{} locked>".format(locked) + ready += 1 + + last_lsn = item['li_lsn'] + count += 1 + + # We only care about the first 100 items + if count > 104: + break + +checked = 0 +dead = 0 +for thread in gdb.selected_inferior().threads(): + thread.switch() + try: + f = gdb.selected_frame() + while True: + f = f.older() + fn = f.function() + if not fn: + break + if fn.name == '__fput': + fp = f.read_var('file') + inode = fp['f_path']['dentry']['d_inode'] + checked += 1 + if inode in locked_inodes: + print inode + break + if fn.name == 'vfs_create': + try: + inode = f.read_var('dir') + except ValueError, e: + print f + inode = None + checked += 1 + if long(inode) in locked_inodes: + print "PID {} inode {}".format(thread.ptid, hex(long(inode))) + dead += 1 + break + + except gdb.error, e: + pass + +print "Checked {} inodes in __fput or vfs_create".format(checked) +print "Total items processed: {}".format(total) +print "Total inodes tracked: {}".format(len(locked_inodes.keys())) +print "Total inodes locked and waiting: {}".format(dead) From 5f69622070eb4038f38bc889a3aa54c2cc4ee9f9 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Sun, 16 Sep 2018 04:46:16 -0400 Subject: [PATCH 027/367] UNTESTED: crash.subsystem.storage.device-mapper: update for device mapper end_io changes Linux commit bfc6d41cee53b (dm: stop using bi_private) landed in v3.15, which means that resolving the context from bio->bi_private would fail. This commit updates the symbol callbacks to test for the 'clone' member of each of the contexts and assigns a getter method to get the context in each decoder. 
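The member test works because gdb.Type supports 'in' for field names once the
type can be resolved. A simplified sketch of the resulting dispatch, reduced
to one of the two contexts; the real patch caches the chosen getter from the
symbol callback instead of picking it at import time, and container_of is used
here as in the contrib scripts above, with the containing struct type:

    import gdb
    from crash.util import container_of

    def has_field(type_name, field):
        """Return True if the struct has the named member in this kernel."""
        try:
            return field in gdb.lookup_type(type_name)
        except gdb.error:
            return False

    if has_field('struct dm_target_io', 'clone'):
        def get_tio(bio):
            # v3.15 and later: the cloned bio is embedded in the dm_target_io
            return container_of(bio, gdb.lookup_type('struct dm_target_io'),
                                'clone')
    else:
        def get_tio(bio):
            # older kernels: the context still hangs off bio->bi_private
            tio_p = gdb.lookup_type('struct dm_target_io').pointer()
            return bio['bi_private'].cast(tio_p)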
Signed-off-by: Jeff Mahoney --- crash/subsystem/storage/device_mapper.py | 37 +++++++++++++++++++++--- 1 file changed, 33 insertions(+), 4 deletions(-) diff --git a/crash/subsystem/storage/device_mapper.py b/crash/subsystem/storage/device_mapper.py index a63ca3727bb..02a21dff295 100644 --- a/crash/subsystem/storage/device_mapper.py +++ b/crash/subsystem/storage/device_mapper.py @@ -24,10 +24,20 @@ class DeviceMapper(CrashBaseClass): @classmethod def _register_end_clone_bio(cls, sym): + if 'clone' in cls.dm_rq_clone_bio_info_p_type.target(): + getter = cls._get_clone_bio_rq_info_3_7 + else: + getter = cls._get_clone_bio_rq_info_old + cls._get_clone_bio_rq_info = getter block.register_bio_decoder(sym, cls.decode_clone_bio_rq) @classmethod def _register_clone_endio(cls, sym): + if 'clone' in cls.dm_target_io_p_type.target(): + getter = cls._get_clone_bio_tio_3_15 + else: + getter = cls._get_clone_bio_tio_old + cls._get_clone_bio_tio = getter block.register_bio_decoder(sym, cls.decode_clone_bio) @classmethod @@ -46,14 +56,15 @@ def decode_clone_bio_rq(cls, bio): dict: Contains the following items: - description (str): Human-readable description of the bio - bio (gdb.Value): The provided bio + - tio (gdb.Value(): The struct + dm_target_io for this bio - next (gdb.Value): The original bio that was the source of this one - decoder (method(gdb.Value)): The decoder for the original bio """ - info = bio['bi_private'].cast(cls.dm_rq_clone_bio_info_p_type) - count = bio['bi_cnt']['counter'] + info = cls._get_clone_bio_rq_info(bio) # We can pull the related bios together here if required # b = bio['bi_next'] @@ -62,6 +73,7 @@ def decode_clone_bio_rq(cls, bio): chain = { 'bio' : bio, + 'tio' : info['tio'], 'next' : info['orig'], 'description' : '{:x} bio: Request-based Device Mapper on {}'.format( @@ -71,6 +83,14 @@ def decode_clone_bio_rq(cls, bio): return chain + @classmethod + def _get_clone_bio_rq_info_old(cls, bio): + return bio['bi_private'].cast(cls.dm_rq_clone_bio_info_p_type) + + @classmethod + def _get_clone_bio_rq_info_3_7(cls, bio): + return container_of(bio, cls.dm_rq_clone_bio_info_p_type, 'clone') + @classmethod def decode_clone_bio(cls, bio): """ @@ -94,7 +114,7 @@ def decode_clone_bio(cls, bio): - decoder (method(gdb.Value)): The decoder for the original bio """ - tio = bio['bi_private'].cast(cls.dm_target_io_p_type) + tio = cls._get_clone_bio_tio(bio) next_bio = tio['io']['bio'] @@ -107,8 +127,17 @@ def decode_clone_bio(cls, bio): long(next_bio['bi_sector'])), 'bio' : bio, 'tio' : tio, - 'next' : tio['io']['bio'], + 'next' : next_bio, 'decoder' : block.decode_bio, } return chain + + @classmethod + def _get_clone_bio_tio_old(cls, bio): + return bio['bi_private'].cast(cls.dm_target_io_p_type) + + @classmethod + def _get_clone_bio_tio_3_15(cls, bio): + return container_of(bio['bi_private'], + cls.dm_clone_bio_info_p_type, 'clone') From d607e113f0f85cd99906cc052ade424c32c052bb Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 17 Sep 2018 09:09:38 -0400 Subject: [PATCH 028/367] crash.infra.lookup: remove silly objfile argument from MinimalSymbolCallback The callback is executed when the objfile is loaded. If an objfile is provided, it has already been loaded, so the callback would never fire. 
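The constraint comes from how gdb's new_objfile event behaves: a connected
handler only fires for objfiles loaded after the connection, so an
already-loaded objfile can never satisfy it. A bare illustration of that flow,
assuming the patched gdb that crash-python already requires ('jiffies' is just
a placeholder symbol name):

    import gdb

    def on_new_objfile(event):
        # event.new_objfile is the objfile that was just loaded; objfiles
        # loaded before connect() below will never reach this handler.
        msym = gdb.lookup_minimal_symbol('jiffies', None, None)
        if msym is not None:
            print("jiffies at {:#x}".format(int(msym.value().address)))
            gdb.events.new_objfile.disconnect(on_new_objfile)

    gdb.events.new_objfile.connect(on_new_objfile)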
Signed-off-by: Jeff Mahoney --- crash/infra/lookup.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/crash/infra/lookup.py b/crash/infra/lookup.py index 9fe57dcdc19..9560207291f 100644 --- a/crash/infra/lookup.py +++ b/crash/infra/lookup.py @@ -16,20 +16,18 @@ from crash.exceptions import DelayedAttributeError class MinimalSymbolCallback(ObjfileEventCallback): - def __init__(self, name, callback, symbol_file=None, objfile=None): + def __init__(self, name, callback, symbol_file=None): self.name = name self.symbol_file = symbol_file - self.objfile = objfile self.callback = callback super(MinimalSymbolCallback, self).__init__() def check_ready(self): - return gdb.lookup_minimal_symbol(self.name, self.symbol_file, - self.objfile) + return gdb.lookup_minimal_symbol(self.name, self.symbol_file, None) def __str__(self): - return ("<{}({}, {}, {}, {})>" + return ("<{}({}, {}, {})>" .format(self.__class__.__name__, self.name, - self.symbol_file, self.objfile, self.callback)) + self.symbol_file, self.callback)) class SymbolCallback(ObjfileEventCallback): def __init__(self, name, callback, block=None, From 9b7a0486bc680001d2faf708c13bf8f126d71cde Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 17 Sep 2018 09:17:55 -0400 Subject: [PATCH 029/367] crash.infra.lookup: remove silly block argument from SymbolCallback The callback is executed when the objfile is loaded. If an block is provided, it has already been resolved from an objfile. If the objfile is already loaded, the callback would never fire. Signed-off-by: Jeff Mahoney --- crash/infra/lookup.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/crash/infra/lookup.py b/crash/infra/lookup.py index 9560207291f..c538f4a121b 100644 --- a/crash/infra/lookup.py +++ b/crash/infra/lookup.py @@ -30,21 +30,18 @@ def __str__(self): self.symbol_file, self.callback)) class SymbolCallback(ObjfileEventCallback): - def __init__(self, name, callback, block=None, - domain=gdb.SYMBOL_VAR_DOMAIN): + def __init__(self, name, callback, domain=gdb.SYMBOL_VAR_DOMAIN): self.name = name - self.block = block self.domain = domain self.callback = callback super(SymbolCallback, self).__init__() def check_ready(self): - return gdb.lookup_symbol(self.name, self.block, self.domain)[0] + return gdb.lookup_symbol(self.name, None, self.domain)[0] def __str__(self): - return ("<{}({}, {}, {})>" - .format(self.__class__.__name__, self.name, - self.block, self.domain)) + return ("<{}({}, {})>" + .format(self.__class__.__name__, self.name, self.domain)) class SymvalCallback(SymbolCallback): def check_ready(self): From 536e3b25934b540fde0a3f977f7c96d1b653f29b Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 17 Sep 2018 09:28:20 -0400 Subject: [PATCH 030/367] crash.infra.lookup: remove unused callback_wrapper Signed-off-by: Jeff Mahoney --- crash/infra/lookup.py | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/crash/infra/lookup.py b/crash/infra/lookup.py index c538f4a121b..07c3f5c1132 100644 --- a/crash/infra/lookup.py +++ b/crash/infra/lookup.py @@ -132,17 +132,6 @@ def callback(self, value): def __str__(self): return "{} attached with {}".format(self.__class__, str(self.cb)) -def callback_wrapper(object): - def __init__(self, cls, func): - super(callback_wrapper, self).__init__() - self.cls = cls - self.func = func - - def __call__(self, *args, **kwargs): - if isinstance(self.func, str): - self.func = getattr(self.cls, self.func) - return self.func(cls, *args, **kwargs) - class ClassProperty(object): 
def __init__(self, get): self.get = get From efeb0263a656a2a1c395859075b649b41ca26799 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 17 Sep 2018 10:00:53 -0400 Subject: [PATCH 031/367] crash: document infrastructure classes Signed-off-by: Jeff Mahoney --- crash/exceptions.py | 7 ++ crash/infra/__init__.py | 3 +- crash/infra/callback.py | 38 ++++++++--- crash/infra/lookup.py | 73 ++++++++++++++++++++- crash/session.py | 20 +++++- crash/util.py | 138 ++++++++++++++++++++++++++++++++++++++++ 6 files changed, 265 insertions(+), 14 deletions(-) diff --git a/crash/exceptions.py b/crash/exceptions.py index a43d27d10f2..f0e79141818 100644 --- a/crash/exceptions.py +++ b/crash/exceptions.py @@ -6,15 +6,22 @@ from __future__ import print_function class MissingSymbolError(RuntimeError): + """The requested symbol cannot be located.""" pass class MissingTypeError(RuntimeError): + """The requested type cannot be located.""" pass class CorruptedError(RuntimeError): + """A corrupted data structure has been encountered.""" pass class DelayedAttributeError(AttributeError): + """ + The attribute has been declared but the symbol to fill it has not yet been + located. + """ def __init__(self, owner, name): msg = "{} has delayed attribute {} but it has not been completed." super(DelayedAttributeError, self).__init__(msg.format(owner, name)) diff --git a/crash/infra/__init__.py b/crash/infra/__init__.py index 5014da41805..2e1eee8b27e 100644 --- a/crash/infra/__init__.py +++ b/crash/infra/__init__.py @@ -55,7 +55,8 @@ def export(func): return func class _CrashBaseMeta(type): - """This metaclass handles both exporting methods to the module namespace + """ + This metaclass handles both exporting methods to the module namespace and handling asynchronous loading of types and symbols. To enable it, all you need to do is define your class as follows: diff --git a/crash/infra/callback.py b/crash/infra/callback.py index 08d64396282..c9bdbbbf9a3 100644 --- a/crash/infra/callback.py +++ b/crash/infra/callback.py @@ -10,12 +10,23 @@ import sys class CallbackCompleted(RuntimeError): + """The callback has already been completed and is no longer valid""" def __init__(self, callback_obj): msg = "{} has already completed.".format(callback_obj.name) super(CallbackCompleted, self).__init__(msg) self.callback_obj = callback_obj class ObjfileEventCallback(object): + """ + A generic objfile callback class + + When GDB loads an objfile, it can perform callbacks. These callbacks + are triggered for every objfile loaded. Once marked complete, the + callback is removed so it doesn't trigger for future objfile loads. + + Derived classes need only implement the complete and check_ready + methods. 
+ """ def __init__(self): self.completed = True completed = False @@ -32,11 +43,11 @@ def __init__(self): if completed is False: self.completed = False - gdb.events.new_objfile.connect(self.new_objfile_callback) + gdb.events.new_objfile.connect(self._new_objfile_callback) def complete(self): if not self.completed: - gdb.events.new_objfile.disconnect(self.new_objfile_callback) + gdb.events.new_objfile.disconnect(self._new_objfile_callback) self.completed = True else: raise CallbackCompleted(self) @@ -56,7 +67,7 @@ def setup_symbol_cache_flush_callback(cls): def flush_symbol_cache_callback(self, event): gdb.execute("maint flush-symbol-cache") - def new_objfile_callback(self, event): + def _new_objfile_callback(self, event): # GDB purposely copies the event list prior to calling the callbacks # If we remove an event from another handler, it will still be sent if self.completed: @@ -69,14 +80,21 @@ def new_objfile_callback(self, event): self.complete() def check_ready(self): - """check_ready returns the value that will be passed to the callback. - A return value other than None or False will be passed to the - callback.""" + """ + check_ready returns the value that will be passed to the callback. + A return value other than None or False will be passed to the + callback. + """ return True def callback(self, result): - """The callback may return None, True, or False. A return value of - None or True indicates that the callback is completed and may - be disconnected. A return value of False indicates that the - callback should stay connected for future use.""" + """ + The callback may return None, True, or False. A return value of + None or True indicates that the callback is completed and may + be disconnected. A return value of False indicates that the + callback should stay connected for future use. + + Args: + result: The result to pass to the callback + """ pass diff --git a/crash/infra/lookup.py b/crash/infra/lookup.py index 07c3f5c1132..cebf07b7244 100644 --- a/crash/infra/lookup.py +++ b/crash/infra/lookup.py @@ -16,7 +16,18 @@ from crash.exceptions import DelayedAttributeError class MinimalSymbolCallback(ObjfileEventCallback): + """ + A callback that executes when the named minimal symbol is + discovered in the objfile and returns the gdb.MinimalSymbol. + """ def __init__(self, name, callback, symbol_file=None): + """ + Args: + name (str): The name of the minimal symbol to discover + callback (method): The callback to execute when the minimal + symbol is discovered + symbol_file (str, optional, default=None): Name of symbol file + """ self.name = name self.symbol_file = symbol_file self.callback = callback @@ -30,7 +41,19 @@ def __str__(self): self.symbol_file, self.callback)) class SymbolCallback(ObjfileEventCallback): + """ + A callback that executes when the named symbol is discovered in the + objfile and returns the gdb.Symbol. + """ def __init__(self, name, callback, domain=gdb.SYMBOL_VAR_DOMAIN): + """ + Args: + name (str): The name of the symbol to discover + callbacks (method): The callback to execute when the minimal + symbol is discover + domain (gdb.Symbol constant, i.e. SYMBOL_*_DOMAIN): The domain + to search for the symbol + """ self.name = name self.domain = domain self.callback = callback @@ -44,6 +67,10 @@ def __str__(self): .format(self.__class__.__name__, self.name, self.domain)) class SymvalCallback(SymbolCallback): + """ + A callback that executes when the named symbol is discovered in the + objfile and returns the gdb.Value associated with it. 
+ """ def check_ready(self): sym = super(SymvalCallback, self).check_ready() if sym is not None: @@ -54,6 +81,10 @@ def check_ready(self): return None class TypeCallback(ObjfileEventCallback): + """ + A callback that executes when the named type is discovered in the + objfile and returns the gdb.Type associated with it. + """ def __init__(self, name, callback, block=None): self.name = name self.block = block @@ -71,6 +102,10 @@ def __str__(self): .format(self.__class__.__name__, self.name, self.block)) class DelayedValue(object): + """ + A generic class for making class attributes available that describe + to-be-loaded symbols, minimal symbols, and types. + """ def __init__(self, name): self.name = name self.value = None @@ -86,21 +121,44 @@ def callback(self, value): self.value = value class DelayedMinimalSymbol(DelayedValue): + """ + A DelayedValue that handles minimal symbols. + """ def __init__(self, name): + """ + Args: + name (str): The name of the minimal symbol + """ super(DelayedMinimalSymbol, self).__init__(name) self.cb = MinimalSymbolCallback(name, self.callback) def __str__(self): return "{} attached with {}".format(self.__class__, str(self.cb)) class DelayedSymbol(DelayedValue): + """ + A DelayedValue that handles symbols. + """ def __init__(self, name): + """ + Args: + name (str): The name of the symbol + """ super(DelayedSymbol, self).__init__(name) self.cb = SymbolCallback(name, self.callback) def __str__(self): return "{} attached with {}".format(self.__class__, str(self.cb)) class DelayedType(DelayedValue): + """ + A DelayedValue for types. + """ def __init__(self, name, pointer=False): + """ + Args: + name (str): The name of the type. Must not be a pointer type. + pointer (bool, optional, default=False): Whether the requested + type should be returned as a pointer to that type. + """ super(DelayedType, self).__init__(name) self.pointer = pointer self.cb = TypeCallback(name, self.callback) @@ -114,6 +172,9 @@ def callback(self, value): self.value = value class DelayedSymval(DelayedSymbol): + """ + A DelayedSymbol that returns the gdb.Value associated with the symbol. + """ def callback(self, value): symval = value.value() if symval.type.code == gdb.TYPE_CODE_FUNC: @@ -124,8 +185,10 @@ def __str__(self): return "{} attached with {}".format(self.__class__, str(self.cb)) class DelayedMinimalSymval(DelayedMinimalSymbol): - """Sets the property to contain the value of the address of the - minimal symbol as a long.""" + """ + A DelayedMinimalSymbol that returns the address of the + minimal symbol as a long. + """ def callback(self, value): self.value = long(value.value().address) @@ -140,6 +203,12 @@ def __get__(self, instance, owner): return self.get(owner) class DelayedLookups(object): + """ + A class for handling dynamic creation of class attributes that + contain delayed values. The attributes are specified using + special names. These are documented in the _CrashBaseMeta + documentation. + """ @classmethod def _resolve_type(cls, name): pointer = False diff --git a/crash/session.py b/crash/session.py index cf4ef2cddc0..b5433756a00 100644 --- a/crash/session.py +++ b/crash/session.py @@ -13,7 +13,25 @@ from kdumpfile import kdumpfile class Session(object): - """crash.Session is the main driver component for crash-python""" + """ + crash.Session is the main driver component for crash-python + + The Session class loads the kernel, kernel modules, debuginfo, + and vmcore and auto loads any sub modules for autoinitializing + commands and subsystems. 
+ + Args: + kernel_exec (str, optional): The path to the kernel executable + vmcore (str, optional): The path to the vmcore + kernelpath (str, optional): The path the kernel name to use + when reporting errors + searchpath (list of str, optional): Paths to directory trees to + search for kernel modules and debuginfo + debug (bool, optional, default=False): Whether to enable verbose + debugging output + """ + + def __init__(self, kernel_exec=None, vmcore=None, kernelpath=None, searchpath=None, debug=False): self.vmcore_filename = vmcore diff --git a/crash/util.py b/crash/util.py index c015911dd93..0b5e7e42fb5 100644 --- a/crash/util.py +++ b/crash/util.py @@ -74,6 +74,23 @@ class TypesUtilClass(CrashBaseClass): @export def container_of(self, val, gdbtype, member): + """ + Returns an object that contains the specified object at the given + offset. + + Args: + val (gdb.Value): The value to be converted. It can refer to an + allocated structure or a pointer. + gdbtype (gdb.Type): The type of the object that will be generated + member (str): The name of the member in the target struct that + contains `val`. + + Returns: + gdb.Value: The converted object, of the type specified by + the caller. + Raises: + TypeError: val is not a gdb.Value + """ if not isinstance(val, gdb.Value): raise TypeError("container_of expects gdb.Value") charp = self.char_p_type @@ -86,6 +103,20 @@ def container_of(self, val, gdbtype, member): @export @staticmethod def get_symbol_value(symname, block=None, domain=None): + """ + Returns the value associated with a named symbol + + Args: + symname (str): Name of the symbol to resolve + block (gdb.Block, optional, default=None): The block to resolve + the symbol within + domain (gdb.Symbol constant SYMBOL_*_DOMAIN, optional, default=None): + The domain to search for the symbol + Returns: + gdb.Value: The requested value + Raises: + MissingSymbolError: The symbol or value cannot be located + """ if domain is None: domain = gdb.SYMBOL_VAR_DOMAIN sym = gdb.lookup_symbol(symname, block, domain)[0] @@ -96,6 +127,20 @@ def get_symbol_value(symname, block=None, domain=None): @export @classmethod def safe_get_symbol_value(cls, symname, block=None, domain=None): + """ + Returns the value associated with a named symbol + + Args: + symname (str): Name of the symbol to resolve + block (gdb.Block, optional, default=None): The block to resolve + the symbol within + domain (gdb.Symbol constant SYMBOL_*_DOMAIN, optional, default=None): + The domain to search for the symbol + Returns: + gdb.Value: The requested value or + None: if the symbol or value cannot be found + + """ try: return cls.get_symbol_value(symname, block, domain) except MissingSymbolError: @@ -104,6 +149,19 @@ def safe_get_symbol_value(cls, symname, block=None, domain=None): @export @staticmethod def resolve_type(val): + """ + Resolves a gdb.Type given a type, value, string, or symbol + + Args: + val (gdb.Type, gdb.Value, str, gdb.Symbol): The object for which + to resolve the type + + Returns: + gdb.Type: The resolved type + + Raises: + TypeError: The object type of val is not valid + """ if isinstance(val, gdb.Type): gdbtype = val elif isinstance(val, gdb.Value): @@ -158,6 +216,25 @@ def __offsetof(cls, val, spec, error): @export @classmethod def offsetof_type(cls, val, spec, error=True): + """ + Returns the offset and type of a named member of a structure + + Args: + val (gdb.Type, gdb.Symbol, gdb.Value, or str): The type that + contains the specified member, must be a struct or union + spec (str): The member of the 
member to resolve + error (bool, optional, default=True): Whether to consider lookup + failures an error + + Returns: + Tuple of: + long: The offset of the resolved member + gdb.Type: The type of the resolved member + + Raises: + InvalidArgumentError: val is not a valid type + InvalidComponentError: spec is not valid for the type + """ gdbtype = None try: gdbtype = resolve_type(val) @@ -188,6 +265,24 @@ def offsetof_type(cls, val, spec, error=True): @export @classmethod def offsetof(cls, val, spec, error=True): + """ + Returns the offset of a named member of a structure + + Args: + val (gdb.Type, gdb.Symbol, gdb.Value, or str): The type that + contains the specified member, must be a struct or union + spec (str): The member of the member to resolve + error (bool, optional, default=True): Whether to consider lookup + failures an error + + Returns: + long: The offset of the resolved member + None: The member could not be resolved + + Raises: + InvalidArgumentError: val is not a valid type + InvalidComponentError: spec is not valid for the type + """ res = cls.offsetof_type(val, spec, error) if res: return res[0] @@ -196,6 +291,23 @@ def offsetof(cls, val, spec, error=True): @export @classmethod def find_member_variant(cls, gdbtype, variants): + """ + Examines the given type and returns the first found member name + + Over time, structure member names may change. This routine + allows the caller to provide a list of potential names and returns + the first one found. + + Args: + gdbtype (gdb.Type): The type of structure or union to examine + variants (list of str): The names of members to search + + Returns: + str: The first member name found + + Raises: + TypeError: No named member could be found + """ for v in variants: if cls.offsetof(gdbtype, v, False) is not None: return v @@ -205,6 +317,15 @@ def find_member_variant(cls, gdbtype, variants): @export @staticmethod def safe_lookup_type(name, block=None): + """ + Looks up a gdb.Type without throwing an exception on failure + + Args: + name (str): The name of the type to look up + + Returns: + gdb.Type for requested type or None if it could not be found + """ try: return gdb.lookup_type(name, block) except gdb.error: @@ -213,11 +334,28 @@ def safe_lookup_type(name, block=None): @export @staticmethod def array_size(value): + """ + Returns the number of elements in an array + + Args: + value (gdb.Value): The array to size + """ return value.type.sizeof // value[0].type.sizeof @export @staticmethod def get_typed_pointer(val, gdbtype): + """ + Returns a pointer to the requested type at the given address + + Args: + val (gdb.Value, str, or long): The address for which to provide + a casted pointer + gdbtype (gdb.Type): The type of the pointer to return + + Returns: + gdb.Value: The casted pointer of the requested type + """ if gdbtype.code != gdb.TYPE_CODE_PTR: gdbtype = gdbtype.pointer() if isinstance(val, gdb.Value): From e60f708d186862bef3adca5d409933be36bc26a7 Mon Sep 17 00:00:00 2001 From: Tom de Vries Date: Tue, 25 Sep 2018 08:25:13 +0200 Subject: [PATCH 032/367] tests: Fix expected error in tests_target.py Since commit 1b04cc4 "crash.session: move most of setup to crash.kernel", gdb.Target throws a TypeError instead of a RuntimeError in __init__ when the vmcore argument is illegal. Update the corresponding test in test_target.py accordingly. 
--- tests/test_target.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_target.py b/tests/test_target.py index 4a25918ea7d..525b9640f0c 100644 --- a/tests/test_target.py +++ b/tests/test_target.py @@ -15,7 +15,7 @@ def setUp(self): self.do_real_tests = os.path.exists("tests/vmcore") def test_bad_file(self): - with self.assertRaises(RuntimeError): + with self.assertRaises(TypeError): x = Target("/does/not/exist") def test_real_open_with_no_kernel(self): From 6f33fa4d9b8c534ad44ea87f26c820c02b789567 Mon Sep 17 00:00:00 2001 From: Tom de Vries Date: Mon, 24 Sep 2018 17:22:51 +0200 Subject: [PATCH 033/367] tests: Fix reload in test_syscache.py for python3.4 Function imp.reload has been deprecated in python3.4. Use importlib.reload instead. --- tests/test_syscache.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/test_syscache.py b/tests/test_syscache.py index 5ffcda036a1..fd17b14ac31 100644 --- a/tests/test_syscache.py +++ b/tests/test_syscache.py @@ -7,6 +7,9 @@ import unittest import gdb +import sys +if sys.version_info >= (3, 4): + from importlib import reload from crash.exceptions import DelayedAttributeError fake_config = ( From a1bd92f1206c9be6cb4844b1cf41d53a4ee5d851 Mon Sep 17 00:00:00 2001 From: Tom de Vries Date: Mon, 24 Sep 2018 17:09:42 +0200 Subject: [PATCH 034/367] tests: Fix StringIO import in test_syscmd.py for python3 The cStringIO module has been removed in python3, so "from cStringIO import StringIO" no longer works. Fix this by importing StringIO from io instead. --- tests/test_syscmd.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/test_syscmd.py b/tests/test_syscmd.py index cc141793fed..beaa817fe7a 100644 --- a/tests/test_syscmd.py +++ b/tests/test_syscmd.py @@ -8,7 +8,10 @@ import unittest import gdb import sys -from cStringIO import StringIO +if sys.version_info.major >= 3: + from io import StringIO +else: + from cStringIO import StringIO from crash.exceptions import MissingSymbolError from crash.commands import CrashCommandLineError From fea09334e706df2089bf5286f44a90dbf04f834a Mon Sep 17 00:00:00 2001 From: Tom de Vries Date: Mon, 24 Sep 2018 17:10:09 +0200 Subject: [PATCH 035/367] crash.infra.lookup: Fix formatting for python3 In python3, indentation is rejected as inconsistent if a source file mixes tabs and spaces in a way that makes the relative indentation level of subsequent lines dependent on the tab size. This fixes a "TabError: inconsistent use of tabs and spaces in indentation" in crash/infra/lookup.py by using indentation on the offending line that is consistent with the surrounding lines. --- crash/infra/lookup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crash/infra/lookup.py b/crash/infra/lookup.py index cebf07b7244..b9ef48f5484 100644 --- a/crash/infra/lookup.py +++ b/crash/infra/lookup.py @@ -92,7 +92,7 @@ def __init__(self, name, callback, block=None): super(TypeCallback, self).__init__() def check_ready(self): - try: + try: return gdb.lookup_type(self.name, self.block) except gdb.error as e: return None From 234d3c062573dc3a6439fae2f203626f42c14036 Mon Sep 17 00:00:00 2001 From: Tom de Vries Date: Wed, 26 Sep 2018 15:07:35 +0200 Subject: [PATCH 036/367] crash.cache.syscache: Handle read_memory returning a memoryview for python3 If gdb is build with python3, gdb.selected_inferior().read_memory returns a buffer object, but with python3, it returns a memoryview object. This patch makes sure we handle both cases. 
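Condensed, the compatibility shim is just a normalizing wrapper around
read_memory():

    import gdb

    def read_str(address, size, encoding='utf-8'):
        """Read size bytes at address, returning str on python2 and python3."""
        buf = gdb.selected_inferior().read_memory(address, size)
        if isinstance(buf, memoryview):      # python3 gdb hands back memoryview
            return buf.tobytes().decode(encoding)
        return str(buf)                      # python2 gdb hands back a buffer

The decompressed config blob needs the same treatment, since zlib returns
bytes on python3 and str on python2.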
--- crash/cache/syscache.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/crash/cache/syscache.py b/crash/cache/syscache.py index 84a3d18afbb..b703615275e 100644 --- a/crash/cache/syscache.py +++ b/crash/cache/syscache.py @@ -63,7 +63,15 @@ def __getattr__(self, name): @staticmethod def read_buf(address, size): - return str(gdb.selected_inferior().read_memory(address, size)) + return gdb.selected_inferior().read_memory(address, size) + + @staticmethod + def read_buf_str(address, size): + buf = gdb.selected_inferior().read_memory(address, size) + if isinstance(buf, memoryview): + return buf.tobytes().decode('utf-8') + else: + return str(buf) def decompress_config_buffer(self): MAGIC_START = 'IKCFG_ST' @@ -74,12 +82,12 @@ def decompress_config_buffer(self): data_len = self.kernel_config_data.type.sizeof buf_len = len(MAGIC_START) - buf = self.read_buf(data_addr, buf_len) + buf = self.read_buf_str(data_addr, buf_len) if buf != MAGIC_START: raise IOError("Missing MAGIC_START in kernel_config_data.") buf_len = len(MAGIC_END) - buf = self.read_buf(data_addr + data_len - buf_len - 1, buf_len) + buf = self.read_buf_str(data_addr + data_len - buf_len - 1, buf_len) if buf != MAGIC_END: raise IOError("Missing MAGIC_END in kernel_config_data.") @@ -87,6 +95,10 @@ def decompress_config_buffer(self): buf_len = data_len - len(MAGIC_START) - len(MAGIC_END) buf = self.read_buf(data_addr + len(MAGIC_START), buf_len) self.config_buffer = zlib.decompress(buf, 16 + zlib.MAX_WBITS) + if (isinstance(self.config_buffer, bytes)): + self.config_buffer = str(self.config_buffer.decode('utf-8')) + else: + self.config_buffer = str(self.config_buffer) return self.config_buffer def __str__(self): From 24ff3d34e19215fbb75b9046f0a702559e27867b Mon Sep 17 00:00:00 2001 From: Tom de Vries Date: Mon, 24 Sep 2018 15:06:56 +0200 Subject: [PATCH 037/367] crash.sh, crash, contrib: Fix except syntax for python3 In python3, the python2 syntax "except exc, var" has been changed into "except exc as var". For python 2.6, the new "as" syntax is accepted as an alternative to the "," syntax. This patch updates the remaining "," occurances to the "as" syntax, fixing the syntax for python3. While this means that those occurances no longer work with pre-python2.6, there are already 25 occurances of the "as" syntax in crash-python, so it's safe to assume that pre-python2.6 is already not supported. 
--- contrib/xfs-analyze.py | 4 ++-- crash.sh | 2 +- crash/__init__.py | 14 +++++++------- crash/arch/x86_64.py | 2 +- crash/commands/dmesg.py | 2 +- 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/contrib/xfs-analyze.py b/contrib/xfs-analyze.py index cc87c15d8f1..09995c86f9b 100644 --- a/contrib/xfs-analyze.py +++ b/contrib/xfs-analyze.py @@ -150,7 +150,7 @@ def check_item(item): if fn.name == 'vfs_create': try: inode = f.read_var('dir') - except ValueError, e: + except ValueError as e: print f inode = None checked += 1 @@ -159,7 +159,7 @@ def check_item(item): dead += 1 break - except gdb.error, e: + except gdb.error as e: pass print "Checked {} inodes in __fput or vfs_create".format(checked) diff --git a/crash.sh b/crash.sh index eb00abe25c2..a229b132d3d 100755 --- a/crash.sh +++ b/crash.sh @@ -123,7 +123,7 @@ from __future__ import print_function import sys try: import crash.session -except RuntimeError, e: +except RuntimeError as e: print("crash-python: {}, exiting".format(str(e)), file=sys.stderr) sys.exit(1) path = "$SEARCHDIRS".split(' ') diff --git a/crash/__init__.py b/crash/__init__.py index 1079c80e093..383d1cccd9f 100644 --- a/crash/__init__.py +++ b/crash/__init__.py @@ -10,37 +10,37 @@ try: x = gdb.Target -except AttributeError, e: +except AttributeError as e: raise RuntimeError("the installed gdb doesn't provide gdb.Target") try: x = gdb.lookup_symbol('x', None) -except TypeError, e: +except TypeError as e: raise RuntimeError("the installed gdb doesn't support looking up symbols without a gdb.Block") try: x = gdb.MinSymbol -except AttributeError, e: +except AttributeError as e: raise RuntimeError("the installed gdb doesn't provide gdb.MinSymbol") try: x = gdb.Register -except AttributeError, e: +except AttributeError as e: raise RuntimeError("the installed gdb doesn't provide gdb.Register") try: x = gdb.Symbol.section -except AttributeError, e: +except AttributeError as e: raise RuntimeError("the installed gdb doesn't provide gdb.Symbol.section") try: x = gdb.Inferior.new_thread -except AttributeError, e: +except AttributeError as e: raise RuntimeError("the installed gdb doesn't provide gdb.Inferior.new_thread") try: x = gdb.Objfile.architecture -except AttributeError, e: +except AttributeError as e: raise RuntimeError("the installed gdb doesn't provide gdb.Objfile.architecture") del x diff --git a/crash/arch/x86_64.py b/crash/arch/x86_64.py index 62c396a5839..70011b5edae 100644 --- a/crash/arch/x86_64.py +++ b/crash/arch/x86_64.py @@ -53,7 +53,7 @@ def fetch_register_active(self, thread, register): continue try: thread.registers[reg].value = task.regs[reg] - except KeyError, e: + except KeyError as e: pass def fetch_register_scheduled_inactive(self, thread, register): diff --git a/crash/commands/dmesg.py b/crash/commands/dmesg.py index 1680d9eb5ce..dd8701b8bfe 100644 --- a/crash/commands/dmesg.py +++ b/crash/commands/dmesg.py @@ -223,7 +223,7 @@ def log_from_idx(self, logbuf, idx, dict_needed=False): def get_log_msgs(self, dict_needed=False): try: idx = self.log_first_idx - except DelayedAttributeError, e: + except DelayedAttributeError as e: raise LogTypeException('not structured log') if self.clear_seq < self.log_first_seq: From 64abe0c2d9fdc9697abc698587e3b912aca1489e Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Thu, 16 Nov 2017 12:35:05 +0100 Subject: [PATCH 038/367] types/list: support gdb.Symbol list_head in list_for_each() The caller of list_for_each() can then avoid doing sym.value(). 
Signed-off-by: Vlastimil Babka --- crash/types/list.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crash/types/list.py b/crash/types/list.py index b9f37cc8498..a47196d73a5 100644 --- a/crash/types/list.py +++ b/crash/types/list.py @@ -28,6 +28,8 @@ class TypesListClass(CrashBaseClass): @export def list_for_each(self, list_head): pending_exception = None + if isinstance(list_head, gdb.Symbol): + list_head = list_head.value() if not isinstance(list_head, gdb.Value): raise TypeError("list_head must be gdb.Value representing 'struct list_head' or a 'struct list_head *' not {}" .format(type(list_head).__name__)) From 3f7a14cbee7373c0eb8a66824abc885e3fec1a20 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Mon, 20 Nov 2017 08:48:48 +0100 Subject: [PATCH 039/367] types/list: add optional include_head parameter Sometimes lists don't have a standalone list_head, but we iterate them starting with particular object's embedded head. For example the "init_task" swapper. Support that with an optional parameter. Now, 'pyps' will list [swapper] among the tasks. Signed-off-by: Vlastimil Babka --- crash/kernel.py | 2 +- crash/types/list.py | 9 ++++++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/crash/kernel.py b/crash/kernel.py index 9214d17430f..f2a8b954ae4 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -246,7 +246,7 @@ def setup_tasks(self): task_count = 0 tasks = [] - for taskg in list_for_each_entry(task_list, init_task.type, 'tasks'): + for taskg in list_for_each_entry(task_list, init_task.type, 'tasks', include_head=True): tasks.append(taskg) for task in list_for_each_entry(taskg['thread_group'], init_task.type, 'thread_group'): tasks.append(task) diff --git a/crash/types/list.py b/crash/types/list.py index a47196d73a5..d4e58c8f740 100644 --- a/crash/types/list.py +++ b/crash/types/list.py @@ -26,7 +26,7 @@ class TypesListClass(CrashBaseClass): __types__ = [ 'struct list_head' ] @export - def list_for_each(self, list_head): + def list_for_each(self, list_head, include_head=False): pending_exception = None if isinstance(list_head, gdb.Symbol): list_head = list_head.value() @@ -42,6 +42,9 @@ def list_for_each(self, list_head): if long(list_head.address) == 0: raise CorruptListError("list_head is NULL pointer.") + if include_head: + yield list_head.address + try: nxt = list_head['next'] prev = list_head @@ -87,8 +90,8 @@ def list_for_each(self, list_head): raise pending_exception @export - def list_for_each_entry(self, list_head, gdbtype, member): - for node in list_for_each(list_head): + def list_for_each_entry(self, list_head, gdbtype, member, include_head=False): + for node in list_for_each(list_head, include_head=include_head): if node.type != self.list_head_type.pointer(): raise TypeError("Type {} found. Expected struct list_head *." .format(str(node.type))) From d3b2e59b4e58a6193fdd6ced94da1b68fc55eba0 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Wed, 2 May 2018 15:39:40 +0200 Subject: [PATCH 040/367] util: add array_for_each() Since gdb array objects are not iterable, add a helper generator for that. 
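The helper hides the index arithmetic a gdb.Value of array type otherwise
forces on the caller; the two loops below are equivalent for any such value:

    from crash.util import array_for_each, array_size

    def print_array(value):
        """value is any gdb.Value whose type is an array."""
        # without the helper:
        for i in range(array_size(value)):
            print(value[i])
        # with the helper:
        for element in array_for_each(value):
            print(element)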
Signed-off-by: Vlastimil Babka --- crash/util.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/crash/util.py b/crash/util.py index 0b5e7e42fb5..f63ff66328d 100644 --- a/crash/util.py +++ b/crash/util.py @@ -374,3 +374,10 @@ def get_typed_pointer(val, gdbtype): return val + @export + @staticmethod + def array_for_each(value): + size = array_size(value) + for i in range(array_size(value)): + yield value[i] + From 8646f1cedb5b8eb6978961370ab26adb99fadd4a Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Wed, 2 May 2018 15:42:57 +0200 Subject: [PATCH 041/367] types/list: support iterating lists in reverse order Add an optional reverse=False parameter for that. Signed-off-by: Vlastimil Babka --- crash/types/list.py | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/crash/types/list.py b/crash/types/list.py index d4e58c8f740..6327e5fb761 100644 --- a/crash/types/list.py +++ b/crash/types/list.py @@ -26,7 +26,7 @@ class TypesListClass(CrashBaseClass): __types__ = [ 'struct list_head' ] @export - def list_for_each(self, list_head, include_head=False): + def list_for_each(self, list_head, include_head=False, reverse=False): pending_exception = None if isinstance(list_head, gdb.Symbol): list_head = list_head.value() @@ -42,14 +42,20 @@ def list_for_each(self, list_head, include_head=False): if long(list_head.address) == 0: raise CorruptListError("list_head is NULL pointer.") + next_ = 'next' + prev_ = 'prev' + if reverse: + next_ = 'prev' + prev_ = 'next' + if include_head: yield list_head.address try: - nxt = list_head['next'] + nxt = list_head[next_] prev = list_head if long(nxt) == 0: - raise CorruptListError("next pointer is NULL") + raise CorruptListError("{} pointer is NULL".format(next_)) node = nxt.dereference() except gdb.error as e: raise BufferError("Failed to read list_head {:#x}: {}" @@ -59,39 +65,39 @@ def list_for_each(self, list_head, include_head=False): yield node.address try: - if long(prev.address) != long(node['prev']): - error = ("broken prev link {:#x} -next-> {:#x} -prev-> {:#x}" - .format(long(prev.address), long(node.address), - long(node['prev']))) + if long(prev.address) != long(node[prev_]): + error = ("broken {} link {:#x} -{}-> {:#x} -{}-> {:#x}" + .format(prev_, long(prev.address), next_, long(node.address), + prev_, long(node[prev_]))) pending_exception = CorruptListError(error) # broken prev link means there might be a cycle that # does not include the initial head, so start detecting # cycles fast = node - nxt = node['next'] + nxt = node[next_] if fast is not None: # are we detecting cycles? 
advance fast 2 times and compare # each with our current node (Floyd's Tortoise and Hare # algorithm) for i in range(2): - fast = fast['next'].dereference() + fast = fast[next_].dereference() if node.address == fast.address: raise ListCycleError("Cycle in list detected.") prev = node if long(nxt) == 0: - raise CorruptListError("next pointer is NULL") + raise CorruptListError("{} pointer is NULL".format(next_)) node = nxt.dereference() except gdb.error as e: - raise BufferError("Failed to read list_head {:#x}: {}" - .format(long(node.address), str(e))) + raise BufferError("Failed to read list_head {:#x} in list {:#x}: {}" + .format(long(node.address), long(list_head.address), str(e))) if pending_exception is not None: raise pending_exception @export - def list_for_each_entry(self, list_head, gdbtype, member, include_head=False): - for node in list_for_each(list_head, include_head=include_head): + def list_for_each_entry(self, list_head, gdbtype, member, include_head=False, reverse=False): + for node in list_for_each(list_head, include_head=include_head, reverse=reverse): if node.type != self.list_head_type.pointer(): raise TypeError("Type {} found. Expected struct list_head *." .format(str(node.type))) From dc2c7e50e6bdc24124cc3aed0b7993b4d6609670 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Thu, 12 Jul 2018 10:45:57 +0200 Subject: [PATCH 042/367] types/list: fix and improve error handling The cycle detection code can stumble on bogus list element faster than the real iterator and then it will report a wrong element as bogus. Fix that by catching exceptions in the fast iterator separately and just cancelling cycle detection in that case. Also yield elements only after trying to read the next pointer from them, to avoid yielding unreadable elements. Signed-off-by: Vlastimil Babka --- crash/types/list.py | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/crash/types/list.py b/crash/types/list.py index 6327e5fb761..dedb8fce0ce 100644 --- a/crash/types/list.py +++ b/crash/types/list.py @@ -62,8 +62,6 @@ def list_for_each(self, list_head, include_head=False, reverse=False): .format(long(list_head.address), str(e))) while node.address != list_head.address: - yield node.address - try: if long(prev.address) != long(node[prev_]): error = ("broken {} link {:#x} -{}-> {:#x} -{}-> {:#x}" @@ -75,7 +73,14 @@ def list_for_each(self, list_head, include_head=False, reverse=False): # cycles fast = node nxt = node[next_] + # only yield after trying to read something from the node, no + # point in giving out bogus list elements + yield node.address + except gdb.error as e: + raise BufferError("Failed to read list_head {:#x} in list {:#x}: {}" + .format(long(node.address), long(list_head.address), str(e))) + try: if fast is not None: # are we detecting cycles? 
advance fast 2 times and compare # each with our current node (Floyd's Tortoise and Hare @@ -84,14 +89,17 @@ def list_for_each(self, list_head, include_head=False, reverse=False): fast = fast[next_].dereference() if node.address == fast.address: raise ListCycleError("Cycle in list detected.") + except gdb.error: + # we hit an unreadable element, so just stop detecting cycles + # and the slow iterator will hit it as well + fast = None + + prev = node + if long(nxt) == 0: + raise CorruptListError("{} -> {} pointer is NULL" + .format(node.address, next_)) + node = nxt.dereference() - prev = node - if long(nxt) == 0: - raise CorruptListError("{} pointer is NULL".format(next_)) - node = nxt.dereference() - except gdb.error as e: - raise BufferError("Failed to read list_head {:#x} in list {:#x}: {}" - .format(long(node.address), long(list_head.address), str(e))) if pending_exception is not None: raise pending_exception From 654ffc596956bfbc9a3951d18089030aa92aa96f Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Mon, 16 Jul 2018 14:52:07 +0200 Subject: [PATCH 043/367] cache/syscache: return None for non-existing CONFIG_* keys Make it easier to handle querying for CONFIG keys without the need to handle KeyError at each site. Signed-off-by: Vlastimil Babka --- crash/cache/syscache.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/crash/cache/syscache.py b/crash/cache/syscache.py index b703615275e..de173a3007f 100644 --- a/crash/cache/syscache.py +++ b/crash/cache/syscache.py @@ -121,7 +121,10 @@ def _parse_config(self): return self.ikconfig_cache def __getitem__(self, name): - return self.ikconfig_cache[name] + try: + return self.ikconfig_cache[name] + except KeyError: + return None class CrashKernelCache(CrashCache): __symvals__ = [ 'avenrun' ] From e56f7f270352a57336a15526a50df674a2f36bd4 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Sat, 9 Feb 2019 22:37:32 +0100 Subject: [PATCH 044/367] crash.sh: print tracebacks for unhandled exceptions It's good to know why we are exiting unexpectedly. Signed-off-by: Vlastimil Babka --- crash.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/crash.sh b/crash.sh index a229b132d3d..121c77b6ab9 100755 --- a/crash.sh +++ b/crash.sh @@ -121,10 +121,12 @@ set print pretty on python from __future__ import print_function import sys +import traceback try: import crash.session except RuntimeError as e: print("crash-python: {}, exiting".format(str(e)), file=sys.stderr) + traceback.print_exc() sys.exit(1) path = "$SEARCHDIRS".split(' ') try: @@ -132,10 +134,12 @@ try: print("The 'pyhelp' command will list the command extensions.") except gdb.error as e: print("crash-python: {}, exiting".format(str(e)), file=sys.stderr) + traceback.print_exc() sys.exit(1) except RuntimeError as e: print("crash-python: Failed to open {}. {}".format("$VMCORE", str(e)), file=sys.stderr) + traceback.print_exc() sys.exit(1) EOF From c7696dbf85c22bbeafe99b7dadd9ff3631271631 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Sat, 9 Feb 2019 22:59:23 +0100 Subject: [PATCH 045/367] types/list: improvements for traversing broken lists Two improvements to list_for_each() useful when checking broken slab chains: print_broken_links - there might be more than one such problem, and storing this as pending exception then reports only the last one. Allow printing all of them. exact_cycles - the general cycle detection algorithm has no memory overhead, but doesn't detect deterministically. 
Allow more precise detection with the memory cost of storing each visited list node in a set. Signed-off-by: Vlastimil Babka --- crash/types/list.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/crash/types/list.py b/crash/types/list.py index dedb8fce0ce..15cdffb9f84 100644 --- a/crash/types/list.py +++ b/crash/types/list.py @@ -26,7 +26,8 @@ class TypesListClass(CrashBaseClass): __types__ = [ 'struct list_head' ] @export - def list_for_each(self, list_head, include_head=False, reverse=False): + def list_for_each(self, list_head, include_head=False, reverse=False, + print_broken_links=True, exact_cycles=False): pending_exception = None if isinstance(list_head, gdb.Symbol): list_head = list_head.value() @@ -48,6 +49,9 @@ def list_for_each(self, list_head, include_head=False, reverse=False): next_ = 'prev' prev_ = 'next' + if exact_cycles: + visited = set() + if include_head: yield list_head.address @@ -62,16 +66,24 @@ def list_for_each(self, list_head, include_head=False, reverse=False): .format(long(list_head.address), str(e))) while node.address != list_head.address: + if exact_cycles: + if long(node.address) in visited: + raise ListCycleError("Cycle in list detected.") + else: + visited.add(long(node.address)) try: if long(prev.address) != long(node[prev_]): error = ("broken {} link {:#x} -{}-> {:#x} -{}-> {:#x}" .format(prev_, long(prev.address), next_, long(node.address), prev_, long(node[prev_]))) pending_exception = CorruptListError(error) + if print_broken_links: + print(error) # broken prev link means there might be a cycle that # does not include the initial head, so start detecting # cycles - fast = node + if not exact_cycles and fast is not None: + fast = node nxt = node[next_] # only yield after trying to read something from the node, no # point in giving out bogus list elements From a79f893ac031d1e332e96638522565ec427376dd Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Thu, 16 Nov 2017 12:41:10 +0100 Subject: [PATCH 046/367] types/percpu: support dynamic allocations in is_percpu_var() checks is_percpu_var() currently support just static in-kernel allocations, which is insufficient for e.g. zone->pageset. Add the necessary parsing of percpu chunk structures in __is_percpu_var_dynamic() to check for valid dynamic percpu pointers. Because the pcpu_chunk->map structure is inefficient, cache the valid intervals on first use. This uses a simple list of intervals for now, to avoid adding a dependency on some interval tree module. 
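As an example of what this enables (illustrative only; 'zone' is assumed to be a gdb.Value of type struct zone), a dynamically allocated percpu pointer such as zone->pageset is now recognized and can be resolved per cpu:

    from crash.types.percpu import is_percpu_var, get_percpu_var

    pageset = zone['pageset']          # dynamic percpu allocation
    if is_percpu_var(pageset):
        pcp0 = get_percpu_var(pageset, 0)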
Signed-off-by: Vlastimil Babka --- crash/types/percpu.py | 49 ++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 46 insertions(+), 3 deletions(-) diff --git a/crash/types/percpu.py b/crash/types/percpu.py index ee03e59ff68..dc7745a9cc9 100644 --- a/crash/types/percpu.py +++ b/crash/types/percpu.py @@ -9,19 +9,23 @@ import sys from crash.infra import CrashBaseClass, export from crash.util import array_size +from crash.types.list import list_for_each_entry from crash.exceptions import DelayedAttributeError if sys.version_info.major >= 3: long = int class TypesPerCPUClass(CrashBaseClass): - __types__ = [ 'char *' ] - __symvals__ = [ '__per_cpu_offset' ] + __types__ = [ 'char *', 'struct pcpu_chunk' ] + __symvals__ = [ '__per_cpu_offset', 'pcpu_base_addr', 'pcpu_slot', + 'pcpu_nr_slots' ] __minsymvals__ = ['__per_cpu_start', '__per_cpu_end' ] __minsymbol_callbacks__ = [ ('__per_cpu_start', 'setup_per_cpu_size'), ('__per_cpu_end', 'setup_per_cpu_size') ] __symbol_callbacks__ = [ ('__per_cpu_offset', 'setup_nr_cpus') ] + dynamic_offset_cache = None + @classmethod def setup_per_cpu_size(cls, symbol): try: @@ -33,17 +37,56 @@ def setup_per_cpu_size(cls, symbol): def setup_nr_cpus(cls, ignored): cls.nr_cpus = array_size(cls.__per_cpu_offset) + @classmethod + def __setup_dynamic_offset_cache(cls): + # TODO: interval tree would be more efficient, but this adds no 3rd + # party module dependency... + cls.dynamic_offset_cache = list() + for slot in range(cls.pcpu_nr_slots): + for chunk in list_for_each_entry(cls.pcpu_slot[slot], cls.pcpu_chunk_type, 'list'): + chunk_base = long(chunk["base_addr"]) - long(cls.pcpu_base_addr) + long(cls.__per_cpu_start) + off = 0 + start = None + for i in range(chunk['map_used']): + val = long(chunk['map'][i]) + if val < 0: + if start is None: + start = off + else: + if start is not None: + cls.dynamic_offset_cache.append((chunk_base + start, chunk_base + off)) + start = None + off += abs(val) + if start is not None: + cls.dynamic_offset_cache.append((chunk_base + start, chunk_base + off)) + def __is_percpu_var(self, var): if long(var) < self.__per_cpu_start: return False v = var.cast(self.char_p_type) - self.__per_cpu_start return long(v) < self.per_cpu_size + def __is_percpu_var_dynamic(self, var): + if self.dynamic_offset_cache is None: + self.__setup_dynamic_offset_cache() + + var = long(var) + # TODO: we could sort the list... + for (start, end) in self.dynamic_offset_cache: + if var >= start and var < end: + return True + + return False + @export def is_percpu_var(self, var): if isinstance(var, gdb.Symbol): var = var.value().address - return self.__is_percpu_var(var) + if self.__is_percpu_var(var): + return True + if self.__is_percpu_var_dynamic(var): + return True + return False def get_percpu_var_nocheck(self, var, cpu=None): if cpu is None: From b34ba80d7c81114dc62ab8e43b4345560e3b6855 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Fri, 14 Sep 2018 12:22:19 +0200 Subject: [PATCH 047/367] types/percpu: fix dynamic percpu checks for kernels after 3.14 The 3.14 commit 723ad1d90b56 ("percpu: store offsets instead of lengths in ->map[]") changed the semantics of map array. Adapt to that change to properly check dynamic percpu allocations again, with a heuristic detection to avoid relying on version number - more info in comment. 
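A small made-up example of the two encodings the heuristic has to tell apart: a chunk holding one free area of 128 bytes followed by one used area of 256 bytes is described as

    map_used = 2
    map = [ 128, -256 ]       # pre-3.14: lengths, negative means used

versus

    map_used = 2
    map = [ 0, 129, 385 ]     # post-3.14: offsets, odd (offset | 1) means used;
                              # map[map_used] = 384 | 1 is the sentry with the total size

Both describe the used interval [128, 384), and only the old encoding ever contains negative values, which is what the first-chunk scan relies on.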
Signed-off-by: Vlastimil Babka --- crash/types/percpu.py | 63 ++++++++++++++++++++++++++++++++++--------- 1 file changed, 51 insertions(+), 12 deletions(-) diff --git a/crash/types/percpu.py b/crash/types/percpu.py index dc7745a9cc9..0359a054ab8 100644 --- a/crash/types/percpu.py +++ b/crash/types/percpu.py @@ -42,23 +42,62 @@ def __setup_dynamic_offset_cache(cls): # TODO: interval tree would be more efficient, but this adds no 3rd # party module dependency... cls.dynamic_offset_cache = list() + used_is_negative = None for slot in range(cls.pcpu_nr_slots): for chunk in list_for_each_entry(cls.pcpu_slot[slot], cls.pcpu_chunk_type, 'list'): chunk_base = long(chunk["base_addr"]) - long(cls.pcpu_base_addr) + long(cls.__per_cpu_start) off = 0 start = None - for i in range(chunk['map_used']): - val = long(chunk['map'][i]) - if val < 0: - if start is None: - start = off - else: - if start is not None: - cls.dynamic_offset_cache.append((chunk_base + start, chunk_base + off)) - start = None - off += abs(val) - if start is not None: - cls.dynamic_offset_cache.append((chunk_base + start, chunk_base + off)) + _map = chunk['map'] + map_used = long(chunk['map_used']) + + # Prior to 3.14 commit 723ad1d90b56 ("percpu: store offsets + # instead of lengths in ->map[]"), negative values in map + # meant the area is used, and the absolute value is area size. + # After the commit, the value is area offset for unused, and + # offset | 1 for used (all offsets have to be even). The value + # at index 'map_used' is a 'sentry' which is the total size | + # 1. There is no easy indication of whether kernel includes + # the commit, unless we want to rely on version numbers and + # risk breakage in case of backport to older version. Instead + # employ a heuristic which scans the first chunk, and if no + # negative value is found, assume the kernel includes the + # commit. + if used_is_negative is None: + used_is_negative = False + for i in range(map_used): + val = long(_map[i]) + if val < 0: + used_is_negative = True + break + + if used_is_negative: + for i in range(map_used): + val = long(_map[i]) + if val < 0: + if start is None: + start = off + else: + if start is not None: + cls.__add_to_offset_cache(chunk_base, start, off) + start = None + off += abs(val) + if start is not None: + cls.__add_to_offset_cache(chunk_base, start, off) + else: + for i in range(map_used): + off = long(_map[i]) + if off & 1 == 1: + off -= 1 + if start is None: + start = off + else: + if start is not None: + cls.__add_to_offset_cache(chunk_base, start, off) + start = None + if start is not None: + off = long(_map[map_used]) - 1 + cls.__add_to_offset_cache(chunk_base, start, off) def __is_percpu_var(self, var): if long(var) < self.__per_cpu_start: From 9043f1dc3e04fa2a6effaac8625981cb33816080 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Fri, 14 Sep 2018 13:30:15 +0200 Subject: [PATCH 048/367] types/percpu: add KASLR support The relocation done by KASLR adjusts values of symbols, including percpu ones which are not real pointers, but offsets. This is compensated by subtracting the __per_cpu_start which is also adjusted. However percpu values that are not symbols (both static and dynamic) remain unchanged, so the subtraction of __per_cpu_start yields wrong result. We have to detect the KASLR offset (currently x86_64-specific hack) and subtract it for non-symbols. 
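A sketch of the resulting arithmetic (for orientation only, mirroring what get_percpu_var_nocheck() now does):

    # var came from a (relocated) symbol: the relocation of var and of
    # __per_cpu_start cancel out
    addr = __per_cpu_offset[cpu] + var - __per_cpu_start

    # var was read from memory and never relocated: __per_cpu_start is too
    # large by kaslr_offset, so add it back
    addr = __per_cpu_offset[cpu] + var - __per_cpu_start + kaslr_offset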
Signed-off-by: Vlastimil Babka --- crash/types/percpu.py | 41 +++++++++++++++++++++++++++++++++++------ 1 file changed, 35 insertions(+), 6 deletions(-) diff --git a/crash/types/percpu.py b/crash/types/percpu.py index 0359a054ab8..6853fcf82a6 100644 --- a/crash/types/percpu.py +++ b/crash/types/percpu.py @@ -26,6 +26,14 @@ class TypesPerCPUClass(CrashBaseClass): dynamic_offset_cache = None + # TODO: put this somewhere else - arch? + @classmethod + def setup_kaslr_offset(cls): + offset = long(gdb.lookup_minimal_symbol("_text").value().address) + offset -= long(gdb.lookup_minimal_symbol("phys_startup_64").value().address) + offset -= 0xffffffff80000000 + cls.kaslr_offset = offset + @classmethod def setup_per_cpu_size(cls, symbol): try: @@ -36,6 +44,13 @@ def setup_per_cpu_size(cls, symbol): @classmethod def setup_nr_cpus(cls, ignored): cls.nr_cpus = array_size(cls.__per_cpu_offset) + # piggyback on this as it seems those minsymbols at the time of + # their callback yield offset of 0 + cls.setup_kaslr_offset() + + @classmethod + def __add_to_offset_cache(cls, base, start, end): + cls.dynamic_offset_cache.append((base + start, base + end)) @classmethod def __setup_dynamic_offset_cache(cls): @@ -45,7 +60,11 @@ def __setup_dynamic_offset_cache(cls): used_is_negative = None for slot in range(cls.pcpu_nr_slots): for chunk in list_for_each_entry(cls.pcpu_slot[slot], cls.pcpu_chunk_type, 'list'): - chunk_base = long(chunk["base_addr"]) - long(cls.pcpu_base_addr) + long(cls.__per_cpu_start) + chunk_base = long(chunk["base_addr"]) - long(cls.pcpu_base_addr) + # __per_cpu_start is adjusted by KASLR, but dynamic offsets are + # not, so we have to subtract the offset + chunk_base += long(cls.__per_cpu_start) - cls.kaslr_offset + off = 0 start = None _map = chunk['map'] @@ -127,16 +146,24 @@ def is_percpu_var(self, var): return True return False - def get_percpu_var_nocheck(self, var, cpu=None): + def get_percpu_var_nocheck(self, var, cpu=None, is_symbol=False): if cpu is None: vals = {} for cpu in range(0, self.nr_cpus): - vals[cpu] = self.get_percpu_var_nocheck(var, cpu) + vals[cpu] = self.get_percpu_var_nocheck(var, cpu, is_symbol) return vals addr = self.__per_cpu_offset[cpu] addr += var.cast(self.char_p_type) addr -= self.__per_cpu_start + + # if we got var from symbol, it means KASLR relocation was applied to + # the offset, it was applied also to __per_cpu_start, which cancels out + # If var wasn't a symbol, we have to undo the adjustion to + # __per_cpu_start, otherwise we get a bogus address + if not is_symbol: + addr += self.kaslr_offset + vartype = var.type return addr.cast(vartype).dereference() @@ -147,8 +174,10 @@ def get_percpu_var(self, var, cpu=None): # - pointers to objects, where we'll need to use the target # - a pointer to a percpu object, where we'll need to use the # address of the target - if isinstance(var, gdb.Symbol): + is_symbol = False + if isinstance(var, gdb.Symbol) or isinstance(var, gdb.MinSymbol): var = var.value() + is_symbol = True if not isinstance(var, gdb.Value): raise TypeError("Argument must be gdb.Symbol or gdb.Value") if var.type.code != gdb.TYPE_CODE_PTR: @@ -156,5 +185,5 @@ def get_percpu_var(self, var, cpu=None): if not self.is_percpu_var(var): var = var.address if not self.is_percpu_var(var): - raise TypeError("Argument does not correspond to a percpu pointer.") - return self.get_percpu_var_nocheck(var, cpu) + raise TypeError("Argument {} does not correspond to a percpu pointer.".format(var)) + return self.get_percpu_var_nocheck(var, cpu, is_symbol) From 
68fea4d0fac82f721303a537cb2cfa2043f27e92 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Sat, 9 Feb 2019 23:51:59 +0100 Subject: [PATCH 049/367] types/bitmap: add bitmap handling Export function for_each_set_bit() for working with kernel bitmaps. Uses workaround for github issue #31. Signed-off-by: Vlastimil Babka --- crash/types/bitmap.py | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 crash/types/bitmap.py diff --git a/crash/types/bitmap.py b/crash/types/bitmap.py new file mode 100644 index 00000000000..1715b4ce812 --- /dev/null +++ b/crash/types/bitmap.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +import gdb + +from crash.infra import CrashBaseClass, export + +class TypesBitmapClass(CrashBaseClass): + __types__ = [ 'unsigned long' ] + __type_callbacks__ = [ ('unsigned long', 'setup_ulong') ] + + bits_per_ulong = None + + @classmethod + def setup_ulong(cls, gdbtype): + cls.bits_per_ulong = gdbtype.sizeof * 8 + + @export + @classmethod + def for_each_set_bit(cls, bitmap): + + # FIXME: callback not workie? + cls.bits_per_ulong = cls.unsigned_long_type.sizeof * 8 + + size = bitmap.type.sizeof * 8 + idx = 0 + bit = 0 + while size > 0: + ulong = bitmap[idx] + + if ulong != 0: + for off in range(min(size, cls.bits_per_ulong)): + if ulong & 1 != 0: + yield bit + bit += 1 + ulong >>= 1 + else: + bit += cls.bits_per_ulong + + size -= cls.bits_per_ulong + idx += 1 + From 120e2f498ffbd7f4292237a8075c7ba838e7998d Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Sat, 9 Feb 2019 23:55:11 +0100 Subject: [PATCH 050/367] types/cpu: add class for various cpu-related functions For now it provides for_each_online_cpu() Signed-off-by: Vlastimil Babka --- crash/types/cpu.py | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 crash/types/cpu.py diff --git a/crash/types/cpu.py b/crash/types/cpu.py new file mode 100644 index 00000000000..ac84baf009b --- /dev/null +++ b/crash/types/cpu.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +from __future__ import absolute_import +from __future__ import print_function +from __future__ import division + +import gdb +import sys +from crash.infra import CrashBaseClass, export +from crash.util import container_of, find_member_variant, get_symbol_value +from crash.types.bitmap import for_each_set_bit + +if sys.version_info.major >= 3: + long = int + +# this wraps no particular type, rather it's a placeholder for +# functions to iterate over online cpu's etc. +class TypesCPUClass(CrashBaseClass): + + __symbol_callbacks__ = [ ('cpu_online_mask', 'setup_cpus_mask') ] + + cpus_online = None + + @classmethod + def setup_cpus_mask(cls, cpu_mask): + bits = cpu_mask.value()["bits"] + cls.cpus_online = list(for_each_set_bit(bits)) + + @export + def for_each_online_cpu(self): + for cpu in self.cpus_online: + yield cpu + From 85acecebfe91c06bd5c3ad80daeb7bcd287795d8 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Sun, 10 Feb 2019 00:07:58 +0100 Subject: [PATCH 051/367] types/node,zone: add basic NUMA node and zone representation Add support for iterating online NUMA nodes, their zones etc. 
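Example usage (an illustrative sketch, not part of the patch):

    from crash.types.node import for_each_online_node

    for node in for_each_online_node():
        for zone in node.for_each_zone():
            if zone.is_populated():
                print("node {} zone {}".format(zone.nid, zone.zid))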
Signed-off-by: Vlastimil Babka --- crash/types/node.py | 89 +++++++++++++++++++++++++++++++++++++++++++++ crash/types/zone.py | 42 +++++++++++++++++++++ 2 files changed, 131 insertions(+) create mode 100644 crash/types/node.py create mode 100644 crash/types/zone.py diff --git a/crash/types/node.py b/crash/types/node.py new file mode 100644 index 00000000000..531b44f3b92 --- /dev/null +++ b/crash/types/node.py @@ -0,0 +1,89 @@ +#!/usr/bin/env python +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +import gdb +from crash.infra import CrashBaseClass, export +from crash.util import container_of, find_member_variant, get_symbol_value +from crash.types.percpu import get_percpu_var +from bitmap import for_each_set_bit +import crash.types.zone + +class TypesNodeUtilsClass(CrashBaseClass): + __symbols__ = [ 'numa_node' ] + __symvals__ = [ 'numa_cpu_lookup_table' ] + + @export + def numa_node_id(self, cpu): + if gdb.current_target().arch.ident == "powerpc:common64": + return long(self.numa_cpu_lookup_table[cpu]) + else: + return long(get_percpu_var(self.numa_node, cpu)) + +class Node(CrashBaseClass): + __types__ = [ 'pg_data_t', 'struct zone' ] + + @staticmethod + def from_nid(nid): + node_data = gdb.lookup_global_symbol("node_data").value() + return Node(node_data[nid].dereference()) + + def for_each_zone(self): + node_zones = self.gdb_obj["node_zones"] + + ptr = long(node_zones[0].address) + + (first, last) = node_zones.type.range() + for zid in range(first, last + 1): + # FIXME: gdb seems to lose the alignment padding with plain + # node_zones[zid], so we have to simulate it using zone_type.sizeof + # which appears to be correct + zone = gdb.Value(ptr).cast(self.zone_type.pointer()).dereference() + yield crash.types.zone.Zone(zone, zid) + ptr += self.zone_type.sizeof + + def __init__(self, obj): + self.gdb_obj = obj + +class Nodes(CrashBaseClass): + + __symbol_callbacks__ = [ ('node_states', 'setup_node_states') ] + + nids_online = None + nids_possible = None + + @classmethod + def setup_node_states(cls, node_states_sym): + + node_states = node_states_sym.value() + + enum_node_states = gdb.lookup_type("enum node_states") + + N_POSSIBLE = enum_node_states["N_POSSIBLE"].enumval + N_ONLINE = enum_node_states["N_ONLINE"].enumval + + bits = node_states[N_POSSIBLE]["bits"] + cls.nids_possible = list(for_each_set_bit(bits)) + + bits = node_states[N_ONLINE]["bits"] + cls.nids_online = list(for_each_set_bit(bits)) + + @export + def for_each_nid(cls): + for nid in cls.nids_possible: + yield nid + + @export + def for_each_online_nid(cls): + for nid in cls.nids_online: + yield nid + + @export + def for_each_node(cls): + for nid in cls.for_each_nid(): + yield Node.from_nid(nid) + + @export + def for_each_online_node(cls): + for nid in cls.for_each_online_nid(): + yield Node.from_nid(nid) + diff --git a/crash/types/zone.py b/crash/types/zone.py new file mode 100644 index 00000000000..420e9164380 --- /dev/null +++ b/crash/types/zone.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +import gdb +from crash.infra import CrashBaseClass, export +from crash.util import container_of, find_member_variant, array_for_each +import crash.types.node +from cpu import for_each_online_cpu +from crash.types.list import list_for_each_entry + +def getValue(sym): + return gdb.lookup_symbol(sym, None)[0].value() + +class Zone(CrashBaseClass): + __types__ = [ 'struct zone', 'struct page' ] + + def __init__(self, obj, zid): + self.gdb_obj = obj + self.zid = zid 
+ self.nid = long(obj["node"]) + + def is_populated(self): + if self.gdb_obj["present_pages"] != 0: + return True + else: + return False + +class Zones(CrashBaseClass): + + @export + def for_each_zone(cls): + for node in crash.types.node,for_each_node(): + for zone in node.for_each_zone(): + yield zone + + @export + def for_each_populated_zone(cls): + #TODO: some filter thing? + for zone in cls.for_each_zone(): + if zone.is_populated(): + yield zone + From 6913bd06293e8ac5ffcfe412f41046ee2fdc132d Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Sun, 10 Feb 2019 00:11:17 +0100 Subject: [PATCH 052/367] types/page: add struct page handling support Add type wrapping a struct page, querying some common flags, iterating memmap, creating page from virt addr etc. Signed-off-by: Vlastimil Babka --- crash/types/page.py | 237 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 237 insertions(+) create mode 100644 crash/types/page.py diff --git a/crash/types/page.py b/crash/types/page.py new file mode 100644 index 00000000000..d2d9b81a392 --- /dev/null +++ b/crash/types/page.py @@ -0,0 +1,237 @@ +#!/usr/bin/env python +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +from math import log, ceil +import gdb +import types +from crash.infra import CrashBaseClass, export +from crash.util import container_of, find_member_variant +from crash.cache.syscache import config + +#TODO debuginfo won't tell us, depends on version? +PAGE_MAPPING_ANON = 1 + +class Page(CrashBaseClass): + __types__ = [ 'unsigned long', 'struct page', 'enum pageflags', + 'enum zone_type', 'struct mem_section'] + __type_callbacks__ = [ ('struct page', 'setup_page_type' ), + ('enum pageflags', 'setup_pageflags' ), + ('enum zone_type', 'setup_zone_type' ), + ('struct mem_section', 'setup_mem_section') ] + __symvals__ = [ 'mem_section' ] + # TODO: this should better be generalized to some callback for + # "config is available" without refering to the symbol name here + __symbol_callbacks__ = [ ('kernel_config_data', 'setup_nodes_width' ), + ('vmemmap_base', 'setup_vmemmap_base' ), + ('page_offset_base', 'setup_directmap_base' ) ] + + slab_cache_name = None + slab_page_name = None + compound_head_name = None + vmemmap_base = 0xffffea0000000000 + vmemmap = None + directmap_base = 0xffff880000000000 + pageflags = dict() + + PG_tail = None + PG_slab = None + PG_lru = None + + setup_page_type_done = False + setup_pageflags_done = False + setup_pageflags_finish_done = False + + ZONES_WIDTH = None + NODES_WIDTH = None + # TODO have arch provide this? 
+ BITS_PER_LONG = None + + sparsemem = False + + @classmethod + def setup_page_type(cls, gdbtype): + # TODO: should check config, but that failed to work on ppc64, hardcode + # 64k for now + if gdb.current_target().arch.ident == "powerpc:common64": + cls.PAGE_SHIFT = 16 + # also a config + cls.directmap_base = 0xc000000000000000 + + cls.sparsemem = True + cls.SECTION_SIZE_BITS = 24 + else: + cls.PAGE_SHIFT = 12 + cls.PAGE_SIZE = 4096 + + cls.PAGE_SIZE = 1 << cls.PAGE_SHIFT + + cls.slab_cache_name = find_member_variant(gdbtype, ('slab_cache', 'lru')) + cls.slab_page_name = find_member_variant(gdbtype, ('slab_page', 'lru')) + cls.compound_head_name = find_member_variant(gdbtype, ('compound_head', 'first_page' )) + cls.vmemmap = gdb.Value(cls.vmemmap_base).cast(gdbtype.pointer()) + + cls.setup_page_type_done = True + if cls.setup_pageflags_done and not cls.setup_pageflags_finish_done: + cls.setup_pageflags_finish() + + @classmethod + def setup_mem_section(cls, gdbtype): + # TODO assumes SPARSEMEM_EXTREME + cls.SECTIONS_PER_ROOT = cls.PAGE_SIZE / gdbtype.sizeof + + @classmethod + def pfn_to_page(cls, pfn): + if cls.sparsemem: + section_nr = pfn >> (cls.SECTION_SIZE_BITS - cls.PAGE_SHIFT) + root_idx = section_nr / cls.SECTIONS_PER_ROOT + offset = section_nr & (cls.SECTIONS_PER_ROOT - 1) + section = cls.mem_section[root_idx][offset] + + pagemap = section["section_mem_map"] & ~3L + return (pagemap.cast(cls.page_type.pointer()) + pfn).dereference() + else: + return cls.vmemmap[pfn] + + @classmethod + def setup_pageflags(cls, gdbtype): + for field in gdbtype.fields(): + cls.pageflags[field.name] = field.enumval + + cls.setup_pageflags_done = True + if cls.setup_page_type_done and not cls.setup_pageflags_finish_done: + cls.setup_pageflags_finish() + + cls.PG_slab = 1L << cls.pageflags['PG_slab'] + cls.PG_lru = 1L << cls.pageflags['PG_lru'] + + @classmethod + def setup_vmemmap_base(cls, symbol): + cls.vmemmap_base = long(symbol.value()) + # setup_page_type() was first and used the hardcoded initial value, + # we have to update + if cls.vmemmap is not None: + cls.vmemmap = gdb.Value(cls.vmemmap_base).cast(cls.page_type.pointer()) + + @classmethod + def setup_directmap_base(cls, symbol): + cls.directmap_base = long(symbol.value()) + + @classmethod + def setup_zone_type(cls, gdbtype): + max_nr_zones = gdbtype['__MAX_NR_ZONES'].enumval + cls.ZONES_WIDTH = int(ceil(log(max_nr_zones))) + + @classmethod + def setup_nodes_width(cls, symbol): + # TODO: handle kernels with no space for nodes in page flags + try: + cls.NODES_WIDTH = int(config['NODES_SHIFT']) + except: + # XXX + print("Unable to determine NODES_SHIFT from config, trying 8") + cls.NODES_WIDTH = 8 + # piggyback on this callback because type callback doesn't seem to work + # for unsigned long + cls.BITS_PER_LONG = cls.unsigned_long_type.sizeof * 8 + + @classmethod + def setup_pageflags_finish(cls): + cls.setup_pageflags_finish_done = True + if 'PG_tail' in cls.pageflags.keys(): + cls.PG_tail = 1L << cls.pageflags['PG_tail'] + cls.is_tail = cls.__is_tail_flag + + if cls.compound_head_name == 'first_page': + cls.__compound_head = cls.__compound_head_first_page + if cls.PG_tail is None: + cls.PG_tail = 1L << cls.pageflags['PG_compound'] | 1L << cls.pageflags['PG_reclaim'] + cls.is_tail = cls.__is_tail_flagcombo + + @staticmethod + def from_page_addr(addr): + page_ptr = gdb.Value(addr).cast(Page.page_type.pointer()) + pfn = (addr - Page.vmemmap_base) / Page.page_type.sizeof + return Page(page_ptr.dereference(), pfn) + + def __is_tail_flagcombo(self): 
+ return bool((self.flags & self.PG_tail) == self.PG_tail) + + def __is_tail_flag(self): + return bool(self.flags & self.PG_tail) + + def is_tail(self): + return bool(self.gdb_obj['compound_head'] & 1) + + def is_slab(self): + return bool(self.flags & self.PG_slab) + + def is_lru(self): + return bool(self.flags & self.PG_lru) + + def is_anon(self): + mapping = long(self.gdb_obj["mapping"]) + return (mapping & PAGE_MAPPING_ANON) != 0 + + def get_slab_cache(self): + if Page.slab_cache_name == "lru": + return self.gdb_obj["lru"]["next"] + return self.gdb_obj[Page.slab_cache_name] + + def get_slab_page(self): + if Page.slab_page_name == "lru": + return self.gdb_obj["lru"]["prev"] + return self.gdb_obj[Page.slab_page_name] + + def get_nid(self): + return self.flags >> (self.BITS_PER_LONG - self.NODES_WIDTH) + + def get_zid(self): + shift = self.BITS_PER_LONG - self.NODES_WIDTH - self.ZONES_WIDTH + zid = self.flags >> shift & ((1 << self.ZONES_WIDTH) - 1) + return zid + + def __compound_head_first_page(self): + return long(self.gdb_obj['first_page']) + + def __compound_head(self): + return long(self.gdb_obj['compound_head']) - 1 + + def compound_head(self): + if not self.is_tail(): + return self + + return Page.from_page_addr(self.__compound_head()) + + def __init__(self, obj, pfn): + self.gdb_obj = obj + self.pfn = pfn + self.flags = long(obj["flags"]) + +class Pages(CrashBaseClass): + + @export + def pfn_to_page(cls, pfn): + return Page(Page.pfn_to_page(pfn), pfn) + + @export + def page_from_addr(cls, addr): + pfn = (addr - Page.directmap_base) / Page.PAGE_SIZE + return pfn_to_page(pfn) + + @export + def page_from_gdb_obj(cls, gdb_obj): + pfn = (long(gdb_obj.address) - Page.vmemmap_base) / Page.page_type.sizeof + return Page(gdb_obj, pfn) + + @export + def for_each_page(): + # TODO works only on x86? + max_pfn = long(gdb.lookup_global_symbol("max_pfn").value()) + for pfn in range(max_pfn): + try: + yield Page.pfn_to_page(pfn) + except gdb.error, e: + # TODO: distinguish pfn_valid() and report failures for those? 
+ pass + + From 8073fdee9235eb742851802e05d540cd9e4d101b Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Sun, 10 Feb 2019 00:14:31 +0100 Subject: [PATCH 053/367] types/slab: add slab and kmem_cache support Add representation of kmem_cache and slab, including extensive integrity checking and error reporting Signed-off-by: Vlastimil Babka --- crash/cache/slab.py | 21 ++ crash/types/slab.py | 647 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 668 insertions(+) create mode 100644 crash/cache/slab.py create mode 100644 crash/types/slab.py diff --git a/crash/cache/slab.py b/crash/cache/slab.py new file mode 100644 index 00000000000..406fd2111bb --- /dev/null +++ b/crash/cache/slab.py @@ -0,0 +1,21 @@ +#!/usr/bin/env python +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +import gdb +from crash.types.list import list_for_each_entry +from crash.cache import CrashCache + +class CrashCacheSlab(CrashCache): + + def __init__(self): + super(CrashCacheSlab, self).__init__() + self.populated = False + self.kmem_caches = dict() + self.kmem_caches_by_addr = dict() + + def refresh(self): + self.populated = False + self.kmem_caches = dict() + self.kmem_caches_by_addr = dict() + +cache = CrashCacheSlab() diff --git a/crash/types/slab.py b/crash/types/slab.py new file mode 100644 index 00000000000..4b3f8a8426d --- /dev/null +++ b/crash/types/slab.py @@ -0,0 +1,647 @@ +#!/usr/bin/env python +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +import gdb +import crash +import sys +import traceback +from crash.util import container_of, find_member_variant, get_symbol_value +from crash.util import safe_get_symbol_value +from percpu import get_percpu_var +from crash.infra import CrashBaseClass, export +from crash.types.list import list_for_each, list_for_each_entry +from crash.types.page import Page, page_from_gdb_obj, page_from_addr +from crash.types.node import for_each_nid +from crash.types.cpu import for_each_online_cpu +from crash.types.node import numa_node_id + +AC_PERCPU = "percpu" +AC_SHARED = "shared" +AC_ALIEN = "alien" + +slab_partial = 0 +slab_full = 1 +slab_free = 2 + +slab_list_name = {0: "partial", 1: "full", 2: "free"} +slab_list_fullname = {0: "slabs_partial", 1: "slabs_full", 2: "slabs_free"} + +BUFCTL_END = ~0 & 0xffffffff + +def col_error(msg): + return "\033[1;31;40m {}\033[0;37;40m ".format(msg) + +def col_bold(msg): + return "\033[1;37;40m {}\033[0;37;40m ".format(msg) + + +class Slab(CrashBaseClass): + __types__ = [ 'struct slab', 'struct page', 'kmem_cache', 'kmem_bufctl_t', + 'freelist_idx_t' ] + __type_callbacks__ = [ ('struct page', 'check_page_type'), + ('struct slab', 'check_slab_type'), + ('kmem_bufctl_t', 'check_bufctl_type'), + ('freelist_idx_t', 'check_bufctl_type') ] + + slab_list_head = None + page_slab = None + real_slab_type = None + bufctl_type = None + + @classmethod + def check_page_type(cls, gdbtype): + if cls.page_slab is None: + cls.page_slab = True + cls.real_slab_type = gdbtype + cls.slab_list_head = 'lru' + + @classmethod + def check_slab_type(cls, gdbtype): + cls.page_slab = False + cls.real_slab_type = gdbtype + cls.slab_list_head = 'list' + + @classmethod + def check_bufctl_type(cls, gdbtype): + cls.bufctl_type = gdbtype + + @classmethod + def from_addr(cls, slab_addr, kmem_cache): + if not isinstance(kmem_cache, KmemCache): + kmem_cache = kmem_cache_from_addr(kmem_cache) + slab_struct = gdb.Value(slab_addr).cast(cls.real_slab_type.pointer()).dereference() + return Slab(slab_struct, kmem_cache) + + @classmethod + def 
from_page(cls, page): + kmem_cache_addr = long(page.get_slab_cache()) + kmem_cache = kmem_cache_from_addr(kmem_cache_addr) + if cls.page_slab: + return Slab(page.gdb_obj, kmem_cache) + else: + slab_addr = long(page.get_slab_page()) + return Slab.from_addr(slab_addr, kmem_cache) + + @classmethod + def from_list_head(cls, list_head, kmem_cache): + gdb_obj = container_of(list_head, cls.real_slab_type, cls.slab_list_head) + return Slab(gdb_obj, kmem_cache) + + def __add_free_obj_by_idx(self, idx): + objs_per_slab = self.kmem_cache.objs_per_slab + bufsize = self.kmem_cache.buffer_size + + if (idx >= objs_per_slab): + self.__error(": free object index %d overflows %d" % (idx, + objs_per_slab)) + return False + + obj_addr = self.s_mem + idx * bufsize + if obj_addr in self.free: + self.__error(": object %x duplicated on freelist" % obj_addr) + return False + else: + self.free.add(obj_addr) + + return True + + def __populate_free(self): + if self.free: + return + + self.free = set() + bufsize = self.kmem_cache.buffer_size + objs_per_slab = self.kmem_cache.objs_per_slab + + if self.page_slab: + page = self.gdb_obj + freelist = page["freelist"].cast(self.bufctl_type.pointer()) + for i in range(self.inuse, objs_per_slab): + obj_idx = int(freelist[i]) + self.__add_free_obj_by_idx(obj_idx) + # XXX not generally useful and reliable + if False and objs_per_slab > 1: + all_zeroes = True + for i in range(objs_per_slab): + obj_idx = int(freelist[i]) + if obj_idx != 0: + all_zeroes = False + if all_zeroes: + self.__error(": freelist full of zeroes") + + else: + bufctl = self.gdb_obj.address[1].cast(self.bufctl_type).address + f = int(self.gdb_obj["free"]) + while f != BUFCTL_END: + if not self.__add_free_obj_by_idx(f): + self.__error(": bufctl cycle detected") + break + + f = int(bufctl[f]) + + def find_obj(self, addr): + bufsize = self.kmem_cache.buffer_size + objs_per_slab = self.kmem_cache.objs_per_slab + + if long(addr) < self.s_mem: + return None + + idx = (long(addr) - self.s_mem) / bufsize + if idx >= objs_per_slab: + return None + + return self.s_mem + (idx * bufsize) + + def contains_obj(self, addr): + obj_addr = self.find_obj(addr) + + if not obj_addr: + return (False, 0L, None) + + self.__populate_free() + if obj_addr in self.free: + return (False, obj_addr, None) + + ac = self.kmem_cache.get_array_caches() + + if obj_addr in ac: + return (False, obj_addr, ac[obj_addr]) + + return (True, obj_addr, None) + + def __error(self, msg, misplaced = False): + msg = col_error("cache %s slab %x%s" % (self.kmem_cache.name, + long(self.gdb_obj.address), msg)) + self.error = True + if misplaced: + self.misplaced_error = msg + else: + print(msg) + + def __free_error(self, list_name): + self.misplaced_list = list_name + self.__error(": is on list %s, but has %d of %d objects allocated" % + (list_name, self.inuse, self.kmem_cache.objs_per_slab), misplaced = True) + + def get_objects(self): + bufsize = self.kmem_cache.buffer_size + obj = self.s_mem + for i in range(self.kmem_cache.objs_per_slab): + yield obj + obj += bufsize + + def get_allocated_objects(self): + for obj in self.get_objects(): + c = self.contains_obj(obj) + if c[0]: + yield obj + + def check(self, slabtype, nid): + self.__populate_free() + num_free = len(self.free) + max_free = self.kmem_cache.objs_per_slab + + if self.kmem_cache.off_slab and not Slab.page_slab: + struct_slab_slab = slab_from_obj_addr(long(self.gdb_obj.address)) + if not struct_slab_slab: + self.__error(": OFF_SLAB struct slab is not a slab object itself") + else: + 
struct_slab_cache = struct_slab_slab.kmem_cache.name + if not self.kmem_cache.off_slab_cache: + if struct_slab_cache != "size-64" and struct_slab_cache != "size-128": + self.__error(": OFF_SLAB struct slab is in a wrong cache %s" % + struct_slab_cache) + else: + self.kmem_cache.off_slab_cache = struct_slab_cache + elif struct_slab_cache != self.kmem_cache.off_slab_cache: + self.__error(": OFF_SLAB struct slab is in a wrong cache %s" % + struct_slab_cache) + + struct_slab_obj = struct_slab_slab.contains_obj(self.gdb_obj.address) + if not struct_slab_obj[0]: + self.__error(": OFF_SLAB struct slab is not allocated") + print(struct_slab_obj) + elif struct_slab_obj[1] != long(self.gdb_obj.address): + self.__error(": OFF_SLAB struct slab at wrong offset{}".format( + long(self.gdb_obj.address) - struct_slab_obj[1])) + + if self.inuse + num_free != max_free: + self.__error(": inuse=%d free=%d adds up to %d (should be %d)" % + (self.inuse, num_free, self.inuse + num_free, max_free)) + + if slabtype == slab_free: + if num_free != max_free: + self.__free_error("slab_free") + elif slabtype == slab_partial: + if num_free == 0 or num_free == max_free: + self.__free_error("slab_partial") + elif slabtype == slab_full: + if num_free > 0: + self.__free_error("slab_full") + + if self.page_slab: + slab_nid = self.page.get_nid() + if nid != slab_nid: + self.__error(": slab is on nid %d instead of %d" % + (slab_nid, nid)) + print "free objects %d" % num_free + + ac = self.kmem_cache.get_array_caches() + last_page_addr = 0 + for obj in self.get_objects(): + if obj in self.free and obj in ac: + self.__error(": obj %x is marked as free but in array cache:" % obj) + print(ac[obj]) + try: + page = page_from_addr(obj).compound_head() + except: + self.__error(": failed to get page for object %x" % obj) + continue + + if long(page.gdb_obj.address) == last_page_addr: + continue + + last_page_addr = long(page.gdb_obj.address) + + if page.get_nid() != nid: + self.__error(": obj %x is on nid %d instead of %d" % + (obj, page.get_nid(), nid)) + if not page.is_slab(): + self.__error(": obj %x is not on PageSlab page" % obj) + kmem_cache_addr = long(page.get_slab_cache()) + if kmem_cache_addr != long(self.kmem_cache.gdb_obj.address): + self.__error(": obj %x is on page where pointer to kmem_cache points to %x instead of %x" % + (obj, kmem_cache_addr, long(self.kmem_cache.gdb_obj.address))) + + if self.page_slab: + continue + + slab_addr = long(page.get_slab_page()) + if slab_addr != self.gdb_obj.address: + self.__error(": obj %x is on page where pointer to slab wrongly points to %x" % + (obj, slab_addr)) + return num_free + + def __init__(self, gdb_obj, kmem_cache, error=False): + self.error = error + self.gdb_obj = gdb_obj + self.kmem_cache = kmem_cache + self.free = None + self.misplaced_list = None + self.misplaced_error = None + + if error: + return + + if self.page_slab: + self.inuse = int(gdb_obj["active"]) + self.page = page_from_gdb_obj(gdb_obj) + else: + self.inuse = int(gdb_obj["inuse"]) + self.s_mem = long(gdb_obj["s_mem"]) + +class KmemCache(CrashBaseClass): + __types__ = [ 'struct kmem_cache', 'struct alien_cache' ] + __type_callbacks__ = [ ('struct kmem_cache', 'check_kmem_cache_type'), + ('struct alien_cache', 'setup_alien_cache_type') ] + + buffer_size_name = None + nodelists_name = None + percpu_name = None + percpu_cache = None + head_name = None + alien_cache_type_exists = False + + @classmethod + def check_kmem_cache_type(cls, gdbtype): + cls.buffer_size_name = find_member_variant(gdbtype, ('buffer_size', 
'size')) + cls.nodelists_name = find_member_variant(gdbtype, ('nodelists', 'node')) + cls.percpu_name = find_member_variant(gdbtype, ('cpu_cache', 'array')) + cls.percpu_cache = bool(cls.percpu_name == 'cpu_cache') + cls.head_name = find_member_variant(gdbtype, ('next', 'list')) + + @classmethod + def setup_alien_cache_type(cls, gdbtype): + cls.alien_cache_type_exists = True + + def __get_nodelist(self, node): + return self.gdb_obj[KmemCache.nodelists_name][node] + + def __get_nodelists(self): + for nid in for_each_nid(): + node = self.__get_nodelist(nid) + if long(node) == 0L: + continue + yield (nid, node.dereference()) + + @staticmethod + def all_find_obj(addr): + slab = slab_from_obj_addr(addr) + if not slab: + return None + return slab.contains_obj(addr) + + def __init__(self, name, gdb_obj): + self.name = name + self.gdb_obj = gdb_obj + self.array_caches = None + + self.objs_per_slab = int(gdb_obj["num"]) + self.buffer_size = int(gdb_obj[KmemCache.buffer_size_name]) + + if long(gdb_obj["flags"]) & 0x80000000: + self.off_slab = True + self.off_slab_cache = None + else: + self.off_slab = False + + def __fill_array_cache(self, acache, ac_type, nid_src, nid_tgt): + avail = int(acache["avail"]) + limit = int(acache["limit"]) + + # TODO check avail > limit + if avail == 0: + return + + cache_dict = {"ac_type" : ac_type, "nid_src" : nid_src, + "nid_tgt" : nid_tgt} + +# print(cache_dict) + if ac_type == AC_PERCPU: + nid_tgt = numa_node_id(nid_tgt) + + for i in range(avail): + ptr = long(acache["entry"][i]) +# print(hex(ptr)) + if ptr in self.array_caches: + print (col_error("WARNING: array cache duplicity detected!")) + else: + self.array_caches[ptr] = cache_dict + + page = page_from_addr(ptr) + obj_nid = page.get_nid() + + if obj_nid != nid_tgt: + print (col_error("Object {:#x} in cache {} is on wrong nid {} instead of {}".format( + ptr, cache_dict, obj_nid, nid_tgt))) + + def __fill_alien_caches(self, node, nid_src): + alien_cache = node["alien"] + + # TODO check that this only happens for single-node systems? + if long(alien_cache) == 0L: + return + + for nid in for_each_nid(): + array = alien_cache[nid].dereference() + + # TODO: limit should prevent this? 
+ if array.address == 0: + continue + + if self.alien_cache_type_exists: + array = array["ac"] + + # A node cannot have alien cache on the same node, but some + # kernels (xen) seem to have a non-null pointer there anyway + if nid_src == nid: + continue + + self.__fill_array_cache(array, AC_ALIEN, nid_src, nid) + + def __fill_percpu_caches(self): + cpu_cache = self.gdb_obj[KmemCache.percpu_name] + + for cpu in for_each_online_cpu(): + if (KmemCache.percpu_cache): + array = get_percpu_var(cpu_cache, cpu) + else: + array = cpu_cache[cpu].dereference() + + self.__fill_array_cache(array, AC_PERCPU, -1, cpu) + + def __fill_all_array_caches(self): + self.array_caches = dict() + + self.__fill_percpu_caches() + + # TODO check and report collisions + for (nid, node) in self.__get_nodelists(): + shared_cache = node["shared"] + if long(shared_cache) != 0: + self.__fill_array_cache(shared_cache.dereference(), AC_SHARED, nid, nid) + + self.__fill_alien_caches(node, nid) + + def get_array_caches(self): + if self.array_caches is None: + self.__fill_all_array_caches() + + return self.array_caches + + def __get_allocated_objects(self, node, slabtype): + for slab in self.get_slabs_of_type(node, slabtype): + for obj in slab.get_allocated_objects(): + yield obj + + def get_allocated_objects(self): + for (nid, node) in self.__get_nodelists(): + for obj in self.__get_allocated_objects(node, slab_partial): + yield obj + for obj in self.__get_allocated_objects(node, slab_full): + yield obj + + def get_slabs_of_type(self, node, slabtype, reverse=False, exact_cycles=False): + wrong_list_nodes = dict() + for stype in range(3): + if stype != slabtype: + wrong_list_nodes[long(node[slab_list_fullname[stype]].address)] = stype + + slab_list = node[slab_list_fullname[slabtype]] + for list_head in list_for_each(slab_list, reverse=reverse, exact_cycles=exact_cycles): + try: + if long(list_head) in wrong_list_nodes.keys(): + wrong_type = wrong_list_nodes[long(list_head)] + print(col_error("Encountered head of {} slab list while traversing {} slab list, skipping". + format(slab_list_name[wrong_type], slab_list_name[slabtype]))) + continue + + slab = Slab.from_list_head(list_head, self) + except: + traceback.print_exc() + print("failed to initialize slab object from list_head {:#x}: {}".format( + long(list_head), sys.exc_info()[0])) + continue + slab = Slab(gdb_slab, kmem_cache, error = True) + yield slab + + + def __check_slab(self, slab, slabtype, nid, errors): + addr = long(slab.gdb_obj.address) + free = 0 + + if slab.error == False: + free = slab.check(slabtype, nid) + + if slab.misplaced_error is None and errors['num_misplaced'] > 0: + if errors['num_misplaced'] > 0: + print(col_error("{} slab objects were misplaced, printing the last:".format(errors['num_misplaced']))) + print(errors['last_misplaced']) + errors['num_misplaced'] = 0 + errors['last_misplaced'] = None + + if slab.error == False: + errors['num_ok'] += 1 + errors['last_ok'] = addr + if not errors['first_ok']: + errors['first_ok'] = addr + else: + if errors['num_ok'] > 0: + print("{} slab objects were ok between {:#x} and {:#x}". 
+ format(errors['num_ok'], errors['first_ok'], errors['last_ok'])) + errors['num_ok'] = 0 + errors['first_ok'] = None + errors['last_ok'] = None + + if slab.misplaced_error is not None: + if errors['num_misplaced'] == 0: + print(slab.misplaced_error) + errors['num_misplaced'] += 1 + errors['last_misplaced'] = slab.misplaced_error + + return free + + def ___check_slabs(self, node, slabtype, nid, reverse=False): + slabs = 0 + free = 0 + check_ok = True + + errors = {'first_ok': None, 'last_ok': None, 'num_ok': 0, + 'first_misplaced': None, 'last_misplaced': None, 'num_misplaced': 0} + + try: + for slab in self.get_slabs_of_type(node, slabtype, reverse, exact_cycles=True): + try: + free += self.__check_slab(slab, slabtype, nid, errors) + except Exception as e: + print(col_error("Exception when checking slab {:#x}:{}". + format(long(slab.gdb_obj.address), e))) + traceback.print_exc() + slabs += 1 + + except Exception as e: + print(col_error("Unrecoverable error when traversing {} slab list: {}".format( + slab_list_name[slabtype], e))) + check_ok = False + + if errors['num_ok'] > 0: + print("{} slab objects were ok between {:#x} and {:#x}". + format(errors['num_ok'], errors['first_ok'], errors['last_ok'])) + + if errors['num_misplaced'] > 0: + print(col_error("{} slab objects were misplaced, printing the last:".format(errors['num_misplaced']))) + print(errors['last_misplaced']) + + return (check_ok, slabs, free) + + def __check_slabs(self, node, slabtype, nid): + + slab_list = node[slab_list_fullname[slabtype]] + + print("checking {} slab list {:#x}".format(slab_list_name[slabtype], + long(slab_list.address))) + + errors = {'first_ok': None, 'last_ok': None, 'num_ok': 0, + 'first_misplaced': None, 'last_misplaced': None, 'num_misplaced': 0} + + (check_ok, slabs, free) = self.___check_slabs(node, slabtype, nid) + + if not check_ok: + print("Retrying the slab list in reverse order") + (check_ok, slabs_rev, free_rev) = self.___check_slabs(node, + slabtype, nid, reverse=True) + slabs += slabs_rev + free += free_rev + + #print("checked {} slabs in {} slab list".format( +# slabs, slab_list_name[slabtype])) + + return free + + def check_array_caches(self): + acs = self.get_array_caches() + for ac_ptr in acs.keys(): + ac_obj_slab = slab_from_obj_addr(ac_ptr) + if not ac_obj_slab: + print("cached pointer {:#x} in {} not found in slab".format( + ac_ptr, acs[ac_ptr])) + elif ac_obj_slab.kmem_cache.name != self.name: + print("cached pointer {:#x} in {} belongs to wrong kmem cache {}".format( + ac_ptr, acs[ac_ptr], ac_obj_slab.kmem_cache.name)) + else: + ac_obj_obj = ac_obj_slab.contains_obj(ac_ptr) + if ac_obj_obj[0] == False and ac_obj_obj[2] is None: + print("cached pointer {:#x} in {} is not allocated: {}".format( + ac_ptr, acs[ac_ptr], ac_obj_obj)) + elif ac_obj_obj[1] != ac_ptr: + print("cached pointer {:#x} in {} has wrong offset: {}".format( + ac_ptr, acs[ac_ptr], ac_obj_obj)) + + def check_all(self): + for (nid, node) in self.__get_nodelists(): + lock = long(node["list_lock"]["rlock"]["raw_lock"]["slock"]) + if lock != 0: + print(col_error("unexpected lock value in kmem_list3 {:#x}: {:#x}". 
+ format(long(node.address), lock))) + free_declared = long(node["free_objects"]) + free_counted = self.__check_slabs(node, slab_partial, nid) + free_counted += self.__check_slabs(node, slab_full, nid) + free_counted += self.__check_slabs(node, slab_free, nid) + if free_declared != free_counted: + print (col_error("free objects mismatch on node %d: declared=%d counted=%d" % + (nid, free_declared, free_counted))) + self.check_array_caches() + +class KmemCaches(CrashBaseClass): + + __symbol_callbacks__ = [ ('slab_caches', 'setup_slab_caches'), + (' cache_chain', 'setup_slab_caches') ] + + kmem_caches = None + kmem_caches_by_addr = None + + @classmethod + def setup_slab_caches(cls, slab_caches): + cls.kmem_caches = dict() + cls.kmem_caches_by_addr = dict() + + list_caches = slab_caches.value() + + for cache in list_for_each_entry(list_caches, KmemCache.kmem_cache_type, + KmemCache.head_name): + name = cache["name"].string() + kmem_cache = KmemCache(name, cache) + + cls.kmem_caches[name] = kmem_cache + cls.kmem_caches_by_addr[long(cache.address)] = kmem_cache + + @export + def kmem_cache_from_addr(cls, addr): + return cls.kmem_caches_by_addr[addr] + + @export + def kmem_cache_from_name(cls, name): + return cls.kmem_caches[name] + + @export + def kmem_cache_get_all(cls): + return cls.kmem_caches.values() + + @export + def slab_from_obj_addr(cls, addr): + page = page_from_addr(addr).compound_head() + if not page.is_slab(): + return None + + return Slab.from_page(page) + From 3c1c0bfbdcac99cd38399ee175371c824cc834b5 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Sun, 10 Feb 2019 00:19:37 +0100 Subject: [PATCH 054/367] types/zone: add checking free pages integrity Allow checking that pages managed as free in a zone really have correct node and zone id's in their struct pages. Signed-off-by: Vlastimil Babka --- crash/types/zone.py | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/crash/types/zone.py b/crash/types/zone.py index 420e9164380..431bf31743b 100644 --- a/crash/types/zone.py +++ b/crash/types/zone.py @@ -5,6 +5,7 @@ from crash.infra import CrashBaseClass, export from crash.util import container_of, find_member_variant, array_for_each import crash.types.node +from crash.types.percpu import get_percpu_var from cpu import for_each_online_cpu from crash.types.list import list_for_each_entry @@ -25,6 +26,30 @@ def is_populated(self): else: return False + def _check_free_area(self, area, is_pcp): + nr_free = 0 + list_array_name = "lists" if is_pcp else "free_list" + for free_list in array_for_each(area[list_array_name]): + for page_obj in list_for_each_entry(free_list, self.page_type, "lru"): + page = crash.types.page.Page.from_obj(page_obj) + nr_free += 1 + if page.get_nid() != self.nid or page.get_zid() != self.zid: + print("page {:#x} misplaced on {} of zone {}:{}, has flags for zone {}:{}". + format(long(page_obj.address), "pcplist" if is_pcp else "freelist", + self.nid, self.zid, page.get_nid(), page.get_zid())) + nr_expected = area["count"] if is_pcp else area["nr_free"] + if nr_free != nr_expected: + print("nr_free mismatch in {} {}: expected {}, counted {}". 
+ format("pcplist" if is_pcp else "area", area.address, + nr_expected, nr_free)) + + def check_free_pages(self): + for area in array_for_each(self.gdb_obj["free_area"]): + self._check_free_area(area, False) + for cpu in for_each_online_cpu(): + pageset = get_percpu_var(self.gdb_obj["pageset"], cpu) + self._check_free_area(pageset["pcp"], True) + class Zones(CrashBaseClass): @export From 8cae84880e578b301fec3392f35a11b6dd873008 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Sun, 10 Feb 2019 00:22:22 +0100 Subject: [PATCH 055/367] types/vmstat: add gathering of vmstat info Allow gathering and reporting vmstat info, including percpu diffs. Signed-off-by: Vlastimil Babka --- crash/types/vmstat.py | 73 +++++++++++++++++++++++++++++++++++++++++++ crash/types/zone.py | 22 +++++++++++++ 2 files changed, 95 insertions(+) create mode 100644 crash/types/vmstat.py diff --git a/crash/types/vmstat.py b/crash/types/vmstat.py new file mode 100644 index 00000000000..438b379f445 --- /dev/null +++ b/crash/types/vmstat.py @@ -0,0 +1,73 @@ +#!/usr/bin/env python +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +import gdb +from crash.infra import CrashBaseClass, export +from crash.util import container_of, find_member_variant +import crash.types.node +from crash.types.percpu import get_percpu_var +from cpu import for_each_online_cpu + +class VmStat(CrashBaseClass): + __types__ = ['enum zone_stat_item', 'enum vm_event_item'] + __type_callbacks__ = [ ('enum zone_stat_item', 'check_enum_type'), + ('enum vm_event_item', 'check_enum_type') ] + + nr_stat_items = None + nr_event_items = None + + vm_stat_names = None + vm_event_names = None + + @classmethod + def check_enum_type(cls, gdbtype): + if gdbtype == cls.enum_zone_stat_item_type: + (items, names) = cls.__populate_names(gdbtype, 'NR_VM_ZONE_STAT_ITEMS') + cls.nr_stat_items = items + cls.vm_stat_names = names + elif gdbtype == cls.enum_vm_event_item_type: + (items, names) = cls.__populate_names(gdbtype, 'NR_VM_EVENT_ITEMS') + cls.nr_event_items = items + cls.vm_event_names = names + else: + raise TypeError("Unexpected type {}".format(gdbtype.name)) + + @classmethod + def __populate_names(cls, enum_type, items_name): + nr_items = enum_type[items_name].enumval + + names = ["__UNKNOWN__"] * nr_items + + for field in enum_type.fields(): + if field.enumval < nr_items: + names[field.enumval] = field.name + + return (nr_items, names) + + @staticmethod + def get_stat_names(): + if VmStat.vm_stat_names is None: + VmStat.vm_stat_names = VmStat.__populate_names( + VmStat.nr_stat_items, "enum zone_stat_item") + return VmStat.vm_stat_names + + @staticmethod + def get_event_names(): + if VmStat.vm_event_names is None: + VmStat.vm_event_names = VmStat.__populate_names( + VmStat.nr_event_items, "enum vm_event_item") + return VmStat.vm_event_names + + @staticmethod + def get_events(): + states_sym = gdb.lookup_global_symbol("vm_event_states") + nr = VmStat.nr_event_items + events = [0L] * nr + + for cpu in for_each_online_cpu(): + states = get_percpu_var(states_sym, cpu) + for item in range(0, nr): + events[item] += long(states["event"][item]) + + return events + diff --git a/crash/types/zone.py b/crash/types/zone.py index 431bf31743b..294879ab7b4 100644 --- a/crash/types/zone.py +++ b/crash/types/zone.py @@ -6,6 +6,7 @@ from crash.util import container_of, find_member_variant, array_for_each import crash.types.node from crash.types.percpu import get_percpu_var +from crash.types.vmstat import VmStat from cpu import for_each_online_cpu from crash.types.list 
import list_for_each_entry @@ -26,6 +27,27 @@ def is_populated(self): else: return False + def get_vmstat(self): + stats = [0L] * VmStat.nr_stat_items + vm_stat = self.gdb_obj["vm_stat"] + + for item in range (0, VmStat.nr_stat_items): + # TODO abstract atomic? + stats[item] = long(vm_stat[item]["counter"]) + return stats + + def add_vmstat_diffs(self, diffs): + for cpu in for_each_online_cpu(): + pageset = get_percpu_var(self.gdb_obj["pageset"], cpu) + vmdiff = pageset["vm_stat_diff"] + for item in range (0, VmStat.nr_stat_items): + diffs[item] += int(vmdiff[item]) + + def get_vmstat_diffs(self): + diffs = [0L] * VmStat.nr_stat_items + self.add_vmstat_diffs(diffs) + return diffs + def _check_free_area(self, area, is_pcp): nr_free = 0 list_array_name = "lists" if is_pcp else "free_list" From 1e985a9e38e432c13aa6e057cfd00971508f9cc9 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Sun, 10 Feb 2019 00:23:20 +0100 Subject: [PATCH 056/367] commands/kmem: wire up slab, zone and vmstat reporting Add (py)kmem command for slab object identification and cache integrity checking, zone info and vmstat reporting. Signed-off-by: Vlastimil Babka --- crash/commands/kmem.py | 165 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 165 insertions(+) create mode 100644 crash/commands/kmem.py diff --git a/crash/commands/kmem.py b/crash/commands/kmem.py new file mode 100644 index 00000000000..e1b67e98892 --- /dev/null +++ b/crash/commands/kmem.py @@ -0,0 +1,165 @@ +#!/usr/bin/env python +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +import gdb +import crash +from crash.commands import CrashCommand, CrashCommandParser +from crash.types.slab import kmem_cache_get_all, kmem_cache_from_name, slab_from_obj_addr +from crash.types.zone import for_each_zone, for_each_populated_zone +from crash.types.vmstat import VmStat +import argparse +import re + +def getValue(sym): + return gdb.lookup_symbol(sym, None)[0].value() + +class KmemCommand(CrashCommand): + """ kernel memory inspection + +NAME + kmem - kernel memory inspection + +SYNOPSIS + kmem addr - try to find addr within kmem caches + kmem -s [slabname] - check consistency of single or all kmem cache + kmem -z - report zones + kmem -V - report vmstats + +DESCRIPTION + This command currently offers very basic kmem cache query and checking. + """ + + def __init__(self, name): + parser = CrashCommandParser(prog=name) + + group = parser.add_mutually_exclusive_group() + group.add_argument('-s', action='store_true', default=False) + group.add_argument('-z', action='store_true', default=False) + group.add_argument('-V', action='store_true', default=False) + + parser.add_argument('arg', nargs=argparse.REMAINDER) + + parser.format_usage = lambda : "kmem [-s] [addr | slabname]\n" + super(KmemCommand, self).__init__(name, parser) + + def execute(self, args): + if args.z: + self.print_zones() + return + elif args.V: + self.print_vmstats() + return + elif args.s: + if args.arg: + cache_name = args.arg[0] + print "Checking kmem cache " + cache_name + cache = kmem_cache_from_name(cache_name) + cache.check_all() + else: + print "Checking all kmem caches..." + for cache in kmem_cache_get_all(): + print cache.name + cache.check_all() + + print "Checking done." + return + + if not args.arg: + print "Nothing to do." + return + + addr = long(args.arg[0], 0) + slab = slab_from_obj_addr(addr) + + if not slab: + print "Address not found in any kmem cache." 
+ return + + obj = slab.contains_obj(addr) + name = slab.kmem_cache.name + + if obj[0]: + print ("ALLOCATED object %x from slab %s" % (obj[1], name)) + else: + if obj[1] == 0L: + print ("Address on slab %s but not within valid object slot" + % name) + elif not obj[2]: + print ("FREE object %x from slab %s" % (obj[1], name)) + else: + ac = obj[2] + if ac["ac_type"] == "percpu": + ac_desc = "cpu %d cache" % ac["nid_tgt"] + elif ac["ac_type"] == "shared": + ac_desc = "shared cache on node %d" % ac["nid_tgt"] + elif ac["ac_type"] == "alien": + ac_desc = "alien cache of node %d for node %d" % (ac["nid_src"], ac["nid_tgt"]) + else: + print "unexpected array cache type" + print ac + return + + print ("FREE object %x from slab %s (in %s)" % + (obj[1], name, ac_desc)) + + def __print_vmstat(self, vmstat, diffs): + vmstat_names = VmStat.get_stat_names(); + just = max(map(len, vmstat_names)) + nr_items = VmStat.nr_stat_items + + vmstat = [sum(x) for x in zip(vmstat, diffs)] + + for i in range(0, nr_items): + print("%s: %d (%d)" % (vmstat_names[i].rjust(just), + vmstat[i], diffs[i])) + + def print_vmstats(self): + print " VM_STAT:" + #TODO put this... where? + snr_items = VmStat.nr_stat_items + + stats = [0L] * nr_items + vm_stat = getValue("vm_stat") + + for item in range (0, nr_items): + # TODO abstract atomic? + stats[item] = long(vm_stat[item]["counter"]) + + diffs = [0L] * nr_items + + for zone in for_each_populated_zone(): + zone.add_vmstat_diffs(diffs) + + self.__print_vmstat(stats, diffs) + + print + print " VM_EVENT_STATES:" + + vm_events = VmStat.get_events() + names = VmStat.get_event_names() + just = max(map(len, names)) + + for name, val in zip(names, vm_events): + print("%s: %d" % (name.rjust(just), val)) + + def print_zones(self): + for zone in for_each_zone(): + zone_struct = zone.gdb_obj + + print("NODE: %d ZONE: %d ADDR: %x NAME: \"%s\"" % + (zone_struct["node"], zone.zid, zone_struct.address, + zone_struct["name"].string())) + + if not zone.is_populated(): + print " [unpopulated]" + print + continue + + print " VM_STAT:" + vmstat = zone.get_vmstat() + diffs = zone.get_vmstat_diffs() + self.__print_vmstat(vmstat, diffs) + + print + +KmemCommand("kmem") From f6172c8eed9963c7ffbbf8c500bc321d39a31f72 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 22 Apr 2019 22:09:26 -0400 Subject: [PATCH 057/367] Update README.md to describe WIP development --- README.md | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 122cfdd9139..7ba53095c5f 100644 --- a/README.md +++ b/README.md @@ -5,10 +5,15 @@ You'll need: * [libkdumpfile](https://github.com/ptesarik/libkdumpfile) * [gdb-python](https://github.com/jeffmahoney/gdb-python/tree/gdb-8.1-suse-target) +For the latest development efforts: +* [gdb-python 'master-suse-target' branch](https://github.com/jeffmahoney/gdb-python/tree/master-suse-target) +configured with `--with-python=/usr/bin/python3` +* [crash-python 'next' branch](https://github.com/jeffmahoney/crash-python/tree/next) + Packages for SUSE-created releases are available on the [Open Build Service](https://download.opensuse.org/repositories/home:/jeff_mahoney:/crash-python/). 
Crash-python requires the following to run properly: -- The complete debuginfo for the kernel to be debug, including modules +- The complete debuginfo for the kernel to be debugged, including modules - The ELF images for the kernel and all modules - The vmcore dump image from the crashed system From c817a3528c485055c0e29628fc403ddecd8005d4 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 22 Apr 2019 18:14:32 -0400 Subject: [PATCH 058/367] crash.commands.kmem: fix typo (snr_items -> nr_items) Commit a266495a1cc (commands/kmem: wire up slab, zone and vmstat reporting) contained a typo where 'nr_items' was referenced as 'snr_items'. Signed-off-by: Jeff Mahoney --- crash/commands/kmem.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crash/commands/kmem.py b/crash/commands/kmem.py index e1b67e98892..2cec043537f 100644 --- a/crash/commands/kmem.py +++ b/crash/commands/kmem.py @@ -116,7 +116,7 @@ def __print_vmstat(self, vmstat, diffs): def print_vmstats(self): print " VM_STAT:" #TODO put this... where? - snr_items = VmStat.nr_stat_items + nr_items = VmStat.nr_stat_items stats = [0L] * nr_items vm_stat = getValue("vm_stat") From dfc1b7af1adb4d38869d8525fc5b42546ee8875f Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 22 Apr 2019 18:14:32 -0400 Subject: [PATCH 059/367] crash.types.zone: fix typo Commit 5a4109b3a50 (types/zone: add checking free pages integrity) contained a typo where a comma found its way into an object chain instead of a period. Signed-off-by: Jeff Mahoney --- crash/types/zone.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crash/types/zone.py b/crash/types/zone.py index 294879ab7b4..4095b4cee92 100644 --- a/crash/types/zone.py +++ b/crash/types/zone.py @@ -76,7 +76,7 @@ class Zones(CrashBaseClass): @export def for_each_zone(cls): - for node in crash.types.node,for_each_node(): + for node in crash.types.node.for_each_node(): for zone in node.for_each_zone(): yield zone From 95a6f7403a2fed7b7cc7e72f8f20613ebccfb053 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 22 Apr 2019 18:14:32 -0400 Subject: [PATCH 060/367] crash.types.slab: handle missing cache names more gracefully If a slab cache doesn't exist, we'll throw a KeyError exception. Let's return None instead. Signed-off-by: Jeff Mahoney --- crash/commands/kmem.py | 3 +++ crash/types/slab.py | 15 +++++++++++---- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/crash/commands/kmem.py b/crash/commands/kmem.py index 2cec043537f..306d5ac1469 100644 --- a/crash/commands/kmem.py +++ b/crash/commands/kmem.py @@ -54,6 +54,9 @@ def execute(self, args): cache_name = args.arg[0] print "Checking kmem cache " + cache_name cache = kmem_cache_from_name(cache_name) + if cache is None: + print "Cache {} not found.".format(cache_name) + return cache.check_all() else: print "Checking all kmem caches..." 
diff --git a/crash/types/slab.py b/crash/types/slab.py index 4b3f8a8426d..819da2878e0 100644 --- a/crash/types/slab.py +++ b/crash/types/slab.py @@ -617,8 +617,9 @@ def setup_slab_caches(cls, slab_caches): list_caches = slab_caches.value() - for cache in list_for_each_entry(list_caches, KmemCache.kmem_cache_type, - KmemCache.head_name): + for cache in list_for_each_entry(list_caches, + KmemCache.kmem_cache_type, + KmemCache.head_name): name = cache["name"].string() kmem_cache = KmemCache(name, cache) @@ -627,11 +628,17 @@ def setup_slab_caches(cls, slab_caches): @export def kmem_cache_from_addr(cls, addr): - return cls.kmem_caches_by_addr[addr] + try: + return cls.kmem_caches_by_addr[addr] + except KeyError: + return None @export def kmem_cache_from_name(cls, name): - return cls.kmem_caches[name] + try: + return cls.kmem_caches[name] + except KeyError: + return None @export def kmem_cache_get_all(cls): From 7c8f57e358e4919ccbea8ec8261b622eb3eef1e5 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 22 Apr 2019 18:14:32 -0400 Subject: [PATCH 061/367] crash.types.slab: handle different locking implementations more gracefully The slab debugging code tries to diagnose locking issues but the support is only for an older x86 kernel. Skip it with a message if that's not the version we're using. Signed-off-by: Jeff Mahoney --- crash/types/slab.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/crash/types/slab.py b/crash/types/slab.py index 819da2878e0..e55d5df3a57 100644 --- a/crash/types/slab.py +++ b/crash/types/slab.py @@ -589,10 +589,14 @@ def check_array_caches(self): def check_all(self): for (nid, node) in self.__get_nodelists(): - lock = long(node["list_lock"]["rlock"]["raw_lock"]["slock"]) - if lock != 0: - print(col_error("unexpected lock value in kmem_list3 {:#x}: {:#x}". - format(long(node.address), lock))) + try: + # This is version and architecture specific + lock = long(node["list_lock"]["rlock"]["raw_lock"]["slock"]) + if lock != 0: + print(col_error("unexpected lock value in kmem_list3 {:#x}: {:#x}". + format(long(node.address), lock))) + except gdb.error: + print("Can't check lock state -- locking implementation unknown.") free_declared = long(node["free_objects"]) free_counted = self.__check_slabs(node, slab_partial, nid) free_counted += self.__check_slabs(node, slab_full, nid) From 8556f5505a1bd340c2137d687969562eb61c8bed Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 22 Apr 2019 18:14:32 -0400 Subject: [PATCH 062/367] crash.commands.kmem: handle different vmstat implementation more gracefully Newer kernels have a different way to do vmstat that the existing code doesn't handle. We should fail gracefully instead of throwing an exception when we can't find our structures. Signed-off-by: Jeff Mahoney --- crash/commands/kmem.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/crash/commands/kmem.py b/crash/commands/kmem.py index 306d5ac1469..33d80d2fe99 100644 --- a/crash/commands/kmem.py +++ b/crash/commands/kmem.py @@ -117,12 +117,16 @@ def __print_vmstat(self, vmstat, diffs): vmstat[i], diffs[i])) def print_vmstats(self): + try: + vm_stat = getValue("vm_stat") + except AttributeError: + raise gdb.GdbError("Support for new-style vmstat is unimplemented.") + print " VM_STAT:" #TODO put this... where? nr_items = VmStat.nr_stat_items stats = [0L] * nr_items - vm_stat = getValue("vm_stat") for item in range (0, nr_items): # TODO abstract atomic? 
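The failure mode that patch 062 guards against comes from getValue(): gdb.lookup_symbol() returns (None, False) when a symbol such as vm_stat is absent, so calling .value() on the result raises AttributeError, which the new try/except turns into a gdb.GdbError. A minimal standalone sketch of the same probe-and-fail-gracefully pattern, assuming a gdb Python session with kernel debuginfo loaded (lookup_global_value and print_legacy_vmstat are illustrative names, not part of the series; "vm_stat" is the old-style per-zone counter array used above):

import gdb

def lookup_global_value(name):
    # gdb.lookup_symbol() returns a (symbol, is_field_of_this) tuple;
    # the symbol slot is None when the name is not present in the kernel.
    sym = gdb.lookup_symbol(name, None)[0]
    if sym is None:
        return None
    return sym.value()

def print_legacy_vmstat():
    # Older kernels expose the zone counters as a global 'vm_stat' array;
    # newer kernels restructure the counters, so the symbol may be missing.
    vm_stat = lookup_global_value("vm_stat")
    if vm_stat is None:
        raise gdb.GdbError("Support for new-style vmstat is unimplemented.")
    # ... sum and print the per-item counters as print_vmstats() does above ...
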
From b029ba17af0a9fdb821e8a6d770abb6e381a8d2f Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Wed, 19 Sep 2018 10:50:18 +0200 Subject: [PATCH 063/367] crash.subsystem.filesystem: fix decoder initialization The decoder registration for crash.subsystem.filesystem was incomplete. revisit this --- crash/subsystem/filesystem/__init__.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/crash/subsystem/filesystem/__init__.py b/crash/subsystem/filesystem/__init__.py index bdcf694946f..3263ab5c887 100644 --- a/crash/subsystem/filesystem/__init__.py +++ b/crash/subsystem/filesystem/__init__.py @@ -16,9 +16,10 @@ class FileSystem(CrashBaseClass): __types__ = [ 'struct dio *', 'struct buffer_head *' ] __symbol_callbacks__ = [ - ('dio_bio_end', '_register_dio_bio_end'), + ('dio_bio_end_io', '_register_dio_bio_end'), ('dio_bio_end_aio', '_register_dio_bio_end'), - ('mpage_end_io', '_register_mpage_end_io') ] + ('mpage_end_io', '_register_mpage_end_io'), + ('end_bio_bh_io_sync', '_register_end_bio_bh_io_sync') ] buffer_head_decoders = {} @@ -34,6 +35,10 @@ def _register_dio_bio_end(cls, sym): def _register_mpage_end_io(cls, sym): block.register_bio_decoder(sym, cls.decode_mpage) + @classmethod + def _register_end_bio_bh_io_sync(cls, sym): + block.register_bio_decoder(sym, cls.decode_bio_buffer_head) + @export @staticmethod def super_fstype(sb): From 0cf5987c45e0375034d7f487371cb1c077fe7b07 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 22 Apr 2019 18:14:32 -0400 Subject: [PATCH 064/367] crash: require python 3.6 Convert the project to use Python 3.6 syntax: - print() as function - except as - absolute imports - no more __future__ imports - int() instead of long() - no keying off of sys.version.major This also enables the use of type checking and f-strings but those conversions will happen separately. 
Signed-off-by: Jeff Mahoney --- Makefile | 2 +- contrib/lsbtrfs-cmd.py | 4 +- contrib/mods.py | 2 +- contrib/stuck-requests.py | 10 ++-- contrib/xfs-analyze.py | 16 ++--- contrib/xfs-dump-ail.py | 10 ++-- crash.sh | 3 +- crash/__init__.py | 4 -- crash/addrxlat.py | 15 +---- crash/arch/__init__.py | 6 +- crash/arch/x86_64.py | 10 +--- crash/cache/__init__.py | 4 -- crash/cache/slab.py | 2 +- crash/cache/syscache.py | 18 ++---- crash/cache/tasks.py | 4 -- crash/cache/vm.py | 4 -- crash/commands/__init__.py | 4 -- crash/commands/dmesg.py | 14 +---- crash/commands/help.py | 4 -- crash/commands/kmem.py | 52 ++++++++-------- crash/commands/mount.py | 10 +--- crash/commands/ps.py | 12 +--- crash/commands/syscmd.py | 4 -- crash/commands/task.py | 4 -- crash/commands/vtop.py | 4 +- crash/exceptions.py | 4 -- crash/infra/__init__.py | 8 +-- crash/infra/callback.py | 4 -- crash/infra/lookup.py | 12 +--- crash/kdump/target.py | 9 +-- crash/kernel.py | 23 +++---- crash/session.py | 4 -- crash/subsystem/filesystem/__init__.py | 14 ++--- crash/subsystem/filesystem/btrfs.py | 4 -- crash/subsystem/filesystem/ext3.py | 20 ++----- crash/subsystem/filesystem/mount.py | 10 +--- crash/subsystem/storage/__init__.py | 16 ++--- crash/subsystem/storage/blocksq.py | 12 +--- crash/subsystem/storage/device_mapper.py | 18 ++---- crash/types/bitmap.py | 2 +- crash/types/classdev.py | 10 +--- crash/types/cpu.py | 10 +--- crash/types/klist.py | 8 --- crash/types/list.py | 28 ++++----- crash/types/node.py | 10 ++-- crash/types/page.py | 30 +++++----- crash/types/percpu.py | 32 ++++------ crash/types/slab.py | 76 ++++++++++++------------ crash/types/task.py | 37 +++++------- crash/types/vmstat.py | 8 +-- crash/types/zone.py | 14 ++--- crash/util.py | 15 ++--- setup.py | 8 +-- test-all.sh | 4 +- tests/test_infra.py | 4 -- tests/test_infra_lookup.py | 4 -- tests/test_list.py | 4 -- tests/test_objfile_callbacks.py | 4 -- tests/test_percpu.py | 10 +--- tests/test_syscache.py | 7 +-- tests/test_syscmd.py | 9 +-- tests/test_target.py | 4 -- tests/test_util.py | 5 -- tests/unittest-bootstrap.py | 4 -- 64 files changed, 224 insertions(+), 503 deletions(-) diff --git a/Makefile b/Makefile index 8d041e75d8c..457f4204412 100644 --- a/Makefile +++ b/Makefile @@ -35,7 +35,7 @@ man-install: man $(INSTALL) -m 644 $(GZ_MAN1) $(DESTDIR)$(man1dir) install: man-install - python setup.py install + python3 setup.py install lint: lint3 pylint --rcfile tests/pylintrc -r n crash diff --git a/contrib/lsbtrfs-cmd.py b/contrib/lsbtrfs-cmd.py index aafc85b97eb..02159d8216f 100644 --- a/contrib/lsbtrfs-cmd.py +++ b/contrib/lsbtrfs-cmd.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/python3 # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: path = "/lib/modules/4.4.20-6.gd2e08c5-default/kernel/fs/btrfs/btrfs.ko" @@ -37,7 +37,7 @@ def execute(self, argv): if sb['s_type']['name'].string() == "btrfs": fs_info = gdb.Value(sb['s_fs_info']).cast(btrfs_fs_info_type.pointer()) - u = long(0) + u = 0 for i in range(0, 16): u <<= 8 u += int(fs_info['fsid'][i]) diff --git a/contrib/mods.py b/contrib/mods.py index 2fff7c6e0ff..22eb0df9d91 100644 --- a/contrib/mods.py +++ b/contrib/mods.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/python3 # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: from crash.types.list import list_for_each_entry diff --git a/contrib/stuck-requests.py b/contrib/stuck-requests.py index 17c5eaaf75e..78b45501ffb 100644 --- a/contrib/stuck-requests.py +++ b/contrib/stuck-requests.py @@ -1,4 +1,4 @@ 
-#!/usr/bin/env python +#!/usr/bin/python3 # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: # bsc#1031358 @@ -21,12 +21,12 @@ count = 0 for r in for_each_request_in_queue(b['queue']): age_in_jiffies = kernel.jiffies - r['start_time'] - age = float(long(kernel.jiffies_to_msec(age_in_jiffies))) / 1000 + age = float(int(kernel.jiffies_to_msec(age_in_jiffies))) / 1000 if count == 0: print name if r['bio']: print "{}: {:x} request: age={}s, bio chain".format( - count, long(r.address), age, long(r['bio'])) + count, int(r.address), age, int(r['bio'])) n=0 for entry in for_each_bio_in_stack(r['bio']): print " {}: {}".format(n, entry['description']) @@ -34,10 +34,10 @@ else: if r['end_io'] == flush_end_io: print "{}: {:x} request: age={}s, pending flush request".format( - count, long(r.address), age) + count, int(r.address), age) else: print "{}: {:x} request: start={}, undecoded".format( - count, long(r.address), age) + count, int(r.address), age) count += 1 print diff --git a/contrib/xfs-analyze.py b/contrib/xfs-analyze.py index 09995c86f9b..62a9a30e21f 100644 --- a/contrib/xfs-analyze.py +++ b/contrib/xfs-analyze.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/python3 # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: # bsc#1025860 @@ -20,9 +20,9 @@ RWSEM_ACTIVE_MASK = 0xffffffffL RWSEM_UNLOCKED_VALUE = 0 RWSEM_ACTIVE_BIAS = 1 -RWSEM_WAITING_BIAS = 0xffffffff00000000L +RWSEM_WAITING_BIAS = 0xffffffff00000000 RWSEM_ACTIVE_READ_BIAS = 1 -RWSEM_ACTIVE_WRITE_BIAS = 0xffffffff00000001L +RWSEM_ACTIVE_WRITE_BIAS = 0xffffffff00000001 def inode_paths(inode): for dentry in list_for_each_entry(inode['i_dentry'], dentry_type, ''): @@ -35,7 +35,7 @@ def inode_paths(inode): yield '/'.join(names) def rwsem_read_trylock(rwsem): - count = long(rwsem['count']) & 0xffffffffffffffffL + count = int(rwsem['count']) & 0xffffffffffffffffL if count == 0: return True if count & RWSEM_ACTIVE_WRITE_BIAS: @@ -55,10 +55,10 @@ def check_item(item): inode = iitem['ili_inode']['i_vnode'].address # print "".format(inode) print oct(int(inode['i_mode'])) - if long(inode) in locked_inodes: + if int(inode) in locked_inodes: print "in AIL multiple times" else: - locked_inodes[long(inode)] = iitem['ili_inode'] + locked_inodes[int(inode)] = iitem['ili_inode'] # for path in inode_paths(inode): # print path return 2 @@ -154,8 +154,8 @@ def check_item(item): print f inode = None checked += 1 - if long(inode) in locked_inodes: - print "PID {} inode {}".format(thread.ptid, hex(long(inode))) + if int(inode) in locked_inodes: + print "PID {} inode {}".format(thread.ptid, hex(int(inode))) dead += 1 break diff --git a/contrib/xfs-dump-ail.py b/contrib/xfs-dump-ail.py index 049801f3673..17b8643b672 100644 --- a/contrib/xfs-dump-ail.py +++ b/contrib/xfs-dump-ail.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/python3 # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: from crash.types.list import list_for_each_entry from crash.util import container_of @@ -44,7 +44,7 @@ def xfs_for_each_ail_log_item(mp): def xfs_for_each_ail_log_item_typed(mp): for item in for_each_xfs_ail_item(mp): - li_type = long(item['li_type']) + li_type = int(item['li_type']) if li_type == XFS_LI_BUF: yield container_of(item, xfs_buf_log_item_type, 'bli_item') elif li_type == XFS_LI_INODE: @@ -68,9 +68,9 @@ def xfs_for_each_ail_log_item_typed(mp): for item in xfs_for_each_ail_log_item_typed(mp): if item.type == xfs_buf_log_item_type: buf = item['bli_buf'] - print "xfs_buf @ {:x} blockno={}".format(long(buf), buf['b_bn']) + print 
"xfs_buf @ {:x} blockno={}".format(int(buf), buf['b_bn']) elif item.type == xfs_inode_log_item_type: xfs_inode = item['ili_inode'] - print "inode @ {:x}".format(long(xfs_inode['i_vnode'].address)) + print "inode @ {:x}".format(int(xfs_inode['i_vnode'].address)) else: - print "{} @ {:x}".format(item.type, long(item.address)) + print "{} @ {:x}".format(item.type, int(item.address)) diff --git a/crash.sh b/crash.sh index 121c77b6ab9..cd47cca23eb 100755 --- a/crash.sh +++ b/crash.sh @@ -96,7 +96,7 @@ DIR="$(dirname $0)" if [ -e "$DIR/setup.py" ]; then pushd $DIR > /dev/null rm -rf build/lib/crash - python setup.py build > /dev/null + python3 setup.py build > /dev/null echo "python sys.path.insert(0, '$DIR/build/lib')" >> $GDBINIT popd > /dev/null fi @@ -119,7 +119,6 @@ set height 0 set print pretty on python -from __future__ import print_function import sys import traceback try: diff --git a/crash/__init__.py b/crash/__init__.py index 383d1cccd9f..59a03f1b8f0 100644 --- a/crash/__init__.py +++ b/crash/__init__.py @@ -1,10 +1,6 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - # Perform some sanity checks to ensure that we can actually work import gdb diff --git a/crash/addrxlat.py b/crash/addrxlat.py index 4f7569c2fb1..ee6b0d4fa6b 100644 --- a/crash/addrxlat.py +++ b/crash/addrxlat.py @@ -1,16 +1,7 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import absolute_import -from __future__ import print_function -from __future__ import division - import gdb -import sys - -if sys.version_info.major >= 3: - long = int - import addrxlat from crash.infra import CrashBaseClass, export from crash.cache.syscache import utsname @@ -27,7 +18,7 @@ def cb_sym(self, symtype, *args): if symtype == addrxlat.SYM_VALUE: ms = gdb.lookup_minimal_symbol(args[0]) if ms is not None: - return long(ms.value().address) + return int(ms.value().address) elif symtype == addrxlat.SYM_SIZEOF: sym = gdb.lookup_symbol(args[0], None)[0] if sym is not None: @@ -43,10 +34,10 @@ def cb_sym(self, symtype, *args): return super(TranslationContext, self).cb_sym(symtype, *args) def cb_read32(self, faddr): - return long(gdb.Value(faddr.addr).cast(self.uint32_ptr).dereference()) + return int(gdb.Value(faddr.addr).cast(self.uint32_ptr).dereference()) def cb_read64(self, faddr): - return long(gdb.Value(faddr.addr).cast(self.uint64_ptr).dereference()) + return int(gdb.Value(faddr.addr).cast(self.uint64_ptr).dereference()) class CrashAddressTranslation(CrashBaseClass): def __init__(self): diff --git a/crash/arch/__init__.py b/crash/arch/__init__.py index c1cf530c27d..a1eb80c9364 100644 --- a/crash/arch/__init__.py +++ b/crash/arch/__init__.py @@ -1,10 +1,6 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import absolute_import -from __future__ import print_function -from __future__ import division - import gdb class CrashArchitecture(object): @@ -48,7 +44,7 @@ def __init__(self, ii, address): def __iter__(self): return self - def next(self): + def __next__(self): frame = next(self.input_iterator) if frame.inferior_frame().pc() < self.address: diff --git a/crash/arch/x86_64.py b/crash/arch/x86_64.py index 70011b5edae..b025c0672c7 100644 --- a/crash/arch/x86_64.py +++ b/crash/arch/x86_64.py @@ -1,15 +1,7 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab 
textwidth=79: -from __future__ import absolute_import -from __future__ import print_function -from __future__ import division - import gdb -import sys - -if sys.version_info.major >= 3: - long = int from crash.arch import CrashArchitecture, register, KernelFrameFilter @@ -123,6 +115,6 @@ def fetch_register_scheduled_thread_return(self, thread, register): @classmethod def get_stack_pointer(cls, thread): - return long(thread.registers['rsp'].value) + return int(thread.registers['rsp'].value) register(x86_64Architecture) diff --git a/crash/cache/__init__.py b/crash/cache/__init__.py index a723c911977..502299d3ebf 100644 --- a/crash/cache/__init__.py +++ b/crash/cache/__init__.py @@ -1,10 +1,6 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import absolute_import -from __future__ import print_function -from __future__ import division - import gdb import os diff --git a/crash/cache/slab.py b/crash/cache/slab.py index 406fd2111bb..d0c18d8e58c 100644 --- a/crash/cache/slab.py +++ b/crash/cache/slab.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/python3 # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: import gdb diff --git a/crash/cache/syscache.py b/crash/cache/syscache.py index de173a3007f..ff875a1d5e1 100644 --- a/crash/cache/syscache.py +++ b/crash/cache/syscache.py @@ -1,21 +1,13 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import absolute_import -from __future__ import print_function -from __future__ import division - from builtins import round import gdb import re import zlib -import sys from datetime import timedelta -if sys.version_info.major >= 3: - long = int - from crash.exceptions import DelayedAttributeError from crash.cache import CrashCache from crash.util import array_size @@ -141,7 +133,7 @@ def __init__(self, config): def __getattr__(self, name): if name == 'hz': - self.hz = long(self.config['HZ']) + self.hz = int(self.config['HZ']) return self.hz elif name == 'uptime': return self.get_uptime() @@ -153,7 +145,7 @@ def __getattr__(self, name): def calculate_loadavg(metric): # The kernel needs to do fixed point trickery to calculate # a floating point average. We can just return a float. 
- return round(long(metric) / (1 << 11), 2) + return round(int(metric) / (1 << 11), 2) @staticmethod def format_loadavg(metrics): @@ -187,19 +179,19 @@ def setup_jiffies(cls, symbol): if jiffies_sym: try: - jiffies = long(jiffies_sym.value()) + jiffies = int(jiffies_sym.value()) except gdb.MemoryError: return False cls.adjust_jiffies = True else: - jiffies = long(gdb.lookup_global_symbol('jiffies').value()) + jiffies = int(gdb.lookup_global_symbol('jiffies').value()) cls.adjust_jiffies = False delayed = get_delayed_lookup(cls, 'jiffies').callback(jiffies) def adjusted_jiffies(self): if self.adjust_jiffies: - return self.jiffies -(long(0x100000000) - 300 * self.hz) + return self.jiffies -(int(0x100000000) - 300 * self.hz) else: return self.jiffies diff --git a/crash/cache/tasks.py b/crash/cache/tasks.py index 246012dfc3c..c3909a96a40 100644 --- a/crash/cache/tasks.py +++ b/crash/cache/tasks.py @@ -1,10 +1,6 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import absolute_import -from __future__ import print_function -from __future__ import division - import gdb from crash.cache import CrashCache diff --git a/crash/cache/vm.py b/crash/cache/vm.py index 05e7126ce73..bd9dddde816 100644 --- a/crash/cache/vm.py +++ b/crash/cache/vm.py @@ -1,10 +1,6 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import absolute_import -from __future__ import print_function -from __future__ import division - import gdb from crash.cache import CrashCache diff --git a/crash/commands/__init__.py b/crash/commands/__init__.py index 0447014ae61..aef86ed0c7a 100644 --- a/crash/commands/__init__.py +++ b/crash/commands/__init__.py @@ -1,10 +1,6 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import absolute_import -from __future__ import print_function -from __future__ import division - from crash.infra import CrashBaseClass import gdb diff --git a/crash/commands/dmesg.py b/crash/commands/dmesg.py index dd8701b8bfe..c995122a3db 100644 --- a/crash/commands/dmesg.py +++ b/crash/commands/dmesg.py @@ -1,12 +1,7 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import absolute_import -from __future__ import print_function -from __future__ import division - import gdb -import sys import os.path import argparse import re @@ -14,9 +9,6 @@ from crash.commands import CrashCommand, CrashCommandParser from crash.exceptions import DelayedAttributeError -if sys.version_info.major >= 3: - long = int - class LogTypeException(Exception): pass @@ -181,7 +173,7 @@ def log_from_idx(self, logbuf, idx, dict_needed=False): try: textval = (msg.cast(self.char_p_type) + self.printk_log_p_type.target().sizeof) - text = textval.string(length=long(msg['text_len'])) + text = textval.string(length=int(msg['text_len'])) except UnicodeDecodeError as e: print(e) @@ -197,7 +189,7 @@ def log_from_idx(self, logbuf, idx, dict_needed=False): msgdict = { 'text' : text[0:textlen], - 'timestamp' : long(msg['ts_nsec']), + 'timestamp' : int(msg['ts_nsec']), 'level' : int(msg['level']), 'next' : nextidx, 'dict' : [], @@ -243,7 +235,7 @@ def handle_structured_log(self, args): for msg in self.get_log_msgs(args.d): timestamp = '' if not args.t: - usecs = long(msg['timestamp']) + usecs = int(msg['timestamp']) timestamp = ('[{:5d}.{:06d}] ' .format(usecs // 1000000000, (usecs % 1000000000) // 1000)) diff --git a/crash/commands/help.py 
b/crash/commands/help.py index bcf4a542fd3..3c8ee171243 100644 --- a/crash/commands/help.py +++ b/crash/commands/help.py @@ -1,10 +1,6 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import print_function -from __future__ import absolute_import -from __future__ import division - import gdb import argparse from crash.commands import CrashCommand, CrashCommandParser diff --git a/crash/commands/kmem.py b/crash/commands/kmem.py index 33d80d2fe99..52484419636 100644 --- a/crash/commands/kmem.py +++ b/crash/commands/kmem.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/python3 # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: import gdb @@ -52,43 +52,43 @@ def execute(self, args): elif args.s: if args.arg: cache_name = args.arg[0] - print "Checking kmem cache " + cache_name + print("Checking kmem cache {}".format(cache_name)) cache = kmem_cache_from_name(cache_name) if cache is None: - print "Cache {} not found.".format(cache_name) + print("Cache {} not found.".format(cache_name)) return cache.check_all() else: - print "Checking all kmem caches..." + print("Checking all kmem caches...") for cache in kmem_cache_get_all(): - print cache.name + print(cache.name) cache.check_all() - print "Checking done." + print("Checking done.") return if not args.arg: - print "Nothing to do." + print("Nothing to do.") return - addr = long(args.arg[0], 0) + addr = int(args.arg[0], 0) slab = slab_from_obj_addr(addr) if not slab: - print "Address not found in any kmem cache." + print("Address not found in any kmem cache.") return obj = slab.contains_obj(addr) name = slab.kmem_cache.name if obj[0]: - print ("ALLOCATED object %x from slab %s" % (obj[1], name)) + print("ALLOCATED object %x from slab %s" % (obj[1], name)) else: - if obj[1] == 0L: - print ("Address on slab %s but not within valid object slot" + if obj[1] == 0: + print("Address on slab %s but not within valid object slot" % name) elif not obj[2]: - print ("FREE object %x from slab %s" % (obj[1], name)) + print("FREE object %x from slab %s" % (obj[1], name)) else: ac = obj[2] if ac["ac_type"] == "percpu": @@ -98,11 +98,11 @@ def execute(self, args): elif ac["ac_type"] == "alien": ac_desc = "alien cache of node %d for node %d" % (ac["nid_src"], ac["nid_tgt"]) else: - print "unexpected array cache type" - print ac + print("unexpected array cache type") + print(ac) return - print ("FREE object %x from slab %s (in %s)" % + print("FREE object %x from slab %s (in %s)" % (obj[1], name, ac_desc)) def __print_vmstat(self, vmstat, diffs): @@ -122,25 +122,25 @@ def print_vmstats(self): except AttributeError: raise gdb.GdbError("Support for new-style vmstat is unimplemented.") - print " VM_STAT:" + print(" VM_STAT:") #TODO put this... where? nr_items = VmStat.nr_stat_items - stats = [0L] * nr_items + stats = [0] * nr_items for item in range (0, nr_items): # TODO abstract atomic? 
- stats[item] = long(vm_stat[item]["counter"]) + stats[item] = int(vm_stat[item]["counter"]) - diffs = [0L] * nr_items + diffs = [0] * nr_items for zone in for_each_populated_zone(): zone.add_vmstat_diffs(diffs) self.__print_vmstat(stats, diffs) - print - print " VM_EVENT_STATES:" + print() + print(" VM_EVENT_STATES:") vm_events = VmStat.get_events() names = VmStat.get_event_names() @@ -158,15 +158,15 @@ def print_zones(self): zone_struct["name"].string())) if not zone.is_populated(): - print " [unpopulated]" - print + print(" [unpopulated]") + print() continue - print " VM_STAT:" + print(" VM_STAT:") vmstat = zone.get_vmstat() diffs = zone.get_vmstat_diffs() self.__print_vmstat(vmstat, diffs) - print + print() KmemCommand("kmem") diff --git a/crash/commands/mount.py b/crash/commands/mount.py index 1309483c525..bfe015be7d8 100644 --- a/crash/commands/mount.py +++ b/crash/commands/mount.py @@ -1,15 +1,7 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import print_function -from __future__ import absolute_import -from __future__ import division - import gdb -import sys - -if sys.version_info.major >= 3: - long = int from crash.commands import CrashCommand, CrashCommandParser from crash.subsystem.filesystem.mount import MNT_NOSUID, MNT_NODEV, MNT_NOEXEC @@ -67,7 +59,7 @@ def show_one_mount(self, mnt, args, task=None): path = d_path(mnt, mount_root(mnt)) if args.v: print("{:016x} {:016x} {:<10} {:<16} {}" - .format(long(mnt.address), long(mount_super(mnt)), + .format(int(mnt.address), int(mount_super(mnt)), mount_fstype(mnt), mount_device(mnt), path)) else: print("{} on {} type {}{}" diff --git a/crash/commands/ps.py b/crash/commands/ps.py index ee842744265..737a598f60b 100755 --- a/crash/commands/ps.py +++ b/crash/commands/ps.py @@ -1,16 +1,8 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import print_function -from __future__ import absolute_import -from __future__ import division - import gdb import argparse -import sys - -if sys.version_info.major >= 3: - long = int from crash.commands import CrashCommand, CrashCommandParser from crash.commands import CrashCommandLineError @@ -465,7 +457,7 @@ def task_header(cls, task): if task.active: cpu = task.cpu return template.format(int(task_struct['pid']), - long(task_struct.address), cpu, + int(task_struct.address), cpu, task_struct['comm'].string()) def print_last_run(self, task): @@ -515,7 +507,7 @@ def print_one(self, argv, thread): width = 7 print(line.format(active, int(task_struct['pid']), int(parent_pid), - int(task.get_last_cpu()), long(pointer), + int(task.get_last_cpu()), int(pointer), width, self.task_state_string(task), 0, task.total_vm * 4096 // 1024, task.rss * 4096 // 1024, diff --git a/crash/commands/syscmd.py b/crash/commands/syscmd.py index c157c025a38..06dfbcba2bb 100644 --- a/crash/commands/syscmd.py +++ b/crash/commands/syscmd.py @@ -1,10 +1,6 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import absolute_import -from __future__ import print_function -from __future__ import division - import gdb from crash.commands import CrashCommand, CrashCommandParser from crash.commands import CrashCommandLineError diff --git a/crash/commands/task.py b/crash/commands/task.py index 85b5ba8df0e..ad7defeacf3 100644 --- a/crash/commands/task.py +++ b/crash/commands/task.py @@ -1,10 +1,6 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: 
-from __future__ import print_function -from __future__ import absolute_import -from __future__ import division - import gdb from crash.commands import CrashCommand, CrashCommandParser import crash.cache.tasks diff --git a/crash/commands/vtop.py b/crash/commands/vtop.py index eeae796f0f4..f2fa767e0cc 100644 --- a/crash/commands/vtop.py +++ b/crash/commands/vtop.py @@ -1,8 +1,6 @@ -#!/usr/bin/env python +#!/usr/bin/python3 # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import print_function - import gdb import argparse from crash.commands import CrashCommand, CrashCommandParser diff --git a/crash/exceptions.py b/crash/exceptions.py index f0e79141818..93bfc7df6da 100644 --- a/crash/exceptions.py +++ b/crash/exceptions.py @@ -1,10 +1,6 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - class MissingSymbolError(RuntimeError): """The requested symbol cannot be located.""" pass diff --git a/crash/infra/__init__.py b/crash/infra/__init__.py index 2e1eee8b27e..7a92c877039 100644 --- a/crash/infra/__init__.py +++ b/crash/infra/__init__.py @@ -1,12 +1,6 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import print_function -from __future__ import absolute_import -from __future__ import division - -from future.utils import with_metaclass - import sys import glob import os.path @@ -107,7 +101,7 @@ def setup_exports_for_class(cls, dct): hasattr(decl.__func__, "__export_to_module__"))): setattr(mod, name, export_wrapper(mod, cls, decl)) -class CrashBaseClass(with_metaclass(_CrashBaseMeta)): +class CrashBaseClass(metaclass=_CrashBaseMeta): pass def autoload_submodules(caller, callback=None): diff --git a/crash/infra/callback.py b/crash/infra/callback.py index c9bdbbbf9a3..fa8430209f3 100644 --- a/crash/infra/callback.py +++ b/crash/infra/callback.py @@ -1,10 +1,6 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - import gdb import traceback import sys diff --git a/crash/infra/lookup.py b/crash/infra/lookup.py index b9ef48f5484..77e8013784f 100644 --- a/crash/infra/lookup.py +++ b/crash/infra/lookup.py @@ -1,15 +1,7 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - import gdb -import sys - -if sys.version_info.major >= 3: - long = int import crash.infra from crash.infra.callback import ObjfileEventCallback @@ -187,10 +179,10 @@ def __str__(self): class DelayedMinimalSymval(DelayedMinimalSymbol): """ A DelayedMinimalSymbol that returns the address of the - minimal symbol as a long. + minimal symbol as a int. 
""" def callback(self, value): - self.value = long(value.value().address) + self.value = int(value.value().address) def __str__(self): return "{} attached with {}".format(self.__class__, str(self.cb)) diff --git a/crash/kdump/target.py b/crash/kdump/target.py index 70f1c228e8b..b6ea9997f85 100644 --- a/crash/kdump/target.py +++ b/crash/kdump/target.py @@ -1,10 +1,6 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import absolute_import -from __future__ import print_function -from __future__ import division - import gdb import sys from kdumpfile import kdumpfile, KDUMP_KVADDR @@ -13,9 +9,6 @@ import crash.arch import crash.arch.x86_64 -if sys.version_info.major >= 3: - long = int - class SymbolCallback(object): "addrxlat symbolic callback" @@ -33,7 +26,7 @@ def __call__(self, symtype, *args): if symtype == addrxlat.SYM_VALUE: ms = gdb.lookup_minimal_symbol(args[0]) if ms is not None: - return long(ms.value().address) + return int(ms.value().address) raise addrxlat.NoDataError() class Target(gdb.Target): diff --git a/crash/kernel.py b/crash/kernel.py index f2a8b954ae4..0da18160c78 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -1,10 +1,6 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import absolute_import -from __future__ import print_function -from __future__ import division - import gdb import sys import os.path @@ -19,9 +15,6 @@ from kdumpfile import kdumpfile from elftools.elf.elffile import ELFFile -if sys.version_info.major >= 3: - long = int - LINUX_KERNEL_PID = 1 class CrashKernel(CrashBaseClass): @@ -116,7 +109,7 @@ def attach_vmcore(self, vmcore_filename, debug=False): try: KERNELOFFSET = "linux.vmcoreinfo.lines.KERNELOFFSET" attr = self.vmcore.attr.get(KERNELOFFSET, "0") - self.base_offset = long(attr, base=16) + self.base_offset = int(attr, base=16) except Exception as e: print(e) @@ -133,7 +126,7 @@ def get_module_sections(self, module): name = attr['name'].string() if name == '.text': continue - out.append("-s {} {:#x}".format(name, long(attr['address']))) + out.append("-s {} {:#x}".format(name, int(attr['address']))) return " ".join(out) @@ -154,9 +147,9 @@ def load_modules(self, verbose=False): found = True if 'module_core' in module.type: - addr = long(module['module_core']) + addr = int(module['module_core']) else: - addr = long(module['core_layout']['base']) + addr = int(module['core_layout']['base']) if verbose: print("Loading {} at {:#x}".format(modname, addr)) @@ -237,7 +230,7 @@ def setup_tasks(self): runqueues = gdb.lookup_global_symbol('runqueues') rqs = get_percpu_var(runqueues) - rqscurrs = {long(x["curr"]) : k for (k, x) in rqs.items()} + rqscurrs = {int(x["curr"]) : k for (k, x) in rqs.items()} self.pid_to_task_struct = {} @@ -254,9 +247,9 @@ def setup_tasks(self): for task in tasks: cpu = None regs = None - active = long(task.address) in rqscurrs + active = int(task.address) in rqscurrs if active: - cpu = rqscurrs[long(task.address)] + cpu = rqscurrs[int(task.address)] regs = self.vmcore.attr.cpu[cpu].reg ltask = LinuxTask(task, active, cpu, regs) @@ -264,7 +257,7 @@ def setup_tasks(self): try: thread = gdb.selected_inferior().new_thread(ptid, ltask) except gdb.error as e: - print("Failed to setup task @{:#x}".format(long(task.address))) + print("Failed to setup task @{:#x}".format(int(task.address))) continue thread.name = task['comm'].string() diff --git a/crash/session.py b/crash/session.py index b5433756a00..581a5ce6394 100644 --- 
a/crash/session.py +++ b/crash/session.py @@ -1,10 +1,6 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - import gdb import sys diff --git a/crash/subsystem/filesystem/__init__.py b/crash/subsystem/filesystem/__init__.py index 3263ab5c887..410cdd02ea2 100644 --- a/crash/subsystem/filesystem/__init__.py +++ b/crash/subsystem/filesystem/__init__.py @@ -1,10 +1,6 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import absolute_import -from __future__ import print_function -from __future__ import division - import gdb from crash.util import container_of from crash.infra import CrashBaseClass, export @@ -105,7 +101,7 @@ def decode_dio_bio(cls, bio): this bio - inode (gdb.Value): The struct inode, if any, that owns the file associated with this bio - - offset (long): The offset within the file, in bytes + - offset (int): The offset within the file, in bytes - devname (str): The device name associated with this bio """ dio = bio['bi_private'].cast(cls.dio_p_type) @@ -115,7 +111,7 @@ def decode_dio_bio(cls, bio): chain = { 'description' : "{:x} bio: Direct I/O for {} inode {} on {}".format( - long(bio), fstype, dio['inode']['i_ino'], dev), + int(bio), fstype, dio['inode']['i_ino'], dev), 'bio' : bio, 'dio' : dio, 'fstype' : fstype, @@ -151,7 +147,7 @@ def decode_mpage(cls, bio): chain = { 'description' : "{:x} bio: Multipage I/O: inode {}, type {}, dev {}".format( - long(bio), inode['i_ino'], fstype, + int(bio), inode['i_ino'], fstype, block_device_name(bio['bi_bdev'])), 'bio' : bio, 'fstype' : fstype, @@ -182,7 +178,7 @@ def decode_bio_buffer_head(cls, bio): bh = bio['bi_private'].cast(cls.buffer_head_p_type) chain = { 'description' : - "{:x} bio: Bio representation of buffer head".format(long(bio)), + "{:x} bio: Bio representation of buffer head".format(int(bio)), 'bio' : bio, 'next' : bh, 'decoder' : cls.decode_buffer_head, @@ -215,7 +211,7 @@ def decode_buffer_head(cls, bh): except KeyError: pass desc = "{:x} buffer_head: for dev {}, block {}, size {} (undecoded)".format( - long(bh), block_device_name(bh['b_bdev']), + int(bh), block_device_name(bh['b_bdev']), bh['b_blocknr'], bh['b_size']) chain = { 'description' : desc, diff --git a/crash/subsystem/filesystem/btrfs.py b/crash/subsystem/filesystem/btrfs.py index 3fd60bdf219..1515f00862b 100644 --- a/crash/subsystem/filesystem/btrfs.py +++ b/crash/subsystem/filesystem/btrfs.py @@ -1,10 +1,6 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import absolute_import -from __future__ import print_function -from __future__ import division - import gdb from crash.infra import CrashBaseClass diff --git a/crash/subsystem/filesystem/ext3.py b/crash/subsystem/filesystem/ext3.py index b6d99dbdbdb..71f45e77613 100644 --- a/crash/subsystem/filesystem/ext3.py +++ b/crash/subsystem/filesystem/ext3.py @@ -1,15 +1,7 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - import gdb -import sys - -if sys.version_info.major >= 3: - long = int from crash.infra import CrashBaseClass from crash.util import get_symbol_value @@ -22,7 +14,7 @@ class Ext3(CrashBaseClass): @classmethod def _register_journal_buffer_io_sync(cls, sym): # ext3/ext4 and jbd/jbd2 share names 
but not implementations - b = gdb.block_for_pc(long(sym.value().address)) + b = gdb.block_for_pc(int(sym.value().address)) sym = get_symbol_value('journal_end_buffer_io_sync', b) register_buffer_head_decoder(sym, cls.decode_journal_buffer_io_sync) @@ -47,19 +39,19 @@ def decode_journal_buffer_io_sync(cls, bh): decoded - fstype (str): The name of the file system type being decoded - devname (str): The name of the device the file system uses - - offset (long): The offset, in bytes, of the block described - - length (long): The length of the block described + - offset (int): The offset, in bytes, of the block described + - length (int): The length of the block described """ fstype = "journal on ext3" devname = block_device_name(bh['b_bdev']) chain = { 'bh' : bh, - 'description' : "{:x} buffer_head: {} journal block (jbd) on {}".format(long(bh), fstype, devname), + 'description' : "{:x} buffer_head: {} journal block (jbd) on {}".format(int(bh), fstype, devname), 'fstype' : fstype, 'devname' : devname, - 'offset' : long(bh['b_blocknr']) * long(bh['b_size']), - 'length' : long(bh['b_size']) + 'offset' : int(bh['b_blocknr']) * int(bh['b_size']), + 'length' : int(bh['b_size']) } return chain diff --git a/crash/subsystem/filesystem/mount.py b/crash/subsystem/filesystem/mount.py index be13de2245b..9422dc0bc36 100644 --- a/crash/subsystem/filesystem/mount.py +++ b/crash/subsystem/filesystem/mount.py @@ -1,15 +1,7 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import absolute_import -from __future__ import print_function -from __future__ import division - import gdb -import sys - -if sys.version_info.major >= 3: - long = int from crash.infra import CrashBaseClass, export from crash.subsystem.filesystem import super_fstype @@ -76,7 +68,7 @@ def real_mount(cls, vfsmnt): @export @classmethod def mount_flags(cls, mnt, show_hidden=False): - flags = long(mnt['mnt_flags']) + flags = int(mnt['mnt_flags']) if flags & MNT_READONLY: flagstr = "ro" diff --git a/crash/subsystem/storage/__init__.py b/crash/subsystem/storage/__init__.py index 4fb00f89ba5..d210011edb4 100644 --- a/crash/subsystem/storage/__init__.py +++ b/crash/subsystem/storage/__init__.py @@ -1,15 +1,7 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import print_function -from __future__ import absolute_import -from __future__ import division - import gdb -import sys - -if sys.version_info.major >= 3: - long = int from crash.util import container_of from crash.infra import CrashBaseClass, export @@ -85,7 +77,7 @@ def register_bio_decoder(cls, sym, decoder): sym = sym.value().address elif not isinstance(sym, gdb.Value): raise TypeError("register_bio_decoder expects gdb.Symbol or gdb.Value") - cls.bio_decoders[long(sym)] = decoder + cls.bio_decoders[int(sym)] = decoder @export @classmethod @@ -110,7 +102,7 @@ def for_each_bio_in_stack(cls, bio): Additional items may be available based on the implmentation-specific decoder. 
""" - first = cls.bio_decoders[long(bio['bi_end_io'])](bio) + first = cls.bio_decoders[int(bio['bi_end_io'])](bio) if first: yield first while 'decoder' in first: @@ -140,11 +132,11 @@ def decode_bio(cls, bio): """ try: - return cls.bio_decoders[long(bio['bi_end_io'])](bio) + return cls.bio_decoders[int(bio['bi_end_io'])](bio) except KeyError: chain = { 'description' : "{:x} bio: undecoded bio on {} ({})".format( - long(bio), block_device_name(bio['bi_bdev']), + int(bio), block_device_name(bio['bi_bdev']), bio['bi_end_io']), } return chain diff --git a/crash/subsystem/storage/blocksq.py b/crash/subsystem/storage/blocksq.py index 27785117280..b2b10d5a723 100644 --- a/crash/subsystem/storage/blocksq.py +++ b/crash/subsystem/storage/blocksq.py @@ -1,15 +1,7 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - import gdb -import sys - -if sys.version_info.major >= 3: - long = int from crash.infra import CrashBaseClass, export from crash.types.list import list_for_each_entry @@ -37,7 +29,7 @@ def for_each_request_in_queue(self, queue): gdb.Value: Each struct request contained within the request_queue's queuelist """ - if long(queue) == 0: + if int(queue) == 0: raise NoQueueError("Queue is NULL") return list_for_each_entry(queue['queue_head'], self.request_type, 'queuelist') @@ -56,7 +48,7 @@ def request_age_ms(cls, request): to determine age Returns: - long: Difference between the request's start_time and + int: Difference between the request's start_time and current jiffies in milliseconds. """ return kernel.jiffies_to_msec(kernel.jiffies - request['start_time']) diff --git a/crash/subsystem/storage/device_mapper.py b/crash/subsystem/storage/device_mapper.py index 02a21dff295..0030bf4cd7c 100644 --- a/crash/subsystem/storage/device_mapper.py +++ b/crash/subsystem/storage/device_mapper.py @@ -1,15 +1,7 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import print_function -from __future__ import absolute_import -from __future__ import division - import gdb -import sys - -if sys.version_info.major >= 3: - long = int from crash.infra import CrashBaseClass from crash.subsystem.storage import Storage as block @@ -68,7 +60,7 @@ def decode_clone_bio_rq(cls, bio): # We can pull the related bios together here if required # b = bio['bi_next'] - # while long(b) != 0: + # while int(b) != 0: # b = b['bi_next'] chain = { @@ -77,7 +69,7 @@ def decode_clone_bio_rq(cls, bio): 'next' : info['orig'], 'description' : '{:x} bio: Request-based Device Mapper on {}'.format( - long(bio), block_device_name(bio['bi_bdev'])), + int(bio), block_device_name(bio['bi_bdev'])), 'decoder' : block.decode_bio, } @@ -120,11 +112,11 @@ def decode_clone_bio(cls, bio): chain = { 'description' : "{:x} bio: device mapper clone: {}[{}] -> {}[{}]".format( - long(bio), + int(bio), block_device_name(bio['bi_bdev']), - long(bio['bi_sector']), + int(bio['bi_sector']), block_device_name(next_bio['bi_bdev']), - long(next_bio['bi_sector'])), + int(next_bio['bi_sector'])), 'bio' : bio, 'tio' : tio, 'next' : next_bio, diff --git a/crash/types/bitmap.py b/crash/types/bitmap.py index 1715b4ce812..b7f343c3933 100644 --- a/crash/types/bitmap.py +++ b/crash/types/bitmap.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/python3 # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: import gdb diff --git a/crash/types/classdev.py 
b/crash/types/classdev.py index a64adf743d2..3b82a5a7cb0 100644 --- a/crash/types/classdev.py +++ b/crash/types/classdev.py @@ -1,18 +1,10 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import absolute_import -from __future__ import print_function -from __future__ import division - import gdb -import sys from crash.infra import CrashBaseClass, export from crash.types.klist import klist_for_each_entry -if sys.version_info.major >= 3: - long = int - class ClassDeviceClass(CrashBaseClass): __types__ = [ 'struct device' ] @@ -20,5 +12,5 @@ class ClassDeviceClass(CrashBaseClass): def for_each_class_device(self, class_struct, subtype=None): klist = class_struct['p']['klist_devices'] for dev in klist_for_each_entry(klist, self.device_type, 'knode_class'): - if subtype is None or long(subtype) == long(dev['type']): + if subtype is None or int(subtype) == int(dev['type']): yield dev diff --git a/crash/types/cpu.py b/crash/types/cpu.py index ac84baf009b..a5c63f26d7b 100644 --- a/crash/types/cpu.py +++ b/crash/types/cpu.py @@ -1,19 +1,11 @@ -#!/usr/bin/env python +#!/usr/bin/python3 # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import absolute_import -from __future__ import print_function -from __future__ import division - import gdb -import sys from crash.infra import CrashBaseClass, export from crash.util import container_of, find_member_variant, get_symbol_value from crash.types.bitmap import for_each_set_bit -if sys.version_info.major >= 3: - long = int - # this wraps no particular type, rather it's a placeholder for # functions to iterate over online cpu's etc. class TypesCPUClass(CrashBaseClass): diff --git a/crash/types/klist.py b/crash/types/klist.py index 5b0ad1a52c8..88e9b0a7aad 100644 --- a/crash/types/klist.py +++ b/crash/types/klist.py @@ -1,19 +1,11 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import absolute_import -from __future__ import print_function -from __future__ import division - import gdb from crash.util import container_of from crash.types.list import list_for_each_entry from crash.exceptions import CorruptedError from crash.infra import CrashBaseClass, export -import sys - -if sys.version_info.major >= 3: - long = int class KlistCorruptedError(CorruptedError): pass diff --git a/crash/types/list.py b/crash/types/list.py index 15cdffb9f84..380a367c8d9 100644 --- a/crash/types/list.py +++ b/crash/types/list.py @@ -1,18 +1,10 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import absolute_import -from __future__ import print_function -from __future__ import division - import gdb -import sys from crash.util import container_of from crash.infra import CrashBaseClass, export -if sys.version_info.major >= 3: - long = int - class ListError(Exception): pass @@ -40,7 +32,7 @@ def list_for_each(self, list_head, include_head=False, reverse=False, raise TypeError("Must be struct list_head not {}" .format(str(list_head.type))) fast = None - if long(list_head.address) == 0: + if int(list_head.address) == 0: raise CorruptListError("list_head is NULL pointer.") next_ = 'next' @@ -58,24 +50,24 @@ def list_for_each(self, list_head, include_head=False, reverse=False, try: nxt = list_head[next_] prev = list_head - if long(nxt) == 0: + if int(nxt) == 0: raise CorruptListError("{} pointer is NULL".format(next_)) node = nxt.dereference() except gdb.error as e: raise BufferError("Failed to read 
list_head {:#x}: {}" - .format(long(list_head.address), str(e))) + .format(int(list_head.address), str(e))) while node.address != list_head.address: if exact_cycles: - if long(node.address) in visited: + if int(node.address) in visited: raise ListCycleError("Cycle in list detected.") else: - visited.add(long(node.address)) + visited.add(int(node.address)) try: - if long(prev.address) != long(node[prev_]): + if int(prev.address) != int(node[prev_]): error = ("broken {} link {:#x} -{}-> {:#x} -{}-> {:#x}" - .format(prev_, long(prev.address), next_, long(node.address), - prev_, long(node[prev_]))) + .format(prev_, int(prev.address), next_, int(node.address), + prev_, int(node[prev_]))) pending_exception = CorruptListError(error) if print_broken_links: print(error) @@ -90,7 +82,7 @@ def list_for_each(self, list_head, include_head=False, reverse=False, yield node.address except gdb.error as e: raise BufferError("Failed to read list_head {:#x} in list {:#x}: {}" - .format(long(node.address), long(list_head.address), str(e))) + .format(int(node.address), int(list_head.address), str(e))) try: if fast is not None: @@ -107,7 +99,7 @@ def list_for_each(self, list_head, include_head=False, reverse=False, fast = None prev = node - if long(nxt) == 0: + if int(nxt) == 0: raise CorruptListError("{} -> {} pointer is NULL" .format(node.address, next_)) node = nxt.dereference() diff --git a/crash/types/node.py b/crash/types/node.py index 531b44f3b92..2e59f07f2cf 100644 --- a/crash/types/node.py +++ b/crash/types/node.py @@ -1,11 +1,11 @@ -#!/usr/bin/env python +#!/usr/bin/python3 # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: import gdb from crash.infra import CrashBaseClass, export from crash.util import container_of, find_member_variant, get_symbol_value from crash.types.percpu import get_percpu_var -from bitmap import for_each_set_bit +from crash.types.bitmap import for_each_set_bit import crash.types.zone class TypesNodeUtilsClass(CrashBaseClass): @@ -15,9 +15,9 @@ class TypesNodeUtilsClass(CrashBaseClass): @export def numa_node_id(self, cpu): if gdb.current_target().arch.ident == "powerpc:common64": - return long(self.numa_cpu_lookup_table[cpu]) + return int(self.numa_cpu_lookup_table[cpu]) else: - return long(get_percpu_var(self.numa_node, cpu)) + return int(get_percpu_var(self.numa_node, cpu)) class Node(CrashBaseClass): __types__ = [ 'pg_data_t', 'struct zone' ] @@ -30,7 +30,7 @@ def from_nid(nid): def for_each_zone(self): node_zones = self.gdb_obj["node_zones"] - ptr = long(node_zones[0].address) + ptr = int(node_zones[0].address) (first, last) = node_zones.type.range() for zid in range(first, last + 1): diff --git a/crash/types/page.py b/crash/types/page.py index d2d9b81a392..7f8d3a0321a 100644 --- a/crash/types/page.py +++ b/crash/types/page.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/python3 # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: from math import log, ceil @@ -87,7 +87,7 @@ def pfn_to_page(cls, pfn): offset = section_nr & (cls.SECTIONS_PER_ROOT - 1) section = cls.mem_section[root_idx][offset] - pagemap = section["section_mem_map"] & ~3L + pagemap = section["section_mem_map"] & ~3 return (pagemap.cast(cls.page_type.pointer()) + pfn).dereference() else: return cls.vmemmap[pfn] @@ -101,12 +101,12 @@ def setup_pageflags(cls, gdbtype): if cls.setup_page_type_done and not cls.setup_pageflags_finish_done: cls.setup_pageflags_finish() - cls.PG_slab = 1L << cls.pageflags['PG_slab'] - cls.PG_lru = 1L << cls.pageflags['PG_lru'] + cls.PG_slab = 1 << 
cls.pageflags['PG_slab'] + cls.PG_lru = 1 << cls.pageflags['PG_lru'] @classmethod def setup_vmemmap_base(cls, symbol): - cls.vmemmap_base = long(symbol.value()) + cls.vmemmap_base = int(symbol.value()) # setup_page_type() was first and used the hardcoded initial value, # we have to update if cls.vmemmap is not None: @@ -114,7 +114,7 @@ def setup_vmemmap_base(cls, symbol): @classmethod def setup_directmap_base(cls, symbol): - cls.directmap_base = long(symbol.value()) + cls.directmap_base = int(symbol.value()) @classmethod def setup_zone_type(cls, gdbtype): @@ -138,13 +138,13 @@ def setup_nodes_width(cls, symbol): def setup_pageflags_finish(cls): cls.setup_pageflags_finish_done = True if 'PG_tail' in cls.pageflags.keys(): - cls.PG_tail = 1L << cls.pageflags['PG_tail'] + cls.PG_tail = 1 << cls.pageflags['PG_tail'] cls.is_tail = cls.__is_tail_flag if cls.compound_head_name == 'first_page': cls.__compound_head = cls.__compound_head_first_page if cls.PG_tail is None: - cls.PG_tail = 1L << cls.pageflags['PG_compound'] | 1L << cls.pageflags['PG_reclaim'] + cls.PG_tail = 1 << cls.pageflags['PG_compound'] | 1 << cls.pageflags['PG_reclaim'] cls.is_tail = cls.__is_tail_flagcombo @staticmethod @@ -169,7 +169,7 @@ def is_lru(self): return bool(self.flags & self.PG_lru) def is_anon(self): - mapping = long(self.gdb_obj["mapping"]) + mapping = int(self.gdb_obj["mapping"]) return (mapping & PAGE_MAPPING_ANON) != 0 def get_slab_cache(self): @@ -191,10 +191,10 @@ def get_zid(self): return zid def __compound_head_first_page(self): - return long(self.gdb_obj['first_page']) + return int(self.gdb_obj['first_page']) def __compound_head(self): - return long(self.gdb_obj['compound_head']) - 1 + return int(self.gdb_obj['compound_head']) - 1 def compound_head(self): if not self.is_tail(): @@ -205,7 +205,7 @@ def compound_head(self): def __init__(self, obj, pfn): self.gdb_obj = obj self.pfn = pfn - self.flags = long(obj["flags"]) + self.flags = int(obj["flags"]) class Pages(CrashBaseClass): @@ -220,17 +220,17 @@ def page_from_addr(cls, addr): @export def page_from_gdb_obj(cls, gdb_obj): - pfn = (long(gdb_obj.address) - Page.vmemmap_base) / Page.page_type.sizeof + pfn = (int(gdb_obj.address) - Page.vmemmap_base) / Page.page_type.sizeof return Page(gdb_obj, pfn) @export def for_each_page(): # TODO works only on x86? - max_pfn = long(gdb.lookup_global_symbol("max_pfn").value()) + max_pfn = int(gdb.lookup_global_symbol("max_pfn").value()) for pfn in range(max_pfn): try: yield Page.pfn_to_page(pfn) - except gdb.error, e: + except gdb.error: # TODO: distinguish pfn_valid() and report failures for those? pass diff --git a/crash/types/percpu.py b/crash/types/percpu.py index 6853fcf82a6..c1034d23223 100644 --- a/crash/types/percpu.py +++ b/crash/types/percpu.py @@ -1,20 +1,12 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import absolute_import -from __future__ import print_function -from __future__ import division - import gdb -import sys from crash.infra import CrashBaseClass, export from crash.util import array_size from crash.types.list import list_for_each_entry from crash.exceptions import DelayedAttributeError -if sys.version_info.major >= 3: - long = int - class TypesPerCPUClass(CrashBaseClass): __types__ = [ 'char *', 'struct pcpu_chunk' ] __symvals__ = [ '__per_cpu_offset', 'pcpu_base_addr', 'pcpu_slot', @@ -29,8 +21,8 @@ class TypesPerCPUClass(CrashBaseClass): # TODO: put this somewhere else - arch? 
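    # The KASLR slide is the runtime address of _text minus its unslid
    # address (phys_startup_64 plus the 0xffffffff80000000 kernel map base).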
@classmethod def setup_kaslr_offset(cls): - offset = long(gdb.lookup_minimal_symbol("_text").value().address) - offset -= long(gdb.lookup_minimal_symbol("phys_startup_64").value().address) + offset = int(gdb.lookup_minimal_symbol("_text").value().address) + offset -= int(gdb.lookup_minimal_symbol("phys_startup_64").value().address) offset -= 0xffffffff80000000 cls.kaslr_offset = offset @@ -60,15 +52,15 @@ def __setup_dynamic_offset_cache(cls): used_is_negative = None for slot in range(cls.pcpu_nr_slots): for chunk in list_for_each_entry(cls.pcpu_slot[slot], cls.pcpu_chunk_type, 'list'): - chunk_base = long(chunk["base_addr"]) - long(cls.pcpu_base_addr) + chunk_base = int(chunk["base_addr"]) - int(cls.pcpu_base_addr) # __per_cpu_start is adjusted by KASLR, but dynamic offsets are # not, so we have to subtract the offset - chunk_base += long(cls.__per_cpu_start) - cls.kaslr_offset + chunk_base += int(cls.__per_cpu_start) - cls.kaslr_offset off = 0 start = None _map = chunk['map'] - map_used = long(chunk['map_used']) + map_used = int(chunk['map_used']) # Prior to 3.14 commit 723ad1d90b56 ("percpu: store offsets # instead of lengths in ->map[]"), negative values in map @@ -85,14 +77,14 @@ def __setup_dynamic_offset_cache(cls): if used_is_negative is None: used_is_negative = False for i in range(map_used): - val = long(_map[i]) + val = int(_map[i]) if val < 0: used_is_negative = True break if used_is_negative: for i in range(map_used): - val = long(_map[i]) + val = int(_map[i]) if val < 0: if start is None: start = off @@ -105,7 +97,7 @@ def __setup_dynamic_offset_cache(cls): cls.__add_to_offset_cache(chunk_base, start, off) else: for i in range(map_used): - off = long(_map[i]) + off = int(_map[i]) if off & 1 == 1: off -= 1 if start is None: @@ -115,20 +107,20 @@ def __setup_dynamic_offset_cache(cls): cls.__add_to_offset_cache(chunk_base, start, off) start = None if start is not None: - off = long(_map[map_used]) - 1 + off = int(_map[map_used]) - 1 cls.__add_to_offset_cache(chunk_base, start, off) def __is_percpu_var(self, var): - if long(var) < self.__per_cpu_start: + if int(var) < self.__per_cpu_start: return False v = var.cast(self.char_p_type) - self.__per_cpu_start - return long(v) < self.per_cpu_size + return int(v) < self.per_cpu_size def __is_percpu_var_dynamic(self, var): if self.dynamic_offset_cache is None: self.__setup_dynamic_offset_cache() - var = long(var) + var = int(var) # TODO: we could sort the list... 
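        # Unsorted linear scan of the cached (start, end) ranges covering
        # dynamically allocated per-cpu variables (see the TODO above).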
for (start, end) in self.dynamic_offset_cache: if var >= start and var < end: diff --git a/crash/types/slab.py b/crash/types/slab.py index e55d5df3a57..bbf96a4da91 100644 --- a/crash/types/slab.py +++ b/crash/types/slab.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/python3 # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: import gdb @@ -7,7 +7,7 @@ import traceback from crash.util import container_of, find_member_variant, get_symbol_value from crash.util import safe_get_symbol_value -from percpu import get_percpu_var +from crash.types.percpu import get_percpu_var from crash.infra import CrashBaseClass, export from crash.types.list import list_for_each, list_for_each_entry from crash.types.page import Page, page_from_gdb_obj, page_from_addr @@ -74,12 +74,12 @@ def from_addr(cls, slab_addr, kmem_cache): @classmethod def from_page(cls, page): - kmem_cache_addr = long(page.get_slab_cache()) + kmem_cache_addr = int(page.get_slab_cache()) kmem_cache = kmem_cache_from_addr(kmem_cache_addr) if cls.page_slab: return Slab(page.gdb_obj, kmem_cache) else: - slab_addr = long(page.get_slab_page()) + slab_addr = int(page.get_slab_page()) return Slab.from_addr(slab_addr, kmem_cache) @classmethod @@ -143,10 +143,10 @@ def find_obj(self, addr): bufsize = self.kmem_cache.buffer_size objs_per_slab = self.kmem_cache.objs_per_slab - if long(addr) < self.s_mem: + if int(addr) < self.s_mem: return None - idx = (long(addr) - self.s_mem) / bufsize + idx = (int(addr) - self.s_mem) / bufsize if idx >= objs_per_slab: return None @@ -156,7 +156,7 @@ def contains_obj(self, addr): obj_addr = self.find_obj(addr) if not obj_addr: - return (False, 0L, None) + return (False, 0, None) self.__populate_free() if obj_addr in self.free: @@ -171,7 +171,7 @@ def contains_obj(self, addr): def __error(self, msg, misplaced = False): msg = col_error("cache %s slab %x%s" % (self.kmem_cache.name, - long(self.gdb_obj.address), msg)) + int(self.gdb_obj.address), msg)) self.error = True if misplaced: self.misplaced_error = msg @@ -202,7 +202,7 @@ def check(self, slabtype, nid): max_free = self.kmem_cache.objs_per_slab if self.kmem_cache.off_slab and not Slab.page_slab: - struct_slab_slab = slab_from_obj_addr(long(self.gdb_obj.address)) + struct_slab_slab = slab_from_obj_addr(int(self.gdb_obj.address)) if not struct_slab_slab: self.__error(": OFF_SLAB struct slab is not a slab object itself") else: @@ -221,9 +221,9 @@ def check(self, slabtype, nid): if not struct_slab_obj[0]: self.__error(": OFF_SLAB struct slab is not allocated") print(struct_slab_obj) - elif struct_slab_obj[1] != long(self.gdb_obj.address): + elif struct_slab_obj[1] != int(self.gdb_obj.address): self.__error(": OFF_SLAB struct slab at wrong offset{}".format( - long(self.gdb_obj.address) - struct_slab_obj[1])) + int(self.gdb_obj.address) - struct_slab_obj[1])) if self.inuse + num_free != max_free: self.__error(": inuse=%d free=%d adds up to %d (should be %d)" % @@ -244,7 +244,7 @@ def check(self, slabtype, nid): if nid != slab_nid: self.__error(": slab is on nid %d instead of %d" % (slab_nid, nid)) - print "free objects %d" % num_free + print("free objects %d" % num_free) ac = self.kmem_cache.get_array_caches() last_page_addr = 0 @@ -258,25 +258,25 @@ def check(self, slabtype, nid): self.__error(": failed to get page for object %x" % obj) continue - if long(page.gdb_obj.address) == last_page_addr: + if int(page.gdb_obj.address) == last_page_addr: continue - last_page_addr = long(page.gdb_obj.address) + last_page_addr = int(page.gdb_obj.address) if 
page.get_nid() != nid: self.__error(": obj %x is on nid %d instead of %d" % (obj, page.get_nid(), nid)) if not page.is_slab(): self.__error(": obj %x is not on PageSlab page" % obj) - kmem_cache_addr = long(page.get_slab_cache()) - if kmem_cache_addr != long(self.kmem_cache.gdb_obj.address): + kmem_cache_addr = int(page.get_slab_cache()) + if kmem_cache_addr != int(self.kmem_cache.gdb_obj.address): self.__error(": obj %x is on page where pointer to kmem_cache points to %x instead of %x" % - (obj, kmem_cache_addr, long(self.kmem_cache.gdb_obj.address))) + (obj, kmem_cache_addr, int(self.kmem_cache.gdb_obj.address))) if self.page_slab: continue - slab_addr = long(page.get_slab_page()) + slab_addr = int(page.get_slab_page()) if slab_addr != self.gdb_obj.address: self.__error(": obj %x is on page where pointer to slab wrongly points to %x" % (obj, slab_addr)) @@ -298,7 +298,7 @@ def __init__(self, gdb_obj, kmem_cache, error=False): self.page = page_from_gdb_obj(gdb_obj) else: self.inuse = int(gdb_obj["inuse"]) - self.s_mem = long(gdb_obj["s_mem"]) + self.s_mem = int(gdb_obj["s_mem"]) class KmemCache(CrashBaseClass): __types__ = [ 'struct kmem_cache', 'struct alien_cache' ] @@ -330,7 +330,7 @@ def __get_nodelist(self, node): def __get_nodelists(self): for nid in for_each_nid(): node = self.__get_nodelist(nid) - if long(node) == 0L: + if int(node) == 0: continue yield (nid, node.dereference()) @@ -349,7 +349,7 @@ def __init__(self, name, gdb_obj): self.objs_per_slab = int(gdb_obj["num"]) self.buffer_size = int(gdb_obj[KmemCache.buffer_size_name]) - if long(gdb_obj["flags"]) & 0x80000000: + if int(gdb_obj["flags"]) & 0x80000000: self.off_slab = True self.off_slab_cache = None else: @@ -371,10 +371,10 @@ def __fill_array_cache(self, acache, ac_type, nid_src, nid_tgt): nid_tgt = numa_node_id(nid_tgt) for i in range(avail): - ptr = long(acache["entry"][i]) + ptr = int(acache["entry"][i]) # print(hex(ptr)) if ptr in self.array_caches: - print (col_error("WARNING: array cache duplicity detected!")) + print(col_error("WARNING: array cache duplicity detected!")) else: self.array_caches[ptr] = cache_dict @@ -382,14 +382,14 @@ def __fill_array_cache(self, acache, ac_type, nid_src, nid_tgt): obj_nid = page.get_nid() if obj_nid != nid_tgt: - print (col_error("Object {:#x} in cache {} is on wrong nid {} instead of {}".format( + print(col_error("Object {:#x} in cache {} is on wrong nid {} instead of {}".format( ptr, cache_dict, obj_nid, nid_tgt))) def __fill_alien_caches(self, node, nid_src): alien_cache = node["alien"] # TODO check that this only happens for single-node systems? 
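        # A NULL ->alien pointer means no remote-node array caches were
        # allocated for this cache, so there is nothing to walk.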
- if long(alien_cache) == 0L: + if int(alien_cache) == 0: return for nid in for_each_nid(): @@ -428,7 +428,7 @@ def __fill_all_array_caches(self): # TODO check and report collisions for (nid, node) in self.__get_nodelists(): shared_cache = node["shared"] - if long(shared_cache) != 0: + if int(shared_cache) != 0: self.__fill_array_cache(shared_cache.dereference(), AC_SHARED, nid, nid) self.__fill_alien_caches(node, nid) @@ -455,13 +455,13 @@ def get_slabs_of_type(self, node, slabtype, reverse=False, exact_cycles=False): wrong_list_nodes = dict() for stype in range(3): if stype != slabtype: - wrong_list_nodes[long(node[slab_list_fullname[stype]].address)] = stype + wrong_list_nodes[int(node[slab_list_fullname[stype]].address)] = stype slab_list = node[slab_list_fullname[slabtype]] for list_head in list_for_each(slab_list, reverse=reverse, exact_cycles=exact_cycles): try: - if long(list_head) in wrong_list_nodes.keys(): - wrong_type = wrong_list_nodes[long(list_head)] + if int(list_head) in wrong_list_nodes.keys(): + wrong_type = wrong_list_nodes[int(list_head)] print(col_error("Encountered head of {} slab list while traversing {} slab list, skipping". format(slab_list_name[wrong_type], slab_list_name[slabtype]))) continue @@ -470,14 +470,14 @@ def get_slabs_of_type(self, node, slabtype, reverse=False, exact_cycles=False): except: traceback.print_exc() print("failed to initialize slab object from list_head {:#x}: {}".format( - long(list_head), sys.exc_info()[0])) + int(list_head), sys.exc_info()[0])) continue slab = Slab(gdb_slab, kmem_cache, error = True) yield slab def __check_slab(self, slab, slabtype, nid, errors): - addr = long(slab.gdb_obj.address) + addr = int(slab.gdb_obj.address) free = 0 if slab.error == False: @@ -525,7 +525,7 @@ def ___check_slabs(self, node, slabtype, nid, reverse=False): free += self.__check_slab(slab, slabtype, nid, errors) except Exception as e: print(col_error("Exception when checking slab {:#x}:{}". - format(long(slab.gdb_obj.address), e))) + format(int(slab.gdb_obj.address), e))) traceback.print_exc() slabs += 1 @@ -549,7 +549,7 @@ def __check_slabs(self, node, slabtype, nid): slab_list = node[slab_list_fullname[slabtype]] print("checking {} slab list {:#x}".format(slab_list_name[slabtype], - long(slab_list.address))) + int(slab_list.address))) errors = {'first_ok': None, 'last_ok': None, 'num_ok': 0, 'first_misplaced': None, 'last_misplaced': None, 'num_misplaced': 0} @@ -591,18 +591,18 @@ def check_all(self): for (nid, node) in self.__get_nodelists(): try: # This is version and architecture specific - lock = long(node["list_lock"]["rlock"]["raw_lock"]["slock"]) + lock = int(node["list_lock"]["rlock"]["raw_lock"]["slock"]) if lock != 0: print(col_error("unexpected lock value in kmem_list3 {:#x}: {:#x}". 
- format(long(node.address), lock))) + format(int(node.address), lock))) except gdb.error: print("Can't check lock state -- locking implementation unknown.") - free_declared = long(node["free_objects"]) + free_declared = int(node["free_objects"]) free_counted = self.__check_slabs(node, slab_partial, nid) free_counted += self.__check_slabs(node, slab_full, nid) free_counted += self.__check_slabs(node, slab_free, nid) if free_declared != free_counted: - print (col_error("free objects mismatch on node %d: declared=%d counted=%d" % + print(col_error("free objects mismatch on node %d: declared=%d counted=%d" % (nid, free_declared, free_counted))) self.check_array_caches() @@ -628,7 +628,7 @@ def setup_slab_caches(cls, slab_caches): kmem_cache = KmemCache(name, cache) cls.kmem_caches[name] = kmem_cache - cls.kmem_caches_by_addr[long(cache.address)] = kmem_cache + cls.kmem_caches_by_addr[int(cache.address)] = kmem_cache @export def kmem_cache_from_addr(cls, addr): diff --git a/crash/types/task.py b/crash/types/task.py index 9662363c9fe..f450b528aa0 100644 --- a/crash/types/task.py +++ b/crash/types/task.py @@ -1,21 +1,12 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import print_function -from __future__ import absolute_import -from __future__ import division - import gdb -import sys - -if sys.version_info.major >= 3: - long = int - from crash.util import array_size from crash.infra import CrashBaseClass from crash.infra.lookup import DelayedValue, ClassProperty, get_delayed_lookup -PF_EXITING = long(0x4) +PF_EXITING = 0x4 def get_value(symname): sym = gdb.lookup_symbol(symname, block=None, domain=gdb.SYMBOL_VAR_DOMAIN) @@ -170,9 +161,9 @@ def get_last_cpu(self): return self.thread_info['cpu'] def task_state(self): - state = long(self.task_struct['state']) + state = int(self.task_struct['state']) if self.task_state_has_exit_state: - state |= long(self.task_struct['exit_state']) + state |= int(self.task_struct['exit_state']) return state def maybe_dead(self): @@ -188,7 +179,7 @@ def maybe_dead(self): return (state & known) == 0 def task_flags(self): - return long(self.task_struct['flags']) + return int(self.task_struct['flags']) def is_exiting(self): return self.task_flags() & PF_EXITING @@ -209,8 +200,8 @@ def update_mem_usage(self): return self.rss = self.get_rss() - self.total_vm = long(mm['total_vm']) - self.pgd_addr = long(mm['pgd']) + self.total_vm = int(mm['total_vm']) + self.pgd_addr = int(mm['pgd']) self.mem_valid = True def is_kernel_task(self): @@ -240,17 +231,17 @@ def get_stack_pointer(cls): return fn(self.thread) def get_rss_field(self): - return long(self.task_struct['mm']['rss'].value()) + return int(self.task_struct['mm']['rss'].value()) def get__rss_field(self): - return long(self.task_struct['mm']['_rss'].value()) + return int(self.task_struct['mm']['_rss'].value()) def get_rss_stat_field(self): stat = self.task_struct['mm']['rss_stat']['count'] stat0 = self.task_struct['mm']['rss_stat']['count'][0] rss = 0 for i in range(stat.type.sizeof // stat[0].type.sizeof): - rss += long(stat[i]['counter']) + rss += int(stat[i]['counter']) return rss def get_anon_file_rss_fields(self): @@ -259,9 +250,9 @@ def get_anon_file_rss_fields(self): for name in ['_anon_rss', '_file_rss']: if name in mm_struct_fields: if mm[name].type == self.atomic_long_type: - rss += long(mm[name]['counter']) + rss += int(mm[name]['counter']) else: - rss += long(mm[name]) + rss += int(mm[name]) return rss # The Pythonic way to do this is by generating the 
LinuxTask class @@ -285,13 +276,13 @@ def pick_get_rss(cls): raise RuntimeError("No method to retrieve RSS from task found.") def last_run__last_run(self): - return long(self.task_struct['last_run']) + return int(self.task_struct['last_run']) def last_run__timestamp(self): - return long(self.task_struct['timestamp']) + return int(self.task_struct['timestamp']) def last_run__last_arrival(self): - return long(self.task_struct['sched_info']['last_arrival']) + return int(self.task_struct['sched_info']['last_arrival']) @classmethod def pick_last_run(cls): diff --git a/crash/types/vmstat.py b/crash/types/vmstat.py index 438b379f445..139c3237a26 100644 --- a/crash/types/vmstat.py +++ b/crash/types/vmstat.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/python3 # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: import gdb @@ -6,7 +6,7 @@ from crash.util import container_of, find_member_variant import crash.types.node from crash.types.percpu import get_percpu_var -from cpu import for_each_online_cpu +from crash.types.cpu import for_each_online_cpu class VmStat(CrashBaseClass): __types__ = ['enum zone_stat_item', 'enum vm_event_item'] @@ -62,12 +62,12 @@ def get_event_names(): def get_events(): states_sym = gdb.lookup_global_symbol("vm_event_states") nr = VmStat.nr_event_items - events = [0L] * nr + events = [0] * nr for cpu in for_each_online_cpu(): states = get_percpu_var(states_sym, cpu) for item in range(0, nr): - events[item] += long(states["event"][item]) + events[item] += int(states["event"][item]) return events diff --git a/crash/types/zone.py b/crash/types/zone.py index 4095b4cee92..64f515a5aea 100644 --- a/crash/types/zone.py +++ b/crash/types/zone.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/python3 # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: import gdb @@ -7,7 +7,7 @@ import crash.types.node from crash.types.percpu import get_percpu_var from crash.types.vmstat import VmStat -from cpu import for_each_online_cpu +from crash.types.cpu import for_each_online_cpu from crash.types.list import list_for_each_entry def getValue(sym): @@ -19,7 +19,7 @@ class Zone(CrashBaseClass): def __init__(self, obj, zid): self.gdb_obj = obj self.zid = zid - self.nid = long(obj["node"]) + self.nid = int(obj["node"]) def is_populated(self): if self.gdb_obj["present_pages"] != 0: @@ -28,12 +28,12 @@ def is_populated(self): return False def get_vmstat(self): - stats = [0L] * VmStat.nr_stat_items + stats = [0] * VmStat.nr_stat_items vm_stat = self.gdb_obj["vm_stat"] for item in range (0, VmStat.nr_stat_items): # TODO abstract atomic? - stats[item] = long(vm_stat[item]["counter"]) + stats[item] = int(vm_stat[item]["counter"]) return stats def add_vmstat_diffs(self, diffs): @@ -44,7 +44,7 @@ def add_vmstat_diffs(self, diffs): diffs[item] += int(vmdiff[item]) def get_vmstat_diffs(self): - diffs = [0L] * VmStat.nr_stat_items + diffs = [0] * VmStat.nr_stat_items self.add_vmstat_diffs(diffs) return diffs @@ -57,7 +57,7 @@ def _check_free_area(self, area, is_pcp): nr_free += 1 if page.get_nid() != self.nid or page.get_zid() != self.zid: print("page {:#x} misplaced on {} of zone {}:{}, has flags for zone {}:{}". 
- format(long(page_obj.address), "pcplist" if is_pcp else "freelist", + format(int(page_obj.address), "pcplist" if is_pcp else "freelist", self.nid, self.zid, page.get_nid(), page.get_zid())) nr_expected = area["count"] if is_pcp else area["nr_free"] if nr_free != nr_expected: diff --git a/crash/util.py b/crash/util.py index f63ff66328d..024e57de554 100644 --- a/crash/util.py +++ b/crash/util.py @@ -1,10 +1,6 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import absolute_import -from __future__ import print_function -from __future__ import division - import gdb from crash.infra import CrashBaseClass, export from crash.exceptions import MissingTypeError, MissingSymbolError @@ -228,7 +224,7 @@ def offsetof_type(cls, val, spec, error=True): Returns: Tuple of: - long: The offset of the resolved member + int: The offset of the resolved member gdb.Type: The type of the resolved member Raises: @@ -276,7 +272,7 @@ def offsetof(cls, val, spec, error=True): failures an error Returns: - long: The offset of the resolved member + int: The offset of the resolved member None: The member could not be resolved Raises: @@ -349,7 +345,7 @@ def get_typed_pointer(val, gdbtype): Returns a pointer to the requested type at the given address Args: - val (gdb.Value, str, or long): The address for which to provide + val (gdb.Value, str, or int): The address for which to provide a casted pointer gdbtype (gdb.Type): The type of the pointer to return @@ -365,11 +361,11 @@ def get_typed_pointer(val, gdbtype): .format(gdbtype, val.type)) elif isinstance(val, str): try: - val = long(val, 16) + val = int(val, 16) except TypeError as e: print(e) raise TypeError("string must describe hex address: ".format(e)) - if isinstance(val, long): + if isinstance(val, int): val = gdb.Value(val).cast(gdbtype).dereference() return val @@ -380,4 +376,3 @@ def array_for_each(value): size = array_size(value) for i in range(array_size(value)): yield value[i] - diff --git a/setup.py b/setup.py index e06b693f522..36e66af06eb 100644 --- a/setup.py +++ b/setup.py @@ -1,8 +1,7 @@ -#!/usr/bin/env python +#!/usr/bin/python3 # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import print_function -from __future__ import absolute_import +import sys from setuptools import setup, find_packages @@ -13,8 +12,9 @@ package_data = { '' : [ "*.dist" "*.txt" ], }, + python_requires='>=3.6', - install_requires = [ 'future', 'pyelftools' ], + install_requires = [ 'pyelftools' ], author = "Jeff Mahoney", author_email = "jeffm@suse.com", diff --git a/test-all.sh b/test-all.sh index 5c22baba27c..e8fff15cbad 100755 --- a/test-all.sh +++ b/test-all.sh @@ -1,6 +1,6 @@ #!/bin/sh rm -rf build/lib/crash -python setup.py build +python3 setup.py build make -C tests -gdb -batch -ex "source tests/unittest-bootstrap.py" +crash-python-gdb -batch -ex "source tests/unittest-bootstrap.py" diff --git a/tests/test_infra.py b/tests/test_infra.py index 05c7badf750..3c3c5d50d60 100644 --- a/tests/test_infra.py +++ b/tests/test_infra.py @@ -1,10 +1,6 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import print_function -from __future__ import absolute_import -from __future__ import division - import unittest import gdb diff --git a/tests/test_infra_lookup.py b/tests/test_infra_lookup.py index 5173c2c0892..acb7098fb24 100644 --- a/tests/test_infra_lookup.py +++ b/tests/test_infra_lookup.py @@ -1,9 +1,5 @@ # -*- coding: utf-8 -*- # vim:set 
shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - import unittest import gdb diff --git a/tests/test_list.py b/tests/test_list.py index 3bf31fcf48e..a5efe9081ec 100644 --- a/tests/test_list.py +++ b/tests/test_list.py @@ -1,10 +1,6 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import print_function -from __future__ import absolute_import -from __future__ import division - import unittest import gdb diff --git a/tests/test_objfile_callbacks.py b/tests/test_objfile_callbacks.py index eaa905b6e97..7b0591d8c3b 100644 --- a/tests/test_objfile_callbacks.py +++ b/tests/test_objfile_callbacks.py @@ -1,10 +1,6 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - import unittest import gdb diff --git a/tests/test_percpu.py b/tests/test_percpu.py index 09304261a9b..773c7969449 100644 --- a/tests/test_percpu.py +++ b/tests/test_percpu.py @@ -1,20 +1,12 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import print_function -from __future__ import absolute_import -from __future__ import division - import unittest import gdb -import sys import crash import crash.types.percpu -if sys.version_info.major >= 3: - long = int - class TestPerCPU(unittest.TestCase): def setUp(self): gdb.execute("file tests/test-percpu", to_string=True) @@ -71,7 +63,7 @@ def test_voidp_test(self): for cpu, val in list(crash.types.percpu.get_percpu_var(var).items()): self.assertTrue(val is not None) self.assertTrue(val.type == self.voidp) - self.assertTrue(long(val) == 0xdeadbeef) + self.assertTrue(int(val) == 0xdeadbeef) def test_struct_test_ptr(self): var = gdb.lookup_symbol('ptr_to_struct_test', None)[0] diff --git a/tests/test_syscache.py b/tests/test_syscache.py index fd17b14ac31..2b69b93440e 100644 --- a/tests/test_syscache.py +++ b/tests/test_syscache.py @@ -1,15 +1,10 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import print_function -from __future__ import absolute_import -from __future__ import division - import unittest import gdb import sys -if sys.version_info >= (3, 4): - from importlib import reload +from importlib import reload from crash.exceptions import DelayedAttributeError fake_config = ( diff --git a/tests/test_syscmd.py b/tests/test_syscmd.py index beaa817fe7a..51a5c6e4eef 100644 --- a/tests/test_syscmd.py +++ b/tests/test_syscmd.py @@ -1,17 +1,10 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import print_function -from __future__ import absolute_import -from __future__ import division - import unittest import gdb import sys -if sys.version_info.major >= 3: - from io import StringIO -else: - from cStringIO import StringIO +from io import StringIO from crash.exceptions import MissingSymbolError from crash.commands import CrashCommandLineError diff --git a/tests/test_target.py b/tests/test_target.py index 525b9640f0c..29877ed2bb3 100644 --- a/tests/test_target.py +++ b/tests/test_target.py @@ -1,10 +1,6 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import print_function -from __future__ import absolute_import -from __future__ import division - import unittest 
import gdb import os.path diff --git a/tests/test_util.py b/tests/test_util.py index 6a4c8f37ce3..9abbeb97295 100644 --- a/tests/test_util.py +++ b/tests/test_util.py @@ -1,10 +1,5 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: - -from __future__ import print_function -from __future__ import absolute_import -from __future__ import division - import unittest import gdb diff --git a/tests/unittest-bootstrap.py b/tests/unittest-bootstrap.py index d0a4add4006..54c03ad4998 100644 --- a/tests/unittest-bootstrap.py +++ b/tests/unittest-bootstrap.py @@ -1,10 +1,6 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from __future__ import print_function -from __future__ import absolute_import -from __future__ import division - import sys import os import unittest From 0be3b8662883672b7257ef697639de4faa1f2705 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 17 May 2019 16:13:57 -0400 Subject: [PATCH 065/367] crash: use super() everywhere Python 3 eliminated the need to specify the parent class in the simple case. This commit replaces all uses of super(class, self) with super(). Signed-off-by: Jeff Mahoney --- crash/addrxlat.py | 4 ++-- crash/cache/slab.py | 2 +- crash/cache/vm.py | 2 +- crash/commands/help.py | 2 +- crash/commands/kmem.py | 2 +- crash/commands/mount.py | 2 +- crash/commands/vtop.py | 4 ++-- crash/exceptions.py | 2 +- crash/infra/callback.py | 2 +- crash/infra/lookup.py | 14 +++++++------- crash/kdump/target.py | 4 ++-- crash/types/task.py | 2 +- crash/util.py | 12 ++++++------ 13 files changed, 27 insertions(+), 27 deletions(-) diff --git a/crash/addrxlat.py b/crash/addrxlat.py index ee6b0d4fa6b..5d00e75db27 100644 --- a/crash/addrxlat.py +++ b/crash/addrxlat.py @@ -9,7 +9,7 @@ class TranslationContext(addrxlat.Context): def __init__(self, *args, **kwargs): - super(TranslationContext, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.read_caps = addrxlat.CAPS(addrxlat.KVADDR) self.uint32_ptr = gdb.lookup_type('uint32_t').pointer() self.uint64_ptr = gdb.lookup_type('uint64_t').pointer() @@ -31,7 +31,7 @@ def cb_sym(self, symtype, *args): if sym is not None: return offsetof(sym.type, args[1]) - return super(TranslationContext, self).cb_sym(symtype, *args) + return super().cb_sym(symtype, *args) def cb_read32(self, faddr): return int(gdb.Value(faddr.addr).cast(self.uint32_ptr).dereference()) diff --git a/crash/cache/slab.py b/crash/cache/slab.py index d0c18d8e58c..13270230d86 100644 --- a/crash/cache/slab.py +++ b/crash/cache/slab.py @@ -8,7 +8,7 @@ class CrashCacheSlab(CrashCache): def __init__(self): - super(CrashCacheSlab, self).__init__() + super().__init__() self.populated = False self.kmem_caches = dict() self.kmem_caches_by_addr = dict() diff --git a/crash/cache/vm.py b/crash/cache/vm.py index bd9dddde816..3a0cea75605 100644 --- a/crash/cache/vm.py +++ b/crash/cache/vm.py @@ -6,7 +6,7 @@ from crash.cache import CrashCache class CrashCacheVM(CrashCache): def __init__(self): - super(CrashCacheVM, self).__init__() + super().__init__() def refresh(self): pass diff --git a/crash/commands/help.py b/crash/commands/help.py index 3c8ee171243..3ba87a4e655 100644 --- a/crash/commands/help.py +++ b/crash/commands/help.py @@ -23,7 +23,7 @@ class HelpCommand(CrashCommand): def __init__(self): parser = CrashCommandParser(prog="help") parser.add_argument('args', nargs=argparse.REMAINDER) - super(HelpCommand, self).__init__('help', parser) + super().__init__('help', parser) def execute(self, 
argv): if not argv.args: diff --git a/crash/commands/kmem.py b/crash/commands/kmem.py index 52484419636..514160a9c07 100644 --- a/crash/commands/kmem.py +++ b/crash/commands/kmem.py @@ -40,7 +40,7 @@ def __init__(self, name): parser.add_argument('arg', nargs=argparse.REMAINDER) parser.format_usage = lambda : "kmem [-s] [addr | slabname]\n" - super(KmemCommand, self).__init__(name, parser) + super().__init__(name, parser) def execute(self, args): if args.z: diff --git a/crash/commands/mount.py b/crash/commands/mount.py index bfe015be7d8..0dc08e0aff3 100644 --- a/crash/commands/mount.py +++ b/crash/commands/mount.py @@ -32,7 +32,7 @@ def __init__(self, name): parser.add_argument('-d', action='store_true', default=False) parser.format_usage = lambda : "mount\n" - super(MountCommand, self).__init__(name, parser) + super().__init__(name, parser) def __getattr__(self, name): if name == 'charp': diff --git a/crash/commands/vtop.py b/crash/commands/vtop.py index f2fa767e0cc..6de1e3d340e 100644 --- a/crash/commands/vtop.py +++ b/crash/commands/vtop.py @@ -51,7 +51,7 @@ def value(self): class LinuxNonAutoPGT(LinuxPGT): def address(self): - addr = super(LinuxNonAutoPGT, self).address() + ' [machine], ' + addr = super().address() + ' [machine], ' tmp = self.ptr.copy() try: tmp.conv(addrxlat.KPHYSADDR, self.context, self.system) @@ -186,7 +186,7 @@ def __init__(self): parser.format_usage = lambda : \ "vtop [-c [pid | taskp]] [-u|-k] address ...\n" - super(VTOPCommand, self).__init__("vtop", parser) + super().__init__("vtop", parser) def execute(self, argv): ctx = addrxlat_context() diff --git a/crash/exceptions.py b/crash/exceptions.py index 93bfc7df6da..593bb63f8bb 100644 --- a/crash/exceptions.py +++ b/crash/exceptions.py @@ -20,4 +20,4 @@ class DelayedAttributeError(AttributeError): """ def __init__(self, owner, name): msg = "{} has delayed attribute {} but it has not been completed." - super(DelayedAttributeError, self).__init__(msg.format(owner, name)) + super().__init__(msg.format(owner, name)) diff --git a/crash/infra/callback.py b/crash/infra/callback.py index fa8430209f3..8307d16e630 100644 --- a/crash/infra/callback.py +++ b/crash/infra/callback.py @@ -9,7 +9,7 @@ class CallbackCompleted(RuntimeError): """The callback has already been completed and is no longer valid""" def __init__(self, callback_obj): msg = "{} has already completed.".format(callback_obj.name) - super(CallbackCompleted, self).__init__(msg) + super().__init__(msg) self.callback_obj = callback_obj class ObjfileEventCallback(object): diff --git a/crash/infra/lookup.py b/crash/infra/lookup.py index 77e8013784f..c350586e17b 100644 --- a/crash/infra/lookup.py +++ b/crash/infra/lookup.py @@ -23,7 +23,7 @@ def __init__(self, name, callback, symbol_file=None): self.name = name self.symbol_file = symbol_file self.callback = callback - super(MinimalSymbolCallback, self).__init__() + super().__init__() def check_ready(self): return gdb.lookup_minimal_symbol(self.name, self.symbol_file, None) @@ -49,7 +49,7 @@ def __init__(self, name, callback, domain=gdb.SYMBOL_VAR_DOMAIN): self.name = name self.domain = domain self.callback = callback - super(SymbolCallback, self).__init__() + super().__init__() def check_ready(self): return gdb.lookup_symbol(self.name, None, self.domain)[0] @@ -64,7 +64,7 @@ class SymvalCallback(SymbolCallback): objfile and returns the gdb.Value associated with it. 
""" def check_ready(self): - sym = super(SymvalCallback, self).check_ready() + sym = super().check_ready() if sym is not None: try: return sym.value() @@ -81,7 +81,7 @@ def __init__(self, name, callback, block=None): self.name = name self.block = block self.callback = callback - super(TypeCallback, self).__init__() + super().__init__() def check_ready(self): try: @@ -121,7 +121,7 @@ def __init__(self, name): Args: name (str): The name of the minimal symbol """ - super(DelayedMinimalSymbol, self).__init__(name) + super().__init__(name) self.cb = MinimalSymbolCallback(name, self.callback) def __str__(self): return "{} attached with {}".format(self.__class__, str(self.cb)) @@ -135,7 +135,7 @@ def __init__(self, name): Args: name (str): The name of the symbol """ - super(DelayedSymbol, self).__init__(name) + super().__init__(name) self.cb = SymbolCallback(name, self.callback) def __str__(self): return "{} attached with {}".format(self.__class__, str(self.cb)) @@ -151,7 +151,7 @@ def __init__(self, name, pointer=False): pointer (bool, optional, default=False): Whether the requested type should be returned as a pointer to that type. """ - super(DelayedType, self).__init__(name) + super().__init__(name) self.pointer = pointer self.cb = TypeCallback(name, self.callback) diff --git a/crash/kdump/target.py b/crash/kdump/target.py index b6ea9997f85..d76e8001b47 100644 --- a/crash/kdump/target.py +++ b/crash/kdump/target.py @@ -13,7 +13,7 @@ class SymbolCallback(object): "addrxlat symbolic callback" def __init__(self, ctx=None, *args, **kwargs): - super(SymbolCallback, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.ctx = ctx def __call__(self, symtype, *args): @@ -43,7 +43,7 @@ def __init__(self, vmcore, debug=False): # So far we've read from the kernel image, now that we've setup # the architecture, we're ready to plumb into the target # infrastructure. 
- super(Target, self).__init__() + super().__init__() def setup_arch(self): archname = self.kdump.attr.arch.name diff --git a/crash/types/task.py b/crash/types/task.py index f450b528aa0..5e930f2c5e7 100644 --- a/crash/types/task.py +++ b/crash/types/task.py @@ -87,7 +87,7 @@ def __init__(self, task): typedesc = task.type else: typedesc = type(task) - super(BadTaskError, self).__init__(self.msgtemplate.format(typedesc)) + super().__init__(self.msgtemplate.format(typedesc)) class LinuxTask(object): task_struct_type = None diff --git a/crash/util.py b/crash/util.py index 024e57de554..0c5905fdd3f 100644 --- a/crash/util.py +++ b/crash/util.py @@ -8,7 +8,7 @@ class OffsetOfError(Exception): """Generic Exception for offsetof errors""" def __init__(self, message): - super(OffsetOfError, self).__init__() + super().__init__() self.message = message def __str__(self): @@ -20,7 +20,7 @@ class InvalidArgumentError(OffsetOfError): def __init__(self, val): msg = self.formatter.format(str(type(val))) - super(InvalidArgumentError, self).__init__(msg) + super().__init__(msg) self.val = val class InvalidArgumentTypeError(OffsetOfError): @@ -28,7 +28,7 @@ class InvalidArgumentTypeError(OffsetOfError): formatter = "`{}' is not a struct or union" def __init__(self, gdbtype): msg = self.formatter.format(str(gdbtype)) - super(InvalidArgumentTypeError, self).__init__(msg) + super().__init__(msg) self.type = gdbtype class InvalidComponentError(OffsetOfError): @@ -36,7 +36,7 @@ class InvalidComponentError(OffsetOfError): formatter = "cannot resolve '{}->{}' ({})" def __init__(self, gdbtype, spec, message): msg = self.formatter.format(str(gdbtype), spec, message) - super(InvalidComponentError, self).__init__(msg) + super().__init__(msg) self.type = gdbtype self.spec = spec @@ -51,7 +51,7 @@ class _InvalidComponentTypeError(_InvalidComponentBaseError): formatter = "component `{}' in `{}' is not a struct or union" def __init__(self, name, spec): msg = self.formatter.format(name, spec) - super(_InvalidComponentTypeError, self).__init__(msg) + super().__init__(msg) self.name = name self.spec = spec @@ -61,7 +61,7 @@ class _InvalidComponentNameError(_InvalidComponentBaseError): formatter = "no such member `{}' in `{}'" def __init__(self, member, gdbtype): msg = self.formatter.format(member, str(gdbtype)) - super(_InvalidComponentNameError, self).__init__(msg) + super().__init__(msg) self.member = member self.type = gdbtype From 6e438f05caede9018bcae33e3ff29f38fbd0a6b9 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Tue, 21 May 2019 16:53:32 -0400 Subject: [PATCH 066/367] tests: reduce noise as much as possible This commit reduces the verbosity level of unittest, uses make -s, and adds the -q option to setup.py. 
Signed-off-by: Jeff Mahoney --- test-all.sh | 6 +++--- tests/unittest-bootstrap.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/test-all.sh b/test-all.sh index e8fff15cbad..86a98c9c797 100755 --- a/test-all.sh +++ b/test-all.sh @@ -1,6 +1,6 @@ #!/bin/sh rm -rf build/lib/crash -python3 setup.py build -make -C tests -crash-python-gdb -batch -ex "source tests/unittest-bootstrap.py" +python3 setup.py -q build +make -C tests -s +crash-python-gdb -nx -batch -ex "source tests/unittest-bootstrap.py" diff --git a/tests/unittest-bootstrap.py b/tests/unittest-bootstrap.py index 54c03ad4998..807ca0854c1 100644 --- a/tests/unittest-bootstrap.py +++ b/tests/unittest-bootstrap.py @@ -9,4 +9,4 @@ test_loader = unittest.TestLoader() test_suite = test_loader.discover('tests', pattern='test_*.py') -unittest.TextTestRunner(verbosity=2).run(test_suite) +unittest.TextTestRunner(verbosity=1).run(test_suite) From 303f681db0daf807bbdbad8b7cd9f6e32a817530 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Tue, 21 May 2019 16:55:16 -0400 Subject: [PATCH 067/367] tests: add support for typing static checker Python 3.5 introduced type hinting that can be used to check our code for typing errors prior to runtime. This can help shake out bugs that are undetected or just noisy in the test cases. Signed-off-by: Jeff Mahoney --- test-all.sh | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/test-all.sh b/test-all.sh index 86a98c9c797..6e9eaeb2bf0 100755 --- a/test-all.sh +++ b/test-all.sh @@ -4,3 +4,27 @@ rm -rf build/lib/crash python3 setup.py -q build make -C tests -s crash-python-gdb -nx -batch -ex "source tests/unittest-bootstrap.py" + +has_mypy() { + python3 -c 'import mypy' 2> /dev/null +} + +if has_mypy; then + cat <<- END > $DIR/gdbinit + set build-id-verbose 0 + set python print-stack full + set height 0 + set print pretty on + python + sys.path.insert(0, 'build/lib') + from mypy.main import main + main(None, args=["-p", "crash", "--ignore-missing-imports"]) + end + END + echo "Doing static checking." + if ! crash-python-gdb -nx -batch -x $DIR/gdbinit; then + echo "static checking failed." >&2 + else + echo "OK" + fi +fi From 5629edb22e1c0adc42b228fd12b68f25dc05e661 Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Wed, 8 May 2019 20:31:04 -0400 Subject: [PATCH 068/367] crash.commands: remove Crash* prefix from classes This should make it easier to share code with helpers that operate using bare GDB. 
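For illustration only, a new command written against the renamed classes would
look roughly like the following sketch (ExampleCommand and its behaviour are
invented for this example and are not part of the patch):

from crash.commands import Command, ArgumentParser

class ExampleCommand(Command):
    """print a test message

NAME
  example - print a test message

SYNOPSIS
  example [-v]
"""
    def __init__(self, name="example"):
        parser = ArgumentParser(prog=name)
        parser.add_argument('-v', action='store_true', default=False)
        super().__init__(name, parser)

    def execute(self, args):
        # Registered with gdb as "pyexample"; argument errors raise
        # CommandLineError, which invoke() reports with the usage string.
        print("example" + (" (verbose)" if args.v else ""))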
Signed-off-by: Jeff Mahoney --- crash/commands/__init__.py | 26 ++++++++++++++++---------- crash/commands/dmesg.py | 8 ++++---- crash/commands/help.py | 6 +++--- crash/commands/kmem.py | 6 +++--- crash/commands/mount.py | 6 +++--- crash/commands/ps.py | 12 ++++++------ crash/commands/syscmd.py | 13 ++++++------- crash/commands/task.py | 8 ++++---- crash/commands/vtop.py | 6 +++--- tests/test_syscmd.py | 6 +++--- 10 files changed, 51 insertions(+), 46 deletions(-) diff --git a/crash/commands/__init__.py b/crash/commands/__init__.py index aef86ed0c7a..7b1772196c0 100644 --- a/crash/commands/__init__.py +++ b/crash/commands/__init__.py @@ -10,21 +10,24 @@ import importlib import argparse -class CrashCommandLineError(RuntimeError): +class CommandError(RuntimeError): pass -class CrashCommandParser(argparse.ArgumentParser): +class CommandLineError(RuntimeError): + pass + +class ArgumentParser(argparse.ArgumentParser): def error(self, message): - raise CrashCommandLineError(message) + raise CommandLineError(message) -class CrashCommand(CrashBaseClass, gdb.Command): +class Command(CrashBaseClass, gdb.Command): commands = {} def __init__(self, name, parser=None): self.name = "py" + name if parser is None: - parser = CrashCommandParser(prog=self.name) - elif not isinstance(parser, CrashCommandParser): - raise TypeError("parser must be CrashCommandParser") + parser = ArgumentParser(prog=self.name) + elif not isinstance(parser, ArgumentParser): + raise TypeError("parser must be ArgumentParser") nl = "" if self.__doc__[-1] != '\n': @@ -42,13 +45,16 @@ def invoke_uncaught(self, argstr, from_tty): def invoke(self, argstr, from_tty=False): try: self.invoke_uncaught(argstr, from_tty) - except CrashCommandLineError as e: - print("{}: {}".format(self.name, str(e))) + except CommandError as e: + print(f"{self.name}: {str(e)}") + except CommandLineError as e: + print(f"{self.name}: {str(e)}") + self.parser.print_usage() except (SystemExit, KeyboardInterrupt): pass def execute(self, argv): - raise NotImplementedError("CrashCommand should not be called directly") + raise NotImplementedError("Command should not be called directly") def discover(): modules = glob.glob(os.path.dirname(__file__)+"/[A-Za-z]*.py") diff --git a/crash/commands/dmesg.py b/crash/commands/dmesg.py index c995122a3db..19d38398d80 100644 --- a/crash/commands/dmesg.py +++ b/crash/commands/dmesg.py @@ -6,7 +6,7 @@ import argparse import re -from crash.commands import CrashCommand, CrashCommandParser +from crash.commands import Command, ArgumentParser from crash.exceptions import DelayedAttributeError class LogTypeException(Exception): @@ -15,7 +15,7 @@ class LogTypeException(Exception): class LogInvalidOption(Exception): pass -class LogCommand(CrashCommand): +class LogCommand(Command): """dump system message buffer NAME @@ -139,14 +139,14 @@ class LogCommand(CrashCommand): """ def __init__(self, name): - parser = CrashCommandParser(prog=name) + parser = ArgumentParser(prog=name) parser.add_argument('-t', action='store_true', default=False) parser.add_argument('-d', action='store_true', default=False) parser.add_argument('-m', action='store_true', default=False) parser.format_usage = lambda: 'log [-tdm]\n' - CrashCommand.__init__(self, name, parser) + Command.__init__(self, name, parser) __types__ = [ 'struct printk_log *' , 'char *' ] __symvals__ = [ 'log_buf', 'log_buf_len', 'log_first_idx', 'log_next_idx', diff --git a/crash/commands/help.py b/crash/commands/help.py index 3ba87a4e655..43fa3c5e97c 100644 --- a/crash/commands/help.py +++ 
b/crash/commands/help.py @@ -3,9 +3,9 @@ import gdb import argparse -from crash.commands import CrashCommand, CrashCommandParser +from crash.commands import Command, ArgumentParser -class HelpCommand(CrashCommand): +class HelpCommand(Command): """ this command NAME @@ -21,7 +21,7 @@ class HelpCommand(CrashCommand): """ def __init__(self): - parser = CrashCommandParser(prog="help") + parser = ArgumentParser(prog="help") parser.add_argument('args', nargs=argparse.REMAINDER) super().__init__('help', parser) diff --git a/crash/commands/kmem.py b/crash/commands/kmem.py index 514160a9c07..95d5d60fbc3 100644 --- a/crash/commands/kmem.py +++ b/crash/commands/kmem.py @@ -3,7 +3,7 @@ import gdb import crash -from crash.commands import CrashCommand, CrashCommandParser +from crash.commands import Command, ArgumentParser from crash.types.slab import kmem_cache_get_all, kmem_cache_from_name, slab_from_obj_addr from crash.types.zone import for_each_zone, for_each_populated_zone from crash.types.vmstat import VmStat @@ -13,7 +13,7 @@ def getValue(sym): return gdb.lookup_symbol(sym, None)[0].value() -class KmemCommand(CrashCommand): +class KmemCommand(Command): """ kernel memory inspection NAME @@ -30,7 +30,7 @@ class KmemCommand(CrashCommand): """ def __init__(self, name): - parser = CrashCommandParser(prog=name) + parser = ArgumentParser(prog=name) group = parser.add_mutually_exclusive_group() group.add_argument('-s', action='store_true', default=False) diff --git a/crash/commands/mount.py b/crash/commands/mount.py index 0dc08e0aff3..7600a508fb6 100644 --- a/crash/commands/mount.py +++ b/crash/commands/mount.py @@ -3,7 +3,7 @@ import gdb -from crash.commands import CrashCommand, CrashCommandParser +from crash.commands import Command, ArgumentParser from crash.subsystem.filesystem.mount import MNT_NOSUID, MNT_NODEV, MNT_NOEXEC from crash.subsystem.filesystem.mount import MNT_NOATIME, MNT_NODIRATIME from crash.subsystem.filesystem.mount import MNT_RELATIME, MNT_READONLY @@ -14,7 +14,7 @@ from crash.subsystem.filesystem.mount import mount_super, mount_flags from crash.subsystem.filesystem.mount import mount_root -class MountCommand(CrashCommand): +class MountCommand(Command): """display mounted file systems NAME @@ -25,7 +25,7 @@ class MountCommand(CrashCommand): -d display device obtained from super_block """ def __init__(self, name): - parser = CrashCommandParser(prog=name) + parser = ArgumentParser(prog=name) parser.add_argument('-v', action='store_true', default=False) parser.add_argument('-f', action='store_true', default=False) diff --git a/crash/commands/ps.py b/crash/commands/ps.py index 737a598f60b..44c18b92e15 100755 --- a/crash/commands/ps.py +++ b/crash/commands/ps.py @@ -4,11 +4,11 @@ import gdb import argparse -from crash.commands import CrashCommand, CrashCommandParser -from crash.commands import CrashCommandLineError +from crash.commands import Command, ArgumentParser +from crash.commands import CommandLineError from crash.types.task import LinuxTask, TaskStateFlags as TF -class PSCommand(CrashCommand): +class PSCommand(Command): """display process status information NAME @@ -381,7 +381,7 @@ class PSCommand(CrashCommand): 20 2 3 ffff8802129a9710 IN 0.0 0 0 [migration/3] """ def __init__(self): - parser = CrashCommandParser(prog="ps") + parser = ArgumentParser(prog="ps") group = parser.add_mutually_exclusive_group() group.add_argument('-k', action='store_true', default=False) @@ -406,7 +406,7 @@ def __init__(self): parser.format_usage = lambda: \ "ps [-k|-u|-G][-s][-p|-c|-t|-l|-a|-g|-r] [pid 
| taskp | command] ...\n" - CrashCommand.__init__(self, "ps", parser) + Command.__init__(self, "ps", parser) self.header_template = " PID PPID CPU {1:^{0}} ST %MEM " \ "VSZ RSS COMM" @@ -541,7 +541,7 @@ def execute(self, argv): try: self.setup_task_states() except AttributeError: - raise CrashCommandLineError("The task subsystem is not available.") + raise CommandLineError("The task subsystem is not available.") sort_by = sort_by_pid if argv.l: diff --git a/crash/commands/syscmd.py b/crash/commands/syscmd.py index 06dfbcba2bb..8f11a9b90db 100644 --- a/crash/commands/syscmd.py +++ b/crash/commands/syscmd.py @@ -2,11 +2,11 @@ # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: import gdb -from crash.commands import CrashCommand, CrashCommandParser -from crash.commands import CrashCommandLineError +from crash.commands import Command, ArgumentParser +from crash.commands import CommandLineError from crash.cache.syscache import utsname, config, kernel -class SysCommand(CrashCommand): +class SysCommand(Command): """system data NAME @@ -43,12 +43,12 @@ class SysCommand(CrashCommand): """ def __init__(self, name): - parser = CrashCommandParser(prog=name) + parser = ArgumentParser(prog=name) parser.add_argument('config', nargs='?') parser.format_usage = lambda: "sys [config]\n" - CrashCommand.__init__(self, name, parser) + Command.__init__(self, name, parser) @staticmethod def show_default(): @@ -64,8 +64,7 @@ def execute(self, args): if args.config == "config": print(config) else: - raise CrashCommandLineError("error: unknown option: {}" - .format(args.config)) + raise CommandLineError(f"error: unknown option: {args.config}") else: self.show_default() diff --git a/crash/commands/task.py b/crash/commands/task.py index ad7defeacf3..f50553dc7ad 100644 --- a/crash/commands/task.py +++ b/crash/commands/task.py @@ -2,11 +2,11 @@ # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: import gdb -from crash.commands import CrashCommand, CrashCommandParser +from crash.commands import Command, ArgumentParser import crash.cache.tasks import argparse -class TaskCommand(CrashCommand): +class TaskCommand(Command): """select task by pid NAME @@ -23,12 +23,12 @@ class TaskCommand(CrashCommand): """ def __init__(self, name): - parser = CrashCommandParser(prog=name) + parser = ArgumentParser(prog=name) parser.add_argument('pid', type=int, nargs=1) parser.format_usage = lambda: "thread \n" - CrashCommand.__init__(self, name, parser) + Command.__init__(self, name, parser) def execute(self, args): try: diff --git a/crash/commands/vtop.py b/crash/commands/vtop.py index 6de1e3d340e..5e1476d75d8 100644 --- a/crash/commands/vtop.py +++ b/crash/commands/vtop.py @@ -3,7 +3,7 @@ import gdb import argparse -from crash.commands import CrashCommand, CrashCommandParser +from crash.commands import Command, ArgumentParser from crash.addrxlat import addrxlat_context, addrxlat_system, addrxlat_is_non_auto import addrxlat @@ -59,7 +59,7 @@ def address(self): except (addrxlat.NotPresentError, addrxlat.NoDataError): return addr + 'N/A' -class VTOPCommand(CrashCommand): +class VTOPCommand(Command): """convert virtual address to physical NAME @@ -173,7 +173,7 @@ class VTOPCommand(CrashCommand): """ def __init__(self): - parser = CrashCommandParser(prog="vtop") + parser = ArgumentParser(prog="vtop") group = parser.add_mutually_exclusive_group() group.add_argument('-u', action='store_true', default=False) diff --git a/tests/test_syscmd.py b/tests/test_syscmd.py index 51a5c6e4eef..d78c5472725 100644 --- a/tests/test_syscmd.py 
+++ b/tests/test_syscmd.py @@ -7,7 +7,7 @@ from io import StringIO from crash.exceptions import MissingSymbolError -from crash.commands import CrashCommandLineError +from crash.commands import CommandLineError from crash.commands.syscmd import SysCommand class TestSysCmd(unittest.TestCase): @@ -28,11 +28,11 @@ def test_sys(self): self.assertTrue('MACHINE: x86_64' in result) def test_sys_garbage(self): - with self.assertRaises(CrashCommandLineError): + with self.assertRaises(CommandLineError): self.cmd.invoke_uncaught("garbage", from_tty=False) def test_sys_garbage_flag(self): - with self.assertRaises(CrashCommandLineError): + with self.assertRaises(CommandLineError): self.cmd.invoke_uncaught("-a", from_tty=False) def test_sys_config(self): From 262e0a99006edbceba3b76b228f37dcdf8d8a913 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 26 Apr 2019 21:22:18 +0200 Subject: [PATCH 069/367] crash.commands.ps: add ability to filter tasks by name This commit adds the ability to filter tasks by name using UNIX wildcard-style expressions. e.g. pyps *xfs* Signed-off-by: Jeff Mahoney --- crash/commands/ps.py | 31 +++++++++++++++++++++---------- 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/crash/commands/ps.py b/crash/commands/ps.py index 44c18b92e15..78996cb649a 100755 --- a/crash/commands/ps.py +++ b/crash/commands/ps.py @@ -3,6 +3,8 @@ import gdb import argparse +import fnmatch +import re from crash.commands import Command, ArgumentParser from crash.commands import CommandLineError @@ -558,18 +560,27 @@ def execute(self, argv): width = 16 print(self.header_template.format(width, col4name)) - if not argv.args: - for thread in sorted(gdb.selected_inferior().threads(), key=sort_by): - task = thread.info - if task: - if argv.k and not task.is_kernel_task(): - continue - if argv.u and task.is_kernel_task(): + regex = None + if argv.args: + regex = re.compile(fnmatch.translate(argv.args[0])) + + for thread in sorted(gdb.selected_inferior().threads(), key=sort_by): + task = thread.info + if task: + if argv.k and not task.is_kernel_task(): + continue + if argv.u and task.is_kernel_task(): + continue + + if regex is not None: + m = regex.match(task.task_struct['comm'].string()) + if m is None: continue - # Only show thread group leaders + + # Only show thread group leaders # if argv.G and task.pid != int(task.task_struct['tgid']): - task.update_mem_usage() - self.print_one(argv, thread) + task.update_mem_usage() + self.print_one(argv, thread) PSCommand() From 65b6b15b1946f80f7dbbbd80a8079a7f787a0bde Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 29 Apr 2019 14:44:58 -0400 Subject: [PATCH 070/367] crash.types.task: improve task flag handling This commit adds knowledge of the task flags for newer releases. In Linux v3.14, several elements were removed from task_state_array. In Linux v4.4, TASK_PARKED was renumbered to be in task_state_array. This commit handles the right things and will complain if the flags change again. Signed-off-by: Jeff Mahoney --- crash/commands/ps.py | 17 ++---- crash/types/task.py | 127 ++++++++++++++++++++++++++++++++++--------- 2 files changed, 107 insertions(+), 37 deletions(-) diff --git a/crash/commands/ps.py b/crash/commands/ps.py index 78996cb649a..36815346499 100755 --- a/crash/commands/ps.py +++ b/crash/commands/ps.py @@ -437,7 +437,7 @@ def task_state_string(self, task): pass buf = '??' 
- if hasattr(TF, 'TASK_DEAD'): + if TF.has_flag('TASK_DEAD'): try: buf = self.task_states[state & ~TF.TASK_DEAD] except KeyError: @@ -522,17 +522,15 @@ def setup_task_states(self): TF.TASK_RUNNING : "RU", TF.TASK_INTERRUPTIBLE : "IN", TF.TASK_UNINTERRUPTIBLE : "UN", - TF.TASK_ZOMBIE : "ZO", + TF.EXIT_ZOMBIE : "ZO", TF.TASK_STOPPED : "ST", } - if hasattr(TF, 'TASK_EXCLUSIVE'): - self.task_states[TF.TASK_EXCLUSIVE] = "EX" - if hasattr(TF, 'TASK_SWAPPING'): + if TF.has_flag('TASK_SWAPPING'): self.task_states[TF.TASK_SWAPPING] = "SW" - if hasattr(TF, 'TASK_DEAD'): + if TF.has_flag('TASK_DEAD'): self.task_states[TF.TASK_DEAD] = "DE" - if hasattr(TF, 'TASK_TRACING_STOPPED'): + if TF.has_flag('TASK_TRACING_STOPPED'): self.task_states[TF.TASK_TRACING_STOPPED] = "TR" def execute(self, argv): @@ -540,10 +538,7 @@ def execute(self, argv): sort_by_last_run = lambda x: -x.info.last_run() if not hasattr(self, 'task_states'): - try: - self.setup_task_states() - except AttributeError: - raise CommandLineError("The task subsystem is not available.") + self.setup_task_states() sort_by = sort_by_pid if argv.l: diff --git a/crash/types/task.py b/crash/types/task.py index 5e930f2c5e7..259006e6d21 100644 --- a/crash/types/task.py +++ b/crash/types/task.py @@ -13,16 +13,44 @@ def get_value(symname): if sym[0]: return sym[0].value() +# This is pretty painful. These are all #defines so none of them end +# up with symbols in the kernel. The best approximation we have is +# task_state_array which doesn't include all of them. All we can do +# is make some assumptions based on the changes upstream. This will +# be fragile. class TaskStateFlags(CrashBaseClass): __types__ = [ 'char *', 'struct task_struct' ] __symvals__ = [ 'task_state_array' ] - __symbol_callbacks__ = [ ('task_state_array', 'task_state_flags_callback') ] - __delayed_values__ = [ 'TASK_RUNNING', 'TASK_INTERRUPTIBLE', - 'TASK_UNINTERRUPTIBLE', 'TASK_ZOMBIE', - 'TASK_STOPPED', 'TASK_SWAPPING', 'TASK_EXCLUSIVE' ] + __symbol_callbacks__ = [ ('task_state_array', '_task_state_flags_callback') ] + + TASK_RUNNING = 0 + + TASK_FLAG_UNINITIALIZED = -1 + + TASK_INTERRUPTIBLE: int=TASK_FLAG_UNINITIALIZED + TASK_UNINTERRUPTIBLE: int=TASK_FLAG_UNINITIALIZED + TASK_STOPPED: int=TASK_FLAG_UNINITIALIZED + EXIT_ZOMBIE: int=TASK_FLAG_UNINITIALIZED + TASK_DEAD: int=TASK_FLAG_UNINITIALIZED + EXIT_DEAD: int=TASK_FLAG_UNINITIALIZED + TASK_SWAPPING: int=TASK_FLAG_UNINITIALIZED + TASK_TRACING_STOPPED: int=TASK_FLAG_UNINITIALIZED + TASK_WAKEKILL: int=TASK_FLAG_UNINITIALIZED + TASK_WAKING: int=TASK_FLAG_UNINITIALIZED + TASK_PARKED: int=TASK_FLAG_UNINITIALIZED + __TASK_IDLE: int=TASK_FLAG_UNINITIALIZED + + TASK_NOLOAD: int=TASK_FLAG_UNINITIALIZED + TASK_NEW: int=TASK_FLAG_UNINITIALIZED + TASK_IDLE: int=TASK_FLAG_UNINITIALIZED + + @classmethod + def has_flag(cls, flagname): + v = getattr(cls, flagname) + return v != cls.TASK_FLAG_UNINITIALIZED @classmethod - def task_state_flags_callback(cls, symbol): + def _task_state_flags_callback(cls, symbol): count = array_size(cls.task_state_array) bit = 0 @@ -33,45 +61,92 @@ def task_state_flags_callback(cls, symbol): '(sleeping)' : 'TASK_INTERRUPTIBLE', '(disk sleep)' : 'TASK_UNINTERRUPTIBLE', '(stopped)' : 'TASK_STOPPED', - '(zombie)' : 'TASK_ZOMBIE', - #'(dead)' : 'TASK_DEAD', + '(zombie)' : 'EXIT_ZOMBIE', + 'x (dead)' : 'TASK_DEAD', + 'X (dead)' : 'EXIT_DEAD', '(swapping)' : 'TASK_SWAPPING', - #'(tracing stop)' : 'TASK_TRACING_STOPPED', + '(tracing stop)' : 'TASK_TRACING_STOPPED', '(wakekill)' : 'TASK_WAKEKILL', '(waking)' : 
'TASK_WAKING', + '(parked)' : 'TASK_PARKED', + '(idle)' : '__TASK_IDLE', } for key in state_strings: if key in state: - try: - dv = get_delayed_lookup(cls, state_strings[key]) - dv.callback(bit) - except KeyError: - setattr(cls, state_strings[key], bit) - if '(dead)' in state: - cls.TASK_DEAD = bit - if '(tracing stop)' in state: - cls.TASK_TRACING_STOPPED = bit + setattr(cls, state_strings[key], bit) + if bit == 0: bit = 1 else: bit <<= 1 - cls.check_state_bits() + + # Linux 4.14 re-introduced TASK_PARKED into task_state_array + # which renumbered some bits + if cls.has_flag('TASK_PARKED') and not cls.has_flag('TASK_DEAD'): + newbits = cls.TASK_PARKED << 1 + cls.TASK_DEAD = newbits + cls.TASK_WAKEKILL = newbits << 1 + cls.TASK_WAKING = newbits << 2 + cls.TASK_NOLOAD = newbits << 3 + cls.TASK_NEW = newbits << 4 + + assert(cls.TASK_PARKED == 0x0040) + assert(cls.TASK_DEAD == 0x0080) + assert(cls.TASK_WAKEKILL == 0x0100) + assert(cls.TASK_WAKING == 0x0200) + + # Linux 3.14 removed several elements from task_state_array + # so we'll have to make some assumptions. + # TASK_NOLOAD wasn't introduced until 4.2 and wasn't added + # to task_state_array until v4.14. There's no way to + # detect whether the use of the flag is valid for a particular + # kernel release. + elif cls.has_flag('EXIT_DEAD'): + if cls.EXIT_ZOMBIE > cls.EXIT_DEAD: + newbits = cls.EXIT_ZOMBIE << 1 + else: + newbits = cls.EXIT_DEAD << 1 + cls.TASK_DEAD = newbits + cls.TASK_WAKEKILL = newbits << 1 + cls.TASK_WAKING = newbits << 2 + cls.TASK_PARKED = newbits << 3 + cls.TASK_NOLOAD = newbits << 4 + cls.TASK_NEW = newbits << 5 + + assert(cls.TASK_DEAD == 0x0040) + assert(cls.TASK_WAKEKILL == 0x0080) + assert(cls.TASK_WAKING == 0x0100) + assert(cls.TASK_PARKED == 0x0200) + else: + assert(cls.TASK_DEAD == 64) + assert(cls.TASK_WAKEKILL == 128) + assert(cls.TASK_WAKING == 256) + assert(cls.TASK_PARKED == 512) + + if cls.has_flag('TASK_NOLOAD'): + assert(cls.TASK_NOLOAD == 1024) + cls.TASK_IDLE = cls.TASK_NOLOAD | cls.TASK_UNINTERRUPTIBLE + assert(cls.TASK_IDLE == 1026) + if cls.has_flag('TASK_NEW'): + assert(cls.TASK_NEW == 2048) + + cls._check_state_bits() @classmethod - def check_state_bits(cls): + def _check_state_bits(cls): required = [ 'TASK_RUNNING', 'TASK_INTERRUPTIBLE', 'TASK_UNINTERRUPTIBLE', - 'TASK_ZOMBIE', + 'EXIT_ZOMBIE', 'TASK_STOPPED', ] missing = [] for bit in required: - if not hasattr(cls, bit): + if not cls.has_flag(bit): missing.append(bit) if len(missing): @@ -156,9 +231,9 @@ def get_thread_info(self): def get_last_cpu(self): try: - return self.task_struct['cpu'] + return int(self.task_struct['cpu']) except gdb.error as e: - return self.thread_info['cpu'] + return int(self.thread_info['cpu']) def task_state(self): state = int(self.task_struct['state']) @@ -171,10 +246,10 @@ def maybe_dead(self): known = TF.TASK_INTERRUPTIBLE known |= TF.TASK_UNINTERRUPTIBLE - known |= TF.TASK_ZOMBIE + known |= TF.EXIT_ZOMBIE known |= TF.TASK_STOPPED - if hasattr(TF, 'TASK_SWAPPING'): + if TF.has_flag('TASK_SWAPPING'): known |= TF.TASK_SWAPPING return (state & known) == 0 @@ -185,7 +260,7 @@ def is_exiting(self): return self.task_flags() & PF_EXITING def is_zombie(self): - return self.task_state() & TF.TASK_ZOMBIE + return self.task_state() & TF.EXIT_ZOMBIE def update_mem_usage(self): if self.mem_valid: From 5e2543500db7838383194c5ca3376ca323c82041 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 29 Apr 2019 14:45:17 -0400 Subject: [PATCH 071/367] crash.commands.ps: add support for TASK_IDLE Kernel 4.2 introduced TASK_NOLOAD, 
which when combined with TASK_UNINTERRUPTIBLE, produced TASK_IDLE. This mask is used for kernel threads, so without support for the flags, `ps' shows ?? for kernel threads. Signed-off-by: Jeff Mahoney --- crash/commands/ps.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/crash/commands/ps.py b/crash/commands/ps.py index 36815346499..4b4bb919482 100755 --- a/crash/commands/ps.py +++ b/crash/commands/ps.py @@ -436,19 +436,21 @@ def task_state_string(self, task): except AttributeError: pass - buf = '??' - if TF.has_flag('TASK_DEAD'): - try: - buf = self.task_states[state & ~TF.TASK_DEAD] - except KeyError: - pass + buf = None - if state & TF.TASK_DEAD and task.maybe_dead(): - buf = self.task_states[TF.TASK_DEAD] + for bits in sorted(self.task_states.keys(), reverse=True): + if (state & bits) == bits: + buf = self.task_states[bits] + break + if state & TF.TASK_DEAD and task.maybe_dead(): + buf = self.task_states[TF.TASK_DEAD] if buf is not None and exclusive: buf += "EX" + if buf is None: + print(f"Unknown state {state} found") + return buf @classmethod @@ -532,6 +534,8 @@ def setup_task_states(self): self.task_states[TF.TASK_DEAD] = "DE" if TF.has_flag('TASK_TRACING_STOPPED'): self.task_states[TF.TASK_TRACING_STOPPED] = "TR" + if TF.has_flag('TASK_IDLE'): + self.task_states[TF.TASK_IDLE] = "ID" def execute(self, argv): sort_by_pid = lambda x: x.info.task_struct['pid'] From e3b9f2237eeb755281d30d723c09794fc40d944b Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Wed, 19 Sep 2018 10:47:38 +0200 Subject: [PATCH 072/367] crash.commands.help: sort commands in help output It's more user-friendly to be able to locate whether a command is present alphabetically. Signed-off-by: Jeff Mahoney --- crash/commands/help.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crash/commands/help.py b/crash/commands/help.py index 43fa3c5e97c..8d0b4d13d57 100644 --- a/crash/commands/help.py +++ b/crash/commands/help.py @@ -28,7 +28,7 @@ def __init__(self): def execute(self, argv): if not argv.args: print("Available commands:") - for cmd in self.commands: + for cmd in sorted(self.commands): text = self.commands[cmd].__doc__ if text: summary = text.split('\n')[0].strip() From be4bc2fa2a6d275d5534712993e6c1a4c2323833 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 29 Apr 2019 14:40:13 -0400 Subject: [PATCH 073/367] crash.util: add struct_has_member The use of anonymous structures and unions means that things like: struct foo { struct { int x; }; }; if 'x' in cls.foo_type: # will evaluate false when foo.x works fine in C code. In order to make these less painful for subsystem modules, we add a struct_has_member helper that does the right thing to resolve the member. 
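
As a rough usage sketch (not part of the patch itself), assuming the
'struct foo' layout above is present in the loaded debuginfo:

    import gdb
    from crash.util import struct_has_member

    foo_type = gdb.lookup_type('struct foo')

    # A plain membership test only sees the top-level fields, so the
    # member hidden inside the anonymous struct is missed:
    'x' in foo_type                      # False
    # struct_has_member() resolves through the anonymous levels:
    struct_has_member(foo_type, 'x')     # True
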
Signed-off-by: Jeff Mahoney --- crash/util.py | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/crash/util.py b/crash/util.py index 0c5905fdd3f..12608f1096b 100644 --- a/crash/util.py +++ b/crash/util.py @@ -1,10 +1,14 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +from typing import Union + import gdb from crash.infra import CrashBaseClass, export from crash.exceptions import MissingTypeError, MissingSymbolError +TypeSpecifier = Union [ gdb.Type, gdb.Value, str, gdb.Symbol ] + class OffsetOfError(Exception): """Generic Exception for offsetof errors""" def __init__(self, message): @@ -96,6 +100,37 @@ def container_of(self, val, gdbtype, member): offset = offsetof(gdbtype, member) return (val.cast(charp) - offset).cast(gdbtype.pointer()).dereference() + @export + @staticmethod + def struct_has_member(gdbtype: TypeSpecifier, name: str) -> bool: + """ + Returns whether a structure has a given member name. + + A typical method of determining whether a structure has a member is just + to check the fields list. That generally works but falls apart when + the structure contains an anonymous union or substructure since + it will push the members one level deeper in the namespace. + + This routine provides a simple interface that covers those details. + + Args: + val (gdb.Type, gdb.Value, str, gdb.Symbol): The object for which + to resolve the type to search for the member + name (str): The name of the member to query + + Returns: + bool: Whether the member is present in the specified type + + Raises: + TypeError: An invalid argument has been provided. + + """ + try: + x = TypesUtilClass.offsetof(gdbtype, name) + return True + except InvalidComponentError: + return False + @export @staticmethod def get_symbol_value(symname, block=None, domain=None): From 73993cecf38f429fa164fb9c76c6d5ac5b76454e Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 29 Apr 2019 14:43:50 -0400 Subject: [PATCH 074/367] crash.types.task: handle anonymous sub-structure in mm_struct Kernel v4.19 moved most of mm_struct into an anonymous sub-structure. Even though C code can access members directly, the gdb type infrastructure reflects the actual type layout. This means that things like "if 'rss_stat' in cls.mm_struct_type" will return false even if the member is present. To cope with this, use struct_has_member instead, which does the right things when detecting whether a struct member is present. 
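
For illustration only (assuming a v4.19+ kernel with debuginfo loaded),
the difference looks roughly like this:

    import gdb
    from crash.util import struct_has_member

    mm_struct_type = gdb.lookup_type('struct mm_struct')

    # 'rss_stat' now lives inside an anonymous sub-structure, so the
    # naive membership test can report False even though the member
    # exists:
    'rss_stat' in mm_struct_type                     # False on v4.19+
    # struct_has_member() still finds it:
    struct_has_member(mm_struct_type, 'rss_stat')    # True when present
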
Signed-off-by: Jeff Mahoney --- crash/types/task.py | 37 ++++++++++++++++++++++--------------- 1 file changed, 22 insertions(+), 15 deletions(-) diff --git a/crash/types/task.py b/crash/types/task.py index 259006e6d21..2f35507eb2c 100644 --- a/crash/types/task.py +++ b/crash/types/task.py @@ -2,7 +2,7 @@ # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: import gdb -from crash.util import array_size +from crash.util import array_size, struct_has_member from crash.infra import CrashBaseClass from crash.infra.lookup import DelayedValue, ClassProperty, get_delayed_lookup @@ -212,7 +212,7 @@ def init_task_types(cls, task): cls.task_struct_type = task.type fields = cls.task_struct_type.fields() cls.task_state_has_exit_state = 'exit_state' in fields - cls.mm_struct_fields = gdb.lookup_type('struct mm_struct').keys() + cls.mm_struct_type = gdb.lookup_type('struct mm_struct') cls.pick_get_rss() cls.pick_last_run() cls.init_mm = get_value('init_mm') @@ -322,12 +322,11 @@ def get_rss_stat_field(self): def get_anon_file_rss_fields(self): mm = self.task_struct['mm'] rss = 0 - for name in ['_anon_rss', '_file_rss']: - if name in mm_struct_fields: - if mm[name].type == self.atomic_long_type: - rss += int(mm[name]['counter']) - else: - rss += int(mm[name]) + for name in cls.anon_file_rss_fields: + if mm[name].type == self.atomic_long_type: + rss += int(mm[name]['counter']) + else: + rss += int(mm[name]) return rss # The Pythonic way to do this is by generating the LinuxTask class @@ -335,20 +334,28 @@ def get_anon_file_rss_fields(self): # select the proper function and assign it to the class. @classmethod def pick_get_rss(cls): - if 'rss' in cls.mm_struct_fields: + if struct_has_member(cls.mm_struct_type, 'rss'): cls.get_rss = cls.get_rss_field - elif '_rss' in cls.mm_struct_fields: + elif struct_has_member(cls.mm_struct_type, '_rss'): cls.get_rss = cls.get__rss_field - elif 'rss_stat' in cls.mm_struct_fields: + elif struct_has_member(cls.mm_struct_type, 'rss_stat'): cls.MM_FILEPAGES = get_value('MM_FILEPAGES') cls.MM_ANONPAGES = get_value('MM_ANONPAGES') cls.get_rss = cls.get_rss_stat_field - elif '_anon_rss' in cls.mm_struct_fields or \ - '_file_rss' in cls.mm_struct_fields: + else: + cls.anon_file_rss_fields = [] + + if struct_has_member(cls.mm_struct_type, '_file_rss'): + cls.anon_file_rss_fields.append('_file_rss') + + if struct_has_member(cls.mm_struct_type, '_anon_rss'): + cls.anon_file_rss_fields.append('_anon_rss') + cls.atomic_long_type = gdb.lookup_type('atomic_long_t') cls.get_rss = cls.get_anon_file_rss_fields - else: - raise RuntimeError("No method to retrieve RSS from task found.") + + if len(cls.anon_file_rss_fields): + raise RuntimeError("No method to retrieve RSS from task found.") def last_run__last_run(self): return int(self.task_struct['last_run']) From b9ebf588d09adc807b2d5fcfd49a3324a234eead Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Tue, 30 Apr 2019 21:26:20 -0400 Subject: [PATCH 075/367] crash.util: add uuid decoding With upcoming file system subsystem modules, we'll want a common way to handle UUID decoding. XFS uses uuid_t while btrfs uses an array of u8. 
This introduces helpers into crash.util: - decode_uuid -- decodes the byte array - decode_uuid_t -- decodes the uuid_t Signed-off-by: Jeff Mahoney --- crash/util.py | 68 ++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 67 insertions(+), 1 deletion(-) diff --git a/crash/util.py b/crash/util.py index 12608f1096b..1f240aeae00 100644 --- a/crash/util.py +++ b/crash/util.py @@ -4,6 +4,8 @@ from typing import Union import gdb +import uuid + from crash.infra import CrashBaseClass, export from crash.exceptions import MissingTypeError, MissingSymbolError @@ -70,7 +72,7 @@ def __init__(self, member, gdbtype): self.type = gdbtype class TypesUtilClass(CrashBaseClass): - __types__ = [ 'char *' ] + __types__ = [ 'char *', 'uuid_t' ] @export def container_of(self, val, gdbtype, member): @@ -411,3 +413,67 @@ def array_for_each(value): size = array_size(value) for i in range(array_size(value)): yield value[i] + + @export + @classmethod + def decode_uuid(cls, value: gdb.Value) -> uuid.UUID: + """ + Decode an array of bytes that describes a UUID into a Python-style + UUID object + + Args: + value (gdb.Value): The UUID to decode + + Returns: + uuid.UUID: The UUID object that describes the value + + Raises: + TypeError: value is not gdb.Value or does not describe a 16-byte array. + + """ + if not isinstance(value, gdb.Value): + raise TypeError("value must be gdb.Value") + + if (value.type.code != gdb.TYPE_CODE_ARRAY or + value[0].type.sizeof != 1 or + value.type.sizeof != 16): + raise TypeError("value must describe an array of 16 bytes") + + u = 0 + for i in range(0, 16): + u <<= 8 + u += int(value[i]) + + return uuid.UUID(int=u) + + @export + @classmethod + def decode_uuid_t(cls, value: gdb.Value) -> uuid.UUID: + """ + Decode a Linux kernel uuid_t into a Python-style UUID object + + Args: + value (gdb.Value): The uuid_t to be decoded + + Returns: + uuid.UUID: The UUID object that describes the value + + Raises: + TypeError: value is not gdb.Value + """ + if not isinstance(value, gdb.Value): + raise TypeError("value must be gdb.Value") + + if value.type != self.uuid_t_type: + if (value.type.code == gdb.TYPE_CODE_PTR and + value.type.target() == self.uuid_t_type): + value = value.dereference() + else: + raise TypeError("value must describe a uuid_t") + + if 'b' in cls.uuid_t_type: + member = 'b' + else: + member = '__u_bits' + + return cls.decode_uuid(value[member]) From 2f00096f783da019322e5a1c17cf17b7fd0d2104 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 26 Apr 2019 21:34:48 +0200 Subject: [PATCH 076/367] crash.util: add decode_flags helper The decode_flags helper takes a gdb.Value representing an integer and a dictionary of int -> str that maps the powers of 2 to flag names and produces a human-readable string describing the flags. If no name is found FLAG_$number is used instead. 
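
A minimal usage sketch (flag names chosen purely for illustration):

    import gdb
    from crash.util import decode_flags

    # Map each power-of-two bit value to a name; bits without an entry
    # are rendered as FLAG_<bitnumber>.
    names = {
        0x1: "MNT_NOSUID",
        0x2: "MNT_NODEV",
        0x4: "MNT_NOEXEC",
    }

    flags = gdb.Value(0x1 | 0x4 | 0x10)
    decode_flags(flags, names)    # -> "MNT_NOSUID|MNT_NOEXEC|FLAG_4"
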
Signed-off-by: Jeff Mahoney --- crash/util.py | 43 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/crash/util.py b/crash/util.py index 1f240aeae00..bfa1548502d 100644 --- a/crash/util.py +++ b/crash/util.py @@ -6,6 +6,8 @@ import gdb import uuid +from typing import Dict + from crash.infra import CrashBaseClass, export from crash.exceptions import MissingTypeError, MissingSymbolError @@ -414,6 +416,47 @@ def array_for_each(value): for i in range(array_size(value)): yield value[i] + @export + @staticmethod + def decode_flags(value: gdb.Value, names: Dict[int, str], + separator: str="|") -> str: + """ + Present a bitfield of individual flags in a human-readable format. + + Args: + value (gdb.Value): + The value containing the flags to be decoded. + names (dict of int->str): + A dictionary containing mappings for each bit number to + a human-readable name. Any flags found that do not have + a matching value in the dict will be displayed as FLAG_. + separator (str, defaults to "|"): + The string to use as a separator between each flag name in the + output. + + Returns: + str: A human-readable string displaying the flag values. + + Raises: + TypeError: value is not gdb.Value or names is not dict. + """ + if not isinstance(value, gdb.Value): + raise TypeError("value must be gdb.Value") + + if not isinstance(names, dict): + raise TypeError("names must be a dictionary of int -> str") + + flags_val = int(value) + flags = [] + for n in range(0, value.type.sizeof << 3): + if flags_val & (1 << n): + try: + flags.append(names[1 << n]) + except KeyError: + flags.append("FLAG_{}".format(n)) + + return separator.join(flags) + @export @classmethod def decode_uuid(cls, value: gdb.Value) -> uuid.UUID: From b7d712ef7ef6cbbe8ff43b53182ecbcbc33c6e2c Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Wed, 19 Sep 2018 10:48:13 +0200 Subject: [PATCH 077/367] crash.types.{,k}list: use the type from the symbol when available Internally, gdb treats the type loaded from a typed symbol and a type symbol differently and wants to do the full type comparison dance. If we use the typed symbol directly, we can use a pointer comparison. Signed-off-by: Jeff Mahoney --- crash/types/klist.py | 4 ++++ crash/types/list.py | 2 ++ 2 files changed, 6 insertions(+) diff --git a/crash/types/klist.py b/crash/types/klist.py index 88e9b0a7aad..7ccb49fa7b3 100644 --- a/crash/types/klist.py +++ b/crash/types/klist.py @@ -20,6 +20,8 @@ def klist_for_each(self, klist): elif klist.type != self.klist_type: raise TypeError("klist must be gdb.Value representing 'struct klist' or 'struct klist *' not {}" .format(klist.type)) + if klist.type is not self.klist_type: + self.klist_type = klist.type for node in list_for_each_entry(klist['k_list'], self.klist_node_type, 'n_node'): @@ -32,4 +34,6 @@ def klist_for_each_entry(self, klist, gdbtype, member): for node in klist_for_each(klist): if node.type != self.klist_node_type: raise TypeError("Type {} found. 
Expected {}.".format(node.type), self.klist_node_type.pointer()) + if node.type is not self.klist_node_type: + self.klist_node_type = node.type yield container_of(node, gdbtype, member) diff --git a/crash/types/list.py b/crash/types/list.py index 380a367c8d9..68c3a33672f 100644 --- a/crash/types/list.py +++ b/crash/types/list.py @@ -31,6 +31,8 @@ def list_for_each(self, list_head, include_head=False, reverse=False, elif list_head.type != self.list_head_type: raise TypeError("Must be struct list_head not {}" .format(str(list_head.type))) + if list_head.type is not self.list_head_type: + self.list_head_type = list_head.type fast = None if int(list_head.address) == 0: raise CorruptListError("list_head is NULL pointer.") From b3a9d13131af0aa9b2da89316669446ba338b094 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 26 Apr 2019 21:24:20 +0200 Subject: [PATCH 078/367] crash.types.list: add list_empty The list_empty method returns a boolean indicating whether a list_head describes an empty list. Signed-off-by: Jeff Mahoney --- crash/types/list.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/crash/types/list.py b/crash/types/list.py index 68c3a33672f..ccfa0ccb6ed 100644 --- a/crash/types/list.py +++ b/crash/types/list.py @@ -116,3 +116,12 @@ def list_for_each_entry(self, list_head, gdbtype, member, include_head=False, re raise TypeError("Type {} found. Expected struct list_head *." .format(str(node.type))) yield container_of(node, gdbtype, member) + + @export + def list_empty(self, list_head): + addr = int(list_head.address) + if list_head.type.code == gdb.TYPE_CODE_PTR: + addr = int(list_head) + + return addr == int(list_head['next']) + From 8b3c9f256b5a9c3985f726e2c468a41a36f1c44a Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 22 Apr 2019 18:14:32 -0400 Subject: [PATCH 079/367] crash.types.list: fix cycle tests The cycle tests aren't passing exact_cycles=True and will loop forever. Signed-off-by: Jeff Mahoney --- crash/types/list.py | 7 +++++-- tests/test_list.py | 8 ++++---- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/crash/types/list.py b/crash/types/list.py index ccfa0ccb6ed..53df2c5cfd1 100644 --- a/crash/types/list.py +++ b/crash/types/list.py @@ -110,8 +110,11 @@ def list_for_each(self, list_head, include_head=False, reverse=False, raise pending_exception @export - def list_for_each_entry(self, list_head, gdbtype, member, include_head=False, reverse=False): - for node in list_for_each(list_head, include_head=include_head, reverse=reverse): + def list_for_each_entry(self, list_head, gdbtype, member, + include_head=False, reverse=False, + exact_cycles=False): + for node in list_for_each(list_head, include_head=include_head, + reverse=reverse, exact_cycles=exact_cycles): if node.type != self.list_head_type.pointer(): raise TypeError("Type {} found. Expected struct list_head *." 
.format(str(node.type))) diff --git a/tests/test_list.py b/tests/test_list.py index a5efe9081ec..bea22c4a267 100644 --- a/tests/test_list.py +++ b/tests/test_list.py @@ -68,7 +68,7 @@ def test_cycle_list(self): expected_count = short_list.type.sizeof // short_list[0].type.sizeof count = 0 with self.assertRaises(ListCycleError): - for node in list_for_each(normal_list): + for node in list_for_each(normal_list, exact_cycles=True): count += 1 def test_corrupt_list(self): @@ -77,7 +77,7 @@ def test_corrupt_list(self): expected_count = short_list.type.sizeof // short_list[0].type.sizeof count = 0 with self.assertRaises(CorruptListError): - for node in list_for_each(normal_list): + for node in list_for_each(normal_list, exact_cycles=True): count += 1 def test_normal_container_list_with_string(self): @@ -110,7 +110,7 @@ def test_cycle_container_list_with_string(self): count = 0 with self.assertRaises(ListCycleError): for node in list_for_each_entry(cycle_list, 'struct container', - 'list'): + 'list', exact_cycles=True): count += 1 def test_cycle_container_list_with_type(self): @@ -122,7 +122,7 @@ def test_cycle_container_list_with_type(self): count = 0 with self.assertRaises(ListCycleError): for node in list_for_each_entry(cycle_list, struct_container, - 'list'): + 'list', exact_cycles=True): count += 1 def test_bad_container_list_with_string(self): From c47f33970cd448eeddd90332e48c062f43977cd9 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 22 Apr 2019 18:14:32 -0400 Subject: [PATCH 080/367] tests: clear file during test teardown The tests that load files (or targets) need to tear them down so subsequent tests don't get tripped up by them. Signed-off-by: Jeff Mahoney --- tests/test_infra_lookup.py | 3 +++ tests/test_list.py | 3 +++ tests/test_objfile_callbacks.py | 3 +++ tests/test_percpu.py | 1 + tests/test_syscache.py | 3 +++ tests/test_syscmd.py | 3 +++ tests/test_util.py | 3 +++ 7 files changed, 19 insertions(+) diff --git a/tests/test_infra_lookup.py b/tests/test_infra_lookup.py index acb7098fb24..9c3f50e86d4 100644 --- a/tests/test_infra_lookup.py +++ b/tests/test_infra_lookup.py @@ -204,6 +204,9 @@ class TestMinimalSymbolCallback(unittest.TestCase): def setUp(self): gdb.execute("file") + def tearDown(self): + gdb.execute("file") + def load_file(self): gdb.execute("file tests/test-util") diff --git a/tests/test_list.py b/tests/test_list.py index bea22c4a267..d78bba53ee1 100644 --- a/tests/test_list.py +++ b/tests/test_list.py @@ -15,6 +15,9 @@ def setUp(self): gdb.execute("file tests/test-list") self.list_head = gdb.lookup_type("struct list_head") + def tearDown(self): + gdb.execute("file") + def test_none_list(self): count = 0 with self.assertRaises(TypeError): diff --git a/tests/test_objfile_callbacks.py b/tests/test_objfile_callbacks.py index 7b0591d8c3b..74aba9105fc 100644 --- a/tests/test_objfile_callbacks.py +++ b/tests/test_objfile_callbacks.py @@ -11,6 +11,9 @@ class TestCallback(unittest.TestCase): def setUp(self): gdb.execute("file") + def tearDown(self): + gdb.execute("file") + def load_file(self): gdb.execute("file tests/test-util") diff --git a/tests/test_percpu.py b/tests/test_percpu.py index 773c7969449..a3f25c80f3b 100644 --- a/tests/test_percpu.py +++ b/tests/test_percpu.py @@ -31,6 +31,7 @@ def setUp(self): def tearDown(self): try: gdb.execute("detach", to_string=True) + gdb.execute("file") except gdb.error: print() pass diff --git a/tests/test_syscache.py b/tests/test_syscache.py index 2b69b93440e..2f719090f67 100644 --- a/tests/test_syscache.py +++ 
b/tests/test_syscache.py @@ -23,6 +23,9 @@ def setUp(self): gdb.execute("file tests/test-syscache") self.cycle_namespace() + def tearDown(self): + gdb.execute("file") + def cycle_namespace(self): import crash.cache.syscache reload(crash.cache.syscache) diff --git a/tests/test_syscmd.py b/tests/test_syscmd.py index d78c5472725..7387d18600c 100644 --- a/tests/test_syscmd.py +++ b/tests/test_syscmd.py @@ -15,6 +15,9 @@ def setUp(self): gdb.execute("file tests/test-syscache", to_string=True) self.cmd = SysCommand("pysys") + def tearDown(self): + gdb.execute("file") + def test_sys(self): old_stdout = sys.stdout sys.stdout = StringIO() diff --git a/tests/test_util.py b/tests/test_util.py index 9abbeb97295..e3cf78aa56d 100644 --- a/tests/test_util.py +++ b/tests/test_util.py @@ -21,6 +21,9 @@ def setUp(self): self.ulongsize = self.ulong.sizeof self.test_struct = gdb.lookup_type("struct test") + def tearDown(self): + gdb.execute("file") + def test_invalid_python_type(self): with self.assertRaises(InvalidArgumentError): offset = offsetof(self, 'dontcare') From 7bfe822c1e85a8bb40066f78e62299a9100bf438 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 22 Apr 2019 18:14:32 -0400 Subject: [PATCH 081/367] crash.kernel: convert setup to use DelayedSymvals Now that we have DelayedAttributes everywhere, the setup code can be converted to use it. Signed-off-by: Jeff Mahoney --- crash/kernel.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/crash/kernel.py b/crash/kernel.py index 0da18160c78..1bb87cabdce 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -19,7 +19,8 @@ class CrashKernel(CrashBaseClass): __types__ = [ 'struct module' ] - __symvals__ = [ 'modules' ] + __symvals__ = [ 'modules', 'init_task' ] + __symbols__ = [ 'runqueues'] def __init__(self, vmlinux_filename, searchpath=None): self.findmap = {} @@ -225,11 +226,9 @@ def load_debuginfo(self, objfile, name=None, verbose=False): def setup_tasks(self): gdb.execute('set print thread-events 0') - init_task = gdb.lookup_global_symbol('init_task') - task_list = init_task.value()['tasks'] - runqueues = gdb.lookup_global_symbol('runqueues') + task_list = self.init_task['tasks'] - rqs = get_percpu_var(runqueues) + rqs = get_percpu_var(self.runqueues) rqscurrs = {int(x["curr"]) : k for (k, x) in rqs.items()} self.pid_to_task_struct = {} @@ -239,9 +238,12 @@ def setup_tasks(self): task_count = 0 tasks = [] - for taskg in list_for_each_entry(task_list, init_task.type, 'tasks', include_head=True): + for taskg in list_for_each_entry(task_list, self.init_task.type, + 'tasks', include_head=True): tasks.append(taskg) - for task in list_for_each_entry(taskg['thread_group'], init_task.type, 'thread_group'): + for task in list_for_each_entry(taskg['thread_group'], + self.init_task.type, + 'thread_group'): tasks.append(task) for task in tasks: From 0bb497e83b3d9cb7a1534a7dd24eac80584c2c11 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 22 Apr 2019 18:14:32 -0400 Subject: [PATCH 082/367] crash.arch: add baseline ppc64 support This commit adds baseline ppc64 support. It should be enough to populate the thread list but this is an old commit that needs refreshing. 
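
As a sketch of how the new class is picked up once registered
(mirroring the architecture lookup the target setup code performs):

    import crash.arch
    import crash.arch.ppc64    # registers Powerpc64Architecture

    archclass = crash.arch.get_architecture("powerpc:common64")
    arch = archclass()
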
Signed-off-by: Jeff Mahoney --- crash/arch/ppc64.py | 30 ++++++++++++++++++++++++++++++ crash/kdump/target.py | 1 + crash/kernel.py | 3 ++- 3 files changed, 33 insertions(+), 1 deletion(-) create mode 100644 crash/arch/ppc64.py diff --git a/crash/arch/ppc64.py b/crash/arch/ppc64.py new file mode 100644 index 00000000000..3ab8eb7e159 --- /dev/null +++ b/crash/arch/ppc64.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +import gdb + +from crash.arch import CrashArchitecture, register, KernelFrameFilter + +class Powerpc64Architecture(CrashArchitecture): + ident = "powerpc:common64" + aliases = ["ppc64", "elf64-powerpc"] + + def __init__(self): + super(Powerpc64Architecture, self).__init__() + self.ulong_type = gdb.lookup_type('unsigned long') + thread_info_type = gdb.lookup_type('struct thread_info') + self.thread_info_p_type = thread_info_type.pointer() + + # Stop stack traces with addresses below this + self.filter = KernelFrameFilter(0xffff000000000000) + + def setup_thread_info(self, thread): + task = thread.info.task_struct + thread_info = task['stack'].cast(self.thread_info_p_type) + thread.info.set_thread_info(thread_info) + + @classmethod + def get_stack_pointer(cls, thread_struct): + return thread_struct['ksp'] + +register(Powerpc64Architecture) diff --git a/crash/kdump/target.py b/crash/kdump/target.py index d76e8001b47..948ca2b6a19 100644 --- a/crash/kdump/target.py +++ b/crash/kdump/target.py @@ -8,6 +8,7 @@ import addrxlat import crash.arch import crash.arch.x86_64 +import crash.arch.ppc64 class SymbolCallback(object): "addrxlat symbolic callback" diff --git a/crash/kernel.py b/crash/kernel.py index 1bb87cabdce..a8acc3d1457 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -39,7 +39,8 @@ def set_gdb_arch(self): elf_to_gdb = { ('EM_X86_64', 'ELFCLASS64') : 'i386:x86-64', ('EM_386', 'ELFCLASS32') : 'i386', - ('EM_S390', 'ELFCLASS64') : 's390:64-bit' + ('EM_S390', 'ELFCLASS64') : 's390:64-bit', + ('EM_PPC64', 'ELFCLASS64') : 'powerpc:common64' } try: From 6862725ae943a99db52f1dd9d28287356efa306d Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 22 Apr 2019 18:14:32 -0400 Subject: [PATCH 083/367] crash.types.bitmap: add find first/last/next helpers This commit adds some typical helpers for bitmaps: - find_first_set_bit - find_next_set_bit - find_last_set_bit - find_first_zero_bit - find_next_zero_bit Signed-off-by: Jeff Mahoney --- crash/types/bitmap.py | 279 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 275 insertions(+), 4 deletions(-) diff --git a/crash/types/bitmap.py b/crash/types/bitmap.py index b7f343c3933..aad3a970d20 100644 --- a/crash/types/bitmap.py +++ b/crash/types/bitmap.py @@ -1,8 +1,12 @@ #!/usr/bin/python3 # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +from typing import Iterable + import gdb +from math import log + from crash.infra import CrashBaseClass, export class TypesBitmapClass(CrashBaseClass): @@ -12,17 +16,45 @@ class TypesBitmapClass(CrashBaseClass): bits_per_ulong = None @classmethod - def setup_ulong(cls, gdbtype): + def _check_bitmap_type(cls, bitmap: gdb.Value) -> None: + if ((bitmap.type.code != gdb.TYPE_CODE_ARRAY or + bitmap[0].type.code != cls.unsigned_long_type.code or + bitmap[0].type.sizeof != cls.unsigned_long_type.sizeof) and + (bitmap.type.code != gdb.TYPE_CODE_PTR or + bitmap.type.target().code != cls.unsigned_long_type.code or + bitmap.type.target().sizeof != cls.unsigned_long_type.sizeof)): + raise TypeError("bitmaps are expected to be arrays of 
unsigned long not `{}'" + .format(bitmap.type)) + + @classmethod + def setup_ulong(cls, gdbtype: gdb.Type) -> None: cls.bits_per_ulong = gdbtype.sizeof * 8 @export @classmethod - def for_each_set_bit(cls, bitmap): + def for_each_set_bit(cls, bitmap: gdb.Value, + size_in_bytes: int=None) -> Iterable[int]: + """ + Yield each set bit in a bitmap + + Args: + bitmap (gdb.Value: + The bitmap to iterate + size_in_bytes (int): The size of the bitmap if the type is + unsigned long *. + + Yields: + int: The position of a bit that is set + """ + cls._check_bitmap_type(bitmap) + + if size_in_bytes is None: + size_in_bytes = bitmap.type.sizeof # FIXME: callback not workie? cls.bits_per_ulong = cls.unsigned_long_type.sizeof * 8 - size = bitmap.type.sizeof * 8 + size = size_in_bytes * 8 idx = 0 bit = 0 while size > 0: @@ -39,4 +71,243 @@ def for_each_set_bit(cls, bitmap): size -= cls.bits_per_ulong idx += 1 - + + @classmethod + def _find_first_set_bit(cls, val: gdb.Value) -> int: + r = 1 + + if val == 0: + return 0 + + if (val & 0xffffffff) == 0: + val >>= 32 + r += 32 + + if (val & 0xffff) == 0: + val >>= 16 + r += 16 + + if (val & 0xff) == 0: + val >>= 8 + r += 8 + + if (val & 0xf) == 0: + val >>= 4 + r += 4 + + if (val & 0x3) == 0: + val >>= 2 + r += 2 + + if (val & 0x1) == 0: + val >>= 1 + r += 1 + + return r + + @export + @classmethod + def find_next_zero_bit(cls, bitmap: gdb.Value, start: int, + size_in_bytes: int=None) -> int: + """ + Return the next unset bit in the bitmap starting at position `start', + inclusive. + + Args: + bitmap (gdb.Value: + The bitmap to test + start (int): The bit number to use as a starting position. If + the bit at this position is unset, it will be the first + bit number yielded. + size_in_bytes (int): The size of the bitmap if the type is + unsigned long *. + + Returns: + int: The position of the first bit that is unset or 0 if all are set + """ + cls._check_bitmap_type(bitmap) + + if size_in_bytes is None: + size_in_bytes = bitmap.type.sizeof + + elements = size_in_bytes // cls.unsigned_long_type.sizeof + + if start > size_in_bytes << 3: + raise IndexError("Element {} is out of range ({} elements)" + .format(start, elements)) + + element = start // (cls.unsigned_long_type.sizeof << 3) + offset = start % (cls.unsigned_long_type.sizeof << 3) + + for n in range(element, elements): + item = ~bitmap[n] + if item == 0: + continue + + if offset > 0: + item &= ~((1 << offset) - 1) + + v = cls._find_first_set_bit(item) + if v > 0: + ret = n * (cls.unsigned_long_type.sizeof << 3) + v + assert(ret >= start) + return ret + + offset = 0 + + return 0 + + @export + @classmethod + def find_first_zero_bit(cls, bitmap: gdb.Value, + size_in_bytes: int=None) -> int: + """ + Return the first unset bit in the bitmap + + Args: + bitmap (gdb.Value: + The bitmap to scan + start (int): The bit number to use as a starting position. If + the bit at this position is unset, it will be the first + bit number yielded. + + Returns: + int: The position of the first bit that is unset + """ + return cls.find_next_zero_bit(bitmap, 0, size_in_bytes) + + @export + @classmethod + def find_next_set_bit(cls, bitmap: gdb.Value, start: int, + size_in_bytes: int=None) -> int: + """ + Return the next set bit in the bitmap starting at position `start', + inclusive. + + Args: + bitmap (gdb.Value: + The bitmap to scan + start (int): The bit number to use as a starting position. If + the bit at this position is unset, it will be the first + bit number yielded. 
+ size_in_bytes (int): The size of the bitmap if the type is + unsigned long *. + + Returns: + int: The position of the next bit that is set, or 0 if all are + unset + """ + cls._check_bitmap_type(bitmap) + + if size_in_bytes is None: + size_in_bytes = bitmap.type.sizeof + + elements = size_in_bytes // cls.unsigned_long_type.sizeof + + if start > size_in_bytes << 3: + raise IndexError("Element {} is out of range ({} elements)" + .format(start, elements)) + + element = start // (cls.unsigned_long_type.sizeof << 3) + offset = start % (cls.unsigned_long_type.sizeof << 3) + + for n in range(element, elements): + if bitmap[n] == 0: + continue + + item = bitmap[n] + if offset > 0: + item &= ~((1 << offset) - 1) + + v = cls._find_first_set_bit(item) + if v > 0: + ret = n * (cls.unsigned_long_type.sizeof << 3) + v + assert(ret >= start) + return ret + + offset = 0 + + return 0 + + @export + @classmethod + def find_first_set_bit(cls, bitmap: gdb.Value, + size_in_bytes: int=None) -> int: + """ + Return the first set bit in the bitmap + + Args: + bitmap (gdb.Value: + The bitmap to scan + size_in_bytes (int): The size of the bitmap if the type is + unsigned long *. + + Returns: + int: The position of the first bit that is set, or 0 if all are + unset + """ + return cls.find_next_set_bit(bitmap, 0, size_in_bytes) + + @classmethod + def _find_last_set_bit(cls, val: gdb.Value) -> int: + r = cls.unsigned_long_type.sizeof << 3 + + if val == 0: + return 0 + + if (val & 0xffffffff00000000) == 0: + val <<= 32 + r -= 32 + + if (val & 0xffff000000000000) == 0: + val <<= 16 + r -= 16 + + if (val & 0xff00000000000000) == 0: + val <<= 8 + r -= 8 + + if (val & 0xf000000000000000) == 0: + val <<= 4 + r -= 4 + + if (val & 0xc000000000000000) == 0: + val <<= 2 + r -= 2 + + if (val & 0x8000000000000000) == 0: + val <<= 1 + r -= 1 + + return r + + @export + @classmethod + def find_last_set_bit(cls, bitmap: gdb.Value, + size_in_bytes: int=None) -> int: + """ + Return the last set bit in the bitmap + + Args: + bitmap (gdb.Value: + The bitmap to scan + + Returns: + int: The position of the last bit that is set, or 0 if all are unset + """ + cls._check_bitmap_type(bitmap) + + if size_in_bytes is None: + size_in_bytes = bitmap.type.sizeof + + elements = size_in_bytes // cls.unsigned_long_type.sizeof + + for n in range(elements - 1, -1, -1): + if bitmap[n] == 0: + continue + + v = cls._find_last_set_bit(bitmap[n]) + if v > 0: + return n * (cls.unsigned_long_type.sizeof << 3) + v + + return 0 From be6cc948624f1f838c1af5fb56cb43ec351a7d95 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 22 Apr 2019 18:14:32 -0400 Subject: [PATCH 084/367] crash: use new gdb.Target to create standalone target When I rebased crash-python-gdb to an 8.3 prerelease, I found that targets have been converted to C++. That necessitated a rewrite of much of the target code, and I cleaned up some rough edges. With the new target, we load the vmcore using a simple 'target kdumpfile /path/to/vmcore' command that can be used entirely outside of the crash semantic code. This means we can debug the target more easily and use it in the testing code without having to parse everything for every test. This commit converts crash to use the new target but doesn't exploit it for testing yet. 
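
For illustration, the standalone flow now mirrors what crash.sh does;
the paths below are placeholders:

    import gdb
    from kdump.target import Target

    gdb.execute("file /path/to/vmlinux")             # symbols must be loaded first
    target = Target(debug=False)                     # registers the 'kdumpfile' target
    gdb.execute("target kdumpfile /path/to/vmcore")  # attach the dump
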
Signed-off-by: Jeff Mahoney --- crash.sh | 22 +++++-- crash/kdump/__init__.py | 2 - crash/kernel.py | 109 ++++++------------------------- crash/session.py | 20 ++---- crash/types/node.py | 2 +- crash/types/page.py | 2 +- kdump/__init__.py | 0 {crash/kdump => kdump}/target.py | 88 +++++++++++++------------ tests/test_target.py | 24 +++++-- 9 files changed, 107 insertions(+), 162 deletions(-) delete mode 100644 crash/kdump/__init__.py create mode 100644 kdump/__init__.py rename {crash/kdump => kdump}/target.py (56%) diff --git a/crash.sh b/crash.sh index cd47cca23eb..77535b82151 100755 --- a/crash.sh +++ b/crash.sh @@ -118,6 +118,15 @@ set prompt py-crash> set height 0 set print pretty on +file "$KERNEL" + +python +from kdump.target import Target +target = Target(debug=False) +end + +target kdumpfile $VMCORE + python import sys import traceback @@ -129,8 +138,8 @@ except RuntimeError as e: sys.exit(1) path = "$SEARCHDIRS".split(' ') try: - x = crash.session.Session("$KERNEL", "$VMCORE", "$ZKERNEL", path) - print("The 'pyhelp' command will list the command extensions.") + x = crash.session.Session(path) + print("The 'pyhelp' command will list the command extensions.") except gdb.error as e: print("crash-python: {}, exiting".format(str(e)), file=sys.stderr) traceback.print_exc() @@ -140,16 +149,19 @@ except RuntimeError as e: file=sys.stderr) traceback.print_exc() sys.exit(1) + +target.unregister() +del target EOF # This is how we debug gdb problems when running crash if [ "$DEBUGMODE" = "gdb" ]; then - RUN="run -nh -q -x $GDBINIT" + RUN="run -nx -q -x $GDBINIT" echo $RUN > /tmp/gdbinit - gdb $GDB -nh -q -x /tmp/gdbinit + gdb $GDB -nx -q -x /tmp/gdbinit elif [ "$DEBUGMODE" = "valgrind" ]; then valgrind --keep-stacktraces=alloc-and-free $GDB -nh -q -x $GDBINIT else - $GDB -nh -q -x $GDBINIT + $GDB -nx -q -x $GDBINIT fi diff --git a/crash/kdump/__init__.py b/crash/kdump/__init__.py deleted file mode 100644 index 9e72c13b9b3..00000000000 --- a/crash/kdump/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# -*- coding: utf-8 -*- -# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: diff --git a/crash/kernel.py b/crash/kernel.py index a8acc3d1457..8e4257f5688 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -4,15 +4,15 @@ import gdb import sys import os.path +import crash.arch +import crash.arch.x86_64 +import crash.arch.ppc64 from crash.infra import CrashBaseClass, export from crash.types.list import list_for_each_entry from crash.types.percpu import get_percpu_var from crash.types.list import list_for_each_entry import crash.cache.tasks from crash.types.task import LinuxTask -import crash.kdump -import crash.kdump.target -from kdumpfile import kdumpfile from elftools.elf.elffile import ELFFile LINUX_KERNEL_PID = 1 @@ -22,98 +22,29 @@ class CrashKernel(CrashBaseClass): __symvals__ = [ 'modules', 'init_task' ] __symbols__ = [ 'runqueues'] - def __init__(self, vmlinux_filename, searchpath=None): + def __init__(self, searchpath=None): self.findmap = {} - self.vmlinux_filename = vmlinux_filename self.searchpath = searchpath - f = open(self.vmlinux_filename, 'rb') - self.elffile = ELFFile(f) - - self.set_gdb_arch() - - def set_gdb_arch(self): - mach = self.elffile['e_machine'] - e_class = self.elffile['e_ident']['EI_CLASS'] - - elf_to_gdb = { - ('EM_X86_64', 'ELFCLASS64') : 'i386:x86-64', - ('EM_386', 'ELFCLASS32') : 'i386', - ('EM_S390', 'ELFCLASS64') : 's390:64-bit', - ('EM_PPC64', 'ELFCLASS64') : 'powerpc:common64' - } - - try: - gdbarch = elf_to_gdb[(mach, e_class)] - except KeyError as e: - 
raise RuntimeError("no mapping for {}:{} to gdb architecture found.".format(mach, e_class)) - gdb.execute("set arch {}".format(gdbarch), to_string=True) - - def open_kernel(self): - if self.base_offset is None: - raise RuntimeError("Base offset is unconfigured.") + sym = gdb.lookup_symbol('vsnprintf', None)[0] + if sym is None: + raise RuntimeError("Missing vsnprintf indicates that there is no kernel image loaded.") - self.load_sections() - - try: - list_type = gdb.lookup_type('struct list_head') - except gdb.error as e: - self.load_debuginfo(gdb.objfiles()[0], None) - try: - list_type = gdb.lookup_type('struct list_head') - except gdb.error as e: - raise RuntimeError("Couldn't locate debuginfo for {}" - .format(self.vmlinux_filename)) + f = open(gdb.objfiles()[0].filename, 'rb') + self.elffile = ELFFile(f) - self.target.setup_arch() + archname = sym.symtab.objfile.architecture.name() + archclass = crash.arch.get_architecture(archname) + self.arch = archclass() - def get_sections(self): - sections = {} + self.target = gdb.current_target() + self.vmcore = self.target.kdump - text = self.elffile.get_section_by_name('.text') + self.target.fetch_registers = self.fetch_registers - for section in self.elffile.iter_sections(): - if (section['sh_addr'] < text['sh_addr'] and - section.name != '.data..percpu'): - continue - sections[section.name] = section['sh_addr'] - - return sections - - def load_sections(self): - sections = self.get_sections() - - line = "" - - # .data..percpu shouldn't have relocation applied but it does. - # Perhaps it's due to the address being 0 and it being handled - # as unspecified in the parameter list. -# for section, addr in sections.items(): -# if addr == 0: -# line += " -s {} {:#x}".format(section, addr) - - # The gdb internals are subtle WRT how symbols are mapped. - # Minimal symbols are mapped using the offset for the section - # that contains them. That means that using providing an address - # for .text here gives a base address with no offset and minimal - # symbols in .text (like __switch_to_asm) will not have the correct - # addresses after relocation. - cmd = "add-symbol-file {} -o {:#x} {} ".format(self.vmlinux_filename, - self.base_offset, line) - gdb.execute(cmd, to_string=True) - - def attach_vmcore(self, vmcore_filename, debug=False): - self.vmcore_filename = vmcore_filename - self.vmcore = kdumpfile(vmcore_filename) - self.target = crash.kdump.target.Target(self.vmcore, debug) - - self.base_offset = 0 - try: - KERNELOFFSET = "linux.vmcoreinfo.lines.KERNELOFFSET" - attr = self.vmcore.attr.get(KERNELOFFSET, "0") - self.base_offset = int(attr, base=16) - except Exception as e: - print(e) + def fetch_registers(self, register): + thread = gdb.selected_thread() + return self.arch.fetch_register(thread, register.regnum) def for_each_module(self): for module in list_for_each_entry(self.modules, self.module_type, @@ -264,9 +195,9 @@ def setup_tasks(self): continue thread.name = task['comm'].string() - self.target.arch.setup_thread_info(thread) + self.arch.setup_thread_info(thread) ltask.attach_thread(thread) - ltask.set_get_stack_pointer(self.target.arch.get_stack_pointer) + ltask.set_get_stack_pointer(self.arch.get_stack_pointer) crash.cache.tasks.cache_task(ltask) diff --git a/crash/session.py b/crash/session.py index 581a5ce6394..3296887575c 100644 --- a/crash/session.py +++ b/crash/session.py @@ -17,10 +17,6 @@ class Session(object): commands and subsystems. 
Args: - kernel_exec (str, optional): The path to the kernel executable - vmcore (str, optional): The path to the vmcore - kernelpath (str, optional): The path the kernel name to use - when reporting errors searchpath (list of str, optional): Paths to directory trees to search for kernel modules and debuginfo debug (bool, optional, default=False): Whether to enable verbose @@ -28,25 +24,17 @@ class Session(object): """ - def __init__(self, kernel_exec=None, vmcore=None, kernelpath=None, - searchpath=None, debug=False): - self.vmcore_filename = vmcore - + def __init__(self, searchpath=None, debug=False): print("crash-python initializing...") if searchpath is None: searchpath = [] - if kernel_exec: - self.kernel = crash.kernel.CrashKernel(kernel_exec, searchpath) - self.kernel.attach_vmcore(vmcore, debug) - self.kernel.open_kernel() + self.kernel = crash.kernel.CrashKernel(searchpath) autoload_submodules('crash.cache') autoload_submodules('crash.subsystem') autoload_submodules('crash.commands') - if kernel_exec: - self.kernel.setup_tasks() - self.kernel.load_modules(searchpath) - + self.kernel.setup_tasks() + self.kernel.load_modules(searchpath) diff --git a/crash/types/node.py b/crash/types/node.py index 2e59f07f2cf..1e460d9cbb0 100644 --- a/crash/types/node.py +++ b/crash/types/node.py @@ -14,7 +14,7 @@ class TypesNodeUtilsClass(CrashBaseClass): @export def numa_node_id(self, cpu): - if gdb.current_target().arch.ident == "powerpc:common64": + if gdb.current_target().arch.name() == "powerpc:common64": return int(self.numa_cpu_lookup_table[cpu]) else: return int(get_percpu_var(self.numa_node, cpu)) diff --git a/crash/types/page.py b/crash/types/page.py index 7f8d3a0321a..54487aa2bd7 100644 --- a/crash/types/page.py +++ b/crash/types/page.py @@ -52,7 +52,7 @@ class Page(CrashBaseClass): def setup_page_type(cls, gdbtype): # TODO: should check config, but that failed to work on ppc64, hardcode # 64k for now - if gdb.current_target().arch.ident == "powerpc:common64": + if gdb.current_target().arch.name() == "powerpc:common64": cls.PAGE_SHIFT = 16 # also a config cls.directmap_base = 0xc000000000000000 diff --git a/kdump/__init__.py b/kdump/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/crash/kdump/target.py b/kdump/target.py similarity index 56% rename from crash/kdump/target.py rename to kdump/target.py index 948ca2b6a19..d374707e8e7 100644 --- a/crash/kdump/target.py +++ b/kdump/target.py @@ -6,9 +6,6 @@ from kdumpfile import kdumpfile, KDUMP_KVADDR from kdumpfile.exceptions import * import addrxlat -import crash.arch -import crash.arch.x86_64 -import crash.arch.ppc64 class SymbolCallback(object): "addrxlat symbolic callback" @@ -31,38 +28,50 @@ def __call__(self, symtype, *args): raise addrxlat.NoDataError() class Target(gdb.Target): - def __init__(self, vmcore, debug=False): - if not isinstance(vmcore, kdumpfile): - raise TypeError("vmcore must be of type kdumpfile") - self.arch = None + def __init__(self, debug=False): + super().__init__() self.debug = debug - self.kdump = vmcore + self.shortname = "kdumpfile" + self.longname = "Use a Linux kernel kdump file as a target" + + self.register() + + def open(self, filename, from_tty): + + if len(gdb.objfiles()) == 0: + raise gdb.GdbError("kdumpfile target requires kernel to be already loaded for symbol resolution") + try: + self.kdump = kdumpfile(file=filename) + except Exception as e: + raise gdb.GdbError("Failed to open `{}': {}" + .format(filename, str(e))) + + self.kdump.attr['addrxlat.ostype'] = 'linux' ctx = 
self.kdump.get_addrxlat_ctx() ctx.cb_sym = SymbolCallback(ctx) - self.kdump.attr['addrxlat.ostype'] = 'linux' - # So far we've read from the kernel image, now that we've setup - # the architecture, we're ready to plumb into the target - # infrastructure. - super().__init__() + KERNELOFFSET = "linux.vmcoreinfo.lines.KERNELOFFSET" + try: + attr = self.kdump.attr.get(KERNELOFFSET, "0") + self.base_offset = int(attr, base=16) + except Exception as e: + self.base_offset = 0 + + vmlinux = gdb.objfiles()[0].filename - def setup_arch(self): - archname = self.kdump.attr.arch.name - archclass = crash.arch.get_architecture(archname) - if not archclass: - raise NotImplementedError("Architecture {} is not supported yet." - .format(archname)) + # Load the kernel at the relocated address + gdb.execute("add-symbol-file {} -o {:#x} -s .data..percpu 0" + .format(vmlinux, self.base_offset)) - # Doesn't matter what symbol as long as it's everywhere - # Use vsnprintf since 'printk' can be dropped with CONFIG_PRINTK=n - sym = gdb.lookup_symbol('vsnprintf', None)[0] - if sym is None: - raise RuntimeError("Missing vsnprintf indicates there is no kernel image loaded.") - if sym.symtab.objfile.architecture.name() != archclass.ident: - raise TypeError("Dump file is for `{}' but provided kernel is for `{}'" - .format(archname, archclass.ident)) + # Clear out the old symbol cache + gdb.execute("file {}".format(vmlinux)) - self.arch = archclass() + def close(self): + try: + self.unregister() + except: + pass + del self.kdump @classmethod def report_error(cls, addr, length, error): @@ -70,7 +79,7 @@ def report_error(cls, addr, length, error): .format(length, addr, str(error)), file=sys.stderr) - def to_xfer_partial(self, obj, annex, readbuf, writebuf, offset, ln): + def xfer_partial(self, obj, annex, readbuf, writebuf, offset, ln): ret = -1 if obj == self.TARGET_OBJECT_MEMORY: try: @@ -93,28 +102,21 @@ def to_xfer_partial(self, obj, annex, readbuf, writebuf, offset, ln): raise IOError("Unknown obj type") return ret - @staticmethod - def to_thread_alive(ptid): + def thread_alive(self, ptid): return True - @staticmethod - def to_pid_to_str(ptid): + def pid_to_str(self, ptid): return "pid {:d}".format(ptid[1]) - def to_fetch_registers(self, register): - thread = gdb.selected_thread() - self.arch.fetch_register(thread, register.regnum) - return True + def fetch_registers(self, register): + return False - @staticmethod - def to_prepare_to_store(thread): + def prepare_to_store(self, thread): pass # We don't need to store anything; The regcache is already written. 
- @staticmethod - def to_store_registers(thread): + def store_registers(self, thread): pass - @staticmethod - def to_has_execution(ptid): + def has_execution(self, ptid): return False diff --git a/tests/test_target.py b/tests/test_target.py index 29877ed2bb3..27df6590904 100644 --- a/tests/test_target.py +++ b/tests/test_target.py @@ -4,17 +4,31 @@ import unittest import gdb import os.path -from crash.kdump.target import Target +from kdump.target import Target class TestUtil(unittest.TestCase): def setUp(self): + gdb.execute("file") self.do_real_tests = os.path.exists("tests/vmcore") + def tearDown(self): + try: + x = gdb.current_target() + del x + except: + pass + gdb.execute('target exec') + def test_bad_file(self): - with self.assertRaises(TypeError): - x = Target("/does/not/exist") + x = Target() + with self.assertRaises(gdb.error): + gdb.execute('target kdumpfile /does/not/exist') + x.unregister() def test_real_open_with_no_kernel(self): if self.do_real_tests: - with self.assertRaises(RuntimeError): - x = Target("tests/vmcore") + x = Target() + with self.assertRaises(gdb.error): + gdb.execute('target kdumpfile tests/vmcore') + x.unregister() + From dd4fb58cada8b879bbad5a224c575ffe6fa3d040 Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Tue, 7 May 2019 11:24:18 -0400 Subject: [PATCH 085/367] crash.types.module: create module for modules This module contains for_each_module and a new for_each_module_section. Signed-off-by: Jeff Mahoney --- crash/kernel.py | 21 ++++------------- crash/types/module.py | 54 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 59 insertions(+), 16 deletions(-) create mode 100644 crash/types/module.py diff --git a/crash/kernel.py b/crash/kernel.py index 8e4257f5688..2281d7235be 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -11,6 +11,7 @@ from crash.types.list import list_for_each_entry from crash.types.percpu import get_percpu_var from crash.types.list import list_for_each_entry +from crash.types.module import for_each_module, for_each_module_section import crash.cache.tasks from crash.types.task import LinuxTask from elftools.elf.elffile import ELFFile @@ -18,8 +19,7 @@ LINUX_KERNEL_PID = 1 class CrashKernel(CrashBaseClass): - __types__ = [ 'struct module' ] - __symvals__ = [ 'modules', 'init_task' ] + __symvals__ = [ 'init_task' ] __symbols__ = [ 'runqueues'] def __init__(self, searchpath=None): @@ -46,21 +46,10 @@ def fetch_registers(self, register): thread = gdb.selected_thread() return self.arch.fetch_register(thread, register.regnum) - def for_each_module(self): - for module in list_for_each_entry(self.modules, self.module_type, - 'list'): - yield module - def get_module_sections(self, module): - attrs = module['sect_attrs'] out = [] - for sec in range(0, attrs['nsections']): - attr = attrs['attrs'][sec] - name = attr['name'].string() - if name == '.text': - continue - out.append("-s {} {:#x}".format(name, int(attr['address']))) - + for (name, addr) in for_each_module_section(module): + out.append("-s {} {:#x}".format(name, addr)) return " ".join(out) def load_modules(self, verbose=False): @@ -68,7 +57,7 @@ def load_modules(self, verbose=False): sys.stdout.flush() failed = 0 loaded = 0 - for module in self.for_each_module(): + for module in for_each_module(): modname = "{}".format(module['name'].string()) modfname = "{}.ko".format(modname) found = False diff --git a/crash/types/module.py b/crash/types/module.py new file mode 100644 index 00000000000..3a17b245345 --- /dev/null +++ b/crash/types/module.py @@ -0,0 +1,54 @@ +# -*- 
coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +from typing import Iterable, Tuple + +import gdb +from crash.infra import CrashBaseClass, export +from crash.types.list import list_for_each_entry + +class Module(CrashBaseClass): + __symvals__ = [ 'modules'] + __types__ = [ 'struct module' ] + + @classmethod + @export + def for_each_module(cls) -> Iterable[gdb.Value]: + """ + Iterate over each module in the modules list + + Yields: + gdb.Value(): The next module on the list + + """ + for module in list_for_each_entry(cls.modules, cls.module_type, + 'list'): + yield module + + @classmethod + @export + def for_each_module_section(cls, module: gdb.Value) \ + -> Iterable[Tuple[str, int]]: + """ + Iterate over each ELF section in a loaded module + + This routine iterates over the 'sect_attrs' member of the + 'struct module' already in memory. For ELF sections from the + module at rest, use pyelftools on the module file. + + Args: + module (gdb.Value): The struct module to iterate + + Yields: + (str, int): A 2-tuple containing the name and address + of the section + """ + attrs = module['sect_attrs'] + + for sec in range(0, attrs['nsections']): + attr = attrs['attrs'][sec] + name = attr['name'].string() + if name == '.text': + continue + + yield (name, int(attr['address'])) From fa4d8dab014c16a086e23f3b3d7740fab0ce9c10 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Tue, 30 Apr 2019 22:54:14 -0400 Subject: [PATCH 086/367] kdump.target: don't specify offset for .data..percpu The "right" thing to do is for .data..percpu to be loaded at offset 0. Unfortunately, that only works when debuginfo is embedded in the binary. When separate debuginfo is used, section offsets can't be specified and gdb interprets an offset of 0 to mean "immediately after the preceding section." In order to make the rest of the percpu code sane, we'll let gdb make the same assumption when embedded debuginfo is used. Signed-off-by: Jeff Mahoney --- crash/types/percpu.py | 21 --------------------- kdump/target.py | 11 +++++++++-- 2 files changed, 9 insertions(+), 23 deletions(-) diff --git a/crash/types/percpu.py b/crash/types/percpu.py index c1034d23223..7c446f45977 100644 --- a/crash/types/percpu.py +++ b/crash/types/percpu.py @@ -18,14 +18,6 @@ class TypesPerCPUClass(CrashBaseClass): dynamic_offset_cache = None - # TODO: put this somewhere else - arch? 
- @classmethod - def setup_kaslr_offset(cls): - offset = int(gdb.lookup_minimal_symbol("_text").value().address) - offset -= int(gdb.lookup_minimal_symbol("phys_startup_64").value().address) - offset -= 0xffffffff80000000 - cls.kaslr_offset = offset - @classmethod def setup_per_cpu_size(cls, symbol): try: @@ -36,9 +28,6 @@ def setup_per_cpu_size(cls, symbol): @classmethod def setup_nr_cpus(cls, ignored): cls.nr_cpus = array_size(cls.__per_cpu_offset) - # piggyback on this as it seems those minsymbols at the time of - # their callback yield offset of 0 - cls.setup_kaslr_offset() @classmethod def __add_to_offset_cache(cls, base, start, end): @@ -53,9 +42,6 @@ def __setup_dynamic_offset_cache(cls): for slot in range(cls.pcpu_nr_slots): for chunk in list_for_each_entry(cls.pcpu_slot[slot], cls.pcpu_chunk_type, 'list'): chunk_base = int(chunk["base_addr"]) - int(cls.pcpu_base_addr) - # __per_cpu_start is adjusted by KASLR, but dynamic offsets are - # not, so we have to subtract the offset - chunk_base += int(cls.__per_cpu_start) - cls.kaslr_offset off = 0 start = None @@ -149,13 +135,6 @@ def get_percpu_var_nocheck(self, var, cpu=None, is_symbol=False): addr += var.cast(self.char_p_type) addr -= self.__per_cpu_start - # if we got var from symbol, it means KASLR relocation was applied to - # the offset, it was applied also to __per_cpu_start, which cancels out - # If var wasn't a symbol, we have to undo the adjustion to - # __per_cpu_start, otherwise we get a bogus address - if not is_symbol: - addr += self.kaslr_offset - vartype = var.type return addr.cast(vartype).dereference() diff --git a/kdump/target.py b/kdump/target.py index d374707e8e7..fb4e11cf121 100644 --- a/kdump/target.py +++ b/kdump/target.py @@ -59,9 +59,16 @@ def open(self, filename, from_tty): vmlinux = gdb.objfiles()[0].filename + # Load the kernel at the relocated address - gdb.execute("add-symbol-file {} -o {:#x} -s .data..percpu 0" - .format(vmlinux, self.base_offset)) + # Unfortunately, the percpu section has an offset of 0 and + # ends up getting placed at the offset base. This is easy + # enough to handle in the percpu code. + result = gdb.execute("add-symbol-file {} -o {:#x}" + .format(vmlinux, self.base_offset), + to_string=True) + if self.debug: + print(result) # Clear out the old symbol cache gdb.execute("file {}".format(vmlinux)) From 237eb8d0d7a452ecff9770baf90d8620e211586d Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 6 May 2019 14:22:33 -0400 Subject: [PATCH 087/367] crash.types.percpu: handle missing pcpu_nr_slots Kernels prior to 2.6.30 didn't have dynamic percpu ranges. The test cases have also not been extended to cover the dynamic ranges. This commit catches DelayedAttributeError so the test cases can pass. Signed-off-by: Jeff Mahoney --- crash/types/percpu.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/crash/types/percpu.py b/crash/types/percpu.py index 7c446f45977..7ffb90902f9 100644 --- a/crash/types/percpu.py +++ b/crash/types/percpu.py @@ -103,16 +103,20 @@ def __is_percpu_var(self, var): return int(v) < self.per_cpu_size def __is_percpu_var_dynamic(self, var): - if self.dynamic_offset_cache is None: - self.__setup_dynamic_offset_cache() + try: + if self.dynamic_offset_cache is None: + self.__setup_dynamic_offset_cache() - var = int(var) - # TODO: we could sort the list... - for (start, end) in self.dynamic_offset_cache: - if var >= start and var < end: - return True + var = int(var) + # TODO: we could sort the list... 
+ for (start, end) in self.dynamic_offset_cache: + if var >= start and var < end: + return True - return False + return False + except DelayedAttributeError: + # This can happen with the testcases or in kernels prior to 2.6.30 + pass @export def is_percpu_var(self, var): From 6206891e8d305d7d7811ed91fcee3b62e7764a3b Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Wed, 1 May 2019 08:36:23 -0400 Subject: [PATCH 088/367] crash.types.percpu: get_percpu_var, raise exception with passed val If the value passed to get_percpu_var isn't a percpu, we raise an exception -- but the exception contained the processed value instead of the passed one. That can be misleading when debugging. Also, handle the val=None case that can occur as we try to treat the value as a pointer. Signed-off-by: Jeff Mahoney --- crash/types/percpu.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/crash/types/percpu.py b/crash/types/percpu.py index 7ffb90902f9..60f1a8ab4f0 100644 --- a/crash/types/percpu.py +++ b/crash/types/percpu.py @@ -144,6 +144,7 @@ def get_percpu_var_nocheck(self, var, cpu=None, is_symbol=False): @export def get_percpu_var(self, var, cpu=None): + orig_var = var # Percpus can be: # - actual objects, where we'll need to use the address. # - pointers to objects, where we'll need to use the target @@ -159,6 +160,6 @@ def get_percpu_var(self, var, cpu=None): var = var.address if not self.is_percpu_var(var): var = var.address - if not self.is_percpu_var(var): - raise TypeError("Argument {} does not correspond to a percpu pointer.".format(var)) - return self.get_percpu_var_nocheck(var, cpu, is_symbol) + if var is None or not self.is_percpu_var(var): + raise TypeError("Argument {} does not correspond to a percpu pointer.".format(orig_var)) + return self.get_percpu_var_nocheck(var, cpu, is_symbol, nr_cpus) From 4f2afa0f69a5cca8a90e39b603848ebfb13dd75b Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Tue, 7 May 2019 11:55:00 -0400 Subject: [PATCH 089/367] crash.types.cpu: fix online cpu mask and add possible cpu mask --- crash/types/cpu.py | 67 +++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 60 insertions(+), 7 deletions(-) diff --git a/crash/types/cpu.py b/crash/types/cpu.py index a5c63f26d7b..29fc3d666bd 100644 --- a/crash/types/cpu.py +++ b/crash/types/cpu.py @@ -3,24 +3,77 @@ import gdb from crash.infra import CrashBaseClass, export -from crash.util import container_of, find_member_variant, get_symbol_value from crash.types.bitmap import for_each_set_bit +from crash.exceptions import DelayedAttributeError + +from typing import List, Iterable # this wraps no particular type, rather it's a placeholder for # functions to iterate over online cpu's etc. 
 class TypesCPUClass(CrashBaseClass):
+    __symbol_callbacks__ = [ ('cpu_online_mask', '_setup_online_mask'),
+                             ('__cpu_online_mask', '_setup_online_mask'),
+                             ('cpu_possible_mask', '_setup_possible_mask'),
+                             ('__cpu_possible_mask', '_setup_possible_mask') ]
 
-    __symbol_callbacks__ = [ ('cpu_online_mask', 'setup_cpus_mask') ]
-
-    cpus_online = None
+    cpus_online: List[int] = list()
+    cpus_possible: List[int] = list()
 
     @classmethod
-    def setup_cpus_mask(cls, cpu_mask):
-        bits = cpu_mask.value()["bits"]
+    def _setup_online_mask(cls, symbol: gdb.Symbol) -> None:
+        cls.cpu_online_mask = symbol.value()
+        bits = cls.cpu_online_mask["bits"]
         cls.cpus_online = list(for_each_set_bit(bits))
 
     @export
-    def for_each_online_cpu(self):
+    def for_each_online_cpu(self) -> Iterable[int]:
+        """
+        Yield CPU numbers of all online CPUs
+
+        Yields:
+            int: Number of an online CPU
+        """
         for cpu in self.cpus_online:
             yield cpu
 
+    @export
+    def highest_online_cpu_nr(self) -> int:
+        """
+        Return the highest online CPU number
+
+        Returns:
+            int: The highest online CPU number
+        """
+        if not TypesCPUClass.cpus_online:
+            raise DelayedAttributeError(self.__class__.__name__, 'cpus_online')
+        return self.cpus_online[-1]
+
+    @classmethod
+    def _setup_possible_mask(cls, cpu_mask: gdb.Symbol) -> None:
+        cls.cpu_possible_mask = cpu_mask.value()
+        bits = cls.cpu_possible_mask["bits"]
+        cls.cpus_possible = list(for_each_set_bit(bits))
+
+    @export
+    def for_each_possible_cpu(self) -> Iterable[int]:
+        """
+        Yield CPU numbers of all possible CPUs
+
+        Yields:
+            int: Number of a possible CPU
+        """
+        for cpu in self.cpus_possible:
+            yield cpu
+
+    @export
+    def highest_possible_cpu_nr(self) -> int:
+        """
+        Return the highest possible CPU number
+
+        Returns:
+            int: The highest possible CPU number
+        """
+        if not self.cpus_possible:
+            raise DelayedAttributeError(self.__class__.__name__,
+                                        'cpus_possible')
+        return self.cpus_possible[-1]

From c80a8a234b7ced2e3f8b4a8498825f057fd997e6 Mon Sep 17 00:00:00 2001
From: Jeff Mahoney
Date: Mon, 22 Apr 2019 18:14:32 -0400
Subject: [PATCH 090/367] crash.types.percpu: better percpu handling

Newer kernels handle percpu allocation differently than the SLE11
kernel the percpu code was originally written against.  The old code
fails on those kernels, causing crash-python to abort while setting
up tasks.

This commit adds support for reading the cpu_possible_mask to gather
percpu variables.

NOTE: Before this lands in master, we need to ensure it works with
sparsely numbered CPUs.  I'm not convinced it does yet.
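As a rough illustration only (a hypothetical interactive session, not
part of this change), the possible-CPU helpers and the percpu lookup
are expected to combine along these lines inside a crash-python
session, using the 'runqueues' percpu symbol that the task setup code
already relies on:

    import gdb
    from crash.types.cpu import for_each_possible_cpu, highest_possible_cpu_nr
    from crash.types.percpu import get_percpu_var

    # Look up the static percpu symbol for the scheduler runqueues
    runqueues = gdb.lookup_symbol('runqueues', None)[0]

    print("highest possible cpu: {}".format(highest_possible_cpu_nr()))

    # Read the runqueue on each possible CPU and print its current task
    for cpu in for_each_possible_cpu():
        rq = get_percpu_var(runqueues, cpu)
        print("cpu {}: rq->curr = {:#x}".format(cpu, int(rq['curr'])))
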
Signed-off-by: Jeff Mahoney --- crash/types/percpu.py | 404 +++++++++++++++++++++++++++++++----------- 1 file changed, 297 insertions(+), 107 deletions(-) diff --git a/crash/types/percpu.py b/crash/types/percpu.py index 60f1a8ab4f0..d0d895e5053 100644 --- a/crash/types/percpu.py +++ b/crash/types/percpu.py @@ -1,165 +1,355 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +from typing import Dict, Union, List, Tuple + import gdb from crash.infra import CrashBaseClass, export -from crash.util import array_size +from crash.util import array_size, struct_has_member from crash.types.list import list_for_each_entry from crash.exceptions import DelayedAttributeError +from crash.types.bitmap import find_first_set_bit, find_last_set_bit +from crash.types.bitmap import find_next_set_bit, find_next_zero_bit +from crash.types.page import Page +from crash.types.cpu import highest_possible_cpu_nr + +class PerCPUError(TypeError): + """The passed object does not respond to a percpu pointer.""" + _fmt = "{} does not correspond to a percpu pointer." + def __init__(self, var): + super().__init__(self._fmt.format(var)) + +SymbolOrValue = Union[gdb.Value, gdb.Symbol] +PerCPUReturn = Union[gdb.Value, Dict[int, gdb.Value]] class TypesPerCPUClass(CrashBaseClass): - __types__ = [ 'char *', 'struct pcpu_chunk' ] + """ + Per-cpus come in a few forms: + - "Array" of objects + - "Array" of pointers to objects + - Pointers to either of those + + If we want to get the typing right, we need to recognize each one + and figure out what type to pass back. We do want to dereference + pointer to a percpu but we don't want to dereference a percpu + pointer. + """ + __types__ = [ 'void *', 'char *', 'struct pcpu_chunk', + 'struct percpu_counter' ] __symvals__ = [ '__per_cpu_offset', 'pcpu_base_addr', 'pcpu_slot', - 'pcpu_nr_slots' ] + 'pcpu_nr_slots', 'pcpu_group_offsets' ] __minsymvals__ = ['__per_cpu_start', '__per_cpu_end' ] - __minsymbol_callbacks__ = [ ('__per_cpu_start', 'setup_per_cpu_size'), - ('__per_cpu_end', 'setup_per_cpu_size') ] - __symbol_callbacks__ = [ ('__per_cpu_offset', 'setup_nr_cpus') ] + __minsymbol_callbacks__ = [ ('__per_cpu_start', '_setup_per_cpu_size'), + ('__per_cpu_end', '_setup_per_cpu_size') ] + __symbol_callbacks__ = [ ('__per_cpu_offset', '_setup_nr_cpus') ] - dynamic_offset_cache = None + _dynamic_offset_cache: List[Tuple[int, int]] = list() + _static_ranges: Dict[int, int] = dict() + _last_cpu = -1 + _nr_cpus = 0 @classmethod - def setup_per_cpu_size(cls, symbol): + def _setup_per_cpu_size(cls, symbol: gdb.Symbol) -> None: + try: + size = cls.__per_cpu_end - cls.__per_cpu_start + except DelayedAttributeError: + pass + + cls._static_ranges[0] = size + if cls.__per_cpu_start != 0: + cls._static_ranges[cls.__per_cpu_start] = size + try: - cls.per_cpu_size = cls.__per_cpu_end - cls.__per_cpu_start + # This is only an optimization so we don't return NR_CPUS values + # when there are far fewer CPUs on the system. 
+ cls._last_cpu = highest_possible_cpu_nr() except DelayedAttributeError: pass @classmethod - def setup_nr_cpus(cls, ignored): - cls.nr_cpus = array_size(cls.__per_cpu_offset) + def _setup_nr_cpus(cls, ignored: gdb.Symbol) -> None: + cls._nr_cpus = array_size(cls.__per_cpu_offset) + + if cls._last_cpu == -1: + cls._last_cpu = cls._nr_cpus @classmethod - def __add_to_offset_cache(cls, base, start, end): - cls.dynamic_offset_cache.append((base + start, base + end)) + def _add_to_offset_cache(cls, base: int, start: int, end: int) -> None: + cls._dynamic_offset_cache.append((base + start, base + end)) @classmethod - def __setup_dynamic_offset_cache(cls): - # TODO: interval tree would be more efficient, but this adds no 3rd - # party module dependency... - cls.dynamic_offset_cache = list() + def dump_ranges(cls) -> None: + """ + Dump all percpu ranges to stdout + """ + for (start, size) in cls._static_ranges.items(): + print(f"static start={start:#x}, size={size:#x}") + if cls._dynamic_offset_cache: + for (start, end) in cls._dynamic_offset_cache: + print(f"dynamic start={start:#x}, end={end:#x}") + + @classmethod + def _setup_dynamic_offset_cache_area_map(cls, chunk: gdb.Value) -> None: used_is_negative = None - for slot in range(cls.pcpu_nr_slots): - for chunk in list_for_each_entry(cls.pcpu_slot[slot], cls.pcpu_chunk_type, 'list'): - chunk_base = int(chunk["base_addr"]) - int(cls.pcpu_base_addr) - - off = 0 - start = None - _map = chunk['map'] - map_used = int(chunk['map_used']) - - # Prior to 3.14 commit 723ad1d90b56 ("percpu: store offsets - # instead of lengths in ->map[]"), negative values in map - # meant the area is used, and the absolute value is area size. - # After the commit, the value is area offset for unused, and - # offset | 1 for used (all offsets have to be even). The value - # at index 'map_used' is a 'sentry' which is the total size | - # 1. There is no easy indication of whether kernel includes - # the commit, unless we want to rely on version numbers and - # risk breakage in case of backport to older version. Instead - # employ a heuristic which scans the first chunk, and if no - # negative value is found, assume the kernel includes the - # commit. - if used_is_negative is None: - used_is_negative = False - for i in range(map_used): - val = int(_map[i]) - if val < 0: - used_is_negative = True - break - - if used_is_negative: - for i in range(map_used): - val = int(_map[i]) - if val < 0: - if start is None: - start = off - else: - if start is not None: - cls.__add_to_offset_cache(chunk_base, start, off) - start = None - off += abs(val) + chunk_base = int(chunk["base_addr"]) - int(cls.pcpu_base_addr) + + off = 0 + start = None + _map = chunk['map'] + map_used = int(chunk['map_used']) + + # Prior to 3.14 commit 723ad1d90b56 ("percpu: store offsets + # instead of lengths in ->map[]"), negative values in map + # meant the area is used, and the absolute value is area size. + # After the commit, the value is area offset for unused, and + # offset | 1 for used (all offsets have to be even). The value + # at index 'map_used' is a 'sentry' which is the total size | + # 1. There is no easy indication of whether kernel includes + # the commit, unless we want to rely on version numbers and + # risk breakage in case of backport to older version. Instead + # employ a heuristic which scans the first chunk, and if no + # negative value is found, assume the kernel includes the + # commit. 
+ if used_is_negative is None: + used_is_negative = False + for i in range(map_used): + val = int(_map[i]) + if val < 0: + used_is_negative = True + break + + if used_is_negative: + for i in range(map_used): + val = int(_map[i]) + if val < 0: + if start is None: + start = off + else: if start is not None: - cls.__add_to_offset_cache(chunk_base, start, off) + cls._add_to_offset_cache(chunk_base, start, off) + start = None + off += abs(val) + if start is not None: + cls._add_to_offset_cache(chunk_base, start, off) + else: + for i in range(map_used): + off = int(_map[i]) + if off & 1 == 1: + off -= 1 + if start is None: + start = off else: - for i in range(map_used): - off = int(_map[i]) - if off & 1 == 1: - off -= 1 - if start is None: - start = off - else: - if start is not None: - cls.__add_to_offset_cache(chunk_base, start, off) - start = None if start is not None: - off = int(_map[map_used]) - 1 - cls.__add_to_offset_cache(chunk_base, start, off) + cls._add_to_offset_cache(chunk_base, start, off) + start = None + if start is not None: + off = int(_map[map_used]) - 1 + cls._add_to_offset_cache(chunk_base, start, off) - def __is_percpu_var(self, var): - if int(var) < self.__per_cpu_start: - return False - v = var.cast(self.char_p_type) - self.__per_cpu_start - return int(v) < self.per_cpu_size - def __is_percpu_var_dynamic(self, var): + @classmethod + def _setup_dynamic_offset_cache_bitmap(cls, chunk: gdb.Value) -> None: + group_offset = int(cls.pcpu_group_offsets[0]) + size_in_bytes = int(chunk['nr_pages']) * Page.PAGE_SIZE + size_in_bits = size_in_bytes << 3 + start = -1 + end = 0 + + chunk_base = int(chunk["base_addr"]) - int(cls.pcpu_base_addr) + cls._add_to_offset_cache(chunk_base, 0, size_in_bytes) + + @classmethod + def _setup_dynamic_offset_cache(cls) -> None: + # TODO: interval tree would be more efficient, but this adds no 3rd + # party module dependency... + use_area_map = struct_has_member(cls.pcpu_chunk_type, 'map') + for slot in range(cls.pcpu_nr_slots): + for chunk in list_for_each_entry(cls.pcpu_slot[slot], cls.pcpu_chunk_type, 'list'): + if use_area_map: + cls._setup_dynamic_offset_cache_area_map(chunk) + else: + cls._setup_dynamic_offset_cache_bitmap(chunk) + + def _is_percpu_var_dynamic(self, var: int) -> bool: try: - if self.dynamic_offset_cache is None: - self.__setup_dynamic_offset_cache() + if not self._dynamic_offset_cache: + self._setup_dynamic_offset_cache() - var = int(var) # TODO: we could sort the list... 
- for (start, end) in self.dynamic_offset_cache: + for (start, end) in self._dynamic_offset_cache: if var >= start and var < end: return True - - return False except DelayedAttributeError: # This can happen with the testcases or in kernels prior to 2.6.30 pass + return False + + # The resolved percpu address + def _is_static_percpu_address(self, addr: int) -> bool: + for start in self._static_ranges: + size = self._static_ranges[start] + for cpu in range(0, self._last_cpu): + offset = int(__per_cpu_offset[cpu]) + start + if addr >= offset and addr < offset + size: + return True + return False + + # The percpu virtual address + def is_static_percpu_var(self, addr: int) -> bool: + """ + Returns whether the provided address is within the bounds of + the percpu static ranges + + Args: + addr: The address to query + + Returns: + :obj:`bool`: Whether this address belongs to a static range + """ + for start in self._static_ranges: + for cpu in range(0, self._last_cpu): + size = self._static_ranges[start] + if addr >= start and addr < start + size: + return True + return False + + # The percpu range should start at offset 0 but gdb relocation + # treats 0 as a special value indicating it should just be after + # the previous section. It's possible to override this while + # loading debuginfo but not when debuginfo is embedded. + def _relocated_offset(self, var): + addr=int(var) + start = self.__per_cpu_start + size = self._static_ranges[start] + if addr >= start and addr < start + size: + return addr - start + return addr + @export - def is_percpu_var(self, var): + def is_percpu_var(self, var: SymbolOrValue) -> bool: + """ + Returns whether the provided value or symbol falls within + any of the percpu ranges + + Args: + var: The symbol or value to query + + Returns: + :obj:`bool`: Whether the value belongs to any percpu range + """ if isinstance(var, gdb.Symbol): var = var.value().address - if self.__is_percpu_var(var): + + var = int(var) + if self.is_static_percpu_var(var): return True - if self.__is_percpu_var_dynamic(var): + if self._is_percpu_var_dynamic(var): return True return False - def get_percpu_var_nocheck(self, var, cpu=None, is_symbol=False): + def get_percpu_var_nocheck(self, var: SymbolOrValue, cpu: int=None, + nr_cpus: int=None) -> PerCPUReturn: + """ + Retrieve a per-cpu variable for one or all CPUs without performing + range checks + + Args: + var: The symbol or value to use to resolve the percpu location + cpu (optional): The cpu for which to return the per-cpu value. + A value of None will return a dictionary of [cpu, value] + for all CPUs. + nr_cpus(optional): The count of CPUs for which to return values. + :obj:`None` or unspecified will use the highest possible + CPU count. + + Returns: + :obj:`gdb.Value`: If cpu is specified, the value corresponding to + the specified CPU. The value is of the same type passed via + var. + :obj:`dict`(:obj:`int`, :obj:`gdb.Value`): If cpu is not specified, + the values corresponding to every CPU in a dictionary indexed by CPU + number. The type of the :obj:`gdb.Value` used as the + :obj:`dict` value is the same type as the :obj:`gdb.Value` + or :obj:`gdb.Symbol` passed via var. 
+ + Raises: + :obj:`TypeError`: var is not :obj:`gdb.Symbol` or :obj:`gdb.Value` + :obj:`ValueError`: cpu is less than ``0`` + :obj:`ValueError`: nr_cpus is less-or-equal to ``0`` + """ + if nr_cpus is None: + nr_cpus = self._last_cpu + if nr_cpus < 0: + raise ValueError("nr_cpus must be > 0") if cpu is None: vals = {} - for cpu in range(0, self.nr_cpus): - vals[cpu] = self.get_percpu_var_nocheck(var, cpu, is_symbol) + for cpu in range(0, nr_cpus): + vals[cpu] = self.get_percpu_var_nocheck(var, cpu, nr_cpus) return vals + elif cpu < 0: + raise ValueError("cpu must be >= 0") addr = self.__per_cpu_offset[cpu] - addr += var.cast(self.char_p_type) - addr -= self.__per_cpu_start + if addr > 0: + addr += self._relocated_offset(var) - vartype = var.type - return addr.cast(vartype).dereference() + val = gdb.Value(addr).cast(var.type) + if var.type != self.void_p_type: + val = val.dereference() + return val @export - def get_percpu_var(self, var, cpu=None): + def get_percpu_var(self, var: SymbolOrValue, cpu: int=None, + nr_cpus: int=None) -> PerCPUReturn: + """ + Retrieve a per-cpu variable for one or all CPUs + + Args: + var: The symbol or value to use to resolve the percpu location + cpu (optional): The cpu for which to return the per-cpu value. + A value of None will return a dictionary of [cpu, value] + for all CPUs. + nr_cpus(optional): The count of CPUs for which to return values. + :obj:`None` or unspecified will use the highest possible + CPU count. + + Returns: + :obj:`gdb.Value`: If cpu is specified, the value corresponding to + the specified CPU. The value is of the same type passed via + var. + :obj:`dict`(:obj:`int`, :obj:`gdb.Value`): If cpu is not specified, + the values corresponding to every CPU in a dictionary indexed by CPU + number. The type of the :obj:`gdb.Value` used as the + :obj:`dict` value is the same type as the :obj:`gdb.Value` + or :obj:`gdb.Symbol` passed via var. + + Raises: + :obj:`TypeError`: var is not :obj:`gdb.Symbol` or :obj:`gdb.Value` + :obj:`ValueError`: cpu is less than ``0`` + :obj:`ValueError`: nr_cpus is less-or-equal to ``0`` + """ orig_var = var - # Percpus can be: - # - actual objects, where we'll need to use the address. 
- # - pointers to objects, where we'll need to use the target - # - a pointer to a percpu object, where we'll need to use the - # address of the target - is_symbol = False if isinstance(var, gdb.Symbol) or isinstance(var, gdb.MinSymbol): var = var.value() - is_symbol = True if not isinstance(var, gdb.Value): raise TypeError("Argument must be gdb.Symbol or gdb.Value") - if var.type.code != gdb.TYPE_CODE_PTR: - var = var.address - if not self.is_percpu_var(var): - var = var.address - if var is None or not self.is_percpu_var(var): - raise TypeError("Argument {} does not correspond to a percpu pointer.".format(orig_var)) - return self.get_percpu_var_nocheck(var, cpu, is_symbol, nr_cpus) + + if var.type.code == gdb.TYPE_CODE_PTR: + # The percpu contains pointers + if var.address is not None and self.is_percpu_var(var.address): + var = var.address + # Pointer to a percpu + elif self.is_percpu_var(var): + if var.type != self.void_p_type: + var = var.dereference().address + assert(self.is_percpu_var(var)) + else: + raise PerCPUError(orig_var) + # object is a percpu + elif self.is_percpu_var(var.address): + var = var.address + else: + raise PerCPUError(orig_var) + + return self.get_percpu_var_nocheck(var, cpu, nr_cpus) From fd7b2cda686a77404e202361069f20843f9cad10 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Tue, 21 May 2019 12:59:08 -0400 Subject: [PATCH 091/367] crash.types.percpu: separate the all-cpus and single-cpu cases --- crash/kernel.py | 4 +- crash/types/percpu.py | 143 +++++++++++++++++++----------------------- tests/test_percpu.py | 20 +++--- 3 files changed, 78 insertions(+), 89 deletions(-) diff --git a/crash/kernel.py b/crash/kernel.py index 2281d7235be..53715b30c49 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -9,7 +9,7 @@ import crash.arch.ppc64 from crash.infra import CrashBaseClass, export from crash.types.list import list_for_each_entry -from crash.types.percpu import get_percpu_var +from crash.types.percpu import get_percpu_vars from crash.types.list import list_for_each_entry from crash.types.module import for_each_module, for_each_module_section import crash.cache.tasks @@ -149,7 +149,7 @@ def setup_tasks(self): task_list = self.init_task['tasks'] - rqs = get_percpu_var(self.runqueues) + rqs = get_percpu_vars(self.runqueues) rqscurrs = {int(x["curr"]) : k for (k, x) in rqs.items()} self.pid_to_task_struct = {} diff --git a/crash/types/percpu.py b/crash/types/percpu.py index d0d895e5053..9d21bbede24 100644 --- a/crash/types/percpu.py +++ b/crash/types/percpu.py @@ -20,7 +20,6 @@ def __init__(self, var): super().__init__(self._fmt.format(var)) SymbolOrValue = Union[gdb.Value, gdb.Symbol] -PerCPUReturn = Union[gdb.Value, Dict[int, gdb.Value]] class TypesPerCPUClass(CrashBaseClass): """ @@ -248,46 +247,34 @@ def is_percpu_var(self, var: SymbolOrValue) -> bool: return True return False - def get_percpu_var_nocheck(self, var: SymbolOrValue, cpu: int=None, - nr_cpus: int=None) -> PerCPUReturn: - """ - Retrieve a per-cpu variable for one or all CPUs without performing - range checks + def _resolve_percpu_var(self, var): + orig_var = var + if isinstance(var, gdb.Symbol) or isinstance(var, gdb.MinSymbol): + var = var.value() + if not isinstance(var, gdb.Value): + raise TypeError("Argument must be gdb.Symbol or gdb.Value") - Args: - var: The symbol or value to use to resolve the percpu location - cpu (optional): The cpu for which to return the per-cpu value. - A value of None will return a dictionary of [cpu, value] - for all CPUs. 
- nr_cpus(optional): The count of CPUs for which to return values. - :obj:`None` or unspecified will use the highest possible - CPU count. + if var.type.code == gdb.TYPE_CODE_PTR: + # The percpu contains pointers + if var.address is not None and self.is_percpu_var(var.address): + var = var.address + # Pointer to a percpu + elif self.is_percpu_var(var): + if var.type != types.void_p_type: + var = var.dereference().address + assert(self.is_percpu_var(var)) + else: + raise PerCPUError(orig_var) + # object is a percpu + elif self.is_percpu_var(var.address): + var = var.address + else: + raise PerCPUError(orig_var) - Returns: - :obj:`gdb.Value`: If cpu is specified, the value corresponding to - the specified CPU. The value is of the same type passed via - var. - :obj:`dict`(:obj:`int`, :obj:`gdb.Value`): If cpu is not specified, - the values corresponding to every CPU in a dictionary indexed by CPU - number. The type of the :obj:`gdb.Value` used as the - :obj:`dict` value is the same type as the :obj:`gdb.Value` - or :obj:`gdb.Symbol` passed via var. + return var - Raises: - :obj:`TypeError`: var is not :obj:`gdb.Symbol` or :obj:`gdb.Value` - :obj:`ValueError`: cpu is less than ``0`` - :obj:`ValueError`: nr_cpus is less-or-equal to ``0`` - """ - if nr_cpus is None: - nr_cpus = self._last_cpu - if nr_cpus < 0: - raise ValueError("nr_cpus must be > 0") - if cpu is None: - vals = {} - for cpu in range(0, nr_cpus): - vals[cpu] = self.get_percpu_var_nocheck(var, cpu, nr_cpus) - return vals - elif cpu < 0: + def _get_percpu_var(self, var: SymbolOrValue, cpu: int) -> gdb.Value: + if cpu < 0: raise ValueError("cpu must be >= 0") addr = self.__per_cpu_offset[cpu] @@ -300,56 +287,58 @@ def get_percpu_var_nocheck(self, var: SymbolOrValue, cpu: int=None, return val @export - def get_percpu_var(self, var: SymbolOrValue, cpu: int=None, - nr_cpus: int=None) -> PerCPUReturn: + def get_percpu_var(self, var: SymbolOrValue, cpu: int) -> gdb.Value: """ Retrieve a per-cpu variable for one or all CPUs Args: var: The symbol or value to use to resolve the percpu location - cpu (optional): The cpu for which to return the per-cpu value. - A value of None will return a dictionary of [cpu, value] - for all CPUs. - nr_cpus(optional): The count of CPUs for which to return values. - :obj:`None` or unspecified will use the highest possible - CPU count. + cpu: The cpu for which to return the per-cpu value. Returns: - :obj:`gdb.Value`: If cpu is specified, the value corresponding to - the specified CPU. The value is of the same type passed via - var. - :obj:`dict`(:obj:`int`, :obj:`gdb.Value`): If cpu is not specified, - the values corresponding to every CPU in a dictionary indexed by CPU - number. The type of the :obj:`gdb.Value` used as the - :obj:`dict` value is the same type as the :obj:`gdb.Value` - or :obj:`gdb.Symbol` passed via var. + :obj:`gdb.Value`: The value corresponding to the specified CPU. + The value is of the same type passed via var. 
Raises: :obj:`TypeError`: var is not :obj:`gdb.Symbol` or :obj:`gdb.Value` + :obj:`.PerCPUError`: var does not fall into any percpu range :obj:`ValueError`: cpu is less than ``0`` - :obj:`ValueError`: nr_cpus is less-or-equal to ``0`` """ - orig_var = var - if isinstance(var, gdb.Symbol) or isinstance(var, gdb.MinSymbol): - var = var.value() - if not isinstance(var, gdb.Value): - raise TypeError("Argument must be gdb.Symbol or gdb.Value") + var = self._resolve_percpu_var(var) + return self._get_percpu_var(var, cpu) - if var.type.code == gdb.TYPE_CODE_PTR: - # The percpu contains pointers - if var.address is not None and self.is_percpu_var(var.address): - var = var.address - # Pointer to a percpu - elif self.is_percpu_var(var): - if var.type != self.void_p_type: - var = var.dereference().address - assert(self.is_percpu_var(var)) - else: - raise PerCPUError(orig_var) - # object is a percpu - elif self.is_percpu_var(var.address): - var = var.address - else: - raise PerCPUError(orig_var) + @export + def get_percpu_vars(self, var: SymbolOrValue, + nr_cpus: int=None) -> Dict[int, gdb.Value]: + """ + Retrieve a per-cpu variable for all CPUs + + Args: + var: The symbol or value to use to resolve the percpu location + nr_cpus (optional): The number of CPUs for which to return results + ``None`` (or unspecified) will use the highest possible + CPU count. + + Returns: + :obj:`dict`(:obj:`int`, :obj:`gdb.Value`): The values corresponding + to every CPU in a dictionary indexed by CPU number. The type of the + :obj:`gdb.Value` used as the :obj:`dict` value is the same type as + the :obj:`gdb.Value` or :obj:`gdb.Symbol` passed as var. + + Raises: + :obj:`TypeError`: var is not ``gdb.Symbol`` or ``gdb.Value`` + :obj:`.PerCPUError`: var does not fall into any percpu range + :obj:`ValueError`: nr_cpus is <= ``0`` + """ + if nr_cpus is None: + nr_cpus = self.last_cpu + + if nr_cpus <= 0: + raise ValueError("nr_cpus must be > 0") + + vals = dict() - return self.get_percpu_var_nocheck(var, cpu, nr_cpus) + var = self._resolve_percpu_var(var) + for cpu in range(0, nr_cpus): + vals[cpu] = self._get_percpu_var(var, cpu) + return vals diff --git a/tests/test_percpu.py b/tests/test_percpu.py index a3f25c80f3b..0be6f2ed1c8 100644 --- a/tests/test_percpu.py +++ b/tests/test_percpu.py @@ -5,7 +5,7 @@ import gdb import crash -import crash.types.percpu +import crash.types.percpu as percpu class TestPerCPU(unittest.TestCase): def setUp(self): @@ -40,28 +40,28 @@ def tearDown(self): def test_struct_test(self): var = gdb.lookup_symbol('struct_test', None)[0] self.assertTrue(var is not None) - for cpu, val in list(crash.types.percpu.get_percpu_var(var).items()): + for cpu, val in list(percpu.get_percpu_vars(var).items()): self.assertTrue(val['x'] == cpu) self.assertTrue(val.type == self.test_struct) def test_ulong_test(self): var = gdb.lookup_symbol('ulong_test', None)[0] self.assertTrue(var is not None) - for cpu, val in list(crash.types.percpu.get_percpu_var(var).items()): + for cpu, val in list(percpu.get_percpu_vars(var).items()): self.assertTrue(val == cpu) self.assertTrue(val.type == self.ulong_type) def test_ulong_ptr_test(self): var = gdb.lookup_symbol('ptr_to_ulong_test', None)[0] self.assertTrue(var is not None) - for cpu, val in list(crash.types.percpu.get_percpu_var(var).items()): + for cpu, val in list(percpu.get_percpu_vars(var).items()): self.assertTrue(val.type == self.ulong_type.pointer()) self.assertTrue(val.dereference() == cpu) def test_voidp_test(self): var = gdb.lookup_symbol('voidp_test', None)[0] 
self.assertTrue(var is not None) - for cpu, val in list(crash.types.percpu.get_percpu_var(var).items()): + for cpu, val in list(percpu.get_percpu_vars(var).items()): self.assertTrue(val is not None) self.assertTrue(val.type == self.voidp) self.assertTrue(int(val) == 0xdeadbeef) @@ -69,7 +69,7 @@ def test_voidp_test(self): def test_struct_test_ptr(self): var = gdb.lookup_symbol('ptr_to_struct_test', None)[0] self.assertTrue(var is not None) - for cpu, val in list(crash.types.percpu.get_percpu_var(var).items()): + for cpu, val in list(percpu.get_percpu_vars(var).items()): self.assertTrue(val['x'] == cpu) self.assertTrue(val.type == self.test_struct.pointer()) @@ -77,14 +77,14 @@ def test_struct_test_ptr(self): def test_percpu_ptr_sym(self): var = gdb.lookup_symbol('percpu_test', None)[0] self.assertTrue(var is not None) - for cpu, val in list(crash.types.percpu.get_percpu_var(var).items()): + for cpu, val in list(percpu.get_percpu_vars(var).items()): self.assertTrue(val.type == self.test_struct) # This is a pointer to an unbound percpu var def test_percpu_ptr_val(self): var = gdb.lookup_symbol('percpu_test', None)[0].value() self.assertTrue(var is not None) - for cpu, val in list(crash.types.percpu.get_percpu_var(var).items()): + for cpu, val in list(percpu.get_percpu_vars(var).items()): self.assertTrue(val.type == self.test_struct) # This is a saved pointer to an bound percpu var (e.g. normal ptr) @@ -92,7 +92,7 @@ def test_non_percpu_sym(self): var = gdb.lookup_symbol('non_percpu_test', None)[0] self.assertTrue(var is not None) with self.assertRaises(TypeError): - x = crash.types.percpu.get_percpu_var(var, 0) + x = percpu.get_percpu_var(var, 0) self.assertTrue(var.value()['x'] == 0) # This is a pointer to an bound percpu var (e.g. normal ptr) @@ -100,5 +100,5 @@ def test_non_percpu_ptr(self): var = gdb.lookup_symbol('non_percpu_test', None)[0].value() self.assertTrue(var is not None) with self.assertRaises(TypeError): - x = crash.types.percpu.get_percpu_var(var, 0) + x = percpu.get_percpu_var(var, 0) self.assertTrue(var['x'] == 0) From 1451018513d6d2d48e25aded5ee5f62d7aceef6f Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Tue, 30 Apr 2019 17:35:31 -0400 Subject: [PATCH 092/367] crash.types.percpu: add percpu_counter_sum This commit adds support for calculating the contents of percpu counters. Signed-off-by: Jeff Mahoney --- crash/types/percpu.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/crash/types/percpu.py b/crash/types/percpu.py index 9d21bbede24..f5de579423e 100644 --- a/crash/types/percpu.py +++ b/crash/types/percpu.py @@ -342,3 +342,32 @@ def get_percpu_vars(self, var: SymbolOrValue, for cpu in range(0, nr_cpus): vals[cpu] = self._get_percpu_var(var, cpu) return vals + + @export + def percpu_counter_sum(self, var: SymbolOrValue) -> int: + """ + Returns the sum of a percpu counter + + Args: + var: The percpu counter to sum. The value must be of type + ``struct percpu_counter``. 
+ + Returns: + :obj:`int`: the sum of all components of the percpu counter + """ + if isinstance(var, gdb.Symbol): + var = var.value() + + if not (var.type == self.percpu_counter_type or + (var.type.code == gdb.TYPE_CODE_PTR and + var.type.target() == self.percpu_counter_type)): + raise TypeError("var must be gdb.Symbol or gdb.Value describing `{}' not `{}'" + .format(self.percpu_counter_type, var.type)) + + total = int(var['count']) + + v = get_percpu_vars(var['counters']) + for cpu in v: + total += int(v[cpu]) + + return total From 5ae3b26a23b0eefe5252fba0b3229081f12732e5 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 22 Apr 2019 18:14:32 -0400 Subject: [PATCH 093/367] crash: auto-select crashing task and print backtrace on startup The most common task using a kernel debugger is to examine a crashed task. This autoselects and prints the backtrace on startup if such a task exists. Signed-off-by: Jeff Mahoney --- crash/kernel.py | 10 ++++++++++ crash/session.py | 16 ++++++++++++++++ 2 files changed, 26 insertions(+) diff --git a/crash/kernel.py b/crash/kernel.py index 53715b30c49..b81d66e8402 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -15,6 +15,7 @@ import crash.cache.tasks from crash.types.task import LinuxTask from elftools.elf.elffile import ELFFile +from crash.util import get_symbol_value LINUX_KERNEL_PID = 1 @@ -41,6 +42,7 @@ def __init__(self, searchpath=None): self.vmcore = self.target.kdump self.target.fetch_registers = self.fetch_registers + self.crashing_thread = None def fetch_registers(self, register): thread = gdb.selected_thread() @@ -167,6 +169,11 @@ def setup_tasks(self): 'thread_group'): tasks.append(task) + try: + crashing_cpu = int(get_symbol_value('crashing_cpu')) + except Exception as e: + crashing_cpu = None + for task in tasks: cpu = None regs = None @@ -177,12 +184,15 @@ def setup_tasks(self): ltask = LinuxTask(task, active, cpu, regs) ptid = (LINUX_KERNEL_PID, task['pid'], 0) + try: thread = gdb.selected_inferior().new_thread(ptid, ltask) except gdb.error as e: print("Failed to setup task @{:#x}".format(int(task.address))) continue thread.name = task['comm'].string() + if active and crashing_cpu is not None and cpu == crashing_cpu: + self.crashing_thread = thread self.arch.setup_thread_info(thread) ltask.attach_thread(thread) diff --git a/crash/session.py b/crash/session.py index 3296887575c..2959aa1d3cd 100644 --- a/crash/session.py +++ b/crash/session.py @@ -38,3 +38,19 @@ def __init__(self, searchpath=None, debug=False): self.kernel.setup_tasks() self.kernel.load_modules(searchpath) + if self.kernel.crashing_thread: + try: + result = gdb.execute("thread {}" + .format(self.kernel.crashing_thread.num), + to_string=True) + if debug: + print(result) + except gdb.error as e: + print("Error while switching to crashed thread: {}" + .format(str(e))) + print("Further debugging may not be possible.") + return + + print("Backtrace from crashing task (PID {:d}):" + .format(self.kernel.crashing_thread.ptid[1])) + gdb.execute("where") From eb33dd313ad2f0a45d0aa94b00e758cd8789cf51 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Wed, 1 May 2019 08:48:35 -0400 Subject: [PATCH 094/367] crash.sh: use $TMPDIR for gdbinit in --gdb mode We currently drop the gdbinit in /tmp, which is not correct. 
Signed-off-by: Jeff Mahoney --- crash.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crash.sh b/crash.sh index 77535b82151..d51bd17876c 100755 --- a/crash.sh +++ b/crash.sh @@ -158,8 +158,8 @@ EOF if [ "$DEBUGMODE" = "gdb" ]; then RUN="run -nx -q -x $GDBINIT" - echo $RUN > /tmp/gdbinit - gdb $GDB -nx -q -x /tmp/gdbinit + echo $RUN > $TMPDIR/gdbinit-debug + gdb $GDB -nx -q -x $TMPDIR/gdbinit-debug elif [ "$DEBUGMODE" = "valgrind" ]; then valgrind --keep-stacktraces=alloc-and-free $GDB -nh -q -x $GDBINIT else From 2384e0173bb7b49b55caa90c439fcc341abac7bc Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Wed, 1 May 2019 09:02:37 -0400 Subject: [PATCH 095/367] crash.kernel: use new KernelError exception to report errors during startup crash.kernel currently raises RuntimeError when we should be raising a more specific exception. Signed-off-by: Jeff Mahoney --- crash/kernel.py | 5 ++++- crash/session.py | 10 ++++++++-- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/crash/kernel.py b/crash/kernel.py index b81d66e8402..84bcf0a36f8 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -17,6 +17,9 @@ from elftools.elf.elffile import ELFFile from crash.util import get_symbol_value +class CrashKernelError(RuntimeError): + pass + LINUX_KERNEL_PID = 1 class CrashKernel(CrashBaseClass): @@ -29,7 +32,7 @@ def __init__(self, searchpath=None): sym = gdb.lookup_symbol('vsnprintf', None)[0] if sym is None: - raise RuntimeError("Missing vsnprintf indicates that there is no kernel image loaded.") + raise CrashKernelError("Missing vsnprintf indicates that there is no kernel image loaded.") f = open(gdb.objfiles()[0].filename, 'rb') self.elffile = ELFFile(f) diff --git a/crash/session.py b/crash/session.py index 2959aa1d3cd..cbd2ab560dc 100644 --- a/crash/session.py +++ b/crash/session.py @@ -6,6 +6,7 @@ from crash.infra import autoload_submodules import crash.kernel +from crash.kernel import CrashKernelError from kdumpfile import kdumpfile class Session(object): @@ -35,8 +36,13 @@ def __init__(self, searchpath=None, debug=False): autoload_submodules('crash.subsystem') autoload_submodules('crash.commands') - self.kernel.setup_tasks() - self.kernel.load_modules(searchpath) + try: + self.kernel.setup_tasks() + self.kernel.load_modules(searchpath) + except CrashKernelError as e: + print(str(e)) + print("Further debugging may not be possible.") + return if self.kernel.crashing_thread: try: From da5073cddc2f3ccf8235785c3be13390fba7d8c0 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Wed, 1 May 2019 10:00:14 -0400 Subject: [PATCH 096/367] crash.kernel: add debug option to load_modules When debugging module loading, it's useful to see the full path to the module. crash.kernel.CrashKernel.load_module now accepts a debug option to make that a bit more verbose. The 'verbose' mode still prints the module name and the normal mode prints dots. 
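For illustration only (hypothetical call sites, not part of this
change), the output modes are expected to be selected roughly like
this from an already-initialized crash-python session:

    from crash.kernel import CrashKernel

    # The searchpath here is a placeholder for a local debuginfo cache
    kernel = CrashKernel(searchpath=['/var/cache/debuginfo'])

    # Default output is one dot per module; verbose=True prints the
    # module name and load address; debug=True prints the full on-disk
    # path of each module instead.
    kernel.load_modules(verbose=True, debug=True)
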
Signed-off-by: Jeff Mahoney --- crash/kernel.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/crash/kernel.py b/crash/kernel.py index 84bcf0a36f8..827cd40c9d1 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -57,7 +57,7 @@ def get_module_sections(self, module): out.append("-s {} {:#x}".format(name, addr)) return " ".join(out) - def load_modules(self, verbose=False): + def load_modules(self, verbose=False, debug=False): print("Loading modules...", end='') sys.stdout.flush() failed = 0 @@ -78,8 +78,14 @@ def load_modules(self, verbose=False): else: addr = int(module['core_layout']['base']) - if verbose: + if debug: + print("Loading {} at {:#x}".format(modpath, addr)) + elif verbose: print("Loading {} at {:#x}".format(modname, addr)) + else: + print(".", end='') + sys.stdout.flush() + sections = self.get_module_sections(module) gdb.execute("add-symbol-file {} {:#x} {}" .format(modpath, addr, sections), From 04ef6d9347b668f77b2ee227eecc66620d5ce8c8 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Wed, 1 May 2019 09:32:17 -0400 Subject: [PATCH 097/367] crash.kernel: use objfile.has_symbols() to detect debuginfo Looking up a symbol is a hacky way of discovering whether we have debuginfo. Now that gdb-python has objfile.has_symbols(), we can use that instead. Signed-off-by: Jeff Mahoney --- crash/kernel.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/crash/kernel.py b/crash/kernel.py index 827cd40c9d1..98c94980b01 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -29,15 +29,17 @@ class CrashKernel(CrashBaseClass): def __init__(self, searchpath=None): self.findmap = {} self.searchpath = searchpath + obj = gdb.objfiles()[0] + kernel = os.path.basename(obj.filename) - sym = gdb.lookup_symbol('vsnprintf', None)[0] - if sym is None: - raise CrashKernelError("Missing vsnprintf indicates that there is no kernel image loaded.") + if not obj.has_symbols(): + raise CrashKernelError("Couldn't locate debuginfo for {}" + .format(kernel)) f = open(gdb.objfiles()[0].filename, 'rb') self.elffile = ELFFile(f) - archname = sym.symtab.objfile.architecture.name() + archname = obj.architecture.name() archclass = crash.arch.get_architecture(archname) self.arch = archclass() @@ -90,10 +92,12 @@ def load_modules(self, verbose=False, debug=False): gdb.execute("add-symbol-file {} {:#x} {}" .format(modpath, addr, sections), to_string=True) - sal = gdb.find_pc_line(addr) - if sal.symtab is None: - objfile = gdb.lookup_objfile(modpath) + + objfile = gdb.lookup_objfile(modpath) + if not objfile.has_symbols(): self.load_debuginfo(objfile, modpath) + elif debug: + print(" + has debug symbols") # We really should check the version, but GDB doesn't export # a way to lookup sections. From 01aacef1226ad1fc48308c0bbb8441d8b0f026fe Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 3 May 2019 15:01:33 -0400 Subject: [PATCH 098/367] crash.util: promote to be its own sub-package With the new delayed lookup code landing in crash.util, it makes sense to split it into its own sub-package. 
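The rename from crash/util.py to crash/util/__init__.py keeps the
import path stable, so existing callers such as the following
(illustrative) continue to work unchanged:

    from crash.util import container_of, get_symbol_value
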
Signed-off-by: Jeff Mahoney --- crash/{util.py => util/__init__.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename crash/{util.py => util/__init__.py} (100%) diff --git a/crash/util.py b/crash/util/__init__.py similarity index 100% rename from crash/util.py rename to crash/util/__init__.py From 73207d75393053dce273f79889676c6d89e2d796 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Wed, 1 May 2019 09:46:45 -0400 Subject: [PATCH 099/367] crash: add static percpu regions for modules Static percpu variables added by modules aren't addressable right now. This commit adds the static ranges to the percpu mappings. Signed-off-by: Jeff Mahoney --- crash/kernel.py | 5 +++++ crash/types/percpu.py | 34 ++++++++++++++++++++++++++++++++++ 2 files changed, 39 insertions(+) diff --git a/crash/kernel.py b/crash/kernel.py index 98c94980b01..ca93ed766d1 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -89,6 +89,11 @@ def load_modules(self, verbose=False, debug=False): sys.stdout.flush() sections = self.get_module_sections(module) + + percpu = int(module['percpu']) + if percpu > 0: + sections += " -s .data..percpu {:#x}".format(percpu) + gdb.execute("add-symbol-file {} {:#x} {}" .format(modpath, addr, sections), to_string=True) diff --git a/crash/types/percpu.py b/crash/types/percpu.py index f5de579423e..d360b379713 100644 --- a/crash/types/percpu.py +++ b/crash/types/percpu.py @@ -7,6 +7,7 @@ from crash.infra import CrashBaseClass, export from crash.util import array_size, struct_has_member from crash.types.list import list_for_each_entry +from crash.types.module import for_each_module from crash.exceptions import DelayedAttributeError from crash.types.bitmap import find_first_set_bit, find_last_set_bit from crash.types.bitmap import find_next_set_bit, find_next_zero_bit @@ -44,6 +45,7 @@ class TypesPerCPUClass(CrashBaseClass): _dynamic_offset_cache: List[Tuple[int, int]] = list() _static_ranges: Dict[int, int] = dict() + _module_ranges: Dict[int, int] = dict() _last_cpu = -1 _nr_cpus = 0 @@ -72,6 +74,16 @@ def _setup_nr_cpus(cls, ignored: gdb.Symbol) -> None: if cls._last_cpu == -1: cls._last_cpu = cls._nr_cpus + @classmethod + def _setup_module_ranges(cls, modules: gdb.Symbol) -> None: + for module in for_each_module(): + start = int(module['percpu']) + if start == 0: + continue + + size = int(module['percpu_size']) + cls._module_ranges[start] = size + @classmethod def _add_to_offset_cache(cls, base: int, start: int, end: int) -> None: cls._dynamic_offset_cache.append((base + start, base + end)) @@ -83,6 +95,8 @@ def dump_ranges(cls) -> None: """ for (start, size) in cls._static_ranges.items(): print(f"static start={start:#x}, size={size:#x}") + for (start, size) in cls._module_ranges.items(): + print(f"module start={start:#x}, size={size:#x}") if cls._dynamic_offset_cache: for (start, end) in cls._dynamic_offset_cache: print(f"dynamic start={start:#x}, end={end:#x}") @@ -225,6 +239,24 @@ def _relocated_offset(self, var): return addr - start return addr + def is_module_percpu_var(self, addr: int) -> bool: + """ + Returns whether the provided value or symbol falls within + any of the percpu ranges for modules + + Args: + addr: The address to query + + Returns: + :obj:`bool`: Whether this address belongs to a module range + """ + for start in self._module_ranges: + for cpu in range(0, self.last_cpu): + size = self._module_ranges[start] + if addr >= start and addr < start + size: + return True + return False + @export def is_percpu_var(self, var: SymbolOrValue) -> bool: """ @@ -243,6 
+275,8 @@ def is_percpu_var(self, var: SymbolOrValue) -> bool: var = int(var) if self.is_static_percpu_var(var): return True + if self.is_module_percpu_var(var): + return True if self._is_percpu_var_dynamic(var): return True return False From 9308770f218a00c617cbaa5b83e6218fabc4cde1 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Wed, 1 May 2019 09:54:47 -0400 Subject: [PATCH 100/367] crash.kernel: handle failure to add module symbols Rather than dump a gdb.error back to the UI, catch and raise a Crash-specific exception that gives some context. Signed-off-by: Jeff Mahoney --- crash/kernel.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/crash/kernel.py b/crash/kernel.py index ca93ed766d1..b32eba5f691 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -94,9 +94,15 @@ def load_modules(self, verbose=False, debug=False): if percpu > 0: sections += " -s .data..percpu {:#x}".format(percpu) - gdb.execute("add-symbol-file {} {:#x} {}" - .format(modpath, addr, sections), - to_string=True) + try: + result = gdb.execute("add-symbol-file {} {:#x} {}" + .format(modpath, addr, sections), + to_string=True) + except gdb.error as e: + raise CrashKernelError("Error while loading module `{}': {}" + .format(modname, str(e))) + if debug: + print(result) objfile = gdb.lookup_objfile(modpath) if not objfile.has_symbols(): From dd2b64301bc41ae9ecbc1e216496fac85955cbd0 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Wed, 1 May 2019 10:12:24 -0400 Subject: [PATCH 101/367] crash.kernel: improve and document debuginfo and module loading In an ideal world, debuginfo packages would be installed under /usr/lib/debug and gdb would locate debuginfo automatically. Practically, this isn't the case. Recent SUSE kernel debuginfo packages don't create the symlinks for modules for automatic loading. Even if they did, the more common use case is to maintain a cache of debuginfo files outside of /usr/lib/debug. This commit improves the handling of the loading of modules and debuginfo. We have some sane defaults, documented in crash.kernel and add some command line options to override those. Signed-off-by: Jeff Mahoney --- crash.sh | 150 +++++++++++++--- crash/kernel.py | 451 +++++++++++++++++++++++++++++++++++++++++------ crash/session.py | 26 ++- pycrash | 1 + pycrash.asciidoc | 83 +++++++-- 5 files changed, 607 insertions(+), 104 deletions(-) create mode 120000 pycrash diff --git a/crash.sh b/crash.sh index d51bd17876c..d49cae7e7f9 100755 --- a/crash.sh +++ b/crash.sh @@ -3,38 +3,114 @@ usage() { cat <&2 -usage: $(basename $0) [-d|--search-dir ] +usage: $(basename $0) [options] -Debugging options: ---gdb Run the embedded gdb underneath a separate gdb instance. - This is useful for debugging issues in gdb that are seen - while running crash-python. ---valgrind Run the embedded gdb underneath valgrind. - This is useful for debugging memory leaks in gdb patches. ---nofiles Start up without loading any object files. - This is useful for testing delayed lookup error handling. +Options: +-r | --root + Use the specified directory as the root for all file searches. When + using properly configured .build-id symbolic links, this is the + best method to use as the debuginfo will be loaded automatically via + gdb without searching for filenames. 
+ +-m | --modules + Use the specified directory to search for modules + +-d | --modules-debuginfo + Use the specified directory to search for module debuginfo + +-D | --vmlinux-debuginfo + Use the specified directory to search for vmlinux debuginfo + +-b | --build-dir + Use the specified directory as the root for all file searches. This + directory should be the root of a built kernel source tree. This is + shorthand for "-r -m . -d . -D ." and will override preceding + options. +Debugging options: +--debug + Enable noisy output for debugging the debugger +-v | --verbose + Enable verbose output for debugging the debugger +--gdb + Run the embedded gdb underneath a separate gdb instance. This is useful + for debugging issues in gdb that are seen while running crash-python. +--valgrind + Run the embedded gdb underneath valgrind. This is useful + for debugging memory leaks in gdb patches. END exit 1 } -TEMP=$(getopt -o 'd:h' --long 'search-dir:,gdb,valgrind,nofiles,help' -n "$(basename $0)" -- "$@") +TEMP=$(getopt -o 'vr:d:m:D:b:h' --long 'verbose,root:,modules-debuginfo:,modules:,vmlinux-debuginfo:,build-dir:,debug,gdb,valgrind,help' -n "$(basename $0)" -- "$@") if [ $? -ne 0 ]; then - echo "Terminating." >&2 - exit 1 + usage fi eval set -- "$TEMP" unset TEMP +VERBOSE=False +DEBUG=False + while true; do case "$1" in - '-d'|'--search-dir') - SEARCHDIRS="$SEARCHDIRS $2" + '-r'|'--root') + if test -z "$SEARCH_DIRS"; then + SEARCH_DIRS="$2" + else + SEARCH_DIRS="$SEARCH_DIRS $2" + fi + shift 2 + continue + ;; + '-m'|'--modules') + if test -z "$MODULES"; then + MODULES="$2" + else + MODULES="$MODULES $2" + fi + shift 2 + continue + ;; + '-d'|'--modules-debuginfo') + if test -z "$MODULES_DEBUGINFO"; then + MODULES_DEBUGINFO="$2" + else + MODULES_DEBUGINFO="$MODULES_DEBUGINFO $2" + fi + shift 2 + continue + ;; + '-D'|'--vmlinux-debuginfo') + if test -z "$VMLINUX_DEBUGINFO"; then + VMLINUX_DEBUGINFO="$2" + else + VMLINUX_DEBUGINFO="$VMLINUX_DEBUGINFO $2" + fi + shift 2 + continue + ;; + '-b'|'--build-dir') + SEARCH_DIRS="$2" + VMLINUX_DEBUGINFO="." + MODULES="." + MODULES_DEBUGINFO="." 
shift 2 continue + ;; + '-v'|'--verbose') + VERBOSE="True" + shift + continue + ;; + '--debug') + DEBUG="True" + shift + continue ;; + '--gdb') DEBUGMODE=gdb shift @@ -45,11 +121,6 @@ while true; do shift continue ;; - '--nofiles') - NOFILES=yes - shift - continue - ;; '-h'|'--help') usage ;; '--') @@ -63,7 +134,7 @@ while true; do esac done -if [ "$#" -ne 2 -a -z "$NOFILES" ]; then +if [ "$#" -ne 2 ]; then usage fi @@ -111,7 +182,15 @@ else fi VMCORE=$2 +for path in $SEARCH_DIRS; do + if test -n "$DFD"; then + DFD="$DFD:$path" + else + DFD="$path" + fi +done cat << EOF >> $GDBINIT +set debug-file-directory $DFD:/usr/lib/debug set build-id-verbose 0 set python print-stack full set prompt py-crash> @@ -132,13 +211,40 @@ import sys import traceback try: import crash.session + from crash.kernel import CrashKernel except RuntimeError as e: print("crash-python: {}, exiting".format(str(e)), file=sys.stderr) traceback.print_exc() sys.exit(1) -path = "$SEARCHDIRS".split(' ') + +roots = None +module_path = None +module_debuginfo_path = None +vmlinux_debuginfo = None +verbose=$VERBOSE +debug=$DEBUG + +s = "$SEARCH_DIRS" +if len(s) > 0: + roots = s.split(" ") + +s = "$VMLINUX_DEBUGINFO" +if len(s) > 0: + vmlinux_debuginfo = s.split(" ") + +s = "$MODULES" +if len(s) > 0: + module_path = s.split(" ") + +s = "$MODULES_DEBUGINFO" +if len(s) > 0: + module_debuginfo_path = s.split(" ") + try: - x = crash.session.Session(path) + kernel = CrashKernel(roots, vmlinux_debuginfo, module_path, + module_debuginfo_path, verbose, debug) + + x = crash.session.Session(kernel, verbose=verbose, debug=debug) print("The 'pyhelp' command will list the command extensions.") except gdb.error as e: print("crash-python: {}, exiting".format(str(e)), file=sys.stderr) diff --git a/crash/kernel.py b/crash/kernel.py index b32eba5f691..e88b59679a5 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -3,34 +3,266 @@ import gdb import sys +import re +import fnmatch import os.path import crash.arch import crash.arch.x86_64 import crash.arch.ppc64 from crash.infra import CrashBaseClass, export from crash.types.list import list_for_each_entry -from crash.types.percpu import get_percpu_vars from crash.types.list import list_for_each_entry from crash.types.module import for_each_module, for_each_module_section -import crash.cache.tasks -from crash.types.task import LinuxTask from elftools.elf.elffile import ELFFile from crash.util import get_symbol_value +from crash.exceptions import MissingSymbolError + +from typing import Pattern, Union, List, Dict, Any class CrashKernelError(RuntimeError): pass +class NoMatchingFileError(FileNotFoundError): + pass + LINUX_KERNEL_PID = 1 +PathSpecifier = Union[List[str], str] + class CrashKernel(CrashBaseClass): + __types__ = [ 'char *' ] __symvals__ = [ 'init_task' ] __symbols__ = [ 'runqueues'] - def __init__(self, searchpath=None): - self.findmap = {} - self.searchpath = searchpath + + def __init__(self, roots: PathSpecifier=None, + vmlinux_debuginfo: PathSpecifier=None, + module_path: PathSpecifier=None, + module_debuginfo_path: PathSpecifier=None, + verbose: bool=False, debug: bool=False): + """ + Initialize a basic kernel semantic debugging session. 
+ + This means that we load the following: + - Kernel image symbol table (and debuginfo, if not integrated) + relocated to the base offset used by kASLR + - Kernel modules that were loaded on the the crashed system (again, + with debuginfo if not integrated) + - Percpu ranges used by kernel module + - Architecture-specific details + - Linux tasks populated into the GDB thread table + + If kernel module files and debuginfo cannot be located, backtraces + may be incomplete if the addresses used by the modules are crossed. + Percpu ranges will be properly loaded regardless. + + For arguments that accept paths to specify a base directory to be + used, the entire directory structure will be read and cached to + speed up subsequent searches. Still, reading large directory trees + is a time consuming operation and being exact as possible will + improve startup time. + + Args: + root (str or list of str, None for defaults): The roots of trees + to search for debuginfo files. When specified, all roots + will be searched using the following arguments (including + the absolute paths in the defaults if unspecified). + + Defaults to: / + + vmlinux_debuginfo (str or list of str, None for defaults): The + location of the separate debuginfo file corresponding + to the kernel being debugged. + + Defaults to: + - .debug + - ./vmlinux-.debug + - /usr/lib/debug/.build-id/xx/.debug + - /usr/lib/debug/.debug + - /usr/lib/debug/boot/.debug + - /usr/lib/debug/boot/vmlinux- + + module_path (string, None for defaults): The base directory to + be used to search for kernel modules (e.g. module.ko) to be + used to load symbols for the kernel being debugged. + + Defaults to: + - ./modules + - /lib/modules/ + + module_debuginfo_path (string, None for defaults): The base + directory to search for debuginfo matching the kernel + modules already loaded. + + Defaults to: + - ./modules.debug + - /usr/lib/debug/.build-id/xx/.debug + - /usr/lib/debug/lib/modules/ + Raises: + CrashKernelError: If the kernel debuginfo cannot be loaded. 
+ TypeError: If any of the arguments are not None, str, + or list of str + + """ + self.findmap: Dict[str, Dict[Any, Any]] = dict() + self.modules_order: Dict[str, Dict[str, str]] = dict() obj = gdb.objfiles()[0] kernel = os.path.basename(obj.filename) + debugroot = "/usr/lib/debug" + + version = self.extract_version() + + if roots is None: + self.roots = [ "/" ] + elif (isinstance(roots, list) and len(roots) > 0 and + isinstance(roots[0], str)): + x = None + for root in roots: + if os.path.exists(root): + if x is None: + x = [ root ] + else: + x.append(root) + else: + print("root {} does not exist".format(root)) + + if x is None: + x = [ "/" ] + self.roots = x + elif (isinstance(roots, str)): + x = None + if os.path.exists(roots): + if x is None: + x = [ roots ] + else: + x.append(roots) + if x is None: + x = [ "/" ] + self.roots = x + else: + raise TypeError("roots must be None, str, or list of str") + + if verbose: + print("roots={}".format(self.roots)) + + if vmlinux_debuginfo is None: + x = [] + defaults = [ + "{}.debug".format(kernel), + "vmlinux-{}.debug".format(version), + "{}/{}.debug".format(debugroot, kernel), + "{}/boot/{}.debug".format(debugroot, + os.path.basename(kernel)), + "{}/boot/vmlinux-{}.debug".format(debugroot, version), + ] + for root in self.roots: + for mpath in defaults: + path = "{}/{}".format(root, mpath) + if os.path.exists(path): + if x is None: + x = [path] + else: + x.append(path) + + self.vmlinux_debuginfo = x + + elif (isinstance(vmlinux_debuginfo, list) and + len(vmlinux_debuginfo) > 0 and + isinstance(vmlinux_debuginfo[0], str)): + self.vmlinux_debuginfo = vmlinux_debuginfo + elif isinstance(vmlinux_debuginfo, str): + self.vmlinux_debuginfo = [ vmlinux_debuginfo ] + else: + raise TypeError("vmlinux_debuginfo must be None, str, or list of str") + + if verbose: + print("vmlinux_debuginfo={}".format(self.vmlinux_debuginfo)) + + if module_path is None: + x = [] + + path = "modules" + if os.path.exists(path): + x.append(path) + + for root in self.roots: + path = "{}/lib/modules/{}".format(root, version) + if os.path.exists(path): + x.append(path) + + self.module_path = x + elif (isinstance(module_path, list) and + isinstance(module_path[0], str)): + x = [] + + for root in self.roots: + for mpath in module_path: + path = "{}/{}".format(root, mpath) + if os.path.exists(path): + x.append(path) + + self.module_path = x + elif isinstance(module_path, str): + x = [] + + if os.path.exists(module_path): + x.append(module_path) + + self.module_path = x + else: + raise TypeError("module_path must be None, str, or list of str") + + if verbose: + print("module_path={}".format(self.module_path)) + + if module_debuginfo_path is None: + x = [] + + path = "modules.debug" + if os.path.exists(path): + x.append(path) + + for root in self.roots: + path = "{}/{}/lib/modules/{}".format(root, debugroot, version) + if os.path.exists(path): + x.append(path) + self.module_debuginfo_path = x + elif (isinstance(module_debuginfo_path, list) and + isinstance(module_debuginfo_path[0], str)): + x = [] + + for root in self.roots: + for mpath in module_debuginfo_path: + path = "{}/{}".format(root, mpath) + if os.path.exists(path): + x.append(path) + + self.module_debuginfo_path = x + elif isinstance(module_debuginfo_path, str): + x = [] + + for root in self.roots: + path = "{}/{}".format(root, module_debuginfo_path) + if os.path.exists(path): + x.append(path) + + self.module_debuginfo_path = x + else: + raise TypeError("module_debuginfo_path must be None, str, or list of str") + + if 
verbose: + print("module_debuginfo_path={}".format(self.module_debuginfo_path)) + + # We need separate debuginfo. Let's go find it. + if not obj.has_symbols(): + print("Loading debug symbols for vmlinux") + for path in [self.build_id_path(obj)] + self.vmlinux_debuginfo: + try: + obj.add_separate_debug_file(path) + if obj.has_symbols(): + break + except gdb.error as e: + pass if not obj.has_symbols(): raise CrashKernelError("Couldn't locate debuginfo for {}" @@ -49,28 +281,51 @@ def __init__(self, searchpath=None): self.target.fetch_registers = self.fetch_registers self.crashing_thread = None - def fetch_registers(self, register): + # When working without a symbol table, we still need to be able + # to resolve version information. + def get_minsymbol_as_string(self, name: str) -> str: + sym = gdb.lookup_minimal_symbol(name).value() + + return sym.address.cast(self.char_p_type).string() + + def extract_version(self) -> str: + try: + uts = get_symbol_value('init_uts_ns') + return uts['name']['release'].string() + except (AttributeError, NameError, MissingSymbolError): + pass + + banner = self.get_minsymbol_as_string('linux_banner') + + return banner.split(' ')[2] + + def fetch_registers(self, register: gdb.Register) -> None: thread = gdb.selected_thread() - return self.arch.fetch_register(thread, register.regnum) + self.arch.fetch_register(thread, register.regnum) - def get_module_sections(self, module): + def get_module_sections(self, module: gdb.Value) -> str: out = [] for (name, addr) in for_each_module_section(module): out.append("-s {} {:#x}".format(name, addr)) return " ".join(out) - def load_modules(self, verbose=False, debug=False): - print("Loading modules...", end='') - sys.stdout.flush() + def load_modules(self, verbose: bool=False, debug: bool=False) -> None: + import crash.cache.syscache + version = crash.cache.syscache.utsname.release + print("Loading modules for {}".format(version), end='') + if verbose: + print(":", flush=True) failed = 0 loaded = 0 for module in for_each_module(): modname = "{}".format(module['name'].string()) modfname = "{}.ko".format(modname) found = False - for path in self.searchpath: - modpath = self.find_module_file(modfname, path) - if not modpath: + for path in self.module_path: + + try: + modpath = self.find_module_file(modfname, path) + except NoMatchingFileError: continue found = True @@ -106,7 +361,7 @@ def load_modules(self, verbose=False, debug=False): objfile = gdb.lookup_objfile(modpath) if not objfile.has_symbols(): - self.load_debuginfo(objfile, modpath) + self.load_module_debuginfo(objfile, modpath, verbose) elif debug: print(" + has debug symbols") @@ -120,6 +375,8 @@ def load_modules(self, verbose=False, debug=False): print("Couldn't find module file for {}".format(modname)) failed += 1 else: + if not objfile.has_symbols(): + print("Couldn't find debuginfo for {}".format(modname)) loaded += 1 if (loaded + failed) % 10 == 10: print(".", end='') @@ -134,43 +391,137 @@ def load_modules(self, verbose=False, debug=False): del self.findmap self.findmap = {} - def find_module_file(self, name, path): - if not path in self.findmap: - self.findmap[path] = {} + @staticmethod + def normalize_modname(mod: str) -> str: + return mod.replace('-', '_') + + def cache_modules_order(self, path: str) -> None: + self.modules_order[path] = dict() + order = os.path.join(path, "modules.order") + try: + f = open(order) + for line in f.readlines(): + modpath = line.rstrip() + modname = self.normalize_modname(os.path.basename(modpath)) + if modname[:7] == "kernel/": 
+ modname = modname[7:] + modpath = os.path.join(path, modpath) + if os.path.exists(modpath): + self.modules_order[path][modname] = modpath + f.close() + except OSError: + pass + + def get_module_path_from_modules_order(self, path: str, name: str) -> str: + if not path in self.modules_order: + self.cache_modules_order(path) - for root, dirs, files in os.walk(path): - for filename in files: - nname = filename.replace('-', '_') - self.findmap[path][nname] = os.path.join(root, filename) try: - nname = name.replace('-', '_') - return self.findmap[path][nname] + return self.modules_order[path][name] except KeyError: - return None - - def load_debuginfo(self, objfile, name=None, verbose=False): - if name is None: - name = objfile.filename - if ".gz" in name: - name = name.replace(".gz", "") - filename = "{}.debug".format(os.path.basename(name)) - filepath = None - - # Check current directory first - if os.path.exists(filename): - filepath = filename - else: - for path in self.searchpath: - filepath = self.find_module_file(filename, path) - if filepath: - break + raise NoMatchingFileError(name) - if filepath: - objfile.add_separate_debug_file(filepath) + def cache_file_tree(self, path, regex: Pattern[str]=None) -> None: + if not path in self.findmap: + self.findmap[path] = { + 'filters' : [], + 'files' : {}, + } + + # If we've walked this path with no filters, we have everything + # already. + if self.findmap[path]['filters'] is None: + return + + if regex is None: + self.findmap[path]['filters'] = None else: - print("Could not locate debuginfo for {}".format(name)) + pattern = regex.pattern + if pattern in self.findmap[path]['filters']: + return + self.findmap[path]['filters'].append(pattern) + + for root, dirs, files in os.walk(path): + for filename in files: + modname = self.normalize_modname(filename) + + if regex and regex.match(modname) is None: + continue + + modpath = os.path.join(root, filename) + self.findmap[path]['files'][modname] = modpath + + def get_file_path_from_tree_search(self, path: str, name: str, + regex: Pattern[str]=None) -> str: + self.cache_file_tree(path, regex) + + try: + modname = self.normalize_modname(name) + return self.findmap[path]['files'][modname] + except KeyError: + raise NoMatchingFileError(name) + + def find_module_file(self, name: str, path: str) -> str: + try: + return self.get_module_path_from_modules_order(path, name) + except NoMatchingFileError: + pass + + regex = re.compile(fnmatch.translate("*.ko")) + return self.get_file_path_from_tree_search(path, name, regex) + + def find_module_debuginfo_file(self, name: str, path: str) -> str: + regex = re.compile(fnmatch.translate("*.ko.debug")) + return self.get_file_path_from_tree_search(path, name, regex) + + @staticmethod + def build_id_path(objfile: gdb.Objfile) -> str: + build_id = objfile.build_id + return ".build_id/{}/{}.debug".format(build_id[0:2], build_id[2:]) + + def try_load_debuginfo(self, objfile: gdb.Objfile, + path: str, verbose: bool=False) -> bool: + if not os.path.exists(path): + return False + + try: + if verbose: + print(" + Loading debuginfo: {}".format(path)) + objfile.add_separate_debug_file(path) + if objfile.has_symbols(): + return True + except gdb.error as e: + print(e) + + return False + + def load_module_debuginfo(self, objfile: gdb.Objfile, + modpath: str=None, verbose: bool=False) -> None: + if modpath is None: + modpath = objfile.filename + if ".gz" in modpath: + modpath = modpath.replace(".gz", "") + filename = "{}.debug".format(os.path.basename(modpath)) + + build_id_path = 
self.build_id_path(objfile) + + for path in self.module_debuginfo_path: + filepath = "{}/{}".format(path, build_id_path) + if self.try_load_debuginfo(objfile, filepath, verbose): + break + + try: + filepath = self.find_module_debuginfo_file(filename, path) + except NoMatchingFileError: + continue + + if self.try_load_debuginfo(objfile, filepath, verbose): + break - def setup_tasks(self): + def setup_tasks(self) -> None: + from crash.types.percpu import get_percpu_vars + from crash.types.task import LinuxTask + import crash.cache.tasks gdb.execute('set print thread-events 0') task_list = self.init_task['tasks'] @@ -178,8 +529,6 @@ def setup_tasks(self): rqs = get_percpu_vars(self.runqueues) rqscurrs = {int(x["curr"]) : k for (k, x) in rqs.items()} - self.pid_to_task_struct = {} - print("Loading tasks...", end='') sys.stdout.flush() @@ -196,7 +545,7 @@ def setup_tasks(self): try: crashing_cpu = int(get_symbol_value('crashing_cpu')) except Exception as e: - crashing_cpu = None + crashing_cpu = -1 for task in tasks: cpu = None @@ -215,7 +564,7 @@ def setup_tasks(self): print("Failed to setup task @{:#x}".format(int(task.address))) continue thread.name = task['comm'].string() - if active and crashing_cpu is not None and cpu == crashing_cpu: + if active and cpu == crashing_cpu: self.crashing_thread = thread self.arch.setup_thread_info(thread) diff --git a/crash/session.py b/crash/session.py index cbd2ab560dc..91298b13d35 100644 --- a/crash/session.py +++ b/crash/session.py @@ -5,32 +5,26 @@ import sys from crash.infra import autoload_submodules -import crash.kernel -from crash.kernel import CrashKernelError -from kdumpfile import kdumpfile +from crash.kernel import CrashKernel, CrashKernelError class Session(object): """ crash.Session is the main driver component for crash-python - The Session class loads the kernel, kernel modules, debuginfo, - and vmcore and auto loads any sub modules for autoinitializing - commands and subsystems. + The Session class loads the kernel modules, sets up tasks, and auto loads + any sub modules for autoinitializing commands and subsystems. 
Args: - searchpath (list of str, optional): Paths to directory trees to - search for kernel modules and debuginfo + kernel (CrashKernel): The kernel to debug during this session + verbose (bool, optional, default=False): Whether to enable verbose + output debug (bool, optional, default=False): Whether to enable verbose debugging output """ - - - def __init__(self, searchpath=None, debug=False): + def __init__(self, kernel: CrashKernel, verbose: bool=False, + debug: bool=False) -> None: print("crash-python initializing...") - if searchpath is None: - searchpath = [] - - self.kernel = crash.kernel.CrashKernel(searchpath) + self.kernel = kernel autoload_submodules('crash.cache') autoload_submodules('crash.subsystem') @@ -38,7 +32,7 @@ def __init__(self, searchpath=None, debug=False): try: self.kernel.setup_tasks() - self.kernel.load_modules(searchpath) + self.kernel.load_modules(verbose=verbose, debug=debug) except CrashKernelError as e: print(str(e)) print("Further debugging may not be possible.") diff --git a/pycrash b/pycrash new file mode 120000 index 00000000000..0c3d27cfed4 --- /dev/null +++ b/pycrash @@ -0,0 +1 @@ +crash.sh \ No newline at end of file diff --git a/pycrash.asciidoc b/pycrash.asciidoc index ed216071e55..c4d1bced9f5 100644 --- a/pycrash.asciidoc +++ b/pycrash.asciidoc @@ -7,34 +7,87 @@ pycrash - a Linux kernel crash dump debugger written in Python SYNOPSIS -------- -*pycrash* [options] +*pycrash* [options] DESCRIPTION ----------- The *pycrash* utility is a Linux kernel crash debugger written in Python. It improves upon the original crash tool by adding support for symbolic -backtraces and in that it is easily extensible by the user. +backtraces and in that it is easily extensible by the user using a rich +python interface that offers semantic helpers for various subsystems. In order to operate properly, full debuginfo is required for the kernel -image and all modules in use. +image and all modules in use. Without options specifying other paths, +the following defaults are used for locating the debuginfo and modules: + +Kernel debuginfo: + +* .debug +* ./vmlinux-.debug +* /usr/lib/debug/.build-id//.debug +* /usr/lib/debug/.debug +* /usr/lib/debug/boot/vmlinux-.debug +* /usr/lib/debug/boot/vmlinux- + +Module path: + +* ./modules +* /lib/modules/ + +Module debuginfo path: + +* ./modules.debug +* /usr/lib/debug/.build-id/xx/.debug +* /usr/lib/debug/lib/modules/ + +The build-id and kernel-version fields are detected within the kernel +and modules and cannot be overridden. + OPTIONS ------- -*-d|--search-dir *:: -Specify a directory to search recursively for modules or debuginfo for -the kernel or modules. -+ -This option may be specified multiple times. + +Each of the following options may be specified multiple times. + +*-r | --root *:: + Use the specified directory as the root for all file searches. When + using properly configured .build-id symbolic links, this is the + best method to use as the debuginfo will be loaded automatically via + gdb without searching for filenames. If this is the only option + specified, the defaults documented above will be used relative to + each root. + +*-m | --modules *:: + Use the specified directory to search for modules + +*-d | --modules-debuginfo *:: + Use the specified directory to search for module debuginfo + +*-D | --vmlinux-debuginfo *:: + Use the specified directory to search for vmlinux debuginfo + +*-b | --build-dir *:: + Use the specified directory as the root for all file searches. 
This + directory should be the root of a built kernel source tree. This is + shorthand for *-r -m . -d . -D .* and will override preceding + options. + +DEBUGGING OPTIONS: +------------------ + +*-v | --verbose*:: + Enable verbose output for debugging the debugger + +*--debug*:: + Enable even noisier output for debugging the debugger *--gdb*:: -Start the gdb instance used with crash-python within gdb. -+ -This option is primarily intended for debugging gdb issues. + Run the embedded gdb underneath a separate gdb instance. This is useful + for debugging issues in gdb that are seen while running crash-python. *--valgrind*:: -Start the gdb instance used with crash-python within valgrind. -+ -This option is primarily intended for debugging gdb issues. + Run the embedded gdb underneath valgrind. This is useful + for debugging memory leaks in gdb patches. EXIT STATUS ----------- @@ -49,4 +102,4 @@ Please refer to the GitHub repository at https://github.com/jeffmahoney/crash-py SEE ALSO -------- `gdb`(1) -`libdkumpfil` +`libdkumpfile` From a8584b2e90dbb94b0760873531a4e3ca1de16d74 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Thu, 25 Apr 2019 20:52:45 -0400 Subject: [PATCH 102/367] crash.kernel: check module version on load Since the beginning of this project, we've assumed that the modules we load match the kernel being debugged. We don't do anything to verify that assumption. This commit uses the version and vermagic to validate module contents, if available. Signed-off-by: Jeff Mahoney --- crash/kernel.py | 89 ++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 85 insertions(+), 4 deletions(-) diff --git a/crash/kernel.py b/crash/kernel.py index e88b59679a5..75cb0f9d888 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -25,6 +25,29 @@ class CrashKernelError(RuntimeError): class NoMatchingFileError(FileNotFoundError): pass +class ModinfoMismatchError(ValueError): + def __init__(self, attribute, path, value, expected_value): + self.path = path + self.value = value + self.expected_value = expected_value + self.attribute = attribute + + def __str__(self): + return "module {} has mismatched {} (got `{}' expected `{}')".format( + self.path, self.attribute, self.value, self.expected_value) + +class ModVersionMismatchError(ModinfoMismatchError): + def __init__(self, path, module_value, expected_value): + super(ModVersionMismatchError, self).__init__('vermagic', + path, module_value, + expected_value) + +class ModSourceVersionMismatchError(ModinfoMismatchError): + def __init__(self, path, module_value, expected_value): + super(ModSourceVersionMismatchError, self).__init__('srcversion', + path, module_value, + expected_value) + LINUX_KERNEL_PID = 1 PathSpecifier = Union[List[str], str] @@ -268,8 +291,7 @@ def __init__(self, roots: PathSpecifier=None, raise CrashKernelError("Couldn't locate debuginfo for {}" .format(kernel)) - f = open(gdb.objfiles()[0].filename, 'rb') - self.elffile = ELFFile(f) + self.vermagic = self.extract_vermagic() archname = obj.architecture.name() archclass = crash.arch.get_architecture(archname) @@ -299,6 +321,38 @@ def extract_version(self) -> str: return banner.split(' ')[2] + def extract_vermagic(self) -> str: + try: + magic = get_symbol_value('vermagic') + return magic.string() + except (AttributeError, NameError): + pass + + return self.get_minsymbol_as_string('vermagic') + + def extract_modinfo_from_module(self, modpath: str) -> Dict[str, str]: + f = open(modpath, 'rb') + + d = None + try: + elf = ELFFile(f) + modinfo = 
elf.get_section_by_name('.modinfo') + + d = {} + for line in modinfo.data().split(b'\x00'): + val = line.decode('utf-8') + if val: + eq = val.index('=') + d[val[0:eq]] = val[eq + 1:] + except Exception as e: + print(e) + del d + d = dict() + + del elf + f.close() + return d + def fetch_registers(self, register: gdb.Register) -> None: thread = gdb.selected_thread() self.arch.fetch_register(thread, register.regnum) @@ -309,6 +363,28 @@ def get_module_sections(self, module: gdb.Value) -> str: out.append("-s {} {:#x}".format(name, addr)) return " ".join(out) + def check_module_version(self, modpath: str, module: gdb.Value) -> None: + modinfo = self.extract_modinfo_from_module(modpath) + + vermagic = None + if 'vermagic' in modinfo: + vermagic = modinfo['vermagic'] + + if vermagic != self.vermagic: + raise ModVersionMismatchError(modpath, vermagic, self.vermagic) + + mi_srcversion = None + if 'srcversion' in modinfo: + mi_srcversion = modinfo['srcversion'] + + mod_srcversion = None + if 'srcversion' in module.type: + mod_srcversion = module['srcversion'].string() + + if mi_srcversion != mod_srcversion: + raise ModSourceVersionMismatchError(modpath, mi_srcversion, + mod_srcversion) + def load_modules(self, verbose: bool=False, debug: bool=False) -> None: import crash.cache.syscache version = crash.cache.syscache.utsname.release @@ -328,6 +404,13 @@ def load_modules(self, verbose: bool=False, debug: bool=False) -> None: except NoMatchingFileError: continue + try: + self.check_module_version(modpath, module) + except ModinfoMismatchError as e: + if verbose: + print(str(e)) + continue + found = True if 'module_core' in module.type: @@ -365,8 +448,6 @@ def load_modules(self, verbose: bool=False, debug: bool=False) -> None: elif debug: print(" + has debug symbols") - # We really should check the version, but GDB doesn't export - # a way to lookup sections. break if not found: From a505dc9436d8800ed04bab636c8e3edf4bc19d6d Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Thu, 2 May 2019 14:12:47 -0400 Subject: [PATCH 103/367] crash.commands.task: allow 'task' command with no argument to show current task Since we use the 'task' command to select tasks instead of the gdb 'thread' command directly, the 'task' command should report the current task when no argument is given. Signed-off-by: Jeff Mahoney --- crash/commands/task.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/crash/commands/task.py b/crash/commands/task.py index f50553dc7ad..6aac802ca47 100644 --- a/crash/commands/task.py +++ b/crash/commands/task.py @@ -25,14 +25,18 @@ def __init__(self, name): parser = ArgumentParser(prog=name) - parser.add_argument('pid', type=int, nargs=1) + parser.add_argument('pid', type=int, nargs=argparse.REMAINDER) parser.format_usage = lambda: "thread \n" Command.__init__(self, name, parser) def execute(self, args): try: - thread = crash.cache.tasks.get_task(args.pid[0]).thread + if args.pid: + thread = crash.cache.tasks.get_task(args.pid[0]).thread + else: + thread = gdb.selected_thread() + gdb.execute("thread {}".format(thread.num)) except KeyError: print("No such task with pid {}".format(args.pid[0])) From 56e230f28696704a30b0bfdf4c08dcc2646cd527 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 17 May 2019 16:18:12 -0400 Subject: [PATCH 104/367] crash.infra.callbacks: explicitly connect to callbacks Having the callback objects connect to the gdb callback infrastructure made initialization complicated in some cases. 
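For example, a derived callback along the lines of the following sketch (illustrative only, not a class in the tree) now assigns its own attributes first and only then opts in to the gdb events:

    import gdb

    from crash.infra.callback import ObjfileEventCallback

    class ExampleSymbolCallback(ObjfileEventCallback):
        def __init__(self, name):
            # The base constructor no longer connects to the new_objfile
            # event, so nothing can call into this object yet.
            super().__init__()
            self.name = name

            # Opt in only once the object is fully initialized.
            self.connect_callback()

        def check_ready(self):
            return gdb.lookup_symbol(self.name, None)[0]

        def callback(self, result):
            # Returning None (or True) marks the callback as completed.
            print("resolved {}".format(self.name))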
By making it explicit, we can ensure the callback object is fully initialized before it receives its first callback. Signed-off-by: Jeff Mahoney --- crash/infra/callback.py | 25 ++++++++++++++++++------- crash/infra/lookup.py | 15 ++++++++++++--- tests/test_objfile_callbacks.py | 2 ++ 3 files changed, 32 insertions(+), 10 deletions(-) diff --git a/crash/infra/callback.py b/crash/infra/callback.py index 8307d16e630..08a0accaf8b 100644 --- a/crash/infra/callback.py +++ b/crash/infra/callback.py @@ -24,27 +24,38 @@ class ObjfileEventCallback(object): methods. """ def __init__(self): - self.completed = True - completed = False + self.completed = False + self.connected = False self.setup_symbol_cache_flush_callback() + def connect_callback(self): + if self.completed: + raise CallbackCompleted(self) + + if self.connected: + return + + self.connected = True + # We don't want to do lookups immediately if we don't have # an objfile. It'll fail for any custom types but it can # also return builtin types that are eventually changed. if len(gdb.objfiles()) > 0: result = self.check_ready() if not (result is None or result is False): - completed = self.callback(result) + self.completed = self.callback(result) - if completed is False: - self.completed = False + if self.completed is False: gdb.events.new_objfile.connect(self._new_objfile_callback) + return self.completed + def complete(self): if not self.completed: gdb.events.new_objfile.disconnect(self._new_objfile_callback) self.completed = True + self.connected = False else: raise CallbackCompleted(self) @@ -81,7 +92,7 @@ def check_ready(self): A return value other than None or False will be passed to the callback. """ - return True + raise NotImplementedError("check_ready must be implemented by derived class.") def callback(self, result): """ @@ -93,4 +104,4 @@ def callback(self, result): Args: result: The result to pass to the callback """ - pass + raise NotImplementedError("callback must be implemented by derived class.") diff --git a/crash/infra/lookup.py b/crash/infra/lookup.py index c350586e17b..56bb0f79393 100644 --- a/crash/infra/lookup.py +++ b/crash/infra/lookup.py @@ -20,10 +20,13 @@ def __init__(self, name, callback, symbol_file=None): symbol is discovered symbol_file (str, optional, default=None): Name of symbol file """ + super().__init__() + self.name = name self.symbol_file = symbol_file self.callback = callback - super().__init__() + + self.connect_callback() def check_ready(self): return gdb.lookup_minimal_symbol(self.name, self.symbol_file, None) @@ -46,10 +49,13 @@ def __init__(self, name, callback, domain=gdb.SYMBOL_VAR_DOMAIN): domain (gdb.Symbol constant, i.e. SYMBOL_*_DOMAIN): The domain to search for the symbol """ + super().__init__() + self.name = name self.domain = domain self.callback = callback - super().__init__() + + self.connect_callback() def check_ready(self): return gdb.lookup_symbol(self.name, None, self.domain)[0] @@ -78,10 +84,13 @@ class TypeCallback(ObjfileEventCallback): objfile and returns the gdb.Type associated with it. 
""" def __init__(self, name, callback, block=None): + super().__init__() + self.name = name self.block = block self.callback = callback - super().__init__() + + self.connect_callback() def check_ready(self): try: diff --git a/tests/test_objfile_callbacks.py b/tests/test_objfile_callbacks.py index 74aba9105fc..ae1906e3dc8 100644 --- a/tests/test_objfile_callbacks.py +++ b/tests/test_objfile_callbacks.py @@ -24,6 +24,8 @@ def __init__(self): self.checked = False super(test_class, self).__init__() + self.connect_callback() + def check_ready(self): self.checked = True return safe_get_symbol_value('main') From 216531ad564ed0a5be4b9fc18d14dafd347ac317 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 3 May 2019 14:09:03 -0400 Subject: [PATCH 105/367] crash.infra.lookup: move type name resolution into TypeCallback In preparation to eliminate CrashBaseClass, we need to pull type name resolution out of the DelayedLookups class that will be eventually removed. This commit moves it into TypeCallback where it can also be used by DelayedType. Signed-off-by: Jeff Mahoney --- crash/exceptions.py | 7 ++-- crash/infra/lookup.py | 67 +++++++++++++++++++------------------- crash/types/cpu.py | 5 ++- tests/test_infra_lookup.py | 14 ++++---- 4 files changed, 46 insertions(+), 47 deletions(-) diff --git a/crash/exceptions.py b/crash/exceptions.py index 593bb63f8bb..e7ddd27a359 100644 --- a/crash/exceptions.py +++ b/crash/exceptions.py @@ -18,6 +18,7 @@ class DelayedAttributeError(AttributeError): The attribute has been declared but the symbol to fill it has not yet been located. """ - def __init__(self, owner, name): - msg = "{} has delayed attribute {} but it has not been completed." - super().__init__(msg.format(owner, name)) + def __init__(self, name): + msg = "Delayed attribute {} has not been completed." + self.name = name + super().__init__(msg.format(name)) diff --git a/crash/infra/lookup.py b/crash/infra/lookup.py index 56bb0f79393..f155f818c3d 100644 --- a/crash/infra/lookup.py +++ b/crash/infra/lookup.py @@ -86,12 +86,35 @@ class TypeCallback(ObjfileEventCallback): def __init__(self, name, callback, block=None): super().__init__() - self.name = name + (self.name, self.attrname, self.pointer) = self.resolve_type(name) + self.block = block self.callback = callback self.connect_callback() + @staticmethod + def resolve_type(name): + pointer = False + name = name.strip() + if name[-1] == '*': + pointer = True + name = name[:-1].strip() + + attrname = name + if name.startswith('struct '): + attrname = name[7:].strip() + + if pointer: + attrname += '_p_type' + else: + attrname += '_type' + + name = name + attrname = attrname.replace(' ', '_') + + return (name, attrname, pointer) + def check_ready(self): try: return gdb.lookup_type(self.name, self.block) @@ -111,9 +134,9 @@ def __init__(self, name): self.name = name self.value = None - def get(self, owner): + def get(self): if self.value is None: - raise DelayedAttributeError(owner, self.name) + raise DelayedAttributeError(self.name) return self.value def callback(self, value): @@ -153,15 +176,13 @@ class DelayedType(DelayedValue): """ A DelayedValue for types. """ - def __init__(self, name, pointer=False): + def __init__(self, name): """ Args: - name (str): The name of the type. Must not be a pointer type. - pointer (bool, optional, default=False): Whether the requested - type should be returned as a pointer to that type. + name (str): The name of the type. 
""" + (name, attrname, self.pointer) = TypeCallback.resolve_type(name) super().__init__(name) - self.pointer = pointer self.cb = TypeCallback(name, self.callback) def __str__(self): @@ -201,7 +222,7 @@ def __init__(self, get): self.get = get def __get__(self, instance, owner): - return self.get(owner) + return self.get() class DelayedLookups(object): """ @@ -210,26 +231,6 @@ class DelayedLookups(object): special names. These are documented in the _CrashBaseMeta documentation. """ - @classmethod - def _resolve_type(cls, name): - pointer = False - name = name.strip() - if name[-1] == '*': - pointer = True - name = name[:-1].strip() - - attrname = name - if name.startswith('struct '): - attrname = name[7:].strip() - - if pointer: - attrname += '_p_type' - else: - attrname += '_type' - - attrname = attrname.replace(' ', '_') - return (name, attrname, pointer) - @classmethod def name_check(cls, dct, name, attrname): try: @@ -260,9 +261,8 @@ def setup_delayed_lookups_for_class(cls, clsname, dct): if not isinstance(dct['__types__'], list): raise TypeError('__types__ attribute must be a list of strings') for typ in dct['__types__']: - (lookupname, attrname, pointer) = cls._resolve_type(typ) - cls.add_lookup(clsname, dct, lookupname, - DelayedType(lookupname, pointer), attrname) + t = DelayedType(typ) + cls.add_lookup(clsname, dct, t.name, t, t.attrname) del dct['__types__'] if '__symbols__' in dct: if not isinstance(dct['__symbols__'], list): @@ -304,9 +304,8 @@ def setup_named_callbacks(this_cls, cls, dct): callbacks = [] if '__type_callbacks__' in dct: for (typ, callback) in dct['__type_callbacks__']: - (lookupname, attrname, pointer) = this_cls._resolve_type(typ) cb = getattr(cls, callback) - callbacks.append(TypeCallback(lookupname, cb)) + callbacks.append(TypeCallback(typ, cb)) del dct['__type_callbacks__'] if '__symbol_callbacks__' in dct: diff --git a/crash/types/cpu.py b/crash/types/cpu.py index 29fc3d666bd..1a83dee38e2 100644 --- a/crash/types/cpu.py +++ b/crash/types/cpu.py @@ -45,7 +45,7 @@ def highest_online_cpu_nr(self) -> None: int: The highest online CPU number """ if not TypesCPUClass.cpus_online : - raise DelayedAttributeError(self.__class__.__name__, 'cpus_online') + raise DelayedAttributeError('cpus_online') return self.cpus_online[-1] @classmethod @@ -74,6 +74,5 @@ def highest_possible_cpu_nr(self) -> int: int: The highest possible CPU number """ if not self.cpus_possible: - raise DelayedAttributeError(self.__class__.__name__, - 'cpus_possible') + raise DelayedAttributeError('cpus_possible') return self.cpus_possible[-1] diff --git a/tests/test_infra_lookup.py b/tests/test_infra_lookup.py index 9c3f50e86d4..a1acb52ecb5 100644 --- a/tests/test_infra_lookup.py +++ b/tests/test_infra_lookup.py @@ -16,7 +16,7 @@ class TestDelayedLookupSetup(unittest.TestCase): def test_resolve_struct_normal(self): spec = 'struct test' - (name, attrname, pointer) = DelayedLookups._resolve_type(spec) + (name, attrname, pointer) = TypeCallback.resolve_type(spec) self.assertTrue(name == 'struct test') self.assertTrue(attrname == 'test_type') self.assertFalse(pointer) @@ -24,7 +24,7 @@ def test_resolve_struct_normal(self): def test_resolve_struct_normal_pointer(self): spec = 'struct test *' - (name, attrname, pointer) = DelayedLookups._resolve_type(spec) + (name, attrname, pointer) = TypeCallback.resolve_type(spec) self.assertTrue(name == 'struct test') self.assertTrue(attrname == 'test_p_type') self.assertTrue(pointer) @@ -32,7 +32,7 @@ def test_resolve_struct_normal_pointer(self): def 
test_resolve_struct_leading_whitespace(self): spec = ' struct test' - (name, attrname, pointer) = DelayedLookups._resolve_type(spec) + (name, attrname, pointer) = TypeCallback.resolve_type(spec) self.assertTrue(name == 'struct test') self.assertTrue(attrname == 'test_type') self.assertFalse(pointer) @@ -40,7 +40,7 @@ def test_resolve_struct_leading_whitespace(self): def test_resolve_struct_trailing_whitespace(self): spec = 'struct test ' - (name, attrname, pointer) = DelayedLookups._resolve_type(spec) + (name, attrname, pointer) = TypeCallback.resolve_type(spec) self.assertTrue(name == 'struct test') self.assertTrue(attrname == 'test_type') self.assertFalse(pointer) @@ -48,7 +48,7 @@ def test_resolve_struct_trailing_whitespace(self): def test_resolve_struct_middle_whitespace(self): spec = 'struct test' - (name, attrname, pointer) = DelayedLookups._resolve_type(spec) + (name, attrname, pointer) = TypeCallback.resolve_type(spec) self.assertTrue(name == 'struct test') self.assertTrue(attrname == 'test_type') self.assertFalse(pointer) @@ -56,7 +56,7 @@ def test_resolve_struct_middle_whitespace(self): def test_resolve_char(self): spec = 'char' - (name, attrname, pointer) = DelayedLookups._resolve_type(spec) + (name, attrname, pointer) = TypeCallback.resolve_type(spec) self.assertTrue(name == 'char') self.assertTrue(attrname == 'char_type') self.assertFalse(pointer) @@ -64,7 +64,7 @@ def test_resolve_char(self): def test_resolve_char_pointer(self): spec = 'char *' - (name, attrname, pointer) = DelayedLookups._resolve_type(spec) + (name, attrname, pointer) = TypeCallback.resolve_type(spec) self.assertTrue(name == 'char') self.assertTrue(attrname == 'char_p_type') self.assertTrue(pointer) From 8f2d03b918c759ffd0f687f06950c779e79893e6 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 3 May 2019 15:03:37 -0400 Subject: [PATCH 106/367] crash.util.symbols: introduce simplified delayed lookups The CrashBaseClass method of resolving types and symbols automatically is overcomplicated. We have classes in modules that have no use for them other than to access the type and symbol resolution. We then have to export helpers to the module namespace for convenience. It's not obvious to most people why any of this stuff is the way it is, which makes contributing a challenge. This commit introduces a new crash.utils.symbol module that offers a simplified way to do it. We still have the automatic importing into a namespace, but the namespaces are not embedded in a class. Instead, we have a DelayedCollection object that is extended for particular uses. For example, we can convert for_each_super_block to the following: from crash.util.symbols import Types, Symvals types = Types(['struct super_block', 'struct module *']) symvals = Symvals(['super_blocks']) def for_each_super_block(): for sb in for_each_list_entry(symvals.super_blocks, types.super_block_type, s_list) yield sb Unfortunately, moving the attribute creation outside of the metaclass means that it's subject to Python name mangling. This means that symbols prefixed with two or more underscores cannot be accessed via the attribute "dot" interface. The DelayedCollection class supports attribute access via the dict "[name]" interface as well as via a get(name) accessor. 
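One place this shows up is when such a collection is used inside a class body: a dotted reference to a name with a leading double underscore is mangled by Python before it ever reaches the collection. A rough sketch (the double-underscore symbol name here is only an illustration, not something this series depends on):

    from crash.util.symbols import Symvals

    symvals = Symvals(['init_task', '__per_cpu_offset'])

    class Example:
        def dump(self):
            # Attribute access is fine for ordinary names.
            print(symvals.init_task)

            # Inside a class body, symvals.__per_cpu_offset would be
            # mangled to symvals._Example__per_cpu_offset, so use the
            # dict-style or get() accessors instead.
            print(symvals['__per_cpu_offset'])
            print(symvals.get('__per_cpu_offset'))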
Signed-off-by: Jeff Mahoney --- crash/infra/callback.py | 60 +++++-- crash/infra/lookup.py | 244 ++++++++++++++++++++------- crash/util/symbols.py | 331 +++++++++++++++++++++++++++++++++++++ tests/test_util_symbols.py | 241 +++++++++++++++++++++++++++ 4 files changed, 801 insertions(+), 75 deletions(-) create mode 100644 crash/util/symbols.py create mode 100644 tests/test_util_symbols.py diff --git a/crash/infra/callback.py b/crash/infra/callback.py index 08a0accaf8b..88b4b5753f0 100644 --- a/crash/infra/callback.py +++ b/crash/infra/callback.py @@ -1,10 +1,14 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +from typing import Callable, Any, Union + import gdb import traceback import sys +Callback = Callable[[Any], Union[bool, None]] + class CallbackCompleted(RuntimeError): """The callback has already been completed and is no longer valid""" def __init__(self, callback_obj): @@ -22,14 +26,23 @@ class ObjfileEventCallback(object): Derived classes need only implement the complete and check_ready methods. + + Consumers of this interface must also call :meth:`connect_callback` to + connect the object to the callback infrastructure. """ def __init__(self): self.completed = False self.connected = False - self.setup_symbol_cache_flush_callback() + self._setup_symbol_cache_flush_callback() def connect_callback(self): + """ + Connect this callback to the event system. + + Raises: + :obj:`CallbackCompleted`: This callback has already been completed. + """ if self.completed: raise CallbackCompleted(self) @@ -52,6 +65,12 @@ def connect_callback(self): return self.completed def complete(self): + """ + Complete and disconnect this callback from the event system. + + Raises: + :obj:`CallbackCompleted`: This callback has already been completed. + """ if not self.completed: gdb.events.new_objfile.disconnect(self._new_objfile_callback) self.completed = True @@ -59,19 +78,19 @@ def complete(self): else: raise CallbackCompleted(self) - symbol_cache_flush_setup = False + _symbol_cache_flush_setup = False @classmethod - def setup_symbol_cache_flush_callback(cls): - if not cls.symbol_cache_flush_setup: - gdb.events.new_objfile.connect(cls.flush_symbol_cache_callback) - cls.symbol_cache_flush_setup = True + def _setup_symbol_cache_flush_callback(cls): + if not cls._symbol_cache_flush_setup: + gdb.events.new_objfile.connect(cls._flush_symbol_cache_callback) + cls._symbol_cache_flush_setup = True # GDB does this itself, but Python is initialized ahead of the # symtab code. The symtab observer is behind the python observers # in the execution queue so the cache flush executes /after/ us. @classmethod - def flush_symbol_cache_callback(self, event): + def _flush_symbol_cache_callback(self, event): gdb.execute("maint flush-symbol-cache") def _new_objfile_callback(self, event): @@ -86,22 +105,29 @@ def _new_objfile_callback(self, event): if completed is True or completed is None: self.complete() - def check_ready(self): + def check_ready(self) -> Any: """ - check_ready returns the value that will be passed to the callback. - A return value other than None or False will be passed to the - callback. + The method that derived classes implement for detecting when the + conditions required to call the callback have been met. + + Returns: + :obj:`object`: This method can return an arbitrary object. It will + be passed untouched to :meth:`callback` if the result is anything + other than :obj:`None` or :obj:`False`. 
""" raise NotImplementedError("check_ready must be implemented by derived class.") - def callback(self, result): + def callback(self, result: Any) -> Union[None, bool]: """ - The callback may return None, True, or False. A return value of - None or True indicates that the callback is completed and may - be disconnected. A return value of False indicates that the - callback should stay connected for future use. + The callback that derived classes implement for handling the + sucessful result of :meth:`check_ready`. Args: - result: The result to pass to the callback + result: The result returned from :meth:`check_ready` + + Returns: + :obj:`None` or :obj:`bool`: If :obj:`None` or :obj:`True`, + the callback succeeded and will be completed and removed. + Otherwise, the callback will stay connected for future completion. """ raise NotImplementedError("callback must be implemented by derived class.") diff --git a/crash/infra/lookup.py b/crash/infra/lookup.py index f155f818c3d..527e0f3b06f 100644 --- a/crash/infra/lookup.py +++ b/crash/infra/lookup.py @@ -3,61 +3,125 @@ import gdb +from typing import Tuple, Any, Union + import crash.infra from crash.infra.callback import ObjfileEventCallback +from crash.infra.callback import Callback from crash.exceptions import DelayedAttributeError -class MinimalSymbolCallback(ObjfileEventCallback): +class NamedCallback(ObjfileEventCallback): """ - A callback that executes when the named minimal symbol is - discovered in the objfile and returns the gdb.MinimalSymbol. + A base class for Callbacks with names + + This cannot be used directly since it does not provide a + method for :meth:`.ObjfileEventCallback.callback`. + + Args: + name: The name of the symbol or type to be resolved. + callback: A function to call with the result of the derived class's + :meth:`.ObjfileEventCallback.check_ready` method. + attrname (optional): A name safe for use as an attribute name. + If unspecified, defaults to the same string as name. + + Attributes: + name (:obj:`str`): The name of the symbol or type being resolved. + attrname (:obj:`str`): The name of symbol or type being resolved + translated for use as an attribute name. """ - def __init__(self, name, callback, symbol_file=None): + def __init__(self, name: str, callback: Callback, attrname: str=None): + super().__init__() + + self.name = name + self.attrname = self.name + + if attrname is not None: + self.attrname = attrname + + self._callback = callback + + def callback(self, result: Any) -> Union[None, bool]: """ + The callback for handling the sucessful result of :meth:`check_ready`. + + It indirectly calls the callback specified in the constructor. + Args: - name (str): The name of the minimal symbol to discover - callback (method): The callback to execute when the minimal - symbol is discovered - symbol_file (str, optional, default=None): Name of symbol file + result: The result returned from :meth:`check_ready` + + Returns: + :obj:`None` or :obj:`bool`: If :obj:`None` or :obj:`True`, + the callback succeeded and will be completed and removed. + Otherwise, the callback will stay connected for future completion. """ - super().__init__() + return self._callback(result) + +class MinimalSymbolCallback(NamedCallback): + """ + A callback that executes when the named minimal symbol is + discovered in the objfile and returns the :obj:`gdb.MinSymbol`. + + The callback must accept a :obj:`gdb.MinSymbol` and return + :obj:`bool` or :obj:`None`. 
+ + Args: + name: The name of the minimal symbol to discover + callback: The callback to execute when the minimal symbol is discovered + symbol_file (optional): Name of the symbol file to use + """ + def __init__(self, name: str, callback: Callback, symbol_file: str=None): + super().__init__(name, callback) - self.name = name self.symbol_file = symbol_file - self.callback = callback self.connect_callback() - def check_ready(self): + def check_ready(self) -> gdb.MinSymbol: + """ + Returns the result of looking up the minimal symbol when a new + object file is loaded. + + Returns: + :obj:`gdb.MinSymbol`: The requested minimal symbol + """ return gdb.lookup_minimal_symbol(self.name, self.symbol_file, None) + def __str__(self): return ("<{}({}, {}, {})>" .format(self.__class__.__name__, self.name, self.symbol_file, self.callback)) -class SymbolCallback(ObjfileEventCallback): +class SymbolCallback(NamedCallback): """ A callback that executes when the named symbol is discovered in the - objfile and returns the gdb.Symbol. + objfile and returns the :obj:`gdb.Symbol`. + + The callback must accept a :obj:`gdb.Symbol` and return :obj:`bool` + or :obj:`None`. + + Args: + name: The name of the symbol to discover + callback: The callback to execute when the symbol is discovered + domain (optional): The domain to search for the symbol. The value + is assumed to be one of the value associated with :obj:`gdb.Symbol` + constant, i.e. SYMBOL_*_DOMAIN. """ - def __init__(self, name, callback, domain=gdb.SYMBOL_VAR_DOMAIN): - """ - Args: - name (str): The name of the symbol to discover - callbacks (method): The callback to execute when the minimal - symbol is discover - domain (gdb.Symbol constant, i.e. SYMBOL_*_DOMAIN): The domain - to search for the symbol - """ - super().__init__() + def __init__(self, name: str, callback: Callback, + domain: int=gdb.SYMBOL_VAR_DOMAIN): + super().__init__(name, callback) - self.name = name self.domain = domain - self.callback = callback self.connect_callback() - def check_ready(self): + def check_ready(self) -> gdb.Symbol: + """ + Returns the result of looking up the symbol when a new object + file is loaded. + + Returns: + :obj:`gdb.Symbol`: The requested symbol + """ return gdb.lookup_symbol(self.name, None, self.domain)[0] def __str__(self): @@ -67,9 +131,22 @@ def __str__(self): class SymvalCallback(SymbolCallback): """ A callback that executes when the named symbol is discovered in the - objfile and returns the gdb.Value associated with it. + objfile and returns the :obj:`gdb.Value` associated with the + :obj:`gdb.Symbol`. + + The callback must accept a :obj:`gdb.Value` and return :obj:`bool` + or :obj:`None`. + + See :obj:`SymbolCallback` for arguments. """ - def check_ready(self): + def check_ready(self) -> gdb.Value: + """ + After successfully looking up the :obj:`gdb.Symbol`, returns + the :obj:`gdb.Value` associated with it. + + Returns: + :obj:`gdb.Value`: The value associated with the requested symbol + """ sym = super().check_ready() if sym is not None: try: @@ -78,23 +155,65 @@ def check_ready(self): pass return None -class TypeCallback(ObjfileEventCallback): +class TypeCallback(NamedCallback): """ A callback that executes when the named type is discovered in the - objfile and returns the gdb.Type associated with it. + objfile and returns the :obj:`gdb.Type` associated with it. + + The callback must accept a :obj:`gdb.Type` and return :obj:`bool` + or :obj:`None`. 
+ + Args: + name: The name of the type to discover + callback: The callback to execute when the type is discovered + block (optional): The :obj:`gdb.Block` to search for the symbol + """ - def __init__(self, name, callback, block=None): - super().__init__() + def __init__(self, name: str, callback: Callback, block: gdb.Block=None): + (name, attrname, self.pointer) = self.resolve_type(name) - (self.name, self.attrname, self.pointer) = self.resolve_type(name) + super().__init__(name, callback, attrname) self.block = block - self.callback = callback self.connect_callback() @staticmethod - def resolve_type(name): + def resolve_type(name: str) -> Tuple[str, str, bool]: + """ + This function takes a C type name and translates it into a 3-tuple + that contains the basic type name, the type name translated to + a form suitable for an attribute name, and whether the type + corresponds to a pointer. + + The basic type name has all leading and trailing whitespace stripped, + and any ``*`` removed. + + The attribute type name takes that base, removes the leading + ``struct`` for structure types, removes any leading or trailing + whitespace, replaces internal spaces with underscores, and appends + a ``_type`` or ``_p_type`` suffix, depending on whether the type + is a pointer type. + + Some examples: + + - ``struct foo`` → ``foo_type`` + - ``struct foo *`` → ``foo_p_type`` + - ``unsigned long`` → ``unsigned_long_type`` + + *Notes*: + - Multiple levels of pointers are not handled properly. In + practice this means that ``struct foo *`` and + ``struct foo **`` can't be used simultaneously. This is + typically not a problem. + - Unions are not handled as a special case as structs are. A + union type would use an attribute name of ``union_foo_type``. + + Returns: + (:obj:`str`, :obj:`str`, :obj:`bool`): A 3-tuple consisting of + the basic type name, the name formatted for use as an attribute + name, and whether the type is a pointer type. + """ pointer = False name = name.strip() if name[-1] == '*': @@ -130,8 +249,11 @@ class DelayedValue(object): A generic class for making class attributes available that describe to-be-loaded symbols, minimal symbols, and types. """ - def __init__(self, name): + def __init__(self, name, attrname=None): self.name = name + self.attrname = attrname + if self.attrname is None: + self.attrname = name self.value = None def get(self): @@ -147,46 +269,45 @@ def callback(self, value): class DelayedMinimalSymbol(DelayedValue): """ A DelayedValue that handles minimal symbols. + + Args: + name: The name of the minimal symbol """ - def __init__(self, name): - """ - Args: - name (str): The name of the minimal symbol - """ + def __init__(self, name: str): super().__init__(name) self.cb = MinimalSymbolCallback(name, self.callback) + def __str__(self): return "{} attached with {}".format(self.__class__, str(self.cb)) class DelayedSymbol(DelayedValue): """ A DelayedValue that handles symbols. + + Args: + name: The name of the symbol """ - def __init__(self, name): - """ - Args: - name (str): The name of the symbol - """ + def __init__(self, name: str): super().__init__(name) self.cb = SymbolCallback(name, self.callback) + def __str__(self): return "{} attached with {}".format(self.__class__, str(self.cb)) class DelayedType(DelayedValue): """ A DelayedValue for types. + + Args: + name: The name of the type. """ - def __init__(self, name): - """ - Args: - name (str): The name of the type. 
- """ + def __init__(self, name: str): (name, attrname, self.pointer) = TypeCallback.resolve_type(name) - super().__init__(name) + super().__init__(name, attrname) self.cb = TypeCallback(name, self.callback) def __str__(self): - return "{} attached with {}".format(self.__class__, str(self.cb)) + return "{} attached with {}".format(self.__class__, str(self.callback)) def callback(self, value): if self.pointer: @@ -195,9 +316,13 @@ def callback(self, value): class DelayedSymval(DelayedSymbol): """ - A DelayedSymbol that returns the gdb.Value associated with the symbol. + A :obj:`DelayedSymbol` that returns the :obj:`gdb.Value` + associated with the symbol. + + Args: + name: The name of the symbol. """ - def callback(self, value): + def callback(self, value: gdb.Symbol) -> None: symval = value.value() if symval.type.code == gdb.TYPE_CODE_FUNC: symval = symval.address @@ -209,9 +334,12 @@ def __str__(self): class DelayedMinimalSymval(DelayedMinimalSymbol): """ A DelayedMinimalSymbol that returns the address of the - minimal symbol as a int. + minimal symbol as an :obj:`int`. + + Args: + name: The name of the minimal symbol. """ - def callback(self, value): + def callback(self, value: gdb.MinSymbol) -> None: self.value = int(value.value().address) def __str__(self): diff --git a/crash/util/symbols.py b/crash/util/symbols.py new file mode 100644 index 00000000000..580294b8b30 --- /dev/null +++ b/crash/util/symbols.py @@ -0,0 +1,331 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +""" +The crash.util.symbols module provides a mechanism to simply discover +and resolve symbols, types, minimal symbols, and values. + +A typical use is declaring a DelayedCollection at the top of a module and +using the DelayedCollection within the classes and functions that are +a part of the module. + +Each of the collections defined here are instantiated using a list of +names that each collection type will resolve into a type, a symbol, a minimal +symbol, etc. The names will by available as dictionary keys and also as +attribute names. In the latter case, the names will be resolved into +a form usable as an attribute name. See :class:`.Types` for more information. +""" + +from typing import Type, List, Tuple, Callable, Union, Dict, Any + +import gdb + +from crash.infra.lookup import DelayedType, DelayedSymbol, DelayedSymval +from crash.infra.lookup import DelayedValue, DelayedMinimalSymbol +from crash.infra.lookup import DelayedMinimalSymval +from crash.infra.lookup import NamedCallback, TypeCallback +from crash.infra.lookup import SymbolCallback, MinimalSymbolCallback +from crash.exceptions import DelayedAttributeError + +CollectedValue = Union[gdb.Type, gdb.Value, gdb.Symbol, gdb.MinSymbol, Any] +Names = Union[List[str], str] + +class DelayedCollection(object): + """ + A generic container for delayed lookups. + + In addition to the :meth:`get` method, the names are also accessible + via attribute names (``__getattr__``) or dictionary keys (``__getitem__``). + + Args: + cls: The type of :obj:`.DelayedValue` to be collected + names: The names of all the symbols to be collected + + Attributes: + attrs (:obj:`dict`): A dictionary that maps the attribute names + to the :obj:`.DelayedValue` object associated with each one. + While the ``__getattr__`` and ``__getitem__`` methods will + return the contained object. This dictionary will contain + the container object *or* the contained object if it has + been overridden via :meth:`override`. 
+ """ + def __init__(self, cls: Type[DelayedValue], names: Names): + self.attrs: Dict[str, DelayedValue] = {} + + if isinstance(names, str): + names = [ names ] + + for name in names: + t = cls(name) + self.attrs[t.attrname] = t + self.attrs[t.name] = t + + def get(self, name: str) -> CollectedValue: + """ + Obtain the object associated with name + + Args: + name: The attribute name associated with the :obj:`.DelayedValue` + + Returns: + :obj:`object`: The underlying object associated with this name. + + Raises: + :obj:`NameError`: The name does not exist. + :obj:`.DelayedAttributeError`: The name exists but the value + has not been resolved yet. + """ + if name not in self.attrs: + raise NameError(f"'{self.__class__}' object has no '{name}'") + + if self.attrs[name].value is not None: + setattr(self, name, self.attrs[name].value) + return self.attrs[name].value + + raise DelayedAttributeError(name) + + def override(self, name: str, value: CollectedValue) -> None: + """ + Override the :obj:`.DelayedValue` stored in the collection + + At times it may be required to override the value kept in the + collection. + """ + if not name in self.attrs: + raise RuntimeError(f"{name} is not part of this collection") + + self.attrs[name].value = value + + def __getitem__(self, name): + try: + return self.get(name) + except NameError as e: + raise KeyError(str(e)) + + def __getattr__(self, name): + try: + return self.get(name) + except NameError as e: + raise AttributeError(str(e)) + +class Types(DelayedCollection): + """ + A container to resolve :obj:`gdb.Type` objects from the symbol table + as they become available. + + Example: + + .. code-block:: pycon + + >>> from crash.util.symbols import Types + >>> types = Types(["struct foo", "struct foo *"]) + >>> ex1 = types.foo_type + >>> ex2 = types.foo_p_type + >>> ex3 = types['foo_type'] + >>> ex4 = types['struct foo'] + + See :meth:`~crash.infra.lookup.TypeCallback.resolve_type` for details. + + Args: + names: A :obj:`str` or :obj:`list` of :obj:`str` containing the names + of the types to resolve. + """ + def __init__(self, names: Names): + super(Types, self).__init__(DelayedType, names) + + def override(self, name: str, value: gdb.Type) -> None: + """ + Override the type value, resolving the type name first. + + The *real* type name is used, not the attribute name. + + .. code-block: pycon + + >>> t = gdb.lookup_type('struct foo') + >>> types.override('struct foo', t) + """ + (name, attrname, pointer) = TypeCallback.resolve_type(name) + + super().override(name, value) + super().override(attrname, value) + +class Symbols(DelayedCollection): + """ + A container to resolve :obj:`gdb.Symbol` objects from the symbol table + as they become available. + + Example: + + .. code-block:: pycon + + >>> from crash.util.symbols import Symvals + >>> symbols = Symbols(["modules", "super_blocks"]) + >>> print(symbols.modules) + modules + >>> print(symbols['modules']) + modules + >>> print(symbols.modules.type) + + + Args: + names: A :obj:`str` or :obj:`list` of :obj:`str` containing the names + of the symbols to resolve. + """ + def __init__(self, names): + super(Symbols, self).__init__(DelayedSymbol, names) + +class Symvals(DelayedCollection): + """ + A container to resolve :obj:`gdb.Symbol` objects from the symbol table + as they become available and use the associated values as the stored + object. + + Example: + + .. 
code-block:: pycon + + >>> from crash.util.symbols import Symvals + >>> symvals = Symvals(["modules", "super_blocks"]) + >>> print(symvals.modules) + { + next = 0xffffffffc0675208 <__this_module+8>, + prev = 0xffffffffc00e8b48 <__this_module+8> + } + >>> print(symvals.modules.address) + 0xffffffffab0ff030 + >>> print(symvals['modules']) + { + next = 0xffffffffc0675208 <__this_module+8>, + prev = 0xffffffffc00e8b48 <__this_module+8> + } + >>> print(symvals.modules.type) + + + Args: + names: A :obj:`str` or :obj:`list` of :obj:`str` containing the names + of the symbols to resolve. + """ + def __init__(self, names): + super(Symvals, self).__init__(DelayedSymval, names) + +class MinimalSymbols(DelayedCollection): + """ + A container to resolve :obj:`gdb.MinSymbol` objects from the symbol table + as they become available. Minimal symbols don't have any type information + associated with them so they are mostly used to resolve names to + addresses. + + Example: + + .. code-block:: pycon + + >>> import gdb + >>> from crash.util.symbols import MinimalSymbols + >>> msymbols = MinimalSymbols(['modules', 'super_block']) + >>> print(msymbols.modules.type) + 11 + >>> print(gdb.MINSYMBOL_TYPE_FILE_BSS) + 11 + >>> print(msymbols['modules']) + modules + >>> print(msymbols['modules'].value()) + + >>> print(msymbols['modules'].value().address) + 0xffffffff820ff030 + >>> print(type(msymbols['modules'])) + + + Args: + names: A :obj:`str` or :obj:`list` of :obj:`str` containing the names + of the minimal symbols to resolve. + """ + def __init__(self, names): + super(MinimalSymbols, self).__init__(DelayedMinimalSymbol, names) + +class MinimalSymvals(DelayedCollection): + """ + A container to resolve :obj:`gdb.MinSymbol` objects from the symbol table + as they become available and uses the address of the values associated + with them as the stored object. Minimal symbols don't have any type + information associated with them so they are mostly used to resolve + names to addresses. + + Example: + + .. code-block:: pycon + + >>> import gdb + from crash.util.symbols import MinimalSymvals + >>> msymvals = MinimalSymvals(['modules', 'super_block']) + >>> print(f"{msymvals.modules:#x}") + 0xffffffff820ff030 + >>> print(f"{msymvals['modules']:#x}") + 0xffffffff820ff030 + >>> print(type(msymvals['modules'])) + + + Args: + names: A :obj:`str` or :obj:`list` of :obj:`str` containing the names + of the minimal symbols to resolve. + """ + def __init__(self, names): + super(MinimalSymvals, self).__init__(DelayedMinimalSymval, names) + +class DelayedValues(DelayedCollection): + """ + A container to keep generic :class:`.DelayedValue` objects. + + These will raise :obj:`.DelayedAttributeError` until + :meth:`.DelayedValue.callback` is called with a value to populate it. + + The callback must be accessed via :attr:`.DelayedCollection.attrs` or the + :obj:`.DelayedValue` object will be evaluated first, also raising + :obj:`.DelayedAttributeError`. + + Example: + + .. 
code-block:: pycon + + >>> from crash.util.symbols import DelayedValues + >>> dvals = DelayedValues(['generic_value', 'another_value']) + >>> dvals.attrs['generic_value'].callback(True) + >>> print(dvals.generic_value) + True + >>> print(dvals.another_value) + Traceback (most recent call last): + File "", line 4, in + File "./build/lib/crash/util/symbols.py", line 107, in __getattr__ + return self.get(name) + File "./build/lib/crash/util/symbols.py", line 85, in get + raise DelayedAttributeError(name) + crash.exceptions.DelayedAttributeError: Delayed attribute another_value has not been completed. + + Args: + names: The names to use for the :obj:`.DelayedValue` objects. + """ + def __init__(self, names: Names): + super(DelayedValues, self).__init__(DelayedValue, names) + +CallbackSpecifier = Tuple[str, Callable] +CallbackSpecifiers = Union[List[CallbackSpecifier], CallbackSpecifier] + +class CallbackCollection(object): + def __init__(self, cls: Type[NamedCallback], cbs: CallbackSpecifiers): + if isinstance(cbs, tuple): + cbs = [ cbs ] + + for cb in cbs: + t = cls(cb[0], cb[1]) + setattr(self, t.attrname, t) + +class TypeCallbacks(CallbackCollection): + def __init__(self, cbs): + super().__init__(TypeCallback, cbs) + +class SymbolCallbacks(CallbackCollection): + def __init__(self, cbs): + super().__init__(SymbolCallback, cbs) + +class MinimalSymbolCallbacks(CallbackCollection): + def __init__(self, cbs): + super().__init__(MinimalSymbolCallback, cbs) + diff --git a/tests/test_util_symbols.py b/tests/test_util_symbols.py new file mode 100644 index 00000000000..3bb43e9f372 --- /dev/null +++ b/tests/test_util_symbols.py @@ -0,0 +1,241 @@ +# -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +import unittest +import gdb + +from crash.exceptions import DelayedAttributeError + +from crash.util.symbols import MinimalSymbols, Symbols, Symvals, Types +from crash.util.symbols import TypeCallbacks, SymbolCallbacks +from crash.util.symbols import MinimalSymbolCallbacks + +class TestDelayedContainers(unittest.TestCase): + def setUp(self): + gdb.execute("file") + + def load_file(self): + gdb.execute("file tests/test-util") + + def msymbol_test(self): + class Test(object): + msymbols = MinimalSymbols([ 'test_struct' ]) + return Test + + def test_bad_msymbol_name(self): + test = self.msymbol_test() + x = test.msymbols + with self.assertRaises(AttributeError): + y = x.bad_symbol_name + + def test_msymbol_unavailable_at_start(self): + test = self.msymbol_test() + x = test().msymbols + with self.assertRaises(DelayedAttributeError): + y = x.test_struct + + def test_msymbol_available_on_load(self): + test = self.msymbol_test() + x = test().msymbols + with self.assertRaises(DelayedAttributeError): + y = x.test_struct + self.load_file() + self.assertTrue(isinstance(x.test_struct, gdb.MinSymbol)) + + def test_msymbol_available_at_start(self): + test = self.msymbol_test() + x = test().msymbols + self.load_file() + + self.assertTrue(isinstance(x.test_struct, gdb.MinSymbol)) + + def symbol_test(self): + class Test(object): + symbols = Symbols([ 'test_struct' ]) + return Test + + def test_bad_symbol_name(self): + test = self.symbol_test() + x = test.symbols + with self.assertRaises(AttributeError): + y = x.bad_symbol_name + + def test_symbol_unavailable_at_start(self): + test = self.symbol_test() + x = test().symbols + with self.assertRaises(DelayedAttributeError): + y = x.test_struct + + def test_symbol_available_on_load(self): + test = self.symbol_test() + x = test().symbols + with 
self.assertRaises(DelayedAttributeError): + y = x.test_struct + self.load_file() + self.assertTrue(isinstance(x.test_struct, gdb.Symbol)) + + def test_symbol_available_at_start(self): + test = self.symbol_test() + self.load_file() + + x = test().symbols + self.assertTrue(isinstance(x.test_struct, gdb.Symbol)) + + def symval_test(self): + class Test(object): + symvals = Symvals( [ 'test_struct' ] ) + return Test + + def test_bad_symval_name(self): + test = self.symval_test() + x = test.symvals + with self.assertRaises(AttributeError): + y = x.bad_symval_name + + def test_symval_unavailable_at_start(self): + test = self.symval_test() + x = test().symvals + with self.assertRaises(DelayedAttributeError): + y = x.test_struct + + def test_symval_available_on_load(self): + test = self.symval_test() + x = test().symvals + with self.assertRaises(DelayedAttributeError): + y = x.test_struct + self.load_file() + self.assertTrue(isinstance(x.test_struct, gdb.Value)) + + def test_symval_available_at_start(self): + test = self.symval_test() + self.load_file() + + x = test().symvals + self.assertTrue(isinstance(x.test_struct, gdb.Value)) + + def type_test(self): + class Test(object): + types = Types( [ 'struct test' ] ) + return Test + + def test_bad_type_name(self): + test = self.type_test() + x = test.types + with self.assertRaises(AttributeError): + y = x.bad_type_name + + def test_type_unavailable_at_start(self): + test = self.type_test() + x = test().types + with self.assertRaises(DelayedAttributeError): + y = x.test_type + + def test_type_available_on_load(self): + test = self.type_test() + x = test().types + with self.assertRaises(DelayedAttributeError): + y = x.test_type + self.load_file() + y = x.test_type + self.assertTrue(isinstance(y, gdb.Type)) + + def test_type_available_at_start(self): + test = self.type_test() + self.load_file() + + x = test().types + y = x.test_type + self.assertTrue(isinstance(y, gdb.Type)) + + def ptype_test(self): + class Test(object): + types = Types( [ 'struct test *' ]) + return Test + + def test_bad_ptype_name(self): + test = self.ptype_test() + x = test.types + with self.assertRaises(AttributeError): + y = x.bad_ptype_name + + def test_p_type_unavailable_at_start(self): + test = self.ptype_test() + x = test().types + with self.assertRaises(DelayedAttributeError): + y = x.test_p_type + + def test_p_type_available_on_load(self): + test = self.ptype_test() + x = test().types + with self.assertRaises(DelayedAttributeError): + y = x.test_p_type + self.load_file() + y = x.test_p_type + self.assertTrue(isinstance(y, gdb.Type)) + + def test_p_type_available_at_start(self): + test = self.ptype_test() + self.load_file() + + x = test().types + y = x.test_p_type + self.assertTrue(isinstance(y, gdb.Type)) + + def type_callback_test(self): + class Test(object): + class nested(object): + ulong_valid = False + + @classmethod + def check_ulong(cls, gdbtype): + cls.ulong_valid = True + + type_cbs = TypeCallbacks( [ ('unsigned long', + nested.check_ulong) ] ) + return Test + + def test_type_callback_nofile(self): + test = self.type_callback_test() + x = test().nested + self.assertFalse(x.ulong_valid) + with self.assertRaises(AttributeError): + y = x.unsigned_long_type + + def test_type_callback(self): + test = self.type_callback_test() + x = test().nested + self.load_file() + self.assertTrue(x.ulong_valid) + with self.assertRaises(AttributeError): + y = x.unsigned_long_type + + def type_callback_test_multi(self): + class Test(object): + class nested(object): + types = Types( [ 
'unsigned long' ] ) + + ulong_valid = False + + @classmethod + def check_ulong(cls, gdbtype): + cls.ulong_valid = True + + type_cbs = TypeCallbacks( [ ('unsigned long', + nested.check_ulong) ] ) + + return Test + + def test_type_callback_nofile_multi(self): + test = self.type_callback_test_multi() + x = test().nested + self.assertFalse(x.ulong_valid) + with self.assertRaises(DelayedAttributeError): + y = x.types.unsigned_long_type + + def test_type_callback_multi(self): + test = self.type_callback_test_multi() + x = test().nested + self.load_file() + self.assertTrue(x.ulong_valid) + y = x.types.unsigned_long_type + self.assertTrue(isinstance(y, gdb.Type)) + self.assertTrue(y.sizeof > 4) From c1467a7157595b2f3d8aab02935dc4590929379d Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 26 Apr 2019 21:36:51 +0200 Subject: [PATCH 107/367] crash.subsystem.filesystem: add super_flags super_flags returns a human-readable string describing the flags in super_block->s_flags. Signed-off-by: Jeff Mahoney --- crash/subsystem/filesystem/__init__.py | 81 +++++++++++++++++++++++++- 1 file changed, 79 insertions(+), 2 deletions(-) diff --git a/crash/subsystem/filesystem/__init__.py b/crash/subsystem/filesystem/__init__.py index 410cdd02ea2..fbba96ec0e0 100644 --- a/crash/subsystem/filesystem/__init__.py +++ b/crash/subsystem/filesystem/__init__.py @@ -2,12 +2,73 @@ # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: import gdb -from crash.util import container_of +from crash.util import container_of, decode_flags from crash.infra import CrashBaseClass, export from crash.types.list import list_for_each_entry from crash.subsystem.storage import block_device_name from crash.subsystem.storage import Storage as block +MS_RDONLY = 1 +MS_NOSUID = 2 +MS_NODEV = 4 +MS_NOEXEC = 8 +MS_SYNCHRONOUS = 16 +MS_REMOUNT = 32 +MS_MANDLOCK = 64 +MS_DIRSYNC = 128 +MS_NOATIME = 1024 +MS_NODIRATIME = 2048 +MS_BIND = 4096 +MS_MOVE = 8192 +MS_REC = 16384 +MS_VERBOSE = 32768 +MS_SILENT = 32768 +MS_POSIXACL = (1<<16) +MS_UNBINDABLE = (1<<17) +MS_PRIVATE = (1<<18) +MS_SLAVE = (1<<19) +MS_SHARED = (1<<20) +MS_RELATIME = (1<<21) +MS_KERNMOUNT = (1<<22) +MS_I_VERSION = (1<<23) +MS_STRICTATIME = (1<<24) +MS_LAZYTIME = (1<<25) +MS_NOSEC = (1<<28) +MS_BORN = (1<<29) +MS_ACTIVE = (1<<30) +MS_NOUSER = (1<<31) + +SB_FLAGS = { + MS_RDONLY : "MS_RDONLY", + MS_NOSUID : "MS_NOSUID", + MS_NODEV : "MS_NODEV", + MS_NOEXEC : "MS_NOEXEC", + MS_SYNCHRONOUS : "MS_SYNCHRONOUS", + MS_REMOUNT : "MS_REMOUNT", + MS_MANDLOCK : "MS_MANDLOCK", + MS_DIRSYNC : "MS_DIRSYNC", + MS_NOATIME : "MS_NOATIME", + MS_NODIRATIME : "MS_NODIRATIME", + MS_BIND : "MS_BIND", + MS_MOVE : "MS_MOVE", + MS_REC : "MS_REC", + MS_SILENT : "MS_SILENT", + MS_POSIXACL : "MS_POSIXACL", + MS_UNBINDABLE : "MS_UNBINDABLE", + MS_PRIVATE : "MS_PRIVATE", + MS_SLAVE : "MS_SLAVE", + MS_SHARED : "MS_SHARED", + MS_RELATIME : "MS_RELATIME", + MS_KERNMOUNT : "MS_KERNMOUNT", + MS_I_VERSION : "MS_I_VERSION", + MS_STRICTATIME : "MS_STRICTATIME", + MS_LAZYTIME : "MS_LAZYTIME", + MS_NOSEC : "MS_NOSEC", + MS_BORN : "MS_BORN", + MS_ACTIVE : "MS_ACTIVE", + MS_NOUSER : "MS_NOUSER", +} + class FileSystem(CrashBaseClass): __types__ = [ 'struct dio *', 'struct buffer_head *' ] @@ -37,7 +98,7 @@ def _register_end_bio_bh_io_sync(cls, sym): @export @staticmethod - def super_fstype(sb): + def super_fstype(sb: gdb.Value) -> str: """ Returns the file system type's name for a given superblock. 
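The hunk below adds ``super_flags``, which feeds ``s_flags`` together with the
``SB_FLAGS`` table above to ``crash.util.decode_flags``. Roughly, the effect is
the following (a sketch only; the exact separator and formatting are up to
``decode_flags``, which is not part of this patch):

    # Illustrative approximation of decoding s_flags by hand:
    flags = MS_RDONLY | MS_RELATIME
    names = [name for bit, name in SB_FLAGS.items() if flags & bit]
    print("|".join(names))     # e.g. "MS_RDONLY|MS_RELATIME"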
@@ -50,6 +111,22 @@ def super_fstype(sb): """ return sb['s_type']['name'].string() + @export + @staticmethod + def super_flags(sb: gdb.Value) -> str: + """ + Returns the flags associated with the given superblock. + + Args: + sb (gdb.Value): The struct super_block for + which to return the flags. + + Returns: + str: The flags field in human-readable form. + + """ + return decode_flags(sb['s_flags'], SB_FLAGS) + @export @classmethod def register_buffer_head_decoder(cls, sym, decoder): From d84d8c8d9c09f008637b5ae79757902d1af9cf11 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Tue, 18 Sep 2018 05:28:41 -0400 Subject: [PATCH 108/367] crash.subsystem.filesystem: add for_each_super_block, get_super_block Signed-off-by: Jeff Mahoney --- crash/subsystem/filesystem/__init__.py | 54 +++++++++++++++++++++++++- 1 file changed, 52 insertions(+), 2 deletions(-) diff --git a/crash/subsystem/filesystem/__init__.py b/crash/subsystem/filesystem/__init__.py index fbba96ec0e0..1ba7151c5cd 100644 --- a/crash/subsystem/filesystem/__init__.py +++ b/crash/subsystem/filesystem/__init__.py @@ -1,13 +1,17 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +from typing import Iterable, Union + import gdb -from crash.util import container_of, decode_flags +from crash.util import container_of, get_typed_pointer, decode_flags from crash.infra import CrashBaseClass, export from crash.types.list import list_for_each_entry from crash.subsystem.storage import block_device_name from crash.subsystem.storage import Storage as block +AddressSpecifier = Union[int, str, gdb.Value] + MS_RDONLY = 1 MS_NOSUID = 2 MS_NODEV = 4 @@ -71,7 +75,9 @@ class FileSystem(CrashBaseClass): __types__ = [ 'struct dio *', - 'struct buffer_head *' ] + 'struct buffer_head *', + 'struct super_block' ] + __symvals__ = [ 'super_blocks' ] __symbol_callbacks__ = [ ('dio_bio_end_io', '_register_dio_bio_end'), ('dio_bio_end_aio', '_register_dio_bio_end'), @@ -320,4 +326,48 @@ def decode_end_buffer_write_sync(cls, bh): } return chain + @export + @classmethod + def for_each_super_block(cls) -> Iterable[gdb.Value]: + """ + Iterate over the list of super blocks and yield each one. + + Args: + None + + Yields: + gdb.Value + """ + for sb in list_for_each_entry(cls.super_blocks, cls.super_block_type, + 's_list'): + yield sb + + @export + @classmethod + def get_super_block(cls, desc: AddressSpecifier, + force: bool=False) -> gdb.Value: + """ + Given an address description return a gdb.Value that contains + a struct super_block at that address. + + Args: + desc (gdb.Value, str, or int): The address for which to provide + a casted pointer + force (bool): Skip testing whether the value is available. + + Returns: + gdb.Value: The super_block at the requested + location + + Raises: + gdb.NotAvailableError: The target value was not available. + """ + sb = get_typed_pointer(desc, cls.super_block_type).dereference() + if not force: + try: + x = int(sb['s_dev']) + except gdb.NotAvailableError: + raise gdb.NotAvailableError(f"no superblock available at `{desc}'") + return sb + inst = FileSystem() From 76bb354bc7d4da0d787a0c6e1dd44681de2fb8a5 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Wed, 19 Sep 2018 10:49:40 +0200 Subject: [PATCH 109/367] crash.subsystem.filesystem.mount: use type from symbol when available Internally, gdb treats the type loaded from a typed symbol and a type symbol differently and wants to do the full type comparison dance. If we use the typed symbol directly, we can use a pointer comparison. 
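Roughly, the distinction being exploited (an illustrative sketch, not part of
the patch):

    t = vfsmnt.type
    if t.code == gdb.TYPE_CODE_PTR:
        t = t.target()

    t == cls.mount_type     # structural comparison: gdb compares both type trees
    t is cls.mount_type     # identity: a single pointer check once the cached
                            # type is the very gdb.Type built from the symbol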
Signed-off-by: Jeff Mahoney --- crash/subsystem/filesystem/mount.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/crash/subsystem/filesystem/mount.py b/crash/subsystem/filesystem/mount.py index 9422dc0bc36..b9578985f6f 100644 --- a/crash/subsystem/filesystem/mount.py +++ b/crash/subsystem/filesystem/mount.py @@ -62,6 +62,11 @@ def for_each_mount_nsproxy(self, task): def real_mount(cls, vfsmnt): if (vfsmnt.type == cls.mount_type or vfsmnt.type == cls.mount_type.pointer()): + t = vfsmnt.type + if t.code == gdb.TYPE_CODE_PTR: + t = t.target() + if t is not cls.mount_type: + cls.mount_type = t return vfsmnt return container_of(vfsmnt, cls.mount_type, 'mnt') From 4eb1231acade950131d58f5bdd08d52501a97c2f Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 3 May 2019 21:07:17 -0400 Subject: [PATCH 110/367] crash.subsystem.filesystem.mount: use crash.util.decode_flags Now that we have crash.util.decode_flags, we can use that instead of open-coding it in mount_flags. Signed-off-by: Jeff Mahoney --- crash/subsystem/filesystem/mount.py | 63 +++++++++++------------------ 1 file changed, 24 insertions(+), 39 deletions(-) diff --git a/crash/subsystem/filesystem/mount.py b/crash/subsystem/filesystem/mount.py index b9578985f6f..2407dceb1dd 100644 --- a/crash/subsystem/filesystem/mount.py +++ b/crash/subsystem/filesystem/mount.py @@ -6,7 +6,7 @@ from crash.infra import CrashBaseClass, export from crash.subsystem.filesystem import super_fstype from crash.types.list import list_for_each_entry -from crash.util import container_of +from crash.util import container_of, decode_flags, struct_has_member MNT_NOSUID = 0x01 MNT_NODEV = 0x02 @@ -20,6 +20,25 @@ MNT_SHARED = 0x1000 MNT_UNBINDABLE = 0x2000 +MNT_FLAGS = { + MNT_NOSUID : "MNT_NOSUID", + MNT_NODEV : "MNT_NODEV", + MNT_NOEXEC : "MNT_NOEXEC", + MNT_NOATIME : "MNT_NOATIME", + MNT_NODIRATIME : "MNT_NODIRATIME", + MNT_RELATIME : "MNT_RELATIME", + MNT_READONLY : "MNT_READONLY", +} + +MNT_FLAGS_HIDDEN = { + MNT_SHRINKABLE : "[MNT_SHRINKABLE]", + MNT_WRITE_HOLD : "[MNT_WRITE_HOLD]", + MNT_SHARED : "[MNT_SHARED]", + MNT_UNBINDABLE : "[MNT_UNBINDABLE]", +} +MNT_FLAGS_HIDDEN.update(MNT_FLAGS) + + class Mount(CrashBaseClass): __types__ = [ 'struct mount', 'struct vfsmount' ] __symvals__ = [ 'init_task' ] @@ -73,45 +92,11 @@ def real_mount(cls, vfsmnt): @export @classmethod def mount_flags(cls, mnt, show_hidden=False): - flags = int(mnt['mnt_flags']) - - if flags & MNT_READONLY: - flagstr = "ro" - else: - flagstr = "rw" - - if flags & MNT_NOSUID: - flagstr += ",nosuid" - - if flags & MNT_NODEV: - flagstr += ",nodev" - - if flags & MNT_NOEXEC: - flagstr += ",noexec" - - if flags & MNT_NOATIME: - flagstr += ",noatime" - - if flags & MNT_NODIRATIME: - flagstr += ",nodiratime" - - if flags & MNT_RELATIME: - flagstr += ",relatime" - + if struct_has_member(mnt, 'mnt'): + mnt = mnt['mnt'] if show_hidden: - if flags & MNT_SHRINKABLE: - flagstr += ",[MNT_SHRINKABLE]" - - if flags & MNT_WRITE_HOLD: - flagstr += ",[MNT_WRITE_HOLD]" - - if flags & MNT_SHARED: - flagstr += ",[MNT_SHARED]" - - if flags & MNT_UNBINDABLE: - flagstr += ",[MNT_UNBINDABLE]" - - return flagstr + return decode_flags(mnt['mnt_flags'], MNT_FLAGS_HIDDEN, ",") + return decode_flags(mnt['mnt_flags'], MNT_FLAGS, ",") @export @staticmethod From fade53887f9c9e2d4d2ab566c4fabc0b0b840c16 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Tue, 30 Apr 2019 22:34:45 -0400 Subject: [PATCH 111/367] crash.subsystem.filesystem: include sector number when describing dio bio Knowing a particular bio is dio isn't 
entirely helpful when there are multiples of them. This includes the sector number as well. Signed-off-by: Jeff Mahoney --- crash/subsystem/filesystem/__init__.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/crash/subsystem/filesystem/__init__.py b/crash/subsystem/filesystem/__init__.py index 1ba7151c5cd..de92f6f0d65 100644 --- a/crash/subsystem/filesystem/__init__.py +++ b/crash/subsystem/filesystem/__init__.py @@ -193,8 +193,9 @@ def decode_dio_bio(cls, bio): offset = dio['block_in_file'] << dio['blkbits'] chain = { - 'description' : "{:x} bio: Direct I/O for {} inode {} on {}".format( - int(bio), fstype, dio['inode']['i_ino'], dev), + 'description' : "{:x} bio: Direct I/O for {} inode {}, sector {} on {}".format( + int(bio), fstype, dio['inode']['i_ino'], + bio['bi_sector'], dev), 'bio' : bio, 'dio' : dio, 'fstype' : fstype, From ff9679a17ca989027f1e1d78be0877d4e82c1260 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 3 May 2019 15:15:09 -0400 Subject: [PATCH 112/367] crash.subsystem.{filesystem,storage}: refactor bio decoders The current bio decoders are essentially open coded everywhere. This commit formalizes the interface to make it easier to extend. Signed-off-by: Jeff Mahoney --- crash/subsystem/filesystem/__init__.py | 218 ----------------- crash/subsystem/filesystem/decoders.py | 138 +++++++++++ crash/subsystem/filesystem/ext3.py | 80 +++--- crash/subsystem/storage/__init__.py | 89 +------ crash/subsystem/storage/decoders.py | 296 +++++++++++++++++++++++ crash/subsystem/storage/device_mapper.py | 192 +++++++-------- 6 files changed, 559 insertions(+), 454 deletions(-) create mode 100644 crash/subsystem/filesystem/decoders.py create mode 100644 crash/subsystem/storage/decoders.py diff --git a/crash/subsystem/filesystem/__init__.py b/crash/subsystem/filesystem/__init__.py index de92f6f0d65..32b74dec738 100644 --- a/crash/subsystem/filesystem/__init__.py +++ b/crash/subsystem/filesystem/__init__.py @@ -78,30 +78,6 @@ class FileSystem(CrashBaseClass): 'struct buffer_head *', 'struct super_block' ] __symvals__ = [ 'super_blocks' ] - __symbol_callbacks__ = [ - ('dio_bio_end_io', '_register_dio_bio_end'), - ('dio_bio_end_aio', '_register_dio_bio_end'), - ('mpage_end_io', '_register_mpage_end_io'), - ('end_bio_bh_io_sync', '_register_end_bio_bh_io_sync') ] - - buffer_head_decoders = {} - - @classmethod - def _register_dio_bio(cls, symval): - block.register_bio_decoder(cls.dio_bio_end, cls.decode_dio_bio) - - @classmethod - def _register_dio_bio_end(cls, sym): - block.register_bio_decoder(sym, cls.decode_dio_bio) - - @classmethod - def _register_mpage_end_io(cls, sym): - block.register_bio_decoder(sym, cls.decode_mpage) - - @classmethod - def _register_end_bio_bh_io_sync(cls, sym): - block.register_bio_decoder(sym, cls.decode_bio_buffer_head) - @export @staticmethod def super_fstype(sb: gdb.Value) -> str: @@ -133,200 +109,6 @@ def super_flags(sb: gdb.Value) -> str: """ return decode_flags(sb['s_flags'], SB_FLAGS) - @export - @classmethod - def register_buffer_head_decoder(cls, sym, decoder): - """ - Registers a buffer_head decoder with the filesystem subsystem. - - A buffer_head decoder is a method thats acepts a buffer_head, - potentially interprets the private members of the buffer_head, - and returns a dictionary. The only mandatory member of the - dictionary is 'description' which contains a human-readable - description of the purpose of this buffer_head. 
- - If the buffer_head is part of a stack, the 'next' item should contain - the next object in the stack. It does not necessarily need to be - a buffer_head. It does need to have a 'decoder' item declared - that will accept the given object. The decoder does not need to - be registered unless it will be a top-level decoder. - - Other items can be added as-needed to allow informed callers - to obtain direct informatiom. - - Args: - sym (gdb.Value): - The kernel function used as buffer_head->b_h_end_io callback - """ - - cls.buffer_head_decoders[sym] = decoder - - @classmethod - def decode_dio_bio(cls, bio): - """ - Decodes a bio used for direct i/o. - - This method decodes a bio generated by the direct-io component of - the file system subsystem. The bio can either have been submitted - directly or asynchronously. - - Args: - bio(gdb.Value): The struct bio to be decoded, generated - by the direct i/o component - - Returns: - dict: Contains the following items: - - description (str): Human-readable description of the bio - - bio (gdb.Value): The struct bio being decoded - - dio (gdb.Value): The direct i/o component of - the bio - - fstype (str): The name of the file system which submitted - this bio - - inode (gdb.Value): The struct inode, if any, - that owns the file associated with this bio - - offset (int): The offset within the file, in bytes - - devname (str): The device name associated with this bio - """ - dio = bio['bi_private'].cast(cls.dio_p_type) - fstype = cls.super_fstype(dio['inode']['i_sb']) - dev = block_device_name(dio['inode']['i_sb']['s_bdev']) - offset = dio['block_in_file'] << dio['blkbits'] - - chain = { - 'description' : "{:x} bio: Direct I/O for {} inode {}, sector {} on {}".format( - int(bio), fstype, dio['inode']['i_ino'], - bio['bi_sector'], dev), - 'bio' : bio, - 'dio' : dio, - 'fstype' : fstype, - 'inode' : dio['inode'], - 'offset' : offset, - 'devname' : dev, - } - return chain - - @classmethod - def decode_mpage(cls, bio): - """ - Decodes a bio used for multipage i/o. - - This method decodes a bio generated by the mpage component of - the file system subsystem. - - Args: - bio(gdb.Value): The struct bio to be decoded, generated - by the mpage component - - Returns: - dict: Contains the following items: - - description (str): Human-readable description of the bio - - bio (gdb.Value): The struct bio being decoded - - fstype (str): The name of the file system which submitted - this bio - - inode (gdb.Value): The struct inode, if any, - that owns the file associated with this bio - """ - inode = bio['bi_io_vec'][0]['bv_page']['mapping']['host'] - fstype = cls.super_fstype(inode['i_sb']) - chain = { - 'description' : - "{:x} bio: Multipage I/O: inode {}, type {}, dev {}".format( - int(bio), inode['i_ino'], fstype, - block_device_name(bio['bi_bdev'])), - 'bio' : bio, - 'fstype' : fstype, - 'inode' : inode, - } - return chain - - @classmethod - def decode_bio_buffer_head(cls, bio): - """ - Decodes a bio used to perform i/o for buffer_heads - - This method decodes a bio generated by buffer head submission. - - Args: - bio(gdb.Value): The struct bio to be decoded, generated - by buffer head submission - - Returns: - dict: Contains the following items: - - description (str): Human-readable description of the bio - - bio (gdb.Value): The struct bio being decoded - - next (gdb.Value): The buffer_head that - initiated this bio. 
- - decoder (gdb.Value): - A decoder for the buffer head - """ - bh = bio['bi_private'].cast(cls.buffer_head_p_type) - chain = { - 'description' : - "{:x} bio: Bio representation of buffer head".format(int(bio)), - 'bio' : bio, - 'next' : bh, - 'decoder' : cls.decode_buffer_head, - } - - return chain - - @classmethod - def decode_buffer_head(cls, bh): - """ - Decodes a struct buffer_head - - This method decodes a struct buffer_head, using an - implementation-specific decoder, if available - - Args: - bio(gdb.Value): The struct buffer_head to be - decoded. - - Returns: - dict: Minimally contains the following items. - - description (str): Human-readable description of the bio - - bh (gdb.Value): The struct buffer_head - Additional items may be available based on the - implmentation-specific decoder. - """ - endio = bh['b_end_io'] - try: - return cls.buffer_head_decoders[endio](bh) - except KeyError: - pass - desc = "{:x} buffer_head: for dev {}, block {}, size {} (undecoded)".format( - int(bh), block_device_name(bh['b_bdev']), - bh['b_blocknr'], bh['b_size']) - chain = { - 'description' : desc, - 'bh' : bh, - } - return chain - - @classmethod - def decode_end_buffer_write_sync(cls, bh): - """ - Decodes a struct buffer_head submitted by file systems for routine - synchronous writeback. - - Args: - bio(gdb.Value): The struct buffer_head to be - decoded. - - Returns: - dict: Minimally contains the following items. - - description (str): Human-readable description of the bio - - bh (gdb.Value): The struct buffer_head - """ - desc = ("{:x} buffer_head: for dev {}, block {}, size {} (unassociated)" - .format(block_device_name(bh['b_bdev']), - bh['b_blocknr'], bh['b_size'])) - chain = { - 'description' : desc, - 'bh' : bh, - } - return chain - @export @classmethod def for_each_super_block(cls) -> Iterable[gdb.Value]: diff --git a/crash/subsystem/filesystem/decoders.py b/crash/subsystem/filesystem/decoders.py new file mode 100644 index 00000000000..badd71189e2 --- /dev/null +++ b/crash/subsystem/filesystem/decoders.py @@ -0,0 +1,138 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +import gdb +from crash.util.symbols import Types +from crash.subsystem.storage import block_device_name +from crash.subsystem.storage.decoders import Decoder, decode_bh +from crash.subsystem.filesystem import super_fstype + +class DIOBioDecoder(Decoder): + """ + Decodes a bio used for direct i/o. + + This method decodes a bio generated by the direct-io component of + the file system subsystem. The bio can either have been submitted + directly or asynchronously. + + Args: + bio(gdb.Value): The struct bio to be decoded, generated + by the direct i/o component + """ + + types = Types([ 'struct dio *' ]) + __endio__ = [ 'dio_bio_end_io', 'dio_bio_end_io' ] + description = "{:x} bio: Direct I/O for {} inode {}, sector {} on {}" + + def __init__(self, bio): + super().__init__() + self.bio = bio + + def interpret(self): + self.dio = self.bio['bi_private'].cast(self.types.dio_p_type) + self.fstype = super_fstype(self.dio['inode']['i_sb']) + self.dev = block_device_name(self.dio['inode']['i_sb']['s_bdev']) + self.offset = self.dio['block_in_file'] << self.dio['blkbits'] + + def __str__(self): + return self.description.format(int(self.bio), self.fstype, + self.dio['inode']['i_ino'], + self.bio['bi_sector'], self.dev) + + def __next__(self): + return None + +DIOBioDecoder.register() + +class DecodeMPage(Decoder): + """ + Decodes a bio used for multipage i/o. 
+ + This method decodes a bio generated by the mpage component of + the file system subsystem. + + Args: + bio(gdb.Value): The struct bio to be decoded, generated + by the mpage component + + Returns: + dict: Contains the following items: + - description (str): Human-readable description of the bio + - bio (gdb.Value): The struct bio being decoded + - fstype (str): The name of the file system which submitted + this bio + - inode (gdb.Value): The struct inode, if any, + that owns the file associated with this bio + """ + + __endio__ = 'mpage_end_io' + + description = "{:x} bio: Multipage I/O: inode {}, type {}, dev {}" + + def __init__(self, bio): + super().__init__() + + self.bio = bio + + def interpret(self): + self.inode = bio['bi_io_vec'][0]['bv_page']['mapping']['host'] + self.fstype = super_fstype(inode['i_sb']) + + def __str__(self): + return self.description.format(int(self.bio), self.inode['i_ino'], + self.fstype, + block_device_name(bio['bi_bdev'])) + +DecodeMPage.register() + +class DecodeBioBH(Decoder): + """ + Decodes a bio used to perform i/o for buffer_heads + + This method decodes a bio generated by buffer head submission. + + Args: + bio(gdb.Value): The struct bio to be decoded, generated + by buffer head submission + + """ + types = Types([ 'struct buffer_head *' ]) + __endio__ = 'end_bio_bh_io_sync' + description = "{:x} bio: Bio representation of buffer head" + + def __init__(self, bio): + super().__init__() + self.bio = bio + + def interpret(self): + self.bh = self.bio['bi_private'].cast(self.types.buffer_head_p_type) + + def __str__(self): + return self.description.format(int(bio)) + + def __next__(self): + return decode_bh(self.bh) + +DecodeBioBH.register() + +class DecodeSyncWBBH(Decoder): + """ + Decodes a struct buffer_head submitted by file systems for routine + synchronous writeback. + + Args: + bio(gdb.Value): The struct buffer_head to be + decoded. + """ + __endio__ = 'end_buffer_write_sync' + description = "{:x} buffer_head: for dev {}, block {}, size {} (unassociated)" + + def __init__(self, bh): + super().__init__() + self.bh = bh + + def __str__(self): + self.description.format(block_device_name(bh['b_bdev']), + self.bh['b_blocknr'], self.bh['b_size']) + +DecodeSyncWBBH.register() diff --git a/crash/subsystem/filesystem/ext3.py b/crash/subsystem/filesystem/ext3.py index 71f45e77613..320804d6228 100644 --- a/crash/subsystem/filesystem/ext3.py +++ b/crash/subsystem/filesystem/ext3.py @@ -5,53 +5,33 @@ from crash.infra import CrashBaseClass from crash.util import get_symbol_value -from crash.subsystem.filesystem import register_buffer_head_decoder - -class Ext3(CrashBaseClass): - __symbol_callbacks__ = [ - ('journal_end_buffer_io_sync', '_register_journal_buffer_io_sync') ] - - @classmethod - def _register_journal_buffer_io_sync(cls, sym): - # ext3/ext4 and jbd/jbd2 share names but not implementations - b = gdb.block_for_pc(int(sym.value().address)) - sym = get_symbol_value('journal_end_buffer_io_sync', b) - - register_buffer_head_decoder(sym, cls.decode_journal_buffer_io_sync) - - @classmethod - def decode_journal_buffer_io_sync(cls, bh): - """ - Decodes an ext3 journal buffer - - This method decodes a struct buffer_head with and end_io callback - of journal_end_buffer_io_sync. 
- - Args: - bh (gdb.Value): The struct buffer_head to - decode - - Returns: - dict: Contains the following items: - - description (str): Human-readable description of - the buffer head - - bh (gdb.Value): The buffer head being - decoded - - fstype (str): The name of the file system type being decoded - - devname (str): The name of the device the file system uses - - offset (int): The offset, in bytes, of the block described - - length (int): The length of the block described - """ - - fstype = "journal on ext3" - devname = block_device_name(bh['b_bdev']) - chain = { - 'bh' : bh, - 'description' : "{:x} buffer_head: {} journal block (jbd) on {}".format(int(bh), fstype, devname), - 'fstype' : fstype, - 'devname' : devname, - 'offset' : int(bh['b_blocknr']) * int(bh['b_size']), - 'length' : int(bh['b_size']) - } - - return chain +from crash.subsystem.storage.decoders import Decoder + +class Ext3Decoder(Decoder): + """ + Decodes an ext3 journal buffer + + This decodes a struct buffer_head with an end_io callback + of journal_end_buffer_io_sync. + + Args: + bh (gdb.Value): The struct buffer_head to decode + """ + + __endio__ = 'journal_end_buffer_io_sync' + description = "{:x} buffer_head: {} journal block (jbd) on {}" + + def __init__(self, bh): + super().__init__() + self.bh = bh + + def interpret(self): + self.fstype = "journal on ext3" + self.devname = block_device_name(self.bh['b_bdev']) + self.offset = int(self.bh['b_blocknr']) * int(self.bh['b_size']) + self.length = int(self.bh['b_size']) + + def __str__(self): + return self.description(int(self.bh), fstype, devname) + +Ext3Decoder.register() diff --git a/crash/subsystem/storage/__init__.py b/crash/subsystem/storage/__init__.py index d210011edb4..5228e1d290c 100644 --- a/crash/subsystem/storage/__init__.py +++ b/crash/subsystem/storage/__init__.py @@ -1,11 +1,14 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +from typing import Iterable + import gdb from crash.util import container_of from crash.infra import CrashBaseClass, export from crash.types.classdev import for_each_class_device +from . import decoders import crash.exceptions class Storage(CrashBaseClass): @@ -23,8 +26,6 @@ class Storage(CrashBaseClass): ( 'part_type', '_check_types' ) ] __type_callbacks__ = [ ('struct device_type', '_check_types' ) ] - bio_decoders = {} - @classmethod def _check_types(cls, result): try: @@ -43,45 +44,7 @@ def _check_types(cls, result): @export @classmethod - def register_bio_decoder(cls, sym, decoder): - """ - Registers a bio decoder with the storage subsystem. - - A bio decoder is a method that accepts a bio, potentially - interprets the private members of the bio, and returns - a dictionary. The only mandatory member of the dictionary - is 'description' which contains a human-readable description - of the purpose of this bio. - - If the bio is part of a stack, the 'next' item should contain - the next object in the stack. It does not necessarily need - to be a bio. It does need to have a 'decoder' item declared - that will accept the given object. The decoder does not - need to be registered unless it will be a top-level decoder. - - Other items can be added as-needed to allow informed callers - to obtain direct information. 
- - Args: - sym (gdb.Symbol or gdb.Value): - The Symbol or Value describing a kernel function used as - a bio->b_end_io callback - decoder (method): A Python method that accepts a - gdb.Value(struct bio) - - Raises: - TypeError: sym is not a gdb.Symbol or gdb.Value - """ - - if isinstance(sym, gdb.Symbol): - sym = sym.value().address - elif not isinstance(sym, gdb.Value): - raise TypeError("register_bio_decoder expects gdb.Symbol or gdb.Value") - cls.bio_decoders[int(sym)] = decoder - - @export - @classmethod - def for_each_bio_in_stack(cls, bio): + def for_each_bio_in_stack(cls, bio: gdb.Value) -> Iterable[decoders.Decoder]: """ Iterates and decodes each bio involved in a stacked storage environment @@ -90,7 +53,7 @@ def for_each_bio_in_stack(cls, bio): processed by each level's decoder. The stack will be interrupted if an encountered object doesn't have a decoder specified. - See register_bio_decoder for more detail. + See crash.subsystem.storage.decoder.register_decoder for more detail. Args: bio (gdb.Value): The initial struct bio to start @@ -102,44 +65,10 @@ def for_each_bio_in_stack(cls, bio): Additional items may be available based on the implmentation-specific decoder. """ - first = cls.bio_decoders[int(bio['bi_end_io'])](bio) - if first: - yield first - while 'decoder' in first: - first = first['decoder'](first['next']) - yield first - - @export - @classmethod - def decode_bio(cls, bio): - """ - Decodes a single bio, if possible - - This method will return a dictionary describing a single bio - after decoding it using a registered decoder, if available. - - If no decoder is registered, a generic description will be - returned in the dictionary's 'description' field. - - Args: - bio (gdb.Value): The bio to decode - - Returns: - dict: Contains, minimally, the following item. - - description (str): A human-readable description of the bio. - Additional items may be available based on the - implmentation-specific decoder. - """ - - try: - return cls.bio_decoders[int(bio['bi_end_io'])](bio) - except KeyError: - chain = { - 'description' : "{:x} bio: undecoded bio on {} ({})".format( - int(bio), block_device_name(bio['bi_bdev']), - bio['bi_end_io']), - } - return chain + decoder = decoders.decode_bio(bio) + while decoder is not None: + yield decoder + decoder = next(decoder) @export def dev_to_gendisk(self, dev): diff --git a/crash/subsystem/storage/decoders.py b/crash/subsystem/storage/decoders.py new file mode 100644 index 00000000000..4bd61eae73a --- /dev/null +++ b/crash/subsystem/storage/decoders.py @@ -0,0 +1,296 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +import gdb +from typing import Union, List, Dict +from crash.infra.lookup import SymbolCallback + +EndIOSpecifier = Union[int, str, List[str], gdb.Value, gdb.Symbol, None] + +class Decoder(CrashBaseClass): + """Decoder objects are used to unwind the storage stack + + They are relatively lightweight at runtime, meaning that the object + is initialized but not decoded until it's needed. The string will + be formatted each time, but each Decoder's interpret() method will + be called once. + + Attributes: + interpreted (bool): Whether the contents of this Decoder have already + been interpreted + """ + + __endio__: EndIOSpecifier = None + + def __init__(self): + self.interpreted = False + + def interpret(self) -> None: + """ + Interpret the Decoder object + + Rather than populate all the fields when they may not be used, + we delay interpreting the object until the fields are needed. 
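For example, a subclass might do no more than this (illustrative only;
``self.dev`` is a made-up attribute name):

.. code-block:: python

    def interpret(self) -> None:
        # Runs at most once, triggered by __getattr__ on first access.
        self.dev = block_device_name(self.bio['bi_bdev'])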
+ + This method will examine the object passed to the derived class's + constructor and produce the attributes required for each object. + """ + pass + + def __getattr__(self, name): + if self.interpreted: + raise AttributeError(f"No such attribute `{name}'") + + self.interpret() + self.interpreted = True + return getattr(self, name) + + @classmethod + def register(cls): + """ + Registers a decoder with the storage decoder subsystem. + + Each Decoder contains the name of an endio routine. When + an object that needs decoding is encountered, the endio + routine contained in the object is used to look up the decoder + for that object. + """ + register_decoder(cls.__endio__, cls) + + def __str__(self) -> str: + """ + The printable description of this Decoder. Typical Decoders + include the address of the object, the block device it uses, + and the location(s) affected by the object. + """ + pass + + def __next__(self): + """ + For stacked storage, the object may have been generated as part + of an operation on another object. e.g. a bio generated by + buffer_head submission, a request generated by bio submission, + a bio generated by another bio being cloned by device mapper, etc. + + In these scenarios, the __next__ method can be used to pass the + next Decoder object in the chain. It is not necessary to know + the source of the object being decoded -- only its type is + necessary. + + Typical uses will be 'return decode_bh(self.bh)' or + 'return decode_bio(self.next_bio)' + + If there are no objects beyond this one, it does not need to be + overridden. + """ + return None + +class BadBHDecoder(Decoder): + """ + Placeholder decoder for bad buffer_head pointers + + Rather than raise a NotAvailableError during decoding, we use a + BadBHDecoder decoder to document where in the chain there was an + invalid buffer_head. + """ + description = "{:x} bh: invalid buffer_head" + + def __init__(self, bh): + """ + Initialize a Decoder for `struct buffer_head' that describes + a bad pointer + + Args: + bh (gdb.Value): The buffer_head to decode + """ + super().__init__() + self.bh = bh + + def __str__(self): + return self.description.format(int(self.bh)) + +class GenericBHDecoder(Decoder): + """ + Decodes a bio that references a struct buffer_head + + This method decodes a generic struct buffer_head, when no + implementation-specific decoder is available + + Attributes: + bh (gdb.Value): + The buffer head that was referenced from the bio + """ + + description = "{:x} buffer_head: for dev {}, block {}, size {} (undecoded)" + + def __init__(self, bh: gdb.Value): + """ + Initalize a Decoder for `struct buffer_head' without any file + system information associated with it + + Args: + bio(gdb.Value): + The struct buffer_head to be decoded. + """ + super().__init__() + self.bh = bh + + def interpret(self): + self.block_device = block_device_name(self.bh['b_bdev']) + + def __str__(self): + return self.description.format(int(self.bh), self.block_device, + self.bh['b_blocknr'], self.bh['b_size']) + +_decoders: Dict[int, Decoder] = dict() + +def register_decoder(endio: EndIOSpecifier, decoder: Decoder) -> None: + """ + Registers a bio/buffer_head decoder with the storage subsystem. + + A decoder is a class that accepts a bio, buffer_head, or other object, + potentially interprets the private members of the object, and + returns a Decoder object that describes it. + + The only mandatory part of a Decoder is the __str__ method to + print the description. 
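A minimal registration might look like this (a sketch; the endio routine name
and the description string are made up):

.. code-block:: python

    class MyBioDecoder(Decoder):
        __endio__ = 'my_driver_end_io'      # hypothetical bi_end_io callback
        description = "{:x} bio: handled by my driver"

        def __init__(self, bio):
            super().__init__()
            self.bio = bio

        def __str__(self):
            return self.description.format(int(self.bio))

    MyBioDecoder.register()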
+ + If the bio is part of a stack, the __next__ method will contain + the next Decoder object in the stack. It does not necessarily need + to be a bio. The Decoder does not need to be registered unless it + will be a top-level decoder. + + Other attributes can be added as-needed to allow informed callers + to obtain direct information. + + Args: + endio (str, list of str, gdb.Symbol, gdb.Value, or int): The function + used as an endio callback. + + The str or list of str arguments are used to register a callback + such that the Decoder is registered when the symbol is available. + + The gdb.Symbol, gdb.Value, and int versions are to be used + once the symbol is available for resolution. + + If in doubt, use the names instead of the symbols objects. + + decoder (Decoder): The decoder class used to handle this object. + + """ + debug = False + if isinstance(endio, str): + if debug: + print(f"Registering {endio} as callback") + x = SymbolCallback(endio, lambda a: register_decoder(a, decoder)) + return + elif isinstance(endio, list) and isinstance(endio[0], str): + for sym in endio: + if debug: + print(f"Registering {sym} as callback") + x = SymbolCallback(sym, lambda a: register_decoder(a, decoder)) + return + + if isinstance(endio, gdb.Symbol): + endio = endio.value() + + if isinstance(endio, gdb.Value): + endio = int(endio.address) + + if debug: + print(f"Registering {endio:#x} for real") + + _decoders[endio] = decoder + +class BadBioDecoder(Decoder): + """ + Placeholder decoder for bad bio pointers + + Rather than raise a NotAvailableError during decoding, we use a + BadBioDecoder decoder to document where in the chain there was an + invalid bio. + """ + description = "{:x} bio: invalid bio" + + def __init__(self, bio): + """ + Initialize a Decoder for `struct bio' that describes a bad pointer + + Args: + bio (gdb.Value): The bio to decode + """ + super().__init__() + self.bio = bio + + def __str__(self): + return self.description.format(int(self.bio)) + +class GenericBioDecoder(Decoder): + """ + Placeholder decoder for when we have a valid bio but nothing to decode it + """ + description = "{:x} bio: undecoded bio on {} ({})" + def __init__(self, bio): + """ + Initialize a Decoder for `struct bio' for a bio with no other decoder + + Args: + bio (gdb.Value): The bio to decode + """ + super().__init__() + self.bio = bio + + def __str__(self): + return self.description.format(int(self.bio), + block_device_name(self.bio['bi_bdev']), + bio['bi_end_io']) + +def decode_bio(bio: gdb.Value) -> Decoder: + """ + Decodes a single bio, if possible + + This method will return a Decoder object describing a single bio + after decoding it using a registered decoder, if available. + + If no decoder is registered, a generic description will be used. + + Args: + bio (gdb.Value): The bio to decode + + Returns: + BadBioDecoder: The bio was not valid + GenericBioDecoder: The bio has no other decoder + Decoder-derived object: The decoder appropriate for this bio type + """ + + try: + return _decoders[int(bio['bi_end_io'])](bio) + except KeyError: + return GenericBioDecoder(bio) + except gdb.NotAvailableError: + return BadBioDecoder(bio) + +def decode_bh(bh: gdb.Value) -> Decoder: + """ + Decodes a single buffer_head, if possible + + This method will return a Decoder object describing a single buffer_head + after decoding it using a registered decoder, if available. + + If no decoder is registered, a generic description will be used. 
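A rough usage sketch (``bh`` is assumed to be a ``gdb.Value`` describing a
``struct buffer_head`` obtained elsewhere):

.. code-block:: python

    from crash.subsystem.storage.decoders import decode_bh

    decoder = decode_bh(bh)
    print(str(decoder))        # human-readable description
    chained = next(decoder)    # the next Decoder in the stack, or None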
+
+    Args:
+        bh (gdb.Value): The buffer_head to decode
+
+    Returns:
+        BadBHDecoder: The buffer_head was not valid
+        GenericBHDecoder: The buffer_head has no other decoder
+        Decoder-derived object:
+            The decoder appropriate for this buffer_head type
+    """
+    try:
+        return _decoders[int(bh['b_end_io'])](bh)
+    except KeyError:
+        return GenericBHDecoder(bh)
+    except gdb.NotAvailableError:
+        return BadBHDecoder(bh)
diff --git a/crash/subsystem/storage/device_mapper.py b/crash/subsystem/storage/device_mapper.py
index 0030bf4cd7c..cd535906ec7 100644
--- a/crash/subsystem/storage/device_mapper.py
+++ b/crash/subsystem/storage/device_mapper.py
@@ -4,76 +4,47 @@
 import gdb
 
 from crash.infra import CrashBaseClass
-from crash.subsystem.storage import Storage as block
 from crash.subsystem.storage import block_device_name
+from crash.subsystem.storage.decoders import Decoder, decode_bio
 
-class DeviceMapper(CrashBaseClass):
-    __types__ = [ 'struct dm_rq_clone_bio_info *',
-                  'struct dm_target_io *' ]
-    __symbol_callbacks__ = [
-        ('end_clone_bio', '_register_end_clone_bio'),
-        ('clone_endio', '_register_clone_endio') ]
+class ClonedBioReqDecoder(Decoder):
+    """
+    Decodes a request-based device mapper cloned bio
 
-    @classmethod
-    def _register_end_clone_bio(cls, sym):
-        if 'clone' in cls.dm_rq_clone_bio_info_p_type.target():
-            getter = cls._get_clone_bio_rq_info_3_7
-        else:
-            getter = cls._get_clone_bio_rq_info_old
-        cls._get_clone_bio_rq_info = getter
-        block.register_bio_decoder(sym, cls.decode_clone_bio_rq)
+    This decodes a cloned bio generated by request-based device mapper targets.
 
-    @classmethod
-    def _register_clone_endio(cls, sym):
-        if 'clone' in cls.dm_target_io_p_type.target():
-            getter = cls._get_clone_bio_tio_3_15
-        else:
-            getter = cls._get_clone_bio_tio_old
-        cls._get_clone_bio_tio = getter
-        block.register_bio_decoder(sym, cls.decode_clone_bio)
+    Args:
+        bio(gdb.Value): A struct bio generated by a
+            request-based device mapper target
 
-    @classmethod
-    def decode_clone_bio_rq(cls, bio):
-        """
-        Decodes a request-based device mapper cloned bio
-
-        This method decodes a cloned bio generated by request-based
-        device mapper targets.
-
-        Args:
-            bio(gdb.Value): A struct bio generated by a
-                request-based device mapper target
-
-        Returns:
-            dict: Contains the following items:
-                - description (str): Human-readable description of the bio
-                - bio (gdb.Value): The provided bio
-                - tio (gdb.Value(): The struct
-                    dm_target_io for this bio
-                - next (gdb.Value): The original bio that was
-                    the source of this one
-                - decoder (method(gdb.Value)): The decoder for
-                    the original bio
-        """
-
-        info = cls._get_clone_bio_rq_info(bio)
-
-        # We can pull the related bios together here if required
-        # b = bio['bi_next']
-        # while int(b) != 0:
-        #     b = b['bi_next']
-
-        chain = {
-            'bio' : bio,
-            'tio' : info['tio'],
-            'next' : info['orig'],
-            'description' :
-                '{:x} bio: Request-based Device Mapper on {}'.format(
-                    int(bio), block_device_name(bio['bi_bdev'])),
-            'decoder' : block.decode_bio,
-        }
-
-        return chain
+    """
+    __types__ = [ 'struct dm_rq_clone_bio_info *' ]
+    __endio__ = 'end_clone_bio'
+    description = '{:x} bio: Request-based Device Mapper on {}'
+
+    _get_clone_bio_rq_info = None
+
+    def __init__(self, bio):
+        super().__init__()
+        self.bio = bio
+        cls = self.__class__
+        if cls._get_clone_bio_rq_info is None:
+            if 'clone' in cls.dm_rq_clone_bio_info_p_type.target():
+                getter = cls._get_clone_bio_rq_info_3_7
+            else:
+                getter = cls._get_clone_bio_rq_info_old
+            cls._get_clone_bio_rq_info = getter
+
+    def interpret(self):
+        self.info = self._get_clone_bio_rq_info(self.bio)
+        self.tio = self.info['tio']
+
+    def __str__(self):
+        return self.description.format(int(self.bio),
+                                        block_device_name(self.bio['bi_bdev']))
+
+    def __next__(self):
+        return decode_bio(self.info['orig'])
 
     @classmethod
     def _get_clone_bio_rq_info_old(cls, bio):
@@ -83,47 +54,56 @@ def _get_clone_bio_rq_info_3_7(cls, bio):
         return container_of(bio, cls.dm_rq_clone_bio_info_p_type, 'clone')
 
-    @classmethod
-    def decode_clone_bio(cls, bio):
-        """
-        Decodes a bio-based device mapper cloned bio
-
-        This method decodes a cloned bio generated by request-based
-        device mapper targets.
-
-        Args:
-            bio(gdb.Value): A struct bio generated by a
-                bio-based device mapper target
-
-        Returns:
-            dict: Contains the following items:
-                - description (str): Human-readable description of the bio
-                - bio (gdb.Value): The provided bio
-                - tio (gdb.Value): The struct
-                    dm_target_tio for this bio
-                - next (gdb.Value): The original bio that was
-                    the source of this one
-                - decoder (method(gdb.Value)): The decoder for the
-                    original bio
-        """
-        tio = cls._get_clone_bio_tio(bio)
-
-        next_bio = tio['io']['bio']
-
-        chain = {
-            'description' : "{:x} bio: device mapper clone: {}[{}] -> {}[{}]".format(
-                int(bio),
-                block_device_name(bio['bi_bdev']),
-                int(bio['bi_sector']),
-                block_device_name(next_bio['bi_bdev']),
-                int(next_bio['bi_sector'])),
-            'bio' : bio,
-            'tio' : tio,
-            'next' : next_bio,
-            'decoder' : block.decode_bio,
-        }
-
-        return chain
+ClonedBioReqDecoder.register()
+
+class ClonedBioDecoder(Decoder):
+    """
+    Decodes a bio-based device mapper cloned bio
+
+    This decodes a cloned bio generated by bio-based
+    device mapper targets.
+
+    Attributes:
+        bio (gdb.Value):
+            A struct bio generated by a bio-based device mapper target
+
+        next_bio (gdb.Value):
+            The struct bio that generated this one.
+
+        tio (gdb.Value):
+            The struct dm_target_io for this bio
+    """
+    __types__ = [ 'struct dm_target_io *' ]
+    _get_clone_bio_tio = None
+    __endio__ = 'clone_endio'
+    description = "{:x} bio: device mapper clone: {}[{}] -> {}[{}]"
+
+    def __init__(self, bio):
+        super().__init__()
+        self.bio = bio
+
+        cls = self.__class__
+        if cls._get_clone_bio_tio is None:
+            if 'clone' in cls.dm_target_io_p_type.target():
+                getter = cls._get_clone_bio_tio_3_15
+            else:
+                getter = cls._get_clone_bio_tio_old
+            cls._get_clone_bio_tio = getter
+
+    def interpret(self):
+        self.tio = self._get_clone_bio_tio(self.bio)
+        self.next_bio = self.tio['io']['bio']
+
+    def __str__(self):
+        return self.description.format(
+            int(self.bio),
+            block_device_name(self.bio['bi_bdev']),
+            int(self.bio['bi_sector']),
+            block_device_name(self.next_bio['bi_bdev']),
+            int(self.next_bio['bi_sector']))
+
+    def __next__(self):
+        return decode_bio(self.next_bio)
 
     @classmethod
     def _get_clone_bio_tio_old(cls, bio):
@@ -133,3 +113,5 @@ def _get_clone_bio_tio_old(cls, bio):
     def _get_clone_bio_tio_3_15(cls, bio):
         return container_of(bio['bi_private'],
                             cls.dm_clone_bio_info_p_type, 'clone')
+
+ClonedBioDecoder.register()

From 1433b5baca398ba52fe649b870cd19b78dddd79a Mon Sep 17 00:00:00 2001
From: Jeff Mahoney
Date: Mon, 6 May 2019 16:26:29 -0400
Subject: [PATCH 113/367] crash.types.vmstat: remove dead code

The versions of the static methods that build the enum name lists are
redundant since the callbacks perform that work.

Also, the static methods are using a version of the API that doesn't
exist anymore.

Signed-off-by: Jeff Mahoney
---
 crash/types/vmstat.py | 19 ++++++-------------
 1 file changed, 6 insertions(+), 13 deletions(-)

diff --git a/crash/types/vmstat.py b/crash/types/vmstat.py
index 139c3237a26..6d2cc77fd29 100644
--- a/crash/types/vmstat.py
+++ b/crash/types/vmstat.py
@@ -10,12 +10,13 @@ class VmStat(CrashBaseClass):
 
     __types__ = ['enum zone_stat_item', 'enum vm_event_item']
+    __symbols__ = [ 'vm_event_states' ]
     __type_callbacks__ = [ ('enum zone_stat_item', 'check_enum_type'),
                            ('enum vm_event_item', 'check_enum_type') ]
 
     nr_stat_items = None
     nr_event_items = None
-
+
     vm_stat_names = None
     vm_event_names = None
 
@@ -40,34 +41,26 @@ def __populate_names(cls, enum_type, items_name):
 
         for field in enum_type.fields():
             if field.enumval < nr_items:
-                names[field.enumval] = field.name
-
+                names[field.enumval] = field.name
+
         return (nr_items, names)
 
     @staticmethod
     def get_stat_names():
-        if VmStat.vm_stat_names is None:
-            VmStat.vm_stat_names = VmStat.__populate_names(
-                VmStat.nr_stat_items, "enum zone_stat_item")
         return VmStat.vm_stat_names
 
    @staticmethod
    def get_event_names():
-        if VmStat.vm_event_names is None:
-            VmStat.vm_event_names = VmStat.__populate_names(
-                VmStat.nr_event_items, "enum vm_event_item")
        return VmStat.vm_event_names
 
-    @staticmethod
+    @classmethod
     def get_events():
-        states_sym = gdb.lookup_global_symbol("vm_event_states")
         nr = VmStat.nr_event_items
         events = [0] * nr
 
         for cpu in for_each_online_cpu():
-            states = get_percpu_var(states_sym, cpu)
+            states = get_percpu_var(cls.vm_event_states, cpu)
             for item in range(0, nr):
                 events[item] += int(states["event"][item])
         return events
-

From a664805dd1a89848d83203b20da47c72925365d1 Mon Sep 17 00:00:00 2001
From: Jeff Mahoney
Date: Fri, 3 May 2019 15:17:13 -0400
Subject: [PATCH 114/367] crash: remove CrashBaseClass

This commit removes CrashBaseClass from the project entirely.
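
As a rough sketch (the type and symbol names here are only an
illustration), a module now declares what it needs at the top and
references it directly, as described below:

    from crash.util.symbols import Types, Symvals

    types = Types([ 'struct task_struct *' ])
    symvals = Symvals([ 'init_task' ])

    def init_task_comm():
        # symvals.init_task resolves to the value of the init_task
        # symbol; the pointer type is published as
        # types.task_struct_p_type.
        return symvals.init_task['comm'].string()
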
This simplifies the code immensely and means contributors don't need to understand and/or potentially debug the black magic behind the namespace, implicit singleton, exporting, and type/symbol loading in order to get started. The pattern now is to use crash.types.symbols to declare a Types, Symbols, Symvals, etc object at the top of the module that contains any needed types or symbols. These are still declared as an array of names and the naming structure (e.g. struct_name_type) is the same as with CrashBaseClass. They just appear in separate collections under separate names. Most functions will be at the module level and will reference these objects directly. If a state object is required, a class is still used but it can inherit from `object' instead. If callbacks are used on class attributes (instead of instance attributes) they must be declared outside of the class. This allows us to eliminate many classes entirely and reduce others to simple state holder classes. In some cases, like with percpu, it makes sense to just use module instance and marshall the calls from the module level to the instance explicitly. Signed-off-by: Jeff Mahoney --- crash/addrxlat.py | 19 +- crash/cache/__init__.py | 4 +- crash/cache/syscache.py | 51 +- crash/commands/__init__.py | 4 +- crash/commands/dmesg.py | 37 +- crash/infra/__init__.py | 98 --- crash/infra/lookup.py | 109 --- crash/kernel.py | 21 +- crash/subsystem/filesystem/__init__.py | 154 ++--- crash/subsystem/filesystem/btrfs.py | 54 +- crash/subsystem/filesystem/ext3.py | 2 - crash/subsystem/filesystem/mount.py | 229 +++---- crash/subsystem/storage/__init__.py | 562 ++++++++------- crash/subsystem/storage/blocksq.py | 82 ++- crash/subsystem/storage/decoders.py | 3 +- crash/subsystem/storage/device_mapper.py | 18 +- crash/types/bitmap.py | 540 +++++++-------- crash/types/classdev.py | 17 +- crash/types/cpu.py | 108 +-- crash/types/klist.py | 51 +- crash/types/list.py | 217 +++--- crash/types/module.py | 85 ++- crash/types/node.py | 74 +- crash/types/page.py | 88 ++- crash/types/percpu.py | 196 ++++-- crash/types/slab.py | 154 ++--- crash/types/task.py | 44 +- crash/types/vmstat.py | 44 +- crash/types/zone.py | 34 +- crash/util/__init__.py | 825 +++++++++++------------ tests/test_infra.py | 87 --- tests/test_infra_lookup.py | 362 +--------- tests/test_syscache.py | 2 +- 33 files changed, 1817 insertions(+), 2558 deletions(-) delete mode 100644 tests/test_infra.py diff --git a/crash/addrxlat.py b/crash/addrxlat.py index 5d00e75db27..03e24658f91 100644 --- a/crash/addrxlat.py +++ b/crash/addrxlat.py @@ -3,7 +3,6 @@ import gdb import addrxlat -from crash.infra import CrashBaseClass, export from crash.cache.syscache import utsname from crash.util import offsetof @@ -39,7 +38,7 @@ def cb_read32(self, faddr): def cb_read64(self, faddr): return int(gdb.Value(faddr.addr).cast(self.uint64_ptr).dereference()) -class CrashAddressTranslation(CrashBaseClass): +class CrashAddressTranslation(object): def __init__(self): try: target = gdb.current_target() @@ -62,14 +61,12 @@ def __init__(self): self.is_non_auto = True break - @export - def addrxlat_context(self): - return self.context +__impl = CrashAddressTranslation() +def addrxlat_context(): + return __impl.context - @export - def addrxlat_system(self): - return self.system +def addrxlat_system(): + return __impl.system - @export - def addrxlat_is_non_auto(self): - return self.is_non_auto +def addrxlat_is_non_auto(): + return __impl.is_non_auto diff --git a/crash/cache/__init__.py b/crash/cache/__init__.py index 
502299d3ebf..2afb4a5aa6e 100644 --- a/crash/cache/__init__.py +++ b/crash/cache/__init__.py @@ -7,9 +7,9 @@ import glob import importlib -from crash.infra import CrashBaseClass, autoload_submodules +from crash.infra import autoload_submodules -class CrashCache(CrashBaseClass): +class CrashCache(object): def refresh(self): pass diff --git a/crash/cache/syscache.py b/crash/cache/syscache.py index ff875a1d5e1..b5d9a666115 100644 --- a/crash/cache/syscache.py +++ b/crash/cache/syscache.py @@ -11,14 +11,14 @@ from crash.exceptions import DelayedAttributeError from crash.cache import CrashCache from crash.util import array_size -from crash.infra import export -from crash.infra.lookup import get_delayed_lookup +from crash.util.symbols import Types, Symvals, SymbolCallbacks +from crash.infra.lookup import DelayedValue class CrashUtsnameCache(CrashCache): - __symvals__ = [ 'init_uts_ns' ] + symvals = Symvals([ 'init_uts_ns' ]) def load_utsname(self): - self.utsname = self.init_uts_ns['name'] + self.utsname = self.symvals.init_uts_ns['name'] return self.utsname def init_utsname_cache(self): @@ -43,8 +43,8 @@ def __getattr__(self, name): return getattr(self.__class__, name) class CrashConfigCache(CrashCache): - __types__ = [ 'char *' ] - __symvals__ = [ 'kernel_config_data' ] + types = Types([ 'char *' ]) + symvals = Symvals([ 'kernel_config_data' ]) def __getattr__(self, name): if name == 'config_buffer': @@ -70,8 +70,8 @@ def decompress_config_buffer(self): MAGIC_END = 'IKCFG_ED' # Must cast it to char * to do the pointer arithmetic correctly - data_addr = self.kernel_config_data.address.cast(self.char_p_type) - data_len = self.kernel_config_data.type.sizeof + data_addr = self.symvals.kernel_config_data.address.cast(self.types.char_p_type) + data_len = self.symvals.kernel_config_data.type.sizeof buf_len = len(MAGIC_START) buf = self.read_buf_str(data_addr, buf_len) @@ -119,14 +119,18 @@ def __getitem__(self, name): return None class CrashKernelCache(CrashCache): - __symvals__ = [ 'avenrun' ] - __symbol_callbacks__ = [ - ( 'jiffies', 'setup_jiffies' ), - ( 'jiffies_64', 'setup_jiffies' ) ] - __delayed_values__ = [ 'jiffies' ] + symvals = Symvals([ 'avenrun' ]) jiffies_ready = False adjust_jiffies = False + + jiffies_dv = DelayedValue('jiffies') + + @property + def jiffies(self): + v = self.jiffies_dv.get() + return v + def __init__(self, config): CrashCache.__init__(self) self.config = config @@ -157,8 +161,8 @@ def format_loadavg(metrics): def get_loadavg_values(self): metrics = [] - for index in range(0, array_size(self.avenrun)): - metrics.append(self.calculate_loadavg(self.avenrun[index])) + for index in range(0, array_size(self.symvals.avenrun)): + metrics.append(self.calculate_loadavg(self.symvals.avenrun[index])) return metrics @@ -170,6 +174,11 @@ def get_loadavg(self): except DelayedAttributeError: return "Unknown" + @classmethod + def set_jiffies(cls, value): + cls.jiffies_dv.value = None + cls.jiffies_dv.callback(value) + @classmethod def setup_jiffies(cls, symbol): if cls.jiffies_ready: @@ -187,7 +196,7 @@ def setup_jiffies(cls, symbol): jiffies = int(gdb.lookup_global_symbol('jiffies').value()) cls.adjust_jiffies = False - delayed = get_delayed_lookup(cls, 'jiffies').callback(jiffies) + cls.set_jiffies(jiffies) def adjusted_jiffies(self): if self.adjust_jiffies: @@ -199,10 +208,14 @@ def get_uptime(self): self.uptime = timedelta(seconds=self.adjusted_jiffies() // self.hz) return self.uptime - @export - def jiffies_to_msec(self, jiffies): - return 1000 // self.hz * jiffies +symbol_cbs = 
SymbolCallbacks( [( 'jiffies', + CrashKernelCache.setup_jiffies ), + ( 'jiffies_64', + CrashKernelCache.setup_jiffies ) ]) utsname = CrashUtsnameCache() config = CrashConfigCache() kernel = CrashKernelCache(config) + +def jiffies_to_msec(jiffies): + return 1000 // kernel.hz * jiffies diff --git a/crash/commands/__init__.py b/crash/commands/__init__.py index 7b1772196c0..fac1b28a82b 100644 --- a/crash/commands/__init__.py +++ b/crash/commands/__init__.py @@ -1,8 +1,6 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from crash.infra import CrashBaseClass - import gdb import os @@ -20,7 +18,7 @@ class ArgumentParser(argparse.ArgumentParser): def error(self, message): raise CommandLineError(message) -class Command(CrashBaseClass, gdb.Command): +class Command(gdb.Command): commands = {} def __init__(self, name, parser=None): self.name = "py" + name diff --git a/crash/commands/dmesg.py b/crash/commands/dmesg.py index 19d38398d80..18a286cf96a 100644 --- a/crash/commands/dmesg.py +++ b/crash/commands/dmesg.py @@ -8,6 +8,11 @@ from crash.commands import Command, ArgumentParser from crash.exceptions import DelayedAttributeError +from crash.util.symbols import Types, Symvals + +types = Types([ 'struct printk_log *' , 'char *' ]) +symvals = Symvals([ 'log_buf', 'log_buf_len', 'log_first_idx', 'log_next_idx', + 'clear_seq', 'log_first_seq', 'log_next_seq' ]) class LogTypeException(Exception): pass @@ -148,10 +153,6 @@ def __init__(self, name): parser.format_usage = lambda: 'log [-tdm]\n' Command.__init__(self, name, parser) - __types__ = [ 'struct printk_log *' , 'char *' ] - __symvals__ = [ 'log_buf', 'log_buf_len', 'log_first_idx', 'log_next_idx', - 'clear_seq', 'log_first_seq', 'log_next_seq' ] - @classmethod def filter_unstructured_log(cls, log, args): lines = log.split('\n') @@ -168,11 +169,11 @@ def filter_unstructured_log(cls, log, args): return '\n'.join(lines) def log_from_idx(self, logbuf, idx, dict_needed=False): - msg = (logbuf + idx).cast(self.printk_log_p_type) + msg = (logbuf + idx).cast(types.printk_log_p_type) try: - textval = (msg.cast(self.char_p_type) + - self.printk_log_p_type.target().sizeof) + textval = (msg.cast(types.char_p_type) + + types.printk_log_p_type.target().sizeof) text = textval.string(length=int(msg['text_len'])) except UnicodeDecodeError as e: print(e) @@ -197,8 +198,8 @@ def log_from_idx(self, logbuf, idx, dict_needed=False): if dict_needed: dict_len = int(msg['dict_len']) - d = (msg.cast(self.char_p_type) + - self.printk_log_p_type.target().sizeof + textlen) + d = (msg.cast(types.char_p_type) + + types.printk_log_p_type.target().sizeof + textlen) s = '' for i in range(0, dict_len): @@ -214,19 +215,19 @@ def log_from_idx(self, logbuf, idx, dict_needed=False): def get_log_msgs(self, dict_needed=False): try: - idx = self.log_first_idx + idx = symvals.log_first_idx except DelayedAttributeError as e: raise LogTypeException('not structured log') - if self.clear_seq < self.log_first_seq: - self.clear_seq = self.log_first_seq + if symvals.clear_seq < symvals.log_first_seq: + symvals.clear_seq = symvals.log_first_seq - seq = self.clear_seq - idx = self.log_first_idx + seq = symvals.clear_seq + idx = symvals.log_first_idx - while seq < self.log_next_seq: - msg = self.log_from_idx(self.log_buf, idx, dict_needed) + while seq < symvals.log_next_seq: + msg = self.log_from_idx(symvals.log_buf, idx, dict_needed) seq += 1 idx = msg['next'] yield msg @@ -250,11 +251,11 @@ def handle_structured_log(self, args): 
print('{}'.format(d.encode('string_escape'))) def handle_logbuf(self, args): - if self.log_buf_len and self.log_buf: + if symvals.log_buf_len and symvals.log_buf: if args.d: raise LogInvalidOption("Unstructured logs don't offer key/value pair support") - print(self.filter_unstructured_log(self.log_buf.string('utf-8', 'replace'), args)) + print(self.filter_unstructured_log(symvals.log_buf.string('utf-8', 'replace'), args)) def execute(self, args): try: diff --git a/crash/infra/__init__.py b/crash/infra/__init__.py index 7a92c877039..5d53aa0888b 100644 --- a/crash/infra/__init__.py +++ b/crash/infra/__init__.py @@ -4,106 +4,8 @@ import sys import glob import os.path -import inspect import importlib -from crash.infra.lookup import DelayedLookups - -class export_wrapper(object): - def __init__(self, mod, cls, func): - self.cls = cls - self.func = func - - if not hasattr(mod, '_export_wrapper_singleton_dict'): - mod._export_wrapper_singleton_dict = {} - self.singleton_dict = mod._export_wrapper_singleton_dict - - def __call__(self, *args, **kwargs): - try: - obj = self.singleton_dict[self.cls] - except KeyError: - obj = self.cls() - self.singleton_dict[self.cls] = obj - - if isinstance(self.func, classmethod): - return self.func.__func__(self.cls, *args, **kwargs) - elif isinstance(self.func, staticmethod): - return self.func.__func__(*args, **kwargs) - else: - return self.func(obj, *args, **kwargs) - -def register_singleton(mod, obj): - if not hasattr(mod, '_export_wrapper_singleton_dict'): - raise RuntimeError("Class {} has no exported members." - .format(obj.__class__.__name__)) - - mod._export_wrapper_singleton_dict[obj.__class__] = obj - -def export(func): - """This marks the function for export to the module namespace. - The class must inherit from CrashBaseClass.""" - if isinstance(func, staticmethod) or isinstance(func, classmethod): - func.__func__.__export_to_module__ = True - else: - func.__export_to_module__ = True - return func - -class _CrashBaseMeta(type): - """ - This metaclass handles both exporting methods to the module namespace - and handling asynchronous loading of types and symbols. To enable it, - all you need to do is define your class as follows: - - class Foo(CrashBaseClass): - ... - - There are several special class variables that are interpreted during - class (not instance) creation. - - The following create properties in the class that initially - raise MissingSymbolError but contain the requested information when - made available. The properties for types will be the name of the type, - with 'struct ' removed and _type appended. E.g. 'struct test' becomes - test_type. If it's a pointer type, _p is appended after the type name, - e.g. 'struct test *' becomes test_p_type. The properties for the symbols - are named with the symbol name. If there is a naming collision, - NameError is raised. - __types__ -- A list consisting of type names. Pointer are handled in - Pointer are handled in a manner similarly to how - they are handled in C code. e.g. 'char *'. - __symbols__ -- A list of symbol names - __minsymbols__ -- A list of minimal symbols - __symvals__ -- A list of symbol names that will return the value - associated with the symbol instead of the symbol itself. - - The following set up callbacks when the requested type or symbol value - is available. These each accept a list of 2-tuples, (specifier, callback). - The callback is passed the type or symbol requested. 
- __type_callbacks__ - __symbol_callbacks__ - """ - def __new__(cls, name, parents, dct): - DelayedLookups.setup_delayed_lookups_for_class(name, dct) - return type.__new__(cls, name, parents, dct) - - def __init__(cls, name, parents, dct): - super(_CrashBaseMeta, cls).__init__(name, parents, dct) - cls.setup_exports_for_class(cls, dct) - DelayedLookups.setup_named_callbacks(cls, dct) - - @staticmethod - def setup_exports_for_class(cls, dct): - mod = sys.modules[dct['__module__']] - for name, decl in dct.items(): - if (hasattr(decl, '__export_to_module__') or - ((isinstance(decl, classmethod) or - isinstance(decl, staticmethod)) and - hasattr(decl.__func__, "__export_to_module__"))): - setattr(mod, name, export_wrapper(mod, cls, decl)) - -class CrashBaseClass(metaclass=_CrashBaseMeta): - pass - def autoload_submodules(caller, callback=None): mods = [] try: diff --git a/crash/infra/lookup.py b/crash/infra/lookup.py index 527e0f3b06f..e134da497f3 100644 --- a/crash/infra/lookup.py +++ b/crash/infra/lookup.py @@ -344,112 +344,3 @@ def callback(self, value: gdb.MinSymbol) -> None: def __str__(self): return "{} attached with {}".format(self.__class__, str(self.cb)) - -class ClassProperty(object): - def __init__(self, get): - self.get = get - - def __get__(self, instance, owner): - return self.get() - -class DelayedLookups(object): - """ - A class for handling dynamic creation of class attributes that - contain delayed values. The attributes are specified using - special names. These are documented in the _CrashBaseMeta - documentation. - """ - @classmethod - def name_check(cls, dct, name, attrname): - try: - collision = dct['__delayed_lookups__'][attrname] - except KeyError: - return - - raise NameError("DelayedLookup name collision: `{}' and `{}' -> `{}'" - .format(name, collision.name, attrname)) - - @classmethod - def add_lookup(cls, clsname, dct, name, attr, attrname=None): - if attrname is None: - attrname = name - cls.name_check(dct, name, attrname) - dct['__delayed_lookups__'][attrname] = attr - if attrname.startswith('__'): - attrname = '_{}{}'.format(clsname, attrname) - dct[attrname] = ClassProperty(attr.get) - - @classmethod - def setup_delayed_lookups_for_class(cls, clsname, dct): - if '__delayed_lookups__' in dct: - raise NameError("Name `delayed_lookups' is reserved when using DelayedLookups") - dct['__delayed_lookups__'] = {} - - if '__types__' in dct: - if not isinstance(dct['__types__'], list): - raise TypeError('__types__ attribute must be a list of strings') - for typ in dct['__types__']: - t = DelayedType(typ) - cls.add_lookup(clsname, dct, t.name, t, t.attrname) - del dct['__types__'] - if '__symbols__' in dct: - if not isinstance(dct['__symbols__'], list): - raise TypeError('__symbols__ attribute must be a list of strings') - for symname in dct['__symbols__']: - cls.add_lookup(clsname, dct, symname, DelayedSymbol(symname)) - del dct['__symbols__'] - if '__minsymbols__' in dct: - if not isinstance(dct['__minsymbols__'], list): - raise TypeError('__minsymbols_ attribute must be a list of strings') - for symname in dct['__minsymbols__']: - cls.add_lookup(clsname, dct, symname, - DelayedMinimalSymbol(symname)) - del dct['__minsymbols__'] - if '__symvals__' in dct: - if not isinstance(dct['__symvals__'], list): - raise TypeError('__symvals__ attribute must be a list of strings') - for symname in dct['__symvals__']: - cls.add_lookup(clsname, dct, symname, DelayedSymval(symname)) - del dct['__symvals__'] - - if '__minsymvals__' in dct: - if not isinstance(dct['__minsymvals__'], 
list): - raise TypeError('__minsymvals__ attribute must be a list of strings') - for symname in dct['__minsymvals__']: - cls.add_lookup(clsname, dct, symname, - DelayedMinimalSymval(symname)) - del dct['__minsymvals__'] - - if '__delayed_values__' in dct: - if not isinstance(dct['__delayed_values__'], list): - raise TypeError('__delayed_values__ attribute must be a list of strings') - for propname in dct['__delayed_values__']: - cls.add_lookup(clsname, dct, propname, DelayedValue(propname)) - del dct['__delayed_values__'] - - @classmethod - def setup_named_callbacks(this_cls, cls, dct): - callbacks = [] - if '__type_callbacks__' in dct: - for (typ, callback) in dct['__type_callbacks__']: - cb = getattr(cls, callback) - callbacks.append(TypeCallback(typ, cb)) - del dct['__type_callbacks__'] - - if '__symbol_callbacks__' in dct: - for (sym, callback) in dct['__symbol_callbacks__']: - cb = getattr(cls, callback) - callbacks.append(SymbolCallback(sym, cb)) - del dct['__symbol_callbacks__'] - if '__minsymbol_callbacks__' in dct: - for (sym, callback) in dct['__minsymbol_callbacks__']: - cb = getattr(cls, callback) - callbacks.append(MinimalSymbolCallback(sym, cb)) - del dct['__minsymbol_callbacks__'] - if callbacks: - dct['__delayed_lookups__']['__callbacks__'] = callbacks - -def get_delayed_lookup(cls, name): - return cls.__delayed_lookups__[name] - - diff --git a/crash/kernel.py b/crash/kernel.py index 75cb0f9d888..d2d7ce6b4ba 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -9,12 +9,12 @@ import crash.arch import crash.arch.x86_64 import crash.arch.ppc64 -from crash.infra import CrashBaseClass, export from crash.types.list import list_for_each_entry from crash.types.list import list_for_each_entry from crash.types.module import for_each_module, for_each_module_section from elftools.elf.elffile import ELFFile from crash.util import get_symbol_value +from crash.util.symbols import Types, Symvals, Symbols from crash.exceptions import MissingSymbolError from typing import Pattern, Union, List, Dict, Any @@ -52,11 +52,10 @@ def __init__(self, path, module_value, expected_value): PathSpecifier = Union[List[str], str] -class CrashKernel(CrashBaseClass): - __types__ = [ 'char *' ] - __symvals__ = [ 'init_task' ] - __symbols__ = [ 'runqueues'] - +class CrashKernel(object): + types = Types([ 'char *' ]) + symvals = Symvals([ 'init_task' ]) + symbols = Symbols([ 'runqueues']) def __init__(self, roots: PathSpecifier=None, vmlinux_debuginfo: PathSpecifier=None, @@ -308,7 +307,7 @@ def __init__(self, roots: PathSpecifier=None, def get_minsymbol_as_string(self, name: str) -> str: sym = gdb.lookup_minimal_symbol(name).value() - return sym.address.cast(self.char_p_type).string() + return sym.address.cast(self.types.char_p_type).string() def extract_version(self) -> str: try: @@ -605,9 +604,9 @@ def setup_tasks(self) -> None: import crash.cache.tasks gdb.execute('set print thread-events 0') - task_list = self.init_task['tasks'] + task_list = self.symvals.init_task['tasks'] - rqs = get_percpu_vars(self.runqueues) + rqs = get_percpu_vars(self.symbols.runqueues) rqscurrs = {int(x["curr"]) : k for (k, x) in rqs.items()} print("Loading tasks...", end='') @@ -615,11 +614,11 @@ def setup_tasks(self) -> None: task_count = 0 tasks = [] - for taskg in list_for_each_entry(task_list, self.init_task.type, + for taskg in list_for_each_entry(task_list, self.symvals.init_task.type, 'tasks', include_head=True): tasks.append(taskg) for task in list_for_each_entry(taskg['thread_group'], - self.init_task.type, + 
self.symvals.init_task.type, 'thread_group'): tasks.append(task) diff --git a/crash/subsystem/filesystem/__init__.py b/crash/subsystem/filesystem/__init__.py index 32b74dec738..ee2f2dbf8bb 100644 --- a/crash/subsystem/filesystem/__init__.py +++ b/crash/subsystem/filesystem/__init__.py @@ -5,10 +5,13 @@ import gdb from crash.util import container_of, get_typed_pointer, decode_flags -from crash.infra import CrashBaseClass, export +from crash.util.symbols import Types, Symvals +from crash.infra.lookup import DelayedSymval, DelayedType from crash.types.list import list_for_each_entry from crash.subsystem.storage import block_device_name -from crash.subsystem.storage import Storage as block + +types = Types('struct super_block') +symvals = Symvals('super_blocks') AddressSpecifier = Union[int, str, gdb.Value] @@ -73,84 +76,69 @@ MS_NOUSER : "MS_NOUSER", } -class FileSystem(CrashBaseClass): - __types__ = [ 'struct dio *', - 'struct buffer_head *', - 'struct super_block' ] - __symvals__ = [ 'super_blocks' ] - @export - @staticmethod - def super_fstype(sb: gdb.Value) -> str: - """ - Returns the file system type's name for a given superblock. - - Args: - sb (gdb.Value): The struct super_block for - which to return the file system type's name - - Returns: - str: The file system type's name - """ - return sb['s_type']['name'].string() - - @export - @staticmethod - def super_flags(sb: gdb.Value) -> str: - """ - Returns the flags associated with the given superblock. - - Args: - sb (gdb.Value): The struct super_block for - which to return the flags. - - Returns: - str: The flags field in human-readable form. - - """ - return decode_flags(sb['s_flags'], SB_FLAGS) - - @export - @classmethod - def for_each_super_block(cls) -> Iterable[gdb.Value]: - """ - Iterate over the list of super blocks and yield each one. - - Args: - None - - Yields: - gdb.Value - """ - for sb in list_for_each_entry(cls.super_blocks, cls.super_block_type, - 's_list'): - yield sb - - @export - @classmethod - def get_super_block(cls, desc: AddressSpecifier, - force: bool=False) -> gdb.Value: - """ - Given an address description return a gdb.Value that contains - a struct super_block at that address. - - Args: - desc (gdb.Value, str, or int): The address for which to provide - a casted pointer - force (bool): Skip testing whether the value is available. - - Returns: - gdb.Value: The super_block at the requested - location - - Raises: - gdb.NotAvailableError: The target value was not available. - """ - sb = get_typed_pointer(desc, cls.super_block_type).dereference() - if not force: - try: - x = int(sb['s_dev']) - except gdb.NotAvailableError: - raise gdb.NotAvailableError(f"no superblock available at `{desc}'") - return sb - -inst = FileSystem() +def super_fstype(sb: gdb.Value) -> str: + """ + Returns the file system type's name for a given superblock. + + Args: + sb (gdb.Value): The struct super_block for + which to return the file system type's name + + Returns: + str: The file system type's name + """ + return sb['s_type']['name'].string() + +def super_flags(sb: gdb.Value) -> str: + """ + Returns the flags associated with the given superblock. + + Args: + sb (gdb.Value): The struct super_block for + which to return the flags. + + Returns: + str: The flags field in human-readable form. + + """ + return decode_flags(sb['s_flags'], SB_FLAGS) + +def for_each_super_block() -> Iterable[gdb.Value]: + """ + Iterate over the list of super blocks and yield each one. 
+ + Args: + None + + Yields: + gdb.Value + """ + for sb in list_for_each_entry(symvals.super_blocks, + types.super_block_type, 's_list'): + yield sb + +def get_super_block(desc: AddressSpecifier, force: bool=False) -> gdb.Value: + """ + Given an address description return a gdb.Value that contains + a struct super_block at that address. + + Args: + desc (gdb.Value, str, or int): The address for which to provide + a casted pointer + force (bool): Skip testing whether the value is available. + + Returns: + gdb.Value: The super_block at the requested + location + + Raises: + gdb.NotAvailableError: The target value was not available. + """ + sb = get_typed_pointer(desc, types.super_block_type).dereference() + if not force: + try: + x = int(sb['s_dev']) + except gdb.NotAvailableError: + raise gdb.NotAvailableError(f"no superblock available at `{desc}'") + + return sb diff --git a/crash/subsystem/filesystem/btrfs.py b/crash/subsystem/filesystem/btrfs.py index 1515f00862b..d42790dee39 100644 --- a/crash/subsystem/filesystem/btrfs.py +++ b/crash/subsystem/filesystem/btrfs.py @@ -3,40 +3,38 @@ import gdb -from crash.infra import CrashBaseClass +from crash.util.symbols import Types -class BtrfsFileSystem(CrashBaseClass): - __types__ = [ 'struct btrfs_inode', 'struct btrfs_fs_info *' ] +types = Types([ 'struct btrfs_inode', 'struct btrfs_fs_info *', + 'struct btrfs_fs_info' ]) - @classmethod - def btrfs_inode(cls, vfs_inode): - """ - Converts a VFS inode to a btrfs inode +def btrfs_inode(vfs_inode): + """ + Converts a VFS inode to a btrfs inode - This method converts a struct inode to a struct btrfs_inode. + This method converts a struct inode to a struct btrfs_inode. - Args: - vfs_inode (gdb.Value): The struct inode to convert - to a struct btrfs_inode + Args: + vfs_inode (gdb.Value): The struct inode to convert + to a struct btrfs_inode - Returns: - gdb.Value: The converted struct btrfs_inode - """ - return container_of(vfs_inode, cls.btrfs_inode_type, 'vfs_inode') + Returns: + gdb.Value: The converted struct btrfs_inode + """ + return container_of(vfs_inode, types.btrfs_inode_type, 'vfs_inode') - @classmethod - def btrfs_sb_info(cls, super_block): - """ - Converts a VFS superblock to a btrfs fs_info +def btrfs_fs_info(super_block): + """ + Converts a VFS superblock to a btrfs fs_info - This method converts a struct super_block to a struct btrfs_fs_info + This method converts a struct super_block to a struct btrfs_fs_info - Args: - super_block (gdb.Value): The struct super_block - to convert to a struct btrfs_fs_info. + Args: + super_block (gdb.Value): The struct super_block + to convert to a struct btrfs_fs_info. 
- Returns: - gdb.Value: The converted struct - btrfs_fs_info - """ - return super_block['s_fs_info'].cast(cls.btrfs_fs_info_p_type) + Returns: + gdb.Value: The converted struct + btrfs_fs_info + """ + return super_block['s_fs_info'].cast(types.btrfs_fs_info_p_type) diff --git a/crash/subsystem/filesystem/ext3.py b/crash/subsystem/filesystem/ext3.py index 320804d6228..cdad6cf5a90 100644 --- a/crash/subsystem/filesystem/ext3.py +++ b/crash/subsystem/filesystem/ext3.py @@ -3,8 +3,6 @@ import gdb -from crash.infra import CrashBaseClass -from crash.util import get_symbol_value from crash.subsystem.storage.decoders import Decoder class Ext3Decoder(Decoder): diff --git a/crash/subsystem/filesystem/mount.py b/crash/subsystem/filesystem/mount.py index 2407dceb1dd..44ebda4d080 100644 --- a/crash/subsystem/filesystem/mount.py +++ b/crash/subsystem/filesystem/mount.py @@ -3,10 +3,10 @@ import gdb -from crash.infra import CrashBaseClass, export from crash.subsystem.filesystem import super_fstype from crash.types.list import list_for_each_entry from crash.util import container_of, decode_flags, struct_has_member +from crash.util.symbols import Types, Symvals, TypeCallbacks, SymbolCallbacks MNT_NOSUID = 0x01 MNT_NODEV = 0x02 @@ -38,139 +38,124 @@ } MNT_FLAGS_HIDDEN.update(MNT_FLAGS) +types = Types([ 'struct mount', 'struct vfsmount' ]) +symvals = Symvals([ 'init_task' ]) -class Mount(CrashBaseClass): - __types__ = [ 'struct mount', 'struct vfsmount' ] - __symvals__ = [ 'init_task' ] - __type_callbacks__ = [ ('struct vfsmount', 'check_mount_type' ) ] - __symbol_callbacks__ = [ ('init_task', 'check_task_interface' ) ] - +class Mount(object): @classmethod def for_each_mount_impl(cls, task): raise NotImplementedError("Mount.for_each_mount is unhandled on this kernel version.") @classmethod - def check_mount_type(cls, gdbtype): - try: - cls.mount_type = gdb.lookup_type('struct mount') - except gdb.error: - # Older kernels didn't separate mount from vfsmount - cls.mount_type = cls.vfsmount_type + def for_each_mount_nsproxy(cls, task): + return list_for_each_entry(task['nsproxy']['mnt_ns']['list'], + types.mount_type, 'mnt_list') @classmethod def check_task_interface(cls, symval): try: - nsproxy = cls.init_task['nsproxy'] + nsproxy = symvals.init_task['nsproxy'] cls.for_each_mount_impl = cls.for_each_mount_nsproxy except KeyError: print("check_task_interface called but no init_task?") pass - @export - def for_each_mount(self, task=None): - if task is None: - task = self.init_task - return self.for_each_mount_impl(task) - - def for_each_mount_nsproxy(self, task): - return list_for_each_entry(task['nsproxy']['mnt_ns']['list'], - self.mount_type, 'mnt_list') - - @export - @classmethod - def real_mount(cls, vfsmnt): - if (vfsmnt.type == cls.mount_type or - vfsmnt.type == cls.mount_type.pointer()): - t = vfsmnt.type - if t.code == gdb.TYPE_CODE_PTR: - t = t.target() - if t is not cls.mount_type: - cls.mount_type = t - return vfsmnt - return container_of(vfsmnt, cls.mount_type, 'mnt') - - @export - @classmethod - def mount_flags(cls, mnt, show_hidden=False): - if struct_has_member(mnt, 'mnt'): - mnt = mnt['mnt'] - if show_hidden: - return decode_flags(mnt['mnt_flags'], MNT_FLAGS_HIDDEN, ",") - return decode_flags(mnt['mnt_flags'], MNT_FLAGS, ",") - - @export - @staticmethod - def mount_super(mnt): - try: - sb = mnt['mnt']['mnt_sb'] - except gdb.error: - sb = mnt['mnt_sb'] - return sb - - @export - @staticmethod - def mount_root(mnt): - try: - mnt = mnt['mnt'] - except gdb.error: - pass - - return mnt['mnt_root'] - - 
@export - @classmethod - def mount_fstype(cls, mnt): - return super_fstype(cls.mount_super(mnt)) - - @export - @classmethod - def mount_device(cls, mnt): - devname = mnt['mnt_devname'].string() - if devname is None: - devname = "none" - return devname - - @export - @classmethod - def d_path(cls, mnt, dentry, root=None): - if root is None: - root = cls.init_task['fs']['root'] - - if dentry.type.code != gdb.TYPE_CODE_PTR: - dentry = dentry.address - - if mnt.type.code != gdb.TYPE_CODE_PTR: - mnt = mnt.address - - mount = cls.real_mount(mnt) - if mount.type.code != gdb.TYPE_CODE_PTR: - mount = mount.address - - try: - mnt = mnt['mnt'].address - except gdb.error: - pass - - name = "" - - # Gone are the days where finding the root was as simple as - # dentry == dentry->d_parent - while dentry != root['dentry'] or mnt != root['mnt']: - if dentry == mnt['mnt_root'] or dentry == dentry['d_parent']: - if dentry != mnt['mnt_root']: - return None - if mount != mount['mnt_parent']: - dentry = mount['mnt_mountpoint'] - mount = mount['mnt_parent'] - try: - mnt = mount['mnt'].address - except gdb.error: - mnt = mount - continue - break - - name = "/" + dentry['d_name']['name'].string() + name - dentry = dentry['d_parent'] - if not name: - name = '/' - return name +def check_mount_type(gdbtype): + try: + types.mount_type = gdb.lookup_type('struct mount') + except gdb.error: + # Older kernels didn't separate mount from vfsmount + types.mount_type = types.vfsmount_type + +def for_each_mount(task=None): + if task is None: + task = symvals.init_task + return Mount.for_each_mount_impl(task) + +def real_mount(vfsmnt): + if (vfsmnt.type == types.mount_type or + vfsmnt.type == types.mount_type.pointer()): + t = vfsmnt.type + if t.code == gdb.TYPE_CODE_PTR: + t = t.target() + if t is not types.mount_type: + types.mount_type = t + return vfsmnt + return container_of(vfsmnt, types.mount_type, 'mnt') + +def mount_flags(mnt, show_hidden=False): + if struct_has_member(mnt, 'mnt'): + mnt = mnt['mnt'] + if show_hidden: + return decode_flags(mnt['mnt_flags'], MNT_FLAGS_HIDDEN, ",") + return decode_flags(mnt['mnt_flags'], MNT_FLAGS, ",") + +def mount_super(mnt): + try: + sb = mnt['mnt']['mnt_sb'] + except gdb.error: + sb = mnt['mnt_sb'] + return sb + +def mount_root(mnt): + try: + mnt = mnt['mnt'] + except gdb.error: + pass + + return mnt['mnt_root'] + +def mount_fstype(mnt): + return super_fstype(mount_super(mnt)) + +def mount_device(mnt): + devname = mnt['mnt_devname'].string() + if devname is None: + devname = "none" + return devname + +def d_path(mnt, dentry, root=None): + if root is None: + root = symvals.init_task['fs']['root'] + + if dentry.type.code != gdb.TYPE_CODE_PTR: + dentry = dentry.address + + if mnt.type.code != gdb.TYPE_CODE_PTR: + mnt = mnt.address + + mount = real_mount(mnt) + if mount.type.code != gdb.TYPE_CODE_PTR: + mount = mount.address + + try: + mnt = mnt['mnt'].address + except gdb.error: + pass + + name = "" + + # Gone are the days where finding the root was as simple as + # dentry == dentry->d_parent + while dentry != root['dentry'] or mnt != root['mnt']: + if dentry == mnt['mnt_root'] or dentry == dentry['d_parent']: + if dentry != mnt['mnt_root']: + return None + if mount != mount['mnt_parent']: + dentry = mount['mnt_mountpoint'] + mount = mount['mnt_parent'] + try: + mnt = mount['mnt'].address + except gdb.error: + mnt = mount + continue + break + + name = "/" + dentry['d_name']['name'].string() + name + dentry = dentry['d_parent'] + if not name: + name = '/' + return name + +type_cbs = 
TypeCallbacks([ ('struct vfsmount', check_mount_type ) ]) +symbols_cbs = SymbolCallbacks([ ('init_task', Mount.check_task_interface ) ]) diff --git a/crash/subsystem/storage/__init__.py b/crash/subsystem/storage/__init__.py index 5228e1d290c..37cf821d8d9 100644 --- a/crash/subsystem/storage/__init__.py +++ b/crash/subsystem/storage/__init__.py @@ -4,302 +4,280 @@ from typing import Iterable import gdb +from gdb.types import get_basic_type from crash.util import container_of -from crash.infra import CrashBaseClass, export +from crash.util.symbols import Types, Symvals, SymbolCallbacks, TypeCallbacks from crash.types.classdev import for_each_class_device from . import decoders import crash.exceptions -class Storage(CrashBaseClass): - __types__ = [ 'struct gendisk', - 'struct hd_struct', - 'struct device', - 'struct device_type', - 'struct bdev_inode' ] - __symvals__ = [ 'block_class', - 'blockdev_superblock', - 'disk_type', - 'part_type' ] - __symbol_callbacks = [ - ( 'disk_type', '_check_types' ), - ( 'part_type', '_check_types' ) ] - __type_callbacks__ = [ ('struct device_type', '_check_types' ) ] - - @classmethod - def _check_types(cls, result): - try: - if cls.part_type.type.unqualified() != cls.device_type_type: - raise TypeError("part_type expected to be {} not {}" - .format(cls.device_type_type, - cls.part_type.type)) - - if cls.disk_type.type.unqualified() != cls.device_type_type: - raise TypeError("disk_type expected to be {} not {}" - .format(cls.device_type_type, - cls.disk_type.type)) - cls.types_checked = True - except crash.exceptions.DelayedAttributeError: - pass - - @export - @classmethod - def for_each_bio_in_stack(cls, bio: gdb.Value) -> Iterable[decoders.Decoder]: - """ - Iterates and decodes each bio involved in a stacked storage environment - - This method will return a dictionary describing each object - in the storage stack, starting with the provided bio, as - processed by each level's decoder. The stack will be interrupted - if an encountered object doesn't have a decoder specified. - - See crash.subsystem.storage.decoder.register_decoder for more detail. - - Args: - bio (gdb.Value): The initial struct bio to start - decoding - - Yields: - dict : Contains, minimally, the following item. - - description (str): A human-readable description of the bio. - Additional items may be available based on the - implmentation-specific decoder. - """ - decoder = decoders.decode_bio(bio) - while decoder is not None: - yield decoder - decoder = next(decoder) - - @export - def dev_to_gendisk(self, dev): - """ - Converts a struct device that is embedded in a struct gendisk - back to the struct gendisk. - - Args: - dev (gdb.Value) : A struct device contained within - a struct gendisk. No checking is performed. Results - if other structures are provided are undefined. - - Returns: - gdb.Value : The converted struct hd_struct - """ - return container_of(dev, self.gendisk_type, 'part0.__dev') - - @export - def dev_to_part(self, dev): - """ - Converts a struct device that is embedded in a struct hd_struct - back to the struct hd_struct. - - Args: - dev (gdb.Value): A struct device embedded within a - struct hd_struct. No checking is performed. Results if other - structures are provided are undefined. - - Returns: - gdb.Value(struct hd_struct): The converted struct hd_struct - - """ - return container_of(dev, self.hd_struct_type, '__dev') - - @export - def gendisk_to_dev(self, gendisk): - """ - Converts a struct gendisk that embeds a struct device to - the struct device. 
- - Args: - dev (gdb.Value): A struct gendisk that embeds - a struct device. No checking is performed. Results - if other structures are provided are undefined. - - Returns: - gdb.Value: The converted struct device - """ - - return gendisk['part0']['__dev'].address - - @export - def part_to_dev(self, part): - """ - Converts a struct hd_struct that embeds a struct device to - the struct device. - - Args: - dev (gdb.Value): A struct hd_struct that embeds - a struct device. No checking is performed. Results if - other structures are provided are undefined. - - Returns: - gdb.Value: The converted struct device - """ - return part['__dev'].address - - @export - def for_each_block_device(self, subtype=None): - """ - Iterates over each block device registered with the block class. - - This method iterates over the block_class klist and yields every - member found. The members are either struct gendisk or - struct hd_struct, depending on whether it describes an entire - disk or a partition, respectively. - - The members can be filtered by providing a subtype, which - corresponds to a the the type field of the struct device. - - Args: - subtype (gdb.Value, optional): The struct - device_type that will be used to match and filter. Typically - 'disk_type' or 'device_type' - - Yields: - gdb.Value - A struct gendisk - or struct hd_struct that meets the filter criteria. - - Raises: - RuntimeError: An unknown device type was encountered during - iteration. - """ - - if subtype: - if subtype.type.unqualified() == self.device_type_type: - subtype = subtype.address - elif subtype.type.unqualified() != self.device_type_type.pointer(): - raise TypeError("subtype must be {} not {}" - .format(self.device_type_type.pointer(), - subtype.type.unqualified())) - for dev in for_each_class_device(self.block_class, subtype): - if dev['type'] == self.disk_type.address: - yield self.dev_to_gendisk(dev) - elif dev['type'] == self.part_type.address: - yield self.dev_to_part(dev) - else: - raise RuntimeError("Encountered unexpected device type {}" - .format(dev['type'])) - - @export - def for_each_disk(self): - """ - Iterates over each block device registered with the block class - that corresponds to an entire disk. - - This is an alias for for_each_block_device(disk_type) - """ - - return self.for_each_block_device(self.disk_type) - - @export - def gendisk_name(self, gendisk): - """ - Returns the name of the provided block device. - - This method evaluates the block device and returns the name, - including partition number, if applicable. 
- - Args: - gendisk(gdb.Value): - A struct gendisk or struct hd_struct for which to return - the name - - Returns: - str: the name of the block device - - Raises: - TypeError: gdb.Value does not describe a struct gendisk or - struct hd_struct - """ - if gendisk.type.code == gdb.TYPE_CODE_PTR: - gendisk = gendisk.dereference() - - if gendisk.type.unqualified() == self.gendisk_type: - return gendisk['disk_name'].string() - elif gendisk.type.unqualified() == self.hd_struct_type: - parent = self.dev_to_gendisk(self.part_to_dev(gendisk)['parent']) - return "{}{:d}".format(self.gendisk_name(parent), - int(gendisk['partno'])) +types = Types([ 'struct gendisk', 'struct hd_struct', 'struct device', + 'struct device_type', 'struct bdev_inode' ]) +symvals = Symvals([ 'block_class', 'blockdev_superblock', 'disk_type', + 'part_type' ]) + +def for_each_bio_in_stack(bio: gdb.Value) -> Iterable[decoders.Decoder]: + """ + Iterates and decodes each bio involved in a stacked storage environment + + This method will yield a Decoder object describing each level + in the storage stack, starting with the provided bio, as + processed by each level's decoder. The stack will be interrupted + if an encountered object doesn't have a decoder specified. + + See crash.subsystem.storage.decoders for more detail. + + Args: + bio (gdb.Value): The initial struct bio to start + decoding + + Yields: + Decoder + """ + decoder = decoders.decode_bio(bio) + while decoder is not None: + yield decoder + decoder = next(decoder) + +def dev_to_gendisk(dev): + """ + Converts a struct device that is embedded in a struct gendisk + back to the struct gendisk. + + Args: + dev (gdb.Value) : A struct device contained within + a struct gendisk. No checking is performed. Results + if other structures are provided are undefined. + + Returns: + gdb.Value : The converted struct hd_struct + """ + return container_of(dev, types.gendisk_type, 'part0.__dev') + +def dev_to_part(dev): + """ + Converts a struct device that is embedded in a struct hd_struct + back to the struct hd_struct. + + Args: + dev (gdb.Value): A struct device embedded within a + struct hd_struct. No checking is performed. Results if other + structures are provided are undefined. + + Returns: + gdb.Value: The converted struct hd_struct + + """ + return container_of(dev, types.hd_struct_type, '__dev') + +def gendisk_to_dev(gendisk): + """ + Converts a struct gendisk that embeds a struct device to + the struct device. + + Args: + dev (gdb.Value): A struct gendisk that embeds + a struct device. No checking is performed. Results + if other structures are provided are undefined. + + Returns: + gdb.Value: The converted struct device + """ + + return gendisk['part0']['__dev'].address + +def part_to_dev(part): + """ + Converts a struct hd_struct that embeds a struct device to + the struct device. + + Args: + dev (gdb.Value): A struct hd_struct that embeds + a struct device. No checking is performed. Results if + other structures are provided are undefined. + + Returns: + gdb.Value: The converted struct device + """ + return part['__dev'].address + + +def for_each_block_device(subtype: gdb.Value=None) -> Iterable[gdb.Value]: + """ + Iterates over each block device registered with the block class. + + This method iterates over the block_class klist and yields every + member found. The members are either struct gendisk or + struct hd_struct, depending on whether it describes an entire + disk or a partition, respectively. 
+ + The members can be filtered by providing a subtype, which + corresponds to a the the type field of the struct device. + + Args: + subtype (gdb.Value, optional): The struct + device_type that will be used to match and filter. Typically + 'disk_type' or 'device_type' + + Yields: + gdb.Value or + gdb.Value: + A struct gendisk or struct hd_struct that meets + the filter criteria. + + Raises: + RuntimeError: An unknown device type was encountered during + iteration. + """ + + if subtype: + if get_basic_type(subtype.type) == types.device_type_type: + subtype = subtype.address + elif get_basic_type(subtype.type) != types.device_type_type.pointer(): + raise TypeError("subtype must be {} not {}" + .format(types.device_type_type.pointer(), + subtype.type.unqualified())) + for dev in for_each_class_device(symvals.block_class, subtype): + if dev['type'] == symvals.disk_type.address: + yield dev_to_gendisk(dev) + elif dev['type'] == symvals.part_type.address: + yield dev_to_part(dev) else: - raise TypeError("expected {} or {}, not {}" - .format(self.gendisk_type, self.hd_struct_type, - gendisk.type.unqualified())) - - @export - def block_device_name(self, bdev): - """ - Returns the name of the provided block device. - - This method evaluates the block device and returns the name, - including partition number, if applicable. - - Args: - bdev(gdb.Value): A struct block_device for - which to return the name - - Returns: - str: the name of the block device - """ - return self.gendisk_name(bdev['bd_disk']) - - @export - def is_bdev_inode(self, inode): - """ - Tests whether the provided struct inode describes a block device - - This method evaluates the inode and returns a True or False, - depending on whether the inode describes a block device. - - Args: - bdev(gdb.Value): The struct inode to test whether - it describes a block device. - - Returns: - bool: True if the inode describes a block device, False otherwise. - """ - return inode['i_sb'] == self.blockdev_superblock - - @export - def inode_to_block_device(self, inode): - """ - Returns the block device associated with this inode. - - If the inode describes a block device, return that block device. - Otherwise, raise TypeError. - - Args: - inode(gdb.Value): The struct inode for which to - return the associated block device - - Returns: - gdb.Value: The struct block_device associated - with the provided struct inode - - Raises: - TypeError: inode does not describe a block device - """ - if inode['i_sb'] != self.blockdev_superblock: - raise TypeError("inode does not correspond to block device") - return container_of(inode, self.bdev_inode_type, 'vfs_inode')['bdev'] - - @export - def inode_on_bdev(self, inode): - """ - Returns the block device associated with this inode. - - If the inode describes a block device, return that block device. - Otherwise, return the block device, if any, associated - with the inode's super block. - - Args: - inode(gdb.Value): The struct inode for which to - return the associated block device - - Returns: - gdb.Value: The struct block_device associated - with the provided struct inode - """ - if self.is_bdev_inode(inode): - return self.inode_to_block_device(inode) - else: - return inode['i_sb']['s_bdev'] -inst = Storage() + raise RuntimeError("Encountered unexpected device type {}" + .format(dev['type'])) + +def for_each_disk(): + """ + Iterates over each block device registered with the block class + that corresponds to an entire disk. 
+ + This is an alias for for_each_block_device(disk_type) + """ + + return for_each_block_device(symvals.disk_type) + +def gendisk_name(gendisk): + """ + Returns the name of the provided block device. + + This method evaluates the block device and returns the name, + including partition number, if applicable. + + Args: + gendisk(gdb.Value): + A struct gendisk or struct hd_struct for which to return + the name + + Returns: + str: the name of the block device + + Raises: + TypeError: gdb.Value does not describe a struct gendisk or + struct hd_struct + """ + if gendisk.type.code == gdb.TYPE_CODE_PTR: + gendisk = gendisk.dereference() + + if get_basic_type(gendisk.type) == types.gendisk_type: + return gendisk['disk_name'].string() + elif get_basic_type(gendisk.type) == types.hd_struct_type: + parent = dev_to_gendisk(part_to_dev(gendisk)['parent']) + return "{}{:d}".format(gendisk_name(parent), int(gendisk['partno'])) + else: + raise TypeError("expected {} or {}, not {}" + .format(types.gendisk_type, types.hd_struct_type, + gendisk.type.unqualified())) + +def block_device_name(bdev): + """ + Returns the name of the provided block device. + + This method evaluates the block device and returns the name, + including partition number, if applicable. + + Args: + bdev(gdb.Value): A struct block_device for + which to return the name + + Returns: + str: the name of the block device + """ + return gendisk_name(bdev['bd_disk']) + +def is_bdev_inode(inode): + """ + Tests whether the provided struct inode describes a block device + + This method evaluates the inode and returns a True or False, + depending on whether the inode describes a block device. + + Args: + bdev(gdb.Value): The struct inode to test whether + it describes a block device. + + Returns: + bool: True if the inode describes a block device, False otherwise. + """ + return inode['i_sb'] == symvals.blockdev_superblock + +def inode_to_block_device(inode): + """ + Returns the block device associated with this inode. + + If the inode describes a block device, return that block device. + Otherwise, raise TypeError. + + Args: + inode(gdb.Value): The struct inode for which to + return the associated block device + + Returns: + gdb.Value: The struct block_device associated + with the provided struct inode + + Raises: + TypeError: inode does not describe a block device + """ + if inode['i_sb'] != symvals.blockdev_superblock: + raise TypeError("inode does not correspond to block device") + return container_of(inode, types.bdev_inode_type, 'vfs_inode')['bdev'] + +def inode_on_bdev(inode): + """ + Returns the block device associated with this inode. + + If the inode describes a block device, return that block device. + Otherwise, return the block device, if any, associated + with the inode's super block. 
+
+    Args:
+        inode(gdb.Value): The struct inode for which to
+            return the associated block device
+
+    Returns:
+        gdb.Value: The struct block_device associated
+            with the provided struct inode
+    """
+    if is_bdev_inode(inode):
+        return inode_to_block_device(inode)
+    else:
+        return inode['i_sb']['s_bdev']
+
+def _check_types(result):
+    try:
+        if symvals.part_type.type.unqualified() != types.device_type_type:
+            raise TypeError("part_type expected to be {} not {}"
+                            .format(types.device_type_type,
+                                    symvals.part_type.type))
+
+        if symvals.disk_type.type.unqualified() != types.device_type_type:
+            raise TypeError("disk_type expected to be {} not {}"
+                            .format(types.device_type_type,
+                                    symvals.disk_type.type))
+    except crash.exceptions.DelayedAttributeError:
+        pass
+
+symbol_cbs = SymbolCallbacks([ ( 'disk_type', _check_types ),
+                               ( 'part_type', _check_types )] )
+type_cbs = TypeCallbacks([ ('struct device_type', _check_types ) ])
diff --git a/crash/subsystem/storage/blocksq.py b/crash/subsystem/storage/blocksq.py
index b2b10d5a723..53533c0c7b0 100644
--- a/crash/subsystem/storage/blocksq.py
+++ b/crash/subsystem/storage/blocksq.py
@@ -3,52 +3,48 @@
 
 import gdb
 
-from crash.infra import CrashBaseClass, export
+from crash.util.symbols import Types
 from crash.types.list import list_for_each_entry
 from crash.cache.syscache import kernel
 
 class NoQueueError(RuntimeError):
     pass
 
-class SingleQueueBlock(CrashBaseClass):
-    __types__ = [ 'struct request' ]
-
-    @export
-    def for_each_request_in_queue(self, queue):
-        """
-        Iterates over each struct request in request_queue
-
-        This method iterates over the request_queue's queuelist and
-        returns a request for each member.
-
-        Args:
-            queue(gdb.Value): The struct request_queue
-                used to iterate
-
-        Yields:
-            gdb.Value: Each struct request contained within
-                the request_queue's queuelist
-        """
-        if int(queue) == 0:
-            raise NoQueueError("Queue is NULL")
-        return list_for_each_entry(queue['queue_head'], self.request_type,
-                                   'queuelist')
-
-    @export
-    @classmethod
-    def request_age_ms(cls, request):
-        """
-        Returns the age of the request in milliseconds
-
-        This method returns the difference between the current time
-        (jiffies) and the request's start_time, in milliseconds.
-
-        Args:
-            request(gdb.Value): The struct request used
-                to determine age
-
-        Returns:
-            int: Difference between the request's start_time and
-                current jiffies in milliseconds.
-        """
-        return kernel.jiffies_to_msec(kernel.jiffies - request['start_time'])
+types = Types([ 'struct request' ])
+
+def for_each_request_in_queue(queue):
+    """
+    Iterates over each struct request in request_queue
+
+    This method iterates over the request_queue's queuelist and
+    returns a request for each member.
+
+    Args:
+        queue(gdb.Value): The struct request_queue
+            used to iterate
+
+    Yields:
+        gdb.Value: Each struct request contained within
+            the request_queue's queuelist
+    """
+    if int(queue) == 0:
+        raise NoQueueError("Queue is NULL")
+    return list_for_each_entry(queue['queue_head'], types.request_type,
+                               'queuelist')
+
+def request_age_ms(request):
+    """
+    Returns the age of the request in milliseconds
+
+    This method returns the difference between the current time
+    (jiffies) and the request's start_time, in milliseconds.
+
+    Args:
+        request(gdb.Value): The struct request used
+            to determine age
+
+    Returns:
+        int: Difference between the request's start_time and
+            current jiffies in milliseconds.
+ """ + return kernel.jiffies_to_msec(kernel.jiffies - request['start_time']) diff --git a/crash/subsystem/storage/decoders.py b/crash/subsystem/storage/decoders.py index 4bd61eae73a..91f467e728a 100644 --- a/crash/subsystem/storage/decoders.py +++ b/crash/subsystem/storage/decoders.py @@ -7,7 +7,7 @@ EndIOSpecifier = Union[int, str, List[str], gdb.Value, gdb.Symbol, None] -class Decoder(CrashBaseClass): +class Decoder(object): """Decoder objects are used to unwind the storage stack They are relatively lightweight at runtime, meaning that the object @@ -19,7 +19,6 @@ class Decoder(CrashBaseClass): interpreted (bool): Whether the contents of this Decoder have already been interpreted """ - __endio__: EndIOSpecifier = None def __init__(self): diff --git a/crash/subsystem/storage/device_mapper.py b/crash/subsystem/storage/device_mapper.py index cd535906ec7..57238f0ac0c 100644 --- a/crash/subsystem/storage/device_mapper.py +++ b/crash/subsystem/storage/device_mapper.py @@ -3,7 +3,7 @@ import gdb -from crash.infra import CrashBaseClass +from crash.util.symbols import Types from crash.subsystem.storage import block_device_name from crash.subsystem.storage.decoders import Decoder, decode_bio @@ -18,7 +18,7 @@ class ClonedBioReqDecoder(Decoder): request-based device mapper target """ - __types__ = [ 'struct dm_rq_clone_bio_info *' ] + types = Types([ 'struct dm_rq_clone_bio_info *' ]) __endio__ = 'end_clone_bio' description = '{:x} bio: Request-based Device Mapper on {}' @@ -28,7 +28,7 @@ def __init__(self, bio): super().__init__() self.bio = bio if cls._get_clone_bio_rq_info is None: - if 'clone' in cls.dm_rq_clone_bio_info_p_type.target(): + if 'clone' in cls.types.dm_rq_clone_bio_info_p_type.target(): getter = cls._get_clone_bio_rq_info_3_7 else: getter = cls._get_clone_bio_rq_info_old @@ -47,11 +47,11 @@ def __next__(self): @classmethod def _get_clone_bio_rq_info_old(cls, bio): - return bio['bi_private'].cast(cls.dm_rq_clone_bio_info_p_type) + return bio['bi_private'].cast(cls.types.dm_rq_clone_bio_info_p_type) @classmethod def _get_clone_bio_rq_info_3_7(cls, bio): - return container_of(bio, cls.dm_rq_clone_bio_info_p_type, 'clone') + return container_of(bio, cls.types.dm_rq_clone_bio_info_p_type, 'clone') ClonedBioReqDecoder.register() @@ -72,7 +72,7 @@ class ClonedBioDecoder(Decoder): tio (gdb.Value): The struct dm_target_tio for this bio """ - __types__ = [ 'struct dm_target_io *' ] + types = Types([ 'struct dm_target_io *' ]) _get_clone_bio_tio = None __endio__ = 'clone_endio' description = "{:x} bio: device mapper clone: {}[{}] -> {}[{}]" @@ -82,7 +82,7 @@ def __init__(self, bio): self.bio = bio if _get_clone_bio_tio is None: - if 'clone' in cls.dm_target_io_p_type.target(): + if 'clone' in cls.types.dm_target_io_p_type.target(): getter = cls._get_clone_bio_tio_3_15 else: getter = cls._get_clone_bio_tio_old @@ -105,11 +105,11 @@ def __next__(self): @classmethod def _get_clone_bio_tio_old(cls, bio): - return bio['bi_private'].cast(cls.dm_target_io_p_type) + return bio['bi_private'].cast(cls.types.dm_target_io_p_type) @classmethod def _get_clone_bio_tio_3_15(cls, bio): return container_of(bio['bi_private'], - cls.dm_clone_bio_info_p_type, 'clone') + cls.types.dm_clone_bio_info_p_type, 'clone') ClonedBioDecoder.register() diff --git a/crash/types/bitmap.py b/crash/types/bitmap.py index aad3a970d20..be84a585a5c 100644 --- a/crash/types/bitmap.py +++ b/crash/types/bitmap.py @@ -4,310 +4,280 @@ from typing import Iterable import gdb - from math import log -from crash.infra import 
CrashBaseClass, export - -class TypesBitmapClass(CrashBaseClass): - __types__ = [ 'unsigned long' ] - __type_callbacks__ = [ ('unsigned long', 'setup_ulong') ] - - bits_per_ulong = None - - @classmethod - def _check_bitmap_type(cls, bitmap: gdb.Value) -> None: - if ((bitmap.type.code != gdb.TYPE_CODE_ARRAY or - bitmap[0].type.code != cls.unsigned_long_type.code or - bitmap[0].type.sizeof != cls.unsigned_long_type.sizeof) and - (bitmap.type.code != gdb.TYPE_CODE_PTR or - bitmap.type.target().code != cls.unsigned_long_type.code or - bitmap.type.target().sizeof != cls.unsigned_long_type.sizeof)): - raise TypeError("bitmaps are expected to be arrays of unsigned long not `{}'" - .format(bitmap.type)) - - @classmethod - def setup_ulong(cls, gdbtype: gdb.Type) -> None: - cls.bits_per_ulong = gdbtype.sizeof * 8 - - @export - @classmethod - def for_each_set_bit(cls, bitmap: gdb.Value, - size_in_bytes: int=None) -> Iterable[int]: - """ - Yield each set bit in a bitmap - - Args: - bitmap (gdb.Value: - The bitmap to iterate - size_in_bytes (int): The size of the bitmap if the type is - unsigned long *. - - Yields: - int: The position of a bit that is set - """ - cls._check_bitmap_type(bitmap) - - if size_in_bytes is None: - size_in_bytes = bitmap.type.sizeof - - # FIXME: callback not workie? - cls.bits_per_ulong = cls.unsigned_long_type.sizeof * 8 - - size = size_in_bytes * 8 - idx = 0 - bit = 0 - while size > 0: - ulong = bitmap[idx] - - if ulong != 0: - for off in range(min(size, cls.bits_per_ulong)): - if ulong & 1 != 0: - yield bit - bit += 1 - ulong >>= 1 - else: - bit += cls.bits_per_ulong - - size -= cls.bits_per_ulong - idx += 1 - - @classmethod - def _find_first_set_bit(cls, val: gdb.Value) -> int: - r = 1 - - if val == 0: - return 0 - - if (val & 0xffffffff) == 0: - val >>= 32 - r += 32 - - if (val & 0xffff) == 0: - val >>= 16 - r += 16 - - if (val & 0xff) == 0: - val >>= 8 - r += 8 - - if (val & 0xf) == 0: - val >>= 4 - r += 4 - - if (val & 0x3) == 0: - val >>= 2 - r += 2 - - if (val & 0x1) == 0: - val >>= 1 - r += 1 - - return r - - @export - @classmethod - def find_next_zero_bit(cls, bitmap: gdb.Value, start: int, - size_in_bytes: int=None) -> int: - """ - Return the next unset bit in the bitmap starting at position `start', - inclusive. - - Args: - bitmap (gdb.Value: - The bitmap to test - start (int): The bit number to use as a starting position. If - the bit at this position is unset, it will be the first - bit number yielded. - size_in_bytes (int): The size of the bitmap if the type is - unsigned long *. 
- - Returns: - int: The position of the first bit that is unset or 0 if all are set - """ - cls._check_bitmap_type(bitmap) - - if size_in_bytes is None: - size_in_bytes = bitmap.type.sizeof - - elements = size_in_bytes // cls.unsigned_long_type.sizeof - - if start > size_in_bytes << 3: - raise IndexError("Element {} is out of range ({} elements)" - .format(start, elements)) - - element = start // (cls.unsigned_long_type.sizeof << 3) - offset = start % (cls.unsigned_long_type.sizeof << 3) - - for n in range(element, elements): - item = ~bitmap[n] - if item == 0: - continue - - if offset > 0: - item &= ~((1 << offset) - 1) - - v = cls._find_first_set_bit(item) - if v > 0: - ret = n * (cls.unsigned_long_type.sizeof << 3) + v - assert(ret >= start) - return ret - - offset = 0 - - return 0 - - @export - @classmethod - def find_first_zero_bit(cls, bitmap: gdb.Value, - size_in_bytes: int=None) -> int: - """ - Return the first unset bit in the bitmap - - Args: - bitmap (gdb.Value: - The bitmap to scan - start (int): The bit number to use as a starting position. If - the bit at this position is unset, it will be the first - bit number yielded. - - Returns: - int: The position of the first bit that is unset - """ - return cls.find_next_zero_bit(bitmap, 0, size_in_bytes) - - @export - @classmethod - def find_next_set_bit(cls, bitmap: gdb.Value, start: int, - size_in_bytes: int=None) -> int: - """ - Return the next set bit in the bitmap starting at position `start', - inclusive. - - Args: - bitmap (gdb.Value: - The bitmap to scan - start (int): The bit number to use as a starting position. If - the bit at this position is unset, it will be the first - bit number yielded. - size_in_bytes (int): The size of the bitmap if the type is - unsigned long *. - - Returns: - int: The position of the next bit that is set, or 0 if all are - unset - """ - cls._check_bitmap_type(bitmap) - - if size_in_bytes is None: - size_in_bytes = bitmap.type.sizeof - - elements = size_in_bytes // cls.unsigned_long_type.sizeof - - if start > size_in_bytes << 3: - raise IndexError("Element {} is out of range ({} elements)" - .format(start, elements)) - - element = start // (cls.unsigned_long_type.sizeof << 3) - offset = start % (cls.unsigned_long_type.sizeof << 3) - - for n in range(element, elements): - if bitmap[n] == 0: - continue - - item = bitmap[n] - if offset > 0: - item &= ~((1 << offset) - 1) - - v = cls._find_first_set_bit(item) - if v > 0: - ret = n * (cls.unsigned_long_type.sizeof << 3) + v - assert(ret >= start) - return ret - - offset = 0 - +from crash.util.symbols import Types + +types = Types('unsigned long') + +def _check_bitmap_type(bitmap: gdb.Value) -> None: + if ((bitmap.type.code != gdb.TYPE_CODE_ARRAY or + bitmap[0].type.code != types.unsigned_long_type.code or + bitmap[0].type.sizeof != types.unsigned_long_type.sizeof) and + (bitmap.type.code != gdb.TYPE_CODE_PTR or + bitmap.type.target().code != types.unsigned_long_type.code or + bitmap.type.target().sizeof != types.unsigned_long_type.sizeof)): + raise TypeError("bitmaps are expected to be arrays of unsigned long not `{}'" + .format(bitmap.type)) + +def for_each_set_bit(bitmap: gdb.Value, + size_in_bytes: int=None) -> Iterable[int]: + """ + Yield each set bit in a bitmap + + Args: + bitmap (gdb.Value: + The bitmap to iterate + size_in_bytes (int): The size of the bitmap if the type is + unsigned long *. 
+ + Yields: + int: The position of a bit that is set + """ + _check_bitmap_type(bitmap) + + if size_in_bytes is None: + size_in_bytes = bitmap.type.sizeof + + bits_per_ulong = types.unsigned_long_type.sizeof * 8 + + size = size_in_bytes * 8 + idx = 0 + bit = 0 + while size > 0: + ulong = bitmap[idx] + + if ulong != 0: + for off in range(min(size, bits_per_ulong)): + if ulong & 1 != 0: + yield bit + bit += 1 + ulong >>= 1 + else: + bit += bits_per_ulong + + size -= bits_per_ulong + idx += 1 + +def _find_first_set_bit(val: gdb.Value) -> int: + r = 1 + + if val == 0: return 0 - @export - @classmethod - def find_first_set_bit(cls, bitmap: gdb.Value, - size_in_bytes: int=None) -> int: - """ - Return the first set bit in the bitmap - - Args: - bitmap (gdb.Value: - The bitmap to scan - size_in_bytes (int): The size of the bitmap if the type is - unsigned long *. + if (val & 0xffffffff) == 0: + val >>= 32 + r += 32 + + if (val & 0xffff) == 0: + val >>= 16 + r += 16 + + if (val & 0xff) == 0: + val >>= 8 + r += 8 + + if (val & 0xf) == 0: + val >>= 4 + r += 4 + + if (val & 0x3) == 0: + val >>= 2 + r += 2 + + if (val & 0x1) == 0: + val >>= 1 + r += 1 + + return r + +def find_next_zero_bit(bitmap: gdb.Value, start: int, + size_in_bytes: int=None) -> int: + """ + Return the next unset bit in the bitmap starting at position `start', + inclusive. + + Args: + bitmap (gdb.Value: + The bitmap to test + start (int): The bit number to use as a starting position. If + the bit at this position is unset, it will be the first + bit number yielded. + size_in_bytes (int): The size of the bitmap if the type is + unsigned long *. + + Returns: + int: The position of the first bit that is unset or 0 if all are set + """ + _check_bitmap_type(bitmap) - Returns: - int: The position of the first bit that is set, or 0 if all are - unset - """ - return cls.find_next_set_bit(bitmap, 0, size_in_bytes) + if size_in_bytes is None: + size_in_bytes = bitmap.type.sizeof + + elements = size_in_bytes // types.unsigned_long_type.sizeof + + if start > size_in_bytes << 3: + raise IndexError("Element {} is out of range ({} elements)" + .format(start, elements)) + + element = start // (types.unsigned_long_type.sizeof << 3) + offset = start % (types.unsigned_long_type.sizeof << 3) + + for n in range(element, elements): + item = ~bitmap[n] + if item == 0: + continue + + if offset > 0: + item &= ~((1 << offset) - 1) + + v = _find_first_set_bit(item) + if v > 0: + ret = n * (types.unsigned_long_type.sizeof << 3) + v + assert(ret >= start) + return ret + + offset = 0 + + return 0 + +def find_first_zero_bit(bitmap: gdb.Value, size_in_bytes: int=None) -> int: + """ + Return the first unset bit in the bitmap + + Args: + bitmap (gdb.Value: + The bitmap to scan + start (int): The bit number to use as a starting position. If + the bit at this position is unset, it will be the first + bit number yielded. + + Returns: + int: The position of the first bit that is unset + """ + return find_next_zero_bit(bitmap, 0, size_in_bytes) + +def find_next_set_bit(bitmap: gdb.Value, start: int, + size_in_bytes: int=None) -> int: + """ + Return the next set bit in the bitmap starting at position `start', + inclusive. + + Args: + bitmap (gdb.Value: + The bitmap to scan + start (int): The bit number to use as a starting position. If + the bit at this position is unset, it will be the first + bit number yielded. + size_in_bytes (int): The size of the bitmap if the type is + unsigned long *. 
+ + Returns: + int: The position of the next bit that is set, or 0 if all are unset + """ + _check_bitmap_type(bitmap) + + if size_in_bytes is None: + size_in_bytes = bitmap.type.sizeof + + elements = size_in_bytes // types.unsigned_long_type.sizeof + + if start > size_in_bytes << 3: + raise IndexError("Element {} is out of range ({} elements)" + .format(start, elements)) + + element = start // (types.unsigned_long_type.sizeof << 3) + offset = start % (types.unsigned_long_type.sizeof << 3) + + for n in range(element, elements): + if bitmap[n] == 0: + continue + + item = bitmap[n] + if offset > 0: + item &= ~((1 << offset) - 1) + + v = _find_first_set_bit(item) + if v > 0: + ret = n * (types.unsigned_long_type.sizeof << 3) + v + assert(ret >= start) + return ret + + offset = 0 - @classmethod - def _find_last_set_bit(cls, val: gdb.Value) -> int: - r = cls.unsigned_long_type.sizeof << 3 + return 0 + +def find_first_set_bit(bitmap: gdb.Value, size_in_bytes: int=None) -> int: + """ + Return the first set bit in the bitmap - if val == 0: - return 0 + Args: + bitmap (gdb.Value: + The bitmap to scan + size_in_bytes (int): The size of the bitmap if the type is + unsigned long *. + + Returns: + int: The position of the first bit that is set, or 0 if all are unset + """ + return find_next_set_bit(bitmap, 0, size_in_bytes) + +def _find_last_set_bit(val: gdb.Value) -> int: + r = types.unsigned_long_type.sizeof << 3 + + if val == 0: + return 0 - if (val & 0xffffffff00000000) == 0: - val <<= 32 - r -= 32 + if (val & 0xffffffff00000000) == 0: + val <<= 32 + r -= 32 - if (val & 0xffff000000000000) == 0: - val <<= 16 - r -= 16 + if (val & 0xffff000000000000) == 0: + val <<= 16 + r -= 16 - if (val & 0xff00000000000000) == 0: - val <<= 8 - r -= 8 + if (val & 0xff00000000000000) == 0: + val <<= 8 + r -= 8 - if (val & 0xf000000000000000) == 0: - val <<= 4 - r -= 4 + if (val & 0xf000000000000000) == 0: + val <<= 4 + r -= 4 - if (val & 0xc000000000000000) == 0: - val <<= 2 - r -= 2 + if (val & 0xc000000000000000) == 0: + val <<= 2 + r -= 2 - if (val & 0x8000000000000000) == 0: - val <<= 1 - r -= 1 + if (val & 0x8000000000000000) == 0: + val <<= 1 + r -= 1 - return r + return r - @export - @classmethod - def find_last_set_bit(cls, bitmap: gdb.Value, - size_in_bytes: int=None) -> int: - """ - Return the last set bit in the bitmap +def find_last_set_bit(bitmap: gdb.Value, size_in_bytes: int=None) -> int: + """ + Return the last set bit in the bitmap - Args: - bitmap (gdb.Value: - The bitmap to scan + Args: + bitmap (gdb.Value: + The bitmap to scan - Returns: - int: The position of the last bit that is set, or 0 if all are unset - """ - cls._check_bitmap_type(bitmap) + Returns: + int: The position of the last bit that is set, or 0 if all are unset + """ + _check_bitmap_type(bitmap) - if size_in_bytes is None: - size_in_bytes = bitmap.type.sizeof + if size_in_bytes is None: + size_in_bytes = bitmap.type.sizeof - elements = size_in_bytes // cls.unsigned_long_type.sizeof + elements = size_in_bytes // types.unsigned_long_type.sizeof - for n in range(elements - 1, -1, -1): - if bitmap[n] == 0: - continue + for n in range(elements - 1, -1, -1): + if bitmap[n] == 0: + continue - v = cls._find_last_set_bit(bitmap[n]) - if v > 0: - return n * (cls.unsigned_long_type.sizeof << 3) + v + v = _find_last_set_bit(bitmap[n]) + if v > 0: + return n * (types.unsigned_long_type.sizeof << 3) + v - return 0 + return 0 diff --git a/crash/types/classdev.py b/crash/types/classdev.py index 3b82a5a7cb0..885872651ae 100644 --- 
a/crash/types/classdev.py +++ b/crash/types/classdev.py @@ -2,15 +2,14 @@ # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: import gdb -from crash.infra import CrashBaseClass, export + from crash.types.klist import klist_for_each_entry +from crash.util.symbols import Types -class ClassDeviceClass(CrashBaseClass): - __types__ = [ 'struct device' ] +types = Types(['struct device']) - @export - def for_each_class_device(self, class_struct, subtype=None): - klist = class_struct['p']['klist_devices'] - for dev in klist_for_each_entry(klist, self.device_type, 'knode_class'): - if subtype is None or int(subtype) == int(dev['type']): - yield dev +def for_each_class_device(class_struct, subtype=None): + klist = class_struct['p']['klist_devices'] + for dev in klist_for_each_entry(klist, types.device_type, 'knode_class'): + if subtype is None or int(subtype) == int(dev['type']): + yield dev diff --git a/crash/types/cpu.py b/crash/types/cpu.py index 1a83dee38e2..841619c748b 100644 --- a/crash/types/cpu.py +++ b/crash/types/cpu.py @@ -1,8 +1,10 @@ #!/usr/bin/python3 # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +from typing import Iterable, List + import gdb -from crash.infra import CrashBaseClass, export +from crash.util.symbols import SymbolCallbacks from crash.types.bitmap import for_each_set_bit from crash.exceptions import DelayedAttributeError @@ -10,69 +12,73 @@ # this wraps no particular type, rather it's a placeholder for # functions to iterate over online cpu's etc. -class TypesCPUClass(CrashBaseClass): - __symbol_callbacks__ = [ ('cpu_online_mask', '_setup_online_mask'), - ('__cpu_online_mask', '_setup_online_mask'), - ('cpu_possible_mask', '_setup_possible_mask'), - ('__cpu_possible_mask', '_setup_possible_mask') ] +class TypesCPUClass(object): cpus_online: List[int] = list() cpus_possible: List[int] = list() + cpu_online_mask: gdb.Value = None + cpu_possible_mask: gdb.Value = None + @classmethod def _setup_online_mask(cls, symbol: gdb.Symbol) -> None: cls.cpu_online_mask = symbol.value() bits = cls.cpu_online_mask["bits"] cls.cpus_online = list(for_each_set_bit(bits)) - @export - def for_each_online_cpu(self) -> Iterable[int]: - """ - Yield CPU numbers of all online CPUs - - Yields: - int: Number of a possible CPU location - """ - for cpu in self.cpus_online: - yield cpu - - @export - def highest_online_cpu_nr(self) -> None: - """ - Return The highest online CPU number - - Returns: - int: The highest online CPU number - """ - if not TypesCPUClass.cpus_online : - raise DelayedAttributeError('cpus_online') - return self.cpus_online[-1] - @classmethod def _setup_possible_mask(cls, cpu_mask: gdb.Symbol) -> None: cls.cpu_possible_mask = cpu_mask.value() bits = cls.cpu_possible_mask["bits"] cls.cpus_possible = list(for_each_set_bit(bits)) - @export - def for_each_possible_cpu(self) -> Iterable[int]: - """ - Yield CPU numbers of all possible CPUs - - Yields: - int: Number of a possible CPU location - """ - for cpu in self.cpus_possible: - yield cpu - - @export - def highest_possible_cpu_nr(self) -> int: - """ - Return The highest possible CPU number - - Returns: - int: The highest possible CPU number - """ - if not self.cpus_possible: - raise DelayedAttributeError('cpus_possible') - return self.cpus_possible[-1] +def for_each_online_cpu() -> Iterable[int]: + """ + Yield CPU numbers of all online CPUs + + Yields: + int: Number of a possible CPU location + """ + for cpu in TypesCPUClass.cpus_online: + yield cpu + +def highest_online_cpu_nr() -> int: + """ + Return The 
highest online CPU number + + Returns: + int: The highest online CPU number + """ + if not TypesCPUClass.cpus_online: + raise DelayedAttributeError('cpus_online') + return TypesCPUClass.cpus_online[-1] + +def for_each_possible_cpu() -> Iterable[int]: + """ + Yield CPU numbers of all possible CPUs + + Yields: + int: Number of a possible CPU location + """ + for cpu in TypesCPUClass.cpus_possible: + yield cpu + +def highest_possible_cpu_nr() -> int: + """ + Return The highest possible CPU number + + Returns: + int: The highest possible CPU number + """ + if not TypesCPUClass.cpus_possible: + raise DelayedAttributeError('cpus_possible') + return TypesCPUClass.cpus_possible[-1] + +symbol_cbs = SymbolCallbacks([ ('cpu_online_mask', + TypesCPUClass._setup_online_mask), + ('__cpu_online_mask', + TypesCPUClass._setup_online_mask), + ('cpu_possible_mask', + TypesCPUClass._setup_possible_mask), + ('__cpu_possible_mask', + TypesCPUClass._setup_possible_mask) ]) diff --git a/crash/types/klist.py b/crash/types/klist.py index 7ccb49fa7b3..e58b074fa86 100644 --- a/crash/types/klist.py +++ b/crash/types/klist.py @@ -5,35 +5,34 @@ from crash.util import container_of from crash.types.list import list_for_each_entry from crash.exceptions import CorruptedError -from crash.infra import CrashBaseClass, export + +from crash.util.symbols import Types + +types = Types([ 'struct klist_node', 'struct klist' ]) class KlistCorruptedError(CorruptedError): pass -class TypesKlistClass(CrashBaseClass): - __types__ = [ 'struct klist_node', 'struct klist' ] - - @export - def klist_for_each(self, klist): - if klist.type == self.klist_type.pointer(): - klist = klist.dereference() - elif klist.type != self.klist_type: - raise TypeError("klist must be gdb.Value representing 'struct klist' or 'struct klist *' not {}" - .format(klist.type)) - if klist.type is not self.klist_type: - self.klist_type = klist.type +def klist_for_each(klist): + if klist.type == types.klist_type.pointer(): + klist = klist.dereference() + elif klist.type != types.klist_type: + raise TypeError("klist must be gdb.Value representing 'struct klist' or 'struct klist *' not {}" + .format(klist.type)) + if klist.type is not types.klist_type: + types.override('struct klist', klist.type) - for node in list_for_each_entry(klist['k_list'], - self.klist_node_type, 'n_node'): - if node['n_klist'] != klist.address: - raise KlistCorruptedError("Corrupted") - yield node + for node in list_for_each_entry(klist['k_list'], + types.klist_node_type, 'n_node'): + if node['n_klist'] != klist.address: + raise KlistCorruptedError("Corrupted") + yield node - @export - def klist_for_each_entry(self, klist, gdbtype, member): - for node in klist_for_each(klist): - if node.type != self.klist_node_type: - raise TypeError("Type {} found. Expected {}.".format(node.type), self.klist_node_type.pointer()) - if node.type is not self.klist_node_type: - self.klist_node_type = node.type - yield container_of(node, gdbtype, member) +def klist_for_each_entry(klist, gdbtype, member): + for node in klist_for_each(klist): + if node.type != types.klist_node_type: + raise TypeError("Type {} found. Expected {}." 
+ .format(node.type), types.klist_node_type.pointer()) + if node.type is not types.klist_node_type: + types.override('struct klist_node', node.type) + yield container_of(node, gdbtype, member) diff --git a/crash/types/list.py b/crash/types/list.py index 53df2c5cfd1..54c384717b1 100644 --- a/crash/types/list.py +++ b/crash/types/list.py @@ -3,7 +3,7 @@ import gdb from crash.util import container_of -from crash.infra import CrashBaseClass, export +from crash.util.symbols import Types class ListError(Exception): pass @@ -14,117 +14,112 @@ class CorruptListError(ListError): class ListCycleError(CorruptListError): pass -class TypesListClass(CrashBaseClass): - __types__ = [ 'struct list_head' ] - - @export - def list_for_each(self, list_head, include_head=False, reverse=False, - print_broken_links=True, exact_cycles=False): - pending_exception = None - if isinstance(list_head, gdb.Symbol): - list_head = list_head.value() - if not isinstance(list_head, gdb.Value): - raise TypeError("list_head must be gdb.Value representing 'struct list_head' or a 'struct list_head *' not {}" - .format(type(list_head).__name__)) - if list_head.type == self.list_head_type.pointer(): - list_head = list_head.dereference() - elif list_head.type != self.list_head_type: - raise TypeError("Must be struct list_head not {}" - .format(str(list_head.type))) - if list_head.type is not self.list_head_type: - self.list_head_type = list_head.type - fast = None - if int(list_head.address) == 0: - raise CorruptListError("list_head is NULL pointer.") - - next_ = 'next' - prev_ = 'prev' - if reverse: - next_ = 'prev' - prev_ = 'next' - +types = Types([ 'struct list_head' ]) + +def list_for_each(list_head, include_head=False, reverse=False, + print_broken_links=True, exact_cycles=False): + pending_exception = None + if isinstance(list_head, gdb.Symbol): + list_head = list_head.value() + if not isinstance(list_head, gdb.Value): + raise TypeError("list_head must be gdb.Value representing 'struct list_head' or a 'struct list_head *' not {}" + .format(type(list_head).__name__)) + if list_head.type == types.list_head_type.pointer(): + list_head = list_head.dereference() + elif list_head.type != types.list_head_type: + raise TypeError("Must be struct list_head not {}" + .format(str(list_head.type))) + if list_head.type is not types.list_head_type: + types.override('struct list_head', list_head.type) + fast = None + if int(list_head.address) == 0: + raise CorruptListError("list_head is NULL pointer.") + + next_ = 'next' + prev_ = 'prev' + if reverse: + next_ = 'prev' + prev_ = 'next' + + if exact_cycles: + visited = set() + + if include_head: + yield list_head.address + + try: + nxt = list_head[next_] + prev = list_head + if int(nxt) == 0: + raise CorruptListError("{} pointer is NULL".format(next_)) + node = nxt.dereference() + except gdb.error as e: + raise BufferError("Failed to read list_head {:#x}: {}" + .format(int(list_head.address), str(e))) + + while node.address != list_head.address: if exact_cycles: - visited = set() - - if include_head: - yield list_head.address - + if int(node.address) in visited: + raise ListCycleError("Cycle in list detected.") + else: + visited.add(int(node.address)) try: - nxt = list_head[next_] - prev = list_head - if int(nxt) == 0: - raise CorruptListError("{} pointer is NULL".format(next_)) - node = nxt.dereference() + if int(prev.address) != int(node[prev_]): + error = ("broken {} link {:#x} -{}-> {:#x} -{}-> {:#x}" + .format(prev_, int(prev.address), next_, int(node.address), + prev_, int(node[prev_]))) 
+ pending_exception = CorruptListError(error) + if print_broken_links: + print(error) + # broken prev link means there might be a cycle that + # does not include the initial head, so start detecting + # cycles + if not exact_cycles and fast is not None: + fast = node + nxt = node[next_] + # only yield after trying to read something from the node, no + # point in giving out bogus list elements + yield node.address except gdb.error as e: - raise BufferError("Failed to read list_head {:#x}: {}" - .format(int(list_head.address), str(e))) - - while node.address != list_head.address: - if exact_cycles: - if int(node.address) in visited: - raise ListCycleError("Cycle in list detected.") - else: - visited.add(int(node.address)) - try: - if int(prev.address) != int(node[prev_]): - error = ("broken {} link {:#x} -{}-> {:#x} -{}-> {:#x}" - .format(prev_, int(prev.address), next_, int(node.address), - prev_, int(node[prev_]))) - pending_exception = CorruptListError(error) - if print_broken_links: - print(error) - # broken prev link means there might be a cycle that - # does not include the initial head, so start detecting - # cycles - if not exact_cycles and fast is not None: - fast = node - nxt = node[next_] - # only yield after trying to read something from the node, no - # point in giving out bogus list elements - yield node.address - except gdb.error as e: - raise BufferError("Failed to read list_head {:#x} in list {:#x}: {}" - .format(int(node.address), int(list_head.address), str(e))) - - try: - if fast is not None: - # are we detecting cycles? advance fast 2 times and compare - # each with our current node (Floyd's Tortoise and Hare - # algorithm) - for i in range(2): - fast = fast[next_].dereference() - if node.address == fast.address: - raise ListCycleError("Cycle in list detected.") - except gdb.error: - # we hit an unreadable element, so just stop detecting cycles - # and the slow iterator will hit it as well - fast = None - - prev = node - if int(nxt) == 0: - raise CorruptListError("{} -> {} pointer is NULL" - .format(node.address, next_)) - node = nxt.dereference() - - if pending_exception is not None: - raise pending_exception - - @export - def list_for_each_entry(self, list_head, gdbtype, member, - include_head=False, reverse=False, - exact_cycles=False): - for node in list_for_each(list_head, include_head=include_head, - reverse=reverse, exact_cycles=exact_cycles): - if node.type != self.list_head_type.pointer(): - raise TypeError("Type {} found. Expected struct list_head *." - .format(str(node.type))) - yield container_of(node, gdbtype, member) - - @export - def list_empty(self, list_head): - addr = int(list_head.address) - if list_head.type.code == gdb.TYPE_CODE_PTR: - addr = int(list_head) - - return addr == int(list_head['next']) + raise BufferError("Failed to read list_head {:#x} in list {:#x}: {}" + .format(int(node.address), int(list_head.address), str(e))) + try: + if fast is not None: + # are we detecting cycles? 
advance fast 2 times and compare + # each with our current node (Floyd's Tortoise and Hare + # algorithm) + for i in range(2): + fast = fast[next_].dereference() + if node.address == fast.address: + raise ListCycleError("Cycle in list detected.") + except gdb.error: + # we hit an unreadable element, so just stop detecting cycles + # and the slow iterator will hit it as well + fast = None + + prev = node + if int(nxt) == 0: + raise CorruptListError("{} -> {} pointer is NULL" + .format(node.address, next_)) + node = nxt.dereference() + + if pending_exception is not None: + raise pending_exception + +def list_for_each_entry(list_head, gdbtype, member, + include_head=False, reverse=False, + exact_cycles=False): + for node in list_for_each(list_head, include_head=include_head, + reverse=reverse, exact_cycles=exact_cycles): + if node.type != types.list_head_type.pointer(): + raise TypeError("Type {} found. Expected struct list_head *." + .format(str(node.type))) + yield container_of(node, gdbtype, member) + +def list_empty(list_head): + addr = int(list_head.address) + if list_head.type.code == gdb.TYPE_CODE_PTR: + addr = int(list_head) + + return addr == int(list_head['next']) diff --git a/crash/types/module.py b/crash/types/module.py index 3a17b245345..d787f9f538f 100644 --- a/crash/types/module.py +++ b/crash/types/module.py @@ -4,51 +4,44 @@ from typing import Iterable, Tuple import gdb -from crash.infra import CrashBaseClass, export from crash.types.list import list_for_each_entry +from crash.util.symbols import Symvals, Types -class Module(CrashBaseClass): - __symvals__ = [ 'modules'] - __types__ = [ 'struct module' ] - - @classmethod - @export - def for_each_module(cls) -> Iterable[gdb.Value]: - """ - Iterate over each module in the modules list - - Yields: - gdb.Value(): The next module on the list - - """ - for module in list_for_each_entry(cls.modules, cls.module_type, - 'list'): - yield module - - @classmethod - @export - def for_each_module_section(cls, module: gdb.Value) \ - -> Iterable[Tuple[str, int]]: - """ - Iterate over each ELF section in a loaded module - - This routine iterates over the 'sect_attrs' member of the - 'struct module' already in memory. For ELF sections from the - module at rest, use pyelftools on the module file. - - Args: - module (gdb.Value): The struct module to iterate - - Yields: - (str, int): A 2-tuple containing the name and address - of the section - """ - attrs = module['sect_attrs'] - - for sec in range(0, attrs['nsections']): - attr = attrs['attrs'][sec] - name = attr['name'].string() - if name == '.text': - continue - - yield (name, int(attr['address'])) +symvals = Symvals([ 'modules' ]) +types = Types([ 'struct module' ]) + +def for_each_module() -> Iterable[gdb.Value]: + """ + Iterate over each module in the modules list + + Yields: + gdb.Value(): The next module on the list + + """ + for module in list_for_each_entry(symvals.modules, types.module_type, + 'list'): + yield module + +def for_each_module_section(module: gdb.Value) -> Iterable[Tuple[str, int]]: + """ + Iterate over each ELF section in a loaded module + + This routine iterates over the 'sect_attrs' member of the 'struct module' + already in memory. For ELF sections from the module at rest, use + pyelftools on the module file. 
+ + Args: + module (gdb.Value): The struct module to iterate + + Yields: + (str, int): A 2-tuple containing the name and address of the section + """ + attrs = module['sect_attrs'] + + for sec in range(0, attrs['nsections']): + attr = attrs['attrs'][sec] + name = attr['name'].string() + if name == '.text': + continue + + yield (name, int(attr['address'])) diff --git a/crash/types/node.py b/crash/types/node.py index 1e460d9cbb0..34db2196def 100644 --- a/crash/types/node.py +++ b/crash/types/node.py @@ -2,30 +2,26 @@ # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: import gdb -from crash.infra import CrashBaseClass, export +from crash.util.symbols import Symbols, Symvals, Types, SymbolCallbacks from crash.util import container_of, find_member_variant, get_symbol_value from crash.types.percpu import get_percpu_var from crash.types.bitmap import for_each_set_bit import crash.types.zone -class TypesNodeUtilsClass(CrashBaseClass): - __symbols__ = [ 'numa_node' ] - __symvals__ = [ 'numa_cpu_lookup_table' ] +symbols = Symbols([ 'numa_node' ]) +symvals = Symvals([ 'numa_cpu_lookup_table', 'node_data' ]) +types = Types([ 'pg_data_t', 'struct zone' ]) - @export - def numa_node_id(self, cpu): - if gdb.current_target().arch.name() == "powerpc:common64": - return int(self.numa_cpu_lookup_table[cpu]) - else: - return int(get_percpu_var(self.numa_node, cpu)) - -class Node(CrashBaseClass): - __types__ = [ 'pg_data_t', 'struct zone' ] +def numa_node_id(cpu): + if gdb.current_target().arch.name() == "powerpc:common64": + return int(symvals.numa_cpu_lookup_table[cpu]) + else: + return int(get_percpu_var(symbols.numa_node, cpu)) +class Node(object): @staticmethod def from_nid(nid): - node_data = gdb.lookup_global_symbol("node_data").value() - return Node(node_data[nid].dereference()) + return Node(symvals.node_data[nid].dereference()) def for_each_zone(self): node_zones = self.gdb_obj["node_zones"] @@ -37,25 +33,21 @@ def for_each_zone(self): # FIXME: gdb seems to lose the alignment padding with plain # node_zones[zid], so we have to simulate it using zone_type.sizeof # which appears to be correct - zone = gdb.Value(ptr).cast(self.zone_type.pointer()).dereference() + zone = gdb.Value(ptr).cast(types.zone_type.pointer()).dereference() yield crash.types.zone.Zone(zone, zid) - ptr += self.zone_type.sizeof + ptr += types.zone_type.sizeof def __init__(self, obj): self.gdb_obj = obj -class Nodes(CrashBaseClass): - - __symbol_callbacks__ = [ ('node_states', 'setup_node_states') ] - +class NodeStates(object): nids_online = None nids_possible = None @classmethod def setup_node_states(cls, node_states_sym): - - node_states = node_states_sym.value() + node_states = node_states_sym.value() enum_node_states = gdb.lookup_type("enum node_states") N_POSSIBLE = enum_node_states["N_POSSIBLE"].enumval @@ -67,23 +59,21 @@ def setup_node_states(cls, node_states_sym): bits = node_states[N_ONLINE]["bits"] cls.nids_online = list(for_each_set_bit(bits)) - @export - def for_each_nid(cls): - for nid in cls.nids_possible: - yield nid - - @export - def for_each_online_nid(cls): - for nid in cls.nids_online: - yield nid - - @export - def for_each_node(cls): - for nid in cls.for_each_nid(): - yield Node.from_nid(nid) - - @export - def for_each_online_node(cls): - for nid in cls.for_each_online_nid(): - yield Node.from_nid(nid) +symbol_cbs = SymbolCallbacks([('node_states', NodeStates.setup_node_states)]) + +def for_each_nid(): + for nid in NodeStates.nids_possible: + yield nid + +def for_each_online_nid(): + for nid in 
NodeStates.nids_online: + yield nid + +def for_each_node(): + for nid in for_each_nid(): + yield Node.from_nid(nid) + +def for_each_online_node(): + for nid in for_each_online_nid(): + yield Node.from_nid(nid) diff --git a/crash/types/page.py b/crash/types/page.py index 54487aa2bd7..2ea07853e76 100644 --- a/crash/types/page.py +++ b/crash/types/page.py @@ -3,28 +3,18 @@ from math import log, ceil import gdb -import types -from crash.infra import CrashBaseClass, export from crash.util import container_of, find_member_variant +from crash.util.symbols import Types, Symvals, TypeCallbacks, SymbolCallbacks from crash.cache.syscache import config #TODO debuginfo won't tell us, depends on version? PAGE_MAPPING_ANON = 1 -class Page(CrashBaseClass): - __types__ = [ 'unsigned long', 'struct page', 'enum pageflags', - 'enum zone_type', 'struct mem_section'] - __type_callbacks__ = [ ('struct page', 'setup_page_type' ), - ('enum pageflags', 'setup_pageflags' ), - ('enum zone_type', 'setup_zone_type' ), - ('struct mem_section', 'setup_mem_section') ] - __symvals__ = [ 'mem_section' ] - # TODO: this should better be generalized to some callback for - # "config is available" without refering to the symbol name here - __symbol_callbacks__ = [ ('kernel_config_data', 'setup_nodes_width' ), - ('vmemmap_base', 'setup_vmemmap_base' ), - ('page_offset_base', 'setup_directmap_base' ) ] +types = Types([ 'unsigned long', 'struct page', 'enum pageflags', + 'enum zone_type', 'struct mem_section']) +symvals = Symvals([ 'mem_section' ]) +class Page(object): slab_cache_name = None slab_page_name = None compound_head_name = None @@ -85,10 +75,10 @@ def pfn_to_page(cls, pfn): section_nr = pfn >> (cls.SECTION_SIZE_BITS - cls.PAGE_SHIFT) root_idx = section_nr / cls.SECTIONS_PER_ROOT offset = section_nr & (cls.SECTIONS_PER_ROOT - 1) - section = cls.mem_section[root_idx][offset] + section = symvals.mem_section[root_idx][offset] pagemap = section["section_mem_map"] & ~3 - return (pagemap.cast(cls.page_type.pointer()) + pfn).dereference() + return (pagemap.cast(types.page_type.pointer()) + pfn).dereference() else: return cls.vmemmap[pfn] @@ -110,7 +100,7 @@ def setup_vmemmap_base(cls, symbol): # setup_page_type() was first and used the hardcoded initial value, # we have to update if cls.vmemmap is not None: - cls.vmemmap = gdb.Value(cls.vmemmap_base).cast(cls.page_type.pointer()) + cls.vmemmap = gdb.Value(cls.vmemmap_base).cast(types.page_type.pointer()) @classmethod def setup_directmap_base(cls, symbol): @@ -132,7 +122,7 @@ def setup_nodes_width(cls, symbol): cls.NODES_WIDTH = 8 # piggyback on this callback because type callback doesn't seem to work # for unsigned long - cls.BITS_PER_LONG = cls.unsigned_long_type.sizeof * 8 + cls.BITS_PER_LONG = types.unsigned_long_type.sizeof * 8 @classmethod def setup_pageflags_finish(cls): @@ -149,8 +139,8 @@ def setup_pageflags_finish(cls): @staticmethod def from_page_addr(addr): - page_ptr = gdb.Value(addr).cast(Page.page_type.pointer()) - pfn = (addr - Page.vmemmap_base) / Page.page_type.sizeof + page_ptr = gdb.Value(addr).cast(types.page_type.pointer()) + pfn = (addr - Page.vmemmap_base) / types.page_type.sizeof return Page(page_ptr.dereference(), pfn) def __is_tail_flagcombo(self): @@ -201,37 +191,41 @@ def compound_head(self): return self return Page.from_page_addr(self.__compound_head()) - + def __init__(self, obj, pfn): self.gdb_obj = obj self.pfn = pfn self.flags = int(obj["flags"]) -class Pages(CrashBaseClass): +type_cbs = TypeCallbacks([ ('struct page', Page.setup_page_type ), + 
('enum pageflags', Page.setup_pageflags ), + ('enum zone_type', Page.setup_zone_type ), + ('struct mem_section', Page.setup_mem_section) ]) + +# TODO: this should better be generalized to some callback for +# "config is available" without refering to the symbol name here +symbol_cbs = SymbolCallbacks([ ('kernel_config_data', Page.setup_nodes_width ), + ('vmemmap_base', Page.setup_vmemmap_base ), + ('page_offset_base', Page.setup_directmap_base ) ]) - @export - def pfn_to_page(cls, pfn): - return Page(Page.pfn_to_page(pfn), pfn) - - @export - def page_from_addr(cls, addr): - pfn = (addr - Page.directmap_base) / Page.PAGE_SIZE - return pfn_to_page(pfn) - - @export - def page_from_gdb_obj(cls, gdb_obj): - pfn = (int(gdb_obj.address) - Page.vmemmap_base) / Page.page_type.sizeof - return Page(gdb_obj, pfn) - - @export - def for_each_page(): - # TODO works only on x86? - max_pfn = int(gdb.lookup_global_symbol("max_pfn").value()) - for pfn in range(max_pfn): - try: - yield Page.pfn_to_page(pfn) - except gdb.error: - # TODO: distinguish pfn_valid() and report failures for those? - pass +def pfn_to_page(pfn): + return Page(Page.pfn_to_page(pfn), pfn) +def page_from_addr(addr): + pfn = (addr - Page.directmap_base) / Page.PAGE_SIZE + return pfn_to_page(pfn) + +def page_from_gdb_obj(gdb_obj): + pfn = (int(gdb_obj.address) - Page.vmemmap_base) / types.page_type.sizeof + return Page(gdb_obj, pfn) + +def for_each_page(): + # TODO works only on x86? + max_pfn = int(gdb.lookup_global_symbol("max_pfn").value()) + for pfn in range(max_pfn): + try: + yield Page.pfn_to_page(pfn) + except gdb.error: + # TODO: distinguish pfn_valid() and report failures for those? + pass diff --git a/crash/types/percpu.py b/crash/types/percpu.py index d360b379713..9a99847d38b 100644 --- a/crash/types/percpu.py +++ b/crash/types/percpu.py @@ -4,8 +4,9 @@ from typing import Dict, Union, List, Tuple import gdb -from crash.infra import CrashBaseClass, export from crash.util import array_size, struct_has_member +from crash.util.symbols import Types, Symvals, MinimalSymvals, MinimalSymbols +from crash.util.symbols import MinimalSymbolCallbacks, SymbolCallbacks from crash.types.list import list_for_each_entry from crash.types.module import for_each_module from crash.exceptions import DelayedAttributeError @@ -20,9 +21,15 @@ class PerCPUError(TypeError): def __init__(self, var): super().__init__(self._fmt.format(var)) +types = Types([ 'void *', 'char *', 'struct pcpu_chunk', + 'struct percpu_counter' ]) +symvals = Symvals([ '__per_cpu_offset', 'pcpu_base_addr', 'pcpu_slot', + 'pcpu_nr_slots', 'pcpu_group_offsets' ]) +msymvals = MinimalSymvals( ['__per_cpu_start', '__per_cpu_end' ]) + SymbolOrValue = Union[gdb.Value, gdb.Symbol] -class TypesPerCPUClass(CrashBaseClass): +class PerCPUState(object): """ Per-cpus come in a few forms: - "Array" of objects @@ -34,15 +41,6 @@ class TypesPerCPUClass(CrashBaseClass): pointer to a percpu but we don't want to dereference a percpu pointer. 
""" - __types__ = [ 'void *', 'char *', 'struct pcpu_chunk', - 'struct percpu_counter' ] - __symvals__ = [ '__per_cpu_offset', 'pcpu_base_addr', 'pcpu_slot', - 'pcpu_nr_slots', 'pcpu_group_offsets' ] - __minsymvals__ = ['__per_cpu_start', '__per_cpu_end' ] - __minsymbol_callbacks__ = [ ('__per_cpu_start', '_setup_per_cpu_size'), - ('__per_cpu_end', '_setup_per_cpu_size') ] - __symbol_callbacks__ = [ ('__per_cpu_offset', '_setup_nr_cpus') ] - _dynamic_offset_cache: List[Tuple[int, int]] = list() _static_ranges: Dict[int, int] = dict() _module_ranges: Dict[int, int] = dict() @@ -52,13 +50,13 @@ class TypesPerCPUClass(CrashBaseClass): @classmethod def _setup_per_cpu_size(cls, symbol: gdb.Symbol) -> None: try: - size = cls.__per_cpu_end - cls.__per_cpu_start + size = msymvals['__per_cpu_end'] - msymvals['__per_cpu_start'] except DelayedAttributeError: pass cls._static_ranges[0] = size - if cls.__per_cpu_start != 0: - cls._static_ranges[cls.__per_cpu_start] = size + if msymvals['__per_cpu_start'] != 0: + cls._static_ranges[msymvals['__per_cpu_start']] = size try: # This is only an optimization so we don't return NR_CPUS values @@ -69,7 +67,7 @@ def _setup_per_cpu_size(cls, symbol: gdb.Symbol) -> None: @classmethod def _setup_nr_cpus(cls, ignored: gdb.Symbol) -> None: - cls._nr_cpus = array_size(cls.__per_cpu_offset) + cls._nr_cpus = array_size(symvals['__per_cpu_offset']) if cls._last_cpu == -1: cls._last_cpu = cls._nr_cpus @@ -84,9 +82,8 @@ def _setup_module_ranges(cls, modules: gdb.Symbol) -> None: size = int(module['percpu_size']) cls._module_ranges[start] = size - @classmethod - def _add_to_offset_cache(cls, base: int, start: int, end: int) -> None: - cls._dynamic_offset_cache.append((base + start, base + end)) + def _add_to_offset_cache(self, base: int, start: int, end: int) -> None: + self._dynamic_offset_cache.append((base + start, base + end)) @classmethod def dump_ranges(cls) -> None: @@ -97,14 +94,12 @@ def dump_ranges(cls) -> None: print(f"static start={start:#x}, size={size:#x}") for (start, size) in cls._module_ranges.items(): print(f"module start={start:#x}, size={size:#x}") - if cls._dynamic_offset_cache: - for (start, end) in cls._dynamic_offset_cache: - print(f"dynamic start={start:#x}, end={end:#x}") + for (start, end) in cls._dynamic_offset_cache: + print(f"dynamic start={start:#x}, end={end:#x}") - @classmethod - def _setup_dynamic_offset_cache_area_map(cls, chunk: gdb.Value) -> None: + def _setup_dynamic_offset_cache_area_map(self, chunk: gdb.Value) -> None: used_is_negative = None - chunk_base = int(chunk["base_addr"]) - int(cls.pcpu_base_addr) + chunk_base = int(chunk["base_addr"]) - int(symvals.pcpu_base_addr) off = 0 start = None @@ -139,11 +134,11 @@ def _setup_dynamic_offset_cache_area_map(cls, chunk: gdb.Value) -> None: start = off else: if start is not None: - cls._add_to_offset_cache(chunk_base, start, off) + self._add_to_offset_cache(chunk_base, start, off) start = None off += abs(val) if start is not None: - cls._add_to_offset_cache(chunk_base, start, off) + self._add_to_offset_cache(chunk_base, start, off) else: for i in range(map_used): off = int(_map[i]) @@ -153,35 +148,33 @@ def _setup_dynamic_offset_cache_area_map(cls, chunk: gdb.Value) -> None: start = off else: if start is not None: - cls._add_to_offset_cache(chunk_base, start, off) + self._add_to_offset_cache(chunk_base, start, off) start = None if start is not None: off = int(_map[map_used]) - 1 - cls._add_to_offset_cache(chunk_base, start, off) + self._add_to_offset_cache(chunk_base, start, off) - 
@classmethod - def _setup_dynamic_offset_cache_bitmap(cls, chunk: gdb.Value) -> None: - group_offset = int(cls.pcpu_group_offsets[0]) + def _setup_dynamic_offset_cache_bitmap(self, chunk: gdb.Value) -> None: + group_offset = int(symvals.pcpu_group_offsets[0]) size_in_bytes = int(chunk['nr_pages']) * Page.PAGE_SIZE size_in_bits = size_in_bytes << 3 start = -1 end = 0 - chunk_base = int(chunk["base_addr"]) - int(cls.pcpu_base_addr) - cls._add_to_offset_cache(chunk_base, 0, size_in_bytes) + chunk_base = int(chunk["base_addr"]) - int(symvals.pcpu_base_addr) + self._add_to_offset_cache(chunk_base, 0, size_in_bytes) - @classmethod - def _setup_dynamic_offset_cache(cls) -> None: + def _setup_dynamic_offset_cache(self) -> None: # TODO: interval tree would be more efficient, but this adds no 3rd # party module dependency... - use_area_map = struct_has_member(cls.pcpu_chunk_type, 'map') - for slot in range(cls.pcpu_nr_slots): - for chunk in list_for_each_entry(cls.pcpu_slot[slot], cls.pcpu_chunk_type, 'list'): + use_area_map = struct_has_member(types.pcpu_chunk_type, 'map') + for slot in range(symvals.pcpu_nr_slots): + for chunk in list_for_each_entry(symvals.pcpu_slot[slot], types.pcpu_chunk_type, 'list'): if use_area_map: - cls._setup_dynamic_offset_cache_area_map(chunk) + self._setup_dynamic_offset_cache_area_map(chunk) else: - cls._setup_dynamic_offset_cache_bitmap(chunk) + self._setup_dynamic_offset_cache_bitmap(chunk) def _is_percpu_var_dynamic(self, var: int) -> bool: try: @@ -203,7 +196,7 @@ def _is_static_percpu_address(self, addr: int) -> bool: for start in self._static_ranges: size = self._static_ranges[start] for cpu in range(0, self._last_cpu): - offset = int(__per_cpu_offset[cpu]) + start + offset = int(symvals['__per_cpu_offset'][cpu]) + start if addr >= offset and addr < offset + size: return True return False @@ -233,7 +226,7 @@ def is_static_percpu_var(self, addr: int) -> bool: # loading debuginfo but not when debuginfo is embedded. 
def _relocated_offset(self, var): addr=int(var) - start = self.__per_cpu_start + start = msymvals['__per_cpu_start'] size = self._static_ranges[start] if addr >= start and addr < start + size: return addr - start @@ -251,13 +244,12 @@ def is_module_percpu_var(self, addr: int) -> bool: :obj:`bool`: Whether this address belongs to a module range """ for start in self._module_ranges: - for cpu in range(0, self.last_cpu): + for cpu in range(0, self._last_cpu): size = self._module_ranges[start] if addr >= start and addr < start + size: return True return False - @export def is_percpu_var(self, var: SymbolOrValue) -> bool: """ Returns whether the provided value or symbol falls within @@ -311,16 +303,15 @@ def _get_percpu_var(self, var: SymbolOrValue, cpu: int) -> gdb.Value: if cpu < 0: raise ValueError("cpu must be >= 0") - addr = self.__per_cpu_offset[cpu] + addr = symvals['__per_cpu_offset'][cpu] if addr > 0: addr += self._relocated_offset(var) val = gdb.Value(addr).cast(var.type) - if var.type != self.void_p_type: + if var.type != types.void_p_type: val = val.dereference() return val - @export def get_percpu_var(self, var: SymbolOrValue, cpu: int) -> gdb.Value: """ Retrieve a per-cpu variable for one or all CPUs @@ -341,7 +332,6 @@ def get_percpu_var(self, var: SymbolOrValue, cpu: int) -> gdb.Value: var = self._resolve_percpu_var(var) return self._get_percpu_var(var, cpu) - @export def get_percpu_vars(self, var: SymbolOrValue, nr_cpus: int=None) -> Dict[int, gdb.Value]: """ @@ -365,7 +355,7 @@ def get_percpu_vars(self, var: SymbolOrValue, :obj:`ValueError`: nr_cpus is <= ``0`` """ if nr_cpus is None: - nr_cpus = self.last_cpu + nr_cpus = self._last_cpu if nr_cpus <= 0: raise ValueError("nr_cpus must be > 0") @@ -377,31 +367,95 @@ def get_percpu_vars(self, var: SymbolOrValue, vals[cpu] = self._get_percpu_var(var, cpu) return vals - @export - def percpu_counter_sum(self, var: SymbolOrValue) -> int: - """ - Returns the sum of a percpu counter +msym_cbs = MinimalSymbolCallbacks([ ('__per_cpu_start', + PerCPUState._setup_per_cpu_size), + ('__per_cpu_end', + PerCPUState._setup_per_cpu_size) ]) +symbol_cbs = SymbolCallbacks([ ('__per_cpu_offset', PerCPUState._setup_nr_cpus), + ('modules', PerCPUState._setup_module_ranges) ]) - Args: - var: The percpu counter to sum. The value must be of type - ``struct percpu_counter``. +_state = PerCPUState() - Returns: - :obj:`int`: the sum of all components of the percpu counter - """ - if isinstance(var, gdb.Symbol): - var = var.value() +def is_percpu_var(var: SymbolOrValue) -> bool: + """ + Returns whether the provided value or symbol falls within + any of the percpu ranges + + Args: + var: The symbol or value to query + + Returns: + :obj:`bool`: Whether the value belongs to any percpu range + """ + return _state.is_percpu_var(var) + +def get_percpu_var(var: SymbolOrValue, cpu: int) -> gdb.Value: + """ + Retrieve a per-cpu variable for a single CPU + + Args: + var: The symbol or value to use to resolve the percpu location + cpu: The cpu for which to return the per-cpu value. + + Returns: + :obj:`gdb.Value`: The value corresponding to the specified CPU. + The value is of the same type passed via var. 
+ + Raises: + :obj:`TypeError`: var is not :obj:`gdb.Symbol` or :obj:`gdb.Value` + :obj:`.PerCPUError`: var does not fall into any percpu range + :obj:`ValueError`: cpu is less than ``0`` + """ + return _state.get_percpu_var(var, cpu) + +def get_percpu_vars(var: SymbolOrValue, + nr_cpus: int=None) -> Dict[int, gdb.Value]: + """ + Retrieve a per-cpu variable for all CPUs + + Args: + var: The symbol or value to use to resolve the percpu location. + nr_cpus (optional): The number of CPUs for which to return results. + ``None`` (or unspecified) will use the highest possible + CPU count. + + Returns: + :obj:`dict`(:obj:`int`, :obj:`gdb.Value`): The values corresponding + to every CPU in a dictionary indexed by CPU number. The type of the + :obj:`gdb.Value` used as the :obj:`dict` value is the same type as + the :obj:`gdb.Value` or :obj:`gdb.Symbol` passed as var. + + Raises: + :obj:`TypeError`: var is not :obj:`gdb.Symbol` or :obj:`gdb.Value` + :obj:`.PerCPUError`: var does not fall into any percpu range + :obj:`ValueError`: nr_cpus is <= ``0`` + """ + return _state.get_percpu_vars(var, nr_cpus) + +def percpu_counter_sum(var: SymbolOrValue) -> int: + """ + Returns the sum of a percpu counter + + Args: + var: The percpu counter to sum. The value must be of type + ``struct percpu_counter``. + + Returns: + :obj:`int`: the sum of all components of the percpu counter + """ + if isinstance(var, gdb.Symbol): + var = var.value() - if not (var.type == self.percpu_counter_type or - (var.type.code == gdb.TYPE_CODE_PTR and - var.type.target() == self.percpu_counter_type)): - raise TypeError("var must be gdb.Symbol or gdb.Value describing `{}' not `{}'" - .format(self.percpu_counter_type, var.type)) + if not (var.type == types.percpu_counter_type or + (var.type.code == gdb.TYPE_CODE_PTR and + var.type.target() == types.percpu_counter_type)): + raise TypeError("var must be gdb.Symbol or gdb.Value describing `{}' not `{}'" + .format(types.percpu_counter_type, var.type)) - total = int(var['count']) + total = int(var['count']) - v = get_percpu_vars(var['counters']) - for cpu in v: - total += int(v[cpu]) + v = get_percpu_vars(var['counters']) + for cpu in v: + total += int(v[cpu]) - return total + return total diff --git a/crash/types/slab.py b/crash/types/slab.py index bbf96a4da91..9b4b312cc9f 100644 --- a/crash/types/slab.py +++ b/crash/types/slab.py @@ -7,8 +7,8 @@ import traceback from crash.util import container_of, find_member_variant, get_symbol_value from crash.util import safe_get_symbol_value +from crash.util.symbols import Types, TypeCallbacks, SymbolCallbacks from crash.types.percpu import get_percpu_var -from crash.infra import CrashBaseClass, export from crash.types.list import list_for_each, list_for_each_entry from crash.types.page import Page, page_from_gdb_obj, page_from_addr from crash.types.node import for_each_nid @@ -34,14 +34,9 @@ def col_error(msg): def col_bold(msg): return "\033[1;37;40m {}\033[0;37;40m ".format(msg) +types = Types([ 'kmem_cache', 'struct kmem_cache' ]) -class Slab(CrashBaseClass): - __types__ = [ 'struct slab', 'struct page', 'kmem_cache', 'kmem_bufctl_t', - 'freelist_idx_t' ] - __type_callbacks__ = [ ('struct page', 'check_page_type'), - ('struct slab', 'check_slab_type'), - ('kmem_bufctl_t', 'check_bufctl_type'), - ('freelist_idx_t', 'check_bufctl_type') ] +class Slab(object): slab_list_head = None page_slab = None @@ -90,7 +85,7 @@ def from_list_head(cls, list_head, kmem_cache): def __add_free_obj_by_idx(self, idx): objs_per_slab = self.kmem_cache.objs_per_slab 
bufsize = self.kmem_cache.buffer_size - + if (idx >= objs_per_slab): self.__error(": free object index %d overflows %d" % (idx, objs_per_slab)) @@ -102,13 +97,13 @@ def __add_free_obj_by_idx(self, idx): return False else: self.free.add(obj_addr) - + return True def __populate_free(self): if self.free: return - + self.free = set() bufsize = self.kmem_cache.buffer_size objs_per_slab = self.kmem_cache.objs_per_slab @@ -142,7 +137,7 @@ def __populate_free(self): def find_obj(self, addr): bufsize = self.kmem_cache.buffer_size objs_per_slab = self.kmem_cache.objs_per_slab - + if int(addr) < self.s_mem: return None @@ -168,7 +163,7 @@ def contains_obj(self, addr): return (False, obj_addr, ac[obj_addr]) return (True, obj_addr, None) - + def __error(self, msg, misplaced = False): msg = col_error("cache %s slab %x%s" % (self.kmem_cache.name, int(self.gdb_obj.address), msg)) @@ -177,7 +172,7 @@ def __error(self, msg, misplaced = False): self.misplaced_error = msg else: print(msg) - + def __free_error(self, list_name): self.misplaced_list = list_name self.__error(": is on list %s, but has %d of %d objects allocated" % @@ -216,7 +211,7 @@ def check(self, slabtype, nid): elif struct_slab_cache != self.kmem_cache.off_slab_cache: self.__error(": OFF_SLAB struct slab is in a wrong cache %s" % struct_slab_cache) - + struct_slab_obj = struct_slab_slab.contains_obj(self.gdb_obj.address) if not struct_slab_obj[0]: self.__error(": OFF_SLAB struct slab is not allocated") @@ -228,7 +223,7 @@ def check(self, slabtype, nid): if self.inuse + num_free != max_free: self.__error(": inuse=%d free=%d adds up to %d (should be %d)" % (self.inuse, num_free, self.inuse + num_free, max_free)) - + if slabtype == slab_free: if num_free != max_free: self.__free_error("slab_free") @@ -242,7 +237,7 @@ def check(self, slabtype, nid): if self.page_slab: slab_nid = self.page.get_nid() if nid != slab_nid: - self.__error(": slab is on nid %d instead of %d" % + self.__error(": slab is on nid %d instead of %d" % (slab_nid, nid)) print("free objects %d" % num_free) @@ -264,7 +259,7 @@ def check(self, slabtype, nid): last_page_addr = int(page.gdb_obj.address) if page.get_nid() != nid: - self.__error(": obj %x is on nid %d instead of %d" % + self.__error(": obj %x is on nid %d instead of %d" % (obj, page.get_nid(), nid)) if not page.is_slab(): self.__error(": obj %x is not on PageSlab page" % obj) @@ -300,11 +295,7 @@ def __init__(self, gdb_obj, kmem_cache, error=False): self.inuse = int(gdb_obj["inuse"]) self.s_mem = int(gdb_obj["s_mem"]) -class KmemCache(CrashBaseClass): - __types__ = [ 'struct kmem_cache', 'struct alien_cache' ] - __type_callbacks__ = [ ('struct kmem_cache', 'check_kmem_cache_type'), - ('struct alien_cache', 'setup_alien_cache_type') ] - +class KmemCache(object): buffer_size_name = None nodelists_name = None percpu_name = None @@ -326,7 +317,7 @@ def setup_alien_cache_type(cls, gdbtype): def __get_nodelist(self, node): return self.gdb_obj[KmemCache.nodelists_name][node] - + def __get_nodelists(self): for nid in for_each_nid(): node = self.__get_nodelist(nid) @@ -345,7 +336,7 @@ def __init__(self, name, gdb_obj): self.name = name self.gdb_obj = gdb_obj self.array_caches = None - + self.objs_per_slab = int(gdb_obj["num"]) self.buffer_size = int(gdb_obj[KmemCache.buffer_size_name]) @@ -377,7 +368,7 @@ def __fill_array_cache(self, acache, ac_type, nid_src, nid_tgt): print(col_error("WARNING: array cache duplicity detected!")) else: self.array_caches[ptr] = cache_dict - + page = page_from_addr(ptr) obj_nid = page.get_nid() @@ 
-430,7 +421,7 @@ def __fill_all_array_caches(self): shared_cache = node["shared"] if int(shared_cache) != 0: self.__fill_array_cache(shared_cache.dereference(), AC_SHARED, nid, nid) - + self.__fill_alien_caches(node, nid) def get_array_caches(self): @@ -533,7 +524,7 @@ def ___check_slabs(self, node, slabtype, nid, reverse=False): print(col_error("Unrecoverable error when traversing {} slab list: {}".format( slab_list_name[slabtype], e))) check_ok = False - + if errors['num_ok'] > 0: print("{} slab objects were ok between {:#x} and {:#x}". format(errors['num_ok'], errors['first_ok'], errors['last_ok'])) @@ -545,7 +536,7 @@ def ___check_slabs(self, node, slabtype, nid, reverse=False): return (check_ok, slabs, free) def __check_slabs(self, node, slabtype, nid): - + slab_list = node[slab_list_fullname[slabtype]] print("checking {} slab list {:#x}".format(slab_list_name[slabtype], @@ -562,7 +553,7 @@ def __check_slabs(self, node, slabtype, nid): slabtype, nid, reverse=True) slabs += slabs_rev free += free_rev - + #print("checked {} slabs in {} slab list".format( # slabs, slab_list_name[slabtype])) @@ -606,53 +597,56 @@ def check_all(self): (nid, free_declared, free_counted))) self.check_array_caches() -class KmemCaches(CrashBaseClass): - - __symbol_callbacks__ = [ ('slab_caches', 'setup_slab_caches'), - (' cache_chain', 'setup_slab_caches') ] - - kmem_caches = None - kmem_caches_by_addr = None - - @classmethod - def setup_slab_caches(cls, slab_caches): - cls.kmem_caches = dict() - cls.kmem_caches_by_addr = dict() - - list_caches = slab_caches.value() - - for cache in list_for_each_entry(list_caches, - KmemCache.kmem_cache_type, - KmemCache.head_name): - name = cache["name"].string() - kmem_cache = KmemCache(name, cache) - - cls.kmem_caches[name] = kmem_cache - cls.kmem_caches_by_addr[int(cache.address)] = kmem_cache - - @export - def kmem_cache_from_addr(cls, addr): - try: - return cls.kmem_caches_by_addr[addr] - except KeyError: - return None - - @export - def kmem_cache_from_name(cls, name): - try: - return cls.kmem_caches[name] - except KeyError: - return None - - @export - def kmem_cache_get_all(cls): - return cls.kmem_caches.values() - - @export - def slab_from_obj_addr(cls, addr): - page = page_from_addr(addr).compound_head() - if not page.is_slab(): - return None - - return Slab.from_page(page) - +kmem_caches = None +kmem_caches_by_addr = None + +def setup_slab_caches(slab_caches): + global kmem_caches + global kmem_caches_by_addr + + kmem_caches = dict() + kmem_caches_by_addr = dict() + + list_caches = slab_caches.value() + + for cache in list_for_each_entry(list_caches, + types.kmem_cache_type, + KmemCache.head_name): + name = cache["name"].string() + kmem_cache = KmemCache(name, cache) + + kmem_caches[name] = kmem_cache + kmem_caches_by_addr[int(cache.address)] = kmem_cache + +def kmem_cache_from_addr(addr): + try: + return kmem_caches_by_addr[addr] + except KeyError: + return None + +def kmem_cache_from_name(name): + try: + return kmem_caches[name] + except KeyError: + return None + +def kmem_cache_get_all(): + return kmem_caches.values() + +def slab_from_obj_addr(addr): + page = page_from_addr(addr).compound_head() + if not page.is_slab(): + return None + + return Slab.from_page(page) + +type_cbs = TypeCallbacks([ ('struct page', Slab.check_page_type), + ('struct slab', Slab.check_slab_type), + ('kmem_bufctl_t', Slab.check_bufctl_type), + ('freelist_idx_t', Slab.check_bufctl_type), + ('struct kmem_cache', + KmemCache.check_kmem_cache_type), + ('struct alien_cache', + 
KmemCache.setup_alien_cache_type) ]) +symbol_cbs = SymbolCallbacks([ ('slab_caches', setup_slab_caches), + (' cache_chain', setup_slab_caches) ]) diff --git a/crash/types/task.py b/crash/types/task.py index 2f35507eb2c..8ab41884fdf 100644 --- a/crash/types/task.py +++ b/crash/types/task.py @@ -3,8 +3,7 @@ import gdb from crash.util import array_size, struct_has_member -from crash.infra import CrashBaseClass -from crash.infra.lookup import DelayedValue, ClassProperty, get_delayed_lookup +from crash.util.symbols import Types, Symvals, SymbolCallbacks PF_EXITING = 0x4 @@ -13,16 +12,15 @@ def get_value(symname): if sym[0]: return sym[0].value() +types = Types(['struct task_struct', 'struct mm_struct', 'atomic_long_t' ]) +symvals = Symvals([ 'task_state_array' ]) + # This is pretty painful. These are all #defines so none of them end # up with symbols in the kernel. The best approximation we have is # task_state_array which doesn't include all of them. All we can do # is make some assumptions based on the changes upstream. This will # be fragile. -class TaskStateFlags(CrashBaseClass): - __types__ = [ 'char *', 'struct task_struct' ] - __symvals__ = [ 'task_state_array' ] - __symbol_callbacks__ = [ ('task_state_array', '_task_state_flags_callback') ] - +class TaskStateFlags(object): TASK_RUNNING = 0 TASK_FLAG_UNINITIALIZED = -1 @@ -51,11 +49,11 @@ def has_flag(cls, flagname): @classmethod def _task_state_flags_callback(cls, symbol): - count = array_size(cls.task_state_array) + count = array_size(symvals.task_state_array) bit = 0 for i in range(count): - state = cls.task_state_array[i].string() + state = symvals.task_state_array[i].string() state_strings = { '(running)' : 'TASK_RUNNING', '(sleeping)' : 'TASK_INTERRUPTIBLE', @@ -153,6 +151,9 @@ def _check_state_bits(cls): raise RuntimeError("Missing required task states: {}" .format(",".join(missing))) +symbol_cbs = SymbolCallbacks([ ('task_state_array', + TaskStateFlags._task_state_flags_callback) ]) + TF = TaskStateFlags class BadTaskError(TypeError): @@ -178,8 +179,8 @@ def __init__(self, task_struct, active=False, cpu=None, regs=None): raise TypeError("cpu must be integer or None") if not (isinstance(task_struct, gdb.Value) and - (task_struct.type == self.task_struct_type or - task_struct.type == self.task_struct_type.pointer())): + (task_struct.type == types.task_struct_type or + task_struct.type == types.task_struct_type.pointer())): raise BadTaskError(task_struct) self.task_struct = task_struct @@ -200,7 +201,7 @@ def __init__(self, task_struct, active=False, cpu=None, regs=None): @classmethod def init_task_types(cls, task): if not cls.valid: - t = gdb.lookup_type('struct task_struct') + t = types.task_struct_type if task.type != t: raise BadTaskError(task) @@ -209,10 +210,9 @@ def init_task_types(cls, task): # a type resolved from a symbol will be different structures # within gdb. Equality requires a deep comparison rather than # a simple pointer comparison. - cls.task_struct_type = task.type - fields = cls.task_struct_type.fields() + types.task_struct_type = task.type + fields = types.task_struct_type.fields() cls.task_state_has_exit_state = 'exit_state' in fields - cls.mm_struct_type = gdb.lookup_type('struct mm_struct') cls.pick_get_rss() cls.pick_last_run() cls.init_mm = get_value('init_mm') @@ -334,21 +334,21 @@ def get_anon_file_rss_fields(self): # select the proper function and assign it to the class. 
@classmethod def pick_get_rss(cls): - if struct_has_member(cls.mm_struct_type, 'rss'): + if struct_has_member(types.mm_struct_type, 'rss'): cls.get_rss = cls.get_rss_field - elif struct_has_member(cls.mm_struct_type, '_rss'): + elif struct_has_member(types.mm_struct_type, '_rss'): cls.get_rss = cls.get__rss_field - elif struct_has_member(cls.mm_struct_type, 'rss_stat'): + elif struct_has_member(types.mm_struct_type, 'rss_stat'): cls.MM_FILEPAGES = get_value('MM_FILEPAGES') cls.MM_ANONPAGES = get_value('MM_ANONPAGES') cls.get_rss = cls.get_rss_stat_field else: cls.anon_file_rss_fields = [] - if struct_has_member(cls.mm_struct_type, '_file_rss'): + if struct_has_member(types.mm_struct_type, '_file_rss'): cls.anon_file_rss_fields.append('_file_rss') - if struct_has_member(cls.mm_struct_type, '_anon_rss'): + if struct_has_member(types.mm_struct_type, '_anon_rss'): cls.anon_file_rss_fields.append('_anon_rss') cls.atomic_long_type = gdb.lookup_type('atomic_long_t') @@ -368,9 +368,9 @@ def last_run__last_arrival(self): @classmethod def pick_last_run(cls): - fields = cls.task_struct_type.keys() + fields = types.task_struct_type.keys() if ('sched_info' in fields and - 'last_arrival' in cls.task_struct_type['sched_info'].type.keys()): + 'last_arrival' in types.task_struct_type['sched_info'].type.keys()): cls.last_run = cls.last_run__last_arrival elif 'last_run' in fields: diff --git a/crash/types/vmstat.py b/crash/types/vmstat.py index 6d2cc77fd29..1f2c77aae38 100644 --- a/crash/types/vmstat.py +++ b/crash/types/vmstat.py @@ -2,17 +2,16 @@ # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: import gdb -from crash.infra import CrashBaseClass, export from crash.util import container_of, find_member_variant +from crash.util.symbols import Types, TypeCallbacks, Symbols import crash.types.node from crash.types.percpu import get_percpu_var from crash.types.cpu import for_each_online_cpu -class VmStat(CrashBaseClass): - __types__ = ['enum zone_stat_item', 'enum vm_event_item'] - __symbols__ = [ 'vm_event_states' ] - __type_callbacks__ = [ ('enum zone_stat_item', 'check_enum_type'), - ('enum vm_event_item', 'check_enum_type') ] + +class VmStat(object): + types = Types(['enum zone_stat_item', 'enum vm_event_item']) + symbols = Symbols([ 'vm_event_states' ]) nr_stat_items = None nr_event_items = None @@ -22,12 +21,14 @@ class VmStat(CrashBaseClass): @classmethod def check_enum_type(cls, gdbtype): - if gdbtype == cls.enum_zone_stat_item_type: - (items, names) = cls.__populate_names(gdbtype, 'NR_VM_ZONE_STAT_ITEMS') + if gdbtype == cls.types.enum_zone_stat_item_type: + (items, names) = cls.__populate_names(gdbtype, + 'NR_VM_ZONE_STAT_ITEMS') cls.nr_stat_items = items cls.vm_stat_names = names - elif gdbtype == cls.enum_vm_event_item_type: - (items, names) = cls.__populate_names(gdbtype, 'NR_VM_EVENT_ITEMS') + elif gdbtype == cls.types.enum_vm_event_item_type: + (items, names) = cls.__populate_names(gdbtype, + 'NR_VM_EVENT_ITEMS') cls.nr_event_items = items cls.vm_event_names = names else: @@ -45,22 +46,27 @@ def __populate_names(cls, enum_type, items_name): return (nr_items, names) - @staticmethod - def get_stat_names(): - return VmStat.vm_stat_names + @classmethod + def get_stat_names(cls): + return cls.vm_stat_names - @staticmethod - def get_event_names(): - return VmStat.vm_event_names + @classmethod + def get_event_names(cls): + return cls.vm_event_names @classmethod - def get_events(): - nr = VmStat.nr_event_items + def get_events(cls): + nr = cls.nr_event_items events = [0] * nr for cpu in 
for_each_online_cpu(): - states = get_percpu_var(cls.vm_event_states, cpu) + states = get_percpu_var(cls.symbols.vm_event_states, cpu) for item in range(0, nr): events[item] += int(states["event"][item]) return events + +type_cbs = TypeCallbacks([ ('enum zone_stat_item', + VmStat.check_enum_type), + ('enum vm_event_item', + VmStat.check_enum_type) ]) diff --git a/crash/types/zone.py b/crash/types/zone.py index 64f515a5aea..836ab682e9f 100644 --- a/crash/types/zone.py +++ b/crash/types/zone.py @@ -2,19 +2,17 @@ # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: import gdb -from crash.infra import CrashBaseClass, export from crash.util import container_of, find_member_variant, array_for_each +from crash.util.symbols import Types import crash.types.node from crash.types.percpu import get_percpu_var from crash.types.vmstat import VmStat from crash.types.cpu import for_each_online_cpu from crash.types.list import list_for_each_entry -def getValue(sym): - return gdb.lookup_symbol(sym, None)[0].value() +class Zone(object): -class Zone(CrashBaseClass): - __types__ = [ 'struct zone', 'struct page' ] + types = Types([' struct page' ]) def __init__(self, obj, zid): self.gdb_obj = obj @@ -52,7 +50,9 @@ def _check_free_area(self, area, is_pcp): nr_free = 0 list_array_name = "lists" if is_pcp else "free_list" for free_list in array_for_each(area[list_array_name]): - for page_obj in list_for_each_entry(free_list, self.page_type, "lru"): + for page_obj in list_for_each_entry(free_list, + self.types.page_type, + "lru"): page = crash.types.page.Page.from_obj(page_obj) nr_free += 1 if page.get_nid() != self.nid or page.get_zid() != self.zid: @@ -72,18 +72,14 @@ def check_free_pages(self): pageset = get_percpu_var(self.gdb_obj["pageset"], cpu) self._check_free_area(pageset["pcp"], True) -class Zones(CrashBaseClass): +def for_each_zone(): + for node in crash.types.node.for_each_node(): + for zone in node.for_each_zone(): + yield zone - @export - def for_each_zone(cls): - for node in crash.types.node.for_each_node(): - for zone in node.for_each_zone(): - yield zone - - @export - def for_each_populated_zone(cls): - #TODO: some filter thing? - for zone in cls.for_each_zone(): - if zone.is_populated(): - yield zone +def for_each_populated_zone(): + #TODO: some filter thing? + for zone in for_each_zone(): + if zone.is_populated(): + yield zone diff --git a/crash/util/__init__.py b/crash/util/__init__.py index bfa1548502d..817180b4054 100644 --- a/crash/util/__init__.py +++ b/crash/util/__init__.py @@ -7,8 +7,7 @@ import uuid from typing import Dict - -from crash.infra import CrashBaseClass, export +from crash.util.symbols import Types from crash.exceptions import MissingTypeError, MissingSymbolError TypeSpecifier = Union [ gdb.Type, gdb.Value, str, gdb.Symbol ] @@ -73,450 +72,418 @@ def __init__(self, member, gdbtype): self.member = member self.type = gdbtype -class TypesUtilClass(CrashBaseClass): - __types__ = [ 'char *', 'uuid_t' ] - - @export - def container_of(self, val, gdbtype, member): - """ - Returns an object that contains the specified object at the given - offset. - - Args: - val (gdb.Value): The value to be converted. It can refer to an - allocated structure or a pointer. - gdbtype (gdb.Type): The type of the object that will be generated - member (str): The name of the member in the target struct that - contains `val`. - - Returns: - gdb.Value: The converted object, of the type specified by - the caller. 
- Raises: - TypeError: val is not a gdb.Value - """ - if not isinstance(val, gdb.Value): - raise TypeError("container_of expects gdb.Value") - charp = self.char_p_type - if val.type.code != gdb.TYPE_CODE_PTR: - val = val.address - gdbtype = resolve_type(gdbtype) - offset = offsetof(gdbtype, member) - return (val.cast(charp) - offset).cast(gdbtype.pointer()).dereference() - - @export - @staticmethod - def struct_has_member(gdbtype: TypeSpecifier, name: str) -> bool: - """ - Returns whether a structure has a given member name. - - A typical method of determining whether a structure has a member is just - to check the fields list. That generally works but falls apart when - the structure contains an anonymous union or substructure since - it will push the members one level deeper in the namespace. - - This routine provides a simple interface that covers those details. - - Args: - val (gdb.Type, gdb.Value, str, gdb.Symbol): The object for which - to resolve the type to search for the member - name (str): The name of the member to query - - Returns: - bool: Whether the member is present in the specified type - - Raises: - TypeError: An invalid argument has been provided. - - """ - try: - x = TypesUtilClass.offsetof(gdbtype, name) - return True - except InvalidComponentError: - return False - - @export - @staticmethod - def get_symbol_value(symname, block=None, domain=None): - """ - Returns the value associated with a named symbol - - Args: - symname (str): Name of the symbol to resolve - block (gdb.Block, optional, default=None): The block to resolve - the symbol within - domain (gdb.Symbol constant SYMBOL_*_DOMAIN, optional, default=None): - The domain to search for the symbol - Returns: - gdb.Value: The requested value - Raises: - MissingSymbolError: The symbol or value cannot be located - """ - if domain is None: - domain = gdb.SYMBOL_VAR_DOMAIN - sym = gdb.lookup_symbol(symname, block, domain)[0] - if sym: - return sym.value() - raise MissingSymbolError("Cannot locate symbol {}".format(symname)) - - @export - @classmethod - def safe_get_symbol_value(cls, symname, block=None, domain=None): - """ - Returns the value associated with a named symbol - - Args: - symname (str): Name of the symbol to resolve - block (gdb.Block, optional, default=None): The block to resolve - the symbol within - domain (gdb.Symbol constant SYMBOL_*_DOMAIN, optional, default=None): - The domain to search for the symbol - Returns: - gdb.Value: The requested value or - None: if the symbol or value cannot be found - - """ - try: - return cls.get_symbol_value(symname, block, domain) - except MissingSymbolError: - return None - - @export - @staticmethod - def resolve_type(val): - """ - Resolves a gdb.Type given a type, value, string, or symbol - - Args: - val (gdb.Type, gdb.Value, str, gdb.Symbol): The object for which - to resolve the type - - Returns: - gdb.Type: The resolved type - - Raises: - TypeError: The object type of val is not valid - """ - if isinstance(val, gdb.Type): - gdbtype = val - elif isinstance(val, gdb.Value): - gdbtype = val.type - elif isinstance(val, str): - try: - gdbtype = gdb.lookup_type(val) - except gdb.error: - raise MissingTypeError("Could not resolve type {}" - .format(val)) - elif isinstance(val, gdb.Symbol): - gdbtype = val.value().type - else: - raise TypeError("Invalid type {}".format(str(type(val)))) - return gdbtype - - @classmethod - def __offsetof(cls, val, spec, error): - gdbtype = val - offset = 0 - - for member in spec.split('.'): - found = False - if gdbtype.code != 
gdb.TYPE_CODE_STRUCT and \ - gdbtype.code != gdb.TYPE_CODE_UNION: - raise _InvalidComponentTypeError(field.name, spec) - for field in gdbtype.fields(): - off = field.bitpos >> 3 - if field.name == member: - nexttype = field.type - found = True - break +types = Types([ 'char *', 'uuid_t' ]) + +def container_of(val, gdbtype, member): + """ + Returns an object that contains the specified object at the given + offset. + + Args: + val (gdb.Value): The value to be converted. It can refer to an + allocated structure or a pointer. + gdbtype (gdb.Type): The type of the object that will be generated + member (str): The name of the member in the target struct that + contains `val`. + + Returns: + gdb.Value: The converted object, of the type specified by + the caller. + Raises: + TypeError: val is not a gdb.Value + """ + if not isinstance(val, gdb.Value): + raise TypeError("container_of expects gdb.Value") + charp = types.char_p_type + if val.type.code != gdb.TYPE_CODE_PTR: + val = val.address + gdbtype = resolve_type(gdbtype) + offset = offsetof(gdbtype, member) + return (val.cast(charp) - offset).cast(gdbtype.pointer()).dereference() + +def struct_has_member(gdbtype: TypeSpecifier, name: str) -> bool: + """ + Returns whether a structure has a given member name. + + A typical method of determining whether a structure has a member is just + to check the fields list. That generally works but falls apart when + the structure contains an anonymous union or substructure since + it will push the members one level deeper in the namespace. + + This routine provides a simple interface that covers those details. + + Args: + val (gdb.Type, gdb.Value, str, gdb.Symbol): The object for which + to resolve the type to search for the member + name (str): The name of the member to query + + Returns: + bool: Whether the member is present in the specified type + + Raises: + TypeError: An invalid argument has been provided. 
+ + """ + try: + x = offsetof(gdbtype, name) + return True + except InvalidComponentError: + return False + +def get_symbol_value(symname, block=None, domain=None): + """ + Returns the value associated with a named symbol + + Args: + symname (str): Name of the symbol to resolve + block (gdb.Block, optional, default=None): The block to resolve + the symbol within + domain (gdb.Symbol constant SYMBOL_*_DOMAIN, optional, default=None): + The domain to search for the symbol + Returns: + gdb.Value: The requested value + Raises: + MissingSymbolError: The symbol or value cannot be located + """ + if domain is None: + domain = gdb.SYMBOL_VAR_DOMAIN + sym = gdb.lookup_symbol(symname, block, domain)[0] + if sym: + return sym.value() + raise MissingSymbolError("Cannot locate symbol {}".format(symname)) + +def safe_get_symbol_value(symname, block=None, domain=None): + """ + Returns the value associated with a named symbol + + Args: + symname (str): Name of the symbol to resolve + block (gdb.Block, optional, default=None): The block to resolve + the symbol within + domain (gdb.Symbol constant SYMBOL_*_DOMAIN, optional, default=None): + The domain to search for the symbol + Returns: + gdb.Value: The requested value or + None: if the symbol or value cannot be found + + """ + try: + return get_symbol_value(symname, block, domain) + except MissingSymbolError: + return None - # Step into anonymous structs and unions - if field.name is None: - res = cls.__offsetof(field.type, member, False) - if res is not None: - found = True - off += res[0] - nexttype = res[1] - break - if not found: - if error: - raise _InvalidComponentNameError(member, gdbtype) - else: - return None - gdbtype = nexttype - offset += off - - return (offset, gdbtype) - - @export - @classmethod - def offsetof_type(cls, val, spec, error=True): - """ - Returns the offset and type of a named member of a structure - - Args: - val (gdb.Type, gdb.Symbol, gdb.Value, or str): The type that - contains the specified member, must be a struct or union - spec (str): The member of the member to resolve - error (bool, optional, default=True): Whether to consider lookup - failures an error - - Returns: - Tuple of: - int: The offset of the resolved member - gdb.Type: The type of the resolved member - - Raises: - InvalidArgumentError: val is not a valid type - InvalidComponentError: spec is not valid for the type - """ - gdbtype = None - try: - gdbtype = resolve_type(val) - except MissingTypeError as e: - pass - except TypeError as e: - pass +def resolve_type(val): + """ + Resolves a gdb.Type given a type, value, string, or symbol - if not isinstance(gdbtype, gdb.Type): - raise InvalidArgumentError(val) + Args: + val (gdb.Type, gdb.Value, str, gdb.Symbol): The object for which + to resolve the type - # We'll be friendly and accept pointers as the initial type - if gdbtype.code == gdb.TYPE_CODE_PTR: - gdbtype = gdbtype.target() + Returns: + gdb.Type: The resolved type + Raises: + TypeError: The object type of val is not valid + """ + if isinstance(val, gdb.Type): + gdbtype = val + elif isinstance(val, gdb.Value): + gdbtype = val.type + elif isinstance(val, str): + try: + gdbtype = gdb.lookup_type(val) + except gdb.error: + raise MissingTypeError("Could not resolve type {}" + .format(val)) + elif isinstance(val, gdb.Symbol): + gdbtype = val.value().type + else: + raise TypeError("Invalid type {}".format(str(type(val)))) + return gdbtype + +def __offsetof(val, spec, error): + gdbtype = val + offset = 0 + + for member in spec.split('.'): + found = False if 
gdbtype.code != gdb.TYPE_CODE_STRUCT and \ gdbtype.code != gdb.TYPE_CODE_UNION: - raise InvalidArgumentTypeError(gdbtype) - - try: - return cls.__offsetof(gdbtype, spec, error) - except _InvalidComponentBaseError as e: + raise _InvalidComponentTypeError(field.name, spec) + for field in gdbtype.fields(): + off = field.bitpos >> 3 + if field.name == member: + nexttype = field.type + found = True + break + + # Step into anonymous structs and unions + if field.name is None: + res = __offsetof(field.type, member, False) + if res is not None: + found = True + off += res[0] + nexttype = res[1] + break + if not found: if error: - raise InvalidComponentError(gdbtype, spec, e.message) + raise _InvalidComponentNameError(member, gdbtype) else: return None + gdbtype = nexttype + offset += off + + return (offset, gdbtype) - @export - @classmethod - def offsetof(cls, val, spec, error=True): - """ - Returns the offset of a named member of a structure +def offsetof_type(val, spec, error=True): + """ + Returns the offset and type of a named member of a structure - Args: - val (gdb.Type, gdb.Symbol, gdb.Value, or str): The type that - contains the specified member, must be a struct or union - spec (str): The member of the member to resolve - error (bool, optional, default=True): Whether to consider lookup - failures an error + Args: + val (gdb.Type, gdb.Symbol, gdb.Value, or str): The type that + contains the specified member, must be a struct or union + spec (str): The member of the member to resolve + error (bool, optional, default=True): Whether to consider lookup + failures an error - Returns: + Returns: + Tuple of: int: The offset of the resolved member - None: The member could not be resolved - - Raises: - InvalidArgumentError: val is not a valid type - InvalidComponentError: spec is not valid for the type - """ - res = cls.offsetof_type(val, spec, error) - if res: - return res[0] + gdb.Type: The type of the resolved member + + Raises: + InvalidArgumentError: val is not a valid type + InvalidComponentError: spec is not valid for the type + """ + gdbtype = None + try: + gdbtype = resolve_type(val) + except MissingTypeError as e: + pass + except TypeError as e: + pass + + if not isinstance(gdbtype, gdb.Type): + raise InvalidArgumentError(val) + + # We'll be friendly and accept pointers as the initial type + if gdbtype.code == gdb.TYPE_CODE_PTR: + gdbtype = gdbtype.target() + + if gdbtype.code != gdb.TYPE_CODE_STRUCT and \ + gdbtype.code != gdb.TYPE_CODE_UNION: + raise InvalidArgumentTypeError(gdbtype) + + try: + return __offsetof(gdbtype, spec, error) + except _InvalidComponentBaseError as e: + if error: + raise InvalidComponentError(gdbtype, spec, e.message) + else: + return None + +def offsetof(val, spec, error=True): + """ + Returns the offset of a named member of a structure + + Args: + val (gdb.Type, gdb.Symbol, gdb.Value, or str): The type that + contains the specified member, must be a struct or union + spec (str): The member of the member to resolve + error (bool, optional, default=True): Whether to consider lookup + failures an error + + Returns: + int: The offset of the resolved member + None: The member could not be resolved + + Raises: + InvalidArgumentError: val is not a valid type + InvalidComponentError: spec is not valid for the type + """ + res = offsetof_type(val, spec, error) + if res: + return res[0] + return None + +def find_member_variant(gdbtype, variants): + """ + Examines the given type and returns the first found member name + + Over time, structure member names may change. 
This routine + allows the caller to provide a list of potential names and returns + the first one found. + + Args: + gdbtype (gdb.Type): The type of structure or union to examine + variants (list of str): The names of members to search + + Returns: + str: The first member name found + + Raises: + TypeError: No named member could be found + """ + for v in variants: + if offsetof(gdbtype, v, False) is not None: + return v + raise TypeError("Unrecognized '{}': could not find member '{}'" + .format(str(gdbtype), variants[0])) + +def safe_lookup_type(name, block=None): + """ + Looks up a gdb.Type without throwing an exception on failure + + Args: + name (str): The name of the type to look up + + Returns: + gdb.Type for requested type or None if it could not be found + """ + try: + return gdb.lookup_type(name, block) + except gdb.error: return None - @export - @classmethod - def find_member_variant(cls, gdbtype, variants): - """ - Examines the given type and returns the first found member name - - Over time, structure member names may change. This routine - allows the caller to provide a list of potential names and returns - the first one found. - - Args: - gdbtype (gdb.Type): The type of structure or union to examine - variants (list of str): The names of members to search - - Returns: - str: The first member name found - - Raises: - TypeError: No named member could be found - """ - for v in variants: - if cls.offsetof(gdbtype, v, False) is not None: - return v - raise TypeError("Unrecognized '{}': could not find member '{}'" - .format(str(gdbtype), variants[0])) - - @export - @staticmethod - def safe_lookup_type(name, block=None): - """ - Looks up a gdb.Type without throwing an exception on failure - - Args: - name (str): The name of the type to look up - - Returns: - gdb.Type for requested type or None if it could not be found - """ +def array_size(value): + """ + Returns the number of elements in an array + + Args: + value (gdb.Value): The array to size + """ + return value.type.sizeof // value[0].type.sizeof + +def get_typed_pointer(val, gdbtype): + """ + Returns a pointer to the requested type at the given address + + Args: + val (gdb.Value, str, or int): The address for which to provide + a casted pointer + gdbtype (gdb.Type): The type of the pointer to return + + Returns: + gdb.Value: The casted pointer of the requested type + """ + if gdbtype.code != gdb.TYPE_CODE_PTR: + gdbtype = gdbtype.pointer() + if isinstance(val, gdb.Value): + if (val.type != gdbtype and + val.type != gdbtype.target()): + raise TypeError("gdb.Value must refer to {} not {}" + .format(gdbtype, val.type)) + elif isinstance(val, str): try: - return gdb.lookup_type(name, block) - except gdb.error: - return None - - @export - @staticmethod - def array_size(value): - """ - Returns the number of elements in an array - - Args: - value (gdb.Value): The array to size - """ - return value.type.sizeof // value[0].type.sizeof - - @export - @staticmethod - def get_typed_pointer(val, gdbtype): - """ - Returns a pointer to the requested type at the given address - - Args: - val (gdb.Value, str, or int): The address for which to provide - a casted pointer - gdbtype (gdb.Type): The type of the pointer to return - - Returns: - gdb.Value: The casted pointer of the requested type - """ - if gdbtype.code != gdb.TYPE_CODE_PTR: - gdbtype = gdbtype.pointer() - if isinstance(val, gdb.Value): - if (val.type != gdbtype and - val.type != gdbtype.target()): - raise TypeError("gdb.Value must refer to {} not {}" - .format(gdbtype, val.type)) - 
elif isinstance(val, str): + val = int(val, 16) + except TypeError as e: + print(e) + raise TypeError("string must describe hex address: ".format(e)) + if isinstance(val, int): + val = gdb.Value(val).cast(gdbtype).dereference() + + return val + +def array_for_each(value): + size = array_size(value) + for i in range(array_size(value)): + yield value[i] + +def decode_flags(value: gdb.Value, names: Dict[int, str], + separator: str="|") -> str: + """ + Present a bitfield of individual flags in a human-readable format. + + Args: + value (gdb.Value): + The value containing the flags to be decoded. + names (dict of int->str): + A dictionary containing mappings for each bit number to + a human-readable name. Any flags found that do not have + a matching value in the dict will be displayed as FLAG_. + separator (str, defaults to "|"): + The string to use as a separator between each flag name in the + output. + + Returns: + str: A human-readable string displaying the flag values. + + Raises: + TypeError: value is not gdb.Value or names is not dict. + """ + if not isinstance(value, gdb.Value): + raise TypeError("value must be gdb.Value") + + if not isinstance(names, dict): + raise TypeError("names must be a dictionary of int -> str") + + flags_val = int(value) + flags = [] + for n in range(0, value.type.sizeof << 3): + if flags_val & (1 << n): try: - val = int(val, 16) - except TypeError as e: - print(e) - raise TypeError("string must describe hex address: ".format(e)) - if isinstance(val, int): - val = gdb.Value(val).cast(gdbtype).dereference() - - return val - - @export - @staticmethod - def array_for_each(value): - size = array_size(value) - for i in range(array_size(value)): - yield value[i] - - @export - @staticmethod - def decode_flags(value: gdb.Value, names: Dict[int, str], - separator: str="|") -> str: - """ - Present a bitfield of individual flags in a human-readable format. - - Args: - value (gdb.Value): - The value containing the flags to be decoded. - names (dict of int->str): - A dictionary containing mappings for each bit number to - a human-readable name. Any flags found that do not have - a matching value in the dict will be displayed as FLAG_. - separator (str, defaults to "|"): - The string to use as a separator between each flag name in the - output. - - Returns: - str: A human-readable string displaying the flag values. - - Raises: - TypeError: value is not gdb.Value or names is not dict. - """ - if not isinstance(value, gdb.Value): - raise TypeError("value must be gdb.Value") - - if not isinstance(names, dict): - raise TypeError("names must be a dictionary of int -> str") - - flags_val = int(value) - flags = [] - for n in range(0, value.type.sizeof << 3): - if flags_val & (1 << n): - try: - flags.append(names[1 << n]) - except KeyError: - flags.append("FLAG_{}".format(n)) - - return separator.join(flags) - - @export - @classmethod - def decode_uuid(cls, value: gdb.Value) -> uuid.UUID: - """ - Decode an array of bytes that describes a UUID into a Python-style - UUID object - - Args: - value (gdb.Value): The UUID to decode - - Returns: - uuid.UUID: The UUID object that describes the value - - Raises: - TypeError: value is not gdb.Value or does not describe a 16-byte array. 
- - """ - if not isinstance(value, gdb.Value): - raise TypeError("value must be gdb.Value") - - if (value.type.code != gdb.TYPE_CODE_ARRAY or - value[0].type.sizeof != 1 or - value.type.sizeof != 16): - raise TypeError("value must describe an array of 16 bytes") - - u = 0 - for i in range(0, 16): - u <<= 8 - u += int(value[i]) - - return uuid.UUID(int=u) - - @export - @classmethod - def decode_uuid_t(cls, value: gdb.Value) -> uuid.UUID: - """ - Decode a Linux kernel uuid_t into a Python-style UUID object - - Args: - value (gdb.Value): The uuid_t to be decoded - - Returns: - uuid.UUID: The UUID object that describes the value - - Raises: - TypeError: value is not gdb.Value - """ - if not isinstance(value, gdb.Value): - raise TypeError("value must be gdb.Value") - - if value.type != self.uuid_t_type: - if (value.type.code == gdb.TYPE_CODE_PTR and - value.type.target() == self.uuid_t_type): - value = value.dereference() - else: - raise TypeError("value must describe a uuid_t") + flags.append(names[1 << n]) + except KeyError: + flags.append("FLAG_{}".format(n)) + + return separator.join(flags) + +def decode_uuid(value: gdb.Value) -> uuid.UUID: + """ + Decode an array of bytes that describes a UUID into a Python-style + UUID object + + Args: + value (gdb.Value): The UUID to decode - if 'b' in cls.uuid_t_type: - member = 'b' + Returns: + uuid.UUID: The UUID object that describes the value + + Raises: + TypeError: value is not gdb.Value or does not describe a 16-byte array. + + """ + if not isinstance(value, gdb.Value): + raise TypeError("value must be gdb.Value") + + if (value.type.code != gdb.TYPE_CODE_ARRAY or + value[0].type.sizeof != 1 or value.type.sizeof != 16): + raise TypeError("value must describe an array of 16 bytes") + + u = 0 + for i in range(0, 16): + u <<= 8 + u += int(value[i]) + + return uuid.UUID(int=u) + +def decode_uuid_t(value: gdb.Value) -> uuid.UUID: + """ + Decode a Linux kernel uuid_t into a Python-style UUID object + + Args: + value (gdb.Value): The uuid_t to be decoded + + Returns: + uuid.UUID: The UUID object that describes the value + + Raises: + TypeError: value is not gdb.Value + """ + if not isinstance(value, gdb.Value): + raise TypeError("value must be gdb.Value") + + if value.type != types.uuid_t_type: + if (value.type.code == gdb.TYPE_CODE_PTR and + value.type.target() == types.uuid_t_type): + value = value.dereference() else: - member = '__u_bits' + raise TypeError("value must describe a uuid_t") + + if struct_has_member(types.uuid_t_type, 'b'): + member = 'b' + else: + member = '__u_bits' - return cls.decode_uuid(value[member]) + return decode_uuid(value[member]) diff --git a/tests/test_infra.py b/tests/test_infra.py deleted file mode 100644 index 3c3c5d50d60..00000000000 --- a/tests/test_infra.py +++ /dev/null @@ -1,87 +0,0 @@ -# -*- coding: utf-8 -*- -# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: - -import unittest -import gdb - -from crash.infra import CrashBaseClass, export - -# The delayed init tests check for presence of an attribute in the instance -# dict (or class dict for class attributes) since hasattr() will call -# __getattr__, causing delayed initialization to occur. 
- -class TestInfra(unittest.TestCase): - def test_exporter_baseline(self): - class test_class(CrashBaseClass): - inited = False - def __init__(self): - self.retval = 1020 - setattr(self.__class__, 'inited', True) - @export - def test_func(self): - return self.retval - - x = test_class() - self.assertTrue(x.inited) - - self.assertTrue(test_class.inited) - self.assertTrue(test_func() == 1020) - self.assertTrue(test_class.inited) - - def test_export_normal(self): - class test_class(CrashBaseClass): - @export - def test_func(self): - return 104 - - self.assertTrue(test_func() == 104) - - def test_static_export(self): - class test_class(CrashBaseClass): - @staticmethod - @export - def test_func(): - return 1050 - - self.assertTrue(test_func() == 1050) - - def test_export_static(self): - class test_class(CrashBaseClass): - @export - @staticmethod - def test_func(): - return 105 - - self.assertTrue(test_func() == 105) - - def test_export_class(self): - class test_class(CrashBaseClass): - @classmethod - @export - def test_func(self): - return 106 - - self.assertTrue(test_func() == 106) - - def test_export_multiple_exports_one_instance(self): - class test_class(CrashBaseClass): - instances = 0 - def __init__(self): - setattr(self.__class__, 'instances', self.instances + 1) - - @export - def test_func(self): - return 1060 - @export - def test_func2(self): - return 1061 - - self.assertTrue(test_class.instances == 0) - self.assertTrue(test_func() == 1060) - self.assertTrue(test_class.instances == 1) - self.assertTrue(test_func() == 1060) - self.assertTrue(test_class.instances == 1) - self.assertTrue(test_func2() == 1061) - self.assertTrue(test_class.instances == 1) - self.assertTrue(test_func2() == 1061) - self.assertTrue(test_class.instances == 1) diff --git a/tests/test_infra_lookup.py b/tests/test_infra_lookup.py index a1acb52ecb5..8f79ccdd7e8 100644 --- a/tests/test_infra_lookup.py +++ b/tests/test_infra_lookup.py @@ -3,16 +3,14 @@ import unittest import gdb -from crash.infra import CrashBaseClass from crash.exceptions import DelayedAttributeError from crash.infra.callback import ObjfileEventCallback from crash.infra.lookup import SymbolCallback, TypeCallback from crash.infra.lookup import MinimalSymbolCallback -from crash.infra.lookup import DelayedLookups, ClassProperty from crash.infra.lookup import DelayedType, DelayedSymbol, DelayedSymval from crash.infra.lookup import DelayedMinimalSymbol, DelayedMinimalSymval -class TestDelayedLookupSetup(unittest.TestCase): +class TestTypeNameResolution(unittest.TestCase): def test_resolve_struct_normal(self): spec = 'struct test' @@ -69,137 +67,6 @@ def test_resolve_char_pointer(self): self.assertTrue(attrname == 'char_p_type') self.assertTrue(pointer) - def test_name_collision_attrs(self): - class test_data(object): - def __init__(self): - self.name = 'foo' - def get(self): - pass - def set(self, value): - pass - d = {'__delayed_lookups__' : {}} - attr = test_data() - DelayedLookups.add_lookup('TestClass', d, 'foo', attr) - with self.assertRaises(NameError): - DelayedLookups.add_lookup('TestClass', d, 'foo', attr) - - def test_name_collision_reserved(self): - d = {'__delayed_lookups__' : {}} - with self.assertRaises(NameError): - DelayedLookups.setup_delayed_lookups_for_class('TestClass', d) - - def test_type_setup(self): - d = {'__types__' : [ 'void *', 'struct test' ] } - DelayedLookups.setup_delayed_lookups_for_class('TestClass', d) - self.assertFalse('__types__' in d) - self.assertTrue('void_p_type' in d) - 
self.assertTrue(isinstance(d['void_p_type'], ClassProperty)) - self.assertTrue('void_p_type' in d['__delayed_lookups__']) - self.assertTrue(isinstance(d['__delayed_lookups__']['void_p_type'], - DelayedType)) - self.assertTrue('test_type' in d) - self.assertTrue(isinstance(d['test_type'], ClassProperty)) - self.assertTrue('test_type' in d['__delayed_lookups__']) - self.assertTrue(isinstance(d['__delayed_lookups__']['test_type'], - DelayedType)) - def test_symbol_setup(self): - d = {'__symbols__' : [ 'main' ]} - DelayedLookups.setup_delayed_lookups_for_class('TestClass', d) - self.assertFalse('__symbols__' in d) - self.assertTrue('main' in d) - self.assertTrue(isinstance(d['main'], ClassProperty)) - self.assertTrue('main' in d['__delayed_lookups__']) - self.assertTrue(isinstance(d['__delayed_lookups__']['main'], - DelayedSymbol)) - - def test_symval_setup(self): - d = {'__symvals__' : [ 'main' ]} - DelayedLookups.setup_delayed_lookups_for_class('TestClass', d) - self.assertFalse('__symvals__' in d) - self.assertTrue('main' in d) - self.assertTrue(isinstance(d['main'], ClassProperty)) - self.assertTrue('main' in d['__delayed_lookups__']) - self.assertTrue(isinstance(d['__delayed_lookups__']['main'], - DelayedSymval)) - - def test_symval_setup_bad(self): - d = {'__symvals__' : 'main' } - with self.assertRaises(TypeError): - DelayedLookups.setup_delayed_lookups_for_class('TestClass', d) - - def test_minsymbol_setup(self): - d = {'__minsymbols__' : [ 'main' ]} - DelayedLookups.setup_delayed_lookups_for_class('TestClass', d) - self.assertFalse('__minsymbols__' in d) - self.assertTrue('main' in d) - self.assertTrue(isinstance(d['main'], ClassProperty)) - self.assertTrue('main' in d['__delayed_lookups__']) - self.assertTrue(isinstance(d['__delayed_lookups__']['main'], - DelayedMinimalSymbol)) - def test_minsymval_setup(self): - d = {'__minsymvals__' : [ 'main' ]} - DelayedLookups.setup_delayed_lookups_for_class('TestClass', d) - self.assertFalse('__minsymvals__' in d) - self.assertTrue('main' in d) - self.assertTrue(isinstance(d['main'], ClassProperty)) - self.assertTrue('main' in d['__delayed_lookups__']) - self.assertTrue(isinstance(d['__delayed_lookups__']['main'], - DelayedMinimalSymval)) - - def get_callback_class(self): - class TestClass(DelayedLookups): - @classmethod - def main_callback(self, value): - self.main_value = value - - @classmethod - def voidp_callback(self, value): - self.voidp_value = value - - return TestClass - - def test_type_callback_setup(self): - TestClass = self.get_callback_class() - d = {'__type_callbacks__' : [ ('void *', 'voidp_callback') ], - '__delayed_lookups__' : {} } - DelayedLookups.setup_named_callbacks(TestClass, d) - self.assertFalse('__type_callbacks__' in d) - - def test_type_callback_setup_bad(self): - TestClass = self.get_callback_class() - d = {'__type_callbacks__' : [ 'void *', 'voidp_callback' ], - '__delayed_lookups__' : {} } - with self.assertRaises(ValueError): - DelayedLookups.setup_named_callbacks(TestClass, d) - - def test_symbol_callback_setup(self): - TestClass = self.get_callback_class() - d = {'__symbol_callbacks__' : [ ('main', 'main_callback') ], - '__delayed_lookups__' : {} } - DelayedLookups.setup_named_callbacks(TestClass, d) - self.assertFalse('__symbol_callbacks__' in d) - - def test_symbol_callback_setup_bad(self): - TestClass = self.get_callback_class() - d = {'__symbol_callbacks__' : [ 'main', 'main_callback' ], - '__delayed_lookups__' : {} } - with self.assertRaises(ValueError): - DelayedLookups.setup_named_callbacks(TestClass, d) 
- - def test_minsymbol_callback_setup(self): - TestClass = self.get_callback_class() - d = {'__minsymbol_callbacks__' : [ ('main', 'main_callback') ], - '__delayed_lookups__' : {} } - DelayedLookups.setup_named_callbacks(TestClass, d) - self.assertFalse('__minsymbol_callbacks__' in d) - - def test_minsymbol_callback_setup_bad(self): - TestClass = self.get_callback_class() - d = {'__minsymbol_callbacks__' : [ 'main', 'main_callback' ], - '__delayed_lookups__' : {} } - with self.assertRaises(ValueError): - DelayedLookups.setup_named_callbacks(TestClass, d) - class TestMinimalSymbolCallback(unittest.TestCase): def setUp(self): gdb.execute("file") @@ -355,230 +222,3 @@ def test_type_not_found_in_early_load_then_found_after_load(self): self.load_file() self.assertTrue(x.found) self.assertTrue(isinstance(x.gdbtype, gdb.Type)) - -class TestDelayedLookup(unittest.TestCase): - def setUp(self): - gdb.execute("file") - - def load_file(self): - gdb.execute("file tests/test-util") - - def msymbol_test(self): - class Test(CrashBaseClass): - __minsymbols__ = [ 'test_struct' ] - return Test - - def test_bad_msymbol_name(self): - test = self.msymbol_test() - x = test - with self.assertRaises(AttributeError): - y = x.bad_symbol_name - - def test_msymbol_unavailable_at_start(self): - test = self.msymbol_test() - x = test() - with self.assertRaises(DelayedAttributeError): - y = x.test_struct - - def test_msymbol_available_on_load(self): - test = self.msymbol_test() - x = test() - with self.assertRaises(DelayedAttributeError): - y = x.test_struct - self.load_file() - self.assertTrue(isinstance(x.test_struct, gdb.MinSymbol)) - - def test_msymbol_available_at_start(self): - test = self.msymbol_test() - self.load_file() - - x = test() - self.assertTrue(isinstance(x.test_struct, gdb.MinSymbol)) - - def symbol_test(self): - class Test(CrashBaseClass): - __symbols__ = [ 'test_struct' ] - return Test - - def test_bad_symbol_name(self): - test = self.symbol_test() - x = test - with self.assertRaises(AttributeError): - y = x.bad_symbol_name - - def test_symbol_unavailable_at_start(self): - test = self.symbol_test() - x = test() - with self.assertRaises(DelayedAttributeError): - y = x.test_struct - - def test_symbol_available_on_load(self): - test = self.symbol_test() - x = test() - with self.assertRaises(DelayedAttributeError): - y = x.test_struct - self.load_file() - self.assertTrue(isinstance(x.test_struct, gdb.Symbol)) - - def test_symbol_available_at_start(self): - test = self.symbol_test() - self.load_file() - - x = test() - self.assertTrue(isinstance(x.test_struct, gdb.Symbol)) - - def symval_test(self): - class Test(CrashBaseClass): - __symvals__ = [ 'test_struct' ] - return Test - - def test_bad_symval_name(self): - test = self.symval_test() - x = test - with self.assertRaises(AttributeError): - y = x.bad_symval_name - - def test_symval_unavailable_at_start(self): - test = self.symval_test() - x = test() - with self.assertRaises(DelayedAttributeError): - y = x.test_struct - - def test_symval_available_on_load(self): - test = self.symval_test() - x = test() - with self.assertRaises(DelayedAttributeError): - y = x.test_struct - self.load_file() - self.assertTrue(isinstance(x.test_struct, gdb.Value)) - - def test_symval_available_at_start(self): - test = self.symval_test() - self.load_file() - - x = test() - self.assertTrue(isinstance(x.test_struct, gdb.Value)) - - def type_test(self): - class Test(CrashBaseClass): - __types__ = [ 'struct test' ] - return Test - - def test_bad_type_name(self): - test = 
self.type_test() - x = test - with self.assertRaises(AttributeError): - y = x.bad_type_name - - def test_type_unavailable_at_start(self): - test = self.type_test() - x = test() - with self.assertRaises(DelayedAttributeError): - y = x.test_type - - def test_type_available_on_load(self): - test = self.type_test() - x = test() - with self.assertRaises(DelayedAttributeError): - y = x.test_type - self.load_file() - y = x.test_type - self.assertTrue(isinstance(y, gdb.Type)) - - def test_type_available_at_start(self): - test = self.type_test() - self.load_file() - - x = test() - y = x.test_type - self.assertTrue(isinstance(y, gdb.Type)) - - def ptype_test(self): - class Test(CrashBaseClass): - __types__ = [ 'struct test *' ] - return Test - - def test_bad_ptype_name(self): - test = self.ptype_test() - x = test - with self.assertRaises(AttributeError): - y = x.bad_ptype_name - - def test_p_type_unavailable_at_start(self): - test = self.ptype_test() - x = test() - with self.assertRaises(DelayedAttributeError): - y = x.test_p_type - - def test_p_type_available_on_load(self): - test = self.ptype_test() - x = test() - with self.assertRaises(DelayedAttributeError): - y = x.test_p_type - self.load_file() - y = x.test_p_type - self.assertTrue(isinstance(y, gdb.Type)) - - def test_p_type_available_at_start(self): - test = self.ptype_test() - self.load_file() - - x = test() - y = x.test_p_type - self.assertTrue(isinstance(y, gdb.Type)) - - def type_callback_test(self): - class Test(CrashBaseClass): - __type_callbacks__ = [ - ('unsigned long', 'check_ulong') - ] - ulong_valid = False - @classmethod - def check_ulong(cls, gdbtype): - cls.ulong_valid = True - - return Test - - def test_type_callback_nofile(self): - test = self.type_callback_test() - x = test() - self.assertFalse(test.ulong_valid) - with self.assertRaises(AttributeError): - y = x.unsigned_long_type - - def test_type_callback(self): - test = self.type_callback_test() - x = test() - self.load_file() - self.assertTrue(test.ulong_valid) - with self.assertRaises(AttributeError): - y = x.unsigned_long_type - - def type_callback_test_multi(self): - class Test(CrashBaseClass): - __types__ = [ 'unsigned long' ] - __type_callbacks__ = [ - ('unsigned long', 'check_ulong') - ] - ulong_valid = False - @classmethod - def check_ulong(cls, gdbtype): - cls.ulong_valid = True - - return Test - - def test_type_callback_nofile_multi(self): - test = self.type_callback_test_multi() - x = test() - self.assertFalse(test.ulong_valid) - with self.assertRaises(DelayedAttributeError): - y = x.unsigned_long_type - - def test_type_callback_multi(self): - test = self.type_callback_test_multi() - x = test() - self.load_file() - self.assertTrue(test.ulong_valid) - y = x.unsigned_long_type - self.assertTrue(isinstance(y, gdb.Type)) - self.assertTrue(y.sizeof > 4) diff --git a/tests/test_syscache.py b/tests/test_syscache.py index 2f719090f67..980df67280e 100644 --- a/tests/test_syscache.py +++ b/tests/test_syscache.py @@ -111,7 +111,7 @@ def test_get_uptime_value(self): from crash.cache.syscache import CrashConfigCache, CrashKernelCache config = CrashConfigCache() kernel = CrashKernelCache(config) - kernel.jiffies = 27028508 + kernel.set_jiffies(27028508) kernel.adjust_jiffies = False x = kernel.uptime uptime = str(x) From b2182f365d1061db6bd4533a495240084c70a660 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 20 May 2019 13:16:07 -0400 Subject: [PATCH 115/367] crash.util: add more documentation No code changes. This just updates the API documentation for crash.util. 
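The new text follows the Args/Returns/Raises docstring layout already used across
crash.util. As a rough illustration of that convention, here is a hypothetical
helper written in the same style (it is not part of this patch):

    import gdb

    def value_to_int(value):
        """
        Returns the Python integer represented by a gdb.Value

        Args:
            value (gdb.Value): The value to convert

        Returns:
            int: The converted value

        Raises:
            TypeError: value is not a gdb.Value
        """
        if not isinstance(value, gdb.Value):
            raise TypeError("value must be gdb.Value")
        return int(value)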
Signed-off-by: Jeff Mahoney
---
 crash/util/__init__.py | 25 ++++++++++++++++++++++---
 1 file changed, 22 insertions(+), 3 deletions(-)

diff --git a/crash/util/__init__.py b/crash/util/__init__.py
index 817180b4054..eaef267fdb5 100644
--- a/crash/util/__init__.py
+++ b/crash/util/__init__.py
@@ -82,9 +82,10 @@ def container_of(val, gdbtype, member):
     Args:
         val (gdb.Value): The value to be converted. It can refer to an
             allocated structure or a pointer.
-        gdbtype (gdb.Type): The type of the object that will be generated
-        member (str): The name of the member in the target struct that
-            contains `val`.
+        gdbtype (gdb.Type, gdb.Value, str, gdb.Symbol):
+            The type of the object that will be generated
+        member (str):
+            The name of the member in the target struct that contains `val`.
 
     Returns:
         gdb.Value: The converted object, of the type specified by
@@ -185,6 +186,7 @@ def resolve_type(val):
 
     Raises:
         TypeError: The object type of val is not valid
+        MissingTypeError: could not resolve the type from string argument
     """
     if isinstance(val, gdb.Type):
         gdbtype = val
@@ -337,6 +339,8 @@ def safe_lookup_type(name, block=None):
 
     Args:
         name (str): The name of the type to look up
+        block (gdb.Block, optional, default=None):
+            The block to use to resolve the type
 
     Returns:
         gdb.Type for requested type or None if it could not be found
@@ -352,6 +356,9 @@ def array_size(value):
 
     Args:
         value (gdb.Value): The array to size
+
+    Returns:
+        int: The number of elements in the array
     """
     return value.type.sizeof // value[0].type.sizeof
 
@@ -366,6 +373,9 @@ def get_typed_pointer(val, gdbtype):
 
     Returns:
         gdb.Value: The casted pointer of the requested type
+
+    Raises:
+        TypeError: string value for val does not describe a hex address
     """
     if gdbtype.code != gdb.TYPE_CODE_PTR:
         gdbtype = gdbtype.pointer()
@@ -386,6 +396,15 @@ def get_typed_pointer(val, gdbtype):
     return val
 
 def array_for_each(value):
+    """
+    Yields each element in an array separately
+
+    Args:
+        value (gdb.Value): The array to iterate
+
+    Yields:
+        gdb.Value: One element in the array at a time
+    """
     size = array_size(value)
     for i in range(array_size(value)):
         yield value[i]

From 2a277bedb78a52caec659dcdf1be374cd6682b3c Mon Sep 17 00:00:00 2001
From: Jeffrey Mahoney
Date: Tue, 7 May 2019 16:30:58 -0400
Subject: [PATCH 116/367] crash: factor out task iteration

This commit moves the open-coded iteration over the task lists in
crash.kernel to crash.types.task. This will allow it to be unit tested
once we have kernel testing in place.
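For illustration, a caller such as crash.kernel.setup_tasks() can now consume
the shared helpers instead of open-coding the two nested list walks. A minimal
sketch, assuming a kernel image is already loaded (count_tasks() is a
hypothetical example, not part of the patch):

    from crash.types.task import for_each_all_tasks

    def count_tasks():
        # Walks every thread group leader plus each thread in its group
        return sum(1 for task in for_each_all_tasks())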
Signed-off-by: Jeff Mahoney --- crash/kernel.py | 13 ++---------- crash/types/task.py | 48 ++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 49 insertions(+), 12 deletions(-) diff --git a/crash/kernel.py b/crash/kernel.py index d2d7ce6b4ba..fcbc300dee7 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -600,7 +600,7 @@ def load_module_debuginfo(self, objfile: gdb.Objfile, def setup_tasks(self) -> None: from crash.types.percpu import get_percpu_vars - from crash.types.task import LinuxTask + from crash.types.task import LinuxTask, for_each_all_tasks import crash.cache.tasks gdb.execute('set print thread-events 0') @@ -613,21 +613,12 @@ def setup_tasks(self) -> None: sys.stdout.flush() task_count = 0 - tasks = [] - for taskg in list_for_each_entry(task_list, self.symvals.init_task.type, - 'tasks', include_head=True): - tasks.append(taskg) - for task in list_for_each_entry(taskg['thread_group'], - self.symvals.init_task.type, - 'thread_group'): - tasks.append(task) - try: crashing_cpu = int(get_symbol_value('crashing_cpu')) except Exception as e: crashing_cpu = -1 - for task in tasks: + for task in for_each_all_tasks(): cpu = None regs = None active = int(task.address) in rqscurrs diff --git a/crash/types/task.py b/crash/types/task.py index 8ab41884fdf..4449b7d48eb 100644 --- a/crash/types/task.py +++ b/crash/types/task.py @@ -4,6 +4,7 @@ import gdb from crash.util import array_size, struct_has_member from crash.util.symbols import Types, Symvals, SymbolCallbacks +from crash.types.list import list_for_each_entry PF_EXITING = 0x4 @@ -13,7 +14,7 @@ def get_value(symname): return sym[0].value() types = Types(['struct task_struct', 'struct mm_struct', 'atomic_long_t' ]) -symvals = Symvals([ 'task_state_array' ]) +symvals = Symvals([ 'task_state_array', 'init_task' ]) # This is pretty painful. These are all #defines so none of them end # up with symbols in the kernel. The best approximation we have is @@ -380,3 +381,48 @@ def pick_last_run(cls): cls.last_run = cls.last_run__timestamp else: raise RuntimeError("No method to retrieve last run from task found.") + +def for_each_thread_group_leader() -> Iterator[gdb.Value]: + """ + Iterate the task list and yield each thread group leader + + Yields: + :obj:`gdb.Value`: The next task on the list. The value is of + type ``struct task_struct``. + """ + task_list = symvals.init_task['tasks'] + for task in list_for_each_entry(task_list, symvals.init_task.type, + 'tasks', include_head=True): + yield task + +def for_each_thread_in_group(task: gdb.Value) -> Iterator[gdb.Value]: + """ + Iterate a thread group leader's thread list and + yield each struct task_struct + + Args: + task: The task_struct that is the thread group leader. The value + must be of type ``struct task_struct``. + + Yields: + :obj:`gdb.Value`: The next task on the list. The value is of type + ``struct task_struct``. + """ + thread_list = task['thread_group'] + for thread in list_for_each_entry(thread_list, symvals.init_task.type, + 'thread_group'): + yield thread + +def for_each_all_tasks() -> Iterator[gdb.Value]: + """ + Iterate the task list and yield each task including any associated + thread tasks + + Yields: + :obj:`gdb.Value`: The next task on the list. The value is of type + ``struct task_struct``. 
+ """ + for leader in for_each_thread_group_leader(): + yield leader + for task in for_each_thread_in_group(leader): + yield task From 1fd6c5ef9bf49ccab8ccf0d89588875b88bcf0a3 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Wed, 8 May 2019 17:50:13 -0400 Subject: [PATCH 117/367] crash.commands: handle DelayedAttributeError gracefully Command.execute should be catching DelayedAttributeError and displaying an understandable message instead of a stack trace. Signed-off-by: Jeff Mahoney --- crash/commands/__init__.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/crash/commands/__init__.py b/crash/commands/__init__.py index fac1b28a82b..5a17cf75c88 100644 --- a/crash/commands/__init__.py +++ b/crash/commands/__init__.py @@ -8,6 +8,8 @@ import importlib import argparse +from crash.exceptions import DelayedAttributeError + class CommandError(RuntimeError): pass @@ -48,6 +50,8 @@ def invoke(self, argstr, from_tty=False): except CommandLineError as e: print(f"{self.name}: {str(e)}") self.parser.print_usage() + except DelayedAttributeError as e: + print(f"{self.name}: command unavailable, {str(e)}") except (SystemExit, KeyboardInterrupt): pass From 3461ebd3326e91b7948063e1b25e06beac9211e9 Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Wed, 8 May 2019 19:39:30 -0400 Subject: [PATCH 118/367] crash.commands.dmesg: handling dictionary string encoding better The dictionary handling code was outputting bytes instead of strings, so the output of dmesg -d was littered with b''. This commit properly encodes the dictionary into ASCII. Signed-off-by: Jeff Mahoney --- crash/commands/dmesg.py | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/crash/commands/dmesg.py b/crash/commands/dmesg.py index 18a286cf96a..8d5ba0fcb1e 100644 --- a/crash/commands/dmesg.py +++ b/crash/commands/dmesg.py @@ -200,16 +200,8 @@ def log_from_idx(self, logbuf, idx, dict_needed=False): dict_len = int(msg['dict_len']) d = (msg.cast(types.char_p_type) + types.printk_log_p_type.target().sizeof + textlen) - s = '' - - for i in range(0, dict_len): - if d[i]: - s += chr(d[i]) - else: - msgdict['dict'].append(s) - s = '' - - if s != '': + if dict_len > 0: + s = d.string('ascii', 'backslashreplace', dict_len) msgdict['dict'].append(s) return msgdict @@ -248,7 +240,7 @@ def handle_structured_log(self, args): print('{}{}{}'.format(level, timestamp, line)) for d in msg['dict']: - print('{}'.format(d.encode('string_escape'))) + print(d) def handle_logbuf(self, args): if symvals.log_buf_len and symvals.log_buf: From 2b8d2e934e5ebffd69587fadf3f4e19bee7712c3 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Wed, 8 May 2019 20:17:16 -0400 Subject: [PATCH 119/367] crash.types.slab: improve address printing The slab code was printing raw tuples with ints in base-10 that represented addresses. This commit ensures those are displayed as hex addresses. 
Signed-off-by: Jeff Mahoney --- crash/types/slab.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/crash/types/slab.py b/crash/types/slab.py index 9b4b312cc9f..fa22e8b8787 100644 --- a/crash/types/slab.py +++ b/crash/types/slab.py @@ -155,14 +155,14 @@ def contains_obj(self, addr): self.__populate_free() if obj_addr in self.free: - return (False, obj_addr, None) + return (False, int(obj_addr), None) ac = self.kmem_cache.get_array_caches() if obj_addr in ac: - return (False, obj_addr, ac[obj_addr]) + return (False, int(obj_addr), ac[obj_addr]) - return (True, obj_addr, None) + return (True, int(obj_addr), None) def __error(self, msg, misplaced = False): msg = col_error("cache %s slab %x%s" % (self.kmem_cache.name, @@ -575,8 +575,9 @@ def check_array_caches(self): print("cached pointer {:#x} in {} is not allocated: {}".format( ac_ptr, acs[ac_ptr], ac_obj_obj)) elif ac_obj_obj[1] != ac_ptr: - print("cached pointer {:#x} in {} has wrong offset: {}".format( - ac_ptr, acs[ac_ptr], ac_obj_obj)) + print("cached pointer {:#x} in {} has wrong offset: ({}, {:#x}, {})" + .format( ac_ptr, acs[ac_ptr], ac_obj_obj[0], + ac_obj_obj[1], ac_obj_obj[2])) def check_all(self): for (nid, node) in self.__get_nodelists(): From 2a6d90a1dc093828a6c6abdf3ce68f78b6f7b72b Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Wed, 8 May 2019 20:22:53 -0400 Subject: [PATCH 120/367] crash.commands.kmem: use exceptions to exit command with error The kmem command currently exits with success even when errors are encountered. This makes testing the code automatically difficult. This commit uses the CommandError and CommandLineError exceptions to handle those conditions. Signed-off-by: Jeff Mahoney --- crash/commands/kmem.py | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/crash/commands/kmem.py b/crash/commands/kmem.py index 95d5d60fbc3..6437d927b02 100644 --- a/crash/commands/kmem.py +++ b/crash/commands/kmem.py @@ -4,6 +4,7 @@ import gdb import crash from crash.commands import Command, ArgumentParser +from crash.commands import CommandError, CommandLineError from crash.types.slab import kmem_cache_get_all, kmem_cache_from_name, slab_from_obj_addr from crash.types.zone import for_each_zone, for_each_populated_zone from crash.types.vmstat import VmStat @@ -55,8 +56,7 @@ def execute(self, args): print("Checking kmem cache {}".format(cache_name)) cache = kmem_cache_from_name(cache_name) if cache is None: - print("Cache {} not found.".format(cache_name)) - return + raise CommandError(f"Cache {cache_name} not found.") cache.check_all() else: print("Checking all kmem caches...") @@ -68,15 +68,16 @@ def execute(self, args): return if not args.arg: - print("Nothing to do.") - return + raise CommandLineError("no address specified") - addr = int(args.arg[0], 0) + try: + addr = int(args.arg[0], 0) + except ValueError: + raise CommandLineError("address must be numeric") slab = slab_from_obj_addr(addr) if not slab: - print("Address not found in any kmem cache.") - return + raise CommandError("Address not found in any kmem cache.") obj = slab.contains_obj(addr) name = slab.kmem_cache.name @@ -98,9 +99,7 @@ def execute(self, args): elif ac["ac_type"] == "alien": ac_desc = "alien cache of node %d for node %d" % (ac["nid_src"], ac["nid_tgt"]) else: - print("unexpected array cache type") - print(ac) - return + raise CommandError(f"unexpected array cache type {str(ac)}") print("FREE object %x from slab %s (in %s)" % (obj[1], name, ac_desc)) @@ -120,7 +119,7 @@ def 
print_vmstats(self): try: vm_stat = getValue("vm_stat") except AttributeError: - raise gdb.GdbError("Support for new-style vmstat is unimplemented.") + raise CommandError("Support for new-style vmstat is unimplemented.") print(" VM_STAT:") #TODO put this... where? From f27052ee48a065731177de7e2887644e27960c4d Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Wed, 8 May 2019 21:09:59 -0400 Subject: [PATCH 121/367] crash.commands.kmem: fix argument parsing The argument parsing for the kmem command wasn't strict enough and would allow some strange combinations through. This commit fixes the parsing so that it accepts an address only when none of the options are used and that the only option that accepts an (optional) argument is -s [slabname]. Signed-off-by: Jeff Mahoney --- crash/commands/kmem.py | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/crash/commands/kmem.py b/crash/commands/kmem.py index 6437d927b02..2a6312b2812 100644 --- a/crash/commands/kmem.py +++ b/crash/commands/kmem.py @@ -5,7 +5,8 @@ import crash from crash.commands import Command, ArgumentParser from crash.commands import CommandError, CommandLineError -from crash.types.slab import kmem_cache_get_all, kmem_cache_from_name, slab_from_obj_addr +from crash.types.slab import kmem_cache_get_all, kmem_cache_from_name +from crash.types.slab import slab_from_obj_addr from crash.types.zone import for_each_zone, for_each_populated_zone from crash.types.vmstat import VmStat import argparse @@ -34,13 +35,12 @@ def __init__(self, name): parser = ArgumentParser(prog=name) group = parser.add_mutually_exclusive_group() - group.add_argument('-s', action='store_true', default=False) + group.add_argument('-s', nargs='?', const=True, default=False, + dest='slabname') group.add_argument('-z', action='store_true', default=False) group.add_argument('-V', action='store_true', default=False) + group.add_argument('address', nargs='?') - parser.add_argument('arg', nargs=argparse.REMAINDER) - - parser.format_usage = lambda : "kmem [-s] [addr | slabname]\n" super().__init__(name, parser) def execute(self, args): @@ -50,28 +50,28 @@ def execute(self, args): elif args.V: self.print_vmstats() return - elif args.s: - if args.arg: - cache_name = args.arg[0] - print("Checking kmem cache {}".format(cache_name)) - cache = kmem_cache_from_name(cache_name) - if cache is None: - raise CommandError(f"Cache {cache_name} not found.") - cache.check_all() - else: + elif args.slabname: + if args.slabname is True: print("Checking all kmem caches...") for cache in kmem_cache_get_all(): print(cache.name) cache.check_all() + else: + cache_name = args.slabname + print(f"Checking kmem cache {cache_name}") + cache = kmem_cache_from_name(cache_name) + if cache is None: + raise CommandError(f"Cache {cache_name} not found.") + cache.check_all() print("Checking done.") return - if not args.arg: + if not args.address: raise CommandLineError("no address specified") try: - addr = int(args.arg[0], 0) + addr = int(args.address[0], 0) except ValueError: raise CommandLineError("address must be numeric") slab = slab_from_obj_addr(addr) From ec07a60f5cd66864c3974d4fffd5c7b41bec15f8 Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Wed, 8 May 2019 21:12:15 -0400 Subject: [PATCH 122/367] crash.commands.kmem: clean up some trivial things - Don't use a private version of get_symbol_value - Don't import re and argparse when they're not used Signed-off-by: Jeff Mahoney --- crash/commands/kmem.py | 11 ++++------- 1 file changed, 4 
insertions(+), 7 deletions(-) diff --git a/crash/commands/kmem.py b/crash/commands/kmem.py index 2a6312b2812..38f8cc32a7c 100644 --- a/crash/commands/kmem.py +++ b/crash/commands/kmem.py @@ -9,11 +9,8 @@ from crash.types.slab import slab_from_obj_addr from crash.types.zone import for_each_zone, for_each_populated_zone from crash.types.vmstat import VmStat -import argparse -import re - -def getValue(sym): - return gdb.lookup_symbol(sym, None)[0].value() +from crash.util import get_symbol_value +from crash.exceptions import MissingSymbolError class KmemCommand(Command): """ kernel memory inspection @@ -117,8 +114,8 @@ def __print_vmstat(self, vmstat, diffs): def print_vmstats(self): try: - vm_stat = getValue("vm_stat") - except AttributeError: + vm_stat = get_symbol_value("vm_stat") + except MissingSymbolError: raise CommandError("Support for new-style vmstat is unimplemented.") print(" VM_STAT:") From d77c8e81be1e81e7d82c5687152e23dbbe7246a0 Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Thu, 9 May 2019 20:42:10 -0400 Subject: [PATCH 123/367] crash.commands.ps: uncomment -G mode The -G argument for ps was commented out. This adds the test for whether a task is a thread group leader to crash.types.task and enables the -G argument in ps. Signed-off-by: Jeff Mahoney --- crash/commands/ps.py | 3 ++- crash/types/task.py | 3 +++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/crash/commands/ps.py b/crash/commands/ps.py index 4b4bb919482..3717a1073ca 100755 --- a/crash/commands/ps.py +++ b/crash/commands/ps.py @@ -578,7 +578,8 @@ def execute(self, argv): # Only show thread group leaders -# if argv.G and task.pid != int(task.task_struct['tgid']): + if argv.G and not task.is_thread_group_leader(): + continue task.update_mem_usage() self.print_one(argv, thread) diff --git a/crash/types/task.py b/crash/types/task.py index 4449b7d48eb..7c6ee0ff257 100644 --- a/crash/types/task.py +++ b/crash/types/task.py @@ -263,6 +263,9 @@ def is_exiting(self): def is_zombie(self): return self.task_state() & TF.EXIT_ZOMBIE + def is_thread_group_leader(self): + return int(self.task_struct['exit_signal']) >= 0 + def update_mem_usage(self): if self.mem_valid: return From 297f9093eab78f7d67b1b0a160feae62fe18a129 Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Thu, 9 May 2019 16:32:34 -0400 Subject: [PATCH 124/367] crash.subsystem.filesystems: add is_fstype_super and is_fstype_inode These two helpers will take a generic superblock or inode and determine whether it belongs to the given file system. It's a naive implementation that uses a string comparison. This is intentional so the comparison can be made without symbol resolution that may require module loading. Signed-off-by: Jeff Mahoney --- crash/subsystem/filesystem/__init__.py | 32 ++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/crash/subsystem/filesystem/__init__.py b/crash/subsystem/filesystem/__init__.py index ee2f2dbf8bb..714b18a933e 100644 --- a/crash/subsystem/filesystem/__init__.py +++ b/crash/subsystem/filesystem/__init__.py @@ -142,3 +142,35 @@ def get_super_block(desc: AddressSpecifier, force: bool=False) -> gdb.Value: raise gdb.NotAvailableError(f"no superblock available at `{desc}'") return sb + +def is_fstype_super(super_block: gdb.Value, name: str) -> bool: + """ + Tests whether the super_block belongs to a particular file system type. + + This uses a naive string comparison so modules are not required. 
+ + Args: + super_block (gdb.Value): + The struct super_block to test + name (str): The name of the file system type + + Returns: + bool: whether the super_block belongs to the specified file system + """ + return super_fstype(super_block) == name + +def is_fstype_inode(inode: gdb.Value, name: str) -> bool: + """ + Tests whether the inode belongs to a particular file system type. + + Args: + inode (gdb.Value): The struct inode to test + name (str): The name of the file system type + + Returns: + bool: whether the inode belongs to the specified file system + + Raises: + gdb.NotAvailableError: The target value was not available. + """ + return is_fstype_super(inode['i_sb'], name) From 5208f8b6fa29d0a57b6dd99394c961a7cf6f8d38 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 29 Apr 2019 15:08:53 -0400 Subject: [PATCH 125/367] crash.subsystem.filesystem.btrfs: add helpers for super, inode, and uuids This adds helpers to: - export the fsid and metadata uuid from btrfs file systems - test whether a generic super block belongs to btrfs - test whether a generic inode belongs to btrfs We also document the APIs of existing helpers. Signed-off-by: Jeff Mahoney --- crash/subsystem/filesystem/btrfs.py | 117 ++++++++++++++++++++++++++-- 1 file changed, 109 insertions(+), 8 deletions(-) diff --git a/crash/subsystem/filesystem/btrfs.py b/crash/subsystem/filesystem/btrfs.py index d42790dee39..b5e33ba7bc5 100644 --- a/crash/subsystem/filesystem/btrfs.py +++ b/crash/subsystem/filesystem/btrfs.py @@ -2,13 +2,47 @@ # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: import gdb +import uuid +from crash.util import decode_uuid, struct_has_member, container_of from crash.util.symbols import Types +from crash.subsystem.filesystem import is_fstype_super types = Types([ 'struct btrfs_inode', 'struct btrfs_fs_info *', - 'struct btrfs_fs_info' ]) + 'struct btrfs_fs_info' ]) -def btrfs_inode(vfs_inode): +def is_btrfs_super(super_block: gdb.Value) -> bool: + """ + Tests whether a super_block belongs to btrfs. + + Args: + super_block (gdb.Value): The struct super_block + to test + + Returns: + bool: Whether the super_block belongs to btrfs + + Raises: + gdb.NotAvailableError: The target value was not available. + """ + return is_fstype_super(super_block, "btrfs") + +def is_btrfs_inode(vfs_inode: gdb.Value) -> bool: + """ + Tests whether a inode belongs to btrfs. + + Args: + vfs_inode (gdb.Value): The struct inode to test + + Returns: + bool: Whether the inode belongs to btrfs + + Raises: + gdb.NotAvailableError: The target value was not available. + """ + return is_btrfs_super(vfs_inode['i_sb']) + +def btrfs_inode(vfs_inode: gdb.Value, force: bool=False ) -> gdb.Value: """ Converts a VFS inode to a btrfs inode @@ -18,23 +52,90 @@ def btrfs_inode(vfs_inode): vfs_inode (gdb.Value): The struct inode to convert to a struct btrfs_inode + force (bool): Ignore type checking. + Returns: gdb.Value: The converted struct btrfs_inode + + Raises: + TypeError: the inode does not belong to btrfs + gdb.NotAvailableError: The target value was not available. 
""" + if not force and not is_btrfs_inode(vfs_inode): + raise TypeError("inode does not belong to btrfs") + return container_of(vfs_inode, types.btrfs_inode_type, 'vfs_inode') -def btrfs_fs_info(super_block): +def btrfs_fs_info(super_block: gdb.Value, force: bool=False) -> gdb.Value: """ - Converts a VFS superblock to a btrfs fs_info + Resolves a btrfs_fs_info from a VFS superblock - This method converts a struct super_block to a struct btrfs_fs_info + This method resolves a struct btrfs_fs_info from a struct super_block Args: super_block (gdb.Value): The struct super_block - to convert to a struct btrfs_fs_info. + to use to resolve a struct btrfs_fs_info. A pointer to a + struct super_block is also acceptable. + + force (bool): Ignore type checking. Returns: - gdb.Value: The converted struct + gdb.Value: The resolved struct btrfs_fs_info + + Raises: + TypeError: the super_block does not belong to btrfs + gdb.NotAvailableError: The target value was not available. + """ + if not force and not is_btrfs_super(super_block): + raise TypeError("super_block does not belong to btrfs") + + fs_info = super_block['s_fs_info'].cast(types.btrfs_fs_info_p_type) + return fs_info.dereference() + +def btrfs_fsid(super_block: gdb.Value, force: bool=False) -> uuid.UUID: + """ + Returns the btrfs fsid (UUID) for the specified superblock. + + Args: + super_block (gdb.Value): The struct super_block + for which to return the btrfs fsid. + + force (bool): Ignore type checking. + + Returns: + uuid.UUID: The Python UUID Object for the btrfs fsid + + Raises: + TypeError: the super_block does not belong to btrfs + gdb.NotAvailableError: The target value was not available. + """ + fs_info = btrfs_fs_info(super_block, force) + if struct_has_member(types.btrfs_fs_info_type, 'fsid'): + return decode_uuid(fs_info['fsid']) + return decode_uuid(fs_info['fs_devices']['fsid']) + +def btrfs_metadata_uuid(sb: gdb.Value, force: bool=False) -> uuid.UUID: + """ + Returns the btrfs metadata uuid for the specified superblock. + + Args: + super_block (gdb.Value): The struct super_block + for which to return the btrfs metadata uuid. + + force (bool): Ignore type checking. + + Returns: + uuid.UUID: The Python UUID Object for the btrfs fsid + + Raises: + TypeError: the super_block does not belong to btrfs + gdb.NotAvailableError: The target value was not available. """ - return super_block['s_fs_info'].cast(types.btrfs_fs_info_p_type) + fs_info = btrfs_fs_info(sb, force) + if struct_has_member(types.btrfs_fs_info_type, 'metadata_uuid'): + return decode_uuid(fs_info['metadata_uuid']) + elif struct_has_member(fs_info['fs_devices'].type, 'metadata_uuid'): + return decode_uuid(fs_info['fs_devices']['metadata_uuid']) + else: + return btrfs_fsid(sb, force) From 99eb7be08ed72fc89c97919b936d921db2d7bcbe Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 20 May 2019 09:36:58 -0400 Subject: [PATCH 126/367] crash.util: fix get_typed_pointer semantics Despite being called 'get_typed_pointer', we were dereferencing the pointer before turning. Also, we were refusing to take the address of a value that wasn't already the type we were targeting, which is silly since that would just return the object back. 
Signed-off-by: Jeff Mahoney --- crash/util/__init__.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/crash/util/__init__.py b/crash/util/__init__.py index eaef267fdb5..8b23e5c3c86 100644 --- a/crash/util/__init__.py +++ b/crash/util/__init__.py @@ -366,6 +366,10 @@ def get_typed_pointer(val, gdbtype): """ Returns a pointer to the requested type at the given address + If the val is passed as a gdb.Value, it will be casted to + the expected type. If it is not a pointer, the address of the + value will be used instead. + Args: val (gdb.Value, str, or int): The address for which to provide a casted pointer @@ -380,10 +384,8 @@ def get_typed_pointer(val, gdbtype): if gdbtype.code != gdb.TYPE_CODE_PTR: gdbtype = gdbtype.pointer() if isinstance(val, gdb.Value): - if (val.type != gdbtype and - val.type != gdbtype.target()): - raise TypeError("gdb.Value must refer to {} not {}" - .format(gdbtype, val.type)) + if val.type.code != gdb.TYPE_CODE_PTR: + val = val.address elif isinstance(val, str): try: val = int(val, 16) @@ -391,7 +393,9 @@ def get_typed_pointer(val, gdbtype): print(e) raise TypeError("string must describe hex address: ".format(e)) if isinstance(val, int): - val = gdb.Value(val).cast(gdbtype).dereference() + val = gdb.Value(val).cast(gdbtype) + else: + val = val.cast(gdbtype) return val From a8307f407a45e7b2e7c080601d1b42511ef4777e Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Wed, 15 May 2019 14:49:16 -0400 Subject: [PATCH 127/367] crash.types.task: get_stack_pointer should take thread_struct The arch-specific part of get_stack_pointer just needs to interpret the arch's thread_struct. Pass it that and avoid confusion. Signed-off-by: Jeff Mahoney --- crash/arch/x86_64.py | 6 +++--- crash/types/task.py | 8 ++------ 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/crash/arch/x86_64.py b/crash/arch/x86_64.py index b025c0672c7..4d72f1e9285 100644 --- a/crash/arch/x86_64.py +++ b/crash/arch/x86_64.py @@ -53,6 +53,7 @@ def fetch_register_scheduled_inactive(self, thread, register): task = thread.info.task_struct rsp = task['thread']['sp'].cast(ulong_type.pointer()) + thread.registers['rsp'].value = rsp frame = rsp.cast(self.inactive_task_frame_type.pointer()).dereference() @@ -62,7 +63,6 @@ def fetch_register_scheduled_inactive(self, thread, register): if register == 16: return True - thread.registers['rsp'].value = rsp thread.registers['rbp'].value = frame['bp'] thread.registers['rbx'].value = frame['bx'] thread.registers['r12'].value = frame['r12'] @@ -114,7 +114,7 @@ def fetch_register_scheduled_thread_return(self, thread, register): thread.info.valid_stack = True @classmethod - def get_stack_pointer(cls, thread): - return int(thread.registers['rsp'].value) + def get_stack_pointer(cls, thread_struct): + return thread_struct['sp'] register(x86_64Architecture) diff --git a/crash/types/task.py b/crash/types/task.py index 7c6ee0ff257..4b08a8267b5 100644 --- a/crash/types/task.py +++ b/crash/types/task.py @@ -302,12 +302,8 @@ def is_kernel_task(self): def set_get_stack_pointer(cls, fn): cls.get_stack_pointer_fn = fn - @classmethod - def get_stack_pointer(cls): - # This unbinds the function from the task object so we don't - # pass self to the function. 
- fn = cls.get_stack_pointer_fn - return fn(self.thread) + def get_stack_pointer(self): + return self.get_stack_pointer_fn(self.task_struct['thread']) def get_rss_field(self): return int(self.task_struct['mm']['rss'].value()) From fd532eb74e0445048eb57e907545793683b158c4 Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Wed, 15 May 2019 14:51:56 -0400 Subject: [PATCH 128/367] crash.types.task: add accessor-helpers Every consumer of a task shouldn't need to drill down into the structure just to get the task name, pid, etc. Signed-off-by: Jeff Mahoney --- crash/types/task.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/crash/types/task.py b/crash/types/task.py index 4b08a8267b5..9a598ecf0ae 100644 --- a/crash/types/task.py +++ b/crash/types/task.py @@ -283,6 +283,22 @@ def update_mem_usage(self): self.pgd_addr = int(mm['pgd']) self.mem_valid = True + def task_name(self, brackets=False): + name = self.task_struct['comm'].string() + if brackets and self.is_kernel_task(): + return f"[{name}]" + else: + return name + + def task_pid(self): + return int(self.task_struct['pid']) + + def parent_pid(self): + return int(self.task_struct['parent']['pid']) + + def task_address(self): + return int(self.task_struct.address) + def is_kernel_task(self): if self.task_struct['pid'] == 0: return True From 79b95d341c33453faeb6fc84b4f909390fa4dfb6 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 20 May 2019 13:53:32 -0400 Subject: [PATCH 129/367] crash.types.task: add documentation and static typing hints This commit adds API documentation and static typing hints to tasks. There are some minor code changes to make mypy happy with the result. Signed-off-by: Jeff Mahoney --- crash/types/task.py | 331 +++++++++++++++++++++++++++++++++++--------- 1 file changed, 263 insertions(+), 68 deletions(-) diff --git a/crash/types/task.py b/crash/types/task.py index 9a598ecf0ae..eddb9a7f510 100644 --- a/crash/types/task.py +++ b/crash/types/task.py @@ -1,6 +1,8 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +from typing import Iterator, Callable, Dict + import gdb from crash.util import array_size, struct_has_member from crash.util.symbols import Types, Symvals, SymbolCallbacks @@ -22,6 +24,14 @@ def get_value(symname): # is make some assumptions based on the changes upstream. This will # be fragile. class TaskStateFlags(object): + """ + A class to contain state related to discovering task flag values. + Not meant to be instantiated. + + + The initial values below are overridden once symbols are available to + resolve them properly. + """ TASK_RUNNING = 0 TASK_FLAG_UNINITIALIZED = -1 @@ -43,6 +53,9 @@ class TaskStateFlags(object): TASK_NEW: int=TASK_FLAG_UNINITIALIZED TASK_IDLE: int=TASK_FLAG_UNINITIALIZED + def __init__(self): + raise NotImplementedError("This class is not meant to be instantiated") + @classmethod def has_flag(cls, flagname): v = getattr(cls, flagname) @@ -157,32 +170,59 @@ def _check_state_bits(cls): TF = TaskStateFlags -class BadTaskError(TypeError): - msgtemplate = "task_struct must be gdb.Value describing struct task_struct not {}" - def __init__(self, task): - if isinstance(task, gdb.Value): - typedesc = task.type - else: - typedesc = type(task) - super().__init__(self.msgtemplate.format(typedesc)) - class LinuxTask(object): - task_struct_type = None - mm_struct_fields = None - get_rss = None - get_stack_pointer_fn = None - valid = False + """ + A wrapper class for ``struct task_struct``. 
There will be typically + one of these allocated for every task discovered in the debugging + environment. - def __init__(self, task_struct, active=False, cpu=None, regs=None): - self.init_task_types(task_struct) + Args: + task_struct: The task to wrap. The value must be of type + ``struct task_struct``. + active: Whether this task is active in the debugging enviroment + cpu: Which CPU this task was using + regs: The registers associated with this task + + Attributes: + task_struct (:obj:`gdb.Value`): The task being wrapped. The value + is of type ``struct task_struct``. + active (:obj:`bool`): Whether this task is active + cpu (:obj:`int`): The CPU number the task was using + regs: The registers associated with this task, if active + thread_info (:obj:`gdb.Value`): The architecture-specific + ``struct thread_info`` for this task. The value will be of + type ``struct thread_info``. + thread (:obj:`gdb.InferiorThread`): The GDB representation of the + thread. + mem_valid (:obj:`bool`): Whether the memory statistics are currently + valid. + rss (:obj:`int`): The size of the resident memory for this task. + total_vm (:obj:`int`): The total size of the vm space for this task. + pgd_addr (:obj:`int`): The address of the top of the page table tree. + + Raises: + :obj:`.ArgumentTypeError`: task_struct was not a :obj:`gdb.Value`. + :obj:`.UnexpectedGDBTypeError`: task_struct was not of type + ``struct task_struct``. + :obj:`.InvalidArgumentError`: The cpu number was not ``None`` or an + :obj:`int`. + """ + _valid = False + + def __init__(self, task_struct: gdb.Value, active: bool=False, + cpu: int=None, regs: Dict[str, int]=None): + self._init_task_types(task_struct) if cpu is not None and not isinstance(cpu, int): raise TypeError("cpu must be integer or None") - if not (isinstance(task_struct, gdb.Value) and - (task_struct.type == types.task_struct_type or - task_struct.type == types.task_struct_type.pointer())): - raise BadTaskError(task_struct) + if not isinstance(task_struct, gdb.Value): + raise ArgumentTypeError('task_struct', task_struct, gdb.Value) + + if not (task_struct.type == types.task_struct_type or + task_struct.type == types.task_struct_type.pointer()): + raise UnexpectedGDBTypeError('task_struct', task_struct, + types.task_struct_type) self.task_struct = task_struct self.active = active @@ -190,7 +230,6 @@ def __init__(self, task_struct, active=False, cpu=None, regs=None): self.regs = regs self.thread_info = None - self.stack_pointer = None self.thread = None # mem data @@ -200,8 +239,8 @@ def __init__(self, task_struct, active=False, cpu=None, regs=None): self.pgd_addr = 0 @classmethod - def init_task_types(cls, task): - if not cls.valid: + def _init_task_types(cls, task): + if not cls._valid: t = types.task_struct_type if task.type != t: raise BadTaskError(task) @@ -211,38 +250,87 @@ def init_task_types(cls, task): # a type resolved from a symbol will be different structures # within gdb. Equality requires a deep comparison rather than # a simple pointer comparison. 
- types.task_struct_type = task.type + types.override('struct task_struct', task.type) fields = types.task_struct_type.fields() cls.task_state_has_exit_state = 'exit_state' in fields - cls.pick_get_rss() - cls.pick_last_run() + cls._pick_get_rss() + cls._pick_last_run() cls.init_mm = get_value('init_mm') - cls.valid = True + cls._valid = True - def attach_thread(self, thread): + def attach_thread(self, thread: gdb.InferiorThread) -> None: + """ + Associate a gdb thread with this task + + Args: + thread: The gdb thread to associate with this task + """ if not isinstance(thread, gdb.InferiorThread): raise TypeError("Expected gdb.InferiorThread") self.thread = thread - def set_thread_info(self, thread_info): + def set_thread_info(self, thread_info: gdb.Value) -> None: + """ + Set the thread info for this task + + The thread info structure is architecture specific. This method + allows the architecture code to assign its thread info structure + to this task. + + Args: + thread_info: The ``struct thread_info`` to be associated with + this task. The value must be of type ``struct thread_info``. + """ self.thread_info = thread_info - def get_thread_info(self): + def get_thread_info(self) -> gdb.Value: + """ + Get the thread info for this task + + The thread info structure is architecture specific and so this + method abstracts its retreival. + + Returns: + :obj:`gdb.Value`: The struct thread_info associated with this + task. The type of the value is ``struct thread_info``. + """ return self.thread_info - def get_last_cpu(self): - try: - return int(self.task_struct['cpu']) - except gdb.error as e: - return int(self.thread_info['cpu']) + def get_last_cpu(self) -> int: + """ + Returns the last cpu this task was scheduled to execute on - def task_state(self): + Returns: + :obj:`int`: The last cpu this task was scheduled to execute on + """ + if struct_has_member(self.task_struct, 'cpu'): + cpu = self.task_struct['cpu'] + else: + cpu = self.thread_info['cpu'] + return int(cpu) + + # Hrm. This seems broken since we're combining flags from + # two fields. + def task_state(self) -> int: + """ + Return the task state flags for this task *(possibly broken due to + combining flags from ``state`` and ``exit_state``)*. + + Returns: + :obj:`int`: The state flags for this task. 
+ """ state = int(self.task_struct['state']) if self.task_state_has_exit_state: state |= int(self.task_struct['exit_state']) return state - def maybe_dead(self): + def maybe_dead(self) -> bool: + """ + Returns whether this task is dead + + Returns: + :obj:`bool`: Whether this task is dead + """ state = self.task_state() known = TF.TASK_INTERRUPTIBLE @@ -254,19 +342,49 @@ def maybe_dead(self): known |= TF.TASK_SWAPPING return (state & known) == 0 - def task_flags(self): + def task_flags(self) -> int: + """ + Returns the flags for this task + + Returns: + :obj:`int`: The flags for this task + """ return int(self.task_struct['flags']) - def is_exiting(self): - return self.task_flags() & PF_EXITING + def is_exiting(self) -> bool: + """ + Returns whether a task is exiting + + Returns: + :obj:`bool`: Whether the task is exiting + """ + return (self.task_flags() & PF_EXITING) != 0 + + def is_zombie(self) -> bool: + """ + Returns whether a task is in Zombie state - def is_zombie(self): - return self.task_state() & TF.EXIT_ZOMBIE + Returns: + :obj:`bool`: Whether the task is in zombie state + """ + return (self.task_state() & TF.EXIT_ZOMBIE) != 0 - def is_thread_group_leader(self): + def is_thread_group_leader(self) -> bool: + """ + Returns whether a task is a thread group leader + + Returns: + :obj:`bool`: Whether the task is a thread group leader + """ return int(self.task_struct['exit_signal']) >= 0 - def update_mem_usage(self): + def update_mem_usage(self) -> None: + """ + Update the memory usage for this task + + Tasks are created initially without their memory statistics. This + method explicitly updates them. + """ if self.mem_valid: return @@ -283,20 +401,48 @@ def update_mem_usage(self): self.pgd_addr = int(mm['pgd']) self.mem_valid = True - def task_name(self, brackets=False): + def task_name(self, brackets: bool=False) -> str: + """ + Returns the ``comm`` field of this task + + Args: + brackets: If this task is a kernel thread, surround the name + in square brackets + + Returns: + :obj:`str`: The ``comm`` field of this task a python string + """ name = self.task_struct['comm'].string() if brackets and self.is_kernel_task(): return f"[{name}]" else: return name - def task_pid(self): + def task_pid(self) -> int: + """ + Returns the pid of this task + + Returns: + :obj:`int`: The pid of this task + """ return int(self.task_struct['pid']) - def parent_pid(self): + def parent_pid(self) -> int: + """ + Returns the pid of this task's parent + + Returns: + :obj:`int`: The pid of this task's parent + """ return int(self.task_struct['parent']['pid']) - def task_address(self): + def task_address(self) -> int: + """ + Returns the address of the task_struct for this task + + Returns: + :obj:`int`: The address of the task_struct + """ return int(self.task_struct.address) def is_kernel_task(self): @@ -315,19 +461,44 @@ def is_kernel_task(self): return False @classmethod - def set_get_stack_pointer(cls, fn): - cls.get_stack_pointer_fn = fn + def set_get_stack_pointer(cls, fn: Callable[[gdb.Value], int]): + """ + Set the stack pointer callback for this architecture + + The callback must accept a :obj:`gdb.Value` of type + ``struct thread`` and return a :obj:`int` containing the address + of the stack pointer. + + Args: + fn: The callback to use. It will be used by all tasks. + """ + setattr(cls, '_get_stack_pointer_fn', fn) + + def get_stack_pointer(self) -> int: + """ + Get the stack pointer for this task + + Returns: + :obj:`int`: The address of the stack pointer for this task. 
+ + Raises: + :obj:`NotImplementedError`: The architecture hasn't provided + a stack pointer callback. + """ + try: + fn = getattr(self, '_get_stack_pointer_fn') + except AttributeError as e: + raise NotImplementedError("Architecture hasn't provided stack pointer callback") - def get_stack_pointer(self): - return self.get_stack_pointer_fn(self.task_struct['thread']) + return int(fn(self.task_struct['thread'])) - def get_rss_field(self): + def _get_rss_field(self): return int(self.task_struct['mm']['rss'].value()) - def get__rss_field(self): + def _get__rss_field(self): return int(self.task_struct['mm']['_rss'].value()) - def get_rss_stat_field(self): + def _get_rss_stat_field(self): stat = self.task_struct['mm']['rss_stat']['count'] stat0 = self.task_struct['mm']['rss_stat']['count'][0] rss = 0 @@ -335,7 +506,7 @@ def get_rss_stat_field(self): rss += int(stat[i]['counter']) return rss - def get_anon_file_rss_fields(self): + def _get_anon_file_rss_fields(self): mm = self.task_struct['mm'] rss = 0 for name in cls.anon_file_rss_fields: @@ -349,15 +520,15 @@ def get_anon_file_rss_fields(self): # dynamically. We may do that eventually, but for now we can just # select the proper function and assign it to the class. @classmethod - def pick_get_rss(cls): + def _pick_get_rss(cls): if struct_has_member(types.mm_struct_type, 'rss'): - cls.get_rss = cls.get_rss_field + cls._get_rss = cls._get_rss_field elif struct_has_member(types.mm_struct_type, '_rss'): - cls.get_rss = cls.get__rss_field + cls._get_rss = cls._get__rss_field elif struct_has_member(types.mm_struct_type, 'rss_stat'): cls.MM_FILEPAGES = get_value('MM_FILEPAGES') cls.MM_ANONPAGES = get_value('MM_ANONPAGES') - cls.get_rss = cls.get_rss_stat_field + cls._get_rss = cls._get_rss_stat_field else: cls.anon_file_rss_fields = [] @@ -368,35 +539,59 @@ def pick_get_rss(cls): cls.anon_file_rss_fields.append('_anon_rss') cls.atomic_long_type = gdb.lookup_type('atomic_long_t') - cls.get_rss = cls.get_anon_file_rss_fields + cls._get_rss = cls._get_anon_file_rss_fields if len(cls.anon_file_rss_fields): raise RuntimeError("No method to retrieve RSS from task found.") - def last_run__last_run(self): + def _get_rss(self) -> int: + raise NotImplementedError("_get_rss not implemented") + + def get_rss(self): + """ + Return the resident set for this task + + Returns: + :obj:`int`: The size of the resident memory set for this task + """ + return self._get_rss() + + def _last_run__last_run(self): return int(self.task_struct['last_run']) - def last_run__timestamp(self): + def _last_run__timestamp(self): return int(self.task_struct['timestamp']) - def last_run__last_arrival(self): + def _last_run__last_arrival(self): return int(self.task_struct['sched_info']['last_arrival']) + def _get_last_run(self) -> int: + raise NotImplementedError("_get_last_run not implemented") + @classmethod - def pick_last_run(cls): + def _pick_last_run(cls): fields = types.task_struct_type.keys() if ('sched_info' in fields and 'last_arrival' in types.task_struct_type['sched_info'].type.keys()): - cls.last_run = cls.last_run__last_arrival + cls._get_last_run = cls._last_run__last_arrival elif 'last_run' in fields: - cls.last_run = cls.last_run__last_run + cls._get_last_run = cls._last_run__last_run elif 'timestamp' in fields: - cls.last_run = cls.last_run__timestamp + cls._get_last_run = cls._last_run__timestamp else: raise RuntimeError("No method to retrieve last run from task found.") + def last_run(self) -> int: + """ + The timestamp of when this task was last run + + Returns: + 
:obj:`int`: The timestamp of when this task was last run + """ + return self._get_last_run() + def for_each_thread_group_leader() -> Iterator[gdb.Value]: """ Iterate the task list and yield each thread group leader From 0d2f31b11dea8ffdfc4828903c7f0ae10b9d9083 Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Wed, 15 May 2019 14:58:04 -0400 Subject: [PATCH 130/367] crash.commands.ps: factor out formatting This is a big commit that pulls the formatting of the output out of the command. The idea is that we can implement this more cleanly by adding methods to the formatting class. Signed-off-by: Jeff Mahoney --- crash/commands/ps.py | 281 ++++++++++++++++++++++++++----------------- 1 file changed, 168 insertions(+), 113 deletions(-) diff --git a/crash/commands/ps.py b/crash/commands/ps.py index 3717a1073ca..ee170d8d476 100755 --- a/crash/commands/ps.py +++ b/crash/commands/ps.py @@ -7,9 +7,152 @@ import re from crash.commands import Command, ArgumentParser -from crash.commands import CommandLineError +from crash.commands import CommandLineError, CommandError from crash.types.task import LinuxTask, TaskStateFlags as TF +class TaskFormat(object): + """ + This class is responsible for converting the arguments into formatting + rules. + """ + def __init__(self, argv, regex): + self.sort = lambda x: x.info.task_pid() + self._filter = lambda x: True + self._format_one_task = self._format_common_line + self._regex = regex + + if argv.s: + self._format_header = self._format_stack_header + self._format_column4 = self._format_stack_address + elif argv.n: + self._format_header = self._format_threadnum_header + self._format_column4 = self._format_thread_num + else: + self._format_header = self._format_task_header + self._format_column4 = self._format_task_address + + if argv.k: + self._filter = self._is_kernel_thread + elif argv.u: + self._filter = self._is_user_task + elif argv.G: + self._filter = self._is_thread_group_leader + + if argv.l: + self.sort = lambda x: -x.info.last_run() + self._format_one_task = self._format_last_run + self._format_header = lambda : "" + + def _format_generic_header(self, col4name: str, col4width: int) -> str: + header = f" PID PPID CPU {col4name:^{col4width}} ST %MEM " + header += "VSZ RSS COMM" + + return header + + def _format_stack_header(self) -> str: + return self._format_generic_header("KSTACK", 16) + + def _format_stack_address(self, task: LinuxTask) -> str: + addr = int(task.get_stack_pointer()) + return f"{addr:16x}" + + def _format_task_header(self) ->str: + return self._format_generic_header("TASK", 16) + + def _format_task_address(self, task: LinuxTask) -> str: + addr = int(task.task_struct.address) + return f"{addr:16x}" + + def _format_threadnum_header(self) -> str: + return self._format_generic_header("THREAD#", 7) + + def _format_thread_num(self, task: LinuxTask) -> str: + return f"{task.thread.num:7d}" + + def _is_kernel_thread(self, task: LinuxTask) -> bool: + return task.is_kernel_task() + + def _is_user_task(self, task: LinuxTask) -> bool: + return not self._is_kernel_thread(task) + + def _is_thread_group_leader(self, task: LinuxTask) -> bool: + return task.is_thread_group_leader() + + def _format_common_line(self, task: LinuxTask, state: str) -> str: + pid = task.task_pid() + parent_pid = task.parent_pid() + last_cpu = task.get_last_cpu() + name = task.task_name() + + # This needs adaptation for page size != 4k + total_vm = task.total_vm * 4096 // 1024 + rss = task.rss * 4096 // 1024 + + if task.active: + active = ">" + else: + active = " " + + line 
= f"{active} {pid:>5} {parent_pid:>5} {last_cpu:>3} " + line += self._format_column4(task) + line += f" {state:3} {0:.1f} {total_vm:7d} {rss:6d} {name}" + + return line + + def _format_last_run(self, task: LinuxTask, state: str) -> str: + pid = task.task_pid() + addr = task.task_address() + cpu = task.get_last_cpu() + name = task.task_name() + if task.active: + cpu = task.cpu + + line = f"[{task.last_run():d}] [{state}] PID: {pid:-5d} " + line += f"TASK: {addr:x} CPU: {cpu:>2d} COMMAND: \"{name}\"" + + return line + + def should_print_task(self, task: LinuxTask) -> bool: + """ + Given optional filters and regex as part of the parent + object, return whether a task passes the criteria to be + printed. + + Args: + task (LinuxTask): The task under consideration + + Returns: + bool: Whether this task should be printed + """ + if self._filter(task) is False: + return False + + if self._regex and not self._regex.match(task.task_name()): + return False + + return True + + def format_one_task(self, task: LinuxTask, state: str) -> str: + """ + Given the formatting rules, produce the output line for this task. + + Args: + task (LinuxTask): The task to be printed + + Returns: + str: The ps output line for this task + """ + return self._format_one_task(task, state) + + def format_header(self) -> str: + """ + Return the header for this output object + + Returns: + str: The header for this type of ps output + """ + return self._format_header() + class PSCommand(Command): """display process status information @@ -410,21 +553,6 @@ def __init__(self): Command.__init__(self, "ps", parser) - self.header_template = " PID PPID CPU {1:^{0}} ST %MEM " \ - "VSZ RSS COMM" - -# PID PPID CPU TASK ST %MEM VSZ RSS COMM -# 1 0 3 ffff88033aa780c8 RU 0.0 0 0 [systemd] -#> 17080 16749 6 ffff8801db5ae040 RU 0.0 8168 1032 less -# PID PPID CPU TASK ST %MEM VSZ RSS COMM -#> 0 0 0 ffffffff81c13460 RU 0.0 0 0 [swapper/0] -# 17077 16749 0 ffff8800b956b848 RU 0.0 0 0 [less] - self.line_template = "{0} {1:>5} {2:>5} {3:>3} {4:{5}x} {6:3} {7:.1f}" - self.line_template += " {8:7d} {9:6d} {10:.{11}}{12}{13:.{14}}" - - self.num_line_template = "{0} {1:>5} {2:>5} {3:>3} {4:{5}d} {6:3} {7:.1f}" - self.num_line_template += " {8:7d} {9:6d} {10:.{11}}{12}{13:.{14}}" - def task_state_string(self, task): state = task.task_state() buf = None @@ -453,72 +581,6 @@ def task_state_string(self, task): return buf - @classmethod - def task_header(cls, task): - task_struct = task.task_struct - template = "PID: {0:-5d} TASK: {1:x} CPU: {2:>2d} COMMAND: \"{3}\"" - cpu = task.get_last_cpu() - if task.active: - cpu = task.cpu - return template.format(int(task_struct['pid']), - int(task_struct.address), cpu, - task_struct['comm'].string()) - - def print_last_run(self, task): - radix = 10 - if radix == 10: - radix_string = "d" - else: - radix_string = "x" - template = "[{0:{1}}] [{2}] {3}" - print(template.format(task.last_run(), radix_string, - self.task_state_string(task), - self.task_header(task))) - - def print_one(self, argv, thread): - task = thread.info - specified = argv.args is None - task_struct = task.task_struct - - pointer = task_struct.address - if argv.s: - pointer = task.get_stack_pointer() - - if argv.n: - pointer = thread.num - - if argv.l: - self.print_last_run(task) - return - - try: - parent_pid = task_struct['parent']['pid'] - except KeyError: - # This can happen on live systems where pids have gone - # away - print("Couldn't locate task at address {:#x}" - .format(task_struct.parent.address)) - return - - if task.active: - active = 
">" - else: - active = " " - line = self.line_template - width = 16 - if argv.n: - line = self.num_line_template - width = 7 - - print(line.format(active, int(task_struct['pid']), int(parent_pid), - int(task.get_last_cpu()), int(pointer), - width, self.task_state_string(task), 0, - task.total_vm * 4096 // 1024, - task.rss * 4096 // 1024, - "[", int(task.is_kernel_task()), - task_struct['comm'].string(), - "]", int(task.is_kernel_task()))) - def setup_task_states(self): self.task_states = { TF.TASK_RUNNING : "RU", @@ -538,49 +600,42 @@ def setup_task_states(self): self.task_states[TF.TASK_IDLE] = "ID" def execute(self, argv): - sort_by_pid = lambda x: x.info.task_struct['pid'] - sort_by_last_run = lambda x: -x.info.last_run() + # Unimplemented + if argv.p or argv.c or argv.t or argv.a or argv.g or argv.r: + raise CommandError("Support for the -p, -c, -t, -a, -g, and -r options is unimplemented.") if not hasattr(self, 'task_states'): self.setup_task_states() - sort_by = sort_by_pid - if argv.l: - sort_by = sort_by_last_run - else: - if argv.s: - col4name = "KSTACK" - width = 16 - elif argv.n: - col4name = " THREAD#" - width = 7 - else: - col4name = "TASK" - width = 16 - print(self.header_template.format(width, col4name)) - regex = None if argv.args: regex = re.compile(fnmatch.translate(argv.args[0])) - for thread in sorted(gdb.selected_inferior().threads(), key=sort_by): + taskformat = TaskFormat(argv, regex) + + count = 0 + header = taskformat.format_header() + for thread in sorted(gdb.selected_inferior().threads(), + key=taskformat.sort): task = thread.info if task: - if argv.k and not task.is_kernel_task(): + if not taskformat.should_print_task(task): continue - if argv.u and task.is_kernel_task(): - continue - - if regex is not None: - m = regex.match(task.task_struct['comm'].string()) - if m is None: - continue - - # Only show thread group leaders - if argv.G and not task.is_thread_group_leader(): - continue + if header: + print(header) + header = None task.update_mem_usage() - self.print_one(argv, thread) + state = self.task_state_string(task) + line = taskformat.format_one_task(task, state) + print(line) + count += 1 + + if count == 0: + if regex: + print(f"No matches for {argv.args[0]}.") + else: + raise CommandError("Unfiltered output has no matches. BUG?") + PSCommand() From 44440eff2141d446b50cda833d601f1a1beebe40 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Thu, 23 May 2019 16:53:15 -0400 Subject: [PATCH 131/367] crash.commands.dmesg: raise CommandError instead of gdb.GdbError When we encounter an unstructured log with -d, we need to raise CommandError. 
Signed-off-by: Jeff Mahoney --- crash/commands/dmesg.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crash/commands/dmesg.py b/crash/commands/dmesg.py index 8d5ba0fcb1e..7b730fdf95d 100644 --- a/crash/commands/dmesg.py +++ b/crash/commands/dmesg.py @@ -6,7 +6,7 @@ import argparse import re -from crash.commands import Command, ArgumentParser +from crash.commands import Command, ArgumentParser, CommandError from crash.exceptions import DelayedAttributeError from crash.util.symbols import Types, Symvals @@ -262,7 +262,7 @@ def execute(self, args): except LogTypeException as lte: pass except LogInvalidOption as lio: - raise gdb.GdbError(str(lio)) + raise CommandError(str(lio)) print("Can't find valid log") From 3c782d3986027eefbfd9a4972a788c15567c7222 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Tue, 7 May 2019 17:52:33 -0400 Subject: [PATCH 132/367] crash: add kernel testing against vmcores One of the things that has had a serious negative effect on the quality of crash-python is the inability to do automated testing on a broad variety of kernels and vmcores. Often, we have to run our tests by hand. More often, we get reports from new users that something broke unexpectedly. This commit adds real unit testing against real kernels and vmcores. It includes a few test cases that will need to be extended further. The policy moving forward will be that new features will require a matching test case that passes across a variety of kernels. Signed-off-by: Jeff Mahoney --- INTERNALS.md | 19 + TESTING.md | 61 ++ crash/commands/__init__.py | 2 +- kernel-tests/decorators.py | 62 ++ kernel-tests/test_commands_dmesg.py | 105 ++++ kernel-tests/test_commands_kmem.py | 130 +++++ kernel-tests/test_commands_mount.py | 40 ++ kernel-tests/test_commands_ps.py | 528 ++++++++++++++++++ .../test_subsystem_storage_decoders.py | 43 ++ kernel-tests/test_types_bitmap.py | 76 +++ kernel-tests/test_types_cpu.py | 31 + kernel-tests/test_types_module.py | 30 + kernel-tests/test_types_node.py | 41 ++ kernel-tests/test_types_percpu.py | 19 + kernel-tests/test_types_task.py | 40 ++ kernel-tests/test_types_zone.py | 27 + kernel-tests/unittest-bootstrap.py | 42 ++ kernel-tests/unittest-prepare.py | 53 ++ setup.py | 2 +- test-all.sh | 28 + 20 files changed, 1377 insertions(+), 2 deletions(-) create mode 100644 INTERNALS.md create mode 100644 TESTING.md create mode 100644 kernel-tests/decorators.py create mode 100644 kernel-tests/test_commands_dmesg.py create mode 100644 kernel-tests/test_commands_kmem.py create mode 100644 kernel-tests/test_commands_mount.py create mode 100644 kernel-tests/test_commands_ps.py create mode 100644 kernel-tests/test_subsystem_storage_decoders.py create mode 100644 kernel-tests/test_types_bitmap.py create mode 100644 kernel-tests/test_types_cpu.py create mode 100644 kernel-tests/test_types_module.py create mode 100644 kernel-tests/test_types_node.py create mode 100644 kernel-tests/test_types_percpu.py create mode 100644 kernel-tests/test_types_task.py create mode 100644 kernel-tests/test_types_zone.py create mode 100644 kernel-tests/unittest-bootstrap.py create mode 100644 kernel-tests/unittest-prepare.py diff --git a/INTERNALS.md b/INTERNALS.md new file mode 100644 index 00000000000..871cac2b4b9 --- /dev/null +++ b/INTERNALS.md @@ -0,0 +1,19 @@ + +# GDB + +## Python contexts within GDB + +Each time gdb enters the Python interpreter it establishes a context. 
+Part of the context includes what architecture gdb believes it is +debugging ('gdbarch') and that is passed into the context. If anything +changes the gdbarch in that Python context, it won't be visible to any +subsequent Python code until a new session is established. + +When gdb starts up on x86_64, it uses a gdbarch of i386 -- with 32-bit words +and pointers. Only when we load an executable or target does it switch +to i386:x86_64. + +The effect of this is that any code that relys on type information *must* +be executed in a separate context from the one that loaded the executable +and/or taret. Otherwise, any built-in types that are pointers or `long` +based will use the 32-bit sizes. diff --git a/TESTING.md b/TESTING.md new file mode 100644 index 00000000000..95764818a2f --- /dev/null +++ b/TESTING.md @@ -0,0 +1,61 @@ +# Testing + +## Summary + +There are unit tests in the tests/ dir that are standalone and useful for +testing basic functionality. + +There are unit tests in the kernel-tests dir that require configuration, +kernel images, debuginfo, and vmcores to use. + +## Configuration + +The configuration for each kernel/vmcore to be tested goes in a .ini file +with the following format. All fields except kernel and vmcore are +optional, and defaults will be used. A kernel missing debuginfo cannot +be used for testing. Missing modules will mean module-specific tests +will be skipped. + +```[test] +kernel=/path/to/kernel +vmcore=/path/to/vmcore +vmlinux_debuginfo=/path/to/vmlinux-debuginfo +modules=/path/to/modules +module_debuginfo_path=/path/to/module/debuginfo +root=/root/for/tree/searches``` + +The optional fields match those defined in crash.kernel.CrashKernel. + +Example 1: +```[test] +kernel=/var/crash/2019-04-23-11:35/vmlinux-4.12.14-150.14-default.gz +vmcore=/var/crash/2019-04-23-11:35/vmcore``` + +In this example, the kernel and debuginfo packages are installed in the +default locations and will be searched automatically. + +Example 2: +```[test] +kernel=/var/crash/2019-04-23-11:35/vmlinux-4.12.14-150.14-default.gz +vmcore=/var/crash/2019-04-23-11:35/vmcore +root=/var/cache/crash-setup/leap15/4.12.14-150.14-default +``` + +In this example, the kernel and debuginfo packages are installed under +/var/cache/crash-setup/leap15/4.12.14-150.14-default and so we only +specify a root directory. + +## Running + +The script `test-all.sh` when run with no options will execute only +the standalone tests. The script takes a list of the .ini files +described above and will execute the kernel tests against those +configurations immediately after the standalone tests. + +Example: +```sh test-all.sh kernel-test-configs/4.12.14-150.14-default.ini kernel-test-configs/5.1.0-rc7-vanilla.ini``` +or +```sh test-all.sh kernel-test-configs/*.ini``` + +Each configuration will execute independently from one another. 
+ diff --git a/crash/commands/__init__.py b/crash/commands/__init__.py index 5a17cf75c88..8d90438d1d5 100644 --- a/crash/commands/__init__.py +++ b/crash/commands/__init__.py @@ -37,7 +37,7 @@ def __init__(self, name, parser=None): self.commands[self.name] = self gdb.Command.__init__(self, self.name, gdb.COMMAND_USER) - def invoke_uncaught(self, argstr, from_tty): + def invoke_uncaught(self, argstr, from_tty=False): argv = gdb.string_to_argv(argstr) args = self.parser.parse_args(argv) self.execute(args) diff --git a/kernel-tests/decorators.py b/kernel-tests/decorators.py new file mode 100644 index 00000000000..db205bfb2e0 --- /dev/null +++ b/kernel-tests/decorators.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +import unittest +import gdb + +def skip_with_type(typename): + try: + gdbtype = gdb.lookup_type(typename) + return unittest.skip(f"found type {typename}") + except gdb.error: + pass + + return lambda func: func + +def skip_without_type(typename): + try: + gdbtype = gdb.lookup_type(typename) + except gdb.error: + return unittest.skip(f"missing type {typename}") + + return lambda func: func + +def skip_with_symbol(symname): + symbol = gdb.lookup_symbol(symname, None)[0] + if symbol is not None: + return unittest.skip(f"found symbol {symname}") + + return lambda func: func + +def skip_without_symbol(symname): + symbol = gdb.lookup_symbol(symname, None)[0] + if symbol is None: + return unittest.skip(f"missing symbol {symname}") + + return lambda func: func + +def has_super_blocks(name): + from crash.subsystem.filesystem import for_each_super_block + for sb in for_each_super_block(): + if sb['s_type']['name'].string() == name: + return True + return False + +can_test = {} + +def skip_with_supers(name): + if not name in can_test: + can_test[name] = has_super_blocks(name) + + if not can_test[name]: + return lambda func: func + + return unittest.skip(f"{name} file systems in image") + +def skip_without_supers(name): + if not name in can_test: + can_test[name] = has_super_blocks(name) + + if can_test[name]: + return lambda func: func + + return unittest.skip(f"no {name} file systems in image") diff --git a/kernel-tests/test_commands_dmesg.py b/kernel-tests/test_commands_dmesg.py new file mode 100644 index 00000000000..12c2227f6c6 --- /dev/null +++ b/kernel-tests/test_commands_dmesg.py @@ -0,0 +1,105 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +import unittest +import gdb +import io +import sys + +from decorators import skip_with_symbol, skip_without_symbol + +from crash.commands.dmesg import LogCommand +from crash.commands import CommandError, CommandLineError + +class TestCommandsLog(unittest.TestCase): + def setUp(self): + self.stdout = sys.stdout + sys.stdout = io.StringIO() + self.command = LogCommand("dmesg") + + def tearDown(self): + sys.stdout = self.stdout + + def output(self): + return sys.stdout.getvalue() + + def test_dmesg(self): + """`dmesg' produces valid output""" + self.command.invoke_uncaught("") + output = self.output() + self.assertTrue(len(output.split("\n")) > 2) + + def test_dmesg_bad_option(self): + """`dmesg -x` raises CommandLineError""" + with self.assertRaises(CommandLineError): + self.command.invoke_uncaught("-x") + + def test_dmesg_t(self): + """`dmesg' produces valid output""" + self.command.invoke_uncaught("-t") + output = self.output() + self.assertTrue(len(output.split("\n")) > 2) + + @skip_without_symbol('log_first_seq') + def test_dmesg_d(self): + 
"""`dmesg -d' produces valid output""" + self.command.invoke_uncaught("-d") + output = self.output() + self.assertTrue(len(output.split("\n")) > 2) + + @skip_with_symbol('log_first_seq') + def test_dmesg_d(self): + """`dmesg -d' raises CommandError with unstructured log""" + with self.assertRaises(CommandError): + self.command.invoke_uncaught("-d") + + def test_dmesg_m(self): + """`dmesg -m ' produces valid output""" + self.command.invoke_uncaught("-m") + output = self.output() + self.assertTrue(len(output.split("\n")) > 2) + + def test_dmesg_tm(self): + """`dmesg -t -m' produces valid output""" + self.command.invoke_uncaught("-t -m") + output = self.output() + self.assertTrue(len(output.split("\n")) > 2) + + @skip_without_symbol('log_first_seq') + def test_dmesg_td(self): + """`dmesg -t -d' produces valid output""" + self.command.invoke_uncaught("-t -d") + output = self.output() + self.assertTrue(len(output.split("\n")) > 2) + + @skip_without_symbol('log_first_seq') + def test_dmesg_dm(self): + """`dmesg -m -d' produces valid output""" + self.command.invoke_uncaught("-m -d") + output = self.output() + self.assertTrue(len(output.split("\n")) > 2) + + @skip_without_symbol('log_first_seq') + def test_dmesg_tdm(self): + """`dmesg -t -d -m' produces valid output""" + self.command.invoke_uncaught("-t -d -m") + output = self.output() + self.assertTrue(len(output.split("\n")) > 2) + + @skip_with_symbol('log_first_seq') + def test_dmesg_td(self): + """`dmesg -t -d' raises CommandError with unstructured log""" + with self.assertRaises(CommandError): + self.command.invoke_uncaught("-t -d") + + @skip_with_symbol('log_first_seq') + def test_dmesg_dm(self): + """`dmesg -m -d' raises CommandError with unstructured log""" + with self.assertRaises(CommandError): + self.command.invoke_uncaught("-m -d") + + @skip_with_symbol('log_first_seq') + def test_dmesg_tdm(self): + """`dmesg -t -d -m' raises CommandError with unstructured log""" + with self.assertRaises(CommandError): + self.command.invoke_uncaught("-t -d -m") + diff --git a/kernel-tests/test_commands_kmem.py b/kernel-tests/test_commands_kmem.py new file mode 100644 index 00000000000..3717b4a6e9b --- /dev/null +++ b/kernel-tests/test_commands_kmem.py @@ -0,0 +1,130 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +import unittest +import gdb +import io +import sys + +from decorators import skip_without_symbol +from decorators import skip_with_symbol + +from crash.commands.kmem import KmemCommand +from crash.commands import CommandLineError, CommandError + +class TestCommandsKmem(unittest.TestCase): + def setUp(self): + self.stdout = sys.stdout + sys.stdout = io.StringIO() + self.command = KmemCommand("kmem") + + def tearDown(self): + sys.stdout = self.stdout + + def output(self): + return sys.stdout.getvalue() + + def test_kmem_empty(self): + with self.assertRaises(CommandLineError): + self.command.invoke_uncaught("") + + def test_kmem_invalid(self): + """`kmem' returns error""" + with self.assertRaises(CommandLineError): + self.command.invoke_uncaught("invalid") + + @unittest.skip("takes a huge amount of time on a real core") + def test_kmem_s(self): + """`kmem -s' produces valid output""" + self.command.invoke_uncaught("-s") + output = self.output() + self.assertTrue(len(output.split("\n")) > 2) + + def test_kmem_s_inode_cache(self): + """`kmem -s inode_cache' produces valid output""" + self.command.invoke_uncaught("-s inode_cache") + output = self.output() + self.assertTrue(len(output.split("\n")) > 2) + + 
def test_kmem_s_unknown_cache(self): + """`kmem -s unknown_cache' raises CommandError""" + with self.assertRaises(CommandError): + self.command.invoke_uncaught("-s unknown_cache") + + def test_kmem_sz(self): + """`kmem -s -z' raises CommandLineError""" + with self.assertRaises(CommandLineError): + self.command.invoke_uncaught("-s -z") + + def test_kmem_sz_valid_cache(self): + """`kmem -s -z' raises CommandLineError""" + with self.assertRaises(CommandLineError): + self.command.invoke_uncaught("-s inode_cache -z") + + def test_kmem_sz_invalid_cache(self): + """`kmem -s unknown_cache -z' raises CommandLineError""" + with self.assertRaises(CommandLineError): + self.command.invoke_uncaught("-s unknown_cache -z") + + def test_kmem_sv(self): + """`kmem -s -V' raises CommandLineError""" + with self.assertRaises(CommandLineError): + self.command.invoke_uncaught("-s -V") + + def test_kmem_sv_valid_cache(self): + """`kmem -s inode_cache -V' raises CommandLineError""" + with self.assertRaises(CommandLineError): + self.command.invoke_uncaught("-s inode_cache -V") + + def test_kmem_sv_invalid_cache(self): + """`kmem -s unknown_cache -V' raises CommandLineError""" + with self.assertRaises(CommandLineError): + self.command.invoke_uncaught("-s unknown_cache -V") + + def test_kmem_z(self): + """`kmem -z' produces valid output""" + self.command.invoke_uncaught("-z") + output = self.output() + self.assertTrue(len(output.split("\n")) > 2) + + def test_kmem_z_invalid(self): + """`kmem -z invalid' raises CommandLineError""" + with self.assertRaises(CommandLineError): + self.command.invoke_uncaught("-z invalid") + + @skip_without_symbol('vm_stat') + def test_kmem_v(self): + """`kmem -V' produces valid output""" + self.command.invoke_uncaught("-V") + output = self.output() + self.assertTrue(len(output.split("\n")) > 0) + + @skip_with_symbol('vm_stat') + def test_kmem_v_unimplemented(self): + """`kmem -V' raises CommandError due to missing symbol""" + with self.assertRaises(CommandError): + self.command.invoke_uncaught("-V") + + def test_kmem_v_invalid(self): + """`kmem -V invalid' raises CommandLineError""" + with self.assertRaises(CommandLineError): + self.command.invoke_uncaught("-V invalid") + + def test_kmem_vz(self): + """`kmem -V -z' raises CommandLineError""" + with self.assertRaises(CommandLineError): + self.command.invoke_uncaught("-V -z") + + def test_kmem_svz(self): + """`kmem -V -z -s' raises CommandLineError""" + with self.assertRaises(CommandLineError): + self.command.invoke_uncaught("-V -z -s") + + def test_kmem_svz_valid_cache(self): + """`kmem -V -z -s inode_cache' raises CommandLineError""" + with self.assertRaises(CommandLineError): + self.command.invoke_uncaught("-V -z -s inode_cache") + + def test_kmem_svz_invalid_cache(self): + """`kmem -V -z -s unknown_cache' raises CommandLineError""" + with self.assertRaises(CommandLineError): + self.command.invoke_uncaught("-V -z -s unknown_cache") diff --git a/kernel-tests/test_commands_mount.py b/kernel-tests/test_commands_mount.py new file mode 100644 index 00000000000..e3af005db8f --- /dev/null +++ b/kernel-tests/test_commands_mount.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +import unittest +import gdb +import io +import sys + +from crash.commands.mount import MountCommand + +class TestCommandsMount(unittest.TestCase): + def setUp(self): + self.stdout = sys.stdout + sys.stdout = io.StringIO() + self.command = MountCommand("mount") + + def tearDown(self): + sys.stdout = self.stdout + + 
def output(self): + return sys.stdout.getvalue() + + def test_mount(self): + self.command.invoke("") + output = self.output() + self.assertTrue(len(output.split("\n")) > 2) + + def test_mount_f(self): + self.command.invoke("-f") + output = self.output() + self.assertTrue(len(output.split("\n")) > 2) + + def test_mount_v(self): + self.command.invoke("-v") + output = self.output() + self.assertTrue(len(output.split("\n")) > 2) + + def test_mount_d(self): + self.command.invoke("-d") + output = self.output() + self.assertTrue(len(output.split("\n")) > 2) diff --git a/kernel-tests/test_commands_ps.py b/kernel-tests/test_commands_ps.py new file mode 100644 index 00000000000..9c76a074c59 --- /dev/null +++ b/kernel-tests/test_commands_ps.py @@ -0,0 +1,528 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +import unittest +import gdb + +import sys +import io +import re +import fnmatch + +from crash.commands import CommandError, CommandLineError +from crash.commands.ps import PSCommand +import crash.types.task as tasks + +def bad_command_line(fn, ignored=True): + """Marks test to expect CommandLineError for unimplemented options""" + def test_decorator(fn): + def test_decorated(self, *args, **kwargs): + self.assertRaises(CommandLineError, fn, self, *args, **kwargs) + return test_decorated + test_decorator.__doc__ = fn.__doc__ + " (bad command line raises CommandLineError)" + return test_decorator + +def unimplemented(fn, ignored=True): + """Marks test to expect CommandError for unimplemented options""" + def test_decorator(fn): + def test_decorated(self, *args, **kwargs): + self.assertRaises(CommandError, fn, self, *args, **kwargs) + return test_decorated + test_decorator.__doc__ = fn.__doc__ + " (unimplemented command raises CommandError)" + return test_decorator + +PF_KTHREAD = 0x200000 + +class TestCommandsPs(unittest.TestCase): + def setUp(self): + self.stdout = sys.stdout + self.redirected = io.StringIO() + sys.stdout = self.redirected + self.command = PSCommand() + self.do_output = False + + def tearDown(self): + sys.stdout = self.stdout + if self.do_output: + print(self._output()) + + def _output(self): + return self.redirected.getvalue() + + def output(self): + try: + return self.output_list + except AttributeError: + self.output_list = self._output().split("\n") + return self.output_list + + def output_lines(self): + output = self.output() + return len(output) - 1 + + def get_wildcard_regex(self, wildcard): + return re.compile(fnmatch.translate(wildcard)) + + def check_line_count(self, count): + self.assertTrue(self.output_lines() == count) + + def check_header(self, expected): + header = self.output()[0] + self.assertTrue(re.match(expected, header) is not None) + + def check_task_header(self): + regex = "\s+PID\s+PPID\s+CPU\s+TASK\s+ST\s+%MEM\s+VSZ\s+RSS\s+COMM" + self.check_header(regex) + + def check_kstack_header(self): + regex = "\s+PID\s+PPID\s+CPU\s+KSTACK\s+ST\s+%MEM\s+VSZ\s+RSS\s+COMM" + self.check_header(regex) + + def check_threadnum_header(self): + regex = "\s+PID\s+PPID\s+CPU\s+THREAD#\s+ST\s+%MEM\s+VSZ\s+RSS\s+COMM" + self.check_header(regex) + + def check_body(self, regex, start=1): + comp = re.compile(regex) + lines = 0 + for line in self.output()[start:-1]: + self.assertTrue(comp.match(line) is not None) + lines += 1 + + self.assertTrue(lines > 0) + + def check_threadnum_output(self): + regex = ">?\s+\d+\s+\d+\s+\d+\s+\d+\s+[A-Z]+\s+[\d\.]+\s+\d+\s+\d+\s+.*" + + self.check_body(regex) + + + def check_normal_output(self): + regex = 
">?\s+\d+\s+\d+\s+\d+\s+[\d+a-f]+\s+[A-Z]+\s+[\d\.]+\s+\d+\s+\d+\s+.*" + + self.check_body(regex) + + def check_last_run_output(self): + regex = "\[\d+\]\s+\[[A-Z][A-Z]\]\s+PID:\s+\d+\s+TASK:\s+[\da-f]+\s+CPU:\s+\d+\s+COMMAND: \".*\"" + self.check_body(regex, 0) + + def check_no_matches_output(self): + self.check_header('No matches for.*') + lines = self.output_lines() + self.assertTrue(lines == 1) + + def is_kernel_thread(self, task): + return (int(task['flags']) & PF_KTHREAD) + + def is_user_task(self, task): + return not self.is_kernel_thread(task) + + def task_name(self, task_struct): + return task_struct['comm'].string() + + def count_tasks(self, test=None, regex=None): + count = 0 + for task in tasks.for_each_all_tasks(): + if test is not None and not test(task): + continue + if regex is None or regex.match(self.task_name(task)): + count += 1 + + return count + + def count_kernel_tasks(self, regex=None): + return self.count_tasks(self.is_kernel_thread, regex) + + def count_user_tasks(self, regex=None): + return self.count_tasks(self.is_user_task, regex) + + def count_thread_group_leaders(self, regex=None): + count = 0 + for task in tasks.for_each_thread_group_leader(): + if regex is None or regex.match(self.task_name(task)): + count += 1 + + return count + + def test_ps_empty(self): + self.command.invoke_uncaught("") + self.assertTrue(self.output_lines() > 1) + + def test_ps_wildcard(self): + self.command.invoke_uncaught("*worker*") + + regex = self.get_wildcard_regex("*worker*") + self.check_line_count(self.count_tasks(regex=regex) + 1) + + def test_ps_bad_wildcard(self): + """Test `ps *BaDWiLdCaRd2019*' returns no matches output""" + self.command.invoke_uncaught("*BaDWiLdCaRd2019*") + self.check_no_matches_output() + + def test_ps_k(self): + """Test `ps -k' outputs all (and only) kernel threads""" + self.command.invoke_uncaught("-k") + lines = self.output_lines() + + self.check_task_header() + self.check_normal_output() + + self.check_line_count(self.count_kernel_tasks() + 1) + + def test_ps_k_wildcard(self): + """Test `ps -k *wonder*' outputs only matching kernel threads""" + self.command.invoke_uncaught("-k *worker*") + lines = self.output_lines() + + regex = self.get_wildcard_regex("*worker*") + + self.check_task_header() + self.check_normal_output() + self.check_line_count(self.count_kernel_tasks(regex) + 1) + + def test_ps_k_bad_wildcard(self): + """Test `ps -k *BaDWiLdCaRd2019*' returns no matches output""" + self.command.invoke_uncaught("-k *BaDWiLdCaRd2019*") + self.check_no_matches_output() + + def test_ps_u(self): + """Test `ps -u' outputs all (and only) user tasks""" + self.command.invoke_uncaught("-u") + + self.check_task_header() + self.check_normal_output() + + self.check_line_count(self.count_user_tasks() + 1) + + def test_ps_u_wildcard(self): + """Test `ps -u *wonder*' outputs only matching user tasks""" + self.command.invoke_uncaught("-u *nscd*") + lines = self.output_lines() + + regex = self.get_wildcard_regex("*nscd*") + + self.check_task_header() + self.check_normal_output() + + self.check_line_count(self.count_user_tasks(regex) + 1) + + def test_ps_u_bad_wildcard(self): + """Test `ps -u *BaDWiLdCaRd2019*' returns no matches output""" + self.command.invoke_uncaught("-u *BaDWiLdCaRd2019*") + self.check_no_matches_output() + + def test_ps_g(self): + """Test `ps -G' outputs all (and only) thread group leaders""" + self.command.invoke_uncaught("-G") + + self.check_task_header() + self.check_normal_output() + + self.check_line_count(self.count_thread_group_leaders() 
+ 1) + + def test_ps_g_wildcard(self): + """Test `ps -G *nscd*' outputs only matching thread group leaders""" + self.command.invoke_uncaught("-G *nscd*") + + regex = self.get_wildcard_regex("*nscd*") + + self.check_task_header() + self.check_normal_output() + + self.check_line_count(self.count_thread_group_leaders() + 1) + + def test_ps_g_bad_wildcard(self): + """Test `ps -G *BaDWiLdCaRd2019*' returns no matches output""" + self.command.invoke_uncaught("-G *BaDWiLdCaRd2019*") + self.check_no_matches_output() + + @bad_command_line + def test_ps_uk(self): + """Test `ps -u -k'""" + self.command.invoke_uncaught("-u -k") + + @bad_command_line + def test_ps_uk_wildcard(self): + """Test `ps -u -k *'""" + self.command.invoke_uncaught("-u -k *") + + @bad_command_line + def test_ps_uG(self): + """Test `ps -u -G'""" + self.command.invoke_uncaught("-u -k") + + @bad_command_line + def test_ps_uG_wildcard(self): + """Test `ps -u -G *'""" + self.command.invoke_uncaught("-u -k *") + + @bad_command_line + def test_ps_kG(self): + """Test `ps -k -G'""" + self.command.invoke_uncaught("-k -G") + + @bad_command_line + def test_ps_kG_wildcard(self): + """Test `ps -k -G *'""" + self.command.invoke_uncaught("-k -G *") + + @bad_command_line + def test_ps_ukG(self): + """Test `ps -u -k -G'""" + self.command.invoke_uncaught("-u -k -G") + + @bad_command_line + def test_ps_ukG_wildcard(self): + """Test `ps -u -k -G *'""" + self.command.invoke_uncaught("-u -k -G *") + + def test_ps_s(self): + """Test `ps -s'""" + self.command.invoke_uncaught("-s") + + self.check_kstack_header() + self.check_normal_output() + + self.check_line_count(self.count_tasks() + 1) + + def test_ps_s_wildcard(self): + """Test `ps -s *nscd*'""" + self.command.invoke_uncaught("-s *nscd*") + + self.check_kstack_header() + self.check_normal_output() + + regex = self.get_wildcard_regex("*nscd*") + self.check_line_count(self.count_tasks(regex=regex) + 1) + + def test_ps_s_bad_wildcard(self): + """Test `ps -s *BaDWiLdCaRd2019*'""" + self.command.invoke_uncaught("-s *BaDWiLdCaRd2019*") + + self.check_no_matches_output() + + def test_ps_n(self): + """Test `ps -n'""" + self.command.invoke_uncaught("-n") + + self.check_threadnum_header() + self.check_threadnum_output() + + self.check_line_count(self.count_tasks() + 1) + + def test_ps_n_wildcard(self): + """Test `ps -n *nscd*'""" + self.command.invoke_uncaught("-n *nscd*") + + self.check_threadnum_header() + self.check_threadnum_output() + + regex = self.get_wildcard_regex("*nscd*") + self.check_line_count(self.count_tasks(regex=regex) + 1) + + def test_ps_n_bad_wildcard(self): + """Test `ps -n *BaDWiLdCaRd2019*' returns no matches output""" + self.command.invoke_uncaught("-n *BaDWiLdCaRd2019*") + + self.check_no_matches_output() + + def test_ps_nu(self): + """Test `ps -n -u'""" + self.command.invoke_uncaught("-n -u") + + self.check_threadnum_header() + self.check_threadnum_output() + + self.check_line_count(self.count_user_tasks() + 1) + + def test_ps_nu_wildcard(self): + """Test `ps -n -u *nscd*'""" + self.command.invoke_uncaught("-n -u *nscd*") + + self.check_threadnum_header() + self.check_threadnum_output() + + regex = self.get_wildcard_regex("*nscd*") + self.check_line_count(self.count_user_tasks(regex) + 1) + + def test_ps_nu_bad_wildcard(self): + """Test `ps -n -u *BaDWiLdCaRd2019*' returns no matches output""" + self.command.invoke_uncaught("-n -u *BaDWiLdCaRd2019*") + + self.check_no_matches_output() + + def test_ps_nk(self): + """Test `ps -n -k'""" + self.command.invoke_uncaught("-n -k") + + 
self.check_threadnum_header() + self.check_threadnum_output() + + self.check_line_count(self.count_kernel_tasks() + 1) + + def test_ps_nk_wildcard(self): + """Test `ps -n -k *worker*'""" + self.command.invoke_uncaught("-n -k *worker*") + + self.check_threadnum_header() + self.check_threadnum_output() + + regex = self.get_wildcard_regex("*worker*") + self.check_line_count(self.count_kernel_tasks(regex) + 1) + + def test_ps_nk_bad_wildcard(self): + """Test `ps -n -k *BaDWiLdCaRd2019*' returns no matches output""" + self.command.invoke_uncaught("-n -k *BaDWiLdCaRd2019*") + + self.check_no_matches_output() + + def test_ps_nG(self): + """Test `ps -n -G'""" + self.command.invoke_uncaught("-n -G") + + self.check_threadnum_header() + self.check_threadnum_output() + + self.check_line_count(self.count_thread_group_leaders() + 1) + + def test_ps_nG_wildcard(self): + """Test `ps -n -G *nscd*'""" + self.command.invoke_uncaught("-n -G *nscd*") + + self.check_threadnum_header() + self.check_threadnum_output() + + regex = self.get_wildcard_regex("*nscd*") + self.check_line_count(self.count_thread_group_leaders(regex) + 1) + + def test_ps_nG_bad_wildcard(self): + """Test `ps -n -G *BaDWiLdCaRd2019*' returns no matches output""" + self.command.invoke_uncaught("-n -G *BaDWiLdCaRd2019*") + + self.check_no_matches_output() + + @unimplemented + def test_ps_t(self): + """Test `ps -t'""" + self.command.invoke_uncaught("-t") + + # Check format + + self.check_line_count(self.count_tasks()) + + @unimplemented + def test_ps_t_wildcard(self): + """Test `ps -t *nscd*'""" + self.command.invoke_uncaught("-t *nscd*") + + # Check format + + regex = self.get_wildcard_regex("*nscd*") + self.check_line_count(self.count_tasks(regex=regex)) + + def test_ps_l(self): + """Test `ps -l'""" + self.command.invoke_uncaught("-l") + + # No header to test + self.check_last_run_output() + self.check_line_count(self.count_tasks()) + + def test_ps_l_wildcard(self): + """Test `ps -l *nscd*'""" + self.command.invoke_uncaught("-l *nscd*") + + # No header to test + self.check_last_run_output() + + regex = self.get_wildcard_regex("*nscd*") + self.check_line_count(self.count_tasks(regex=regex)) + + @unimplemented + def test_ps_p(self): + """Test `ps -p'""" + self.command.invoke_uncaught("-p") + lines = self.output_lines() + + self.assertTrue(lines > 1) + + @unimplemented + def test_ps_p_wildcard(self): + """Test `ps -p *nscd*'""" + self.command.invoke_uncaught("-p *nscd*") + lines = self.output_lines() + + regex = self.get_wildcard_regex("*nscd*") + + self.assertTrue(lines > 1) + + @unimplemented + def test_ps_c(self): + """Test `ps -c'""" + self.command.invoke_uncaught("-c") + lines = self.output_lines() + + self.assertTrue(lines > 1) + + @unimplemented + def test_ps_c_wildcard(self): + """Test `ps -c *nscd*'""" + self.command.invoke_uncaught("-c *nscd*") + lines = self.output_lines() + + regex = self.get_wildcard_regex("*nscd*") + + self.assertTrue(lines > 1) + + @unimplemented + def test_ps_a(self): + """Test `ps -a'""" + self.command.invoke_uncaught("-a") + lines = self.output_lines() + + self.assertTrue(lines > 1) + + @unimplemented + def test_ps_a_wildcard(self): + """Test `ps -a *nscd*'""" + self.command.invoke_uncaught("-a *nscd*") + lines = self.output_lines() + + regex = self.get_wildcard_regex("*nscd*") + + self.assertTrue(lines > 1) + + @unimplemented + def test_ps_g(self): + """Test `ps -g'""" + self.command.invoke_uncaught("-g") + lines = self.output_lines() + + self.assertTrue(lines > 1) + + @unimplemented + def 
test_ps_g_wildcard(self): + """Test `ps -g *nscd*'""" + self.command.invoke_uncaught("-g *nscd*") + lines = self.output_lines() + + regex = self.get_wildcard_regex("*nscd*") + + self.assertTrue(lines > 1) + + @unimplemented + def test_ps_r(self): + """Test `ps -r'""" + self.command.invoke_uncaught("-r") + lines = self.output_lines() + + self.assertTrue(lines > 1) + + @unimplemented + def test_ps_r_wildcard(self): + """Test `ps -r *nscd*'""" + self.command.invoke_uncaught("-r *nscd*") + lines = self.output_lines() + + regex = self.get_wildcard_regex("*nscd*") + + self.assertTrue(lines > 1) diff --git a/kernel-tests/test_subsystem_storage_decoders.py b/kernel-tests/test_subsystem_storage_decoders.py new file mode 100644 index 00000000000..9a0b1d35285 --- /dev/null +++ b/kernel-tests/test_subsystem_storage_decoders.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +import unittest +import gdb +import io +import sys + +import crash.util as util +import crash.subsystem.storage.decoders as decoders + +# We need live bios to be able to test this properly + +class TestSubsystemStorageDecoders(unittest.TestCase): + nullptr = 0x0 + poisonptr = 0xdead000000000100 + + def setUp(self): + self.bio_type = gdb.lookup_type('struct bio') + self.buffer_head_type = gdb.lookup_type('struct buffer_head') + + def test_decode_null_bio(self): + bio = util.get_typed_pointer(self.nullptr, self.bio_type) + bio = bio.dereference() + decoder = decoders.decode_bio(bio) + self.assertTrue(type(decoder) is decoders.BadBioDecoder) + + def test_decode_poison_bio(self): + bio = util.get_typed_pointer(self.poisonptr, self.bio_type) + bio = bio.dereference() + decoder = decoders.decode_bio(bio) + self.assertTrue(type(decoder) is decoders.BadBioDecoder) + + def test_decode_null_bh(self): + bh = util.get_typed_pointer(self.nullptr, self.buffer_head_type) + bh = bh.dereference() + decoder = decoders.decode_bh(bh) + self.assertTrue(type(decoder) is decoders.BadBHDecoder) + + def test_decode_poison_bh(self): + bh = util.get_typed_pointer(self.poisonptr, self.buffer_head_type) + bh = bh.dereference() + decoder = decoders.decode_bh(bh) + self.assertTrue(type(decoder) is decoders.BadBHDecoder) diff --git a/kernel-tests/test_types_bitmap.py b/kernel-tests/test_types_bitmap.py new file mode 100644 index 00000000000..4e0c938e89c --- /dev/null +++ b/kernel-tests/test_types_bitmap.py @@ -0,0 +1,76 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +import unittest +import gdb + +import crash.types.bitmap as bitmaps + +class TestBitmap(unittest.TestCase): + def test_for_each_set_bit(self): + sym = gdb.lookup_symbol('cpu_online_mask', None)[0] + if sym is None: + sym = gdb.lookup_symbol('__cpu_online_mask', None)[0] + + self.assertTrue(sym is not None) + + bitmap = sym.value()['bits'] + + count = 0 + for bit in bitmaps.for_each_set_bit(bitmap): + self.assertTrue(type(bit) is int) + count += 1 + + self.assertTrue(count > 0) + + def test_find_first_set_bit(self): + sym = gdb.lookup_symbol('cpu_online_mask', None)[0] + if sym is None: + sym = gdb.lookup_symbol('__cpu_online_mask', None)[0] + + self.assertTrue(sym is not None) + + bitmap = sym.value()['bits'] + + count = 0 + bit = bitmaps.find_first_set_bit(bitmap) + self.assertTrue(type(bit) is int) + + def test_find_next_set_bit(self): + sym = gdb.lookup_symbol('cpu_online_mask', None)[0] + if sym is None: + sym = gdb.lookup_symbol('__cpu_online_mask', None)[0] + + self.assertTrue(sym is not None) + 
+ bitmap = sym.value()['bits'] + + count = 0 + bit = bitmaps.find_next_set_bit(bitmap, 1) + self.assertTrue(type(bit) is int) + + def test_find_first_zero_bit(self): + sym = gdb.lookup_symbol('cpu_online_mask', None)[0] + if sym is None: + sym = gdb.lookup_symbol('__cpu_online_mask', None)[0] + + self.assertTrue(sym is not None) + + bitmap = sym.value()['bits'] + + count = 0 + bit = bitmaps.find_first_zero_bit(bitmap) + self.assertTrue(type(bit) is int) + + def test_find_next_zero_bit(self): + sym = gdb.lookup_symbol('cpu_online_mask', None)[0] + if sym is None: + sym = gdb.lookup_symbol('__cpu_online_mask', None)[0] + + self.assertTrue(sym is not None) + + bitmap = sym.value()['bits'] + + count = 0 + bit = bitmaps.find_next_zero_bit(bitmap, 10) + self.assertTrue(type(bit) is int) + diff --git a/kernel-tests/test_types_cpu.py b/kernel-tests/test_types_cpu.py new file mode 100644 index 00000000000..803d18fa872 --- /dev/null +++ b/kernel-tests/test_types_cpu.py @@ -0,0 +1,31 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +import unittest +import gdb + +import crash.types.cpu as cpus + +class TestCPU(unittest.TestCase): + def test_online_cpu_iteration(self): + count = 0 + for cpu in cpus.for_each_online_cpu(): + self.assertTrue(type(cpu) is int) + count += 1 + + self.assertTrue(count > 0) + + def test_highest_online_cpu(self): + cpu = cpus.highest_online_cpu_nr() + self.assertTrue(type(cpu) is int) + + def test_possible_cpu_iteration(self): + count = 0 + for cpu in cpus.for_each_possible_cpu(): + self.assertTrue(type(cpu) is int) + count += 1 + + self.assertTrue(count > 0) + + def test_highest_possible_cpu(self): + cpu = cpus.highest_possible_cpu_nr() + self.assertTrue(type(cpu) is int) diff --git a/kernel-tests/test_types_module.py b/kernel-tests/test_types_module.py new file mode 100644 index 00000000000..2954901906b --- /dev/null +++ b/kernel-tests/test_types_module.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +import unittest +import gdb + +gdbinit = """ +set build-id-verbose 0 +set python print-stack full +set prompt py-crash> +set height 0 +set print pretty on""" + +class TestModules(unittest.TestCase): + def test_for_each_module(self): + from crash.types.module import for_each_module + + modtype = gdb.lookup_type('struct module') + + for mod in for_each_module(): + self.assertTrue(mod.type == modtype) + + def test_for_each_module_section(self): + from crash.types.module import for_each_module_section + from crash.types.module import for_each_module + + for mod in for_each_module(): + for section in for_each_module_section(mod): + self.assertTrue(type(section) is tuple) + self.assertTrue(type(section[0]) is str) + self.assertTrue(type(section[1]) is int) diff --git a/kernel-tests/test_types_node.py b/kernel-tests/test_types_node.py new file mode 100644 index 00000000000..3f3a9aa63de --- /dev/null +++ b/kernel-tests/test_types_node.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +import unittest +import gdb + + +import crash.types.node as numa_node + +class TestNumaNode(unittest.TestCase): + def test_for_each_node(self): + count = 0 + for node in numa_node.for_each_node(): + self.assertTrue(type(node) is numa_node.Node) + count += 1 + self.assertTrue(count > 0) + + def test_for_each_online_node(self): + count = 0 + for node in numa_node.for_each_online_node(): + self.assertTrue(type(node) is numa_node.Node) + count += 1 + 
self.assertTrue(count > 0) + + def test_for_each_nid(self): + count = 0 + for nid in numa_node.for_each_nid(): + self.assertTrue(type(nid) is int) + count += 1 + self.assertTrue(count > 0) + + def test_for_each_online_nid(self): + count = 0 + for nid in numa_node.for_each_online_nid(): + self.assertTrue(type(nid) is int) + count += 1 + self.assertTrue(count > 0) + + def test_numa_node_id(self): + nid = numa_node.numa_node_id(0) + self.assertTrue(type(nid) is int) + diff --git a/kernel-tests/test_types_percpu.py b/kernel-tests/test_types_percpu.py new file mode 100644 index 00000000000..ac3df699ff9 --- /dev/null +++ b/kernel-tests/test_types_percpu.py @@ -0,0 +1,19 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +import unittest +import gdb + +from crash.types.percpu import get_percpu_vars, is_percpu_var + +class TestPerCPU(unittest.TestCase): + def test_runqueues(self): + rqs = gdb.lookup_symbol('runqueues', None)[0] + rq_type = gdb.lookup_type('struct rq') + + self.assertTrue(rqs.type == rq_type) + + pcpu = get_percpu_vars(rqs) + for (cpu, rq) in pcpu.items(): + self.assertTrue(type(cpu) is int) + self.assertTrue(type(rq) is gdb.Value) + self.assertTrue(rq.type == rq_type) diff --git a/kernel-tests/test_types_task.py b/kernel-tests/test_types_task.py new file mode 100644 index 00000000000..5099e295cfa --- /dev/null +++ b/kernel-tests/test_types_task.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +import unittest +import gdb + +import crash.types.task as tasks + +class TestTasks(unittest.TestCase): + def setUp(self): + self.task_struct_type = gdb.lookup_type('struct task_struct') + + def test_thread_group_leader_iteration(self): + count = 0 + for leader in tasks.for_each_thread_group_leader(): + self.assertTrue(type(leader) is gdb.Value) + self.assertTrue(leader.type == self.task_struct_type) + self.assertTrue(int(leader['exit_signal']) >= 0) + count += 1 + + self.assertTrue(count > 0) + + def test_thread_group_iteration(self): + count = 0 + for leader in tasks.for_each_thread_group_leader(): + for thread in tasks.for_each_thread_in_group(leader): + self.assertTrue(type(thread) is gdb.Value) + self.assertTrue(thread.type == self.task_struct_type) + self.assertTrue(int(thread['exit_signal']) < 0) + count += 1 + + self.assertTrue(count > 0) + + def test_iterate_all_tasks(self): + count = 0 + for task in tasks.for_each_all_tasks(): + self.assertTrue(type(task) is gdb.Value) + self.assertTrue(task.type == self.task_struct_type) + count += 1 + + self.assertTrue(count > 0) diff --git a/kernel-tests/test_types_zone.py b/kernel-tests/test_types_zone.py new file mode 100644 index 00000000000..45d65fd489e --- /dev/null +++ b/kernel-tests/test_types_zone.py @@ -0,0 +1,27 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +import unittest +import gdb + + +import crash.types.node as numa_node +import crash.types.zone as mmzone + +class TestNumaNode(unittest.TestCase): + def test_for_each_zone(self): + count = 0 + for node in numa_node.for_each_node(): + for zone in node.for_each_zone(): + self.assertTrue(type(zone) is mmzone.Zone) + count += 1 + + self.assertTrue(count > 0) + + def test_for_each_populated_zone(self): + count = 0 + for zone in mmzone.for_each_populated_zone(): + self.assertTrue(type(zone) is mmzone.Zone) + count += 1 + + self.assertTrue(count > 0) + diff --git a/kernel-tests/unittest-bootstrap.py b/kernel-tests/unittest-bootstrap.py new file mode 
100644 index 00000000000..35542c5f395 --- /dev/null +++ b/kernel-tests/unittest-bootstrap.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +import unittest +import sys +import os +import os.path +import configparser +import gzip +import shutil + +config = configparser.ConfigParser() +filename = os.environ['CRASH_PYTHON_TESTFILE'] +try: + f = open(filename) + config.read_file(f) +except FileNotFoundError as e: + print(f"{str(e)}") + sys.exit(1) + +try: + vmcore = config['test']['vmcore'] +except KeyError: + print(f"{filename} doesn't contain the required sections.") + sys.exit(1) + +roots = config['test'].get('root', None) +vmlinux_debuginfo = config['test'].get('vmlinux_debuginfo', None) +module_path = config['test'].get('module_path', None) +module_debuginfo_path = config['test'].get('module_debuginfo_path', None) + +from crash.kernel import CrashKernel +kernel = CrashKernel(roots=roots, vmlinux_debuginfo=vmlinux_debuginfo, + module_path=module_path, + module_debuginfo_path=module_debuginfo_path) + +kernel.setup_tasks() +kernel.load_modules() + +test_loader = unittest.TestLoader() +test_suite = test_loader.discover('kernel-tests', pattern='test_*.py') +unittest.TextTestRunner(verbosity=2).run(test_suite) diff --git a/kernel-tests/unittest-prepare.py b/kernel-tests/unittest-prepare.py new file mode 100644 index 00000000000..e887b6f1046 --- /dev/null +++ b/kernel-tests/unittest-prepare.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +import sys +import os +import os.path +import configparser +import gzip +import shutil + +config = configparser.ConfigParser() +filename = os.environ['CRASH_PYTHON_TESTFILE'] +try: + f = open(filename) + config.read_file(f) +except FileNotFoundError as e: + print(f"{str(e)}") + sys.exit(1) + +try: + vmlinux = config['test']['kernel'] + vmcore = config['test']['vmcore'] +except KeyError as e: + print(f"{filename} doesn't contain the required sections `{str(e)}.") + sys.exit(1) + +roots = config['test'].get('root', None) +vmlinux_debuginfo = config['test'].get('vmlinux_debuginfo', None) +module_path = config['test'].get('module_path', None) +module_debuginfo_path = config['test'].get('module_debuginfo_path', None) + +if vmlinux.endswith(".gz"): + vmlinux_gz = vmlinux + testdir = os.environ['CRASH_PYTHON_TESTDIR'] + base = os.path.basename(vmlinux)[:-3] + vmlinux = os.path.join(testdir, base) + + with gzip.open(vmlinux_gz, 'r') as f_in, open(vmlinux, 'wb') as f_out: + shutil.copyfileobj(f_in, f_out) + + f_out.close() + f_in.close() + +gdb.execute(f"file {vmlinux}") + +from kdump.target import Target +target = Target(debug=False) + +try: + gdb.execute(f"target kdumpfile {vmcore}") +except gdb.error as e: + print(str(e)) + sys.exit(1) diff --git a/setup.py b/setup.py index 36e66af06eb..3a8db1515ce 100644 --- a/setup.py +++ b/setup.py @@ -8,7 +8,7 @@ setup( name = "crash", version = "0.1", - packages = find_packages(exclude=['tests']), + packages = find_packages(exclude=['tests', 'kernel-tests']), package_data = { '' : [ "*.dist" "*.txt" ], }, diff --git a/test-all.sh b/test-all.sh index 6e9eaeb2bf0..4bfc47b10ec 100755 --- a/test-all.sh +++ b/test-all.sh @@ -1,5 +1,17 @@ #!/bin/sh +set -e + +cleanup() { + test -n "$DIR" && rm -rf "$DIR" +} + +trap cleanup EXIT + +DIR=$(mktemp -d "/tmp/crash-python-tests.XXXXXX") + +export CRASH_PYTHON_TESTDIR="$DIR" + rm -rf build/lib/crash python3 setup.py -q build make -C tests -s @@ -28,3 +40,19 @@ if has_mypy; 
then echo "OK" fi fi + +cat << END > $DIR/gdbinit +python sys.path.insert(0, 'build/lib') +set build-id-verbose 0 +set python print-stack full +set prompt py-crash> +set height 0 +set print pretty on +source kernel-tests/unittest-prepare.py +source kernel-tests/unittest-bootstrap.py +END + +for f in "$@"; do + export CRASH_PYTHON_TESTFILE="$f" + crash-python-gdb -nx -batch -x $DIR/gdbinit +done From 7e6c1774038961d462beae73190a8d97df1b4697 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Tue, 7 May 2019 17:52:48 -0400 Subject: [PATCH 133/367] crash.types.classdev: update classdev iteration to use device_private With Linux v5.1-rc1, knode_class was moved from struct device to struct device_private. This commit updates for_each_class_device to use the implementation that matches the kernel. Signed-off-by: Jeff Mahoney --- crash/types/classdev.py | 31 +++++++++++++++++++++++++---- kernel-tests/test_types_classdev.py | 21 +++++++++++++++++++ 2 files changed, 48 insertions(+), 4 deletions(-) create mode 100644 kernel-tests/test_types_classdev.py diff --git a/crash/types/classdev.py b/crash/types/classdev.py index 885872651ae..8f8c40ece4e 100644 --- a/crash/types/classdev.py +++ b/crash/types/classdev.py @@ -3,13 +3,36 @@ import gdb -from crash.types.klist import klist_for_each_entry -from crash.util.symbols import Types +from crash.types.klist import klist_for_each +from crash.util import struct_has_member, container_of +from crash.util.symbols import Types, TypeCallbacks -types = Types(['struct device']) +types = Types(['struct device', 'struct device_private']) + +class ClassdevState(object): + class_is_private = True + + #v5.1-rc1 moved knode_class from struct device to struct device_private + @classmethod + def setup_iterator_type(cls, gdbtype): + if struct_has_member(gdbtype, 'knode_class'): + cls.class_is_private = False + + +type_cbs = TypeCallbacks([ ('struct device', + ClassdevState.setup_iterator_type) ]) def for_each_class_device(class_struct, subtype=None): klist = class_struct['p']['klist_devices'] - for dev in klist_for_each_entry(klist, types.device_type, 'knode_class'): + + container_type = types.device_type + if ClassdevState.class_is_private: + container_type = types.device_private_type + + for knode in klist_for_each(klist): + dev = container_of(knode, container_type, 'knode_class') + if ClassdevState.class_is_private: + dev = dev['device'].dereference() + if subtype is None or int(subtype) == int(dev['type']): yield dev diff --git a/kernel-tests/test_types_classdev.py b/kernel-tests/test_types_classdev.py new file mode 100644 index 00000000000..409c926aeb7 --- /dev/null +++ b/kernel-tests/test_types_classdev.py @@ -0,0 +1,21 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +import unittest +import gdb + +import crash.types.classdev as classdevs + +class TestClassdev(unittest.TestCase): + def setUp(self): + self.device_type = gdb.lookup_type('struct device') + + def test_classdev_iteration(self): + count = 0 + block_class = gdb.lookup_symbol('block_class', None)[0].value() + for dev in classdevs.for_each_class_device(block_class): + self.assertTrue(type(dev) is gdb.Value) + self.assertTrue(dev.type == self.device_type) + count += 1 + + self.assertTrue(count > 0) + From 38dee70bccfbf2f5a694ad93dec2c520b1603993 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Wed, 15 May 2019 18:12:44 -0400 Subject: [PATCH 134/367] crash.subsystem.filesystem: document gdb.NotAvailableError The helper routines can be passed bad pointers, so document 
that each can raise gdb.NotAvailableError. Signed-off-by: Jeff Mahoney --- crash/subsystem/filesystem/__init__.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/crash/subsystem/filesystem/__init__.py b/crash/subsystem/filesystem/__init__.py index 714b18a933e..bfc9ded270f 100644 --- a/crash/subsystem/filesystem/__init__.py +++ b/crash/subsystem/filesystem/__init__.py @@ -86,6 +86,9 @@ def super_fstype(sb: gdb.Value) -> str: Returns: str: The file system type's name + + Raises: + gdb.NotAvailableError: The target value was not available. """ return sb['s_type']['name'].string() @@ -100,6 +103,8 @@ def super_flags(sb: gdb.Value) -> str: Returns: str: The flags field in human-readable form. + Raises: + gdb.NotAvailableError: The target value was not available. """ return decode_flags(sb['s_flags'], SB_FLAGS) @@ -112,6 +117,9 @@ def for_each_super_block() -> Iterable[gdb.Value]: Yields: gdb.Value + + Raises: + gdb.NotAvailableError: The target value was not available. """ for sb in list_for_each_entry(symvals.super_blocks, types.super_block_type, 's_list'): @@ -156,6 +164,9 @@ def is_fstype_super(super_block: gdb.Value, name: str) -> bool: Returns: bool: whether the super_block belongs to the specified file system + + Raises: + gdb.NotAvailableError: The target value was not available. """ return super_fstype(super_block) == name From 798e2c2932d8184e756aeb184ae69f627b62845a Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Thu, 9 May 2019 16:33:50 -0400 Subject: [PATCH 135/367] crash.subsytem.filesystem.mount: add API documentation This commit adds documentation for the mount API and makes private some methods/functions that are meant to be internal. Signed-off-by: Jeff Mahoney --- crash/subsystem/filesystem/mount.py | 135 +++++++++++++++++++++++----- 1 file changed, 115 insertions(+), 20 deletions(-) diff --git a/crash/subsystem/filesystem/mount.py b/crash/subsystem/filesystem/mount.py index 44ebda4d080..3194c8e2109 100644 --- a/crash/subsystem/filesystem/mount.py +++ b/crash/subsystem/filesystem/mount.py @@ -48,11 +48,16 @@ def for_each_mount_impl(cls, task): @classmethod def for_each_mount_nsproxy(cls, task): + """ + An implementation of for_each_mount that uses the task's + nsproxy to locate the mount namespace. See for_each_mount + for more details. + """ return list_for_each_entry(task['nsproxy']['mnt_ns']['list'], types.mount_type, 'mnt_list') @classmethod - def check_task_interface(cls, symval): + def _check_task_interface(cls, symval): try: nsproxy = symvals.init_task['nsproxy'] cls.for_each_mount_impl = cls.for_each_mount_nsproxy @@ -60,7 +65,7 @@ def check_task_interface(cls, symval): print("check_task_interface called but no init_task?") pass -def check_mount_type(gdbtype): +def _check_mount_type(gdbtype): try: types.mount_type = gdb.lookup_type('struct mount') except gdb.error: @@ -68,36 +73,77 @@ def check_mount_type(gdbtype): types.mount_type = types.vfsmount_type def for_each_mount(task=None): + """ + Iterate over each mountpoint in the namespace of the specified task + + If no task is given, the init_task is used. + + The type of the mount structure returned depends on whether + 'struct mount' exists on the kernel version being debugged. + + Args: + task (gdb.Value, default=): + The task which contains the namespace to iterate. + + Yields: + gdb.Value: + A mountpoint attached to the namespace. 
+ + """ if task is None: task = symvals.init_task return Mount.for_each_mount_impl(task) -def real_mount(vfsmnt): - if (vfsmnt.type == types.mount_type or - vfsmnt.type == types.mount_type.pointer()): - t = vfsmnt.type - if t.code == gdb.TYPE_CODE_PTR: - t = t.target() - if t is not types.mount_type: - types.mount_type = t - return vfsmnt - return container_of(vfsmnt, types.mount_type, 'mnt') +def mount_flags(mnt: gdb.Value, show_hidden: bool=False) -> str: + """ + Returns the human-readable flags of the mount structure + + Args: + mnt (gdb.Value): + The mount structure for which to return flags -def mount_flags(mnt, show_hidden=False): + show_hidden (bool, default=False): + Whether to return hidden flags + + Returns: + str: The mount flags in human-readable form + """ if struct_has_member(mnt, 'mnt'): mnt = mnt['mnt'] if show_hidden: return decode_flags(mnt['mnt_flags'], MNT_FLAGS_HIDDEN, ",") return decode_flags(mnt['mnt_flags'], MNT_FLAGS, ",") -def mount_super(mnt): +def mount_super(mnt: gdb.Value) -> gdb.Value: + """ + Returns the struct super_block associated with a mount + + Args: + mnt: gdb.Value: + The mount structure for which to return the super_block + + Returns: + gdb.Value: + The super_block associated with the mount + """ try: sb = mnt['mnt']['mnt_sb'] except gdb.error: sb = mnt['mnt_sb'] return sb -def mount_root(mnt): +def mount_root(mnt: gdb.Value) -> gdb.Value: + """ + Returns the struct dentry corresponding to the root of a mount + + Args: + mnt: gdb.Value: + The mount structure for which to return the root dentry + + Returns: + gdb.Value: + The dentry that corresponds to the root of the mount + """ try: mnt = mnt['mnt'] except gdb.error: @@ -105,16 +151,65 @@ def mount_root(mnt): return mnt['mnt_root'] -def mount_fstype(mnt): +def mount_fstype(mnt: gdb.Value) -> str: + """ + Returns the file system type of the mount + + Args: + mnt (gdb.Value): + The mount structure for which to return the file system tyoe + + Returns: + str: The file system type of the mount in string form + """ return super_fstype(mount_super(mnt)) -def mount_device(mnt): +def mount_device(mnt: gdb.Value) -> str: + """ + Returns the device name that this mount is using + + Args: + gdb.Value: + The mount structure for which to get the device name + + Returns: + str: The device name in string form + + """ devname = mnt['mnt_devname'].string() if devname is None: devname = "none" return devname +def _real_mount(vfsmnt): + if (vfsmnt.type == types.mount_type or + vfsmnt.type == types.mount_type.pointer()): + t = vfsmnt.type + if t.code == gdb.TYPE_CODE_PTR: + t = t.target() + if t is not types.mount_type: + types.mount_type = t + return vfsmnt + return container_of(vfsmnt, types.mount_type, 'mnt') + def d_path(mnt, dentry, root=None): + """ + Returns a file system path described by a mount and dentry + + Args: + mnt (gdb.Value): + The mount for the start of the path + + dentry (gdb.Value): + The dentry for the start of the path + + root (gdb.Value, default=None): + The mount at which to stop resolution. If None, + the current root of the namespace. 
+ + Returns: + str: The path in string form + """ if root is None: root = symvals.init_task['fs']['root'] @@ -124,7 +219,7 @@ def d_path(mnt, dentry, root=None): if mnt.type.code != gdb.TYPE_CODE_PTR: mnt = mnt.address - mount = real_mount(mnt) + mount = _real_mount(mnt) if mount.type.code != gdb.TYPE_CODE_PTR: mount = mount.address @@ -157,5 +252,5 @@ def d_path(mnt, dentry, root=None): name = '/' return name -type_cbs = TypeCallbacks([ ('struct vfsmount', check_mount_type ) ]) -symbols_cbs = SymbolCallbacks([ ('init_task', Mount.check_task_interface ) ]) +type_cbs = TypeCallbacks([ ('struct vfsmount', _check_mount_type ) ]) +symbols_cbs = SymbolCallbacks([ ('init_task', Mount._check_task_interface ) ]) From 047042450749e770457aaafa6da8ead4282be301 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Tue, 18 Sep 2018 05:38:53 -0400 Subject: [PATCH 136/367] crash.commands.btrfs: add basic btrfs command This adds an `btrfs' command to display some details of btrfs file systems. Included subcommands are: - 'list' -- list all mounted btrfs file systems, including device and uuid. Signed-off-by: Jeff Mahoney --- crash/commands/btrfs.py | 60 +++++++++++++++++++++++++ kernel-tests/test_commands_btrfs.py | 68 +++++++++++++++++++++++++++++ 2 files changed, 128 insertions(+) create mode 100644 crash/commands/btrfs.py create mode 100644 kernel-tests/test_commands_btrfs.py diff --git a/crash/commands/btrfs.py b/crash/commands/btrfs.py new file mode 100644 index 00000000000..e32c609ab23 --- /dev/null +++ b/crash/commands/btrfs.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +import gdb + +from argparse import Namespace +from crash.commands import Command, ArgumentParser +from crash.commands import CommandLineError +from crash.exceptions import DelayedAttributeError +from crash.subsystem.filesystem import for_each_super_block, super_fstype +from crash.subsystem.filesystem.btrfs import btrfs_fsid, btrfs_metadata_uuid + +class BtrfsCommand(Command): + """display Btrfs internal data structures + +NAME + btrfs - display Btrfs internal data structures + +SYNOPSIS + btrfs + +COMMANDS + btrfs list [-m] - list all btrfs file systems (-m to show metadata uuid)""" + + def __init__(self, name): + parser = ArgumentParser(prog=name) + subparsers = parser.add_subparsers(help="sub-command help") + list_parser = subparsers.add_parser('list', help='list help') + list_parser.set_defaults(subcommand=self.list_btrfs) + list_parser.add_argument('-m', action='store_true', default=False) + + parser.format_usage = lambda: 'btrfs [args...]\n' + Command.__init__(self, name, parser) + + def list_btrfs(self, args: Namespace) -> None: + print_header = True + count = 0 + for sb in for_each_super_block(): + if super_fstype(sb) == "btrfs": + if args.m: + u = btrfs_metadata_uuid(sb) + which_fsid = "METADATA UUID" + else: + u = btrfs_fsid(sb) + which_fsid = "FSID" + if print_header: + print("SUPER BLOCK\t\tDEVICE\t\t{}".format(which_fsid)) + print_header = False + print("{}\t{}\t\t{}".format(sb.address, sb['s_id'].string(), u)) + count += 1 + if count == 0: + print("No btrfs file systems were mounted.") + + def execute(self, args): + if hasattr(args, 'subcommand'): + args.subcommand(args) + else: + raise CommandLineError("no command specified") + +BtrfsCommand("btrfs") diff --git a/kernel-tests/test_commands_btrfs.py b/kernel-tests/test_commands_btrfs.py new file mode 100644 index 00000000000..8c217b9a2b8 --- /dev/null +++ b/kernel-tests/test_commands_btrfs.py @@ -0,0 +1,68 @@ 
+# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +import unittest +import gdb +import io +import sys + +from decorators import skip_without_supers, skip_with_supers, skip_without_type + +from crash.commands.btrfs import BtrfsCommand +from crash.commands import CommandLineError +from crash.exceptions import DelayedAttributeError + +class TestCommandsBtrfs(unittest.TestCase): + def setUp(self): + self.stdout = sys.stdout + self.redirected = io.StringIO() + sys.stdout = self.redirected + self.command = BtrfsCommand("btrfs") + + def tearDown(self): + sys.stdout = self.stdout + + def output(self): + return self.redirected.getvalue() + + def output_lines(self): + output = self.output() + return len(output.split("\n")) - 1 + + def test_btrfs_empty(self): + """`btrfs` raises CommandLineError""" + with self.assertRaises(CommandLineError): + self.command.invoke_uncaught("") + + @skip_without_supers('btrfs') + def test_btrfs_list(self): + """`btrfs list` produces valid output""" + self.command.invoke_uncaught("list") + self.assertTrue(self.output_lines() > 0) + + @skip_without_supers('btrfs') + def test_btrfs_list_m(self): + """`btrfs list -m` produces valid output""" + self.command.invoke_uncaught("list -m") + self.assertTrue(self.output_lines() > 0) + + @skip_with_supers('btrfs') + def test_btrfs_list_without_supers(self): + """`btrfs list` without supers produces one-line status""" + self.command.invoke_uncaught("list") + self.assertTrue(self.output_lines() == 1) + + @skip_with_supers('btrfs') + def test_btrfs_list_m_without_supers(self): + """`btrfs list -m` without supers produces one-line status""" + self.command.invoke_uncaught("list -m") + self.assertTrue(self.output_lines() == 1) + + def test_btrfs_list_invalid(self): + """`btrfs list -invalid` raises CommandLineError""" + with self.assertRaises(CommandLineError): + self.command.invoke_uncaught("list -invalid") + + def test_btrfs_invalid_command(self): + """`btrfs invalid command` raises CommandLineError""" + with self.assertRaises(CommandLineError): + self.command.invoke_uncaught("invalid command") From d3654dc078b45b972631c5f077d876049a7ab3b2 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Tue, 18 Sep 2018 05:41:46 -0400 Subject: [PATCH 137/367] crash.subsystem.filesystem.xfs: Add basic XFS infra This commit adds a basic xfs file system system module. 
Included are: - Python variables for flags - Mappings from flags to flag names - Decoding for xfs_bufs and inodes - Helpers for mount flags, superblock version, and uuid - AIL iterators including item decoding Signed-off-by: Jeff Mahoney --- crash/subsystem/filesystem/xfs.py | 590 ++++++++++++++++++++++++++++++ 1 file changed, 590 insertions(+) create mode 100644 crash/subsystem/filesystem/xfs.py diff --git a/crash/subsystem/filesystem/xfs.py b/crash/subsystem/filesystem/xfs.py new file mode 100644 index 00000000000..04f6d38ca64 --- /dev/null +++ b/crash/subsystem/filesystem/xfs.py @@ -0,0 +1,590 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +import gdb +import uuid + +from typing import Union, Iterable + +from crash.types.list import list_for_each_entry +from crash.util import container_of, decode_uuid_t, decode_flags +from crash.util import struct_has_member +from crash.util.symbols import Types, TypeCallbacks +from crash.subsystem.filesystem import is_fstype_super, is_fstype_inode +from crash.subsystem.storage import block_device_name +from crash.subsystem.storage.decoders import Decoder + +# XFS inode locks +XFS_IOLOCK_EXCL = 0x01 +XFS_IOLOCK_SHARED = 0x02 +XFS_ILOCK_EXCL = 0x04 +XFS_ILOCK_SHARED = 0x08 +XFS_MMAPLOCK_EXCL = 0x10 +XFS_MMAPLOCK_SHARED = 0x20 + +XFS_LOCK_MASK = 0x3f + +XFS_LOCK_FLAGS = { + XFS_IOLOCK_EXCL : "XFS_IOLOCK_EXCL", + XFS_IOLOCK_SHARED : "XFS_IOLOCK_SHARED", + XFS_ILOCK_EXCL : "XFS_ILOCK_EXCL", + XFS_ILOCK_SHARED : "XFS_ILOCK_SHARED", + XFS_MMAPLOCK_EXCL : "XFS_MMAPLOCK_EXCL", + XFS_MMAPLOCK_SHARED : "XFS_MMAPLOCK_SHARED", +} + +XFS_LI_EFI = 0x1236 +XFS_LI_EFD = 0x1237 +XFS_LI_IUNLINK = 0x1238 +XFS_LI_INODE = 0x123b # aligned ino chunks, var-size ibufs +XFS_LI_BUF = 0x123c # v2 bufs, variable sized inode bufs +XFS_LI_DQUOT = 0x123d +XFS_LI_QUOTAOFF = 0x123e + +XFS_LI_TYPES = { + XFS_LI_EFI : "XFS_LI_EFI", + XFS_LI_EFD : "XFS_LI_EFD", + XFS_LI_IUNLINK : "XFS_LI_IUNLINK", + XFS_LI_INODE : "XFS_LI_INODE", + XFS_LI_BUF : "XFS_LI_BUF", + XFS_LI_EFI : "XFS_LI_EFI", + XFS_LI_DQUOT : "XFS_LI_DQUOT", + XFS_LI_QUOTAOFF : "XFS_LI_QUOTAOFF", +} + +XFS_BLI_HOLD = 0x01 +XFS_BLI_DIRTY = 0x02 +XFS_BLI_STALE = 0x04 +XFS_BLI_LOGGED = 0x08 +XFS_BLI_INODE_ALLOC_BUF = 0x10 +XFS_BLI_STALE_INODE = 0x20 +XFS_BLI_INODE_BUF = 0x40 + +XFS_BLI_FLAGS = { + XFS_BLI_HOLD : "HOLD", + XFS_BLI_DIRTY : "DIRTY", + XFS_BLI_STALE : "STALE", + XFS_BLI_LOGGED : "LOGGED", + XFS_BLI_INODE_ALLOC_BUF : "INODE_ALLOC", + XFS_BLI_STALE_INODE : "STALE_INODE", + XFS_BLI_INODE_BUF : "INODE_BUF", +} + +XBF_READ = (1 << 0) # buffer intended for reading from device +XBF_WRITE = (1 << 1) # buffer intended for writing to device +XBF_MAPPED = (1 << 2) # buffer mapped (b_addr valid) +XBF_ASYNC = (1 << 4) # initiator will not wait for completion +XBF_DONE = (1 << 5) # all pages in the buffer uptodate +XBF_DELWRI = (1 << 6) # buffer has dirty pages +XBF_STALE = (1 << 7) # buffer has been staled, do not find it +XBF_ORDERED = (1 << 11) # use ordered writes +XBF_READ_AHEAD = (1 << 12) # asynchronous read-ahead +XBF_LOG_BUFFER = (1 << 13) # this is a buffer used for the log + +# flags used only as arguments to access routines +XBF_LOCK = (1 << 14) # lock requested +XBF_TRYLOCK = (1 << 15) # lock requested, but do not wait +XBF_DONT_BLOCK = (1 << 16) # do not block in current thread + +# flags used only internally +_XBF_PAGES = (1 << 18) # backed by refcounted pages +_XBF_RUN_QUEUES = (1 << 19) # run block device task queue +_XBF_KMEM = (1 << 20) # backed by heap memory 
+_XBF_DELWRI_Q = (1 << 21) # buffer on delwri queue +_XBF_LRU_DISPOSE = (1 << 24) # buffer being discarded + +XFS_BUF_FLAGS = { + XBF_READ : "READ", + XBF_WRITE : "WRITE", + XBF_MAPPED : "MAPPED", + XBF_ASYNC : "ASYNC", + XBF_DONE : "DONE", + XBF_DELWRI : "DELWRI", + XBF_STALE : "STALE", + XBF_ORDERED : "ORDERED", + XBF_READ_AHEAD : "READ_AHEAD", + XBF_LOCK : "LOCK", # should never be set + XBF_TRYLOCK : "TRYLOCK", # ditto + XBF_DONT_BLOCK : "DONT_BLOCK", # ditto + _XBF_PAGES : "PAGES", + _XBF_RUN_QUEUES : "RUN_QUEUES", + _XBF_KMEM : "KMEM", + _XBF_DELWRI_Q : "DELWRI_Q", + _XBF_LRU_DISPOSE : "LRU_DISPOSE", +} + +XFS_ILOG_CORE = 0x001 +XFS_ILOG_DDATA = 0x002 +XFS_ILOG_DEXT = 0x004 +XFS_ILOG_DBROOT = 0x008 +XFS_ILOG_DEV = 0x010 +XFS_ILOG_UUID = 0x020 +XFS_ILOG_ADATA = 0x040 +XFS_ILOG_AEXT = 0x080 +XFS_ILOG_ABROOT = 0x100 +XFS_ILOG_DOWNER = 0x200 +XFS_ILOG_AOWNER = 0x400 +XFS_ILOG_TIMESTAMP = 0x4000 + +XFS_ILI_FLAGS = { + XFS_ILOG_CORE : "CORE", + XFS_ILOG_DDATA : "DDATA", + XFS_ILOG_DEXT : "DEXT", + XFS_ILOG_DBROOT : "DBROOT", + XFS_ILOG_DEV : "DEV", + XFS_ILOG_UUID : "UUID", + XFS_ILOG_ADATA : "ADATA", + XFS_ILOG_AEXT : "AEXT", + XFS_ILOG_ABROOT : "ABROOT", + XFS_ILOG_DOWNER : "DOWNER", + XFS_ILOG_AOWNER : "AOWNER", + XFS_ILOG_TIMESTAMP : "TIMESTAMP", +} + +XFS_MOUNT_WSYNC = (1 << 0) +XFS_MOUNT_UNMOUNTING = (1 << 1) +XFS_MOUNT_DMAPI = (1 << 2) +XFS_MOUNT_WAS_CLEAN = (1 << 3) +XFS_MOUNT_FS_SHUTDOWN = (1 << 4) +XFS_MOUNT_DISCARD = (1 << 5) +XFS_MOUNT_NOALIGN = (1 << 7) +XFS_MOUNT_ATTR2 = (1 << 8) +XFS_MOUNT_GRPID = (1 << 9) +XFS_MOUNT_NORECOVERY = (1 << 10) +XFS_MOUNT_DFLT_IOSIZE = (1 << 12) +XFS_MOUNT_SMALL_INUMS = (1 << 14) +XFS_MOUNT_32BITINODES = (1 << 15) +XFS_MOUNT_NOUUID = (1 << 16) +XFS_MOUNT_BARRIER = (1 << 17) +XFS_MOUNT_IKEEP = (1 << 18) +XFS_MOUNT_SWALLOC = (1 << 19) +XFS_MOUNT_RDONLY = (1 << 20) +XFS_MOUNT_DIRSYNC = (1 << 21) +XFS_MOUNT_COMPAT_IOSIZE = (1 << 22) +XFS_MOUNT_FILESTREAMS = (1 << 24) +XFS_MOUNT_NOATTR2 = (1 << 25) + +XFS_MOUNT_FLAGS = { + XFS_MOUNT_WSYNC : "WSYNC", + XFS_MOUNT_UNMOUNTING : "UNMOUNTING", + XFS_MOUNT_DMAPI : "DMAPI", + XFS_MOUNT_WAS_CLEAN : "WAS_CLEAN", + XFS_MOUNT_FS_SHUTDOWN : "FS_SHUTDOWN", + XFS_MOUNT_DISCARD : "DISCARD", + XFS_MOUNT_NOALIGN : "NOALIGN", + XFS_MOUNT_ATTR2 : "ATTR2", + XFS_MOUNT_GRPID : "GRPID", + XFS_MOUNT_NORECOVERY : "NORECOVERY", + XFS_MOUNT_DFLT_IOSIZE : "DFLT_IOSIZE", + XFS_MOUNT_SMALL_INUMS : "SMALL_INUMS", + XFS_MOUNT_32BITINODES : "32BITINODES", + XFS_MOUNT_NOUUID : "NOUUID", + XFS_MOUNT_BARRIER : "BARRIER", + XFS_MOUNT_IKEEP : "IKEEP", + XFS_MOUNT_SWALLOC : "SWALLOC", + XFS_MOUNT_RDONLY : "RDONLY", + XFS_MOUNT_DIRSYNC : "DIRSYNC", + XFS_MOUNT_COMPAT_IOSIZE : "COMPAT_IOSIZE", + XFS_MOUNT_FILESTREAMS : "FILESTREAMS", + XFS_MOUNT_NOATTR2 : "NOATTR2", +} + +class XFSBufDecoder(Decoder): + """ + Decodes a struct xfs_buf into human-readable form + """ + + def __init__(self, xfsbuf): + super(XFSBufDecoder, self).__init__() + self.xfsbuf = xfsbuf + + def __str__(self): + return xfs_format_xfsbuf(self.xfsbuf) + +class XFSBufBioDecoder(Decoder): + """ + Decodes a bio with an xfsbuf ->bi_end_io + """ + description = "{:x} bio: xfs buffer on {}" + __endio__ = 'xfs_buf_bio_end_io' + types = Types([ 'struct xfs_buf *' ]) + + def __init__(self, bio): + super(XFSBufBioDecoder, self).__init__() + self.bio = bio + + def interpret(self): + self.xfsbuf = bio['bi_private'].cast(cls.types.xfs_buf_p_type) + self.devname = block_device_name(bio['bi_bdev']) + + def __next__(self): + return XFSBufDecoder(xfs.xfsbuf) + + def __str__(self): + return 
self.description.format(self.bio, self.devname) + +XFSBufBioDecoder.register() + +types = Types([ 'struct xfs_log_item', 'struct xfs_buf_log_item', + 'struct xfs_inode_log_item', 'struct xfs_efi_log_item', + 'struct xfs_efd_log_item', 'struct xfs_dq_logitem', + 'struct xfs_qoff_logitem', 'struct xfs_inode', + 'struct xfs_mount *', 'struct xfs_buf *' ]) + +class XFS(object): + """ + XFS File system state class. Not meant to be instantiated directly. + """ + ail_head_name = None + + @classmethod + def _detect_ail_version(cls, gdbtype): + if struct_has_member(gdbtype, 'ail_head'): + cls.ail_head_name = 'ail_head' + else: + cls.ail_head_name = 'xa_ail' + +def is_xfs_super(super_block: gdb.Value) -> bool: + """ + Tests whether a super_block belongs to XFS. + + Args: + super_block (gdb.Value): + The struct super_block to test + + Returns: + bool: Whether the super_block belongs to XFS + + Raises: + gdb.NotAvailableError: The target value was not available. + """ + return is_fstype_super(super_block, "xfs") + +def is_xfs_inode(vfs_inode: gdb.Value) -> bool: + """ + Tests whether a generic VFS inode belongs to XFS + + Args: + vfs_inode (gdb.value(): + The struct inode to test whether it belongs to XFS + + Returns: + bool: Whether the inode belongs to XFS + + Raises: + gdb.NotAvailableError: The target value was not available. + """ + + return is_fstype_inode(vfs_inode, "xfs") + +def xfs_inode(vfs_inode: gdb.Value, force: bool=False) -> gdb.Value: + """ + Converts a VFS inode to a xfs inode + + This method converts a struct inode to a struct xfs_inode. + + Args: + vfs_inode (gdb.Value): + The struct inode to convert to a struct xfs_inode + + force (bool): ignore type checking + + Returns: + gdb.Value: The converted struct xfs_inode + + Raises: + TypeError: The inode does not belong to xfs + gdb.NotAvailableError: The target value was not available. + """ + if not force and not is_xfs_inode(vfs_inode): + raise TypeError("inode does not belong to xfs") + + return container_of(vfs_inode, types.xfs_inode, 'i_vnode') + +def xfs_mount(sb: gdb.Value, force: bool=False) -> gdb.Value: + """ + Converts a VFS superblock to a xfs mount + + This method converts a struct super_block to a struct xfs_mount * + + Args: + super_block (gdb.Value): + The struct super_block to convert to a struct xfs_fs_info. + + Returns: + gdb.Value: The converted struct xfs_mount + + Raises: + TypeError: The superblock does not belong to xfs + gdb.NotAvailableError: The target value was not available. + """ + if not force and not is_xfs_super(sb): + raise TypeError("superblock does not belong to xfs") + + return sb['s_fs_info'].cast(types.xfs_mount_p_type) + +def xfs_mount_flags(mp: gdb.Value) -> str: + """ + Return the XFS-internal mount flags in string form + + Args: + mp (gdb.Value): + The struct xfs_mount for the file system + + Returns: + str: The mount flags in string form + + Raises: + gdb.NotAvailableError: The target value was not available. + """ + return decode_flags(mp['m_flags'], XFS_MOUNT_FLAGS) + +def xfs_mount_uuid(mp: gdb.Value) -> uuid.UUID: + """ + Return the UUID for an XFS file system in string form + + Args: + mp gdb.Value(): + The struct xfs_mount for the file system + + Returns: + uuid.UUID: The Python UUID object that describes the xfs UUID + + Raises: + gdb.NotAvailableError: The target value was not available. 
+ """ + return decode_uuid_t(mp['m_sb']['sb_uuid']) + +def xfs_mount_version(mp: gdb.Value) -> int: + return int(mp['m_sb']['sb_versionnum']) & 0xf + +def xfs_for_each_ail_entry(ail: gdb.Value) -> Iterable[gdb.Value]: + """ + Iterates over the XFS Active Item Log and returns each item + + Args: + ail (gdb.Value): The XFS AIL to iterate + + Yields: + gdb.Value + + Raises: + gdb.NotAvailableError: The target value was not available. + """ + head = ail[XFS.ail_head_name] + for item in list_for_each_entry(head, types.xfs_log_item_type, 'li_ail'): + yield item + +def xfs_for_each_ail_log_item(mp: gdb.Value) -> Iterable[gdb.Value]: + """ + Iterates over the XFS Active Item Log and returns each item + + Args: + mp (gdb.Value): The XFS mount to iterate + + Yields: + gdb.Value + + Raises: + gdb.NotAvailableError: The target value was not available. + """ + for item in xfs_for_each_ail_entry(mp['m_ail']): + yield item + +def item_to_buf_log_item(item: gdb.Value) -> gdb.Value: + """ + Converts an xfs_log_item to an xfs_buf_log_item + + Args: + item (gdb.Value): The log item to convert + + Returns: + gdb.Value + + Raises: + TypeError: The type of log item is not XFS_LI_BUF + gdb.NotAvailableError: The target value was not available. + """ + if item['li_type'] != XFS_LI_BUF: + raise TypeError("item is not a buf log item") + return container_of(item, types.xfs_buf_log_item_type, 'bli_item') + +def item_to_inode_log_item(item: gdb.Value) -> gdb.Value: + """ + Converts an xfs_log_item to an xfs_inode_log_item + + Args: + item (gdb.Value): The log item to convert + + Returns: + gdb.Value + + Raises: + TypeError: The type of log item is not XFS_LI_INODE + gdb.NotAvailableError: The target value was not available. + """ + if item['li_type'] != XFS_LI_INODE: + raise TypeError("item is not an inode log item") + return container_of(item, types.xfs_inode_log_item_type, 'ili_item') + +def item_to_efi_log_item(item: gdb.Value) -> gdb.Value: + """ + Converts an xfs_log_item to an xfs_efi_log_item + + Args: + item (gdb.Value): The log item to convert + + Returns: + gdb.Value + + Raises: + TypeError: The type of log item is not XFS_LI_EFI + gdb.NotAvailableError: The target value was not available. + """ + if item['li_type'] != XFS_LI_EFI: + raise TypeError("item is not an EFI log item") + return container_of(item, types.xfs_efi_log_item_type, 'efi_item') + +def item_to_efd_log_item(item: gdb.Value) -> gdb.Value: + """ + Converts an xfs_log_item to an xfs_efd_log_item + + Args: + item (gdb.Value): The log item to convert + + Returns: + gdb.Value + + Raises: + TypeError: The type of log item is not XFS_LI_EFD + gdb.NotAvailableError: The target value was not available. + """ + if item['li_type'] != XFS_LI_EFD: + raise TypeError("item is not an EFD log item") + return container_of(item, types.xfs_efd_log_item_type, 'efd_item') + +def item_to_dquot_log_item(item: gdb.Value) -> gdb.Value: + """ + Converts an xfs_log_item to an xfs_dquot_log_item + + Args: + item (gdb.Value): The log item to convert + + Returns: + gdb.Value + + Raises: + TypeError: The type of log item is not XFS_LI_DQUOT + gdb.NotAvailableError: The target value was not available. 
+    """
+    if item['li_type'] != XFS_LI_DQUOT:
+        raise TypeError("item is not a DQUOT log item")
+    return container_of(item, types.xfs_dq_logitem_type, 'qli_item')
+
+def item_to_quotaoff_log_item(item: gdb.Value) -> gdb.Value:
+    """
+    Converts an xfs_log_item to an xfs_quotaoff_log_item
+
+    Args:
+        item (gdb.Value): The log item to convert
+
+    Returns:
+        gdb.Value
+
+    Raises:
+        TypeError: The type of log item is not XFS_LI_QUOTAOFF
+        gdb.NotAvailableError: The target value was not available.
+    """
+    if item['li_type'] != XFS_LI_QUOTAOFF:
+        raise TypeError("item is not a QUOTAOFF log item")
+    return container_of(item, types.xfs_qoff_logitem_type, 'qql_item')
+
+def xfs_log_item_typed(item: gdb.Value) -> gdb.Value:
+    """
+    Returns the log item converted from the generic type to the actual type
+
+    Args:
+        item (gdb.Value): The struct xfs_log_item to convert.
+
+    Returns:
+        Depending on the item type, one of:
+        gdb.Value
+        gdb.Value
+        gdb.Value
+        gdb.Value
+        gdb.Value
+        gdb.Value (for UNLINK item)
+
+    Raises:
+        RuntimeError: An unexpected item type was encountered
+        gdb.NotAvailableError: The target value was not available.
+    """
+    li_type = int(item['li_type'])
+    if li_type == XFS_LI_BUF:
+        return item_to_buf_log_item(item)
+    elif li_type == XFS_LI_INODE:
+        return item_to_inode_log_item(item)
+    elif li_type == XFS_LI_EFI:
+        return item_to_efi_log_item(item)
+    elif li_type == XFS_LI_EFD:
+        return item_to_efd_log_item(item)
+    elif li_type == XFS_LI_IUNLINK:
+        # There isn't actually any type information for this
+        return item['li_type']
+    elif li_type == XFS_LI_DQUOT:
+        return item_to_dquot_log_item(item)
+    elif li_type == XFS_LI_QUOTAOFF:
+        return item_to_quotaoff_log_item(item)
+
+    raise RuntimeError("Unknown AIL item type {:x}".format(li_type))
+
+def xfs_format_xfsbuf(buf: gdb.Value) -> str:
+    """
+    Returns a human-readable format of struct xfs_buf
+
+    Args:
+        buf (gdb.Value):
+            The struct xfs_buf to decode
+
+    Returns:
+        str: The human-readable representation of the struct xfs_buf
+
+    Raises:
+        gdb.NotAvailableError: The target value was not available.
+    """
+    state = ""
+    bflags = decode_flags(buf['b_flags'], XFS_BUF_FLAGS)
+
+    if buf['b_pin_count']['counter']:
+        state += "P"
+    if buf['b_sema']['count'] >= 0:
+        state += "L"
+
+    return f"{int(buf):x} xfsbuf: logical offset {int(buf['b_bn']):d}, " \
+           f"size {int(buf['b_buffer_len']):d}, block number {int(buf['b_bn']):d}, " \
+           f"flags {bflags}, state {state}"
+
+def xfs_for_each_ail_log_item_typed(mp: gdb.Value) -> Iterable[gdb.Value]:
+    """
+    Iterates over the XFS Active Item Log and returns each item, resolved
+    to the specific type.
+
+    Args:
+        mp (gdb.Value): The XFS mount to iterate
+
+    Yields:
+        Depending on the item type, one of:
+        gdb.Value
+        gdb.Value
+        gdb.Value
+        gdb.Value
+        gdb.Value
+
+    Raises:
+        gdb.NotAvailableError: The target value was not available.
+    """
+    for item in xfs_for_each_ail_log_item(mp):
+        yield xfs_log_item_typed(item)
+
+type_cbs = TypeCallbacks([ ('struct xfs_ail', XFS._detect_ail_version) ])

From edfcfac9c741e2c41775e4abed5b04c05dc25aa6 Mon Sep 17 00:00:00 2001
From: Jeff Mahoney
Date: Tue, 18 Sep 2018 05:38:36 -0400
Subject: [PATCH 138/367] crash.commands.xfs: add basic xfs command

This adds an `xfs' command to display some details of xfs file systems.
Included subcommands are: - 'list' -- list all mounted xfs file systems, including device and uuid - 'show' -- show details of a single xfs file system - 'dump-ail' -- dump contents of the AIL for one file system - 'dump-buft' -- dump contents of the bt_delwrite_queue for one file system Signed-off-by: Jeff Mahoney --- crash/commands/xfs.py | 196 ++++++++++++++++++++++++++++++ kernel-tests/test_commands_xfs.py | 77 ++++++++++++ 2 files changed, 273 insertions(+) create mode 100644 crash/commands/xfs.py create mode 100644 kernel-tests/test_commands_xfs.py diff --git a/crash/commands/xfs.py b/crash/commands/xfs.py new file mode 100644 index 00000000000..1add4eee09d --- /dev/null +++ b/crash/commands/xfs.py @@ -0,0 +1,196 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +import gdb +import os.path +import argparse +import re + +from argparse import Namespace +from crash.commands import Command, ArgumentParser +from crash.commands import CommandLineError, CommandError +from crash.exceptions import DelayedAttributeError +from crash.types.list import list_for_each_entry, list_empty +from crash.subsystem.filesystem import for_each_super_block, get_super_block +from crash.subsystem.filesystem import super_flags +from crash.subsystem.filesystem.xfs import xfs_mount +from crash.subsystem.filesystem.xfs import xfs_for_each_ail_log_item +from crash.subsystem.filesystem.xfs import xfs_log_item_typed +from crash.subsystem.filesystem.xfs import xfs_format_xfsbuf +from crash.subsystem.filesystem.xfs import XFS_LI_TYPES +from crash.subsystem.filesystem.xfs import XFS_LI_EFI, XFS_LI_EFD +from crash.subsystem.filesystem.xfs import XFS_LI_IUNLINK, XFS_LI_INODE +from crash.subsystem.filesystem.xfs import XFS_LI_BUF, XFS_LI_DQUOT +from crash.subsystem.filesystem.xfs import XFS_LI_QUOTAOFF, XFS_BLI_FLAGS +from crash.subsystem.filesystem.xfs import xfs_mount_flags, xfs_mount_uuid +from crash.subsystem.filesystem.xfs import xfs_mount_version + +from crash.util.symbols import Types + +types = Types(['struct xfs_buf *']) + +class XFSCommand(Command): + """display XFS internal data structures + +NAME + xfs - display XFS internal data structures + +SYNOPSIS + xfs [arguments ...] 
+
+COMMANDS
+    xfs list
+    xfs show
+    xfs dump-ail
+    xfs dump-buft
+    """
+
+    def __init__(self, name):
+        parser = ArgumentParser(prog=name)
+        subparsers = parser.add_subparsers(help="sub-command help")
+        show_parser = subparsers.add_parser('show', help='show help')
+        show_parser.set_defaults(subcommand=self.show_xfs)
+        show_parser.add_argument('addr')
+        list_parser = subparsers.add_parser('list', help='list help')
+        list_parser.set_defaults(subcommand=self.list_xfs)
+        ail_parser = subparsers.add_parser('dump-ail', help='ail help')
+        ail_parser.set_defaults(subcommand=self.dump_ail)
+        ail_parser.add_argument('addr')
+        buft_parser = subparsers.add_parser('dump-buft', help='buft help')
+        buft_parser.set_defaults(subcommand=self.dump_buftargs)
+        buft_parser.add_argument('addr')
+
+        Command.__init__(self, name, parser)
+
+    def list_xfs(self, args: Namespace) -> None:
+        count = 0
+        print_header = True
+        for sb in for_each_super_block():
+            if sb['s_type']['name'].string() == "xfs":
+                mp = xfs_mount(sb)
+                u = xfs_mount_uuid(mp)
+                if print_header:
+                    print_header = False
+                    print("SUPER BLOCK\t\t\tDEVICE\t\tUUID")
+
+                print("{}\t{}\t{}".format(sb.address, sb['s_id'].string(), u))
+                count += 1
+
+        if count == 0:
+            print("No xfs file systems are mounted.")
+
+    def show_xfs(self, args: Namespace) -> None:
+        try:
+            sb = get_super_block(args.addr)
+        except gdb.NotAvailableError as e:
+            raise CommandError(str(e))
+
+        mp = xfs_mount(sb)
+
+        print("Device: {}".format(sb['s_id'].string()))
+        print("UUID: {}".format(xfs_mount_uuid(mp)))
+        print("VFS superblock flags: {}".format(super_flags(sb)))
+        print("Flags: {}".format(xfs_mount_flags(mp)))
+        print("Version: {}".format(xfs_mount_version(mp)))
+        if list_empty(mp['m_ail']['xa_ail']):
+            print("AIL is empty")
+        else:
+            print("AIL has items queued")
+
+    def dump_ail(self, args: Namespace) -> None:
+        try:
+            sb = get_super_block(args.addr)
+        except gdb.NotAvailableError as e:
+            raise CommandError(str(e))
+
+        mp = xfs_mount(sb)
+        ail = mp['m_ail']
+        itemno = 0
+        print("AIL @ {:x}".format(int(ail)))
+        print("target={} last_pushed_lsn={} log_flush="
+              .format(int(ail['xa_target']), int(ail['xa_last_pushed_lsn'])),
+              end='')
+        try:
+            print("{}".format(int(ail['xa_log_flush'])))
+        except gdb.error:
+            print("[N/A]")
+
+        for bitem in xfs_for_each_ail_log_item(mp):
+            li_type = int(bitem['li_type'])
+            lsn = int(bitem['li_lsn'])
+            item = xfs_log_item_typed(bitem)
+            print("{}: item={:x} lsn={} {} "
+                  .format(itemno, int(bitem.address), lsn,
+                          XFS_LI_TYPES[li_type][7:]), end='')
+            if li_type == XFS_LI_BUF:
+                buf = item['bli_buf']
+                flags = []
+                bli_flags = int(item['bli_flags'])
+
+                for flag in XFS_BLI_FLAGS.keys():
+                    if flag & bli_flags:
+                        flags.append(XFS_BLI_FLAGS[flag])
+
+                print(" buf@{:x} bli_flags={}"
+                      .format(int(buf), "|".join(flags)))
+
+                print(" {}".format(xfs_format_xfsbuf(buf)))
+            elif li_type == XFS_LI_INODE:
+                ili_flags = int(item['ili_lock_flags'])
+                flags = []
+                xfs_inode = item['ili_inode']
+                print("inode@{:x} i_ino={} ili_lock_flags={:x} "
+                      .format(int(xfs_inode['i_vnode'].address),
+                              int(xfs_inode['i_ino']), ili_flags))
+            elif li_type == XFS_LI_EFI:
+                efi = item['efi_format']
+                print("efi@{:x} size={}, nextents={}, id={:x}"
+                      .format(int(item.address), int(efi['efi_size']),
+                              int(efi['efi_nextents']), int(efi['efi_id'])))
+            elif li_type == XFS_LI_EFD:
+                efd = item['efd_format']
+                print("efd@{:x} size={}, nextents={}, id={:x}"
+                      .format(int(item.address), int(efd['efd_size']),
+                              int(efd['efd_nextents']), int(efd['efd_id'])))
+            elif li_type == XFS_LI_DQUOT:
+                dquot = item['qli_dquot']
+                print("dquot@{:x} flags={:x}".format(int(dquot),
+                                                     int(dquot['dq_flags'])))
+            elif li_type == XFS_LI_QUOTAOFF:
+                qoff = item['qql_format']
+                print("qoff@{:x} type={} size={} flags={}"
+                      .format(int(qoff), int(qoff['qf_type']),
+                              int(qoff['qf_size']), int(qoff['qf_flags'])))
+            else:
+                print("item@{:x}".format(int(item.address)))
+            itemno += 1
+
+    @classmethod
+    def dump_buftarg(cls, targ: gdb.Value) -> None:
+        for buf in list_for_each_entry(targ['bt_delwrite_queue'],
+                                       types.xfs_buf_p_type.target(), 'b_list'):
+            print("{:x} {}".format(int(buf.address), xfs_format_xfsbuf(buf)))
+
+    @classmethod
+    def dump_buftargs(cls, args: Namespace) -> None:
+        try:
+            sb = get_super_block(args.addr)
+        except gdb.NotAvailableError as e:
+            raise CommandError(str(e))
+        mp = xfs_mount(sb)
+        ddev = mp['m_ddev_targp']
+        ldev = mp['m_logdev_targp']
+
+        print("Data device queue @ {:x}:".format(int(ddev)))
+        cls.dump_buftarg(ddev)
+
+        if int(ddev) != int(ldev):
+            print("Log device queue:")
+            cls.dump_buftarg(ldev)
+
+    def execute(self, args):
+        if hasattr(args, 'subcommand'):
+            args.subcommand(args)
+        else:
+            raise CommandLineError("no command specified")
+
+XFSCommand("xfs")
diff --git a/kernel-tests/test_commands_xfs.py b/kernel-tests/test_commands_xfs.py
new file mode 100644
index 00000000000..0737eca01c5
--- /dev/null
+++ b/kernel-tests/test_commands_xfs.py
@@ -0,0 +1,77 @@
+# -*- coding: utf-8 -*-
+# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
+import unittest
+import gdb
+import io
+import sys
+
+from decorators import skip_without_supers, skip_with_supers
+
+from crash.commands.xfs import XFSCommand
+from crash.exceptions import DelayedAttributeError
+from crash.commands import CommandLineError, CommandError
+
+class TestCommandsXFS(unittest.TestCase):
+    """
+    These tests require that the xfs file system be built-in or loaded as
+    a module. If the test vmcore doesn't have the xfs module loaded or
+    modules haven't been provided, most of these tests will be skipped.
+ """ + + def setUp(self): + self.stdout = sys.stdout + self.redirected = io.StringIO() + sys.stdout = self.redirected + self.command = XFSCommand("xfs") + + def tearDown(self): + sys.stdout = self.stdout + + def output(self): + return self.redirected.getvalue() + + def output_lines(self): + return len(self.output().split("\n")) - 1 + + def test_empty_command(self): + """`xfs' raises CommandLineError""" + with self.assertRaises(CommandLineError): + self.command.invoke_uncaught("") + + def test_invalid_command(self): + """`xfs invalid command' raises CommandLineError""" + with self.assertRaises(CommandLineError): + self.command.invoke_uncaught("invalid command") + + @skip_without_supers('xfs') + def test_xfs_list(self): + """`xfs list' produces valid output""" + self.command.invoke_uncaught("list") + self.assertTrue(self.output_lines() > 0) + + @skip_with_supers('xfs') + def test_xfs_list_no_mounts(self): + """`xfs list' produces one-line status with no mounts""" + self.command.invoke_uncaught("list") + self.assertTrue(self.output_lines() == 1) + + def test_xfs_list_invalid(self): + """`xfs list invalid' raises CommandLineError""" + with self.assertRaises(CommandLineError): + self.command.invoke_uncaught("list invalid") + + def test_xfs_show_null(self): + """`xfs show 0' raises CommandError""" + with self.assertRaises(CommandError): + self.command.invoke_uncaught("show 0") + + def test_xfs_dump_ail_null(self): + """`xfs dump-ail 0' raises CommandError""" + with self.assertRaises(CommandError): + self.command.invoke_uncaught("dump-ail 0") + + def test_xfs_dump_buft_null(self): + """`xfs dump-buft 0' raises CommandError""" + with self.assertRaises(CommandError): + self.command.invoke_uncaught("dump-buft 0") + From 8d6437cbabe86b9ab940555042dd9ad31fae0adc Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 22 Apr 2019 18:14:32 -0400 Subject: [PATCH 139/367] crash.subsystem.storage.blocksq: add per-queue requests_in_flight call This adds a helper to pass back the requests in flight for a particular queue (block single-queue only). Signed-off-by: Jeff Mahoney --- crash/subsystem/storage/blocksq.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/crash/subsystem/storage/blocksq.py b/crash/subsystem/storage/blocksq.py index 53533c0c7b0..52a517f39d4 100644 --- a/crash/subsystem/storage/blocksq.py +++ b/crash/subsystem/storage/blocksq.py @@ -48,3 +48,14 @@ def request_age_ms(request): current jiffies in milliseconds. """ return kernel.jiffies_to_msec(kernel.jiffies - request['start_time']) + +def requests_in_flight(queue): + """ + Report how many requests are in flight for this queue + + This method returns a 2-tuple of ints. The first value + is the number of read requests in flight. The second + value is the number of write requests in flight. + """ + return (int(queue['in_flight'][0]), + int(queue['in_flight'][1])) From 16a5f751d811493c5f3968ed16ff62cf5903d760 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Wed, 1 May 2019 11:20:33 -0400 Subject: [PATCH 140/367] crash.commands.lsmod: add basic lsmod command This commit adds a basic `lsmod' command. By default, it will display the module name, core address, size, and users of it. With the -p option, it will display the percpu base and size. With -p , it will display the percpu base for the given CPU number. 
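For illustration, a minimal usage sketch, mirroring the kernel-tests/test_commands_lsmod.py
tests added below; it assumes a crash-python session with a kernel and its
debuginfo already loaded, and the wildcard and CPU number are only examples:

    from crash.commands.lsmod import ModuleCommand

    cmd = ModuleCommand()
    cmd.invoke("")        # all modules: name, core address, size, users
    cmd.invoke("xfs*")    # only modules whose name matches the wildcard
    cmd.invoke("-p")      # percpu base and size for each module
    cmd.invoke("-p 0")    # percpu base as mapped for CPU 0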
Signed-off-by: Jeff Mahoney --- crash/commands/lsmod.py | 126 ++++++++++++++++++++++++++++ kernel-tests/test_commands_lsmod.py | 40 +++++++++ 2 files changed, 166 insertions(+) create mode 100644 crash/commands/lsmod.py create mode 100644 kernel-tests/test_commands_lsmod.py diff --git a/crash/commands/lsmod.py b/crash/commands/lsmod.py new file mode 100644 index 00000000000..737f59c8ffb --- /dev/null +++ b/crash/commands/lsmod.py @@ -0,0 +1,126 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +import gdb +import re +import fnmatch +import argparse + +from crash.commands import Command, ArgumentParser +from crash.types.module import for_each_module +from crash.util import struct_has_member +from crash.types.list import list_for_each_entry +from crash.types.percpu import get_percpu_var +import crash.types.percpu + +class ModuleCommand(Command): + """display module information + +NAME + lsmod - display module information + +SYNOPSIS + lsmod [-p [n]] [name-wildcard] + +DESCRIPTION + This command displays information about loaded modules. + + The default output will show all loaded modules, the core address, + its size, and any users of the module. By specifying [name-wildcard], + the results can be filtered to modules matching the wildcard. + + The following options are available: + -p display the percpu base for the module and the size of its region + -p CPU# display the percpu base for the module and the size of its region + for the specified CPU number + +""" + def __init__(self): + parser = ArgumentParser(prog="lsmod") + + parser.add_argument('-p', nargs='?', const=-1, default=None, type=int) + parser.add_argument('args', nargs=argparse.REMAINDER) + + parser.format_usage = lambda: "lsmod [-p] [regex] ...\n" + + Command.__init__(self, "lsmod", parser) + + self.module_use_type = gdb.lookup_type('struct module_use') + + def print_module_percpu(self, mod, cpu=-1): + cpu = int(cpu) + addr = int(mod['percpu']) + if addr == 0: + return + + if cpu != -1: + addr = get_percpu_var(mod['percpu'], cpu) + tabs = "\t\t" + else: + tabs = "\t\t\t" + + size = int(mod['percpu_size']) + print("{:16s}\t{:#x}{}{:d}".format(mod['name'].string(), int(addr), + tabs, size)) + + + def execute(self, argv): + regex = None + show_deps = True + print_header = True + if argv.args: + regex = re.compile(fnmatch.translate(argv.args[0])) + + if argv.p is not None: + show_deps = False + + core_layout = None + + for mod in for_each_module(): + if core_layout is None: + core_layout = struct_has_member(mod.type, 'core_layout') + + modname = mod['name'].string() + if regex: + m = regex.match(modname) + if m is None: + continue + + if argv.p is not None: + if print_header: + print_header = False + if argv.p == -1: + print("Module\t\t\tPercpu Base\t\tSize") + else: + print("Module\t\t\tPercpu Base@CPU{:d}\t\tSize" + .format(argv.p)) + self.print_module_percpu(mod, argv.p) + continue + + if print_header: + print_header = False + print("Module\t\t\tAddress\t\t\tSize\tUsed by") + + if core_layout: + addr = int(mod['core_layout']['base']) + size = int(mod['core_layout']['size']) + else: + addr = int(mod['module_core']) + size = int(mod['core_size']) + + module_use = "" + count = 0 + for use in list_for_each_entry(mod['source_list'], + self.module_use_type, + 'source_list'): + if module_use == "": + module_use += " " + else: + module_use += "," + module_use += use['source']['name'].string() + count += 1 + + print("{:16s}\t{:#x}\t{:d}\t{:d}{}" + .format(modname, addr, size, count, module_use)) + 
+ModuleCommand() diff --git a/kernel-tests/test_commands_lsmod.py b/kernel-tests/test_commands_lsmod.py new file mode 100644 index 00000000000..610409a29ff --- /dev/null +++ b/kernel-tests/test_commands_lsmod.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +import unittest +import gdb +import io +import sys + +from crash.commands.lsmod import ModuleCommand + +class TestCommandsLsmod(unittest.TestCase): + def setUp(self): + self.stdout = sys.stdout + sys.stdout = io.StringIO() + + def tearDown(self): + sys.stdout = self.stdout + + def output(self): + return sys.stdout.getvalue() + + def test_lsmod(self): + ModuleCommand().invoke("") + output = self.output() + self.assertTrue(len(output.split("\n")) > 2) + + def test_lsmod_wildcard(self): + ModuleCommand().invoke("*") + output = self.output() + self.assertTrue(len(output.split("\n")) > 2) + + def test_lsmod_p(self): + ModuleCommand().invoke("-p") + output = self.output() + self.assertTrue(len(output.split("\n")) > 2) + print(output) + + def test_lsmod_p_0(self): + ModuleCommand().invoke("-p 0") + output = self.output() + self.assertTrue(len(output.split("\n")) > 2) From 2ef9178107f0edf1c3cb21a4e7cdc4257b39454d Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Thu, 9 May 2019 12:59:37 -0400 Subject: [PATCH 141/367] crash.cache.syscache: update config parsing Kernel v5.1-rc1 moved the compressed config data into .rodata using asm .globl variables to mark the bounds. This commit updates crash.cache.syscache to handle the new variables and cleans up the code a bit. Signed-off-by: Jeff Mahoney --- crash/cache/syscache.py | 95 +++++++++++++++++++++---------- crash/types/page.py | 11 ++-- kernel-tests/test_commands_sys.py | 32 +++++++++++ 3 files changed, 103 insertions(+), 35 deletions(-) create mode 100644 kernel-tests/test_commands_sys.py diff --git a/crash/cache/syscache.py b/crash/cache/syscache.py index b5d9a666115..2012ce8ae61 100644 --- a/crash/cache/syscache.py +++ b/crash/cache/syscache.py @@ -1,6 +1,8 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +from typing import Dict + from builtins import round import gdb @@ -11,9 +13,12 @@ from crash.exceptions import DelayedAttributeError from crash.cache import CrashCache from crash.util import array_size -from crash.util.symbols import Types, Symvals, SymbolCallbacks +from crash.util.symbols import Types, Symvals, SymbolCallbacks, MinimalSymvals from crash.infra.lookup import DelayedValue + +ImageLocation = Dict[str, Dict[str, int]] + class CrashUtsnameCache(CrashCache): symvals = Symvals([ 'init_uts_ns' ]) @@ -45,6 +50,8 @@ def __getattr__(self, name): class CrashConfigCache(CrashCache): types = Types([ 'char *' ]) symvals = Symvals([ 'kernel_config_data' ]) + msymvals = MinimalSymvals([ 'kernel_config_data', + 'kernel_config_data_end' ]) def __getattr__(self, name): if name == 'config_buffer': @@ -53,51 +60,77 @@ def __getattr__(self, name): return self._parse_config() return getattr(self.__class__, name) - @staticmethod - def read_buf(address, size): + def read_buf(self, address: int, size: int) -> memoryview: return gdb.selected_inferior().read_memory(address, size) - @staticmethod - def read_buf_str(address, size): - buf = gdb.selected_inferior().read_memory(address, size) - if isinstance(buf, memoryview): - return buf.tobytes().decode('utf-8') - else: - return str(buf) - - def decompress_config_buffer(self): - MAGIC_START = 'IKCFG_ST' - MAGIC_END = 'IKCFG_ED' - - # Must cast it to char * 
to do the pointer arithmetic correctly - data_addr = self.symvals.kernel_config_data.address.cast(self.types.char_p_type) - data_len = self.symvals.kernel_config_data.type.sizeof + def read_buf_bytes(self, address: int, size: int) -> bytes: + return self.read_buf(address, size).tobytes() + + def locate_config_buffer_section(self) -> ImageLocation: + data_start = int(self.msymvals.kernel_config_data) + data_end = int(self.msymvals.kernel_config_data_end) + + return { + 'data' : { + 'start' : data_start, + 'size' : data_end - data_start, + }, + 'magic' : { + 'start' : data_start - 8, + 'end' : data_end, + }, + } + + def locate_config_buffer_typed(self) -> ImageLocation: + start = int(self.symvals.kernel_config_data.address) + end = start + self.symvals.kernel_config_data.type.sizeof + + return { + 'data' : { + 'start' : start + 8, + 'size' : end - start - 2*8 - 1, + }, + 'magic' : { + 'start' : start, + 'end' : end - 8 - 1, + }, + } + + def verify_image(self, location: ImageLocation) -> None: + MAGIC_START = b'IKCFG_ST' + MAGIC_END = b'IKCFG_ED' buf_len = len(MAGIC_START) - buf = self.read_buf_str(data_addr, buf_len) + buf = self.read_buf_bytes(location['magic']['start'], buf_len) if buf != MAGIC_START: - raise IOError("Missing MAGIC_START in kernel_config_data.") + raise IOError(f"Missing MAGIC_START in kernel_config_data. Got `{buf}'") buf_len = len(MAGIC_END) - buf = self.read_buf_str(data_addr + data_len - buf_len - 1, buf_len) + buf = self.read_buf_bytes(location['magic']['end'], buf_len) if buf != MAGIC_END: - raise IOError("Missing MAGIC_END in kernel_config_data.") + raise IOError("Missing MAGIC_END in kernel_config_data. Got `{buf}'") + + def decompress_config_buffer(self) -> str: + try: + location = self.locate_config_buffer_section() + except DelayedAttributeError: + location = self.locate_config_buffer_typed() + + self.verify_image(location) # Read the compressed data - buf_len = data_len - len(MAGIC_START) - len(MAGIC_END) - buf = self.read_buf(data_addr + len(MAGIC_START), buf_len) - self.config_buffer = zlib.decompress(buf, 16 + zlib.MAX_WBITS) - if (isinstance(self.config_buffer, bytes)): - self.config_buffer = str(self.config_buffer.decode('utf-8')) - else: - self.config_buffer = str(self.config_buffer) + buf = self.read_buf_bytes(location['data']['start'], + location['data']['size']) + + decompressed = zlib.decompress(buf, 16 + zlib.MAX_WBITS) + self.config_buffer = str(decompressed.decode('utf-8')) return self.config_buffer def __str__(self): return self.config_buffer - def _parse_config(self): - self.ikconfig_cache = {} + def _parse_config(self) -> Dict[str, str]: + self.ikconfig_cache: Dict[str, str] = dict() for line in self.config_buffer.splitlines(): # bin comments diff --git a/crash/types/page.py b/crash/types/page.py index 2ea07853e76..9385c9f2e0a 100644 --- a/crash/types/page.py +++ b/crash/types/page.py @@ -4,7 +4,8 @@ from math import log, ceil import gdb from crash.util import container_of, find_member_variant -from crash.util.symbols import Types, Symvals, TypeCallbacks, SymbolCallbacks +from crash.util.symbols import Types, Symvals, TypeCallbacks +from crash.util.symbols import SymbolCallbacks, MinimalSymbolCallbacks from crash.cache.syscache import config #TODO debuginfo won't tell us, depends on version? 
@@ -201,12 +202,14 @@ def __init__(self, obj, pfn): ('enum pageflags', Page.setup_pageflags ), ('enum zone_type', Page.setup_zone_type ), ('struct mem_section', Page.setup_mem_section) ]) +msymbol_cbs = MinimalSymbolCallbacks([ ('kernel_config_data', + Page.setup_nodes_width ) ]) # TODO: this should better be generalized to some callback for # "config is available" without refering to the symbol name here -symbol_cbs = SymbolCallbacks([ ('kernel_config_data', Page.setup_nodes_width ), - ('vmemmap_base', Page.setup_vmemmap_base ), - ('page_offset_base', Page.setup_directmap_base ) ]) +symbol_cbs = SymbolCallbacks([ ('vmemmap_base', Page.setup_vmemmap_base ), + ('page_offset_base', + Page.setup_directmap_base ) ]) def pfn_to_page(pfn): diff --git a/kernel-tests/test_commands_sys.py b/kernel-tests/test_commands_sys.py new file mode 100644 index 00000000000..e246722abe1 --- /dev/null +++ b/kernel-tests/test_commands_sys.py @@ -0,0 +1,32 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +import unittest +import gdb +import io +import sys + +from crash.commands.syscmd import SysCommand + +class TestCommandsSys(unittest.TestCase): + def setUp(self): + self.stdout = sys.stdout + self.redirect = io.StringIO() + sys.stdout = self.redirect + self.command = SysCommand("sys") + + def tearDown(self): + sys.stdout = self.stdout + + def output(self): + return self.redirect.getvalue() + + def output_lines(self): + return len(self.output().split("\n")) + + def test_sys(self): + self.command.invoke_uncaught("") + self.assertTrue(self.output_lines() > 2) + + def test_sys_config(self): + self.command.invoke_uncaught("config") + self.assertTrue(self.output_lines() > 2) From a0b88a5db8e06c44d9c9f5e6877ba8f1c53a736e Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 17 May 2019 15:20:13 -0400 Subject: [PATCH 142/367] tests: silence as much noise as possible The test output is littered with 'broken link' reports during tests that are specifically testing that behavior. We can tidy up a bit by adding a print_broken_links option that defaults to True but can be set to False by the test cases. Signed-off-by: Jeff Mahoney --- crash/types/list.py | 5 ++++- tests/test_list.py | 13 ++++++++----- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/crash/types/list.py b/crash/types/list.py index 54c384717b1..dae232f6e9a 100644 --- a/crash/types/list.py +++ b/crash/types/list.py @@ -109,9 +109,12 @@ def list_for_each(list_head, include_head=False, reverse=False, def list_for_each_entry(list_head, gdbtype, member, include_head=False, reverse=False, + print_broken_links=True, exact_cycles=False): for node in list_for_each(list_head, include_head=include_head, - reverse=reverse, exact_cycles=exact_cycles): + reverse=reverse, + print_broken_links=print_broken_links, + exact_cycles=exact_cycles): if node.type != types.list_head_type.pointer(): raise TypeError("Type {} found. Expected struct list_head *." 
.format(str(node.type))) diff --git a/tests/test_list.py b/tests/test_list.py index d78bba53ee1..83d80770cf9 100644 --- a/tests/test_list.py +++ b/tests/test_list.py @@ -80,7 +80,8 @@ def test_corrupt_list(self): expected_count = short_list.type.sizeof // short_list[0].type.sizeof count = 0 with self.assertRaises(CorruptListError): - for node in list_for_each(normal_list, exact_cycles=True): + for node in list_for_each(normal_list, exact_cycles=True, + print_broken_links=False): count += 1 def test_normal_container_list_with_string(self): @@ -113,7 +114,8 @@ def test_cycle_container_list_with_string(self): count = 0 with self.assertRaises(ListCycleError): for node in list_for_each_entry(cycle_list, 'struct container', - 'list', exact_cycles=True): + 'list', exact_cycles=True, + print_broken_links=False): count += 1 def test_cycle_container_list_with_type(self): @@ -125,7 +127,8 @@ def test_cycle_container_list_with_type(self): count = 0 with self.assertRaises(ListCycleError): for node in list_for_each_entry(cycle_list, struct_container, - 'list', exact_cycles=True): + 'list', exact_cycles=True, + print_broken_links=False): count += 1 def test_bad_container_list_with_string(self): @@ -136,7 +139,7 @@ def test_bad_container_list_with_string(self): count = 0 with self.assertRaises(CorruptListError): for node in list_for_each_entry(bad_list, 'struct container', - 'list'): + 'list', print_broken_links=False): count += 1 def test_bad_container_list_with_type(self): @@ -148,5 +151,5 @@ def test_bad_container_list_with_type(self): count = 0 with self.assertRaises(CorruptListError): for node in list_for_each_entry(bad_list, struct_container, - 'list'): + 'list', print_broken_links=False): count += 1 From edac82a789f1e0c0619046ef9bfdce1dd789e624 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Tue, 28 May 2019 10:46:13 -0400 Subject: [PATCH 143/367] lists: remove pointless typecheck The *_entry variants of the list iterators typecheck each item yielded by the non-entry variants of the iterators. This is silly and a failure would indicate a fundamental bug that would be caught by the unit tests. Signed-off-by: Jeff Mahoney --- crash/types/klist.py | 3 --- crash/types/list.py | 3 --- 2 files changed, 6 deletions(-) diff --git a/crash/types/klist.py b/crash/types/klist.py index e58b074fa86..6326fb39fe2 100644 --- a/crash/types/klist.py +++ b/crash/types/klist.py @@ -30,9 +30,6 @@ def klist_for_each(klist): def klist_for_each_entry(klist, gdbtype, member): for node in klist_for_each(klist): - if node.type != types.klist_node_type: - raise TypeError("Type {} found. Expected {}." - .format(node.type), types.klist_node_type.pointer()) if node.type is not types.klist_node_type: types.override('struct klist_node', node.type) yield container_of(node, gdbtype, member) diff --git a/crash/types/list.py b/crash/types/list.py index dae232f6e9a..f576baffd3e 100644 --- a/crash/types/list.py +++ b/crash/types/list.py @@ -115,9 +115,6 @@ def list_for_each_entry(list_head, gdbtype, member, reverse=reverse, print_broken_links=print_broken_links, exact_cycles=exact_cycles): - if node.type != types.list_head_type.pointer(): - raise TypeError("Type {} found. Expected struct list_head *." 
- .format(str(node.type))) yield container_of(node, gdbtype, member) def list_empty(list_head): From 58919b0affc87916add33b23c10841e5a95c8dce Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Tue, 28 May 2019 11:38:24 -0400 Subject: [PATCH 144/367] crash.util: cleanup argument exceptions This commit moves the argument handling exceptions from crash.util to crash.exceptions and standardizes the output for common scenarios. Signed-off-by: Jeff Mahoney --- crash/commands/__init__.py | 2 +- crash/exceptions.py | 36 +++++++++++++++++++ crash/kernel.py | 12 +++---- crash/subsystem/filesystem/btrfs.py | 13 +++---- crash/subsystem/filesystem/xfs.py | 29 +++++++-------- crash/subsystem/storage/__init__.py | 16 ++++----- crash/types/bitmap.py | 3 +- crash/types/klist.py | 2 +- crash/types/list.py | 10 +++--- crash/types/percpu.py | 18 ++++++---- crash/types/task.py | 2 +- crash/util/__init__.py | 43 +++++----------------- tests/test_list.py | 56 +++++++++++++++++++++++++++++ tests/test_util.py | 25 +++++++------ 14 files changed, 169 insertions(+), 98 deletions(-) diff --git a/crash/commands/__init__.py b/crash/commands/__init__.py index 8d90438d1d5..a179c4ec9e8 100644 --- a/crash/commands/__init__.py +++ b/crash/commands/__init__.py @@ -27,7 +27,7 @@ def __init__(self, name, parser=None): if parser is None: parser = ArgumentParser(prog=self.name) elif not isinstance(parser, ArgumentParser): - raise TypeError("parser must be ArgumentParser") + raise ArgumentTypeError('parser', parser, ArgumentParser) nl = "" if self.__doc__[-1] != '\n': diff --git a/crash/exceptions.py b/crash/exceptions.py index e7ddd27a359..44b4f92da96 100644 --- a/crash/exceptions.py +++ b/crash/exceptions.py @@ -22,3 +22,39 @@ def __init__(self, name): msg = "Delayed attribute {} has not been completed." self.name = name super().__init__(msg.format(name)) + +class InvalidArgumentError(TypeError): + """Base class for invalid argument exceptions""" + def __init__(self, msg): + super().__init__(msg) + +class ArgumentTypeError(InvalidArgumentError): + """The provided object could not be converted to the expected type""" + formatter = "cannot convert argument `{}' of type {} to {}" + + def __init__(self, name, val, expected_type): + msg = self.formatter.format(name, self.format_clsname(val.__class__), + self.format_clsname(expected_type)) + super().__init__(msg) + self.val = val + + def format_clsname(self, cls): + module = cls.__module__ + if module is None or module == str.__class__.__module__: + return cls.__name__ # Avoid reporting __builtin__ + else: + return module + '.' 
+ cls.__name__ + +class UnexpectedGDBTypeError(InvalidArgumentError): + """The gdb.Type passed describes an inappropriate type for the operation""" + formatter = "expected gdb.Type `{}' to describe `{}' not `{}'" + def __init__(self, name, gdbtype, expected_type): + msg = self.formatter.format(name, str(gdbtype), str(expected_type)) + super().__init__(msg) + +class NotStructOrUnionError(UnexpectedGDBTypeError): + """The provided type is not a struct or union""" + formatter = "argument `{}' describes type `{}' which is not a struct or union" + def __init__(self, name, gdbtype): + super().__init__(name, gdbtype, gdbtype) + msg = self.formatter.format(name, str(gdbtype)) diff --git a/crash/kernel.py b/crash/kernel.py index fcbc300dee7..84931c4124e 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -15,7 +15,7 @@ from elftools.elf.elffile import ELFFile from crash.util import get_symbol_value from crash.util.symbols import Types, Symvals, Symbols -from crash.exceptions import MissingSymbolError +from crash.exceptions import MissingSymbolError, InvalidArgumentError from typing import Pattern, Union, List, Dict, Any @@ -122,7 +122,7 @@ def __init__(self, roots: PathSpecifier=None, - /usr/lib/debug/lib/modules/ Raises: CrashKernelError: If the kernel debuginfo cannot be loaded. - TypeError: If any of the arguments are not None, str, + InvalidArgumentError: If any of the arguments are not None, str, or list of str """ @@ -162,7 +162,7 @@ def __init__(self, roots: PathSpecifier=None, x = [ "/" ] self.roots = x else: - raise TypeError("roots must be None, str, or list of str") + raise InvalidArgumentError("roots must be None, str, or list of str") if verbose: print("roots={}".format(self.roots)) @@ -195,7 +195,7 @@ def __init__(self, roots: PathSpecifier=None, elif isinstance(vmlinux_debuginfo, str): self.vmlinux_debuginfo = [ vmlinux_debuginfo ] else: - raise TypeError("vmlinux_debuginfo must be None, str, or list of str") + raise InvalidArgumentError("vmlinux_debuginfo must be None, str, or list of str") if verbose: print("vmlinux_debuginfo={}".format(self.vmlinux_debuginfo)) @@ -232,7 +232,7 @@ def __init__(self, roots: PathSpecifier=None, self.module_path = x else: - raise TypeError("module_path must be None, str, or list of str") + raise InvalidArgumentError("module_path must be None, str, or list of str") if verbose: print("module_path={}".format(self.module_path)) @@ -270,7 +270,7 @@ def __init__(self, roots: PathSpecifier=None, self.module_debuginfo_path = x else: - raise TypeError("module_debuginfo_path must be None, str, or list of str") + raise InvalidArgumentError("module_debuginfo_path must be None, str, or list of str") if verbose: print("module_debuginfo_path={}".format(self.module_debuginfo_path)) diff --git a/crash/subsystem/filesystem/btrfs.py b/crash/subsystem/filesystem/btrfs.py index b5e33ba7bc5..9e2756fa47b 100644 --- a/crash/subsystem/filesystem/btrfs.py +++ b/crash/subsystem/filesystem/btrfs.py @@ -4,6 +4,7 @@ import gdb import uuid +from crash.exceptions import InvalidArgumentError from crash.util import decode_uuid, struct_has_member, container_of from crash.util.symbols import Types from crash.subsystem.filesystem import is_fstype_super @@ -58,11 +59,11 @@ def btrfs_inode(vfs_inode: gdb.Value, force: bool=False ) -> gdb.Value: gdb.Value: The converted struct btrfs_inode Raises: - TypeError: the inode does not belong to btrfs + InvalidArgumentError: the inode does not belong to btrfs gdb.NotAvailableError: The target value was not available. 
""" if not force and not is_btrfs_inode(vfs_inode): - raise TypeError("inode does not belong to btrfs") + raise InvalidArgumentError("inode does not belong to btrfs") return container_of(vfs_inode, types.btrfs_inode_type, 'vfs_inode') @@ -84,11 +85,11 @@ def btrfs_fs_info(super_block: gdb.Value, force: bool=False) -> gdb.Value: btrfs_fs_info Raises: - TypeError: the super_block does not belong to btrfs + InvalidArgumentError: the super_block does not belong to btrfs gdb.NotAvailableError: The target value was not available. """ if not force and not is_btrfs_super(super_block): - raise TypeError("super_block does not belong to btrfs") + raise InvalidArgumentError("super_block does not belong to btrfs") fs_info = super_block['s_fs_info'].cast(types.btrfs_fs_info_p_type) return fs_info.dereference() @@ -107,7 +108,7 @@ def btrfs_fsid(super_block: gdb.Value, force: bool=False) -> uuid.UUID: uuid.UUID: The Python UUID Object for the btrfs fsid Raises: - TypeError: the super_block does not belong to btrfs + InvalidArgumentError: the super_block does not belong to btrfs gdb.NotAvailableError: The target value was not available. """ fs_info = btrfs_fs_info(super_block, force) @@ -129,7 +130,7 @@ def btrfs_metadata_uuid(sb: gdb.Value, force: bool=False) -> uuid.UUID: uuid.UUID: The Python UUID Object for the btrfs fsid Raises: - TypeError: the super_block does not belong to btrfs + InvalidArgumentError: the super_block does not belong to btrfs gdb.NotAvailableError: The target value was not available. """ fs_info = btrfs_fs_info(sb, force) diff --git a/crash/subsystem/filesystem/xfs.py b/crash/subsystem/filesystem/xfs.py index 04f6d38ca64..b39738efeff 100644 --- a/crash/subsystem/filesystem/xfs.py +++ b/crash/subsystem/filesystem/xfs.py @@ -6,6 +6,7 @@ from typing import Union, Iterable +from crash.exceptions import InvalidArgumentError from crash.types.list import list_for_each_entry from crash.util import container_of, decode_uuid_t, decode_flags from crash.util import struct_has_member @@ -297,7 +298,7 @@ def xfs_inode(vfs_inode: gdb.Value, force: bool=False) -> gdb.Value: gdb.NotAvailableError: The target value was not available. """ if not force and not is_xfs_inode(vfs_inode): - raise TypeError("inode does not belong to xfs") + raise InvalidArgumentError("inode does not belong to xfs") return container_of(vfs_inode, types.xfs_inode, 'i_vnode') @@ -319,7 +320,7 @@ def xfs_mount(sb: gdb.Value, force: bool=False) -> gdb.Value: gdb.NotAvailableError: The target value was not available. """ if not force and not is_xfs_super(sb): - raise TypeError("superblock does not belong to xfs") + raise InvalidArgumentError("superblock does not belong to xfs") return sb['s_fs_info'].cast(types.xfs_mount_p_type) @@ -402,11 +403,11 @@ def item_to_buf_log_item(item: gdb.Value) -> gdb.Value: gdb.Value Raises: - TypeError: The type of log item is not XFS_LI_BUF + InvalidArgumentError: The type of log item is not XFS_LI_BUF gdb.NotAvailableError: The target value was not available. 
""" if item['li_type'] != XFS_LI_BUF: - raise TypeError("item is not a buf log item") + raise InvalidArgumentError("item is not a buf log item") return container_of(item, types.xfs_buf_log_item_type, 'bli_item') def item_to_inode_log_item(item: gdb.Value) -> gdb.Value: @@ -420,11 +421,11 @@ def item_to_inode_log_item(item: gdb.Value) -> gdb.Value: gdb.Value Raises: - TypeError: The type of log item is not XFS_LI_INODE + InvalidArgumentError: The type of log item is not XFS_LI_INODE gdb.NotAvailableError: The target value was not available. """ if item['li_type'] != XFS_LI_INODE: - raise TypeError("item is not an inode log item") + raise InvalidArgumentError("item is not an inode log item") return container_of(item, types.xfs_inode_log_item_type, 'ili_item') def item_to_efi_log_item(item: gdb.Value) -> gdb.Value: @@ -438,11 +439,11 @@ def item_to_efi_log_item(item: gdb.Value) -> gdb.Value: gdb.Value Raises: - TypeError: The type of log item is not XFS_LI_EFI + InvalidArgumentError: The type of log item is not XFS_LI_EFI gdb.NotAvailableError: The target value was not available. """ if item['li_type'] != XFS_LI_EFI: - raise TypeError("item is not an EFI log item") + raise InvalidArgumentError("item is not an EFI log item") return container_of(item, types.xfs_efi_log_item_type, 'efi_item') def item_to_efd_log_item(item: gdb.Value) -> gdb.Value: @@ -456,11 +457,11 @@ def item_to_efd_log_item(item: gdb.Value) -> gdb.Value: gdb.Value Raises: - TypeError: The type of log item is not XFS_LI_EFD + InvalidArgumentError: The type of log item is not XFS_LI_EFD gdb.NotAvailableError: The target value was not available. """ if item['li_type'] != XFS_LI_EFD: - raise TypeError("item is not an EFD log item") + raise InvalidArgumentError("item is not an EFD log item") return container_of(item, types.xfs_efd_log_item_type, 'efd_item') def item_to_dquot_log_item(item: gdb.Value) -> gdb.Value: @@ -474,11 +475,11 @@ def item_to_dquot_log_item(item: gdb.Value) -> gdb.Value: gdb.Value Raises: - TypeError: The type of log item is not XFS_LI_DQUOT + InvalidArgumentError: The type of log item is not XFS_LI_DQUOT gdb.NotAvailableError: The target value was not available. """ if item['li_type'] != XFS_LI_DQUOT: - raise TypeError("item is not an DQUOT log item") + raise InvalidArgumentError("item is not an DQUOT log item") return container_of(item, types.xfs_dq_logitem_type, 'qli_item') def item_to_quotaoff_log_item(item: gdb.Value) -> gdb.Value: @@ -492,11 +493,11 @@ def item_to_quotaoff_log_item(item: gdb.Value) -> gdb.Value: gdb.Value Raises: - TypeError: The type of log item is not XFS_LI_QUOTAOFF + InvalidArgumentError: The type of log item is not XFS_LI_QUOTAOFF gdb.NotAvailableError: The target value was not available. """ if item['li_type'] != XFS_LI_QUOTAOFF: - raise TypeError("item is not an QUOTAOFF log item") + raise InvalidArgumentError("item is not an QUOTAOFF log item") return container_of(item, types.xfs_qoff_logitem_type, 'qql_item') def xfs_log_item_typed(item:gdb.Value) -> gdb.Value: diff --git a/crash/subsystem/storage/__init__.py b/crash/subsystem/storage/__init__.py index 37cf821d8d9..7fd0860af19 100644 --- a/crash/subsystem/storage/__init__.py +++ b/crash/subsystem/storage/__init__.py @@ -10,7 +10,7 @@ from crash.util.symbols import Types, Symvals, SymbolCallbacks, TypeCallbacks from crash.types.classdev import for_each_class_device from . 
import decoders -import crash.exceptions +from crash.exceptions import DelayedAttributeError, InvalidArgumentError types = Types([ 'struct gendisk', 'struct hd_struct', 'struct device', 'struct device_type', 'struct bdev_inode' ]) @@ -135,7 +135,7 @@ def for_each_block_device(subtype: gdb.Value=None) -> Iterable[gdb.Value]: if get_basic_type(subtype.type) == types.device_type_type: subtype = subtype.address elif get_basic_type(subtype.type) != types.device_type_type.pointer(): - raise TypeError("subtype must be {} not {}" + raise InvalidArgumentError("subtype must be {} not {}" .format(types.device_type_type.pointer(), subtype.type.unqualified())) for dev in for_each_class_device(symvals.block_class, subtype): @@ -173,7 +173,7 @@ def gendisk_name(gendisk): str: the name of the block device Raises: - TypeError: gdb.Value does not describe a struct gendisk or + InvalidArgumentError: gdb.Value does not describe a struct gendisk or struct hd_struct """ if gendisk.type.code == gdb.TYPE_CODE_PTR: @@ -185,7 +185,7 @@ def gendisk_name(gendisk): parent = dev_to_gendisk(part_to_dev(gendisk)['parent']) return "{}{:d}".format(gendisk_name(parent), int(gendisk['partno'])) else: - raise TypeError("expected {} or {}, not {}" + raise InvalidArgumentError("expected {} or {}, not {}" .format(types.gendisk_type, types.hd_struct_type, gendisk.type.unqualified())) @@ -226,7 +226,7 @@ def inode_to_block_device(inode): Returns the block device associated with this inode. If the inode describes a block device, return that block device. - Otherwise, raise TypeError. + Otherwise, raise InvalidArgumentError. Args: inode(gdb.Value): The struct inode for which to @@ -237,10 +237,10 @@ def inode_to_block_device(inode): with the provided struct inode Raises: - TypeError: inode does not describe a block device + InvalidArgumentError: inode does not describe a block device """ if inode['i_sb'] != symvals.blockdev_superblock: - raise TypeError("inode does not correspond to block device") + raise InvalidArgumentError("inode does not correspond to block device") return container_of(inode, types.bdev_inode_type, 'vfs_inode')['bdev'] def inode_on_bdev(inode): @@ -275,7 +275,7 @@ def _check_types(result): raise TypeError("disk_type expected to be {} not {}" .format(symvals.device_type_type, types.disk_type.type)) - except crash.exceptions.DelayedAttributeError: + except DelayedAttributeError: pass symbol_cbs = SymbolCallbacks([ ( 'disk_type', _check_types ), diff --git a/crash/types/bitmap.py b/crash/types/bitmap.py index be84a585a5c..97611695cbf 100644 --- a/crash/types/bitmap.py +++ b/crash/types/bitmap.py @@ -6,6 +6,7 @@ import gdb from math import log +from crash.exceptions import InvalidArgumentError from crash.util.symbols import Types types = Types('unsigned long') @@ -17,7 +18,7 @@ def _check_bitmap_type(bitmap: gdb.Value) -> None: (bitmap.type.code != gdb.TYPE_CODE_PTR or bitmap.type.target().code != types.unsigned_long_type.code or bitmap.type.target().sizeof != types.unsigned_long_type.sizeof)): - raise TypeError("bitmaps are expected to be arrays of unsigned long not `{}'" + raise InvalidArgumentError("bitmaps are expected to be arrays of unsigned long not `{}'" .format(bitmap.type)) def for_each_set_bit(bitmap: gdb.Value, diff --git a/crash/types/klist.py b/crash/types/klist.py index 6326fb39fe2..cc6694781e8 100644 --- a/crash/types/klist.py +++ b/crash/types/klist.py @@ -17,7 +17,7 @@ def klist_for_each(klist): if klist.type == types.klist_type.pointer(): klist = klist.dereference() elif klist.type != 
types.klist_type: - raise TypeError("klist must be gdb.Value representing 'struct klist' or 'struct klist *' not {}" + raise InvalidArgumentError("klist must be gdb.Value representing 'struct klist' or 'struct klist *' not {}" .format(klist.type)) if klist.type is not types.klist_type: types.override('struct klist', klist.type) diff --git a/crash/types/list.py b/crash/types/list.py index f576baffd3e..eecd30d2b93 100644 --- a/crash/types/list.py +++ b/crash/types/list.py @@ -4,6 +4,7 @@ import gdb from crash.util import container_of from crash.util.symbols import Types +from crash.exceptions import ArgumentTypeError, UnexpectedGDBTypeError class ListError(Exception): pass @@ -19,16 +20,13 @@ class ListCycleError(CorruptListError): def list_for_each(list_head, include_head=False, reverse=False, print_broken_links=True, exact_cycles=False): pending_exception = None - if isinstance(list_head, gdb.Symbol): - list_head = list_head.value() if not isinstance(list_head, gdb.Value): - raise TypeError("list_head must be gdb.Value representing 'struct list_head' or a 'struct list_head *' not {}" - .format(type(list_head).__name__)) + raise ArgumentTypeError('list_head', list_head, gdb.Value) if list_head.type == types.list_head_type.pointer(): list_head = list_head.dereference() elif list_head.type != types.list_head_type: - raise TypeError("Must be struct list_head not {}" - .format(str(list_head.type))) + raise UnexpectedGDBTypeError('list_head', types.list_head_type, + list_head.type) if list_head.type is not types.list_head_type: types.override('struct list_head', list_head.type) fast = None diff --git a/crash/types/percpu.py b/crash/types/percpu.py index 9a99847d38b..aea46c639cd 100644 --- a/crash/types/percpu.py +++ b/crash/types/percpu.py @@ -9,7 +9,7 @@ from crash.util.symbols import MinimalSymbolCallbacks, SymbolCallbacks from crash.types.list import list_for_each_entry from crash.types.module import for_each_module -from crash.exceptions import DelayedAttributeError +from crash.exceptions import DelayedAttributeError, InvalidArgumentError from crash.types.bitmap import find_first_set_bit, find_last_set_bit from crash.types.bitmap import find_next_set_bit, find_next_zero_bit from crash.types.page import Page @@ -278,7 +278,7 @@ def _resolve_percpu_var(self, var): if isinstance(var, gdb.Symbol) or isinstance(var, gdb.MinSymbol): var = var.value() if not isinstance(var, gdb.Value): - raise TypeError("Argument must be gdb.Symbol or gdb.Value") + raise InvalidArgumentError("Argument must be gdb.Symbol or gdb.Value") if var.type.code == gdb.TYPE_CODE_PTR: # The percpu contains pointers @@ -325,7 +325,8 @@ def get_percpu_var(self, var: SymbolOrValue, cpu: int) -> gdb.Value: The value is of the same type passed via var. Raises: - :obj:`TypeError`: var is not :obj:`gdb.Symbol` or :obj:`gdb.Value` + :obj:`.InvalidArgumentError`: var is not :obj:`gdb.Symbol` or + :obj:`gdb.Value` :obj:`.PerCPUError`: var does not fall into any percpu range :obj:`ValueError`: cpu is less than ``0`` """ @@ -350,7 +351,8 @@ def get_percpu_vars(self, var: SymbolOrValue, the :obj:`gdb.Value` or :obj:`gdb.Symbol` passed as var. Raises: - :obj:`TypeError`: var is not ``gdb.Symbol`` or ``gdb.Value`` + :obj:`.InvalidArgumentError`: var is not :obj:`gdb.Symbol` or + :obj:`gdb.Value` :obj:`.PerCPUError`: var does not fall into any percpu range :obj:`ValueError`: nr_cpus is <= ``0`` """ @@ -402,7 +404,8 @@ def get_percpu_var(var: SymbolOrValue, cpu: int) -> gdb.Value: The value is of the same type passed via var. 
Raises: - :obj:`TypeError`: var is not :obj:`gdb.Symbol` or :obj:`gdb.Value` + :obj:`.InvalidArgumentError`: var is not :obj:`gdb.Symbol` + or :obj:`gdb.Value` :obj:`.PerCPUError`: var does not fall into any percpu range :obj:`ValueError`: cpu is less than ``0`` """ @@ -426,7 +429,8 @@ def get_percpu_vars(var: SymbolOrValue, the :obj:`gdb.Value` or :obj:`gdb.Symbol` passed as var. Raises: - :obj:`TypeError`: var is not :obj:`gdb.Symbol` or :obj:`gdb.Value` + :obj:`.InvalidArgumentError`: var is not :obj:`gdb.Symbol` + or :obj:`gdb.Value` :obj:`.PerCPUError`: var does not fall into any percpu range :obj:`ValueError`: nr_cpus is <= ``0`` """ @@ -449,7 +453,7 @@ def percpu_counter_sum(var: SymbolOrValue) -> int: if not (var.type == types.percpu_counter_type or (var.type.code == gdb.TYPE_CODE_PTR and var.type.target() == types.percpu_counter_type)): - raise TypeError("var must be gdb.Symbol or gdb.Value describing `{}' not `{}'" + raise InvalidArgumentError("var must be gdb.Symbol or gdb.Value describing `{}' not `{}'" .format(types.percpu_counter_type, var.type)) total = int(var['count']) diff --git a/crash/types/task.py b/crash/types/task.py index eddb9a7f510..35eedd5f692 100644 --- a/crash/types/task.py +++ b/crash/types/task.py @@ -214,7 +214,7 @@ def __init__(self, task_struct: gdb.Value, active: bool=False, self._init_task_types(task_struct) if cpu is not None and not isinstance(cpu, int): - raise TypeError("cpu must be integer or None") + raise InvalidArgumentError("cpu must be integer or None") if not isinstance(task_struct, gdb.Value): raise ArgumentTypeError('task_struct', task_struct, gdb.Value) diff --git a/crash/util/__init__.py b/crash/util/__init__.py index 8b23e5c3c86..b0e7d954e19 100644 --- a/crash/util/__init__.py +++ b/crash/util/__init__.py @@ -9,36 +9,11 @@ from typing import Dict from crash.util.symbols import Types from crash.exceptions import MissingTypeError, MissingSymbolError +from crash.exceptions import ArgumentTypeError, NotStructOrUnionError TypeSpecifier = Union [ gdb.Type, gdb.Value, str, gdb.Symbol ] -class OffsetOfError(Exception): - """Generic Exception for offsetof errors""" - def __init__(self, message): - super().__init__() - self.message = message - - def __str__(self): - return self.message - -class InvalidArgumentError(OffsetOfError): - """The provided object could not be converted to gdb.Type""" - formatter = "cannot convert {} to gdb.Type" - - def __init__(self, val): - msg = self.formatter.format(str(type(val))) - super().__init__(msg) - self.val = val - -class InvalidArgumentTypeError(OffsetOfError): - """The provided type is not a struct or union""" - formatter = "`{}' is not a struct or union" - def __init__(self, gdbtype): - msg = self.formatter.format(str(gdbtype)) - super().__init__(msg) - self.type = gdbtype - -class InvalidComponentError(OffsetOfError): +class InvalidComponentError(LookupError): """An error occured while resolving the member specification""" formatter = "cannot resolve '{}->{}' ({})" def __init__(self, gdbtype, spec, message): @@ -49,7 +24,7 @@ def __init__(self, gdbtype, spec, message): # These exceptions are only raised by _offsetof and should not be # visible outside of this module. 
-class _InvalidComponentBaseError(OffsetOfError): +class _InvalidComponentBaseError(RuntimeError): """An internal error occured while resolving the member specification""" pass @@ -94,7 +69,7 @@ def container_of(val, gdbtype, member): TypeError: val is not a gdb.Value """ if not isinstance(val, gdb.Value): - raise TypeError("container_of expects gdb.Value") + raise ArgumentTypeError('val', type(val), gdb.Value) charp = types.char_p_type if val.type.code != gdb.TYPE_CODE_PTR: val = val.address @@ -255,7 +230,7 @@ def offsetof_type(val, spec, error=True): gdb.Type: The type of the resolved member Raises: - InvalidArgumentError: val is not a valid type + ArgumentTypeError: val is not of type gdb.Type InvalidComponentError: spec is not valid for the type """ gdbtype = None @@ -267,7 +242,7 @@ def offsetof_type(val, spec, error=True): pass if not isinstance(gdbtype, gdb.Type): - raise InvalidArgumentError(val) + raise ArgumentTypeError('gdbtype', gdbtype, gdb.Type) # We'll be friendly and accept pointers as the initial type if gdbtype.code == gdb.TYPE_CODE_PTR: @@ -275,13 +250,13 @@ def offsetof_type(val, spec, error=True): if gdbtype.code != gdb.TYPE_CODE_STRUCT and \ gdbtype.code != gdb.TYPE_CODE_UNION: - raise InvalidArgumentTypeError(gdbtype) + raise NotStructOrUnionError('gdbtype', gdbtype) try: return __offsetof(gdbtype, spec, error) except _InvalidComponentBaseError as e: if error: - raise InvalidComponentError(gdbtype, spec, e.message) + raise InvalidComponentError(gdbtype, spec, str(e)) else: return None @@ -301,7 +276,7 @@ def offsetof(val, spec, error=True): None: The member could not be resolved Raises: - InvalidArgumentError: val is not a valid type + ArgumentTypeError: val is not a valid type InvalidComponentError: spec is not valid for the type """ res = offsetof_type(val, spec, error) diff --git a/tests/test_list.py b/tests/test_list.py index 83d80770cf9..7c96db71942 100644 --- a/tests/test_list.py +++ b/tests/test_list.py @@ -4,6 +4,8 @@ import unittest import gdb +from crash.exceptions import ArgumentTypeError, UnexpectedGDBTypeError +from crash.exceptions import InvalidArgumentError from crash.types.list import list_for_each, list_for_each_entry from crash.types.list import ListCycleError, CorruptListError @@ -153,3 +155,57 @@ def test_bad_container_list_with_type(self): for node in list_for_each_entry(bad_list, struct_container, 'list', print_broken_links=False): count += 1 + + def test_list_for_each_with_bad_argument_base(self): + bad_type = gdb.lookup_type('unsigned long') + with self.assertRaises(InvalidArgumentError): + for node in list_for_each(bad_type): + pass + + def test_list_for_each_entry_with_bad_argument_base(self): + bad_type = gdb.lookup_type('unsigned long') + struct_container = gdb.lookup_type('struct container') + with self.assertRaises(InvalidArgumentError): + for node in list_for_each_entry(bad_type, struct_container, 'list'): + pass + + def test_list_for_each_with_bad_list_type_base(self): + short_list = get_symbol("good_containers") + struct_container = gdb.lookup_type('struct container') + with self.assertRaises(InvalidArgumentError): + for node in list_for_each(short_list): + pass + + def test_list_for_each_entry_with_bad_list_type_base(self): + short_list = get_symbol("good_containers") + struct_container = gdb.lookup_type('struct container') + with self.assertRaises(InvalidArgumentError): + for node in list_for_each_entry(short_list, struct_container, 'list'): + pass + def test_list_for_each_with_bad_argument(self): + bad_type = 
gdb.lookup_type('unsigned long') + with self.assertRaises(ArgumentTypeError): + for node in list_for_each(bad_type): + pass + + def test_list_for_each_entry_with_bad_argument(self): + bad_type = gdb.lookup_type('unsigned long') + struct_container = gdb.lookup_type('struct container') + with self.assertRaises(ArgumentTypeError): + for node in list_for_each_entry(bad_type, struct_container, 'list'): + pass + + def test_list_for_each_with_bad_list_type(self): + short_list = get_symbol("good_containers") + struct_container = gdb.lookup_type('struct container') + with self.assertRaises(UnexpectedGDBTypeError): + for node in list_for_each(short_list): + pass + + def test_list_for_each_entry_with_bad_list_type(self): + short_list = get_symbol("good_containers") + struct_container = gdb.lookup_type('struct container') + with self.assertRaises(UnexpectedGDBTypeError): + for node in list_for_each_entry(short_list, struct_container, 'list'): + pass + diff --git a/tests/test_util.py b/tests/test_util.py index e3cf78aa56d..f6e36b8080b 100644 --- a/tests/test_util.py +++ b/tests/test_util.py @@ -6,9 +6,8 @@ from crash.exceptions import MissingTypeError, MissingSymbolError from crash.util import offsetof, container_of, resolve_type from crash.util import get_symbol_value, safe_get_symbol_value -from crash.util import InvalidComponentError -from crash.util import InvalidArgumentError -from crash.util import InvalidArgumentTypeError +from crash.exceptions import ArgumentTypeError +from crash.exceptions import NotStructOrUnionError from crash.util import InvalidComponentError def getsym(sym): @@ -25,7 +24,7 @@ def tearDown(self): gdb.execute("file") def test_invalid_python_type(self): - with self.assertRaises(InvalidArgumentError): + with self.assertRaises(ArgumentTypeError): offset = offsetof(self, 'dontcare') def test_type_by_string_name(self): @@ -33,7 +32,7 @@ def test_type_by_string_name(self): self.assertTrue(offset == 0) def test_type_by_invalid_name(self): - with self.assertRaises(InvalidArgumentError): + with self.assertRaises(ArgumentTypeError): offset = offsetof('struct invalid_struct', 'test_member') def test_invalid_member(self): @@ -51,37 +50,37 @@ def test_struct_by_value(self): self.assertTrue(offset == 0) def test_ulong_by_name(self): - with self.assertRaises(InvalidArgumentTypeError): + with self.assertRaises(NotStructOrUnionError): offset = offsetof('unsigned long', 'test_member') def test_ulong_by_type(self): t = gdb.lookup_type("unsigned long") - with self.assertRaises(InvalidArgumentTypeError): + with self.assertRaises(NotStructOrUnionError): offset = offsetof(t, 'test_member') def test_ulong_by_type_pointer(self): t = gdb.lookup_type("unsigned long").pointer() - with self.assertRaises(InvalidArgumentTypeError): + with self.assertRaises(NotStructOrUnionError): offset = offsetof(t, 'test_member') def test_ulong_by_symbol(self): t = gdb.lookup_global_symbol('global_ulong_symbol') - with self.assertRaises(InvalidArgumentTypeError): + with self.assertRaises(NotStructOrUnionError): offset = offsetof(t, 'test_member') def test_ulong_by_value(self): t = gdb.lookup_global_symbol('global_ulong_symbol').value() - with self.assertRaises(InvalidArgumentTypeError): + with self.assertRaises(NotStructOrUnionError): offset = offsetof(t, 'test_member') def test_void_pointer_by_symbol(self): t = gdb.lookup_global_symbol('global_void_pointer_symbol') - with self.assertRaises(InvalidArgumentTypeError): + with self.assertRaises(NotStructOrUnionError): offset = offsetof(t, 'test_member') def 
test_void_pointer_by_value(self): t = gdb.lookup_global_symbol('global_void_pointer_symbol').value() - with self.assertRaises(InvalidArgumentTypeError): + with self.assertRaises(NotStructOrUnionError): offset = offsetof(t, 'test_member') def test_union_by_symbol(self): @@ -595,5 +594,5 @@ def test_container_of_bad_type(self): sym = getsym('embedded_struct_list_container') container = getsym('test_struct') self.assertTrue(sym.address != container.address) - with self.assertRaises(InvalidArgumentTypeError): + with self.assertRaises(NotStructOrUnionError): addr = container_of(sym, self.ulong, 'test_member') From f2a3000544e71be868c2d0219b6b74a28adede79 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Tue, 28 May 2019 11:48:19 -0400 Subject: [PATCH 145/367] crash.util: require gdb.Type for container_of/offset_of Once upon a time, we defined the interface for container_of and offset_of to be as flexible as possible. Nobody uses the flexibility, so let's specify that the type passed to container_of, offset_of, be gdb.Type. Signed-off-by: Jeff Mahoney --- crash/arch/__init__.py | 4 +- crash/arch/ppc64.py | 4 +- crash/arch/x86_64.py | 15 ++-- crash/commands/__init__.py | 4 +- crash/commands/mount.py | 5 +- crash/subsystem/storage/__init__.py | 20 ++--- crash/subsystem/storage/blocksq.py | 8 +- crash/types/classdev.py | 9 ++- crash/types/klist.py | 32 +++++++- crash/types/list.py | 64 +++++++++++++-- crash/types/node.py | 118 ++++++++++++++++++++++++---- crash/types/page.py | 6 +- crash/util/__init__.py | 70 ++++++++--------- tests/test_list.py | 14 ++-- tests/test_util.py | 30 +++---- 15 files changed, 292 insertions(+), 111 deletions(-) diff --git a/crash/arch/__init__.py b/crash/arch/__init__.py index a1eb80c9364..51f033395b9 100644 --- a/crash/arch/__init__.py +++ b/crash/arch/__init__.py @@ -1,11 +1,13 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +from typing import List + import gdb class CrashArchitecture(object): ident = "base-class" - aliases = None + aliases: List[str] = list() def __init__(self): pass diff --git a/crash/arch/ppc64.py b/crash/arch/ppc64.py index 3ab8eb7e159..1d361f6ef97 100644 --- a/crash/arch/ppc64.py +++ b/crash/arch/ppc64.py @@ -18,13 +18,13 @@ def __init__(self): # Stop stack traces with addresses below this self.filter = KernelFrameFilter(0xffff000000000000) - def setup_thread_info(self, thread): + def setup_thread_info(self, thread: gdb.InferiorThread) -> None: task = thread.info.task_struct thread_info = task['stack'].cast(self.thread_info_p_type) thread.info.set_thread_info(thread_info) @classmethod - def get_stack_pointer(cls, thread_struct): + def get_stack_pointer(cls, thread_struct: gdb.Value) -> gdb.Value: return thread_struct['ksp'] register(Powerpc64Architecture) diff --git a/crash/arch/x86_64.py b/crash/arch/x86_64.py index 4d72f1e9285..7febdd2ab79 100644 --- a/crash/arch/x86_64.py +++ b/crash/arch/x86_64.py @@ -33,12 +33,13 @@ def __init__(self): # Stop stack traces with addresses below this self.filter = KernelFrameFilter(0xffff000000000000) - def setup_thread_info(self, thread): + def setup_thread_info(self, thread: gdb.InferiorThread) -> None: task = thread.info.task_struct thread_info = task['stack'].cast(self.thread_info_p_type) thread.info.set_thread_info(thread_info) - def fetch_register_active(self, thread, register): + def fetch_register_active(self, thread: gdb.InferiorThread, + register: gdb.Register) -> None: task = thread.info for reg in task.regs: if reg == "rip" and (register != 16 and register != 
-1): @@ -48,7 +49,8 @@ def fetch_register_active(self, thread, register): except KeyError as e: pass - def fetch_register_scheduled_inactive(self, thread, register): + def fetch_register_scheduled_inactive(self, thread: gdb.InferiorThread, + register: gdb.Register) -> None: ulong_type = self.ulong_type task = thread.info.task_struct @@ -61,7 +63,7 @@ def fetch_register_scheduled_inactive(self, thread, register): if register == 16 or register == -1: thread.registers['rip'].value = frame['ret_addr'] if register == 16: - return True + return thread.registers['rbp'].value = frame['bp'] thread.registers['rbx'].value = frame['bx'] @@ -75,7 +77,8 @@ def fetch_register_scheduled_inactive(self, thread, register): thread.info.stack_pointer = rsp thread.info.valid_stack = True - def fetch_register_scheduled_thread_return(self, thread, register): + def fetch_register_scheduled_thread_return(self, thread: gdb.InferiorThread, + register: gdb.Register): ulong_type = self.ulong_type task = thread.info.task_struct @@ -114,7 +117,7 @@ def fetch_register_scheduled_thread_return(self, thread, register): thread.info.valid_stack = True @classmethod - def get_stack_pointer(cls, thread_struct): + def get_stack_pointer(cls, thread_struct: gdb.Value) -> gdb.Value: return thread_struct['sp'] register(x86_64Architecture) diff --git a/crash/commands/__init__.py b/crash/commands/__init__.py index a179c4ec9e8..8e0a3331477 100644 --- a/crash/commands/__init__.py +++ b/crash/commands/__init__.py @@ -1,6 +1,8 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +from typing import Dict + import gdb import os @@ -21,7 +23,7 @@ def error(self, message): raise CommandLineError(message) class Command(gdb.Command): - commands = {} + commands: Dict[str, gdb.Command] = dict() def __init__(self, name, parser=None): self.name = "py" + name if parser is None: diff --git a/crash/commands/mount.py b/crash/commands/mount.py index 7600a508fb6..f3cdedb0d2a 100644 --- a/crash/commands/mount.py +++ b/crash/commands/mount.py @@ -3,7 +3,9 @@ import gdb +from argparse import Namespace from crash.commands import Command, ArgumentParser +from crash.types.task import LinuxTask from crash.subsystem.filesystem.mount import MNT_NOSUID, MNT_NODEV, MNT_NOEXEC from crash.subsystem.filesystem.mount import MNT_NOATIME, MNT_NODIRATIME from crash.subsystem.filesystem.mount import MNT_RELATIME, MNT_READONLY @@ -49,7 +51,8 @@ def execute(self, args): for mnt in for_each_mount(): self.show_one_mount(mnt, args) - def show_one_mount(self, mnt, args, task=None): + def show_one_mount(self, mnt: gdb.Value, args: Namespace, + task: LinuxTask=None) -> None: if mnt.type.code == gdb.TYPE_CODE_PTR: mnt = mnt.dereference() diff --git a/crash/subsystem/storage/__init__.py b/crash/subsystem/storage/__init__.py index 7fd0860af19..3b6c0728d19 100644 --- a/crash/subsystem/storage/__init__.py +++ b/crash/subsystem/storage/__init__.py @@ -40,7 +40,7 @@ def for_each_bio_in_stack(bio: gdb.Value) -> Iterable[decoders.Decoder]: yield decoder decoder = next(decoder) -def dev_to_gendisk(dev): +def dev_to_gendisk(dev: gdb.Value) -> gdb.Value: """ Converts a struct device that is embedded in a struct gendisk back to the struct gendisk. @@ -55,7 +55,7 @@ def dev_to_gendisk(dev): """ return container_of(dev, types.gendisk_type, 'part0.__dev') -def dev_to_part(dev): +def dev_to_part(dev: gdb.Value) -> gdb.Value: """ Converts a struct device that is embedded in a struct hd_struct back to the struct hd_struct. 
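(Usage sketch, for orientation only and not part of the patch: these helpers are thin container_of() wrappers, so a struct device obtained from a gendisk converts straight back. It assumes an active crash-python session and that `disk` already holds a struct gendisk gdb.Value, e.g. one yielded by for_each_disk().)

    from crash.subsystem.storage import dev_to_gendisk, gendisk_to_dev

    # The struct device is embedded at disk['part0']['__dev'] ...
    dev = gendisk_to_dev(disk)

    # ... so container_of() on that embedded member recovers the gendisk.
    same_disk = dev_to_gendisk(dev)
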
@@ -71,7 +71,7 @@ def dev_to_part(dev): """ return container_of(dev, types.hd_struct_type, '__dev') -def gendisk_to_dev(gendisk): +def gendisk_to_dev(gendisk: gdb.Value) -> gdb.Value: """ Converts a struct gendisk that embeds a struct device to the struct device. @@ -87,7 +87,7 @@ def gendisk_to_dev(gendisk): return gendisk['part0']['__dev'].address -def part_to_dev(part): +def part_to_dev(part: gdb.Value) -> gdb.Value: """ Converts a struct hd_struct that embeds a struct device to the struct device. @@ -147,7 +147,7 @@ def for_each_block_device(subtype: gdb.Value=None) -> Iterable[gdb.Value]: raise RuntimeError("Encountered unexpected device type {}" .format(dev['type'])) -def for_each_disk(): +def for_each_disk() -> Iterable[gdb.Value]: """ Iterates over each block device registered with the block class that corresponds to an entire disk. @@ -157,7 +157,7 @@ def for_each_disk(): return for_each_block_device(symvals.disk_type) -def gendisk_name(gendisk): +def gendisk_name(gendisk: gdb.Value) -> str: """ Returns the name of the provided block device. @@ -189,7 +189,7 @@ def gendisk_name(gendisk): .format(types.gendisk_type, types.hd_struct_type, gendisk.type.unqualified())) -def block_device_name(bdev): +def block_device_name(bdev: gdb.Value) -> str: """ Returns the name of the provided block device. @@ -205,7 +205,7 @@ def block_device_name(bdev): """ return gendisk_name(bdev['bd_disk']) -def is_bdev_inode(inode): +def is_bdev_inode(inode: gdb.Value) -> bool: """ Tests whether the provided struct inode describes a block device @@ -221,7 +221,7 @@ def is_bdev_inode(inode): """ return inode['i_sb'] == symvals.blockdev_superblock -def inode_to_block_device(inode): +def inode_to_block_device(inode: gdb.Value) -> gdb.Value: """ Returns the block device associated with this inode. @@ -243,7 +243,7 @@ def inode_to_block_device(inode): raise InvalidArgumentError("inode does not correspond to block device") return container_of(inode, types.bdev_inode_type, 'vfs_inode')['bdev'] -def inode_on_bdev(inode): +def inode_on_bdev(inode: gdb.Value) -> gdb.Value: """ Returns the block device associated with this inode. 
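(Usage sketch, not part of the patch, assuming a crash-python session with a dump loaded; the printed device names are hypothetical.)

    from crash.subsystem.storage import for_each_block_device, gendisk_name

    # Unfiltered iteration yields both whole disks (struct gendisk) and
    # partitions (struct hd_struct); gendisk_name() accepts either, so
    # this prints names such as 'sda' or 'sda1'.
    for bdev in for_each_block_device():
        print(gendisk_name(bdev))
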
diff --git a/crash/subsystem/storage/blocksq.py b/crash/subsystem/storage/blocksq.py index 52a517f39d4..34e7d14827b 100644 --- a/crash/subsystem/storage/blocksq.py +++ b/crash/subsystem/storage/blocksq.py @@ -1,6 +1,8 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +from typing import Iterable, Tuple + import gdb from crash.util.symbols import Types @@ -12,7 +14,7 @@ class NoQueueError(RuntimeError): types = Types([ 'struct request' ]) -def for_each_request_in_queue(queue): +def for_each_request_in_queue(queue: gdb.Value) -> Iterable[gdb.Value]: """ Iterates over each struct request in request_queue @@ -32,7 +34,7 @@ def for_each_request_in_queue(queue): return list_for_each_entry(queue['queue_head'], types.request_type, 'queuelist') -def request_age_ms(request): +def request_age_ms(request: gdb.Value) -> int: """ Returns the age of the request in milliseconds @@ -49,7 +51,7 @@ def request_age_ms(request): """ return kernel.jiffies_to_msec(kernel.jiffies - request['start_time']) -def requests_in_flight(queue): +def requests_in_flight(queue: gdb.Value) -> Tuple[int, int]: """ Report how many requests are in flight for this queue diff --git a/crash/types/classdev.py b/crash/types/classdev.py index 8f8c40ece4e..d1f1e8a65ab 100644 --- a/crash/types/classdev.py +++ b/crash/types/classdev.py @@ -1,6 +1,8 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +from typing import Iterable + import gdb from crash.types.klist import klist_for_each @@ -14,15 +16,16 @@ class ClassdevState(object): #v5.1-rc1 moved knode_class from struct device to struct device_private @classmethod - def setup_iterator_type(cls, gdbtype): + def _setup_iterator_type(cls, gdbtype): if struct_has_member(gdbtype, 'knode_class'): cls.class_is_private = False type_cbs = TypeCallbacks([ ('struct device', - ClassdevState.setup_iterator_type) ]) + ClassdevState._setup_iterator_type) ]) -def for_each_class_device(class_struct, subtype=None): +def for_each_class_device(class_struct: gdb.Value, + subtype: gdb.Value=None) -> Iterable[gdb.Value]: klist = class_struct['p']['klist_devices'] container_type = types.device_type diff --git a/crash/types/klist.py b/crash/types/klist.py index cc6694781e8..9fc5fac480e 100644 --- a/crash/types/klist.py +++ b/crash/types/klist.py @@ -1,10 +1,12 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +from typing import Iterable + import gdb from crash.util import container_of from crash.types.list import list_for_each_entry -from crash.exceptions import CorruptedError +from crash.exceptions import CorruptedError, InvalidArgumentError from crash.util.symbols import Types @@ -13,7 +15,17 @@ class KlistCorruptedError(CorruptedError): pass -def klist_for_each(klist): +def klist_for_each(klist: gdb.Value) -> Iterable[gdb.Value]: + """ + Iterate over a klist and yield each node + + Args: + klist (gdb.Value): + The list to iterate + + Yields: + gdb.Value: The next node in the list + """ if klist.type == types.klist_type.pointer(): klist = klist.dereference() elif klist.type != types.klist_type: @@ -28,7 +40,21 @@ def klist_for_each(klist): raise KlistCorruptedError("Corrupted") yield node -def klist_for_each_entry(klist, gdbtype, member): +def klist_for_each_entry(klist: gdb.Value, gdbtype: gdb.Type, + member: str) -> gdb.Value: + """ + Iterate over a klist and yield each node's containing object + + Args: + klist (gdb.Value): + The list to iterate + gdbtype (gdb.Type): The type of the containing 
object + member (str): The name of the member in the containing object that + corresponds to the klist_node + + Yields: + gdb.Value: The next node in the list + """ for node in klist_for_each(klist): if node.type is not types.klist_node_type: types.override('struct klist_node', node.type) diff --git a/crash/types/list.py b/crash/types/list.py index eecd30d2b93..4660ca2b58c 100644 --- a/crash/types/list.py +++ b/crash/types/list.py @@ -1,6 +1,8 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +from typing import Iterator, Set + import gdb from crash.util import container_of from crash.util.symbols import Types @@ -17,8 +19,33 @@ class ListCycleError(CorruptListError): types = Types([ 'struct list_head' ]) -def list_for_each(list_head, include_head=False, reverse=False, - print_broken_links=True, exact_cycles=False): +def list_for_each(list_head: gdb.Value, include_head: bool=False, + reverse: bool=False, print_broken_links: bool=True, + exact_cycles: bool=False) -> Iterator[gdb.Value]: + """ + Iterate over a list and yield each node + + Args: + list_head (gdb.Value): + The list to iterate + include_head (bool, optional, default=False): + Include the head of the list in iteration - useful + for lists with no anchors + reverse (bool, optional, default=False): + Iterate the list in reverse order (follow the prev links) + print_broken_links (bool, optional, default=True): + Print warnings about broken links + exact_cycles (bool, optional, default=False): + Detect and raise an exception if a cycle is detected in the list + + Yields: + gdb.Value: The next node in the list + + Raises: + CorruptListError: the list is corrupted + ListCycleError: the list contains cycles + BufferError: portions of the list cannot be read + """ pending_exception = None if not isinstance(list_head, gdb.Value): raise ArgumentTypeError('list_head', list_head, gdb.Value) @@ -40,7 +67,7 @@ def list_for_each(list_head, include_head=False, reverse=False, prev_ = 'next' if exact_cycles: - visited = set() + visited: Set[int] = set() if include_head: yield list_head.address @@ -105,10 +132,33 @@ def list_for_each(list_head, include_head=False, reverse=False, if pending_exception is not None: raise pending_exception -def list_for_each_entry(list_head, gdbtype, member, - include_head=False, reverse=False, - print_broken_links=True, - exact_cycles=False): +def list_for_each_entry(list_head: gdb.Value, gdbtype: gdb.Type, + member: str, include_head: bool=False, + reverse: bool=False, print_broken_links: bool=True, + exact_cycles: bool=False) -> Iterator[gdb.Value]: + """ + Iterate over a list and yield each node's containing object + + Args: + list_head (gdb.Value): + The list to iterate + gdbtype (gdb.Type): The type of the containing object + member (str): The name of the member in the containing object that + corresponds to the list_head + include_head (bool, optional, default=False): + Include the head of the list in iteration - useful for + lists with no anchors + reverse (bool, optional, default=False): + Iterate the list in reverse order (follow the prev links) + print_broken_links (bool, optional, default=True): + Print warnings about broken links + exact_cycles (bool, optional, default=False): + Detect and raise an exception if a cycle is detected in the list + + Yields: + gdb.Value: The next node in the list + """ + for node in list_for_each(list_head, include_head=include_head, reverse=reverse, print_broken_links=print_broken_links, diff --git a/crash/types/node.py 
b/crash/types/node.py index 34db2196def..2a452106db6 100644 --- a/crash/types/node.py +++ b/crash/types/node.py @@ -1,29 +1,60 @@ #!/usr/bin/python3 # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +from typing import Iterable, List, Type, TypeVar + import gdb from crash.util.symbols import Symbols, Symvals, Types, SymbolCallbacks from crash.util import container_of, find_member_variant, get_symbol_value from crash.types.percpu import get_percpu_var from crash.types.bitmap import for_each_set_bit import crash.types.zone +from crash.exceptions import DelayedAttributeError symbols = Symbols([ 'numa_node' ]) symvals = Symvals([ 'numa_cpu_lookup_table', 'node_data' ]) types = Types([ 'pg_data_t', 'struct zone' ]) -def numa_node_id(cpu): +def numa_node_id(cpu: int) -> int: + """ + Return the NUMA node ID for a given CPU + + Args: + cpu (int): The CPU number to obtain the NUMA node ID + Returns: + int: The NUMA node ID for the specified CPU. + """ if gdb.current_target().arch.name() == "powerpc:common64": return int(symvals.numa_cpu_lookup_table[cpu]) else: return int(get_percpu_var(symbols.numa_node, cpu)) +NodeType = TypeVar('NodeType', bound='Node') + class Node(object): - @staticmethod - def from_nid(nid): - return Node(symvals.node_data[nid].dereference()) + """ + A wrapper around the Linux kernel 'struct node' structure + """ + @classmethod + def from_nid(cls: Type[NodeType], nid: int) -> NodeType: + """ + Obtain a Node using the NUMA Node ID (nid) + + Args: + nid (int): The NUMA Node ID - def for_each_zone(self): + Returns: + Node: the Node wrapper for the struct node for this NID + """ + return cls(symvals.node_data[nid].dereference()) + + def for_each_zone(self) -> Iterable[crash.types.zone.Zone]: + """ + Iterate over each zone contained in this NUMA node + + Yields: + Zone: The next Zone in this Node + """ node_zones = self.gdb_obj["node_zones"] ptr = int(node_zones[0].address) @@ -37,15 +68,22 @@ def for_each_zone(self): yield crash.types.zone.Zone(zone, zid) ptr += types.zone_type.sizeof - def __init__(self, obj): + def __init__(self, obj: gdb.Value): + """ + Initialize a Node using the gdb.Value for the struct node + + Args: + obj: gdb.Value: + The node for which to construct a wrapper + """ self.gdb_obj = obj class NodeStates(object): - nids_online = None - nids_possible = None + nids_online: List[int] = list() + nids_possible: List[int] = list() @classmethod - def setup_node_states(cls, node_states_sym): + def _setup_node_states(cls, node_states_sym): node_states = node_states_sym.value() enum_node_states = gdb.lookup_type("enum node_states") @@ -59,21 +97,73 @@ def setup_node_states(cls, node_states_sym): bits = node_states[N_ONLINE]["bits"] cls.nids_online = list(for_each_set_bit(bits)) -symbol_cbs = SymbolCallbacks([('node_states', NodeStates.setup_node_states)]) + def for_each_nid(self) -> Iterable[int]: + """ + Iterate over each NUMA Node ID + + Yields: + int: The next NUMA Node ID + """ + if not self.nids_possible: + raise DelayedAttributeError('node_states') + + for nid in self.nids_possible: + yield nid + + def for_each_online_nid(self) -> Iterable[int]: + """ + Iterate over each online NUMA Node ID + + Yields: + int: The next NUMA Node ID + """ + if not self.nids_online: + raise DelayedAttributeError('node_states') + + for nid in self.nids_online: + yield nid + +symbol_cbs = SymbolCallbacks([('node_states', NodeStates._setup_node_states)]) + +_state = NodeStates() def for_each_nid(): - for nid in NodeStates.nids_possible: + """ + Iterate over each NUMA Node ID 
+ + Yields: + int: The next NUMA Node ID + """ + for nid in _state.for_each_nid(): yield nid def for_each_online_nid(): - for nid in NodeStates.nids_online: + """ + Iterate over each online NUMA Node ID + + Yields: + int: The next NUMA Node ID + """ + for nid in _state.for_each_online_nid(): yield nid -def for_each_node(): +def for_each_node() -> Iterable[Node]: + """ + Iterate over each NUMA Node + + Yields: + int: The next NUMA Node + """ for nid in for_each_nid(): yield Node.from_nid(nid) -def for_each_online_node(): +def for_each_online_node() -> Iterable[Node]: + """ + Iterate over each Online NUMA Node + + Yields: + int: The next NUMA Node + """ for nid in for_each_online_nid(): yield Node.from_nid(nid) diff --git a/crash/types/page.py b/crash/types/page.py index 9385c9f2e0a..7d72d9828e1 100644 --- a/crash/types/page.py +++ b/crash/types/page.py @@ -1,6 +1,8 @@ #!/usr/bin/python3 # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +from typing import Dict + from math import log, ceil import gdb from crash.util import container_of, find_member_variant @@ -22,7 +24,7 @@ class Page(object): vmemmap_base = 0xffffea0000000000 vmemmap = None directmap_base = 0xffff880000000000 - pageflags = dict() + pageflags: Dict[str, int] = dict() PG_tail = None PG_slab = None @@ -37,6 +39,8 @@ class Page(object): # TODO have arch provide this? BITS_PER_LONG = None + PAGE_SIZE = 4096 + sparsemem = False @classmethod diff --git a/crash/util/__init__.py b/crash/util/__init__.py index b0e7d954e19..d284f88e66e 100644 --- a/crash/util/__init__.py +++ b/crash/util/__init__.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from typing import Union +from typing import Union, Tuple, List, Iterator, Dict import gdb import uuid @@ -12,6 +12,7 @@ from crash.exceptions import ArgumentTypeError, NotStructOrUnionError TypeSpecifier = Union [ gdb.Type, gdb.Value, str, gdb.Symbol ] +AddressSpecifier = Union [ gdb.Value, str, int ] class InvalidComponentError(LookupError): """An error occured while resolving the member specification""" @@ -49,7 +50,7 @@ def __init__(self, member, gdbtype): types = Types([ 'char *', 'uuid_t' ]) -def container_of(val, gdbtype, member): +def container_of(val: gdb.Value, gdbtype: gdb.Type, member) -> gdb.Value: """ Returns an object that contains the specified object at the given offset. @@ -57,8 +58,7 @@ def container_of(val, gdbtype, member): Args: val (gdb.Value): The value to be converted. It can refer to an allocated structure or a pointer. - gdbtype (gdb.Type, gdb.Value, str, gdb.Symbol): - The type of the object that will be generated + gdbtype (gdb.Type): The type of the object that will be generated member (str): The name of the member in the target struct that contains `val`. @@ -70,10 +70,11 @@ def container_of(val, gdbtype, member): """ if not isinstance(val, gdb.Value): raise ArgumentTypeError('val', type(val), gdb.Value) + if not isinstance(gdbtype, gdb.Type): + raise ArgumentTypeError('gdbtype', type(gdbtype), gdb.Type) charp = types.char_p_type if val.type.code != gdb.TYPE_CODE_PTR: val = val.address - gdbtype = resolve_type(gdbtype) offset = offsetof(gdbtype, member) return (val.cast(charp) - offset).cast(gdbtype.pointer()).dereference() @@ -100,13 +101,15 @@ def struct_has_member(gdbtype: TypeSpecifier, name: str) -> bool: TypeError: An invalid argument has been provided. 
""" + gdbtype = resolve_type(gdbtype) try: x = offsetof(gdbtype, name) return True except InvalidComponentError: return False -def get_symbol_value(symname, block=None, domain=None): +def get_symbol_value(symname: str, block: gdb.Block=None, + domain: int=None) -> gdb.Value: """ Returns the value associated with a named symbol @@ -128,7 +131,8 @@ def get_symbol_value(symname, block=None, domain=None): return sym.value() raise MissingSymbolError("Cannot locate symbol {}".format(symname)) -def safe_get_symbol_value(symname, block=None, domain=None): +def safe_get_symbol_value(symname: str, block: gdb.Block=None, + domain: int=None) -> gdb.Value: """ Returns the value associated with a named symbol @@ -148,7 +152,7 @@ def safe_get_symbol_value(symname, block=None, domain=None): except MissingSymbolError: return None -def resolve_type(val): +def resolve_type(val: TypeSpecifier) -> gdb.Type: """ Resolves a gdb.Type given a type, value, string, or symbol @@ -213,14 +217,15 @@ def __offsetof(val, spec, error): return (offset, gdbtype) -def offsetof_type(val, spec, error=True): +def offsetof_type(gdbtype: gdb.Type, member_name: str, + error: bool=True) -> Union[Tuple[int, gdb.Type], None]: """ Returns the offset and type of a named member of a structure Args: - val (gdb.Type, gdb.Symbol, gdb.Value, or str): The type that - contains the specified member, must be a struct or union - spec (str): The member of the member to resolve + gdbtype (gdb.Type): The type that contains the specified member, + must be a struct or union + member_name (str): The member of the member to resolve error (bool, optional, default=True): Whether to consider lookup failures an error @@ -230,17 +235,9 @@ def offsetof_type(val, spec, error=True): gdb.Type: The type of the resolved member Raises: - ArgumentTypeError: val is not of type gdb.Type - InvalidComponentError: spec is not valid for the type + ArgumentTypeError: gdbtype is not of type gdb.Type + InvalidComponentError: member_name is not valid for the type """ - gdbtype = None - try: - gdbtype = resolve_type(val) - except MissingTypeError as e: - pass - except TypeError as e: - pass - if not isinstance(gdbtype, gdb.Type): raise ArgumentTypeError('gdbtype', gdbtype, gdb.Type) @@ -253,21 +250,22 @@ def offsetof_type(val, spec, error=True): raise NotStructOrUnionError('gdbtype', gdbtype) try: - return __offsetof(gdbtype, spec, error) + return __offsetof(gdbtype, member_name, error) except _InvalidComponentBaseError as e: if error: - raise InvalidComponentError(gdbtype, spec, str(e)) + raise InvalidComponentError(gdbtype, member_name, str(e)) else: return None -def offsetof(val, spec, error=True): +def offsetof(gdbtype: gdb.Type, member_name: str, + error: bool=True) -> Union[int, None]: """ Returns the offset of a named member of a structure Args: - val (gdb.Type, gdb.Symbol, gdb.Value, or str): The type that - contains the specified member, must be a struct or union - spec (str): The member of the member to resolve + gdbtype (gdb.Type): The type that contains the specified member, + must be a struct or union + member_name (str): The member of the member to resolve error (bool, optional, default=True): Whether to consider lookup failures an error @@ -276,15 +274,15 @@ def offsetof(val, spec, error=True): None: The member could not be resolved Raises: - ArgumentTypeError: val is not a valid type - InvalidComponentError: spec is not valid for the type + ArgumentTypeError: gdbtype is not a valid type + InvalidComponentError: member_name is not valid for the type """ - res = 
offsetof_type(val, spec, error) + res = offsetof_type(gdbtype, member_name, error) if res: return res[0] return None -def find_member_variant(gdbtype, variants): +def find_member_variant(gdbtype: gdb.Type, variants: List[str]) -> str: """ Examines the given type and returns the first found member name @@ -308,7 +306,7 @@ def find_member_variant(gdbtype, variants): raise TypeError("Unrecognized '{}': could not find member '{}'" .format(str(gdbtype), variants[0])) -def safe_lookup_type(name, block=None): +def safe_lookup_type(name: str, block: gdb.Block=None) -> Union[gdb.Type, None]: """ Looks up a gdb.Type without throwing an exception on failure @@ -325,7 +323,7 @@ def safe_lookup_type(name, block=None): except gdb.error: return None -def array_size(value): +def array_size(value: gdb.Value) -> int: """ Returns the number of elements in an array @@ -337,7 +335,7 @@ def array_size(value): """ return value.type.sizeof // value[0].type.sizeof -def get_typed_pointer(val, gdbtype): +def get_typed_pointer(val: AddressSpecifier, gdbtype: gdb.Type) -> gdb.Type: """ Returns a pointer to the requested type at the given address @@ -374,7 +372,7 @@ def get_typed_pointer(val, gdbtype): return val -def array_for_each(value): +def array_for_each(value: gdb.Value) -> Iterator[gdb.Value]: """ Yields each element in an array separately diff --git a/tests/test_list.py b/tests/test_list.py index 7c96db71942..4a8dec2beab 100644 --- a/tests/test_list.py +++ b/tests/test_list.py @@ -91,12 +91,10 @@ def test_normal_container_list_with_string(self): short_list = get_symbol("good_containers") expected_count = short_list.type.sizeof // short_list[0].type.sizeof - count = 0 - for node in list_for_each_entry(normal_list, 'struct container', - 'list'): - count += 1 - - self.assertTrue(count == expected_count) + with self.assertRaises(ArgumentTypeError): + for node in list_for_each_entry(normal_list, 'struct container', + 'list'): + count += 1 def test_normal_container_list_with_type(self): normal_list = get_symbol("good_container_list") @@ -114,7 +112,7 @@ def test_cycle_container_list_with_string(self): expected_count = short_list.type.sizeof // short_list[0].type.sizeof count = 0 - with self.assertRaises(ListCycleError): + with self.assertRaises(ArgumentTypeError): for node in list_for_each_entry(cycle_list, 'struct container', 'list', exact_cycles=True, print_broken_links=False): @@ -139,7 +137,7 @@ def test_bad_container_list_with_string(self): expected_count = short_list.type.sizeof // short_list[0].type.sizeof count = 0 - with self.assertRaises(CorruptListError): + with self.assertRaises(ArgumentTypeError): for node in list_for_each_entry(bad_list, 'struct container', 'list', print_broken_links=False): count += 1 diff --git a/tests/test_util.py b/tests/test_util.py index f6e36b8080b..dd3fdf2ae32 100644 --- a/tests/test_util.py +++ b/tests/test_util.py @@ -28,8 +28,8 @@ def test_invalid_python_type(self): offset = offsetof(self, 'dontcare') def test_type_by_string_name(self): - offset = offsetof('struct test', 'test_member') - self.assertTrue(offset == 0) + with self.assertRaises(ArgumentTypeError): + offset = offsetof('struct test', 'test_member') def test_type_by_invalid_name(self): with self.assertRaises(ArgumentTypeError): @@ -41,16 +41,16 @@ def test_invalid_member(self): def test_struct_by_symbol(self): val = gdb.lookup_global_symbol("global_struct_symbol") - offset = offsetof(val, 'test_member') - self.assertTrue(offset == 0) + with self.assertRaises(ArgumentTypeError): + offset = offsetof(val, 
'test_member') def test_struct_by_value(self): val = gdb.lookup_global_symbol("global_struct_symbol").value() - offset = offsetof(val, 'test_member') - self.assertTrue(offset == 0) + with self.assertRaises(ArgumentTypeError): + offset = offsetof(val, 'test_member') def test_ulong_by_name(self): - with self.assertRaises(NotStructOrUnionError): + with self.assertRaises(ArgumentTypeError): offset = offsetof('unsigned long', 'test_member') def test_ulong_by_type(self): @@ -65,33 +65,33 @@ def test_ulong_by_type_pointer(self): def test_ulong_by_symbol(self): t = gdb.lookup_global_symbol('global_ulong_symbol') - with self.assertRaises(NotStructOrUnionError): + with self.assertRaises(ArgumentTypeError): offset = offsetof(t, 'test_member') def test_ulong_by_value(self): t = gdb.lookup_global_symbol('global_ulong_symbol').value() - with self.assertRaises(NotStructOrUnionError): + with self.assertRaises(ArgumentTypeError): offset = offsetof(t, 'test_member') def test_void_pointer_by_symbol(self): t = gdb.lookup_global_symbol('global_void_pointer_symbol') - with self.assertRaises(NotStructOrUnionError): + with self.assertRaises(ArgumentTypeError): offset = offsetof(t, 'test_member') def test_void_pointer_by_value(self): t = gdb.lookup_global_symbol('global_void_pointer_symbol').value() - with self.assertRaises(NotStructOrUnionError): + with self.assertRaises(ArgumentTypeError): offset = offsetof(t, 'test_member') def test_union_by_symbol(self): t = gdb.lookup_global_symbol('global_union_symbol') - offset = offsetof(t, 'member1') - self.assertTrue(offset == 0) + with self.assertRaises(ArgumentTypeError): + offset = offsetof(t, 'member1') def test_union_by_value(self): t = gdb.lookup_global_symbol('global_union_symbol').value() - offset = offsetof(t, 'member1') - self.assertTrue(offset == 0) + with self.assertRaises(ArgumentTypeError): + offset = offsetof(t, 'member1') def test_struct(self): offset = offsetof(self.test_struct, 'test_member') From 6ebdac1dec77e44b54c9fd9f2ae3ae339d02ec2f Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Wed, 22 May 2019 17:34:19 -0400 Subject: [PATCH 146/367] crash.subsystem.storage: fix documentation and return values There were several places where we were returning pointers to structures instead of the structures directly. That did not match what the documentation claimed would be returned. This commit fixes those cases and adds test cases for storage. 
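For example, gendisk_to_dev() is documented to return the embedded
struct device, but it returned a pointer to it instead. For a struct
gendisk value (call it gendisk), the two styles differ only by .address:

    dev   = gendisk['part0']['__dev']           # struct device, as documented
    dev_p = gendisk['part0']['__dev'].address   # struct device *, as returned

    # Callers written against the documentation then needed an extra
    # dereference to get back to the struct itself:
    dev_p.dereference()
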
Signed-off-by: Jeff Mahoney --- crash/subsystem/storage/__init__.py | 14 +- kernel-tests/test_subsystem_storage.py | 311 +++++++++++++++++++++++++ 2 files changed, 319 insertions(+), 6 deletions(-) create mode 100644 kernel-tests/test_subsystem_storage.py diff --git a/crash/subsystem/storage/__init__.py b/crash/subsystem/storage/__init__.py index 3b6c0728d19..264c5b9add7 100644 --- a/crash/subsystem/storage/__init__.py +++ b/crash/subsystem/storage/__init__.py @@ -85,7 +85,7 @@ def gendisk_to_dev(gendisk: gdb.Value) -> gdb.Value: gdb.Value: The converted struct device """ - return gendisk['part0']['__dev'].address + return gendisk['part0']['__dev'] def part_to_dev(part: gdb.Value) -> gdb.Value: """ @@ -100,7 +100,7 @@ def part_to_dev(part: gdb.Value) -> gdb.Value: Returns: gdb.Value: The converted struct device """ - return part['__dev'].address + return part['__dev'] def for_each_block_device(subtype: gdb.Value=None) -> Iterable[gdb.Value]: @@ -118,7 +118,7 @@ def for_each_block_device(subtype: gdb.Value=None) -> Iterable[gdb.Value]: Args: subtype (gdb.Value, optional): The struct device_type that will be used to match and filter. Typically - 'disk_type' or 'device_type' + 'disk_type' or 'part_type' Yields: gdb.Value or @@ -127,8 +127,9 @@ def for_each_block_device(subtype: gdb.Value=None) -> Iterable[gdb.Value]: the filter criteria. Raises: - RuntimeError: An unknown device type was encountered during - iteration. + RuntimeError: An unknown device type was encountered during iteration. + TypeError: The provided subtype was not of `struct device_type' or + `struct device type *' """ if subtype: @@ -205,6 +206,7 @@ def block_device_name(bdev: gdb.Value) -> str: """ return gendisk_name(bdev['bd_disk']) + def is_bdev_inode(inode: gdb.Value) -> bool: """ Tests whether the provided struct inode describes a block device @@ -262,7 +264,7 @@ def inode_on_bdev(inode: gdb.Value) -> gdb.Value: if is_bdev_inode(inode): return inode_to_block_device(inode) else: - return inode['i_sb']['s_bdev'] + return inode['i_sb']['s_bdev'].dereference() def _check_types(result): try: diff --git a/kernel-tests/test_subsystem_storage.py b/kernel-tests/test_subsystem_storage.py new file mode 100644 index 00000000000..c16da889fa2 --- /dev/null +++ b/kernel-tests/test_subsystem_storage.py @@ -0,0 +1,311 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +import unittest +import gdb +import io +import sys +import re + +import crash.subsystem.storage as storage +import crash.subsystem.filesystem as fs +import crash.util as util +from crash.types.list import list_for_each_entry + +class TestSubsystemFilesystem(unittest.TestCase): + nullptr = 0x0 + poisonptr = 0xdead000000000100 + + def setUp(self): + self.char_p_type = gdb.lookup_type('char').pointer() + self.super_block_type = gdb.lookup_type('struct super_block') + self.inode_type = gdb.lookup_type('struct inode') + self.device_type = gdb.lookup_type('struct device') + self.block_device_type = gdb.lookup_type('struct block_device') + self.gendisk_type = gdb.lookup_type('struct gendisk') + self.hd_struct_type = gdb.lookup_type('struct hd_struct') + + def get_blockdev_superblock(self): + return gdb.lookup_symbol('blockdev_superblock', None)[0].value() + + def get_block_device(self): + all_bdevs = gdb.lookup_symbol('all_bdevs', None)[0].value() + for bdev in list_for_each_entry(all_bdevs, self.block_device_type, + 'bd_list'): + if int(bdev['bd_disk']) != 0 and int(bdev['bd_part']) != 0: + return bdev + return None + + def 
get_gendisk(self): + all_bdevs = gdb.lookup_symbol('all_bdevs', None)[0].value() + for bdev in list_for_each_entry(all_bdevs, self.block_device_type, + 'bd_list'): + if int(bdev['bd_disk']) != 0: + return bdev['bd_disk'].dereference() + return None + + def get_hd_struct(self): + all_bdevs = gdb.lookup_symbol('all_bdevs', None)[0].value() + for bdev in list_for_each_entry(all_bdevs, self.block_device_type, + 'bd_list'): + if int(bdev['bd_part']) != 0: + return bdev['bd_part'].dereference() + return None + + def get_filesystem_inode(self): + for sb in fs.for_each_super_block(): + if fs.super_fstype(sb) != "bdev": + return sb['s_root']['d_inode'].dereference() + + raise RuntimeError("No file system supers?") + + def get_block_device_inode(self): + bdev_sb = self.get_blockdev_superblock() + for inode in list_for_each_entry(bdev_sb['s_inodes'], self.inode_type, + 'i_sb_list'): + return inode + + def get_blockdev_filesystem(self): + for sb in fs.for_each_super_block(): + fstype = sb['s_type']['name'].string() + print(f"{int(sb['s_bdev']):#x} name={fstype}") + if int(sb['s_bdev']) != 0: + return sb + + raise RuntimeError("No block device supers?") + + @unittest.skip + def test_for_each_bio_in_stack(self): + """This requires a dump that has a bio in flight to test""" + pass + + def test_for_each_block_device_unfiltered(self): + disk_type = storage.symvals.disk_type + part_type = storage.symvals.part_type + for bdev in storage.for_each_block_device(): + self.assertTrue(type(bdev) is gdb.Value) + self.assertTrue(bdev.type == storage.types.gendisk_type or + bdev.type == storage.types.hd_struct_type) + + def test_for_each_block_device_filtered_for_disk(self): + disk_type = storage.symvals.disk_type + for bdev in storage.for_each_block_device(disk_type): + self.assertTrue(type(bdev) is gdb.Value) + self.assertTrue(bdev.type == storage.types.gendisk_type) + + def test_for_each_block_device_filtered_nullptr(self): + null_type = util.get_typed_pointer(self.nullptr, + storage.types.device_type_type) + + # The pointer is only used for comparison so we won't raise + # an exception but we won't get any results either. + for bdev in storage.for_each_block_device(null_type.dereference()): + self.assertTrue(False) + + def test_for_each_block_device_filtered_poisonptr(self): + null_type = util.get_typed_pointer(self.poisonptr, + storage.types.device_type_type) + + # The pointer is only used for comparison so we won't raise + # an exception but we won't get any results either. 
+ for bdev in storage.for_each_block_device(null_type.dereference()): + self.assertTrue(False) + + def test_for_each_disk(self): + for bdev in storage.for_each_disk(): + self.assertTrue(type(bdev) is gdb.Value) + self.assertTrue(bdev.type == storage.types.gendisk_type) + + def test_for_each_block_device_filtered_for_partitions(self): + part_type = storage.symvals.part_type + for bdev in storage.for_each_block_device(part_type): + self.assertTrue(type(bdev) is gdb.Value) + self.assertTrue(bdev.type == storage.types.hd_struct_type) + + def test_block_device_name(self): + bdev = self.get_block_device() + self.assertTrue(type(bdev) is gdb.Value) + self.assertTrue(bdev.type == self.block_device_type) + name = storage.block_device_name(bdev) + self.assertTrue(type(name) is str) + + def test_block_device_name_nullptr(self): + bdev = util.get_typed_pointer(self.nullptr, self.block_device_type).dereference() + self.assertTrue(type(bdev) is gdb.Value) + self.assertTrue(bdev.type == self.block_device_type) + with self.assertRaises(gdb.NotAvailableError): + name = storage.block_device_name(bdev) + + def test_block_device_name_poisonptr(self): + bdev = util.get_typed_pointer(self.poisonptr, self.block_device_type).dereference() + self.assertTrue(type(bdev) is gdb.Value) + self.assertTrue(bdev.type == self.block_device_type) + with self.assertRaises(gdb.NotAvailableError): + name = storage.block_device_name(bdev) + + def test_is_bdev_inode(self): + inode = self.get_block_device_inode() + self.assertTrue(type(inode) is gdb.Value) + self.assertTrue(inode.type == self.inode_type) + self.assertTrue(storage.is_bdev_inode(inode)) + + def test_is_bdev_inode_fs_inode(self): + inode = self.get_filesystem_inode() + self.assertTrue(type(inode) is gdb.Value) + self.assertTrue(inode.type == self.inode_type) + self.assertFalse(storage.is_bdev_inode(inode)) + + def test_is_bdev_inode_null_inode(self): + inode = util.get_typed_pointer(self.nullptr, self.inode_type) + inode = inode.dereference() + self.assertTrue(type(inode) is gdb.Value) + self.assertTrue(inode.type == self.inode_type) + with self.assertRaises(gdb.NotAvailableError): + x = storage.is_bdev_inode(inode) + + def test_is_bdev_inode_poison_inode(self): + inode = util.get_typed_pointer(self.poisonptr, self.inode_type) + inode = inode.dereference() + self.assertTrue(type(inode) is gdb.Value) + self.assertTrue(inode.type == self.inode_type) + with self.assertRaises(gdb.NotAvailableError): + x = storage.is_bdev_inode(inode) + + def test_inode_on_bdev_bdev_inode(self): + bdev_sb = self.get_blockdev_superblock() + inode = self.get_block_device_inode() + self.assertTrue(type(inode) is gdb.Value) + self.assertTrue(inode.type == self.inode_type) + bdev = storage.inode_on_bdev(inode) + self.assertTrue(type(bdev) is gdb.Value) + self.assertTrue(bdev.type == self.block_device_type) + self.assertTrue(inode['i_sb'] == bdev_sb) + self.assertTrue(fs.super_fstype(inode['i_sb']) == "bdev") + + def test_inode_on_bdev_fs_inode(self): + bdev_sb = self.get_blockdev_superblock() + inode = self.get_filesystem_inode() + self.assertTrue(type(inode) is gdb.Value) + self.assertTrue(inode.type == self.inode_type) + bdev = storage.inode_on_bdev(inode) + self.assertTrue(type(bdev) is gdb.Value) + self.assertTrue(bdev.type == self.block_device_type) + self.assertFalse(inode['i_sb'] == bdev_sb) + self.assertFalse(fs.super_fstype(inode['i_sb']) == "bdev") + + def test_inode_on_bdev_null_inode(self): + inode = util.get_typed_pointer(self.nullptr, self.inode_type) + inode = 
inode.dereference() + self.assertTrue(type(inode) is gdb.Value) + self.assertTrue(inode.type == self.inode_type) + with self.assertRaises(gdb.NotAvailableError): + bdev = storage.inode_on_bdev(inode) + + def test_inode_on_bdev_poison_inode(self): + inode = util.get_typed_pointer(self.poisonptr, self.inode_type) + inode = inode.dereference() + self.assertTrue(type(inode) is gdb.Value) + self.assertTrue(inode.type == self.inode_type) + with self.assertRaises(gdb.NotAvailableError): + bdev = storage.inode_on_bdev(inode) + + def test_inode_to_block_device_bdev_inode(self): + inode = self.get_block_device_inode() + self.assertTrue(type(inode) is gdb.Value) + self.assertTrue(inode.type == self.inode_type) + bdev = storage.inode_to_block_device(inode) + self.assertTrue(type(bdev) is gdb.Value) + self.assertTrue(bdev.type == self.block_device_type) + + def test_inode_to_block_device_filesystem_inode(self): + inode = self.get_filesystem_inode() + self.assertTrue(type(inode) is gdb.Value) + self.assertTrue(inode.type == self.inode_type) + + with self.assertRaises(TypeError): + bdev = storage.inode_to_block_device(inode) + + def test_inode_to_block_device_null_inode(self): + inode = util.get_typed_pointer(self.nullptr, self.inode_type) + inode = inode.dereference() + self.assertTrue(type(inode) is gdb.Value) + self.assertTrue(inode.type == self.inode_type) + + with self.assertRaises(gdb.NotAvailableError): + bdev = storage.inode_to_block_device(inode) + + def test_inode_to_block_device_poison_inode(self): + inode = util.get_typed_pointer(self.poisonptr, self.inode_type) + inode = inode.dereference() + self.assertTrue(type(inode) is gdb.Value) + self.assertTrue(inode.type == self.inode_type) + + with self.assertRaises(gdb.NotAvailableError): + bdev = storage.inode_to_block_device(inode) + + def test_gendisk_name_disk(self): + bdev = self.get_block_device() + name = storage.gendisk_name(bdev['bd_disk']) + self.assertTrue(type(name) is str) + + def test_gendisk_name_part(self): + bdev = self.get_block_device() + name = storage.gendisk_name(bdev['bd_part']) + self.assertTrue(type(name) is str) + + def test_gendisk_name_disk_null_bdev(self): + bdev = util.get_typed_pointer(self.nullptr, self.block_device_type) + bdev = bdev.dereference() + with self.assertRaises(gdb.NotAvailableError): + name = storage.gendisk_name(bdev['bd_disk']) + + def test_gendisk_name_disk_poison_bdev(self): + bdev = util.get_typed_pointer(self.poisonptr, self.block_device_type) + bdev = bdev.dereference() + with self.assertRaises(gdb.NotAvailableError): + name = storage.gendisk_name(bdev['bd_disk']) + + def test_gendisk_to_dev(self): + gendisk = self.get_gendisk() + self.assertTrue(type(gendisk) is gdb.Value) + self.assertTrue(gendisk.type == self.gendisk_type) + + dev = storage.gendisk_to_dev(gendisk) + self.assertTrue(type(dev) is gdb.Value) + self.assertTrue(dev.type == self.device_type) + + def test_part_to_dev(self): + part = self.get_hd_struct() + self.assertTrue(type(part) is gdb.Value) + self.assertTrue(part.type == self.hd_struct_type) + + dev = storage.part_to_dev(part) + self.assertTrue(type(dev) is gdb.Value) + self.assertTrue(dev.type == self.device_type) + + def test_dev_to_gendisk(self): + gendisk = self.get_gendisk() + self.assertTrue(type(gendisk) is gdb.Value) + self.assertTrue(gendisk.type == self.gendisk_type) + + dev = storage.gendisk_to_dev(gendisk) + self.assertTrue(type(dev) is gdb.Value) + self.assertTrue(dev.type == self.device_type) + + ngendisk = storage.dev_to_gendisk(dev) + 
self.assertTrue(type(ngendisk) is gdb.Value) + self.assertTrue(ngendisk.type == self.gendisk_type) + self.assertTrue(gendisk == ngendisk) + + def test_dev_to_part(self): + hd_struct = self.get_hd_struct() + self.assertTrue(type(hd_struct) is gdb.Value) + self.assertTrue(hd_struct.type == self.hd_struct_type) + + dev = storage.part_to_dev(hd_struct) + self.assertTrue(type(dev) is gdb.Value) + self.assertTrue(dev.type == self.device_type) + + nhd_struct = storage.dev_to_part(dev) + self.assertTrue(type(nhd_struct) is gdb.Value) + self.assertTrue(nhd_struct.type == self.hd_struct_type) + self.assertTrue(hd_struct == nhd_struct) From 886884cb0818e54bee16fc16cfc09b9e75b2198f Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Wed, 29 May 2019 16:15:04 -0400 Subject: [PATCH 147/367] crash.commands: explicity define help text using ::format_help This commit stops (ab)using the class docstring for the command to contain help text. There was nothing inherently wrong with doing it but it means that the entire command help text appears in the API documentation and needs to be properly formatted rst, which isn't what we want or need. Commands now provide a ::format_help method that returns a string containing the help text. All leading and trailing whitespace will be stripped prior to printing so we have some flexibility as far as how to declare the strings. The class docstring will now only contain the summary, which is more suitable for inclusion in the API documentation. Signed-off-by: Jeff Mahoney --- crash/commands/__init__.py | 11 +++++++---- crash/commands/btrfs.py | 21 ++++++++++++++------ crash/commands/dmesg.py | 18 ++++++++++++++---- crash/commands/help.py | 39 +++++++++++++++++++++----------------- crash/commands/kmem.py | 18 ++++++++++++++---- crash/commands/lsmod.py | 18 ++++++++++++++---- crash/commands/mount.py | 17 ++++++++++++++--- crash/commands/ps.py | 16 +++++++++++++--- crash/commands/syscmd.py | 18 +++++++++++++----- crash/commands/task.py | 19 +++++++++++++++---- crash/commands/vtop.py | 18 ++++++++++++++---- crash/commands/xfs.py | 19 ++++++++++++++----- 12 files changed, 169 insertions(+), 63 deletions(-) diff --git a/crash/commands/__init__.py b/crash/commands/__init__.py index 8e0a3331477..599ef3ff7ef 100644 --- a/crash/commands/__init__.py +++ b/crash/commands/__init__.py @@ -31,14 +31,17 @@ def __init__(self, name, parser=None): elif not isinstance(parser, ArgumentParser): raise ArgumentTypeError('parser', parser, ArgumentParser) - nl = "" - if self.__doc__[-1] != '\n': - nl = "\n" - parser.format_help = lambda: self.__doc__ + nl self.parser = parser + parser.format_help = self._format_help self.commands[self.name] = self gdb.Command.__init__(self, self.name, gdb.COMMAND_USER) + def _format_help(self) -> str: + try: + return self.format_help().strip() + "\n" + except AttributeError: + return "\n" + def invoke_uncaught(self, argstr, from_tty=False): argv = gdb.string_to_argv(argstr) args = self.parser.parse_args(argv) diff --git a/crash/commands/btrfs.py b/crash/commands/btrfs.py index e32c609ab23..c63827348a1 100644 --- a/crash/commands/btrfs.py +++ b/crash/commands/btrfs.py @@ -1,8 +1,6 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -import gdb - from argparse import Namespace from crash.commands import Command, ArgumentParser from crash.commands import CommandLineError @@ -10,9 +8,7 @@ from crash.subsystem.filesystem import for_each_super_block, super_fstype from crash.subsystem.filesystem.btrfs import btrfs_fsid, 
btrfs_metadata_uuid -class BtrfsCommand(Command): - """display Btrfs internal data structures - +btrfs_help_text = """ NAME btrfs - display Btrfs internal data structures @@ -20,7 +16,11 @@ class BtrfsCommand(Command): btrfs COMMANDS - btrfs list [-m] - list all btrfs file systems (-m to show metadata uuid)""" + btrfs list [-m] - list all btrfs file systems (-m to show metadata uuid) +""" + +class BtrfsCommand(Command): + """display Btrfs internal data structures""" def __init__(self, name): parser = ArgumentParser(prog=name) @@ -32,6 +32,15 @@ def __init__(self, name): parser.format_usage = lambda: 'btrfs [args...]\n' Command.__init__(self, name, parser) + def format_help(self) -> str: + """ + Returns the help text for the btrfs command + + Returns: + :obj:`str`: The help text for the btrfs command. + """ + return btrfs_help_text + def list_btrfs(self, args: Namespace) -> None: print_header = True count = 0 diff --git a/crash/commands/dmesg.py b/crash/commands/dmesg.py index 7b730fdf95d..190b98a10a8 100644 --- a/crash/commands/dmesg.py +++ b/crash/commands/dmesg.py @@ -20,9 +20,7 @@ class LogTypeException(Exception): class LogInvalidOption(Exception): pass -class LogCommand(Command): - """dump system message buffer - +log_help_text = """ NAME log - dump system message buffer @@ -141,8 +139,11 @@ class LogCommand(Command): SUBSYSTEM=pci DEVICE=+pci:0000:ff:03.1 ... +""" + +class LogCommand(Command): + """dump system message buffer""" - """ def __init__(self, name): parser = ArgumentParser(prog=name) @@ -153,6 +154,15 @@ def __init__(self, name): parser.format_usage = lambda: 'log [-tdm]\n' Command.__init__(self, name, parser) + def format_help(self) -> str: + """ + Returns the help text for the log command + + Returns: + :obj:`str`: The help text for the log command. + """ + return log_help_text + @classmethod def filter_unstructured_log(cls, log, args): lines = log.split('\n') diff --git a/crash/commands/help.py b/crash/commands/help.py index 8d0b4d13d57..8e082633ed9 100644 --- a/crash/commands/help.py +++ b/crash/commands/help.py @@ -3,11 +3,9 @@ import gdb import argparse -from crash.commands import Command, ArgumentParser +from crash.commands import Command, CommandError, ArgumentParser -class HelpCommand(Command): - """ this command - +help_text = """ NAME help - display help for crash commands @@ -20,33 +18,40 @@ class HelpCommand(Command): text for that command will be printed. """ +class HelpCommand(Command): + """ this command""" + def __init__(self): parser = ArgumentParser(prog="help") parser.add_argument('args', nargs=argparse.REMAINDER) super().__init__('help', parser) + def format_help(self) -> str: + """ + Returns the help text for the help command + + Returns: + :obj:`str`: The help text for the help command. 
+ """ + return help_text + def execute(self, argv): if not argv.args: print("Available commands:") for cmd in sorted(self.commands): - text = self.commands[cmd].__doc__ - if text: - summary = text.split('\n')[0].strip() - else: + summary = self.commands[cmd].__doc__.strip() + if not summary: summary = "no help text provided" print("{:<15} - {}".format(cmd, summary)) else: for cmd in argv.args: try: - text = self.commands[cmd].__doc__ - if text is None: - print("No help text available.") - f = text.find(" ") - if f == -1: - print(text) - else: - print(text[f+1:]) + text = self.commands[cmd].format_help().strip() except KeyError: - print("No such command `{}'".format(cmd)) + raise CommandError("No such command `{}'".format(cmd)) + if text is None: + print("No help text available.") + else: + print(text) HelpCommand() diff --git a/crash/commands/kmem.py b/crash/commands/kmem.py index 38f8cc32a7c..bd877092a2c 100644 --- a/crash/commands/kmem.py +++ b/crash/commands/kmem.py @@ -12,9 +12,7 @@ from crash.util import get_symbol_value from crash.exceptions import MissingSymbolError -class KmemCommand(Command): - """ kernel memory inspection - +kmem_help_text = """ NAME kmem - kernel memory inspection @@ -26,7 +24,10 @@ class KmemCommand(Command): DESCRIPTION This command currently offers very basic kmem cache query and checking. - """ +""" + +class KmemCommand(Command): + """ kernel memory inspection""" def __init__(self, name): parser = ArgumentParser(prog=name) @@ -40,6 +41,15 @@ def __init__(self, name): super().__init__(name, parser) + def format_help(self) -> str: + """ + Returns the help text for the kmem command + + Returns: + :obj:`str`: The help text for the kmem command. + """ + return kmem_help_text + def execute(self, args): if args.z: self.print_zones() diff --git a/crash/commands/lsmod.py b/crash/commands/lsmod.py index 737f59c8ffb..4dd5cac5c10 100644 --- a/crash/commands/lsmod.py +++ b/crash/commands/lsmod.py @@ -13,9 +13,7 @@ from crash.types.percpu import get_percpu_var import crash.types.percpu -class ModuleCommand(Command): - """display module information - +lsmod_help_text = """ NAME lsmod - display module information @@ -33,8 +31,11 @@ class ModuleCommand(Command): -p display the percpu base for the module and the size of its region -p CPU# display the percpu base for the module and the size of its region for the specified CPU number - """ + +class ModuleCommand(Command): + """display module information""" + def __init__(self): parser = ArgumentParser(prog="lsmod") @@ -47,6 +48,15 @@ def __init__(self): self.module_use_type = gdb.lookup_type('struct module_use') + def format_help(self) -> str: + """ + Returns the help text for the lsmod command + + Returns: + :obj:`str`: The help text for the lsmod command. 
+ """ + return lsmod_help_text + def print_module_percpu(self, mod, cpu=-1): cpu = int(cpu) addr = int(mod['percpu']) diff --git a/crash/commands/mount.py b/crash/commands/mount.py index f3cdedb0d2a..e21cf163ffd 100644 --- a/crash/commands/mount.py +++ b/crash/commands/mount.py @@ -16,9 +16,7 @@ from crash.subsystem.filesystem.mount import mount_super, mount_flags from crash.subsystem.filesystem.mount import mount_root -class MountCommand(Command): - """display mounted file systems - +mount_help_text = """ NAME mount - display mounted file systems @@ -26,6 +24,10 @@ class MountCommand(Command): -v display superblock and vfsmount addresses -d display device obtained from super_block """ + +class MountCommand(Command): + """display mounted file systems""" + def __init__(self, name): parser = ArgumentParser(prog=name) @@ -36,6 +38,15 @@ def __init__(self, name): parser.format_usage = lambda : "mount\n" super().__init__(name, parser) + def format_help(self) -> str: + """ + Returns the help text for the mount command + + Returns: + :obj:`str`: The help text for the mount command. + """ + return mount_help_text + def __getattr__(self, name): if name == 'charp': self.charp = gdb.lookup_type('char').pointer() diff --git a/crash/commands/ps.py b/crash/commands/ps.py index ee170d8d476..9730d993919 100755 --- a/crash/commands/ps.py +++ b/crash/commands/ps.py @@ -153,9 +153,7 @@ def format_header(self) -> str: """ return self._format_header() -class PSCommand(Command): - """display process status information - +ps_help_text = """ NAME ps - display process status information @@ -525,6 +523,9 @@ class PSCommand(Command): 15 2 2 ffff880212989710 IN 0.0 0 0 [migration/2] 20 2 3 ffff8802129a9710 IN 0.0 0 0 [migration/3] """ + +class PSCommand(Command): + """display process status information""" def __init__(self): parser = ArgumentParser(prog="ps") @@ -553,6 +554,15 @@ def __init__(self): Command.__init__(self, "ps", parser) + def format_help(self) -> str: + """ + Returns the help text for the ps command + + Returns: + :obj:`str`: The help text for the ps command. + """ + return ps_help_text + def task_state_string(self, task): state = task.task_state() buf = None diff --git a/crash/commands/syscmd.py b/crash/commands/syscmd.py index 8f11a9b90db..b2ef256936b 100644 --- a/crash/commands/syscmd.py +++ b/crash/commands/syscmd.py @@ -6,9 +6,7 @@ from crash.commands import CommandLineError from crash.cache.syscache import utsname, config, kernel -class SysCommand(Command): - """system data - +sys_help_text = """ NAME sys - system data @@ -38,9 +36,10 @@ class SysCommand(Command): VERSION: #24 SMP Mon Oct 11 17:41:40 CDT 1999 MACHINE: i686 (500 MHz) MEMORY: 1 GB +""" - - """ +class SysCommand(Command): + """system data""" def __init__(self, name): parser = ArgumentParser(prog=name) @@ -50,6 +49,15 @@ def __init__(self, name): parser.format_usage = lambda: "sys [config]\n" Command.__init__(self, name, parser) + def format_help(self) -> str: + """ + Returns the help text for the sys command + + Returns: + :obj:`str`: The help text for the sys command. 
+ """ + return sys_help_text + @staticmethod def show_default(): print(" UPTIME: {}".format(kernel.uptime)) diff --git a/crash/commands/task.py b/crash/commands/task.py index 6aac802ca47..9b677108f51 100644 --- a/crash/commands/task.py +++ b/crash/commands/task.py @@ -6,9 +6,7 @@ import crash.cache.tasks import argparse -class TaskCommand(Command): - """select task by pid - +task_help_text = """ NAME task - select task by pid @@ -20,7 +18,11 @@ class TaskCommand(Command): EXAMPLES task 1402 - """ +""" + +class TaskCommand(Command): + """select task by pid""" + def __init__(self, name): parser = ArgumentParser(prog=name) @@ -30,6 +32,15 @@ def __init__(self, name): parser.format_usage = lambda: "thread \n" Command.__init__(self, name, parser) + def format_help(self) -> str: + """ + Returns the help text for the task command + + Returns: + :obj:`str`: The help text for the task command. + """ + return task_help_text + def execute(self, args): try: if args.pid: diff --git a/crash/commands/vtop.py b/crash/commands/vtop.py index 5e1476d75d8..c92db9ed9b9 100644 --- a/crash/commands/vtop.py +++ b/crash/commands/vtop.py @@ -59,9 +59,7 @@ def address(self): except (addrxlat.NotPresentError, addrxlat.NoDataError): return addr + 'N/A' -class VTOPCommand(Command): - """convert virtual address to physical - +vtop_help_text = """ NAME vtop - virtual to physical @@ -170,7 +168,10 @@ class VTOPCommand(Command): c7200ae0 40104000 40b08000 73 SWAP: /dev/sda8 OFFSET: 22716 - """ +""" + +class VTOPCommand(Command): + """convert virtual address to physical""" def __init__(self): parser = ArgumentParser(prog="vtop") @@ -188,6 +189,15 @@ def __init__(self): super().__init__("vtop", parser) + def format_help(self) -> str: + """ + Returns the help text for the vtop command + + Returns: + :obj:`str`: The help text for the vtop command. + """ + return vtop_help_text + def execute(self, argv): ctx = addrxlat_context() sys = addrxlat_system() diff --git a/crash/commands/xfs.py b/crash/commands/xfs.py index 1add4eee09d..e60051b1fe0 100644 --- a/crash/commands/xfs.py +++ b/crash/commands/xfs.py @@ -28,10 +28,7 @@ from crash.util.symbols import Types types = Types(['struct xfs_buf *']) - -class XFSCommand(Command): - """display XFS internal data structures - +xfs_help_text = """ NAME xfs - display XFS internal data structures @@ -43,7 +40,10 @@ class XFSCommand(Command): xfs show xfs dump-ail xfs dump-buft - """ +""" + +class XFSCommand(Command): + """display XFS internal data structures""" def __init__(self, name): parser = ArgumentParser(prog=name) @@ -62,6 +62,15 @@ def __init__(self, name): Command.__init__(self, name, parser) + def format_help(self) -> str: + """ + Returns the help text for the xfs command + + Returns: + :obj:`str`: The help text for the xfs command. + """ + return xfs_help_text + def list_xfs(self, args: Namespace) -> None: count = 0 print_header = True From 1ab82f9cd9e9fb19c1b542218427074c233958e1 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Thu, 30 May 2019 13:11:56 -0400 Subject: [PATCH 148/367] crash.types.task: pull set_active out of constructor By pulling set_active out of the constructor we make the object creation and the handling in crash.kernel simpler. 
Signed-off-by: Jeff Mahoney --- crash/kernel.py | 7 ++++--- crash/types/task.py | 44 +++++++++++++++++++++++++++++--------------- 2 files changed, 33 insertions(+), 18 deletions(-) diff --git a/crash/kernel.py b/crash/kernel.py index 84931c4124e..9afd5c9e3df 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -619,14 +619,15 @@ def setup_tasks(self) -> None: crashing_cpu = -1 for task in for_each_all_tasks(): - cpu = None - regs = None + ltask = LinuxTask(task) + active = int(task.address) in rqscurrs if active: cpu = rqscurrs[int(task.address)] regs = self.vmcore.attr.cpu[cpu].reg + ltask.set_active(cpu, regs) + - ltask = LinuxTask(task, active, cpu, regs) ptid = (LINUX_KERNEL_PID, task['pid'], 0) try: diff --git a/crash/types/task.py b/crash/types/task.py index 35eedd5f692..c4f494d552c 100644 --- a/crash/types/task.py +++ b/crash/types/task.py @@ -4,6 +4,8 @@ from typing import Iterator, Callable, Dict import gdb +from crash.exceptions import InvalidArgumentError, ArgumentTypeError +from crash.exceptions import UnexpectedGDBTypeError from crash.util import array_size, struct_has_member from crash.util.symbols import Types, Symvals, SymbolCallbacks from crash.types.list import list_for_each_entry @@ -179,9 +181,6 @@ class LinuxTask(object): Args: task_struct: The task to wrap. The value must be of type ``struct task_struct``. - active: Whether this task is active in the debugging enviroment - cpu: Which CPU this task was using - regs: The registers associated with this task Attributes: task_struct (:obj:`gdb.Value`): The task being wrapped. The value @@ -208,14 +207,11 @@ class LinuxTask(object): :obj:`int`. """ _valid = False + _task_state_has_exit_state = None - def __init__(self, task_struct: gdb.Value, active: bool=False, - cpu: int=None, regs: Dict[str, int]=None): + def __init__(self, task_struct: gdb.Value): self._init_task_types(task_struct) - if cpu is not None and not isinstance(cpu, int): - raise InvalidArgumentError("cpu must be integer or None") - if not isinstance(task_struct, gdb.Value): raise ArgumentTypeError('task_struct', task_struct, gdb.Value) @@ -225,12 +221,12 @@ def __init__(self, task_struct: gdb.Value, active: bool=False, types.task_struct_type) self.task_struct = task_struct - self.active = active - self.cpu = cpu - self.regs = regs + self.active = False + self.cpu = -1 + self.regs: Dict[str, int] = dict() - self.thread_info = None - self.thread = None + self.thread_info: gdb.Value = None + self.thread: gdb.InferiorThread = None # mem data self.mem_valid = False @@ -252,12 +248,30 @@ def _init_task_types(cls, task): # a simple pointer comparison. types.override('struct task_struct', task.type) fields = types.task_struct_type.fields() - cls.task_state_has_exit_state = 'exit_state' in fields + cls._task_state_has_exit_state = 'exit_state' in fields cls._pick_get_rss() cls._pick_last_run() cls.init_mm = get_value('init_mm') cls._valid = True + def set_active(self, cpu: int, regs: Dict[str, int]) -> None: + """ + Set this task as active in the debugging environment + + Args: + cpu: Which CPU this task was using + regs: The registers associated with this task + + Raises: + :obj:`.InvalidArgumentError`: The cpu was not a valid integer. 
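+        Example (illustrative only; ``regs`` stands in for a register
+        dictionary as built by crash.kernel)::
+
+            ltask.set_active(2, regs)    # marks the task active on CPU 2
+            ltask.set_active(-1, regs)   # raises InvalidArgumentError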
+ """ + if not (isinstance(cpu, int) and cpu >= 0): + raise InvalidArgumentError("cpu must be integer >= 0") + + self.active = True + self.cpu = cpu + self.regs = regs + def attach_thread(self, thread: gdb.InferiorThread) -> None: """ Associate a gdb thread with this task @@ -320,7 +334,7 @@ def task_state(self) -> int: :obj:`int`: The state flags for this task. """ state = int(self.task_struct['state']) - if self.task_state_has_exit_state: + if self._task_state_has_exit_state: state |= int(self.task_struct['exit_state']) return state From 1883db935d2aa0050b1fe45556aef1efa9c193a4 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Thu, 30 May 2019 13:50:53 -0400 Subject: [PATCH 149/367] crash: Add initial documentation Finally, some basic documentation. For now it is the API documentation and some basic "getting started" instructions. We still need a user guide, command examples, and a HOWTO for writing custom scripts. Signed-off-by: Jeff Mahoney --- .gitignore | 3 + Makefile | 13 +- README.rst | 147 +++++++++++++++ crash/subsystem/filesystem/__init__.py | 57 +++--- crash/subsystem/filesystem/btrfs.py | 73 +++---- crash/subsystem/filesystem/decoders.py | 84 +++++---- crash/subsystem/filesystem/ext3.py | 20 +- crash/subsystem/filesystem/mount.py | 110 ++++++----- crash/subsystem/filesystem/xfs.py | 230 ++++++++++++++--------- crash/subsystem/storage/__init__.py | 125 ++++++------ crash/subsystem/storage/blocksq.py | 33 ++-- crash/subsystem/storage/decoders.py | 176 ++++++++--------- crash/subsystem/storage/device_mapper.py | 84 +++++---- crash/types/bitmap.py | 94 ++++++--- crash/types/classdev.py | 25 ++- crash/types/cpu.py | 32 +++- crash/types/klist.py | 18 +- crash/types/list.py | 69 ++++--- crash/types/module.py | 18 +- crash/types/node.py | 38 ++-- doc-source/conf.py | 187 ++++++++++++++++++ doc-source/index.rst | 39 ++++ doc-source/installation.rst | 6 + doc-source/make-gdb-refs.py | 64 +++++++ doc-source/mock/README | 3 + doc-source/mock/addrxlat/__init__.py | 15 ++ doc-source/mock/gdb/__init__.py | 69 +++++++ doc-source/mock/gdb/types.py | 2 + doc-source/user_guide.rst | 4 + setup.cfg | 7 + 30 files changed, 1305 insertions(+), 540 deletions(-) create mode 100644 README.rst create mode 100644 doc-source/conf.py create mode 100644 doc-source/index.rst create mode 100644 doc-source/installation.rst create mode 100644 doc-source/make-gdb-refs.py create mode 100644 doc-source/mock/README create mode 100644 doc-source/mock/addrxlat/__init__.py create mode 100644 doc-source/mock/gdb/__init__.py create mode 100644 doc-source/mock/gdb/types.py create mode 100644 doc-source/user_guide.rst create mode 100644 setup.cfg diff --git a/.gitignore b/.gitignore index f3d74a9a581..edfbb817718 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,5 @@ *.pyc *~ +doc-source/crash*.rst +doc-source/modules.rst +docs diff --git a/Makefile b/Makefile index 457f4204412..1555024aec2 100644 --- a/Makefile +++ b/Makefile @@ -34,7 +34,10 @@ man-install: man $(INSTALL) -d -m 755 $(DESTDIR)$(man1dir) $(INSTALL) -m 644 $(GZ_MAN1) $(DESTDIR)$(man1dir) -install: man-install +build: crash tests kernel-tests + python3 setup.py -q build + +install: man-install build python3 setup.py install lint: lint3 @@ -42,3 +45,11 @@ lint: lint3 lint3: pylint --py3k -r n crash + +doc: build FORCE + rm -rf docs + rm -f doc/source/crash.*rst doc/source/modules.rst + sphinx-apidoc -M -e -H "API Reference" -f -o doc-source crash + (cd doc-source ; python3 make-gdb-refs.py) + python3 setup.py -q build_sphinx +FORCE: diff --git a/README.rst 
b/README.rst new file mode 100644 index 00000000000..339fd4b0b4a --- /dev/null +++ b/README.rst @@ -0,0 +1,147 @@ +crash-python +============ + +.. start-introduction + +crash-python is a semantic debugger for the Linux kernel. It is meant to +feel familiar for users of the classic +`crash `_ debugger but allows +much more powerful symbolic access to crash dumps as well as enabling an API for +writing ad-hoc extensions, commands, and analysis scripts. + +.. code-block:: bash + + $ pycrash vmlinux-4.12.14-150.14-default.gz vmcore + + crash-python initializing... + Loading tasks.... done. (170 tasks total) + Loading modules for 4.12.14-150.14-default.... done. (78 loaded) + [Switching to thread 170 (pid 27032)] + #0 sysrq_handle_crash (key=99) at ../drivers/tty/sysrq.c:146 + 146 *killer = 1; + Backtrace from crashing task (PID 27032): + #0 0xffffffffaa4b3682 in sysrq_handle_crash (key=99) + at ../drivers/tty/sysrq.c:146 + #1 0xffffffffaa4b3d34 in __handle_sysrq (key=99, check_mask=false) + at ../drivers/tty/sysrq.c:559 + #2 0xffffffffaa4b41eb in write_sysrq_trigger + (file=, buf=, count=18446628512242465728, ppos=) at ../drivers/tty/sysrq.c:1105 + #3 0xffffffffaa2b95b0 in proc_reg_write + (file=, buf=, count=, ppos=) at ../fs/proc/inode.c:230 + #4 0xffffffffaa246696 in __vfs_write + (file=0x63 , p=, count=, pos=0xffffa53fc0c5ff08) at ../fs/read_write.c:508 + #5 0xffffffffaa247c2d in vfs_write + (file=0xffff96e5a9a24c00, buf=0x560dc6656220 , count=, pos=0xffffa53fc0c5ff08) + at ../fs/read_write.c:558 + #6 0xffffffffaa249112 in SYSC_write + (count=, buf=, fd=) + at ../fs/read_write.c:605 + #7 0xffffffffaa249112 in SyS_write + (fd=, buf=94617163096608, count=2) at ../fs/read_write.c:597 + #8 0xffffffffaa003ae4 in do_syscall_64 (regs=0x63 ) + at ../arch/x86/entry/common.c:284 + #9 0xffffffffaa80009a in entry_SYSCALL_64 () + at ../arch/x86/entry/entry_64.S:236 + The 'pyhelp' command will list the command extensions. + py-crash> + py-crash> print *(struct file *)0xffff96e5a9a24c00 + $1 = { + f_u = { + fu_llist = { + next = 0x0 + }, + fu_rcuhead = { + next = 0x0 , + func = 0x0 + } + }, + f_path = { + mnt = 0xffff96e5b02d23a0, + dentry = 0xffff96e4b65b06c0 + }, + f_inode = 0xffff96e5ad464578, + f_op = 0xffffffffaac4d940 , + f_lock = { + { + rlock = { + raw_lock = { + val = { + counter = 0 + } + } + } + } + }, + f_write_hint = WRITE_LIFE_NOT_SET, + [...] + +.. end-introduction + +Installation +------------ + +.. start-installation + +`Crash-python `_ is on `GitHub `_. + +It requires the following components to work successfully: + +- `Python `_ 3.6 or newer +- `pyelftools `_ +- `libkdumpfile `_ +- `GDB `_ with python extensions and built with Python 3.6 or newer. + +If you are using a SUSE or openSUSE release, pre-built packages are available on the `Open Build Service `_. + +.. end-installation + +Quick start +----------- + +.. start-quick-start + +Crash-python requires the following to run properly: + +- The complete debuginfo for the kernel to be debugged, including modules +- The ELF images for the kernel and all modules +- The vmcore dump image from the crashed system + +To start: + +.. code-block:: bash + + $ pycrash [options] + +Since different systems and users place these files in different locations, there are number of command-line options to locate them. On a typical SUSE system, if you have the kernel-default and kernel-default-debuginfo packages installed, you will not need to provide any additional options. 
+ +If you have expanded the RPMs separately into a different directory, you can start with: + +.. code-block:: bash + + $ pycrash -r /path/to/root + +If you’re debugging a kernel that you built from a source tree directly and installed using make INSTALL_MOD_STRIP=1 modules_install install, you can specify your build directory as a source for debuginfo: + +.. code-block:: bash + + $ pycrash -b /path/to/build/dir + +The full options are documented with: + +.. code-block:: bash + + $ pycrash --help + +.. end-quick-start + + +License: +-------- + +.. start-license + +Copyright 2016-2019 Jeff Mahoney, `SUSE `_. + +crash-python is licensed under the `GPLv2 `_. + +.. end-license diff --git a/crash/subsystem/filesystem/__init__.py b/crash/subsystem/filesystem/__init__.py index bfc9ded270f..bc77d43e3ab 100644 --- a/crash/subsystem/filesystem/__init__.py +++ b/crash/subsystem/filesystem/__init__.py @@ -81,14 +81,14 @@ def super_fstype(sb: gdb.Value) -> str: Returns the file system type's name for a given superblock. Args: - sb (gdb.Value): The struct super_block for - which to return the file system type's name + sb: The ``struct super_block`` for which to return the file system + type's name. The value must be of type ``struct super_block``. Returns: - str: The file system type's name + :obj:`str`:The file system type's name Raises: - gdb.NotAvailableError: The target value was not available. + :obj:`gdb.NotAvailableError`: The target value was not available. """ return sb['s_type']['name'].string() @@ -97,14 +97,14 @@ def super_flags(sb: gdb.Value) -> str: Returns the flags associated with the given superblock. Args: - sb (gdb.Value): The struct super_block for - which to return the flags. + sb: The ``struct super_block`` for which to return the flags. + The value must be of type ``struct super_block``. Returns: - str: The flags field in human-readable form. + :obj:`str`:The flags field in human-readable form. Raises: - gdb.NotAvailableError: The target value was not available. + :obj:`gdb.NotAvailableError`: The target value was not available. """ return decode_flags(sb['s_flags'], SB_FLAGS) @@ -112,14 +112,12 @@ def for_each_super_block() -> Iterable[gdb.Value]: """ Iterate over the list of super blocks and yield each one. - Args: - None - Yields: - gdb.Value + :obj:`gdb.Value`: One value for each super block. Each value + will be of type ``struct super_block``. Raises: - gdb.NotAvailableError: The target value was not available. + :obj:`gdb.NotAvailableError`: The target value was not available. """ for sb in list_for_each_entry(symvals.super_blocks, types.super_block_type, 's_list'): @@ -131,16 +129,17 @@ def get_super_block(desc: AddressSpecifier, force: bool=False) -> gdb.Value: a struct super_block at that address. Args: - desc (gdb.Value, str, or int): The address for which to provide - a casted pointer - force (bool): Skip testing whether the value is available. + desc: The address for which to provide a casted pointer. The address + may be specified using an existing Value, an integer address, + or a hexadecimal address represented as a 0x-prefixed string. + force: Skip testing whether the value is available. Returns: - gdb.Value: The super_block at the requested - location + :obj:`gdb.Value`: The super block at the requested location. + The value will be ``struct super_block``. Raises: - gdb.NotAvailableError: The target value was not available. + :obj:`gdb.NotAvailableError`: The target value was not available. 
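+    Example (illustrative; the address is a placeholder, not a real
+    super block)::
+
+        sb = get_super_block('0xffff88003705a000')
+        print(super_fstype(sb))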
""" sb = get_typed_pointer(desc, types.super_block_type).dereference() if not force: @@ -158,15 +157,16 @@ def is_fstype_super(super_block: gdb.Value, name: str) -> bool: This uses a naive string comparison so modules are not required. Args: - super_block (gdb.Value): - The struct super_block to test - name (str): The name of the file system type + super_block: The struct super_block to test. The value must be + of type ``struct super_block``. + name: The name of the file system type Returns: - bool: whether the super_block belongs to the specified file system + :obj:`bool`: whether the ``struct super_block`` belongs to the + specified file system Raises: - gdb.NotAvailableError: The target value was not available. + :obj:`gdb.NotAvailableError`: The target value was not available. """ return super_fstype(super_block) == name @@ -175,13 +175,14 @@ def is_fstype_inode(inode: gdb.Value, name: str) -> bool: Tests whether the inode belongs to a particular file system type. Args: - inode (gdb.Value): The struct inode to test - name (str): The name of the file system type + inode: The struct inode to test. The value must be of + type ``struct inode``. + name: The name of the file system type Returns: - bool: whether the inode belongs to the specified file system + :obj:`bool`: whether the inode belongs to the specified file system Raises: - gdb.NotAvailableError: The target value was not available. + :obj:`gdb.NotAvailableError`: The target value was not available. """ return is_fstype_super(inode['i_sb'], name) diff --git a/crash/subsystem/filesystem/btrfs.py b/crash/subsystem/filesystem/btrfs.py index 9e2756fa47b..e9f8656e179 100644 --- a/crash/subsystem/filesystem/btrfs.py +++ b/crash/subsystem/filesystem/btrfs.py @@ -14,17 +14,17 @@ def is_btrfs_super(super_block: gdb.Value) -> bool: """ - Tests whether a super_block belongs to btrfs. + Tests whether a ``struct super_block`` belongs to btrfs. Args: - super_block (gdb.Value): The struct super_block - to test + super_block: The ``struct super_block`` to test. + The value must be of type ``struct super_block``. Returns: - bool: Whether the super_block belongs to btrfs + :obj:`bool`: Whether the super_block belongs to btrfs Raises: - gdb.NotAvailableError: The target value was not available. + :obj:`gdb.NotAvailableError`: The target value was not available. """ return is_fstype_super(super_block, "btrfs") @@ -33,13 +33,14 @@ def is_btrfs_inode(vfs_inode: gdb.Value) -> bool: Tests whether a inode belongs to btrfs. Args: - vfs_inode (gdb.Value): The struct inode to test + vfs_inode: The ``struct inode`` to test. The value must be + of type ``struct inode``. Returns: - bool: Whether the inode belongs to btrfs + :obj:`bool`: Whether the inode belongs to btrfs Raises: - gdb.NotAvailableError: The target value was not available. + :obj:`gdb.NotAvailableError`: The target value was not available. """ return is_btrfs_super(vfs_inode['i_sb']) @@ -47,20 +48,21 @@ def btrfs_inode(vfs_inode: gdb.Value, force: bool=False ) -> gdb.Value: """ Converts a VFS inode to a btrfs inode - This method converts a struct inode to a struct btrfs_inode. + This method converts a ``struct inode`` to a ``struct btrfs_inode``. Args: - vfs_inode (gdb.Value): The struct inode to convert - to a struct btrfs_inode + vfs_inode: The ``struct inode`` to convert to a ``struct btrfs_inode``. + The value must be of type ``struct inode``. - force (bool): Ignore type checking. + force: Ignore type checking. 
Returns: - gdb.Value: The converted struct btrfs_inode + :obj:`gdb.Value`: The converted ``struct btrfs_inode``. + The value will be of type ``struct btrfs_inode``. Raises: - InvalidArgumentError: the inode does not belong to btrfs - gdb.NotAvailableError: The target value was not available. + :obj:`.InvalidArgumentError`: the inode does not belong to btrfs + :obj:`gdb.NotAvailableError`: The target value was not available. """ if not force and not is_btrfs_inode(vfs_inode): raise InvalidArgumentError("inode does not belong to btrfs") @@ -74,19 +76,19 @@ def btrfs_fs_info(super_block: gdb.Value, force: bool=False) -> gdb.Value: This method resolves a struct btrfs_fs_info from a struct super_block Args: - super_block (gdb.Value): The struct super_block - to use to resolve a struct btrfs_fs_info. A pointer to a - struct super_block is also acceptable. + super_block: The ``struct super_block`` to use to resolve a' + ``struct btrfs_fs_info``. A pointer to a ``struct super_block`` + is also acceptable. - force (bool): Ignore type checking. + force: Ignore type checking. Returns: - gdb.Value: The resolved struct - btrfs_fs_info + :obj:`gdb.Value: The resolved ``struct btrfs_fs_info``. The value will + be of type ``struct btrfs_fs_info``. Raises: - InvalidArgumentError: the super_block does not belong to btrfs - gdb.NotAvailableError: The target value was not available. + :obj:`.InvalidArgumentError`: the super_block does not belong to btrfs + :obj:`gdb.NotAvailableError`: The target value was not available. """ if not force and not is_btrfs_super(super_block): raise InvalidArgumentError("super_block does not belong to btrfs") @@ -99,17 +101,17 @@ def btrfs_fsid(super_block: gdb.Value, force: bool=False) -> uuid.UUID: Returns the btrfs fsid (UUID) for the specified superblock. Args: - super_block (gdb.Value): The struct super_block - for which to return the btrfs fsid. + super_block: The ``struct super_block`` for which to return the + btrfs fsid. The value must be of type ``struct super_block``. - force (bool): Ignore type checking. + force: Ignore type checking. Returns: - uuid.UUID: The Python UUID Object for the btrfs fsid + :obj:`uuid.UUID`: The Python UUID Object for the btrfs fsid Raises: - InvalidArgumentError: the super_block does not belong to btrfs - gdb.NotAvailableError: The target value was not available. + :obj:`.InvalidArgumentError`: the super_block does not belong to btrfs + :obj:`gdb.NotAvailableError`: The target value was not available. """ fs_info = btrfs_fs_info(super_block, force) if struct_has_member(types.btrfs_fs_info_type, 'fsid'): @@ -121,17 +123,18 @@ def btrfs_metadata_uuid(sb: gdb.Value, force: bool=False) -> uuid.UUID: Returns the btrfs metadata uuid for the specified superblock. Args: - super_block (gdb.Value): The struct super_block - for which to return the btrfs metadata uuid. + super_block: The ``struct super_block`` for which to return the + btrfs metadata uuid. The value must be of type + ``struct super_block``. - force (bool): Ignore type checking. + force: Ignore type checking. Returns: - uuid.UUID: The Python UUID Object for the btrfs fsid + :obj:`uuid.UUID`: The Python UUID Object for the btrfs fsid Raises: - InvalidArgumentError: the super_block does not belong to btrfs - gdb.NotAvailableError: The target value was not available. + :obj:`.InvalidArgumentError`: the super_block does not belong to btrfs + :obj:`gdb.NotAvailableError`: The target value was not available. 
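+    Example (illustrative sketch; assumes ``for_each_super_block`` from
+    :mod:`crash.subsystem.filesystem`)::
+
+        for sb in for_each_super_block():
+            if is_btrfs_super(sb):
+                print(btrfs_metadata_uuid(sb))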
""" fs_info = btrfs_fs_info(sb, force) if struct_has_member(types.btrfs_fs_info_type, 'metadata_uuid'): diff --git a/crash/subsystem/filesystem/decoders.py b/crash/subsystem/filesystem/decoders.py index badd71189e2..b416c13bcd2 100644 --- a/crash/subsystem/filesystem/decoders.py +++ b/crash/subsystem/filesystem/decoders.py @@ -16,28 +16,38 @@ class DIOBioDecoder(Decoder): directly or asynchronously. Args: - bio(gdb.Value): The struct bio to be decoded, generated - by the direct i/o component + bio: The struct bio to be decoded, generated by the direct i/o + component. The value must be of type ``struct bio``. + + Attributes: + bio (:obj:`gdb.Value`): The bio. The value is of type + ``struct bio``. + dio (:obj:`gdb.Value`): ``struct dio *`` that represents the + direct i/o operation + fstype (str): the name of the file system type + dev (str): the name of the underlying device + offset (str): the starting offset on disk """ - types = Types([ 'struct dio *' ]) + _types = Types([ 'struct dio *' ]) __endio__ = [ 'dio_bio_end_io', 'dio_bio_end_io' ] - description = "{:x} bio: Direct I/O for {} inode {}, sector {} on {}" + _description = "{:x} bio: Direct I/O for {} inode {}, sector {} on {}" - def __init__(self, bio): + def __init__(self, bio: gdb.Value): super().__init__() self.bio = bio def interpret(self): - self.dio = self.bio['bi_private'].cast(self.types.dio_p_type) + """Interprets a direct i/o bio to populate its attributes""" + self.dio = self.bio['bi_private'].cast(self._types.dio_p_type) self.fstype = super_fstype(self.dio['inode']['i_sb']) self.dev = block_device_name(self.dio['inode']['i_sb']['s_bdev']) self.offset = self.dio['block_in_file'] << self.dio['blkbits'] def __str__(self): - return self.description.format(int(self.bio), self.fstype, - self.dio['inode']['i_ino'], - self.bio['bi_sector'], self.dev) + return self._description.format(int(self.bio), self.fstype, + self.dio['inode']['i_ino'], + self.bio['bi_sector'], self.dev) def __next__(self): return None @@ -52,29 +62,28 @@ class DecodeMPage(Decoder): the file system subsystem. Args: - bio(gdb.Value): The struct bio to be decoded, generated - by the mpage component - - Returns: - dict: Contains the following items: - - description (str): Human-readable description of the bio - - bio (gdb.Value): The struct bio being decoded - - fstype (str): The name of the file system which submitted - this bio - - inode (gdb.Value): The struct inode, if any, - that owns the file associated with this bio + bio: The struct bio to be decoded, generated by the mpage component. + The value must be of type ``struct bio``. + + Attributes: + bio (:obj:`gdb.Value`): The bio. The value is of type + ``struct bio``. + inode (:obj:`gdb.Value`): The inode associated with this bio. The + value is of type ``struct inode``. + fstype (str): the name of the file system type """ __endio__ = 'mpage_end_io' description = "{:x} bio: Multipage I/O: inode {}, type {}, dev {}" - def __init__(self, bio): + def __init__(self, bio: gdb.Value): super().__init__() self.bio = bio def interpret(self): + """Interpret the multipage bio to populate its attributes""" self.inode = bio['bi_io_vec'][0]['bv_page']['mapping']['host'] self.fstype = super_fstype(inode['i_sb']) @@ -92,23 +101,30 @@ class DecodeBioBH(Decoder): This method decodes a bio generated by buffer head submission. Args: - bio(gdb.Value): The struct bio to be decoded, generated - by buffer head submission + bio: The struct bio to be decoded, generated by buffer head + submission. 
The value must be of type ``struct bio``. + + Attributes: + bio (:obj:`gdb.Value`): The bio. The value is of type + ``struct bio``. + bh (:obj:`gdb.Value`): The struct buffer_head associated with this + bio. The value is of type ``struct buffer_head``. """ - types = Types([ 'struct buffer_head *' ]) + _types = Types([ 'struct buffer_head *' ]) __endio__ = 'end_bio_bh_io_sync' - description = "{:x} bio: Bio representation of buffer head" + _description = "{:x} bio: Bio representation of buffer head" - def __init__(self, bio): + def __init__(self, bio: gdb.Value): super().__init__() self.bio = bio def interpret(self): - self.bh = self.bio['bi_private'].cast(self.types.buffer_head_p_type) + """Interpret the buffer_head bio to populate its attributes""" + self.bh = self.bio['bi_private'].cast(self._types.buffer_head_p_type) def __str__(self): - return self.description.format(int(bio)) + return self._description.format(int(bio)) def __next__(self): return decode_bh(self.bh) @@ -121,18 +137,22 @@ class DecodeSyncWBBH(Decoder): synchronous writeback. Args: - bio(gdb.Value): The struct buffer_head to be - decoded. + bio: The ``struct buffer_head`` to be decoded. The value must be of + ``struct buffer_head``. + + Attributes: + bh (:obj:`gdb.Value`): The ``struct buffer_head`` being decoded. + The value is of type ``struct buffer_head``. """ __endio__ = 'end_buffer_write_sync' - description = "{:x} buffer_head: for dev {}, block {}, size {} (unassociated)" + _description = "{:x} buffer_head: for dev {}, block {}, size {} (unassociated)" def __init__(self, bh): super().__init__() self.bh = bh def __str__(self): - self.description.format(block_device_name(bh['b_bdev']), + self._description.format(block_device_name(bh['b_bdev']), self.bh['b_blocknr'], self.bh['b_size']) DecodeSyncWBBH.register() diff --git a/crash/subsystem/filesystem/ext3.py b/crash/subsystem/filesystem/ext3.py index cdad6cf5a90..9d2eee8c681 100644 --- a/crash/subsystem/filesystem/ext3.py +++ b/crash/subsystem/filesystem/ext3.py @@ -9,27 +9,35 @@ class Ext3Decoder(Decoder): """ Decodes an ext3 journal buffer - This decodes a struct buffer_head with an end_io callback - of journal_end_buffer_io_sync. + This decodes a ``struct buffer_head`` with a `b_end_io` callback + of ``journal_end_buffer_io_sync``. Args: - bh (gdb.Value): The struct buffer_head to decode + bh: The struct buffer_head to decode. The value must be of + type ``struct buffer_head``. 
+ + Attributes: + fstype (str): "journal on ext3" + devname (str): The device name in string form + offset (int): The starting offset of this buffer on the device + length (int): The length of buffer on the the device """ __endio__ = 'journal_end_buffer_io_sync' - description = "{:x} buffer_head: {} journal block (jbd) on {}" + _description = "{:x} buffer_head: {} journal block (jbd) on {}" - def __init__(self, bh): + def __init__(self, bh: gdb.Value): super().__init__() self.bh = bh def interpret(self): + """Interprets the ext3 buffer_head to populate its attributes""" self.fstype = "journal on ext3" self.devname = block_device_name(self.bh['b_bdev']) self.offset = int(self.bh['b_blocknr']) * int(self.bh['b_size']) self.length = int(self.bh['b_size']) def __str__(self): - return self.description(int(self.bh), fstype, devname) + return self._description(int(self.bh), self.fstype, self.devname) Ext3Decoder.register() diff --git a/crash/subsystem/filesystem/mount.py b/crash/subsystem/filesystem/mount.py index 3194c8e2109..a7e3beb4ad2 100644 --- a/crash/subsystem/filesystem/mount.py +++ b/crash/subsystem/filesystem/mount.py @@ -1,8 +1,21 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +""" +The crash.subsystem.filesystem.mount module contains helpers used to +access the file system namespace. + +.. _mount_structure: + +*NOTE*: Linux v3.3 split ``struct mount`` from ``struct vfsmount``. Prior +kernels do not have ``struct mount``. In functions documented as using a +:obj:`gdb.Value` describing a ``struct mount``, a ``struct vfsmount`` +will be required and/or returned instead. +""" import gdb +from typing import Iterator + from crash.subsystem.filesystem import super_fstype from crash.types.list import list_for_each_entry from crash.util import container_of, decode_flags, struct_has_member @@ -41,16 +54,16 @@ types = Types([ 'struct mount', 'struct vfsmount' ]) symvals = Symvals([ 'init_task' ]) -class Mount(object): +class _Mount(object): @classmethod - def for_each_mount_impl(cls, task): - raise NotImplementedError("Mount.for_each_mount is unhandled on this kernel version.") + def _for_each_mount_impl(cls, task): + raise NotImplementedError("_Mount.for_each_mount is unhandled on this kernel version.") @classmethod def for_each_mount_nsproxy(cls, task): """ An implementation of for_each_mount that uses the task's - nsproxy to locate the mount namespace. See for_each_mount + nsproxy to locate the mount namespace. See :ref:`for_each_mount` for more details. """ return list_for_each_entry(task['nsproxy']['mnt_ns']['list'], @@ -60,7 +73,7 @@ def for_each_mount_nsproxy(cls, task): def _check_task_interface(cls, symval): try: nsproxy = symvals.init_task['nsproxy'] - cls.for_each_mount_impl = cls.for_each_mount_nsproxy + cls._for_each_mount_impl = cls.for_each_mount_nsproxy except KeyError: print("check_task_interface called but no init_task?") pass @@ -72,41 +85,46 @@ def _check_mount_type(gdbtype): # Older kernels didn't separate mount from vfsmount types.mount_type = types.vfsmount_type -def for_each_mount(task=None): +def for_each_mount(task: gdb.Value=None) -> Iterator[gdb.Value]: """ Iterate over each mountpoint in the namespace of the specified task - If no task is given, the init_task is used. + If no task is given, the ``init_task`` symbol is used. The type of the mount structure returned depends on whether - 'struct mount' exists on the kernel version being debugged. + ``struct mount`` exists on the kernel version being debugged :ref:`structure `. 
Args: - task (gdb.Value, default=): - The task which contains the namespace to iterate. + task: The task which contains the namespace to iterate. The + :obj:`gdb.Value` must describe a ``struct task_struct``. If + unspecified, the value for the ``init_task`` symbol will be + used. Yields: - gdb.Value: - A mountpoint attached to the namespace. + :obj:`gdb.Value`: A mountpoint attached to the namespace. + The value will be of type ``struct mount`` + :ref:`structure ` . + Raises: + :obj:`gdb.NotAvailableError`: The target value is not available. """ if task is None: task = symvals.init_task - return Mount.for_each_mount_impl(task) + return _Mount._for_each_mount_impl(task) def mount_flags(mnt: gdb.Value, show_hidden: bool=False) -> str: """ - Returns the human-readable flags of the mount structure + Returns the human-readable flags of the ``struct mount`` + :ref:`structure `. Args: - mnt (gdb.Value): - The mount structure for which to return flags + mnt: The :ref:`mount structure ` for which to + return flags - show_hidden (bool, default=False): - Whether to return hidden flags + show_hidden: Whether to return hidden flags Returns: - str: The mount flags in human-readable form + :obj:`str`: The mount flags in human-readable form """ if struct_has_member(mnt, 'mnt'): mnt = mnt['mnt'] @@ -119,12 +137,12 @@ def mount_super(mnt: gdb.Value) -> gdb.Value: Returns the struct super_block associated with a mount Args: - mnt: gdb.Value: - The mount structure for which to return the super_block + mnt: The :ref:`mount structure ` for which to + return the super_block Returns: - gdb.Value: - The super_block associated with the mount + :obj:`gdb.Value`: The super_block associated with the mount. + The value will be of type ``struct super_block``. """ try: sb = mnt['mnt']['mnt_sb'] @@ -137,12 +155,12 @@ def mount_root(mnt: gdb.Value) -> gdb.Value: Returns the struct dentry corresponding to the root of a mount Args: - mnt: gdb.Value: - The mount structure for which to return the root dentry + mnt: The :ref:`mount structure ` for which to + return the root dentry Returns: - gdb.Value: - The dentry that corresponds to the root of the mount + :obj:`gdb.Value`: The dentry that corresponds to the root of + the mount. The value will be of type ``struct dentry``. """ try: mnt = mnt['mnt'] @@ -156,11 +174,11 @@ def mount_fstype(mnt: gdb.Value) -> str: Returns the file system type of the mount Args: - mnt (gdb.Value): - The mount structure for which to return the file system tyoe + mnt: The :ref:`mount structure ` for which to + return the file system type Returns: - str: The file system type of the mount in string form + :obj:`str`: The file system type of the mount in string form """ return super_fstype(mount_super(mnt)) @@ -169,12 +187,14 @@ def mount_device(mnt: gdb.Value) -> str: Returns the device name that this mount is using Args: - gdb.Value: - The mount structure for which to get the device name + mnt: The :ref:`mount structure ` for which to + get the device name Returns: - str: The device name in string form + :obj:`str`: The device name in string form + Raises: + :obj:`gdb.NotAvailableError`: The target value was not available. 
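+    Example (illustrative)::
+
+        for mnt in for_each_mount():
+            print(mount_device(mnt), mount_fstype(mnt))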
""" devname = mnt['mnt_devname'].string() if devname is None: @@ -192,23 +212,25 @@ def _real_mount(vfsmnt): return vfsmnt return container_of(vfsmnt, types.mount_type, 'mnt') -def d_path(mnt, dentry, root=None): +def d_path(mnt: gdb.Value, dentry: gdb.Value, root: gdb.Value=None): """ Returns a file system path described by a mount and dentry Args: - mnt (gdb.Value): - The mount for the start of the path + mnt: The :ref:`mount structure ` for the start + of the path - dentry (gdb.Value): - The dentry for the start of the path - - root (gdb.Value, default=None): - The mount at which to stop resolution. If None, - the current root of the namespace. + dentry: The dentry for the start of the path. The value must be + of type ``struct dentry``. + root: The :ref:`mount structure ` at which to + stop resolution. If unspecified or ``None``, the current root + of the namespace is used. Returns: - str: The path in string form + :obj:`str`: The path in string form + + Raises: + :obj:`gdb.NotAvailableError`: The target value was not available. """ if root is None: root = symvals.init_task['fs']['root'] @@ -253,4 +275,4 @@ def d_path(mnt, dentry, root=None): return name type_cbs = TypeCallbacks([ ('struct vfsmount', _check_mount_type ) ]) -symbols_cbs = SymbolCallbacks([ ('init_task', Mount._check_task_interface ) ]) +symbols_cbs = SymbolCallbacks([ ('init_task', _Mount._check_task_interface ) ]) diff --git a/crash/subsystem/filesystem/xfs.py b/crash/subsystem/filesystem/xfs.py index b39738efeff..38666f36bb3 100644 --- a/crash/subsystem/filesystem/xfs.py +++ b/crash/subsystem/filesystem/xfs.py @@ -1,5 +1,9 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +""" +The crash.subsystem.filesystem.xfs module offers helpers to work with +XFS file systems. +""" import gdb import uuid @@ -205,24 +209,34 @@ def __str__(self): class XFSBufBioDecoder(Decoder): """ Decodes a bio with an xfsbuf ->bi_end_io + + Args: + bio: The struct bio to decode. The value must be of type + ``struct bio``. + + Attributes: + xfsbuf (gdb.Value): The xfsbuf structure. It is of type + ``struct xfs_buf *``. + devname (str): The string representation of the device name """ - description = "{:x} bio: xfs buffer on {}" + _description = "{:x} bio: xfs buffer on {}" __endio__ = 'xfs_buf_bio_end_io' - types = Types([ 'struct xfs_buf *' ]) + _types = Types([ 'struct xfs_buf *' ]) - def __init__(self, bio): + def __init__(self, bio: gdb.Value): super(XFSBufBioDecoder, self).__init__() self.bio = bio def interpret(self): - self.xfsbuf = bio['bi_private'].cast(cls.types.xfs_buf_p_type) + """Interpret the xfsbuf bio to populate its attributes""" + self.xfsbuf = bio['bi_private'].cast(cls._types.xfs_buf_p_type) self.devname = block_device_name(bio['bi_bdev']) def __next__(self): return XFSBufDecoder(xfs.xfsbuf) def __str__(self): - return self.description.format(self.bio, self.devname) + return self._description.format(self.bio, self.devname) XFSBufBioDecoder.register() @@ -232,48 +246,48 @@ def __str__(self): 'struct xfs_qoff_logitem', 'struct xfs_inode', 'struct xfs_mount *', 'struct xfs_buf *' ]) -class XFS(object): +class _XFS(object): """ XFS File system state class. Not meant to be instantiated directly. 
""" - ail_head_name = None + _ail_head_name = None @classmethod def _detect_ail_version(cls, gdbtype): if struct_has_member(gdbtype, 'ail_head'): - cls.ail_head_name = 'ail_head' + cls._ail_head_name = 'ail_head' else: - cls.ail_head_name = 'xa_ail' + cls._ail_head_name = 'xa_ail' def is_xfs_super(super_block: gdb.Value) -> bool: """ - Tests whether a super_block belongs to XFS. + Tests whether a ``struct super_block`` belongs to XFS. Args: - super_block (gdb.Value): - The struct super_block to test + super_block: The struct super_block to test. The value must be of type + ``struct super_block``. Returns: - bool: Whether the super_block belongs to XFS + :obj:`bool`: Whether the super_block belongs to XFS Raises: - gdb.NotAvailableError: The target value was not available. + :obj:`gdb.NotAvailableError`: The target value was not available. """ return is_fstype_super(super_block, "xfs") def is_xfs_inode(vfs_inode: gdb.Value) -> bool: """ - Tests whether a generic VFS inode belongs to XFS + Tests whether a generic ``struct inode`` belongs to XFS Args: - vfs_inode (gdb.value(): - The struct inode to test whether it belongs to XFS + vfs_inode: The struct inode to test whether it belongs to XFS. + The value must be of type ``struct inode``. Returns: - bool: Whether the inode belongs to XFS + :obj:`bool`: Whether the inode belongs to XFS Raises: - gdb.NotAvailableError: The target value was not available. + :obj:`gdb.NotAvailableError`: The target value was not available. """ return is_fstype_inode(vfs_inode, "xfs") @@ -282,20 +296,21 @@ def xfs_inode(vfs_inode: gdb.Value, force: bool=False) -> gdb.Value: """ Converts a VFS inode to a xfs inode - This method converts a struct inode to a struct xfs_inode. + This method converts a ``struct inode`` to a ``struct xfs_inode``. Args: - vfs_inode (gdb.Value): - The struct inode to convert to a struct xfs_inode + vfs_inode: The ``struct inode`` to convert to a ``struct xfs_inode`` + The value must be of type ``struct inode``. - force (bool): ignore type checking + force: ignore type checking Returns: - gdb.Value: The converted struct xfs_inode + :obj:`gdb.Value`: The converted ``struct xfs_inode``. The value + will be of type ``struct xfs_inode``. Raises: TypeError: The inode does not belong to xfs - gdb.NotAvailableError: The target value was not available. + :obj:`gdb.NotAvailableError`: The target value was not available. """ if not force and not is_xfs_inode(vfs_inode): raise InvalidArgumentError("inode does not belong to xfs") @@ -306,18 +321,20 @@ def xfs_mount(sb: gdb.Value, force: bool=False) -> gdb.Value: """ Converts a VFS superblock to a xfs mount - This method converts a struct super_block to a struct xfs_mount * + This method converts a ``struct super_block`` to a ``struct xfs_mount *`` Args: - super_block (gdb.Value): - The struct super_block to convert to a struct xfs_fs_info. + super_block: The struct super_block to convert to a + ``struct xfs_fs_info``. The value must be of type + ``struct super_block``. Returns: - gdb.Value: The converted struct xfs_mount + :obj:`gdb.Value`: The converted ``struct xfs_mount``. The value will be + of type ``struct xfs_mount *``. Raises: - TypeError: The superblock does not belong to xfs - gdb.NotAvailableError: The target value was not available. + InvalidArgumentError: The ``struct super_block`` does not belong to xfs + :obj:`gdb.NotAvailableError`: The target value was not available. 
""" if not force and not is_xfs_super(sb): raise InvalidArgumentError("superblock does not belong to xfs") @@ -329,14 +346,14 @@ def xfs_mount_flags(mp: gdb.Value) -> str: Return the XFS-internal mount flags in string form Args: - mp (gdb.Value): - The struct xfs_mount for the file system + mp: The ``struct xfs_mount`` for the file system. The value must be of + type ``struct xfs_mount``. Returns: - str: The mount flags in string form + :obj:`str`: The mount flags in string form Raises: - gdb.NotAvailableError: The target value was not available. + :obj:`gdb.NotAvailableError`: The target value was not available. """ return decode_flags(mp['m_flags'], XFS_MOUNT_FLAGS) @@ -345,14 +362,14 @@ def xfs_mount_uuid(mp: gdb.Value) -> uuid.UUID: Return the UUID for an XFS file system in string form Args: - mp gdb.Value(): - The struct xfs_mount for the file system + mp: The ``struct xfs_mount`` for the file system. The value must be of + type ``struct xfs_mount``. Returns: - uuid.UUID: The Python UUID object that describes the xfs UUID + :obj:`uuid.UUID`: The Python UUID object that describes the xfs UUID Raises: - gdb.NotAvailableError: The target value was not available. + :obj:`gdb.NotAvailableError`: The target value was not available. """ return decode_uuid_t(mp['m_sb']['sb_uuid']) @@ -364,15 +381,17 @@ def xfs_for_each_ail_entry(ail: gdb.Value) -> Iterable[gdb.Value]: Iterates over the XFS Active Item Log and returns each item Args: - ail (gdb.Value): The XFS AIL to iterate + ail: The XFS AIL to iterate. The value must be of type + ``struct xfs_ail``. Yields: - gdb.Value + :obj:`gdb.Value`: A log item from the AIL. Each value will be of + type ``struct xfs_log_item``. Raises: - gdb.NotAvailableError: The target value was not available. + :obj:`gdb.NotAvailableError`: The target value was not available. """ - head = ail[XFS.ail_head_name] + head = ail[_XFS._ail_head_name] for item in list_for_each_entry(head, types.xfs_log_item_type, 'li_ail'): yield item @@ -381,13 +400,15 @@ def xfs_for_each_ail_log_item(mp: gdb.Value) -> Iterable[gdb.Value]: Iterates over the XFS Active Item Log and returns each item Args: - mp (gdb.Value): The XFS mount to iterate + mp: The XFS mount to iterate. The value must be of type `struct + xfs_mount`. Yields: - gdb.Value + :obj:`gdb.Value`: A log item from AIL owned by this mount. + The value will be of type ``struct xfs_log_item``. Raises: - gdb.NotAvailableError: The target value was not available. + :obj:`gdb.NotAvailableError`: The target value was not available. """ for item in xfs_for_each_ail_entry(mp['m_ail']): yield item @@ -397,14 +418,16 @@ def item_to_buf_log_item(item: gdb.Value) -> gdb.Value: Converts an xfs_log_item to an xfs_buf_log_item Args: - item (gdb.Value): The log item to convert + item: The log item to convert. The value must be of + type ``struct xfs_log_item``. Returns: - gdb.Value + :obj:`gdb.Value`: The converted log item. The value will be of + type ``struct xfs_buf_log_item``. Raises: - InvalidArgumentError: The type of log item is not XFS_LI_BUF - gdb.NotAvailableError: The target value was not available. + InvalidArgumentError: The type of log item is not ``XFS_LI_BUF`` + :obj:`gdb.NotAvailableError`: The target value was not available. 
""" if item['li_type'] != XFS_LI_BUF: raise InvalidArgumentError("item is not a buf log item") @@ -415,14 +438,16 @@ def item_to_inode_log_item(item: gdb.Value) -> gdb.Value: Converts an xfs_log_item to an xfs_inode_log_item Args: - item (gdb.Value): The log item to convert + item: The log item to convert. The value must of of type + ``struct xfs_log_item``. Returns: - gdb.Value + :obj:`gdb.Value`: The converted log item. The value will be of + type ``struct xfs_inode_log_item``. Raises: - InvalidArgumentError: The type of log item is not XFS_LI_INODE - gdb.NotAvailableError: The target value was not available. + InvalidArgumentError: The type of log item is not ``XFS_LI_INODE`` + :obj:`gdb.NotAvailableError`: The target value was not available. """ if item['li_type'] != XFS_LI_INODE: raise InvalidArgumentError("item is not an inode log item") @@ -433,14 +458,16 @@ def item_to_efi_log_item(item: gdb.Value) -> gdb.Value: Converts an xfs_log_item to an xfs_efi_log_item Args: - item (gdb.Value): The log item to convert + item: The log item to convert. The value must of of type + ``struct xfs_log_item``. Returns: - gdb.Value + :obj:`gdb.Value`: The converted log item. The value will be of + type ``struct xfs_efi_log_item``. Raises: - InvalidArgumentError: The type of log item is not XFS_LI_EFI - gdb.NotAvailableError: The target value was not available. + InvalidArgumentError: The type of log item is not ``XFS_LI_EFI`` + :obj:`gdb.NotAvailableError`: The target value was not available. """ if item['li_type'] != XFS_LI_EFI: raise InvalidArgumentError("item is not an EFI log item") @@ -451,14 +478,16 @@ def item_to_efd_log_item(item: gdb.Value) -> gdb.Value: Converts an xfs_log_item to an xfs_efd_log_item Args: - item (gdb.Value): The log item to convert + item: The log item to convert. The value must of of type + ``struct xfs_log_item``. Returns: - gdb.Value + :obj:`gdb.Value`: The converted log item. The value will be of + type ``struct xfs_efd_log_item``. Raises: - InvalidArgumentError: The type of log item is not XFS_LI_EFD - gdb.NotAvailableError: The target value was not available. + InvalidArgumentError: The type of log item is not ``XFS_LI_EFD`` + :obj:`gdb.NotAvailableError`: The target value was not available. """ if item['li_type'] != XFS_LI_EFD: raise InvalidArgumentError("item is not an EFD log item") @@ -469,14 +498,16 @@ def item_to_dquot_log_item(item: gdb.Value) -> gdb.Value: Converts an xfs_log_item to an xfs_dquot_log_item Args: - item (gdb.Value): The log item to convert + item: The log item to convert. The value must of of type + ``struct xfs_log_item``. Returns: - gdb.Value + :obj:`gdb.Value`: The converted log item. The value will be of + type ``struct xfs_dquot_log_item``. Raises: - InvalidArgumentError: The type of log item is not XFS_LI_DQUOT - gdb.NotAvailableError: The target value was not available. + InvalidArgumentError: The type of log item is not ``XFS_LI_DQUOT`` + :obj:`gdb.NotAvailableError`: The target value was not available. """ if item['li_type'] != XFS_LI_DQUOT: raise InvalidArgumentError("item is not an DQUOT log item") @@ -487,14 +518,16 @@ def item_to_quotaoff_log_item(item: gdb.Value) -> gdb.Value: Converts an xfs_log_item to an xfs_quotaoff_log_item Args: - item (gdb.Value): The log item to convert + item: The log item to convert. The value must be of type + ``struct xfs_log_item``. Returns: - gdb.Value + :obj:`gdb.Value`: The converted log item. 
The value will be of + type ``struct xfs_quotaoff_log_item`` Raises: - InvalidArgumentError: The type of log item is not XFS_LI_QUOTAOFF - gdb.NotAvailableError: The target value was not available. + InvalidArgumentError: The type of log item is not ``XFS_LI_QUOTAOFF`` + :obj:`gdb.NotAvailableError`: The target value was not available. """ if item['li_type'] != XFS_LI_QUOTAOFF: raise InvalidArgumentError("item is not an QUOTAOFF log item") @@ -505,21 +538,24 @@ def xfs_log_item_typed(item:gdb.Value) -> gdb.Value: Returns the log item converted from the generic type to the actual type Args: - item (gdb.Value): The struct xfs_log_item to - convert. + item: The ``struct xfs_log_item`` to convert. The value must be + of type ``struct xfs_log_item``. Returns: - Depending on the item type, one of: - gdb.Value - gdb.Value - gdb.Value - gdb.Value - gdb.Value - gdb.Value (for UNLINK item) + :obj:`gdb.Value`: + + Depending on type, the value will be any of the following types: + + - ``struct xfs_buf_log_item_type`` + - ``struct xfs_inode_log_item_type`` + - ``struct xfs_efi_log_item_type`` + - ``struct xfs_efd_log_item_type`` + - ``struct xfs_dq_logitem`` + - ``int`` (for ``XFS_LI_IUNLINK`` item) Raises: RuntimeError: An unexpected item type was encountered - gdb.NotAvailableError: The target value was not available. + :obj:`gdb.NotAvailableError`: The target value was not available. """ li_type = int(item['li_type']) if li_type == XFS_LI_BUF: @@ -542,17 +578,18 @@ def xfs_log_item_typed(item:gdb.Value) -> gdb.Value: def xfs_format_xfsbuf(buf: gdb.Value) -> str: """ - Returns a human-readable format of struct xfs_buf + Returns a human-readable format of ``struct xfs_buf`` Args: - buf (gdb.Value): - The struct xfs_buf to decode + buf: The ``struct xfs_buf`` to decode. The value must be of type + ``struct xfs_buf``. Returns: - str: The human-readable representation of the struct xfs_buf + :obj:`str`: The human-readable representation of the + ``struct xfs_buf``. Raises: - gdb.NotAvailableError: The target value was not available. + :obj:`gdb.NotAvailableError`: The target value was not available. """ state = "" bflags = decode_flags(buf['b_flags'], XFS_BUF_FLAGS) @@ -572,20 +609,25 @@ def xfs_for_each_ail_log_item_typed(mp: gdb.Value) -> gdb.Value: to the specific type. Args: - mp (gdb.Value): The XFS mount to iterate + mp: The XFS mount to iterate. The value must be of + type ``struct xfs_mount``. Yields: - Depending on the item type, one of: - gdb.Value - gdb.Value - gdb.Value - gdb.Value - gdb.Value + :obj:`gdb.Value`: + + Depending on type, the value will be any of the following types: + + - ``struct xfs_buf_log_item_type`` + - ``struct xfs_inode_log_item_type`` + - ``struct xfs_efi_log_item_type`` + - ``struct xfs_efd_log_item_type`` + - ``struct xfs_dq_logitem`` + - ``int`` (for UNLINK item) Raises: - gdb.NotAvailableError: The target value was not available. + :obj:`gdb.NotAvailableError`: The target value was not available. """ for item in types.xfs_for_each_ail_log_item(mp): yield types.xfs_log_item_typed(item) -type_cbs = TypeCallbacks([ ('struct xfs_ail', XFS._detect_ail_version) ]) +type_cbs = TypeCallbacks([ ('struct xfs_ail', _XFS._detect_ail_version) ]) diff --git a/crash/subsystem/storage/__init__.py b/crash/subsystem/storage/__init__.py index 264c5b9add7..cbe109b33db 100644 --- a/crash/subsystem/storage/__init__.py +++ b/crash/subsystem/storage/__init__.py @@ -26,14 +26,14 @@ def for_each_bio_in_stack(bio: gdb.Value) -> Iterable[decoders.Decoder]: processed by each level's decoder. 
The stack will be interrupted if an encountered object doesn't have a decoder specified. - See crash.subsystem.storage.decoders for more detail. + See :mod:`crash.subsystem.storage.decoders` for more detail. Args: - bio (gdb.Value): The initial struct bio to start - decoding + bio: The initial struct bio to start decoding. The value must be + of type ``struct bio``. Yields: - Decoder + :obj:`.Decoder`: The next :obj:`.Decoder` in the stack, if any remain. """ decoder = decoders.decode_bio(bio) while decoder is not None: @@ -42,63 +42,63 @@ def dev_to_gendisk(dev: gdb.Value) -> gdb.Value: """ - Converts a struct device that is embedded in a struct gendisk - back to the struct gendisk. + Converts a ``struct device`` that is embedded in a ``struct gendisk`` + back to the ``struct gendisk``. Args: - dev (gdb.Value) : A struct device contained within - a struct gendisk. No checking is performed. Results - if other structures are provided are undefined. + dev: A ``struct device`` contained within a ``struct gendisk``. + The value must be of type ``struct device``. Returns: - gdb.Value : The converted struct hd_struct + :obj:`gdb.Value`: The converted gendisk. The value is of type + ``struct gendisk``. """ return container_of(dev, types.gendisk_type, 'part0.__dev') def dev_to_part(dev: gdb.Value) -> gdb.Value: """ - Converts a struct device that is embedded in a struct hd_struct - back to the struct hd_struct. + Converts a ``struct device`` that is embedded in a ``struct hd_struct`` + back to the ``struct hd_struct``. Args: - dev (gdb.Value): A struct device embedded within a - struct hd_struct. No checking is performed. Results if other - structures are provided are undefined. + dev: A ``struct device`` embedded within a ``struct hd_struct``. The + value must be of type ``struct device``. Returns: - gdb.Value: The converted struct hd_struct + :obj:`gdb.Value`: The converted ``struct hd_struct``. The value is of + type ``struct hd_struct``. """ return container_of(dev, types.hd_struct_type, '__dev') def gendisk_to_dev(gendisk: gdb.Value) -> gdb.Value: """ - Converts a struct gendisk that embeds a struct device to - the struct device. + Converts a ``struct gendisk`` that embeds a ``struct device`` to + the ``struct device``. Args: - dev (gdb.Value): A struct gendisk that embeds - a struct device. No checking is performed. Results - if other structures are provided are undefined. + gendisk: A ``struct gendisk`` that embeds a ``struct device``. The + value must be of type ``struct gendisk``. Returns: - gdb.Value: The converted struct device + :obj:`gdb.Value`: The converted ``struct device``. The value is + of type ``struct device``. """ return gendisk['part0']['__dev'] def part_to_dev(part: gdb.Value) -> gdb.Value: """ - Converts a struct hd_struct that embeds a struct device to - the struct device. + Converts a ``struct hd_struct`` that embeds a ``struct device`` to + the ``struct device``. Args: - dev (gdb.Value): A struct hd_struct that embeds - a struct device. No checking is performed. Results if - other structures are provided are undefined. + part: A ``struct hd_struct`` that embeds a ``struct device``. The + value must be of type ``struct hd_struct``. Returns: - gdb.Value: The converted struct device + :obj:`gdb.Value`: The converted ``struct device``. The value is + of type ``struct device``.
""" return part['__dev'] @@ -116,20 +116,19 @@ def for_each_block_device(subtype: gdb.Value=None) -> Iterable[gdb.Value]: corresponds to a the the type field of the struct device. Args: - subtype (gdb.Value, optional): The struct - device_type that will be used to match and filter. Typically - 'disk_type' or 'part_type' + subtype (optional): The ``struct device_type`` that will be used + to match and filter. Typically the values associated with + the ``disk_type`` or ``part_type`` :obj:`gdb.Symbol`. Yields: - gdb.Value or - gdb.Value: - A struct gendisk or struct hd_struct that meets - the filter criteria. + :obj:`gdb.Value`: The next block device that matches the subtype. + The value is of type ``struct gendisk`` or ``struct hd_struct``. Raises: - RuntimeError: An unknown device type was encountered during iteration. - TypeError: The provided subtype was not of `struct device_type' or - `struct device type *' + :obj:`RuntimeError`: An unknown device type was encountered + during iteration. + :obj:`TypeError`: The provided subtype was not of + ``struct device_type`` or ``struct device type *`` """ if subtype: @@ -153,7 +152,7 @@ def for_each_disk() -> Iterable[gdb.Value]: Iterates over each block device registered with the block class that corresponds to an entire disk. - This is an alias for for_each_block_device(disk_type) + This is an alias for for_each_block_device(``disk_type``) """ return for_each_block_device(symvals.disk_type) @@ -166,16 +165,16 @@ def gendisk_name(gendisk: gdb.Value) -> str: including partition number, if applicable. Args: - gendisk(gdb.Value): - A struct gendisk or struct hd_struct for which to return - the name + gendisk: A ``struct gendisk`` or ``struct hd_struct`` for which to + return the name. The value must be of type ``struct gendisk`` + or ``struct hd_struct``. Returns: - str: the name of the block device + :obj:`str`: The name of the block device Raises: - InvalidArgumentError: gdb.Value does not describe a struct gendisk or - struct hd_struct + :obj:`.InvalidArgumentError`: gendisk does not describe a + ``struct gendisk`` or ``struct hd_struct`` """ if gendisk.type.code == gdb.TYPE_CODE_PTR: gendisk = gendisk.dereference() @@ -198,28 +197,29 @@ def block_device_name(bdev: gdb.Value) -> str: including partition number, if applicable. Args: - bdev(gdb.Value): A struct block_device for - which to return the name + bdev: A ``struct block_device`` for which to return the name. The + value must be of type ``struct block_device``. Returns: - str: the name of the block device + :obj:`str`: The name of the block device """ return gendisk_name(bdev['bd_disk']) def is_bdev_inode(inode: gdb.Value) -> bool: """ - Tests whether the provided struct inode describes a block device + Tests whether the provided ``struct inode`` describes a block device - This method evaluates the inode and returns a True or False, + This method evaluates the inode and returns :obj:`True` or :obj:`False`, depending on whether the inode describes a block device. Args: - bdev(gdb.Value): The struct inode to test whether - it describes a block device. + bdev: The ``struct inode`` to test whether it describes a block device. + The value must be of type ``struct inode``. Returns: - bool: True if the inode describes a block device, False otherwise. + :obj:`bool`: :obj:`True` if the inode describes a block device, + :obj:`False` otherwise. 
""" return inode['i_sb'] == symvals.blockdev_superblock @@ -231,15 +231,16 @@ def inode_to_block_device(inode: gdb.Value) -> gdb.Value: Otherwise, raise InvalidArgumentError. Args: - inode(gdb.Value): The struct inode for which to - return the associated block device + inode: The ``struct inode`` for which to return the associated + block device. The value must be of type ``struct inode``. Returns: - gdb.Value: The struct block_device associated - with the provided struct inode + :obj:`gdb.Value`: The ``struct block_device`` associated with the + provided ``struct inode``. The value is of type + ``struct block_device``. Raises: - InvalidArgumentError: inode does not describe a block device + :obj:`.InvalidArgumentError`: inode does not describe a block device """ if inode['i_sb'] != symvals.blockdev_superblock: raise InvalidArgumentError("inode does not correspond to block device") @@ -254,12 +255,12 @@ def inode_on_bdev(inode: gdb.Value) -> gdb.Value: with the inode's super block. Args: - inode(gdb.Value): The struct inode for which to - return the associated block device + inode: The ``struct inode`` for which to return the associated + block device. The value must be of type ``struct inode``. Returns: - gdb.Value: The struct block_device associated - with the provided struct inode + :obj:`gdb.Value`: The ``struct block_device`` associated with the + provided ``struct inode``. The value is of type ``struct inode``. """ if is_bdev_inode(inode): return inode_to_block_device(inode) diff --git a/crash/subsystem/storage/blocksq.py b/crash/subsystem/storage/blocksq.py index 34e7d14827b..16cab773717 100644 --- a/crash/subsystem/storage/blocksq.py +++ b/crash/subsystem/storage/blocksq.py @@ -16,18 +16,18 @@ class NoQueueError(RuntimeError): def for_each_request_in_queue(queue: gdb.Value) -> Iterable[gdb.Value]: """ - Iterates over each struct request in request_queue + Iterates over each ``struct request`` in request_queue - This method iterates over the request_queue's queuelist and + This method iterates over the ``request_queue``'s queuelist and returns a request for each member. Args: - queue(gdb.Value): The struct request_queue - used to iterate + queue: The ``struct request_queue`` used to iterate. The value + must be of type ``struct request_queue``. Yields: - gdb.Value: Each struct request contained within - the request_queue's queuelist + :obj:`gdb.Value`: Each ``struct request`` contained within the + ``request_queue``'s queuelist. The value is of type ``struct request``. """ if int(queue) == 0: raise NoQueueError("Queue is NULL") @@ -39,15 +39,15 @@ def request_age_ms(request: gdb.Value) -> int: Returns the age of the request in milliseconds This method returns the difference between the current time - (jiffies) and the request's start_time, in milliseconds. + (``jiffies``) and the request's ``start_time``, in milliseconds. Args: - request(gdb.Value): The struct request used - to determine age + request: The ``struct request`` used to determine age. The value + is of type ``struct request``. Returns: - int: Difference between the request's start_time and - current jiffies in milliseconds. + :obj:`int`: Difference between the request's ``start_time`` and + current ``jiffies`` in milliseconds. """ return kernel.jiffies_to_msec(kernel.jiffies - request['start_time']) @@ -55,9 +55,14 @@ def requests_in_flight(queue: gdb.Value) -> Tuple[int, int]: """ Report how many requests are in flight for this queue - This method returns a 2-tuple of ints. 
The first value - is the number of read requests in flight. The second - value is the number of write requests in flight. + Args: + queue: The request queue to inspect for requests in flight. + The value must be of type ``struct request_queue``. + + Returns: + (:obj:`int`, :obj:`int`): The requests in flight. The first member of + the 2-tuple is the number of read requests, the second is the number + of write requests. """ return (int(queue['in_flight'][0]), int(queue['in_flight'][1])) diff --git a/crash/subsystem/storage/decoders.py b/crash/subsystem/storage/decoders.py index 91f467e728a..8e2ab7aaa8e 100644 --- a/crash/subsystem/storage/decoders.py +++ b/crash/subsystem/storage/decoders.py @@ -12,12 +12,12 @@ class Decoder(object): They are relatively lightweight at runtime, meaning that the object is initialized but not decoded until it's needed. The string will - be formatted each time, but each Decoder's interpret() method will - be called once. + be formatted each time, but each :obj:`.Decoder`'s :func:`interpret()` + method will be called once. Attributes: - interpreted (bool): Whether the contents of this Decoder have already - been interpreted + interpreted (:obj:`bool`): Whether the contents of this + :obj:`.Decoder` have already been interpreted """ __endio__: EndIOSpecifier = None @@ -26,7 +26,7 @@ def __init__(self): def interpret(self) -> None: """ - Interpret the Decoder object + Interpret the :obj:`.Decoder` object Rather than populate all the fields when they may not be used, we delay interpreting the object until the fields are needed. @@ -49,7 +49,7 @@ def register(cls): """ Registers a decoder with the storage decoder subsystem. - Each Decoder contains the name of an endio routine. When + Each :obj:`.Decoder` contains the name of an endio routine. When an object that needs decoding is encountered, the endio routine contained in the object is used to look up the decoder for that object. @@ -58,9 +58,9 @@ def register(cls): def __str__(self) -> str: """ - The printable description of this Decoder. Typical Decoders - include the address of the object, the block device it uses, - and the location(s) affected by the object. + The printable description of this :obj:`.Decoder`. Typical + :obj:`.Decoder`s include the address of the object, the block + device it uses, and the location(s) affected by the object. """ pass @@ -72,12 +72,12 @@ def __next__(self): a bio generated by another bio being cloned by device mapper, etc. In these scenarios, the __next__ method can be used to pass the - next Decoder object in the chain. It is not necessary to know + next :obj:`Decoder` object in the chain. It is not necessary to know the source of the object being decoded -- only its type is necessary. - Typical uses will be 'return decode_bh(self.bh)' or - 'return decode_bio(self.next_bio)' + Typical uses will be ``return decode_bh(self.bh)`` or + ``return decode_bio(self.next_bio)`` If there are no objects beyond this one, it does not need to be overridden. @@ -88,49 +88,48 @@ class BadBHDecoder(Decoder): """ Placeholder decoder for bad buffer_head pointers - Rather than raise a NotAvailableError during decoding, we use a - BadBHDecoder decoder to document where in the chain there was an + Rather than raise a :obj:`gdb.NotAvailableError` during decoding, we use a + :obj:`.BadBHDecoder` decoder to annotate where in the chain there was an invalid buffer_head. 
- """ - description = "{:x} bh: invalid buffer_head" - def __init__(self, bh): - """ - Initialize a Decoder for `struct buffer_head' that describes - a bad pointer + Args: + bh: The ``struct buffer_head`` to be decoded. The value must be of + type ``struct buffer_head``. - Args: - bh (gdb.Value): The buffer_head to decode - """ + Attributes: + bh (:obj:`gdb.Value`): The ``struct buffer head`` that was + referenced from the bio. The value is of type + ``struct buffer_head``. + """ + _description = "{:x} bh: invalid buffer_head" + + def __init__(self, bh: gdb.Value): super().__init__() self.bh = bh def __str__(self): - return self.description.format(int(self.bh)) + return self._description.format(int(self.bh)) class GenericBHDecoder(Decoder): """ - Decodes a bio that references a struct buffer_head + Decodes a bio that references a ``struct buffer_head`` - This method decodes a generic struct buffer_head, when no + This method decodes a generic ``struct buffer_head``, when no implementation-specific decoder is available + Args: + bh: The ``struct buffer_head`` to be decoded. The value must be of + type ``struct buffer_head``. + Attributes: - bh (gdb.Value): - The buffer head that was referenced from the bio + bh (:obj:`gdb.Value`): The ``struct buffer head`` that was + referenced from the bio. The value is of type + ``struct buffer_head``. """ - description = "{:x} buffer_head: for dev {}, block {}, size {} (undecoded)" + _description = "{:x} buffer_head: for dev {}, block {}, size {} (undecoded)" def __init__(self, bh: gdb.Value): - """ - Initalize a Decoder for `struct buffer_head' without any file - system information associated with it - - Args: - bio(gdb.Value): - The struct buffer_head to be decoded. - """ super().__init__() self.bh = bh @@ -138,8 +137,8 @@ def interpret(self): self.block_device = block_device_name(self.bh['b_bdev']) def __str__(self): - return self.description.format(int(self.bh), self.block_device, - self.bh['b_blocknr'], self.bh['b_size']) + return self._description.format(int(self.bh), self.block_device, + self.bh['b_blocknr'], self.bh['b_size']) _decoders: Dict[int, Decoder] = dict() @@ -149,32 +148,32 @@ def register_decoder(endio: EndIOSpecifier, decoder: Decoder) -> None: A decoder is a class that accepts a bio, buffer_head, or other object, potentially interprets the private members of the object, and - returns a Decoder object that describes it. + returns a :obj:`.Decoder` object that describes it. - The only mandatory part of a Decoder is the __str__ method to - print the description. + The only mandatory part of a :obj:`.Decoder` is the :meth:`__str__` + method to format the description. - If the bio is part of a stack, the __next__ method will contain - the next Decoder object in the stack. It does not necessarily need - to be a bio. The Decoder does not need to be registered unless it + If the bio is part of a stack, the :meth:`__next__` method will contain + the next :obj:`.Decoder` object in the stack. It does not necessarily need + to be a bio. The :obj:`.Decoder` does not need to be registered unless it will be a top-level decoder. Other attributes can be added as-needed to allow informed callers to obtain direct information. Args: - endio (str, list of str, gdb.Symbol, gdb.Value, or int): The function - used as an endio callback. + endio: The function(s) used as endio callback(s). - The str or list of str arguments are used to register a callback - such that the Decoder is registered when the symbol is available. 
+ The :obj:`str` or :obj:`list` of :obj:`str` arguments are used + to register a callback such that the :obj:`.Decoder` is + registered when the symbol is available. - The gdb.Symbol, gdb.Value, and int versions are to be used - once the symbol is available for resolution. + The :obj:`gdb.Symbol`, :obj:`gdb.Value`, and :obj:`int` versions + are to be used once the symbol is available for resolution. - If in doubt, use the names instead of the symbols objects. + If in doubt, use the names instead of the :obj:`gdb.Symbol` objects. - decoder (Decoder): The decoder class used to handle this object. + decoder: The decoder class used to handle this object. """ debug = False @@ -205,42 +204,44 @@ class BadBioDecoder(Decoder): """ Placeholder decoder for bad bio pointers - Rather than raise a NotAvailableError during decoding, we use a - BadBioDecoder decoder to document where in the chain there was an + Rather than raise a :obj:`.NotAvailableError` during decoding, we use a + :obj:`.BadBioDecoder` decoder to annotate where in the chain there was an invalid bio. - """ - description = "{:x} bio: invalid bio" - def __init__(self, bio): - """ - Initialize a Decoder for `struct bio' that describes a bad pointer + Args: + bio: The bio to decode. The value must be of type ``struct bio``. - Args: - bio (gdb.Value): The bio to decode - """ + Attributes: + bio (:obj:`gdb.Value`): The bio being decoded. The value is of + type ``struct bio``. + """ + _description = "{:x} bio: invalid bio" + + def __init__(self, bio: gdb.Value): super().__init__() self.bio = bio def __str__(self): - return self.description.format(int(self.bio)) + return self._description.format(int(self.bio)) class GenericBioDecoder(Decoder): """ Placeholder decoder for when we have a valid bio but nothing to decode it - """ - description = "{:x} bio: undecoded bio on {} ({})" - def __init__(self, bio): - """ - Initialize a Decoder for `struct bio' for a bio with no other decoder - Args: - bio (gdb.Value): The bio to decode - """ + Args: + bio: The bio to decode. The value must be of type ``struct bio``. + + Attributes: + bio (:obj:`gdb.Value`): The bio being decoded. The value is of type + ``struct bio``. + """ + _description = "{:x} bio: undecoded bio on {} ({})" + def __init__(self, bio: gdb.Value): super().__init__() self.bio = bio def __str__(self): - return self.description.format(int(self.bio), + return self._description.format(int(self.bio), block_device_name(self.bio['bi_bdev']), bio['bi_end_io']) @@ -248,18 +249,18 @@ def decode_bio(bio: gdb.Value) -> Decoder: """ Decodes a single bio, if possible - This method will return a Decoder object describing a single bio + This method will return a :obj:`.Decoder` object describing a single bio after decoding it using a registered decoder, if available. - If no decoder is registered, a generic description will be used. + If no decoder is registered, a generic decoder will be used. + + If an invalid object is encountered, a handler decoder will be used. Args: - bio (gdb.Value): The bio to decode + bio: The bio to decode. The value must be of type ``struct bio``. Returns: - BadBioDecoder: The bio was not valid - GenericBioDecoder: The bio has no other decoder - Decoder-derived object: The decoder appropriate for this bio type + :obj:`.Decoder`: The decoder appropriate for this bio type. 
""" try: @@ -273,19 +274,20 @@ def decode_bh(bh: gdb.Value) -> Decoder: """ Decodes a single buffer_head, if possible - This method will return a Decoder object describing a single buffer_head - after decoding it using a registered decoder, if available. + This method will return a :obj:`.Decoder` object describing a single + ``struct buffer_head`` after decoding it using a registered decoder, + if available. + + If no decoder is registered, a generic decoder will be used. - If no decoder is registered, a generic description will be used. + If an invalid object is encountered, a handler decoder will be used. Args: - bh (gdb.Value): The buffer_head to decode + bh: The buffer_head to decode. The value must be of type + ``struct buffer_head``. Returns: - BadBHecoder: The bio was not valid - GenericBHDecoder: The bio has no other decoder - Decoder-derived object: - The decoder appropriate for this buffer_head type + :obj:`.Decoder`: The decoder appropriate for this buffer_head type """ try: return _decoders[int(bh['b_end_io'])](bh) diff --git a/crash/subsystem/storage/device_mapper.py b/crash/subsystem/storage/device_mapper.py index 57238f0ac0c..7a2af9634f6 100644 --- a/crash/subsystem/storage/device_mapper.py +++ b/crash/subsystem/storage/device_mapper.py @@ -14,44 +14,44 @@ class ClonedBioReqDecoder(Decoder): This decodes a cloned bio generated by request-based device mapper targets. Args: - bio(gdb.Value): A struct bio generated by a - request-based device mapper target + bio: A ``struct bio`` generated by a request-based device mapper + target. The value must be of type ``struct bio``. """ - types = Types([ 'struct dm_rq_clone_bio_info *' ]) + _types = Types([ 'struct dm_rq_clone_bio_info *' ]) __endio__ = 'end_clone_bio' - description = '{:x} bio: Request-based Device Mapper on {}' + _description = '{:x} bio: Request-based Device Mapper on {}' _get_clone_bio_rq_info = None - def __init__(self, bio): + def __init__(self, bio: gdb.Value): super().__init__() self.bio = bio - if cls._get_clone_bio_rq_info is None: - if 'clone' in cls.types.dm_rq_clone_bio_info_p_type.target(): - getter = cls._get_clone_bio_rq_info_3_7 + if self._get_clone_bio_rq_info is None: + if 'clone' in self._types.dm_rq_clone_bio_info_p_type.target(): + getter = self._get_clone_bio_rq_info_3_7 else: - getter = cls._get_clone_bio_rq_info_old - cls._get_clone_bio_rq_info = getter + getter = self._get_clone_bio_rq_info_old + self._get_clone_bio_rq_info = getter def interpret(self): + """Interprets the request-based device mapper bio to populate its + attributes""" self.info = cls._get_clone_bio_rq_info(bio) self.tio = self.info['tio'] def __str__(self): - self.description.format(int(self.bio), + self._description.format(int(self.bio), block_device_name(self.bio['bi_bdev'])) def __next__(self): return decode_bio(self.info['orig']) - @classmethod - def _get_clone_bio_rq_info_old(cls, bio): - return bio['bi_private'].cast(cls.types.dm_rq_clone_bio_info_p_type) + def _get_clone_bio_rq_info_old(self, bio): + return bio['bi_private'].cast(self._types.dm_rq_clone_bio_info_p_type) - @classmethod - def _get_clone_bio_rq_info_3_7(cls, bio): - return container_of(bio, cls.types.dm_rq_clone_bio_info_p_type, 'clone') + def _get_clone_bio_rq_info_3_7(self, bio): + return container_of(bio, self._types.dm_rq_clone_bio_info_p_type, 'clone') ClonedBioReqDecoder.register() @@ -59,41 +59,47 @@ class ClonedBioDecoder(Decoder): """ Decodes a bio-based device mapper cloned bio - This method decodes a cloned bio generated by request-based + This 
method decodes cloned bio generated by request-based device mapper targets. + Args: + bio: A ``struct bio`` generated by a bio-based device mapper target. + The value must be of type ``struct bio``. + Attributes: - bio (gdb.Value): - A struct bio generated by a bio-based device mapper target + bio (:obj:`gdb.Value`): A ``struct bio`` generated by a bio-based + device mapper target. The value is of type ``struct bio``. - next_bio (gdb.Value): - The struct bio that generated this one. + next_bio (:obj:`gdb.Value`): The struct bio that generated this one. + The value is of type ``struct bio``. - tio (gdb.Value): - The struct dm_target_tio for this bio + tio (:obj:`gdb.Value`): The dm target i/o operation for this bio. The + value is of type ``struct dm_target_io``. """ - types = Types([ 'struct dm_target_io *' ]) + _types = Types([ 'struct dm_target_io *' ]) _get_clone_bio_tio = None __endio__ = 'clone_endio' - description = "{:x} bio: device mapper clone: {}[{}] -> {}[{}]" + _description = "{:x} bio: device mapper clone: {}[{}] -> {}[{}]" - def __init__(self, bio): + def __init__(self, bio: gdb.Value): super().__init__() self.bio = bio - if _get_clone_bio_tio is None: - if 'clone' in cls.types.dm_target_io_p_type.target(): - getter = cls._get_clone_bio_tio_3_15 + if self._get_clone_bio_tio is None: + if 'clone' in self._types.dm_target_io_p_type.target(): + getter = self._get_clone_bio_tio_3_15 else: - getter = cls._get_clone_bio_tio_old - cls._get_clone_bio_tio = getter + getter = self._get_clone_bio_tio_old + self._get_clone_bio_tio = getter def interpret(self): - self.tio = cls._get_clone_bio_tio(bio) + """Interprets the cloned device mapper bio to populate its + attributes""" + self.tio = self._get_clone_bio_tio(bio) self.next_bio = tio['io']['bio'] def __str__(self): - return self.description.format( + return self._description.format( int(self.bio), block_device_name(self.bio['bi_bdev']), int(bself.io['bi_sector']), @@ -103,13 +109,11 @@ def __str__(self): def __next__(self): return decode_bio(self.next_bio) - @classmethod - def _get_clone_bio_tio_old(cls, bio): - return bio['bi_private'].cast(cls.types.dm_target_io_p_type) + def _get_clone_bio_tio_old(self, bio): + return bio['bi_private'].cast(self._types.dm_target_io_p_type) - @classmethod - def _get_clone_bio_tio_3_15(cls, bio): + def _get_clone_bio_tio_3_15(self, bio): return container_of(bio['bi_private'], - cls.types.dm_clone_bio_info_p_type, 'clone') + self._types.dm_clone_bio_info_p_type, 'clone') ClonedBioDecoder.register() diff --git a/crash/types/bitmap.py b/crash/types/bitmap.py index 97611695cbf..1c9a5dc1722 100644 --- a/crash/types/bitmap.py +++ b/crash/types/bitmap.py @@ -1,5 +1,13 @@ #!/usr/bin/python3 # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +""" +The crash.types.bitmap module provides helpers for iterating and scanning +in-memory bitmaps. + +.. _bitmap_note: + +A bitmap is represented as either an array of ``unsigned long`` or as ``unsigned long *``. Each routine below that accepts a gdb.Value requires that it be of either type. +""" from typing import Iterable @@ -27,13 +35,16 @@ def for_each_set_bit(bitmap: gdb.Value, Yield each set bit in a bitmap Args: - bitmap (gdb.Value: - The bitmap to iterate - size_in_bytes (int): The size of the bitmap if the type is - unsigned long *. + bitmap: The :ref:`bitmap ` to iterate. + size_in_bytes: The size of the bitmap if the type is + ``unsigned long *``. 
Yields: - int: The position of a bit that is set + :obj:`int`: The position of a bit that is set + + Raises: + :obj:`.InvalidArgumentError`: The :obj:`gdb.Value` is not of + type ``unsigned long[]`` or ``unsigned long *``. """ _check_bitmap_type(bitmap) @@ -95,20 +106,24 @@ def _find_first_set_bit(val: gdb.Value) -> int: def find_next_zero_bit(bitmap: gdb.Value, start: int, size_in_bytes: int=None) -> int: """ - Return the next unset bit in the bitmap starting at position `start', + Return the next unset bit in the bitmap starting at position start, inclusive. Args: - bitmap (gdb.Value: - The bitmap to test - start (int): The bit number to use as a starting position. If + bitmap: The :ref:`bitmap ` to scan. + start: The bit number to use as a starting position. If the bit at this position is unset, it will be the first bit number yielded. - size_in_bytes (int): The size of the bitmap if the type is - unsigned long *. + size_in_bytes: The size of the bitmap if the type is + ``unsigned long *``. Returns: - int: The position of the first bit that is unset or 0 if all are set + :obj:`int`: The position of the first bit that is unset or + ``0`` if all are set + + Raises: + :obj:`.InvalidArgumentError`: The :obj:`gdb.Value` is not of + type ``unsigned long[]`` or ``unsigned long *``. """ _check_bitmap_type(bitmap) @@ -147,34 +162,41 @@ def find_first_zero_bit(bitmap: gdb.Value, size_in_bytes: int=None) -> int: Return the first unset bit in the bitmap Args: - bitmap (gdb.Value: - The bitmap to scan - start (int): The bit number to use as a starting position. If + bitmap: The :ref:`bitmap ` to scan. + start: The bit number to use as a starting position. If the bit at this position is unset, it will be the first bit number yielded. Returns: - int: The position of the first bit that is unset + :obj:`int`: The position of the first bit that is unset + + Raises: + :obj:`.InvalidArgumentError`: The :obj:`gdb.Value` is not of + type ``unsigned long[]`` or ``unsigned long *``. """ return find_next_zero_bit(bitmap, 0, size_in_bytes) def find_next_set_bit(bitmap: gdb.Value, start: int, size_in_bytes: int=None) -> int: """ - Return the next set bit in the bitmap starting at position `start', + Return the next set bit in the bitmap starting at position start, inclusive. Args: - bitmap (gdb.Value: - The bitmap to scan - start (int): The bit number to use as a starting position. If + bitmap: The :ref:`bitmap ` to scan. + start: The bit number to use as a starting position. If the bit at this position is unset, it will be the first bit number yielded. - size_in_bytes (int): The size of the bitmap if the type is - unsigned long *. + size_in_bytes: The size of the bitmap if the type is + ``unsigned long *``. Returns: - int: The position of the next bit that is set, or 0 if all are unset + :obj:`int`: The position of the next bit that is set, or + ``0`` if all are unset + + Raises: + :obj:`.InvalidArgumentError`: The :obj:`gdb.Value` is not of + type ``unsigned long[]`` or ``unsigned long *``. """ _check_bitmap_type(bitmap) @@ -213,13 +235,17 @@ def find_first_set_bit(bitmap: gdb.Value, size_in_bytes: int=None) -> int: Return the first set bit in the bitmap Args: - bitmap (gdb.Value: - The bitmap to scan - size_in_bytes (int): The size of the bitmap if the type is - unsigned long *. + bitmap: The :ref:`bitmap ` to scan. + size_in_bytes: The size of the bitmap if the type is + ``unsigned long *``. 
Returns: - int: The position of the first bit that is set, or 0 if all are unset + :obj:`int`: The position of the first bit that is set, or + ``0`` if all are unset + + Raises: + :obj:`.InvalidArgumentError`: The :obj:`gdb.Value` is not of + type ``unsigned long[]`` or ``unsigned long *``. """ return find_next_set_bit(bitmap, 0, size_in_bytes) @@ -260,11 +286,17 @@ def find_last_set_bit(bitmap: gdb.Value, size_in_bytes: int=None) -> int: Return the last set bit in the bitmap Args: - bitmap (gdb.Value: - The bitmap to scan + bitmap: The :ref:`bitmap ` to scan. + size_in_bytes: The size of the bitmap if the type is + ``unsigned long *``. Returns: - int: The position of the last bit that is set, or 0 if all are unset + :obj:`int`: The position of the last bit that is set, or + ``0`` if all are unset + + Raises: + :obj:`.InvalidArgumentError`: The :obj:`gdb.Value` is not + of type ``unsigned long[]`` or ``unsigned long *``. """ _check_bitmap_type(bitmap) diff --git a/crash/types/classdev.py b/crash/types/classdev.py index d1f1e8a65ab..5bd5fe531da 100644 --- a/crash/types/classdev.py +++ b/crash/types/classdev.py @@ -1,5 +1,8 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +""" +The crash.types.classdev module offers helpers to work with class devices. +""" from typing import Iterable @@ -12,13 +15,13 @@ types = Types(['struct device', 'struct device_private']) class ClassdevState(object): - class_is_private = True + _class_is_private = True #v5.1-rc1 moved knode_class from struct device to struct device_private @classmethod def _setup_iterator_type(cls, gdbtype): if struct_has_member(gdbtype, 'knode_class'): - cls.class_is_private = False + cls._class_is_private = False type_cbs = TypeCallbacks([ ('struct device', @@ -26,15 +29,29 @@ def _setup_iterator_type(cls, gdbtype): def for_each_class_device(class_struct: gdb.Value, subtype: gdb.Value=None) -> Iterable[gdb.Value]: + """ + Iterate over the list of class devices + + Args: + class_struct: The class of devices to iterate + subtype: A ``struct device_type *`` to use to filter the results. + The value must be of type ``struct device_type *`` and will + be used to compare against the ``type`` field of each + ``struct device``. + + Yields: + :obj:`gdb.Value`: A device on the class's device list. The value is + of type ``struct device``. + """ klist = class_struct['p']['klist_devices'] container_type = types.device_type - if ClassdevState.class_is_private: + if ClassdevState._class_is_private: container_type = types.device_private_type for knode in klist_for_each(klist): dev = container_of(knode, container_type, 'knode_class') - if ClassdevState.class_is_private: + if ClassdevState._class_is_private: dev = dev['device'].dereference() if subtype is None or int(subtype) == int(dev['type']): diff --git a/crash/types/cpu.py b/crash/types/cpu.py index 841619c748b..d964245d94d 100644 --- a/crash/types/cpu.py +++ b/crash/types/cpu.py @@ -1,5 +1,8 @@ #!/usr/bin/python3 # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +""" +The crash.types.cpu module offers helpers to work with the state of CPUs. +""" from typing import Iterable, List @@ -13,23 +16,32 @@ # this wraps no particular type, rather it's a placeholder for # functions to iterate over online cpu's etc. class TypesCPUClass(object): + """A state holder class for handling CPUs. Not meant to be instantiated. + + Attributes: + cpus_online (:obj:`list` of :obj:`int`): A list of the IDs of all online CPUs. 
+ cpus_possible (:obj:`list` of :obj:`int`): A list of the IDs of all possible CPUs. + """ cpus_online: List[int] = list() cpus_possible: List[int] = list() - cpu_online_mask: gdb.Value = None - cpu_possible_mask: gdb.Value = None + _cpu_online_mask: gdb.Value = None + _cpu_possible_mask: gdb.Value = None + + def __init__(self): + raise NotImplementedError("This class is not meant to be instantiated") @classmethod def _setup_online_mask(cls, symbol: gdb.Symbol) -> None: - cls.cpu_online_mask = symbol.value() - bits = cls.cpu_online_mask["bits"] + cls._cpu_online_mask = symbol.value() + bits = cls._cpu_online_mask["bits"] cls.cpus_online = list(for_each_set_bit(bits)) @classmethod def _setup_possible_mask(cls, cpu_mask: gdb.Symbol) -> None: - cls.cpu_possible_mask = cpu_mask.value() - bits = cls.cpu_possible_mask["bits"] + cls._cpu_possible_mask = cpu_mask.value() + bits = cls._cpu_possible_mask["bits"] cls.cpus_possible = list(for_each_set_bit(bits)) def for_each_online_cpu() -> Iterable[int]: @@ -37,7 +49,7 @@ def for_each_online_cpu() -> Iterable[int]: Yield CPU numbers of all online CPUs Yields: - int: Number of a possible CPU location + :obj:`int`: Number of an online CPU location """ for cpu in TypesCPUClass.cpus_online: yield cpu @@ -47,7 +59,7 @@ def highest_online_cpu_nr() -> int: Return The highest online CPU number Returns: - int: The highest online CPU number + :obj:`int`: The highest online CPU number """ if not TypesCPUClass.cpus_online: raise DelayedAttributeError('cpus_online') @@ -58,7 +70,7 @@ def for_each_possible_cpu() -> Iterable[int]: Yield CPU numbers of all possible CPUs Yields: - int: Number of a possible CPU location + :obj:`int`: Number of a possible CPU location """ for cpu in TypesCPUClass.cpus_possible: yield cpu @@ -68,7 +80,7 @@ def highest_possible_cpu_nr() -> int: Return The highest possible CPU number Returns: - int: The highest possible CPU number + :obj:`int`: The highest possible CPU number """ if not TypesCPUClass.cpus_possible: raise DelayedAttributeError('cpus_possible') diff --git a/crash/types/klist.py b/crash/types/klist.py index 9fc5fac480e..fc18915e082 100644 --- a/crash/types/klist.py +++ b/crash/types/klist.py @@ -20,11 +20,12 @@ def klist_for_each(klist: gdb.Value) -> Iterable[gdb.Value]: Iterate over a klist and yield each node Args: - klist (gdb.Value): - The list to iterate + klist: The list to iterate. The value must be of type + ``struct klist`` or ``struct klist *``. Yields: - gdb.Value: The next node in the list + :obj:`gdb.Value`: The next node in the list. The value is of type + ``struct klist_node``. """ if klist.type == types.klist_type.pointer(): klist = klist.dereference() @@ -46,14 +47,15 @@ def klist_for_each_entry(klist: gdb.Value, gdbtype: gdb.Type, Iterate over a klist and yield each node's containing object Args: - klist (gdb.Value): - The list to iterate - gdbtype (gdb.Type): The type of the containing object - member (str): The name of the member in the containing object that + klist: The list to iterate. The value must be of type + ``struct klist`` or ``struct klist *``. + gdbtype: The type of the containing object + member: The name of the member in the containing object that corresponds to the klist_node Yields: - gdb.Value: The next node in the list + :obj:`gdb.Value`: The next node in the list. The value is of the + specified type. 
""" for node in klist_for_each(klist): if node.type is not types.klist_node_type: diff --git a/crash/types/list.py b/crash/types/list.py index 4660ca2b58c..084311115dc 100644 --- a/crash/types/list.py +++ b/crash/types/list.py @@ -26,25 +26,25 @@ def list_for_each(list_head: gdb.Value, include_head: bool=False, Iterate over a list and yield each node Args: - list_head (gdb.Value): - The list to iterate - include_head (bool, optional, default=False): - Include the head of the list in iteration - useful - for lists with no anchors - reverse (bool, optional, default=False): - Iterate the list in reverse order (follow the prev links) - print_broken_links (bool, optional, default=True): - Print warnings about broken links - exact_cycles (bool, optional, default=False): - Detect and raise an exception if a cycle is detected in the list + list_head: The list to iterate. The value must be of type + ``struct list_head`` or ``struct list_head *``. + include_head (optional): Include the head of the list in + iteration - useful for lists with no anchors + reverse (optional): Iterate the list in reverse order + (follow the ``prev`` links) + print_broken_links (optional): Print warnings about broken links + exact_cycles (optional): Detect and raise an exception if + a cycle is detected in the list Yields: - gdb.Value: The next node in the list + gdb.Value: The next node in the list. The value is + of type ``struct list_head``. Raises: - CorruptListError: the list is corrupted - ListCycleError: the list contains cycles - BufferError: portions of the list cannot be read + :obj:`.CorruptListError`: the list is corrupted + :obj:`.ListCycleError`: the list contains cycles + :obj:`BufferError`: portions of the list cannot be read + :obj:`gdb.NotAvailableError`: The target value is not available. """ pending_exception = None if not isinstance(list_head, gdb.Value): @@ -140,23 +140,29 @@ def list_for_each_entry(list_head: gdb.Value, gdbtype: gdb.Type, Iterate over a list and yield each node's containing object Args: - list_head (gdb.Value): - The list to iterate - gdbtype (gdb.Type): The type of the containing object - member (str): The name of the member in the containing object that + list_head: The list to iterate. The value must be of type + ``struct list_head`` or ``struct list_head *``. + gdbtype: The type of the containing object + member: The name of the member in the containing object that corresponds to the list_head - include_head (bool, optional, default=False): + include_head (optional): Include the head of the list in iteration - useful for lists with no anchors - reverse (bool, optional, default=False): + reverse (optional): Iterate the list in reverse order (follow the prev links) - print_broken_links (bool, optional, default=True): + print_broken_links (optional): Print warnings about broken links - exact_cycles (bool, optional, default=False): + exact_cycles (optional): Detect and raise an exception if a cycle is detected in the list Yields: - gdb.Value: The next node in the list + gdb.Value: The next node in the list. The value is of the + specified type. + Raises: + :obj:`.CorruptListError`: the list is corrupted + :obj:`.ListCycleError`: the list contains cycles + :obj:`BufferError`: portions of the list cannot be read + :obj:`gdb.NotAvailableError`: The target value is not available. 
""" for node in list_for_each(list_head, include_head=include_head, @@ -165,7 +171,20 @@ def list_for_each_entry(list_head: gdb.Value, gdbtype: gdb.Type, exact_cycles=exact_cycles): yield container_of(node, gdbtype, member) -def list_empty(list_head): +def list_empty(list_head: gdb.Value) -> bool: + """ + Test whether a list is empty + + Args: + list_head: The list to test. The value must be of type + ``struct list_head`` or ``struct list_head *``. + + Returns: + :obj:`bool`: Whether the list is empty. + + Raises: + :obj:`gdb.NotAvailableError`: The target value is not available. + """ addr = int(list_head.address) if list_head.type.code == gdb.TYPE_CODE_PTR: addr = int(list_head) diff --git a/crash/types/module.py b/crash/types/module.py index d787f9f538f..2e8945b0845 100644 --- a/crash/types/module.py +++ b/crash/types/module.py @@ -15,7 +15,8 @@ def for_each_module() -> Iterable[gdb.Value]: Iterate over each module in the modules list Yields: - gdb.Value(): The next module on the list + :obj:`gdb.Value`: The next module on the list. The value is of + type ``struct module``. """ for module in list_for_each_entry(symvals.modules, types.module_type, @@ -26,15 +27,20 @@ def for_each_module_section(module: gdb.Value) -> Iterable[Tuple[str, int]]: """ Iterate over each ELF section in a loaded module - This routine iterates over the 'sect_attrs' member of the 'struct module' - already in memory. For ELF sections from the module at rest, use - pyelftools on the module file. + This routine iterates over the ``sect_attrs`` member of the + ``struct module`` already in memory. For ELF sections from the + module at rest, use pyelftools on the module file. Args: - module (gdb.Value): The struct module to iterate + module: The struct module to iterate. The value must be of type + ``struct module``. Yields: - (str, int): A 2-tuple containing the name and address of the section + (:obj:`str`, :obj:`int`): A 2-tuple containing the name and address + of the section + + Raises: + :obj:`gdb.NotAvailableError`: The target value is not available. """ attrs = module['sect_attrs'] diff --git a/crash/types/node.py b/crash/types/node.py index 2a452106db6..e3d62e91bcc 100644 --- a/crash/types/node.py +++ b/crash/types/node.py @@ -1,5 +1,8 @@ #!/usr/bin/python3 # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +""" +The crash.types.node module offers helpers to work with NUMA nodes. +""" from typing import Iterable, List, Type, TypeVar @@ -20,9 +23,9 @@ def numa_node_id(cpu: int) -> int: Return the NUMA node ID for a given CPU Args: - cpu (int): The CPU number to obtain the NUMA node ID + cpu: The CPU number to obtain the NUMA node ID Returns: - int: The NUMA node ID for the specified CPU. + :obj:`int`: The NUMA node ID for the specified CPU. 
""" if gdb.current_target().arch.name() == "powerpc:common64": return int(symvals.numa_cpu_lookup_table[cpu]) @@ -41,10 +44,11 @@ def from_nid(cls: Type[NodeType], nid: int) -> NodeType: Obtain a Node using the NUMA Node ID (nid) Args: - nid (int): The NUMA Node ID + nid: The NUMA Node ID Returns: - Node: the Node wrapper for the struct node for this NID + :obj:`~crash.types.Node`: the Node wrapper for the struct + node for this NID """ return cls(symvals.node_data[nid].dereference()) @@ -53,7 +57,7 @@ def for_each_zone(self) -> Iterable[crash.types.zone.Zone]: Iterate over each zone contained in this NUMA node Yields: - Zone: The next Zone in this Node + :obj:`~crash.types.Zone`: The next Zone in this Node """ node_zones = self.gdb_obj["node_zones"] @@ -73,12 +77,20 @@ def __init__(self, obj: gdb.Value): Initialize a Node using the gdb.Value for the struct node Args: - obj: gdb.Value: - The node for which to construct a wrapper + obj: The node for which to construct a wrapper. The value must be + of type ``struct node``. """ self.gdb_obj = obj class NodeStates(object): + """ + A state holder for Node states. + + Attributes: + nids_online (:obj:`list` of :obj:`int`): A list of the online node IDs. + nids_possible (:obj:`list` of :obj:`int`): A list of the possible + node IDs. + """ nids_online: List[int] = list() nids_possible: List[int] = list() @@ -102,7 +114,7 @@ def for_each_nid(self) -> Iterable[int]: Iterate over each NUMA Node ID Yields: - int: The next NUMA Node ID + :obj:`int`: The next NUMA Node ID """ if not self.nids_possible: raise DelayedAttributeError('node_states') @@ -115,7 +127,7 @@ def for_each_online_nid(self) -> Iterable[int]: Iterate over each online NUMA Node ID Yields: - int: The next NUMA Node ID + :obj:`int`: The next NUMA Node ID """ if not self.nids_online: raise DelayedAttributeError('node_states') @@ -132,7 +144,7 @@ def for_each_nid(): Iterate over each NUMA Node ID Yields: - int: The next NUMA Node ID + :obj:`int`: The next NUMA Node ID """ for nid in _state.for_each_nid(): yield nid @@ -142,7 +154,7 @@ def for_each_online_nid(): Iterate over each online NUMA Node ID Yields: - int: The next NUMA Node ID + :obj:`int`: The next NUMA Node ID """ for nid in _state.for_each_online_nid(): yield nid @@ -152,7 +164,7 @@ def for_each_node() -> Iterable[Node]: Iterate over each NUMA Node Yields: - int: The next NUMA Node + :obj:`int`: The next NUMA Node """ for nid in for_each_nid(): yield Node.from_nid(nid) @@ -162,7 +174,7 @@ def for_each_online_node() -> Iterable[Node]: Iterate over each Online NUMA Node Yields: - int: The next NUMA Node + :obj:`int`: The next NUMA Node """ for nid in for_each_online_nid(): yield Node.from_nid(nid) diff --git a/doc-source/conf.py b/doc-source/conf.py new file mode 100644 index 00000000000..9febb4f0ac9 --- /dev/null +++ b/doc-source/conf.py @@ -0,0 +1,187 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# +# crash-python documentation build configuration file, created by +# sphinx-quickstart on Tue May 28 12:52:41 2019. +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. 
If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +import os +import sys +sys.path.insert(0, os.path.abspath('./mock')) + + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +# +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = ['sphinx.ext.autodoc', + 'sphinx.ext.coverage', + 'sphinx.ext.intersphinx', + 'sphinx.ext.viewcode', + 'sphinx.ext.napoleon'] + +intersphinx_mapping = { 'gdb' : + ("https://sourceware.org/gdb/onlinedocs/gdb/", "gdb.inv") } + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +# source_suffix = ['.rst', '.md'] +source_suffix = '.rst' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = 'crash-python' +copyright = '2019, Jeff Mahoney' +author = 'Jeff Mahoney' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = '1.0' +# The full version, including alpha/beta/rc tags. +release = '1.0' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This patterns also effect to html_static_path and html_extra_path +exclude_patterns = [] + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = False + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'alabaster' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# +# html_theme_options = {} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +#html_static_path = ['_static'] + +# Custom sidebar templates, must be a dictionary that maps document names +# to template names. 
+# +# This is required for the alabaster theme +# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars +html_sidebars = { + '**': [ + 'relations.html', # needs 'show_related': True theme option to display + 'searchbox.html', + ] +} + +html_theme_options = { + 'description': 'Kernel debugger in Python', + 'logo': 'logo.png', + 'logo_name': True, + 'logo_text_align': 'center', + 'github_user': 'jeffmahoney', + 'github_repo': 'crash-python', + 'github_button': True, + 'github_type': 'star', +} + + +# -- Options for HTMLHelp output ------------------------------------------ + +# Output file base name for HTML help builder. +htmlhelp_basename = 'crash-pythondoc' + + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + # + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', + + # Latex figure (float) alignment + # + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (master_doc, 'crash-python.tex', 'crash-python Documentation', + 'Jeff Mahoney', 'manual'), +] + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + (master_doc, 'crash-python', 'crash-python Documentation', + [author], 1) +] + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (master_doc, 'crash-python', 'crash-python Documentation', + author, 'crash-python', 'One line description of project.', + 'Miscellaneous'), +] + + + diff --git a/doc-source/index.rst b/doc-source/index.rst new file mode 100644 index 00000000000..878f22ed414 --- /dev/null +++ b/doc-source/index.rst @@ -0,0 +1,39 @@ +crash-python +============ + +.. include:: ../README.rst + :start-after: start-introduction + :end-before: end-introduction + +See the :doc:`installation` instructions. + +Quick Start +----------- + +.. include:: ../README.rst + :start-after: start-quick-start + :end-before: end-quick-start + +License +------- + +.. include:: ../README.rst + :start-after: start-license + :end-before: end-license + +Table of Contents +----------------- + +.. toctree:: + :maxdepth: 2 + + installation + user_guide + modules + +Indices and tables +------------------ + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/doc-source/installation.rst b/doc-source/installation.rst new file mode 100644 index 00000000000..a969f097df4 --- /dev/null +++ b/doc-source/installation.rst @@ -0,0 +1,6 @@ +Installation +============ + +.. 
include:: ../README.rst + :start-after: start-installation + :end-before: end-installation diff --git a/doc-source/make-gdb-refs.py b/doc-source/make-gdb-refs.py new file mode 100644 index 00000000000..1a815f92c06 --- /dev/null +++ b/doc-source/make-gdb-refs.py @@ -0,0 +1,64 @@ +#!/usr/bin/python3 + +# This creates a mock objects.inv file to reference external documentation + +from sphinx.ext import intersphinx +from sphinx.util.inventory import InventoryFile + +class config(object): + def __init__(self, project, version): + self.project = project + self.version = version + +# type: () -> Iterator[Tuple[unicode, unicode, unicode, unicode, unicode, int]] + +# modules: +# modname, modname, 'module', "info[0]", 'module-' + modname, 0 +# objects: +# refname, refname, type, docname, refname, 1 +class MockDomain(object): + def __init__(self, name): + self.name = name + self.objects = dict() + + def add_class_ref(self, name, doc): + self.objects[name] = doc + + def get_objects(self): + for name, doc in self.objects.items(): + yield (name, name, 'class', doc, name, 1) + +class MockEnvironment(object): + def __init__(self): + self.domains = dict() + self.config = config('gdb', '8.3') + + def add_domain(self, domain): + self.domains[domain.name] = domain + +class MockBuilder(object): + + def get_target_uri(self, docname): + return docname + +env = MockEnvironment() +builder = MockBuilder() + +classes = MockDomain('py') + +classes.add_class_ref('gdb.Type', 'Types-In-Python.html') +classes.add_class_ref('gdb.Symbol', 'Symbols-In-Python.html') +classes.add_class_ref('gdb.Command', 'Commands-In-Python.html') +classes.add_class_ref('gdb.Inferior', 'Inferiors-In-Python.html') +classes.add_class_ref('gdb.Objfile', 'Objfiles-In-Python.html') +classes.add_class_ref('gdb.Value', 'Values-From-Inferior.html') +classes.add_class_ref('gdb.InferiorThread', 'Threads-In-Python.html') +classes.add_class_ref('gdb.Frame', 'Frames-In-Python.html') +classes.add_class_ref('gdb.NotAvailableErorr', 'Exception-Handling.html') +classes.add_class_ref('gdb.MemoryError', 'Exception-Handling.html') +classes.add_class_ref('gdb.error', 'Exception-Handling.html') +classes.add_class_ref('gdb.GdbError', 'Exception-Handling.html') + +env.add_domain(classes) + +InventoryFile.dump("gdb.inv", env, builder) diff --git a/doc-source/mock/README b/doc-source/mock/README new file mode 100644 index 00000000000..d8832fe5048 --- /dev/null +++ b/doc-source/mock/README @@ -0,0 +1,3 @@ +This directory contains a mockup of the gdb and addrxlat modules. + +Otherwise, sphinx fails to import the modules and can't build the docs. 
diff --git a/doc-source/mock/addrxlat/__init__.py b/doc-source/mock/addrxlat/__init__.py new file mode 100644 index 00000000000..d4464cde36c --- /dev/null +++ b/doc-source/mock/addrxlat/__init__.py @@ -0,0 +1,15 @@ +class Context(object): + pass + +class System(object): + def os_init(x, y, z): + pass + + def get_map(x): + pass + +def CAPS(x): + pass + +KVADDR = 0 +SYS_MAP_MACHPHYS_KPHYS = 0 diff --git a/doc-source/mock/gdb/__init__.py b/doc-source/mock/gdb/__init__.py new file mode 100644 index 00000000000..9c110d7881e --- /dev/null +++ b/doc-source/mock/gdb/__init__.py @@ -0,0 +1,69 @@ + +class Target(object): + class kdump(object): + pass + def get_addrxlat_ctx(): + pass + class get_addrxlat_sys(): + def get_map(self, x): + return [] + +class Register(object): + pass + +class Type(object): + def __init__(self, x): + pass + + @staticmethod + def pointer(): + pass + +class MinimalSymbol(object): + section = None + +class Symbol(object): + section = None + +class Inferior(object): + new_thread = None + +class InferiorThread(object): + pass + +class MinSymbol(object): + pass + +class Value(object): + pass + +class Objfile(object): + architecture = None + pass + +def lookup_symbol(x, y): + pass + +def lookup_type(x): + return Type(x) + +class events(object): + class new_objfile(object): + def connect(x): + pass + +def objfiles(): + return [] + +def current_target(): + return Target() + +class Block(object): + pass + +class Command(object): + def __init__(self, x, y): + pass + +SYMBOL_VAR_DOMAIN = 0 +COMMAND_USER = 0 diff --git a/doc-source/mock/gdb/types.py b/doc-source/mock/gdb/types.py new file mode 100644 index 00000000000..6a059386f0e --- /dev/null +++ b/doc-source/mock/gdb/types.py @@ -0,0 +1,2 @@ +def get_basic_type(x): + pass diff --git a/doc-source/user_guide.rst b/doc-source/user_guide.rst new file mode 100644 index 00000000000..e4cf6ee4ec6 --- /dev/null +++ b/doc-source/user_guide.rst @@ -0,0 +1,4 @@ +User Guide +========== + +To be written. diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 00000000000..8ad6eff0a0d --- /dev/null +++ b/setup.cfg @@ -0,0 +1,7 @@ +[build_sphinx] +source-dir = doc-source +build-dir = docs +all_files = 1 + +[upload_sphinx] +upload-dir = docs/html From deb432ba27192454db7eae2993e730bb9e5a99dc Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Thu, 30 May 2019 14:13:47 -0400 Subject: [PATCH 150/367] README.md: Remove obsolete README file. Now that documentation is in rst format and GitHub understands README.rst for a project summary page, let's get rid of the old README. Signed-off-by: Jeff Mahoney --- README.md | 24 ------------------------ 1 file changed, 24 deletions(-) delete mode 100644 README.md diff --git a/README.md b/README.md deleted file mode 100644 index 7ba53095c5f..00000000000 --- a/README.md +++ /dev/null @@ -1,24 +0,0 @@ -This repository contains the python extensions for interacting -with Linux kernel crash dumps. - -You'll need: -* [libkdumpfile](https://github.com/ptesarik/libkdumpfile) -* [gdb-python](https://github.com/jeffmahoney/gdb-python/tree/gdb-8.1-suse-target) - -For the latest development efforts: -* [gdb-python 'master-suse-target' branch](https://github.com/jeffmahoney/gdb-python/tree/master-suse-target) -configured with `--with-python=/usr/bin/python3` -* [crash-python 'next' branch](https://github.com/jeffmahoney/crash-python/tree/next) - -Packages for SUSE-created releases are available on the [Open Build Service](https://download.opensuse.org/repositories/home:/jeff_mahoney:/crash-python/). 
- -Crash-python requires the following to run properly: -- The complete debuginfo for the kernel to be debugged, including modules -- The ELF images for the kernel and all modules -- The vmcore dump image from the crashed system - -To start: -`pycrash -d ` - -The `-d` option may be specified multiple times if multiple directories are -required. From fb63556ddfb2b37906cc7de1c10490009eed60a0 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Thu, 30 May 2019 16:27:16 -0400 Subject: [PATCH 151/367] docs: rework conf.py to build API documentation on readthedocs.io We want to keep the documentation uptodate without manual intervention. 'make doc' did the right things but readthedocs goes through conf.py This commit reworks conf.py to make it generate the gdb inventory file and the apidoc files. It also gets automatically picked up by build_sphinx so local docs are created properly using the same code. Signed-off-by: Jeff Mahoney --- Makefile | 2 - README.rst | 5 +++ doc-source/conf.py | 28 ++++++++++-- .../{make-gdb-refs.py => make_gdb_refs.py} | 43 ++++++++++--------- 4 files changed, 52 insertions(+), 26 deletions(-) rename doc-source/{make-gdb-refs.py => make_gdb_refs.py} (53%) diff --git a/Makefile b/Makefile index 1555024aec2..ccfb4823af1 100644 --- a/Makefile +++ b/Makefile @@ -49,7 +49,5 @@ lint3: doc: build FORCE rm -rf docs rm -f doc/source/crash.*rst doc/source/modules.rst - sphinx-apidoc -M -e -H "API Reference" -f -o doc-source crash - (cd doc-source ; python3 make-gdb-refs.py) python3 setup.py -q build_sphinx FORCE: diff --git a/README.rst b/README.rst index 339fd4b0b4a..b211d06cb71 100644 --- a/README.rst +++ b/README.rst @@ -75,6 +75,9 @@ writing ad-hoc extensions, commands, and analysis scripts. f_write_hint = WRITE_LIFE_NOT_SET, [...] +Full documentation can be found at `crash-python.readthedocs.io +`_. + .. end-introduction Installation @@ -135,6 +138,8 @@ The full options are documented with: .. end-quick-start + + License: -------- diff --git a/doc-source/conf.py b/doc-source/conf.py index 9febb4f0ac9..a777b942002 100644 --- a/doc-source/conf.py +++ b/doc-source/conf.py @@ -20,7 +20,31 @@ import os import sys sys.path.insert(0, os.path.abspath('./mock')) - +sys.path.insert(0, os.path.abspath('.')) + + +def run_apidoc(_): + try: + from sphinx.ext.apidoc import main + mod = "../crash" + out = "." 
+ except ImportError as e: + from sphinx.apidoc import main + mod = "crash" + out = "doc-source" + import make_gdb_refs + import os + import sys + sys.path.append(os.path.join(os.path.dirname(__file__), '..')) + cur_dir = os.path.abspath(os.path.dirname(__file__)) + argv = [ '-M', '-e', '-H', 'API Reference', '-f', + '-o', out, mod ] + main(argv) + + make_gdb_refs.make_gdb_refs() + +def setup(app): + app.connect('builder-inited', run_apidoc) # -- General configuration ------------------------------------------------ @@ -183,5 +207,3 @@ 'Miscellaneous'), ] - - diff --git a/doc-source/make-gdb-refs.py b/doc-source/make_gdb_refs.py similarity index 53% rename from doc-source/make-gdb-refs.py rename to doc-source/make_gdb_refs.py index 1a815f92c06..e3c68e78eda 100644 --- a/doc-source/make-gdb-refs.py +++ b/doc-source/make_gdb_refs.py @@ -41,24 +41,25 @@ class MockBuilder(object): def get_target_uri(self, docname): return docname -env = MockEnvironment() -builder = MockBuilder() - -classes = MockDomain('py') - -classes.add_class_ref('gdb.Type', 'Types-In-Python.html') -classes.add_class_ref('gdb.Symbol', 'Symbols-In-Python.html') -classes.add_class_ref('gdb.Command', 'Commands-In-Python.html') -classes.add_class_ref('gdb.Inferior', 'Inferiors-In-Python.html') -classes.add_class_ref('gdb.Objfile', 'Objfiles-In-Python.html') -classes.add_class_ref('gdb.Value', 'Values-From-Inferior.html') -classes.add_class_ref('gdb.InferiorThread', 'Threads-In-Python.html') -classes.add_class_ref('gdb.Frame', 'Frames-In-Python.html') -classes.add_class_ref('gdb.NotAvailableErorr', 'Exception-Handling.html') -classes.add_class_ref('gdb.MemoryError', 'Exception-Handling.html') -classes.add_class_ref('gdb.error', 'Exception-Handling.html') -classes.add_class_ref('gdb.GdbError', 'Exception-Handling.html') - -env.add_domain(classes) - -InventoryFile.dump("gdb.inv", env, builder) +def make_gdb_refs(): + env = MockEnvironment() + builder = MockBuilder() + + classes = MockDomain('py') + + classes.add_class_ref('gdb.Type', 'Types-In-Python.html') + classes.add_class_ref('gdb.Symbol', 'Symbols-In-Python.html') + classes.add_class_ref('gdb.Command', 'Commands-In-Python.html') + classes.add_class_ref('gdb.Inferior', 'Inferiors-In-Python.html') + classes.add_class_ref('gdb.Objfile', 'Objfiles-In-Python.html') + classes.add_class_ref('gdb.Value', 'Values-From-Inferior.html') + classes.add_class_ref('gdb.InferiorThread', 'Threads-In-Python.html') + classes.add_class_ref('gdb.Frame', 'Frames-In-Python.html') + classes.add_class_ref('gdb.NotAvailableErorr', 'Exception-Handling.html') + classes.add_class_ref('gdb.MemoryError', 'Exception-Handling.html') + classes.add_class_ref('gdb.error', 'Exception-Handling.html') + classes.add_class_ref('gdb.GdbError', 'Exception-Handling.html') + + env.add_domain(classes) + + InventoryFile.dump("gdb.inv", env, builder) From 186d7db379177f107f67b8d3d8cf4741df743403 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Wed, 5 Jun 2019 16:27:28 -0400 Subject: [PATCH 152/367] crash: move help handling into per-command parser The static checkers complain about overriding format_help and format_usage in crash.commands.Command, and rightly so. This commit adds a new ArgumentParser-derived class that provides its own format_help. Each command then derives its own parser from this and uses that to instantiate the command. 
Signed-off-by: Jeff Mahoney --- crash/commands/__init__.py | 15 +- crash/commands/btrfs.py | 31 +- crash/commands/dmesg.py | 256 ++++++------- crash/commands/help.py | 32 +- crash/commands/kmem.py | 32 +- crash/commands/lsmod.py | 46 +-- crash/commands/mount.py | 27 +- crash/commands/ps.py | 758 ++++++++++++++++++------------------- crash/commands/syscmd.py | 75 ++-- crash/commands/task.py | 31 +- crash/commands/vtop.py | 235 ++++++------ crash/commands/xfs.py | 33 +- 12 files changed, 742 insertions(+), 829 deletions(-) diff --git a/crash/commands/__init__.py b/crash/commands/__init__.py index 599ef3ff7ef..46d67620aec 100644 --- a/crash/commands/__init__.py +++ b/crash/commands/__init__.py @@ -19,9 +19,14 @@ class CommandLineError(RuntimeError): pass class ArgumentParser(argparse.ArgumentParser): - def error(self, message): + def error(self, message: str): raise CommandLineError(message) + def format_help(self) -> str: + if self.__doc__ is None: + raise NotImplementedError("This command does not have help text") + return self.__doc__.strip() + "\n" + class Command(gdb.Command): commands: Dict[str, gdb.Command] = dict() def __init__(self, name, parser=None): @@ -32,15 +37,11 @@ def __init__(self, name, parser=None): raise ArgumentTypeError('parser', parser, ArgumentParser) self.parser = parser - parser.format_help = self._format_help self.commands[self.name] = self gdb.Command.__init__(self, self.name, gdb.COMMAND_USER) - def _format_help(self) -> str: - try: - return self.format_help().strip() + "\n" - except AttributeError: - return "\n" + def format_help(self) -> str: + return self.parser.format_help() def invoke_uncaught(self, argstr, from_tty=False): argv = gdb.string_to_argv(argstr) diff --git a/crash/commands/btrfs.py b/crash/commands/btrfs.py index c63827348a1..fcf868e272b 100644 --- a/crash/commands/btrfs.py +++ b/crash/commands/btrfs.py @@ -8,39 +8,32 @@ from crash.subsystem.filesystem import for_each_super_block, super_fstype from crash.subsystem.filesystem.btrfs import btrfs_fsid, btrfs_metadata_uuid -btrfs_help_text = """ -NAME - btrfs - display Btrfs internal data structures +class _Parser(ArgumentParser): + """ + NAME + btrfs - display Btrfs internal data structures -SYNOPSIS - btrfs + SYNOPSIS + btrfs -COMMANDS - btrfs list [-m] - list all btrfs file systems (-m to show metadata uuid) -""" + COMMANDS + btrfs list [-m] - list all btrfs file systems (-m to show metadata uuid) + """ + def format_usage(self) -> str: + return "btrfs [args...]\n" class BtrfsCommand(Command): """display Btrfs internal data structures""" def __init__(self, name): - parser = ArgumentParser(prog=name) + parser = _Parser(prog=name) subparsers = parser.add_subparsers(help="sub-command help") list_parser = subparsers.add_parser('list', help='list help') list_parser.set_defaults(subcommand=self.list_btrfs) list_parser.add_argument('-m', action='store_true', default=False) - parser.format_usage = lambda: 'btrfs [args...]\n' Command.__init__(self, name, parser) - def format_help(self) -> str: - """ - Returns the help text for the btrfs command - - Returns: - :obj:`str`: The help text for the btrfs command. 
- """ - return btrfs_help_text - def list_btrfs(self, args: Namespace) -> None: print_header = True count = 0 diff --git a/crash/commands/dmesg.py b/crash/commands/dmesg.py index 190b98a10a8..4f5c8b7d94a 100644 --- a/crash/commands/dmesg.py +++ b/crash/commands/dmesg.py @@ -20,149 +20,143 @@ class LogTypeException(Exception): class LogInvalidOption(Exception): pass -log_help_text = """ -NAME - log - dump system message buffer - -SYNOPSIS - log [-tdm] - -DESCRIPTION - This command dumps the kernel log_buf contents in chronological order. The - command supports the older log_buf formats, which may or may not contain a - timestamp inserted prior to each message, as well as the newer variable-length - record format, where the timestamp is contained in each log entry's header. - - -t Display the message text without the timestamp. - -d Display the dictionary of key/value pair properties that are optionally - appended to a message by the kernel's dev_printk() function; only - applicable to the variable-length record format. - -m Display the message log level in brackets preceding each message. For - the variable-length record format, the level will be displayed in - hexadecimal, and depending upon the kernel version, also contains the - facility or flags bits. - - -EXAMPLES - Dump the kernel message buffer: - - crash> log - Linux version 2.2.5-15smp (root@mclinux1) (gcc version egcs-2.91.66 19990 - 314/Linux (egcs-1.1.2 release)) #1 SMP Thu Aug 26 11:04:37 EDT 1999 - Intel MultiProcessor Specification v1.4 - Virtual Wire compatibility mode. - OEM ID: DELL Product ID: WS 410 APIC at: 0xFEE00000 - Processor #0 Pentium(tm) Pro APIC version 17 - Processor #1 Pentium(tm) Pro APIC version 17 - I/O APIC #2 Version 17 at 0xFEC00000. - Processors: 2 - mapped APIC to ffffe000 (fee00000) - mapped IOAPIC to ffffd000 (fec00000) - Detected 447696347 Hz processor. - Console: colour VGA+ 80x25 - Calibrating delay loop... 445.64 BogoMIPS - ... - 8K byte-wide RAM 5:3 Rx:Tx split, autoselect/Autonegotiate interface. - MII transceiver found at address 24, status 782d. - Enabling bus-master transmits and whole-frame receives. - Installing knfsd (copyright (C) 1996 okir@monad.swb.de). - nfsd_init: initialized fhcache, entries=256 - ... - - Do the same thing, but also show the log level preceding each message: - - crash> log -m - <4>Linux version 2.2.5-15smp (root@mclinux1) (gcc version egcs-2.91.66 19990 - 314/Linux (egcs-1.1.2 release)) #1 SMP Thu Aug 26 11:04:37 EDT 1999 - <4>Intel MultiProcessor Specification v1.4 - <4> Virtual Wire compatibility mode. - <4>OEM ID: DELL Product ID: WS 410 APIC at: 0xFEE00000 - <4>Processor #0 Pentium(tm) Pro APIC version 17 - <4>Processor #1 Pentium(tm) Pro APIC version 17 - <4>I/O APIC #2 Version 17 at 0xFEC00000. - <4>Processors: 2 - <4>mapped APIC to ffffe000 (fee00000) - <4>mapped IOAPIC to ffffd000 (fec00000) - <4>Detected 447696347 Hz processor. - <4>Console: colour VGA+ 80x25 - <4>Calibrating delay loop... 445.64 BogoMIPS - ... - <6> 8K byte-wide RAM 5:3 Rx:Tx split, autoselect/Autonegotiate interface. - <6> MII transceiver found at address 24, status 782d. - <6> Enabling bus-master transmits and whole-frame receives. - <6>Installing knfsd (copyright (C) 1996 okir@monad.swb.de). - <7>nfsd_init: initialized fhcache, entries=256 - ... 
- - On a system with the variable-length record format, and whose log_buf has been - filled and wrapped around, display the log with timestamp data: - - crash> log - [ 0.467730] pci 0000:ff:02.0: [8086:2c10] type 00 class 0x060000 - [ 0.467749] pci 0000:ff:02.1: [8086:2c11] type 00 class 0x060000 - [ 0.467769] pci 0000:ff:02.4: [8086:2c14] type 00 class 0x060000 - [ 0.467788] pci 0000:ff:02.5: [8086:2c15] type 00 class 0x060000 - [ 0.467809] pci 0000:ff:03.0: [8086:2c18] type 00 class 0x060000 - [ 0.467828] pci 0000:ff:03.1: [8086:2c19] type 00 class 0x060000 - ... - - Display the same message text as above, without the timestamp data: - - crash> log -t - pci 0000:ff:02.0: [8086:2c10] type 00 class 0x060000 - pci 0000:ff:02.1: [8086:2c11] type 00 class 0x060000 - pci 0000:ff:02.4: [8086:2c14] type 00 class 0x060000 - pci 0000:ff:02.5: [8086:2c15] type 00 class 0x060000 - pci 0000:ff:03.0: [8086:2c18] type 00 class 0x060000 - pci 0000:ff:03.1: [8086:2c19] type 00 class 0x060000 - ... - - Display the same message text as above, with appended dictionary data: - - crash> log -td - pci 0000:ff:02.0: [8086:2c10] type 00 class 0x060000 - SUBSYSTEM=pci - DEVICE=+pci:0000:ff:02.0 - pci 0000:ff:02.1: [8086:2c11] type 00 class 0x060000 - SUBSYSTEM=pci - DEVICE=+pci:0000:ff:02.1 - pci 0000:ff:02.4: [8086:2c14] type 00 class 0x060000 - SUBSYSTEM=pci - DEVICE=+pci:0000:ff:02.4 - pci 0000:ff:02.5: [8086:2c15] type 00 class 0x060000 - SUBSYSTEM=pci - DEVICE=+pci:0000:ff:02.5 - pci 0000:ff:03.0: [8086:2c18] type 00 class 0x060000 - SUBSYSTEM=pci - DEVICE=+pci:0000:ff:03.0 - pci 0000:ff:03.1: [8086:2c19] type 00 class 0x060000 - SUBSYSTEM=pci - DEVICE=+pci:0000:ff:03.1 - ... -""" +class _Parser(ArgumentParser): + """ + NAME + log - dump system message buffer + + SYNOPSIS + log [-tdm] + + DESCRIPTION + This command dumps the kernel log_buf contents in chronological order. The + command supports the older log_buf formats, which may or may not contain a + timestamp inserted prior to each message, as well as the newer variable-length + record format, where the timestamp is contained in each log entry's header. + + -t Display the message text without the timestamp. + -d Display the dictionary of key/value pair properties that are optionally + appended to a message by the kernel's dev_printk() function; only + applicable to the variable-length record format. + -m Display the message log level in brackets preceding each message. For + the variable-length record format, the level will be displayed in + hexadecimal, and depending upon the kernel version, also contains the + facility or flags bits. + + + EXAMPLES + Dump the kernel message buffer: + + crash> log + Linux version 2.2.5-15smp (root@mclinux1) (gcc version egcs-2.91.66 19990 + 314/Linux (egcs-1.1.2 release)) #1 SMP Thu Aug 26 11:04:37 EDT 1999 + Intel MultiProcessor Specification v1.4 + Virtual Wire compatibility mode. + OEM ID: DELL Product ID: WS 410 APIC at: 0xFEE00000 + Processor #0 Pentium(tm) Pro APIC version 17 + Processor #1 Pentium(tm) Pro APIC version 17 + I/O APIC #2 Version 17 at 0xFEC00000. + Processors: 2 + mapped APIC to ffffe000 (fee00000) + mapped IOAPIC to ffffd000 (fec00000) + Detected 447696347 Hz processor. + Console: colour VGA+ 80x25 + Calibrating delay loop... 445.64 BogoMIPS + ... + 8K byte-wide RAM 5:3 Rx:Tx split, autoselect/Autonegotiate interface. + MII transceiver found at address 24, status 782d. + Enabling bus-master transmits and whole-frame receives. + Installing knfsd (copyright (C) 1996 okir@monad.swb.de). 
+ nfsd_init: initialized fhcache, entries=256 + ... + + Do the same thing, but also show the log level preceding each message: + + crash> log -m + <4>Linux version 2.2.5-15smp (root@mclinux1) (gcc version egcs-2.91.66 19990 + 314/Linux (egcs-1.1.2 release)) #1 SMP Thu Aug 26 11:04:37 EDT 1999 + <4>Intel MultiProcessor Specification v1.4 + <4> Virtual Wire compatibility mode. + <4>OEM ID: DELL Product ID: WS 410 APIC at: 0xFEE00000 + <4>Processor #0 Pentium(tm) Pro APIC version 17 + <4>Processor #1 Pentium(tm) Pro APIC version 17 + <4>I/O APIC #2 Version 17 at 0xFEC00000. + <4>Processors: 2 + <4>mapped APIC to ffffe000 (fee00000) + <4>mapped IOAPIC to ffffd000 (fec00000) + <4>Detected 447696347 Hz processor. + <4>Console: colour VGA+ 80x25 + <4>Calibrating delay loop... 445.64 BogoMIPS + ... + <6> 8K byte-wide RAM 5:3 Rx:Tx split, autoselect/Autonegotiate interface. + <6> MII transceiver found at address 24, status 782d. + <6> Enabling bus-master transmits and whole-frame receives. + <6>Installing knfsd (copyright (C) 1996 okir@monad.swb.de). + <7>nfsd_init: initialized fhcache, entries=256 + ... + + On a system with the variable-length record format, and whose log_buf has been + filled and wrapped around, display the log with timestamp data: + + crash> log + [ 0.467730] pci 0000:ff:02.0: [8086:2c10] type 00 class 0x060000 + [ 0.467749] pci 0000:ff:02.1: [8086:2c11] type 00 class 0x060000 + [ 0.467769] pci 0000:ff:02.4: [8086:2c14] type 00 class 0x060000 + [ 0.467788] pci 0000:ff:02.5: [8086:2c15] type 00 class 0x060000 + [ 0.467809] pci 0000:ff:03.0: [8086:2c18] type 00 class 0x060000 + [ 0.467828] pci 0000:ff:03.1: [8086:2c19] type 00 class 0x060000 + ... + + Display the same message text as above, without the timestamp data: + + crash> log -t + pci 0000:ff:02.0: [8086:2c10] type 00 class 0x060000 + pci 0000:ff:02.1: [8086:2c11] type 00 class 0x060000 + pci 0000:ff:02.4: [8086:2c14] type 00 class 0x060000 + pci 0000:ff:02.5: [8086:2c15] type 00 class 0x060000 + pci 0000:ff:03.0: [8086:2c18] type 00 class 0x060000 + pci 0000:ff:03.1: [8086:2c19] type 00 class 0x060000 + ... + + Display the same message text as above, with appended dictionary data: + + crash> log -td + pci 0000:ff:02.0: [8086:2c10] type 00 class 0x060000 + SUBSYSTEM=pci + DEVICE=+pci:0000:ff:02.0 + pci 0000:ff:02.1: [8086:2c11] type 00 class 0x060000 + SUBSYSTEM=pci + DEVICE=+pci:0000:ff:02.1 + pci 0000:ff:02.4: [8086:2c14] type 00 class 0x060000 + SUBSYSTEM=pci + DEVICE=+pci:0000:ff:02.4 + pci 0000:ff:02.5: [8086:2c15] type 00 class 0x060000 + SUBSYSTEM=pci + DEVICE=+pci:0000:ff:02.5 + pci 0000:ff:03.0: [8086:2c18] type 00 class 0x060000 + SUBSYSTEM=pci + DEVICE=+pci:0000:ff:03.0 + pci 0000:ff:03.1: [8086:2c19] type 00 class 0x060000 + SUBSYSTEM=pci + DEVICE=+pci:0000:ff:03.1 + ... + """ + + def format_usage(self) -> str: + return 'log [-tdm]\n' class LogCommand(Command): """dump system message buffer""" def __init__(self, name): - parser = ArgumentParser(prog=name) + parser = _Parser(prog=name) parser.add_argument('-t', action='store_true', default=False) parser.add_argument('-d', action='store_true', default=False) parser.add_argument('-m', action='store_true', default=False) - parser.format_usage = lambda: 'log [-tdm]\n' Command.__init__(self, name, parser) - def format_help(self) -> str: - """ - Returns the help text for the log command - - Returns: - :obj:`str`: The help text for the log command. 
- """ - return log_help_text - @classmethod def filter_unstructured_log(cls, log, args): lines = log.split('\n') diff --git a/crash/commands/help.py b/crash/commands/help.py index 8e082633ed9..dc60a67506d 100644 --- a/crash/commands/help.py +++ b/crash/commands/help.py @@ -5,36 +5,28 @@ import argparse from crash.commands import Command, CommandError, ArgumentParser -help_text = """ -NAME - help - display help for crash commands +class _Parser(ArgumentParser): + """ + NAME + help - display help for crash commands -SYNOPSIS - help [command] + SYNOPSIS + help [command] -DESCRIPTION - This command displays help text for crash commands. When used alone, - it provides a list of commands. When an argument is specified, the help - text for that command will be printed. -""" + DESCRIPTION + This command displays help text for crash commands. When used alone, + it provides a list of commands. When an argument is specified, the help + text for that command will be printed. + """ class HelpCommand(Command): """ this command""" def __init__(self): - parser = ArgumentParser(prog="help") + parser = _Parser(prog="help") parser.add_argument('args', nargs=argparse.REMAINDER) super().__init__('help', parser) - def format_help(self) -> str: - """ - Returns the help text for the help command - - Returns: - :obj:`str`: The help text for the help command. - """ - return help_text - def execute(self, argv): if not argv.args: print("Available commands:") diff --git a/crash/commands/kmem.py b/crash/commands/kmem.py index bd877092a2c..2ae4a0e78b7 100644 --- a/crash/commands/kmem.py +++ b/crash/commands/kmem.py @@ -12,19 +12,20 @@ from crash.util import get_symbol_value from crash.exceptions import MissingSymbolError -kmem_help_text = """ -NAME - kmem - kernel memory inspection +class _Parser(ArgumentParser): + """ + NAME + kmem - kernel memory inspection -SYNOPSIS - kmem addr - try to find addr within kmem caches - kmem -s [slabname] - check consistency of single or all kmem cache - kmem -z - report zones - kmem -V - report vmstats + SYNOPSIS + kmem addr - try to find addr within kmem caches + kmem -s [slabname] - check consistency of single or all kmem cache + kmem -z - report zones + kmem -V - report vmstats -DESCRIPTION - This command currently offers very basic kmem cache query and checking. -""" + DESCRIPTION + This command currently offers very basic kmem cache query and checking. + """ class KmemCommand(Command): """ kernel memory inspection""" @@ -41,15 +42,6 @@ def __init__(self, name): super().__init__(name, parser) - def format_help(self) -> str: - """ - Returns the help text for the kmem command - - Returns: - :obj:`str`: The help text for the kmem command. - """ - return kmem_help_text - def execute(self, args): if args.z: self.print_zones() diff --git a/crash/commands/lsmod.py b/crash/commands/lsmod.py index 4dd5cac5c10..b19e00ca340 100644 --- a/crash/commands/lsmod.py +++ b/crash/commands/lsmod.py @@ -13,50 +13,42 @@ from crash.types.percpu import get_percpu_var import crash.types.percpu -lsmod_help_text = """ -NAME - lsmod - display module information +class _Parser(ArgumentParser): + """ + NAME + lsmod - display module information -SYNOPSIS - lsmod [-p [n]] [name-wildcard] + SYNOPSIS + lsmod [-p [n]] [name-wildcard] -DESCRIPTION - This command displays information about loaded modules. + DESCRIPTION + This command displays information about loaded modules. - The default output will show all loaded modules, the core address, - its size, and any users of the module. 
By specifying [name-wildcard], - the results can be filtered to modules matching the wildcard. + The default output will show all loaded modules, the core address, + its size, and any users of the module. By specifying [name-wildcard], + the results can be filtered to modules matching the wildcard. - The following options are available: - -p display the percpu base for the module and the size of its region - -p CPU# display the percpu base for the module and the size of its region - for the specified CPU number -""" + The following options are available: + -p display the percpu base for the module and the size of its region + -p CPU# display the percpu base for the module and the size of its region + for the specified CPU number + """ + def format_usage(self) -> str: + return "lsmod [-p] [regex] ...\n" class ModuleCommand(Command): """display module information""" def __init__(self): - parser = ArgumentParser(prog="lsmod") + parser = _Parser(prog="lsmod") parser.add_argument('-p', nargs='?', const=-1, default=None, type=int) parser.add_argument('args', nargs=argparse.REMAINDER) - parser.format_usage = lambda: "lsmod [-p] [regex] ...\n" - Command.__init__(self, "lsmod", parser) self.module_use_type = gdb.lookup_type('struct module_use') - def format_help(self) -> str: - """ - Returns the help text for the lsmod command - - Returns: - :obj:`str`: The help text for the lsmod command. - """ - return lsmod_help_text - def print_module_percpu(self, mod, cpu=-1): cpu = int(cpu) addr = int(mod['percpu']) diff --git a/crash/commands/mount.py b/crash/commands/mount.py index e21cf163ffd..b336155e620 100644 --- a/crash/commands/mount.py +++ b/crash/commands/mount.py @@ -16,37 +16,28 @@ from crash.subsystem.filesystem.mount import mount_super, mount_flags from crash.subsystem.filesystem.mount import mount_root -mount_help_text = """ -NAME - mount - display mounted file systems +class _Parser(ArgumentParser): + """ + NAME + mount - display mounted file systems - -f display common mount flags - -v display superblock and vfsmount addresses - -d display device obtained from super_block -""" + -f display common mount flags + -v display superblock and vfsmount addresses + -d display device obtained from super_block + """ class MountCommand(Command): """display mounted file systems""" def __init__(self, name): - parser = ArgumentParser(prog=name) + parser = _Parser(prog=name) parser.add_argument('-v', action='store_true', default=False) parser.add_argument('-f', action='store_true', default=False) parser.add_argument('-d', action='store_true', default=False) - parser.format_usage = lambda : "mount\n" super().__init__(name, parser) - def format_help(self) -> str: - """ - Returns the help text for the mount command - - Returns: - :obj:`str`: The help text for the mount command. - """ - return mount_help_text - def __getattr__(self, name): if name == 'charp': self.charp = gdb.lookup_type('char').pointer() diff --git a/crash/commands/ps.py b/crash/commands/ps.py index 9730d993919..9870eddec98 100755 --- a/crash/commands/ps.py +++ b/crash/commands/ps.py @@ -153,381 +153,385 @@ def format_header(self) -> str: """ return self._format_header() -ps_help_text = """ -NAME - ps - display process status information - -SYNOPSIS - ps [-k|-u|-G][-s|-n][-p|-c|-t|-l|-a|-g|-r] [pid | taskp | command] ... - -DESCRIPTION - This command displays process status for selected, or all, processes - in the system. If no arguments are entered, the process data is - is displayed for all processes. 
Specific processes may be selected - by using the following identifier formats: - - pid a process PID. - taskp a hexadecimal task_struct pointer. - command a command name. If a command name is made up of letters that - are all numerical values, precede the name string with a "\". - If the command string is enclosed within "'" characters, then - the encompassed string must be a POSIX extended regular expression - that will be used to match task names. - - The process list may be further restricted by the following options: - - -k restrict the output to only kernel threads. - -u restrict the output to only user tasks. - -G display only the thread group leader in a thread group. - - The process identifier types may be mixed. For each task, the following - items are displayed: - - 1. the process PID. - 2. the parent process PID. - 3. the CPU number that the task ran on last. - 4. the task_struct address or the kernel stack pointer of the process. - (see -s option below) - 5. the task state (RU, IN, UN, ZO, ST, TR, DE, SW). - 6. the percentage of physical memory being used by this task. - 7. the virtual address size of this task in kilobytes. - 8. the resident set size of this task in kilobytes. - 9. the command name. - - The default output shows the task_struct address of each process under a - column titled "TASK". This can be changed to show the kernel stack - pointer under a column titled "KSTACKP". - - -s replace the TASK column with the KSTACKP column. - - On SMP machines, the active task on each CPU will be highlighted by an - angle bracket (">") preceding its information. - - Alternatively, information regarding parent-child relationships, - per-task time usage data, argument/environment data, thread groups, - or resource limits may be displayed: - - -p display the parental hierarchy of selected, or all, tasks. - -c display the children of selected, or all, tasks. - -t display the task run time, start time, and cumulative user - and system times. - -l display the task last_run or timestamp value, whichever applies, - of selected, or all, tasks; the list is sorted with the most - recently-run task (largest last_run/timestamp) shown first, - followed by the task's current state. - -a display the command line arguments and environment strings of - selected, or all, user-mode tasks. - -g display tasks by thread group, of selected, or all, tasks. - -r display resource limits (rlimits) of selected, or all, tasks. 
- -n display gdb thread number - -EXAMPLES - Show the process status of all current tasks: - - crash> ps - PID PPID CPU TASK ST %MEM VSZ RSS COMM - > 0 0 3 c024c000 RU 0.0 0 0 [swapper] - > 0 0 0 c0dce000 RU 0.0 0 0 [swapper] - 0 0 1 c0fa8000 RU 0.0 0 0 [swapper] - > 0 0 2 c009a000 RU 0.0 0 0 [swapper] - 1 0 1 c0098000 IN 0.0 1096 476 init - 2 1 1 c0090000 IN 0.0 0 0 [kflushd] - 3 1 1 c000e000 IN 0.0 0 0 [kpiod] - 4 1 3 c000c000 IN 0.0 0 0 [kswapd] - 5 1 1 c0008000 IN 0.0 0 0 [mdrecoveryd] - 253 1 2 fbc4c000 IN 0.0 1088 376 portmap - 268 1 2 fbc82000 IN 0.1 1232 504 ypbind - 274 268 2 fa984000 IN 0.1 1260 556 ypbind - 321 1 1 fabf6000 IN 0.1 1264 608 syslogd - 332 1 1 fa9be000 RU 0.1 1364 736 klogd - 346 1 2 fae88000 IN 0.0 1112 472 atd - 360 1 2 faeb2000 IN 0.1 1284 592 crond - 378 1 2 fafd6000 IN 0.1 1236 560 inetd - 392 1 0 fb710000 IN 0.1 2264 1468 named - 406 1 3 fb768000 IN 0.1 1284 560 lpd - 423 1 1 fb8ac000 IN 0.1 1128 528 rpc.statd - 434 1 2 fb75a000 IN 0.0 1072 376 rpc.rquotad - 445 1 2 fb4a4000 IN 0.0 1132 456 rpc.mountd - 460 1 1 fa938000 IN 0.0 0 0 [nfsd] - 461 1 1 faa86000 IN 0.0 0 0 [nfsd] - 462 1 0 fac48000 IN 0.0 0 0 [nfsd] - 463 1 0 fb4ca000 IN 0.0 0 0 [nfsd] - 464 1 0 fb4c8000 IN 0.0 0 0 [nfsd] - 465 1 2 fba6e000 IN 0.0 0 0 [nfsd] - 466 1 1 fba6c000 IN 0.0 0 0 [nfsd] - 467 1 2 fac04000 IN 0.0 0 0 [nfsd] - 468 461 2 fa93a000 IN 0.0 0 0 [lockd] - 469 468 2 fa93e000 IN 0.0 0 0 [rpciod] - 486 1 0 fab54000 IN 0.1 1596 880 amd - 523 1 2 fa84e000 IN 0.1 1884 1128 sendmail - 538 1 0 fa82c000 IN 0.0 1112 416 gpm - 552 1 3 fa70a000 IN 0.1 2384 1220 httpd - 556 552 3 fa776000 IN 0.1 2572 1352 httpd - 557 552 2 faba4000 IN 0.1 2572 1352 httpd - 558 552 1 fa802000 IN 0.1 2572 1352 httpd - 559 552 3 fa6ee000 IN 0.1 2572 1352 httpd - 560 552 3 fa700000 IN 0.1 2572 1352 httpd - 561 552 0 fa6f0000 IN 0.1 2572 1352 httpd - 562 552 3 fa6ea000 IN 0.1 2572 1352 httpd - 563 552 0 fa67c000 IN 0.1 2572 1352 httpd - 564 552 3 fa674000 IN 0.1 2572 1352 httpd - 565 552 3 fa66a000 IN 0.1 2572 1352 httpd - 582 1 2 fa402000 IN 0.2 2968 1916 xfs - 633 1 2 fa1ec000 IN 0.2 5512 2248 innd - 636 1 3 fa088000 IN 0.1 2536 804 actived - 676 1 0 fa840000 IN 0.0 1060 384 mingetty - 677 1 1 fa590000 IN 0.0 1060 384 mingetty - 678 1 2 fa3b8000 IN 0.0 1060 384 mingetty - 679 1 0 fa5b8000 IN 0.0 1060 384 mingetty - 680 1 1 fa3a4000 IN 0.0 1060 384 mingetty - 681 1 2 fa30a000 IN 0.0 1060 384 mingetty - 683 1 3 fa5d8000 IN 0.0 1052 280 update - 686 378 1 fa3aa000 IN 0.1 2320 1136 in.rlogind - 687 686 2 f9e52000 IN 0.1 2136 1000 login - 688 687 0 f9dec000 IN 0.1 1732 976 bash - > 700 688 1 f9d62000 RU 0.0 1048 256 gen12 - - Display the parental hierarchy of the "crash" process on a live system: - - crash> ps -p 4249 - PID: 0 TASK: c0252000 CPU: 0 COMMAND: "swapper" - PID: 1 TASK: c009a000 CPU: 1 COMMAND: "init" - PID: 632 TASK: c73b6000 CPU: 1 COMMAND: "prefdm" - PID: 637 TASK: c5a4a000 CPU: 1 COMMAND: "prefdm" - PID: 649 TASK: c179a000 CPU: 0 COMMAND: "kwm" - PID: 683 TASK: c1164000 CPU: 0 COMMAND: "kfm" - PID: 1186 TASK: c165a000 CPU: 0 COMMAND: "xterm" - PID: 1188 TASK: c705e000 CPU: 1 COMMAND: "bash" - PID: 4249 TASK: c6b9a000 CPU: 0 COMMAND: "crash" - - Display all children of the "kwm" window manager: - - crash> ps -c kwm - PID: 649 TASK: c179a000 CPU: 0 COMMAND: "kwm" - PID: 682 TASK: c2d58000 CPU: 1 COMMAND: "kwmsound" - PID: 683 TASK: c1164000 CPU: 1 COMMAND: "kfm" - PID: 685 TASK: c053c000 CPU: 0 COMMAND: "krootwm" - PID: 686 TASK: c13fa000 CPU: 0 COMMAND: "kpanel" - PID: 687 TASK: c13f0000 CPU: 1 COMMAND: 
"kbgndwm" - - Display all threads in a firefox session: - - crash> ps firefox - PID PPID CPU TASK ST %MEM VSZ RSS COMM - 21273 21256 6 ffff81003ec15080 IN 46.3 1138276 484364 firefox - 21276 21256 6 ffff81003f49e7e0 IN 46.3 1138276 484364 firefox - 21280 21256 0 ffff81003ec1d7e0 IN 46.3 1138276 484364 firefox - 21286 21256 6 ffff81000b0d1820 IN 46.3 1138276 484364 firefox - 21287 21256 2 ffff81000b0d10c0 IN 46.3 1138276 484364 firefox - 26975 21256 5 ffff81003b5c1820 IN 46.3 1138276 484364 firefox - 26976 21256 5 ffff810023232820 IN 46.3 1138276 484364 firefox - 26977 21256 4 ffff810021a11820 IN 46.3 1138276 484364 firefox - 26978 21256 5 ffff810003159040 IN 46.3 1138276 484364 firefox - 26979 21256 5 ffff81003a058820 IN 46.3 1138276 484364 firefox - - Display only the thread group leader in the firefox session: - - crash> ps -G firefox - PID PPID CPU TASK ST %MEM VSZ RSS COMM - 21273 21256 0 ffff81003ec15080 IN 46.3 1138276 484364 firefox - - Show the time usage data for pid 10318: - - crash> ps -t 10318 - PID: 10318 TASK: f7b85550 CPU: 5 COMMAND: "bash" - RUN TIME: 1 days, 01:35:32 - START TIME: 5209 - UTIME: 95 - STIME: 57 - - Show the process status of PID 1, task f9dec000, and all nfsd tasks: - - crash> ps 1 f9dec000 nfsd - PID PPID CPU TASK ST %MEM VSZ RSS COMM - 1 0 1 c0098000 IN 0.0 1096 476 init - 688 687 0 f9dec000 IN 0.1 1732 976 bash - 460 1 1 fa938000 IN 0.0 0 0 [nfsd] - 461 1 1 faa86000 IN 0.0 0 0 [nfsd] - 462 1 0 fac48000 IN 0.0 0 0 [nfsd] - 463 1 0 fb4ca000 IN 0.0 0 0 [nfsd] - 464 1 0 fb4c8000 IN 0.0 0 0 [nfsd] - 465 1 2 fba6e000 IN 0.0 0 0 [nfsd] - 466 1 1 fba6c000 IN 0.0 0 0 [nfsd] - 467 1 2 fac04000 IN 0.0 0 0 [nfsd] - - Show all kernel threads: - - crash> ps -k - PID PPID CPU TASK ST %MEM VSZ RSS COMM - 0 0 1 c0fac000 RU 0.0 0 0 [swapper] - 0 0 0 c0252000 RU 0.0 0 0 [swapper] - 2 1 1 c0fa0000 IN 0.0 0 0 [kflushd] - 3 1 1 c03de000 IN 0.0 0 0 [kpiod] - 4 1 1 c03dc000 IN 0.0 0 0 [kswapd] - 5 1 0 c0092000 IN 0.0 0 0 [mdrecoveryd] - 336 1 0 c4a9a000 IN 0.0 0 0 [rpciod] - 337 1 0 c4830000 IN 0.0 0 0 [lockd] - 487 1 1 c4ba6000 IN 0.0 0 0 [nfsd] - 488 1 0 c18c6000 IN 0.0 0 0 [nfsd] - 489 1 0 c0cac000 IN 0.0 0 0 [nfsd] - 490 1 0 c056a000 IN 0.0 0 0 [nfsd] - 491 1 0 c0860000 IN 0.0 0 0 [nfsd] - 492 1 1 c0254000 IN 0.0 0 0 [nfsd] - 493 1 0 c0a86000 IN 0.0 0 0 [nfsd] - 494 1 0 c0968000 IN 0.0 0 0 [nfsd] - - Show all tasks sorted by their task_struct's last_run or timestamp value, - whichever applies: - - crash> ps -l - [280195] [RU] PID: 2 TASK: c1468000 CPU: 0 COMMAND: "keventd" - [280195] [IN] PID: 1986 TASK: c5af4000 CPU: 0 COMMAND: "sshd" - [280195] [IN] PID: 2039 TASK: c58e6000 CPU: 0 COMMAND: "sshd" - [280195] [RU] PID: 2044 TASK: c5554000 CPU: 0 COMMAND: "bash" - [280195] [RU] PID: 2289 TASK: c70c0000 CPU: 0 COMMAND: "s" - [280190] [IN] PID: 1621 TASK: c54f8000 CPU: 0 COMMAND: "cupsd" - [280184] [IN] PID: 5 TASK: c154c000 CPU: 0 COMMAND: "kswapd" - [280184] [IN] PID: 6 TASK: c7ff6000 CPU: 0 COMMAND: "kscand" - [280170] [IN] PID: 0 TASK: c038e000 CPU: 0 COMMAND: "swapper" - [280166] [IN] PID: 2106 TASK: c0c0c000 CPU: 0 COMMAND: "sshd" - [280166] [IN] PID: 2162 TASK: c03a4000 CPU: 0 COMMAND: "vmstat" - [280160] [IN] PID: 1 TASK: c154a000 CPU: 0 COMMAND: "init" - [280131] [IN] PID: 3 TASK: c11ce000 CPU: 0 COMMAND: "kapmd" - [280117] [IN] PID: 1568 TASK: c5a8c000 CPU: 0 COMMAND: "smartd" - [280103] [IN] PID: 1694 TASK: c4c66000 CPU: 0 COMMAND: "ntpd" - [280060] [IN] PID: 8 TASK: c7ff2000 CPU: 0 COMMAND: "kupdated" - [279767] [IN] PID: 1720 TASK: c4608000 CPU: 0 COMMAND: 
"sendmail" - [279060] [IN] PID: 13 TASK: c69f4000 CPU: 0 COMMAND: "kjournald" - [278657] [IN] PID: 1523 TASK: c5ad4000 CPU: 0 COMMAND: "ypbind" - [277712] [IN] PID: 2163 TASK: c06e0000 CPU: 0 COMMAND: "sshd" - [277711] [IN] PID: 2244 TASK: c4cdc000 CPU: 0 COMMAND: "ssh" - [277261] [IN] PID: 1391 TASK: c5d8e000 CPU: 0 COMMAND: "syslogd" - [276837] [IN] PID: 1990 TASK: c58d8000 CPU: 0 COMMAND: "bash" - [276802] [IN] PID: 1853 TASK: c3828000 CPU: 0 COMMAND: "atd" - [276496] [IN] PID: 1749 TASK: c4480000 CPU: 0 COMMAND: "cannaserver" - [274931] [IN] PID: 1760 TASK: c43ac000 CPU: 0 COMMAND: "crond" - [246773] [IN] PID: 1844 TASK: c38d8000 CPU: 0 COMMAND: "xfs" - [125620] [IN] PID: 2170 TASK: c48dc000 CPU: 0 COMMAND: "bash" - [119059] [IN] PID: 1033 TASK: c64be000 CPU: 0 COMMAND: "kjournald" - [110916] [IN] PID: 1663 TASK: c528a000 CPU: 0 COMMAND: "sshd" - [ 86122] [IN] PID: 2112 TASK: c0da6000 CPU: 0 COMMAND: "bash" - [ 13637] [IN] PID: 1891 TASK: c67ae000 CPU: 0 COMMAND: "sshd" - [ 13636] [IN] PID: 1894 TASK: c38ec000 CPU: 0 COMMAND: "bash" - [ 7662] [IN] PID: 1885 TASK: c6478000 CPU: 0 COMMAND: "mingetty" - [ 7662] [IN] PID: 1886 TASK: c62da000 CPU: 0 COMMAND: "mingetty" - [ 7662] [IN] PID: 1887 TASK: c5f8c000 CPU: 0 COMMAND: "mingetty" - [ 7662] [IN] PID: 1888 TASK: c5f88000 CPU: 0 COMMAND: "mingetty" - [ 7662] [IN] PID: 1889 TASK: c5f86000 CPU: 0 COMMAND: "mingetty" - [ 7662] [IN] PID: 1890 TASK: c6424000 CPU: 0 COMMAND: "mingetty" - [ 7661] [IN] PID: 4 TASK: c154e000 CPU: 0 COMMAND: "ksoftirqd/0" - [ 7595] [IN] PID: 1872 TASK: c2e7e000 CPU: 0 COMMAND: "inventory.pl" - [ 6617] [IN] PID: 1771 TASK: c435a000 CPU: 0 COMMAND: "jserver" - [ 6307] [IN] PID: 1739 TASK: c48f8000 CPU: 0 COMMAND: "gpm" - [ 6285] [IN] PID: 1729 TASK: c4552000 CPU: 0 COMMAND: "sendmail" - [ 6009] [IN] PID: 1395 TASK: c6344000 CPU: 0 COMMAND: "klogd" - [ 5820] [IN] PID: 1677 TASK: c4d74000 CPU: 0 COMMAND: "xinetd" - [ 5719] [IN] PID: 1422 TASK: c5d04000 CPU: 0 COMMAND: "portmap" - [ 4633] [IN] PID: 1509 TASK: c5ed4000 CPU: 0 COMMAND: "apmd" - [ 4529] [IN] PID: 1520 TASK: c5d98000 CPU: 0 COMMAND: "ypbind" - [ 4515] [IN] PID: 1522 TASK: c5d32000 CPU: 0 COMMAND: "ypbind" - [ 4373] [IN] PID: 1441 TASK: c5d48000 CPU: 0 COMMAND: "rpc.statd" - [ 4210] [IN] PID: 1352 TASK: c5b30000 CPU: 0 COMMAND: "dhclient" - [ 1184] [IN] PID: 71 TASK: c65b6000 CPU: 0 COMMAND: "khubd" - [ 434] [IN] PID: 9 TASK: c11de000 CPU: 0 COMMAND: "mdrecoveryd" - [ 48] [IN] PID: 7 TASK: c7ff4000 CPU: 0 COMMAND: "bdflush" - - Show the kernel stack pointer of each user task: - - crash> ps -us - PID PPID CPU KSTACKP ST %MEM VSZ RSS COMM - 1 0 0 c009bedc IN 0.0 1096 52 init - 239 1 0 c15e7ed8 IN 0.2 1332 224 pump - 280 1 1 c7cbdedc IN 0.2 1092 208 portmap - 295 1 0 c7481edc IN 0.0 1232 0 ypbind - 301 295 0 c7c7bf28 IN 0.1 1260 124 ypbind - 376 1 1 c5053f28 IN 0.0 1316 40 automount - 381 1 0 c34ddf28 IN 0.2 1316 224 automount - 391 1 1 c2777f28 IN 0.2 1316 224 automount - ... 
- - Display the argument and environment data for the automount task: - - crash> ps -a automount - PID: 3948 TASK: f722ee30 CPU: 0 COMMAND: "automount" - ARG: /usr/sbin/automount --timeout=60 /net program /etc/auto.net - ENV: SELINUX_INIT=YES - CONSOLE=/dev/console - TERM=linux - INIT_VERSION=sysvinit-2.85 - PATH=/sbin:/usr/sbin:/bin:/usr/bin - LC_MESSAGES=en_US - RUNLEVEL=3 - runlevel=3 - PWD=/ - LANG=ja_JP.UTF-8 - PREVLEVEL=N - previous=N - HOME=/ - SHLVL=2 - _=/usr/sbin/automount - - Display the tasks in the thread group containing task c20ab0b0: - - crash> ps -g c20ab0b0 - PID: 6425 TASK: f72f50b0 CPU: 0 COMMAND: "firefox-bin" - PID: 6516 TASK: f71bf1b0 CPU: 0 COMMAND: "firefox-bin" - PID: 6518 TASK: d394b930 CPU: 0 COMMAND: "firefox-bin" - PID: 6520 TASK: c20aa030 CPU: 0 COMMAND: "firefox-bin" - PID: 6523 TASK: c20ab0b0 CPU: 0 COMMAND: "firefox-bin" - PID: 6614 TASK: f1f181b0 CPU: 0 COMMAND: "firefox-bin" - - Display the tasks in the thread group for each instance of the - program named "multi-thread": - - crash> ps -g multi-thread - PID: 2522 TASK: 1003f0dc7f0 CPU: 1 COMMAND: "multi-thread" - PID: 2523 TASK: 10037b13030 CPU: 1 COMMAND: "multi-thread" - PID: 2524 TASK: 1003e064030 CPU: 1 COMMAND: "multi-thread" - PID: 2525 TASK: 1003e13a7f0 CPU: 1 COMMAND: "multi-thread" - - PID: 2526 TASK: 1002f82b7f0 CPU: 1 COMMAND: "multi-thread" - PID: 2527 TASK: 1003e1737f0 CPU: 1 COMMAND: "multi-thread" - PID: 2528 TASK: 10035b4b7f0 CPU: 1 COMMAND: "multi-thread" - PID: 2529 TASK: 1003f0c37f0 CPU: 1 COMMAND: "multi-thread" - PID: 2530 TASK: 10035597030 CPU: 1 COMMAND: "multi-thread" - PID: 2531 TASK: 100184be7f0 CPU: 1 COMMAND: "multi-thread" - - Display the resource limits of "bash" task 13896: - - crash> ps -r 13896 - PID: 13896 TASK: cf402000 CPU: 0 COMMAND: "bash" - RLIMIT CURRENT MAXIMUM - CPU (unlimited) (unlimited) - FSIZE (unlimited) (unlimited) - DATA (unlimited) (unlimited) - STACK 10485760 (unlimited) - CORE (unlimited) (unlimited) - RSS (unlimited) (unlimited) - NPROC 4091 4091 - NOFILE 1024 1024 - MEMLOCK 4096 4096 - AS (unlimited) (unlimited) - LOCKS (unlimited) (unlimited) - - Search for task names matching a POSIX regular expression: - - crash> ps 'migration*' - PID PPID CPU TASK ST %MEM VSZ RSS COMM - 8 2 0 ffff8802128a2e20 IN 0.0 0 0 [migration/0] - 10 2 1 ffff880212969710 IN 0.0 0 0 [migration/1] - 15 2 2 ffff880212989710 IN 0.0 0 0 [migration/2] - 20 2 3 ffff8802129a9710 IN 0.0 0 0 [migration/3] - """ +class _Parser(ArgumentParser): + """ + NAME + ps - display process status information + + SYNOPSIS + ps [-k|-u|-G][-s|-n][-p|-c|-t|-l|-a|-g|-r] [pid | taskp | command] ... + + DESCRIPTION + This command displays process status for selected, or all, processes + in the system. If no arguments are entered, the process data is + is displayed for all processes. Specific processes may be selected + by using the following identifier formats: + + pid a process PID. + taskp a hexadecimal task_struct pointer. + command a command name. If a command name is made up of letters that + are all numerical values, precede the name string with a "\". + If the command string is enclosed within "'" characters, then + the encompassed string must be a POSIX extended regular expression + that will be used to match task names. + + The process list may be further restricted by the following options: + + -k restrict the output to only kernel threads. + -u restrict the output to only user tasks. + -G display only the thread group leader in a thread group. + + The process identifier types may be mixed. 
For each task, the following + items are displayed: + + 1. the process PID. + 2. the parent process PID. + 3. the CPU number that the task ran on last. + 4. the task_struct address or the kernel stack pointer of the process. + (see -s option below) + 5. the task state (RU, IN, UN, ZO, ST, TR, DE, SW). + 6. the percentage of physical memory being used by this task. + 7. the virtual address size of this task in kilobytes. + 8. the resident set size of this task in kilobytes. + 9. the command name. + + The default output shows the task_struct address of each process under a + column titled "TASK". This can be changed to show the kernel stack + pointer under a column titled "KSTACKP". + + -s replace the TASK column with the KSTACKP column. + + On SMP machines, the active task on each CPU will be highlighted by an + angle bracket (">") preceding its information. + + Alternatively, information regarding parent-child relationships, + per-task time usage data, argument/environment data, thread groups, + or resource limits may be displayed: + + -p display the parental hierarchy of selected, or all, tasks. + -c display the children of selected, or all, tasks. + -t display the task run time, start time, and cumulative user + and system times. + -l display the task last_run or timestamp value, whichever applies, + of selected, or all, tasks; the list is sorted with the most + recently-run task (largest last_run/timestamp) shown first, + followed by the task's current state. + -a display the command line arguments and environment strings of + selected, or all, user-mode tasks. + -g display tasks by thread group, of selected, or all, tasks. + -r display resource limits (rlimits) of selected, or all, tasks. + -n display gdb thread number + + EXAMPLES + Show the process status of all current tasks: + + crash> ps + PID PPID CPU TASK ST %MEM VSZ RSS COMM + > 0 0 3 c024c000 RU 0.0 0 0 [swapper] + > 0 0 0 c0dce000 RU 0.0 0 0 [swapper] + 0 0 1 c0fa8000 RU 0.0 0 0 [swapper] + > 0 0 2 c009a000 RU 0.0 0 0 [swapper] + 1 0 1 c0098000 IN 0.0 1096 476 init + 2 1 1 c0090000 IN 0.0 0 0 [kflushd] + 3 1 1 c000e000 IN 0.0 0 0 [kpiod] + 4 1 3 c000c000 IN 0.0 0 0 [kswapd] + 5 1 1 c0008000 IN 0.0 0 0 [mdrecoveryd] + 253 1 2 fbc4c000 IN 0.0 1088 376 portmap + 268 1 2 fbc82000 IN 0.1 1232 504 ypbind + 274 268 2 fa984000 IN 0.1 1260 556 ypbind + 321 1 1 fabf6000 IN 0.1 1264 608 syslogd + 332 1 1 fa9be000 RU 0.1 1364 736 klogd + 346 1 2 fae88000 IN 0.0 1112 472 atd + 360 1 2 faeb2000 IN 0.1 1284 592 crond + 378 1 2 fafd6000 IN 0.1 1236 560 inetd + 392 1 0 fb710000 IN 0.1 2264 1468 named + 406 1 3 fb768000 IN 0.1 1284 560 lpd + 423 1 1 fb8ac000 IN 0.1 1128 528 rpc.statd + 434 1 2 fb75a000 IN 0.0 1072 376 rpc.rquotad + 445 1 2 fb4a4000 IN 0.0 1132 456 rpc.mountd + 460 1 1 fa938000 IN 0.0 0 0 [nfsd] + 461 1 1 faa86000 IN 0.0 0 0 [nfsd] + 462 1 0 fac48000 IN 0.0 0 0 [nfsd] + 463 1 0 fb4ca000 IN 0.0 0 0 [nfsd] + 464 1 0 fb4c8000 IN 0.0 0 0 [nfsd] + 465 1 2 fba6e000 IN 0.0 0 0 [nfsd] + 466 1 1 fba6c000 IN 0.0 0 0 [nfsd] + 467 1 2 fac04000 IN 0.0 0 0 [nfsd] + 468 461 2 fa93a000 IN 0.0 0 0 [lockd] + 469 468 2 fa93e000 IN 0.0 0 0 [rpciod] + 486 1 0 fab54000 IN 0.1 1596 880 amd + 523 1 2 fa84e000 IN 0.1 1884 1128 sendmail + 538 1 0 fa82c000 IN 0.0 1112 416 gpm + 552 1 3 fa70a000 IN 0.1 2384 1220 httpd + 556 552 3 fa776000 IN 0.1 2572 1352 httpd + 557 552 2 faba4000 IN 0.1 2572 1352 httpd + 558 552 1 fa802000 IN 0.1 2572 1352 httpd + 559 552 3 fa6ee000 IN 0.1 2572 1352 httpd + 560 552 3 fa700000 IN 0.1 2572 1352 httpd + 561 552 0 fa6f0000 
IN 0.1 2572 1352 httpd + 562 552 3 fa6ea000 IN 0.1 2572 1352 httpd + 563 552 0 fa67c000 IN 0.1 2572 1352 httpd + 564 552 3 fa674000 IN 0.1 2572 1352 httpd + 565 552 3 fa66a000 IN 0.1 2572 1352 httpd + 582 1 2 fa402000 IN 0.2 2968 1916 xfs + 633 1 2 fa1ec000 IN 0.2 5512 2248 innd + 636 1 3 fa088000 IN 0.1 2536 804 actived + 676 1 0 fa840000 IN 0.0 1060 384 mingetty + 677 1 1 fa590000 IN 0.0 1060 384 mingetty + 678 1 2 fa3b8000 IN 0.0 1060 384 mingetty + 679 1 0 fa5b8000 IN 0.0 1060 384 mingetty + 680 1 1 fa3a4000 IN 0.0 1060 384 mingetty + 681 1 2 fa30a000 IN 0.0 1060 384 mingetty + 683 1 3 fa5d8000 IN 0.0 1052 280 update + 686 378 1 fa3aa000 IN 0.1 2320 1136 in.rlogind + 687 686 2 f9e52000 IN 0.1 2136 1000 login + 688 687 0 f9dec000 IN 0.1 1732 976 bash + > 700 688 1 f9d62000 RU 0.0 1048 256 gen12 + + Display the parental hierarchy of the "crash" process on a live system: + + crash> ps -p 4249 + PID: 0 TASK: c0252000 CPU: 0 COMMAND: "swapper" + PID: 1 TASK: c009a000 CPU: 1 COMMAND: "init" + PID: 632 TASK: c73b6000 CPU: 1 COMMAND: "prefdm" + PID: 637 TASK: c5a4a000 CPU: 1 COMMAND: "prefdm" + PID: 649 TASK: c179a000 CPU: 0 COMMAND: "kwm" + PID: 683 TASK: c1164000 CPU: 0 COMMAND: "kfm" + PID: 1186 TASK: c165a000 CPU: 0 COMMAND: "xterm" + PID: 1188 TASK: c705e000 CPU: 1 COMMAND: "bash" + PID: 4249 TASK: c6b9a000 CPU: 0 COMMAND: "crash" + + Display all children of the "kwm" window manager: + + crash> ps -c kwm + PID: 649 TASK: c179a000 CPU: 0 COMMAND: "kwm" + PID: 682 TASK: c2d58000 CPU: 1 COMMAND: "kwmsound" + PID: 683 TASK: c1164000 CPU: 1 COMMAND: "kfm" + PID: 685 TASK: c053c000 CPU: 0 COMMAND: "krootwm" + PID: 686 TASK: c13fa000 CPU: 0 COMMAND: "kpanel" + PID: 687 TASK: c13f0000 CPU: 1 COMMAND: "kbgndwm" + + Display all threads in a firefox session: + + crash> ps firefox + PID PPID CPU TASK ST %MEM VSZ RSS COMM + 21273 21256 6 ffff81003ec15080 IN 46.3 1138276 484364 firefox + 21276 21256 6 ffff81003f49e7e0 IN 46.3 1138276 484364 firefox + 21280 21256 0 ffff81003ec1d7e0 IN 46.3 1138276 484364 firefox + 21286 21256 6 ffff81000b0d1820 IN 46.3 1138276 484364 firefox + 21287 21256 2 ffff81000b0d10c0 IN 46.3 1138276 484364 firefox + 26975 21256 5 ffff81003b5c1820 IN 46.3 1138276 484364 firefox + 26976 21256 5 ffff810023232820 IN 46.3 1138276 484364 firefox + 26977 21256 4 ffff810021a11820 IN 46.3 1138276 484364 firefox + 26978 21256 5 ffff810003159040 IN 46.3 1138276 484364 firefox + 26979 21256 5 ffff81003a058820 IN 46.3 1138276 484364 firefox + + Display only the thread group leader in the firefox session: + + crash> ps -G firefox + PID PPID CPU TASK ST %MEM VSZ RSS COMM + 21273 21256 0 ffff81003ec15080 IN 46.3 1138276 484364 firefox + + Show the time usage data for pid 10318: + + crash> ps -t 10318 + PID: 10318 TASK: f7b85550 CPU: 5 COMMAND: "bash" + RUN TIME: 1 days, 01:35:32 + START TIME: 5209 + UTIME: 95 + STIME: 57 + + Show the process status of PID 1, task f9dec000, and all nfsd tasks: + + crash> ps 1 f9dec000 nfsd + PID PPID CPU TASK ST %MEM VSZ RSS COMM + 1 0 1 c0098000 IN 0.0 1096 476 init + 688 687 0 f9dec000 IN 0.1 1732 976 bash + 460 1 1 fa938000 IN 0.0 0 0 [nfsd] + 461 1 1 faa86000 IN 0.0 0 0 [nfsd] + 462 1 0 fac48000 IN 0.0 0 0 [nfsd] + 463 1 0 fb4ca000 IN 0.0 0 0 [nfsd] + 464 1 0 fb4c8000 IN 0.0 0 0 [nfsd] + 465 1 2 fba6e000 IN 0.0 0 0 [nfsd] + 466 1 1 fba6c000 IN 0.0 0 0 [nfsd] + 467 1 2 fac04000 IN 0.0 0 0 [nfsd] + + Show all kernel threads: + + crash> ps -k + PID PPID CPU TASK ST %MEM VSZ RSS COMM + 0 0 1 c0fac000 RU 0.0 0 0 [swapper] + 0 0 0 c0252000 RU 0.0 0 0 [swapper] + 2 
1 1 c0fa0000 IN 0.0 0 0 [kflushd] + 3 1 1 c03de000 IN 0.0 0 0 [kpiod] + 4 1 1 c03dc000 IN 0.0 0 0 [kswapd] + 5 1 0 c0092000 IN 0.0 0 0 [mdrecoveryd] + 336 1 0 c4a9a000 IN 0.0 0 0 [rpciod] + 337 1 0 c4830000 IN 0.0 0 0 [lockd] + 487 1 1 c4ba6000 IN 0.0 0 0 [nfsd] + 488 1 0 c18c6000 IN 0.0 0 0 [nfsd] + 489 1 0 c0cac000 IN 0.0 0 0 [nfsd] + 490 1 0 c056a000 IN 0.0 0 0 [nfsd] + 491 1 0 c0860000 IN 0.0 0 0 [nfsd] + 492 1 1 c0254000 IN 0.0 0 0 [nfsd] + 493 1 0 c0a86000 IN 0.0 0 0 [nfsd] + 494 1 0 c0968000 IN 0.0 0 0 [nfsd] + + Show all tasks sorted by their task_struct's last_run or timestamp value, + whichever applies: + + crash> ps -l + [280195] [RU] PID: 2 TASK: c1468000 CPU: 0 COMMAND: "keventd" + [280195] [IN] PID: 1986 TASK: c5af4000 CPU: 0 COMMAND: "sshd" + [280195] [IN] PID: 2039 TASK: c58e6000 CPU: 0 COMMAND: "sshd" + [280195] [RU] PID: 2044 TASK: c5554000 CPU: 0 COMMAND: "bash" + [280195] [RU] PID: 2289 TASK: c70c0000 CPU: 0 COMMAND: "s" + [280190] [IN] PID: 1621 TASK: c54f8000 CPU: 0 COMMAND: "cupsd" + [280184] [IN] PID: 5 TASK: c154c000 CPU: 0 COMMAND: "kswapd" + [280184] [IN] PID: 6 TASK: c7ff6000 CPU: 0 COMMAND: "kscand" + [280170] [IN] PID: 0 TASK: c038e000 CPU: 0 COMMAND: "swapper" + [280166] [IN] PID: 2106 TASK: c0c0c000 CPU: 0 COMMAND: "sshd" + [280166] [IN] PID: 2162 TASK: c03a4000 CPU: 0 COMMAND: "vmstat" + [280160] [IN] PID: 1 TASK: c154a000 CPU: 0 COMMAND: "init" + [280131] [IN] PID: 3 TASK: c11ce000 CPU: 0 COMMAND: "kapmd" + [280117] [IN] PID: 1568 TASK: c5a8c000 CPU: 0 COMMAND: "smartd" + [280103] [IN] PID: 1694 TASK: c4c66000 CPU: 0 COMMAND: "ntpd" + [280060] [IN] PID: 8 TASK: c7ff2000 CPU: 0 COMMAND: "kupdated" + [279767] [IN] PID: 1720 TASK: c4608000 CPU: 0 COMMAND: "sendmail" + [279060] [IN] PID: 13 TASK: c69f4000 CPU: 0 COMMAND: "kjournald" + [278657] [IN] PID: 1523 TASK: c5ad4000 CPU: 0 COMMAND: "ypbind" + [277712] [IN] PID: 2163 TASK: c06e0000 CPU: 0 COMMAND: "sshd" + [277711] [IN] PID: 2244 TASK: c4cdc000 CPU: 0 COMMAND: "ssh" + [277261] [IN] PID: 1391 TASK: c5d8e000 CPU: 0 COMMAND: "syslogd" + [276837] [IN] PID: 1990 TASK: c58d8000 CPU: 0 COMMAND: "bash" + [276802] [IN] PID: 1853 TASK: c3828000 CPU: 0 COMMAND: "atd" + [276496] [IN] PID: 1749 TASK: c4480000 CPU: 0 COMMAND: "cannaserver" + [274931] [IN] PID: 1760 TASK: c43ac000 CPU: 0 COMMAND: "crond" + [246773] [IN] PID: 1844 TASK: c38d8000 CPU: 0 COMMAND: "xfs" + [125620] [IN] PID: 2170 TASK: c48dc000 CPU: 0 COMMAND: "bash" + [119059] [IN] PID: 1033 TASK: c64be000 CPU: 0 COMMAND: "kjournald" + [110916] [IN] PID: 1663 TASK: c528a000 CPU: 0 COMMAND: "sshd" + [ 86122] [IN] PID: 2112 TASK: c0da6000 CPU: 0 COMMAND: "bash" + [ 13637] [IN] PID: 1891 TASK: c67ae000 CPU: 0 COMMAND: "sshd" + [ 13636] [IN] PID: 1894 TASK: c38ec000 CPU: 0 COMMAND: "bash" + [ 7662] [IN] PID: 1885 TASK: c6478000 CPU: 0 COMMAND: "mingetty" + [ 7662] [IN] PID: 1886 TASK: c62da000 CPU: 0 COMMAND: "mingetty" + [ 7662] [IN] PID: 1887 TASK: c5f8c000 CPU: 0 COMMAND: "mingetty" + [ 7662] [IN] PID: 1888 TASK: c5f88000 CPU: 0 COMMAND: "mingetty" + [ 7662] [IN] PID: 1889 TASK: c5f86000 CPU: 0 COMMAND: "mingetty" + [ 7662] [IN] PID: 1890 TASK: c6424000 CPU: 0 COMMAND: "mingetty" + [ 7661] [IN] PID: 4 TASK: c154e000 CPU: 0 COMMAND: "ksoftirqd/0" + [ 7595] [IN] PID: 1872 TASK: c2e7e000 CPU: 0 COMMAND: "inventory.pl" + [ 6617] [IN] PID: 1771 TASK: c435a000 CPU: 0 COMMAND: "jserver" + [ 6307] [IN] PID: 1739 TASK: c48f8000 CPU: 0 COMMAND: "gpm" + [ 6285] [IN] PID: 1729 TASK: c4552000 CPU: 0 COMMAND: "sendmail" + [ 6009] [IN] PID: 1395 TASK: c6344000 CPU: 0 
COMMAND: "klogd" + [ 5820] [IN] PID: 1677 TASK: c4d74000 CPU: 0 COMMAND: "xinetd" + [ 5719] [IN] PID: 1422 TASK: c5d04000 CPU: 0 COMMAND: "portmap" + [ 4633] [IN] PID: 1509 TASK: c5ed4000 CPU: 0 COMMAND: "apmd" + [ 4529] [IN] PID: 1520 TASK: c5d98000 CPU: 0 COMMAND: "ypbind" + [ 4515] [IN] PID: 1522 TASK: c5d32000 CPU: 0 COMMAND: "ypbind" + [ 4373] [IN] PID: 1441 TASK: c5d48000 CPU: 0 COMMAND: "rpc.statd" + [ 4210] [IN] PID: 1352 TASK: c5b30000 CPU: 0 COMMAND: "dhclient" + [ 1184] [IN] PID: 71 TASK: c65b6000 CPU: 0 COMMAND: "khubd" + [ 434] [IN] PID: 9 TASK: c11de000 CPU: 0 COMMAND: "mdrecoveryd" + [ 48] [IN] PID: 7 TASK: c7ff4000 CPU: 0 COMMAND: "bdflush" + + Show the kernel stack pointer of each user task: + + crash> ps -us + PID PPID CPU KSTACKP ST %MEM VSZ RSS COMM + 1 0 0 c009bedc IN 0.0 1096 52 init + 239 1 0 c15e7ed8 IN 0.2 1332 224 pump + 280 1 1 c7cbdedc IN 0.2 1092 208 portmap + 295 1 0 c7481edc IN 0.0 1232 0 ypbind + 301 295 0 c7c7bf28 IN 0.1 1260 124 ypbind + 376 1 1 c5053f28 IN 0.0 1316 40 automount + 381 1 0 c34ddf28 IN 0.2 1316 224 automount + 391 1 1 c2777f28 IN 0.2 1316 224 automount + ... + + Display the argument and environment data for the automount task: + + crash> ps -a automount + PID: 3948 TASK: f722ee30 CPU: 0 COMMAND: "automount" + ARG: /usr/sbin/automount --timeout=60 /net program /etc/auto.net + ENV: SELINUX_INIT=YES + CONSOLE=/dev/console + TERM=linux + INIT_VERSION=sysvinit-2.85 + PATH=/sbin:/usr/sbin:/bin:/usr/bin + LC_MESSAGES=en_US + RUNLEVEL=3 + runlevel=3 + PWD=/ + LANG=ja_JP.UTF-8 + PREVLEVEL=N + previous=N + HOME=/ + SHLVL=2 + _=/usr/sbin/automount + + Display the tasks in the thread group containing task c20ab0b0: + + crash> ps -g c20ab0b0 + PID: 6425 TASK: f72f50b0 CPU: 0 COMMAND: "firefox-bin" + PID: 6516 TASK: f71bf1b0 CPU: 0 COMMAND: "firefox-bin" + PID: 6518 TASK: d394b930 CPU: 0 COMMAND: "firefox-bin" + PID: 6520 TASK: c20aa030 CPU: 0 COMMAND: "firefox-bin" + PID: 6523 TASK: c20ab0b0 CPU: 0 COMMAND: "firefox-bin" + PID: 6614 TASK: f1f181b0 CPU: 0 COMMAND: "firefox-bin" + + Display the tasks in the thread group for each instance of the + program named "multi-thread": + + crash> ps -g multi-thread + PID: 2522 TASK: 1003f0dc7f0 CPU: 1 COMMAND: "multi-thread" + PID: 2523 TASK: 10037b13030 CPU: 1 COMMAND: "multi-thread" + PID: 2524 TASK: 1003e064030 CPU: 1 COMMAND: "multi-thread" + PID: 2525 TASK: 1003e13a7f0 CPU: 1 COMMAND: "multi-thread" + + PID: 2526 TASK: 1002f82b7f0 CPU: 1 COMMAND: "multi-thread" + PID: 2527 TASK: 1003e1737f0 CPU: 1 COMMAND: "multi-thread" + PID: 2528 TASK: 10035b4b7f0 CPU: 1 COMMAND: "multi-thread" + PID: 2529 TASK: 1003f0c37f0 CPU: 1 COMMAND: "multi-thread" + PID: 2530 TASK: 10035597030 CPU: 1 COMMAND: "multi-thread" + PID: 2531 TASK: 100184be7f0 CPU: 1 COMMAND: "multi-thread" + + Display the resource limits of "bash" task 13896: + + crash> ps -r 13896 + PID: 13896 TASK: cf402000 CPU: 0 COMMAND: "bash" + RLIMIT CURRENT MAXIMUM + CPU (unlimited) (unlimited) + FSIZE (unlimited) (unlimited) + DATA (unlimited) (unlimited) + STACK 10485760 (unlimited) + CORE (unlimited) (unlimited) + RSS (unlimited) (unlimited) + NPROC 4091 4091 + NOFILE 1024 1024 + MEMLOCK 4096 4096 + AS (unlimited) (unlimited) + LOCKS (unlimited) (unlimited) + + Search for task names matching a POSIX regular expression: + + crash> ps 'migration*' + PID PPID CPU TASK ST %MEM VSZ RSS COMM + 8 2 0 ffff8802128a2e20 IN 0.0 0 0 [migration/0] + 10 2 1 ffff880212969710 IN 0.0 0 0 [migration/1] + 15 2 2 ffff880212989710 IN 0.0 0 0 [migration/2] + 20 2 3 ffff8802129a9710 IN 
0.0 0 0 [migration/3] + """ + def format_usage(self) -> str: + return \ + "ps [-k|-u|-G][-s][-p|-c|-t|-l|-a|-g|-r] [pid | taskp | command] ...\n" class PSCommand(Command): """display process status information""" def __init__(self): - parser = ArgumentParser(prog="ps") + parser = _Parser(prog="ps") group = parser.add_mutually_exclusive_group() group.add_argument('-k', action='store_true', default=False) @@ -549,20 +553,8 @@ def __init__(self): parser.add_argument('args', nargs=argparse.REMAINDER) - parser.format_usage = lambda: \ - "ps [-k|-u|-G][-s][-p|-c|-t|-l|-a|-g|-r] [pid | taskp | command] ...\n" - Command.__init__(self, "ps", parser) - def format_help(self) -> str: - """ - Returns the help text for the ps command - - Returns: - :obj:`str`: The help text for the ps command. - """ - return ps_help_text - def task_state_string(self, task): state = task.task_state() buf = None diff --git a/crash/commands/syscmd.py b/crash/commands/syscmd.py index b2ef256936b..b46064c9d27 100644 --- a/crash/commands/syscmd.py +++ b/crash/commands/syscmd.py @@ -6,58 +6,49 @@ from crash.commands import CommandLineError from crash.cache.syscache import utsname, config, kernel -sys_help_text = """ -NAME - sys - system data - -SYNOPSIS - sys [config] - -DESCRIPTION - This command displays system-specific data. If no arguments are entered, - the same system data shown during crash invocation is shown. - - config If the kernel was configured with CONFIG_IKCONFIG, then - dump the in-kernel configuration data. - -EXAMPLES - Display essential system information: - - crash> sys config - KERNEL: vmlinux.4 - DUMPFILE: lcore.cr.4 - CPUS: 4 - DATE: Mon Oct 11 18:48:55 1999 - UPTIME: 10 days, 14:14:39 - LOAD AVERAGE: 0.74, 0.23, 0.08 - TASKS: 77 - NODENAME: test.mclinux.com - RELEASE: 2.2.5-15smp - VERSION: #24 SMP Mon Oct 11 17:41:40 CDT 1999 - MACHINE: i686 (500 MHz) - MEMORY: 1 GB -""" +class _Parser(ArgumentParser): + """ + NAME + sys - system data + + SYNOPSIS + sys [config] + + DESCRIPTION + This command displays system-specific data. If no arguments are entered, + the same system data shown during crash invocation is shown. + + config If the kernel was configured with CONFIG_IKCONFIG, then + dump the in-kernel configuration data. + + EXAMPLES + Display essential system information: + + crash> sys config + KERNEL: vmlinux.4 + DUMPFILE: lcore.cr.4 + CPUS: 4 + DATE: Mon Oct 11 18:48:55 1999 + UPTIME: 10 days, 14:14:39 + LOAD AVERAGE: 0.74, 0.23, 0.08 + TASKS: 77 + NODENAME: test.mclinux.com + RELEASE: 2.2.5-15smp + VERSION: #24 SMP Mon Oct 11 17:41:40 CDT 1999 + MACHINE: i686 (500 MHz) + MEMORY: 1 GB + """ class SysCommand(Command): """system data""" def __init__(self, name): - parser = ArgumentParser(prog=name) + parser = _Parser(prog=name) parser.add_argument('config', nargs='?') - parser.format_usage = lambda: "sys [config]\n" Command.__init__(self, name, parser) - def format_help(self) -> str: - """ - Returns the help text for the sys command - - Returns: - :obj:`str`: The help text for the sys command. 
- """ - return sys_help_text - @staticmethod def show_default(): print(" UPTIME: {}".format(kernel.uptime)) diff --git a/crash/commands/task.py b/crash/commands/task.py index 9b677108f51..296c12d1a98 100644 --- a/crash/commands/task.py +++ b/crash/commands/task.py @@ -6,19 +6,20 @@ import crash.cache.tasks import argparse -task_help_text = """ -NAME - task - select task by pid +class _Parser(ArgumentParser): + """ + NAME + task - select task by pid -SYNOPSIS - task + SYNOPSIS + task -DESCRIPTION - This command selects the appropriate gdb thread using its Linux pid. + DESCRIPTION + This command selects the appropriate gdb thread using its Linux pid. -EXAMPLES - task 1402 -""" + EXAMPLES + task 1402 + """ class TaskCommand(Command): """select task by pid""" @@ -29,18 +30,8 @@ def __init__(self, name): parser.add_argument('pid', type=int, nargs=argparse.REMAINDER) - parser.format_usage = lambda: "thread \n" Command.__init__(self, name, parser) - def format_help(self) -> str: - """ - Returns the help text for the task command - - Returns: - :obj:`str`: The help text for the task command. - """ - return task_help_text - def execute(self, args): try: if args.pid: diff --git a/crash/commands/vtop.py b/crash/commands/vtop.py index c92db9ed9b9..b79766352d5 100644 --- a/crash/commands/vtop.py +++ b/crash/commands/vtop.py @@ -59,116 +59,119 @@ def address(self): except (addrxlat.NotPresentError, addrxlat.NoDataError): return addr + 'N/A' -vtop_help_text = """ -NAME - vtop - virtual to physical - -SYNOPSIS - vtop [-c [pid | taskp]] [-u|-k] address ... - -DESCRIPTION - This command translates a user or kernel virtual address to its physical - address. Also displayed is the PTE translation, the vm_area_struct data - for user virtual addresses, the mem_map page data associated with the - physical page, and the swap location or file location if the page is - not mapped. The -u and -k options specify that the address is a user - or kernel virtual address; -u and -k are not necessary on processors whose - virtual addresses self-define themselves as user or kernel. User addresses - are translated with respect to the current context unless the -c option - is used. Kernel virtual addresses are translated using the swapper_pg_dir - as the base page directory unless the -c option is used. - - -u The address is a user virtual address; only required - on processors with overlapping user and kernel virtual - address spaces. - -k The address is a kernel virtual address; only required - on processors with overlapping user and kernel virtual - address spaces. - -c [pid | taskp] Translate the virtual address from the page directory - of the specified PID or hexadecimal task_struct pointer. - However, if this command is invoked from "foreach vtop", - the pid or taskp argument should NOT be entered; the - address will be translated using the page directory of - each task specified by "foreach". - address A hexadecimal user or kernel virtual address. 
- -EXAMPLES - Translate user virtual address 80b4000: - - crash> vtop 80b4000 - VIRTUAL PHYSICAL - 80b4000 660f000 - - PAGE DIRECTORY: c37f0000 - PGD: c37f0080 => e0d067 - PMD: c37f0080 => e0d067 - PTE: c0e0d2d0 => 660f067 - PAGE: 660f000 - - PTE PHYSICAL FLAGS - 660f067 660f000 (PRESENT|RW|USER|ACCESSED|DIRTY) - - VMA START END FLAGS FILE - c773daa0 80b4000 810c000 77 - - PAGE PHYSICAL INODE OFFSET CNT FLAGS - c0393258 660f000 0 17000 1 uptodate - - Translate kernel virtual address c806e000, first using swapper_pg_dir - as the page directory base, and secondly, using the page table base - of PID 1359: - - crash> vtop c806e000 - VIRTUAL PHYSICAL - c806e000 2216000 - - PAGE DIRECTORY: c0101000 - PGD: c0101c80 => 94063 - PMD: c0101c80 => 94063 - PTE: c00941b8 => 2216063 - PAGE: 2216000 - - PTE PHYSICAL FLAGS - 2216063 2216000 (PRESENT|RW|ACCESSED|DIRTY) - - PAGE PHYSICAL INODE OFFSET CNT FLAGS - c02e9370 2216000 0 0 1 - - crash> vtop -c 1359 c806e000 - VIRTUAL PHYSICAL - c806e000 2216000 - - PAGE DIRECTORY: c5caf000 - PGD: c5cafc80 => 94063 - PMD: c5cafc80 => 94063 - PTE: c00941b8 => 2216063 - PAGE: 2216000 - - PTE PHYSICAL FLAGS - 2216063 2216000 (PRESENT|RW|ACCESSED|DIRTY) - - PAGE PHYSICAL INODE OFFSET CNT FLAGS - c02e9370 2216000 0 0 1 - - Determine swap location of user virtual address 40104000: - - crash> vtop 40104000 - VIRTUAL PHYSICAL - 40104000 (not mapped) - - PAGE DIRECTORY: c40d8000 - PGD: c40d8400 => 6bbe067 - PMD: c40d8400 => 6bbe067 - PTE: c6bbe410 => 58bc00 - - PTE SWAP OFFSET - 58bc00 /dev/sda8 22716 - - VMA START END FLAGS FILE - c7200ae0 40104000 40b08000 73 - - SWAP: /dev/sda8 OFFSET: 22716 -""" +class _Parser(ArgumentParser): + """ + NAME + vtop - virtual to physical + + SYNOPSIS + vtop [-c [pid | taskp]] [-u|-k] address ... + + DESCRIPTION + This command translates a user or kernel virtual address to its physical + address. Also displayed is the PTE translation, the vm_area_struct data + for user virtual addresses, the mem_map page data associated with the + physical page, and the swap location or file location if the page is + not mapped. The -u and -k options specify that the address is a user + or kernel virtual address; -u and -k are not necessary on processors whose + virtual addresses self-define themselves as user or kernel. User addresses + are translated with respect to the current context unless the -c option + is used. Kernel virtual addresses are translated using the swapper_pg_dir + as the base page directory unless the -c option is used. + + -u The address is a user virtual address; only required + on processors with overlapping user and kernel virtual + address spaces. + -k The address is a kernel virtual address; only required + on processors with overlapping user and kernel virtual + address spaces. + -c [pid | taskp] Translate the virtual address from the page directory + of the specified PID or hexadecimal task_struct pointer. + However, if this command is invoked from "foreach vtop", + the pid or taskp argument should NOT be entered; the + address will be translated using the page directory of + each task specified by "foreach". + address A hexadecimal user or kernel virtual address. 
+ + EXAMPLES + Translate user virtual address 80b4000: + + crash> vtop 80b4000 + VIRTUAL PHYSICAL + 80b4000 660f000 + + PAGE DIRECTORY: c37f0000 + PGD: c37f0080 => e0d067 + PMD: c37f0080 => e0d067 + PTE: c0e0d2d0 => 660f067 + PAGE: 660f000 + + PTE PHYSICAL FLAGS + 660f067 660f000 (PRESENT|RW|USER|ACCESSED|DIRTY) + + VMA START END FLAGS FILE + c773daa0 80b4000 810c000 77 + + PAGE PHYSICAL INODE OFFSET CNT FLAGS + c0393258 660f000 0 17000 1 uptodate + + Translate kernel virtual address c806e000, first using swapper_pg_dir + as the page directory base, and secondly, using the page table base + of PID 1359: + + crash> vtop c806e000 + VIRTUAL PHYSICAL + c806e000 2216000 + + PAGE DIRECTORY: c0101000 + PGD: c0101c80 => 94063 + PMD: c0101c80 => 94063 + PTE: c00941b8 => 2216063 + PAGE: 2216000 + + PTE PHYSICAL FLAGS + 2216063 2216000 (PRESENT|RW|ACCESSED|DIRTY) + + PAGE PHYSICAL INODE OFFSET CNT FLAGS + c02e9370 2216000 0 0 1 + + crash> vtop -c 1359 c806e000 + VIRTUAL PHYSICAL + c806e000 2216000 + + PAGE DIRECTORY: c5caf000 + PGD: c5cafc80 => 94063 + PMD: c5cafc80 => 94063 + PTE: c00941b8 => 2216063 + PAGE: 2216000 + + PTE PHYSICAL FLAGS + 2216063 2216000 (PRESENT|RW|ACCESSED|DIRTY) + + PAGE PHYSICAL INODE OFFSET CNT FLAGS + c02e9370 2216000 0 0 1 + + Determine swap location of user virtual address 40104000: + + crash> vtop 40104000 + VIRTUAL PHYSICAL + 40104000 (not mapped) + + PAGE DIRECTORY: c40d8000 + PGD: c40d8400 => 6bbe067 + PMD: c40d8400 => 6bbe067 + PTE: c6bbe410 => 58bc00 + + PTE SWAP OFFSET + 58bc00 /dev/sda8 22716 + + VMA START END FLAGS FILE + c7200ae0 40104000 40b08000 73 + + SWAP: /dev/sda8 OFFSET: 22716 + """ + def format_usage(self) -> str: + return "vtop [-c [pid | taskp]] [-u|-k] address ...\n" class VTOPCommand(Command): """convert virtual address to physical""" @@ -184,20 +187,8 @@ def __init__(self): parser.add_argument('args', nargs=argparse.ONE_OR_MORE) - parser.format_usage = lambda : \ - "vtop [-c [pid | taskp]] [-u|-k] address ...\n" - super().__init__("vtop", parser) - def format_help(self) -> str: - """ - Returns the help text for the vtop command - - Returns: - :obj:`str`: The help text for the vtop command. - """ - return vtop_help_text - def execute(self, argv): ctx = addrxlat_context() sys = addrxlat_system() diff --git a/crash/commands/xfs.py b/crash/commands/xfs.py index e60051b1fe0..a729330b516 100644 --- a/crash/commands/xfs.py +++ b/crash/commands/xfs.py @@ -28,19 +28,21 @@ from crash.util.symbols import Types types = Types(['struct xfs_buf *']) -xfs_help_text = """ -NAME - xfs - display XFS internal data structures -SYNOPSIS - xfs [arguments ...] +class _Parser(ArgumentParser): + """ + NAME + xfs - display XFS internal data structures -COMMANDS - xfs list - xfs show - xfs dump-ail - xfs dump-buft -""" + SYNOPSIS + xfs [arguments ...] + + COMMANDS + xfs list + xfs show + xfs dump-ail + xfs dump-buft + """ class XFSCommand(Command): """display XFS internal data structures""" @@ -62,15 +64,6 @@ def __init__(self, name): Command.__init__(self, name, parser) - def format_help(self) -> str: - """ - Returns the help text for the xfs command - - Returns: - :obj:`str`: The help text for the xfs command. 
- """ - return xfs_help_text - def list_xfs(self, args: Namespace) -> None: count = 0 print_header = True From fb327c45ee0f67fbc98463ca1e02d88491add5c8 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Wed, 5 Jun 2019 19:06:19 -0400 Subject: [PATCH 153/367] crash.kernel: fix ModinfoMismatchError initialization The ModinfoMismatchError exception inherits from ValueError but doesn't call its constructor. This commit fixes that as well as calls new-style super() from the derived exceptions. Signed-off-by: Jeff Mahoney --- crash/kernel.py | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/crash/kernel.py b/crash/kernel.py index 9afd5c9e3df..c869f267cf7 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -26,27 +26,22 @@ class NoMatchingFileError(FileNotFoundError): pass class ModinfoMismatchError(ValueError): + _fmt = "module {} has mismatched {} (got `{}' expected `{}')" def __init__(self, attribute, path, value, expected_value): + msg = self._fmt.format(path, attribute, value, expected_value) + super().__init__(msg) self.path = path self.value = value self.expected_value = expected_value self.attribute = attribute - def __str__(self): - return "module {} has mismatched {} (got `{}' expected `{}')".format( - self.path, self.attribute, self.value, self.expected_value) - class ModVersionMismatchError(ModinfoMismatchError): def __init__(self, path, module_value, expected_value): - super(ModVersionMismatchError, self).__init__('vermagic', - path, module_value, - expected_value) + super().__init__('vermagic', path, module_value, expected_value) class ModSourceVersionMismatchError(ModinfoMismatchError): def __init__(self, path, module_value, expected_value): - super(ModSourceVersionMismatchError, self).__init__('srcversion', - path, module_value, - expected_value) + super().__init__('srcversion', path, module_value, expected_value) LINUX_KERNEL_PID = 1 From c7a7f6c43565d9cef6b45df5f75332215dabdd38 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Wed, 5 Jun 2019 10:35:55 -0400 Subject: [PATCH 154/367] crash.commands.xfs: properly print dquot log item flags Also convert printing of buffer logitem flags to decode_flags Signed-off-by: Jeff Mahoney --- crash/commands/xfs.py | 18 ++++++------------ crash/subsystem/filesystem/xfs.py | 14 ++++++++++++++ 2 files changed, 20 insertions(+), 12 deletions(-) diff --git a/crash/commands/xfs.py b/crash/commands/xfs.py index a729330b516..918a737cf10 100644 --- a/crash/commands/xfs.py +++ b/crash/commands/xfs.py @@ -22,9 +22,10 @@ from crash.subsystem.filesystem.xfs import XFS_LI_IUNLINK, XFS_LI_INODE from crash.subsystem.filesystem.xfs import XFS_LI_BUF, XFS_LI_DQUOT from crash.subsystem.filesystem.xfs import XFS_LI_QUOTAOFF, XFS_BLI_FLAGS +from crash.subsystem.filesystem.xfs import XFS_DQ_FLAGS from crash.subsystem.filesystem.xfs import xfs_mount_flags, xfs_mount_uuid from crash.subsystem.filesystem.xfs import xfs_mount_version - +from crash.util import decode_flags from crash.util.symbols import Types types = Types(['struct xfs_buf *']) @@ -126,20 +127,12 @@ def dump_ail(self, args: Namespace) -> None: XFS_LI_TYPES[li_type][7:]), end='') if li_type == XFS_LI_BUF: buf = item['bli_buf'] - flags = [] - bli_flags = int(item['bli_flags']) - - for flag in XFS_BLI_FLAGS.keys(): - if flag & bli_flags: - flags.append(XFS_BLI_FLAGS[flag]) - - print(" buf@{:x} bli_flags={}" - .format(int(buf), "|".join(flags))) + flags = decode_flags(item['bli_flags'], XFS_BLI_FLAGS) + print(" buf@{:x} bli_flags={}" .format(int(buf), flags)) print(" 
{}".format(xfs_format_xfsbuf(buf))) elif li_type == XFS_LI_INODE: ili_flags = int(item['ili_lock_flags']) - flags = [] xfs_inode = item['ili_inode'] print("inode@{:x} i_ino={} ili_lock_flags={:x} " .format(int(xfs_inode['i_vnode'].address), @@ -156,7 +149,8 @@ def dump_ail(self, args: Namespace) -> None: int(efd['efd_nextents']), int(efd['efd_id']))) elif li_type == XFS_LI_DQUOT: dquot = item['qli_dquot'] - print("dquot@{:x}".format(int(dquot), int(dquot['dq_flags']))) + flags = decode_flags(dquot['dq_flags'], XFS_DQ_FLAGS) + print("dquot@{:x} flags={}".format(int(dquot), flags)) elif li_type == XFS_LI_QUOTAOFF: qoff = item['qql_format'] print("qoff@{:x} type={} size={} flags={}" diff --git a/crash/subsystem/filesystem/xfs.py b/crash/subsystem/filesystem/xfs.py index 38666f36bb3..9c9c4ef9006 100644 --- a/crash/subsystem/filesystem/xfs.py +++ b/crash/subsystem/filesystem/xfs.py @@ -146,6 +146,20 @@ XFS_ILOG_TIMESTAMP : "TIMESTAMP", } +XFS_DQ_USER = 0x0001 # a user quota +XFS_DQ_PROJ = 0x0002 # project quota +XFS_DQ_GROUP = 0x0004 # a group quota +XFS_DQ_DIRTY = 0x0008 # dquot is dirty +XFS_DQ_FREEING = 0x0010 # dquot is being torn down + +XFS_DQ_FLAGS = { + XFS_DQ_USER : "USER", + XFS_DQ_PROJ : "PROJ", + XFS_DQ_GROUP : "GROUP", + XFS_DQ_DIRTY : "DIRTY", + XFS_DQ_FREEING : "FREEING", +} + XFS_MOUNT_WSYNC = (1 << 0) XFS_MOUNT_UNMOUNTING = (1 << 1) XFS_MOUNT_DMAPI = (1 << 2) From b35156e6672bd39191413a51c5a75d3024d246cd Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Tue, 4 Jun 2019 11:23:38 -0400 Subject: [PATCH 155/367] vtop: don't instantiate CrashAddressTranslation on module import The CrashAddressTranslation constructor requires a valid syscache be set up prior to running. This isn't a problem generally but when it's constructed during module import, it means unit tests that just import the module fail. Since it's only used for the vtop command and initialization is lightweight, move it to the vtop execute method. 
Signed-off-by: Jeff Mahoney --- crash/addrxlat.py | 24 +++++++++--------------- crash/commands/vtop.py | 13 ++++++------- 2 files changed, 15 insertions(+), 22 deletions(-) diff --git a/crash/addrxlat.py b/crash/addrxlat.py index 03e24658f91..5d94b238021 100644 --- a/crash/addrxlat.py +++ b/crash/addrxlat.py @@ -1,17 +1,19 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -import gdb import addrxlat from crash.cache.syscache import utsname from crash.util import offsetof +from crash.util.symbols import Types + +import gdb + +types = Types(['uint32_t *', 'uint64_t *']) class TranslationContext(addrxlat.Context): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.read_caps = addrxlat.CAPS(addrxlat.KVADDR) - self.uint32_ptr = gdb.lookup_type('uint32_t').pointer() - self.uint64_ptr = gdb.lookup_type('uint64_t').pointer() def cb_sym(self, symtype, *args): if symtype == addrxlat.SYM_VALUE: @@ -33,10 +35,12 @@ def cb_sym(self, symtype, *args): return super().cb_sym(symtype, *args) def cb_read32(self, faddr): - return int(gdb.Value(faddr.addr).cast(self.uint32_ptr).dereference()) + v = gdb.Value(faddr.addr).cast(types.uint32_t_p_type) + return int(v.dereference()) def cb_read64(self, faddr): - return int(gdb.Value(faddr.addr).cast(self.uint64_ptr).dereference()) + v = gdb.Value(faddr.addr).cast(types.uint64_t_p_type) + return int(v.dereference()) class CrashAddressTranslation(object): def __init__(self): @@ -60,13 +64,3 @@ def __init__(self): if meth.kind != addrxlat.LINEAR or meth.off != 0: self.is_non_auto = True break - -__impl = CrashAddressTranslation() -def addrxlat_context(): - return __impl.context - -def addrxlat_system(): - return __impl.system - -def addrxlat_is_non_auto(): - return __impl.is_non_auto diff --git a/crash/commands/vtop.py b/crash/commands/vtop.py index b79766352d5..dc958565266 100644 --- a/crash/commands/vtop.py +++ b/crash/commands/vtop.py @@ -4,7 +4,7 @@ import gdb import argparse from crash.commands import Command, ArgumentParser -from crash.addrxlat import addrxlat_context, addrxlat_system, addrxlat_is_non_auto +from crash.addrxlat import CrashAddressTranslation import addrxlat class LinuxPGT(object): @@ -190,19 +190,18 @@ def __init__(self): super().__init__("vtop", parser) def execute(self, argv): - ctx = addrxlat_context() - sys = addrxlat_system() - if addrxlat_is_non_auto(): - pgt = LinuxNonAutoPGT(ctx, sys) + trans = CrashAddressTranslation() + if trans.is_non_auto: + pgt = LinuxNonAutoPGT(trans.context, trans.system) else: - pgt = LinuxPGT(ctx, sys) + pgt = LinuxPGT(trans.context, trans.system) for addr in argv.args: addr = int(addr, 16) fulladdr = addrxlat.FullAddress(addrxlat.KVADDR, addr) print('{:16} {:16}'.format('VIRTUAL', 'PHYSICAL')) try: - fulladdr.conv(addrxlat.KPHYSADDR, ctx, sys) + fulladdr.conv(addrxlat.KPHYSADDR, trans.context, trans.system) phys = '{:x}'.format(fulladdr.addr) except addrxlat.BaseException: phys = '---' From 0fafee96aa853e808d6e842d65efee913e41e569 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Tue, 4 Jun 2019 11:35:40 -0400 Subject: [PATCH 156/367] crash.commands.lsmod: use Types for delayed type lookup Directly looking up 'struct module_use' in the command constructor means that module import fails during non-kernel unit tests. Using the delayed type works properly. 
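For illustration, the delayed-lookup convention looks roughly like this in a command module (sketch only; it assumes the crash-python runtime under gdb, with the kernel's debuginfo available by the time the function is called):

    # Sketch only: declare the types the module needs up front and let
    # crash.util.symbols.Types defer the gdb lookup, so importing the
    # module never touches debuginfo.
    from crash.util.symbols import Types

    types = Types(['struct module_use'])

    def module_use_size() -> int:
        # The type is exposed as types.module_use_type; if it cannot be
        # resolved yet, this access raises DelayedAttributeError (as
        # elsewhere in the tree) instead of the import itself failing.
        return types.module_use_type.sizeof

That keeps a bare 'import crash.commands.lsmod' working in the non-kernel unit tests.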
Signed-off-by: Jeff Mahoney --- crash/commands/lsmod.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/crash/commands/lsmod.py b/crash/commands/lsmod.py index b19e00ca340..092ea5a61d1 100644 --- a/crash/commands/lsmod.py +++ b/crash/commands/lsmod.py @@ -9,6 +9,7 @@ from crash.commands import Command, ArgumentParser from crash.types.module import for_each_module from crash.util import struct_has_member +from crash.util.symbols import Types from crash.types.list import list_for_each_entry from crash.types.percpu import get_percpu_var import crash.types.percpu @@ -36,6 +37,8 @@ class _Parser(ArgumentParser): def format_usage(self) -> str: return "lsmod [-p] [regex] ...\n" +types = Types(['struct module_use']) + class ModuleCommand(Command): """display module information""" @@ -47,8 +50,6 @@ def __init__(self): Command.__init__(self, "lsmod", parser) - self.module_use_type = gdb.lookup_type('struct module_use') - def print_module_percpu(self, mod, cpu=-1): cpu = int(cpu) addr = int(mod['percpu']) @@ -113,7 +114,7 @@ def execute(self, argv): module_use = "" count = 0 for use in list_for_each_entry(mod['source_list'], - self.module_use_type, + types.module_use_type, 'source_list'): if module_use == "": module_use += " " From 5fa35281ab43199ec640cf6781b536b69c08b116 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 31 May 2019 12:01:34 -0400 Subject: [PATCH 157/367] kdump: add documentation building and static testing for kdump target When we added the apidoc and static testing to the project, we missed the kdump package. This commit adds it. Signed-off-by: Jeff Mahoney --- Makefile | 2 +- doc-source/conf.py | 14 ++++++--- doc-source/index.rst | 3 +- doc-source/mock/kdumpfile/__init__.py | 39 +++++++++++++++++++++++++ doc-source/mock/kdumpfile/exceptions.py | 11 +++++++ kdump/target.py | 1 + test-all.sh | 1 + 7 files changed, 65 insertions(+), 6 deletions(-) create mode 100644 doc-source/mock/kdumpfile/__init__.py create mode 100644 doc-source/mock/kdumpfile/exceptions.py diff --git a/Makefile b/Makefile index ccfb4823af1..0fcf8a320bc 100644 --- a/Makefile +++ b/Makefile @@ -48,6 +48,6 @@ lint3: doc: build FORCE rm -rf docs - rm -f doc/source/crash.*rst doc/source/modules.rst + rm -f doc-source/crash/.*rst doc-source/kdump/*.rst python3 setup.py -q build_sphinx FORCE: diff --git a/doc-source/conf.py b/doc-source/conf.py index a777b942002..815b4398459 100644 --- a/doc-source/conf.py +++ b/doc-source/conf.py @@ -26,19 +26,25 @@ def run_apidoc(_): try: from sphinx.ext.apidoc import main - mod = "../crash" + crash_mod = "../crash" + kdump_mod = "../kdump" out = "." 
except ImportError as e: from sphinx.apidoc import main - mod = "crash" + crash_mod = "crash" + kdump_mod = "kdump" out = "doc-source" import make_gdb_refs import os import sys sys.path.append(os.path.join(os.path.dirname(__file__), '..')) cur_dir = os.path.abspath(os.path.dirname(__file__)) - argv = [ '-M', '-e', '-H', 'API Reference', '-f', - '-o', out, mod ] + argv = [ '-M', '-e', '-H', 'Crash API Reference', '-f', + '-o', out + "/crash", crash_mod ] + main(argv) + + argv = [ '-M', '-e', '-H', 'Kdump Target API Reference', '-f', + '-o', out + "/kdump", kdump_mod ] main(argv) make_gdb_refs.make_gdb_refs() diff --git a/doc-source/index.rst b/doc-source/index.rst index 878f22ed414..77ccf7e10e9 100644 --- a/doc-source/index.rst +++ b/doc-source/index.rst @@ -29,7 +29,8 @@ Table of Contents installation user_guide - modules + kdump/modules + crash/modules Indices and tables ------------------ diff --git a/doc-source/mock/kdumpfile/__init__.py b/doc-source/mock/kdumpfile/__init__.py new file mode 100644 index 00000000000..e98e05465f0 --- /dev/null +++ b/doc-source/mock/kdumpfile/__init__.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +import addrxlat + +class attrdict(object): + def __init__(self): + self.dict = dict() + + def __setitem__(self, name, value): + self.dict[name] = value + + def __getitem__(self, name): + return self.dict[name] + + def __setattr__(self, name, value): + self.dict[name] = value + + def __getattr__(self, name): + return self.dict[name] + + def get(self, name, default): + return self.dict[name] + +class kdumpfile(object): + def __init__(self, file): + self.attr = attrdict() + self.attr.cpu = attrdict() + + def read(self, mode, offset, length): + return buffer() + + def get_addrxlat_ctx(self): + return addrxlat.Context() + + def get_addrxlat_sys(self): + return addrxlat.System() + +KDUMP_KVADDR = 0 diff --git a/doc-source/mock/kdumpfile/exceptions.py b/doc-source/mock/kdumpfile/exceptions.py new file mode 100644 index 00000000000..7365f84ae1b --- /dev/null +++ b/doc-source/mock/kdumpfile/exceptions.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +class EOFException(Exception): + pass + +class NoDataException(Exception): + pass + +class AddressTranslationException(Exception): + pass diff --git a/kdump/target.py b/kdump/target.py index fb4e11cf121..92ced9d1b4c 100644 --- a/kdump/target.py +++ b/kdump/target.py @@ -33,6 +33,7 @@ def __init__(self, debug=False): self.debug = debug self.shortname = "kdumpfile" self.longname = "Use a Linux kernel kdump file as a target" + self.kdump: kdumpfile = None self.register() diff --git a/test-all.sh b/test-all.sh index 4bfc47b10ec..0867bb59e0b 100755 --- a/test-all.sh +++ b/test-all.sh @@ -30,6 +30,7 @@ if has_mypy; then python sys.path.insert(0, 'build/lib') from mypy.main import main + main(None, args=["-p", "kdump", "--ignore-missing-imports"]) main(None, args=["-p", "crash", "--ignore-missing-imports"]) end END From f984357a0d4a55a31ef702ecac1949d5cfcbf80c Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 31 May 2019 14:54:14 -0400 Subject: [PATCH 158/367] tests: clean up testing This commit spits up test-all.sh into individual scripts accessible via make rules. We also add exit rules for the unit tests so that failures are actually passed back out to the calling script. At this point only the unit tests and live tests are run as part of 'make tests.' 
Once the static checks and lint errors/warnings are cleared, we'll enable those as well. Signed-off-by: Jeff Mahoney --- Makefile | 41 +++- TESTING.md | 61 ----- doc-source/index.rst | 1 + doc-source/testing.rst | 88 +++++++ kernel-tests/unittest-bootstrap.py | 5 +- kernel-tests/unittest-prepare.py | 28 +-- test-all.sh | 58 +---- tests/gdbinit-boilerplate | 5 + tests/{pylintrc => pylintrc-check} | 2 +- tests/pylintrc-enforce | 355 +++++++++++++++++++++++++++++ tests/run-gdb.sh | 5 + tests/run-kernel-tests.sh | 24 ++ tests/run-mypy.py | 25 ++ tests/run-pylint.py | 10 + tests/run-pylint.sh | 11 + tests/run-static-checks.sh | 15 ++ tests/run-tests.sh | 4 + tests/unittest-bootstrap.py | 6 +- 18 files changed, 604 insertions(+), 140 deletions(-) delete mode 100644 TESTING.md create mode 100644 doc-source/testing.rst create mode 100644 tests/gdbinit-boilerplate rename tests/{pylintrc => pylintrc-check} (91%) create mode 100644 tests/pylintrc-enforce create mode 100755 tests/run-gdb.sh create mode 100755 tests/run-kernel-tests.sh create mode 100644 tests/run-mypy.py create mode 100644 tests/run-pylint.py create mode 100755 tests/run-pylint.sh create mode 100755 tests/run-static-checks.sh create mode 100755 tests/run-tests.sh diff --git a/Makefile b/Makefile index 0fcf8a320bc..58df012f6b8 100644 --- a/Makefile +++ b/Makefile @@ -28,23 +28,54 @@ GZ_MAN1 = $(patsubst %.asciidoc,%.1.gz,$(MAN1_TXT)) man: $(GZ_MAN1) +PYLINT_ARGS ?= --rcfile tests/pylintrc-check -r n + +ifeq ($(E),1) +PYLINT_ARGS += -E +endif + all: man man-install: man $(INSTALL) -d -m 755 $(DESTDIR)$(man1dir) $(INSTALL) -m 644 $(GZ_MAN1) $(DESTDIR)$(man1dir) -build: crash tests kernel-tests +doc-clean: + rm -rf docs + rm -f doc-source/crash/*.rst doc-source/kdump/*.rst + +clean: doc-clean + make -C tests clean + rm -rf build + +build: crash tests python3 setup.py -q build +clean-build: clean build + install: man-install build python3 setup.py install -lint: lint3 - pylint --rcfile tests/pylintrc -r n crash +unit-tests: clean-build + make -C tests -s + sh tests/run-tests.sh + +PYLINT_ENFORCE="" + +lint-enforce: clean-build + sh tests/run-pylint.sh -r n --rcfile tests/pylintrc-enforce crash kdump + +lint: clean-build + sh tests/run-pylint.sh $(PYLINT_ARGS) crash kdump + +static-check: clean-build + sh tests/run-static-checks.sh + +live-tests: clean-build + sh tests/run-kernel-tests.sh $(INI_FILES) -lint3: - pylint --py3k -r n crash +test: unit-tests lint-enforce live-tests + @echo -n doc: build FORCE rm -rf docs diff --git a/TESTING.md b/TESTING.md deleted file mode 100644 index 95764818a2f..00000000000 --- a/TESTING.md +++ /dev/null @@ -1,61 +0,0 @@ -# Testing - -## Summary - -There are unit tests in the tests/ dir that are standalone and useful for -testing basic functionality. - -There are unit tests in the kernel-tests dir that require configuration, -kernel images, debuginfo, and vmcores to use. - -## Configuration - -The configuration for each kernel/vmcore to be tested goes in a .ini file -with the following format. All fields except kernel and vmcore are -optional, and defaults will be used. A kernel missing debuginfo cannot -be used for testing. Missing modules will mean module-specific tests -will be skipped. - -```[test] -kernel=/path/to/kernel -vmcore=/path/to/vmcore -vmlinux_debuginfo=/path/to/vmlinux-debuginfo -modules=/path/to/modules -module_debuginfo_path=/path/to/module/debuginfo -root=/root/for/tree/searches``` - -The optional fields match those defined in crash.kernel.CrashKernel. 
- -Example 1: -```[test] -kernel=/var/crash/2019-04-23-11:35/vmlinux-4.12.14-150.14-default.gz -vmcore=/var/crash/2019-04-23-11:35/vmcore``` - -In this example, the kernel and debuginfo packages are installed in the -default locations and will be searched automatically. - -Example 2: -```[test] -kernel=/var/crash/2019-04-23-11:35/vmlinux-4.12.14-150.14-default.gz -vmcore=/var/crash/2019-04-23-11:35/vmcore -root=/var/cache/crash-setup/leap15/4.12.14-150.14-default -``` - -In this example, the kernel and debuginfo packages are installed under -/var/cache/crash-setup/leap15/4.12.14-150.14-default and so we only -specify a root directory. - -## Running - -The script `test-all.sh` when run with no options will execute only -the standalone tests. The script takes a list of the .ini files -described above and will execute the kernel tests against those -configurations immediately after the standalone tests. - -Example: -```sh test-all.sh kernel-test-configs/4.12.14-150.14-default.ini kernel-test-configs/5.1.0-rc7-vanilla.ini``` -or -```sh test-all.sh kernel-test-configs/*.ini``` - -Each configuration will execute independently from one another. - diff --git a/doc-source/index.rst b/doc-source/index.rst index 77ccf7e10e9..cb737c1f2dc 100644 --- a/doc-source/index.rst +++ b/doc-source/index.rst @@ -28,6 +28,7 @@ Table of Contents :maxdepth: 2 installation + testing user_guide kdump/modules crash/modules diff --git a/doc-source/testing.rst b/doc-source/testing.rst new file mode 100644 index 00000000000..743a56f9f40 --- /dev/null +++ b/doc-source/testing.rst @@ -0,0 +1,88 @@ +Testing +======= + +Summary +------- + +There are unit tests in the tests/ dir that are standalone and useful for +testing basic functionality. + +There are unit tests in the kernel-tests dir that require configuration, +kernel images, debuginfo, and vmcores to use. + +If installed, there is support for running the `mypy `_ +static checker and the `pylint `_ code checker. + +`pylint` runs properly from within the gdb environment but `mypy` spawns +external interpreters and cannot run from within gdb. + +Configuration +------------- + +The configuration for each kernel/vmcore to be tested goes in a .ini file +with the following format. All fields except kernel and vmcore are +optional, and defaults will be used. A kernel missing debuginfo cannot +be used for testing. Missing modules will mean module-specific tests +will be skipped. + +.. code-block:: ini + + [test] + kernel=/path/to/kernel + vmcore=/path/to/vmcore + vmlinux_debuginfo=/path/to/vmlinux-debuginfo + modules=/path/to/modules + module_debuginfo_path=/path/to/module/debuginfo + root=/root/for/tree/searches + +The optional fields match those defined in `crash.kernel.CrashKernel`. + +Example 1: + +.. code-block:: ini + + [test] + kernel=/var/crash/2019-04-23-11:35/vmlinux-4.12.14-150.14-default.gz + vmcore=/var/crash/2019-04-23-11:35/vmcore + +In this example, the kernel and debuginfo packages are installed in the +default locations and will be searched automatically. + +Example 2: + +.. code-block:: ini + + [test] + kernel=/var/crash/2019-04-23-11:35/vmlinux-4.12.14-150.14-default.gz + vmcore=/var/crash/2019-04-23-11:35/vmcore + root=/var/cache/crash-setup/leap15/4.12.14-150.14-default + +In this example, the kernel and debuginfo packages are installed under +/var/cache/crash-setup/leap15/4.12.14-150.14-default and so we only +specify a root directory. + +Running +------- + +The make target `test` will run all standalone tests. 
The absence of `pylint` +or `mypy` is not considered an error. + +To run the tests using live vmcores using the configuration detailed above, +the `INI_FILES` option should be used. + +Example: + +.. code-block:: bash + + $ make live-tests INI_FILES='kernel-test-configs/4.12.14-150.14-default.ini kernel-test-configs/5.1.0-rc7-vanilla.ini' + + +or + +.. code-block:: bash + + $ make live-tests INI_FILES=kernel-test-configs/*.ini + + +Each configuration will execute independently from one another. + diff --git a/kernel-tests/unittest-bootstrap.py b/kernel-tests/unittest-bootstrap.py index 35542c5f395..a0fd8fb97cd 100644 --- a/kernel-tests/unittest-bootstrap.py +++ b/kernel-tests/unittest-bootstrap.py @@ -39,4 +39,7 @@ test_loader = unittest.TestLoader() test_suite = test_loader.discover('kernel-tests', pattern='test_*.py') -unittest.TextTestRunner(verbosity=2).run(test_suite) +ret = unittest.TextTestRunner(verbosity=2).run(test_suite) +if not ret.wasSuccessful(): + sys.exit(1) +sys.exit(0) diff --git a/kernel-tests/unittest-prepare.py b/kernel-tests/unittest-prepare.py index e887b6f1046..924684ace2a 100644 --- a/kernel-tests/unittest-prepare.py +++ b/kernel-tests/unittest-prepare.py @@ -29,25 +29,25 @@ module_path = config['test'].get('module_path', None) module_debuginfo_path = config['test'].get('module_debuginfo_path', None) -if vmlinux.endswith(".gz"): - vmlinux_gz = vmlinux - testdir = os.environ['CRASH_PYTHON_TESTDIR'] - base = os.path.basename(vmlinux)[:-3] - vmlinux = os.path.join(testdir, base) +try: + if vmlinux.endswith(".gz"): + vmlinux_gz = vmlinux + testdir = os.environ['CRASH_PYTHON_TESTDIR'] + base = os.path.basename(vmlinux)[:-3] + vmlinux = os.path.join(testdir, base) - with gzip.open(vmlinux_gz, 'r') as f_in, open(vmlinux, 'wb') as f_out: - shutil.copyfileobj(f_in, f_out) + with gzip.open(vmlinux_gz, 'r') as f_in, open(vmlinux, 'wb') as f_out: + shutil.copyfileobj(f_in, f_out) - f_out.close() - f_in.close() + f_out.close() + f_in.close() -gdb.execute(f"file {vmlinux}") + gdb.execute(f"file {vmlinux}") -from kdump.target import Target -target = Target(debug=False) + from kdump.target import Target + target = Target(debug=False) -try: gdb.execute(f"target kdumpfile {vmcore}") -except gdb.error as e: +except Exception as e: print(str(e)) sys.exit(1) diff --git a/test-all.sh b/test-all.sh index 0867bb59e0b..31bdda8af9e 100755 --- a/test-all.sh +++ b/test-all.sh @@ -1,59 +1,3 @@ #!/bin/sh -set -e - -cleanup() { - test -n "$DIR" && rm -rf "$DIR" -} - -trap cleanup EXIT - -DIR=$(mktemp -d "/tmp/crash-python-tests.XXXXXX") - -export CRASH_PYTHON_TESTDIR="$DIR" - -rm -rf build/lib/crash -python3 setup.py -q build -make -C tests -s -crash-python-gdb -nx -batch -ex "source tests/unittest-bootstrap.py" - -has_mypy() { - python3 -c 'import mypy' 2> /dev/null -} - -if has_mypy; then - cat <<- END > $DIR/gdbinit - set build-id-verbose 0 - set python print-stack full - set height 0 - set print pretty on - python - sys.path.insert(0, 'build/lib') - from mypy.main import main - main(None, args=["-p", "kdump", "--ignore-missing-imports"]) - main(None, args=["-p", "crash", "--ignore-missing-imports"]) - end - END - echo "Doing static checking." - if ! crash-python-gdb -nx -batch -x $DIR/gdbinit; then - echo "static checking failed." 
>&2 - else - echo "OK" - fi -fi - -cat << END > $DIR/gdbinit -python sys.path.insert(0, 'build/lib') -set build-id-verbose 0 -set python print-stack full -set prompt py-crash> -set height 0 -set print pretty on -source kernel-tests/unittest-prepare.py -source kernel-tests/unittest-bootstrap.py -END - -for f in "$@"; do - export CRASH_PYTHON_TESTFILE="$f" - crash-python-gdb -nx -batch -x $DIR/gdbinit -done +make test INI_FILES="$@" diff --git a/tests/gdbinit-boilerplate b/tests/gdbinit-boilerplate new file mode 100644 index 00000000000..c6b0d7a56ea --- /dev/null +++ b/tests/gdbinit-boilerplate @@ -0,0 +1,5 @@ +set build-id-verbose 0 +set python print-stack full +set height 0 +set print pretty on +python sys.path.insert(0, 'build/lib') diff --git a/tests/pylintrc b/tests/pylintrc-check similarity index 91% rename from tests/pylintrc rename to tests/pylintrc-check index 0a2903b4465..b03e59af6c9 100644 --- a/tests/pylintrc +++ b/tests/pylintrc-check @@ -65,7 +65,7 @@ confidence= # --enable=similarities". If you want to run only the classes checker, but have # no Warning level messages displayed, use"--disable=all --enable=classes # --disable=W" -disable=import-star-module-level,old-octal-literal,oct-method,print-statement,unpacking-in-except,parameter-unpacking,backtick,old-raise-syntax,old-ne-operator,long-suffix,dict-view-method,dict-iter-method,metaclass-assignment,next-method-called,raising-string,indexing-exception,raw_input-builtin,long-builtin,file-builtin,execfile-builtin,coerce-builtin,cmp-builtin,buffer-builtin,basestring-builtin,apply-builtin,filter-builtin-not-iterating,using-cmp-argument,useless-suppression,range-builtin-not-iterating,suppressed-message,no-absolute-import,old-division,cmp-method,reload-builtin,zip-builtin-not-iterating,intern-builtin,unichr-builtin,reduce-builtin,standarderror-builtin,unicode-builtin,xrange-builtin,coerce-method,delslice-method,getslice-method,setslice-method,input-builtin,round-builtin,hex-method,nonzero-method,map-builtin-not-iterating,missing-docstring,too-few-public-methods,invalid-name,duplicate-code,missing-final-newline,too-many-locals,too-many-instance-attributes,too-many-public-methods +disable=missing-docstring,too-few-public-methods,invalid-name,too-many-locals,too-many-instance-attributes,too-many-public-methods,fixme,no-self-use,too-many-branches,too-many-statements,too-many-arguments,too-many-boolean-expressions,line-too-long [REPORTS] diff --git a/tests/pylintrc-enforce b/tests/pylintrc-enforce new file mode 100644 index 00000000000..efdbb83c402 --- /dev/null +++ b/tests/pylintrc-enforce @@ -0,0 +1,355 @@ +[MASTER] + +# Specify a configuration file. +#rcfile= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Add files or directories to the blacklist. They should be base names, not +# paths. +ignore=CVS + +# Add files or directories matching the regex patterns to the blacklist. The +# regex matches against base names, not paths. +ignore-patterns= + +# Pickle collected data for later comparisons. +persistent=yes + +# List of plugins (as comma separated values of python modules names) to load, +# usually to register additional checkers. +load-plugins= + +# Use multiple processes to speed up Pylint. +jobs=1 + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + +# A comma-separated list of package or module names from where C extensions may +# be loaded. 
Extensions are loading into the active Python interpreter and may +# run arbitrary code +extension-pkg-whitelist= + +# Allow optimization of some AST trees. This will activate a peephole AST +# optimizer, which will apply various small optimizations. For instance, it can +# be used to obtain the result of joining multiple strings with the addition +# operator. Joining a lot of strings can lead to a maximum recursion error in +# Pylint and this flag can prevent that. It has one side effect, the resulting +# AST will be different than the one from reality. This option is deprecated +# and it will be removed in Pylint 2.0. +optimize-ast=no + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED +confidence= + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once).You can also use "--disable=all" to +# disable everything first and then reenable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use"--disable=all --enable=classes +# --disable=W" +disable=all + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +enable= + + +[REPORTS] + +# Set the output format. Available formats are text, parseable, colorized, msvs +# (visual studio) and html. You can also give a reporter class, eg +# mypackage.mymodule.MyReporterClass. +output-format=text + +# Put messages in a separate file for each module / package specified on the +# command line instead of printing them on stdout. Reports (if any) will be +# written in a file name "pylint_global.[txt|html]". This option is deprecated +# and it will be removed in Pylint 2.0. +files-output=no + +# Tells whether to display a full report or only the messages +reports=yes + +# Python expression which should return a note less than 10 (10 is the highest +# note). You have access to the variables errors warning, statement which +# respectively contain the number of errors / warnings messages and the total +# number of statements analyzed. This is used by the global evaluation report +# (RP0004). +evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details +#msg-template= + + +[FORMAT] + +# Maximum number of characters on a single line. +max-line-length=100 + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )??$ + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + +# List of optional constructs for which whitespace checking is disabled. `dict- +# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}. +# `trailing-comma` allows a space between comma and closing bracket: (a, ). 
+# `empty-line` allows space-only lines. +no-space-check=trailing-comma,dict-separator + +# Maximum number of lines in a module +max-module-lines=1000 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format= + + +[SIMILARITIES] + +# Minimum lines number of a similarity. +min-similarity-lines=4 + +# Ignore comments when computing similarities. +ignore-comments=yes + +# Ignore docstrings when computing similarities. +ignore-docstrings=yes + +# Ignore imports when computing similarities. +ignore-imports=no + + +[BASIC] + +# Good variable names which should always be accepted, separated by a comma +good-names=i,j,k,ex,Run,_ + +# Bad variable names which should always be refused, separated by a comma +bad-names=foo,bar,baz,toto,tutu,tata + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= + +# Include a hint for the correct naming format with invalid-name +include-naming-hint=no + +# List of decorators that produce properties, such as abc.abstractproperty. Add +# to this list to register other decorators that produce valid properties. +property-classes=abc.abstractproperty + +# Regular expression matching correct function names +function-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Naming hint for function names +function-name-hint=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression matching correct variable names +variable-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Naming hint for variable names +variable-name-hint=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression matching correct constant names +const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ + +# Naming hint for constant names +const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$ + +# Regular expression matching correct attribute names +attr-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Naming hint for attribute names +attr-name-hint=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression matching correct argument names +argument-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Naming hint for argument names +argument-name-hint=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression matching correct class attribute names +class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ + +# Naming hint for class attribute names +class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ + +# Regular expression matching correct inline iteration names +inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ + +# Naming hint for inline iteration names +inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$ + +# Regular expression matching correct class names +class-rgx=[A-Z_][a-zA-Z0-9]+$ + +# Naming hint for class names +class-name-hint=[A-Z_][a-zA-Z0-9]+$ + +# Regular expression matching correct module names +module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ + +# Naming hint for module names +module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ + +# Regular expression matching correct method names +method-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Naming hint for method names +method-name-hint=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=^_ + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. 
+docstring-min-length=-1 + + +[ELIF] + +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME,XXX,TODO + + +[LOGGING] + +# Logging modules to check that the string format arguments are in logging +# function parameter format +logging-modules=logging + + +[SPELLING] + +# Spelling dictionary name. Available dictionaries: none. To make it working +# install python-enchant package. +spelling-dict= + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to indicated private dictionary in +# --spelling-private-dict-file option instead of raising a message. +spelling-store-unknown-words=no + + +[TYPECHECK] + +# Tells whether missing members accessed in mixin class should be ignored. A +# mixin class is detected if its name ends with "mixin" (case insensitive). +ignore-mixin-members=yes + +# List of module names for which member attributes should not be checked +# (useful for modules/projects where namespaces are manipulated during runtime +# and thus existing member attributes cannot be deduced by static analysis. It +# supports qualified module names, as well as Unix pattern matching. +ignored-modules= + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. +contextmanager-decorators=contextlib.contextmanager + + +[DESIGN] + +# Maximum number of arguments for function / method +max-args=5 + +# Argument names that match this expression will be ignored. Default to name +# with leading underscore +ignored-argument-names=_.* + +# Maximum number of locals for function / method body +max-locals=15 + +# Maximum number of return / yield for function / method body +max-returns=6 + +# Maximum number of branch for function / method body +max-branches=12 + +# Maximum number of statements in function / method body +max-statements=50 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=20 + +# Maximum number of boolean expressions in a if statement +max-bool-expr=5 + + +[CLASSES] + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__,__new__,setUp + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=mcs + +# List of member names, which should be excluded from the protected access +# warning. 
+exclude-protected=_asdict,_fields,_replace,_source,_make
+
+
+[EXCEPTIONS]
+
+# Exceptions that will emit a warning when being caught. Defaults to
+# "Exception"
+overgeneral-exceptions=Exception
diff --git a/tests/run-gdb.sh b/tests/run-gdb.sh
new file mode 100755
index 00000000000..b09f53b7df4
--- /dev/null
+++ b/tests/run-gdb.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+DIR=$(dirname "$0")
+echo "Starting gdb"
+exec crash-python-gdb -nx -batch -x $DIR/gdbinit-boilerplate "$@"
diff --git a/tests/run-kernel-tests.sh b/tests/run-kernel-tests.sh
new file mode 100755
index 00000000000..6fef357fbb7
--- /dev/null
+++ b/tests/run-kernel-tests.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+if test $# -eq 0; then
+    echo "No ini files specified. Nothing to do."
+    exit 0
+fi
+
+cleanup() {
+    test -n "$DIR" && rm -rf "$DIR"
+}
+
+trap cleanup EXIT
+
+RUN="$(dirname "$0")"
+
+DIR=$(mktemp -d "/tmp/cp-kernel-tests.XXXXXX")
+export CRASH_PYTHON_TESTDIR=$DIR
+
+TOPDIR=$(realpath "$(dirname "$0")"/..)
+for f in "$@"; do
+    export CRASH_PYTHON_TESTFILE="$f"
+    $RUN/run-gdb.sh -x $TOPDIR/kernel-tests/unittest-prepare.py \
+        -x $TOPDIR/kernel-tests/unittest-bootstrap.py
+done
diff --git a/tests/run-mypy.py b/tests/run-mypy.py
new file mode 100644
index 00000000000..5758b0ac703
--- /dev/null
+++ b/tests/run-mypy.py
@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
+
+import sys
+
+print("Doing static checking.")
+
+from mypy.main import main
+
+common_args = ["--ignore-missing-imports",
+               "--disallow-incomplete-defs",
+               "--disallow-untyped-defs",
+               "--disallow-untyped-calls",
+               "--check-untyped-defs",
+               "--disallow-untyped-globals"]
+
+ret = main(None, args=["-p", "kdump"] + common_args)
+ret2 = main(None, args=["-p", "crash"] + common_args)
+
+if ret or ret2:
+    print("static checking failed.", file=sys.stderr)
+    sys.exit(1)
+
+print("OK")
+sys.exit(0)
diff --git a/tests/run-pylint.py b/tests/run-pylint.py
new file mode 100644
index 00000000000..d5b110944d1
--- /dev/null
+++ b/tests/run-pylint.py
@@ -0,0 +1,11 @@
+# -*- coding: utf-8 -*-
+# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
+
+from pylint import lint
+import os
+import shlex
+import sys
+
+argv = shlex.split(os.environ['PYLINT_ARGV'])
+
+sys.exit(lint.Run(argv))
diff --git a/tests/run-pylint.sh b/tests/run-pylint.sh
new file mode 100755
index 00000000000..2d33d2b8b9b
--- /dev/null
+++ b/tests/run-pylint.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+if ! python3 -c 'import pylint' 2> /dev/null; then
+    echo "pylint is not installed"
+    exit 0
+fi
+
+export PYLINT_ARGV="$@"
+
+DIR=$(dirname "$0")
+exec $DIR/run-gdb.sh -x $DIR/run-pylint.py
diff --git a/tests/run-static-checks.sh b/tests/run-static-checks.sh
new file mode 100755
index 00000000000..31b0758761b
--- /dev/null
+++ b/tests/run-static-checks.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+# mypy spawns multiple subprocesses that invoke the interpreter
+# separately. As a result, those subprocesses cannot import the gdb
+# and kdumpfile modules and the checks fail.
+
+if ! python3 -c 'import mypy' 2> /dev/null; then
+    echo "mypy is not installed"
+    exit 0
+fi
+
+set -e
+
+DIR=$(dirname "$0")
+python3 $DIR/run-mypy.py
diff --git a/tests/run-tests.sh b/tests/run-tests.sh
new file mode 100755
index 00000000000..79adf3bd620
--- /dev/null
+++ b/tests/run-tests.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+DIR="$(dirname "$0")"
+exec $DIR/run-gdb.sh -x $DIR/unittest-bootstrap.py
diff --git a/tests/unittest-bootstrap.py b/tests/unittest-bootstrap.py
index 807ca0854c1..f0846cec4be 100644
--- a/tests/unittest-bootstrap.py
+++ b/tests/unittest-bootstrap.py
@@ -9,4 +9,8 @@
 test_loader = unittest.TestLoader()
 test_suite = test_loader.discover('tests', pattern='test_*.py')
 
-unittest.TextTestRunner(verbosity=1).run(test_suite)
+ret = unittest.TextTestRunner(verbosity=1).run(test_suite)
+if not ret.wasSuccessful():
+    sys.exit(1)
+
+sys.exit(0)

From 38e0ce5886b7eab3a70a026367e696c26da1dea3 Mon Sep 17 00:00:00 2001
From: Jeff Mahoney
Date: Tue, 4 Jun 2019 10:24:41 -0400
Subject: [PATCH 159/367] tests: add automatic import testing

This commit adds a script that generates test cases to individually
import each module in the project. It's automatically used by the
existing infrastructure.

Signed-off-by: Jeff Mahoney
---
 .gitignore                |  1 +
 tests/Makefile            |  8 +++++++-
 tests/gen-import-tests.sh | 27 +++++++++++++++++++++++++++
 3 files changed, 35 insertions(+), 1 deletion(-)
 create mode 100755 tests/gen-import-tests.sh

diff --git a/.gitignore b/.gitignore
index edfbb817718..79f4f801954 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,3 +3,4 @@
 doc-source/crash*.rst
 doc-source/modules.rst
 docs
+tests/test_imports.py
diff --git a/tests/Makefile b/tests/Makefile
index 4866fe5b1d7..09189355e55 100644
--- a/tests/Makefile
+++ b/tests/Makefile
@@ -1,7 +1,11 @@
 CFLAGS = -ggdb
-TARGETS := test-util.o test-list.o test-list test-util test-percpu test-syscache
+TARGETS := test-util.o test-list.o test-list test-util test-percpu
+TARGETS += test-syscache test_imports.py
 
 all: $(TARGETS)
 
+test_imports.py: FORCE
+	sh gen-import-tests.sh > $@
+
 test-percpu.lds : test-percpu.lds.in build-lds
 	./build-lds $< $@
 
@@ -10,3 +14,5 @@ test-percpu: test-percpu.o test-percpu.lds
 
 clean:
 	rm -f $(TARGETS)
+
+FORCE:
diff --git a/tests/gen-import-tests.sh b/tests/gen-import-tests.sh
new file mode 100755
index 00000000000..f5ffe202bcf
--- /dev/null
+++ b/tests/gen-import-tests.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+set -e
+
+DIR=$(realpath $(dirname $0)/..)
+
+cat << END
+# -*- coding: utf-8 -*-
+# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
+
+import unittest
+
+class TestImports(unittest.TestCase):
+END
+
+for f in $(cd $DIR ; find crash kdump -name '*.py'); do
+    path=$(echo $f | sed -e 's#/__init__.py##' -e 's#.py##')
+    name=$(echo $path | tr / .)
+    tname=$(echo $path | tr / _)
+
+cat <
Date: Tue, 4 Jun 2019 16:30:53 -0400
Subject: [PATCH 160/367] crash.types.slab: remove dead code

At the end of KmemCache.get_slabs_of_type we initialize a new Slab to
yield, but it's immediately after a continue. It'll never run.
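
For illustration only, a minimal sketch of the unreachable pattern being
removed (simplified, hypothetical names; not the actual slab.py code):

    def get_items(sources):
        for src in sources:
            try:
                item = int(src)
            except ValueError:
                continue
                item = 0    # dead code: nothing after the continue ever executes
            yield item

    list(get_items(["1", "x", "2"]))   # -> [1, 2]; the assignment after continue never runs

The diff below deletes the equivalent unreachable line.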
Signed-off-by: Jeff Mahoney --- crash/types/slab.py | 1 - 1 file changed, 1 deletion(-) diff --git a/crash/types/slab.py b/crash/types/slab.py index fa22e8b8787..c50c4c06b9b 100644 --- a/crash/types/slab.py +++ b/crash/types/slab.py @@ -463,7 +463,6 @@ def get_slabs_of_type(self, node, slabtype, reverse=False, exact_cycles=False): print("failed to initialize slab object from list_head {:#x}: {}".format( int(list_head), sys.exc_info()[0])) continue - slab = Slab(gdb_slab, kmem_cache, error = True) yield slab From 4bf229ac47a31f5ff68ba6d29c730bab861fa5b9 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Tue, 4 Jun 2019 16:32:03 -0400 Subject: [PATCH 161/367] crash.commands: add missing import for ArgumentTypeError crash.commands uses ArgumentTypeError but never imported it. Signed-off-by: Jeff Mahoney --- crash/commands/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crash/commands/__init__.py b/crash/commands/__init__.py index 46d67620aec..9c40f3ce538 100644 --- a/crash/commands/__init__.py +++ b/crash/commands/__init__.py @@ -10,7 +10,7 @@ import importlib import argparse -from crash.exceptions import DelayedAttributeError +from crash.exceptions import DelayedAttributeError, ArgumentTypeError class CommandError(RuntimeError): pass From cc7c8d580ed67f9a969aacf08f81ea62f6d9d9ce Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Tue, 4 Jun 2019 16:33:29 -0400 Subject: [PATCH 162/367] crash.types.task: Use UnexpectedGDBTypeError instead of BadTaskError BadTaskError no longer exists. Use UnexpectedGDBTypeError instead. Signed-off-by: Jeff Mahoney --- crash/types/task.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crash/types/task.py b/crash/types/task.py index c4f494d552c..caddaceda61 100644 --- a/crash/types/task.py +++ b/crash/types/task.py @@ -239,7 +239,7 @@ def _init_task_types(cls, task): if not cls._valid: t = types.task_struct_type if task.type != t: - raise BadTaskError(task) + raise UnexpectedGDBTypeError('task', task, t) # Using a type within the same context makes things a *lot* faster # This works around a shortcoming in gdb. A type lookup and From 0b35acb0d3aee4ed5a304856f0ba1b566bae323a Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Thu, 6 Jun 2019 11:00:56 -0400 Subject: [PATCH 163/367] lint: fix undefined variable errors This commit fixes the following lint errors and enables enforcement of the 'undefined-variable', 'undefined-all-variable', 'global-variable-undefined', and 'undefined-loop-variable' pylint rules. 
************* Module crash.subsystem.filesystem.decoders E: 87,21: Undefined variable 'bio' (undefined-variable) E: 88,35: Undefined variable 'inode' (undefined-variable) E: 93,57: Undefined variable 'bio' (undefined-variable) E:127,44: Undefined variable 'bio' (undefined-variable) E:155,51: Undefined variable 'bh' (undefined-variable) ************* Module crash.subsystem.filesystem.ext3 E: 36,23: Undefined variable 'block_device_name' (undefined-variable) ************* Module crash.subsystem.filesystem.xfs E:246,22: Undefined variable 'bio' (undefined-variable) E:246,45: Undefined variable 'cls' (undefined-variable) E:247,41: Undefined variable 'bio' (undefined-variable) E:250,29: Undefined variable 'xfs' (undefined-variable) ************* Module crash.subsystem.storage.device_mapper E: 40,20: Undefined variable 'cls' (undefined-variable) E: 40,47: Undefined variable 'bio' (undefined-variable) E: 54,15: Undefined variable 'container_of' (undefined-variable) E: 98,43: Undefined variable 'bio' (undefined-variable) E: 99,24: Undefined variable 'tio' (undefined-variable) E:105,36: Undefined variable 'bself' (undefined-variable) E:116,15: Undefined variable 'container_of' (undefined-variable) ************* Module crash.subsystem.storage.decoders E:137,28: Undefined variable 'block_device_name' (undefined-variable) E:245,39: Undefined variable 'block_device_name' (undefined-variable) E:246,39: Undefined variable 'bio' (undefined-variable) ************* Module crash.types.task E:526,20: Undefined variable 'cls' (undefined-variable) Signed-off-by: Jeff Mahoney --- contrib/stuck-requests.py | 2 +- crash/subsystem/filesystem/decoders.py | 12 +++++------ crash/subsystem/filesystem/ext3.py | 1 + crash/subsystem/filesystem/xfs.py | 6 +++--- crash/subsystem/storage/__init__.py | 24 --------------------- crash/subsystem/storage/decoders.py | 27 ++++++++++++++++++++++-- crash/subsystem/storage/device_mapper.py | 9 ++++---- crash/types/task.py | 2 +- tests/pylintrc-enforce | 2 +- 9 files changed, 43 insertions(+), 42 deletions(-) diff --git a/contrib/stuck-requests.py b/contrib/stuck-requests.py index 78b45501ffb..0ff4e1ae1a6 100644 --- a/contrib/stuck-requests.py +++ b/contrib/stuck-requests.py @@ -5,7 +5,7 @@ # This script dumps stuck requests for every disk on the system from crash.subsystem.storage import for_each_disk -from crash.subsystem.storage import for_each_bio_in_stack +from crash.subsystem.storage.decoders import for_each_bio_in_stack from crash.subsystem.storage import gendisk_name from crash.subsystem.storage.blocksq import for_each_request_in_queue from crash.types.list import list_for_each_entry diff --git a/crash/subsystem/filesystem/decoders.py b/crash/subsystem/filesystem/decoders.py index b416c13bcd2..8184dfae2ab 100644 --- a/crash/subsystem/filesystem/decoders.py +++ b/crash/subsystem/filesystem/decoders.py @@ -84,13 +84,13 @@ def __init__(self, bio: gdb.Value): def interpret(self): """Interpret the multipage bio to populate its attributes""" - self.inode = bio['bi_io_vec'][0]['bv_page']['mapping']['host'] - self.fstype = super_fstype(inode['i_sb']) + self.inode = self.bio['bi_io_vec'][0]['bv_page']['mapping']['host'] + self.fstype = super_fstype(self.inode['i_sb']) def __str__(self): return self.description.format(int(self.bio), self.inode['i_ino'], self.fstype, - block_device_name(bio['bi_bdev'])) + block_device_name(self.bio['bi_bdev'])) DecodeMPage.register() @@ -124,7 +124,7 @@ def interpret(self): self.bh = self.bio['bi_private'].cast(self._types.buffer_head_p_type) def 
__str__(self): - return self._description.format(int(bio)) + return self._description.format(int(self.bio)) def __next__(self): return decode_bh(self.bh) @@ -152,7 +152,7 @@ def __init__(self, bh): self.bh = bh def __str__(self): - self._description.format(block_device_name(bh['b_bdev']), - self.bh['b_blocknr'], self.bh['b_size']) + self._description.format(block_device_name(self.bh['b_bdev']), + self.bh['b_blocknr'], self.bh['b_size']) DecodeSyncWBBH.register() diff --git a/crash/subsystem/filesystem/ext3.py b/crash/subsystem/filesystem/ext3.py index 9d2eee8c681..d391ea3e2ef 100644 --- a/crash/subsystem/filesystem/ext3.py +++ b/crash/subsystem/filesystem/ext3.py @@ -3,6 +3,7 @@ import gdb +from crash.subsystem.storage import block_device_name from crash.subsystem.storage.decoders import Decoder class Ext3Decoder(Decoder): diff --git a/crash/subsystem/filesystem/xfs.py b/crash/subsystem/filesystem/xfs.py index 9c9c4ef9006..3a48c455428 100644 --- a/crash/subsystem/filesystem/xfs.py +++ b/crash/subsystem/filesystem/xfs.py @@ -243,11 +243,11 @@ def __init__(self, bio: gdb.Value): def interpret(self): """Interpret the xfsbuf bio to populate its attributes""" - self.xfsbuf = bio['bi_private'].cast(cls._types.xfs_buf_p_type) - self.devname = block_device_name(bio['bi_bdev']) + self.xfsbuf = self.bio['bi_private'].cast(self._types.xfs_buf_p_type) + self.devname = block_device_name(self.bio['bi_bdev']) def __next__(self): - return XFSBufDecoder(xfs.xfsbuf) + return XFSBufDecoder(self.xfsbuf) def __str__(self): return self._description.format(self.bio, self.devname) diff --git a/crash/subsystem/storage/__init__.py b/crash/subsystem/storage/__init__.py index cbe109b33db..0b18d57ad3e 100644 --- a/crash/subsystem/storage/__init__.py +++ b/crash/subsystem/storage/__init__.py @@ -9,7 +9,6 @@ from crash.util import container_of from crash.util.symbols import Types, Symvals, SymbolCallbacks, TypeCallbacks from crash.types.classdev import for_each_class_device -from . import decoders from crash.exceptions import DelayedAttributeError, InvalidArgumentError types = Types([ 'struct gendisk', 'struct hd_struct', 'struct device', @@ -17,29 +16,6 @@ symvals = Symvals([ 'block_class', 'blockdev_superblock', 'disk_type', 'part_type' ]) -def for_each_bio_in_stack(bio: gdb.Value) -> Iterable[decoders.Decoder]: - """ - Iterates and decodes each bio involved in a stacked storage environment - - This method will yield a Decoder object describing each level - in the storage stack, starting with the provided bio, as - processed by each level's decoder. The stack will be interrupted - if an encountered object doesn't have a decoder specified. - - See :mod:`crash.subsystem.storage.decoders` for more detail. - - Args: - bio: The initial struct bio to start decoding. The value must be - of type ``struct bio``. - - Yields: - :obj:`.Decoder`: The next :obj:`.Decoder` in the stack, if any remain. 
- """ - decoder = decoders.decode_bio(bio) - while decoder is not None: - yield decoder - decoder = next(decoder) - def dev_to_gendisk(dev: gdb.Value) -> gdb.Value: """ Converts a ``struct device`` that is embedded in a ``struct gendisk`` diff --git a/crash/subsystem/storage/decoders.py b/crash/subsystem/storage/decoders.py index 8e2ab7aaa8e..0dcf5fc1dde 100644 --- a/crash/subsystem/storage/decoders.py +++ b/crash/subsystem/storage/decoders.py @@ -2,8 +2,10 @@ # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: import gdb -from typing import Union, List, Dict +from typing import Union, List, Dict, Iterable + from crash.infra.lookup import SymbolCallback +from crash.subsystem.storage import block_device_name EndIOSpecifier = Union[int, str, List[str], gdb.Value, gdb.Symbol, None] @@ -243,7 +245,7 @@ def __init__(self, bio: gdb.Value): def __str__(self): return self._description.format(int(self.bio), block_device_name(self.bio['bi_bdev']), - bio['bi_end_io']) + self.bio['bi_end_io']) def decode_bio(bio: gdb.Value) -> Decoder: """ @@ -295,3 +297,24 @@ def decode_bh(bh: gdb.Value) -> Decoder: return GenericBHDecoder(bh) except gdb.NotAvailableError: return BadBHDecoder(bh) + +def for_each_bio_in_stack(bio: gdb.Value) -> Iterable[Decoder]: + """ + Iterates and decodes each bio involved in a stacked storage environment + + This method will yield a Decoder object describing each level + in the storage stack, starting with the provided bio, as + processed by each level's decoder. The stack will be interrupted + if an encountered object doesn't have a decoder specified. + + Args: + bio: The initial struct bio to start decoding. The value must be + of type ``struct bio``. + + Yields: + :obj:`.Decoder`: The next :obj:`.Decoder` in the stack, if any remain. 
+ """ + decoder = decode_bio(bio) + while decoder is not None: + yield decoder + decoder = next(decoder) diff --git a/crash/subsystem/storage/device_mapper.py b/crash/subsystem/storage/device_mapper.py index 7a2af9634f6..088234a9b2d 100644 --- a/crash/subsystem/storage/device_mapper.py +++ b/crash/subsystem/storage/device_mapper.py @@ -3,6 +3,7 @@ import gdb +from crash.util import container_of from crash.util.symbols import Types from crash.subsystem.storage import block_device_name from crash.subsystem.storage.decoders import Decoder, decode_bio @@ -37,7 +38,7 @@ def __init__(self, bio: gdb.Value): def interpret(self): """Interprets the request-based device mapper bio to populate its attributes""" - self.info = cls._get_clone_bio_rq_info(bio) + self.info = self._get_clone_bio_rq_info(self.bio) self.tio = self.info['tio'] def __str__(self): @@ -95,14 +96,14 @@ def __init__(self, bio: gdb.Value): def interpret(self): """Interprets the cloned device mapper bio to populate its attributes""" - self.tio = self._get_clone_bio_tio(bio) - self.next_bio = tio['io']['bio'] + self.tio = self._get_clone_bio_tio(self.bio) + self.next_bio = self.tio['io']['bio'] def __str__(self): return self._description.format( int(self.bio), block_device_name(self.bio['bi_bdev']), - int(bself.io['bi_sector']), + int(self.bio['bi_sector']), block_device_name(self.next_bio['bi_bdev']), int(self.next_bio['bi_sector'])) diff --git a/crash/types/task.py b/crash/types/task.py index caddaceda61..87057bf012d 100644 --- a/crash/types/task.py +++ b/crash/types/task.py @@ -523,7 +523,7 @@ def _get_rss_stat_field(self): def _get_anon_file_rss_fields(self): mm = self.task_struct['mm'] rss = 0 - for name in cls.anon_file_rss_fields: + for name in self.anon_file_rss_fields: if mm[name].type == self.atomic_long_type: rss += int(mm[name]['counter']) else: diff --git a/tests/pylintrc-enforce b/tests/pylintrc-enforce index efdbb83c402..b5cb8946e02 100644 --- a/tests/pylintrc-enforce +++ b/tests/pylintrc-enforce @@ -65,7 +65,7 @@ disable=all # either give multiple identifier separated by comma (,) or put this option # multiple time (only on the command line, not in the configuration file where # it should appear only once). See also the "--disable" option for examples. -enable= +enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable [REPORTS] From 287e01e781f24be17ce52574d2d7801dd6f66f10 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Thu, 6 Jun 2019 12:14:20 -0400 Subject: [PATCH 164/367] lint: fix not-callable errors This commit fixes the following lint errors and enables enforcement of the 'not-callable' pylint rule. 
************* Module crash.subsystem.filesystem.ext3 E: 42,15: self._description is not callable (not-callable) Signed-off-by: Jeff Mahoney --- crash/subsystem/filesystem/ext3.py | 2 +- tests/pylintrc-enforce | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crash/subsystem/filesystem/ext3.py b/crash/subsystem/filesystem/ext3.py index d391ea3e2ef..3361a99352c 100644 --- a/crash/subsystem/filesystem/ext3.py +++ b/crash/subsystem/filesystem/ext3.py @@ -39,6 +39,6 @@ def interpret(self): self.length = int(self.bh['b_size']) def __str__(self): - return self._description(int(self.bh), self.fstype, self.devname) + return self._description.format(int(self.bh), self.fstype, self.devname) Ext3Decoder.register() diff --git a/tests/pylintrc-enforce b/tests/pylintrc-enforce index b5cb8946e02..18933e3048b 100644 --- a/tests/pylintrc-enforce +++ b/tests/pylintrc-enforce @@ -65,7 +65,7 @@ disable=all # either give multiple identifier separated by comma (,) or put this option # multiple time (only on the command line, not in the configuration file where # it should appear only once). See also the "--disable" option for examples. -enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable +enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable [REPORTS] From 706be2aad9ad4f59015d523ab57cb25e93e8aad7 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Thu, 30 May 2019 20:45:07 -0400 Subject: [PATCH 165/367] lint: handle whitespace complaints This commit fixes the following lint complaints and enables enforcement of the 'bad-whitespace', 'trailing-whitespace', 'bad-continuation', and 'mixed-indentation' pylint rules. ************* Module crash.kernel C: 51, 0: No space allowed after bracket types = Types([ 'char *' ]) ^ (bad-whitespace) C: 51, 0: No space allowed before bracket types = Types([ 'char *' ]) ^ (bad-whitespace) C: 52, 0: No space allowed after bracket symvals = Symvals([ 'init_task' ]) ^ (bad-whitespace) C: 52, 0: No space allowed before bracket symvals = Symvals([ 'init_task' ]) ^ (bad-whitespace) C: 53, 0: No space allowed after bracket symbols = Symbols([ 'runqueues']) ^ (bad-whitespace) C: 55, 0: Exactly one space required around keyword argument assignment def __init__(self, roots: PathSpecifier=None, ^ (bad-whitespace) C: 56, 0: Exactly one space required around keyword argument assignment vmlinux_debuginfo: PathSpecifier=None, ^ (bad-whitespace) C: 57, 0: Exactly one space required around keyword argument assignment module_path: PathSpecifier=None, ^ (bad-whitespace) C: 58, 0: Exactly one space required around keyword argument assignment module_debuginfo_path: PathSpecifier=None, ^ (bad-whitespace) C: 59, 0: Exactly one space required around keyword argument assignment verbose: bool=False, debug: bool=False): ^ (bad-whitespace) C: 59, 0: Exactly one space required around keyword argument assignment verbose: bool=False, debug: bool=False): ^ (bad-whitespace) C:133, 0: No space allowed after bracket self.roots = [ "/" ] ^ (bad-whitespace) C:133, 0: No space allowed before bracket self.roots = [ "/" ] ^ (bad-whitespace) C:140, 0: No space allowed after bracket x = [ root ] ^ (bad-whitespace) C:140, 0: No space allowed before bracket x = [ root ] ^ (bad-whitespace) C:147, 0: No space allowed after bracket x = [ "/" ] ^ (bad-whitespace) C:147, 0: No space allowed before bracket x = [ "/" ] ^ (bad-whitespace) C:153, 0: No space allowed after bracket x = [ roots ] ^ (bad-whitespace) 
C:153, 0: No space allowed before bracket x = [ roots ] ^ (bad-whitespace) C:157, 0: No space allowed after bracket x = [ "/" ] ^ (bad-whitespace) C:157, 0: No space allowed before bracket x = [ "/" ] ^ (bad-whitespace) C:172, 0: Wrong continued indentation (remove 9 spaces). os.path.basename(kernel)), | ^ (bad-continuation) C:191, 0: No space allowed after bracket self.vmlinux_debuginfo = [ vmlinux_debuginfo ] ^ (bad-whitespace) C:191, 0: No space allowed before bracket self.vmlinux_debuginfo = [ vmlinux_debuginfo ] ^ (bad-whitespace) C:286, 0: Wrong continued indentation (remove 1 space). .format(kernel)) |^ (bad-continuation) C:382, 0: Exactly one space required around keyword argument assignment def load_modules(self, verbose: bool=False, debug: bool=False) -> None: ^ (bad-whitespace) C:382, 0: Exactly one space required around keyword argument assignment def load_modules(self, verbose: bool=False, debug: bool=False) -> None: ^ (bad-whitespace) C:431, 0: Wrong continued indentation (remove 3 spaces). .format(modpath, addr, sections), | ^ (bad-continuation) C:499, 0: Exactly one space required around keyword argument assignment def cache_file_tree(self, path, regex: Pattern[str]=None) -> None: ^ (bad-whitespace) C:530, 0: Wrong continued indentation (add 4 spaces). regex: Pattern[str]=None) -> str: ^ | (bad-continuation) C:530, 0: Exactly one space required around keyword argument assignment regex: Pattern[str]=None) -> str: ^ (bad-whitespace) C:558, 0: Exactly one space required around keyword argument assignment path: str, verbose: bool=False) -> bool: ^ (bad-whitespace) C:574, 0: Exactly one space required around keyword argument assignment modpath: str=None, verbose: bool=False) -> None: ^ (bad-whitespace) C:574, 0: Exactly one space required around keyword argument assignment modpath: str=None, verbose: bool=False) -> None: ^ (bad-whitespace) ************* Module crash.session C: 24, 0: Exactly one space required around keyword argument assignment def __init__(self, kernel: CrashKernel, verbose: bool=False, ^ (bad-whitespace) C: 25, 0: Exactly one space required around keyword argument assignment debug: bool=False) -> None: ^ (bad-whitespace) C: 44, 0: Wrong continued indentation (remove 1 space). .format(self.kernel.crashing_thread.num), |^ (bad-continuation) C: 50, 0: Wrong continued indentation (remove 42 spaces). 
.format(str(e))) | ^ (bad-continuation) ************* Module crash.addrxlat C: 55, 0: No space allowed around keyword argument assignment arch = utsname.machine, ^ (bad-whitespace) C: 56, 0: No space allowed around keyword argument assignment type = addrxlat.OS_LINUX) ^ (bad-whitespace) ************* Module crash.cache.syscache C: 23, 0: No space allowed after bracket symvals = Symvals([ 'init_uts_ns' ]) ^ (bad-whitespace) C: 23, 0: No space allowed before bracket symvals = Symvals([ 'init_uts_ns' ]) ^ (bad-whitespace) C: 39, 0: No space allowed after bracket utsname_fields = [ 'sysname', 'nodename', 'release', ^ (bad-whitespace) C: 40, 0: No space allowed before bracket 'version', 'machine', 'domainname' ] ^ (bad-whitespace) C: 51, 0: No space allowed after bracket types = Types([ 'char *' ]) ^ (bad-whitespace) C: 51, 0: No space allowed before bracket types = Types([ 'char *' ]) ^ (bad-whitespace) C: 52, 0: No space allowed after bracket symvals = Symvals([ 'kernel_config_data' ]) ^ (bad-whitespace) C: 52, 0: No space allowed before bracket symvals = Symvals([ 'kernel_config_data' ]) ^ (bad-whitespace) C: 53, 0: No space allowed after bracket msymvals = MinimalSymvals([ 'kernel_config_data', ^ (bad-whitespace) C: 54, 0: No space allowed before bracket 'kernel_config_data_end' ]) ^ (bad-whitespace) C:155, 0: No space allowed after bracket symvals = Symvals([ 'avenrun' ]) ^ (bad-whitespace) C:155, 0: No space allowed before bracket symvals = Symvals([ 'avenrun' ]) ^ (bad-whitespace) C:244, 0: No space allowed after bracket symbol_cbs = SymbolCallbacks( [( 'jiffies', ^ (bad-whitespace) C:244, 0: No space allowed after bracket symbol_cbs = SymbolCallbacks( [( 'jiffies', ^ (bad-whitespace) C:245, 0: No space allowed before bracket CrashKernelCache.setup_jiffies ), ^ (bad-whitespace) C:246, 0: No space allowed after bracket ( 'jiffies_64', ^ (bad-whitespace) C:247, 0: No space allowed before bracket CrashKernelCache.setup_jiffies ) ]) ^ (bad-whitespace) C:247, 0: No space allowed before bracket CrashKernelCache.setup_jiffies ) ]) ^ (bad-whitespace) ************* Module crash.commands.vtop C: 11, 0: No space allowed after bracket table_names = ( 'PTE', 'PMD', 'PUD', 'PGD' ) ^ (bad-whitespace) C: 11, 0: No space allowed before bracket table_names = ( 'PTE', 'PMD', 'PUD', 'PGD' ) ^ (bad-whitespace) ************* Module crash.commands.dmesg C: 13, 0: No space allowed after bracket types = Types([ 'struct printk_log *' , 'char *' ]) ^ (bad-whitespace) C: 13, 0: No space allowed before comma types = Types([ 'struct printk_log *' , 'char *' ]) ^ (bad-whitespace) C: 13, 0: No space allowed before bracket types = Types([ 'struct printk_log *' , 'char *' ]) ^ (bad-whitespace) C: 14, 0: No space allowed after bracket symvals = Symvals([ 'log_buf', 'log_buf_len', 'log_first_idx', 'log_next_idx', ^ (bad-whitespace) C: 15, 0: No space allowed before bracket 'clear_seq', 'log_first_seq', 'log_next_seq' ]) ^ (bad-whitespace) ************* Module crash.commands.kmem C: 68, 0: Trailing whitespace (trailing-whitespace) C: 89, 0: Wrong continued indentation (remove 10 spaces). % name) | ^ (bad-continuation) C:102, 0: Trailing whitespace (trailing-whitespace) C:104, 0: Wrong continued indentation (remove 21 spaces). (obj[1], name, ac_desc)) | ^ (bad-continuation) C:115, 0: Wrong continued indentation (remove 13 spaces). 
vmstat[i], diffs[i])) | ^ (bad-continuation) C:126, 0: Trailing whitespace (trailing-whitespace) C:129, 0: No space allowed before bracket for item in range (0, nr_items): ^ (bad-whitespace) C:146, 0: Trailing whitespace (trailing-whitespace) C:153, 0: Trailing whitespace (trailing-whitespace) C:155, 0: Wrong continued indentation (remove 2 spaces). (zone_struct["node"], zone.zid, zone_struct.address, | ^ (bad-continuation) C:156, 0: Wrong continued indentation (remove 15 spaces). zone_struct["name"].string())) | ^ (bad-continuation) C:169, 0: Trailing whitespace (trailing-whitespace) ************* Module crash.commands.lsmod C: 99, 0: Wrong continued indentation (remove 2 spaces). .format(argv.p)) | ^ (bad-continuation) C:127, 0: Wrong continued indentation (remove 6 spaces). .format(modname, addr, size, count, module_use)) | ^ (bad-continuation) ************* Module crash.commands.mount C: 57, 0: Exactly one space required around keyword argument assignment task: LinuxTask=None) -> None: ^ (bad-whitespace) ************* Module crash.commands.ps C: 44, 0: No space allowed before : self._format_header = lambda : "" ^ (bad-whitespace) C: 47, 0: Exactly one space required before assignment header = f" PID PPID CPU {col4name:^{col4width}} ST %MEM " ^ (bad-whitespace) C: 96, 0: Exactly one space required before assignment line = f"{active} {pid:>5} {parent_pid:>5} {last_cpu:>3} " ^ (bad-whitespace) C:110, 0: Exactly one space required before assignment line = f"[{task.last_run():d}] [{state}] PID: {pid:-5d} " ^ (bad-whitespace) ************* Module crash.commands.xfs C:115, 0: Wrong continued indentation (remove 8 spaces). end='') | ^ (bad-continuation) C:166, 0: Wrong continued indentation (add 27 spaces). types.xfs_buf_p_type.target(), 'b_list'): ^ | (bad-continuation) ************* Module crash.infra.lookup C: 32, 0: Exactly one space required around keyword argument assignment def __init__(self, name: str, callback: Callback, attrname: str=None): ^ (bad-whitespace) C: 72, 0: Exactly one space required around keyword argument assignment def __init__(self, name: str, callback: Callback, symbol_file: str=None): ^ (bad-whitespace) C:110, 0: Exactly one space required around keyword argument assignment domain: int=gdb.SYMBOL_VAR_DOMAIN): ^ (bad-whitespace) C:172, 0: Exactly one space required around keyword argument assignment def __init__(self, name: str, callback: Callback, block: gdb.Block=None): ^ (bad-whitespace) ************* Module crash.subsystem.filesystem.decoders C: 32, 0: No space allowed after bracket _types = Types([ 'struct dio *' ]) ^ (bad-whitespace) C: 32, 0: No space allowed before bracket _types = Types([ 'struct dio *' ]) ^ (bad-whitespace) C: 33, 0: No space allowed after bracket __endio__ = [ 'dio_bio_end_io', 'dio_bio_end_io' ] ^ (bad-whitespace) C: 33, 0: No space allowed before bracket __endio__ = [ 'dio_bio_end_io', 'dio_bio_end_io' ] ^ (bad-whitespace) C:114, 0: No space allowed after bracket _types = Types([ 'struct buffer_head *' ]) ^ (bad-whitespace) C:114, 0: No space allowed before bracket _types = Types([ 'struct buffer_head *' ]) ^ (bad-whitespace) C:147, 0: Exactly one space required before assignment __endio__ = 'end_buffer_write_sync' ^ (bad-whitespace) ************* Module crash.subsystem.filesystem.xfs C: 23, 0: Exactly one space required before assignment XFS_IOLOCK_EXCL = 0x01 ^ (bad-whitespace) C: 24, 0: Exactly one space required before assignment XFS_IOLOCK_SHARED = 0x02 ^ (bad-whitespace) C: 25, 0: Exactly one space required before assignment 
XFS_ILOCK_EXCL = 0x04 ^ (bad-whitespace) C: 26, 0: Exactly one space required before assignment XFS_ILOCK_SHARED = 0x08 ^ (bad-whitespace) C: 27, 0: Exactly one space required before assignment XFS_MMAPLOCK_EXCL = 0x10 ^ (bad-whitespace) C: 30, 0: Exactly one space required before assignment XFS_LOCK_MASK = 0x3f ^ (bad-whitespace) C: 41, 0: Exactly one space required before assignment XFS_LI_EFI = 0x1236 ^ (bad-whitespace) C: 42, 0: Exactly one space required before assignment XFS_LI_EFD = 0x1237 ^ (bad-whitespace) C: 43, 0: Exactly one space required before assignment XFS_LI_IUNLINK = 0x1238 ^ (bad-whitespace) C: 44, 0: Exactly one space required before assignment XFS_LI_INODE = 0x123b # aligned ino chunks, var-size ibufs ^ (bad-whitespace) C: 45, 0: Exactly one space required before assignment XFS_LI_BUF = 0x123c # v2 bufs, variable sized inode bufs ^ (bad-whitespace) C: 46, 0: Exactly one space required before assignment XFS_LI_DQUOT = 0x123d ^ (bad-whitespace) C: 47, 0: Exactly one space required before assignment XFS_LI_QUOTAOFF = 0x123e ^ (bad-whitespace) C: 60, 0: Exactly one space required before assignment XFS_BLI_HOLD = 0x01 ^ (bad-whitespace) C: 61, 0: Exactly one space required before assignment XFS_BLI_DIRTY = 0x02 ^ (bad-whitespace) C: 62, 0: Exactly one space required before assignment XFS_BLI_STALE = 0x04 ^ (bad-whitespace) C: 63, 0: Exactly one space required before assignment XFS_BLI_LOGGED = 0x08 ^ (bad-whitespace) C: 65, 0: Exactly one space required before assignment XFS_BLI_STALE_INODE = 0x20 ^ (bad-whitespace) C: 66, 0: Exactly one space required before assignment XFS_BLI_INODE_BUF = 0x40 ^ (bad-whitespace) C: 78, 0: Exactly one space required before assignment XBF_READ = (1 << 0) # buffer intended for reading from device ^ (bad-whitespace) C: 79, 0: Exactly one space required before assignment XBF_WRITE = (1 << 1) # buffer intended for writing to device ^ (bad-whitespace) C: 80, 0: Exactly one space required before assignment XBF_MAPPED = (1 << 2) # buffer mapped (b_addr valid) ^ (bad-whitespace) C: 81, 0: Exactly one space required before assignment XBF_ASYNC = (1 << 4) # initiator will not wait for completion ^ (bad-whitespace) C: 82, 0: Exactly one space required before assignment XBF_DONE = (1 << 5) # all pages in the buffer uptodate ^ (bad-whitespace) C: 83, 0: Exactly one space required before assignment XBF_DELWRI = (1 << 6) # buffer has dirty pages ^ (bad-whitespace) C: 84, 0: Exactly one space required before assignment XBF_STALE = (1 << 7) # buffer has been staled, do not find it ^ (bad-whitespace) C: 85, 0: Exactly one space required before assignment XBF_ORDERED = (1 << 11) # use ordered writes ^ (bad-whitespace) C: 86, 0: Exactly one space required before assignment XBF_READ_AHEAD = (1 << 12) # asynchronous read-ahead ^ (bad-whitespace) C: 87, 0: Exactly one space required before assignment XBF_LOG_BUFFER = (1 << 13) # this is a buffer used for the log ^ (bad-whitespace) C: 90, 0: Exactly one space required before assignment XBF_LOCK = (1 << 14) # lock requested ^ (bad-whitespace) C: 91, 0: Exactly one space required before assignment XBF_TRYLOCK = (1 << 15) # lock requested, but do not wait ^ (bad-whitespace) C: 92, 0: Exactly one space required before assignment XBF_DONT_BLOCK = (1 << 16) # do not block in current thread ^ (bad-whitespace) C: 95, 0: Exactly one space required before assignment _XBF_PAGES = (1 << 18) # backed by refcounted pages ^ (bad-whitespace) C: 97, 0: Exactly one space required before assignment _XBF_KMEM = (1 << 20) # backed by 
heap memory ^ (bad-whitespace) C: 98, 0: Exactly one space required before assignment _XBF_DELWRI_Q = (1 << 21) # buffer on delwri queue ^ (bad-whitespace) C:121, 0: Exactly one space required before assignment XFS_ILOG_CORE = 0x001 ^ (bad-whitespace) C:122, 0: Exactly one space required before assignment XFS_ILOG_DDATA = 0x002 ^ (bad-whitespace) C:123, 0: Exactly one space required before assignment XFS_ILOG_DEXT = 0x004 ^ (bad-whitespace) C:124, 0: Exactly one space required before assignment XFS_ILOG_DBROOT = 0x008 ^ (bad-whitespace) C:125, 0: Exactly one space required before assignment XFS_ILOG_DEV = 0x010 ^ (bad-whitespace) C:126, 0: Exactly one space required before assignment XFS_ILOG_UUID = 0x020 ^ (bad-whitespace) C:127, 0: Exactly one space required before assignment XFS_ILOG_ADATA = 0x040 ^ (bad-whitespace) C:128, 0: Exactly one space required before assignment XFS_ILOG_AEXT = 0x080 ^ (bad-whitespace) C:129, 0: Exactly one space required before assignment XFS_ILOG_ABROOT = 0x100 ^ (bad-whitespace) C:130, 0: Exactly one space required before assignment XFS_ILOG_DOWNER = 0x200 ^ (bad-whitespace) C:131, 0: Exactly one space required before assignment XFS_ILOG_AOWNER = 0x400 ^ (bad-whitespace) C:163, 0: Exactly one space required before assignment XFS_MOUNT_WSYNC = (1 << 0) ^ (bad-whitespace) C:164, 0: Exactly one space required before assignment XFS_MOUNT_UNMOUNTING = (1 << 1) ^ (bad-whitespace) C:165, 0: Exactly one space required before assignment XFS_MOUNT_DMAPI = (1 << 2) ^ (bad-whitespace) C:166, 0: Exactly one space required before assignment XFS_MOUNT_WAS_CLEAN = (1 << 3) ^ (bad-whitespace) C:167, 0: Exactly one space required before assignment XFS_MOUNT_FS_SHUTDOWN = (1 << 4) ^ (bad-whitespace) C:168, 0: Exactly one space required before assignment XFS_MOUNT_DISCARD = (1 << 5) ^ (bad-whitespace) C:169, 0: Exactly one space required before assignment XFS_MOUNT_NOALIGN = (1 << 7) ^ (bad-whitespace) C:170, 0: Exactly one space required before assignment XFS_MOUNT_ATTR2 = (1 << 8) ^ (bad-whitespace) C:171, 0: Exactly one space required before assignment XFS_MOUNT_GRPID = (1 << 9) ^ (bad-whitespace) C:172, 0: Exactly one space required before assignment XFS_MOUNT_NORECOVERY = (1 << 10) ^ (bad-whitespace) C:173, 0: Exactly one space required before assignment XFS_MOUNT_DFLT_IOSIZE = (1 << 12) ^ (bad-whitespace) C:174, 0: Exactly one space required before assignment XFS_MOUNT_SMALL_INUMS = (1 << 14) ^ (bad-whitespace) C:175, 0: Exactly one space required before assignment XFS_MOUNT_32BITINODES = (1 << 15) ^ (bad-whitespace) C:176, 0: Exactly one space required before assignment XFS_MOUNT_NOUUID = (1 << 16) ^ (bad-whitespace) C:177, 0: Exactly one space required before assignment XFS_MOUNT_BARRIER = (1 << 17) ^ (bad-whitespace) C:178, 0: Exactly one space required before assignment XFS_MOUNT_IKEEP = (1 << 18) ^ (bad-whitespace) C:179, 0: Exactly one space required before assignment XFS_MOUNT_SWALLOC = (1 << 19) ^ (bad-whitespace) C:180, 0: Exactly one space required before assignment XFS_MOUNT_RDONLY = (1 << 20) ^ (bad-whitespace) C:181, 0: Exactly one space required before assignment XFS_MOUNT_DIRSYNC = (1 << 21) ^ (bad-whitespace) C:183, 0: Exactly one space required before assignment XFS_MOUNT_FILESTREAMS = (1 << 24) ^ (bad-whitespace) C:184, 0: Exactly one space required before assignment XFS_MOUNT_NOATTR2 = (1 << 25) ^ (bad-whitespace) C:238, 0: No space allowed after bracket _types = Types([ 'struct xfs_buf *' ]) ^ (bad-whitespace) C:238, 0: No space allowed before bracket 
_types = Types([ 'struct xfs_buf *' ]) ^ (bad-whitespace) C:257, 0: No space allowed after bracket types = Types([ 'struct xfs_log_item', 'struct xfs_buf_log_item', ^ (bad-whitespace) C:258, 0: Wrong continued indentation (remove 2 spaces). 'struct xfs_inode_log_item', 'struct xfs_efi_log_item', | ^ (bad-continuation) C:259, 0: Wrong continued indentation (remove 2 spaces). 'struct xfs_efd_log_item', 'struct xfs_dq_logitem', | ^ (bad-continuation) C:260, 0: Wrong continued indentation (remove 2 spaces). 'struct xfs_qoff_logitem', 'struct xfs_inode', | ^ (bad-continuation) C:261, 0: Wrong continued indentation (remove 2 spaces). 'struct xfs_mount *', 'struct xfs_buf *' ]) | ^ (bad-continuation) C:261, 0: No space allowed before bracket 'struct xfs_mount *', 'struct xfs_buf *' ]) ^ (bad-whitespace) C:309, 0: Exactly one space required around keyword argument assignment def xfs_inode(vfs_inode: gdb.Value, force: bool=False) -> gdb.Value: ^ (bad-whitespace) C:334, 0: Exactly one space required around keyword argument assignment def xfs_mount(sb: gdb.Value, force: bool=False) -> gdb.Value: ^ (bad-whitespace) C:550, 0: Exactly one space required after : def xfs_log_item_typed(item:gdb.Value) -> gdb.Value: ^ (bad-whitespace) C:647, 0: No space allowed after bracket type_cbs = TypeCallbacks([ ('struct xfs_ail', _XFS._detect_ail_version) ]) ^ (bad-whitespace) C:647, 0: No space allowed before bracket type_cbs = TypeCallbacks([ ('struct xfs_ail', _XFS._detect_ail_version) ]) ^ (bad-whitespace) ************* Module crash.subsystem.filesystem.__init__ C: 18, 0: Exactly one space required before assignment MS_RDONLY = 1 ^ (bad-whitespace) C: 19, 0: Exactly one space required before assignment MS_NOSUID = 2 ^ (bad-whitespace) C: 20, 0: Exactly one space required before assignment MS_NODEV = 4 ^ (bad-whitespace) C: 21, 0: Exactly one space required before assignment MS_NOEXEC = 8 ^ (bad-whitespace) C: 22, 0: Exactly one space required before assignment MS_SYNCHRONOUS = 16 ^ (bad-whitespace) C: 23, 0: Exactly one space required before assignment MS_REMOUNT = 32 ^ (bad-whitespace) C: 24, 0: Exactly one space required before assignment MS_MANDLOCK = 64 ^ (bad-whitespace) C: 25, 0: Exactly one space required before assignment MS_DIRSYNC = 128 ^ (bad-whitespace) C: 26, 0: Exactly one space required before assignment MS_NOATIME = 1024 ^ (bad-whitespace) C: 27, 0: Exactly one space required before assignment MS_NODIRATIME = 2048 ^ (bad-whitespace) C: 28, 0: Exactly one space required before assignment MS_BIND = 4096 ^ (bad-whitespace) C: 29, 0: Exactly one space required before assignment MS_MOVE = 8192 ^ (bad-whitespace) C: 30, 0: Exactly one space required before assignment MS_REC = 16384 ^ (bad-whitespace) C: 31, 0: Exactly one space required before assignment MS_VERBOSE = 32768 ^ (bad-whitespace) C: 32, 0: Exactly one space required before assignment MS_SILENT = 32768 ^ (bad-whitespace) C: 33, 0: Exactly one space required before assignment MS_POSIXACL = (1<<16) ^ (bad-whitespace) C: 34, 0: Exactly one space required before assignment MS_UNBINDABLE = (1<<17) ^ (bad-whitespace) C: 35, 0: Exactly one space required before assignment MS_PRIVATE = (1<<18) ^ (bad-whitespace) C: 36, 0: Exactly one space required before assignment MS_SLAVE = (1<<19) ^ (bad-whitespace) C: 37, 0: Exactly one space required before assignment MS_SHARED = (1<<20) ^ (bad-whitespace) C: 38, 0: Exactly one space required before assignment MS_RELATIME = (1<<21) ^ (bad-whitespace) C: 39, 0: Exactly one space required before assignment 
MS_KERNMOUNT = (1<<22) ^ (bad-whitespace) C: 40, 0: Exactly one space required before assignment MS_I_VERSION = (1<<23) ^ (bad-whitespace) C: 41, 0: Exactly one space required before assignment MS_STRICTATIME = (1<<24) ^ (bad-whitespace) C: 42, 0: Exactly one space required before assignment MS_LAZYTIME = (1<<25) ^ (bad-whitespace) C: 43, 0: Exactly one space required before assignment MS_NOSEC = (1<<28) ^ (bad-whitespace) C: 44, 0: Exactly one space required before assignment MS_BORN = (1<<29) ^ (bad-whitespace) C: 45, 0: Exactly one space required before assignment MS_ACTIVE = (1<<30) ^ (bad-whitespace) C: 46, 0: Exactly one space required before assignment MS_NOUSER = (1<<31) ^ (bad-whitespace) C:126, 0: Exactly one space required around keyword argument assignment def get_super_block(desc: AddressSpecifier, force: bool=False) -> gdb.Value: ^ (bad-whitespace) ************* Module crash.subsystem.filesystem.btrfs C: 12, 0: No space allowed after bracket types = Types([ 'struct btrfs_inode', 'struct btrfs_fs_info *', ^ (bad-whitespace) C: 13, 0: No space allowed before bracket 'struct btrfs_fs_info' ]) ^ (bad-whitespace) C: 47, 0: Exactly one space required around keyword argument assignment def btrfs_inode(vfs_inode: gdb.Value, force: bool=False ) -> gdb.Value: ^ (bad-whitespace) C: 47, 0: No space allowed before bracket def btrfs_inode(vfs_inode: gdb.Value, force: bool=False ) -> gdb.Value: ^ (bad-whitespace) C: 72, 0: Exactly one space required around keyword argument assignment def btrfs_fs_info(super_block: gdb.Value, force: bool=False) -> gdb.Value: ^ (bad-whitespace) C: 99, 0: Exactly one space required around keyword argument assignment def btrfs_fsid(super_block: gdb.Value, force: bool=False) -> uuid.UUID: ^ (bad-whitespace) C:121, 0: Exactly one space required around keyword argument assignment def btrfs_metadata_uuid(sb: gdb.Value, force: bool=False) -> uuid.UUID: ^ (bad-whitespace) ************* Module crash.subsystem.filesystem.mount C: 24, 0: Exactly one space required before assignment MNT_NOSUID = 0x01 ^ (bad-whitespace) C: 25, 0: Exactly one space required before assignment MNT_NODEV = 0x02 ^ (bad-whitespace) C: 26, 0: Exactly one space required before assignment MNT_NOEXEC = 0x04 ^ (bad-whitespace) C: 27, 0: Exactly one space required before assignment MNT_NOATIME = 0x08 ^ (bad-whitespace) C: 28, 0: Exactly one space required before assignment MNT_NODIRATIME = 0x10 ^ (bad-whitespace) C: 29, 0: Exactly one space required before assignment MNT_RELATIME = 0x20 ^ (bad-whitespace) C: 30, 0: Exactly one space required before assignment MNT_READONLY = 0x40 ^ (bad-whitespace) C: 31, 0: Exactly one space required before assignment MNT_SHRINKABLE = 0x100 ^ (bad-whitespace) C: 32, 0: Exactly one space required before assignment MNT_WRITE_HOLD = 0x200 ^ (bad-whitespace) C: 33, 0: Exactly one space required before assignment MNT_SHARED = 0x1000 ^ (bad-whitespace) C: 34, 0: Exactly one space required before assignment MNT_UNBINDABLE = 0x2000 ^ (bad-whitespace) C: 54, 0: No space allowed after bracket types = Types([ 'struct mount', 'struct vfsmount' ]) ^ (bad-whitespace) C: 54, 0: No space allowed before bracket types = Types([ 'struct mount', 'struct vfsmount' ]) ^ (bad-whitespace) C: 55, 0: No space allowed after bracket symvals = Symvals([ 'init_task' ]) ^ (bad-whitespace) C: 55, 0: No space allowed before bracket symvals = Symvals([ 'init_task' ]) ^ (bad-whitespace) C: 88, 0: Exactly one space required around keyword argument assignment def for_each_mount(task: gdb.Value=None) -> 
Iterator[gdb.Value]: ^ (bad-whitespace) C:115, 0: Exactly one space required around keyword argument assignment def mount_flags(mnt: gdb.Value, show_hidden: bool=False) -> str: ^ (bad-whitespace) C:206, 0: Wrong continued indentation before block (add 4 spaces). vfsmnt.type == types.mount_type.pointer()): ^ | (bad-continuation) C:215, 0: Exactly one space required around keyword argument assignment def d_path(mnt: gdb.Value, dentry: gdb.Value, root: gdb.Value=None): ^ (bad-whitespace) C:277, 0: No space allowed after bracket type_cbs = TypeCallbacks([ ('struct vfsmount', _check_mount_type ) ]) ^ (bad-whitespace) C:277, 0: No space allowed before bracket type_cbs = TypeCallbacks([ ('struct vfsmount', _check_mount_type ) ]) ^ (bad-whitespace) C:277, 0: No space allowed before bracket type_cbs = TypeCallbacks([ ('struct vfsmount', _check_mount_type ) ]) ^ (bad-whitespace) C:278, 0: No space allowed after bracket symbols_cbs = SymbolCallbacks([ ('init_task', _Mount._check_task_interface ) ]) ^ (bad-whitespace) C:278, 0: No space allowed before bracket symbols_cbs = SymbolCallbacks([ ('init_task', _Mount._check_task_interface ) ]) ^ (bad-whitespace) C:278, 0: No space allowed before bracket symbols_cbs = SymbolCallbacks([ ('init_task', _Mount._check_task_interface ) ]) ^ (bad-whitespace) ************* Module crash.subsystem.storage.device_mapper C: 22, 0: No space allowed after bracket _types = Types([ 'struct dm_rq_clone_bio_info *' ]) ^ (bad-whitespace) C: 22, 0: No space allowed before bracket _types = Types([ 'struct dm_rq_clone_bio_info *' ]) ^ (bad-whitespace) C: 46, 0: Wrong continued indentation (add 1 space). block_device_name(self.bio['bi_bdev'])) ^| (bad-continuation) C: 80, 0: No space allowed after bracket _types = Types([ 'struct dm_target_io *' ]) ^ (bad-whitespace) C: 80, 0: No space allowed before bracket _types = Types([ 'struct dm_target_io *' ]) ^ (bad-whitespace) C:104, 0: Wrong hanging indentation (remove 20 spaces). int(self.bio), | ^ (bad-continuation) C:105, 0: Wrong hanging indentation (remove 20 spaces). block_device_name(self.bio['bi_bdev']), | ^ (bad-continuation) C:106, 0: Wrong hanging indentation (remove 20 spaces). int(self.bio['bi_sector']), | ^ (bad-continuation) C:107, 0: Wrong hanging indentation (remove 20 spaces). block_device_name(self.next_bio['bi_bdev']), | ^ (bad-continuation) C:108, 0: Wrong hanging indentation (remove 20 spaces). int(self.next_bio['bi_sector'])) | ^ (bad-continuation) ************* Module crash.subsystem.storage.__init__ C: 14, 0: No space allowed after bracket types = Types([ 'struct gendisk', 'struct hd_struct', 'struct device', ^ (bad-whitespace) C: 15, 0: Wrong continued indentation (remove 2 spaces). 'struct device_type', 'struct bdev_inode' ]) | ^ (bad-continuation) C: 15, 0: No space allowed before bracket 'struct device_type', 'struct bdev_inode' ]) ^ (bad-whitespace) C: 16, 0: No space allowed after bracket symvals = Symvals([ 'block_class', 'blockdev_superblock', 'disk_type', ^ (bad-whitespace) C: 17, 0: No space allowed before bracket 'part_type' ]) ^ (bad-whitespace) C: 82, 0: Exactly one space required around keyword argument assignment def for_each_block_device(subtype: gdb.Value=None) -> Iterable[gdb.Value]: ^ (bad-whitespace) C:115, 0: Wrong continued indentation (add 11 spaces). .format(types.device_type_type.pointer(), ^ | (bad-continuation) C:165, 0: Wrong continued indentation (add 11 spaces). 
.format(types.gendisk_type, types.hd_struct_type, ^ | (bad-continuation) C:166, 0: Wrong continued indentation (add 8 spaces). gendisk.type.unqualified())) ^ | (bad-continuation) C:260, 0: No space allowed after bracket symbol_cbs = SymbolCallbacks([ ( 'disk_type', _check_types ), ^ (bad-whitespace) C:260, 0: No space allowed after bracket symbol_cbs = SymbolCallbacks([ ( 'disk_type', _check_types ), ^ (bad-whitespace) C:260, 0: No space allowed before bracket symbol_cbs = SymbolCallbacks([ ( 'disk_type', _check_types ), ^ (bad-whitespace) C:261, 0: No space allowed after bracket ( 'part_type', _check_types )] ) ^ (bad-whitespace) C:261, 0: No space allowed before bracket ( 'part_type', _check_types )] ) ^ (bad-whitespace) C:261, 0: No space allowed before bracket ( 'part_type', _check_types )] ) ^ (bad-whitespace) C:262, 0: No space allowed after bracket type_cbs = TypeCallbacks([ ('struct device_type', _check_types ) ]) ^ (bad-whitespace) C:262, 0: No space allowed before bracket type_cbs = TypeCallbacks([ ('struct device_type', _check_types ) ]) ^ (bad-whitespace) C:262, 0: No space allowed before bracket type_cbs = TypeCallbacks([ ('struct device_type', _check_types ) ]) ^ (bad-whitespace) ************* Module crash.subsystem.storage.blocksq C: 15, 0: No space allowed after bracket types = Types([ 'struct request' ]) ^ (bad-whitespace) C: 15, 0: No space allowed before bracket types = Types([ 'struct request' ]) ^ (bad-whitespace) ************* Module crash.subsystem.storage.decoders C:247, 0: Wrong continued indentation (add 1 space). block_device_name(self.bio['bi_bdev']), ^| (bad-continuation) C:248, 0: Wrong continued indentation (add 1 space). self.bio['bi_end_io']) ^| (bad-continuation) ************* Module crash.util.__init__ C: 14, 0: No space allowed around bracket TypeSpecifier = Union [ gdb.Type, gdb.Value, str, gdb.Symbol ] ^ (bad-whitespace) C: 14, 0: No space allowed before bracket TypeSpecifier = Union [ gdb.Type, gdb.Value, str, gdb.Symbol ] ^ (bad-whitespace) C: 15, 0: No space allowed around bracket AddressSpecifier = Union [ gdb.Value, str, int ] ^ (bad-whitespace) C: 15, 0: No space allowed before bracket AddressSpecifier = Union [ gdb.Value, str, int ] ^ (bad-whitespace) C: 51, 0: No space allowed after bracket types = Types([ 'char *', 'uuid_t' ]) ^ (bad-whitespace) C: 51, 0: No space allowed before bracket types = Types([ 'char *', 'uuid_t' ]) ^ (bad-whitespace) C:111, 0: Exactly one space required around keyword argument assignment def get_symbol_value(symname: str, block: gdb.Block=None, ^ (bad-whitespace) C:112, 0: Exactly one space required around keyword argument assignment domain: int=None) -> gdb.Value: ^ (bad-whitespace) C:134, 0: Exactly one space required around keyword argument assignment def safe_get_symbol_value(symname: str, block: gdb.Block=None, ^ (bad-whitespace) C:135, 0: Exactly one space required around keyword argument assignment domain: int=None) -> gdb.Value: ^ (bad-whitespace) C:221, 0: Exactly one space required around keyword argument assignment error: bool=True) -> Union[Tuple[int, gdb.Type], None]: ^ (bad-whitespace) C:261, 0: Exactly one space required around keyword argument assignment error: bool=True) -> Union[int, None]: ^ (bad-whitespace) C:309, 0: Exactly one space required around keyword argument assignment def safe_lookup_type(name: str, block: gdb.Block=None) -> Union[gdb.Type, None]: ^ (bad-whitespace) C:390, 0: Exactly one space required around keyword argument assignment separator: str="|") -> str: ^ (bad-whitespace) 
C:447, 0: Wrong continued indentation before block (add 4 spaces). value[0].type.sizeof != 1 or value.type.sizeof != 16): ^ | (bad-continuation) C:475, 0: Wrong continued indentation before block (add 4 spaces). value.type.target() == types.uuid_t_type): ^ | (bad-continuation) ************* Module crash.util.symbols C: 55, 0: No space allowed after bracket names = [ names ] ^ (bad-whitespace) C: 55, 0: No space allowed before bracket names = [ names ] ^ (bad-whitespace) C:314, 0: No space allowed after bracket cbs = [ cbs ] ^ (bad-whitespace) C:314, 0: No space allowed before bracket cbs = [ cbs ] ^ (bad-whitespace) ************* Module crash.types.bitmap C: 26, 0: Wrong continued indentation before block (add 4 spaces). (bitmap.type.code != gdb.TYPE_CODE_PTR or ^ | (bad-continuation) C: 30, 0: Wrong continued indentation (add 11 spaces). .format(bitmap.type)) ^ | (bad-continuation) C: 33, 0: Exactly one space required around keyword argument assignment size_in_bytes: int=None) -> Iterable[int]: ^ (bad-whitespace) C:107, 0: Exactly one space required around keyword argument assignment size_in_bytes: int=None) -> int: ^ (bad-whitespace) C:137, 0: Wrong continued indentation (remove 23 spaces). .format(start, elements)) | ^ (bad-continuation) C:160, 0: Exactly one space required around keyword argument assignment def find_first_zero_bit(bitmap: gdb.Value, size_in_bytes: int=None) -> int: ^ (bad-whitespace) C:180, 0: Exactly one space required around keyword argument assignment size_in_bytes: int=None) -> int: ^ (bad-whitespace) C:210, 0: Wrong continued indentation (remove 23 spaces). .format(start, elements)) | ^ (bad-continuation) C:233, 0: Exactly one space required around keyword argument assignment def find_first_set_bit(bitmap: gdb.Value, size_in_bytes: int=None) -> int: ^ (bad-whitespace) C:284, 0: Exactly one space required around keyword argument assignment def find_last_set_bit(bitmap: gdb.Value, size_in_bytes: int=None) -> int: ^ (bad-whitespace) ************* Module crash.types.cpu C: 20, 0: Trailing whitespace (trailing-whitespace) C: 89, 0: No space allowed after bracket symbol_cbs = SymbolCallbacks([ ('cpu_online_mask', ^ (bad-whitespace) C: 96, 0: No space allowed before bracket TypesCPUClass._setup_possible_mask) ]) ^ (bad-whitespace) ************* Module crash.types.page C: 16, 0: No space allowed after bracket types = Types([ 'unsigned long', 'struct page', 'enum pageflags', ^ (bad-whitespace) C: 18, 0: No space allowed after bracket symvals = Symvals([ 'mem_section' ]) ^ (bad-whitespace) C: 18, 0: No space allowed before bracket symvals = Symvals([ 'mem_section' ]) ^ (bad-whitespace) C: 65, 0: No space allowed before bracket cls.compound_head_name = find_member_variant(gdbtype, ('compound_head', 'first_page' )) ^ (bad-whitespace) C:205, 0: No space allowed after bracket type_cbs = TypeCallbacks([ ('struct page', Page.setup_page_type ), ^ (bad-whitespace) C:205, 0: No space allowed before bracket type_cbs = TypeCallbacks([ ('struct page', Page.setup_page_type ), ^ (bad-whitespace) C:206, 0: No space allowed before bracket ('enum pageflags', Page.setup_pageflags ), ^ (bad-whitespace) C:207, 0: No space allowed before bracket ('enum zone_type', Page.setup_zone_type ), ^ (bad-whitespace) C:208, 0: No space allowed before bracket ('struct mem_section', Page.setup_mem_section) ]) ^ (bad-whitespace) C:209, 0: No space allowed after bracket msymbol_cbs = MinimalSymbolCallbacks([ ('kernel_config_data', ^ (bad-whitespace) C:210, 0: No space allowed before bracket 
C:210, 214, 216: No space allowed after/before bracket (bad-whitespace)
************* Module crash.types.klist
C: 13: No space allowed after/before bracket (bad-whitespace)
C: 34: Wrong continued indentation (bad-continuation)
************* Module crash.types.percpu
C: 24-28, 372, 375-377: No space allowed after/before bracket (bad-whitespace)
C:228: Exactly one space required around assignment (bad-whitespace)
C:317: Trailing whitespace (trailing-whitespace)
C:337, 415: Exactly one space required around keyword argument assignment (bad-whitespace)
C:457: Wrong continued indentation (bad-continuation)
************* Module crash.types.list
C: 20: No space allowed after/before bracket (bad-whitespace)
C: 22-24, 136-138: Exactly one space required around keyword argument assignment (bad-whitespace)
************* Module crash.types.slab
C: 20, 115, 121: Exactly one space required before assignment (bad-whitespace)
C: 37, 578, 643, 650-652: No space allowed after/before bracket (bad-whitespace)
C:167, 179: No space allowed around keyword argument assignment (bad-whitespace)
C: 91, 169, 179, 208, 213, 221, 225, 241, 263, 269, 277, 358, 377, 457, 464,
  491, 510, 518, 524, 529, 542, 545, 552, 567, 578, 588, 597: Wrong continued
  or hanging indentation (bad-continuation)
************* Module crash.types.module
C: 10, 11: No space allowed after/before bracket (bad-whitespace)
************* Module crash.types.task
C: 20, 21, 170, 171: No space allowed after/before bracket (bad-whitespace)
C: 41-52, 54-56: Exactly one space required around assignment (bad-whitespace)
C:108, 109, 111, 131, 133, 134, 136, 138, 139: Exactly one space required before comparison (bad-whitespace)
C:219, 619: Wrong continued indentation (bad-continuation)
C:418: Exactly one space required around keyword argument assignment (bad-whitespace)
************* Module crash.types.classdev
C: 27, 28: No space allowed after/before bracket (bad-whitespace)
C: 31: Exactly one space required around keyword argument assignment (bad-whitespace)
************* Module crash.types.node
C: 17-19: No space allowed after/before bracket (bad-whitespace)
************* Module crash.types.vmstat
C: 14, 69, 72: No space allowed after/before bracket (bad-whitespace)
C: 26, 31, 70, 71: Wrong continued indentation (bad-continuation)
************* Module crash.types.zone
C: 15, 32, 41: No space allowed before bracket (bad-whitespace)
C: 60: Wrong continued indentation (bad-continuation)
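The warnings listed above (and the few that follow for crash.types.zone and
kdump.target) boil down to three conventions: no blank space just inside a
bracket, spaces around '=' only when a keyword default carries a type
annotation, and continuation lines aligned under the opening bracket. A
minimal, hypothetical sketch of the style being enforced -- the identifiers
below are invented for illustration and are not taken from this patch:

    # illustrative only: names made up for the example
    fields = ['sysname', 'release', 'machine']        # no space inside the brackets

    def lookup(name, verbose=False):                   # unannotated default: no spaces around '='
        return (name, verbose)

    def lookup_typed(name: str, verbose: bool = False) -> str:
        # annotated default: PEP 8 (and pylint's bad-whitespace check)
        # require one space on each side of '='
        return repr(name) if verbose else name

    message = ("expected {} not {}"
               .format(fields[0], fields[1]))          # continuation aligned under the opening bracket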
format(int(page_obj.address), "pcplist" if is_pcp else "freelist", ^ | (bad-continuation) C: 61, 0: Wrong continued indentation (remove 1 space). self.nid, self.zid, page.get_nid(), page.get_zid())) |^ (bad-continuation) C: 65, 0: Wrong continued indentation (add 2 spaces). format("pcplist" if is_pcp else "area", area.address, ^ | (bad-continuation) C: 66, 0: Wrong continued indentation (remove 1 space). nr_expected, nr_free)) |^ (bad-continuation) ************* Module kdump.target C: 48, 0: Wrong continued indentation (remove 1 space). .format(filename, str(e))) |^ (bad-continuation) Signed-off-by: Jeff Mahoney --- crash/addrxlat.py | 4 +- crash/cache/syscache.py | 22 ++-- crash/commands/dmesg.py | 6 +- crash/commands/kmem.py | 24 ++-- crash/commands/lsmod.py | 4 +- crash/commands/mount.py | 2 +- crash/commands/ps.py | 8 +- crash/commands/vtop.py | 2 +- crash/commands/xfs.py | 5 +- crash/infra/lookup.py | 8 +- crash/kernel.py | 47 +++---- crash/session.py | 8 +- crash/subsystem/filesystem/__init__.py | 60 ++++----- crash/subsystem/filesystem/btrfs.py | 12 +- crash/subsystem/filesystem/decoders.py | 8 +- crash/subsystem/filesystem/mount.py | 38 +++--- crash/subsystem/filesystem/xfs.py | 158 +++++++++++------------ crash/subsystem/storage/__init__.py | 25 ++-- crash/subsystem/storage/blocksq.py | 2 +- crash/subsystem/storage/decoders.py | 4 +- crash/subsystem/storage/device_mapper.py | 15 +-- crash/types/bitmap.py | 28 ++-- crash/types/classdev.py | 6 +- crash/types/cpu.py | 18 +-- crash/types/klist.py | 4 +- crash/types/list.py | 14 +- crash/types/module.py | 4 +- crash/types/node.py | 7 +- crash/types/page.py | 26 ++-- crash/types/percpu.py | 39 +++--- crash/types/slab.py | 141 +++++++++++--------- crash/types/task.py | 72 +++++------ crash/types/vmstat.py | 24 ++-- crash/types/zone.py | 15 +-- crash/util/__init__.py | 29 +++-- crash/util/symbols.py | 5 +- kdump/target.py | 2 +- tests/pylintrc-enforce | 2 +- 38 files changed, 456 insertions(+), 442 deletions(-) diff --git a/crash/addrxlat.py b/crash/addrxlat.py index 5d94b238021..ada21bdf711 100644 --- a/crash/addrxlat.py +++ b/crash/addrxlat.py @@ -52,8 +52,8 @@ def __init__(self): self.context = TranslationContext() self.system = addrxlat.System() self.system.os_init(self.context, - arch = utsname.machine, - type = addrxlat.OS_LINUX) + arch=utsname.machine, + type=addrxlat.OS_LINUX) self.is_non_auto = False map = self.system.get_map(addrxlat.SYS_MAP_MACHPHYS_KPHYS) diff --git a/crash/cache/syscache.py b/crash/cache/syscache.py index 2012ce8ae61..18741aa83bc 100644 --- a/crash/cache/syscache.py +++ b/crash/cache/syscache.py @@ -20,7 +20,7 @@ ImageLocation = Dict[str, Dict[str, int]] class CrashUtsnameCache(CrashCache): - symvals = Symvals([ 'init_uts_ns' ]) + symvals = Symvals(['init_uts_ns']) def load_utsname(self): self.utsname = self.symvals.init_uts_ns['name'] @@ -36,8 +36,8 @@ def init_utsname_cache(self): self.utsname_cache = d return self.utsname_cache - utsname_fields = [ 'sysname', 'nodename', 'release', - 'version', 'machine', 'domainname' ] + utsname_fields = ['sysname', 'nodename', 'release', + 'version', 'machine', 'domainname'] def __getattr__(self, name): if name == 'utsname_cache': return self.init_utsname_cache() @@ -48,10 +48,10 @@ def __getattr__(self, name): return getattr(self.__class__, name) class CrashConfigCache(CrashCache): - types = Types([ 'char *' ]) - symvals = Symvals([ 'kernel_config_data' ]) - msymvals = MinimalSymvals([ 'kernel_config_data', - 'kernel_config_data_end' ]) + types = Types(['char *']) + 
symvals = Symvals(['kernel_config_data']) + msymvals = MinimalSymvals(['kernel_config_data', + 'kernel_config_data_end']) def __getattr__(self, name): if name == 'config_buffer': @@ -152,7 +152,7 @@ def __getitem__(self, name): return None class CrashKernelCache(CrashCache): - symvals = Symvals([ 'avenrun' ]) + symvals = Symvals(['avenrun']) jiffies_ready = False adjust_jiffies = False @@ -241,10 +241,8 @@ def get_uptime(self): self.uptime = timedelta(seconds=self.adjusted_jiffies() // self.hz) return self.uptime -symbol_cbs = SymbolCallbacks( [( 'jiffies', - CrashKernelCache.setup_jiffies ), - ( 'jiffies_64', - CrashKernelCache.setup_jiffies ) ]) +symbol_cbs = SymbolCallbacks([('jiffies', CrashKernelCache.setup_jiffies), + ('jiffies_64', CrashKernelCache.setup_jiffies)]) utsname = CrashUtsnameCache() config = CrashConfigCache() diff --git a/crash/commands/dmesg.py b/crash/commands/dmesg.py index 4f5c8b7d94a..c7659c2e488 100644 --- a/crash/commands/dmesg.py +++ b/crash/commands/dmesg.py @@ -10,9 +10,9 @@ from crash.exceptions import DelayedAttributeError from crash.util.symbols import Types, Symvals -types = Types([ 'struct printk_log *' , 'char *' ]) -symvals = Symvals([ 'log_buf', 'log_buf_len', 'log_first_idx', 'log_next_idx', - 'clear_seq', 'log_first_seq', 'log_next_seq' ]) +types = Types(['struct printk_log *', 'char *']) +symvals = Symvals(['log_buf', 'log_buf_len', 'log_first_idx', 'log_next_idx', + 'clear_seq', 'log_first_seq', 'log_next_seq']) class LogTypeException(Exception): pass diff --git a/crash/commands/kmem.py b/crash/commands/kmem.py index 2ae4a0e78b7..0f5bb8be380 100644 --- a/crash/commands/kmem.py +++ b/crash/commands/kmem.py @@ -65,7 +65,7 @@ def execute(self, args): print("Checking done.") return - + if not args.address: raise CommandLineError("no address specified") @@ -86,7 +86,7 @@ def execute(self, args): else: if obj[1] == 0: print("Address on slab %s but not within valid object slot" - % name) + % name) elif not obj[2]: print("FREE object %x from slab %s" % (obj[1], name)) else: @@ -99,9 +99,9 @@ def execute(self, args): ac_desc = "alien cache of node %d for node %d" % (ac["nid_src"], ac["nid_tgt"]) else: raise CommandError(f"unexpected array cache type {str(ac)}") - + print("FREE object %x from slab %s (in %s)" % - (obj[1], name, ac_desc)) + (obj[1], name, ac_desc)) def __print_vmstat(self, vmstat, diffs): vmstat_names = VmStat.get_stat_names(); @@ -112,7 +112,7 @@ def __print_vmstat(self, vmstat, diffs): for i in range(0, nr_items): print("%s: %d (%d)" % (vmstat_names[i].rjust(just), - vmstat[i], diffs[i])) + vmstat[i], diffs[i])) def print_vmstats(self): try: @@ -123,10 +123,10 @@ def print_vmstats(self): print(" VM_STAT:") #TODO put this... where? nr_items = VmStat.nr_stat_items - + stats = [0] * nr_items - for item in range (0, nr_items): + for item in range(0, nr_items): # TODO abstract atomic? 
stats[item] = int(vm_stat[item]["counter"]) @@ -143,17 +143,17 @@ def print_vmstats(self): vm_events = VmStat.get_events() names = VmStat.get_event_names() just = max(map(len, names)) - + for name, val in zip(names, vm_events): print("%s: %d" % (name.rjust(just), val)) def print_zones(self): for zone in for_each_zone(): zone_struct = zone.gdb_obj - + print("NODE: %d ZONE: %d ADDR: %x NAME: \"%s\"" % - (zone_struct["node"], zone.zid, zone_struct.address, - zone_struct["name"].string())) + (zone_struct["node"], zone.zid, zone_struct.address, + zone_struct["name"].string())) if not zone.is_populated(): print(" [unpopulated]") @@ -166,5 +166,5 @@ def print_zones(self): self.__print_vmstat(vmstat, diffs) print() - + KmemCommand("kmem") diff --git a/crash/commands/lsmod.py b/crash/commands/lsmod.py index 092ea5a61d1..3c08bed9fe2 100644 --- a/crash/commands/lsmod.py +++ b/crash/commands/lsmod.py @@ -96,7 +96,7 @@ def execute(self, argv): print("Module\t\t\tPercpu Base\t\tSize") else: print("Module\t\t\tPercpu Base@CPU{:d}\t\tSize" - .format(argv.p)) + .format(argv.p)) self.print_module_percpu(mod, argv.p) continue @@ -124,6 +124,6 @@ def execute(self, argv): count += 1 print("{:16s}\t{:#x}\t{:d}\t{:d}{}" - .format(modname, addr, size, count, module_use)) + .format(modname, addr, size, count, module_use)) ModuleCommand() diff --git a/crash/commands/mount.py b/crash/commands/mount.py index b336155e620..ce8242b2773 100644 --- a/crash/commands/mount.py +++ b/crash/commands/mount.py @@ -54,7 +54,7 @@ def execute(self, args): self.show_one_mount(mnt, args) def show_one_mount(self, mnt: gdb.Value, args: Namespace, - task: LinuxTask=None) -> None: + task: LinuxTask = None) -> None: if mnt.type.code == gdb.TYPE_CODE_PTR: mnt = mnt.dereference() diff --git a/crash/commands/ps.py b/crash/commands/ps.py index 9870eddec98..7b21a2fba00 100755 --- a/crash/commands/ps.py +++ b/crash/commands/ps.py @@ -41,10 +41,10 @@ def __init__(self, argv, regex): if argv.l: self.sort = lambda x: -x.info.last_run() self._format_one_task = self._format_last_run - self._format_header = lambda : "" + self._format_header = lambda: "" def _format_generic_header(self, col4name: str, col4width: int) -> str: - header = f" PID PPID CPU {col4name:^{col4width}} ST %MEM " + header = f" PID PPID CPU {col4name:^{col4width}} ST %MEM " header += "VSZ RSS COMM" return header @@ -93,7 +93,7 @@ def _format_common_line(self, task: LinuxTask, state: str) -> str: else: active = " " - line = f"{active} {pid:>5} {parent_pid:>5} {last_cpu:>3} " + line = f"{active} {pid:>5} {parent_pid:>5} {last_cpu:>3} " line += self._format_column4(task) line += f" {state:3} {0:.1f} {total_vm:7d} {rss:6d} {name}" @@ -107,7 +107,7 @@ def _format_last_run(self, task: LinuxTask, state: str) -> str: if task.active: cpu = task.cpu - line = f"[{task.last_run():d}] [{state}] PID: {pid:-5d} " + line = f"[{task.last_run():d}] [{state}] PID: {pid:-5d} " line += f"TASK: {addr:x} CPU: {cpu:>2d} COMMAND: \"{name}\"" return line diff --git a/crash/commands/vtop.py b/crash/commands/vtop.py index dc958565266..4f79d2e34c6 100644 --- a/crash/commands/vtop.py +++ b/crash/commands/vtop.py @@ -8,7 +8,7 @@ import addrxlat class LinuxPGT(object): - table_names = ( 'PTE', 'PMD', 'PUD', 'PGD' ) + table_names = ('PTE', 'PMD', 'PUD', 'PGD') def __init__(self, ctx, sys): self.context = ctx diff --git a/crash/commands/xfs.py b/crash/commands/xfs.py index 918a737cf10..56cc8e04836 100644 --- a/crash/commands/xfs.py +++ b/crash/commands/xfs.py @@ -112,7 +112,7 @@ def dump_ail(self, args: Namespace) 
-> None: print("AIL @ {:x}".format(int(ail))) print("target={} last_pushed_lsn={} log_flush=" .format(int(ail['xa_target']), int(ail['xa_last_pushed_lsn'])), - end='') + end='') try: print("{}".format(int(ail['xa_log_flush']))) except: @@ -163,7 +163,8 @@ def dump_ail(self, args: Namespace) -> None: @classmethod def dump_buftarg(cls, targ: gdb.Value) -> None: for buf in list_for_each_entry(targ['bt_delwrite_queue'], - types.xfs_buf_p_type.target(), 'b_list'): + types.xfs_buf_p_type.target(), + 'b_list'): print("{:x} {}".format(int(buf.address), xfs_format_xfsbuf(buf))) @classmethod diff --git a/crash/infra/lookup.py b/crash/infra/lookup.py index e134da497f3..5883c722ac5 100644 --- a/crash/infra/lookup.py +++ b/crash/infra/lookup.py @@ -29,7 +29,7 @@ class NamedCallback(ObjfileEventCallback): attrname (:obj:`str`): The name of symbol or type being resolved translated for use as an attribute name. """ - def __init__(self, name: str, callback: Callback, attrname: str=None): + def __init__(self, name: str, callback: Callback, attrname: str = None): super().__init__() self.name = name @@ -69,7 +69,7 @@ class MinimalSymbolCallback(NamedCallback): callback: The callback to execute when the minimal symbol is discovered symbol_file (optional): Name of the symbol file to use """ - def __init__(self, name: str, callback: Callback, symbol_file: str=None): + def __init__(self, name: str, callback: Callback, symbol_file: str = None): super().__init__(name, callback) self.symbol_file = symbol_file @@ -107,7 +107,7 @@ class SymbolCallback(NamedCallback): constant, i.e. SYMBOL_*_DOMAIN. """ def __init__(self, name: str, callback: Callback, - domain: int=gdb.SYMBOL_VAR_DOMAIN): + domain: int = gdb.SYMBOL_VAR_DOMAIN): super().__init__(name, callback) self.domain = domain @@ -169,7 +169,7 @@ class TypeCallback(NamedCallback): block (optional): The :obj:`gdb.Block` to search for the symbol """ - def __init__(self, name: str, callback: Callback, block: gdb.Block=None): + def __init__(self, name: str, callback: Callback, block: gdb.Block = None): (name, attrname, self.pointer) = self.resolve_type(name) super().__init__(name, callback, attrname) diff --git a/crash/kernel.py b/crash/kernel.py index c869f267cf7..7ee9b0412e2 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -48,15 +48,15 @@ def __init__(self, path, module_value, expected_value): PathSpecifier = Union[List[str], str] class CrashKernel(object): - types = Types([ 'char *' ]) - symvals = Symvals([ 'init_task' ]) - symbols = Symbols([ 'runqueues']) - - def __init__(self, roots: PathSpecifier=None, - vmlinux_debuginfo: PathSpecifier=None, - module_path: PathSpecifier=None, - module_debuginfo_path: PathSpecifier=None, - verbose: bool=False, debug: bool=False): + types = Types(['char *']) + symvals = Symvals(['init_task']) + symbols = Symbols(['runqueues']) + + def __init__(self, roots: PathSpecifier = None, + vmlinux_debuginfo: PathSpecifier = None, + module_path: PathSpecifier = None, + module_debuginfo_path: PathSpecifier = None, + verbose: bool = False, debug: bool = False): """ Initialize a basic kernel semantic debugging session. 
@@ -130,31 +130,31 @@ def __init__(self, roots: PathSpecifier=None, version = self.extract_version() if roots is None: - self.roots = [ "/" ] + self.roots = ["/"] elif (isinstance(roots, list) and len(roots) > 0 and isinstance(roots[0], str)): x = None for root in roots: if os.path.exists(root): if x is None: - x = [ root ] + x = [root] else: x.append(root) else: print("root {} does not exist".format(root)) if x is None: - x = [ "/" ] + x = ["/"] self.roots = x elif (isinstance(roots, str)): x = None if os.path.exists(roots): if x is None: - x = [ roots ] + x = [roots] else: x.append(roots) if x is None: - x = [ "/" ] + x = ["/"] self.roots = x else: raise InvalidArgumentError("roots must be None, str, or list of str") @@ -169,7 +169,7 @@ def __init__(self, roots: PathSpecifier=None, "vmlinux-{}.debug".format(version), "{}/{}.debug".format(debugroot, kernel), "{}/boot/{}.debug".format(debugroot, - os.path.basename(kernel)), + os.path.basename(kernel)), "{}/boot/vmlinux-{}.debug".format(debugroot, version), ] for root in self.roots: @@ -188,7 +188,7 @@ def __init__(self, roots: PathSpecifier=None, isinstance(vmlinux_debuginfo[0], str)): self.vmlinux_debuginfo = vmlinux_debuginfo elif isinstance(vmlinux_debuginfo, str): - self.vmlinux_debuginfo = [ vmlinux_debuginfo ] + self.vmlinux_debuginfo = [vmlinux_debuginfo] else: raise InvalidArgumentError("vmlinux_debuginfo must be None, str, or list of str") @@ -283,7 +283,7 @@ def __init__(self, roots: PathSpecifier=None, if not obj.has_symbols(): raise CrashKernelError("Couldn't locate debuginfo for {}" - .format(kernel)) + .format(kernel)) self.vermagic = self.extract_vermagic() @@ -379,7 +379,7 @@ def check_module_version(self, modpath: str, module: gdb.Value) -> None: raise ModSourceVersionMismatchError(modpath, mi_srcversion, mod_srcversion) - def load_modules(self, verbose: bool=False, debug: bool=False) -> None: + def load_modules(self, verbose: bool = False, debug: bool = False) -> None: import crash.cache.syscache version = crash.cache.syscache.utsname.release print("Loading modules for {}".format(version), end='') @@ -428,7 +428,7 @@ def load_modules(self, verbose: bool=False, debug: bool=False) -> None: try: result = gdb.execute("add-symbol-file {} {:#x} {}" - .format(modpath, addr, sections), + .format(modpath, addr, sections), to_string=True) except gdb.error as e: raise CrashKernelError("Error while loading module `{}': {}" @@ -496,7 +496,7 @@ def get_module_path_from_modules_order(self, path: str, name: str) -> str: except KeyError: raise NoMatchingFileError(name) - def cache_file_tree(self, path, regex: Pattern[str]=None) -> None: + def cache_file_tree(self, path, regex: Pattern[str] = None) -> None: if not path in self.findmap: self.findmap[path] = { 'filters' : [], @@ -527,7 +527,7 @@ def cache_file_tree(self, path, regex: Pattern[str]=None) -> None: self.findmap[path]['files'][modname] = modpath def get_file_path_from_tree_search(self, path: str, name: str, - regex: Pattern[str]=None) -> str: + regex: Pattern[str] = None) -> str: self.cache_file_tree(path, regex) try: @@ -555,7 +555,7 @@ def build_id_path(objfile: gdb.Objfile) -> str: return ".build_id/{}/{}.debug".format(build_id[0:2], build_id[2:]) def try_load_debuginfo(self, objfile: gdb.Objfile, - path: str, verbose: bool=False) -> bool: + path: str, verbose: bool = False) -> bool: if not os.path.exists(path): return False @@ -571,7 +571,8 @@ def try_load_debuginfo(self, objfile: gdb.Objfile, return False def load_module_debuginfo(self, objfile: gdb.Objfile, - modpath: 
str=None, verbose: bool=False) -> None: + modpath: str = None, + verbose: bool = False) -> None: if modpath is None: modpath = objfile.filename if ".gz" in modpath: diff --git a/crash/session.py b/crash/session.py index 91298b13d35..c2805808ce7 100644 --- a/crash/session.py +++ b/crash/session.py @@ -21,8 +21,8 @@ class Session(object): debug (bool, optional, default=False): Whether to enable verbose debugging output """ - def __init__(self, kernel: CrashKernel, verbose: bool=False, - debug: bool=False) -> None: + def __init__(self, kernel: CrashKernel, verbose: bool = False, + debug: bool = False) -> None: print("crash-python initializing...") self.kernel = kernel @@ -41,13 +41,13 @@ def __init__(self, kernel: CrashKernel, verbose: bool=False, if self.kernel.crashing_thread: try: result = gdb.execute("thread {}" - .format(self.kernel.crashing_thread.num), + .format(self.kernel.crashing_thread.num), to_string=True) if debug: print(result) except gdb.error as e: print("Error while switching to crashed thread: {}" - .format(str(e))) + .format(str(e))) print("Further debugging may not be possible.") return diff --git a/crash/subsystem/filesystem/__init__.py b/crash/subsystem/filesystem/__init__.py index bc77d43e3ab..888bb2a7832 100644 --- a/crash/subsystem/filesystem/__init__.py +++ b/crash/subsystem/filesystem/__init__.py @@ -15,35 +15,35 @@ AddressSpecifier = Union[int, str, gdb.Value] -MS_RDONLY = 1 -MS_NOSUID = 2 -MS_NODEV = 4 -MS_NOEXEC = 8 -MS_SYNCHRONOUS = 16 -MS_REMOUNT = 32 -MS_MANDLOCK = 64 -MS_DIRSYNC = 128 -MS_NOATIME = 1024 -MS_NODIRATIME = 2048 -MS_BIND = 4096 -MS_MOVE = 8192 -MS_REC = 16384 -MS_VERBOSE = 32768 -MS_SILENT = 32768 -MS_POSIXACL = (1<<16) -MS_UNBINDABLE = (1<<17) -MS_PRIVATE = (1<<18) -MS_SLAVE = (1<<19) -MS_SHARED = (1<<20) -MS_RELATIME = (1<<21) -MS_KERNMOUNT = (1<<22) -MS_I_VERSION = (1<<23) -MS_STRICTATIME = (1<<24) -MS_LAZYTIME = (1<<25) -MS_NOSEC = (1<<28) -MS_BORN = (1<<29) -MS_ACTIVE = (1<<30) -MS_NOUSER = (1<<31) +MS_RDONLY = 1 +MS_NOSUID = 2 +MS_NODEV = 4 +MS_NOEXEC = 8 +MS_SYNCHRONOUS = 16 +MS_REMOUNT = 32 +MS_MANDLOCK = 64 +MS_DIRSYNC = 128 +MS_NOATIME = 1024 +MS_NODIRATIME = 2048 +MS_BIND = 4096 +MS_MOVE = 8192 +MS_REC = 16384 +MS_VERBOSE = 32768 +MS_SILENT = 32768 +MS_POSIXACL = (1<<16) +MS_UNBINDABLE = (1<<17) +MS_PRIVATE = (1<<18) +MS_SLAVE = (1<<19) +MS_SHARED = (1<<20) +MS_RELATIME = (1<<21) +MS_KERNMOUNT = (1<<22) +MS_I_VERSION = (1<<23) +MS_STRICTATIME = (1<<24) +MS_LAZYTIME = (1<<25) +MS_NOSEC = (1<<28) +MS_BORN = (1<<29) +MS_ACTIVE = (1<<30) +MS_NOUSER = (1<<31) SB_FLAGS = { MS_RDONLY : "MS_RDONLY", @@ -123,7 +123,7 @@ def for_each_super_block() -> Iterable[gdb.Value]: types.super_block_type, 's_list'): yield sb -def get_super_block(desc: AddressSpecifier, force: bool=False) -> gdb.Value: +def get_super_block(desc: AddressSpecifier, force: bool = False) -> gdb.Value: """ Given an address description return a gdb.Value that contains a struct super_block at that address. 
diff --git a/crash/subsystem/filesystem/btrfs.py b/crash/subsystem/filesystem/btrfs.py index e9f8656e179..25e6a76a2d3 100644 --- a/crash/subsystem/filesystem/btrfs.py +++ b/crash/subsystem/filesystem/btrfs.py @@ -9,8 +9,8 @@ from crash.util.symbols import Types from crash.subsystem.filesystem import is_fstype_super -types = Types([ 'struct btrfs_inode', 'struct btrfs_fs_info *', - 'struct btrfs_fs_info' ]) +types = Types(['struct btrfs_inode', 'struct btrfs_fs_info *', + 'struct btrfs_fs_info']) def is_btrfs_super(super_block: gdb.Value) -> bool: """ @@ -44,7 +44,7 @@ def is_btrfs_inode(vfs_inode: gdb.Value) -> bool: """ return is_btrfs_super(vfs_inode['i_sb']) -def btrfs_inode(vfs_inode: gdb.Value, force: bool=False ) -> gdb.Value: +def btrfs_inode(vfs_inode: gdb.Value, force: bool = False) -> gdb.Value: """ Converts a VFS inode to a btrfs inode @@ -69,7 +69,7 @@ def btrfs_inode(vfs_inode: gdb.Value, force: bool=False ) -> gdb.Value: return container_of(vfs_inode, types.btrfs_inode_type, 'vfs_inode') -def btrfs_fs_info(super_block: gdb.Value, force: bool=False) -> gdb.Value: +def btrfs_fs_info(super_block: gdb.Value, force: bool = False) -> gdb.Value: """ Resolves a btrfs_fs_info from a VFS superblock @@ -96,7 +96,7 @@ def btrfs_fs_info(super_block: gdb.Value, force: bool=False) -> gdb.Value: fs_info = super_block['s_fs_info'].cast(types.btrfs_fs_info_p_type) return fs_info.dereference() -def btrfs_fsid(super_block: gdb.Value, force: bool=False) -> uuid.UUID: +def btrfs_fsid(super_block: gdb.Value, force: bool = False) -> uuid.UUID: """ Returns the btrfs fsid (UUID) for the specified superblock. @@ -118,7 +118,7 @@ def btrfs_fsid(super_block: gdb.Value, force: bool=False) -> uuid.UUID: return decode_uuid(fs_info['fsid']) return decode_uuid(fs_info['fs_devices']['fsid']) -def btrfs_metadata_uuid(sb: gdb.Value, force: bool=False) -> uuid.UUID: +def btrfs_metadata_uuid(sb: gdb.Value, force: bool = False) -> uuid.UUID: """ Returns the btrfs metadata uuid for the specified superblock. diff --git a/crash/subsystem/filesystem/decoders.py b/crash/subsystem/filesystem/decoders.py index 8184dfae2ab..eea1596286a 100644 --- a/crash/subsystem/filesystem/decoders.py +++ b/crash/subsystem/filesystem/decoders.py @@ -29,8 +29,8 @@ class DIOBioDecoder(Decoder): offset (str): the starting offset on disk """ - _types = Types([ 'struct dio *' ]) - __endio__ = [ 'dio_bio_end_io', 'dio_bio_end_io' ] + _types = Types(['struct dio *']) + __endio__ = ['dio_bio_end_io', 'dio_bio_end_io'] _description = "{:x} bio: Direct I/O for {} inode {}, sector {} on {}" def __init__(self, bio: gdb.Value): @@ -111,7 +111,7 @@ class DecodeBioBH(Decoder): bio. The value is of type ``struct buffer_head``. """ - _types = Types([ 'struct buffer_head *' ]) + _types = Types(['struct buffer_head *']) __endio__ = 'end_bio_bh_io_sync' _description = "{:x} bio: Bio representation of buffer head" @@ -144,7 +144,7 @@ class DecodeSyncWBBH(Decoder): bh (:obj:`gdb.Value`): The ``struct buffer_head`` being decoded. The value is of type ``struct buffer_head``. 
""" - __endio__ = 'end_buffer_write_sync' + __endio__ = 'end_buffer_write_sync' _description = "{:x} buffer_head: for dev {}, block {}, size {} (unassociated)" def __init__(self, bh): diff --git a/crash/subsystem/filesystem/mount.py b/crash/subsystem/filesystem/mount.py index a7e3beb4ad2..021c477db55 100644 --- a/crash/subsystem/filesystem/mount.py +++ b/crash/subsystem/filesystem/mount.py @@ -21,17 +21,17 @@ from crash.util import container_of, decode_flags, struct_has_member from crash.util.symbols import Types, Symvals, TypeCallbacks, SymbolCallbacks -MNT_NOSUID = 0x01 -MNT_NODEV = 0x02 -MNT_NOEXEC = 0x04 -MNT_NOATIME = 0x08 -MNT_NODIRATIME = 0x10 -MNT_RELATIME = 0x20 -MNT_READONLY = 0x40 -MNT_SHRINKABLE = 0x100 -MNT_WRITE_HOLD = 0x200 -MNT_SHARED = 0x1000 -MNT_UNBINDABLE = 0x2000 +MNT_NOSUID = 0x01 +MNT_NODEV = 0x02 +MNT_NOEXEC = 0x04 +MNT_NOATIME = 0x08 +MNT_NODIRATIME = 0x10 +MNT_RELATIME = 0x20 +MNT_READONLY = 0x40 +MNT_SHRINKABLE = 0x100 +MNT_WRITE_HOLD = 0x200 +MNT_SHARED = 0x1000 +MNT_UNBINDABLE = 0x2000 MNT_FLAGS = { MNT_NOSUID : "MNT_NOSUID", @@ -51,8 +51,8 @@ } MNT_FLAGS_HIDDEN.update(MNT_FLAGS) -types = Types([ 'struct mount', 'struct vfsmount' ]) -symvals = Symvals([ 'init_task' ]) +types = Types(['struct mount', 'struct vfsmount']) +symvals = Symvals(['init_task']) class _Mount(object): @classmethod @@ -85,7 +85,7 @@ def _check_mount_type(gdbtype): # Older kernels didn't separate mount from vfsmount types.mount_type = types.vfsmount_type -def for_each_mount(task: gdb.Value=None) -> Iterator[gdb.Value]: +def for_each_mount(task: gdb.Value = None) -> Iterator[gdb.Value]: """ Iterate over each mountpoint in the namespace of the specified task @@ -112,7 +112,7 @@ def for_each_mount(task: gdb.Value=None) -> Iterator[gdb.Value]: task = symvals.init_task return _Mount._for_each_mount_impl(task) -def mount_flags(mnt: gdb.Value, show_hidden: bool=False) -> str: +def mount_flags(mnt: gdb.Value, show_hidden: bool = False) -> str: """ Returns the human-readable flags of the ``struct mount`` :ref:`structure `. 
@@ -203,7 +203,7 @@ def mount_device(mnt: gdb.Value) -> str: def _real_mount(vfsmnt): if (vfsmnt.type == types.mount_type or - vfsmnt.type == types.mount_type.pointer()): + vfsmnt.type == types.mount_type.pointer()): t = vfsmnt.type if t.code == gdb.TYPE_CODE_PTR: t = t.target() @@ -212,7 +212,7 @@ def _real_mount(vfsmnt): return vfsmnt return container_of(vfsmnt, types.mount_type, 'mnt') -def d_path(mnt: gdb.Value, dentry: gdb.Value, root: gdb.Value=None): +def d_path(mnt: gdb.Value, dentry: gdb.Value, root: gdb.Value = None): """ Returns a file system path described by a mount and dentry @@ -274,5 +274,5 @@ def d_path(mnt: gdb.Value, dentry: gdb.Value, root: gdb.Value=None): name = '/' return name -type_cbs = TypeCallbacks([ ('struct vfsmount', _check_mount_type ) ]) -symbols_cbs = SymbolCallbacks([ ('init_task', _Mount._check_task_interface ) ]) +type_cbs = TypeCallbacks([('struct vfsmount', _check_mount_type)]) +symbols_cbs = SymbolCallbacks([('init_task', _Mount._check_task_interface)]) diff --git a/crash/subsystem/filesystem/xfs.py b/crash/subsystem/filesystem/xfs.py index 3a48c455428..a2c62013c77 100644 --- a/crash/subsystem/filesystem/xfs.py +++ b/crash/subsystem/filesystem/xfs.py @@ -20,14 +20,14 @@ from crash.subsystem.storage.decoders import Decoder # XFS inode locks -XFS_IOLOCK_EXCL = 0x01 -XFS_IOLOCK_SHARED = 0x02 -XFS_ILOCK_EXCL = 0x04 -XFS_ILOCK_SHARED = 0x08 -XFS_MMAPLOCK_EXCL = 0x10 +XFS_IOLOCK_EXCL = 0x01 +XFS_IOLOCK_SHARED = 0x02 +XFS_ILOCK_EXCL = 0x04 +XFS_ILOCK_SHARED = 0x08 +XFS_MMAPLOCK_EXCL = 0x10 XFS_MMAPLOCK_SHARED = 0x20 -XFS_LOCK_MASK = 0x3f +XFS_LOCK_MASK = 0x3f XFS_LOCK_FLAGS = { XFS_IOLOCK_EXCL : "XFS_IOLOCK_EXCL", @@ -38,13 +38,13 @@ XFS_MMAPLOCK_SHARED : "XFS_MMAPLOCK_SHARED", } -XFS_LI_EFI = 0x1236 -XFS_LI_EFD = 0x1237 -XFS_LI_IUNLINK = 0x1238 -XFS_LI_INODE = 0x123b # aligned ino chunks, var-size ibufs -XFS_LI_BUF = 0x123c # v2 bufs, variable sized inode bufs -XFS_LI_DQUOT = 0x123d -XFS_LI_QUOTAOFF = 0x123e +XFS_LI_EFI = 0x1236 +XFS_LI_EFD = 0x1237 +XFS_LI_IUNLINK = 0x1238 +XFS_LI_INODE = 0x123b # aligned ino chunks, var-size ibufs +XFS_LI_BUF = 0x123c # v2 bufs, variable sized inode bufs +XFS_LI_DQUOT = 0x123d +XFS_LI_QUOTAOFF = 0x123e XFS_LI_TYPES = { XFS_LI_EFI : "XFS_LI_EFI", @@ -57,13 +57,13 @@ XFS_LI_QUOTAOFF : "XFS_LI_QUOTAOFF", } -XFS_BLI_HOLD = 0x01 -XFS_BLI_DIRTY = 0x02 -XFS_BLI_STALE = 0x04 -XFS_BLI_LOGGED = 0x08 +XFS_BLI_HOLD = 0x01 +XFS_BLI_DIRTY = 0x02 +XFS_BLI_STALE = 0x04 +XFS_BLI_LOGGED = 0x08 XFS_BLI_INODE_ALLOC_BUF = 0x10 -XFS_BLI_STALE_INODE = 0x20 -XFS_BLI_INODE_BUF = 0x40 +XFS_BLI_STALE_INODE = 0x20 +XFS_BLI_INODE_BUF = 0x40 XFS_BLI_FLAGS = { XFS_BLI_HOLD : "HOLD", @@ -75,27 +75,27 @@ XFS_BLI_INODE_BUF : "INODE_BUF", } -XBF_READ = (1 << 0) # buffer intended for reading from device -XBF_WRITE = (1 << 1) # buffer intended for writing to device -XBF_MAPPED = (1 << 2) # buffer mapped (b_addr valid) -XBF_ASYNC = (1 << 4) # initiator will not wait for completion -XBF_DONE = (1 << 5) # all pages in the buffer uptodate -XBF_DELWRI = (1 << 6) # buffer has dirty pages -XBF_STALE = (1 << 7) # buffer has been staled, do not find it -XBF_ORDERED = (1 << 11) # use ordered writes -XBF_READ_AHEAD = (1 << 12) # asynchronous read-ahead -XBF_LOG_BUFFER = (1 << 13) # this is a buffer used for the log +XBF_READ = (1 << 0) # buffer intended for reading from device +XBF_WRITE = (1 << 1) # buffer intended for writing to device +XBF_MAPPED = (1 << 2) # buffer mapped (b_addr valid) +XBF_ASYNC = (1 << 4) # initiator will not wait for completion +XBF_DONE = (1 
<< 5) # all pages in the buffer uptodate +XBF_DELWRI = (1 << 6) # buffer has dirty pages +XBF_STALE = (1 << 7) # buffer has been staled, do not find it +XBF_ORDERED = (1 << 11) # use ordered writes +XBF_READ_AHEAD = (1 << 12) # asynchronous read-ahead +XBF_LOG_BUFFER = (1 << 13) # this is a buffer used for the log # flags used only as arguments to access routines -XBF_LOCK = (1 << 14) # lock requested -XBF_TRYLOCK = (1 << 15) # lock requested, but do not wait -XBF_DONT_BLOCK = (1 << 16) # do not block in current thread +XBF_LOCK = (1 << 14) # lock requested +XBF_TRYLOCK = (1 << 15) # lock requested, but do not wait +XBF_DONT_BLOCK = (1 << 16) # do not block in current thread # flags used only internally -_XBF_PAGES = (1 << 18) # backed by refcounted pages +_XBF_PAGES = (1 << 18) # backed by refcounted pages _XBF_RUN_QUEUES = (1 << 19) # run block device task queue -_XBF_KMEM = (1 << 20) # backed by heap memory -_XBF_DELWRI_Q = (1 << 21) # buffer on delwri queue +_XBF_KMEM = (1 << 20) # backed by heap memory +_XBF_DELWRI_Q = (1 << 21) # buffer on delwri queue _XBF_LRU_DISPOSE = (1 << 24) # buffer being discarded XFS_BUF_FLAGS = { @@ -118,17 +118,17 @@ _XBF_LRU_DISPOSE : "LRU_DISPOSE", } -XFS_ILOG_CORE = 0x001 -XFS_ILOG_DDATA = 0x002 -XFS_ILOG_DEXT = 0x004 -XFS_ILOG_DBROOT = 0x008 -XFS_ILOG_DEV = 0x010 -XFS_ILOG_UUID = 0x020 -XFS_ILOG_ADATA = 0x040 -XFS_ILOG_AEXT = 0x080 -XFS_ILOG_ABROOT = 0x100 -XFS_ILOG_DOWNER = 0x200 -XFS_ILOG_AOWNER = 0x400 +XFS_ILOG_CORE = 0x001 +XFS_ILOG_DDATA = 0x002 +XFS_ILOG_DEXT = 0x004 +XFS_ILOG_DBROOT = 0x008 +XFS_ILOG_DEV = 0x010 +XFS_ILOG_UUID = 0x020 +XFS_ILOG_ADATA = 0x040 +XFS_ILOG_AEXT = 0x080 +XFS_ILOG_ABROOT = 0x100 +XFS_ILOG_DOWNER = 0x200 +XFS_ILOG_AOWNER = 0x400 XFS_ILOG_TIMESTAMP = 0x4000 XFS_ILI_FLAGS = { @@ -160,28 +160,28 @@ XFS_DQ_FREEING : "FREEING", } -XFS_MOUNT_WSYNC = (1 << 0) -XFS_MOUNT_UNMOUNTING = (1 << 1) -XFS_MOUNT_DMAPI = (1 << 2) -XFS_MOUNT_WAS_CLEAN = (1 << 3) -XFS_MOUNT_FS_SHUTDOWN = (1 << 4) -XFS_MOUNT_DISCARD = (1 << 5) -XFS_MOUNT_NOALIGN = (1 << 7) -XFS_MOUNT_ATTR2 = (1 << 8) -XFS_MOUNT_GRPID = (1 << 9) -XFS_MOUNT_NORECOVERY = (1 << 10) -XFS_MOUNT_DFLT_IOSIZE = (1 << 12) -XFS_MOUNT_SMALL_INUMS = (1 << 14) -XFS_MOUNT_32BITINODES = (1 << 15) -XFS_MOUNT_NOUUID = (1 << 16) -XFS_MOUNT_BARRIER = (1 << 17) -XFS_MOUNT_IKEEP = (1 << 18) -XFS_MOUNT_SWALLOC = (1 << 19) -XFS_MOUNT_RDONLY = (1 << 20) -XFS_MOUNT_DIRSYNC = (1 << 21) +XFS_MOUNT_WSYNC = (1 << 0) +XFS_MOUNT_UNMOUNTING = (1 << 1) +XFS_MOUNT_DMAPI = (1 << 2) +XFS_MOUNT_WAS_CLEAN = (1 << 3) +XFS_MOUNT_FS_SHUTDOWN = (1 << 4) +XFS_MOUNT_DISCARD = (1 << 5) +XFS_MOUNT_NOALIGN = (1 << 7) +XFS_MOUNT_ATTR2 = (1 << 8) +XFS_MOUNT_GRPID = (1 << 9) +XFS_MOUNT_NORECOVERY = (1 << 10) +XFS_MOUNT_DFLT_IOSIZE = (1 << 12) +XFS_MOUNT_SMALL_INUMS = (1 << 14) +XFS_MOUNT_32BITINODES = (1 << 15) +XFS_MOUNT_NOUUID = (1 << 16) +XFS_MOUNT_BARRIER = (1 << 17) +XFS_MOUNT_IKEEP = (1 << 18) +XFS_MOUNT_SWALLOC = (1 << 19) +XFS_MOUNT_RDONLY = (1 << 20) +XFS_MOUNT_DIRSYNC = (1 << 21) XFS_MOUNT_COMPAT_IOSIZE = (1 << 22) -XFS_MOUNT_FILESTREAMS = (1 << 24) -XFS_MOUNT_NOATTR2 = (1 << 25) +XFS_MOUNT_FILESTREAMS = (1 << 24) +XFS_MOUNT_NOATTR2 = (1 << 25) XFS_MOUNT_FLAGS = { XFS_MOUNT_WSYNC : "WSYNC", @@ -235,7 +235,7 @@ class XFSBufBioDecoder(Decoder): """ _description = "{:x} bio: xfs buffer on {}" __endio__ = 'xfs_buf_bio_end_io' - _types = Types([ 'struct xfs_buf *' ]) + _types = Types(['struct xfs_buf *']) def __init__(self, bio: gdb.Value): super(XFSBufBioDecoder, self).__init__() @@ -254,11 +254,11 @@ def 
__str__(self): XFSBufBioDecoder.register() -types = Types([ 'struct xfs_log_item', 'struct xfs_buf_log_item', - 'struct xfs_inode_log_item', 'struct xfs_efi_log_item', - 'struct xfs_efd_log_item', 'struct xfs_dq_logitem', - 'struct xfs_qoff_logitem', 'struct xfs_inode', - 'struct xfs_mount *', 'struct xfs_buf *' ]) +types = Types(['struct xfs_log_item', 'struct xfs_buf_log_item', + 'struct xfs_inode_log_item', 'struct xfs_efi_log_item', + 'struct xfs_efd_log_item', 'struct xfs_dq_logitem', + 'struct xfs_qoff_logitem', 'struct xfs_inode', + 'struct xfs_mount *', 'struct xfs_buf *']) class _XFS(object): """ @@ -306,7 +306,7 @@ def is_xfs_inode(vfs_inode: gdb.Value) -> bool: return is_fstype_inode(vfs_inode, "xfs") -def xfs_inode(vfs_inode: gdb.Value, force: bool=False) -> gdb.Value: +def xfs_inode(vfs_inode: gdb.Value, force: bool = False) -> gdb.Value: """ Converts a VFS inode to a xfs inode @@ -331,7 +331,7 @@ def xfs_inode(vfs_inode: gdb.Value, force: bool=False) -> gdb.Value: return container_of(vfs_inode, types.xfs_inode, 'i_vnode') -def xfs_mount(sb: gdb.Value, force: bool=False) -> gdb.Value: +def xfs_mount(sb: gdb.Value, force: bool = False) -> gdb.Value: """ Converts a VFS superblock to a xfs mount @@ -407,7 +407,7 @@ def xfs_for_each_ail_entry(ail: gdb.Value) -> Iterable[gdb.Value]: """ head = ail[_XFS._ail_head_name] for item in list_for_each_entry(head, types.xfs_log_item_type, 'li_ail'): - yield item + yield item def xfs_for_each_ail_log_item(mp: gdb.Value) -> Iterable[gdb.Value]: """ @@ -425,7 +425,7 @@ def xfs_for_each_ail_log_item(mp: gdb.Value) -> Iterable[gdb.Value]: :obj:`gdb.NotAvailableError`: The target value was not available. """ for item in xfs_for_each_ail_entry(mp['m_ail']): - yield item + yield item def item_to_buf_log_item(item: gdb.Value) -> gdb.Value: """ @@ -547,7 +547,7 @@ def item_to_quotaoff_log_item(item: gdb.Value) -> gdb.Value: raise InvalidArgumentError("item is not an QUOTAOFF log item") return container_of(item, types.xfs_qoff_logitem_type, 'qql_item') -def xfs_log_item_typed(item:gdb.Value) -> gdb.Value: +def xfs_log_item_typed(item: gdb.Value) -> gdb.Value: """ Returns the log item converted from the generic type to the actual type @@ -644,4 +644,4 @@ def xfs_for_each_ail_log_item_typed(mp: gdb.Value) -> gdb.Value: for item in types.xfs_for_each_ail_log_item(mp): yield types.xfs_log_item_typed(item) -type_cbs = TypeCallbacks([ ('struct xfs_ail', _XFS._detect_ail_version) ]) +type_cbs = TypeCallbacks([('struct xfs_ail', _XFS._detect_ail_version)]) diff --git a/crash/subsystem/storage/__init__.py b/crash/subsystem/storage/__init__.py index 0b18d57ad3e..d04c35b30fc 100644 --- a/crash/subsystem/storage/__init__.py +++ b/crash/subsystem/storage/__init__.py @@ -11,10 +11,10 @@ from crash.types.classdev import for_each_class_device from crash.exceptions import DelayedAttributeError, InvalidArgumentError -types = Types([ 'struct gendisk', 'struct hd_struct', 'struct device', - 'struct device_type', 'struct bdev_inode' ]) -symvals = Symvals([ 'block_class', 'blockdev_superblock', 'disk_type', - 'part_type' ]) +types = Types(['struct gendisk', 'struct hd_struct', 'struct device', + 'struct device_type', 'struct bdev_inode']) +symvals = Symvals(['block_class', 'blockdev_superblock', 'disk_type', + 'part_type']) def dev_to_gendisk(dev: gdb.Value) -> gdb.Value: """ @@ -79,7 +79,7 @@ def part_to_dev(part: gdb.Value) -> gdb.Value: return part['__dev'] -def for_each_block_device(subtype: gdb.Value=None) -> Iterable[gdb.Value]: +def for_each_block_device(subtype: 
gdb.Value = None) -> Iterable[gdb.Value]: """ Iterates over each block device registered with the block class. @@ -112,8 +112,8 @@ def for_each_block_device(subtype: gdb.Value=None) -> Iterable[gdb.Value]: subtype = subtype.address elif get_basic_type(subtype.type) != types.device_type_type.pointer(): raise InvalidArgumentError("subtype must be {} not {}" - .format(types.device_type_type.pointer(), - subtype.type.unqualified())) + .format(types.device_type_type.pointer(), + subtype.type.unqualified())) for dev in for_each_class_device(symvals.block_class, subtype): if dev['type'] == symvals.disk_type.address: yield dev_to_gendisk(dev) @@ -162,8 +162,9 @@ def gendisk_name(gendisk: gdb.Value) -> str: return "{}{:d}".format(gendisk_name(parent), int(gendisk['partno'])) else: raise InvalidArgumentError("expected {} or {}, not {}" - .format(types.gendisk_type, types.hd_struct_type, - gendisk.type.unqualified())) + .format(types.gendisk_type, + types.hd_struct_type, + gendisk.type.unqualified())) def block_device_name(bdev: gdb.Value) -> str: """ @@ -257,6 +258,6 @@ def _check_types(result): except DelayedAttributeError: pass -symbol_cbs = SymbolCallbacks([ ( 'disk_type', _check_types ), - ( 'part_type', _check_types )] ) -type_cbs = TypeCallbacks([ ('struct device_type', _check_types ) ]) +symbol_cbs = SymbolCallbacks([('disk_type', _check_types), + ('part_type', _check_types)]) +type_cbs = TypeCallbacks([('struct device_type', _check_types)]) diff --git a/crash/subsystem/storage/blocksq.py b/crash/subsystem/storage/blocksq.py index 16cab773717..977de22237f 100644 --- a/crash/subsystem/storage/blocksq.py +++ b/crash/subsystem/storage/blocksq.py @@ -12,7 +12,7 @@ class NoQueueError(RuntimeError): pass -types = Types([ 'struct request' ]) +types = Types(['struct request']) def for_each_request_in_queue(queue: gdb.Value) -> Iterable[gdb.Value]: """ diff --git a/crash/subsystem/storage/decoders.py b/crash/subsystem/storage/decoders.py index 0dcf5fc1dde..dc9e94305a0 100644 --- a/crash/subsystem/storage/decoders.py +++ b/crash/subsystem/storage/decoders.py @@ -244,8 +244,8 @@ def __init__(self, bio: gdb.Value): def __str__(self): return self._description.format(int(self.bio), - block_device_name(self.bio['bi_bdev']), - self.bio['bi_end_io']) + block_device_name(self.bio['bi_bdev']), + self.bio['bi_end_io']) def decode_bio(bio: gdb.Value) -> Decoder: """ diff --git a/crash/subsystem/storage/device_mapper.py b/crash/subsystem/storage/device_mapper.py index 088234a9b2d..ef616183753 100644 --- a/crash/subsystem/storage/device_mapper.py +++ b/crash/subsystem/storage/device_mapper.py @@ -19,7 +19,7 @@ class ClonedBioReqDecoder(Decoder): target. The value must be of type ``struct bio``. """ - _types = Types([ 'struct dm_rq_clone_bio_info *' ]) + _types = Types(['struct dm_rq_clone_bio_info *']) __endio__ = 'end_clone_bio' _description = '{:x} bio: Request-based Device Mapper on {}' @@ -43,7 +43,7 @@ def interpret(self): def __str__(self): self._description.format(int(self.bio), - block_device_name(self.bio['bi_bdev'])) + block_device_name(self.bio['bi_bdev'])) def __next__(self): return decode_bio(self.info['orig']) @@ -77,7 +77,7 @@ class ClonedBioDecoder(Decoder): tio (:obj:`gdb.Value`): The dm target i/o operation for this bio. The value is of type ``struct dm_target_io``. 
""" - _types = Types([ 'struct dm_target_io *' ]) + _types = Types(['struct dm_target_io *']) _get_clone_bio_tio = None __endio__ = 'clone_endio' _description = "{:x} bio: device mapper clone: {}[{}] -> {}[{}]" @@ -101,11 +101,10 @@ def interpret(self): def __str__(self): return self._description.format( - int(self.bio), - block_device_name(self.bio['bi_bdev']), - int(self.bio['bi_sector']), - block_device_name(self.next_bio['bi_bdev']), - int(self.next_bio['bi_sector'])) + int(self.bio), block_device_name(self.bio['bi_bdev']), + int(self.bio['bi_sector']), + block_device_name(self.next_bio['bi_bdev']), + int(self.next_bio['bi_sector'])) def __next__(self): return decode_bio(self.next_bio) diff --git a/crash/types/bitmap.py b/crash/types/bitmap.py index 1c9a5dc1722..1da76a21e32 100644 --- a/crash/types/bitmap.py +++ b/crash/types/bitmap.py @@ -6,7 +6,9 @@ .. _bitmap_note: -A bitmap is represented as either an array of ``unsigned long`` or as ``unsigned long *``. Each routine below that accepts a gdb.Value requires that it be of either type. +A bitmap is represented as either an array of ``unsigned long`` or as +``unsigned long *``. Each routine below that accepts a gdb.Value +requires that it be of either type. """ from typing import Iterable @@ -23,14 +25,14 @@ def _check_bitmap_type(bitmap: gdb.Value) -> None: if ((bitmap.type.code != gdb.TYPE_CODE_ARRAY or bitmap[0].type.code != types.unsigned_long_type.code or bitmap[0].type.sizeof != types.unsigned_long_type.sizeof) and - (bitmap.type.code != gdb.TYPE_CODE_PTR or - bitmap.type.target().code != types.unsigned_long_type.code or - bitmap.type.target().sizeof != types.unsigned_long_type.sizeof)): + (bitmap.type.code != gdb.TYPE_CODE_PTR or + bitmap.type.target().code != types.unsigned_long_type.code or + bitmap.type.target().sizeof != types.unsigned_long_type.sizeof)): raise InvalidArgumentError("bitmaps are expected to be arrays of unsigned long not `{}'" - .format(bitmap.type)) + .format(bitmap.type)) def for_each_set_bit(bitmap: gdb.Value, - size_in_bytes: int=None) -> Iterable[int]: + size_in_bytes: int = None) -> Iterable[int]: """ Yield each set bit in a bitmap @@ -104,7 +106,7 @@ def _find_first_set_bit(val: gdb.Value) -> int: return r def find_next_zero_bit(bitmap: gdb.Value, start: int, - size_in_bytes: int=None) -> int: + size_in_bytes: int = None) -> int: """ Return the next unset bit in the bitmap starting at position start, inclusive. @@ -134,7 +136,7 @@ def find_next_zero_bit(bitmap: gdb.Value, start: int, if start > size_in_bytes << 3: raise IndexError("Element {} is out of range ({} elements)" - .format(start, elements)) + .format(start, elements)) element = start // (types.unsigned_long_type.sizeof << 3) offset = start % (types.unsigned_long_type.sizeof << 3) @@ -157,7 +159,7 @@ def find_next_zero_bit(bitmap: gdb.Value, start: int, return 0 -def find_first_zero_bit(bitmap: gdb.Value, size_in_bytes: int=None) -> int: +def find_first_zero_bit(bitmap: gdb.Value, size_in_bytes: int = None) -> int: """ Return the first unset bit in the bitmap @@ -177,7 +179,7 @@ def find_first_zero_bit(bitmap: gdb.Value, size_in_bytes: int=None) -> int: return find_next_zero_bit(bitmap, 0, size_in_bytes) def find_next_set_bit(bitmap: gdb.Value, start: int, - size_in_bytes: int=None) -> int: + size_in_bytes: int = None) -> int: """ Return the next set bit in the bitmap starting at position start, inclusive. 
@@ -207,7 +209,7 @@ def find_next_set_bit(bitmap: gdb.Value, start: int, if start > size_in_bytes << 3: raise IndexError("Element {} is out of range ({} elements)" - .format(start, elements)) + .format(start, elements)) element = start // (types.unsigned_long_type.sizeof << 3) offset = start % (types.unsigned_long_type.sizeof << 3) @@ -230,7 +232,7 @@ def find_next_set_bit(bitmap: gdb.Value, start: int, return 0 -def find_first_set_bit(bitmap: gdb.Value, size_in_bytes: int=None) -> int: +def find_first_set_bit(bitmap: gdb.Value, size_in_bytes: int = None) -> int: """ Return the first set bit in the bitmap @@ -281,7 +283,7 @@ def _find_last_set_bit(val: gdb.Value) -> int: return r -def find_last_set_bit(bitmap: gdb.Value, size_in_bytes: int=None) -> int: +def find_last_set_bit(bitmap: gdb.Value, size_in_bytes: int = None) -> int: """ Return the last set bit in the bitmap diff --git a/crash/types/classdev.py b/crash/types/classdev.py index 5bd5fe531da..5759607a3f0 100644 --- a/crash/types/classdev.py +++ b/crash/types/classdev.py @@ -24,11 +24,11 @@ def _setup_iterator_type(cls, gdbtype): cls._class_is_private = False -type_cbs = TypeCallbacks([ ('struct device', - ClassdevState._setup_iterator_type) ]) +type_cbs = TypeCallbacks([('struct device', + ClassdevState._setup_iterator_type)]) def for_each_class_device(class_struct: gdb.Value, - subtype: gdb.Value=None) -> Iterable[gdb.Value]: + subtype: gdb.Value = None) -> Iterable[gdb.Value]: """ Iterate over the list of class devices diff --git a/crash/types/cpu.py b/crash/types/cpu.py index d964245d94d..cf000498602 100644 --- a/crash/types/cpu.py +++ b/crash/types/cpu.py @@ -17,7 +17,7 @@ # functions to iterate over online cpu's etc. class TypesCPUClass(object): """A state holder class for handling CPUs. Not meant to be instantiated. - + Attributes: cpus_online (:obj:`list` of :obj:`int`): A list of the IDs of all online CPUs. cpus_possible (:obj:`list` of :obj:`int`): A list of the IDs of all possible CPUs. 
@@ -86,11 +86,11 @@ def highest_possible_cpu_nr() -> int: raise DelayedAttributeError('cpus_possible') return TypesCPUClass.cpus_possible[-1] -symbol_cbs = SymbolCallbacks([ ('cpu_online_mask', - TypesCPUClass._setup_online_mask), - ('__cpu_online_mask', - TypesCPUClass._setup_online_mask), - ('cpu_possible_mask', - TypesCPUClass._setup_possible_mask), - ('__cpu_possible_mask', - TypesCPUClass._setup_possible_mask) ]) +symbol_cbs = SymbolCallbacks([('cpu_online_mask', + TypesCPUClass._setup_online_mask), + ('__cpu_online_mask', + TypesCPUClass._setup_online_mask), + ('cpu_possible_mask', + TypesCPUClass._setup_possible_mask), + ('__cpu_possible_mask', + TypesCPUClass._setup_possible_mask)]) diff --git a/crash/types/klist.py b/crash/types/klist.py index fc18915e082..7796df44bfc 100644 --- a/crash/types/klist.py +++ b/crash/types/klist.py @@ -10,7 +10,7 @@ from crash.util.symbols import Types -types = Types([ 'struct klist_node', 'struct klist' ]) +types = Types(['struct klist_node', 'struct klist']) class KlistCorruptedError(CorruptedError): pass @@ -31,7 +31,7 @@ def klist_for_each(klist: gdb.Value) -> Iterable[gdb.Value]: klist = klist.dereference() elif klist.type != types.klist_type: raise InvalidArgumentError("klist must be gdb.Value representing 'struct klist' or 'struct klist *' not {}" - .format(klist.type)) + .format(klist.type)) if klist.type is not types.klist_type: types.override('struct klist', klist.type) diff --git a/crash/types/list.py b/crash/types/list.py index 084311115dc..a39dd5d4aae 100644 --- a/crash/types/list.py +++ b/crash/types/list.py @@ -17,11 +17,11 @@ class CorruptListError(ListError): class ListCycleError(CorruptListError): pass -types = Types([ 'struct list_head' ]) +types = Types(['struct list_head']) -def list_for_each(list_head: gdb.Value, include_head: bool=False, - reverse: bool=False, print_broken_links: bool=True, - exact_cycles: bool=False) -> Iterator[gdb.Value]: +def list_for_each(list_head: gdb.Value, include_head: bool = False, + reverse: bool = False, print_broken_links: bool = True, + exact_cycles: bool = False) -> Iterator[gdb.Value]: """ Iterate over a list and yield each node @@ -133,9 +133,9 @@ def list_for_each(list_head: gdb.Value, include_head: bool=False, raise pending_exception def list_for_each_entry(list_head: gdb.Value, gdbtype: gdb.Type, - member: str, include_head: bool=False, - reverse: bool=False, print_broken_links: bool=True, - exact_cycles: bool=False) -> Iterator[gdb.Value]: + member: str, include_head: bool = False, + reverse: bool = False, print_broken_links: bool = True, + exact_cycles: bool = False) -> Iterator[gdb.Value]: """ Iterate over a list and yield each node's containing object diff --git a/crash/types/module.py b/crash/types/module.py index 2e8945b0845..567449b1b6e 100644 --- a/crash/types/module.py +++ b/crash/types/module.py @@ -7,8 +7,8 @@ from crash.types.list import list_for_each_entry from crash.util.symbols import Symvals, Types -symvals = Symvals([ 'modules' ]) -types = Types([ 'struct module' ]) +symvals = Symvals(['modules']) +types = Types(['struct module']) def for_each_module() -> Iterable[gdb.Value]: """ diff --git a/crash/types/node.py b/crash/types/node.py index e3d62e91bcc..42e02fdfd43 100644 --- a/crash/types/node.py +++ b/crash/types/node.py @@ -14,9 +14,9 @@ import crash.types.zone from crash.exceptions import DelayedAttributeError -symbols = Symbols([ 'numa_node' ]) -symvals = Symvals([ 'numa_cpu_lookup_table', 'node_data' ]) -types = Types([ 'pg_data_t', 'struct zone' ]) +symbols = 
Symbols(['numa_node']) +symvals = Symvals(['numa_cpu_lookup_table', 'node_data']) +types = Types(['pg_data_t', 'struct zone']) def numa_node_id(cpu: int) -> int: """ @@ -178,4 +178,3 @@ def for_each_online_node() -> Iterable[Node]: """ for nid in for_each_online_nid(): yield Node.from_nid(nid) - diff --git a/crash/types/page.py b/crash/types/page.py index 7d72d9828e1..a6d90687ac3 100644 --- a/crash/types/page.py +++ b/crash/types/page.py @@ -13,9 +13,9 @@ #TODO debuginfo won't tell us, depends on version? PAGE_MAPPING_ANON = 1 -types = Types([ 'unsigned long', 'struct page', 'enum pageflags', - 'enum zone_type', 'struct mem_section']) -symvals = Symvals([ 'mem_section' ]) +types = Types(['unsigned long', 'struct page', 'enum pageflags', + 'enum zone_type', 'struct mem_section']) +symvals = Symvals(['mem_section']) class Page(object): slab_cache_name = None @@ -62,7 +62,7 @@ def setup_page_type(cls, gdbtype): cls.slab_cache_name = find_member_variant(gdbtype, ('slab_cache', 'lru')) cls.slab_page_name = find_member_variant(gdbtype, ('slab_page', 'lru')) - cls.compound_head_name = find_member_variant(gdbtype, ('compound_head', 'first_page' )) + cls.compound_head_name = find_member_variant(gdbtype, ('compound_head', 'first_page')) cls.vmemmap = gdb.Value(cls.vmemmap_base).cast(gdbtype.pointer()) cls.setup_page_type_done = True @@ -202,18 +202,18 @@ def __init__(self, obj, pfn): self.pfn = pfn self.flags = int(obj["flags"]) -type_cbs = TypeCallbacks([ ('struct page', Page.setup_page_type ), - ('enum pageflags', Page.setup_pageflags ), - ('enum zone_type', Page.setup_zone_type ), - ('struct mem_section', Page.setup_mem_section) ]) -msymbol_cbs = MinimalSymbolCallbacks([ ('kernel_config_data', - Page.setup_nodes_width ) ]) +type_cbs = TypeCallbacks([('struct page', Page.setup_page_type), + ('enum pageflags', Page.setup_pageflags), + ('enum zone_type', Page.setup_zone_type), + ('struct mem_section', Page.setup_mem_section)]) +msymbol_cbs = MinimalSymbolCallbacks([('kernel_config_data', + Page.setup_nodes_width)]) # TODO: this should better be generalized to some callback for # "config is available" without refering to the symbol name here -symbol_cbs = SymbolCallbacks([ ('vmemmap_base', Page.setup_vmemmap_base ), - ('page_offset_base', - Page.setup_directmap_base ) ]) +symbol_cbs = SymbolCallbacks([('vmemmap_base', Page.setup_vmemmap_base), + ('page_offset_base', + Page.setup_directmap_base)]) def pfn_to_page(pfn): diff --git a/crash/types/percpu.py b/crash/types/percpu.py index aea46c639cd..191ac9882a7 100644 --- a/crash/types/percpu.py +++ b/crash/types/percpu.py @@ -21,11 +21,11 @@ class PerCPUError(TypeError): def __init__(self, var): super().__init__(self._fmt.format(var)) -types = Types([ 'void *', 'char *', 'struct pcpu_chunk', - 'struct percpu_counter' ]) -symvals = Symvals([ '__per_cpu_offset', 'pcpu_base_addr', 'pcpu_slot', - 'pcpu_nr_slots', 'pcpu_group_offsets' ]) -msymvals = MinimalSymvals( ['__per_cpu_start', '__per_cpu_end' ]) +types = Types(['void *', 'char *', 'struct pcpu_chunk', + 'struct percpu_counter']) +symvals = Symvals(['__per_cpu_offset', 'pcpu_base_addr', 'pcpu_slot', + 'pcpu_nr_slots', 'pcpu_group_offsets']) +msymvals = MinimalSymvals(['__per_cpu_start', '__per_cpu_end']) SymbolOrValue = Union[gdb.Value, gdb.Symbol] @@ -170,7 +170,8 @@ def _setup_dynamic_offset_cache(self) -> None: # party module dependency... 
use_area_map = struct_has_member(types.pcpu_chunk_type, 'map') for slot in range(symvals.pcpu_nr_slots): - for chunk in list_for_each_entry(symvals.pcpu_slot[slot], types.pcpu_chunk_type, 'list'): + for chunk in list_for_each_entry(symvals.pcpu_slot[slot], + types.pcpu_chunk_type, 'list'): if use_area_map: self._setup_dynamic_offset_cache_area_map(chunk) else: @@ -225,7 +226,7 @@ def is_static_percpu_var(self, addr: int) -> bool: # the previous section. It's possible to override this while # loading debuginfo but not when debuginfo is embedded. def _relocated_offset(self, var): - addr=int(var) + addr = int(var) start = msymvals['__per_cpu_start'] size = self._static_ranges[start] if addr >= start and addr < start + size: @@ -287,13 +288,13 @@ def _resolve_percpu_var(self, var): # Pointer to a percpu elif self.is_percpu_var(var): if var.type != types.void_p_type: - var = var.dereference().address + var = var.dereference().address assert(self.is_percpu_var(var)) else: raise PerCPUError(orig_var) # object is a percpu elif self.is_percpu_var(var.address): - var = var.address + var = var.address else: raise PerCPUError(orig_var) @@ -314,7 +315,7 @@ def _get_percpu_var(self, var: SymbolOrValue, cpu: int) -> gdb.Value: def get_percpu_var(self, var: SymbolOrValue, cpu: int) -> gdb.Value: """ - Retrieve a per-cpu variable for one or all CPUs + Retrieve a per-cpu variable for one or all CPUs Args: var: The symbol or value to use to resolve the percpu location @@ -334,7 +335,7 @@ def get_percpu_var(self, var: SymbolOrValue, cpu: int) -> gdb.Value: return self._get_percpu_var(var, cpu) def get_percpu_vars(self, var: SymbolOrValue, - nr_cpus: int=None) -> Dict[int, gdb.Value]: + nr_cpus: int = None) -> Dict[int, gdb.Value]: """ Retrieve a per-cpu variable for all CPUs @@ -369,12 +370,12 @@ def get_percpu_vars(self, var: SymbolOrValue, vals[cpu] = self._get_percpu_var(var, cpu) return vals -msym_cbs = MinimalSymbolCallbacks([ ('__per_cpu_start', - PerCPUState._setup_per_cpu_size), - ('__per_cpu_end', - PerCPUState._setup_per_cpu_size) ]) -symbol_cbs = SymbolCallbacks([ ('__per_cpu_offset', PerCPUState._setup_nr_cpus), - ('modules', PerCPUState._setup_module_ranges) ]) +msym_cbs = MinimalSymbolCallbacks([('__per_cpu_start', + PerCPUState._setup_per_cpu_size), + ('__per_cpu_end', + PerCPUState._setup_per_cpu_size)]) +symbol_cbs = SymbolCallbacks([('__per_cpu_offset', PerCPUState._setup_nr_cpus), + ('modules', PerCPUState._setup_module_ranges)]) _state = PerCPUState() @@ -412,7 +413,7 @@ def get_percpu_var(var: SymbolOrValue, cpu: int) -> gdb.Value: return _state.get_percpu_var(var, cpu) def get_percpu_vars(var: SymbolOrValue, - nr_cpus: int=None) -> Dict[int, gdb.Value]: + nr_cpus: int = None) -> Dict[int, gdb.Value]: """ Retrieve a per-cpu variable for all CPUs @@ -454,7 +455,7 @@ def percpu_counter_sum(var: SymbolOrValue) -> int: (var.type.code == gdb.TYPE_CODE_PTR and var.type.target() == types.percpu_counter_type)): raise InvalidArgumentError("var must be gdb.Symbol or gdb.Value describing `{}' not `{}'" - .format(types.percpu_counter_type, var.type)) + .format(types.percpu_counter_type, var.type)) total = int(var['count']) diff --git a/crash/types/slab.py b/crash/types/slab.py index c50c4c06b9b..95389d5abb9 100644 --- a/crash/types/slab.py +++ b/crash/types/slab.py @@ -17,7 +17,7 @@ AC_PERCPU = "percpu" AC_SHARED = "shared" -AC_ALIEN = "alien" +AC_ALIEN = "alien" slab_partial = 0 slab_full = 1 @@ -34,7 +34,7 @@ def col_error(msg): def col_bold(msg): return "\033[1;37;40m {}\033[0;37;40m ".format(msg) 
-types = Types([ 'kmem_cache', 'struct kmem_cache' ]) +types = Types(['kmem_cache', 'struct kmem_cache']) class Slab(object): @@ -87,8 +87,8 @@ def __add_free_obj_by_idx(self, idx): bufsize = self.kmem_cache.buffer_size if (idx >= objs_per_slab): - self.__error(": free object index %d overflows %d" % (idx, - objs_per_slab)) + self.__error(": free object index %d overflows %d" % + (idx, objs_per_slab)) return False obj_addr = self.s_mem + idx * bufsize @@ -112,13 +112,13 @@ def __populate_free(self): page = self.gdb_obj freelist = page["freelist"].cast(self.bufctl_type.pointer()) for i in range(self.inuse, objs_per_slab): - obj_idx = int(freelist[i]) + obj_idx = int(freelist[i]) self.__add_free_obj_by_idx(obj_idx) # XXX not generally useful and reliable if False and objs_per_slab > 1: all_zeroes = True for i in range(objs_per_slab): - obj_idx = int(freelist[i]) + obj_idx = int(freelist[i]) if obj_idx != 0: all_zeroes = False if all_zeroes: @@ -164,9 +164,9 @@ def contains_obj(self, addr): return (True, int(obj_addr), None) - def __error(self, msg, misplaced = False): + def __error(self, msg, misplaced: bool = False): msg = col_error("cache %s slab %x%s" % (self.kmem_cache.name, - int(self.gdb_obj.address), msg)) + int(self.gdb_obj.address), msg)) self.error = True if misplaced: self.misplaced_error = msg @@ -176,7 +176,8 @@ def __error(self, msg, misplaced = False): def __free_error(self, list_name): self.misplaced_list = list_name self.__error(": is on list %s, but has %d of %d objects allocated" % - (list_name, self.inuse, self.kmem_cache.objs_per_slab), misplaced = True) + (list_name, self.inuse, self.kmem_cache.objs_per_slab), + misplaced=True) def get_objects(self): bufsize = self.kmem_cache.buffer_size @@ -205,24 +206,25 @@ def check(self, slabtype, nid): if not self.kmem_cache.off_slab_cache: if struct_slab_cache != "size-64" and struct_slab_cache != "size-128": self.__error(": OFF_SLAB struct slab is in a wrong cache %s" % - struct_slab_cache) + struct_slab_cache) else: self.kmem_cache.off_slab_cache = struct_slab_cache elif struct_slab_cache != self.kmem_cache.off_slab_cache: self.__error(": OFF_SLAB struct slab is in a wrong cache %s" % - struct_slab_cache) + struct_slab_cache) struct_slab_obj = struct_slab_slab.contains_obj(self.gdb_obj.address) if not struct_slab_obj[0]: self.__error(": OFF_SLAB struct slab is not allocated") print(struct_slab_obj) elif struct_slab_obj[1] != int(self.gdb_obj.address): - self.__error(": OFF_SLAB struct slab at wrong offset{}".format( - int(self.gdb_obj.address) - struct_slab_obj[1])) + self.__error(": OFF_SLAB struct slab at wrong offset{}" + .format(int(self.gdb_obj.address) - struct_slab_obj[1])) if self.inuse + num_free != max_free: self.__error(": inuse=%d free=%d adds up to %d (should be %d)" % - (self.inuse, num_free, self.inuse + num_free, max_free)) + (self.inuse, num_free, + self.inuse + num_free, max_free)) if slabtype == slab_free: if num_free != max_free: @@ -238,7 +240,7 @@ def check(self, slabtype, nid): slab_nid = self.page.get_nid() if nid != slab_nid: self.__error(": slab is on nid %d instead of %d" % - (slab_nid, nid)) + (slab_nid, nid)) print("free objects %d" % num_free) ac = self.kmem_cache.get_array_caches() @@ -260,13 +262,14 @@ def check(self, slabtype, nid): if page.get_nid() != nid: self.__error(": obj %x is on nid %d instead of %d" % - (obj, page.get_nid(), nid)) + (obj, page.get_nid(), nid)) if not page.is_slab(): self.__error(": obj %x is not on PageSlab page" % obj) kmem_cache_addr = int(page.get_slab_cache()) if 
kmem_cache_addr != int(self.kmem_cache.gdb_obj.address): self.__error(": obj %x is on page where pointer to kmem_cache points to %x instead of %x" % - (obj, kmem_cache_addr, int(self.kmem_cache.gdb_obj.address))) + (obj, kmem_cache_addr, + int(self.kmem_cache.gdb_obj.address))) if self.page_slab: continue @@ -274,7 +277,7 @@ def check(self, slabtype, nid): slab_addr = int(page.get_slab_page()) if slab_addr != self.gdb_obj.address: self.__error(": obj %x is on page where pointer to slab wrongly points to %x" % - (obj, slab_addr)) + (obj, slab_addr)) return num_free def __init__(self, gdb_obj, kmem_cache, error=False): @@ -354,8 +357,9 @@ def __fill_array_cache(self, acache, ac_type, nid_src, nid_tgt): if avail == 0: return - cache_dict = {"ac_type" : ac_type, "nid_src" : nid_src, - "nid_tgt" : nid_tgt} + cache_dict = {"ac_type" : ac_type, + "nid_src" : nid_src, + "nid_tgt" : nid_tgt} # print(cache_dict) if ac_type == AC_PERCPU: @@ -373,8 +377,8 @@ def __fill_array_cache(self, acache, ac_type, nid_src, nid_tgt): obj_nid = page.get_nid() if obj_nid != nid_tgt: - print(col_error("Object {:#x} in cache {} is on wrong nid {} instead of {}".format( - ptr, cache_dict, obj_nid, nid_tgt))) + print(col_error("Object {:#x} in cache {} is on wrong nid {} instead of {}" + .format(ptr, cache_dict, obj_nid, nid_tgt))) def __fill_alien_caches(self, node, nid_src): alien_cache = node["alien"] @@ -453,15 +457,16 @@ def get_slabs_of_type(self, node, slabtype, reverse=False, exact_cycles=False): try: if int(list_head) in wrong_list_nodes.keys(): wrong_type = wrong_list_nodes[int(list_head)] - print(col_error("Encountered head of {} slab list while traversing {} slab list, skipping". - format(slab_list_name[wrong_type], slab_list_name[slabtype]))) + print(col_error("Encountered head of {} slab list while traversing {} slab list, skipping" + .format(slab_list_name[wrong_type], + slab_list_name[slabtype]))) continue slab = Slab.from_list_head(list_head, self) except: traceback.print_exc() - print("failed to initialize slab object from list_head {:#x}: {}".format( - int(list_head), sys.exc_info()[0])) + print("failed to initialize slab object from list_head {:#x}: {}" + .format(int(list_head), sys.exc_info()[0])) continue yield slab @@ -475,7 +480,8 @@ def __check_slab(self, slab, slabtype, nid, errors): if slab.misplaced_error is None and errors['num_misplaced'] > 0: if errors['num_misplaced'] > 0: - print(col_error("{} slab objects were misplaced, printing the last:".format(errors['num_misplaced']))) + print(col_error("{} slab objects were misplaced, printing the last:" + .format(errors['num_misplaced']))) print(errors['last_misplaced']) errors['num_misplaced'] = 0 errors['last_misplaced'] = None @@ -487,8 +493,8 @@ def __check_slab(self, slab, slabtype, nid, errors): errors['first_ok'] = addr else: if errors['num_ok'] > 0: - print("{} slab objects were ok between {:#x} and {:#x}". 
- format(errors['num_ok'], errors['first_ok'], errors['last_ok'])) + print("{} slab objects were ok between {:#x} and {:#x}" + .format(errors['num_ok'], errors['first_ok'], errors['last_ok'])) errors['num_ok'] = 0 errors['first_ok'] = None errors['last_ok'] = None @@ -506,31 +512,36 @@ def ___check_slabs(self, node, slabtype, nid, reverse=False): free = 0 check_ok = True - errors = {'first_ok': None, 'last_ok': None, 'num_ok': 0, - 'first_misplaced': None, 'last_misplaced': None, 'num_misplaced': 0} + errors = {'first_ok': None, + 'last_ok': None, + 'num_ok': 0, + 'first_misplaced': None, + 'last_misplaced': None, + 'num_misplaced': 0} try: for slab in self.get_slabs_of_type(node, slabtype, reverse, exact_cycles=True): try: free += self.__check_slab(slab, slabtype, nid, errors) except Exception as e: - print(col_error("Exception when checking slab {:#x}:{}". - format(int(slab.gdb_obj.address), e))) + print(col_error("Exception when checking slab {:#x}:{}" + .format(int(slab.gdb_obj.address), e))) traceback.print_exc() slabs += 1 except Exception as e: - print(col_error("Unrecoverable error when traversing {} slab list: {}".format( - slab_list_name[slabtype], e))) + print(col_error("Unrecoverable error when traversing {} slab list: {}" + .format(slab_list_name[slabtype], e))) check_ok = False if errors['num_ok'] > 0: - print("{} slab objects were ok between {:#x} and {:#x}". - format(errors['num_ok'], errors['first_ok'], errors['last_ok'])) + print("{} slab objects were ok between {:#x} and {:#x}" + .format(errors['num_ok'], errors['first_ok'], errors['last_ok'])) if errors['num_misplaced'] > 0: - print(col_error("{} slab objects were misplaced, printing the last:".format(errors['num_misplaced']))) - print(errors['last_misplaced']) + print(col_error("{} slab objects were misplaced, printing the last:" + .format(errors['num_misplaced']))) + print(errors['last_misplaced']) return (check_ok, slabs, free) @@ -539,17 +550,21 @@ def __check_slabs(self, node, slabtype, nid): slab_list = node[slab_list_fullname[slabtype]] print("checking {} slab list {:#x}".format(slab_list_name[slabtype], - int(slab_list.address))) + int(slab_list.address))) - errors = {'first_ok': None, 'last_ok': None, 'num_ok': 0, - 'first_misplaced': None, 'last_misplaced': None, 'num_misplaced': 0} + errors = {'first_ok': None, + 'last_ok': None, + 'num_ok': 0, + 'first_misplaced': None, + 'last_misplaced': None, + 'num_misplaced': 0} (check_ok, slabs, free) = self.___check_slabs(node, slabtype, nid) if not check_ok: print("Retrying the slab list in reverse order") - (check_ok, slabs_rev, free_rev) = self.___check_slabs(node, - slabtype, nid, reverse=True) + (check_ok, slabs_rev, free_rev) = \ + self.___check_slabs(node, slabtype, nid, reverse=True) slabs += slabs_rev free += free_rev @@ -563,11 +578,11 @@ def check_array_caches(self): for ac_ptr in acs.keys(): ac_obj_slab = slab_from_obj_addr(ac_ptr) if not ac_obj_slab: - print("cached pointer {:#x} in {} not found in slab".format( - ac_ptr, acs[ac_ptr])) + print("cached pointer {:#x} in {} not found in slab" + .format(ac_ptr, acs[ac_ptr])) elif ac_obj_slab.kmem_cache.name != self.name: - print("cached pointer {:#x} in {} belongs to wrong kmem cache {}".format( - ac_ptr, acs[ac_ptr], ac_obj_slab.kmem_cache.name)) + print("cached pointer {:#x} in {} belongs to wrong kmem cache {}" + .format(ac_ptr, acs[ac_ptr], ac_obj_slab.kmem_cache.name)) else: ac_obj_obj = ac_obj_slab.contains_obj(ac_ptr) if ac_obj_obj[0] == False and ac_obj_obj[2] is None: @@ -575,8 +590,8 @@ def 
check_array_caches(self): ac_ptr, acs[ac_ptr], ac_obj_obj)) elif ac_obj_obj[1] != ac_ptr: print("cached pointer {:#x} in {} has wrong offset: ({}, {:#x}, {})" - .format( ac_ptr, acs[ac_ptr], ac_obj_obj[0], - ac_obj_obj[1], ac_obj_obj[2])) + .format(ac_ptr, acs[ac_ptr], ac_obj_obj[0], + ac_obj_obj[1], ac_obj_obj[2])) def check_all(self): for (nid, node) in self.__get_nodelists(): @@ -584,8 +599,8 @@ def check_all(self): # This is version and architecture specific lock = int(node["list_lock"]["rlock"]["raw_lock"]["slock"]) if lock != 0: - print(col_error("unexpected lock value in kmem_list3 {:#x}: {:#x}". - format(int(node.address), lock))) + print(col_error("unexpected lock value in kmem_list3 {:#x}: {:#x}" + .format(int(node.address), lock))) except gdb.error: print("Can't check lock state -- locking implementation unknown.") free_declared = int(node["free_objects"]) @@ -594,7 +609,7 @@ def check_all(self): free_counted += self.__check_slabs(node, slab_free, nid) if free_declared != free_counted: print(col_error("free objects mismatch on node %d: declared=%d counted=%d" % - (nid, free_declared, free_counted))) + (nid, free_declared, free_counted))) self.check_array_caches() kmem_caches = None @@ -640,13 +655,13 @@ def slab_from_obj_addr(addr): return Slab.from_page(page) -type_cbs = TypeCallbacks([ ('struct page', Slab.check_page_type), - ('struct slab', Slab.check_slab_type), - ('kmem_bufctl_t', Slab.check_bufctl_type), - ('freelist_idx_t', Slab.check_bufctl_type), - ('struct kmem_cache', - KmemCache.check_kmem_cache_type), - ('struct alien_cache', - KmemCache.setup_alien_cache_type) ]) -symbol_cbs = SymbolCallbacks([ ('slab_caches', setup_slab_caches), - (' cache_chain', setup_slab_caches) ]) +type_cbs = TypeCallbacks([('struct page', Slab.check_page_type), + ('struct slab', Slab.check_slab_type), + ('kmem_bufctl_t', Slab.check_bufctl_type), + ('freelist_idx_t', Slab.check_bufctl_type), + ('struct kmem_cache', + KmemCache.check_kmem_cache_type), + ('struct alien_cache', + KmemCache.setup_alien_cache_type)]) +symbol_cbs = SymbolCallbacks([('slab_caches', setup_slab_caches), + ('cache_chain', setup_slab_caches)]) diff --git a/crash/types/task.py b/crash/types/task.py index 87057bf012d..d96def2d031 100644 --- a/crash/types/task.py +++ b/crash/types/task.py @@ -17,8 +17,8 @@ def get_value(symname): if sym[0]: return sym[0].value() -types = Types(['struct task_struct', 'struct mm_struct', 'atomic_long_t' ]) -symvals = Symvals([ 'task_state_array', 'init_task' ]) +types = Types(['struct task_struct', 'struct mm_struct', 'atomic_long_t']) +symvals = Symvals(['task_state_array', 'init_task']) # This is pretty painful. These are all #defines so none of them end # up with symbols in the kernel. 
The best approximation we have is @@ -38,22 +38,22 @@ class TaskStateFlags(object): TASK_FLAG_UNINITIALIZED = -1 - TASK_INTERRUPTIBLE: int=TASK_FLAG_UNINITIALIZED - TASK_UNINTERRUPTIBLE: int=TASK_FLAG_UNINITIALIZED - TASK_STOPPED: int=TASK_FLAG_UNINITIALIZED - EXIT_ZOMBIE: int=TASK_FLAG_UNINITIALIZED - TASK_DEAD: int=TASK_FLAG_UNINITIALIZED - EXIT_DEAD: int=TASK_FLAG_UNINITIALIZED - TASK_SWAPPING: int=TASK_FLAG_UNINITIALIZED - TASK_TRACING_STOPPED: int=TASK_FLAG_UNINITIALIZED - TASK_WAKEKILL: int=TASK_FLAG_UNINITIALIZED - TASK_WAKING: int=TASK_FLAG_UNINITIALIZED - TASK_PARKED: int=TASK_FLAG_UNINITIALIZED - __TASK_IDLE: int=TASK_FLAG_UNINITIALIZED - - TASK_NOLOAD: int=TASK_FLAG_UNINITIALIZED - TASK_NEW: int=TASK_FLAG_UNINITIALIZED - TASK_IDLE: int=TASK_FLAG_UNINITIALIZED + TASK_INTERRUPTIBLE: int = TASK_FLAG_UNINITIALIZED + TASK_UNINTERRUPTIBLE: int = TASK_FLAG_UNINITIALIZED + TASK_STOPPED: int = TASK_FLAG_UNINITIALIZED + EXIT_ZOMBIE: int = TASK_FLAG_UNINITIALIZED + TASK_DEAD: int = TASK_FLAG_UNINITIALIZED + EXIT_DEAD: int = TASK_FLAG_UNINITIALIZED + TASK_SWAPPING: int = TASK_FLAG_UNINITIALIZED + TASK_TRACING_STOPPED: int = TASK_FLAG_UNINITIALIZED + TASK_WAKEKILL: int = TASK_FLAG_UNINITIALIZED + TASK_WAKING: int = TASK_FLAG_UNINITIALIZED + TASK_PARKED: int = TASK_FLAG_UNINITIALIZED + __TASK_IDLE: int = TASK_FLAG_UNINITIALIZED + + TASK_NOLOAD: int = TASK_FLAG_UNINITIALIZED + TASK_NEW: int = TASK_FLAG_UNINITIALIZED + TASK_IDLE: int = TASK_FLAG_UNINITIALIZED def __init__(self): raise NotImplementedError("This class is not meant to be instantiated") @@ -105,10 +105,10 @@ def _task_state_flags_callback(cls, symbol): cls.TASK_NOLOAD = newbits << 3 cls.TASK_NEW = newbits << 4 - assert(cls.TASK_PARKED == 0x0040) - assert(cls.TASK_DEAD == 0x0080) + assert(cls.TASK_PARKED == 0x0040) + assert(cls.TASK_DEAD == 0x0080) assert(cls.TASK_WAKEKILL == 0x0100) - assert(cls.TASK_WAKING == 0x0200) + assert(cls.TASK_WAKING == 0x0200) # Linux 3.14 removed several elements from task_state_array # so we'll have to make some assumptions. 
@@ -128,15 +128,15 @@ def _task_state_flags_callback(cls, symbol): cls.TASK_NOLOAD = newbits << 4 cls.TASK_NEW = newbits << 5 - assert(cls.TASK_DEAD == 0x0040) + assert(cls.TASK_DEAD == 0x0040) assert(cls.TASK_WAKEKILL == 0x0080) - assert(cls.TASK_WAKING == 0x0100) - assert(cls.TASK_PARKED == 0x0200) + assert(cls.TASK_WAKING == 0x0100) + assert(cls.TASK_PARKED == 0x0200) else: - assert(cls.TASK_DEAD == 64) + assert(cls.TASK_DEAD == 64) assert(cls.TASK_WAKEKILL == 128) - assert(cls.TASK_WAKING == 256) - assert(cls.TASK_PARKED == 512) + assert(cls.TASK_WAKING == 256) + assert(cls.TASK_PARKED == 512) if cls.has_flag('TASK_NOLOAD'): assert(cls.TASK_NOLOAD == 1024) @@ -167,8 +167,8 @@ def _check_state_bits(cls): raise RuntimeError("Missing required task states: {}" .format(",".join(missing))) -symbol_cbs = SymbolCallbacks([ ('task_state_array', - TaskStateFlags._task_state_flags_callback) ]) +symbol_cbs = SymbolCallbacks([('task_state_array', + TaskStateFlags._task_state_flags_callback)]) TF = TaskStateFlags @@ -216,9 +216,9 @@ def __init__(self, task_struct: gdb.Value): raise ArgumentTypeError('task_struct', task_struct, gdb.Value) if not (task_struct.type == types.task_struct_type or - task_struct.type == types.task_struct_type.pointer()): - raise UnexpectedGDBTypeError('task_struct', task_struct, - types.task_struct_type) + task_struct.type == types.task_struct_type.pointer()): + raise UnexpectedGDBTypeError('task_struct', task_struct, + types.task_struct_type) self.task_struct = task_struct self.active = False @@ -415,7 +415,7 @@ def update_mem_usage(self) -> None: self.pgd_addr = int(mm['pgd']) self.mem_valid = True - def task_name(self, brackets: bool=False) -> str: + def task_name(self, brackets: bool = False) -> str: """ Returns the ``comm`` field of this task @@ -559,7 +559,7 @@ def _pick_get_rss(cls): raise RuntimeError("No method to retrieve RSS from task found.") def _get_rss(self) -> int: - raise NotImplementedError("_get_rss not implemented") + raise NotImplementedError("_get_rss not implemented") def get_rss(self): """ @@ -580,7 +580,7 @@ def _last_run__last_arrival(self): return int(self.task_struct['sched_info']['last_arrival']) def _get_last_run(self) -> int: - raise NotImplementedError("_get_last_run not implemented") + raise NotImplementedError("_get_last_run not implemented") @classmethod def _pick_last_run(cls): @@ -616,7 +616,7 @@ def for_each_thread_group_leader() -> Iterator[gdb.Value]: """ task_list = symvals.init_task['tasks'] for task in list_for_each_entry(task_list, symvals.init_task.type, - 'tasks', include_head=True): + 'tasks', include_head=True): yield task def for_each_thread_in_group(task: gdb.Value) -> Iterator[gdb.Value]: diff --git a/crash/types/vmstat.py b/crash/types/vmstat.py index 1f2c77aae38..37f04f142fb 100644 --- a/crash/types/vmstat.py +++ b/crash/types/vmstat.py @@ -11,7 +11,7 @@ class VmStat(object): types = Types(['enum zone_stat_item', 'enum vm_event_item']) - symbols = Symbols([ 'vm_event_states' ]) + symbols = Symbols(['vm_event_states']) nr_stat_items = None nr_event_items = None @@ -23,12 +23,12 @@ class VmStat(object): def check_enum_type(cls, gdbtype): if gdbtype == cls.types.enum_zone_stat_item_type: (items, names) = cls.__populate_names(gdbtype, - 'NR_VM_ZONE_STAT_ITEMS') + 'NR_VM_ZONE_STAT_ITEMS') cls.nr_stat_items = items cls.vm_stat_names = names elif gdbtype == cls.types.enum_vm_event_item_type: (items, names) = cls.__populate_names(gdbtype, - 'NR_VM_EVENT_ITEMS') + 'NR_VM_EVENT_ITEMS') cls.nr_event_items = items cls.vm_event_names 
= names else: @@ -36,15 +36,15 @@ def check_enum_type(cls, gdbtype): @classmethod def __populate_names(cls, enum_type, items_name): - nr_items = enum_type[items_name].enumval + nr_items = enum_type[items_name].enumval - names = ["__UNKNOWN__"] * nr_items + names = ["__UNKNOWN__"] * nr_items - for field in enum_type.fields(): - if field.enumval < nr_items: - names[field.enumval] = field.name + for field in enum_type.fields(): + if field.enumval < nr_items: + names[field.enumval] = field.name - return (nr_items, names) + return (nr_items, names) @classmethod def get_stat_names(cls): @@ -66,7 +66,5 @@ def get_events(cls): return events -type_cbs = TypeCallbacks([ ('enum zone_stat_item', - VmStat.check_enum_type), - ('enum vm_event_item', - VmStat.check_enum_type) ]) +type_cbs = TypeCallbacks([('enum zone_stat_item', VmStat.check_enum_type), + ('enum vm_event_item', VmStat.check_enum_type)]) diff --git a/crash/types/zone.py b/crash/types/zone.py index 836ab682e9f..2424928f7f9 100644 --- a/crash/types/zone.py +++ b/crash/types/zone.py @@ -12,7 +12,7 @@ class Zone(object): - types = Types([' struct page' ]) + types = Types(['struct page']) def __init__(self, obj, zid): self.gdb_obj = obj @@ -29,7 +29,7 @@ def get_vmstat(self): stats = [0] * VmStat.nr_stat_items vm_stat = self.gdb_obj["vm_stat"] - for item in range (0, VmStat.nr_stat_items): + for item in range(0, VmStat.nr_stat_items): # TODO abstract atomic? stats[item] = int(vm_stat[item]["counter"]) return stats @@ -38,7 +38,7 @@ def add_vmstat_diffs(self, diffs): for cpu in for_each_online_cpu(): pageset = get_percpu_var(self.gdb_obj["pageset"], cpu) vmdiff = pageset["vm_stat_diff"] - for item in range (0, VmStat.nr_stat_items): + for item in range(0, VmStat.nr_stat_items): diffs[item] += int(vmdiff[item]) def get_vmstat_diffs(self): @@ -57,13 +57,13 @@ def _check_free_area(self, area, is_pcp): nr_free += 1 if page.get_nid() != self.nid or page.get_zid() != self.zid: print("page {:#x} misplaced on {} of zone {}:{}, has flags for zone {}:{}". - format(int(page_obj.address), "pcplist" if is_pcp else "freelist", - self.nid, self.zid, page.get_nid(), page.get_zid())) + format(int(page_obj.address), "pcplist" if is_pcp else "freelist", + self.nid, self.zid, page.get_nid(), page.get_zid())) nr_expected = area["count"] if is_pcp else area["nr_free"] if nr_free != nr_expected: print("nr_free mismatch in {} {}: expected {}, counted {}". 
- format("pcplist" if is_pcp else "area", area.address, - nr_expected, nr_free)) + format("pcplist" if is_pcp else "area", area.address, + nr_expected, nr_free)) def check_free_pages(self): for area in array_for_each(self.gdb_obj["free_area"]): @@ -82,4 +82,3 @@ def for_each_populated_zone(): for zone in for_each_zone(): if zone.is_populated(): yield zone - diff --git a/crash/util/__init__.py b/crash/util/__init__.py index d284f88e66e..e86dd92ed93 100644 --- a/crash/util/__init__.py +++ b/crash/util/__init__.py @@ -11,8 +11,8 @@ from crash.exceptions import MissingTypeError, MissingSymbolError from crash.exceptions import ArgumentTypeError, NotStructOrUnionError -TypeSpecifier = Union [ gdb.Type, gdb.Value, str, gdb.Symbol ] -AddressSpecifier = Union [ gdb.Value, str, int ] +TypeSpecifier = Union[gdb.Type, gdb.Value, str, gdb.Symbol] +AddressSpecifier = Union[gdb.Value, str, int] class InvalidComponentError(LookupError): """An error occured while resolving the member specification""" @@ -48,7 +48,7 @@ def __init__(self, member, gdbtype): self.member = member self.type = gdbtype -types = Types([ 'char *', 'uuid_t' ]) +types = Types(['char *', 'uuid_t']) def container_of(val: gdb.Value, gdbtype: gdb.Type, member) -> gdb.Value: """ @@ -108,8 +108,8 @@ def struct_has_member(gdbtype: TypeSpecifier, name: str) -> bool: except InvalidComponentError: return False -def get_symbol_value(symname: str, block: gdb.Block=None, - domain: int=None) -> gdb.Value: +def get_symbol_value(symname: str, block: gdb.Block = None, + domain: int = None) -> gdb.Value: """ Returns the value associated with a named symbol @@ -131,8 +131,8 @@ def get_symbol_value(symname: str, block: gdb.Block=None, return sym.value() raise MissingSymbolError("Cannot locate symbol {}".format(symname)) -def safe_get_symbol_value(symname: str, block: gdb.Block=None, - domain: int=None) -> gdb.Value: +def safe_get_symbol_value(symname: str, block: gdb.Block = None, + domain: int = None) -> gdb.Value: """ Returns the value associated with a named symbol @@ -218,7 +218,7 @@ def __offsetof(val, spec, error): return (offset, gdbtype) def offsetof_type(gdbtype: gdb.Type, member_name: str, - error: bool=True) -> Union[Tuple[int, gdb.Type], None]: + error: bool = True) -> Union[Tuple[int, gdb.Type], None]: """ Returns the offset and type of a named member of a structure @@ -258,7 +258,7 @@ def offsetof_type(gdbtype: gdb.Type, member_name: str, return None def offsetof(gdbtype: gdb.Type, member_name: str, - error: bool=True) -> Union[int, None]: + error: bool = True) -> Union[int, None]: """ Returns the offset of a named member of a structure @@ -306,7 +306,8 @@ def find_member_variant(gdbtype: gdb.Type, variants: List[str]) -> str: raise TypeError("Unrecognized '{}': could not find member '{}'" .format(str(gdbtype), variants[0])) -def safe_lookup_type(name: str, block: gdb.Block=None) -> Union[gdb.Type, None]: +def safe_lookup_type(name: str, + block: gdb.Block = None) -> Union[gdb.Type, None]: """ Looks up a gdb.Type without throwing an exception on failure @@ -387,7 +388,7 @@ def array_for_each(value: gdb.Value) -> Iterator[gdb.Value]: yield value[i] def decode_flags(value: gdb.Value, names: Dict[int, str], - separator: str="|") -> str: + separator: str = "|") -> str: """ Present a bitfield of individual flags in a human-readable format. 
@@ -444,8 +445,8 @@ def decode_uuid(value: gdb.Value) -> uuid.UUID: raise TypeError("value must be gdb.Value") if (value.type.code != gdb.TYPE_CODE_ARRAY or - value[0].type.sizeof != 1 or value.type.sizeof != 16): - raise TypeError("value must describe an array of 16 bytes") + value[0].type.sizeof != 1 or value.type.sizeof != 16): + raise TypeError("value must describe an array of 16 bytes") u = 0 for i in range(0, 16): @@ -472,7 +473,7 @@ def decode_uuid_t(value: gdb.Value) -> uuid.UUID: if value.type != types.uuid_t_type: if (value.type.code == gdb.TYPE_CODE_PTR and - value.type.target() == types.uuid_t_type): + value.type.target() == types.uuid_t_type): value = value.dereference() else: raise TypeError("value must describe a uuid_t") diff --git a/crash/util/symbols.py b/crash/util/symbols.py index 580294b8b30..e51b96599d9 100644 --- a/crash/util/symbols.py +++ b/crash/util/symbols.py @@ -52,7 +52,7 @@ def __init__(self, cls: Type[DelayedValue], names: Names): self.attrs: Dict[str, DelayedValue] = {} if isinstance(names, str): - names = [ names ] + names = [names] for name in names: t = cls(name) @@ -311,7 +311,7 @@ def __init__(self, names: Names): class CallbackCollection(object): def __init__(self, cls: Type[NamedCallback], cbs: CallbackSpecifiers): if isinstance(cbs, tuple): - cbs = [ cbs ] + cbs = [cbs] for cb in cbs: t = cls(cb[0], cb[1]) @@ -328,4 +328,3 @@ def __init__(self, cbs): class MinimalSymbolCallbacks(CallbackCollection): def __init__(self, cbs): super().__init__(MinimalSymbolCallback, cbs) - diff --git a/kdump/target.py b/kdump/target.py index 92ced9d1b4c..c7225a1f849 100644 --- a/kdump/target.py +++ b/kdump/target.py @@ -45,7 +45,7 @@ def open(self, filename, from_tty): self.kdump = kdumpfile(file=filename) except Exception as e: raise gdb.GdbError("Failed to open `{}': {}" - .format(filename, str(e))) + .format(filename, str(e))) self.kdump.attr['addrxlat.ostype'] = 'linux' ctx = self.kdump.get_addrxlat_ctx() diff --git a/tests/pylintrc-enforce b/tests/pylintrc-enforce index 18933e3048b..0f26f7684c9 100644 --- a/tests/pylintrc-enforce +++ b/tests/pylintrc-enforce @@ -65,7 +65,7 @@ disable=all # either give multiple identifier separated by comma (,) or put this option # multiple time (only on the command line, not in the configuration file where # it should appear only once). See also the "--disable" option for examples. -enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable +enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation [REPORTS] From ebeb0aedaa9a744fa31677ea96f13129a4438716 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Thu, 30 May 2019 20:53:27 -0400 Subject: [PATCH 166/367] crash.exceptions: share base class for unexpected gdb type exceptions The two exceptions for unexpected gdb types shouldn't stack as they don't share messages and it gets messy. Introduce a base type and share that instead. Signed-off-by: Jeff Mahoney --- crash/exceptions.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/crash/exceptions.py b/crash/exceptions.py index 44b4f92da96..2be5a158ada 100644 --- a/crash/exceptions.py +++ b/crash/exceptions.py @@ -45,16 +45,20 @@ def format_clsname(self, cls): else: return module + '.' 
+ cls.__name__ -class UnexpectedGDBTypeError(InvalidArgumentError): +class UnexpectedGDBTypeBaseError(InvalidArgumentError): + """Base class for unexpected gdb type exceptions""" + pass + +class UnexpectedGDBTypeError(UnexpectedGDBTypeBaseError): """The gdb.Type passed describes an inappropriate type for the operation""" formatter = "expected gdb.Type `{}' to describe `{}' not `{}'" def __init__(self, name, gdbtype, expected_type): msg = self.formatter.format(name, str(gdbtype), str(expected_type)) super().__init__(msg) -class NotStructOrUnionError(UnexpectedGDBTypeError): +class NotStructOrUnionError(UnexpectedGDBTypeBaseError): """The provided type is not a struct or union""" formatter = "argument `{}' describes type `{}' which is not a struct or union" def __init__(self, name, gdbtype): - super().__init__(name, gdbtype, gdbtype) msg = self.formatter.format(name, str(gdbtype)) + super().__init__(msg) From e4fec7263a0c5121bb4eb021a0b230a3defa8fa1 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Thu, 30 May 2019 20:55:24 -0400 Subject: [PATCH 167/367] lint: fix superfluous-parens complaints This commit fixes the following lint complaints and enables enforcement of the 'superfluous-parens' pylint rule. ************* Module crash.kernel C:149, 0: Unnecessary parens after 'elif' keyword (superfluous-parens) ************* Module crash.types.bitmap C:155, 0: Unnecessary parens after 'assert' keyword (superfluous-parens) C:228, 0: Unnecessary parens after 'assert' keyword (superfluous-parens) ************* Module crash.types.task C:108, 0: Unnecessary parens after 'assert' keyword (superfluous-parens) C:109, 0: Unnecessary parens after 'assert' keyword (superfluous-parens) C:110, 0: Unnecessary parens after 'assert' keyword (superfluous-parens) C:111, 0: Unnecessary parens after 'assert' keyword (superfluous-parens) C:131, 0: Unnecessary parens after 'assert' keyword (superfluous-parens) C:132, 0: Unnecessary parens after 'assert' keyword (superfluous-parens) C:133, 0: Unnecessary parens after 'assert' keyword (superfluous-parens) C:134, 0: Unnecessary parens after 'assert' keyword (superfluous-parens) C:136, 0: Unnecessary parens after 'assert' keyword (superfluous-parens) C:137, 0: Unnecessary parens after 'assert' keyword (superfluous-parens) C:138, 0: Unnecessary parens after 'assert' keyword (superfluous-parens) C:139, 0: Unnecessary parens after 'assert' keyword (superfluous-parens) C:142, 0: Unnecessary parens after 'assert' keyword (superfluous-parens) C:144, 0: Unnecessary parens after 'assert' keyword (superfluous-parens) C:146, 0: Unnecessary parens after 'assert' keyword (superfluous-parens) ************* Module crash.types.percpu C:292, 0: Unnecessary parens after 'assert' keyword (superfluous-parens) ************* Module crash.types.slab C: 89, 0: Unnecessary parens after 'if' keyword (superfluous-parens) C:411, 0: Unnecessary parens after 'if' keyword (superfluous-parens) Signed-off-by: Jeff Mahoney --- crash/kernel.py | 2 +- crash/types/bitmap.py | 4 ++-- crash/types/percpu.py | 2 +- crash/types/slab.py | 4 ++-- crash/types/task.py | 30 +++++++++++++++--------------- tests/pylintrc-enforce | 2 +- 6 files changed, 22 insertions(+), 22 deletions(-) diff --git a/crash/kernel.py b/crash/kernel.py index 7ee9b0412e2..a4b73d8c3d4 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -146,7 +146,7 @@ def __init__(self, roots: PathSpecifier = None, if x is None: x = ["/"] self.roots = x - elif (isinstance(roots, str)): + elif isinstance(roots, str): x = None if os.path.exists(roots): if 
x is None: diff --git a/crash/types/bitmap.py b/crash/types/bitmap.py index 1da76a21e32..f5cc356c4eb 100644 --- a/crash/types/bitmap.py +++ b/crash/types/bitmap.py @@ -152,7 +152,7 @@ def find_next_zero_bit(bitmap: gdb.Value, start: int, v = _find_first_set_bit(item) if v > 0: ret = n * (types.unsigned_long_type.sizeof << 3) + v - assert(ret >= start) + assert ret >= start return ret offset = 0 @@ -225,7 +225,7 @@ def find_next_set_bit(bitmap: gdb.Value, start: int, v = _find_first_set_bit(item) if v > 0: ret = n * (types.unsigned_long_type.sizeof << 3) + v - assert(ret >= start) + assert ret >= start return ret offset = 0 diff --git a/crash/types/percpu.py b/crash/types/percpu.py index 191ac9882a7..1adab5ce499 100644 --- a/crash/types/percpu.py +++ b/crash/types/percpu.py @@ -289,7 +289,7 @@ def _resolve_percpu_var(self, var): elif self.is_percpu_var(var): if var.type != types.void_p_type: var = var.dereference().address - assert(self.is_percpu_var(var)) + assert self.is_percpu_var(var) else: raise PerCPUError(orig_var) # object is a percpu diff --git a/crash/types/slab.py b/crash/types/slab.py index 95389d5abb9..5bedfbf6d8b 100644 --- a/crash/types/slab.py +++ b/crash/types/slab.py @@ -86,7 +86,7 @@ def __add_free_obj_by_idx(self, idx): objs_per_slab = self.kmem_cache.objs_per_slab bufsize = self.kmem_cache.buffer_size - if (idx >= objs_per_slab): + if idx >= objs_per_slab: self.__error(": free object index %d overflows %d" % (idx, objs_per_slab)) return False @@ -408,7 +408,7 @@ def __fill_percpu_caches(self): cpu_cache = self.gdb_obj[KmemCache.percpu_name] for cpu in for_each_online_cpu(): - if (KmemCache.percpu_cache): + if KmemCache.percpu_cache: array = get_percpu_var(cpu_cache, cpu) else: array = cpu_cache[cpu].dereference() diff --git a/crash/types/task.py b/crash/types/task.py index d96def2d031..73229597346 100644 --- a/crash/types/task.py +++ b/crash/types/task.py @@ -105,10 +105,10 @@ def _task_state_flags_callback(cls, symbol): cls.TASK_NOLOAD = newbits << 3 cls.TASK_NEW = newbits << 4 - assert(cls.TASK_PARKED == 0x0040) - assert(cls.TASK_DEAD == 0x0080) - assert(cls.TASK_WAKEKILL == 0x0100) - assert(cls.TASK_WAKING == 0x0200) + assert cls.TASK_PARKED == 0x0040 + assert cls.TASK_DEAD == 0x0080 + assert cls.TASK_WAKEKILL == 0x0100 + assert cls.TASK_WAKING == 0x0200 # Linux 3.14 removed several elements from task_state_array # so we'll have to make some assumptions. 
@@ -128,22 +128,22 @@ def _task_state_flags_callback(cls, symbol): cls.TASK_NOLOAD = newbits << 4 cls.TASK_NEW = newbits << 5 - assert(cls.TASK_DEAD == 0x0040) - assert(cls.TASK_WAKEKILL == 0x0080) - assert(cls.TASK_WAKING == 0x0100) - assert(cls.TASK_PARKED == 0x0200) + assert cls.TASK_DEAD == 0x0040 + assert cls.TASK_WAKEKILL == 0x0080 + assert cls.TASK_WAKING == 0x0100 + assert cls.TASK_PARKED == 0x0200 else: - assert(cls.TASK_DEAD == 64) - assert(cls.TASK_WAKEKILL == 128) - assert(cls.TASK_WAKING == 256) - assert(cls.TASK_PARKED == 512) + assert cls.TASK_DEAD == 64 + assert cls.TASK_WAKEKILL == 128 + assert cls.TASK_WAKING == 256 + assert cls.TASK_PARKED == 512 if cls.has_flag('TASK_NOLOAD'): - assert(cls.TASK_NOLOAD == 1024) + assert cls.TASK_NOLOAD == 1024 cls.TASK_IDLE = cls.TASK_NOLOAD | cls.TASK_UNINTERRUPTIBLE - assert(cls.TASK_IDLE == 1026) + assert cls.TASK_IDLE == 1026 if cls.has_flag('TASK_NEW'): - assert(cls.TASK_NEW == 2048) + assert cls.TASK_NEW == 2048 cls._check_state_bits() diff --git a/tests/pylintrc-enforce b/tests/pylintrc-enforce index 0f26f7684c9..839ffc3b779 100644 --- a/tests/pylintrc-enforce +++ b/tests/pylintrc-enforce @@ -65,7 +65,7 @@ disable=all # either give multiple identifier separated by comma (,) or put this option # multiple time (only on the command line, not in the configuration file where # it should appear only once). See also the "--disable" option for examples. -enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation +enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens [REPORTS] From 37322b77ffd4cc51f24d1b5cbb17cee4d01dd780 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Thu, 6 Jun 2019 11:22:02 -0400 Subject: [PATCH 168/367] kmem: move for_each_zone/for_each_populated_zone to crash.types.node Although it appears these are zone functions, they iterate over the nodes to get the full list of zones. As a result we get a cyclic import between node and zone. Signed-off-by: Jeff Mahoney --- crash/commands/kmem.py | 2 +- crash/types/node.py | 11 +++++++++++ crash/types/zone.py | 13 +------------ kernel-tests/test_types_zone.py | 2 +- 4 files changed, 14 insertions(+), 14 deletions(-) diff --git a/crash/commands/kmem.py b/crash/commands/kmem.py index 0f5bb8be380..7e2fc57591f 100644 --- a/crash/commands/kmem.py +++ b/crash/commands/kmem.py @@ -7,7 +7,7 @@ from crash.commands import CommandError, CommandLineError from crash.types.slab import kmem_cache_get_all, kmem_cache_from_name from crash.types.slab import slab_from_obj_addr -from crash.types.zone import for_each_zone, for_each_populated_zone +from crash.types.node import for_each_zone, for_each_populated_zone from crash.types.vmstat import VmStat from crash.util import get_symbol_value from crash.exceptions import MissingSymbolError diff --git a/crash/types/node.py b/crash/types/node.py index 42e02fdfd43..0603a0ad2a0 100644 --- a/crash/types/node.py +++ b/crash/types/node.py @@ -178,3 +178,14 @@ def for_each_online_node() -> Iterable[Node]: """ for nid in for_each_online_nid(): yield Node.from_nid(nid) + +def for_each_zone(): + for node in for_each_node(): + for zone in node.for_each_zone(): + yield zone + +def for_each_populated_zone(): + #TODO: some filter thing? 
+ for zone in for_each_zone(): + if zone.is_populated(): + yield zone diff --git a/crash/types/zone.py b/crash/types/zone.py index 2424928f7f9..5c062454b50 100644 --- a/crash/types/zone.py +++ b/crash/types/zone.py @@ -4,11 +4,11 @@ import gdb from crash.util import container_of, find_member_variant, array_for_each from crash.util.symbols import Types -import crash.types.node from crash.types.percpu import get_percpu_var from crash.types.vmstat import VmStat from crash.types.cpu import for_each_online_cpu from crash.types.list import list_for_each_entry +import crash.types.page class Zone(object): @@ -71,14 +71,3 @@ def check_free_pages(self): for cpu in for_each_online_cpu(): pageset = get_percpu_var(self.gdb_obj["pageset"], cpu) self._check_free_area(pageset["pcp"], True) - -def for_each_zone(): - for node in crash.types.node.for_each_node(): - for zone in node.for_each_zone(): - yield zone - -def for_each_populated_zone(): - #TODO: some filter thing? - for zone in for_each_zone(): - if zone.is_populated(): - yield zone diff --git a/kernel-tests/test_types_zone.py b/kernel-tests/test_types_zone.py index 45d65fd489e..ef56ae7338c 100644 --- a/kernel-tests/test_types_zone.py +++ b/kernel-tests/test_types_zone.py @@ -19,7 +19,7 @@ def test_for_each_zone(self): def test_for_each_populated_zone(self): count = 0 - for zone in mmzone.for_each_populated_zone(): + for zone in numa_node.for_each_populated_zone(): self.assertTrue(type(zone) is mmzone.Zone) count += 1 From c96210ce2c4ee11470d8bff0de6f30bd38d51945 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Thu, 30 May 2019 22:08:49 -0400 Subject: [PATCH 169/367] lint: fix import warnings and complaints This commit fixes the following lint warnings and complaints and enables enforcement of the 'multiple-imports', 'wrong-import-order', 'ungrouped-imports', 'wrong-import-position', 'import-error', 'relative-beyond-top-level', 'cyclic-import', 'wildcard-import', 'deprecated-module', 'reimported', 'import-self', 'unused-import', and 'unused-wildcard-import' pylint rules. 
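For context, the ordering these checks ask for is: standard-library imports first, then external and package-local imports, with imports from the same package kept together and anything unused dropped. A minimal sketch of a module header arranged that way is shown below; the module names are illustrative only, and placing the gdb import last simply mirrors how several of the hunks below resolve the warning rather than being a requirement of pylint itself. The full pylint report that motivated the change follows the sketch.

    # Illustrative module header only -- not taken verbatim from any file
    # in this series.  It assumes the crash-python environment, where the
    # gdb module is provided by the embedded interpreter.

    # standard-library imports first
    import argparse
    import re

    # imports from the crash package grouped together
    from crash.commands import Command, ArgumentParser
    from crash.types.list import list_for_each_entry

    # several files below end up placing the gdb import last
    import gdb
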
************* Module crash.kernel W: 13, 0: Reimport 'list_for_each_entry' (imported line 12) (reimported) W: 12, 0: Unused list_for_each_entry imported from crash.types.list (unused-import) C: 5, 0: standard import "import sys" should be placed before "import gdb" (wrong-import-order) C: 6, 0: standard import "import re" should be placed before "import gdb" (wrong-import-order) C: 7, 0: standard import "import fnmatch" should be placed before "import gdb" (wrong-import-order) C: 8, 0: standard import "import os.path" should be placed before "import gdb" (wrong-import-order) C: 15, 0: third party import "from elftools.elf.elffile import ELFFile" should be placed before "import gdb" (wrong-import-order) C: 20, 0: standard import "from typing import Pattern, Union, List, Dict, Any" should be placed before "from elftools.elf.elffile import ELFFile" (wrong-import-order) C: 16, 0: Imports from package crash are not grouped (ungrouped-imports) ************* Module crash.session W: 5, 0: Unused import sys (unused-import) C: 5, 0: standard import "import sys" should be placed before "import gdb" (wrong-import-order) ************* Module crash.cache.tasks W: 4, 0: Unused import gdb (unused-import) W: 5, 0: Unused CrashCache imported from crash.cache (unused-import) ************* Module crash.cache.vm W: 4, 0: Unused import gdb (unused-import) ************* Module crash.cache C: 6, 0: standard import "import os" should be placed before "import gdb" (wrong-import-order) C: 7, 0: standard import "import glob" should be placed before "import gdb" (wrong-import-order) C: 8, 0: standard import "import importlib" should be placed before "import gdb" (wrong-import-order) ************* Module crash.cache.slab W: 4, 0: Unused import gdb (unused-import) W: 5, 0: Unused list_for_each_entry imported from crash.types.list (unused-import) ************* Module crash.cache.syscache C: 9, 0: standard import "import re" should be placed before "import gdb" (wrong-import-order) C: 10, 0: standard import "import zlib" should be placed before "import gdb" (wrong-import-order) C: 11, 0: standard import "from datetime import timedelta" should be placed before "import gdb" (wrong-import-order) ************* Module crash.commands.syscmd W: 4, 0: Unused import gdb (unused-import) ************* Module crash.commands C: 8, 0: standard import "import os" should be placed before "import gdb" (wrong-import-order) C: 9, 0: standard import "import glob" should be placed before "import gdb" (wrong-import-order) C: 10, 0: standard import "import importlib" should be placed before "import gdb" (wrong-import-order) C: 11, 0: standard import "import argparse" should be placed before "import gdb" (wrong-import-order) ************* Module crash.commands.vtop W: 4, 0: Unused import gdb (unused-import) C: 5, 0: standard import "import argparse" should be placed before "import gdb" (wrong-import-order) C: 8, 0: third party import "import addrxlat" should be placed before "import gdb" (wrong-import-order) ************* Module crash.commands.lsmod W: 4, 0: Unused import gdb (unused-import) W: 15, 0: Unused import crash.types.percpu (unused-import) C: 5, 0: standard import "import re" should be placed before "import gdb" (wrong-import-order) C: 6, 0: standard import "import fnmatch" should be placed before "import gdb" (wrong-import-order) C: 7, 0: standard import "import argparse" should be placed before "import gdb" (wrong-import-order) ************* Module crash.commands.btrfs W: 7, 0: Unused DelayedAttributeError imported from 
crash.exceptions (unused-import) ************* Module crash.commands.dmesg W: 4, 0: Unused import gdb (unused-import) W: 5, 0: Unused import os.path (unused-import) W: 6, 0: Unused import argparse (unused-import) C: 5, 0: standard import "import os.path" should be placed before "import gdb" (wrong-import-order) C: 6, 0: standard import "import argparse" should be placed before "import gdb" (wrong-import-order) C: 7, 0: standard import "import re" should be placed before "import gdb" (wrong-import-order) ************* Module crash.commands.task C: 7, 0: standard import "import argparse" should be placed before "import gdb" (wrong-import-order) ************* Module crash.commands.xfs W: 5, 0: Unused import os.path (unused-import) W: 6, 0: Unused import argparse (unused-import) W: 7, 0: Unused import re (unused-import) W: 12, 0: Unused DelayedAttributeError imported from crash.exceptions (unused-import) W: 21, 0: Unused XFS_LI_EFD imported from crash.subsystem.filesystem.xfs (unused-import) W: 22, 0: Unused XFS_LI_IUNLINK imported from crash.subsystem.filesystem.xfs (unused-import) C: 5, 0: standard import "import os.path" should be placed before "import gdb" (wrong-import-order) C: 6, 0: standard import "import argparse" should be placed before "import gdb" (wrong-import-order) C: 7, 0: standard import "import re" should be placed before "import gdb" (wrong-import-order) C: 9, 0: standard import "from argparse import Namespace" should be placed before "import gdb" (wrong-import-order) C: 9, 0: Imports from package argparse are not grouped (ungrouped-imports) ************* Module crash.commands.mount W: 9, 0: Unused MNT_NOSUID imported from crash.subsystem.filesystem.mount (unused-import) W: 9, 0: Unused MNT_NODEV imported from crash.subsystem.filesystem.mount (unused-import) W: 9, 0: Unused MNT_NOEXEC imported from crash.subsystem.filesystem.mount (unused-import) W: 10, 0: Unused MNT_NOATIME imported from crash.subsystem.filesystem.mount (unused-import) W: 10, 0: Unused MNT_NODIRATIME imported from crash.subsystem.filesystem.mount (unused-import) W: 11, 0: Unused MNT_RELATIME imported from crash.subsystem.filesystem.mount (unused-import) W: 11, 0: Unused MNT_READONLY imported from crash.subsystem.filesystem.mount (unused-import) W: 12, 0: Unused MNT_SHRINKABLE imported from crash.subsystem.filesystem.mount (unused-import) W: 12, 0: Unused MNT_WRITE_HOLD imported from crash.subsystem.filesystem.mount (unused-import) W: 13, 0: Unused MNT_SHARED imported from crash.subsystem.filesystem.mount (unused-import) W: 13, 0: Unused MNT_UNBINDABLE imported from crash.subsystem.filesystem.mount (unused-import) C: 6, 0: standard import "from argparse import Namespace" should be placed before "import gdb" (wrong-import-order) ************* Module crash.commands.help W: 4, 0: Unused import gdb (unused-import) C: 5, 0: standard import "import argparse" should be placed before "import gdb" (wrong-import-order) ************* Module crash.commands.ps W: 10, 0: Unused CommandLineError imported from crash.commands (unused-import) C: 5, 0: standard import "import argparse" should be placed before "import gdb" (wrong-import-order) C: 6, 0: standard import "import fnmatch" should be placed before "import gdb" (wrong-import-order) C: 7, 0: standard import "import re" should be placed before "import gdb" (wrong-import-order) ************* Module crash.commands.kmem W: 4, 0: Unused import gdb (unused-import) W: 5, 0: Unused import crash (unused-import) ************* Module crash.infra.callback W: 7, 0: Unused import 
traceback (unused-import) W: 8, 0: Unused import sys (unused-import) C: 7, 0: standard import "import traceback" should be placed before "import gdb" (wrong-import-order) C: 8, 0: standard import "import sys" should be placed before "import gdb" (wrong-import-order) ************* Module crash.infra.lookup W: 8, 0: Unused import crash.infra (unused-import) C: 6, 0: standard import "from typing import Tuple, Any, Union" should be placed before "import gdb" (wrong-import-order) ************* Module crash.subsystem.filesystem.btrfs C: 5, 0: standard import "import uuid" should be placed before "import gdb" (wrong-import-order) ************* Module crash.subsystem.filesystem.xfs W: 11, 0: Unused Union imported from typing (unused-import) C: 9, 0: standard import "import uuid" should be placed before "import gdb" (wrong-import-order) C: 11, 0: standard import "from typing import Union, Iterable" should be placed before "import gdb" (wrong-import-order) ************* Module crash.subsystem.filesystem.mount C: 17, 0: standard import "from typing import Iterator" should be placed before "import gdb" (wrong-import-order) ************* Module crash.subsystem.storage.decoders C: 5, 0: standard import "from typing import Union, List, Dict, Iterable" should be placed before "import gdb" (wrong-import-order) ************* Module crash.util W: 9, 0: Reimport 'Dict' (imported line 4) (reimported) C: 7, 0: standard import "import uuid" should be placed before "import gdb" (wrong-import-order) C: 9, 0: standard import "from typing import Dict" should be placed before "import gdb" (wrong-import-order) C: 9, 0: Imports from package typing are not grouped (ungrouped-imports) ************* Module crash.types.bitmap W: 17, 0: Unused log imported from math (unused-import) C: 17, 0: standard import "from math import log" should be placed before "import gdb" (wrong-import-order) ************* Module crash.types.node W: 11, 0: Unused container_of imported from crash.util (unused-import) W: 11, 0: Unused find_member_variant imported from crash.util (unused-import) W: 11, 0: Unused get_symbol_value imported from crash.util (unused-import) ************* Module crash.types.percpu W: 8, 0: Unused MinimalSymbols imported from crash.util.symbols (unused-import) W: 13, 0: Unused find_first_set_bit imported from crash.types.bitmap (unused-import) W: 13, 0: Unused find_last_set_bit imported from crash.types.bitmap (unused-import) W: 14, 0: Unused find_next_set_bit imported from crash.types.bitmap (unused-import) W: 14, 0: Unused find_next_zero_bit imported from crash.types.bitmap (unused-import) ************* Module crash.types.cpu W: 14, 0: Reimport 'List' (imported line 7) (reimported) W: 14, 0: Reimport 'Iterable' (imported line 7) (reimported) C: 14, 0: standard import "from typing import List, Iterable" should be placed before "import gdb" (wrong-import-order) ************* Module crash.types.page W: 8, 0: Unused container_of imported from crash.util (unused-import) ************* Module crash.types.slab W: 5, 0: Unused import crash (unused-import) W: 8, 0: Unused get_symbol_value imported from crash.util (unused-import) W: 9, 0: Unused safe_get_symbol_value imported from crash.util (unused-import) W: 13, 0: Unused Page imported from crash.types.page (unused-import) C: 6, 0: standard import "import sys" should be placed before "import gdb" (wrong-import-order) C: 7, 0: standard import "import traceback" should be placed before "import gdb" (wrong-import-order) ************* Module crash.types.vmstat W: 4, 0: Unused import 
gdb (unused-import) W: 5, 0: Unused container_of imported from crash.util (unused-import) W: 5, 0: Unused find_member_variant imported from crash.util (unused-import) W: 7, 0: Unused import crash.types.node (unused-import) ************* Module crash.types.zone W: 4, 0: Unused import gdb (unused-import) W: 5, 0: Unused container_of imported from crash.util (unused-import) W: 5, 0: Unused find_member_variant imported from crash.util (unused-import) ************* Module kdump.target W: 7, 0: Wildcard import kdumpfile.exceptions (wildcard-import) W: 7, 0: Unused import KDUMP_OK from wildcard import (unused-wildcard-import) W: 7, 0: Unused import KDUMP_ERR_SYSTEM from wildcard import (unused-wildcard-import) W: 7, 0: Unused import KDUMP_ERR_NOTIMPL from wildcard import (unused-wildcard-import) W: 7, 0: Unused import KDUMP_ERR_NODATA from wildcard import (unused-wildcard-import) W: 7, 0: Unused import KDUMP_ERR_CORRUPT from wildcard import (unused-wildcard-import) W: 7, 0: Unused import KDUMP_ERR_INVALID from wildcard import (unused-wildcard-import) W: 7, 0: Unused import KDUMP_ERR_EOF from wildcard import (unused-wildcard-import) W: 7, 0: Unused import KDUMP_ERR_NOKEY from wildcard import (unused-wildcard-import) W: 7, 0: Unused import KDUMP_ERR_BUSY from wildcard import (unused-wildcard-import) W: 7, 0: Unused import KDUMP_ERR_ADDRXLAT from wildcard import (unused-wildcard-import) W: 7, 0: Unused import KDumpBaseException from wildcard import (unused-wildcard-import) W: 7, 0: Unused import OSErrorException from wildcard import (unused-wildcard-import) W: 7, 0: Unused import NotImplementedException from wildcard import (unused-wildcard-import) W: 7, 0: Unused import CorruptException from wildcard import (unused-wildcard-import) W: 7, 0: Unused import InvalidException from wildcard import (unused-wildcard-import) W: 7, 0: Unused import NoKeyException from wildcard import (unused-wildcard-import) W: 7, 0: Unused import BusyException from wildcard import (unused-wildcard-import) C: 5, 0: standard import "import sys" should be placed before "import gdb" (wrong-import-order) C: 6, 0: third party import "from kdumpfile import kdumpfile, KDUMP_KVADDR" should be placed before "import gdb" (wrong-import-order) C: 7, 0: third party import "from kdumpfile.exceptions import *" should be placed before "import gdb" (wrong-import-order) C: 8, 0: third party import "import addrxlat" should be placed before "import gdb" (wrong-import-order) R: 1, 0: Cyclic import (crash.types.node -> crash.types.zone -> crash.types.vmstat) (cyclic-import) Signed-off-by: Jeff Mahoney --- crash/arch/ppc64.py | 4 ++-- crash/arch/x86_64.py | 4 ++-- crash/cache/__init__.py | 4 ++-- crash/cache/slab.py | 2 -- crash/cache/syscache.py | 2 +- crash/cache/tasks.py | 3 --- crash/cache/vm.py | 3 +-- crash/commands/__init__.py | 4 ++-- crash/commands/btrfs.py | 1 - crash/commands/dmesg.py | 3 --- crash/commands/help.py | 2 +- crash/commands/kmem.py | 2 -- crash/commands/lsmod.py | 2 -- crash/commands/mount.py | 10 +++------- crash/commands/ps.py | 5 +++-- crash/commands/syscmd.py | 1 - crash/commands/task.py | 6 ++++-- crash/commands/vtop.py | 4 ++-- crash/commands/xfs.py | 13 +++++-------- crash/infra/callback.py | 2 -- crash/infra/lookup.py | 5 ++--- crash/kernel.py | 11 ++++++----- crash/session.py | 5 ++--- crash/subsystem/filesystem/__init__.py | 3 ++- crash/subsystem/filesystem/btrfs.py | 3 ++- crash/subsystem/filesystem/decoders.py | 3 ++- crash/subsystem/filesystem/ext3.py | 4 ++-- crash/subsystem/filesystem/mount.py | 4 ++-- 
crash/subsystem/filesystem/xfs.py | 7 ++++--- crash/subsystem/storage/__init__.py | 6 +++--- crash/subsystem/storage/blocksq.py | 4 ++-- crash/subsystem/storage/decoders.py | 3 ++- crash/subsystem/storage/device_mapper.py | 4 ++-- crash/types/bitmap.py | 5 ++--- crash/types/classdev.py | 4 ++-- crash/types/cpu.py | 3 +-- crash/types/klist.py | 4 ++-- crash/types/list.py | 3 ++- crash/types/module.py | 3 ++- crash/types/node.py | 6 +++--- crash/types/page.py | 6 ++++-- crash/types/percpu.py | 7 +++---- crash/types/slab.py | 10 +++++----- crash/types/task.py | 3 ++- crash/types/vmstat.py | 4 ---- crash/types/zone.py | 3 +-- crash/util/__init__.py | 4 ++-- crash/util/symbols.py | 4 ++-- kdump/target.py | 15 +++++++++------ tests/pylintrc-enforce | 2 +- 50 files changed, 104 insertions(+), 121 deletions(-) diff --git a/crash/arch/ppc64.py b/crash/arch/ppc64.py index 1d361f6ef97..a7b9085794a 100644 --- a/crash/arch/ppc64.py +++ b/crash/arch/ppc64.py @@ -1,10 +1,10 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -import gdb - from crash.arch import CrashArchitecture, register, KernelFrameFilter +import gdb + class Powerpc64Architecture(CrashArchitecture): ident = "powerpc:common64" aliases = ["ppc64", "elf64-powerpc"] diff --git a/crash/arch/x86_64.py b/crash/arch/x86_64.py index 7febdd2ab79..f3bc698db2d 100644 --- a/crash/arch/x86_64.py +++ b/crash/arch/x86_64.py @@ -1,10 +1,10 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -import gdb - from crash.arch import CrashArchitecture, register, KernelFrameFilter +import gdb + class x86_64Architecture(CrashArchitecture): ident = "i386:x86-64" aliases = ["x86_64"] diff --git a/crash/cache/__init__.py b/crash/cache/__init__.py index 2afb4a5aa6e..b9bb320071b 100644 --- a/crash/cache/__init__.py +++ b/crash/cache/__init__.py @@ -1,14 +1,14 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -import gdb - import os import glob import importlib from crash.infra import autoload_submodules +import gdb + class CrashCache(object): def refresh(self): pass diff --git a/crash/cache/slab.py b/crash/cache/slab.py index 13270230d86..673386fb3c2 100644 --- a/crash/cache/slab.py +++ b/crash/cache/slab.py @@ -1,8 +1,6 @@ #!/usr/bin/python3 # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -import gdb -from crash.types.list import list_for_each_entry from crash.cache import CrashCache class CrashCacheSlab(CrashCache): diff --git a/crash/cache/syscache.py b/crash/cache/syscache.py index 18741aa83bc..399c42b14ab 100644 --- a/crash/cache/syscache.py +++ b/crash/cache/syscache.py @@ -5,7 +5,6 @@ from builtins import round -import gdb import re import zlib from datetime import timedelta @@ -16,6 +15,7 @@ from crash.util.symbols import Types, Symvals, SymbolCallbacks, MinimalSymvals from crash.infra.lookup import DelayedValue +import gdb ImageLocation = Dict[str, Dict[str, int]] diff --git a/crash/cache/tasks.py b/crash/cache/tasks.py index c3909a96a40..7bc15f948d3 100644 --- a/crash/cache/tasks.py +++ b/crash/cache/tasks.py @@ -1,9 +1,6 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -import gdb -from crash.cache import CrashCache - tasks = {} def cache_task(task): diff --git a/crash/cache/vm.py b/crash/cache/vm.py index 3a0cea75605..2cfda3bea13 100644 --- a/crash/cache/vm.py +++ b/crash/cache/vm.py @@ -1,9 +1,8 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -import gdb 
- from crash.cache import CrashCache + class CrashCacheVM(CrashCache): def __init__(self): super().__init__() diff --git a/crash/commands/__init__.py b/crash/commands/__init__.py index 9c40f3ce538..5f28d7af779 100644 --- a/crash/commands/__init__.py +++ b/crash/commands/__init__.py @@ -3,8 +3,6 @@ from typing import Dict -import gdb - import os import glob import importlib @@ -12,6 +10,8 @@ from crash.exceptions import DelayedAttributeError, ArgumentTypeError +import gdb + class CommandError(RuntimeError): pass diff --git a/crash/commands/btrfs.py b/crash/commands/btrfs.py index fcf868e272b..fadd14534d2 100644 --- a/crash/commands/btrfs.py +++ b/crash/commands/btrfs.py @@ -4,7 +4,6 @@ from argparse import Namespace from crash.commands import Command, ArgumentParser from crash.commands import CommandLineError -from crash.exceptions import DelayedAttributeError from crash.subsystem.filesystem import for_each_super_block, super_fstype from crash.subsystem.filesystem.btrfs import btrfs_fsid, btrfs_metadata_uuid diff --git a/crash/commands/dmesg.py b/crash/commands/dmesg.py index c7659c2e488..327ba40df24 100644 --- a/crash/commands/dmesg.py +++ b/crash/commands/dmesg.py @@ -1,9 +1,6 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -import gdb -import os.path -import argparse import re from crash.commands import Command, ArgumentParser, CommandError diff --git a/crash/commands/help.py b/crash/commands/help.py index dc60a67506d..422e06b08ba 100644 --- a/crash/commands/help.py +++ b/crash/commands/help.py @@ -1,8 +1,8 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -import gdb import argparse + from crash.commands import Command, CommandError, ArgumentParser class _Parser(ArgumentParser): diff --git a/crash/commands/kmem.py b/crash/commands/kmem.py index 7e2fc57591f..a9f2097e91f 100644 --- a/crash/commands/kmem.py +++ b/crash/commands/kmem.py @@ -1,8 +1,6 @@ #!/usr/bin/python3 # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -import gdb -import crash from crash.commands import Command, ArgumentParser from crash.commands import CommandError, CommandLineError from crash.types.slab import kmem_cache_get_all, kmem_cache_from_name diff --git a/crash/commands/lsmod.py b/crash/commands/lsmod.py index 3c08bed9fe2..6fac6349934 100644 --- a/crash/commands/lsmod.py +++ b/crash/commands/lsmod.py @@ -1,7 +1,6 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -import gdb import re import fnmatch import argparse @@ -12,7 +11,6 @@ from crash.util.symbols import Types from crash.types.list import list_for_each_entry from crash.types.percpu import get_percpu_var -import crash.types.percpu class _Parser(ArgumentParser): """ diff --git a/crash/commands/mount.py b/crash/commands/mount.py index ce8242b2773..55e17a9b70e 100644 --- a/crash/commands/mount.py +++ b/crash/commands/mount.py @@ -1,21 +1,17 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -import gdb - from argparse import Namespace + from crash.commands import Command, ArgumentParser from crash.types.task import LinuxTask -from crash.subsystem.filesystem.mount import MNT_NOSUID, MNT_NODEV, MNT_NOEXEC -from crash.subsystem.filesystem.mount import MNT_NOATIME, MNT_NODIRATIME -from crash.subsystem.filesystem.mount import MNT_RELATIME, MNT_READONLY -from crash.subsystem.filesystem.mount import MNT_SHRINKABLE, MNT_WRITE_HOLD -from crash.subsystem.filesystem.mount import MNT_SHARED, MNT_UNBINDABLE 
from crash.subsystem.filesystem.mount import d_path, for_each_mount from crash.subsystem.filesystem.mount import mount_device, mount_fstype from crash.subsystem.filesystem.mount import mount_super, mount_flags from crash.subsystem.filesystem.mount import mount_root +import gdb + class _Parser(ArgumentParser): """ NAME diff --git a/crash/commands/ps.py b/crash/commands/ps.py index 7b21a2fba00..df8c4dca01e 100755 --- a/crash/commands/ps.py +++ b/crash/commands/ps.py @@ -1,15 +1,16 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -import gdb import argparse import fnmatch import re from crash.commands import Command, ArgumentParser -from crash.commands import CommandLineError, CommandError +from crash.commands import CommandError from crash.types.task import LinuxTask, TaskStateFlags as TF +import gdb + class TaskFormat(object): """ This class is responsible for converting the arguments into formatting diff --git a/crash/commands/syscmd.py b/crash/commands/syscmd.py index b46064c9d27..41fc755ca11 100644 --- a/crash/commands/syscmd.py +++ b/crash/commands/syscmd.py @@ -1,7 +1,6 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -import gdb from crash.commands import Command, ArgumentParser from crash.commands import CommandLineError from crash.cache.syscache import utsname, config, kernel diff --git a/crash/commands/task.py b/crash/commands/task.py index 296c12d1a98..44fa458dcd6 100644 --- a/crash/commands/task.py +++ b/crash/commands/task.py @@ -1,10 +1,12 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -import gdb +import argparse + from crash.commands import Command, ArgumentParser import crash.cache.tasks -import argparse + +import gdb class _Parser(ArgumentParser): """ diff --git a/crash/commands/vtop.py b/crash/commands/vtop.py index 4f79d2e34c6..3475f124ca0 100644 --- a/crash/commands/vtop.py +++ b/crash/commands/vtop.py @@ -1,11 +1,11 @@ #!/usr/bin/python3 # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -import gdb import argparse +import addrxlat + from crash.commands import Command, ArgumentParser from crash.addrxlat import CrashAddressTranslation -import addrxlat class LinuxPGT(object): table_names = ('PTE', 'PMD', 'PUD', 'PGD') diff --git a/crash/commands/xfs.py b/crash/commands/xfs.py index 56cc8e04836..5561f79aefb 100644 --- a/crash/commands/xfs.py +++ b/crash/commands/xfs.py @@ -1,15 +1,10 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -import gdb -import os.path -import argparse -import re - from argparse import Namespace + from crash.commands import Command, ArgumentParser from crash.commands import CommandLineError, CommandError -from crash.exceptions import DelayedAttributeError from crash.types.list import list_for_each_entry, list_empty from crash.subsystem.filesystem import for_each_super_block, get_super_block from crash.subsystem.filesystem import super_flags @@ -18,8 +13,8 @@ from crash.subsystem.filesystem.xfs import xfs_log_item_typed from crash.subsystem.filesystem.xfs import xfs_format_xfsbuf from crash.subsystem.filesystem.xfs import XFS_LI_TYPES -from crash.subsystem.filesystem.xfs import XFS_LI_EFI, XFS_LI_EFD -from crash.subsystem.filesystem.xfs import XFS_LI_IUNLINK, XFS_LI_INODE +from crash.subsystem.filesystem.xfs import XFS_LI_EFI +from crash.subsystem.filesystem.xfs import XFS_LI_INODE from crash.subsystem.filesystem.xfs import XFS_LI_BUF, XFS_LI_DQUOT from crash.subsystem.filesystem.xfs 
import XFS_LI_QUOTAOFF, XFS_BLI_FLAGS from crash.subsystem.filesystem.xfs import XFS_DQ_FLAGS @@ -28,6 +23,8 @@ from crash.util import decode_flags from crash.util.symbols import Types +import gdb + types = Types(['struct xfs_buf *']) class _Parser(ArgumentParser): diff --git a/crash/infra/callback.py b/crash/infra/callback.py index 88b4b5753f0..45de05b751a 100644 --- a/crash/infra/callback.py +++ b/crash/infra/callback.py @@ -4,8 +4,6 @@ from typing import Callable, Any, Union import gdb -import traceback -import sys Callback = Callable[[Any], Union[bool, None]] diff --git a/crash/infra/lookup.py b/crash/infra/lookup.py index 5883c722ac5..126f902e90e 100644 --- a/crash/infra/lookup.py +++ b/crash/infra/lookup.py @@ -1,15 +1,14 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -import gdb - from typing import Tuple, Any, Union -import crash.infra from crash.infra.callback import ObjfileEventCallback from crash.infra.callback import Callback from crash.exceptions import DelayedAttributeError +import gdb + class NamedCallback(ObjfileEventCallback): """ A base class for Callbacks with names diff --git a/crash/kernel.py b/crash/kernel.py index a4b73d8c3d4..23476339010 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -1,23 +1,24 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -import gdb +from typing import Pattern, Union, List, Dict, Any + import sys import re import fnmatch import os.path + +from elftools.elf.elffile import ELFFile + import crash.arch import crash.arch.x86_64 import crash.arch.ppc64 -from crash.types.list import list_for_each_entry -from crash.types.list import list_for_each_entry from crash.types.module import for_each_module, for_each_module_section -from elftools.elf.elffile import ELFFile from crash.util import get_symbol_value from crash.util.symbols import Types, Symvals, Symbols from crash.exceptions import MissingSymbolError, InvalidArgumentError -from typing import Pattern, Union, List, Dict, Any +import gdb class CrashKernelError(RuntimeError): pass diff --git a/crash/session.py b/crash/session.py index c2805808ce7..d692789c620 100644 --- a/crash/session.py +++ b/crash/session.py @@ -1,12 +1,11 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -import gdb -import sys - from crash.infra import autoload_submodules from crash.kernel import CrashKernel, CrashKernelError +import gdb + class Session(object): """ crash.Session is the main driver component for crash-python diff --git a/crash/subsystem/filesystem/__init__.py b/crash/subsystem/filesystem/__init__.py index 888bb2a7832..314c8328905 100644 --- a/crash/subsystem/filesystem/__init__.py +++ b/crash/subsystem/filesystem/__init__.py @@ -3,13 +3,14 @@ from typing import Iterable, Union -import gdb from crash.util import container_of, get_typed_pointer, decode_flags from crash.util.symbols import Types, Symvals from crash.infra.lookup import DelayedSymval, DelayedType from crash.types.list import list_for_each_entry from crash.subsystem.storage import block_device_name +import gdb + types = Types('struct super_block') symvals = Symvals('super_blocks') diff --git a/crash/subsystem/filesystem/btrfs.py b/crash/subsystem/filesystem/btrfs.py index 25e6a76a2d3..241b1d7c8f8 100644 --- a/crash/subsystem/filesystem/btrfs.py +++ b/crash/subsystem/filesystem/btrfs.py @@ -1,7 +1,6 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -import gdb import uuid from crash.exceptions 
import InvalidArgumentError @@ -9,6 +8,8 @@ from crash.util.symbols import Types from crash.subsystem.filesystem import is_fstype_super +import gdb + types = Types(['struct btrfs_inode', 'struct btrfs_fs_info *', 'struct btrfs_fs_info']) diff --git a/crash/subsystem/filesystem/decoders.py b/crash/subsystem/filesystem/decoders.py index eea1596286a..937d9424085 100644 --- a/crash/subsystem/filesystem/decoders.py +++ b/crash/subsystem/filesystem/decoders.py @@ -1,12 +1,13 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -import gdb from crash.util.symbols import Types from crash.subsystem.storage import block_device_name from crash.subsystem.storage.decoders import Decoder, decode_bh from crash.subsystem.filesystem import super_fstype +import gdb + class DIOBioDecoder(Decoder): """ Decodes a bio used for direct i/o. diff --git a/crash/subsystem/filesystem/ext3.py b/crash/subsystem/filesystem/ext3.py index 3361a99352c..4004a6c3744 100644 --- a/crash/subsystem/filesystem/ext3.py +++ b/crash/subsystem/filesystem/ext3.py @@ -1,11 +1,11 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -import gdb - from crash.subsystem.storage import block_device_name from crash.subsystem.storage.decoders import Decoder +import gdb + class Ext3Decoder(Decoder): """ Decodes an ext3 journal buffer diff --git a/crash/subsystem/filesystem/mount.py b/crash/subsystem/filesystem/mount.py index 021c477db55..bc722c13cc8 100644 --- a/crash/subsystem/filesystem/mount.py +++ b/crash/subsystem/filesystem/mount.py @@ -12,8 +12,6 @@ will be required and/or returned instead. """ -import gdb - from typing import Iterator from crash.subsystem.filesystem import super_fstype @@ -21,6 +19,8 @@ from crash.util import container_of, decode_flags, struct_has_member from crash.util.symbols import Types, Symvals, TypeCallbacks, SymbolCallbacks +import gdb + MNT_NOSUID = 0x01 MNT_NODEV = 0x02 MNT_NOEXEC = 0x04 diff --git a/crash/subsystem/filesystem/xfs.py b/crash/subsystem/filesystem/xfs.py index a2c62013c77..c60b04213eb 100644 --- a/crash/subsystem/filesystem/xfs.py +++ b/crash/subsystem/filesystem/xfs.py @@ -5,10 +5,9 @@ XFS file systems. 
""" -import gdb -import uuid +from typing import Iterable -from typing import Union, Iterable +import uuid from crash.exceptions import InvalidArgumentError from crash.types.list import list_for_each_entry @@ -19,6 +18,8 @@ from crash.subsystem.storage import block_device_name from crash.subsystem.storage.decoders import Decoder +import gdb + # XFS inode locks XFS_IOLOCK_EXCL = 0x01 XFS_IOLOCK_SHARED = 0x02 diff --git a/crash/subsystem/storage/__init__.py b/crash/subsystem/storage/__init__.py index d04c35b30fc..5af75bfca54 100644 --- a/crash/subsystem/storage/__init__.py +++ b/crash/subsystem/storage/__init__.py @@ -3,14 +3,14 @@ from typing import Iterable -import gdb -from gdb.types import get_basic_type - from crash.util import container_of from crash.util.symbols import Types, Symvals, SymbolCallbacks, TypeCallbacks from crash.types.classdev import for_each_class_device from crash.exceptions import DelayedAttributeError, InvalidArgumentError +import gdb +from gdb.types import get_basic_type + types = Types(['struct gendisk', 'struct hd_struct', 'struct device', 'struct device_type', 'struct bdev_inode']) symvals = Symvals(['block_class', 'blockdev_superblock', 'disk_type', diff --git a/crash/subsystem/storage/blocksq.py b/crash/subsystem/storage/blocksq.py index 977de22237f..2ac138f5002 100644 --- a/crash/subsystem/storage/blocksq.py +++ b/crash/subsystem/storage/blocksq.py @@ -3,12 +3,12 @@ from typing import Iterable, Tuple -import gdb - from crash.util.symbols import Types from crash.types.list import list_for_each_entry from crash.cache.syscache import kernel +import gdb + class NoQueueError(RuntimeError): pass diff --git a/crash/subsystem/storage/decoders.py b/crash/subsystem/storage/decoders.py index dc9e94305a0..1d5016afc75 100644 --- a/crash/subsystem/storage/decoders.py +++ b/crash/subsystem/storage/decoders.py @@ -1,12 +1,13 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -import gdb from typing import Union, List, Dict, Iterable from crash.infra.lookup import SymbolCallback from crash.subsystem.storage import block_device_name +import gdb + EndIOSpecifier = Union[int, str, List[str], gdb.Value, gdb.Symbol, None] class Decoder(object): diff --git a/crash/subsystem/storage/device_mapper.py b/crash/subsystem/storage/device_mapper.py index ef616183753..59d9fa0a93e 100644 --- a/crash/subsystem/storage/device_mapper.py +++ b/crash/subsystem/storage/device_mapper.py @@ -1,13 +1,13 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -import gdb - from crash.util import container_of from crash.util.symbols import Types from crash.subsystem.storage import block_device_name from crash.subsystem.storage.decoders import Decoder, decode_bio +import gdb + class ClonedBioReqDecoder(Decoder): """ Decodes a request-based device mapper cloned bio diff --git a/crash/types/bitmap.py b/crash/types/bitmap.py index f5cc356c4eb..596cc605a3c 100644 --- a/crash/types/bitmap.py +++ b/crash/types/bitmap.py @@ -13,12 +13,11 @@ from typing import Iterable -import gdb -from math import log - from crash.exceptions import InvalidArgumentError from crash.util.symbols import Types +import gdb + types = Types('unsigned long') def _check_bitmap_type(bitmap: gdb.Value) -> None: diff --git a/crash/types/classdev.py b/crash/types/classdev.py index 5759607a3f0..2eaa5b039af 100644 --- a/crash/types/classdev.py +++ b/crash/types/classdev.py @@ -6,12 +6,12 @@ from typing import Iterable -import gdb - from crash.types.klist import klist_for_each 
from crash.util import struct_has_member, container_of from crash.util.symbols import Types, TypeCallbacks +import gdb + types = Types(['struct device', 'struct device_private']) class ClassdevState(object): diff --git a/crash/types/cpu.py b/crash/types/cpu.py index cf000498602..61ba5ea2485 100644 --- a/crash/types/cpu.py +++ b/crash/types/cpu.py @@ -6,12 +6,11 @@ from typing import Iterable, List -import gdb from crash.util.symbols import SymbolCallbacks from crash.types.bitmap import for_each_set_bit from crash.exceptions import DelayedAttributeError -from typing import List, Iterable +import gdb # this wraps no particular type, rather it's a placeholder for # functions to iterate over online cpu's etc. diff --git a/crash/types/klist.py b/crash/types/klist.py index 7796df44bfc..3768a8bdbca 100644 --- a/crash/types/klist.py +++ b/crash/types/klist.py @@ -3,13 +3,13 @@ from typing import Iterable -import gdb from crash.util import container_of from crash.types.list import list_for_each_entry from crash.exceptions import CorruptedError, InvalidArgumentError - from crash.util.symbols import Types +import gdb + types = Types(['struct klist_node', 'struct klist']) class KlistCorruptedError(CorruptedError): diff --git a/crash/types/list.py b/crash/types/list.py index a39dd5d4aae..aa9473a9c92 100644 --- a/crash/types/list.py +++ b/crash/types/list.py @@ -3,11 +3,12 @@ from typing import Iterator, Set -import gdb from crash.util import container_of from crash.util.symbols import Types from crash.exceptions import ArgumentTypeError, UnexpectedGDBTypeError +import gdb + class ListError(Exception): pass diff --git a/crash/types/module.py b/crash/types/module.py index 567449b1b6e..6b1f318a548 100644 --- a/crash/types/module.py +++ b/crash/types/module.py @@ -3,10 +3,11 @@ from typing import Iterable, Tuple -import gdb from crash.types.list import list_for_each_entry from crash.util.symbols import Symvals, Types +import gdb + symvals = Symvals(['modules']) types = Types(['struct module']) diff --git a/crash/types/node.py b/crash/types/node.py index 0603a0ad2a0..d50c7ce2cc4 100644 --- a/crash/types/node.py +++ b/crash/types/node.py @@ -6,13 +6,13 @@ from typing import Iterable, List, Type, TypeVar -import gdb from crash.util.symbols import Symbols, Symvals, Types, SymbolCallbacks -from crash.util import container_of, find_member_variant, get_symbol_value from crash.types.percpu import get_percpu_var from crash.types.bitmap import for_each_set_bit -import crash.types.zone from crash.exceptions import DelayedAttributeError +import crash.types.zone + +import gdb symbols = Symbols(['numa_node']) symvals = Symvals(['numa_cpu_lookup_table', 'node_data']) diff --git a/crash/types/page.py b/crash/types/page.py index a6d90687ac3..c6640a59faa 100644 --- a/crash/types/page.py +++ b/crash/types/page.py @@ -4,12 +4,14 @@ from typing import Dict from math import log, ceil -import gdb -from crash.util import container_of, find_member_variant + +from crash.util import find_member_variant from crash.util.symbols import Types, Symvals, TypeCallbacks from crash.util.symbols import SymbolCallbacks, MinimalSymbolCallbacks from crash.cache.syscache import config +import gdb + #TODO debuginfo won't tell us, depends on version? 
PAGE_MAPPING_ANON = 1 diff --git a/crash/types/percpu.py b/crash/types/percpu.py index 1adab5ce499..7dfd79641a2 100644 --- a/crash/types/percpu.py +++ b/crash/types/percpu.py @@ -3,18 +3,17 @@ from typing import Dict, Union, List, Tuple -import gdb from crash.util import array_size, struct_has_member -from crash.util.symbols import Types, Symvals, MinimalSymvals, MinimalSymbols +from crash.util.symbols import Types, Symvals, MinimalSymvals from crash.util.symbols import MinimalSymbolCallbacks, SymbolCallbacks from crash.types.list import list_for_each_entry from crash.types.module import for_each_module from crash.exceptions import DelayedAttributeError, InvalidArgumentError -from crash.types.bitmap import find_first_set_bit, find_last_set_bit -from crash.types.bitmap import find_next_set_bit, find_next_zero_bit from crash.types.page import Page from crash.types.cpu import highest_possible_cpu_nr +import gdb + class PerCPUError(TypeError): """The passed object does not respond to a percpu pointer.""" _fmt = "{} does not correspond to a percpu pointer." diff --git a/crash/types/slab.py b/crash/types/slab.py index 5bedfbf6d8b..3b6d1771043 100644 --- a/crash/types/slab.py +++ b/crash/types/slab.py @@ -1,20 +1,20 @@ #!/usr/bin/python3 # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -import gdb -import crash import sys import traceback -from crash.util import container_of, find_member_variant, get_symbol_value -from crash.util import safe_get_symbol_value + +from crash.util import container_of, find_member_variant from crash.util.symbols import Types, TypeCallbacks, SymbolCallbacks from crash.types.percpu import get_percpu_var from crash.types.list import list_for_each, list_for_each_entry -from crash.types.page import Page, page_from_gdb_obj, page_from_addr +from crash.types.page import page_from_gdb_obj, page_from_addr from crash.types.node import for_each_nid from crash.types.cpu import for_each_online_cpu from crash.types.node import numa_node_id +import gdb + AC_PERCPU = "percpu" AC_SHARED = "shared" AC_ALIEN = "alien" diff --git a/crash/types/task.py b/crash/types/task.py index 73229597346..7360ef393c9 100644 --- a/crash/types/task.py +++ b/crash/types/task.py @@ -3,13 +3,14 @@ from typing import Iterator, Callable, Dict -import gdb from crash.exceptions import InvalidArgumentError, ArgumentTypeError from crash.exceptions import UnexpectedGDBTypeError from crash.util import array_size, struct_has_member from crash.util.symbols import Types, Symvals, SymbolCallbacks from crash.types.list import list_for_each_entry +import gdb + PF_EXITING = 0x4 def get_value(symname): diff --git a/crash/types/vmstat.py b/crash/types/vmstat.py index 37f04f142fb..081264374ca 100644 --- a/crash/types/vmstat.py +++ b/crash/types/vmstat.py @@ -1,14 +1,10 @@ #!/usr/bin/python3 # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -import gdb -from crash.util import container_of, find_member_variant from crash.util.symbols import Types, TypeCallbacks, Symbols -import crash.types.node from crash.types.percpu import get_percpu_var from crash.types.cpu import for_each_online_cpu - class VmStat(object): types = Types(['enum zone_stat_item', 'enum vm_event_item']) symbols = Symbols(['vm_event_states']) diff --git a/crash/types/zone.py b/crash/types/zone.py index 5c062454b50..bac398bee2a 100644 --- a/crash/types/zone.py +++ b/crash/types/zone.py @@ -1,8 +1,7 @@ #!/usr/bin/python3 # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -import gdb -from crash.util import container_of, 
find_member_variant, array_for_each +from crash.util import array_for_each from crash.util.symbols import Types from crash.types.percpu import get_percpu_var from crash.types.vmstat import VmStat diff --git a/crash/util/__init__.py b/crash/util/__init__.py index e86dd92ed93..51f8f5c87a2 100644 --- a/crash/util/__init__.py +++ b/crash/util/__init__.py @@ -3,14 +3,14 @@ from typing import Union, Tuple, List, Iterator, Dict -import gdb import uuid -from typing import Dict from crash.util.symbols import Types from crash.exceptions import MissingTypeError, MissingSymbolError from crash.exceptions import ArgumentTypeError, NotStructOrUnionError +import gdb + TypeSpecifier = Union[gdb.Type, gdb.Value, str, gdb.Symbol] AddressSpecifier = Union[gdb.Value, str, int] diff --git a/crash/util/symbols.py b/crash/util/symbols.py index e51b96599d9..2473233104c 100644 --- a/crash/util/symbols.py +++ b/crash/util/symbols.py @@ -17,8 +17,6 @@ from typing import Type, List, Tuple, Callable, Union, Dict, Any -import gdb - from crash.infra.lookup import DelayedType, DelayedSymbol, DelayedSymval from crash.infra.lookup import DelayedValue, DelayedMinimalSymbol from crash.infra.lookup import DelayedMinimalSymval @@ -26,6 +24,8 @@ from crash.infra.lookup import SymbolCallback, MinimalSymbolCallback from crash.exceptions import DelayedAttributeError +import gdb + CollectedValue = Union[gdb.Type, gdb.Value, gdb.Symbol, gdb.MinSymbol, Any] Names = Union[List[str], str] diff --git a/kdump/target.py b/kdump/target.py index c7225a1f849..5fa79043bd7 100644 --- a/kdump/target.py +++ b/kdump/target.py @@ -1,11 +1,14 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -import gdb import sys + from kdumpfile import kdumpfile, KDUMP_KVADDR -from kdumpfile.exceptions import * +from kdumpfile.exceptions import AddressTranslationException, EOFException import addrxlat +import addrxlat.exceptions + +import gdb class SymbolCallback(object): "addrxlat symbolic callback" @@ -18,14 +21,14 @@ def __call__(self, symtype, *args): if self.ctx is not None: try: return self.ctx.next_cb_sym(symtype, *args) - except addrxlat.BaseException: + except addrxlat.exceptions.BaseException: self.ctx.clear_err() if symtype == addrxlat.SYM_VALUE: ms = gdb.lookup_minimal_symbol(args[0]) if ms is not None: return int(ms.value().address) - raise addrxlat.NoDataError() + raise addrxlat.exceptions.NoDataError() class Target(gdb.Target): def __init__(self, debug=False): @@ -97,8 +100,8 @@ def xfer_partial(self, obj, annex, readbuf, writebuf, offset, ln): except EOFException as e: if self.debug: self.report_error(offset, ln, e) - raise gdb.TargetXferEof(str(e)) - except NoDataException as e: + raise gdb.TargetXferEOF(str(e)) + except addrxlat.exceptions.NoDataError as e: if self.debug: self.report_error(offset, ln, e) raise gdb.TargetXferUnavailable(str(e)) diff --git a/tests/pylintrc-enforce b/tests/pylintrc-enforce index 839ffc3b779..ac9cac5478d 100644 --- a/tests/pylintrc-enforce +++ b/tests/pylintrc-enforce @@ -65,7 +65,7 @@ disable=all # either give multiple identifier separated by comma (,) or put this option # multiple time (only on the command line, not in the configuration file where # it should appear only once). See also the "--disable" option for examples. 
-enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens +enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens,multiple-imports,wrong-import-order,ungrouped-imports,wrong-import-position,import-error,relative-beyond-top-level,cyclic-import,wildcard-import,deprecated-module,reimported,import-self,unused-import,unused-wildcard-import [REPORTS] From 0958f19911d6dee70772237d5481de9af116a5d0 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Tue, 4 Jun 2019 16:32:48 -0400 Subject: [PATCH 170/367] crash.types.task: fix rss field detection on older kernels The logic to detect whether we found any usable rss fields was reversed such that it would treat finding any fields as an error. Signed-off-by: Jeff Mahoney --- crash/types/task.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/crash/types/task.py b/crash/types/task.py index 7360ef393c9..df24b4370b9 100644 --- a/crash/types/task.py +++ b/crash/types/task.py @@ -209,6 +209,7 @@ class LinuxTask(object): """ _valid = False _task_state_has_exit_state = None + anon_file_rss_fields = list() def __init__(self, task_struct: gdb.Value): self._init_task_types(task_struct) @@ -545,8 +546,6 @@ def _pick_get_rss(cls): cls.MM_ANONPAGES = get_value('MM_ANONPAGES') cls._get_rss = cls._get_rss_stat_field else: - cls.anon_file_rss_fields = [] - if struct_has_member(types.mm_struct_type, '_file_rss'): cls.anon_file_rss_fields.append('_file_rss') @@ -556,7 +555,7 @@ def _pick_get_rss(cls): cls.atomic_long_type = gdb.lookup_type('atomic_long_t') cls._get_rss = cls._get_anon_file_rss_fields - if len(cls.anon_file_rss_fields): + if not cls.anon_file_rss_fields: raise RuntimeError("No method to retrieve RSS from task found.") def _get_rss(self) -> int: From 8887f51a5aec0ffb49e82447914c7488237bbcb1 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 31 May 2019 17:10:14 -0400 Subject: [PATCH 171/367] crash.util: fix bad exception handling This commit fixes a few issues with exception handling: - The addrxlat exceptions are in addrxlat.exceptions - The first exception in __offsetof doesn't have a field yet - The TypeError in get_typed_pointer had a bad format string Signed-off-by: Jeff Mahoney --- crash/commands/vtop.py | 6 ++++-- crash/util/__init__.py | 5 ++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/crash/commands/vtop.py b/crash/commands/vtop.py index 3475f124ca0..0bb6feb1e8e 100644 --- a/crash/commands/vtop.py +++ b/crash/commands/vtop.py @@ -3,6 +3,7 @@ import argparse import addrxlat +import addrxlat.exceptions from crash.commands import Command, ArgumentParser from crash.addrxlat import CrashAddressTranslation @@ -38,7 +39,7 @@ def next(self): self.note = '' try: self.step.step() - except addrxlat.NotPresentError: + except addrxlat.exceptions.NotPresentError: self.note = ' (NOT PRESENT)' self.step.remain = 0 return True @@ -56,7 +57,8 @@ def address(self): try: tmp.conv(addrxlat.KPHYSADDR, self.context, self.system) return addr + '{:x} [phys]'.format(tmp.addr) - except (addrxlat.NotPresentError, addrxlat.NoDataError): + except (addrxlat.exceptions.NotPresentError, + addrxlat.exceptions.NoDataError): return addr + 'N/A' class _Parser(ArgumentParser): diff --git a/crash/util/__init__.py b/crash/util/__init__.py index 
51f8f5c87a2..1038f67d8f6 100644 --- a/crash/util/__init__.py +++ b/crash/util/__init__.py @@ -191,7 +191,7 @@ def __offsetof(val, spec, error): found = False if gdbtype.code != gdb.TYPE_CODE_STRUCT and \ gdbtype.code != gdb.TYPE_CODE_UNION: - raise _InvalidComponentTypeError(field.name, spec) + raise _InvalidComponentTypeError(member, spec) for field in gdbtype.fields(): off = field.bitpos >> 3 if field.name == member: @@ -364,8 +364,7 @@ def get_typed_pointer(val: AddressSpecifier, gdbtype: gdb.Type) -> gdb.Type: try: val = int(val, 16) except TypeError as e: - print(e) - raise TypeError("string must describe hex address: ".format(e)) + raise TypeError("string must describe hex address: {}".format(e)) if isinstance(val, int): val = gdb.Value(val).cast(gdbtype) else: From c2c95a834c420fdc412da6b583ca71b3dfdeca30 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 31 May 2019 17:34:33 -0400 Subject: [PATCH 172/367] crash.addrxlat: rename builtin names map and range The 'map' and 'range' names are used as python builtins. Using them as variable names is confusing. Signed-off-by: Jeff Mahoney --- crash/addrxlat.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crash/addrxlat.py b/crash/addrxlat.py index ada21bdf711..2185c823398 100644 --- a/crash/addrxlat.py +++ b/crash/addrxlat.py @@ -56,11 +56,11 @@ def __init__(self): type=addrxlat.OS_LINUX) self.is_non_auto = False - map = self.system.get_map(addrxlat.SYS_MAP_MACHPHYS_KPHYS) - for range in map: - if range.meth == addrxlat.SYS_METH_NONE: + xlatmap = self.system.get_map(addrxlat.SYS_MAP_MACHPHYS_KPHYS) + for addr_range in xlatmap: + if addr_range.meth == addrxlat.SYS_METH_NONE: continue - meth = self.system.get_meth(range.meth) + meth = self.system.get_meth(addr_range.meth) if meth.kind != addrxlat.LINEAR or meth.off != 0: self.is_non_auto = True break From 98526619839e00c0fd0f11daecef7f35ac0a4cd5 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 31 May 2019 17:37:07 -0400 Subject: [PATCH 173/367] crash.infra.callback: fix wrong variable name _flush_symbol_cache_callback is a classmethod but its first argument was named 'self.' This commit renames it to 'cls.' This is just a correctness issue as it doesn't use the argument at all. Signed-off-by: Jeff Mahoney --- crash/infra/callback.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crash/infra/callback.py b/crash/infra/callback.py index 45de05b751a..b77939fda8a 100644 --- a/crash/infra/callback.py +++ b/crash/infra/callback.py @@ -88,7 +88,7 @@ def _setup_symbol_cache_flush_callback(cls): # symtab code. The symtab observer is behind the python observers # in the execution queue so the cache flush executes /after/ us. @classmethod - def _flush_symbol_cache_callback(self, event): + def _flush_symbol_cache_callback(cls, event): gdb.execute("maint flush-symbol-cache") def _new_objfile_callback(self, event): From 3b7e512c3733bf9718c261f4a989f830cac1bb69 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Thu, 6 Jun 2019 17:28:46 -0400 Subject: [PATCH 174/367] crash.types.percpu: eliminate useless loops while determining percpu range When we determine whether an address is a percpu variable, we only need to look at the static range -- and that doesn't change based on cpu number. 
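To illustrate the simplification, here is a minimal sketch of the containment test (not a hunk from this patch; the dict-of-ranges shape mirrors what _static_ranges and _module_ranges are assumed to hold):

    def _addr_in_ranges(addr: int, ranges: dict) -> bool:
        # ranges maps a region's start address to its size in bytes.
        # Membership depends only on the region bounds, so the test
        # does not need to be repeated once per possible CPU.
        for start, size in ranges.items():
            if start <= addr < start + size:
                return True
        return False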
Signed-off-by: Jeff Mahoney --- crash/types/percpu.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/crash/types/percpu.py b/crash/types/percpu.py index 7dfd79641a2..0e4dd2745a9 100644 --- a/crash/types/percpu.py +++ b/crash/types/percpu.py @@ -214,10 +214,9 @@ def is_static_percpu_var(self, addr: int) -> bool: :obj:`bool`: Whether this address belongs to a static range """ for start in self._static_ranges: - for cpu in range(0, self._last_cpu): - size = self._static_ranges[start] - if addr >= start and addr < start + size: - return True + size = self._static_ranges[start] + if addr >= start and addr < start + size: + return True return False # The percpu range should start at offset 0 but gdb relocation @@ -244,10 +243,9 @@ def is_module_percpu_var(self, addr: int) -> bool: :obj:`bool`: Whether this address belongs to a module range """ for start in self._module_ranges: - for cpu in range(0, self._last_cpu): - size = self._module_ranges[start] - if addr >= start and addr < start + size: - return True + size = self._module_ranges[start] + if addr >= start and addr < start + size: + return True return False def is_percpu_var(self, var: SymbolOrValue) -> bool: From c1895adaa26e764bc4d631353c20275519ec3644 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 3 Jun 2019 16:13:51 -0400 Subject: [PATCH 175/367] lint: rename crash.arch.register to crash.arch.register_arch pylint complains about the arch register function sharing a name with the register argument for the arch callbacks. This renames the function. Signed-off-by: Jeff Mahoney --- crash/arch/__init__.py | 2 +- crash/arch/ppc64.py | 4 ++-- crash/arch/x86_64.py | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/crash/arch/__init__.py b/crash/arch/__init__.py index 51f033395b9..b67bed72fc2 100644 --- a/crash/arch/__init__.py +++ b/crash/arch/__init__.py @@ -55,7 +55,7 @@ def __next__(self): return frame architectures = {} -def register(arch): +def register_arch(arch): architectures[arch.ident] = arch for ident in arch.aliases: architectures[ident] = arch diff --git a/crash/arch/ppc64.py b/crash/arch/ppc64.py index a7b9085794a..9a288229d10 100644 --- a/crash/arch/ppc64.py +++ b/crash/arch/ppc64.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from crash.arch import CrashArchitecture, register, KernelFrameFilter +from crash.arch import CrashArchitecture, KernelFrameFilter, register_arch import gdb @@ -27,4 +27,4 @@ def setup_thread_info(self, thread: gdb.InferiorThread) -> None: def get_stack_pointer(cls, thread_struct: gdb.Value) -> gdb.Value: return thread_struct['ksp'] -register(Powerpc64Architecture) +register_arch(Powerpc64Architecture) diff --git a/crash/arch/x86_64.py b/crash/arch/x86_64.py index f3bc698db2d..912448f821d 100644 --- a/crash/arch/x86_64.py +++ b/crash/arch/x86_64.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from crash.arch import CrashArchitecture, register, KernelFrameFilter +from crash.arch import CrashArchitecture, KernelFrameFilter, register_arch import gdb @@ -120,4 +120,4 @@ def fetch_register_scheduled_thread_return(self, thread: gdb.InferiorThread, def get_stack_pointer(cls, thread_struct: gdb.Value) -> gdb.Value: return thread_struct['sp'] -register(x86_64Architecture) +register_arch(x86_64Architecture) From 4dfdd4f69f84ea3c63e3c9b84e1993e137db70c9 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 3 Jun 2019 16:37:25 -0400 Subject: 
[PATCH 176/367] crash.infra.lookup: do remaining typing Signed-off-by: Jeff Mahoney --- crash/infra/lookup.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/crash/infra/lookup.py b/crash/infra/lookup.py index 126f902e90e..6e2de0aef8f 100644 --- a/crash/infra/lookup.py +++ b/crash/infra/lookup.py @@ -248,12 +248,20 @@ class DelayedValue(object): A generic class for making class attributes available that describe to-be-loaded symbols, minimal symbols, and types. """ - def __init__(self, name, attrname=None): + def __init__(self, name: str, attrname: str = None): + if name is None or not isinstance(name, str): + raise ValueError("Name must be a valid string") + self.name = name - self.attrname = attrname - if self.attrname is None: + + if attrname is None: self.attrname = name - self.value = None + else: + self.attrname = attrname + + assert self.attrname is not None + + self.value: Any = None def get(self): if self.value is None: From 1147f9107b3a333efd75927c80e78809c66555a0 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 3 Jun 2019 16:44:22 -0400 Subject: [PATCH 177/367] crash.util: enforce find_member_variant accepting a list The page and slab modules both pass a tuple. It works but it fails type checking. Signed-off-by: Jeff Mahoney --- crash/types/page.py | 6 +++--- crash/types/slab.py | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/crash/types/page.py b/crash/types/page.py index c6640a59faa..1a85968ed87 100644 --- a/crash/types/page.py +++ b/crash/types/page.py @@ -62,9 +62,9 @@ def setup_page_type(cls, gdbtype): cls.PAGE_SIZE = 1 << cls.PAGE_SHIFT - cls.slab_cache_name = find_member_variant(gdbtype, ('slab_cache', 'lru')) - cls.slab_page_name = find_member_variant(gdbtype, ('slab_page', 'lru')) - cls.compound_head_name = find_member_variant(gdbtype, ('compound_head', 'first_page')) + cls.slab_cache_name = find_member_variant(gdbtype, ['slab_cache', 'lru']) + cls.slab_page_name = find_member_variant(gdbtype, ['slab_page', 'lru']) + cls.compound_head_name = find_member_variant(gdbtype, ['compound_head', 'first_page']) cls.vmemmap = gdb.Value(cls.vmemmap_base).cast(gdbtype.pointer()) cls.setup_page_type_done = True diff --git a/crash/types/slab.py b/crash/types/slab.py index 3b6d1771043..69a5e066c35 100644 --- a/crash/types/slab.py +++ b/crash/types/slab.py @@ -308,11 +308,11 @@ class KmemCache(object): @classmethod def check_kmem_cache_type(cls, gdbtype): - cls.buffer_size_name = find_member_variant(gdbtype, ('buffer_size', 'size')) - cls.nodelists_name = find_member_variant(gdbtype, ('nodelists', 'node')) - cls.percpu_name = find_member_variant(gdbtype, ('cpu_cache', 'array')) + cls.buffer_size_name = find_member_variant(gdbtype, ['buffer_size', 'size']) + cls.nodelists_name = find_member_variant(gdbtype, ['nodelists', 'node']) + cls.percpu_name = find_member_variant(gdbtype, ['cpu_cache', 'array']) cls.percpu_cache = bool(cls.percpu_name == 'cpu_cache') - cls.head_name = find_member_variant(gdbtype, ('next', 'list')) + cls.head_name = find_member_variant(gdbtype, ['next', 'list']) @classmethod def setup_alien_cache_type(cls, gdbtype): From 5ac6174c255d41b0e4d2ac748058d1a4796ba03d Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 3 Jun 2019 17:37:03 -0400 Subject: [PATCH 178/367] crash.types.page: add Page.from_obj The zone code expects from_obj to exist. It's easily implemented as an intermediate state between Page.from_page_addr and the regular constructor. 
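For context, a sketch of how a caller holding a 'struct page' value is expected to use the new constructor (variable names here are illustrative, not taken from the patch):

    # page_value is assumed to be a gdb.Value of type 'struct page'
    page = Page.from_obj(page_value)
    # equivalent to the existing address-based path:
    same_page = Page.from_page_addr(int(page_value.address))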
Signed-off-by: Jeff Mahoney --- crash/types/page.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/crash/types/page.py b/crash/types/page.py index 1a85968ed87..aff8d5f5761 100644 --- a/crash/types/page.py +++ b/crash/types/page.py @@ -144,11 +144,15 @@ def setup_pageflags_finish(cls): cls.PG_tail = 1 << cls.pageflags['PG_compound'] | 1 << cls.pageflags['PG_reclaim'] cls.is_tail = cls.__is_tail_flagcombo - @staticmethod - def from_page_addr(addr): + @classmethod + def from_obj(cls, page): + pfn = (int(page.address) - Page.vmemmap_base) / types.page_type.sizeof + return Page(page, pfn) + + @classmethod + def from_page_addr(cls, addr): page_ptr = gdb.Value(addr).cast(types.page_type.pointer()) - pfn = (addr - Page.vmemmap_base) / types.page_type.sizeof - return Page(page_ptr.dereference(), pfn) + return cls.from_obj(page_ptr.dereference()) def __is_tail_flagcombo(self): return bool((self.flags & self.PG_tail) == self.PG_tail) From 8d72654c008b696fd366a4994cd2610fe9a9aab9 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 3 Jun 2019 17:43:34 -0400 Subject: [PATCH 179/367] crash.types.task: don't do type lookup in _pick_get_rss We moved atomic_long_t to 'types' so we don't need to look it up manually. Signed-off-by: Jeff Mahoney --- crash/types/task.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/crash/types/task.py b/crash/types/task.py index df24b4370b9..62349d471e3 100644 --- a/crash/types/task.py +++ b/crash/types/task.py @@ -526,7 +526,7 @@ def _get_anon_file_rss_fields(self): mm = self.task_struct['mm'] rss = 0 for name in self.anon_file_rss_fields: - if mm[name].type == self.atomic_long_type: + if mm[name].type == types.atomic_long_t_type: rss += int(mm[name]['counter']) else: rss += int(mm[name]) @@ -552,7 +552,6 @@ def _pick_get_rss(cls): if struct_has_member(types.mm_struct_type, '_anon_rss'): cls.anon_file_rss_fields.append('_anon_rss') - cls.atomic_long_type = gdb.lookup_type('atomic_long_t') cls._get_rss = cls._get_anon_file_rss_fields if not cls.anon_file_rss_fields: From 16824f111f43825558ad2111d025ff131589b876 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 3 Jun 2019 17:51:17 -0400 Subject: [PATCH 180/367] crash.types.task: move init_mm to module Symvals We don't need to look up 'init_mm' manually. We can move it to the module Symvals call instead. Signed-off-by: Jeff Mahoney --- crash/types/task.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/crash/types/task.py b/crash/types/task.py index 62349d471e3..2abc27d8610 100644 --- a/crash/types/task.py +++ b/crash/types/task.py @@ -19,7 +19,7 @@ def get_value(symname): return sym[0].value() types = Types(['struct task_struct', 'struct mm_struct', 'atomic_long_t']) -symvals = Symvals(['task_state_array', 'init_task']) +symvals = Symvals(['task_state_array', 'init_task', 'init_mm']) # This is pretty painful. These are all #defines so none of them end # up with symbols in the kernel. 
The best approximation we have is @@ -253,7 +253,6 @@ def _init_task_types(cls, task): cls._task_state_has_exit_state = 'exit_state' in fields cls._pick_get_rss() cls._pick_last_run() - cls.init_mm = get_value('init_mm') cls._valid = True def set_active(self, cpu: int, regs: Dict[str, int]) -> None: @@ -471,7 +470,7 @@ def is_kernel_task(self): mm = self.task_struct['mm'] if mm == 0: return True - elif self.init_mm and mm == self.init_mm.address: + elif symvals.init_mm and mm == symvals.init_mm.address: return True return False From 1cb2951c2246e07e33dc7a4fd95b62d37657caf4 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 3 Jun 2019 17:53:51 -0400 Subject: [PATCH 181/367] crash.types.task: remove unused MM_FILEPAGES and MM_ANONPAGES These variables are unused. Those are also the last users of get_value so we can remove that too. Signed-off-by: Jeff Mahoney --- crash/types/task.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/crash/types/task.py b/crash/types/task.py index 2abc27d8610..7209317485b 100644 --- a/crash/types/task.py +++ b/crash/types/task.py @@ -13,11 +13,6 @@ PF_EXITING = 0x4 -def get_value(symname): - sym = gdb.lookup_symbol(symname, block=None, domain=gdb.SYMBOL_VAR_DOMAIN) - if sym[0]: - return sym[0].value() - types = Types(['struct task_struct', 'struct mm_struct', 'atomic_long_t']) symvals = Symvals(['task_state_array', 'init_task', 'init_mm']) @@ -541,8 +536,6 @@ def _pick_get_rss(cls): elif struct_has_member(types.mm_struct_type, '_rss'): cls._get_rss = cls._get__rss_field elif struct_has_member(types.mm_struct_type, 'rss_stat'): - cls.MM_FILEPAGES = get_value('MM_FILEPAGES') - cls.MM_ANONPAGES = get_value('MM_ANONPAGES') cls._get_rss = cls._get_rss_stat_field else: if struct_has_member(types.mm_struct_type, '_file_rss'): From bf4d4bed0fcd5cb2d9f59a89ab5408a2f3d94353 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Tue, 4 Jun 2019 12:31:43 -0400 Subject: [PATCH 182/367] lint: fix len-as-condition complaints This commit fixes the following lint complaints and enables enforcement of the 'len-as-condition' pylint rule. ************* Module crash.kernel C:135,42: Do not use `len(SEQUENCE)` to determine if a sequence is empty (len-as-condition) C:188,14: Do not use `len(SEQUENCE)` to determine if a sequence is empty (len-as-condition) ************* Module crash.infra.callback C: 55,11: Do not use `len(SEQUENCE)` to determine if a sequence is empty (len-as-condition) ************* Module crash.types.task C:162,11: Do not use `len(SEQUENCE)` to determine if a sequence is empty (len-as-condition) ************* Module kdump.target C: 45,11: Do not use `len(SEQUENCE)` to determine if a sequence is empty (len-as-condition) Signed-off-by: Jeff Mahoney --- crash/infra/callback.py | 3 ++- crash/kernel.py | 5 ++--- crash/types/task.py | 2 +- kdump/target.py | 3 ++- tests/pylintrc-enforce | 2 +- 5 files changed, 8 insertions(+), 7 deletions(-) diff --git a/crash/infra/callback.py b/crash/infra/callback.py index b77939fda8a..8b9431cab0b 100644 --- a/crash/infra/callback.py +++ b/crash/infra/callback.py @@ -52,7 +52,8 @@ def connect_callback(self): # We don't want to do lookups immediately if we don't have # an objfile. It'll fail for any custom types but it can # also return builtin types that are eventually changed. 
- if len(gdb.objfiles()) > 0: + objfiles = gdb.objfiles() + if objfiles: result = self.check_ready() if not (result is None or result is False): self.completed = self.callback(result) diff --git a/crash/kernel.py b/crash/kernel.py index 23476339010..ef10496270c 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -132,7 +132,7 @@ def __init__(self, roots: PathSpecifier = None, if roots is None: self.roots = ["/"] - elif (isinstance(roots, list) and len(roots) > 0 and + elif (isinstance(roots, list) and roots and isinstance(roots[0], str)): x = None for root in roots: @@ -185,8 +185,7 @@ def __init__(self, roots: PathSpecifier = None, self.vmlinux_debuginfo = x elif (isinstance(vmlinux_debuginfo, list) and - len(vmlinux_debuginfo) > 0 and - isinstance(vmlinux_debuginfo[0], str)): + vmlinux_debuginfo and isinstance(vmlinux_debuginfo[0], str)): self.vmlinux_debuginfo = vmlinux_debuginfo elif isinstance(vmlinux_debuginfo, str): self.vmlinux_debuginfo = [vmlinux_debuginfo] diff --git a/crash/types/task.py b/crash/types/task.py index 7209317485b..b35f98a034f 100644 --- a/crash/types/task.py +++ b/crash/types/task.py @@ -159,7 +159,7 @@ def _check_state_bits(cls): if not cls.has_flag(bit): missing.append(bit) - if len(missing): + if missing: raise RuntimeError("Missing required task states: {}" .format(",".join(missing))) diff --git a/kdump/target.py b/kdump/target.py index 5fa79043bd7..fed37502d00 100644 --- a/kdump/target.py +++ b/kdump/target.py @@ -42,7 +42,8 @@ def __init__(self, debug=False): def open(self, filename, from_tty): - if len(gdb.objfiles()) == 0: + objfiles = gdb.objfiles() + if not objfiles: raise gdb.GdbError("kdumpfile target requires kernel to be already loaded for symbol resolution") try: self.kdump = kdumpfile(file=filename) diff --git a/tests/pylintrc-enforce b/tests/pylintrc-enforce index ac9cac5478d..4d23174f518 100644 --- a/tests/pylintrc-enforce +++ b/tests/pylintrc-enforce @@ -65,7 +65,7 @@ disable=all # either give multiple identifier separated by comma (,) or put this option # multiple time (only on the command line, not in the configuration file where # it should appear only once). See also the "--disable" option for examples. -enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens,multiple-imports,wrong-import-order,ungrouped-imports,wrong-import-position,import-error,relative-beyond-top-level,cyclic-import,wildcard-import,deprecated-module,reimported,import-self,unused-import,unused-wildcard-import +enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens,multiple-imports,wrong-import-order,ungrouped-imports,wrong-import-position,import-error,relative-beyond-top-level,cyclic-import,wildcard-import,deprecated-module,reimported,import-self,unused-import,unused-wildcard-import,len-as-condition [REPORTS] From 2661caab6da6bffb63dc80d1d57c53e3596fda7b Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Tue, 4 Jun 2019 18:22:52 -0400 Subject: [PATCH 183/367] crash.types.slab: raise an exception when a slab can't be found Rather than confuse the static checker by returning None when we can't find a slab by name or address, raise an exception. 
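Callers that previously tested the return value for None switch to an exception-handling pattern along these lines (a sketch only; 'name' is illustrative):

    try:
        cache = kmem_cache_from_name(name)
    except KmemCacheNotFound as e:
        raise CommandError(str(e))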
Signed-off-by: Jeff Mahoney --- crash/commands/kmem.py | 7 ++++--- crash/types/slab.py | 8 ++++++-- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/crash/commands/kmem.py b/crash/commands/kmem.py index a9f2097e91f..98d0739be89 100644 --- a/crash/commands/kmem.py +++ b/crash/commands/kmem.py @@ -4,7 +4,7 @@ from crash.commands import Command, ArgumentParser from crash.commands import CommandError, CommandLineError from crash.types.slab import kmem_cache_get_all, kmem_cache_from_name -from crash.types.slab import slab_from_obj_addr +from crash.types.slab import slab_from_obj_addr, KmemCacheNotFound from crash.types.node import for_each_zone, for_each_populated_zone from crash.types.vmstat import VmStat from crash.util import get_symbol_value @@ -56,8 +56,9 @@ def execute(self, args): else: cache_name = args.slabname print(f"Checking kmem cache {cache_name}") - cache = kmem_cache_from_name(cache_name) - if cache is None: + try: + cache = kmem_cache_from_name(cache_name) + except KmemCacheNotFound: raise CommandError(f"Cache {cache_name} not found.") cache.check_all() diff --git a/crash/types/slab.py b/crash/types/slab.py index 69a5e066c35..fe674880675 100644 --- a/crash/types/slab.py +++ b/crash/types/slab.py @@ -612,6 +612,10 @@ def check_all(self): (nid, free_declared, free_counted))) self.check_array_caches() +class KmemCacheNotFound(RuntimeError): + """The specified kmem_cache could not be found.""" + pass kmem_caches = None kmem_caches_by_addr = None @@ -637,13 +641,13 @@ def kmem_cache_from_addr(addr): try: return kmem_caches_by_addr[addr] except KeyError: - return None + raise KmemCacheNotFound(f"No kmem cache found for {addr}.") def kmem_cache_from_name(name): try: return kmem_caches[name] except KeyError: - return None + raise KmemCacheNotFound(f"No kmem cache found for {name}.") def kmem_cache_get_all(): return kmem_caches.values() From b69dabc357b0b47858a01949e64bd1cc4a592f01 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Tue, 4 Jun 2019 18:31:21 -0400 Subject: [PATCH 184/367] crash.subsystem.filesystem.mount: remove debug code When I was working on the (not yet merged) files command, I was hacking on d_path a bit and left some debugging code behind. This removes it. Signed-off-by: Jeff Mahoney --- crash/subsystem/filesystem/mount.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/crash/subsystem/filesystem/mount.py b/crash/subsystem/filesystem/mount.py index bc722c13cc8..93d9868a872 100644 --- a/crash/subsystem/filesystem/mount.py +++ b/crash/subsystem/filesystem/mount.py @@ -256,8 +256,6 @@ def d_path(mnt: gdb.Value, dentry: gdb.Value, root: gdb.Value = None): # dentry == dentry->d_parent while dentry != root['dentry'] or mnt != root['mnt']: if dentry == mnt['mnt_root'] or dentry == dentry['d_parent']: - if dentry != mnt['mnt_root']: - return None if mount != mount['mnt_parent']: dentry = mount['mnt_mountpoint'] mount = mount['mnt_parent'] From cceef84fa953eeea1806a6b29316649ee225a7ab Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Tue, 4 Jun 2019 18:15:57 -0400 Subject: [PATCH 185/367] crash: complete typing This commit completes the typing of the project. All functions and methods are now properly typed. 'make test' now includes the static checks and will fail if typing is missing. 
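As a rough illustration of the annotation style (a hypothetical helper, not a hunk from this patch), an untyped function such as:

    def find_task(pid):
        ...

becomes:

    def find_task(pid: int) -> gdb.Value:
        ...

so that the static type checker run by 'make static-check' can verify callers.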
Signed-off-by: Jeff Mahoney --- Makefile | 2 +- crash/addrxlat.py | 14 +- crash/arch/__init__.py | 35 ++-- crash/arch/ppc64.py | 2 +- crash/arch/x86_64.py | 14 +- crash/cache/__init__.py | 6 +- crash/cache/slab.py | 14 +- crash/cache/syscache.py | 44 ++--- crash/cache/tasks.py | 8 +- crash/cache/vm.py | 4 +- crash/commands/__init__.py | 14 +- crash/commands/btrfs.py | 8 +- crash/commands/dmesg.py | 26 ++- crash/commands/help.py | 4 +- crash/commands/kmem.py | 18 +- crash/commands/lsmod.py | 8 +- crash/commands/mount.py | 12 +- crash/commands/ps.py | 21 +-- crash/commands/syscmd.py | 8 +- crash/commands/task.py | 4 +- crash/commands/vtop.py | 23 +-- crash/commands/xfs.py | 14 +- crash/exceptions.py | 17 +- crash/infra/__init__.py | 13 +- crash/infra/callback.py | 29 +-- crash/infra/lookup.py | 43 ++--- crash/kernel.py | 21 ++- crash/subsystem/filesystem/decoders.py | 32 ++-- crash/subsystem/filesystem/ext3.py | 6 +- crash/subsystem/filesystem/mount.py | 39 +++-- crash/subsystem/filesystem/xfs.py | 16 +- crash/subsystem/storage/__init__.py | 2 +- crash/subsystem/storage/decoders.py | 32 ++-- crash/subsystem/storage/device_mapper.py | 34 ++-- crash/types/classdev.py | 2 +- crash/types/cpu.py | 2 +- crash/types/node.py | 14 +- crash/types/page.py | 114 ++++++------ crash/types/percpu.py | 10 +- crash/types/slab.py | 214 +++++++++++++---------- crash/types/task.py | 59 ++++--- crash/types/vmstat.py | 23 ++- crash/types/zone.py | 18 +- crash/util/__init__.py | 13 +- crash/util/symbols.py | 27 +-- kdump/target.py | 35 ++-- 46 files changed, 617 insertions(+), 501 deletions(-) diff --git a/Makefile b/Makefile index 58df012f6b8..70396fd2775 100644 --- a/Makefile +++ b/Makefile @@ -74,7 +74,7 @@ static-check: clean-build live-tests: clean-build sh tests/run-kernel-tests.sh $(INI_FILES) -test: unit-tests lint-enforce live-tests +test: unit-tests static-check lint-enforce live-tests @echo -n doc: build FORCE diff --git a/crash/addrxlat.py b/crash/addrxlat.py index 2185c823398..f1a39bc6843 100644 --- a/crash/addrxlat.py +++ b/crash/addrxlat.py @@ -11,11 +11,11 @@ types = Types(['uint32_t *', 'uint64_t *']) class TranslationContext(addrxlat.Context): - def __init__(self, *args, **kwargs): + def __init__(self, *args: int, **kwargs: int) -> None: super().__init__(*args, **kwargs) self.read_caps = addrxlat.CAPS(addrxlat.KVADDR) - def cb_sym(self, symtype, *args): + def cb_sym(self, symtype: int, *args: str) -> int: if symtype == addrxlat.SYM_VALUE: ms = gdb.lookup_minimal_symbol(args[0]) if ms is not None: @@ -30,20 +30,22 @@ def cb_sym(self, symtype, *args): # this works for typedefs: sym = gdb.lookup_symbol(args[0], None)[0] if sym is not None: - return offsetof(sym.type, args[1]) + ret = offsetof(sym.type, args[1], True) + if ret is None: + raise RuntimeError("offsetof can't return None with errors=True") return super().cb_sym(symtype, *args) - def cb_read32(self, faddr): + def cb_read32(self, faddr: addrxlat.FullAddress) -> gdb.Value: v = gdb.Value(faddr.addr).cast(types.uint32_t_p_type) return int(v.dereference()) - def cb_read64(self, faddr): + def cb_read64(self, faddr: addrxlat.FullAddress) -> gdb.Value: v = gdb.Value(faddr.addr).cast(types.uint64_t_p_type) return int(v.dereference()) class CrashAddressTranslation(object): - def __init__(self): + def __init__(self) -> None: try: target = gdb.current_target() self.context = target.kdump.get_addrxlat_ctx() diff --git a/crash/arch/__init__.py b/crash/arch/__init__.py index b67bed72fc2..2c0e7c907ba 100644 --- a/crash/arch/__init__.py +++ 
b/crash/arch/__init__.py @@ -1,52 +1,58 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from typing import List +from typing import List, Iterator, Any, Optional, Type import gdb +from gdb.FrameDecorator import FrameDecorator class CrashArchitecture(object): ident = "base-class" aliases: List[str] = list() - def __init__(self): + def __init__(self) -> None: pass - def fetch_register_active(self, thread, register): + def fetch_register_active(self, thread: gdb.InferiorThread, + register: int) -> None: raise NotImplementedError("setup_thread_active not implemented") - def fetch_register_scheduled(self, thread, register): + def fetch_register_scheduled(self, thread: gdb.InferiorThread, + register: int) -> None: raise NotImplementedError("setup_thread_scheduled not implemented") - def setup_thread_info(self, thread): + def setup_thread_info(self, thread: gdb.InferiorThread) -> None: raise NotImplementedError("setup_thread_info not implemented") - def fetch_register(self, thread, register): + def fetch_register(self, thread: gdb.InferiorThread, register: int) -> None: if thread.info.active: self.fetch_register_active(thread, register) else: self.fetch_register_scheduled(thread, register) + def get_stack_pointer(self, thread_struct: gdb.Value) -> gdb.Value: + raise NotImplementedError("get_stack_pointer is not implemented") + # This keeps stack traces from continuing into userspace and causing problems. class KernelFrameFilter(object): - def __init__(self, address): + def __init__(self, address: int) -> None: self.name = "KernelFrameFilter" self.priority = 100 self.enabled = True self.address = address gdb.frame_filters[self.name] = self - def filter(self, frame_iter): + def filter(self, frame_iter: Iterator[FrameDecorator]) -> Any: return KernelAddressIterator(frame_iter, self.address) class KernelAddressIterator(object): - def __init__(self, ii, address): + def __init__(self, ii: Iterator[gdb.Frame], address: int) -> None: self.input_iterator = ii self.address = address - def __iter__(self): + def __iter__(self) -> Any: return self - def __next__(self): + def __next__(self) -> Any: frame = next(self.input_iterator) if frame.inferior_frame().pc() < self.address: @@ -55,13 +61,12 @@ def __next__(self): return frame architectures = {} -def register_arch(arch): +def register_arch(arch: Type[CrashArchitecture]) -> None: architectures[arch.ident] = arch for ident in arch.aliases: architectures[ident] = arch -def get_architecture(archname): +def get_architecture(archname: str) -> Type[CrashArchitecture]: if archname in architectures: return architectures[archname] - - return None + raise RuntimeError(f"Couldn't locate helpers for arch: {archname}") diff --git a/crash/arch/ppc64.py b/crash/arch/ppc64.py index 9a288229d10..aa4cc6fa2e2 100644 --- a/crash/arch/ppc64.py +++ b/crash/arch/ppc64.py @@ -9,7 +9,7 @@ class Powerpc64Architecture(CrashArchitecture): ident = "powerpc:common64" aliases = ["ppc64", "elf64-powerpc"] - def __init__(self): + def __init__(self) -> None: super(Powerpc64Architecture, self).__init__() self.ulong_type = gdb.lookup_type('unsigned long') thread_info_type = gdb.lookup_type('struct thread_info') diff --git a/crash/arch/x86_64.py b/crash/arch/x86_64.py index 912448f821d..30fb934861e 100644 --- a/crash/arch/x86_64.py +++ b/crash/arch/x86_64.py @@ -9,19 +9,19 @@ class x86_64Architecture(CrashArchitecture): ident = "i386:x86-64" aliases = ["x86_64"] - def __init__(self): + def __init__(self) -> None: super(x86_64Architecture, 
self).__init__() # PC for blocked threads try: inactive = gdb.lookup_type('struct inactive_task_frame') - self.fetch_register_scheduled = \ + self._fetch_register_scheduled = \ self.fetch_register_scheduled_inactive self.inactive_task_frame_type = inactive except gdb.error as e: try: thread_return = gdb.lookup_minimal_symbol("thread_return") self.thread_return = thread_return.value().address - self.fetch_register_scheduled = \ + self._fetch_register_scheduled = \ self.fetch_register_scheduled_thread_return except Exception: raise RuntimeError("{} requires symbol 'thread_return'" @@ -49,6 +49,10 @@ def fetch_register_active(self, thread: gdb.InferiorThread, except KeyError as e: pass + def fetch_register_scheduled(self, thread: gdb.InferiorThread, + register: gdb.Register) -> None: + return self._fetch_register_scheduled(thread, register) + def fetch_register_scheduled_inactive(self, thread: gdb.InferiorThread, register: gdb.Register) -> None: ulong_type = self.ulong_type @@ -78,7 +82,7 @@ def fetch_register_scheduled_inactive(self, thread: gdb.InferiorThread, thread.info.valid_stack = True def fetch_register_scheduled_thread_return(self, thread: gdb.InferiorThread, - register: gdb.Register): + register: gdb.Register) -> None: ulong_type = self.ulong_type task = thread.info.task_struct @@ -86,7 +90,7 @@ def fetch_register_scheduled_thread_return(self, thread: gdb.InferiorThread, if register == 16 or register == -1: thread.registers['rip'].value = self.thread_return if register == 16: - return True + return rsp = task['thread']['sp'].cast(ulong_type.pointer()) rbp = rsp.dereference().cast(ulong_type.pointer()) diff --git a/crash/cache/__init__.py b/crash/cache/__init__.py index b9bb320071b..7f36b4b6a0e 100644 --- a/crash/cache/__init__.py +++ b/crash/cache/__init__.py @@ -10,11 +10,11 @@ import gdb class CrashCache(object): - def refresh(self): + def refresh(self) -> None: pass - def needs_updating(self): + def needs_updating(self) -> bool: return False -def discover(): +def discover() -> None: autoload_submodules('crash.cache') diff --git a/crash/cache/slab.py b/crash/cache/slab.py index 673386fb3c2..c78d2accfb9 100644 --- a/crash/cache/slab.py +++ b/crash/cache/slab.py @@ -1,19 +1,19 @@ #!/usr/bin/python3 # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +from typing import Dict, Any + from crash.cache import CrashCache class CrashCacheSlab(CrashCache): - def __init__(self): + def __init__(self) -> None: super().__init__() - self.populated = False - self.kmem_caches = dict() - self.kmem_caches_by_addr = dict() + self.refresh() - def refresh(self): + def refresh(self) -> None: self.populated = False - self.kmem_caches = dict() - self.kmem_caches_by_addr = dict() + self.kmem_caches: Dict[str, Any] = dict() + self.kmem_caches_by_addr: Dict[int, Any] = dict() cache = CrashCacheSlab() diff --git a/crash/cache/syscache.py b/crash/cache/syscache.py index 399c42b14ab..ab2fb032b02 100644 --- a/crash/cache/syscache.py +++ b/crash/cache/syscache.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from typing import Dict +from typing import Dict, List, Any from builtins import round @@ -22,11 +22,11 @@ class CrashUtsnameCache(CrashCache): symvals = Symvals(['init_uts_ns']) - def load_utsname(self): + def load_utsname(self) -> gdb.Value: self.utsname = self.symvals.init_uts_ns['name'] return self.utsname - def init_utsname_cache(self): + def init_utsname_cache(self) -> Dict[str, str]: d = {} for field in self.utsname.type.fields(): @@ 
-38,7 +38,7 @@ def init_utsname_cache(self): utsname_fields = ['sysname', 'nodename', 'release', 'version', 'machine', 'domainname'] - def __getattr__(self, name): + def __getattr__(self, name: str) -> Any: if name == 'utsname_cache': return self.init_utsname_cache() elif name == 'utsname': @@ -53,7 +53,7 @@ class CrashConfigCache(CrashCache): msymvals = MinimalSymvals(['kernel_config_data', 'kernel_config_data_end']) - def __getattr__(self, name): + def __getattr__(self, name: str) -> Any: if name == 'config_buffer': return self.decompress_config_buffer() elif name == 'ikconfig_cache': @@ -126,7 +126,7 @@ def decompress_config_buffer(self) -> str: self.config_buffer = str(decompressed.decode('utf-8')) return self.config_buffer - def __str__(self): + def __str__(self) -> str: return self.config_buffer def _parse_config(self) -> Dict[str, str]: @@ -145,7 +145,7 @@ def _parse_config(self) -> Dict[str, str]: return self.ikconfig_cache - def __getitem__(self, name): + def __getitem__(self, name: str) -> Any: try: return self.ikconfig_cache[name] except KeyError: @@ -160,15 +160,15 @@ class CrashKernelCache(CrashCache): jiffies_dv = DelayedValue('jiffies') @property - def jiffies(self): + def jiffies(self) -> gdb.Value: v = self.jiffies_dv.get() return v - def __init__(self, config): + def __init__(self, config: CrashConfigCache) -> None: CrashCache.__init__(self) self.config = config - def __getattr__(self, name): + def __getattr__(self, name: str) -> Any: if name == 'hz': self.hz = int(self.config['HZ']) return self.hz @@ -178,28 +178,26 @@ def __getattr__(self, name): return self.get_loadavg() return getattr(self.__class__, name) - @staticmethod - def calculate_loadavg(metric): + def calculate_loadavg(self, metric: int) -> float: # The kernel needs to do fixed point trickery to calculate # a floating point average. We can just return a float. 
return round(int(metric) / (1 << 11), 2) - @staticmethod - def format_loadavg(metrics): + def format_loadavg(self, metrics: List[float]) -> str: out = [] for metric in metrics: out.append(str(metric)) return " ".join(out) - def get_loadavg_values(self): + def get_loadavg_values(self) -> List[float]: metrics = [] for index in range(0, array_size(self.symvals.avenrun)): metrics.append(self.calculate_loadavg(self.symvals.avenrun[index])) return metrics - def get_loadavg(self): + def get_loadavg(self) -> str: try: metrics = self.get_loadavg_values() self.loadavg = self.format_loadavg(metrics) @@ -208,14 +206,14 @@ def get_loadavg(self): return "Unknown" @classmethod - def set_jiffies(cls, value): + def set_jiffies(cls, value: gdb.Value) -> None: cls.jiffies_dv.value = None cls.jiffies_dv.callback(value) @classmethod - def setup_jiffies(cls, symbol): + def setup_jiffies(cls, symbol: gdb.Symbol) -> bool: if cls.jiffies_ready: - return + return True jiffies_sym = gdb.lookup_global_symbol('jiffies_64') @@ -231,13 +229,15 @@ def setup_jiffies(cls, symbol): cls.set_jiffies(jiffies) - def adjusted_jiffies(self): + return True + + def adjusted_jiffies(self) -> gdb.Value: if self.adjust_jiffies: return self.jiffies -(int(0x100000000) - 300 * self.hz) else: return self.jiffies - def get_uptime(self): + def get_uptime(self) -> timedelta: self.uptime = timedelta(seconds=self.adjusted_jiffies() // self.hz) return self.uptime @@ -248,5 +248,5 @@ def get_uptime(self): config = CrashConfigCache() kernel = CrashKernelCache(config) -def jiffies_to_msec(jiffies): +def jiffies_to_msec(jiffies: int) -> int: return 1000 // kernel.hz * jiffies diff --git a/crash/cache/tasks.py b/crash/cache/tasks.py index 7bc15f948d3..58d7fcd5a6b 100644 --- a/crash/cache/tasks.py +++ b/crash/cache/tasks.py @@ -1,13 +1,15 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +from crash.types.task import LinuxTask + tasks = {} -def cache_task(task): +def cache_task(task: LinuxTask) -> None: tasks[int(task.task_struct['pid'])] = task -def get_task(pid): +def get_task(pid: int) -> LinuxTask: return tasks[pid] -def drop_task(pid): +def drop_task(pid: int) -> None: del tasks[pid] diff --git a/crash/cache/vm.py b/crash/cache/vm.py index 2cfda3bea13..e9c58f04602 100644 --- a/crash/cache/vm.py +++ b/crash/cache/vm.py @@ -4,10 +4,10 @@ from crash.cache import CrashCache class CrashCacheVM(CrashCache): - def __init__(self): + def __init__(self) -> None: super().__init__() - def refresh(self): + def refresh(self) -> None: pass cache = CrashCacheVM() diff --git a/crash/commands/__init__.py b/crash/commands/__init__.py index 5f28d7af779..43ad21e1eeb 100644 --- a/crash/commands/__init__.py +++ b/crash/commands/__init__.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from typing import Dict +from typing import Dict, Any import os import glob @@ -19,7 +19,7 @@ class CommandLineError(RuntimeError): pass class ArgumentParser(argparse.ArgumentParser): - def error(self, message: str): + def error(self, message: str) -> Any: raise CommandLineError(message) def format_help(self) -> str: @@ -29,7 +29,7 @@ def format_help(self) -> str: class Command(gdb.Command): commands: Dict[str, gdb.Command] = dict() - def __init__(self, name, parser=None): + def __init__(self, name: str, parser: ArgumentParser = None) -> None: self.name = "py" + name if parser is None: parser = ArgumentParser(prog=self.name) @@ -43,12 +43,12 @@ def __init__(self, name, parser=None): def 
format_help(self) -> str: return self.parser.format_help() - def invoke_uncaught(self, argstr, from_tty=False): + def invoke_uncaught(self, argstr: str, from_tty: bool = False) -> None: argv = gdb.string_to_argv(argstr) args = self.parser.parse_args(argv) self.execute(args) - def invoke(self, argstr, from_tty=False): + def invoke(self, argstr: str, from_tty: bool = False) -> None: try: self.invoke_uncaught(argstr, from_tty) except CommandError as e: @@ -61,10 +61,10 @@ def invoke(self, argstr, from_tty=False): except (SystemExit, KeyboardInterrupt): pass - def execute(self, argv): + def execute(self, argv: argparse.Namespace) -> None: raise NotImplementedError("Command should not be called directly") -def discover(): +def discover() -> None: modules = glob.glob(os.path.dirname(__file__)+"/[A-Za-z]*.py") __all__ = [os.path.basename(f)[:-3] for f in modules] diff --git a/crash/commands/btrfs.py b/crash/commands/btrfs.py index fadd14534d2..1f167346050 100644 --- a/crash/commands/btrfs.py +++ b/crash/commands/btrfs.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from argparse import Namespace +import argparse from crash.commands import Command, ArgumentParser from crash.commands import CommandLineError from crash.subsystem.filesystem import for_each_super_block, super_fstype @@ -24,7 +24,7 @@ def format_usage(self) -> str: class BtrfsCommand(Command): """display Btrfs internal data structures""" - def __init__(self, name): + def __init__(self, name: str) -> None: parser = _Parser(prog=name) subparsers = parser.add_subparsers(help="sub-command help") list_parser = subparsers.add_parser('list', help='list help') @@ -33,7 +33,7 @@ def __init__(self, name): Command.__init__(self, name, parser) - def list_btrfs(self, args: Namespace) -> None: + def list_btrfs(self, args: argparse.Namespace) -> None: print_header = True count = 0 for sb in for_each_super_block(): @@ -52,7 +52,7 @@ def list_btrfs(self, args: Namespace) -> None: if count == 0: print("No btrfs file systems were mounted.") - def execute(self, args): + def execute(self, args: argparse.Namespace) -> None: if hasattr(args, 'subcommand'): args.subcommand(args) else: diff --git a/crash/commands/dmesg.py b/crash/commands/dmesg.py index 327ba40df24..c0675701155 100644 --- a/crash/commands/dmesg.py +++ b/crash/commands/dmesg.py @@ -1,12 +1,17 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +from typing import Dict, Iterable, Any + import re +import argparse from crash.commands import Command, ArgumentParser, CommandError from crash.exceptions import DelayedAttributeError from crash.util.symbols import Types, Symvals +import gdb + types = Types(['struct printk_log *', 'char *']) symvals = Symvals(['log_buf', 'log_buf_len', 'log_first_idx', 'log_next_idx', 'clear_seq', 'log_first_seq', 'log_next_seq']) @@ -145,7 +150,7 @@ def format_usage(self) -> str: class LogCommand(Command): """dump system message buffer""" - def __init__(self, name): + def __init__(self, name: str) -> None: parser = _Parser(prog=name) parser.add_argument('-t', action='store_true', default=False) @@ -155,7 +160,7 @@ def __init__(self, name): Command.__init__(self, name, parser) @classmethod - def filter_unstructured_log(cls, log, args): + def filter_unstructured_log(cls, log: str, args: argparse.Namespace) -> str: lines = log.split('\n') if not args.m: newlog = [] @@ -169,7 +174,8 @@ def filter_unstructured_log(cls, log, args): return '\n'.join(lines) - def log_from_idx(self, 
logbuf, idx, dict_needed=False): + def log_from_idx(self, logbuf: gdb.Value, idx: int, + dict_needed: bool = False) -> Dict: msg = (logbuf + idx).cast(types.printk_log_p_type) try: @@ -206,15 +212,17 @@ def log_from_idx(self, logbuf, idx, dict_needed=False): msgdict['dict'].append(s) return msgdict - def get_log_msgs(self, dict_needed=False): + def get_log_msgs(self, + dict_needed: bool = False) -> Iterable[Dict[str, Any]]: try: idx = symvals.log_first_idx except DelayedAttributeError as e: raise LogTypeException('not structured log') if symvals.clear_seq < symvals.log_first_seq: - symvals.clear_seq = symvals.log_first_seq - + # mypy seems to think the preceding clear_seq is fine but this + # one isn't. Derp. + symvals.clear_seq = symvals.log_first_seq # type: ignore seq = symvals.clear_seq idx = symvals.log_first_idx @@ -225,7 +233,7 @@ def get_log_msgs(self, dict_needed=False): idx = msg['next'] yield msg - def handle_structured_log(self, args): + def handle_structured_log(self, args: argparse.Namespace) -> None: for msg in self.get_log_msgs(args.d): timestamp = '' if not args.t: @@ -243,14 +251,14 @@ def handle_structured_log(self, args): for d in msg['dict']: print(d) - def handle_logbuf(self, args): + def handle_logbuf(self, args: argparse.Namespace) -> None: if symvals.log_buf_len and symvals.log_buf: if args.d: raise LogInvalidOption("Unstructured logs don't offer key/value pair support") print(self.filter_unstructured_log(symvals.log_buf.string('utf-8', 'replace'), args)) - def execute(self, args): + def execute(self, args: argparse.Namespace) -> None: try: self.handle_structured_log(args) return diff --git a/crash/commands/help.py b/crash/commands/help.py index 422e06b08ba..4b665dda297 100644 --- a/crash/commands/help.py +++ b/crash/commands/help.py @@ -22,12 +22,12 @@ class _Parser(ArgumentParser): class HelpCommand(Command): """ this command""" - def __init__(self): + def __init__(self) -> None: parser = _Parser(prog="help") parser.add_argument('args', nargs=argparse.REMAINDER) super().__init__('help', parser) - def execute(self, argv): + def execute(self, argv: argparse.Namespace) -> None: if not argv.args: print("Available commands:") for cmd in sorted(self.commands): diff --git a/crash/commands/kmem.py b/crash/commands/kmem.py index 98d0739be89..c5f9a83a4c7 100644 --- a/crash/commands/kmem.py +++ b/crash/commands/kmem.py @@ -1,6 +1,10 @@ #!/usr/bin/python3 # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +from typing import List + +import argparse + from crash.commands import Command, ArgumentParser from crash.commands import CommandError, CommandLineError from crash.types.slab import kmem_cache_get_all, kmem_cache_from_name @@ -28,7 +32,7 @@ class _Parser(ArgumentParser): class KmemCommand(Command): """ kernel memory inspection""" - def __init__(self, name): + def __init__(self, name: str) -> None: parser = ArgumentParser(prog=name) group = parser.add_mutually_exclusive_group() @@ -40,7 +44,7 @@ def __init__(self, name): super().__init__(name, parser) - def execute(self, args): + def execute(self, args: argparse.Namespace) -> None: if args.z: self.print_zones() return @@ -88,7 +92,7 @@ def execute(self, args): % name) elif not obj[2]: print("FREE object %x from slab %s" % (obj[1], name)) - else: + elif obj[2] is not None: ac = obj[2] if ac["ac_type"] == "percpu": ac_desc = "cpu %d cache" % ac["nid_tgt"] @@ -101,8 +105,10 @@ def execute(self, args): print("FREE object %x from slab %s (in %s)" % (obj[1], name, ac_desc)) + else: + raise RuntimeError("odd return 
value from contains_obj") - def __print_vmstat(self, vmstat, diffs): + def __print_vmstat(self, vmstat: List[int], diffs: List[int]) -> None: vmstat_names = VmStat.get_stat_names(); just = max(map(len, vmstat_names)) nr_items = VmStat.nr_stat_items @@ -113,7 +119,7 @@ def __print_vmstat(self, vmstat, diffs): print("%s: %d (%d)" % (vmstat_names[i].rjust(just), vmstat[i], diffs[i])) - def print_vmstats(self): + def print_vmstats(self) -> None: try: vm_stat = get_symbol_value("vm_stat") except MissingSymbolError: @@ -146,7 +152,7 @@ def print_vmstats(self): for name, val in zip(names, vm_events): print("%s: %d" % (name.rjust(just), val)) - def print_zones(self): + def print_zones(self) -> None: for zone in for_each_zone(): zone_struct = zone.gdb_obj diff --git a/crash/commands/lsmod.py b/crash/commands/lsmod.py index 6fac6349934..56c35d822f0 100644 --- a/crash/commands/lsmod.py +++ b/crash/commands/lsmod.py @@ -12,6 +12,8 @@ from crash.types.list import list_for_each_entry from crash.types.percpu import get_percpu_var +import gdb + class _Parser(ArgumentParser): """ NAME @@ -40,7 +42,7 @@ def format_usage(self) -> str: class ModuleCommand(Command): """display module information""" - def __init__(self): + def __init__(self) -> None: parser = _Parser(prog="lsmod") parser.add_argument('-p', nargs='?', const=-1, default=None, type=int) @@ -48,7 +50,7 @@ def __init__(self): Command.__init__(self, "lsmod", parser) - def print_module_percpu(self, mod, cpu=-1): + def print_module_percpu(self, mod: gdb.Value, cpu: int = -1) -> None: cpu = int(cpu) addr = int(mod['percpu']) if addr == 0: @@ -65,7 +67,7 @@ def print_module_percpu(self, mod, cpu=-1): tabs, size)) - def execute(self, argv): + def execute(self, argv: argparse.Namespace) -> None: regex = None show_deps = True print_header = True diff --git a/crash/commands/mount.py b/crash/commands/mount.py index 55e17a9b70e..fb9df20a77a 100644 --- a/crash/commands/mount.py +++ b/crash/commands/mount.py @@ -1,7 +1,9 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from argparse import Namespace +from typing import Any + +import argparse from crash.commands import Command, ArgumentParser from crash.types.task import LinuxTask @@ -25,7 +27,7 @@ class _Parser(ArgumentParser): class MountCommand(Command): """display mounted file systems""" - def __init__(self, name): + def __init__(self, name: str) -> None: parser = _Parser(prog=name) parser.add_argument('-v', action='store_true', default=False) @@ -34,7 +36,7 @@ def __init__(self, name): super().__init__(name, parser) - def __getattr__(self, name): + def __getattr__(self, name: str) -> Any: if name == 'charp': self.charp = gdb.lookup_type('char').pointer() else: @@ -42,14 +44,14 @@ def __getattr__(self, name): return getattr(self, name) - def execute(self, args): + def execute(self, args: argparse.Namespace) -> None: if args.v: print("{:^16} {:^16} {:^10} {:^16} {}" .format("MOUNT", "SUPERBLK", "TYPE", "DEVNAME", "PATH")) for mnt in for_each_mount(): self.show_one_mount(mnt, args) - def show_one_mount(self, mnt: gdb.Value, args: Namespace, + def show_one_mount(self, mnt: gdb.Value, args: argparse.Namespace, task: LinuxTask = None) -> None: if mnt.type.code == gdb.TYPE_CODE_PTR: mnt = mnt.dereference() diff --git a/crash/commands/ps.py b/crash/commands/ps.py index df8c4dca01e..469179b16af 100755 --- a/crash/commands/ps.py +++ b/crash/commands/ps.py @@ -1,6 +1,8 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +from typing import 
Pattern, Optional, Callable + import argparse import fnmatch import re @@ -16,9 +18,10 @@ class TaskFormat(object): This class is responsible for converting the arguments into formatting rules. """ - def __init__(self, argv, regex): + def __init__(self, argv: argparse.Namespace, + regex: Optional[Pattern[str]]) -> None: self.sort = lambda x: x.info.task_pid() - self._filter = lambda x: True + self._filter: Callable[[LinuxTask], bool] = lambda x: True self._format_one_task = self._format_common_line self._regex = regex @@ -531,7 +534,7 @@ def format_usage(self) -> str: class PSCommand(Command): """display process status information""" - def __init__(self): + def __init__(self) -> None: parser = _Parser(prog="ps") group = parser.add_mutually_exclusive_group() @@ -556,9 +559,9 @@ def __init__(self): Command.__init__(self, "ps", parser) - def task_state_string(self, task): + def task_state_string(self, task: LinuxTask) -> str: state = task.task_state() - buf = None + buf = "" exclusive = False try: @@ -567,8 +570,6 @@ def task_state_string(self, task): except AttributeError: pass - buf = None - for bits in sorted(self.task_states.keys(), reverse=True): if (state & bits) == bits: buf = self.task_states[bits] @@ -584,7 +585,7 @@ def task_state_string(self, task): return buf - def setup_task_states(self): + def setup_task_states(self) -> None: self.task_states = { TF.TASK_RUNNING : "RU", TF.TASK_INTERRUPTIBLE : "IN", @@ -602,7 +603,7 @@ def setup_task_states(self): if TF.has_flag('TASK_IDLE'): self.task_states[TF.TASK_IDLE] = "ID" - def execute(self, argv): + def execute(self, argv: argparse.Namespace) -> None: # Unimplemented if argv.p or argv.c or argv.t or argv.a or argv.g or argv.r: raise CommandError("Support for the -p, -c, -t, -a, -g, and -r options is unimplemented.") @@ -627,7 +628,7 @@ def execute(self, argv): if header: print(header) - header = None + header = "" task.update_mem_usage() state = self.task_state_string(task) diff --git a/crash/commands/syscmd.py b/crash/commands/syscmd.py index 41fc755ca11..7a6a6e5b711 100644 --- a/crash/commands/syscmd.py +++ b/crash/commands/syscmd.py @@ -1,6 +1,8 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +import argparse + from crash.commands import Command, ArgumentParser from crash.commands import CommandLineError from crash.cache.syscache import utsname, config, kernel @@ -40,7 +42,7 @@ class _Parser(ArgumentParser): class SysCommand(Command): """system data""" - def __init__(self, name): + def __init__(self, name: str) -> None: parser = _Parser(prog=name) @@ -49,7 +51,7 @@ def __init__(self, name): Command.__init__(self, name, parser) @staticmethod - def show_default(): + def show_default() -> None: print(" UPTIME: {}".format(kernel.uptime)) print("LOAD AVERAGE: {}".format(kernel.loadavg)) print(" NODENAME: {}".format(utsname.nodename)) @@ -57,7 +59,7 @@ def show_default(): print(" VERSION: {}".format(utsname.version)) print(" MACHINE: {}".format(utsname.machine)) - def execute(self, args): + def execute(self, args: argparse.Namespace) -> None: if args.config: if args.config == "config": print(config) diff --git a/crash/commands/task.py b/crash/commands/task.py index 44fa458dcd6..43f26ab9807 100644 --- a/crash/commands/task.py +++ b/crash/commands/task.py @@ -26,7 +26,7 @@ class _Parser(ArgumentParser): class TaskCommand(Command): """select task by pid""" - def __init__(self, name): + def __init__(self, name: str) -> None: parser = ArgumentParser(prog=name) @@ -34,7 +34,7 @@ def __init__(self, name): 
Command.__init__(self, name, parser) - def execute(self, args): + def execute(self, args: argparse.Namespace) -> None: try: if args.pid: thread = crash.cache.tasks.get_task(args.pid[0]).thread diff --git a/crash/commands/vtop.py b/crash/commands/vtop.py index 0bb6feb1e8e..d2650fd2482 100644 --- a/crash/commands/vtop.py +++ b/crash/commands/vtop.py @@ -11,11 +11,11 @@ class LinuxPGT(object): table_names = ('PTE', 'PMD', 'PUD', 'PGD') - def __init__(self, ctx, sys): + def __init__(self, ctx: addrxlat.Context, sys: addrxlat.System) -> None: self.context = ctx self.system = sys - def begin(self, addr): + def begin(self, addr: int) -> bool: meth = self.system.get_map(addrxlat.SYS_MAP_HW).search(addr) if meth == addrxlat.SYS_METH_NONE: meth = self.system.get_map(addrxlat.SYS_MAP_KV_PHYS).search(addr) @@ -27,7 +27,7 @@ def begin(self, addr): self.step.launch(addr) return True - def next(self): + def next(self) -> bool: if self.step.remain <= 1: return False @@ -44,14 +44,14 @@ def next(self): self.step.remain = 0 return True - def address(self): + def address(self) -> str: return '{:16x}'.format(self.ptr.addr) - def value(self): + def value(self) -> str: return '{:x}{}'.format(self.step.raw, self.note) class LinuxNonAutoPGT(LinuxPGT): - def address(self): + def address(self) -> str: addr = super().address() + ' [machine], ' tmp = self.ptr.copy() try: @@ -178,7 +178,7 @@ def format_usage(self) -> str: class VTOPCommand(Command): """convert virtual address to physical""" - def __init__(self): + def __init__(self) -> None: parser = ArgumentParser(prog="vtop") group = parser.add_mutually_exclusive_group() @@ -191,12 +191,13 @@ def __init__(self): super().__init__("vtop", parser) - def execute(self, argv): + def execute(self, argv: argparse.Namespace) -> None: trans = CrashAddressTranslation() - if trans.is_non_auto: - pgt = LinuxNonAutoPGT(trans.context, trans.system) - else: + # Silly mypy bug means the base class needs come first + if not trans.is_non_auto: pgt = LinuxPGT(trans.context, trans.system) + else: + pgt = LinuxNonAutoPGT(trans.context, trans.system) for addr in argv.args: addr = int(addr, 16) diff --git a/crash/commands/xfs.py b/crash/commands/xfs.py index 5561f79aefb..4c42b0652ee 100644 --- a/crash/commands/xfs.py +++ b/crash/commands/xfs.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from argparse import Namespace +import argparse from crash.commands import Command, ArgumentParser from crash.commands import CommandLineError, CommandError @@ -45,7 +45,7 @@ class _Parser(ArgumentParser): class XFSCommand(Command): """display XFS internal data structures""" - def __init__(self, name): + def __init__(self, name: str) -> None: parser = ArgumentParser(prog=name) subparsers = parser.add_subparsers(help="sub-command help") show_parser = subparsers.add_parser('show', help='show help') @@ -62,7 +62,7 @@ def __init__(self, name): Command.__init__(self, name, parser) - def list_xfs(self, args: Namespace) -> None: + def list_xfs(self, args: argparse.Namespace) -> None: count = 0 print_header = True for sb in for_each_super_block(): @@ -79,7 +79,7 @@ def list_xfs(self, args: Namespace) -> None: if count == 0: print("No xfs file systems are mounted.") - def show_xfs(self, args: Namespace) -> None: + def show_xfs(self, args: argparse.Namespace) -> None: try: sb = get_super_block(args.addr) except gdb.NotAvailableError as e: @@ -97,7 +97,7 @@ def show_xfs(self, args: Namespace) -> None: else: print("AIL has items queued") - def dump_ail(self, 
args: Namespace) -> None: + def dump_ail(self, args: argparse.Namespace) -> None: try: sb = get_super_block(args.addr) except gdb.NotAvailableError as e: @@ -165,7 +165,7 @@ def dump_buftarg(cls, targ: gdb.Value) -> None: print("{:x} {}".format(int(buf.address), xfs_format_xfsbuf(buf))) @classmethod - def dump_buftargs(cls, args: Namespace): + def dump_buftargs(cls, args: argparse.Namespace) -> None: try: sb = get_super_block(args.addr) except gdb.NotAvailableError as e: @@ -181,7 +181,7 @@ def dump_buftargs(cls, args: Namespace): print("Log device queue:") cls.dump_buftarg(ldev) - def execute(self, args): + def execute(self, args: argparse.Namespace) -> None: if hasattr(args, 'subcommand'): args.subcommand(args) else: diff --git a/crash/exceptions.py b/crash/exceptions.py index 2be5a158ada..f2b362a41f6 100644 --- a/crash/exceptions.py +++ b/crash/exceptions.py @@ -1,6 +1,10 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +from typing import Type + +import gdb + class MissingSymbolError(RuntimeError): """The requested symbol cannot be located.""" pass @@ -18,27 +22,27 @@ class DelayedAttributeError(AttributeError): The attribute has been declared but the symbol to fill it has not yet been located. """ - def __init__(self, name): + def __init__(self, name: str) -> None: msg = "Delayed attribute {} has not been completed." self.name = name super().__init__(msg.format(name)) class InvalidArgumentError(TypeError): """Base class for invalid argument exceptions""" - def __init__(self, msg): + def __init__(self, msg: str) -> None: super().__init__(msg) class ArgumentTypeError(InvalidArgumentError): """The provided object could not be converted to the expected type""" formatter = "cannot convert argument `{}' of type {} to {}" - def __init__(self, name, val, expected_type): + def __init__(self, name: str, val: Type, expected_type: Type) -> None: msg = self.formatter.format(name, self.format_clsname(val.__class__), self.format_clsname(expected_type)) super().__init__(msg) self.val = val - def format_clsname(self, cls): + def format_clsname(self, cls: Type) -> str: module = cls.__module__ if module is None or module == str.__class__.__module__: return cls.__name__ # Avoid reporting __builtin__ @@ -52,13 +56,14 @@ class UnexpectedGDBTypeBaseError(InvalidArgumentError): class UnexpectedGDBTypeError(UnexpectedGDBTypeBaseError): """The gdb.Type passed describes an inappropriate type for the operation""" formatter = "expected gdb.Type `{}' to describe `{}' not `{}'" - def __init__(self, name, gdbtype, expected_type): + def __init__(self, name: str, gdbtype: gdb.Type, + expected_type: gdb.Type) -> None: msg = self.formatter.format(name, str(gdbtype), str(expected_type)) super().__init__(msg) class NotStructOrUnionError(UnexpectedGDBTypeBaseError): """The provided type is not a struct or union""" formatter = "argument `{}' describes type `{}' which is not a struct or union" - def __init__(self, name, gdbtype): + def __init__(self, name: str, gdbtype: gdb.Type) -> None: msg = self.formatter.format(name, str(gdbtype)) super().__init__(msg) diff --git a/crash/infra/__init__.py b/crash/infra/__init__.py index 5d53aa0888b..3b15529054e 100644 --- a/crash/infra/__init__.py +++ b/crash/infra/__init__.py @@ -1,12 +1,15 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +from typing import Callable, Any, List + import sys import glob import os.path import importlib -def autoload_submodules(caller, callback=None): +def 
autoload_submodules(caller: str, + callback: Callable[[Any], None] = None) -> List[str]: mods = [] try: mod = sys.modules[caller] @@ -15,11 +18,11 @@ def autoload_submodules(caller, callback=None): mods.append(caller) path = os.path.dirname(mod.__file__) modules = glob.glob("{}/[A-Za-z0-9_]*.py".format(path)) - for mod in modules: - mod = os.path.basename(mod)[:-3] - if mod == '__init__': + for modname in modules: + modname = os.path.basename(modname)[:-3] + if modname == '__init__': continue - modname = "{}.{}".format(caller, mod) + modname = "{}.{}".format(caller, modname) x = importlib.import_module(modname) if callback: callback(x) diff --git a/crash/infra/callback.py b/crash/infra/callback.py index 8b9431cab0b..267fc684fc4 100644 --- a/crash/infra/callback.py +++ b/crash/infra/callback.py @@ -1,16 +1,18 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from typing import Callable, Any, Union +from typing import Callable, Any, Union, TypeVar, Optional import gdb Callback = Callable[[Any], Union[bool, None]] +OECType = TypeVar('OECType', bound='ObjfileEventCallback') + class CallbackCompleted(RuntimeError): """The callback has already been completed and is no longer valid""" - def __init__(self, callback_obj): - msg = "{} has already completed.".format(callback_obj.name) + def __init__(self, callback_obj: 'ObjfileEventCallback') -> None: + msg = "Callback has already completed." super().__init__(msg) self.callback_obj = callback_obj @@ -28,13 +30,13 @@ class ObjfileEventCallback(object): Consumers of this interface must also call :meth:`connect_callback` to connect the object to the callback infrastructure. """ - def __init__(self): + def __init__(self) -> None: self.completed = False self.connected = False self._setup_symbol_cache_flush_callback() - def connect_callback(self): + def connect_callback(self) -> bool: """ Connect this callback to the event system. @@ -45,7 +47,7 @@ def connect_callback(self): raise CallbackCompleted(self) if self.connected: - return + return False self.connected = True @@ -56,14 +58,17 @@ def connect_callback(self): if objfiles: result = self.check_ready() if not (result is None or result is False): - self.completed = self.callback(result) + completed = self.callback(result) + if completed is None: + completed = True + self.completed = completed if self.completed is False: gdb.events.new_objfile.connect(self._new_objfile_callback) return self.completed - def complete(self): + def complete(self) -> None: """ Complete and disconnect this callback from the event system. @@ -79,7 +84,7 @@ def complete(self): _symbol_cache_flush_setup = False @classmethod - def _setup_symbol_cache_flush_callback(cls): + def _setup_symbol_cache_flush_callback(cls) -> None: if not cls._symbol_cache_flush_setup: gdb.events.new_objfile.connect(cls._flush_symbol_cache_callback) cls._symbol_cache_flush_setup = True @@ -89,10 +94,10 @@ def _setup_symbol_cache_flush_callback(cls): # symtab code. The symtab observer is behind the python observers # in the execution queue so the cache flush executes /after/ us. 
@classmethod - def _flush_symbol_cache_callback(cls, event): + def _flush_symbol_cache_callback(cls, event: gdb.NewObjFileEvent) -> None: gdb.execute("maint flush-symbol-cache") - def _new_objfile_callback(self, event): + def _new_objfile_callback(self, event: gdb.NewObjFileEvent) -> None: # GDB purposely copies the event list prior to calling the callbacks # If we remove an event from another handler, it will still be sent if self.completed: @@ -116,7 +121,7 @@ def check_ready(self) -> Any: """ raise NotImplementedError("check_ready must be implemented by derived class.") - def callback(self, result: Any) -> Union[None, bool]: + def callback(self, result: Any) -> Optional[bool]: """ The callback that derived classes implement for handling the sucessful result of :meth:`check_ready`. diff --git a/crash/infra/lookup.py b/crash/infra/lookup.py index 6e2de0aef8f..abe11d16266 100644 --- a/crash/infra/lookup.py +++ b/crash/infra/lookup.py @@ -28,7 +28,8 @@ class NamedCallback(ObjfileEventCallback): attrname (:obj:`str`): The name of symbol or type being resolved translated for use as an attribute name. """ - def __init__(self, name: str, callback: Callback, attrname: str = None): + def __init__(self, name: str, callback: Callback, + attrname: str = None) -> None: super().__init__() self.name = name @@ -68,7 +69,8 @@ class MinimalSymbolCallback(NamedCallback): callback: The callback to execute when the minimal symbol is discovered symbol_file (optional): Name of the symbol file to use """ - def __init__(self, name: str, callback: Callback, symbol_file: str = None): + def __init__(self, name: str, callback: Callback, + symbol_file: str = None) -> None: super().__init__(name, callback) self.symbol_file = symbol_file @@ -85,7 +87,7 @@ def check_ready(self) -> gdb.MinSymbol: """ return gdb.lookup_minimal_symbol(self.name, self.symbol_file, None) - def __str__(self): + def __str__(self) -> str: return ("<{}({}, {}, {})>" .format(self.__class__.__name__, self.name, self.symbol_file, self.callback)) @@ -106,7 +108,7 @@ class SymbolCallback(NamedCallback): constant, i.e. SYMBOL_*_DOMAIN. """ def __init__(self, name: str, callback: Callback, - domain: int = gdb.SYMBOL_VAR_DOMAIN): + domain: int = gdb.SYMBOL_VAR_DOMAIN) -> None: super().__init__(name, callback) self.domain = domain @@ -123,7 +125,7 @@ def check_ready(self) -> gdb.Symbol: """ return gdb.lookup_symbol(self.name, None, self.domain)[0] - def __str__(self): + def __str__(self) -> str: return ("<{}({}, {})>" .format(self.__class__.__name__, self.name, self.domain)) @@ -168,7 +170,8 @@ class TypeCallback(NamedCallback): block (optional): The :obj:`gdb.Block` to search for the symbol """ - def __init__(self, name: str, callback: Callback, block: gdb.Block = None): + def __init__(self, name: str, callback: Callback, + block: gdb.Block = None) -> None: (name, attrname, self.pointer) = self.resolve_type(name) super().__init__(name, callback, attrname) @@ -233,13 +236,13 @@ def resolve_type(name: str) -> Tuple[str, str, bool]: return (name, attrname, pointer) - def check_ready(self): + def check_ready(self) -> Union[None, gdb.Type]: try: return gdb.lookup_type(self.name, self.block) except gdb.error as e: return None - def __str__(self): + def __str__(self) -> str: return ("<{}({}, {})>" .format(self.__class__.__name__, self.name, self.block)) @@ -248,7 +251,7 @@ class DelayedValue(object): A generic class for making class attributes available that describe to-be-loaded symbols, minimal symbols, and types. 
""" - def __init__(self, name: str, attrname: str = None): + def __init__(self, name: str, attrname: str = None) -> None: if name is None or not isinstance(name, str): raise ValueError("Name must be a valid string") @@ -263,12 +266,12 @@ def __init__(self, name: str, attrname: str = None): self.value: Any = None - def get(self): + def get(self) -> Any: if self.value is None: raise DelayedAttributeError(self.name) return self.value - def callback(self, value): + def callback(self, value: Any) -> None: if self.value is not None: return self.value = value @@ -280,11 +283,11 @@ class DelayedMinimalSymbol(DelayedValue): Args: name: The name of the minimal symbol """ - def __init__(self, name: str): + def __init__(self, name: str) -> None: super().__init__(name) self.cb = MinimalSymbolCallback(name, self.callback) - def __str__(self): + def __str__(self) -> str: return "{} attached with {}".format(self.__class__, str(self.cb)) class DelayedSymbol(DelayedValue): @@ -294,11 +297,11 @@ class DelayedSymbol(DelayedValue): Args: name: The name of the symbol """ - def __init__(self, name: str): + def __init__(self, name: str) -> None: super().__init__(name) self.cb = SymbolCallback(name, self.callback) - def __str__(self): + def __str__(self) -> str: return "{} attached with {}".format(self.__class__, str(self.cb)) class DelayedType(DelayedValue): @@ -308,15 +311,15 @@ class DelayedType(DelayedValue): Args: name: The name of the type. """ - def __init__(self, name: str): + def __init__(self, name: str) -> None: (name, attrname, self.pointer) = TypeCallback.resolve_type(name) super().__init__(name, attrname) self.cb = TypeCallback(name, self.callback) - def __str__(self): + def __str__(self) -> str: return "{} attached with {}".format(self.__class__, str(self.callback)) - def callback(self, value): + def callback(self, value: gdb.Type) -> None: if self.pointer: value = value.pointer() self.value = value @@ -335,7 +338,7 @@ def callback(self, value: gdb.Symbol) -> None: symval = symval.address self.value = symval - def __str__(self): + def __str__(self) -> str: return "{} attached with {}".format(self.__class__, str(self.cb)) class DelayedMinimalSymval(DelayedMinimalSymbol): @@ -349,5 +352,5 @@ class DelayedMinimalSymval(DelayedMinimalSymbol): def callback(self, value: gdb.MinSymbol) -> None: self.value = int(value.value().address) - def __str__(self): + def __str__(self) -> str: return "{} attached with {}".format(self.__class__, str(self.cb)) diff --git a/crash/kernel.py b/crash/kernel.py index ef10496270c..9cd053d406d 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from typing import Pattern, Union, List, Dict, Any +from typing import Pattern, Union, List, Dict, Any, Optional import sys import re @@ -28,7 +28,8 @@ class NoMatchingFileError(FileNotFoundError): class ModinfoMismatchError(ValueError): _fmt = "module {} has mismatched {} (got `{}' expected `{}')" - def __init__(self, attribute, path, value, expected_value): + def __init__(self, attribute: str, path: str, value: Optional[str], + expected_value: Optional[str]) -> None: msg = self._fmt.format(path, attribute, value, expected_value) super().__init__(msg) self.path = path @@ -37,11 +38,13 @@ def __init__(self, attribute, path, value, expected_value): self.attribute = attribute class ModVersionMismatchError(ModinfoMismatchError): - def __init__(self, path, module_value, expected_value): + def __init__(self, path: str, module_value: 
Optional[str], + expected_value: Optional[str]) -> None: super().__init__('vermagic', path, module_value, expected_value) class ModSourceVersionMismatchError(ModinfoMismatchError): - def __init__(self, path, module_value, expected_value): + def __init__(self, path: str, module_value: Optional[str], + expected_value: Optional[str]) -> None: super().__init__('srcversion', path, module_value, expected_value) LINUX_KERNEL_PID = 1 @@ -57,7 +60,7 @@ def __init__(self, roots: PathSpecifier = None, vmlinux_debuginfo: PathSpecifier = None, module_path: PathSpecifier = None, module_debuginfo_path: PathSpecifier = None, - verbose: bool = False, debug: bool = False): + verbose: bool = False, debug: bool = False) -> None: """ Initialize a basic kernel semantic debugging session. @@ -288,7 +291,11 @@ def __init__(self, roots: PathSpecifier = None, self.vermagic = self.extract_vermagic() archname = obj.architecture.name() - archclass = crash.arch.get_architecture(archname) + try: + archclass = crash.arch.get_architecture(archname) + except RuntimeError as e: + raise CrashKernelError(str(e)) + self.arch = archclass() self.target = gdb.current_target() @@ -496,7 +503,7 @@ def get_module_path_from_modules_order(self, path: str, name: str) -> str: except KeyError: raise NoMatchingFileError(name) - def cache_file_tree(self, path, regex: Pattern[str] = None) -> None: + def cache_file_tree(self, path: str, regex: Pattern[str] = None) -> None: if not path in self.findmap: self.findmap[path] = { 'filters' : [], diff --git a/crash/subsystem/filesystem/decoders.py b/crash/subsystem/filesystem/decoders.py index 937d9424085..ce046e4ccfa 100644 --- a/crash/subsystem/filesystem/decoders.py +++ b/crash/subsystem/filesystem/decoders.py @@ -1,6 +1,8 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +from typing import Any + from crash.util.symbols import Types from crash.subsystem.storage import block_device_name from crash.subsystem.storage.decoders import Decoder, decode_bh @@ -34,23 +36,23 @@ class DIOBioDecoder(Decoder): __endio__ = ['dio_bio_end_io', 'dio_bio_end_io'] _description = "{:x} bio: Direct I/O for {} inode {}, sector {} on {}" - def __init__(self, bio: gdb.Value): + def __init__(self, bio: gdb.Value) -> None: super().__init__() self.bio = bio - def interpret(self): + def interpret(self) -> None: """Interprets a direct i/o bio to populate its attributes""" self.dio = self.bio['bi_private'].cast(self._types.dio_p_type) self.fstype = super_fstype(self.dio['inode']['i_sb']) self.dev = block_device_name(self.dio['inode']['i_sb']['s_bdev']) self.offset = self.dio['block_in_file'] << self.dio['blkbits'] - def __str__(self): + def __str__(self) -> str: return self._description.format(int(self.bio), self.fstype, self.dio['inode']['i_ino'], self.bio['bi_sector'], self.dev) - def __next__(self): + def __next__(self) -> Any: return None DIOBioDecoder.register() @@ -78,17 +80,17 @@ class DecodeMPage(Decoder): description = "{:x} bio: Multipage I/O: inode {}, type {}, dev {}" - def __init__(self, bio: gdb.Value): + def __init__(self, bio: gdb.Value) -> None: super().__init__() self.bio = bio - def interpret(self): + def interpret(self) -> None: """Interpret the multipage bio to populate its attributes""" self.inode = self.bio['bi_io_vec'][0]['bv_page']['mapping']['host'] self.fstype = super_fstype(self.inode['i_sb']) - def __str__(self): + def __str__(self) -> str: return self.description.format(int(self.bio), self.inode['i_ino'], self.fstype, block_device_name(self.bio['bi_bdev'])) 
@@ -116,18 +118,18 @@ class DecodeBioBH(Decoder): __endio__ = 'end_bio_bh_io_sync' _description = "{:x} bio: Bio representation of buffer head" - def __init__(self, bio: gdb.Value): + def __init__(self, bio: gdb.Value) -> None: super().__init__() self.bio = bio - def interpret(self): + def interpret(self) -> None: """Interpret the buffer_head bio to populate its attributes""" self.bh = self.bio['bi_private'].cast(self._types.buffer_head_p_type) - def __str__(self): + def __str__(self) -> str: return self._description.format(int(self.bio)) - def __next__(self): + def __next__(self) -> Any: return decode_bh(self.bh) DecodeBioBH.register() @@ -148,12 +150,12 @@ class DecodeSyncWBBH(Decoder): __endio__ = 'end_buffer_write_sync' _description = "{:x} buffer_head: for dev {}, block {}, size {} (unassociated)" - def __init__(self, bh): + def __init__(self, bh: gdb.Value) -> None: super().__init__() self.bh = bh - def __str__(self): - self._description.format(block_device_name(self.bh['b_bdev']), - self.bh['b_blocknr'], self.bh['b_size']) + def __str__(self) -> str: + return self._description.format(block_device_name(self.bh['b_bdev']), + self.bh['b_blocknr'], self.bh['b_size']) DecodeSyncWBBH.register() diff --git a/crash/subsystem/filesystem/ext3.py b/crash/subsystem/filesystem/ext3.py index 4004a6c3744..7abecb890bd 100644 --- a/crash/subsystem/filesystem/ext3.py +++ b/crash/subsystem/filesystem/ext3.py @@ -27,18 +27,18 @@ class Ext3Decoder(Decoder): __endio__ = 'journal_end_buffer_io_sync' _description = "{:x} buffer_head: {} journal block (jbd) on {}" - def __init__(self, bh: gdb.Value): + def __init__(self, bh: gdb.Value) -> None: super().__init__() self.bh = bh - def interpret(self): + def interpret(self) -> None: """Interprets the ext3 buffer_head to populate its attributes""" self.fstype = "journal on ext3" self.devname = block_device_name(self.bh['b_bdev']) self.offset = int(self.bh['b_blocknr']) * int(self.bh['b_size']) self.length = int(self.bh['b_size']) - def __str__(self): + def __str__(self) -> str: return self._description.format(int(self.bh), self.fstype, self.devname) Ext3Decoder.register() diff --git a/crash/subsystem/filesystem/mount.py b/crash/subsystem/filesystem/mount.py index 93d9868a872..b11db07145d 100644 --- a/crash/subsystem/filesystem/mount.py +++ b/crash/subsystem/filesystem/mount.py @@ -12,7 +12,7 @@ will be required and/or returned instead. """ -from typing import Iterator +from typing import Iterator, Callable, Any from crash.subsystem.filesystem import super_fstype from crash.types.list import list_for_each_entry @@ -54,13 +54,10 @@ types = Types(['struct mount', 'struct vfsmount']) symvals = Symvals(['init_task']) -class _Mount(object): - @classmethod - def _for_each_mount_impl(cls, task): - raise NotImplementedError("_Mount.for_each_mount is unhandled on this kernel version.") +class Mount(object): + _for_each_mount: Callable[[Any, gdb.Value], Iterator[gdb.Value]] - @classmethod - def for_each_mount_nsproxy(cls, task): + def _for_each_mount_nsproxy(self, task: gdb.Value) -> Iterator[gdb.Value]: """ An implementation of for_each_mount that uses the task's nsproxy to locate the mount namespace. 
See :ref:`for_each_mount` @@ -70,20 +67,24 @@ def for_each_mount_nsproxy(cls, task): types.mount_type, 'mnt_list') @classmethod - def _check_task_interface(cls, symval): + def _check_task_interface(cls, symval: gdb.Value) -> None: try: nsproxy = symvals.init_task['nsproxy'] - cls._for_each_mount_impl = cls.for_each_mount_nsproxy + cls._for_each_mount = cls._for_each_mount_nsproxy except KeyError: - print("check_task_interface called but no init_task?") - pass + raise NotImplementedError("Mount.for_each_mount is unhandled on this kernel version") + + def for_each_mount(self, task: gdb.Value) -> Iterator[gdb.Value]: + return self._for_each_mount(task) + +_Mount = Mount() -def _check_mount_type(gdbtype): +def _check_mount_type(gdbtype: gdb.Type) -> None: try: - types.mount_type = gdb.lookup_type('struct mount') + types.mount_type = gdb.lookup_type('struct mount') # type: ignore except gdb.error: # Older kernels didn't separate mount from vfsmount - types.mount_type = types.vfsmount_type + types.mount_type = types.vfsmount_type # type: ignore def for_each_mount(task: gdb.Value = None) -> Iterator[gdb.Value]: """ @@ -110,7 +111,7 @@ def for_each_mount(task: gdb.Value = None) -> Iterator[gdb.Value]: """ if task is None: task = symvals.init_task - return _Mount._for_each_mount_impl(task) + return _Mount.for_each_mount(task) def mount_flags(mnt: gdb.Value, show_hidden: bool = False) -> str: """ @@ -201,18 +202,18 @@ def mount_device(mnt: gdb.Value) -> str: devname = "none" return devname -def _real_mount(vfsmnt): +def _real_mount(vfsmnt: gdb.Value) -> gdb.Value: if (vfsmnt.type == types.mount_type or vfsmnt.type == types.mount_type.pointer()): t = vfsmnt.type if t.code == gdb.TYPE_CODE_PTR: t = t.target() if t is not types.mount_type: - types.mount_type = t + types.mount_type = t # type: ignore return vfsmnt return container_of(vfsmnt, types.mount_type, 'mnt') -def d_path(mnt: gdb.Value, dentry: gdb.Value, root: gdb.Value = None): +def d_path(mnt: gdb.Value, dentry: gdb.Value, root: gdb.Value = None) -> str: """ Returns a file system path described by a mount and dentry @@ -273,4 +274,4 @@ def d_path(mnt: gdb.Value, dentry: gdb.Value, root: gdb.Value = None): return name type_cbs = TypeCallbacks([('struct vfsmount', _check_mount_type)]) -symbols_cbs = SymbolCallbacks([('init_task', _Mount._check_task_interface)]) +symbols_cbs = SymbolCallbacks([('init_task', Mount._check_task_interface)]) diff --git a/crash/subsystem/filesystem/xfs.py b/crash/subsystem/filesystem/xfs.py index c60b04213eb..e89aff151b6 100644 --- a/crash/subsystem/filesystem/xfs.py +++ b/crash/subsystem/filesystem/xfs.py @@ -5,7 +5,7 @@ XFS file systems. 
""" -from typing import Iterable +from typing import Iterable, Any import uuid @@ -214,11 +214,11 @@ class XFSBufDecoder(Decoder): Decodes a struct xfs_buf into human-readable form """ - def __init__(self, xfsbuf): + def __init__(self, xfsbuf: gdb.Value) -> None: super(XFSBufDecoder, self).__init__() self.xfsbuf = xfsbuf - def __str__(self): + def __str__(self) -> str: return xfs_format_xfsbuf(self.xfsbuf) class XFSBufBioDecoder(Decoder): @@ -238,19 +238,19 @@ class XFSBufBioDecoder(Decoder): __endio__ = 'xfs_buf_bio_end_io' _types = Types(['struct xfs_buf *']) - def __init__(self, bio: gdb.Value): + def __init__(self, bio: gdb.Value) -> None: super(XFSBufBioDecoder, self).__init__() self.bio = bio - def interpret(self): + def interpret(self) -> None: """Interpret the xfsbuf bio to populate its attributes""" self.xfsbuf = self.bio['bi_private'].cast(self._types.xfs_buf_p_type) self.devname = block_device_name(self.bio['bi_bdev']) - def __next__(self): + def __next__(self) -> Any: return XFSBufDecoder(self.xfsbuf) - def __str__(self): + def __str__(self) -> str: return self._description.format(self.bio, self.devname) XFSBufBioDecoder.register() @@ -268,7 +268,7 @@ class _XFS(object): _ail_head_name = None @classmethod - def _detect_ail_version(cls, gdbtype): + def _detect_ail_version(cls, gdbtype: gdb.Type) -> None: if struct_has_member(gdbtype, 'ail_head'): cls._ail_head_name = 'ail_head' else: diff --git a/crash/subsystem/storage/__init__.py b/crash/subsystem/storage/__init__.py index 5af75bfca54..149e27bb29f 100644 --- a/crash/subsystem/storage/__init__.py +++ b/crash/subsystem/storage/__init__.py @@ -244,7 +244,7 @@ def inode_on_bdev(inode: gdb.Value) -> gdb.Value: else: return inode['i_sb']['s_bdev'].dereference() -def _check_types(result): +def _check_types(result: gdb.Symbol) -> None: try: if symvals.part_type.type.unqualified() != types.device_type_type: raise TypeError("part_type expected to be {} not {}" diff --git a/crash/subsystem/storage/decoders.py b/crash/subsystem/storage/decoders.py index 1d5016afc75..62abc757965 100644 --- a/crash/subsystem/storage/decoders.py +++ b/crash/subsystem/storage/decoders.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from typing import Union, List, Dict, Iterable +from typing import Union, List, Dict, Iterable, Type, Any from crash.infra.lookup import SymbolCallback from crash.subsystem.storage import block_device_name @@ -24,7 +24,7 @@ class Decoder(object): """ __endio__: EndIOSpecifier = None - def __init__(self): + def __init__(self, value: gdb.Value = None) -> None: self.interpreted = False def interpret(self) -> None: @@ -39,7 +39,7 @@ def interpret(self) -> None: """ pass - def __getattr__(self, name): + def __getattr__(self, name: str) -> Any: if self.interpreted: raise AttributeError(f"No such attribute `{name}'") @@ -48,7 +48,7 @@ def __getattr__(self, name): return getattr(self, name) @classmethod - def register(cls): + def register(cls) -> None: """ Registers a decoder with the storage decoder subsystem. @@ -67,7 +67,7 @@ def __str__(self) -> str: """ pass - def __next__(self): + def __next__(self) -> Any: """ For stacked storage, the object may have been generated as part of an operation on another object. e.g. 
a bio generated by @@ -106,11 +106,11 @@ class BadBHDecoder(Decoder): """ _description = "{:x} bh: invalid buffer_head" - def __init__(self, bh: gdb.Value): + def __init__(self, bh: gdb.Value) -> None: super().__init__() self.bh = bh - def __str__(self): + def __str__(self) -> str: return self._description.format(int(self.bh)) class GenericBHDecoder(Decoder): @@ -132,20 +132,20 @@ class GenericBHDecoder(Decoder): _description = "{:x} buffer_head: for dev {}, block {}, size {} (undecoded)" - def __init__(self, bh: gdb.Value): + def __init__(self, bh: gdb.Value) -> None: super().__init__() self.bh = bh - def interpret(self): + def interpret(self) -> None: self.block_device = block_device_name(self.bh['b_bdev']) - def __str__(self): + def __str__(self) -> str: return self._description.format(int(self.bh), self.block_device, self.bh['b_blocknr'], self.bh['b_size']) -_decoders: Dict[int, Decoder] = dict() +_decoders: Dict[int, Type[Decoder]] = dict() -def register_decoder(endio: EndIOSpecifier, decoder: Decoder) -> None: +def register_decoder(endio: EndIOSpecifier, decoder: Type[Decoder]) -> None: """ Registers a bio/buffer_head decoder with the storage subsystem. @@ -220,11 +220,11 @@ class BadBioDecoder(Decoder): """ _description = "{:x} bio: invalid bio" - def __init__(self, bio: gdb.Value): + def __init__(self, bio: gdb.Value) -> None: super().__init__() self.bio = bio - def __str__(self): + def __str__(self) -> str: return self._description.format(int(self.bio)) class GenericBioDecoder(Decoder): @@ -239,11 +239,11 @@ class GenericBioDecoder(Decoder): ``struct bio``. """ _description = "{:x} bio: undecoded bio on {} ({})" - def __init__(self, bio: gdb.Value): + def __init__(self, bio: gdb.Value) -> None: super().__init__() self.bio = bio - def __str__(self): + def __str__(self) -> str: return self._description.format(int(self.bio), block_device_name(self.bio['bi_bdev']), self.bio['bi_end_io']) diff --git a/crash/subsystem/storage/device_mapper.py b/crash/subsystem/storage/device_mapper.py index 59d9fa0a93e..633f17c5bd2 100644 --- a/crash/subsystem/storage/device_mapper.py +++ b/crash/subsystem/storage/device_mapper.py @@ -1,6 +1,8 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +from typing import Callable, Any + from crash.util import container_of from crash.util.symbols import Types from crash.subsystem.storage import block_device_name @@ -23,9 +25,9 @@ class ClonedBioReqDecoder(Decoder): __endio__ = 'end_clone_bio' _description = '{:x} bio: Request-based Device Mapper on {}' - _get_clone_bio_rq_info = None + _get_clone_bio_rq_info: Callable[[Any, gdb.Value], gdb.Value] - def __init__(self, bio: gdb.Value): + def __init__(self, bio: gdb.Value) -> None: super().__init__() self.bio = bio if self._get_clone_bio_rq_info is None: @@ -35,23 +37,23 @@ def __init__(self, bio: gdb.Value): getter = self._get_clone_bio_rq_info_old self._get_clone_bio_rq_info = getter - def interpret(self): + def interpret(self) -> None: """Interprets the request-based device mapper bio to populate its attributes""" self.info = self._get_clone_bio_rq_info(self.bio) self.tio = self.info['tio'] - def __str__(self): - self._description.format(int(self.bio), - block_device_name(self.bio['bi_bdev'])) + def __str__(self) -> str: + return self._description.format(int(self.bio), + block_device_name(self.bio['bi_bdev'])) - def __next__(self): + def __next__(self) -> Decoder: return decode_bio(self.info['orig']) - def _get_clone_bio_rq_info_old(self, bio): + def 
_get_clone_bio_rq_info_old(self, bio: gdb.Value) -> gdb.Value: return bio['bi_private'].cast(self._types.dm_rq_clone_bio_info_p_type) - def _get_clone_bio_rq_info_3_7(self, bio): + def _get_clone_bio_rq_info_3_7(self, bio: gdb.Value) -> gdb.Value: return container_of(bio, self._types.dm_rq_clone_bio_info_p_type, 'clone') ClonedBioReqDecoder.register() @@ -78,11 +80,11 @@ class ClonedBioDecoder(Decoder): value is of type ``struct dm_target_io``. """ _types = Types(['struct dm_target_io *']) - _get_clone_bio_tio = None + _get_clone_bio_tio: Callable[[Any, gdb.Value], gdb.Value] __endio__ = 'clone_endio' _description = "{:x} bio: device mapper clone: {}[{}] -> {}[{}]" - def __init__(self, bio: gdb.Value): + def __init__(self, bio: gdb.Value) -> None: super().__init__() self.bio = bio @@ -93,26 +95,26 @@ def __init__(self, bio: gdb.Value): getter = self._get_clone_bio_tio_old self._get_clone_bio_tio = getter - def interpret(self): + def interpret(self) -> None: """Interprets the cloned device mapper bio to populate its attributes""" self.tio = self._get_clone_bio_tio(self.bio) self.next_bio = self.tio['io']['bio'] - def __str__(self): + def __str__(self) -> str: return self._description.format( int(self.bio), block_device_name(self.bio['bi_bdev']), int(self.bio['bi_sector']), block_device_name(self.next_bio['bi_bdev']), int(self.next_bio['bi_sector'])) - def __next__(self): + def __next__(self) -> Decoder: return decode_bio(self.next_bio) - def _get_clone_bio_tio_old(self, bio): + def _get_clone_bio_tio_old(self, bio: gdb.Value) -> gdb.Value: return bio['bi_private'].cast(self._types.dm_target_io_p_type) - def _get_clone_bio_tio_3_15(self, bio): + def _get_clone_bio_tio_3_15(self, bio: gdb.Value) -> gdb.Value: return container_of(bio['bi_private'], self._types.dm_clone_bio_info_p_type, 'clone') diff --git a/crash/types/classdev.py b/crash/types/classdev.py index 2eaa5b039af..e4cf4834f46 100644 --- a/crash/types/classdev.py +++ b/crash/types/classdev.py @@ -19,7 +19,7 @@ class ClassdevState(object): #v5.1-rc1 moved knode_class from struct device to struct device_private @classmethod - def _setup_iterator_type(cls, gdbtype): + def _setup_iterator_type(cls, gdbtype: gdb.Type) -> None: if struct_has_member(gdbtype, 'knode_class'): cls._class_is_private = False diff --git a/crash/types/cpu.py b/crash/types/cpu.py index 61ba5ea2485..776a7feab7c 100644 --- a/crash/types/cpu.py +++ b/crash/types/cpu.py @@ -28,7 +28,7 @@ class TypesCPUClass(object): _cpu_online_mask: gdb.Value = None _cpu_possible_mask: gdb.Value = None - def __init__(self): + def __init__(self) -> None: raise NotImplementedError("This class is not meant to be instantiated") @classmethod diff --git a/crash/types/node.py b/crash/types/node.py index d50c7ce2cc4..13fb8e3aa2a 100644 --- a/crash/types/node.py +++ b/crash/types/node.py @@ -39,7 +39,7 @@ class Node(object): A wrapper around the Linux kernel 'struct node' structure """ @classmethod - def from_nid(cls: Type[NodeType], nid: int) -> NodeType: + def from_nid(cls: Type[NodeType], nid: int) -> 'Node': """ Obtain a Node using the NUMA Node ID (nid) @@ -72,7 +72,7 @@ def for_each_zone(self) -> Iterable[crash.types.zone.Zone]: yield crash.types.zone.Zone(zone, zid) ptr += types.zone_type.sizeof - def __init__(self, obj: gdb.Value): + def __init__(self, obj: gdb.Value) -> None: """ Initialize a Node using the gdb.Value for the struct node @@ -95,7 +95,7 @@ class NodeStates(object): nids_possible: List[int] = list() @classmethod - def _setup_node_states(cls, node_states_sym): + def 
_setup_node_states(cls, node_states_sym: gdb.Symbol) -> None: node_states = node_states_sym.value() enum_node_states = gdb.lookup_type("enum node_states") @@ -139,7 +139,7 @@ def for_each_online_nid(self) -> Iterable[int]: _state = NodeStates() -def for_each_nid(): +def for_each_nid() -> Iterable[int]: """ Iterate over each NUMA Node ID @@ -149,7 +149,7 @@ def for_each_nid(): for nid in _state.for_each_nid(): yield nid -def for_each_online_nid(): +def for_each_online_nid() -> Iterable[int]: """ Iterate over each online NUMA Node ID @@ -179,12 +179,12 @@ def for_each_online_node() -> Iterable[Node]: for nid in for_each_online_nid(): yield Node.from_nid(nid) -def for_each_zone(): +def for_each_zone() -> Iterable[crash.types.zone.Zone]: for node in for_each_node(): for zone in node.for_each_zone(): yield zone -def for_each_populated_zone(): +def for_each_populated_zone() -> Iterable[crash.types.zone.Zone]: #TODO: some filter thing? for zone in for_each_zone(): if zone.is_populated(): diff --git a/crash/types/page.py b/crash/types/page.py index aff8d5f5761..7bb55175c2f 100644 --- a/crash/types/page.py +++ b/crash/types/page.py @@ -1,7 +1,7 @@ #!/usr/bin/python3 # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from typing import Dict +from typing import Dict, Union, TypeVar, Iterable, Callable from math import log, ceil @@ -19,34 +19,42 @@ 'enum zone_type', 'struct mem_section']) symvals = Symvals(['mem_section']) +PageType = TypeVar('PageType', bound='Page') + class Page(object): slab_cache_name = None slab_page_name = None compound_head_name = None vmemmap_base = 0xffffea0000000000 - vmemmap = None + vmemmap: gdb.Value = None directmap_base = 0xffff880000000000 pageflags: Dict[str, int] = dict() - PG_tail = None - PG_slab = None - PG_lru = None + PG_tail = -1 + PG_slab = -1 + PG_lru = -1 setup_page_type_done = False setup_pageflags_done = False setup_pageflags_finish_done = False - ZONES_WIDTH = None - NODES_WIDTH = None + ZONES_WIDTH = -1 + NODES_WIDTH = -1 # TODO have arch provide this? 
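A minimal, self-contained sketch (names are invented, not taken from the patch) of the typed-sentinel convention this hunk adopts: class attributes that used to start as None become ints initialised to -1, so mypy sees a stable type while "not set up yet" is still detectable:

class Geometry:
    SECTION_BITS: int = -1          # filled in later by a debuginfo callback

    @classmethod
    def setup(cls, bits: int) -> None:
        cls.SECTION_BITS = bits

    @classmethod
    def section_size(cls) -> int:
        if cls.SECTION_BITS == -1:
            raise RuntimeError("Geometry.setup() has not run yet")
        return 1 << cls.SECTION_BITS

# Geometry.setup(24); Geometry.section_size() -> 16777216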
- BITS_PER_LONG = None + BITS_PER_LONG = -1 PAGE_SIZE = 4096 + PAGE_SHIFT = 12 sparsemem = False + SECTION_SIZE_BITS = -1 # Depends on sparsemem=True + SECTIONS_PER_ROOT = -1 # Depends on SPARSEMEM_EXTREME + + _is_tail: Callable[['Page'], bool] + _compound_head: Callable[['Page'], int] @classmethod - def setup_page_type(cls, gdbtype): + def setup_page_type(cls, gdbtype: gdb.Type) -> None: # TODO: should check config, but that failed to work on ppc64, hardcode # 64k for now if gdb.current_target().arch.name() == "powerpc:common64": @@ -56,9 +64,6 @@ def setup_page_type(cls, gdbtype): cls.sparsemem = True cls.SECTION_SIZE_BITS = 24 - else: - cls.PAGE_SHIFT = 12 - cls.PAGE_SIZE = 4096 cls.PAGE_SIZE = 1 << cls.PAGE_SHIFT @@ -72,12 +77,12 @@ def setup_page_type(cls, gdbtype): cls.setup_pageflags_finish() @classmethod - def setup_mem_section(cls, gdbtype): + def setup_mem_section(cls, gdbtype: gdb.Type) -> None: # TODO assumes SPARSEMEM_EXTREME cls.SECTIONS_PER_ROOT = cls.PAGE_SIZE / gdbtype.sizeof @classmethod - def pfn_to_page(cls, pfn): + def pfn_to_page(cls, pfn: int) -> gdb.Value: if cls.sparsemem: section_nr = pfn >> (cls.SECTION_SIZE_BITS - cls.PAGE_SHIFT) root_idx = section_nr / cls.SECTIONS_PER_ROOT @@ -90,7 +95,7 @@ def pfn_to_page(cls, pfn): return cls.vmemmap[pfn] @classmethod - def setup_pageflags(cls, gdbtype): + def setup_pageflags(cls, gdbtype: gdb.Type) -> None: for field in gdbtype.fields(): cls.pageflags[field.name] = field.enumval @@ -102,7 +107,7 @@ def setup_pageflags(cls, gdbtype): cls.PG_lru = 1 << cls.pageflags['PG_lru'] @classmethod - def setup_vmemmap_base(cls, symbol): + def setup_vmemmap_base(cls, symbol: gdb.Symbol) -> None: cls.vmemmap_base = int(symbol.value()) # setup_page_type() was first and used the hardcoded initial value, # we have to update @@ -110,16 +115,16 @@ def setup_vmemmap_base(cls, symbol): cls.vmemmap = gdb.Value(cls.vmemmap_base).cast(types.page_type.pointer()) @classmethod - def setup_directmap_base(cls, symbol): + def setup_directmap_base(cls, symbol: gdb.Symbol) -> None: cls.directmap_base = int(symbol.value()) @classmethod - def setup_zone_type(cls, gdbtype): + def setup_zone_type(cls, gdbtype: gdb.Type) -> None: max_nr_zones = gdbtype['__MAX_NR_ZONES'].enumval cls.ZONES_WIDTH = int(ceil(log(max_nr_zones))) @classmethod - def setup_nodes_width(cls, symbol): + def setup_nodes_width(cls, symbol: Union[gdb.Symbol, gdb.MinSymbol]) -> None: # TODO: handle kernels with no space for nodes in page flags try: cls.NODES_WIDTH = int(config['NODES_SHIFT']) @@ -132,81 +137,90 @@ def setup_nodes_width(cls, symbol): cls.BITS_PER_LONG = types.unsigned_long_type.sizeof * 8 @classmethod - def setup_pageflags_finish(cls): + def setup_pageflags_finish(cls) -> None: cls.setup_pageflags_finish_done = True + cls._is_tail = cls.__is_tail_compound_head_bit + cls._compound_head = cls.__compound_head_uses_low_bit + if 'PG_tail' in cls.pageflags.keys(): cls.PG_tail = 1 << cls.pageflags['PG_tail'] - cls.is_tail = cls.__is_tail_flag + cls._is_tail = cls.__is_tail_flag if cls.compound_head_name == 'first_page': - cls.__compound_head = cls.__compound_head_first_page - if cls.PG_tail is None: + cls._compound_head = cls.__compound_head_first_page + if cls.PG_tail == -1: cls.PG_tail = 1 << cls.pageflags['PG_compound'] | 1 << cls.pageflags['PG_reclaim'] - cls.is_tail = cls.__is_tail_flagcombo + cls._is_tail = cls.__is_tail_flagcombo @classmethod - def from_obj(cls, page): + def from_obj(cls, page: gdb.Value) -> 'Page': pfn = (int(page.address) - Page.vmemmap_base) / 
types.page_type.sizeof return Page(page, pfn) @classmethod - def from_page_addr(cls, addr): + def from_page_addr(cls, addr: int) -> 'Page': page_ptr = gdb.Value(addr).cast(types.page_type.pointer()) return cls.from_obj(page_ptr.dereference()) - def __is_tail_flagcombo(self): + def __init__(self, obj: gdb.Value, pfn: int) -> None: + self.gdb_obj = obj + self.pfn = pfn + self.flags = int(obj["flags"]) + + def __is_tail_flagcombo(self) -> bool: return bool((self.flags & self.PG_tail) == self.PG_tail) - def __is_tail_flag(self): + def __is_tail_flag(self) -> bool: return bool(self.flags & self.PG_tail) - def is_tail(self): + def __is_tail_compound_head_bit(self) -> bool: return bool(self.gdb_obj['compound_head'] & 1) - def is_slab(self): + def is_tail(self) -> bool: + return self._is_tail() + + def is_slab(self) -> bool: return bool(self.flags & self.PG_slab) - def is_lru(self): + def is_lru(self) -> bool: return bool(self.flags & self.PG_lru) - def is_anon(self): + def is_anon(self) -> bool: mapping = int(self.gdb_obj["mapping"]) return (mapping & PAGE_MAPPING_ANON) != 0 - def get_slab_cache(self): + def get_slab_cache(self) -> gdb.Value: if Page.slab_cache_name == "lru": return self.gdb_obj["lru"]["next"] return self.gdb_obj[Page.slab_cache_name] - def get_slab_page(self): + def get_slab_page(self) -> gdb.Value: if Page.slab_page_name == "lru": return self.gdb_obj["lru"]["prev"] return self.gdb_obj[Page.slab_page_name] - def get_nid(self): + def get_nid(self) -> int: return self.flags >> (self.BITS_PER_LONG - self.NODES_WIDTH) - def get_zid(self): + def get_zid(self) -> int: shift = self.BITS_PER_LONG - self.NODES_WIDTH - self.ZONES_WIDTH zid = self.flags >> shift & ((1 << self.ZONES_WIDTH) - 1) return zid - def __compound_head_first_page(self): + def __compound_head_first_page(self) -> int: return int(self.gdb_obj['first_page']) - def __compound_head(self): + def __compound_head_uses_low_bit(self) -> int: return int(self.gdb_obj['compound_head']) - 1 - def compound_head(self): + def __compound_head(self) -> int: + return self._compound_head() + + def compound_head(self) -> 'Page': if not self.is_tail(): return self - return Page.from_page_addr(self.__compound_head()) - - def __init__(self, obj, pfn): - self.gdb_obj = obj - self.pfn = pfn - self.flags = int(obj["flags"]) + return self.__class__.from_page_addr(self.__compound_head()) type_cbs = TypeCallbacks([('struct page', Page.setup_page_type), ('enum pageflags', Page.setup_pageflags), @@ -222,18 +236,18 @@ def __init__(self, obj, pfn): Page.setup_directmap_base)]) -def pfn_to_page(pfn): +def pfn_to_page(pfn: int) -> 'Page': return Page(Page.pfn_to_page(pfn), pfn) -def page_from_addr(addr): - pfn = (addr - Page.directmap_base) / Page.PAGE_SIZE +def page_from_addr(addr: int) -> 'Page': + pfn = (addr - Page.directmap_base) // Page.PAGE_SIZE return pfn_to_page(pfn) -def page_from_gdb_obj(gdb_obj): +def page_from_gdb_obj(gdb_obj: gdb.Value) -> 'Page': pfn = (int(gdb_obj.address) - Page.vmemmap_base) / types.page_type.sizeof return Page(gdb_obj, pfn) -def for_each_page(): +def for_each_page() -> Iterable['Page']: # TODO works only on x86? 
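A tiny worked example (the address is made up; the constants are the hard-coded x86-64 defaults visible above) of why page_from_addr switches from / to //: the computed pfn must stay an int to be usable as an index into vmemmap:

DIRECTMAP_BASE = 0xffff880000000000
PAGE_SIZE = 4096

addr = DIRECTMAP_BASE + 5 * PAGE_SIZE + 123   # an address inside page 5
pfn = (addr - DIRECTMAP_BASE) // PAGE_SIZE    # floor division -> 5 (int)
assert pfn == 5
# With plain "/", pfn would be the float 5.03..., which cannot index vmemmap.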
max_pfn = int(gdb.lookup_global_symbol("max_pfn").value()) for pfn in range(max_pfn): diff --git a/crash/types/percpu.py b/crash/types/percpu.py index 0e4dd2745a9..56a43d38e8f 100644 --- a/crash/types/percpu.py +++ b/crash/types/percpu.py @@ -14,10 +14,12 @@ import gdb +SymbolOrValue = Union[gdb.Value, gdb.Symbol] + class PerCPUError(TypeError): """The passed object does not respond to a percpu pointer.""" _fmt = "{} does not correspond to a percpu pointer." - def __init__(self, var): + def __init__(self, var: SymbolOrValue) -> None: super().__init__(self._fmt.format(var)) types = Types(['void *', 'char *', 'struct pcpu_chunk', @@ -26,8 +28,6 @@ def __init__(self, var): 'pcpu_nr_slots', 'pcpu_group_offsets']) msymvals = MinimalSymvals(['__per_cpu_start', '__per_cpu_end']) -SymbolOrValue = Union[gdb.Value, gdb.Symbol] - class PerCPUState(object): """ Per-cpus come in a few forms: @@ -223,7 +223,7 @@ def is_static_percpu_var(self, addr: int) -> bool: # treats 0 as a special value indicating it should just be after # the previous section. It's possible to override this while # loading debuginfo but not when debuginfo is embedded. - def _relocated_offset(self, var): + def _relocated_offset(self, var: gdb.Value) -> int: addr = int(var) start = msymvals['__per_cpu_start'] size = self._static_ranges[start] @@ -271,7 +271,7 @@ def is_percpu_var(self, var: SymbolOrValue) -> bool: return True return False - def _resolve_percpu_var(self, var): + def _resolve_percpu_var(self, var: SymbolOrValue) -> gdb.Value: orig_var = var if isinstance(var, gdb.Symbol) or isinstance(var, gdb.MinSymbol): var = var.value() diff --git a/crash/types/slab.py b/crash/types/slab.py index fe674880675..3ed32ad8492 100644 --- a/crash/types/slab.py +++ b/crash/types/slab.py @@ -1,6 +1,9 @@ #!/usr/bin/python3 # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +from typing import TypeVar, Union, Tuple, Iterable, Dict, Optional, Set +from typing import ValuesView + import sys import traceback @@ -8,7 +11,7 @@ from crash.util.symbols import Types, TypeCallbacks, SymbolCallbacks from crash.types.percpu import get_percpu_var from crash.types.list import list_for_each, list_for_each_entry -from crash.types.page import page_from_gdb_obj, page_from_addr +from crash.types.page import page_from_gdb_obj, page_from_addr, Page from crash.types.node import for_each_nid from crash.types.cpu import for_each_online_cpu from crash.types.node import numa_node_id @@ -28,61 +31,90 @@ BUFCTL_END = ~0 & 0xffffffff -def col_error(msg): +def col_error(msg: str) -> str: return "\033[1;31;40m {}\033[0;37;40m ".format(msg) -def col_bold(msg): +def col_bold(msg: str) -> str: return "\033[1;37;40m {}\033[0;37;40m ".format(msg) types = Types(['kmem_cache', 'struct kmem_cache']) +SlabType = TypeVar('SlabType', bound='Slab') +KmemCacheType = TypeVar('KmemCacheType', bound='KmemCache') + class Slab(object): - slab_list_head = None - page_slab = None - real_slab_type = None - bufctl_type = None + slab_list_head: str = 'list' + page_slab: bool = False + real_slab_type: gdb.Type = None + bufctl_type: gdb.Type = None @classmethod - def check_page_type(cls, gdbtype): - if cls.page_slab is None: + def check_page_type(cls, gdbtype: gdb.Type) -> None: + if cls.page_slab is False: cls.page_slab = True cls.real_slab_type = gdbtype cls.slab_list_head = 'lru' @classmethod - def check_slab_type(cls, gdbtype): + def check_slab_type(cls, gdbtype: gdb.Type) -> None: cls.page_slab = False cls.real_slab_type = gdbtype cls.slab_list_head = 'list' @classmethod - def 
check_bufctl_type(cls, gdbtype): + def check_bufctl_type(cls, gdbtype: gdb.Type) -> None: cls.bufctl_type = gdbtype @classmethod - def from_addr(cls, slab_addr, kmem_cache): + def from_addr(cls, slab_addr: int, + kmem_cache: Union[int, 'KmemCache']) -> 'Slab': if not isinstance(kmem_cache, KmemCache): kmem_cache = kmem_cache_from_addr(kmem_cache) slab_struct = gdb.Value(slab_addr).cast(cls.real_slab_type.pointer()).dereference() - return Slab(slab_struct, kmem_cache) + return cls(slab_struct, kmem_cache) @classmethod - def from_page(cls, page): + def from_page(cls, page: Page) -> 'Slab': kmem_cache_addr = int(page.get_slab_cache()) kmem_cache = kmem_cache_from_addr(kmem_cache_addr) + if kmem_cache is None: + raise RuntimeError("No kmem cache found for page") if cls.page_slab: - return Slab(page.gdb_obj, kmem_cache) + return cls(page.gdb_obj, kmem_cache) else: slab_addr = int(page.get_slab_page()) - return Slab.from_addr(slab_addr, kmem_cache) + return cls.from_addr(slab_addr, kmem_cache) @classmethod - def from_list_head(cls, list_head, kmem_cache): + def from_list_head(cls, list_head: gdb.Value, + kmem_cache: 'KmemCache') -> 'Slab': gdb_obj = container_of(list_head, cls.real_slab_type, cls.slab_list_head) - return Slab(gdb_obj, kmem_cache) + return cls(gdb_obj, kmem_cache) + + def __init__(self, gdb_obj: gdb.Value, kmem_cache: 'KmemCache', + error: bool = False) -> None: + self.error = error + self.gdb_obj = gdb_obj + self.kmem_cache = kmem_cache + self.free: Set[int] = set() + self.misplaced_list: Optional[str] + self.misplaced_error: Optional[str] + + self.misplaced_list = None + self.misplaced_error = None + + if error: + return - def __add_free_obj_by_idx(self, idx): + if self.page_slab: + self.inuse = int(gdb_obj["active"]) + self.page = page_from_gdb_obj(gdb_obj) + else: + self.inuse = int(gdb_obj["inuse"]) + self.s_mem = int(gdb_obj["s_mem"]) + + def __add_free_obj_by_idx(self, idx: int) -> bool: objs_per_slab = self.kmem_cache.objs_per_slab bufsize = self.kmem_cache.buffer_size @@ -100,11 +132,10 @@ def __add_free_obj_by_idx(self, idx): return True - def __populate_free(self): + def __populate_free(self) -> None: if self.free: return - self.free = set() bufsize = self.kmem_cache.buffer_size objs_per_slab = self.kmem_cache.objs_per_slab @@ -134,20 +165,21 @@ def __populate_free(self): f = int(bufctl[f]) - def find_obj(self, addr): + def find_obj(self, addr: int) -> Union[int, None]: bufsize = self.kmem_cache.buffer_size objs_per_slab = self.kmem_cache.objs_per_slab if int(addr) < self.s_mem: return None - idx = (int(addr) - self.s_mem) / bufsize + idx = (int(addr) - self.s_mem) // bufsize if idx >= objs_per_slab: return None return self.s_mem + (idx * bufsize) - def contains_obj(self, addr): + def contains_obj(self, + addr: int) -> Tuple[bool, int, Union[gdb.Value, None]]: obj_addr = self.find_obj(addr) if not obj_addr: @@ -164,7 +196,7 @@ def contains_obj(self, addr): return (True, int(obj_addr), None) - def __error(self, msg, misplaced: bool = False): + def __error(self, msg: str, misplaced: bool = False) -> None: msg = col_error("cache %s slab %x%s" % (self.kmem_cache.name, int(self.gdb_obj.address), msg)) self.error = True @@ -173,26 +205,26 @@ def __error(self, msg, misplaced: bool = False): else: print(msg) - def __free_error(self, list_name): + def __free_error(self, list_name: str) -> None: self.misplaced_list = list_name self.__error(": is on list %s, but has %d of %d objects allocated" % (list_name, self.inuse, self.kmem_cache.objs_per_slab), misplaced=True) - def 
get_objects(self): + def get_objects(self) -> Iterable[int]: bufsize = self.kmem_cache.buffer_size obj = self.s_mem for i in range(self.kmem_cache.objs_per_slab): yield obj obj += bufsize - def get_allocated_objects(self): + def get_allocated_objects(self) -> Iterable[int]: for obj in self.get_objects(): c = self.contains_obj(obj) if c[0]: yield obj - def check(self, slabtype, nid): + def check(self, slabtype: int, nid: int) -> int: self.__populate_free() num_free = len(self.free) max_free = self.kmem_cache.objs_per_slab @@ -280,34 +312,16 @@ def check(self, slabtype, nid): (obj, slab_addr)) return num_free - def __init__(self, gdb_obj, kmem_cache, error=False): - self.error = error - self.gdb_obj = gdb_obj - self.kmem_cache = kmem_cache - self.free = None - self.misplaced_list = None - self.misplaced_error = None - - if error: - return - - if self.page_slab: - self.inuse = int(gdb_obj["active"]) - self.page = page_from_gdb_obj(gdb_obj) - else: - self.inuse = int(gdb_obj["inuse"]) - self.s_mem = int(gdb_obj["s_mem"]) - class KmemCache(object): buffer_size_name = None nodelists_name = None percpu_name = None percpu_cache = None - head_name = None + head_name = "list" alien_cache_type_exists = False @classmethod - def check_kmem_cache_type(cls, gdbtype): + def check_kmem_cache_type(cls, gdbtype: gdb.Type) -> None: cls.buffer_size_name = find_member_variant(gdbtype, ['buffer_size', 'size']) cls.nodelists_name = find_member_variant(gdbtype, ['nodelists', 'node']) cls.percpu_name = find_member_variant(gdbtype, ['cpu_cache', 'array']) @@ -315,13 +329,29 @@ def check_kmem_cache_type(cls, gdbtype): cls.head_name = find_member_variant(gdbtype, ['next', 'list']) @classmethod - def setup_alien_cache_type(cls, gdbtype): + def setup_alien_cache_type(cls, gdbtype: gdb.Type) -> None: cls.alien_cache_type_exists = True - def __get_nodelist(self, node): + def __init__(self, name: str, gdb_obj: gdb.Value) -> None: + self.name = name + self.gdb_obj = gdb_obj + + self.objs_per_slab = int(gdb_obj["num"]) + self.buffer_size = int(gdb_obj[KmemCache.buffer_size_name]) + + if int(gdb_obj["flags"]) & 0x80000000: + self.off_slab = True + self.off_slab_cache: Optional[str] + self.off_slab_cache = None + else: + self.off_slab = False + + self.array_caches: Dict[int, Dict] = dict() + + def __get_nodelist(self, node: int) -> gdb.Value: return self.gdb_obj[KmemCache.nodelists_name][node] - def __get_nodelists(self): + def __get_nodelists(self) -> Iterable[Tuple[int, gdb.Value]]: for nid in for_each_nid(): node = self.__get_nodelist(nid) if int(node) == 0: @@ -329,27 +359,15 @@ def __get_nodelists(self): yield (nid, node.dereference()) @staticmethod - def all_find_obj(addr): + def all_find_obj(addr: int) -> Union[None, Tuple[bool, int, + Union[gdb.Value, None]]]: slab = slab_from_obj_addr(addr) if not slab: return None return slab.contains_obj(addr) - def __init__(self, name, gdb_obj): - self.name = name - self.gdb_obj = gdb_obj - self.array_caches = None - - self.objs_per_slab = int(gdb_obj["num"]) - self.buffer_size = int(gdb_obj[KmemCache.buffer_size_name]) - - if int(gdb_obj["flags"]) & 0x80000000: - self.off_slab = True - self.off_slab_cache = None - else: - self.off_slab = False - - def __fill_array_cache(self, acache, ac_type, nid_src, nid_tgt): + def __fill_array_cache(self, acache: gdb.Value, ac_type: str, + nid_src: int, nid_tgt: int) -> None: avail = int(acache["avail"]) limit = int(acache["limit"]) @@ -380,7 +398,7 @@ def __fill_array_cache(self, acache, ac_type, nid_src, nid_tgt): print(col_error("Object 
{:#x} in cache {} is on wrong nid {} instead of {}" .format(ptr, cache_dict, obj_nid, nid_tgt))) - def __fill_alien_caches(self, node, nid_src): + def __fill_alien_caches(self, node: gdb.Value, nid_src: int) -> None: alien_cache = node["alien"] # TODO check that this only happens for single-node systems? @@ -404,7 +422,7 @@ def __fill_alien_caches(self, node, nid_src): self.__fill_array_cache(array, AC_ALIEN, nid_src, nid) - def __fill_percpu_caches(self): + def __fill_percpu_caches(self) -> None: cpu_cache = self.gdb_obj[KmemCache.percpu_name] for cpu in for_each_online_cpu(): @@ -415,7 +433,7 @@ def __fill_percpu_caches(self): self.__fill_array_cache(array, AC_PERCPU, -1, cpu) - def __fill_all_array_caches(self): + def __fill_all_array_caches(self) -> None: self.array_caches = dict() self.__fill_percpu_caches() @@ -428,25 +446,28 @@ def __fill_all_array_caches(self): self.__fill_alien_caches(node, nid) - def get_array_caches(self): - if self.array_caches is None: + def get_array_caches(self) -> Dict[int, Dict]: + if not self.array_caches: self.__fill_all_array_caches() return self.array_caches - def __get_allocated_objects(self, node, slabtype): + def __get_allocated_objects(self, node: gdb.Value, + slabtype: int) -> Iterable[int]: for slab in self.get_slabs_of_type(node, slabtype): for obj in slab.get_allocated_objects(): yield obj - def get_allocated_objects(self): + def get_allocated_objects(self) -> Iterable[int]: for (nid, node) in self.__get_nodelists(): for obj in self.__get_allocated_objects(node, slab_partial): yield obj for obj in self.__get_allocated_objects(node, slab_full): yield obj - def get_slabs_of_type(self, node, slabtype, reverse=False, exact_cycles=False): + def get_slabs_of_type(self, node: gdb.Value, slabtype: int, + reverse: bool = False, + exact_cycles: bool = False) -> Iterable[Slab]: wrong_list_nodes = dict() for stype in range(3): if stype != slabtype: @@ -471,7 +492,8 @@ def get_slabs_of_type(self, node, slabtype, reverse=False, exact_cycles=False): yield slab - def __check_slab(self, slab, slabtype, nid, errors): + def __check_slab(self, slab: Slab, slabtype: int, nid: int, + errors: Dict) -> int: addr = int(slab.gdb_obj.address) free = 0 @@ -507,7 +529,8 @@ def __check_slab(self, slab, slabtype, nid, errors): return free - def ___check_slabs(self, node, slabtype, nid, reverse=False): + def ___check_slabs(self, node: gdb.Value, slabtype: int, nid: int, + reverse: bool = False) -> Tuple[bool, int, int]: slabs = 0 free = 0 check_ok = True @@ -520,7 +543,8 @@ def ___check_slabs(self, node, slabtype, nid, reverse=False): 'num_misplaced': 0} try: - for slab in self.get_slabs_of_type(node, slabtype, reverse, exact_cycles=True): + for slab in self.get_slabs_of_type(node, slabtype, reverse, + exact_cycles=True): try: free += self.__check_slab(slab, slabtype, nid, errors) except Exception as e: @@ -534,18 +558,20 @@ def ___check_slabs(self, node, slabtype, nid, reverse=False): .format(slab_list_name[slabtype], e))) check_ok = False - if errors['num_ok'] > 0: + count = errors['num_ok'] + if count: print("{} slab objects were ok between {:#x} and {:#x}" .format(errors['num_ok'], errors['first_ok'], errors['last_ok'])) - if errors['num_misplaced'] > 0: + count = errors['num_misplaced'] + if count: print(col_error("{} slab objects were misplaced, printing the last:" .format(errors['num_misplaced']))) print(errors['last_misplaced']) return (check_ok, slabs, free) - def __check_slabs(self, node, slabtype, nid): + def __check_slabs(self, node: gdb.Value, slabtype: int, nid: 
int) -> int: slab_list = node[slab_list_fullname[slabtype]] @@ -573,7 +599,7 @@ def __check_slabs(self, node, slabtype, nid): return free - def check_array_caches(self): + def check_array_caches(self) -> None: acs = self.get_array_caches() for ac_ptr in acs.keys(): ac_obj_slab = slab_from_obj_addr(ac_ptr) @@ -593,7 +619,7 @@ def check_array_caches(self): .format(ac_ptr, acs[ac_ptr], ac_obj_obj[0], ac_obj_obj[1], ac_obj_obj[2])) - def check_all(self): + def check_all(self) -> None: for (nid, node) in self.__get_nodelists(): try: # This is version and architecture specific @@ -616,16 +642,10 @@ class KmemCacheNotFound(RuntimeError): """The specified kmem_cache could not be found.""" pass -kmem_caches = None -kmem_caches_by_addr = None - -def setup_slab_caches(slab_caches): - global kmem_caches - global kmem_caches_by_addr - - kmem_caches = dict() - kmem_caches_by_addr = dict() +kmem_caches: Dict[str, KmemCache] = dict() +kmem_caches_by_addr: Dict[int, KmemCache] = dict() +def setup_slab_caches(slab_caches: gdb.Symbol) -> None: list_caches = slab_caches.value() for cache in list_for_each_entry(list_caches, @@ -637,22 +657,22 @@ def setup_slab_caches(slab_caches): kmem_caches[name] = kmem_cache kmem_caches_by_addr[int(cache.address)] = kmem_cache -def kmem_cache_from_addr(addr): +def kmem_cache_from_addr(addr: int) -> KmemCache: try: return kmem_caches_by_addr[addr] except KeyError: raise KmemCacheNotFound(f"No kmem cache found for {addr}.") -def kmem_cache_from_name(name): +def kmem_cache_from_name(name: str) -> KmemCache: try: return kmem_caches[name] except KeyError: raise KmemCacheNotFound(f"No kmem cache found for {name}.") -def kmem_cache_get_all(): +def kmem_cache_get_all() -> ValuesView[KmemCache]: return kmem_caches.values() -def slab_from_obj_addr(addr): +def slab_from_obj_addr(addr: int) -> Union[Slab, None]: page = page_from_addr(addr).compound_head() if not page.is_slab(): return None diff --git a/crash/types/task.py b/crash/types/task.py index b35f98a034f..81c8fd6923e 100644 --- a/crash/types/task.py +++ b/crash/types/task.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from typing import Iterator, Callable, Dict +from typing import Iterator, Callable, Dict, List from crash.exceptions import InvalidArgumentError, ArgumentTypeError from crash.exceptions import UnexpectedGDBTypeError @@ -46,21 +46,22 @@ class TaskStateFlags(object): TASK_WAKING: int = TASK_FLAG_UNINITIALIZED TASK_PARKED: int = TASK_FLAG_UNINITIALIZED __TASK_IDLE: int = TASK_FLAG_UNINITIALIZED + TASK_EXCLUSIVE: int = TASK_FLAG_UNINITIALIZED TASK_NOLOAD: int = TASK_FLAG_UNINITIALIZED TASK_NEW: int = TASK_FLAG_UNINITIALIZED TASK_IDLE: int = TASK_FLAG_UNINITIALIZED - def __init__(self): + def __init__(self) -> None: raise NotImplementedError("This class is not meant to be instantiated") @classmethod - def has_flag(cls, flagname): + def has_flag(cls, flagname: str) -> bool: v = getattr(cls, flagname) return v != cls.TASK_FLAG_UNINITIALIZED @classmethod - def _task_state_flags_callback(cls, symbol): + def _task_state_flags_callback(cls, symbol: gdb.Symbol) -> None: count = array_size(symvals.task_state_array) bit = 0 @@ -144,7 +145,7 @@ def _task_state_flags_callback(cls, symbol): cls._check_state_bits() @classmethod - def _check_state_bits(cls): + def _check_state_bits(cls) -> None: required = [ 'TASK_RUNNING', 'TASK_INTERRUPTIBLE', @@ -204,9 +205,14 @@ class LinuxTask(object): """ _valid = False _task_state_has_exit_state = None - anon_file_rss_fields = list() + 
_anon_file_rss_fields: List[str] = list() - def __init__(self, task_struct: gdb.Value): + # Version-specific hooks -- these will be None here but we'll raise a + # NotImplementedError if any of them aren't found. + _get_rss: Callable[['LinuxTask'], int] + _get_last_run: Callable[['LinuxTask'], int] + + def __init__(self, task_struct: gdb.Value) -> None: self._init_task_types(task_struct) if not isinstance(task_struct, gdb.Value): @@ -232,7 +238,7 @@ def __init__(self, task_struct: gdb.Value): self.pgd_addr = 0 @classmethod - def _init_task_types(cls, task): + def _init_task_types(cls, task: gdb.Value) -> None: if not cls._valid: t = types.task_struct_type if task.type != t: @@ -455,7 +461,7 @@ def task_address(self) -> int: """ return int(self.task_struct.address) - def is_kernel_task(self): + def is_kernel_task(self) -> bool: if self.task_struct['pid'] == 0: return True @@ -471,7 +477,7 @@ def is_kernel_task(self): return False @classmethod - def set_get_stack_pointer(cls, fn: Callable[[gdb.Value], int]): + def set_get_stack_pointer(cls, fn: Callable[[gdb.Value], int]) -> None: """ Set the stack pointer callback for this architecture @@ -502,13 +508,13 @@ def get_stack_pointer(self) -> int: return int(fn(self.task_struct['thread'])) - def _get_rss_field(self): + def _get_rss_field(self) -> int: return int(self.task_struct['mm']['rss'].value()) - def _get__rss_field(self): + def _get__rss_field(self) -> int: return int(self.task_struct['mm']['_rss'].value()) - def _get_rss_stat_field(self): + def _get_rss_stat_field(self) -> int: stat = self.task_struct['mm']['rss_stat']['count'] stat0 = self.task_struct['mm']['rss_stat']['count'][0] rss = 0 @@ -516,10 +522,10 @@ def _get_rss_stat_field(self): rss += int(stat[i]['counter']) return rss - def _get_anon_file_rss_fields(self): + def _get_anon_file_rss_fields(self) -> int: mm = self.task_struct['mm'] rss = 0 - for name in self.anon_file_rss_fields: + for name in self._anon_file_rss_fields: if mm[name].type == types.atomic_long_t_type: rss += int(mm[name]['counter']) else: @@ -530,7 +536,7 @@ def _get_anon_file_rss_fields(self): # dynamically. We may do that eventually, but for now we can just # select the proper function and assign it to the class. 
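The comment above describes the approach; here is a stand-alone sketch of the same "probe once, bind to the class" dispatch, with a hypothetical boolean standing in for struct_has_member and invented names in place of the mm_struct fields:

from typing import Callable

class Accounting:
    _get_counter: Callable[['Accounting'], int]

    @classmethod
    def _pick_get_counter(cls, has_new_field: bool) -> None:
        # Decide once, based on the detected layout, which accessor every
        # future call should use.
        cls._get_counter = cls._new_counter if has_new_field else cls._old_counter

    def _old_counter(self) -> int:
        return 1

    def _new_counter(self) -> int:
        return 2

    def get_counter(self) -> int:
        return self._get_counter()

Accounting._pick_get_counter(has_new_field=True)
assert Accounting().get_counter() == 2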
@classmethod - def _pick_get_rss(cls): + def _pick_get_rss(cls) -> None: if struct_has_member(types.mm_struct_type, 'rss'): cls._get_rss = cls._get_rss_field elif struct_has_member(types.mm_struct_type, '_rss'): @@ -539,20 +545,20 @@ def _pick_get_rss(cls): cls._get_rss = cls._get_rss_stat_field else: if struct_has_member(types.mm_struct_type, '_file_rss'): - cls.anon_file_rss_fields.append('_file_rss') + cls._anon_file_rss_fields.append('_file_rss') if struct_has_member(types.mm_struct_type, '_anon_rss'): - cls.anon_file_rss_fields.append('_anon_rss') + cls._anon_file_rss_fields.append('_anon_rss') cls._get_rss = cls._get_anon_file_rss_fields - if not cls.anon_file_rss_fields: + if not cls._anon_file_rss_fields: raise RuntimeError("No method to retrieve RSS from task found.") - def _get_rss(self) -> int: + def __get_rss(self) -> int: raise NotImplementedError("_get_rss not implemented") - def get_rss(self): + def get_rss(self) -> int: """ Return the resident set for this task @@ -561,20 +567,17 @@ def get_rss(self): """ return self._get_rss() - def _last_run__last_run(self): + def _last_run__last_run(self) -> int: return int(self.task_struct['last_run']) - def _last_run__timestamp(self): + def _last_run__timestamp(self) -> int: return int(self.task_struct['timestamp']) - def _last_run__last_arrival(self): + def _last_run__last_arrival(self) -> int: return int(self.task_struct['sched_info']['last_arrival']) - def _get_last_run(self) -> int: - raise NotImplementedError("_get_last_run not implemented") - @classmethod - def _pick_last_run(cls): + def _pick_last_run(cls) -> None: fields = types.task_struct_type.keys() if ('sched_info' in fields and 'last_arrival' in types.task_struct_type['sched_info'].type.keys()): diff --git a/crash/types/vmstat.py b/crash/types/vmstat.py index 081264374ca..a5fc0b05b15 100644 --- a/crash/types/vmstat.py +++ b/crash/types/vmstat.py @@ -1,22 +1,26 @@ #!/usr/bin/python3 # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +from typing import List, Tuple + from crash.util.symbols import Types, TypeCallbacks, Symbols from crash.types.percpu import get_percpu_var from crash.types.cpu import for_each_online_cpu +import gdb + class VmStat(object): types = Types(['enum zone_stat_item', 'enum vm_event_item']) symbols = Symbols(['vm_event_states']) - nr_stat_items = None - nr_event_items = None + nr_stat_items = -1 + nr_event_items = -1 - vm_stat_names = None - vm_event_names = None + vm_stat_names: List[str] = list() + vm_event_names: List[str] = list() @classmethod - def check_enum_type(cls, gdbtype): + def check_enum_type(cls, gdbtype: gdb.Type) -> None: if gdbtype == cls.types.enum_zone_stat_item_type: (items, names) = cls.__populate_names(gdbtype, 'NR_VM_ZONE_STAT_ITEMS') @@ -31,7 +35,8 @@ def check_enum_type(cls, gdbtype): raise TypeError("Unexpected type {}".format(gdbtype.name)) @classmethod - def __populate_names(cls, enum_type, items_name): + def __populate_names(cls, enum_type: gdb.Type, + items_name: str) -> Tuple[int, List[str]]: nr_items = enum_type[items_name].enumval names = ["__UNKNOWN__"] * nr_items @@ -43,15 +48,15 @@ def __populate_names(cls, enum_type, items_name): return (nr_items, names) @classmethod - def get_stat_names(cls): + def get_stat_names(cls) -> List[str]: return cls.vm_stat_names @classmethod - def get_event_names(cls): + def get_event_names(cls) -> List[str]: return cls.vm_event_names @classmethod - def get_events(cls): + def get_events(cls) -> List[int]: nr = cls.nr_event_items events = [0] * nr diff --git 
a/crash/types/zone.py b/crash/types/zone.py index bac398bee2a..b17ac4df163 100644 --- a/crash/types/zone.py +++ b/crash/types/zone.py @@ -1,6 +1,8 @@ #!/usr/bin/python3 # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +from typing import List + from crash.util import array_for_each from crash.util.symbols import Types from crash.types.percpu import get_percpu_var @@ -9,22 +11,24 @@ from crash.types.list import list_for_each_entry import crash.types.page +import gdb + class Zone(object): types = Types(['struct page']) - def __init__(self, obj, zid): + def __init__(self, obj: gdb.Value, zid: int) -> None: self.gdb_obj = obj self.zid = zid self.nid = int(obj["node"]) - def is_populated(self): + def is_populated(self) -> bool: if self.gdb_obj["present_pages"] != 0: return True else: return False - def get_vmstat(self): + def get_vmstat(self) -> List[int]: stats = [0] * VmStat.nr_stat_items vm_stat = self.gdb_obj["vm_stat"] @@ -33,19 +37,19 @@ def get_vmstat(self): stats[item] = int(vm_stat[item]["counter"]) return stats - def add_vmstat_diffs(self, diffs): + def add_vmstat_diffs(self, diffs: List[int]) -> None: for cpu in for_each_online_cpu(): pageset = get_percpu_var(self.gdb_obj["pageset"], cpu) vmdiff = pageset["vm_stat_diff"] for item in range(0, VmStat.nr_stat_items): diffs[item] += int(vmdiff[item]) - def get_vmstat_diffs(self): + def get_vmstat_diffs(self) -> List[int]: diffs = [0] * VmStat.nr_stat_items self.add_vmstat_diffs(diffs) return diffs - def _check_free_area(self, area, is_pcp): + def _check_free_area(self, area: gdb.Value, is_pcp: bool) -> None: nr_free = 0 list_array_name = "lists" if is_pcp else "free_list" for free_list in array_for_each(area[list_array_name]): @@ -64,7 +68,7 @@ def _check_free_area(self, area, is_pcp): format("pcplist" if is_pcp else "area", area.address, nr_expected, nr_free)) - def check_free_pages(self): + def check_free_pages(self) -> None: for area in array_for_each(self.gdb_obj["free_area"]): self._check_free_area(area, False) for cpu in for_each_online_cpu(): diff --git a/crash/util/__init__.py b/crash/util/__init__.py index 1038f67d8f6..56557e92ad6 100644 --- a/crash/util/__init__.py +++ b/crash/util/__init__.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from typing import Union, Tuple, List, Iterator, Dict +from typing import Union, Tuple, List, Iterator, Dict, Optional import uuid @@ -17,7 +17,7 @@ class InvalidComponentError(LookupError): """An error occured while resolving the member specification""" formatter = "cannot resolve '{}->{}' ({})" - def __init__(self, gdbtype, spec, message): + def __init__(self, gdbtype: gdb.Type, spec: str, message: str) -> None: msg = self.formatter.format(str(gdbtype), spec, message) super().__init__(msg) self.type = gdbtype @@ -32,7 +32,7 @@ class _InvalidComponentBaseError(RuntimeError): class _InvalidComponentTypeError(_InvalidComponentBaseError): """The component expects the type to be a struct or union but it is not.""" formatter = "component `{}' in `{}' is not a struct or union" - def __init__(self, name, spec): + def __init__(self, name: str, spec: str) -> None: msg = self.formatter.format(name, spec) super().__init__(msg) self.name = name @@ -42,7 +42,7 @@ class _InvalidComponentNameError(_InvalidComponentBaseError): """The requested member component does not exist in the provided type.""" formatter = "no such member `{}' in `{}'" - def __init__(self, member, gdbtype): + def __init__(self, member: str, gdbtype: gdb.Type) -> None: 
msg = self.formatter.format(member, str(gdbtype)) super().__init__(msg) self.member = member @@ -50,7 +50,7 @@ def __init__(self, member, gdbtype): types = Types(['char *', 'uuid_t']) -def container_of(val: gdb.Value, gdbtype: gdb.Type, member) -> gdb.Value: +def container_of(val: gdb.Value, gdbtype: gdb.Type, member: str) -> gdb.Value: """ Returns an object that contains the specified object at the given offset. @@ -183,7 +183,8 @@ def resolve_type(val: TypeSpecifier) -> gdb.Type: raise TypeError("Invalid type {}".format(str(type(val)))) return gdbtype -def __offsetof(val, spec, error): +def __offsetof(val: gdb.Type, spec: str, + error: bool) -> Optional[Tuple[int, gdb.Type]]: gdbtype = val offset = 0 diff --git a/crash/util/symbols.py b/crash/util/symbols.py index 2473233104c..43b491f9990 100644 --- a/crash/util/symbols.py +++ b/crash/util/symbols.py @@ -48,7 +48,7 @@ class DelayedCollection(object): the container object *or* the contained object if it has been overridden via :meth:`override`. """ - def __init__(self, cls: Type[DelayedValue], names: Names): + def __init__(self, cls: Type[DelayedValue], names: Names) -> None: self.attrs: Dict[str, DelayedValue] = {} if isinstance(names, str): @@ -95,13 +95,13 @@ def override(self, name: str, value: CollectedValue) -> None: self.attrs[name].value = value - def __getitem__(self, name): + def __getitem__(self, name: str) -> Any: try: return self.get(name) except NameError as e: raise KeyError(str(e)) - def __getattr__(self, name): + def __getattr__(self, name: str) -> Any: try: return self.get(name) except NameError as e: @@ -129,7 +129,7 @@ class Types(DelayedCollection): names: A :obj:`str` or :obj:`list` of :obj:`str` containing the names of the types to resolve. """ - def __init__(self, names: Names): + def __init__(self, names: Names) -> None: super(Types, self).__init__(DelayedType, names) def override(self, name: str, value: gdb.Type) -> None: @@ -170,7 +170,7 @@ class Symbols(DelayedCollection): names: A :obj:`str` or :obj:`list` of :obj:`str` containing the names of the symbols to resolve. """ - def __init__(self, names): + def __init__(self, names: Names) -> None: super(Symbols, self).__init__(DelayedSymbol, names) class Symvals(DelayedCollection): @@ -204,7 +204,7 @@ class Symvals(DelayedCollection): names: A :obj:`str` or :obj:`list` of :obj:`str` containing the names of the symbols to resolve. """ - def __init__(self, names): + def __init__(self, names: Names) -> None: super(Symvals, self).__init__(DelayedSymval, names) class MinimalSymbols(DelayedCollection): @@ -238,7 +238,7 @@ class MinimalSymbols(DelayedCollection): names: A :obj:`str` or :obj:`list` of :obj:`str` containing the names of the minimal symbols to resolve. """ - def __init__(self, names): + def __init__(self, names: Names) -> None: super(MinimalSymbols, self).__init__(DelayedMinimalSymbol, names) class MinimalSymvals(DelayedCollection): @@ -267,7 +267,7 @@ class MinimalSymvals(DelayedCollection): names: A :obj:`str` or :obj:`list` of :obj:`str` containing the names of the minimal symbols to resolve. """ - def __init__(self, names): + def __init__(self, names: Names) -> None: super(MinimalSymvals, self).__init__(DelayedMinimalSymval, names) class DelayedValues(DelayedCollection): @@ -302,14 +302,15 @@ class DelayedValues(DelayedCollection): Args: names: The names to use for the :obj:`.DelayedValue` objects. 
""" - def __init__(self, names: Names): + def __init__(self, names: Names) -> None: super(DelayedValues, self).__init__(DelayedValue, names) CallbackSpecifier = Tuple[str, Callable] CallbackSpecifiers = Union[List[CallbackSpecifier], CallbackSpecifier] class CallbackCollection(object): - def __init__(self, cls: Type[NamedCallback], cbs: CallbackSpecifiers): + def __init__(self, cls: Type[NamedCallback], + cbs: CallbackSpecifiers) -> None: if isinstance(cbs, tuple): cbs = [cbs] @@ -318,13 +319,13 @@ def __init__(self, cls: Type[NamedCallback], cbs: CallbackSpecifiers): setattr(self, t.attrname, t) class TypeCallbacks(CallbackCollection): - def __init__(self, cbs): + def __init__(self, cbs: CallbackSpecifiers) -> None: super().__init__(TypeCallback, cbs) class SymbolCallbacks(CallbackCollection): - def __init__(self, cbs): + def __init__(self, cbs: CallbackSpecifiers) -> None: super().__init__(SymbolCallback, cbs) class MinimalSymbolCallbacks(CallbackCollection): - def __init__(self, cbs): + def __init__(self, cbs: CallbackSpecifiers) -> None: super().__init__(MinimalSymbolCallback, cbs) diff --git a/kdump/target.py b/kdump/target.py index fed37502d00..133c355ab77 100644 --- a/kdump/target.py +++ b/kdump/target.py @@ -1,6 +1,8 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +from typing import Tuple, Optional + import sys from kdumpfile import kdumpfile, KDUMP_KVADDR @@ -10,14 +12,16 @@ import gdb +PTID = Tuple[int, int, int] + class SymbolCallback(object): "addrxlat symbolic callback" - def __init__(self, ctx=None, *args, **kwargs): - super().__init__(*args, **kwargs) + def __init__(self, ctx: Optional[addrxlat.Context] = None, + *args: int, **kwargs: int) -> None: self.ctx = ctx - def __call__(self, symtype, *args): + def __call__(self, symtype: int, *args: int) -> int: if self.ctx is not None: try: return self.ctx.next_cb_sym(symtype, *args) @@ -31,7 +35,7 @@ def __call__(self, symtype, *args): raise addrxlat.exceptions.NoDataError() class Target(gdb.Target): - def __init__(self, debug=False): + def __init__(self, debug: bool = False) -> None: super().__init__() self.debug = debug self.shortname = "kdumpfile" @@ -40,7 +44,7 @@ def __init__(self, debug=False): self.register() - def open(self, filename, from_tty): + def open(self, filename: str, from_tty: bool) -> None: objfiles = gdb.objfiles() if not objfiles: @@ -78,7 +82,7 @@ def open(self, filename, from_tty): # Clear out the old symbol cache gdb.execute("file {}".format(vmlinux)) - def close(self): + def close(self) -> None: try: self.unregister() except: @@ -86,12 +90,13 @@ def close(self): del self.kdump @classmethod - def report_error(cls, addr, length, error): + def report_error(cls, addr: int, length: int, error: Exception) -> None: print("Error while reading {:d} bytes from {:#x}: {}" .format(length, addr, str(error)), file=sys.stderr) - def xfer_partial(self, obj, annex, readbuf, writebuf, offset, ln): + def xfer_partial(self, obj: int, annex: str, readbuf: bytearray, + writebuf: bytearray, offset: int, ln: int) -> int: ret = -1 if obj == self.TARGET_OBJECT_MEMORY: try: @@ -114,21 +119,21 @@ def xfer_partial(self, obj, annex, readbuf, writebuf, offset, ln): raise IOError("Unknown obj type") return ret - def thread_alive(self, ptid): + def thread_alive(self, ptid: PTID) -> bool: return True - def pid_to_str(self, ptid): + def pid_to_str(self, ptid: PTID) -> str: return "pid {:d}".format(ptid[1]) - def fetch_registers(self, register): - return False + def fetch_registers(self, register: 
gdb.Register) -> None: + pass - def prepare_to_store(self, thread): + def prepare_to_store(self, thread: gdb.InferiorThread) -> None: pass # We don't need to store anything; The regcache is already written. - def store_registers(self, thread): + def store_registers(self, register: gdb.Register) -> None: pass - def has_execution(self, ptid): + def has_execution(self, ptid: PTID) -> bool: return False From e7abd25229d87d0457e2fc855cb044c47c3d9d6e Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Tue, 4 Jun 2019 18:40:58 -0400 Subject: [PATCH 186/367] crash: use two-argument fetch/store registers from gdb The py-target code assumed that the implementation would use the selected thread and there's no reason for that assumption to always be valid. I've updated the py-target code to pass the thread and the register to the python target. That required some changes elsewhere. Signed-off-by: Jeff Mahoney --- crash/arch/x86_64.py | 6 +++--- crash/kernel.py | 10 +++++++--- kdump/target.py | 6 ++++-- 3 files changed, 14 insertions(+), 8 deletions(-) diff --git a/crash/arch/x86_64.py b/crash/arch/x86_64.py index 30fb934861e..f531baa98e7 100644 --- a/crash/arch/x86_64.py +++ b/crash/arch/x86_64.py @@ -39,7 +39,7 @@ def setup_thread_info(self, thread: gdb.InferiorThread) -> None: thread.info.set_thread_info(thread_info) def fetch_register_active(self, thread: gdb.InferiorThread, - register: gdb.Register) -> None: + register: int) -> None: task = thread.info for reg in task.regs: if reg == "rip" and (register != 16 and register != -1): @@ -54,7 +54,7 @@ def fetch_register_scheduled(self, thread: gdb.InferiorThread, return self._fetch_register_scheduled(thread, register) def fetch_register_scheduled_inactive(self, thread: gdb.InferiorThread, - register: gdb.Register) -> None: + register: int) -> None: ulong_type = self.ulong_type task = thread.info.task_struct @@ -82,7 +82,7 @@ def fetch_register_scheduled_inactive(self, thread: gdb.InferiorThread, thread.info.valid_stack = True def fetch_register_scheduled_thread_return(self, thread: gdb.InferiorThread, - register: gdb.Register) -> None: + register: int) -> None: ulong_type = self.ulong_type task = thread.info.task_struct diff --git a/crash/kernel.py b/crash/kernel.py index 9cd053d406d..23c720309bb 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -354,9 +354,13 @@ def extract_modinfo_from_module(self, modpath: str) -> Dict[str, str]: f.close() return d - def fetch_registers(self, register: gdb.Register) -> None: - thread = gdb.selected_thread() - self.arch.fetch_register(thread, register.regnum) + def fetch_registers(self, thread: gdb.InferiorThread, + register: gdb.Register) -> None: + if register is None: + regnum = -1 + else: + regnum = register.regnum + self.arch.fetch_register(thread, regnum) def get_module_sections(self, module: gdb.Value) -> str: out = [] diff --git a/kdump/target.py b/kdump/target.py index 133c355ab77..023c05eb089 100644 --- a/kdump/target.py +++ b/kdump/target.py @@ -125,14 +125,16 @@ def thread_alive(self, ptid: PTID) -> bool: def pid_to_str(self, ptid: PTID) -> str: return "pid {:d}".format(ptid[1]) - def fetch_registers(self, register: gdb.Register) -> None: + def fetch_registers(self, thread: gdb.InferiorThread, + register: gdb.Register) -> None: pass def prepare_to_store(self, thread: gdb.InferiorThread) -> None: pass # We don't need to store anything; The regcache is already written. 
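For reference, a hedged sketch of the two-argument callback shape this patch moves to; it assumes the patched gdb that crash-python requires (gdb.Target and gdb.Register.regnum) and is not code from the series itself:

import gdb

class ExampleTarget(gdb.Target):
    """Illustrative only: thread-aware register callbacks."""

    def fetch_registers(self, thread, register):
        # register may be None, meaning "fetch everything" (regnum -1 below).
        regnum = -1 if register is None else register.regnum
        print("fetch regnum {} for thread {}".format(regnum, thread.ptid))

    def store_registers(self, thread, register):
        pass    # a dump target has nothing to write back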
- def store_registers(self, register: gdb.Register) -> None: + def store_registers(self, thread: gdb.InferiorThread, + register: gdb.Register) -> None: pass def has_execution(self, ptid: PTID) -> bool: From 0eeb462b8d60dc5f8d4391bbec3db7a392392233 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Tue, 4 Jun 2019 21:29:59 -0400 Subject: [PATCH 187/367] crash.util: fix misused ArgumentTypeError The argument checking in container_of passes the type of the value being checked rather than the value as it should. Signed-off-by: Jeff Mahoney --- crash/util/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crash/util/__init__.py b/crash/util/__init__.py index 56557e92ad6..df6d20073f9 100644 --- a/crash/util/__init__.py +++ b/crash/util/__init__.py @@ -69,9 +69,9 @@ def container_of(val: gdb.Value, gdbtype: gdb.Type, member: str) -> gdb.Value: TypeError: val is not a gdb.Value """ if not isinstance(val, gdb.Value): - raise ArgumentTypeError('val', type(val), gdb.Value) + raise ArgumentTypeError('val', val, gdb.Value) if not isinstance(gdbtype, gdb.Type): - raise ArgumentTypeError('gdbtype', type(gdbtype), gdb.Type) + raise ArgumentTypeError('gdbtype', gdbtype, gdb.Type) charp = types.char_p_type if val.type.code != gdb.TYPE_CODE_PTR: val = val.address From 16016006c589ee7d3041b61764e34a02866c20a6 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Tue, 4 Jun 2019 21:54:47 -0400 Subject: [PATCH 188/367] crash.subsystem.filesystem.mount: rework iterator detection to use struct_has_member Rather than handle an exception when looking at whether 'task_struct' has an 'nsproxy' member, use struct_has_member instead. Signed-off-by: Jeff Mahoney --- crash/subsystem/filesystem/mount.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/crash/subsystem/filesystem/mount.py b/crash/subsystem/filesystem/mount.py index b11db07145d..4d061d79375 100644 --- a/crash/subsystem/filesystem/mount.py +++ b/crash/subsystem/filesystem/mount.py @@ -67,11 +67,10 @@ def _for_each_mount_nsproxy(self, task: gdb.Value) -> Iterator[gdb.Value]: types.mount_type, 'mnt_list') @classmethod - def _check_task_interface(cls, symval: gdb.Value) -> None: - try: - nsproxy = symvals.init_task['nsproxy'] + def _check_task_interface(cls, init_task: gdb.Value) -> None: + if struct_has_member(init_task, 'nsproxy'): cls._for_each_mount = cls._for_each_mount_nsproxy - except KeyError: + else: raise NotImplementedError("Mount.for_each_mount is unhandled on this kernel version") def for_each_mount(self, task: gdb.Value) -> Iterator[gdb.Value]: From 5ba19b10790b050d4a5d1c19032466cb2f2599ab Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Tue, 4 Jun 2019 21:55:24 -0400 Subject: [PATCH 189/367] lint: fix unused-variable warnings This commit fixes the following lint warnings and enables enforcement of the 'unused-variable' pylint rule. 
************* Module crash.kernel W:530,18: Unused variable 'dirs' (unused-variable) W:614, 8: Unused variable 'task_list' (unused-variable) W:625, 8: Unused variable 'e' (unused-variable) ************* Module crash.arch.x86_64 W: 20, 8: Unused variable 'e' (unused-variable) W: 49,12: Unused variable 'e' (unused-variable) ************* Module crash.commands.dmesg W:219, 8: Unused variable 'e' (unused-variable) W:265, 8: Unused variable 'lte' (unused-variable) ************* Module crash.commands.lsmod W: 72, 8: Unused variable 'show_deps' (unused-variable) ************* Module crash.commands W: 73, 8: Unused variable 'x' (unused-variable) ************* Module crash.infra.lookup W:242, 8: Unused variable 'e' (unused-variable) ************* Module crash.subsystem.filesystem W:148,12: Unused variable 'x' (unused-variable) ************* Module crash.subsystem.storage.decoders W:186, 8: Unused variable 'x' (unused-variable) ************* Module crash.util.symbols W:146,25: Unused variable 'pointer' (unused-variable) ************* Module crash.util W:106, 8: Unused variable 'x' (unused-variable) W:386, 4: Unused variable 'size' (unused-variable) ************* Module crash.types.bitmap W: 64,16: Unused variable 'off' (unused-variable) ************* Module crash.types.list W:118,20: Unused variable 'i' (unused-variable) ************* Module crash.types.percpu W:158, 8: Unused variable 'group_offset' (unused-variable) W:160, 8: Unused variable 'size_in_bits' (unused-variable) W:161, 8: Unused variable 'start' (unused-variable) W:162, 8: Unused variable 'end' (unused-variable) ************* Module crash.types.slab W:139, 8: Unused variable 'bufsize' (unused-variable) W:217,12: Unused variable 'i' (unused-variable) W:372, 8: Unused variable 'limit' (unused-variable) W:462,13: Unused variable 'nid' (unused-variable) W:581, 8: Unused variable 'errors' (unused-variable) ************* Module crash.types.task W:506, 8: Unused variable 'e' (unused-variable) W:519, 8: Unused variable 'stat0' (unused-variable) Signed-off-by: Jeff Mahoney --- crash/arch/x86_64.py | 4 ++-- crash/commands/__init__.py | 1 + crash/commands/dmesg.py | 6 +++--- crash/commands/lsmod.py | 4 ---- crash/infra/lookup.py | 2 +- crash/kernel.py | 9 ++++----- crash/subsystem/filesystem/__init__.py | 2 +- crash/subsystem/storage/decoders.py | 4 ++-- crash/types/bitmap.py | 1 + crash/types/list.py | 2 +- crash/types/percpu.py | 4 ---- crash/types/slab.py | 11 ++--------- crash/types/task.py | 5 ++--- crash/util/__init__.py | 3 +-- crash/util/symbols.py | 1 + tests/pylintrc-enforce | 2 +- 16 files changed, 23 insertions(+), 38 deletions(-) diff --git a/crash/arch/x86_64.py b/crash/arch/x86_64.py index f531baa98e7..d692a5644da 100644 --- a/crash/arch/x86_64.py +++ b/crash/arch/x86_64.py @@ -17,7 +17,7 @@ def __init__(self) -> None: self._fetch_register_scheduled = \ self.fetch_register_scheduled_inactive self.inactive_task_frame_type = inactive - except gdb.error as e: + except gdb.error: try: thread_return = gdb.lookup_minimal_symbol("thread_return") self.thread_return = thread_return.value().address @@ -46,7 +46,7 @@ def fetch_register_active(self, thread: gdb.InferiorThread, continue try: thread.registers[reg].value = task.regs[reg] - except KeyError as e: + except KeyError: pass def fetch_register_scheduled(self, thread: gdb.InferiorThread, diff --git a/crash/commands/__init__.py b/crash/commands/__init__.py index 43ad21e1eeb..8f97d625f4e 100644 --- a/crash/commands/__init__.py +++ b/crash/commands/__init__.py @@ -70,4 +70,5 @@ def 
discover() -> None: mods = __all__ for mod in mods: + # pylint: disable=unused-variable x = importlib.import_module("crash.commands.{}".format(mod)) diff --git a/crash/commands/dmesg.py b/crash/commands/dmesg.py index c0675701155..7a000a26a5d 100644 --- a/crash/commands/dmesg.py +++ b/crash/commands/dmesg.py @@ -216,7 +216,7 @@ def get_log_msgs(self, dict_needed: bool = False) -> Iterable[Dict[str, Any]]: try: idx = symvals.log_first_idx - except DelayedAttributeError as e: + except DelayedAttributeError: raise LogTypeException('not structured log') if symvals.clear_seq < symvals.log_first_seq: @@ -262,13 +262,13 @@ def execute(self, args: argparse.Namespace) -> None: try: self.handle_structured_log(args) return - except LogTypeException as lte: + except LogTypeException: pass try: self.handle_logbuf(args) return - except LogTypeException as lte: + except LogTypeException: pass except LogInvalidOption as lio: raise CommandError(str(lio)) diff --git a/crash/commands/lsmod.py b/crash/commands/lsmod.py index 56c35d822f0..49638d42ac0 100644 --- a/crash/commands/lsmod.py +++ b/crash/commands/lsmod.py @@ -69,14 +69,10 @@ def print_module_percpu(self, mod: gdb.Value, cpu: int = -1) -> None: def execute(self, argv: argparse.Namespace) -> None: regex = None - show_deps = True print_header = True if argv.args: regex = re.compile(fnmatch.translate(argv.args[0])) - if argv.p is not None: - show_deps = False - core_layout = None for mod in for_each_module(): diff --git a/crash/infra/lookup.py b/crash/infra/lookup.py index abe11d16266..c31e9a4ab00 100644 --- a/crash/infra/lookup.py +++ b/crash/infra/lookup.py @@ -239,7 +239,7 @@ def resolve_type(name: str) -> Tuple[str, str, bool]: def check_ready(self) -> Union[None, gdb.Type]: try: return gdb.lookup_type(self.name, self.block) - except gdb.error as e: + except gdb.error: return None def __str__(self) -> str: diff --git a/crash/kernel.py b/crash/kernel.py index 23c720309bb..a69ae241e5c 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -281,7 +281,7 @@ def __init__(self, roots: PathSpecifier = None, obj.add_separate_debug_file(path) if obj.has_symbols(): break - except gdb.error as e: + except gdb.error: pass if not obj.has_symbols(): @@ -527,6 +527,7 @@ def cache_file_tree(self, path: str, regex: Pattern[str] = None) -> None: return self.findmap[path]['filters'].append(pattern) + # pylint: disable=unused-variable for root, dirs, files in os.walk(path): for filename in files: modname = self.normalize_modname(filename) @@ -611,8 +612,6 @@ def setup_tasks(self) -> None: import crash.cache.tasks gdb.execute('set print thread-events 0') - task_list = self.symvals.init_task['tasks'] - rqs = get_percpu_vars(self.symbols.runqueues) rqscurrs = {int(x["curr"]) : k for (k, x) in rqs.items()} @@ -622,7 +621,7 @@ def setup_tasks(self) -> None: task_count = 0 try: crashing_cpu = int(get_symbol_value('crashing_cpu')) - except Exception as e: + except Exception: crashing_cpu = -1 for task in for_each_all_tasks(): @@ -639,7 +638,7 @@ def setup_tasks(self) -> None: try: thread = gdb.selected_inferior().new_thread(ptid, ltask) - except gdb.error as e: + except gdb.error: print("Failed to setup task @{:#x}".format(int(task.address))) continue thread.name = task['comm'].string() diff --git a/crash/subsystem/filesystem/__init__.py b/crash/subsystem/filesystem/__init__.py index 314c8328905..98fc056f0fa 100644 --- a/crash/subsystem/filesystem/__init__.py +++ b/crash/subsystem/filesystem/__init__.py @@ -145,7 +145,7 @@ def get_super_block(desc: AddressSpecifier, force: 
bool = False) -> gdb.Value: sb = get_typed_pointer(desc, types.super_block_type).dereference() if not force: try: - x = int(sb['s_dev']) + x = int(sb['s_dev']) # pylint: disable=unused-variable except gdb.NotAvailableError: raise gdb.NotAvailableError(f"no superblock available at `{desc}'") diff --git a/crash/subsystem/storage/decoders.py b/crash/subsystem/storage/decoders.py index 62abc757965..644fbc67cab 100644 --- a/crash/subsystem/storage/decoders.py +++ b/crash/subsystem/storage/decoders.py @@ -183,13 +183,13 @@ def register_decoder(endio: EndIOSpecifier, decoder: Type[Decoder]) -> None: if isinstance(endio, str): if debug: print(f"Registering {endio} as callback") - x = SymbolCallback(endio, lambda a: register_decoder(a, decoder)) + SymbolCallback(endio, lambda a: register_decoder(a, decoder)) return elif isinstance(endio, list) and isinstance(endio[0], str): for sym in endio: if debug: print(f"Registering {sym} as callback") - x = SymbolCallback(sym, lambda a: register_decoder(a, decoder)) + SymbolCallback(sym, lambda a: register_decoder(a, decoder)) return if isinstance(endio, gdb.Symbol): diff --git a/crash/types/bitmap.py b/crash/types/bitmap.py index 596cc605a3c..6f038b72959 100644 --- a/crash/types/bitmap.py +++ b/crash/types/bitmap.py @@ -61,6 +61,7 @@ def for_each_set_bit(bitmap: gdb.Value, ulong = bitmap[idx] if ulong != 0: + # pylint: disable=unused-variable for off in range(min(size, bits_per_ulong)): if ulong & 1 != 0: yield bit diff --git a/crash/types/list.py b/crash/types/list.py index aa9473a9c92..0be32c6a668 100644 --- a/crash/types/list.py +++ b/crash/types/list.py @@ -115,7 +115,7 @@ def list_for_each(list_head: gdb.Value, include_head: bool = False, # are we detecting cycles? advance fast 2 times and compare # each with our current node (Floyd's Tortoise and Hare # algorithm) - for i in range(2): + for i in range(2): # pylint: disable=unused-variable fast = fast[next_].dereference() if node.address == fast.address: raise ListCycleError("Cycle in list detected.") diff --git a/crash/types/percpu.py b/crash/types/percpu.py index 56a43d38e8f..e7e76244a87 100644 --- a/crash/types/percpu.py +++ b/crash/types/percpu.py @@ -155,11 +155,7 @@ def _setup_dynamic_offset_cache_area_map(self, chunk: gdb.Value) -> None: def _setup_dynamic_offset_cache_bitmap(self, chunk: gdb.Value) -> None: - group_offset = int(symvals.pcpu_group_offsets[0]) size_in_bytes = int(chunk['nr_pages']) * Page.PAGE_SIZE - size_in_bits = size_in_bytes << 3 - start = -1 - end = 0 chunk_base = int(chunk["base_addr"]) - int(symvals.pcpu_base_addr) self._add_to_offset_cache(chunk_base, 0, size_in_bytes) diff --git a/crash/types/slab.py b/crash/types/slab.py index 3ed32ad8492..d7a6314b8e2 100644 --- a/crash/types/slab.py +++ b/crash/types/slab.py @@ -136,7 +136,6 @@ def __populate_free(self) -> None: if self.free: return - bufsize = self.kmem_cache.buffer_size objs_per_slab = self.kmem_cache.objs_per_slab if self.page_slab: @@ -214,6 +213,7 @@ def __free_error(self, list_name: str) -> None: def get_objects(self) -> Iterable[int]: bufsize = self.kmem_cache.buffer_size obj = self.s_mem + # pylint: disable=unused-variable for i in range(self.kmem_cache.objs_per_slab): yield obj obj += bufsize @@ -369,7 +369,6 @@ def all_find_obj(addr: int) -> Union[None, Tuple[bool, int, def __fill_array_cache(self, acache: gdb.Value, ac_type: str, nid_src: int, nid_tgt: int) -> None: avail = int(acache["avail"]) - limit = int(acache["limit"]) # TODO check avail > limit if avail == 0: @@ -459,6 +458,7 @@ def 
__get_allocated_objects(self, node: gdb.Value, yield obj def get_allocated_objects(self) -> Iterable[int]: + # pylint: disable=unused-variable for (nid, node) in self.__get_nodelists(): for obj in self.__get_allocated_objects(node, slab_partial): yield obj @@ -578,13 +578,6 @@ def __check_slabs(self, node: gdb.Value, slabtype: int, nid: int) -> int: print("checking {} slab list {:#x}".format(slab_list_name[slabtype], int(slab_list.address))) - errors = {'first_ok': None, - 'last_ok': None, - 'num_ok': 0, - 'first_misplaced': None, - 'last_misplaced': None, - 'num_misplaced': 0} - (check_ok, slabs, free) = self.___check_slabs(node, slabtype, nid) if not check_ok: diff --git a/crash/types/task.py b/crash/types/task.py index 81c8fd6923e..e81c5b51c82 100644 --- a/crash/types/task.py +++ b/crash/types/task.py @@ -503,7 +503,7 @@ def get_stack_pointer(self) -> int: """ try: fn = getattr(self, '_get_stack_pointer_fn') - except AttributeError as e: + except AttributeError: raise NotImplementedError("Architecture hasn't provided stack pointer callback") return int(fn(self.task_struct['thread'])) @@ -516,9 +516,8 @@ def _get__rss_field(self) -> int: def _get_rss_stat_field(self) -> int: stat = self.task_struct['mm']['rss_stat']['count'] - stat0 = self.task_struct['mm']['rss_stat']['count'][0] rss = 0 - for i in range(stat.type.sizeof // stat[0].type.sizeof): + for i in range(array_size(stat)): rss += int(stat[i]['counter']) return rss diff --git a/crash/util/__init__.py b/crash/util/__init__.py index df6d20073f9..0cd6fc0ac75 100644 --- a/crash/util/__init__.py +++ b/crash/util/__init__.py @@ -103,7 +103,7 @@ def struct_has_member(gdbtype: TypeSpecifier, name: str) -> bool: """ gdbtype = resolve_type(gdbtype) try: - x = offsetof(gdbtype, name) + x = offsetof(gdbtype, name) # pylint: disable=unused-variable return True except InvalidComponentError: return False @@ -383,7 +383,6 @@ def array_for_each(value: gdb.Value) -> Iterator[gdb.Value]: Yields: gdb.Value: One element in the array at a time """ - size = array_size(value) for i in range(array_size(value)): yield value[i] diff --git a/crash/util/symbols.py b/crash/util/symbols.py index 43b491f9990..f08e319c1f5 100644 --- a/crash/util/symbols.py +++ b/crash/util/symbols.py @@ -143,6 +143,7 @@ def override(self, name: str, value: gdb.Type) -> None: >>> t = gdb.lookup_type('struct foo') >>> types.override('struct foo', t) """ + # pylint: disable=unused-variable (name, attrname, pointer) = TypeCallback.resolve_type(name) super().override(name, value) diff --git a/tests/pylintrc-enforce b/tests/pylintrc-enforce index 4d23174f518..ec4417b5431 100644 --- a/tests/pylintrc-enforce +++ b/tests/pylintrc-enforce @@ -65,7 +65,7 @@ disable=all # either give multiple identifier separated by comma (,) or put this option # multiple time (only on the command line, not in the configuration file where # it should appear only once). See also the "--disable" option for examples. 
-enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens,multiple-imports,wrong-import-order,ungrouped-imports,wrong-import-position,import-error,relative-beyond-top-level,cyclic-import,wildcard-import,deprecated-module,reimported,import-self,unused-import,unused-wildcard-import,len-as-condition
+enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens,multiple-imports,wrong-import-order,ungrouped-imports,wrong-import-position,import-error,relative-beyond-top-level,cyclic-import,wildcard-import,deprecated-module,reimported,import-self,unused-import,unused-wildcard-import,len-as-condition,unused-variable

 [REPORTS]

From f8ba0cad7c55632fc4ce84f7483cc09479944639 Mon Sep 17 00:00:00 2001
From: Jeff Mahoney
Date: Wed, 5 Jun 2019 08:21:23 -0400
Subject: [PATCH 190/367] lint: fix unused-argument warnings

This commit fixes the following lint warnings and enables enforcement
of the 'unused-argument' pylint rule.

************* Module crash.kernel
W: 63,40: Unused argument 'debug' (unused-argument)
************* Module crash.cache.syscache
W:214,27: Unused argument 'symbol' (unused-argument)
************* Module crash.commands.xfs
W: 65,23: Unused argument 'args' (unused-argument)
************* Module crash.commands
W: 46,43: Unused argument 'from_tty' (unused-argument)
************* Module crash.commands.mount
W: 55,23: Unused argument 'task' (unused-argument)
************* Module crash.infra.callback
W: 97,42: Unused argument 'event' (unused-argument)
W:100,36: Unused argument 'event' (unused-argument)
************* Module crash.subsystem.filesystem.mount
W: 81,22: Unused argument 'gdbtype' (unused-argument)
************* Module crash.subsystem.storage
W:247,17: Unused argument 'result' (unused-argument)
************* Module crash.subsystem.storage.decoders
W: 27,23: Unused argument 'value' (unused-argument)
************* Module crash.types.slab
W:332,36: Unused argument 'gdbtype' (unused-argument)
************* Module crash.types.page
W:127,31: Unused argument 'symbol' (unused-argument)
************* Module crash.types.task
W: 64,40: Unused argument 'symbol' (unused-argument)
************* Module crash.types.percpu
W: 50,33: Unused argument 'symbol' (unused-argument)
W: 68,28: Unused argument 'ignored' (unused-argument)
W: 75,34: Unused argument 'modules' (unused-argument)
************* Module kdump.target
W: 20, 0: Unused argument 'args' (unused-argument)
W: 20, 0: Unused argument 'kwargs' (unused-argument)
W: 47,34: Unused argument 'from_tty' (unused-argument)
W: 98,37: Unused argument 'annex' (unused-argument)
W: 99,21: Unused argument 'writebuf' (unused-argument)
W:122,27: Unused argument 'ptid' (unused-argument)
W:140,28: Unused argument 'ptid' (unused-argument)

Signed-off-by: Jeff Mahoney
---
 crash/cache/syscache.py             | 1 +
 crash/commands/__init__.py          | 1 +
 crash/commands/mount.py             | 4 +---
 crash/commands/xfs.py               | 1 +
 crash/infra/callback.py             | 2 ++
 crash/kernel.py                     | 1 +
 crash/subsystem/filesystem/mount.py | 1 +
 crash/subsystem/storage/__init__.py | 1 +
 crash/subsystem/storage/decoders.py | 1 +
 crash/types/page.py                 | 1 +
 crash/types/percpu.py               | 9 ++++++---
 crash/types/slab.py                 | 1 +
 crash/types/task.py                 | 4 ++--
 kdump/target.py                     | 7 +++++--
 tests/pylintrc-enforce              | 2 +-
 15 files changed, 26 insertions(+), 11
deletions(-) diff --git a/crash/cache/syscache.py b/crash/cache/syscache.py index ab2fb032b02..f230abc724f 100644 --- a/crash/cache/syscache.py +++ b/crash/cache/syscache.py @@ -211,6 +211,7 @@ def set_jiffies(cls, value: gdb.Value) -> None: cls.jiffies_dv.callback(value) @classmethod + # pylint: disable=unused-argument def setup_jiffies(cls, symbol: gdb.Symbol) -> bool: if cls.jiffies_ready: return True diff --git a/crash/commands/__init__.py b/crash/commands/__init__.py index 8f97d625f4e..e53f3dd5dfb 100644 --- a/crash/commands/__init__.py +++ b/crash/commands/__init__.py @@ -43,6 +43,7 @@ def __init__(self, name: str, parser: ArgumentParser = None) -> None: def format_help(self) -> str: return self.parser.format_help() + # pylint: disable=unused-argument def invoke_uncaught(self, argstr: str, from_tty: bool = False) -> None: argv = gdb.string_to_argv(argstr) args = self.parser.parse_args(argv) diff --git a/crash/commands/mount.py b/crash/commands/mount.py index fb9df20a77a..80837617590 100644 --- a/crash/commands/mount.py +++ b/crash/commands/mount.py @@ -6,7 +6,6 @@ import argparse from crash.commands import Command, ArgumentParser -from crash.types.task import LinuxTask from crash.subsystem.filesystem.mount import d_path, for_each_mount from crash.subsystem.filesystem.mount import mount_device, mount_fstype from crash.subsystem.filesystem.mount import mount_super, mount_flags @@ -51,8 +50,7 @@ def execute(self, args: argparse.Namespace) -> None: for mnt in for_each_mount(): self.show_one_mount(mnt, args) - def show_one_mount(self, mnt: gdb.Value, args: argparse.Namespace, - task: LinuxTask = None) -> None: + def show_one_mount(self, mnt: gdb.Value, args: argparse.Namespace) -> None: if mnt.type.code == gdb.TYPE_CODE_PTR: mnt = mnt.dereference() diff --git a/crash/commands/xfs.py b/crash/commands/xfs.py index 4c42b0652ee..2f61975550d 100644 --- a/crash/commands/xfs.py +++ b/crash/commands/xfs.py @@ -62,6 +62,7 @@ def __init__(self, name: str) -> None: Command.__init__(self, name, parser) + # pylint: disable=unused-argument def list_xfs(self, args: argparse.Namespace) -> None: count = 0 print_header = True diff --git a/crash/infra/callback.py b/crash/infra/callback.py index 267fc684fc4..0f880cf028b 100644 --- a/crash/infra/callback.py +++ b/crash/infra/callback.py @@ -94,9 +94,11 @@ def _setup_symbol_cache_flush_callback(cls) -> None: # symtab code. The symtab observer is behind the python observers # in the execution queue so the cache flush executes /after/ us. 
@classmethod + # pylint: disable=unused-argument def _flush_symbol_cache_callback(cls, event: gdb.NewObjFileEvent) -> None: gdb.execute("maint flush-symbol-cache") + # pylint: disable=unused-argument def _new_objfile_callback(self, event: gdb.NewObjFileEvent) -> None: # GDB purposely copies the event list prior to calling the callbacks # If we remove an event from another handler, it will still be sent diff --git a/crash/kernel.py b/crash/kernel.py index a69ae241e5c..9ee9c216ecd 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -56,6 +56,7 @@ class CrashKernel(object): symvals = Symvals(['init_task']) symbols = Symbols(['runqueues']) + # pylint: disable=unused-argument def __init__(self, roots: PathSpecifier = None, vmlinux_debuginfo: PathSpecifier = None, module_path: PathSpecifier = None, diff --git a/crash/subsystem/filesystem/mount.py b/crash/subsystem/filesystem/mount.py index 4d061d79375..6026525c0fb 100644 --- a/crash/subsystem/filesystem/mount.py +++ b/crash/subsystem/filesystem/mount.py @@ -78,6 +78,7 @@ def for_each_mount(self, task: gdb.Value) -> Iterator[gdb.Value]: _Mount = Mount() +# pylint: disable=unused-argument def _check_mount_type(gdbtype: gdb.Type) -> None: try: types.mount_type = gdb.lookup_type('struct mount') # type: ignore diff --git a/crash/subsystem/storage/__init__.py b/crash/subsystem/storage/__init__.py index 149e27bb29f..b85806831a3 100644 --- a/crash/subsystem/storage/__init__.py +++ b/crash/subsystem/storage/__init__.py @@ -244,6 +244,7 @@ def inode_on_bdev(inode: gdb.Value) -> gdb.Value: else: return inode['i_sb']['s_bdev'].dereference() +# pylint: disable=unused-argument def _check_types(result: gdb.Symbol) -> None: try: if symvals.part_type.type.unqualified() != types.device_type_type: diff --git a/crash/subsystem/storage/decoders.py b/crash/subsystem/storage/decoders.py index 644fbc67cab..73120ca4c89 100644 --- a/crash/subsystem/storage/decoders.py +++ b/crash/subsystem/storage/decoders.py @@ -24,6 +24,7 @@ class Decoder(object): """ __endio__: EndIOSpecifier = None + # pylint: disable=unused-argument def __init__(self, value: gdb.Value = None) -> None: self.interpreted = False diff --git a/crash/types/page.py b/crash/types/page.py index 7bb55175c2f..b2e92883a4a 100644 --- a/crash/types/page.py +++ b/crash/types/page.py @@ -124,6 +124,7 @@ def setup_zone_type(cls, gdbtype: gdb.Type) -> None: cls.ZONES_WIDTH = int(ceil(log(max_nr_zones))) @classmethod + # pylint: disable=unused-argument def setup_nodes_width(cls, symbol: Union[gdb.Symbol, gdb.MinSymbol]) -> None: # TODO: handle kernels with no space for nodes in page flags try: diff --git a/crash/types/percpu.py b/crash/types/percpu.py index e7e76244a87..d9e138929ee 100644 --- a/crash/types/percpu.py +++ b/crash/types/percpu.py @@ -47,7 +47,8 @@ class PerCPUState(object): _nr_cpus = 0 @classmethod - def _setup_per_cpu_size(cls, symbol: gdb.Symbol) -> None: + # pylint: disable=unused-argument + def _setup_per_cpu_size(cls, unused: gdb.Symbol) -> None: try: size = msymvals['__per_cpu_end'] - msymvals['__per_cpu_start'] except DelayedAttributeError: @@ -65,14 +66,16 @@ def _setup_per_cpu_size(cls, symbol: gdb.Symbol) -> None: pass @classmethod - def _setup_nr_cpus(cls, ignored: gdb.Symbol) -> None: + # pylint: disable=unused-argument + def _setup_nr_cpus(cls, unused: gdb.Symbol) -> None: cls._nr_cpus = array_size(symvals['__per_cpu_offset']) if cls._last_cpu == -1: cls._last_cpu = cls._nr_cpus @classmethod - def _setup_module_ranges(cls, modules: gdb.Symbol) -> None: + # pylint: disable=unused-argument 
+ def _setup_module_ranges(cls, unused: gdb.Symbol) -> None: for module in for_each_module(): start = int(module['percpu']) if start == 0: diff --git a/crash/types/slab.py b/crash/types/slab.py index d7a6314b8e2..e192501e588 100644 --- a/crash/types/slab.py +++ b/crash/types/slab.py @@ -329,6 +329,7 @@ def check_kmem_cache_type(cls, gdbtype: gdb.Type) -> None: cls.head_name = find_member_variant(gdbtype, ['next', 'list']) @classmethod + # pylint: disable=unused-argument def setup_alien_cache_type(cls, gdbtype: gdb.Type) -> None: cls.alien_cache_type_exists = True diff --git a/crash/types/task.py b/crash/types/task.py index e81c5b51c82..272f2de51c8 100644 --- a/crash/types/task.py +++ b/crash/types/task.py @@ -61,8 +61,8 @@ def has_flag(cls, flagname: str) -> bool: return v != cls.TASK_FLAG_UNINITIALIZED @classmethod - def _task_state_flags_callback(cls, symbol: gdb.Symbol) -> None: - count = array_size(symvals.task_state_array) + def _task_state_flags_callback(cls, task_state_array: gdb.Symbol) -> None: + count = array_size(task_state_array) bit = 0 for i in range(count): diff --git a/kdump/target.py b/kdump/target.py index 023c05eb089..86ec7cbcc69 100644 --- a/kdump/target.py +++ b/kdump/target.py @@ -17,8 +17,7 @@ class SymbolCallback(object): "addrxlat symbolic callback" - def __init__(self, ctx: Optional[addrxlat.Context] = None, - *args: int, **kwargs: int) -> None: + def __init__(self, ctx: Optional[addrxlat.Context] = None) -> None: self.ctx = ctx def __call__(self, symtype: int, *args: int) -> int: @@ -44,6 +43,7 @@ def __init__(self, debug: bool = False) -> None: self.register() + # pylint: disable=unused-argument def open(self, filename: str, from_tty: bool) -> None: objfiles = gdb.objfiles() @@ -95,6 +95,7 @@ def report_error(cls, addr: int, length: int, error: Exception) -> None: .format(length, addr, str(error)), file=sys.stderr) + # pylint: disable=unused-argument def xfer_partial(self, obj: int, annex: str, readbuf: bytearray, writebuf: bytearray, offset: int, ln: int) -> int: ret = -1 @@ -133,9 +134,11 @@ def prepare_to_store(self, thread: gdb.InferiorThread) -> None: pass # We don't need to store anything; The regcache is already written. + # pylint: disable=unused-argument def store_registers(self, thread: gdb.InferiorThread, register: gdb.Register) -> None: pass + # pylint: disable=unused-argument def has_execution(self, ptid: PTID) -> bool: return False diff --git a/tests/pylintrc-enforce b/tests/pylintrc-enforce index ec4417b5431..ff1117be418 100644 --- a/tests/pylintrc-enforce +++ b/tests/pylintrc-enforce @@ -65,7 +65,7 @@ disable=all # either give multiple identifier separated by comma (,) or put this option # multiple time (only on the command line, not in the configuration file where # it should appear only once). See also the "--disable" option for examples. 
-enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens,multiple-imports,wrong-import-order,ungrouped-imports,wrong-import-position,import-error,relative-beyond-top-level,cyclic-import,wildcard-import,deprecated-module,reimported,import-self,unused-import,unused-wildcard-import,len-as-condition,unused-variable +enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens,multiple-imports,wrong-import-order,ungrouped-imports,wrong-import-position,import-error,relative-beyond-top-level,cyclic-import,wildcard-import,deprecated-module,reimported,import-self,unused-import,unused-wildcard-import,len-as-condition,unused-variable,unused-argument [REPORTS] From ea2a0233e4662191988a60cb7c0e0932f95791f4 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Thu, 6 Jun 2019 17:42:59 -0400 Subject: [PATCH 191/367] lint: fix arguments-differ warnings This commit fixes the following lint warnings and enables enforcement of the 'arguments-differ' pylint rule. ************* Module crash.commands.task W: 37, 4: Parameters differ from overridden 'execute' method (arguments-differ) ************* Module crash.commands.mount W: 46, 4: Parameters differ from overridden 'execute' method (arguments-differ) ************* Module crash.commands.btrfs W: 55, 4: Parameters differ from overridden 'execute' method (arguments-differ) ************* Module crash.commands.xfs W:185, 4: Parameters differ from overridden 'execute' method (arguments-differ) ************* Module crash.commands.kmem W: 47, 4: Parameters differ from overridden 'execute' method (arguments-differ) ************* Module crash.commands.syscmd W: 62, 4: Parameters differ from overridden 'execute' method (arguments-differ) ************* Module crash.commands.dmesg W:261, 4: Parameters differ from overridden 'execute' method (arguments-differ) Signed-off-by: Jeff Mahoney --- crash/commands/__init__.py | 2 +- crash/commands/help.py | 6 +++--- crash/commands/lsmod.py | 14 +++++++------- crash/commands/mount.py | 10 ---------- crash/commands/ps.py | 32 +++++++++++++++++--------------- crash/commands/vtop.py | 4 ++-- tests/pylintrc-enforce | 2 +- 7 files changed, 31 insertions(+), 39 deletions(-) diff --git a/crash/commands/__init__.py b/crash/commands/__init__.py index e53f3dd5dfb..41cf5a7de2c 100644 --- a/crash/commands/__init__.py +++ b/crash/commands/__init__.py @@ -62,7 +62,7 @@ def invoke(self, argstr: str, from_tty: bool = False) -> None: except (SystemExit, KeyboardInterrupt): pass - def execute(self, argv: argparse.Namespace) -> None: + def execute(self, args: argparse.Namespace) -> None: raise NotImplementedError("Command should not be called directly") def discover() -> None: diff --git a/crash/commands/help.py b/crash/commands/help.py index 4b665dda297..dd9c3a8c5e8 100644 --- a/crash/commands/help.py +++ b/crash/commands/help.py @@ -27,8 +27,8 @@ def __init__(self) -> None: parser.add_argument('args', nargs=argparse.REMAINDER) super().__init__('help', parser) - def execute(self, argv: argparse.Namespace) -> None: - if not argv.args: + def execute(self, args: argparse.Namespace) -> None: + if not args.args: print("Available commands:") for cmd in sorted(self.commands): summary = self.commands[cmd].__doc__.strip() @@ -36,7 +36,7 @@ def execute(self, argv: 
argparse.Namespace) -> None: summary = "no help text provided" print("{:<15} - {}".format(cmd, summary)) else: - for cmd in argv.args: + for cmd in args.args: try: text = self.commands[cmd].format_help().strip() except KeyError: diff --git a/crash/commands/lsmod.py b/crash/commands/lsmod.py index 49638d42ac0..5eeca9d6ad8 100644 --- a/crash/commands/lsmod.py +++ b/crash/commands/lsmod.py @@ -67,11 +67,11 @@ def print_module_percpu(self, mod: gdb.Value, cpu: int = -1) -> None: tabs, size)) - def execute(self, argv: argparse.Namespace) -> None: + def execute(self, args: argparse.Namespace) -> None: regex = None print_header = True - if argv.args: - regex = re.compile(fnmatch.translate(argv.args[0])) + if args.args: + regex = re.compile(fnmatch.translate(args.args[0])) core_layout = None @@ -85,15 +85,15 @@ def execute(self, argv: argparse.Namespace) -> None: if m is None: continue - if argv.p is not None: + if args.p is not None: if print_header: print_header = False - if argv.p == -1: + if args.p == -1: print("Module\t\t\tPercpu Base\t\tSize") else: print("Module\t\t\tPercpu Base@CPU{:d}\t\tSize" - .format(argv.p)) - self.print_module_percpu(mod, argv.p) + .format(args.p)) + self.print_module_percpu(mod, args.p) continue if print_header: diff --git a/crash/commands/mount.py b/crash/commands/mount.py index 80837617590..3b41f587847 100644 --- a/crash/commands/mount.py +++ b/crash/commands/mount.py @@ -1,8 +1,6 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from typing import Any - import argparse from crash.commands import Command, ArgumentParser @@ -35,14 +33,6 @@ def __init__(self, name: str) -> None: super().__init__(name, parser) - def __getattr__(self, name: str) -> Any: - if name == 'charp': - self.charp = gdb.lookup_type('char').pointer() - else: - raise AttributeError - - return getattr(self, name) - def execute(self, args: argparse.Namespace) -> None: if args.v: print("{:^16} {:^16} {:^10} {:^16} {}" diff --git a/crash/commands/ps.py b/crash/commands/ps.py index 469179b16af..1ba23820aa9 100755 --- a/crash/commands/ps.py +++ b/crash/commands/ps.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from typing import Pattern, Optional, Callable +from typing import Pattern, Optional, Callable, Dict import argparse import fnmatch @@ -18,31 +18,31 @@ class TaskFormat(object): This class is responsible for converting the arguments into formatting rules. 
""" - def __init__(self, argv: argparse.Namespace, + def __init__(self, args: argparse.Namespace, regex: Optional[Pattern[str]]) -> None: self.sort = lambda x: x.info.task_pid() self._filter: Callable[[LinuxTask], bool] = lambda x: True self._format_one_task = self._format_common_line self._regex = regex - if argv.s: + if args.s: self._format_header = self._format_stack_header self._format_column4 = self._format_stack_address - elif argv.n: + elif args.n: self._format_header = self._format_threadnum_header self._format_column4 = self._format_thread_num else: self._format_header = self._format_task_header self._format_column4 = self._format_task_address - if argv.k: + if args.k: self._filter = self._is_kernel_thread - elif argv.u: + elif args.u: self._filter = self._is_user_task - elif argv.G: + elif args.G: self._filter = self._is_thread_group_leader - if argv.l: + if args.l: self.sort = lambda x: -x.info.last_run() self._format_one_task = self._format_last_run self._format_header = lambda: "" @@ -559,6 +559,8 @@ def __init__(self) -> None: Command.__init__(self, "ps", parser) + self.task_states: Dict[int, str] = dict() + def task_state_string(self, task: LinuxTask) -> str: state = task.task_state() buf = "" @@ -603,19 +605,19 @@ def setup_task_states(self) -> None: if TF.has_flag('TASK_IDLE'): self.task_states[TF.TASK_IDLE] = "ID" - def execute(self, argv: argparse.Namespace) -> None: + def execute(self, args: argparse.Namespace) -> None: # Unimplemented - if argv.p or argv.c or argv.t or argv.a or argv.g or argv.r: + if args.p or args.c or args.t or args.a or args.g or args.r: raise CommandError("Support for the -p, -c, -t, -a, -g, and -r options is unimplemented.") - if not hasattr(self, 'task_states'): + if not self.task_states: self.setup_task_states() regex = None - if argv.args: - regex = re.compile(fnmatch.translate(argv.args[0])) + if args.args: + regex = re.compile(fnmatch.translate(args.args[0])) - taskformat = TaskFormat(argv, regex) + taskformat = TaskFormat(args, regex) count = 0 header = taskformat.format_header() @@ -638,7 +640,7 @@ def execute(self, argv: argparse.Namespace) -> None: if count == 0: if regex: - print(f"No matches for {argv.args[0]}.") + print(f"No matches for {args.args[0]}.") else: raise CommandError("Unfiltered output has no matches. BUG?") diff --git a/crash/commands/vtop.py b/crash/commands/vtop.py index d2650fd2482..95472f5410a 100644 --- a/crash/commands/vtop.py +++ b/crash/commands/vtop.py @@ -191,7 +191,7 @@ def __init__(self) -> None: super().__init__("vtop", parser) - def execute(self, argv: argparse.Namespace) -> None: + def execute(self, args: argparse.Namespace) -> None: trans = CrashAddressTranslation() # Silly mypy bug means the base class needs come first if not trans.is_non_auto: @@ -199,7 +199,7 @@ def execute(self, argv: argparse.Namespace) -> None: else: pgt = LinuxNonAutoPGT(trans.context, trans.system) - for addr in argv.args: + for addr in args.args: addr = int(addr, 16) fulladdr = addrxlat.FullAddress(addrxlat.KVADDR, addr) print('{:16} {:16}'.format('VIRTUAL', 'PHYSICAL')) diff --git a/tests/pylintrc-enforce b/tests/pylintrc-enforce index ff1117be418..18495b39465 100644 --- a/tests/pylintrc-enforce +++ b/tests/pylintrc-enforce @@ -65,7 +65,7 @@ disable=all # either give multiple identifier separated by comma (,) or put this option # multiple time (only on the command line, not in the configuration file where # it should appear only once). See also the "--disable" option for examples. 
-enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens,multiple-imports,wrong-import-order,ungrouped-imports,wrong-import-position,import-error,relative-beyond-top-level,cyclic-import,wildcard-import,deprecated-module,reimported,import-self,unused-import,unused-wildcard-import,len-as-condition,unused-variable,unused-argument
+enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens,multiple-imports,wrong-import-order,ungrouped-imports,wrong-import-position,import-error,relative-beyond-top-level,cyclic-import,wildcard-import,deprecated-module,reimported,import-self,unused-import,unused-wildcard-import,len-as-condition,unused-variable,unused-argument,arguments-differ

 [REPORTS]

From 311adcbe232b81c2840f684f5cfe5d7a424c099e Mon Sep 17 00:00:00 2001
From: Jeff Mahoney
Date: Wed, 5 Jun 2019 08:24:35 -0400
Subject: [PATCH 192/367] lint: fix attribute-defined-outside-init warnings

This commit fixes the following lint warnings and enables enforcement
of the 'attribute-defined-outside-init' pylint rule.

************* Module crash.cache.syscache
W: 26, 8: Attribute 'utsname' defined outside __init__ (attribute-defined-outside-init)
W: 36, 8: Attribute 'utsname_cache' defined outside __init__ (attribute-defined-outside-init)
W:126, 8: Attribute 'config_buffer' defined outside __init__ (attribute-defined-outside-init)
W:133, 8: Attribute 'ikconfig_cache' defined outside __init__ (attribute-defined-outside-init)
W:173,12: Attribute 'hz' defined outside __init__ (attribute-defined-outside-init)
W:203,12: Attribute 'loadavg' defined outside __init__ (attribute-defined-outside-init)
W:242, 8: Attribute 'uptime' defined outside __init__ (attribute-defined-outside-init)
************* Module crash.commands.vtop
W: 25, 8: Attribute 'step' defined outside __init__ (attribute-defined-outside-init)
W: 35, 8: Attribute 'table' defined outside __init__ (attribute-defined-outside-init)
W: 36, 8: Attribute 'ptr' defined outside __init__ (attribute-defined-outside-init)
W:217,20: Attribute 'ptr' defined outside __init__ (attribute-defined-outside-init)
W: 39, 8: Attribute 'note' defined outside __init__ (attribute-defined-outside-init)
W: 43,12: Attribute 'note' defined outside __init__ (attribute-defined-outside-init)
W:217,20: Attribute 'ptr' defined outside __init__ (attribute-defined-outside-init)
************* Module crash.subsystem.filesystem.xfs
W:247, 8: Attribute 'xfsbuf' defined outside __init__ (attribute-defined-outside-init)
W:248, 8: Attribute 'devname' defined outside __init__ (attribute-defined-outside-init)
************* Module crash.subsystem.filesystem.decoders
W: 45, 8: Attribute 'dio' defined outside __init__ (attribute-defined-outside-init)
W: 46, 8: Attribute 'fstype' defined outside __init__ (attribute-defined-outside-init)
W: 47, 8: Attribute 'dev' defined outside __init__ (attribute-defined-outside-init)
W: 48, 8: Attribute 'offset' defined outside __init__ (attribute-defined-outside-init)
W: 90, 8: Attribute 'inode' defined outside __init__ (attribute-defined-outside-init)
W: 91, 8: Attribute 'fstype' defined outside __init__ (attribute-defined-outside-init)
W:127, 8: Attribute 'bh' defined outside __init__ (attribute-defined-outside-init)
************* Module crash.subsystem.filesystem.ext3
W: 36, 8:
Attribute 'fstype' defined outside __init__ (attribute-defined-outside-init) W: 37, 8: Attribute 'devname' defined outside __init__ (attribute-defined-outside-init) W: 38, 8: Attribute 'offset' defined outside __init__ (attribute-defined-outside-init) W: 39, 8: Attribute 'length' defined outside __init__ (attribute-defined-outside-init) ************* Module crash.subsystem.storage.device_mapper W: 43, 8: Attribute 'info' defined outside __init__ (attribute-defined-outside-init) W: 44, 8: Attribute 'tio' defined outside __init__ (attribute-defined-outside-init) W:101, 8: Attribute 'tio' defined outside __init__ (attribute-defined-outside-init) W:102, 8: Attribute 'next_bio' defined outside __init__ (attribute-defined-outside-init) ************* Module crash.subsystem.storage.decoders W:141, 8: Attribute 'block_device' defined outside __init__ (attribute-defined-outside-init) ************* Module kdump.target W: 65,12: Attribute 'base_offset' defined outside __init__ (attribute-defined-outside-init) W: 67,12: Attribute 'base_offset' defined outside __init__ (attribute-defined-outside-init) Signed-off-by: Jeff Mahoney --- crash/cache/syscache.py | 244 +++++++++++++---------- crash/commands/vtop.py | 4 + crash/subsystem/filesystem/decoders.py | 3 + crash/subsystem/filesystem/ext3.py | 1 + crash/subsystem/filesystem/xfs.py | 1 + crash/subsystem/storage/blocksq.py | 4 +- crash/subsystem/storage/decoders.py | 1 + crash/subsystem/storage/device_mapper.py | 2 + crash/types/task.py | 7 +- kdump/target.py | 5 +- tests/pylintrc-enforce | 2 +- tests/test_syscache.py | 23 +-- 12 files changed, 172 insertions(+), 125 deletions(-) diff --git a/crash/cache/syscache.py b/crash/cache/syscache.py index f230abc724f..61417fb5c88 100644 --- a/crash/cache/syscache.py +++ b/crash/cache/syscache.py @@ -22,30 +22,56 @@ class CrashUtsnameCache(CrashCache): symvals = Symvals(['init_uts_ns']) - def load_utsname(self) -> gdb.Value: - self.utsname = self.symvals.init_uts_ns['name'] - return self.utsname + def __init__(self) -> None: + self._utsname_cache_dict: Dict[str, str] = dict() - def init_utsname_cache(self) -> Dict[str, str]: - d = {} + @property + def utsname(self) -> gdb.Value: + return self.symvals.init_uts_ns['name'] + + def _init_utsname_cache(self) -> None: + d = self._utsname_cache_dict for field in self.utsname.type.fields(): val = self.utsname[field.name].string() d[field.name] = val - self.utsname_cache = d - return self.utsname_cache + @property + def _utsname_cache(self) -> Dict[str, str]: + if not self._utsname_cache_dict: + self._init_utsname_cache() + + return self._utsname_cache_dict + + def _utsname_field(self, name: str) -> str: + try: + return self._utsname_cache[name] + except KeyError: + raise DelayedAttributeError(name) + + @property + def sysname(self) -> str: + return self._utsname_field('sysname') + + @property + def nodename(self) -> str: + return self._utsname_field('nodename') + + @property + def release(self) -> str: + return self._utsname_field('release') + + @property + def version(self) -> str: + return self._utsname_field('version') - utsname_fields = ['sysname', 'nodename', 'release', - 'version', 'machine', 'domainname'] - def __getattr__(self, name: str) -> Any: - if name == 'utsname_cache': - return self.init_utsname_cache() - elif name == 'utsname': - return self.load_utsname() - if name in self.utsname_fields: - return self.utsname_cache[name] - return getattr(self.__class__, name) + @property + def machine(self) -> str: + return self._utsname_field('machine') + + @property + 
def domainname(self) -> str: + return self._utsname_field('domainname') class CrashConfigCache(CrashCache): types = Types(['char *']) @@ -53,20 +79,33 @@ class CrashConfigCache(CrashCache): msymvals = MinimalSymvals(['kernel_config_data', 'kernel_config_data_end']) - def __getattr__(self, name: str) -> Any: - if name == 'config_buffer': - return self.decompress_config_buffer() - elif name == 'ikconfig_cache': - return self._parse_config() - return getattr(self.__class__, name) + def __init__(self) -> None: + self._config_buffer = "" + self._ikconfig_cache: Dict[str, str] = dict() - def read_buf(self, address: int, size: int) -> memoryview: - return gdb.selected_inferior().read_memory(address, size) + @property + def config_buffer(self) -> str: + if not self._config_buffer: + self._config_buffer = self._decompress_config_buffer() + return self._config_buffer - def read_buf_bytes(self, address: int, size: int) -> bytes: - return self.read_buf(address, size).tobytes() + @property + def ikconfig_cache(self) -> Dict[str, str]: + if not self._ikconfig_cache: + self._parse_config() + return self._ikconfig_cache - def locate_config_buffer_section(self) -> ImageLocation: + def __getitem__(self, name: str) -> Any: + try: + return self.ikconfig_cache[name] + except KeyError: + return None + + @staticmethod + def _read_buf_bytes(address: int, size: int) -> bytes: + return gdb.selected_inferior().read_memory(address, size).tobytes() + + def _locate_config_buffer_section(self) -> ImageLocation: data_start = int(self.msymvals.kernel_config_data) data_end = int(self.msymvals.kernel_config_data_end) @@ -81,7 +120,7 @@ def locate_config_buffer_section(self) -> ImageLocation: }, } - def locate_config_buffer_typed(self) -> ImageLocation: + def _locate_config_buffer_typed(self) -> ImageLocation: start = int(self.symvals.kernel_config_data.address) end = start + self.symvals.kernel_config_data.type.sizeof @@ -96,42 +135,38 @@ def locate_config_buffer_typed(self) -> ImageLocation: }, } - def verify_image(self, location: ImageLocation) -> None: - MAGIC_START = b'IKCFG_ST' - MAGIC_END = b'IKCFG_ED' + def _verify_image(self, location: ImageLocation) -> None: + magic_start = b'IKCFG_ST' + magic_end = b'IKCFG_ED' - buf_len = len(MAGIC_START) - buf = self.read_buf_bytes(location['magic']['start'], buf_len) - if buf != MAGIC_START: - raise IOError(f"Missing MAGIC_START in kernel_config_data. Got `{buf}'") + buf_len = len(magic_start) + buf = self._read_buf_bytes(location['magic']['start'], buf_len) + if buf != magic_start: + raise IOError(f"Missing magic_start in kernel_config_data. Got `{buf}'") - buf_len = len(MAGIC_END) - buf = self.read_buf_bytes(location['magic']['end'], buf_len) - if buf != MAGIC_END: - raise IOError("Missing MAGIC_END in kernel_config_data. Got `{buf}'") + buf_len = len(magic_end) + buf = self._read_buf_bytes(location['magic']['end'], buf_len) + if buf != magic_end: + raise IOError("Missing magic_end in kernel_config_data. 
Got `{buf}'") - def decompress_config_buffer(self) -> str: + def _decompress_config_buffer(self) -> str: try: - location = self.locate_config_buffer_section() + location = self._locate_config_buffer_section() except DelayedAttributeError: - location = self.locate_config_buffer_typed() + location = self._locate_config_buffer_typed() - self.verify_image(location) + self._verify_image(location) # Read the compressed data - buf = self.read_buf_bytes(location['data']['start'], - location['data']['size']) + buf = self._read_buf_bytes(location['data']['start'], + location['data']['size']) - decompressed = zlib.decompress(buf, 16 + zlib.MAX_WBITS) - self.config_buffer = str(decompressed.decode('utf-8')) - return self.config_buffer + return zlib.decompress(buf, 16 + zlib.MAX_WBITS).decode('utf-8') def __str__(self) -> str: return self.config_buffer - def _parse_config(self) -> Dict[str, str]: - self.ikconfig_cache: Dict[str, str] = dict() - + def _parse_config(self) -> None: for line in self.config_buffer.splitlines(): # bin comments line = re.sub("#.*$", "", line).strip() @@ -141,81 +176,84 @@ def _parse_config(self) -> Dict[str, str]: m = re.match("CONFIG_([^=]*)=(.*)", line) if m: - self.ikconfig_cache[m.group(1)] = m.group(2) - - return self.ikconfig_cache - - def __getitem__(self, name: str) -> Any: - try: - return self.ikconfig_cache[name] - except KeyError: - return None + self._ikconfig_cache[m.group(1)] = m.group(2) class CrashKernelCache(CrashCache): symvals = Symvals(['avenrun']) - jiffies_ready = False - adjust_jiffies = False + _adjust_jiffies = False + _reset_uptime = True + + _jiffies_dv = DelayedValue('jiffies') - jiffies_dv = DelayedValue('jiffies') + def __init__(self, config: CrashConfigCache) -> None: + CrashCache.__init__(self) + self.config = config + self._hz = -1 + self._uptime = timedelta(seconds=0) + self._loadavg = "" @property def jiffies(self) -> gdb.Value: - v = self.jiffies_dv.get() + v = self._jiffies_dv.get() return v - def __init__(self, config: CrashConfigCache) -> None: - CrashCache.__init__(self) - self.config = config + @property + def hz(self) -> int: + if self._hz == -1: + self._hz = int(self.config['HZ']) + + return self._hz - def __getattr__(self, name: str) -> Any: - if name == 'hz': - self.hz = int(self.config['HZ']) - return self.hz - elif name == 'uptime': - return self.get_uptime() - elif name == 'loadavg': - return self.get_loadavg() - return getattr(self.__class__, name) - - def calculate_loadavg(self, metric: int) -> float: + def get_uptime(self) -> timedelta: + return self.uptime + + @property + def uptime(self) -> timedelta: + if self._uptime == 0 or self._reset_uptime: + uptime = self._adjusted_jiffies() // self.hz + self._uptime = timedelta(seconds=uptime) + self._reset_uptime = False + return self._uptime + + @property + def loadavg(self) -> str: + if not self._loadavg: + try: + metrics = self._get_loadavg_values() + self._loadavg = self._format_loadavg(metrics) + except DelayedAttributeError: + return "Unknown" + return self._loadavg + + def _calculate_loadavg(self, metric: int) -> float: # The kernel needs to do fixed point trickery to calculate # a floating point average. We can just return a float. 
return round(int(metric) / (1 << 11), 2) - def format_loadavg(self, metrics: List[float]) -> str: + def _format_loadavg(self, metrics: List[float]) -> str: out = [] for metric in metrics: out.append(str(metric)) return " ".join(out) - def get_loadavg_values(self) -> List[float]: + def _get_loadavg_values(self) -> List[float]: metrics = [] for index in range(0, array_size(self.symvals.avenrun)): - metrics.append(self.calculate_loadavg(self.symvals.avenrun[index])) + metrics.append(self._calculate_loadavg(self.symvals.avenrun[index])) return metrics - def get_loadavg(self) -> str: - try: - metrics = self.get_loadavg_values() - self.loadavg = self.format_loadavg(metrics) - return self.loadavg - except DelayedAttributeError: - return "Unknown" - @classmethod def set_jiffies(cls, value: gdb.Value) -> None: - cls.jiffies_dv.value = None - cls.jiffies_dv.callback(value) + cls._jiffies_dv.value = None + cls._jiffies_dv.callback(value) + cls._reset_uptime = True @classmethod # pylint: disable=unused-argument def setup_jiffies(cls, symbol: gdb.Symbol) -> bool: - if cls.jiffies_ready: - return True - jiffies_sym = gdb.lookup_global_symbol('jiffies_64') if jiffies_sym: @@ -223,24 +261,20 @@ def setup_jiffies(cls, symbol: gdb.Symbol) -> bool: jiffies = int(jiffies_sym.value()) except gdb.MemoryError: return False - cls.adjust_jiffies = True + cls._adjust_jiffies = True else: jiffies = int(gdb.lookup_global_symbol('jiffies').value()) - cls.adjust_jiffies = False + cls._adjust_jiffies = False cls.set_jiffies(jiffies) return True - def adjusted_jiffies(self) -> gdb.Value: - if self.adjust_jiffies: - return self.jiffies -(int(0x100000000) - 300 * self.hz) - else: - return self.jiffies - def get_uptime(self) -> timedelta: - self.uptime = timedelta(seconds=self.adjusted_jiffies() // self.hz) - return self.uptime + def _adjusted_jiffies(self) -> gdb.Value: + if self._adjust_jiffies: + return self.jiffies -(int(0x100000000) - 300 * self.hz) + return self.jiffies symbol_cbs = SymbolCallbacks([('jiffies', CrashKernelCache.setup_jiffies), ('jiffies_64', CrashKernelCache.setup_jiffies)]) diff --git a/crash/commands/vtop.py b/crash/commands/vtop.py index 95472f5410a..f55b261eb53 100644 --- a/crash/commands/vtop.py +++ b/crash/commands/vtop.py @@ -14,6 +14,10 @@ class LinuxPGT(object): def __init__(self, ctx: addrxlat.Context, sys: addrxlat.System) -> None: self.context = ctx self.system = sys + self.step: addrxlat.Step = None + self.table = self.table_names[0] + self.ptr: addrxlat.FullAddress = None + self.note = '' def begin(self, addr: int) -> bool: meth = self.system.get_map(addrxlat.SYS_MAP_HW).search(addr) diff --git a/crash/subsystem/filesystem/decoders.py b/crash/subsystem/filesystem/decoders.py index ce046e4ccfa..a3f33596069 100644 --- a/crash/subsystem/filesystem/decoders.py +++ b/crash/subsystem/filesystem/decoders.py @@ -42,6 +42,7 @@ def __init__(self, bio: gdb.Value) -> None: def interpret(self) -> None: """Interprets a direct i/o bio to populate its attributes""" + # pylint: disable=attribute-defined-outside-init self.dio = self.bio['bi_private'].cast(self._types.dio_p_type) self.fstype = super_fstype(self.dio['inode']['i_sb']) self.dev = block_device_name(self.dio['inode']['i_sb']['s_bdev']) @@ -87,6 +88,7 @@ def __init__(self, bio: gdb.Value) -> None: def interpret(self) -> None: """Interpret the multipage bio to populate its attributes""" + # pylint: disable=attribute-defined-outside-init self.inode = self.bio['bi_io_vec'][0]['bv_page']['mapping']['host'] self.fstype = 
super_fstype(self.inode['i_sb']) @@ -124,6 +126,7 @@ def __init__(self, bio: gdb.Value) -> None: def interpret(self) -> None: """Interpret the buffer_head bio to populate its attributes""" + # pylint: disable=attribute-defined-outside-init self.bh = self.bio['bi_private'].cast(self._types.buffer_head_p_type) def __str__(self) -> str: diff --git a/crash/subsystem/filesystem/ext3.py b/crash/subsystem/filesystem/ext3.py index 7abecb890bd..bf1dd16ee0c 100644 --- a/crash/subsystem/filesystem/ext3.py +++ b/crash/subsystem/filesystem/ext3.py @@ -33,6 +33,7 @@ def __init__(self, bh: gdb.Value) -> None: def interpret(self) -> None: """Interprets the ext3 buffer_head to populate its attributes""" + # pylint: disable=attribute-defined-outside-init self.fstype = "journal on ext3" self.devname = block_device_name(self.bh['b_bdev']) self.offset = int(self.bh['b_blocknr']) * int(self.bh['b_size']) diff --git a/crash/subsystem/filesystem/xfs.py b/crash/subsystem/filesystem/xfs.py index e89aff151b6..639a1b16162 100644 --- a/crash/subsystem/filesystem/xfs.py +++ b/crash/subsystem/filesystem/xfs.py @@ -244,6 +244,7 @@ def __init__(self, bio: gdb.Value) -> None: def interpret(self) -> None: """Interpret the xfsbuf bio to populate its attributes""" + # pylint: disable=attribute-defined-outside-init self.xfsbuf = self.bio['bi_private'].cast(self._types.xfs_buf_p_type) self.devname = block_device_name(self.bio['bi_bdev']) diff --git a/crash/subsystem/storage/blocksq.py b/crash/subsystem/storage/blocksq.py index 2ac138f5002..f7653110f18 100644 --- a/crash/subsystem/storage/blocksq.py +++ b/crash/subsystem/storage/blocksq.py @@ -5,7 +5,7 @@ from crash.util.symbols import Types from crash.types.list import list_for_each_entry -from crash.cache.syscache import kernel +from crash.cache.syscache import kernel, jiffies_to_msec import gdb @@ -49,7 +49,7 @@ def request_age_ms(request: gdb.Value) -> int: :obj:`int`: Difference between the request's ``start_time`` and current ``jiffies`` in milliseconds. 
""" - return kernel.jiffies_to_msec(kernel.jiffies - request['start_time']) + return jiffies_to_msec(kernel.jiffies - request['start_time']) def requests_in_flight(queue: gdb.Value) -> Tuple[int, int]: """ diff --git a/crash/subsystem/storage/decoders.py b/crash/subsystem/storage/decoders.py index 73120ca4c89..8ec69f4d75e 100644 --- a/crash/subsystem/storage/decoders.py +++ b/crash/subsystem/storage/decoders.py @@ -138,6 +138,7 @@ def __init__(self, bh: gdb.Value) -> None: self.bh = bh def interpret(self) -> None: + # pylint: disable=attribute-defined-outside-init self.block_device = block_device_name(self.bh['b_bdev']) def __str__(self) -> str: diff --git a/crash/subsystem/storage/device_mapper.py b/crash/subsystem/storage/device_mapper.py index 633f17c5bd2..5e6d325e4f9 100644 --- a/crash/subsystem/storage/device_mapper.py +++ b/crash/subsystem/storage/device_mapper.py @@ -40,6 +40,7 @@ def __init__(self, bio: gdb.Value) -> None: def interpret(self) -> None: """Interprets the request-based device mapper bio to populate its attributes""" + # pylint: disable=attribute-defined-outside-init self.info = self._get_clone_bio_rq_info(self.bio) self.tio = self.info['tio'] @@ -98,6 +99,7 @@ def __init__(self, bio: gdb.Value) -> None: def interpret(self) -> None: """Interprets the cloned device mapper bio to populate its attributes""" + # pylint: disable=attribute-defined-outside-init self.tio = self._get_clone_bio_tio(self.bio) self.next_bio = self.tio['io']['bio'] diff --git a/crash/types/task.py b/crash/types/task.py index 272f2de51c8..757668b8280 100644 --- a/crash/types/task.py +++ b/crash/types/task.py @@ -14,7 +14,7 @@ PF_EXITING = 0x4 types = Types(['struct task_struct', 'struct mm_struct', 'atomic_long_t']) -symvals = Symvals(['task_state_array', 'init_task', 'init_mm']) +symvals = Symvals(['init_task', 'init_mm']) # This is pretty painful. These are all #defines so none of them end # up with symbols in the kernel. The best approximation we have is @@ -61,12 +61,13 @@ def has_flag(cls, flagname: str) -> bool: return v != cls.TASK_FLAG_UNINITIALIZED @classmethod - def _task_state_flags_callback(cls, task_state_array: gdb.Symbol) -> None: + def _task_state_flags_callback(cls, symbol: gdb.Symbol) -> None: + task_state_array = symbol.value() count = array_size(task_state_array) bit = 0 for i in range(count): - state = symvals.task_state_array[i].string() + state = task_state_array[i].string() state_strings = { '(running)' : 'TASK_RUNNING', '(sleeping)' : 'TASK_INTERRUPTIBLE', diff --git a/kdump/target.py b/kdump/target.py index 86ec7cbcc69..a894b94e828 100644 --- a/kdump/target.py +++ b/kdump/target.py @@ -40,6 +40,7 @@ def __init__(self, debug: bool = False) -> None: self.shortname = "kdumpfile" self.longname = "Use a Linux kernel kdump file as a target" self.kdump: kdumpfile = None + self.base_offset = 0 self.register() @@ -63,8 +64,8 @@ def open(self, filename: str, from_tty: bool) -> None: try: attr = self.kdump.attr.get(KERNELOFFSET, "0") self.base_offset = int(attr, base=16) - except Exception as e: - self.base_offset = 0 + except (TypeError, ValueError): + pass vmlinux = gdb.objfiles()[0].filename diff --git a/tests/pylintrc-enforce b/tests/pylintrc-enforce index 18495b39465..d9adb515efe 100644 --- a/tests/pylintrc-enforce +++ b/tests/pylintrc-enforce @@ -65,7 +65,7 @@ disable=all # either give multiple identifier separated by comma (,) or put this option # multiple time (only on the command line, not in the configuration file where # it should appear only once). 
See also the "--disable" option for examples. -enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens,multiple-imports,wrong-import-order,ungrouped-imports,wrong-import-position,import-error,relative-beyond-top-level,cyclic-import,wildcard-import,deprecated-module,reimported,import-self,unused-import,unused-wildcard-import,len-as-condition,unused-variable,unused-argument,arguments-differ +enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens,multiple-imports,wrong-import-order,ungrouped-imports,wrong-import-position,import-error,relative-beyond-top-level,cyclic-import,wildcard-import,deprecated-module,reimported,import-self,unused-import,unused-wildcard-import,len-as-condition,unused-variable,unused-argument,arguments-differ,attribute-defined-outside-init [REPORTS] diff --git a/tests/test_syscache.py b/tests/test_syscache.py index 980df67280e..1ee1089bf69 100644 --- a/tests/test_syscache.py +++ b/tests/test_syscache.py @@ -43,9 +43,8 @@ def clear_namespace(self): def get_fake_config(self): from crash.cache.syscache import CrashConfigCache class FakeConfigCache(CrashConfigCache): - def decompress_config_buffer(self): - self.config_buffer = fake_config - return self.config_buffer + def _decompress_config_buffer(self): + return fake_config return FakeConfigCache() @@ -112,7 +111,7 @@ def test_get_uptime_value(self): config = CrashConfigCache() kernel = CrashKernelCache(config) kernel.set_jiffies(27028508) - kernel.adjust_jiffies = False + kernel._adjust_jiffies = False x = kernel.uptime uptime = str(x) self.assertTrue(uptime == '1 day, 6:01:54') @@ -138,18 +137,18 @@ def test_kernel_namespace_nofile(self): def test_calculate_loadavg(self): config = self.CrashConfigCache() kernel = self.CrashKernelCache(config) - self.assertTrue(kernel.calculate_loadavg(344) == 0.17) - self.assertTrue(kernel.calculate_loadavg(105) == 0.05) - self.assertTrue(kernel.calculate_loadavg(28) == 0.01) + self.assertTrue(kernel._calculate_loadavg(344) == 0.17) + self.assertTrue(kernel._calculate_loadavg(105) == 0.05) + self.assertTrue(kernel._calculate_loadavg(28) == 0.01) - self.assertTrue(kernel.calculate_loadavg(458524) == 223.89) - self.assertTrue(kernel.calculate_loadavg(455057) == 222.20) - self.assertTrue(kernel.calculate_loadavg(446962) == 218.24) + self.assertTrue(kernel._calculate_loadavg(458524) == 223.89) + self.assertTrue(kernel._calculate_loadavg(455057) == 222.20) + self.assertTrue(kernel._calculate_loadavg(446962) == 218.24) def test_loadavg_values(self): config = self.CrashConfigCache() kernel = self.CrashKernelCache(config) - metrics = kernel.get_loadavg_values() + metrics = kernel._get_loadavg_values() self.assertTrue(metrics[0] == 0.17) self.assertTrue(metrics[1] == 0.05) self.assertTrue(metrics[2] == 0.01) @@ -165,7 +164,7 @@ def test_loadavg_values_missing_symbol(self): config = self.CrashConfigCache() kernel = self.CrashKernelCache(config) with self.assertRaises(DelayedAttributeError): - metrics = kernel.get_loadavg_values() + metrics = kernel._get_loadavg_values() def test_loadavg_missing_symbol(self): self.clear_namespace() From c01243aac8256953de9a229588a62340da89dd88 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Wed, 5 Jun 2019 10:21:27 -0400 Subject: [PATCH 193/367] lint: fix redefined-builtin 
failures

This commit fixes the following lint warnings and enables enforcement
of the 'redefined-builtin' pylint rule.

************* Module crash.cache.syscache
W: 6, 0: Redefining built-in 'round' (redefined-builtin)

Signed-off-by: Jeff Mahoney
---
 crash/cache/syscache.py | 2 --
 tests/pylintrc-enforce  | 2 +-
 2 files changed, 1 insertion(+), 3 deletions(-)

diff --git a/crash/cache/syscache.py b/crash/cache/syscache.py
index 61417fb5c88..0e98c072c30 100644
--- a/crash/cache/syscache.py
+++ b/crash/cache/syscache.py
@@ -3,8 +3,6 @@

 from typing import Dict, List, Any

-from builtins import round
-
 import re
 import zlib
 from datetime import timedelta
diff --git a/tests/pylintrc-enforce b/tests/pylintrc-enforce
index d9adb515efe..d3ff0d93c7b 100644
--- a/tests/pylintrc-enforce
+++ b/tests/pylintrc-enforce
@@ -65,7 +65,7 @@ disable=all
 # either give multiple identifier separated by comma (,) or put this option
 # multiple time (only on the command line, not in the configuration file where
 # it should appear only once). See also the "--disable" option for examples.
-enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens,multiple-imports,wrong-import-order,ungrouped-imports,wrong-import-position,import-error,relative-beyond-top-level,cyclic-import,wildcard-import,deprecated-module,reimported,import-self,unused-import,unused-wildcard-import,len-as-condition,unused-variable,unused-argument,arguments-differ,attribute-defined-outside-init
+enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens,multiple-imports,wrong-import-order,ungrouped-imports,wrong-import-position,import-error,relative-beyond-top-level,cyclic-import,wildcard-import,deprecated-module,reimported,import-self,unused-import,unused-wildcard-import,len-as-condition,unused-variable,unused-argument,arguments-differ,attribute-defined-outside-init,redefined-builtin

 [REPORTS]

From dde716fe89eed9baa328ea236288d34d0587b266 Mon Sep 17 00:00:00 2001
From: Jeff Mahoney
Date: Wed, 5 Jun 2019 17:35:27 -0400
Subject: [PATCH 194/367] lint: fix unsubscriptable-object errors

This commit fixes the following lint errors and enables enforcement
of the 'unsubscriptable-object' pylint rule.
************* Module crash.commands.vtop E: 41,25: Value 'self.step.idx' is unsubscriptable (unsubscriptable-object) ************* Module crash.commands.kmem E: 97,19: Value 'ac' is unsubscriptable (unsubscriptable-object) E: 98,47: Value 'ac' is unsubscriptable (unsubscriptable-object) E: 99,21: Value 'ac' is unsubscriptable (unsubscriptable-object) E:100,58: Value 'ac' is unsubscriptable (unsubscriptable-object) E:101,21: Value 'ac' is unsubscriptable (unsubscriptable-object) E:102,70: Value 'ac' is unsubscriptable (unsubscriptable-object) E:102,85: Value 'ac' is unsubscriptable (unsubscriptable-object) Signed-off-by: Jeff Mahoney --- crash/commands/kmem.py | 16 ++++++++++------ crash/commands/vtop.py | 2 ++ tests/pylintrc-enforce | 2 +- 3 files changed, 13 insertions(+), 7 deletions(-) diff --git a/crash/commands/kmem.py b/crash/commands/kmem.py index c5f9a83a4c7..a0a94d677aa 100644 --- a/crash/commands/kmem.py +++ b/crash/commands/kmem.py @@ -94,12 +94,16 @@ def execute(self, args: argparse.Namespace) -> None: print("FREE object %x from slab %s" % (obj[1], name)) elif obj[2] is not None: ac = obj[2] - if ac["ac_type"] == "percpu": - ac_desc = "cpu %d cache" % ac["nid_tgt"] - elif ac["ac_type"] == "shared": - ac_desc = "shared cache on node %d" % ac["nid_tgt"] - elif ac["ac_type"] == "alien": - ac_desc = "alien cache of node %d for node %d" % (ac["nid_src"], ac["nid_tgt"]) + ac_type = ac['ac_type'] # pylint: disable=unsubscriptable-object + nid_tgt = ac['nid_tgt'] # pylint: disable=unsubscriptable-object + if ac_type == "percpu": + ac_desc = "cpu %d cache" % nid_tgt + elif ac_type == "shared": + ac_desc = "shared cache on node %d" % nid_tgt + elif ac_type == "alien": + nid_src = ac['nid_src'] # pylint: disable=unsubscriptable-object + ac_desc = "alien cache of node %d for node %d" % \ + (nid_src, nid_tgt) else: raise CommandError(f"unexpected array cache type {str(ac)}") diff --git a/crash/commands/vtop.py b/crash/commands/vtop.py index f55b261eb53..2e88e21d26a 100644 --- a/crash/commands/vtop.py +++ b/crash/commands/vtop.py @@ -38,6 +38,8 @@ def next(self) -> bool: level = self.step.remain - 1 self.table = self.table_names[level - 1] self.ptr = self.step.base.copy() + # self.step.idx is a 9-tuple + # pylint: disable=unsubscriptable-object self.ptr.addr += self.step.idx[level] * self.step.elemsz self.note = '' diff --git a/tests/pylintrc-enforce b/tests/pylintrc-enforce index d3ff0d93c7b..1ebb6609bbf 100644 --- a/tests/pylintrc-enforce +++ b/tests/pylintrc-enforce @@ -65,7 +65,7 @@ disable=all # either give multiple identifier separated by comma (,) or put this option # multiple time (only on the command line, not in the configuration file where # it should appear only once). See also the "--disable" option for examples. 
-enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens,multiple-imports,wrong-import-order,ungrouped-imports,wrong-import-position,import-error,relative-beyond-top-level,cyclic-import,wildcard-import,deprecated-module,reimported,import-self,unused-import,unused-wildcard-import,len-as-condition,unused-variable,unused-argument,arguments-differ,attribute-defined-outside-init,redefined-builtin +enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens,multiple-imports,wrong-import-order,ungrouped-imports,wrong-import-position,import-error,relative-beyond-top-level,cyclic-import,wildcard-import,deprecated-module,reimported,import-self,unused-import,unused-wildcard-import,len-as-condition,unused-variable,unused-argument,arguments-differ,attribute-defined-outside-init,redefined-builtin,unsubscriptable-object [REPORTS] From 63515e8387877aa6ef3a858b77ca7f96453d7245 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Wed, 5 Jun 2019 17:40:42 -0400 Subject: [PATCH 195/367] lint: fix no-member errors This commit fixes the following lint errors and enables enforcement of the 'no-member' pylint rule. ************* Module crash.commands.vtop E: 40,19: Class 'base' has no 'copy' member (no-member) E: 48,15: Module 'addrxlat.exceptions' has no 'NotPresentError' member (no-member) E: 66,16: Module 'addrxlat.exceptions' has no 'NotPresentError' member (no-member) E: 67,16: Module 'addrxlat.exceptions' has no 'NoDataError' member (no-member) ************* Module kdump.target E: 34,14: Module 'addrxlat.exceptions' has no 'NoDataError' member (no-member) E: 65,19: Class 'attr' has no 'get' member (no-member) E:112,19: Module 'addrxlat.exceptions' has no 'NoDataError' member (no-member) Signed-off-by: Jeff Mahoney --- crash/commands/vtop.py | 9 ++++++--- crash/infra/callback.py | 3 +++ kdump/target.py | 7 +++++-- tests/pylintrc-enforce | 2 +- 4 files changed, 15 insertions(+), 6 deletions(-) diff --git a/crash/commands/vtop.py b/crash/commands/vtop.py index 2e88e21d26a..1822fed5760 100644 --- a/crash/commands/vtop.py +++ b/crash/commands/vtop.py @@ -37,6 +37,9 @@ def next(self) -> bool: level = self.step.remain - 1 self.table = self.table_names[level - 1] + # pylint is picking up base as _addrxlat.FullAddress instead of + # addrxlat.FullAddress + # pylint: disable=no-member self.ptr = self.step.base.copy() # self.step.idx is a 9-tuple # pylint: disable=unsubscriptable-object @@ -45,7 +48,7 @@ def next(self) -> bool: self.note = '' try: self.step.step() - except addrxlat.exceptions.NotPresentError: + except addrxlat.exceptions.NotPresentError: # pylint: disable=no-member self.note = ' (NOT PRESENT)' self.step.remain = 0 return True @@ -63,8 +66,8 @@ def address(self) -> str: try: tmp.conv(addrxlat.KPHYSADDR, self.context, self.system) return addr + '{:x} [phys]'.format(tmp.addr) - except (addrxlat.exceptions.NotPresentError, - addrxlat.exceptions.NoDataError): + except (addrxlat.exceptions.NotPresentError, # pylint: disable=no-member + addrxlat.exceptions.NoDataError): # pylint: disable=no-member return addr + 'N/A' class _Parser(ArgumentParser): diff --git a/crash/infra/callback.py b/crash/infra/callback.py index 0f880cf028b..75135639210 100644 --- a/crash/infra/callback.py +++ b/crash/infra/callback.py @@ -64,6 +64,7 @@ def 
connect_callback(self) -> bool: self.completed = completed if self.completed is False: + # pylint: disable=no-member gdb.events.new_objfile.connect(self._new_objfile_callback) return self.completed @@ -76,6 +77,7 @@ def complete(self) -> None: :obj:`CallbackCompleted`: This callback has already been completed. """ if not self.completed: + # pylint: disable=no-member gdb.events.new_objfile.disconnect(self._new_objfile_callback) self.completed = True self.connected = False @@ -86,6 +88,7 @@ def complete(self) -> None: @classmethod def _setup_symbol_cache_flush_callback(cls) -> None: if not cls._symbol_cache_flush_setup: + # pylint: disable=no-member gdb.events.new_objfile.connect(cls._flush_symbol_cache_callback) cls._symbol_cache_flush_setup = True diff --git a/kdump/target.py b/kdump/target.py index a894b94e828..052e3e0fa23 100644 --- a/kdump/target.py +++ b/kdump/target.py @@ -31,6 +31,8 @@ def __call__(self, symtype: int, *args: int) -> int: ms = gdb.lookup_minimal_symbol(args[0]) if ms is not None: return int(ms.value().address) + + # pylint: disable=no-member raise addrxlat.exceptions.NoDataError() class Target(gdb.Target): @@ -62,7 +64,7 @@ def open(self, filename: str, from_tty: bool) -> None: KERNELOFFSET = "linux.vmcoreinfo.lines.KERNELOFFSET" try: - attr = self.kdump.attr.get(KERNELOFFSET, "0") + attr = self.kdump.attr.get(KERNELOFFSET, "0") # pylint: disable=no-member self.base_offset = int(attr, base=16) except (TypeError, ValueError): pass @@ -109,7 +111,7 @@ def xfer_partial(self, obj: int, annex: str, readbuf: bytearray, if self.debug: self.report_error(offset, ln, e) raise gdb.TargetXferEOF(str(e)) - except addrxlat.exceptions.NoDataError as e: + except addrxlat.exceptions.NoDataError as e: # pylint: disable=no-member if self.debug: self.report_error(offset, ln, e) raise gdb.TargetXferUnavailable(str(e)) @@ -121,6 +123,7 @@ def xfer_partial(self, obj: int, annex: str, readbuf: bytearray, raise IOError("Unknown obj type") return ret + # pylint: disable=unused-argument def thread_alive(self, ptid: PTID) -> bool: return True diff --git a/tests/pylintrc-enforce b/tests/pylintrc-enforce index 1ebb6609bbf..51c9afcfb6f 100644 --- a/tests/pylintrc-enforce +++ b/tests/pylintrc-enforce @@ -65,7 +65,7 @@ disable=all # either give multiple identifier separated by comma (,) or put this option # multiple time (only on the command line, not in the configuration file where # it should appear only once). See also the "--disable" option for examples. 
-enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens,multiple-imports,wrong-import-order,ungrouped-imports,wrong-import-position,import-error,relative-beyond-top-level,cyclic-import,wildcard-import,deprecated-module,reimported,import-self,unused-import,unused-wildcard-import,len-as-condition,unused-variable,unused-argument,arguments-differ,attribute-defined-outside-init,redefined-builtin,unsubscriptable-object +enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens,multiple-imports,wrong-import-order,ungrouped-imports,wrong-import-position,import-error,relative-beyond-top-level,cyclic-import,wildcard-import,deprecated-module,reimported,import-self,unused-import,unused-wildcard-import,len-as-condition,unused-variable,unused-argument,arguments-differ,attribute-defined-outside-init,redefined-builtin,unsubscriptable-object,no-member [REPORTS] From 65d0ff4b891aa17bb0abbe370454da851b68a9f0 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Thu, 6 Jun 2019 19:02:16 -0400 Subject: [PATCH 196/367] lint: fix useless-super-delegation warnings This commit fixes the following lint warnings and enables enforcement of the 'useless-super-delegation' pylint rule. ************* Module crash.exceptions W: 32, 4: Useless super delegation in method '__init__' (useless-super-delegation) ************* Module crash.cache.vm W: 7, 4: Useless super delegation in method '__init__' (useless-super-delegation) Signed-off-by: Jeff Mahoney --- crash/cache/vm.py | 3 --- crash/exceptions.py | 2 -- tests/pylintrc-enforce | 2 +- 3 files changed, 1 insertion(+), 6 deletions(-) diff --git a/crash/cache/vm.py b/crash/cache/vm.py index e9c58f04602..00096d03554 100644 --- a/crash/cache/vm.py +++ b/crash/cache/vm.py @@ -4,9 +4,6 @@ from crash.cache import CrashCache class CrashCacheVM(CrashCache): - def __init__(self) -> None: - super().__init__() - def refresh(self) -> None: pass diff --git a/crash/exceptions.py b/crash/exceptions.py index f2b362a41f6..49c9537e6af 100644 --- a/crash/exceptions.py +++ b/crash/exceptions.py @@ -29,8 +29,6 @@ def __init__(self, name: str) -> None: class InvalidArgumentError(TypeError): """Base class for invalid argument exceptions""" - def __init__(self, msg: str) -> None: - super().__init__(msg) class ArgumentTypeError(InvalidArgumentError): """The provided object could not be converted to the expected type""" diff --git a/tests/pylintrc-enforce b/tests/pylintrc-enforce index 51c9afcfb6f..2a0090ec7e5 100644 --- a/tests/pylintrc-enforce +++ b/tests/pylintrc-enforce @@ -65,7 +65,7 @@ disable=all # either give multiple identifier separated by comma (,) or put this option # multiple time (only on the command line, not in the configuration file where # it should appear only once). See also the "--disable" option for examples. 
-enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens,multiple-imports,wrong-import-order,ungrouped-imports,wrong-import-position,import-error,relative-beyond-top-level,cyclic-import,wildcard-import,deprecated-module,reimported,import-self,unused-import,unused-wildcard-import,len-as-condition,unused-variable,unused-argument,arguments-differ,attribute-defined-outside-init,redefined-builtin,unsubscriptable-object,no-member +enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens,multiple-imports,wrong-import-order,ungrouped-imports,wrong-import-position,import-error,relative-beyond-top-level,cyclic-import,wildcard-import,deprecated-module,reimported,import-self,unused-import,unused-wildcard-import,len-as-condition,unused-variable,unused-argument,arguments-differ,attribute-defined-outside-init,redefined-builtin,unsubscriptable-object,no-member,useless-super-delegation [REPORTS] From 9f2d1c0948e6df9523a67cd4078a60a70ad7d077 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Thu, 6 Jun 2019 19:02:29 -0400 Subject: [PATCH 197/367] lint: fix protected-access warnings This commit fixes the following lint warnings and enables enforcement of the 'protected-access' pylint rule. ************* Module crash.subsystem.filesystem.xfs W:410,15: Access to a protected member _ail_head_name of a client class (protected-access) W:649,45: Access to a protected member _detect_ail_version of a client class (protected-access) ************* Module crash.subsystem.filesystem.mount W:277,45: Access to a protected member _check_task_interface of a client class (protected-access) ************* Module crash.types.classdev W: 28,27: Access to a protected member _setup_iterator_type of a client class (protected-access) W: 49, 7: Access to a protected member _class_is_private of a client class (protected-access) W: 54,11: Access to a protected member _class_is_private of a client class (protected-access) ************* Module crash.types.node W:138,46: Access to a protected member _setup_node_states of a client class (protected-access) ************* Module crash.types.task W:169,31: Access to a protected member _task_state_flags_callback of a client class (protected-access) ************* Module crash.types.percpu W:370,36: Access to a protected member _setup_per_cpu_size of a client class (protected-access) W:372,36: Access to a protected member _setup_per_cpu_size of a client class (protected-access) W:373,51: Access to a protected member _setup_nr_cpus of a client class (protected-access) W:374,42: Access to a protected member _setup_module_ranges of a client class (protected-access) ************* Module crash.types.cpu W: 89,31: Access to a protected member _setup_online_mask of a client class (protected-access) W: 91,31: Access to a protected member _setup_online_mask of a client class (protected-access) W: 93,31: Access to a protected member _setup_possible_mask of a client class (protected-access) W: 95,31: Access to a protected member _setup_possible_mask of a client class (protected-access) Signed-off-by: Jeff Mahoney --- crash/kernel.py | 123 +++++++++++++++------------- crash/subsystem/filesystem/mount.py | 12 ++- crash/subsystem/filesystem/xfs.py | 24 +++++- crash/types/classdev.py | 31 +++++-- crash/types/cpu.py | 32 ++++++-- 
crash/types/node.py | 13 ++- crash/types/page.py | 8 ++ crash/types/percpu.py | 14 ++-- crash/types/task.py | 17 +++- tests/pylintrc-enforce | 2 +- 10 files changed, 190 insertions(+), 86 deletions(-) diff --git a/crash/kernel.py b/crash/kernel.py index 9ee9c216ecd..92e92b38a09 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -130,14 +130,51 @@ def __init__(self, roots: PathSpecifier = None, self.modules_order: Dict[str, Dict[str, str]] = dict() obj = gdb.objfiles()[0] kernel = os.path.basename(obj.filename) - debugroot = "/usr/lib/debug" - version = self.extract_version() + self.kernel = kernel + self.version = self.extract_version() + + self._setup_roots(roots, verbose) + self._setup_vmlinux_debuginfo(vmlinux_debuginfo) + self._setup_module_path(module_path, verbose) + self._setup_module_debuginfo_path(module_debuginfo_path, verbose) + + # We need separate debuginfo. Let's go find it. + if not obj.has_symbols(): + print("Loading debug symbols for vmlinux") + for path in [self.build_id_path(obj)] + self.vmlinux_debuginfo: + try: + obj.add_separate_debug_file(path) + if obj.has_symbols(): + break + except gdb.error: + pass + + if not obj.has_symbols(): + raise CrashKernelError("Couldn't locate debuginfo for {}" + .format(kernel)) + self.vermagic = self.extract_vermagic() + + archname = obj.architecture.name() + try: + archclass = crash.arch.get_architecture(archname) + except RuntimeError as e: + raise CrashKernelError(str(e)) + + self.arch = archclass() + + self.target = gdb.current_target() + self.vmcore = self.target.kdump + + self.target.fetch_registers = self.fetch_registers + self.crashing_thread = None + + def _setup_roots(self, roots: PathSpecifier = None, + verbose: bool = False) -> None: if roots is None: self.roots = ["/"] - elif (isinstance(roots, list) and roots and - isinstance(roots[0], str)): + elif isinstance(roots, list) and roots and isinstance(roots[0], str): x = None for root in roots: if os.path.exists(root): @@ -163,19 +200,21 @@ def __init__(self, roots: PathSpecifier = None, self.roots = x else: raise InvalidArgumentError("roots must be None, str, or list of str") - if verbose: print("roots={}".format(self.roots)) + def _setup_vmlinux_debuginfo(self, vmlinux_debuginfo: PathSpecifier = None, + verbose: bool = False) -> None: + debugroot = "/usr/lib/debug" if vmlinux_debuginfo is None: - x = [] + x: List[str] = [] defaults = [ - "{}.debug".format(kernel), - "vmlinux-{}.debug".format(version), - "{}/{}.debug".format(debugroot, kernel), + "{}.debug".format(self.kernel), + "vmlinux-{}.debug".format(self.version), + "{}/{}.debug".format(debugroot, self.kernel), "{}/boot/{}.debug".format(debugroot, - os.path.basename(kernel)), - "{}/boot/vmlinux-{}.debug".format(debugroot, version), + os.path.basename(self.kernel)), + "{}/boot/vmlinux-{}.debug".format(debugroot, self.version), ] for root in self.roots: for mpath in defaults: @@ -188,8 +227,8 @@ def __init__(self, roots: PathSpecifier = None, self.vmlinux_debuginfo = x - elif (isinstance(vmlinux_debuginfo, list) and - vmlinux_debuginfo and isinstance(vmlinux_debuginfo[0], str)): + elif (isinstance(vmlinux_debuginfo, list) and vmlinux_debuginfo and + isinstance(vmlinux_debuginfo[0], str)): self.vmlinux_debuginfo = vmlinux_debuginfo elif isinstance(vmlinux_debuginfo, str): self.vmlinux_debuginfo = [vmlinux_debuginfo] @@ -199,23 +238,23 @@ def __init__(self, roots: PathSpecifier = None, if verbose: print("vmlinux_debuginfo={}".format(self.vmlinux_debuginfo)) + def _setup_module_path(self, module_path: PathSpecifier = 
None, + verbose: bool = False) -> None: + x: List[str] = [] if module_path is None: - x = [] path = "modules" if os.path.exists(path): x.append(path) for root in self.roots: - path = "{}/lib/modules/{}".format(root, version) + path = "{}/lib/modules/{}".format(root, self.version) if os.path.exists(path): x.append(path) self.module_path = x elif (isinstance(module_path, list) and isinstance(module_path[0], str)): - x = [] - for root in self.roots: for mpath in module_path: path = "{}/{}".format(root, mpath) @@ -224,8 +263,6 @@ def __init__(self, roots: PathSpecifier = None, self.module_path = x elif isinstance(module_path, str): - x = [] - if os.path.exists(module_path): x.append(module_path) @@ -236,21 +273,25 @@ def __init__(self, roots: PathSpecifier = None, if verbose: print("module_path={}".format(self.module_path)) + def _setup_module_debuginfo_path(self, module_debuginfo_path: PathSpecifier = None, + verbose: bool = False) -> None: + debugroot = "/usr/lib/debug" + + x: List[str] = [] if module_debuginfo_path is None: - x = [] path = "modules.debug" if os.path.exists(path): x.append(path) for root in self.roots: - path = "{}/{}/lib/modules/{}".format(root, debugroot, version) + path = "{}/{}/lib/modules/{}".format(root, debugroot, + self.version) if os.path.exists(path): x.append(path) self.module_debuginfo_path = x elif (isinstance(module_debuginfo_path, list) and isinstance(module_debuginfo_path[0], str)): - x = [] for root in self.roots: for mpath in module_debuginfo_path: @@ -260,7 +301,6 @@ def __init__(self, roots: PathSpecifier = None, self.module_debuginfo_path = x elif isinstance(module_debuginfo_path, str): - x = [] for root in self.roots: path = "{}/{}".format(root, module_debuginfo_path) @@ -274,40 +314,9 @@ def __init__(self, roots: PathSpecifier = None, if verbose: print("module_debuginfo_path={}".format(self.module_debuginfo_path)) - # We need separate debuginfo. Let's go find it. - if not obj.has_symbols(): - print("Loading debug symbols for vmlinux") - for path in [self.build_id_path(obj)] + self.vmlinux_debuginfo: - try: - obj.add_separate_debug_file(path) - if obj.has_symbols(): - break - except gdb.error: - pass - - if not obj.has_symbols(): - raise CrashKernelError("Couldn't locate debuginfo for {}" - .format(kernel)) - - self.vermagic = self.extract_vermagic() - - archname = obj.architecture.name() - try: - archclass = crash.arch.get_architecture(archname) - except RuntimeError as e: - raise CrashKernelError(str(e)) - - self.arch = archclass() - - self.target = gdb.current_target() - self.vmcore = self.target.kdump - - self.target.fetch_registers = self.fetch_registers - self.crashing_thread = None - # When working without a symbol table, we still need to be able # to resolve version information. 
- def get_minsymbol_as_string(self, name: str) -> str: + def _get_minsymbol_as_string(self, name: str) -> str: sym = gdb.lookup_minimal_symbol(name).value() return sym.address.cast(self.types.char_p_type).string() @@ -319,7 +328,7 @@ def extract_version(self) -> str: except (AttributeError, NameError, MissingSymbolError): pass - banner = self.get_minsymbol_as_string('linux_banner') + banner = self._get_minsymbol_as_string('linux_banner') return banner.split(' ')[2] @@ -330,7 +339,7 @@ def extract_vermagic(self) -> str: except (AttributeError, NameError): pass - return self.get_minsymbol_as_string('vermagic') + return self._get_minsymbol_as_string('vermagic') def extract_modinfo_from_module(self, modpath: str) -> Dict[str, str]: f = open(modpath, 'rb') diff --git a/crash/subsystem/filesystem/mount.py b/crash/subsystem/filesystem/mount.py index 6026525c0fb..c66004dbc35 100644 --- a/crash/subsystem/filesystem/mount.py +++ b/crash/subsystem/filesystem/mount.py @@ -67,7 +67,15 @@ def _for_each_mount_nsproxy(self, task: gdb.Value) -> Iterator[gdb.Value]: types.mount_type, 'mnt_list') @classmethod - def _check_task_interface(cls, init_task: gdb.Value) -> None: + def check_task_interface(cls, init_task: gdb.Symbol) -> None: + """ + Check which interface to iterating over mount structures is in use + + Meant to be used as a SymbolCallback. + + Args: + init_task: The ``init_task`` symbol. + """ if struct_has_member(init_task, 'nsproxy'): cls._for_each_mount = cls._for_each_mount_nsproxy else: @@ -274,4 +282,4 @@ def d_path(mnt: gdb.Value, dentry: gdb.Value, root: gdb.Value = None) -> str: return name type_cbs = TypeCallbacks([('struct vfsmount', _check_mount_type)]) -symbols_cbs = SymbolCallbacks([('init_task', Mount._check_task_interface)]) +symbols_cbs = SymbolCallbacks([('init_task', Mount.check_task_interface)]) diff --git a/crash/subsystem/filesystem/xfs.py b/crash/subsystem/filesystem/xfs.py index 639a1b16162..1778609a122 100644 --- a/crash/subsystem/filesystem/xfs.py +++ b/crash/subsystem/filesystem/xfs.py @@ -262,19 +262,35 @@ def __str__(self) -> str: 'struct xfs_qoff_logitem', 'struct xfs_inode', 'struct xfs_mount *', 'struct xfs_buf *']) -class _XFS(object): +class XFS(object): """ XFS File system state class. Not meant to be instantiated directly. """ _ail_head_name = None @classmethod - def _detect_ail_version(cls, gdbtype: gdb.Type) -> None: + def detect_ail_version(cls, gdbtype: gdb.Type) -> None: + """ + Detect what version of the ail structure is in use + + Linux v4.17 renamed the xfs_ail members to use + ail_* instead of xa_* except for xa_ail which + was renamed to ail_head. + + Meant to be used as a TypeCallback. + + Args: + gdbtype: The ``struct xfs_ail` type. + """ if struct_has_member(gdbtype, 'ail_head'): cls._ail_head_name = 'ail_head' else: cls._ail_head_name = 'xa_ail' + @classmethod + def get_ail_head(cls, ail: gdb.Value) -> gdb.Value: + return ail[cls._ail_head_name] + def is_xfs_super(super_block: gdb.Value) -> bool: """ Tests whether a ``struct super_block`` belongs to XFS. @@ -407,7 +423,7 @@ def xfs_for_each_ail_entry(ail: gdb.Value) -> Iterable[gdb.Value]: Raises: :obj:`gdb.NotAvailableError`: The target value was not available. 
""" - head = ail[_XFS._ail_head_name] + head = XFS.get_ail_head(ail) for item in list_for_each_entry(head, types.xfs_log_item_type, 'li_ail'): yield item @@ -646,4 +662,4 @@ def xfs_for_each_ail_log_item_typed(mp: gdb.Value) -> gdb.Value: for item in types.xfs_for_each_ail_log_item(mp): yield types.xfs_log_item_typed(item) -type_cbs = TypeCallbacks([('struct xfs_ail', _XFS._detect_ail_version)]) +type_cbs = TypeCallbacks([('struct xfs_ail', XFS.detect_ail_version)]) diff --git a/crash/types/classdev.py b/crash/types/classdev.py index e4cf4834f46..614e6004496 100644 --- a/crash/types/classdev.py +++ b/crash/types/classdev.py @@ -17,15 +17,36 @@ class ClassdevState(object): _class_is_private = True - #v5.1-rc1 moved knode_class from struct device to struct device_private @classmethod - def _setup_iterator_type(cls, gdbtype: gdb.Type) -> None: + def setup_iterator_type(cls, gdbtype: gdb.Type) -> None: + """ + Detect whether to iterate the class list using ``struct device`` + or ``struct device_private``. + + Linux v5.1-rc1 moved ``knode_class`` from ``struct device`` to + ``struct device_private``. We need to detect it here to ensure + list iteration works properly. + + Meant to be used as a TypeCallback. + + Args: + gdbtype: The ``struct device`` type. + """ if struct_has_member(gdbtype, 'knode_class'): cls._class_is_private = False + @classmethod + def class_is_private(cls) -> bool: + """ + Returns whether the class device uses ``struct device_private`` + + Meant to be used only be crash.types.classdev. + """ + return cls._class_is_private + type_cbs = TypeCallbacks([('struct device', - ClassdevState._setup_iterator_type)]) + ClassdevState.setup_iterator_type)]) def for_each_class_device(class_struct: gdb.Value, subtype: gdb.Value = None) -> Iterable[gdb.Value]: @@ -46,12 +67,12 @@ def for_each_class_device(class_struct: gdb.Value, klist = class_struct['p']['klist_devices'] container_type = types.device_type - if ClassdevState._class_is_private: + if ClassdevState.class_is_private(): container_type = types.device_private_type for knode in klist_for_each(klist): dev = container_of(knode, container_type, 'knode_class') - if ClassdevState._class_is_private: + if ClassdevState.class_is_private(): dev = dev['device'].dereference() if subtype is None or int(subtype) == int(dev['type']): diff --git a/crash/types/cpu.py b/crash/types/cpu.py index 776a7feab7c..278b987bd22 100644 --- a/crash/types/cpu.py +++ b/crash/types/cpu.py @@ -32,13 +32,33 @@ def __init__(self) -> None: raise NotImplementedError("This class is not meant to be instantiated") @classmethod - def _setup_online_mask(cls, symbol: gdb.Symbol) -> None: + def setup_online_mask(cls, symbol: gdb.Symbol) -> None: + """ + Translate the ``cpu_online_mask`` bitmap into a list of + online CPU numbers. + + Meant to be used as a SymbolCallback. + + Args: + symbol: The symbol for ``cpu_online_mask`` or + ``__cpu_online_mask``, depending on kernel version. + """ cls._cpu_online_mask = symbol.value() bits = cls._cpu_online_mask["bits"] cls.cpus_online = list(for_each_set_bit(bits)) @classmethod - def _setup_possible_mask(cls, cpu_mask: gdb.Symbol) -> None: + def setup_possible_mask(cls, cpu_mask: gdb.Symbol) -> None: + """ + Translate the ``cpu_possible_mask`` bitmap into a list of + possible CPU numbers. + + Meant to be used as a SymbolCallback. + + Args: + cpu_mask: The symbol for ``cpu_possible_mask`` or + ``__cpu_possible_mask``, depending on kernel version. 
+ """ cls._cpu_possible_mask = cpu_mask.value() bits = cls._cpu_possible_mask["bits"] cls.cpus_possible = list(for_each_set_bit(bits)) @@ -86,10 +106,10 @@ def highest_possible_cpu_nr() -> int: return TypesCPUClass.cpus_possible[-1] symbol_cbs = SymbolCallbacks([('cpu_online_mask', - TypesCPUClass._setup_online_mask), + TypesCPUClass.setup_online_mask), ('__cpu_online_mask', - TypesCPUClass._setup_online_mask), + TypesCPUClass.setup_online_mask), ('cpu_possible_mask', - TypesCPUClass._setup_possible_mask), + TypesCPUClass.setup_possible_mask), ('__cpu_possible_mask', - TypesCPUClass._setup_possible_mask)]) + TypesCPUClass.setup_possible_mask)]) diff --git a/crash/types/node.py b/crash/types/node.py index 13fb8e3aa2a..023d0a97bed 100644 --- a/crash/types/node.py +++ b/crash/types/node.py @@ -95,7 +95,16 @@ class NodeStates(object): nids_possible: List[int] = list() @classmethod - def _setup_node_states(cls, node_states_sym: gdb.Symbol) -> None: + def setup_node_states(cls, node_states_sym: gdb.Symbol) -> None: + """ + Detect names of node states and which nodes are possible + and online. + + Meant to be used as a SymbolCallback. + + Args: + node_states_sym: The ``node_states`` symbol. + """ node_states = node_states_sym.value() enum_node_states = gdb.lookup_type("enum node_states") @@ -135,7 +144,7 @@ def for_each_online_nid(self) -> Iterable[int]: for nid in self.nids_online: yield nid -symbol_cbs = SymbolCallbacks([('node_states', NodeStates._setup_node_states)]) +symbol_cbs = SymbolCallbacks([('node_states', NodeStates.setup_node_states)]) _state = NodeStates() diff --git a/crash/types/page.py b/crash/types/page.py index b2e92883a4a..587fb81f8a6 100644 --- a/crash/types/page.py +++ b/crash/types/page.py @@ -126,6 +126,14 @@ def setup_zone_type(cls, gdbtype: gdb.Type) -> None: @classmethod # pylint: disable=unused-argument def setup_nodes_width(cls, symbol: Union[gdb.Symbol, gdb.MinSymbol]) -> None: + """ + Detect NODES_WITH from the in-kernel config table + + Args: + symbol: The ``kernel_config_data`` symbol or minimal symbol. + It is not used directly. It is used to determine whether + the config data should be available. 
+ """ # TODO: handle kernels with no space for nodes in page flags try: cls.NODES_WIDTH = int(config['NODES_SHIFT']) diff --git a/crash/types/percpu.py b/crash/types/percpu.py index d9e138929ee..5665854996b 100644 --- a/crash/types/percpu.py +++ b/crash/types/percpu.py @@ -48,7 +48,7 @@ class PerCPUState(object): @classmethod # pylint: disable=unused-argument - def _setup_per_cpu_size(cls, unused: gdb.Symbol) -> None: + def setup_per_cpu_size(cls, unused: gdb.Symbol) -> None: try: size = msymvals['__per_cpu_end'] - msymvals['__per_cpu_start'] except DelayedAttributeError: @@ -67,7 +67,7 @@ def _setup_per_cpu_size(cls, unused: gdb.Symbol) -> None: @classmethod # pylint: disable=unused-argument - def _setup_nr_cpus(cls, unused: gdb.Symbol) -> None: + def setup_nr_cpus(cls, unused: gdb.Symbol) -> None: cls._nr_cpus = array_size(symvals['__per_cpu_offset']) if cls._last_cpu == -1: @@ -75,7 +75,7 @@ def _setup_nr_cpus(cls, unused: gdb.Symbol) -> None: @classmethod # pylint: disable=unused-argument - def _setup_module_ranges(cls, unused: gdb.Symbol) -> None: + def setup_module_ranges(cls, unused: gdb.Symbol) -> None: for module in for_each_module(): start = int(module['percpu']) if start == 0: @@ -367,11 +367,11 @@ def get_percpu_vars(self, var: SymbolOrValue, return vals msym_cbs = MinimalSymbolCallbacks([('__per_cpu_start', - PerCPUState._setup_per_cpu_size), + PerCPUState.setup_per_cpu_size), ('__per_cpu_end', - PerCPUState._setup_per_cpu_size)]) -symbol_cbs = SymbolCallbacks([('__per_cpu_offset', PerCPUState._setup_nr_cpus), - ('modules', PerCPUState._setup_module_ranges)]) + PerCPUState.setup_per_cpu_size)]) +symbol_cbs = SymbolCallbacks([('__per_cpu_offset', PerCPUState.setup_nr_cpus), + ('modules', PerCPUState.setup_module_ranges)]) _state = PerCPUState() diff --git a/crash/types/task.py b/crash/types/task.py index 757668b8280..dc4ab52e4fe 100644 --- a/crash/types/task.py +++ b/crash/types/task.py @@ -61,7 +61,20 @@ def has_flag(cls, flagname: str) -> bool: return v != cls.TASK_FLAG_UNINITIALIZED @classmethod - def _task_state_flags_callback(cls, symbol: gdb.Symbol) -> None: + def task_state_flags_callback(cls, symbol: gdb.Symbol) -> None: + # pylint: disable=unused-argument + """ + Detect which task flags this kernel uses. + + Meant to be used as a SymbolCallback. + + Different kernels use different task flags or even different values + for the same flags. This method tries to determine the flags for + the kernel. + + Args: + symbol: The ``task_state_array`` symbol. + """ task_state_array = symbol.value() count = array_size(task_state_array) @@ -166,7 +179,7 @@ def _check_state_bits(cls) -> None: .format(",".join(missing))) symbol_cbs = SymbolCallbacks([('task_state_array', - TaskStateFlags._task_state_flags_callback)]) + TaskStateFlags.task_state_flags_callback)]) TF = TaskStateFlags diff --git a/tests/pylintrc-enforce b/tests/pylintrc-enforce index 2a0090ec7e5..6114f91cc9f 100644 --- a/tests/pylintrc-enforce +++ b/tests/pylintrc-enforce @@ -65,7 +65,7 @@ disable=all # either give multiple identifier separated by comma (,) or put this option # multiple time (only on the command line, not in the configuration file where # it should appear only once). See also the "--disable" option for examples. 
-enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens,multiple-imports,wrong-import-order,ungrouped-imports,wrong-import-position,import-error,relative-beyond-top-level,cyclic-import,wildcard-import,deprecated-module,reimported,import-self,unused-import,unused-wildcard-import,len-as-condition,unused-variable,unused-argument,arguments-differ,attribute-defined-outside-init,redefined-builtin,unsubscriptable-object,no-member,useless-super-delegation +enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens,multiple-imports,wrong-import-order,ungrouped-imports,wrong-import-position,import-error,relative-beyond-top-level,cyclic-import,wildcard-import,deprecated-module,reimported,import-self,unused-import,unused-wildcard-import,len-as-condition,unused-variable,unused-argument,arguments-differ,attribute-defined-outside-init,redefined-builtin,unsubscriptable-object,no-member,useless-super-delegation,protected-access [REPORTS] From 5c6ee7ded77473e08047f1b737c3273e3fc5e4ee Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Wed, 5 Jun 2019 19:03:54 -0400 Subject: [PATCH 198/367] lint: fix no-else-return complaints This commit fixes the following lint warnings and enables enforcement of the 'no-else-return' pylint rule. ************* Module crash.exceptions R: 45, 8: Unnecessary "else" after "return" (no-else-return) ************* Module crash.subsystem.filesystem.btrfs R:141, 4: Unnecessary "else" after "return" (no-else-return) ************* Module crash.subsystem.storage R:242, 4: Unnecessary "else" after "return" (no-else-return) ************* Module crash.types.page R: 86, 8: Unnecessary "else" after "return" (no-else-return) ************* Module crash.types.slab R: 83, 8: Unnecessary "else" after "return" (no-else-return) ************* Module crash.types.task R:446, 8: Unnecessary "else" after "return" (no-else-return) ************* Module crash.types.zone R: 26, 8: Unnecessary "else" after "return" (no-else-return) ************* Module crash.types.node R: 30, 4: Unnecessary "else" after "return" (no-else-return) Signed-off-by: Jeff Mahoney --- crash/exceptions.py | 3 +-- crash/subsystem/filesystem/btrfs.py | 3 +-- crash/subsystem/storage/__init__.py | 3 +-- crash/types/node.py | 3 +-- crash/types/page.py | 4 ++-- crash/types/slab.py | 5 ++--- crash/types/task.py | 3 +-- crash/types/zone.py | 5 +---- tests/pylintrc-enforce | 2 +- 9 files changed, 11 insertions(+), 20 deletions(-) diff --git a/crash/exceptions.py b/crash/exceptions.py index 49c9537e6af..d1e2ea6c01b 100644 --- a/crash/exceptions.py +++ b/crash/exceptions.py @@ -44,8 +44,7 @@ def format_clsname(self, cls: Type) -> str: module = cls.__module__ if module is None or module == str.__class__.__module__: return cls.__name__ # Avoid reporting __builtin__ - else: - return module + '.' + cls.__name__ + return module + '.' 
+ cls.__name__ class UnexpectedGDBTypeBaseError(InvalidArgumentError): """Base class for unexpected gdb type exceptions""" diff --git a/crash/subsystem/filesystem/btrfs.py b/crash/subsystem/filesystem/btrfs.py index 241b1d7c8f8..6732e7677eb 100644 --- a/crash/subsystem/filesystem/btrfs.py +++ b/crash/subsystem/filesystem/btrfs.py @@ -142,5 +142,4 @@ def btrfs_metadata_uuid(sb: gdb.Value, force: bool = False) -> uuid.UUID: return decode_uuid(fs_info['metadata_uuid']) elif struct_has_member(fs_info['fs_devices'].type, 'metadata_uuid'): return decode_uuid(fs_info['fs_devices']['metadata_uuid']) - else: - return btrfs_fsid(sb, force) + return btrfs_fsid(sb, force) diff --git a/crash/subsystem/storage/__init__.py b/crash/subsystem/storage/__init__.py index b85806831a3..c184f1b8a38 100644 --- a/crash/subsystem/storage/__init__.py +++ b/crash/subsystem/storage/__init__.py @@ -241,8 +241,7 @@ def inode_on_bdev(inode: gdb.Value) -> gdb.Value: """ if is_bdev_inode(inode): return inode_to_block_device(inode) - else: - return inode['i_sb']['s_bdev'].dereference() + return inode['i_sb']['s_bdev'].dereference() # pylint: disable=unused-argument def _check_types(result: gdb.Symbol) -> None: diff --git a/crash/types/node.py b/crash/types/node.py index 023d0a97bed..6dc833961bb 100644 --- a/crash/types/node.py +++ b/crash/types/node.py @@ -29,8 +29,7 @@ def numa_node_id(cpu: int) -> int: """ if gdb.current_target().arch.name() == "powerpc:common64": return int(symvals.numa_cpu_lookup_table[cpu]) - else: - return int(get_percpu_var(symbols.numa_node, cpu)) + return int(get_percpu_var(symbols.numa_node, cpu)) NodeType = TypeVar('NodeType', bound='Node') diff --git a/crash/types/page.py b/crash/types/page.py index 587fb81f8a6..ca288b9af1c 100644 --- a/crash/types/page.py +++ b/crash/types/page.py @@ -91,8 +91,8 @@ def pfn_to_page(cls, pfn: int) -> gdb.Value: pagemap = section["section_mem_map"] & ~3 return (pagemap.cast(types.page_type.pointer()) + pfn).dereference() - else: - return cls.vmemmap[pfn] + + return cls.vmemmap[pfn] @classmethod def setup_pageflags(cls, gdbtype: gdb.Type) -> None: diff --git a/crash/types/slab.py b/crash/types/slab.py index e192501e588..715208b66b5 100644 --- a/crash/types/slab.py +++ b/crash/types/slab.py @@ -82,9 +82,8 @@ def from_page(cls, page: Page) -> 'Slab': raise RuntimeError("No kmem cache found for page") if cls.page_slab: return cls(page.gdb_obj, kmem_cache) - else: - slab_addr = int(page.get_slab_page()) - return cls.from_addr(slab_addr, kmem_cache) + slab_addr = int(page.get_slab_page()) + return cls.from_addr(slab_addr, kmem_cache) @classmethod def from_list_head(cls, list_head: gdb.Value, diff --git a/crash/types/task.py b/crash/types/task.py index dc4ab52e4fe..59d429f90f3 100644 --- a/crash/types/task.py +++ b/crash/types/task.py @@ -445,8 +445,7 @@ def task_name(self, brackets: bool = False) -> str: name = self.task_struct['comm'].string() if brackets and self.is_kernel_task(): return f"[{name}]" - else: - return name + return name def task_pid(self) -> int: """ diff --git a/crash/types/zone.py b/crash/types/zone.py index b17ac4df163..bacc565a5b6 100644 --- a/crash/types/zone.py +++ b/crash/types/zone.py @@ -23,10 +23,7 @@ def __init__(self, obj: gdb.Value, zid: int) -> None: self.nid = int(obj["node"]) def is_populated(self) -> bool: - if self.gdb_obj["present_pages"] != 0: - return True - else: - return False + return self.gdb_obj["present_pages"] != 0 def get_vmstat(self) -> List[int]: stats = [0] * VmStat.nr_stat_items diff --git a/tests/pylintrc-enforce 
b/tests/pylintrc-enforce index 6114f91cc9f..9aacedcc1cd 100644 --- a/tests/pylintrc-enforce +++ b/tests/pylintrc-enforce @@ -65,7 +65,7 @@ disable=all # either give multiple identifier separated by comma (,) or put this option # multiple time (only on the command line, not in the configuration file where # it should appear only once). See also the "--disable" option for examples. -enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens,multiple-imports,wrong-import-order,ungrouped-imports,wrong-import-position,import-error,relative-beyond-top-level,cyclic-import,wildcard-import,deprecated-module,reimported,import-self,unused-import,unused-wildcard-import,len-as-condition,unused-variable,unused-argument,arguments-differ,attribute-defined-outside-init,redefined-builtin,unsubscriptable-object,no-member,useless-super-delegation,protected-access +enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens,multiple-imports,wrong-import-order,ungrouped-imports,wrong-import-position,import-error,relative-beyond-top-level,cyclic-import,wildcard-import,deprecated-module,reimported,import-self,unused-import,unused-wildcard-import,len-as-condition,unused-variable,unused-argument,arguments-differ,attribute-defined-outside-init,redefined-builtin,unsubscriptable-object,no-member,useless-super-delegation,protected-access,no-else-return [REPORTS] From b5692bcda44d0d0d0221ee76c62ca2031bc68f37 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Wed, 5 Jun 2019 19:46:29 -0400 Subject: [PATCH 199/367] lint: fix bare-except and broad-except warnings This commit fixes the following lint warnings and enables enforcement of the 'bare-except' and 'broad-except' pylint rules. 
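As a minimal, hypothetical illustration (not code from this tree) of why both rules are worth enforcing: a bare "except:" also catches KeyboardInterrupt and SystemExit and hides unrelated bugs, so each handler is narrowed to the failures the surrounding code can actually recover from, roughly like this:

    # Before: "except:" silently turned any failure into the fallback value.
    # After: only the expected failure modes are handled.
    def nodes_shift(config):
        try:
            return int(config['NODES_SHIFT'])
        except (KeyError, ValueError):
            return 8

    print(nodes_shift({'NODES_SHIFT': '6'}))  # 6
    print(nodes_shift({}))                    # falls back to 8

The warnings addressed are listed below.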
************* Module crash.kernel W:358,15: Catching too general exception Exception (broad-except) W:634,15: Catching too general exception Exception (broad-except) ************* Module crash.commands.xfs W:116, 8: No exception type(s) specified (bare-except) ************* Module crash.types.slab W:285,12: No exception type(s) specified (bare-except) W:487,12: No exception type(s) specified (bare-except) W:556,15: Catching too general exception Exception (broad-except) W:550,23: Catching too general exception Exception (broad-except) ************* Module crash.types.page W:140, 8: No exception type(s) specified (bare-except) ************* Module kdump.target W: 91, 8: No exception type(s) specified (bare-except) Signed-off-by: Jeff Mahoney --- crash/commands/xfs.py | 8 +++++--- crash/kernel.py | 26 ++++++++++---------------- crash/types/page.py | 3 ++- crash/types/slab.py | 10 +++++----- kdump/target.py | 2 +- tests/pylintrc-enforce | 2 +- 6 files changed, 24 insertions(+), 27 deletions(-) diff --git a/crash/commands/xfs.py b/crash/commands/xfs.py index 2f61975550d..d4f7ef400a9 100644 --- a/crash/commands/xfs.py +++ b/crash/commands/xfs.py @@ -20,7 +20,7 @@ from crash.subsystem.filesystem.xfs import XFS_DQ_FLAGS from crash.subsystem.filesystem.xfs import xfs_mount_flags, xfs_mount_uuid from crash.subsystem.filesystem.xfs import xfs_mount_version -from crash.util import decode_flags +from crash.util import decode_flags, struct_has_member from crash.util.symbols import Types import gdb @@ -111,9 +111,11 @@ def dump_ail(self, args: argparse.Namespace) -> None: print("target={} last_pushed_lsn={} log_flush=" .format(int(ail['xa_target']), int(ail['xa_last_pushed_lsn'])), end='') - try: + + # This was added in Linux v3.2 (670ce93fef93b) + if struct_has_member(ail, 'xa_log_flush'): print("{}".format(int(ail['xa_log_flush']))) - except: + else: print("[N/A]") for bitem in xfs_for_each_ail_log_item(mp): diff --git a/crash/kernel.py b/crash/kernel.py index 92e92b38a09..77c2a2f8bb8 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -344,21 +344,15 @@ def extract_vermagic(self) -> str: def extract_modinfo_from_module(self, modpath: str) -> Dict[str, str]: f = open(modpath, 'rb') - d = None - try: - elf = ELFFile(f) - modinfo = elf.get_section_by_name('.modinfo') - - d = {} - for line in modinfo.data().split(b'\x00'): - val = line.decode('utf-8') - if val: - eq = val.index('=') - d[val[0:eq]] = val[eq + 1:] - except Exception as e: - print(e) - del d - d = dict() + elf = ELFFile(f) + modinfo = elf.get_section_by_name('.modinfo') + + d = {} + for line in modinfo.data().split(b'\x00'): + val = line.decode('utf-8') + if val: + eq = val.index('=') + d[val[0:eq]] = val[eq + 1:] del elf f.close() @@ -631,7 +625,7 @@ def setup_tasks(self) -> None: task_count = 0 try: crashing_cpu = int(get_symbol_value('crashing_cpu')) - except Exception: + except MissingSymbolError: crashing_cpu = -1 for task in for_each_all_tasks(): diff --git a/crash/types/page.py b/crash/types/page.py index ca288b9af1c..acb93bd733f 100644 --- a/crash/types/page.py +++ b/crash/types/page.py @@ -9,6 +9,7 @@ from crash.util.symbols import Types, Symvals, TypeCallbacks from crash.util.symbols import SymbolCallbacks, MinimalSymbolCallbacks from crash.cache.syscache import config +from crash.exceptions import DelayedAttributeError import gdb @@ -137,7 +138,7 @@ def setup_nodes_width(cls, symbol: Union[gdb.Symbol, gdb.MinSymbol]) -> None: # TODO: handle kernels with no space for nodes in page flags try: cls.NODES_WIDTH = 
int(config['NODES_SHIFT']) - except: + except (KeyError, DelayedAttributeError): # XXX print("Unable to determine NODES_SHIFT from config, trying 8") cls.NODES_WIDTH = 8 diff --git a/crash/types/slab.py b/crash/types/slab.py index 715208b66b5..b32af67ed8c 100644 --- a/crash/types/slab.py +++ b/crash/types/slab.py @@ -10,7 +10,7 @@ from crash.util import container_of, find_member_variant from crash.util.symbols import Types, TypeCallbacks, SymbolCallbacks from crash.types.percpu import get_percpu_var -from crash.types.list import list_for_each, list_for_each_entry +from crash.types.list import list_for_each, list_for_each_entry, ListError from crash.types.page import page_from_gdb_obj, page_from_addr, Page from crash.types.node import for_each_nid from crash.types.cpu import for_each_online_cpu @@ -282,7 +282,7 @@ def check(self, slabtype: int, nid: int) -> int: print(ac[obj]) try: page = page_from_addr(obj).compound_head() - except: + except gdb.NotAvailableError: self.__error(": failed to get page for object %x" % obj) continue @@ -484,7 +484,7 @@ def get_slabs_of_type(self, node: gdb.Value, slabtype: int, continue slab = Slab.from_list_head(list_head, self) - except: + except gdb.NotAvailableError: traceback.print_exc() print("failed to initialize slab object from list_head {:#x}: {}" .format(int(list_head), sys.exc_info()[0])) @@ -547,13 +547,13 @@ def ___check_slabs(self, node: gdb.Value, slabtype: int, nid: int, exact_cycles=True): try: free += self.__check_slab(slab, slabtype, nid, errors) - except Exception as e: + except gdb.NotAvailableError as e: print(col_error("Exception when checking slab {:#x}:{}" .format(int(slab.gdb_obj.address), e))) traceback.print_exc() slabs += 1 - except Exception as e: + except (gdb.NotAvailableError, ListError) as e: print(col_error("Unrecoverable error when traversing {} slab list: {}" .format(slab_list_name[slabtype], e))) check_ok = False diff --git a/kdump/target.py b/kdump/target.py index 052e3e0fa23..34ca1b8d89f 100644 --- a/kdump/target.py +++ b/kdump/target.py @@ -88,7 +88,7 @@ def open(self, filename: str, from_tty: bool) -> None: def close(self) -> None: try: self.unregister() - except: + except RuntimeError: pass del self.kdump diff --git a/tests/pylintrc-enforce b/tests/pylintrc-enforce index 9aacedcc1cd..16864056559 100644 --- a/tests/pylintrc-enforce +++ b/tests/pylintrc-enforce @@ -65,7 +65,7 @@ disable=all # either give multiple identifier separated by comma (,) or put this option # multiple time (only on the command line, not in the configuration file where # it should appear only once). See also the "--disable" option for examples. 
-enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens,multiple-imports,wrong-import-order,ungrouped-imports,wrong-import-position,import-error,relative-beyond-top-level,cyclic-import,wildcard-import,deprecated-module,reimported,import-self,unused-import,unused-wildcard-import,len-as-condition,unused-variable,unused-argument,arguments-differ,attribute-defined-outside-init,redefined-builtin,unsubscriptable-object,no-member,useless-super-delegation,protected-access,no-else-return +enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens,multiple-imports,wrong-import-order,ungrouped-imports,wrong-import-position,import-error,relative-beyond-top-level,cyclic-import,wildcard-import,deprecated-module,reimported,import-self,unused-import,unused-wildcard-import,len-as-condition,unused-variable,unused-argument,arguments-differ,attribute-defined-outside-init,redefined-builtin,unsubscriptable-object,no-member,useless-super-delegation,protected-access,no-else-return,bare-except,broad-except [REPORTS] From bb8f917cb419e9819d83ff0439155ca65378789c Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Wed, 5 Jun 2019 20:01:44 -0400 Subject: [PATCH 200/367] lint: fix stop-iteration-return complaint This commit fixes the following lint warnings and enables enforcement of the 'stop-iteration-return' pylint rule. ************* Module crash.subsystem.storage.decoders R:323,18: Do not raise StopIteration in generator, use return statement instead (stop-iteration-return) Signed-off-by: Jeff Mahoney --- crash/subsystem/storage/decoders.py | 7 +++++-- tests/pylintrc-enforce | 2 +- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/crash/subsystem/storage/decoders.py b/crash/subsystem/storage/decoders.py index 8ec69f4d75e..31bffc84655 100644 --- a/crash/subsystem/storage/decoders.py +++ b/crash/subsystem/storage/decoders.py @@ -86,7 +86,7 @@ def __next__(self) -> Any: If there are no objects beyond this one, it does not need to be overridden. """ - return None + pass class BadBHDecoder(Decoder): """ @@ -320,4 +320,7 @@ def for_each_bio_in_stack(bio: gdb.Value) -> Iterable[Decoder]: decoder = decode_bio(bio) while decoder is not None: yield decoder - decoder = next(decoder) + try: + decoder = next(decoder) + except StopIteration: + break diff --git a/tests/pylintrc-enforce b/tests/pylintrc-enforce index 16864056559..9b0b69d2f95 100644 --- a/tests/pylintrc-enforce +++ b/tests/pylintrc-enforce @@ -65,7 +65,7 @@ disable=all # either give multiple identifier separated by comma (,) or put this option # multiple time (only on the command line, not in the configuration file where # it should appear only once). See also the "--disable" option for examples. 
-enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens,multiple-imports,wrong-import-order,ungrouped-imports,wrong-import-position,import-error,relative-beyond-top-level,cyclic-import,wildcard-import,deprecated-module,reimported,import-self,unused-import,unused-wildcard-import,len-as-condition,unused-variable,unused-argument,arguments-differ,attribute-defined-outside-init,redefined-builtin,unsubscriptable-object,no-member,useless-super-delegation,protected-access,no-else-return,bare-except,broad-except +enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens,multiple-imports,wrong-import-order,ungrouped-imports,wrong-import-position,import-error,relative-beyond-top-level,cyclic-import,wildcard-import,deprecated-module,reimported,import-self,unused-import,unused-wildcard-import,len-as-condition,unused-variable,unused-argument,arguments-differ,attribute-defined-outside-init,redefined-builtin,unsubscriptable-object,no-member,useless-super-delegation,protected-access,no-else-return,bare-except,broad-except,stop-iteration-return [REPORTS] From 7707e0740d780484eb1a5f5572f0330e510b1d05 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Wed, 5 Jun 2019 20:06:36 -0400 Subject: [PATCH 201/367] lint: fix singleton-comparison complaints This commit fixes the following lint complaints and enables enforcement of the 'singleton-comparison' pylint rule. ************* Module crash.types.slab C:500,11: Comparison to False should be 'not expr' or 'expr is False' (singleton-comparison) C:511,11: Comparison to False should be 'not expr' or 'expr is False' (singleton-comparison) C:607,19: Comparison to False should be 'not expr' or 'expr is False' (singleton-comparison) Signed-off-by: Jeff Mahoney --- crash/types/slab.py | 6 +++--- tests/pylintrc-enforce | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/crash/types/slab.py b/crash/types/slab.py index b32af67ed8c..46d646fbfe4 100644 --- a/crash/types/slab.py +++ b/crash/types/slab.py @@ -497,7 +497,7 @@ def __check_slab(self, slab: Slab, slabtype: int, nid: int, addr = int(slab.gdb_obj.address) free = 0 - if slab.error == False: + if slab.error is False: free = slab.check(slabtype, nid) if slab.misplaced_error is None and errors['num_misplaced'] > 0: @@ -508,7 +508,7 @@ def __check_slab(self, slab: Slab, slabtype: int, nid: int, errors['num_misplaced'] = 0 errors['last_misplaced'] = None - if slab.error == False: + if slab.error is False: errors['num_ok'] += 1 errors['last_ok'] = addr if not errors['first_ok']: @@ -604,7 +604,7 @@ def check_array_caches(self) -> None: .format(ac_ptr, acs[ac_ptr], ac_obj_slab.kmem_cache.name)) else: ac_obj_obj = ac_obj_slab.contains_obj(ac_ptr) - if ac_obj_obj[0] == False and ac_obj_obj[2] is None: + if ac_obj_obj[0] is False and ac_obj_obj[2] is None: print("cached pointer {:#x} in {} is not allocated: {}".format( ac_ptr, acs[ac_ptr], ac_obj_obj)) elif ac_obj_obj[1] != ac_ptr: diff --git a/tests/pylintrc-enforce b/tests/pylintrc-enforce index 9b0b69d2f95..a7209703556 100644 --- a/tests/pylintrc-enforce +++ b/tests/pylintrc-enforce @@ -65,7 +65,7 @@ disable=all # either give multiple identifier separated by comma (,) or put this option # multiple time (only on the command line, not in the configuration file where # it should 
appear only once). See also the "--disable" option for examples. -enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens,multiple-imports,wrong-import-order,ungrouped-imports,wrong-import-position,import-error,relative-beyond-top-level,cyclic-import,wildcard-import,deprecated-module,reimported,import-self,unused-import,unused-wildcard-import,len-as-condition,unused-variable,unused-argument,arguments-differ,attribute-defined-outside-init,redefined-builtin,unsubscriptable-object,no-member,useless-super-delegation,protected-access,no-else-return,bare-except,broad-except,stop-iteration-return +enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens,multiple-imports,wrong-import-order,ungrouped-imports,wrong-import-position,import-error,relative-beyond-top-level,cyclic-import,wildcard-import,deprecated-module,reimported,import-self,unused-import,unused-wildcard-import,len-as-condition,unused-variable,unused-argument,arguments-differ,attribute-defined-outside-init,redefined-builtin,unsubscriptable-object,no-member,useless-super-delegation,protected-access,no-else-return,bare-except,broad-except,stop-iteration-return,singleton-comparison [REPORTS] From f47b4514d1e489b9f367a34a6647960a94e31ad3 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Wed, 5 Jun 2019 20:07:48 -0400 Subject: [PATCH 202/367] lint: fix consider-iterating-dictionary complaint This commit fixes the following lint complaints and enables enforcement of the 'consider-iterating-dictionary' pylint rule. ************* Module crash.types.slab C:597,22: Consider iterating the dictionary directly instead of calling .keys() (consider-iterating-dictionary) Signed-off-by: Jeff Mahoney --- crash/types/slab.py | 2 +- tests/pylintrc-enforce | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crash/types/slab.py b/crash/types/slab.py index 46d646fbfe4..7647c551836 100644 --- a/crash/types/slab.py +++ b/crash/types/slab.py @@ -594,7 +594,7 @@ def __check_slabs(self, node: gdb.Value, slabtype: int, nid: int) -> int: def check_array_caches(self) -> None: acs = self.get_array_caches() - for ac_ptr in acs.keys(): + for ac_ptr in acs: ac_obj_slab = slab_from_obj_addr(ac_ptr) if not ac_obj_slab: print("cached pointer {:#x} in {} not found in slab" diff --git a/tests/pylintrc-enforce b/tests/pylintrc-enforce index a7209703556..ebb0c8ff81b 100644 --- a/tests/pylintrc-enforce +++ b/tests/pylintrc-enforce @@ -65,7 +65,7 @@ disable=all # either give multiple identifier separated by comma (,) or put this option # multiple time (only on the command line, not in the configuration file where # it should appear only once). See also the "--disable" option for examples. 
-enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens,multiple-imports,wrong-import-order,ungrouped-imports,wrong-import-position,import-error,relative-beyond-top-level,cyclic-import,wildcard-import,deprecated-module,reimported,import-self,unused-import,unused-wildcard-import,len-as-condition,unused-variable,unused-argument,arguments-differ,attribute-defined-outside-init,redefined-builtin,unsubscriptable-object,no-member,useless-super-delegation,protected-access,no-else-return,bare-except,broad-except,stop-iteration-return,singleton-comparison +enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens,multiple-imports,wrong-import-order,ungrouped-imports,wrong-import-position,import-error,relative-beyond-top-level,cyclic-import,wildcard-import,deprecated-module,reimported,import-self,unused-import,unused-wildcard-import,len-as-condition,unused-variable,unused-argument,arguments-differ,attribute-defined-outside-init,redefined-builtin,unsubscriptable-object,no-member,useless-super-delegation,protected-access,no-else-return,bare-except,broad-except,stop-iteration-return,singleton-comparison,consider-iterating-dictionary [REPORTS] From 7d7532ddfedc337da33b500f69622fd413079a43 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Wed, 5 Jun 2019 20:09:24 -0400 Subject: [PATCH 203/367] lint: fix redefined-outer-name warnings This commit fixes the following lint warnings and enables enforcement of the 'redefined-outer-name' pylint rule. ************* Module crash.kernel W:398, 8: Redefining name 'crash' from outer scope (line 13) (redefined-outer-name) W:616, 8: Redefining name 'crash' from outer scope (line 13) (redefined-outer-name) ************* Module crash.cache.syscache W:187,23: Redefining name 'config' from outer scope (line 281) (redefined-outer-name) Signed-off-by: Jeff Mahoney --- crash/cache/syscache.py | 4 ++-- crash/kernel.py | 4 ++-- tests/pylintrc-enforce | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/crash/cache/syscache.py b/crash/cache/syscache.py index 0e98c072c30..b7adf47b81c 100644 --- a/crash/cache/syscache.py +++ b/crash/cache/syscache.py @@ -184,9 +184,9 @@ class CrashKernelCache(CrashCache): _jiffies_dv = DelayedValue('jiffies') - def __init__(self, config: CrashConfigCache) -> None: + def __init__(self, config_cache: CrashConfigCache) -> None: CrashCache.__init__(self) - self.config = config + self.config = config_cache self._hz = -1 self._uptime = timedelta(seconds=0) self._loadavg = "" diff --git a/crash/kernel.py b/crash/kernel.py index 77c2a2f8bb8..b8d0e8046eb 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -395,7 +395,7 @@ def check_module_version(self, modpath: str, module: gdb.Value) -> None: mod_srcversion) def load_modules(self, verbose: bool = False, debug: bool = False) -> None: - import crash.cache.syscache + import crash.cache.syscache # pylint: disable=redefined-outer-name version = crash.cache.syscache.utsname.release print("Loading modules for {}".format(version), end='') if verbose: @@ -613,7 +613,7 @@ def load_module_debuginfo(self, objfile: gdb.Objfile, def setup_tasks(self) -> None: from crash.types.percpu import get_percpu_vars from crash.types.task import LinuxTask, for_each_all_tasks - import crash.cache.tasks + import crash.cache.tasks # 
pylint: disable=redefined-outer-name gdb.execute('set print thread-events 0') rqs = get_percpu_vars(self.symbols.runqueues) diff --git a/tests/pylintrc-enforce b/tests/pylintrc-enforce index ebb0c8ff81b..91b8a4de91d 100644 --- a/tests/pylintrc-enforce +++ b/tests/pylintrc-enforce @@ -65,7 +65,7 @@ disable=all # either give multiple identifier separated by comma (,) or put this option # multiple time (only on the command line, not in the configuration file where # it should appear only once). See also the "--disable" option for examples. -enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens,multiple-imports,wrong-import-order,ungrouped-imports,wrong-import-position,import-error,relative-beyond-top-level,cyclic-import,wildcard-import,deprecated-module,reimported,import-self,unused-import,unused-wildcard-import,len-as-condition,unused-variable,unused-argument,arguments-differ,attribute-defined-outside-init,redefined-builtin,unsubscriptable-object,no-member,useless-super-delegation,protected-access,no-else-return,bare-except,broad-except,stop-iteration-return,singleton-comparison,consider-iterating-dictionary +enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens,multiple-imports,wrong-import-order,ungrouped-imports,wrong-import-position,import-error,relative-beyond-top-level,cyclic-import,wildcard-import,deprecated-module,reimported,import-self,unused-import,unused-wildcard-import,len-as-condition,unused-variable,unused-argument,arguments-differ,attribute-defined-outside-init,redefined-builtin,unsubscriptable-object,no-member,useless-super-delegation,protected-access,no-else-return,bare-except,broad-except,stop-iteration-return,singleton-comparison,consider-iterating-dictionary,redefined-outer-name [REPORTS] From 8222306285bd9cbc108c2e4166ac1ee948157ab4 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 31 May 2019 09:46:41 -0400 Subject: [PATCH 204/367] lint: fix too-many-return-statements complaint This commit fixes the following lint warnings and enables enforcement of the 'too-many-return-statements' pylint rule. 
************* Module crash.subsystem.filesystem.xfs R:568, 0: Too many return statements (7/6) (too-many-return-statements) Signed-off-by: Jeff Mahoney --- crash/subsystem/filesystem/xfs.py | 19 ++++++++++--------- tests/pylintrc-enforce | 2 +- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/crash/subsystem/filesystem/xfs.py b/crash/subsystem/filesystem/xfs.py index 1778609a122..0c36f6e6dc9 100644 --- a/crash/subsystem/filesystem/xfs.py +++ b/crash/subsystem/filesystem/xfs.py @@ -591,22 +591,23 @@ def xfs_log_item_typed(item: gdb.Value) -> gdb.Value: """ li_type = int(item['li_type']) if li_type == XFS_LI_BUF: - return item_to_buf_log_item(item) + typed_item = item_to_buf_log_item(item) elif li_type == XFS_LI_INODE: - return item_to_inode_log_item(item) + typed_item = item_to_inode_log_item(item) elif li_type == XFS_LI_EFI: - return item_to_efi_log_item(item) + typed_item = item_to_efi_log_item(item) elif li_type == XFS_LI_EFD: - return item_to_efd_log_item(item) + typed_item = item_to_efd_log_item(item) elif li_type == XFS_LI_IUNLINK: # There isn't actually any type information for this - return item['li_type'] + typed_item = item['li_type'] elif li_type == XFS_LI_DQUOT: - return item_to_dquot_log_item(item) + typed_item = item_to_dquot_log_item(item) elif li_type == XFS_LI_QUOTAOFF: - return item_to_quotaoff_log_item(item) - - raise RuntimeError("Unknown AIL item type {:x}".format(li_type)) + typed_item = item_to_quotaoff_log_item(item) + else: + raise RuntimeError("Unknown AIL item type {:x}".format(li_type)) + return typed_item def xfs_format_xfsbuf(buf: gdb.Value) -> str: """ diff --git a/tests/pylintrc-enforce b/tests/pylintrc-enforce index 91b8a4de91d..ebb5e21b70e 100644 --- a/tests/pylintrc-enforce +++ b/tests/pylintrc-enforce @@ -65,7 +65,7 @@ disable=all # either give multiple identifier separated by comma (,) or put this option # multiple time (only on the command line, not in the configuration file where # it should appear only once). See also the "--disable" option for examples. 
-enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens,multiple-imports,wrong-import-order,ungrouped-imports,wrong-import-position,import-error,relative-beyond-top-level,cyclic-import,wildcard-import,deprecated-module,reimported,import-self,unused-import,unused-wildcard-import,len-as-condition,unused-variable,unused-argument,arguments-differ,attribute-defined-outside-init,redefined-builtin,unsubscriptable-object,no-member,useless-super-delegation,protected-access,no-else-return,bare-except,broad-except,stop-iteration-return,singleton-comparison,consider-iterating-dictionary,redefined-outer-name +enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens,multiple-imports,wrong-import-order,ungrouped-imports,wrong-import-position,import-error,relative-beyond-top-level,cyclic-import,wildcard-import,deprecated-module,reimported,import-self,unused-import,unused-wildcard-import,len-as-condition,unused-variable,unused-argument,arguments-differ,attribute-defined-outside-init,redefined-builtin,unsubscriptable-object,no-member,useless-super-delegation,protected-access,no-else-return,bare-except,broad-except,stop-iteration-return,singleton-comparison,consider-iterating-dictionary,redefined-outer-name,too-many-return-statements [REPORTS] From 3505088ed4bbf06f468a82b86e31dc1b81e2135c Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Thu, 6 Jun 2019 12:52:52 -0400 Subject: [PATCH 205/367] crash.infra.lookup: duplicate check_ready to silence abstract-method warning This commit duplicates ObjfileEventCallback.check_ready in NamedCallback to avoid pylint complaining about it not implementing it itself. Signed-off-by: Jeff Mahoney --- crash/infra/lookup.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/crash/infra/lookup.py b/crash/infra/lookup.py index c31e9a4ab00..89bc446aa2c 100644 --- a/crash/infra/lookup.py +++ b/crash/infra/lookup.py @@ -40,6 +40,19 @@ def __init__(self, name: str, callback: Callback, self._callback = callback + # This is silly but it avoids pylint abstract-method warnings + def check_ready(self) -> Any: + """ + The method that derived classes implement for detecting when the + conditions required to call the callback have been met. + + Returns: + :obj:`object`: This method can return an arbitrary object. It will + be passed untouched to :meth:`callback` if the result is anything + other than :obj:`None` or :obj:`False`. + """ + raise NotImplementedError("check_ready must be implemented by derived class.") + def callback(self, result: Any) -> Union[None, bool]: """ The callback for handling the sucessful result of :meth:`check_ready`. From 650efb4cc15740df0e96125c8b6d7c209f7c566a Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Thu, 6 Jun 2019 12:53:12 -0400 Subject: [PATCH 206/367] lint: fix unnecessary-semicolon warning This commit fixes the following lint warnings and enables enforcement of the 'unnecessary-semicolon' pylint rule. 
************* Module crash.commands.kmem W:116, 0: Unnecessary semicolon (unnecessary-semicolon) Signed-off-by: Jeff Mahoney --- crash/commands/kmem.py | 2 +- tests/pylintrc-enforce | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crash/commands/kmem.py b/crash/commands/kmem.py index a0a94d677aa..36259a1e0e0 100644 --- a/crash/commands/kmem.py +++ b/crash/commands/kmem.py @@ -113,7 +113,7 @@ def execute(self, args: argparse.Namespace) -> None: raise RuntimeError("odd return value from contains_obj") def __print_vmstat(self, vmstat: List[int], diffs: List[int]) -> None: - vmstat_names = VmStat.get_stat_names(); + vmstat_names = VmStat.get_stat_names() just = max(map(len, vmstat_names)) nr_items = VmStat.nr_stat_items diff --git a/tests/pylintrc-enforce b/tests/pylintrc-enforce index ebb5e21b70e..2868c0650d4 100644 --- a/tests/pylintrc-enforce +++ b/tests/pylintrc-enforce @@ -65,7 +65,7 @@ disable=all # either give multiple identifier separated by comma (,) or put this option # multiple time (only on the command line, not in the configuration file where # it should appear only once). See also the "--disable" option for examples. -enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens,multiple-imports,wrong-import-order,ungrouped-imports,wrong-import-position,import-error,relative-beyond-top-level,cyclic-import,wildcard-import,deprecated-module,reimported,import-self,unused-import,unused-wildcard-import,len-as-condition,unused-variable,unused-argument,arguments-differ,attribute-defined-outside-init,redefined-builtin,unsubscriptable-object,no-member,useless-super-delegation,protected-access,no-else-return,bare-except,broad-except,stop-iteration-return,singleton-comparison,consider-iterating-dictionary,redefined-outer-name,too-many-return-statements +enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens,multiple-imports,wrong-import-order,ungrouped-imports,wrong-import-position,import-error,relative-beyond-top-level,cyclic-import,wildcard-import,deprecated-module,reimported,import-self,unused-import,unused-wildcard-import,len-as-condition,unused-variable,unused-argument,arguments-differ,attribute-defined-outside-init,redefined-builtin,unsubscriptable-object,no-member,useless-super-delegation,protected-access,no-else-return,bare-except,broad-except,stop-iteration-return,singleton-comparison,consider-iterating-dictionary,redefined-outer-name,too-many-return-statements,unnecessary-semicolon [REPORTS] From 5068045fc78f3b57b990b15b8495158f6aefef1f Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Thu, 6 Jun 2019 12:57:39 -0400 Subject: [PATCH 207/367] lint: fix false positives This commit contains disabling comments for what are probably bugs in pylint. 
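For context, pylint suppressions of this sort are written as `# pylint: disable=...` comments; when placed at the end of a statement they silence only the named check on that line, leaving it enabled everywhere else. A minimal sketch of the mechanism, with hypothetical names rather than code from this tree:

    def first_module(kernel):
        # A trailing disable comment suppresses only the named check,
        # and only on this line; the check stays active for the rest
        # of the module.
        return kernel.modules[0]  # pylint: disable=unsubscriptable-object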
Signed-off-by: Jeff Mahoney --- crash/types/list.py | 3 ++- kdump/target.py | 4 ++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/crash/types/list.py b/crash/types/list.py index 0be32c6a668..b9d330e9f5f 100644 --- a/crash/types/list.py +++ b/crash/types/list.py @@ -131,7 +131,8 @@ def list_for_each(list_head: gdb.Value, include_head: bool = False, node = nxt.dereference() if pending_exception is not None: - raise pending_exception + # The pylint error seems to think we'll raise None here + raise pending_exception # pylint: disable=raising-bad-type def list_for_each_entry(list_head: gdb.Value, gdbtype: gdb.Type, member: str, include_head: bool = False, diff --git a/kdump/target.py b/kdump/target.py index 34ca1b8d89f..7b54d0de31d 100644 --- a/kdump/target.py +++ b/kdump/target.py @@ -35,6 +35,9 @@ def __call__(self, symtype: int, *args: int) -> int: # pylint: disable=no-member raise addrxlat.exceptions.NoDataError() + # This silences pylint: disable=inconsistent-return-statements + return 0 # pylint: disable=unreachable + class Target(gdb.Target): def __init__(self, debug: bool = False) -> None: super().__init__() @@ -58,6 +61,7 @@ def open(self, filename: str, from_tty: bool) -> None: raise gdb.GdbError("Failed to open `{}': {}" .format(filename, str(e))) + # pylint: disable=unsupported-assignment-operation self.kdump.attr['addrxlat.ostype'] = 'linux' ctx = self.kdump.get_addrxlat_ctx() ctx.cb_sym = SymbolCallback(ctx) From 2830a3f4c7e81e5e3b930d6c5aef0f0ec2b633ed Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Thu, 6 Jun 2019 13:06:17 -0400 Subject: [PATCH 208/367] crash.arch.ppc64: fix setup_thread_info powerpc has thread_info in the task_struct (at least in newer releases), so use that. Also add stubs for register callbacks to silence pylint abstract-method warnings. Signed-off-by: Jeff Mahoney --- diff --git a/crash/arch/ppc64.py b/crash/arch/ppc64.py --- a/crash/arch/ppc64.py +++ b/crash/arch/ppc64.py @@ def __init__(self) -> None: super(Powerpc64Architecture, self).__init__() - self.ulong_type = gdb.lookup_type('unsigned long') - thread_info_type = gdb.lookup_type('struct thread_info') - self.thread_info_p_type = thread_info_type.pointer() - # Stop stack traces with addresses below this self.filter = KernelFrameFilter(0xffff000000000000) def setup_thread_info(self, thread: gdb.InferiorThread) -> None: task = thread.info.task_struct - thread_info = task['stack'].cast(self.thread_info_p_type) - thread.info.set_thread_info(thread_info) + thread.info.set_thread_info(task['thread_info'].address) @classmethod def get_stack_pointer(cls, thread_struct: gdb.Value) -> gdb.Value: return thread_struct['ksp'] + def fetch_register_active(self, thread: gdb.InferiorThread, + register: int) -> None: + raise NotImplementedError("ppc64 support does not cover threads yet") + + def fetch_register_scheduled(self, thread: gdb.InferiorThread, + register: gdb.Register) -> None: + raise NotImplementedError("ppc64 support does not cover threads yet") + register_arch(Powerpc64Architecture) From ace88855690fc13c6600d9b82b57bb0455c574ac Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Thu, 6 Jun 2019 17:39:01 -0400 Subject: [PATCH 209/367] lint: fix consider-merging-isinstance warning This commit fixes the following lint warnings and enables enforcement of the 'consider-merging-isinstance' pylint rule.
************* Module crash.types.percpu R:303,11: Consider merging these isinstance calls to isinstance(var, (gdb.MinSymbol, gdb.Symbol)) (consider-merging-isinstance) Signed-off-by: Jeff Mahoney --- crash/types/percpu.py | 2 +- tests/pylintrc-enforce | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crash/types/percpu.py b/crash/types/percpu.py index 5665854996b..ab1d2afac69 100644 --- a/crash/types/percpu.py +++ b/crash/types/percpu.py @@ -272,7 +272,7 @@ def is_percpu_var(self, var: SymbolOrValue) -> bool: def _resolve_percpu_var(self, var: SymbolOrValue) -> gdb.Value: orig_var = var - if isinstance(var, gdb.Symbol) or isinstance(var, gdb.MinSymbol): + if isinstance(var, (gdb.Symbol, gdb.MinSymbol)): var = var.value() if not isinstance(var, gdb.Value): raise InvalidArgumentError("Argument must be gdb.Symbol or gdb.Value") diff --git a/tests/pylintrc-enforce b/tests/pylintrc-enforce index 2868c0650d4..f3be5c23ad1 100644 --- a/tests/pylintrc-enforce +++ b/tests/pylintrc-enforce @@ -65,7 +65,7 @@ disable=all # either give multiple identifier separated by comma (,) or put this option # multiple time (only on the command line, not in the configuration file where # it should appear only once). See also the "--disable" option for examples. -enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens,multiple-imports,wrong-import-order,ungrouped-imports,wrong-import-position,import-error,relative-beyond-top-level,cyclic-import,wildcard-import,deprecated-module,reimported,import-self,unused-import,unused-wildcard-import,len-as-condition,unused-variable,unused-argument,arguments-differ,attribute-defined-outside-init,redefined-builtin,unsubscriptable-object,no-member,useless-super-delegation,protected-access,no-else-return,bare-except,broad-except,stop-iteration-return,singleton-comparison,consider-iterating-dictionary,redefined-outer-name,too-many-return-statements,unnecessary-semicolon +enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens,multiple-imports,wrong-import-order,ungrouped-imports,wrong-import-position,import-error,relative-beyond-top-level,cyclic-import,wildcard-import,deprecated-module,reimported,import-self,unused-import,unused-wildcard-import,len-as-condition,unused-variable,unused-argument,arguments-differ,attribute-defined-outside-init,redefined-builtin,unsubscriptable-object,no-member,useless-super-delegation,protected-access,no-else-return,bare-except,broad-except,stop-iteration-return,singleton-comparison,consider-iterating-dictionary,redefined-outer-name,too-many-return-statements,unnecessary-semicolon,consider-merging-isinstance [REPORTS] From a1d10df4390261d6f228ce6313d149097d1af8e3 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Thu, 6 Jun 2019 13:15:51 -0400 Subject: [PATCH 210/367] tests: enforce lint with a disabled list instead Now that we've cleaned up the lint errors and warnings in stages, we can switch to general checking. 
Signed-off-by: Jeff Mahoney --- Makefile | 9 +- tests/{pylintrc-check => pylintrc} | 2 +- tests/pylintrc-enforce | 355 ----------------------------- 3 files changed, 3 insertions(+), 363 deletions(-) rename tests/{pylintrc-check => pylintrc} (99%) delete mode 100644 tests/pylintrc-enforce diff --git a/Makefile b/Makefile index 70396fd2775..7f404479c72 100644 --- a/Makefile +++ b/Makefile @@ -28,7 +28,7 @@ GZ_MAN1 = $(patsubst %.asciidoc,%.1.gz,$(MAN1_TXT)) man: $(GZ_MAN1) -PYLINT_ARGS ?= --rcfile tests/pylintrc-check -r n +PYLINT_ARGS ?= --rcfile tests/pylintrc -r n ifeq ($(E),1) PYLINT_ARGS += -E @@ -60,11 +60,6 @@ unit-tests: clean-build make -C tests -s sh tests/run-tests.sh -PYLINT_ENFORCE="" - -lint-enforce: clean-build - sh tests/run-pylint.sh -r n --rcfile tests/pylintrc-enforce crash kdump - lint: clean-build sh tests/run-pylint.sh $(PYLINT_ARGS) crash kdump @@ -74,7 +69,7 @@ static-check: clean-build live-tests: clean-build sh tests/run-kernel-tests.sh $(INI_FILES) -test: unit-tests static-check lint-enforce live-tests +test: unit-tests static-check lint live-tests @echo -n doc: build FORCE diff --git a/tests/pylintrc-check b/tests/pylintrc similarity index 99% rename from tests/pylintrc-check rename to tests/pylintrc index b03e59af6c9..35f95c1f1b0 100644 --- a/tests/pylintrc-check +++ b/tests/pylintrc @@ -65,7 +65,7 @@ confidence= # --enable=similarities". If you want to run only the classes checker, but have # no Warning level messages displayed, use"--disable=all --enable=classes # --disable=W" -disable=missing-docstring,too-few-public-methods,invalid-name,too-many-locals,too-many-instance-attributes,too-many-public-methods,fixme,no-self-use,too-many-branches,too-many-statements,too-many-arguments,too-many-boolean-expressions,line-too-long +disable=missing-docstring,too-few-public-methods,invalid-name,too-many-locals,too-many-instance-attributes,too-many-public-methods,fixme,no-self-use,too-many-branches,too-many-statements,too-many-arguments,too-many-boolean-expressions,line-too-long,duplicate-code [REPORTS] diff --git a/tests/pylintrc-enforce b/tests/pylintrc-enforce deleted file mode 100644 index f3be5c23ad1..00000000000 --- a/tests/pylintrc-enforce +++ /dev/null @@ -1,355 +0,0 @@ -[MASTER] - -# Specify a configuration file. -#rcfile= - -# Python code to execute, usually for sys.path manipulation such as -# pygtk.require(). -#init-hook= - -# Add files or directories to the blacklist. They should be base names, not -# paths. -ignore=CVS - -# Add files or directories matching the regex patterns to the blacklist. The -# regex matches against base names, not paths. -ignore-patterns= - -# Pickle collected data for later comparisons. -persistent=yes - -# List of plugins (as comma separated values of python modules names) to load, -# usually to register additional checkers. -load-plugins= - -# Use multiple processes to speed up Pylint. -jobs=1 - -# Allow loading of arbitrary C extensions. Extensions are imported into the -# active Python interpreter and may run arbitrary code. -unsafe-load-any-extension=no - -# A comma-separated list of package or module names from where C extensions may -# be loaded. Extensions are loading into the active Python interpreter and may -# run arbitrary code -extension-pkg-whitelist= - -# Allow optimization of some AST trees. This will activate a peephole AST -# optimizer, which will apply various small optimizations. For instance, it can -# be used to obtain the result of joining multiple strings with the addition -# operator. 
Joining a lot of strings can lead to a maximum recursion error in -# Pylint and this flag can prevent that. It has one side effect, the resulting -# AST will be different than the one from reality. This option is deprecated -# and it will be removed in Pylint 2.0. -optimize-ast=no - - -[MESSAGES CONTROL] - -# Only show warnings with the listed confidence levels. Leave empty to show -# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED -confidence= - -# Disable the message, report, category or checker with the given id(s). You -# can either give multiple identifiers separated by comma (,) or put this -# option multiple times (only on the command line, not in the configuration -# file where it should appear only once).You can also use "--disable=all" to -# disable everything first and then reenable specific checks. For example, if -# you want to run only the similarities checker, you can use "--disable=all -# --enable=similarities". If you want to run only the classes checker, but have -# no Warning level messages displayed, use"--disable=all --enable=classes -# --disable=W" -disable=all - -# Enable the message, report, category or checker with the given id(s). You can -# either give multiple identifier separated by comma (,) or put this option -# multiple time (only on the command line, not in the configuration file where -# it should appear only once). See also the "--disable" option for examples. -enable=undefined-variable,undefined-all-variable,global-variable-undefined,undefined-loop-variable,not-callable,bad-whitespace,trailing-whitespace,bad-continuation,mixed-indentation,superfluous-parens,multiple-imports,wrong-import-order,ungrouped-imports,wrong-import-position,import-error,relative-beyond-top-level,cyclic-import,wildcard-import,deprecated-module,reimported,import-self,unused-import,unused-wildcard-import,len-as-condition,unused-variable,unused-argument,arguments-differ,attribute-defined-outside-init,redefined-builtin,unsubscriptable-object,no-member,useless-super-delegation,protected-access,no-else-return,bare-except,broad-except,stop-iteration-return,singleton-comparison,consider-iterating-dictionary,redefined-outer-name,too-many-return-statements,unnecessary-semicolon,consider-merging-isinstance - - -[REPORTS] - -# Set the output format. Available formats are text, parseable, colorized, msvs -# (visual studio) and html. You can also give a reporter class, eg -# mypackage.mymodule.MyReporterClass. -output-format=text - -# Put messages in a separate file for each module / package specified on the -# command line instead of printing them on stdout. Reports (if any) will be -# written in a file name "pylint_global.[txt|html]". This option is deprecated -# and it will be removed in Pylint 2.0. -files-output=no - -# Tells whether to display a full report or only the messages -reports=yes - -# Python expression which should return a note less than 10 (10 is the highest -# note). You have access to the variables errors warning, statement which -# respectively contain the number of errors / warnings messages and the total -# number of statements analyzed. This is used by the global evaluation report -# (RP0004). -evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) - -# Template used to display messages. This is a python new-style format string -# used to format the message information. See doc for all details -#msg-template= - - -[FORMAT] - -# Maximum number of characters on a single line. 
-max-line-length=100 - -# Regexp for a line that is allowed to be longer than the limit. -ignore-long-lines=^\s*(# )??$ - -# Allow the body of an if to be on the same line as the test if there is no -# else. -single-line-if-stmt=no - -# List of optional constructs for which whitespace checking is disabled. `dict- -# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}. -# `trailing-comma` allows a space between comma and closing bracket: (a, ). -# `empty-line` allows space-only lines. -no-space-check=trailing-comma,dict-separator - -# Maximum number of lines in a module -max-module-lines=1000 - -# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 -# tab). -indent-string=' ' - -# Number of spaces of indent required inside a hanging or continued line. -indent-after-paren=4 - -# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. -expected-line-ending-format= - - -[SIMILARITIES] - -# Minimum lines number of a similarity. -min-similarity-lines=4 - -# Ignore comments when computing similarities. -ignore-comments=yes - -# Ignore docstrings when computing similarities. -ignore-docstrings=yes - -# Ignore imports when computing similarities. -ignore-imports=no - - -[BASIC] - -# Good variable names which should always be accepted, separated by a comma -good-names=i,j,k,ex,Run,_ - -# Bad variable names which should always be refused, separated by a comma -bad-names=foo,bar,baz,toto,tutu,tata - -# Colon-delimited sets of names that determine each other's naming style when -# the name regexes allow several styles. -name-group= - -# Include a hint for the correct naming format with invalid-name -include-naming-hint=no - -# List of decorators that produce properties, such as abc.abstractproperty. Add -# to this list to register other decorators that produce valid properties. 
-property-classes=abc.abstractproperty - -# Regular expression matching correct function names -function-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming hint for function names -function-name-hint=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression matching correct variable names -variable-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming hint for variable names -variable-name-hint=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression matching correct constant names -const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ - -# Naming hint for constant names -const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$ - -# Regular expression matching correct attribute names -attr-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming hint for attribute names -attr-name-hint=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression matching correct argument names -argument-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming hint for argument names -argument-name-hint=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression matching correct class attribute names -class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ - -# Naming hint for class attribute names -class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ - -# Regular expression matching correct inline iteration names -inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ - -# Naming hint for inline iteration names -inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$ - -# Regular expression matching correct class names -class-rgx=[A-Z_][a-zA-Z0-9]+$ - -# Naming hint for class names -class-name-hint=[A-Z_][a-zA-Z0-9]+$ - -# Regular expression matching correct module names -module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ - -# Naming hint for module names -module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ - -# Regular expression matching correct method names -method-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming hint for method names -method-name-hint=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression which should only match function or class names that do -# not require a docstring. -no-docstring-rgx=^_ - -# Minimum line length for functions/classes that require docstrings, shorter -# ones are exempt. -docstring-min-length=-1 - - -[ELIF] - -# Maximum number of nested blocks for function / method body -max-nested-blocks=5 - - -[MISCELLANEOUS] - -# List of note tags to take in consideration, separated by a comma. -notes=FIXME,XXX,TODO - - -[LOGGING] - -# Logging modules to check that the string format arguments are in logging -# function parameter format -logging-modules=logging - - -[SPELLING] - -# Spelling dictionary name. Available dictionaries: none. To make it working -# install python-enchant package. -spelling-dict= - -# List of comma separated words that should not be checked. -spelling-ignore-words= - -# A path to a file that contains private dictionary; one word per line. -spelling-private-dict-file= - -# Tells whether to store unknown words to indicated private dictionary in -# --spelling-private-dict-file option instead of raising a message. -spelling-store-unknown-words=no - - -[TYPECHECK] - -# Tells whether missing members accessed in mixin class should be ignored. A -# mixin class is detected if its name ends with "mixin" (case insensitive). -ignore-mixin-members=yes - -# List of module names for which member attributes should not be checked -# (useful for modules/projects where namespaces are manipulated during runtime -# and thus existing member attributes cannot be deduced by static analysis. It -# supports qualified module names, as well as Unix pattern matching. 
-ignored-modules= - -# List of class names for which member attributes should not be checked (useful -# for classes with dynamically set attributes). This supports the use of -# qualified names. -ignored-classes=optparse.Values,thread._local,_thread._local - -# List of members which are set dynamically and missed by pylint inference -# system, and so shouldn't trigger E1101 when accessed. Python regular -# expressions are accepted. -generated-members= - -# List of decorators that produce context managers, such as -# contextlib.contextmanager. Add to this list to register other decorators that -# produce valid context managers. -contextmanager-decorators=contextlib.contextmanager - - -[DESIGN] - -# Maximum number of arguments for function / method -max-args=5 - -# Argument names that match this expression will be ignored. Default to name -# with leading underscore -ignored-argument-names=_.* - -# Maximum number of locals for function / method body -max-locals=15 - -# Maximum number of return / yield for function / method body -max-returns=6 - -# Maximum number of branch for function / method body -max-branches=12 - -# Maximum number of statements in function / method body -max-statements=50 - -# Maximum number of parents for a class (see R0901). -max-parents=7 - -# Maximum number of attributes for a class (see R0902). -max-attributes=7 - -# Minimum number of public methods for a class (see R0903). -min-public-methods=2 - -# Maximum number of public methods for a class (see R0904). -max-public-methods=20 - -# Maximum number of boolean expressions in a if statement -max-bool-expr=5 - - -[CLASSES] - -# List of method names used to declare (i.e. assign) instance attributes. -defining-attr-methods=__init__,__new__,setUp - -# List of valid names for the first argument in a class method. -valid-classmethod-first-arg=cls - -# List of valid names for the first argument in a metaclass class method. -valid-metaclass-classmethod-first-arg=mcs - -# List of member names, which should be excluded from the protected access -# warning. -exclude-protected=_asdict,_fields,_replace,_source,_make - - -[EXCEPTIONS] - -# Exceptions that will emit a warning when being caught. Defaults to -# "Exception" -overgeneral-exceptions=Exception From 856a404df6dc9af9da9c872ce392a66ba0b92a4b Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Thu, 6 Jun 2019 14:24:29 -0400 Subject: [PATCH 211/367] crash.commands.vtop: add test cases and document -c as unimplemented The -c option is documented but isn't implemented. We'll still accept it but raise a CommandError that it's unimplemented. Also, check to see if the address is a hex address and raise a CommandLineError if it's not. Finally, add simple test cases for running the command. Signed-off-by: Jeff Mahoney --- crash/commands/vtop.py | 13 +++++- kernel-tests/decorators.py | 18 +++++++++ kernel-tests/test_commands_ps.py | 18 +-------- kernel-tests/test_commands_vtop.py | 65 ++++++++++++++++++++++++++++++ 4 files changed, 96 insertions(+), 18 deletions(-) create mode 100644 kernel-tests/test_commands_vtop.py diff --git a/crash/commands/vtop.py b/crash/commands/vtop.py index 1822fed5760..f3f902fe01f 100644 --- a/crash/commands/vtop.py +++ b/crash/commands/vtop.py @@ -6,6 +6,7 @@ import addrxlat.exceptions from crash.commands import Command, ArgumentParser +from crash.commands import CommandError, CommandLineError from crash.addrxlat import CrashAddressTranslation class LinuxPGT(object): @@ -104,6 +105,10 @@ class _Parser(ArgumentParser): each task specified by "foreach". 
address A hexadecimal user or kernel virtual address. + NOTE + Although the -c option is referenced in the documentation, it + is currently unimplemented and will cause a command error. + EXAMPLES Translate user virtual address 80b4000: @@ -201,6 +206,9 @@ def __init__(self) -> None: super().__init__("vtop", parser) def execute(self, args: argparse.Namespace) -> None: + if args.c: + raise CommandError("support for the -c argument is unimplemented") + trans = CrashAddressTranslation() # Silly mypy bug means the base class needs come first if not trans.is_non_auto: @@ -209,7 +217,10 @@ def execute(self, args: argparse.Namespace) -> None: pgt = LinuxNonAutoPGT(trans.context, trans.system) for addr in args.args: - addr = int(addr, 16) + try: + addr = int(addr, 16) + except ValueError: + raise CommandLineError(f"{addr} is not a hex address") fulladdr = addrxlat.FullAddress(addrxlat.KVADDR, addr) print('{:16} {:16}'.format('VIRTUAL', 'PHYSICAL')) try: diff --git a/kernel-tests/decorators.py b/kernel-tests/decorators.py index db205bfb2e0..3ff04d49d52 100644 --- a/kernel-tests/decorators.py +++ b/kernel-tests/decorators.py @@ -60,3 +60,21 @@ def skip_without_supers(name): return lambda func: func return unittest.skip(f"no {name} file systems in image") + +def bad_command_line(fn, ignored=True): + """Marks test to expect CommandLineError for unimplemented options""" + def test_decorator(fn): + def test_decorated(self, *args, **kwargs): + self.assertRaises(CommandLineError, fn, self, *args, **kwargs) + return test_decorated + test_decorator.__doc__ = fn.__doc__ + " (bad command line raises CommandLineError)" + return test_decorator + +def unimplemented(fn, ignored=True): + """Marks test to expect CommandError for unimplemented options""" + def test_decorator(fn): + def test_decorated(self, *args, **kwargs): + self.assertRaises(CommandError, fn, self, *args, **kwargs) + return test_decorated + test_decorator.__doc__ = fn.__doc__ + " (unimplemented command raises CommandError)" + return test_decorator diff --git a/kernel-tests/test_commands_ps.py b/kernel-tests/test_commands_ps.py index 9c76a074c59..f7ffeba72a3 100644 --- a/kernel-tests/test_commands_ps.py +++ b/kernel-tests/test_commands_ps.py @@ -12,23 +12,7 @@ from crash.commands.ps import PSCommand import crash.types.task as tasks -def bad_command_line(fn, ignored=True): - """Marks test to expect CommandLineError for unimplemented options""" - def test_decorator(fn): - def test_decorated(self, *args, **kwargs): - self.assertRaises(CommandLineError, fn, self, *args, **kwargs) - return test_decorated - test_decorator.__doc__ = fn.__doc__ + " (bad command line raises CommandLineError)" - return test_decorator - -def unimplemented(fn, ignored=True): - """Marks test to expect CommandError for unimplemented options""" - def test_decorator(fn): - def test_decorated(self, *args, **kwargs): - self.assertRaises(CommandError, fn, self, *args, **kwargs) - return test_decorated - test_decorator.__doc__ = fn.__doc__ + " (unimplemented command raises CommandError)" - return test_decorator +from decorators import bad_command_line, unimplemented PF_KTHREAD = 0x200000 diff --git a/kernel-tests/test_commands_vtop.py b/kernel-tests/test_commands_vtop.py new file mode 100644 index 00000000000..20beb2110a8 --- /dev/null +++ b/kernel-tests/test_commands_vtop.py @@ -0,0 +1,65 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +import unittest +import gdb +import io +import sys + +from decorators import bad_command_line, 
unimplemented + +from crash.commands.vtop import VTOPCommand +from crash.commands import CommandError, CommandLineError +from crash.exceptions import DelayedAttributeError + +class TestCommandsVTOP(unittest.TestCase): + def setUp(self): + self.stdout = sys.stdout + self.redirected = io.StringIO() + sys.stdout = self.redirected + self.command = VTOPCommand() + self.addr = int(gdb.lookup_symbol('modules', None)[0].value().address) + + def tearDown(self): + sys.stdout = self.stdout + + def output(self): + return self.redirected.getvalue() + + def output_lines(self): + output = self.output() + return len(output.split("\n")) - 1 + + @bad_command_line + def test_vtop_empty(self): + """Test `vtop`""" + self.command.invoke_uncaught("") + + @bad_command_line + def test_vtop_symname(self): + """Test `vtop `""" + self.command.invoke_uncaught("modules") + + def test_vtop_addr(self): + """`Test vtop `""" + self.command.invoke_uncaught(f"{self.addr:#x}") + self.assertTrue(self.output_lines() > 0) + + def test_vtop_addr_k(self): + """`Test vtop -k `""" + self.command.invoke_uncaught(f"-k {self.addr:#x}") + self.assertTrue(self.output_lines() > 0) + + def test_vtop_addr_u(self): + """`Test vtop -u `""" + self.command.invoke_uncaught(f"-u {self.addr:#x}") + self.assertTrue(self.output_lines() > 0) + + @bad_command_line + def test_vtop_addr_uk(self): + """`Test vtop -k -u `""" + self.command.invoke_uncaught(f"-k -u {self.addr:#x}") + + @unimplemented + def test_vtop_addr_c(self): + """Test `vtop -c `""" + self.command.invoke_uncaught(f"-c {self.addr:#x}") From e77f9a296a10cfe12807bd9795a119d39df56ab1 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 7 Jun 2019 10:40:36 -0400 Subject: [PATCH 212/367] crash: fix documentation build failures The documentation failed to build after all the typing updates. This commit adds the required mock types and fixes some broken docstrings. Also add a new make target 'make full-test' that adds the doc target to the testing. Signed-off-by: Jeff Mahoney --- Makefile | 2 ++ crash/subsystem/filesystem/xfs.py | 2 +- crash/types/task.py | 2 +- doc-source/mock/addrxlat/__init__.py | 6 ++++++ doc-source/mock/addrxlat/exceptions.py | 5 +++++ doc-source/mock/gdb/FrameDecorator.py | 5 +++++ doc-source/mock/gdb/__init__.py | 8 ++++++++ doc-source/mock/gdb/types.py | 3 +++ 8 files changed, 31 insertions(+), 2 deletions(-) create mode 100644 doc-source/mock/addrxlat/exceptions.py create mode 100644 doc-source/mock/gdb/FrameDecorator.py diff --git a/Makefile b/Makefile index 7f404479c72..258f2aa810a 100644 --- a/Makefile +++ b/Makefile @@ -72,6 +72,8 @@ live-tests: clean-build test: unit-tests static-check lint live-tests @echo -n +full-test: test doc + doc: build FORCE rm -rf docs rm -f doc-source/crash/.*rst doc-source/kdump/*.rst diff --git a/crash/subsystem/filesystem/xfs.py b/crash/subsystem/filesystem/xfs.py index 0c36f6e6dc9..bfdaf2ec0e3 100644 --- a/crash/subsystem/filesystem/xfs.py +++ b/crash/subsystem/filesystem/xfs.py @@ -280,7 +280,7 @@ def detect_ail_version(cls, gdbtype: gdb.Type) -> None: Meant to be used as a TypeCallback. Args: - gdbtype: The ``struct xfs_ail` type. + gdbtype: The ``struct xfs_ail`` type. 
""" if struct_has_member(gdbtype, 'ail_head'): cls._ail_head_name = 'ail_head' diff --git a/crash/types/task.py b/crash/types/task.py index 59d429f90f3..371db4a73dd 100644 --- a/crash/types/task.py +++ b/crash/types/task.py @@ -70,7 +70,7 @@ def task_state_flags_callback(cls, symbol: gdb.Symbol) -> None: Different kernels use different task flags or even different values for the same flags. This method tries to determine the flags for - the kernel. + the kernel. Args: symbol: The ``task_state_array`` symbol. diff --git a/doc-source/mock/addrxlat/__init__.py b/doc-source/mock/addrxlat/__init__.py index d4464cde36c..0c6d54d7f22 100644 --- a/doc-source/mock/addrxlat/__init__.py +++ b/doc-source/mock/addrxlat/__init__.py @@ -1,3 +1,6 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + class Context(object): pass @@ -11,5 +14,8 @@ def get_map(x): def CAPS(x): pass +class FullAddress(object): + pass + KVADDR = 0 SYS_MAP_MACHPHYS_KPHYS = 0 diff --git a/doc-source/mock/addrxlat/exceptions.py b/doc-source/mock/addrxlat/exceptions.py new file mode 100644 index 00000000000..926c38b1417 --- /dev/null +++ b/doc-source/mock/addrxlat/exceptions.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +class NoDataError(Exception): + pass diff --git a/doc-source/mock/gdb/FrameDecorator.py b/doc-source/mock/gdb/FrameDecorator.py new file mode 100644 index 00000000000..3ac52f91530 --- /dev/null +++ b/doc-source/mock/gdb/FrameDecorator.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +class FrameDecorator(object): + pass diff --git a/doc-source/mock/gdb/__init__.py b/doc-source/mock/gdb/__init__.py index 9c110d7881e..5abe61258fa 100644 --- a/doc-source/mock/gdb/__init__.py +++ b/doc-source/mock/gdb/__init__.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: class Target(object): class kdump(object): @@ -65,5 +67,11 @@ class Command(object): def __init__(self, x, y): pass +class NewObjFileEvent(object): + pass + +class Frame(object): + pass + SYMBOL_VAR_DOMAIN = 0 COMMAND_USER = 0 diff --git a/doc-source/mock/gdb/types.py b/doc-source/mock/gdb/types.py index 6a059386f0e..9badabfe7e6 100644 --- a/doc-source/mock/gdb/types.py +++ b/doc-source/mock/gdb/types.py @@ -1,2 +1,5 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + def get_basic_type(x): pass From da734495c123b8b1e8f7b21790f8c7b02dfb4d88 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 7 Jun 2019 10:45:44 -0400 Subject: [PATCH 213/367] crash: update documentation for kernel, session, exceptions This commit updates the crash.kernel, crash.session, and crash.exceptions modules to hide internal methods and document public ones. 
Signed-off-by: Jeff Mahoney --- crash/exceptions.py | 14 +-- crash/kernel.py | 289 +++++++++++++++++++++++++++----------------- crash/session.py | 6 +- 3 files changed, 191 insertions(+), 118 deletions(-) diff --git a/crash/exceptions.py b/crash/exceptions.py index d1e2ea6c01b..c4434b82292 100644 --- a/crash/exceptions.py +++ b/crash/exceptions.py @@ -32,11 +32,11 @@ class InvalidArgumentError(TypeError): class ArgumentTypeError(InvalidArgumentError): """The provided object could not be converted to the expected type""" - formatter = "cannot convert argument `{}' of type {} to {}" + _fmt = "cannot convert argument `{}' of type {} to {}" def __init__(self, name: str, val: Type, expected_type: Type) -> None: - msg = self.formatter.format(name, self.format_clsname(val.__class__), - self.format_clsname(expected_type)) + msg = self._fmt.format(name, self.format_clsname(val.__class__), + self.format_clsname(expected_type)) super().__init__(msg) self.val = val @@ -52,15 +52,15 @@ class UnexpectedGDBTypeBaseError(InvalidArgumentError): class UnexpectedGDBTypeError(UnexpectedGDBTypeBaseError): """The gdb.Type passed describes an inappropriate type for the operation""" - formatter = "expected gdb.Type `{}' to describe `{}' not `{}'" + _fmt = "expected gdb.Type `{}' to describe `{}' not `{}'" def __init__(self, name: str, gdbtype: gdb.Type, expected_type: gdb.Type) -> None: - msg = self.formatter.format(name, str(gdbtype), str(expected_type)) + msg = self._fmt.format(name, str(gdbtype), str(expected_type)) super().__init__(msg) class NotStructOrUnionError(UnexpectedGDBTypeBaseError): """The provided type is not a struct or union""" - formatter = "argument `{}' describes type `{}' which is not a struct or union" + _fmt = "argument `{}' describes type `{}' which is not a struct or union" def __init__(self, name: str, gdbtype: gdb.Type) -> None: - msg = self.formatter.format(name, str(gdbtype)) + msg = self._fmt.format(name, str(gdbtype)) super().__init__(msg) diff --git a/crash/kernel.py b/crash/kernel.py index b8d0e8046eb..fe03d362c43 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -21,12 +21,13 @@ import gdb class CrashKernelError(RuntimeError): + """Raised when an error occurs while initializing the debugging session""" pass -class NoMatchingFileError(FileNotFoundError): +class _NoMatchingFileError(FileNotFoundError): pass -class ModinfoMismatchError(ValueError): +class _ModinfoMismatchError(ValueError): _fmt = "module {} has mismatched {} (got `{}' expected `{}')" def __init__(self, attribute: str, path: str, value: Optional[str], expected_value: Optional[str]) -> None: @@ -37,12 +38,12 @@ def __init__(self, attribute: str, path: str, value: Optional[str], self.expected_value = expected_value self.attribute = attribute -class ModVersionMismatchError(ModinfoMismatchError): +class _ModVersionMismatchError(_ModinfoMismatchError): def __init__(self, path: str, module_value: Optional[str], expected_value: Optional[str]) -> None: super().__init__('vermagic', path, module_value, expected_value) -class ModSourceVersionMismatchError(ModinfoMismatchError): +class _ModSourceVersionMismatchError(_ModinfoMismatchError): def __init__(self, path: str, module_value: Optional[str], expected_value: Optional[str]) -> None: super().__init__('srcversion', path, module_value, expected_value) @@ -52,6 +53,78 @@ def __init__(self, path: str, module_value: Optional[str], PathSpecifier = Union[List[str], str] class CrashKernel(object): + """ + Initialize a basic kernel semantic debugging session. 
+ + This means that we load the following: + + - Kernel image symbol table (and debuginfo, if not integrated) + relocated to the base offset used by kASLR + - Kernel modules that were loaded on the the crashed system (again, + with debuginfo if not integrated) + - Percpu ranges used by kernel module + - Architecture-specific details + - Linux tasks populated into the GDB thread table + + If kernel module files and debuginfo cannot be located, backtraces + may be incomplete if the addresses used by the modules are crossed. + Percpu ranges will be properly loaded regardless. + + For arguments that accept paths to specify a base directory to be + used, the entire directory structure will be read and cached to + speed up subsequent searches. Still, reading large directory trees + is a time consuming operation and being exact as possible will + improve startup time. + + Args: + root (None for defaults): The roots of trees + to search for debuginfo files. When specified, all roots + will be searched using the following arguments (including + the absolute paths in the defaults if unspecified). + + Defaults to: / + + vmlinux_debuginfo (None for defaults): The + location of the separate debuginfo file corresponding + to the kernel being debugged. + + Defaults to: + + - .debug + - ./vmlinux-.debug + - /usr/lib/debug/.build-id/xx/.debug + - /usr/lib/debug/.debug + - /usr/lib/debug/boot/.debug + - /usr/lib/debug/boot/vmlinux- + + + module_path (None for defaults): The base directory to + be used to search for kernel modules (e.g. module.ko) to be + used to load symbols for the kernel being debugged. + + Defaults to: + + - ./modules + - /lib/modules/ + + + module_debuginfo_path (None for defaults): The base + directory to search for debuginfo matching the kernel + modules already loaded. + + Defaults to: + + - ./modules.debug + - /usr/lib/debug/.build-id/xx/.debug + - /usr/lib/debug/lib/modules/ + + + Raises: + CrashKernelError: If the kernel debuginfo cannot be loaded. + InvalidArgumentError: If any of the arguments are not None, str, + or list of str + + """ types = Types(['char *']) symvals = Symvals(['init_task']) symbols = Symbols(['runqueues']) @@ -62,70 +135,6 @@ def __init__(self, roots: PathSpecifier = None, module_path: PathSpecifier = None, module_debuginfo_path: PathSpecifier = None, verbose: bool = False, debug: bool = False) -> None: - """ - Initialize a basic kernel semantic debugging session. - - This means that we load the following: - - Kernel image symbol table (and debuginfo, if not integrated) - relocated to the base offset used by kASLR - - Kernel modules that were loaded on the the crashed system (again, - with debuginfo if not integrated) - - Percpu ranges used by kernel module - - Architecture-specific details - - Linux tasks populated into the GDB thread table - - If kernel module files and debuginfo cannot be located, backtraces - may be incomplete if the addresses used by the modules are crossed. - Percpu ranges will be properly loaded regardless. - - For arguments that accept paths to specify a base directory to be - used, the entire directory structure will be read and cached to - speed up subsequent searches. Still, reading large directory trees - is a time consuming operation and being exact as possible will - improve startup time. - - Args: - root (str or list of str, None for defaults): The roots of trees - to search for debuginfo files. 
When specified, all roots - will be searched using the following arguments (including - the absolute paths in the defaults if unspecified). - - Defaults to: / - - vmlinux_debuginfo (str or list of str, None for defaults): The - location of the separate debuginfo file corresponding - to the kernel being debugged. - - Defaults to: - - .debug - - ./vmlinux-.debug - - /usr/lib/debug/.build-id/xx/.debug - - /usr/lib/debug/.debug - - /usr/lib/debug/boot/.debug - - /usr/lib/debug/boot/vmlinux- - - module_path (string, None for defaults): The base directory to - be used to search for kernel modules (e.g. module.ko) to be - used to load symbols for the kernel being debugged. - - Defaults to: - - ./modules - - /lib/modules/ - - module_debuginfo_path (string, None for defaults): The base - directory to search for debuginfo matching the kernel - modules already loaded. - - Defaults to: - - ./modules.debug - - /usr/lib/debug/.build-id/xx/.debug - - /usr/lib/debug/lib/modules/ - Raises: - CrashKernelError: If the kernel debuginfo cannot be loaded. - InvalidArgumentError: If any of the arguments are not None, str, - or list of str - - """ self.findmap: Dict[str, Dict[Any, Any]] = dict() self.modules_order: Dict[str, Dict[str, str]] = dict() obj = gdb.objfiles()[0] @@ -322,6 +331,15 @@ def _get_minsymbol_as_string(self, name: str) -> str: return sym.address.cast(self.types.char_p_type).string() def extract_version(self) -> str: + """ + Returns the version from the loaded vmlinux + + If debuginfo is available, ``init_uts_ns`` will be used. + Otherwise, it will be extracted from the version banner. + + Returns: + str: The version text. + """ try: uts = get_symbol_value('init_uts_ns') return uts['name']['release'].string() @@ -333,6 +351,12 @@ def extract_version(self) -> str: return banner.split(' ')[2] def extract_vermagic(self) -> str: + """ + Returns the vermagic from the loaded vmlinux + + Returns: + str: The version text. + """ try: magic = get_symbol_value('vermagic') return magic.string() @@ -342,6 +366,16 @@ def extract_vermagic(self) -> str: return self._get_minsymbol_as_string('vermagic') def extract_modinfo_from_module(self, modpath: str) -> Dict[str, str]: + """ + Returns the modinfo from a module file + + Args: + modpath: The path to the module file. + + Returns: + dict: A dictionary containing the names and values of the modinfo + variables. + """ f = open(modpath, 'rb') elf = ELFFile(f) @@ -360,19 +394,29 @@ def extract_modinfo_from_module(self, modpath: str) -> Dict[str, str]: def fetch_registers(self, thread: gdb.InferiorThread, register: gdb.Register) -> None: + """ + Loads the value for a register (or registers if Register.regnum is + ``-1``) + + Meant to be used as a callback from gdb.Target. + + Args: + thread: The thread for which to load the registers + register: The register (or registers) to load. 
+ """ if register is None: regnum = -1 else: regnum = register.regnum self.arch.fetch_register(thread, regnum) - def get_module_sections(self, module: gdb.Value) -> str: + def _get_module_sections(self, module: gdb.Value) -> str: out = [] for (name, addr) in for_each_module_section(module): out.append("-s {} {:#x}".format(name, addr)) return " ".join(out) - def check_module_version(self, modpath: str, module: gdb.Value) -> None: + def _check_module_version(self, modpath: str, module: gdb.Value) -> None: modinfo = self.extract_modinfo_from_module(modpath) vermagic = None @@ -380,7 +424,7 @@ def check_module_version(self, modpath: str, module: gdb.Value) -> None: vermagic = modinfo['vermagic'] if vermagic != self.vermagic: - raise ModVersionMismatchError(modpath, vermagic, self.vermagic) + raise _ModVersionMismatchError(modpath, vermagic, self.vermagic) mi_srcversion = None if 'srcversion' in modinfo: @@ -391,10 +435,26 @@ def check_module_version(self, modpath: str, module: gdb.Value) -> None: mod_srcversion = module['srcversion'].string() if mi_srcversion != mod_srcversion: - raise ModSourceVersionMismatchError(modpath, mi_srcversion, - mod_srcversion) + raise _ModSourceVersionMismatchError(modpath, mi_srcversion, + mod_srcversion) def load_modules(self, verbose: bool = False, debug: bool = False) -> None: + """ + Load modules (including debuginfo) into the crash session. + + This routine will attempt to locate modules and the corresponding + debuginfo files, if separate, using the parameters defined + when the CrashKernel object was initialized. + + Args: + verbose (default=False): enable verbose output + debug (default=False): enable even more verbose debugging output + + Raises: + CrashKernelError: An error was encountered while loading a module. + This does not include a failure to locate a module or + its debuginfo. 
+ """ import crash.cache.syscache # pylint: disable=redefined-outer-name version = crash.cache.syscache.utsname.release print("Loading modules for {}".format(version), end='') @@ -409,13 +469,13 @@ def load_modules(self, verbose: bool = False, debug: bool = False) -> None: for path in self.module_path: try: - modpath = self.find_module_file(modfname, path) - except NoMatchingFileError: + modpath = self._find_module_file(modfname, path) + except _NoMatchingFileError: continue try: - self.check_module_version(modpath, module) - except ModinfoMismatchError as e: + self._check_module_version(modpath, module) + except _ModinfoMismatchError as e: if verbose: print(str(e)) continue @@ -435,7 +495,7 @@ def load_modules(self, verbose: bool = False, debug: bool = False) -> None: print(".", end='') sys.stdout.flush() - sections = self.get_module_sections(module) + sections = self._get_module_sections(module) percpu = int(module['percpu']) if percpu > 0: @@ -453,7 +513,7 @@ def load_modules(self, verbose: bool = False, debug: bool = False) -> None: objfile = gdb.lookup_objfile(modpath) if not objfile.has_symbols(): - self.load_module_debuginfo(objfile, modpath, verbose) + self._load_module_debuginfo(objfile, modpath, verbose) elif debug: print(" + has debug symbols") @@ -481,18 +541,17 @@ def load_modules(self, verbose: bool = False, debug: bool = False) -> None: del self.findmap self.findmap = {} - @staticmethod - def normalize_modname(mod: str) -> str: + def _normalize_modname(self, mod: str) -> str: return mod.replace('-', '_') - def cache_modules_order(self, path: str) -> None: + def _cache_modules_order(self, path: str) -> None: self.modules_order[path] = dict() order = os.path.join(path, "modules.order") try: f = open(order) for line in f.readlines(): modpath = line.rstrip() - modname = self.normalize_modname(os.path.basename(modpath)) + modname = self._normalize_modname(os.path.basename(modpath)) if modname[:7] == "kernel/": modname = modname[7:] modpath = os.path.join(path, modpath) @@ -502,16 +561,16 @@ def cache_modules_order(self, path: str) -> None: except OSError: pass - def get_module_path_from_modules_order(self, path: str, name: str) -> str: + def _get_module_path_from_modules_order(self, path: str, name: str) -> str: if not path in self.modules_order: - self.cache_modules_order(path) + self._cache_modules_order(path) try: return self.modules_order[path][name] except KeyError: - raise NoMatchingFileError(name) + raise _NoMatchingFileError(name) - def cache_file_tree(self, path: str, regex: Pattern[str] = None) -> None: + def _cache_file_tree(self, path: str, regex: Pattern[str] = None) -> None: if not path in self.findmap: self.findmap[path] = { 'filters' : [], @@ -534,7 +593,7 @@ def cache_file_tree(self, path: str, regex: Pattern[str] = None) -> None: # pylint: disable=unused-variable for root, dirs, files in os.walk(path): for filename in files: - modname = self.normalize_modname(filename) + modname = self._normalize_modname(filename) if regex and regex.match(modname) is None: continue @@ -542,36 +601,42 @@ def cache_file_tree(self, path: str, regex: Pattern[str] = None) -> None: modpath = os.path.join(root, filename) self.findmap[path]['files'][modname] = modpath - def get_file_path_from_tree_search(self, path: str, name: str, - regex: Pattern[str] = None) -> str: - self.cache_file_tree(path, regex) + def _get_file_path_from_tree_search(self, path: str, name: str, + regex: Pattern[str] = None) -> str: + self._cache_file_tree(path, regex) try: - modname = 
self.normalize_modname(name) + modname = self._normalize_modname(name) return self.findmap[path]['files'][modname] except KeyError: - raise NoMatchingFileError(name) + raise _NoMatchingFileError(name) - def find_module_file(self, name: str, path: str) -> str: + def _find_module_file(self, name: str, path: str) -> str: try: - return self.get_module_path_from_modules_order(path, name) - except NoMatchingFileError: + return self._get_module_path_from_modules_order(path, name) + except _NoMatchingFileError: pass regex = re.compile(fnmatch.translate("*.ko")) - return self.get_file_path_from_tree_search(path, name, regex) + return self._get_file_path_from_tree_search(path, name, regex) - def find_module_debuginfo_file(self, name: str, path: str) -> str: + def _find_module_debuginfo_file(self, name: str, path: str) -> str: regex = re.compile(fnmatch.translate("*.ko.debug")) - return self.get_file_path_from_tree_search(path, name, regex) + return self._get_file_path_from_tree_search(path, name, regex) @staticmethod def build_id_path(objfile: gdb.Objfile) -> str: + """ + Returns the relative path for debuginfo using the objfile's build-id. + + Args: + objfile: The objfile for which to return the path + """ build_id = objfile.build_id return ".build_id/{}/{}.debug".format(build_id[0:2], build_id[2:]) - def try_load_debuginfo(self, objfile: gdb.Objfile, - path: str, verbose: bool = False) -> bool: + def _try_load_debuginfo(self, objfile: gdb.Objfile, + path: str, verbose: bool = False) -> bool: if not os.path.exists(path): return False @@ -586,9 +651,9 @@ def try_load_debuginfo(self, objfile: gdb.Objfile, return False - def load_module_debuginfo(self, objfile: gdb.Objfile, - modpath: str = None, - verbose: bool = False) -> None: + def _load_module_debuginfo(self, objfile: gdb.Objfile, + modpath: str = None, + verbose: bool = False) -> None: if modpath is None: modpath = objfile.filename if ".gz" in modpath: @@ -599,18 +664,26 @@ def load_module_debuginfo(self, objfile: gdb.Objfile, for path in self.module_debuginfo_path: filepath = "{}/{}".format(path, build_id_path) - if self.try_load_debuginfo(objfile, filepath, verbose): + if self._try_load_debuginfo(objfile, filepath, verbose): break try: - filepath = self.find_module_debuginfo_file(filename, path) - except NoMatchingFileError: + filepath = self._find_module_debuginfo_file(filename, path) + except _NoMatchingFileError: continue - if self.try_load_debuginfo(objfile, filepath, verbose): + if self._try_load_debuginfo(objfile, filepath, verbose): break def setup_tasks(self) -> None: + """ + Populate GDB's thread list using the kernel's task lists + + This method will iterate over the kernel's task lists, create a + LinuxTask object, and create a gdb thread for each one. The + threads will be built so that the registers are ready to be + populated, which allows symbolic stack traces to be made available. + """ from crash.types.percpu import get_percpu_vars from crash.types.task import LinuxTask, for_each_all_tasks import crash.cache.tasks # pylint: disable=redefined-outer-name diff --git a/crash/session.py b/crash/session.py index d692789c620..e1bc109d163 100644 --- a/crash/session.py +++ b/crash/session.py @@ -14,10 +14,10 @@ class Session(object): any sub modules for autoinitializing commands and subsystems. 
     Args:
-        kernel (CrashKernel): The kernel to debug during this session
-        verbose (bool, optional, default=False): Whether to enable verbose
+        kernel: The kernel to debug during this session
+        verbose (optional, default=False): Whether to enable verbose
             output
-        debug (bool, optional, default=False): Whether to enable verbose
+        debug (optional, default=False): Whether to enable verbose
             debugging output
     """
     def __init__(self, kernel: CrashKernel, verbose: bool = False,

From cbbd6e241c3b517d2be4d59bab0c9999566d02f5 Mon Sep 17 00:00:00 2001
From: Jeff Mahoney
Date: Fri, 7 Jun 2019 10:54:30 -0400
Subject: [PATCH 214/367] crash: add mock elffile package for readthedocs.io building

The sphinx worker on readthedocs doesn't have the elffile module, so we
need to provide a mock one. Otherwise the crash.kernel module
documentation isn't built properly.

Signed-off-by: Jeff Mahoney
---
 doc-source/mock/elftools/__init__.py     | 2 ++
 doc-source/mock/elftools/elf/__init__.py | 2 ++
 doc-source/mock/elftools/elf/elffile.py  | 5 +++++
 3 files changed, 9 insertions(+)
 create mode 100644 doc-source/mock/elftools/__init__.py
 create mode 100644 doc-source/mock/elftools/elf/__init__.py
 create mode 100644 doc-source/mock/elftools/elf/elffile.py

diff --git a/doc-source/mock/elftools/__init__.py b/doc-source/mock/elftools/__init__.py
new file mode 100644
index 00000000000..9e72c13b9b3
--- /dev/null
+++ b/doc-source/mock/elftools/__init__.py
@@ -0,0 +1,2 @@
+# -*- coding: utf-8 -*-
+# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
diff --git a/doc-source/mock/elftools/elf/__init__.py b/doc-source/mock/elftools/elf/__init__.py
new file mode 100644
index 00000000000..9e72c13b9b3
--- /dev/null
+++ b/doc-source/mock/elftools/elf/__init__.py
@@ -0,0 +1,2 @@
+# -*- coding: utf-8 -*-
+# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
diff --git a/doc-source/mock/elftools/elf/elffile.py b/doc-source/mock/elftools/elf/elffile.py
new file mode 100644
index 00000000000..cd7d5df5f23
--- /dev/null
+++ b/doc-source/mock/elftools/elf/elffile.py
@@ -0,0 +1,5 @@
+# -*- coding: utf-8 -*-
+# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
+
+class ELFFile(object):
+    pass

From 299e20c6b2d9d3b26ce88f4e016b52794dfd96ad Mon Sep 17 00:00:00 2001
From: Jeff Mahoney
Date: Fri, 7 Jun 2019 11:59:53 -0400
Subject: [PATCH 215/367] testing: update documentation

This commit updates the documentation for testing.

Signed-off-by: Jeff Mahoney
---
 doc-source/mock/README |   8 +-
 doc-source/testing.rst | 176 +++++++++++++++++++++++++++++++++++------
 2 files changed, 159 insertions(+), 25 deletions(-)

diff --git a/doc-source/mock/README b/doc-source/mock/README
index d8832fe5048..d50b8007fcf 100644
--- a/doc-source/mock/README
+++ b/doc-source/mock/README
@@ -1,3 +1,7 @@
-This directory contains a mockup of the gdb and addrxlat modules.
+This directory contains a mockup of modules that are used as dependencies
+of this project.
 
-Otherwise, sphinx fails to import the modules and can't build the docs.
+They are used to build the documentation as sphinx fails to import the
+dependencies offered by gdb and can't build the docs. Likewise,
+readthedocs.org doesn't provide the dependencies (and shouldn't), so
+the mock modules are used to allow the build process to succeed.
diff --git a/doc-source/testing.rst b/doc-source/testing.rst
index 743a56f9f40..47de9664c00 100644
--- a/doc-source/testing.rst
+++ b/doc-source/testing.rst
@@ -4,26 +4,113 @@ Testing
 Summary
 -------
 
-There are unit tests in the tests/ dir that are standalone and useful for
-testing basic functionality.
+There are unit tests in the ``tests`` directory that are standalone and
+useful for testing basic functionality.
 
-There are unit tests in the kernel-tests dir that require configuration,
-kernel images, debuginfo, and vmcores to use.
+There are unit tests in the ``kernel-tests`` directory that require
+configuration, kernel images, debuginfo, and vmcores to use.
 
 If installed, there is support for running the `mypy `_
 static checker and the `pylint `_ code checker.
 
-`pylint` runs properly from within the gdb environment but `mypy` spawns
-external interpreters and cannot run from within gdb.
+Unit tests
+----------
 
-Configuration
+The standalone unit tests are in the tests directory and are prefixed
+with ``test_``. Only tests that don't need to access a real vmcore should
+go here. This is mostly basic sanity testing.
+
+To run the unit tests:
+
+.. code-block:: bash
+
+    $ make unit-tests
+
+Adding new tests is as easy as creating a new python file using a filename
+prefixed with ``test_``. It uses the
+`unittest `_ framework and
+the tests are run from within the ``gdb`` python environment.
+
+Other test cases can be used as examples.
+
+Type checking
 -------------
-The configuration for each kernel/vmcore to be tested goes in a .ini file
-with the following format. All fields except kernel and vmcore are
-optional, and defaults will be used. A kernel missing debuginfo cannot
-be used for testing. Missing modules will mean module-specific tests
-will be skipped.
+Although python isn't statically typed as languages like
+`C `_ are, Python 3.5
+added support for `typing `_
+to be used for static analysis. The crash-python project uses the typing
+facility extensively and requires that new code be properly typed. The
+typing can be verified using the `mypy `_ tool.
+
+If ``mypy`` is installed, the following will invoke it.
+
+.. code-block:: bash
+
+    $ make static-check
+
+The tool does spawn external interpreters and so it currently does not
+operate properly from within the ``gdb`` python environment. We've worked
+around that shortcoming by ignoring missing imports.
+
+Code sanitization
+-----------------
+
+One of the tools available to ensure that python code is free of certain
+classes of bugs and that it conforms to typical conventions is the
+`pylint `_ code checker. The crash-python project
+requires that all new code pass the ``pylint`` checks or be properly
+annotated as to why a particular addition doesn't pass.
+
+There are some checks that are an expression of the developer's preference
+and those have been disabled:
+
+- ``missing-docstring``
+- ``too-few-public-methods``
+- ``invalid-name``
+- ``too-many-locals``
+- ``too-many-instance-attributes``
+- ``too-many-public-methods``
+- ``fixme``
+- ``no-self-use``
+- ``too-many-branches``
+- ``too-many-statements``
+- ``too-many-arguments``
+- ``too-many-boolean-expressions``
+- ``line-too-long``
+- ``duplicate-code``
+
+If ``pylint`` is installed, the following will invoke it.
+
+.. code-block:: bash
+
+    $ make lint
+
+The ``lint`` target does allow several options:
+
+- ``E=1`` -- Only report errors
+- ``PYLINT_ARGS`` -- Override the default arguments. It will still operate
+  on the :py:mod:`crash` and :py:mod:`kdump` modules but no other default
+  arguments will be used.
+
+Testing with vmcores
+--------------------
+
+Basic unit tests are helpful for shaking out simple bugs but many failures
+can occur in response to the data contained in real crash dumps. Symbols
+may be missing or changed. Types may have members added or removed. Flags
+may have changed semantic meaning or numeric value. A semantic debugger
+must be continually updated as new kernel versions are released that change
+interfaces.
+
+The best way to ensure that the debugger operates on a particular kernel
+release is to use the live testing functionality provided by the ``live-tests``
+target. In order to provide a flexible environment for enabling those
+tests, the configuration for each kernel to be tested is contained in
+an individual `.ini` file. The ``kernel`` and ``vmcore`` fields are
+mandatory. Any other fields are optional and defaults will be used if they
+are unspecified. The fields and their defaults match those defined in
+:py:class:`crash.kernel.CrashKernel`.
 
 .. code-block:: ini
 
@@ -35,7 +122,9 @@ will be skipped.
     module_debuginfo_path=/path/to/module/debuginfo
     root=/root/for/tree/searches
 
-The optional fields match those defined in `crash.kernel.CrashKernel`.
+Like running the debugger normally, modules and debuginfo are required for
+testing. Missing modules will prevent module-specific tests from being run
+and they will be skipped without failing the test.
 
 Example 1:
 
@@ -58,17 +147,12 @@ Example 2:
     root=/var/cache/crash-setup/leap15/4.12.14-150.14-default
 
 In this example, the kernel and debuginfo packages are installed under
-/var/cache/crash-setup/leap15/4.12.14-150.14-default and so we only
+``/var/cache/crash-setup/leap15/4.12.14-150.14-default`` and so we only
 specify a root directory.
 
-Running
--------
-
-The make target `test` will run all standalone tests. The absence of `pylint`
-or `mypy` is not considered an error.
-
-To run the tests using live vmcores using the configuration detailed above,
-the `INI_FILES` option should be used.
+To invoke these test scenarios, the ``live-tests`` target can be used with
+the ``INI_FILES`` option. The ``INI_FILES`` option is a quoted,
+space-separated list of paths to the `.ini` files described above.
 
 Example:
 
@@ -83,6 +167,52 @@ or
 
     $ make live-tests INI_FILES=kernel-test-configs/*.ini
 
-Each configuration will execute independently from one another.
+Similar to the standalone unit tests, adding a new test is as simple as
+creating a new python file with a name prefixed with ``test_`` and
+creating the testcases.
+
+Test everything
+---------------
+
+To run all standalone tests:
+
+.. code-block:: bash
+
+    $ make test
+
+To run all tests, including testing real vmcores, specify the ``INI_FILES``
+option as described above.
+
+.. code-block:: bash
+
+    $ make test INI_FILES=kernel-test-configs/*.ini
+
+The absence of ``pylint`` or ``mypy`` is not considered an error.
+
+Lastly, documentation is built using docstrings found in the code. Building
+documentation requires the
+`sphinx-apidoc `_
+package and the `sphinx `_
+package with the
+`autodoc `_,
+`coverage `_,
+`intersphinx `_,
+`viewcode `_, and
+`napoleon `_ extensions.
+
+To test everything including documentation:
+
+.. code-block:: bash
+
+    $ make full-test
+
+
+The documentation is published on `readthedocs.org `_
+which doesn't provide a ``gdb`` environment or the required dependencies
+(nor should it).
In order to build the documentation properly, mock +interfaces to those packages are used. If you've added code that requires +extending the mock interfaces, they can be found in the ``doc-source/mock`` +directory of the source code +`repository `_. From 0fc10f015c1a5395a7bf0cc7e86ee2d12c830e23 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 7 Jun 2019 15:55:20 -0400 Subject: [PATCH 216/367] man: switch to using rst for manpages Since the rest of the documentation is now in rst, keeping the manpage as the lone asciidoc document doesn't make any sense. Signed-off-by: Jeff Mahoney --- .gitignore | 2 +- Makefile | 70 ++++++++----------- asciidoc.conf | 36 ---------- doc-source/conf.py | 4 +- .../crash-python.rst | 33 +++++---- doc-source/user_guide.rst | 3 + manpage-base.xsl | 35 ---------- manpage-bold-literal.xsl | 17 ----- manpage-normal.xsl | 13 ---- setup.cfg | 7 -- 10 files changed, 59 insertions(+), 161 deletions(-) delete mode 100644 asciidoc.conf rename pycrash.asciidoc => doc-source/crash-python.rst (89%) delete mode 100644 manpage-base.xsl delete mode 100644 manpage-bold-literal.xsl delete mode 100644 manpage-normal.xsl delete mode 100644 setup.cfg diff --git a/.gitignore b/.gitignore index 79f4f801954..143d5d9fd33 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,6 @@ *.pyc *~ -doc-source/crash*.rst +doc-source/crash.*.rst doc-source/modules.rst docs tests/test_imports.py diff --git a/Makefile b/Makefile index 258f2aa810a..b38dd6fdffd 100644 --- a/Makefile +++ b/Makefile @@ -1,54 +1,25 @@ -ASCIIDOC = /usr/bin/asciidoc -ASCIIDOC_EXTRA = -MANPAGE_XSL = manpage-normal.xsl -XMLTO = /usr/bin/xmlto -XMLTO_EXTRA = -m manpage-bold-literal.xsl GZIPCMD = /usr/bin/gzip INSTALL = /usr/bin/install -c -MAN1_TXT = pycrash.asciidoc -prefix ?= /usr -mandir ?= $(prefix)/share/man -man1dir = $(mandir)/man1 - -GZ_MAN1 = $(patsubst %.asciidoc,%.1.gz,$(MAN1_TXT)) - -%.1.gz : %.1 - $(GZIPCMD) -n -c $< > $@ - -%.1 : %.xml - $(RM) -f $@ && \ - $(XMLTO) -m $(MANPAGE_XSL) $(XMLTO_EXTRA) man $< - -%.xml : %.asciidoc asciidoc.conf - rm -f $@+ $@ - $(ASCIIDOC) -b docbook -d manpage -f asciidoc.conf \ - $(ASCIIDOC_EXTRA) -o $@+ $< - mv $@+ $@ - -man: $(GZ_MAN1) - PYLINT_ARGS ?= --rcfile tests/pylintrc -r n ifeq ($(E),1) PYLINT_ARGS += -E endif -all: man +all: clean build doc test -man-install: man - $(INSTALL) -d -m 755 $(DESTDIR)$(man1dir) - $(INSTALL) -m 644 $(GZ_MAN1) $(DESTDIR)$(man1dir) +doc-source-clean: + rm -f doc-source/crash/*.rst doc-source/kdump/*.rst -doc-clean: +doc-clean: doc-source-clean rm -rf docs - rm -f doc-source/crash/*.rst doc-source/kdump/*.rst -clean: doc-clean +clean: doc-clean man-clean make -C tests clean rm -rf build -build: crash tests +build: FORCE python3 setup.py -q build clean-build: clean build @@ -74,8 +45,29 @@ test: unit-tests static-check lint live-tests full-test: test doc -doc: build FORCE - rm -rf docs - rm -f doc-source/crash/.*rst doc-source/kdump/*.rst - python3 setup.py -q build_sphinx +doc: doc-source-clean man + sphinx-build -a -b html doc-source docs/html + +pycrash.1 : crash-python.1 + +%.1 : doc-source/%.rst doc-source/conf.py + sphinx-build -a -b man doc-source . 
+ +%.1.gz : %.1 + $(GZIPCMD) -n -c $< > $@ + +GZ_MAN1 := pycrash.1.gz crash-python.1.gz +MAN1 := $(patsubst %.asciidoc,%.1.gz,$(MAN1_TXT)) + +man: $(GZ_MAN1) + +man-clean: FORCE + rm -f $(GZ_MAN1) + rm -f pycrash.1 crash-python.1 + +man-install: man + $(INSTALL) -d -m 755 $(DESTDIR)$(man1dir) + $(INSTALL) -m 644 $(GZ_MAN1) $(DESTDIR)$(man1dir) + + FORCE: diff --git a/asciidoc.conf b/asciidoc.conf deleted file mode 100644 index cd1f830df6c..00000000000 --- a/asciidoc.conf +++ /dev/null @@ -1,36 +0,0 @@ -[tags] -bracket-emphasis={1?[{1}]}<|> - -[quotes] -<|>=#bracket-emphasis - -[attributes] -asterisk=* -plus=+ -caret=^ -startsb=[ -endsb=] -backslash=\ -tilde=~ -apostrophe=' -backtick=` -litdd=-- - -ifdef::doctype-manpage[] -ifdef::backend-docbook[] -[header] -template::[header-declarations] - - -{mantitle} -{manvolnum} -Pycrash -0.1 -Pycrash Manual - - - {manname} - {manpurpose} - -endif::backend-docbook[] -endif::doctype-manpage[] diff --git a/doc-source/conf.py b/doc-source/conf.py index 815b4398459..fb58529e72f 100644 --- a/doc-source/conf.py +++ b/doc-source/conf.py @@ -197,7 +197,9 @@ def setup(app): # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ - (master_doc, 'crash-python', 'crash-python Documentation', + ('crash-python', 'crash-python', 'crash-python Documentation', + [author], 1), + ('crash-python', 'pycrash', 'crash-python Documentation', [author], 1) ] diff --git a/pycrash.asciidoc b/doc-source/crash-python.rst similarity index 89% rename from pycrash.asciidoc rename to doc-source/crash-python.rst index c4d1bced9f5..e2ab5018ded 100644 --- a/pycrash.asciidoc +++ b/doc-source/crash-python.rst @@ -49,7 +49,8 @@ OPTIONS Each of the following options may be specified multiple times. -*-r | --root *:: +``-r | --root `` + Use the specified directory as the root for all file searches. When using properly configured .build-id symbolic links, this is the best method to use as the debuginfo will be loaded automatically via @@ -57,35 +58,43 @@ Each of the following options may be specified multiple times. specified, the defaults documented above will be used relative to each root. -*-m | --modules *:: +``-m | --modules `` + Use the specified directory to search for modules -*-d | --modules-debuginfo *:: +``-d | --modules-debuginfo `` + Use the specified directory to search for module debuginfo -*-D | --vmlinux-debuginfo *:: +``-D | --vmlinux-debuginfo `` + Use the specified directory to search for vmlinux debuginfo -*-b | --build-dir *:: +``-b | --build-dir `` + Use the specified directory as the root for all file searches. This directory should be the root of a built kernel source tree. This is - shorthand for *-r -m . -d . -D .* and will override preceding + shorthand for ``-r -m . -d . -D .`` and will override preceding options. DEBUGGING OPTIONS: ------------------ -*-v | --verbose*:: +``-v | --verbose`` + Enable verbose output for debugging the debugger -*--debug*:: +``--debug`` + Enable even noisier output for debugging the debugger -*--gdb*:: +``--gdb`` + Run the embedded gdb underneath a separate gdb instance. This is useful for debugging issues in gdb that are seen while running crash-python. -*--valgrind*:: +``--valgrind`` + Run the embedded gdb underneath valgrind. This is useful for debugging memory leaks in gdb patches. 
@@ -101,5 +110,5 @@ Please refer to the GitHub repository at https://github.com/jeffmahoney/crash-py SEE ALSO -------- -`gdb`(1) -`libdkumpfile` +gdb(1) +libkdumpfile(7) diff --git a/doc-source/user_guide.rst b/doc-source/user_guide.rst index e4cf6ee4ec6..bd5723dc9f1 100644 --- a/doc-source/user_guide.rst +++ b/doc-source/user_guide.rst @@ -1,4 +1,7 @@ User Guide ========== +.. toctree:: + crash-python + To be written. diff --git a/manpage-base.xsl b/manpage-base.xsl deleted file mode 100644 index a264fa61609..00000000000 --- a/manpage-base.xsl +++ /dev/null @@ -1,35 +0,0 @@ - - - - - - - - - - - - - - sp - - - - - - - - br - - - diff --git a/manpage-bold-literal.xsl b/manpage-bold-literal.xsl deleted file mode 100644 index 608eb5df628..00000000000 --- a/manpage-bold-literal.xsl +++ /dev/null @@ -1,17 +0,0 @@ - - - - - - - fB - - - fR - - - diff --git a/manpage-normal.xsl b/manpage-normal.xsl deleted file mode 100644 index a48f5b11f3d..00000000000 --- a/manpage-normal.xsl +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - -\ -. - - diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index 8ad6eff0a0d..00000000000 --- a/setup.cfg +++ /dev/null @@ -1,7 +0,0 @@ -[build_sphinx] -source-dir = doc-source -build-dir = docs -all_files = 1 - -[upload_sphinx] -upload-dir = docs/html From 0dcffd998c677816df5af62b7ffb9777bd0fbbc5 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 7 Jun 2019 16:05:30 -0400 Subject: [PATCH 217/367] doc: convert INTERNALS.md to rst This is the last non-rst documentation. Signed-off-by: Jeff Mahoney --- INTERNALS.md => doc-source/gdb-internals.rst | 11 ++++++----- doc-source/index.rst | 2 ++ 2 files changed, 8 insertions(+), 5 deletions(-) rename INTERNALS.md => doc-source/gdb-internals.rst (74%) diff --git a/INTERNALS.md b/doc-source/gdb-internals.rst similarity index 74% rename from INTERNALS.md rename to doc-source/gdb-internals.rst index 871cac2b4b9..1ede0275cd7 100644 --- a/INTERNALS.md +++ b/doc-source/gdb-internals.rst @@ -1,7 +1,8 @@ +GDB +=== -# GDB - -## Python contexts within GDB +Python contexts within GDB +-------------------------- Each time gdb enters the Python interpreter it establishes a context. Part of the context includes what architecture gdb believes it is @@ -13,7 +14,7 @@ When gdb starts up on x86_64, it uses a gdbarch of i386 -- with 32-bit words and pointers. Only when we load an executable or target does it switch to i386:x86_64. -The effect of this is that any code that relys on type information *must* +The effect of this is that any code that relies on type information *must* be executed in a separate context from the one that loaded the executable -and/or taret. Otherwise, any built-in types that are pointers or `long` +and/or target. Otherwise, any built-in types that are pointers or ``long`` based will use the 32-bit sizes. diff --git a/doc-source/index.rst b/doc-source/index.rst index cb737c1f2dc..416324d68e6 100644 --- a/doc-source/index.rst +++ b/doc-source/index.rst @@ -33,6 +33,8 @@ Table of Contents kdump/modules crash/modules + gdb-internals + Indices and tables ------------------ From 76830d5463e890cb1e83e7d7a0a23e7b80e72ae7 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 7 Jun 2019 16:08:53 -0400 Subject: [PATCH 218/367] test: remove dead test-all.sh This script does nothing now. Remove it. 
Signed-off-by: Jeff Mahoney --- test-all.sh | 3 --- 1 file changed, 3 deletions(-) delete mode 100755 test-all.sh diff --git a/test-all.sh b/test-all.sh deleted file mode 100755 index 31bdda8af9e..00000000000 --- a/test-all.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/sh - -make test INI_FILES="$@" From 369ffa5480bd8d19639ee85638cb4dcdf303958c Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 7 Jun 2019 23:44:57 -0400 Subject: [PATCH 219/367] docs: rework help (again) to work as documentation too This commit reworks how we handle the help text (again) so that we also generate html documentation from it. The help text is now rST formatted text in the module docstring for each command. An individual parser for each command is not required anymore. Signed-off-by: Jeff Mahoney --- Makefile | 15 +- crash.sh | 3 + crash/commands/__init__.py | 168 ++++++- crash/commands/btrfs.py | 25 +- crash/commands/dmesg.py | 265 ++++++----- crash/commands/help.py | 37 +- crash/commands/kmem.py | 33 +- crash/commands/lsmod.py | 51 ++- crash/commands/mount.py | 22 +- crash/commands/ps.py | 787 +++++++++++++++++--------------- crash/commands/syscmd.py | 74 +-- crash/commands/task.py | 38 +- crash/commands/vtop.py | 247 +++++----- crash/commands/xfs.py | 35 +- doc-source/conf.py | 37 +- doc-source/mock/gdb/__init__.py | 7 +- doc-source/user_guide.rst | 4 +- 17 files changed, 1071 insertions(+), 777 deletions(-) diff --git a/Makefile b/Makefile index b38dd6fdffd..c1f1e6bf9ac 100644 --- a/Makefile +++ b/Makefile @@ -11,6 +11,7 @@ all: clean build doc test doc-source-clean: rm -f doc-source/crash/*.rst doc-source/kdump/*.rst + rm -f doc-source/commands/*.rst doc-clean: doc-source-clean rm -rf docs @@ -19,7 +20,7 @@ clean: doc-clean man-clean make -C tests clean rm -rf build -build: FORCE +build: doc-help FORCE python3 setup.py -q build clean-build: clean build @@ -45,8 +46,6 @@ test: unit-tests static-check lint live-tests full-test: test doc -doc: doc-source-clean man - sphinx-build -a -b html doc-source docs/html pycrash.1 : crash-python.1 @@ -69,5 +68,15 @@ man-install: man $(INSTALL) -d -m 755 $(DESTDIR)$(man1dir) $(INSTALL) -m 644 $(GZ_MAN1) $(DESTDIR)$(man1dir) +doc-commands: FORCE + sh doc-source/gen-command-docs.sh + +doc-html: doc-source-clean doc-commands + sphinx-build -a -b html doc-source docs/html + +doc-help: doc-source-clean doc-commands + sphinx-build -a -b text doc-source docs/text + +doc: doc-source-clean doc-html doc-help man FORCE FORCE: diff --git a/crash.sh b/crash.sh index d49cae7e7f9..79b01465442 100755 --- a/crash.sh +++ b/crash.sh @@ -170,6 +170,9 @@ if [ -e "$DIR/setup.py" ]; then python3 setup.py build > /dev/null echo "python sys.path.insert(0, '$DIR/build/lib')" >> $GDBINIT popd > /dev/null + export CRASH_PYTHON_HELP="$DIR/docs/text" +else + export CRASH_PYTHON_HELP="/usr/share/doc/packages/crash-python" fi ZKERNEL="$1" diff --git a/crash/commands/__init__.py b/crash/commands/__init__.py index 41cf5a7de2c..6bf304c02a4 100644 --- a/crash/commands/__init__.py +++ b/crash/commands/__init__.py @@ -1,7 +1,58 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +""" +The crash.commands module is the interface for implementing commands +in crash-python. -from typing import Dict, Any +The only mandatory part of implementing a command is to derive a class +from :class:`.Command`, implement the :meth:`.Command.execute` method, +and instantiate it. If the command should have multiple aliases, +accept a name in the constructor and instantiate it multiple times. 
+
+Optional extensions:
+
+- Adding a parser (derived from :class:`.ArgumentParser`) that parses
+  arguments. If not provided, an empty parser will be used.
+- Adding a module docstring to be used as help text. If not provided,
+  the argparse generic help text will be used instead.
+
+The module docstring will be placed automatically in the command reference
+section of the user guide and will also be converted into plaintext help
+for use in command execution. It should be in `reStructuredText
+`_
+format.
+
+Example:
+
+::
+
+    \"\"\"
+    NAME
+    ----
+
+    helloworld
+
+    SYNOPSIS
+    --------
+
+    ``helloworld`` -- a command that prints hello world
+    \"\"\"
+
+    import crash.commands
+
+    class HelloWorld(crash.commands.Command):
+        def __init__(self) -> None:
+            parser = crash.commands.ArgumentParser(prog='helloworld')
+
+            super().__init__('helloworld', parser)
+
+        def execute(self, args: argparse.Namespace) -> None:
+            print("hello world")
+
+    HelloWorld()
+"""
+
+from typing import Dict, Any, Optional, Tuple
 import os
 import glob
@@ -13,56 +64,153 @@ import gdb
 
 class CommandError(RuntimeError):
+    """An error occurred while executing this command"""
     pass
 
 class CommandLineError(RuntimeError):
+    """An error occurred while handling the command line for this command"""
     pass
 
 class ArgumentParser(argparse.ArgumentParser):
+    """
+    A simple extension to :class:`argparse.ArgumentParser` that:
+
+    - Requires a command name be set
+    - Loads help text automatically from files
+    - Handles errors by raising :obj:`.CommandLineError`
+
+    """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+        if not self.prog:
+            raise CommandError("Cannot build command with no name")
+
     def error(self, message: str) -> Any:
+        """
+        An error callback that raises the :obj:`CommandLineError` exception.
+        """
         raise CommandLineError(message)
 
     def format_help(self) -> str:
-        if self.__doc__ is None:
-            raise NotImplementedError("This command does not have help text")
-        return self.__doc__.strip() + "\n"
+        """
+        A help formatter that loads the parsed rST documentation from disk
+        or returns the generic help text otherwise.
+        """
+        try:
+            path = os.path.join(os.environ['CRASH_PYTHON_HELP'], 'commands',
+                                f"{self.prog}.txt")
+            f = open(path)
+            helptext = f.read()
+            f.close()
+        except (KeyError, FileNotFoundError):
+            helptext = "Could not locate help file.\n"
+            helptext += "Generic help text follows.\n\n"
+            helptext += super().format_help()
+
+        return helptext
 
 class Command(gdb.Command):
-    commands: Dict[str, gdb.Command] = dict()
+    """
+    The Command class is the starting point for implementing a new command.
+
+    The :meth:`.Command.execute` method will be invoked when the user
+    invokes the command.
+
+    Once the constructor returns, the command will be registered with
+    ``gdb`` and the command will be available for use.
+
+    Args:
+        name: The name of the command. The string ``py`` will be prefixed
+            to it.
+        parser: The parser to use to handle the arguments. It must be derived
+            from the :class:`.ArgumentParser` class.
+
+    Raises:
+        ArgumentTypeError: The parser is not derived from
+            :class:`.ArgumentParser`.
+
+    """
+    _commands: Dict[str, gdb.Command] = dict()
 
     def __init__(self, name: str, parser: ArgumentParser = None) -> None:
+        """
+        """
         self.name = "py" + name
 
         if parser is None:
             parser = ArgumentParser(prog=self.name)
         elif not isinstance(parser, ArgumentParser):
             raise ArgumentTypeError('parser', parser, ArgumentParser)
-        self.parser = parser
-        self.commands[self.name] = self
+        self._parser = parser
+        self._commands[self.name] = self
         gdb.Command.__init__(self, self.name, gdb.COMMAND_USER)
 
     def format_help(self) -> str:
-        return self.parser.format_help()
+        """
+        Used by the :mod:`.help` module, it delegates the help formatting
+        to the parser object.
+        """
+        return self._parser.format_help()
 
     # pylint: disable=unused-argument
     def invoke_uncaught(self, argstr: str, from_tty: bool = False) -> None:
+        """
+        Invokes the command directly and does not catch exceptions.
+
+        This is used mainly for unit testing to ensure proper exceptions
+        are raised.
+
+        Unless you are doing something special, see :meth:`execute` instead.
+
+        Args:
+            argstr: The command arguments
+            from_tty (default=False): Whether the command was invoked from a
+                tty.
+        """
         argv = gdb.string_to_argv(argstr)
-        args = self.parser.parse_args(argv)
+        args = self._parser.parse_args(argv)
         self.execute(args)
 
     def invoke(self, argstr: str, from_tty: bool = False) -> None:
+        """
+        Invokes the command directly and translates exceptions.
+
+        This method is called by ``gdb`` to implement the command.
+
+        It translates the :class:`.CommandError`, :class:`.CommandLineError`,
+        and :class:`.DelayedAttributeError` exceptions into readable
+        error messages.
+
+        Unless you are doing something special, see :meth:`execute` instead.
+
+        Args:
+            argstr: The command arguments
+            from_tty (default=False): Whether the command was invoked from a
+                tty.
+        """
         try:
             self.invoke_uncaught(argstr, from_tty)
         except CommandError as e:
             print(f"{self.name}: {str(e)}")
         except CommandLineError as e:
             print(f"{self.name}: {str(e)}")
-            self.parser.print_usage()
+            self._parser.print_usage()
         except DelayedAttributeError as e:
             print(f"{self.name}: command unavailable, {str(e)}")
         except (SystemExit, KeyboardInterrupt):
             pass
 
     def execute(self, args: argparse.Namespace) -> None:
+        """
+        This method implements the command functionality.
+
+        Each command has a derived class associated with it that,
+        minimally, implements this method.
+
+        Args:
+            args: The arguments to this command already parsed by the
+                command's parser.
+ """ raise NotImplementedError("Command should not be called directly") def discover() -> None: diff --git a/crash/commands/btrfs.py b/crash/commands/btrfs.py index 1f167346050..e2ec5f8d5e9 100644 --- a/crash/commands/btrfs.py +++ b/crash/commands/btrfs.py @@ -1,5 +1,20 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +""" +SUMMARY +------- + +Display Btrfs internal data structures + +:: + + btrfs + +COMMANDS +-------- + + ``btrfs list [-m]`` -- list all btrfs file systems (-m to show metadata uuid) +""" import argparse from crash.commands import Command, ArgumentParser @@ -8,16 +23,6 @@ from crash.subsystem.filesystem.btrfs import btrfs_fsid, btrfs_metadata_uuid class _Parser(ArgumentParser): - """ - NAME - btrfs - display Btrfs internal data structures - - SYNOPSIS - btrfs - - COMMANDS - btrfs list [-m] - list all btrfs file systems (-m to show metadata uuid) - """ def format_usage(self) -> str: return "btrfs [args...]\n" diff --git a/crash/commands/dmesg.py b/crash/commands/dmesg.py index 7a000a26a5d..c92e7194b5a 100644 --- a/crash/commands/dmesg.py +++ b/crash/commands/dmesg.py @@ -1,5 +1,143 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +""" +SUMMARY +------- + +Display system message buffer + +:: + + log [-tdm] + dmesg [-tdm] + +DESCRIPTION +----------- + +This command dumps the kernel ``log_buf`` contents in chronological order. +The command supports the older log_buf formats, which may or may not contain a +timestamp inserted prior to each message, as well as the newer variable-length +record format, where the timestamp is contained in each log entry's header. + + + -t Display the message text without the timestamp. + -d Display the dictionary of key/value pair properties that are + optionally appended to a message by the kernel's dev_printk() + function; only applicable to the variable-length record format. + -m Display the message log level in brackets preceding each message. + For the variable-length record format, the level will be displayed + in hexadecimal, and depending upon the kernel version, also contains + the facility or flags bits. + +EXAMPLES +-------- + +Dump the kernel message buffer: + +:: + + py-crash> log + Linux version 2.2.5-15smp (root@mclinux1) (gcc version egcs-2.91.66 19990 + 314/Linux (egcs-1.1.2 release)) #1 SMP Thu Aug 26 11:04:37 EDT 1999 + Intel MultiProcessor Specification v1.4 + Virtual Wire compatibility mode. + OEM ID: DELL Product ID: WS 410 APIC at: 0xFEE00000 + Processor #0 Pentium(tm) Pro APIC version 17 + Processor #1 Pentium(tm) Pro APIC version 17 + I/O APIC #2 Version 17 at 0xFEC00000. + Processors: 2 + mapped APIC to ffffe000 (fee00000) + mapped IOAPIC to ffffd000 (fec00000) + Detected 447696347 Hz processor. + Console: colour VGA+ 80x25 + Calibrating delay loop... 445.64 BogoMIPS + ... + 8K byte-wide RAM 5:3 Rx:Tx split, autoselect/Autonegotiate interface. + MII transceiver found at address 24, status 782d. + Enabling bus-master transmits and whole-frame receives. + Installing knfsd (copyright (C) 1996 okir@monad.swb.de). + nfsd_init: initialized fhcache, entries=256 + ... + +Do the same thing, but also show the log level preceding each message: + +:: + + py-crash> log -m + <4>Linux version 2.2.5-15smp (root@mclinux1) (gcc version egcs-2.91.66 19990 + 314/Linux (egcs-1.1.2 release)) #1 SMP Thu Aug 26 11:04:37 EDT 1999 + <4>Intel MultiProcessor Specification v1.4 + <4> Virtual Wire compatibility mode. 
+ <4>OEM ID: DELL Product ID: WS 410 APIC at: 0xFEE00000 + <4>Processor #0 Pentium(tm) Pro APIC version 17 + <4>Processor #1 Pentium(tm) Pro APIC version 17 + <4>I/O APIC #2 Version 17 at 0xFEC00000. + <4>Processors: 2 + <4>mapped APIC to ffffe000 (fee00000) + <4>mapped IOAPIC to ffffd000 (fec00000) + <4>Detected 447696347 Hz processor. + <4>Console: colour VGA+ 80x25 + <4>Calibrating delay loop... 445.64 BogoMIPS + ... + <6> 8K byte-wide RAM 5:3 Rx:Tx split, autoselect/Autonegotiate interface. + <6> MII transceiver found at address 24, status 782d. + <6> Enabling bus-master transmits and whole-frame receives. + <6>Installing knfsd (copyright (C) 1996 okir@monad.swb.de). + <7>nfsd_init: initialized fhcache, entries=256 + ... + +On a system with the variable-length record format, and whose log_buf has been +filled and wrapped around, display the log with timestamp data: + +:: + + py-crash> log + [ 0.467730] pci 0000:ff:02.0: [8086:2c10] type 00 class 0x060000 + [ 0.467749] pci 0000:ff:02.1: [8086:2c11] type 00 class 0x060000 + [ 0.467769] pci 0000:ff:02.4: [8086:2c14] type 00 class 0x060000 + [ 0.467788] pci 0000:ff:02.5: [8086:2c15] type 00 class 0x060000 + [ 0.467809] pci 0000:ff:03.0: [8086:2c18] type 00 class 0x060000 + [ 0.467828] pci 0000:ff:03.1: [8086:2c19] type 00 class 0x060000 + ... + +Display the same message text as above, without the timestamp data: + +:: + + py-crash> log -t + pci 0000:ff:02.0: [8086:2c10] type 00 class 0x060000 + pci 0000:ff:02.1: [8086:2c11] type 00 class 0x060000 + pci 0000:ff:02.4: [8086:2c14] type 00 class 0x060000 + pci 0000:ff:02.5: [8086:2c15] type 00 class 0x060000 + pci 0000:ff:03.0: [8086:2c18] type 00 class 0x060000 + pci 0000:ff:03.1: [8086:2c19] type 00 class 0x060000 + ... + +Display the same message text as above, with appended dictionary data: + +:: + + py-crash> log -td + pci 0000:ff:02.0: [8086:2c10] type 00 class 0x060000 + SUBSYSTEM=pci + DEVICE=+pci:0000:ff:02.0 + pci 0000:ff:02.1: [8086:2c11] type 00 class 0x060000 + SUBSYSTEM=pci + DEVICE=+pci:0000:ff:02.1 + pci 0000:ff:02.4: [8086:2c14] type 00 class 0x060000 + SUBSYSTEM=pci + DEVICE=+pci:0000:ff:02.4 + pci 0000:ff:02.5: [8086:2c15] type 00 class 0x060000 + SUBSYSTEM=pci + DEVICE=+pci:0000:ff:02.5 + pci 0000:ff:03.0: [8086:2c18] type 00 class 0x060000 + SUBSYSTEM=pci + DEVICE=+pci:0000:ff:03.0 + pci 0000:ff:03.1: [8086:2c19] type 00 class 0x060000 + SUBSYSTEM=pci + DEVICE=+pci:0000:ff:03.1 + ... +""" from typing import Dict, Iterable, Any @@ -22,136 +160,11 @@ class LogTypeException(Exception): class LogInvalidOption(Exception): pass -class _Parser(ArgumentParser): - """ - NAME - log - dump system message buffer - - SYNOPSIS - log [-tdm] - - DESCRIPTION - This command dumps the kernel log_buf contents in chronological order. The - command supports the older log_buf formats, which may or may not contain a - timestamp inserted prior to each message, as well as the newer variable-length - record format, where the timestamp is contained in each log entry's header. - - -t Display the message text without the timestamp. - -d Display the dictionary of key/value pair properties that are optionally - appended to a message by the kernel's dev_printk() function; only - applicable to the variable-length record format. - -m Display the message log level in brackets preceding each message. For - the variable-length record format, the level will be displayed in - hexadecimal, and depending upon the kernel version, also contains the - facility or flags bits. 
- - - EXAMPLES - Dump the kernel message buffer: - - crash> log - Linux version 2.2.5-15smp (root@mclinux1) (gcc version egcs-2.91.66 19990 - 314/Linux (egcs-1.1.2 release)) #1 SMP Thu Aug 26 11:04:37 EDT 1999 - Intel MultiProcessor Specification v1.4 - Virtual Wire compatibility mode. - OEM ID: DELL Product ID: WS 410 APIC at: 0xFEE00000 - Processor #0 Pentium(tm) Pro APIC version 17 - Processor #1 Pentium(tm) Pro APIC version 17 - I/O APIC #2 Version 17 at 0xFEC00000. - Processors: 2 - mapped APIC to ffffe000 (fee00000) - mapped IOAPIC to ffffd000 (fec00000) - Detected 447696347 Hz processor. - Console: colour VGA+ 80x25 - Calibrating delay loop... 445.64 BogoMIPS - ... - 8K byte-wide RAM 5:3 Rx:Tx split, autoselect/Autonegotiate interface. - MII transceiver found at address 24, status 782d. - Enabling bus-master transmits and whole-frame receives. - Installing knfsd (copyright (C) 1996 okir@monad.swb.de). - nfsd_init: initialized fhcache, entries=256 - ... - - Do the same thing, but also show the log level preceding each message: - - crash> log -m - <4>Linux version 2.2.5-15smp (root@mclinux1) (gcc version egcs-2.91.66 19990 - 314/Linux (egcs-1.1.2 release)) #1 SMP Thu Aug 26 11:04:37 EDT 1999 - <4>Intel MultiProcessor Specification v1.4 - <4> Virtual Wire compatibility mode. - <4>OEM ID: DELL Product ID: WS 410 APIC at: 0xFEE00000 - <4>Processor #0 Pentium(tm) Pro APIC version 17 - <4>Processor #1 Pentium(tm) Pro APIC version 17 - <4>I/O APIC #2 Version 17 at 0xFEC00000. - <4>Processors: 2 - <4>mapped APIC to ffffe000 (fee00000) - <4>mapped IOAPIC to ffffd000 (fec00000) - <4>Detected 447696347 Hz processor. - <4>Console: colour VGA+ 80x25 - <4>Calibrating delay loop... 445.64 BogoMIPS - ... - <6> 8K byte-wide RAM 5:3 Rx:Tx split, autoselect/Autonegotiate interface. - <6> MII transceiver found at address 24, status 782d. - <6> Enabling bus-master transmits and whole-frame receives. - <6>Installing knfsd (copyright (C) 1996 okir@monad.swb.de). - <7>nfsd_init: initialized fhcache, entries=256 - ... - - On a system with the variable-length record format, and whose log_buf has been - filled and wrapped around, display the log with timestamp data: - - crash> log - [ 0.467730] pci 0000:ff:02.0: [8086:2c10] type 00 class 0x060000 - [ 0.467749] pci 0000:ff:02.1: [8086:2c11] type 00 class 0x060000 - [ 0.467769] pci 0000:ff:02.4: [8086:2c14] type 00 class 0x060000 - [ 0.467788] pci 0000:ff:02.5: [8086:2c15] type 00 class 0x060000 - [ 0.467809] pci 0000:ff:03.0: [8086:2c18] type 00 class 0x060000 - [ 0.467828] pci 0000:ff:03.1: [8086:2c19] type 00 class 0x060000 - ... - - Display the same message text as above, without the timestamp data: - - crash> log -t - pci 0000:ff:02.0: [8086:2c10] type 00 class 0x060000 - pci 0000:ff:02.1: [8086:2c11] type 00 class 0x060000 - pci 0000:ff:02.4: [8086:2c14] type 00 class 0x060000 - pci 0000:ff:02.5: [8086:2c15] type 00 class 0x060000 - pci 0000:ff:03.0: [8086:2c18] type 00 class 0x060000 - pci 0000:ff:03.1: [8086:2c19] type 00 class 0x060000 - ... 
- - Display the same message text as above, with appended dictionary data: - - crash> log -td - pci 0000:ff:02.0: [8086:2c10] type 00 class 0x060000 - SUBSYSTEM=pci - DEVICE=+pci:0000:ff:02.0 - pci 0000:ff:02.1: [8086:2c11] type 00 class 0x060000 - SUBSYSTEM=pci - DEVICE=+pci:0000:ff:02.1 - pci 0000:ff:02.4: [8086:2c14] type 00 class 0x060000 - SUBSYSTEM=pci - DEVICE=+pci:0000:ff:02.4 - pci 0000:ff:02.5: [8086:2c15] type 00 class 0x060000 - SUBSYSTEM=pci - DEVICE=+pci:0000:ff:02.5 - pci 0000:ff:03.0: [8086:2c18] type 00 class 0x060000 - SUBSYSTEM=pci - DEVICE=+pci:0000:ff:03.0 - pci 0000:ff:03.1: [8086:2c19] type 00 class 0x060000 - SUBSYSTEM=pci - DEVICE=+pci:0000:ff:03.1 - ... - """ - - def format_usage(self) -> str: - return 'log [-tdm]\n' - class LogCommand(Command): """dump system message buffer""" def __init__(self, name: str) -> None: - parser = _Parser(prog=name) + parser = ArgumentParser(prog=name) parser.add_argument('-t', action='store_true', default=False) parser.add_argument('-d', action='store_true', default=False) diff --git a/crash/commands/help.py b/crash/commands/help.py index dd9c3a8c5e8..fe0d311461d 100644 --- a/crash/commands/help.py +++ b/crash/commands/help.py @@ -1,44 +1,47 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +""" +SUMMARY +------- -import argparse +Display help for crash commands -from crash.commands import Command, CommandError, ArgumentParser +:: + + help [command] -class _Parser(ArgumentParser): - """ - NAME - help - display help for crash commands +DESCRIPTION +----------- - SYNOPSIS - help [command] +This command displays help text for crash commands. When used alone, +it provides a list of commands. When an argument is specified, the help +text for that command will be printed. +""" - DESCRIPTION - This command displays help text for crash commands. When used alone, - it provides a list of commands. When an argument is specified, the help - text for that command will be printed. - """ +import argparse + +from crash.commands import Command, CommandError, ArgumentParser class HelpCommand(Command): """ this command""" def __init__(self) -> None: - parser = _Parser(prog="help") + parser = ArgumentParser(prog="help") parser.add_argument('args', nargs=argparse.REMAINDER) super().__init__('help', parser) def execute(self, args: argparse.Namespace) -> None: if not args.args: print("Available commands:") - for cmd in sorted(self.commands): - summary = self.commands[cmd].__doc__.strip() + for cmd in sorted(self._commands): + summary = self._commands[cmd].__doc__.strip() if not summary: summary = "no help text provided" print("{:<15} - {}".format(cmd, summary)) else: for cmd in args.args: try: - text = self.commands[cmd].format_help().strip() + text = self._commands[cmd].format_help().strip() except KeyError: raise CommandError("No such command `{}'".format(cmd)) if text is None: diff --git a/crash/commands/kmem.py b/crash/commands/kmem.py index 36259a1e0e0..a1512415c89 100644 --- a/crash/commands/kmem.py +++ b/crash/commands/kmem.py @@ -1,5 +1,23 @@ #!/usr/bin/python3 # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +""" +SUMMARY +------- + +Kernel memory inspection + +:: + + kmem addr - try to find addr within kmem caches + kmem -s [slabname] - check consistency of single or all kmem cache + kmem -z - report zones + kmem -V - report vmstats + +DESCRIPTION +----------- + +This command currently offers very basic kmem cache query and checking. 
+""" from typing import List @@ -14,21 +32,6 @@ from crash.util import get_symbol_value from crash.exceptions import MissingSymbolError -class _Parser(ArgumentParser): - """ - NAME - kmem - kernel memory inspection - - SYNOPSIS - kmem addr - try to find addr within kmem caches - kmem -s [slabname] - check consistency of single or all kmem cache - kmem -z - report zones - kmem -V - report vmstats - - DESCRIPTION - This command currently offers very basic kmem cache query and checking. - """ - class KmemCommand(Command): """ kernel memory inspection""" diff --git a/crash/commands/lsmod.py b/crash/commands/lsmod.py index 5eeca9d6ad8..d3ec5e24150 100644 --- a/crash/commands/lsmod.py +++ b/crash/commands/lsmod.py @@ -1,5 +1,31 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +""" +SUMMARY +------- + +Display module information + +:: + + lsmod [-p [n]] [name-wildcard] + +DESCRIPTION +----------- + +This command displays information about loaded modules. + +The default output will show all loaded modules, the core address, +its size, and any users of the module. By specifying [name-wildcard], +the results can be filtered to modules matching the wildcard. + +The following options are available: + +-p display the percpu base for the module and the size of its region +-p CPU display the percpu base for the module and the size of its region + for the specified CPU number + +""" import re import fnmatch @@ -14,36 +40,13 @@ import gdb -class _Parser(ArgumentParser): - """ - NAME - lsmod - display module information - - SYNOPSIS - lsmod [-p [n]] [name-wildcard] - - DESCRIPTION - This command displays information about loaded modules. - - The default output will show all loaded modules, the core address, - its size, and any users of the module. By specifying [name-wildcard], - the results can be filtered to modules matching the wildcard. 
- - The following options are available: - -p display the percpu base for the module and the size of its region - -p CPU# display the percpu base for the module and the size of its region - for the specified CPU number - """ - def format_usage(self) -> str: - return "lsmod [-p] [regex] ...\n" - types = Types(['struct module_use']) class ModuleCommand(Command): """display module information""" def __init__(self) -> None: - parser = _Parser(prog="lsmod") + parser = ArgumentParser(prog="lsmod") parser.add_argument('-p', nargs='?', const=-1, default=None, type=int) parser.add_argument('args', nargs=argparse.REMAINDER) diff --git a/crash/commands/mount.py b/crash/commands/mount.py index 3b41f587847..c664095c6c8 100644 --- a/crash/commands/mount.py +++ b/crash/commands/mount.py @@ -1,5 +1,15 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +""" +SUMMARY +------- + +Display mounted file systems + + -f display common mount flags + -v display superblock and vfsmount addresses + -d display device obtained from super_block +""" import argparse @@ -11,21 +21,11 @@ import gdb -class _Parser(ArgumentParser): - """ - NAME - mount - display mounted file systems - - -f display common mount flags - -v display superblock and vfsmount addresses - -d display device obtained from super_block - """ - class MountCommand(Command): """display mounted file systems""" def __init__(self, name: str) -> None: - parser = _Parser(prog=name) + parser = ArgumentParser(prog=name) parser.add_argument('-v', action='store_true', default=False) parser.add_argument('-f', action='store_true', default=False) diff --git a/crash/commands/ps.py b/crash/commands/ps.py index 1ba23820aa9..83db0426bf0 100755 --- a/crash/commands/ps.py +++ b/crash/commands/ps.py @@ -1,5 +1,422 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +""" +SUMMARY +------- + +Display process status information + +:: + + ps [-k|-u|-G][-s|-n][-p|-c|-t|-l|-a|-g|-r] [pid | taskp | command] ...`` + +DESCRIPTION +----------- + +This command displays process status for selected, or all, processes +in the system. If no arguments are entered, the process data is +is displayed for all processes. Specific processes may be selected +by using the following identifier formats: + + + ``pid`` -- a process PID + + ``taskp`` -- a hexadecimal ``struct task_struct`` pointer + + ``command`` -- a command name + + +If a command name is made up of letters that are all numerical values, +precede the name string with a ". If the command string is +enclosed within "'" characters, then the encompassed string must be a +POSIX extended regular expression that will be used to match task names. + +The process list may be further restricted by the following options: + + ``-k`` restrict the output to only kernel threads. + + ``-u`` restrict the output to only user tasks. + + ``-G`` display only the thread group leader in a thread group. + +The process identifier types may be mixed. For each task, the following +items are displayed: + + 1. the process PID. + 2. the parent process PID. + 3. the CPU number that the task ran on last. + 4. the task_struct address or the kernel stack pointer of the process. + (see -s option below) + 5. the task state (RU, IN, UN, ZO, ST, TR, DE, SW). + 6. the percentage of physical memory being used by this task. + 7. the virtual address size of this task in kilobytes. + 8. the resident set size of this task in kilobytes. + 9. the command name. 
+ +The default output shows the task_struct address of each process under a +column titled "``TASK``". This can be changed to show the kernel stack +pointer under a column titled "``KSTACKP``". + + ``-s`` replace the ``TASK`` column with the ``KSTACKP`` column. + +On SMP machines, the active task on each CPU will be highlighted by an +angle bracket (">") preceding its information. + +Alternatively, information regarding parent-child relationships, +per-task time usage data, argument/environment data, thread groups, +or resource limits may be displayed: + + ``-p`` display the parental hierarchy of selected, or all, tasks. + + ``-c`` display the children of selected, or all, tasks. + + ``-t`` display the task run time, start time, and cumulative user and system times. + + ``-l`` display the task last_run or timestamp value, whichever applies, of selected, or all, tasks; the list is sorted with the most recently-run task (largest last_run/timestamp) shown first, followed by the task's current state. + + ``-a`` display the command line arguments and environment strings of selected, or all, user-mode tasks. + + ``-g`` display tasks by thread group, of selected, or all, tasks. + + ``-r`` display resource limits (rlimits) of selected, or all, tasks. + + ``-n`` display gdb thread number + + +EXAMPLES +-------- + +Show the process status of all current tasks: + +:: + + py-crash> ps + PID PPID CPU TASK ST %MEM VSZ RSS COMM + > 0 0 3 c024c000 RU 0.0 0 0 [swapper] + > 0 0 0 c0dce000 RU 0.0 0 0 [swapper] + 0 0 1 c0fa8000 RU 0.0 0 0 [swapper] + > 0 0 2 c009a000 RU 0.0 0 0 [swapper] + 1 0 1 c0098000 IN 0.0 1096 476 init + 2 1 1 c0090000 IN 0.0 0 0 [kflushd] + 3 1 1 c000e000 IN 0.0 0 0 [kpiod] + 4 1 3 c000c000 IN 0.0 0 0 [kswapd] + 5 1 1 c0008000 IN 0.0 0 0 [mdrecoveryd] + 253 1 2 fbc4c000 IN 0.0 1088 376 portmap + 268 1 2 fbc82000 IN 0.1 1232 504 ypbind + 274 268 2 fa984000 IN 0.1 1260 556 ypbind + 321 1 1 fabf6000 IN 0.1 1264 608 syslogd + 332 1 1 fa9be000 RU 0.1 1364 736 klogd + 346 1 2 fae88000 IN 0.0 1112 472 atd + 360 1 2 faeb2000 IN 0.1 1284 592 crond + 378 1 2 fafd6000 IN 0.1 1236 560 inetd + 392 1 0 fb710000 IN 0.1 2264 1468 named + 406 1 3 fb768000 IN 0.1 1284 560 lpd + 423 1 1 fb8ac000 IN 0.1 1128 528 rpc.statd + 434 1 2 fb75a000 IN 0.0 1072 376 rpc.rquotad + 445 1 2 fb4a4000 IN 0.0 1132 456 rpc.mountd + 460 1 1 fa938000 IN 0.0 0 0 [nfsd] + 461 1 1 faa86000 IN 0.0 0 0 [nfsd] + 462 1 0 fac48000 IN 0.0 0 0 [nfsd] + 463 1 0 fb4ca000 IN 0.0 0 0 [nfsd] + 464 1 0 fb4c8000 IN 0.0 0 0 [nfsd] + 465 1 2 fba6e000 IN 0.0 0 0 [nfsd] + 466 1 1 fba6c000 IN 0.0 0 0 [nfsd] + 467 1 2 fac04000 IN 0.0 0 0 [nfsd] + 468 461 2 fa93a000 IN 0.0 0 0 [lockd] + 469 468 2 fa93e000 IN 0.0 0 0 [rpciod] + 486 1 0 fab54000 IN 0.1 1596 880 amd + 523 1 2 fa84e000 IN 0.1 1884 1128 sendmail + 538 1 0 fa82c000 IN 0.0 1112 416 gpm + 552 1 3 fa70a000 IN 0.1 2384 1220 httpd + 556 552 3 fa776000 IN 0.1 2572 1352 httpd + 557 552 2 faba4000 IN 0.1 2572 1352 httpd + 558 552 1 fa802000 IN 0.1 2572 1352 httpd + 559 552 3 fa6ee000 IN 0.1 2572 1352 httpd + 560 552 3 fa700000 IN 0.1 2572 1352 httpd + 561 552 0 fa6f0000 IN 0.1 2572 1352 httpd + 562 552 3 fa6ea000 IN 0.1 2572 1352 httpd + 563 552 0 fa67c000 IN 0.1 2572 1352 httpd + 564 552 3 fa674000 IN 0.1 2572 1352 httpd + 565 552 3 fa66a000 IN 0.1 2572 1352 httpd + 582 1 2 fa402000 IN 0.2 2968 1916 xfs + 633 1 2 fa1ec000 IN 0.2 5512 2248 innd + 636 1 3 fa088000 IN 0.1 2536 804 actived + 676 1 0 fa840000 IN 0.0 1060 384 mingetty + 677 1 1 fa590000 IN 0.0 1060 384 mingetty + 678 1 2 fa3b8000 IN 
0.0 1060 384 mingetty + 679 1 0 fa5b8000 IN 0.0 1060 384 mingetty + 680 1 1 fa3a4000 IN 0.0 1060 384 mingetty + 681 1 2 fa30a000 IN 0.0 1060 384 mingetty + 683 1 3 fa5d8000 IN 0.0 1052 280 update + 686 378 1 fa3aa000 IN 0.1 2320 1136 in.rlogind + 687 686 2 f9e52000 IN 0.1 2136 1000 login + 688 687 0 f9dec000 IN 0.1 1732 976 bash + > 700 688 1 f9d62000 RU 0.0 1048 256 gen12 + +Display the parental hierarchy of the "crash" process on a live system: + +:: + + py-crash> ps -p 4249 + PID: 0 TASK: c0252000 CPU: 0 COMMAND: "swapper" + PID: 1 TASK: c009a000 CPU: 1 COMMAND: "init" + PID: 632 TASK: c73b6000 CPU: 1 COMMAND: "prefdm" + PID: 637 TASK: c5a4a000 CPU: 1 COMMAND: "prefdm" + PID: 649 TASK: c179a000 CPU: 0 COMMAND: "kwm" + PID: 683 TASK: c1164000 CPU: 0 COMMAND: "kfm" + PID: 1186 TASK: c165a000 CPU: 0 COMMAND: "xterm" + PID: 1188 TASK: c705e000 CPU: 1 COMMAND: "bash" + PID: 4249 TASK: c6b9a000 CPU: 0 COMMAND: "crash" + +Display all children of the "kwm" window manager: + +:: + + py-crash> ps -c kwm + PID: 649 TASK: c179a000 CPU: 0 COMMAND: "kwm" + PID: 682 TASK: c2d58000 CPU: 1 COMMAND: "kwmsound" + PID: 683 TASK: c1164000 CPU: 1 COMMAND: "kfm" + PID: 685 TASK: c053c000 CPU: 0 COMMAND: "krootwm" + PID: 686 TASK: c13fa000 CPU: 0 COMMAND: "kpanel" + PID: 687 TASK: c13f0000 CPU: 1 COMMAND: "kbgndwm" + +Display all threads in a firefox session: + +:: + + py-crash> ps firefox + PID PPID CPU TASK ST %MEM VSZ RSS COMM + 21273 21256 6 ffff81003ec15080 IN 46.3 1138276 484364 firefox + 21276 21256 6 ffff81003f49e7e0 IN 46.3 1138276 484364 firefox + 21280 21256 0 ffff81003ec1d7e0 IN 46.3 1138276 484364 firefox + 21286 21256 6 ffff81000b0d1820 IN 46.3 1138276 484364 firefox + 21287 21256 2 ffff81000b0d10c0 IN 46.3 1138276 484364 firefox + 26975 21256 5 ffff81003b5c1820 IN 46.3 1138276 484364 firefox + 26976 21256 5 ffff810023232820 IN 46.3 1138276 484364 firefox + 26977 21256 4 ffff810021a11820 IN 46.3 1138276 484364 firefox + 26978 21256 5 ffff810003159040 IN 46.3 1138276 484364 firefox + 26979 21256 5 ffff81003a058820 IN 46.3 1138276 484364 firefox + +Display only the thread group leader in the firefox session: + +:: + + py-crash> ps -G firefox + PID PPID CPU TASK ST %MEM VSZ RSS COMM + 21273 21256 0 ffff81003ec15080 IN 46.3 1138276 484364 firefox + +Show the time usage data for pid 10318: + +:: + + py-crash> ps -t 10318 + PID: 10318 TASK: f7b85550 CPU: 5 COMMAND: "bash" + RUN TIME: 1 days, 01:35:32 + START TIME: 5209 + UTIME: 95 + STIME: 57 + +Show the process status of PID 1, task f9dec000, and all nfsd tasks: + +:: + + py-crash> ps 1 f9dec000 nfsd + PID PPID CPU TASK ST %MEM VSZ RSS COMM + 1 0 1 c0098000 IN 0.0 1096 476 init + 688 687 0 f9dec000 IN 0.1 1732 976 bash + 460 1 1 fa938000 IN 0.0 0 0 [nfsd] + 461 1 1 faa86000 IN 0.0 0 0 [nfsd] + 462 1 0 fac48000 IN 0.0 0 0 [nfsd] + 463 1 0 fb4ca000 IN 0.0 0 0 [nfsd] + 464 1 0 fb4c8000 IN 0.0 0 0 [nfsd] + 465 1 2 fba6e000 IN 0.0 0 0 [nfsd] + 466 1 1 fba6c000 IN 0.0 0 0 [nfsd] + 467 1 2 fac04000 IN 0.0 0 0 [nfsd] + +Show all kernel threads: + +:: + + py-crash> ps -k + PID PPID CPU TASK ST %MEM VSZ RSS COMM + 0 0 1 c0fac000 RU 0.0 0 0 [swapper] + 0 0 0 c0252000 RU 0.0 0 0 [swapper] + 2 1 1 c0fa0000 IN 0.0 0 0 [kflushd] + 3 1 1 c03de000 IN 0.0 0 0 [kpiod] + 4 1 1 c03dc000 IN 0.0 0 0 [kswapd] + 5 1 0 c0092000 IN 0.0 0 0 [mdrecoveryd] + 336 1 0 c4a9a000 IN 0.0 0 0 [rpciod] + 337 1 0 c4830000 IN 0.0 0 0 [lockd] + 487 1 1 c4ba6000 IN 0.0 0 0 [nfsd] + 488 1 0 c18c6000 IN 0.0 0 0 [nfsd] + 489 1 0 c0cac000 IN 0.0 0 0 [nfsd] + 490 1 0 c056a000 IN 0.0 0 0 [nfsd] + 
491 1 0 c0860000 IN 0.0 0 0 [nfsd] + 492 1 1 c0254000 IN 0.0 0 0 [nfsd] + 493 1 0 c0a86000 IN 0.0 0 0 [nfsd] + 494 1 0 c0968000 IN 0.0 0 0 [nfsd] + +Show all tasks sorted by their task_struct's last_run or timestamp value, +whichever applies: + +:: + + py-crash> ps -l + [280195] [RU] PID: 2 TASK: c1468000 CPU: 0 COMMAND: "keventd" + [280195] [IN] PID: 1986 TASK: c5af4000 CPU: 0 COMMAND: "sshd" + [280195] [IN] PID: 2039 TASK: c58e6000 CPU: 0 COMMAND: "sshd" + [280195] [RU] PID: 2044 TASK: c5554000 CPU: 0 COMMAND: "bash" + [280195] [RU] PID: 2289 TASK: c70c0000 CPU: 0 COMMAND: "s" + [280190] [IN] PID: 1621 TASK: c54f8000 CPU: 0 COMMAND: "cupsd" + [280184] [IN] PID: 5 TASK: c154c000 CPU: 0 COMMAND: "kswapd" + [280184] [IN] PID: 6 TASK: c7ff6000 CPU: 0 COMMAND: "kscand" + [280170] [IN] PID: 0 TASK: c038e000 CPU: 0 COMMAND: "swapper" + [280166] [IN] PID: 2106 TASK: c0c0c000 CPU: 0 COMMAND: "sshd" + [280166] [IN] PID: 2162 TASK: c03a4000 CPU: 0 COMMAND: "vmstat" + [280160] [IN] PID: 1 TASK: c154a000 CPU: 0 COMMAND: "init" + [280131] [IN] PID: 3 TASK: c11ce000 CPU: 0 COMMAND: "kapmd" + [280117] [IN] PID: 1568 TASK: c5a8c000 CPU: 0 COMMAND: "smartd" + [280103] [IN] PID: 1694 TASK: c4c66000 CPU: 0 COMMAND: "ntpd" + [280060] [IN] PID: 8 TASK: c7ff2000 CPU: 0 COMMAND: "kupdated" + [279767] [IN] PID: 1720 TASK: c4608000 CPU: 0 COMMAND: "sendmail" + [279060] [IN] PID: 13 TASK: c69f4000 CPU: 0 COMMAND: "kjournald" + [278657] [IN] PID: 1523 TASK: c5ad4000 CPU: 0 COMMAND: "ypbind" + [277712] [IN] PID: 2163 TASK: c06e0000 CPU: 0 COMMAND: "sshd" + [277711] [IN] PID: 2244 TASK: c4cdc000 CPU: 0 COMMAND: "ssh" + [277261] [IN] PID: 1391 TASK: c5d8e000 CPU: 0 COMMAND: "syslogd" + [276837] [IN] PID: 1990 TASK: c58d8000 CPU: 0 COMMAND: "bash" + [276802] [IN] PID: 1853 TASK: c3828000 CPU: 0 COMMAND: "atd" + [276496] [IN] PID: 1749 TASK: c4480000 CPU: 0 COMMAND: "cannaserver" + [274931] [IN] PID: 1760 TASK: c43ac000 CPU: 0 COMMAND: "crond" + [246773] [IN] PID: 1844 TASK: c38d8000 CPU: 0 COMMAND: "xfs" + [125620] [IN] PID: 2170 TASK: c48dc000 CPU: 0 COMMAND: "bash" + [119059] [IN] PID: 1033 TASK: c64be000 CPU: 0 COMMAND: "kjournald" + [110916] [IN] PID: 1663 TASK: c528a000 CPU: 0 COMMAND: "sshd" + [ 86122] [IN] PID: 2112 TASK: c0da6000 CPU: 0 COMMAND: "bash" + [ 13637] [IN] PID: 1891 TASK: c67ae000 CPU: 0 COMMAND: "sshd" + [ 13636] [IN] PID: 1894 TASK: c38ec000 CPU: 0 COMMAND: "bash" + [ 7662] [IN] PID: 1885 TASK: c6478000 CPU: 0 COMMAND: "mingetty" + [ 7662] [IN] PID: 1886 TASK: c62da000 CPU: 0 COMMAND: "mingetty" + [ 7662] [IN] PID: 1887 TASK: c5f8c000 CPU: 0 COMMAND: "mingetty" + [ 7662] [IN] PID: 1888 TASK: c5f88000 CPU: 0 COMMAND: "mingetty" + [ 7662] [IN] PID: 1889 TASK: c5f86000 CPU: 0 COMMAND: "mingetty" + [ 7662] [IN] PID: 1890 TASK: c6424000 CPU: 0 COMMAND: "mingetty" + [ 7661] [IN] PID: 4 TASK: c154e000 CPU: 0 COMMAND: "ksoftirqd/0" + [ 7595] [IN] PID: 1872 TASK: c2e7e000 CPU: 0 COMMAND: "inventory.pl" + [ 6617] [IN] PID: 1771 TASK: c435a000 CPU: 0 COMMAND: "jserver" + [ 6307] [IN] PID: 1739 TASK: c48f8000 CPU: 0 COMMAND: "gpm" + [ 6285] [IN] PID: 1729 TASK: c4552000 CPU: 0 COMMAND: "sendmail" + [ 6009] [IN] PID: 1395 TASK: c6344000 CPU: 0 COMMAND: "klogd" + [ 5820] [IN] PID: 1677 TASK: c4d74000 CPU: 0 COMMAND: "xinetd" + [ 5719] [IN] PID: 1422 TASK: c5d04000 CPU: 0 COMMAND: "portmap" + [ 4633] [IN] PID: 1509 TASK: c5ed4000 CPU: 0 COMMAND: "apmd" + [ 4529] [IN] PID: 1520 TASK: c5d98000 CPU: 0 COMMAND: "ypbind" + [ 4515] [IN] PID: 1522 TASK: c5d32000 CPU: 0 COMMAND: "ypbind" + [ 4373] [IN] PID: 1441 TASK: 
c5d48000 CPU: 0 COMMAND: "rpc.statd" + [ 4210] [IN] PID: 1352 TASK: c5b30000 CPU: 0 COMMAND: "dhclient" + [ 1184] [IN] PID: 71 TASK: c65b6000 CPU: 0 COMMAND: "khubd" + [ 434] [IN] PID: 9 TASK: c11de000 CPU: 0 COMMAND: "mdrecoveryd" + [ 48] [IN] PID: 7 TASK: c7ff4000 CPU: 0 COMMAND: "bdflush" + +Show the kernel stack pointer of each user task: + +:: + + py-crash> ps -us + PID PPID CPU KSTACKP ST %MEM VSZ RSS COMM + 1 0 0 c009bedc IN 0.0 1096 52 init + 239 1 0 c15e7ed8 IN 0.2 1332 224 pump + 280 1 1 c7cbdedc IN 0.2 1092 208 portmap + 295 1 0 c7481edc IN 0.0 1232 0 ypbind + 301 295 0 c7c7bf28 IN 0.1 1260 124 ypbind + 376 1 1 c5053f28 IN 0.0 1316 40 automount + 381 1 0 c34ddf28 IN 0.2 1316 224 automount + 391 1 1 c2777f28 IN 0.2 1316 224 automount + ... + +Display the argument and environment data for the automount task: + +:: + + py-crash> ps -a automount + PID: 3948 TASK: f722ee30 CPU: 0 COMMAND: "automount" + ARG: /usr/sbin/automount --timeout=60 /net program /etc/auto.net + ENV: SELINUX_INIT=YES + CONSOLE=/dev/console + TERM=linux + INIT_VERSION=sysvinit-2.85 + PATH=/sbin:/usr/sbin:/bin:/usr/bin + LC_MESSAGES=en_US + RUNLEVEL=3 + runlevel=3 + PWD=/ + LANG=ja_JP.UTF-8 + PREVLEVEL=N + previous=N + HOME=/ + SHLVL=2 + _=/usr/sbin/automount + +Display the tasks in the thread group containing task c20ab0b0: + +:: + + py-crash> ps -g c20ab0b0 + PID: 6425 TASK: f72f50b0 CPU: 0 COMMAND: "firefox-bin" + PID: 6516 TASK: f71bf1b0 CPU: 0 COMMAND: "firefox-bin" + PID: 6518 TASK: d394b930 CPU: 0 COMMAND: "firefox-bin" + PID: 6520 TASK: c20aa030 CPU: 0 COMMAND: "firefox-bin" + PID: 6523 TASK: c20ab0b0 CPU: 0 COMMAND: "firefox-bin" + PID: 6614 TASK: f1f181b0 CPU: 0 COMMAND: "firefox-bin" + +Display the tasks in the thread group for each instance of the +program named "multi-thread": + +:: + + py-crash> ps -g multi-thread + PID: 2522 TASK: 1003f0dc7f0 CPU: 1 COMMAND: "multi-thread" + PID: 2523 TASK: 10037b13030 CPU: 1 COMMAND: "multi-thread" + PID: 2524 TASK: 1003e064030 CPU: 1 COMMAND: "multi-thread" + PID: 2525 TASK: 1003e13a7f0 CPU: 1 COMMAND: "multi-thread" + + PID: 2526 TASK: 1002f82b7f0 CPU: 1 COMMAND: "multi-thread" + PID: 2527 TASK: 1003e1737f0 CPU: 1 COMMAND: "multi-thread" + PID: 2528 TASK: 10035b4b7f0 CPU: 1 COMMAND: "multi-thread" + PID: 2529 TASK: 1003f0c37f0 CPU: 1 COMMAND: "multi-thread" + PID: 2530 TASK: 10035597030 CPU: 1 COMMAND: "multi-thread" + PID: 2531 TASK: 100184be7f0 CPU: 1 COMMAND: "multi-thread" + +Display the resource limits of "bash" task 13896: + +:: + + py-crash> ps -r 13896 + PID: 13896 TASK: cf402000 CPU: 0 COMMAND: "bash" + RLIMIT CURRENT MAXIMUM + CPU (unlimited) (unlimited) + FSIZE (unlimited) (unlimited) + DATA (unlimited) (unlimited) + STACK 10485760 (unlimited) + CORE (unlimited) (unlimited) + RSS (unlimited) (unlimited) + NPROC 4091 4091 + NOFILE 1024 1024 + MEMLOCK 4096 4096 + AS (unlimited) (unlimited) + LOCKS (unlimited) (unlimited) + +Search for task names matching a POSIX regular expression: + +:: + + py-crash> ps 'migration*' + PID PPID CPU TASK ST %MEM VSZ RSS COMM + 8 2 0 ffff8802128a2e20 IN 0.0 0 0 [migration/0] + 10 2 1 ffff880212969710 IN 0.0 0 0 [migration/1] + 15 2 2 ffff880212989710 IN 0.0 0 0 [migration/2] + 20 2 3 ffff8802129a9710 IN 0.0 0 0 [migration/3] +""" from typing import Pattern, Optional, Callable, Dict @@ -158,376 +575,6 @@ def format_header(self) -> str: return self._format_header() class _Parser(ArgumentParser): - """ - NAME - ps - display process status information - - SYNOPSIS - ps [-k|-u|-G][-s|-n][-p|-c|-t|-l|-a|-g|-r] [pid | taskp | 
command] ... - - DESCRIPTION - This command displays process status for selected, or all, processes - in the system. If no arguments are entered, the process data is - is displayed for all processes. Specific processes may be selected - by using the following identifier formats: - - pid a process PID. - taskp a hexadecimal task_struct pointer. - command a command name. If a command name is made up of letters that - are all numerical values, precede the name string with a "\". - If the command string is enclosed within "'" characters, then - the encompassed string must be a POSIX extended regular expression - that will be used to match task names. - - The process list may be further restricted by the following options: - - -k restrict the output to only kernel threads. - -u restrict the output to only user tasks. - -G display only the thread group leader in a thread group. - - The process identifier types may be mixed. For each task, the following - items are displayed: - - 1. the process PID. - 2. the parent process PID. - 3. the CPU number that the task ran on last. - 4. the task_struct address or the kernel stack pointer of the process. - (see -s option below) - 5. the task state (RU, IN, UN, ZO, ST, TR, DE, SW). - 6. the percentage of physical memory being used by this task. - 7. the virtual address size of this task in kilobytes. - 8. the resident set size of this task in kilobytes. - 9. the command name. - - The default output shows the task_struct address of each process under a - column titled "TASK". This can be changed to show the kernel stack - pointer under a column titled "KSTACKP". - - -s replace the TASK column with the KSTACKP column. - - On SMP machines, the active task on each CPU will be highlighted by an - angle bracket (">") preceding its information. - - Alternatively, information regarding parent-child relationships, - per-task time usage data, argument/environment data, thread groups, - or resource limits may be displayed: - - -p display the parental hierarchy of selected, or all, tasks. - -c display the children of selected, or all, tasks. - -t display the task run time, start time, and cumulative user - and system times. - -l display the task last_run or timestamp value, whichever applies, - of selected, or all, tasks; the list is sorted with the most - recently-run task (largest last_run/timestamp) shown first, - followed by the task's current state. - -a display the command line arguments and environment strings of - selected, or all, user-mode tasks. - -g display tasks by thread group, of selected, or all, tasks. - -r display resource limits (rlimits) of selected, or all, tasks. 
- -n display gdb thread number - - EXAMPLES - Show the process status of all current tasks: - - crash> ps - PID PPID CPU TASK ST %MEM VSZ RSS COMM - > 0 0 3 c024c000 RU 0.0 0 0 [swapper] - > 0 0 0 c0dce000 RU 0.0 0 0 [swapper] - 0 0 1 c0fa8000 RU 0.0 0 0 [swapper] - > 0 0 2 c009a000 RU 0.0 0 0 [swapper] - 1 0 1 c0098000 IN 0.0 1096 476 init - 2 1 1 c0090000 IN 0.0 0 0 [kflushd] - 3 1 1 c000e000 IN 0.0 0 0 [kpiod] - 4 1 3 c000c000 IN 0.0 0 0 [kswapd] - 5 1 1 c0008000 IN 0.0 0 0 [mdrecoveryd] - 253 1 2 fbc4c000 IN 0.0 1088 376 portmap - 268 1 2 fbc82000 IN 0.1 1232 504 ypbind - 274 268 2 fa984000 IN 0.1 1260 556 ypbind - 321 1 1 fabf6000 IN 0.1 1264 608 syslogd - 332 1 1 fa9be000 RU 0.1 1364 736 klogd - 346 1 2 fae88000 IN 0.0 1112 472 atd - 360 1 2 faeb2000 IN 0.1 1284 592 crond - 378 1 2 fafd6000 IN 0.1 1236 560 inetd - 392 1 0 fb710000 IN 0.1 2264 1468 named - 406 1 3 fb768000 IN 0.1 1284 560 lpd - 423 1 1 fb8ac000 IN 0.1 1128 528 rpc.statd - 434 1 2 fb75a000 IN 0.0 1072 376 rpc.rquotad - 445 1 2 fb4a4000 IN 0.0 1132 456 rpc.mountd - 460 1 1 fa938000 IN 0.0 0 0 [nfsd] - 461 1 1 faa86000 IN 0.0 0 0 [nfsd] - 462 1 0 fac48000 IN 0.0 0 0 [nfsd] - 463 1 0 fb4ca000 IN 0.0 0 0 [nfsd] - 464 1 0 fb4c8000 IN 0.0 0 0 [nfsd] - 465 1 2 fba6e000 IN 0.0 0 0 [nfsd] - 466 1 1 fba6c000 IN 0.0 0 0 [nfsd] - 467 1 2 fac04000 IN 0.0 0 0 [nfsd] - 468 461 2 fa93a000 IN 0.0 0 0 [lockd] - 469 468 2 fa93e000 IN 0.0 0 0 [rpciod] - 486 1 0 fab54000 IN 0.1 1596 880 amd - 523 1 2 fa84e000 IN 0.1 1884 1128 sendmail - 538 1 0 fa82c000 IN 0.0 1112 416 gpm - 552 1 3 fa70a000 IN 0.1 2384 1220 httpd - 556 552 3 fa776000 IN 0.1 2572 1352 httpd - 557 552 2 faba4000 IN 0.1 2572 1352 httpd - 558 552 1 fa802000 IN 0.1 2572 1352 httpd - 559 552 3 fa6ee000 IN 0.1 2572 1352 httpd - 560 552 3 fa700000 IN 0.1 2572 1352 httpd - 561 552 0 fa6f0000 IN 0.1 2572 1352 httpd - 562 552 3 fa6ea000 IN 0.1 2572 1352 httpd - 563 552 0 fa67c000 IN 0.1 2572 1352 httpd - 564 552 3 fa674000 IN 0.1 2572 1352 httpd - 565 552 3 fa66a000 IN 0.1 2572 1352 httpd - 582 1 2 fa402000 IN 0.2 2968 1916 xfs - 633 1 2 fa1ec000 IN 0.2 5512 2248 innd - 636 1 3 fa088000 IN 0.1 2536 804 actived - 676 1 0 fa840000 IN 0.0 1060 384 mingetty - 677 1 1 fa590000 IN 0.0 1060 384 mingetty - 678 1 2 fa3b8000 IN 0.0 1060 384 mingetty - 679 1 0 fa5b8000 IN 0.0 1060 384 mingetty - 680 1 1 fa3a4000 IN 0.0 1060 384 mingetty - 681 1 2 fa30a000 IN 0.0 1060 384 mingetty - 683 1 3 fa5d8000 IN 0.0 1052 280 update - 686 378 1 fa3aa000 IN 0.1 2320 1136 in.rlogind - 687 686 2 f9e52000 IN 0.1 2136 1000 login - 688 687 0 f9dec000 IN 0.1 1732 976 bash - > 700 688 1 f9d62000 RU 0.0 1048 256 gen12 - - Display the parental hierarchy of the "crash" process on a live system: - - crash> ps -p 4249 - PID: 0 TASK: c0252000 CPU: 0 COMMAND: "swapper" - PID: 1 TASK: c009a000 CPU: 1 COMMAND: "init" - PID: 632 TASK: c73b6000 CPU: 1 COMMAND: "prefdm" - PID: 637 TASK: c5a4a000 CPU: 1 COMMAND: "prefdm" - PID: 649 TASK: c179a000 CPU: 0 COMMAND: "kwm" - PID: 683 TASK: c1164000 CPU: 0 COMMAND: "kfm" - PID: 1186 TASK: c165a000 CPU: 0 COMMAND: "xterm" - PID: 1188 TASK: c705e000 CPU: 1 COMMAND: "bash" - PID: 4249 TASK: c6b9a000 CPU: 0 COMMAND: "crash" - - Display all children of the "kwm" window manager: - - crash> ps -c kwm - PID: 649 TASK: c179a000 CPU: 0 COMMAND: "kwm" - PID: 682 TASK: c2d58000 CPU: 1 COMMAND: "kwmsound" - PID: 683 TASK: c1164000 CPU: 1 COMMAND: "kfm" - PID: 685 TASK: c053c000 CPU: 0 COMMAND: "krootwm" - PID: 686 TASK: c13fa000 CPU: 0 COMMAND: "kpanel" - PID: 687 TASK: c13f0000 CPU: 1 COMMAND: 
"kbgndwm" - - Display all threads in a firefox session: - - crash> ps firefox - PID PPID CPU TASK ST %MEM VSZ RSS COMM - 21273 21256 6 ffff81003ec15080 IN 46.3 1138276 484364 firefox - 21276 21256 6 ffff81003f49e7e0 IN 46.3 1138276 484364 firefox - 21280 21256 0 ffff81003ec1d7e0 IN 46.3 1138276 484364 firefox - 21286 21256 6 ffff81000b0d1820 IN 46.3 1138276 484364 firefox - 21287 21256 2 ffff81000b0d10c0 IN 46.3 1138276 484364 firefox - 26975 21256 5 ffff81003b5c1820 IN 46.3 1138276 484364 firefox - 26976 21256 5 ffff810023232820 IN 46.3 1138276 484364 firefox - 26977 21256 4 ffff810021a11820 IN 46.3 1138276 484364 firefox - 26978 21256 5 ffff810003159040 IN 46.3 1138276 484364 firefox - 26979 21256 5 ffff81003a058820 IN 46.3 1138276 484364 firefox - - Display only the thread group leader in the firefox session: - - crash> ps -G firefox - PID PPID CPU TASK ST %MEM VSZ RSS COMM - 21273 21256 0 ffff81003ec15080 IN 46.3 1138276 484364 firefox - - Show the time usage data for pid 10318: - - crash> ps -t 10318 - PID: 10318 TASK: f7b85550 CPU: 5 COMMAND: "bash" - RUN TIME: 1 days, 01:35:32 - START TIME: 5209 - UTIME: 95 - STIME: 57 - - Show the process status of PID 1, task f9dec000, and all nfsd tasks: - - crash> ps 1 f9dec000 nfsd - PID PPID CPU TASK ST %MEM VSZ RSS COMM - 1 0 1 c0098000 IN 0.0 1096 476 init - 688 687 0 f9dec000 IN 0.1 1732 976 bash - 460 1 1 fa938000 IN 0.0 0 0 [nfsd] - 461 1 1 faa86000 IN 0.0 0 0 [nfsd] - 462 1 0 fac48000 IN 0.0 0 0 [nfsd] - 463 1 0 fb4ca000 IN 0.0 0 0 [nfsd] - 464 1 0 fb4c8000 IN 0.0 0 0 [nfsd] - 465 1 2 fba6e000 IN 0.0 0 0 [nfsd] - 466 1 1 fba6c000 IN 0.0 0 0 [nfsd] - 467 1 2 fac04000 IN 0.0 0 0 [nfsd] - - Show all kernel threads: - - crash> ps -k - PID PPID CPU TASK ST %MEM VSZ RSS COMM - 0 0 1 c0fac000 RU 0.0 0 0 [swapper] - 0 0 0 c0252000 RU 0.0 0 0 [swapper] - 2 1 1 c0fa0000 IN 0.0 0 0 [kflushd] - 3 1 1 c03de000 IN 0.0 0 0 [kpiod] - 4 1 1 c03dc000 IN 0.0 0 0 [kswapd] - 5 1 0 c0092000 IN 0.0 0 0 [mdrecoveryd] - 336 1 0 c4a9a000 IN 0.0 0 0 [rpciod] - 337 1 0 c4830000 IN 0.0 0 0 [lockd] - 487 1 1 c4ba6000 IN 0.0 0 0 [nfsd] - 488 1 0 c18c6000 IN 0.0 0 0 [nfsd] - 489 1 0 c0cac000 IN 0.0 0 0 [nfsd] - 490 1 0 c056a000 IN 0.0 0 0 [nfsd] - 491 1 0 c0860000 IN 0.0 0 0 [nfsd] - 492 1 1 c0254000 IN 0.0 0 0 [nfsd] - 493 1 0 c0a86000 IN 0.0 0 0 [nfsd] - 494 1 0 c0968000 IN 0.0 0 0 [nfsd] - - Show all tasks sorted by their task_struct's last_run or timestamp value, - whichever applies: - - crash> ps -l - [280195] [RU] PID: 2 TASK: c1468000 CPU: 0 COMMAND: "keventd" - [280195] [IN] PID: 1986 TASK: c5af4000 CPU: 0 COMMAND: "sshd" - [280195] [IN] PID: 2039 TASK: c58e6000 CPU: 0 COMMAND: "sshd" - [280195] [RU] PID: 2044 TASK: c5554000 CPU: 0 COMMAND: "bash" - [280195] [RU] PID: 2289 TASK: c70c0000 CPU: 0 COMMAND: "s" - [280190] [IN] PID: 1621 TASK: c54f8000 CPU: 0 COMMAND: "cupsd" - [280184] [IN] PID: 5 TASK: c154c000 CPU: 0 COMMAND: "kswapd" - [280184] [IN] PID: 6 TASK: c7ff6000 CPU: 0 COMMAND: "kscand" - [280170] [IN] PID: 0 TASK: c038e000 CPU: 0 COMMAND: "swapper" - [280166] [IN] PID: 2106 TASK: c0c0c000 CPU: 0 COMMAND: "sshd" - [280166] [IN] PID: 2162 TASK: c03a4000 CPU: 0 COMMAND: "vmstat" - [280160] [IN] PID: 1 TASK: c154a000 CPU: 0 COMMAND: "init" - [280131] [IN] PID: 3 TASK: c11ce000 CPU: 0 COMMAND: "kapmd" - [280117] [IN] PID: 1568 TASK: c5a8c000 CPU: 0 COMMAND: "smartd" - [280103] [IN] PID: 1694 TASK: c4c66000 CPU: 0 COMMAND: "ntpd" - [280060] [IN] PID: 8 TASK: c7ff2000 CPU: 0 COMMAND: "kupdated" - [279767] [IN] PID: 1720 TASK: c4608000 CPU: 0 COMMAND: 
"sendmail" - [279060] [IN] PID: 13 TASK: c69f4000 CPU: 0 COMMAND: "kjournald" - [278657] [IN] PID: 1523 TASK: c5ad4000 CPU: 0 COMMAND: "ypbind" - [277712] [IN] PID: 2163 TASK: c06e0000 CPU: 0 COMMAND: "sshd" - [277711] [IN] PID: 2244 TASK: c4cdc000 CPU: 0 COMMAND: "ssh" - [277261] [IN] PID: 1391 TASK: c5d8e000 CPU: 0 COMMAND: "syslogd" - [276837] [IN] PID: 1990 TASK: c58d8000 CPU: 0 COMMAND: "bash" - [276802] [IN] PID: 1853 TASK: c3828000 CPU: 0 COMMAND: "atd" - [276496] [IN] PID: 1749 TASK: c4480000 CPU: 0 COMMAND: "cannaserver" - [274931] [IN] PID: 1760 TASK: c43ac000 CPU: 0 COMMAND: "crond" - [246773] [IN] PID: 1844 TASK: c38d8000 CPU: 0 COMMAND: "xfs" - [125620] [IN] PID: 2170 TASK: c48dc000 CPU: 0 COMMAND: "bash" - [119059] [IN] PID: 1033 TASK: c64be000 CPU: 0 COMMAND: "kjournald" - [110916] [IN] PID: 1663 TASK: c528a000 CPU: 0 COMMAND: "sshd" - [ 86122] [IN] PID: 2112 TASK: c0da6000 CPU: 0 COMMAND: "bash" - [ 13637] [IN] PID: 1891 TASK: c67ae000 CPU: 0 COMMAND: "sshd" - [ 13636] [IN] PID: 1894 TASK: c38ec000 CPU: 0 COMMAND: "bash" - [ 7662] [IN] PID: 1885 TASK: c6478000 CPU: 0 COMMAND: "mingetty" - [ 7662] [IN] PID: 1886 TASK: c62da000 CPU: 0 COMMAND: "mingetty" - [ 7662] [IN] PID: 1887 TASK: c5f8c000 CPU: 0 COMMAND: "mingetty" - [ 7662] [IN] PID: 1888 TASK: c5f88000 CPU: 0 COMMAND: "mingetty" - [ 7662] [IN] PID: 1889 TASK: c5f86000 CPU: 0 COMMAND: "mingetty" - [ 7662] [IN] PID: 1890 TASK: c6424000 CPU: 0 COMMAND: "mingetty" - [ 7661] [IN] PID: 4 TASK: c154e000 CPU: 0 COMMAND: "ksoftirqd/0" - [ 7595] [IN] PID: 1872 TASK: c2e7e000 CPU: 0 COMMAND: "inventory.pl" - [ 6617] [IN] PID: 1771 TASK: c435a000 CPU: 0 COMMAND: "jserver" - [ 6307] [IN] PID: 1739 TASK: c48f8000 CPU: 0 COMMAND: "gpm" - [ 6285] [IN] PID: 1729 TASK: c4552000 CPU: 0 COMMAND: "sendmail" - [ 6009] [IN] PID: 1395 TASK: c6344000 CPU: 0 COMMAND: "klogd" - [ 5820] [IN] PID: 1677 TASK: c4d74000 CPU: 0 COMMAND: "xinetd" - [ 5719] [IN] PID: 1422 TASK: c5d04000 CPU: 0 COMMAND: "portmap" - [ 4633] [IN] PID: 1509 TASK: c5ed4000 CPU: 0 COMMAND: "apmd" - [ 4529] [IN] PID: 1520 TASK: c5d98000 CPU: 0 COMMAND: "ypbind" - [ 4515] [IN] PID: 1522 TASK: c5d32000 CPU: 0 COMMAND: "ypbind" - [ 4373] [IN] PID: 1441 TASK: c5d48000 CPU: 0 COMMAND: "rpc.statd" - [ 4210] [IN] PID: 1352 TASK: c5b30000 CPU: 0 COMMAND: "dhclient" - [ 1184] [IN] PID: 71 TASK: c65b6000 CPU: 0 COMMAND: "khubd" - [ 434] [IN] PID: 9 TASK: c11de000 CPU: 0 COMMAND: "mdrecoveryd" - [ 48] [IN] PID: 7 TASK: c7ff4000 CPU: 0 COMMAND: "bdflush" - - Show the kernel stack pointer of each user task: - - crash> ps -us - PID PPID CPU KSTACKP ST %MEM VSZ RSS COMM - 1 0 0 c009bedc IN 0.0 1096 52 init - 239 1 0 c15e7ed8 IN 0.2 1332 224 pump - 280 1 1 c7cbdedc IN 0.2 1092 208 portmap - 295 1 0 c7481edc IN 0.0 1232 0 ypbind - 301 295 0 c7c7bf28 IN 0.1 1260 124 ypbind - 376 1 1 c5053f28 IN 0.0 1316 40 automount - 381 1 0 c34ddf28 IN 0.2 1316 224 automount - 391 1 1 c2777f28 IN 0.2 1316 224 automount - ... 
- - Display the argument and environment data for the automount task: - - crash> ps -a automount - PID: 3948 TASK: f722ee30 CPU: 0 COMMAND: "automount" - ARG: /usr/sbin/automount --timeout=60 /net program /etc/auto.net - ENV: SELINUX_INIT=YES - CONSOLE=/dev/console - TERM=linux - INIT_VERSION=sysvinit-2.85 - PATH=/sbin:/usr/sbin:/bin:/usr/bin - LC_MESSAGES=en_US - RUNLEVEL=3 - runlevel=3 - PWD=/ - LANG=ja_JP.UTF-8 - PREVLEVEL=N - previous=N - HOME=/ - SHLVL=2 - _=/usr/sbin/automount - - Display the tasks in the thread group containing task c20ab0b0: - - crash> ps -g c20ab0b0 - PID: 6425 TASK: f72f50b0 CPU: 0 COMMAND: "firefox-bin" - PID: 6516 TASK: f71bf1b0 CPU: 0 COMMAND: "firefox-bin" - PID: 6518 TASK: d394b930 CPU: 0 COMMAND: "firefox-bin" - PID: 6520 TASK: c20aa030 CPU: 0 COMMAND: "firefox-bin" - PID: 6523 TASK: c20ab0b0 CPU: 0 COMMAND: "firefox-bin" - PID: 6614 TASK: f1f181b0 CPU: 0 COMMAND: "firefox-bin" - - Display the tasks in the thread group for each instance of the - program named "multi-thread": - - crash> ps -g multi-thread - PID: 2522 TASK: 1003f0dc7f0 CPU: 1 COMMAND: "multi-thread" - PID: 2523 TASK: 10037b13030 CPU: 1 COMMAND: "multi-thread" - PID: 2524 TASK: 1003e064030 CPU: 1 COMMAND: "multi-thread" - PID: 2525 TASK: 1003e13a7f0 CPU: 1 COMMAND: "multi-thread" - - PID: 2526 TASK: 1002f82b7f0 CPU: 1 COMMAND: "multi-thread" - PID: 2527 TASK: 1003e1737f0 CPU: 1 COMMAND: "multi-thread" - PID: 2528 TASK: 10035b4b7f0 CPU: 1 COMMAND: "multi-thread" - PID: 2529 TASK: 1003f0c37f0 CPU: 1 COMMAND: "multi-thread" - PID: 2530 TASK: 10035597030 CPU: 1 COMMAND: "multi-thread" - PID: 2531 TASK: 100184be7f0 CPU: 1 COMMAND: "multi-thread" - - Display the resource limits of "bash" task 13896: - - crash> ps -r 13896 - PID: 13896 TASK: cf402000 CPU: 0 COMMAND: "bash" - RLIMIT CURRENT MAXIMUM - CPU (unlimited) (unlimited) - FSIZE (unlimited) (unlimited) - DATA (unlimited) (unlimited) - STACK 10485760 (unlimited) - CORE (unlimited) (unlimited) - RSS (unlimited) (unlimited) - NPROC 4091 4091 - NOFILE 1024 1024 - MEMLOCK 4096 4096 - AS (unlimited) (unlimited) - LOCKS (unlimited) (unlimited) - - Search for task names matching a POSIX regular expression: - - crash> ps 'migration*' - PID PPID CPU TASK ST %MEM VSZ RSS COMM - 8 2 0 ffff8802128a2e20 IN 0.0 0 0 [migration/0] - 10 2 1 ffff880212969710 IN 0.0 0 0 [migration/1] - 15 2 2 ffff880212989710 IN 0.0 0 0 [migration/2] - 20 2 3 ffff8802129a9710 IN 0.0 0 0 [migration/3] - """ def format_usage(self) -> str: return \ "ps [-k|-u|-G][-s][-p|-c|-t|-l|-a|-g|-r] [pid | taskp | command] ...\n" diff --git a/crash/commands/syscmd.py b/crash/commands/syscmd.py index 7a6a6e5b711..147b53eb53f 100644 --- a/crash/commands/syscmd.py +++ b/crash/commands/syscmd.py @@ -1,5 +1,44 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +""" +SUMMARY +------- + +Display system information and configuration data +:: + + sys [config] + +DESCRIPTION +----------- + + This command displays system-specific data. If no arguments are entered, + the same system data shown during crash invocation is shown. + + ``config`` If the kernel was configured with ``CONFIG_IKCONFIG``, then + dump the in-kernel configuration data. 
+ +EXAMPLES +-------- + + Display essential system information: + +:: + + py-crash> sys config + KERNEL: vmlinux.4 + DUMPFILE: lcore.cr.4 + CPUS: 4 + DATE: Mon Oct 11 18:48:55 1999 + UPTIME: 10 days, 14:14:39 + LOAD AVERAGE: 0.74, 0.23, 0.08 + TASKS: 77 + NODENAME: test.mclinux.com + RELEASE: 2.2.5-15smp + VERSION: #24 SMP Mon Oct 11 17:41:40 CDT 1999 + MACHINE: i686 (500 MHz) + MEMORY: 1 GB +""" import argparse @@ -7,44 +46,11 @@ from crash.commands import CommandLineError from crash.cache.syscache import utsname, config, kernel -class _Parser(ArgumentParser): - """ - NAME - sys - system data - - SYNOPSIS - sys [config] - - DESCRIPTION - This command displays system-specific data. If no arguments are entered, - the same system data shown during crash invocation is shown. - - config If the kernel was configured with CONFIG_IKCONFIG, then - dump the in-kernel configuration data. - - EXAMPLES - Display essential system information: - - crash> sys config - KERNEL: vmlinux.4 - DUMPFILE: lcore.cr.4 - CPUS: 4 - DATE: Mon Oct 11 18:48:55 1999 - UPTIME: 10 days, 14:14:39 - LOAD AVERAGE: 0.74, 0.23, 0.08 - TASKS: 77 - NODENAME: test.mclinux.com - RELEASE: 2.2.5-15smp - VERSION: #24 SMP Mon Oct 11 17:41:40 CDT 1999 - MACHINE: i686 (500 MHz) - MEMORY: 1 GB - """ - class SysCommand(Command): """system data""" def __init__(self, name: str) -> None: - parser = _Parser(prog=name) + parser = ArgumentParser(prog=name) parser.add_argument('config', nargs='?') diff --git a/crash/commands/task.py b/crash/commands/task.py index 43f26ab9807..535bf0b629d 100644 --- a/crash/commands/task.py +++ b/crash/commands/task.py @@ -1,27 +1,35 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +""" +SUMMARY +------- -import argparse +Select task by pid -from crash.commands import Command, ArgumentParser -import crash.cache.tasks +:: -import gdb + task + +DESCRIPTION +----------- + +This command selects the appropriate gdb thread using its Linux pid. -class _Parser(ArgumentParser): - """ - NAME - task - select task by pid +If no pid is specified, the current pid will be displayed. - SYNOPSIS - task +EXAMPLES +-------- - DESCRIPTION - This command selects the appropriate gdb thread using its Linux pid. +:: + task 1402 +""" - EXAMPLES - task 1402 - """ +import argparse + +from crash.commands import Command, ArgumentParser +import crash.cache.tasks + +import gdb class TaskCommand(Command): """select task by pid""" diff --git a/crash/commands/vtop.py b/crash/commands/vtop.py index f3f902fe01f..ed3080469c6 100644 --- a/crash/commands/vtop.py +++ b/crash/commands/vtop.py @@ -1,5 +1,136 @@ #!/usr/bin/python3 # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +""" +SUMMARY +------- + +Translate virtual addresses to physical addresses + +:: + + vtop [-c [pid | taskp]] [-u|-k] address ... + + +DESCRIPTION +----------- + +This command translates a user or kernel virtual address to its physical +address. Also displayed is the PTE translation, the vm_area_struct data +for user virtual addresses, the mem_map page data associated with the +physical page, and the swap location or file location if the page is +not mapped. The -u and -k options specify that the address is a user +or kernel virtual address; -u and -k are not necessary on processors whose +virtual addresses self-define themselves as user or kernel. User addresses +are translated with respect to the current context unless the -c option +is used. 
Kernel virtual addresses are translated using the swapper_pg_dir +as the base page directory unless the -c option is used. + + -u The address is a user virtual address; only required + on processors with overlapping user and kernel virtual + address spaces. + -k The address is a kernel virtual address; only required + on processors with overlapping user and kernel virtual + address spaces. + -c pid-or-taskp Translate the virtual address from the page directory + of the specified PID or hexadecimal task_struct pointer. + However, if this command is invoked from "foreach vtop", + the pid or taskp argument should NOT be entered; the + address will be translated using the page directory of + each task specified by "foreach". + +``address`` A hexadecimal user or kernel virtual address. + +NOTE +---- + +Although the ``-c`` option is referenced in the documentation, it +is currently unimplemented and will cause a command error. + +EXAMPLES +-------- + +Translate user virtual address 80b4000: + +:: + + py-crash> vtop 80b4000 + VIRTUAL PHYSICAL + 80b4000 660f000 + + PAGE DIRECTORY: c37f0000 + PGD: c37f0080 => e0d067 + PMD: c37f0080 => e0d067 + PTE: c0e0d2d0 => 660f067 + PAGE: 660f000 + + PTE PHYSICAL FLAGS + 660f067 660f000 (PRESENT|RW|USER|ACCESSED|DIRTY) + + VMA START END FLAGS FILE + c773daa0 80b4000 810c000 77 + + PAGE PHYSICAL INODE OFFSET CNT FLAGS + c0393258 660f000 0 17000 1 uptodate + +Translate kernel virtual address c806e000, first using swapper_pg_dir +as the page directory base, and secondly, using the page table base +of PID 1359: + +:: + + py-crash> vtop c806e000 + VIRTUAL PHYSICAL + c806e000 2216000 + + PAGE DIRECTORY: c0101000 + PGD: c0101c80 => 94063 + PMD: c0101c80 => 94063 + PTE: c00941b8 => 2216063 + PAGE: 2216000 + + PTE PHYSICAL FLAGS + 2216063 2216000 (PRESENT|RW|ACCESSED|DIRTY) + + PAGE PHYSICAL INODE OFFSET CNT FLAGS + c02e9370 2216000 0 0 1 + + py-crash> vtop -c 1359 c806e000 + VIRTUAL PHYSICAL + c806e000 2216000 + + PAGE DIRECTORY: c5caf000 + PGD: c5cafc80 => 94063 + PMD: c5cafc80 => 94063 + PTE: c00941b8 => 2216063 + PAGE: 2216000 + + PTE PHYSICAL FLAGS + 2216063 2216000 (PRESENT|RW|ACCESSED|DIRTY) + + PAGE PHYSICAL INODE OFFSET CNT FLAGS + c02e9370 2216000 0 0 1 + +Determine swap location of user virtual address 40104000: + +:: + + py-crash> vtop 40104000 + VIRTUAL PHYSICAL + 40104000 (not mapped) + + PAGE DIRECTORY: c40d8000 + PGD: c40d8400 => 6bbe067 + PMD: c40d8400 => 6bbe067 + PTE: c6bbe410 => 58bc00 + + PTE SWAP OFFSET + 58bc00 /dev/sda8 22716 + + VMA START END FLAGS FILE + c7200ae0 40104000 40b08000 73 + + SWAP: /dev/sda8 OFFSET: 22716 +""" import argparse import addrxlat @@ -72,120 +203,6 @@ def address(self) -> str: return addr + 'N/A' class _Parser(ArgumentParser): - """ - NAME - vtop - virtual to physical - - SYNOPSIS - vtop [-c [pid | taskp]] [-u|-k] address ... - - DESCRIPTION - This command translates a user or kernel virtual address to its physical - address. Also displayed is the PTE translation, the vm_area_struct data - for user virtual addresses, the mem_map page data associated with the - physical page, and the swap location or file location if the page is - not mapped. The -u and -k options specify that the address is a user - or kernel virtual address; -u and -k are not necessary on processors whose - virtual addresses self-define themselves as user or kernel. User addresses - are translated with respect to the current context unless the -c option - is used. 
Kernel virtual addresses are translated using the swapper_pg_dir - as the base page directory unless the -c option is used. - - -u The address is a user virtual address; only required - on processors with overlapping user and kernel virtual - address spaces. - -k The address is a kernel virtual address; only required - on processors with overlapping user and kernel virtual - address spaces. - -c [pid | taskp] Translate the virtual address from the page directory - of the specified PID or hexadecimal task_struct pointer. - However, if this command is invoked from "foreach vtop", - the pid or taskp argument should NOT be entered; the - address will be translated using the page directory of - each task specified by "foreach". - address A hexadecimal user or kernel virtual address. - - NOTE - Although the -c option is referenced in the documentation, it - is currently unimplemented and will cause a command error. - - EXAMPLES - Translate user virtual address 80b4000: - - crash> vtop 80b4000 - VIRTUAL PHYSICAL - 80b4000 660f000 - - PAGE DIRECTORY: c37f0000 - PGD: c37f0080 => e0d067 - PMD: c37f0080 => e0d067 - PTE: c0e0d2d0 => 660f067 - PAGE: 660f000 - - PTE PHYSICAL FLAGS - 660f067 660f000 (PRESENT|RW|USER|ACCESSED|DIRTY) - - VMA START END FLAGS FILE - c773daa0 80b4000 810c000 77 - - PAGE PHYSICAL INODE OFFSET CNT FLAGS - c0393258 660f000 0 17000 1 uptodate - - Translate kernel virtual address c806e000, first using swapper_pg_dir - as the page directory base, and secondly, using the page table base - of PID 1359: - - crash> vtop c806e000 - VIRTUAL PHYSICAL - c806e000 2216000 - - PAGE DIRECTORY: c0101000 - PGD: c0101c80 => 94063 - PMD: c0101c80 => 94063 - PTE: c00941b8 => 2216063 - PAGE: 2216000 - - PTE PHYSICAL FLAGS - 2216063 2216000 (PRESENT|RW|ACCESSED|DIRTY) - - PAGE PHYSICAL INODE OFFSET CNT FLAGS - c02e9370 2216000 0 0 1 - - crash> vtop -c 1359 c806e000 - VIRTUAL PHYSICAL - c806e000 2216000 - - PAGE DIRECTORY: c5caf000 - PGD: c5cafc80 => 94063 - PMD: c5cafc80 => 94063 - PTE: c00941b8 => 2216063 - PAGE: 2216000 - - PTE PHYSICAL FLAGS - 2216063 2216000 (PRESENT|RW|ACCESSED|DIRTY) - - PAGE PHYSICAL INODE OFFSET CNT FLAGS - c02e9370 2216000 0 0 1 - - Determine swap location of user virtual address 40104000: - - crash> vtop 40104000 - VIRTUAL PHYSICAL - 40104000 (not mapped) - - PAGE DIRECTORY: c40d8000 - PGD: c40d8400 => 6bbe067 - PMD: c40d8400 => 6bbe067 - PTE: c6bbe410 => 58bc00 - - PTE SWAP OFFSET - 58bc00 /dev/sda8 22716 - - VMA START END FLAGS FILE - c7200ae0 40104000 40b08000 73 - - SWAP: /dev/sda8 OFFSET: 22716 - """ def format_usage(self) -> str: return "vtop [-c [pid | taskp]] [-u|-k] address ...\n" @@ -193,7 +210,7 @@ class VTOPCommand(Command): """convert virtual address to physical""" def __init__(self) -> None: - parser = ArgumentParser(prog="vtop") + parser = _Parser(prog="vtop") group = parser.add_mutually_exclusive_group() group.add_argument('-u', action='store_true', default=False) diff --git a/crash/commands/xfs.py b/crash/commands/xfs.py index d4f7ef400a9..f3c541dd6f7 100644 --- a/crash/commands/xfs.py +++ b/crash/commands/xfs.py @@ -1,5 +1,25 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +""" +SUMMARY +------- + +Display XFS internal data structures + +:: + + xfs [arguments ...] 
+ +COMMANDS +-------- + +:: + + xfs list + xfs show + xfs dump-ail + xfs dump-buft +""" import argparse @@ -27,21 +47,6 @@ types = Types(['struct xfs_buf *']) -class _Parser(ArgumentParser): - """ - NAME - xfs - display XFS internal data structures - - SYNOPSIS - xfs [arguments ...] - - COMMANDS - xfs list - xfs show - xfs dump-ail - xfs dump-buft - """ - class XFSCommand(Command): """display XFS internal data structures""" diff --git a/doc-source/conf.py b/doc-source/conf.py index fb58529e72f..31686607b7e 100644 --- a/doc-source/conf.py +++ b/doc-source/conf.py @@ -19,9 +19,11 @@ # import os import sys +sys.path.insert(0, os.path.abspath('..')) sys.path.insert(0, os.path.abspath('./mock')) sys.path.insert(0, os.path.abspath('.')) +from sphinx.ext import autodoc def run_apidoc(_): try: @@ -40,17 +42,36 @@ def run_apidoc(_): sys.path.append(os.path.join(os.path.dirname(__file__), '..')) cur_dir = os.path.abspath(os.path.dirname(__file__)) argv = [ '-M', '-e', '-H', 'Crash API Reference', '-f', - '-o', out + "/crash", crash_mod ] + '-o', out + "/crash", crash_mod , f'*crash/commands/[a-z]*' ] main(argv) + + # We want to document the commands as part of the command reference + # not the API documentation. + f = open("doc-source/crash/crash.commands.rst") + lines = f.readlines() + f.close() + f = open("doc-source/crash/crash.commands.rst", "w") + printit = True + for line in lines: + if 'Submodules' in line: + printit = False + elif 'Module contents' in line: + printit = True + + if printit: + print(line, file=f, end='') + f.close() + argv = [ '-M', '-e', '-H', 'Kdump Target API Reference', '-f', - '-o', out + "/kdump", kdump_mod ] + '-o', out + "/kdump", kdump_mod ] main(argv) make_gdb_refs.make_gdb_refs() def setup(app): - app.connect('builder-inited', run_apidoc) + app.connect('builder-inited', run_apidoc) + # -- General configuration ------------------------------------------------ @@ -62,13 +83,13 @@ def setup(app): # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = ['sphinx.ext.autodoc', - 'sphinx.ext.coverage', - 'sphinx.ext.intersphinx', - 'sphinx.ext.viewcode', - 'sphinx.ext.napoleon'] + 'sphinx.ext.coverage', + 'sphinx.ext.intersphinx', + 'sphinx.ext.viewcode', + 'sphinx.ext.napoleon'] intersphinx_mapping = { 'gdb' : - ("https://sourceware.org/gdb/onlinedocs/gdb/", "gdb.inv") } + ("https://sourceware.org/gdb/onlinedocs/gdb/", "gdb.inv") } # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] diff --git a/doc-source/mock/gdb/__init__.py b/doc-source/mock/gdb/__init__.py index 5abe61258fa..525b50a2b1c 100644 --- a/doc-source/mock/gdb/__init__.py +++ b/doc-source/mock/gdb/__init__.py @@ -63,9 +63,12 @@ def current_target(): class Block(object): pass +commands = list() + class Command(object): - def __init__(self, x, y): - pass + def __init__(self, name, parser): + self.name = name + commands.append(name) class NewObjFileEvent(object): pass diff --git a/doc-source/user_guide.rst b/doc-source/user_guide.rst index bd5723dc9f1..3ce3ddd833f 100644 --- a/doc-source/user_guide.rst +++ b/doc-source/user_guide.rst @@ -2,6 +2,6 @@ User Guide ========== .. toctree:: - crash-python -To be written. + crash-python + commands/commands From 8119808be85c0cf8f222daddb11c29e84df74a4b Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 7 Jun 2019 23:53:42 -0400 Subject: [PATCH 220/367] docs: fix generation of automatic docs This commit fixes the generation of command documentation and creation of the gdb.inv file. 
The former was broken since readthedocs doesn't use the makefile (and the script wasn't committed anyway) and the latter was placing the file in the wrong directory. Signed-off-by: Jeff Mahoney --- .gitignore | 5 +++ Makefile | 7 +--- doc-source/conf.py | 26 +++++++------ doc-source/gen_command_docs.py | 71 ++++++++++++++++++++++++++++++++++ doc-source/make_gdb_refs.py | 7 ++-- 5 files changed, 96 insertions(+), 20 deletions(-) create mode 100644 doc-source/gen_command_docs.py diff --git a/.gitignore b/.gitignore index 143d5d9fd33..210be211c0b 100644 --- a/.gitignore +++ b/.gitignore @@ -2,5 +2,10 @@ *~ doc-source/crash.*.rst doc-source/modules.rst +doc-source/gdb.inv +doc-source/commands +doc-source/_static +doc-source/_templates docs tests/test_imports.py +__pycache__ diff --git a/Makefile b/Makefile index c1f1e6bf9ac..8914dba9741 100644 --- a/Makefile +++ b/Makefile @@ -68,13 +68,10 @@ man-install: man $(INSTALL) -d -m 755 $(DESTDIR)$(man1dir) $(INSTALL) -m 644 $(GZ_MAN1) $(DESTDIR)$(man1dir) -doc-commands: FORCE - sh doc-source/gen-command-docs.sh - -doc-html: doc-source-clean doc-commands +doc-html: doc-source-clean sphinx-build -a -b html doc-source docs/html -doc-help: doc-source-clean doc-commands +doc-help: doc-source-clean sphinx-build -a -b text doc-source docs/text doc: doc-source-clean doc-html doc-help man FORCE diff --git a/doc-source/conf.py b/doc-source/conf.py index 31686607b7e..49832ad2b21 100644 --- a/doc-source/conf.py +++ b/doc-source/conf.py @@ -28,30 +28,27 @@ def run_apidoc(_): try: from sphinx.ext.apidoc import main - crash_mod = "../crash" - kdump_mod = "../kdump" - out = "." except ImportError as e: from sphinx.apidoc import main - crash_mod = "crash" - kdump_mod = "kdump" - out = "doc-source" import make_gdb_refs + import gen_command_docs import os import sys sys.path.append(os.path.join(os.path.dirname(__file__), '..')) cur_dir = os.path.abspath(os.path.dirname(__file__)) + + out_dir = os.path.join(cur_dir, "crash") + mod_dir = os.path.join(cur_dir, "..", "crash") argv = [ '-M', '-e', '-H', 'Crash API Reference', '-f', - '-o', out + "/crash", crash_mod , f'*crash/commands/[a-z]*' ] + '-o', out_dir, mod_dir , f'*crash/commands/[a-z]*' ] main(argv) - # We want to document the commands as part of the command reference # not the API documentation. 
- f = open("doc-source/crash/crash.commands.rst") + f = open(f"{cur_dir}/crash/crash.commands.rst") lines = f.readlines() f.close() - f = open("doc-source/crash/crash.commands.rst", "w") + f = open(f"{cur_dir}/crash/crash.commands.rst", "w") printit = True for line in lines: if 'Submodules' in line: @@ -63,11 +60,16 @@ def run_apidoc(_): print(line, file=f, end='') f.close() + out_dir = os.path.join(cur_dir, "kdump") + mod_dir = os.path.join(cur_dir, "..", "kdump") argv = [ '-M', '-e', '-H', 'Kdump Target API Reference', '-f', - '-o', out + "/kdump", kdump_mod ] + '-o', out_dir, mod_dir ] main(argv) - make_gdb_refs.make_gdb_refs() + print("*** Generating doc templates") + + make_gdb_refs.make_gdb_refs(cur_dir) + gen_command_docs.gen_command_docs(cur_dir) def setup(app): app.connect('builder-inited', run_apidoc) diff --git a/doc-source/gen_command_docs.py b/doc-source/gen_command_docs.py new file mode 100644 index 00000000000..0287ca670ed --- /dev/null +++ b/doc-source/gen_command_docs.py @@ -0,0 +1,71 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +import sys +import os +import fnmatch +import re + +sys.path.insert(0, 'doc-source/mock') +sys.path.insert(0, 'mock') +import gdb + +header = \ +""" +Command Reference +================= + +.. toctree:: + :titlesonly: + +""" + +def gen_command_docs(root: str) -> None: + modules = list() + print("*** Generating command docs") + + regex = re.compile(fnmatch.translate("[a-z]*.py")) + for _root, dirs, files in os.walk(f"{root}/../crash/commands"): + if '__pycache__' in _root: + continue + for filename in files: + path = os.path.join(_root, filename) + if regex.match(filename): + mod = filename.replace(".py", "") + modules.append(f"crash.commands.{mod}") + + old = set() + + try: + os.mkdir(f"{root}/commands") + except FileExistsError: + pass + + print(f"Writing {root}/commands/commands.rst") + cmdtoc = open(f"{root}/commands/commands.rst", "w") + + print(header, file=cmdtoc) + + print(f"** Generating docs for {modules}") + + for mod in modules: + __import__(mod) + + new = set(gdb.commands) + + commands = new - old + + for command in commands: + f = open(f"{root}/commands/{command}.rst", "w") + print(f"``{command}``", file=f) + print(f"----------------", file=f) + print(f".. automodule:: {mod}", file=f) + f.close() + + old = new + + for command in sorted(gdb.commands): + print(f" {command}", file=cmdtoc) + + cmdtoc.close() diff --git a/doc-source/make_gdb_refs.py b/doc-source/make_gdb_refs.py index e3c68e78eda..c8ac9c7e020 100644 --- a/doc-source/make_gdb_refs.py +++ b/doc-source/make_gdb_refs.py @@ -2,7 +2,6 @@ # This creates a mock objects.inv file to reference external documentation -from sphinx.ext import intersphinx from sphinx.util.inventory import InventoryFile class config(object): @@ -41,7 +40,8 @@ class MockBuilder(object): def get_target_uri(self, docname): return docname -def make_gdb_refs(): +def make_gdb_refs(root): + print("*** Generating gdb inventory file") env = MockEnvironment() builder = MockBuilder() @@ -62,4 +62,5 @@ def make_gdb_refs(): env.add_domain(classes) - InventoryFile.dump("gdb.inv", env, builder) + print(f"Writing {root}/gdb.inv") + InventoryFile.dump(f"{root}/gdb.inv", env, builder) From 3904acff54cd1494bb322b533cc722baf4d36b87 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 10 Jun 2019 12:06:54 -0400 Subject: [PATCH 221/367] crash: don't try to decompress a file that doesn't exist We'll try to zcat a compressed kernel whether or not it exists. 
Signed-off-by: Jeff Mahoney --- crash.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/crash.sh b/crash.sh index 79b01465442..d9cab197919 100755 --- a/crash.sh +++ b/crash.sh @@ -177,6 +177,12 @@ fi ZKERNEL="$1" KERNEL="${ZKERNEL%.gz}" + +if ! test -e "$ZKERNEL"; then + echo "$ZKERNEL: No such file or directory" + exit 1 +fi + if test "$KERNEL" != "$ZKERNEL"; then KERNEL="$TMPDIR/$(basename "$KERNEL")" zcat $ZKERNEL > $KERNEL From a1f84f0ee20b11f6876ec24349a4b52e3196bc58 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 10 Jun 2019 12:12:57 -0400 Subject: [PATCH 222/367] crash: move gdb compatibility testing to a separate module This commit moves the gdb compatibility testing to a separate module. It will be invoked using a separate gdb instance during startup. Signed-off-by: Jeff Mahoney --- Makefile | 5 ++++ crash.sh | 8 +++++++ crash/__init__.py | 40 ------------------------------- crash/exceptions.py | 6 +++++ crash/requirements/__init__.py | 44 ++++++++++++++++++++++++++++++++++ test-gdb-compatibility.gdbinit | 14 +++++++++++ 6 files changed, 77 insertions(+), 40 deletions(-) create mode 100644 crash/requirements/__init__.py create mode 100644 test-gdb-compatibility.gdbinit diff --git a/Makefile b/Makefile index 8914dba9741..92a3e84bd63 100644 --- a/Makefile +++ b/Makefile @@ -25,8 +25,13 @@ build: doc-help FORCE clean-build: clean build +datadir ?= /usr/share +pkgdatadir = $(datadir)/crash-python + install: man-install build python3 setup.py install + install -m 755 -d $(DESTDIR)$(pkgdatadir) + install -m 644 -t $(DESTDIR)$(pkgdatadir) test-gdb-compatibility.gdbinit unit-tests: clean-build make -C tests -s diff --git a/crash.sh b/crash.sh index d9cab197919..eb7e3f46a96 100755 --- a/crash.sh +++ b/crash.sh @@ -171,8 +171,16 @@ if [ -e "$DIR/setup.py" ]; then echo "python sys.path.insert(0, '$DIR/build/lib')" >> $GDBINIT popd > /dev/null export CRASH_PYTHON_HELP="$DIR/docs/text" + TEST_GDBINIT="test-gdb-compatibility.gdbinit" else export CRASH_PYTHON_HELP="/usr/share/doc/packages/crash-python" + :> $GDBINIT + TEST_GDBINIT="/usr/share/crash-python/test-gdb-compatibility.gdbinit" +fi + +if ! 
$GDB -nx -batch -x $GDBINIT -x $TEST_TARGET; then + echo "fatal: crash-python cannot initialize" >&2 + exit 1 fi ZKERNEL="$1" diff --git a/crash/__init__.py b/crash/__init__.py index 59a03f1b8f0..9e72c13b9b3 100644 --- a/crash/__init__.py +++ b/crash/__init__.py @@ -1,42 +1,2 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: - -# Perform some sanity checks to ensure that we can actually work -import gdb - -try: - x = gdb.Target -except AttributeError as e: - raise RuntimeError("the installed gdb doesn't provide gdb.Target") - -try: - x = gdb.lookup_symbol('x', None) -except TypeError as e: - raise RuntimeError("the installed gdb doesn't support looking up symbols without a gdb.Block") - -try: - x = gdb.MinSymbol -except AttributeError as e: - raise RuntimeError("the installed gdb doesn't provide gdb.MinSymbol") - -try: - x = gdb.Register -except AttributeError as e: - raise RuntimeError("the installed gdb doesn't provide gdb.Register") - -try: - x = gdb.Symbol.section -except AttributeError as e: - raise RuntimeError("the installed gdb doesn't provide gdb.Symbol.section") - -try: - x = gdb.Inferior.new_thread -except AttributeError as e: - raise RuntimeError("the installed gdb doesn't provide gdb.Inferior.new_thread") - -try: - x = gdb.Objfile.architecture -except AttributeError as e: - raise RuntimeError("the installed gdb doesn't provide gdb.Objfile.architecture") - -del x diff --git a/crash/exceptions.py b/crash/exceptions.py index c4434b82292..43f6d65d8b7 100644 --- a/crash/exceptions.py +++ b/crash/exceptions.py @@ -5,6 +5,12 @@ import gdb +class IncompatibleGDBError(RuntimeError): + """This version of GDB is incompatible""" + _fmt = "The installed gdb doesn't provide {}" + def __init__(self, message: str) -> None: + super().__init__(self._fmt.format(message)) + class MissingSymbolError(RuntimeError): """The requested symbol cannot be located.""" pass diff --git a/crash/requirements/__init__.py b/crash/requirements/__init__.py new file mode 100644 index 00000000000..0c097593717 --- /dev/null +++ b/crash/requirements/__init__.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +from crash.exceptions import IncompatibleGDBError + +# Perform some sanity checks to ensure that we can actually work +import gdb + +try: + x = gdb.Target +except AttributeError as e: + raise IncompatibleGDBError("gdb.Target") + +try: + x = gdb.lookup_symbol('x', None) +except TypeError as e: + raise IncompatibleGDBError("a compatible gdb.lookup_symbol") + +try: + x = gdb.MinSymbol +except AttributeError as e: + raise IncompatibleGDBError("gdb.MinSymbol") + +try: + x = gdb.Register +except AttributeError as e: + raise IncompatibleGDBError("gdb.Register") + +try: + x = gdb.Symbol.section +except AttributeError as e: + raise IncompatibleGDBError("gdb.Symbol.section") + +try: + x = gdb.Inferior.new_thread +except AttributeError as e: + raise IncompatibleGDBError("gdb.Inferior.new_thread") + +try: + x = gdb.Objfile.architecture +except AttributeError as e: + raise IncompatibleGDBError("gdb.Objfile.architecture") + +del x diff --git a/test-gdb-compatibility.gdbinit b/test-gdb-compatibility.gdbinit new file mode 100644 index 00000000000..a9f4cde9815 --- /dev/null +++ b/test-gdb-compatibility.gdbinit @@ -0,0 +1,14 @@ +# This gdbinit segment tests the invoked gdb for compatibility as +# a crash-python host. 
+python +import sys +import os + +from crash.exceptions import IncompatibleGDBError + +try: + import crash.requirements +except IncompatibleGDBError as e: + print(e) + sys.exit(1) +end From 0ab33104eca0b3d9565a32c9a458112b55db7c29 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 10 Jun 2019 12:20:50 -0400 Subject: [PATCH 223/367] crash: fix 'make install' with DESTDIR Without specifying --root to setup.py install, we'll end up failing to install as a regular user. Signed-off-by: Jeff Mahoney --- Makefile | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 92a3e84bd63..d3f6111dc73 100644 --- a/Makefile +++ b/Makefile @@ -28,8 +28,12 @@ clean-build: clean build datadir ?= /usr/share pkgdatadir = $(datadir)/crash-python +ifneq ($(DESTDIR),) +ROOT=--root $(DESTDIR) +endif + install: man-install build - python3 setup.py install + python3 setup.py install $(ROOT) install -m 755 -d $(DESTDIR)$(pkgdatadir) install -m 644 -t $(DESTDIR)$(pkgdatadir) test-gdb-compatibility.gdbinit From b230e6b6529658b9b6da014401deb4aa4b0c6a76 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 10 Jun 2019 12:23:32 -0400 Subject: [PATCH 224/367] crash: fix man page installation When I removed the asciidoc, I was a bit overzealous in cleanup and removed the installation variables for manpages. Signed-off-by: Jeff Mahoney --- Makefile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index d3f6111dc73..9613a78b0bb 100644 --- a/Makefile +++ b/Makefile @@ -64,8 +64,10 @@ pycrash.1 : crash-python.1 %.1.gz : %.1 $(GZIPCMD) -n -c $< > $@ +prefix ?= /usr +mandir ?= $(prefix)/share/man +man1dir = $(mandir)/man1 GZ_MAN1 := pycrash.1.gz crash-python.1.gz -MAN1 := $(patsubst %.asciidoc,%.1.gz,$(MAN1_TXT)) man: $(GZ_MAN1) From e6e46132ca31ad59a74c6b971cc4dde6b881b925 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 10 Jun 2019 12:25:45 -0400 Subject: [PATCH 225/367] crash: install documentation The help file commits assumed that the help files would be installed but we didn't install them. We also should install the manual. 
Signed-off-by: Jeff Mahoney --- Makefile | 23 ++++++++++++++++++++++- crash.sh | 2 +- 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 9613a78b0bb..053df107339 100644 --- a/Makefile +++ b/Makefile @@ -32,11 +32,31 @@ ifneq ($(DESTDIR),) ROOT=--root $(DESTDIR) endif -install: man-install build +install: man-install doc-help-install doc-text-install doc-html-install build python3 setup.py install $(ROOT) install -m 755 -d $(DESTDIR)$(pkgdatadir) install -m 644 -t $(DESTDIR)$(pkgdatadir) test-gdb-compatibility.gdbinit +helpdir=$(pkgdatadir)/help +doc-help-install: doc-help + install -d $(DESTDIR)$(helpdir)/commands + install -t $(DESTDIR)$(helpdir)/commands docs/text/commands/*.txt + +docdir=$(datadir)/doc/packages/crash-python +textdir=$(docdir)/text + +doc-text-install: doc-help + install -m 755 -d $(DESTDIR)$(textdir)/crash + install -m 644 -t $(DESTDIR)$(textdir)/crash docs/text/crash/*.txt + install -m 755 -d $(DESTDIR)$(textdir)/kdump + install -m 644 -t $(DESTDIR)$(textdir)/kdump docs/text/kdump/*.txt + install -m 644 -t $(DESTDIR)$(textdir) docs/text/*.txt + +htmldir=$(docdir)/html +doc-html-install: doc-html + install -m 755 -d $(DESTDIR)$(docdir) + cp -a docs/html $(DESTDIR)$(htmldir) + unit-tests: clean-build make -C tests -s sh tests/run-tests.sh @@ -84,6 +104,7 @@ doc-html: doc-source-clean doc-help: doc-source-clean sphinx-build -a -b text doc-source docs/text + rm -f docs/text/commands/commands.txt doc: doc-source-clean doc-html doc-help man FORCE diff --git a/crash.sh b/crash.sh index eb7e3f46a96..2490d6a0dd6 100755 --- a/crash.sh +++ b/crash.sh @@ -173,7 +173,7 @@ if [ -e "$DIR/setup.py" ]; then export CRASH_PYTHON_HELP="$DIR/docs/text" TEST_GDBINIT="test-gdb-compatibility.gdbinit" else - export CRASH_PYTHON_HELP="/usr/share/doc/packages/crash-python" + export CRASH_PYTHON_HELP="/usr/share/crash-python/help" :> $GDBINIT TEST_GDBINIT="/usr/share/crash-python/test-gdb-compatibility.gdbinit" fi From 17fb2c5fad78365543f5c9693cd2b0a11a61a6d4 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 10 Jun 2019 12:29:36 -0400 Subject: [PATCH 226/367] crash: test target compatibility Since crash-python-gdb now passes the thread to the fetch_registers callback, we'll need to detect that during compatibility or we'll fail during setup_tasks. 
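In other words, the callback shape we're probing for is roughly the
following (an illustrative sketch only; ProbeTarget is a placeholder
name, and the actual probe is the TestTarget added below):

    import gdb

    class ProbeTarget(gdb.Target):
        # crash-python-gdb now hands the selected thread to the target
        # along with the register to fetch.  A gdb that still invokes
        # fetch_registers() without the thread argument will fail as
        # soon as the probe registers a thread and switches to it,
        # which is exactly what the compatibility gdbinit exercises.
        def fetch_registers(self, thread, register):
            pass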
Signed-off-by: Jeff Mahoney --- crash/requirements/test_target.py | 33 +++++++++++++++++++++++++++++++ test-gdb-compatibility.gdbinit | 16 +++++++++++++++ tests/test_target.py | 2 +- 3 files changed, 50 insertions(+), 1 deletion(-) create mode 100644 crash/requirements/test_target.py diff --git a/crash/requirements/test_target.py b/crash/requirements/test_target.py new file mode 100644 index 00000000000..06e2c5e5acc --- /dev/null +++ b/crash/requirements/test_target.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +from typing import Tuple + +import gdb + +PTID = Tuple[int, int, int] + +class TestTarget(gdb.Target): + def __init__(self) -> None: + super().__init__() + + self.shortname = "testtarget" + self.longname = "Target to test Target compatibility" + self.register() + + def open(self, args: str, from_tty: bool) -> None: + pass + + def close(self) -> None: + pass + + def fetch_registers(self, thread: gdb.InferiorThread, + register: gdb.Register) -> None: + pass + + # pylint: disable=unused-argument + def thread_alive(self, ptid: PTID) -> bool: + return True + + def setup_task(self) -> None: + ptid = (1, 1, 0) + gdb.selected_inferior().new_thread(ptid, self) diff --git a/test-gdb-compatibility.gdbinit b/test-gdb-compatibility.gdbinit index a9f4cde9815..1bef4e030e6 100644 --- a/test-gdb-compatibility.gdbinit +++ b/test-gdb-compatibility.gdbinit @@ -8,7 +8,23 @@ from crash.exceptions import IncompatibleGDBError try: import crash.requirements + from crash.requirements.test_target import TestTarget + target = TestTarget() except IncompatibleGDBError as e: print(e) sys.exit(1) end + +target testtarget foo + +python +try: + gdb.execute('set print thread-events 0') + target.setup_task() + gdb.execute("thread 1", to_string=True) + sys.exit(0) +except gdb.error as e: + print(e) + print("This version of gdb is not compatible with crash-python") + sys.exit(1) +end diff --git a/tests/test_target.py b/tests/test_target.py index 27df6590904..dd824992c64 100644 --- a/tests/test_target.py +++ b/tests/test_target.py @@ -6,7 +6,7 @@ import os.path from kdump.target import Target -class TestUtil(unittest.TestCase): +class TestTarget(unittest.TestCase): def setUp(self): gdb.execute("file") self.do_real_tests = os.path.exists("tests/vmcore") From 010bdea64b7642bbeef492fc6d5e4afb3ec8bf78 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 10 Jun 2019 12:41:00 -0400 Subject: [PATCH 227/367] crash: don't rebuild docs unnecessarily We don't use the help docs during testing so skip rebuilding them for testing. We also don't need sphinx-build -a. 
Signed-off-by: Jeff Mahoney --- Makefile | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/Makefile b/Makefile index 053df107339..f86f471e135 100644 --- a/Makefile +++ b/Makefile @@ -16,14 +16,17 @@ doc-source-clean: doc-clean: doc-source-clean rm -rf docs -clean: doc-clean man-clean +clean: doc-clean man-clean build-clean make -C tests clean + +build-clean: rm -rf build build: doc-help FORCE python3 setup.py -q build -clean-build: clean build +force-rebuild: build-clean + python3 setup.py -q build datadir ?= /usr/share pkgdatadir = $(datadir)/crash-python @@ -57,17 +60,17 @@ doc-html-install: doc-html install -m 755 -d $(DESTDIR)$(docdir) cp -a docs/html $(DESTDIR)$(htmldir) -unit-tests: clean-build +unit-tests: force-rebuild make -C tests -s sh tests/run-tests.sh -lint: clean-build +lint: force-rebuild sh tests/run-pylint.sh $(PYLINT_ARGS) crash kdump -static-check: clean-build +static-check: force-rebuild sh tests/run-static-checks.sh -live-tests: clean-build +live-tests: force-rebuild sh tests/run-kernel-tests.sh $(INI_FILES) test: unit-tests static-check lint live-tests @@ -75,7 +78,6 @@ test: unit-tests static-check lint live-tests full-test: test doc - pycrash.1 : crash-python.1 %.1 : doc-source/%.rst doc-source/conf.py @@ -100,10 +102,10 @@ man-install: man $(INSTALL) -m 644 $(GZ_MAN1) $(DESTDIR)$(man1dir) doc-html: doc-source-clean - sphinx-build -a -b html doc-source docs/html + sphinx-build -b html doc-source docs/html doc-help: doc-source-clean - sphinx-build -a -b text doc-source docs/text + sphinx-build -b text doc-source docs/text rm -f docs/text/commands/commands.txt doc: doc-source-clean doc-html doc-help man FORCE From e064f7e337338e30da0e10843901f9fbf8404279 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 10 Jun 2019 12:46:42 -0400 Subject: [PATCH 228/367] crash: install crash.sh script and test-gdb-compartibility.gdbinit Signed-off-by: Jeff Mahoney --- Makefile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Makefile b/Makefile index f86f471e135..2f759c919e1 100644 --- a/Makefile +++ b/Makefile @@ -39,6 +39,9 @@ install: man-install doc-help-install doc-text-install doc-html-install build python3 setup.py install $(ROOT) install -m 755 -d $(DESTDIR)$(pkgdatadir) install -m 644 -t $(DESTDIR)$(pkgdatadir) test-gdb-compatibility.gdbinit + install -m 755 -d $(DESTDIR)/usr/bin + install -m 755 crash.sh $(DESTDIR)/usr/bin/crash-python + ln -fs crash-python $(DESTDIR)/usr/bin/pycrash helpdir=$(pkgdatadir)/help doc-help-install: doc-help From dc993c15fb3a8d54d5ceffea1764237852b5c6e5 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 10 Jun 2019 13:24:54 -0400 Subject: [PATCH 229/367] docs: create gdb.inv prior to apidoc intersphinx and our apidoc handler both run as builder-inited callbacks, which means that gdb.inv isn't created for the first run through. The proper way to create the file is via a config-inited event callback. This works fine for readthedocs. My local system uses an earlier version without that callback so we'll use a make rule to create it instead. 
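The resulting hookup boils down to this (a sketch of the conf.py change
below; init_callback and run_apidoc are defined there):

    from sphinx.errors import ExtensionError

    def setup(app):
        try:
            # 'config-inited' fires before any 'builder-inited' callback,
            # so gdb.inv exists by the time intersphinx looks for it.
            app.connect('config-inited', init_callback)
        except ExtensionError:
            # Older Sphinx has no 'config-inited'; the Makefile rule
            # generates gdb.inv up front instead.
            pass
        app.connect('builder-inited', run_apidoc)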
Signed-off-by: Jeff Mahoney --- Makefile | 7 +++++-- doc-source/conf.py | 14 ++++++++++++-- doc-source/make_gdb_refs.py | 5 +++++ 3 files changed, 22 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 2f759c919e1..53a5bc3d159 100644 --- a/Makefile +++ b/Makefile @@ -104,10 +104,13 @@ man-install: man $(INSTALL) -d -m 755 $(DESTDIR)$(man1dir) $(INSTALL) -m 644 $(GZ_MAN1) $(DESTDIR)$(man1dir) -doc-html: doc-source-clean +gdb.inv: + python3 doc-source/make_gdb_refs.py + +doc-html: gdb.inv doc-source-clean sphinx-build -b html doc-source docs/html -doc-help: doc-source-clean +doc-help: gdb.inv doc-source-clean sphinx-build -b text doc-source docs/text rm -f docs/text/commands/commands.txt diff --git a/doc-source/conf.py b/doc-source/conf.py index 49832ad2b21..bddfa56d4a7 100644 --- a/doc-source/conf.py +++ b/doc-source/conf.py @@ -24,13 +24,13 @@ sys.path.insert(0, os.path.abspath('.')) from sphinx.ext import autodoc +from sphinx.errors import ExtensionError def run_apidoc(_): try: from sphinx.ext.apidoc import main except ImportError as e: from sphinx.apidoc import main - import make_gdb_refs import gen_command_docs import os import sys @@ -68,10 +68,20 @@ def run_apidoc(_): print("*** Generating doc templates") - make_gdb_refs.make_gdb_refs(cur_dir) gen_command_docs.gen_command_docs(cur_dir) +def init_callback(x, y): + import make_gdb_refs + import os + cur_dir = os.path.abspath(os.path.dirname(__file__)) + make_gdb_refs.make_gdb_refs(cur_dir) + def setup(app): + try: + app.connect('config-inited', init_callback) + except ExtensionError as e: + pass + app.connect('builder-inited', run_apidoc) diff --git a/doc-source/make_gdb_refs.py b/doc-source/make_gdb_refs.py index c8ac9c7e020..25f1d31f721 100644 --- a/doc-source/make_gdb_refs.py +++ b/doc-source/make_gdb_refs.py @@ -64,3 +64,8 @@ def make_gdb_refs(root): print(f"Writing {root}/gdb.inv") InventoryFile.dump(f"{root}/gdb.inv", env, builder) + +if __name__ == '__main__': + import os + import sys + make_gdb_refs(os.path.dirname(sys.argv[0])) From 450ae7abd5d36d094186558ebf1dd8d542c5d6e3 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 10 Jun 2019 16:31:30 -0400 Subject: [PATCH 230/367] tests: don't assume 64-bit There's a single test that assumes bit size, the unsigned long test in test_util_symbols. Check the architecture before checking the size of a long. 
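A rough sketch of the check the test now performs (illustrative only, not from the tree):

    import platform

    def expected_long_size() -> int:
        # platform.architecture() returns a tuple such as ('64bit', 'ELF');
        # on the Linux targets these tests run against, sizeof(unsigned long)
        # follows the pointer size.
        return 8 if platform.architecture()[0] == '64bit' else 4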
Signed-off-by: Jeff Mahoney --- tests/test_util_symbols.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/test_util_symbols.py b/tests/test_util_symbols.py index 3bb43e9f372..e32c6728e5f 100644 --- a/tests/test_util_symbols.py +++ b/tests/test_util_symbols.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: import unittest +import platform import gdb from crash.exceptions import DelayedAttributeError @@ -238,4 +239,7 @@ def test_type_callback_multi(self): self.assertTrue(x.ulong_valid) y = x.types.unsigned_long_type self.assertTrue(isinstance(y, gdb.Type)) - self.assertTrue(y.sizeof > 4) + if platform.architecture()[0] == '64bit': + self.assertTrue(y.sizeof == 8) + else: + self.assertTrue(y.sizeof == 4) From 0438b084e6788328aaa08aa8d175124ad9bad945 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 10 Jun 2019 17:54:42 -0400 Subject: [PATCH 231/367] lint: fix unnecessary-pass warnings This commit fixes the following lint warnings: ************* Module crash.exceptions crash/exceptions.py:54:4: W0107: Unnecessary pass statement (unnecessary-pass) ************* Module crash.exceptions crash/exceptions.py:16:4: W0107: Unnecessary pass statement (unnecessary-pass) crash/exceptions.py:20:4: W0107: Unnecessary pass statement (unnecessary-pass) crash/exceptions.py:24:4: W0107: Unnecessary pass statement (unnecessary-pass) crash/exceptions.py:57:4: W0107: Unnecessary pass statement (unnecessary-pass) ************* Module crash.kernel crash/kernel.py:25:4: W0107: Unnecessary pass statement (unnecessary-pass) ************* Module crash.subsystem.storage.decoders crash/subsystem/storage/decoders.py:41:8: W0107: Unnecessary pass statement (unnecessary-pass) crash/subsystem/storage/decoders.py:69:8: W0107: Unnecessary pass statement (unnecessary-pass) crash/subsystem/storage/decoders.py:89:8: W0107: Unnecessary pass statement (unnecessary-pass) ************* Module crash.util crash/util/__init__.py:30:4: W0107: Unnecessary pass statement (unnecessary-pass) ************* Module crash.types.slab crash/types/slab.py:636:4: W0107: Unnecessary pass statement (unnecessary-pass) Signed-off-by: Jeff Mahoney --- crash/commands/__init__.py | 2 -- crash/exceptions.py | 4 ---- crash/kernel.py | 1 - crash/subsystem/storage/decoders.py | 3 --- crash/types/slab.py | 1 - crash/util/__init__.py | 1 - 6 files changed, 12 deletions(-) diff --git a/crash/commands/__init__.py b/crash/commands/__init__.py index 6bf304c02a4..552a1846844 100644 --- a/crash/commands/__init__.py +++ b/crash/commands/__init__.py @@ -65,11 +65,9 @@ def execute(self, args: argparse.Namespace) -> None: class CommandError(RuntimeError): """An error occured while executing this command""" - pass class CommandLineError(RuntimeError): """An error occured while handling the command line for this command""" - pass class ArgumentParser(argparse.ArgumentParser): """ diff --git a/crash/exceptions.py b/crash/exceptions.py index 43f6d65d8b7..8dca0082642 100644 --- a/crash/exceptions.py +++ b/crash/exceptions.py @@ -13,15 +13,12 @@ def __init__(self, message: str) -> None: class MissingSymbolError(RuntimeError): """The requested symbol cannot be located.""" - pass class MissingTypeError(RuntimeError): """The requested type cannot be located.""" - pass class CorruptedError(RuntimeError): """A corrupted data structure has been encountered.""" - pass class DelayedAttributeError(AttributeError): """ @@ -54,7 +51,6 @@ def format_clsname(self, cls: Type) -> str: class 
UnexpectedGDBTypeBaseError(InvalidArgumentError): """Base class for unexpected gdb type exceptions""" - pass class UnexpectedGDBTypeError(UnexpectedGDBTypeBaseError): """The gdb.Type passed describes an inappropriate type for the operation""" diff --git a/crash/kernel.py b/crash/kernel.py index fe03d362c43..952f0162319 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -22,7 +22,6 @@ class CrashKernelError(RuntimeError): """Raised when an error occurs while initializing the debugging session""" - pass class _NoMatchingFileError(FileNotFoundError): pass diff --git a/crash/subsystem/storage/decoders.py b/crash/subsystem/storage/decoders.py index 31bffc84655..c242847ade8 100644 --- a/crash/subsystem/storage/decoders.py +++ b/crash/subsystem/storage/decoders.py @@ -38,7 +38,6 @@ def interpret(self) -> None: This method will examine the object passed to the derived class's constructor and produce the attributes required for each object. """ - pass def __getattr__(self, name: str) -> Any: if self.interpreted: @@ -66,7 +65,6 @@ def __str__(self) -> str: :obj:`.Decoder`s include the address of the object, the block device it uses, and the location(s) affected by the object. """ - pass def __next__(self) -> Any: """ @@ -86,7 +84,6 @@ def __next__(self) -> Any: If there are no objects beyond this one, it does not need to be overridden. """ - pass class BadBHDecoder(Decoder): """ diff --git a/crash/types/slab.py b/crash/types/slab.py index 7647c551836..da628796065 100644 --- a/crash/types/slab.py +++ b/crash/types/slab.py @@ -633,7 +633,6 @@ def check_all(self) -> None: class KmemCacheNotFound(RuntimeError): """The specified kmem_cache could not be found.""" - pass kmem_caches: Dict[str, KmemCache] = dict() kmem_caches_by_addr: Dict[int, KmemCache] = dict() diff --git a/crash/util/__init__.py b/crash/util/__init__.py index 0cd6fc0ac75..52065939b9b 100644 --- a/crash/util/__init__.py +++ b/crash/util/__init__.py @@ -27,7 +27,6 @@ def __init__(self, gdbtype: gdb.Type, spec: str, message: str) -> None: # visible outside of this module. class _InvalidComponentBaseError(RuntimeError): """An internal error occured while resolving the member specification""" - pass class _InvalidComponentTypeError(_InvalidComponentBaseError): """The component expects the type to be a struct or union but it is not.""" From 7084a5c71076cdc3fa39cb0aec5fac2bce63f2e1 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 10 Jun 2019 17:59:27 -0400 Subject: [PATCH 232/367] lint: fix useless-object-inheritance complaints In Python3-only code, all classes are "new-style" so inheriting from object is unnecessary. 
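A short illustration (not from the tree) of why the explicit base is redundant in Python 3:

    class WithBase(object):
        pass

    class WithoutBase:
        pass

    # Both are new-style classes in Python 3; the explicit base adds nothing.
    assert WithBase.__mro__ == (WithBase, object)
    assert WithoutBase.__mro__ == (WithoutBase, object)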
This commit fixes the following lint complaints: ************* Module crash.kernel crash/kernel.py:54:0: R0205: Class 'CrashKernel' inherits from object, can be safely removed from bases in python3 (useless-object-inheritance) ************* Module crash.addrxlat crash/addrxlat.py:47:0: R0205: Class 'CrashAddressTranslation' inherits from object, can be safely removed from bases in python3 (useless-object-inheritance) ************* Module crash.session crash/session.py:9:0: R0205: Class 'Session' inherits from object, can be safely removed from bases in python3 (useless-object-inheritance) ************* Module crash.arch crash/arch/__init__.py:9:0: R0205: Class 'CrashArchitecture' inherits from object, can be safely removed from bases in python3 (useless-object-inheritance) crash/arch/__init__.py:36:0: R0205: Class 'KernelFrameFilter' inherits from object, can be safely removed from bases in python3 (useless-object-inheritance) crash/arch/__init__.py:47:0: R0205: Class 'KernelAddressIterator' inherits from object, can be safely removed from bases in python3 (useless-object-inheritance) ************* Module crash.cache crash/cache/__init__.py:12:0: R0205: Class 'CrashCache' inherits from object, can be safely removed from bases in python3 (useless-object-inheritance) ************* Module crash.commands.vtop crash/commands/vtop.py:143:0: R0205: Class 'LinuxPGT' inherits from object, can be safely removed from bases in python3 (useless-object-inheritance) ************* Module crash.commands.ps crash/commands/ps.py:433:0: R0205: Class 'TaskFormat' inherits from object, can be safely removed from bases in python3 (useless-object-inheritance) ************* Module crash.infra.callback crash/infra/callback.py:19:0: R0205: Class 'ObjfileEventCallback' inherits from object, can be safely removed from bases in python3 (useless-object-inheritance) ************* Module crash.infra.lookup crash/infra/lookup.py:262:0: R0205: Class 'DelayedValue' inherits from object, can be safely removed from bases in python3 (useless-object-inheritance) ************* Module crash.subsystem.filesystem.mount crash/subsystem/filesystem/mount.py:57:0: R0205: Class 'Mount' inherits from object, can be safely removed from bases in python3 (useless-object-inheritance) ************* Module crash.subsystem.filesystem.xfs crash/subsystem/filesystem/xfs.py:265:0: R0205: Class 'XFS' inherits from object, can be safely removed from bases in python3 (useless-object-inheritance) ************* Module crash.subsystem.storage.decoders crash/subsystem/storage/decoders.py:13:0: R0205: Class 'Decoder' inherits from object, can be safely removed from bases in python3 (useless-object-inheritance) ************* Module crash.util.symbols crash/util/symbols.py:32:0: R0205: Class 'DelayedCollection' inherits from object, can be safely removed from bases in python3 (useless-object-inheritance) crash/util/symbols.py:312:0: R0205: Class 'CallbackCollection' inherits from object, can be safely removed from bases in python3 (useless-object-inheritance) ************* Module crash.types.task crash/types/task.py:24:0: R0205: Class 'TaskStateFlags' inherits from object, can be safely removed from bases in python3 (useless-object-inheritance) crash/types/task.py:186:0: R0205: Class 'LinuxTask' inherits from object, can be safely removed from bases in python3 (useless-object-inheritance) ************* Module crash.types.cpu crash/types/cpu.py:17:0: R0205: Class 'TypesCPUClass' inherits from object, can be safely removed from bases in python3 
(useless-object-inheritance) ************* Module crash.types.percpu crash/types/percpu.py:31:0: R0205: Class 'PerCPUState' inherits from object, can be safely removed from bases in python3 (useless-object-inheritance) ************* Module crash.types.node crash/types/node.py:36:0: R0205: Class 'Node' inherits from object, can be safely removed from bases in python3 (useless-object-inheritance) crash/types/node.py:84:0: R0205: Class 'NodeStates' inherits from object, can be safely removed from bases in python3 (useless-object-inheritance) ************* Module crash.types.zone crash/types/zone.py:16:0: R0205: Class 'Zone' inherits from object, can be safely removed from bases in python3 (useless-object-inheritance) ************* Module crash.types.classdev crash/types/classdev.py:17:0: R0205: Class 'ClassdevState' inherits from object, can be safely removed from bases in python3 (useless-object-inheritance) ************* Module crash.types.page crash/types/page.py:25:0: R0205: Class 'Page' inherits from object, can be safely removed from bases in python3 (useless-object-inheritance) ************* Module crash.types.vmstat crash/types/vmstat.py:12:0: R0205: Class 'VmStat' inherits from object, can be safely removed from bases in python3 (useless-object-inheritance) ************* Module crash.types.slab crash/types/slab.py:45:0: R0205: Class 'Slab' inherits from object, can be safely removed from bases in python3 (useless-object-inheritance) crash/types/slab.py:314:0: R0205: Class 'KmemCache' inherits from object, can be safely removed from bases in python3 (useless-object-inheritance) ************* Module kdump.target kdump/target.py:17:0: R0205: Class 'SymbolCallback' inherits from object, can be safely removed from bases in python3 (useless-object-inheritance) Signed-off-by: Jeff Mahoney --- crash/addrxlat.py | 2 +- crash/arch/__init__.py | 6 +++--- crash/cache/__init__.py | 2 +- crash/commands/ps.py | 2 +- crash/commands/vtop.py | 2 +- crash/infra/callback.py | 2 +- crash/infra/lookup.py | 2 +- crash/kernel.py | 2 +- crash/session.py | 2 +- crash/subsystem/filesystem/mount.py | 2 +- crash/subsystem/filesystem/xfs.py | 2 +- crash/subsystem/storage/decoders.py | 2 +- crash/types/classdev.py | 2 +- crash/types/cpu.py | 2 +- crash/types/node.py | 4 ++-- crash/types/page.py | 2 +- crash/types/percpu.py | 2 +- crash/types/slab.py | 4 ++-- crash/types/task.py | 4 ++-- crash/types/vmstat.py | 2 +- crash/types/zone.py | 2 +- crash/util/symbols.py | 4 ++-- kdump/target.py | 2 +- 23 files changed, 29 insertions(+), 29 deletions(-) diff --git a/crash/addrxlat.py b/crash/addrxlat.py index f1a39bc6843..9949f6c0ffd 100644 --- a/crash/addrxlat.py +++ b/crash/addrxlat.py @@ -44,7 +44,7 @@ def cb_read64(self, faddr: addrxlat.FullAddress) -> gdb.Value: v = gdb.Value(faddr.addr).cast(types.uint64_t_p_type) return int(v.dereference()) -class CrashAddressTranslation(object): +class CrashAddressTranslation: def __init__(self) -> None: try: target = gdb.current_target() diff --git a/crash/arch/__init__.py b/crash/arch/__init__.py index 2c0e7c907ba..7c19b709829 100644 --- a/crash/arch/__init__.py +++ b/crash/arch/__init__.py @@ -6,7 +6,7 @@ import gdb from gdb.FrameDecorator import FrameDecorator -class CrashArchitecture(object): +class CrashArchitecture: ident = "base-class" aliases: List[str] = list() def __init__(self) -> None: @@ -33,7 +33,7 @@ def get_stack_pointer(self, thread_struct: gdb.Value) -> gdb.Value: raise NotImplementedError("get_stack_pointer is not implemented") # This keeps stack traces from 
continuing into userspace and causing problems. -class KernelFrameFilter(object): +class KernelFrameFilter: def __init__(self, address: int) -> None: self.name = "KernelFrameFilter" self.priority = 100 @@ -44,7 +44,7 @@ def __init__(self, address: int) -> None: def filter(self, frame_iter: Iterator[FrameDecorator]) -> Any: return KernelAddressIterator(frame_iter, self.address) -class KernelAddressIterator(object): +class KernelAddressIterator: def __init__(self, ii: Iterator[gdb.Frame], address: int) -> None: self.input_iterator = ii self.address = address diff --git a/crash/cache/__init__.py b/crash/cache/__init__.py index 7f36b4b6a0e..cbf829fa56d 100644 --- a/crash/cache/__init__.py +++ b/crash/cache/__init__.py @@ -9,7 +9,7 @@ import gdb -class CrashCache(object): +class CrashCache: def refresh(self) -> None: pass diff --git a/crash/commands/ps.py b/crash/commands/ps.py index 83db0426bf0..10ef49d4a14 100755 --- a/crash/commands/ps.py +++ b/crash/commands/ps.py @@ -430,7 +430,7 @@ import gdb -class TaskFormat(object): +class TaskFormat: """ This class is responsible for converting the arguments into formatting rules. diff --git a/crash/commands/vtop.py b/crash/commands/vtop.py index ed3080469c6..d150a8f2e21 100644 --- a/crash/commands/vtop.py +++ b/crash/commands/vtop.py @@ -140,7 +140,7 @@ from crash.commands import CommandError, CommandLineError from crash.addrxlat import CrashAddressTranslation -class LinuxPGT(object): +class LinuxPGT: table_names = ('PTE', 'PMD', 'PUD', 'PGD') def __init__(self, ctx: addrxlat.Context, sys: addrxlat.System) -> None: diff --git a/crash/infra/callback.py b/crash/infra/callback.py index 75135639210..ddee8d5a5f2 100644 --- a/crash/infra/callback.py +++ b/crash/infra/callback.py @@ -16,7 +16,7 @@ def __init__(self, callback_obj: 'ObjfileEventCallback') -> None: super().__init__(msg) self.callback_obj = callback_obj -class ObjfileEventCallback(object): +class ObjfileEventCallback: """ A generic objfile callback class diff --git a/crash/infra/lookup.py b/crash/infra/lookup.py index 89bc446aa2c..c6e849f6335 100644 --- a/crash/infra/lookup.py +++ b/crash/infra/lookup.py @@ -259,7 +259,7 @@ def __str__(self) -> str: return ("<{}({}, {})>" .format(self.__class__.__name__, self.name, self.block)) -class DelayedValue(object): +class DelayedValue: """ A generic class for making class attributes available that describe to-be-loaded symbols, minimal symbols, and types. diff --git a/crash/kernel.py b/crash/kernel.py index 952f0162319..1b58d092c77 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -51,7 +51,7 @@ def __init__(self, path: str, module_value: Optional[str], PathSpecifier = Union[List[str], str] -class CrashKernel(object): +class CrashKernel: """ Initialize a basic kernel semantic debugging session. 
diff --git a/crash/session.py b/crash/session.py index e1bc109d163..c2b55268342 100644 --- a/crash/session.py +++ b/crash/session.py @@ -6,7 +6,7 @@ import gdb -class Session(object): +class Session: """ crash.Session is the main driver component for crash-python diff --git a/crash/subsystem/filesystem/mount.py b/crash/subsystem/filesystem/mount.py index c66004dbc35..95d29a2e1ad 100644 --- a/crash/subsystem/filesystem/mount.py +++ b/crash/subsystem/filesystem/mount.py @@ -54,7 +54,7 @@ types = Types(['struct mount', 'struct vfsmount']) symvals = Symvals(['init_task']) -class Mount(object): +class Mount: _for_each_mount: Callable[[Any, gdb.Value], Iterator[gdb.Value]] def _for_each_mount_nsproxy(self, task: gdb.Value) -> Iterator[gdb.Value]: diff --git a/crash/subsystem/filesystem/xfs.py b/crash/subsystem/filesystem/xfs.py index bfdaf2ec0e3..0bbc7c6ddd5 100644 --- a/crash/subsystem/filesystem/xfs.py +++ b/crash/subsystem/filesystem/xfs.py @@ -262,7 +262,7 @@ def __str__(self) -> str: 'struct xfs_qoff_logitem', 'struct xfs_inode', 'struct xfs_mount *', 'struct xfs_buf *']) -class XFS(object): +class XFS: """ XFS File system state class. Not meant to be instantiated directly. """ diff --git a/crash/subsystem/storage/decoders.py b/crash/subsystem/storage/decoders.py index c242847ade8..1b8a32c278c 100644 --- a/crash/subsystem/storage/decoders.py +++ b/crash/subsystem/storage/decoders.py @@ -10,7 +10,7 @@ EndIOSpecifier = Union[int, str, List[str], gdb.Value, gdb.Symbol, None] -class Decoder(object): +class Decoder: """Decoder objects are used to unwind the storage stack They are relatively lightweight at runtime, meaning that the object diff --git a/crash/types/classdev.py b/crash/types/classdev.py index 614e6004496..05a7788562b 100644 --- a/crash/types/classdev.py +++ b/crash/types/classdev.py @@ -14,7 +14,7 @@ types = Types(['struct device', 'struct device_private']) -class ClassdevState(object): +class ClassdevState: _class_is_private = True @classmethod diff --git a/crash/types/cpu.py b/crash/types/cpu.py index 278b987bd22..9034e19b347 100644 --- a/crash/types/cpu.py +++ b/crash/types/cpu.py @@ -14,7 +14,7 @@ # this wraps no particular type, rather it's a placeholder for # functions to iterate over online cpu's etc. -class TypesCPUClass(object): +class TypesCPUClass: """A state holder class for handling CPUs. Not meant to be instantiated. Attributes: diff --git a/crash/types/node.py b/crash/types/node.py index 6dc833961bb..e1d9118d7a9 100644 --- a/crash/types/node.py +++ b/crash/types/node.py @@ -33,7 +33,7 @@ def numa_node_id(cpu: int) -> int: NodeType = TypeVar('NodeType', bound='Node') -class Node(object): +class Node: """ A wrapper around the Linux kernel 'struct node' structure """ @@ -81,7 +81,7 @@ def __init__(self, obj: gdb.Value) -> None: """ self.gdb_obj = obj -class NodeStates(object): +class NodeStates: """ A state holder for Node states. 
diff --git a/crash/types/page.py b/crash/types/page.py index acb93bd733f..3554fa9917f 100644 --- a/crash/types/page.py +++ b/crash/types/page.py @@ -22,7 +22,7 @@ PageType = TypeVar('PageType', bound='Page') -class Page(object): +class Page: slab_cache_name = None slab_page_name = None compound_head_name = None diff --git a/crash/types/percpu.py b/crash/types/percpu.py index ab1d2afac69..19ec1dfa30c 100644 --- a/crash/types/percpu.py +++ b/crash/types/percpu.py @@ -28,7 +28,7 @@ def __init__(self, var: SymbolOrValue) -> None: 'pcpu_nr_slots', 'pcpu_group_offsets']) msymvals = MinimalSymvals(['__per_cpu_start', '__per_cpu_end']) -class PerCPUState(object): +class PerCPUState: """ Per-cpus come in a few forms: - "Array" of objects diff --git a/crash/types/slab.py b/crash/types/slab.py index da628796065..b767118e748 100644 --- a/crash/types/slab.py +++ b/crash/types/slab.py @@ -42,7 +42,7 @@ def col_bold(msg: str) -> str: SlabType = TypeVar('SlabType', bound='Slab') KmemCacheType = TypeVar('KmemCacheType', bound='KmemCache') -class Slab(object): +class Slab: slab_list_head: str = 'list' page_slab: bool = False @@ -311,7 +311,7 @@ def check(self, slabtype: int, nid: int) -> int: (obj, slab_addr)) return num_free -class KmemCache(object): +class KmemCache: buffer_size_name = None nodelists_name = None percpu_name = None diff --git a/crash/types/task.py b/crash/types/task.py index 371db4a73dd..e8cad6166f7 100644 --- a/crash/types/task.py +++ b/crash/types/task.py @@ -21,7 +21,7 @@ # task_state_array which doesn't include all of them. All we can do # is make some assumptions based on the changes upstream. This will # be fragile. -class TaskStateFlags(object): +class TaskStateFlags: """ A class to contain state related to discovering task flag values. Not meant to be instantiated. @@ -183,7 +183,7 @@ def _check_state_bits(cls) -> None: TF = TaskStateFlags -class LinuxTask(object): +class LinuxTask: """ A wrapper class for ``struct task_struct``. There will be typically one of these allocated for every task discovered in the debugging diff --git a/crash/types/vmstat.py b/crash/types/vmstat.py index a5fc0b05b15..df47185276e 100644 --- a/crash/types/vmstat.py +++ b/crash/types/vmstat.py @@ -9,7 +9,7 @@ import gdb -class VmStat(object): +class VmStat: types = Types(['enum zone_stat_item', 'enum vm_event_item']) symbols = Symbols(['vm_event_states']) diff --git a/crash/types/zone.py b/crash/types/zone.py index bacc565a5b6..d7282c819b7 100644 --- a/crash/types/zone.py +++ b/crash/types/zone.py @@ -13,7 +13,7 @@ import gdb -class Zone(object): +class Zone: types = Types(['struct page']) diff --git a/crash/util/symbols.py b/crash/util/symbols.py index f08e319c1f5..96be1afc7fe 100644 --- a/crash/util/symbols.py +++ b/crash/util/symbols.py @@ -29,7 +29,7 @@ CollectedValue = Union[gdb.Type, gdb.Value, gdb.Symbol, gdb.MinSymbol, Any] Names = Union[List[str], str] -class DelayedCollection(object): +class DelayedCollection: """ A generic container for delayed lookups. 
@@ -309,7 +309,7 @@ def __init__(self, names: Names) -> None: CallbackSpecifier = Tuple[str, Callable] CallbackSpecifiers = Union[List[CallbackSpecifier], CallbackSpecifier] -class CallbackCollection(object): +class CallbackCollection: def __init__(self, cls: Type[NamedCallback], cbs: CallbackSpecifiers) -> None: if isinstance(cbs, tuple): diff --git a/kdump/target.py b/kdump/target.py index 7b54d0de31d..e8c46f7fcbd 100644 --- a/kdump/target.py +++ b/kdump/target.py @@ -14,7 +14,7 @@ PTID = Tuple[int, int, int] -class SymbolCallback(object): +class SymbolCallback: "addrxlat symbolic callback" def __init__(self, ctx: Optional[addrxlat.Context] = None) -> None: From 9399771a51d3e46c4b0c69cc962c6eedb7bf7f35 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 10 Jun 2019 18:02:46 -0400 Subject: [PATCH 233/367] lint: fix no-else-raise complaints This commit fixes the following lint complaints: ************* Module crash.util crash/util/__init__.py:211:12: R1720: Unnecessary "else" after "raise" (no-else-raise) crash/util/__init__.py:255:8: R1720: Unnecessary "else" after "raise" (no-else-raise) ************* Module crash.types.list crash/types/list.py:88:12: R1720: Unnecessary "else" after "raise" (no-else-raise) Signed-off-by: Jeff Mahoney --- crash/types/list.py | 3 +-- crash/types/slab.py | 3 +-- crash/util/__init__.py | 6 ++---- 3 files changed, 4 insertions(+), 8 deletions(-) diff --git a/crash/types/list.py b/crash/types/list.py index b9d330e9f5f..d2ec5bea81a 100644 --- a/crash/types/list.py +++ b/crash/types/list.py @@ -87,8 +87,7 @@ def list_for_each(list_head: gdb.Value, include_head: bool = False, if exact_cycles: if int(node.address) in visited: raise ListCycleError("Cycle in list detected.") - else: - visited.add(int(node.address)) + visited.add(int(node.address)) try: if int(prev.address) != int(node[prev_]): error = ("broken {} link {:#x} -{}-> {:#x} -{}-> {:#x}" diff --git a/crash/types/slab.py b/crash/types/slab.py index b767118e748..c532177bc73 100644 --- a/crash/types/slab.py +++ b/crash/types/slab.py @@ -126,9 +126,8 @@ def __add_free_obj_by_idx(self, idx: int) -> bool: if obj_addr in self.free: self.__error(": object %x duplicated on freelist" % obj_addr) return False - else: - self.free.add(obj_addr) + self.free.add(obj_addr) return True def __populate_free(self) -> None: diff --git a/crash/util/__init__.py b/crash/util/__init__.py index 52065939b9b..015528d039b 100644 --- a/crash/util/__init__.py +++ b/crash/util/__init__.py @@ -210,8 +210,7 @@ def __offsetof(val: gdb.Type, spec: str, if not found: if error: raise _InvalidComponentNameError(member, gdbtype) - else: - return None + return None gdbtype = nexttype offset += off @@ -254,8 +253,7 @@ def offsetof_type(gdbtype: gdb.Type, member_name: str, except _InvalidComponentBaseError as e: if error: raise InvalidComponentError(gdbtype, member_name, str(e)) - else: - return None + return None def offsetof(gdbtype: gdb.Type, member_name: str, error: bool = True) -> Union[int, None]: From 178fb3768ec37c512da2ca0f6f47a3985d341868 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 10 Jun 2019 18:05:41 -0400 Subject: [PATCH 234/367] lint: fix no-else-return complaints This commit fixes the following lint complaints: ************* Module crash.commands.kmem crash/commands/kmem.py:51:8: R1705: Unnecessary "elif" after "return" (no-else-return) ************* Module crash.subsystem.filesystem.btrfs crash/subsystem/filesystem/btrfs.py:141:4: R1705: Unnecessary "elif" after "return" (no-else-return) ************* Module 
crash.subsystem.storage crash/subsystem/storage/__init__.py:158:4: R1705: Unnecessary "elif" after "return" (no-else-return) ************* Module crash.subsystem.storage.decoders crash/subsystem/storage/decoders.py:182:4: R1705: Unnecessary "elif" after "return" (no-else-return) ************* Module crash.types.task crash/types/task.py:485:8: R1705: Unnecessary "elif" after "return" (no-else-return) Signed-off-by: Jeff Mahoney --- crash/commands/kmem.py | 6 ++++-- crash/subsystem/filesystem/btrfs.py | 2 +- crash/subsystem/storage/__init__.py | 13 +++++++------ crash/subsystem/storage/decoders.py | 3 ++- crash/types/task.py | 3 ++- 5 files changed, 16 insertions(+), 11 deletions(-) diff --git a/crash/commands/kmem.py b/crash/commands/kmem.py index a1512415c89..c829fcea3fe 100644 --- a/crash/commands/kmem.py +++ b/crash/commands/kmem.py @@ -51,10 +51,12 @@ def execute(self, args: argparse.Namespace) -> None: if args.z: self.print_zones() return - elif args.V: + + if args.V: self.print_vmstats() return - elif args.slabname: + + if args.slabname: if args.slabname is True: print("Checking all kmem caches...") for cache in kmem_cache_get_all(): diff --git a/crash/subsystem/filesystem/btrfs.py b/crash/subsystem/filesystem/btrfs.py index 6732e7677eb..a05befd2355 100644 --- a/crash/subsystem/filesystem/btrfs.py +++ b/crash/subsystem/filesystem/btrfs.py @@ -140,6 +140,6 @@ def btrfs_metadata_uuid(sb: gdb.Value, force: bool = False) -> uuid.UUID: fs_info = btrfs_fs_info(sb, force) if struct_has_member(types.btrfs_fs_info_type, 'metadata_uuid'): return decode_uuid(fs_info['metadata_uuid']) - elif struct_has_member(fs_info['fs_devices'].type, 'metadata_uuid'): + if struct_has_member(fs_info['fs_devices'].type, 'metadata_uuid'): return decode_uuid(fs_info['fs_devices']['metadata_uuid']) return btrfs_fsid(sb, force) diff --git a/crash/subsystem/storage/__init__.py b/crash/subsystem/storage/__init__.py index c184f1b8a38..1a221e76577 100644 --- a/crash/subsystem/storage/__init__.py +++ b/crash/subsystem/storage/__init__.py @@ -157,14 +157,15 @@ def gendisk_name(gendisk: gdb.Value) -> str: if get_basic_type(gendisk.type) == types.gendisk_type: return gendisk['disk_name'].string() - elif get_basic_type(gendisk.type) == types.hd_struct_type: + + if get_basic_type(gendisk.type) == types.hd_struct_type: parent = dev_to_gendisk(part_to_dev(gendisk)['parent']) return "{}{:d}".format(gendisk_name(parent), int(gendisk['partno'])) - else: - raise InvalidArgumentError("expected {} or {}, not {}" - .format(types.gendisk_type, - types.hd_struct_type, - gendisk.type.unqualified())) + + raise InvalidArgumentError("expected {} or {}, not {}" + .format(types.gendisk_type, + types.hd_struct_type, + gendisk.type.unqualified())) def block_device_name(bdev: gdb.Value) -> str: """ diff --git a/crash/subsystem/storage/decoders.py b/crash/subsystem/storage/decoders.py index 1b8a32c278c..8e188395b7b 100644 --- a/crash/subsystem/storage/decoders.py +++ b/crash/subsystem/storage/decoders.py @@ -184,7 +184,8 @@ def register_decoder(endio: EndIOSpecifier, decoder: Type[Decoder]) -> None: print(f"Registering {endio} as callback") SymbolCallback(endio, lambda a: register_decoder(a, decoder)) return - elif isinstance(endio, list) and isinstance(endio[0], str): + + if isinstance(endio, list) and isinstance(endio[0], str): for sym in endio: if debug: print(f"Registering {sym} as callback") diff --git a/crash/types/task.py b/crash/types/task.py index e8cad6166f7..be80dfa4011 100644 --- a/crash/types/task.py +++ b/crash/types/task.py @@ 
-484,7 +484,8 @@ def is_kernel_task(self) -> bool: mm = self.task_struct['mm'] if mm == 0: return True - elif symvals.init_mm and mm == symvals.init_mm.address: + + if symvals.init_mm and mm == symvals.init_mm.address: return True return False From 24fd3548127d0e6513bd80ed50934c8b66090d76 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 10 Jun 2019 18:07:32 -0400 Subject: [PATCH 235/367] lint: fix consider-using-get complaints This commit fixes the following lint complaints: ************* Module crash.kernel crash/kernel.py:422:8: R1715: Consider using dict.get for getting values from a dict if a key is present or a default if not (consider-using-get) crash/kernel.py:429:8: R1715: Consider using dict.get for getting values from a dict if a key is present or a default if not (consider-using-get) Signed-off-by: Jeff Mahoney --- crash/kernel.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/crash/kernel.py b/crash/kernel.py index 1b58d092c77..7ae47f3884d 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -418,16 +418,12 @@ def _get_module_sections(self, module: gdb.Value) -> str: def _check_module_version(self, modpath: str, module: gdb.Value) -> None: modinfo = self.extract_modinfo_from_module(modpath) - vermagic = None - if 'vermagic' in modinfo: - vermagic = modinfo['vermagic'] + vermagic = modinfo.get('vermagic', None) if vermagic != self.vermagic: raise _ModVersionMismatchError(modpath, vermagic, self.vermagic) - mi_srcversion = None - if 'srcversion' in modinfo: - mi_srcversion = modinfo['srcversion'] + mi_srcversion = modinfo.get('srcversion', None) mod_srcversion = None if 'srcversion' in module.type: From bfed9c0a18a0b41ccc5b19d202a2f8092ea84f16 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 10 Jun 2019 18:39:21 -0400 Subject: [PATCH 236/367] lint: disable 'bad-option-value' check The 'bad-option-value' check checks the "pylint: disable=" comments to ensure the names are valid. The problem is that it means older lint don't work since the valid names aren't known to them yet. I'd rather throw lint failures on commit hook (when that's implemented) than force anyone developing crash-python to have the latest tools that are just for sanity checks. Signed-off-by: Jeff Mahoney --- tests/pylintrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pylintrc b/tests/pylintrc index 35f95c1f1b0..ba13178f0f4 100644 --- a/tests/pylintrc +++ b/tests/pylintrc @@ -65,7 +65,7 @@ confidence= # --enable=similarities". 
If you want to run only the classes checker, but have # no Warning level messages displayed, use"--disable=all --enable=classes # --disable=W" -disable=missing-docstring,too-few-public-methods,invalid-name,too-many-locals,too-many-instance-attributes,too-many-public-methods,fixme,no-self-use,too-many-branches,too-many-statements,too-many-arguments,too-many-boolean-expressions,line-too-long,duplicate-code +disable=missing-docstring,too-few-public-methods,invalid-name,too-many-locals,too-many-instance-attributes,too-many-public-methods,fixme,no-self-use,too-many-branches,too-many-statements,too-many-arguments,too-many-boolean-expressions,line-too-long,duplicate-code,bad-option-value [REPORTS] From c61c36a6c1cffb387c762d72b8f95945a2bb5fdb Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 10 Jun 2019 18:11:15 -0400 Subject: [PATCH 237/367] lint: fix consider-using-in complaints This commit fixes the following lint complaints: ************* Module crash.arch.x86_64 crash/arch/x86_64.py:45:33: R1714: Consider merging these comparisons with "in" to 'register not in (16, -1)' (consider-using-in) crash/arch/x86_64.py:67:11: R1714: Consider merging these comparisons with "in" to 'register in (16, -1)' (consider-using-in) crash/arch/x86_64.py:90:11: R1714: Consider merging these comparisons with "in" to 'register in (16, -1)' (consider-using-in) ************* Module crash.subsystem.filesystem.mount crash/subsystem/filesystem/mount.py:267:11: R1714: Consider merging these comparisons with "in" to "dentry in (mnt['mnt_root'], dentry['d_parent'])" (consider-using-in) ************* Module crash.types.slab crash/types/slab.py:237:23: R1714: Consider merging these comparisons with "in" to "struct_slab_cache not in ('size-64', 'size-128')" (consider-using-in) crash/types/slab.py:263:15: R1714: Consider merging these comparisons with "in" to 'num_free in (0, max_free)' (consider-using-in) Signed-off-by: Jeff Mahoney --- crash/arch/x86_64.py | 6 +++--- crash/subsystem/filesystem/mount.py | 1 + crash/types/slab.py | 4 ++-- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/crash/arch/x86_64.py b/crash/arch/x86_64.py index d692a5644da..ff93100357a 100644 --- a/crash/arch/x86_64.py +++ b/crash/arch/x86_64.py @@ -42,7 +42,7 @@ def fetch_register_active(self, thread: gdb.InferiorThread, register: int) -> None: task = thread.info for reg in task.regs: - if reg == "rip" and (register != 16 and register != -1): + if reg == "rip" and register not in (16, -1): continue try: thread.registers[reg].value = task.regs[reg] @@ -64,7 +64,7 @@ def fetch_register_scheduled_inactive(self, thread: gdb.InferiorThread, frame = rsp.cast(self.inactive_task_frame_type.pointer()).dereference() # Only write rip when requested; It resets the frame cache - if register == 16 or register == -1: + if register in (16, -1): thread.registers['rip'].value = frame['ret_addr'] if register == 16: return @@ -87,7 +87,7 @@ def fetch_register_scheduled_thread_return(self, thread: gdb.InferiorThread, task = thread.info.task_struct # Only write rip when requested; It resets the frame cache - if register == 16 or register == -1: + if register in (16, -1): thread.registers['rip'].value = self.thread_return if register == 16: return diff --git a/crash/subsystem/filesystem/mount.py b/crash/subsystem/filesystem/mount.py index 95d29a2e1ad..417d5737556 100644 --- a/crash/subsystem/filesystem/mount.py +++ b/crash/subsystem/filesystem/mount.py @@ -264,6 +264,7 @@ def d_path(mnt: gdb.Value, dentry: gdb.Value, root: gdb.Value = None) -> str: # Gone 
are the days where finding the root was as simple as # dentry == dentry->d_parent while dentry != root['dentry'] or mnt != root['mnt']: + # pylint: disable=consider-using-in if dentry == mnt['mnt_root'] or dentry == dentry['d_parent']: if mount != mount['mnt_parent']: dentry = mount['mnt_mountpoint'] diff --git a/crash/types/slab.py b/crash/types/slab.py index c532177bc73..5bd67d8c99c 100644 --- a/crash/types/slab.py +++ b/crash/types/slab.py @@ -234,7 +234,7 @@ def check(self, slabtype: int, nid: int) -> int: else: struct_slab_cache = struct_slab_slab.kmem_cache.name if not self.kmem_cache.off_slab_cache: - if struct_slab_cache != "size-64" and struct_slab_cache != "size-128": + if struct_slab_cache not in ("size-64", "size-128"): self.__error(": OFF_SLAB struct slab is in a wrong cache %s" % struct_slab_cache) else: @@ -260,7 +260,7 @@ def check(self, slabtype: int, nid: int) -> int: if num_free != max_free: self.__free_error("slab_free") elif slabtype == slab_partial: - if num_free == 0 or num_free == max_free: + if num_free in (0, max_free): self.__free_error("slab_partial") elif slabtype == slab_full: if num_free > 0: From a5d156ddce4bcc5b6f60811a4220e4148699997e Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 10 Jun 2019 18:14:22 -0400 Subject: [PATCH 238/367] lint: fix chained-comparison complaints This commit fixes the following lint complaints: ************* Module crash.types.percpu crash/types/percpu.py:185:19: R1716: Simplify chained comparison between the operands (chained-comparison) crash/types/percpu.py:199:19: R1716: Simplify chained comparison between the operands (chained-comparison) crash/types/percpu.py:217:15: R1716: Simplify chained comparison between the operands (chained-comparison) crash/types/percpu.py:229:11: R1716: Simplify chained comparison between the operands (chained-comparison) crash/types/percpu.py:246:15: R1716: Simplify chained comparison between the operands (chained-comparison) Signed-off-by: Jeff Mahoney --- crash/types/percpu.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/crash/types/percpu.py b/crash/types/percpu.py index 19ec1dfa30c..12d159192f3 100644 --- a/crash/types/percpu.py +++ b/crash/types/percpu.py @@ -182,7 +182,7 @@ def _is_percpu_var_dynamic(self, var: int) -> bool: # TODO: we could sort the list... 
for (start, end) in self._dynamic_offset_cache: - if var >= start and var < end: + if start <= var < end: return True except DelayedAttributeError: # This can happen with the testcases or in kernels prior to 2.6.30 @@ -196,7 +196,7 @@ def _is_static_percpu_address(self, addr: int) -> bool: size = self._static_ranges[start] for cpu in range(0, self._last_cpu): offset = int(symvals['__per_cpu_offset'][cpu]) + start - if addr >= offset and addr < offset + size: + if offset <= addr < offset + size: return True return False @@ -214,7 +214,7 @@ def is_static_percpu_var(self, addr: int) -> bool: """ for start in self._static_ranges: size = self._static_ranges[start] - if addr >= start and addr < start + size: + if start <= addr < start + size: return True return False @@ -226,7 +226,7 @@ def _relocated_offset(self, var: gdb.Value) -> int: addr = int(var) start = msymvals['__per_cpu_start'] size = self._static_ranges[start] - if addr >= start and addr < start + size: + if start <= addr < start + size: return addr - start return addr @@ -243,7 +243,7 @@ def is_module_percpu_var(self, addr: int) -> bool: """ for start in self._module_ranges: size = self._module_ranges[start] - if addr >= start and addr < start + size: + if start <= addr < start + size: return True return False From 42afad529700c8983b47ef553f9ef51df62e809d Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 10 Jun 2019 18:20:29 -0400 Subject: [PATCH 239/367] lint: silence false unscriptable-object error This commit fixes the following pylint error: ************* Module crash.types.page crash/types/page.py:96:15: E1136: Value 'cls.vmemmap' is unsubscriptable (unsubscriptable-object) pylint doesn't have the visibility into gdb.Value to recognize that it's indexing into an array type. Signed-off-by: Jeff Mahoney --- crash/types/page.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crash/types/page.py b/crash/types/page.py index 3554fa9917f..81ef4d01656 100644 --- a/crash/types/page.py +++ b/crash/types/page.py @@ -93,6 +93,8 @@ def pfn_to_page(cls, pfn: int) -> gdb.Value: pagemap = section["section_mem_map"] & ~3 return (pagemap.cast(types.page_type.pointer()) + pfn).dereference() + # pylint doesn't have the visibility it needs to evaluate this + # pylint: disable=unsubscriptable-object return cls.vmemmap[pfn] @classmethod From 0b712b84236ff85927ef11ef92645215c62b555d Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 10 Jun 2019 18:25:03 -0400 Subject: [PATCH 240/367] lint: fix duplicate-string-formatting-argument warning This commit fixes the following lint warning: ************* Module crash.types.list crash/types/list.py:93:25: W1308: Duplicate string formatting argument 'prev_', consider passing as named argument (duplicate-string-formatting-argument) I don't like the idea of forcing developers to use old-style named argument formatting, but f-strings make this much more sane. 
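For comparison, a standalone sketch of the two styles; the variable names and values here are invented for the illustration:

    prev_name, next_name = "prev", "next"
    prev_addr, node_addr, bad_addr = 0x1000, 0x1010, 0x0

    # str.format(): prev_name has to be passed twice, which is exactly what
    # duplicate-string-formatting-argument complains about.
    error = ("broken {} link {:#x} -{}-> {:#x} -{}-> {:#x}"
             .format(prev_name, prev_addr, next_name, node_addr,
                     prev_name, bad_addr))

    # f-strings: each value is written where it is used, so nothing repeats.
    error = (f"broken {prev_name} link {prev_addr:#x} "
             f"-{next_name}-> {node_addr:#x} "
             f"-{prev_name}-> {bad_addr:#x}")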
Signed-off-by: Jeff Mahoney --- crash/types/list.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crash/types/list.py b/crash/types/list.py index d2ec5bea81a..4d496ba25d4 100644 --- a/crash/types/list.py +++ b/crash/types/list.py @@ -90,9 +90,9 @@ def list_for_each(list_head: gdb.Value, include_head: bool = False, visited.add(int(node.address)) try: if int(prev.address) != int(node[prev_]): - error = ("broken {} link {:#x} -{}-> {:#x} -{}-> {:#x}" - .format(prev_, int(prev.address), next_, int(node.address), - prev_, int(node[prev_]))) + error = f"broken {prev_} link {int(prev.address):#x} " + error += f"-{next_}-> {int(node.address):#x} " + error += f"-{prev_}-> {int(node[prev_]):#x}" pending_exception = CorruptListError(error) if print_broken_links: print(error) From af0b24df2401e5b94e9c6abda4daf7b69cfad166 Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Tue, 11 Jun 2019 14:38:41 -0400 Subject: [PATCH 241/367] crash.subsystem.filesystem.mount: improve d_path performance d_path was performing poorly in testing. A bit of debugging revealed the cause to be repeated type resolution on what gdb believed was an opaque type. Caching init_task->fs->root improved performance significantly. Signed-off-by: Jeff Mahoney --- crash/subsystem/filesystem/mount.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/crash/subsystem/filesystem/mount.py b/crash/subsystem/filesystem/mount.py index 417d5737556..4df1006096c 100644 --- a/crash/subsystem/filesystem/mount.py +++ b/crash/subsystem/filesystem/mount.py @@ -56,6 +56,7 @@ class Mount: _for_each_mount: Callable[[Any, gdb.Value], Iterator[gdb.Value]] + _init_fs_root: gdb.Value def _for_each_mount_nsproxy(self, task: gdb.Value) -> Iterator[gdb.Value]: """ @@ -76,6 +77,7 @@ def check_task_interface(cls, init_task: gdb.Symbol) -> None: Args: init_task: The ``init_task`` symbol. """ + cls._init_fs_root = init_task.value()['fs']['root'] if struct_has_member(init_task, 'nsproxy'): cls._for_each_mount = cls._for_each_mount_nsproxy else: @@ -84,6 +86,10 @@ def check_task_interface(cls, init_task: gdb.Symbol) -> None: def for_each_mount(self, task: gdb.Value) -> Iterator[gdb.Value]: return self._for_each_mount(task) + @property + def init_fs_root(self) -> gdb.Value: + return self._init_fs_root + _Mount = Mount() # pylint: disable=unused-argument @@ -242,7 +248,7 @@ def d_path(mnt: gdb.Value, dentry: gdb.Value, root: gdb.Value = None) -> str: :obj:`gdb.NotAvailableError`: The target value was not available. """ if root is None: - root = symvals.init_task['fs']['root'] + root = _Mount.init_fs_root if dentry.type.code != gdb.TYPE_CODE_PTR: dentry = dentry.address From 39ba5288929ad65f4115913ca1a649e6e973cdbe Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Tue, 11 Jun 2019 14:40:01 -0400 Subject: [PATCH 242/367] tests: specify subset of live-tests to run using TESTS=wildcard While developing test cases, it's useful to only run the test case you're developing. This commit adds a TESTS= option to live tests that allows it to limit the tests to a subset. The value should be a path-wildcard matching string corresponding to filenames in the kernel-tests directory. 
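A minimal sketch of the selection mechanism; the environment variable name is the one the patch exports, and the default pattern matches every test module:

    import os
    import unittest

    # When CRASH_PYTHON_TESTS is unset or empty, fall back to running
    # everything under kernel-tests/.
    pattern = os.environ.get('CRASH_PYTHON_TESTS') or 'test_*.py'
    suite = unittest.TestLoader().discover('kernel-tests', pattern=pattern)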
Signed-off-by: Jeff Mahoney --- Makefile | 6 +++- kernel-tests/unittest-bootstrap.py | 12 ++++++-- tests/run-kernel-tests.sh | 46 ++++++++++++++++++++++++++++++ 3 files changed, 61 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 53a5bc3d159..090d8854fc8 100644 --- a/Makefile +++ b/Makefile @@ -73,8 +73,12 @@ lint: force-rebuild static-check: force-rebuild sh tests/run-static-checks.sh +ifneq ($(TESTS),) +TESTSARG=--tests $(TESTS) +endif + live-tests: force-rebuild - sh tests/run-kernel-tests.sh $(INI_FILES) + sh tests/run-kernel-tests.sh $(TESTSARG) $(INI_FILES) test: unit-tests static-check lint live-tests @echo -n diff --git a/kernel-tests/unittest-bootstrap.py b/kernel-tests/unittest-bootstrap.py index a0fd8fb97cd..cbbb9ebdf97 100644 --- a/kernel-tests/unittest-bootstrap.py +++ b/kernel-tests/unittest-bootstrap.py @@ -11,6 +11,14 @@ config = configparser.ConfigParser() filename = os.environ['CRASH_PYTHON_TESTFILE'] +try: + matchfn = os.environ['CRASH_PYTHON_TESTS'] +except KeyError: + matchfn = None + +if not matchfn: + matchfn = "test_*.py" + try: f = open(filename) config.read_file(f) @@ -32,13 +40,13 @@ from crash.kernel import CrashKernel kernel = CrashKernel(roots=roots, vmlinux_debuginfo=vmlinux_debuginfo, module_path=module_path, - module_debuginfo_path=module_debuginfo_path) + module_debuginfo_path=module_debuginfo_path, verbose=True) kernel.setup_tasks() kernel.load_modules() test_loader = unittest.TestLoader() -test_suite = test_loader.discover('kernel-tests', pattern='test_*.py') +test_suite = test_loader.discover('kernel-tests', pattern=matchfn) ret = unittest.TextTestRunner(verbosity=2).run(test_suite) if not ret.wasSuccessful(): sys.exit(1) diff --git a/tests/run-kernel-tests.sh b/tests/run-kernel-tests.sh index 6fef357fbb7..45b01e5cb2d 100755 --- a/tests/run-kernel-tests.sh +++ b/tests/run-kernel-tests.sh @@ -1,5 +1,47 @@ #!/bin/bash +usage() { +cat <&2 +usage: $(basename $0) [options] + +Options: +-t | --tests + test_match is a pathname-style wildcard expression to specify + which tests to run +END +} + +TEMP=$(getopt -o 'ht:' --long 'help,tests:' -n "$(basename $0)" -- "$@") + +if test $? -ne 0; then + usage + exit 1 +fi + +eval set -- "$TEMP" +unset TEMP + +TESTS="" +while true; do + case "$1" in + '-t' | '--tests') + TESTS="$2" + shift 2 + ;; + '-h' | '--help') + usage + exit 0 + ;; + --) + shift + break + ;; + *) + echo "internal error [$1]" >&2 + exit 1 + esac +done + if test $# -eq 0; then echo "No ini files specified. Nothing to do." exit 0 @@ -19,6 +61,10 @@ export CRASH_PYTHON_TESTDIR=$DIR TOPDIR=$(realpath "$(dirname "$0")"/..) 
for f in "$@"; do export CRASH_PYTHON_TESTFILE="$f" + if test -n "$TESTS"; then + export CRASH_PYTHON_TESTS="$TESTS" + echo "Running subset $TESTS" + fi $RUN/run-gdb.sh -x $TOPDIR/kernel-tests/unittest-prepare.py \ -x $TOPDIR/kernel-tests/unittest-bootstrap.py done From 5bafbd8c8bfcbbe7bf3437e0b1f9f277f84d5985 Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Tue, 11 Jun 2019 21:19:39 -0400 Subject: [PATCH 243/367] crash.kernel: pass the verbose flag to _setup_vmlinux_debuginfo Signed-off-by: Jeff Mahoney --- crash/kernel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crash/kernel.py b/crash/kernel.py index 7ae47f3884d..a71cfdc542b 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -143,7 +143,7 @@ def __init__(self, roots: PathSpecifier = None, self.version = self.extract_version() self._setup_roots(roots, verbose) - self._setup_vmlinux_debuginfo(vmlinux_debuginfo) + self._setup_vmlinux_debuginfo(vmlinux_debuginfo, verbose) self._setup_module_path(module_path, verbose) self._setup_module_debuginfo_path(module_debuginfo_path, verbose) From 9b728b3324c3245ef38b6dbce88bd6686dd5b071 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Tue, 11 Jun 2019 21:35:02 -0400 Subject: [PATCH 244/367] crash.kernel: make debuginfo searching more flexible This commit modifies the debuginfo searching algorithm to look in each root for the debuginfo files under both the root and root/usr/lib/debug. Signed-off-by: Jeff Mahoney --- crash/kernel.py | 47 ++++++++++++++------------------ kernel-tests/unittest-prepare.py | 4 +++ 2 files changed, 24 insertions(+), 27 deletions(-) diff --git a/crash/kernel.py b/crash/kernel.py index a71cfdc542b..541ad6f571e 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -211,29 +211,29 @@ def _setup_roots(self, roots: PathSpecifier = None, if verbose: print("roots={}".format(self.roots)) + def _find_debuginfo_paths(self, variants: List[str]) -> List[str]: + x: List[str] = list() + + for root in self.roots: + for debug_path in [ "", "usr/lib/debug" ]: + for variant in variants: + path = os.path.join(root, debug_path, variant) + if os.path.exists(path): + x.append(path) + + return x + def _setup_vmlinux_debuginfo(self, vmlinux_debuginfo: PathSpecifier = None, verbose: bool = False) -> None: - debugroot = "/usr/lib/debug" if vmlinux_debuginfo is None: - x: List[str] = [] defaults = [ "{}.debug".format(self.kernel), "vmlinux-{}.debug".format(self.version), - "{}/{}.debug".format(debugroot, self.kernel), - "{}/boot/{}.debug".format(debugroot, - os.path.basename(self.kernel)), - "{}/boot/vmlinux-{}.debug".format(debugroot, self.version), + "boot/{}.debug".format(os.path.basename(self.kernel)), + "boot/vmlinux-{}.debug".format(self.version), ] - for root in self.roots: - for mpath in defaults: - path = "{}/{}".format(root, mpath) - if os.path.exists(path): - if x is None: - x = [path] - else: - x.append(path) - self.vmlinux_debuginfo = x + self.vmlinux_debuginfo = self._find_debuginfo_paths(defaults) elif (isinstance(vmlinux_debuginfo, list) and vmlinux_debuginfo and isinstance(vmlinux_debuginfo[0], str)): @@ -283,21 +283,14 @@ def _setup_module_path(self, module_path: PathSpecifier = None, def _setup_module_debuginfo_path(self, module_debuginfo_path: PathSpecifier = None, verbose: bool = False) -> None: - debugroot = "/usr/lib/debug" - x: List[str] = [] if module_debuginfo_path is None: + defaults = [ + "modules.debug", + "lib/modules/{}".format(self.version), + ] - path = "modules.debug" - if os.path.exists(path): - x.append(path) - - for root in 
self.roots: - path = "{}/{}/lib/modules/{}".format(root, debugroot, - self.version) - if os.path.exists(path): - x.append(path) - self.module_debuginfo_path = x + self.module_debuginfo_path = self._find_debuginfo_paths(defaults) elif (isinstance(module_debuginfo_path, list) and isinstance(module_debuginfo_path[0], str)): diff --git a/kernel-tests/unittest-prepare.py b/kernel-tests/unittest-prepare.py index 924684ace2a..4c3819d406a 100644 --- a/kernel-tests/unittest-prepare.py +++ b/kernel-tests/unittest-prepare.py @@ -29,6 +29,10 @@ module_path = config['test'].get('module_path', None) module_debuginfo_path = config['test'].get('module_debuginfo_path', None) +if roots: + dfd = ":".join(roots) + ":/usr/lib/debug" + gdb.execute(f"set debug-file-directory {dfd}") + try: if vmlinux.endswith(".gz"): vmlinux_gz = vmlinux From 7566595bca422f96f87c6f2fa691c5343b6b181c Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Wed, 12 Jun 2019 09:55:19 -0400 Subject: [PATCH 245/367] docs: add short summary of recent development changes Signed-off-by: Jeff Mahoney --- doc-source/api_changes.rst | 146 +++++++++++++++++++++++++++++++++++++ doc-source/development.rst | 19 +++++ doc-source/index.rst | 6 +- 3 files changed, 166 insertions(+), 5 deletions(-) create mode 100644 doc-source/api_changes.rst create mode 100644 doc-source/development.rst diff --git a/doc-source/api_changes.rst b/doc-source/api_changes.rst new file mode 100644 index 00000000000..57cbb0c0ec3 --- /dev/null +++ b/doc-source/api_changes.rst @@ -0,0 +1,146 @@ +API Changes +=========== + +April 2019 +---------- + +In April-June 2019, significant development effort was invested in improving code quality. The following changes were the result of that work. + +Python 3.6 Required +------------------- + +A system using Python 3.6 or newer is required. Recent development has taken advantage of features introduced in Python versions as recent as 3.6: + +- `typing for function parameters and return values `_ Introduced in 3.5 +- `typing for variables `_ -- Introduced in 3.6 +- `f-strings `_ -- Introduced in 3.6 + +This also means: + +- Python 2/3 compatibility hacks are obsolete and removed: + + - ``long = int if sys.version.major > 3`` + - ``from __future__`` imports + +- Exception handling uses ``except as `` syntax + +- Absolute imports are required + +- f-strings are preferred -- though there are cases where ``str.format()`` is still useful (e.g. with a static format string and per-call formatting). + + - There is much outstanding work in converting existing strings to f-strings, but it has not been a development priority. + +Typing +------ + +As part of the drive to improve code quality, I've added typing to every function and method in the project. ``make test`` with `mypy `_ installed will fail if there are functions or methods (or dependent variables) without typing information. In the example below, the new version of ``MyClass`` has several examples of typing in use. + +Public / Protected Namespace +---------------------------- + +The use of ``_`` as a prefix for protected members of classes is now expected and will be enforced during ``make test`` if `pylint `_ is installed. In the example below, several internal members and methods of `MyClass` have been renamed to indicate that they are +protected. 
+ +New mechanism for delayed lookups +--------------------------------- + +In earlier versions of crash-python, the way to pull symbols and types in your classes was to inherit from :class:`crash.infra.CrashBaseClass` and to export symbols desired in the global namespace by using the :func:`crash.infra.export` decorator. The infrastructure to make this work was complex and esoteric and formed a barrier to entry with benefits that were dwarfed by the cost of knowledge ramp-up to maintain it. It also required the developer to declare a class to contain the declarations even if a class wasn't really required for the implementation. + +The current version of crash-python uses the :class:`crash.util.symbol` module to do delayed lookups. This has several advantages: + +- These can be declared in class or module context (or object context, but there's no real reason to do it, IMO). + +- The namespaces are separated. There are no collisions within the host class as inferred names override class-defined names. + +- There are accessors beyond attributes. The :class:`.DelayedCollection` family of classes all have :meth:`~.DelayedCollection.__getattr__`, :meth:`~DelayedCollection.__getitem__`, and :meth:`~DelayedCollection.get` defined, so they can be accessed as attribute names, dictionary keys, or by function call. The latter two can be used with any name, but the attribute names cannot be used for symbols that start with ``__``. + +Example +------- + +An older crash-python module might look like: + +.. code-block:: py + + from crash.infra import CrashBaseClass, export + + class MyClass(CrashBaseClass): + __types__ = ['struct task_struct'] + __symvals__ = ['init_task'] + __symbol_callbacks__ = [('init_task', 'setup_init_task')] + valid = False + + def __init__(self, task): + self.init_task_types(task) + + @classmethod + def setup_init_task(cls, task): + # do something + pass + + @classmethod + def init_task_types(cls, task): + if not cls.valid: + if task.type == self.task_struct_type: + self.task_struct_type = task.type + + cls.valid = True + + def some_method(self): + print("i have an init_task at {:x}".format(int(self.init_task.address))) + + @export + def for_each_task(self): + task_list = self.init_task['tasks'] + for task in list_for_each_entry(task_list, self.task_struct_type, + 'task', include_head=True): + thread_list = task['thread_group'] + for thread in list_for_each_entry(thread_list, + self.task_struct_type, + 'thread_group'): + yield thread + + + +With :class:`CrashBaseClass` removed, typing added, f-string formatting used, and the code restructured to only put the minimum (contrived here) functionality in ``MyClass``, that same code looks like: + +.. 
code-block:: py + + from typing import Iterable + from crash.util.symbols import Types, Symvals, SymbolCallbacks + + types = Types(['struct task_struct']) + symvals = Symvals(['init_task']) + + class MyClass: + _valid = False + + def __init__(self, task: gdb.Value) -> None: + self._init_task_types(task) + + @classmethod + def _init_task_types(cls, task: gdb.Value) -> None: + if not cls._valid: + if task.type == self.task_struct_type: + types.override('struct task_struct', task.type) + + cls._valid = True + + @classmethod + def _setup_init_task(cls) -> None: + # do something + pass + + symbol_cbs = SymbolCallbacks([('init_task', MyClass._setup_init_task)]) + + def some_method() -> None: + print(f"i have an init_task at {int(symvals.init_task.address):#x}") + + def for_each_task() -> Iterable[gdb.Value]: + task_list = symvals.init_task['tasks'] + for task in list_for_each_entry(task_list, types.task_struct_type, + 'task', include_head=True): + thread_list = task['thread_group'] + for thread in list_for_each_entry(thread_list, + types.task_struct_type, + 'thread_group'): + yield thread diff --git a/doc-source/development.rst b/doc-source/development.rst new file mode 100644 index 00000000000..13b4c2cfeb2 --- /dev/null +++ b/doc-source/development.rst @@ -0,0 +1,19 @@ +Development +=========== + +.. toctree:: + :maxdepth: 2 + + api_changes + testing + kdump/modules + crash/modules + + gdb-internals + +Indices and tables +------------------ + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/doc-source/index.rst b/doc-source/index.rst index 416324d68e6..c8f9931e5df 100644 --- a/doc-source/index.rst +++ b/doc-source/index.rst @@ -28,12 +28,8 @@ Table of Contents :maxdepth: 2 installation - testing user_guide - kdump/modules - crash/modules - - gdb-internals + development Indices and tables ------------------ From 53476aa29fdc1878d568a9f139b260c613e8f76f Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Wed, 12 Jun 2019 11:00:12 -0400 Subject: [PATCH 246/367] crash.sh: use correct variable name for test target An older variable name was used in crash.sh for the test target, causing startup failure. Signed-off-by: Jeff Mahoney --- crash.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crash.sh b/crash.sh index 2490d6a0dd6..2bf23486d9d 100755 --- a/crash.sh +++ b/crash.sh @@ -178,7 +178,7 @@ else TEST_GDBINIT="/usr/share/crash-python/test-gdb-compatibility.gdbinit" fi -if ! $GDB -nx -batch -x $GDBINIT -x $TEST_TARGET; then +if ! $GDB -nx -batch -x $GDBINIT -x $TEST_GDBINIT; then echo "fatal: crash-python cannot initialize" >&2 exit 1 fi From 105281a23e5d51e73bf49662bc82927e98fb09ea Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Wed, 12 Jun 2019 11:01:59 -0400 Subject: [PATCH 247/367] crash.sh: add warning if help text needs updating This commit adds a warning if the help text is out of date when running out of the git repo. We don't want to force a full rebuild when updating a command. 
Signed-off-by: Jeff Mahoney --- crash.sh | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/crash.sh b/crash.sh index 2bf23486d9d..5739ce270d4 100755 --- a/crash.sh +++ b/crash.sh @@ -172,6 +172,14 @@ if [ -e "$DIR/setup.py" ]; then popd > /dev/null export CRASH_PYTHON_HELP="$DIR/docs/text" TEST_GDBINIT="test-gdb-compatibility.gdbinit" + + for command in $(find crash/commands/*.py); do + if test "$command" -nt "$CRASH_PYTHON_HELP"; then + echo "warning: help text documentation is out-of-date" >&2 + echo "To update it, run 'make doc-help'" >&2 + break + fi + done else export CRASH_PYTHON_HELP="/usr/share/crash-python/help" :> $GDBINIT From cfbf5726ebc0b2d5ba083dc25e06e3c5c0bd6de3 Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Wed, 12 Jun 2019 11:22:03 -0400 Subject: [PATCH 248/367] docs: add new command documentation to api changes Signed-off-by: Jeff Mahoney --- doc-source/api_changes.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/doc-source/api_changes.rst b/doc-source/api_changes.rst index 57cbb0c0ec3..aefdbe089c1 100644 --- a/doc-source/api_changes.rst +++ b/doc-source/api_changes.rst @@ -54,6 +54,11 @@ The current version of crash-python uses the :class:`crash.util.symbol` module t - There are accessors beyond attributes. The :class:`.DelayedCollection` family of classes all have :meth:`~.DelayedCollection.__getattr__`, :meth:`~DelayedCollection.__getitem__`, and :meth:`~DelayedCollection.get` defined, so they can be accessed as attribute names, dictionary keys, or by function call. The latter two can be used with any name, but the attribute names cannot be used for symbols that start with ``__``. +Command documentation +--------------------- + +In earlier versions of crash-python, commands were documented using the docstring of the Command itself. This has changed to use the docstring of the module instead. More details can be found in :class:`~crash.command.Command` and :class:`~crash.command.ArgumentParser`. The format of the docstring is `reStructuredText `_ and is parsed using `Sphinx `_. The documentation is used for both the user guide and the application command help. This is an area that is subject to change in the future. + Example ------- From a38634c209b9b4304be94f8f3fc14de2292b51bf Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Wed, 12 Jun 2019 11:22:28 -0400 Subject: [PATCH 249/367] docs: add explanations for make targets Signed-off-by: Jeff Mahoney --- doc-source/development.rst | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/doc-source/development.rst b/doc-source/development.rst index 13b4c2cfeb2..8768eb31897 100644 --- a/doc-source/development.rst +++ b/doc-source/development.rst @@ -11,6 +11,27 @@ Development gdb-internals + +Documentation is automatically build from the python code for the user +guide, command help text, and API reference. + +There are several make targets to assist in your development efforts: + +- ``make`` or ``make all`` -- Start fresh, build the python code + and documentation, and then run the standalone test suite. + +- ``make doc`` -- Build all documentation (html, text, and man page). + +- ``make doc-help`` -- Build only the documentation required for help text + +- ``make doc-html`` -- Build the user manual + +- ``make man`` -- Build the man page + +For testing, see the :doc:`testing` section. + +To develop a command, see the :mod:`crash.commands` API. 
+ Indices and tables ------------------ From afcf006b5638d2ea591c703c22f9f00bd97aa644 Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Wed, 12 Jun 2019 11:35:41 -0400 Subject: [PATCH 250/367] docs: don't specify sidebar contents Our sidebar specification meant that we didn't have a navigation section. We don't need anything special. Signed-off-by: Jeff Mahoney --- doc-source/conf.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/doc-source/conf.py b/doc-source/conf.py index bddfa56d4a7..0101071bebd 100644 --- a/doc-source/conf.py +++ b/doc-source/conf.py @@ -171,16 +171,15 @@ def setup(app): # # This is required for the alabaster theme # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars -html_sidebars = { - '**': [ - 'relations.html', # needs 'show_related': True theme option to display - 'searchbox.html', - ] -} +#html_sidebars = { +# '**': [ +# 'relations.html', # needs 'show_related': True theme option to display +# 'searchbox.html', +# ] +#} html_theme_options = { 'description': 'Kernel debugger in Python', - 'logo': 'logo.png', 'logo_name': True, 'logo_text_align': 'center', 'github_user': 'jeffmahoney', From 810e229fbf30e7b4c684a0c6f794935ecfe1f814 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Thu, 13 Jun 2019 16:04:35 -0400 Subject: [PATCH 251/367] crash: fix some typing errors mypy really doesn't like it when you reuse the same variable for different things. The "worst" case is crash.requirements, which is easy to fix. The other cases are just errors that didn't get caught due to gdb not having type stubs. I'm writing those up and they caught a few errors. Signed-off-by: Jeff Mahoney --- crash/arch/__init__.py | 2 +- crash/infra/lookup.py | 4 ++-- crash/requirements/__init__.py | 23 ++++++++++++++--------- crash/types/slab.py | 2 +- crash/util/__init__.py | 2 +- crash/util/symbols.py | 2 +- 6 files changed, 20 insertions(+), 15 deletions(-) diff --git a/crash/arch/__init__.py b/crash/arch/__init__.py index 7c19b709829..3b1426d7b3b 100644 --- a/crash/arch/__init__.py +++ b/crash/arch/__init__.py @@ -41,7 +41,7 @@ def __init__(self, address: int) -> None: self.address = address gdb.frame_filters[self.name] = self - def filter(self, frame_iter: Iterator[FrameDecorator]) -> Any: + def filter(self, frame_iter: Iterator[Any]) -> Any: return KernelAddressIterator(frame_iter, self.address) class KernelAddressIterator: diff --git a/crash/infra/lookup.py b/crash/infra/lookup.py index c6e849f6335..f9eb09bcaa0 100644 --- a/crash/infra/lookup.py +++ b/crash/infra/lookup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from typing import Tuple, Any, Union +from typing import Tuple, Any, Union, Optional from crash.infra.callback import ObjfileEventCallback from crash.infra.callback import Callback @@ -153,7 +153,7 @@ class SymvalCallback(SymbolCallback): See :obj:`SymbolCallback` for arguments. """ - def check_ready(self) -> gdb.Value: + def check_ready(self) -> Optional[gdb.Value]: # type: ignore """ After successfully looking up the :obj:`gdb.Symbol`, returns the :obj:`gdb.Value` associated with it. 
diff --git a/crash/requirements/__init__.py b/crash/requirements/__init__.py index 0c097593717..d36b5310cd2 100644 --- a/crash/requirements/__init__.py +++ b/crash/requirements/__init__.py @@ -7,38 +7,43 @@ import gdb try: - x = gdb.Target + x1 = gdb.Target + del x1 except AttributeError as e: raise IncompatibleGDBError("gdb.Target") try: - x = gdb.lookup_symbol('x', None) + x2 = gdb.lookup_symbol('x', None) + del x2 except TypeError as e: raise IncompatibleGDBError("a compatible gdb.lookup_symbol") try: - x = gdb.MinSymbol + x3 = gdb.MinSymbol + del x3 except AttributeError as e: raise IncompatibleGDBError("gdb.MinSymbol") try: - x = gdb.Register + x4 = gdb.Register + del x4 except AttributeError as e: raise IncompatibleGDBError("gdb.Register") try: - x = gdb.Symbol.section + x5 = gdb.Symbol.section + del x5 except AttributeError as e: raise IncompatibleGDBError("gdb.Symbol.section") try: - x = gdb.Inferior.new_thread + x6 = gdb.Inferior.new_thread + del x6 except AttributeError as e: raise IncompatibleGDBError("gdb.Inferior.new_thread") try: - x = gdb.Objfile.architecture + x7 = gdb.Objfile.architecture + del x7 except AttributeError as e: raise IncompatibleGDBError("gdb.Objfile.architecture") - -del x diff --git a/crash/types/slab.py b/crash/types/slab.py index 5bd67d8c99c..454c1c256b0 100644 --- a/crash/types/slab.py +++ b/crash/types/slab.py @@ -176,7 +176,7 @@ def find_obj(self, addr: int) -> Union[int, None]: return self.s_mem + (idx * bufsize) def contains_obj(self, - addr: int) -> Tuple[bool, int, Union[gdb.Value, None]]: + addr: int) -> Tuple[bool, int, Optional[gdb.Value]]: obj_addr = self.find_obj(addr) if not obj_addr: diff --git a/crash/util/__init__.py b/crash/util/__init__.py index 015528d039b..90bc4ce1db1 100644 --- a/crash/util/__init__.py +++ b/crash/util/__init__.py @@ -334,7 +334,7 @@ def array_size(value: gdb.Value) -> int: """ return value.type.sizeof // value[0].type.sizeof -def get_typed_pointer(val: AddressSpecifier, gdbtype: gdb.Type) -> gdb.Type: +def get_typed_pointer(val: AddressSpecifier, gdbtype: gdb.Type) -> gdb.Value: """ Returns a pointer to the requested type at the given address diff --git a/crash/util/symbols.py b/crash/util/symbols.py index 96be1afc7fe..cbe806731f5 100644 --- a/crash/util/symbols.py +++ b/crash/util/symbols.py @@ -132,7 +132,7 @@ class Types(DelayedCollection): def __init__(self, names: Names) -> None: super(Types, self).__init__(DelayedType, names) - def override(self, name: str, value: gdb.Type) -> None: + def override(self, name: str, value: gdb.Type) -> None: # type: ignore """ Override the type value, resolving the type name first. From 17945a63fb486cdb06560c2d2b740fa831319d42 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Mon, 24 Jun 2019 11:36:39 +0200 Subject: [PATCH 252/367] crash.sh: add missing $DIR prefixes Fixes some file not found errors when pycrash is run from git checkout and CWD points elsewhere. 
Signed-off-by: Vlastimil Babka --- crash.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crash.sh b/crash.sh index 5739ce270d4..f750617f48e 100755 --- a/crash.sh +++ b/crash.sh @@ -171,9 +171,9 @@ if [ -e "$DIR/setup.py" ]; then echo "python sys.path.insert(0, '$DIR/build/lib')" >> $GDBINIT popd > /dev/null export CRASH_PYTHON_HELP="$DIR/docs/text" - TEST_GDBINIT="test-gdb-compatibility.gdbinit" + TEST_GDBINIT="$DIR/test-gdb-compatibility.gdbinit" - for command in $(find crash/commands/*.py); do + for command in $(find $DIR/crash/commands/*.py); do if test "$command" -nt "$CRASH_PYTHON_HELP"; then echo "warning: help text documentation is out-of-date" >&2 echo "To update it, run 'make doc-help'" >&2 From b92b0952207852775e8570807c92c93b4beb6319 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 24 Jun 2019 13:38:46 -0400 Subject: [PATCH 253/367] crash.kernel: remove leading space in debug path initialization Signed-off-by: Jeff Mahoney --- crash/kernel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crash/kernel.py b/crash/kernel.py index 541ad6f571e..ef0acfa0eb8 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -215,7 +215,7 @@ def _find_debuginfo_paths(self, variants: List[str]) -> List[str]: x: List[str] = list() for root in self.roots: - for debug_path in [ "", "usr/lib/debug" ]: + for debug_path in ["", "usr/lib/debug"]: for variant in variants: path = os.path.join(root, debug_path, variant) if os.path.exists(path): From 509905462ddf3bf1407da0630536757efaed9696 Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Wed, 12 Jun 2019 14:37:40 -0400 Subject: [PATCH 254/367] kdump.target: remove unused addrxlat context The kdump target doesn't need an addrxlat context since it always requests reads from the dump via the virtual address. 
Signed-off-by: Jeff Mahoney --- kdump/target.py | 29 +---------------------------- 1 file changed, 1 insertion(+), 28 deletions(-) diff --git a/kdump/target.py b/kdump/target.py index e8c46f7fcbd..a2b359df7a8 100644 --- a/kdump/target.py +++ b/kdump/target.py @@ -1,43 +1,18 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from typing import Tuple, Optional +from typing import Tuple import sys from kdumpfile import kdumpfile, KDUMP_KVADDR from kdumpfile.exceptions import AddressTranslationException, EOFException -import addrxlat import addrxlat.exceptions import gdb PTID = Tuple[int, int, int] -class SymbolCallback: - "addrxlat symbolic callback" - - def __init__(self, ctx: Optional[addrxlat.Context] = None) -> None: - self.ctx = ctx - - def __call__(self, symtype: int, *args: int) -> int: - if self.ctx is not None: - try: - return self.ctx.next_cb_sym(symtype, *args) - except addrxlat.exceptions.BaseException: - self.ctx.clear_err() - - if symtype == addrxlat.SYM_VALUE: - ms = gdb.lookup_minimal_symbol(args[0]) - if ms is not None: - return int(ms.value().address) - - # pylint: disable=no-member - raise addrxlat.exceptions.NoDataError() - - # This silences pylint: disable=inconsistent-return-statements - return 0 # pylint: disable=unreachable - class Target(gdb.Target): def __init__(self, debug: bool = False) -> None: super().__init__() @@ -63,8 +38,6 @@ def open(self, filename: str, from_tty: bool) -> None: # pylint: disable=unsupported-assignment-operation self.kdump.attr['addrxlat.ostype'] = 'linux' - ctx = self.kdump.get_addrxlat_ctx() - ctx.cb_sym = SymbolCallback(ctx) KERNELOFFSET = "linux.vmcoreinfo.lines.KERNELOFFSET" try: From 885f38736dcb346ae145ad6bf6d78cbd64823d12 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 24 Jun 2019 09:46:13 -0400 Subject: [PATCH 255/367] crash.util: annotate return of safe_get_symbol_value as Optional safe_get_symbol_value exists to avoid throwing an exception on a failed lookup. It returns None by design so annotated it as such. Signed-off-by: Jeff Mahoney --- crash/util/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crash/util/__init__.py b/crash/util/__init__.py index 90bc4ce1db1..f012955692d 100644 --- a/crash/util/__init__.py +++ b/crash/util/__init__.py @@ -131,7 +131,7 @@ def get_symbol_value(symname: str, block: gdb.Block = None, raise MissingSymbolError("Cannot locate symbol {}".format(symname)) def safe_get_symbol_value(symname: str, block: gdb.Block = None, - domain: int = None) -> gdb.Value: + domain: int = None) -> Optional[gdb.Value]: """ Returns the value associated with a named symbol From 4ec3d9cc3d246efa6ad7be5b423f16110346ceeb Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 24 Jun 2019 09:48:47 -0400 Subject: [PATCH 256/367] crash.util: fix get_typed_pointer typing The variable reuse is confusing the static checker. Handle passing in a value that can't be converted to an address. 
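
As a minimal illustration of what the checker objects to (made-up names, not
the project code; the exact diagnostic wording depends on the mypy version):

    # mypy pins a variable's type to its annotation, so rebinding the same
    # name to a converted value of another type is reported as:
    #   Incompatible types in assignment (expression has type "int",
    #   variable has type "str")
    def parse_addr(val: str) -> int:
        val = int(val, 16)      # flagged by mypy
        return val

    # The fix used throughout these patches: bind the converted value to its
    # own name so each variable keeps a single static type.
    def parse_addr_fixed(val: str) -> int:
        ret = int(val, 16)
        return ret
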
Signed-off-by: Jeff Mahoney --- crash/util/__init__.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/crash/util/__init__.py b/crash/util/__init__.py index f012955692d..1df6c09890c 100644 --- a/crash/util/__init__.py +++ b/crash/util/__init__.py @@ -351,7 +351,8 @@ def get_typed_pointer(val: AddressSpecifier, gdbtype: gdb.Type) -> gdb.Value: gdb.Value: The casted pointer of the requested type Raises: - TypeError: string value for val does not describe a hex address + TypeError: string value for val does not describe a hex address or + the type cannot be converted to an address """ if gdbtype.code != gdb.TYPE_CODE_PTR: gdbtype = gdbtype.pointer() @@ -363,12 +364,17 @@ def get_typed_pointer(val: AddressSpecifier, gdbtype: gdb.Type) -> gdb.Value: val = int(val, 16) except TypeError as e: raise TypeError("string must describe hex address: {}".format(e)) + else: + val = int(val) + if isinstance(val, int): - val = gdb.Value(val).cast(gdbtype) + ret = gdb.Value(val).cast(gdbtype) + elif isinstance(val, gdb.Value): + ret = val.cast(gdbtype) else: - val = val.cast(gdbtype) + raise TypeError(f"val is unexpected type {type(val)}") - return val + return ret def array_for_each(value: gdb.Value) -> Iterator[gdb.Value]: """ From e9f590ab81f292794c131928d7d3d663bb5e422d Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 24 Jun 2019 09:59:38 -0400 Subject: [PATCH 257/367] crash: remove initializers to None for non-optional values When we use a default value of None, the static checker assumes that value is valid even when it's not supposed to be. Remove the initialization and deal with the missing attribute as needed. Signed-off-by: Jeff Mahoney --- crash/commands/vtop.py | 4 ++-- crash/types/cpu.py | 4 ++-- crash/types/page.py | 8 ++++---- crash/types/slab.py | 4 ++-- crash/types/task.py | 4 ++-- 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/crash/commands/vtop.py b/crash/commands/vtop.py index d150a8f2e21..4eaad2320af 100644 --- a/crash/commands/vtop.py +++ b/crash/commands/vtop.py @@ -146,9 +146,9 @@ class LinuxPGT: def __init__(self, ctx: addrxlat.Context, sys: addrxlat.System) -> None: self.context = ctx self.system = sys - self.step: addrxlat.Step = None + self.step: addrxlat.Step self.table = self.table_names[0] - self.ptr: addrxlat.FullAddress = None + self.ptr: addrxlat.FullAddress self.note = '' def begin(self, addr: int) -> bool: diff --git a/crash/types/cpu.py b/crash/types/cpu.py index 9034e19b347..7ca1c0b662e 100644 --- a/crash/types/cpu.py +++ b/crash/types/cpu.py @@ -25,8 +25,8 @@ class TypesCPUClass: cpus_online: List[int] = list() cpus_possible: List[int] = list() - _cpu_online_mask: gdb.Value = None - _cpu_possible_mask: gdb.Value = None + _cpu_online_mask: gdb.Value + _cpu_possible_mask: gdb.Value def __init__(self) -> None: raise NotImplementedError("This class is not meant to be instantiated") diff --git a/crash/types/page.py b/crash/types/page.py index 81ef4d01656..f8e7c98aaf0 100644 --- a/crash/types/page.py +++ b/crash/types/page.py @@ -27,7 +27,7 @@ class Page: slab_page_name = None compound_head_name = None vmemmap_base = 0xffffea0000000000 - vmemmap: gdb.Value = None + vmemmap: gdb.Value directmap_base = 0xffff880000000000 pageflags: Dict[str, int] = dict() @@ -71,7 +71,8 @@ def setup_page_type(cls, gdbtype: gdb.Type) -> None: cls.slab_cache_name = find_member_variant(gdbtype, ['slab_cache', 'lru']) cls.slab_page_name = find_member_variant(gdbtype, ['slab_page', 'lru']) cls.compound_head_name = find_member_variant(gdbtype, 
['compound_head', 'first_page']) - cls.vmemmap = gdb.Value(cls.vmemmap_base).cast(gdbtype.pointer()) + if not hasattr(cls, 'vmemmap'): + cls.vmemmap = gdb.Value(cls.vmemmap_base).cast(gdbtype.pointer()) cls.setup_page_type_done = True if cls.setup_pageflags_done and not cls.setup_pageflags_finish_done: @@ -114,8 +115,7 @@ def setup_vmemmap_base(cls, symbol: gdb.Symbol) -> None: cls.vmemmap_base = int(symbol.value()) # setup_page_type() was first and used the hardcoded initial value, # we have to update - if cls.vmemmap is not None: - cls.vmemmap = gdb.Value(cls.vmemmap_base).cast(types.page_type.pointer()) + cls.vmemmap = gdb.Value(cls.vmemmap_base).cast(types.page_type.pointer()) @classmethod def setup_directmap_base(cls, symbol: gdb.Symbol) -> None: diff --git a/crash/types/slab.py b/crash/types/slab.py index 454c1c256b0..528f0e0229c 100644 --- a/crash/types/slab.py +++ b/crash/types/slab.py @@ -46,8 +46,8 @@ class Slab: slab_list_head: str = 'list' page_slab: bool = False - real_slab_type: gdb.Type = None - bufctl_type: gdb.Type = None + real_slab_type: gdb.Type + bufctl_type: gdb.Type @classmethod def check_page_type(cls, gdbtype: gdb.Type) -> None: diff --git a/crash/types/task.py b/crash/types/task.py index be80dfa4011..9bc8a8bb9d3 100644 --- a/crash/types/task.py +++ b/crash/types/task.py @@ -242,8 +242,8 @@ def __init__(self, task_struct: gdb.Value) -> None: self.cpu = -1 self.regs: Dict[str, int] = dict() - self.thread_info: gdb.Value = None - self.thread: gdb.InferiorThread = None + self.thread_info: gdb.Value + self.thread: gdb.InferiorThread # mem data self.mem_valid = False From 0743f34097a02b5bdf01dd7cf147d8740528bb8f Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 24 Jun 2019 10:00:57 -0400 Subject: [PATCH 258/367] crash.types.page: use integer division Python 3 division returns a float instead of an int like Python 2 did. 
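
For reference, the Python 3 semantics this depends on (plain Python, nothing
project-specific):

    >>> 4096 / 64     # true division always produces a float in Python 3
    64.0
    >>> 4096 // 64    # floor division keeps the int needed for index math
    64
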
Signed-off-by: Jeff Mahoney --- crash/types/page.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crash/types/page.py b/crash/types/page.py index f8e7c98aaf0..74dfdab68ae 100644 --- a/crash/types/page.py +++ b/crash/types/page.py @@ -81,13 +81,13 @@ def setup_page_type(cls, gdbtype: gdb.Type) -> None: @classmethod def setup_mem_section(cls, gdbtype: gdb.Type) -> None: # TODO assumes SPARSEMEM_EXTREME - cls.SECTIONS_PER_ROOT = cls.PAGE_SIZE / gdbtype.sizeof + cls.SECTIONS_PER_ROOT = cls.PAGE_SIZE // gdbtype.sizeof @classmethod def pfn_to_page(cls, pfn: int) -> gdb.Value: if cls.sparsemem: section_nr = pfn >> (cls.SECTION_SIZE_BITS - cls.PAGE_SHIFT) - root_idx = section_nr / cls.SECTIONS_PER_ROOT + root_idx = section_nr // cls.SECTIONS_PER_ROOT offset = section_nr & (cls.SECTIONS_PER_ROOT - 1) section = symvals.mem_section[root_idx][offset] @@ -166,7 +166,7 @@ def setup_pageflags_finish(cls) -> None: @classmethod def from_obj(cls, page: gdb.Value) -> 'Page': - pfn = (int(page.address) - Page.vmemmap_base) / types.page_type.sizeof + pfn = (int(page.address) - Page.vmemmap_base) // types.page_type.sizeof return Page(page, pfn) @classmethod @@ -256,7 +256,7 @@ def page_from_addr(addr: int) -> 'Page': return pfn_to_page(pfn) def page_from_gdb_obj(gdb_obj: gdb.Value) -> 'Page': - pfn = (int(gdb_obj.address) - Page.vmemmap_base) / types.page_type.sizeof + pfn = (int(gdb_obj.address) - Page.vmemmap_base) // types.page_type.sizeof return Page(gdb_obj, pfn) def for_each_page() -> Iterable['Page']: From ed85e170d67fcdcaf8254a71a1ae7e89732dd3a4 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 24 Jun 2019 09:52:32 -0400 Subject: [PATCH 259/367] crash.addrxlat: callback routines return int addrxlat isn't involved with gdb so those routines return int Signed-off-by: Jeff Mahoney --- crash/addrxlat.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crash/addrxlat.py b/crash/addrxlat.py index 9949f6c0ffd..0f57ba36154 100644 --- a/crash/addrxlat.py +++ b/crash/addrxlat.py @@ -36,11 +36,11 @@ def cb_sym(self, symtype: int, *args: str) -> int: return super().cb_sym(symtype, *args) - def cb_read32(self, faddr: addrxlat.FullAddress) -> gdb.Value: + def cb_read32(self, faddr: addrxlat.FullAddress) -> int: v = gdb.Value(faddr.addr).cast(types.uint32_t_p_type) return int(v.dereference()) - def cb_read64(self, faddr: addrxlat.FullAddress) -> gdb.Value: + def cb_read64(self, faddr: addrxlat.FullAddress) -> int: v = gdb.Value(faddr.addr).cast(types.uint64_t_p_type) return int(v.dereference()) From d610da6c5d8c59539642458e67b878dc3f5f351d Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 24 Jun 2019 10:19:24 -0400 Subject: [PATCH 260/367] crash.types.slab: fix typing for array cache --- crash/commands/kmem.py | 4 ++-- crash/types/slab.py | 15 +++++++++------ 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/crash/commands/kmem.py b/crash/commands/kmem.py index c829fcea3fe..2bdb5d3e322 100644 --- a/crash/commands/kmem.py +++ b/crash/commands/kmem.py @@ -100,13 +100,13 @@ def execute(self, args: argparse.Namespace) -> None: elif obj[2] is not None: ac = obj[2] ac_type = ac['ac_type'] # pylint: disable=unsubscriptable-object - nid_tgt = ac['nid_tgt'] # pylint: disable=unsubscriptable-object + nid_tgt = int(ac['nid_tgt']) # pylint: disable=unsubscriptable-object if ac_type == "percpu": ac_desc = "cpu %d cache" % nid_tgt elif ac_type == "shared": ac_desc = "shared cache on node %d" % nid_tgt elif ac_type == "alien": - nid_src = ac['nid_src'] # pylint: 
disable=unsubscriptable-object + nid_src = int(ac['nid_src']) # pylint: disable=unsubscriptable-object ac_desc = "alien cache of node %d for node %d" % \ (nid_src, nid_tgt) else: diff --git a/crash/types/slab.py b/crash/types/slab.py index 528f0e0229c..0138e95793b 100644 --- a/crash/types/slab.py +++ b/crash/types/slab.py @@ -22,6 +22,8 @@ AC_SHARED = "shared" AC_ALIEN = "alien" +ArrayCacheEntry = Dict[str, Union[int, str]] + slab_partial = 0 slab_full = 1 slab_free = 2 @@ -175,8 +177,8 @@ def find_obj(self, addr: int) -> Union[int, None]: return self.s_mem + (idx * bufsize) - def contains_obj(self, - addr: int) -> Tuple[bool, int, Optional[gdb.Value]]: + def contains_obj(self, addr: int) -> Tuple[bool, int, + Optional[ArrayCacheEntry]]: obj_addr = self.find_obj(addr) if not obj_addr: @@ -243,7 +245,8 @@ def check(self, slabtype: int, nid: int) -> int: self.__error(": OFF_SLAB struct slab is in a wrong cache %s" % struct_slab_cache) - struct_slab_obj = struct_slab_slab.contains_obj(self.gdb_obj.address) + addr = int(self.gdb_obj.address) + struct_slab_obj = struct_slab_slab.contains_obj(addr) if not struct_slab_obj[0]: self.__error(": OFF_SLAB struct slab is not allocated") print(struct_slab_obj) @@ -358,8 +361,8 @@ def __get_nodelists(self) -> Iterable[Tuple[int, gdb.Value]]: yield (nid, node.dereference()) @staticmethod - def all_find_obj(addr: int) -> Union[None, Tuple[bool, int, - Union[gdb.Value, None]]]: + def all_find_obj(addr: int) -> Optional[Tuple[bool, int, + Optional[ArrayCacheEntry]]]: slab = slab_from_obj_addr(addr) if not slab: return None @@ -444,7 +447,7 @@ def __fill_all_array_caches(self) -> None: self.__fill_alien_caches(node, nid) - def get_array_caches(self) -> Dict[int, Dict]: + def get_array_caches(self) -> Dict[int, ArrayCacheEntry]: if not self.array_caches: self.__fill_all_array_caches() From d173aed7238d596981c17d48842438f523fa41be Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 24 Jun 2019 10:21:13 -0400 Subject: [PATCH 261/367] crash.types.page: for_each_page yields gdb.Value --- crash/types/page.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crash/types/page.py b/crash/types/page.py index 74dfdab68ae..161de4df989 100644 --- a/crash/types/page.py +++ b/crash/types/page.py @@ -259,7 +259,7 @@ def page_from_gdb_obj(gdb_obj: gdb.Value) -> 'Page': pfn = (int(gdb_obj.address) - Page.vmemmap_base) // types.page_type.sizeof return Page(gdb_obj, pfn) -def for_each_page() -> Iterable['Page']: +def for_each_page() -> Iterable[gdb.Value]: # TODO works only on x86? max_pfn = int(gdb.lookup_global_symbol("max_pfn").value()) for pfn in range(max_pfn): From b4cd3825a29578f6fa2831232e87fc2df0219465 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 24 Jun 2019 10:22:19 -0400 Subject: [PATCH 262/367] crash.types.page: use Symvals to get max_pfn --- crash/types/page.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crash/types/page.py b/crash/types/page.py index 161de4df989..474f6480e7e 100644 --- a/crash/types/page.py +++ b/crash/types/page.py @@ -18,7 +18,7 @@ types = Types(['unsigned long', 'struct page', 'enum pageflags', 'enum zone_type', 'struct mem_section']) -symvals = Symvals(['mem_section']) +symvals = Symvals(['mem_section', 'max_pfn']) PageType = TypeVar('PageType', bound='Page') @@ -261,7 +261,7 @@ def page_from_gdb_obj(gdb_obj: gdb.Value) -> 'Page': def for_each_page() -> Iterable[gdb.Value]: # TODO works only on x86? 
- max_pfn = int(gdb.lookup_global_symbol("max_pfn").value()) + max_pfn = int(symvals.max_pfn) for pfn in range(max_pfn): try: yield Page.pfn_to_page(pfn) From b35b3c275ebf805a65337b2b0e00d7f1b2329ac9 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 24 Jun 2019 10:27:31 -0400 Subject: [PATCH 263/367] crash: fix typing for iterables An XFS AIL loop and the klist_for_each_entry loop were annotated as returning gdb.Value rather than an Iterable[gdb.Value]. Signed-off-by: Jeff Mahoney --- crash/subsystem/filesystem/xfs.py | 2 +- crash/types/klist.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crash/subsystem/filesystem/xfs.py b/crash/subsystem/filesystem/xfs.py index 0bbc7c6ddd5..3c032c693ef 100644 --- a/crash/subsystem/filesystem/xfs.py +++ b/crash/subsystem/filesystem/xfs.py @@ -636,7 +636,7 @@ def xfs_format_xfsbuf(buf: gdb.Value) -> str: f"size {buf['b_buffer_len']:d}, block number {buf['b_bn']:d}, " \ f"flags {bflags}, state {state}" -def xfs_for_each_ail_log_item_typed(mp: gdb.Value) -> gdb.Value: +def xfs_for_each_ail_log_item_typed(mp: gdb.Value) -> Iterable[gdb.Value]: """ Iterates over the XFS Active Item Log and returns each item, resolved to the specific type. diff --git a/crash/types/klist.py b/crash/types/klist.py index 3768a8bdbca..6283971a330 100644 --- a/crash/types/klist.py +++ b/crash/types/klist.py @@ -42,7 +42,7 @@ def klist_for_each(klist: gdb.Value) -> Iterable[gdb.Value]: yield node def klist_for_each_entry(klist: gdb.Value, gdbtype: gdb.Type, - member: str) -> gdb.Value: + member: str) -> Iterable[gdb.Value]: """ Iterate over a klist and yield each node's containing object From 5a899bbc87d97cf6ef66e8997820dd75fb0c8c1d Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 24 Jun 2019 10:28:42 -0400 Subject: [PATCH 264/367] crash.exceptions: ArgumentTypeError takes a value not a type We resolve the type in __init__ so we need to annotate it as Any. Signed-off-by: Jeff Mahoney --- crash/exceptions.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crash/exceptions.py b/crash/exceptions.py index 8dca0082642..02429777330 100644 --- a/crash/exceptions.py +++ b/crash/exceptions.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from typing import Type +from typing import Type, Any import gdb @@ -37,7 +37,7 @@ class ArgumentTypeError(InvalidArgumentError): """The provided object could not be converted to the expected type""" _fmt = "cannot convert argument `{}' of type {} to {}" - def __init__(self, name: str, val: Type, expected_type: Type) -> None: + def __init__(self, name: str, val: Any, expected_type: Type) -> None: msg = self._fmt.format(name, self.format_clsname(val.__class__), self.format_clsname(expected_type)) super().__init__(msg) From 18ed5725e1b20499cf0967ae18986cb41ced11f3 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 24 Jun 2019 10:29:48 -0400 Subject: [PATCH 265/367] crash.exceptions: UnexpectedGDBTypeError accepts a gdb.Value We resolve the type in __init__ so annotate it as accepting gdb.Value. 
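
The same idea in a plain-Python sketch (illustrative names only, not the gdb
API): the exception receives the offending value and derives the type for its
message itself, so callers no longer pass a type by hand.

    class UnexpectedTypeError(TypeError):
        _fmt = "expected `{}' to be {} not {}"

        def __init__(self, name: str, val: object, expected: type) -> None:
            super().__init__(self._fmt.format(name, expected.__name__,
                                              type(val).__name__))

    # A caller hands over the value itself, e.g.:
    #   raise UnexpectedTypeError('list_head', list_head, ListHead)
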
Signed-off-by: Jeff Mahoney --- crash/exceptions.py | 4 ++-- crash/types/list.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/crash/exceptions.py b/crash/exceptions.py index 02429777330..d0c4d3b3cde 100644 --- a/crash/exceptions.py +++ b/crash/exceptions.py @@ -55,9 +55,9 @@ class UnexpectedGDBTypeBaseError(InvalidArgumentError): class UnexpectedGDBTypeError(UnexpectedGDBTypeBaseError): """The gdb.Type passed describes an inappropriate type for the operation""" _fmt = "expected gdb.Type `{}' to describe `{}' not `{}'" - def __init__(self, name: str, gdbtype: gdb.Type, + def __init__(self, name: str, val: gdb.Value, expected_type: gdb.Type) -> None: - msg = self._fmt.format(name, str(gdbtype), str(expected_type)) + msg = self._fmt.format(name, str(val.type), str(expected_type)) super().__init__(msg) class NotStructOrUnionError(UnexpectedGDBTypeBaseError): diff --git a/crash/types/list.py b/crash/types/list.py index 4d496ba25d4..2e65f67296f 100644 --- a/crash/types/list.py +++ b/crash/types/list.py @@ -53,8 +53,8 @@ def list_for_each(list_head: gdb.Value, include_head: bool = False, if list_head.type == types.list_head_type.pointer(): list_head = list_head.dereference() elif list_head.type != types.list_head_type: - raise UnexpectedGDBTypeError('list_head', types.list_head_type, - list_head.type) + raise UnexpectedGDBTypeError('list_head', list_head, + types.list_head_type) if list_head.type is not types.list_head_type: types.override('struct list_head', list_head.type) fast = None From 61e43e00092286d9461dad3efdb5d507b5675999 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 24 Jun 2019 10:30:39 -0400 Subject: [PATCH 266/367] crash.infra.lookup: check_ready can return None Since it can return None it needs to be annotated as Optional. Signed-off-by: Jeff Mahoney --- crash/infra/lookup.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crash/infra/lookup.py b/crash/infra/lookup.py index f9eb09bcaa0..a2046ac65be 100644 --- a/crash/infra/lookup.py +++ b/crash/infra/lookup.py @@ -90,7 +90,7 @@ def __init__(self, name: str, callback: Callback, self.connect_callback() - def check_ready(self) -> gdb.MinSymbol: + def check_ready(self) -> Optional[gdb.MinSymbol]: """ Returns the result of looking up the minimal symbol when a new object file is loaded. @@ -128,7 +128,7 @@ def __init__(self, name: str, callback: Callback, self.connect_callback() - def check_ready(self) -> gdb.Symbol: + def check_ready(self) -> Optional[gdb.Symbol]: """ Returns the result of looking up the symbol when a new object file is loaded. @@ -249,7 +249,7 @@ def resolve_type(name: str) -> Tuple[str, str, bool]: return (name, attrname, pointer) - def check_ready(self) -> Union[None, gdb.Type]: + def check_ready(self) -> Optional[gdb.Type]: try: return gdb.lookup_type(self.name, self.block) except gdb.error: From 637c2ca9abab91a3a8cb0a376cdd2a34748c36df Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 24 Jun 2019 10:40:23 -0400 Subject: [PATCH 267/367] crash.cache.syscache: annotate jiffies as int set_jiffies and the jiffies property are annotated as accepting and returning gdb.Value but setup_jiffies uses an int. 
Signed-off-by: Jeff Mahoney --- crash/cache/syscache.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/crash/cache/syscache.py b/crash/cache/syscache.py index b7adf47b81c..299f9c5ecf0 100644 --- a/crash/cache/syscache.py +++ b/crash/cache/syscache.py @@ -192,7 +192,7 @@ def __init__(self, config_cache: CrashConfigCache) -> None: self._loadavg = "" @property - def jiffies(self) -> gdb.Value: + def jiffies(self) -> int: v = self._jiffies_dv.get() return v @@ -244,7 +244,7 @@ def _get_loadavg_values(self) -> List[float]: return metrics @classmethod - def set_jiffies(cls, value: gdb.Value) -> None: + def set_jiffies(cls, value: int) -> None: cls._jiffies_dv.value = None cls._jiffies_dv.callback(value) cls._reset_uptime = True @@ -261,15 +261,17 @@ def setup_jiffies(cls, symbol: gdb.Symbol) -> bool: return False cls._adjust_jiffies = True else: - jiffies = int(gdb.lookup_global_symbol('jiffies').value()) + jiffies_sym = gdb.lookup_global_symbol('jiffies') + if not jiffies_sym: + return False + jiffies = int(jiffies_sym.value()) cls._adjust_jiffies = False cls.set_jiffies(jiffies) return True - - def _adjusted_jiffies(self) -> gdb.Value: + def _adjusted_jiffies(self) -> int: if self._adjust_jiffies: return self.jiffies -(int(0x100000000) - 300 * self.hz) return self.jiffies From f3f8faa51b669f1656a243a5e977b936df5f00fb Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 24 Jun 2019 10:55:18 -0400 Subject: [PATCH 268/367] crash.kernel: handle objfile.build_id being None Objfile.build_id can be None but we don't handle that case. Signed-off-by: Jeff Mahoney --- crash/kernel.py | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/crash/kernel.py b/crash/kernel.py index ef0acfa0eb8..29f9b53cb0e 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -148,9 +148,14 @@ def __init__(self, roots: PathSpecifier = None, self._setup_module_debuginfo_path(module_debuginfo_path, verbose) # We need separate debuginfo. Let's go find it. + path_list = [] + build_id_path = self.build_id_path(obj) + if build_id_path: + path_list.append(build_id_path) + path_list += self.vmlinux_debuginfo if not obj.has_symbols(): print("Loading debug symbols for vmlinux") - for path in [self.build_id_path(obj)] + self.vmlinux_debuginfo: + for path in path_list: try: obj.add_separate_debug_file(path) if obj.has_symbols(): @@ -613,7 +618,7 @@ def _find_module_debuginfo_file(self, name: str, path: str) -> str: return self._get_file_path_from_tree_search(path, name, regex) @staticmethod - def build_id_path(objfile: gdb.Objfile) -> str: + def build_id_path(objfile: gdb.Objfile) -> Optional[str]: """ Returns the relative path for debuginfo using the objfile's build-id. 
@@ -621,6 +626,8 @@ def build_id_path(objfile: gdb.Objfile) -> str: objfile: The objfile for which to return the path """ build_id = objfile.build_id + if build_id is None: + return None return ".build_id/{}/{}.debug".format(build_id[0:2], build_id[2:]) def _try_load_debuginfo(self, objfile: gdb.Objfile, @@ -651,9 +658,10 @@ def _load_module_debuginfo(self, objfile: gdb.Objfile, build_id_path = self.build_id_path(objfile) for path in self.module_debuginfo_path: - filepath = "{}/{}".format(path, build_id_path) - if self._try_load_debuginfo(objfile, filepath, verbose): - break + if build_id_path: + filepath = "{}/{}".format(path, build_id_path) + if self._try_load_debuginfo(objfile, filepath, verbose): + break try: filepath = self._find_module_debuginfo_file(filename, path) From fee65534c12f6777aa891f4f3ce3a7d326275e3d Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 24 Jun 2019 10:59:30 -0400 Subject: [PATCH 269/367] crash.kernel: handle missing minimal symbols when resolving version Signed-off-by: Jeff Mahoney --- crash/kernel.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/crash/kernel.py b/crash/kernel.py index 29f9b53cb0e..a8282b8a894 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -323,9 +323,13 @@ def _setup_module_debuginfo_path(self, module_debuginfo_path: PathSpecifier = No # When working without a symbol table, we still need to be able # to resolve version information. def _get_minsymbol_as_string(self, name: str) -> str: - sym = gdb.lookup_minimal_symbol(name).value() + sym = gdb.lookup_minimal_symbol(name) + if sym is None: + raise MissingSymbolError(name) - return sym.address.cast(self.types.char_p_type).string() + val = sym.value() + + return val.address.cast(self.types.char_p_type).string() def extract_version(self) -> str: """ From be0084e292f3d7288fa93686f9bb9ac262a3625c Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 24 Jun 2019 11:56:46 -0400 Subject: [PATCH 270/367] crash.commands.help: fix typing for help Handle __doc__ == None and properly annotate the command dict as crash.Command instead of gdb.Command. Signed-off-by: Jeff Mahoney --- crash/commands/__init__.py | 2 +- crash/commands/help.py | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/crash/commands/__init__.py b/crash/commands/__init__.py index 552a1846844..5859bf8ef52 100644 --- a/crash/commands/__init__.py +++ b/crash/commands/__init__.py @@ -129,7 +129,7 @@ class Command(gdb.Command): :class:`.ArgumentParser`. 
""" - _commands: Dict[str, gdb.Command] = dict() + _commands: Dict[str, 'Command'] = dict() def __init__(self, name: str, parser: ArgumentParser = None) -> None: """ """ diff --git a/crash/commands/help.py b/crash/commands/help.py index fe0d311461d..3fee10a29b1 100644 --- a/crash/commands/help.py +++ b/crash/commands/help.py @@ -34,7 +34,10 @@ def execute(self, args: argparse.Namespace) -> None: if not args.args: print("Available commands:") for cmd in sorted(self._commands): - summary = self._commands[cmd].__doc__.strip() + summary = None + doc = self._commands[cmd].__doc__ + if doc: + summary = doc.strip() if not summary: summary = "no help text provided" print("{:<15} - {}".format(cmd, summary)) From a28ef65f57845bb3eea41bfccaa9943512071c7a Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Mon, 24 Jun 2019 13:10:21 -0400 Subject: [PATCH 271/367] crash: rework how gdb.Target.fetch_registers is set up and invoked --- crash/arch/__init__.py | 68 +++++++++++++++++++++------- crash/arch/ppc64.py | 14 +++--- crash/arch/x86_64.py | 100 +++++++++++++++++++++-------------------- crash/kernel.py | 19 -------- kdump/target.py | 15 ++++++- 5 files changed, 123 insertions(+), 93 deletions(-) diff --git a/crash/arch/__init__.py b/crash/arch/__init__.py index 3b1426d7b3b..751e8a1fc8c 100644 --- a/crash/arch/__init__.py +++ b/crash/arch/__init__.py @@ -3,33 +3,71 @@ from typing import List, Iterator, Any, Optional, Type +import crash +import kdump.target + import gdb from gdb.FrameDecorator import FrameDecorator +class FetchRegistersCallback: + """ + The base class from which to implement the fetch_registers callback. + + The architecture code must implement the :meth:`fetch_active` and + :meth:`fetch_scheduled` methods. + """ + def fetch_active(self, thread: gdb.InferiorThread, register: int) -> None: + raise NotImplementedError("Target has no fetch_active callback") + + def fetch_scheduled(self, thread: gdb.InferiorThread, + register: int) -> None: + raise NotImplementedError("Target has no fetch_scheduled callback") + + def __call__(self, thread: gdb.InferiorThread, + register: gdb.Register) -> None: + if register is None: + regnum = -1 + else: + regnum = register.regnum + + if thread.info.active: + return self.fetch_active(thread, regnum) + + return self.fetch_scheduled(thread, regnum) + class CrashArchitecture: ident = "base-class" aliases: List[str] = list() - def __init__(self) -> None: - pass - def fetch_register_active(self, thread: gdb.InferiorThread, - register: int) -> None: - raise NotImplementedError("setup_thread_active not implemented") + _fetch_registers: Type[FetchRegistersCallback] - def fetch_register_scheduled(self, thread: gdb.InferiorThread, - register: int) -> None: - raise NotImplementedError("setup_thread_scheduled not implemented") + def __init__(self) -> None: + target = gdb.current_target() + if not isinstance(target, kdump.target.Target): + raise ValueError("target is not kdumpfile") + try: + target.set_fetch_registers(self._fetch_registers()) + except AttributeError: + raise NotImplementedError("No fetch_registers callback defined") + + @classmethod + def set_fetch_registers(cls, + callback: Type[FetchRegistersCallback]) -> None: + """ + Set a fetch_regisers callback for the Target to use. + + Args: + callback: A Callable that accepts a :obj:`gdb.InferiorThread` and + :obj:`gdb.Register` and populates the requested registers for + the specified thread. A register with the seemingly invalid + register number of -1 is a request to populate all registers. 
+ """ + cls._fetch_registers = callback def setup_thread_info(self, thread: gdb.InferiorThread) -> None: raise NotImplementedError("setup_thread_info not implemented") - def fetch_register(self, thread: gdb.InferiorThread, register: int) -> None: - if thread.info.active: - self.fetch_register_active(thread, register) - else: - self.fetch_register_scheduled(thread, register) - - def get_stack_pointer(self, thread_struct: gdb.Value) -> gdb.Value: + def get_stack_pointer(self, thread_struct: gdb.Value) -> int: raise NotImplementedError("get_stack_pointer is not implemented") # This keeps stack traces from continuing into userspace and causing problems. diff --git a/crash/arch/ppc64.py b/crash/arch/ppc64.py index 98590191f2a..92b68922b3c 100644 --- a/crash/arch/ppc64.py +++ b/crash/arch/ppc64.py @@ -2,13 +2,19 @@ # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: from crash.arch import CrashArchitecture, KernelFrameFilter, register_arch +from crash.arch import FetchRegistersCallback import gdb +class FR_Placeholder(FetchRegistersCallback): # pylint: disable=abstract-method + pass + class Powerpc64Architecture(CrashArchitecture): ident = "powerpc:common64" aliases = ["ppc64", "elf64-powerpc"] + _fetch_registers = FR_Placeholder + def __init__(self) -> None: super(Powerpc64Architecture, self).__init__() # Stop stack traces with addresses below this @@ -22,12 +28,4 @@ def setup_thread_info(self, thread: gdb.InferiorThread) -> None: def get_stack_pointer(cls, thread_struct: gdb.Value) -> gdb.Value: return thread_struct['ksp'] - def fetch_register_active(self, thread: gdb.InferiorThread, - register: int) -> None: - raise NotImplementedError("ppc64 support does not cover threads yet") - - def fetch_register_scheduled(self, thread: gdb.InferiorThread, - register: gdb.Register) -> None: - raise NotImplementedError("ppc64 support does not cover threads yet") - register_arch(Powerpc64Architecture) diff --git a/crash/arch/x86_64.py b/crash/arch/x86_64.py index ff93100357a..887ba40d863 100644 --- a/crash/arch/x86_64.py +++ b/crash/arch/x86_64.py @@ -2,44 +2,19 @@ # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: from crash.arch import CrashArchitecture, KernelFrameFilter, register_arch +from crash.arch import FetchRegistersCallback +from crash.util.symbols import Types, MinimalSymvals +from crash.util.symbols import TypeCallbacks, MinimalSymbolCallbacks import gdb -class x86_64Architecture(CrashArchitecture): - ident = "i386:x86-64" - aliases = ["x86_64"] +types = Types(['struct inactive_task_frame *', 'struct thread_info *', + 'unsigned long *']) +msymvals = MinimalSymvals(['thread_return']) - def __init__(self) -> None: - super(x86_64Architecture, self).__init__() - # PC for blocked threads - try: - inactive = gdb.lookup_type('struct inactive_task_frame') - self._fetch_register_scheduled = \ - self.fetch_register_scheduled_inactive - self.inactive_task_frame_type = inactive - except gdb.error: - try: - thread_return = gdb.lookup_minimal_symbol("thread_return") - self.thread_return = thread_return.value().address - self._fetch_register_scheduled = \ - self.fetch_register_scheduled_thread_return - except Exception: - raise RuntimeError("{} requires symbol 'thread_return'" - .format(self.__class__.__name__)) - self.ulong_type = gdb.lookup_type('unsigned long') - thread_info_type = gdb.lookup_type('struct thread_info') - self.thread_info_p_type = thread_info_type.pointer() - - # Stop stack traces with addresses below this - self.filter = KernelFrameFilter(0xffff000000000000) - - def 
setup_thread_info(self, thread: gdb.InferiorThread) -> None: - task = thread.info.task_struct - thread_info = task['stack'].cast(self.thread_info_p_type) - thread.info.set_thread_info(thread_info) - - def fetch_register_active(self, thread: gdb.InferiorThread, - register: int) -> None: +# pylint: disable=abstract-method +class _FetchRegistersBase(FetchRegistersCallback): + def fetch_active(self, thread: gdb.InferiorThread, register: int) -> None: task = thread.info for reg in task.regs: if reg == "rip" and register not in (16, -1): @@ -49,19 +24,16 @@ def fetch_register_active(self, thread: gdb.InferiorThread, except KeyError: pass - def fetch_register_scheduled(self, thread: gdb.InferiorThread, - register: gdb.Register) -> None: - return self._fetch_register_scheduled(thread, register) - - def fetch_register_scheduled_inactive(self, thread: gdb.InferiorThread, - register: int) -> None: - ulong_type = self.ulong_type +# pylint: disable=abstract-method +class _FRC_inactive_task_frame(_FetchRegistersBase): + def fetch_scheduled(self, thread: gdb.InferiorThread, + register: int) -> None: task = thread.info.task_struct - rsp = task['thread']['sp'].cast(ulong_type.pointer()) + rsp = task['thread']['sp'].cast(types.unsigned_long_p_type) thread.registers['rsp'].value = rsp - frame = rsp.cast(self.inactive_task_frame_type.pointer()).dereference() + frame = rsp.cast(types.inactive_task_frame_p_type).dereference() # Only write rip when requested; It resets the frame cache if register in (16, -1): @@ -81,19 +53,19 @@ def fetch_register_scheduled_inactive(self, thread: gdb.InferiorThread, thread.info.stack_pointer = rsp thread.info.valid_stack = True - def fetch_register_scheduled_thread_return(self, thread: gdb.InferiorThread, - register: int) -> None: - ulong_type = self.ulong_type +class _FRC_thread_return(_FetchRegistersBase): + def __call__(self, thread: gdb.InferiorThread, + register: gdb.Register) -> None: task = thread.info.task_struct # Only write rip when requested; It resets the frame cache if register in (16, -1): - thread.registers['rip'].value = self.thread_return + thread.registers['rip'].value = msymvals.thread_return if register == 16: return - rsp = task['thread']['sp'].cast(ulong_type.pointer()) - rbp = rsp.dereference().cast(ulong_type.pointer()) + rsp = task['thread']['sp'].cast(types.unsigned_long_p_type) + rbp = rsp.dereference().cast(types.unsigned_long_p_type) rbx = (rbp - 1).dereference() r12 = (rbp - 2).dereference() r13 = (rbp - 3).dereference() @@ -120,8 +92,38 @@ def fetch_register_scheduled_thread_return(self, thread: gdb.InferiorThread, thread.info.stack_pointer = rsp thread.info.valid_stack = True +class x86_64Architecture(CrashArchitecture): + ident = "i386:x86-64" + aliases = ["x86_64"] + + def __init__(self) -> None: + super(x86_64Architecture, self).__init__() + + # Stop stack traces with addresses below this + self.filter = KernelFrameFilter(0xffff000000000000) + + def setup_thread_info(self, thread: gdb.InferiorThread) -> None: + task = thread.info.task_struct + thread_info = task['stack'].cast(types.thread_info_p_type) + thread.info.set_thread_info(thread_info) + + @classmethod + # pylint: disable=unused-argument + def setup_inactive_task_frame_handler(cls, inactive: gdb.Type) -> None: + cls.set_fetch_registers(_FRC_inactive_task_frame) + + @classmethod + # pylint: disable=unused-argument + def setup_thread_return_handler(cls, inactive: gdb.Type) -> None: + cls.set_fetch_registers(_FRC_thread_return) + @classmethod def get_stack_pointer(cls, thread_struct: 
gdb.Value) -> gdb.Value: return thread_struct['sp'] +type_cbs = TypeCallbacks([('struct inactive_task_frame', + x86_64Architecture.setup_inactive_task_frame_handler)]) +msymbol_cbs = MinimalSymbolCallbacks([('thread_return', + x86_64Architecture.setup_thread_return_handler)]) + register_arch(x86_64Architecture) diff --git a/crash/kernel.py b/crash/kernel.py index a8282b8a894..c7916cda041 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -180,7 +180,6 @@ def __init__(self, roots: PathSpecifier = None, self.target = gdb.current_target() self.vmcore = self.target.kdump - self.target.fetch_registers = self.fetch_registers self.crashing_thread = None def _setup_roots(self, roots: PathSpecifier = None, @@ -393,24 +392,6 @@ def extract_modinfo_from_module(self, modpath: str) -> Dict[str, str]: f.close() return d - def fetch_registers(self, thread: gdb.InferiorThread, - register: gdb.Register) -> None: - """ - Loads the value for a register (or registers if Register.regnum is - ``-1``) - - Meant to be used as a callback from gdb.Target. - - Args: - thread: The thread for which to load the registers - register: The register (or registers) to load. - """ - if register is None: - regnum = -1 - else: - regnum = register.regnum - self.arch.fetch_register(thread, regnum) - def _get_module_sections(self, module: gdb.Value) -> str: out = [] for (name, addr) in for_each_module_section(module): diff --git a/kdump/target.py b/kdump/target.py index a2b359df7a8..51930a2b579 100644 --- a/kdump/target.py +++ b/kdump/target.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from typing import Tuple +from typing import Tuple, Callable import sys @@ -11,9 +11,14 @@ import gdb +TargetFetchRegisters = Callable[[gdb.InferiorThread, gdb.Register], None] + PTID = Tuple[int, int, int] class Target(gdb.Target): + + _fetch_registers: TargetFetchRegisters + def __init__(self, debug: bool = False) -> None: super().__init__() self.debug = debug @@ -107,9 +112,15 @@ def thread_alive(self, ptid: PTID) -> bool: def pid_to_str(self, ptid: PTID) -> str: return "pid {:d}".format(ptid[1]) + def set_fetch_registers(self, callback: TargetFetchRegisters) -> None: + self._fetch_registers = callback # type: ignore + def fetch_registers(self, thread: gdb.InferiorThread, register: gdb.Register) -> None: - pass + try: + return self._fetch_registers(thread, register) # type: ignore + except AttributeError: + raise NotImplementedError("Target did not define fetch_registers callback") def prepare_to_store(self, thread: gdb.InferiorThread) -> None: pass From a356960fdc225811e11b7e43c302c43a73311a94 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 24 Jun 2019 10:16:39 -0400 Subject: [PATCH 272/367] crash: introduce crash.current_target() There are places where we want to access the current target but gdb.current_target() returns gdb.Target not kdump.target.Target. Add a helper to check and ensure it's a kdump.target.Target. 
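
A hypothetical caller (not from the tree) shows what the helper buys: the
return value is already narrowed to kdump.target.Target, so kdump-specific
members can be used without casts or type-ignore comments. The attribute read
below assumes a dump has already been opened.

    import crash

    def dump_ostype() -> str:
        # Raises ValueError if no target, or a non-kdump target, is current.
        target = crash.current_target()
        return str(target.kdump.attr['addrxlat.ostype'])
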
Signed-off-by: Jeff Mahoney --- crash/__init__.py | 13 +++++++++++++ crash/addrxlat.py | 3 ++- crash/arch/__init__.py | 5 +---- crash/kernel.py | 3 ++- crash/types/node.py | 3 ++- crash/types/page.py | 3 ++- 6 files changed, 22 insertions(+), 8 deletions(-) diff --git a/crash/__init__.py b/crash/__init__.py index 9e72c13b9b3..a7d1c344683 100644 --- a/crash/__init__.py +++ b/crash/__init__.py @@ -1,2 +1,15 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +import gdb +import kdump.target + +def current_target() -> kdump.target.Target: + target = gdb.current_target() + if target is None: + raise ValueError("No current target") + + if not isinstance(target, kdump.target.Target): + raise ValueError(f"Current target {type(target)} is not supported") + + return target diff --git a/crash/addrxlat.py b/crash/addrxlat.py index 0f57ba36154..fa634cdfb5c 100644 --- a/crash/addrxlat.py +++ b/crash/addrxlat.py @@ -2,6 +2,7 @@ # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: import addrxlat +import crash from crash.cache.syscache import utsname from crash.util import offsetof from crash.util.symbols import Types @@ -47,7 +48,7 @@ def cb_read64(self, faddr: addrxlat.FullAddress) -> int: class CrashAddressTranslation: def __init__(self) -> None: try: - target = gdb.current_target() + target = crash.current_target() self.context = target.kdump.get_addrxlat_ctx() self.system = target.kdump.get_addrxlat_sys() except AttributeError: diff --git a/crash/arch/__init__.py b/crash/arch/__init__.py index 751e8a1fc8c..01992649607 100644 --- a/crash/arch/__init__.py +++ b/crash/arch/__init__.py @@ -4,7 +4,6 @@ from typing import List, Iterator, Any, Optional, Type import crash -import kdump.target import gdb from gdb.FrameDecorator import FrameDecorator @@ -42,9 +41,7 @@ class CrashArchitecture: _fetch_registers: Type[FetchRegistersCallback] def __init__(self) -> None: - target = gdb.current_target() - if not isinstance(target, kdump.target.Target): - raise ValueError("target is not kdumpfile") + target = crash.current_target() try: target.set_fetch_registers(self._fetch_registers()) except AttributeError: diff --git a/crash/kernel.py b/crash/kernel.py index c7916cda041..83695e4c05f 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -10,6 +10,7 @@ from elftools.elf.elffile import ELFFile +import crash import crash.arch import crash.arch.x86_64 import crash.arch.ppc64 @@ -177,7 +178,7 @@ def __init__(self, roots: PathSpecifier = None, self.arch = archclass() - self.target = gdb.current_target() + self.target = crash.current_target() self.vmcore = self.target.kdump self.crashing_thread = None diff --git a/crash/types/node.py b/crash/types/node.py index e1d9118d7a9..401bc1efff8 100644 --- a/crash/types/node.py +++ b/crash/types/node.py @@ -6,6 +6,7 @@ from typing import Iterable, List, Type, TypeVar +import crash from crash.util.symbols import Symbols, Symvals, Types, SymbolCallbacks from crash.types.percpu import get_percpu_var from crash.types.bitmap import for_each_set_bit @@ -27,7 +28,7 @@ def numa_node_id(cpu: int) -> int: Returns: :obj:`int`: The NUMA node ID for the specified CPU. 
""" - if gdb.current_target().arch.name() == "powerpc:common64": + if crash.current_target().arch.name() == "powerpc:common64": return int(symvals.numa_cpu_lookup_table[cpu]) return int(get_percpu_var(symbols.numa_node, cpu)) diff --git a/crash/types/page.py b/crash/types/page.py index 474f6480e7e..744a4e5de17 100644 --- a/crash/types/page.py +++ b/crash/types/page.py @@ -5,6 +5,7 @@ from math import log, ceil +import crash from crash.util import find_member_variant from crash.util.symbols import Types, Symvals, TypeCallbacks from crash.util.symbols import SymbolCallbacks, MinimalSymbolCallbacks @@ -58,7 +59,7 @@ class Page: def setup_page_type(cls, gdbtype: gdb.Type) -> None: # TODO: should check config, but that failed to work on ppc64, hardcode # 64k for now - if gdb.current_target().arch.name() == "powerpc:common64": + if crash.current_target().arch.name() == "powerpc:common64": cls.PAGE_SHIFT = 16 # also a config cls.directmap_base = 0xc000000000000000 From 872df02b411e7a7a712c568569f04a73b1c4b092 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 24 Jun 2019 13:12:50 -0400 Subject: [PATCH 273/367] crash.kernel: fix remaining typing issues --- crash/kernel.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/crash/kernel.py b/crash/kernel.py index 83695e4c05f..516079667ba 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -138,6 +138,8 @@ def __init__(self, roots: PathSpecifier = None, self.findmap: Dict[str, Dict[Any, Any]] = dict() self.modules_order: Dict[str, Dict[str, str]] = dict() obj = gdb.objfiles()[0] + if not obj.filename: + raise RuntimeError("loaded objfile has no filename???") kernel = os.path.basename(obj.filename) self.kernel = kernel @@ -181,7 +183,7 @@ def __init__(self, roots: PathSpecifier = None, self.target = crash.current_target() self.vmcore = self.target.kdump - self.crashing_thread = None + self.crashing_thread: Optional[gdb.InferiorThread] = None def _setup_roots(self, roots: PathSpecifier = None, verbose: bool = False) -> None: @@ -637,6 +639,8 @@ def _load_module_debuginfo(self, objfile: gdb.Objfile, verbose: bool = False) -> None: if modpath is None: modpath = objfile.filename + if modpath is None: + raise RuntimeError("loaded objfile has no filename???") if ".gz" in modpath: modpath = modpath.replace(".gz", "") filename = "{}.debug".format(os.path.basename(modpath)) From d7c304950f6d0b3f84c760d93aaf9fd6f95aa6cd Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 24 Jun 2019 13:28:07 -0400 Subject: [PATCH 274/367] crash.commands.lsmod: fix typing with get_percpu_var -> int mypy doesn't like reusing variables, so just call it an int from the beginning. 
Signed-off-by: Jeff Mahoney --- crash/commands/lsmod.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crash/commands/lsmod.py b/crash/commands/lsmod.py index d3ec5e24150..1185b70a5e2 100644 --- a/crash/commands/lsmod.py +++ b/crash/commands/lsmod.py @@ -60,13 +60,13 @@ def print_module_percpu(self, mod: gdb.Value, cpu: int = -1) -> None: return if cpu != -1: - addr = get_percpu_var(mod['percpu'], cpu) + addr = int(get_percpu_var(mod['percpu'], cpu)) tabs = "\t\t" else: tabs = "\t\t\t" size = int(mod['percpu_size']) - print("{:16s}\t{:#x}{}{:d}".format(mod['name'].string(), int(addr), + print("{:16s}\t{:#x}{}{:d}".format(mod['name'].string(), addr, tabs, size)) From 47a596115a0540164598aaf77193e498564356b9 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 24 Jun 2019 13:29:47 -0400 Subject: [PATCH 275/367] crash.commands.kmem: fix typing in print_zones Mypy doesn't always convert from gdb.Value to int, so do it explicitly. Signed-off-by: Jeff Mahoney --- crash/commands/kmem.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crash/commands/kmem.py b/crash/commands/kmem.py index 2bdb5d3e322..f09895422d4 100644 --- a/crash/commands/kmem.py +++ b/crash/commands/kmem.py @@ -166,8 +166,8 @@ def print_zones(self) -> None: zone_struct = zone.gdb_obj print("NODE: %d ZONE: %d ADDR: %x NAME: \"%s\"" % - (zone_struct["node"], zone.zid, zone_struct.address, - zone_struct["name"].string())) + (int(zone_struct["node"]), zone.zid, + int(zone_struct.address), zone_struct["name"].string())) if not zone.is_populated(): print(" [unpopulated]") From bef3e01134c6740debd2f4a1eef03c0a24b542d2 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 24 Jun 2019 13:35:21 -0400 Subject: [PATCH 276/367] kdump.target: don't initialize kdumpfile member Initializing it to None marks it as Optional and then the static checker wants us to check it everywhere. 
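The replacement is a bare annotation, which tells the checker the attribute's
type without giving it a None value. A minimal sketch of the pattern (the
import is an assumption about how the kdumpfile bindings are pulled in; the
real code lives in kdump/target.py):

    from kdumpfile import kdumpfile   # assumed import for the sketch

    class Target:
        def __init__(self) -> None:
            self.kdump: kdumpfile          # typed, but not Optional
            # rather than:
            # self.kdump: kdumpfile = None # would force None checks everywhere
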
Signed-off-by: Jeff Mahoney --- kdump/target.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kdump/target.py b/kdump/target.py index 51930a2b579..864f46f3e81 100644 --- a/kdump/target.py +++ b/kdump/target.py @@ -24,7 +24,7 @@ def __init__(self, debug: bool = False) -> None: self.debug = debug self.shortname = "kdumpfile" self.longname = "Use a Linux kernel kdump file as a target" - self.kdump: kdumpfile = None + self.kdump: kdumpfile self.base_offset = 0 self.register() From 5adc3ca004e314c6bfd8ebcc4c45f7d419f3975d Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 24 Jun 2019 13:39:18 -0400 Subject: [PATCH 277/367] crash.types.percpu: fix variable reuse confusing type checking Signed-off-by: Jeff Mahoney --- crash/types/percpu.py | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/crash/types/percpu.py b/crash/types/percpu.py index 12d159192f3..4431ff52077 100644 --- a/crash/types/percpu.py +++ b/crash/types/percpu.py @@ -261,19 +261,21 @@ def is_percpu_var(self, var: SymbolOrValue) -> bool: if isinstance(var, gdb.Symbol): var = var.value().address - var = int(var) - if self.is_static_percpu_var(var): + ivar = int(var) + if self.is_static_percpu_var(ivar): return True - if self.is_module_percpu_var(var): + if self.is_module_percpu_var(ivar): return True - if self._is_percpu_var_dynamic(var): + if self._is_percpu_var_dynamic(ivar): return True return False - def _resolve_percpu_var(self, var: SymbolOrValue) -> gdb.Value: - orig_var = var - if isinstance(var, (gdb.Symbol, gdb.MinSymbol)): - var = var.value() + def _resolve_percpu_var(self, symvar: SymbolOrValue) -> gdb.Value: + orig_var = symvar + if isinstance(symvar, gdb.Symbol): + var = symvar.value() + else: + var = symvar if not isinstance(var, gdb.Value): raise InvalidArgumentError("Argument must be gdb.Symbol or gdb.Value") @@ -296,7 +298,13 @@ def _resolve_percpu_var(self, var: SymbolOrValue) -> gdb.Value: return var - def _get_percpu_var(self, var: SymbolOrValue, cpu: int) -> gdb.Value: + def _get_percpu_var(self, symvar: SymbolOrValue, cpu: int) -> gdb.Value: + if isinstance(symvar, (gdb.Symbol, gdb.MinSymbol)): + var = symvar.value() + else: + var = symvar + if not isinstance(var, gdb.Value): + raise InvalidArgumentError("Argument must be gdb.Symbol or gdb.Value") if cpu < 0: raise ValueError("cpu must be >= 0") From 053ae614c2d450e518e242bca9043cdd8e80c440 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 24 Jun 2019 13:39:55 -0400 Subject: [PATCH 278/367] crash.arch: remove Iterator type from KernelAddressIterator The Iterator can accept duck-typed objects, so specifying Frame doesn't work. 
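In practice the objects flowing through the filter chain are frame
decorators rather than raw frames, so Iterator[gdb.Frame] is too narrow.
A sketch of the kind of producer involved (illustrative only, using gdb's
stock FrameDecorator wrapper):

    from gdb.FrameDecorator import FrameDecorator

    def decorate(frames):
        # Frame filters commonly wrap frames like this, so downstream
        # consumers such as KernelAddressIterator receive FrameDecorator
        # objects that only duck-type the gdb.Frame interface.
        for frame in frames:
            yield FrameDecorator(frame)
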
Signed-off-by: Jeff Mahoney --- crash/arch/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crash/arch/__init__.py b/crash/arch/__init__.py index 01992649607..eb354716cd1 100644 --- a/crash/arch/__init__.py +++ b/crash/arch/__init__.py @@ -80,7 +80,7 @@ def filter(self, frame_iter: Iterator[Any]) -> Any: return KernelAddressIterator(frame_iter, self.address) class KernelAddressIterator: - def __init__(self, ii: Iterator[gdb.Frame], address: int) -> None: + def __init__(self, ii: Iterator, address: int) -> None: self.input_iterator = ii self.address = address From 413464252c5273137f9307a0a180303fadb55ece Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 24 Jun 2019 14:31:20 -0400 Subject: [PATCH 279/367] crash.arch: get_stack_pointer should return int The base class in crash.arch returns an int but the arch-specific implementations return a value. Convert and return an int from the beginning. Signed-off-by: Jeff Mahoney --- crash/arch/ppc64.py | 4 ++-- crash/arch/x86_64.py | 4 ++-- crash/types/task.py | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/crash/arch/ppc64.py b/crash/arch/ppc64.py index 92b68922b3c..5a9beb9809a 100644 --- a/crash/arch/ppc64.py +++ b/crash/arch/ppc64.py @@ -25,7 +25,7 @@ def setup_thread_info(self, thread: gdb.InferiorThread) -> None: thread.info.set_thread_info(task['thread_info'].address) @classmethod - def get_stack_pointer(cls, thread_struct: gdb.Value) -> gdb.Value: - return thread_struct['ksp'] + def get_stack_pointer(cls, thread_struct: gdb.Value) -> int: + return int(thread_struct['ksp']) register_arch(Powerpc64Architecture) diff --git a/crash/arch/x86_64.py b/crash/arch/x86_64.py index 887ba40d863..4cc9fa1ffe0 100644 --- a/crash/arch/x86_64.py +++ b/crash/arch/x86_64.py @@ -118,8 +118,8 @@ def setup_thread_return_handler(cls, inactive: gdb.Type) -> None: cls.set_fetch_registers(_FRC_thread_return) @classmethod - def get_stack_pointer(cls, thread_struct: gdb.Value) -> gdb.Value: - return thread_struct['sp'] + def get_stack_pointer(cls, thread_struct: gdb.Value) -> int: + return int(thread_struct['sp']) type_cbs = TypeCallbacks([('struct inactive_task_frame', x86_64Architecture.setup_inactive_task_frame_handler)]) diff --git a/crash/types/task.py b/crash/types/task.py index 9bc8a8bb9d3..8f14a4a35bd 100644 --- a/crash/types/task.py +++ b/crash/types/task.py @@ -520,7 +520,7 @@ def get_stack_pointer(self) -> int: except AttributeError: raise NotImplementedError("Architecture hasn't provided stack pointer callback") - return int(fn(self.task_struct['thread'])) + return fn(self.task_struct['thread']) def _get_rss_field(self) -> int: return int(self.task_struct['mm']['rss'].value()) From 61ba0238b31c50af8cd9ab986d80c64307dbb224 Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Wed, 12 Jun 2019 14:15:26 -0400 Subject: [PATCH 280/367] tests: add stubs for gdb types In order to get full test coverage, we need to provide stubs for external modules. We don't use enough of elftools to bother with the stubs. libaddrxlat needs surgery in order for it to pass type checking. So for now, we just use the typing for gdb. This means that the static checker will now check the interface with GDB. Prior to this change, the interface was not checked at all. 
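As a concrete example of what the checker gains, with MYPYPATH pointing at
the stubs, a call like this is now checked against the declared signature
(the signatures come from the stub added below; the error wording is
approximate):

    import gdb

    t = gdb.lookup_type("char")    # checked: (name: str, ...) -> gdb.Type
    # t = gdb.lookup_type(0)       # would now be flagged by mypy: int is not str
    p = t.pointer()                # Type.pointer() -> Type, so p is a gdb.Type
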
Signed-off-by: Jeff Mahoney --- tests/run-static-checks.sh | 2 + tests/stubs/_gdb.pyi | 690 ++++++++++++++++++++ tests/stubs/gdb/FrameDecorator.pyi | 40 ++ tests/stubs/gdb/FrameIterator.pyi | 12 + tests/stubs/gdb/FrameWrapper.pyi | 14 + tests/stubs/gdb/__init__.pyi | 41 ++ tests/stubs/gdb/backtrace.pyi | 8 + tests/stubs/gdb/command/__init__.pyi | 4 + tests/stubs/gdb/command/backtrace.pyi | 19 + tests/stubs/gdb/command/explore.pyi | 90 +++ tests/stubs/gdb/command/frame_filters.pyi | 48 ++ tests/stubs/gdb/command/ignore_errors.pyi | 10 + tests/stubs/gdb/command/pretty_printers.pyi | 36 + tests/stubs/gdb/command/prompt.pyi | 17 + tests/stubs/gdb/command/type_printers.pyi | 25 + tests/stubs/gdb/command/unwinders.pyi | 27 + tests/stubs/gdb/command/xmethods.pyi | 28 + tests/stubs/gdb/frames.pyi | 13 + tests/stubs/gdb/function/__init__.pyi | 4 + tests/stubs/gdb/function/as_string.pyi | 10 + tests/stubs/gdb/function/caller_is.pyi | 22 + tests/stubs/gdb/function/in_scope.pyi | 10 + tests/stubs/gdb/function/strfns.pyi | 22 + tests/stubs/gdb/printer/__init__.pyi | 4 + tests/stubs/gdb/printer/bound_registers.pyi | 13 + tests/stubs/gdb/printing.pyi | 48 ++ tests/stubs/gdb/prompt.pyi | 23 + tests/stubs/gdb/types.pyi | 25 + tests/stubs/gdb/unwinder.pyi | 13 + tests/stubs/gdb/xmethod.pyi | 43 ++ 30 files changed, 1361 insertions(+) create mode 100644 tests/stubs/_gdb.pyi create mode 100644 tests/stubs/gdb/FrameDecorator.pyi create mode 100644 tests/stubs/gdb/FrameIterator.pyi create mode 100644 tests/stubs/gdb/FrameWrapper.pyi create mode 100644 tests/stubs/gdb/__init__.pyi create mode 100644 tests/stubs/gdb/backtrace.pyi create mode 100644 tests/stubs/gdb/command/__init__.pyi create mode 100644 tests/stubs/gdb/command/backtrace.pyi create mode 100644 tests/stubs/gdb/command/explore.pyi create mode 100644 tests/stubs/gdb/command/frame_filters.pyi create mode 100644 tests/stubs/gdb/command/ignore_errors.pyi create mode 100644 tests/stubs/gdb/command/pretty_printers.pyi create mode 100644 tests/stubs/gdb/command/prompt.pyi create mode 100644 tests/stubs/gdb/command/type_printers.pyi create mode 100644 tests/stubs/gdb/command/unwinders.pyi create mode 100644 tests/stubs/gdb/command/xmethods.pyi create mode 100644 tests/stubs/gdb/frames.pyi create mode 100644 tests/stubs/gdb/function/__init__.pyi create mode 100644 tests/stubs/gdb/function/as_string.pyi create mode 100644 tests/stubs/gdb/function/caller_is.pyi create mode 100644 tests/stubs/gdb/function/in_scope.pyi create mode 100644 tests/stubs/gdb/function/strfns.pyi create mode 100644 tests/stubs/gdb/printer/__init__.pyi create mode 100644 tests/stubs/gdb/printer/bound_registers.pyi create mode 100644 tests/stubs/gdb/printing.pyi create mode 100644 tests/stubs/gdb/prompt.pyi create mode 100644 tests/stubs/gdb/types.pyi create mode 100644 tests/stubs/gdb/unwinder.pyi create mode 100644 tests/stubs/gdb/xmethod.pyi diff --git a/tests/run-static-checks.sh b/tests/run-static-checks.sh index 31b0758761b..8c14f3a9df6 100755 --- a/tests/run-static-checks.sh +++ b/tests/run-static-checks.sh @@ -12,4 +12,6 @@ fi set -e DIR=$(dirname "$0") +export MYPYPATH="$(realpath $DIR/stubs)" + python3 $DIR/run-mypy.py diff --git a/tests/stubs/_gdb.pyi b/tests/stubs/_gdb.pyi new file mode 100644 index 00000000000..5fafb9a9fd9 --- /dev/null +++ b/tests/stubs/_gdb.pyi @@ -0,0 +1,690 @@ +# Stfor _gdb (Python 3) +# +# NOTE: This dynamically typed stub was automatically generated by stubgen. 
+ +from typing import Any, Tuple, List, Optional, Dict, Iterator, Callable +from typing import Union, Iterable, Sequence, NewType +from typing import TypeVar, Generic + +Buffer = Any + +frame_type = NewType('frame_type', int) +bptype_type = NewType('bptype_type', int) + +ARCH_FRAME: frame_type +BP_ACCESS_WATCHPOINT: bptype_type +BP_BREAKPOINT: bptype_type +BP_HARDWARE_WATCHPOINT: bptype_type +BP_NONE: bptype_type +BP_READ_WATCHPOINT: bptype_type +BP_WATCHPOINT: bptype_type + +command_class_enum = NewType('command_class_enum', int) +COMMAND_BREAKPOINTS: command_class_enum +COMMAND_DATA: command_class_enum +COMMAND_FILES: command_class_enum +COMMAND_MAINTENANCE: command_class_enum +COMMAND_NONE: command_class_enum +COMMAND_OBSCURE: command_class_enum +COMMAND_RUNNING: command_class_enum +COMMAND_STACK: command_class_enum +COMMAND_STATUS: command_class_enum +COMMAND_SUPPORT: command_class_enum +COMMAND_TRACEPOINTS: command_class_enum +COMMAND_USER: command_class_enum + +command_completer_type = NewType('command_completer_type', int) +COMPLETE_COMMAND: command_completer_type +COMPLETE_EXPRESSION: command_completer_type +COMPLETE_FILENAME: command_completer_type +COMPLETE_LOCATION: command_completer_type +COMPLETE_NONE: command_completer_type +COMPLETE_SYMBOL: command_completer_type + + +DUMMY_FRAME: frame_type + +unwind_stop_reason = NewType('unwind_stop_reason', int) +FRAME_UNWIND_INNER_ID: unwind_stop_reason +FRAME_UNWIND_MEMORY_ERROR: unwind_stop_reason +FRAME_UNWIND_NO_REASON: unwind_stop_reason +FRAME_UNWIND_NO_SAVED_PC: unwind_stop_reason +FRAME_UNWIND_NULL_ID: unwind_stop_reason +FRAME_UNWIND_OUTERMOST: unwind_stop_reason +FRAME_UNWIND_SAME_ID: unwind_stop_reason +FRAME_UNWIND_UNAVAILABLE: unwind_stop_reason +HOST_CONFIG: str +INLINE_FRAME: frame_type + +minimal_symbol_type = NewType('minimal_symbol_type', int) +MINSYMBOL_TYPE_ABS: minimal_symbol_type +MINSYMBOL_TYPE_BSS: minimal_symbol_type +MINSYMBOL_TYPE_DATA: minimal_symbol_type +MINSYMBOL_TYPE_FILE_BSS: minimal_symbol_type +MINSYMBOL_TYPE_FILE_DATA: minimal_symbol_type +MINSYMBOL_TYPE_FILE_TEXT: minimal_symbol_type +MINSYMBOL_TYPE_SLOT_GOT_PLT: minimal_symbol_type +MINSYMBOL_TYPE_SOLIB_TRAMPOLINE: minimal_symbol_type +MINSYMBOL_TYPE_TEXT: minimal_symbol_type +MINSYMBOL_TYPE_TEXT_GNU_IFUNC: minimal_symbol_type +MINSYMBOL_TYPE_UNKNOWN: minimal_symbol_type + +NORMAL_FRAME: frame_type + +var_types_enum = NewType('var_types_enum', int) +PARAM_AUTO_BOOLEAN: var_types_enum +PARAM_BOOLEAN: var_types_enum +PARAM_ENUM: var_types_enum +PARAM_FILENAME: var_types_enum +PARAM_INTEGER: var_types_enum +PARAM_OPTIONAL_FILENAME: var_types_enum +PARAM_STRING: var_types_enum +PARAM_STRING_NOESCAPE: var_types_enum +PARAM_UINTEGER: var_types_enum +PARAM_ZINTEGER: var_types_enum +PARAM_ZUINTEGER: var_types_enum +PARAM_ZUINTEGER_UNLIMITED: var_types_enum +SENTINEL_FRAME: frame_type +SIGTRAMP_FRAME: frame_type + +stream_type = NewType('stream_type', int) +STDERR: stream_type +STDLOG: stream_type +STDOUT: stream_type + +domain_enum_type = NewType('domain_enum_type', int) +address_class_type = NewType('address_class_type', int) +SYMBOL_COMMON_BLOCK_DOMAIN: domain_enum_type +SYMBOL_FUNCTIONS_DOMAIN: domain_enum_type +SYMBOL_LOC_ARG: address_class_type +SYMBOL_LOC_BLOCK: address_class_type +SYMBOL_LOC_COMMON_BLOCK: address_class_type +SYMBOL_LOC_COMPUTED: address_class_type +SYMBOL_LOC_CONST: address_class_type +SYMBOL_LOC_CONST_BYTES: address_class_type +SYMBOL_LOC_LABEL: address_class_type +SYMBOL_LOC_LOCAL: address_class_type +SYMBOL_LOC_OPTIMIZED_OUT: 
address_class_type +SYMBOL_LOC_REF_ARG: address_class_type +SYMBOL_LOC_REGISTER: address_class_type +SYMBOL_LOC_REGPARM_ADDR: address_class_type +SYMBOL_LOC_STATIC: address_class_type +SYMBOL_LOC_TYPEDEF: address_class_type +SYMBOL_LOC_UNDEF: address_class_type +SYMBOL_LOC_UNRESOLVED: address_class_type +SYMBOL_MODULE_DOMAIN: domain_enum_type +SYMBOL_STRUCT_DOMAIN: domain_enum_type +SYMBOL_TYPES_DOMAIN: domain_enum_type +SYMBOL_UNDEF_DOMAIN: domain_enum_type +SYMBOL_VARIABLES_DOMAIN: domain_enum_type +SYMBOL_VAR_DOMAIN: domain_enum_type +TAILCALL_FRAME: frame_type +TARGET_CONFIG: str + +type_code_enum = NewType('type_code_enum', int) +TYPE_CODE_ARRAY: type_code_enum +TYPE_CODE_BITSTRING: type_code_enum +TYPE_CODE_BOOL: type_code_enum +TYPE_CODE_CHAR: type_code_enum +TYPE_CODE_COMPLEX: type_code_enum +TYPE_CODE_DECFLOAT: type_code_enum +TYPE_CODE_ENUM: type_code_enum +TYPE_CODE_ERROR: type_code_enum +TYPE_CODE_FLAGS: type_code_enum +TYPE_CODE_FLT: type_code_enum +TYPE_CODE_FUNC: type_code_enum +TYPE_CODE_INT: type_code_enum +TYPE_CODE_INTERNAL_FUNCTION: type_code_enum +TYPE_CODE_MEMBERPTR: type_code_enum +TYPE_CODE_METHOD: type_code_enum +TYPE_CODE_METHODPTR: type_code_enum +TYPE_CODE_NAMESPACE: type_code_enum +TYPE_CODE_PTR: type_code_enum +TYPE_CODE_RANGE: type_code_enum +TYPE_CODE_REF: type_code_enum +TYPE_CODE_RVALUE_REF: type_code_enum +TYPE_CODE_SET: type_code_enum +TYPE_CODE_STRING: type_code_enum +TYPE_CODE_STRUCT: type_code_enum +TYPE_CODE_TYPEDEF: type_code_enum +TYPE_CODE_UNION: type_code_enum +TYPE_CODE_VOID: type_code_enum +VERSION: str + +hw_bp_type = NewType('hw_bp_type', int) +WP_ACCESS: hw_bp_type +WP_READ: hw_bp_type +WP_WRITE: hw_bp_type + +def breakpoints() -> Tuple[Breakpoint, ...]: ... +def cli() -> None: ... +def convenience_variable(name: str) -> Value: ... +def current_objfile() -> Optional[Objfile]: ... +def current_recording() -> Optional[Record]: ... +def current_target() -> Optional[Target]: ... +def decode_line(loc: str) -> Tuple[str, Optional[Tuple[Symtab_and_line, ...]]]: ... +def default_visualizer(value: Value) -> Any: ... +def execute(command: str, *args: bool, **kwargs: bool) -> Optional[str]: ... +def flush(stream: Optional[int] = ...) -> None: ... +def frame_stop_reason_string(reason: unwind_stop_reason) -> str: ... +def history(i: int) -> Value: ... +def inferiors() -> List[Inferior]: ... +def invalidate_cached_frames() -> None: ... +def lookup_global_symbol(name: str, domain: Optional[int] = ...) -> Optional[Symbol]: ... +def lookup_minimal_symbol(name: str, sfile: Optional[str] = ..., objfile: Optional[Objfile] = ...) -> Optional[MinSymbol]: ... +def lookup_objfile(name: str, by_build_id: Optional[bool] = ...) -> Objfile: ... +def lookup_symbol(name: str, block: Optional[Block] = ..., domain: Optional[int] = ...) -> Tuple[Optional[Symbol], bool]: ... +def lookup_type(name: str, block: Optional[Block] = ...) -> Type: ... +def newest_frame() -> Frame: ... +def parameter(var: str) -> Any: ... +def parse_and_eval(str: str) -> Value: ... +def post_event(event: Callable[[None], Any]) -> None: ... +def progspaces() -> List[Progspace]: ... +def rbreak(regex: str, **kwargs: Union[bool, int, Iterable[Symtab]]) -> List[Breakpoint]: ... +def selected_frame() -> Frame: ... +def selected_inferior() -> Inferior: ... +def selected_thread() -> InferiorThread: ... +def set_convenience_variable(name: str, value: Value) -> None: ... +def start_recording(method: Optional[str] = ..., format: Optional[str] = ...) -> Record: ... +def stop_recording() -> None: ... 
+def string_to_argv(string: str) -> List[str]: ... +def target_charset() -> str: ... +def target_wide_charset() -> str: ... +def write(text: str, stream: Optional[int] = ...) -> None: ... + +class Architecture: + def disassemble(self, start_pc: int, end_pc: Optional[int] = ..., count: Optional[int] = ...) -> List[Dict[str, int]]: ... + def name(self) -> str: ... + +class Block: + end: int = ... + function: Optional[Symbol] = ... + global_block: Block = ... + is_global: bool = ... + is_static: bool = ... + start: int = ... + static_block: Optional[Block] = ... + superblock: Optional[Block] = ... + symbols: Iterator[Symbol] = ... + def is_valid(self) -> bool: ... + def __iter__(self) -> BlockIterator: ... + +class BlockIterator: + def is_valid(self) -> bool: ... + def __iter__(self) -> BlockIterator: ... + def __next__(self) -> Symbol: ... + +class Breakpoint: + commands: Optional[str] = ... + condition: Optional[str] = ... + enabled: bool = ... + expression: Optional[str] = ... + hit_count: int = ... + ignore_count: int = ... + location: Optional[str] = ... + number: int = ... + pending: bool = ... + silent: bool = ... + task: Optional[int] = ... + temporary: bool = ... + thread: Optional[int] = ... + type: bptype_type = ... + visible: bool = ... + def __init__(self, *args: str, **kwargs: Union[str, bptype_type, hw_bp_type, bool, int]) -> None: ... + def delete(self) -> None: ... + def is_valid(self) -> bool: ... + def __delattr__(self, name: str) -> Any: ... + def __setattr__(self, name: str, value: Any) -> Any: ... + +class BreakpointEvent(StopEvent): ... + +class ClearObjFilesEvent(Event): ... + +class Command: + def __init__(self, name: str, command_class: command_class_enum, + completer_class: Optional[command_completer_type] = ..., + prefix: Optional[bool] = ...) -> None: ... + def dont_repeat(self) -> None: ... + +class ContinueEvent(ThreadEvent): ... + +class Event: ... + +EventType = TypeVar('EventType') + +class EventRegistry(Generic[EventType]): + def connect(self, func: Callable[[EventType], Any]) -> None: ... + def disconnect(self, func: Callable[[EventType], Any]) -> None: ... + +class ExitedEvent(Event): ... + +class Field: ... + +class FinishBreakpoint(Breakpoint): + return_value: Optional[Value] = ... + def __init__(self, frame: Optional[Frame] = ..., + internal: Optional[bool] = ...) -> None: ... + +class Frame: + def architecture(self) -> Architecture: ... + def block(self) -> Block: ... + def find_sal(self) -> Symtab_and_line: ... + def function(self) -> Symbol: ... + def is_valid(self) -> bool: ... + def name(self) -> str: ... + def newer(self) -> Frame: ... + def older(self) -> Frame: ... + def pc(self) -> int: ... + def read_register(self, register_name: str) -> Value: ... + def read_var(self, var: Symbol, block: Optional[Block] = ...) -> Value: ... + def select(self) -> None: ... + def type(self) -> int: ... + def unwind_stop_reason(self) -> int: ... + def __eq__(self, other: Any) -> bool: ... + def __ne__(self, other: Any) -> bool: ... + +class Function: + def __init__(self, name: str) -> None: ... + +class GdbError(Exception): ... + +IntValue = Union[Value, int] + +class Inferior: + executing: bool = ... + num: int = ... + pid: bool = ... + progspace: Progspace = ... + was_attached: bool = ... + def appeared(self, pid: int) -> None: ... + def architecture(self) -> Architecture: ... + def delete_thread(self, ptid: Tuple[int, int, int]) -> None: ... + def is_valid(self) -> bool: ... + def new_thread(self, ptid: Tuple[int, int, int], + priv: Optional[Any] = ...) 
-> InferiorThread: ... + def read_memory(self, address: IntValue, length: IntValue) -> Membuf: ... + def search_memory(self, address: IntValue, length: IntValue, + pattern: Buffer) -> int: ... + def thread_from_handle(self, handle: Buffer) -> InferiorThread: ... + def thread_from_thread_handle(self, handle: Buffer) -> InferiorThread: ... + def threads(self) -> List[InferiorThread]: ... + def write_memory(self, address: IntValue, buffer: Buffer, + length: Optional[int] = ...) -> None: ... + +class InferiorCallPostEvent(Event): ... + +class InferiorCallPreEvent(Event): ... + +class InferiorDeletedEvent(Event): ... + +class InferiorThread: + executing: bool = ... + global_num: int = ... + inferior: Inferior = ... + info: Any = ... + name: str = ... + num: int = ... + ptid: Tuple[int, int, int] = ... + registers: Dict[str, Register] = ... + def handle(self) -> bytes: ... + def is_exited(self) -> bool: ... + def is_running(self) -> bool: ... + def is_stopped(self) -> bool: ... + def is_valid(self) -> bool: ... + def switch(self) -> None: ... + +class LineTable: + def has_line(self, lineno: int) -> bool: ... + def is_valid(self) -> bool: ... + def line(self, lineno: int) -> Tuple[LineTableEntry, ...] : ... + def source_lines(self) -> List: ... + def __iter__(self) -> LineTableIterator: ... + +class LineTableEntry: + line: int = ... + pc: int = ... + +class LineTableIterator: + def is_valid(self) -> bool: ... + def __iter__(self) -> LineTableIterator: ... + def __next__(self) -> LineTableEntry: ... + +class Membuf: + def tobytes(self) -> bytes: ... + + +class MemoryChangedEvent(Event): ... + +class MemoryError(error): ... + +class MinSymbol: + filename: str = ... + linkage_name: str = ... + name: str = ... + print_name: str = ... + section: Optional[str] = ... + type: minimal_symbol_type = ... + def is_code(self) -> bool: ... + def is_data(self) -> bool: ... + def is_valid(self) -> bool: ... + def value(self) -> Value: ... + +class NewInferiorEvent(Event): ... + +class NewObjFileEvent(Event): ... + +class NewThreadEvent(ThreadEvent): ... + +class NotAvailableError(error): ... + +class Objfile: + architecture: Architecture = ... + build_id: Optional[str] = ... + filename: Optional[str] = ... + frame_filters: Dict[Any, Any] = ... + frame_unwinders: List[Any] = ... + owner: Optional[Objfile] = ... + pretty_printers: List[Any] = ... + progspace: Progspace = ... + type_printers: List[Any] = ... + username: Optional[str] = ... + xmethods: List[Any] = ... + def add_separate_debug_file(self, file_name: str) -> None: ... + def has_symbols(self) -> bool: ... + def is_valid(self) -> bool: ... + +class Parameter: + def __init__(self, name: str, cmdtype: int, parmclass: int, + enum_values: Optional[Sequence[str]] = ...) -> None: ... + def __delattr__(self, name: str) -> None: ... + def __setattr__(self, name: str, value: Any) -> None: ... + +class PendingFrame: + def create_unwind_info(self, frame_id: Any) -> UnwindInfo: ... + def read_register(self, register_id: Union[str, int]) -> Value: ... + + +class Progspace: + filename: Optional[str] = ... + frame_filters: Dict[Any, Any] = ... + frame_unwinders: List[Any] = ... + pretty_printers: List[Any] = ... + type_printers: List[Any] = ... + xmethods: List[Any] = ... + def __init__(self) -> None: ... + def block_for_pc(self, pc: int) -> Optional[Block]: ... + def find_pc_line(self, pc: int) -> Symtab_and_line: ... + def is_valid(self) -> bool: ... + def objfiles(self) -> List[Objfile]: ... + def solib_name(self, name: int) -> Optional[str]: ... 
+ +class Register: + name: Optional[str] = ... + regnum: int = ... + size: int = ... + type: Type = ... + value: Union[Value, int] = ... + +class RegisterChangedEvent(Event): ... + +class SignalEvent(StopEvent): ... + +class StopEvent(ThreadEvent): ... + +class Symbol: + addr_class: int = ... + is_argument: bool = ... + is_constant: bool = ... + is_function: bool = ... + is_variable: bool = ... + line: int = ... + linkage_name: str = ... + name: str = ... + needs_frame: bool = ... + print_name: str = ... + section: Optional[str] = ... + symtab: Symtab = ... + type: Type = ... + def is_valid(self) -> bool: ... + def value(self, frame: Optional[Frame] = ...) -> Value: ... + +class Symtab: + filename: str = ... + objfile: Objfile = ... + producer: Optional[str] = ... + def fullname(self) -> str: ... + def global_block(self) -> Block: ... + def is_valid(self) -> bool: ... + def linetable(self) -> LineTable: ... + def static_block(self) -> Block: ... + +class Symtab_and_line: + last: Optional[int] = ... + line: int = ... + pc: int = ... + symtab: Symtab = ... + def is_valid(self) -> bool: ... + +class Target: + TARGET_OBJECT_AUXV: int = ... + TARGET_OBJECT_AVAILABLE_FEATURES: int = ... + TARGET_OBJECT_AVR: int = ... + TARGET_OBJECT_BTRACE: int = ... + TARGET_OBJECT_BTRACE_CONF: int = ... + TARGET_OBJECT_CODE_MEMORY: int = ... + TARGET_OBJECT_DARWIN_DYLD_INFO: int = ... + TARGET_OBJECT_EXEC_FILE: int = ... + TARGET_OBJECT_FDPIC: int = ... + TARGET_OBJECT_FLASH: int = ... + TARGET_OBJECT_FREEBSD_PS_STRINGS: int = ... + TARGET_OBJECT_FREEBSD_VMMAP: int = ... + TARGET_OBJECT_LIBRARIES: int = ... + TARGET_OBJECT_LIBRARIES_AIX: int = ... + TARGET_OBJECT_LIBRARIES_SVR4: int = ... + TARGET_OBJECT_MEMORY: int = ... + TARGET_OBJECT_MEMORY_MAP: int = ... + TARGET_OBJECT_OPENVMS_UIB: int = ... + TARGET_OBJECT_OSDATA: int = ... + TARGET_OBJECT_RAW_MEMORY: int = ... + TARGET_OBJECT_SIGNAL_INFO: int = ... + TARGET_OBJECT_SPU: int = ... + TARGET_OBJECT_STACK_MEMORY: int = ... + TARGET_OBJECT_STATIC_TRACE_DATA: int = ... + TARGET_OBJECT_THREADS: int = ... + TARGET_OBJECT_TRACEFRAME_INFO: int = ... + TARGET_OBJECT_UNWIND_TABLE: int = ... + TARGET_OBJECT_WCOOKIE: int = ... + arch: Architecture = ... + docstring: str = ... + longname: str = ... + name: str = ... + shortname: str = ... + stratum: int = ... + @classmethod + def __init__(self) -> None: ... + def register(self) -> Any: ... + def unregister(self) -> Any: ... + + def stacked_target(self) -> bool: ... + def open(self, argstring: str, from_tty: bool) -> None: ... + def close(self) -> None: ... + def info(self, thread: InferiorThread) -> str: ... + def xfer_partial(self, object: int, annex: str, readbuf: bytearray, + writebuf: bytearray, offset: int, len: int) -> int: ... + def extra_thread_info(self) -> str: ... + def update_thread_list(self) -> None: ... + def thread_alive(self, ptid: Tuple[int, int, int]) -> bool: ... + def pid_to_str(self, ptid: Tuple[int, int,int]) -> str: ... + def fetch_registers(self, thread: InferiorThread, + register: Register) -> None: ... + def prepare_to_store(self, thread: InferiorThread) -> None: ... + def store_registers(self, thread: InferiorThread, + register: Register) -> None: ... + def has_execution(self, ptid: Tuple[int, int, int]) -> bool: ... + +class TargetXferEOF(EOFError): ... + +class TargetXferUnavailable(LookupError): ... + +class ThreadEvent(Event): ... + +class Type: + alignof: int = ... + code: int = ... + name: Optional[str] = ... + sizeof: int = ... + tag: Optional[str] = ... 
+ def array(self, low: int, high: Optional[int] = ...) -> Type: ... + def const(self) -> Type: ... + def fields(self) -> List: ... + def get(self, k: str, + default: Optional[Field] = ...) -> Optional[Field]: ... + def has_key(self, k: str) -> bool: ... + def items(self) -> List: ... + def iteritems(self) -> Iterator[Tuple[str, Field]]: ... + def iterkeys(self) -> Iterator[str]: ... + def itervalues(self) -> Iterator[Field]: ... + def keys(self) -> List: ... + def optimized_out(self) -> Value: ... + def pointer(self) -> Type: ... + def range(self) -> tuple: ... + def reference(self) -> Type: ... + def strip_typedefs(self) -> Type: ... + def target(self) -> Type: ... + def template_argument(self, arg: int, block: Optional[Block] = ...) -> Type: ... + def unqualified(self) -> Type: ... + def values(self) -> List: ... + def vector(self, low: int, high: Optional[int] = ...) -> Type: ... + def volatile(self) -> Type: ... + def __bool__(self) -> bool: ... + def __contains__(self, k: Any) -> bool: ... + def __eq__(self, other: Any) -> bool: ... + def __ge__(self, other: Any) -> bool: ... + def __getitem__(self, index: Any) -> Any: ... + def __gt__(self, other: Any) -> bool: ... + def __iter__(self) -> TypeIterator: ... + def __le__(self, other: Any) -> bool: ... + def __len__(self) -> int: ... + def __lt__(self, other: Any) -> bool: ... + def __ne__(self, other: Any) -> bool: ... + +class TypeIterator: + def __iter__(self) -> Any: ... + def __next__(self) -> Any: ... + +class UnwindInfo: + def add_saved_register(self, reg: IntValue, value: Value) -> None: ... + +class Value: + address: Value = ... + dynamic_type: Type = ... + is_lazy: bool = ... + is_optimized_out: bool = ... + type: Type = ... + @classmethod + def __init__(self, val: Union[Value, Buffer], + type: Optional[Type]) -> None: ... + def cast(self, type: Type) -> Value: ... + def const_value(self) -> Value: ... + def dereference(self) -> Value: ... + def dynamic_cast(self, type: Type) -> Value: ... + def fetch_lazy(self) -> None: ... + def format_string(self, **kwargs: Union[bool, int, str]) -> str: ... + def lazy_string(self, encoding: Optional[str] = ..., length: Optional[int] = ...) -> LazyString: ... + def reference_value(self) -> Value: ... + def referenced_value(self) -> Value: ... + def reinterpret_cast(self, type: Type) -> Value: ... + def rvalue_reference_value(self) -> Value: ... + def string(self, encoding: Optional[str] = ..., errors: Optional[str] = ..., length: Optional[int] = ...) -> str: ... + def __abs__(self) -> Value: ... + def __add__(self, other: Any) -> Value: ... + def __and__(self, other: Any) -> Value: ... + def __bool__(self) -> bool: ... + def __call__(self, args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> Any: ... + def __delitem__(self, index: Any) -> None: ... + def __eq__(self, other: Any) -> bool: ... + def __float__(self) -> float: ... + def __ge__(self, other: Any) -> bool: ... + def __getitem__(self, index: Any) -> Any: ... + def __gt__(self, other: Any) -> bool: ... + def __hash__(self) -> int: ... + def __index__(self) -> int: ... + def __int__(self) -> int: ... + def __invert__(self) -> Value: ... + def __le__(self, other: Any) -> bool: ... + def __len__(self) -> int: ... + def __lshift__(self, other: Any) -> Value: ... + def __lt__(self, other: Any) -> Value: ... + def __mod__(self, other: Any) -> Any: ... + def __mul__(self, other: Any) -> Any: ... + def __ne__(self, other: Any) -> bool: ... + def __neg__(self) -> Value: ... + def __or__(self, other: Any) -> Value: ... 
+ def __pos__(self) -> Any: ... + def __pow__(self, other: Any) -> Value: ... + def __radd__(self, other: Any) -> Any: ... + def __rand__(self, other: Any) -> Any: ... + def __rlshift__(self, other: Any) -> Any: ... + def __rmod__(self, other: Any) -> Any: ... + def __rmul__(self, other: Any) -> Any: ... + def __ror__(self, other: Any) -> Any: ... + def __rpow__(self, other: Any) -> Any: ... + def __rrshift__(self, other: Any) -> Any: ... + def __rshift__(self, other: Any) -> Any: ... + def __rsub__(self, other: Any) -> Any: ... + def __rtruediv__(self, other: Any) -> Any: ... + def __rxor__(self, other: Any) -> Any: ... + def __setitem__(self, index: Any, object: Any) -> Any: ... + def __sub__(self, other: Any) -> Value: ... + def __truediv__(self, other: Any) -> Value: ... + def __xor__(self, other: Any) -> Value: ... + +class error(RuntimeError): ... + +RecordPosition = Optional[Union[RecordInstruction, RecordGap]] + +class Record: + method: str = ... + format: str = ... + replay_position: RecordPosition = ... + instruction_history: Optional[List[RecordInstruction]] = ... + function_call_history: Optional[List[RecordFunctionSegment]] = ... + begin: RecordPosition = ... + end: RecordPosition = ... + def goto(self, instruction: Record) -> None: ... + +class RecordInstruction: + number: int = ... + sal: Symtab_and_line = ... + pc: int = ... + data: Buffer = ... + decoded: str = ... + size: int = ... + is_speculative: bool = ... + def __eq__(self, other: Any) -> bool: ... + def __ne__(self, other: Any) -> bool: ... + def __hash__(self) -> int: ... + +class RecordFunctionSegment: + number: int = ... + level: int = ... + symbol: Symbol = ... + instructions: List[RecordInstruction] = ... + up: Optional[RecordFunctionSegment] = ... + prev: Optional[RecordFunctionSegment] = ... + next: Optional[RecordFunctionSegment] = ... + def __eq__(self, other: Any) -> bool: ... + def __ne__(self, other: Any) -> bool: ... + def __hash__(self) -> int: ... + +class RecordGap: + number: int = ... + reason_code: int = ... + reason_string: str = ... + +class LazyString: + address: int = ... + encoding: str = ... + length: int = ... + type: Type = ... + def value(self) -> str: ... diff --git a/tests/stubs/gdb/FrameDecorator.pyi b/tests/stubs/gdb/FrameDecorator.pyi new file mode 100644 index 00000000000..9f1d55a9b4a --- /dev/null +++ b/tests/stubs/gdb/FrameDecorator.pyi @@ -0,0 +1,40 @@ +# Stubs for gdb.FrameDecorator (Python 3) +# +# NOTE: This dynamically typed stub was automatically generated by stubgen. + +from typing import Any, Union, List, Optional, Iterable + +import gdb + +basestring = str + +FrameOrDecorator = Union[gdb.Frame, FrameDecorator] + +class FrameDecorator: + _base: FrameOrDecorator = ... + def __init__(self, base: FrameOrDecorator) -> None: ... + @staticmethod + def _is_limited_frame(frame: gdb.Frame) -> bool: ... + def elided(self) -> Optional[List[gdb.Frame]]: ... + def function(self) -> Union[str, int]: ... + def address(self) -> int: ... + def filename(self) -> str: ... + def frame_args(self) -> Optional[Iterable]: ... + def frame_locals(self) -> Optional[Iterable]: ... + def line(self) -> Optional[int]: ... + def inferior_frame(self) -> gdb.Frame: ... + +class SymValueWrapper: + sym: Any = ... + val: Any = ... + def __init__(self, symbol: gdb.Symbol, value: gdb.Value) -> None: ... + def value(self) -> gdb.Value: ... + def symbol(self) -> gdb.Symbol: ... + +class FrameVars: + frame: gdb.Frame = ... + symbol_class: gdb.address_class_type = ... + def __init__(self, frame: Any) -> None: ... 
+ def fetch_b(self, sym: gdb.Symbol) -> bool: ... + def fetch_frame_locals(self) -> List[SymValueWrapper]: ... + def fetch_frame_args(self) -> List[SymValueWrapper]: ... diff --git a/tests/stubs/gdb/FrameIterator.pyi b/tests/stubs/gdb/FrameIterator.pyi new file mode 100644 index 00000000000..b87fa0aafa5 --- /dev/null +++ b/tests/stubs/gdb/FrameIterator.pyi @@ -0,0 +1,12 @@ +# Stubs for gdb.FrameIterator (Python 3) +# +# NOTE: This dynamically typed stub was automatically generated by stubgen. + +from typing import Any + +class FrameIterator: + frame: Any = ... + def __init__(self, frame_obj: Any) -> None: ... + def __iter__(self): ... + def next(self): ... + def __next__(self): ... diff --git a/tests/stubs/gdb/FrameWrapper.pyi b/tests/stubs/gdb/FrameWrapper.pyi new file mode 100644 index 00000000000..187f8345039 --- /dev/null +++ b/tests/stubs/gdb/FrameWrapper.pyi @@ -0,0 +1,14 @@ +# Stubs for gdb.FrameWrapper (Python 3) +# +# NOTE: This dynamically typed stub was automatically generated by stubgen. + +from typing import Any + +class FrameWrapper: + frame: Any = ... + def __init__(self, frame: Any) -> None: ... + def write_symbol(self, stream: Any, sym: Any, block: Any) -> None: ... + def print_frame_locals(self, stream: Any, func: Any) -> None: ... + def print_frame_args(self, stream: Any, func: Any) -> None: ... + def describe(self, stream: Any, full: Any) -> None: ... + def __getattr__(self, name: Any): ... diff --git a/tests/stubs/gdb/__init__.pyi b/tests/stubs/gdb/__init__.pyi new file mode 100644 index 00000000000..c59ff9c1aae --- /dev/null +++ b/tests/stubs/gdb/__init__.pyi @@ -0,0 +1,41 @@ +# Stubs for gdb (Python 3) +# +# NOTE: This dynamically typed stub was automatically generated by stubgen. + +from _gdb import * +import gdb.events +from typing import Any, Optional, List + +class _GdbFile: + encoding: str = ... + errors: str = ... + def close(self) -> None: ... + def isatty(self) -> bool: ... + def writelines(self, iterable: Any) -> None: ... + def flush(self) -> None: ... + +class GdbOutputFile(_GdbFile): + def write(self, s: Any) -> None: ... + +class GdbOutputErrorFile(_GdbFile): + def write(self, s: Any) -> None: ... + +prompt_hook: Any +pretty_printers: Any +type_printers: Any +xmethods: Any +frame_filters: Any +frame_unwinders: Any + +def execute_unwinders(pending_frame: PendingFrame) -> Optional[UnwindInfo]: ... + +PYTHONDIR: str +packages: List[str] + +def auto_load_packages() -> None: ... +def GdbSetPythonDirectory(dir: Any) -> None: ... +def current_progspace() -> Progspace: ... +def objfiles() -> List[Objfile]: ... +def solib_name(int: Any) -> str: ... +def block_for_pc(pc: int) -> Optional[Block]: ... +def find_pc_line(pc: int) -> Symtab_and_line: ... diff --git a/tests/stubs/gdb/backtrace.pyi b/tests/stubs/gdb/backtrace.pyi new file mode 100644 index 00000000000..e7013d83f5b --- /dev/null +++ b/tests/stubs/gdb/backtrace.pyi @@ -0,0 +1,8 @@ +# Stubs for gdb.backtrace (Python 3) +# +# NOTE: This dynamically typed stub was automatically generated by stubgen. + +from typing import Any + +def push_frame_filter(constructor: Any): ... +def create_frame_filter(iter: Any): ... diff --git a/tests/stubs/gdb/command/__init__.pyi b/tests/stubs/gdb/command/__init__.pyi new file mode 100644 index 00000000000..121bf2ba6db --- /dev/null +++ b/tests/stubs/gdb/command/__init__.pyi @@ -0,0 +1,4 @@ +# Stubs for gdb.command (Python 3) +# +# NOTE: This dynamically typed stub was automatically generated by stubgen. 
+ diff --git a/tests/stubs/gdb/command/backtrace.pyi b/tests/stubs/gdb/command/backtrace.pyi new file mode 100644 index 00000000000..e32facf326b --- /dev/null +++ b/tests/stubs/gdb/command/backtrace.pyi @@ -0,0 +1,19 @@ +# Stubs for gdb.command.backtrace (Python 3) +# +# NOTE: This dynamically typed stub was automatically generated by stubgen. + +import gdb.backtrace +from typing import Any + +class ReverseBacktraceParameter(gdb.Parameter): + set_doc: str = ... + show_doc: str = ... + value: bool = ... + def __init__(self) -> None: ... + +class FilteringBacktrace(gdb.Command): + reverse: Any = ... + def __init__(self) -> None: ... + def reverse_iter(self, iter: Any): ... + def final_n(self, iter: Any, x: Any): ... + def invoke(self, arg: Any, from_tty: Any) -> None: ... diff --git a/tests/stubs/gdb/command/explore.pyi b/tests/stubs/gdb/command/explore.pyi new file mode 100644 index 00000000000..0711a3e0907 --- /dev/null +++ b/tests/stubs/gdb/command/explore.pyi @@ -0,0 +1,90 @@ +# Stubs for gdb.command.explore (Python 3) +# +# NOTE: This dynamically typed stub was automatically generated by stubgen. + +import gdb +from typing import Any + +raw_input = input + +class Explorer: + type_code_to_explorer_map: Any = ... + _SCALAR_TYPE_LIST: Any = ... + @staticmethod + def guard_expr(expr: Any): ... + @staticmethod + def explore_expr(expr: Any, value: Any, is_child: Any) -> None: ... + @staticmethod + def explore_type(name: Any, datatype: Any, is_child: Any) -> None: ... + @staticmethod + def init_env() -> None: ... + @staticmethod + def is_scalar_type(type: Any): ... + @staticmethod + def return_to_parent_value() -> None: ... + @staticmethod + def return_to_parent_value_prompt() -> None: ... + @staticmethod + def return_to_enclosing_type() -> None: ... + @staticmethod + def return_to_enclosing_type_prompt() -> None: ... + +class ScalarExplorer: + @staticmethod + def explore_expr(expr: Any, value: Any, is_child: Any): ... + @staticmethod + def explore_type(name: Any, datatype: Any, is_child: Any): ... + +class PointerExplorer: + @staticmethod + def explore_expr(expr: Any, value: Any, is_child: Any): ... + @staticmethod + def explore_type(name: Any, datatype: Any, is_child: Any): ... + +class ReferenceExplorer: + @staticmethod + def explore_expr(expr: Any, value: Any, is_child: Any): ... + @staticmethod + def explore_type(name: Any, datatype: Any, is_child: Any): ... + +class ArrayExplorer: + @staticmethod + def explore_expr(expr: Any, value: Any, is_child: Any): ... + @staticmethod + def explore_type(name: Any, datatype: Any, is_child: Any): ... + +class CompoundExplorer: + @staticmethod + def _print_fields(print_list: Any) -> None: ... + @staticmethod + def _get_real_field_count(fields: Any): ... + @staticmethod + def explore_expr(expr: Any, value: Any, is_child: Any): ... + @staticmethod + def explore_type(name: Any, datatype: Any, is_child: Any): ... + +class TypedefExplorer: + @staticmethod + def explore_expr(expr: Any, value: Any, is_child: Any): ... + @staticmethod + def explore_type(name: Any, datatype: Any, is_child: Any): ... + +class ExploreUtils: + @staticmethod + def check_args(name: Any, arg_str: Any): ... + @staticmethod + def get_type_from_str(type_str: Any): ... + @staticmethod + def get_value_from_str(value_str: Any): ... + +class ExploreCommand(gdb.Command): + def __init__(self) -> None: ... + def invoke(self, arg_str: Any, from_tty: Any) -> None: ... + +class ExploreValueCommand(gdb.Command): + def __init__(self) -> None: ... 
+ def invoke(self, arg_str: Any, from_tty: Any) -> None: ... + +class ExploreTypeCommand(gdb.Command): + def __init__(self) -> None: ... + def invoke(self, arg_str: Any, from_tty: Any) -> None: ... diff --git a/tests/stubs/gdb/command/frame_filters.pyi b/tests/stubs/gdb/command/frame_filters.pyi new file mode 100644 index 00000000000..8c5168d9827 --- /dev/null +++ b/tests/stubs/gdb/command/frame_filters.pyi @@ -0,0 +1,48 @@ +# Stubs for gdb.command.frame_filters (Python 3) +# +# NOTE: This dynamically typed stub was automatically generated by stubgen. + +import gdb.frames +from typing import Any + +class SetFilterPrefixCmd(gdb.Command): + def __init__(self) -> None: ... + +class ShowFilterPrefixCmd(gdb.Command): + def __init__(self) -> None: ... + +class InfoFrameFilter(gdb.Command): + def __init__(self) -> None: ... + @staticmethod + def enabled_string(state: Any): ... + def print_list(self, title: Any, frame_filters: Any, blank_line: Any): ... + def invoke(self, arg: Any, from_tty: Any) -> None: ... + +def _enable_parse_arg(cmd_name: Any, arg: Any): ... +def _do_enable_frame_filter(command_tuple: Any, flag: Any) -> None: ... +def _complete_frame_filter_list(text: Any, word: Any, all_flag: Any): ... +def _complete_frame_filter_name(word: Any, printer_dict: Any): ... + +class EnableFrameFilter(gdb.Command): + def __init__(self) -> None: ... + def complete(self, text: Any, word: Any): ... + def invoke(self, arg: Any, from_tty: Any) -> None: ... + +class DisableFrameFilter(gdb.Command): + def __init__(self) -> None: ... + def complete(self, text: Any, word: Any): ... + def invoke(self, arg: Any, from_tty: Any) -> None: ... + +class SetFrameFilterPriority(gdb.Command): + def __init__(self) -> None: ... + def _parse_pri_arg(self, arg: Any): ... + def _set_filter_priority(self, command_tuple: Any) -> None: ... + def complete(self, text: Any, word: Any): ... + def invoke(self, arg: Any, from_tty: Any) -> None: ... + +class ShowFrameFilterPriority(gdb.Command): + def __init__(self) -> None: ... + def _parse_pri_arg(self, arg: Any): ... + def get_filter_priority(self, frame_filters: Any, name: Any): ... + def complete(self, text: Any, word: Any): ... + def invoke(self, arg: Any, from_tty: Any) -> None: ... diff --git a/tests/stubs/gdb/command/ignore_errors.pyi b/tests/stubs/gdb/command/ignore_errors.pyi new file mode 100644 index 00000000000..a4f244b662f --- /dev/null +++ b/tests/stubs/gdb/command/ignore_errors.pyi @@ -0,0 +1,10 @@ +# Stubs for gdb.command.ignore_errors (Python 3) +# +# NOTE: This dynamically typed stub was automatically generated by stubgen. + +import gdb +from typing import Any + +class IgnoreErrorsCommand(gdb.Command): + def __init__(self) -> None: ... + def invoke(self, arg: Any, from_tty: Any) -> None: ... diff --git a/tests/stubs/gdb/command/pretty_printers.pyi b/tests/stubs/gdb/command/pretty_printers.pyi new file mode 100644 index 00000000000..09af91d2cee --- /dev/null +++ b/tests/stubs/gdb/command/pretty_printers.pyi @@ -0,0 +1,36 @@ +# Stubs for gdb.command.pretty_printers (Python 3) +# +# NOTE: This dynamically typed stub was automatically generated by stubgen. + +import gdb +from typing import Any + +def parse_printer_regexps(arg: Any): ... +def printer_enabled_p(printer: Any): ... + +class InfoPrettyPrinter(gdb.Command): + def __init__(self) -> None: ... + @staticmethod + def enabled_string(printer: Any): ... + @staticmethod + def printer_name(printer: Any): ... + def list_pretty_printers(self, pretty_printers: Any, name_re: Any, subname_re: Any) -> None: ... 
+ def invoke1(self, title: Any, printer_list: Any, obj_name_to_match: Any, object_re: Any, name_re: Any, subname_re: Any) -> None: ... + def invoke(self, arg: Any, from_tty: Any) -> None: ... + +def count_enabled_printers(pretty_printers: Any): ... +def count_all_enabled_printers(): ... +def pluralize(text: Any, n: Any, suffix: str = ...): ... +def show_pretty_printer_enabled_summary() -> None: ... +def do_enable_pretty_printer_1(pretty_printers: Any, name_re: Any, subname_re: Any, flag: Any): ... +def do_enable_pretty_printer(arg: Any, flag: Any) -> None: ... + +class EnablePrettyPrinter(gdb.Command): + def __init__(self) -> None: ... + def invoke(self, arg: Any, from_tty: Any) -> None: ... + +class DisablePrettyPrinter(gdb.Command): + def __init__(self) -> None: ... + def invoke(self, arg: Any, from_tty: Any) -> None: ... + +def register_pretty_printer_commands() -> None: ... diff --git a/tests/stubs/gdb/command/prompt.pyi b/tests/stubs/gdb/command/prompt.pyi new file mode 100644 index 00000000000..b330afd9d05 --- /dev/null +++ b/tests/stubs/gdb/command/prompt.pyi @@ -0,0 +1,17 @@ +# Stubs for gdb.command.prompt (Python 3) +# +# NOTE: This dynamically typed stub was automatically generated by stubgen. + +import gdb.prompt +from typing import Any + +class _ExtendedPrompt(gdb.Parameter): + __doc__: Any = ... + set_doc: str = ... + show_doc: str = ... + value: str = ... + hook_set: bool = ... + def __init__(self) -> None: ... + def get_show_string(self, pvalue: Any): ... + def get_set_string(self): ... + def before_prompt_hook(self, current: Any): ... diff --git a/tests/stubs/gdb/command/type_printers.pyi b/tests/stubs/gdb/command/type_printers.pyi new file mode 100644 index 00000000000..c729cbb6b0d --- /dev/null +++ b/tests/stubs/gdb/command/type_printers.pyi @@ -0,0 +1,25 @@ +# Stubs for gdb.command.type_printers (Python 3) +# +# NOTE: This dynamically typed stub was automatically generated by stubgen. + +import gdb +from typing import Any + +class InfoTypePrinter(gdb.Command): + def __init__(self) -> None: ... + def list_type_printers(self, type_printers: Any): ... + def invoke(self, arg: Any, from_tty: Any) -> None: ... + +class _EnableOrDisableCommand(gdb.Command): + setting: Any = ... + def __init__(self, setting: Any, name: Any) -> None: ... + def set_some(self, name: Any, printers: Any): ... + def invoke(self, arg: Any, from_tty: Any) -> None: ... + def add_some(self, result: Any, word: Any, printers: Any) -> None: ... + def complete(self, text: Any, word: Any): ... + +class EnableTypePrinter(_EnableOrDisableCommand): + def __init__(self) -> None: ... + +class DisableTypePrinter(_EnableOrDisableCommand): + def __init__(self) -> None: ... diff --git a/tests/stubs/gdb/command/unwinders.pyi b/tests/stubs/gdb/command/unwinders.pyi new file mode 100644 index 00000000000..150f81739fd --- /dev/null +++ b/tests/stubs/gdb/command/unwinders.pyi @@ -0,0 +1,27 @@ +# Stubs for gdb.command.unwinders (Python 3) +# +# NOTE: This dynamically typed stub was automatically generated by stubgen. + +import gdb +from typing import Any + +def validate_regexp(exp: Any, idstring: Any): ... +def parse_unwinder_command_args(arg: Any): ... + +class InfoUnwinder(gdb.Command): + def __init__(self) -> None: ... + def list_unwinders(self, title: Any, unwinders: Any, name_re: Any) -> None: ... + def invoke(self, arg: Any, from_tty: Any) -> None: ... + +def do_enable_unwinder1(unwinders: Any, name_re: Any, flag: Any): ... +def do_enable_unwinder(arg: Any, flag: Any) -> None: ... 
+ +class EnableUnwinder(gdb.Command): + def __init__(self) -> None: ... + def invoke(self, arg: Any, from_tty: Any) -> None: ... + +class DisableUnwinder(gdb.Command): + def __init__(self) -> None: ... + def invoke(self, arg: Any, from_tty: Any) -> None: ... + +def register_unwinder_commands() -> None: ... diff --git a/tests/stubs/gdb/command/xmethods.pyi b/tests/stubs/gdb/command/xmethods.pyi new file mode 100644 index 00000000000..e96d7faf69d --- /dev/null +++ b/tests/stubs/gdb/command/xmethods.pyi @@ -0,0 +1,28 @@ +# Stubs for gdb.command.xmethods (Python 3) +# +# NOTE: This dynamically typed stub was automatically generated by stubgen. + +import gdb +from typing import Any + +def validate_xm_regexp(part_name: Any, regexp: Any): ... +def parse_xm_command_args(arg: Any): ... +def get_global_method_matchers(locus_re: Any, matcher_re: Any): ... +def get_method_matchers_in_loci(loci: Any, locus_re: Any, matcher_re: Any): ... +def print_xm_info(xm_dict: Any, name_re: Any): ... +def set_xm_status1(xm_dict: Any, name_re: Any, status: Any) -> None: ... +def set_xm_status(arg: Any, status: Any) -> None: ... + +class InfoXMethod(gdb.Command): + def __init__(self) -> None: ... + def invoke(self, arg: Any, from_tty: Any) -> None: ... + +class EnableXMethod(gdb.Command): + def __init__(self) -> None: ... + def invoke(self, arg: Any, from_tty: Any) -> None: ... + +class DisableXMethod(gdb.Command): + def __init__(self) -> None: ... + def invoke(self, arg: Any, from_tty: Any) -> None: ... + +def register_xmethod_commands() -> None: ... diff --git a/tests/stubs/gdb/frames.pyi b/tests/stubs/gdb/frames.pyi new file mode 100644 index 00000000000..3907e037c3e --- /dev/null +++ b/tests/stubs/gdb/frames.pyi @@ -0,0 +1,13 @@ +# Stubs for gdb.frames (Python 3) +# +# NOTE: This dynamically typed stub was automatically generated by stubgen. + +from typing import Any + +def get_priority(filter_item: Any): ... +def set_priority(filter_item: Any, priority: Any) -> None: ... +def get_enabled(filter_item: Any): ... +def set_enabled(filter_item: Any, state: Any) -> None: ... +def return_list(name: Any): ... +def _sort_list(): ... +def execute_frame_filters(frame: Any, frame_low: Any, frame_high: Any): ... diff --git a/tests/stubs/gdb/function/__init__.pyi b/tests/stubs/gdb/function/__init__.pyi new file mode 100644 index 00000000000..ff8be3e043a --- /dev/null +++ b/tests/stubs/gdb/function/__init__.pyi @@ -0,0 +1,4 @@ +# Stubs for gdb.function (Python 3) +# +# NOTE: This dynamically typed stub was automatically generated by stubgen. + diff --git a/tests/stubs/gdb/function/as_string.pyi b/tests/stubs/gdb/function/as_string.pyi new file mode 100644 index 00000000000..bfe81b767dc --- /dev/null +++ b/tests/stubs/gdb/function/as_string.pyi @@ -0,0 +1,10 @@ +# Stubs for gdb.function.as_string (Python 3) +# +# NOTE: This dynamically typed stub was automatically generated by stubgen. + +import gdb +from typing import Any + +class _AsString(gdb.Function): + def __init__(self) -> None: ... + def invoke(self, val: Any): ... diff --git a/tests/stubs/gdb/function/caller_is.pyi b/tests/stubs/gdb/function/caller_is.pyi new file mode 100644 index 00000000000..8f872b6f612 --- /dev/null +++ b/tests/stubs/gdb/function/caller_is.pyi @@ -0,0 +1,22 @@ +# Stubs for gdb.function.caller_is (Python 3) +# +# NOTE: This dynamically typed stub was automatically generated by stubgen. + +import gdb +from typing import Any + +class CallerIs(gdb.Function): + def __init__(self) -> None: ... + def invoke(self, name: Any, nframes: int = ...): ... 
+ +class CallerMatches(gdb.Function): + def __init__(self) -> None: ... + def invoke(self, name: Any, nframes: int = ...): ... + +class AnyCallerIs(gdb.Function): + def __init__(self) -> None: ... + def invoke(self, name: Any, nframes: int = ...): ... + +class AnyCallerMatches(gdb.Function): + def __init__(self) -> None: ... + def invoke(self, name: Any, nframes: int = ...): ... diff --git a/tests/stubs/gdb/function/in_scope.pyi b/tests/stubs/gdb/function/in_scope.pyi new file mode 100644 index 00000000000..f7e76aeb29e --- /dev/null +++ b/tests/stubs/gdb/function/in_scope.pyi @@ -0,0 +1,10 @@ +# Stubs for gdb.function.in_scope (Python 3) +# +# NOTE: This dynamically typed stub was automatically generated by stubgen. + +import gdb +from typing import Any + +class InScope(gdb.Function): + def __init__(self) -> None: ... + def invoke(self, *vars: Any): ... diff --git a/tests/stubs/gdb/function/strfns.pyi b/tests/stubs/gdb/function/strfns.pyi new file mode 100644 index 00000000000..29be686bf74 --- /dev/null +++ b/tests/stubs/gdb/function/strfns.pyi @@ -0,0 +1,22 @@ +# Stubs for gdb.function.strfns (Python 3) +# +# NOTE: This dynamically typed stub was automatically generated by stubgen. + +import gdb +from typing import Any + +class _MemEq(gdb.Function): + def __init__(self) -> None: ... + def invoke(self, a: Any, b: Any, length: Any): ... + +class _StrLen(gdb.Function): + def __init__(self) -> None: ... + def invoke(self, a: Any): ... + +class _StrEq(gdb.Function): + def __init__(self) -> None: ... + def invoke(self, a: Any, b: Any): ... + +class _RegEx(gdb.Function): + def __init__(self) -> None: ... + def invoke(self, string: Any, regex: Any): ... diff --git a/tests/stubs/gdb/printer/__init__.pyi b/tests/stubs/gdb/printer/__init__.pyi new file mode 100644 index 00000000000..c8704c21564 --- /dev/null +++ b/tests/stubs/gdb/printer/__init__.pyi @@ -0,0 +1,4 @@ +# Stubs for gdb.printer (Python 3) +# +# NOTE: This dynamically typed stub was automatically generated by stubgen. + diff --git a/tests/stubs/gdb/printer/bound_registers.pyi b/tests/stubs/gdb/printer/bound_registers.pyi new file mode 100644 index 00000000000..4433a725d30 --- /dev/null +++ b/tests/stubs/gdb/printer/bound_registers.pyi @@ -0,0 +1,13 @@ +# Stubs for gdb.printer.bound_registers (Python 3) +# +# NOTE: This dynamically typed stub was automatically generated by stubgen. + +from typing import Any + +basestring = str +long = int + +class MpxBound128Printer: + val: Any = ... + def __init__(self, val: Any) -> None: ... + def to_string(self): ... diff --git a/tests/stubs/gdb/printing.pyi b/tests/stubs/gdb/printing.pyi new file mode 100644 index 00000000000..21012213d00 --- /dev/null +++ b/tests/stubs/gdb/printing.pyi @@ -0,0 +1,48 @@ +# Stubs for gdb.printing (Python 3) +# +# NOTE: This dynamically typed stub was automatically generated by stubgen. + +from typing import Any, Optional + +basestring = str +long = int + +class PrettyPrinter: + name: Any = ... + subprinters: Any = ... + enabled: bool = ... + def __init__(self, name: Any, subprinters: Optional[Any] = ...) -> None: ... + def __call__(self, val: Any) -> None: ... + +class SubPrettyPrinter: + name: Any = ... + enabled: bool = ... + def __init__(self, name: Any) -> None: ... + +def register_pretty_printer(obj: Any, printer: Any, replace: bool = ...) -> None: ... + +class RegexpCollectionPrettyPrinter(PrettyPrinter): + class RegexpSubprinter(SubPrettyPrinter): + regexp: Any = ... + gen_printer: Any = ... + compiled_re: Any = ... 
+ def __init__(self, name: Any, regexp: Any, gen_printer: Any) -> None: ... + def __init__(self, name: Any) -> None: ... + def add_printer(self, name: Any, regexp: Any, gen_printer: Any) -> None: ... + def __call__(self, val: Any): ... + +class _EnumInstance: + enumerators: Any = ... + val: Any = ... + def __init__(self, enumerators: Any, val: Any) -> None: ... + def to_string(self): ... + +class FlagEnumerationPrinter(PrettyPrinter): + initialized: bool = ... + def __init__(self, enum_type: Any) -> None: ... + enumerators: Any = ... + def __call__(self, val: Any): ... + +_builtin_pretty_printers: Any + +def add_builtin_pretty_printer(name: Any, regexp: Any, printer: Any) -> None: ... diff --git a/tests/stubs/gdb/prompt.pyi b/tests/stubs/gdb/prompt.pyi new file mode 100644 index 00000000000..c5f28b300f5 --- /dev/null +++ b/tests/stubs/gdb/prompt.pyi @@ -0,0 +1,23 @@ +# Stubs for gdb.prompt (Python 3) +# +# NOTE: This dynamically typed stub was automatically generated by stubgen. + +from typing import Any + +def _prompt_pwd(ignore: Any): ... +def _prompt_object_attr(func: Any, what: Any, attr: Any, nattr: Any): ... +def _prompt_frame(attr: Any): ... +def _prompt_thread(attr: Any): ... +def _prompt_version(attr: Any): ... +def _prompt_esc(attr: Any): ... +def _prompt_bs(attr: Any): ... +def _prompt_n(attr: Any): ... +def _prompt_r(attr: Any): ... +def _prompt_param(attr: Any): ... +def _prompt_noprint_begin(attr: Any): ... +def _prompt_noprint_end(attr: Any): ... + +prompt_substitutions: Any + +def prompt_help(): ... +def substitute_prompt(prompt: Any): ... diff --git a/tests/stubs/gdb/types.pyi b/tests/stubs/gdb/types.pyi new file mode 100644 index 00000000000..e940516ac5f --- /dev/null +++ b/tests/stubs/gdb/types.pyi @@ -0,0 +1,25 @@ +# Stubs for gdb.types (Python 3) +# +# NOTE: This dynamically typed stub was automatically generated by stubgen. + +from typing import Any, List, Dict, Optional, Union + +import gdb + +def get_basic_type(type_: gdb.Type) -> gdb.Type: ... +def has_field(type_: gdb.Type, field: str) -> bool: ... +def make_enum_dict(enum_type: gdb.Type) -> Dict[str, int]: ... +def deep_items(type_: gdb.Type) -> None: ... + +class TypePrinter: + name: str = ... + enabled: bool = ... + def __init__(self, name: str) -> None: ... + def instantiate(self) -> None: ... + +def _get_some_type_recognizers(result: Any, plist: List[Any]) -> None: ... +def get_type_recognizers() -> Any: ... +def apply_type_recognizers(recognizers: Any, + type_obj: gdb.Type) -> Optional[Any]: ... +def register_type_printer(locus: Optional[Union[gdb.Objfile, gdb.Progspace]], + printer: Any) -> None: ... diff --git a/tests/stubs/gdb/unwinder.pyi b/tests/stubs/gdb/unwinder.pyi new file mode 100644 index 00000000000..46f302845ba --- /dev/null +++ b/tests/stubs/gdb/unwinder.pyi @@ -0,0 +1,13 @@ +# Stubs for gdb.unwinder (Python 3) +# +# NOTE: This dynamically typed stub was automatically generated by stubgen. + +from typing import Any + +class Unwinder: + name: Any = ... + enabled: bool = ... + def __init__(self, name: Any) -> None: ... + def __call__(self, pending_frame: Any) -> None: ... + +def register_unwinder(locus: Any, unwinder: Any, replace: bool = ...) -> None: ... diff --git a/tests/stubs/gdb/xmethod.pyi b/tests/stubs/gdb/xmethod.pyi new file mode 100644 index 00000000000..9b201dc95a8 --- /dev/null +++ b/tests/stubs/gdb/xmethod.pyi @@ -0,0 +1,43 @@ +# Stubs for gdb.xmethod (Python 3) +# +# NOTE: This dynamically typed stub was automatically generated by stubgen. 
+ +from typing import Any + +basestring = str +long = int + +class XMethod: + name: Any = ... + enabled: bool = ... + def __init__(self, name: Any) -> None: ... + +class XMethodMatcher: + name: Any = ... + enabled: bool = ... + methods: Any = ... + def __init__(self, name: Any) -> None: ... + def match(self, class_type: Any, method_name: Any) -> None: ... + +class XMethodWorker: + def get_arg_types(self) -> None: ... + def get_result_type(self, *args: Any) -> None: ... + def __call__(self, *args: Any) -> None: ... + +class SimpleXMethodMatcher(XMethodMatcher): + class SimpleXMethodWorker(XMethodWorker): + _arg_types: Any = ... + _method_function: Any = ... + def __init__(self, method_function: Any, arg_types: Any) -> None: ... + def get_arg_types(self): ... + def __call__(self, *args: Any): ... + _method_function: Any = ... + _class_matcher: Any = ... + _method_matcher: Any = ... + _arg_types: Any = ... + def __init__(self, name: Any, class_matcher: Any, method_matcher: Any, method_function: Any, *arg_types: Any) -> None: ... + def match(self, class_type: Any, method_name: Any): ... + +def _validate_xmethod_matcher(matcher: Any): ... +def _lookup_xmethod_matcher(locus: Any, name: Any): ... +def register_xmethod_matcher(locus: Any, matcher: Any, replace: bool = ...) -> None: ... From c01e934478abfbf208d3a7f6490f21faa59c2a0f Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 24 Jun 2019 18:51:19 -0400 Subject: [PATCH 281/367] tests: fix __init__ typing for gdb.Value It properly had an Optional tag but no indication there was a default value, leading to incorrect 'error: Too few arguments for "Value"' errors. I had a different verison of mypy installed that didn't catch this. Signed-off-by: Jeff Mahoney --- tests/stubs/_gdb.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/stubs/_gdb.pyi b/tests/stubs/_gdb.pyi index 5fafb9a9fd9..375a5e2ef7c 100644 --- a/tests/stubs/_gdb.pyi +++ b/tests/stubs/_gdb.pyi @@ -583,7 +583,7 @@ class Value: type: Type = ... @classmethod def __init__(self, val: Union[Value, Buffer], - type: Optional[Type]) -> None: ... + type: Optional[Type] = ...) -> None: ... def cast(self, type: Type) -> Value: ... def const_value(self) -> Value: ... def dereference(self) -> Value: ... From ef874ed85cebc5b9d01b1b7764b288904dbfecc5 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 24 Jun 2019 18:52:34 -0400 Subject: [PATCH 282/367] tests: specify stdout/stderr for mypy.main The version of mypy in OBS requires stdout/stderr params for main. Signed-off-by: Jeff Mahoney --- tests/run-mypy.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/run-mypy.py b/tests/run-mypy.py index 5758b0ac703..a5fa4bf9381 100644 --- a/tests/run-mypy.py +++ b/tests/run-mypy.py @@ -14,8 +14,8 @@ "--check-untyped-defs", "--disallow-untyped-globals"] -ret = main(None, args=["-p", "kdump"] + common_args) -ret2 = main(None, args=["-p", "crash"] + common_args) +ret = main(None, stdout=sys.stdout, stderr=sys.stderr, args=["-p", "kdump"] + common_args) +ret2 = main(None, stdout=sys.stdout, stderr=sys.stderr, args=["-p", "crash"] + common_args) if ret or ret2: print("static checking failed.", file=sys.stderr) From 8013b1b00b72ef8e98100e6793f316fec8360918 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 24 Jun 2019 18:53:07 -0400 Subject: [PATCH 283/367] crash.subsystem.storage.decoders: endio is not optional The endio for a decoder is not optional and should not be initialized to None. 
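
For illustration only (the class below is hypothetical and not part of this
patch), a concrete Decoder is now expected to name its endio up front
instead of inheriting a None default:

    import gdb
    from crash.subsystem.storage.decoders import Decoder

    class ExampleBioDecoder(Decoder):
        # Hypothetical decoder: __endio__ identifies the completion
        # routine this decoder handles and may no longer be left as None.
        __endio__ = 'example_end_bio'

        def __init__(self, bio: gdb.Value) -> None:
            super().__init__(bio)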
Signed-off-by: Jeff Mahoney --- crash/subsystem/storage/decoders.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crash/subsystem/storage/decoders.py b/crash/subsystem/storage/decoders.py index 8e188395b7b..167685b2055 100644 --- a/crash/subsystem/storage/decoders.py +++ b/crash/subsystem/storage/decoders.py @@ -8,7 +8,7 @@ import gdb -EndIOSpecifier = Union[int, str, List[str], gdb.Value, gdb.Symbol, None] +EndIOSpecifier = Union[int, str, List[str], gdb.Value, gdb.Symbol] class Decoder: """Decoder objects are used to unwind the storage stack @@ -22,7 +22,7 @@ class Decoder: interpreted (:obj:`bool`): Whether the contents of this :obj:`.Decoder` have already been interpreted """ - __endio__: EndIOSpecifier = None + __endio__: EndIOSpecifier # pylint: disable=unused-argument def __init__(self, value: gdb.Value = None) -> None: From f2b0cffb21017e3860167dcf317b320dbacc458a Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Tue, 25 Jun 2019 09:30:02 -0400 Subject: [PATCH 284/367] Makefile: install text documentation with specific perms Signed-off-by: Jeff Mahoney --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 090d8854fc8..343208ea2e0 100644 --- a/Makefile +++ b/Makefile @@ -45,8 +45,8 @@ install: man-install doc-help-install doc-text-install doc-html-install build helpdir=$(pkgdatadir)/help doc-help-install: doc-help - install -d $(DESTDIR)$(helpdir)/commands - install -t $(DESTDIR)$(helpdir)/commands docs/text/commands/*.txt + install -m 755 -d $(DESTDIR)$(helpdir)/commands + install -m 644 -t $(DESTDIR)$(helpdir)/commands docs/text/commands/*.txt docdir=$(datadir)/doc/packages/crash-python textdir=$(docdir)/text From af1430a26d37fb51e37b47583f392c5beb6b989b Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 26 Jun 2019 12:00:54 +0200 Subject: [PATCH 285/367] crash.subsystem.storage: Cleanup singlequeue definitions Rename function definitions in blocksq.py to start with sq prefix so that we can have also corresponding mq alternatives without clashing names. Also move request_age_ms to common part since it is not sq specific. Signed-off-by: Jan Kara --- crash/subsystem/storage/__init__.py | 18 ++++++++++++++++++ crash/subsystem/storage/blocksq.py | 22 ++-------------------- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/crash/subsystem/storage/__init__.py b/crash/subsystem/storage/__init__.py index 1a221e76577..c656d3e5c38 100644 --- a/crash/subsystem/storage/__init__.py +++ b/crash/subsystem/storage/__init__.py @@ -7,6 +7,7 @@ from crash.util.symbols import Types, Symvals, SymbolCallbacks, TypeCallbacks from crash.types.classdev import for_each_class_device from crash.exceptions import DelayedAttributeError, InvalidArgumentError +from crash.cache.syscache import kernel, jiffies_to_msec import gdb from gdb.types import get_basic_type @@ -244,6 +245,23 @@ def inode_on_bdev(inode: gdb.Value) -> gdb.Value: return inode_to_block_device(inode) return inode['i_sb']['s_bdev'].dereference() +def request_age_ms(request: gdb.Value) -> int: + """ + Returns the age of the request in milliseconds + + This method returns the difference between the current time + (``jiffies``) and the request's ``start_time``, in milliseconds. + + Args: + request: The ``struct request`` used to determine age. The value + is of type ``struct request``. + + Returns: + :obj:`int`: Difference between the request's ``start_time`` and + current ``jiffies`` in milliseconds. 
+ """ + return jiffies_to_msec(kernel.jiffies - request['start_time']) + # pylint: disable=unused-argument def _check_types(result: gdb.Symbol) -> None: try: diff --git a/crash/subsystem/storage/blocksq.py b/crash/subsystem/storage/blocksq.py index f7653110f18..c3f064074ef 100644 --- a/crash/subsystem/storage/blocksq.py +++ b/crash/subsystem/storage/blocksq.py @@ -5,7 +5,6 @@ from crash.util.symbols import Types from crash.types.list import list_for_each_entry -from crash.cache.syscache import kernel, jiffies_to_msec import gdb @@ -14,7 +13,7 @@ class NoQueueError(RuntimeError): types = Types(['struct request']) -def for_each_request_in_queue(queue: gdb.Value) -> Iterable[gdb.Value]: +def sq_for_each_request_in_queue(queue: gdb.Value) -> Iterable[gdb.Value]: """ Iterates over each ``struct request`` in request_queue @@ -34,24 +33,7 @@ def for_each_request_in_queue(queue: gdb.Value) -> Iterable[gdb.Value]: return list_for_each_entry(queue['queue_head'], types.request_type, 'queuelist') -def request_age_ms(request: gdb.Value) -> int: - """ - Returns the age of the request in milliseconds - - This method returns the difference between the current time - (``jiffies``) and the request's ``start_time``, in milliseconds. - - Args: - request: The ``struct request`` used to determine age. The value - is of type ``struct request``. - - Returns: - :obj:`int`: Difference between the request's ``start_time`` and - current ``jiffies`` in milliseconds. - """ - return jiffies_to_msec(kernel.jiffies - request['start_time']) - -def requests_in_flight(queue: gdb.Value) -> Tuple[int, int]: +def sq_requests_in_flight(queue: gdb.Value) -> Tuple[int, int]: """ Report how many requests are in flight for this queue From cddd1b64b407840def60ec42044a7f1b959d72e1 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 26 Jun 2019 12:18:53 +0200 Subject: [PATCH 286/367] crash.subsystem.storage: Add function to test for blk-mq queues Add function testing whether a given queue is a multiqueue queue or not. Signed-off-by: Jan Kara --- crash/subsystem/storage/__init__.py | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/crash/subsystem/storage/__init__.py b/crash/subsystem/storage/__init__.py index c656d3e5c38..f68dfa599c7 100644 --- a/crash/subsystem/storage/__init__.py +++ b/crash/subsystem/storage/__init__.py @@ -3,7 +3,7 @@ from typing import Iterable -from crash.util import container_of +from crash.util import container_of, struct_has_member from crash.util.symbols import Types, Symvals, SymbolCallbacks, TypeCallbacks from crash.types.classdev import for_each_class_device from crash.exceptions import DelayedAttributeError, InvalidArgumentError @@ -13,7 +13,8 @@ from gdb.types import get_basic_type types = Types(['struct gendisk', 'struct hd_struct', 'struct device', - 'struct device_type', 'struct bdev_inode']) + 'struct device_type', 'struct bdev_inode', + 'struct request_queue', 'struct request']) symvals = Symvals(['block_class', 'blockdev_superblock', 'disk_type', 'part_type']) @@ -262,6 +263,21 @@ def request_age_ms(request: gdb.Value) -> int: """ return jiffies_to_msec(kernel.jiffies - request['start_time']) +def queue_is_mq(queue: gdb.Value) -> bool: + """ + Tests whether the queue is blk-mq queue. + + Args: + queue: The request queue to test. The value must be + of type ``struct request_queue``. 
+ + Returns: + :obj:`bool`: whether the ``struct request_queue`` is a multiqueue queue + """ + if not struct_has_member(queue, 'mq_ops'): + return False + return int(queue['mq_ops']) != 0 + # pylint: disable=unused-argument def _check_types(result: gdb.Symbol) -> None: try: From 28c0e973d7fd9ef47b576a67e5739dc9c8ff01d7 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 26 Jun 2019 12:28:35 +0200 Subject: [PATCH 287/367] crash.subsystem.storage: Add checks for queue type in sq functions Add checks to functions for single-queue queues that they are indeed operating on such queues. Signed-off-by: Jan Kara --- crash/subsystem/storage/blocksq.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/crash/subsystem/storage/blocksq.py b/crash/subsystem/storage/blocksq.py index c3f064074ef..54ef9b763a4 100644 --- a/crash/subsystem/storage/blocksq.py +++ b/crash/subsystem/storage/blocksq.py @@ -5,6 +5,8 @@ from crash.util.symbols import Types from crash.types.list import list_for_each_entry +from crash.subsystem.storage import queue_is_mq +from crash.exceptions import InvalidArgumentError import gdb @@ -13,6 +15,10 @@ class NoQueueError(RuntimeError): types = Types(['struct request']) +def _check_queue_type(queue: gdb.Value) -> None: + if queue_is_mq(queue): + raise InvalidArgumentError("Passed request queue is a multiqueue queue") + def sq_for_each_request_in_queue(queue: gdb.Value) -> Iterable[gdb.Value]: """ Iterates over each ``struct request`` in request_queue @@ -30,6 +36,7 @@ def sq_for_each_request_in_queue(queue: gdb.Value) -> Iterable[gdb.Value]: """ if int(queue) == 0: raise NoQueueError("Queue is NULL") + _check_queue_type(queue) return list_for_each_entry(queue['queue_head'], types.request_type, 'queuelist') @@ -46,5 +53,6 @@ def sq_requests_in_flight(queue: gdb.Value) -> Tuple[int, int]: the 2-tuple is the number of read requests, the second is the number of write requests. """ + _check_queue_type(queue) return (int(queue['in_flight'][0]), int(queue['in_flight'][1])) From bc6d73eb5bc130940558b3f2f27cfcf1377e385e Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 26 Jun 2019 20:04:51 +0200 Subject: [PATCH 288/367] crash.subsystem.storage: Add function to query number of queued request Add function to query number of queued requests for single queue request queues. Signed-off-by: Jan Kara --- crash/subsystem/storage/blocksq.py | 27 +++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/crash/subsystem/storage/blocksq.py b/crash/subsystem/storage/blocksq.py index 54ef9b763a4..badfe4fb31a 100644 --- a/crash/subsystem/storage/blocksq.py +++ b/crash/subsystem/storage/blocksq.py @@ -3,6 +3,7 @@ from typing import Iterable, Tuple +from crash.util import struct_has_member from crash.util.symbols import Types from crash.types.list import list_for_each_entry from crash.subsystem.storage import queue_is_mq @@ -50,9 +51,31 @@ def sq_requests_in_flight(queue: gdb.Value) -> Tuple[int, int]: Returns: (:obj:`int`, :obj:`int`): The requests in flight. The first member of - the 2-tuple is the number of read requests, the second is the number - of write requests. + the 2-tuple is the number of async requests, the second is the number + of sync requests. """ _check_queue_type(queue) return (int(queue['in_flight'][0]), int(queue['in_flight'][1])) + +def sq_requests_queued(queue: gdb.Value) -> Tuple[int, int]: + """ + Report how many requests are queued for this queue + + Args: + queue: The request queue to inspect for queued requests. 
+ The value must be of type ``struct request_queue``. + + Returns: + (:obj:`int`, :obj:`int`): The queued requests. The first member of + the 2-tuple is the number of async requests, the second is the number + of sync requests. + """ + _check_queue_type(queue) + if struct_has_member(queue, 'rq'): + rqlist = queue['rq'] + else: + rqlist = queue['root_rl'] + return (int(rqlist['count'][0]), + int(rqlist['count'][1])) + From 8d8b922aefbea5eee17ec00d22fffdf8d8877a1e Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 26 Jun 2019 14:12:04 +0200 Subject: [PATCH 289/367] crash.subsystem.storage: Add blk-mq support Add support for iterating requests in blk-mq queue and reporting number of requests in flight. Also provide generic wrappers detecting queue type and using appropriate single / multi queue handlers. Signed-off-by: Jan Kara --- crash/subsystem/storage/__init__.py | 86 +++++++++++++++++++- crash/subsystem/storage/block.py | 67 +++++++++++++++ crash/subsystem/storage/blockmq.py | 121 ++++++++++++++++++++++++++++ crash/types/sbitmap.py | 41 ++++++++++ 4 files changed, 313 insertions(+), 2 deletions(-) create mode 100644 crash/subsystem/storage/block.py create mode 100644 crash/subsystem/storage/blockmq.py create mode 100644 crash/types/sbitmap.py diff --git a/crash/subsystem/storage/__init__.py b/crash/subsystem/storage/__init__.py index f68dfa599c7..0e970aeeef4 100644 --- a/crash/subsystem/storage/__init__.py +++ b/crash/subsystem/storage/__init__.py @@ -14,9 +14,12 @@ types = Types(['struct gendisk', 'struct hd_struct', 'struct device', 'struct device_type', 'struct bdev_inode', - 'struct request_queue', 'struct request']) + 'struct request_queue', 'struct request', 'enum req_flag_bits', + 'enum mq_rq_state', 'enum rq_atomic_flags']) symvals = Symvals(['block_class', 'blockdev_superblock', 'disk_type', 'part_type']) +READ = 0 +WRITE = 1 def dev_to_gendisk(dev: gdb.Value) -> gdb.Value: """ @@ -263,6 +266,56 @@ def request_age_ms(request: gdb.Value) -> int: """ return jiffies_to_msec(kernel.jiffies - request['start_time']) +def rq_data_dir(request: gdb.Value) -> int: + """ + Returns direction of the request + + This method returns 0 if the request is read and 1 if the request is write. + + Args: + request: The ``struct request`` to query data direction in. + + Returns: + :obj:`int`: 0 for reads, 1 for writes. + """ + if request['cmd_flags'] & 1 != 0: + return WRITE + return READ + +def rq_is_sync(request: gdb.Value) -> bool: + """ + Returns whether request is synchronous + + This method returns True if the request is synchronous and False otherwise. + + Args: + request: The ``struct request`` to query. + + Returns: + :obj:`bool`: True for synchronous requests, False otherwise. + """ + return (request['cmd_flags'] & 1 == 0 or + request['cmd_flags'] & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH) != 0) # type: ignore + +# This is a stub to make static checker happy. It gets overridden once 'struct +# request' is resolved. +def _rq_in_flight(request: gdb.Value) -> bool: + raise RuntimeError("struct request type not resolved yet!") + +def rq_in_flight(request: gdb.Value) -> bool: + """ + Returns whether request is currently processed by the device + + This method returns True if the request is being processed by the device + + Args: + request: The ``struct request`` to query. + + Returns: + :obj:`bool`: True for requests in flight, False otherwise. + """ + return _rq_in_flight(request) + def queue_is_mq(queue: gdb.Value) -> bool: """ Tests whether the queue is blk-mq queue. 
@@ -293,6 +346,35 @@ def _check_types(result: gdb.Symbol) -> None: except DelayedAttributeError: pass +# Export REQ_ flags into namespace as constants +def _export_req_flags(req_flag_bits: gdb.Type) -> None: + for (name, field) in req_flag_bits.items(): + globals()[name[2:]] = 1 << field.enumval + + # Define to 0 flags that don't exist. + for name in ['REQ_PREFLUSH', 'REQ_FLUSH']: + if not name in globals(): + globals()[name] = 0 + +# Check struct request and define functions based on its current form in this +# kernel +def _check_struct_request(request_s: gdb.Type) -> None: + global _rq_in_flight + if struct_has_member(request_s, 'rq_state'): + def _rq_in_flight(request: gdb.Value) -> bool: + return (request['rq_state'] != + types.enum_mq_rq_state_type['MQ_RQ_IDLE']) + elif struct_has_member(request_s, 'atomic_flags'): + def _rq_in_flight(request: gdb.Value) -> bool: + return (request['atomic_flags'] & + (1 << int(types.enum_rq_atomic_flags_type['REQ_ATOM_STARTED'].enumval)) != 0) + else: + def _rq_in_flight(request: gdb.Value) -> bool: + return request['cmd_flags'] & REQ_STARTED != 0 # type: ignore + symbol_cbs = SymbolCallbacks([('disk_type', _check_types), ('part_type', _check_types)]) -type_cbs = TypeCallbacks([('struct device_type', _check_types)]) +type_cbs = TypeCallbacks([('struct device_type', _check_types), + ('enum req_flag_bits', _export_req_flags), + ('struct request', _check_struct_request)]) + diff --git a/crash/subsystem/storage/block.py b/crash/subsystem/storage/block.py new file mode 100644 index 00000000000..d76245bc3b1 --- /dev/null +++ b/crash/subsystem/storage/block.py @@ -0,0 +1,67 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +from typing import Iterable, Tuple + +from crash.util.symbols import Types +from crash.subsystem.storage import queue_is_mq +from crash.subsystem.storage.blocksq import sq_for_each_request_in_queue, \ + sq_requests_in_flight, sq_requests_queued +from crash.subsystem.storage.blockmq import mq_for_each_request_in_queue, \ + mq_requests_in_flight, mq_requests_queued + +import gdb + +def requests_in_flight(queue: gdb.Value) -> Tuple[int, int]: + """ + Report how many requests are in flight for this queue + + Args: + queue: The request queue to inspect for requests in flight. + The value must be of type ``struct request_queue``. + + Returns: + (:obj:`int`, :obj:`int`): The requests in flight. The first member of + the 2-tuple is the number of async requests, the second is the number + of sync requests. + """ + if queue_is_mq(queue): + return mq_requests_in_flight(queue) + return sq_requests_in_flight(queue) + +def requests_queued(queue: gdb.Value) -> Tuple[int, int]: + """ + Report how many requests are queued for this queue + + Args: + queue: The request queue to inspect for number of queued requests. + The value must be of type ``struct request_queue``. + + Returns: + (:obj:`int`, :obj:`int`): The number of queued requests. The first + member of the 2-tuple is the number of async requests, the second is + the number of sync requests. + """ + if queue_is_mq(queue): + return mq_requests_queued(queue) + return sq_requests_queued(queue) + +def for_each_request_in_queue(queue: gdb.Value) -> Iterable[gdb.Value]: + """ + Iterates over each ``struct request`` in request_queue + + This method iterates over requests queued in ``request_queue``. It takes + care of properly handling both single and multiqueue queues. + + Args: + queue: The ``struct request_queue`` used to iterate. 
The value + must be of type ``struct request_queue``. + + Yields: + :obj:`gdb.Value`: Each ``struct request`` contained within the + ``request_queue``. The value is of type ``struct request``. + """ + if queue_is_mq(queue): + return mq_for_each_request_in_queue(queue) + return sq_for_each_request_in_queue(queue) + diff --git a/crash/subsystem/storage/blockmq.py b/crash/subsystem/storage/blockmq.py new file mode 100644 index 00000000000..55386cf265e --- /dev/null +++ b/crash/subsystem/storage/blockmq.py @@ -0,0 +1,121 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +from typing import Iterable, Tuple + +from crash.util.symbols import Types +from crash.subsystem.storage import queue_is_mq, rq_is_sync, rq_in_flight +from crash.types.sbitmap import sbitmap_for_each_set +from crash.exceptions import InvalidArgumentError + +import gdb + +class NoQueueError(RuntimeError): + pass + +types = Types(['struct request', 'struct request_queue', + 'struct sbitmap_queue', 'struct blk_mq_hw_ctx' ]) + +def _check_queue_type(queue: gdb.Value) -> None: + if not queue_is_mq(queue): + raise InvalidArgumentError("Passed request queue is not a multiqueue queue") + +def mq_queue_for_each_hw_ctx(queue: gdb.Value) -> Iterable[gdb.Value]: + """ + Iterates over each ``struct blk_mq_hw_ctx`` in request_queue + + This method iterates over each blk-mq hardware context in request_queue + and yields each blk_mq_hw_ctx. + + Args: + queue: The ``struct request_queue`` used to iterate. The value + must be of type ``struct request_queue``. + + Yields: + :obj:`gdb.Value`: Each blk-mq hardware context contained within the + ``request_queue``. The value is of type ``struct blk_mq_hw_ctx``. + """ + _check_queue_type(queue) + for i in range(int(queue['nr_hw_queues'])): + yield queue['queue_hw_ctx'][i] + +def mq_for_each_request_in_queue(queue: gdb.Value, reserved: bool = True) \ + -> Iterable[gdb.Value]: + """ + Iterates over each ``struct request`` in request_queue + + This method iterates over the ``request_queue``'s queuelist and + returns a request for each member. + This method iterates over the tags of all hardware contexts of + ``request_queue`` and returns a request for each member. + + Args: + queue: The ``struct request_queue`` used to iterate. The value + must be of type ``struct request_queue``. + reserved: If true, also reserved requests will be included in the + iteration + + Yields: + :obj:`gdb.Value`: Each ``struct request`` contained within the + ``request_queue``'s queuelist. The value is of type ``struct request``. + ``request_queue``'s tags. The value is of type ``struct request``. + """ + if int(queue) == 0: + raise NoQueueError("Queue is NULL") + _check_queue_type(queue) + + for hctx in mq_queue_for_each_hw_ctx(queue): + tags = hctx['tags'] + if int(hctx['nr_ctx']) == 0 or int(tags) == 0: + continue + if reserved == True and int(tags['nr_reserved_tags']) > 0: + for tag in sbitmap_for_each_set(tags['breserved_tags']['sb']): + rq = tags['rqs'][tag] + if int(rq) != 0 and rq['q'] == queue: + yield rq + + for tag in sbitmap_for_each_set(tags['bitmap_tags']['sb']): + rq = tags['rqs'][tag + int(tags['nr_reserved_tags'])] + if int(rq) != 0 and rq['q'] == queue: + yield rq + +def mq_requests_in_flight(queue: gdb.Value) -> Tuple[int, int]: + """ + Report how many requests are in flight for this queue + + Args: + queue: The request queue to inspect for requests in flight. + The value must be of type ``struct request_queue``. 
+ + Returns: + (:obj:`int`, :obj:`int`): The requests in flight. The first member of + the 2-tuple is the number of async requests, the second is the number + of sync requests. + """ + _check_queue_type(queue) + in_flight = [0, 0] + for rq in mq_for_each_request_in_queue(queue): + if rq_in_flight(rq): + in_flight[rq_is_sync(rq)] += 1 + + return (in_flight[0], in_flight[1]) + +def mq_requests_queued(queue: gdb.Value) -> Tuple[int, int]: + """ + Report how many requests are queued for this queue + + Args: + queue: The request queue to inspect for queued requests. + The value must be of type ``struct request_queue``. + + Returns: + (:obj:`int`, :obj:`int`): The queued requests. The first member of + the 2-tuple is the number of async requests, the second is the number + of sync requests. + """ + _check_queue_type(queue) + queued = [0, 0] + for rq in mq_for_each_request_in_queue(queue): + queued[rq_is_sync(rq)] += 1 + + return (queued[0], queued[1]) diff --git a/crash/types/sbitmap.py b/crash/types/sbitmap.py new file mode 100644 index 00000000000..dbf653aa2b5 --- /dev/null +++ b/crash/types/sbitmap.py @@ -0,0 +1,41 @@ +#!/usr/bin/python3 +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +""" +The crash.types.sbitmap module provides helpers for iterating and scanning +scalable bitmaps + +""" + +from typing import Iterable + +from crash.exceptions import InvalidArgumentError +from crash.util.symbols import Types +from crash.util import struct_has_member + +import gdb + +types = Types(['struct sbitmap', 'struct sbitmap_word']) + +def sbitmap_for_each_set(sbitmap: gdb.Value) -> Iterable[int]: + """ + Yield each set bit in a scalable bitmap + + Args: + sbitmap: The bitmap to iterate. + + Yields: + :obj:`int`: The position of a bit that is set + + """ + + length = int(sbitmap['depth']) + for i in range(0, int(sbitmap['map_nr'])): + word = sbitmap['map'][i]['word'] + if struct_has_member(sbitmap['map'][i], 'cleared'): + word &= ~sbitmap['map'][i]['cleared'] + offset = i << int(sbitmap['shift']) + bits = min(int(sbitmap['map'][i]['depth']), length - offset) + for j in range(0, bits): + if word & (1 << j): + yield offset + j + From 9ef21fa91b766e12d724ce5fde00af45ef41a26b Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Thu, 27 Jun 2019 09:18:35 +0200 Subject: [PATCH 290/367] crash.subsystem.storage: Add function to query all queue request stats Since gathering of blk-mq queue statistics about requests is relatively expensive (we have to iterate all tag bitmaps and inspect all requests), provide a function to gather all information at once so that we don't have to iterate everything multiple times. 
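
A rough usage sketch (the loop below is an example, not code added by this
patch; the import paths are the ones introduced in this series):

    from crash.subsystem.storage import for_each_disk
    from crash.subsystem.storage.block import queue_request_stats

    for disk in for_each_disk():
        # A single pass over the tag bitmaps yields all four counters.
        queued_async, queued_sync, drv_async, drv_sync = \
            queue_request_stats(disk['queue'])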
Signed-off-by: Jan Kara --- crash/subsystem/storage/block.py | 19 ++++++++++++++++++- crash/subsystem/storage/blockmq.py | 23 +++++++++++++++++++++++ 2 files changed, 41 insertions(+), 1 deletion(-) diff --git a/crash/subsystem/storage/block.py b/crash/subsystem/storage/block.py index d76245bc3b1..b4bf748fb9f 100644 --- a/crash/subsystem/storage/block.py +++ b/crash/subsystem/storage/block.py @@ -8,7 +8,7 @@ from crash.subsystem.storage.blocksq import sq_for_each_request_in_queue, \ sq_requests_in_flight, sq_requests_queued from crash.subsystem.storage.blockmq import mq_for_each_request_in_queue, \ - mq_requests_in_flight, mq_requests_queued + mq_requests_in_flight, mq_requests_queued, mq_queue_request_stats import gdb @@ -65,3 +65,20 @@ def for_each_request_in_queue(queue: gdb.Value) -> Iterable[gdb.Value]: return mq_for_each_request_in_queue(queue) return sq_for_each_request_in_queue(queue) +def queue_request_stats(queue: gdb.Value) -> Tuple[int, int, int, int]: + """ + Report various request information for this queue + + Args: + queue: The request queue to inspect for request information. + The value must be of type ``struct request_queue``. + + Returns: + (:obj:`int`, :obj:`int`, :obj:`int`, :obj:`int`): Number queued async + requests, number of queued sync requests, number of async requests + being processed by the driver, number of sync requests being processed + by the driver. + """ + if queue_is_mq(queue): + return mq_queue_request_stats(queue) + return sq_requests_queued(queue) + sq_requests_in_flight(queue) # type: ignore diff --git a/crash/subsystem/storage/blockmq.py b/crash/subsystem/storage/blockmq.py index 55386cf265e..827e23b16f6 100644 --- a/crash/subsystem/storage/blockmq.py +++ b/crash/subsystem/storage/blockmq.py @@ -119,3 +119,26 @@ def mq_requests_queued(queue: gdb.Value) -> Tuple[int, int]: queued[rq_is_sync(rq)] += 1 return (queued[0], queued[1]) + +def mq_queue_request_stats(queue: gdb.Value) -> Tuple[int, int, int, int]: + """ + Report various request information for this queue + + Args: + queue: The request queue to inspect for request information. + The value must be of type ``struct request_queue``. + + Returns: + (:obj:`int`, :obj:`int`, :obj:`int`, :obj:`int`): Number queued async + requests, number of queued sync requests, number of async requests + being processed by the driver, number of sync requests being processed + by the driver. + """ + _check_queue_type(queue) + stats = [0, 0, 0, 0] + for rq in mq_for_each_request_in_queue(queue): + stats[rq_is_sync(rq)] += 1 + if rq_in_flight(rq): + stats[2 + rq_is_sync(rq)] += 1 + + return (stats[0], stats[1], stats[2], stats[3]) From 5b2b237c683aa0805d55a9f4bc0cbe9a6439e3d4 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Thu, 27 Jun 2019 09:37:26 +0200 Subject: [PATCH 291/367] crash.commands: Add dev command Add 'dev -d' command. 
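
Example invocation (column spacing is approximate and the per-disk values,
which depend on the dump being examined, are elided):

    py-crash> dev -d
    MAJOR GENDISK NAME REQUEST_QUEUE TOTAL ASYNC SYNC DRV
    ...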
Signed-off-by: Jan Kara --- crash/commands/dev.py | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100644 crash/commands/dev.py diff --git a/crash/commands/dev.py b/crash/commands/dev.py new file mode 100644 index 00000000000..700f4c0f4f3 --- /dev/null +++ b/crash/commands/dev.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +""" +SUMMARY +------- + +Display character and block devices + + -d display information about disks +""" + +import argparse +from crash.commands import Command, ArgumentParser +from crash.subsystem.storage import for_each_disk, gendisk_name +from crash.subsystem.storage.block import queue_request_stats +import gdb + +class DevCommand(Command): + """display character and block devices""" + + def __init__(self, name : str) -> None: + parser = ArgumentParser(prog=name) + + parser.add_argument('-d', action='store_true', default=False) + + super().__init__(name, parser) + + def execute(self, args : argparse.Namespace) -> None: + if args.d: + print("{:^5} {:^16} {:^10} {:^16} {:^5} {:^5} {:^5} {:^5}" + .format("MAJOR", "GENDISK", "NAME", "REQUEST_QUEUE", + "TOTAL", "ASYNC", "SYNC", "DRV")) + for disk in for_each_disk(): + stats = queue_request_stats(disk['queue']) + print("{:5d} {:016x} {:<10} {:016x} {:5d} {:5d} {:5d} {:5d}" + .format(int(disk['major']), int(disk.address), + gendisk_name(disk), int(disk['queue']), + stats[0] + stats[1], stats[0], stats[1], + stats[2] + stats[3])) + +DevCommand("dev") From 43eb588452cc86cc287604ad6675f96624811dac Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Fri, 28 Jun 2019 11:24:13 +0200 Subject: [PATCH 292/367] contrib: Fixup stuck-requests.py Fixup stuck-requests.py to import for_each_request_in_queue from block and not blocksq where it used to be. Signed-off-by: Jan Kara --- contrib/stuck-requests.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/stuck-requests.py b/contrib/stuck-requests.py index 0ff4e1ae1a6..dba7be2622b 100644 --- a/contrib/stuck-requests.py +++ b/contrib/stuck-requests.py @@ -7,7 +7,7 @@ from crash.subsystem.storage import for_each_disk from crash.subsystem.storage.decoders import for_each_bio_in_stack from crash.subsystem.storage import gendisk_name -from crash.subsystem.storage.blocksq import for_each_request_in_queue +from crash.subsystem.storage.block import for_each_request_in_queue from crash.types.list import list_for_each_entry from crash.util import get_symbol_value from crash.cache.syscache import kernel, jiffies_to_msec From 237b136a2529c93e1d3fb6aba039dcd6f4557a4b Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Tue, 25 Jun 2019 14:23:04 -0400 Subject: [PATCH 293/367] crash.types.bitmap: add test_bit routine Signed-off-by: Jeff Mahoney --- crash/types/bitmap.py | 44 ++++++++++++++- tests/test_types_bitmap.py | 113 +++++++++++++++++++++++++++++++++++++ 2 files changed, 154 insertions(+), 3 deletions(-) create mode 100644 tests/test_types_bitmap.py diff --git a/crash/types/bitmap.py b/crash/types/bitmap.py index 6f038b72959..5676b8c0740 100644 --- a/crash/types/bitmap.py +++ b/crash/types/bitmap.py @@ -11,7 +11,7 @@ requires that it be of either type. 
""" -from typing import Iterable +from typing import Iterable, Tuple from crash.exceptions import InvalidArgumentError from crash.util.symbols import Types @@ -30,6 +30,13 @@ def _check_bitmap_type(bitmap: gdb.Value) -> None: raise InvalidArgumentError("bitmaps are expected to be arrays of unsigned long not `{}'" .format(bitmap.type)) +def _get_bit_location(bit: int) -> Tuple[int, int]: + element = bit // (types.unsigned_long_type.sizeof << 3) + offset = bit % (types.unsigned_long_type.sizeof << 3) + + return (element, offset) + + def for_each_set_bit(bitmap: gdb.Value, size_in_bytes: int = None) -> Iterable[int]: """ @@ -211,8 +218,7 @@ def find_next_set_bit(bitmap: gdb.Value, start: int, raise IndexError("Element {} is out of range ({} elements)" .format(start, elements)) - element = start // (types.unsigned_long_type.sizeof << 3) - offset = start % (types.unsigned_long_type.sizeof << 3) + (element, offset) = _get_bit_location(start) for n in range(element, elements): if bitmap[n] == 0: @@ -316,3 +322,35 @@ def find_last_set_bit(bitmap: gdb.Value, size_in_bytes: int = None) -> int: return n * (types.unsigned_long_type.sizeof << 3) + v return 0 + +def test_bit(bitmap: gdb.Value, bit: int, size_in_bytes: int = None) -> bool: + """ + Test a bit in a bitmap. Unlike the ``find`` family of functions, + the index starts at 0. + + Args: + bitmap: The bitmap to use for testing + bit: The bit in the bitmap to test, starting at offset 0 + size_in_bytes (optional, default = None): The size of the bitmap + if a pointer is used. + Returns: + :obj:`bool`: Whether the bit is set or not + + Raises: + :obj:`.InvalidArgumentError`: The :obj:`gdb.Value` is not + of type ``unsigned long[]`` or ``unsigned long *``. + + """ + _check_bitmap_type(bitmap) + + if size_in_bytes is None: + size_in_bytes = bitmap.type.sizeof + + elements = size_in_bytes // types.unsigned_long_type.sizeof + + (element, offset) = _get_bit_location(bit) + + if element >= elements: + raise ValueError(f"bit {bit} is out of range > {size_in_bytes << 3}") + + return (bitmap[element] & (1 << offset)) != 0 diff --git a/tests/test_types_bitmap.py b/tests/test_types_bitmap.py new file mode 100644 index 00000000000..b11cf2d4f93 --- /dev/null +++ b/tests/test_types_bitmap.py @@ -0,0 +1,113 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +import unittest +import sys + +import crash.types.bitmap as bm + +import gdb + +class TestBitmap(unittest.TestCase): + def setUp(self): + gdb.execute("file tests/test-percpu") + ulong = gdb.lookup_type('unsigned long') + ulong_array = ulong.array(0) + + # 10101010010100101010100010101001000101001000100010100101001001001010010 + val = 0x552954548A445292 + + self.bitmap = gdb.Value(val.to_bytes(8, sys.byteorder), ulong_array) + + def test_test_bit(self): + self.assertFalse(bm.test_bit(self.bitmap, 0)) + self.assertTrue(bm.test_bit(self.bitmap, 1)) + self.assertFalse(bm.test_bit(self.bitmap, 2)) + self.assertFalse(bm.test_bit(self.bitmap, 3)) + self.assertTrue(bm.test_bit(self.bitmap, 4)) + self.assertFalse(bm.test_bit(self.bitmap, 5)) + self.assertFalse(bm.test_bit(self.bitmap, 6)) + self.assertTrue(bm.test_bit(self.bitmap, 7)) + self.assertFalse(bm.test_bit(self.bitmap, 8)) + self.assertTrue(bm.test_bit(self.bitmap, 9)) + self.assertFalse(bm.test_bit(self.bitmap, 10)) + self.assertFalse(bm.test_bit(self.bitmap, 11)) + self.assertTrue(bm.test_bit(self.bitmap, 12)) + self.assertFalse(bm.test_bit(self.bitmap, 13)) + self.assertTrue(bm.test_bit(self.bitmap, 14)) + 
self.assertFalse(bm.test_bit(self.bitmap, 15)) + self.assertFalse(bm.test_bit(self.bitmap, 16)) + self.assertFalse(bm.test_bit(self.bitmap, 17)) + self.assertTrue(bm.test_bit(self.bitmap, 18)) + self.assertFalse(bm.test_bit(self.bitmap, 19)) + self.assertFalse(bm.test_bit(self.bitmap, 20)) + self.assertFalse(bm.test_bit(self.bitmap, 21)) + self.assertTrue(bm.test_bit(self.bitmap, 22)) + self.assertFalse(bm.test_bit(self.bitmap, 23)) + self.assertFalse(bm.test_bit(self.bitmap, 24)) + self.assertTrue(bm.test_bit(self.bitmap, 25)) + self.assertFalse(bm.test_bit(self.bitmap, 26)) + self.assertTrue(bm.test_bit(self.bitmap, 27)) + self.assertFalse(bm.test_bit(self.bitmap, 28)) + self.assertFalse(bm.test_bit(self.bitmap, 29)) + self.assertFalse(bm.test_bit(self.bitmap, 30)) + self.assertTrue(bm.test_bit(self.bitmap, 31)) + self.assertFalse(bm.test_bit(self.bitmap, 32)) + self.assertFalse(bm.test_bit(self.bitmap, 33)) + self.assertTrue(bm.test_bit(self.bitmap, 34)) + self.assertFalse(bm.test_bit(self.bitmap, 35)) + self.assertTrue(bm.test_bit(self.bitmap, 36)) + self.assertFalse(bm.test_bit(self.bitmap, 37)) + self.assertTrue(bm.test_bit(self.bitmap, 38)) + self.assertFalse(bm.test_bit(self.bitmap, 39)) + self.assertFalse(bm.test_bit(self.bitmap, 40)) + self.assertFalse(bm.test_bit(self.bitmap, 41)) + self.assertTrue(bm.test_bit(self.bitmap, 42)) + self.assertFalse(bm.test_bit(self.bitmap, 43)) + self.assertTrue(bm.test_bit(self.bitmap, 44)) + self.assertFalse(bm.test_bit(self.bitmap, 45)) + self.assertTrue(bm.test_bit(self.bitmap, 46)) + self.assertFalse(bm.test_bit(self.bitmap, 47)) + self.assertTrue(bm.test_bit(self.bitmap, 48)) + self.assertFalse(bm.test_bit(self.bitmap, 49)) + self.assertFalse(bm.test_bit(self.bitmap, 50)) + self.assertTrue(bm.test_bit(self.bitmap, 51)) + self.assertFalse(bm.test_bit(self.bitmap, 52)) + self.assertTrue(bm.test_bit(self.bitmap, 53)) + self.assertFalse(bm.test_bit(self.bitmap, 54)) + self.assertFalse(bm.test_bit(self.bitmap, 55)) + self.assertTrue(bm.test_bit(self.bitmap, 56)) + self.assertFalse(bm.test_bit(self.bitmap, 57)) + self.assertTrue(bm.test_bit(self.bitmap, 58)) + self.assertFalse(bm.test_bit(self.bitmap, 59)) + self.assertTrue(bm.test_bit(self.bitmap, 60)) + self.assertFalse(bm.test_bit(self.bitmap, 61)) + self.assertTrue(bm.test_bit(self.bitmap, 62)) + self.assertFalse(bm.test_bit(self.bitmap, 63)) + + def test_for_each_set_bit(self): + count = 0 + for bit in bm.for_each_set_bit(self.bitmap): + count += 1 + + self.assertTrue(count == 24) + + def test_find_first_set_bit(self): + bit = bm.find_first_set_bit(self.bitmap) + self.assertTrue(bit == 2) + + def test_find_first_zero_bit(self): + bit = bm.find_first_zero_bit(self.bitmap) + self.assertTrue(bit == 1) + + def test_find_next_set_bit(self): + bit = bm.find_next_set_bit(self.bitmap, 27) + self.assertTrue(bit == 28) + + def test_find_next_zero_bit(self): + bit = bm.find_next_zero_bit(self.bitmap, 51) + self.assertTrue(bit == 53) + + def test_find_last_set_bit(self): + bit = bm.find_last_set_bit(self.bitmap) + self.assertTrue(bit == 63) From d7c7c4f1f4d7486af3a4f7229b414f6ccbab58a9 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Wed, 26 Jun 2019 08:29:53 -0400 Subject: [PATCH 294/367] docs: fix typo in development.rst Signed-off-by: Jeff Mahoney --- doc-source/development.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc-source/development.rst b/doc-source/development.rst index 8768eb31897..ae9f75c0452 100644 --- a/doc-source/development.rst +++ b/doc-source/development.rst 
@@ -12,7 +12,7 @@ Development gdb-internals -Documentation is automatically build from the python code for the user +Documentation is automatically built from the python code for the user guide, command help text, and API reference. There are several make targets to assist in your development efforts: From 27347104f9fe2ad1c924bcb5fddc5ab4b76ac714 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Wed, 26 Jun 2019 08:53:08 -0400 Subject: [PATCH 295/367] docs: add page on development patterns I've fielded questions about some review feedback that involve code patterns that are acceptable for the project. This page will contain the documentation for those as it's created. Signed-off-by: Jeff Mahoney --- doc-source/development.rst | 1 + doc-source/patterns.rst | 86 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 87 insertions(+) create mode 100644 doc-source/patterns.rst diff --git a/doc-source/development.rst b/doc-source/development.rst index ae9f75c0452..9d26f65fd6c 100644 --- a/doc-source/development.rst +++ b/doc-source/development.rst @@ -10,6 +10,7 @@ Development crash/modules gdb-internals + patterns Documentation is automatically built from the python code for the user diff --git a/doc-source/patterns.rst b/doc-source/patterns.rst new file mode 100644 index 00000000000..1189eb9f201 --- /dev/null +++ b/doc-source/patterns.rst @@ -0,0 +1,86 @@ +Patterns +======== + +Optional error handling +----------------------- + +In some cases it may be desirable to keep exception handling in a helper +that returns :obj:`None` on error. In the past, the project used an +optional ``error`` argument that defaulted to :obj:`True` that indicated +that exceptions should be raised. Callers could pass ``error=False`` to +instruct the function to return :obj:`None` instead. + +With Python's +`typing `_ +annotations, these routines must be annotated as returning an +`Optional `_ +value. While the +`@overload `_ +decorator allows us to associate return types with specific argument types +and counts, there is no way to associate a return type with specific +argument `values`, like ``error=False``. + +A function annotated as returning an ``Optional`` value affects the implied +types of the variables used to assign the result. Every caller of such +a routine would need to check the result against :obj:`None` in order to +drop the ``Optional`` annotation from the type. Even when we know the +function `cannot` return :obj:`None` when passed ``error=True``. + +The way we handle this is to have separate functions for each case +so that callers which will never have a :obj:`None` value returned +do not need to check it. + +Here are a few examples: + + +Function raises its own exceptions +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: py + + from typing import Optional + + import gdb + + def new_routine(val: gdb.Value) -> str: + if some_condition: + raise RuntimeError("something bad happened") + + return val.string() + + def new_routine_safe(val: gdb.Value) -> Optional[str]: + try: + return new_routine(val) + except RuntimeError: + return None + + +Function calls functions that raise optional exceptions +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: py + + from typing import Optional + + import gdb + + def some_existing_routine(val: gdb.Value, error: bool = True) -> Optional[str]: + if some_condition: + if error: + raise RuntimeError("something bad happened") + return None + + return val.string() + + def new_routine(val: gdb.Value) -> str: + print("do something") + + ret = some_existing_routine(val) + + # This is required to drop the Optional annotation + if ret is None: + raise RuntimeError("some_existing_routine can't return None") + return ret + + def new_routine_safe(val: gdb.Value) -> Optional[str]: + return some_existing_routine(val, False) From e9200d350cc23bee1ef5f8ad16fcfb433e84d548 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Wed, 26 Jun 2019 09:35:01 -0400 Subject: [PATCH 296/367] docs: move command documentation above the delayed lookup explanation The example doesn't demonstrate the command documentation so it's confusing to have it be the last thing before the example. Signed-off-by: Jeff Mahoney --- doc-source/api_changes.rst | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/doc-source/api_changes.rst b/doc-source/api_changes.rst index aefdbe089c1..6a66c85e32f 100644 --- a/doc-source/api_changes.rst +++ b/doc-source/api_changes.rst @@ -41,6 +41,11 @@ Public / Protected Namespace The use of ``_`` as a prefix for protected members of classes is now expected and will be enforced during ``make test`` if `pylint `_ is installed. In the example below, several internal members and methods of `MyClass` have been renamed to indicate that they are protected. +Command documentation +--------------------- + +In earlier versions of crash-python, commands were documented using the docstring of the Command itself. This has changed to use the docstring of the module instead. More details can be found in :class:`~crash.command.Command` and :class:`~crash.command.ArgumentParser`. The format of the docstring is `reStructuredText `_ and is parsed using `Sphinx `_. The documentation is used for both the user guide and the application command help. This is an area that is subject to change in the future. + New mechanism for delayed lookups --------------------------------- @@ -54,11 +59,6 @@ The current version of crash-python uses the :class:`crash.util.symbol` module t - There are accessors beyond attributes. The :class:`.DelayedCollection` family of classes all have :meth:`~.DelayedCollection.__getattr__`, :meth:`~DelayedCollection.__getitem__`, and :meth:`~DelayedCollection.get` defined, so they can be accessed as attribute names, dictionary keys, or by function call. The latter two can be used with any name, but the attribute names cannot be used for symbols that start with ``__``. -Command documentation ---------------------- - -In earlier versions of crash-python, commands were documented using the docstring of the Command itself. This has changed to use the docstring of the module instead. More details can be found in :class:`~crash.command.Command` and :class:`~crash.command.ArgumentParser`. The format of the docstring is `reStructuredText `_ and is parsed using `Sphinx `_. The documentation is used for both the user guide and the application command help. This is an area that is subject to change in the future. 
- Example ------- From 289a43ee28e0914a2524b6e65f7532a00572b048 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Wed, 26 Jun 2019 10:52:24 -0400 Subject: [PATCH 297/367] crash.requirements: remove dependency on Symbol.section We don't use this interface anymore so we don't need to test for it Signed-off-by: Jeff Mahoney --- crash/requirements/__init__.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/crash/requirements/__init__.py b/crash/requirements/__init__.py index d36b5310cd2..465843a12ae 100644 --- a/crash/requirements/__init__.py +++ b/crash/requirements/__init__.py @@ -30,12 +30,6 @@ except AttributeError as e: raise IncompatibleGDBError("gdb.Register") -try: - x5 = gdb.Symbol.section - del x5 -except AttributeError as e: - raise IncompatibleGDBError("gdb.Symbol.section") - try: x6 = gdb.Inferior.new_thread del x6 From 0d7ef30ce6eeecd6b17a066a988ac7fb48023754 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 1 Jul 2019 13:47:43 -0400 Subject: [PATCH 298/367] crash.arch.x86_64: fix fetching registers on older kernels This one is embarassing and would've been caught by a better test matrix. Signed-off-by: Jeff Mahoney --- crash/arch/x86_64.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/crash/arch/x86_64.py b/crash/arch/x86_64.py index 4cc9fa1ffe0..a354a81eb59 100644 --- a/crash/arch/x86_64.py +++ b/crash/arch/x86_64.py @@ -24,6 +24,10 @@ def fetch_active(self, thread: gdb.InferiorThread, register: int) -> None: except KeyError: pass + def fetch_scheduled(self, thread: gdb.InferiorThread, + register: int) -> None: + pass + # pylint: disable=abstract-method class _FRC_inactive_task_frame(_FetchRegistersBase): def fetch_scheduled(self, thread: gdb.InferiorThread, @@ -54,8 +58,8 @@ def fetch_scheduled(self, thread: gdb.InferiorThread, thread.info.valid_stack = True class _FRC_thread_return(_FetchRegistersBase): - def __call__(self, thread: gdb.InferiorThread, - register: gdb.Register) -> None: + def fetch_scheduled(self, thread: gdb.InferiorThread, + register: int) -> None: task = thread.info.task_struct # Only write rip when requested; It resets the frame cache From b3d6ab0eb926c9a2f5f77aeb70396345c952838f Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 1 Jul 2019 17:44:40 -0400 Subject: [PATCH 299/367] crash.types.task: remove TASK_EXCLUSIVE This bit was originally cloned from crash and, as best I can tell, it hasn't been used in any kernel in nearly 20 years. There was also a bug associated with declaring all the flags where this one is handled specially and using -1 as a placeholder meant it always matched. As a result, state would always = 0 and all tasks would be shown as running. 
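
A minimal illustration of the masking bug (plain Python, not project code;
0x2 stands in for a blocked-state bit such as TASK_UNINTERRUPTIBLE):

    >>> TASK_EXCLUSIVE = -1        # the old "uninitialized" placeholder
    >>> state = 0x2
    >>> state &= ~TASK_EXCLUSIVE   # ~(-1) == 0, so every state bit is cleared
    >>> state
    0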
Signed-off-by: Jeff Mahoney --- crash/commands/ps.py | 12 +----------- crash/types/task.py | 1 - 2 files changed, 1 insertion(+), 12 deletions(-) diff --git a/crash/commands/ps.py b/crash/commands/ps.py index 10ef49d4a14..b7ec475ed10 100755 --- a/crash/commands/ps.py +++ b/crash/commands/ps.py @@ -611,13 +611,6 @@ def __init__(self) -> None: def task_state_string(self, task: LinuxTask) -> str: state = task.task_state() buf = "" - exclusive = False - - try: - exclusive = (state & TF.TASK_EXCLUSIVE) == TF.TASK_EXCLUSIVE - state &= ~TF.TASK_EXCLUSIVE - except AttributeError: - pass for bits in sorted(self.task_states.keys(), reverse=True): if (state & bits) == bits: @@ -626,10 +619,7 @@ def task_state_string(self, task: LinuxTask) -> str: if state & TF.TASK_DEAD and task.maybe_dead(): buf = self.task_states[TF.TASK_DEAD] - if buf is not None and exclusive: - buf += "EX" - - if buf is None: + if not buf: print(f"Unknown state {state} found") return buf diff --git a/crash/types/task.py b/crash/types/task.py index 8f14a4a35bd..77ff6b4ca29 100644 --- a/crash/types/task.py +++ b/crash/types/task.py @@ -46,7 +46,6 @@ class TaskStateFlags: TASK_WAKING: int = TASK_FLAG_UNINITIALIZED TASK_PARKED: int = TASK_FLAG_UNINITIALIZED __TASK_IDLE: int = TASK_FLAG_UNINITIALIZED - TASK_EXCLUSIVE: int = TASK_FLAG_UNINITIALIZED TASK_NOLOAD: int = TASK_FLAG_UNINITIALIZED TASK_NEW: int = TASK_FLAG_UNINITIALIZED From 84d32868a269ff761c7ff9e43dcedd27fbe70dbc Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Tue, 17 Sep 2019 07:24:53 -0400 Subject: [PATCH 300/367] tests: handle different versions of mypy At some point the main routine of mypy changed its arguments. This commit adjusts the arguments when it gets a failure calling with the real arguments. Signed-off-by: Jeff Mahoney --- tests/run-mypy.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tests/run-mypy.py b/tests/run-mypy.py index a5fa4bf9381..ce164e140c4 100644 --- a/tests/run-mypy.py +++ b/tests/run-mypy.py @@ -14,8 +14,12 @@ "--check-untyped-defs", "--disallow-untyped-globals"] -ret = main(None, stdout=sys.stdout, stderr=sys.stderr, args=["-p", "kdump"] + common_args) -ret2 = main(None, stdout=sys.stdout, stderr=sys.stderr, args=["-p", "crash"] + common_args) +try: + ret = main(None, stdout=sys.stdout, stderr=sys.stderr, args=["-p", "kdump"] + common_args) + ret2 = main(None, stdout=sys.stdout, stderr=sys.stderr, args=["-p", "crash"] + common_args) +except TypeError: + ret = main(None, args=["-p", "kdump"] + common_args) + ret2 = main(None, args=["-p", "crash"] + common_args) if ret or ret2: print("static checking failed.", file=sys.stderr) From af19533125fb6059ea2c82fe955260a4630636d4 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Tue, 10 Mar 2020 15:08:26 +0100 Subject: [PATCH 301/367] types/percpu: fix off-by-one in get_percpu_vars() When nr_cpus is not passed to get_percpu_vars(), it incorrectly uses self._last_cpu, effectively ommiting the last cpu. Use self._nr_cpus instead. 
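
Concretely (the CPU count is illustrative): on a dump with 4 possible CPUs,
self._nr_cpus is 4 while self._last_cpu is 3, so the old default behaved
like

    get_percpu_vars(var, nr_cpus=3)   # CPUs 0-2 only, CPU 3 silently dropped

instead of returning values for all of CPUs 0-3.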
Signed-off-by: Vlastimil Babka --- crash/types/percpu.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crash/types/percpu.py b/crash/types/percpu.py index 4431ff52077..5b61ee4a8f0 100644 --- a/crash/types/percpu.py +++ b/crash/types/percpu.py @@ -362,7 +362,7 @@ def get_percpu_vars(self, var: SymbolOrValue, :obj:`ValueError`: nr_cpus is <= ``0`` """ if nr_cpus is None: - nr_cpus = self._last_cpu + nr_cpus = self._nr_cpus if nr_cpus <= 0: raise ValueError("nr_cpus must be > 0") From c6cc475c524a5dc95d4f54cc04a130a2b85ddd4f Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Thu, 26 Mar 2020 17:06:34 -0400 Subject: [PATCH 302/367] crash.kernel: gdb.InferiorThread now allows arbitrary attributes The latest set of gdb-python patches removed the 'info' attribute from thread_object to make handling of native threads simpler. Instead a __dict__ member is used to allow adding arbitrary attributes. Signed-off-by: Jeff Mahoney --- crash/kernel.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crash/kernel.py b/crash/kernel.py index 516079667ba..d48d7973b21 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -700,7 +700,8 @@ def setup_tasks(self) -> None: ptid = (LINUX_KERNEL_PID, task['pid'], 0) try: - thread = gdb.selected_inferior().new_thread(ptid, ltask) + thread = gdb.selected_inferior().new_thread(ptid) + thread.info = ltask except gdb.error: print("Failed to setup task @{:#x}".format(int(task.address))) continue From fe28a08dfc05c133c5d522d2b3148164b6fd6da0 Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Fri, 27 Mar 2020 14:57:17 -0400 Subject: [PATCH 303/367] crash.sh: only set debug-file-directory if changing it Setting debug-file-directory makes gdb not search in the default locations. Signed-off-by: Jeff Mahoney --- crash.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/crash.sh b/crash.sh index f750617f48e..6213989c8f1 100755 --- a/crash.sh +++ b/crash.sh @@ -214,8 +214,12 @@ for path in $SEARCH_DIRS; do DFD="$path" fi done + +if test -n "$DFD"; then + echo "set debug-file-directory $DFD:/usr/lib/debug" >> $GDBINIT +fi + cat << EOF >> $GDBINIT -set debug-file-directory $DFD:/usr/lib/debug set build-id-verbose 0 set python print-stack full set prompt py-crash> From f8cdb6515bd6b2b87e6b7796b58a51914b937215 Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Fri, 27 Mar 2020 15:03:10 -0400 Subject: [PATCH 304/367] crash: remove setting build-id-verbose GDB 9.1 includes the build-id patches similar to the ones carried in the SUSE gdb patchset. build-id-verbose was not a part of it, so we get an error. 
Signed-off-by: Jeff Mahoney --- crash.sh | 1 - kernel-tests/test_types_module.py | 1 - tests/gdbinit-boilerplate | 1 - tests/test_percpu.py | 1 - 4 files changed, 4 deletions(-) diff --git a/crash.sh b/crash.sh index 6213989c8f1..cc10b1a08c4 100755 --- a/crash.sh +++ b/crash.sh @@ -220,7 +220,6 @@ if test -n "$DFD"; then fi cat << EOF >> $GDBINIT -set build-id-verbose 0 set python print-stack full set prompt py-crash> set height 0 diff --git a/kernel-tests/test_types_module.py b/kernel-tests/test_types_module.py index 2954901906b..0e55401b53e 100644 --- a/kernel-tests/test_types_module.py +++ b/kernel-tests/test_types_module.py @@ -4,7 +4,6 @@ import gdb gdbinit = """ -set build-id-verbose 0 set python print-stack full set prompt py-crash> set height 0 diff --git a/tests/gdbinit-boilerplate b/tests/gdbinit-boilerplate index c6b0d7a56ea..f2230e0cb2a 100644 --- a/tests/gdbinit-boilerplate +++ b/tests/gdbinit-boilerplate @@ -1,4 +1,3 @@ -set build-id-verbose 0 set python print-stack full set height 0 set print pretty on diff --git a/tests/test_percpu.py b/tests/test_percpu.py index 0be6f2ed1c8..9087fccb6c5 100644 --- a/tests/test_percpu.py +++ b/tests/test_percpu.py @@ -12,7 +12,6 @@ def setUp(self): gdb.execute("file tests/test-percpu", to_string=True) try: - gdb.execute("set build-id-verbose 0") print() print("--- Unsuppressable gdb output ---", end='') From c86f07785c4ebb9742f8dbaab5c84462696f7f7a Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Fri, 27 Mar 2020 15:08:06 -0400 Subject: [PATCH 305/367] kdump.target: accept both vmlinux and vmcore arguments GDB 9.1 is leaving exec-file symbols behind even when symbol-file symbols are loaded at an offset. As a result, we get the original unrelocated address and encounter memory errors. This change passes the vmlinux path to the kdumpfile target so that we can get the base offset prior to loading the kernel. 
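With the target taking both paths, the setup that crash.sh and the unit tests generate boils down to the following (the vmlinux and vmcore paths are placeholders):

    import gdb  # inside the py-crash gdb session
    from kdump.target import Target

    Target(debug=False)   # registers the kdumpfile target
    # kernel image first, dump second; no separate "file" command needed
    gdb.execute("target kdumpfile /path/to/vmlinux /path/to/vmcore")
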
Signed-off-by: Jeff Mahoney --- crash.sh | 4 +--- kdump/target.py | 31 ++++++++++++++++++++----------- kernel-tests/unittest-prepare.py | 4 +--- 3 files changed, 22 insertions(+), 17 deletions(-) diff --git a/crash.sh b/crash.sh index cc10b1a08c4..1c18fa30ca8 100755 --- a/crash.sh +++ b/crash.sh @@ -225,14 +225,12 @@ set prompt py-crash> set height 0 set print pretty on -file "$KERNEL" - python from kdump.target import Target target = Target(debug=False) end -target kdumpfile $VMCORE +target kdumpfile $KERNEL $VMCORE python import sys diff --git a/kdump/target.py b/kdump/target.py index 864f46f3e81..5383a78710d 100644 --- a/kdump/target.py +++ b/kdump/target.py @@ -4,9 +4,11 @@ from typing import Tuple, Callable import sys +import shlex from kdumpfile import kdumpfile, KDUMP_KVADDR from kdumpfile.exceptions import AddressTranslationException, EOFException +from kdumpfile.exceptions import NoDataException import addrxlat.exceptions import gdb @@ -30,11 +32,14 @@ def __init__(self, debug: bool = False) -> None: self.register() # pylint: disable=unused-argument - def open(self, filename: str, from_tty: bool) -> None: + def open(self, args: str, from_tty: bool) -> None: + argv = shlex.split(args) + if len(argv) < 2: + raise gdb.GdbError("kdumpfile target requires kernel image and vmcore") + + vmlinux = argv[0] + filename = argv[1] - objfiles = gdb.objfiles() - if not objfiles: - raise gdb.GdbError("kdumpfile target requires kernel to be already loaded for symbol resolution") try: self.kdump = kdumpfile(file=filename) except Exception as e: @@ -51,21 +56,24 @@ def open(self, filename: str, from_tty: bool) -> None: except (TypeError, ValueError): pass - vmlinux = gdb.objfiles()[0].filename - - # Load the kernel at the relocated address # Unfortunately, the percpu section has an offset of 0 and # ends up getting placed at the offset base. This is easy # enough to handle in the percpu code. - result = gdb.execute("add-symbol-file {} -o {:#x}" + result = gdb.execute("symbol-file {} -o {:#x}" .format(vmlinux, self.base_offset), to_string=True) + + if self.debug: + print(result) + + # We don't have an exec-file so we need to set the architecture + # explicitly. 
+ arch = gdb.objfiles()[0].architecture.name() + result = gdb.execute("set architecture {}".format(arch), to_string=True) if self.debug: print(result) - # Clear out the old symbol cache - gdb.execute("file {}".format(vmlinux)) def close(self) -> None: try: @@ -93,7 +101,8 @@ def xfer_partial(self, obj: int, annex: str, readbuf: bytearray, if self.debug: self.report_error(offset, ln, e) raise gdb.TargetXferEOF(str(e)) - except addrxlat.exceptions.NoDataError as e: # pylint: disable=no-member + # pylint: disable=no-member + except (NoDataException, addrxlat.exceptions.NoDataError) as e: if self.debug: self.report_error(offset, ln, e) raise gdb.TargetXferUnavailable(str(e)) diff --git a/kernel-tests/unittest-prepare.py b/kernel-tests/unittest-prepare.py index 4c3819d406a..c76173b5e21 100644 --- a/kernel-tests/unittest-prepare.py +++ b/kernel-tests/unittest-prepare.py @@ -46,12 +46,10 @@ f_out.close() f_in.close() - gdb.execute(f"file {vmlinux}") - from kdump.target import Target target = Target(debug=False) - gdb.execute(f"target kdumpfile {vmcore}") + gdb.execute(f"target kdumpfile {vmlinux} {vmcore}") except Exception as e: print(str(e)) sys.exit(1) From a2d35a0a7e6f980601fcf97a40e1f8e355ac31d0 Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Fri, 27 Mar 2020 15:10:54 -0400 Subject: [PATCH 306/367] crash.types.percpu: prefer possible CPU count for setting nr_cpus We'll use either the cpu possible mask or the size of the percpu range to determine the number of CPUs. THe former is much more accurate than the latter, so let's prefer that. Signed-off-by: Jeff Mahoney --- crash/types/percpu.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/crash/types/percpu.py b/crash/types/percpu.py index 5b61ee4a8f0..1f8d6e8f9dc 100644 --- a/crash/types/percpu.py +++ b/crash/types/percpu.py @@ -62,13 +62,15 @@ def setup_per_cpu_size(cls, unused: gdb.Symbol) -> None: # This is only an optimization so we don't return NR_CPUS values # when there are far fewer CPUs on the system. cls._last_cpu = highest_possible_cpu_nr() + cls._nr_cpus = cls._last_cpu + 1 except DelayedAttributeError: pass @classmethod # pylint: disable=unused-argument def setup_nr_cpus(cls, unused: gdb.Symbol) -> None: - cls._nr_cpus = array_size(symvals['__per_cpu_offset']) + if cls._nr_cpus == 0: + cls._nr_cpus = array_size(symvals['__per_cpu_offset']) if cls._last_cpu == -1: cls._last_cpu = cls._nr_cpus From 22572007d0961c14dbc7396f1524777c620a7af7 Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Fri, 27 Mar 2020 15:18:08 -0400 Subject: [PATCH 307/367] README.rst: update URLs and GDB version Since the project has moved to its own organization account, update the URLs to point to that. crash-python now depends on the gdb-9.1-target branch, so update that link. Signed-off-by: Jeff Mahoney --- README.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.rst b/README.rst index b211d06cb71..987c7a894cd 100644 --- a/README.rst +++ b/README.rst @@ -85,14 +85,14 @@ Installation .. start-installation -`Crash-python `_ is on `GitHub `_. +`Crash-python `_ is on `GitHub `_. It requires the following components to work successfully: - `Python `_ 3.6 or newer - `pyelftools `_ - `libkdumpfile `_ -- `GDB `_ with python extensions and built with Python 3.6 or newer. +- `GDB `_ with python extensions and built with Python 3.6 or newer. If you are using a SUSE or openSUSE release, pre-built packages are available on the `Open Build Service `_. 
From 284d161905c166504f09788ce5a80eb36f68699d Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Sun, 29 Mar 2020 22:51:23 -0400 Subject: [PATCH 308/367] crash: fix simple mypy failures An update to mypy raised a few problems, but they're simple fixes. Signed-off-by: Jeff Mahoney --- crash/cache/syscache.py | 2 +- crash/types/slab.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/crash/cache/syscache.py b/crash/cache/syscache.py index 299f9c5ecf0..1694a37ba0a 100644 --- a/crash/cache/syscache.py +++ b/crash/cache/syscache.py @@ -140,7 +140,7 @@ def _verify_image(self, location: ImageLocation) -> None: buf_len = len(magic_start) buf = self._read_buf_bytes(location['magic']['start'], buf_len) if buf != magic_start: - raise IOError(f"Missing magic_start in kernel_config_data. Got `{buf}'") + raise IOError(f"Missing magic_start in kernel_config_data. Got `{buf!r}'") buf_len = len(magic_end) buf = self._read_buf_bytes(location['magic']['end'], buf_len) diff --git a/crash/types/slab.py b/crash/types/slab.py index 0138e95793b..4d2fd9f461e 100644 --- a/crash/types/slab.py +++ b/crash/types/slab.py @@ -561,7 +561,8 @@ def ___check_slabs(self, node: gdb.Value, slabtype: int, nid: int, check_ok = False count = errors['num_ok'] - if count: + if (count and errors['first_ok'] is not None and + errors['last_ok'] is not None): print("{} slab objects were ok between {:#x} and {:#x}" .format(errors['num_ok'], errors['first_ok'], errors['last_ok'])) From 5ad9a127cd1cfdb5fd33f1e9e89b865af5492f23 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Tue, 25 Jun 2019 18:28:05 +0200 Subject: [PATCH 309/367] commands/kmem: fix address parsing Remove leftover args indexing. Signed-off-by: Vlastimil Babka --- crash/commands/kmem.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crash/commands/kmem.py b/crash/commands/kmem.py index f09895422d4..faf8eebaa1c 100644 --- a/crash/commands/kmem.py +++ b/crash/commands/kmem.py @@ -78,7 +78,7 @@ def execute(self, args: argparse.Namespace) -> None: raise CommandLineError("no address specified") try: - addr = int(args.address[0], 0) + addr = int(args.address, 0) except ValueError: raise CommandLineError("address must be numeric") slab = slab_from_obj_addr(addr) From 52df8676cb9d4444bcc0eb7b34a7f45d3fc3e92c Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Tue, 25 Jun 2019 18:29:14 +0200 Subject: [PATCH 310/367] util: add safe_find_member_variant() flavour Allow returning None instead of rising exception when no member variant found. Signed-off-by: Vlastimil Babka --- crash/util/__init__.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/crash/util/__init__.py b/crash/util/__init__.py index 1df6c09890c..d370c2b4f9c 100644 --- a/crash/util/__init__.py +++ b/crash/util/__init__.py @@ -304,6 +304,27 @@ def find_member_variant(gdbtype: gdb.Type, variants: List[str]) -> str: raise TypeError("Unrecognized '{}': could not find member '{}'" .format(str(gdbtype), variants[0])) +def safe_find_member_variant(gdbtype: gdb.Type, variants: List[str]) -> Optional[str]: + """ + Examines the given type and returns the first found member name + + Over time, structure member names may change. This routine + allows the caller to provide a list of potential names and returns + the first one found. 
+ + Args: + gdbtype (gdb.Type): The type of structure or union to examine + variants (list of str): The names of members to search + + Returns: + str: The first member name found or + None: if no named member could be found + """ + try: + return find_member_variant(gdbtype, variants) + except TypeError: + return None + def safe_lookup_type(name: str, block: gdb.Block = None) -> Union[gdb.Type, None]: """ From a61705f6ef11ea6ccb5470fba968e92294b08a30 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Wed, 26 Jun 2019 16:48:53 +0200 Subject: [PATCH 311/367] types/page: add page_addr() Add function to convert struct page address to page's direct mapping address. Signed-off-by: Vlastimil Babka --- crash/types/page.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/crash/types/page.py b/crash/types/page.py index 744a4e5de17..2042a8528e0 100644 --- a/crash/types/page.py +++ b/crash/types/page.py @@ -248,6 +248,9 @@ def compound_head(self) -> 'Page': ('page_offset_base', Page.setup_directmap_base)]) +def page_addr(struct_page_addr: int) -> int: + pfn = (struct_page_addr - Page.vmemmap_base) // types.page_type.sizeof + return Page.directmap_base + (pfn * Page.PAGE_SIZE) def pfn_to_page(pfn: int) -> 'Page': return Page(Page.pfn_to_page(pfn), pfn) From 53620c285a6ae24f6ea99bcb87573809b0a5fb6e Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Wed, 26 Jun 2019 16:54:15 +0200 Subject: [PATCH 312/367] kmem/slab: restructure object validity and usage checking Existing function contains_obj() now determines whether address is inside a valid object range and optionally describes why it's invalid. New function obj_in_use() determines whether object is allocated or freed, and optionally describes in which array cache it is, moving this code over from kmem command. 
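For a caller this becomes a two-step check, roughly as the kmem command below uses it (slab and addr are assumed to come from slab_from_obj_addr()):

    (valid, obj, reason) = slab.contains_obj(addr)
    if not valid:
        print(f"invalid address: {reason}")
    else:
        (in_use, details) = slab.obj_in_use(obj)
        state = "ALLOCATED" if in_use else "FREE"
        where = f" (in {details})" if details is not None else ""
        print(f"{state}{where} object 0x{obj:x}")
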
Signed-off-by: Vlastimil Babka --- crash/commands/kmem.py | 45 +++++++++++++++++------------------------- crash/types/slab.py | 45 +++++++++++++++++++++++++++++------------- 2 files changed, 49 insertions(+), 41 deletions(-) diff --git a/crash/commands/kmem.py b/crash/commands/kmem.py index faf8eebaa1c..76e0f31fe4b 100644 --- a/crash/commands/kmem.py +++ b/crash/commands/kmem.py @@ -86,36 +86,27 @@ def execute(self, args: argparse.Namespace) -> None: if not slab: raise CommandError("Address not found in any kmem cache.") - obj = slab.contains_obj(addr) + (valid, obj, reason) = slab.contains_obj(addr) name = slab.kmem_cache.name - if obj[0]: - print("ALLOCATED object %x from slab %s" % (obj[1], name)) + if valid: + (is_used, details) = slab.obj_in_use(obj) + offset = addr - obj + + offset_str = "" if offset == 0 else f" offset {offset}/0x{offset:x}" + details_str = "" if details is None else f" (in {details})" + + print(f"{'ALLOCATED' if is_used else 'FREE'}{details_str} " + f"object 0x{obj:x}{offset_str} from cache {name}") else: - if obj[1] == 0: - print("Address on slab %s but not within valid object slot" - % name) - elif not obj[2]: - print("FREE object %x from slab %s" % (obj[1], name)) - elif obj[2] is not None: - ac = obj[2] - ac_type = ac['ac_type'] # pylint: disable=unsubscriptable-object - nid_tgt = int(ac['nid_tgt']) # pylint: disable=unsubscriptable-object - if ac_type == "percpu": - ac_desc = "cpu %d cache" % nid_tgt - elif ac_type == "shared": - ac_desc = "shared cache on node %d" % nid_tgt - elif ac_type == "alien": - nid_src = int(ac['nid_src']) # pylint: disable=unsubscriptable-object - ac_desc = "alien cache of node %d for node %d" % \ - (nid_src, nid_tgt) - else: - raise CommandError(f"unexpected array cache type {str(ac)}") - - print("FREE object %x from slab %s (in %s)" % - (obj[1], name, ac_desc)) - else: - raise RuntimeError("odd return value from contains_obj") + obj_str = "" + if obj is not None: + obj_str = f" object 0x{obj:x}" + reason_str = "" + if reason is not None: + reason_str = f" ({reason})" + print(f"INVALID address on slab {slab.gdb_obj.address} " + f"from cache {name}{obj_str}{reason_str}") def __print_vmstat(self, vmstat: List[int], diffs: List[int]) -> None: vmstat_names = VmStat.get_stat_names() diff --git a/crash/types/slab.py b/crash/types/slab.py index 4d2fd9f461e..5eef794cee1 100644 --- a/crash/types/slab.py +++ b/crash/types/slab.py @@ -164,7 +164,8 @@ def __populate_free(self) -> None: f = int(bufctl[f]) - def find_obj(self, addr: int) -> Union[int, None]: + def find_obj(self, addr: int) -> Optional[int]: + bufsize = self.kmem_cache.buffer_size objs_per_slab = self.kmem_cache.objs_per_slab @@ -175,25 +176,42 @@ def find_obj(self, addr: int) -> Union[int, None]: if idx >= objs_per_slab: return None - return self.s_mem + (idx * bufsize) + return int(self.s_mem + (idx * bufsize)) - def contains_obj(self, addr: int) -> Tuple[bool, int, - Optional[ArrayCacheEntry]]: + def contains_obj(self, addr: int) -> Tuple[bool, int, Optional[str]]: obj_addr = self.find_obj(addr) if not obj_addr: - return (False, 0, None) + return (False, 0, "address outside of valid object range") - self.__populate_free() - if obj_addr in self.free: - return (False, int(obj_addr), None) + return (True, obj_addr, None) - ac = self.kmem_cache.get_array_caches() + def obj_in_use(self, addr: int) -> Tuple[bool, Optional[str]]: + + self.__populate_free() + if addr in self.free: + return (False, None) + + array_caches = self.kmem_cache.get_array_caches() + + if addr in array_caches: 
+ ac = array_caches[addr] + + ac_type = ac['ac_type'] # pylint: disable=unsubscriptable-object + nid_tgt = int(ac['nid_tgt']) # pylint: disable=unsubscriptable-object + if ac_type == AC_PERCPU: + ac_desc = f"cpu {nid_tgt} cache" + elif ac_type == AC_SHARED: + ac_desc = f"shared cache on node {nid_tgt}" + elif ac_type == AC_ALIEN: + nid_src = int(ac['nid_src']) # pylint: disable=unsubscriptable-object + ac_desc = f"alien cache on node {nid_src} for node {nid_tgt}" + else: + ac_desc = "unknown cache" - if obj_addr in ac: - return (False, int(obj_addr), ac[obj_addr]) + return (False, ac_desc) - return (True, int(obj_addr), None) + return (True, None) def __error(self, msg: str, misplaced: bool = False) -> None: msg = col_error("cache %s slab %x%s" % (self.kmem_cache.name, @@ -361,8 +379,7 @@ def __get_nodelists(self) -> Iterable[Tuple[int, gdb.Value]]: yield (nid, node.dereference()) @staticmethod - def all_find_obj(addr: int) -> Optional[Tuple[bool, int, - Optional[ArrayCacheEntry]]]: + def all_find_obj(addr: int) -> Optional[Tuple[bool, int, Optional[str]]]: slab = slab_from_obj_addr(addr) if not slab: return None From ba5dc31023a7df1bd4f3427f0f36b0290405f30c Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Mon, 10 Feb 2020 11:38:10 +0100 Subject: [PATCH 313/367] types/page: fix determining ZONES_WIDTH We need to use binary logarithm, not natural. Signed-off-by: Vlastimil Babka --- crash/types/page.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crash/types/page.py b/crash/types/page.py index 2042a8528e0..c0ad494e498 100644 --- a/crash/types/page.py +++ b/crash/types/page.py @@ -125,7 +125,7 @@ def setup_directmap_base(cls, symbol: gdb.Symbol) -> None: @classmethod def setup_zone_type(cls, gdbtype: gdb.Type) -> None: max_nr_zones = gdbtype['__MAX_NR_ZONES'].enumval - cls.ZONES_WIDTH = int(ceil(log(max_nr_zones))) + cls.ZONES_WIDTH = int(ceil(log(max_nr_zones, 2))) @classmethod # pylint: disable=unused-argument @@ -213,6 +213,7 @@ def get_slab_page(self) -> gdb.Value: return self.gdb_obj[Page.slab_page_name] def get_nid(self) -> int: + # TODO: this only works when there are no sections (i.e. sparsemem_vmemmap) return self.flags >> (self.BITS_PER_LONG - self.NODES_WIDTH) def get_zid(self) -> int: From 99c6dc1556b6d3653ff9b1dc5a27d3c868f9f4d5 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Tue, 18 Feb 2020 16:20:38 +0100 Subject: [PATCH 314/367] types/list: include last good list_head in thrown exception When throwing BufferError from list_for_each(), print also the last good list_head address to help further debugging of the list corruption. Also convert the error to f-string. 
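For callers nothing changes structurally; the extra context simply shows up in the exception text. A minimal sketch (list_head here stands for whatever struct list_head value the caller already holds):

    from crash.types.list import list_for_each

    try:
        for node in list_for_each(list_head):
            pass  # inspect each entry as before
    except BufferError as e:
        # the message now also names the last good list_head address
        print(e)
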
Signed-off-by: Vlastimil Babka --- crash/types/list.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/crash/types/list.py b/crash/types/list.py index 2e65f67296f..366df0226d6 100644 --- a/crash/types/list.py +++ b/crash/types/list.py @@ -83,6 +83,7 @@ def list_for_each(list_head: gdb.Value, include_head: bool = False, raise BufferError("Failed to read list_head {:#x}: {}" .format(int(list_head.address), str(e))) + last_good_addr = None while node.address != list_head.address: if exact_cycles: if int(node.address) in visited: @@ -106,8 +107,13 @@ def list_for_each(list_head: gdb.Value, include_head: bool = False, # point in giving out bogus list elements yield node.address except gdb.error as e: - raise BufferError("Failed to read list_head {:#x} in list {:#x}: {}" - .format(int(node.address), int(list_head.address), str(e))) + if last_good_addr is not None: + last_good_str = f"0x{last_good_addr:x}" + else: + last_good_str = "(none)" + raise BufferError(f"Failed to read list_head 0x{int(node.address):x} " + f"in list 0x{int(list_head.address):x}, last good " + f"list_head {last_good_str}: {str(e)}") try: if fast is not None: @@ -127,6 +133,7 @@ def list_for_each(list_head: gdb.Value, include_head: bool = False, if int(nxt) == 0: raise CorruptListError("{} -> {} pointer is NULL" .format(node.address, next_)) + last_good_addr = int(node.address) node = nxt.dereference() if pending_exception is not None: From 4a2b9b55c74064f080962a12977d6cf54e2461e9 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Tue, 18 Feb 2020 16:33:22 +0100 Subject: [PATCH 315/367] types/page: expand functions to iterate over all pages Add for_each_struct_page_pfn() to get pfn as well as Page object. Add for_each_page_flag() to quickly filter by flag before creating a Page instance. Also add Page.address attribute. Signed-off-by: Vlastimil Babka --- crash/types/page.py | 27 ++++++++++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) diff --git a/crash/types/page.py b/crash/types/page.py index c0ad494e498..e188f75e100 100644 --- a/crash/types/page.py +++ b/crash/types/page.py @@ -1,7 +1,7 @@ #!/usr/bin/python3 # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from typing import Dict, Union, TypeVar, Iterable, Callable +from typing import Dict, Union, TypeVar, Iterable, Callable, Tuple from math import log, ceil @@ -177,6 +177,7 @@ def from_page_addr(cls, addr: int) -> 'Page': def __init__(self, obj: gdb.Value, pfn: int) -> None: self.gdb_obj = obj + self.address = int(obj.address) self.pfn = pfn self.flags = int(obj["flags"]) @@ -264,12 +265,32 @@ def page_from_gdb_obj(gdb_obj: gdb.Value) -> 'Page': pfn = (int(gdb_obj.address) - Page.vmemmap_base) // types.page_type.sizeof return Page(gdb_obj, pfn) -def for_each_page() -> Iterable[gdb.Value]: +def for_each_struct_page_pfn() -> Iterable[Tuple[gdb.Value, int]]: # TODO works only on x86? max_pfn = int(symvals.max_pfn) for pfn in range(max_pfn): try: - yield Page.pfn_to_page(pfn) + yield (Page.pfn_to_page(pfn), pfn) except gdb.error: # TODO: distinguish pfn_valid() and report failures for those? pass + +def for_each_page() -> Iterable[Page]: + # TODO works only on x86? + max_pfn = int(symvals.max_pfn) + for pfn in range(max_pfn): + try: + yield pfn_to_page(pfn) + except gdb.error: + # TODO: distinguish pfn_valid() and report failures for those? 
+ pass + +# Optimized to filter flags on gdb.Value before instantiating Page +def for_each_page_flag(flag: int) -> Iterable[Page]: + for (struct_page, pfn) in for_each_struct_page_pfn(): + try: + if struct_page["flags"] & flag == 0: + continue + yield Page(struct_page, pfn) + except gdb.error: + pass From 5dafc4855cbe94b55a4ccc2b8b570d98f9ad2e69 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Tue, 18 Feb 2020 16:56:40 +0100 Subject: [PATCH 316/367] types/zone: improve and wire up freelist checking Improve zone's _check_free_area() error printing and robustness against broken lists. Wire the checking up to the "pykmem -z" command. Signed-off-by: Vlastimil Babka --- crash/commands/kmem.py | 2 ++ crash/types/zone.py | 33 ++++++++++++++++++++------------- 2 files changed, 22 insertions(+), 13 deletions(-) diff --git a/crash/commands/kmem.py b/crash/commands/kmem.py index 76e0f31fe4b..72cef9e9c0d 100644 --- a/crash/commands/kmem.py +++ b/crash/commands/kmem.py @@ -172,4 +172,6 @@ def print_zones(self) -> None: print() + zone.check_free_pages() + KmemCommand("kmem") diff --git a/crash/types/zone.py b/crash/types/zone.py index d7282c819b7..2a715eb0507 100644 --- a/crash/types/zone.py +++ b/crash/types/zone.py @@ -48,22 +48,29 @@ def get_vmstat_diffs(self) -> List[int]: def _check_free_area(self, area: gdb.Value, is_pcp: bool) -> None: nr_free = 0 - list_array_name = "lists" if is_pcp else "free_list" + if is_pcp: + list_array_name = "lists" + error_desc = "pcplist" + else: + list_array_name = "free_list" + error_desc = "free area" for free_list in array_for_each(area[list_array_name]): - for page_obj in list_for_each_entry(free_list, - self.types.page_type, - "lru"): - page = crash.types.page.Page.from_obj(page_obj) - nr_free += 1 - if page.get_nid() != self.nid or page.get_zid() != self.zid: - print("page {:#x} misplaced on {} of zone {}:{}, has flags for zone {}:{}". - format(int(page_obj.address), "pcplist" if is_pcp else "freelist", - self.nid, self.zid, page.get_nid(), page.get_zid())) + try: + for page_obj in list_for_each_entry(free_list, + self.types.page_type, + "lru"): + page = crash.types.page.Page.from_obj(page_obj) + nr_free += 1 + if page.get_nid() != self.nid or page.get_zid() != self.zid: + print(f"page 0x{int(page_obj.address):x} misplaced on " + f"{error_desc} of node {self.nid} zone {self.zid}, " + f"has flags for node {page.get_nid()} zone {page.get_zid()}") + except BufferError as e: + print(f"Error traversing free area: {e}") nr_expected = area["count"] if is_pcp else area["nr_free"] if nr_free != nr_expected: - print("nr_free mismatch in {} {}: expected {}, counted {}". - format("pcplist" if is_pcp else "area", area.address, - nr_expected, nr_free)) + print(f"nr_free mismatch in {error_desc} 0x{int(area.address):x}: " + f"expected {nr_expected}, counted {nr_free}") def check_free_pages(self) -> None: for area in array_for_each(self.gdb_obj["free_area"]): From 2a788e1228dc4a0c255ef564d05305ed3eac19b0 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Tue, 18 Feb 2020 17:42:59 +0100 Subject: [PATCH 317/367] kmem/slab: add SLUB support Add SLUB support, while refactoring the existing SLAB support extensively. Introduce abstract base class Slab with subclasses SlabSLAB and SlabSLUB. Analogically, abstract base class KmemCache with two subclasses. Add "pykmem -S" command variant to print individual objects in slabs (only working for SLUB for now). Convert hopefully all formatted strings in types/slab.py to f-strings. 
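In outline, the class layout this introduces (full definitions follow in the diff below):

    from abc import ABC

    class Slab(ABC): ...              # common per-slab wrapper
    class SlabSLAB(Slab): ...         # SLAB backend
    class SlabSLUB(Slab): ...         # SLUB backend

    class KmemCache(ABC): ...         # common cache wrapper; create() picks the subclass
    class KmemCacheSLAB(KmemCache): ...
    class KmemCacheSLUB(KmemCache): ...
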
Some static-check errors introduced that I am not sure how to solve. Some make doc warnings introduced that I am not sure how to debug. Signed-off-by: Vlastimil Babka --- crash/commands/kmem.py | 34 +- crash/types/slab.py | 1152 +++++++++++++++++++++++++++++++--------- 2 files changed, 936 insertions(+), 250 deletions(-) diff --git a/crash/commands/kmem.py b/crash/commands/kmem.py index 72cef9e9c0d..cd8b6aaa729 100644 --- a/crash/commands/kmem.py +++ b/crash/commands/kmem.py @@ -10,6 +10,7 @@ kmem addr - try to find addr within kmem caches kmem -s [slabname] - check consistency of single or all kmem cache + kmem -S [slabname] - list objects in a single or all kmem caches kmem -z - report zones kmem -V - report vmstats @@ -40,7 +41,9 @@ def __init__(self, name: str) -> None: group = parser.add_mutually_exclusive_group() group.add_argument('-s', nargs='?', const=True, default=False, - dest='slabname') + dest='slabcheck') + group.add_argument('-S', nargs='?', const=True, default=False, + dest='slablist') group.add_argument('-z', action='store_true', default=False) group.add_argument('-V', action='store_true', default=False) group.add_argument('address', nargs='?') @@ -56,14 +59,14 @@ def execute(self, args: argparse.Namespace) -> None: self.print_vmstats() return - if args.slabname: - if args.slabname is True: + if args.slabcheck: + if args.slabcheck is True: print("Checking all kmem caches...") for cache in kmem_cache_get_all(): print(cache.name) cache.check_all() else: - cache_name = args.slabname + cache_name = args.slabcheck print(f"Checking kmem cache {cache_name}") try: cache = kmem_cache_from_name(cache_name) @@ -74,6 +77,20 @@ def execute(self, args: argparse.Namespace) -> None: print("Checking done.") return + if args.slablist: + if args.slablist is True: + print("Listing all kmem caches...") + for cache in kmem_cache_get_all(): + cache.list_all() + else: + cache_name = args.slablist + try: + cache = kmem_cache_from_name(cache_name) + except KmemCacheNotFound: + raise CommandError(f"Cache {cache_name} not found.") + cache.list_all() + return + if not args.address: raise CommandLineError("no address specified") @@ -93,11 +110,14 @@ def execute(self, args: argparse.Namespace) -> None: (is_used, details) = slab.obj_in_use(obj) offset = addr - obj - offset_str = "" if offset == 0 else f" offset {offset}/0x{offset:x}" + offset_str = "" if offset == 0 else f" offset 0x{offset:x} ({offset})" details_str = "" if details is None else f" (in {details})" + objsize = slab.kmem_cache.object_size + state = "ALLOCATED" if is_used else "FREE" - print(f"{'ALLOCATED' if is_used else 'FREE'}{details_str} " - f"object 0x{obj:x}{offset_str} from cache {name}") + print(f"{state}{details_str} object 0x{obj:x}{offset_str} " + f"size 0x{objsize:x} ({objsize}) from cache {name} " + f"slab {slab.short_header()}") else: obj_str = "" if obj is not None: diff --git a/crash/types/slab.py b/crash/types/slab.py index 5eef794cee1..f096811e4f1 100644 --- a/crash/types/slab.py +++ b/crash/types/slab.py @@ -1,55 +1,229 @@ #!/usr/bin/python3 # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from typing import TypeVar, Union, Tuple, Iterable, Dict, Optional, Set +from abc import ABC, abstractmethod + +from typing import TypeVar, Union, Tuple, Iterable, Dict, Optional, Set, List,\ + cast from typing import ValuesView import sys import traceback -from crash.util import container_of, find_member_variant +from crash.util import container_of, find_member_variant,\ + safe_find_member_variant from crash.util.symbols 
import Types, TypeCallbacks, SymbolCallbacks from crash.types.percpu import get_percpu_var from crash.types.list import list_for_each, list_for_each_entry, ListError -from crash.types.page import page_from_gdb_obj, page_from_addr, Page +from crash.types.page import page_from_gdb_obj, page_from_addr, Page, page_addr,\ + for_each_page_flag from crash.types.node import for_each_nid from crash.types.cpu import for_each_online_cpu from crash.types.node import numa_node_id import gdb -AC_PERCPU = "percpu" -AC_SHARED = "shared" -AC_ALIEN = "alien" +# TODO: put in utils +def print_flags(val: int, names: Dict[str, int]) -> str: + first = True + ret = f"0x{val:x}" + for name, flag in names.items(): + if val & flag != 0: + prefix = " (" if first else "|" + ret += f"{prefix}{name}" + first = False + if not first: + ret += ")" + return ret + +def col_error(msg: str) -> str: + return f"\033[1;31;40m {msg}\033[0;37;40m " + +def col_bold(msg: str) -> str: + return f"\033[1;37;40m {msg}\033[0;37;40m " + +# TODO: put to separate type +def atomic_long_read(val: gdb.Value) -> int: + return int(val["counter"]) + +types = Types(['kmem_cache', 'struct kmem_cache', 'struct page', 'void *']) + +SlabType = TypeVar('SlabType', bound='Slab') +KmemCacheType = TypeVar('KmemCacheType', bound='KmemCache') ArrayCacheEntry = Dict[str, Union[int, str]] -slab_partial = 0 -slab_full = 1 -slab_free = 2 +class ProcessingFlags: + + def __init__(self, print_level: int = 0): + self.print_level = print_level + +class Slab(ABC): + """ + A common superclass representing a slab, i.e. a collection of objects on a + single (possibly high-order) page. + + Args: + gdb_obj: The structure to wrap, of type ``struct slab`` or + ``struct page``. + kmem_cache: The kmem_cache instance the slab belongs to. + + Attributes: + gdb_obj (:obj:`gdb.Value`): The structure being wrapped. The value + is of type ``struct slab`` or ``struct page``. + address (:obj:`int`): Address of the structure being wrapped. + kmem_cache (:obj:`KmemCache`): The cache the slab belongs to. + free (:obj:`Set[int]`): Set of addresses of free objects in the slab. + nr_objects (:obj:`int): Total number of objects in the slab. + nr_inuse (:obj:`int): Number of allocated objects in the slab. + nr_free (:obj:`int): Number of free objects in the slab. + """ + + def __init__(self, gdb_obj: gdb.Value, kmem_cache: 'KmemCache') -> None: + self.gdb_obj = gdb_obj + self.address = int(gdb_obj.address) + self.kmem_cache = kmem_cache + self.free: Set[int] = set() -slab_list_name = {0: "partial", 1: "full", 2: "free"} -slab_list_fullname = {0: "slabs_partial", 1: "slabs_full", 2: "slabs_free"} + self._free_populated = False + self.error = False + self._misplaced_error = "" -BUFCTL_END = ~0 & 0xffffffff + @classmethod + @abstractmethod + def from_page(cls, page: Page) -> 'Slab': + """ + Create Slab object wrapper from a ``Page`` struct page wrapper. + """ -def col_error(msg: str) -> str: - return "\033[1;31;40m {}\033[0;37;40m ".format(msg) + @classmethod + @abstractmethod + def from_list_head(cls, list_head: gdb.Value, + kmem_cache: 'KmemCache') -> 'Slab': + """ + Create Slab oject wrapper from a :obj:`gdb.Value` object of + ``struct list_head`` type. + """ -def col_bold(msg: str) -> str: - return "\033[1;37;40m {}\033[0;37;40m ".format(msg) + @abstractmethod + def find_obj(self, addr: int) -> Optional[int]: + """ + Returns starting address of object in slab. -types = Types(['kmem_cache', 'struct kmem_cache']) + Args: + addr: Address of the object to find. 
-SlabType = TypeVar('SlabType', bound='Slab') -KmemCacheType = TypeVar('KmemCacheType', bound='KmemCache') + Returns: + :obj:`int`: Starting address of the object, or None if the address + does not fall into the slab's range. Note that while object + address might be returned, addr might still be outside of valid + range. Use contains_obj() to verify. + """ + + @abstractmethod + def contains_obj(self, addr: int) -> Tuple[bool, int, Optional[str]]: + """ + Returns whether the slab contains an object at given address. + + Args: + addr: Address of object to check. + + Returns: + :obj:`(contains, address, description)`: A tuple with object information. + contains (:obj:`bool`): Whether addr points inside valid object + range. + address (:obj:`int`): Starting address of the object. Might be + returned even if contains is False, when addr falls e.g. into a + red zone or padding of an object. + description (:obj:`str`): Optional description of where addr points + to in case it's not a valid object. + """ + + @abstractmethod + def obj_in_use(self, addr: int) -> Tuple[bool, Optional[str]]: + """ + Returns information about whether object is allocated (in use) or free. + + Arg: + addr: Base address of object to check, obtained e.g. by find.obj() + + Returns: + :obj:`(in_use, free_details): A tuple with information. + in_use (:obj:`bool`): Whether object is currently in use. + free_details (:obj:`str`): If an object is in some special cache + of free objects (e.g. per-cpu), a short description of such + cache. + """ -class Slab: + @abstractmethod + def get_allocated_objects(self) -> Iterable[int]: + """ + Generates object addresses for all allocated objects in the slab. + """ + + @abstractmethod + def short_header(self) -> str: + """ + Return a short header consisting of slab's address and role. 
+ """ + + @abstractmethod + def _do_populate_free(self) -> None: + """ Populate the set of free objects """ + + def populate_free(self) -> None: + """ Make sure the set of free objects is populated """ + if self._free_populated: + return + self._do_populate_free() + self._free_populated = True + + def _pr_err(self, msg: str) -> None: + msg = col_error(f"cache {self.kmem_cache.name} slab " + f"0x{self.address:x}{msg}") + self.error = True + print(msg) + +class SlabSLAB(Slab): slab_list_head: str = 'list' page_slab: bool = False - real_slab_type: gdb.Type bufctl_type: gdb.Type + real_slab_type: gdb.Type + + slab_partial = 0 + slab_full = 1 + slab_free = 2 + + AC_PERCPU = "percpu" + AC_SHARED = "shared" + AC_ALIEN = "alien" + + BUFCTL_END = ~0 & 0xffffffff + + def __init__(self, gdb_obj: gdb.Value, kmem_cache: 'KmemCacheSLAB', + error: bool = False) -> None: + super().__init__(gdb_obj, kmem_cache) + # FIXME: this doesn't seem to help + self.kmem_cache = cast(KmemCacheSLAB, kmem_cache) + self.error = error + self.misplaced_list: Optional[str] + self.misplaced_error: Optional[str] + + self.misplaced_list = None + self.misplaced_error = None + + if error: + return + + self.nr_objects = kmem_cache.objs_per_slab + if self.page_slab: + self.nr_inuse = int(gdb_obj["active"]) + self.page = page_from_gdb_obj(gdb_obj) + else: + self.nr_inuse = int(gdb_obj["inuse"]) + self.nr_free = self.nr_objects - self.nr_inuse + self.s_mem = int(gdb_obj["s_mem"]) @classmethod def check_page_type(cls, gdbtype: gdb.Type) -> None: @@ -70,16 +244,16 @@ def check_bufctl_type(cls, gdbtype: gdb.Type) -> None: @classmethod def from_addr(cls, slab_addr: int, - kmem_cache: Union[int, 'KmemCache']) -> 'Slab': - if not isinstance(kmem_cache, KmemCache): - kmem_cache = kmem_cache_from_addr(kmem_cache) + kmem_cache: Union[int, 'KmemCacheSLAB']) -> 'SlabSLAB': + if not isinstance(kmem_cache, KmemCacheSLAB): + kmem_cache = cast(KmemCacheSLAB, kmem_cache_from_addr(kmem_cache)) slab_struct = gdb.Value(slab_addr).cast(cls.real_slab_type.pointer()).dereference() return cls(slab_struct, kmem_cache) @classmethod - def from_page(cls, page: Page) -> 'Slab': + def from_page(cls, page: Page) -> 'SlabSLAB': kmem_cache_addr = int(page.get_slab_cache()) - kmem_cache = kmem_cache_from_addr(kmem_cache_addr) + kmem_cache = cast(KmemCacheSLAB, kmem_cache_from_addr(kmem_cache_addr)) if kmem_cache is None: raise RuntimeError("No kmem cache found for page") if cls.page_slab: @@ -89,77 +263,62 @@ def from_page(cls, page: Page) -> 'Slab': @classmethod def from_list_head(cls, list_head: gdb.Value, - kmem_cache: 'KmemCache') -> 'Slab': + kmem_cache: 'KmemCache') -> 'SlabSLAB': gdb_obj = container_of(list_head, cls.real_slab_type, cls.slab_list_head) + kmem_cache = cast(KmemCacheSLAB, kmem_cache) return cls(gdb_obj, kmem_cache) - def __init__(self, gdb_obj: gdb.Value, kmem_cache: 'KmemCache', - error: bool = False) -> None: - self.error = error - self.gdb_obj = gdb_obj - self.kmem_cache = kmem_cache - self.free: Set[int] = set() - self.misplaced_list: Optional[str] - self.misplaced_error: Optional[str] - - self.misplaced_list = None - self.misplaced_error = None + def short_header(self) -> str: + return f"0x{self.address:x}" - if error: - return - - if self.page_slab: - self.inuse = int(gdb_obj["active"]) - self.page = page_from_gdb_obj(gdb_obj) + # pylint: disable=arguments-differ + def _pr_err(self, msg: str, misplaced: bool = False) -> None: + msg = col_error(f"cache {self.kmem_cache.name} slab " + f"0x{self.address:x}{msg}") + self.error = True + if 
misplaced: + self.misplaced_error = msg else: - self.inuse = int(gdb_obj["inuse"]) - self.s_mem = int(gdb_obj["s_mem"]) + print(msg) def __add_free_obj_by_idx(self, idx: int) -> bool: - objs_per_slab = self.kmem_cache.objs_per_slab bufsize = self.kmem_cache.buffer_size - if idx >= objs_per_slab: - self.__error(": free object index %d overflows %d" % - (idx, objs_per_slab)) + if idx >= self.nr_objects: + self._pr_err(f": free object index {idx} overflows {self.nr_objects}") return False obj_addr = self.s_mem + idx * bufsize if obj_addr in self.free: - self.__error(": object %x duplicated on freelist" % obj_addr) + self._pr_err(f": object 0x{obj_addr:x} duplicated on freelist") return False self.free.add(obj_addr) return True - def __populate_free(self) -> None: - if self.free: - return - - objs_per_slab = self.kmem_cache.objs_per_slab - + def _do_populate_free(self) -> None: if self.page_slab: page = self.gdb_obj freelist = page["freelist"].cast(self.bufctl_type.pointer()) - for i in range(self.inuse, objs_per_slab): + for i in range(self.nr_inuse, self.nr_objects): obj_idx = int(freelist[i]) self.__add_free_obj_by_idx(obj_idx) # XXX not generally useful and reliable - if False and objs_per_slab > 1: + if False and self.nr_objects > 1: all_zeroes = True - for i in range(objs_per_slab): + for i in range(self.nr_objects): obj_idx = int(freelist[i]) if obj_idx != 0: all_zeroes = False if all_zeroes: - self.__error(": freelist full of zeroes") + self._pr_err(": freelist full of zeroes") else: bufctl = self.gdb_obj.address[1].cast(self.bufctl_type).address f = int(self.gdb_obj["free"]) - while f != BUFCTL_END: + while f != self.BUFCTL_END: if not self.__add_free_obj_by_idx(f): - self.__error(": bufctl cycle detected") + self._pr_err(": bufctl cycle detected") break f = int(bufctl[f]) @@ -167,13 +326,12 @@ def __populate_free(self) -> None: def find_obj(self, addr: int) -> Optional[int]: bufsize = self.kmem_cache.buffer_size - objs_per_slab = self.kmem_cache.objs_per_slab if int(addr) < self.s_mem: return None idx = (int(addr) - self.s_mem) // bufsize - if idx >= objs_per_slab: + if idx >= self.nr_objects: return None return int(self.s_mem + (idx * bufsize)) @@ -188,7 +346,7 @@ def contains_obj(self, addr: int) -> Tuple[bool, int, Optional[str]]: def obj_in_use(self, addr: int) -> Tuple[bool, Optional[str]]: - self.__populate_free() + self.populate_free() if addr in self.free: return (False, None) @@ -199,11 +357,11 @@ def obj_in_use(self, addr: int) -> Tuple[bool, Optional[str]]: ac_type = ac['ac_type'] # pylint: disable=unsubscriptable-object nid_tgt = int(ac['nid_tgt']) # pylint: disable=unsubscriptable-object - if ac_type == AC_PERCPU: + if ac_type == self.AC_PERCPU: ac_desc = f"cpu {nid_tgt} cache" - elif ac_type == AC_SHARED: + elif ac_type == self.AC_SHARED: ac_desc = f"shared cache on node {nid_tgt}" - elif ac_type == AC_ALIEN: + elif ac_type == self.AC_ALIEN: nid_src = int(ac['nid_src']) # pylint: disable=unsubscriptable-object ac_desc = f"alien cache on node {nid_src} for node {nid_tgt}" else: @@ -213,149 +371,381 @@ def obj_in_use(self, addr: int) -> Tuple[bool, Optional[str]]: return (True, None) - def __error(self, msg: str, misplaced: bool = False) -> None: - msg = col_error("cache %s slab %x%s" % (self.kmem_cache.name, - int(self.gdb_obj.address), msg)) - self.error = True - if misplaced: - self.misplaced_error = msg - else: - print(msg) - def __free_error(self, list_name: str) -> None: self.misplaced_list = list_name - self.__error(": is on list %s, but has %d of %d objects 
allocated" % - (list_name, self.inuse, self.kmem_cache.objs_per_slab), - misplaced=True) + self._pr_err(f": is on list {list_name}, but has {self.nr_inuse} of " + f"{self.nr_objects} objects allocated", misplaced=True) def get_objects(self) -> Iterable[int]: bufsize = self.kmem_cache.buffer_size obj = self.s_mem # pylint: disable=unused-variable - for i in range(self.kmem_cache.objs_per_slab): + for i in range(self.nr_objects): yield obj obj += bufsize def get_allocated_objects(self) -> Iterable[int]: for obj in self.get_objects(): - c = self.contains_obj(obj) - if c[0]: + (in_use, _) = self.obj_in_use(obj) + if in_use: yield obj def check(self, slabtype: int, nid: int) -> int: - self.__populate_free() + self.populate_free() num_free = len(self.free) - max_free = self.kmem_cache.objs_per_slab + max_free = self.nr_objects - if self.kmem_cache.off_slab and not Slab.page_slab: - struct_slab_slab = slab_from_obj_addr(int(self.gdb_obj.address)) + if self.kmem_cache.off_slab and not SlabSLAB.page_slab: + struct_slab_slab = slab_from_obj_addr(self.address) if not struct_slab_slab: - self.__error(": OFF_SLAB struct slab is not a slab object itself") + self._pr_err(": OFF_SLAB struct slab is not a slab object itself") else: struct_slab_cache = struct_slab_slab.kmem_cache.name if not self.kmem_cache.off_slab_cache: if struct_slab_cache not in ("size-64", "size-128"): - self.__error(": OFF_SLAB struct slab is in a wrong cache %s" % - struct_slab_cache) + self._pr_err(f": OFF_SLAB struct slab is in a wrong " + f"cache {struct_slab_cache}") else: self.kmem_cache.off_slab_cache = struct_slab_cache elif struct_slab_cache != self.kmem_cache.off_slab_cache: - self.__error(": OFF_SLAB struct slab is in a wrong cache %s" % - struct_slab_cache) + self._pr_err(f": OFF_SLAB struct slab is in a wrong cache " + f"{struct_slab_cache}") - addr = int(self.gdb_obj.address) + addr = self.address struct_slab_obj = struct_slab_slab.contains_obj(addr) if not struct_slab_obj[0]: - self.__error(": OFF_SLAB struct slab is not allocated") + self._pr_err(": OFF_SLAB struct slab is not allocated") print(struct_slab_obj) - elif struct_slab_obj[1] != int(self.gdb_obj.address): - self.__error(": OFF_SLAB struct slab at wrong offset{}" - .format(int(self.gdb_obj.address) - struct_slab_obj[1])) + elif struct_slab_obj[1] != addr: + off = addr - struct_slab_obj[1] + self._pr_err(f": OFF_SLAB struct slab at wrong offset {off}") - if self.inuse + num_free != max_free: - self.__error(": inuse=%d free=%d adds up to %d (should be %d)" % - (self.inuse, num_free, - self.inuse + num_free, max_free)) + if self.nr_inuse + num_free != max_free: + self._pr_err(f": inuse={self.nr_inuse} free={num_free} adds up to " + f"{self.nr_inuse + num_free} (should be {max_free})") - if slabtype == slab_free: + if slabtype == self.slab_free: if num_free != max_free: self.__free_error("slab_free") - elif slabtype == slab_partial: + elif slabtype == self.slab_partial: if num_free in (0, max_free): self.__free_error("slab_partial") - elif slabtype == slab_full: + elif slabtype == self.slab_full: if num_free > 0: self.__free_error("slab_full") if self.page_slab: slab_nid = self.page.get_nid() if nid != slab_nid: - self.__error(": slab is on nid %d instead of %d" % - (slab_nid, nid)) - print("free objects %d" % num_free) + self._pr_err(f": slab is on nid {slab_nid} instead of {nid}") + print(f"free objects {num_free}") ac = self.kmem_cache.get_array_caches() last_page_addr = 0 for obj in self.get_objects(): if obj in self.free and obj in ac: - self.__error(": obj 
%x is marked as free but in array cache:" % obj) + self._pr_err(f": obj 0x{obj:x} is marked as free but in array cache:") print(ac[obj]) try: page = page_from_addr(obj).compound_head() except gdb.NotAvailableError: - self.__error(": failed to get page for object %x" % obj) + self._pr_err(f": failed to get page for object 0x{obj:x}") continue - if int(page.gdb_obj.address) == last_page_addr: + if page.address == last_page_addr: continue - last_page_addr = int(page.gdb_obj.address) + last_page_addr = page.address if page.get_nid() != nid: - self.__error(": obj %x is on nid %d instead of %d" % - (obj, page.get_nid(), nid)) + self._pr_err(f": obj 0x{obj:x} is on nid {page.get_nid()} instead of {nid}") if not page.is_slab(): - self.__error(": obj %x is not on PageSlab page" % obj) + self._pr_err(f": obj 0x{obj:x} is not on PageSlab page") kmem_cache_addr = int(page.get_slab_cache()) - if kmem_cache_addr != int(self.kmem_cache.gdb_obj.address): - self.__error(": obj %x is on page where pointer to kmem_cache points to %x instead of %x" % - (obj, kmem_cache_addr, - int(self.kmem_cache.gdb_obj.address))) + if kmem_cache_addr != self.kmem_cache.address: + self._pr_err(f": obj 0x{obj:x} is on page where pointer to kmem_cache " + f"points to 0x{kmem_cache_addr:x} instead of " + f"0x{self.kmem_cache.address:x}") if self.page_slab: continue slab_addr = int(page.get_slab_page()) - if slab_addr != self.gdb_obj.address: - self.__error(": obj %x is on page where pointer to slab wrongly points to %x" % - (obj, slab_addr)) + if slab_addr != self.address: + self._pr_err(f": obj 0x{obj:x} is on page where pointer to slab " + f"wrongly points to 0x{slab_addr:x}") return num_free -class KmemCache: +class SlabSLUB(Slab): + + def __init__(self, gdb_obj: gdb.Value, kmem_cache: 'KmemCacheSLUB') -> None: + super().__init__(gdb_obj, kmem_cache) + self.nr_objects = int(gdb_obj["objects"]) + self.nr_inuse = int(gdb_obj["inuse"]) + self.nr_free = self.nr_objects - self.nr_inuse + + def slab_role(self) -> str: + self.kmem_cache.slub_process_once() + addr = self.address + if addr in self.kmem_cache.cpu_slabs: + return self.kmem_cache.cpu_slabs[addr] + if addr in self.kmem_cache.node_slabs: + return self.kmem_cache.node_slabs[addr] + if self.nr_free == 0: + return "untracked full" + return "unknown" + + def short_header(self) -> str: + return f"0x{self.address:x} ({self.slab_role()})" + + def print_header(self) -> str: + return (f"0x{self.address:x} ({self.slab_role()} objects {self.nr_objects} " + f"active {self.nr_inuse} free {self.nr_free}") + + def _do_populate_free(self) -> None: + cpu_freelists = self.kmem_cache.cpu_freelists + + self.free = set() + fp_offset = self.kmem_cache.fp_offset + + page = self.gdb_obj + freelist = page["freelist"] + nr_free = 0 + + while freelist != 0: + nr_free += 1 + if nr_free > self.nr_objects: + self._pr_err(":too many objects on freelist, aborting traversal") + break + + #TODO validate the pointers - check_valid_pointer() + obj_addr = int(freelist) + self.free.add(obj_addr) + freelist += fp_offset + freelist = freelist.cast(types.void_p_type.pointer()).dereference() + if obj_addr in cpu_freelists: + self._pr_err(f": free object 0x{obj_addr:x} found cached in " + f"{cpu_freelists[obj_addr]}") + + if len(self.free) != self.nr_free: + self._pr_err(f": nr_free={self.nr_free} but freelist has " + f"{len(self.free)} entries") + + def find_obj(self, addr: int) -> Optional[int]: + + page = self.gdb_obj + base = page_addr(int(page.address)) + + if addr < base: + return None + + nr_objects = 
int(page["objects"]) + + idx = (addr - base) // self.kmem_cache.size + + if idx >= nr_objects: + return None + + obj_addr = base + self.kmem_cache.red_left_pad() + idx * self.kmem_cache.size + + return obj_addr + + def contains_obj(self, addr: int) -> Tuple[bool, int, Optional[str]]: + + obj_addr = self.find_obj(addr) + + if not obj_addr: + return (False, 0, "address outside of valid object range") + + if addr < obj_addr: + return (False, obj_addr, "address inside left red zone padding") + + if addr > obj_addr + self.kmem_cache.inuse: + # TODO perhaps distinguish which metadata + return (False, obj_addr, "address inside metadata behind object") + + return (True, obj_addr, None) + + def obj_in_use(self, addr: int) -> Tuple[bool, Optional[str]]: + + self.kmem_cache.slub_process_once() + self.populate_free() + + if addr in self.free: + return (False, None) + + if addr in self.kmem_cache.cpu_freelists: + return (False, self.kmem_cache.cpu_freelists[addr]) + + return (True, None) + + def get_objects(self) -> Iterable[int]: + page = self.gdb_obj + base = page_addr(int(page.address)) + self.kmem_cache.red_left_pad() + slot_size = self.kmem_cache.size + + nr_objects = int(page["objects"]) + for idx in range(nr_objects): + obj_addr = base + idx * slot_size + yield obj_addr + + def get_allocated_objects(self) -> Iterable[int]: + for obj in self.get_objects(): + (in_use, _) = self.obj_in_use(obj) + if in_use: + yield obj + + def print_objects(self) -> None: + self.populate_free() + print(" FREE / [ALLOCATED]") + for obj in self.get_objects(): + free = False + free_where = "" + if obj in self.free: + free = True + if obj in self.kmem_cache.cpu_freelists: + free = True + free_where = f" ({self.kmem_cache.cpu_freelists[obj]})" + if free: + print(f" 0x{obj:x}{free_where}") + else: + print(f" [0x{obj:x}]") + + + def warn_frozen(self, expected: int, header: str) -> None: + warning = "not frozen but should be" if expected else "frozen but shouldn't be" + if expected != self.gdb_obj["frozen"]: + self._pr_err(f"({header}) {warning}") + + @classmethod + def from_list_head(cls, list_head: gdb.Value, + kmem_cache: 'KmemCache') -> 'SlabSLUB': + gdb_obj = container_of(list_head, types.page_type, 'lru') + kmem_cache = cast(KmemCacheSLUB, kmem_cache) + return cls(gdb_obj, kmem_cache) + + @classmethod + def from_page_obj(cls, page: gdb.Value) -> 'SlabSLUB': + if page.type.code == gdb.TYPE_CODE_PTR: + page = page.dereference() + kmem_cache_addr = int(page["slab_cache"]) + kmem_cache = kmem_cache_from_addr(kmem_cache_addr) + if kmem_cache is None: + raise RuntimeError("No kmem cache found for page") + kmem_cache = cast(KmemCacheSLUB, kmem_cache) + return cls(page, kmem_cache) + + @classmethod + def from_page(cls, page: Page) -> 'SlabSLUB': + return cls.from_page_obj(page.gdb_obj) + +SLAB_RED_ZONE = 0x00000400 + +class KmemCache(ABC): buffer_size_name = None nodelists_name = None percpu_name = None percpu_cache = None head_name = "list" alien_cache_type_exists = False + SLUB = False + slub_debug_compiled = True + + SlabFlags = { + 'CONSISTENCY_CHECKS' : 0x00000100, + 'RED_ZONE' : 0x00000400, + 'POISON' : 0x00000800, + 'HWCACHE_ALIGN' : 0x00002000, + 'CACHE_DMA' : 0x00004000, + 'STORE_USER' : 0x00010000, + 'RECLAIM_ACCOUNT' : 0x00020000, + 'PANIC' : 0x00040000, + 'TYPESAFE_BY_RCU' : 0x00080000, + 'MEM_SPREAD' : 0x00100000, + 'TRACE' : 0x00200000, + 'DEBUG_OBJECTS' : 0x00400000, + 'NOLEAKTRACE' : 0x00800000, + 'NOTRACK' : 0x01000000, + 'FAILSLAB' : 0x02000000, + 'ACCOUNT' : 0x04000000, + } + + def __init__(self, name: 
str, gdb_obj: gdb.Value) -> None: + self.name = name + self.gdb_obj = gdb_obj + self.address = int(gdb_obj.address) + + self.size = int(gdb_obj["size"]) + self.object_size = int(gdb_obj["object_size"]) + self.flags = int(gdb_obj["flags"]) + + self.objs_per_slab = 0 + + self.array_caches: Dict[int, Dict] = dict() @classmethod def check_kmem_cache_type(cls, gdbtype: gdb.Type) -> None: - cls.buffer_size_name = find_member_variant(gdbtype, ['buffer_size', 'size']) + cls.percpu_name = find_member_variant(gdbtype, ['cpu_cache', 'cpu_slab', 'array']) + if cls.percpu_name == 'cpu_slab': + cls.SLUB = True + else: + cls.buffer_size_name = find_member_variant(gdbtype, ['buffer_size', 'size']) + cls.percpu_cache = bool(cls.percpu_name == 'cpu_cache') + cls.head_name = find_member_variant(gdbtype, ['next', 'list']) cls.nodelists_name = find_member_variant(gdbtype, ['nodelists', 'node']) - cls.percpu_name = find_member_variant(gdbtype, ['cpu_cache', 'array']) - cls.percpu_cache = bool(cls.percpu_name == 'cpu_cache') - cls.head_name = find_member_variant(gdbtype, ['next', 'list']) @classmethod - # pylint: disable=unused-argument - def setup_alien_cache_type(cls, gdbtype: gdb.Type) -> None: - cls.alien_cache_type_exists = True + def check_kmem_cache_node_type(cls, gdbtype: gdb.Type) -> None: + nr_slabs_name = safe_find_member_variant(gdbtype, ['nr_slabs']) + if nr_slabs_name is not None: + cls.slub_debug_compiled = True - def __init__(self, name: str, gdb_obj: gdb.Value) -> None: - self.name = name - self.gdb_obj = gdb_obj + @classmethod + def create(cls, name: str, gdb_obj: gdb.Value): + if cls.SLUB: + return KmemCacheSLUB(name, gdb_obj) + return KmemCacheSLAB(name, gdb_obj) + + @abstractmethod + def list_all(self) -> None: + pass + + @abstractmethod + def check_all(self) -> None: + pass + + @abstractmethod + def get_allocated_objects(self) -> Iterable[int]: + pass + + def has_flag(self, flag_name: str) -> bool: + flag = self.SlabFlags[flag_name] + return self.flags & flag != 0 + + def tracks_full_slabs(self) -> bool: + return self.has_flag("STORE_USER") + + def _get_nodelist(self, node: int) -> gdb.Value: + return self.gdb_obj[KmemCache.nodelists_name][node] + + def _get_nodelists(self) -> Iterable[Tuple[int, gdb.Value]]: + for nid in for_each_nid(): + node = self._get_nodelist(nid) + if int(node) == 0: + continue + yield (nid, node.dereference()) + def _pr_err(self, msg: str) -> None: + msg = col_error(f"cache {self.name}{msg}") + print(msg) + +class KmemCacheSLAB(KmemCache): + + slab_list_name = {0: "partial", 1: "full", 2: "free"} + slab_list_fullname = {0: "slabs_partial", 1: "slabs_full", 2: "slabs_free"} + + def __init__(self, name: str, gdb_obj: gdb.Value) -> None: + super().__init__(name, gdb_obj) self.objs_per_slab = int(gdb_obj["num"]) self.buffer_size = int(gdb_obj[KmemCache.buffer_size_name]) @@ -366,24 +756,47 @@ def __init__(self, name: str, gdb_obj: gdb.Value) -> None: else: self.off_slab = False - self.array_caches: Dict[int, Dict] = dict() + @classmethod + # pylint: disable=unused-argument + def setup_alien_cache_type(cls, gdbtype: gdb.Type) -> None: + cls.alien_cache_type_exists = True - def __get_nodelist(self, node: int) -> gdb.Value: - return self.gdb_obj[KmemCache.nodelists_name][node] + def list_all(self) -> None: + print("Not yet implemented for SLAB") - def __get_nodelists(self) -> Iterable[Tuple[int, gdb.Value]]: - for nid in for_each_nid(): - node = self.__get_nodelist(nid) - if int(node) == 0: - continue - yield (nid, node.dereference()) + def check_all(self) -> None: + 
nr_slabs = 0 + nr_objs = 0 + nr_free = 0 + + for (nid, node) in self._get_nodelists(): +# try: +# # This is version and architecture specific +# lock = int(node["list_lock"]["rlock"]["raw_lock"]["slock"]) +# if lock != 0: +# print(col_error("unexpected lock value in kmem_list3 {:#x}: {:#x}" +# .format(int(node.address), lock))) +# except gdb.error: +# print("Can't check lock state -- locking implementation unknown.") - @staticmethod - def all_find_obj(addr: int) -> Optional[Tuple[bool, int, Optional[str]]]: - slab = slab_from_obj_addr(addr) - if not slab: - return None - return slab.contains_obj(addr) + free_declared = int(node["free_objects"]) + free_counted = self.__check_slabs(node, SlabSLAB.slab_partial, nid) + free_counted += self.__check_slabs(node, SlabSLAB.slab_full, nid) + free_counted += self.__check_slabs(node, SlabSLAB.slab_free, nid) + + if free_declared != free_counted: + self._pr_err(f": free objects mismatch on node {nid}: " + f"declared={free_declared} counted={free_counted}") + self.check_array_caches() + + print(f"Node {nid}: nr_slabs={nr_slabs}, nr_objs={nr_objs}, nr_free={nr_free}") + + def get_allocated_objects(self) -> Iterable[int]: + for (_, node) in self._get_nodelists(): + for obj in self.__get_allocated_objects(node, SlabSLAB.slab_partial): + yield obj + for obj in self.__get_allocated_objects(node, SlabSLAB.slab_full): + yield obj def __fill_array_cache(self, acache: gdb.Value, ac_type: str, nid_src: int, nid_tgt: int) -> None: @@ -397,15 +810,14 @@ def __fill_array_cache(self, acache: gdb.Value, ac_type: str, "nid_src" : nid_src, "nid_tgt" : nid_tgt} -# print(cache_dict) - if ac_type == AC_PERCPU: + if ac_type == SlabSLAB.AC_PERCPU: nid_tgt = numa_node_id(nid_tgt) for i in range(avail): ptr = int(acache["entry"][i]) -# print(hex(ptr)) if ptr in self.array_caches: - print(col_error("WARNING: array cache duplicity detected!")) + self._pr_err(f": object 0x{ptr:x} is in cache {cache_dict} " + f"but also {self.array_caches[ptr]}") else: self.array_caches[ptr] = cache_dict @@ -413,8 +825,8 @@ def __fill_array_cache(self, acache: gdb.Value, ac_type: str, obj_nid = page.get_nid() if obj_nid != nid_tgt: - print(col_error("Object {:#x} in cache {} is on wrong nid {} instead of {}" - .format(ptr, cache_dict, obj_nid, nid_tgt))) + self._pr_err(f": object 0x{ptr:x} in cache {cache_dict} is " + f"on wrong nid {obj_nid} instead of {nid_tgt}") def __fill_alien_caches(self, node: gdb.Value, nid_src: int) -> None: alien_cache = node["alien"] @@ -438,7 +850,7 @@ def __fill_alien_caches(self, node: gdb.Value, nid_src: int) -> None: if nid_src == nid: continue - self.__fill_array_cache(array, AC_ALIEN, nid_src, nid) + self.__fill_array_cache(array, SlabSLAB.AC_ALIEN, nid_src, nid) def __fill_percpu_caches(self) -> None: cpu_cache = self.gdb_obj[KmemCache.percpu_name] @@ -449,7 +861,7 @@ def __fill_percpu_caches(self) -> None: else: array = cpu_cache[cpu].dereference() - self.__fill_array_cache(array, AC_PERCPU, -1, cpu) + self.__fill_array_cache(array, SlabSLAB.AC_PERCPU, -1, cpu) def __fill_all_array_caches(self) -> None: self.array_caches = dict() @@ -457,10 +869,11 @@ def __fill_all_array_caches(self) -> None: self.__fill_percpu_caches() # TODO check and report collisions - for (nid, node) in self.__get_nodelists(): + for (nid, node) in self._get_nodelists(): shared_cache = node["shared"] if int(shared_cache) != 0: - self.__fill_array_cache(shared_cache.dereference(), AC_SHARED, nid, nid) + self.__fill_array_cache(shared_cache.dereference(),\ + SlabSLAB.AC_SHARED, nid, nid) 
self.__fill_alien_caches(node, nid) @@ -470,50 +883,41 @@ def get_array_caches(self) -> Dict[int, ArrayCacheEntry]: return self.array_caches - def __get_allocated_objects(self, node: gdb.Value, - slabtype: int) -> Iterable[int]: - for slab in self.get_slabs_of_type(node, slabtype): - for obj in slab.get_allocated_objects(): - yield obj - - def get_allocated_objects(self) -> Iterable[int]: - # pylint: disable=unused-variable - for (nid, node) in self.__get_nodelists(): - for obj in self.__get_allocated_objects(node, slab_partial): - yield obj - for obj in self.__get_allocated_objects(node, slab_full): - yield obj - def get_slabs_of_type(self, node: gdb.Value, slabtype: int, reverse: bool = False, - exact_cycles: bool = False) -> Iterable[Slab]: + exact_cycles: bool = False) -> Iterable[SlabSLAB]: wrong_list_nodes = dict() for stype in range(3): if stype != slabtype: - wrong_list_nodes[int(node[slab_list_fullname[stype]].address)] = stype + wrong_list_nodes[int(node[self.slab_list_fullname[stype]].address)] = stype - slab_list = node[slab_list_fullname[slabtype]] + slab_list = node[self.slab_list_fullname[slabtype]] for list_head in list_for_each(slab_list, reverse=reverse, exact_cycles=exact_cycles): try: if int(list_head) in wrong_list_nodes.keys(): wrong_type = wrong_list_nodes[int(list_head)] - print(col_error("Encountered head of {} slab list while traversing {} slab list, skipping" - .format(slab_list_name[wrong_type], - slab_list_name[slabtype]))) + self._pr_err(f": encountered head of {self.slab_list_name[wrong_type]} " + f"slab list while traversing {self.slab_list_name[slabtype]} " + f"slab list, skipping") continue - slab = Slab.from_list_head(list_head, self) + slab = SlabSLAB.from_list_head(list_head, self) except gdb.NotAvailableError: traceback.print_exc() - print("failed to initialize slab object from list_head {:#x}: {}" - .format(int(list_head), sys.exc_info()[0])) + self._pr_err(f": failed to initialize slab object from list_head " + f"0x{int(list_head):x}: {sys.exc_info()[0]}") continue yield slab + def __get_allocated_objects(self, node: gdb.Value, + slabtype: int) -> Iterable[int]: + for slab in self.get_slabs_of_type(node, slabtype): + for obj in slab.get_allocated_objects(): + yield obj - def __check_slab(self, slab: Slab, slabtype: int, nid: int, + def __check_slab(self, slab: SlabSLAB, slabtype: int, nid: int, errors: Dict) -> int: - addr = int(slab.gdb_obj.address) + addr = slab.address free = 0 if slab.error is False: @@ -521,8 +925,8 @@ def __check_slab(self, slab: Slab, slabtype: int, nid: int, if slab.misplaced_error is None and errors['num_misplaced'] > 0: if errors['num_misplaced'] > 0: - print(col_error("{} slab objects were misplaced, printing the last:" - .format(errors['num_misplaced']))) + print(col_error(f"{errors['num_misplaced']} slab objects " + f"were misplaced, printing the last:")) print(errors['last_misplaced']) errors['num_misplaced'] = 0 errors['last_misplaced'] = None @@ -534,8 +938,8 @@ def __check_slab(self, slab: Slab, slabtype: int, nid: int, errors['first_ok'] = addr else: if errors['num_ok'] > 0: - print("{} slab objects were ok between {:#x} and {:#x}" - .format(errors['num_ok'], errors['first_ok'], errors['last_ok'])) + print(f"{errors['num_ok']} slab objects were ok between " + f"0x{errors['first_ok']:x} and 0x{errors['last_ok']:x}") errors['num_ok'] = 0 errors['first_ok'] = None errors['last_ok'] = None @@ -567,36 +971,36 @@ def ___check_slabs(self, node: gdb.Value, slabtype: int, nid: int, try: free += self.__check_slab(slab, 
slabtype, nid, errors) except gdb.NotAvailableError as e: - print(col_error("Exception when checking slab {:#x}:{}" - .format(int(slab.gdb_obj.address), e))) + self._pr_err(f": exception when checking slab " + f"0x{slab.address:x}: {e}") traceback.print_exc() slabs += 1 except (gdb.NotAvailableError, ListError) as e: - print(col_error("Unrecoverable error when traversing {} slab list: {}" - .format(slab_list_name[slabtype], e))) + self._pr_err(f": unrecoverable error when traversing " + f"{self.slab_list_name[slabtype]} slab list: {e}") check_ok = False count = errors['num_ok'] if (count and errors['first_ok'] is not None and errors['last_ok'] is not None): - print("{} slab objects were ok between {:#x} and {:#x}" - .format(errors['num_ok'], errors['first_ok'], errors['last_ok'])) + print(f"{errors['num_ok']} slab objects were ok between " + f"0x{errors['first_ok']:x} and 0x{errors['last_ok']:x}") count = errors['num_misplaced'] if count: - print(col_error("{} slab objects were misplaced, printing the last:" - .format(errors['num_misplaced']))) + print(col_error(f"{errors['num_misplaced']} slab objects were " + f"misplaced, printing the last:")) print(errors['last_misplaced']) return (check_ok, slabs, free) def __check_slabs(self, node: gdb.Value, slabtype: int, nid: int) -> int: - slab_list = node[slab_list_fullname[slabtype]] + slab_list = node[self.slab_list_fullname[slabtype]] - print("checking {} slab list {:#x}".format(slab_list_name[slabtype], - int(slab_list.address))) + print(f"checking {self.slab_list_name[slabtype]} slab list " + f"0x{int(slab_list.address):x}") (check_ok, slabs, free) = self.___check_slabs(node, slabtype, nid) @@ -617,87 +1021,349 @@ def check_array_caches(self) -> None: for ac_ptr in acs: ac_obj_slab = slab_from_obj_addr(ac_ptr) if not ac_obj_slab: - print("cached pointer {:#x} in {} not found in slab" - .format(ac_ptr, acs[ac_ptr])) - elif ac_obj_slab.kmem_cache.name != self.name: - print("cached pointer {:#x} in {} belongs to wrong kmem cache {}" - .format(ac_ptr, acs[ac_ptr], ac_obj_slab.kmem_cache.name)) + self._pr_err(f": cached pointer 0x{ac_ptr:x} in 0x{acs[ac_ptr]:x} " + f"not found in any slab") + elif ac_obj_slab.kmem_cache.address != self.address: + self._pr_err(f": cached pointer 0x{ac_ptr:x} in 0x{acs[ac_ptr]:x} " + f"belongs to wrong kmem cache {ac_obj_slab.kmem_cache.name}") else: ac_obj_obj = ac_obj_slab.contains_obj(ac_ptr) if ac_obj_obj[0] is False and ac_obj_obj[2] is None: - print("cached pointer {:#x} in {} is not allocated: {}".format( - ac_ptr, acs[ac_ptr], ac_obj_obj)) + self._pr_err(f": cached pointer 0x{ac_ptr:x} in 0x{acs[ac_ptr]:x} " + f"is not allocated: {ac_obj_obj}") elif ac_obj_obj[1] != ac_ptr: - print("cached pointer {:#x} in {} has wrong offset: ({}, {:#x}, {})" - .format(ac_ptr, acs[ac_ptr], ac_obj_obj[0], - ac_obj_obj[1], ac_obj_obj[2])) + self._pr_err(f": cached pointer 0x{ac_ptr:x} in 0x{acs[ac_ptr]:x} " + f"has wrong offset: ({ac_obj_obj[0]}, 0x{ac_obj_obj[1]:x}, " + f"{ac_obj_obj[2]})") + +class KmemCacheSLUB(KmemCache): + + __slub_full_slabs_scanned = False + + @classmethod + def __slub_find_full_slabs(cls) -> None: + + if cls.__slub_full_slabs_scanned: + return + + print("Searching for SLUB pages...") + + for page in for_each_page_flag(Page.PG_slab): + if page.is_tail(): + continue + + if page.gdb_obj["inuse"] < page.gdb_obj["objects"]: + continue + + cache_ptr = int(page.get_slab_cache()) + cache = kmem_cache_from_addr(cache_ptr) + nid = page.get_nid() + cache.full_slabs[nid].add(page.address) + + print("Searching 
for SLUB pages done!") + cls.__slub_full_slabs_scanned = True + + def __init__(self, name: str, gdb_obj: gdb.Value) -> None: + super().__init__(name, gdb_obj) + self.fp_offset = int(gdb_obj["offset"]) + self.flags = int(gdb_obj["flags"]) + self._red_left_pad = int(gdb_obj["red_left_pad"]) + self.inuse = int(gdb_obj["inuse"]) + self.full_slabs: List[Set[int]] = [set() for x in for_each_nid()] + self.cpu_slabs: Dict[int, str] = dict() + self.node_slabs: Dict[int, str] = dict() + self.cpu_slabs_objects = 0 + self.cpu_slabs_free = 0 + self.cpu_freelists: Dict[int, str] = dict() + self.cpu_freelists_sizes: List[int] = [0 for x in for_each_online_cpu()] + self.processed = False + + def list_all(self) -> None: + flags = ProcessingFlags(print_level=3) + self.process_all(flags) def check_all(self) -> None: - for (nid, node) in self.__get_nodelists(): - try: - # This is version and architecture specific - lock = int(node["list_lock"]["rlock"]["raw_lock"]["slock"]) - if lock != 0: - print(col_error("unexpected lock value in kmem_list3 {:#x}: {:#x}" - .format(int(node.address), lock))) - except gdb.error: - print("Can't check lock state -- locking implementation unknown.") - free_declared = int(node["free_objects"]) - free_counted = self.__check_slabs(node, slab_partial, nid) - free_counted += self.__check_slabs(node, slab_full, nid) - free_counted += self.__check_slabs(node, slab_free, nid) - if free_declared != free_counted: - print(col_error("free objects mismatch on node %d: declared=%d counted=%d" % - (nid, free_declared, free_counted))) - self.check_array_caches() + flags = ProcessingFlags(print_level=1) + self.process_all(flags) + + def get_allocated_objects(self) -> Iterable[int]: + # TODO this is incomplete! + for (_, node) in self._get_nodelists(): + partial_list = node["partial"] + for list_head in list_for_each(partial_list): + slub = SlabSLUB.from_list_head(list_head, self) + for obj in slub.get_allocated_objects(): + yield obj + + def red_left_pad(self) -> int: + if self.flags & SLAB_RED_ZONE != 0: + return self._red_left_pad + return 0 + + def _add_percpu_slub(self, slub: SlabSLUB, addr: int, _type: str) -> None: + if addr in self.cpu_slabs: + self._pr_err(f": slab page 0x{addr:x} is both a {_type} and {self.cpu_slabs[addr]}") + else: + self.cpu_slabs[addr] = _type + self.cpu_slabs_objects += slub.nr_objects + self.cpu_slabs_free += slub.nr_free + # TODO warn if full/free? 
+ slub.populate_free() + + def _populate_cpu_freelist(self, cpu_slab: gdb.Value, cache_type: str) -> int: + + fp_offset = self.fp_offset + + freelist = cpu_slab["freelist"] + if freelist != 0: + nr_objects = int(cpu_slab["page"]["objects"]) + nr_free = 0 + + # unlike page.freelist (void *), kmem_cache_cpu is (void **) + # this messes with the += fp_offset arithmetic, so recast it + freelist = freelist.cast(types.void_p_type) + while freelist != 0: + nr_free += 1 + if nr_free > nr_objects: + self._pr_err(f" has too many objects on {cache_type}, aborting traversal") + break + + #TODO validate the pointers - check_valid_pointer() + obj_addr = int(freelist) + if obj_addr in self.cpu_freelists: + self._pr_err(f" per-cpu freelist duplicitydetected: object " + f"0x{obj_addr:x} is in {cache_type} and also " + f"{self.cpu_freelists[obj_addr]}") + else: + self.cpu_freelists[obj_addr] = cache_type + freelist += fp_offset + + freelist = freelist.cast(types.void_p_type.pointer()).dereference() + + return nr_free + + def _process_percpu(self, flags: ProcessingFlags) -> None: + fill_cpu_slabs = bool(len(self.cpu_slabs) == 0) + + if not fill_cpu_slabs and flags.print_level == 0: + # nothing to do + return + + cpu_slab_var = self.gdb_obj["cpu_slab"] + for cpu in for_each_online_cpu(): + cpu_slab = get_percpu_var(cpu_slab_var, cpu) + + if fill_cpu_slabs: + nr_freelist = self._populate_cpu_freelist(cpu_slab, f"CPU {cpu} freelist") + self.cpu_freelists_sizes[cpu] = nr_freelist + else: + nr_freelist = self.cpu_freelists_sizes[cpu] + + if flags.print_level >= 2: + print(f"CPU {cpu} kmem_cache_cpu 0x{int(cpu_slab.address):x}, " + f"freelist has {nr_freelist} cached objects") + + slab_addr = int(cpu_slab["page"]) + if slab_addr == 0: + if flags.print_level >= 2: + print(f"CPU {cpu} slab: (none)") + else: + slab = SlabSLUB.from_page_obj(cpu_slab["page"]) + if fill_cpu_slabs: + self._add_percpu_slub(slab, slab_addr, f"CPU {cpu} slab") + if flags.print_level >= 2: + print(f"CPU {cpu} slab: {slab.print_header()}") + slab.warn_frozen(1, f"CPU {cpu}") + if flags.print_level >= 3: + slab.print_objects() + + partial = cpu_slab["partial"] + if int(partial) == 0: + if flags.print_level >= 2: + print(f"CPU {cpu} partial: (empty)") + else: + if flags.print_level >= 3: + print(f"CPU {cpu} partial:") + + # pages should grow down by 1, last in partial list should have 1 + pages_expected = int(partial["pages"]) + pages = -1 + + nr_partial = 0 + nr_objects = 0 + nr_active = 0 + + while int(partial) != 0: + nr_partial += 1 + slab = SlabSLUB.from_page_obj(partial) + if fill_cpu_slabs: + self._add_percpu_slub(slab, int(partial), f"CPU {cpu} partial") + pages = int(partial["pages"]) + if flags.print_level >= 3: + print(f" {slab.print_header()}") + slab.print_objects() + slab.warn_frozen(1, f"CPU {cpu} partial") + nr_objects += slab.nr_objects + nr_active += slab.nr_inuse + if pages != pages_expected: + self._pr_err(f" CPU {cpu} partial 0x{int(partial):x} " + f"pages={pages} expected {pages_expected}") + pages_expected = pages - 1 + partial = partial["next"] + if pages != 1: + self._pr_err(f"CPU {cpu} last partial 0x{int(partial):x} " + f"pages field is {pages} and not 1") + if flags.print_level >= 2: + print(f"CPU {cpu} partial: Slabs: {nr_partial} Objects: total " + f"{nr_objects} active {nr_active} free {nr_objects - nr_active}") + + def process_all(self, flags: ProcessingFlags) -> None: + + if flags.print_level == 0 and self.processed: + # nothing to do + return + + if flags.print_level >= 2: + # TODO slab sizes (kmem_cache.oo 
etc) + cache_flags = print_flags(self.flags, self.SlabFlags) + print(f"Cache {self.name} at 0x{self.address:x} objsize " + f"{self.object_size} ({self.size}) flags {cache_flags}") + + self._process_percpu(flags) + nr_slabs = 0 + nr_partial = 0 + nr_objs = 0 + nr_free_objs = self.cpu_slabs_free + len(self.cpu_freelists) + nr_full_list = 0 + + for (nid, node) in self._get_nodelists(): + if self.slub_debug_compiled: + node_nr_slabs = atomic_long_read(node["nr_slabs"]) + node_nr_objs = atomic_long_read(node["total_objects"]) + nr_slabs += node_nr_slabs + nr_objs += node_nr_objs + + nr_partial_expected = int(node["nr_partial"]) + nr_partial += nr_partial_expected + partial_list = node["partial"] + node_nr_partial = 0 + + if flags.print_level >= 2: + print(f"Node {nid} Slabs: total {node_nr_slabs} partial " + f"{nr_partial_expected} Objects: total {node_nr_objs}") + + # TODO check if slab page is on proper node (also full slabs) + for list_head in list_for_each(partial_list): + node_nr_partial += 1 + slub = SlabSLUB.from_list_head(list_head, self) + if flags.print_level >= 3: + print(f"Partial slab {slub.print_header()}") + slub.print_objects() + slub.warn_frozen(0, f"Node {nid} partial") + if not self.processed: + self.node_slabs[slub.address] = f"Node {nid} partial" + nr_free_objs += slub.nr_free + + if flags.print_level >= 3: + slub.print_objects() + if nr_partial_expected != node_nr_partial: + self._pr_err(f" node {nid} partial list has {node_nr_partial} " + f"pages but expected {nr_partial_expected}") + + if self.slub_debug_compiled: + nr_full_list_node = 0 + for list_head in list_for_each(node["full"]): + nr_full_list_node += 1 + slub = SlabSLUB.from_list_head(list_head, self) + if flags.print_level >= 3: + print(f"Full slab {slub.print_header()}") + slub.print_objects() + slub.warn_frozen(0, f"Node {nid} full") + if not self.processed: + self.node_slabs[slub.address] = f"Node {nid} full" + # TODO warn if not actually full + if nr_full_list_node > 0 and not self.tracks_full_slabs(): + self._pr_err(f" node {nid} full list not empty ({nr_full_list_node} slabs) " + f"although SLAB_STORE_USER not enabled") + nr_full_list += nr_full_list_node + + nr_percpu = len(self.cpu_slabs) + nr_full = nr_slabs - nr_partial - nr_percpu + if self.tracks_full_slabs() and nr_full_list != nr_full: + self._pr_err(f": expected to find {nr_full} slabs on full lists, " + f"but found {nr_full_list}") + if flags.print_level == 1: + cache_flags = print_flags(self.flags, self.SlabFlags) + print(f"Cache {self.name} at 0x{self.address:x} objsize " + f"{self.object_size} ({self.size}) Slabs: total {nr_slabs} partial " + f"{nr_partial} percpu {nr_percpu} full {nr_full} " + f"Objects: total {nr_objs} active {nr_objs - nr_free_objs} " + f"free {nr_free_objs} Flags: {cache_flags}") + + elif flags.print_level >= 2: + print(f"Cache {self.name} total: Slabs: total {nr_slabs} partial " + f"{nr_partial} percpu {nr_percpu} full {nr_full} " + f"Objects: total {nr_objs} active {nr_objs - nr_free_objs} " + f"free {nr_free_objs}") + + self.processed = True + + def slub_process_once(self) -> None: + if not self.processed: + self.process_all(ProcessingFlags()) class KmemCacheNotFound(RuntimeError): """The specified kmem_cache could not be found.""" -kmem_caches: Dict[str, KmemCache] = dict() -kmem_caches_by_addr: Dict[int, KmemCache] = dict() +__kmem_caches: Dict[str, KmemCache] = dict() +__kmem_caches_by_addr: Dict[int, KmemCache] = dict() -def setup_slab_caches(slab_caches: gdb.Symbol) -> None: +def __setup_slab_caches(slab_caches: 
gdb.Symbol) -> None: list_caches = slab_caches.value() for cache in list_for_each_entry(list_caches, types.kmem_cache_type, KmemCache.head_name): name = cache["name"].string() - kmem_cache = KmemCache(name, cache) + kmem_cache = KmemCache.create(name, cache) - kmem_caches[name] = kmem_cache - kmem_caches_by_addr[int(cache.address)] = kmem_cache + __kmem_caches[name] = kmem_cache + __kmem_caches_by_addr[int(cache.address)] = kmem_cache +# TODO: move the following functions to subsystem/ ? def kmem_cache_from_addr(addr: int) -> KmemCache: try: - return kmem_caches_by_addr[addr] + return __kmem_caches_by_addr[addr] except KeyError: raise KmemCacheNotFound(f"No kmem cache found for {addr}.") def kmem_cache_from_name(name: str) -> KmemCache: try: - return kmem_caches[name] + return __kmem_caches[name] except KeyError: raise KmemCacheNotFound(f"No kmem cache found for {name}.") def kmem_cache_get_all() -> ValuesView[KmemCache]: - return kmem_caches.values() + return __kmem_caches.values() -def slab_from_obj_addr(addr: int) -> Union[Slab, None]: +def slab_from_obj_addr(addr: int) -> Optional[Slab]: page = page_from_addr(addr).compound_head() if not page.is_slab(): return None - return Slab.from_page(page) + if KmemCache.SLUB: + return SlabSLUB.from_page(page) + return SlabSLAB.from_page(page) -type_cbs = TypeCallbacks([('struct page', Slab.check_page_type), - ('struct slab', Slab.check_slab_type), - ('kmem_bufctl_t', Slab.check_bufctl_type), - ('freelist_idx_t', Slab.check_bufctl_type), +type_cbs = TypeCallbacks([('struct page', SlabSLAB.check_page_type), + ('struct slab', SlabSLAB.check_slab_type), + ('kmem_bufctl_t', SlabSLAB.check_bufctl_type), + ('freelist_idx_t', SlabSLAB.check_bufctl_type), ('struct kmem_cache', KmemCache.check_kmem_cache_type), + ('struct kmem_cache_node', + KmemCache.check_kmem_cache_node_type), ('struct alien_cache', - KmemCache.setup_alien_cache_type)]) -symbol_cbs = SymbolCallbacks([('slab_caches', setup_slab_caches), - ('cache_chain', setup_slab_caches)]) + KmemCacheSLAB.setup_alien_cache_type)]) +symbol_cbs = SymbolCallbacks([('slab_caches', __setup_slab_caches), + ('cache_chain', __setup_slab_caches)]) From db99ff02b4389cb30831a42a9b378915904a1186 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Wed, 19 Feb 2020 14:42:23 +0100 Subject: [PATCH 318/367] util: add safe_int() function Add safe_int() that tries to parse input as integer, and returns None instead of raising an exception. It can also parse hexadecimal input without the '0x' prefix. This will be useful for the pykmem command. Signed-off-by: Vlastimil Babka --- crash/util/__init__.py | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/crash/util/__init__.py b/crash/util/__init__.py index d370c2b4f9c..2cf9497606e 100644 --- a/crash/util/__init__.py +++ b/crash/util/__init__.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from typing import Union, Tuple, List, Iterator, Dict, Optional +from typing import Union, Tuple, List, Iterator, Dict, Optional, Any import uuid @@ -507,3 +507,25 @@ def decode_uuid_t(value: gdb.Value) -> uuid.UUID: member = '__u_bits' return decode_uuid(value[member]) + +def safe_int(value: Any) -> Optional[int]: + """ + Try to parse the input (typically string) as int. 
+ + Args: + value (Any): the input to be parsed + + Returns: + int: the parsed input value, or + None: if input could not be parsed as int + """ + try: + # try autodetecting the base first + return int(value, 0) + except ValueError: + try: + # try hex number without 0x prefix + return int(value, 16) + except ValueError: + # no luck + return None From 44acebd8ae3965599939328fe994e92437177b9f Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Wed, 19 Feb 2020 14:44:39 +0100 Subject: [PATCH 319/367] kmem: improve the pykmem command usefulness - pykmem now recognizes struct page addresses in addition to slab - pykmem [-s] and [-S] print more info about slab addresses - pykmem -S can be given multiple -S parameters, increasing verbosity - addresses are now passed via safe_int() so hexadecimal addresses without '0x' prefix now work Signed-off-by: Vlastimil Babka --- crash/commands/kmem.py | 126 ++++++++++++++++++++++++++++------------- crash/types/page.py | 11 +++- crash/types/slab.py | 65 ++++++++++++++------- 3 files changed, 143 insertions(+), 59 deletions(-) diff --git a/crash/commands/kmem.py b/crash/commands/kmem.py index cd8b6aaa729..4570b9a27b3 100644 --- a/crash/commands/kmem.py +++ b/crash/commands/kmem.py @@ -8,29 +8,42 @@ :: - kmem addr - try to find addr within kmem caches - kmem -s [slabname] - check consistency of single or all kmem cache - kmem -S [slabname] - list objects in a single or all kmem caches - kmem -z - report zones - kmem -V - report vmstats + kmem [-s] [-S] addr - information about address + kmem -s [cache] - check consistency of single or all kmem cache + kmem -S[SS...] [cache] - list details / objects in a single or all kmem caches + kmem -z - report zones + kmem -V - report vmstats DESCRIPTION ----------- This command currently offers very basic kmem cache query and checking. +Currently it reports whether addr is a struct page, or slab object. If +it's a slab object, the -s parameter will tell more about the slab page +and the -S parameter will list all objects on the same slab page. + +The -s and -S parameters can be also used with a kmem cache name or +address, to check consistency or list details. If no cache is name is +given, it will check/list all caches. The -S parameter can be repeated +multiple times (up to 4), increasing the verbosity of listing. + +The -S parameter currently only works for SLUB kernels. 
""" -from typing import List +from typing import List, Optional import argparse from crash.commands import Command, ArgumentParser from crash.commands import CommandError, CommandLineError -from crash.types.slab import kmem_cache_get_all, kmem_cache_from_name -from crash.types.slab import slab_from_obj_addr, KmemCacheNotFound +from crash.types.slab import kmem_cache_get_all, kmem_cache_from_name,\ + kmem_cache_from_addr, KmemCache +from crash.types.slab import slab_from_obj_addr, KmemCacheNotFound,\ + slab_from_page from crash.types.node import for_each_zone, for_each_populated_zone +from crash.types.page import safe_page_from_page_addr from crash.types.vmstat import VmStat -from crash.util import get_symbol_value +from crash.util import get_symbol_value, safe_int from crash.exceptions import MissingSymbolError class KmemCommand(Command): @@ -40,16 +53,28 @@ def __init__(self, name: str) -> None: parser = ArgumentParser(prog=name) group = parser.add_mutually_exclusive_group() - group.add_argument('-s', nargs='?', const=True, default=False, + group.add_argument('-s', action='store_true', default=False, dest='slabcheck') - group.add_argument('-S', nargs='?', const=True, default=False, - dest='slablist') + group.add_argument('-S', action="count", dest='slablist') group.add_argument('-z', action='store_true', default=False) group.add_argument('-V', action='store_true', default=False) - group.add_argument('address', nargs='?') + parser.add_argument('address', nargs='?') super().__init__(name, parser) + def _find_kmem_cache(self, query: str) -> Optional[KmemCache]: + cache = None + try: + cache = kmem_cache_from_name(query) + except KmemCacheNotFound: + addr = safe_int(query) + if addr is not None: + try: + cache = kmem_cache_from_addr(addr) + except KmemCacheNotFound: + pass + return cache + def execute(self, args: argparse.Namespace) -> None: if args.z: self.print_zones() @@ -59,49 +84,68 @@ def execute(self, args: argparse.Namespace) -> None: self.print_vmstats() return + cache = None if args.slabcheck: - if args.slabcheck is True: + if args.address is None: print("Checking all kmem caches...") for cache in kmem_cache_get_all(): print(cache.name) cache.check_all() - else: - cache_name = args.slabcheck + print("Checking done.") + return + cache_name = args.address + cache = self._find_kmem_cache(cache_name) + if cache is not None: print(f"Checking kmem cache {cache_name}") - try: - cache = kmem_cache_from_name(cache_name) - except KmemCacheNotFound: - raise CommandError(f"Cache {cache_name} not found.") cache.check_all() - - print("Checking done.") - return + print("Checking done.") + return if args.slablist: - if args.slablist is True: + if args.address is None: print("Listing all kmem caches...") for cache in kmem_cache_get_all(): - cache.list_all() - else: - cache_name = args.slablist - try: - cache = kmem_cache_from_name(cache_name) - except KmemCacheNotFound: - raise CommandError(f"Cache {cache_name} not found.") - cache.list_all() - return + cache.list_all(args.slablist) + return + cache_name = args.address + cache = self._find_kmem_cache(cache_name) + if cache is not None: + cache.list_all(args.slablist) + return if not args.address: raise CommandLineError("no address specified") - try: - addr = int(args.address, 0) - except ValueError: + addr = safe_int(args.address) + if addr is None: raise CommandLineError("address must be numeric") - slab = slab_from_obj_addr(addr) - if not slab: - raise CommandError("Address not found in any kmem cache.") + slab = None + page = 
safe_page_from_page_addr(addr) + if page is not None: + #TODO improve + print(f"0x{addr:x} belongs to a struct page 0x{page.address:x} " + f"pfn {page.pfn}") + + if page.compound_head().is_slab(): + slab = slab_from_page(page) + name = slab.kmem_cache.name + if args.slabcheck or args.slablist: + print(f"page belongs to cache {name} slab " + f"{slab.short_header()}") + if args.slablist: + print("") + print(f"Slab details: {slab.long_header()}") + slab.print_objects() + return + else: + slab = slab_from_obj_addr(addr) + if not slab: + raise CommandError(f"Kmem cache not found: '{args.address}' is not " + f"a valid name of a kmem cache or an address " + f"known to the kmem subsystem.") + if slab is None: + return (valid, obj, reason) = slab.contains_obj(addr) name = slab.kmem_cache.name @@ -127,6 +171,10 @@ def execute(self, args: argparse.Namespace) -> None: reason_str = f" ({reason})" print(f"INVALID address on slab {slab.gdb_obj.address} " f"from cache {name}{obj_str}{reason_str}") + if args.slablist: + print("") + print(f"Slab details: {slab.long_header()}") + slab.print_objects() def __print_vmstat(self, vmstat: List[int], diffs: List[int]) -> None: vmstat_names = VmStat.get_stat_names() diff --git a/crash/types/page.py b/crash/types/page.py index e188f75e100..3ac0bc3dc4f 100644 --- a/crash/types/page.py +++ b/crash/types/page.py @@ -1,7 +1,8 @@ #!/usr/bin/python3 # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from typing import Dict, Union, TypeVar, Iterable, Callable, Tuple +from typing import Dict, Union, TypeVar, Iterable, Callable, Tuple,\ + Optional from math import log, ceil @@ -261,6 +262,14 @@ def page_from_addr(addr: int) -> 'Page': pfn = (addr - Page.directmap_base) // Page.PAGE_SIZE return pfn_to_page(pfn) +def safe_page_from_page_addr(addr: int) -> Optional[Page]: + if addr < Page.vmemmap_base: + return None + pfn = (addr - Page.vmemmap_base) // types.page_type.sizeof + if pfn > int(symvals.max_pfn): + return None + return Page.from_page_addr(addr) + def page_from_gdb_obj(gdb_obj: gdb.Value) -> 'Page': pfn = (int(gdb_obj.address) - Page.vmemmap_base) // types.page_type.sizeof return Page(gdb_obj, pfn) diff --git a/crash/types/slab.py b/crash/types/slab.py index f096811e4f1..c749b896363 100644 --- a/crash/types/slab.py +++ b/crash/types/slab.py @@ -167,6 +167,18 @@ def short_header(self) -> str: Return a short header consisting of slab's address and role. """ + @abstractmethod + def long_header(self) -> str: + """ + Return a long header consisting of slab's address, role and stats. + """ + + @abstractmethod + def print_objects(self) -> None: + """ + Print all objects in slab, indicating if they are free or allocated. 
+ """ + @abstractmethod def _do_populate_free(self) -> None: """ Populate the set of free objects """ @@ -271,6 +283,13 @@ def from_list_head(cls, list_head: gdb.Value, def short_header(self) -> str: return f"0x{self.address:x}" + def long_header(self) -> str: + return f"0x{self.address:x}" + + def print_objects(self) -> None: + #TODO implement + pass + # pylint: disable=arguments-differ def _pr_err(self, msg: str, misplaced: bool = False) -> None: msg = col_error(f"cache {self.kmem_cache.name} slab " @@ -483,6 +502,7 @@ def __init__(self, gdb_obj: gdb.Value, kmem_cache: 'KmemCacheSLUB') -> None: self.nr_objects = int(gdb_obj["objects"]) self.nr_inuse = int(gdb_obj["inuse"]) self.nr_free = self.nr_objects - self.nr_inuse + self.base_address = page_addr(int(gdb_obj.address)) def slab_role(self) -> str: self.kmem_cache.slub_process_once() @@ -498,9 +518,10 @@ def slab_role(self) -> str: def short_header(self) -> str: return f"0x{self.address:x} ({self.slab_role()})" - def print_header(self) -> str: - return (f"0x{self.address:x} ({self.slab_role()} objects {self.nr_objects} " - f"active {self.nr_inuse} free {self.nr_free}") + def long_header(self) -> str: + return (f"0x{self.address:x} ({self.slab_role()}) objects {self.nr_objects} " + f"active {self.nr_inuse} free {self.nr_free} base addr " + f"0x{self.base_address:x}") def _do_populate_free(self) -> None: cpu_freelists = self.kmem_cache.cpu_freelists @@ -707,7 +728,7 @@ def create(cls, name: str, gdb_obj: gdb.Value): return KmemCacheSLAB(name, gdb_obj) @abstractmethod - def list_all(self) -> None: + def list_all(self, verbosity: int) -> None: pass @abstractmethod @@ -761,7 +782,7 @@ def __init__(self, name: str, gdb_obj: gdb.Value) -> None: def setup_alien_cache_type(cls, gdbtype: gdb.Type) -> None: cls.alien_cache_type_exists = True - def list_all(self) -> None: + def list_all(self, verbosity: int) -> None: print("Not yet implemented for SLAB") def check_all(self) -> None: @@ -1078,8 +1099,8 @@ def __init__(self, name: str, gdb_obj: gdb.Value) -> None: self.cpu_freelists_sizes: List[int] = [0 for x in for_each_online_cpu()] self.processed = False - def list_all(self) -> None: - flags = ProcessingFlags(print_level=3) + def list_all(self, verbosity: int) -> None: + flags = ProcessingFlags(print_level=verbosity) self.process_all(flags) def check_all(self) -> None: @@ -1172,9 +1193,9 @@ def _process_percpu(self, flags: ProcessingFlags) -> None: if fill_cpu_slabs: self._add_percpu_slub(slab, slab_addr, f"CPU {cpu} slab") if flags.print_level >= 2: - print(f"CPU {cpu} slab: {slab.print_header()}") + print(f"CPU {cpu} slab: {slab.long_header()}") slab.warn_frozen(1, f"CPU {cpu}") - if flags.print_level >= 3: + if flags.print_level >= 4: slab.print_objects() partial = cpu_slab["partial"] @@ -1200,8 +1221,9 @@ def _process_percpu(self, flags: ProcessingFlags) -> None: self._add_percpu_slub(slab, int(partial), f"CPU {cpu} partial") pages = int(partial["pages"]) if flags.print_level >= 3: - print(f" {slab.print_header()}") - slab.print_objects() + print(f" {slab.long_header()}") + if flags.print_level >= 4: + slab.print_objects() slab.warn_frozen(1, f"CPU {cpu} partial") nr_objects += slab.nr_objects nr_active += slab.nr_inuse @@ -1257,14 +1279,15 @@ def process_all(self, flags: ProcessingFlags) -> None: node_nr_partial += 1 slub = SlabSLUB.from_list_head(list_head, self) if flags.print_level >= 3: - print(f"Partial slab {slub.print_header()}") - slub.print_objects() + print(f"Partial slab {slub.long_header()}") + if flags.print_level >= 4: + 
slub.print_objects() slub.warn_frozen(0, f"Node {nid} partial") if not self.processed: self.node_slabs[slub.address] = f"Node {nid} partial" nr_free_objs += slub.nr_free - if flags.print_level >= 3: + if flags.print_level >= 4: slub.print_objects() if nr_partial_expected != node_nr_partial: self._pr_err(f" node {nid} partial list has {node_nr_partial} " @@ -1276,8 +1299,9 @@ def process_all(self, flags: ProcessingFlags) -> None: nr_full_list_node += 1 slub = SlabSLUB.from_list_head(list_head, self) if flags.print_level >= 3: - print(f"Full slab {slub.print_header()}") - slub.print_objects() + print(f"Full slab {slub.long_header()}") + if flags.print_level >= 4: + slub.print_objects() slub.warn_frozen(0, f"Node {nid} full") if not self.processed: self.node_slabs[slub.address] = f"Node {nid} full" @@ -1346,14 +1370,17 @@ def kmem_cache_from_name(name: str) -> KmemCache: def kmem_cache_get_all() -> ValuesView[KmemCache]: return __kmem_caches.values() +def slab_from_page(page: Page) -> Slab: + if KmemCache.SLUB: + return SlabSLUB.from_page(page) + return SlabSLAB.from_page(page) + def slab_from_obj_addr(addr: int) -> Optional[Slab]: page = page_from_addr(addr).compound_head() if not page.is_slab(): return None - if KmemCache.SLUB: - return SlabSLUB.from_page(page) - return SlabSLAB.from_page(page) + return slab_from_page(page) type_cbs = TypeCallbacks([('struct page', SlabSLAB.check_page_type), ('struct slab', SlabSLAB.check_slab_type), From cfad3d7b4e60db07b53d5e48df56e05ebe4e97e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michal=20Koutn=C3=BD?= Date: Thu, 27 Jun 2019 13:49:32 +0200 Subject: [PATCH 320/367] git: Ignore vim swap files MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Michal Koutný --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 210be211c0b..872810f96d1 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ +.*.sw[op] *.pyc *~ doc-source/crash.*.rst From 0e1407922bb87038ff108148bc9e68bf9229c52b Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Tue, 25 Jun 2019 17:35:21 +0200 Subject: [PATCH 321/367] types: Add basic rbtree traversal MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add functions for traversing generic RB tree structures and accompanying tests (both for correctness and applicability to real kernel dumps). 
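For orientation, a minimal usage sketch of the traversal helpers added below. It mirrors the accompanying kernel-image test and assumes a crash-python gdb session against a kernel that exports vmap_area_root; only the rbtree helpers themselves come from this patch, the remaining names are illustrative:

    import gdb
    from crash.types.rbtree import rbtree_postorder_for_each_entry

    # The kernel keeps vmalloc areas in an rb tree rooted at vmap_area_root;
    # each node is embedded in a struct vmap_area via its 'rb_node' member.
    root = gdb.lookup_symbol('vmap_area_root')[0].value()
    va_type = gdb.lookup_type('struct vmap_area')

    for va in rbtree_postorder_for_each_entry(root, va_type, 'rb_node'):
        print(f"vmap_area 0x{int(va['va_start']):x}..0x{int(va['va_end']):x}")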
Signed-off-by: Jeff Mahoney Signed-off-by: Michal Koutný --- crash/types/rbtree.py | 103 ++++++++++++++++++++++ kernel-tests/test_types_rbtree.py | 34 +++++++ tests/Makefile | 3 +- tests/test-rbtree.c | 142 ++++++++++++++++++++++++++++++ tests/test_rbtree.py | 94 ++++++++++++++++++++ 5 files changed, 375 insertions(+), 1 deletion(-) create mode 100644 crash/types/rbtree.py create mode 100644 kernel-tests/test_types_rbtree.py create mode 100644 tests/test-rbtree.c create mode 100644 tests/test_rbtree.py diff --git a/crash/types/rbtree.py b/crash/types/rbtree.py new file mode 100644 index 00000000000..e662d9ec690 --- /dev/null +++ b/crash/types/rbtree.py @@ -0,0 +1,103 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +from typing import Optional, Iterable + +import gdb +from crash.util import container_of +from crash.util.symbols import Types +from crash.exceptions import ArgumentTypeError, UnexpectedGDBTypeError + +class TreeError(Exception): + pass + +class CorruptTreeError(TreeError): + pass + +types = Types(['struct rb_root', 'struct rb_node']) + +def _rb_left_deepest_node(node: gdb.Value) -> Optional[gdb.Value]: + while int(node) != 0: + if int(node['rb_left']) != 0: + node = node['rb_left'] + elif int(node['rb_right']) != 0: + node = node['rb_right'] + else: + return node + + return None + +def _rb_parent(node: gdb.Value) -> Optional[gdb.Value]: + addr = int(node['__rb_parent_color']) + addr &= ~0x3 + if addr == 0: + return None + return gdb.Value(addr).cast(node.type) + +def _rb_next_postorder(node: gdb.Value) -> Optional[gdb.Value]: + if int(node) == 0: + return None + + parent = _rb_parent(node) + if (parent is not None and int(node) == int(parent['rb_left']) and + int(parent['rb_right']) != 0): + return _rb_left_deepest_node(parent['rb_right']) + + return parent + +def rbtree_postorder_for_each(root: gdb.Value) -> Iterable[gdb.Value]: + """ + Iterate over nodes of a rooted RB tree in post-order fashion + + Args: + root: The tree to iterate. The value must be of type + ``struct rb_root`` or ``struct rb_root *``. + + Yields: + gdb.Value: The next node of the tree. The value is + of type ``struct rb_node``. + + Raises: + :obj:`.CorruptTreeError`: the list is corrupted + """ + if not isinstance(root, gdb.Value): + raise ArgumentTypeError('root', root, gdb.Value) + if root.type == types.rb_root_type.pointer(): + root = root.dereference() + elif root.type != types.rb_root_type: + raise UnexpectedGDBTypeError('root', root, types.rb_root_type) + + if root.type is not types.rb_root_type: + types.override('struct rb_root', root.type) + + if int(root.address) == 0: + raise CorruptTreeError("root is NULL pointer") + + node = _rb_left_deepest_node(root['rb_node']) + + while node is not None: + yield node.dereference() + node = _rb_next_postorder(node) + +def rbtree_postorder_for_each_entry(root: gdb.Value, + gdbtype: gdb.Type, member: str) -> Iterable[gdb.Value]: + """ + Iterate over nodes of a rooted RB tree in post-order fashion and yield each + node's containing object + + Args: + root: The tree to iterate. The value must be of type + ``struct rb_root`` or ``struct rb_root *``. + gdbtype: The type of the containing object + member: The name of the member in the containing object that + corresponds to the rb_node + + Yields: + gdb.Value: The next node of the tree. The value is + of the specified type. 
+ + Raises: + :obj:`.CorruptTreeError`: the list is corrupted + """ + for node in rbtree_postorder_for_each(root): + yield container_of(node, gdbtype, member) diff --git a/kernel-tests/test_types_rbtree.py b/kernel-tests/test_types_rbtree.py new file mode 100644 index 00000000000..a03818975f0 --- /dev/null +++ b/kernel-tests/test_types_rbtree.py @@ -0,0 +1,34 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +import unittest +import gdb + +from crash.types.rbtree import rbtree_postorder_for_each, rbtree_postorder_for_each_entry + +class TestRbtree(unittest.TestCase): + def setUp(self): + self.vmap_area_root = gdb.lookup_symbol('vmap_area_root')[0].value() + self.vmap_area_type = gdb.lookup_type('struct vmap_area') + self.rb_node_type = gdb.lookup_type('struct rb_node') + + def test_postorder_for_each(self): + count = 0 + last = None + for node in rbtree_postorder_for_each(self.vmap_area_root): + count += 1 + last = node + + self.assertTrue(count > 0) + self.assertTrue(last.type == self.rb_node_type) + + def test_postorder_for_each_entry(self): + count = 0 + last = None + for vmap_area in rbtree_postorder_for_each_entry(self.vmap_area_root, + self.vmap_area_type, 'rb_node'): + count += 1 + last = vmap_area + + self.assertTrue(count > 0) + self.assertTrue(last.type == self.vmap_area_type) + self.assertTrue(int(last['va_start']) <= int(last['va_end'])) diff --git a/tests/Makefile b/tests/Makefile index 09189355e55..d2605a7ce67 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -1,5 +1,6 @@ CFLAGS = -ggdb -TARGETS := test-util.o test-list.o test-list test-util test-percpu +TARGETS := test-util.o test-list.o test-list test-util test-percpu \ + test-rbtree.o test-rbtree TARGETS += test-syscache test_imports.py all: $(TARGETS) diff --git a/tests/test-rbtree.c b/tests/test-rbtree.c new file mode 100644 index 00000000000..a950c5b66f1 --- /dev/null +++ b/tests/test-rbtree.c @@ -0,0 +1,142 @@ +#include + +#define RB_RED 0 +#define RB_BLACK 1 +#define mk_par(p, c) ((unsigned long) (p) | (c)) + +struct rb_node { + unsigned long __rb_parent_color; + struct rb_node *rb_right; + struct rb_node *rb_left; +}; + +struct rb_root { + struct rb_node *rb_node; +}; + +struct number_node { + int v; + struct rb_node rb; +}; + +struct rb_node naked_node; +struct rb_root empty_tree_root; +struct rb_root singular_tree_root; +struct rb_root full_binary_tree_root; +struct rb_root linear_binary_tree_root; + + +int +main(void) +{ + /* The tree structure cannot be initialized statically (because + * __rb_parent_color is initialized with non-const expression) so + * allocate it in runtime (stack is mostly unused besides these) and + * publish the pointer in the global variable to find it easily via + * gdb. 
*/ + struct rb_node singular_tree[] = { + { + .__rb_parent_color = mk_par(NULL, RB_BLACK), + .rb_right = NULL, + .rb_left = NULL, + }, + }; + singular_tree_root.rb_node = &singular_tree[0]; + + struct number_node full_binary_tree[] = { + { + .v = 0, + .rb = { + .__rb_parent_color = mk_par(NULL, RB_BLACK), + .rb_right = &full_binary_tree[2].rb, + .rb_left = &full_binary_tree[1].rb, + }, + }, + { + .v = 1, + .rb = { + .__rb_parent_color = mk_par(&full_binary_tree[0].rb, RB_RED), + .rb_right = &full_binary_tree[4].rb, + .rb_left = &full_binary_tree[3].rb, + }, + }, + { + .v = 2, + .rb = { + .__rb_parent_color = mk_par(&full_binary_tree[0].rb, RB_RED), + .rb_right = &full_binary_tree[6].rb, + .rb_left = &full_binary_tree[5].rb, + }, + }, + { + .v = 3, + .rb = { + .__rb_parent_color = mk_par(&full_binary_tree[1].rb, RB_BLACK), + .rb_right = NULL, + .rb_left = NULL, + }, + }, + { + .v = 4, + .rb = { + .__rb_parent_color = mk_par(&full_binary_tree[1].rb, RB_BLACK), + .rb_right = NULL, + .rb_left = NULL, + }, + }, + { + .v = 5, + .rb = { + .__rb_parent_color = mk_par(&full_binary_tree[2].rb, RB_BLACK), + .rb_right = NULL, + .rb_left = NULL, + }, + }, + { + .v = 6, + .rb = { + .__rb_parent_color = mk_par(&full_binary_tree[2].rb, RB_BLACK), + .rb_right = NULL, + .rb_left = NULL, + }, + }, + }; + full_binary_tree_root.rb_node = &full_binary_tree[0].rb; + + /* Not a true RB tree but good for testing */ + struct number_node linear_binary_tree[] = { + { + .v = 0, + .rb = { + .__rb_parent_color = mk_par(NULL, RB_BLACK), + .rb_right = NULL, + .rb_left = &linear_binary_tree[1].rb, + }, + }, + { + .v = 1, + .rb = { + .__rb_parent_color = mk_par(&linear_binary_tree[0].rb, RB_RED), + .rb_right = &linear_binary_tree[2].rb, + .rb_left = NULL, + }, + }, + { + .v = 2, + .rb = { + .__rb_parent_color = mk_par(&linear_binary_tree[1].rb, RB_BLACK), + .rb_right = NULL, + .rb_left = NULL, + }, + }, + }; + linear_binary_tree_root.rb_node = &linear_binary_tree[0].rb; + + (void)&empty_tree_root; + (void)&singular_tree_root; + (void)&full_binary_tree_root; + + /* We want to give gdb a core dump to work with */ + abort(); + return 0; +} diff --git a/tests/test_rbtree.py b/tests/test_rbtree.py new file mode 100644 index 00000000000..5346ac7900a --- /dev/null +++ b/tests/test_rbtree.py @@ -0,0 +1,94 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +import unittest +import gdb + +from crash.types.rbtree import rbtree_postorder_for_each, rbtree_postorder_for_each_entry + +def get_symbol(name): + return gdb.lookup_symbol(name, None)[0].value() + +class TestRbtree(unittest.TestCase): + def setUp(self): + gdb.execute("file tests/test-rbtree", to_string=True) + try: + print() + print("--- Unsuppressable gdb output ---", end='') + + gdb.execute("run", to_string=False) + self.number_node_type = gdb.lookup_type("struct number_node") + except gdb.error as e: + # If we don't tear it down, the rest of the tests in + # other files will fail due to it using the wrong core file + self.tearDown() + raise(e) + + def tearDown(self): + try: + gdb.execute("detach", to_string=True) + gdb.execute("file") + except gdb.error: + print() + pass + print("--- End gdb output ---") + + def test_none_root(self): + count = 0 + with self.assertRaises(TypeError): + for node in rbtree_postorder_for_each(None): + count += 1 + + def test_invalid_value(self): + count = 0 + gdbtype = gdb.lookup_type('unsigned int') + with self.assertRaises(TypeError): + for node in rbtree_postorder_for_each(gdbtype): + count += 1 + + def 
test_invalid_value_pointer(self): + count = 0 + gdbtype = gdb.lookup_type('unsigned int').pointer() + with self.assertRaises(TypeError): + for node in rbtree_postorder_for_each(gdbtype): + count += 1 + + def test_nonroot_value(self): + count = 0 + nn = get_symbol('naked_node') + with self.assertRaises(TypeError): + for node in rbtree_postorder_for_each(nn): + count += 1 + + def test_empty_tree(self): + count = 0 + root = get_symbol('empty_tree_root') + for node in rbtree_postorder_for_each(root): + count += 1 + + self.assertEqual(count, 0) + + def test_singular_tree(self): + count = 0 + root = get_symbol('singular_tree_root') + for node in rbtree_postorder_for_each(root): + count += 1 + + self.assertEqual(count, 1) + + def test_linear_binary_tree(self): + vals = [] + root = get_symbol('linear_binary_tree_root') + for node in rbtree_postorder_for_each_entry(root, self.number_node_type, 'rb'): + vals.append(int(node['v'])) + + self.assertEqual(vals, [2, 1, 0]) + + def test_full_binary_tree(self): + vals = [] + root = get_symbol('full_binary_tree_root') + for node in rbtree_postorder_for_each_entry(root, self.number_node_type, 'rb'): + vals.append(int(node['v'])) + + self.assertEqual(vals, [3, 4, 1, 5, 6, 2, 0]) + From 47567cc0e7c689984873c49d852be03d562e1c4b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michal=20Koutn=C3=BD?= Date: Wed, 8 Jul 2020 12:39:46 +0200 Subject: [PATCH 322/367] crash.types.list: Unify return type of iterator MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We honor the principle of accepting both pointer to or object itself on input and giving out the object on output. The list iterator implementation returns the pointer (contradicting docs). Switch to the object type for consistency with other APIs (e.g. list_for_each_entry). 
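To illustrate the new contract, a short sketch of the adjusted caller pattern (assumes a crash-python gdb session; init_task and its 'tasks' list are only an example list_head, matching the slab.py adjustment below):

    import gdb
    from crash.types.list import list_for_each

    init_task = gdb.lookup_global_symbol('init_task').value()

    for node in list_for_each(init_task['tasks']):
        # node is now the struct list_head object itself, not a pointer,
        # so callers take its location from .address
        print(f"list_head at 0x{int(node.address):x}")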
Signed-off-by: Michal Koutný --- crash/types/list.py | 4 ++-- crash/types/slab.py | 7 ++++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/crash/types/list.py b/crash/types/list.py index 2e65f67296f..8fcd77647c4 100644 --- a/crash/types/list.py +++ b/crash/types/list.py @@ -71,7 +71,7 @@ def list_for_each(list_head: gdb.Value, include_head: bool = False, visited: Set[int] = set() if include_head: - yield list_head.address + yield list_head try: nxt = list_head[next_] @@ -104,7 +104,7 @@ def list_for_each(list_head: gdb.Value, include_head: bool = False, nxt = node[next_] # only yield after trying to read something from the node, no # point in giving out bogus list elements - yield node.address + yield node except gdb.error as e: raise BufferError("Failed to read list_head {:#x} in list {:#x}: {}" .format(int(node.address), int(list_head.address), str(e))) diff --git a/crash/types/slab.py b/crash/types/slab.py index 4d2fd9f461e..5368416b8ae 100644 --- a/crash/types/slab.py +++ b/crash/types/slab.py @@ -477,9 +477,10 @@ def get_slabs_of_type(self, node: gdb.Value, slabtype: int, slab_list = node[slab_list_fullname[slabtype]] for list_head in list_for_each(slab_list, reverse=reverse, exact_cycles=exact_cycles): + addr = int(list_head.address) try: - if int(list_head) in wrong_list_nodes.keys(): - wrong_type = wrong_list_nodes[int(list_head)] + if addr in wrong_list_nodes.keys(): + wrong_type = wrong_list_nodes[addr] print(col_error("Encountered head of {} slab list while traversing {} slab list, skipping" .format(slab_list_name[wrong_type], slab_list_name[slabtype]))) @@ -489,7 +490,7 @@ def get_slabs_of_type(self, node: gdb.Value, slabtype: int, except gdb.NotAvailableError: traceback.print_exc() print("failed to initialize slab object from list_head {:#x}: {}" - .format(int(list_head), sys.exc_info()[0])) + .format(addr, sys.exc_info()[0])) continue yield slab From 48b30471b02ef1f443d928b0447f13c3f1e85c22 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michal=20Koutn=C3=BD?= Date: Tue, 25 Jun 2019 18:36:42 +0200 Subject: [PATCH 323/367] filesystem: Add simple kernfs support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Implement single-dir and recursive listing of kernfs directories, that should provide basic insight into the kernfs hierarchy. Add crash dump tests of sysfs to make basic kernfs command invocation remain working on various kernel versions. Signed-off-by: Michal Koutný --- crash/commands/__init__.py | 4 ++ crash/commands/kernfs.py | 73 ++++++++++++++++++++++++++++ crash/subsystem/filesystem/kernfs.py | 67 +++++++++++++++++++++++++ kernel-tests/test_commands_kernfs.py | 45 +++++++++++++++++ 4 files changed, 189 insertions(+) create mode 100644 crash/commands/kernfs.py create mode 100644 crash/subsystem/filesystem/kernfs.py create mode 100644 kernel-tests/test_commands_kernfs.py diff --git a/crash/commands/__init__.py b/crash/commands/__init__.py index 5859bf8ef52..129f86fd55c 100644 --- a/crash/commands/__init__.py +++ b/crash/commands/__init__.py @@ -108,6 +108,10 @@ def format_help(self) -> str: return helptext + @staticmethod + def address(v: str) -> int: + return int(v, 16) + class Command(gdb.Command): """ The Command class is the starting point for implementing a new command. 
diff --git a/crash/commands/kernfs.py b/crash/commands/kernfs.py new file mode 100644 index 00000000000..9c475f1b9fb --- /dev/null +++ b/crash/commands/kernfs.py @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +import argparse + +from crash.commands import Command, ArgumentParser +from crash.commands import CommandLineError, CommandError +from crash.subsystem.filesystem.kernfs import find_kn, for_each_child +from crash.subsystem.filesystem.kernfs import KERNFS_DIR, KERNFS_LINK + +import gdb + +class KernfsCommand(Command): + + def __init__(self, name: str) -> None: + parser = ArgumentParser(prog=name) + + subparsers = parser.add_subparsers() + ls_parser = subparsers.add_parser('ls') + ls_parser.set_defaults(subcommand=self.command_ls) + ls_parser.add_argument('kn', type=ArgumentParser.address) + ls_parser.add_argument('-R', type=int, default=0) + ls_parser.add_argument('-f', action='store_false', default=True) + + super().__init__(name, parser) + + def command_ls(self, args: argparse.Namespace) -> None: + kn = find_kn(args.kn) + if not kn['flags'] & KERNFS_DIR: + raise CommandError("{} is not a kernfs directory".format(args.kn)) + + print("{:^6} {:^6} {:^32} {:^16}".format( + "flags", "mode", "name", "kernfs_node")) + self._ls_dir(kn, args.R, args) + + def _ls_dir(self, kn: gdb.Value, depth: int, args: argparse.Namespace, prefix: str = '') -> None: + prefix += kn['name'].string() + '/' + print(f"{prefix}:") + + children = for_each_child(kn) + if args.f: + children = sorted(children, + key=lambda kn: (not kn['flags'] & KERNFS_DIR, + kn['name'].string())) + + subdirs = [] + for ckn in children: + if ckn['flags'] & KERNFS_DIR: + subdirs.append(ckn) + self.show_one_kn(ckn, args) + print() + + if depth != 0: + for dkn in subdirs: + self._ls_dir(dkn, depth - 1, args, prefix) + + # pylint: disable=unused-argument + def show_one_kn(self, kn: gdb.Value, args: argparse.Namespace) -> None: + print(" {}{} {:>03o} {:32} {:016x}".format( + 'd' if kn['flags'] & KERNFS_DIR else ' ', + 'l' if kn['flags'] & KERNFS_LINK else ' ', + int(kn['mode']) & 0x1ff, + kn['name'].string(), + int(kn.address) + )) + + def execute(self, args: argparse.Namespace) -> None: + if hasattr(args, 'subcommand'): + args.subcommand(args) + else: + raise CommandLineError("no command specified") + +KernfsCommand("kernfs") diff --git a/crash/subsystem/filesystem/kernfs.py b/crash/subsystem/filesystem/kernfs.py new file mode 100644 index 00000000000..045b5583448 --- /dev/null +++ b/crash/subsystem/filesystem/kernfs.py @@ -0,0 +1,67 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +from typing import Iterable + +from crash.util import get_typed_pointer, AddressSpecifier +from crash.util.symbols import Types +from crash.exceptions import InvalidArgumentError +from crash.types.rbtree import rbtree_postorder_for_each_entry + +import gdb + +types = Types('struct kernfs_node') + +KERNFS_DIR = 1 +KERNFS_FILE = 2 +KERNFS_LINK = 4 + +def find_kn(addr: AddressSpecifier) -> gdb.Value: + """ + Finds ``struct kernfs_node`` by given address. + Note: Function does no checking whether address points to ``struct + kernfs_node``. This may change in future. 
+ + Args: + addr: representation of memory address + + Returns: + :obj:`gdb.Value`: ``struct kernfs_node`` + """ + kn = get_typed_pointer(addr, types.kernfs_node_type).dereference() + return kn + +def for_each_child(kn: gdb.Value) -> Iterable[gdb.Value]: + """ + Iterates over all child nodes of given kernfs_node. + + Args: + kn: ``struct kernfs_node`` of directory type + + Yields: + gdb.Value: ``struct kernfs_node`` + + Raises: + :obj:`.InvalidArgumentError`: kernfs_node is not a directory + """ + if int(kn['flags']) & KERNFS_DIR == 0: + raise InvalidArgumentError(f"kernfs_node at {kn.address} is not a directory") + + return rbtree_postorder_for_each_entry(kn['dir']['children'], types.kernfs_node_type, 'rb') + +def path_from_node(kn: gdb.Value) -> str: + """ + Traverses kernfs to root to return node's patch. + + Args: + kn: ``struct kernfs_node`` + + Returns: + str: path from root to kn (inclusive) + """ + path = [] + while int(kn['parent']): + path.append(kn['name'].string()) + kn = kn['parent'].dereference() + + return '/' + '/'.join(path[::-1]) diff --git a/kernel-tests/test_commands_kernfs.py b/kernel-tests/test_commands_kernfs.py new file mode 100644 index 00000000000..5d6718e46d1 --- /dev/null +++ b/kernel-tests/test_commands_kernfs.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +import unittest +import gdb +import io +import sys + + +from crash.commands.kernfs import KernfsCommand +from crash.commands import CommandLineError + +class TestCommandsKernfs(unittest.TestCase): + def setUp(self): + self.stdout = sys.stdout + self.redirected = io.StringIO() + sys.stdout = self.redirected + self.command = KernfsCommand("kernfs") + self.kn_addr = int(gdb.lookup_symbol('sysfs_root_kn', None)[0].value()) + + def tearDown(self): + sys.stdout = self.stdout + + def output(self): + return self.redirected.getvalue() + + def output_lines(self): + output = self.output() + return len(output.split("\n")) - 1 + + def test_kernfs_empty(self): + """`kernfs` raises CommandLineError""" + with self.assertRaises(CommandLineError): + self.command.invoke_uncaught("") + + def test_kernfs_list(self): + """`kernfs ls` produces valid output""" + self.command.invoke_uncaught(f"ls {self.kn_addr:x}") + # header + listing + self.assertTrue(self.output_lines() > 1) + + def test_kernfs_list_recursive(self): + """`kernfs ls` produces valid output""" + self.command.invoke_uncaught(f"ls -R 2 {self.kn_addr:x}") + # header + listing + self.assertTrue(self.output_lines() > 1) From 5b954ba6dc3e55d3b16ed6d18aa17455532d0e69 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michal=20Koutn=C3=BD?= Date: Tue, 25 Jun 2019 11:04:18 +0200 Subject: [PATCH 324/367] cgroup: Add basic functions and commands MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - list v1 controllers (aka /proc/cgroups) - list cgroup membership (aka /proc/$PID/cgroup) - list tasks in a cgroup (aka .../tasks or cgroup.threads) Signed-off-by: Michal Koutný --- crash/commands/cgroup.py | 99 ++++++++++++++++++++++++++++++ crash/subsystem/cgroup/__init__.py | 90 +++++++++++++++++++++++++++ 2 files changed, 189 insertions(+) create mode 100644 crash/commands/cgroup.py create mode 100644 crash/subsystem/cgroup/__init__.py diff --git a/crash/commands/cgroup.py b/crash/commands/cgroup.py new file mode 100644 index 00000000000..1ae351cbff0 --- /dev/null +++ b/crash/commands/cgroup.py @@ -0,0 +1,99 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab 
textwidth=79: + +import argparse + +from crash.cache.tasks import get_task +from crash.commands import Command, CommandError, ArgumentParser +from crash.subsystem.cgroup import ( + cgroup_from_root, + find_cgroup, + for_each_cgroup_task, + for_each_hierarchy, + for_each_subsys, + subsys_mask_to_names, +) +from crash.subsystem.filesystem.kernfs import path_from_node +from crash.util import AddressSpecifier + +class CgroupCommand(Command): + + def __init__(self, name: str) -> None: + parser = ArgumentParser(prog=name) + group = parser.add_mutually_exclusive_group() + + group.add_argument('-t', type=int, default=False, + help='Show task cgroup membership') + group.add_argument('-g', type=ArgumentParser.address, default=False, + help='List all tasks in cgroup') + group.add_argument('-s', type=int, default=False, # TODO cgroup arg type + help='Show cgroup attributes') + group.add_argument('-c', type=str, nargs=2, default=False, # TODO cgroup arg type + help='Show controller attributes of cgroup') + + super().__init__(name, parser) + + def execute(self, args: argparse.Namespace) -> None: + if args.t != False: + self.show_task(args.t) + elif args.g != False: + self.show_cgroup_tasks(args.g) + elif args.s: + raise NotImplementedError("NI") + elif args.c: + raise NotImplementedError("NI") + else: + self.show_controllers() + + def show_controllers(self) -> None: + """Output based on /proc/cgroups""" + + print("{:^16} {:^16} {:^16} {:^16} {:^16}".format( + "subsys", "hierarchy_id", "num_cgroups", "cgroup_subsys", + "cgroup_root")) + for ss in for_each_subsys(): + print("{:<16} {:>16} {:>16} {:016x} {:016x}".format( + ss['legacy_name'].string(), + int(ss['root']['hierarchy_id']), + int(ss['root']['nr_cgrps']['counter']), + int(ss.address), + int(ss['root']) + )) + + def show_task(self, pid: int) -> None: + try: + ltask = get_task(pid) + print("{:^12} {:^16} {:^32} {:^16} {:^20}".format( + "hierarchy_id", "cgroup_root", "controllers/name", "cgroup", "path" + )) + for h in sorted(for_each_hierarchy(), key=lambda h: int(h['hierarchy_id'])): + controllers = subsys_mask_to_names(h['subsys_mask']) + if h['name'].string(): + controllers.append("name={}".format(h['name'].string())) + + cgroup = cgroup_from_root(ltask.task_struct, h) + + print("{:>12} {:016x} {:<32} {:016x} {:<20}".format( + int(h['hierarchy_id']), + int(h.address), + ','.join(controllers), + int(cgroup.address), + path_from_node(cgroup['kn'].dereference()) + )) + + except KeyError: + raise CommandError("No such task with pid {}".format(pid)) + + def show_cgroup_tasks(self, addr: AddressSpecifier) -> None: + cgrp = find_cgroup(addr) + print("{:^10} {:^16}".format( + "PID", "task_struct" + )) + for t in sorted(for_each_cgroup_task(cgrp), key=lambda t: int(t['pid'])): + print("{:>10} {:016x}".format( + int(t['pid']), + int(t.address) + )) + + +CgroupCommand("cgroup") diff --git a/crash/subsystem/cgroup/__init__.py b/crash/subsystem/cgroup/__init__.py new file mode 100644 index 00000000000..55d5396a165 --- /dev/null +++ b/crash/subsystem/cgroup/__init__.py @@ -0,0 +1,90 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +from typing import Dict, Iterator, List + +from crash.exceptions import InvalidArgumentError, CorruptedError +from crash.types.list import list_for_each_entry +from crash.util import AddressSpecifier, get_typed_pointer +from crash.util.symbols import Types, Symvals, TypeCallbacks, SymbolCallbacks + +import gdb + +symvals = Symvals(['cgroup_roots', 'cgroup_subsys']) +types = Types([ + 
'struct cgroup', + 'struct cgroup_root', + 'struct cgroup_subsys', + 'struct cgrp_cset_link', + 'struct task_struct', +]) + +class Subsys: + _subsys_names: Dict[int, str] = dict() + _available_mask = 0 + + @classmethod + def init_subsys_ids(cls, subsys_enum: gdb.Symbol) -> None: + suffix = '_cgrp_id' + for k in subsys_enum.keys(): + if k == 'CGROUP_SUBSYS_COUNT': + continue + if subsys_enum[k].enumval in cls._subsys_names: + raise InvalidArgumentError("Enum {} is not unique".format(subsys_enum.name)) + if not k.endswith(suffix): + raise InvalidArgumentError("Enum {} has unknown names".format(subsys_enum.name)) + + cls._subsys_names[subsys_enum[k].enumval] = k[:-len(suffix)] + cls._available_mask |= (1 << subsys_enum[k].enumval) + + def for_each_subsys(self) -> Iterator[gdb.Value]: + for ssid in self._subsys_names: + yield symvals.cgroup_subsys[ssid].dereference() + + def subsys_mask_to_names(self, mask: int) -> List[str]: + unknown = mask & ~self._available_mask + if unknown: + raise InvalidArgumentError(f"Mask contains unknown controllers {unknown:x}") + + ret = [] + for ssid in self._subsys_names: + if mask & (1 << ssid): + ret.append(self._subsys_names[ssid]) + return ret + +_Subsys = Subsys() + +def for_each_hierarchy() -> Iterator[gdb.Value]: + # TODO should we factor in cgrp_dfl_visible? + return list_for_each_entry(symvals.cgroup_roots, + types.cgroup_root_type, 'root_list') + +def for_each_subsys() -> Iterator[gdb.Value]: + return _Subsys.for_each_subsys() + +def subsys_mask_to_names(mask: int) -> List[str]: + return _Subsys.subsys_mask_to_names(mask) + +def cgroup_from_root(task: gdb.Value, cgroup_root: gdb.Value) -> gdb.Value: + cssset = task['cgroups'].dereference() + for link in list_for_each_entry(cssset['cgrp_links'], types.cgrp_cset_link_type, 'cgrp_link'): + if link['cgrp']['root'] == cgroup_root.address: + return link['cgrp'].dereference() + + # TODO think about migrating tasks + raise CorruptedError( + "Task {int(task.address):016x} not under cgroup_root {int(cgroup_root.address):016x}}" + ) + +def find_cgroup(addr: AddressSpecifier) -> gdb.Value: + cgrp = get_typed_pointer(addr, types.cgroup_type).dereference() + return cgrp + +def for_each_cgroup_task(cgrp: gdb.Value) -> Iterator[gdb.Value]: + # TODO migrating tasks?, zombies? + for link in list_for_each_entry(cgrp['cset_links'], types.cgrp_cset_link_type, 'cset_link'): + cssset = link['cset'].dereference() + for task in list_for_each_entry(cssset['tasks'], types.task_struct_type, 'cg_list'): + yield task + +type_cbs = TypeCallbacks([('enum cgroup_subsys_id', Subsys.init_subsys_ids)]) From 1806f54482f5ce7c7c3e0ec15b3d631531d87870 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michal=20Koutn=C3=BD?= Date: Thu, 22 Aug 2019 13:31:54 +0200 Subject: [PATCH 325/367] cgroup: Add kernel image tests MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Simple test to check that cgroup command isn't broken upon kernel API change. 
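For context, what the test exercises end-to-end can also be reproduced by
driving the helpers from the previous patch directly in a crash-python
session. The following is only an illustrative sketch (not part of the
patch) and assumes kernel debuginfo is loaded so the delayed symbol lookups
have resolved:

  # Sketch: roughly what the `cgroup` command does internally.
  from crash.subsystem.cgroup import (
      for_each_hierarchy, for_each_subsys, subsys_mask_to_names)

  # /proc/cgroups-style view: one line per v1 controller
  for ss in for_each_subsys():
      print(ss['legacy_name'].string(),
            int(ss['root']['hierarchy_id']),
            int(ss['root']['nr_cgrps']['counter']))

  # controllers bound to each mounted hierarchy
  for h in for_each_hierarchy():
      names = subsys_mask_to_names(int(h['subsys_mask']))
      print(int(h['hierarchy_id']), ','.join(names) or '(none)')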
Signed-off-by: Michal Koutný --- kernel-tests/test_commands_cgroup.py | 45 ++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100644 kernel-tests/test_commands_cgroup.py diff --git a/kernel-tests/test_commands_cgroup.py b/kernel-tests/test_commands_cgroup.py new file mode 100644 index 00000000000..7e074f15b05 --- /dev/null +++ b/kernel-tests/test_commands_cgroup.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +import unittest +import gdb +import io +import sys + + +from crash.commands.cgroup import CgroupCommand +from crash.commands import CommandLineError + +class TestCommandsCgroup(unittest.TestCase): + def setUp(self): + self.stdout = sys.stdout + self.redirected = io.StringIO() + sys.stdout = self.redirected + self.command = CgroupCommand("cgroup") + # cgrp_dfl_root is available since v3.15-rc1 + cgroup_root = gdb.lookup_symbol('cgrp_dfl_root', None)[0].value() + self.cgrp_dfl_root = cgroup_root['cgrp'] + + def tearDown(self): + sys.stdout = self.stdout + + def output(self): + return self.redirected.getvalue() + + def output_lines(self): + output = self.output() + return len(output.split("\n")) - 1 + + def test_proc_cgroup(self): + """`cgroup` lists controllers""" + self.command.invoke_uncaught(f"") + # header + listing (at least one controller) + self.assertTrue(self.output_lines() > 1) + + def test_cgroup_tasks(self): + """`cgroup -g` lists cgroup tasks""" + addr = int(self.cgrp_dfl_root.address) + self.command.invoke_uncaught(f"-g {addr:x}") + # header + listing (at least one task) + self.assertTrue(self.output_lines() > 1) + + From 14fec5d8e2b3551ba5584eda6bbb273e746a3f23 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Tue, 17 Nov 2020 16:49:31 -0500 Subject: [PATCH 326/367] crash.subsystem.cgroup: fix annotation for init_subsys_ids subsys_enum is a gdb.Type not a gdb.Value Signed-off-by: Jeff Mahoney --- crash/subsystem/cgroup/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crash/subsystem/cgroup/__init__.py b/crash/subsystem/cgroup/__init__.py index 55d5396a165..4af9d7bfdd7 100644 --- a/crash/subsystem/cgroup/__init__.py +++ b/crash/subsystem/cgroup/__init__.py @@ -24,7 +24,7 @@ class Subsys: _available_mask = 0 @classmethod - def init_subsys_ids(cls, subsys_enum: gdb.Symbol) -> None: + def init_subsys_ids(cls, subsys_enum: gdb.Type) -> None: suffix = '_cgrp_id' for k in subsys_enum.keys(): if k == 'CGROUP_SUBSYS_COUNT': From 9e2c54ed0ec4f1f64397c72f48308befd4795b64 Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Thu, 19 Nov 2020 14:31:12 -0500 Subject: [PATCH 327/367] pylint: define sys.executable as /usr/bin/python3 gdb sets its own sys.executable as /usr/bin/python regardless of whether python 2 or 3 is used. Pylint tries to exec the interpreter and fails when it tries to use python 2 on python 3 code. 
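The mismatch is easy to confirm from gdb's embedded interpreter
(illustrative output; exact paths vary by distribution):

  # (gdb) python-interactive
  import sys
  print(sys.version_info[:2])   # e.g. (3, 6) -- the embedded interpreter is Python 3
  print(sys.executable)         # yet reports e.g. /usr/bin/python
  # pylint re-execs sys.executable, so it can end up invoking Python 2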
Signed-off-by: Jeff Mahoney --- tests/run-pylint.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/run-pylint.py b/tests/run-pylint.py index d5b110944d1..da77f607231 100644 --- a/tests/run-pylint.py +++ b/tests/run-pylint.py @@ -1,6 +1,10 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +# gdb sets sys.executable as python regardless of whether it's python2 or 3 +import sys +sys.executable = "/usr/bin/python3" + from pylint import lint import os import shlex From 276e53822b183bc9b90544fd826c25e92bcd43a2 Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Thu, 19 Nov 2020 14:32:36 -0500 Subject: [PATCH 328/367] pylint: fix wrong-import-order warnings with 'import gdb' The order for imports should be: standard modules, third party modules, local modules. gdb was usually imported last, so this fixes it. Signed-off-by: Jeff Mahoney --- crash/__init__.py | 1 + crash/addrxlat.py | 5 +++-- crash/arch/__init__.py | 4 ++-- crash/arch/ppc64.py | 4 ++-- crash/arch/x86_64.py | 4 ++-- crash/cache/__init__.py | 4 ++-- crash/cache/syscache.py | 4 ++-- crash/commands/__init__.py | 4 ++-- crash/commands/dev.py | 3 +-- crash/commands/dmesg.py | 4 ++-- crash/commands/kernfs.py | 4 ++-- crash/commands/lsmod.py | 4 ++-- crash/commands/mount.py | 4 ++-- crash/commands/ps.py | 4 ++-- crash/commands/task.py | 4 ++-- crash/commands/xfs.py | 4 ++-- crash/infra/lookup.py | 4 ++-- crash/kernel.py | 3 +-- crash/requirements/__init__.py | 4 ++-- crash/session.py | 4 ++-- crash/subsystem/cgroup/__init__.py | 4 ++-- crash/subsystem/filesystem/__init__.py | 4 ++-- crash/subsystem/filesystem/btrfs.py | 4 ++-- crash/subsystem/filesystem/decoders.py | 4 ++-- crash/subsystem/filesystem/ext3.py | 4 ++-- crash/subsystem/filesystem/kernfs.py | 4 ++-- crash/subsystem/filesystem/mount.py | 4 ++-- crash/subsystem/filesystem/xfs.py | 4 ++-- crash/subsystem/storage/__init__.py | 6 +++--- crash/subsystem/storage/block.py | 4 ++-- crash/subsystem/storage/blockmq.py | 4 ++-- crash/subsystem/storage/blocksq.py | 4 ++-- crash/subsystem/storage/decoders.py | 4 ++-- crash/subsystem/storage/device_mapper.py | 4 ++-- crash/types/bitmap.py | 4 ++-- crash/types/classdev.py | 4 ++-- crash/types/cpu.py | 4 ++-- crash/types/klist.py | 4 ++-- crash/types/list.py | 4 ++-- crash/types/module.py | 4 ++-- crash/types/node.py | 4 ++-- crash/types/page.py | 4 ++-- crash/types/percpu.py | 4 ++-- crash/types/rbtree.py | 1 + crash/types/sbitmap.py | 4 ++-- crash/types/slab.py | 4 ++-- crash/types/task.py | 4 ++-- crash/types/vmstat.py | 4 ++-- crash/types/zone.py | 4 ++-- crash/util/__init__.py | 4 ++-- crash/util/symbols.py | 4 ++-- 51 files changed, 100 insertions(+), 99 deletions(-) diff --git a/crash/__init__.py b/crash/__init__.py index a7d1c344683..19ed28db6fe 100644 --- a/crash/__init__.py +++ b/crash/__init__.py @@ -2,6 +2,7 @@ # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: import gdb + import kdump.target def current_target() -> kdump.target.Target: diff --git a/crash/addrxlat.py b/crash/addrxlat.py index fa634cdfb5c..0638e7da532 100644 --- a/crash/addrxlat.py +++ b/crash/addrxlat.py @@ -1,14 +1,15 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +import gdb + import addrxlat import crash + from crash.cache.syscache import utsname from crash.util import offsetof from crash.util.symbols import Types -import gdb - types = Types(['uint32_t *', 'uint64_t *']) class TranslationContext(addrxlat.Context): diff --git a/crash/arch/__init__.py 
b/crash/arch/__init__.py index eb354716cd1..2c3d80b9a63 100644 --- a/crash/arch/__init__.py +++ b/crash/arch/__init__.py @@ -3,11 +3,11 @@ from typing import List, Iterator, Any, Optional, Type -import crash - import gdb from gdb.FrameDecorator import FrameDecorator +import crash + class FetchRegistersCallback: """ The base class from which to implement the fetch_registers callback. diff --git a/crash/arch/ppc64.py b/crash/arch/ppc64.py index 5a9beb9809a..07da586cad7 100644 --- a/crash/arch/ppc64.py +++ b/crash/arch/ppc64.py @@ -1,11 +1,11 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +import gdb + from crash.arch import CrashArchitecture, KernelFrameFilter, register_arch from crash.arch import FetchRegistersCallback -import gdb - class FR_Placeholder(FetchRegistersCallback): # pylint: disable=abstract-method pass diff --git a/crash/arch/x86_64.py b/crash/arch/x86_64.py index a354a81eb59..e816b34e947 100644 --- a/crash/arch/x86_64.py +++ b/crash/arch/x86_64.py @@ -1,13 +1,13 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +import gdb + from crash.arch import CrashArchitecture, KernelFrameFilter, register_arch from crash.arch import FetchRegistersCallback from crash.util.symbols import Types, MinimalSymvals from crash.util.symbols import TypeCallbacks, MinimalSymbolCallbacks -import gdb - types = Types(['struct inactive_task_frame *', 'struct thread_info *', 'unsigned long *']) msymvals = MinimalSymvals(['thread_return']) diff --git a/crash/cache/__init__.py b/crash/cache/__init__.py index cbf829fa56d..40eddbe6ebc 100644 --- a/crash/cache/__init__.py +++ b/crash/cache/__init__.py @@ -5,10 +5,10 @@ import glob import importlib -from crash.infra import autoload_submodules - import gdb +from crash.infra import autoload_submodules + class CrashCache: def refresh(self) -> None: pass diff --git a/crash/cache/syscache.py b/crash/cache/syscache.py index 1694a37ba0a..f56d7c5c60d 100644 --- a/crash/cache/syscache.py +++ b/crash/cache/syscache.py @@ -7,14 +7,14 @@ import zlib from datetime import timedelta +import gdb + from crash.exceptions import DelayedAttributeError from crash.cache import CrashCache from crash.util import array_size from crash.util.symbols import Types, Symvals, SymbolCallbacks, MinimalSymvals from crash.infra.lookup import DelayedValue -import gdb - ImageLocation = Dict[str, Dict[str, int]] class CrashUtsnameCache(CrashCache): diff --git a/crash/commands/__init__.py b/crash/commands/__init__.py index 129f86fd55c..7c0cb5a0952 100644 --- a/crash/commands/__init__.py +++ b/crash/commands/__init__.py @@ -59,10 +59,10 @@ def execute(self, args: argparse.Namespace) -> None: import importlib import argparse -from crash.exceptions import DelayedAttributeError, ArgumentTypeError - import gdb +from crash.exceptions import DelayedAttributeError, ArgumentTypeError + class CommandError(RuntimeError): """An error occured while executing this command""" diff --git a/crash/commands/dev.py b/crash/commands/dev.py index 700f4c0f4f3..5a6ebdc1031 100644 --- a/crash/commands/dev.py +++ b/crash/commands/dev.py @@ -10,11 +10,10 @@ """ import argparse + from crash.commands import Command, ArgumentParser from crash.subsystem.storage import for_each_disk, gendisk_name from crash.subsystem.storage.block import queue_request_stats -import gdb - class DevCommand(Command): """display character and block devices""" diff --git a/crash/commands/dmesg.py b/crash/commands/dmesg.py index c92e7194b5a..7b6c7957807 100644 --- 
a/crash/commands/dmesg.py +++ b/crash/commands/dmesg.py @@ -144,12 +144,12 @@ import re import argparse +import gdb + from crash.commands import Command, ArgumentParser, CommandError from crash.exceptions import DelayedAttributeError from crash.util.symbols import Types, Symvals -import gdb - types = Types(['struct printk_log *', 'char *']) symvals = Symvals(['log_buf', 'log_buf_len', 'log_first_idx', 'log_next_idx', 'clear_seq', 'log_first_seq', 'log_next_seq']) diff --git a/crash/commands/kernfs.py b/crash/commands/kernfs.py index 9c475f1b9fb..578eff8d109 100644 --- a/crash/commands/kernfs.py +++ b/crash/commands/kernfs.py @@ -3,13 +3,13 @@ import argparse +import gdb + from crash.commands import Command, ArgumentParser from crash.commands import CommandLineError, CommandError from crash.subsystem.filesystem.kernfs import find_kn, for_each_child from crash.subsystem.filesystem.kernfs import KERNFS_DIR, KERNFS_LINK -import gdb - class KernfsCommand(Command): def __init__(self, name: str) -> None: diff --git a/crash/commands/lsmod.py b/crash/commands/lsmod.py index 1185b70a5e2..c3e6b189a39 100644 --- a/crash/commands/lsmod.py +++ b/crash/commands/lsmod.py @@ -31,6 +31,8 @@ import fnmatch import argparse +import gdb + from crash.commands import Command, ArgumentParser from crash.types.module import for_each_module from crash.util import struct_has_member @@ -38,8 +40,6 @@ from crash.types.list import list_for_each_entry from crash.types.percpu import get_percpu_var -import gdb - types = Types(['struct module_use']) class ModuleCommand(Command): diff --git a/crash/commands/mount.py b/crash/commands/mount.py index c664095c6c8..8c2e805509b 100644 --- a/crash/commands/mount.py +++ b/crash/commands/mount.py @@ -13,14 +13,14 @@ import argparse +import gdb + from crash.commands import Command, ArgumentParser from crash.subsystem.filesystem.mount import d_path, for_each_mount from crash.subsystem.filesystem.mount import mount_device, mount_fstype from crash.subsystem.filesystem.mount import mount_super, mount_flags from crash.subsystem.filesystem.mount import mount_root -import gdb - class MountCommand(Command): """display mounted file systems""" diff --git a/crash/commands/ps.py b/crash/commands/ps.py index b7ec475ed10..95cd47ba6f8 100755 --- a/crash/commands/ps.py +++ b/crash/commands/ps.py @@ -424,12 +424,12 @@ import fnmatch import re +import gdb + from crash.commands import Command, ArgumentParser from crash.commands import CommandError from crash.types.task import LinuxTask, TaskStateFlags as TF -import gdb - class TaskFormat: """ This class is responsible for converting the arguments into formatting diff --git a/crash/commands/task.py b/crash/commands/task.py index 535bf0b629d..0b21b993f5e 100644 --- a/crash/commands/task.py +++ b/crash/commands/task.py @@ -26,11 +26,11 @@ import argparse +import gdb + from crash.commands import Command, ArgumentParser import crash.cache.tasks -import gdb - class TaskCommand(Command): """select task by pid""" diff --git a/crash/commands/xfs.py b/crash/commands/xfs.py index f3c541dd6f7..7bc1e017a70 100644 --- a/crash/commands/xfs.py +++ b/crash/commands/xfs.py @@ -23,6 +23,8 @@ import argparse +import gdb + from crash.commands import Command, ArgumentParser from crash.commands import CommandLineError, CommandError from crash.types.list import list_for_each_entry, list_empty @@ -43,8 +45,6 @@ from crash.util import decode_flags, struct_has_member from crash.util.symbols import Types -import gdb - types = Types(['struct xfs_buf *']) class XFSCommand(Command): 
diff --git a/crash/infra/lookup.py b/crash/infra/lookup.py index a2046ac65be..ad50cdc0b0e 100644 --- a/crash/infra/lookup.py +++ b/crash/infra/lookup.py @@ -3,12 +3,12 @@ from typing import Tuple, Any, Union, Optional +import gdb + from crash.infra.callback import ObjfileEventCallback from crash.infra.callback import Callback from crash.exceptions import DelayedAttributeError -import gdb - class NamedCallback(ObjfileEventCallback): """ A base class for Callbacks with names diff --git a/crash/kernel.py b/crash/kernel.py index d48d7973b21..4add626e3af 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -9,6 +9,7 @@ import os.path from elftools.elf.elffile import ELFFile +import gdb import crash import crash.arch @@ -19,8 +20,6 @@ from crash.util.symbols import Types, Symvals, Symbols from crash.exceptions import MissingSymbolError, InvalidArgumentError -import gdb - class CrashKernelError(RuntimeError): """Raised when an error occurs while initializing the debugging session""" diff --git a/crash/requirements/__init__.py b/crash/requirements/__init__.py index 465843a12ae..e8f2cef3cee 100644 --- a/crash/requirements/__init__.py +++ b/crash/requirements/__init__.py @@ -1,11 +1,11 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from crash.exceptions import IncompatibleGDBError - # Perform some sanity checks to ensure that we can actually work import gdb +from crash.exceptions import IncompatibleGDBError + try: x1 = gdb.Target del x1 diff --git a/crash/session.py b/crash/session.py index c2b55268342..9f912e8b847 100644 --- a/crash/session.py +++ b/crash/session.py @@ -1,11 +1,11 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +import gdb + from crash.infra import autoload_submodules from crash.kernel import CrashKernel, CrashKernelError -import gdb - class Session: """ crash.Session is the main driver component for crash-python diff --git a/crash/subsystem/cgroup/__init__.py b/crash/subsystem/cgroup/__init__.py index 4af9d7bfdd7..93438a066a6 100644 --- a/crash/subsystem/cgroup/__init__.py +++ b/crash/subsystem/cgroup/__init__.py @@ -3,13 +3,13 @@ from typing import Dict, Iterator, List +import gdb + from crash.exceptions import InvalidArgumentError, CorruptedError from crash.types.list import list_for_each_entry from crash.util import AddressSpecifier, get_typed_pointer from crash.util.symbols import Types, Symvals, TypeCallbacks, SymbolCallbacks -import gdb - symvals = Symvals(['cgroup_roots', 'cgroup_subsys']) types = Types([ 'struct cgroup', diff --git a/crash/subsystem/filesystem/__init__.py b/crash/subsystem/filesystem/__init__.py index 98fc056f0fa..b0e1487880d 100644 --- a/crash/subsystem/filesystem/__init__.py +++ b/crash/subsystem/filesystem/__init__.py @@ -3,14 +3,14 @@ from typing import Iterable, Union +import gdb + from crash.util import container_of, get_typed_pointer, decode_flags from crash.util.symbols import Types, Symvals from crash.infra.lookup import DelayedSymval, DelayedType from crash.types.list import list_for_each_entry from crash.subsystem.storage import block_device_name -import gdb - types = Types('struct super_block') symvals = Symvals('super_blocks') diff --git a/crash/subsystem/filesystem/btrfs.py b/crash/subsystem/filesystem/btrfs.py index a05befd2355..f95bd4c21e1 100644 --- a/crash/subsystem/filesystem/btrfs.py +++ b/crash/subsystem/filesystem/btrfs.py @@ -3,13 +3,13 @@ import uuid +import gdb + from crash.exceptions import InvalidArgumentError from crash.util import decode_uuid, 
struct_has_member, container_of from crash.util.symbols import Types from crash.subsystem.filesystem import is_fstype_super -import gdb - types = Types(['struct btrfs_inode', 'struct btrfs_fs_info *', 'struct btrfs_fs_info']) diff --git a/crash/subsystem/filesystem/decoders.py b/crash/subsystem/filesystem/decoders.py index a3f33596069..8e6eaab6e68 100644 --- a/crash/subsystem/filesystem/decoders.py +++ b/crash/subsystem/filesystem/decoders.py @@ -3,13 +3,13 @@ from typing import Any +import gdb + from crash.util.symbols import Types from crash.subsystem.storage import block_device_name from crash.subsystem.storage.decoders import Decoder, decode_bh from crash.subsystem.filesystem import super_fstype -import gdb - class DIOBioDecoder(Decoder): """ Decodes a bio used for direct i/o. diff --git a/crash/subsystem/filesystem/ext3.py b/crash/subsystem/filesystem/ext3.py index bf1dd16ee0c..eb5b7351413 100644 --- a/crash/subsystem/filesystem/ext3.py +++ b/crash/subsystem/filesystem/ext3.py @@ -1,11 +1,11 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +import gdb + from crash.subsystem.storage import block_device_name from crash.subsystem.storage.decoders import Decoder -import gdb - class Ext3Decoder(Decoder): """ Decodes an ext3 journal buffer diff --git a/crash/subsystem/filesystem/kernfs.py b/crash/subsystem/filesystem/kernfs.py index 045b5583448..e89b3e5c83f 100644 --- a/crash/subsystem/filesystem/kernfs.py +++ b/crash/subsystem/filesystem/kernfs.py @@ -3,13 +3,13 @@ from typing import Iterable +import gdb + from crash.util import get_typed_pointer, AddressSpecifier from crash.util.symbols import Types from crash.exceptions import InvalidArgumentError from crash.types.rbtree import rbtree_postorder_for_each_entry -import gdb - types = Types('struct kernfs_node') KERNFS_DIR = 1 diff --git a/crash/subsystem/filesystem/mount.py b/crash/subsystem/filesystem/mount.py index 4df1006096c..6da2b47f3bf 100644 --- a/crash/subsystem/filesystem/mount.py +++ b/crash/subsystem/filesystem/mount.py @@ -14,13 +14,13 @@ from typing import Iterator, Callable, Any +import gdb + from crash.subsystem.filesystem import super_fstype from crash.types.list import list_for_each_entry from crash.util import container_of, decode_flags, struct_has_member from crash.util.symbols import Types, Symvals, TypeCallbacks, SymbolCallbacks -import gdb - MNT_NOSUID = 0x01 MNT_NODEV = 0x02 MNT_NOEXEC = 0x04 diff --git a/crash/subsystem/filesystem/xfs.py b/crash/subsystem/filesystem/xfs.py index 3c032c693ef..89958bba306 100644 --- a/crash/subsystem/filesystem/xfs.py +++ b/crash/subsystem/filesystem/xfs.py @@ -9,6 +9,8 @@ import uuid +import gdb + from crash.exceptions import InvalidArgumentError from crash.types.list import list_for_each_entry from crash.util import container_of, decode_uuid_t, decode_flags @@ -18,8 +20,6 @@ from crash.subsystem.storage import block_device_name from crash.subsystem.storage.decoders import Decoder -import gdb - # XFS inode locks XFS_IOLOCK_EXCL = 0x01 XFS_IOLOCK_SHARED = 0x02 diff --git a/crash/subsystem/storage/__init__.py b/crash/subsystem/storage/__init__.py index 0e970aeeef4..1ed6ec90b4c 100644 --- a/crash/subsystem/storage/__init__.py +++ b/crash/subsystem/storage/__init__.py @@ -3,15 +3,15 @@ from typing import Iterable +import gdb +from gdb.types import get_basic_type + from crash.util import container_of, struct_has_member from crash.util.symbols import Types, Symvals, SymbolCallbacks, TypeCallbacks from crash.types.classdev import for_each_class_device 
from crash.exceptions import DelayedAttributeError, InvalidArgumentError from crash.cache.syscache import kernel, jiffies_to_msec -import gdb -from gdb.types import get_basic_type - types = Types(['struct gendisk', 'struct hd_struct', 'struct device', 'struct device_type', 'struct bdev_inode', 'struct request_queue', 'struct request', 'enum req_flag_bits', diff --git a/crash/subsystem/storage/block.py b/crash/subsystem/storage/block.py index b4bf748fb9f..8fc2361b836 100644 --- a/crash/subsystem/storage/block.py +++ b/crash/subsystem/storage/block.py @@ -3,6 +3,8 @@ from typing import Iterable, Tuple +import gdb + from crash.util.symbols import Types from crash.subsystem.storage import queue_is_mq from crash.subsystem.storage.blocksq import sq_for_each_request_in_queue, \ @@ -10,8 +12,6 @@ from crash.subsystem.storage.blockmq import mq_for_each_request_in_queue, \ mq_requests_in_flight, mq_requests_queued, mq_queue_request_stats -import gdb - def requests_in_flight(queue: gdb.Value) -> Tuple[int, int]: """ Report how many requests are in flight for this queue diff --git a/crash/subsystem/storage/blockmq.py b/crash/subsystem/storage/blockmq.py index 827e23b16f6..acb3de4b0cd 100644 --- a/crash/subsystem/storage/blockmq.py +++ b/crash/subsystem/storage/blockmq.py @@ -3,13 +3,13 @@ from typing import Iterable, Tuple +import gdb + from crash.util.symbols import Types from crash.subsystem.storage import queue_is_mq, rq_is_sync, rq_in_flight from crash.types.sbitmap import sbitmap_for_each_set from crash.exceptions import InvalidArgumentError -import gdb - class NoQueueError(RuntimeError): pass diff --git a/crash/subsystem/storage/blocksq.py b/crash/subsystem/storage/blocksq.py index badfe4fb31a..e64ed1deace 100644 --- a/crash/subsystem/storage/blocksq.py +++ b/crash/subsystem/storage/blocksq.py @@ -3,14 +3,14 @@ from typing import Iterable, Tuple +import gdb + from crash.util import struct_has_member from crash.util.symbols import Types from crash.types.list import list_for_each_entry from crash.subsystem.storage import queue_is_mq from crash.exceptions import InvalidArgumentError -import gdb - class NoQueueError(RuntimeError): pass diff --git a/crash/subsystem/storage/decoders.py b/crash/subsystem/storage/decoders.py index 167685b2055..b7db144cef2 100644 --- a/crash/subsystem/storage/decoders.py +++ b/crash/subsystem/storage/decoders.py @@ -3,11 +3,11 @@ from typing import Union, List, Dict, Iterable, Type, Any +import gdb + from crash.infra.lookup import SymbolCallback from crash.subsystem.storage import block_device_name -import gdb - EndIOSpecifier = Union[int, str, List[str], gdb.Value, gdb.Symbol] class Decoder: diff --git a/crash/subsystem/storage/device_mapper.py b/crash/subsystem/storage/device_mapper.py index 5e6d325e4f9..0475af3362f 100644 --- a/crash/subsystem/storage/device_mapper.py +++ b/crash/subsystem/storage/device_mapper.py @@ -3,13 +3,13 @@ from typing import Callable, Any +import gdb + from crash.util import container_of from crash.util.symbols import Types from crash.subsystem.storage import block_device_name from crash.subsystem.storage.decoders import Decoder, decode_bio -import gdb - class ClonedBioReqDecoder(Decoder): """ Decodes a request-based device mapper cloned bio diff --git a/crash/types/bitmap.py b/crash/types/bitmap.py index 5676b8c0740..befaa3a2a09 100644 --- a/crash/types/bitmap.py +++ b/crash/types/bitmap.py @@ -13,11 +13,11 @@ from typing import Iterable, Tuple +import gdb + from crash.exceptions import InvalidArgumentError from crash.util.symbols import 
Types -import gdb - types = Types('unsigned long') def _check_bitmap_type(bitmap: gdb.Value) -> None: diff --git a/crash/types/classdev.py b/crash/types/classdev.py index 05a7788562b..1d861dd0bbc 100644 --- a/crash/types/classdev.py +++ b/crash/types/classdev.py @@ -6,12 +6,12 @@ from typing import Iterable +import gdb + from crash.types.klist import klist_for_each from crash.util import struct_has_member, container_of from crash.util.symbols import Types, TypeCallbacks -import gdb - types = Types(['struct device', 'struct device_private']) class ClassdevState: diff --git a/crash/types/cpu.py b/crash/types/cpu.py index 7ca1c0b662e..0485084c71a 100644 --- a/crash/types/cpu.py +++ b/crash/types/cpu.py @@ -6,12 +6,12 @@ from typing import Iterable, List +import gdb + from crash.util.symbols import SymbolCallbacks from crash.types.bitmap import for_each_set_bit from crash.exceptions import DelayedAttributeError -import gdb - # this wraps no particular type, rather it's a placeholder for # functions to iterate over online cpu's etc. class TypesCPUClass: diff --git a/crash/types/klist.py b/crash/types/klist.py index 6283971a330..b2966715c7d 100644 --- a/crash/types/klist.py +++ b/crash/types/klist.py @@ -3,13 +3,13 @@ from typing import Iterable +import gdb + from crash.util import container_of from crash.types.list import list_for_each_entry from crash.exceptions import CorruptedError, InvalidArgumentError from crash.util.symbols import Types -import gdb - types = Types(['struct klist_node', 'struct klist']) class KlistCorruptedError(CorruptedError): diff --git a/crash/types/list.py b/crash/types/list.py index 7d7cf8fb0e0..c6f7bdabfc2 100644 --- a/crash/types/list.py +++ b/crash/types/list.py @@ -3,12 +3,12 @@ from typing import Iterator, Set +import gdb + from crash.util import container_of from crash.util.symbols import Types from crash.exceptions import ArgumentTypeError, UnexpectedGDBTypeError -import gdb - class ListError(Exception): pass diff --git a/crash/types/module.py b/crash/types/module.py index 6b1f318a548..5391ae47a7b 100644 --- a/crash/types/module.py +++ b/crash/types/module.py @@ -3,11 +3,11 @@ from typing import Iterable, Tuple +import gdb + from crash.types.list import list_for_each_entry from crash.util.symbols import Symvals, Types -import gdb - symvals = Symvals(['modules']) types = Types(['struct module']) diff --git a/crash/types/node.py b/crash/types/node.py index 401bc1efff8..cc4fc71609b 100644 --- a/crash/types/node.py +++ b/crash/types/node.py @@ -6,6 +6,8 @@ from typing import Iterable, List, Type, TypeVar +import gdb + import crash from crash.util.symbols import Symbols, Symvals, Types, SymbolCallbacks from crash.types.percpu import get_percpu_var @@ -13,8 +15,6 @@ from crash.exceptions import DelayedAttributeError import crash.types.zone -import gdb - symbols = Symbols(['numa_node']) symvals = Symvals(['numa_cpu_lookup_table', 'node_data']) types = Types(['pg_data_t', 'struct zone']) diff --git a/crash/types/page.py b/crash/types/page.py index 3ac0bc3dc4f..8648615fe92 100644 --- a/crash/types/page.py +++ b/crash/types/page.py @@ -6,6 +6,8 @@ from math import log, ceil +import gdb + import crash from crash.util import find_member_variant from crash.util.symbols import Types, Symvals, TypeCallbacks @@ -13,8 +15,6 @@ from crash.cache.syscache import config from crash.exceptions import DelayedAttributeError -import gdb - #TODO debuginfo won't tell us, depends on version? 
PAGE_MAPPING_ANON = 1 diff --git a/crash/types/percpu.py b/crash/types/percpu.py index 1f8d6e8f9dc..9b61d09601b 100644 --- a/crash/types/percpu.py +++ b/crash/types/percpu.py @@ -3,6 +3,8 @@ from typing import Dict, Union, List, Tuple +import gdb + from crash.util import array_size, struct_has_member from crash.util.symbols import Types, Symvals, MinimalSymvals from crash.util.symbols import MinimalSymbolCallbacks, SymbolCallbacks @@ -12,8 +14,6 @@ from crash.types.page import Page from crash.types.cpu import highest_possible_cpu_nr -import gdb - SymbolOrValue = Union[gdb.Value, gdb.Symbol] class PerCPUError(TypeError): diff --git a/crash/types/rbtree.py b/crash/types/rbtree.py index e662d9ec690..5af2a1ba5b9 100644 --- a/crash/types/rbtree.py +++ b/crash/types/rbtree.py @@ -4,6 +4,7 @@ from typing import Optional, Iterable import gdb + from crash.util import container_of from crash.util.symbols import Types from crash.exceptions import ArgumentTypeError, UnexpectedGDBTypeError diff --git a/crash/types/sbitmap.py b/crash/types/sbitmap.py index dbf653aa2b5..ce13cf40641 100644 --- a/crash/types/sbitmap.py +++ b/crash/types/sbitmap.py @@ -8,12 +8,12 @@ from typing import Iterable +import gdb + from crash.exceptions import InvalidArgumentError from crash.util.symbols import Types from crash.util import struct_has_member -import gdb - types = Types(['struct sbitmap', 'struct sbitmap_word']) def sbitmap_for_each_set(sbitmap: gdb.Value) -> Iterable[int]: diff --git a/crash/types/slab.py b/crash/types/slab.py index 3e168b91a62..18741bfd04c 100644 --- a/crash/types/slab.py +++ b/crash/types/slab.py @@ -10,6 +10,8 @@ import sys import traceback +import gdb + from crash.util import container_of, find_member_variant,\ safe_find_member_variant from crash.util.symbols import Types, TypeCallbacks, SymbolCallbacks @@ -21,8 +23,6 @@ from crash.types.cpu import for_each_online_cpu from crash.types.node import numa_node_id -import gdb - # TODO: put in utils def print_flags(val: int, names: Dict[str, int]) -> str: first = True diff --git a/crash/types/task.py b/crash/types/task.py index 77ff6b4ca29..ab5a3c37ceb 100644 --- a/crash/types/task.py +++ b/crash/types/task.py @@ -3,14 +3,14 @@ from typing import Iterator, Callable, Dict, List +import gdb + from crash.exceptions import InvalidArgumentError, ArgumentTypeError from crash.exceptions import UnexpectedGDBTypeError from crash.util import array_size, struct_has_member from crash.util.symbols import Types, Symvals, SymbolCallbacks from crash.types.list import list_for_each_entry -import gdb - PF_EXITING = 0x4 types = Types(['struct task_struct', 'struct mm_struct', 'atomic_long_t']) diff --git a/crash/types/vmstat.py b/crash/types/vmstat.py index df47185276e..963efb6f146 100644 --- a/crash/types/vmstat.py +++ b/crash/types/vmstat.py @@ -3,12 +3,12 @@ from typing import List, Tuple +import gdb + from crash.util.symbols import Types, TypeCallbacks, Symbols from crash.types.percpu import get_percpu_var from crash.types.cpu import for_each_online_cpu -import gdb - class VmStat: types = Types(['enum zone_stat_item', 'enum vm_event_item']) symbols = Symbols(['vm_event_states']) diff --git a/crash/types/zone.py b/crash/types/zone.py index 2a715eb0507..813d325cb61 100644 --- a/crash/types/zone.py +++ b/crash/types/zone.py @@ -3,6 +3,8 @@ from typing import List +import gdb + from crash.util import array_for_each from crash.util.symbols import Types from crash.types.percpu import get_percpu_var @@ -11,8 +13,6 @@ from crash.types.list import list_for_each_entry import 
crash.types.page -import gdb - class Zone: types = Types(['struct page']) diff --git a/crash/util/__init__.py b/crash/util/__init__.py index 2cf9497606e..24617e92662 100644 --- a/crash/util/__init__.py +++ b/crash/util/__init__.py @@ -5,12 +5,12 @@ import uuid +import gdb + from crash.util.symbols import Types from crash.exceptions import MissingTypeError, MissingSymbolError from crash.exceptions import ArgumentTypeError, NotStructOrUnionError -import gdb - TypeSpecifier = Union[gdb.Type, gdb.Value, str, gdb.Symbol] AddressSpecifier = Union[gdb.Value, str, int] diff --git a/crash/util/symbols.py b/crash/util/symbols.py index cbe806731f5..7b80cb0fb3d 100644 --- a/crash/util/symbols.py +++ b/crash/util/symbols.py @@ -17,6 +17,8 @@ from typing import Type, List, Tuple, Callable, Union, Dict, Any +import gdb + from crash.infra.lookup import DelayedType, DelayedSymbol, DelayedSymval from crash.infra.lookup import DelayedValue, DelayedMinimalSymbol from crash.infra.lookup import DelayedMinimalSymval @@ -24,8 +26,6 @@ from crash.infra.lookup import SymbolCallback, MinimalSymbolCallback from crash.exceptions import DelayedAttributeError -import gdb - CollectedValue = Union[gdb.Type, gdb.Value, gdb.Symbol, gdb.MinSymbol, Any] Names = Union[List[str], str] From fffabab6be0ff56c52169be82eba9d71d89eea31 Mon Sep 17 00:00:00 2001 From: Jeffrey Mahoney Date: Thu, 19 Nov 2020 15:48:16 -0500 Subject: [PATCH 329/367] pylint: fix raise-missing-from warnings Pylint 2.6 introduced a check to ensure that exceptions raised from within an exception use the from keyword. This is a new requirement and I've updated affected call sites. Signed-off-by: Jeff Mahoney --- crash/arch/__init__.py | 2 +- crash/cache/syscache.py | 2 +- crash/commands/cgroup.py | 2 +- crash/commands/dmesg.py | 4 ++-- crash/commands/help.py | 2 +- crash/commands/kmem.py | 2 +- crash/commands/vtop.py | 2 +- crash/commands/xfs.py | 6 +++--- crash/kernel.py | 8 ++++---- crash/requirements/__init__.py | 12 ++++++------ crash/subsystem/filesystem/__init__.py | 4 ++-- crash/types/list.py | 4 ++-- crash/types/slab.py | 4 ++-- crash/types/task.py | 2 +- crash/util/__init__.py | 8 ++++---- crash/util/symbols.py | 4 ++-- kdump/target.py | 2 +- 17 files changed, 35 insertions(+), 35 deletions(-) diff --git a/crash/arch/__init__.py b/crash/arch/__init__.py index 2c3d80b9a63..9668d7eeb49 100644 --- a/crash/arch/__init__.py +++ b/crash/arch/__init__.py @@ -45,7 +45,7 @@ def __init__(self) -> None: try: target.set_fetch_registers(self._fetch_registers()) except AttributeError: - raise NotImplementedError("No fetch_registers callback defined") + raise NotImplementedError("No fetch_registers callback defined") from None @classmethod def set_fetch_registers(cls, diff --git a/crash/cache/syscache.py b/crash/cache/syscache.py index f56d7c5c60d..fa6c517bfd8 100644 --- a/crash/cache/syscache.py +++ b/crash/cache/syscache.py @@ -45,7 +45,7 @@ def _utsname_field(self, name: str) -> str: try: return self._utsname_cache[name] except KeyError: - raise DelayedAttributeError(name) + raise DelayedAttributeError(name) from None @property def sysname(self) -> str: diff --git a/crash/commands/cgroup.py b/crash/commands/cgroup.py index 1ae351cbff0..2ac4ee8eccc 100644 --- a/crash/commands/cgroup.py +++ b/crash/commands/cgroup.py @@ -82,7 +82,7 @@ def show_task(self, pid: int) -> None: )) except KeyError: - raise CommandError("No such task with pid {}".format(pid)) + raise CommandError("No such task with pid {}".format(pid)) from None def show_cgroup_tasks(self, addr: 
AddressSpecifier) -> None: cgrp = find_cgroup(addr) diff --git a/crash/commands/dmesg.py b/crash/commands/dmesg.py index 7b6c7957807..a0b2a87ea87 100644 --- a/crash/commands/dmesg.py +++ b/crash/commands/dmesg.py @@ -230,7 +230,7 @@ def get_log_msgs(self, try: idx = symvals.log_first_idx except DelayedAttributeError: - raise LogTypeException('not structured log') + raise LogTypeException('not structured log') from None if symvals.clear_seq < symvals.log_first_seq: # mypy seems to think the preceding clear_seq is fine but this @@ -284,7 +284,7 @@ def execute(self, args: argparse.Namespace) -> None: except LogTypeException: pass except LogInvalidOption as lio: - raise CommandError(str(lio)) + raise CommandError(str(lio)) from lio print("Can't find valid log") diff --git a/crash/commands/help.py b/crash/commands/help.py index 3fee10a29b1..a862e1b0c9b 100644 --- a/crash/commands/help.py +++ b/crash/commands/help.py @@ -46,7 +46,7 @@ def execute(self, args: argparse.Namespace) -> None: try: text = self._commands[cmd].format_help().strip() except KeyError: - raise CommandError("No such command `{}'".format(cmd)) + raise CommandError("No such command `{}'".format(cmd)) from None if text is None: print("No help text available.") else: diff --git a/crash/commands/kmem.py b/crash/commands/kmem.py index 4570b9a27b3..b7a1a573d4c 100644 --- a/crash/commands/kmem.py +++ b/crash/commands/kmem.py @@ -191,7 +191,7 @@ def print_vmstats(self) -> None: try: vm_stat = get_symbol_value("vm_stat") except MissingSymbolError: - raise CommandError("Support for new-style vmstat is unimplemented.") + raise CommandError("Support for new-style vmstat is unimplemented.") from None print(" VM_STAT:") #TODO put this... where? diff --git a/crash/commands/vtop.py b/crash/commands/vtop.py index 4eaad2320af..dfa46190614 100644 --- a/crash/commands/vtop.py +++ b/crash/commands/vtop.py @@ -237,7 +237,7 @@ def execute(self, args: argparse.Namespace) -> None: try: addr = int(addr, 16) except ValueError: - raise CommandLineError(f"{addr} is not a hex address") + raise CommandLineError(f"{addr} is not a hex address") from None fulladdr = addrxlat.FullAddress(addrxlat.KVADDR, addr) print('{:16} {:16}'.format('VIRTUAL', 'PHYSICAL')) try: diff --git a/crash/commands/xfs.py b/crash/commands/xfs.py index 7bc1e017a70..eb623b3650a 100644 --- a/crash/commands/xfs.py +++ b/crash/commands/xfs.py @@ -89,7 +89,7 @@ def show_xfs(self, args: argparse.Namespace) -> None: try: sb = get_super_block(args.addr) except gdb.NotAvailableError as e: - raise CommandError(str(e)) + raise CommandError(str(e)) from e mp = xfs_mount(sb) @@ -107,7 +107,7 @@ def dump_ail(self, args: argparse.Namespace) -> None: try: sb = get_super_block(args.addr) except gdb.NotAvailableError as e: - raise CommandError(str(e)) + raise CommandError(str(e)) from e mp = xfs_mount(sb) ail = mp['m_ail'] @@ -177,7 +177,7 @@ def dump_buftargs(cls, args: argparse.Namespace) -> None: try: sb = get_super_block(args.addr) except gdb.NotAvailableError as e: - raise CommandError(str(e)) + raise CommandError(str(e)) from e mp = xfs_mount(sb) ddev = mp['m_ddev_targp'] ldev = mp['m_logdev_targp'] diff --git a/crash/kernel.py b/crash/kernel.py index 4add626e3af..7a62397778f 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -175,7 +175,7 @@ def __init__(self, roots: PathSpecifier = None, try: archclass = crash.arch.get_architecture(archname) except RuntimeError as e: - raise CrashKernelError(str(e)) + raise CrashKernelError(str(e)) from e self.arch = archclass() @@ -487,7 +487,7 @@ def 
load_modules(self, verbose: bool = False, debug: bool = False) -> None: to_string=True) except gdb.error as e: raise CrashKernelError("Error while loading module `{}': {}" - .format(modname, str(e))) + .format(modname, str(e))) from e if debug: print(result) @@ -548,7 +548,7 @@ def _get_module_path_from_modules_order(self, path: str, name: str) -> str: try: return self.modules_order[path][name] except KeyError: - raise _NoMatchingFileError(name) + raise _NoMatchingFileError(name) from None def _cache_file_tree(self, path: str, regex: Pattern[str] = None) -> None: if not path in self.findmap: @@ -589,7 +589,7 @@ def _get_file_path_from_tree_search(self, path: str, name: str, modname = self._normalize_modname(name) return self.findmap[path]['files'][modname] except KeyError: - raise _NoMatchingFileError(name) + raise _NoMatchingFileError(name) from None def _find_module_file(self, name: str, path: str) -> str: try: diff --git a/crash/requirements/__init__.py b/crash/requirements/__init__.py index e8f2cef3cee..5e1b217e31d 100644 --- a/crash/requirements/__init__.py +++ b/crash/requirements/__init__.py @@ -10,34 +10,34 @@ x1 = gdb.Target del x1 except AttributeError as e: - raise IncompatibleGDBError("gdb.Target") + raise IncompatibleGDBError("gdb.Target") from e try: x2 = gdb.lookup_symbol('x', None) del x2 except TypeError as e: - raise IncompatibleGDBError("a compatible gdb.lookup_symbol") + raise IncompatibleGDBError("a compatible gdb.lookup_symbol") from e try: x3 = gdb.MinSymbol del x3 except AttributeError as e: - raise IncompatibleGDBError("gdb.MinSymbol") + raise IncompatibleGDBError("gdb.MinSymbol") from e try: x4 = gdb.Register del x4 except AttributeError as e: - raise IncompatibleGDBError("gdb.Register") + raise IncompatibleGDBError("gdb.Register") from e try: x6 = gdb.Inferior.new_thread del x6 except AttributeError as e: - raise IncompatibleGDBError("gdb.Inferior.new_thread") + raise IncompatibleGDBError("gdb.Inferior.new_thread") from e try: x7 = gdb.Objfile.architecture del x7 except AttributeError as e: - raise IncompatibleGDBError("gdb.Objfile.architecture") + raise IncompatibleGDBError("gdb.Objfile.architecture") from e diff --git a/crash/subsystem/filesystem/__init__.py b/crash/subsystem/filesystem/__init__.py index b0e1487880d..fb96644da02 100644 --- a/crash/subsystem/filesystem/__init__.py +++ b/crash/subsystem/filesystem/__init__.py @@ -146,8 +146,8 @@ def get_super_block(desc: AddressSpecifier, force: bool = False) -> gdb.Value: if not force: try: x = int(sb['s_dev']) # pylint: disable=unused-variable - except gdb.NotAvailableError: - raise gdb.NotAvailableError(f"no superblock available at `{desc}'") + except gdb.NotAvailableError as e: + raise gdb.NotAvailableError(f"no superblock available at `{desc}'") from e return sb diff --git a/crash/types/list.py b/crash/types/list.py index c6f7bdabfc2..7dc0ea624d5 100644 --- a/crash/types/list.py +++ b/crash/types/list.py @@ -81,7 +81,7 @@ def list_for_each(list_head: gdb.Value, include_head: bool = False, node = nxt.dereference() except gdb.error as e: raise BufferError("Failed to read list_head {:#x}: {}" - .format(int(list_head.address), str(e))) + .format(int(list_head.address), str(e))) from e last_good_addr = None while node.address != list_head.address: @@ -113,7 +113,7 @@ def list_for_each(list_head: gdb.Value, include_head: bool = False, last_good_str = "(none)" raise BufferError(f"Failed to read list_head 0x{int(node.address):x} " f"in list 0x{int(list_head.address):x}, last good " - f"list_head {last_good_str}: 
{str(e)}") + f"list_head {last_good_str}: {str(e)}") from e try: if fast is not None: diff --git a/crash/types/slab.py b/crash/types/slab.py index 18741bfd04c..2ca38a6e059 100644 --- a/crash/types/slab.py +++ b/crash/types/slab.py @@ -1369,13 +1369,13 @@ def kmem_cache_from_addr(addr: int) -> KmemCache: try: return __kmem_caches_by_addr[addr] except KeyError: - raise KmemCacheNotFound(f"No kmem cache found for {addr}.") + raise KmemCacheNotFound(f"No kmem cache found for {addr}.") from None def kmem_cache_from_name(name: str) -> KmemCache: try: return __kmem_caches[name] except KeyError: - raise KmemCacheNotFound(f"No kmem cache found for {name}.") + raise KmemCacheNotFound(f"No kmem cache found for {name}.") from None def kmem_cache_get_all() -> ValuesView[KmemCache]: return __kmem_caches.values() diff --git a/crash/types/task.py b/crash/types/task.py index ab5a3c37ceb..a0be1a6cdeb 100644 --- a/crash/types/task.py +++ b/crash/types/task.py @@ -517,7 +517,7 @@ def get_stack_pointer(self) -> int: try: fn = getattr(self, '_get_stack_pointer_fn') except AttributeError: - raise NotImplementedError("Architecture hasn't provided stack pointer callback") + raise NotImplementedError("Architecture hasn't provided stack pointer callback") from None return fn(self.task_struct['thread']) diff --git a/crash/util/__init__.py b/crash/util/__init__.py index 24617e92662..e1954b7ea10 100644 --- a/crash/util/__init__.py +++ b/crash/util/__init__.py @@ -173,9 +173,9 @@ def resolve_type(val: TypeSpecifier) -> gdb.Type: elif isinstance(val, str): try: gdbtype = gdb.lookup_type(val) - except gdb.error: + except gdb.error as e: raise MissingTypeError("Could not resolve type {}" - .format(val)) + .format(val)) from e elif isinstance(val, gdb.Symbol): gdbtype = val.value().type else: @@ -252,7 +252,7 @@ def offsetof_type(gdbtype: gdb.Type, member_name: str, return __offsetof(gdbtype, member_name, error) except _InvalidComponentBaseError as e: if error: - raise InvalidComponentError(gdbtype, member_name, str(e)) + raise InvalidComponentError(gdbtype, member_name, str(e)) from e return None def offsetof(gdbtype: gdb.Type, member_name: str, @@ -384,7 +384,7 @@ def get_typed_pointer(val: AddressSpecifier, gdbtype: gdb.Type) -> gdb.Value: try: val = int(val, 16) except TypeError as e: - raise TypeError("string must describe hex address: {}".format(e)) + raise TypeError("string must describe hex address: {}".format(e)) from None else: val = int(val) diff --git a/crash/util/symbols.py b/crash/util/symbols.py index 7b80cb0fb3d..0c11a56bc33 100644 --- a/crash/util/symbols.py +++ b/crash/util/symbols.py @@ -99,13 +99,13 @@ def __getitem__(self, name: str) -> Any: try: return self.get(name) except NameError as e: - raise KeyError(str(e)) + raise KeyError(str(e)) from None def __getattr__(self, name: str) -> Any: try: return self.get(name) except NameError as e: - raise AttributeError(str(e)) + raise AttributeError(str(e)) from None class Types(DelayedCollection): """ diff --git a/kdump/target.py b/kdump/target.py index 5383a78710d..0949d21551b 100644 --- a/kdump/target.py +++ b/kdump/target.py @@ -129,7 +129,7 @@ def fetch_registers(self, thread: gdb.InferiorThread, try: return self._fetch_registers(thread, register) # type: ignore except AttributeError: - raise NotImplementedError("Target did not define fetch_registers callback") + raise NotImplementedError("Target did not define fetch_registers callback") from None def prepare_to_store(self, thread: gdb.InferiorThread) -> None: pass From 
3314f7f3ef1ad8d978788e2bd17475d651467188 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 20 Nov 2020 11:32:57 -0500 Subject: [PATCH 330/367] pylint: fix singleton-comparison warnings These warnings match the x == True/False/None cases when == should be 'is.' Signed-off-by: Jeff Mahoney --- crash/commands/cgroup.py | 4 ++-- crash/subsystem/storage/blockmq.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/crash/commands/cgroup.py b/crash/commands/cgroup.py index 2ac4ee8eccc..174252c5cc2 100644 --- a/crash/commands/cgroup.py +++ b/crash/commands/cgroup.py @@ -34,9 +34,9 @@ def __init__(self, name: str) -> None: super().__init__(name, parser) def execute(self, args: argparse.Namespace) -> None: - if args.t != False: + if args.t is not False: self.show_task(args.t) - elif args.g != False: + elif args.g is not False: self.show_cgroup_tasks(args.g) elif args.s: raise NotImplementedError("NI") diff --git a/crash/subsystem/storage/blockmq.py b/crash/subsystem/storage/blockmq.py index acb3de4b0cd..407a3bd7903 100644 --- a/crash/subsystem/storage/blockmq.py +++ b/crash/subsystem/storage/blockmq.py @@ -68,7 +68,7 @@ def mq_for_each_request_in_queue(queue: gdb.Value, reserved: bool = True) \ tags = hctx['tags'] if int(hctx['nr_ctx']) == 0 or int(tags) == 0: continue - if reserved == True and int(tags['nr_reserved_tags']) > 0: + if reserved is True and int(tags['nr_reserved_tags']) > 0: for tag in sbitmap_for_each_set(tags['breserved_tags']['sb']): rq = tags['rqs'][tag] if int(rq) != 0 and rq['q'] == queue: From 45ac8a3b7d6ecc603493d26fef18c9a91f6cdeb0 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 20 Nov 2020 11:37:07 -0500 Subject: [PATCH 331/367] pylint: fix whitespace warnings This fixes bad-indentation and trailing-newlines. Signed-off-by: Jeff Mahoney --- crash/subsystem/storage/block.py | 4 ++-- crash/subsystem/storage/blocksq.py | 1 - crash/types/sbitmap.py | 1 - 3 files changed, 2 insertions(+), 4 deletions(-) diff --git a/crash/subsystem/storage/block.py b/crash/subsystem/storage/block.py index 8fc2361b836..2427a17a441 100644 --- a/crash/subsystem/storage/block.py +++ b/crash/subsystem/storage/block.py @@ -26,7 +26,7 @@ def requests_in_flight(queue: gdb.Value) -> Tuple[int, int]: of sync requests. """ if queue_is_mq(queue): - return mq_requests_in_flight(queue) + return mq_requests_in_flight(queue) return sq_requests_in_flight(queue) def requests_queued(queue: gdb.Value) -> Tuple[int, int]: @@ -43,7 +43,7 @@ def requests_queued(queue: gdb.Value) -> Tuple[int, int]: the number of sync requests. 
""" if queue_is_mq(queue): - return mq_requests_queued(queue) + return mq_requests_queued(queue) return sq_requests_queued(queue) def for_each_request_in_queue(queue: gdb.Value) -> Iterable[gdb.Value]: diff --git a/crash/subsystem/storage/blocksq.py b/crash/subsystem/storage/blocksq.py index e64ed1deace..2d2081cc7eb 100644 --- a/crash/subsystem/storage/blocksq.py +++ b/crash/subsystem/storage/blocksq.py @@ -78,4 +78,3 @@ def sq_requests_queued(queue: gdb.Value) -> Tuple[int, int]: rqlist = queue['root_rl'] return (int(rqlist['count'][0]), int(rqlist['count'][1])) - diff --git a/crash/types/sbitmap.py b/crash/types/sbitmap.py index ce13cf40641..33f0ad439af 100644 --- a/crash/types/sbitmap.py +++ b/crash/types/sbitmap.py @@ -38,4 +38,3 @@ def sbitmap_for_each_set(sbitmap: gdb.Value) -> Iterable[int]: for j in range(0, bits): if word & (1 << j): yield offset + j - From 3938854a848f8e76dafc0ffffa6df75e0292d8aa Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 22 Jan 2021 16:36:31 -0500 Subject: [PATCH 332/367] crash.arch.x86_64: make stack traces more reliable We can't depend on CFI in __switch_to_asm, but we can unwind just enough to where it was called and use the saved register state to set up the frame. This caches the stack frame size (to adjust rsp) and the location of the caller of __switch_to_asm to set up the frames for other scheduled tasks. Signed-off-by: Jeff Mahoney --- crash/arch/__init__.py | 3 +++ crash/arch/x86_64.py | 53 +++++++++++++++++++++++++++++++++++++++++- crash/kernel.py | 4 +++- 3 files changed, 58 insertions(+), 2 deletions(-) diff --git a/crash/arch/__init__.py b/crash/arch/__init__.py index 9668d7eeb49..3046ea881a1 100644 --- a/crash/arch/__init__.py +++ b/crash/arch/__init__.py @@ -67,6 +67,9 @@ def setup_thread_info(self, thread: gdb.InferiorThread) -> None: def get_stack_pointer(self, thread_struct: gdb.Value) -> int: raise NotImplementedError("get_stack_pointer is not implemented") + def setup_scheduled_frame_offset(self, task: gdb.Value) -> None: + pass + # This keeps stack traces from continuing into userspace and causing problems. 
class KernelFrameFilter: def __init__(self, address: int) -> None: diff --git a/crash/arch/x86_64.py b/crash/arch/x86_64.py index e816b34e947..58c6be880c8 100644 --- a/crash/arch/x86_64.py +++ b/crash/arch/x86_64.py @@ -2,6 +2,9 @@ # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: import gdb +import re + +from typing import Optional from crash.arch import CrashArchitecture, KernelFrameFilter, register_arch from crash.arch import FetchRegistersCallback @@ -35,13 +38,16 @@ def fetch_scheduled(self, thread: gdb.InferiorThread, task = thread.info.task_struct rsp = task['thread']['sp'].cast(types.unsigned_long_p_type) + + rsp = thread.arch.adjust_scheduled_frame_offset(rsp) + thread.registers['rsp'].value = rsp frame = rsp.cast(types.inactive_task_frame_p_type).dereference() # Only write rip when requested; It resets the frame cache if register in (16, -1): - thread.registers['rip'].value = frame['ret_addr'] + thread.registers['rip'].value = thread.arch.get_scheduled_rip() if register == 16: return @@ -100,6 +106,8 @@ class x86_64Architecture(CrashArchitecture): ident = "i386:x86-64" aliases = ["x86_64"] + _frame_offset : Optional[int] = None + def __init__(self) -> None: super(x86_64Architecture, self).__init__() @@ -111,6 +119,49 @@ def setup_thread_info(self, thread: gdb.InferiorThread) -> None: thread_info = task['stack'].cast(types.thread_info_p_type) thread.info.set_thread_info(thread_info) + # We don't have CFI for __switch_to_asm but we do know what it looks like. + # We push 6 registers and then swap rsp, so we can just rewind back + # to __switch_to_asm getting called and then populate the registers that + # were saved on the stack. + def setup_scheduled_frame_offset(self, task: gdb.Value) -> None: + if self._frame_offset: + return + + top = int(task['stack']) + 16*1024 + callq = re.compile("callq.*<(\w+)>") + + orig_rsp = rsp = task['thread']['sp'].cast(types.unsigned_long_p_type) + + count = 0 + while int(rsp) < top: + val = int(rsp.dereference()) - 5 + if val > self.filter.address: + try: + insn = gdb.execute(f"x/i {val:#x}", to_string=True) + except Exception as e: + rsp += 1 + count += 1 + continue + + m = callq.search(insn) + if m and m.group(1) == "__switch_to_asm": + self._frame_offset = rsp - orig_rsp + 1 + self._scheduled_rip = val + return + + rsp += 1 + count += 1 + + raise RuntimeError("Cannot locate stack frame offset for __schedule") + + def adjust_scheduled_frame_offset(self, rsp: gdb.Value) -> gdb.Value: + if self._frame_offset: + return rsp + self._frame_offset + return rsp + + def get_scheduled_rip(self) -> None: + return self._scheduled_rip + @classmethod # pylint: disable=unused-argument def setup_inactive_task_frame_handler(cls, inactive: gdb.Type) -> None: diff --git a/crash/kernel.py b/crash/kernel.py index 7a62397778f..b5f1ada38a3 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -694,13 +694,15 @@ def setup_tasks(self) -> None: cpu = rqscurrs[int(task.address)] regs = self.vmcore.attr.cpu[cpu].reg ltask.set_active(cpu, regs) - + else: + self.arch.setup_scheduled_frame_offset(task) ptid = (LINUX_KERNEL_PID, task['pid'], 0) try: thread = gdb.selected_inferior().new_thread(ptid) thread.info = ltask + thread.arch = self.arch except gdb.error: print("Failed to setup task @{:#x}".format(int(task.address))) continue From 521f5e4ba52410311851b7f9c1c08d1c3caa97e5 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 22 Jan 2021 16:41:29 -0500 Subject: [PATCH 333/367] crash.types.module: handle module attribute churn Linux commit ed66f991bb1 (module: 
Refactor section attr into bin attribute) changed the module section attributes to use struct bin_attribute instead of struct module_attribute. That really just pushed the 'attr' field one level deeper so we need to account for that in for_each_module_section. Signed-off-by: Jeff Mahoney --- crash/types/module.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/crash/types/module.py b/crash/types/module.py index 5391ae47a7b..b0f7b8261fa 100644 --- a/crash/types/module.py +++ b/crash/types/module.py @@ -47,7 +47,10 @@ def for_each_module_section(module: gdb.Value) -> Iterable[Tuple[str, int]]: for sec in range(0, attrs['nsections']): attr = attrs['attrs'][sec] - name = attr['name'].string() + if 'battr' in attr.type: + name = attr['battr']['attr']['name'].string() + elif 'name' in attr.type: + name = attr['name'].string() if name == '.text': continue From 50a19e63632d82f207d8880be1b209da4d78fc7f Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Mon, 22 Mar 2021 16:58:05 +0100 Subject: [PATCH 334/367] types/list: fix fast cycle detection Due to logical error the "tortoise and hare" cycle detection didn't actually work. Signed-off-by: Vlastimil Babka --- crash/types/list.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crash/types/list.py b/crash/types/list.py index 7dc0ea624d5..b63b5189b84 100644 --- a/crash/types/list.py +++ b/crash/types/list.py @@ -100,7 +100,7 @@ def list_for_each(list_head: gdb.Value, include_head: bool = False, # broken prev link means there might be a cycle that # does not include the initial head, so start detecting # cycles - if not exact_cycles and fast is not None: + if not exact_cycles and fast is None: fast = node nxt = node[next_] # only yield after trying to read something from the node, no @@ -122,7 +122,7 @@ def list_for_each(list_head: gdb.Value, include_head: bool = False, # algorithm) for i in range(2): # pylint: disable=unused-variable fast = fast[next_].dereference() - if node.address == fast.address: + if int(node.address) == int(fast.address): raise ListCycleError("Cycle in list detected.") except gdb.error: # we hit an unreadable element, so just stop detecting cycles From 552e143a7b48243b78b61c6d5444c51836147cf4 Mon Sep 17 00:00:00 2001 From: Petr Mladek Date: Fri, 26 Mar 2021 14:00:46 +0100 Subject: [PATCH 335/367] pylog: Make -d option work again The dictionary values are separated by '\0' in the structured printk ringbuffer. The existing code was not able to handle it. Fix it by for-cycle and string.split(). Also simplify the code a bit. Always read the dictionary. Handle the '-d' parameter only when printing the lines. 
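For illustration only (hypothetical record contents, not taken from a real
dump): the dictionary attached to a printk record is a single buffer of
KEY=VALUE pairs separated by '\0', so a plain split() recovers the entries:

  record_dict = "SUBSYSTEM=acpi\0DEVICE=+acpi:PNP0A03:00"
  for entry in record_dict.split('\0'):
      print(' {}'.format(entry))
  # prints:
  #  SUBSYSTEM=acpi
  #  DEVICE=+acpi:PNP0A03:00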
Signed-off-by: Petr Mladek --- crash/commands/dmesg.py | 33 +++++++++++++++------------------ 1 file changed, 15 insertions(+), 18 deletions(-) diff --git a/crash/commands/dmesg.py b/crash/commands/dmesg.py index a0b2a87ea87..437b00412e8 100644 --- a/crash/commands/dmesg.py +++ b/crash/commands/dmesg.py @@ -187,8 +187,7 @@ def filter_unstructured_log(cls, log: str, args: argparse.Namespace) -> str: return '\n'.join(lines) - def log_from_idx(self, logbuf: gdb.Value, idx: int, - dict_needed: bool = False) -> Dict: + def log_from_idx(self, logbuf: gdb.Value, idx: int) -> Dict: msg = (logbuf + idx).cast(types.printk_log_p_type) try: @@ -198,6 +197,13 @@ def log_from_idx(self, logbuf: gdb.Value, idx: int, except UnicodeDecodeError as e: print(e) + textlen = int(msg['text_len']) + dictlen = int(msg['dict_len']) + + dictval = (msg.cast(types.char_p_type) + + types.printk_log_p_type.target().sizeof + textlen) + dict = dictval.string(length=dictlen) + msglen = int(msg['len']) # A zero-length message means we wrap back to the beginning @@ -206,27 +212,17 @@ def log_from_idx(self, logbuf: gdb.Value, idx: int, else: nextidx = idx + msglen - textlen = int(msg['text_len']) - msgdict = { 'text' : text[0:textlen], 'timestamp' : int(msg['ts_nsec']), 'level' : int(msg['level']), 'next' : nextidx, - 'dict' : [], + 'dict' : dict[0:dictlen], } - if dict_needed: - dict_len = int(msg['dict_len']) - d = (msg.cast(types.char_p_type) + - types.printk_log_p_type.target().sizeof + textlen) - if dict_len > 0: - s = d.string('ascii', 'backslashreplace', dict_len) - msgdict['dict'].append(s) return msgdict - def get_log_msgs(self, - dict_needed: bool = False) -> Iterable[Dict[str, Any]]: + def get_log_msgs(self) -> Iterable[Dict[str, Any]]: try: idx = symvals.log_first_idx except DelayedAttributeError: @@ -241,13 +237,13 @@ def get_log_msgs(self, idx = symvals.log_first_idx while seq < symvals.log_next_seq: - msg = self.log_from_idx(symvals.log_buf, idx, dict_needed) + msg = self.log_from_idx(symvals.log_buf, idx) seq += 1 idx = msg['next'] yield msg def handle_structured_log(self, args: argparse.Namespace) -> None: - for msg in self.get_log_msgs(args.d): + for msg in self.get_log_msgs(): timestamp = '' if not args.t: usecs = int(msg['timestamp']) @@ -261,8 +257,9 @@ def handle_structured_log(self, args: argparse.Namespace) -> None: for line in msg['text'].split('\n'): print('{}{}{}'.format(level, timestamp, line)) - for d in msg['dict']: - print(d) + if (args.d and msg['dict']): + for dict in msg['dict'].split('\0'): + print(' {}'.format(dict)) def handle_logbuf(self, args: argparse.Namespace) -> None: if symvals.log_buf_len and symvals.log_buf: From e50446b5b986dd8d8d77c65bd5c932132caf6d99 Mon Sep 17 00:00:00 2001 From: Petr Mladek Date: Fri, 26 Mar 2021 11:46:56 +0100 Subject: [PATCH 336/367] pylog: Create printk subsystem to handle different ringbuffer formats pylog command supports two formats of the internal kernel ringbuffer. kernel-5.10 implements a new lockless ringbuffer and pylog will need to support it. Split the implementation of the current two formats as a preparation step. It will better separate the completely different implementations. 
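With the backends split out, the dmesg command only has to try each format
in turn and fall through on LogTypeException, roughly:

    try:
        structured_rb_show(args)
        return
    except LogTypeException:
        pass
    plain_rb_show(args)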
Signed-off-by: Petr Mladek --- crash/commands/dmesg.py | 115 +----------------- crash/subsystem/printk/__init__.py | 12 ++ crash/subsystem/printk/plain_ringbuffer.py | 34 ++++++ .../subsystem/printk/structured_ringbuffer.py | 92 ++++++++++++++ 4 files changed, 143 insertions(+), 110 deletions(-) create mode 100644 crash/subsystem/printk/__init__.py create mode 100644 crash/subsystem/printk/plain_ringbuffer.py create mode 100644 crash/subsystem/printk/structured_ringbuffer.py diff --git a/crash/commands/dmesg.py b/crash/commands/dmesg.py index 437b00412e8..f7b68135b1d 100644 --- a/crash/commands/dmesg.py +++ b/crash/commands/dmesg.py @@ -141,24 +141,15 @@ from typing import Dict, Iterable, Any -import re import argparse import gdb from crash.commands import Command, ArgumentParser, CommandError from crash.exceptions import DelayedAttributeError -from crash.util.symbols import Types, Symvals - -types = Types(['struct printk_log *', 'char *']) -symvals = Symvals(['log_buf', 'log_buf_len', 'log_first_idx', 'log_next_idx', - 'clear_seq', 'log_first_seq', 'log_next_seq']) - -class LogTypeException(Exception): - pass - -class LogInvalidOption(Exception): - pass +from crash.subsystem.printk import LogTypeException, LogInvalidOption +from crash.subsystem.printk.structured_ringbuffer import structured_rb_show +from crash.subsystem.printk.plain_ringbuffer import plain_rb_show class LogCommand(Command): """dump system message buffer""" @@ -172,111 +163,15 @@ def __init__(self, name: str) -> None: Command.__init__(self, name, parser) - @classmethod - def filter_unstructured_log(cls, log: str, args: argparse.Namespace) -> str: - lines = log.split('\n') - if not args.m: - newlog = [] - for line in lines: - if not args.m: - line = re.sub(r'^<[0-9]+>', '', line) - if args.t: - line = re.sub(r'^\[[0-9\. ]+\] ', '', line) - newlog.append(line) - lines = newlog - - return '\n'.join(lines) - - def log_from_idx(self, logbuf: gdb.Value, idx: int) -> Dict: - msg = (logbuf + idx).cast(types.printk_log_p_type) - - try: - textval = (msg.cast(types.char_p_type) + - types.printk_log_p_type.target().sizeof) - text = textval.string(length=int(msg['text_len'])) - except UnicodeDecodeError as e: - print(e) - - textlen = int(msg['text_len']) - dictlen = int(msg['dict_len']) - - dictval = (msg.cast(types.char_p_type) + - types.printk_log_p_type.target().sizeof + textlen) - dict = dictval.string(length=dictlen) - - msglen = int(msg['len']) - - # A zero-length message means we wrap back to the beginning - if msglen == 0: - nextidx = 0 - else: - nextidx = idx + msglen - - msgdict = { - 'text' : text[0:textlen], - 'timestamp' : int(msg['ts_nsec']), - 'level' : int(msg['level']), - 'next' : nextidx, - 'dict' : dict[0:dictlen], - } - - return msgdict - - def get_log_msgs(self) -> Iterable[Dict[str, Any]]: - try: - idx = symvals.log_first_idx - except DelayedAttributeError: - raise LogTypeException('not structured log') from None - - if symvals.clear_seq < symvals.log_first_seq: - # mypy seems to think the preceding clear_seq is fine but this - # one isn't. Derp. 
- symvals.clear_seq = symvals.log_first_seq # type: ignore - - seq = symvals.clear_seq - idx = symvals.log_first_idx - - while seq < symvals.log_next_seq: - msg = self.log_from_idx(symvals.log_buf, idx) - seq += 1 - idx = msg['next'] - yield msg - - def handle_structured_log(self, args: argparse.Namespace) -> None: - for msg in self.get_log_msgs(): - timestamp = '' - if not args.t: - usecs = int(msg['timestamp']) - timestamp = ('[{:5d}.{:06d}] ' - .format(usecs // 1000000000, - (usecs % 1000000000) // 1000)) - level = '' - if args.m: - level = '<{:d}>'.format(msg['level']) - - for line in msg['text'].split('\n'): - print('{}{}{}'.format(level, timestamp, line)) - - if (args.d and msg['dict']): - for dict in msg['dict'].split('\0'): - print(' {}'.format(dict)) - - def handle_logbuf(self, args: argparse.Namespace) -> None: - if symvals.log_buf_len and symvals.log_buf: - if args.d: - raise LogInvalidOption("Unstructured logs don't offer key/value pair support") - - print(self.filter_unstructured_log(symvals.log_buf.string('utf-8', 'replace'), args)) - def execute(self, args: argparse.Namespace) -> None: try: - self.handle_structured_log(args) + structured_rb_show(args) return except LogTypeException: pass try: - self.handle_logbuf(args) + plain_rb_show(args) return except LogTypeException: pass diff --git a/crash/subsystem/printk/__init__.py b/crash/subsystem/printk/__init__.py new file mode 100644 index 00000000000..4dd1847e63b --- /dev/null +++ b/crash/subsystem/printk/__init__.py @@ -0,0 +1,12 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +import gdb + +from crash.exceptions import DelayedAttributeError + +class LogTypeException(Exception): + pass + +class LogInvalidOption(Exception): + pass diff --git a/crash/subsystem/printk/plain_ringbuffer.py b/crash/subsystem/printk/plain_ringbuffer.py new file mode 100644 index 00000000000..fd4174f3732 --- /dev/null +++ b/crash/subsystem/printk/plain_ringbuffer.py @@ -0,0 +1,34 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +import argparse +import re + +import gdb + +from crash.util.symbols import Types, Symvals +from crash.subsystem.printk import LogTypeException, LogInvalidOption + +types = Types(['char *']) +symvals = Symvals(['log_buf', 'log_buf_len']) + +def plain_rb_filter(log: str, args: argparse.Namespace) -> str: + lines = log.split('\n') + if not args.m: + newlog = [] + for line in lines: + if not args.m: + line = re.sub(r'^<[0-9]+>', '', line) + if args.t: + line = re.sub(r'^\[[0-9\. 
]+\] ', '', line) + newlog.append(line) + lines = newlog + + return '\n'.join(lines) + +def plain_rb_show(args: argparse.Namespace) -> None: + if symvals.log_buf_len and symvals.log_buf: + if args.d: + raise LogInvalidOption("Unstructured logs don't offer key/value pair support") + + print(plain_rb_filter(symvals.log_buf.string('utf-8', 'replace'), args)) diff --git a/crash/subsystem/printk/structured_ringbuffer.py b/crash/subsystem/printk/structured_ringbuffer.py new file mode 100644 index 00000000000..456672c10e4 --- /dev/null +++ b/crash/subsystem/printk/structured_ringbuffer.py @@ -0,0 +1,92 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +from typing import Dict, Iterable, Any + +import argparse + +import gdb + +from crash.util.symbols import Types, Symvals +from crash.exceptions import DelayedAttributeError +from crash.subsystem.printk import LogTypeException, LogInvalidOption + +types = Types(['struct printk_log *', 'char *']) +symvals = Symvals(['log_buf', 'log_buf_len', 'log_first_idx', 'log_next_idx', + 'clear_seq', 'log_first_seq', 'log_next_seq']) + + +def log_from_idx(logbuf: gdb.Value, idx: int) -> Dict: + msg = (logbuf + idx).cast(types.printk_log_p_type) + + try: + textval = (msg.cast(types.char_p_type) + + types.printk_log_p_type.target().sizeof) + text = textval.string(length=int(msg['text_len'])) + except UnicodeDecodeError as e: + print(e) + + textlen = int(msg['text_len']) + dictlen = int(msg['dict_len']) + + dictval = (msg.cast(types.char_p_type) + + types.printk_log_p_type.target().sizeof + textlen) + dict = dictval.string(length=dictlen) + + msglen = int(msg['len']) + + # A zero-length message means we wrap back to the beginning + if msglen == 0: + nextidx = 0 + else: + nextidx = idx + msglen + + msgdict = { + 'text' : text[0:textlen], + 'timestamp' : int(msg['ts_nsec']), + 'level' : int(msg['level']), + 'next' : nextidx, + 'dict' : dict[0:dictlen], + } + + return msgdict + +def get_log_msgs() -> Iterable[Dict[str, Any]]: + try: + idx = symvals.log_first_idx + except DelayedAttributeError: + raise LogTypeException('not structured log') from None + + if symvals.clear_seq < symvals.log_first_seq: + # mypy seems to think the preceding clear_seq is fine but this + # one isn't. Derp. + symvals.clear_seq = symvals.log_first_seq # type: ignore + + seq = symvals.clear_seq + idx = symvals.log_first_idx + + while seq < symvals.log_next_seq: + msg = log_from_idx(symvals.log_buf, idx) + seq += 1 + idx = msg['next'] + yield msg + +def structured_rb_show(args: argparse.Namespace) -> None: + for msg in get_log_msgs(): + timestamp = '' + if not args.t: + usecs = int(msg['timestamp']) + timestamp = ('[{:5d}.{:06d}] ' + .format(usecs // 1000000000, + (usecs % 1000000000) // 1000)) + + level = '' + if args.m: + level = '<{:d}>'.format(msg['level']) + + for line in msg['text'].split('\n'): + print('{}{}{}'.format(level, timestamp, line)) + + if (args.d and msg['dict']): + for dict in msg['dict'].split('\0'): + print(' {}'.format(dict)) From 1d3c808733936d1e57598b823c0005988339c20f Mon Sep 17 00:00:00 2001 From: Petr Mladek Date: Fri, 26 Mar 2021 18:26:40 +0100 Subject: [PATCH 337/367] pylog: Allow to read lockless ringbuffer used in kernel-5.10+ Add support for the lockless ring buffer that has been added into kernel-5.10 by the commit 896fbe20b4e2333fb55c ("printk: use the lockless ringbuffer"). 
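The reader mirrors the kernel structures: a sequence number selects a slot in
the descriptor ring and a finalized descriptor points into the text data
ring. A simplified sketch of the walk implemented below (it omits the
handling of reusable descriptors):

    prb = PrbRingBuffer(symvals.prb)        # wraps the kernel's 'prb' symbol
    seq = prb.first_seq()                   # oldest entry still in the buffer
    while True:
        desc = prb.desc_ring.get_desc(seq)  # slot index is seq & count mask
        info = prb.desc_ring.get_info(seq)
        if not prb.is_valid_desc(desc, info, seq):
            break
        print(prb.data_ring.get_text(desc.text_blk_lpos, info.text_len))
        seq += 1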
Signed-off-by: Petr Mladek --- crash/commands/dmesg.py | 7 + crash/subsystem/printk/lockless_ringbuffer.py | 282 ++++++++++++++++++ 2 files changed, 289 insertions(+) create mode 100644 crash/subsystem/printk/lockless_ringbuffer.py diff --git a/crash/commands/dmesg.py b/crash/commands/dmesg.py index f7b68135b1d..75348bb0437 100644 --- a/crash/commands/dmesg.py +++ b/crash/commands/dmesg.py @@ -148,6 +148,7 @@ from crash.commands import Command, ArgumentParser, CommandError from crash.exceptions import DelayedAttributeError from crash.subsystem.printk import LogTypeException, LogInvalidOption +from crash.subsystem.printk.lockless_ringbuffer import lockless_rb_show from crash.subsystem.printk.structured_ringbuffer import structured_rb_show from crash.subsystem.printk.plain_ringbuffer import plain_rb_show @@ -164,6 +165,12 @@ def __init__(self, name: str) -> None: Command.__init__(self, name, parser) def execute(self, args: argparse.Namespace) -> None: + try: + lockless_rb_show(args) + return + except LogTypeException: + pass + try: structured_rb_show(args) return diff --git a/crash/subsystem/printk/lockless_ringbuffer.py b/crash/subsystem/printk/lockless_ringbuffer.py new file mode 100644 index 00000000000..15492cdd675 --- /dev/null +++ b/crash/subsystem/printk/lockless_ringbuffer.py @@ -0,0 +1,282 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +from typing import Dict, Iterable, Any + +import argparse +import sys +import gdb + +from crash.util.symbols import Types, Symvals +from crash.exceptions import DelayedAttributeError +from crash.subsystem.printk import LogTypeException, LogInvalidOption + +types = Types(['struct printk_info *', + 'struct prb_desc *', + 'struct prb_data_block *', + 'unsigned long', + 'char *']) + +symvals = Symvals(['prb', 'clear_seq']) + +# TODO: put to separate type +def atomic_long_read(val: gdb.Value) -> int: + return int(val["counter"]) + +def read_null_end_string(buf: gdb.Value) -> str: + ''' Read null-terminated string from a given buffer. 
''' + text = buf.string(encoding='utf8', errors='replace') + return text.partition('\0')[0] + +class LogConsistencyException(Exception): + pass + +class DevPrintkInfo: + ''' Kernel struct dev_printk_info ''' + subsystem: str + device: str + + def __init__(self, info: gdb.Value) -> None: + self.subsystem = read_null_end_string(info['subsystem']) + self.device = read_null_end_string(info['device']) + + +class PrintkInfo: + ''' Kernel struct printk_info ''' + seq: int # sequence number + ts_nsec: int # timestamp in nanoseconds + text_len: int # length of text message + facility: int # syslog facility + flags: int # internal record flags + level: int # syslog level + caller_id: int # thread id or processor id + dev_info: DevPrintkInfo + + def __init__(self, info: gdb.Value) -> None: + self.seq = int(info['seq']) + self.ts_nsec = int(info['ts_nsec']) + self.text_len = int(info['text_len']) + self.facility = int(info['facility']) + self.flags = int(info['flags']) + self.level = int(info['level']) + self.caller_id = int(info['caller_id']) + self.dev_info = DevPrintkInfo(info['dev_info']) + + +class PrbDataBlkLPos: + ''' Kernel struct prb_data_blk_pos ''' + begin: int + next: int + + def __init__(self, blk_lpos: gdb.Value) -> None: + self.begin = int(blk_lpos['begin']) + self.next = int(blk_lpos['next']) + + +class PrbDesc: + ''' Kernel struct prb_desc ''' + state_var: int + text_blk_lpos: PrbDataBlkLPos + sv_shift: int + sv_mask: int + + def __init__(self, desc: gdb.Value) -> None: + self.state_var = atomic_long_read(desc['state_var']) + self.text_blk_lpos = PrbDataBlkLPos(desc['text_blk_lpos']) + + sv_bits = types.unsigned_long_type.sizeof * 8 + self.sv_shift = sv_bits - 2 + self.sv_mask = 0x3 << self.sv_shift + + def desc_state(self) -> int: + ''' Return state of the descriptor ''' + return (self.state_var & self.sv_mask) >> self.sv_shift + + def is_finalized(self): + ''' Finalized desriptor points to a valid (deta) message ''' + return self.desc_state() == 0x2 + + def is_reusable(self): + ''' + Reusable descriptor still has a valid sequence number + but the data are gone. 
+ ''' + return self.desc_state() == 0x3 + + +class PrbDataBlock: + ''' Kernel struct prb_data_block ''' + id: int + data: gdb.Value + + def __init__(self, dr: gdb.Value) -> None: + self.id = int(dr['id']) + self.data = dr['data'] + +class PrbDataRing: + ''' Kernel struct prb_data_ring ''' + size_bits: int + data: gdb.Value + lpos_mask: int + + def __init__(self, dr: gdb.Value) -> None: + self.size_bits = int(dr['size_bits']) + self.data = dr['data'] + + self.lpos_mask = (1 << self.size_bits) - 1 + + def get_data_block(self, blk_lpos: PrbDataBlkLPos) -> PrbDataBlock: + ''' Return PrbDataBlock for the given blk_lpos ''' + begin_idx = blk_lpos.begin & self.lpos_mask + blk_p = self.data.cast(types.char_p_type) + begin_idx + return PrbDataBlock(blk_p.cast(types.prb_data_block_p_type)) + + def get_text(self, blk_lpos: PrbDataBlkLPos, len: int) -> str: + ''' return string stored at the given blk_lpos ''' + data_block = self.get_data_block(blk_lpos) + return data_block.data.string(length=len) + + +class PrbDescRing: + ''' Kernel struct prb_desc_ring ''' + count_bits: int + descs: gdb.Value + infos: gdb.Value + head_id: int + tail_id: int + mask_id: int + + def __init__(self, dr: gdb.Value) -> None: + self.count_bits = int(dr['count_bits']) + self.descs = dr['descs'] + self.infos = dr['infos'] + self.head_id = atomic_long_read(dr['head_id']) + self.tail_id = atomic_long_read(dr['tail_id']) + self.mask_id = (1 << self.count_bits) - 1 + + def get_idx(self, id: int) -> int: + ''' Return index to the desc ring for the given id ''' + return id & self.mask_id + + def get_desc(self, id: int) -> PrbDesc: + ''' Return prb_desc structure for the given id ''' + idx = self.get_idx(id) + desc_p = (self.descs.cast(types.char_p_type) + + types.prb_desc_p_type.target().sizeof * idx) + return PrbDesc(desc_p.cast(types.prb_desc_p_type)) + + def get_info(self, id: int) -> PrintkInfo: + ''' return printk_info structure for the given id ''' + idx = self.get_idx(id) + info_p = (self.infos.cast(types.char_p_type) + + types.printk_info_p_type.target().sizeof * idx) + return PrintkInfo(info_p.cast(types.printk_info_p_type)) + + +class PrbRingBuffer: + ''' Kernel struct prb_ring_buffer ''' + desc_ring: PrbDescRing + data_ring: PrbDataRing + + def __init__(self, prb: gdb.Value) -> None: + self.desc_ring = PrbDescRing(gdb.Value(prb['desc_ring'])) + self.data_ring = PrbDataRing(gdb.Value(prb['text_data_ring'])) + + def is_valid_desc(self, desc: PrbDesc, info: PrintkInfo, seq: int) -> bool: + ''' Does the descritor constains consistent values? ''' + if (not (desc.is_finalized() or desc.is_reusable())): + return False + # Must match the expected seq number. Otherwise is being updated. + return (info.seq == seq) + + def first_seq(self) -> int: + ''' + Get sequence number of the tail entry. + ''' + + # The lockless algorithm guarantees that the tail entry + # always points to a descriptor in finalized or reusable state. + # The only exception is when the tail is being moved + # to the next entry, see prb_first_seq() in printk_ringbuffer.c + # + # As a result, the valid sequence number should be either in tail_id + # or tail_id + 1 entry. + for i in range(0, 1): + id = self.desc_ring.tail_id + i + desc = self.desc_ring.get_desc(id) + + if (desc.is_finalized() or desc.is_reusable()): + info = self.desc_ring.get_info(id) + return info.seq + + # Something went wrong. Do not continue with an invalid sequence number. 
+ raise LogConsistencyException('Can not find valid info in the tail descriptor') + + def show_msg(self, desc: PrbDesc, info: PrintkInfo, + args: argparse.Namespace) -> None: + ''' + Show the message for the gived descriptor, printk info. + The output is mofified by pylog parameters. + ''' + + timestamp = '' + if not args.t: + timestamp = ('[{:5d}.{:06d}] ' + .format(info.ts_nsec // 1000000000, + (info.ts_nsec % 1000000000) // 1000)) + + level = '' + if args.m: + level = '<{:d}>'.format(info.level) + + text = self.data_ring.get_text(desc.text_blk_lpos, info.text_len) + print('{}{}{}'.format(level,timestamp,text)) + + if (args.d): + # Only two dev_info values are supported at the moment + if (len(info.dev_info.subsystem)): + print(' SUBSYSTEM={}'.format(info.dev_info.subsystem)) + if (len(info.dev_info.device)): + print(' DEVICE={}'.format(info.dev_info.device)) + + def show_log(self, args: argparse.Namespace) -> None: + """ Show the entire log """ + seq = self.first_seq() + + # Iterate over all entries with valid sequence number + while True: + desc = self.desc_ring.get_desc(seq) + info = self.desc_ring.get_info(seq) + if (not self.is_valid_desc(desc, info, seq)): + break + + seq += 1 + + # Sequence numbers are stored in separate ring buffer. + # The descriptor ring might include valid sequence numbers + # but the data might already be replaced. + if (desc.is_reusable()): + continue + + self.show_msg(desc, info, args) + return + +def lockless_rb_show(args: argparse.Namespace) -> None: + """ + Try to show printk log stored in the lockless ringbuffer + + This type of ringbuffer has replaced the structured ring buffer + in kernel-5.10. + + Raises: + LogTypeException: The log is not in the lockless ringbuffer. + """ + + try: + test = symvals.prb + except DelayedAttributeError: + raise LogTypeException('not lockless log') from None + + prb = PrbRingBuffer(symvals.prb) + + prb.show_log(args) From 9ea6dcbfe4baa1ccc4b8282599138110c48b22d6 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Wed, 9 Dec 2020 13:14:30 -0500 Subject: [PATCH 338/367] crash.sh: handle .xz compressed kernel images --- crash.sh | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/crash.sh b/crash.sh index 1c18fa30ca8..5bb4dbac4a4 100755 --- a/crash.sh +++ b/crash.sh @@ -192,19 +192,25 @@ if ! $GDB -nx -batch -x $GDBINIT -x $TEST_GDBINIT; then fi ZKERNEL="$1" -KERNEL="${ZKERNEL%.gz}" if ! 
test -e "$ZKERNEL"; then echo "$ZKERNEL: No such file or directory" exit 1 fi -if test "$KERNEL" != "$ZKERNEL"; then - KERNEL="$TMPDIR/$(basename "$KERNEL")" - zcat $ZKERNEL > $KERNEL -else - KERNEL="$ZKERNEL" -fi +case "$ZKERNEL" in + *.gz) + KERNEL="$TMPDIR/$(basename "${ZKERNEL%.gz}")" + zcat $ZKERNEL > $KERNEL + ;; + *.xz) + KERNEL="$TMPDIR/$(basename "${ZKERNEL%.xz}")" + xzcat $ZKERNEL > $KERNEL + ;; + *) + KERNEL=$ZKERNEL + ;; +esac VMCORE=$2 for path in $SEARCH_DIRS; do From ffbdef251fa9224b9afa1f17832398d67d9239ee Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 25 Jan 2021 08:41:46 -0500 Subject: [PATCH 339/367] crash.sh: allow user-specified $GDB for testing Signed-off-by: Jeff Mahoney --- crash.sh | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/crash.sh b/crash.sh index 5bb4dbac4a4..ed258f47748 100755 --- a/crash.sh +++ b/crash.sh @@ -149,13 +149,14 @@ GDBINIT="$TMPDIR/gdbinit" set -e -GDB= -for gdb in crash-python-gdb gdb; do - if $gdb -v > /dev/null 2> /dev/null; then - GDB=$gdb - break - fi -done +if test -z "$GDB"; then + for gdb in crash-python-gdb gdb; do + if $gdb -v > /dev/null 2> /dev/null; then + GDB=$gdb + break + fi + done +fi if [ -z "$GDB" ]; then echo "ERROR: gdb is not available." >&2 From 0ed0f9da516f02e7dfec842542245e1c307250ed Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Thu, 21 Jul 2022 23:19:03 -0400 Subject: [PATCH 340/367] lockless_ringbuffer: cast char[] to char * before obtaining string For some reason, the char[] array doesn't work with string(). Signed-off-by: Jeff Mahoney --- crash/subsystem/printk/lockless_ringbuffer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crash/subsystem/printk/lockless_ringbuffer.py b/crash/subsystem/printk/lockless_ringbuffer.py index 15492cdd675..736281d3d33 100644 --- a/crash/subsystem/printk/lockless_ringbuffer.py +++ b/crash/subsystem/printk/lockless_ringbuffer.py @@ -134,7 +134,7 @@ def get_data_block(self, blk_lpos: PrbDataBlkLPos) -> PrbDataBlock: def get_text(self, blk_lpos: PrbDataBlkLPos, len: int) -> str: ''' return string stored at the given blk_lpos ''' data_block = self.get_data_block(blk_lpos) - return data_block.data.string(length=len) + return data_block.data.cast(types.char_p_type).string(length=len) class PrbDescRing: From a78d8a7d230978bf8dceb5be204aea0d0e38e89a Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Tue, 6 Sep 2022 11:12:28 -0400 Subject: [PATCH 341/367] update copyright date in README --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index 987c7a894cd..2bfa3e73eca 100644 --- a/README.rst +++ b/README.rst @@ -145,7 +145,7 @@ License: .. start-license -Copyright 2016-2019 Jeff Mahoney, `SUSE `_. +Copyright 2016-2022 Jeff Mahoney, `SUSE `_. crash-python is licensed under the `GPLv2 `_. From 39cde66be9ccbcee7c47ae964003a5df81ca6dfe Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Thu, 28 Jul 2022 10:13:30 -0400 Subject: [PATCH 342/367] pylintrc: disable too-many-lines --- tests/pylintrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pylintrc b/tests/pylintrc index ba13178f0f4..b4358b2fb7e 100644 --- a/tests/pylintrc +++ b/tests/pylintrc @@ -65,7 +65,7 @@ confidence= # --enable=similarities". 
If you want to run only the classes checker, but have # no Warning level messages displayed, use"--disable=all --enable=classes # --disable=W" -disable=missing-docstring,too-few-public-methods,invalid-name,too-many-locals,too-many-instance-attributes,too-many-public-methods,fixme,no-self-use,too-many-branches,too-many-statements,too-many-arguments,too-many-boolean-expressions,line-too-long,duplicate-code,bad-option-value +disable=missing-docstring,too-few-public-methods,invalid-name,too-many-locals,too-many-instance-attributes,too-many-public-methods,fixme,no-self-use,too-many-branches,too-many-statements,too-many-arguments,too-many-boolean-expressions,line-too-long,duplicate-code,bad-option-value,too-many-lines [REPORTS] From b50cc54a5bf527f009f378331a762ec778e6d051 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 22 Jul 2022 11:10:16 -0400 Subject: [PATCH 343/367] crash.sh: always recreate gdbinit file gdbinit was always appended when used out of the git tree, which would lead to odd failures Signed-off-by: Jeff Mahoney --- crash.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crash.sh b/crash.sh index ed258f47748..abe0f9d3531 100755 --- a/crash.sh +++ b/crash.sh @@ -163,6 +163,8 @@ if [ -z "$GDB" ]; then exit 1 fi +:> $GDBINIT + # If we're using crash.sh from the git repo, use the modules from the git repo DIR="$(dirname $0)" if [ -e "$DIR/setup.py" ]; then @@ -183,7 +185,6 @@ if [ -e "$DIR/setup.py" ]; then done else export CRASH_PYTHON_HELP="/usr/share/crash-python/help" - :> $GDBINIT TEST_GDBINIT="/usr/share/crash-python/test-gdb-compatibility.gdbinit" fi From a42efca24c2fe12d94dcc7bd5267c256a8990d36 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 22 Jul 2022 11:13:34 -0400 Subject: [PATCH 344/367] crash.sh: Allow specifying gdb command line via $GDB_CMDLINE environment var When developing gdb extensions, it's helpful to be able to use a gdb that hasn't been fully installed. This means that the --data-directory option must be passed as well. Signed-off-by: Jeff Mahoney --- crash.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crash.sh b/crash.sh index abe0f9d3531..8276c928ac4 100755 --- a/crash.sh +++ b/crash.sh @@ -188,7 +188,7 @@ else TEST_GDBINIT="/usr/share/crash-python/test-gdb-compatibility.gdbinit" fi -if ! $GDB -nx -batch -x $GDBINIT -x $TEST_GDBINIT; then +if ! $GDB $GDB_CMDLINE -nx -batch -x $GDBINIT -x $TEST_GDBINIT; then echo "fatal: crash-python cannot initialize" >&2 exit 1 fi @@ -296,12 +296,12 @@ EOF # This is how we debug gdb problems when running crash if [ "$DEBUGMODE" = "gdb" ]; then - RUN="run -nx -q -x $GDBINIT" + RUN="run $GDB_CMDLINE -nx -q -x $GDBINIT" echo $RUN > $TMPDIR/gdbinit-debug gdb $GDB -nx -q -x $TMPDIR/gdbinit-debug elif [ "$DEBUGMODE" = "valgrind" ]; then valgrind --keep-stacktraces=alloc-and-free $GDB -nh -q -x $GDBINIT else - $GDB -nx -q -x $GDBINIT + $GDB $GDB_CMDLINE -nx -q -x $GDBINIT fi From 6070f624d498f4340c7da8b01432fe6a40039bfd Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 22 Jul 2022 16:03:42 -0400 Subject: [PATCH 345/367] tests: allow use of custom gdb and command line Similar to running pycrash with a gdb under development, being able to use it to run the tests is helpful as well. 
Signed-off-by: Jeff Mahoney --- tests/run-gdb.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tests/run-gdb.sh b/tests/run-gdb.sh index b09f53b7df4..26f8b8e3af2 100755 --- a/tests/run-gdb.sh +++ b/tests/run-gdb.sh @@ -1,5 +1,10 @@ #!/bin/bash DIR=$(dirname "$0") + +if test -z "$GDB"; then + GDB=crash-python-gdb +fi + echo "Starting gdb" -exec crash-python-gdb -nx -batch -x $DIR/gdbinit-boilerplate "$@" +exec $GDB $GDB_CMDLINE -nx -batch -x $DIR/gdbinit-boilerplate "$@" From 29782e66daae7bcf47f946be968afec6694ed68d Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 22 Jul 2022 15:57:44 -0400 Subject: [PATCH 346/367] lockless_ringbuffer: add type annotations Add missing type annotations for is_finalized and is_reusable. Signed-off-by: Jeff Mahoney --- crash/subsystem/printk/lockless_ringbuffer.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crash/subsystem/printk/lockless_ringbuffer.py b/crash/subsystem/printk/lockless_ringbuffer.py index 736281d3d33..355e7a40fe9 100644 --- a/crash/subsystem/printk/lockless_ringbuffer.py +++ b/crash/subsystem/printk/lockless_ringbuffer.py @@ -92,11 +92,11 @@ def desc_state(self) -> int: ''' Return state of the descriptor ''' return (self.state_var & self.sv_mask) >> self.sv_shift - def is_finalized(self): + def is_finalized(self) -> bool: ''' Finalized desriptor points to a valid (deta) message ''' return self.desc_state() == 0x2 - def is_reusable(self): + def is_reusable(self) -> bool: ''' Reusable descriptor still has a valid sequence number but the data are gone. From 9a1c3e2123079ffc67aaa7f8536885d268eeee8c Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Wed, 27 Jul 2022 23:08:08 -0400 Subject: [PATCH 347/367] lockless_ringbuffer: fix lint issues --- crash/subsystem/printk/lockless_ringbuffer.py | 50 +++++++++---------- 1 file changed, 23 insertions(+), 27 deletions(-) diff --git a/crash/subsystem/printk/lockless_ringbuffer.py b/crash/subsystem/printk/lockless_ringbuffer.py index 355e7a40fe9..bb6362c0675 100644 --- a/crash/subsystem/printk/lockless_ringbuffer.py +++ b/crash/subsystem/printk/lockless_ringbuffer.py @@ -1,15 +1,13 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from typing import Dict, Iterable, Any - import argparse -import sys + import gdb from crash.util.symbols import Types, Symvals from crash.exceptions import DelayedAttributeError -from crash.subsystem.printk import LogTypeException, LogInvalidOption +from crash.subsystem.printk import LogTypeException types = Types(['struct printk_info *', 'struct prb_desc *', @@ -131,10 +129,10 @@ def get_data_block(self, blk_lpos: PrbDataBlkLPos) -> PrbDataBlock: blk_p = self.data.cast(types.char_p_type) + begin_idx return PrbDataBlock(blk_p.cast(types.prb_data_block_p_type)) - def get_text(self, blk_lpos: PrbDataBlkLPos, len: int) -> str: + def get_text(self, blk_lpos: PrbDataBlkLPos, _len: int) -> str: ''' return string stored at the given blk_lpos ''' data_block = self.get_data_block(blk_lpos) - return data_block.data.cast(types.char_p_type).string(length=len) + return data_block.data.cast(types.char_p_type).string(length=_len) class PrbDescRing: @@ -154,20 +152,20 @@ def __init__(self, dr: gdb.Value) -> None: self.tail_id = atomic_long_read(dr['tail_id']) self.mask_id = (1 << self.count_bits) - 1 - def get_idx(self, id: int) -> int: + def get_idx(self, _id: int) -> int: ''' Return index to the desc ring for the given id ''' - return id & self.mask_id + return _id & self.mask_id - def 
get_desc(self, id: int) -> PrbDesc: + def get_desc(self, _id: int) -> PrbDesc: ''' Return prb_desc structure for the given id ''' - idx = self.get_idx(id) + idx = self.get_idx(_id) desc_p = (self.descs.cast(types.char_p_type) + types.prb_desc_p_type.target().sizeof * idx) return PrbDesc(desc_p.cast(types.prb_desc_p_type)) - def get_info(self, id: int) -> PrintkInfo: + def get_info(self, _id: int) -> PrintkInfo: ''' return printk_info structure for the given id ''' - idx = self.get_idx(id) + idx = self.get_idx(_id) info_p = (self.infos.cast(types.char_p_type) + types.printk_info_p_type.target().sizeof * idx) return PrintkInfo(info_p.cast(types.printk_info_p_type)) @@ -184,10 +182,10 @@ def __init__(self, prb: gdb.Value) -> None: def is_valid_desc(self, desc: PrbDesc, info: PrintkInfo, seq: int) -> bool: ''' Does the descritor constains consistent values? ''' - if (not (desc.is_finalized() or desc.is_reusable())): + if not (desc.is_finalized() or desc.is_reusable()): return False # Must match the expected seq number. Otherwise is being updated. - return (info.seq == seq) + return info.seq == seq def first_seq(self) -> int: ''' @@ -202,11 +200,11 @@ def first_seq(self) -> int: # As a result, the valid sequence number should be either in tail_id # or tail_id + 1 entry. for i in range(0, 1): - id = self.desc_ring.tail_id + i - desc = self.desc_ring.get_desc(id) + _id = self.desc_ring.tail_id + i + desc = self.desc_ring.get_desc(_id) - if (desc.is_finalized() or desc.is_reusable()): - info = self.desc_ring.get_info(id) + if desc.is_finalized() or desc.is_reusable(): + info = self.desc_ring.get_info(_id) return info.seq # Something went wrong. Do not continue with an invalid sequence number. @@ -230,13 +228,13 @@ def show_msg(self, desc: PrbDesc, info: PrintkInfo, level = '<{:d}>'.format(info.level) text = self.data_ring.get_text(desc.text_blk_lpos, info.text_len) - print('{}{}{}'.format(level,timestamp,text)) + print('{}{}{}'.format(level, timestamp, text)) - if (args.d): + if args.d: # Only two dev_info values are supported at the moment - if (len(info.dev_info.subsystem)): + if info.dev_info.subsystem: print(' SUBSYSTEM={}'.format(info.dev_info.subsystem)) - if (len(info.dev_info.device)): + if info.dev_info.device: print(' DEVICE={}'.format(info.dev_info.device)) def show_log(self, args: argparse.Namespace) -> None: @@ -247,7 +245,7 @@ def show_log(self, args: argparse.Namespace) -> None: while True: desc = self.desc_ring.get_desc(seq) info = self.desc_ring.get_info(seq) - if (not self.is_valid_desc(desc, info, seq)): + if not self.is_valid_desc(desc, info, seq): break seq += 1 @@ -255,7 +253,7 @@ def show_log(self, args: argparse.Namespace) -> None: # Sequence numbers are stored in separate ring buffer. # The descriptor ring might include valid sequence numbers # but the data might already be replaced. 
- if (desc.is_reusable()): + if desc.is_reusable(): continue self.show_msg(desc, info, args) @@ -273,10 +271,8 @@ def lockless_rb_show(args: argparse.Namespace) -> None: """ try: - test = symvals.prb + prb = PrbRingBuffer(symvals.prb) except DelayedAttributeError: raise LogTypeException('not lockless log') from None - prb = PrbRingBuffer(symvals.prb) - prb.show_log(args) From 270f9802f926d2dd7b8b38bafc0b6d17984a3ddf Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Thu, 28 Jul 2022 00:31:45 -0400 Subject: [PATCH 348/367] structured_ringbuffer: fix lint warnings --- crash/subsystem/printk/structured_ringbuffer.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/crash/subsystem/printk/structured_ringbuffer.py b/crash/subsystem/printk/structured_ringbuffer.py index 456672c10e4..2ce233d388f 100644 --- a/crash/subsystem/printk/structured_ringbuffer.py +++ b/crash/subsystem/printk/structured_ringbuffer.py @@ -31,7 +31,7 @@ def log_from_idx(logbuf: gdb.Value, idx: int) -> Dict: dictval = (msg.cast(types.char_p_type) + types.printk_log_p_type.target().sizeof + textlen) - dict = dictval.string(length=dictlen) + msgdict = dictval.string(length=dictlen) msglen = int(msg['len']) @@ -41,16 +41,14 @@ def log_from_idx(logbuf: gdb.Value, idx: int) -> Dict: else: nextidx = idx + msglen - msgdict = { + return { 'text' : text[0:textlen], 'timestamp' : int(msg['ts_nsec']), 'level' : int(msg['level']), 'next' : nextidx, - 'dict' : dict[0:dictlen], + 'dict' : msgdict[0:dictlen], } - return msgdict - def get_log_msgs() -> Iterable[Dict[str, Any]]: try: idx = symvals.log_first_idx @@ -88,5 +86,5 @@ def structured_rb_show(args: argparse.Namespace) -> None: print('{}{}{}'.format(level, timestamp, line)) if (args.d and msg['dict']): - for dict in msg['dict'].split('\0'): - print(' {}'.format(dict)) + for entry in msg['dict'].split('\0'): + print(' {}'.format(entry)) From 368da924b04be42426e0781e07b4337ec2977f3a Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Wed, 27 Jul 2022 23:32:33 -0400 Subject: [PATCH 349/367] lint: fix whitespace This commit only modifies whitespace to make pylint happy. 
Signed-off-by: Jeff Mahoney --- crash/arch/x86_64.py | 2 +- crash/commands/dev.py | 4 ++-- crash/subsystem/storage/__init__.py | 3 +-- crash/subsystem/storage/blockmq.py | 2 +- crash/types/slab.py | 8 ++++---- 5 files changed, 9 insertions(+), 10 deletions(-) diff --git a/crash/arch/x86_64.py b/crash/arch/x86_64.py index 58c6be880c8..73cc415e28a 100644 --- a/crash/arch/x86_64.py +++ b/crash/arch/x86_64.py @@ -106,7 +106,7 @@ class x86_64Architecture(CrashArchitecture): ident = "i386:x86-64" aliases = ["x86_64"] - _frame_offset : Optional[int] = None + _frame_offset: Optional[int] = None def __init__(self) -> None: super(x86_64Architecture, self).__init__() diff --git a/crash/commands/dev.py b/crash/commands/dev.py index 5a6ebdc1031..d388eaf5687 100644 --- a/crash/commands/dev.py +++ b/crash/commands/dev.py @@ -17,14 +17,14 @@ class DevCommand(Command): """display character and block devices""" - def __init__(self, name : str) -> None: + def __init__(self, name: str) -> None: parser = ArgumentParser(prog=name) parser.add_argument('-d', action='store_true', default=False) super().__init__(name, parser) - def execute(self, args : argparse.Namespace) -> None: + def execute(self, args: argparse.Namespace) -> None: if args.d: print("{:^5} {:^16} {:^10} {:^16} {:^5} {:^5} {:^5} {:^5}" .format("MAJOR", "GENDISK", "NAME", "REQUEST_QUEUE", diff --git a/crash/subsystem/storage/__init__.py b/crash/subsystem/storage/__init__.py index 1ed6ec90b4c..0651522894d 100644 --- a/crash/subsystem/storage/__init__.py +++ b/crash/subsystem/storage/__init__.py @@ -367,7 +367,7 @@ def _rq_in_flight(request: gdb.Value) -> bool: elif struct_has_member(request_s, 'atomic_flags'): def _rq_in_flight(request: gdb.Value) -> bool: return (request['atomic_flags'] & - (1 << int(types.enum_rq_atomic_flags_type['REQ_ATOM_STARTED'].enumval)) != 0) + (1 << int(types.enum_rq_atomic_flags_type['REQ_ATOM_STARTED'].enumval)) != 0) else: def _rq_in_flight(request: gdb.Value) -> bool: return request['cmd_flags'] & REQ_STARTED != 0 # type: ignore @@ -377,4 +377,3 @@ def _rq_in_flight(request: gdb.Value) -> bool: type_cbs = TypeCallbacks([('struct device_type', _check_types), ('enum req_flag_bits', _export_req_flags), ('struct request', _check_struct_request)]) - diff --git a/crash/subsystem/storage/blockmq.py b/crash/subsystem/storage/blockmq.py index 407a3bd7903..b48455228f8 100644 --- a/crash/subsystem/storage/blockmq.py +++ b/crash/subsystem/storage/blockmq.py @@ -14,7 +14,7 @@ class NoQueueError(RuntimeError): pass types = Types(['struct request', 'struct request_queue', - 'struct sbitmap_queue', 'struct blk_mq_hw_ctx' ]) + 'struct sbitmap_queue', 'struct blk_mq_hw_ctx']) def _check_queue_type(queue: gdb.Value) -> None: if not queue_is_mq(queue): diff --git a/crash/types/slab.py b/crash/types/slab.py index 2ca38a6e059..af50e1eae44 100644 --- a/crash/types/slab.py +++ b/crash/types/slab.py @@ -213,7 +213,7 @@ class SlabSLAB(Slab): BUFCTL_END = ~0 & 0xffffffff - kmem_cache : 'KmemCacheSLAB' + kmem_cache: 'KmemCacheSLAB' def __init__(self, gdb_obj: gdb.Value, kmem_cache: 'KmemCacheSLAB', error: bool = False) -> None: @@ -501,7 +501,7 @@ def check(self, slabtype: int, nid: int) -> int: class SlabSLUB(Slab): - kmem_cache : 'KmemCacheSLUB' + kmem_cache: 'KmemCacheSLUB' def __init__(self, gdb_obj: gdb.Value, kmem_cache: 'KmemCacheSLUB') -> None: super().__init__(gdb_obj, kmem_cache) @@ -770,7 +770,7 @@ class KmemCacheSLAB(KmemCache): slab_list_name = {0: "partial", 1: "full", 2: "free"} slab_list_fullname = {0: "slabs_partial", 1: 
"slabs_full", 2: "slabs_free"} - buffer_size : int + buffer_size: int def __init__(self, name: str, gdb_obj: gdb.Value) -> None: super().__init__(name, gdb_obj) @@ -1012,7 +1012,7 @@ def ___check_slabs(self, node: gdb.Value, slabtype: int, nid: int, count = errors['num_ok'] if (count and errors['first_ok'] is not None and - errors['last_ok'] is not None): + errors['last_ok'] is not None): print(f"{errors['num_ok']} slab objects were ok between " f"0x{errors['first_ok']:x} and 0x{errors['last_ok']:x}") From 5401ba326fd54a23d840d1710e8ee1a4f3b2e83f Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Thu, 28 Jul 2022 16:24:49 -0400 Subject: [PATCH 350/367] crash.arch.x86_64: add typing for _scheduled_rip --- crash/arch/x86_64.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/crash/arch/x86_64.py b/crash/arch/x86_64.py index 73cc415e28a..ea494422161 100644 --- a/crash/arch/x86_64.py +++ b/crash/arch/x86_64.py @@ -114,6 +114,8 @@ def __init__(self) -> None: # Stop stack traces with addresses below this self.filter = KernelFrameFilter(0xffff000000000000) + self._scheduled_rip: int + def setup_thread_info(self, thread: gdb.InferiorThread) -> None: task = thread.info.task_struct thread_info = task['stack'].cast(types.thread_info_p_type) @@ -159,7 +161,7 @@ def adjust_scheduled_frame_offset(self, rsp: gdb.Value) -> gdb.Value: return rsp + self._frame_offset return rsp - def get_scheduled_rip(self) -> None: + def get_scheduled_rip(self) -> int: return self._scheduled_rip @classmethod From f6af51d79d122d5ac6ef49767542329397ac59f4 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Thu, 28 Jul 2022 00:22:21 -0400 Subject: [PATCH 351/367] lint: clean up imports --- crash/addrxlat.py | 4 ++-- crash/arch/x86_64.py | 4 ++-- crash/commands/dmesg.py | 5 ----- crash/subsystem/printk/plain_ringbuffer.py | 4 +--- crash/subsystem/printk/structured_ringbuffer.py | 2 +- crash/subsystem/storage/block.py | 1 - crash/types/sbitmap.py | 1 - 7 files changed, 6 insertions(+), 15 deletions(-) diff --git a/crash/addrxlat.py b/crash/addrxlat.py index 0638e7da532..568bf4cd0a5 100644 --- a/crash/addrxlat.py +++ b/crash/addrxlat.py @@ -1,9 +1,9 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -import gdb - import addrxlat + +import gdb import crash from crash.cache.syscache import utsname diff --git a/crash/arch/x86_64.py b/crash/arch/x86_64.py index ea494422161..40427cb41f2 100644 --- a/crash/arch/x86_64.py +++ b/crash/arch/x86_64.py @@ -1,10 +1,10 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -import gdb +from typing import Optional import re -from typing import Optional +import gdb from crash.arch import CrashArchitecture, KernelFrameFilter, register_arch from crash.arch import FetchRegistersCallback diff --git a/crash/commands/dmesg.py b/crash/commands/dmesg.py index 75348bb0437..c2d63868d2e 100644 --- a/crash/commands/dmesg.py +++ b/crash/commands/dmesg.py @@ -139,14 +139,9 @@ ... 
""" -from typing import Dict, Iterable, Any - import argparse -import gdb - from crash.commands import Command, ArgumentParser, CommandError -from crash.exceptions import DelayedAttributeError from crash.subsystem.printk import LogTypeException, LogInvalidOption from crash.subsystem.printk.lockless_ringbuffer import lockless_rb_show from crash.subsystem.printk.structured_ringbuffer import structured_rb_show diff --git a/crash/subsystem/printk/plain_ringbuffer.py b/crash/subsystem/printk/plain_ringbuffer.py index fd4174f3732..c8c8fc99528 100644 --- a/crash/subsystem/printk/plain_ringbuffer.py +++ b/crash/subsystem/printk/plain_ringbuffer.py @@ -4,10 +4,8 @@ import argparse import re -import gdb - from crash.util.symbols import Types, Symvals -from crash.subsystem.printk import LogTypeException, LogInvalidOption +from crash.subsystem.printk import LogInvalidOption types = Types(['char *']) symvals = Symvals(['log_buf', 'log_buf_len']) diff --git a/crash/subsystem/printk/structured_ringbuffer.py b/crash/subsystem/printk/structured_ringbuffer.py index 2ce233d388f..bf389f68193 100644 --- a/crash/subsystem/printk/structured_ringbuffer.py +++ b/crash/subsystem/printk/structured_ringbuffer.py @@ -9,7 +9,7 @@ from crash.util.symbols import Types, Symvals from crash.exceptions import DelayedAttributeError -from crash.subsystem.printk import LogTypeException, LogInvalidOption +from crash.subsystem.printk import LogTypeException types = Types(['struct printk_log *', 'char *']) symvals = Symvals(['log_buf', 'log_buf_len', 'log_first_idx', 'log_next_idx', diff --git a/crash/subsystem/storage/block.py b/crash/subsystem/storage/block.py index 2427a17a441..62abdf33ca2 100644 --- a/crash/subsystem/storage/block.py +++ b/crash/subsystem/storage/block.py @@ -5,7 +5,6 @@ import gdb -from crash.util.symbols import Types from crash.subsystem.storage import queue_is_mq from crash.subsystem.storage.blocksq import sq_for_each_request_in_queue, \ sq_requests_in_flight, sq_requests_queued diff --git a/crash/types/sbitmap.py b/crash/types/sbitmap.py index 33f0ad439af..dacef415aeb 100644 --- a/crash/types/sbitmap.py +++ b/crash/types/sbitmap.py @@ -10,7 +10,6 @@ import gdb -from crash.exceptions import InvalidArgumentError from crash.util.symbols import Types from crash.util import struct_has_member From 743a8eb2cd7ad609de0f2ce6c6b2aa6a0969760a Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 22 Jul 2022 11:17:16 -0400 Subject: [PATCH 352/367] crash: fix requirements checking The crash module imported the kdump target, which depends on the functionality in GDB that the requirements checker is supposed to discover. Move the target check to crash.kernel instead. 
Signed-off-by: Jeff Mahoney --- crash/__init__.py | 13 ++----------- crash/addrxlat.py | 2 +- crash/arch/__init__.py | 2 +- crash/kernel.py | 17 ++++++++++++++++- crash/types/node.py | 2 +- crash/types/page.py | 2 +- 6 files changed, 22 insertions(+), 16 deletions(-) diff --git a/crash/__init__.py b/crash/__init__.py index 19ed28db6fe..e7e966536a9 100644 --- a/crash/__init__.py +++ b/crash/__init__.py @@ -3,14 +3,5 @@ import gdb -import kdump.target - -def current_target() -> kdump.target.Target: - target = gdb.current_target() - if target is None: - raise ValueError("No current target") - - if not isinstance(target, kdump.target.Target): - raise ValueError(f"Current target {type(target)} is not supported") - - return target +def archname() -> str: + return gdb.selected_inferior().architecture().name() diff --git a/crash/addrxlat.py b/crash/addrxlat.py index 568bf4cd0a5..75044124543 100644 --- a/crash/addrxlat.py +++ b/crash/addrxlat.py @@ -49,7 +49,7 @@ def cb_read64(self, faddr: addrxlat.FullAddress) -> int: class CrashAddressTranslation: def __init__(self) -> None: try: - target = crash.current_target() + target = gdb.current_target() self.context = target.kdump.get_addrxlat_ctx() self.system = target.kdump.get_addrxlat_sys() except AttributeError: diff --git a/crash/arch/__init__.py b/crash/arch/__init__.py index 3046ea881a1..f38ac75d122 100644 --- a/crash/arch/__init__.py +++ b/crash/arch/__init__.py @@ -41,7 +41,7 @@ class CrashArchitecture: _fetch_registers: Type[FetchRegistersCallback] def __init__(self) -> None: - target = crash.current_target() + target = gdb.current_target() try: target.set_fetch_registers(self._fetch_registers()) except AttributeError: diff --git a/crash/kernel.py b/crash/kernel.py index b5f1ada38a3..dfa9a039e36 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -11,6 +11,8 @@ from elftools.elf.elffile import ELFFile import gdb +import kdump.target + import crash import crash.arch import crash.arch.x86_64 @@ -128,12 +130,26 @@ class CrashKernel: symvals = Symvals(['init_task']) symbols = Symbols(['runqueues']) + def check_target(self) -> kdump.target.Target: + target = gdb.current_target() + + if target is None: + raise ValueError("No current target") + + if not isinstance(target, kdump.target.Target): + raise ValueError(f"Current target {type(target)} is not supported") + + return target + # pylint: disable=unused-argument def __init__(self, roots: PathSpecifier = None, vmlinux_debuginfo: PathSpecifier = None, module_path: PathSpecifier = None, module_debuginfo_path: PathSpecifier = None, verbose: bool = False, debug: bool = False) -> None: + + self.target = self.check_target() + self.findmap: Dict[str, Dict[Any, Any]] = dict() self.modules_order: Dict[str, Dict[str, str]] = dict() obj = gdb.objfiles()[0] @@ -179,7 +195,6 @@ def __init__(self, roots: PathSpecifier = None, self.arch = archclass() - self.target = crash.current_target() self.vmcore = self.target.kdump self.crashing_thread: Optional[gdb.InferiorThread] = None diff --git a/crash/types/node.py b/crash/types/node.py index cc4fc71609b..7658f26f5aa 100644 --- a/crash/types/node.py +++ b/crash/types/node.py @@ -28,7 +28,7 @@ def numa_node_id(cpu: int) -> int: Returns: :obj:`int`: The NUMA node ID for the specified CPU. 
""" - if crash.current_target().arch.name() == "powerpc:common64": + if crash.archname() == "powerpc:common64": return int(symvals.numa_cpu_lookup_table[cpu]) return int(get_percpu_var(symbols.numa_node, cpu)) diff --git a/crash/types/page.py b/crash/types/page.py index 8648615fe92..a487975da55 100644 --- a/crash/types/page.py +++ b/crash/types/page.py @@ -60,7 +60,7 @@ class Page: def setup_page_type(cls, gdbtype: gdb.Type) -> None: # TODO: should check config, but that failed to work on ppc64, hardcode # 64k for now - if crash.current_target().arch.name() == "powerpc:common64": + if crash.archname() == "powerpc:common64": cls.PAGE_SHIFT = 16 # also a config cls.directmap_base = 0xc000000000000000 From 0c9b564818e93a5719ddd98fed6f196c12702ac2 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Wed, 27 Jul 2022 23:03:59 -0400 Subject: [PATCH 353/367] crash.util.symbols: add facility to delay until the target is initialized This commit removes the complexity of ordering import, initialization, and delayed symbol/type resolution that made some kinds of changes fragile. By default, the callbacks won't occur until a target based on gdb.LinuxKernelTarget is on top. This implies that the kernel has been properly relocated and we don't need to do things like flush the symbol cache or manually order imports and initializations. We can also now pause and unpause callbacks around module loading so that the module loading process drops from about 5 seconds per module to 5 seconds total. Some types are needed during early initialization and the waiting behavior can be overridden by specifying wait_for_target=False. Signed-off-by: Jeff Mahoney --- crash/infra/callback.py | 160 +++++++++++++++++++++----------- crash/infra/lookup.py | 91 ++++++++---------- crash/kernel.py | 8 +- crash/util/symbols.py | 46 ++++----- tests/test_infra_lookup.py | 26 ++++-- tests/test_list.py | 4 + tests/test_objfile_callbacks.py | 63 ++++++++++++- tests/test_percpu.py | 2 + tests/test_rbtree.py | 2 + tests/test_syscache.py | 7 +- tests/test_syscmd.py | 2 + tests/test_types_bitmap.py | 2 + tests/test_util.py | 3 + tests/test_util_symbols.py | 40 +++++--- 14 files changed, 293 insertions(+), 163 deletions(-) diff --git a/crash/infra/callback.py b/crash/infra/callback.py index ddee8d5a5f2..ede7ec1daa0 100644 --- a/crash/infra/callback.py +++ b/crash/infra/callback.py @@ -1,7 +1,9 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from typing import Callable, Any, Union, TypeVar, Optional +from typing import Any, Callable, List, Optional, TypeVar, Union + +import abc import gdb @@ -16,7 +18,7 @@ def __init__(self, callback_obj: 'ObjfileEventCallback') -> None: super().__init__(msg) self.callback_obj = callback_obj -class ObjfileEventCallback: +class ObjfileEventCallback(metaclass=abc.ABCMeta): """ A generic objfile callback class @@ -30,11 +32,61 @@ class ObjfileEventCallback: Consumers of this interface must also call :meth:`connect_callback` to connect the object to the callback infrastructure. 
""" - def __init__(self) -> None: + + _target_waitlist: List['ObjfileEventCallback'] = list() + _pending_list: List['ObjfileEventCallback'] = list() + _paused: bool = False + _connected_to_objfile_callback: bool = False + + def check_target(self) -> bool: + return isinstance(gdb.current_target(), gdb.LinuxKernelTarget) + + def __init__(self, wait_for_target: bool = True) -> None: self.completed = False self.connected = False + self._waiting_for_target = wait_for_target and not self.check_target() + + if not self._connected_to_objfile_callback: + # pylint: disable=no-member + gdb.events.new_objfile.connect(self._new_objfile_callback) + self._connected_to_objfile_callback = True + + # pylint: disable=unused-argument + @classmethod + def _new_objfile_callback(cls, event: gdb.NewObjFileEvent) -> None: + cls.evaluate_all() + + @classmethod + def target_ready(cls) -> None: + for callback in cls._target_waitlist: + callback.complete_wait_for_target() + + cls._target_waitlist[:] = list() + cls._update_pending() + + @classmethod + def evaluate_all(cls) -> None: + if not cls._paused: + for callback in cls._pending_list: + callback.evaluate(False) + cls._update_pending() - self._setup_symbol_cache_flush_callback() + @classmethod + def pause(cls) -> None: + cls._paused = True + + @classmethod + def unpause(cls) -> None: + cls._paused = False + cls.evaluate_all() + @classmethod + def dump_lists(cls) -> None: + print(f"Pending list: {[str(x) for x in ObjfileEventCallback._pending_list]}") + print(f"Target waitlist: {[str(x) for x in ObjfileEventCallback._target_waitlist]}") + + def complete_wait_for_target(self) -> None: + self._waiting_for_target = False + self.evaluate(False) def connect_callback(self) -> bool: """ @@ -49,27 +101,26 @@ def connect_callback(self) -> bool: if self.connected: return False - self.connected = True - - # We don't want to do lookups immediately if we don't have - # an objfile. It'll fail for any custom types but it can - # also return builtin types that are eventually changed. - objfiles = gdb.objfiles() - if objfiles: - result = self.check_ready() - if not (result is None or result is False): - completed = self.callback(result) - if completed is None: - completed = True - self.completed = completed + if not self._waiting_for_target: + # We don't want to do lookups immediately if we don't have + # an objfile. It'll fail for any custom types but it can + # also return builtin types that are eventually changed. + if gdb.objfiles(): + self.evaluate() + else: + self._target_waitlist.append(self) if self.completed is False: - # pylint: disable=no-member - gdb.events.new_objfile.connect(self._new_objfile_callback) + self.connected = True + self._pending_list.append(self) return self.completed - def complete(self) -> None: + @classmethod + def _update_pending(cls) -> None: + cls._pending_list[:] = [x for x in cls._pending_list if x.connected] + + def complete(self, update_now: bool = True) -> None: """ Complete and disconnect this callback from the event system. @@ -77,43 +128,26 @@ def complete(self) -> None: :obj:`CallbackCompleted`: This callback has already been completed. 
""" if not self.completed: - # pylint: disable=no-member - gdb.events.new_objfile.disconnect(self._new_objfile_callback) self.completed = True - self.connected = False + if self.connected: + self.connected = False + if update_now: + self._update_pending() else: raise CallbackCompleted(self) - _symbol_cache_flush_setup = False - @classmethod - def _setup_symbol_cache_flush_callback(cls) -> None: - if not cls._symbol_cache_flush_setup: - # pylint: disable=no-member - gdb.events.new_objfile.connect(cls._flush_symbol_cache_callback) - cls._symbol_cache_flush_setup = True - - - # GDB does this itself, but Python is initialized ahead of the - # symtab code. The symtab observer is behind the python observers - # in the execution queue so the cache flush executes /after/ us. - @classmethod - # pylint: disable=unused-argument - def _flush_symbol_cache_callback(cls, event: gdb.NewObjFileEvent) -> None: - gdb.execute("maint flush-symbol-cache") - - # pylint: disable=unused-argument - def _new_objfile_callback(self, event: gdb.NewObjFileEvent) -> None: - # GDB purposely copies the event list prior to calling the callbacks - # If we remove an event from another handler, it will still be sent - if self.completed: - return - - result = self.check_ready() - if not (result is None or result is False): - completed = self.callback(result) - if completed is True or completed is None: - self.complete() - + def evaluate(self, update_now: bool = True) -> None: + if not self._waiting_for_target: + try: + result = self.check_ready() + if not (result is None or result is False): + completed = self.callback(result) + if completed is True or completed is None: + self.complete(update_now) + except gdb.error: + pass + + @abc.abstractmethod def check_ready(self) -> Any: """ The method that derived classes implement for detecting when the @@ -124,8 +158,9 @@ def check_ready(self) -> Any: be passed untouched to :meth:`callback` if the result is anything other than :obj:`None` or :obj:`False`. """ - raise NotImplementedError("check_ready must be implemented by derived class.") + pass + @abc.abstractmethod def callback(self, result: Any) -> Optional[bool]: """ The callback that derived classes implement for handling the @@ -139,4 +174,19 @@ def callback(self, result: Any) -> Optional[bool]: the callback succeeded and will be completed and removed. Otherwise, the callback will stay connected for future completion. 
""" - raise NotImplementedError("callback must be implemented by derived class.") + pass + +def target_ready() -> None: + ObjfileEventCallback.target_ready() + +def evaluate_all() -> None: + ObjfileEventCallback.evaluate_all() + +def pause_objfile_callbacks() -> None: + ObjfileEventCallback.pause() + +def unpause_objfile_callbacks() -> None: + ObjfileEventCallback.unpause() + +def dump_lists() -> None: + ObjfileEventCallback.dump_lists() diff --git a/crash/infra/lookup.py b/crash/infra/lookup.py index ad50cdc0b0e..7d84ad47d77 100644 --- a/crash/infra/lookup.py +++ b/crash/infra/lookup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from typing import Tuple, Any, Union, Optional +from typing import Any, Optional, Tuple, Type, Union import gdb @@ -9,6 +9,7 @@ from crash.infra.callback import Callback from crash.exceptions import DelayedAttributeError +# pylint: disable=abstract-method class NamedCallback(ObjfileEventCallback): """ A base class for Callbacks with names @@ -28,31 +29,14 @@ class NamedCallback(ObjfileEventCallback): attrname (:obj:`str`): The name of symbol or type being resolved translated for use as an attribute name. """ - def __init__(self, name: str, callback: Callback, - attrname: str = None) -> None: - super().__init__() + def __init__(self, name: str, callback: Callback, wait_for_target: bool = True, **kwargs: Any) -> None: + super().__init__(wait_for_target) self.name = name - self.attrname = self.name - - if attrname is not None: - self.attrname = attrname + self.attrname = kwargs.get('attrname', self.name) self._callback = callback - # This is silly but it avoids pylint abstract-method warnings - def check_ready(self) -> Any: - """ - The method that derived classes implement for detecting when the - conditions required to call the callback have been met. - - Returns: - :obj:`object`: This method can return an arbitrary object. It will - be passed untouched to :meth:`callback` if the result is anything - other than :obj:`None` or :obj:`False`. - """ - raise NotImplementedError("check_ready must be implemented by derived class.") - def callback(self, result: Any) -> Union[None, bool]: """ The callback for handling the sucessful result of :meth:`check_ready`. @@ -82,9 +66,9 @@ class MinimalSymbolCallback(NamedCallback): callback: The callback to execute when the minimal symbol is discovered symbol_file (optional): Name of the symbol file to use """ - def __init__(self, name: str, callback: Callback, + def __init__(self, name: str, callback: Callback, wait_for_target: bool = True, symbol_file: str = None) -> None: - super().__init__(name, callback) + super().__init__(name, callback, wait_for_target) self.symbol_file = symbol_file @@ -120,9 +104,9 @@ class SymbolCallback(NamedCallback): is assumed to be one of the value associated with :obj:`gdb.Symbol` constant, i.e. SYMBOL_*_DOMAIN. 
""" - def __init__(self, name: str, callback: Callback, + def __init__(self, name: str, callback: Callback, wait_for_target: bool = True, domain: int = gdb.SYMBOL_VAR_DOMAIN) -> None: - super().__init__(name, callback) + super().__init__(name, callback, wait_for_target) self.domain = domain @@ -183,11 +167,11 @@ class TypeCallback(NamedCallback): block (optional): The :obj:`gdb.Block` to search for the symbol """ - def __init__(self, name: str, callback: Callback, - block: gdb.Block = None) -> None: + def __init__(self, name: str, callback: Callback, wait_for_target: bool = True, + block: gdb.Block = None, **kwargs: Any) -> None: (name, attrname, self.pointer) = self.resolve_type(name) - super().__init__(name, callback, attrname) + super().__init__(name, callback, wait_for_target, attrname=attrname) self.block = block @@ -264,21 +248,23 @@ class DelayedValue: A generic class for making class attributes available that describe to-be-loaded symbols, minimal symbols, and types. """ - def __init__(self, name: str, attrname: str = None) -> None: + def __init__(self, name: str, wait_for_target: bool = True, **kwargs: Any) -> None: if name is None or not isinstance(name, str): raise ValueError("Name must be a valid string") self.name = name - - if attrname is None: - self.attrname = name - else: - self.attrname = attrname + self.wait_for_target = wait_for_target + self.attrname = kwargs.get('attrname', self.name) assert self.attrname is not None + self.cb: NamedCallback + self.value: Any = None + def attach_callback(self, cbcls: Type[NamedCallback], **kwargs: Any) -> None: + self.cb = cbcls(self.name, self.callback, self.wait_for_target, **kwargs) + def get(self) -> Any: if self.value is None: raise DelayedAttributeError(self.name) @@ -288,6 +274,13 @@ def callback(self, value: Any) -> None: if self.value is not None: return self.value = value + try: + del self.cb + except AttributeError: + pass + + def __str__(self) -> str: + return "{} attached with {}".format(self.__class__, str(self.cb)) class DelayedMinimalSymbol(DelayedValue): """ @@ -296,12 +289,9 @@ class DelayedMinimalSymbol(DelayedValue): Args: name: The name of the minimal symbol """ - def __init__(self, name: str) -> None: - super().__init__(name) - self.cb = MinimalSymbolCallback(name, self.callback) - - def __str__(self) -> str: - return "{} attached with {}".format(self.__class__, str(self.cb)) + def __init__(self, name: str, wait_for_target: bool = True) -> None: + super().__init__(name, wait_for_target) + self.attach_callback(MinimalSymbolCallback) class DelayedSymbol(DelayedValue): """ @@ -310,12 +300,9 @@ class DelayedSymbol(DelayedValue): Args: name: The name of the symbol """ - def __init__(self, name: str) -> None: - super().__init__(name) - self.cb = SymbolCallback(name, self.callback) - - def __str__(self) -> str: - return "{} attached with {}".format(self.__class__, str(self.cb)) + def __init__(self, name: str, wait_for_target: bool = True) -> None: + super().__init__(name, wait_for_target) + self.attach_callback(SymbolCallback) class DelayedType(DelayedValue): """ @@ -324,10 +311,11 @@ class DelayedType(DelayedValue): Args: name: The name of the type. 
""" - def __init__(self, name: str) -> None: + def __init__(self, name: str, wait_for_target: bool = True, + block: gdb.Block = None) -> None: (name, attrname, self.pointer) = TypeCallback.resolve_type(name) - super().__init__(name, attrname) - self.cb = TypeCallback(name, self.callback) + super().__init__(name, wait_for_target, attrname=attrname) + self.attach_callback(TypeCallback, block=block) def __str__(self) -> str: return "{} attached with {}".format(self.__class__, str(self.callback)) @@ -352,7 +340,7 @@ def callback(self, value: gdb.Symbol) -> None: self.value = symval def __str__(self) -> str: - return "{} attached with {}".format(self.__class__, str(self.cb)) + return "{} attached with {}".format(self.__class__, str(self.callback)) class DelayedMinimalSymval(DelayedMinimalSymbol): """ @@ -364,6 +352,3 @@ class DelayedMinimalSymval(DelayedMinimalSymbol): """ def callback(self, value: gdb.MinSymbol) -> None: self.value = int(value.value().address) - - def __str__(self) -> str: - return "{} attached with {}".format(self.__class__, str(self.cb)) diff --git a/crash/kernel.py b/crash/kernel.py index dfa9a039e36..44121256e7c 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -21,6 +21,8 @@ from crash.util import get_symbol_value from crash.util.symbols import Types, Symvals, Symbols from crash.exceptions import MissingSymbolError, InvalidArgumentError +from crash.infra.callback import pause_objfile_callbacks, unpause_objfile_callbacks +from crash.cache.syscache import utsname class CrashKernelError(RuntimeError): """Raised when an error occurs while initializing the debugging session""" @@ -450,13 +452,12 @@ def load_modules(self, verbose: bool = False, debug: bool = False) -> None: This does not include a failure to locate a module or its debuginfo. """ - import crash.cache.syscache # pylint: disable=redefined-outer-name - version = crash.cache.syscache.utsname.release - print("Loading modules for {}".format(version), end='') + print("Loading modules for {}".format(utsname.release), end='') if verbose: print(":", flush=True) failed = 0 loaded = 0 + pause_objfile_callbacks() for module in for_each_module(): modname = "{}".format(module['name'].string()) modfname = "{}.ko".format(modname) @@ -535,6 +536,7 @@ def load_modules(self, verbose: bool = False, debug: bool = False) -> None: # We shouldn't need this again, so why keep it around? del self.findmap self.findmap = {} + unpause_objfile_callbacks() def _normalize_modname(self, mod: str) -> str: return mod.replace('-', '_') diff --git a/crash/util/symbols.py b/crash/util/symbols.py index 0c11a56bc33..075338cf626 100644 --- a/crash/util/symbols.py +++ b/crash/util/symbols.py @@ -48,14 +48,14 @@ class DelayedCollection: the container object *or* the contained object if it has been overridden via :meth:`override`. """ - def __init__(self, cls: Type[DelayedValue], names: Names) -> None: + def __init__(self, cls: Type[DelayedValue], names: Names, wait_for_target: bool) -> None: self.attrs: Dict[str, DelayedValue] = {} if isinstance(names, str): names = [names] for name in names: - t = cls(name) + t = cls(name, wait_for_target=wait_for_target) self.attrs[t.attrname] = t self.attrs[t.name] = t @@ -129,8 +129,8 @@ class Types(DelayedCollection): names: A :obj:`str` or :obj:`list` of :obj:`str` containing the names of the types to resolve. 
""" - def __init__(self, names: Names) -> None: - super(Types, self).__init__(DelayedType, names) + def __init__(self, names: Names, wait_for_target: bool = True) -> None: + super(Types, self).__init__(DelayedType, names, wait_for_target) def override(self, name: str, value: gdb.Type) -> None: # type: ignore """ @@ -171,8 +171,8 @@ class Symbols(DelayedCollection): names: A :obj:`str` or :obj:`list` of :obj:`str` containing the names of the symbols to resolve. """ - def __init__(self, names: Names) -> None: - super(Symbols, self).__init__(DelayedSymbol, names) + def __init__(self, names: Names, wait_for_target: bool = True) -> None: + super(Symbols, self).__init__(DelayedSymbol, names, wait_for_target) class Symvals(DelayedCollection): """ @@ -205,8 +205,8 @@ class Symvals(DelayedCollection): names: A :obj:`str` or :obj:`list` of :obj:`str` containing the names of the symbols to resolve. """ - def __init__(self, names: Names) -> None: - super(Symvals, self).__init__(DelayedSymval, names) + def __init__(self, names: Names, wait_for_target: bool = True) -> None: + super(Symvals, self).__init__(DelayedSymval, names, wait_for_target) class MinimalSymbols(DelayedCollection): """ @@ -239,8 +239,8 @@ class MinimalSymbols(DelayedCollection): names: A :obj:`str` or :obj:`list` of :obj:`str` containing the names of the minimal symbols to resolve. """ - def __init__(self, names: Names) -> None: - super(MinimalSymbols, self).__init__(DelayedMinimalSymbol, names) + def __init__(self, names: Names, wait_for_target: bool = True) -> None: + super().__init__(DelayedMinimalSymbol, names, wait_for_target) class MinimalSymvals(DelayedCollection): """ @@ -268,8 +268,8 @@ class MinimalSymvals(DelayedCollection): names: A :obj:`str` or :obj:`list` of :obj:`str` containing the names of the minimal symbols to resolve. """ - def __init__(self, names: Names) -> None: - super(MinimalSymvals, self).__init__(DelayedMinimalSymval, names) + def __init__(self, names: Names, wait_for_target: bool = True) -> None: + super().__init__(DelayedMinimalSymval, names, wait_for_target) class DelayedValues(DelayedCollection): """ @@ -303,30 +303,30 @@ class DelayedValues(DelayedCollection): Args: names: The names to use for the :obj:`.DelayedValue` objects. 
""" - def __init__(self, names: Names) -> None: - super(DelayedValues, self).__init__(DelayedValue, names) + def __init__(self, names: Names, wait_for_target: bool = True) -> None: + super().__init__(DelayedValue, names, wait_for_target) CallbackSpecifier = Tuple[str, Callable] CallbackSpecifiers = Union[List[CallbackSpecifier], CallbackSpecifier] class CallbackCollection: - def __init__(self, cls: Type[NamedCallback], - cbs: CallbackSpecifiers) -> None: + def __init__(self, cls: Type[NamedCallback], cbs: CallbackSpecifiers, + wait_for_target: bool) -> None: if isinstance(cbs, tuple): cbs = [cbs] for cb in cbs: - t = cls(cb[0], cb[1]) + t = cls(cb[0], cb[1], wait_for_target=wait_for_target) setattr(self, t.attrname, t) class TypeCallbacks(CallbackCollection): - def __init__(self, cbs: CallbackSpecifiers) -> None: - super().__init__(TypeCallback, cbs) + def __init__(self, cbs: CallbackSpecifiers, wait_for_target: bool = True) -> None: + super().__init__(TypeCallback, cbs, wait_for_target) class SymbolCallbacks(CallbackCollection): - def __init__(self, cbs: CallbackSpecifiers) -> None: - super().__init__(SymbolCallback, cbs) + def __init__(self, cbs: CallbackSpecifiers, wait_for_target: bool = True) -> None: + super().__init__(SymbolCallback, cbs, wait_for_target) class MinimalSymbolCallbacks(CallbackCollection): - def __init__(self, cbs: CallbackSpecifiers) -> None: - super().__init__(MinimalSymbolCallback, cbs) + def __init__(self, cbs: CallbackSpecifiers, wait_for_target: bool = True) -> None: + super().__init__(MinimalSymbolCallback, cbs, wait_for_target) diff --git a/tests/test_infra_lookup.py b/tests/test_infra_lookup.py index 8f79ccdd7e8..ca180fbea08 100644 --- a/tests/test_infra_lookup.py +++ b/tests/test_infra_lookup.py @@ -1,10 +1,10 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: import unittest +from unittest.mock import patch import gdb from crash.exceptions import DelayedAttributeError -from crash.infra.callback import ObjfileEventCallback from crash.infra.lookup import SymbolCallback, TypeCallback from crash.infra.lookup import MinimalSymbolCallback from crash.infra.lookup import DelayedType, DelayedSymbol, DelayedSymval @@ -74,14 +74,18 @@ def setUp(self): def tearDown(self): gdb.execute("file") - def load_file(self): + def load_util_file(self): gdb.execute("file tests/test-util") + def load_list_file(self): + gdb.execute("file tests/test-list") + def get_test_class(self): class test_class(object): def __init__(self): self.found = False - cb = MinimalSymbolCallback('test_struct', self.callback) + with patch.object(MinimalSymbolCallback, 'check_target', return_value=True): + cb = MinimalSymbolCallback('test_struct', self.callback) def callback(self, result): self.found = True @@ -93,12 +97,12 @@ def test_minsymbol_no_symbol_found(self): test_class = self.get_test_class() x = test_class() self.assertFalse(x.found) - gdb.execute("file tests/test-list") + self.load_list_file() self.assertFalse(x.found) def test_minsymbol_found_immediately(self): test_class = self.get_test_class() - self.load_file() + self.load_util_file() x = test_class() self.assertTrue(x.found) self.assertTrue(isinstance(x.result, gdb.MinSymbol)) @@ -107,7 +111,7 @@ def test_minsymbol_found_after_load(self): test_class = self.get_test_class() x = test_class() self.assertFalse(x.found) - self.load_file() + self.load_util_file() self.assertTrue(x.found) self.assertTrue(isinstance(x.result, gdb.MinSymbol)) @@ -115,9 +119,9 @@ def 
test_minsymbol_not_found_in_early_load_then_found_after_load(self): test_class = self.get_test_class() x = test_class() self.assertFalse(x.found) - gdb.execute("file tests/test-list") + self.load_list_file() self.assertFalse(x.found) - self.load_file() + self.load_util_file() self.assertTrue(x.found) self.assertTrue(isinstance(x.result, gdb.MinSymbol)) @@ -132,7 +136,8 @@ def get_test_class(self): class test_class(object): def __init__(self): self.found = False - cb = SymbolCallback('test_struct', self.callback) + with patch.object(SymbolCallback, 'check_target', return_value=True): + cb = SymbolCallback('test_struct', self.callback) def callback(self, result): self.found = True @@ -183,7 +188,8 @@ def get_test_class(self): class test_class(object): def __init__(self): self.found = False - cb = TypeCallback('struct test', self.callback) + with patch.object(TypeCallback, 'check_target', return_value=True): + cb = TypeCallback('struct test', self.callback) def callback(self, result): self.found = True diff --git a/tests/test_list.py b/tests/test_list.py index 4a8dec2beab..51216083bba 100644 --- a/tests/test_list.py +++ b/tests/test_list.py @@ -2,8 +2,11 @@ # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: import unittest +from unittest.mock import patch import gdb +import crash.infra.callback + from crash.exceptions import ArgumentTypeError, UnexpectedGDBTypeError from crash.exceptions import InvalidArgumentError from crash.types.list import list_for_each, list_for_each_entry @@ -15,6 +18,7 @@ def get_symbol(name): class TestList(unittest.TestCase): def setUp(self): gdb.execute("file tests/test-list") + crash.infra.callback.target_ready() self.list_head = gdb.lookup_type("struct list_head") def tearDown(self): diff --git a/tests/test_objfile_callbacks.py b/tests/test_objfile_callbacks.py index ae1906e3dc8..2cba0c63d14 100644 --- a/tests/test_objfile_callbacks.py +++ b/tests/test_objfile_callbacks.py @@ -2,6 +2,7 @@ # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: import unittest +from unittest.mock import patch import gdb from crash.util import safe_get_symbol_value @@ -17,12 +18,13 @@ def tearDown(self): def load_file(self): gdb.execute("file tests/test-util") - def test_registering(self): + def get_test_class(self): class test_class(ObjfileEventCallback): - def __init__(self): + def __init__(self, *args, **kwargs): self.called = False self.checked = False - super(test_class, self).__init__() + self.result = None + super(test_class, self).__init__(*args, **kwargs) self.connect_callback() @@ -34,12 +36,67 @@ def callback(self, result): self.called = True self.result = result + return test_class + + def test_registering(self): + test_class = self.get_test_class() + with patch.object(test_class, 'check_target', return_value=True): + x = test_class() + + self.assertFalse(x.called) + self.assertFalse(x.completed) + self.assertFalse(x.checked) + self.assertTrue(x.result is None) + + self.load_file() + self.assertTrue(x.checked) + self.assertTrue(x.called) + self.assertTrue(x.completed) + + self.assertTrue(isinstance(x.result, gdb.Value)) + + def test_early_callback_with_target_wait(self): + test_class = self.get_test_class() + x = test_class() + self.assertFalse(x.called) self.assertFalse(x.completed) self.assertFalse(x.checked) + self.assertTrue(x.result is None) + self.load_file() + self.assertFalse(x.called) + self.assertFalse(x.completed) + self.assertFalse(x.checked) + self.assertTrue(x.result is None) + + x.target_ready() + self.assertTrue(x.checked) + 
self.assertTrue(x.called) + self.assertTrue(x.completed) + + self.assertTrue(isinstance(x.result, gdb.Value)) + + def test_early_callback_without_target_wait(self): + test_class = self.get_test_class() + + x = test_class(False) + + self.assertFalse(x.called) + self.assertFalse(x.completed) + self.assertFalse(x.checked) + self.assertTrue(x.result is None) + + self.load_file() + self.assertTrue(x.called) + self.assertTrue(x.completed) + self.assertTrue(x.checked) + self.assertTrue(isinstance(x.result, gdb.Value)) + + x.target_ready() self.assertTrue(x.checked) self.assertTrue(x.called) self.assertTrue(x.completed) + self.assertTrue(isinstance(x.result, gdb.Value)) diff --git a/tests/test_percpu.py b/tests/test_percpu.py index 9087fccb6c5..7e88038ea06 100644 --- a/tests/test_percpu.py +++ b/tests/test_percpu.py @@ -5,11 +5,13 @@ import gdb import crash +import crash.infra.callback import crash.types.percpu as percpu class TestPerCPU(unittest.TestCase): def setUp(self): gdb.execute("file tests/test-percpu", to_string=True) + crash.infra.callback.target_ready() try: print() diff --git a/tests/test_rbtree.py b/tests/test_rbtree.py index 5346ac7900a..5f0f17791d2 100644 --- a/tests/test_rbtree.py +++ b/tests/test_rbtree.py @@ -4,6 +4,7 @@ import unittest import gdb +import crash.infra.callback from crash.types.rbtree import rbtree_postorder_for_each, rbtree_postorder_for_each_entry def get_symbol(name): @@ -12,6 +13,7 @@ def get_symbol(name): class TestRbtree(unittest.TestCase): def setUp(self): gdb.execute("file tests/test-rbtree", to_string=True) + crash.infra.callback.target_ready() try: print() print("--- Unsuppressable gdb output ---", end='') diff --git a/tests/test_syscache.py b/tests/test_syscache.py index 1ee1089bf69..f3cdfc97f9e 100644 --- a/tests/test_syscache.py +++ b/tests/test_syscache.py @@ -2,10 +2,14 @@ # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: import unittest +from unittest.mock import patch import gdb import sys from importlib import reload +import crash.infra.callback +from crash.infra.callback import ObjfileEventCallback + from crash.exceptions import DelayedAttributeError fake_config = ( """ @@ -35,6 +39,7 @@ def cycle_namespace(self): self.utsname = crash.cache.syscache.utsname self.kernel = crash.cache.syscache.kernel self.config = crash.cache.syscache.config + crash.infra.callback.target_ready() def clear_namespace(self): gdb.execute("file") @@ -50,7 +55,7 @@ def _decompress_config_buffer(self): def test_utsname_no_sym(self): gdb.execute("file") - gdb.execute("maint flush-symbol-cache") + gdb.execute("maint flush symbol-cache") self.cycle_namespace() utsname = self.CrashUtsnameCache() with self.assertRaises(DelayedAttributeError): diff --git a/tests/test_syscmd.py b/tests/test_syscmd.py index 7387d18600c..d5a7d002df6 100644 --- a/tests/test_syscmd.py +++ b/tests/test_syscmd.py @@ -7,12 +7,14 @@ from io import StringIO from crash.exceptions import MissingSymbolError +import crash.infra.callback from crash.commands import CommandLineError from crash.commands.syscmd import SysCommand class TestSysCmd(unittest.TestCase): def setUp(self): gdb.execute("file tests/test-syscache", to_string=True) + crash.infra.callback.target_ready() self.cmd = SysCommand("pysys") def tearDown(self): diff --git a/tests/test_types_bitmap.py b/tests/test_types_bitmap.py index b11cf2d4f93..c5ffda9fd4c 100644 --- a/tests/test_types_bitmap.py +++ b/tests/test_types_bitmap.py @@ -4,6 +4,7 @@ import unittest import sys +import crash.infra.callback import crash.types.bitmap as bm import 
gdb @@ -11,6 +12,7 @@ class TestBitmap(unittest.TestCase): def setUp(self): gdb.execute("file tests/test-percpu") + crash.infra.callback.target_ready() ulong = gdb.lookup_type('unsigned long') ulong_array = ulong.array(0) diff --git a/tests/test_util.py b/tests/test_util.py index dd3fdf2ae32..797a31ca09f 100644 --- a/tests/test_util.py +++ b/tests/test_util.py @@ -3,6 +3,7 @@ import unittest import gdb +import crash.infra.callback from crash.exceptions import MissingTypeError, MissingSymbolError from crash.util import offsetof, container_of, resolve_type from crash.util import get_symbol_value, safe_get_symbol_value @@ -10,12 +11,14 @@ from crash.exceptions import NotStructOrUnionError from crash.util import InvalidComponentError + def getsym(sym): return gdb.lookup_symbol(sym, None)[0].value() class TestUtil(unittest.TestCase): def setUp(self): gdb.execute("file tests/test-util") + crash.infra.callback.target_ready() self.ulong = gdb.lookup_type('unsigned long') self.ulongsize = self.ulong.sizeof self.test_struct = gdb.lookup_type("struct test") diff --git a/tests/test_util_symbols.py b/tests/test_util_symbols.py index e32c6728e5f..4a2f10dce5c 100644 --- a/tests/test_util_symbols.py +++ b/tests/test_util_symbols.py @@ -1,11 +1,13 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: import unittest +from unittest.mock import patch import platform import gdb from crash.exceptions import DelayedAttributeError +from crash.infra.lookup import NamedCallback from crash.util.symbols import MinimalSymbols, Symbols, Symvals, Types from crash.util.symbols import TypeCallbacks, SymbolCallbacks from crash.util.symbols import MinimalSymbolCallbacks @@ -19,7 +21,8 @@ def load_file(self): def msymbol_test(self): class Test(object): - msymbols = MinimalSymbols([ 'test_struct' ]) + with patch.object(NamedCallback, 'check_target', return_value=True): + msymbols = MinimalSymbols([ 'test_struct' ]) return Test def test_bad_msymbol_name(self): @@ -51,7 +54,8 @@ def test_msymbol_available_at_start(self): def symbol_test(self): class Test(object): - symbols = Symbols([ 'test_struct' ]) + with patch.object(NamedCallback, 'check_target', return_value=True): + symbols = Symbols([ 'test_struct' ]) return Test def test_bad_symbol_name(self): @@ -83,7 +87,8 @@ def test_symbol_available_at_start(self): def symval_test(self): class Test(object): - symvals = Symvals( [ 'test_struct' ] ) + with patch.object(NamedCallback, 'check_target', return_value=True): + symvals = Symvals( [ 'test_struct' ] ) return Test def test_bad_symval_name(self): @@ -115,7 +120,8 @@ def test_symval_available_at_start(self): def type_test(self): class Test(object): - types = Types( [ 'struct test' ] ) + with patch.object(NamedCallback, 'check_target', return_value=True): + types = Types( [ 'struct test' ] ) return Test def test_bad_type_name(self): @@ -149,7 +155,8 @@ def test_type_available_at_start(self): def ptype_test(self): class Test(object): - types = Types( [ 'struct test *' ]) + with patch.object(NamedCallback, 'check_target', return_value=True): + types = Types( [ 'struct test *' ]) return Test def test_bad_ptype_name(self): @@ -190,8 +197,9 @@ class nested(object): def check_ulong(cls, gdbtype): cls.ulong_valid = True - type_cbs = TypeCallbacks( [ ('unsigned long', - nested.check_ulong) ] ) + with patch.object(NamedCallback, 'check_target', return_value=True): + type_cbs = TypeCallbacks( [ ('unsigned long', + nested.check_ulong) ] ) return Test def test_type_callback_nofile(self): @@ -211,17 +219,19 
@@ def test_type_callback(self): def type_callback_test_multi(self): class Test(object): - class nested(object): - types = Types( [ 'unsigned long' ] ) + with patch.object(NamedCallback, 'check_target', return_value=True): + class nested(object): + types = Types( [ 'unsigned long' ] ) - ulong_valid = False + ulong_valid = False - @classmethod - def check_ulong(cls, gdbtype): - cls.ulong_valid = True + @classmethod + def check_ulong(cls, gdbtype): + cls.ulong_valid = True - type_cbs = TypeCallbacks( [ ('unsigned long', - nested.check_ulong) ] ) + with patch.object(NamedCallback, 'check_target', return_value=True): + type_cbs = TypeCallbacks( [ ('unsigned long', + nested.check_ulong) ] ) return Test From 02332747e595444c196adb8d13af0def424d63cc Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Thu, 28 Jul 2022 13:27:46 -0400 Subject: [PATCH 354/367] crash.addrxlat: align with newest version The type=enum argument has been replaced with an os_type=str argument. Signed-off-by: Jeff Mahoney --- crash/addrxlat.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crash/addrxlat.py b/crash/addrxlat.py index 75044124543..e2b1322cc38 100644 --- a/crash/addrxlat.py +++ b/crash/addrxlat.py @@ -57,7 +57,7 @@ def __init__(self) -> None: self.system = addrxlat.System() self.system.os_init(self.context, arch=utsname.machine, - type=addrxlat.OS_LINUX) + os_type="linux") self.is_non_auto = False xlatmap = self.system.get_map(addrxlat.SYS_MAP_MACHPHYS_KPHYS) diff --git a/setup.py b/setup.py index 3a8db1515ce..4575cd0aaeb 100644 --- a/setup.py +++ b/setup.py @@ -14,7 +14,7 @@ }, python_requires='>=3.6', - install_requires = [ 'pyelftools' ], + install_requires = [ 'pyelftools', 'addrxlat' ], author = "Jeff Mahoney", author_email = "jeffm@suse.com", From 3bfe5f80b717578d8e55f67eef595d8d8c82ea54 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 22 Jul 2022 11:38:59 -0400 Subject: [PATCH 355/367] crash-python: Update to use gdb-12.1 with kdumpfile target The crash-python GDB implementation has been updated to use gdb 12.1, which integrates libkdumpfile into libbfd and offers a small semantic target to improve performance. As part of the update, the registers interface has changed, we populate the thread list from the target implementation in C, and use the core file interface. Signed-off-by: Jeff Mahoney --- README.rst | 4 +- crash.sh | 7 ++- crash/addrxlat.py | 7 ++- crash/arch/__init__.py | 27 ++++---- crash/arch/x86_64.py | 85 +++++++++++++------------ crash/exceptions.py | 6 ++ crash/kernel.py | 39 +++++------- crash/requirements/__init__.py | 18 +++--- crash/requirements/test_target.py | 10 +-- doc-source/mock/gdb/__init__.py | 16 ++++- kdump/target.py | 100 ++++-------------------------- test-gdb-compatibility.gdbinit | 16 +---- tests/stubs/_gdb.pyi | 34 +++++----- 13 files changed, 153 insertions(+), 216 deletions(-) diff --git a/README.rst b/README.rst index 2bfa3e73eca..b5a5da11590 100644 --- a/README.rst +++ b/README.rst @@ -91,8 +91,8 @@ It requires the following components to work successfully: - `Python `_ 3.6 or newer - `pyelftools `_ -- `libkdumpfile `_ -- `GDB `_ with python extensions and built with Python 3.6 or newer. +- `libkdumpfile `_ with the kdumpfile.from_pointer method to allow cloning native kdump contexts. +- `GDB `_ with python extensions and built with Python 3.6 or newer. If you are using a SUSE or openSUSE release, pre-built packages are available on the `Open Build Service `_. 
diff --git a/crash.sh b/crash.sh index 8276c928ac4..2404be90fdb 100755 --- a/crash.sh +++ b/crash.sh @@ -233,12 +233,15 @@ set prompt py-crash> set height 0 set print pretty on +file $KERNEL +core $VMCORE + python from kdump.target import Target target = Target(debug=False) end -target kdumpfile $KERNEL $VMCORE +target kdumpfile python import sys @@ -290,8 +293,6 @@ except RuntimeError as e: traceback.print_exc() sys.exit(1) -target.unregister() -del target EOF # This is how we debug gdb problems when running crash diff --git a/crash/addrxlat.py b/crash/addrxlat.py index e2b1322cc38..a73308f6eb2 100644 --- a/crash/addrxlat.py +++ b/crash/addrxlat.py @@ -5,6 +5,7 @@ import gdb import crash +import kdump.target from crash.cache.syscache import utsname from crash.util import offsetof @@ -50,8 +51,10 @@ class CrashAddressTranslation: def __init__(self) -> None: try: target = gdb.current_target() - self.context = target.kdump.get_addrxlat_ctx() - self.system = target.kdump.get_addrxlat_sys() + if not isinstance(target, kdump.target.Target): + raise TypeError("Not using kdump target") + self.context = target.kdumpfile.get_addrxlat_ctx() + self.system = target.kdumpfile.get_addrxlat_sys() except AttributeError: self.context = TranslationContext() self.system = addrxlat.System() diff --git a/crash/arch/__init__.py b/crash/arch/__init__.py index f38ac75d122..633bc710dfd 100644 --- a/crash/arch/__init__.py +++ b/crash/arch/__init__.py @@ -4,6 +4,8 @@ from typing import List, Iterator, Any, Optional, Type import gdb +from gdb import RegisterNameType, RegisterCollectionType +from gdb import FetchRegistersCallbackType from gdb.FrameDecorator import FrameDecorator import crash @@ -15,41 +17,42 @@ class FetchRegistersCallback: The architecture code must implement the :meth:`fetch_active` and :meth:`fetch_scheduled` methods. """ - def fetch_active(self, thread: gdb.InferiorThread, register: int) -> None: + def fetch_active(self, thread: gdb.InferiorThread, + register: RegisterNameType) -> RegisterCollectionType: raise NotImplementedError("Target has no fetch_active callback") def fetch_scheduled(self, thread: gdb.InferiorThread, - register: int) -> None: + register: RegisterNameType) -> RegisterCollectionType: raise NotImplementedError("Target has no fetch_scheduled callback") def __call__(self, thread: gdb.InferiorThread, - register: gdb.Register) -> None: - if register is None: - regnum = -1 - else: - regnum = register.regnum + register: RegisterNameType) -> RegisterCollectionType: if thread.info.active: - return self.fetch_active(thread, regnum) + return self.fetch_active(thread, register) - return self.fetch_scheduled(thread, regnum) + return self.fetch_scheduled(thread, register) class CrashArchitecture: ident = "base-class" aliases: List[str] = list() - _fetch_registers: Type[FetchRegistersCallback] + _fetch_registers: FetchRegistersCallbackType def __init__(self) -> None: target = gdb.current_target() + if target is None: + raise ValueError("No target loaded") from None + if not isinstance(target, gdb.LinuxKernelTarget): + raise ValueError("Incorrect target loaded") from None + try: target.set_fetch_registers(self._fetch_registers()) except AttributeError: raise NotImplementedError("No fetch_registers callback defined") from None @classmethod - def set_fetch_registers(cls, - callback: Type[FetchRegistersCallback]) -> None: + def set_fetch_registers(cls, callback: FetchRegistersCallbackType) -> None: """ Set a fetch_regisers callback for the Target to use. 
diff --git a/crash/arch/x86_64.py b/crash/arch/x86_64.py index 40427cb41f2..03de4342614 100644 --- a/crash/arch/x86_64.py +++ b/crash/arch/x86_64.py @@ -3,6 +3,7 @@ from typing import Optional import re +import sys import gdb @@ -17,63 +18,63 @@ # pylint: disable=abstract-method class _FetchRegistersBase(FetchRegistersCallback): - def fetch_active(self, thread: gdb.InferiorThread, register: int) -> None: + def fetch_active(self, thread: gdb.InferiorThread, + register: Optional[gdb.RegisterDescriptor]) -> gdb.RegisterCollectionType: + regmap = { + "rflags" : "eflags" + } + registers = {} task = thread.info for reg in task.regs: - if reg == "rip" and register not in (16, -1): + if (reg == "rip" and register is not None and + register.name != "rip"): continue try: - thread.registers[reg].value = task.regs[reg] + # vmcore uses rflags, gdb uses eflags + if reg in regmap: + reg = regmap[reg] + registers[reg] = task.regs[reg] except KeyError: pass - def fetch_scheduled(self, thread: gdb.InferiorThread, - register: int) -> None: - pass + return registers # pylint: disable=abstract-method class _FRC_inactive_task_frame(_FetchRegistersBase): def fetch_scheduled(self, thread: gdb.InferiorThread, - register: int) -> None: + register: Optional[gdb.RegisterDescriptor]) -> gdb.RegisterCollectionType: + registers: gdb.RegisterCollectionType = {} task = thread.info.task_struct rsp = task['thread']['sp'].cast(types.unsigned_long_p_type) rsp = thread.arch.adjust_scheduled_frame_offset(rsp) - thread.registers['rsp'].value = rsp + registers['rsp'] = rsp frame = rsp.cast(types.inactive_task_frame_p_type).dereference() - # Only write rip when requested; It resets the frame cache - if register in (16, -1): - thread.registers['rip'].value = thread.arch.get_scheduled_rip() - if register == 16: - return - - thread.registers['rbp'].value = frame['bp'] - thread.registers['rbx'].value = frame['bx'] - thread.registers['r12'].value = frame['r12'] - thread.registers['r13'].value = frame['r13'] - thread.registers['r14'].value = frame['r14'] - thread.registers['r15'].value = frame['r15'] - thread.registers['cs'].value = 2*8 - thread.registers['ss'].value = 3*8 + registers['rip'] = thread.arch.get_scheduled_rip() + registers['rbp'] = frame['bp'] + registers['rbx'] = frame['bx'] + registers['r12'] = frame['r12'] + registers['r13'] = frame['r13'] + registers['r14'] = frame['r14'] + registers['r15'] = frame['r15'] + registers['cs'] = 2*8 + registers['ss'] = 3*8 thread.info.stack_pointer = rsp thread.info.valid_stack = True + return registers + class _FRC_thread_return(_FetchRegistersBase): def fetch_scheduled(self, thread: gdb.InferiorThread, - register: int) -> None: + register: Optional[gdb.RegisterDescriptor]) -> gdb.RegisterCollectionType: + registers: gdb.RegisterCollectionType = {} task = thread.info.task_struct - # Only write rip when requested; It resets the frame cache - if register in (16, -1): - thread.registers['rip'].value = msymvals.thread_return - if register == 16: - return - rsp = task['thread']['sp'].cast(types.unsigned_long_p_type) rbp = rsp.dereference().cast(types.unsigned_long_p_type) rbx = (rbp - 1).dereference() @@ -89,19 +90,22 @@ def fetch_scheduled(self, thread: gdb.InferiorThread, # if ex: # print("EXCEPTION STACK: pid {:d}".format(task['pid'])) - thread.registers['rsp'].value = rsp - thread.registers['rbp'].value = rbp - thread.registers['rbx'].value = rbx - thread.registers['r12'].value = r12 - thread.registers['r13'].value = r13 - thread.registers['r14'].value = r14 - thread.registers['r15'].value 
= r15 - thread.registers['cs'].value = 2*8 - thread.registers['ss'].value = 3*8 + registers['rip'] = msymvals.thread_return + registers['rsp'] = rsp + registers['rbp'] = rbp + registers['rbx'] = rbx + registers['r12'] = r12 + registers['r13'] = r13 + registers['r14'] = r14 + registers['r15'] = r15 + registers['cs'] = 2*8 + registers['ss'] = 3*8 thread.info.stack_pointer = rsp thread.info.valid_stack = True + return registers + class x86_64Architecture(CrashArchitecture): ident = "i386:x86-64" aliases = ["x86_64"] @@ -130,7 +134,7 @@ def setup_scheduled_frame_offset(self, task: gdb.Value) -> None: return top = int(task['stack']) + 16*1024 - callq = re.compile("callq.*<(\w+)>") + callq = re.compile("callq?.*<(\w+)>") orig_rsp = rsp = task['thread']['sp'].cast(types.unsigned_long_p_type) @@ -145,6 +149,9 @@ def setup_scheduled_frame_offset(self, task: gdb.Value) -> None: count += 1 continue + if not insn: + continue + m = callq.search(insn) if m and m.group(1) == "__switch_to_asm": self._frame_offset = rsp - orig_rsp + 1 diff --git a/crash/exceptions.py b/crash/exceptions.py index d0c4d3b3cde..d03e9347107 100644 --- a/crash/exceptions.py +++ b/crash/exceptions.py @@ -11,6 +11,12 @@ class IncompatibleGDBError(RuntimeError): def __init__(self, message: str) -> None: super().__init__(self._fmt.format(message)) +class IncompatibleKdumpfileError(RuntimeError): + """This version of kdumpfile is incompatible""" + _fmt = "The installed kdumpfile module doesn't provide {}" + def __init__(self, message: str) -> None: + super().__init__(self._fmt.format(message)) + class MissingSymbolError(RuntimeError): """The requested symbol cannot be located.""" diff --git a/crash/kernel.py b/crash/kernel.py index 44121256e7c..bb959009203 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -18,7 +18,7 @@ import crash.arch.x86_64 import crash.arch.ppc64 from crash.types.module import for_each_module, for_each_module_section -from crash.util import get_symbol_value +from crash.util import get_symbol_value, get_typed_pointer from crash.util.symbols import Types, Symvals, Symbols from crash.exceptions import MissingSymbolError, InvalidArgumentError from crash.infra.callback import pause_objfile_callbacks, unpause_objfile_callbacks @@ -189,7 +189,7 @@ def __init__(self, roots: PathSpecifier = None, self.vermagic = self.extract_vermagic() - archname = obj.architecture.name() + archname = crash.archname() try: archclass = crash.arch.get_architecture(archname) except RuntimeError as e: @@ -197,8 +197,6 @@ def __init__(self, roots: PathSpecifier = None, self.arch = archclass() - self.vmcore = self.target.kdump - self.crashing_thread: Optional[gdb.InferiorThread] = None def _setup_roots(self, roots: PathSpecifier = None, @@ -687,7 +685,7 @@ def setup_tasks(self) -> None: populated, which allows symbolic stack traces to be made available. 
""" from crash.types.percpu import get_percpu_vars - from crash.types.task import LinuxTask, for_each_all_tasks + from crash.types.task import LinuxTask, types as task_types import crash.cache.tasks # pylint: disable=redefined-outer-name gdb.execute('set print thread-events 0') @@ -703,27 +701,26 @@ def setup_tasks(self) -> None: except MissingSymbolError: crashing_cpu = -1 - for task in for_each_all_tasks(): - ltask = LinuxTask(task) + kdumpfile = self.target.kdumpfile + task_struct_p_type = task_types.task_struct_type.pointer() + + for thread in gdb.selected_inferior().threads(): + task_address = thread.ptid[2] + + task = get_typed_pointer(task_address, task_struct_p_type) + + ltask = LinuxTask(task.dereference()) - active = int(task.address) in rqscurrs + active = task_address in rqscurrs if active: - cpu = rqscurrs[int(task.address)] - regs = self.vmcore.attr.cpu[cpu].reg + cpu = rqscurrs[task_address] + regs = kdumpfile.attr.cpu[cpu].reg ltask.set_active(cpu, regs) else: self.arch.setup_scheduled_frame_offset(task) - ptid = (LINUX_KERNEL_PID, task['pid'], 0) - - try: - thread = gdb.selected_inferior().new_thread(ptid) - thread.info = ltask - thread.arch = self.arch - except gdb.error: - print("Failed to setup task @{:#x}".format(int(task.address))) - continue - thread.name = task['comm'].string() + thread.info = ltask + thread.arch = self.arch if active and cpu == crashing_cpu: self.crashing_thread = thread @@ -738,5 +735,3 @@ def setup_tasks(self) -> None: print(".", end='') sys.stdout.flush() print(" done. ({} tasks total)".format(task_count)) - - gdb.selected_inferior().executing = False diff --git a/crash/requirements/__init__.py b/crash/requirements/__init__.py index 5e1b217e31d..a3b451c48bf 100644 --- a/crash/requirements/__init__.py +++ b/crash/requirements/__init__.py @@ -3,8 +3,9 @@ # Perform some sanity checks to ensure that we can actually work import gdb +import kdumpfile -from crash.exceptions import IncompatibleGDBError +from crash.exceptions import IncompatibleGDBError, IncompatibleKdumpfileError try: x1 = gdb.Target @@ -25,19 +26,16 @@ raise IncompatibleGDBError("gdb.MinSymbol") from e try: - x4 = gdb.Register + x4 = gdb.RegisterDescriptor del x4 except AttributeError as e: raise IncompatibleGDBError("gdb.Register") from e try: - x6 = gdb.Inferior.new_thread - del x6 + x5 = gdb.LinuxKernelTarget + del x5 except AttributeError as e: - raise IncompatibleGDBError("gdb.Inferior.new_thread") from e + raise IncompatibleGDBError("gdb.LinuxKernelTarget") from e -try: - x7 = gdb.Objfile.architecture - del x7 -except AttributeError as e: - raise IncompatibleGDBError("gdb.Objfile.architecture") from e +if not hasattr(kdumpfile.kdumpfile, "from_pointer"): + raise IncompatibleKdumpfileError("from_pointer method") diff --git a/crash/requirements/test_target.py b/crash/requirements/test_target.py index 06e2c5e5acc..38bfd157777 100644 --- a/crash/requirements/test_target.py +++ b/crash/requirements/test_target.py @@ -1,12 +1,12 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from typing import Tuple +from typing import Optional, Tuple import gdb PTID = Tuple[int, int, int] -class TestTarget(gdb.Target): +class TestTarget(gdb.LinuxKernelTarget): def __init__(self) -> None: super().__init__() @@ -21,13 +21,9 @@ def close(self) -> None: pass def fetch_registers(self, thread: gdb.InferiorThread, - register: gdb.Register) -> None: + register: Optional[gdb.RegisterDescriptor]) -> Optional[gdb.RegisterCollectionType]: pass # pylint: 
disable=unused-argument def thread_alive(self, ptid: PTID) -> bool: return True - - def setup_task(self) -> None: - ptid = (1, 1, 0) - gdb.selected_inferior().new_thread(ptid, self) diff --git a/doc-source/mock/gdb/__init__.py b/doc-source/mock/gdb/__init__.py index 525b50a2b1c..ab0b877815c 100644 --- a/doc-source/mock/gdb/__init__.py +++ b/doc-source/mock/gdb/__init__.py @@ -1,9 +1,13 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +from typing import Dict, Optional, Union + class Target(object): - class kdump(object): - pass + pass + +class LinuxKernelTarget(Target): + class kdumpfile(object): def get_addrxlat_ctx(): pass class get_addrxlat_sys(): @@ -76,5 +80,13 @@ class NewObjFileEvent(object): class Frame(object): pass +class RegisterDescriptor: + pass + +RegisterNameType = Union[RegisterDescriptor, str] +RegisterValueType = Optional[Union[int, bytearray]] +RegisterCollectionType = Dict[RegisterNameType, RegisterValueType] + + SYMBOL_VAR_DOMAIN = 0 COMMAND_USER = 0 diff --git a/kdump/target.py b/kdump/target.py index 0949d21551b..5092b7ed1cf 100644 --- a/kdump/target.py +++ b/kdump/target.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from typing import Tuple, Callable +from typing import Tuple, Callable, Optional import sys import shlex @@ -13,106 +13,32 @@ import gdb -TargetFetchRegisters = Callable[[gdb.InferiorThread, gdb.Register], None] +FetchRegistersCallbackType = Callable[[gdb.InferiorThread, Optional[gdb.RegisterDescriptor]], + gdb.RegisterCollectionType] +StoreRegistersCallbackType = Callable[[gdb.InferiorThread, gdb.RegisterCollectionType], None] PTID = Tuple[int, int, int] -class Target(gdb.Target): +class Target(gdb.LinuxKernelTarget): - _fetch_registers: TargetFetchRegisters + _fetch_registers: FetchRegistersCallbackType def __init__(self, debug: bool = False) -> None: super().__init__() self.debug = debug self.shortname = "kdumpfile" self.longname = "Use a Linux kernel kdump file as a target" - self.kdump: kdumpfile - self.base_offset = 0 self.register() - # pylint: disable=unused-argument - def open(self, args: str, from_tty: bool) -> None: - argv = shlex.split(args) - if len(argv) < 2: - raise gdb.GdbError("kdumpfile target requires kernel image and vmcore") - - vmlinux = argv[0] - filename = argv[1] - - try: - self.kdump = kdumpfile(file=filename) - except Exception as e: - raise gdb.GdbError("Failed to open `{}': {}" - .format(filename, str(e))) - - # pylint: disable=unsupported-assignment-operation - self.kdump.attr['addrxlat.ostype'] = 'linux' - - KERNELOFFSET = "linux.vmcoreinfo.lines.KERNELOFFSET" - try: - attr = self.kdump.attr.get(KERNELOFFSET, "0") # pylint: disable=no-member - self.base_offset = int(attr, base=16) - except (TypeError, ValueError): - pass - - # Load the kernel at the relocated address - # Unfortunately, the percpu section has an offset of 0 and - # ends up getting placed at the offset base. This is easy - # enough to handle in the percpu code. - result = gdb.execute("symbol-file {} -o {:#x}" - .format(vmlinux, self.base_offset), - to_string=True) - - if self.debug: - print(result) - - # We don't have an exec-file so we need to set the architecture - # explicitly. 
- arch = gdb.objfiles()[0].architecture.name() - result = gdb.execute("set architecture {}".format(arch), to_string=True) - if self.debug: - print(result) - + def open(self, name: str, from_tty: bool) -> None: + print("Opened kdump.Target") def close(self) -> None: try: self.unregister() except RuntimeError: pass - del self.kdump - - @classmethod - def report_error(cls, addr: int, length: int, error: Exception) -> None: - print("Error while reading {:d} bytes from {:#x}: {}" - .format(length, addr, str(error)), - file=sys.stderr) - - # pylint: disable=unused-argument - def xfer_partial(self, obj: int, annex: str, readbuf: bytearray, - writebuf: bytearray, offset: int, ln: int) -> int: - ret = -1 - if obj == self.TARGET_OBJECT_MEMORY: - try: - r = self.kdump.read(KDUMP_KVADDR, offset, ln) - readbuf[:] = r - ret = ln - except EOFException as e: - if self.debug: - self.report_error(offset, ln, e) - raise gdb.TargetXferEOF(str(e)) - # pylint: disable=no-member - except (NoDataException, addrxlat.exceptions.NoDataError) as e: - if self.debug: - self.report_error(offset, ln, e) - raise gdb.TargetXferUnavailable(str(e)) - except AddressTranslationException as e: - if self.debug: - self.report_error(offset, ln, e) - raise gdb.TargetXferUnavailable(str(e)) - else: - raise IOError("Unknown obj type") - return ret # pylint: disable=unused-argument def thread_alive(self, ptid: PTID) -> bool: @@ -121,15 +47,15 @@ def thread_alive(self, ptid: PTID) -> bool: def pid_to_str(self, ptid: PTID) -> str: return "pid {:d}".format(ptid[1]) - def set_fetch_registers(self, callback: TargetFetchRegisters) -> None: + def set_fetch_registers(self, callback: FetchRegistersCallbackType) -> None: self._fetch_registers = callback # type: ignore def fetch_registers(self, thread: gdb.InferiorThread, - register: gdb.Register) -> None: + register: Optional[gdb.RegisterDescriptor]) -> gdb.RegisterCollectionType: try: return self._fetch_registers(thread, register) # type: ignore - except AttributeError: - raise NotImplementedError("Target did not define fetch_registers callback") from None + except AttributeError as e: + raise NotImplementedError(f"Target did not define fetch_registers callback: {e}") from e def prepare_to_store(self, thread: gdb.InferiorThread) -> None: pass @@ -137,7 +63,7 @@ def prepare_to_store(self, thread: gdb.InferiorThread) -> None: # We don't need to store anything; The regcache is already written. 
# pylint: disable=unused-argument def store_registers(self, thread: gdb.InferiorThread, - register: gdb.Register) -> None: + register: gdb.RegisterCollectionType) -> None: pass # pylint: disable=unused-argument diff --git a/test-gdb-compatibility.gdbinit b/test-gdb-compatibility.gdbinit index 1bef4e030e6..f9850b15f12 100644 --- a/test-gdb-compatibility.gdbinit +++ b/test-gdb-compatibility.gdbinit @@ -4,27 +4,15 @@ python import sys import os -from crash.exceptions import IncompatibleGDBError +from crash.exceptions import IncompatibleGDBError, IncompatibleKdumpfileError try: import crash.requirements from crash.requirements.test_target import TestTarget target = TestTarget() -except IncompatibleGDBError as e: +except (IncompatibleGDBError, IncompatibleKdumpfileError) as e: print(e) sys.exit(1) end target testtarget foo - -python -try: - gdb.execute('set print thread-events 0') - target.setup_task() - gdb.execute("thread 1", to_string=True) - sys.exit(0) -except gdb.error as e: - print(e) - print("This version of gdb is not compatible with crash-python") - sys.exit(1) -end diff --git a/tests/stubs/_gdb.pyi b/tests/stubs/_gdb.pyi index 375a5e2ef7c..07829021b81 100644 --- a/tests/stubs/_gdb.pyi +++ b/tests/stubs/_gdb.pyi @@ -2,6 +2,8 @@ # # NOTE: This dynamically typed stub was automatically generated by stubgen. +from kdumpfile import kdumpfile as kdumpfile_type + from typing import Any, Tuple, List, Optional, Dict, Iterator, Callable from typing import Union, Iterable, Sequence, NewType from typing import TypeVar, Generic @@ -258,7 +260,9 @@ class EventRegistry(Generic[EventType]): class ExitedEvent(Event): ... -class Field: ... +class Field: + enumval: int = ... + bitpos: int = ... class FinishBreakpoint(Breakpoint): return_value: Optional[Value] = ... @@ -291,17 +295,13 @@ class GdbError(Exception): ... IntValue = Union[Value, int] class Inferior: - executing: bool = ... num: int = ... pid: bool = ... progspace: Progspace = ... was_attached: bool = ... def appeared(self, pid: int) -> None: ... def architecture(self) -> Architecture: ... - def delete_thread(self, ptid: Tuple[int, int, int]) -> None: ... def is_valid(self) -> bool: ... - def new_thread(self, ptid: Tuple[int, int, int], - priv: Optional[Any] = ...) -> InferiorThread: ... def read_memory(self, address: IntValue, length: IntValue) -> Membuf: ... def search_memory(self, address: IntValue, length: IntValue, pattern: Buffer) -> int: ... @@ -318,14 +318,14 @@ class InferiorCallPreEvent(Event): ... class InferiorDeletedEvent(Event): ... class InferiorThread: - executing: bool = ... + arch: Any = ... + details: str = ... global_num: int = ... inferior: Inferior = ... info: Any = ... name: str = ... num: int = ... ptid: Tuple[int, int, int] = ... - registers: Dict[str, Register] = ... def handle(self) -> bytes: ... def is_exited(self) -> bool: ... def is_running(self) -> bool: ... @@ -418,12 +418,12 @@ class Progspace: def objfiles(self) -> List[Objfile]: ... def solib_name(self, name: int) -> Optional[str]: ... -class Register: - name: Optional[str] = ... - regnum: int = ... - size: int = ... - type: Type = ... - value: Union[Value, int] = ... +class RegisterDescriptor: + name: str = ... + +RegisterNameType = Union[str, RegisterDescriptor] +RegisterValueType = Optional[Union[int, bytearray]] +RegisterCollectionType = Dict[RegisterNameType, RegisterValueType] class RegisterChangedEvent(Event): ... @@ -505,7 +505,6 @@ class Target: def register(self) -> Any: ... def unregister(self) -> Any: ... - def stacked_target(self) -> bool: ... 
def open(self, argstring: str, from_tty: bool) -> None: ... def close(self) -> None: ... def info(self, thread: InferiorThread) -> str: ... @@ -516,12 +515,15 @@ class Target: def thread_alive(self, ptid: Tuple[int, int, int]) -> bool: ... def pid_to_str(self, ptid: Tuple[int, int,int]) -> str: ... def fetch_registers(self, thread: InferiorThread, - register: Register) -> None: ... + register: Optional[RegisterDescriptor]) -> Optional[RegisterCollectionType]: ... def prepare_to_store(self, thread: InferiorThread) -> None: ... def store_registers(self, thread: InferiorThread, - register: Register) -> None: ... + registers: RegisterCollectionType) -> None: ... def has_execution(self, ptid: Tuple[int, int, int]) -> bool: ... +class LinuxKernelTarget(Target): + kdumpfile: kdumpfile_type = ... + class TargetXferEOF(EOFError): ... class TargetXferUnavailable(LookupError): ... From 0f60795489825b6cc9b5b7775464f5c980243148 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 22 Jul 2022 11:44:21 -0400 Subject: [PATCH 356/367] crash.types.task: Update for Linux 5.14 Linux 5.14 repurposed the task_struct::state field. As part of the change, the field was renamed to __state. Signed-off-by: Jeff Mahoney --- crash/exceptions.py | 3 +++ crash/types/task.py | 17 ++++++++++++++--- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/crash/exceptions.py b/crash/exceptions.py index d03e9347107..f86b4330a99 100644 --- a/crash/exceptions.py +++ b/crash/exceptions.py @@ -23,6 +23,9 @@ class MissingSymbolError(RuntimeError): class MissingTypeError(RuntimeError): """The requested type cannot be located.""" +class MissingFieldError(RuntimeError): + """The requested field cannot be located.""" + class CorruptedError(RuntimeError): """A corrupted data structure has been encountered.""" diff --git a/crash/types/task.py b/crash/types/task.py index a0be1a6cdeb..aca75e19a1d 100644 --- a/crash/types/task.py +++ b/crash/types/task.py @@ -6,7 +6,7 @@ import gdb from crash.exceptions import InvalidArgumentError, ArgumentTypeError -from crash.exceptions import UnexpectedGDBTypeError +from crash.exceptions import UnexpectedGDBTypeError, MissingFieldError from crash.util import array_size, struct_has_member from crash.util.symbols import Types, Symvals, SymbolCallbacks from crash.types.list import list_for_each_entry @@ -51,6 +51,8 @@ class TaskStateFlags: TASK_NEW: int = TASK_FLAG_UNINITIALIZED TASK_IDLE: int = TASK_FLAG_UNINITIALIZED + _state_field: str = 'state' + def __init__(self) -> None: raise NotImplementedError("This class is not meant to be instantiated") @@ -225,6 +227,8 @@ class LinuxTask: _get_rss: Callable[['LinuxTask'], int] _get_last_run: Callable[['LinuxTask'], int] + _state_field: str + def __init__(self, task_struct: gdb.Value) -> None: self._init_task_types(task_struct) @@ -263,8 +267,15 @@ def _init_task_types(cls, task: gdb.Value) -> None: # within gdb. Equality requires a deep comparison rather than # a simple pointer comparison. types.override('struct task_struct', task.type) - fields = types.task_struct_type.fields() + fields = [x.name for x in types.task_struct_type.fields()] cls._task_state_has_exit_state = 'exit_state' in fields + if 'state' in fields: + cls._state_field = 'state' + elif '__state' in fields: + cls._state_field = '__state' + else: + raise MissingFieldError("No way to resolve task_struct.state") + cls._pick_get_rss() cls._pick_last_run() cls._valid = True @@ -348,7 +359,7 @@ def task_state(self) -> int: Returns: :obj:`int`: The state flags for this task. 
""" - state = int(self.task_struct['state']) + state = int(self.task_struct[self._state_field]) if self._task_state_has_exit_state: state |= int(self.task_struct['exit_state']) return state From 0e57b15e3cb8969687d8113f34e01f5d505702b8 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 22 Jul 2022 11:46:47 -0400 Subject: [PATCH 357/367] crash.kernel: Add support for zstd compressed modules Modules on SUSE products are now shipped compressed with zstd and need special handling. This commit splits up the module loading code so that we can handle these more easily by decompressing the module files into a temporary director for loading and then cleaning up the mess afterward. Signed-off-by: Jeff Mahoney --- crash/kernel.py | 183 ++++++++++++++++++++++++++++-------------------- setup.py | 2 +- 2 files changed, 109 insertions(+), 76 deletions(-) diff --git a/crash/kernel.py b/crash/kernel.py index bb959009203..d2fd9eb98f7 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -1,18 +1,22 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from typing import Pattern, Union, List, Dict, Any, Optional +from typing import Pattern, Union, List, Dict, Any, Optional, BinaryIO import sys import re import fnmatch import os.path +import tempfile from elftools.elf.elffile import ELFFile import gdb -import kdump.target +# This is from the C extension and published via __all__; pylint bug? +# pylint: disable=no-name-in-module +from zstd import decompress as zstd_decompress +import kdump.target import crash import crash.arch import crash.arch.x86_64 @@ -382,20 +386,19 @@ def extract_vermagic(self) -> str: return self._get_minsymbol_as_string('vermagic') - def extract_modinfo_from_module(self, modpath: str) -> Dict[str, str]: + def extract_modinfo_from_module(self, modfile: BinaryIO) -> Dict[str, str]: """ Returns the modinfo from a module file Args: - modpath: The path to the module file. + modpath: An open module file Returns: dict: A dictionary containing the names and values of the modinfo variables. 
""" - f = open(modpath, 'rb') - elf = ELFFile(f) + elf = ELFFile(modfile) modinfo = elf.get_section_by_name('.modinfo') d = {} @@ -406,7 +409,6 @@ def extract_modinfo_from_module(self, modpath: str) -> Dict[str, str]: d[val[0:eq]] = val[eq + 1:] del elf - f.close() return d def _get_module_sections(self, module: gdb.Value) -> str: @@ -415,8 +417,9 @@ def _get_module_sections(self, module: gdb.Value) -> str: out.append("-s {} {:#x}".format(name, addr)) return " ".join(out) - def _check_module_version(self, modpath: str, module: gdb.Value) -> None: - modinfo = self.extract_modinfo_from_module(modpath) + def _check_module_version(self, modfile: BinaryIO, module: gdb.Value) -> None: + modinfo = self.extract_modinfo_from_module(modfile) + modpath = modfile.name vermagic = modinfo.get('vermagic', None) @@ -433,6 +436,56 @@ def _check_module_version(self, modpath: str, module: gdb.Value) -> None: raise _ModSourceVersionMismatchError(modpath, mi_srcversion, mod_srcversion) + def _try_load_module(self, modname: str, module: gdb.Value, modfile: BinaryIO, + verbose: bool = False, debug: bool = False) -> gdb.Objfile: + self._check_module_version(modfile, module) + + modpath = modfile.name + + if 'module_core' in module.type: + addr = int(module['module_core']) + else: + addr = int(module['core_layout']['base']) + + if debug: + print(f"Loading {modpath} at {addr:#x} from {modname}") + elif verbose: + print(f"Loading {modname} at {addr:#x}") + else: + print(".", end='') + sys.stdout.flush() + + sections = self._get_module_sections(module) + + percpu = int(module['percpu']) + if percpu > 0: + sections += " -s .data..percpu {:#x}".format(percpu) + + try: + result = gdb.execute("add-symbol-file {} {:#x} {}" + .format(modpath, addr, sections), + to_string=True) + except gdb.error as e: + raise CrashKernelError("Error while loading module `{}': {}" + .format(modname, str(e))) from e + if debug: + print(result) + + return gdb.lookup_objfile(modpath) + + def try_load_module(self, modname: str, module: gdb.Value, modpath: str, + tmpdirname: str, + verbose: bool = False, debug: bool = False) -> gdb.Objfile: + if modpath.endswith(".zst"): + with open(modpath, 'rb') as cmodfile: + with open(os.path.join(tmpdirname, modname + ".ko"), 'w+b') as modfile: + modfile.write(zstd_decompress(cmodfile.read())) + return self._try_load_module(modname, module, modfile, debug) + else: + with open(modpath, 'rb') as modfile: + return self._try_load_module(modname, module, modfile, debug) + + def load_modules(self, verbose: bool = False, debug: bool = False) -> None: """ Load modules (including debuginfo) into the crash session. @@ -450,81 +503,57 @@ def load_modules(self, verbose: bool = False, debug: bool = False) -> None: This does not include a failure to locate a module or its debuginfo. 
""" - print("Loading modules for {}".format(utsname.release), end='') + count = 0 + for module in for_each_module(): + count += 1 + print(f"Loading {count} modules for {utsname.release}", end='') if verbose: print(":", flush=True) + else: + print(".", end='', flush=True) failed = 0 loaded = 0 - pause_objfile_callbacks() - for module in for_each_module(): - modname = "{}".format(module['name'].string()) - modfname = "{}.ko".format(modname) - found = False - for path in self.module_path: - try: - modpath = self._find_module_file(modfname, path) - except _NoMatchingFileError: - continue - - try: - self._check_module_version(modpath, module) - except _ModinfoMismatchError as e: - if verbose: - print(str(e)) - continue - - found = True + pause_objfile_callbacks() + with tempfile.TemporaryDirectory() as tmpdirname: + for module in for_each_module(): + modname = module['name'].string() + modfname = f"{modname}.ko" + objfile = None + for path in self.module_path: + + try: + modpath = self._find_module_file(modfname, path) + except _NoMatchingFileError: + continue + + try: + objfile = self.try_load_module(modname, module, modpath, + tmpdirname, verbose, debug) + except (_ModinfoMismatchError, OSError) as e: + if verbose: + print(f"Module open failed: {str(e)}") + continue + + if not objfile.has_symbols(): + self._load_module_debuginfo(objfile, modpath, verbose) + elif debug: + print(" + has debug symbols") + break - if 'module_core' in module.type: - addr = int(module['module_core']) + if objfile: + if not objfile.has_symbols(): + print("Couldn't find debuginfo for {}".format(modname)) + loaded += 1 else: - addr = int(module['core_layout']['base']) + if failed == 0: + print() + print("Couldn't find module file for {}".format(modname)) + failed += 1 - if debug: - print("Loading {} at {:#x}".format(modpath, addr)) - elif verbose: - print("Loading {} at {:#x}".format(modname, addr)) - else: + if (loaded + failed) % 10 == 10: print(".", end='') sys.stdout.flush() - - sections = self._get_module_sections(module) - - percpu = int(module['percpu']) - if percpu > 0: - sections += " -s .data..percpu {:#x}".format(percpu) - - try: - result = gdb.execute("add-symbol-file {} {:#x} {}" - .format(modpath, addr, sections), - to_string=True) - except gdb.error as e: - raise CrashKernelError("Error while loading module `{}': {}" - .format(modname, str(e))) from e - if debug: - print(result) - - objfile = gdb.lookup_objfile(modpath) - if not objfile.has_symbols(): - self._load_module_debuginfo(objfile, modpath, verbose) - elif debug: - print(" + has debug symbols") - - break - - if not found: - if failed == 0: - print() - print("Couldn't find module file for {}".format(modname)) - failed += 1 - else: - if not objfile.has_symbols(): - print("Couldn't find debuginfo for {}".format(modname)) - loaded += 1 - if (loaded + failed) % 10 == 10: - print(".", end='') - sys.stdout.flush() print(" done. 
({} loaded".format(loaded), end='') if failed: print(", {} failed)".format(failed)) @@ -552,6 +581,8 @@ def _cache_modules_order(self, path: str) -> None: modpath = os.path.join(path, modpath) if os.path.exists(modpath): self.modules_order[path][modname] = modpath + if os.path.exists(modpath + ".zst"): + self.modules_order[path][modname] = modpath + ".zst" f.close() except OSError: pass @@ -656,7 +687,9 @@ def _load_module_debuginfo(self, objfile: gdb.Objfile, if modpath is None: raise RuntimeError("loaded objfile has no filename???") if ".gz" in modpath: - modpath = modpath.replace(".gz", "") + modpath = modpath[:-3] + elif ".zst" in modpath: + modpath = modpath[:-4] filename = "{}.debug".format(os.path.basename(modpath)) build_id_path = self.build_id_path(objfile) diff --git a/setup.py b/setup.py index 4575cd0aaeb..467adc843c7 100644 --- a/setup.py +++ b/setup.py @@ -14,7 +14,7 @@ }, python_requires='>=3.6', - install_requires = [ 'pyelftools', 'addrxlat' ], + install_requires = [ 'pyelftools', 'addrxlat', 'zstd' ], author = "Jeff Mahoney", author_email = "jeffm@suse.com", From 9ed4552dab760512aef2bd0cd403a2a5035e31e9 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 22 Jul 2022 11:49:13 -0400 Subject: [PATCH 358/367] crash.arch.x86_64: fix stack setup on kernels using __switch_to_asm The frame offset being calculated was incorrect. Signed-off-by: Jeff Mahoney --- crash/arch/x86_64.py | 1 - 1 file changed, 1 deletion(-) diff --git a/crash/arch/x86_64.py b/crash/arch/x86_64.py index 03de4342614..e689e513bbc 100644 --- a/crash/arch/x86_64.py +++ b/crash/arch/x86_64.py @@ -154,7 +154,6 @@ def setup_scheduled_frame_offset(self, task: gdb.Value) -> None: m = callq.search(insn) if m and m.group(1) == "__switch_to_asm": - self._frame_offset = rsp - orig_rsp + 1 self._scheduled_rip = val return From cf0374d3ec45b9aff752356b9a066059f1d41205 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Thu, 28 Jul 2022 00:43:43 -0400 Subject: [PATCH 359/367] crash: use gdb.LinuxKernelTarget This commit removes crash.arch in favor of a new crash.target which is derived from gdb.LinuxKernelTarget. It simplifies startup and also uses the native implementation of libkdumpfile which improves read performance substantially. 
Signed-off-by: Jeff Mahoney --- Makefile | 6 +- crash.sh | 12 +- crash/addrxlat.py | 7 +- crash/arch/__init__.py | 113 -------------- crash/arch/ppc64.py | 31 ---- crash/kernel.py | 95 +----------- crash/session.py | 33 ++++- crash/target/__init__.py | 243 +++++++++++++++++++++++++++++++ crash/target/ppc64.py | 59 ++++++++ crash/{arch => target}/x86_64.py | 155 ++++++++++---------- crash/types/task.py | 51 ++++--- doc-source/conf.py | 5 - doc-source/development.rst | 1 - doc-source/testing.rst | 3 +- kdump/__init__.py | 0 kdump/target.py | 71 --------- tests/gen-import-tests.sh | 2 +- tests/run-mypy.py | 8 +- tests/test_target.py | 34 ----- 19 files changed, 446 insertions(+), 483 deletions(-) delete mode 100644 crash/arch/__init__.py delete mode 100644 crash/arch/ppc64.py create mode 100644 crash/target/__init__.py create mode 100644 crash/target/ppc64.py rename crash/{arch => target}/x86_64.py (63%) delete mode 100644 kdump/__init__.py delete mode 100644 kdump/target.py delete mode 100644 tests/test_target.py diff --git a/Makefile b/Makefile index 343208ea2e0..b729e85ce2d 100644 --- a/Makefile +++ b/Makefile @@ -10,7 +10,7 @@ endif all: clean build doc test doc-source-clean: - rm -f doc-source/crash/*.rst doc-source/kdump/*.rst + rm -f doc-source/crash/*.rst rm -f doc-source/commands/*.rst doc-clean: doc-source-clean @@ -54,8 +54,6 @@ textdir=$(docdir)/text doc-text-install: doc-help install -m 755 -d $(DESTDIR)$(textdir)/crash install -m 644 -t $(DESTDIR)$(textdir)/crash docs/text/crash/*.txt - install -m 755 -d $(DESTDIR)$(textdir)/kdump - install -m 644 -t $(DESTDIR)$(textdir)/kdump docs/text/kdump/*.txt install -m 644 -t $(DESTDIR)$(textdir) docs/text/*.txt htmldir=$(docdir)/html @@ -68,7 +66,7 @@ unit-tests: force-rebuild sh tests/run-tests.sh lint: force-rebuild - sh tests/run-pylint.sh $(PYLINT_ARGS) crash kdump + sh tests/run-pylint.sh $(PYLINT_ARGS) crash static-check: force-rebuild sh tests/run-static-checks.sh diff --git a/crash.sh b/crash.sh index 2404be90fdb..09277b7ab75 100755 --- a/crash.sh +++ b/crash.sh @@ -236,19 +236,11 @@ set print pretty on file $KERNEL core $VMCORE -python -from kdump.target import Target -target = Target(debug=False) -end - -target kdumpfile - python import sys import traceback try: import crash.session - from crash.kernel import CrashKernel except RuntimeError as e: print("crash-python: {}, exiting".format(str(e)), file=sys.stderr) traceback.print_exc() @@ -278,10 +270,8 @@ if len(s) > 0: module_debuginfo_path = s.split(" ") try: - kernel = CrashKernel(roots, vmlinux_debuginfo, module_path, + x = crash.session.Session(roots, vmlinux_debuginfo, module_path, module_debuginfo_path, verbose, debug) - - x = crash.session.Session(kernel, verbose=verbose, debug=debug) print("The 'pyhelp' command will list the command extensions.") except gdb.error as e: print("crash-python: {}, exiting".format(str(e)), file=sys.stderr) diff --git a/crash/addrxlat.py b/crash/addrxlat.py index a73308f6eb2..31a8867c5fa 100644 --- a/crash/addrxlat.py +++ b/crash/addrxlat.py @@ -4,8 +4,9 @@ import addrxlat import gdb + import crash -import kdump.target +import crash.target from crash.cache.syscache import utsname from crash.util import offsetof @@ -50,9 +51,7 @@ def cb_read64(self, faddr: addrxlat.FullAddress) -> int: class CrashAddressTranslation: def __init__(self) -> None: try: - target = gdb.current_target() - if not isinstance(target, kdump.target.Target): - raise TypeError("Not using kdump target") + target = crash.target.check_target() self.context = 
target.kdumpfile.get_addrxlat_ctx() self.system = target.kdumpfile.get_addrxlat_sys() except AttributeError: diff --git a/crash/arch/__init__.py b/crash/arch/__init__.py deleted file mode 100644 index 633bc710dfd..00000000000 --- a/crash/arch/__init__.py +++ /dev/null @@ -1,113 +0,0 @@ -# -*- coding: utf-8 -*- -# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: - -from typing import List, Iterator, Any, Optional, Type - -import gdb -from gdb import RegisterNameType, RegisterCollectionType -from gdb import FetchRegistersCallbackType -from gdb.FrameDecorator import FrameDecorator - -import crash - -class FetchRegistersCallback: - """ - The base class from which to implement the fetch_registers callback. - - The architecture code must implement the :meth:`fetch_active` and - :meth:`fetch_scheduled` methods. - """ - def fetch_active(self, thread: gdb.InferiorThread, - register: RegisterNameType) -> RegisterCollectionType: - raise NotImplementedError("Target has no fetch_active callback") - - def fetch_scheduled(self, thread: gdb.InferiorThread, - register: RegisterNameType) -> RegisterCollectionType: - raise NotImplementedError("Target has no fetch_scheduled callback") - - def __call__(self, thread: gdb.InferiorThread, - register: RegisterNameType) -> RegisterCollectionType: - - if thread.info.active: - return self.fetch_active(thread, register) - - return self.fetch_scheduled(thread, register) - -class CrashArchitecture: - ident = "base-class" - aliases: List[str] = list() - - _fetch_registers: FetchRegistersCallbackType - - def __init__(self) -> None: - target = gdb.current_target() - if target is None: - raise ValueError("No target loaded") from None - if not isinstance(target, gdb.LinuxKernelTarget): - raise ValueError("Incorrect target loaded") from None - - try: - target.set_fetch_registers(self._fetch_registers()) - except AttributeError: - raise NotImplementedError("No fetch_registers callback defined") from None - - @classmethod - def set_fetch_registers(cls, callback: FetchRegistersCallbackType) -> None: - """ - Set a fetch_regisers callback for the Target to use. - - Args: - callback: A Callable that accepts a :obj:`gdb.InferiorThread` and - :obj:`gdb.Register` and populates the requested registers for - the specified thread. A register with the seemingly invalid - register number of -1 is a request to populate all registers. - """ - cls._fetch_registers = callback - - def setup_thread_info(self, thread: gdb.InferiorThread) -> None: - raise NotImplementedError("setup_thread_info not implemented") - - def get_stack_pointer(self, thread_struct: gdb.Value) -> int: - raise NotImplementedError("get_stack_pointer is not implemented") - - def setup_scheduled_frame_offset(self, task: gdb.Value) -> None: - pass - -# This keeps stack traces from continuing into userspace and causing problems. 
-class KernelFrameFilter: - def __init__(self, address: int) -> None: - self.name = "KernelFrameFilter" - self.priority = 100 - self.enabled = True - self.address = address - gdb.frame_filters[self.name] = self - - def filter(self, frame_iter: Iterator[Any]) -> Any: - return KernelAddressIterator(frame_iter, self.address) - -class KernelAddressIterator: - def __init__(self, ii: Iterator, address: int) -> None: - self.input_iterator = ii - self.address = address - - def __iter__(self) -> Any: - return self - - def __next__(self) -> Any: - frame = next(self.input_iterator) - - if frame.inferior_frame().pc() < self.address: - raise StopIteration - - return frame - -architectures = {} -def register_arch(arch: Type[CrashArchitecture]) -> None: - architectures[arch.ident] = arch - for ident in arch.aliases: - architectures[ident] = arch - -def get_architecture(archname: str) -> Type[CrashArchitecture]: - if archname in architectures: - return architectures[archname] - raise RuntimeError(f"Couldn't locate helpers for arch: {archname}") diff --git a/crash/arch/ppc64.py b/crash/arch/ppc64.py deleted file mode 100644 index 07da586cad7..00000000000 --- a/crash/arch/ppc64.py +++ /dev/null @@ -1,31 +0,0 @@ -# -*- coding: utf-8 -*- -# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: - -import gdb - -from crash.arch import CrashArchitecture, KernelFrameFilter, register_arch -from crash.arch import FetchRegistersCallback - -class FR_Placeholder(FetchRegistersCallback): # pylint: disable=abstract-method - pass - -class Powerpc64Architecture(CrashArchitecture): - ident = "powerpc:common64" - aliases = ["ppc64", "elf64-powerpc"] - - _fetch_registers = FR_Placeholder - - def __init__(self) -> None: - super(Powerpc64Architecture, self).__init__() - # Stop stack traces with addresses below this - self.filter = KernelFrameFilter(0xffff000000000000) - - def setup_thread_info(self, thread: gdb.InferiorThread) -> None: - task = thread.info.task_struct - thread.info.set_thread_info(task['thread_info'].address) - - @classmethod - def get_stack_pointer(cls, thread_struct: gdb.Value) -> int: - return int(thread_struct['ksp']) - -register_arch(Powerpc64Architecture) diff --git a/crash/kernel.py b/crash/kernel.py index d2fd9eb98f7..fd1661e4b0f 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -16,14 +16,11 @@ # pylint: disable=no-name-in-module from zstd import decompress as zstd_decompress -import kdump.target import crash -import crash.arch -import crash.arch.x86_64 -import crash.arch.ppc64 +import crash.target from crash.types.module import for_each_module, for_each_module_section -from crash.util import get_symbol_value, get_typed_pointer -from crash.util.symbols import Types, Symvals, Symbols +from crash.util import get_symbol_value +from crash.util.symbols import Types from crash.exceptions import MissingSymbolError, InvalidArgumentError from crash.infra.callback import pause_objfile_callbacks, unpause_objfile_callbacks from crash.cache.syscache import utsname @@ -133,19 +130,6 @@ class CrashKernel: """ types = Types(['char *']) - symvals = Symvals(['init_task']) - symbols = Symbols(['runqueues']) - - def check_target(self) -> kdump.target.Target: - target = gdb.current_target() - - if target is None: - raise ValueError("No current target") - - if not isinstance(target, kdump.target.Target): - raise ValueError(f"Current target {type(target)} is not supported") - - return target # pylint: disable=unused-argument def __init__(self, roots: PathSpecifier = None, @@ -154,7 +138,7 @@ def __init__(self, 
roots: PathSpecifier = None, module_debuginfo_path: PathSpecifier = None, verbose: bool = False, debug: bool = False) -> None: - self.target = self.check_target() + self.target = crash.target.check_target() self.findmap: Dict[str, Dict[Any, Any]] = dict() self.modules_order: Dict[str, Dict[str, str]] = dict() @@ -193,16 +177,6 @@ def __init__(self, roots: PathSpecifier = None, self.vermagic = self.extract_vermagic() - archname = crash.archname() - try: - archclass = crash.arch.get_architecture(archname) - except RuntimeError as e: - raise CrashKernelError(str(e)) from e - - self.arch = archclass() - - self.crashing_thread: Optional[gdb.InferiorThread] = None - def _setup_roots(self, roots: PathSpecifier = None, verbose: bool = False) -> None: if roots is None: @@ -707,64 +681,3 @@ def _load_module_debuginfo(self, objfile: gdb.Objfile, if self._try_load_debuginfo(objfile, filepath, verbose): break - - def setup_tasks(self) -> None: - """ - Populate GDB's thread list using the kernel's task lists - - This method will iterate over the kernel's task lists, create a - LinuxTask object, and create a gdb thread for each one. The - threads will be built so that the registers are ready to be - populated, which allows symbolic stack traces to be made available. - """ - from crash.types.percpu import get_percpu_vars - from crash.types.task import LinuxTask, types as task_types - import crash.cache.tasks # pylint: disable=redefined-outer-name - gdb.execute('set print thread-events 0') - - rqs = get_percpu_vars(self.symbols.runqueues) - rqscurrs = {int(x["curr"]) : k for (k, x) in rqs.items()} - - print("Loading tasks...", end='') - sys.stdout.flush() - - task_count = 0 - try: - crashing_cpu = int(get_symbol_value('crashing_cpu')) - except MissingSymbolError: - crashing_cpu = -1 - - kdumpfile = self.target.kdumpfile - task_struct_p_type = task_types.task_struct_type.pointer() - - for thread in gdb.selected_inferior().threads(): - task_address = thread.ptid[2] - - task = get_typed_pointer(task_address, task_struct_p_type) - - ltask = LinuxTask(task.dereference()) - - active = task_address in rqscurrs - if active: - cpu = rqscurrs[task_address] - regs = kdumpfile.attr.cpu[cpu].reg - ltask.set_active(cpu, regs) - else: - self.arch.setup_scheduled_frame_offset(task) - - thread.info = ltask - thread.arch = self.arch - if active and cpu == crashing_cpu: - self.crashing_thread = thread - - self.arch.setup_thread_info(thread) - ltask.attach_thread(thread) - ltask.set_get_stack_pointer(self.arch.get_stack_pointer) - - crash.cache.tasks.cache_task(ltask) - - task_count += 1 - if task_count % 100 == 0: - print(".", end='') - sys.stdout.flush() - print(" done. 
({} tasks total)".format(task_count)) diff --git a/crash/session.py b/crash/session.py index 9f912e8b847..e47a8cd7a0e 100644 --- a/crash/session.py +++ b/crash/session.py @@ -1,10 +1,16 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: +from typing import List, Union + import gdb from crash.infra import autoload_submodules -from crash.kernel import CrashKernel, CrashKernelError +import crash.target +import crash.target.ppc64 +import crash.target.x86_64 + +PathSpecifier = Union[List[str], str] class Session: """ @@ -20,27 +26,38 @@ class Session: debug (optional, default=False): Whether to enable verbose debugging output """ - def __init__(self, kernel: CrashKernel, verbose: bool = False, - debug: bool = False) -> None: + def __init__(self, roots: PathSpecifier = None, + vmlinux_debuginfo: PathSpecifier = None, + module_path: PathSpecifier = None, + module_debuginfo_path: PathSpecifier = None, + verbose: bool = False, debug: bool = False) -> None: print("crash-python initializing...") - self.kernel = kernel + + self.debug = debug + self.verbose = verbose + + target = crash.target.setup_target() + from crash.kernel import CrashKernel, CrashKernelError + + self.kernel = CrashKernel(roots, vmlinux_debuginfo, module_path, + module_debuginfo_path, verbose, debug) autoload_submodules('crash.cache') autoload_submodules('crash.subsystem') autoload_submodules('crash.commands') try: - self.kernel.setup_tasks() + print("Loading modules") self.kernel.load_modules(verbose=verbose, debug=debug) except CrashKernelError as e: print(str(e)) print("Further debugging may not be possible.") return - if self.kernel.crashing_thread: + if target.crashing_thread: try: result = gdb.execute("thread {}" - .format(self.kernel.crashing_thread.num), + .format(target.crashing_thread.num), to_string=True) if debug: print(result) @@ -51,5 +68,5 @@ def __init__(self, kernel: CrashKernel, verbose: bool = False, return print("Backtrace from crashing task (PID {:d}):" - .format(self.kernel.crashing_thread.ptid[1])) + .format(target.crashing_thread.ptid[1])) gdb.execute("where") diff --git a/crash/target/__init__.py b/crash/target/__init__.py new file mode 100644 index 00000000000..320d002e7de --- /dev/null +++ b/crash/target/__init__.py @@ -0,0 +1,243 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +from typing import Any, Iterator, List, Optional, Tuple, Type + +import abc +import sys + +import gdb + +from crash.exceptions import MissingSymbolError +import crash.infra.callback + +from crash.types.percpu import get_percpu_vars +from crash.util.symbols import Symbols, Symvals +from crash.util import get_typed_pointer + +symbols = Symbols(['runqueues']) +symvals = Symvals(['crashing_cpu']) + +class IncorrectTargetError(ValueError): + """Incorrect target implementation for this kernel""" + pass + +PTID = Tuple[int, int, int] + +# This keeps stack traces from continuing into userspace and causing problems. 
+class KernelFrameFilter: + def __init__(self, address: int) -> None: + self.name = "KernelFrameFilter" + self.priority = 100 + self.enabled = True + self.address = address + gdb.frame_filters[self.name] = self + + def filter(self, frame_iter: Iterator[Any]) -> Any: + return KernelAddressIterator(frame_iter, self.address) + +class KernelAddressIterator: + def __init__(self, ii: Iterator, address: int) -> None: + self.input_iterator = ii + self.address = address + + def __iter__(self) -> Any: + return self + + def __next__(self) -> Any: + frame = next(self.input_iterator) + + if frame.inferior_frame().pc() < self.address: + raise StopIteration + + return frame + +# A working target will be a mixin composed of a class derived from +# TargetBase and TargetFetchRegistersBase + +class TargetBase(gdb.LinuxKernelTarget, metaclass=abc.ABCMeta): + def __init__(self, debug: int = 0) -> None: + super().__init__() + + self.debug = debug + self.shortname = "Crash-Python Linux Target" + self.longname = "Use a Core file as a Linux Kernel Target" + self.ready = False + + self.crashing_thread: Optional[gdb.InferiorThread] = None + + def open(self, name: str, from_tty: bool) -> None: + if not self.fetch_registers_usable(): + raise IncorrectTargetError("Not usable") + + if not gdb.objfiles()[0].has_symbols(): + raise ValueError("Cannot debug kernel without symbol table") + + super().open(name, from_tty) + + crash.infra.callback.target_ready() + + self.setup_tasks() + + def setup_tasks(self) -> None: + # pylint complains about this. It's ugly but putting the import within + # setup_tasks breaks the cycle. + # pylint: disable=cyclic-import + from crash.types.task import LinuxTask, types as task_types + import crash.cache.tasks # pylint: disable=redefined-outer-name + print("Loading tasks...", end="") + sys.stdout.flush() + + rqs = get_percpu_vars(symbols.runqueues) + rqscurrs = {int(x["curr"]) : k for (k, x) in rqs.items()} + + task_count = 0 + try: + crashing_cpu = symvals.crashing_cpu + except MissingSymbolError: + crashing_cpu = -1 + + task_struct_p_type = task_types.task_struct_type.pointer() + for thread in gdb.selected_inferior().threads(): + task_address = thread.ptid[2] + + task = get_typed_pointer(task_address, task_struct_p_type) + ltask = LinuxTask(task.dereference()) + + active = task_address in rqscurrs + if active: + cpu = rqscurrs[task_address] + regs = self.kdumpfile.attr.cpu[cpu].reg + ltask.set_active(cpu, regs) + + thread.info = ltask + if active and cpu == crashing_cpu: + self.crashing_thread = thread + + self.arch_setup_thread(thread) + ltask.attach_thread(thread) + + crash.cache.tasks.cache_task(ltask) + + task_count += 1 + if task_count % 100 == 0: + print(".", end='') + sys.stdout.flush() + print(" done. 
({} tasks total)".format(task_count)) + + def close(self) -> None: + pass + + # pylint: disable=unused-argument + def thread_alive(self, ptid: PTID) -> bool: + return True + + # pylint: disable=unused-argument + def prepare_to_store(self, thread: gdb.InferiorThread) -> None: + pass + + @abc.abstractmethod + def fetch_registers_usable(self) -> bool: + pass + + @abc.abstractmethod + def fetch_registers(self, thread: gdb.InferiorThread, + register: Optional[gdb.RegisterDescriptor]) -> Optional[gdb.RegisterCollectionType]: + pass + + # pylint: disable=unused-argument + def store_registers(self, thread: gdb.InferiorThread, registers: gdb.RegisterCollectionType) -> None: + raise TypeError("This target is read-only.") + + # pylint: disable=unused-argument + def has_execution(self, ptid: PTID) -> bool: + return False + + @abc.abstractmethod + def arch_setup_thread(self, thread: gdb.InferiorThread) -> None: + pass + + @abc.abstractmethod + def get_stack_pointer(self, thread: gdb.InferiorThread) -> int: + pass + +class TargetFetchRegistersBase(metaclass=abc.ABCMeta): + """ + The base class from which to implement the fetch_registers callback. + + The architecture code must implement the :meth:`fetch_active` and + :meth:`fetch_scheduled` methods. + """ + _enabled: bool = False + + def __init__(self) -> None: + super().__init__() + self.fetching: bool = False + + # pylint: disable=unused-argument + @classmethod + def enable(cls, unused: Optional[gdb.Type] = None) -> None: + cls._enabled = True + + @classmethod + def fetch_registers_usable(cls) -> bool: + return cls._enabled + + @abc.abstractmethod + def fetch_active(self, thread: gdb.InferiorThread, + register: Optional[gdb.RegisterDescriptor]) -> gdb.RegisterCollectionType: + pass + + @abc.abstractmethod + def fetch_scheduled(self, thread: gdb.InferiorThread, + register: Optional[gdb.RegisterDescriptor]) -> gdb.RegisterCollectionType: + pass + + def fetch_registers(self, thread: gdb.InferiorThread, + register: Optional[gdb.RegisterDescriptor]) -> Optional[gdb.RegisterCollectionType]: + ret: Optional[gdb.RegisterCollectionType] = None + + # Don't recurse, but don't fail either + if self.fetching: + return None + + self.fetching = True + try: + if thread.info.active: + ret = self.fetch_active(thread, register) + else: + ret = self.fetch_scheduled(thread, register) + except AttributeError: + # We still want to be able to list the threads even if we haven't + # setup tasks. 
+ ret = None + + self.fetching = False + return ret + +_targets: List[Type[TargetBase]] = [] +def register_target(new_target: Type[TargetBase]) -> None: + _targets.append(new_target) + +def setup_target() -> TargetBase: + for target in _targets: + t = None + try: + t = target() + t.open("", False) + return t + except IncorrectTargetError: + del t + + raise IncorrectTargetError("Could not identify target implementation for this kernel") + +def check_target() -> TargetBase: + target = gdb.current_target() + + if target is None: + raise ValueError("No current target") + + if not isinstance(target, TargetBase): + raise ValueError(f"Current target {type(target)} is not supported") + + return target diff --git a/crash/target/ppc64.py b/crash/target/ppc64.py new file mode 100644 index 00000000000..ae54f2930d1 --- /dev/null +++ b/crash/target/ppc64.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: + +from typing import Optional + +import gdb + +import crash.target +from crash.target import register_target +from crash.target import KernelFrameFilter + +class _FetchRegistersBase(crash.target.TargetFetchRegistersBase): + def __init__(self) -> None: + super().__init__() + self.filter: KernelFrameFilter + + def fetch_active(self, thread: gdb.InferiorThread, + register: Optional[gdb.RegisterDescriptor]) -> gdb.RegisterCollectionType: + registers = {} + task = thread.info + for reg in task.regs: + if (reg == "pc" and register is not None and + register.name != "pc"): + continue + try: + registers[reg] = task.regs[reg] + except KeyError: + pass + + return registers + + def fetch_scheduled(self, thread: gdb.InferiorThread, + register: Optional[gdb.RegisterDescriptor]) -> gdb.RegisterCollectionType: + registers: gdb.RegisterCollectionType = {} + return registers + +# pylint: disable=abstract-method +class PPC64TargetBase(crash.target.TargetBase): + ident = "powerpc:common64" + aliases = ["ppc64", "elf64-powerpc"] + + def __init__(self) -> None: + super().__init__() + + # Stop stack traces with addresses below this + self.filter = KernelFrameFilter(0xffff000000000000) + + def arch_setup_thread(self, thread: gdb.InferiorThread) -> None: + task = thread.info.task_struct + thread.info.set_thread_info(task['thread_info'].address) + thread.info.set_thread_struct(task['thread']) + + def get_stack_pointer(self, thread: gdb.InferiorThread) -> int: + return int(thread.info.thread_struct['ksp']) + +class PPC64Target(_FetchRegistersBase, PPC64TargetBase): + pass + +register_target(PPC64Target) diff --git a/crash/arch/x86_64.py b/crash/target/x86_64.py similarity index 63% rename from crash/arch/x86_64.py rename to crash/target/x86_64.py index e689e513bbc..9a6dc8eb100 100644 --- a/crash/arch/x86_64.py +++ b/crash/target/x86_64.py @@ -3,12 +3,11 @@ from typing import Optional import re -import sys import gdb - -from crash.arch import CrashArchitecture, KernelFrameFilter, register_arch -from crash.arch import FetchRegistersCallback +import crash.target +from crash.target import IncorrectTargetError, register_target +from crash.target import KernelFrameFilter from crash.util.symbols import Types, MinimalSymvals from crash.util.symbols import TypeCallbacks, MinimalSymbolCallbacks @@ -17,17 +16,21 @@ msymvals = MinimalSymvals(['thread_return']) # pylint: disable=abstract-method -class _FetchRegistersBase(FetchRegistersCallback): +class _FetchRegistersBase(crash.target.TargetFetchRegistersBase): + def __init__(self) -> None: + super().__init__() + self.filter: 
KernelFrameFilter + def fetch_active(self, thread: gdb.InferiorThread, register: Optional[gdb.RegisterDescriptor]) -> gdb.RegisterCollectionType: regmap = { - "rflags" : "eflags" + "rflags" : "eflags" } registers = {} task = thread.info for reg in task.regs: if (reg == "rip" and register is not None and - register.name != "rip"): + register.name != "rip"): continue try: # vmcore uses rflags, gdb uses eflags @@ -39,22 +42,62 @@ def fetch_active(self, thread: gdb.InferiorThread, return registers -# pylint: disable=abstract-method -class _FRC_inactive_task_frame(_FetchRegistersBase): +class _FetchRegistersInactiveFrame(_FetchRegistersBase): + def __init__(self) -> None: + super().__init__() + + self._scheduled_rip: int = 0 + if not self._enabled: + raise IncorrectTargetError("Missing struct inactive_task_frame type") + + # We don't have CFI for __switch_to_asm but we do know what it looks like. + # We push 6 registers and then swap rsp, so we can just rewind back + # to __switch_to_asm getting called and then populate the registers that + # were saved on the stack. + def find_scheduled_rip(self, task: gdb.Value) -> None: + top = int(task['stack']) + 16*1024 + callq = re.compile(r"callq?.*<(\w+)>") + + rsp = task['thread']['sp'].cast(types.unsigned_long_p_type) + + count = 0 + while int(rsp) < top: + val = int(rsp.dereference()) - 5 + if val > self.filter.address: + try: + insn = gdb.execute(f"x/i {val:#x}", to_string=True) + except gdb.error: + insn = None + + if insn: + m = callq.search(insn) + if m and m.group(1) == "__switch_to_asm": + print("Set scheduled RIP") + self._scheduled_rip = val + return + + rsp += 1 + count += 1 + + raise RuntimeError("Cannot locate stack frame offset for __schedule") + + def get_scheduled_rip(self, task: gdb.Value) -> int: + if self._scheduled_rip == 0: + self.find_scheduled_rip(task) + + return self._scheduled_rip + def fetch_scheduled(self, thread: gdb.InferiorThread, register: Optional[gdb.RegisterDescriptor]) -> gdb.RegisterCollectionType: registers: gdb.RegisterCollectionType = {} task = thread.info.task_struct rsp = task['thread']['sp'].cast(types.unsigned_long_p_type) - - rsp = thread.arch.adjust_scheduled_frame_offset(rsp) - registers['rsp'] = rsp frame = rsp.cast(types.inactive_task_frame_p_type).dereference() - registers['rip'] = thread.arch.get_scheduled_rip() + registers['rip'] = self.get_scheduled_rip(task) registers['rbp'] = frame['bp'] registers['rbx'] = frame['bx'] registers['r12'] = frame['r12'] @@ -69,7 +112,7 @@ def fetch_scheduled(self, thread: gdb.InferiorThread, return registers -class _FRC_thread_return(_FetchRegistersBase): +class _FetchRegistersThreadReturn(_FetchRegistersBase): def fetch_scheduled(self, thread: gdb.InferiorThread, register: Optional[gdb.RegisterDescriptor]) -> gdb.RegisterCollectionType: registers: gdb.RegisterCollectionType = {} @@ -106,87 +149,35 @@ def fetch_scheduled(self, thread: gdb.InferiorThread, return registers -class x86_64Architecture(CrashArchitecture): +class X8664TargetBase(crash.target.TargetBase): ident = "i386:x86-64" aliases = ["x86_64"] - _frame_offset: Optional[int] = None - def __init__(self) -> None: - super(x86_64Architecture, self).__init__() + super().__init__() # Stop stack traces with addresses below this self.filter = KernelFrameFilter(0xffff000000000000) - self._scheduled_rip: int - - def setup_thread_info(self, thread: gdb.InferiorThread) -> None: + def arch_setup_thread(self, thread: gdb.InferiorThread) -> None: task = thread.info.task_struct thread_info = 
task['stack'].cast(types.thread_info_p_type) thread.info.set_thread_info(thread_info) + thread.info.set_thread_struct(task['thread']) - # We don't have CFI for __switch_to_asm but we do know what it looks like. - # We push 6 registers and then swap rsp, so we can just rewind back - # to __switch_to_asm getting called and then populate the registers that - # were saved on the stack. - def setup_scheduled_frame_offset(self, task: gdb.Value) -> None: - if self._frame_offset: - return - - top = int(task['stack']) + 16*1024 - callq = re.compile("callq?.*<(\w+)>") - - orig_rsp = rsp = task['thread']['sp'].cast(types.unsigned_long_p_type) - - count = 0 - while int(rsp) < top: - val = int(rsp.dereference()) - 5 - if val > self.filter.address: - try: - insn = gdb.execute(f"x/i {val:#x}", to_string=True) - except Exception as e: - rsp += 1 - count += 1 - continue - - if not insn: - continue - - m = callq.search(insn) - if m and m.group(1) == "__switch_to_asm": - self._scheduled_rip = val - return - - rsp += 1 - count += 1 - - raise RuntimeError("Cannot locate stack frame offset for __schedule") - - def adjust_scheduled_frame_offset(self, rsp: gdb.Value) -> gdb.Value: - if self._frame_offset: - return rsp + self._frame_offset - return rsp - - def get_scheduled_rip(self) -> int: - return self._scheduled_rip - - @classmethod - # pylint: disable=unused-argument - def setup_inactive_task_frame_handler(cls, inactive: gdb.Type) -> None: - cls.set_fetch_registers(_FRC_inactive_task_frame) + def get_stack_pointer(self, thread: gdb.InferiorThread) -> int: + return int(thread.info.thread_struct['sp']) - @classmethod - # pylint: disable=unused-argument - def setup_thread_return_handler(cls, inactive: gdb.Type) -> None: - cls.set_fetch_registers(_FRC_thread_return) +class X8664ThreadReturnTarget(_FetchRegistersThreadReturn, X8664TargetBase): + pass - @classmethod - def get_stack_pointer(cls, thread_struct: gdb.Value) -> int: - return int(thread_struct['sp']) +class X8664InactiveFrameTarget(_FetchRegistersInactiveFrame, X8664TargetBase): + pass -type_cbs = TypeCallbacks([('struct inactive_task_frame', - x86_64Architecture.setup_inactive_task_frame_handler)]) -msymbol_cbs = MinimalSymbolCallbacks([('thread_return', - x86_64Architecture.setup_thread_return_handler)]) +type_cbs = TypeCallbacks([('struct inactive_task_frame', _FetchRegistersInactiveFrame.enable)], + wait_for_target=False) +msymbol_cbs = MinimalSymbolCallbacks([('thread_return', _FetchRegistersThreadReturn.enable)], + wait_for_target=False) -register_arch(x86_64Architecture) +register_target(X8664ThreadReturnTarget) +register_target(X8664InactiveFrameTarget) diff --git a/crash/types/task.py b/crash/types/task.py index aca75e19a1d..6018cf7b4e2 100644 --- a/crash/types/task.py +++ b/crash/types/task.py @@ -5,6 +5,7 @@ import gdb +from crash.target import check_target from crash.exceptions import InvalidArgumentError, ArgumentTypeError from crash.exceptions import UnexpectedGDBTypeError, MissingFieldError from crash.util import array_size, struct_has_member @@ -245,6 +246,7 @@ def __init__(self, task_struct: gdb.Value) -> None: self.cpu = -1 self.regs: Dict[str, int] = dict() + self.thread_struct: gdb.Value self.thread_info: gdb.Value self.thread: gdb.InferiorThread @@ -309,6 +311,33 @@ def attach_thread(self, thread: gdb.InferiorThread) -> None: raise TypeError("Expected gdb.InferiorThread") self.thread = thread + def set_thread_struct(self, thread_struct: gdb.Value) -> None: + """ + Set the thread struct for this task + + The thread struct structure is 
architecture specific. This method + allows the architecture code to assign its thread struct structure + to this task. + + Args: + thread_struct: The ``struct thread_struct`` to be associated with + this task. The value must be of type ``struct thread_struct``. + """ + self.thread_struct = thread_struct + + def get_thread_struct(self) -> gdb.Value: + """ + Get the thread struct for this task + + The thread struct structure is architecture specific and so this + method abstracts its retreival. + + Returns: + :obj:`gdb.Value`: The struct thread_struct associated with this + task. The type of the value is ``struct thread_struct``. + """ + return self.thread_struct + def set_thread_info(self, thread_info: gdb.Value) -> None: """ Set the thread info for this task @@ -500,20 +529,6 @@ def is_kernel_task(self) -> bool: return False - @classmethod - def set_get_stack_pointer(cls, fn: Callable[[gdb.Value], int]) -> None: - """ - Set the stack pointer callback for this architecture - - The callback must accept a :obj:`gdb.Value` of type - ``struct thread`` and return a :obj:`int` containing the address - of the stack pointer. - - Args: - fn: The callback to use. It will be used by all tasks. - """ - setattr(cls, '_get_stack_pointer_fn', fn) - def get_stack_pointer(self) -> int: """ Get the stack pointer for this task @@ -525,12 +540,8 @@ def get_stack_pointer(self) -> int: :obj:`NotImplementedError`: The architecture hasn't provided a stack pointer callback. """ - try: - fn = getattr(self, '_get_stack_pointer_fn') - except AttributeError: - raise NotImplementedError("Architecture hasn't provided stack pointer callback") from None - - return fn(self.task_struct['thread']) + target = check_target() + return target.get_stack_pointer(self.thread) def _get_rss_field(self) -> int: return int(self.task_struct['mm']['rss'].value()) diff --git a/doc-source/conf.py b/doc-source/conf.py index 0101071bebd..020cec09be5 100644 --- a/doc-source/conf.py +++ b/doc-source/conf.py @@ -60,11 +60,6 @@ def run_apidoc(_): print(line, file=f, end='') f.close() - out_dir = os.path.join(cur_dir, "kdump") - mod_dir = os.path.join(cur_dir, "..", "kdump") - argv = [ '-M', '-e', '-H', 'Kdump Target API Reference', '-f', - '-o', out_dir, mod_dir ] - main(argv) print("*** Generating doc templates") diff --git a/doc-source/development.rst b/doc-source/development.rst index 9d26f65fd6c..06422a42f2c 100644 --- a/doc-source/development.rst +++ b/doc-source/development.rst @@ -6,7 +6,6 @@ Development api_changes testing - kdump/modules crash/modules gdb-internals diff --git a/doc-source/testing.rst b/doc-source/testing.rst index 47de9664c00..792ea650d4c 100644 --- a/doc-source/testing.rst +++ b/doc-source/testing.rst @@ -90,8 +90,7 @@ The ``lint`` target does allow several options: - ``E=1`` -- Only report errors - ``PYLINT_ARGS`` -- Override the default arguments. It will still operate - on the :py:mod:`crash` and :py:mod:`kdump` modules but no other default - arguments will be used. + on the :py:mod:`crash` modules but no other default arguments will be used. 
Testing with vmcores -------------------- diff --git a/kdump/__init__.py b/kdump/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/kdump/target.py b/kdump/target.py deleted file mode 100644 index 5092b7ed1cf..00000000000 --- a/kdump/target.py +++ /dev/null @@ -1,71 +0,0 @@ -# -*- coding: utf-8 -*- -# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: - -from typing import Tuple, Callable, Optional - -import sys -import shlex - -from kdumpfile import kdumpfile, KDUMP_KVADDR -from kdumpfile.exceptions import AddressTranslationException, EOFException -from kdumpfile.exceptions import NoDataException -import addrxlat.exceptions - -import gdb - -FetchRegistersCallbackType = Callable[[gdb.InferiorThread, Optional[gdb.RegisterDescriptor]], - gdb.RegisterCollectionType] -StoreRegistersCallbackType = Callable[[gdb.InferiorThread, gdb.RegisterCollectionType], None] - -PTID = Tuple[int, int, int] - -class Target(gdb.LinuxKernelTarget): - - _fetch_registers: FetchRegistersCallbackType - - def __init__(self, debug: bool = False) -> None: - super().__init__() - self.debug = debug - self.shortname = "kdumpfile" - self.longname = "Use a Linux kernel kdump file as a target" - - self.register() - - def open(self, name: str, from_tty: bool) -> None: - print("Opened kdump.Target") - - def close(self) -> None: - try: - self.unregister() - except RuntimeError: - pass - - # pylint: disable=unused-argument - def thread_alive(self, ptid: PTID) -> bool: - return True - - def pid_to_str(self, ptid: PTID) -> str: - return "pid {:d}".format(ptid[1]) - - def set_fetch_registers(self, callback: FetchRegistersCallbackType) -> None: - self._fetch_registers = callback # type: ignore - - def fetch_registers(self, thread: gdb.InferiorThread, - register: Optional[gdb.RegisterDescriptor]) -> gdb.RegisterCollectionType: - try: - return self._fetch_registers(thread, register) # type: ignore - except AttributeError as e: - raise NotImplementedError(f"Target did not define fetch_registers callback: {e}") from e - - def prepare_to_store(self, thread: gdb.InferiorThread) -> None: - pass - - # We don't need to store anything; The regcache is already written. - # pylint: disable=unused-argument - def store_registers(self, thread: gdb.InferiorThread, - register: gdb.RegisterCollectionType) -> None: - pass - - # pylint: disable=unused-argument - def has_execution(self, ptid: PTID) -> bool: - return False diff --git a/tests/gen-import-tests.sh b/tests/gen-import-tests.sh index f5ffe202bcf..d5ff37c076d 100755 --- a/tests/gen-import-tests.sh +++ b/tests/gen-import-tests.sh @@ -13,7 +13,7 @@ import unittest class TestImports(unittest.TestCase): END -for f in $(cd $DIR ; find crash kdump -name '*.py'); do +for f in $(cd $DIR ; find crash -name '*.py'); do path=$(echo $f | sed -e 's#/__init__.py##' -e 's#.py##') name=$(echo $path | tr / .) 
tname=$(echo $path | tr / _) diff --git a/tests/run-mypy.py b/tests/run-mypy.py index ce164e140c4..ae3765e433e 100644 --- a/tests/run-mypy.py +++ b/tests/run-mypy.py @@ -15,13 +15,11 @@ "--disallow-untyped-globals"] try: - ret = main(None, stdout=sys.stdout, stderr=sys.stderr, args=["-p", "kdump"] + common_args) - ret2 = main(None, stdout=sys.stdout, stderr=sys.stderr, args=["-p", "crash"] + common_args) + ret = main(None, stdout=sys.stdout, stderr=sys.stderr, args=["-p", "crash"] + common_args) except TypeError: - ret = main(None, args=["-p", "kdump"] + common_args) - ret2 = main(None, args=["-p", "crash"] + common_args) + ret = main(None, args=["-p", "crash"] + common_args) -if ret or ret2: +if ret: print("static checking failed.", file=sys.stderr) sys.exit(1) diff --git a/tests/test_target.py b/tests/test_target.py deleted file mode 100644 index dd824992c64..00000000000 --- a/tests/test_target.py +++ /dev/null @@ -1,34 +0,0 @@ -# -*- coding: utf-8 -*- -# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: - -import unittest -import gdb -import os.path -from kdump.target import Target - -class TestTarget(unittest.TestCase): - def setUp(self): - gdb.execute("file") - self.do_real_tests = os.path.exists("tests/vmcore") - - def tearDown(self): - try: - x = gdb.current_target() - del x - except: - pass - gdb.execute('target exec') - - def test_bad_file(self): - x = Target() - with self.assertRaises(gdb.error): - gdb.execute('target kdumpfile /does/not/exist') - x.unregister() - - def test_real_open_with_no_kernel(self): - if self.do_real_tests: - x = Target() - with self.assertRaises(gdb.error): - gdb.execute('target kdumpfile tests/vmcore') - x.unregister() - From ff8cb638769db3bb13f952cea83c6a5ab5d54a56 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Thu, 28 Jul 2022 09:40:45 -0400 Subject: [PATCH 360/367] storage: fix lint warnings pylint is complaining about missing names, when the names are autogenerated. Add the names with just a type but not value to silence the checker. Signed-off-by: Jeff Mahoney --- crash/subsystem/storage/__init__.py | 41 ++++++++++++++++++----------- 1 file changed, 26 insertions(+), 15 deletions(-) diff --git a/crash/subsystem/storage/__init__.py b/crash/subsystem/storage/__init__.py index 0651522894d..f0428e3a480 100644 --- a/crash/subsystem/storage/__init__.py +++ b/crash/subsystem/storage/__init__.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: -from typing import Iterable +from typing import Callable, Iterable import gdb from gdb.types import get_basic_type @@ -21,6 +21,13 @@ READ = 0 WRITE = 1 +# Values will be filled in via callback. These are declared here to honor +# imports for lint. +REQ_FUA: int +REQ_PREFLUSH: int +REQ_STARTED: int +REQ_SYNC: int + def dev_to_gendisk(dev: gdb.Value) -> gdb.Value: """ Converts a ``struct device`` that is embedded in a ``struct gendisk`` @@ -295,12 +302,21 @@ def rq_is_sync(request: gdb.Value) -> bool: :obj:`bool`: True for synchronous requests, False otherwise. 
""" return (request['cmd_flags'] & 1 == 0 or - request['cmd_flags'] & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH) != 0) # type: ignore + request['cmd_flags'] & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH) != 0) + + +_rq_in_flight: Callable[[gdb.Value], bool] + +def _rq_in_flight_rq_state(request: gdb.Value) -> bool: + return (request['rq_state'] != + types.enum_mq_rq_state_type['MQ_RQ_IDLE']) + +def _rq_in_flight_atomic_flags(request: gdb.Value) -> bool: + return (request['atomic_flags'] & + (1 << int(types.enum_rq_atomic_flags_type['REQ_ATOM_STARTED'].enumval)) != 0) -# This is a stub to make static checker happy. It gets overridden once 'struct -# request' is resolved. -def _rq_in_flight(request: gdb.Value) -> bool: - raise RuntimeError("struct request type not resolved yet!") +def _rq_in_flight_cmd_flags(request: gdb.Value) -> bool: + return request['cmd_flags'] & REQ_STARTED != 0 def rq_in_flight(request: gdb.Value) -> bool: """ @@ -359,18 +375,13 @@ def _export_req_flags(req_flag_bits: gdb.Type) -> None: # Check struct request and define functions based on its current form in this # kernel def _check_struct_request(request_s: gdb.Type) -> None: - global _rq_in_flight if struct_has_member(request_s, 'rq_state'): - def _rq_in_flight(request: gdb.Value) -> bool: - return (request['rq_state'] != - types.enum_mq_rq_state_type['MQ_RQ_IDLE']) + impl = _rq_in_flight_rq_state elif struct_has_member(request_s, 'atomic_flags'): - def _rq_in_flight(request: gdb.Value) -> bool: - return (request['atomic_flags'] & - (1 << int(types.enum_rq_atomic_flags_type['REQ_ATOM_STARTED'].enumval)) != 0) + impl = _rq_in_flight_atomic_flags else: - def _rq_in_flight(request: gdb.Value) -> bool: - return request['cmd_flags'] & REQ_STARTED != 0 # type: ignore + impl = _rq_in_flight_cmd_flags + globals()['_rq_in_flight'] = impl symbol_cbs = SymbolCallbacks([('disk_type', _check_types), ('part_type', _check_types)]) From b6cd079f920570b1e2bbd276da7599f952ef0f23 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 29 Jul 2022 13:57:57 -0400 Subject: [PATCH 361/367] crash.commands.kmem: silence silly pylint warning Signed-off-by: Jeff Mahoney --- crash/commands/kmem.py | 1 + 1 file changed, 1 insertion(+) diff --git a/crash/commands/kmem.py b/crash/commands/kmem.py index b7a1a573d4c..516fef814b2 100644 --- a/crash/commands/kmem.py +++ b/crash/commands/kmem.py @@ -75,6 +75,7 @@ def _find_kmem_cache(self, query: str) -> Optional[KmemCache]: pass return cache + # pylint: disable=too-many-return-statements def execute(self, args: argparse.Namespace) -> None: if args.z: self.print_zones() From f842efd2de8c7bf84c7c1b770df4447a57cc5273 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 29 Jul 2022 18:42:47 -0400 Subject: [PATCH 362/367] docs: workaround Sphinx bug#10701 There's a bug in Sphinx 5.1.0 (fixed in 5.1.1) that causes make docs to crash with: Handler for event 'autodoc-process-docstring' threw an exception (exception: pop from an empty deque) This commit works around it. 
Signed-off-by: Jeff Mahoney --- doc-source/conf.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/doc-source/conf.py b/doc-source/conf.py index 020cec09be5..bf518b5602c 100644 --- a/doc-source/conf.py +++ b/doc-source/conf.py @@ -242,3 +242,25 @@ def setup(app): 'Miscellaneous'), ] + + +# Temporary workaround for 5.1.0 bug +import sphinx +if sphinx.__version__ == '5.1.0': + # see https://github.com/sphinx-doc/sphinx/issues/10701 + # hope is it would get fixed for the next release + + # Although crash happens within NumpyDocstring, it is subclass of GoogleDocstring + # so we need to overload method there + from sphinx.ext.napoleon.docstring import GoogleDocstring + from functools import wraps + + @wraps(GoogleDocstring._consume_inline_attribute) + def _consume_inline_attribute_safe(self): + try: + return self._consume_inline_attribute_safe() + except: + return "", [] + + GoogleDocstring._consume_inline_attribute = _consume_inline_attribute_safe + From dd43549c47333c38671ee6997874e2c502ff29f2 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 29 Jul 2022 22:08:07 -0400 Subject: [PATCH 363/367] crash.infra: check mod.__file__ to make mypy happy Signed-off-by: Jeff Mahoney --- crash/infra/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crash/infra/__init__.py b/crash/infra/__init__.py index 3b15529054e..5e8f6119289 100644 --- a/crash/infra/__init__.py +++ b/crash/infra/__init__.py @@ -16,6 +16,8 @@ def autoload_submodules(caller: str, except KeyError: mod = importlib.import_module(caller) mods.append(caller) + if mod.__file__ is None: + return list() path = os.path.dirname(mod.__file__) modules = glob.glob("{}/[A-Za-z0-9_]*.py".format(path)) for modname in modules: From 6a2a77058390411c2ca132825e5bdfc07a7010b4 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 2 Sep 2022 10:53:50 -0400 Subject: [PATCH 364/367] crash.kernel: use a default offset of 0xff000000 for loading modules Kernel modules have a number of sections with an LMA of 0, which ends up making those symbol names appear when NULL is used as a value in the stack trace output. Loading them with a default offset elsewhere just moves those (unused) symbols out of the way. Signed-off-by: Jeff Mahoney --- crash/kernel.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crash/kernel.py b/crash/kernel.py index fd1661e4b0f..d997237c5a4 100644 --- a/crash/kernel.py +++ b/crash/kernel.py @@ -435,6 +435,8 @@ def _try_load_module(self, modname: str, module: gdb.Value, modfile: BinaryIO, if percpu > 0: sections += " -s .data..percpu {:#x}".format(percpu) + sections += " -o 0xff000000" + try: result = gdb.execute("add-symbol-file {} {:#x} {}" .format(modpath, addr, sections), From c62d4daee8de55cc910b7067e4721e01194a3b07 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 2 Sep 2022 13:38:33 -0400 Subject: [PATCH 365/367] crash.commands.dev: mark -d option as required The -d option is the only one implemented right now and 'pydev' does nothing otherwise. 
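For reference, argparse treats an option marked required=True as mandatory, so
a bare 'pydev' now exits with a usage error instead of silently doing nothing.
A minimal standalone illustration with plain argparse (not the crash-python
Command wrapper):

    import argparse

    parser = argparse.ArgumentParser(prog='dev')
    parser.add_argument('-d', action='store_true', default=False, required=True)

    print(parser.parse_args(['-d']))   # Namespace(d=True)
    parser.parse_args([])              # errors with "the following arguments
                                       # are required: -d" and exits
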
Signed-off-by: Jeff Mahoney --- crash/commands/dev.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crash/commands/dev.py b/crash/commands/dev.py index d388eaf5687..ced373ed364 100644 --- a/crash/commands/dev.py +++ b/crash/commands/dev.py @@ -20,7 +20,8 @@ class DevCommand(Command): def __init__(self, name: str) -> None: parser = ArgumentParser(prog=name) - parser.add_argument('-d', action='store_true', default=False) + parser.add_argument('-d', action='store_true', default=False, + required=True) super().__init__(name, parser) From 75eacf83c0e6aaf7c52e697108a5aa02f3590f6d Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Fri, 2 Sep 2022 13:49:09 -0400 Subject: [PATCH 366/367] crash.subsystem.storage: update for Linux 5.11 Linux 5.11 merged hd_struct and block_device, changing how to resolve block devices from struct device. Signed-off-by: Jeff Mahoney --- crash/subsystem/storage/__init__.py | 27 ++++++++++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) diff --git a/crash/subsystem/storage/__init__.py b/crash/subsystem/storage/__init__.py index f0428e3a480..2ee6921a2c2 100644 --- a/crash/subsystem/storage/__init__.py +++ b/crash/subsystem/storage/__init__.py @@ -6,14 +6,14 @@ import gdb from gdb.types import get_basic_type -from crash.util import container_of, struct_has_member +from crash.util import container_of, struct_has_member, InvalidComponentError from crash.util.symbols import Types, Symvals, SymbolCallbacks, TypeCallbacks from crash.types.classdev import for_each_class_device from crash.exceptions import DelayedAttributeError, InvalidArgumentError from crash.cache.syscache import kernel, jiffies_to_msec types = Types(['struct gendisk', 'struct hd_struct', 'struct device', - 'struct device_type', 'struct bdev_inode', + 'struct device_type', 'struct bdev_inode', 'struct block_device', 'struct request_queue', 'struct request', 'enum req_flag_bits', 'enum mq_rq_state', 'enum rq_atomic_flags']) symvals = Symvals(['block_class', 'blockdev_superblock', 'disk_type', @@ -28,6 +28,21 @@ REQ_STARTED: int REQ_SYNC: int +def dev_to_bdev(dev: gdb.Value) -> gdb.Value: + """ + Converts a ``struct device'' that is embedded in a ``struct block_device`` + back to the ``struct block_device``. + + Args: + dev: A ``struct device'' contained within a ``struct block_device``. + The vlaue must be of type ``struct device``. + + Returns: + :obj:`gdb.Value`: The converted block device. The value is of type + ``struct block_device``. + """ + return container_of(dev, types.block_device_type, 'bd_device') + def dev_to_gendisk(dev: gdb.Value) -> gdb.Value: """ Converts a ``struct device`` that is embedded in a ``struct gendisk`` @@ -41,7 +56,10 @@ def dev_to_gendisk(dev: gdb.Value) -> gdb.Value: :obj:`gdb.Value`: The converted gendisk. The value is of type ``struct gendisk``. """ - return container_of(dev, types.gendisk_type, 'part0.__dev') + try: + return container_of(dev, types.gendisk_type, 'part0.__dev') + except InvalidComponentError: + return dev_to_bdev(dev)['bd_disk'] def dev_to_part(dev: gdb.Value) -> gdb.Value: """ @@ -73,6 +91,9 @@ def gendisk_to_dev(gendisk: gdb.Value) -> gdb.Value: of type ``struct device``. 
""" + if struct_has_member(gendisk['part0'], 'bd_device'): + return gendisk['part0']['bd_device'] + return gendisk['part0']['__dev'] def part_to_dev(part: gdb.Value) -> gdb.Value: From e10f5ce39b8ce93ca7a2d6693543334903721841 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Tue, 28 Feb 2023 11:46:41 -0500 Subject: [PATCH 367/367] run-mypy.py: update main() prototype to drop unused initial argument --- tests/run-mypy.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tests/run-mypy.py b/tests/run-mypy.py index ae3765e433e..f4df83c6091 100644 --- a/tests/run-mypy.py +++ b/tests/run-mypy.py @@ -15,9 +15,12 @@ "--disallow-untyped-globals"] try: - ret = main(None, stdout=sys.stdout, stderr=sys.stderr, args=["-p", "crash"] + common_args) + ret = main(stdout=sys.stdout, stderr=sys.stderr, args=["-p", "crash"] + common_args) except TypeError: - ret = main(None, args=["-p", "crash"] + common_args) + try: + ret = main(None, stdout=sys.stdout, stderr=sys.stderr, args=["-p", "crash"] + common_args) + except TypeError: + ret = main(None, args=["-p", "crash"] + common_args) if ret: print("static checking failed.", file=sys.stderr)