From 68bc6fbf952b9b208428fb0b630bcb91a19ad84a Mon Sep 17 00:00:00 2001 From: TheAnyKey <32773684+TheAnyKey@users.noreply.github.com> Date: Thu, 30 Apr 2020 18:30:38 +0200 Subject: [PATCH 01/39] Update ci.yaml builds now on any push --- .github/workflows/ci.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 41023d037f5..65b66a2dd01 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -1,6 +1,6 @@ on: push: - branches: [master, release] + pull_request: name: CI From 90123df1883585a66ec9094015ce92ba91445f8d Mon Sep 17 00:00:00 2001 From: TheAnyKey Date: Sat, 2 May 2020 12:26:19 -0400 Subject: [PATCH 02/39] Initial implementation of Py3.9 dict union and python-level tests --- tests/snippets/dict_union.py | 47 +++++++++++++++ vm/src/obj/objdict.rs | 111 +++++++++++++++++++++++++++++++++++ 2 files changed, 158 insertions(+) create mode 100644 tests/snippets/dict_union.py diff --git a/tests/snippets/dict_union.py b/tests/snippets/dict_union.py new file mode 100644 index 00000000000..acc1d430633 --- /dev/null +++ b/tests/snippets/dict_union.py @@ -0,0 +1,47 @@ + + + +def test_dunion_ior0(): + a={1:2,2:3} + b={3:4,5:6} + a|=b + + assert a == {1:2,2:3,3:4,5:6}, f"wrong value assigned {a=}" + assert b == {3:4,5:6}, f"right hand side modified, {b=}" + +def test_dunion_or0(): + a={1:2,2:3} + b={3:4,5:6} + c=a|b + + assert a == {1:2,2:3}, f"left hand side of non-assignment operator modified {a=}" + assert b == {3:4,5:6}, f"right hand side of non-assignment operator modified, {b=}" + assert c == {1:2,2:3, 3:4, 5:6}, f"unexpected result of dict union {c=}" + + +def test_dunion_or1(): + a={1:2,2:3} + b={3:4,5:6} + c=a.__or__(b) + + assert a == {1:2,2:3}, f"left hand side of non-assignment operator modified {a=}" + assert b == {3:4,5:6}, f"right hand side of non-assignment operator modified, {b=}" + assert c == {1:2,2:3, 3:4, 5:6}, f"unexpected result of dict union {c=}" + + +def test_dunion_ror0(): + a={1:2,2:3} + b={3:4,5:6} + c=b.__ror__(a) + + assert a == {1:2,2:3}, f"left hand side of non-assignment operator modified {a=}" + assert b == {3:4,5:6}, f"right hand side of non-assignment operator modified, {b=}" + assert c == {1:2,2:3, 3:4, 5:6}, f"unexpected result of dict union {c=}" + +test_dunion_ior0() +test_dunion_or0() +test_dunion_or1() +test_dunion_ror0() + + + diff --git a/vm/src/obj/objdict.rs b/vm/src/obj/objdict.rs index a5330bdbcfd..6712a159892 100644 --- a/vm/src/obj/objdict.rs +++ b/vm/src/obj/objdict.rs @@ -104,6 +104,91 @@ impl PyDictRef { Ok(()) } + fn merge_no_arg( + dict: &DictContentType, + dict_obj: OptionalArg, + vm: &VirtualMachine, + ) -> PyResult<()> { + if let OptionalArg::Present(dict_obj) = dict_obj { + let dicted: Result = dict_obj.clone().downcast(); + if let Ok(dict_obj) = dicted { + for (key, value) in dict_obj { + dict.insert(vm, &key, value)?; + } + } else if let Some(keys) = vm.get_method(dict_obj.clone(), "keys") { + let keys = objiter::get_iter(vm, &vm.invoke(&keys?, vec![])?)?; + while let Some(key) = objiter::get_next_object(vm, &keys)? { + let val = dict_obj.get_item(&key, vm)?; + dict.insert(vm, &key, val)?; + } + } else { + let iter = objiter::get_iter(vm, &dict_obj)?; + loop { + fn err(vm: &VirtualMachine) -> PyBaseExceptionRef { + vm.new_type_error("Iterator must have exactly two elements".to_owned()) + } + let element = match objiter::get_next_object(vm, &iter)? 
{ + Some(obj) => obj, + None => break, + }; + let elem_iter = objiter::get_iter(vm, &element)?; + let key = objiter::get_next_object(vm, &elem_iter)?.ok_or_else(|| err(vm))?; + let value = objiter::get_next_object(vm, &elem_iter)?.ok_or_else(|| err(vm))?; + if objiter::get_next_object(vm, &elem_iter)?.is_some() { + return Err(err(vm)); + } + dict.insert(vm, &key, value)?; + } + } + } + + Ok(()) + } + + fn merge_no_arg_dict( + dict: &DictContentType, + dict_other: PyDictRef, + vm: &VirtualMachine, + ) -> PyResult<()> { + for (key, value) in dict_other { + dict.insert(vm, &key, value)?; + } + // if let OptionalArg::Present(dict_obj) = dict_obj { + // let dicted: Result = dict_obj.clone().downcast(); + // if let Ok(dict_obj) = dicted { + // for (key, value) in dict_obj { + // dict.insert(vm, &key, value)?; + // } + // } else if let Some(keys) = vm.get_method(dict_obj.clone(), "keys") { + // let keys = objiter::get_iter(vm, &vm.invoke(&keys?, vec![])?)?; + // while let Some(key) = objiter::get_next_object(vm, &keys)? { + // let val = dict_obj.get_item(&key, vm)?; + // dict.insert(vm, &key, val)?; + // } + // } else { + // let iter = objiter::get_iter(vm, &dict_obj)?; + // loop { + // fn err(vm: &VirtualMachine) -> PyBaseExceptionRef { + // vm.new_type_error("Iterator must have exactly two elements".to_owned()) + // } + // let element = match objiter::get_next_object(vm, &iter)? { + // Some(obj) => obj, + // None => break, + // }; + // let elem_iter = objiter::get_iter(vm, &element)?; + // let key = objiter::get_next_object(vm, &elem_iter)?.ok_or_else(|| err(vm))?; + // let value = objiter::get_next_object(vm, &elem_iter)?.ok_or_else(|| err(vm))?; + // if objiter::get_next_object(vm, &elem_iter)?.is_some() { + // return Err(err(vm)); + // } + // dict.insert(vm, &key, value)?; + // } + // } + // } + + Ok(()) + } + #[pyclassmethod] fn fromkeys( class: PyClassRef, @@ -320,6 +405,32 @@ impl PyDictRef { PyDictRef::merge(&self.entries, dict_obj, kwargs, vm) } + #[pymethod(name="__ior__")] + fn ior(self, other:PyObjectRef, vm:&VirtualMachine) -> PyResult { + PyDictRef::merge_no_arg(&self.entries, OptionalArg::Present(other), vm); + Ok(self.into_object()) + } + + #[pymethod(name="__ror__")] + fn ror(self, other:PyObjectRef, vm:&VirtualMachine) -> PyResult { + let dicted: Result = other.clone().downcast(); + if let Ok(other) = dicted { + let other_cp=other.copy(); + PyDictRef::merge_no_arg_dict(&other_cp.entries, self, vm); + return Ok(other_cp); + } + let err_msg = vm.new_str("__ror__ not implemented for non-dict type".to_owned()); + Err(vm.new_key_error(err_msg)) + } + + // #[pymethod(name="__or__")] + // fn or(self, other:PyObjectRef, vm:&VirtualMachine) -> PyResult { // PyResult { + // //if type(other)==dict + // let cp=self.copy(); + // PyDictRef::merge_no_arg(&cp.entries, OptionalArg::Present(other), vm); + // Ok(cp) + // } + #[pymethod] fn pop( self, From 155f3cadff41706b800e4b0b91dd130f8cfc7041 Mon Sep 17 00:00:00 2001 From: TheAnyKey Date: Sat, 2 May 2020 12:39:28 -0400 Subject: [PATCH 03/39] first complete? implementation. 
TODO: Cleanup, remove warnings, clippy, fmt --- vm/src/obj/objdict.rs | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/vm/src/obj/objdict.rs b/vm/src/obj/objdict.rs index 6712a159892..a21d064b3b4 100644 --- a/vm/src/obj/objdict.rs +++ b/vm/src/obj/objdict.rs @@ -423,13 +423,17 @@ impl PyDictRef { Err(vm.new_key_error(err_msg)) } - // #[pymethod(name="__or__")] - // fn or(self, other:PyObjectRef, vm:&VirtualMachine) -> PyResult { // PyResult { - // //if type(other)==dict - // let cp=self.copy(); - // PyDictRef::merge_no_arg(&cp.entries, OptionalArg::Present(other), vm); - // Ok(cp) - // } + #[pymethod(name="__or__")] + fn or(self, other:PyObjectRef, vm:&VirtualMachine) -> PyResult { // PyResult { + let dicted: Result = other.clone().downcast(); + if let Ok(other) = dicted { + let self_cp=self.copy(); + PyDictRef::merge_no_arg_dict(&self_cp.entries, other, vm); + return Ok(self_cp); + } + let err_msg = vm.new_str("__or__ not implemented for non-dict type".to_owned()); + Err(vm.new_key_error(err_msg)) + } #[pymethod] fn pop( From 9a8b8e7d3cd7cd6447fec268600b3123f445c870 Mon Sep 17 00:00:00 2001 From: TheAnyKey Date: Sat, 2 May 2020 13:51:11 -0400 Subject: [PATCH 04/39] fixed clippy, fmt, warnings, etc.. Improved tests and extended testutils with defined skpping methods --- tests/snippets/dict_union.py | 46 +++++++++++++-- tests/snippets/testutils.py | 26 +++++++++ vm/src/obj/objdict.rs | 105 ++++++----------------------------- 3 files changed, 85 insertions(+), 92 deletions(-) diff --git a/tests/snippets/dict_union.py b/tests/snippets/dict_union.py index acc1d430633..29e0718d458 100644 --- a/tests/snippets/dict_union.py +++ b/tests/snippets/dict_union.py @@ -1,5 +1,5 @@ - +import testutils def test_dunion_ior0(): a={1:2,2:3} @@ -38,10 +38,46 @@ def test_dunion_ror0(): assert b == {3:4,5:6}, f"right hand side of non-assignment operator modified, {b=}" assert c == {1:2,2:3, 3:4, 5:6}, f"unexpected result of dict union {c=}" -test_dunion_ior0() -test_dunion_or0() -test_dunion_or1() -test_dunion_ror0() + +def test_dunion_other_types(): + def perf_test_or(other_obj): + d={1:2} + try: + d.__or__(other_obj) + except: + return True + return False + + def perf_test_ior(other_obj): + d={1:2} + try: + d.__ior__(other_obj) + except: + return True + return False + + def perf_test_ror(other_obj): + d={1:2} + try: + d.__ror__(other_obj) + except: + return True + return False + + test_fct={'__or__':perf_test_or, '__ror__':perf_test_ror, '__ior__':perf_test_ior} + others=['FooBar', 42, [36], set([19]), ['aa'], None] + for tfn,tf in test_fct.items(): + for other in others: + assert tf(other), f"Failed: dict {tfn}, accepted {other}" + + + + +testutils.skip_if_unsupported(3,9,test_dunion_ior0) +testutils.skip_if_unsupported(3,9,test_dunion_or0) +testutils.skip_if_unsupported(3,9,test_dunion_or1) +testutils.skip_if_unsupported(3,9,test_dunion_ror0) +testutils.skip_if_unsupported(3,9,test_dunion_other_types) diff --git a/tests/snippets/testutils.py b/tests/snippets/testutils.py index 8a9fdddb2fa..9c7fadf3384 100644 --- a/tests/snippets/testutils.py +++ b/tests/snippets/testutils.py @@ -1,3 +1,6 @@ +import platform +import sys + def assert_raises(expected, *args, _msg=None, **kw): if args: f, f_args = args[0], args[1:] @@ -67,3 +70,26 @@ def assert_isinstance(obj, klass): def assert_in(a, b): _assert_print(lambda: a in b, [a, 'in', b]) + +def skip_if_unsupported(req_maj_vers, req_min_vers, test_fct): + def exec(): + test_fct() + + if platform.python_implementation == 
'RustPython': + exec() + elif sys.version_info.major>=req_maj_vers and sys.version_info.minor>=req_min_vers: + exec() + else: + print(f'Skipping test as a higher python version is required. Using {platform.python_implementation()} {platform.python_version()}') + +def fail_if_unsupported(req_maj_vers, req_min_vers, test_fct): + def exec(): + test_fct() + + if platform.python_implementation == 'RustPython': + exec() + elif sys.version_info.major>=req_maj_vers and sys.version_info.minor>=req_min_vers: + exec() + else: + assert False, f'Test cannot performed on this python version. {platform.python_implementation()} {paltform.python_version()}' + diff --git a/vm/src/obj/objdict.rs b/vm/src/obj/objdict.rs index a21d064b3b4..80b2e0a3b12 100644 --- a/vm/src/obj/objdict.rs +++ b/vm/src/obj/objdict.rs @@ -104,48 +104,7 @@ impl PyDictRef { Ok(()) } - fn merge_no_arg( - dict: &DictContentType, - dict_obj: OptionalArg, - vm: &VirtualMachine, - ) -> PyResult<()> { - if let OptionalArg::Present(dict_obj) = dict_obj { - let dicted: Result = dict_obj.clone().downcast(); - if let Ok(dict_obj) = dicted { - for (key, value) in dict_obj { - dict.insert(vm, &key, value)?; - } - } else if let Some(keys) = vm.get_method(dict_obj.clone(), "keys") { - let keys = objiter::get_iter(vm, &vm.invoke(&keys?, vec![])?)?; - while let Some(key) = objiter::get_next_object(vm, &keys)? { - let val = dict_obj.get_item(&key, vm)?; - dict.insert(vm, &key, val)?; - } - } else { - let iter = objiter::get_iter(vm, &dict_obj)?; - loop { - fn err(vm: &VirtualMachine) -> PyBaseExceptionRef { - vm.new_type_error("Iterator must have exactly two elements".to_owned()) - } - let element = match objiter::get_next_object(vm, &iter)? { - Some(obj) => obj, - None => break, - }; - let elem_iter = objiter::get_iter(vm, &element)?; - let key = objiter::get_next_object(vm, &elem_iter)?.ok_or_else(|| err(vm))?; - let value = objiter::get_next_object(vm, &elem_iter)?.ok_or_else(|| err(vm))?; - if objiter::get_next_object(vm, &elem_iter)?.is_some() { - return Err(err(vm)); - } - dict.insert(vm, &key, value)?; - } - } - } - - Ok(()) - } - - fn merge_no_arg_dict( + fn merge_dict( dict: &DictContentType, dict_other: PyDictRef, vm: &VirtualMachine, @@ -153,39 +112,6 @@ impl PyDictRef { for (key, value) in dict_other { dict.insert(vm, &key, value)?; } - // if let OptionalArg::Present(dict_obj) = dict_obj { - // let dicted: Result = dict_obj.clone().downcast(); - // if let Ok(dict_obj) = dicted { - // for (key, value) in dict_obj { - // dict.insert(vm, &key, value)?; - // } - // } else if let Some(keys) = vm.get_method(dict_obj.clone(), "keys") { - // let keys = objiter::get_iter(vm, &vm.invoke(&keys?, vec![])?)?; - // while let Some(key) = objiter::get_next_object(vm, &keys)? { - // let val = dict_obj.get_item(&key, vm)?; - // dict.insert(vm, &key, val)?; - // } - // } else { - // let iter = objiter::get_iter(vm, &dict_obj)?; - // loop { - // fn err(vm: &VirtualMachine) -> PyBaseExceptionRef { - // vm.new_type_error("Iterator must have exactly two elements".to_owned()) - // } - // let element = match objiter::get_next_object(vm, &iter)? 
{ - // Some(obj) => obj, - // None => break, - // }; - // let elem_iter = objiter::get_iter(vm, &element)?; - // let key = objiter::get_next_object(vm, &elem_iter)?.ok_or_else(|| err(vm))?; - // let value = objiter::get_next_object(vm, &elem_iter)?.ok_or_else(|| err(vm))?; - // if objiter::get_next_object(vm, &elem_iter)?.is_some() { - // return Err(err(vm)); - // } - // dict.insert(vm, &key, value)?; - // } - // } - // } - Ok(()) } @@ -405,30 +331,35 @@ impl PyDictRef { PyDictRef::merge(&self.entries, dict_obj, kwargs, vm) } - #[pymethod(name="__ior__")] - fn ior(self, other:PyObjectRef, vm:&VirtualMachine) -> PyResult { - PyDictRef::merge_no_arg(&self.entries, OptionalArg::Present(other), vm); - Ok(self.into_object()) + #[pymethod(name = "__ior__")] + fn ior(self, other: PyObjectRef, vm: &VirtualMachine) -> PyResult { + let dicted: Result = other.clone().downcast(); + if let Ok(other) = dicted { + PyDictRef::merge_dict(&self.entries, other, vm)?; + return Ok(self.into_object()); + } + let err_msg = vm.new_str("__ior__ not implemented for non-dict type".to_owned()); + Err(vm.new_key_error(err_msg)) } - #[pymethod(name="__ror__")] - fn ror(self, other:PyObjectRef, vm:&VirtualMachine) -> PyResult { + #[pymethod(name = "__ror__")] + fn ror(self, other: PyObjectRef, vm: &VirtualMachine) -> PyResult { let dicted: Result = other.clone().downcast(); if let Ok(other) = dicted { - let other_cp=other.copy(); - PyDictRef::merge_no_arg_dict(&other_cp.entries, self, vm); + let other_cp = other.copy(); + PyDictRef::merge_dict(&other_cp.entries, self, vm)?; return Ok(other_cp); } let err_msg = vm.new_str("__ror__ not implemented for non-dict type".to_owned()); Err(vm.new_key_error(err_msg)) } - #[pymethod(name="__or__")] - fn or(self, other:PyObjectRef, vm:&VirtualMachine) -> PyResult { // PyResult { + #[pymethod(name = "__or__")] + fn or(self, other: PyObjectRef, vm: &VirtualMachine) -> PyResult { let dicted: Result = other.clone().downcast(); if let Ok(other) = dicted { - let self_cp=self.copy(); - PyDictRef::merge_no_arg_dict(&self_cp.entries, other, vm); + let self_cp = self.copy(); + PyDictRef::merge_dict(&self_cp.entries, other, vm)?; return Ok(self_cp); } let err_msg = vm.new_str("__or__ not implemented for non-dict type".to_owned()); From f6f97544361f872bcbfd2576f8ebd60214a48898 Mon Sep 17 00:00:00 2001 From: TheAnyKey Date: Sat, 2 May 2020 13:54:42 -0400 Subject: [PATCH 05/39] changed reference python version to 3.9 - lets see what happens --- .github/workflows/ci.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 41023d037f5..6c62ed000de 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -62,7 +62,7 @@ jobs: args: --release --verbose --all - uses: actions/setup-python@v1 with: - python-version: 3.8 + python-version: 3.9 - name: Install pipenv run: | python -V @@ -108,7 +108,7 @@ jobs: - uses: actions/checkout@master - uses: actions/setup-python@v1 with: - python-version: 3.8 + python-version: 3.9 - name: install flake8 run: python -m pip install flake8 - name: run lint @@ -135,7 +135,7 @@ jobs: tar -xzf geckodriver-v0.24.0-linux32.tar.gz -C geckodriver - uses: actions/setup-python@v1 with: - python-version: 3.8 + python-version: 3.9 - name: Install pipenv run: | python -V From 6b9c4cf3397eae0c4abb6ebeefb1044a3d016f86 Mon Sep 17 00:00:00 2001 From: TheAnyKey Date: Sat, 2 May 2020 17:07:33 -0400 Subject: [PATCH 06/39] first implementation of removeprefix and removesuffix for string and 
tests; fixed minor issue in testutils::skip_if_unsupported and testutils::fail_if_unsupported --- tests/snippets/strings.py | 72 ++++++++++++++++++++++++++++++++++++- tests/snippets/testutils.py | 4 +-- vm/src/obj/objstr.rs | 16 +++++++++ 3 files changed, 89 insertions(+), 3 deletions(-) diff --git a/tests/snippets/strings.py b/tests/snippets/strings.py index 7471b700501..7ceb653c1c3 100644 --- a/tests/snippets/strings.py +++ b/tests/snippets/strings.py @@ -1,4 +1,4 @@ -from testutils import assert_raises, AssertRaises +from testutils import assert_raises, AssertRaises, skip_if_unsupported assert "".__eq__(1) == NotImplemented assert "a" == 'a' @@ -471,3 +471,73 @@ def try_mutate_str(): assert '{:e}'.format(float('inf')) == 'inf' assert '{:e}'.format(float('-inf')) == '-inf' assert '{:E}'.format(float('inf')) == 'INF' + + +# remove*fix test +def test_removeprefix(): + s='foobarfoo' + s_ref='foobarfoo' + assert s.removeprefix('f') == s_ref[1:] + assert s.removeprefix('fo') == s_ref[2:] + assert s.removeprefix('foo') == s_ref[3:] + + assert s.removeprefix('') == s_ref + assert s.removeprefix('bar') == s_ref + assert s.removeprefix('lol') == s_ref + assert s.removeprefix('_foo') == s_ref + assert s.removeprefix('-foo') == s_ref + assert s.removeprefix('afoo') == s_ref + assert s.removeprefix('*foo') == s_ref + + assert s==s_ref, 'undefined test fail' + +def test_removeprefix_types(): + s='0123456' + s_ref='0123456' + others=[0,['012']] + found=False + for o in others: + try: + s.removeprefix(o) + except: + found=True + + assert found, f'Removeprefix accepts other type: {type(o)}: {o=}' + +def test_removesuffix(): + s='foobarfoo' + s_ref='foobarfoo' + assert s.removesuffix('o') == s_ref[:-1] + assert s.removesuffix('oo') == s_ref[:-2] + assert s.removesuffix('foo') == s_ref[:-3] + + assert s.removesuffix('') == s_ref + assert s.removesuffix('bar') == s_ref + assert s.removesuffix('lol') == s_ref + assert s.removesuffix('foo_') == s_ref + assert s.removesuffix('foo-') == s_ref + assert s.removesuffix('foo*') == s_ref + assert s.removesuffix('fooa') == s_ref + + assert s==s_ref, 'undefined test fail' + +def test_removesuffix_types(): + s='0123456' + s_ref='0123456' + others=[0,6,['6']] + found=False + for o in others: + try: + s.removesuffix(o) + except: + found=True + + assert found, f'Removesuffix accepts other type: {type(o)}: {o=}' + + +skip_if_unsupported(3,9,test_removeprefix) +skip_if_unsupported(3,9,test_removeprefix_types) +skip_if_unsupported(3,9,test_removesuffix) +skip_if_unsupported(3,9,test_removesuffix_types) + + diff --git a/tests/snippets/testutils.py b/tests/snippets/testutils.py index 9c7fadf3384..b6be6989539 100644 --- a/tests/snippets/testutils.py +++ b/tests/snippets/testutils.py @@ -75,7 +75,7 @@ def skip_if_unsupported(req_maj_vers, req_min_vers, test_fct): def exec(): test_fct() - if platform.python_implementation == 'RustPython': + if platform.python_implementation() == 'RustPython': exec() elif sys.version_info.major>=req_maj_vers and sys.version_info.minor>=req_min_vers: exec() @@ -86,7 +86,7 @@ def fail_if_unsupported(req_maj_vers, req_min_vers, test_fct): def exec(): test_fct() - if platform.python_implementation == 'RustPython': + if platform.python_implementation() == 'RustPython': exec() elif sys.version_info.major>=req_maj_vers and sys.version_info.minor>=req_min_vers: exec() diff --git a/vm/src/obj/objstr.rs b/vm/src/obj/objstr.rs index 93667379728..2be82d23697 100644 --- a/vm/src/obj/objstr.rs +++ b/vm/src/obj/objstr.rs @@ -528,6 +528,22 @@ impl PyString { ) } 
+ #[pymethod] + fn removeprefix(&self, pref:PyStringRef) -> PyResult { + if self.value.as_str().starts_with(&pref.value) { + return Ok(self.value[pref.len()..].to_string()); + } + Ok(self.value.to_string()) + } + + #[pymethod] + fn removesuffix(&self, suff:PyStringRef) -> PyResult { + if self.value.as_str().ends_with(&suff.value) { + return Ok(self.value[..self.value.len()-suff.len()].to_string()); + } + Ok(self.value.to_string()) + } + #[pymethod] fn isalnum(&self) -> bool { !self.value.is_empty() && self.value.chars().all(char::is_alphanumeric) From a285474487b17e25d88cf44ea7fcfc5c427126ce Mon Sep 17 00:00:00 2001 From: TheAnyKey Date: Sat, 2 May 2020 17:09:45 -0400 Subject: [PATCH 07/39] fixed issued in testutils.skip_if_unsupported and testutils.fail_if_unsupported --- tests/snippets/testutils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/snippets/testutils.py b/tests/snippets/testutils.py index 9c7fadf3384..b6be6989539 100644 --- a/tests/snippets/testutils.py +++ b/tests/snippets/testutils.py @@ -75,7 +75,7 @@ def skip_if_unsupported(req_maj_vers, req_min_vers, test_fct): def exec(): test_fct() - if platform.python_implementation == 'RustPython': + if platform.python_implementation() == 'RustPython': exec() elif sys.version_info.major>=req_maj_vers and sys.version_info.minor>=req_min_vers: exec() @@ -86,7 +86,7 @@ def fail_if_unsupported(req_maj_vers, req_min_vers, test_fct): def exec(): test_fct() - if platform.python_implementation == 'RustPython': + if platform.python_implementation() == 'RustPython': exec() elif sys.version_info.major>=req_maj_vers and sys.version_info.minor>=req_min_vers: exec() From 4857f04595122a3421e4b2d035def01fb7cbf86e Mon Sep 17 00:00:00 2001 From: TheAnyKey Date: Sat, 2 May 2020 17:27:37 -0400 Subject: [PATCH 08/39] as Py3.9 is so far not supported -> rolled back to 3.8 --- .github/workflows/ci.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 6c62ed000de..41023d037f5 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -62,7 +62,7 @@ jobs: args: --release --verbose --all - uses: actions/setup-python@v1 with: - python-version: 3.9 + python-version: 3.8 - name: Install pipenv run: | python -V @@ -108,7 +108,7 @@ jobs: - uses: actions/checkout@master - uses: actions/setup-python@v1 with: - python-version: 3.9 + python-version: 3.8 - name: install flake8 run: python -m pip install flake8 - name: run lint @@ -135,7 +135,7 @@ jobs: tar -xzf geckodriver-v0.24.0-linux32.tar.gz -C geckodriver - uses: actions/setup-python@v1 with: - python-version: 3.9 + python-version: 3.8 - name: Install pipenv run: | python -V From 4e4dc750a6d11c9ddbb9473178890e49775ac26f Mon Sep 17 00:00:00 2001 From: TheAnyKey Date: Sat, 2 May 2020 17:30:41 -0400 Subject: [PATCH 09/39] fixed fmt --- vm/src/obj/objstr.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/vm/src/obj/objstr.rs b/vm/src/obj/objstr.rs index 2be82d23697..47caffd471c 100644 --- a/vm/src/obj/objstr.rs +++ b/vm/src/obj/objstr.rs @@ -529,7 +529,7 @@ impl PyString { } #[pymethod] - fn removeprefix(&self, pref:PyStringRef) -> PyResult { + fn removeprefix(&self, pref: PyStringRef) -> PyResult { if self.value.as_str().starts_with(&pref.value) { return Ok(self.value[pref.len()..].to_string()); } @@ -537,9 +537,9 @@ impl PyString { } #[pymethod] - fn removesuffix(&self, suff:PyStringRef) -> PyResult { + fn removesuffix(&self, suff: PyStringRef) -> PyResult { if 
self.value.as_str().ends_with(&suff.value) { - return Ok(self.value[..self.value.len()-suff.len()].to_string()); + return Ok(self.value[..self.value.len() - suff.len()].to_string()); } Ok(self.value.to_string()) } From 41849ab531eecbf7204996e8c9eed2651ef3888c Mon Sep 17 00:00:00 2001 From: TheAnyKey Date: Sun, 3 May 2020 12:06:41 -0400 Subject: [PATCH 10/39] fixed: returned wrong error when union opration is invoked with other type. So far it was a key error now it is a type error. --- vm/src/obj/objdict.rs | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/vm/src/obj/objdict.rs b/vm/src/obj/objdict.rs index 80b2e0a3b12..e2cdb108db4 100644 --- a/vm/src/obj/objdict.rs +++ b/vm/src/obj/objdict.rs @@ -338,8 +338,7 @@ impl PyDictRef { PyDictRef::merge_dict(&self.entries, other, vm)?; return Ok(self.into_object()); } - let err_msg = vm.new_str("__ior__ not implemented for non-dict type".to_owned()); - Err(vm.new_key_error(err_msg)) + Err(vm.new_type_error("__ior__ not implemented for non-dict type".to_owned())) } #[pymethod(name = "__ror__")] @@ -350,8 +349,7 @@ impl PyDictRef { PyDictRef::merge_dict(&other_cp.entries, self, vm)?; return Ok(other_cp); } - let err_msg = vm.new_str("__ror__ not implemented for non-dict type".to_owned()); - Err(vm.new_key_error(err_msg)) + Err(vm.new_type_error("__ror__ not implemented for non-dict type".to_owned())) } #[pymethod(name = "__or__")] @@ -362,8 +360,7 @@ impl PyDictRef { PyDictRef::merge_dict(&self_cp.entries, other, vm)?; return Ok(self_cp); } - let err_msg = vm.new_str("__or__ not implemented for non-dict type".to_owned()); - Err(vm.new_key_error(err_msg)) + Err(vm.new_type_error("__or__ not implemented for non-dict type".to_owned())) } #[pymethod] From 1697ce05ad136a6b566dcb06a13df6699a20a942 Mon Sep 17 00:00:00 2001 From: TheAnyKey Date: Sun, 3 May 2020 12:09:40 -0400 Subject: [PATCH 11/39] reverted toolchain settings to python version 3.8 as 3.9 is so far not supported --- .github/workflows/ci.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 6c62ed000de..41023d037f5 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -62,7 +62,7 @@ jobs: args: --release --verbose --all - uses: actions/setup-python@v1 with: - python-version: 3.9 + python-version: 3.8 - name: Install pipenv run: | python -V @@ -108,7 +108,7 @@ jobs: - uses: actions/checkout@master - uses: actions/setup-python@v1 with: - python-version: 3.9 + python-version: 3.8 - name: install flake8 run: python -m pip install flake8 - name: run lint @@ -135,7 +135,7 @@ jobs: tar -xzf geckodriver-v0.24.0-linux32.tar.gz -C geckodriver - uses: actions/setup-python@v1 with: - python-version: 3.9 + python-version: 3.8 - name: Install pipenv run: | python -V From 0faf968678a1e1f8f1908b09eb7e2e54c5640677 Mon Sep 17 00:00:00 2001 From: TheAnyKey Date: Sun, 3 May 2020 12:11:22 -0400 Subject: [PATCH 12/39] fixed typo --- tests/snippets/testutils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/snippets/testutils.py b/tests/snippets/testutils.py index b6be6989539..c779d2c8982 100644 --- a/tests/snippets/testutils.py +++ b/tests/snippets/testutils.py @@ -91,5 +91,5 @@ def exec(): elif sys.version_info.major>=req_maj_vers and sys.version_info.minor>=req_min_vers: exec() else: - assert False, f'Test cannot performed on this python version. 
{platform.python_implementation()} {paltform.python_version()}' + assert False, f'Test cannot performed on this python version. {platform.python_implementation()} {platform.python_version()}' From 3d5687cbf6171fca99a79e1e116d4adbadc24c04 Mon Sep 17 00:00:00 2001 From: TheAnyKey Date: Sun, 3 May 2020 12:36:50 -0400 Subject: [PATCH 13/39] messed with branches -> shame on me => now cleaned up --- tests/snippets/dict_union.py | 83 ------------------------------------ vm/src/obj/objdict.rs | 46 -------------------- 2 files changed, 129 deletions(-) delete mode 100644 tests/snippets/dict_union.py diff --git a/tests/snippets/dict_union.py b/tests/snippets/dict_union.py deleted file mode 100644 index 29e0718d458..00000000000 --- a/tests/snippets/dict_union.py +++ /dev/null @@ -1,83 +0,0 @@ - -import testutils - -def test_dunion_ior0(): - a={1:2,2:3} - b={3:4,5:6} - a|=b - - assert a == {1:2,2:3,3:4,5:6}, f"wrong value assigned {a=}" - assert b == {3:4,5:6}, f"right hand side modified, {b=}" - -def test_dunion_or0(): - a={1:2,2:3} - b={3:4,5:6} - c=a|b - - assert a == {1:2,2:3}, f"left hand side of non-assignment operator modified {a=}" - assert b == {3:4,5:6}, f"right hand side of non-assignment operator modified, {b=}" - assert c == {1:2,2:3, 3:4, 5:6}, f"unexpected result of dict union {c=}" - - -def test_dunion_or1(): - a={1:2,2:3} - b={3:4,5:6} - c=a.__or__(b) - - assert a == {1:2,2:3}, f"left hand side of non-assignment operator modified {a=}" - assert b == {3:4,5:6}, f"right hand side of non-assignment operator modified, {b=}" - assert c == {1:2,2:3, 3:4, 5:6}, f"unexpected result of dict union {c=}" - - -def test_dunion_ror0(): - a={1:2,2:3} - b={3:4,5:6} - c=b.__ror__(a) - - assert a == {1:2,2:3}, f"left hand side of non-assignment operator modified {a=}" - assert b == {3:4,5:6}, f"right hand side of non-assignment operator modified, {b=}" - assert c == {1:2,2:3, 3:4, 5:6}, f"unexpected result of dict union {c=}" - - -def test_dunion_other_types(): - def perf_test_or(other_obj): - d={1:2} - try: - d.__or__(other_obj) - except: - return True - return False - - def perf_test_ior(other_obj): - d={1:2} - try: - d.__ior__(other_obj) - except: - return True - return False - - def perf_test_ror(other_obj): - d={1:2} - try: - d.__ror__(other_obj) - except: - return True - return False - - test_fct={'__or__':perf_test_or, '__ror__':perf_test_ror, '__ior__':perf_test_ior} - others=['FooBar', 42, [36], set([19]), ['aa'], None] - for tfn,tf in test_fct.items(): - for other in others: - assert tf(other), f"Failed: dict {tfn}, accepted {other}" - - - - -testutils.skip_if_unsupported(3,9,test_dunion_ior0) -testutils.skip_if_unsupported(3,9,test_dunion_or0) -testutils.skip_if_unsupported(3,9,test_dunion_or1) -testutils.skip_if_unsupported(3,9,test_dunion_ror0) -testutils.skip_if_unsupported(3,9,test_dunion_other_types) - - - diff --git a/vm/src/obj/objdict.rs b/vm/src/obj/objdict.rs index 80b2e0a3b12..a5330bdbcfd 100644 --- a/vm/src/obj/objdict.rs +++ b/vm/src/obj/objdict.rs @@ -104,17 +104,6 @@ impl PyDictRef { Ok(()) } - fn merge_dict( - dict: &DictContentType, - dict_other: PyDictRef, - vm: &VirtualMachine, - ) -> PyResult<()> { - for (key, value) in dict_other { - dict.insert(vm, &key, value)?; - } - Ok(()) - } - #[pyclassmethod] fn fromkeys( class: PyClassRef, @@ -331,41 +320,6 @@ impl PyDictRef { PyDictRef::merge(&self.entries, dict_obj, kwargs, vm) } - #[pymethod(name = "__ior__")] - fn ior(self, other: PyObjectRef, vm: &VirtualMachine) -> PyResult { - let dicted: Result = other.clone().downcast(); 
- if let Ok(other) = dicted { - PyDictRef::merge_dict(&self.entries, other, vm)?; - return Ok(self.into_object()); - } - let err_msg = vm.new_str("__ior__ not implemented for non-dict type".to_owned()); - Err(vm.new_key_error(err_msg)) - } - - #[pymethod(name = "__ror__")] - fn ror(self, other: PyObjectRef, vm: &VirtualMachine) -> PyResult { - let dicted: Result = other.clone().downcast(); - if let Ok(other) = dicted { - let other_cp = other.copy(); - PyDictRef::merge_dict(&other_cp.entries, self, vm)?; - return Ok(other_cp); - } - let err_msg = vm.new_str("__ror__ not implemented for non-dict type".to_owned()); - Err(vm.new_key_error(err_msg)) - } - - #[pymethod(name = "__or__")] - fn or(self, other: PyObjectRef, vm: &VirtualMachine) -> PyResult { - let dicted: Result = other.clone().downcast(); - if let Ok(other) = dicted { - let self_cp = self.copy(); - PyDictRef::merge_dict(&self_cp.entries, other, vm)?; - return Ok(self_cp); - } - let err_msg = vm.new_str("__or__ not implemented for non-dict type".to_owned()); - Err(vm.new_key_error(err_msg)) - } - #[pymethod] fn pop( self, From 31590ab4e112f8f80b8c05931e927ee197bed527 Mon Sep 17 00:00:00 2001 From: TheAnyKey Date: Sun, 3 May 2020 12:46:32 -0400 Subject: [PATCH 14/39] last clean up and check --- tests/snippets/testutils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/snippets/testutils.py b/tests/snippets/testutils.py index b6be6989539..c779d2c8982 100644 --- a/tests/snippets/testutils.py +++ b/tests/snippets/testutils.py @@ -91,5 +91,5 @@ def exec(): elif sys.version_info.major>=req_maj_vers and sys.version_info.minor>=req_min_vers: exec() else: - assert False, f'Test cannot performed on this python version. {platform.python_implementation()} {paltform.python_version()}' + assert False, f'Test cannot performed on this python version. 
{platform.python_implementation()} {platform.python_version()}' From 55434d4fcdc4c2a8a5cc8b4595e22af8fce91ff8 Mon Sep 17 00:00:00 2001 From: TheAnyKey Date: Sun, 3 May 2020 13:03:29 -0400 Subject: [PATCH 15/39] sync up with master --- vm/src/obj/objdict.rs | 32 -------------------------------- 1 file changed, 32 deletions(-) diff --git a/vm/src/obj/objdict.rs b/vm/src/obj/objdict.rs index 31e0650def3..a5330bdbcfd 100644 --- a/vm/src/obj/objdict.rs +++ b/vm/src/obj/objdict.rs @@ -320,38 +320,6 @@ impl PyDictRef { PyDictRef::merge(&self.entries, dict_obj, kwargs, vm) } - #[pymethod(name = "__ior__")] - fn ior(self, other: PyObjectRef, vm: &VirtualMachine) -> PyResult { - let dicted: Result = other.clone().downcast(); - if let Ok(other) = dicted { - PyDictRef::merge_dict(&self.entries, other, vm)?; - return Ok(self.into_object()); - } - Err(vm.new_type_error("__ior__ not implemented for non-dict type".to_owned())) - } - - #[pymethod(name = "__ror__")] - fn ror(self, other: PyObjectRef, vm: &VirtualMachine) -> PyResult { - let dicted: Result = other.clone().downcast(); - if let Ok(other) = dicted { - let other_cp = other.copy(); - PyDictRef::merge_dict(&other_cp.entries, self, vm)?; - return Ok(other_cp); - } - Err(vm.new_type_error("__ror__ not implemented for non-dict type".to_owned())) - } - - #[pymethod(name = "__or__")] - fn or(self, other: PyObjectRef, vm: &VirtualMachine) -> PyResult { - let dicted: Result = other.clone().downcast(); - if let Ok(other) = dicted { - let self_cp = self.copy(); - PyDictRef::merge_dict(&self_cp.entries, other, vm)?; - return Ok(self_cp); - } - Err(vm.new_type_error("__or__ not implemented for non-dict type".to_owned())) - } - #[pymethod] fn pop( self, From 87ae8d8b028ba53a497a677e590c0e3697a5d322 Mon Sep 17 00:00:00 2001 From: TheAnyKey Date: Mon, 4 May 2020 15:48:56 -0400 Subject: [PATCH 16/39] added lcm with var args and changed to gcd to var args --- vm/src/stdlib/math.rs | 53 +++++++++++++++++++++++++++++++++++++++---- 1 file changed, 49 insertions(+), 4 deletions(-) diff --git a/vm/src/stdlib/math.rs b/vm/src/stdlib/math.rs index 6873e4352e9..1e7b35e5408 100644 --- a/vm/src/stdlib/math.rs +++ b/vm/src/stdlib/math.rs @@ -9,9 +9,9 @@ use statrs::function::gamma::{gamma, ln_gamma}; use num_bigint::BigInt; use num_traits::{One, Zero}; -use crate::function::OptionalArg; +use crate::function::{OptionalArg, PyFuncArgs}; use crate::obj::objfloat::{self, IntoPyFloat, PyFloatRef}; -use crate::obj::objint::{self, PyIntRef}; +use crate::obj::objint::{self, PyInt, PyIntRef}; use crate::obj::objtype; use crate::pyobject::{Either, PyObjectRef, PyResult, TypeProtocol}; use crate::vm::VirtualMachine; @@ -272,9 +272,53 @@ fn math_ldexp( Ok(value * (2_f64).powf(objint::try_float(i.as_bigint(), vm)?)) } -fn math_gcd(a: PyIntRef, b: PyIntRef) -> BigInt { +fn math_perf_arb_len_int_op( + args: PyFuncArgs, + vm: &VirtualMachine, + op: F, + default: BigInt, +) -> PyResult +where + F: Fn(&BigInt, &PyInt) -> BigInt, +{ + if !args.kwargs.is_empty() { + Err(vm.new_type_error("Takes no keyword arguments".to_owned())) + } else if args.args.is_empty() { + return Ok(default); + } else if args.args.len() == 1 { + let a: PyObjectRef = args.args[0].clone(); + if let Some(aa) = a.payload_if_subclass::(vm) { + let res = op(aa.as_bigint(), aa); + Ok(res) + } else { + Err(vm.new_type_error("Only integer arguments are supported".to_owned())) + } + } else { + let a = args.args[0].clone(); + if let Some(aa) = a.payload_if_subclass::(vm) { + let mut res = aa.as_bigint().clone(); + for b in 
args.args[1..].iter() { + if let Some(bb) = b.payload_if_subclass::(vm) { + res = op(&res, bb); + } else { + return Err(vm.new_type_error("Only integer arguments are supported".to_owned())); + } + } + Ok(res) + } else { + Err(vm.new_type_error("Only integer arguments are supported".to_owned())) + } + } +} + +fn math_gcd(args: PyFuncArgs, vm: &VirtualMachine) -> PyResult { + use num_integer::Integer; + math_perf_arb_len_int_op(args, vm, |x, y| x.gcd(y.as_bigint()), BigInt::zero()) +} + +fn math_lcm(args: PyFuncArgs, vm: &VirtualMachine) -> PyResult { use num_integer::Integer; - a.as_bigint().gcd(b.as_bigint()) + math_perf_arb_len_int_op(args, vm, |x, y| x.lcm(y.as_bigint()), BigInt::one()) } fn math_factorial(value: PyIntRef, vm: &VirtualMachine) -> PyResult { @@ -436,6 +480,7 @@ pub fn make_module(vm: &VirtualMachine) -> PyObjectRef { // Gcd function "gcd" => ctx.new_function(math_gcd), + "lcm" => ctx.new_function(math_lcm), // Factorial function "factorial" => ctx.new_function(math_factorial), From ed6927009f1733a9d9a2c8c207b83c491c8243eb Mon Sep 17 00:00:00 2001 From: TheAnyKey Date: Mon, 4 May 2020 15:59:29 -0400 Subject: [PATCH 17/39] cleaned up --- vm/src/stdlib/math.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/vm/src/stdlib/math.rs b/vm/src/stdlib/math.rs index 1e7b35e5408..7130d35cf92 100644 --- a/vm/src/stdlib/math.rs +++ b/vm/src/stdlib/math.rs @@ -301,7 +301,9 @@ where if let Some(bb) = b.payload_if_subclass::(vm) { res = op(&res, bb); } else { - return Err(vm.new_type_error("Only integer arguments are supported".to_owned())); + return Err( + vm.new_type_error("Only integer arguments are supported".to_owned()) + ); } } Ok(res) From 9d238bd0fe4f37646ba4e6c058eff2b8dd04c972 Mon Sep 17 00:00:00 2001 From: TheAnyKey Date: Mon, 4 May 2020 16:09:40 -0400 Subject: [PATCH 18/39] cleaned up --- vm/src/stdlib/math.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vm/src/stdlib/math.rs b/vm/src/stdlib/math.rs index 7130d35cf92..1ddbdf28934 100644 --- a/vm/src/stdlib/math.rs +++ b/vm/src/stdlib/math.rs @@ -284,7 +284,7 @@ where if !args.kwargs.is_empty() { Err(vm.new_type_error("Takes no keyword arguments".to_owned())) } else if args.args.is_empty() { - return Ok(default); + Ok(default) } else if args.args.len() == 1 { let a: PyObjectRef = args.args[0].clone(); if let Some(aa) = a.payload_if_subclass::(vm) { From 045c8055c43a080e3376c5cadb23657122119316 Mon Sep 17 00:00:00 2001 From: TheAnyKey <32773684+TheAnyKey@users.noreply.github.com> Date: Wed, 6 May 2020 19:06:13 +0200 Subject: [PATCH 19/39] added info for this fork --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index 8afc59657c0..13c1fe52b0d 100644 --- a/README.md +++ b/README.md @@ -16,6 +16,9 @@ A Python-3 (CPython >= 3.5.0) Interpreter written in Rust :snake: :scream: [![WAPM package](https://wapm.io/package/rustpython/badge.svg?style=flat)](https://wapm.io/package/rustpython) [![Open in Gitpod](https://img.shields.io/static/v1?label=Open%20in&message=Gitpod&color=1aa6e4&logo=gitpod)](https://gitpod.io#https://github.com/RustPython/RustPython) +For this Fork +[![Open in Gitpod](https://img.shields.io/static/v1?label=Open%20in&message=Gitpod&color=1aa6e4&logo=gitpod)](https://gitpod.io#https://github.com/TheAnyKey/RustPython) + ## Usage #### Check out our [online demo](https://rustpython.github.io/demo/) running on WebAssembly. 
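A minimal usage sketch of the variadic math.gcd / math.lcm implemented in the math.rs patches above, assuming CPython 3.9 semantics (names and values here are illustrative, not taken from the series' own tests):

    import math

    # With no arguments the identity element is returned (0 for gcd, 1 for lcm).
    assert math.gcd() == 0
    assert math.lcm() == 1

    # Additional arguments are folded in pairwise, left to right.
    assert math.gcd(12, 18, 30) == 6   # gcd(gcd(12, 18), 30)
    assert math.lcm(4, 6, 10) == 60    # lcm(lcm(4, 6), 10)
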
From 34c4142ede4751b2370a6490574e0380c0088213 Mon Sep 17 00:00:00 2001 From: TheAnyKey Date: Sat, 2 May 2020 12:26:19 -0400 Subject: [PATCH 20/39] Initial implementation of Py3.9 dict union and python-level tests --- tests/snippets/dict_union.py | 47 +++++++++++++++ vm/src/obj/objdict.rs | 111 +++++++++++++++++++++++++++++++++++ 2 files changed, 158 insertions(+) create mode 100644 tests/snippets/dict_union.py diff --git a/tests/snippets/dict_union.py b/tests/snippets/dict_union.py new file mode 100644 index 00000000000..acc1d430633 --- /dev/null +++ b/tests/snippets/dict_union.py @@ -0,0 +1,47 @@ + + + +def test_dunion_ior0(): + a={1:2,2:3} + b={3:4,5:6} + a|=b + + assert a == {1:2,2:3,3:4,5:6}, f"wrong value assigned {a=}" + assert b == {3:4,5:6}, f"right hand side modified, {b=}" + +def test_dunion_or0(): + a={1:2,2:3} + b={3:4,5:6} + c=a|b + + assert a == {1:2,2:3}, f"left hand side of non-assignment operator modified {a=}" + assert b == {3:4,5:6}, f"right hand side of non-assignment operator modified, {b=}" + assert c == {1:2,2:3, 3:4, 5:6}, f"unexpected result of dict union {c=}" + + +def test_dunion_or1(): + a={1:2,2:3} + b={3:4,5:6} + c=a.__or__(b) + + assert a == {1:2,2:3}, f"left hand side of non-assignment operator modified {a=}" + assert b == {3:4,5:6}, f"right hand side of non-assignment operator modified, {b=}" + assert c == {1:2,2:3, 3:4, 5:6}, f"unexpected result of dict union {c=}" + + +def test_dunion_ror0(): + a={1:2,2:3} + b={3:4,5:6} + c=b.__ror__(a) + + assert a == {1:2,2:3}, f"left hand side of non-assignment operator modified {a=}" + assert b == {3:4,5:6}, f"right hand side of non-assignment operator modified, {b=}" + assert c == {1:2,2:3, 3:4, 5:6}, f"unexpected result of dict union {c=}" + +test_dunion_ior0() +test_dunion_or0() +test_dunion_or1() +test_dunion_ror0() + + + diff --git a/vm/src/obj/objdict.rs b/vm/src/obj/objdict.rs index a5330bdbcfd..6712a159892 100644 --- a/vm/src/obj/objdict.rs +++ b/vm/src/obj/objdict.rs @@ -104,6 +104,91 @@ impl PyDictRef { Ok(()) } + fn merge_no_arg( + dict: &DictContentType, + dict_obj: OptionalArg, + vm: &VirtualMachine, + ) -> PyResult<()> { + if let OptionalArg::Present(dict_obj) = dict_obj { + let dicted: Result = dict_obj.clone().downcast(); + if let Ok(dict_obj) = dicted { + for (key, value) in dict_obj { + dict.insert(vm, &key, value)?; + } + } else if let Some(keys) = vm.get_method(dict_obj.clone(), "keys") { + let keys = objiter::get_iter(vm, &vm.invoke(&keys?, vec![])?)?; + while let Some(key) = objiter::get_next_object(vm, &keys)? { + let val = dict_obj.get_item(&key, vm)?; + dict.insert(vm, &key, val)?; + } + } else { + let iter = objiter::get_iter(vm, &dict_obj)?; + loop { + fn err(vm: &VirtualMachine) -> PyBaseExceptionRef { + vm.new_type_error("Iterator must have exactly two elements".to_owned()) + } + let element = match objiter::get_next_object(vm, &iter)? 
{ + Some(obj) => obj, + None => break, + }; + let elem_iter = objiter::get_iter(vm, &element)?; + let key = objiter::get_next_object(vm, &elem_iter)?.ok_or_else(|| err(vm))?; + let value = objiter::get_next_object(vm, &elem_iter)?.ok_or_else(|| err(vm))?; + if objiter::get_next_object(vm, &elem_iter)?.is_some() { + return Err(err(vm)); + } + dict.insert(vm, &key, value)?; + } + } + } + + Ok(()) + } + + fn merge_no_arg_dict( + dict: &DictContentType, + dict_other: PyDictRef, + vm: &VirtualMachine, + ) -> PyResult<()> { + for (key, value) in dict_other { + dict.insert(vm, &key, value)?; + } + // if let OptionalArg::Present(dict_obj) = dict_obj { + // let dicted: Result = dict_obj.clone().downcast(); + // if let Ok(dict_obj) = dicted { + // for (key, value) in dict_obj { + // dict.insert(vm, &key, value)?; + // } + // } else if let Some(keys) = vm.get_method(dict_obj.clone(), "keys") { + // let keys = objiter::get_iter(vm, &vm.invoke(&keys?, vec![])?)?; + // while let Some(key) = objiter::get_next_object(vm, &keys)? { + // let val = dict_obj.get_item(&key, vm)?; + // dict.insert(vm, &key, val)?; + // } + // } else { + // let iter = objiter::get_iter(vm, &dict_obj)?; + // loop { + // fn err(vm: &VirtualMachine) -> PyBaseExceptionRef { + // vm.new_type_error("Iterator must have exactly two elements".to_owned()) + // } + // let element = match objiter::get_next_object(vm, &iter)? { + // Some(obj) => obj, + // None => break, + // }; + // let elem_iter = objiter::get_iter(vm, &element)?; + // let key = objiter::get_next_object(vm, &elem_iter)?.ok_or_else(|| err(vm))?; + // let value = objiter::get_next_object(vm, &elem_iter)?.ok_or_else(|| err(vm))?; + // if objiter::get_next_object(vm, &elem_iter)?.is_some() { + // return Err(err(vm)); + // } + // dict.insert(vm, &key, value)?; + // } + // } + // } + + Ok(()) + } + #[pyclassmethod] fn fromkeys( class: PyClassRef, @@ -320,6 +405,32 @@ impl PyDictRef { PyDictRef::merge(&self.entries, dict_obj, kwargs, vm) } + #[pymethod(name="__ior__")] + fn ior(self, other:PyObjectRef, vm:&VirtualMachine) -> PyResult { + PyDictRef::merge_no_arg(&self.entries, OptionalArg::Present(other), vm); + Ok(self.into_object()) + } + + #[pymethod(name="__ror__")] + fn ror(self, other:PyObjectRef, vm:&VirtualMachine) -> PyResult { + let dicted: Result = other.clone().downcast(); + if let Ok(other) = dicted { + let other_cp=other.copy(); + PyDictRef::merge_no_arg_dict(&other_cp.entries, self, vm); + return Ok(other_cp); + } + let err_msg = vm.new_str("__ror__ not implemented for non-dict type".to_owned()); + Err(vm.new_key_error(err_msg)) + } + + // #[pymethod(name="__or__")] + // fn or(self, other:PyObjectRef, vm:&VirtualMachine) -> PyResult { // PyResult { + // //if type(other)==dict + // let cp=self.copy(); + // PyDictRef::merge_no_arg(&cp.entries, OptionalArg::Present(other), vm); + // Ok(cp) + // } + #[pymethod] fn pop( self, From aae94873a71ca2ab6b393155c4154f1e38a2cedc Mon Sep 17 00:00:00 2001 From: TheAnyKey Date: Sat, 2 May 2020 12:39:28 -0400 Subject: [PATCH 21/39] first complete? implementation. 
TODO: Cleanup, remove warnings, clippy, fmt --- vm/src/obj/objdict.rs | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/vm/src/obj/objdict.rs b/vm/src/obj/objdict.rs index 6712a159892..a21d064b3b4 100644 --- a/vm/src/obj/objdict.rs +++ b/vm/src/obj/objdict.rs @@ -423,13 +423,17 @@ impl PyDictRef { Err(vm.new_key_error(err_msg)) } - // #[pymethod(name="__or__")] - // fn or(self, other:PyObjectRef, vm:&VirtualMachine) -> PyResult { // PyResult { - // //if type(other)==dict - // let cp=self.copy(); - // PyDictRef::merge_no_arg(&cp.entries, OptionalArg::Present(other), vm); - // Ok(cp) - // } + #[pymethod(name="__or__")] + fn or(self, other:PyObjectRef, vm:&VirtualMachine) -> PyResult { // PyResult { + let dicted: Result = other.clone().downcast(); + if let Ok(other) = dicted { + let self_cp=self.copy(); + PyDictRef::merge_no_arg_dict(&self_cp.entries, other, vm); + return Ok(self_cp); + } + let err_msg = vm.new_str("__or__ not implemented for non-dict type".to_owned()); + Err(vm.new_key_error(err_msg)) + } #[pymethod] fn pop( From eb5fbfe9e9062abb576e9373d0eadac39a673a28 Mon Sep 17 00:00:00 2001 From: TheAnyKey Date: Sat, 2 May 2020 13:51:11 -0400 Subject: [PATCH 22/39] fixed clippy, fmt, warnings, etc.. Improved tests and extended testutils with defined skpping methods --- tests/snippets/dict_union.py | 46 +++++++++++++-- tests/snippets/testutils.py | 26 +++++++++ vm/src/obj/objdict.rs | 105 ++++++----------------------------- 3 files changed, 85 insertions(+), 92 deletions(-) diff --git a/tests/snippets/dict_union.py b/tests/snippets/dict_union.py index acc1d430633..29e0718d458 100644 --- a/tests/snippets/dict_union.py +++ b/tests/snippets/dict_union.py @@ -1,5 +1,5 @@ - +import testutils def test_dunion_ior0(): a={1:2,2:3} @@ -38,10 +38,46 @@ def test_dunion_ror0(): assert b == {3:4,5:6}, f"right hand side of non-assignment operator modified, {b=}" assert c == {1:2,2:3, 3:4, 5:6}, f"unexpected result of dict union {c=}" -test_dunion_ior0() -test_dunion_or0() -test_dunion_or1() -test_dunion_ror0() + +def test_dunion_other_types(): + def perf_test_or(other_obj): + d={1:2} + try: + d.__or__(other_obj) + except: + return True + return False + + def perf_test_ior(other_obj): + d={1:2} + try: + d.__ior__(other_obj) + except: + return True + return False + + def perf_test_ror(other_obj): + d={1:2} + try: + d.__ror__(other_obj) + except: + return True + return False + + test_fct={'__or__':perf_test_or, '__ror__':perf_test_ror, '__ior__':perf_test_ior} + others=['FooBar', 42, [36], set([19]), ['aa'], None] + for tfn,tf in test_fct.items(): + for other in others: + assert tf(other), f"Failed: dict {tfn}, accepted {other}" + + + + +testutils.skip_if_unsupported(3,9,test_dunion_ior0) +testutils.skip_if_unsupported(3,9,test_dunion_or0) +testutils.skip_if_unsupported(3,9,test_dunion_or1) +testutils.skip_if_unsupported(3,9,test_dunion_ror0) +testutils.skip_if_unsupported(3,9,test_dunion_other_types) diff --git a/tests/snippets/testutils.py b/tests/snippets/testutils.py index 8a9fdddb2fa..9c7fadf3384 100644 --- a/tests/snippets/testutils.py +++ b/tests/snippets/testutils.py @@ -1,3 +1,6 @@ +import platform +import sys + def assert_raises(expected, *args, _msg=None, **kw): if args: f, f_args = args[0], args[1:] @@ -67,3 +70,26 @@ def assert_isinstance(obj, klass): def assert_in(a, b): _assert_print(lambda: a in b, [a, 'in', b]) + +def skip_if_unsupported(req_maj_vers, req_min_vers, test_fct): + def exec(): + test_fct() + + if platform.python_implementation == 
'RustPython': + exec() + elif sys.version_info.major>=req_maj_vers and sys.version_info.minor>=req_min_vers: + exec() + else: + print(f'Skipping test as a higher python version is required. Using {platform.python_implementation()} {platform.python_version()}') + +def fail_if_unsupported(req_maj_vers, req_min_vers, test_fct): + def exec(): + test_fct() + + if platform.python_implementation == 'RustPython': + exec() + elif sys.version_info.major>=req_maj_vers and sys.version_info.minor>=req_min_vers: + exec() + else: + assert False, f'Test cannot performed on this python version. {platform.python_implementation()} {paltform.python_version()}' + diff --git a/vm/src/obj/objdict.rs b/vm/src/obj/objdict.rs index a21d064b3b4..80b2e0a3b12 100644 --- a/vm/src/obj/objdict.rs +++ b/vm/src/obj/objdict.rs @@ -104,48 +104,7 @@ impl PyDictRef { Ok(()) } - fn merge_no_arg( - dict: &DictContentType, - dict_obj: OptionalArg, - vm: &VirtualMachine, - ) -> PyResult<()> { - if let OptionalArg::Present(dict_obj) = dict_obj { - let dicted: Result = dict_obj.clone().downcast(); - if let Ok(dict_obj) = dicted { - for (key, value) in dict_obj { - dict.insert(vm, &key, value)?; - } - } else if let Some(keys) = vm.get_method(dict_obj.clone(), "keys") { - let keys = objiter::get_iter(vm, &vm.invoke(&keys?, vec![])?)?; - while let Some(key) = objiter::get_next_object(vm, &keys)? { - let val = dict_obj.get_item(&key, vm)?; - dict.insert(vm, &key, val)?; - } - } else { - let iter = objiter::get_iter(vm, &dict_obj)?; - loop { - fn err(vm: &VirtualMachine) -> PyBaseExceptionRef { - vm.new_type_error("Iterator must have exactly two elements".to_owned()) - } - let element = match objiter::get_next_object(vm, &iter)? { - Some(obj) => obj, - None => break, - }; - let elem_iter = objiter::get_iter(vm, &element)?; - let key = objiter::get_next_object(vm, &elem_iter)?.ok_or_else(|| err(vm))?; - let value = objiter::get_next_object(vm, &elem_iter)?.ok_or_else(|| err(vm))?; - if objiter::get_next_object(vm, &elem_iter)?.is_some() { - return Err(err(vm)); - } - dict.insert(vm, &key, value)?; - } - } - } - - Ok(()) - } - - fn merge_no_arg_dict( + fn merge_dict( dict: &DictContentType, dict_other: PyDictRef, vm: &VirtualMachine, @@ -153,39 +112,6 @@ impl PyDictRef { for (key, value) in dict_other { dict.insert(vm, &key, value)?; } - // if let OptionalArg::Present(dict_obj) = dict_obj { - // let dicted: Result = dict_obj.clone().downcast(); - // if let Ok(dict_obj) = dicted { - // for (key, value) in dict_obj { - // dict.insert(vm, &key, value)?; - // } - // } else if let Some(keys) = vm.get_method(dict_obj.clone(), "keys") { - // let keys = objiter::get_iter(vm, &vm.invoke(&keys?, vec![])?)?; - // while let Some(key) = objiter::get_next_object(vm, &keys)? { - // let val = dict_obj.get_item(&key, vm)?; - // dict.insert(vm, &key, val)?; - // } - // } else { - // let iter = objiter::get_iter(vm, &dict_obj)?; - // loop { - // fn err(vm: &VirtualMachine) -> PyBaseExceptionRef { - // vm.new_type_error("Iterator must have exactly two elements".to_owned()) - // } - // let element = match objiter::get_next_object(vm, &iter)? 
{ - // Some(obj) => obj, - // None => break, - // }; - // let elem_iter = objiter::get_iter(vm, &element)?; - // let key = objiter::get_next_object(vm, &elem_iter)?.ok_or_else(|| err(vm))?; - // let value = objiter::get_next_object(vm, &elem_iter)?.ok_or_else(|| err(vm))?; - // if objiter::get_next_object(vm, &elem_iter)?.is_some() { - // return Err(err(vm)); - // } - // dict.insert(vm, &key, value)?; - // } - // } - // } - Ok(()) } @@ -405,30 +331,35 @@ impl PyDictRef { PyDictRef::merge(&self.entries, dict_obj, kwargs, vm) } - #[pymethod(name="__ior__")] - fn ior(self, other:PyObjectRef, vm:&VirtualMachine) -> PyResult { - PyDictRef::merge_no_arg(&self.entries, OptionalArg::Present(other), vm); - Ok(self.into_object()) + #[pymethod(name = "__ior__")] + fn ior(self, other: PyObjectRef, vm: &VirtualMachine) -> PyResult { + let dicted: Result = other.clone().downcast(); + if let Ok(other) = dicted { + PyDictRef::merge_dict(&self.entries, other, vm)?; + return Ok(self.into_object()); + } + let err_msg = vm.new_str("__ior__ not implemented for non-dict type".to_owned()); + Err(vm.new_key_error(err_msg)) } - #[pymethod(name="__ror__")] - fn ror(self, other:PyObjectRef, vm:&VirtualMachine) -> PyResult { + #[pymethod(name = "__ror__")] + fn ror(self, other: PyObjectRef, vm: &VirtualMachine) -> PyResult { let dicted: Result = other.clone().downcast(); if let Ok(other) = dicted { - let other_cp=other.copy(); - PyDictRef::merge_no_arg_dict(&other_cp.entries, self, vm); + let other_cp = other.copy(); + PyDictRef::merge_dict(&other_cp.entries, self, vm)?; return Ok(other_cp); } let err_msg = vm.new_str("__ror__ not implemented for non-dict type".to_owned()); Err(vm.new_key_error(err_msg)) } - #[pymethod(name="__or__")] - fn or(self, other:PyObjectRef, vm:&VirtualMachine) -> PyResult { // PyResult { + #[pymethod(name = "__or__")] + fn or(self, other: PyObjectRef, vm: &VirtualMachine) -> PyResult { let dicted: Result = other.clone().downcast(); if let Ok(other) = dicted { - let self_cp=self.copy(); - PyDictRef::merge_no_arg_dict(&self_cp.entries, other, vm); + let self_cp = self.copy(); + PyDictRef::merge_dict(&self_cp.entries, other, vm)?; return Ok(self_cp); } let err_msg = vm.new_str("__or__ not implemented for non-dict type".to_owned()); From f961ac9263f41e3310d574e2e12a1f0b530d244a Mon Sep 17 00:00:00 2001 From: TheAnyKey Date: Sat, 2 May 2020 13:54:42 -0400 Subject: [PATCH 23/39] changed reference python version to 3.9 - lets see what happens --- .github/workflows/ci.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index e3cf7668295..f1aa1a0987c 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -62,7 +62,7 @@ jobs: args: --release --verbose ${{ env.CARGO_ARGS }} - uses: actions/setup-python@v1 with: - python-version: 3.8 + python-version: 3.9 - name: Install pipenv run: | python -V @@ -108,7 +108,7 @@ jobs: - uses: actions/checkout@master - uses: actions/setup-python@v1 with: - python-version: 3.8 + python-version: 3.9 - name: install flake8 run: python -m pip install flake8 - name: run lint @@ -135,7 +135,7 @@ jobs: tar -xzf geckodriver-v0.24.0-linux32.tar.gz -C geckodriver - uses: actions/setup-python@v1 with: - python-version: 3.8 + python-version: 3.9 - name: Install pipenv run: | python -V From 5cd5199d15298fbd05be21c17ede3b4d1038b43d Mon Sep 17 00:00:00 2001 From: TheAnyKey Date: Sat, 2 May 2020 17:07:33 -0400 Subject: [PATCH 24/39] first implementation of removeprefix and removesuffix 
for string and tests; fixed minor issue in testutils::skip_if_unsupported and testutils::fail_if_unsupported --- tests/snippets/strings.py | 72 ++++++++++++++++++++++++++++++++++++- tests/snippets/testutils.py | 4 +-- vm/src/obj/objstr.rs | 16 +++++++++ 3 files changed, 89 insertions(+), 3 deletions(-) diff --git a/tests/snippets/strings.py b/tests/snippets/strings.py index 7471b700501..7ceb653c1c3 100644 --- a/tests/snippets/strings.py +++ b/tests/snippets/strings.py @@ -1,4 +1,4 @@ -from testutils import assert_raises, AssertRaises +from testutils import assert_raises, AssertRaises, skip_if_unsupported assert "".__eq__(1) == NotImplemented assert "a" == 'a' @@ -471,3 +471,73 @@ def try_mutate_str(): assert '{:e}'.format(float('inf')) == 'inf' assert '{:e}'.format(float('-inf')) == '-inf' assert '{:E}'.format(float('inf')) == 'INF' + + +# remove*fix test +def test_removeprefix(): + s='foobarfoo' + s_ref='foobarfoo' + assert s.removeprefix('f') == s_ref[1:] + assert s.removeprefix('fo') == s_ref[2:] + assert s.removeprefix('foo') == s_ref[3:] + + assert s.removeprefix('') == s_ref + assert s.removeprefix('bar') == s_ref + assert s.removeprefix('lol') == s_ref + assert s.removeprefix('_foo') == s_ref + assert s.removeprefix('-foo') == s_ref + assert s.removeprefix('afoo') == s_ref + assert s.removeprefix('*foo') == s_ref + + assert s==s_ref, 'undefined test fail' + +def test_removeprefix_types(): + s='0123456' + s_ref='0123456' + others=[0,['012']] + found=False + for o in others: + try: + s.removeprefix(o) + except: + found=True + + assert found, f'Removeprefix accepts other type: {type(o)}: {o=}' + +def test_removesuffix(): + s='foobarfoo' + s_ref='foobarfoo' + assert s.removesuffix('o') == s_ref[:-1] + assert s.removesuffix('oo') == s_ref[:-2] + assert s.removesuffix('foo') == s_ref[:-3] + + assert s.removesuffix('') == s_ref + assert s.removesuffix('bar') == s_ref + assert s.removesuffix('lol') == s_ref + assert s.removesuffix('foo_') == s_ref + assert s.removesuffix('foo-') == s_ref + assert s.removesuffix('foo*') == s_ref + assert s.removesuffix('fooa') == s_ref + + assert s==s_ref, 'undefined test fail' + +def test_removesuffix_types(): + s='0123456' + s_ref='0123456' + others=[0,6,['6']] + found=False + for o in others: + try: + s.removesuffix(o) + except: + found=True + + assert found, f'Removesuffix accepts other type: {type(o)}: {o=}' + + +skip_if_unsupported(3,9,test_removeprefix) +skip_if_unsupported(3,9,test_removeprefix_types) +skip_if_unsupported(3,9,test_removesuffix) +skip_if_unsupported(3,9,test_removesuffix_types) + + diff --git a/tests/snippets/testutils.py b/tests/snippets/testutils.py index 9c7fadf3384..b6be6989539 100644 --- a/tests/snippets/testutils.py +++ b/tests/snippets/testutils.py @@ -75,7 +75,7 @@ def skip_if_unsupported(req_maj_vers, req_min_vers, test_fct): def exec(): test_fct() - if platform.python_implementation == 'RustPython': + if platform.python_implementation() == 'RustPython': exec() elif sys.version_info.major>=req_maj_vers and sys.version_info.minor>=req_min_vers: exec() @@ -86,7 +86,7 @@ def fail_if_unsupported(req_maj_vers, req_min_vers, test_fct): def exec(): test_fct() - if platform.python_implementation == 'RustPython': + if platform.python_implementation() == 'RustPython': exec() elif sys.version_info.major>=req_maj_vers and sys.version_info.minor>=req_min_vers: exec() diff --git a/vm/src/obj/objstr.rs b/vm/src/obj/objstr.rs index eb3a434db88..9b87380faa2 100644 --- a/vm/src/obj/objstr.rs +++ b/vm/src/obj/objstr.rs @@ -532,6 +532,22 @@ impl 
PyString { ) } + #[pymethod] + fn removeprefix(&self, pref:PyStringRef) -> PyResult { + if self.value.as_str().starts_with(&pref.value) { + return Ok(self.value[pref.len()..].to_string()); + } + Ok(self.value.to_string()) + } + + #[pymethod] + fn removesuffix(&self, suff:PyStringRef) -> PyResult { + if self.value.as_str().ends_with(&suff.value) { + return Ok(self.value[..self.value.len()-suff.len()].to_string()); + } + Ok(self.value.to_string()) + } + #[pymethod] fn isalnum(&self) -> bool { !self.value.is_empty() && self.value.chars().all(char::is_alphanumeric) From c6acb1b93f322ae7985ee5039b17c109e9001722 Mon Sep 17 00:00:00 2001 From: TheAnyKey Date: Sat, 2 May 2020 17:27:37 -0400 Subject: [PATCH 25/39] as Py3.9 is so far not supported -> rolled back to 3.8 --- .github/workflows/ci.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index f1aa1a0987c..e3cf7668295 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -62,7 +62,7 @@ jobs: args: --release --verbose ${{ env.CARGO_ARGS }} - uses: actions/setup-python@v1 with: - python-version: 3.9 + python-version: 3.8 - name: Install pipenv run: | python -V @@ -108,7 +108,7 @@ jobs: - uses: actions/checkout@master - uses: actions/setup-python@v1 with: - python-version: 3.9 + python-version: 3.8 - name: install flake8 run: python -m pip install flake8 - name: run lint @@ -135,7 +135,7 @@ jobs: tar -xzf geckodriver-v0.24.0-linux32.tar.gz -C geckodriver - uses: actions/setup-python@v1 with: - python-version: 3.9 + python-version: 3.8 - name: Install pipenv run: | python -V From d7c54538cb0f7fbe39a194dd2281581a4faa08d7 Mon Sep 17 00:00:00 2001 From: TheAnyKey Date: Sat, 2 May 2020 17:30:41 -0400 Subject: [PATCH 26/39] fixed fmt --- vm/src/obj/objstr.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/vm/src/obj/objstr.rs b/vm/src/obj/objstr.rs index 9b87380faa2..0b1747affdc 100644 --- a/vm/src/obj/objstr.rs +++ b/vm/src/obj/objstr.rs @@ -533,7 +533,7 @@ impl PyString { } #[pymethod] - fn removeprefix(&self, pref:PyStringRef) -> PyResult { + fn removeprefix(&self, pref: PyStringRef) -> PyResult { if self.value.as_str().starts_with(&pref.value) { return Ok(self.value[pref.len()..].to_string()); } @@ -541,9 +541,9 @@ impl PyString { } #[pymethod] - fn removesuffix(&self, suff:PyStringRef) -> PyResult { + fn removesuffix(&self, suff: PyStringRef) -> PyResult { if self.value.as_str().ends_with(&suff.value) { - return Ok(self.value[..self.value.len()-suff.len()].to_string()); + return Ok(self.value[..self.value.len() - suff.len()].to_string()); } Ok(self.value.to_string()) } From 9be1dea7fea87c181b122f05103c9282bad4e703 Mon Sep 17 00:00:00 2001 From: TheAnyKey Date: Sun, 3 May 2020 12:36:50 -0400 Subject: [PATCH 27/39] messed with branches -> shame on me => now cleaned up --- tests/snippets/dict_union.py | 83 ------------------------------------ vm/src/obj/objdict.rs | 46 -------------------- 2 files changed, 129 deletions(-) delete mode 100644 tests/snippets/dict_union.py diff --git a/tests/snippets/dict_union.py b/tests/snippets/dict_union.py deleted file mode 100644 index 29e0718d458..00000000000 --- a/tests/snippets/dict_union.py +++ /dev/null @@ -1,83 +0,0 @@ - -import testutils - -def test_dunion_ior0(): - a={1:2,2:3} - b={3:4,5:6} - a|=b - - assert a == {1:2,2:3,3:4,5:6}, f"wrong value assigned {a=}" - assert b == {3:4,5:6}, f"right hand side modified, {b=}" - -def test_dunion_or0(): - a={1:2,2:3} - b={3:4,5:6} - c=a|b - - 
assert a == {1:2,2:3}, f"left hand side of non-assignment operator modified {a=}" - assert b == {3:4,5:6}, f"right hand side of non-assignment operator modified, {b=}" - assert c == {1:2,2:3, 3:4, 5:6}, f"unexpected result of dict union {c=}" - - -def test_dunion_or1(): - a={1:2,2:3} - b={3:4,5:6} - c=a.__or__(b) - - assert a == {1:2,2:3}, f"left hand side of non-assignment operator modified {a=}" - assert b == {3:4,5:6}, f"right hand side of non-assignment operator modified, {b=}" - assert c == {1:2,2:3, 3:4, 5:6}, f"unexpected result of dict union {c=}" - - -def test_dunion_ror0(): - a={1:2,2:3} - b={3:4,5:6} - c=b.__ror__(a) - - assert a == {1:2,2:3}, f"left hand side of non-assignment operator modified {a=}" - assert b == {3:4,5:6}, f"right hand side of non-assignment operator modified, {b=}" - assert c == {1:2,2:3, 3:4, 5:6}, f"unexpected result of dict union {c=}" - - -def test_dunion_other_types(): - def perf_test_or(other_obj): - d={1:2} - try: - d.__or__(other_obj) - except: - return True - return False - - def perf_test_ior(other_obj): - d={1:2} - try: - d.__ior__(other_obj) - except: - return True - return False - - def perf_test_ror(other_obj): - d={1:2} - try: - d.__ror__(other_obj) - except: - return True - return False - - test_fct={'__or__':perf_test_or, '__ror__':perf_test_ror, '__ior__':perf_test_ior} - others=['FooBar', 42, [36], set([19]), ['aa'], None] - for tfn,tf in test_fct.items(): - for other in others: - assert tf(other), f"Failed: dict {tfn}, accepted {other}" - - - - -testutils.skip_if_unsupported(3,9,test_dunion_ior0) -testutils.skip_if_unsupported(3,9,test_dunion_or0) -testutils.skip_if_unsupported(3,9,test_dunion_or1) -testutils.skip_if_unsupported(3,9,test_dunion_ror0) -testutils.skip_if_unsupported(3,9,test_dunion_other_types) - - - diff --git a/vm/src/obj/objdict.rs b/vm/src/obj/objdict.rs index 80b2e0a3b12..a5330bdbcfd 100644 --- a/vm/src/obj/objdict.rs +++ b/vm/src/obj/objdict.rs @@ -104,17 +104,6 @@ impl PyDictRef { Ok(()) } - fn merge_dict( - dict: &DictContentType, - dict_other: PyDictRef, - vm: &VirtualMachine, - ) -> PyResult<()> { - for (key, value) in dict_other { - dict.insert(vm, &key, value)?; - } - Ok(()) - } - #[pyclassmethod] fn fromkeys( class: PyClassRef, @@ -331,41 +320,6 @@ impl PyDictRef { PyDictRef::merge(&self.entries, dict_obj, kwargs, vm) } - #[pymethod(name = "__ior__")] - fn ior(self, other: PyObjectRef, vm: &VirtualMachine) -> PyResult { - let dicted: Result = other.clone().downcast(); - if let Ok(other) = dicted { - PyDictRef::merge_dict(&self.entries, other, vm)?; - return Ok(self.into_object()); - } - let err_msg = vm.new_str("__ior__ not implemented for non-dict type".to_owned()); - Err(vm.new_key_error(err_msg)) - } - - #[pymethod(name = "__ror__")] - fn ror(self, other: PyObjectRef, vm: &VirtualMachine) -> PyResult { - let dicted: Result = other.clone().downcast(); - if let Ok(other) = dicted { - let other_cp = other.copy(); - PyDictRef::merge_dict(&other_cp.entries, self, vm)?; - return Ok(other_cp); - } - let err_msg = vm.new_str("__ror__ not implemented for non-dict type".to_owned()); - Err(vm.new_key_error(err_msg)) - } - - #[pymethod(name = "__or__")] - fn or(self, other: PyObjectRef, vm: &VirtualMachine) -> PyResult { - let dicted: Result = other.clone().downcast(); - if let Ok(other) = dicted { - let self_cp = self.copy(); - PyDictRef::merge_dict(&self_cp.entries, other, vm)?; - return Ok(self_cp); - } - let err_msg = vm.new_str("__or__ not implemented for non-dict type".to_owned()); - Err(vm.new_key_error(err_msg)) 
- } - #[pymethod] fn pop( self, From 1f895fac1f5216761fb6c01a8249919e91d385de Mon Sep 17 00:00:00 2001 From: TheAnyKey Date: Sun, 3 May 2020 12:46:32 -0400 Subject: [PATCH 28/39] last clean up and check --- tests/snippets/testutils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/snippets/testutils.py b/tests/snippets/testutils.py index b6be6989539..c779d2c8982 100644 --- a/tests/snippets/testutils.py +++ b/tests/snippets/testutils.py @@ -91,5 +91,5 @@ def exec(): elif sys.version_info.major>=req_maj_vers and sys.version_info.minor>=req_min_vers: exec() else: - assert False, f'Test cannot performed on this python version. {platform.python_implementation()} {paltform.python_version()}' + assert False, f'Test cannot performed on this python version. {platform.python_implementation()} {platform.python_version()}' From cbf720940d5ba74368dfd8d9094b5308197ff7a6 Mon Sep 17 00:00:00 2001 From: TheAnyKey <32773684+TheAnyKey@users.noreply.github.com> Date: Thu, 30 Apr 2020 18:30:38 +0200 Subject: [PATCH 29/39] Update ci.yaml builds now on any push --- .github/workflows/ci.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index e3cf7668295..0a3fac5cd3b 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -1,6 +1,6 @@ on: push: - branches: [master, release] + pull_request: name: CI From 224ad3f09b82070d4f4f36170348cd36d9b92ef8 Mon Sep 17 00:00:00 2001 From: TheAnyKey Date: Sun, 3 May 2020 12:06:41 -0400 Subject: [PATCH 30/39] fixed: returned wrong error when union opration is invoked with other type. So far it was a key error now it is a type error. --- vm/src/obj/objdict.rs | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/vm/src/obj/objdict.rs b/vm/src/obj/objdict.rs index a5330bdbcfd..b639c95e154 100644 --- a/vm/src/obj/objdict.rs +++ b/vm/src/obj/objdict.rs @@ -320,6 +320,41 @@ impl PyDictRef { PyDictRef::merge(&self.entries, dict_obj, kwargs, vm) } +<<<<<<< HEAD +======= + #[pymethod(name = "__ior__")] + fn ior(self, other: PyObjectRef, vm: &VirtualMachine) -> PyResult { + let dicted: Result = other.clone().downcast(); + if let Ok(other) = dicted { + PyDictRef::merge_dict(&self.entries, other, vm)?; + return Ok(self.into_object()); + } + Err(vm.new_type_error("__ior__ not implemented for non-dict type".to_owned())) + } + + #[pymethod(name = "__ror__")] + fn ror(self, other: PyObjectRef, vm: &VirtualMachine) -> PyResult { + let dicted: Result = other.clone().downcast(); + if let Ok(other) = dicted { + let other_cp = other.copy(); + PyDictRef::merge_dict(&other_cp.entries, self, vm)?; + return Ok(other_cp); + } + Err(vm.new_type_error("__ror__ not implemented for non-dict type".to_owned())) + } + + #[pymethod(name = "__or__")] + fn or(self, other: PyObjectRef, vm: &VirtualMachine) -> PyResult { + let dicted: Result = other.clone().downcast(); + if let Ok(other) = dicted { + let self_cp = self.copy(); + PyDictRef::merge_dict(&self_cp.entries, other, vm)?; + return Ok(self_cp); + } + Err(vm.new_type_error("__or__ not implemented for non-dict type".to_owned())) + } + +>>>>>>> 41849ab5... fixed: returned wrong error when union opration is invoked with other type. So far it was a key error now it is a type error. 
#[pymethod] fn pop( self, From e17847897a763ad9916fca81f54003f9c0d71d9d Mon Sep 17 00:00:00 2001 From: TheAnyKey Date: Thu, 7 May 2020 19:14:41 +0000 Subject: [PATCH 31/39] merged --- vm/src/obj/objdict.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/vm/src/obj/objdict.rs b/vm/src/obj/objdict.rs index b639c95e154..31e0650def3 100644 --- a/vm/src/obj/objdict.rs +++ b/vm/src/obj/objdict.rs @@ -320,8 +320,6 @@ impl PyDictRef { PyDictRef::merge(&self.entries, dict_obj, kwargs, vm) } -<<<<<<< HEAD -======= #[pymethod(name = "__ior__")] fn ior(self, other: PyObjectRef, vm: &VirtualMachine) -> PyResult { let dicted: Result = other.clone().downcast(); @@ -354,7 +352,6 @@ impl PyDictRef { Err(vm.new_type_error("__or__ not implemented for non-dict type".to_owned())) } ->>>>>>> 41849ab5... fixed: returned wrong error when union opration is invoked with other type. So far it was a key error now it is a type error. #[pymethod] fn pop( self, From 582d52f775373840e43f0e7fcdbd1e88b782cfcb Mon Sep 17 00:00:00 2001 From: TheAnyKey Date: Mon, 4 May 2020 15:48:56 -0400 Subject: [PATCH 32/39] added lcm with var args and changed to gcd to var args --- vm/src/stdlib/math.rs | 53 +++++++++++++++++++++++++++++++++++++++---- 1 file changed, 49 insertions(+), 4 deletions(-) diff --git a/vm/src/stdlib/math.rs b/vm/src/stdlib/math.rs index 6873e4352e9..1e7b35e5408 100644 --- a/vm/src/stdlib/math.rs +++ b/vm/src/stdlib/math.rs @@ -9,9 +9,9 @@ use statrs::function::gamma::{gamma, ln_gamma}; use num_bigint::BigInt; use num_traits::{One, Zero}; -use crate::function::OptionalArg; +use crate::function::{OptionalArg, PyFuncArgs}; use crate::obj::objfloat::{self, IntoPyFloat, PyFloatRef}; -use crate::obj::objint::{self, PyIntRef}; +use crate::obj::objint::{self, PyInt, PyIntRef}; use crate::obj::objtype; use crate::pyobject::{Either, PyObjectRef, PyResult, TypeProtocol}; use crate::vm::VirtualMachine; @@ -272,9 +272,53 @@ fn math_ldexp( Ok(value * (2_f64).powf(objint::try_float(i.as_bigint(), vm)?)) } -fn math_gcd(a: PyIntRef, b: PyIntRef) -> BigInt { +fn math_perf_arb_len_int_op( + args: PyFuncArgs, + vm: &VirtualMachine, + op: F, + default: BigInt, +) -> PyResult +where + F: Fn(&BigInt, &PyInt) -> BigInt, +{ + if !args.kwargs.is_empty() { + Err(vm.new_type_error("Takes no keyword arguments".to_owned())) + } else if args.args.is_empty() { + return Ok(default); + } else if args.args.len() == 1 { + let a: PyObjectRef = args.args[0].clone(); + if let Some(aa) = a.payload_if_subclass::(vm) { + let res = op(aa.as_bigint(), aa); + Ok(res) + } else { + Err(vm.new_type_error("Only integer arguments are supported".to_owned())) + } + } else { + let a = args.args[0].clone(); + if let Some(aa) = a.payload_if_subclass::(vm) { + let mut res = aa.as_bigint().clone(); + for b in args.args[1..].iter() { + if let Some(bb) = b.payload_if_subclass::(vm) { + res = op(&res, bb); + } else { + return Err(vm.new_type_error("Only integer arguments are supported".to_owned())); + } + } + Ok(res) + } else { + Err(vm.new_type_error("Only integer arguments are supported".to_owned())) + } + } +} + +fn math_gcd(args: PyFuncArgs, vm: &VirtualMachine) -> PyResult { + use num_integer::Integer; + math_perf_arb_len_int_op(args, vm, |x, y| x.gcd(y.as_bigint()), BigInt::zero()) +} + +fn math_lcm(args: PyFuncArgs, vm: &VirtualMachine) -> PyResult { use num_integer::Integer; - a.as_bigint().gcd(b.as_bigint()) + math_perf_arb_len_int_op(args, vm, |x, y| x.lcm(y.as_bigint()), BigInt::one()) } fn math_factorial(value: PyIntRef, vm: &VirtualMachine) -> PyResult { 
@@ -436,6 +480,7 @@ pub fn make_module(vm: &VirtualMachine) -> PyObjectRef { // Gcd function "gcd" => ctx.new_function(math_gcd), + "lcm" => ctx.new_function(math_lcm), // Factorial function "factorial" => ctx.new_function(math_factorial), From 0f98b65f28984d3a123ac20534c98c21c64b3173 Mon Sep 17 00:00:00 2001 From: TheAnyKey Date: Mon, 4 May 2020 15:59:29 -0400 Subject: [PATCH 33/39] cleaned up --- vm/src/stdlib/math.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/vm/src/stdlib/math.rs b/vm/src/stdlib/math.rs index 1e7b35e5408..7130d35cf92 100644 --- a/vm/src/stdlib/math.rs +++ b/vm/src/stdlib/math.rs @@ -301,7 +301,9 @@ where if let Some(bb) = b.payload_if_subclass::(vm) { res = op(&res, bb); } else { - return Err(vm.new_type_error("Only integer arguments are supported".to_owned())); + return Err( + vm.new_type_error("Only integer arguments are supported".to_owned()) + ); } } Ok(res) From cb70670ab2cc314f6e2a57cf7aa8311133ba0482 Mon Sep 17 00:00:00 2001 From: TheAnyKey Date: Mon, 4 May 2020 16:09:40 -0400 Subject: [PATCH 34/39] cleaned up --- vm/src/stdlib/math.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vm/src/stdlib/math.rs b/vm/src/stdlib/math.rs index 7130d35cf92..1ddbdf28934 100644 --- a/vm/src/stdlib/math.rs +++ b/vm/src/stdlib/math.rs @@ -284,7 +284,7 @@ where if !args.kwargs.is_empty() { Err(vm.new_type_error("Takes no keyword arguments".to_owned())) } else if args.args.is_empty() { - return Ok(default); + Ok(default) } else if args.args.len() == 1 { let a: PyObjectRef = args.args[0].clone(); if let Some(aa) = a.payload_if_subclass::(vm) { From e8283185539c636992ca513ede948e4606440761 Mon Sep 17 00:00:00 2001 From: TheAnyKey <32773684+TheAnyKey@users.noreply.github.com> Date: Wed, 6 May 2020 19:06:13 +0200 Subject: [PATCH 35/39] added info for this fork --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index c3f5fefed98..8eb2f9e31f5 100644 --- a/README.md +++ b/README.md @@ -16,6 +16,9 @@ A Python-3 (CPython >= 3.5.0) Interpreter written in Rust :snake: :scream: [![WAPM package](https://wapm.io/package/rustpython/badge.svg?style=flat)](https://wapm.io/package/rustpython) [![Open in Gitpod](https://img.shields.io/static/v1?label=Open%20in&message=Gitpod&color=1aa6e4&logo=gitpod)](https://gitpod.io#https://github.com/RustPython/RustPython) +For this Fork +[![Open in Gitpod](https://img.shields.io/static/v1?label=Open%20in&message=Gitpod&color=1aa6e4&logo=gitpod)](https://gitpod.io#https://github.com/TheAnyKey/RustPython) + ## Usage #### Check out our [online demo](https://rustpython.github.io/demo/) running on WebAssembly. 
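For reference, the Python-level behaviour the patches above aim to provide (PEP 584 dict union via __or__/__ror__/__ior__, str.removeprefix/str.removesuffix, and variadic math.gcd plus the new math.lcm) can be sketched as follows. This is an illustrative snippet based on the documented CPython 3.9 semantics, not a file from this patch series; the expected values are assumptions taken from CPython 3.9, so run it under a 3.9-compatible interpreter (or guard it with testutils.skip_if_unsupported(3, 9, ...) as the snippet tests above do).

    # Illustrative sketch of the CPython >= 3.9 semantics targeted by these patches.
    import math

    a = {1: 2, 2: 3}
    b = {2: 30, 3: 4}
    assert a | b == {1: 2, 2: 30, 3: 4}   # right-hand operand wins on a key clash
    c = dict(a)
    c |= b                                # in-place union, comparable to dict.update
    assert c == a | b

    assert 'foobarfoo'.removeprefix('foo') == 'barfoo'
    assert 'foobarfoo'.removesuffix('foo') == 'foobar'
    assert 'foobarfoo'.removeprefix('bar') == 'foobarfoo'   # no match leaves the string unchanged

    assert math.gcd(12, 18, 30) == 6      # gcd now accepts any number of integer arguments
    assert math.lcm(4, 6, 10) == 60       # lcm is new in 3.9
    assert math.gcd() == 0 and math.lcm() == 1   # zero-argument identity values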
From a66436a4469a24b3e65857d4a7e27e50030eb229 Mon Sep 17 00:00:00 2001 From: TheAnyKey Date: Thu, 7 May 2020 19:28:36 +0000 Subject: [PATCH 36/39] merged --- vm/src/obj/objdict.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/vm/src/obj/objdict.rs b/vm/src/obj/objdict.rs index 31e0650def3..e2cdb108db4 100644 --- a/vm/src/obj/objdict.rs +++ b/vm/src/obj/objdict.rs @@ -104,6 +104,17 @@ impl PyDictRef { Ok(()) } + fn merge_dict( + dict: &DictContentType, + dict_other: PyDictRef, + vm: &VirtualMachine, + ) -> PyResult<()> { + for (key, value) in dict_other { + dict.insert(vm, &key, value)?; + } + Ok(()) + } + #[pyclassmethod] fn fromkeys( class: PyClassRef, From 44f141c367617c430e739ef8abfbff7fd1003007 Mon Sep 17 00:00:00 2001 From: TheAnyKey <32773684+TheAnyKey@users.noreply.github.com> Date: Sat, 9 May 2020 20:13:31 +0200 Subject: [PATCH 37/39] Revert "Merge remote-tracking branch 'upstream/master'" This reverts commit 5b6b02770a18382730004797bc27fcb67220e9a8, reversing changes made to a66436a4469a24b3e65857d4a7e27e50030eb229. --- Cargo.lock | 10 - Lib/bdb.py | 880 ---------- Lib/cmd.py | 401 ----- Lib/doctest.py | 2786 ------------------------------ Lib/pdb.py | 1730 ------------------- Lib/test/test_json/__init__.py | 6 +- README.md | 9 +- bytecode/src/bytecode.rs | 4 - parser/src/lexer.rs | 4 +- src/shell.rs | 19 +- vm/src/builtins.rs | 2 +- vm/src/exceptions.rs | 62 +- vm/src/frame.rs | 11 +- vm/src/obj/objasyncgenerator.rs | 63 +- vm/src/obj/objbool.rs | 2 +- vm/src/obj/objcoroinner.rs | 55 +- vm/src/obj/objcoroutine.rs | 6 +- vm/src/obj/objfunction.rs | 5 - vm/src/obj/objgenerator.rs | 4 +- vm/src/stdlib/os.rs | 15 - vm/src/sysmodule.rs | 24 +- wasm/lib/Cargo.toml | 3 +- wasm/lib/src/convert.rs | 63 +- wasm/lib/src/vm_class.rs | 45 +- wasm/lib/src/wasm_builtins.rs | 84 +- wasm/tests/test_exec_mode.py | 11 +- wasm/tests/test_inject_module.py | 1 + 27 files changed, 240 insertions(+), 6065 deletions(-) delete mode 100644 Lib/bdb.py delete mode 100644 Lib/cmd.py delete mode 100644 Lib/doctest.py delete mode 100755 Lib/pdb.py diff --git a/Cargo.lock b/Cargo.lock index 023c6240ce8..1c279a2aaf6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -618,15 +618,6 @@ version = "0.1.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b980f2816d6ee8673b6517b52cb0e808a180efc92e5c19d02cdda79066703ef" -[[package]] -name = "generational-arena" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "921c3803adaeb9f9639de5149d9f0f9f4b79f00c423915b701db2e02ed80b9ce" -dependencies = [ - "cfg-if", -] - [[package]] name = "generic-array" version = "0.12.3" @@ -1651,7 +1642,6 @@ version = "0.1.0-pre-alpha.2" dependencies = [ "cfg-if", "futures", - "generational-arena", "js-sys", "rustpython-compiler", "rustpython-parser", diff --git a/Lib/bdb.py b/Lib/bdb.py deleted file mode 100644 index 18491da8973..00000000000 --- a/Lib/bdb.py +++ /dev/null @@ -1,880 +0,0 @@ -"""Debugger basics""" - -import fnmatch -import sys -import os -from inspect import CO_GENERATOR, CO_COROUTINE, CO_ASYNC_GENERATOR - -__all__ = ["BdbQuit", "Bdb", "Breakpoint"] - -GENERATOR_AND_COROUTINE_FLAGS = CO_GENERATOR | CO_COROUTINE | CO_ASYNC_GENERATOR - - -class BdbQuit(Exception): - """Exception to give up completely.""" - - -class Bdb: - """Generic Python debugger base class. - - This class takes care of details of the trace facility; - a derived class should implement user interaction. - The standard debugger class (pdb.Pdb) is an example. 
- - The optional skip argument must be an iterable of glob-style - module name patterns. The debugger will not step into frames - that originate in a module that matches one of these patterns. - Whether a frame is considered to originate in a certain module - is determined by the __name__ in the frame globals. - """ - - def __init__(self, skip=None): - self.skip = set(skip) if skip else None - self.breaks = {} - self.fncache = {} - self.frame_returning = None - - def canonic(self, filename): - """Return canonical form of filename. - - For real filenames, the canonical form is a case-normalized (on - case insensitive filesystems) absolute path. 'Filenames' with - angle brackets, such as "", generated in interactive - mode, are returned unchanged. - """ - if filename == "<" + filename[1:-1] + ">": - return filename - canonic = self.fncache.get(filename) - if not canonic: - canonic = os.path.abspath(filename) - canonic = os.path.normcase(canonic) - self.fncache[filename] = canonic - return canonic - - def reset(self): - """Set values of attributes as ready to start debugging.""" - import linecache - linecache.checkcache() - self.botframe = None - self._set_stopinfo(None, None) - - def trace_dispatch(self, frame, event, arg): - """Dispatch a trace function for debugged frames based on the event. - - This function is installed as the trace function for debugged - frames. Its return value is the new trace function, which is - usually itself. The default implementation decides how to - dispatch a frame, depending on the type of event (passed in as a - string) that is about to be executed. - - The event can be one of the following: - line: A new line of code is going to be executed. - call: A function is about to be called or another code block - is entered. - return: A function or other code block is about to return. - exception: An exception has occurred. - c_call: A C function is about to be called. - c_return: A C function has returned. - c_exception: A C function has raised an exception. - - For the Python events, specialized functions (see the dispatch_*() - methods) are called. For the C events, no action is taken. - - The arg parameter depends on the previous event. - """ - if self.quitting: - return # None - if event == 'line': - return self.dispatch_line(frame) - if event == 'call': - return self.dispatch_call(frame, arg) - if event == 'return': - return self.dispatch_return(frame, arg) - if event == 'exception': - return self.dispatch_exception(frame, arg) - if event == 'c_call': - return self.trace_dispatch - if event == 'c_exception': - return self.trace_dispatch - if event == 'c_return': - return self.trace_dispatch - print('bdb.Bdb.dispatch: unknown debugging event:', repr(event)) - return self.trace_dispatch - - def dispatch_line(self, frame): - """Invoke user function and return trace function for line event. - - If the debugger stops on the current line, invoke - self.user_line(). Raise BdbQuit if self.quitting is set. - Return self.trace_dispatch to continue tracing in this scope. - """ - if self.stop_here(frame) or self.break_here(frame): - self.user_line(frame) - if self.quitting: raise BdbQuit - return self.trace_dispatch - - def dispatch_call(self, frame, arg): - """Invoke user function and return trace function for call event. - - If the debugger stops on this function call, invoke - self.user_call(). Raise BbdQuit if self.quitting is set. - Return self.trace_dispatch to continue tracing in this scope. 
- """ - # XXX 'arg' is no longer used - if self.botframe is None: - # First call of dispatch since reset() - self.botframe = frame.f_back # (CT) Note that this may also be None! - return self.trace_dispatch - if not (self.stop_here(frame) or self.break_anywhere(frame)): - # No need to trace this function - return # None - # Ignore call events in generator except when stepping. - if self.stopframe and frame.f_code.co_flags & GENERATOR_AND_COROUTINE_FLAGS: - return self.trace_dispatch - self.user_call(frame, arg) - if self.quitting: raise BdbQuit - return self.trace_dispatch - - def dispatch_return(self, frame, arg): - """Invoke user function and return trace function for return event. - - If the debugger stops on this function return, invoke - self.user_return(). Raise BdbQuit if self.quitting is set. - Return self.trace_dispatch to continue tracing in this scope. - """ - if self.stop_here(frame) or frame == self.returnframe: - # Ignore return events in generator except when stepping. - if self.stopframe and frame.f_code.co_flags & GENERATOR_AND_COROUTINE_FLAGS: - return self.trace_dispatch - try: - self.frame_returning = frame - self.user_return(frame, arg) - finally: - self.frame_returning = None - if self.quitting: raise BdbQuit - # The user issued a 'next' or 'until' command. - if self.stopframe is frame and self.stoplineno != -1: - self._set_stopinfo(None, None) - return self.trace_dispatch - - def dispatch_exception(self, frame, arg): - """Invoke user function and return trace function for exception event. - - If the debugger stops on this exception, invoke - self.user_exception(). Raise BdbQuit if self.quitting is set. - Return self.trace_dispatch to continue tracing in this scope. - """ - if self.stop_here(frame): - # When stepping with next/until/return in a generator frame, skip - # the internal StopIteration exception (with no traceback) - # triggered by a subiterator run with the 'yield from' statement. - if not (frame.f_code.co_flags & GENERATOR_AND_COROUTINE_FLAGS - and arg[0] is StopIteration and arg[2] is None): - self.user_exception(frame, arg) - if self.quitting: raise BdbQuit - # Stop at the StopIteration or GeneratorExit exception when the user - # has set stopframe in a generator by issuing a return command, or a - # next/until command at the last statement in the generator before the - # exception. - elif (self.stopframe and frame is not self.stopframe - and self.stopframe.f_code.co_flags & GENERATOR_AND_COROUTINE_FLAGS - and arg[0] in (StopIteration, GeneratorExit)): - self.user_exception(frame, arg) - if self.quitting: raise BdbQuit - - return self.trace_dispatch - - # Normally derived classes don't override the following - # methods, but they may if they want to redefine the - # definition of stopping and breakpoints. - - def is_skipped_module(self, module_name): - "Return True if module_name matches any skip pattern." - if module_name is None: # some modules do not have names - return False - for pattern in self.skip: - if fnmatch.fnmatch(module_name, pattern): - return True - return False - - def stop_here(self, frame): - "Return True if frame is below the starting frame in the stack." - # (CT) stopframe may now also be None, see dispatch_call. - # (CT) the former test for None is therefore removed from here. 
- if self.skip and \ - self.is_skipped_module(frame.f_globals.get('__name__')): - return False - if frame is self.stopframe: - if self.stoplineno == -1: - return False - return frame.f_lineno >= self.stoplineno - if not self.stopframe: - return True - return False - - def break_here(self, frame): - """Return True if there is an effective breakpoint for this line. - - Check for line or function breakpoint and if in effect. - Delete temporary breakpoints if effective() says to. - """ - filename = self.canonic(frame.f_code.co_filename) - if filename not in self.breaks: - return False - lineno = frame.f_lineno - if lineno not in self.breaks[filename]: - # The line itself has no breakpoint, but maybe the line is the - # first line of a function with breakpoint set by function name. - lineno = frame.f_code.co_firstlineno - if lineno not in self.breaks[filename]: - return False - - # flag says ok to delete temp. bp - (bp, flag) = effective(filename, lineno, frame) - if bp: - self.currentbp = bp.number - if (flag and bp.temporary): - self.do_clear(str(bp.number)) - return True - else: - return False - - def do_clear(self, arg): - """Remove temporary breakpoint. - - Must implement in derived classes or get NotImplementedError. - """ - raise NotImplementedError("subclass of bdb must implement do_clear()") - - def break_anywhere(self, frame): - """Return True if there is any breakpoint for frame's filename. - """ - return self.canonic(frame.f_code.co_filename) in self.breaks - - # Derived classes should override the user_* methods - # to gain control. - - def user_call(self, frame, argument_list): - """Called if we might stop in a function.""" - pass - - def user_line(self, frame): - """Called when we stop or break at a line.""" - pass - - def user_return(self, frame, return_value): - """Called when a return trap is set here.""" - pass - - def user_exception(self, frame, exc_info): - """Called when we stop on an exception.""" - pass - - def _set_stopinfo(self, stopframe, returnframe, stoplineno=0): - """Set the attributes for stopping. - - If stoplineno is greater than or equal to 0, then stop at line - greater than or equal to the stopline. If stoplineno is -1, then - don't stop at all. - """ - self.stopframe = stopframe - self.returnframe = returnframe - self.quitting = False - # stoplineno >= 0 means: stop at line >= the stoplineno - # stoplineno -1 means: don't stop at all - self.stoplineno = stoplineno - - # Derived classes and clients can call the following methods - # to affect the stepping state. - - def set_until(self, frame, lineno=None): - """Stop when the line with the lineno greater than the current one is - reached or when returning from current frame.""" - # the name "until" is borrowed from gdb - if lineno is None: - lineno = frame.f_lineno + 1 - self._set_stopinfo(frame, frame, lineno) - - def set_step(self): - """Stop after one line of code.""" - # Issue #13183: pdb skips frames after hitting a breakpoint and running - # step commands. - # Restore the trace function in the caller (that may not have been set - # for performance reasons) when returning from the current frame. 
- if self.frame_returning: - caller_frame = self.frame_returning.f_back - if caller_frame and not caller_frame.f_trace: - caller_frame.f_trace = self.trace_dispatch - self._set_stopinfo(None, None) - - def set_next(self, frame): - """Stop on the next line in or below the given frame.""" - self._set_stopinfo(frame, None) - - def set_return(self, frame): - """Stop when returning from the given frame.""" - if frame.f_code.co_flags & GENERATOR_AND_COROUTINE_FLAGS: - self._set_stopinfo(frame, None, -1) - else: - self._set_stopinfo(frame.f_back, frame) - - def set_trace(self, frame=None): - """Start debugging from frame. - - If frame is not specified, debugging starts from caller's frame. - """ - if frame is None: - frame = sys._getframe().f_back - self.reset() - while frame: - frame.f_trace = self.trace_dispatch - self.botframe = frame - frame = frame.f_back - self.set_step() - sys.settrace(self.trace_dispatch) - - def set_continue(self): - """Stop only at breakpoints or when finished. - - If there are no breakpoints, set the system trace function to None. - """ - # Don't stop except at breakpoints or when finished - self._set_stopinfo(self.botframe, None, -1) - if not self.breaks: - # no breakpoints; run without debugger overhead - sys.settrace(None) - frame = sys._getframe().f_back - while frame and frame is not self.botframe: - del frame.f_trace - frame = frame.f_back - - def set_quit(self): - """Set quitting attribute to True. - - Raises BdbQuit exception in the next call to a dispatch_*() method. - """ - self.stopframe = self.botframe - self.returnframe = None - self.quitting = True - sys.settrace(None) - - # Derived classes and clients can call the following methods - # to manipulate breakpoints. These methods return an - # error message if something went wrong, None if all is well. - # Set_break prints out the breakpoint line and file:lineno. - # Call self.get_*break*() to see the breakpoints or better - # for bp in Breakpoint.bpbynumber: if bp: bp.bpprint(). - - def set_break(self, filename, lineno, temporary=False, cond=None, - funcname=None): - """Set a new breakpoint for filename:lineno. - - If lineno doesn't exist for the filename, return an error message. - The filename should be in canonical form. - """ - filename = self.canonic(filename) - import linecache # Import as late as possible - line = linecache.getline(filename, lineno) - if not line: - return 'Line %s:%d does not exist' % (filename, lineno) - list = self.breaks.setdefault(filename, []) - if lineno not in list: - list.append(lineno) - bp = Breakpoint(filename, lineno, temporary, cond, funcname) - return None - - def _prune_breaks(self, filename, lineno): - """Prune breakpoints for filename:lineno. - - A list of breakpoints is maintained in the Bdb instance and in - the Breakpoint class. If a breakpoint in the Bdb instance no - longer exists in the Breakpoint class, then it's removed from the - Bdb instance. - """ - if (filename, lineno) not in Breakpoint.bplist: - self.breaks[filename].remove(lineno) - if not self.breaks[filename]: - del self.breaks[filename] - - def clear_break(self, filename, lineno): - """Delete breakpoints for filename:lineno. - - If no breakpoints were set, return an error message. 
- """ - filename = self.canonic(filename) - if filename not in self.breaks: - return 'There are no breakpoints in %s' % filename - if lineno not in self.breaks[filename]: - return 'There is no breakpoint at %s:%d' % (filename, lineno) - # If there's only one bp in the list for that file,line - # pair, then remove the breaks entry - for bp in Breakpoint.bplist[filename, lineno][:]: - bp.deleteMe() - self._prune_breaks(filename, lineno) - return None - - def clear_bpbynumber(self, arg): - """Delete a breakpoint by its index in Breakpoint.bpbynumber. - - If arg is invalid, return an error message. - """ - try: - bp = self.get_bpbynumber(arg) - except ValueError as err: - return str(err) - bp.deleteMe() - self._prune_breaks(bp.file, bp.line) - return None - - def clear_all_file_breaks(self, filename): - """Delete all breakpoints in filename. - - If none were set, return an error message. - """ - filename = self.canonic(filename) - if filename not in self.breaks: - return 'There are no breakpoints in %s' % filename - for line in self.breaks[filename]: - blist = Breakpoint.bplist[filename, line] - for bp in blist: - bp.deleteMe() - del self.breaks[filename] - return None - - def clear_all_breaks(self): - """Delete all existing breakpoints. - - If none were set, return an error message. - """ - if not self.breaks: - return 'There are no breakpoints' - for bp in Breakpoint.bpbynumber: - if bp: - bp.deleteMe() - self.breaks = {} - return None - - def get_bpbynumber(self, arg): - """Return a breakpoint by its index in Breakpoint.bybpnumber. - - For invalid arg values or if the breakpoint doesn't exist, - raise a ValueError. - """ - if not arg: - raise ValueError('Breakpoint number expected') - try: - number = int(arg) - except ValueError: - raise ValueError('Non-numeric breakpoint number %s' % arg) from None - try: - bp = Breakpoint.bpbynumber[number] - except IndexError: - raise ValueError('Breakpoint number %d out of range' % number) from None - if bp is None: - raise ValueError('Breakpoint %d already deleted' % number) - return bp - - def get_break(self, filename, lineno): - """Return True if there is a breakpoint for filename:lineno.""" - filename = self.canonic(filename) - return filename in self.breaks and \ - lineno in self.breaks[filename] - - def get_breaks(self, filename, lineno): - """Return all breakpoints for filename:lineno. - - If no breakpoints are set, return an empty list. - """ - filename = self.canonic(filename) - return filename in self.breaks and \ - lineno in self.breaks[filename] and \ - Breakpoint.bplist[filename, lineno] or [] - - def get_file_breaks(self, filename): - """Return all lines with breakpoints for filename. - - If no breakpoints are set, return an empty list. - """ - filename = self.canonic(filename) - if filename in self.breaks: - return self.breaks[filename] - else: - return [] - - def get_all_breaks(self): - """Return all breakpoints that are set.""" - return self.breaks - - # Derived classes and clients can call the following method - # to get a data structure representing a stack trace. - - def get_stack(self, f, t): - """Return a list of (frame, lineno) in a stack trace and a size. - - List starts with original calling frame, if there is one. - Size may be number of frames above or below f. 
- """ - stack = [] - if t and t.tb_frame is f: - t = t.tb_next - while f is not None: - stack.append((f, f.f_lineno)) - if f is self.botframe: - break - f = f.f_back - stack.reverse() - i = max(0, len(stack) - 1) - while t is not None: - stack.append((t.tb_frame, t.tb_lineno)) - t = t.tb_next - if f is None: - i = max(0, len(stack) - 1) - return stack, i - - def format_stack_entry(self, frame_lineno, lprefix=': '): - """Return a string with information about a stack entry. - - The stack entry frame_lineno is a (frame, lineno) tuple. The - return string contains the canonical filename, the function name - or '', the input arguments, the return value, and the - line of code (if it exists). - - """ - import linecache, reprlib - frame, lineno = frame_lineno - filename = self.canonic(frame.f_code.co_filename) - s = '%s(%r)' % (filename, lineno) - if frame.f_code.co_name: - s += frame.f_code.co_name - else: - s += "" - s += '()' - if '__return__' in frame.f_locals: - rv = frame.f_locals['__return__'] - s += '->' - s += reprlib.repr(rv) - line = linecache.getline(filename, lineno, frame.f_globals) - if line: - s += lprefix + line.strip() - return s - - # The following methods can be called by clients to use - # a debugger to debug a statement or an expression. - # Both can be given as a string, or a code object. - - def run(self, cmd, globals=None, locals=None): - """Debug a statement executed via the exec() function. - - globals defaults to __main__.dict; locals defaults to globals. - """ - if globals is None: - import __main__ - globals = __main__.__dict__ - if locals is None: - locals = globals - self.reset() - if isinstance(cmd, str): - cmd = compile(cmd, "", "exec") - sys.settrace(self.trace_dispatch) - try: - exec(cmd, globals, locals) - except BdbQuit: - pass - finally: - self.quitting = True - sys.settrace(None) - - def runeval(self, expr, globals=None, locals=None): - """Debug an expression executed via the eval() function. - - globals defaults to __main__.dict; locals defaults to globals. - """ - if globals is None: - import __main__ - globals = __main__.__dict__ - if locals is None: - locals = globals - self.reset() - sys.settrace(self.trace_dispatch) - try: - return eval(expr, globals, locals) - except BdbQuit: - pass - finally: - self.quitting = True - sys.settrace(None) - - def runctx(self, cmd, globals, locals): - """For backwards-compatibility. Defers to run().""" - # B/W compatibility - self.run(cmd, globals, locals) - - # This method is more useful to debug a single function call. - - def runcall(*args, **kwds): - """Debug a single function call. - - Return the result of the function call. - """ - if len(args) >= 2: - self, func, *args = args - elif not args: - raise TypeError("descriptor 'runcall' of 'Bdb' object " - "needs an argument") - elif 'func' in kwds: - func = kwds.pop('func') - self, *args = args - import warnings - warnings.warn("Passing 'func' as keyword argument is deprecated", - DeprecationWarning, stacklevel=2) - else: - raise TypeError('runcall expected at least 1 positional argument, ' - 'got %d' % (len(args)-1)) - - self.reset() - sys.settrace(self.trace_dispatch) - res = None - try: - res = func(*args, **kwds) - except BdbQuit: - pass - finally: - self.quitting = True - sys.settrace(None) - return res - runcall.__text_signature__ = '($self, func, /, *args, **kwds)' - - -def set_trace(): - """Start debugging with a Bdb instance from the caller's frame.""" - Bdb().set_trace() - - -class Breakpoint: - """Breakpoint class. 
- - Implements temporary breakpoints, ignore counts, disabling and - (re)-enabling, and conditionals. - - Breakpoints are indexed by number through bpbynumber and by - the (file, line) tuple using bplist. The former points to a - single instance of class Breakpoint. The latter points to a - list of such instances since there may be more than one - breakpoint per line. - - When creating a breakpoint, its associated filename should be - in canonical form. If funcname is defined, a breakpoint hit will be - counted when the first line of that function is executed. A - conditional breakpoint always counts a hit. - """ - - # XXX Keeping state in the class is a mistake -- this means - # you cannot have more than one active Bdb instance. - - next = 1 # Next bp to be assigned - bplist = {} # indexed by (file, lineno) tuple - bpbynumber = [None] # Each entry is None or an instance of Bpt - # index 0 is unused, except for marking an - # effective break .... see effective() - - def __init__(self, file, line, temporary=False, cond=None, funcname=None): - self.funcname = funcname - # Needed if funcname is not None. - self.func_first_executable_line = None - self.file = file # This better be in canonical form! - self.line = line - self.temporary = temporary - self.cond = cond - self.enabled = True - self.ignore = 0 - self.hits = 0 - self.number = Breakpoint.next - Breakpoint.next += 1 - # Build the two lists - self.bpbynumber.append(self) - if (file, line) in self.bplist: - self.bplist[file, line].append(self) - else: - self.bplist[file, line] = [self] - - def deleteMe(self): - """Delete the breakpoint from the list associated to a file:line. - - If it is the last breakpoint in that position, it also deletes - the entry for the file:line. - """ - - index = (self.file, self.line) - self.bpbynumber[self.number] = None # No longer in list - self.bplist[index].remove(self) - if not self.bplist[index]: - # No more bp for this f:l combo - del self.bplist[index] - - def enable(self): - """Mark the breakpoint as enabled.""" - self.enabled = True - - def disable(self): - """Mark the breakpoint as disabled.""" - self.enabled = False - - def bpprint(self, out=None): - """Print the output of bpformat(). - - The optional out argument directs where the output is sent - and defaults to standard output. - """ - if out is None: - out = sys.stdout - print(self.bpformat(), file=out) - - def bpformat(self): - """Return a string with information about the breakpoint. - - The information includes the breakpoint number, temporary - status, file:line position, break condition, number of times to - ignore, and number of times hit. - - """ - if self.temporary: - disp = 'del ' - else: - disp = 'keep ' - if self.enabled: - disp = disp + 'yes ' - else: - disp = disp + 'no ' - ret = '%-4dbreakpoint %s at %s:%d' % (self.number, disp, - self.file, self.line) - if self.cond: - ret += '\n\tstop only if %s' % (self.cond,) - if self.ignore: - ret += '\n\tignore next %d hits' % (self.ignore,) - if self.hits: - if self.hits > 1: - ss = 's' - else: - ss = '' - ret += '\n\tbreakpoint already hit %d time%s' % (self.hits, ss) - return ret - - def __str__(self): - "Return a condensed description of the breakpoint." - return 'breakpoint %s at %s:%s' % (self.number, self.file, self.line) - -# -----------end of Breakpoint class---------- - - -def checkfuncname(b, frame): - """Return True if break should happen here. - - Whether a break should happen depends on the way that b (the breakpoint) - was set. 
If it was set via line number, check if b.line is the same as - the one in the frame. If it was set via function name, check if this is - the right function and if it is on the first executable line. - """ - if not b.funcname: - # Breakpoint was set via line number. - if b.line != frame.f_lineno: - # Breakpoint was set at a line with a def statement and the function - # defined is called: don't break. - return False - return True - - # Breakpoint set via function name. - if frame.f_code.co_name != b.funcname: - # It's not a function call, but rather execution of def statement. - return False - - # We are in the right frame. - if not b.func_first_executable_line: - # The function is entered for the 1st time. - b.func_first_executable_line = frame.f_lineno - - if b.func_first_executable_line != frame.f_lineno: - # But we are not at the first line number: don't break. - return False - return True - - -# Determines if there is an effective (active) breakpoint at this -# line of code. Returns breakpoint number or 0 if none -def effective(file, line, frame): - """Determine which breakpoint for this file:line is to be acted upon. - - Called only if we know there is a breakpoint at this location. Return - the breakpoint that was triggered and a boolean that indicates if it is - ok to delete a temporary breakpoint. Return (None, None) if there is no - matching breakpoint. - """ - possibles = Breakpoint.bplist[file, line] - for b in possibles: - if not b.enabled: - continue - if not checkfuncname(b, frame): - continue - # Count every hit when bp is enabled - b.hits += 1 - if not b.cond: - # If unconditional, and ignoring go on to next, else break - if b.ignore > 0: - b.ignore -= 1 - continue - else: - # breakpoint and marker that it's ok to delete if temporary - return (b, True) - else: - # Conditional bp. - # Ignore count applies only to those bpt hits where the - # condition evaluates to true. - try: - val = eval(b.cond, frame.f_globals, frame.f_locals) - if val: - if b.ignore > 0: - b.ignore -= 1 - # continue - else: - return (b, True) - # else: - # continue - except: - # if eval fails, most conservative thing is to stop on - # breakpoint regardless of ignore count. Don't delete - # temporary, as another hint to user. - return (b, False) - return (None, None) - - -# -------------------- testing -------------------- - -class Tdb(Bdb): - def user_call(self, frame, args): - name = frame.f_code.co_name - if not name: name = '???' - print('+++ call', name, args) - def user_line(self, frame): - import linecache - name = frame.f_code.co_name - if not name: name = '???' - fn = self.canonic(frame.f_code.co_filename) - line = linecache.getline(fn, frame.f_lineno, frame.f_globals) - print('+++', fn, frame.f_lineno, name, ':', line.strip()) - def user_return(self, frame, retval): - print('+++ return', retval) - def user_exception(self, frame, exc_stuff): - print('+++ exception', exc_stuff) - self.set_continue() - -def foo(n): - print('foo(', n, ')') - x = bar(n*10) - print('bar returned', x) - -def bar(a): - print('bar(', a, ')') - return a/2 - -def test(): - t = Tdb() - t.run('import bdb; bdb.foo(10)') diff --git a/Lib/cmd.py b/Lib/cmd.py deleted file mode 100644 index 859e91096d8..00000000000 --- a/Lib/cmd.py +++ /dev/null @@ -1,401 +0,0 @@ -"""A generic class to build line-oriented command interpreters. - -Interpreters constructed with this class obey the following conventions: - -1. End of file on input is processed as the command 'EOF'. -2. 
A command is parsed out of each line by collecting the prefix composed - of characters in the identchars member. -3. A command `foo' is dispatched to a method 'do_foo()'; the do_ method - is passed a single argument consisting of the remainder of the line. -4. Typing an empty line repeats the last command. (Actually, it calls the - method `emptyline', which may be overridden in a subclass.) -5. There is a predefined `help' method. Given an argument `topic', it - calls the command `help_topic'. With no arguments, it lists all topics - with defined help_ functions, broken into up to three topics; documented - commands, miscellaneous help topics, and undocumented commands. -6. The command '?' is a synonym for `help'. The command '!' is a synonym - for `shell', if a do_shell method exists. -7. If completion is enabled, completing commands will be done automatically, - and completing of commands args is done by calling complete_foo() with - arguments text, line, begidx, endidx. text is string we are matching - against, all returned matches must begin with it. line is the current - input line (lstripped), begidx and endidx are the beginning and end - indexes of the text being matched, which could be used to provide - different completion depending upon which position the argument is in. - -The `default' method may be overridden to intercept commands for which there -is no do_ method. - -The `completedefault' method may be overridden to intercept completions for -commands that have no complete_ method. - -The data member `self.ruler' sets the character used to draw separator lines -in the help messages. If empty, no ruler line is drawn. It defaults to "=". - -If the value of `self.intro' is nonempty when the cmdloop method is called, -it is printed out on interpreter startup. This value may be overridden -via an optional argument to the cmdloop() method. - -The data members `self.doc_header', `self.misc_header', and -`self.undoc_header' set the headers used for the help function's -listings of documented functions, miscellaneous topics, and undocumented -functions respectively. -""" - -import string, sys - -__all__ = ["Cmd"] - -PROMPT = '(Cmd) ' -IDENTCHARS = string.ascii_letters + string.digits + '_' - -class Cmd: - """A simple framework for writing line-oriented command interpreters. - - These are often useful for test harnesses, administrative tools, and - prototypes that will later be wrapped in a more sophisticated interface. - - A Cmd instance or subclass instance is a line-oriented interpreter - framework. There is no good reason to instantiate Cmd itself; rather, - it's useful as a superclass of an interpreter class you define yourself - in order to inherit Cmd's methods and encapsulate action methods. - - """ - prompt = PROMPT - identchars = IDENTCHARS - ruler = '=' - lastcmd = '' - intro = None - doc_leader = "" - doc_header = "Documented commands (type help ):" - misc_header = "Miscellaneous help topics:" - undoc_header = "Undocumented commands:" - nohelp = "*** No help on %s" - use_rawinput = 1 - - def __init__(self, completekey='tab', stdin=None, stdout=None): - """Instantiate a line-oriented interpreter framework. - - The optional argument 'completekey' is the readline name of a - completion key; it defaults to the Tab key. If completekey is - not None and the readline module is available, command completion - is done automatically. The optional arguments stdin and stdout - specify alternate input and output file objects; if not specified, - sys.stdin and sys.stdout are used. 
- - """ - if stdin is not None: - self.stdin = stdin - else: - self.stdin = sys.stdin - if stdout is not None: - self.stdout = stdout - else: - self.stdout = sys.stdout - self.cmdqueue = [] - self.completekey = completekey - - def cmdloop(self, intro=None): - """Repeatedly issue a prompt, accept input, parse an initial prefix - off the received input, and dispatch to action methods, passing them - the remainder of the line as argument. - - """ - - self.preloop() - if self.use_rawinput and self.completekey: - try: - import readline - self.old_completer = readline.get_completer() - readline.set_completer(self.complete) - readline.parse_and_bind(self.completekey+": complete") - except ImportError: - pass - try: - if intro is not None: - self.intro = intro - if self.intro: - self.stdout.write(str(self.intro)+"\n") - stop = None - while not stop: - if self.cmdqueue: - line = self.cmdqueue.pop(0) - else: - if self.use_rawinput: - try: - line = input(self.prompt) - except EOFError: - line = 'EOF' - else: - self.stdout.write(self.prompt) - self.stdout.flush() - line = self.stdin.readline() - if not len(line): - line = 'EOF' - else: - line = line.rstrip('\r\n') - line = self.precmd(line) - stop = self.onecmd(line) - stop = self.postcmd(stop, line) - self.postloop() - finally: - if self.use_rawinput and self.completekey: - try: - import readline - readline.set_completer(self.old_completer) - except ImportError: - pass - - - def precmd(self, line): - """Hook method executed just before the command line is - interpreted, but after the input prompt is generated and issued. - - """ - return line - - def postcmd(self, stop, line): - """Hook method executed just after a command dispatch is finished.""" - return stop - - def preloop(self): - """Hook method executed once when the cmdloop() method is called.""" - pass - - def postloop(self): - """Hook method executed once when the cmdloop() method is about to - return. - - """ - pass - - def parseline(self, line): - """Parse the line into a command name and a string containing - the arguments. Returns a tuple containing (command, args, line). - 'command' and 'args' may be None if the line couldn't be parsed. - """ - line = line.strip() - if not line: - return None, None, line - elif line[0] == '?': - line = 'help ' + line[1:] - elif line[0] == '!': - if hasattr(self, 'do_shell'): - line = 'shell ' + line[1:] - else: - return None, None, line - i, n = 0, len(line) - while i < n and line[i] in self.identchars: i = i+1 - cmd, arg = line[:i], line[i:].strip() - return cmd, arg, line - - def onecmd(self, line): - """Interpret the argument as though it had been typed in response - to the prompt. - - This may be overridden, but should not normally need to be; - see the precmd() and postcmd() methods for useful execution hooks. - The return value is a flag indicating whether interpretation of - commands by the interpreter should stop. - - """ - cmd, arg, line = self.parseline(line) - if not line: - return self.emptyline() - if cmd is None: - return self.default(line) - self.lastcmd = line - if line == 'EOF' : - self.lastcmd = '' - if cmd == '': - return self.default(line) - else: - try: - func = getattr(self, 'do_' + cmd) - except AttributeError: - return self.default(line) - return func(arg) - - def emptyline(self): - """Called when an empty line is entered in response to the prompt. - - If this method is not overridden, it repeats the last nonempty - command entered. 
- - """ - if self.lastcmd: - return self.onecmd(self.lastcmd) - - def default(self, line): - """Called on an input line when the command prefix is not recognized. - - If this method is not overridden, it prints an error message and - returns. - - """ - self.stdout.write('*** Unknown syntax: %s\n'%line) - - def completedefault(self, *ignored): - """Method called to complete an input line when no command-specific - complete_*() method is available. - - By default, it returns an empty list. - - """ - return [] - - def completenames(self, text, *ignored): - dotext = 'do_'+text - return [a[3:] for a in self.get_names() if a.startswith(dotext)] - - def complete(self, text, state): - """Return the next possible completion for 'text'. - - If a command has not been entered, then complete against command list. - Otherwise try to call complete_ to get list of completions. - """ - if state == 0: - import readline - origline = readline.get_line_buffer() - line = origline.lstrip() - stripped = len(origline) - len(line) - begidx = readline.get_begidx() - stripped - endidx = readline.get_endidx() - stripped - if begidx>0: - cmd, args, foo = self.parseline(line) - if cmd == '': - compfunc = self.completedefault - else: - try: - compfunc = getattr(self, 'complete_' + cmd) - except AttributeError: - compfunc = self.completedefault - else: - compfunc = self.completenames - self.completion_matches = compfunc(text, line, begidx, endidx) - try: - return self.completion_matches[state] - except IndexError: - return None - - def get_names(self): - # This method used to pull in base class attributes - # at a time dir() didn't do it yet. - return dir(self.__class__) - - def complete_help(self, *args): - commands = set(self.completenames(*args)) - topics = set(a[5:] for a in self.get_names() - if a.startswith('help_' + args[0])) - return list(commands | topics) - - def do_help(self, arg): - 'List available commands with "help" or detailed help with "help cmd".' - if arg: - # XXX check arg syntax - try: - func = getattr(self, 'help_' + arg) - except AttributeError: - try: - doc=getattr(self, 'do_' + arg).__doc__ - if doc: - self.stdout.write("%s\n"%str(doc)) - return - except AttributeError: - pass - self.stdout.write("%s\n"%str(self.nohelp % (arg,))) - return - func() - else: - names = self.get_names() - cmds_doc = [] - cmds_undoc = [] - help = {} - for name in names: - if name[:5] == 'help_': - help[name[5:]]=1 - names.sort() - # There can be duplicates if routines overridden - prevname = '' - for name in names: - if name[:3] == 'do_': - if name == prevname: - continue - prevname = name - cmd=name[3:] - if cmd in help: - cmds_doc.append(cmd) - del help[cmd] - elif getattr(self, name).__doc__: - cmds_doc.append(cmd) - else: - cmds_undoc.append(cmd) - self.stdout.write("%s\n"%str(self.doc_leader)) - self.print_topics(self.doc_header, cmds_doc, 15,80) - self.print_topics(self.misc_header, list(help.keys()),15,80) - self.print_topics(self.undoc_header, cmds_undoc, 15,80) - - def print_topics(self, header, cmds, cmdlen, maxcol): - if cmds: - self.stdout.write("%s\n"%str(header)) - if self.ruler: - self.stdout.write("%s\n"%str(self.ruler * len(header))) - self.columnize(cmds, maxcol-1) - self.stdout.write("\n") - - def columnize(self, list, displaywidth=80): - """Display a list of strings as a compact set of columns. - - Each column is only as wide as necessary. - Columns are separated by two spaces (one was not legible enough). 
- """ - if not list: - self.stdout.write("\n") - return - - nonstrings = [i for i in range(len(list)) - if not isinstance(list[i], str)] - if nonstrings: - raise TypeError("list[i] not a string for i in %s" - % ", ".join(map(str, nonstrings))) - size = len(list) - if size == 1: - self.stdout.write('%s\n'%str(list[0])) - return - # Try every row count from 1 upwards - for nrows in range(1, len(list)): - ncols = (size+nrows-1) // nrows - colwidths = [] - totwidth = -2 - for col in range(ncols): - colwidth = 0 - for row in range(nrows): - i = row + nrows*col - if i >= size: - break - x = list[i] - colwidth = max(colwidth, len(x)) - colwidths.append(colwidth) - totwidth += colwidth + 2 - if totwidth > displaywidth: - break - if totwidth <= displaywidth: - break - else: - nrows = len(list) - ncols = 1 - colwidths = [0] - for row in range(nrows): - texts = [] - for col in range(ncols): - i = row + nrows*col - if i >= size: - x = "" - else: - x = list[i] - texts.append(x) - while texts and not texts[-1]: - del texts[-1] - for col in range(len(texts)): - texts[col] = texts[col].ljust(colwidths[col]) - self.stdout.write("%s\n"%str(" ".join(texts))) diff --git a/Lib/doctest.py b/Lib/doctest.py deleted file mode 100644 index dcbcfe52e90..00000000000 --- a/Lib/doctest.py +++ /dev/null @@ -1,2786 +0,0 @@ -# Module doctest. -# Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org). -# Major enhancements and refactoring by: -# Jim Fulton -# Edward Loper - -# Provided as-is; use at your own risk; no warranty; no promises; enjoy! - -r"""Module doctest -- a framework for running examples in docstrings. - -In simplest use, end each module M to be tested with: - -def _test(): - import doctest - doctest.testmod() - -if __name__ == "__main__": - _test() - -Then running the module as a script will cause the examples in the -docstrings to get executed and verified: - -python M.py - -This won't display anything unless an example fails, in which case the -failing example(s) and the cause(s) of the failure(s) are printed to stdout -(why not stderr? because stderr is a lame hack <0.2 wink>), and the final -line of output is "Test failed.". - -Run it with the -v switch instead: - -python M.py -v - -and a detailed report of all examples tried is printed to stdout, along -with assorted summaries at the end. - -You can force verbose mode by passing "verbose=True" to testmod, or prohibit -it by passing "verbose=False". In either of those cases, sys.argv is not -examined by testmod. - -There are a variety of other ways to run doctests, including integration -with the unittest framework, and support for running non-Python text -files containing doctests. There are also many ways to override parts -of doctest's default behaviors. See the Library Reference Manual for -details. -""" - -__docformat__ = 'reStructuredText en' - -__all__ = [ - # 0, Option Flags - 'register_optionflag', - 'DONT_ACCEPT_TRUE_FOR_1', - 'DONT_ACCEPT_BLANKLINE', - 'NORMALIZE_WHITESPACE', - 'ELLIPSIS', - 'SKIP', - 'IGNORE_EXCEPTION_DETAIL', - 'COMPARISON_FLAGS', - 'REPORT_UDIFF', - 'REPORT_CDIFF', - 'REPORT_NDIFF', - 'REPORT_ONLY_FIRST_FAILURE', - 'REPORTING_FLAGS', - 'FAIL_FAST', - # 1. Utility Functions - # 2. Example & DocTest - 'Example', - 'DocTest', - # 3. Doctest Parser - 'DocTestParser', - # 4. Doctest Finder - 'DocTestFinder', - # 5. Doctest Runner - 'DocTestRunner', - 'OutputChecker', - 'DocTestFailure', - 'UnexpectedException', - 'DebugRunner', - # 6. 
Test Functions - 'testmod', - 'testfile', - 'run_docstring_examples', - # 7. Unittest Support - 'DocTestSuite', - 'DocFileSuite', - 'set_unittest_reportflags', - # 8. Debugging Support - 'script_from_examples', - 'testsource', - 'debug_src', - 'debug', -] - -import __future__ -import difflib -import inspect -import linecache -import os -import pdb -import re -import sys -import traceback -import unittest -from io import StringIO -from collections import namedtuple - -TestResults = namedtuple('TestResults', 'failed attempted') - -# There are 4 basic classes: -# - Example: a pair, plus an intra-docstring line number. -# - DocTest: a collection of examples, parsed from a docstring, plus -# info about where the docstring came from (name, filename, lineno). -# - DocTestFinder: extracts DocTests from a given object's docstring and -# its contained objects' docstrings. -# - DocTestRunner: runs DocTest cases, and accumulates statistics. -# -# So the basic picture is: -# -# list of: -# +------+ +---------+ +-------+ -# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results| -# +------+ +---------+ +-------+ -# | Example | -# | ... | -# | Example | -# +---------+ - -# Option constants. - -OPTIONFLAGS_BY_NAME = {} -def register_optionflag(name): - # Create a new flag unless `name` is already known. - return OPTIONFLAGS_BY_NAME.setdefault(name, 1 << len(OPTIONFLAGS_BY_NAME)) - -DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1') -DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE') -NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE') -ELLIPSIS = register_optionflag('ELLIPSIS') -SKIP = register_optionflag('SKIP') -IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL') - -COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 | - DONT_ACCEPT_BLANKLINE | - NORMALIZE_WHITESPACE | - ELLIPSIS | - SKIP | - IGNORE_EXCEPTION_DETAIL) - -REPORT_UDIFF = register_optionflag('REPORT_UDIFF') -REPORT_CDIFF = register_optionflag('REPORT_CDIFF') -REPORT_NDIFF = register_optionflag('REPORT_NDIFF') -REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE') -FAIL_FAST = register_optionflag('FAIL_FAST') - -REPORTING_FLAGS = (REPORT_UDIFF | - REPORT_CDIFF | - REPORT_NDIFF | - REPORT_ONLY_FIRST_FAILURE | - FAIL_FAST) - -# Special string markers for use in `want` strings: -BLANKLINE_MARKER = '' -ELLIPSIS_MARKER = '...' - -###################################################################### -## Table of Contents -###################################################################### -# 1. Utility Functions -# 2. Example & DocTest -- store test cases -# 3. DocTest Parser -- extracts examples from strings -# 4. DocTest Finder -- extracts test cases from objects -# 5. DocTest Runner -- runs test cases -# 6. Test Functions -- convenient wrappers for testing -# 7. Unittest Support -# 8. Debugging Support -# 9. Example Usage - -###################################################################### -## 1. Utility Functions -###################################################################### - -def _extract_future_flags(globs): - """ - Return the compiler-flags associated with the future features that - have been imported into the given namespace (globs). - """ - flags = 0 - for fname in __future__.all_feature_names: - feature = globs.get(fname, None) - if feature is getattr(__future__, fname): - flags |= feature.compiler_flag - return flags - -def _normalize_module(module, depth=2): - """ - Return the module specified by `module`. 
In particular: - - If `module` is a module, then return module. - - If `module` is a string, then import and return the - module with that name. - - If `module` is None, then return the calling module. - The calling module is assumed to be the module of - the stack frame at the given depth in the call stack. - """ - if inspect.ismodule(module): - return module - elif isinstance(module, str): - return __import__(module, globals(), locals(), ["*"]) - elif module is None: - return sys.modules[sys._getframe(depth).f_globals['__name__']] - else: - raise TypeError("Expected a module, string, or None") - -def _load_testfile(filename, package, module_relative, encoding): - if module_relative: - package = _normalize_module(package, 3) - filename = _module_relative_path(package, filename) - if getattr(package, '__loader__', None) is not None: - if hasattr(package.__loader__, 'get_data'): - file_contents = package.__loader__.get_data(filename) - file_contents = file_contents.decode(encoding) - # get_data() opens files as 'rb', so one must do the equivalent - # conversion as universal newlines would do. - return file_contents.replace(os.linesep, '\n'), filename - with open(filename, encoding=encoding) as f: - return f.read(), filename - -def _indent(s, indent=4): - """ - Add the given number of space characters to the beginning of - every non-blank line in `s`, and return the result. - """ - # This regexp matches the start of non-blank lines: - return re.sub('(?m)^(?!$)', indent*' ', s) - -def _exception_traceback(exc_info): - """ - Return a string containing a traceback message for the given - exc_info tuple (as returned by sys.exc_info()). - """ - # Get a traceback message. - excout = StringIO() - exc_type, exc_val, exc_tb = exc_info - traceback.print_exception(exc_type, exc_val, exc_tb, file=excout) - return excout.getvalue() - -# Override some StringIO methods. -class _SpoofOut(StringIO): - def getvalue(self): - result = StringIO.getvalue(self) - # If anything at all was written, make sure there's a trailing - # newline. There's no way for the expected output to indicate - # that a trailing newline is missing. - if result and not result.endswith("\n"): - result += "\n" - return result - - def truncate(self, size=None): - self.seek(size) - StringIO.truncate(self) - -# Worst-case linear-time ellipsis matching. -def _ellipsis_match(want, got): - """ - Essentially the only subtle case: - >>> _ellipsis_match('aa...aa', 'aaa') - False - """ - if ELLIPSIS_MARKER not in want: - return want == got - - # Find "the real" strings. - ws = want.split(ELLIPSIS_MARKER) - assert len(ws) >= 2 - - # Deal with exact matches possibly needed at one or both ends. - startpos, endpos = 0, len(got) - w = ws[0] - if w: # starts with exact match - if got.startswith(w): - startpos = len(w) - del ws[0] - else: - return False - w = ws[-1] - if w: # ends with exact match - if got.endswith(w): - endpos -= len(w) - del ws[-1] - else: - return False - - if startpos > endpos: - # Exact end matches required more characters than we have, as in - # _ellipsis_match('aa...aa', 'aaa') - return False - - # For the rest, we only need to find the leftmost non-overlapping - # match for each piece. If there's no overall match that way alone, - # there's no overall match period. - for w in ws: - # w may be '' at times, if there are consecutive ellipses, or - # due to an ellipsis at the start or end of `want`. That's OK. - # Search for an empty string succeeds, and doesn't change startpos. 
- startpos = got.find(w, startpos, endpos) - if startpos < 0: - return False - startpos += len(w) - - return True - -def _comment_line(line): - "Return a commented form of the given line" - line = line.rstrip() - if line: - return '# '+line - else: - return '#' - -def _strip_exception_details(msg): - # Support for IGNORE_EXCEPTION_DETAIL. - # Get rid of everything except the exception name; in particular, drop - # the possibly dotted module path (if any) and the exception message (if - # any). We assume that a colon is never part of a dotted name, or of an - # exception name. - # E.g., given - # "foo.bar.MyError: la di da" - # return "MyError" - # Or for "abc.def" or "abc.def:\n" return "def". - - start, end = 0, len(msg) - # The exception name must appear on the first line. - i = msg.find("\n") - if i >= 0: - end = i - # retain up to the first colon (if any) - i = msg.find(':', 0, end) - if i >= 0: - end = i - # retain just the exception name - i = msg.rfind('.', 0, end) - if i >= 0: - start = i+1 - return msg[start: end] - -class _OutputRedirectingPdb(pdb.Pdb): - """ - A specialized version of the python debugger that redirects stdout - to a given stream when interacting with the user. Stdout is *not* - redirected when traced code is executed. - """ - def __init__(self, out): - self.__out = out - self.__debugger_used = False - # do not play signal games in the pdb - pdb.Pdb.__init__(self, stdout=out, nosigint=True) - # still use input() to get user input - self.use_rawinput = 1 - - def set_trace(self, frame=None): - self.__debugger_used = True - if frame is None: - frame = sys._getframe().f_back - pdb.Pdb.set_trace(self, frame) - - def set_continue(self): - # Calling set_continue unconditionally would break unit test - # coverage reporting, as Bdb.set_continue calls sys.settrace(None). - if self.__debugger_used: - pdb.Pdb.set_continue(self) - - def trace_dispatch(self, *args): - # Redirect stdout to the given stream. - save_stdout = sys.stdout - sys.stdout = self.__out - # Call Pdb's trace dispatch method. - try: - return pdb.Pdb.trace_dispatch(self, *args) - finally: - sys.stdout = save_stdout - -# [XX] Normalize with respect to os.path.pardir? -def _module_relative_path(module, test_path): - if not inspect.ismodule(module): - raise TypeError('Expected a module: %r' % module) - if test_path.startswith('/'): - raise ValueError('Module-relative files may not have absolute paths') - - # Normalize the path. On Windows, replace "/" with "\". - test_path = os.path.join(*(test_path.split('/'))) - - # Find the base directory for the path. - if hasattr(module, '__file__'): - # A normal module/package - basedir = os.path.split(module.__file__)[0] - elif module.__name__ == '__main__': - # An interactive session. - if len(sys.argv)>0 and sys.argv[0] != '': - basedir = os.path.split(sys.argv[0])[0] - else: - basedir = os.curdir - else: - if hasattr(module, '__path__'): - for directory in module.__path__: - fullpath = os.path.join(directory, test_path) - if os.path.exists(fullpath): - return fullpath - - # A module w/o __file__ (this includes builtins) - raise ValueError("Can't resolve paths relative to the module " - "%r (it has no __file__)" - % module.__name__) - - # Combine the base directory and the test path. - return os.path.join(basedir, test_path) - -###################################################################### -## 2. 
Example & DocTest -###################################################################### -## - An "example" is a pair, where "source" is a -## fragment of source code, and "want" is the expected output for -## "source." The Example class also includes information about -## where the example was extracted from. -## -## - A "doctest" is a collection of examples, typically extracted from -## a string (such as an object's docstring). The DocTest class also -## includes information about where the string was extracted from. - -class Example: - """ - A single doctest example, consisting of source code and expected - output. `Example` defines the following attributes: - - - source: A single Python statement, always ending with a newline. - The constructor adds a newline if needed. - - - want: The expected output from running the source code (either - from stdout, or a traceback in case of exception). `want` ends - with a newline unless it's empty, in which case it's an empty - string. The constructor adds a newline if needed. - - - exc_msg: The exception message generated by the example, if - the example is expected to generate an exception; or `None` if - it is not expected to generate an exception. This exception - message is compared against the return value of - `traceback.format_exception_only()`. `exc_msg` ends with a - newline unless it's `None`. The constructor adds a newline - if needed. - - - lineno: The line number within the DocTest string containing - this Example where the Example begins. This line number is - zero-based, with respect to the beginning of the DocTest. - - - indent: The example's indentation in the DocTest string. - I.e., the number of space characters that precede the - example's first prompt. - - - options: A dictionary mapping from option flags to True or - False, which is used to override default options for this - example. Any option flags not contained in this dictionary - are left at their default value (as specified by the - DocTestRunner's optionflags). By default, no options are set. - """ - def __init__(self, source, want, exc_msg=None, lineno=0, indent=0, - options=None): - # Normalize inputs. - if not source.endswith('\n'): - source += '\n' - if want and not want.endswith('\n'): - want += '\n' - if exc_msg is not None and not exc_msg.endswith('\n'): - exc_msg += '\n' - # Store properties. - self.source = source - self.want = want - self.lineno = lineno - self.indent = indent - if options is None: options = {} - self.options = options - self.exc_msg = exc_msg - - def __eq__(self, other): - if type(self) is not type(other): - return NotImplemented - - return self.source == other.source and \ - self.want == other.want and \ - self.lineno == other.lineno and \ - self.indent == other.indent and \ - self.options == other.options and \ - self.exc_msg == other.exc_msg - - def __hash__(self): - return hash((self.source, self.want, self.lineno, self.indent, - self.exc_msg)) - -class DocTest: - """ - A collection of doctest examples that should be run in a single - namespace. Each `DocTest` defines the following attributes: - - - examples: the list of examples. - - - globs: The namespace (aka globals) that the examples should - be run in. - - - name: A name identifying the DocTest (typically, the name of - the object whose docstring this DocTest was extracted from). - - - filename: The name of the file that this DocTest was extracted - from, or `None` if the filename is unknown. 
-
- - lineno: The line number within filename where this DocTest
- begins, or `None` if the line number is unavailable. This
- line number is zero-based, with respect to the beginning of
- the file.
-
- - docstring: The string that the examples were extracted from,
- or `None` if the string is unavailable.
- """
- def __init__(self, examples, globs, name, filename, lineno, docstring):
- """
- Create a new DocTest containing the given examples. The
- DocTest's globals are initialized with a copy of `globs`.
- """
- assert not isinstance(examples, str), \
- "DocTest no longer accepts str; use DocTestParser instead"
- self.examples = examples
- self.docstring = docstring
- self.globs = globs.copy()
- self.name = name
- self.filename = filename
- self.lineno = lineno
-
- def __repr__(self):
- if len(self.examples) == 0:
- examples = 'no examples'
- elif len(self.examples) == 1:
- examples = '1 example'
- else:
- examples = '%d examples' % len(self.examples)
- return ('<%s %s from %s:%s (%s)>' %
- (self.__class__.__name__,
- self.name, self.filename, self.lineno, examples))
-
- def __eq__(self, other):
- if type(self) is not type(other):
- return NotImplemented
-
- return self.examples == other.examples and \
- self.docstring == other.docstring and \
- self.globs == other.globs and \
- self.name == other.name and \
- self.filename == other.filename and \
- self.lineno == other.lineno
-
- def __hash__(self):
- return hash((self.docstring, self.name, self.filename, self.lineno))
-
- # This lets us sort tests by name:
- def __lt__(self, other):
- if not isinstance(other, DocTest):
- return NotImplemented
- return ((self.name, self.filename, self.lineno, id(self))
- <
- (other.name, other.filename, other.lineno, id(other)))
-
-######################################################################
-## 3. DocTestParser
-######################################################################
-
-class DocTestParser:
- """
- A class used to parse strings containing doctest examples.
- """
- # This regular expression is used to find doctest examples in a
- # string. It defines three groups: `source` is the source code
- # (including leading indentation and prompts); `indent` is the
- # indentation of the first (PS1) line of the source code; and
- # `want` is the expected output (including leading indentation).
- _EXAMPLE_RE = re.compile(r'''
- # Source consists of a PS1 line followed by zero or more PS2 lines.
- (?P<source>
- (?:^(?P<indent> [ ]*) >>> .*) # PS1 line
- (?:\n [ ]* \.\.\. .*)*) # PS2 lines
- \n?
- # Want consists of any non-blank lines that do not start with PS1.
- (?P<want> (?:(?![ ]*$) # Not a blank line
- (?![ ]*>>>) # Not a line starting with PS1
- .+$\n? # But any other line
- )*)
- ''', re.MULTILINE | re.VERBOSE)
-
- # A regular expression for handling `want` strings that contain
- # expected exceptions. It divides `want` into three pieces:
- # - the traceback header line (`hdr`)
- # - the traceback stack (`stack`)
- # - the exception message (`msg`), as generated by
- # traceback.format_exception_only()
- # `msg` may have multiple lines. We assume/require that the
- # exception message is the first non-indented line starting with a word
- # character following the traceback header line.
- _EXCEPTION_RE = re.compile(r"""
- # Grab the traceback header. Different versions of Python have
- # said different things on the first traceback line.
- ^(?P<hdr> Traceback\ \(
- (?: most\ recent\ call\ last
- | innermost\ last
- ) \) :
- )
- \s* $ # toss trailing whitespace on the header.
- (?P<stack> .*?) # don't blink: absorb stuff until...
- ^ (?P<msg> \w+ .*) # a line *starts* with alphanum.
- """, re.VERBOSE | re.MULTILINE | re.DOTALL)
-
- # A callable returning a true value iff its argument is a blank line
- # or contains a single comment.
- _IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
-
- def parse(self, string, name='<string>'):
- """
- Divide the given string into examples and intervening text,
- and return them as a list of alternating Examples and strings.
- Line numbers for the Examples are 0-based. The optional
- argument `name` is a name identifying this string, and is only
- used for error messages.
- """
- string = string.expandtabs()
- # If all lines begin with the same indentation, then strip it.
- min_indent = self._min_indent(string)
- if min_indent > 0:
- string = '\n'.join([l[min_indent:] for l in string.split('\n')])
-
- output = []
- charno, lineno = 0, 0
- # Find all doctest examples in the string:
- for m in self._EXAMPLE_RE.finditer(string):
- # Add the pre-example text to `output`.
- output.append(string[charno:m.start()])
- # Update lineno (lines before this example)
- lineno += string.count('\n', charno, m.start())
- # Extract info from the regexp match.
- (source, options, want, exc_msg) = \
- self._parse_example(m, name, lineno)
- # Create an Example, and add it to the list.
- if not self._IS_BLANK_OR_COMMENT(source):
- output.append( Example(source, want, exc_msg,
- lineno=lineno,
- indent=min_indent+len(m.group('indent')),
- options=options) )
- # Update lineno (lines inside this example)
- lineno += string.count('\n', m.start(), m.end())
- # Update charno.
- charno = m.end()
- # Add any remaining post-example text to `output`.
- output.append(string[charno:])
- return output
-
- def get_doctest(self, string, globs, name, filename, lineno):
- """
- Extract all doctest examples from the given string, and
- collect them into a `DocTest` object.
-
- `globs`, `name`, `filename`, and `lineno` are attributes for
- the new `DocTest` object. See the documentation for `DocTest`
- for more information.
- """
- return DocTest(self.get_examples(string, name), globs,
- name, filename, lineno, string)
-
- def get_examples(self, string, name='<string>'):
- """
- Extract all doctest examples from the given string, and return
- them as a list of `Example` objects. Line numbers are
- 0-based, because it's most common in doctests that nothing
- interesting appears on the same line as opening triple-quote,
- and so the first interesting line is called \"line 1\" then.
-
- The optional argument `name` is a name identifying this
- string, and is only used for error messages.
- """
- return [x for x in self.parse(string, name)
- if isinstance(x, Example)]
-
- def _parse_example(self, m, name, lineno):
- """
- Given a regular expression match from `_EXAMPLE_RE` (`m`),
- return a pair `(source, want)`, where `source` is the matched
- example's source code (with prompts and indentation stripped);
- and `want` is the example's expected output (with indentation
- stripped).
-
- `name` is the string's name, and `lineno` is the line number
- where the example starts; both are used for error messages.
- """
- # Get the example's indentation level.
- indent = len(m.group('indent'))
-
- # Divide source into lines; check that they're properly
- # indented; and then strip their indentation & prompts.
- source_lines = m.group('source').split('\n') - self._check_prompt_blank(source_lines, indent, name, lineno) - self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno) - source = '\n'.join([sl[indent+4:] for sl in source_lines]) - - # Divide want into lines; check that it's properly indented; and - # then strip the indentation. Spaces before the last newline should - # be preserved, so plain rstrip() isn't good enough. - want = m.group('want') - want_lines = want.split('\n') - if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]): - del want_lines[-1] # forget final newline & spaces after it - self._check_prefix(want_lines, ' '*indent, name, - lineno + len(source_lines)) - want = '\n'.join([wl[indent:] for wl in want_lines]) - - # If `want` contains a traceback message, then extract it. - m = self._EXCEPTION_RE.match(want) - if m: - exc_msg = m.group('msg') - else: - exc_msg = None - - # Extract options from the source. - options = self._find_options(source, name, lineno) - - return source, options, want, exc_msg - - # This regular expression looks for option directives in the - # source code of an example. Option directives are comments - # starting with "doctest:". Warning: this may give false - # positives for string-literals that contain the string - # "#doctest:". Eliminating these false positives would require - # actually parsing the string; but we limit them by ignoring any - # line containing "#doctest:" that is *followed* by a quote mark. - _OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$', - re.MULTILINE) - - def _find_options(self, source, name, lineno): - """ - Return a dictionary containing option overrides extracted from - option directives in the given source string. - - `name` is the string's name, and `lineno` is the line number - where the example starts; both are used for error messages. - """ - options = {} - # (note: with the current regexp, this will match at most once:) - for m in self._OPTION_DIRECTIVE_RE.finditer(source): - option_strings = m.group(1).replace(',', ' ').split() - for option in option_strings: - if (option[0] not in '+-' or - option[1:] not in OPTIONFLAGS_BY_NAME): - raise ValueError('line %r of the doctest for %s ' - 'has an invalid option: %r' % - (lineno+1, name, option)) - flag = OPTIONFLAGS_BY_NAME[option[1:]] - options[flag] = (option[0] == '+') - if options and self._IS_BLANK_OR_COMMENT(source): - raise ValueError('line %r of the doctest for %s has an option ' - 'directive on a line with no example: %r' % - (lineno, name, source)) - return options - - # This regular expression finds the indentation of every non-blank - # line in a string. - _INDENT_RE = re.compile(r'^([ ]*)(?=\S)', re.MULTILINE) - - def _min_indent(self, s): - "Return the minimum indentation of any non-blank line in `s`" - indents = [len(indent) for indent in self._INDENT_RE.findall(s)] - if len(indents) > 0: - return min(indents) - else: - return 0 - - def _check_prompt_blank(self, lines, indent, name, lineno): - """ - Given the lines of a source string (including prompts and - leading indentation), check to make sure that every prompt is - followed by a space character. If any line is not followed by - a space character, then raise ValueError. 
- """ - for i, line in enumerate(lines): - if len(line) >= indent+4 and line[indent+3] != ' ': - raise ValueError('line %r of the docstring for %s ' - 'lacks blank after %s: %r' % - (lineno+i+1, name, - line[indent:indent+3], line)) - - def _check_prefix(self, lines, prefix, name, lineno): - """ - Check that every line in the given list starts with the given - prefix; if any line does not, then raise a ValueError. - """ - for i, line in enumerate(lines): - if line and not line.startswith(prefix): - raise ValueError('line %r of the docstring for %s has ' - 'inconsistent leading whitespace: %r' % - (lineno+i+1, name, line)) - - -###################################################################### -## 4. DocTest Finder -###################################################################### - -class DocTestFinder: - """ - A class used to extract the DocTests that are relevant to a given - object, from its docstring and the docstrings of its contained - objects. Doctests can currently be extracted from the following - object types: modules, functions, classes, methods, staticmethods, - classmethods, and properties. - """ - - def __init__(self, verbose=False, parser=DocTestParser(), - recurse=True, exclude_empty=True): - """ - Create a new doctest finder. - - The optional argument `parser` specifies a class or - function that should be used to create new DocTest objects (or - objects that implement the same interface as DocTest). The - signature for this factory function should match the signature - of the DocTest constructor. - - If the optional argument `recurse` is false, then `find` will - only examine the given object, and not any contained objects. - - If the optional argument `exclude_empty` is false, then `find` - will include tests for objects with empty docstrings. - """ - self._parser = parser - self._verbose = verbose - self._recurse = recurse - self._exclude_empty = exclude_empty - - def find(self, obj, name=None, module=None, globs=None, extraglobs=None): - """ - Return a list of the DocTests that are defined by the given - object's docstring, or by any of its contained objects' - docstrings. - - The optional parameter `module` is the module that contains - the given object. If the module is not specified or is None, then - the test finder will attempt to automatically determine the - correct module. The object's module is used: - - - As a default namespace, if `globs` is not specified. - - To prevent the DocTestFinder from extracting DocTests - from objects that are imported from other modules. - - To find the name of the file containing the object. - - To help find the line number of the object within its - file. - - Contained objects whose module does not match `module` are ignored. - - If `module` is False, no attempt to find the module will be made. - This is obscure, of use mostly in tests: if `module` is False, or - is None but cannot be found automatically, then all objects are - considered to belong to the (non-existent) module, so all contained - objects will (recursively) be searched for doctests. - - The globals for each DocTest is formed by combining `globs` - and `extraglobs` (bindings in `extraglobs` override bindings - in `globs`). A new copy of the globals dictionary is created - for each DocTest. If `globs` is not specified, then it - defaults to the module's `__dict__`, if specified, or {} - otherwise. If `extraglobs` is not specified, then it defaults - to {}. - - """ - # If name was not specified, then extract it from the object. 
- if name is None: - name = getattr(obj, '__name__', None) - if name is None: - raise ValueError("DocTestFinder.find: name must be given " - "when obj.__name__ doesn't exist: %r" % - (type(obj),)) - - # Find the module that contains the given object (if obj is - # a module, then module=obj.). Note: this may fail, in which - # case module will be None. - if module is False: - module = None - elif module is None: - module = inspect.getmodule(obj) - - # Read the module's source code. This is used by - # DocTestFinder._find_lineno to find the line number for a - # given object's docstring. - try: - file = inspect.getsourcefile(obj) - except TypeError: - source_lines = None - else: - if not file: - # Check to see if it's one of our special internal "files" - # (see __patched_linecache_getlines). - file = inspect.getfile(obj) - if not file[0]+file[-2:] == '<]>': file = None - if file is None: - source_lines = None - else: - if module is not None: - # Supply the module globals in case the module was - # originally loaded via a PEP 302 loader and - # file is not a valid filesystem path - source_lines = linecache.getlines(file, module.__dict__) - else: - # No access to a loader, so assume it's a normal - # filesystem path - source_lines = linecache.getlines(file) - if not source_lines: - source_lines = None - - # Initialize globals, and merge in extraglobs. - if globs is None: - if module is None: - globs = {} - else: - globs = module.__dict__.copy() - else: - globs = globs.copy() - if extraglobs is not None: - globs.update(extraglobs) - if '__name__' not in globs: - globs['__name__'] = '__main__' # provide a default module name - - # Recursively explore `obj`, extracting DocTests. - tests = [] - self._find(tests, obj, name, module, source_lines, globs, {}) - # Sort the tests by alpha order of names, for consistency in - # verbose-mode output. This was a feature of doctest in Pythons - # <= 2.3 that got lost by accident in 2.4. It was repaired in - # 2.4.4 and 2.5. - tests.sort() - return tests - - def _from_module(self, module, object): - """ - Return true if the given object is defined in the given - module. - """ - if module is None: - return True - elif inspect.getmodule(object) is not None: - return module is inspect.getmodule(object) - elif inspect.isfunction(object): - return module.__dict__ is object.__globals__ - elif inspect.ismethoddescriptor(object): - if hasattr(object, '__objclass__'): - obj_mod = object.__objclass__.__module__ - elif hasattr(object, '__module__'): - obj_mod = object.__module__ - else: - return True # [XX] no easy way to tell otherwise - return module.__name__ == obj_mod - elif inspect.isclass(object): - return module.__name__ == object.__module__ - elif hasattr(object, '__module__'): - return module.__name__ == object.__module__ - elif isinstance(object, property): - return True # [XX] no way not be sure. - else: - raise ValueError("object must be a class or function") - - def _find(self, tests, obj, name, module, source_lines, globs, seen): - """ - Find tests for the given object and any contained objects, and - add them to `tests`. - """ - if self._verbose: - print('Finding tests in %s' % name) - - # If we've already processed this object, then ignore it. - if id(obj) in seen: - return - seen[id(obj)] = 1 - - # Find a test for this object, and add it to the list of tests. - test = self._get_test(obj, name, module, globs, source_lines) - if test is not None: - tests.append(test) - - # Look for tests in a module's contained objects. 
- if inspect.ismodule(obj) and self._recurse: - for valname, val in obj.__dict__.items(): - valname = '%s.%s' % (name, valname) - # Recurse to functions & classes. - if ((inspect.isroutine(inspect.unwrap(val)) - or inspect.isclass(val)) and - self._from_module(module, val)): - self._find(tests, val, valname, module, source_lines, - globs, seen) - - # Look for tests in a module's __test__ dictionary. - if inspect.ismodule(obj) and self._recurse: - for valname, val in getattr(obj, '__test__', {}).items(): - if not isinstance(valname, str): - raise ValueError("DocTestFinder.find: __test__ keys " - "must be strings: %r" % - (type(valname),)) - if not (inspect.isroutine(val) or inspect.isclass(val) or - inspect.ismodule(val) or isinstance(val, str)): - raise ValueError("DocTestFinder.find: __test__ values " - "must be strings, functions, methods, " - "classes, or modules: %r" % - (type(val),)) - valname = '%s.__test__.%s' % (name, valname) - self._find(tests, val, valname, module, source_lines, - globs, seen) - - # Look for tests in a class's contained objects. - if inspect.isclass(obj) and self._recurse: - for valname, val in obj.__dict__.items(): - # Special handling for staticmethod/classmethod. - if isinstance(val, staticmethod): - val = getattr(obj, valname) - if isinstance(val, classmethod): - val = getattr(obj, valname).__func__ - - # Recurse to methods, properties, and nested classes. - if ((inspect.isroutine(val) or inspect.isclass(val) or - isinstance(val, property)) and - self._from_module(module, val)): - valname = '%s.%s' % (name, valname) - self._find(tests, val, valname, module, source_lines, - globs, seen) - - def _get_test(self, obj, name, module, globs, source_lines): - """ - Return a DocTest for the given object, if it defines a docstring; - otherwise, return None. - """ - # Extract the object's docstring. If it doesn't have one, - # then return None (no test for this object). - if isinstance(obj, str): - docstring = obj - else: - try: - if obj.__doc__ is None: - docstring = '' - else: - docstring = obj.__doc__ - if not isinstance(docstring, str): - docstring = str(docstring) - except (TypeError, AttributeError): - docstring = '' - - # Find the docstring's location in the file. - lineno = self._find_lineno(obj, source_lines) - - # Don't bother if the docstring is empty. - if self._exclude_empty and not docstring: - return None - - # Return a DocTest for this object. - if module is None: - filename = None - else: - # __file__ can be None for namespace packages. - filename = getattr(module, '__file__', None) or module.__name__ - if filename[-4:] == ".pyc": - filename = filename[:-1] - return self._parser.get_doctest(docstring, globs, name, - filename, lineno) - - def _find_lineno(self, obj, source_lines): - """ - Return a line number of the given object's docstring. Note: - this method assumes that the object has a docstring. - """ - lineno = None - - # Find the line number for modules. - if inspect.ismodule(obj): - lineno = 0 - - # Find the line number for classes. - # Note: this could be fooled if a class is defined multiple - # times in a single file. - if inspect.isclass(obj): - if source_lines is None: - return None - pat = re.compile(r'^\s*class\s*%s\b' % - getattr(obj, '__name__', '-')) - for i, line in enumerate(source_lines): - if pat.match(line): - lineno = i - break - - # Find the line number for functions & methods. 
- if inspect.ismethod(obj): obj = obj.__func__ - if inspect.isfunction(obj): obj = obj.__code__ - if inspect.istraceback(obj): obj = obj.tb_frame - if inspect.isframe(obj): obj = obj.f_code - if inspect.iscode(obj): - lineno = getattr(obj, 'co_firstlineno', None)-1 - - # Find the line number where the docstring starts. Assume - # that it's the first line that begins with a quote mark. - # Note: this could be fooled by a multiline function - # signature, where a continuation line begins with a quote - # mark. - if lineno is not None: - if source_lines is None: - return lineno+1 - pat = re.compile(r'(^|.*:)\s*\w*("|\')') - for lineno in range(lineno, len(source_lines)): - if pat.match(source_lines[lineno]): - return lineno - - # We couldn't find the line number. - return None - -###################################################################### -## 5. DocTest Runner -###################################################################### - -class DocTestRunner: - """ - A class used to run DocTest test cases, and accumulate statistics. - The `run` method is used to process a single DocTest case. It - returns a tuple `(f, t)`, where `t` is the number of test cases - tried, and `f` is the number of test cases that failed. - - >>> tests = DocTestFinder().find(_TestClass) - >>> runner = DocTestRunner(verbose=False) - >>> tests.sort(key = lambda test: test.name) - >>> for test in tests: - ... print(test.name, '->', runner.run(test)) - _TestClass -> TestResults(failed=0, attempted=2) - _TestClass.__init__ -> TestResults(failed=0, attempted=2) - _TestClass.get -> TestResults(failed=0, attempted=2) - _TestClass.square -> TestResults(failed=0, attempted=1) - - The `summarize` method prints a summary of all the test cases that - have been run by the runner, and returns an aggregated `(f, t)` - tuple: - - >>> runner.summarize(verbose=1) - 4 items passed all tests: - 2 tests in _TestClass - 2 tests in _TestClass.__init__ - 2 tests in _TestClass.get - 1 tests in _TestClass.square - 7 tests in 4 items. - 7 passed and 0 failed. - Test passed. - TestResults(failed=0, attempted=7) - - The aggregated number of tried examples and failed examples is - also available via the `tries` and `failures` attributes: - - >>> runner.tries - 7 - >>> runner.failures - 0 - - The comparison between expected outputs and actual outputs is done - by an `OutputChecker`. This comparison may be customized with a - number of option flags; see the documentation for `testmod` for - more information. If the option flags are insufficient, then the - comparison may also be customized by passing a subclass of - `OutputChecker` to the constructor. - - The test runner's display output can be controlled in two ways. - First, an output function (`out) can be passed to - `TestRunner.run`; this function will be called with strings that - should be displayed. It defaults to `sys.stdout.write`. If - capturing the output is not sufficient, then the display output - can be also customized by subclassing DocTestRunner, and - overriding the methods `report_start`, `report_success`, - `report_unexpected_exception`, and `report_failure`. - """ - # This divider string is used to separate failure messages, and to - # separate sections of the summary. - DIVIDER = "*" * 70 - - def __init__(self, checker=None, verbose=None, optionflags=0): - """ - Create a new test runner. - - Optional keyword arg `checker` is the `OutputChecker` that - should be used to compare the expected outputs and actual - outputs of doctest examples. 
- - Optional keyword arg 'verbose' prints lots of stuff if true, - only failures if false; by default, it's true iff '-v' is in - sys.argv. - - Optional argument `optionflags` can be used to control how the - test runner compares expected output to actual output, and how - it displays failures. See the documentation for `testmod` for - more information. - """ - self._checker = checker or OutputChecker() - if verbose is None: - verbose = '-v' in sys.argv - self._verbose = verbose - self.optionflags = optionflags - self.original_optionflags = optionflags - - # Keep track of the examples we've run. - self.tries = 0 - self.failures = 0 - self._name2ft = {} - - # Create a fake output target for capturing doctest output. - self._fakeout = _SpoofOut() - - #///////////////////////////////////////////////////////////////// - # Reporting methods - #///////////////////////////////////////////////////////////////// - - def report_start(self, out, test, example): - """ - Report that the test runner is about to process the given - example. (Only displays a message if verbose=True) - """ - if self._verbose: - if example.want: - out('Trying:\n' + _indent(example.source) + - 'Expecting:\n' + _indent(example.want)) - else: - out('Trying:\n' + _indent(example.source) + - 'Expecting nothing\n') - - def report_success(self, out, test, example, got): - """ - Report that the given example ran successfully. (Only - displays a message if verbose=True) - """ - if self._verbose: - out("ok\n") - - def report_failure(self, out, test, example, got): - """ - Report that the given example failed. - """ - out(self._failure_header(test, example) + - self._checker.output_difference(example, got, self.optionflags)) - - def report_unexpected_exception(self, out, test, example, exc_info): - """ - Report that the given example raised an unexpected exception. - """ - out(self._failure_header(test, example) + - 'Exception raised:\n' + _indent(_exception_traceback(exc_info))) - - def _failure_header(self, test, example): - out = [self.DIVIDER] - if test.filename: - if test.lineno is not None and example.lineno is not None: - lineno = test.lineno + example.lineno + 1 - else: - lineno = '?' - out.append('File "%s", line %s, in %s' % - (test.filename, lineno, test.name)) - else: - out.append('Line %s, in %s' % (example.lineno+1, test.name)) - out.append('Failed example:') - source = example.source - out.append(_indent(source)) - return '\n'.join(out) - - #///////////////////////////////////////////////////////////////// - # DocTest Running - #///////////////////////////////////////////////////////////////// - - def __run(self, test, compileflags, out): - """ - Run the examples in `test`. Write the outcome of each example - with one of the `DocTestRunner.report_*` methods, using the - writer function `out`. `compileflags` is the set of compiler - flags that should be used to execute examples. Return a tuple - `(f, t)`, where `t` is the number of examples tried, and `f` - is the number of examples that failed. The examples are run - in the namespace `test.globs`. - """ - # Keep track of the number of failures and tries. - failures = tries = 0 - - # Save the option flags (since option directives can be used - # to modify them). - original_optionflags = self.optionflags - - SUCCESS, FAILURE, BOOM = range(3) # `outcome` state - - check = self._checker.check_output - - # Process each example. 
- for examplenum, example in enumerate(test.examples):
-
- # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
- # reporting after the first failure.
- quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
- failures > 0)
-
- # Merge in the example's options.
- self.optionflags = original_optionflags
- if example.options:
- for (optionflag, val) in example.options.items():
- if val:
- self.optionflags |= optionflag
- else:
- self.optionflags &= ~optionflag
-
- # If 'SKIP' is set, then skip this example.
- if self.optionflags & SKIP:
- continue
-
- # Record that we started this example.
- tries += 1
- if not quiet:
- self.report_start(out, test, example)
-
- # Use a special filename for compile(), so we can retrieve
- # the source code during interactive debugging (see
- # __patched_linecache_getlines).
- filename = '<doctest %s[%d]>' % (test.name, examplenum)
-
- # Run the example in the given context (globs), and record
- # any exception that gets raised. (But don't intercept
- # keyboard interrupts.)
- try:
- # Don't blink! This is where the user's code gets run.
- exec(compile(example.source, filename, "single",
- compileflags, 1), test.globs)
- self.debugger.set_continue() # ==== Example Finished ====
- exception = None
- except KeyboardInterrupt:
- raise
- except:
- exception = sys.exc_info()
- self.debugger.set_continue() # ==== Example Finished ====
-
- got = self._fakeout.getvalue() # the actual output
- self._fakeout.truncate(0)
- outcome = FAILURE # guilty until proved innocent or insane
-
- # If the example executed without raising any exceptions,
- # verify its output.
- if exception is None:
- if check(example.want, got, self.optionflags):
- outcome = SUCCESS
-
- # The example raised an exception: check if it was expected.
- else:
- exc_msg = traceback.format_exception_only(*exception[:2])[-1]
- if not quiet:
- got += _exception_traceback(exception)
-
- # If `example.exc_msg` is None, then we weren't expecting
- # an exception.
- if example.exc_msg is None:
- outcome = BOOM
-
- # We expected an exception: see whether it matches.
- elif check(example.exc_msg, exc_msg, self.optionflags):
- outcome = SUCCESS
-
- # Another chance if they didn't care about the detail.
- elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
- if check(_strip_exception_details(example.exc_msg),
- _strip_exception_details(exc_msg),
- self.optionflags):
- outcome = SUCCESS
-
- # Report the outcome.
- if outcome is SUCCESS:
- if not quiet:
- self.report_success(out, test, example, got)
- elif outcome is FAILURE:
- if not quiet:
- self.report_failure(out, test, example, got)
- failures += 1
- elif outcome is BOOM:
- if not quiet:
- self.report_unexpected_exception(out, test, example,
- exception)
- failures += 1
- else:
- assert False, ("unknown outcome", outcome)
-
- if failures and self.optionflags & FAIL_FAST:
- break
-
- # Restore the option flags (in case they were modified)
- self.optionflags = original_optionflags
-
- # Record and return the number of failures and tries.
- self.__record_outcome(test, failures, tries)
- return TestResults(failures, tries)
-
- def __record_outcome(self, test, f, t):
- """
- Record the fact that the given DocTest (`test`) generated `f`
- failures out of `t` tried examples.
-
- """
- f2, t2 = self._name2ft.get(test.name, (0,0))
- self._name2ft[test.name] = (f+f2, t+t2)
- self.failures += f
- self.tries += t
-
- __LINECACHE_FILENAME_RE = re.compile(r'<doctest (?P<name>.+)'
- r'\[(?P<examplenum>\d+)\]>$')
- def __patched_linecache_getlines(self, filename, module_globals=None):
- m = self.__LINECACHE_FILENAME_RE.match(filename)
- if m and m.group('name') == self.test.name:
- example = self.test.examples[int(m.group('examplenum'))]
- return example.source.splitlines(keepends=True)
- else:
- return self.save_linecache_getlines(filename, module_globals)
-
- def run(self, test, compileflags=None, out=None, clear_globs=True):
- """
- Run the examples in `test`, and display the results using the
- writer function `out`.
-
- The examples are run in the namespace `test.globs`. If
- `clear_globs` is true (the default), then this namespace will
- be cleared after the test runs, to help with garbage
- collection. If you would like to examine the namespace after
- the test completes, then use `clear_globs=False`.
-
- `compileflags` gives the set of flags that should be used by
- the Python compiler when running the examples. If not
- specified, then it will default to the set of future-import
- flags that apply to `globs`.
-
- The output of each example is checked using
- `DocTestRunner.check_output`, and the results are formatted by
- the `DocTestRunner.report_*` methods.
- """
- self.test = test
-
- if compileflags is None:
- compileflags = _extract_future_flags(test.globs)
-
- save_stdout = sys.stdout
- if out is None:
- encoding = save_stdout.encoding
- if encoding is None or encoding.lower() == 'utf-8':
- out = save_stdout.write
- else:
- # Use backslashreplace error handling on write
- def out(s):
- s = str(s.encode(encoding, 'backslashreplace'), encoding)
- save_stdout.write(s)
- sys.stdout = self._fakeout
-
- # Patch pdb.set_trace to restore sys.stdout during interactive
- # debugging (so it's not still redirected to self._fakeout).
- # Note that the interactive output will go to *our*
- # save_stdout, even if that's not the real sys.stdout; this
- # allows us to write test cases for the set_trace behavior.
- save_trace = sys.gettrace()
- save_set_trace = pdb.set_trace
- self.debugger = _OutputRedirectingPdb(save_stdout)
- self.debugger.reset()
- pdb.set_trace = self.debugger.set_trace
-
- # Patch linecache.getlines, so we can see the example's source
- # when we're inside the debugger.
- self.save_linecache_getlines = linecache.getlines
- linecache.getlines = self.__patched_linecache_getlines
-
- # Make sure sys.displayhook just prints the value to stdout
- save_displayhook = sys.displayhook
- sys.displayhook = sys.__displayhook__
-
- try:
- return self.__run(test, compileflags, out)
- finally:
- sys.stdout = save_stdout
- pdb.set_trace = save_set_trace
- sys.settrace(save_trace)
- linecache.getlines = self.save_linecache_getlines
- sys.displayhook = save_displayhook
- if clear_globs:
- test.globs.clear()
- import builtins
- builtins._ = None
-
- #/////////////////////////////////////////////////////////////////
- # Summarization
- #/////////////////////////////////////////////////////////////////
- def summarize(self, verbose=None):
- """
- Print a summary of all the test cases that have been run by
- this DocTestRunner, and return a tuple `(f, t)`, where `f` is
- the total number of failed examples, and `t` is the total
- number of tried examples.
-
- The optional `verbose` argument controls how detailed the
- summary is.
If the verbosity is not specified, then the - DocTestRunner's verbosity is used. - """ - if verbose is None: - verbose = self._verbose - notests = [] - passed = [] - failed = [] - totalt = totalf = 0 - for x in self._name2ft.items(): - name, (f, t) = x - assert f <= t - totalt += t - totalf += f - if t == 0: - notests.append(name) - elif f == 0: - passed.append( (name, t) ) - else: - failed.append(x) - if verbose: - if notests: - print(len(notests), "items had no tests:") - notests.sort() - for thing in notests: - print(" ", thing) - if passed: - print(len(passed), "items passed all tests:") - passed.sort() - for thing, count in passed: - print(" %3d tests in %s" % (count, thing)) - if failed: - print(self.DIVIDER) - print(len(failed), "items had failures:") - failed.sort() - for thing, (f, t) in failed: - print(" %3d of %3d in %s" % (f, t, thing)) - if verbose: - print(totalt, "tests in", len(self._name2ft), "items.") - print(totalt - totalf, "passed and", totalf, "failed.") - if totalf: - print("***Test Failed***", totalf, "failures.") - elif verbose: - print("Test passed.") - return TestResults(totalf, totalt) - - #///////////////////////////////////////////////////////////////// - # Backward compatibility cruft to maintain doctest.master. - #///////////////////////////////////////////////////////////////// - def merge(self, other): - d = self._name2ft - for name, (f, t) in other._name2ft.items(): - if name in d: - # Don't print here by default, since doing - # so breaks some of the buildbots - #print("*** DocTestRunner.merge: '" + name + "' in both" \ - # " testers; summing outcomes.") - f2, t2 = d[name] - f = f + f2 - t = t + t2 - d[name] = f, t - -class OutputChecker: - """ - A class used to check the whether the actual output from a doctest - example matches the expected output. `OutputChecker` defines two - methods: `check_output`, which compares a given pair of outputs, - and returns true if they match; and `output_difference`, which - returns a string describing the differences between two outputs. - """ - def _toAscii(self, s): - """ - Convert string to hex-escaped ASCII string. - """ - return str(s.encode('ASCII', 'backslashreplace'), "ASCII") - - def check_output(self, want, got, optionflags): - """ - Return True iff the actual output from an example (`got`) - matches the expected output (`want`). These strings are - always considered to match if they are identical; but - depending on what option flags the test runner is using, - several non-exact match types are also possible. See the - documentation for `TestRunner` for more information about - option flags. - """ - - # If `want` contains hex-escaped character such as "\u1234", - # then `want` is a string of six characters(e.g. [\,u,1,2,3,4]). - # On the other hand, `got` could be another sequence of - # characters such as [\u1234], so `want` and `got` should - # be folded to hex-escaped ASCII string to compare. - got = self._toAscii(got) - want = self._toAscii(want) - - # Handle the common case first, for efficiency: - # if they're string-identical, always return true. - if got == want: - return True - - # The values True and False replaced 1 and 0 as the return - # value for boolean comparisons in Python 2.3. - if not (optionflags & DONT_ACCEPT_TRUE_FOR_1): - if (got,want) == ("True\n", "1\n"): - return True - if (got,want) == ("False\n", "0\n"): - return True - - # can be used as a special sequence to signify a - # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used. 
- if not (optionflags & DONT_ACCEPT_BLANKLINE): - # Replace in want with a blank line. - want = re.sub(r'(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER), - '', want) - # If a line in got contains only spaces, then remove the - # spaces. - got = re.sub(r'(?m)^[^\S\n]+$', '', got) - if got == want: - return True - - # This flag causes doctest to ignore any differences in the - # contents of whitespace strings. Note that this can be used - # in conjunction with the ELLIPSIS flag. - if optionflags & NORMALIZE_WHITESPACE: - got = ' '.join(got.split()) - want = ' '.join(want.split()) - if got == want: - return True - - # The ELLIPSIS flag says to let the sequence "..." in `want` - # match any substring in `got`. - if optionflags & ELLIPSIS: - if _ellipsis_match(want, got): - return True - - # We didn't find any match; return false. - return False - - # Should we do a fancy diff? - def _do_a_fancy_diff(self, want, got, optionflags): - # Not unless they asked for a fancy diff. - if not optionflags & (REPORT_UDIFF | - REPORT_CDIFF | - REPORT_NDIFF): - return False - - # If expected output uses ellipsis, a meaningful fancy diff is - # too hard ... or maybe not. In two real-life failures Tim saw, - # a diff was a major help anyway, so this is commented out. - # [todo] _ellipsis_match() knows which pieces do and don't match, - # and could be the basis for a kick-ass diff in this case. - ##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want: - ## return False - - # ndiff does intraline difference marking, so can be useful even - # for 1-line differences. - if optionflags & REPORT_NDIFF: - return True - - # The other diff types need at least a few lines to be helpful. - return want.count('\n') > 2 and got.count('\n') > 2 - - def output_difference(self, example, got, optionflags): - """ - Return a string describing the differences between the - expected output for a given example (`example`) and the actual - output (`got`). `optionflags` is the set of option flags used - to compare `want` and `got`. - """ - want = example.want - # If s are being used, then replace blank lines - # with in the actual output string. - if not (optionflags & DONT_ACCEPT_BLANKLINE): - got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got) - - # Check if we should use diff. - if self._do_a_fancy_diff(want, got, optionflags): - # Split want & got into lines. - want_lines = want.splitlines(keepends=True) - got_lines = got.splitlines(keepends=True) - # Use difflib to find their differences. - if optionflags & REPORT_UDIFF: - diff = difflib.unified_diff(want_lines, got_lines, n=2) - diff = list(diff)[2:] # strip the diff header - kind = 'unified diff with -expected +actual' - elif optionflags & REPORT_CDIFF: - diff = difflib.context_diff(want_lines, got_lines, n=2) - diff = list(diff)[2:] # strip the diff header - kind = 'context diff with expected followed by actual' - elif optionflags & REPORT_NDIFF: - engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK) - diff = list(engine.compare(want_lines, got_lines)) - kind = 'ndiff with -expected +actual' - else: - assert 0, 'Bad diff option' - return 'Differences (%s):\n' % kind + _indent(''.join(diff)) - - # If we're not using diff, then simply list the expected - # output followed by the actual output. 
- if want and got: - return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got)) - elif want: - return 'Expected:\n%sGot nothing\n' % _indent(want) - elif got: - return 'Expected nothing\nGot:\n%s' % _indent(got) - else: - return 'Expected nothing\nGot nothing\n' - -class DocTestFailure(Exception): - """A DocTest example has failed in debugging mode. - - The exception instance has variables: - - - test: the DocTest object being run - - - example: the Example object that failed - - - got: the actual output - """ - def __init__(self, test, example, got): - self.test = test - self.example = example - self.got = got - - def __str__(self): - return str(self.test) - -class UnexpectedException(Exception): - """A DocTest example has encountered an unexpected exception - - The exception instance has variables: - - - test: the DocTest object being run - - - example: the Example object that failed - - - exc_info: the exception info - """ - def __init__(self, test, example, exc_info): - self.test = test - self.example = example - self.exc_info = exc_info - - def __str__(self): - return str(self.test) - -class DebugRunner(DocTestRunner): - r"""Run doc tests but raise an exception as soon as there is a failure. - - If an unexpected exception occurs, an UnexpectedException is raised. - It contains the test, the example, and the original exception: - - >>> runner = DebugRunner(verbose=False) - >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42', - ... {}, 'foo', 'foo.py', 0) - >>> try: - ... runner.run(test) - ... except UnexpectedException as f: - ... failure = f - - >>> failure.test is test - True - - >>> failure.example.want - '42\n' - - >>> exc_info = failure.exc_info - >>> raise exc_info[1] # Already has the traceback - Traceback (most recent call last): - ... - KeyError - - We wrap the original exception to give the calling application - access to the test and example information. - - If the output doesn't match, then a DocTestFailure is raised: - - >>> test = DocTestParser().get_doctest(''' - ... >>> x = 1 - ... >>> x - ... 2 - ... ''', {}, 'foo', 'foo.py', 0) - - >>> try: - ... runner.run(test) - ... except DocTestFailure as f: - ... failure = f - - DocTestFailure objects provide access to the test: - - >>> failure.test is test - True - - As well as to the example: - - >>> failure.example.want - '2\n' - - and the actual output: - - >>> failure.got - '1\n' - - If a failure or error occurs, the globals are left intact: - - >>> del test.globs['__builtins__'] - >>> test.globs - {'x': 1} - - >>> test = DocTestParser().get_doctest(''' - ... >>> x = 2 - ... >>> raise KeyError - ... ''', {}, 'foo', 'foo.py', 0) - - >>> runner.run(test) - Traceback (most recent call last): - ... - doctest.UnexpectedException: - - >>> del test.globs['__builtins__'] - >>> test.globs - {'x': 2} - - But the globals are cleared if there is no error: - - >>> test = DocTestParser().get_doctest(''' - ... >>> x = 2 - ... 
''', {}, 'foo', 'foo.py', 0) - - >>> runner.run(test) - TestResults(failed=0, attempted=1) - - >>> test.globs - {} - - """ - - def run(self, test, compileflags=None, out=None, clear_globs=True): - r = DocTestRunner.run(self, test, compileflags, out, False) - if clear_globs: - test.globs.clear() - return r - - def report_unexpected_exception(self, out, test, example, exc_info): - raise UnexpectedException(test, example, exc_info) - - def report_failure(self, out, test, example, got): - raise DocTestFailure(test, example, got) - -###################################################################### -## 6. Test Functions -###################################################################### -# These should be backwards compatible. - -# For backward compatibility, a global instance of a DocTestRunner -# class, updated by testmod. -master = None - -def testmod(m=None, name=None, globs=None, verbose=None, - report=True, optionflags=0, extraglobs=None, - raise_on_error=False, exclude_empty=False): - """m=None, name=None, globs=None, verbose=None, report=True, - optionflags=0, extraglobs=None, raise_on_error=False, - exclude_empty=False - - Test examples in docstrings in functions and classes reachable - from module m (or the current module if m is not supplied), starting - with m.__doc__. - - Also test examples reachable from dict m.__test__ if it exists and is - not None. m.__test__ maps names to functions, classes and strings; - function and class docstrings are tested even if the name is private; - strings are tested directly, as if they were docstrings. - - Return (#failures, #tests). - - See help(doctest) for an overview. - - Optional keyword arg "name" gives the name of the module; by default - use m.__name__. - - Optional keyword arg "globs" gives a dict to be used as the globals - when executing examples; by default, use m.__dict__. A copy of this - dict is actually used for each docstring, so that each docstring's - examples start with a clean slate. - - Optional keyword arg "extraglobs" gives a dictionary that should be - merged into the globals that are used to execute examples. By - default, no extra globals are used. This is new in 2.4. - - Optional keyword arg "verbose" prints lots of stuff if true, prints - only failures if false; by default, it's true iff "-v" is in sys.argv. - - Optional keyword arg "report" prints a summary at the end when true, - else prints nothing at the end. In verbose mode, the summary is - detailed, else very brief (in fact, empty if all tests passed). - - Optional keyword arg "optionflags" or's together module constants, - and defaults to 0. This is new in 2.3. Possible values (see the - docs for details): - - DONT_ACCEPT_TRUE_FOR_1 - DONT_ACCEPT_BLANKLINE - NORMALIZE_WHITESPACE - ELLIPSIS - SKIP - IGNORE_EXCEPTION_DETAIL - REPORT_UDIFF - REPORT_CDIFF - REPORT_NDIFF - REPORT_ONLY_FIRST_FAILURE - - Optional keyword arg "raise_on_error" raises an exception on the - first unexpected exception or failure. This allows failures to be - post-mortem debugged. - - Advanced tomfoolery: testmod runs methods of a local instance of - class doctest.Tester, then merges the results into (or creates) - global Tester instance doctest.master. Methods of doctest.master - can be called directly too, if you want to do something unusual. - Passing report=0 to testmod is especially useful then, to delay - displaying a summary. Invoke doctest.master.summarize(verbose) - when you're done fiddling. - """ - global master - - # If no module was given, then use __main__. 
- if m is None: - # DWA - m will still be None if this wasn't invoked from the command - # line, in which case the following TypeError is about as good an error - # as we should expect - m = sys.modules.get('__main__') - - # Check that we were actually given a module. - if not inspect.ismodule(m): - raise TypeError("testmod: module required; %r" % (m,)) - - # If no name was given, then use the module's name. - if name is None: - name = m.__name__ - - # Find, parse, and run all tests in the given module. - finder = DocTestFinder(exclude_empty=exclude_empty) - - if raise_on_error: - runner = DebugRunner(verbose=verbose, optionflags=optionflags) - else: - runner = DocTestRunner(verbose=verbose, optionflags=optionflags) - - for test in finder.find(m, name, globs=globs, extraglobs=extraglobs): - runner.run(test) - - if report: - runner.summarize() - - if master is None: - master = runner - else: - master.merge(runner) - - return TestResults(runner.failures, runner.tries) - -def testfile(filename, module_relative=True, name=None, package=None, - globs=None, verbose=None, report=True, optionflags=0, - extraglobs=None, raise_on_error=False, parser=DocTestParser(), - encoding=None): - """ - Test examples in the given file. Return (#failures, #tests). - - Optional keyword arg "module_relative" specifies how filenames - should be interpreted: - - - If "module_relative" is True (the default), then "filename" - specifies a module-relative path. By default, this path is - relative to the calling module's directory; but if the - "package" argument is specified, then it is relative to that - package. To ensure os-independence, "filename" should use - "/" characters to separate path segments, and should not - be an absolute path (i.e., it may not begin with "/"). - - - If "module_relative" is False, then "filename" specifies an - os-specific path. The path may be absolute or relative (to - the current working directory). - - Optional keyword arg "name" gives the name of the test; by default - use the file's basename. - - Optional keyword argument "package" is a Python package or the - name of a Python package whose directory should be used as the - base directory for a module relative filename. If no package is - specified, then the calling module's directory is used as the base - directory for module relative filenames. It is an error to - specify "package" if "module_relative" is False. - - Optional keyword arg "globs" gives a dict to be used as the globals - when executing examples; by default, use {}. A copy of this dict - is actually used for each docstring, so that each docstring's - examples start with a clean slate. - - Optional keyword arg "extraglobs" gives a dictionary that should be - merged into the globals that are used to execute examples. By - default, no extra globals are used. - - Optional keyword arg "verbose" prints lots of stuff if true, prints - only failures if false; by default, it's true iff "-v" is in sys.argv. - - Optional keyword arg "report" prints a summary at the end when true, - else prints nothing at the end. In verbose mode, the summary is - detailed, else very brief (in fact, empty if all tests passed). - - Optional keyword arg "optionflags" or's together module constants, - and defaults to 0. 
Possible values (see the docs for details): - - DONT_ACCEPT_TRUE_FOR_1 - DONT_ACCEPT_BLANKLINE - NORMALIZE_WHITESPACE - ELLIPSIS - SKIP - IGNORE_EXCEPTION_DETAIL - REPORT_UDIFF - REPORT_CDIFF - REPORT_NDIFF - REPORT_ONLY_FIRST_FAILURE - - Optional keyword arg "raise_on_error" raises an exception on the - first unexpected exception or failure. This allows failures to be - post-mortem debugged. - - Optional keyword arg "parser" specifies a DocTestParser (or - subclass) that should be used to extract tests from the files. - - Optional keyword arg "encoding" specifies an encoding that should - be used to convert the file to unicode. - - Advanced tomfoolery: testmod runs methods of a local instance of - class doctest.Tester, then merges the results into (or creates) - global Tester instance doctest.master. Methods of doctest.master - can be called directly too, if you want to do something unusual. - Passing report=0 to testmod is especially useful then, to delay - displaying a summary. Invoke doctest.master.summarize(verbose) - when you're done fiddling. - """ - global master - - if package and not module_relative: - raise ValueError("Package may only be specified for module-" - "relative paths.") - - # Relativize the path - text, filename = _load_testfile(filename, package, module_relative, - encoding or "utf-8") - - # If no name was given, then use the file's name. - if name is None: - name = os.path.basename(filename) - - # Assemble the globals. - if globs is None: - globs = {} - else: - globs = globs.copy() - if extraglobs is not None: - globs.update(extraglobs) - if '__name__' not in globs: - globs['__name__'] = '__main__' - - if raise_on_error: - runner = DebugRunner(verbose=verbose, optionflags=optionflags) - else: - runner = DocTestRunner(verbose=verbose, optionflags=optionflags) - - # Read the file, convert it to a test, and run it. - test = parser.get_doctest(text, globs, name, filename, 0) - runner.run(test) - - if report: - runner.summarize() - - if master is None: - master = runner - else: - master.merge(runner) - - return TestResults(runner.failures, runner.tries) - -def run_docstring_examples(f, globs, verbose=False, name="NoName", - compileflags=None, optionflags=0): - """ - Test examples in the given object's docstring (`f`), using `globs` - as globals. Optional argument `name` is used in failure messages. - If the optional argument `verbose` is true, then generate output - even if there are no failures. - - `compileflags` gives the set of flags that should be used by the - Python compiler when running the examples. If not specified, then - it will default to the set of future-import flags that apply to - `globs`. - - Optional keyword arg `optionflags` specifies options for the - testing and output. See the documentation for `testmod` for more - information. - """ - # Find, parse, and run all tests in the given module. - finder = DocTestFinder(verbose=verbose, recurse=False) - runner = DocTestRunner(verbose=verbose, optionflags=optionflags) - for test in finder.find(f, name, globs=globs): - runner.run(test, compileflags=compileflags) - -###################################################################### -## 7. Unittest Support -###################################################################### - -_unittest_reportflags = 0 - -def set_unittest_reportflags(flags): - """Sets the unittest option flags. 
- - The old flag is returned so that a runner could restore the old - value if it wished to: - - >>> import doctest - >>> old = doctest._unittest_reportflags - >>> doctest.set_unittest_reportflags(REPORT_NDIFF | - ... REPORT_ONLY_FIRST_FAILURE) == old - True - - >>> doctest._unittest_reportflags == (REPORT_NDIFF | - ... REPORT_ONLY_FIRST_FAILURE) - True - - Only reporting flags can be set: - - >>> doctest.set_unittest_reportflags(ELLIPSIS) - Traceback (most recent call last): - ... - ValueError: ('Only reporting flags allowed', 8) - - >>> doctest.set_unittest_reportflags(old) == (REPORT_NDIFF | - ... REPORT_ONLY_FIRST_FAILURE) - True - """ - global _unittest_reportflags - - if (flags & REPORTING_FLAGS) != flags: - raise ValueError("Only reporting flags allowed", flags) - old = _unittest_reportflags - _unittest_reportflags = flags - return old - - -class DocTestCase(unittest.TestCase): - - def __init__(self, test, optionflags=0, setUp=None, tearDown=None, - checker=None): - - unittest.TestCase.__init__(self) - self._dt_optionflags = optionflags - self._dt_checker = checker - self._dt_test = test - self._dt_setUp = setUp - self._dt_tearDown = tearDown - - def setUp(self): - test = self._dt_test - - if self._dt_setUp is not None: - self._dt_setUp(test) - - def tearDown(self): - test = self._dt_test - - if self._dt_tearDown is not None: - self._dt_tearDown(test) - - test.globs.clear() - - def runTest(self): - test = self._dt_test - old = sys.stdout - new = StringIO() - optionflags = self._dt_optionflags - - if not (optionflags & REPORTING_FLAGS): - # The option flags don't include any reporting flags, - # so add the default reporting flags - optionflags |= _unittest_reportflags - - runner = DocTestRunner(optionflags=optionflags, - checker=self._dt_checker, verbose=False) - - try: - runner.DIVIDER = "-"*70 - failures, tries = runner.run( - test, out=new.write, clear_globs=False) - finally: - sys.stdout = old - - if failures: - raise self.failureException(self.format_failure(new.getvalue())) - - def format_failure(self, err): - test = self._dt_test - if test.lineno is None: - lineno = 'unknown line number' - else: - lineno = '%s' % test.lineno - lname = '.'.join(test.name.split('.')[-1:]) - return ('Failed doctest test for %s\n' - ' File "%s", line %s, in %s\n\n%s' - % (test.name, test.filename, lineno, lname, err) - ) - - def debug(self): - r"""Run the test case without results and without catching exceptions - - The unit test framework includes a debug method on test cases - and test suites to support post-mortem debugging. The test code - is run in such a way that errors are not caught. This way a - caller can catch the errors and initiate post-mortem debugging. - - The DocTestCase provides a debug method that raises - UnexpectedException errors if there is an unexpected - exception: - - >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42', - ... {}, 'foo', 'foo.py', 0) - >>> case = DocTestCase(test) - >>> try: - ... case.debug() - ... except UnexpectedException as f: - ... failure = f - - The UnexpectedException contains the test, the example, and - the original exception: - - >>> failure.test is test - True - - >>> failure.example.want - '42\n' - - >>> exc_info = failure.exc_info - >>> raise exc_info[1] # Already has the traceback - Traceback (most recent call last): - ... - KeyError - - If the output doesn't match, then a DocTestFailure is raised: - - >>> test = DocTestParser().get_doctest(''' - ... >>> x = 1 - ... >>> x - ... 2 - ... 
''', {}, 'foo', 'foo.py', 0) - >>> case = DocTestCase(test) - - >>> try: - ... case.debug() - ... except DocTestFailure as f: - ... failure = f - - DocTestFailure objects provide access to the test: - - >>> failure.test is test - True - - As well as to the example: - - >>> failure.example.want - '2\n' - - and the actual output: - - >>> failure.got - '1\n' - - """ - - self.setUp() - runner = DebugRunner(optionflags=self._dt_optionflags, - checker=self._dt_checker, verbose=False) - runner.run(self._dt_test, clear_globs=False) - self.tearDown() - - def id(self): - return self._dt_test.name - - def __eq__(self, other): - if type(self) is not type(other): - return NotImplemented - - return self._dt_test == other._dt_test and \ - self._dt_optionflags == other._dt_optionflags and \ - self._dt_setUp == other._dt_setUp and \ - self._dt_tearDown == other._dt_tearDown and \ - self._dt_checker == other._dt_checker - - def __hash__(self): - return hash((self._dt_optionflags, self._dt_setUp, self._dt_tearDown, - self._dt_checker)) - - def __repr__(self): - name = self._dt_test.name.split('.') - return "%s (%s)" % (name[-1], '.'.join(name[:-1])) - - __str__ = object.__str__ - - def shortDescription(self): - return "Doctest: " + self._dt_test.name - -class SkipDocTestCase(DocTestCase): - def __init__(self, module): - self.module = module - DocTestCase.__init__(self, None) - - def setUp(self): - self.skipTest("DocTestSuite will not work with -O2 and above") - - def test_skip(self): - pass - - def shortDescription(self): - return "Skipping tests from %s" % self.module.__name__ - - __str__ = shortDescription - - -class _DocTestSuite(unittest.TestSuite): - - def _removeTestAtIndex(self, index): - pass - - -def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None, - **options): - """ - Convert doctest tests for a module to a unittest test suite. - - This converts each documentation string in a module that - contains doctest tests to a unittest test case. If any of the - tests in a doc string fail, then the test case fails. An exception - is raised showing the name of the file containing the test and a - (sometimes approximate) line number. - - The `module` argument provides the module to be tested. The argument - can be either a module or a module name. - - If no argument is given, the calling module is used. - - A number of options may be provided as keyword arguments: - - setUp - A set-up function. This is called before running the - tests in each file. The setUp function will be passed a DocTest - object. The setUp function can access the test globals as the - globs attribute of the test passed. - - tearDown - A tear-down function. This is called after running the - tests in each file. The tearDown function will be passed a DocTest - object. The tearDown function can access the test globals as the - globs attribute of the test passed. - - globs - A dictionary containing initial global variables for the tests. - - optionflags - A set of doctest option flags expressed as an integer. 
- """ - - if test_finder is None: - test_finder = DocTestFinder() - - module = _normalize_module(module) - tests = test_finder.find(module, globs=globs, extraglobs=extraglobs) - - if not tests and sys.flags.optimize >=2: - # Skip doctests when running with -O2 - suite = _DocTestSuite() - suite.addTest(SkipDocTestCase(module)) - return suite - - tests.sort() - suite = _DocTestSuite() - - for test in tests: - if len(test.examples) == 0: - continue - if not test.filename: - filename = module.__file__ - if filename[-4:] == ".pyc": - filename = filename[:-1] - test.filename = filename - suite.addTest(DocTestCase(test, **options)) - - return suite - -class DocFileCase(DocTestCase): - - def id(self): - return '_'.join(self._dt_test.name.split('.')) - - def __repr__(self): - return self._dt_test.filename - - def format_failure(self, err): - return ('Failed doctest test for %s\n File "%s", line 0\n\n%s' - % (self._dt_test.name, self._dt_test.filename, err) - ) - -def DocFileTest(path, module_relative=True, package=None, - globs=None, parser=DocTestParser(), - encoding=None, **options): - if globs is None: - globs = {} - else: - globs = globs.copy() - - if package and not module_relative: - raise ValueError("Package may only be specified for module-" - "relative paths.") - - # Relativize the path. - doc, path = _load_testfile(path, package, module_relative, - encoding or "utf-8") - - if "__file__" not in globs: - globs["__file__"] = path - - # Find the file and read it. - name = os.path.basename(path) - - # Convert it to a test, and wrap it in a DocFileCase. - test = parser.get_doctest(doc, globs, name, path, 0) - return DocFileCase(test, **options) - -def DocFileSuite(*paths, **kw): - """A unittest suite for one or more doctest files. - - The path to each doctest file is given as a string; the - interpretation of that string depends on the keyword argument - "module_relative". - - A number of options may be provided as keyword arguments: - - module_relative - If "module_relative" is True, then the given file paths are - interpreted as os-independent module-relative paths. By - default, these paths are relative to the calling module's - directory; but if the "package" argument is specified, then - they are relative to that package. To ensure os-independence, - "filename" should use "/" characters to separate path - segments, and may not be an absolute path (i.e., it may not - begin with "/"). - - If "module_relative" is False, then the given file paths are - interpreted as os-specific paths. These paths may be absolute - or relative (to the current working directory). - - package - A Python package or the name of a Python package whose directory - should be used as the base directory for module relative paths. - If "package" is not specified, then the calling module's - directory is used as the base directory for module relative - filenames. It is an error to specify "package" if - "module_relative" is False. - - setUp - A set-up function. This is called before running the - tests in each file. The setUp function will be passed a DocTest - object. The setUp function can access the test globals as the - globs attribute of the test passed. - - tearDown - A tear-down function. This is called after running the - tests in each file. The tearDown function will be passed a DocTest - object. The tearDown function can access the test globals as the - globs attribute of the test passed. - - globs - A dictionary containing initial global variables for the tests. 
- - optionflags - A set of doctest option flags expressed as an integer. - - parser - A DocTestParser (or subclass) that should be used to extract - tests from the files. - - encoding - An encoding that will be used to convert the files to unicode. - """ - suite = _DocTestSuite() - - # We do this here so that _normalize_module is called at the right - # level. If it were called in DocFileTest, then this function - # would be the caller and we might guess the package incorrectly. - if kw.get('module_relative', True): - kw['package'] = _normalize_module(kw.get('package')) - - for path in paths: - suite.addTest(DocFileTest(path, **kw)) - - return suite - -###################################################################### -## 8. Debugging Support -###################################################################### - -def script_from_examples(s): - r"""Extract script from text with examples. - - Converts text with examples to a Python script. Example input is - converted to regular code. Example output and all other words - are converted to comments: - - >>> text = ''' - ... Here are examples of simple math. - ... - ... Python has super accurate integer addition - ... - ... >>> 2 + 2 - ... 5 - ... - ... And very friendly error messages: - ... - ... >>> 1/0 - ... To Infinity - ... And - ... Beyond - ... - ... You can use logic if you want: - ... - ... >>> if 0: - ... ... blah - ... ... blah - ... ... - ... - ... Ho hum - ... ''' - - >>> print(script_from_examples(text)) - # Here are examples of simple math. - # - # Python has super accurate integer addition - # - 2 + 2 - # Expected: - ## 5 - # - # And very friendly error messages: - # - 1/0 - # Expected: - ## To Infinity - ## And - ## Beyond - # - # You can use logic if you want: - # - if 0: - blah - blah - # - # Ho hum - - """ - output = [] - for piece in DocTestParser().parse(s): - if isinstance(piece, Example): - # Add the example's source code (strip trailing NL) - output.append(piece.source[:-1]) - # Add the expected output: - want = piece.want - if want: - output.append('# Expected:') - output += ['## '+l for l in want.split('\n')[:-1]] - else: - # Add non-example text. - output += [_comment_line(l) - for l in piece.split('\n')[:-1]] - - # Trim junk on both ends. - while output and output[-1] == '#': - output.pop() - while output and output[0] == '#': - output.pop(0) - # Combine the output, and return it. - # Add a courtesy newline to prevent exec from choking (see bug #1172785) - return '\n'.join(output) + '\n' - -def testsource(module, name): - """Extract the test sources from a doctest docstring as a script. - - Provide the module (or dotted name of the module) containing the - test to be debugged and the name (within the module) of the object - with the doc string with tests to be debugged. - """ - module = _normalize_module(module) - tests = DocTestFinder().find(module) - test = [t for t in tests if t.name == name] - if not test: - raise ValueError(name, "not found in tests") - test = test[0] - testsrc = script_from_examples(test.docstring) - return testsrc - -def debug_src(src, pm=False, globs=None): - """Debug a single doctest docstring, in argument `src`'""" - testsrc = script_from_examples(src) - debug_script(testsrc, pm, globs) - -def debug_script(src, pm=False, globs=None): - "Debug a test script. `src` is the script, as a string." 
- import pdb - - if globs: - globs = globs.copy() - else: - globs = {} - - if pm: - try: - exec(src, globs, globs) - except: - print(sys.exc_info()[1]) - p = pdb.Pdb(nosigint=True) - p.reset() - p.interaction(None, sys.exc_info()[2]) - else: - pdb.Pdb(nosigint=True).run("exec(%r)" % src, globs, globs) - -def debug(module, name, pm=False): - """Debug a single doctest docstring. - - Provide the module (or dotted name of the module) containing the - test to be debugged and the name (within the module) of the object - with the docstring with tests to be debugged. - """ - module = _normalize_module(module) - testsrc = testsource(module, name) - debug_script(testsrc, pm, module.__dict__) - -###################################################################### -## 9. Example Usage -###################################################################### -class _TestClass: - """ - A pointless class, for sanity-checking of docstring testing. - - Methods: - square() - get() - - >>> _TestClass(13).get() + _TestClass(-12).get() - 1 - >>> hex(_TestClass(13).square().get()) - '0xa9' - """ - - def __init__(self, val): - """val -> _TestClass object with associated value val. - - >>> t = _TestClass(123) - >>> print(t.get()) - 123 - """ - - self.val = val - - def square(self): - """square() -> square TestClass's associated value - - >>> _TestClass(13).square().get() - 169 - """ - - self.val = self.val ** 2 - return self - - def get(self): - """get() -> return TestClass's associated value. - - >>> x = _TestClass(-42) - >>> print(x.get()) - -42 - """ - - return self.val - -__test__ = {"_TestClass": _TestClass, - "string": r""" - Example of a string object, searched as-is. - >>> x = 1; y = 2 - >>> x + y, x * y - (3, 2) - """, - - "bool-int equivalence": r""" - In 2.2, boolean expressions displayed - 0 or 1. By default, we still accept - them. This can be disabled by passing - DONT_ACCEPT_TRUE_FOR_1 to the new - optionflags argument. - >>> 4 == 4 - 1 - >>> 4 == 4 - True - >>> 4 > 4 - 0 - >>> 4 > 4 - False - """, - - "blank lines": r""" - Blank lines can be marked with : - >>> print('foo\n\nbar\n') - foo - - bar - - """, - - "ellipsis": r""" - If the ellipsis flag is used, then '...' can be used to - elide substrings in the desired output: - >>> print(list(range(1000))) #doctest: +ELLIPSIS - [0, 1, 2, ..., 999] - """, - - "whitespace normalization": r""" - If the whitespace normalization flag is used, then - differences in whitespace are ignored. 
- >>> print(list(range(30))) #doctest: +NORMALIZE_WHITESPACE - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, - 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, - 27, 28, 29] - """, - } - - -def _test(): - import argparse - - parser = argparse.ArgumentParser(description="doctest runner") - parser.add_argument('-v', '--verbose', action='store_true', default=False, - help='print very verbose output for all tests') - parser.add_argument('-o', '--option', action='append', - choices=OPTIONFLAGS_BY_NAME.keys(), default=[], - help=('specify a doctest option flag to apply' - ' to the test run; may be specified more' - ' than once to apply multiple options')) - parser.add_argument('-f', '--fail-fast', action='store_true', - help=('stop running tests after first failure (this' - ' is a shorthand for -o FAIL_FAST, and is' - ' in addition to any other -o options)')) - parser.add_argument('file', nargs='+', - help='file containing the tests to run') - args = parser.parse_args() - testfiles = args.file - # Verbose used to be handled by the "inspect argv" magic in DocTestRunner, - # but since we are using argparse we are passing it manually now. - verbose = args.verbose - options = 0 - for option in args.option: - options |= OPTIONFLAGS_BY_NAME[option] - if args.fail_fast: - options |= FAIL_FAST - for filename in testfiles: - if filename.endswith(".py"): - # It is a module -- insert its dir into sys.path and try to - # import it. If it is part of a package, that possibly - # won't work because of package imports. - dirname, filename = os.path.split(filename) - sys.path.insert(0, dirname) - m = __import__(filename[:-3]) - del sys.path[0] - failures, _ = testmod(m, verbose=verbose, optionflags=options) - else: - failures, _ = testfile(filename, module_relative=False, - verbose=verbose, optionflags=options) - if failures: - return 1 - return 0 - - -if __name__ == "__main__": - sys.exit(_test()) diff --git a/Lib/pdb.py b/Lib/pdb.py deleted file mode 100755 index bf503f1e73e..00000000000 --- a/Lib/pdb.py +++ /dev/null @@ -1,1730 +0,0 @@ -#! /usr/bin/env python3 - -""" -The Python Debugger Pdb -======================= - -To use the debugger in its simplest form: - - >>> import pdb - >>> pdb.run('') - -The debugger's prompt is '(Pdb) '. This will stop in the first -function call in . - -Alternatively, if a statement terminated with an unhandled exception, -you can use pdb's post-mortem facility to inspect the contents of the -traceback: - - >>> - - >>> import pdb - >>> pdb.pm() - -The commands recognized by the debugger are listed in the next -section. Most can be abbreviated as indicated; e.g., h(elp) means -that 'help' can be typed as 'h' or 'help' (but not as 'he' or 'hel', -nor as 'H' or 'Help' or 'HELP'). Optional arguments are enclosed in -square brackets. Alternatives in the command syntax are separated -by a vertical bar (|). - -A blank line repeats the previous command literally, except for -'list', where it lists the next 11 lines. - -Commands that the debugger doesn't recognize are assumed to be Python -statements and are executed in the context of the program being -debugged. Python statements can also be prefixed with an exclamation -point ('!'). This is a powerful way to inspect the program being -debugged; it is even possible to change variables or call functions. -When an exception occurs in such a statement, the exception name is -printed but the debugger's state is not changed. - -The debugger supports aliases, which can save typing. 
And aliases can -have parameters (see the alias help entry) which allows one a certain -level of adaptability to the context under examination. - -Multiple commands may be entered on a single line, separated by the -pair ';;'. No intelligence is applied to separating the commands; the -input is split at the first ';;', even if it is in the middle of a -quoted string. - -If a file ".pdbrc" exists in your home directory or in the current -directory, it is read in and executed as if it had been typed at the -debugger prompt. This is particularly useful for aliases. If both -files exist, the one in the home directory is read first and aliases -defined there can be overridden by the local file. This behavior can be -disabled by passing the "readrc=False" argument to the Pdb constructor. - -Aside from aliases, the debugger is not directly programmable; but it -is implemented as a class from which you can derive your own debugger -class, which you can make as fancy as you like. - - -Debugger commands -================= - -""" -# NOTE: the actual command documentation is collected from docstrings of the -# commands and is appended to __doc__ after the class has been defined. - -import os -import io -import re -import sys -import cmd -import bdb -import dis -import code -import glob -import pprint -import signal -import inspect -import traceback -import linecache - - -class Restart(Exception): - """Causes a debugger to be restarted for the debugged python program.""" - pass - -__all__ = ["run", "pm", "Pdb", "runeval", "runctx", "runcall", "set_trace", - "post_mortem", "help"] - -def find_function(funcname, filename): - cre = re.compile(r'def\s+%s\s*[(]' % re.escape(funcname)) - try: - fp = open(filename) - except OSError: - return None - # consumer of this info expects the first line to be 1 - with fp: - for lineno, line in enumerate(fp, start=1): - if cre.match(line): - return funcname, filename, lineno - return None - -def getsourcelines(obj): - lines, lineno = inspect.findsource(obj) - if inspect.isframe(obj) and obj.f_globals is obj.f_locals: - # must be a module frame: do not try to cut a block out of it - return lines, 1 - elif inspect.ismodule(obj): - return lines, 1 - return inspect.getblock(lines[lineno:]), lineno+1 - -def lasti2lineno(code, lasti): - linestarts = list(dis.findlinestarts(code)) - linestarts.reverse() - for i, lineno in linestarts: - if lasti >= i: - return lineno - return 0 - - -class _rstr(str): - """String that doesn't quote its repr.""" - def __repr__(self): - return self - - -# Interaction prompt line will separate file and call info from code -# text using value of line_prefix string. A newline and arrow may -# be to your liking. You can set it once pdb is imported using the -# command "pdb.line_prefix = '\n% '". 
-# line_prefix = ': ' # Use this to get the old situation back -line_prefix = '\n-> ' # Probably a better default - -class Pdb(bdb.Bdb, cmd.Cmd): - - _previous_sigint_handler = None - - def __init__(self, completekey='tab', stdin=None, stdout=None, skip=None, - nosigint=False, readrc=True): - bdb.Bdb.__init__(self, skip=skip) - cmd.Cmd.__init__(self, completekey, stdin, stdout) - sys.audit("pdb.Pdb") - if stdout: - self.use_rawinput = 0 - self.prompt = '(Pdb) ' - self.aliases = {} - self.displaying = {} - self.mainpyfile = '' - self._wait_for_mainpyfile = False - self.tb_lineno = {} - # Try to load readline if it exists - try: - import readline - # remove some common file name delimiters - readline.set_completer_delims(' \t\n`@#$%^&*()=+[{]}\\|;:\'",<>?') - except ImportError: - pass - self.allow_kbdint = False - self.nosigint = nosigint - - # Read ~/.pdbrc and ./.pdbrc - self.rcLines = [] - if readrc: - try: - with open(os.path.expanduser('~/.pdbrc')) as rcFile: - self.rcLines.extend(rcFile) - except OSError: - pass - try: - with open(".pdbrc") as rcFile: - self.rcLines.extend(rcFile) - except OSError: - pass - - self.commands = {} # associates a command list to breakpoint numbers - self.commands_doprompt = {} # for each bp num, tells if the prompt - # must be disp. after execing the cmd list - self.commands_silent = {} # for each bp num, tells if the stack trace - # must be disp. after execing the cmd list - self.commands_defining = False # True while in the process of defining - # a command list - self.commands_bnum = None # The breakpoint number for which we are - # defining a list - - def sigint_handler(self, signum, frame): - if self.allow_kbdint: - raise KeyboardInterrupt - self.message("\nProgram interrupted. (Use 'cont' to resume).") - self.set_step() - self.set_trace(frame) - - def reset(self): - bdb.Bdb.reset(self) - self.forget() - - def forget(self): - self.lineno = None - self.stack = [] - self.curindex = 0 - self.curframe = None - self.tb_lineno.clear() - - def setup(self, f, tb): - self.forget() - self.stack, self.curindex = self.get_stack(f, tb) - while tb: - # when setting up post-mortem debugging with a traceback, save all - # the original line numbers to be displayed along the current line - # numbers (which can be different, e.g. due to finally clauses) - lineno = lasti2lineno(tb.tb_frame.f_code, tb.tb_lasti) - self.tb_lineno[tb.tb_frame] = lineno - tb = tb.tb_next - self.curframe = self.stack[self.curindex][0] - # The f_locals dictionary is updated from the actual frame - # locals whenever the .f_locals accessor is called, so we - # cache it here to ensure that modifications are not overwritten. 
- self.curframe_locals = self.curframe.f_locals - return self.execRcLines() - - # Can be executed earlier than 'setup' if desired - def execRcLines(self): - if not self.rcLines: - return - # local copy because of recursion - rcLines = self.rcLines - rcLines.reverse() - # execute every line only once - self.rcLines = [] - while rcLines: - line = rcLines.pop().strip() - if line and line[0] != '#': - if self.onecmd(line): - # if onecmd returns True, the command wants to exit - # from the interaction, save leftover rc lines - # to execute before next interaction - self.rcLines += reversed(rcLines) - return True - - # Override Bdb methods - - def user_call(self, frame, argument_list): - """This method is called when there is the remote possibility - that we ever need to stop in this function.""" - if self._wait_for_mainpyfile: - return - if self.stop_here(frame): - self.message('--Call--') - self.interaction(frame, None) - - def user_line(self, frame): - """This function is called when we stop or break at this line.""" - if self._wait_for_mainpyfile: - if (self.mainpyfile != self.canonic(frame.f_code.co_filename) - or frame.f_lineno <= 0): - return - self._wait_for_mainpyfile = False - if self.bp_commands(frame): - self.interaction(frame, None) - - def bp_commands(self, frame): - """Call every command that was set for the current active breakpoint - (if there is one). - - Returns True if the normal interaction function must be called, - False otherwise.""" - # self.currentbp is set in bdb in Bdb.break_here if a breakpoint was hit - if getattr(self, "currentbp", False) and \ - self.currentbp in self.commands: - currentbp = self.currentbp - self.currentbp = 0 - lastcmd_back = self.lastcmd - self.setup(frame, None) - for line in self.commands[currentbp]: - self.onecmd(line) - self.lastcmd = lastcmd_back - if not self.commands_silent[currentbp]: - self.print_stack_entry(self.stack[self.curindex]) - if self.commands_doprompt[currentbp]: - self._cmdloop() - self.forget() - return - return 1 - - def user_return(self, frame, return_value): - """This function is called when a return trap is set here.""" - if self._wait_for_mainpyfile: - return - frame.f_locals['__return__'] = return_value - self.message('--Return--') - self.interaction(frame, None) - - def user_exception(self, frame, exc_info): - """This function is called if an exception occurs, - but only if we are to stop at or just below this level.""" - if self._wait_for_mainpyfile: - return - exc_type, exc_value, exc_traceback = exc_info - frame.f_locals['__exception__'] = exc_type, exc_value - - # An 'Internal StopIteration' exception is an exception debug event - # issued by the interpreter when handling a subgenerator run with - # 'yield from' or a generator controlled by a for loop. No exception has - # actually occurred in this case. The debugger uses this debug event to - # stop when the debuggee is returning from such generators. 
-        prefix = 'Internal ' if (not exc_traceback
-                                 and exc_type is StopIteration) else ''
-        self.message('%s%s' % (prefix,
-            traceback.format_exception_only(exc_type, exc_value)[-1].strip()))
-        self.interaction(frame, exc_traceback)
-
-    # General interaction function
-    def _cmdloop(self):
-        while True:
-            try:
-                # keyboard interrupts allow for an easy way to cancel
-                # the current command, so allow them during interactive input
-                self.allow_kbdint = True
-                self.cmdloop()
-                self.allow_kbdint = False
-                break
-            except KeyboardInterrupt:
-                self.message('--KeyboardInterrupt--')
-
-    # Called before loop, handles display expressions
-    def preloop(self):
-        displaying = self.displaying.get(self.curframe)
-        if displaying:
-            for expr, oldvalue in displaying.items():
-                newvalue = self._getval_except(expr)
-                # check for identity first; this prevents custom __eq__ to
-                # be called at every loop, and also prevents instances whose
-                # fields are changed to be displayed
-                if newvalue is not oldvalue and newvalue != oldvalue:
-                    displaying[expr] = newvalue
-                    self.message('display %s: %r [old: %r]' %
-                                 (expr, newvalue, oldvalue))
-
-    def interaction(self, frame, traceback):
-        # Restore the previous signal handler at the Pdb prompt.
-        if Pdb._previous_sigint_handler:
-            try:
-                signal.signal(signal.SIGINT, Pdb._previous_sigint_handler)
-            except ValueError:  # ValueError: signal only works in main thread
-                pass
-            else:
-                Pdb._previous_sigint_handler = None
-        if self.setup(frame, traceback):
-            # no interaction desired at this time (happens if .pdbrc contains
-            # a command like "continue")
-            self.forget()
-            return
-        self.print_stack_entry(self.stack[self.curindex])
-        self._cmdloop()
-        self.forget()
-
-    def displayhook(self, obj):
-        """Custom displayhook for the exec in default(), which prevents
-        assignment of the _ variable in the builtins.
-        """
-        # reproduce the behavior of the standard displayhook, not printing None
-        if obj is not None:
-            self.message(repr(obj))
-
-    def default(self, line):
-        if line[:1] == '!': line = line[1:]
-        locals = self.curframe_locals
-        globals = self.curframe.f_globals
-        try:
-            code = compile(line + '\n', '<stdin>', 'single')
-            save_stdout = sys.stdout
-            save_stdin = sys.stdin
-            save_displayhook = sys.displayhook
-            try:
-                sys.stdin = self.stdin
-                sys.stdout = self.stdout
-                sys.displayhook = self.displayhook
-                exec(code, globals, locals)
-            finally:
-                sys.stdout = save_stdout
-                sys.stdin = save_stdin
-                sys.displayhook = save_displayhook
-        except:
-            exc_info = sys.exc_info()[:2]
-            self.error(traceback.format_exception_only(*exc_info)[-1].strip())
-
-    def precmd(self, line):
-        """Handle alias expansion and ';;' separator."""
-        if not line.strip():
-            return line
-        args = line.split()
-        while args[0] in self.aliases:
-            line = self.aliases[args[0]]
-            ii = 1
-            for tmpArg in args[1:]:
-                line = line.replace("%" + str(ii),
-                                    tmpArg)
-                ii += 1
-            line = line.replace("%*", ' '.join(args[1:]))
-            args = line.split()
-        # split into ';;' separated commands
-        # unless it's an alias command
-        if args[0] != 'alias':
-            marker = line.find(';;')
-            if marker >= 0:
-                # queue up everything after marker
-                next = line[marker+2:].lstrip()
-                self.cmdqueue.append(next)
-                line = line[:marker].rstrip()
-        return line
-
-    def onecmd(self, line):
-        """Interpret the argument as though it had been typed in response
-        to the prompt.
-
-        Checks whether this line is typed at the normal prompt or in
-        a breakpoint command list definition.
- """ - if not self.commands_defining: - return cmd.Cmd.onecmd(self, line) - else: - return self.handle_command_def(line) - - def handle_command_def(self, line): - """Handles one command line during command list definition.""" - cmd, arg, line = self.parseline(line) - if not cmd: - return - if cmd == 'silent': - self.commands_silent[self.commands_bnum] = True - return # continue to handle other cmd def in the cmd list - elif cmd == 'end': - self.cmdqueue = [] - return 1 # end of cmd list - cmdlist = self.commands[self.commands_bnum] - if arg: - cmdlist.append(cmd+' '+arg) - else: - cmdlist.append(cmd) - # Determine if we must stop - try: - func = getattr(self, 'do_' + cmd) - except AttributeError: - func = self.default - # one of the resuming commands - if func.__name__ in self.commands_resuming: - self.commands_doprompt[self.commands_bnum] = False - self.cmdqueue = [] - return 1 - return - - # interface abstraction functions - - def message(self, msg): - print(msg, file=self.stdout) - - def error(self, msg): - print('***', msg, file=self.stdout) - - # Generic completion functions. Individual complete_foo methods can be - # assigned below to one of these functions. - - def _complete_location(self, text, line, begidx, endidx): - # Complete a file/module/function location for break/tbreak/clear. - if line.strip().endswith((':', ',')): - # Here comes a line number or a condition which we can't complete. - return [] - # First, try to find matching functions (i.e. expressions). - try: - ret = self._complete_expression(text, line, begidx, endidx) - except Exception: - ret = [] - # Then, try to complete file names as well. - globs = glob.glob(text + '*') - for fn in globs: - if os.path.isdir(fn): - ret.append(fn + '/') - elif os.path.isfile(fn) and fn.lower().endswith(('.py', '.pyw')): - ret.append(fn + ':') - return ret - - def _complete_bpnumber(self, text, line, begidx, endidx): - # Complete a breakpoint number. (This would be more helpful if we could - # display additional info along with the completions, such as file/line - # of the breakpoint.) - return [str(i) for i, bp in enumerate(bdb.Breakpoint.bpbynumber) - if bp is not None and str(i).startswith(text)] - - def _complete_expression(self, text, line, begidx, endidx): - # Complete an arbitrary expression. - if not self.curframe: - return [] - # Collect globals and locals. It is usually not really sensible to also - # complete builtins, and they clutter the namespace quite heavily, so we - # leave them out. - ns = {**self.curframe.f_globals, **self.curframe_locals} - if '.' in text: - # Walk an attribute chain up to the last part, similar to what - # rlcompleter does. This will bail if any of the parts are not - # simple attribute access, which is what we want. - dotted = text.split('.') - try: - obj = ns[dotted[0]] - for part in dotted[1:-1]: - obj = getattr(obj, part) - except (KeyError, AttributeError): - return [] - prefix = '.'.join(dotted[:-1]) + '.' - return [prefix + n for n in dir(obj) if n.startswith(dotted[-1])] - else: - # Complete a simple name. - return [n for n in ns.keys() if n.startswith(text)] - - # Command definitions, called by cmdloop() - # The argument is the remaining string on the command line - # Return true to exit from the command loop - - def do_commands(self, arg): - """commands [bpnumber] - (com) ... - (com) end - (Pdb) - - Specify a list of commands for breakpoint number bpnumber. - The commands themselves are entered on the following lines. - Type a line containing just 'end' to terminate the commands. 
- The commands are executed when the breakpoint is hit. - - To remove all commands from a breakpoint, type commands and - follow it immediately with end; that is, give no commands. - - With no bpnumber argument, commands refers to the last - breakpoint set. - - You can use breakpoint commands to start your program up - again. Simply use the continue command, or step, or any other - command that resumes execution. - - Specifying any command resuming execution (currently continue, - step, next, return, jump, quit and their abbreviations) - terminates the command list (as if that command was - immediately followed by end). This is because any time you - resume execution (even with a simple next or step), you may - encounter another breakpoint -- which could have its own - command list, leading to ambiguities about which list to - execute. - - If you use the 'silent' command in the command list, the usual - message about stopping at a breakpoint is not printed. This - may be desirable for breakpoints that are to print a specific - message and then continue. If none of the other commands - print anything, you will see no sign that the breakpoint was - reached. - """ - if not arg: - bnum = len(bdb.Breakpoint.bpbynumber) - 1 - else: - try: - bnum = int(arg) - except: - self.error("Usage: commands [bnum]\n ...\n end") - return - self.commands_bnum = bnum - # Save old definitions for the case of a keyboard interrupt. - if bnum in self.commands: - old_command_defs = (self.commands[bnum], - self.commands_doprompt[bnum], - self.commands_silent[bnum]) - else: - old_command_defs = None - self.commands[bnum] = [] - self.commands_doprompt[bnum] = True - self.commands_silent[bnum] = False - - prompt_back = self.prompt - self.prompt = '(com) ' - self.commands_defining = True - try: - self.cmdloop() - except KeyboardInterrupt: - # Restore old definitions. - if old_command_defs: - self.commands[bnum] = old_command_defs[0] - self.commands_doprompt[bnum] = old_command_defs[1] - self.commands_silent[bnum] = old_command_defs[2] - else: - del self.commands[bnum] - del self.commands_doprompt[bnum] - del self.commands_silent[bnum] - self.error('command definition aborted, old commands restored') - finally: - self.commands_defining = False - self.prompt = prompt_back - - complete_commands = _complete_bpnumber - - def do_break(self, arg, temporary = 0): - """b(reak) [ ([filename:]lineno | function) [, condition] ] - Without argument, list all breaks. - - With a line number argument, set a break at this line in the - current file. With a function name, set a break at the first - executable line of that function. If a second argument is - present, it is a string specifying an expression which must - evaluate to true before the breakpoint is honored. - - The line number may be prefixed with a filename and a colon, - to specify a breakpoint in another file (probably one that - hasn't been loaded yet). The file is searched for on - sys.path; the .py suffix may be omitted. 
- """ - if not arg: - if self.breaks: # There's at least one - self.message("Num Type Disp Enb Where") - for bp in bdb.Breakpoint.bpbynumber: - if bp: - self.message(bp.bpformat()) - return - # parse arguments; comma has lowest precedence - # and cannot occur in filename - filename = None - lineno = None - cond = None - comma = arg.find(',') - if comma > 0: - # parse stuff after comma: "condition" - cond = arg[comma+1:].lstrip() - arg = arg[:comma].rstrip() - # parse stuff before comma: [filename:]lineno | function - colon = arg.rfind(':') - funcname = None - if colon >= 0: - filename = arg[:colon].rstrip() - f = self.lookupmodule(filename) - if not f: - self.error('%r not found from sys.path' % filename) - return - else: - filename = f - arg = arg[colon+1:].lstrip() - try: - lineno = int(arg) - except ValueError: - self.error('Bad lineno: %s' % arg) - return - else: - # no colon; can be lineno or function - try: - lineno = int(arg) - except ValueError: - try: - func = eval(arg, - self.curframe.f_globals, - self.curframe_locals) - except: - func = arg - try: - if hasattr(func, '__func__'): - func = func.__func__ - code = func.__code__ - #use co_name to identify the bkpt (function names - #could be aliased, but co_name is invariant) - funcname = code.co_name - lineno = code.co_firstlineno - filename = code.co_filename - except: - # last thing to try - (ok, filename, ln) = self.lineinfo(arg) - if not ok: - self.error('The specified object %r is not a function ' - 'or was not found along sys.path.' % arg) - return - funcname = ok # ok contains a function name - lineno = int(ln) - if not filename: - filename = self.defaultFile() - # Check for reasonable breakpoint - line = self.checkline(filename, lineno) - if line: - # now set the break point - err = self.set_break(filename, line, temporary, cond, funcname) - if err: - self.error(err) - else: - bp = self.get_breaks(filename, line)[-1] - self.message("Breakpoint %d at %s:%d" % - (bp.number, bp.file, bp.line)) - - # To be overridden in derived debuggers - def defaultFile(self): - """Produce a reasonable default.""" - filename = self.curframe.f_code.co_filename - if filename == '' and self.mainpyfile: - filename = self.mainpyfile - return filename - - do_b = do_break - - complete_break = _complete_location - complete_b = _complete_location - - def do_tbreak(self, arg): - """tbreak [ ([filename:]lineno | function) [, condition] ] - Same arguments as break, but sets a temporary breakpoint: it - is automatically deleted when first hit. - """ - self.do_break(arg, 1) - - complete_tbreak = _complete_location - - def lineinfo(self, identifier): - failed = (None, None, None) - # Input is identifier, may be in single quotes - idstring = identifier.split("'") - if len(idstring) == 1: - # not in single quotes - id = idstring[0].strip() - elif len(idstring) == 3: - # quoted - id = idstring[1].strip() - else: - return failed - if id == '': return failed - parts = id.split('.') - # Protection for derived debuggers - if parts[0] == 'self': - del parts[0] - if len(parts) == 0: - return failed - # Best first guess at file to look at - fname = self.defaultFile() - if len(parts) == 1: - item = parts[0] - else: - # More than one part. - # First is module, second is method/class - f = self.lookupmodule(parts[0]) - if f: - fname = f - item = parts[1] - answer = find_function(item, fname) - return answer or failed - - def checkline(self, filename, lineno): - """Check whether specified line seems to be executable. - - Return `lineno` if it is, 0 if not (e.g. 
a docstring, comment, blank - line or EOF). Warning: testing is not comprehensive. - """ - # this method should be callable before starting debugging, so default - # to "no globals" if there is no current frame - globs = self.curframe.f_globals if hasattr(self, 'curframe') else None - line = linecache.getline(filename, lineno, globs) - if not line: - self.message('End of file') - return 0 - line = line.strip() - # Don't allow setting breakpoint at a blank line - if (not line or (line[0] == '#') or - (line[:3] == '"""') or line[:3] == "'''"): - self.error('Blank or comment') - return 0 - return lineno - - def do_enable(self, arg): - """enable bpnumber [bpnumber ...] - Enables the breakpoints given as a space separated list of - breakpoint numbers. - """ - args = arg.split() - for i in args: - try: - bp = self.get_bpbynumber(i) - except ValueError as err: - self.error(err) - else: - bp.enable() - self.message('Enabled %s' % bp) - - complete_enable = _complete_bpnumber - - def do_disable(self, arg): - """disable bpnumber [bpnumber ...] - Disables the breakpoints given as a space separated list of - breakpoint numbers. Disabling a breakpoint means it cannot - cause the program to stop execution, but unlike clearing a - breakpoint, it remains in the list of breakpoints and can be - (re-)enabled. - """ - args = arg.split() - for i in args: - try: - bp = self.get_bpbynumber(i) - except ValueError as err: - self.error(err) - else: - bp.disable() - self.message('Disabled %s' % bp) - - complete_disable = _complete_bpnumber - - def do_condition(self, arg): - """condition bpnumber [condition] - Set a new condition for the breakpoint, an expression which - must evaluate to true before the breakpoint is honored. If - condition is absent, any existing condition is removed; i.e., - the breakpoint is made unconditional. - """ - args = arg.split(' ', 1) - try: - cond = args[1] - except IndexError: - cond = None - try: - bp = self.get_bpbynumber(args[0].strip()) - except IndexError: - self.error('Breakpoint number expected') - except ValueError as err: - self.error(err) - else: - bp.cond = cond - if not cond: - self.message('Breakpoint %d is now unconditional.' % bp.number) - else: - self.message('New condition set for breakpoint %d.' % bp.number) - - complete_condition = _complete_bpnumber - - def do_ignore(self, arg): - """ignore bpnumber [count] - Set the ignore count for the given breakpoint number. If - count is omitted, the ignore count is set to 0. A breakpoint - becomes active when the ignore count is zero. When non-zero, - the count is decremented each time the breakpoint is reached - and the breakpoint is not disabled and any associated - condition evaluates to true. - """ - args = arg.split() - try: - count = int(args[1].strip()) - except: - count = 0 - try: - bp = self.get_bpbynumber(args[0].strip()) - except IndexError: - self.error('Breakpoint number expected') - except ValueError as err: - self.error(err) - else: - bp.ignore = count - if count > 0: - if count > 1: - countstr = '%d crossings' % count - else: - countstr = '1 crossing' - self.message('Will ignore next %s of breakpoint %d.' % - (countstr, bp.number)) - else: - self.message('Will stop next time breakpoint %d is reached.' - % bp.number) - - complete_ignore = _complete_bpnumber - - def do_clear(self, arg): - """cl(ear) filename:lineno\ncl(ear) [bpnumber [bpnumber...]] - With a space separated list of breakpoint numbers, clear - those breakpoints. Without argument, clear all breaks (but - first ask confirmation). 
With a filename:lineno argument, - clear all breaks at that line in that file. - """ - if not arg: - try: - reply = input('Clear all breaks? ') - except EOFError: - reply = 'no' - reply = reply.strip().lower() - if reply in ('y', 'yes'): - bplist = [bp for bp in bdb.Breakpoint.bpbynumber if bp] - self.clear_all_breaks() - for bp in bplist: - self.message('Deleted %s' % bp) - return - if ':' in arg: - # Make sure it works for "clear C:\foo\bar.py:12" - i = arg.rfind(':') - filename = arg[:i] - arg = arg[i+1:] - try: - lineno = int(arg) - except ValueError: - err = "Invalid line number (%s)" % arg - else: - bplist = self.get_breaks(filename, lineno) - err = self.clear_break(filename, lineno) - if err: - self.error(err) - else: - for bp in bplist: - self.message('Deleted %s' % bp) - return - numberlist = arg.split() - for i in numberlist: - try: - bp = self.get_bpbynumber(i) - except ValueError as err: - self.error(err) - else: - self.clear_bpbynumber(i) - self.message('Deleted %s' % bp) - do_cl = do_clear # 'c' is already an abbreviation for 'continue' - - complete_clear = _complete_location - complete_cl = _complete_location - - def do_where(self, arg): - """w(here) - Print a stack trace, with the most recent frame at the bottom. - An arrow indicates the "current frame", which determines the - context of most commands. 'bt' is an alias for this command. - """ - self.print_stack_trace() - do_w = do_where - do_bt = do_where - - def _select_frame(self, number): - assert 0 <= number < len(self.stack) - self.curindex = number - self.curframe = self.stack[self.curindex][0] - self.curframe_locals = self.curframe.f_locals - self.print_stack_entry(self.stack[self.curindex]) - self.lineno = None - - def do_up(self, arg): - """u(p) [count] - Move the current frame count (default one) levels up in the - stack trace (to an older frame). - """ - if self.curindex == 0: - self.error('Oldest frame') - return - try: - count = int(arg or 1) - except ValueError: - self.error('Invalid frame count (%s)' % arg) - return - if count < 0: - newframe = 0 - else: - newframe = max(0, self.curindex - count) - self._select_frame(newframe) - do_u = do_up - - def do_down(self, arg): - """d(own) [count] - Move the current frame count (default one) levels down in the - stack trace (to a newer frame). - """ - if self.curindex + 1 == len(self.stack): - self.error('Newest frame') - return - try: - count = int(arg or 1) - except ValueError: - self.error('Invalid frame count (%s)' % arg) - return - if count < 0: - newframe = len(self.stack) - 1 - else: - newframe = min(len(self.stack) - 1, self.curindex + count) - self._select_frame(newframe) - do_d = do_down - - def do_until(self, arg): - """unt(il) [lineno] - Without argument, continue execution until the line with a - number greater than the current one is reached. With a line - number, continue execution until a line with a number greater - or equal to that is reached. In both cases, also stop when - the current frame returns. - """ - if arg: - try: - lineno = int(arg) - except ValueError: - self.error('Error in argument: %r' % arg) - return - if lineno <= self.curframe.f_lineno: - self.error('"until" line number is smaller than current ' - 'line number') - return - else: - lineno = None - self.set_until(self.curframe, lineno) - return 1 - do_unt = do_until - - def do_step(self, arg): - """s(tep) - Execute the current line, stop at the first possible occasion - (either in a function that is called or in the current - function). 
- """ - self.set_step() - return 1 - do_s = do_step - - def do_next(self, arg): - """n(ext) - Continue execution until the next line in the current function - is reached or it returns. - """ - self.set_next(self.curframe) - return 1 - do_n = do_next - - def do_run(self, arg): - """run [args...] - Restart the debugged python program. If a string is supplied - it is split with "shlex", and the result is used as the new - sys.argv. History, breakpoints, actions and debugger options - are preserved. "restart" is an alias for "run". - """ - if arg: - import shlex - argv0 = sys.argv[0:1] - sys.argv = shlex.split(arg) - sys.argv[:0] = argv0 - # this is caught in the main debugger loop - raise Restart - - do_restart = do_run - - def do_return(self, arg): - """r(eturn) - Continue execution until the current function returns. - """ - self.set_return(self.curframe) - return 1 - do_r = do_return - - def do_continue(self, arg): - """c(ont(inue)) - Continue execution, only stop when a breakpoint is encountered. - """ - if not self.nosigint: - try: - Pdb._previous_sigint_handler = \ - signal.signal(signal.SIGINT, self.sigint_handler) - except ValueError: - # ValueError happens when do_continue() is invoked from - # a non-main thread in which case we just continue without - # SIGINT set. Would printing a message here (once) make - # sense? - pass - self.set_continue() - return 1 - do_c = do_cont = do_continue - - def do_jump(self, arg): - """j(ump) lineno - Set the next line that will be executed. Only available in - the bottom-most frame. This lets you jump back and execute - code again, or jump forward to skip code that you don't want - to run. - - It should be noted that not all jumps are allowed -- for - instance it is not possible to jump into the middle of a - for loop or out of a finally clause. - """ - if self.curindex + 1 != len(self.stack): - self.error('You can only jump within the bottom frame') - return - try: - arg = int(arg) - except ValueError: - self.error("The 'jump' command requires a line number") - else: - try: - # Do the jump, fix up our copy of the stack, and display the - # new position - self.curframe.f_lineno = arg - self.stack[self.curindex] = self.stack[self.curindex][0], arg - self.print_stack_entry(self.stack[self.curindex]) - except ValueError as e: - self.error('Jump failed: %s' % e) - do_j = do_jump - - def do_debug(self, arg): - """debug code - Enter a recursive debugger that steps through the code - argument (which is an arbitrary expression or statement to be - executed in the current environment). - """ - sys.settrace(None) - globals = self.curframe.f_globals - locals = self.curframe_locals - p = Pdb(self.completekey, self.stdin, self.stdout) - p.prompt = "(%s) " % self.prompt.strip() - self.message("ENTERING RECURSIVE DEBUGGER") - try: - sys.call_tracing(p.run, (arg, globals, locals)) - except Exception: - exc_info = sys.exc_info()[:2] - self.error(traceback.format_exception_only(*exc_info)[-1].strip()) - self.message("LEAVING RECURSIVE DEBUGGER") - sys.settrace(self.trace_dispatch) - self.lastcmd = p.lastcmd - - complete_debug = _complete_expression - - def do_quit(self, arg): - """q(uit)\nexit - Quit from the debugger. The program being executed is aborted. - """ - self._user_requested_quit = True - self.set_quit() - return 1 - - do_q = do_quit - do_exit = do_quit - - def do_EOF(self, arg): - """EOF - Handles the receipt of EOF as a command. 
- """ - self.message('') - self._user_requested_quit = True - self.set_quit() - return 1 - - def do_args(self, arg): - """a(rgs) - Print the argument list of the current function. - """ - co = self.curframe.f_code - dict = self.curframe_locals - n = co.co_argcount + co.co_kwonlyargcount - if co.co_flags & inspect.CO_VARARGS: n = n+1 - if co.co_flags & inspect.CO_VARKEYWORDS: n = n+1 - for i in range(n): - name = co.co_varnames[i] - if name in dict: - self.message('%s = %r' % (name, dict[name])) - else: - self.message('%s = *** undefined ***' % (name,)) - do_a = do_args - - def do_retval(self, arg): - """retval - Print the return value for the last return of a function. - """ - if '__return__' in self.curframe_locals: - self.message(repr(self.curframe_locals['__return__'])) - else: - self.error('Not yet returned!') - do_rv = do_retval - - def _getval(self, arg): - try: - return eval(arg, self.curframe.f_globals, self.curframe_locals) - except: - exc_info = sys.exc_info()[:2] - self.error(traceback.format_exception_only(*exc_info)[-1].strip()) - raise - - def _getval_except(self, arg, frame=None): - try: - if frame is None: - return eval(arg, self.curframe.f_globals, self.curframe_locals) - else: - return eval(arg, frame.f_globals, frame.f_locals) - except: - exc_info = sys.exc_info()[:2] - err = traceback.format_exception_only(*exc_info)[-1].strip() - return _rstr('** raised %s **' % err) - - def do_p(self, arg): - """p expression - Print the value of the expression. - """ - try: - self.message(repr(self._getval(arg))) - except: - pass - - def do_pp(self, arg): - """pp expression - Pretty-print the value of the expression. - """ - try: - self.message(pprint.pformat(self._getval(arg))) - except: - pass - - complete_print = _complete_expression - complete_p = _complete_expression - complete_pp = _complete_expression - - def do_list(self, arg): - """l(ist) [first [,last] | .] - - List source code for the current file. Without arguments, - list 11 lines around the current line or continue the previous - listing. With . as argument, list 11 lines around the current - line. With one argument, list 11 lines starting at that line. - With two arguments, list the given range; if the second - argument is less than the first, it is a count. - - The current line in the current frame is indicated by "->". - If an exception is being debugged, the line where the - exception was originally raised or propagated is indicated by - ">>", if it differs from the current line. - """ - self.lastcmd = 'list' - last = None - if arg and arg != '.': - try: - if ',' in arg: - first, last = arg.split(',') - first = int(first.strip()) - last = int(last.strip()) - if last < first: - # assume it's a count - last = first + last - else: - first = int(arg.strip()) - first = max(1, first - 5) - except ValueError: - self.error('Error in argument: %r' % arg) - return - elif self.lineno is None or arg == '.': - first = max(1, self.curframe.f_lineno - 5) - else: - first = self.lineno + 1 - if last is None: - last = first + 10 - filename = self.curframe.f_code.co_filename - breaklist = self.get_file_breaks(filename) - try: - lines = linecache.getlines(filename, self.curframe.f_globals) - self._print_lines(lines[first-1:last], first, breaklist, - self.curframe) - self.lineno = min(last, len(lines)) - if len(lines) < last: - self.message('[EOF]') - except KeyboardInterrupt: - pass - do_l = do_list - - def do_longlist(self, arg): - """longlist | ll - List the whole source code for the current function or frame. 
- """ - filename = self.curframe.f_code.co_filename - breaklist = self.get_file_breaks(filename) - try: - lines, lineno = getsourcelines(self.curframe) - except OSError as err: - self.error(err) - return - self._print_lines(lines, lineno, breaklist, self.curframe) - do_ll = do_longlist - - def do_source(self, arg): - """source expression - Try to get source code for the given object and display it. - """ - try: - obj = self._getval(arg) - except: - return - try: - lines, lineno = getsourcelines(obj) - except (OSError, TypeError) as err: - self.error(err) - return - self._print_lines(lines, lineno) - - complete_source = _complete_expression - - def _print_lines(self, lines, start, breaks=(), frame=None): - """Print a range of lines.""" - if frame: - current_lineno = frame.f_lineno - exc_lineno = self.tb_lineno.get(frame, -1) - else: - current_lineno = exc_lineno = -1 - for lineno, line in enumerate(lines, start): - s = str(lineno).rjust(3) - if len(s) < 4: - s += ' ' - if lineno in breaks: - s += 'B' - else: - s += ' ' - if lineno == current_lineno: - s += '->' - elif lineno == exc_lineno: - s += '>>' - self.message(s + '\t' + line.rstrip()) - - def do_whatis(self, arg): - """whatis arg - Print the type of the argument. - """ - try: - value = self._getval(arg) - except: - # _getval() already printed the error - return - code = None - # Is it a function? - try: - code = value.__code__ - except Exception: - pass - if code: - self.message('Function %s' % code.co_name) - return - # Is it an instance method? - try: - code = value.__func__.__code__ - except Exception: - pass - if code: - self.message('Method %s' % code.co_name) - return - # Is it a class? - if value.__class__ is type: - self.message('Class %s.%s' % (value.__module__, value.__qualname__)) - return - # None of the above... - self.message(type(value)) - - complete_whatis = _complete_expression - - def do_display(self, arg): - """display [expression] - - Display the value of the expression if it changed, each time execution - stops in the current frame. - - Without expression, list all display expressions for the current frame. - """ - if not arg: - self.message('Currently displaying:') - for item in self.displaying.get(self.curframe, {}).items(): - self.message('%s: %r' % item) - else: - val = self._getval_except(arg) - self.displaying.setdefault(self.curframe, {})[arg] = val - self.message('display %s: %r' % (arg, val)) - - complete_display = _complete_expression - - def do_undisplay(self, arg): - """undisplay [expression] - - Do not display the expression any more in the current frame. - - Without expression, clear all display expressions for the current frame. - """ - if arg: - try: - del self.displaying.get(self.curframe, {})[arg] - except KeyError: - self.error('not displaying %s' % arg) - else: - self.displaying.pop(self.curframe, None) - - def complete_undisplay(self, text, line, begidx, endidx): - return [e for e in self.displaying.get(self.curframe, {}) - if e.startswith(text)] - - def do_interact(self, arg): - """interact - - Start an interactive interpreter whose global namespace - contains all the (global and local) names found in the current scope. - """ - ns = {**self.curframe.f_globals, **self.curframe_locals} - code.interact("*interactive*", local=ns) - - def do_alias(self, arg): - """alias [name [command [parameter parameter ...] ]] - Create an alias called 'name' that executes 'command'. The - command must *not* be enclosed in quotes. 
Replaceable - parameters can be indicated by %1, %2, and so on, while %* is - replaced by all the parameters. If no command is given, the - current alias for name is shown. If no name is given, all - aliases are listed. - - Aliases may be nested and can contain anything that can be - legally typed at the pdb prompt. Note! You *can* override - internal pdb commands with aliases! Those internal commands - are then hidden until the alias is removed. Aliasing is - recursively applied to the first word of the command line; all - other words in the line are left alone. - - As an example, here are two useful aliases (especially when - placed in the .pdbrc file): - - # Print instance variables (usage "pi classInst") - alias pi for k in %1.__dict__.keys(): print("%1.",k,"=",%1.__dict__[k]) - # Print instance variables in self - alias ps pi self - """ - args = arg.split() - if len(args) == 0: - keys = sorted(self.aliases.keys()) - for alias in keys: - self.message("%s = %s" % (alias, self.aliases[alias])) - return - if args[0] in self.aliases and len(args) == 1: - self.message("%s = %s" % (args[0], self.aliases[args[0]])) - else: - self.aliases[args[0]] = ' '.join(args[1:]) - - def do_unalias(self, arg): - """unalias name - Delete the specified alias. - """ - args = arg.split() - if len(args) == 0: return - if args[0] in self.aliases: - del self.aliases[args[0]] - - def complete_unalias(self, text, line, begidx, endidx): - return [a for a in self.aliases if a.startswith(text)] - - # List of all the commands making the program resume execution. - commands_resuming = ['do_continue', 'do_step', 'do_next', 'do_return', - 'do_quit', 'do_jump'] - - # Print a traceback starting at the top stack frame. - # The most recently entered frame is printed last; - # this is different from dbx and gdb, but consistent with - # the Python interpreter's stack trace. - # It is also consistent with the up/down commands (which are - # compatible with dbx and gdb: up moves towards 'main()' - # and down moves towards the most recent stack frame). - - def print_stack_trace(self): - try: - for frame_lineno in self.stack: - self.print_stack_entry(frame_lineno) - except KeyboardInterrupt: - pass - - def print_stack_entry(self, frame_lineno, prompt_prefix=line_prefix): - frame, lineno = frame_lineno - if frame is self.curframe: - prefix = '> ' - else: - prefix = ' ' - self.message(prefix + - self.format_stack_entry(frame_lineno, prompt_prefix)) - - # Provide help - - def do_help(self, arg): - """h(elp) - Without argument, print the list of available commands. - With a command name as argument, print help about that command. - "help pdb" shows the full pdb documentation. - "help exec" gives help on the ! command. - """ - if not arg: - return cmd.Cmd.do_help(self, arg) - try: - try: - topic = getattr(self, 'help_' + arg) - return topic() - except AttributeError: - command = getattr(self, 'do_' + arg) - except AttributeError: - self.error('No help for %r' % arg) - else: - if sys.flags.optimize >= 2: - self.error('No help for %r; please do not run Python with -OO ' - 'if you need command help' % arg) - return - self.message(command.__doc__.rstrip()) - - do_h = do_help - - def help_exec(self): - """(!) statement - Execute the (one-line) statement in the context of the current - stack frame. The exclamation point can be omitted unless the - first word of the statement resembles a debugger command. 
To - assign to a global variable you must always prefix the command - with a 'global' command, e.g.: - (Pdb) global list_options; list_options = ['-l'] - (Pdb) - """ - self.message((self.help_exec.__doc__ or '').strip()) - - def help_pdb(self): - help() - - # other helper functions - - def lookupmodule(self, filename): - """Helper function for break/clear parsing -- may be overridden. - - lookupmodule() translates (possibly incomplete) file or module name - into an absolute file name. - """ - if os.path.isabs(filename) and os.path.exists(filename): - return filename - f = os.path.join(sys.path[0], filename) - if os.path.exists(f) and self.canonic(f) == self.mainpyfile: - return f - root, ext = os.path.splitext(filename) - if ext == '': - filename = filename + '.py' - if os.path.isabs(filename): - return filename - for dirname in sys.path: - while os.path.islink(dirname): - dirname = os.readlink(dirname) - fullname = os.path.join(dirname, filename) - if os.path.exists(fullname): - return fullname - return None - - def _runmodule(self, module_name): - self._wait_for_mainpyfile = True - self._user_requested_quit = False - import runpy - mod_name, mod_spec, code = runpy._get_module_details(module_name) - self.mainpyfile = self.canonic(code.co_filename) - import __main__ - __main__.__dict__.clear() - __main__.__dict__.update({ - "__name__": "__main__", - "__file__": self.mainpyfile, - "__package__": mod_spec.parent, - "__loader__": mod_spec.loader, - "__spec__": mod_spec, - "__builtins__": __builtins__, - }) - self.run(code) - - def _runscript(self, filename): - # The script has to run in __main__ namespace (or imports from - # __main__ will break). - # - # So we clear up the __main__ and set several special variables - # (this gets rid of pdb's globals and cleans old variables on restarts). - import __main__ - __main__.__dict__.clear() - __main__.__dict__.update({"__name__" : "__main__", - "__file__" : filename, - "__builtins__": __builtins__, - }) - - # When bdb sets tracing, a number of call and line events happens - # BEFORE debugger even reaches user's code (and the exact sequence of - # events depends on python version). So we take special measures to - # avoid stopping before we reach the main script (see user_line and - # user_call for details). 
- self._wait_for_mainpyfile = True - self.mainpyfile = self.canonic(filename) - self._user_requested_quit = False - with io.open_code(filename) as fp: - statement = "exec(compile(%r, %r, 'exec'))" % \ - (fp.read(), self.mainpyfile) - self.run(statement) - -# Collect all command help into docstring, if not run with -OO - -if __doc__ is not None: - # unfortunately we can't guess this order from the class definition - _help_order = [ - 'help', 'where', 'down', 'up', 'break', 'tbreak', 'clear', 'disable', - 'enable', 'ignore', 'condition', 'commands', 'step', 'next', 'until', - 'jump', 'return', 'retval', 'run', 'continue', 'list', 'longlist', - 'args', 'p', 'pp', 'whatis', 'source', 'display', 'undisplay', - 'interact', 'alias', 'unalias', 'debug', 'quit', - ] - - for _command in _help_order: - __doc__ += getattr(Pdb, 'do_' + _command).__doc__.strip() + '\n\n' - __doc__ += Pdb.help_exec.__doc__ - - del _help_order, _command - - -# Simplified interface - -def run(statement, globals=None, locals=None): - Pdb().run(statement, globals, locals) - -def runeval(expression, globals=None, locals=None): - return Pdb().runeval(expression, globals, locals) - -def runctx(statement, globals, locals): - # B/W compatibility - run(statement, globals, locals) - -def runcall(*args, **kwds): - return Pdb().runcall(*args, **kwds) - -def set_trace(*, header=None): - pdb = Pdb() - if header is not None: - pdb.message(header) - pdb.set_trace(sys._getframe().f_back) - -# Post-Mortem interface - -def post_mortem(t=None): - # handling the default - if t is None: - # sys.exc_info() returns (type, value, traceback) if an exception is - # being handled, otherwise it returns None - t = sys.exc_info()[2] - if t is None: - raise ValueError("A valid traceback must be passed if no " - "exception is being handled") - - p = Pdb() - p.reset() - p.interaction(None, t) - -def pm(): - post_mortem(sys.last_traceback) - - -# Main program for testing - -TESTCMD = 'import x; x.main()' - -def test(): - run(TESTCMD) - -# print help -def help(): - import pydoc - pydoc.pager(__doc__) - -_usage = """\ -usage: pdb.py [-c command] ... [-m module | pyfile] [arg] ... - -Debug the Python program given by pyfile. Alternatively, -an executable module or package to debug can be specified using -the -m switch. - -Initial commands are read from .pdbrc files in your home directory -and in the current directory, if they exist. Commands supplied with --c are executed after commands from .pdbrc files. - -To let the script run until an exception occurs, use "-c continue". -To let the script run up to a given line X in the debugged file, use -"-c 'until X'".""" - -def main(): - import getopt - - opts, args = getopt.getopt(sys.argv[1:], 'mhc:', ['help', 'command=']) - - if not args: - print(_usage) - sys.exit(2) - - commands = [] - run_as_module = False - for opt, optarg in opts: - if opt in ['-h', '--help']: - print(_usage) - sys.exit() - elif opt in ['-c', '--command']: - commands.append(optarg) - elif opt in ['-m']: - run_as_module = True - - mainpyfile = args[0] # Get script filename - if not run_as_module and not os.path.exists(mainpyfile): - print('Error:', mainpyfile, 'does not exist') - sys.exit(1) - - sys.argv[:] = args # Hide "pdb.py" and pdb options from argument list - - # Replace pdb's dir with script's dir in front of module search path. - if not run_as_module: - sys.path[0] = os.path.dirname(mainpyfile) - - # Note on saving/restoring sys.argv: it's a good idea when sys.argv was - # modified by the script being debugged. 
It's a bad idea when it was - # changed by the user from the command line. There is a "restart" command - # which allows explicit specification of command line arguments. - pdb = Pdb() - pdb.rcLines.extend(commands) - while True: - try: - if run_as_module: - pdb._runmodule(mainpyfile) - else: - pdb._runscript(mainpyfile) - if pdb._user_requested_quit: - break - print("The program finished and will be restarted") - except Restart: - print("Restarting", mainpyfile, "with arguments:") - print("\t" + " ".join(args)) - except SystemExit: - # In most cases SystemExit does not warrant a post-mortem session. - print("The program exited via sys.exit(). Exit status:", end=' ') - print(sys.exc_info()[1]) - except SyntaxError: - traceback.print_exc() - sys.exit(1) - except: - traceback.print_exc() - print("Uncaught exception. Entering post mortem debugging") - print("Running 'cont' or 'step' will restart the program") - t = sys.exc_info()[2] - pdb.interaction(None, t) - print("Post mortem debugger finished. The " + mainpyfile + - " will be restarted") - - -# When invoked as main program, invoke the debugger on a script -if __name__ == '__main__': - import pdb - pdb.main() diff --git a/Lib/test/test_json/__init__.py b/Lib/test/test_json/__init__.py index 08a79415faf..1a1684313b1 100644 --- a/Lib/test/test_json/__init__.py +++ b/Lib/test/test_json/__init__.py @@ -1,6 +1,6 @@ import os import json -import doctest +# import doctest import unittest from test import support @@ -50,8 +50,8 @@ def test_cjson(self): def load_tests(loader, _, pattern): suite = unittest.TestSuite() - for mod in (json, json.encoder, json.decoder): - suite.addTest(doctest.DocTestSuite(mod)) + # for mod in (json, json.encoder, json.decoder): + # suite.addTest(doctest.DocTestSuite(mod)) suite.addTest(TestPyTest('test_pyjson')) suite.addTest(TestCTest('test_cjson')) diff --git a/README.md b/README.md index 5de067e9d97..8eb2f9e31f5 100644 --- a/README.md +++ b/README.md @@ -31,6 +31,7 @@ To test RustPython, do the following: $ git clone https://github.com/RustPython/RustPython $ cd RustPython + $ export RUSTPYTHONPATH=Lib $ cargo run demo.py Hello, RustPython! @@ -151,20 +152,20 @@ methods are often the simplest and easiest way to contribute. You can also simply run `./whats_left.sh` to assist in finding any unimplemented method. -## Using a different standard library +## Using a standard library -As of now the standard library is under construction. You can change a standard +As of now the standard library is under construction. You can use a standard library by setting the RUSTPYTHONPATH environment variable. To do this, follow this method: ```shell -$ export RUSTPYTHONPATH=./Lib # this is same as the default value +$ export RUSTPYTHONPATH=~/GIT/RustPython/Lib $ cargo run -- -c 'import xdrlib' ``` You can play around with other standard libraries for python. For example, the -[ouroboros library](https://github.com/pybee/ouroboros) or CPython Lib. +[ouroboros library](https://github.com/pybee/ouroboros). 
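Since this patch series also vendors `Lib/bdb.py`, `Lib/cmd.py` and `Lib/pdb.py`, those modules are a natural thing to play with once `RUSTPYTHONPATH` points at the vendored `Lib` tree. The snippet below is only an illustrative sketch: the file name is hypothetical, and it assumes the two imports resolve under RustPython (`cmd` and `bdb` are pure Python with no extension-module dependencies, so they are the safest candidates to try first).

```python
# smoke_test.py (hypothetical file name), run with: cargo run smoke_test.py
# Assumes RUSTPYTHONPATH is set to the vendored Lib directory.
import bdb
import cmd

print(cmd.Cmd)         # the line-oriented command framework that pdb builds on
print(bdb.Breakpoint)  # the breakpoint bookkeeping class used by pdb
```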
## Compiling to WebAssembly diff --git a/bytecode/src/bytecode.rs b/bytecode/src/bytecode.rs index 907d488a867..c09d717a0b0 100644 --- a/bytecode/src/bytecode.rs +++ b/bytecode/src/bytecode.rs @@ -73,10 +73,6 @@ impl CodeFlags { pub const NAME_MAPPING: &'static [(&'static str, CodeFlags)] = &[ ("GENERATOR", CodeFlags::IS_GENERATOR), ("COROUTINE", CodeFlags::IS_COROUTINE), - ( - "ASYNC_GENERATOR", - Self::from_bits_truncate(Self::IS_GENERATOR.bits | Self::IS_COROUTINE.bits), - ), ("VARARGS", CodeFlags::HAS_VARARGS), ("VARKEYWORDS", CodeFlags::HAS_VARKEYWORDS), ]; diff --git a/parser/src/lexer.rs b/parser/src/lexer.rs index eda96ea7416..8667165ea40 100644 --- a/parser/src/lexer.rs +++ b/parser/src/lexer.rs @@ -530,7 +530,7 @@ where loop { match self.next_char() { Some('\\') => { - if self.chr0 == Some(quote_char) && !is_raw { + if self.chr0 == Some(quote_char) { string_content.push(quote_char); self.next_char(); } else if is_raw { @@ -1625,7 +1625,7 @@ mod tests { is_fstring: false, }, Tok::String { - value: String::from("raw\\'"), + value: String::from("raw\'"), is_fstring: false, }, Tok::String { diff --git a/src/shell.rs b/src/shell.rs index 4a6c095e613..e3922ee8828 100644 --- a/src/shell.rs +++ b/src/shell.rs @@ -6,7 +6,7 @@ use rustpython_vm::readline::{Readline, ReadlineResult}; use rustpython_vm::{ exceptions::{print_exception, PyBaseExceptionRef}, obj::objtype, - pyobject::PyResult, + pyobject::{ItemProtocol, PyResult}, scope::Scope, VirtualMachine, }; @@ -19,10 +19,19 @@ enum ShellExecResult { fn shell_exec(vm: &VirtualMachine, source: &str, scope: Scope) -> ShellExecResult { match vm.compile(source, compile::Mode::Single, "".to_owned()) { - Ok(code) => match vm.run_code_obj(code, scope) { - Ok(_val) => ShellExecResult::Ok, - Err(err) => ShellExecResult::PyErr(err), - }, + Ok(code) => { + match vm.run_code_obj(code, scope.clone()) { + Ok(value) => { + // Save non-None values as "_" + if !vm.is_none(&value) { + let key = "_"; + scope.globals.set_item(key, value, vm).unwrap(); + } + ShellExecResult::Ok + } + Err(err) => ShellExecResult::PyErr(err), + } + } Err(CompileError { error: CompileErrorType::Parse(ParseErrorType::EOF), .. 
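The `shell_exec` hunk above binds every non-`None` REPL result to the name `_` in the shell's global scope, matching CPython's interactive convention. Together with the `PrintExpr` change further down, an interactive session against the patched shell should behave roughly like this (an illustrative transcript, not captured output):

```python
>>> 40 + 2
42
>>> _ + 1      # `_` was bound to 42 by the previous evaluation
43
>>> None       # None results are neither echoed nor stored, so `_` is unchanged
>>> _
43
```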
diff --git a/vm/src/builtins.rs b/vm/src/builtins.rs index 1f5f08bd56a..d4899e0a5e0 100644 --- a/vm/src/builtins.rs +++ b/vm/src/builtins.rs @@ -618,7 +618,7 @@ pub fn builtin_exit(exit_code_arg: OptionalArg, vm: &VirtualMachine Err(vm.new_exception(vm.ctx.exceptions.system_exit.clone(), vec![code])) } -#[derive(Debug, Default, FromArgs)] +#[derive(Debug, FromArgs)] pub struct PrintOptions { #[pyarg(keyword_only, default = "None")] sep: Option, diff --git a/vm/src/exceptions.rs b/vm/src/exceptions.rs index 05d1a370fa6..3b0eeee7695 100644 --- a/vm/src/exceptions.rs +++ b/vm/src/exceptions.rs @@ -6,32 +6,27 @@ use crate::obj::objtuple::{PyTuple, PyTupleRef}; use crate::obj::objtype::{self, PyClass, PyClassRef}; use crate::py_serde; use crate::pyobject::{ - PyClassImpl, PyContext, PyIterable, PyObjectRef, PyRef, PyResult, PyValue, ThreadSafe, - TryFromObject, TypeProtocol, + PyClassImpl, PyContext, PyIterable, PyObjectRef, PyRef, PyResult, PyValue, TryFromObject, + TypeProtocol, }; use crate::slots::PyTpFlags; use crate::types::create_type; use crate::VirtualMachine; - use itertools::Itertools; +use std::cell::{Cell, RefCell}; use std::fmt; use std::fs::File; use std::io::{self, BufRead, BufReader, Write}; -use std::sync::RwLock; - -use crossbeam_utils::atomic::AtomicCell; #[pyclass] pub struct PyBaseException { - traceback: RwLock>, - cause: RwLock>, - context: RwLock>, - suppress_context: AtomicCell, - args: RwLock, + traceback: RefCell>, + cause: RefCell>, + context: RefCell>, + suppress_context: Cell, + args: RefCell, } -impl ThreadSafe for PyBaseException {} - impl fmt::Debug for PyBaseException { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { // TODO: implement more detailed, non-recursive Debug formatter @@ -53,11 +48,11 @@ impl PyValue for PyBaseException { impl PyBaseException { pub(crate) fn new(args: Vec, vm: &VirtualMachine) -> PyBaseException { PyBaseException { - traceback: RwLock::new(None), - cause: RwLock::new(None), - context: RwLock::new(None), - suppress_context: AtomicCell::new(false), - args: RwLock::new(PyTuple::from(args).into_ref(vm)), + traceback: RefCell::new(None), + cause: RefCell::new(None), + context: RefCell::new(None), + suppress_context: Cell::new(false), + args: RefCell::new(PyTuple::from(args).into_ref(vm)), } } @@ -68,65 +63,65 @@ impl PyBaseException { #[pymethod(name = "__init__")] fn init(&self, args: PyFuncArgs, vm: &VirtualMachine) -> PyResult<()> { - *self.args.write().unwrap() = PyTuple::from(args.args).into_ref(vm); + self.args.replace(PyTuple::from(args.args).into_ref(vm)); Ok(()) } #[pyproperty] pub fn args(&self) -> PyTupleRef { - self.args.read().unwrap().clone() + self.args.borrow().clone() } #[pyproperty(setter)] fn set_args(&self, args: PyIterable, vm: &VirtualMachine) -> PyResult<()> { let args = args.iter(vm)?.collect::>>()?; - *self.args.write().unwrap() = PyTuple::from(args).into_ref(vm); + self.args.replace(PyTuple::from(args).into_ref(vm)); Ok(()) } #[pyproperty(name = "__traceback__")] pub fn traceback(&self) -> Option { - self.traceback.read().unwrap().clone() + self.traceback.borrow().clone() } #[pyproperty(name = "__traceback__", setter)] pub fn set_traceback(&self, traceback: Option) { - *self.traceback.write().unwrap() = traceback; + self.traceback.replace(traceback); } #[pyproperty(name = "__cause__")] pub fn cause(&self) -> Option { - self.cause.read().unwrap().clone() + self.cause.borrow().clone() } #[pyproperty(name = "__cause__", setter)] pub fn set_cause(&self, cause: Option) { - *self.cause.write().unwrap() = 
cause; + self.cause.replace(cause); } #[pyproperty(name = "__context__")] pub fn context(&self) -> Option { - self.context.read().unwrap().clone() + self.context.borrow().clone() } #[pyproperty(name = "__context__", setter)] pub fn set_context(&self, context: Option) { - *self.context.write().unwrap() = context; + self.context.replace(context); } #[pyproperty(name = "__suppress_context__")] fn get_suppress_context(&self) -> bool { - self.suppress_context.load() + self.suppress_context.get() } #[pyproperty(name = "__suppress_context__", setter)] fn set_suppress_context(&self, suppress_context: bool) { - self.suppress_context.store(suppress_context); + self.suppress_context.set(suppress_context); } #[pymethod] fn with_traceback(zelf: PyRef, tb: Option) -> PyResult { - *zelf.traceback.write().unwrap() = tb; + zelf.traceback.replace(tb); Ok(zelf.as_object().clone()) } @@ -218,7 +213,7 @@ pub fn write_exception_inner( vm: &VirtualMachine, exc: &PyBaseExceptionRef, ) -> io::Result<()> { - if let Some(tb) = exc.traceback.read().unwrap().clone() { + if let Some(tb) = exc.traceback.borrow().clone() { writeln!(output, "Traceback (most recent call last):")?; for tb in tb.iter() { write_traceback_entry(output, &tb)?; @@ -610,8 +605,7 @@ fn none_getter(_obj: PyObjectRef, vm: &VirtualMachine) -> PyObjectRef { fn make_arg_getter(idx: usize) -> impl Fn(PyBaseExceptionRef, &VirtualMachine) -> PyObjectRef { move |exc, vm| { exc.args - .read() - .unwrap() + .borrow() .as_slice() .get(idx) .cloned() @@ -722,7 +716,7 @@ impl serde::Serialize for SerializeException<'_> { "context", &self.exc.context().as_ref().map(|e| Self::new(self.vm, e)), )?; - struc.serialize_field("suppress_context", &self.exc.suppress_context.load())?; + struc.serialize_field("suppress_context", &self.exc.suppress_context.get())?; let args = { struct Args<'vm>(&'vm VirtualMachine, PyTupleRef); diff --git a/vm/src/frame.rs b/vm/src/frame.rs index e522a8b4f3d..ae171410ca2 100644 --- a/vm/src/frame.rs +++ b/vm/src/frame.rs @@ -647,10 +647,13 @@ impl ExecutingFrame<'_> { bytecode::Instruction::Continue => self.unwind_blocks(vm, UnwindReason::Continue), bytecode::Instruction::PrintExpr => { let expr = self.pop_value(); - - let displayhook = vm.get_attribute(vm.sys_module.clone(), "displayhook")?; - vm.invoke(&displayhook, vec![expr])?; - + if !expr.is(&vm.get_none()) { + let repr = vm.to_repr(&expr)?; + // TODO: implement sys.displayhook + if let Ok(ref print) = vm.get_attribute(vm.builtins.clone(), "print") { + vm.invoke(print, vec![repr.into_object()])?; + } + } Ok(None) } bytecode::Instruction::LoadBuildClass => { diff --git a/vm/src/obj/objasyncgenerator.rs b/vm/src/obj/objasyncgenerator.rs index c26e2a7c0ed..e209040d02a 100644 --- a/vm/src/obj/objasyncgenerator.rs +++ b/vm/src/obj/objasyncgenerator.rs @@ -4,19 +4,18 @@ use super::objtype::{self, PyClassRef}; use crate::exceptions::PyBaseExceptionRef; use crate::frame::FrameRef; use crate::function::OptionalArg; -use crate::pyobject::{PyClassImpl, PyContext, PyObjectRef, PyRef, PyResult, PyValue, ThreadSafe}; +use crate::pyobject::{PyClassImpl, PyContext, PyObjectRef, PyRef, PyResult, PyValue}; use crate::vm::VirtualMachine; -use crossbeam_utils::atomic::AtomicCell; +use std::cell::Cell; #[pyclass(name = "async_generator")] #[derive(Debug)] pub struct PyAsyncGen { inner: Coro, - running_async: AtomicCell, + running_async: Cell, } pub type PyAsyncGenRef = PyRef; -impl ThreadSafe for PyAsyncGen {} impl PyValue for PyAsyncGen { fn class(vm: &VirtualMachine) -> PyClassRef { @@ -33,7 +32,7 @@ 
impl PyAsyncGen { pub fn new(frame: FrameRef, vm: &VirtualMachine) -> PyAsyncGenRef { PyAsyncGen { inner: Coro::new(frame, Variant::AsyncGen), - running_async: AtomicCell::new(false), + running_async: Cell::new(false), } .into_ref(vm) } @@ -58,7 +57,7 @@ impl PyAsyncGen { fn asend(zelf: PyRef, value: PyObjectRef, _vm: &VirtualMachine) -> PyAsyncGenASend { PyAsyncGenASend { ag: zelf, - state: AtomicCell::new(AwaitableState::Init), + state: Cell::new(AwaitableState::Init), value, } } @@ -74,7 +73,7 @@ impl PyAsyncGen { PyAsyncGenAThrow { ag: zelf, aclose: false, - state: AtomicCell::new(AwaitableState::Init), + state: Cell::new(AwaitableState::Init), value: ( exc_type, exc_val.unwrap_or_else(|| vm.get_none()), @@ -88,7 +87,7 @@ impl PyAsyncGen { PyAsyncGenAThrow { ag: zelf, aclose: true, - state: AtomicCell::new(AwaitableState::Init), + state: Cell::new(AwaitableState::Init), value: ( vm.ctx.exceptions.generator_exit.clone().into_object(), vm.get_none(), @@ -130,15 +129,15 @@ impl PyAsyncGenWrappedValue { if objtype::isinstance(&e, &vm.ctx.exceptions.stop_async_iteration) || objtype::isinstance(&e, &vm.ctx.exceptions.generator_exit) { - ag.inner.closed.store(true); + ag.inner.closed.set(true); } - ag.running_async.store(false); + ag.running_async.set(false); } let val = val?; match_class!(match val { val @ Self => { - ag.running_async.store(false); + ag.running_async.set(false); Err(vm.new_exception( vm.ctx.exceptions.stop_iteration.clone(), vec![val.0.clone()], @@ -160,12 +159,10 @@ enum AwaitableState { #[derive(Debug)] struct PyAsyncGenASend { ag: PyAsyncGenRef, - state: AtomicCell, + state: Cell, value: PyObjectRef, } -impl ThreadSafe for PyAsyncGenASend {} - impl PyValue for PyAsyncGenASend { fn class(vm: &VirtualMachine) -> PyClassRef { vm.ctx.types.async_generator_asend.clone() @@ -190,7 +187,7 @@ impl PyAsyncGenASend { #[pymethod] fn send(&self, val: PyObjectRef, vm: &VirtualMachine) -> PyResult { - let val = match self.state.load() { + let val = match self.state.get() { AwaitableState::Closed => { return Err(vm.new_runtime_error( "cannot reuse already awaited __anext__()/asend()".to_owned(), @@ -198,13 +195,13 @@ impl PyAsyncGenASend { } AwaitableState::Iter => val, // already running, all good AwaitableState::Init => { - if self.ag.running_async.load() { + if self.ag.running_async.get() { return Err(vm.new_runtime_error( "anext(): asynchronous generator is already running".to_owned(), )); } - self.ag.running_async.store(true); - self.state.store(AwaitableState::Iter); + self.ag.running_async.set(true); + self.state.set(AwaitableState::Iter); if vm.is_none(&val) { self.value.clone() } else { @@ -228,7 +225,7 @@ impl PyAsyncGenASend { exc_tb: OptionalArg, vm: &VirtualMachine, ) -> PyResult { - if let AwaitableState::Closed = self.state.load() { + if let AwaitableState::Closed = self.state.get() { return Err( vm.new_runtime_error("cannot reuse already awaited __anext__()/asend()".to_owned()) ); @@ -249,7 +246,7 @@ impl PyAsyncGenASend { #[pymethod] fn close(&self) { - self.state.store(AwaitableState::Closed); + self.state.set(AwaitableState::Closed); } } @@ -258,12 +255,10 @@ impl PyAsyncGenASend { struct PyAsyncGenAThrow { ag: PyAsyncGenRef, aclose: bool, - state: AtomicCell, + state: Cell, value: (PyObjectRef, PyObjectRef, PyObjectRef), } -impl ThreadSafe for PyAsyncGenAThrow {} - impl PyValue for PyAsyncGenAThrow { fn class(vm: &VirtualMachine) -> PyClassRef { vm.ctx.types.async_generator_athrow.clone() @@ -288,14 +283,14 @@ impl PyAsyncGenAThrow { #[pymethod] fn send(&self, val: 
PyObjectRef, vm: &VirtualMachine) -> PyResult { - match self.state.load() { + match self.state.get() { AwaitableState::Closed => { Err(vm .new_runtime_error("cannot reuse already awaited aclose()/athrow()".to_owned())) } AwaitableState::Init => { - if self.ag.running_async.load() { - self.state.store(AwaitableState::Closed); + if self.ag.running_async.get() { + self.state.set(AwaitableState::Closed); let msg = if self.aclose { "aclose(): asynchronous generator is already running" } else { @@ -304,7 +299,7 @@ impl PyAsyncGenAThrow { return Err(vm.new_runtime_error(msg.to_owned())); } if self.ag.inner.closed() { - self.state.store(AwaitableState::Closed); + self.state.set(AwaitableState::Closed); return Err(vm.new_exception_empty(vm.ctx.exceptions.stop_iteration.clone())); } if !vm.is_none(&val) { @@ -312,8 +307,8 @@ impl PyAsyncGenAThrow { "can't send non-None value to a just-started async generator".to_owned(), )); } - self.state.store(AwaitableState::Iter); - self.ag.running_async.store(true); + self.state.set(AwaitableState::Iter); + self.ag.running_async.set(true); let (ty, val, tb) = self.value.clone(); let ret = self.ag.inner.throw(ty, val, tb, vm); @@ -373,7 +368,7 @@ impl PyAsyncGenAThrow { #[pymethod] fn close(&self) { - self.state.store(AwaitableState::Closed); + self.state.set(AwaitableState::Closed); } fn ignored_close(&self, res: &PyResult) -> bool { @@ -381,13 +376,13 @@ impl PyAsyncGenAThrow { .map_or(false, |v| v.payload_is::()) } fn yield_close(&self, vm: &VirtualMachine) -> PyBaseExceptionRef { - self.ag.running_async.store(false); - self.state.store(AwaitableState::Closed); + self.ag.running_async.set(false); + self.state.set(AwaitableState::Closed); vm.new_runtime_error("async generator ignored GeneratorExit".to_owned()) } fn check_error(&self, exc: PyBaseExceptionRef, vm: &VirtualMachine) -> PyBaseExceptionRef { - self.ag.running_async.store(false); - self.state.store(AwaitableState::Closed); + self.ag.running_async.set(false); + self.state.set(AwaitableState::Closed); if self.aclose && (objtype::isinstance(&exc, &vm.ctx.exceptions.stop_async_iteration) || objtype::isinstance(&exc, &vm.ctx.exceptions.generator_exit)) diff --git a/vm/src/obj/objbool.rs b/vm/src/obj/objbool.rs index e67a39dc2ba..56d21bb5c5e 100644 --- a/vm/src/obj/objbool.rs +++ b/vm/src/obj/objbool.rs @@ -187,7 +187,7 @@ pub fn get_py_int(obj: &PyObjectRef) -> &PyInt { &obj.payload::().unwrap() } -#[derive(Debug, Default, Copy, Clone, PartialEq)] +#[derive(Debug, Copy, Clone, PartialEq)] pub struct IntoPyBool { value: bool, } diff --git a/vm/src/obj/objcoroinner.rs b/vm/src/obj/objcoroinner.rs index 4cbb0940b45..6b504d1892a 100644 --- a/vm/src/obj/objcoroinner.rs +++ b/vm/src/obj/objcoroinner.rs @@ -1,11 +1,10 @@ use super::objtype::{self, PyClassRef}; use crate::exceptions::{self, PyBaseExceptionRef}; use crate::frame::{ExecutionResult, FrameRef}; -use crate::pyobject::{PyObjectRef, PyResult, ThreadSafe}; +use crate::pyobject::{PyObjectRef, PyResult}; use crate::vm::VirtualMachine; -use crossbeam_utils::atomic::AtomicCell; -use std::sync::RwLock; +use std::cell::{Cell, RefCell}; #[derive(Debug, PartialEq, Clone, Copy)] pub enum Variant { @@ -35,30 +34,28 @@ impl Variant { #[derive(Debug)] pub struct Coro { frame: FrameRef, - pub closed: AtomicCell, - running: AtomicCell, - exceptions: RwLock>, - started: AtomicCell, + pub closed: Cell, + running: Cell, + exceptions: RefCell>, + started: Cell, variant: Variant, } -impl ThreadSafe for Coro {} - impl Coro { pub fn new(frame: FrameRef, variant: Variant) -> 
Self { Coro { frame, - closed: AtomicCell::new(false), - running: AtomicCell::new(false), - exceptions: RwLock::new(vec![]), - started: AtomicCell::new(false), + closed: Cell::new(false), + running: Cell::new(false), + exceptions: RefCell::new(vec![]), + started: Cell::new(false), variant, } } fn maybe_close(&self, res: &PyResult) { match res { - Ok(ExecutionResult::Return(_)) | Err(_) => self.closed.store(true), + Ok(ExecutionResult::Return(_)) | Err(_) => self.closed.set(true), Ok(ExecutionResult::Yield(_)) => {} } } @@ -67,29 +64,27 @@ impl Coro { where F: FnOnce(FrameRef) -> PyResult, { - self.running.store(true); + self.running.set(true); let curr_exception_stack_len = vm.exceptions.borrow().len(); vm.exceptions .borrow_mut() - .append(&mut self.exceptions.write().unwrap()); + .append(&mut self.exceptions.borrow_mut()); let result = vm.with_frame(self.frame.clone(), func); - std::mem::swap( - &mut *self.exceptions.write().unwrap(), - &mut vm - .exceptions + self.exceptions.replace( + vm.exceptions .borrow_mut() .split_off(curr_exception_stack_len), ); - self.running.store(false); - self.started.store(true); + self.running.set(false); + self.started.set(true); result } pub fn send(&self, value: PyObjectRef, vm: &VirtualMachine) -> PyResult { - if self.closed.load() { + if self.closed.get() { return Err(vm.new_exception_empty(self.variant.stop_iteration(vm))); } - if !self.started.load() && !vm.is_none(&value) { + if !self.started.get() && !vm.is_none(&value) { return Err(vm.new_type_error(format!( "can't send non-None value to a just-started {}", self.variant.name() @@ -125,7 +120,7 @@ impl Coro { exc_tb: PyObjectRef, vm: &VirtualMachine, ) -> PyResult { - if self.closed.load() { + if self.closed.get() { return Err(exceptions::normalize(exc_type, exc_val, exc_tb, vm)?); } let result = self.run_with_context(vm, |f| f.gen_throw(vm, exc_type, exc_val, exc_tb)); @@ -134,7 +129,7 @@ impl Coro { } pub fn close(&self, vm: &VirtualMachine) -> PyResult<()> { - if self.closed.load() { + if self.closed.get() { return Ok(()); } let result = self.run_with_context(vm, |f| { @@ -145,7 +140,7 @@ impl Coro { vm.get_none(), ) }); - self.closed.store(true); + self.closed.set(true); match result { Ok(ExecutionResult::Yield(_)) => { Err(vm.new_runtime_error(format!("{} ignored GeneratorExit", self.variant.name()))) @@ -156,13 +151,13 @@ impl Coro { } pub fn started(&self) -> bool { - self.started.load() + self.started.get() } pub fn running(&self) -> bool { - self.running.load() + self.running.get() } pub fn closed(&self) -> bool { - self.closed.load() + self.closed.get() } pub fn frame(&self) -> FrameRef { self.frame.clone() diff --git a/vm/src/obj/objcoroutine.rs b/vm/src/obj/objcoroutine.rs index 48ccfe4dc15..d1faacc2be0 100644 --- a/vm/src/obj/objcoroutine.rs +++ b/vm/src/obj/objcoroutine.rs @@ -4,7 +4,7 @@ use super::objstr::PyStringRef; use super::objtype::PyClassRef; use crate::frame::FrameRef; use crate::function::OptionalArg; -use crate::pyobject::{PyClassImpl, PyContext, PyObjectRef, PyRef, PyResult, PyValue, ThreadSafe}; +use crate::pyobject::{PyClassImpl, PyContext, PyObjectRef, PyRef, PyResult, PyValue}; use crate::vm::VirtualMachine; pub type PyCoroutineRef = PyRef; @@ -15,8 +15,6 @@ pub struct PyCoroutine { inner: Coro, } -impl ThreadSafe for PyCoroutine {} - impl PyValue for PyCoroutine { fn class(vm: &VirtualMachine) -> PyClassRef { vm.ctx.types.coroutine_type.clone() @@ -103,8 +101,6 @@ pub struct PyCoroutineWrapper { coro: PyCoroutineRef, } -impl ThreadSafe for PyCoroutineWrapper {} - 
impl PyValue for PyCoroutineWrapper { fn class(vm: &VirtualMachine) -> PyClassRef { vm.ctx.types.coroutine_wrapper_type.clone() diff --git a/vm/src/obj/objfunction.rs b/vm/src/obj/objfunction.rs index c6d24fe6f17..7386772ea0b 100644 --- a/vm/src/obj/objfunction.rs +++ b/vm/src/obj/objfunction.rs @@ -336,11 +336,6 @@ impl PyBoundMethod { fn func(&self) -> PyObjectRef { self.function.clone() } - - #[pyproperty(magic)] - fn module(&self, vm: &VirtualMachine) -> Option { - vm.get_attribute(self.function.clone(), "__module__").ok() - } } impl PyValue for PyBoundMethod { diff --git a/vm/src/obj/objgenerator.rs b/vm/src/obj/objgenerator.rs index b2044621f92..e91e24406c5 100644 --- a/vm/src/obj/objgenerator.rs +++ b/vm/src/obj/objgenerator.rs @@ -7,7 +7,7 @@ use super::objcoroinner::{Coro, Variant}; use super::objtype::PyClassRef; use crate::frame::FrameRef; use crate::function::OptionalArg; -use crate::pyobject::{PyClassImpl, PyContext, PyObjectRef, PyRef, PyResult, PyValue, ThreadSafe}; +use crate::pyobject::{PyClassImpl, PyContext, PyObjectRef, PyRef, PyResult, PyValue}; use crate::vm::VirtualMachine; pub type PyGeneratorRef = PyRef; @@ -18,8 +18,6 @@ pub struct PyGenerator { inner: Coro, } -impl ThreadSafe for PyGenerator {} - impl PyValue for PyGenerator { fn class(vm: &VirtualMachine) -> PyClassRef { vm.ctx.generator_type() diff --git a/vm/src/stdlib/os.rs b/vm/src/stdlib/os.rs index bd2a4e65367..e70061bc262 100644 --- a/vm/src/stdlib/os.rs +++ b/vm/src/stdlib/os.rs @@ -1194,11 +1194,6 @@ fn os_getpgid(pid: u32, vm: &VirtualMachine) -> PyResult { } } -#[cfg(unix)] -fn os_getpgrp(vm: &VirtualMachine) -> PyResult { - Ok(vm.new_int(unistd::getpgrp().as_raw())) -} - #[cfg(all(unix, not(target_os = "redox")))] fn os_getsid(pid: u32, vm: &VirtualMachine) -> PyResult { match unistd::getsid(Some(Pid::from_raw(pid as i32))) { @@ -1449,14 +1444,6 @@ fn os_utime( unimplemented!("utime") } -#[cfg(unix)] -fn os_sync(_vm: &VirtualMachine) -> PyResult<()> { - unsafe { - libc::sync(); - } - Ok(()) -} - pub fn make_module(vm: &VirtualMachine) -> PyObjectRef { let ctx = &vm.ctx; @@ -1633,14 +1620,12 @@ fn extend_module_platform_specific(vm: &VirtualMachine, module: &PyObjectRef) { "getegid" => ctx.new_function(os_getegid), "getpgid" => ctx.new_function(os_getpgid), "getuid" => ctx.new_function(os_getuid), - "getpgrp" => ctx.new_function(os_getpgrp), "geteuid" => ctx.new_function(os_geteuid), "pipe" => ctx.new_function(os_pipe), //TODO: windows "set_blocking" => ctx.new_function(os_set_blocking), "setgid" => ctx.new_function(os_setgid), "setpgid" => ctx.new_function(os_setpgid), "setuid" => ctx.new_function(os_setuid), - "sync" => ctx.new_function(os_sync), "system" => ctx.new_function(os_system), "ttyname" => ctx.new_function(os_ttyname), "uname" => ctx.new_function(os_uname), diff --git a/vm/src/sysmodule.rs b/vm/src/sysmodule.rs index 73d360d8065..52ab87699f3 100644 --- a/vm/src/sysmodule.rs +++ b/vm/src/sysmodule.rs @@ -1,9 +1,8 @@ use std::sync::Arc; use std::{env, mem}; -use crate::builtins; use crate::frame::FrameRef; -use crate::function::{Args, OptionalArg, PyFuncArgs}; +use crate::function::OptionalArg; use crate::obj::objstr::PyStringRef; use crate::pyhash::PyHashInfo; use crate::pyobject::{ @@ -203,24 +202,6 @@ fn sys_exit(code: OptionalArg, vm: &VirtualMachine) -> PyResult { Err(vm.new_exception(vm.ctx.exceptions.system_exit.clone(), vec![code])) } -fn sys_audit(_args: PyFuncArgs) { - // TODO: sys.audit implementation -} - -fn sys_displayhook(obj: PyObjectRef, vm: &VirtualMachine) -> PyResult<()> 
{ - // Save non-None values as "_" - if vm.is_none(&obj) { - return Ok(()); - } - // set to none to avoid recursion while printing - vm.set_attr(&vm.builtins, "_", vm.get_none())?; - // TODO: catch encoding errors - let repr = vm.to_repr(&obj)?.into_object(); - builtins::builtin_print(Args::new(vec![repr]), Default::default(), vm)?; - vm.set_attr(&vm.builtins, "_", obj)?; - Ok(()) -} - pub fn make_module(vm: &VirtualMachine, module: PyObjectRef, builtins: PyObjectRef) { let ctx = &vm.ctx; @@ -413,9 +394,6 @@ settrace() -- set the global debug tracing function "base_exec_prefix" => ctx.new_str(base_exec_prefix.to_owned()), "exit" => ctx.new_function(sys_exit), "abiflags" => ctx.new_str("".to_owned()), - "audit" => ctx.new_function(sys_audit), - "displayhook" => ctx.new_function(sys_displayhook), - "__displayhook__" => ctx.new_function(sys_displayhook), }); modules.set_item("sys", module.clone(), vm).unwrap(); diff --git a/wasm/lib/Cargo.toml b/wasm/lib/Cargo.toml index 677fc5bbce4..69eaa1b1e92 100644 --- a/wasm/lib/Cargo.toml +++ b/wasm/lib/Cargo.toml @@ -25,7 +25,6 @@ serde-wasm-bindgen = "0.1" serde = "1.0" js-sys = "0.3" futures = "0.1" -generational-arena = "0.2" [dependencies.web-sys] version = "0.3" @@ -41,4 +40,4 @@ features = [ ] [package.metadata.wasm-pack.profile.release] -wasm-opt = false#["-O1"] +wasm-opt = ["-O1"] diff --git a/wasm/lib/src/convert.rs b/wasm/lib/src/convert.rs index a352b3fa707..353f2b199cd 100644 --- a/wasm/lib/src/convert.rs +++ b/wasm/lib/src/convert.rs @@ -1,4 +1,3 @@ -use generational_arena::Arena; use std::cell::RefCell; use js_sys::{Array, ArrayBuffer, Object, Promise, Reflect, SyntaxError, Uint8Array}; @@ -17,29 +16,7 @@ use crate::browser_module; use crate::vm_class::{stored_vm_from_wasm, WASMVirtualMachine}; // Currently WASM do not support multithreading. We should change this once it is enabled. 
-thread_local!(static JS_HANDLES: RefCell> = RefCell::new(Arena::new())); - -pub struct JsHandle(generational_arena::Index); -impl JsHandle { - pub fn new(js: JsValue) -> Self { - let idx = JS_HANDLES.with(|arena| arena.borrow_mut().insert(js)); - JsHandle(idx) - } - pub fn get(&self) -> JsValue { - JS_HANDLES.with(|arena| { - arena - .borrow() - .get(self.0) - .expect("index was removed") - .clone() - }) - } -} -impl Drop for JsHandle { - fn drop(&mut self) { - JS_HANDLES.with(|arena| arena.borrow_mut().remove(self.0)); - } -} +thread_local!(static JS_FUNCS: RefCell> = RefCell::new(vec![])); #[wasm_bindgen(inline_js = r" export class PyError extends Error { @@ -218,22 +195,32 @@ pub fn js_to_py(vm: &VirtualMachine, js_val: JsValue) -> PyObjectRef { dict.into_object() } } else if js_val.is_function() { - let js_handle = JsHandle::new(js_val); + let func = js_sys::Function::from(js_val); + let idx = JS_FUNCS.with(|funcs| { + let mut funcs = funcs.borrow_mut(); + funcs.push(func); + funcs.len() - 1 + }); vm.ctx .new_method(move |vm: &VirtualMachine, args: PyFuncArgs| -> PyResult { - let this = Object::new(); - for (k, v) in args.kwargs { - Reflect::set(&this, &k.into(), &py_to_js(vm, v)) - .expect("property to be settable"); - } - let js_args = Array::new(); - for v in args.args { - js_args.push(&py_to_js(vm, v)); - } - let func = js_sys::Function::from(js_handle.get()); - func.apply(&this, &js_args) - .map(|val| js_to_py(vm, val)) - .map_err(|err| js_err_to_py_err(vm, &err)) + JS_FUNCS.with(|funcs| { + let this = Object::new(); + for (k, v) in args.kwargs { + Reflect::set(&this, &k.into(), &py_to_js(vm, v)) + .expect("property to be settable"); + } + let js_args = Array::new(); + for v in args.args { + js_args.push(&py_to_js(vm, v)); + } + funcs + .borrow() + .get(idx) + .unwrap() + .apply(&this, &js_args) + .map(|val| js_to_py(vm, val)) + .map_err(|err| js_err_to_py_err(vm, &err)) + }) }) } else if let Some(err) = js_val.dyn_ref::() { js_err_to_py_err(vm, err).into_object() diff --git a/wasm/lib/src/vm_class.rs b/wasm/lib/src/vm_class.rs index 199c0b5a5d6..8130e0c636c 100644 --- a/wasm/lib/src/vm_class.rs +++ b/wasm/lib/src/vm_class.rs @@ -6,12 +6,15 @@ use js_sys::{Object, TypeError}; use wasm_bindgen::prelude::*; use rustpython_compiler::compile; -use rustpython_vm::pyobject::{ItemProtocol, PyObject, PyObjectPayload, PyObjectRef, PyValue}; +use rustpython_vm::function::PyFuncArgs; +use rustpython_vm::pyobject::{ + ItemProtocol, PyObject, PyObjectPayload, PyObjectRef, PyResult, PyValue, +}; use rustpython_vm::scope::{NameProtocol, Scope}; use rustpython_vm::{InitParameter, PySettings, VirtualMachine}; use crate::browser_module::setup_browser_module; -use crate::convert::{self, JsHandle, PyResultExt}; +use crate::convert::{self, PyResultExt}; use crate::js_module; use crate::wasm_builtins; use rustpython_compiler::mode::Mode; @@ -64,6 +67,7 @@ impl StoredVirtualMachine { // https://rustwasm.github.io/2018/10/24/multithreading-rust-and-wasm.html#atomic-instructions thread_local! 
{ static STORED_VMS: RefCell>> = RefCell::default(); + static JS_PRINT_FUNC: RefCell> = RefCell::new(None); } pub fn get_vm_id(vm: &VirtualMachine) -> &str { @@ -219,28 +223,39 @@ impl WASMVirtualMachine { fn error() -> JsValue { TypeError::new("Unknown stdout option, please pass a function or 'console'").into() } - use wasm_builtins::make_stdout_object; - let stdout: PyObjectRef = if let Some(s) = stdout.as_string() { + let print_fn: PyObjectRef = if let Some(s) = stdout.as_string() { match s.as_str() { - "console" => make_stdout_object(vm, wasm_builtins::sys_stdout_write_console), + "console" => vm.ctx.new_method(wasm_builtins::builtin_print_console), _ => return Err(error()), } } else if stdout.is_function() { - let func_handle = JsHandle::new(stdout); - make_stdout_object(vm, move |data, vm| { - let func = js_sys::Function::from(func_handle.get()); - func.call1(&JsValue::UNDEFINED, &data.into()) - .map_err(|err| convert::js_py_typeerror(vm, err))?; - Ok(()) - }) + let func = js_sys::Function::from(stdout); + JS_PRINT_FUNC.with(|thread_func| thread_func.replace(Some(func.clone()))); + vm.ctx + .new_method(move |vm: &VirtualMachine, args: PyFuncArgs| -> PyResult { + JS_PRINT_FUNC.with(|func| { + func.borrow() + .as_ref() + .unwrap() + .call1( + &JsValue::UNDEFINED, + &wasm_builtins::format_print_args(vm, args)?.into(), + ) + .map_err(|err| convert::js_py_typeerror(vm, err))?; + Ok(vm.get_none()) + }) + }) } else if stdout.is_null() { - make_stdout_object(vm, |_, _| Ok(())) + fn noop(vm: &VirtualMachine, _args: PyFuncArgs) -> PyResult { + Ok(vm.get_none()) + } + vm.ctx.new_method(noop) } else if stdout.is_undefined() { - make_stdout_object(vm, wasm_builtins::sys_stdout_write_console) + vm.ctx.new_method(wasm_builtins::builtin_print_console) } else { return Err(error()); }; - vm.set_attr(&vm.sys_module, "stdout", stdout).unwrap(); + vm.set_attr(&vm.builtins, "print", print_fn).unwrap(); Ok(()) })? } diff --git a/wasm/lib/src/wasm_builtins.rs b/wasm/lib/src/wasm_builtins.rs index 5b0b9fd6ebb..7fc6f65346e 100644 --- a/wasm/lib/src/wasm_builtins.rs +++ b/wasm/lib/src/wasm_builtins.rs @@ -4,37 +4,75 @@ //! desktop. //! Implements functions listed here: https://docs.python.org/3/library/builtins.html. 
+use js_sys::{self, Array}; use web_sys::{self, console}; -use rustpython_vm::obj::objstr::PyStringRef; -use rustpython_vm::pyobject::{PyObjectRef, PyResult}; +use rustpython_vm::function::PyFuncArgs; +use rustpython_vm::obj::{objstr, objtype}; +use rustpython_vm::pyobject::{IdProtocol, PyResult, TypeProtocol}; use rustpython_vm::VirtualMachine; pub(crate) fn window() -> web_sys::Window { web_sys::window().expect("Window to be available") } -pub fn sys_stdout_write_console(data: &str, _vm: &VirtualMachine) -> PyResult<()> { - console::log_1(&data.into()); - Ok(()) +pub fn format_print_args(vm: &VirtualMachine, args: PyFuncArgs) -> PyResult { + // Handle 'sep' kwarg: + let sep_arg = args + .get_optional_kwarg("sep") + .filter(|obj| !obj.is(&vm.get_none())); + if let Some(ref obj) = sep_arg { + if !objtype::isinstance(obj, &vm.ctx.str_type()) { + return Err(vm.new_type_error(format!( + "sep must be None or a string, not {}", + obj.class().name + ))); + } + } + let sep_str = sep_arg.as_ref().map(|obj| objstr::borrow_value(obj)); + + // Handle 'end' kwarg: + let end_arg = args + .get_optional_kwarg("end") + .filter(|obj| !obj.is(&vm.get_none())); + if let Some(ref obj) = end_arg { + if !objtype::isinstance(obj, &vm.ctx.str_type()) { + return Err(vm.new_type_error(format!( + "end must be None or a string, not {}", + obj.class().name + ))); + } + } + let end_str = end_arg.as_ref().map(|obj| objstr::borrow_value(obj)); + + // No need to handle 'flush' kwarg, irrelevant when writing to String + + let mut output = String::new(); + let mut first = true; + for a in args.args { + if first { + first = false; + } else if let Some(ref sep_str) = sep_str { + output.push_str(sep_str); + } else { + output.push(' '); + } + output.push_str(&vm.to_pystr(&a)?); + } + + if let Some(end_str) = end_str { + output.push_str(end_str.as_ref()) + } else { + output.push('\n'); + } + Ok(output) } -pub fn make_stdout_object( - vm: &VirtualMachine, - write_f: impl Fn(&str, &VirtualMachine) -> PyResult<()> + Send + Sync + 'static, -) -> PyObjectRef { - let ctx = &vm.ctx; - let write_method = ctx.new_method( - move |_self: PyObjectRef, data: PyStringRef, vm: &VirtualMachine| -> PyResult<()> { - write_f(data.as_str(), vm) - }, - ); - let flush_method = ctx.new_method(|_self: PyObjectRef| {}); - // there's not really any point to storing this class so that there's a consistent type object, - // we just want a half-decent repr() output - let cls = py_class!(ctx, "JSStdout", vm.ctx.object(), { - "write" => write_method, - "flush" => flush_method, - }); - ctx.new_base_object(cls, None) +pub fn builtin_print_console(vm: &VirtualMachine, args: PyFuncArgs) -> PyResult { + let arr = Array::new(); + for arg in args.args { + arr.push(&vm.to_pystr(&arg)?.into()); + } + console::log(&arr); + Ok(vm.get_none()) } diff --git a/wasm/tests/test_exec_mode.py b/wasm/tests/test_exec_mode.py index a2a55846f48..669d7049db1 100644 --- a/wasm/tests/test_exec_mode.py +++ b/wasm/tests/test_exec_mode.py @@ -1,21 +1,18 @@ def test_eval_mode(wdriver): assert wdriver.execute_script("return window.rp.pyEval('1+1')") == 2 - def test_exec_mode(wdriver): assert wdriver.execute_script("return window.rp.pyExec('1+1')") is None - def test_exec_single_mode(wdriver): assert wdriver.execute_script("return window.rp.pyExecSingle('1+1')") == 2 - stdout = wdriver.execute_script( + assert wdriver.execute_script( """ - let output = ""; + var output = []; save_output = function(text) {{ - output += text + output.push(text) }}; window.rp.pyExecSingle('1+1\\n2+2',{stdout: 
save_output}); return output; """ - ) - assert stdout == "2\n4\n" + ) == ["2\n", "4\n"] diff --git a/wasm/tests/test_inject_module.py b/wasm/tests/test_inject_module.py index afa25250d95..462ed653537 100644 --- a/wasm/tests/test_inject_module.py +++ b/wasm/tests/test_inject_module.py @@ -19,3 +19,4 @@ def get_thing(): return __thing() ); """ ) + From 4dbd0718398d92f4ab24b03c01c5fe5302d63c06 Mon Sep 17 00:00:00 2001 From: TheAnyKey <32773684+TheAnyKey@users.noreply.github.com> Date: Sat, 9 May 2020 20:16:37 +0200 Subject: [PATCH 38/39] Revert "Revert "Merge remote-tracking branch 'upstream/master'"" This reverts commit 44f141c367617c430e739ef8abfbff7fd1003007. --- Cargo.lock | 10 + Lib/bdb.py | 880 ++++++++++ Lib/cmd.py | 401 +++++ Lib/doctest.py | 2786 ++++++++++++++++++++++++++++++ Lib/pdb.py | 1730 +++++++++++++++++++ Lib/test/test_json/__init__.py | 6 +- README.md | 9 +- bytecode/src/bytecode.rs | 4 + parser/src/lexer.rs | 4 +- src/shell.rs | 19 +- vm/src/builtins.rs | 2 +- vm/src/exceptions.rs | 62 +- vm/src/frame.rs | 11 +- vm/src/obj/objasyncgenerator.rs | 63 +- vm/src/obj/objbool.rs | 2 +- vm/src/obj/objcoroinner.rs | 55 +- vm/src/obj/objcoroutine.rs | 6 +- vm/src/obj/objfunction.rs | 5 + vm/src/obj/objgenerator.rs | 4 +- vm/src/stdlib/os.rs | 15 + vm/src/sysmodule.rs | 24 +- wasm/lib/Cargo.toml | 3 +- wasm/lib/src/convert.rs | 63 +- wasm/lib/src/vm_class.rs | 45 +- wasm/lib/src/wasm_builtins.rs | 84 +- wasm/tests/test_exec_mode.py | 11 +- wasm/tests/test_inject_module.py | 1 - 27 files changed, 6065 insertions(+), 240 deletions(-) create mode 100644 Lib/bdb.py create mode 100644 Lib/cmd.py create mode 100644 Lib/doctest.py create mode 100755 Lib/pdb.py diff --git a/Cargo.lock b/Cargo.lock index 1c279a2aaf6..023c6240ce8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -618,6 +618,15 @@ version = "0.1.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b980f2816d6ee8673b6517b52cb0e808a180efc92e5c19d02cdda79066703ef" +[[package]] +name = "generational-arena" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "921c3803adaeb9f9639de5149d9f0f9f4b79f00c423915b701db2e02ed80b9ce" +dependencies = [ + "cfg-if", +] + [[package]] name = "generic-array" version = "0.12.3" @@ -1642,6 +1651,7 @@ version = "0.1.0-pre-alpha.2" dependencies = [ "cfg-if", "futures", + "generational-arena", "js-sys", "rustpython-compiler", "rustpython-parser", diff --git a/Lib/bdb.py b/Lib/bdb.py new file mode 100644 index 00000000000..18491da8973 --- /dev/null +++ b/Lib/bdb.py @@ -0,0 +1,880 @@ +"""Debugger basics""" + +import fnmatch +import sys +import os +from inspect import CO_GENERATOR, CO_COROUTINE, CO_ASYNC_GENERATOR + +__all__ = ["BdbQuit", "Bdb", "Breakpoint"] + +GENERATOR_AND_COROUTINE_FLAGS = CO_GENERATOR | CO_COROUTINE | CO_ASYNC_GENERATOR + + +class BdbQuit(Exception): + """Exception to give up completely.""" + + +class Bdb: + """Generic Python debugger base class. + + This class takes care of details of the trace facility; + a derived class should implement user interaction. + The standard debugger class (pdb.Pdb) is an example. + + The optional skip argument must be an iterable of glob-style + module name patterns. The debugger will not step into frames + that originate in a module that matches one of these patterns. + Whether a frame is considered to originate in a certain module + is determined by the __name__ in the frame globals. 
+ """ + + def __init__(self, skip=None): + self.skip = set(skip) if skip else None + self.breaks = {} + self.fncache = {} + self.frame_returning = None + + def canonic(self, filename): + """Return canonical form of filename. + + For real filenames, the canonical form is a case-normalized (on + case insensitive filesystems) absolute path. 'Filenames' with + angle brackets, such as "", generated in interactive + mode, are returned unchanged. + """ + if filename == "<" + filename[1:-1] + ">": + return filename + canonic = self.fncache.get(filename) + if not canonic: + canonic = os.path.abspath(filename) + canonic = os.path.normcase(canonic) + self.fncache[filename] = canonic + return canonic + + def reset(self): + """Set values of attributes as ready to start debugging.""" + import linecache + linecache.checkcache() + self.botframe = None + self._set_stopinfo(None, None) + + def trace_dispatch(self, frame, event, arg): + """Dispatch a trace function for debugged frames based on the event. + + This function is installed as the trace function for debugged + frames. Its return value is the new trace function, which is + usually itself. The default implementation decides how to + dispatch a frame, depending on the type of event (passed in as a + string) that is about to be executed. + + The event can be one of the following: + line: A new line of code is going to be executed. + call: A function is about to be called or another code block + is entered. + return: A function or other code block is about to return. + exception: An exception has occurred. + c_call: A C function is about to be called. + c_return: A C function has returned. + c_exception: A C function has raised an exception. + + For the Python events, specialized functions (see the dispatch_*() + methods) are called. For the C events, no action is taken. + + The arg parameter depends on the previous event. + """ + if self.quitting: + return # None + if event == 'line': + return self.dispatch_line(frame) + if event == 'call': + return self.dispatch_call(frame, arg) + if event == 'return': + return self.dispatch_return(frame, arg) + if event == 'exception': + return self.dispatch_exception(frame, arg) + if event == 'c_call': + return self.trace_dispatch + if event == 'c_exception': + return self.trace_dispatch + if event == 'c_return': + return self.trace_dispatch + print('bdb.Bdb.dispatch: unknown debugging event:', repr(event)) + return self.trace_dispatch + + def dispatch_line(self, frame): + """Invoke user function and return trace function for line event. + + If the debugger stops on the current line, invoke + self.user_line(). Raise BdbQuit if self.quitting is set. + Return self.trace_dispatch to continue tracing in this scope. + """ + if self.stop_here(frame) or self.break_here(frame): + self.user_line(frame) + if self.quitting: raise BdbQuit + return self.trace_dispatch + + def dispatch_call(self, frame, arg): + """Invoke user function and return trace function for call event. + + If the debugger stops on this function call, invoke + self.user_call(). Raise BbdQuit if self.quitting is set. + Return self.trace_dispatch to continue tracing in this scope. + """ + # XXX 'arg' is no longer used + if self.botframe is None: + # First call of dispatch since reset() + self.botframe = frame.f_back # (CT) Note that this may also be None! + return self.trace_dispatch + if not (self.stop_here(frame) or self.break_anywhere(frame)): + # No need to trace this function + return # None + # Ignore call events in generator except when stepping. 
+ if self.stopframe and frame.f_code.co_flags & GENERATOR_AND_COROUTINE_FLAGS: + return self.trace_dispatch + self.user_call(frame, arg) + if self.quitting: raise BdbQuit + return self.trace_dispatch + + def dispatch_return(self, frame, arg): + """Invoke user function and return trace function for return event. + + If the debugger stops on this function return, invoke + self.user_return(). Raise BdbQuit if self.quitting is set. + Return self.trace_dispatch to continue tracing in this scope. + """ + if self.stop_here(frame) or frame == self.returnframe: + # Ignore return events in generator except when stepping. + if self.stopframe and frame.f_code.co_flags & GENERATOR_AND_COROUTINE_FLAGS: + return self.trace_dispatch + try: + self.frame_returning = frame + self.user_return(frame, arg) + finally: + self.frame_returning = None + if self.quitting: raise BdbQuit + # The user issued a 'next' or 'until' command. + if self.stopframe is frame and self.stoplineno != -1: + self._set_stopinfo(None, None) + return self.trace_dispatch + + def dispatch_exception(self, frame, arg): + """Invoke user function and return trace function for exception event. + + If the debugger stops on this exception, invoke + self.user_exception(). Raise BdbQuit if self.quitting is set. + Return self.trace_dispatch to continue tracing in this scope. + """ + if self.stop_here(frame): + # When stepping with next/until/return in a generator frame, skip + # the internal StopIteration exception (with no traceback) + # triggered by a subiterator run with the 'yield from' statement. + if not (frame.f_code.co_flags & GENERATOR_AND_COROUTINE_FLAGS + and arg[0] is StopIteration and arg[2] is None): + self.user_exception(frame, arg) + if self.quitting: raise BdbQuit + # Stop at the StopIteration or GeneratorExit exception when the user + # has set stopframe in a generator by issuing a return command, or a + # next/until command at the last statement in the generator before the + # exception. + elif (self.stopframe and frame is not self.stopframe + and self.stopframe.f_code.co_flags & GENERATOR_AND_COROUTINE_FLAGS + and arg[0] in (StopIteration, GeneratorExit)): + self.user_exception(frame, arg) + if self.quitting: raise BdbQuit + + return self.trace_dispatch + + # Normally derived classes don't override the following + # methods, but they may if they want to redefine the + # definition of stopping and breakpoints. + + def is_skipped_module(self, module_name): + "Return True if module_name matches any skip pattern." + if module_name is None: # some modules do not have names + return False + for pattern in self.skip: + if fnmatch.fnmatch(module_name, pattern): + return True + return False + + def stop_here(self, frame): + "Return True if frame is below the starting frame in the stack." + # (CT) stopframe may now also be None, see dispatch_call. + # (CT) the former test for None is therefore removed from here. + if self.skip and \ + self.is_skipped_module(frame.f_globals.get('__name__')): + return False + if frame is self.stopframe: + if self.stoplineno == -1: + return False + return frame.f_lineno >= self.stoplineno + if not self.stopframe: + return True + return False + + def break_here(self, frame): + """Return True if there is an effective breakpoint for this line. + + Check for line or function breakpoint and if in effect. + Delete temporary breakpoints if effective() says to. 
+ """ + filename = self.canonic(frame.f_code.co_filename) + if filename not in self.breaks: + return False + lineno = frame.f_lineno + if lineno not in self.breaks[filename]: + # The line itself has no breakpoint, but maybe the line is the + # first line of a function with breakpoint set by function name. + lineno = frame.f_code.co_firstlineno + if lineno not in self.breaks[filename]: + return False + + # flag says ok to delete temp. bp + (bp, flag) = effective(filename, lineno, frame) + if bp: + self.currentbp = bp.number + if (flag and bp.temporary): + self.do_clear(str(bp.number)) + return True + else: + return False + + def do_clear(self, arg): + """Remove temporary breakpoint. + + Must implement in derived classes or get NotImplementedError. + """ + raise NotImplementedError("subclass of bdb must implement do_clear()") + + def break_anywhere(self, frame): + """Return True if there is any breakpoint for frame's filename. + """ + return self.canonic(frame.f_code.co_filename) in self.breaks + + # Derived classes should override the user_* methods + # to gain control. + + def user_call(self, frame, argument_list): + """Called if we might stop in a function.""" + pass + + def user_line(self, frame): + """Called when we stop or break at a line.""" + pass + + def user_return(self, frame, return_value): + """Called when a return trap is set here.""" + pass + + def user_exception(self, frame, exc_info): + """Called when we stop on an exception.""" + pass + + def _set_stopinfo(self, stopframe, returnframe, stoplineno=0): + """Set the attributes for stopping. + + If stoplineno is greater than or equal to 0, then stop at line + greater than or equal to the stopline. If stoplineno is -1, then + don't stop at all. + """ + self.stopframe = stopframe + self.returnframe = returnframe + self.quitting = False + # stoplineno >= 0 means: stop at line >= the stoplineno + # stoplineno -1 means: don't stop at all + self.stoplineno = stoplineno + + # Derived classes and clients can call the following methods + # to affect the stepping state. + + def set_until(self, frame, lineno=None): + """Stop when the line with the lineno greater than the current one is + reached or when returning from current frame.""" + # the name "until" is borrowed from gdb + if lineno is None: + lineno = frame.f_lineno + 1 + self._set_stopinfo(frame, frame, lineno) + + def set_step(self): + """Stop after one line of code.""" + # Issue #13183: pdb skips frames after hitting a breakpoint and running + # step commands. + # Restore the trace function in the caller (that may not have been set + # for performance reasons) when returning from the current frame. + if self.frame_returning: + caller_frame = self.frame_returning.f_back + if caller_frame and not caller_frame.f_trace: + caller_frame.f_trace = self.trace_dispatch + self._set_stopinfo(None, None) + + def set_next(self, frame): + """Stop on the next line in or below the given frame.""" + self._set_stopinfo(frame, None) + + def set_return(self, frame): + """Stop when returning from the given frame.""" + if frame.f_code.co_flags & GENERATOR_AND_COROUTINE_FLAGS: + self._set_stopinfo(frame, None, -1) + else: + self._set_stopinfo(frame.f_back, frame) + + def set_trace(self, frame=None): + """Start debugging from frame. + + If frame is not specified, debugging starts from caller's frame. 
+ """ + if frame is None: + frame = sys._getframe().f_back + self.reset() + while frame: + frame.f_trace = self.trace_dispatch + self.botframe = frame + frame = frame.f_back + self.set_step() + sys.settrace(self.trace_dispatch) + + def set_continue(self): + """Stop only at breakpoints or when finished. + + If there are no breakpoints, set the system trace function to None. + """ + # Don't stop except at breakpoints or when finished + self._set_stopinfo(self.botframe, None, -1) + if not self.breaks: + # no breakpoints; run without debugger overhead + sys.settrace(None) + frame = sys._getframe().f_back + while frame and frame is not self.botframe: + del frame.f_trace + frame = frame.f_back + + def set_quit(self): + """Set quitting attribute to True. + + Raises BdbQuit exception in the next call to a dispatch_*() method. + """ + self.stopframe = self.botframe + self.returnframe = None + self.quitting = True + sys.settrace(None) + + # Derived classes and clients can call the following methods + # to manipulate breakpoints. These methods return an + # error message if something went wrong, None if all is well. + # Set_break prints out the breakpoint line and file:lineno. + # Call self.get_*break*() to see the breakpoints or better + # for bp in Breakpoint.bpbynumber: if bp: bp.bpprint(). + + def set_break(self, filename, lineno, temporary=False, cond=None, + funcname=None): + """Set a new breakpoint for filename:lineno. + + If lineno doesn't exist for the filename, return an error message. + The filename should be in canonical form. + """ + filename = self.canonic(filename) + import linecache # Import as late as possible + line = linecache.getline(filename, lineno) + if not line: + return 'Line %s:%d does not exist' % (filename, lineno) + list = self.breaks.setdefault(filename, []) + if lineno not in list: + list.append(lineno) + bp = Breakpoint(filename, lineno, temporary, cond, funcname) + return None + + def _prune_breaks(self, filename, lineno): + """Prune breakpoints for filename:lineno. + + A list of breakpoints is maintained in the Bdb instance and in + the Breakpoint class. If a breakpoint in the Bdb instance no + longer exists in the Breakpoint class, then it's removed from the + Bdb instance. + """ + if (filename, lineno) not in Breakpoint.bplist: + self.breaks[filename].remove(lineno) + if not self.breaks[filename]: + del self.breaks[filename] + + def clear_break(self, filename, lineno): + """Delete breakpoints for filename:lineno. + + If no breakpoints were set, return an error message. + """ + filename = self.canonic(filename) + if filename not in self.breaks: + return 'There are no breakpoints in %s' % filename + if lineno not in self.breaks[filename]: + return 'There is no breakpoint at %s:%d' % (filename, lineno) + # If there's only one bp in the list for that file,line + # pair, then remove the breaks entry + for bp in Breakpoint.bplist[filename, lineno][:]: + bp.deleteMe() + self._prune_breaks(filename, lineno) + return None + + def clear_bpbynumber(self, arg): + """Delete a breakpoint by its index in Breakpoint.bpbynumber. + + If arg is invalid, return an error message. + """ + try: + bp = self.get_bpbynumber(arg) + except ValueError as err: + return str(err) + bp.deleteMe() + self._prune_breaks(bp.file, bp.line) + return None + + def clear_all_file_breaks(self, filename): + """Delete all breakpoints in filename. + + If none were set, return an error message. 
+ """ + filename = self.canonic(filename) + if filename not in self.breaks: + return 'There are no breakpoints in %s' % filename + for line in self.breaks[filename]: + blist = Breakpoint.bplist[filename, line] + for bp in blist: + bp.deleteMe() + del self.breaks[filename] + return None + + def clear_all_breaks(self): + """Delete all existing breakpoints. + + If none were set, return an error message. + """ + if not self.breaks: + return 'There are no breakpoints' + for bp in Breakpoint.bpbynumber: + if bp: + bp.deleteMe() + self.breaks = {} + return None + + def get_bpbynumber(self, arg): + """Return a breakpoint by its index in Breakpoint.bybpnumber. + + For invalid arg values or if the breakpoint doesn't exist, + raise a ValueError. + """ + if not arg: + raise ValueError('Breakpoint number expected') + try: + number = int(arg) + except ValueError: + raise ValueError('Non-numeric breakpoint number %s' % arg) from None + try: + bp = Breakpoint.bpbynumber[number] + except IndexError: + raise ValueError('Breakpoint number %d out of range' % number) from None + if bp is None: + raise ValueError('Breakpoint %d already deleted' % number) + return bp + + def get_break(self, filename, lineno): + """Return True if there is a breakpoint for filename:lineno.""" + filename = self.canonic(filename) + return filename in self.breaks and \ + lineno in self.breaks[filename] + + def get_breaks(self, filename, lineno): + """Return all breakpoints for filename:lineno. + + If no breakpoints are set, return an empty list. + """ + filename = self.canonic(filename) + return filename in self.breaks and \ + lineno in self.breaks[filename] and \ + Breakpoint.bplist[filename, lineno] or [] + + def get_file_breaks(self, filename): + """Return all lines with breakpoints for filename. + + If no breakpoints are set, return an empty list. + """ + filename = self.canonic(filename) + if filename in self.breaks: + return self.breaks[filename] + else: + return [] + + def get_all_breaks(self): + """Return all breakpoints that are set.""" + return self.breaks + + # Derived classes and clients can call the following method + # to get a data structure representing a stack trace. + + def get_stack(self, f, t): + """Return a list of (frame, lineno) in a stack trace and a size. + + List starts with original calling frame, if there is one. + Size may be number of frames above or below f. + """ + stack = [] + if t and t.tb_frame is f: + t = t.tb_next + while f is not None: + stack.append((f, f.f_lineno)) + if f is self.botframe: + break + f = f.f_back + stack.reverse() + i = max(0, len(stack) - 1) + while t is not None: + stack.append((t.tb_frame, t.tb_lineno)) + t = t.tb_next + if f is None: + i = max(0, len(stack) - 1) + return stack, i + + def format_stack_entry(self, frame_lineno, lprefix=': '): + """Return a string with information about a stack entry. + + The stack entry frame_lineno is a (frame, lineno) tuple. The + return string contains the canonical filename, the function name + or '', the input arguments, the return value, and the + line of code (if it exists). 
+ + """ + import linecache, reprlib + frame, lineno = frame_lineno + filename = self.canonic(frame.f_code.co_filename) + s = '%s(%r)' % (filename, lineno) + if frame.f_code.co_name: + s += frame.f_code.co_name + else: + s += "" + s += '()' + if '__return__' in frame.f_locals: + rv = frame.f_locals['__return__'] + s += '->' + s += reprlib.repr(rv) + line = linecache.getline(filename, lineno, frame.f_globals) + if line: + s += lprefix + line.strip() + return s + + # The following methods can be called by clients to use + # a debugger to debug a statement or an expression. + # Both can be given as a string, or a code object. + + def run(self, cmd, globals=None, locals=None): + """Debug a statement executed via the exec() function. + + globals defaults to __main__.dict; locals defaults to globals. + """ + if globals is None: + import __main__ + globals = __main__.__dict__ + if locals is None: + locals = globals + self.reset() + if isinstance(cmd, str): + cmd = compile(cmd, "", "exec") + sys.settrace(self.trace_dispatch) + try: + exec(cmd, globals, locals) + except BdbQuit: + pass + finally: + self.quitting = True + sys.settrace(None) + + def runeval(self, expr, globals=None, locals=None): + """Debug an expression executed via the eval() function. + + globals defaults to __main__.dict; locals defaults to globals. + """ + if globals is None: + import __main__ + globals = __main__.__dict__ + if locals is None: + locals = globals + self.reset() + sys.settrace(self.trace_dispatch) + try: + return eval(expr, globals, locals) + except BdbQuit: + pass + finally: + self.quitting = True + sys.settrace(None) + + def runctx(self, cmd, globals, locals): + """For backwards-compatibility. Defers to run().""" + # B/W compatibility + self.run(cmd, globals, locals) + + # This method is more useful to debug a single function call. + + def runcall(*args, **kwds): + """Debug a single function call. + + Return the result of the function call. + """ + if len(args) >= 2: + self, func, *args = args + elif not args: + raise TypeError("descriptor 'runcall' of 'Bdb' object " + "needs an argument") + elif 'func' in kwds: + func = kwds.pop('func') + self, *args = args + import warnings + warnings.warn("Passing 'func' as keyword argument is deprecated", + DeprecationWarning, stacklevel=2) + else: + raise TypeError('runcall expected at least 1 positional argument, ' + 'got %d' % (len(args)-1)) + + self.reset() + sys.settrace(self.trace_dispatch) + res = None + try: + res = func(*args, **kwds) + except BdbQuit: + pass + finally: + self.quitting = True + sys.settrace(None) + return res + runcall.__text_signature__ = '($self, func, /, *args, **kwds)' + + +def set_trace(): + """Start debugging with a Bdb instance from the caller's frame.""" + Bdb().set_trace() + + +class Breakpoint: + """Breakpoint class. + + Implements temporary breakpoints, ignore counts, disabling and + (re)-enabling, and conditionals. + + Breakpoints are indexed by number through bpbynumber and by + the (file, line) tuple using bplist. The former points to a + single instance of class Breakpoint. The latter points to a + list of such instances since there may be more than one + breakpoint per line. + + When creating a breakpoint, its associated filename should be + in canonical form. If funcname is defined, a breakpoint hit will be + counted when the first line of that function is executed. A + conditional breakpoint always counts a hit. 
+ """ + + # XXX Keeping state in the class is a mistake -- this means + # you cannot have more than one active Bdb instance. + + next = 1 # Next bp to be assigned + bplist = {} # indexed by (file, lineno) tuple + bpbynumber = [None] # Each entry is None or an instance of Bpt + # index 0 is unused, except for marking an + # effective break .... see effective() + + def __init__(self, file, line, temporary=False, cond=None, funcname=None): + self.funcname = funcname + # Needed if funcname is not None. + self.func_first_executable_line = None + self.file = file # This better be in canonical form! + self.line = line + self.temporary = temporary + self.cond = cond + self.enabled = True + self.ignore = 0 + self.hits = 0 + self.number = Breakpoint.next + Breakpoint.next += 1 + # Build the two lists + self.bpbynumber.append(self) + if (file, line) in self.bplist: + self.bplist[file, line].append(self) + else: + self.bplist[file, line] = [self] + + def deleteMe(self): + """Delete the breakpoint from the list associated to a file:line. + + If it is the last breakpoint in that position, it also deletes + the entry for the file:line. + """ + + index = (self.file, self.line) + self.bpbynumber[self.number] = None # No longer in list + self.bplist[index].remove(self) + if not self.bplist[index]: + # No more bp for this f:l combo + del self.bplist[index] + + def enable(self): + """Mark the breakpoint as enabled.""" + self.enabled = True + + def disable(self): + """Mark the breakpoint as disabled.""" + self.enabled = False + + def bpprint(self, out=None): + """Print the output of bpformat(). + + The optional out argument directs where the output is sent + and defaults to standard output. + """ + if out is None: + out = sys.stdout + print(self.bpformat(), file=out) + + def bpformat(self): + """Return a string with information about the breakpoint. + + The information includes the breakpoint number, temporary + status, file:line position, break condition, number of times to + ignore, and number of times hit. + + """ + if self.temporary: + disp = 'del ' + else: + disp = 'keep ' + if self.enabled: + disp = disp + 'yes ' + else: + disp = disp + 'no ' + ret = '%-4dbreakpoint %s at %s:%d' % (self.number, disp, + self.file, self.line) + if self.cond: + ret += '\n\tstop only if %s' % (self.cond,) + if self.ignore: + ret += '\n\tignore next %d hits' % (self.ignore,) + if self.hits: + if self.hits > 1: + ss = 's' + else: + ss = '' + ret += '\n\tbreakpoint already hit %d time%s' % (self.hits, ss) + return ret + + def __str__(self): + "Return a condensed description of the breakpoint." + return 'breakpoint %s at %s:%s' % (self.number, self.file, self.line) + +# -----------end of Breakpoint class---------- + + +def checkfuncname(b, frame): + """Return True if break should happen here. + + Whether a break should happen depends on the way that b (the breakpoint) + was set. If it was set via line number, check if b.line is the same as + the one in the frame. If it was set via function name, check if this is + the right function and if it is on the first executable line. + """ + if not b.funcname: + # Breakpoint was set via line number. + if b.line != frame.f_lineno: + # Breakpoint was set at a line with a def statement and the function + # defined is called: don't break. + return False + return True + + # Breakpoint set via function name. + if frame.f_code.co_name != b.funcname: + # It's not a function call, but rather execution of def statement. + return False + + # We are in the right frame. 
+ if not b.func_first_executable_line: + # The function is entered for the 1st time. + b.func_first_executable_line = frame.f_lineno + + if b.func_first_executable_line != frame.f_lineno: + # But we are not at the first line number: don't break. + return False + return True + + +# Determines if there is an effective (active) breakpoint at this +# line of code. Returns breakpoint number or 0 if none +def effective(file, line, frame): + """Determine which breakpoint for this file:line is to be acted upon. + + Called only if we know there is a breakpoint at this location. Return + the breakpoint that was triggered and a boolean that indicates if it is + ok to delete a temporary breakpoint. Return (None, None) if there is no + matching breakpoint. + """ + possibles = Breakpoint.bplist[file, line] + for b in possibles: + if not b.enabled: + continue + if not checkfuncname(b, frame): + continue + # Count every hit when bp is enabled + b.hits += 1 + if not b.cond: + # If unconditional, and ignoring go on to next, else break + if b.ignore > 0: + b.ignore -= 1 + continue + else: + # breakpoint and marker that it's ok to delete if temporary + return (b, True) + else: + # Conditional bp. + # Ignore count applies only to those bpt hits where the + # condition evaluates to true. + try: + val = eval(b.cond, frame.f_globals, frame.f_locals) + if val: + if b.ignore > 0: + b.ignore -= 1 + # continue + else: + return (b, True) + # else: + # continue + except: + # if eval fails, most conservative thing is to stop on + # breakpoint regardless of ignore count. Don't delete + # temporary, as another hint to user. + return (b, False) + return (None, None) + + +# -------------------- testing -------------------- + +class Tdb(Bdb): + def user_call(self, frame, args): + name = frame.f_code.co_name + if not name: name = '???' + print('+++ call', name, args) + def user_line(self, frame): + import linecache + name = frame.f_code.co_name + if not name: name = '???' + fn = self.canonic(frame.f_code.co_filename) + line = linecache.getline(fn, frame.f_lineno, frame.f_globals) + print('+++', fn, frame.f_lineno, name, ':', line.strip()) + def user_return(self, frame, retval): + print('+++ return', retval) + def user_exception(self, frame, exc_stuff): + print('+++ exception', exc_stuff) + self.set_continue() + +def foo(n): + print('foo(', n, ')') + x = bar(n*10) + print('bar returned', x) + +def bar(a): + print('bar(', a, ')') + return a/2 + +def test(): + t = Tdb() + t.run('import bdb; bdb.foo(10)') diff --git a/Lib/cmd.py b/Lib/cmd.py new file mode 100644 index 00000000000..859e91096d8 --- /dev/null +++ b/Lib/cmd.py @@ -0,0 +1,401 @@ +"""A generic class to build line-oriented command interpreters. + +Interpreters constructed with this class obey the following conventions: + +1. End of file on input is processed as the command 'EOF'. +2. A command is parsed out of each line by collecting the prefix composed + of characters in the identchars member. +3. A command `foo' is dispatched to a method 'do_foo()'; the do_ method + is passed a single argument consisting of the remainder of the line. +4. Typing an empty line repeats the last command. (Actually, it calls the + method `emptyline', which may be overridden in a subclass.) +5. There is a predefined `help' method. Given an argument `topic', it + calls the command `help_topic'. With no arguments, it lists all topics + with defined help_ functions, broken into up to three topics; documented + commands, miscellaneous help topics, and undocumented commands. +6. The command '?' 
is a synonym for `help'. The command '!' is a synonym + for `shell', if a do_shell method exists. +7. If completion is enabled, completing commands will be done automatically, + and completing of commands args is done by calling complete_foo() with + arguments text, line, begidx, endidx. text is string we are matching + against, all returned matches must begin with it. line is the current + input line (lstripped), begidx and endidx are the beginning and end + indexes of the text being matched, which could be used to provide + different completion depending upon which position the argument is in. + +The `default' method may be overridden to intercept commands for which there +is no do_ method. + +The `completedefault' method may be overridden to intercept completions for +commands that have no complete_ method. + +The data member `self.ruler' sets the character used to draw separator lines +in the help messages. If empty, no ruler line is drawn. It defaults to "=". + +If the value of `self.intro' is nonempty when the cmdloop method is called, +it is printed out on interpreter startup. This value may be overridden +via an optional argument to the cmdloop() method. + +The data members `self.doc_header', `self.misc_header', and +`self.undoc_header' set the headers used for the help function's +listings of documented functions, miscellaneous topics, and undocumented +functions respectively. +""" + +import string, sys + +__all__ = ["Cmd"] + +PROMPT = '(Cmd) ' +IDENTCHARS = string.ascii_letters + string.digits + '_' + +class Cmd: + """A simple framework for writing line-oriented command interpreters. + + These are often useful for test harnesses, administrative tools, and + prototypes that will later be wrapped in a more sophisticated interface. + + A Cmd instance or subclass instance is a line-oriented interpreter + framework. There is no good reason to instantiate Cmd itself; rather, + it's useful as a superclass of an interpreter class you define yourself + in order to inherit Cmd's methods and encapsulate action methods. + + """ + prompt = PROMPT + identchars = IDENTCHARS + ruler = '=' + lastcmd = '' + intro = None + doc_leader = "" + doc_header = "Documented commands (type help ):" + misc_header = "Miscellaneous help topics:" + undoc_header = "Undocumented commands:" + nohelp = "*** No help on %s" + use_rawinput = 1 + + def __init__(self, completekey='tab', stdin=None, stdout=None): + """Instantiate a line-oriented interpreter framework. + + The optional argument 'completekey' is the readline name of a + completion key; it defaults to the Tab key. If completekey is + not None and the readline module is available, command completion + is done automatically. The optional arguments stdin and stdout + specify alternate input and output file objects; if not specified, + sys.stdin and sys.stdout are used. + + """ + if stdin is not None: + self.stdin = stdin + else: + self.stdin = sys.stdin + if stdout is not None: + self.stdout = stdout + else: + self.stdout = sys.stdout + self.cmdqueue = [] + self.completekey = completekey + + def cmdloop(self, intro=None): + """Repeatedly issue a prompt, accept input, parse an initial prefix + off the received input, and dispatch to action methods, passing them + the remainder of the line as argument. 
+ + """ + + self.preloop() + if self.use_rawinput and self.completekey: + try: + import readline + self.old_completer = readline.get_completer() + readline.set_completer(self.complete) + readline.parse_and_bind(self.completekey+": complete") + except ImportError: + pass + try: + if intro is not None: + self.intro = intro + if self.intro: + self.stdout.write(str(self.intro)+"\n") + stop = None + while not stop: + if self.cmdqueue: + line = self.cmdqueue.pop(0) + else: + if self.use_rawinput: + try: + line = input(self.prompt) + except EOFError: + line = 'EOF' + else: + self.stdout.write(self.prompt) + self.stdout.flush() + line = self.stdin.readline() + if not len(line): + line = 'EOF' + else: + line = line.rstrip('\r\n') + line = self.precmd(line) + stop = self.onecmd(line) + stop = self.postcmd(stop, line) + self.postloop() + finally: + if self.use_rawinput and self.completekey: + try: + import readline + readline.set_completer(self.old_completer) + except ImportError: + pass + + + def precmd(self, line): + """Hook method executed just before the command line is + interpreted, but after the input prompt is generated and issued. + + """ + return line + + def postcmd(self, stop, line): + """Hook method executed just after a command dispatch is finished.""" + return stop + + def preloop(self): + """Hook method executed once when the cmdloop() method is called.""" + pass + + def postloop(self): + """Hook method executed once when the cmdloop() method is about to + return. + + """ + pass + + def parseline(self, line): + """Parse the line into a command name and a string containing + the arguments. Returns a tuple containing (command, args, line). + 'command' and 'args' may be None if the line couldn't be parsed. + """ + line = line.strip() + if not line: + return None, None, line + elif line[0] == '?': + line = 'help ' + line[1:] + elif line[0] == '!': + if hasattr(self, 'do_shell'): + line = 'shell ' + line[1:] + else: + return None, None, line + i, n = 0, len(line) + while i < n and line[i] in self.identchars: i = i+1 + cmd, arg = line[:i], line[i:].strip() + return cmd, arg, line + + def onecmd(self, line): + """Interpret the argument as though it had been typed in response + to the prompt. + + This may be overridden, but should not normally need to be; + see the precmd() and postcmd() methods for useful execution hooks. + The return value is a flag indicating whether interpretation of + commands by the interpreter should stop. + + """ + cmd, arg, line = self.parseline(line) + if not line: + return self.emptyline() + if cmd is None: + return self.default(line) + self.lastcmd = line + if line == 'EOF' : + self.lastcmd = '' + if cmd == '': + return self.default(line) + else: + try: + func = getattr(self, 'do_' + cmd) + except AttributeError: + return self.default(line) + return func(arg) + + def emptyline(self): + """Called when an empty line is entered in response to the prompt. + + If this method is not overridden, it repeats the last nonempty + command entered. + + """ + if self.lastcmd: + return self.onecmd(self.lastcmd) + + def default(self, line): + """Called on an input line when the command prefix is not recognized. + + If this method is not overridden, it prints an error message and + returns. + + """ + self.stdout.write('*** Unknown syntax: %s\n'%line) + + def completedefault(self, *ignored): + """Method called to complete an input line when no command-specific + complete_*() method is available. + + By default, it returns an empty list. 
+ + """ + return [] + + def completenames(self, text, *ignored): + dotext = 'do_'+text + return [a[3:] for a in self.get_names() if a.startswith(dotext)] + + def complete(self, text, state): + """Return the next possible completion for 'text'. + + If a command has not been entered, then complete against command list. + Otherwise try to call complete_ to get list of completions. + """ + if state == 0: + import readline + origline = readline.get_line_buffer() + line = origline.lstrip() + stripped = len(origline) - len(line) + begidx = readline.get_begidx() - stripped + endidx = readline.get_endidx() - stripped + if begidx>0: + cmd, args, foo = self.parseline(line) + if cmd == '': + compfunc = self.completedefault + else: + try: + compfunc = getattr(self, 'complete_' + cmd) + except AttributeError: + compfunc = self.completedefault + else: + compfunc = self.completenames + self.completion_matches = compfunc(text, line, begidx, endidx) + try: + return self.completion_matches[state] + except IndexError: + return None + + def get_names(self): + # This method used to pull in base class attributes + # at a time dir() didn't do it yet. + return dir(self.__class__) + + def complete_help(self, *args): + commands = set(self.completenames(*args)) + topics = set(a[5:] for a in self.get_names() + if a.startswith('help_' + args[0])) + return list(commands | topics) + + def do_help(self, arg): + 'List available commands with "help" or detailed help with "help cmd".' + if arg: + # XXX check arg syntax + try: + func = getattr(self, 'help_' + arg) + except AttributeError: + try: + doc=getattr(self, 'do_' + arg).__doc__ + if doc: + self.stdout.write("%s\n"%str(doc)) + return + except AttributeError: + pass + self.stdout.write("%s\n"%str(self.nohelp % (arg,))) + return + func() + else: + names = self.get_names() + cmds_doc = [] + cmds_undoc = [] + help = {} + for name in names: + if name[:5] == 'help_': + help[name[5:]]=1 + names.sort() + # There can be duplicates if routines overridden + prevname = '' + for name in names: + if name[:3] == 'do_': + if name == prevname: + continue + prevname = name + cmd=name[3:] + if cmd in help: + cmds_doc.append(cmd) + del help[cmd] + elif getattr(self, name).__doc__: + cmds_doc.append(cmd) + else: + cmds_undoc.append(cmd) + self.stdout.write("%s\n"%str(self.doc_leader)) + self.print_topics(self.doc_header, cmds_doc, 15,80) + self.print_topics(self.misc_header, list(help.keys()),15,80) + self.print_topics(self.undoc_header, cmds_undoc, 15,80) + + def print_topics(self, header, cmds, cmdlen, maxcol): + if cmds: + self.stdout.write("%s\n"%str(header)) + if self.ruler: + self.stdout.write("%s\n"%str(self.ruler * len(header))) + self.columnize(cmds, maxcol-1) + self.stdout.write("\n") + + def columnize(self, list, displaywidth=80): + """Display a list of strings as a compact set of columns. + + Each column is only as wide as necessary. + Columns are separated by two spaces (one was not legible enough). 
+ """ + if not list: + self.stdout.write("\n") + return + + nonstrings = [i for i in range(len(list)) + if not isinstance(list[i], str)] + if nonstrings: + raise TypeError("list[i] not a string for i in %s" + % ", ".join(map(str, nonstrings))) + size = len(list) + if size == 1: + self.stdout.write('%s\n'%str(list[0])) + return + # Try every row count from 1 upwards + for nrows in range(1, len(list)): + ncols = (size+nrows-1) // nrows + colwidths = [] + totwidth = -2 + for col in range(ncols): + colwidth = 0 + for row in range(nrows): + i = row + nrows*col + if i >= size: + break + x = list[i] + colwidth = max(colwidth, len(x)) + colwidths.append(colwidth) + totwidth += colwidth + 2 + if totwidth > displaywidth: + break + if totwidth <= displaywidth: + break + else: + nrows = len(list) + ncols = 1 + colwidths = [0] + for row in range(nrows): + texts = [] + for col in range(ncols): + i = row + nrows*col + if i >= size: + x = "" + else: + x = list[i] + texts.append(x) + while texts and not texts[-1]: + del texts[-1] + for col in range(len(texts)): + texts[col] = texts[col].ljust(colwidths[col]) + self.stdout.write("%s\n"%str(" ".join(texts))) diff --git a/Lib/doctest.py b/Lib/doctest.py new file mode 100644 index 00000000000..dcbcfe52e90 --- /dev/null +++ b/Lib/doctest.py @@ -0,0 +1,2786 @@ +# Module doctest. +# Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org). +# Major enhancements and refactoring by: +# Jim Fulton +# Edward Loper + +# Provided as-is; use at your own risk; no warranty; no promises; enjoy! + +r"""Module doctest -- a framework for running examples in docstrings. + +In simplest use, end each module M to be tested with: + +def _test(): + import doctest + doctest.testmod() + +if __name__ == "__main__": + _test() + +Then running the module as a script will cause the examples in the +docstrings to get executed and verified: + +python M.py + +This won't display anything unless an example fails, in which case the +failing example(s) and the cause(s) of the failure(s) are printed to stdout +(why not stderr? because stderr is a lame hack <0.2 wink>), and the final +line of output is "Test failed.". + +Run it with the -v switch instead: + +python M.py -v + +and a detailed report of all examples tried is printed to stdout, along +with assorted summaries at the end. + +You can force verbose mode by passing "verbose=True" to testmod, or prohibit +it by passing "verbose=False". In either of those cases, sys.argv is not +examined by testmod. + +There are a variety of other ways to run doctests, including integration +with the unittest framework, and support for running non-Python text +files containing doctests. There are also many ways to override parts +of doctest's default behaviors. See the Library Reference Manual for +details. +""" + +__docformat__ = 'reStructuredText en' + +__all__ = [ + # 0, Option Flags + 'register_optionflag', + 'DONT_ACCEPT_TRUE_FOR_1', + 'DONT_ACCEPT_BLANKLINE', + 'NORMALIZE_WHITESPACE', + 'ELLIPSIS', + 'SKIP', + 'IGNORE_EXCEPTION_DETAIL', + 'COMPARISON_FLAGS', + 'REPORT_UDIFF', + 'REPORT_CDIFF', + 'REPORT_NDIFF', + 'REPORT_ONLY_FIRST_FAILURE', + 'REPORTING_FLAGS', + 'FAIL_FAST', + # 1. Utility Functions + # 2. Example & DocTest + 'Example', + 'DocTest', + # 3. Doctest Parser + 'DocTestParser', + # 4. Doctest Finder + 'DocTestFinder', + # 5. Doctest Runner + 'DocTestRunner', + 'OutputChecker', + 'DocTestFailure', + 'UnexpectedException', + 'DebugRunner', + # 6. Test Functions + 'testmod', + 'testfile', + 'run_docstring_examples', + # 7. 
Unittest Support + 'DocTestSuite', + 'DocFileSuite', + 'set_unittest_reportflags', + # 8. Debugging Support + 'script_from_examples', + 'testsource', + 'debug_src', + 'debug', +] + +import __future__ +import difflib +import inspect +import linecache +import os +import pdb +import re +import sys +import traceback +import unittest +from io import StringIO +from collections import namedtuple + +TestResults = namedtuple('TestResults', 'failed attempted') + +# There are 4 basic classes: +# - Example: a pair, plus an intra-docstring line number. +# - DocTest: a collection of examples, parsed from a docstring, plus +# info about where the docstring came from (name, filename, lineno). +# - DocTestFinder: extracts DocTests from a given object's docstring and +# its contained objects' docstrings. +# - DocTestRunner: runs DocTest cases, and accumulates statistics. +# +# So the basic picture is: +# +# list of: +# +------+ +---------+ +-------+ +# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results| +# +------+ +---------+ +-------+ +# | Example | +# | ... | +# | Example | +# +---------+ + +# Option constants. + +OPTIONFLAGS_BY_NAME = {} +def register_optionflag(name): + # Create a new flag unless `name` is already known. + return OPTIONFLAGS_BY_NAME.setdefault(name, 1 << len(OPTIONFLAGS_BY_NAME)) + +DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1') +DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE') +NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE') +ELLIPSIS = register_optionflag('ELLIPSIS') +SKIP = register_optionflag('SKIP') +IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL') + +COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 | + DONT_ACCEPT_BLANKLINE | + NORMALIZE_WHITESPACE | + ELLIPSIS | + SKIP | + IGNORE_EXCEPTION_DETAIL) + +REPORT_UDIFF = register_optionflag('REPORT_UDIFF') +REPORT_CDIFF = register_optionflag('REPORT_CDIFF') +REPORT_NDIFF = register_optionflag('REPORT_NDIFF') +REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE') +FAIL_FAST = register_optionflag('FAIL_FAST') + +REPORTING_FLAGS = (REPORT_UDIFF | + REPORT_CDIFF | + REPORT_NDIFF | + REPORT_ONLY_FIRST_FAILURE | + FAIL_FAST) + +# Special string markers for use in `want` strings: +BLANKLINE_MARKER = '' +ELLIPSIS_MARKER = '...' + +###################################################################### +## Table of Contents +###################################################################### +# 1. Utility Functions +# 2. Example & DocTest -- store test cases +# 3. DocTest Parser -- extracts examples from strings +# 4. DocTest Finder -- extracts test cases from objects +# 5. DocTest Runner -- runs test cases +# 6. Test Functions -- convenient wrappers for testing +# 7. Unittest Support +# 8. Debugging Support +# 9. Example Usage + +###################################################################### +## 1. Utility Functions +###################################################################### + +def _extract_future_flags(globs): + """ + Return the compiler-flags associated with the future features that + have been imported into the given namespace (globs). + """ + flags = 0 + for fname in __future__.all_feature_names: + feature = globs.get(fname, None) + if feature is getattr(__future__, fname): + flags |= feature.compiler_flag + return flags + +def _normalize_module(module, depth=2): + """ + Return the module specified by `module`. In particular: + - If `module` is a module, then return module. 
+ - If `module` is a string, then import and return the + module with that name. + - If `module` is None, then return the calling module. + The calling module is assumed to be the module of + the stack frame at the given depth in the call stack. + """ + if inspect.ismodule(module): + return module + elif isinstance(module, str): + return __import__(module, globals(), locals(), ["*"]) + elif module is None: + return sys.modules[sys._getframe(depth).f_globals['__name__']] + else: + raise TypeError("Expected a module, string, or None") + +def _load_testfile(filename, package, module_relative, encoding): + if module_relative: + package = _normalize_module(package, 3) + filename = _module_relative_path(package, filename) + if getattr(package, '__loader__', None) is not None: + if hasattr(package.__loader__, 'get_data'): + file_contents = package.__loader__.get_data(filename) + file_contents = file_contents.decode(encoding) + # get_data() opens files as 'rb', so one must do the equivalent + # conversion as universal newlines would do. + return file_contents.replace(os.linesep, '\n'), filename + with open(filename, encoding=encoding) as f: + return f.read(), filename + +def _indent(s, indent=4): + """ + Add the given number of space characters to the beginning of + every non-blank line in `s`, and return the result. + """ + # This regexp matches the start of non-blank lines: + return re.sub('(?m)^(?!$)', indent*' ', s) + +def _exception_traceback(exc_info): + """ + Return a string containing a traceback message for the given + exc_info tuple (as returned by sys.exc_info()). + """ + # Get a traceback message. + excout = StringIO() + exc_type, exc_val, exc_tb = exc_info + traceback.print_exception(exc_type, exc_val, exc_tb, file=excout) + return excout.getvalue() + +# Override some StringIO methods. +class _SpoofOut(StringIO): + def getvalue(self): + result = StringIO.getvalue(self) + # If anything at all was written, make sure there's a trailing + # newline. There's no way for the expected output to indicate + # that a trailing newline is missing. + if result and not result.endswith("\n"): + result += "\n" + return result + + def truncate(self, size=None): + self.seek(size) + StringIO.truncate(self) + +# Worst-case linear-time ellipsis matching. +def _ellipsis_match(want, got): + """ + Essentially the only subtle case: + >>> _ellipsis_match('aa...aa', 'aaa') + False + """ + if ELLIPSIS_MARKER not in want: + return want == got + + # Find "the real" strings. + ws = want.split(ELLIPSIS_MARKER) + assert len(ws) >= 2 + + # Deal with exact matches possibly needed at one or both ends. + startpos, endpos = 0, len(got) + w = ws[0] + if w: # starts with exact match + if got.startswith(w): + startpos = len(w) + del ws[0] + else: + return False + w = ws[-1] + if w: # ends with exact match + if got.endswith(w): + endpos -= len(w) + del ws[-1] + else: + return False + + if startpos > endpos: + # Exact end matches required more characters than we have, as in + # _ellipsis_match('aa...aa', 'aaa') + return False + + # For the rest, we only need to find the leftmost non-overlapping + # match for each piece. If there's no overall match that way alone, + # there's no overall match period. + for w in ws: + # w may be '' at times, if there are consecutive ellipses, or + # due to an ellipsis at the start or end of `want`. That's OK. + # Search for an empty string succeeds, and doesn't change startpos. 
+ startpos = got.find(w, startpos, endpos) + if startpos < 0: + return False + startpos += len(w) + + return True + +def _comment_line(line): + "Return a commented form of the given line" + line = line.rstrip() + if line: + return '# '+line + else: + return '#' + +def _strip_exception_details(msg): + # Support for IGNORE_EXCEPTION_DETAIL. + # Get rid of everything except the exception name; in particular, drop + # the possibly dotted module path (if any) and the exception message (if + # any). We assume that a colon is never part of a dotted name, or of an + # exception name. + # E.g., given + # "foo.bar.MyError: la di da" + # return "MyError" + # Or for "abc.def" or "abc.def:\n" return "def". + + start, end = 0, len(msg) + # The exception name must appear on the first line. + i = msg.find("\n") + if i >= 0: + end = i + # retain up to the first colon (if any) + i = msg.find(':', 0, end) + if i >= 0: + end = i + # retain just the exception name + i = msg.rfind('.', 0, end) + if i >= 0: + start = i+1 + return msg[start: end] + +class _OutputRedirectingPdb(pdb.Pdb): + """ + A specialized version of the python debugger that redirects stdout + to a given stream when interacting with the user. Stdout is *not* + redirected when traced code is executed. + """ + def __init__(self, out): + self.__out = out + self.__debugger_used = False + # do not play signal games in the pdb + pdb.Pdb.__init__(self, stdout=out, nosigint=True) + # still use input() to get user input + self.use_rawinput = 1 + + def set_trace(self, frame=None): + self.__debugger_used = True + if frame is None: + frame = sys._getframe().f_back + pdb.Pdb.set_trace(self, frame) + + def set_continue(self): + # Calling set_continue unconditionally would break unit test + # coverage reporting, as Bdb.set_continue calls sys.settrace(None). + if self.__debugger_used: + pdb.Pdb.set_continue(self) + + def trace_dispatch(self, *args): + # Redirect stdout to the given stream. + save_stdout = sys.stdout + sys.stdout = self.__out + # Call Pdb's trace dispatch method. + try: + return pdb.Pdb.trace_dispatch(self, *args) + finally: + sys.stdout = save_stdout + +# [XX] Normalize with respect to os.path.pardir? +def _module_relative_path(module, test_path): + if not inspect.ismodule(module): + raise TypeError('Expected a module: %r' % module) + if test_path.startswith('/'): + raise ValueError('Module-relative files may not have absolute paths') + + # Normalize the path. On Windows, replace "/" with "\". + test_path = os.path.join(*(test_path.split('/'))) + + # Find the base directory for the path. + if hasattr(module, '__file__'): + # A normal module/package + basedir = os.path.split(module.__file__)[0] + elif module.__name__ == '__main__': + # An interactive session. + if len(sys.argv)>0 and sys.argv[0] != '': + basedir = os.path.split(sys.argv[0])[0] + else: + basedir = os.curdir + else: + if hasattr(module, '__path__'): + for directory in module.__path__: + fullpath = os.path.join(directory, test_path) + if os.path.exists(fullpath): + return fullpath + + # A module w/o __file__ (this includes builtins) + raise ValueError("Can't resolve paths relative to the module " + "%r (it has no __file__)" + % module.__name__) + + # Combine the base directory and the test path. + return os.path.join(basedir, test_path) + +###################################################################### +## 2. 
Example & DocTest +###################################################################### +## - An "example" is a pair, where "source" is a +## fragment of source code, and "want" is the expected output for +## "source." The Example class also includes information about +## where the example was extracted from. +## +## - A "doctest" is a collection of examples, typically extracted from +## a string (such as an object's docstring). The DocTest class also +## includes information about where the string was extracted from. + +class Example: + """ + A single doctest example, consisting of source code and expected + output. `Example` defines the following attributes: + + - source: A single Python statement, always ending with a newline. + The constructor adds a newline if needed. + + - want: The expected output from running the source code (either + from stdout, or a traceback in case of exception). `want` ends + with a newline unless it's empty, in which case it's an empty + string. The constructor adds a newline if needed. + + - exc_msg: The exception message generated by the example, if + the example is expected to generate an exception; or `None` if + it is not expected to generate an exception. This exception + message is compared against the return value of + `traceback.format_exception_only()`. `exc_msg` ends with a + newline unless it's `None`. The constructor adds a newline + if needed. + + - lineno: The line number within the DocTest string containing + this Example where the Example begins. This line number is + zero-based, with respect to the beginning of the DocTest. + + - indent: The example's indentation in the DocTest string. + I.e., the number of space characters that precede the + example's first prompt. + + - options: A dictionary mapping from option flags to True or + False, which is used to override default options for this + example. Any option flags not contained in this dictionary + are left at their default value (as specified by the + DocTestRunner's optionflags). By default, no options are set. + """ + def __init__(self, source, want, exc_msg=None, lineno=0, indent=0, + options=None): + # Normalize inputs. + if not source.endswith('\n'): + source += '\n' + if want and not want.endswith('\n'): + want += '\n' + if exc_msg is not None and not exc_msg.endswith('\n'): + exc_msg += '\n' + # Store properties. + self.source = source + self.want = want + self.lineno = lineno + self.indent = indent + if options is None: options = {} + self.options = options + self.exc_msg = exc_msg + + def __eq__(self, other): + if type(self) is not type(other): + return NotImplemented + + return self.source == other.source and \ + self.want == other.want and \ + self.lineno == other.lineno and \ + self.indent == other.indent and \ + self.options == other.options and \ + self.exc_msg == other.exc_msg + + def __hash__(self): + return hash((self.source, self.want, self.lineno, self.indent, + self.exc_msg)) + +class DocTest: + """ + A collection of doctest examples that should be run in a single + namespace. Each `DocTest` defines the following attributes: + + - examples: the list of examples. + + - globs: The namespace (aka globals) that the examples should + be run in. + + - name: A name identifying the DocTest (typically, the name of + the object whose docstring this DocTest was extracted from). + + - filename: The name of the file that this DocTest was extracted + from, or `None` if the filename is unknown. 
+
+      - lineno: The line number within filename where this DocTest
+        begins, or `None` if the line number is unavailable.  This
+        line number is zero-based, with respect to the beginning of
+        the file.
+
+      - docstring: The string that the examples were extracted from,
+        or `None` if the string is unavailable.
+    """
+    def __init__(self, examples, globs, name, filename, lineno, docstring):
+        """
+        Create a new DocTest containing the given examples.  The
+        DocTest's globals are initialized with a copy of `globs`.
+        """
+        assert not isinstance(examples, str), \
+               "DocTest no longer accepts str; use DocTestParser instead"
+        self.examples = examples
+        self.docstring = docstring
+        self.globs = globs.copy()
+        self.name = name
+        self.filename = filename
+        self.lineno = lineno
+
+    def __repr__(self):
+        if len(self.examples) == 0:
+            examples = 'no examples'
+        elif len(self.examples) == 1:
+            examples = '1 example'
+        else:
+            examples = '%d examples' % len(self.examples)
+        return ('<%s %s from %s:%s (%s)>' %
+                (self.__class__.__name__,
+                 self.name, self.filename, self.lineno, examples))
+
+    def __eq__(self, other):
+        if type(self) is not type(other):
+            return NotImplemented
+
+        return self.examples == other.examples and \
+               self.docstring == other.docstring and \
+               self.globs == other.globs and \
+               self.name == other.name and \
+               self.filename == other.filename and \
+               self.lineno == other.lineno
+
+    def __hash__(self):
+        return hash((self.docstring, self.name, self.filename, self.lineno))
+
+    # This lets us sort tests by name:
+    def __lt__(self, other):
+        if not isinstance(other, DocTest):
+            return NotImplemented
+        return ((self.name, self.filename, self.lineno, id(self))
+                <
+                (other.name, other.filename, other.lineno, id(other)))
+
+######################################################################
+## 3. DocTestParser
+######################################################################
+
+class DocTestParser:
+    """
+    A class used to parse strings containing doctest examples.
+    """
+    # This regular expression is used to find doctest examples in a
+    # string.  It defines three groups: `source` is the source code
+    # (including leading indentation and prompts); `indent` is the
+    # indentation of the first (PS1) line of the source code; and
+    # `want` is the expected output (including leading indentation).
+    _EXAMPLE_RE = re.compile(r'''
+        # Source consists of a PS1 line followed by zero or more PS2 lines.
+        (?P<source>
+            (?:^(?P<indent> [ ]*) >>> .*)    # PS1 line
+            (?:\n           [ ]*  \.\.\. .*)*)  # PS2 lines
+        \n?
+        # Want consists of any non-blank lines that do not start with PS1.
+        (?P<want> (?:(?![ ]*$)    # Not a blank line
+                     (?![ ]*>>>)  # Not a line starting with PS1
+                     .+$\n?       # But any other line
+                  )*)
+        ''', re.MULTILINE | re.VERBOSE)
+
+    # A regular expression for handling `want` strings that contain
+    # expected exceptions.  It divides `want` into three pieces:
+    #    - the traceback header line (`hdr`)
+    #    - the traceback stack (`stack`)
+    #    - the exception message (`msg`), as generated by
+    #      traceback.format_exception_only()
+    # `msg` may have multiple lines.  We assume/require that the
+    # exception message is the first non-indented line starting with a word
+    # character following the traceback header line.
+    _EXCEPTION_RE = re.compile(r"""
+        # Grab the traceback header.  Different versions of Python have
+        # said different things on the first traceback line.
+        ^(?P<hdr> Traceback\ \(
+            (?: most\ recent\ call\ last
+            |   innermost\ last
+            ) \) :
+        )
+        \s* $                # toss trailing whitespace on the header.
+        (?P<stack> .*?)      # don't blink: absorb stuff until...
+        ^ (?P<msg> \w+ .*)   #     a line *starts* with alphanum.
+        """, re.VERBOSE | re.MULTILINE | re.DOTALL)
+
+    # A callable returning a true value iff its argument is a blank line
+    # or contains a single comment.
+    _IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
+
+    def parse(self, string, name='<string>'):
+        """
+        Divide the given string into examples and intervening text,
+        and return them as a list of alternating Examples and strings.
+        Line numbers for the Examples are 0-based.  The optional
+        argument `name` is a name identifying this string, and is only
+        used for error messages.
+        """
+        string = string.expandtabs()
+        # If all lines begin with the same indentation, then strip it.
+        min_indent = self._min_indent(string)
+        if min_indent > 0:
+            string = '\n'.join([l[min_indent:] for l in string.split('\n')])
+
+        output = []
+        charno, lineno = 0, 0
+        # Find all doctest examples in the string:
+        for m in self._EXAMPLE_RE.finditer(string):
+            # Add the pre-example text to `output`.
+            output.append(string[charno:m.start()])
+            # Update lineno (lines before this example)
+            lineno += string.count('\n', charno, m.start())
+            # Extract info from the regexp match.
+            (source, options, want, exc_msg) = \
+                     self._parse_example(m, name, lineno)
+            # Create an Example, and add it to the list.
+            if not self._IS_BLANK_OR_COMMENT(source):
+                output.append( Example(source, want, exc_msg,
+                                       lineno=lineno,
+                                       indent=min_indent+len(m.group('indent')),
+                                       options=options) )
+            # Update lineno (lines inside this example)
+            lineno += string.count('\n', m.start(), m.end())
+            # Update charno.
+            charno = m.end()
+        # Add any remaining post-example text to `output`.
+        output.append(string[charno:])
+        return output
+
+    def get_doctest(self, string, globs, name, filename, lineno):
+        """
+        Extract all doctest examples from the given string, and
+        collect them into a `DocTest` object.
+
+        `globs`, `name`, `filename`, and `lineno` are attributes for
+        the new `DocTest` object.  See the documentation for `DocTest`
+        for more information.
+        """
+        return DocTest(self.get_examples(string, name), globs,
+                       name, filename, lineno, string)
+
+    def get_examples(self, string, name='<string>'):
+        """
+        Extract all doctest examples from the given string, and return
+        them as a list of `Example` objects.  Line numbers are
+        0-based, because it's most common in doctests that nothing
+        interesting appears on the same line as opening triple-quote,
+        and so the first interesting line is called \"line 1\" then.
+
+        The optional argument `name` is a name identifying this
+        string, and is only used for error messages.
+        """
+        return [x for x in self.parse(string, name)
+                if isinstance(x, Example)]
+
+    def _parse_example(self, m, name, lineno):
+        """
+        Given a regular expression match from `_EXAMPLE_RE` (`m`),
+        return a pair `(source, want)`, where `source` is the matched
+        example's source code (with prompts and indentation stripped);
+        and `want` is the example's expected output (with indentation
+        stripped).
+
+        `name` is the string's name, and `lineno` is the line number
+        where the example starts; both are used for error messages.
+        """
+        # Get the example's indentation level.
+        indent = len(m.group('indent'))
+
+        # Divide source into lines; check that they're properly
+        # indented; and then strip their indentation & prompts.
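+        # Illustrative sketch (not part of the upstream module): for a
+        # matched example written as
+        #     '    >>> 6*7'
+        #     '    42'
+        # the indent is 4, so slicing indent+4 characters off each source
+        # line leaves '6*7', and slicing the 4-space indent off each want
+        # line leaves '42'.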
+ source_lines = m.group('source').split('\n') + self._check_prompt_blank(source_lines, indent, name, lineno) + self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno) + source = '\n'.join([sl[indent+4:] for sl in source_lines]) + + # Divide want into lines; check that it's properly indented; and + # then strip the indentation. Spaces before the last newline should + # be preserved, so plain rstrip() isn't good enough. + want = m.group('want') + want_lines = want.split('\n') + if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]): + del want_lines[-1] # forget final newline & spaces after it + self._check_prefix(want_lines, ' '*indent, name, + lineno + len(source_lines)) + want = '\n'.join([wl[indent:] for wl in want_lines]) + + # If `want` contains a traceback message, then extract it. + m = self._EXCEPTION_RE.match(want) + if m: + exc_msg = m.group('msg') + else: + exc_msg = None + + # Extract options from the source. + options = self._find_options(source, name, lineno) + + return source, options, want, exc_msg + + # This regular expression looks for option directives in the + # source code of an example. Option directives are comments + # starting with "doctest:". Warning: this may give false + # positives for string-literals that contain the string + # "#doctest:". Eliminating these false positives would require + # actually parsing the string; but we limit them by ignoring any + # line containing "#doctest:" that is *followed* by a quote mark. + _OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$', + re.MULTILINE) + + def _find_options(self, source, name, lineno): + """ + Return a dictionary containing option overrides extracted from + option directives in the given source string. + + `name` is the string's name, and `lineno` is the line number + where the example starts; both are used for error messages. + """ + options = {} + # (note: with the current regexp, this will match at most once:) + for m in self._OPTION_DIRECTIVE_RE.finditer(source): + option_strings = m.group(1).replace(',', ' ').split() + for option in option_strings: + if (option[0] not in '+-' or + option[1:] not in OPTIONFLAGS_BY_NAME): + raise ValueError('line %r of the doctest for %s ' + 'has an invalid option: %r' % + (lineno+1, name, option)) + flag = OPTIONFLAGS_BY_NAME[option[1:]] + options[flag] = (option[0] == '+') + if options and self._IS_BLANK_OR_COMMENT(source): + raise ValueError('line %r of the doctest for %s has an option ' + 'directive on a line with no example: %r' % + (lineno, name, source)) + return options + + # This regular expression finds the indentation of every non-blank + # line in a string. + _INDENT_RE = re.compile(r'^([ ]*)(?=\S)', re.MULTILINE) + + def _min_indent(self, s): + "Return the minimum indentation of any non-blank line in `s`" + indents = [len(indent) for indent in self._INDENT_RE.findall(s)] + if len(indents) > 0: + return min(indents) + else: + return 0 + + def _check_prompt_blank(self, lines, indent, name, lineno): + """ + Given the lines of a source string (including prompts and + leading indentation), check to make sure that every prompt is + followed by a space character. If any line is not followed by + a space character, then raise ValueError. 
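+
+        For example (purely illustrative), a source line written as
+        '    >>>print(1)' is rejected here, because no space follows
+        the '>>>' prompt.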
+ """ + for i, line in enumerate(lines): + if len(line) >= indent+4 and line[indent+3] != ' ': + raise ValueError('line %r of the docstring for %s ' + 'lacks blank after %s: %r' % + (lineno+i+1, name, + line[indent:indent+3], line)) + + def _check_prefix(self, lines, prefix, name, lineno): + """ + Check that every line in the given list starts with the given + prefix; if any line does not, then raise a ValueError. + """ + for i, line in enumerate(lines): + if line and not line.startswith(prefix): + raise ValueError('line %r of the docstring for %s has ' + 'inconsistent leading whitespace: %r' % + (lineno+i+1, name, line)) + + +###################################################################### +## 4. DocTest Finder +###################################################################### + +class DocTestFinder: + """ + A class used to extract the DocTests that are relevant to a given + object, from its docstring and the docstrings of its contained + objects. Doctests can currently be extracted from the following + object types: modules, functions, classes, methods, staticmethods, + classmethods, and properties. + """ + + def __init__(self, verbose=False, parser=DocTestParser(), + recurse=True, exclude_empty=True): + """ + Create a new doctest finder. + + The optional argument `parser` specifies a class or + function that should be used to create new DocTest objects (or + objects that implement the same interface as DocTest). The + signature for this factory function should match the signature + of the DocTest constructor. + + If the optional argument `recurse` is false, then `find` will + only examine the given object, and not any contained objects. + + If the optional argument `exclude_empty` is false, then `find` + will include tests for objects with empty docstrings. + """ + self._parser = parser + self._verbose = verbose + self._recurse = recurse + self._exclude_empty = exclude_empty + + def find(self, obj, name=None, module=None, globs=None, extraglobs=None): + """ + Return a list of the DocTests that are defined by the given + object's docstring, or by any of its contained objects' + docstrings. + + The optional parameter `module` is the module that contains + the given object. If the module is not specified or is None, then + the test finder will attempt to automatically determine the + correct module. The object's module is used: + + - As a default namespace, if `globs` is not specified. + - To prevent the DocTestFinder from extracting DocTests + from objects that are imported from other modules. + - To find the name of the file containing the object. + - To help find the line number of the object within its + file. + + Contained objects whose module does not match `module` are ignored. + + If `module` is False, no attempt to find the module will be made. + This is obscure, of use mostly in tests: if `module` is False, or + is None but cannot be found automatically, then all objects are + considered to belong to the (non-existent) module, so all contained + objects will (recursively) be searched for doctests. + + The globals for each DocTest is formed by combining `globs` + and `extraglobs` (bindings in `extraglobs` override bindings + in `globs`). A new copy of the globals dictionary is created + for each DocTest. If `globs` is not specified, then it + defaults to the module's `__dict__`, if specified, or {} + otherwise. If `extraglobs` is not specified, then it defaults + to {}. + + """ + # If name was not specified, then extract it from the object. 
+ if name is None: + name = getattr(obj, '__name__', None) + if name is None: + raise ValueError("DocTestFinder.find: name must be given " + "when obj.__name__ doesn't exist: %r" % + (type(obj),)) + + # Find the module that contains the given object (if obj is + # a module, then module=obj.). Note: this may fail, in which + # case module will be None. + if module is False: + module = None + elif module is None: + module = inspect.getmodule(obj) + + # Read the module's source code. This is used by + # DocTestFinder._find_lineno to find the line number for a + # given object's docstring. + try: + file = inspect.getsourcefile(obj) + except TypeError: + source_lines = None + else: + if not file: + # Check to see if it's one of our special internal "files" + # (see __patched_linecache_getlines). + file = inspect.getfile(obj) + if not file[0]+file[-2:] == '<]>': file = None + if file is None: + source_lines = None + else: + if module is not None: + # Supply the module globals in case the module was + # originally loaded via a PEP 302 loader and + # file is not a valid filesystem path + source_lines = linecache.getlines(file, module.__dict__) + else: + # No access to a loader, so assume it's a normal + # filesystem path + source_lines = linecache.getlines(file) + if not source_lines: + source_lines = None + + # Initialize globals, and merge in extraglobs. + if globs is None: + if module is None: + globs = {} + else: + globs = module.__dict__.copy() + else: + globs = globs.copy() + if extraglobs is not None: + globs.update(extraglobs) + if '__name__' not in globs: + globs['__name__'] = '__main__' # provide a default module name + + # Recursively explore `obj`, extracting DocTests. + tests = [] + self._find(tests, obj, name, module, source_lines, globs, {}) + # Sort the tests by alpha order of names, for consistency in + # verbose-mode output. This was a feature of doctest in Pythons + # <= 2.3 that got lost by accident in 2.4. It was repaired in + # 2.4.4 and 2.5. + tests.sort() + return tests + + def _from_module(self, module, object): + """ + Return true if the given object is defined in the given + module. + """ + if module is None: + return True + elif inspect.getmodule(object) is not None: + return module is inspect.getmodule(object) + elif inspect.isfunction(object): + return module.__dict__ is object.__globals__ + elif inspect.ismethoddescriptor(object): + if hasattr(object, '__objclass__'): + obj_mod = object.__objclass__.__module__ + elif hasattr(object, '__module__'): + obj_mod = object.__module__ + else: + return True # [XX] no easy way to tell otherwise + return module.__name__ == obj_mod + elif inspect.isclass(object): + return module.__name__ == object.__module__ + elif hasattr(object, '__module__'): + return module.__name__ == object.__module__ + elif isinstance(object, property): + return True # [XX] no way not be sure. + else: + raise ValueError("object must be a class or function") + + def _find(self, tests, obj, name, module, source_lines, globs, seen): + """ + Find tests for the given object and any contained objects, and + add them to `tests`. + """ + if self._verbose: + print('Finding tests in %s' % name) + + # If we've already processed this object, then ignore it. + if id(obj) in seen: + return + seen[id(obj)] = 1 + + # Find a test for this object, and add it to the list of tests. + test = self._get_test(obj, name, module, globs, source_lines) + if test is not None: + tests.append(test) + + # Look for tests in a module's contained objects. 
+ if inspect.ismodule(obj) and self._recurse: + for valname, val in obj.__dict__.items(): + valname = '%s.%s' % (name, valname) + # Recurse to functions & classes. + if ((inspect.isroutine(inspect.unwrap(val)) + or inspect.isclass(val)) and + self._from_module(module, val)): + self._find(tests, val, valname, module, source_lines, + globs, seen) + + # Look for tests in a module's __test__ dictionary. + if inspect.ismodule(obj) and self._recurse: + for valname, val in getattr(obj, '__test__', {}).items(): + if not isinstance(valname, str): + raise ValueError("DocTestFinder.find: __test__ keys " + "must be strings: %r" % + (type(valname),)) + if not (inspect.isroutine(val) or inspect.isclass(val) or + inspect.ismodule(val) or isinstance(val, str)): + raise ValueError("DocTestFinder.find: __test__ values " + "must be strings, functions, methods, " + "classes, or modules: %r" % + (type(val),)) + valname = '%s.__test__.%s' % (name, valname) + self._find(tests, val, valname, module, source_lines, + globs, seen) + + # Look for tests in a class's contained objects. + if inspect.isclass(obj) and self._recurse: + for valname, val in obj.__dict__.items(): + # Special handling for staticmethod/classmethod. + if isinstance(val, staticmethod): + val = getattr(obj, valname) + if isinstance(val, classmethod): + val = getattr(obj, valname).__func__ + + # Recurse to methods, properties, and nested classes. + if ((inspect.isroutine(val) or inspect.isclass(val) or + isinstance(val, property)) and + self._from_module(module, val)): + valname = '%s.%s' % (name, valname) + self._find(tests, val, valname, module, source_lines, + globs, seen) + + def _get_test(self, obj, name, module, globs, source_lines): + """ + Return a DocTest for the given object, if it defines a docstring; + otherwise, return None. + """ + # Extract the object's docstring. If it doesn't have one, + # then return None (no test for this object). + if isinstance(obj, str): + docstring = obj + else: + try: + if obj.__doc__ is None: + docstring = '' + else: + docstring = obj.__doc__ + if not isinstance(docstring, str): + docstring = str(docstring) + except (TypeError, AttributeError): + docstring = '' + + # Find the docstring's location in the file. + lineno = self._find_lineno(obj, source_lines) + + # Don't bother if the docstring is empty. + if self._exclude_empty and not docstring: + return None + + # Return a DocTest for this object. + if module is None: + filename = None + else: + # __file__ can be None for namespace packages. + filename = getattr(module, '__file__', None) or module.__name__ + if filename[-4:] == ".pyc": + filename = filename[:-1] + return self._parser.get_doctest(docstring, globs, name, + filename, lineno) + + def _find_lineno(self, obj, source_lines): + """ + Return a line number of the given object's docstring. Note: + this method assumes that the object has a docstring. + """ + lineno = None + + # Find the line number for modules. + if inspect.ismodule(obj): + lineno = 0 + + # Find the line number for classes. + # Note: this could be fooled if a class is defined multiple + # times in a single file. + if inspect.isclass(obj): + if source_lines is None: + return None + pat = re.compile(r'^\s*class\s*%s\b' % + getattr(obj, '__name__', '-')) + for i, line in enumerate(source_lines): + if pat.match(line): + lineno = i + break + + # Find the line number for functions & methods. 
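+        # (The chain below unwraps method -> function -> code object, and
+        # traceback/frame -> code object; co_firstlineno is 1-based, so 1
+        # is subtracted to keep this module's 0-based line convention.)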
+ if inspect.ismethod(obj): obj = obj.__func__ + if inspect.isfunction(obj): obj = obj.__code__ + if inspect.istraceback(obj): obj = obj.tb_frame + if inspect.isframe(obj): obj = obj.f_code + if inspect.iscode(obj): + lineno = getattr(obj, 'co_firstlineno', None)-1 + + # Find the line number where the docstring starts. Assume + # that it's the first line that begins with a quote mark. + # Note: this could be fooled by a multiline function + # signature, where a continuation line begins with a quote + # mark. + if lineno is not None: + if source_lines is None: + return lineno+1 + pat = re.compile(r'(^|.*:)\s*\w*("|\')') + for lineno in range(lineno, len(source_lines)): + if pat.match(source_lines[lineno]): + return lineno + + # We couldn't find the line number. + return None + +###################################################################### +## 5. DocTest Runner +###################################################################### + +class DocTestRunner: + """ + A class used to run DocTest test cases, and accumulate statistics. + The `run` method is used to process a single DocTest case. It + returns a tuple `(f, t)`, where `t` is the number of test cases + tried, and `f` is the number of test cases that failed. + + >>> tests = DocTestFinder().find(_TestClass) + >>> runner = DocTestRunner(verbose=False) + >>> tests.sort(key = lambda test: test.name) + >>> for test in tests: + ... print(test.name, '->', runner.run(test)) + _TestClass -> TestResults(failed=0, attempted=2) + _TestClass.__init__ -> TestResults(failed=0, attempted=2) + _TestClass.get -> TestResults(failed=0, attempted=2) + _TestClass.square -> TestResults(failed=0, attempted=1) + + The `summarize` method prints a summary of all the test cases that + have been run by the runner, and returns an aggregated `(f, t)` + tuple: + + >>> runner.summarize(verbose=1) + 4 items passed all tests: + 2 tests in _TestClass + 2 tests in _TestClass.__init__ + 2 tests in _TestClass.get + 1 tests in _TestClass.square + 7 tests in 4 items. + 7 passed and 0 failed. + Test passed. + TestResults(failed=0, attempted=7) + + The aggregated number of tried examples and failed examples is + also available via the `tries` and `failures` attributes: + + >>> runner.tries + 7 + >>> runner.failures + 0 + + The comparison between expected outputs and actual outputs is done + by an `OutputChecker`. This comparison may be customized with a + number of option flags; see the documentation for `testmod` for + more information. If the option flags are insufficient, then the + comparison may also be customized by passing a subclass of + `OutputChecker` to the constructor. + + The test runner's display output can be controlled in two ways. + First, an output function (`out) can be passed to + `TestRunner.run`; this function will be called with strings that + should be displayed. It defaults to `sys.stdout.write`. If + capturing the output is not sufficient, then the display output + can be also customized by subclassing DocTestRunner, and + overriding the methods `report_start`, `report_success`, + `report_unexpected_exception`, and `report_failure`. + """ + # This divider string is used to separate failure messages, and to + # separate sections of the summary. + DIVIDER = "*" * 70 + + def __init__(self, checker=None, verbose=None, optionflags=0): + """ + Create a new test runner. + + Optional keyword arg `checker` is the `OutputChecker` that + should be used to compare the expected outputs and actual + outputs of doctest examples. 
+ + Optional keyword arg 'verbose' prints lots of stuff if true, + only failures if false; by default, it's true iff '-v' is in + sys.argv. + + Optional argument `optionflags` can be used to control how the + test runner compares expected output to actual output, and how + it displays failures. See the documentation for `testmod` for + more information. + """ + self._checker = checker or OutputChecker() + if verbose is None: + verbose = '-v' in sys.argv + self._verbose = verbose + self.optionflags = optionflags + self.original_optionflags = optionflags + + # Keep track of the examples we've run. + self.tries = 0 + self.failures = 0 + self._name2ft = {} + + # Create a fake output target for capturing doctest output. + self._fakeout = _SpoofOut() + + #///////////////////////////////////////////////////////////////// + # Reporting methods + #///////////////////////////////////////////////////////////////// + + def report_start(self, out, test, example): + """ + Report that the test runner is about to process the given + example. (Only displays a message if verbose=True) + """ + if self._verbose: + if example.want: + out('Trying:\n' + _indent(example.source) + + 'Expecting:\n' + _indent(example.want)) + else: + out('Trying:\n' + _indent(example.source) + + 'Expecting nothing\n') + + def report_success(self, out, test, example, got): + """ + Report that the given example ran successfully. (Only + displays a message if verbose=True) + """ + if self._verbose: + out("ok\n") + + def report_failure(self, out, test, example, got): + """ + Report that the given example failed. + """ + out(self._failure_header(test, example) + + self._checker.output_difference(example, got, self.optionflags)) + + def report_unexpected_exception(self, out, test, example, exc_info): + """ + Report that the given example raised an unexpected exception. + """ + out(self._failure_header(test, example) + + 'Exception raised:\n' + _indent(_exception_traceback(exc_info))) + + def _failure_header(self, test, example): + out = [self.DIVIDER] + if test.filename: + if test.lineno is not None and example.lineno is not None: + lineno = test.lineno + example.lineno + 1 + else: + lineno = '?' + out.append('File "%s", line %s, in %s' % + (test.filename, lineno, test.name)) + else: + out.append('Line %s, in %s' % (example.lineno+1, test.name)) + out.append('Failed example:') + source = example.source + out.append(_indent(source)) + return '\n'.join(out) + + #///////////////////////////////////////////////////////////////// + # DocTest Running + #///////////////////////////////////////////////////////////////// + + def __run(self, test, compileflags, out): + """ + Run the examples in `test`. Write the outcome of each example + with one of the `DocTestRunner.report_*` methods, using the + writer function `out`. `compileflags` is the set of compiler + flags that should be used to execute examples. Return a tuple + `(f, t)`, where `t` is the number of examples tried, and `f` + is the number of examples that failed. The examples are run + in the namespace `test.globs`. + """ + # Keep track of the number of failures and tries. + failures = tries = 0 + + # Save the option flags (since option directives can be used + # to modify them). + original_optionflags = self.optionflags + + SUCCESS, FAILURE, BOOM = range(3) # `outcome` state + + check = self._checker.check_output + + # Process each example. 
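+        # (Each example ends up in exactly one of the three states declared
+        # above: SUCCESS when the output or an expected exception matches,
+        # FAILURE on a mismatch, and BOOM when an exception was raised that
+        # the example did not declare.)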
+        for examplenum, example in enumerate(test.examples):
+
+            # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
+            # reporting after the first failure.
+            quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
+                     failures > 0)
+
+            # Merge in the example's options.
+            self.optionflags = original_optionflags
+            if example.options:
+                for (optionflag, val) in example.options.items():
+                    if val:
+                        self.optionflags |= optionflag
+                    else:
+                        self.optionflags &= ~optionflag
+
+            # If 'SKIP' is set, then skip this example.
+            if self.optionflags & SKIP:
+                continue
+
+            # Record that we started this example.
+            tries += 1
+            if not quiet:
+                self.report_start(out, test, example)
+
+            # Use a special filename for compile(), so we can retrieve
+            # the source code during interactive debugging (see
+            # __patched_linecache_getlines).
+            filename = '<doctest %s[%d]>' % (test.name, examplenum)
+
+            # Run the example in the given context (globs), and record
+            # any exception that gets raised.  (But don't intercept
+            # keyboard interrupts.)
+            try:
+                # Don't blink!  This is where the user's code gets run.
+                exec(compile(example.source, filename, "single",
+                             compileflags, 1), test.globs)
+                self.debugger.set_continue() # ==== Example Finished ====
+                exception = None
+            except KeyboardInterrupt:
+                raise
+            except:
+                exception = sys.exc_info()
+                self.debugger.set_continue() # ==== Example Finished ====
+
+            got = self._fakeout.getvalue()  # the actual output
+            self._fakeout.truncate(0)
+            outcome = FAILURE   # guilty until proved innocent or insane
+
+            # If the example executed without raising any exceptions,
+            # verify its output.
+            if exception is None:
+                if check(example.want, got, self.optionflags):
+                    outcome = SUCCESS
+
+            # The example raised an exception: check if it was expected.
+            else:
+                exc_msg = traceback.format_exception_only(*exception[:2])[-1]
+                if not quiet:
+                    got += _exception_traceback(exception)
+
+                # If `example.exc_msg` is None, then we weren't expecting
+                # an exception.
+                if example.exc_msg is None:
+                    outcome = BOOM
+
+                # We expected an exception: see whether it matches.
+                elif check(example.exc_msg, exc_msg, self.optionflags):
+                    outcome = SUCCESS
+
+                # Another chance if they didn't care about the detail.
+                elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
+                    if check(_strip_exception_details(example.exc_msg),
+                             _strip_exception_details(exc_msg),
+                             self.optionflags):
+                        outcome = SUCCESS
+
+            # Report the outcome.
+            if outcome is SUCCESS:
+                if not quiet:
+                    self.report_success(out, test, example, got)
+            elif outcome is FAILURE:
+                if not quiet:
+                    self.report_failure(out, test, example, got)
+                failures += 1
+            elif outcome is BOOM:
+                if not quiet:
+                    self.report_unexpected_exception(out, test, example,
+                                                     exception)
+                failures += 1
+            else:
+                assert False, ("unknown outcome", outcome)
+
+            if failures and self.optionflags & FAIL_FAST:
+                break
+
+        # Restore the option flags (in case they were modified)
+        self.optionflags = original_optionflags
+
+        # Record and return the number of failures and tries.
+        self.__record_outcome(test, failures, tries)
+        return TestResults(failures, tries)
+
+    def __record_outcome(self, test, f, t):
+        """
+        Record the fact that the given DocTest (`test`) generated `f`
+        failures out of `t` tried examples.
+        """
+        f2, t2 = self._name2ft.get(test.name, (0,0))
+        self._name2ft[test.name] = (f+f2, t+t2)
+        self.failures += f
+        self.tries += t
+
+    __LINECACHE_FILENAME_RE = re.compile(r'<doctest '
+                                         r'(?P<name>.+)'
+                                         r'\[(?P<examplenum>\d+)\]>$')
+    def __patched_linecache_getlines(self, filename, module_globals=None):
+        m = self.__LINECACHE_FILENAME_RE.match(filename)
+        if m and m.group('name') == self.test.name:
+            example = self.test.examples[int(m.group('examplenum'))]
+            return example.source.splitlines(keepends=True)
+        else:
+            return self.save_linecache_getlines(filename, module_globals)
+
+    def run(self, test, compileflags=None, out=None, clear_globs=True):
+        """
+        Run the examples in `test`, and display the results using the
+        writer function `out`.
+
+        The examples are run in the namespace `test.globs`.  If
+        `clear_globs` is true (the default), then this namespace will
+        be cleared after the test runs, to help with garbage
+        collection.  If you would like to examine the namespace after
+        the test completes, then use `clear_globs=False`.
+
+        `compileflags` gives the set of flags that should be used by
+        the Python compiler when running the examples.  If not
+        specified, then it will default to the set of future-import
+        flags that apply to `globs`.
+
+        The output of each example is checked using
+        `DocTestRunner.check_output`, and the results are formatted by
+        the `DocTestRunner.report_*` methods.
+        """
+        self.test = test
+
+        if compileflags is None:
+            compileflags = _extract_future_flags(test.globs)
+
+        save_stdout = sys.stdout
+        if out is None:
+            encoding = save_stdout.encoding
+            if encoding is None or encoding.lower() == 'utf-8':
+                out = save_stdout.write
+            else:
+                # Use backslashreplace error handling on write
+                def out(s):
+                    s = str(s.encode(encoding, 'backslashreplace'), encoding)
+                    save_stdout.write(s)
+        sys.stdout = self._fakeout
+
+        # Patch pdb.set_trace to restore sys.stdout during interactive
+        # debugging (so it's not still redirected to self._fakeout).
+        # Note that the interactive output will go to *our*
+        # save_stdout, even if that's not the real sys.stdout; this
+        # allows us to write test cases for the set_trace behavior.
+        save_trace = sys.gettrace()
+        save_set_trace = pdb.set_trace
+        self.debugger = _OutputRedirectingPdb(save_stdout)
+        self.debugger.reset()
+        pdb.set_trace = self.debugger.set_trace
+
+        # Patch linecache.getlines, so we can see the example's source
+        # when we're inside the debugger.
+        self.save_linecache_getlines = linecache.getlines
+        linecache.getlines = self.__patched_linecache_getlines
+
+        # Make sure sys.displayhook just prints the value to stdout
+        save_displayhook = sys.displayhook
+        sys.displayhook = sys.__displayhook__
+
+        try:
+            return self.__run(test, compileflags, out)
+        finally:
+            sys.stdout = save_stdout
+            pdb.set_trace = save_set_trace
+            sys.settrace(save_trace)
+            linecache.getlines = self.save_linecache_getlines
+            sys.displayhook = save_displayhook
+            if clear_globs:
+                test.globs.clear()
+                import builtins
+                builtins._ = None
+
+    #/////////////////////////////////////////////////////////////////
+    # Summarization
+    #/////////////////////////////////////////////////////////////////
+    def summarize(self, verbose=None):
+        """
+        Print a summary of all the test cases that have been run by
+        this DocTestRunner, and return a tuple `(f, t)`, where `f` is
+        the total number of failed examples, and `t` is the total
+        number of tried examples.
+
+        The optional `verbose` argument controls how detailed the
+        summary is.  
If the verbosity is not specified, then the + DocTestRunner's verbosity is used. + """ + if verbose is None: + verbose = self._verbose + notests = [] + passed = [] + failed = [] + totalt = totalf = 0 + for x in self._name2ft.items(): + name, (f, t) = x + assert f <= t + totalt += t + totalf += f + if t == 0: + notests.append(name) + elif f == 0: + passed.append( (name, t) ) + else: + failed.append(x) + if verbose: + if notests: + print(len(notests), "items had no tests:") + notests.sort() + for thing in notests: + print(" ", thing) + if passed: + print(len(passed), "items passed all tests:") + passed.sort() + for thing, count in passed: + print(" %3d tests in %s" % (count, thing)) + if failed: + print(self.DIVIDER) + print(len(failed), "items had failures:") + failed.sort() + for thing, (f, t) in failed: + print(" %3d of %3d in %s" % (f, t, thing)) + if verbose: + print(totalt, "tests in", len(self._name2ft), "items.") + print(totalt - totalf, "passed and", totalf, "failed.") + if totalf: + print("***Test Failed***", totalf, "failures.") + elif verbose: + print("Test passed.") + return TestResults(totalf, totalt) + + #///////////////////////////////////////////////////////////////// + # Backward compatibility cruft to maintain doctest.master. + #///////////////////////////////////////////////////////////////// + def merge(self, other): + d = self._name2ft + for name, (f, t) in other._name2ft.items(): + if name in d: + # Don't print here by default, since doing + # so breaks some of the buildbots + #print("*** DocTestRunner.merge: '" + name + "' in both" \ + # " testers; summing outcomes.") + f2, t2 = d[name] + f = f + f2 + t = t + t2 + d[name] = f, t + +class OutputChecker: + """ + A class used to check the whether the actual output from a doctest + example matches the expected output. `OutputChecker` defines two + methods: `check_output`, which compares a given pair of outputs, + and returns true if they match; and `output_difference`, which + returns a string describing the differences between two outputs. + """ + def _toAscii(self, s): + """ + Convert string to hex-escaped ASCII string. + """ + return str(s.encode('ASCII', 'backslashreplace'), "ASCII") + + def check_output(self, want, got, optionflags): + """ + Return True iff the actual output from an example (`got`) + matches the expected output (`want`). These strings are + always considered to match if they are identical; but + depending on what option flags the test runner is using, + several non-exact match types are also possible. See the + documentation for `TestRunner` for more information about + option flags. + """ + + # If `want` contains hex-escaped character such as "\u1234", + # then `want` is a string of six characters(e.g. [\,u,1,2,3,4]). + # On the other hand, `got` could be another sequence of + # characters such as [\u1234], so `want` and `got` should + # be folded to hex-escaped ASCII string to compare. + got = self._toAscii(got) + want = self._toAscii(want) + + # Handle the common case first, for efficiency: + # if they're string-identical, always return true. + if got == want: + return True + + # The values True and False replaced 1 and 0 as the return + # value for boolean comparisons in Python 2.3. + if not (optionflags & DONT_ACCEPT_TRUE_FOR_1): + if (got,want) == ("True\n", "1\n"): + return True + if (got,want) == ("False\n", "0\n"): + return True + + # can be used as a special sequence to signify a + # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used. 
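+        # (The marker referred to above is BLANKLINE_MARKER, i.e. the
+        # literal text '<BLANKLINE>' defined earlier in this module.)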
+        if not (optionflags & DONT_ACCEPT_BLANKLINE):
+            # Replace <BLANKLINE> in want with a blank line.
+            want = re.sub(r'(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
+                          '', want)
+            # If a line in got contains only spaces, then remove the
+            # spaces.
+            got = re.sub(r'(?m)^[^\S\n]+$', '', got)
+            if got == want:
+                return True
+
+        # This flag causes doctest to ignore any differences in the
+        # contents of whitespace strings.  Note that this can be used
+        # in conjunction with the ELLIPSIS flag.
+        if optionflags & NORMALIZE_WHITESPACE:
+            got = ' '.join(got.split())
+            want = ' '.join(want.split())
+            if got == want:
+                return True
+
+        # The ELLIPSIS flag says to let the sequence "..." in `want`
+        # match any substring in `got`.
+        if optionflags & ELLIPSIS:
+            if _ellipsis_match(want, got):
+                return True
+
+        # We didn't find any match; return false.
+        return False
+
+    # Should we do a fancy diff?
+    def _do_a_fancy_diff(self, want, got, optionflags):
+        # Not unless they asked for a fancy diff.
+        if not optionflags & (REPORT_UDIFF |
+                              REPORT_CDIFF |
+                              REPORT_NDIFF):
+            return False
+
+        # If expected output uses ellipsis, a meaningful fancy diff is
+        # too hard ... or maybe not.  In two real-life failures Tim saw,
+        # a diff was a major help anyway, so this is commented out.
+        # [todo] _ellipsis_match() knows which pieces do and don't match,
+        # and could be the basis for a kick-ass diff in this case.
+        ##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
+        ##    return False
+
+        # ndiff does intraline difference marking, so can be useful even
+        # for 1-line differences.
+        if optionflags & REPORT_NDIFF:
+            return True
+
+        # The other diff types need at least a few lines to be helpful.
+        return want.count('\n') > 2 and got.count('\n') > 2
+
+    def output_difference(self, example, got, optionflags):
+        """
+        Return a string describing the differences between the
+        expected output for a given example (`example`) and the actual
+        output (`got`).  `optionflags` is the set of option flags used
+        to compare `want` and `got`.
+        """
+        want = example.want
+        # If <BLANKLINE>s are being used, then replace blank lines
+        # with <BLANKLINE> in the actual output string.
+        if not (optionflags & DONT_ACCEPT_BLANKLINE):
+            got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
+
+        # Check if we should use diff.
+        if self._do_a_fancy_diff(want, got, optionflags):
+            # Split want & got into lines.
+            want_lines = want.splitlines(keepends=True)
+            got_lines = got.splitlines(keepends=True)
+            # Use difflib to find their differences.
+            if optionflags & REPORT_UDIFF:
+                diff = difflib.unified_diff(want_lines, got_lines, n=2)
+                diff = list(diff)[2:] # strip the diff header
+                kind = 'unified diff with -expected +actual'
+            elif optionflags & REPORT_CDIFF:
+                diff = difflib.context_diff(want_lines, got_lines, n=2)
+                diff = list(diff)[2:] # strip the diff header
+                kind = 'context diff with expected followed by actual'
+            elif optionflags & REPORT_NDIFF:
+                engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
+                diff = list(engine.compare(want_lines, got_lines))
+                kind = 'ndiff with -expected +actual'
+            else:
+                assert 0, 'Bad diff option'
+            return 'Differences (%s):\n' % kind + _indent(''.join(diff))
+
+        # If we're not using diff, then simply list the expected
+        # output followed by the actual output.
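+        # For example (illustrative only): with want == '1\n' and
+        # got == '2\n', the string returned below is
+        # 'Expected:\n    1\nGot:\n    2\n'.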
+ if want and got: + return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got)) + elif want: + return 'Expected:\n%sGot nothing\n' % _indent(want) + elif got: + return 'Expected nothing\nGot:\n%s' % _indent(got) + else: + return 'Expected nothing\nGot nothing\n' + +class DocTestFailure(Exception): + """A DocTest example has failed in debugging mode. + + The exception instance has variables: + + - test: the DocTest object being run + + - example: the Example object that failed + + - got: the actual output + """ + def __init__(self, test, example, got): + self.test = test + self.example = example + self.got = got + + def __str__(self): + return str(self.test) + +class UnexpectedException(Exception): + """A DocTest example has encountered an unexpected exception + + The exception instance has variables: + + - test: the DocTest object being run + + - example: the Example object that failed + + - exc_info: the exception info + """ + def __init__(self, test, example, exc_info): + self.test = test + self.example = example + self.exc_info = exc_info + + def __str__(self): + return str(self.test) + +class DebugRunner(DocTestRunner): + r"""Run doc tests but raise an exception as soon as there is a failure. + + If an unexpected exception occurs, an UnexpectedException is raised. + It contains the test, the example, and the original exception: + + >>> runner = DebugRunner(verbose=False) + >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42', + ... {}, 'foo', 'foo.py', 0) + >>> try: + ... runner.run(test) + ... except UnexpectedException as f: + ... failure = f + + >>> failure.test is test + True + + >>> failure.example.want + '42\n' + + >>> exc_info = failure.exc_info + >>> raise exc_info[1] # Already has the traceback + Traceback (most recent call last): + ... + KeyError + + We wrap the original exception to give the calling application + access to the test and example information. + + If the output doesn't match, then a DocTestFailure is raised: + + >>> test = DocTestParser().get_doctest(''' + ... >>> x = 1 + ... >>> x + ... 2 + ... ''', {}, 'foo', 'foo.py', 0) + + >>> try: + ... runner.run(test) + ... except DocTestFailure as f: + ... failure = f + + DocTestFailure objects provide access to the test: + + >>> failure.test is test + True + + As well as to the example: + + >>> failure.example.want + '2\n' + + and the actual output: + + >>> failure.got + '1\n' + + If a failure or error occurs, the globals are left intact: + + >>> del test.globs['__builtins__'] + >>> test.globs + {'x': 1} + + >>> test = DocTestParser().get_doctest(''' + ... >>> x = 2 + ... >>> raise KeyError + ... ''', {}, 'foo', 'foo.py', 0) + + >>> runner.run(test) + Traceback (most recent call last): + ... + doctest.UnexpectedException: + + >>> del test.globs['__builtins__'] + >>> test.globs + {'x': 2} + + But the globals are cleared if there is no error: + + >>> test = DocTestParser().get_doctest(''' + ... >>> x = 2 + ... 
''', {}, 'foo', 'foo.py', 0) + + >>> runner.run(test) + TestResults(failed=0, attempted=1) + + >>> test.globs + {} + + """ + + def run(self, test, compileflags=None, out=None, clear_globs=True): + r = DocTestRunner.run(self, test, compileflags, out, False) + if clear_globs: + test.globs.clear() + return r + + def report_unexpected_exception(self, out, test, example, exc_info): + raise UnexpectedException(test, example, exc_info) + + def report_failure(self, out, test, example, got): + raise DocTestFailure(test, example, got) + +###################################################################### +## 6. Test Functions +###################################################################### +# These should be backwards compatible. + +# For backward compatibility, a global instance of a DocTestRunner +# class, updated by testmod. +master = None + +def testmod(m=None, name=None, globs=None, verbose=None, + report=True, optionflags=0, extraglobs=None, + raise_on_error=False, exclude_empty=False): + """m=None, name=None, globs=None, verbose=None, report=True, + optionflags=0, extraglobs=None, raise_on_error=False, + exclude_empty=False + + Test examples in docstrings in functions and classes reachable + from module m (or the current module if m is not supplied), starting + with m.__doc__. + + Also test examples reachable from dict m.__test__ if it exists and is + not None. m.__test__ maps names to functions, classes and strings; + function and class docstrings are tested even if the name is private; + strings are tested directly, as if they were docstrings. + + Return (#failures, #tests). + + See help(doctest) for an overview. + + Optional keyword arg "name" gives the name of the module; by default + use m.__name__. + + Optional keyword arg "globs" gives a dict to be used as the globals + when executing examples; by default, use m.__dict__. A copy of this + dict is actually used for each docstring, so that each docstring's + examples start with a clean slate. + + Optional keyword arg "extraglobs" gives a dictionary that should be + merged into the globals that are used to execute examples. By + default, no extra globals are used. This is new in 2.4. + + Optional keyword arg "verbose" prints lots of stuff if true, prints + only failures if false; by default, it's true iff "-v" is in sys.argv. + + Optional keyword arg "report" prints a summary at the end when true, + else prints nothing at the end. In verbose mode, the summary is + detailed, else very brief (in fact, empty if all tests passed). + + Optional keyword arg "optionflags" or's together module constants, + and defaults to 0. This is new in 2.3. Possible values (see the + docs for details): + + DONT_ACCEPT_TRUE_FOR_1 + DONT_ACCEPT_BLANKLINE + NORMALIZE_WHITESPACE + ELLIPSIS + SKIP + IGNORE_EXCEPTION_DETAIL + REPORT_UDIFF + REPORT_CDIFF + REPORT_NDIFF + REPORT_ONLY_FIRST_FAILURE + + Optional keyword arg "raise_on_error" raises an exception on the + first unexpected exception or failure. This allows failures to be + post-mortem debugged. + + Advanced tomfoolery: testmod runs methods of a local instance of + class doctest.Tester, then merges the results into (or creates) + global Tester instance doctest.master. Methods of doctest.master + can be called directly too, if you want to do something unusual. + Passing report=0 to testmod is especially useful then, to delay + displaying a summary. Invoke doctest.master.summarize(verbose) + when you're done fiddling. + """ + global master + + # If no module was given, then use __main__. 
+ if m is None: + # DWA - m will still be None if this wasn't invoked from the command + # line, in which case the following TypeError is about as good an error + # as we should expect + m = sys.modules.get('__main__') + + # Check that we were actually given a module. + if not inspect.ismodule(m): + raise TypeError("testmod: module required; %r" % (m,)) + + # If no name was given, then use the module's name. + if name is None: + name = m.__name__ + + # Find, parse, and run all tests in the given module. + finder = DocTestFinder(exclude_empty=exclude_empty) + + if raise_on_error: + runner = DebugRunner(verbose=verbose, optionflags=optionflags) + else: + runner = DocTestRunner(verbose=verbose, optionflags=optionflags) + + for test in finder.find(m, name, globs=globs, extraglobs=extraglobs): + runner.run(test) + + if report: + runner.summarize() + + if master is None: + master = runner + else: + master.merge(runner) + + return TestResults(runner.failures, runner.tries) + +def testfile(filename, module_relative=True, name=None, package=None, + globs=None, verbose=None, report=True, optionflags=0, + extraglobs=None, raise_on_error=False, parser=DocTestParser(), + encoding=None): + """ + Test examples in the given file. Return (#failures, #tests). + + Optional keyword arg "module_relative" specifies how filenames + should be interpreted: + + - If "module_relative" is True (the default), then "filename" + specifies a module-relative path. By default, this path is + relative to the calling module's directory; but if the + "package" argument is specified, then it is relative to that + package. To ensure os-independence, "filename" should use + "/" characters to separate path segments, and should not + be an absolute path (i.e., it may not begin with "/"). + + - If "module_relative" is False, then "filename" specifies an + os-specific path. The path may be absolute or relative (to + the current working directory). + + Optional keyword arg "name" gives the name of the test; by default + use the file's basename. + + Optional keyword argument "package" is a Python package or the + name of a Python package whose directory should be used as the + base directory for a module relative filename. If no package is + specified, then the calling module's directory is used as the base + directory for module relative filenames. It is an error to + specify "package" if "module_relative" is False. + + Optional keyword arg "globs" gives a dict to be used as the globals + when executing examples; by default, use {}. A copy of this dict + is actually used for each docstring, so that each docstring's + examples start with a clean slate. + + Optional keyword arg "extraglobs" gives a dictionary that should be + merged into the globals that are used to execute examples. By + default, no extra globals are used. + + Optional keyword arg "verbose" prints lots of stuff if true, prints + only failures if false; by default, it's true iff "-v" is in sys.argv. + + Optional keyword arg "report" prints a summary at the end when true, + else prints nothing at the end. In verbose mode, the summary is + detailed, else very brief (in fact, empty if all tests passed). + + Optional keyword arg "optionflags" or's together module constants, + and defaults to 0. 
Possible values (see the docs for details): + + DONT_ACCEPT_TRUE_FOR_1 + DONT_ACCEPT_BLANKLINE + NORMALIZE_WHITESPACE + ELLIPSIS + SKIP + IGNORE_EXCEPTION_DETAIL + REPORT_UDIFF + REPORT_CDIFF + REPORT_NDIFF + REPORT_ONLY_FIRST_FAILURE + + Optional keyword arg "raise_on_error" raises an exception on the + first unexpected exception or failure. This allows failures to be + post-mortem debugged. + + Optional keyword arg "parser" specifies a DocTestParser (or + subclass) that should be used to extract tests from the files. + + Optional keyword arg "encoding" specifies an encoding that should + be used to convert the file to unicode. + + Advanced tomfoolery: testmod runs methods of a local instance of + class doctest.Tester, then merges the results into (or creates) + global Tester instance doctest.master. Methods of doctest.master + can be called directly too, if you want to do something unusual. + Passing report=0 to testmod is especially useful then, to delay + displaying a summary. Invoke doctest.master.summarize(verbose) + when you're done fiddling. + """ + global master + + if package and not module_relative: + raise ValueError("Package may only be specified for module-" + "relative paths.") + + # Relativize the path + text, filename = _load_testfile(filename, package, module_relative, + encoding or "utf-8") + + # If no name was given, then use the file's name. + if name is None: + name = os.path.basename(filename) + + # Assemble the globals. + if globs is None: + globs = {} + else: + globs = globs.copy() + if extraglobs is not None: + globs.update(extraglobs) + if '__name__' not in globs: + globs['__name__'] = '__main__' + + if raise_on_error: + runner = DebugRunner(verbose=verbose, optionflags=optionflags) + else: + runner = DocTestRunner(verbose=verbose, optionflags=optionflags) + + # Read the file, convert it to a test, and run it. + test = parser.get_doctest(text, globs, name, filename, 0) + runner.run(test) + + if report: + runner.summarize() + + if master is None: + master = runner + else: + master.merge(runner) + + return TestResults(runner.failures, runner.tries) + +def run_docstring_examples(f, globs, verbose=False, name="NoName", + compileflags=None, optionflags=0): + """ + Test examples in the given object's docstring (`f`), using `globs` + as globals. Optional argument `name` is used in failure messages. + If the optional argument `verbose` is true, then generate output + even if there are no failures. + + `compileflags` gives the set of flags that should be used by the + Python compiler when running the examples. If not specified, then + it will default to the set of future-import flags that apply to + `globs`. + + Optional keyword arg `optionflags` specifies options for the + testing and output. See the documentation for `testmod` for more + information. + """ + # Find, parse, and run all tests in the given module. + finder = DocTestFinder(verbose=verbose, recurse=False) + runner = DocTestRunner(verbose=verbose, optionflags=optionflags) + for test in finder.find(f, name, globs=globs): + runner.run(test, compileflags=compileflags) + +###################################################################### +## 7. Unittest Support +###################################################################### + +_unittest_reportflags = 0 + +def set_unittest_reportflags(flags): + """Sets the unittest option flags. 
+ + The old flag is returned so that a runner could restore the old + value if it wished to: + + >>> import doctest + >>> old = doctest._unittest_reportflags + >>> doctest.set_unittest_reportflags(REPORT_NDIFF | + ... REPORT_ONLY_FIRST_FAILURE) == old + True + + >>> doctest._unittest_reportflags == (REPORT_NDIFF | + ... REPORT_ONLY_FIRST_FAILURE) + True + + Only reporting flags can be set: + + >>> doctest.set_unittest_reportflags(ELLIPSIS) + Traceback (most recent call last): + ... + ValueError: ('Only reporting flags allowed', 8) + + >>> doctest.set_unittest_reportflags(old) == (REPORT_NDIFF | + ... REPORT_ONLY_FIRST_FAILURE) + True + """ + global _unittest_reportflags + + if (flags & REPORTING_FLAGS) != flags: + raise ValueError("Only reporting flags allowed", flags) + old = _unittest_reportflags + _unittest_reportflags = flags + return old + + +class DocTestCase(unittest.TestCase): + + def __init__(self, test, optionflags=0, setUp=None, tearDown=None, + checker=None): + + unittest.TestCase.__init__(self) + self._dt_optionflags = optionflags + self._dt_checker = checker + self._dt_test = test + self._dt_setUp = setUp + self._dt_tearDown = tearDown + + def setUp(self): + test = self._dt_test + + if self._dt_setUp is not None: + self._dt_setUp(test) + + def tearDown(self): + test = self._dt_test + + if self._dt_tearDown is not None: + self._dt_tearDown(test) + + test.globs.clear() + + def runTest(self): + test = self._dt_test + old = sys.stdout + new = StringIO() + optionflags = self._dt_optionflags + + if not (optionflags & REPORTING_FLAGS): + # The option flags don't include any reporting flags, + # so add the default reporting flags + optionflags |= _unittest_reportflags + + runner = DocTestRunner(optionflags=optionflags, + checker=self._dt_checker, verbose=False) + + try: + runner.DIVIDER = "-"*70 + failures, tries = runner.run( + test, out=new.write, clear_globs=False) + finally: + sys.stdout = old + + if failures: + raise self.failureException(self.format_failure(new.getvalue())) + + def format_failure(self, err): + test = self._dt_test + if test.lineno is None: + lineno = 'unknown line number' + else: + lineno = '%s' % test.lineno + lname = '.'.join(test.name.split('.')[-1:]) + return ('Failed doctest test for %s\n' + ' File "%s", line %s, in %s\n\n%s' + % (test.name, test.filename, lineno, lname, err) + ) + + def debug(self): + r"""Run the test case without results and without catching exceptions + + The unit test framework includes a debug method on test cases + and test suites to support post-mortem debugging. The test code + is run in such a way that errors are not caught. This way a + caller can catch the errors and initiate post-mortem debugging. + + The DocTestCase provides a debug method that raises + UnexpectedException errors if there is an unexpected + exception: + + >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42', + ... {}, 'foo', 'foo.py', 0) + >>> case = DocTestCase(test) + >>> try: + ... case.debug() + ... except UnexpectedException as f: + ... failure = f + + The UnexpectedException contains the test, the example, and + the original exception: + + >>> failure.test is test + True + + >>> failure.example.want + '42\n' + + >>> exc_info = failure.exc_info + >>> raise exc_info[1] # Already has the traceback + Traceback (most recent call last): + ... + KeyError + + If the output doesn't match, then a DocTestFailure is raised: + + >>> test = DocTestParser().get_doctest(''' + ... >>> x = 1 + ... >>> x + ... 2 + ... 
''', {}, 'foo', 'foo.py', 0) + >>> case = DocTestCase(test) + + >>> try: + ... case.debug() + ... except DocTestFailure as f: + ... failure = f + + DocTestFailure objects provide access to the test: + + >>> failure.test is test + True + + As well as to the example: + + >>> failure.example.want + '2\n' + + and the actual output: + + >>> failure.got + '1\n' + + """ + + self.setUp() + runner = DebugRunner(optionflags=self._dt_optionflags, + checker=self._dt_checker, verbose=False) + runner.run(self._dt_test, clear_globs=False) + self.tearDown() + + def id(self): + return self._dt_test.name + + def __eq__(self, other): + if type(self) is not type(other): + return NotImplemented + + return self._dt_test == other._dt_test and \ + self._dt_optionflags == other._dt_optionflags and \ + self._dt_setUp == other._dt_setUp and \ + self._dt_tearDown == other._dt_tearDown and \ + self._dt_checker == other._dt_checker + + def __hash__(self): + return hash((self._dt_optionflags, self._dt_setUp, self._dt_tearDown, + self._dt_checker)) + + def __repr__(self): + name = self._dt_test.name.split('.') + return "%s (%s)" % (name[-1], '.'.join(name[:-1])) + + __str__ = object.__str__ + + def shortDescription(self): + return "Doctest: " + self._dt_test.name + +class SkipDocTestCase(DocTestCase): + def __init__(self, module): + self.module = module + DocTestCase.__init__(self, None) + + def setUp(self): + self.skipTest("DocTestSuite will not work with -O2 and above") + + def test_skip(self): + pass + + def shortDescription(self): + return "Skipping tests from %s" % self.module.__name__ + + __str__ = shortDescription + + +class _DocTestSuite(unittest.TestSuite): + + def _removeTestAtIndex(self, index): + pass + + +def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None, + **options): + """ + Convert doctest tests for a module to a unittest test suite. + + This converts each documentation string in a module that + contains doctest tests to a unittest test case. If any of the + tests in a doc string fail, then the test case fails. An exception + is raised showing the name of the file containing the test and a + (sometimes approximate) line number. + + The `module` argument provides the module to be tested. The argument + can be either a module or a module name. + + If no argument is given, the calling module is used. + + A number of options may be provided as keyword arguments: + + setUp + A set-up function. This is called before running the + tests in each file. The setUp function will be passed a DocTest + object. The setUp function can access the test globals as the + globs attribute of the test passed. + + tearDown + A tear-down function. This is called after running the + tests in each file. The tearDown function will be passed a DocTest + object. The tearDown function can access the test globals as the + globs attribute of the test passed. + + globs + A dictionary containing initial global variables for the tests. + + optionflags + A set of doctest option flags expressed as an integer. 
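# Illustrative sketch (not part of the patch): hooking DocTestSuite()
# into unittest discovery via the load_tests protocol. "mymodule" is a
# hypothetical module whose docstrings contain doctests.
import doctest
import unittest
import mymodule

def load_tests(loader, tests, ignore):
    tests.addTests(doctest.DocTestSuite(mymodule, optionflags=doctest.ELLIPSIS))
    return tests

if __name__ == "__main__":
    unittest.main()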
+ """ + + if test_finder is None: + test_finder = DocTestFinder() + + module = _normalize_module(module) + tests = test_finder.find(module, globs=globs, extraglobs=extraglobs) + + if not tests and sys.flags.optimize >=2: + # Skip doctests when running with -O2 + suite = _DocTestSuite() + suite.addTest(SkipDocTestCase(module)) + return suite + + tests.sort() + suite = _DocTestSuite() + + for test in tests: + if len(test.examples) == 0: + continue + if not test.filename: + filename = module.__file__ + if filename[-4:] == ".pyc": + filename = filename[:-1] + test.filename = filename + suite.addTest(DocTestCase(test, **options)) + + return suite + +class DocFileCase(DocTestCase): + + def id(self): + return '_'.join(self._dt_test.name.split('.')) + + def __repr__(self): + return self._dt_test.filename + + def format_failure(self, err): + return ('Failed doctest test for %s\n File "%s", line 0\n\n%s' + % (self._dt_test.name, self._dt_test.filename, err) + ) + +def DocFileTest(path, module_relative=True, package=None, + globs=None, parser=DocTestParser(), + encoding=None, **options): + if globs is None: + globs = {} + else: + globs = globs.copy() + + if package and not module_relative: + raise ValueError("Package may only be specified for module-" + "relative paths.") + + # Relativize the path. + doc, path = _load_testfile(path, package, module_relative, + encoding or "utf-8") + + if "__file__" not in globs: + globs["__file__"] = path + + # Find the file and read it. + name = os.path.basename(path) + + # Convert it to a test, and wrap it in a DocFileCase. + test = parser.get_doctest(doc, globs, name, path, 0) + return DocFileCase(test, **options) + +def DocFileSuite(*paths, **kw): + """A unittest suite for one or more doctest files. + + The path to each doctest file is given as a string; the + interpretation of that string depends on the keyword argument + "module_relative". + + A number of options may be provided as keyword arguments: + + module_relative + If "module_relative" is True, then the given file paths are + interpreted as os-independent module-relative paths. By + default, these paths are relative to the calling module's + directory; but if the "package" argument is specified, then + they are relative to that package. To ensure os-independence, + "filename" should use "/" characters to separate path + segments, and may not be an absolute path (i.e., it may not + begin with "/"). + + If "module_relative" is False, then the given file paths are + interpreted as os-specific paths. These paths may be absolute + or relative (to the current working directory). + + package + A Python package or the name of a Python package whose directory + should be used as the base directory for module relative paths. + If "package" is not specified, then the calling module's + directory is used as the base directory for module relative + filenames. It is an error to specify "package" if + "module_relative" is False. + + setUp + A set-up function. This is called before running the + tests in each file. The setUp function will be passed a DocTest + object. The setUp function can access the test globals as the + globs attribute of the test passed. + + tearDown + A tear-down function. This is called after running the + tests in each file. The tearDown function will be passed a DocTest + object. The tearDown function can access the test globals as the + globs attribute of the test passed. + + globs + A dictionary containing initial global variables for the tests. 
+ + optionflags + A set of doctest option flags expressed as an integer. + + parser + A DocTestParser (or subclass) that should be used to extract + tests from the files. + + encoding + An encoding that will be used to convert the files to unicode. + """ + suite = _DocTestSuite() + + # We do this here so that _normalize_module is called at the right + # level. If it were called in DocFileTest, then this function + # would be the caller and we might guess the package incorrectly. + if kw.get('module_relative', True): + kw['package'] = _normalize_module(kw.get('package')) + + for path in paths: + suite.addTest(DocFileTest(path, **kw)) + + return suite + +###################################################################### +## 8. Debugging Support +###################################################################### + +def script_from_examples(s): + r"""Extract script from text with examples. + + Converts text with examples to a Python script. Example input is + converted to regular code. Example output and all other words + are converted to comments: + + >>> text = ''' + ... Here are examples of simple math. + ... + ... Python has super accurate integer addition + ... + ... >>> 2 + 2 + ... 5 + ... + ... And very friendly error messages: + ... + ... >>> 1/0 + ... To Infinity + ... And + ... Beyond + ... + ... You can use logic if you want: + ... + ... >>> if 0: + ... ... blah + ... ... blah + ... ... + ... + ... Ho hum + ... ''' + + >>> print(script_from_examples(text)) + # Here are examples of simple math. + # + # Python has super accurate integer addition + # + 2 + 2 + # Expected: + ## 5 + # + # And very friendly error messages: + # + 1/0 + # Expected: + ## To Infinity + ## And + ## Beyond + # + # You can use logic if you want: + # + if 0: + blah + blah + # + # Ho hum + + """ + output = [] + for piece in DocTestParser().parse(s): + if isinstance(piece, Example): + # Add the example's source code (strip trailing NL) + output.append(piece.source[:-1]) + # Add the expected output: + want = piece.want + if want: + output.append('# Expected:') + output += ['## '+l for l in want.split('\n')[:-1]] + else: + # Add non-example text. + output += [_comment_line(l) + for l in piece.split('\n')[:-1]] + + # Trim junk on both ends. + while output and output[-1] == '#': + output.pop() + while output and output[0] == '#': + output.pop(0) + # Combine the output, and return it. + # Add a courtesy newline to prevent exec from choking (see bug #1172785) + return '\n'.join(output) + '\n' + +def testsource(module, name): + """Extract the test sources from a doctest docstring as a script. + + Provide the module (or dotted name of the module) containing the + test to be debugged and the name (within the module) of the object + with the doc string with tests to be debugged. + """ + module = _normalize_module(module) + tests = DocTestFinder().find(module) + test = [t for t in tests if t.name == name] + if not test: + raise ValueError(name, "not found in tests") + test = test[0] + testsrc = script_from_examples(test.docstring) + return testsrc + +def debug_src(src, pm=False, globs=None): + """Debug a single doctest docstring, in argument `src`'""" + testsrc = script_from_examples(src) + debug_script(testsrc, pm, globs) + +def debug_script(src, pm=False, globs=None): + "Debug a test script. `src` is the script, as a string." 
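# Illustrative sketch (not part of the patch): collecting text-file
# doctests with DocFileSuite(), described above. The file names are
# hypothetical and, by default, resolved relative to the calling module.
import doctest
import unittest

suite = doctest.DocFileSuite("usage.txt", "api.txt",
                             optionflags=doctest.NORMALIZE_WHITESPACE)
unittest.TextTestRunner().run(suite)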
+ import pdb + + if globs: + globs = globs.copy() + else: + globs = {} + + if pm: + try: + exec(src, globs, globs) + except: + print(sys.exc_info()[1]) + p = pdb.Pdb(nosigint=True) + p.reset() + p.interaction(None, sys.exc_info()[2]) + else: + pdb.Pdb(nosigint=True).run("exec(%r)" % src, globs, globs) + +def debug(module, name, pm=False): + """Debug a single doctest docstring. + + Provide the module (or dotted name of the module) containing the + test to be debugged and the name (within the module) of the object + with the docstring with tests to be debugged. + """ + module = _normalize_module(module) + testsrc = testsource(module, name) + debug_script(testsrc, pm, module.__dict__) + +###################################################################### +## 9. Example Usage +###################################################################### +class _TestClass: + """ + A pointless class, for sanity-checking of docstring testing. + + Methods: + square() + get() + + >>> _TestClass(13).get() + _TestClass(-12).get() + 1 + >>> hex(_TestClass(13).square().get()) + '0xa9' + """ + + def __init__(self, val): + """val -> _TestClass object with associated value val. + + >>> t = _TestClass(123) + >>> print(t.get()) + 123 + """ + + self.val = val + + def square(self): + """square() -> square TestClass's associated value + + >>> _TestClass(13).square().get() + 169 + """ + + self.val = self.val ** 2 + return self + + def get(self): + """get() -> return TestClass's associated value. + + >>> x = _TestClass(-42) + >>> print(x.get()) + -42 + """ + + return self.val + +__test__ = {"_TestClass": _TestClass, + "string": r""" + Example of a string object, searched as-is. + >>> x = 1; y = 2 + >>> x + y, x * y + (3, 2) + """, + + "bool-int equivalence": r""" + In 2.2, boolean expressions displayed + 0 or 1. By default, we still accept + them. This can be disabled by passing + DONT_ACCEPT_TRUE_FOR_1 to the new + optionflags argument. + >>> 4 == 4 + 1 + >>> 4 == 4 + True + >>> 4 > 4 + 0 + >>> 4 > 4 + False + """, + + "blank lines": r""" + Blank lines can be marked with : + >>> print('foo\n\nbar\n') + foo + + bar + + """, + + "ellipsis": r""" + If the ellipsis flag is used, then '...' can be used to + elide substrings in the desired output: + >>> print(list(range(1000))) #doctest: +ELLIPSIS + [0, 1, 2, ..., 999] + """, + + "whitespace normalization": r""" + If the whitespace normalization flag is used, then + differences in whitespace are ignored. 
+ >>> print(list(range(30))) #doctest: +NORMALIZE_WHITESPACE + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, + 27, 28, 29] + """, + } + + +def _test(): + import argparse + + parser = argparse.ArgumentParser(description="doctest runner") + parser.add_argument('-v', '--verbose', action='store_true', default=False, + help='print very verbose output for all tests') + parser.add_argument('-o', '--option', action='append', + choices=OPTIONFLAGS_BY_NAME.keys(), default=[], + help=('specify a doctest option flag to apply' + ' to the test run; may be specified more' + ' than once to apply multiple options')) + parser.add_argument('-f', '--fail-fast', action='store_true', + help=('stop running tests after first failure (this' + ' is a shorthand for -o FAIL_FAST, and is' + ' in addition to any other -o options)')) + parser.add_argument('file', nargs='+', + help='file containing the tests to run') + args = parser.parse_args() + testfiles = args.file + # Verbose used to be handled by the "inspect argv" magic in DocTestRunner, + # but since we are using argparse we are passing it manually now. + verbose = args.verbose + options = 0 + for option in args.option: + options |= OPTIONFLAGS_BY_NAME[option] + if args.fail_fast: + options |= FAIL_FAST + for filename in testfiles: + if filename.endswith(".py"): + # It is a module -- insert its dir into sys.path and try to + # import it. If it is part of a package, that possibly + # won't work because of package imports. + dirname, filename = os.path.split(filename) + sys.path.insert(0, dirname) + m = __import__(filename[:-3]) + del sys.path[0] + failures, _ = testmod(m, verbose=verbose, optionflags=options) + else: + failures, _ = testfile(filename, module_relative=False, + verbose=verbose, optionflags=options) + if failures: + return 1 + return 0 + + +if __name__ == "__main__": + sys.exit(_test()) diff --git a/Lib/pdb.py b/Lib/pdb.py new file mode 100755 index 00000000000..bf503f1e73e --- /dev/null +++ b/Lib/pdb.py @@ -0,0 +1,1730 @@ +#! /usr/bin/env python3 + +""" +The Python Debugger Pdb +======================= + +To use the debugger in its simplest form: + + >>> import pdb + >>> pdb.run('') + +The debugger's prompt is '(Pdb) '. This will stop in the first +function call in . + +Alternatively, if a statement terminated with an unhandled exception, +you can use pdb's post-mortem facility to inspect the contents of the +traceback: + + >>> + + >>> import pdb + >>> pdb.pm() + +The commands recognized by the debugger are listed in the next +section. Most can be abbreviated as indicated; e.g., h(elp) means +that 'help' can be typed as 'h' or 'help' (but not as 'he' or 'hel', +nor as 'H' or 'Help' or 'HELP'). Optional arguments are enclosed in +square brackets. Alternatives in the command syntax are separated +by a vertical bar (|). + +A blank line repeats the previous command literally, except for +'list', where it lists the next 11 lines. + +Commands that the debugger doesn't recognize are assumed to be Python +statements and are executed in the context of the program being +debugged. Python statements can also be prefixed with an exclamation +point ('!'). This is a powerful way to inspect the program being +debugged; it is even possible to change variables or call functions. +When an exception occurs in such a statement, the exception name is +printed but the debugger's state is not changed. + +The debugger supports aliases, which can save typing. 
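# Illustrative sketch (not part of the patch): the two entry points the
# docstring above describes. "buggy" is a hypothetical function; both
# calls drop into the interactive (Pdb) prompt.
import pdb

def buggy():
    return 1 / 0

pdb.run("buggy()")         # enter the debugger before the statement runs

try:
    buggy()
except ZeroDivisionError:
    pdb.post_mortem()      # inspect the traceback of the exception being handled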
And aliases can +have parameters (see the alias help entry) which allows one a certain +level of adaptability to the context under examination. + +Multiple commands may be entered on a single line, separated by the +pair ';;'. No intelligence is applied to separating the commands; the +input is split at the first ';;', even if it is in the middle of a +quoted string. + +If a file ".pdbrc" exists in your home directory or in the current +directory, it is read in and executed as if it had been typed at the +debugger prompt. This is particularly useful for aliases. If both +files exist, the one in the home directory is read first and aliases +defined there can be overridden by the local file. This behavior can be +disabled by passing the "readrc=False" argument to the Pdb constructor. + +Aside from aliases, the debugger is not directly programmable; but it +is implemented as a class from which you can derive your own debugger +class, which you can make as fancy as you like. + + +Debugger commands +================= + +""" +# NOTE: the actual command documentation is collected from docstrings of the +# commands and is appended to __doc__ after the class has been defined. + +import os +import io +import re +import sys +import cmd +import bdb +import dis +import code +import glob +import pprint +import signal +import inspect +import traceback +import linecache + + +class Restart(Exception): + """Causes a debugger to be restarted for the debugged python program.""" + pass + +__all__ = ["run", "pm", "Pdb", "runeval", "runctx", "runcall", "set_trace", + "post_mortem", "help"] + +def find_function(funcname, filename): + cre = re.compile(r'def\s+%s\s*[(]' % re.escape(funcname)) + try: + fp = open(filename) + except OSError: + return None + # consumer of this info expects the first line to be 1 + with fp: + for lineno, line in enumerate(fp, start=1): + if cre.match(line): + return funcname, filename, lineno + return None + +def getsourcelines(obj): + lines, lineno = inspect.findsource(obj) + if inspect.isframe(obj) and obj.f_globals is obj.f_locals: + # must be a module frame: do not try to cut a block out of it + return lines, 1 + elif inspect.ismodule(obj): + return lines, 1 + return inspect.getblock(lines[lineno:]), lineno+1 + +def lasti2lineno(code, lasti): + linestarts = list(dis.findlinestarts(code)) + linestarts.reverse() + for i, lineno in linestarts: + if lasti >= i: + return lineno + return 0 + + +class _rstr(str): + """String that doesn't quote its repr.""" + def __repr__(self): + return self + + +# Interaction prompt line will separate file and call info from code +# text using value of line_prefix string. A newline and arrow may +# be to your liking. You can set it once pdb is imported using the +# command "pdb.line_prefix = '\n% '". 
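# Illustrative (not part of the patch): the runtime customization the
# comment above refers to.
import pdb
pdb.line_prefix = '\n% '   # choose a different stack-entry separator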
+# line_prefix = ': ' # Use this to get the old situation back +line_prefix = '\n-> ' # Probably a better default + +class Pdb(bdb.Bdb, cmd.Cmd): + + _previous_sigint_handler = None + + def __init__(self, completekey='tab', stdin=None, stdout=None, skip=None, + nosigint=False, readrc=True): + bdb.Bdb.__init__(self, skip=skip) + cmd.Cmd.__init__(self, completekey, stdin, stdout) + sys.audit("pdb.Pdb") + if stdout: + self.use_rawinput = 0 + self.prompt = '(Pdb) ' + self.aliases = {} + self.displaying = {} + self.mainpyfile = '' + self._wait_for_mainpyfile = False + self.tb_lineno = {} + # Try to load readline if it exists + try: + import readline + # remove some common file name delimiters + readline.set_completer_delims(' \t\n`@#$%^&*()=+[{]}\\|;:\'",<>?') + except ImportError: + pass + self.allow_kbdint = False + self.nosigint = nosigint + + # Read ~/.pdbrc and ./.pdbrc + self.rcLines = [] + if readrc: + try: + with open(os.path.expanduser('~/.pdbrc')) as rcFile: + self.rcLines.extend(rcFile) + except OSError: + pass + try: + with open(".pdbrc") as rcFile: + self.rcLines.extend(rcFile) + except OSError: + pass + + self.commands = {} # associates a command list to breakpoint numbers + self.commands_doprompt = {} # for each bp num, tells if the prompt + # must be disp. after execing the cmd list + self.commands_silent = {} # for each bp num, tells if the stack trace + # must be disp. after execing the cmd list + self.commands_defining = False # True while in the process of defining + # a command list + self.commands_bnum = None # The breakpoint number for which we are + # defining a list + + def sigint_handler(self, signum, frame): + if self.allow_kbdint: + raise KeyboardInterrupt + self.message("\nProgram interrupted. (Use 'cont' to resume).") + self.set_step() + self.set_trace(frame) + + def reset(self): + bdb.Bdb.reset(self) + self.forget() + + def forget(self): + self.lineno = None + self.stack = [] + self.curindex = 0 + self.curframe = None + self.tb_lineno.clear() + + def setup(self, f, tb): + self.forget() + self.stack, self.curindex = self.get_stack(f, tb) + while tb: + # when setting up post-mortem debugging with a traceback, save all + # the original line numbers to be displayed along the current line + # numbers (which can be different, e.g. due to finally clauses) + lineno = lasti2lineno(tb.tb_frame.f_code, tb.tb_lasti) + self.tb_lineno[tb.tb_frame] = lineno + tb = tb.tb_next + self.curframe = self.stack[self.curindex][0] + # The f_locals dictionary is updated from the actual frame + # locals whenever the .f_locals accessor is called, so we + # cache it here to ensure that modifications are not overwritten. 
+ self.curframe_locals = self.curframe.f_locals + return self.execRcLines() + + # Can be executed earlier than 'setup' if desired + def execRcLines(self): + if not self.rcLines: + return + # local copy because of recursion + rcLines = self.rcLines + rcLines.reverse() + # execute every line only once + self.rcLines = [] + while rcLines: + line = rcLines.pop().strip() + if line and line[0] != '#': + if self.onecmd(line): + # if onecmd returns True, the command wants to exit + # from the interaction, save leftover rc lines + # to execute before next interaction + self.rcLines += reversed(rcLines) + return True + + # Override Bdb methods + + def user_call(self, frame, argument_list): + """This method is called when there is the remote possibility + that we ever need to stop in this function.""" + if self._wait_for_mainpyfile: + return + if self.stop_here(frame): + self.message('--Call--') + self.interaction(frame, None) + + def user_line(self, frame): + """This function is called when we stop or break at this line.""" + if self._wait_for_mainpyfile: + if (self.mainpyfile != self.canonic(frame.f_code.co_filename) + or frame.f_lineno <= 0): + return + self._wait_for_mainpyfile = False + if self.bp_commands(frame): + self.interaction(frame, None) + + def bp_commands(self, frame): + """Call every command that was set for the current active breakpoint + (if there is one). + + Returns True if the normal interaction function must be called, + False otherwise.""" + # self.currentbp is set in bdb in Bdb.break_here if a breakpoint was hit + if getattr(self, "currentbp", False) and \ + self.currentbp in self.commands: + currentbp = self.currentbp + self.currentbp = 0 + lastcmd_back = self.lastcmd + self.setup(frame, None) + for line in self.commands[currentbp]: + self.onecmd(line) + self.lastcmd = lastcmd_back + if not self.commands_silent[currentbp]: + self.print_stack_entry(self.stack[self.curindex]) + if self.commands_doprompt[currentbp]: + self._cmdloop() + self.forget() + return + return 1 + + def user_return(self, frame, return_value): + """This function is called when a return trap is set here.""" + if self._wait_for_mainpyfile: + return + frame.f_locals['__return__'] = return_value + self.message('--Return--') + self.interaction(frame, None) + + def user_exception(self, frame, exc_info): + """This function is called if an exception occurs, + but only if we are to stop at or just below this level.""" + if self._wait_for_mainpyfile: + return + exc_type, exc_value, exc_traceback = exc_info + frame.f_locals['__exception__'] = exc_type, exc_value + + # An 'Internal StopIteration' exception is an exception debug event + # issued by the interpreter when handling a subgenerator run with + # 'yield from' or a generator controlled by a for loop. No exception has + # actually occurred in this case. The debugger uses this debug event to + # stop when the debuggee is returning from such generators. 
+ prefix = 'Internal ' if (not exc_traceback + and exc_type is StopIteration) else '' + self.message('%s%s' % (prefix, + traceback.format_exception_only(exc_type, exc_value)[-1].strip())) + self.interaction(frame, exc_traceback) + + # General interaction function + def _cmdloop(self): + while True: + try: + # keyboard interrupts allow for an easy way to cancel + # the current command, so allow them during interactive input + self.allow_kbdint = True + self.cmdloop() + self.allow_kbdint = False + break + except KeyboardInterrupt: + self.message('--KeyboardInterrupt--') + + # Called before loop, handles display expressions + def preloop(self): + displaying = self.displaying.get(self.curframe) + if displaying: + for expr, oldvalue in displaying.items(): + newvalue = self._getval_except(expr) + # check for identity first; this prevents custom __eq__ to + # be called at every loop, and also prevents instances whose + # fields are changed to be displayed + if newvalue is not oldvalue and newvalue != oldvalue: + displaying[expr] = newvalue + self.message('display %s: %r [old: %r]' % + (expr, newvalue, oldvalue)) + + def interaction(self, frame, traceback): + # Restore the previous signal handler at the Pdb prompt. + if Pdb._previous_sigint_handler: + try: + signal.signal(signal.SIGINT, Pdb._previous_sigint_handler) + except ValueError: # ValueError: signal only works in main thread + pass + else: + Pdb._previous_sigint_handler = None + if self.setup(frame, traceback): + # no interaction desired at this time (happens if .pdbrc contains + # a command like "continue") + self.forget() + return + self.print_stack_entry(self.stack[self.curindex]) + self._cmdloop() + self.forget() + + def displayhook(self, obj): + """Custom displayhook for the exec in default(), which prevents + assignment of the _ variable in the builtins. + """ + # reproduce the behavior of the standard displayhook, not printing None + if obj is not None: + self.message(repr(obj)) + + def default(self, line): + if line[:1] == '!': line = line[1:] + locals = self.curframe_locals + globals = self.curframe.f_globals + try: + code = compile(line + '\n', '', 'single') + save_stdout = sys.stdout + save_stdin = sys.stdin + save_displayhook = sys.displayhook + try: + sys.stdin = self.stdin + sys.stdout = self.stdout + sys.displayhook = self.displayhook + exec(code, globals, locals) + finally: + sys.stdout = save_stdout + sys.stdin = save_stdin + sys.displayhook = save_displayhook + except: + exc_info = sys.exc_info()[:2] + self.error(traceback.format_exception_only(*exc_info)[-1].strip()) + + def precmd(self, line): + """Handle alias expansion and ';;' separator.""" + if not line.strip(): + return line + args = line.split() + while args[0] in self.aliases: + line = self.aliases[args[0]] + ii = 1 + for tmpArg in args[1:]: + line = line.replace("%" + str(ii), + tmpArg) + ii += 1 + line = line.replace("%*", ' '.join(args[1:])) + args = line.split() + # split into ';;' separated commands + # unless it's an alias command + if args[0] != 'alias': + marker = line.find(';;') + if marker >= 0: + # queue up everything after marker + next = line[marker+2:].lstrip() + self.cmdqueue.append(next) + line = line[:marker].rstrip() + return line + + def onecmd(self, line): + """Interpret the argument as though it had been typed in response + to the prompt. + + Checks whether this line is typed at the normal prompt or in + a breakpoint command list definition. 
+ """ + if not self.commands_defining: + return cmd.Cmd.onecmd(self, line) + else: + return self.handle_command_def(line) + + def handle_command_def(self, line): + """Handles one command line during command list definition.""" + cmd, arg, line = self.parseline(line) + if not cmd: + return + if cmd == 'silent': + self.commands_silent[self.commands_bnum] = True + return # continue to handle other cmd def in the cmd list + elif cmd == 'end': + self.cmdqueue = [] + return 1 # end of cmd list + cmdlist = self.commands[self.commands_bnum] + if arg: + cmdlist.append(cmd+' '+arg) + else: + cmdlist.append(cmd) + # Determine if we must stop + try: + func = getattr(self, 'do_' + cmd) + except AttributeError: + func = self.default + # one of the resuming commands + if func.__name__ in self.commands_resuming: + self.commands_doprompt[self.commands_bnum] = False + self.cmdqueue = [] + return 1 + return + + # interface abstraction functions + + def message(self, msg): + print(msg, file=self.stdout) + + def error(self, msg): + print('***', msg, file=self.stdout) + + # Generic completion functions. Individual complete_foo methods can be + # assigned below to one of these functions. + + def _complete_location(self, text, line, begidx, endidx): + # Complete a file/module/function location for break/tbreak/clear. + if line.strip().endswith((':', ',')): + # Here comes a line number or a condition which we can't complete. + return [] + # First, try to find matching functions (i.e. expressions). + try: + ret = self._complete_expression(text, line, begidx, endidx) + except Exception: + ret = [] + # Then, try to complete file names as well. + globs = glob.glob(text + '*') + for fn in globs: + if os.path.isdir(fn): + ret.append(fn + '/') + elif os.path.isfile(fn) and fn.lower().endswith(('.py', '.pyw')): + ret.append(fn + ':') + return ret + + def _complete_bpnumber(self, text, line, begidx, endidx): + # Complete a breakpoint number. (This would be more helpful if we could + # display additional info along with the completions, such as file/line + # of the breakpoint.) + return [str(i) for i, bp in enumerate(bdb.Breakpoint.bpbynumber) + if bp is not None and str(i).startswith(text)] + + def _complete_expression(self, text, line, begidx, endidx): + # Complete an arbitrary expression. + if not self.curframe: + return [] + # Collect globals and locals. It is usually not really sensible to also + # complete builtins, and they clutter the namespace quite heavily, so we + # leave them out. + ns = {**self.curframe.f_globals, **self.curframe_locals} + if '.' in text: + # Walk an attribute chain up to the last part, similar to what + # rlcompleter does. This will bail if any of the parts are not + # simple attribute access, which is what we want. + dotted = text.split('.') + try: + obj = ns[dotted[0]] + for part in dotted[1:-1]: + obj = getattr(obj, part) + except (KeyError, AttributeError): + return [] + prefix = '.'.join(dotted[:-1]) + '.' + return [prefix + n for n in dir(obj) if n.startswith(dotted[-1])] + else: + # Complete a simple name. + return [n for n in ns.keys() if n.startswith(text)] + + # Command definitions, called by cmdloop() + # The argument is the remaining string on the command line + # Return true to exit from the command loop + + def do_commands(self, arg): + """commands [bpnumber] + (com) ... + (com) end + (Pdb) + + Specify a list of commands for breakpoint number bpnumber. + The commands themselves are entered on the following lines. + Type a line containing just 'end' to terminate the commands. 
+ The commands are executed when the breakpoint is hit. + + To remove all commands from a breakpoint, type commands and + follow it immediately with end; that is, give no commands. + + With no bpnumber argument, commands refers to the last + breakpoint set. + + You can use breakpoint commands to start your program up + again. Simply use the continue command, or step, or any other + command that resumes execution. + + Specifying any command resuming execution (currently continue, + step, next, return, jump, quit and their abbreviations) + terminates the command list (as if that command was + immediately followed by end). This is because any time you + resume execution (even with a simple next or step), you may + encounter another breakpoint -- which could have its own + command list, leading to ambiguities about which list to + execute. + + If you use the 'silent' command in the command list, the usual + message about stopping at a breakpoint is not printed. This + may be desirable for breakpoints that are to print a specific + message and then continue. If none of the other commands + print anything, you will see no sign that the breakpoint was + reached. + """ + if not arg: + bnum = len(bdb.Breakpoint.bpbynumber) - 1 + else: + try: + bnum = int(arg) + except: + self.error("Usage: commands [bnum]\n ...\n end") + return + self.commands_bnum = bnum + # Save old definitions for the case of a keyboard interrupt. + if bnum in self.commands: + old_command_defs = (self.commands[bnum], + self.commands_doprompt[bnum], + self.commands_silent[bnum]) + else: + old_command_defs = None + self.commands[bnum] = [] + self.commands_doprompt[bnum] = True + self.commands_silent[bnum] = False + + prompt_back = self.prompt + self.prompt = '(com) ' + self.commands_defining = True + try: + self.cmdloop() + except KeyboardInterrupt: + # Restore old definitions. + if old_command_defs: + self.commands[bnum] = old_command_defs[0] + self.commands_doprompt[bnum] = old_command_defs[1] + self.commands_silent[bnum] = old_command_defs[2] + else: + del self.commands[bnum] + del self.commands_doprompt[bnum] + del self.commands_silent[bnum] + self.error('command definition aborted, old commands restored') + finally: + self.commands_defining = False + self.prompt = prompt_back + + complete_commands = _complete_bpnumber + + def do_break(self, arg, temporary = 0): + """b(reak) [ ([filename:]lineno | function) [, condition] ] + Without argument, list all breaks. + + With a line number argument, set a break at this line in the + current file. With a function name, set a break at the first + executable line of that function. If a second argument is + present, it is a string specifying an expression which must + evaluate to true before the breakpoint is honored. + + The line number may be prefixed with a filename and a colon, + to specify a breakpoint in another file (probably one that + hasn't been loaded yet). The file is searched for on + sys.path; the .py suffix may be omitted. 
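# Illustrative (not part of the patch): a typical session combining the
# 'break' and 'commands' machinery documented above. The file name, line
# number and variable are hypothetical.
#
#   (Pdb) break app.py:42, count > 0
#   Breakpoint 1 at /path/to/app.py:42
#   (Pdb) commands 1
#   (com) silent
#   (com) p count
#   (com) end
#   (Pdb) continue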
+ """ + if not arg: + if self.breaks: # There's at least one + self.message("Num Type Disp Enb Where") + for bp in bdb.Breakpoint.bpbynumber: + if bp: + self.message(bp.bpformat()) + return + # parse arguments; comma has lowest precedence + # and cannot occur in filename + filename = None + lineno = None + cond = None + comma = arg.find(',') + if comma > 0: + # parse stuff after comma: "condition" + cond = arg[comma+1:].lstrip() + arg = arg[:comma].rstrip() + # parse stuff before comma: [filename:]lineno | function + colon = arg.rfind(':') + funcname = None + if colon >= 0: + filename = arg[:colon].rstrip() + f = self.lookupmodule(filename) + if not f: + self.error('%r not found from sys.path' % filename) + return + else: + filename = f + arg = arg[colon+1:].lstrip() + try: + lineno = int(arg) + except ValueError: + self.error('Bad lineno: %s' % arg) + return + else: + # no colon; can be lineno or function + try: + lineno = int(arg) + except ValueError: + try: + func = eval(arg, + self.curframe.f_globals, + self.curframe_locals) + except: + func = arg + try: + if hasattr(func, '__func__'): + func = func.__func__ + code = func.__code__ + #use co_name to identify the bkpt (function names + #could be aliased, but co_name is invariant) + funcname = code.co_name + lineno = code.co_firstlineno + filename = code.co_filename + except: + # last thing to try + (ok, filename, ln) = self.lineinfo(arg) + if not ok: + self.error('The specified object %r is not a function ' + 'or was not found along sys.path.' % arg) + return + funcname = ok # ok contains a function name + lineno = int(ln) + if not filename: + filename = self.defaultFile() + # Check for reasonable breakpoint + line = self.checkline(filename, lineno) + if line: + # now set the break point + err = self.set_break(filename, line, temporary, cond, funcname) + if err: + self.error(err) + else: + bp = self.get_breaks(filename, line)[-1] + self.message("Breakpoint %d at %s:%d" % + (bp.number, bp.file, bp.line)) + + # To be overridden in derived debuggers + def defaultFile(self): + """Produce a reasonable default.""" + filename = self.curframe.f_code.co_filename + if filename == '' and self.mainpyfile: + filename = self.mainpyfile + return filename + + do_b = do_break + + complete_break = _complete_location + complete_b = _complete_location + + def do_tbreak(self, arg): + """tbreak [ ([filename:]lineno | function) [, condition] ] + Same arguments as break, but sets a temporary breakpoint: it + is automatically deleted when first hit. + """ + self.do_break(arg, 1) + + complete_tbreak = _complete_location + + def lineinfo(self, identifier): + failed = (None, None, None) + # Input is identifier, may be in single quotes + idstring = identifier.split("'") + if len(idstring) == 1: + # not in single quotes + id = idstring[0].strip() + elif len(idstring) == 3: + # quoted + id = idstring[1].strip() + else: + return failed + if id == '': return failed + parts = id.split('.') + # Protection for derived debuggers + if parts[0] == 'self': + del parts[0] + if len(parts) == 0: + return failed + # Best first guess at file to look at + fname = self.defaultFile() + if len(parts) == 1: + item = parts[0] + else: + # More than one part. + # First is module, second is method/class + f = self.lookupmodule(parts[0]) + if f: + fname = f + item = parts[1] + answer = find_function(item, fname) + return answer or failed + + def checkline(self, filename, lineno): + """Check whether specified line seems to be executable. + + Return `lineno` if it is, 0 if not (e.g. 
a docstring, comment, blank + line or EOF). Warning: testing is not comprehensive. + """ + # this method should be callable before starting debugging, so default + # to "no globals" if there is no current frame + globs = self.curframe.f_globals if hasattr(self, 'curframe') else None + line = linecache.getline(filename, lineno, globs) + if not line: + self.message('End of file') + return 0 + line = line.strip() + # Don't allow setting breakpoint at a blank line + if (not line or (line[0] == '#') or + (line[:3] == '"""') or line[:3] == "'''"): + self.error('Blank or comment') + return 0 + return lineno + + def do_enable(self, arg): + """enable bpnumber [bpnumber ...] + Enables the breakpoints given as a space separated list of + breakpoint numbers. + """ + args = arg.split() + for i in args: + try: + bp = self.get_bpbynumber(i) + except ValueError as err: + self.error(err) + else: + bp.enable() + self.message('Enabled %s' % bp) + + complete_enable = _complete_bpnumber + + def do_disable(self, arg): + """disable bpnumber [bpnumber ...] + Disables the breakpoints given as a space separated list of + breakpoint numbers. Disabling a breakpoint means it cannot + cause the program to stop execution, but unlike clearing a + breakpoint, it remains in the list of breakpoints and can be + (re-)enabled. + """ + args = arg.split() + for i in args: + try: + bp = self.get_bpbynumber(i) + except ValueError as err: + self.error(err) + else: + bp.disable() + self.message('Disabled %s' % bp) + + complete_disable = _complete_bpnumber + + def do_condition(self, arg): + """condition bpnumber [condition] + Set a new condition for the breakpoint, an expression which + must evaluate to true before the breakpoint is honored. If + condition is absent, any existing condition is removed; i.e., + the breakpoint is made unconditional. + """ + args = arg.split(' ', 1) + try: + cond = args[1] + except IndexError: + cond = None + try: + bp = self.get_bpbynumber(args[0].strip()) + except IndexError: + self.error('Breakpoint number expected') + except ValueError as err: + self.error(err) + else: + bp.cond = cond + if not cond: + self.message('Breakpoint %d is now unconditional.' % bp.number) + else: + self.message('New condition set for breakpoint %d.' % bp.number) + + complete_condition = _complete_bpnumber + + def do_ignore(self, arg): + """ignore bpnumber [count] + Set the ignore count for the given breakpoint number. If + count is omitted, the ignore count is set to 0. A breakpoint + becomes active when the ignore count is zero. When non-zero, + the count is decremented each time the breakpoint is reached + and the breakpoint is not disabled and any associated + condition evaluates to true. + """ + args = arg.split() + try: + count = int(args[1].strip()) + except: + count = 0 + try: + bp = self.get_bpbynumber(args[0].strip()) + except IndexError: + self.error('Breakpoint number expected') + except ValueError as err: + self.error(err) + else: + bp.ignore = count + if count > 0: + if count > 1: + countstr = '%d crossings' % count + else: + countstr = '1 crossing' + self.message('Will ignore next %s of breakpoint %d.' % + (countstr, bp.number)) + else: + self.message('Will stop next time breakpoint %d is reached.' + % bp.number) + + complete_ignore = _complete_bpnumber + + def do_clear(self, arg): + """cl(ear) filename:lineno\ncl(ear) [bpnumber [bpnumber...]] + With a space separated list of breakpoint numbers, clear + those breakpoints. Without argument, clear all breaks (but + first ask confirmation). 
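# Illustrative (not part of the patch): managing an existing breakpoint
# with the commands documented above; breakpoint number and condition
# are hypothetical.
#
#   (Pdb) condition 1 count > 10
#   New condition set for breakpoint 1.
#   (Pdb) ignore 1 5
#   Will ignore next 5 crossings of breakpoint 1.
#   (Pdb) disable 1
#   (Pdb) clear 1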
With a filename:lineno argument, + clear all breaks at that line in that file. + """ + if not arg: + try: + reply = input('Clear all breaks? ') + except EOFError: + reply = 'no' + reply = reply.strip().lower() + if reply in ('y', 'yes'): + bplist = [bp for bp in bdb.Breakpoint.bpbynumber if bp] + self.clear_all_breaks() + for bp in bplist: + self.message('Deleted %s' % bp) + return + if ':' in arg: + # Make sure it works for "clear C:\foo\bar.py:12" + i = arg.rfind(':') + filename = arg[:i] + arg = arg[i+1:] + try: + lineno = int(arg) + except ValueError: + err = "Invalid line number (%s)" % arg + else: + bplist = self.get_breaks(filename, lineno) + err = self.clear_break(filename, lineno) + if err: + self.error(err) + else: + for bp in bplist: + self.message('Deleted %s' % bp) + return + numberlist = arg.split() + for i in numberlist: + try: + bp = self.get_bpbynumber(i) + except ValueError as err: + self.error(err) + else: + self.clear_bpbynumber(i) + self.message('Deleted %s' % bp) + do_cl = do_clear # 'c' is already an abbreviation for 'continue' + + complete_clear = _complete_location + complete_cl = _complete_location + + def do_where(self, arg): + """w(here) + Print a stack trace, with the most recent frame at the bottom. + An arrow indicates the "current frame", which determines the + context of most commands. 'bt' is an alias for this command. + """ + self.print_stack_trace() + do_w = do_where + do_bt = do_where + + def _select_frame(self, number): + assert 0 <= number < len(self.stack) + self.curindex = number + self.curframe = self.stack[self.curindex][0] + self.curframe_locals = self.curframe.f_locals + self.print_stack_entry(self.stack[self.curindex]) + self.lineno = None + + def do_up(self, arg): + """u(p) [count] + Move the current frame count (default one) levels up in the + stack trace (to an older frame). + """ + if self.curindex == 0: + self.error('Oldest frame') + return + try: + count = int(arg or 1) + except ValueError: + self.error('Invalid frame count (%s)' % arg) + return + if count < 0: + newframe = 0 + else: + newframe = max(0, self.curindex - count) + self._select_frame(newframe) + do_u = do_up + + def do_down(self, arg): + """d(own) [count] + Move the current frame count (default one) levels down in the + stack trace (to a newer frame). + """ + if self.curindex + 1 == len(self.stack): + self.error('Newest frame') + return + try: + count = int(arg or 1) + except ValueError: + self.error('Invalid frame count (%s)' % arg) + return + if count < 0: + newframe = len(self.stack) - 1 + else: + newframe = min(len(self.stack) - 1, self.curindex + count) + self._select_frame(newframe) + do_d = do_down + + def do_until(self, arg): + """unt(il) [lineno] + Without argument, continue execution until the line with a + number greater than the current one is reached. With a line + number, continue execution until a line with a number greater + or equal to that is reached. In both cases, also stop when + the current frame returns. + """ + if arg: + try: + lineno = int(arg) + except ValueError: + self.error('Error in argument: %r' % arg) + return + if lineno <= self.curframe.f_lineno: + self.error('"until" line number is smaller than current ' + 'line number') + return + else: + lineno = None + self.set_until(self.curframe, lineno) + return 1 + do_unt = do_until + + def do_step(self, arg): + """s(tep) + Execute the current line, stop at the first possible occasion + (either in a function that is called or in the current + function). 
+ """ + self.set_step() + return 1 + do_s = do_step + + def do_next(self, arg): + """n(ext) + Continue execution until the next line in the current function + is reached or it returns. + """ + self.set_next(self.curframe) + return 1 + do_n = do_next + + def do_run(self, arg): + """run [args...] + Restart the debugged python program. If a string is supplied + it is split with "shlex", and the result is used as the new + sys.argv. History, breakpoints, actions and debugger options + are preserved. "restart" is an alias for "run". + """ + if arg: + import shlex + argv0 = sys.argv[0:1] + sys.argv = shlex.split(arg) + sys.argv[:0] = argv0 + # this is caught in the main debugger loop + raise Restart + + do_restart = do_run + + def do_return(self, arg): + """r(eturn) + Continue execution until the current function returns. + """ + self.set_return(self.curframe) + return 1 + do_r = do_return + + def do_continue(self, arg): + """c(ont(inue)) + Continue execution, only stop when a breakpoint is encountered. + """ + if not self.nosigint: + try: + Pdb._previous_sigint_handler = \ + signal.signal(signal.SIGINT, self.sigint_handler) + except ValueError: + # ValueError happens when do_continue() is invoked from + # a non-main thread in which case we just continue without + # SIGINT set. Would printing a message here (once) make + # sense? + pass + self.set_continue() + return 1 + do_c = do_cont = do_continue + + def do_jump(self, arg): + """j(ump) lineno + Set the next line that will be executed. Only available in + the bottom-most frame. This lets you jump back and execute + code again, or jump forward to skip code that you don't want + to run. + + It should be noted that not all jumps are allowed -- for + instance it is not possible to jump into the middle of a + for loop or out of a finally clause. + """ + if self.curindex + 1 != len(self.stack): + self.error('You can only jump within the bottom frame') + return + try: + arg = int(arg) + except ValueError: + self.error("The 'jump' command requires a line number") + else: + try: + # Do the jump, fix up our copy of the stack, and display the + # new position + self.curframe.f_lineno = arg + self.stack[self.curindex] = self.stack[self.curindex][0], arg + self.print_stack_entry(self.stack[self.curindex]) + except ValueError as e: + self.error('Jump failed: %s' % e) + do_j = do_jump + + def do_debug(self, arg): + """debug code + Enter a recursive debugger that steps through the code + argument (which is an arbitrary expression or statement to be + executed in the current environment). + """ + sys.settrace(None) + globals = self.curframe.f_globals + locals = self.curframe_locals + p = Pdb(self.completekey, self.stdin, self.stdout) + p.prompt = "(%s) " % self.prompt.strip() + self.message("ENTERING RECURSIVE DEBUGGER") + try: + sys.call_tracing(p.run, (arg, globals, locals)) + except Exception: + exc_info = sys.exc_info()[:2] + self.error(traceback.format_exception_only(*exc_info)[-1].strip()) + self.message("LEAVING RECURSIVE DEBUGGER") + sys.settrace(self.trace_dispatch) + self.lastcmd = p.lastcmd + + complete_debug = _complete_expression + + def do_quit(self, arg): + """q(uit)\nexit + Quit from the debugger. The program being executed is aborted. + """ + self._user_requested_quit = True + self.set_quit() + return 1 + + do_q = do_quit + do_exit = do_quit + + def do_EOF(self, arg): + """EOF + Handles the receipt of EOF as a command. 
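# Illustrative (not part of the patch): the execution-control commands
# documented above, in a hypothetical session.
#
#   (Pdb) next          # run the current line, staying in this frame
#   (Pdb) step          # like next, but step into function calls
#   (Pdb) until 80      # run until a line >= 80 in this frame is reached
#   (Pdb) return        # run until the current function returns
#   (Pdb) continue      # resume until the next breakpoint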
+ """ + self.message('') + self._user_requested_quit = True + self.set_quit() + return 1 + + def do_args(self, arg): + """a(rgs) + Print the argument list of the current function. + """ + co = self.curframe.f_code + dict = self.curframe_locals + n = co.co_argcount + co.co_kwonlyargcount + if co.co_flags & inspect.CO_VARARGS: n = n+1 + if co.co_flags & inspect.CO_VARKEYWORDS: n = n+1 + for i in range(n): + name = co.co_varnames[i] + if name in dict: + self.message('%s = %r' % (name, dict[name])) + else: + self.message('%s = *** undefined ***' % (name,)) + do_a = do_args + + def do_retval(self, arg): + """retval + Print the return value for the last return of a function. + """ + if '__return__' in self.curframe_locals: + self.message(repr(self.curframe_locals['__return__'])) + else: + self.error('Not yet returned!') + do_rv = do_retval + + def _getval(self, arg): + try: + return eval(arg, self.curframe.f_globals, self.curframe_locals) + except: + exc_info = sys.exc_info()[:2] + self.error(traceback.format_exception_only(*exc_info)[-1].strip()) + raise + + def _getval_except(self, arg, frame=None): + try: + if frame is None: + return eval(arg, self.curframe.f_globals, self.curframe_locals) + else: + return eval(arg, frame.f_globals, frame.f_locals) + except: + exc_info = sys.exc_info()[:2] + err = traceback.format_exception_only(*exc_info)[-1].strip() + return _rstr('** raised %s **' % err) + + def do_p(self, arg): + """p expression + Print the value of the expression. + """ + try: + self.message(repr(self._getval(arg))) + except: + pass + + def do_pp(self, arg): + """pp expression + Pretty-print the value of the expression. + """ + try: + self.message(pprint.pformat(self._getval(arg))) + except: + pass + + complete_print = _complete_expression + complete_p = _complete_expression + complete_pp = _complete_expression + + def do_list(self, arg): + """l(ist) [first [,last] | .] + + List source code for the current file. Without arguments, + list 11 lines around the current line or continue the previous + listing. With . as argument, list 11 lines around the current + line. With one argument, list 11 lines starting at that line. + With two arguments, list the given range; if the second + argument is less than the first, it is a count. + + The current line in the current frame is indicated by "->". + If an exception is being debugged, the line where the + exception was originally raised or propagated is indicated by + ">>", if it differs from the current line. + """ + self.lastcmd = 'list' + last = None + if arg and arg != '.': + try: + if ',' in arg: + first, last = arg.split(',') + first = int(first.strip()) + last = int(last.strip()) + if last < first: + # assume it's a count + last = first + last + else: + first = int(arg.strip()) + first = max(1, first - 5) + except ValueError: + self.error('Error in argument: %r' % arg) + return + elif self.lineno is None or arg == '.': + first = max(1, self.curframe.f_lineno - 5) + else: + first = self.lineno + 1 + if last is None: + last = first + 10 + filename = self.curframe.f_code.co_filename + breaklist = self.get_file_breaks(filename) + try: + lines = linecache.getlines(filename, self.curframe.f_globals) + self._print_lines(lines[first-1:last], first, breaklist, + self.curframe) + self.lineno = min(last, len(lines)) + if len(lines) < last: + self.message('[EOF]') + except KeyboardInterrupt: + pass + do_l = do_list + + def do_longlist(self, arg): + """longlist | ll + List the whole source code for the current function or frame. 
+ """ + filename = self.curframe.f_code.co_filename + breaklist = self.get_file_breaks(filename) + try: + lines, lineno = getsourcelines(self.curframe) + except OSError as err: + self.error(err) + return + self._print_lines(lines, lineno, breaklist, self.curframe) + do_ll = do_longlist + + def do_source(self, arg): + """source expression + Try to get source code for the given object and display it. + """ + try: + obj = self._getval(arg) + except: + return + try: + lines, lineno = getsourcelines(obj) + except (OSError, TypeError) as err: + self.error(err) + return + self._print_lines(lines, lineno) + + complete_source = _complete_expression + + def _print_lines(self, lines, start, breaks=(), frame=None): + """Print a range of lines.""" + if frame: + current_lineno = frame.f_lineno + exc_lineno = self.tb_lineno.get(frame, -1) + else: + current_lineno = exc_lineno = -1 + for lineno, line in enumerate(lines, start): + s = str(lineno).rjust(3) + if len(s) < 4: + s += ' ' + if lineno in breaks: + s += 'B' + else: + s += ' ' + if lineno == current_lineno: + s += '->' + elif lineno == exc_lineno: + s += '>>' + self.message(s + '\t' + line.rstrip()) + + def do_whatis(self, arg): + """whatis arg + Print the type of the argument. + """ + try: + value = self._getval(arg) + except: + # _getval() already printed the error + return + code = None + # Is it a function? + try: + code = value.__code__ + except Exception: + pass + if code: + self.message('Function %s' % code.co_name) + return + # Is it an instance method? + try: + code = value.__func__.__code__ + except Exception: + pass + if code: + self.message('Method %s' % code.co_name) + return + # Is it a class? + if value.__class__ is type: + self.message('Class %s.%s' % (value.__module__, value.__qualname__)) + return + # None of the above... + self.message(type(value)) + + complete_whatis = _complete_expression + + def do_display(self, arg): + """display [expression] + + Display the value of the expression if it changed, each time execution + stops in the current frame. + + Without expression, list all display expressions for the current frame. + """ + if not arg: + self.message('Currently displaying:') + for item in self.displaying.get(self.curframe, {}).items(): + self.message('%s: %r' % item) + else: + val = self._getval_except(arg) + self.displaying.setdefault(self.curframe, {})[arg] = val + self.message('display %s: %r' % (arg, val)) + + complete_display = _complete_expression + + def do_undisplay(self, arg): + """undisplay [expression] + + Do not display the expression any more in the current frame. + + Without expression, clear all display expressions for the current frame. + """ + if arg: + try: + del self.displaying.get(self.curframe, {})[arg] + except KeyError: + self.error('not displaying %s' % arg) + else: + self.displaying.pop(self.curframe, None) + + def complete_undisplay(self, text, line, begidx, endidx): + return [e for e in self.displaying.get(self.curframe, {}) + if e.startswith(text)] + + def do_interact(self, arg): + """interact + + Start an interactive interpreter whose global namespace + contains all the (global and local) names found in the current scope. + """ + ns = {**self.curframe.f_globals, **self.curframe_locals} + code.interact("*interactive*", local=ns) + + def do_alias(self, arg): + """alias [name [command [parameter parameter ...] ]] + Create an alias called 'name' that executes 'command'. The + command must *not* be enclosed in quotes. 
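# Illustrative (not part of the patch): the 'display' mechanism
# documented above; the expression and values are hypothetical.
#
#   (Pdb) display count
#   display count: 10
#   (Pdb) next
#   display count: 11 [old: 10]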
Replaceable + parameters can be indicated by %1, %2, and so on, while %* is + replaced by all the parameters. If no command is given, the + current alias for name is shown. If no name is given, all + aliases are listed. + + Aliases may be nested and can contain anything that can be + legally typed at the pdb prompt. Note! You *can* override + internal pdb commands with aliases! Those internal commands + are then hidden until the alias is removed. Aliasing is + recursively applied to the first word of the command line; all + other words in the line are left alone. + + As an example, here are two useful aliases (especially when + placed in the .pdbrc file): + + # Print instance variables (usage "pi classInst") + alias pi for k in %1.__dict__.keys(): print("%1.",k,"=",%1.__dict__[k]) + # Print instance variables in self + alias ps pi self + """ + args = arg.split() + if len(args) == 0: + keys = sorted(self.aliases.keys()) + for alias in keys: + self.message("%s = %s" % (alias, self.aliases[alias])) + return + if args[0] in self.aliases and len(args) == 1: + self.message("%s = %s" % (args[0], self.aliases[args[0]])) + else: + self.aliases[args[0]] = ' '.join(args[1:]) + + def do_unalias(self, arg): + """unalias name + Delete the specified alias. + """ + args = arg.split() + if len(args) == 0: return + if args[0] in self.aliases: + del self.aliases[args[0]] + + def complete_unalias(self, text, line, begidx, endidx): + return [a for a in self.aliases if a.startswith(text)] + + # List of all the commands making the program resume execution. + commands_resuming = ['do_continue', 'do_step', 'do_next', 'do_return', + 'do_quit', 'do_jump'] + + # Print a traceback starting at the top stack frame. + # The most recently entered frame is printed last; + # this is different from dbx and gdb, but consistent with + # the Python interpreter's stack trace. + # It is also consistent with the up/down commands (which are + # compatible with dbx and gdb: up moves towards 'main()' + # and down moves towards the most recent stack frame). + + def print_stack_trace(self): + try: + for frame_lineno in self.stack: + self.print_stack_entry(frame_lineno) + except KeyboardInterrupt: + pass + + def print_stack_entry(self, frame_lineno, prompt_prefix=line_prefix): + frame, lineno = frame_lineno + if frame is self.curframe: + prefix = '> ' + else: + prefix = ' ' + self.message(prefix + + self.format_stack_entry(frame_lineno, prompt_prefix)) + + # Provide help + + def do_help(self, arg): + """h(elp) + Without argument, print the list of available commands. + With a command name as argument, print help about that command. + "help pdb" shows the full pdb documentation. + "help exec" gives help on the ! command. + """ + if not arg: + return cmd.Cmd.do_help(self, arg) + try: + try: + topic = getattr(self, 'help_' + arg) + return topic() + except AttributeError: + command = getattr(self, 'do_' + arg) + except AttributeError: + self.error('No help for %r' % arg) + else: + if sys.flags.optimize >= 2: + self.error('No help for %r; please do not run Python with -OO ' + 'if you need command help' % arg) + return + self.message(command.__doc__.rstrip()) + + do_h = do_help + + def help_exec(self): + """(!) statement + Execute the (one-line) statement in the context of the current + stack frame. The exclamation point can be omitted unless the + first word of the statement resembles a debugger command. 
To + assign to a global variable you must always prefix the command + with a 'global' command, e.g.: + (Pdb) global list_options; list_options = ['-l'] + (Pdb) + """ + self.message((self.help_exec.__doc__ or '').strip()) + + def help_pdb(self): + help() + + # other helper functions + + def lookupmodule(self, filename): + """Helper function for break/clear parsing -- may be overridden. + + lookupmodule() translates (possibly incomplete) file or module name + into an absolute file name. + """ + if os.path.isabs(filename) and os.path.exists(filename): + return filename + f = os.path.join(sys.path[0], filename) + if os.path.exists(f) and self.canonic(f) == self.mainpyfile: + return f + root, ext = os.path.splitext(filename) + if ext == '': + filename = filename + '.py' + if os.path.isabs(filename): + return filename + for dirname in sys.path: + while os.path.islink(dirname): + dirname = os.readlink(dirname) + fullname = os.path.join(dirname, filename) + if os.path.exists(fullname): + return fullname + return None + + def _runmodule(self, module_name): + self._wait_for_mainpyfile = True + self._user_requested_quit = False + import runpy + mod_name, mod_spec, code = runpy._get_module_details(module_name) + self.mainpyfile = self.canonic(code.co_filename) + import __main__ + __main__.__dict__.clear() + __main__.__dict__.update({ + "__name__": "__main__", + "__file__": self.mainpyfile, + "__package__": mod_spec.parent, + "__loader__": mod_spec.loader, + "__spec__": mod_spec, + "__builtins__": __builtins__, + }) + self.run(code) + + def _runscript(self, filename): + # The script has to run in __main__ namespace (or imports from + # __main__ will break). + # + # So we clear up the __main__ and set several special variables + # (this gets rid of pdb's globals and cleans old variables on restarts). + import __main__ + __main__.__dict__.clear() + __main__.__dict__.update({"__name__" : "__main__", + "__file__" : filename, + "__builtins__": __builtins__, + }) + + # When bdb sets tracing, a number of call and line events happens + # BEFORE debugger even reaches user's code (and the exact sequence of + # events depends on python version). So we take special measures to + # avoid stopping before we reach the main script (see user_line and + # user_call for details). 
+ self._wait_for_mainpyfile = True + self.mainpyfile = self.canonic(filename) + self._user_requested_quit = False + with io.open_code(filename) as fp: + statement = "exec(compile(%r, %r, 'exec'))" % \ + (fp.read(), self.mainpyfile) + self.run(statement) + +# Collect all command help into docstring, if not run with -OO + +if __doc__ is not None: + # unfortunately we can't guess this order from the class definition + _help_order = [ + 'help', 'where', 'down', 'up', 'break', 'tbreak', 'clear', 'disable', + 'enable', 'ignore', 'condition', 'commands', 'step', 'next', 'until', + 'jump', 'return', 'retval', 'run', 'continue', 'list', 'longlist', + 'args', 'p', 'pp', 'whatis', 'source', 'display', 'undisplay', + 'interact', 'alias', 'unalias', 'debug', 'quit', + ] + + for _command in _help_order: + __doc__ += getattr(Pdb, 'do_' + _command).__doc__.strip() + '\n\n' + __doc__ += Pdb.help_exec.__doc__ + + del _help_order, _command + + +# Simplified interface + +def run(statement, globals=None, locals=None): + Pdb().run(statement, globals, locals) + +def runeval(expression, globals=None, locals=None): + return Pdb().runeval(expression, globals, locals) + +def runctx(statement, globals, locals): + # B/W compatibility + run(statement, globals, locals) + +def runcall(*args, **kwds): + return Pdb().runcall(*args, **kwds) + +def set_trace(*, header=None): + pdb = Pdb() + if header is not None: + pdb.message(header) + pdb.set_trace(sys._getframe().f_back) + +# Post-Mortem interface + +def post_mortem(t=None): + # handling the default + if t is None: + # sys.exc_info() returns (type, value, traceback) if an exception is + # being handled, otherwise it returns None + t = sys.exc_info()[2] + if t is None: + raise ValueError("A valid traceback must be passed if no " + "exception is being handled") + + p = Pdb() + p.reset() + p.interaction(None, t) + +def pm(): + post_mortem(sys.last_traceback) + + +# Main program for testing + +TESTCMD = 'import x; x.main()' + +def test(): + run(TESTCMD) + +# print help +def help(): + import pydoc + pydoc.pager(__doc__) + +_usage = """\ +usage: pdb.py [-c command] ... [-m module | pyfile] [arg] ... + +Debug the Python program given by pyfile. Alternatively, +an executable module or package to debug can be specified using +the -m switch. + +Initial commands are read from .pdbrc files in your home directory +and in the current directory, if they exist. Commands supplied with +-c are executed after commands from .pdbrc files. + +To let the script run until an exception occurs, use "-c continue". +To let the script run up to a given line X in the debugged file, use +"-c 'until X'".""" + +def main(): + import getopt + + opts, args = getopt.getopt(sys.argv[1:], 'mhc:', ['help', 'command=']) + + if not args: + print(_usage) + sys.exit(2) + + commands = [] + run_as_module = False + for opt, optarg in opts: + if opt in ['-h', '--help']: + print(_usage) + sys.exit() + elif opt in ['-c', '--command']: + commands.append(optarg) + elif opt in ['-m']: + run_as_module = True + + mainpyfile = args[0] # Get script filename + if not run_as_module and not os.path.exists(mainpyfile): + print('Error:', mainpyfile, 'does not exist') + sys.exit(1) + + sys.argv[:] = args # Hide "pdb.py" and pdb options from argument list + + # Replace pdb's dir with script's dir in front of module search path. + if not run_as_module: + sys.path[0] = os.path.dirname(mainpyfile) + + # Note on saving/restoring sys.argv: it's a good idea when sys.argv was + # modified by the script being debugged. 
It's a bad idea when it was + # changed by the user from the command line. There is a "restart" command + # which allows explicit specification of command line arguments. + pdb = Pdb() + pdb.rcLines.extend(commands) + while True: + try: + if run_as_module: + pdb._runmodule(mainpyfile) + else: + pdb._runscript(mainpyfile) + if pdb._user_requested_quit: + break + print("The program finished and will be restarted") + except Restart: + print("Restarting", mainpyfile, "with arguments:") + print("\t" + " ".join(args)) + except SystemExit: + # In most cases SystemExit does not warrant a post-mortem session. + print("The program exited via sys.exit(). Exit status:", end=' ') + print(sys.exc_info()[1]) + except SyntaxError: + traceback.print_exc() + sys.exit(1) + except: + traceback.print_exc() + print("Uncaught exception. Entering post mortem debugging") + print("Running 'cont' or 'step' will restart the program") + t = sys.exc_info()[2] + pdb.interaction(None, t) + print("Post mortem debugger finished. The " + mainpyfile + + " will be restarted") + + +# When invoked as main program, invoke the debugger on a script +if __name__ == '__main__': + import pdb + pdb.main() diff --git a/Lib/test/test_json/__init__.py b/Lib/test/test_json/__init__.py index 1a1684313b1..08a79415faf 100644 --- a/Lib/test/test_json/__init__.py +++ b/Lib/test/test_json/__init__.py @@ -1,6 +1,6 @@ import os import json -# import doctest +import doctest import unittest from test import support @@ -50,8 +50,8 @@ def test_cjson(self): def load_tests(loader, _, pattern): suite = unittest.TestSuite() - # for mod in (json, json.encoder, json.decoder): - # suite.addTest(doctest.DocTestSuite(mod)) + for mod in (json, json.encoder, json.decoder): + suite.addTest(doctest.DocTestSuite(mod)) suite.addTest(TestPyTest('test_pyjson')) suite.addTest(TestCTest('test_cjson')) diff --git a/README.md b/README.md index 8eb2f9e31f5..5de067e9d97 100644 --- a/README.md +++ b/README.md @@ -31,7 +31,6 @@ To test RustPython, do the following: $ git clone https://github.com/RustPython/RustPython $ cd RustPython - $ export RUSTPYTHONPATH=Lib $ cargo run demo.py Hello, RustPython! @@ -152,20 +151,20 @@ methods are often the simplest and easiest way to contribute. You can also simply run `./whats_left.sh` to assist in finding any unimplemented method. -## Using a standard library +## Using a different standard library -As of now the standard library is under construction. You can use a standard +As of now the standard library is under construction. You can change a standard library by setting the RUSTPYTHONPATH environment variable. To do this, follow this method: ```shell -$ export RUSTPYTHONPATH=~/GIT/RustPython/Lib +$ export RUSTPYTHONPATH=./Lib # this is same as the default value $ cargo run -- -c 'import xdrlib' ``` You can play around with other standard libraries for python. For example, the -[ouroboros library](https://github.com/pybee/ouroboros). +[ouroboros library](https://github.com/pybee/ouroboros) or CPython Lib. 
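For a quick check of which standard library actually got picked up, something along these lines should work (assuming, as usual, that the directory named by `RUSTPYTHONPATH` ends up on `sys.path`; `xdrlib` is just a convenient pure-Python module to probe with):

```python
import sys
import xdrlib  # any pure-Python stdlib module will do for this check

# The directory RUSTPYTHONPATH points at should show up on sys.path,
# and stdlib modules should resolve from it.
print([p for p in sys.path if p.rstrip("/").endswith("Lib")])
print(getattr(xdrlib, "__file__", "<no __file__ attribute>"))
```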
## Compiling to WebAssembly diff --git a/bytecode/src/bytecode.rs b/bytecode/src/bytecode.rs index c09d717a0b0..907d488a867 100644 --- a/bytecode/src/bytecode.rs +++ b/bytecode/src/bytecode.rs @@ -73,6 +73,10 @@ impl CodeFlags { pub const NAME_MAPPING: &'static [(&'static str, CodeFlags)] = &[ ("GENERATOR", CodeFlags::IS_GENERATOR), ("COROUTINE", CodeFlags::IS_COROUTINE), + ( + "ASYNC_GENERATOR", + Self::from_bits_truncate(Self::IS_GENERATOR.bits | Self::IS_COROUTINE.bits), + ), ("VARARGS", CodeFlags::HAS_VARARGS), ("VARKEYWORDS", CodeFlags::HAS_VARKEYWORDS), ]; diff --git a/parser/src/lexer.rs b/parser/src/lexer.rs index 8667165ea40..eda96ea7416 100644 --- a/parser/src/lexer.rs +++ b/parser/src/lexer.rs @@ -530,7 +530,7 @@ where loop { match self.next_char() { Some('\\') => { - if self.chr0 == Some(quote_char) { + if self.chr0 == Some(quote_char) && !is_raw { string_content.push(quote_char); self.next_char(); } else if is_raw { @@ -1625,7 +1625,7 @@ mod tests { is_fstring: false, }, Tok::String { - value: String::from("raw\'"), + value: String::from("raw\\'"), is_fstring: false, }, Tok::String { diff --git a/src/shell.rs b/src/shell.rs index e3922ee8828..4a6c095e613 100644 --- a/src/shell.rs +++ b/src/shell.rs @@ -6,7 +6,7 @@ use rustpython_vm::readline::{Readline, ReadlineResult}; use rustpython_vm::{ exceptions::{print_exception, PyBaseExceptionRef}, obj::objtype, - pyobject::{ItemProtocol, PyResult}, + pyobject::PyResult, scope::Scope, VirtualMachine, }; @@ -19,19 +19,10 @@ enum ShellExecResult { fn shell_exec(vm: &VirtualMachine, source: &str, scope: Scope) -> ShellExecResult { match vm.compile(source, compile::Mode::Single, "".to_owned()) { - Ok(code) => { - match vm.run_code_obj(code, scope.clone()) { - Ok(value) => { - // Save non-None values as "_" - if !vm.is_none(&value) { - let key = "_"; - scope.globals.set_item(key, value, vm).unwrap(); - } - ShellExecResult::Ok - } - Err(err) => ShellExecResult::PyErr(err), - } - } + Ok(code) => match vm.run_code_obj(code, scope) { + Ok(_val) => ShellExecResult::Ok, + Err(err) => ShellExecResult::PyErr(err), + }, Err(CompileError { error: CompileErrorType::Parse(ParseErrorType::EOF), .. 
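A note on the `shell.rs` hunk just above: the REPL no longer stores non-`None` results as `_` itself, because expression display is routed through `sys.displayhook` later in this series (see the `PrintExpr` change in `frame.rs` and the new `sys_displayhook` in `sysmodule.rs`). The intended behaviour follows CPython's hook, roughly:

```python
import sys

sys.displayhook(40 + 2)   # writes repr(42) to sys.stdout and stores it as builtins._
print(_)                  # 42, the underscore was updated by the hook
sys.displayhook(None)     # prints nothing and leaves _ untouched
print(_)                  # still 42
```

And on the `lexer.rs` change above: inside a raw string, a backslash before the quote character is now kept in the token value instead of being dropped (hence the test expectation changing to `raw\\'`), which matches CPython:

```python
# In a raw string the backslash is preserved, but it still keeps the
# following quote from terminating the literal.
s = r"raw\'"
assert s == "raw\\'"   # five characters: r a w \ '
assert len(s) == 5
print(list(s))         # ['r', 'a', 'w', '\\', "'"]
```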
diff --git a/vm/src/builtins.rs b/vm/src/builtins.rs index d4899e0a5e0..1f5f08bd56a 100644 --- a/vm/src/builtins.rs +++ b/vm/src/builtins.rs @@ -618,7 +618,7 @@ pub fn builtin_exit(exit_code_arg: OptionalArg, vm: &VirtualMachine Err(vm.new_exception(vm.ctx.exceptions.system_exit.clone(), vec![code])) } -#[derive(Debug, FromArgs)] +#[derive(Debug, Default, FromArgs)] pub struct PrintOptions { #[pyarg(keyword_only, default = "None")] sep: Option, diff --git a/vm/src/exceptions.rs b/vm/src/exceptions.rs index 3b0eeee7695..05d1a370fa6 100644 --- a/vm/src/exceptions.rs +++ b/vm/src/exceptions.rs @@ -6,27 +6,32 @@ use crate::obj::objtuple::{PyTuple, PyTupleRef}; use crate::obj::objtype::{self, PyClass, PyClassRef}; use crate::py_serde; use crate::pyobject::{ - PyClassImpl, PyContext, PyIterable, PyObjectRef, PyRef, PyResult, PyValue, TryFromObject, - TypeProtocol, + PyClassImpl, PyContext, PyIterable, PyObjectRef, PyRef, PyResult, PyValue, ThreadSafe, + TryFromObject, TypeProtocol, }; use crate::slots::PyTpFlags; use crate::types::create_type; use crate::VirtualMachine; + use itertools::Itertools; -use std::cell::{Cell, RefCell}; use std::fmt; use std::fs::File; use std::io::{self, BufRead, BufReader, Write}; +use std::sync::RwLock; + +use crossbeam_utils::atomic::AtomicCell; #[pyclass] pub struct PyBaseException { - traceback: RefCell>, - cause: RefCell>, - context: RefCell>, - suppress_context: Cell, - args: RefCell, + traceback: RwLock>, + cause: RwLock>, + context: RwLock>, + suppress_context: AtomicCell, + args: RwLock, } +impl ThreadSafe for PyBaseException {} + impl fmt::Debug for PyBaseException { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { // TODO: implement more detailed, non-recursive Debug formatter @@ -48,11 +53,11 @@ impl PyValue for PyBaseException { impl PyBaseException { pub(crate) fn new(args: Vec, vm: &VirtualMachine) -> PyBaseException { PyBaseException { - traceback: RefCell::new(None), - cause: RefCell::new(None), - context: RefCell::new(None), - suppress_context: Cell::new(false), - args: RefCell::new(PyTuple::from(args).into_ref(vm)), + traceback: RwLock::new(None), + cause: RwLock::new(None), + context: RwLock::new(None), + suppress_context: AtomicCell::new(false), + args: RwLock::new(PyTuple::from(args).into_ref(vm)), } } @@ -63,65 +68,65 @@ impl PyBaseException { #[pymethod(name = "__init__")] fn init(&self, args: PyFuncArgs, vm: &VirtualMachine) -> PyResult<()> { - self.args.replace(PyTuple::from(args.args).into_ref(vm)); + *self.args.write().unwrap() = PyTuple::from(args.args).into_ref(vm); Ok(()) } #[pyproperty] pub fn args(&self) -> PyTupleRef { - self.args.borrow().clone() + self.args.read().unwrap().clone() } #[pyproperty(setter)] fn set_args(&self, args: PyIterable, vm: &VirtualMachine) -> PyResult<()> { let args = args.iter(vm)?.collect::>>()?; - self.args.replace(PyTuple::from(args).into_ref(vm)); + *self.args.write().unwrap() = PyTuple::from(args).into_ref(vm); Ok(()) } #[pyproperty(name = "__traceback__")] pub fn traceback(&self) -> Option { - self.traceback.borrow().clone() + self.traceback.read().unwrap().clone() } #[pyproperty(name = "__traceback__", setter)] pub fn set_traceback(&self, traceback: Option) { - self.traceback.replace(traceback); + *self.traceback.write().unwrap() = traceback; } #[pyproperty(name = "__cause__")] pub fn cause(&self) -> Option { - self.cause.borrow().clone() + self.cause.read().unwrap().clone() } #[pyproperty(name = "__cause__", setter)] pub fn set_cause(&self, cause: Option) { - self.cause.replace(cause); + 
*self.cause.write().unwrap() = cause; } #[pyproperty(name = "__context__")] pub fn context(&self) -> Option { - self.context.borrow().clone() + self.context.read().unwrap().clone() } #[pyproperty(name = "__context__", setter)] pub fn set_context(&self, context: Option) { - self.context.replace(context); + *self.context.write().unwrap() = context; } #[pyproperty(name = "__suppress_context__")] fn get_suppress_context(&self) -> bool { - self.suppress_context.get() + self.suppress_context.load() } #[pyproperty(name = "__suppress_context__", setter)] fn set_suppress_context(&self, suppress_context: bool) { - self.suppress_context.set(suppress_context); + self.suppress_context.store(suppress_context); } #[pymethod] fn with_traceback(zelf: PyRef, tb: Option) -> PyResult { - zelf.traceback.replace(tb); + *zelf.traceback.write().unwrap() = tb; Ok(zelf.as_object().clone()) } @@ -213,7 +218,7 @@ pub fn write_exception_inner( vm: &VirtualMachine, exc: &PyBaseExceptionRef, ) -> io::Result<()> { - if let Some(tb) = exc.traceback.borrow().clone() { + if let Some(tb) = exc.traceback.read().unwrap().clone() { writeln!(output, "Traceback (most recent call last):")?; for tb in tb.iter() { write_traceback_entry(output, &tb)?; @@ -605,7 +610,8 @@ fn none_getter(_obj: PyObjectRef, vm: &VirtualMachine) -> PyObjectRef { fn make_arg_getter(idx: usize) -> impl Fn(PyBaseExceptionRef, &VirtualMachine) -> PyObjectRef { move |exc, vm| { exc.args - .borrow() + .read() + .unwrap() .as_slice() .get(idx) .cloned() @@ -716,7 +722,7 @@ impl serde::Serialize for SerializeException<'_> { "context", &self.exc.context().as_ref().map(|e| Self::new(self.vm, e)), )?; - struc.serialize_field("suppress_context", &self.exc.suppress_context.get())?; + struc.serialize_field("suppress_context", &self.exc.suppress_context.load())?; let args = { struct Args<'vm>(&'vm VirtualMachine, PyTupleRef); diff --git a/vm/src/frame.rs b/vm/src/frame.rs index ae171410ca2..e522a8b4f3d 100644 --- a/vm/src/frame.rs +++ b/vm/src/frame.rs @@ -647,13 +647,10 @@ impl ExecutingFrame<'_> { bytecode::Instruction::Continue => self.unwind_blocks(vm, UnwindReason::Continue), bytecode::Instruction::PrintExpr => { let expr = self.pop_value(); - if !expr.is(&vm.get_none()) { - let repr = vm.to_repr(&expr)?; - // TODO: implement sys.displayhook - if let Ok(ref print) = vm.get_attribute(vm.builtins.clone(), "print") { - vm.invoke(print, vec![repr.into_object()])?; - } - } + + let displayhook = vm.get_attribute(vm.sys_module.clone(), "displayhook")?; + vm.invoke(&displayhook, vec![expr])?; + Ok(None) } bytecode::Instruction::LoadBuildClass => { diff --git a/vm/src/obj/objasyncgenerator.rs b/vm/src/obj/objasyncgenerator.rs index e209040d02a..c26e2a7c0ed 100644 --- a/vm/src/obj/objasyncgenerator.rs +++ b/vm/src/obj/objasyncgenerator.rs @@ -4,18 +4,19 @@ use super::objtype::{self, PyClassRef}; use crate::exceptions::PyBaseExceptionRef; use crate::frame::FrameRef; use crate::function::OptionalArg; -use crate::pyobject::{PyClassImpl, PyContext, PyObjectRef, PyRef, PyResult, PyValue}; +use crate::pyobject::{PyClassImpl, PyContext, PyObjectRef, PyRef, PyResult, PyValue, ThreadSafe}; use crate::vm::VirtualMachine; -use std::cell::Cell; +use crossbeam_utils::atomic::AtomicCell; #[pyclass(name = "async_generator")] #[derive(Debug)] pub struct PyAsyncGen { inner: Coro, - running_async: Cell, + running_async: AtomicCell, } pub type PyAsyncGenRef = PyRef; +impl ThreadSafe for PyAsyncGen {} impl PyValue for PyAsyncGen { fn class(vm: &VirtualMachine) -> PyClassRef { @@ -32,7 +33,7 @@ 
impl PyAsyncGen { pub fn new(frame: FrameRef, vm: &VirtualMachine) -> PyAsyncGenRef { PyAsyncGen { inner: Coro::new(frame, Variant::AsyncGen), - running_async: Cell::new(false), + running_async: AtomicCell::new(false), } .into_ref(vm) } @@ -57,7 +58,7 @@ impl PyAsyncGen { fn asend(zelf: PyRef, value: PyObjectRef, _vm: &VirtualMachine) -> PyAsyncGenASend { PyAsyncGenASend { ag: zelf, - state: Cell::new(AwaitableState::Init), + state: AtomicCell::new(AwaitableState::Init), value, } } @@ -73,7 +74,7 @@ impl PyAsyncGen { PyAsyncGenAThrow { ag: zelf, aclose: false, - state: Cell::new(AwaitableState::Init), + state: AtomicCell::new(AwaitableState::Init), value: ( exc_type, exc_val.unwrap_or_else(|| vm.get_none()), @@ -87,7 +88,7 @@ impl PyAsyncGen { PyAsyncGenAThrow { ag: zelf, aclose: true, - state: Cell::new(AwaitableState::Init), + state: AtomicCell::new(AwaitableState::Init), value: ( vm.ctx.exceptions.generator_exit.clone().into_object(), vm.get_none(), @@ -129,15 +130,15 @@ impl PyAsyncGenWrappedValue { if objtype::isinstance(&e, &vm.ctx.exceptions.stop_async_iteration) || objtype::isinstance(&e, &vm.ctx.exceptions.generator_exit) { - ag.inner.closed.set(true); + ag.inner.closed.store(true); } - ag.running_async.set(false); + ag.running_async.store(false); } let val = val?; match_class!(match val { val @ Self => { - ag.running_async.set(false); + ag.running_async.store(false); Err(vm.new_exception( vm.ctx.exceptions.stop_iteration.clone(), vec![val.0.clone()], @@ -159,10 +160,12 @@ enum AwaitableState { #[derive(Debug)] struct PyAsyncGenASend { ag: PyAsyncGenRef, - state: Cell, + state: AtomicCell, value: PyObjectRef, } +impl ThreadSafe for PyAsyncGenASend {} + impl PyValue for PyAsyncGenASend { fn class(vm: &VirtualMachine) -> PyClassRef { vm.ctx.types.async_generator_asend.clone() @@ -187,7 +190,7 @@ impl PyAsyncGenASend { #[pymethod] fn send(&self, val: PyObjectRef, vm: &VirtualMachine) -> PyResult { - let val = match self.state.get() { + let val = match self.state.load() { AwaitableState::Closed => { return Err(vm.new_runtime_error( "cannot reuse already awaited __anext__()/asend()".to_owned(), @@ -195,13 +198,13 @@ impl PyAsyncGenASend { } AwaitableState::Iter => val, // already running, all good AwaitableState::Init => { - if self.ag.running_async.get() { + if self.ag.running_async.load() { return Err(vm.new_runtime_error( "anext(): asynchronous generator is already running".to_owned(), )); } - self.ag.running_async.set(true); - self.state.set(AwaitableState::Iter); + self.ag.running_async.store(true); + self.state.store(AwaitableState::Iter); if vm.is_none(&val) { self.value.clone() } else { @@ -225,7 +228,7 @@ impl PyAsyncGenASend { exc_tb: OptionalArg, vm: &VirtualMachine, ) -> PyResult { - if let AwaitableState::Closed = self.state.get() { + if let AwaitableState::Closed = self.state.load() { return Err( vm.new_runtime_error("cannot reuse already awaited __anext__()/asend()".to_owned()) ); @@ -246,7 +249,7 @@ impl PyAsyncGenASend { #[pymethod] fn close(&self) { - self.state.set(AwaitableState::Closed); + self.state.store(AwaitableState::Closed); } } @@ -255,10 +258,12 @@ impl PyAsyncGenASend { struct PyAsyncGenAThrow { ag: PyAsyncGenRef, aclose: bool, - state: Cell, + state: AtomicCell, value: (PyObjectRef, PyObjectRef, PyObjectRef), } +impl ThreadSafe for PyAsyncGenAThrow {} + impl PyValue for PyAsyncGenAThrow { fn class(vm: &VirtualMachine) -> PyClassRef { vm.ctx.types.async_generator_athrow.clone() @@ -283,14 +288,14 @@ impl PyAsyncGenAThrow { #[pymethod] fn send(&self, val: 
PyObjectRef, vm: &VirtualMachine) -> PyResult { - match self.state.get() { + match self.state.load() { AwaitableState::Closed => { Err(vm .new_runtime_error("cannot reuse already awaited aclose()/athrow()".to_owned())) } AwaitableState::Init => { - if self.ag.running_async.get() { - self.state.set(AwaitableState::Closed); + if self.ag.running_async.load() { + self.state.store(AwaitableState::Closed); let msg = if self.aclose { "aclose(): asynchronous generator is already running" } else { @@ -299,7 +304,7 @@ impl PyAsyncGenAThrow { return Err(vm.new_runtime_error(msg.to_owned())); } if self.ag.inner.closed() { - self.state.set(AwaitableState::Closed); + self.state.store(AwaitableState::Closed); return Err(vm.new_exception_empty(vm.ctx.exceptions.stop_iteration.clone())); } if !vm.is_none(&val) { @@ -307,8 +312,8 @@ impl PyAsyncGenAThrow { "can't send non-None value to a just-started async generator".to_owned(), )); } - self.state.set(AwaitableState::Iter); - self.ag.running_async.set(true); + self.state.store(AwaitableState::Iter); + self.ag.running_async.store(true); let (ty, val, tb) = self.value.clone(); let ret = self.ag.inner.throw(ty, val, tb, vm); @@ -368,7 +373,7 @@ impl PyAsyncGenAThrow { #[pymethod] fn close(&self) { - self.state.set(AwaitableState::Closed); + self.state.store(AwaitableState::Closed); } fn ignored_close(&self, res: &PyResult) -> bool { @@ -376,13 +381,13 @@ impl PyAsyncGenAThrow { .map_or(false, |v| v.payload_is::()) } fn yield_close(&self, vm: &VirtualMachine) -> PyBaseExceptionRef { - self.ag.running_async.set(false); - self.state.set(AwaitableState::Closed); + self.ag.running_async.store(false); + self.state.store(AwaitableState::Closed); vm.new_runtime_error("async generator ignored GeneratorExit".to_owned()) } fn check_error(&self, exc: PyBaseExceptionRef, vm: &VirtualMachine) -> PyBaseExceptionRef { - self.ag.running_async.set(false); - self.state.set(AwaitableState::Closed); + self.ag.running_async.store(false); + self.state.store(AwaitableState::Closed); if self.aclose && (objtype::isinstance(&exc, &vm.ctx.exceptions.stop_async_iteration) || objtype::isinstance(&exc, &vm.ctx.exceptions.generator_exit)) diff --git a/vm/src/obj/objbool.rs b/vm/src/obj/objbool.rs index 56d21bb5c5e..e67a39dc2ba 100644 --- a/vm/src/obj/objbool.rs +++ b/vm/src/obj/objbool.rs @@ -187,7 +187,7 @@ pub fn get_py_int(obj: &PyObjectRef) -> &PyInt { &obj.payload::().unwrap() } -#[derive(Debug, Copy, Clone, PartialEq)] +#[derive(Debug, Default, Copy, Clone, PartialEq)] pub struct IntoPyBool { value: bool, } diff --git a/vm/src/obj/objcoroinner.rs b/vm/src/obj/objcoroinner.rs index 6b504d1892a..4cbb0940b45 100644 --- a/vm/src/obj/objcoroinner.rs +++ b/vm/src/obj/objcoroinner.rs @@ -1,10 +1,11 @@ use super::objtype::{self, PyClassRef}; use crate::exceptions::{self, PyBaseExceptionRef}; use crate::frame::{ExecutionResult, FrameRef}; -use crate::pyobject::{PyObjectRef, PyResult}; +use crate::pyobject::{PyObjectRef, PyResult, ThreadSafe}; use crate::vm::VirtualMachine; -use std::cell::{Cell, RefCell}; +use crossbeam_utils::atomic::AtomicCell; +use std::sync::RwLock; #[derive(Debug, PartialEq, Clone, Copy)] pub enum Variant { @@ -34,28 +35,30 @@ impl Variant { #[derive(Debug)] pub struct Coro { frame: FrameRef, - pub closed: Cell, - running: Cell, - exceptions: RefCell>, - started: Cell, + pub closed: AtomicCell, + running: AtomicCell, + exceptions: RwLock>, + started: AtomicCell, variant: Variant, } +impl ThreadSafe for Coro {} + impl Coro { pub fn new(frame: FrameRef, variant: Variant) -> 
Self { Coro { frame, - closed: Cell::new(false), - running: Cell::new(false), - exceptions: RefCell::new(vec![]), - started: Cell::new(false), + closed: AtomicCell::new(false), + running: AtomicCell::new(false), + exceptions: RwLock::new(vec![]), + started: AtomicCell::new(false), variant, } } fn maybe_close(&self, res: &PyResult) { match res { - Ok(ExecutionResult::Return(_)) | Err(_) => self.closed.set(true), + Ok(ExecutionResult::Return(_)) | Err(_) => self.closed.store(true), Ok(ExecutionResult::Yield(_)) => {} } } @@ -64,27 +67,29 @@ impl Coro { where F: FnOnce(FrameRef) -> PyResult, { - self.running.set(true); + self.running.store(true); let curr_exception_stack_len = vm.exceptions.borrow().len(); vm.exceptions .borrow_mut() - .append(&mut self.exceptions.borrow_mut()); + .append(&mut self.exceptions.write().unwrap()); let result = vm.with_frame(self.frame.clone(), func); - self.exceptions.replace( - vm.exceptions + std::mem::swap( + &mut *self.exceptions.write().unwrap(), + &mut vm + .exceptions .borrow_mut() .split_off(curr_exception_stack_len), ); - self.running.set(false); - self.started.set(true); + self.running.store(false); + self.started.store(true); result } pub fn send(&self, value: PyObjectRef, vm: &VirtualMachine) -> PyResult { - if self.closed.get() { + if self.closed.load() { return Err(vm.new_exception_empty(self.variant.stop_iteration(vm))); } - if !self.started.get() && !vm.is_none(&value) { + if !self.started.load() && !vm.is_none(&value) { return Err(vm.new_type_error(format!( "can't send non-None value to a just-started {}", self.variant.name() @@ -120,7 +125,7 @@ impl Coro { exc_tb: PyObjectRef, vm: &VirtualMachine, ) -> PyResult { - if self.closed.get() { + if self.closed.load() { return Err(exceptions::normalize(exc_type, exc_val, exc_tb, vm)?); } let result = self.run_with_context(vm, |f| f.gen_throw(vm, exc_type, exc_val, exc_tb)); @@ -129,7 +134,7 @@ impl Coro { } pub fn close(&self, vm: &VirtualMachine) -> PyResult<()> { - if self.closed.get() { + if self.closed.load() { return Ok(()); } let result = self.run_with_context(vm, |f| { @@ -140,7 +145,7 @@ impl Coro { vm.get_none(), ) }); - self.closed.set(true); + self.closed.store(true); match result { Ok(ExecutionResult::Yield(_)) => { Err(vm.new_runtime_error(format!("{} ignored GeneratorExit", self.variant.name()))) @@ -151,13 +156,13 @@ impl Coro { } pub fn started(&self) -> bool { - self.started.get() + self.started.load() } pub fn running(&self) -> bool { - self.running.get() + self.running.load() } pub fn closed(&self) -> bool { - self.closed.get() + self.closed.load() } pub fn frame(&self) -> FrameRef { self.frame.clone() diff --git a/vm/src/obj/objcoroutine.rs b/vm/src/obj/objcoroutine.rs index d1faacc2be0..48ccfe4dc15 100644 --- a/vm/src/obj/objcoroutine.rs +++ b/vm/src/obj/objcoroutine.rs @@ -4,7 +4,7 @@ use super::objstr::PyStringRef; use super::objtype::PyClassRef; use crate::frame::FrameRef; use crate::function::OptionalArg; -use crate::pyobject::{PyClassImpl, PyContext, PyObjectRef, PyRef, PyResult, PyValue}; +use crate::pyobject::{PyClassImpl, PyContext, PyObjectRef, PyRef, PyResult, PyValue, ThreadSafe}; use crate::vm::VirtualMachine; pub type PyCoroutineRef = PyRef; @@ -15,6 +15,8 @@ pub struct PyCoroutine { inner: Coro, } +impl ThreadSafe for PyCoroutine {} + impl PyValue for PyCoroutine { fn class(vm: &VirtualMachine) -> PyClassRef { vm.ctx.types.coroutine_type.clone() @@ -101,6 +103,8 @@ pub struct PyCoroutineWrapper { coro: PyCoroutineRef, } +impl ThreadSafe for PyCoroutineWrapper {} + 
impl PyValue for PyCoroutineWrapper { fn class(vm: &VirtualMachine) -> PyClassRef { vm.ctx.types.coroutine_wrapper_type.clone() diff --git a/vm/src/obj/objfunction.rs b/vm/src/obj/objfunction.rs index 7386772ea0b..c6d24fe6f17 100644 --- a/vm/src/obj/objfunction.rs +++ b/vm/src/obj/objfunction.rs @@ -336,6 +336,11 @@ impl PyBoundMethod { fn func(&self) -> PyObjectRef { self.function.clone() } + + #[pyproperty(magic)] + fn module(&self, vm: &VirtualMachine) -> Option { + vm.get_attribute(self.function.clone(), "__module__").ok() + } } impl PyValue for PyBoundMethod { diff --git a/vm/src/obj/objgenerator.rs b/vm/src/obj/objgenerator.rs index e91e24406c5..b2044621f92 100644 --- a/vm/src/obj/objgenerator.rs +++ b/vm/src/obj/objgenerator.rs @@ -7,7 +7,7 @@ use super::objcoroinner::{Coro, Variant}; use super::objtype::PyClassRef; use crate::frame::FrameRef; use crate::function::OptionalArg; -use crate::pyobject::{PyClassImpl, PyContext, PyObjectRef, PyRef, PyResult, PyValue}; +use crate::pyobject::{PyClassImpl, PyContext, PyObjectRef, PyRef, PyResult, PyValue, ThreadSafe}; use crate::vm::VirtualMachine; pub type PyGeneratorRef = PyRef; @@ -18,6 +18,8 @@ pub struct PyGenerator { inner: Coro, } +impl ThreadSafe for PyGenerator {} + impl PyValue for PyGenerator { fn class(vm: &VirtualMachine) -> PyClassRef { vm.ctx.generator_type() diff --git a/vm/src/stdlib/os.rs b/vm/src/stdlib/os.rs index e70061bc262..bd2a4e65367 100644 --- a/vm/src/stdlib/os.rs +++ b/vm/src/stdlib/os.rs @@ -1194,6 +1194,11 @@ fn os_getpgid(pid: u32, vm: &VirtualMachine) -> PyResult { } } +#[cfg(unix)] +fn os_getpgrp(vm: &VirtualMachine) -> PyResult { + Ok(vm.new_int(unistd::getpgrp().as_raw())) +} + #[cfg(all(unix, not(target_os = "redox")))] fn os_getsid(pid: u32, vm: &VirtualMachine) -> PyResult { match unistd::getsid(Some(Pid::from_raw(pid as i32))) { @@ -1444,6 +1449,14 @@ fn os_utime( unimplemented!("utime") } +#[cfg(unix)] +fn os_sync(_vm: &VirtualMachine) -> PyResult<()> { + unsafe { + libc::sync(); + } + Ok(()) +} + pub fn make_module(vm: &VirtualMachine) -> PyObjectRef { let ctx = &vm.ctx; @@ -1620,12 +1633,14 @@ fn extend_module_platform_specific(vm: &VirtualMachine, module: &PyObjectRef) { "getegid" => ctx.new_function(os_getegid), "getpgid" => ctx.new_function(os_getpgid), "getuid" => ctx.new_function(os_getuid), + "getpgrp" => ctx.new_function(os_getpgrp), "geteuid" => ctx.new_function(os_geteuid), "pipe" => ctx.new_function(os_pipe), //TODO: windows "set_blocking" => ctx.new_function(os_set_blocking), "setgid" => ctx.new_function(os_setgid), "setpgid" => ctx.new_function(os_setpgid), "setuid" => ctx.new_function(os_setuid), + "sync" => ctx.new_function(os_sync), "system" => ctx.new_function(os_system), "ttyname" => ctx.new_function(os_ttyname), "uname" => ctx.new_function(os_uname), diff --git a/vm/src/sysmodule.rs b/vm/src/sysmodule.rs index 52ab87699f3..73d360d8065 100644 --- a/vm/src/sysmodule.rs +++ b/vm/src/sysmodule.rs @@ -1,8 +1,9 @@ use std::sync::Arc; use std::{env, mem}; +use crate::builtins; use crate::frame::FrameRef; -use crate::function::OptionalArg; +use crate::function::{Args, OptionalArg, PyFuncArgs}; use crate::obj::objstr::PyStringRef; use crate::pyhash::PyHashInfo; use crate::pyobject::{ @@ -202,6 +203,24 @@ fn sys_exit(code: OptionalArg, vm: &VirtualMachine) -> PyResult { Err(vm.new_exception(vm.ctx.exceptions.system_exit.clone(), vec![code])) } +fn sys_audit(_args: PyFuncArgs) { + // TODO: sys.audit implementation +} + +fn sys_displayhook(obj: PyObjectRef, vm: &VirtualMachine) -> PyResult<()> 
{ + // Save non-None values as "_" + if vm.is_none(&obj) { + return Ok(()); + } + // set to none to avoid recursion while printing + vm.set_attr(&vm.builtins, "_", vm.get_none())?; + // TODO: catch encoding errors + let repr = vm.to_repr(&obj)?.into_object(); + builtins::builtin_print(Args::new(vec![repr]), Default::default(), vm)?; + vm.set_attr(&vm.builtins, "_", obj)?; + Ok(()) +} + pub fn make_module(vm: &VirtualMachine, module: PyObjectRef, builtins: PyObjectRef) { let ctx = &vm.ctx; @@ -394,6 +413,9 @@ settrace() -- set the global debug tracing function "base_exec_prefix" => ctx.new_str(base_exec_prefix.to_owned()), "exit" => ctx.new_function(sys_exit), "abiflags" => ctx.new_str("".to_owned()), + "audit" => ctx.new_function(sys_audit), + "displayhook" => ctx.new_function(sys_displayhook), + "__displayhook__" => ctx.new_function(sys_displayhook), }); modules.set_item("sys", module.clone(), vm).unwrap(); diff --git a/wasm/lib/Cargo.toml b/wasm/lib/Cargo.toml index 69eaa1b1e92..677fc5bbce4 100644 --- a/wasm/lib/Cargo.toml +++ b/wasm/lib/Cargo.toml @@ -25,6 +25,7 @@ serde-wasm-bindgen = "0.1" serde = "1.0" js-sys = "0.3" futures = "0.1" +generational-arena = "0.2" [dependencies.web-sys] version = "0.3" @@ -40,4 +41,4 @@ features = [ ] [package.metadata.wasm-pack.profile.release] -wasm-opt = ["-O1"] +wasm-opt = false#["-O1"] diff --git a/wasm/lib/src/convert.rs b/wasm/lib/src/convert.rs index 353f2b199cd..a352b3fa707 100644 --- a/wasm/lib/src/convert.rs +++ b/wasm/lib/src/convert.rs @@ -1,3 +1,4 @@ +use generational_arena::Arena; use std::cell::RefCell; use js_sys::{Array, ArrayBuffer, Object, Promise, Reflect, SyntaxError, Uint8Array}; @@ -16,7 +17,29 @@ use crate::browser_module; use crate::vm_class::{stored_vm_from_wasm, WASMVirtualMachine}; // Currently WASM do not support multithreading. We should change this once it is enabled. 
-thread_local!(static JS_FUNCS: RefCell> = RefCell::new(vec![])); +thread_local!(static JS_HANDLES: RefCell> = RefCell::new(Arena::new())); + +pub struct JsHandle(generational_arena::Index); +impl JsHandle { + pub fn new(js: JsValue) -> Self { + let idx = JS_HANDLES.with(|arena| arena.borrow_mut().insert(js)); + JsHandle(idx) + } + pub fn get(&self) -> JsValue { + JS_HANDLES.with(|arena| { + arena + .borrow() + .get(self.0) + .expect("index was removed") + .clone() + }) + } +} +impl Drop for JsHandle { + fn drop(&mut self) { + JS_HANDLES.with(|arena| arena.borrow_mut().remove(self.0)); + } +} #[wasm_bindgen(inline_js = r" export class PyError extends Error { @@ -195,32 +218,22 @@ pub fn js_to_py(vm: &VirtualMachine, js_val: JsValue) -> PyObjectRef { dict.into_object() } } else if js_val.is_function() { - let func = js_sys::Function::from(js_val); - let idx = JS_FUNCS.with(|funcs| { - let mut funcs = funcs.borrow_mut(); - funcs.push(func); - funcs.len() - 1 - }); + let js_handle = JsHandle::new(js_val); vm.ctx .new_method(move |vm: &VirtualMachine, args: PyFuncArgs| -> PyResult { - JS_FUNCS.with(|funcs| { - let this = Object::new(); - for (k, v) in args.kwargs { - Reflect::set(&this, &k.into(), &py_to_js(vm, v)) - .expect("property to be settable"); - } - let js_args = Array::new(); - for v in args.args { - js_args.push(&py_to_js(vm, v)); - } - funcs - .borrow() - .get(idx) - .unwrap() - .apply(&this, &js_args) - .map(|val| js_to_py(vm, val)) - .map_err(|err| js_err_to_py_err(vm, &err)) - }) + let this = Object::new(); + for (k, v) in args.kwargs { + Reflect::set(&this, &k.into(), &py_to_js(vm, v)) + .expect("property to be settable"); + } + let js_args = Array::new(); + for v in args.args { + js_args.push(&py_to_js(vm, v)); + } + let func = js_sys::Function::from(js_handle.get()); + func.apply(&this, &js_args) + .map(|val| js_to_py(vm, val)) + .map_err(|err| js_err_to_py_err(vm, &err)) }) } else if let Some(err) = js_val.dyn_ref::() { js_err_to_py_err(vm, err).into_object() diff --git a/wasm/lib/src/vm_class.rs b/wasm/lib/src/vm_class.rs index 8130e0c636c..199c0b5a5d6 100644 --- a/wasm/lib/src/vm_class.rs +++ b/wasm/lib/src/vm_class.rs @@ -6,15 +6,12 @@ use js_sys::{Object, TypeError}; use wasm_bindgen::prelude::*; use rustpython_compiler::compile; -use rustpython_vm::function::PyFuncArgs; -use rustpython_vm::pyobject::{ - ItemProtocol, PyObject, PyObjectPayload, PyObjectRef, PyResult, PyValue, -}; +use rustpython_vm::pyobject::{ItemProtocol, PyObject, PyObjectPayload, PyObjectRef, PyValue}; use rustpython_vm::scope::{NameProtocol, Scope}; use rustpython_vm::{InitParameter, PySettings, VirtualMachine}; use crate::browser_module::setup_browser_module; -use crate::convert::{self, PyResultExt}; +use crate::convert::{self, JsHandle, PyResultExt}; use crate::js_module; use crate::wasm_builtins; use rustpython_compiler::mode::Mode; @@ -67,7 +64,6 @@ impl StoredVirtualMachine { // https://rustwasm.github.io/2018/10/24/multithreading-rust-and-wasm.html#atomic-instructions thread_local! 
{ static STORED_VMS: RefCell>> = RefCell::default(); - static JS_PRINT_FUNC: RefCell> = RefCell::new(None); } pub fn get_vm_id(vm: &VirtualMachine) -> &str { @@ -223,39 +219,28 @@ impl WASMVirtualMachine { fn error() -> JsValue { TypeError::new("Unknown stdout option, please pass a function or 'console'").into() } - let print_fn: PyObjectRef = if let Some(s) = stdout.as_string() { + use wasm_builtins::make_stdout_object; + let stdout: PyObjectRef = if let Some(s) = stdout.as_string() { match s.as_str() { - "console" => vm.ctx.new_method(wasm_builtins::builtin_print_console), + "console" => make_stdout_object(vm, wasm_builtins::sys_stdout_write_console), _ => return Err(error()), } } else if stdout.is_function() { - let func = js_sys::Function::from(stdout); - JS_PRINT_FUNC.with(|thread_func| thread_func.replace(Some(func.clone()))); - vm.ctx - .new_method(move |vm: &VirtualMachine, args: PyFuncArgs| -> PyResult { - JS_PRINT_FUNC.with(|func| { - func.borrow() - .as_ref() - .unwrap() - .call1( - &JsValue::UNDEFINED, - &wasm_builtins::format_print_args(vm, args)?.into(), - ) - .map_err(|err| convert::js_py_typeerror(vm, err))?; - Ok(vm.get_none()) - }) - }) + let func_handle = JsHandle::new(stdout); + make_stdout_object(vm, move |data, vm| { + let func = js_sys::Function::from(func_handle.get()); + func.call1(&JsValue::UNDEFINED, &data.into()) + .map_err(|err| convert::js_py_typeerror(vm, err))?; + Ok(()) + }) } else if stdout.is_null() { - fn noop(vm: &VirtualMachine, _args: PyFuncArgs) -> PyResult { - Ok(vm.get_none()) - } - vm.ctx.new_method(noop) + make_stdout_object(vm, |_, _| Ok(())) } else if stdout.is_undefined() { - vm.ctx.new_method(wasm_builtins::builtin_print_console) + make_stdout_object(vm, wasm_builtins::sys_stdout_write_console) } else { return Err(error()); }; - vm.set_attr(&vm.builtins, "print", print_fn).unwrap(); + vm.set_attr(&vm.sys_module, "stdout", stdout).unwrap(); Ok(()) })? } diff --git a/wasm/lib/src/wasm_builtins.rs b/wasm/lib/src/wasm_builtins.rs index 7fc6f65346e..5b0b9fd6ebb 100644 --- a/wasm/lib/src/wasm_builtins.rs +++ b/wasm/lib/src/wasm_builtins.rs @@ -4,75 +4,37 @@ //! desktop. //! Implements functions listed here: https://docs.python.org/3/library/builtins.html. 
-use js_sys::{self, Array}; use web_sys::{self, console}; -use rustpython_vm::function::PyFuncArgs; -use rustpython_vm::obj::{objstr, objtype}; -use rustpython_vm::pyobject::{IdProtocol, PyResult, TypeProtocol}; +use rustpython_vm::obj::objstr::PyStringRef; +use rustpython_vm::pyobject::{PyObjectRef, PyResult}; use rustpython_vm::VirtualMachine; pub(crate) fn window() -> web_sys::Window { web_sys::window().expect("Window to be available") } -pub fn format_print_args(vm: &VirtualMachine, args: PyFuncArgs) -> PyResult { - // Handle 'sep' kwarg: - let sep_arg = args - .get_optional_kwarg("sep") - .filter(|obj| !obj.is(&vm.get_none())); - if let Some(ref obj) = sep_arg { - if !objtype::isinstance(obj, &vm.ctx.str_type()) { - return Err(vm.new_type_error(format!( - "sep must be None or a string, not {}", - obj.class().name - ))); - } - } - let sep_str = sep_arg.as_ref().map(|obj| objstr::borrow_value(obj)); - - // Handle 'end' kwarg: - let end_arg = args - .get_optional_kwarg("end") - .filter(|obj| !obj.is(&vm.get_none())); - if let Some(ref obj) = end_arg { - if !objtype::isinstance(obj, &vm.ctx.str_type()) { - return Err(vm.new_type_error(format!( - "end must be None or a string, not {}", - obj.class().name - ))); - } - } - let end_str = end_arg.as_ref().map(|obj| objstr::borrow_value(obj)); - - // No need to handle 'flush' kwarg, irrelevant when writing to String - - let mut output = String::new(); - let mut first = true; - for a in args.args { - if first { - first = false; - } else if let Some(ref sep_str) = sep_str { - output.push_str(sep_str); - } else { - output.push(' '); - } - output.push_str(&vm.to_pystr(&a)?); - } - - if let Some(end_str) = end_str { - output.push_str(end_str.as_ref()) - } else { - output.push('\n'); - } - Ok(output) +pub fn sys_stdout_write_console(data: &str, _vm: &VirtualMachine) -> PyResult<()> { + console::log_1(&data.into()); + Ok(()) } -pub fn builtin_print_console(vm: &VirtualMachine, args: PyFuncArgs) -> PyResult { - let arr = Array::new(); - for arg in args.args { - arr.push(&vm.to_pystr(&arg)?.into()); - } - console::log(&arr); - Ok(vm.get_none()) +pub fn make_stdout_object( + vm: &VirtualMachine, + write_f: impl Fn(&str, &VirtualMachine) -> PyResult<()> + Send + Sync + 'static, +) -> PyObjectRef { + let ctx = &vm.ctx; + let write_method = ctx.new_method( + move |_self: PyObjectRef, data: PyStringRef, vm: &VirtualMachine| -> PyResult<()> { + write_f(data.as_str(), vm) + }, + ); + let flush_method = ctx.new_method(|_self: PyObjectRef| {}); + // there's not really any point to storing this class so that there's a consistent type object, + // we just want a half-decent repr() output + let cls = py_class!(ctx, "JSStdout", vm.ctx.object(), { + "write" => write_method, + "flush" => flush_method, + }); + ctx.new_base_object(cls, None) } diff --git a/wasm/tests/test_exec_mode.py b/wasm/tests/test_exec_mode.py index 669d7049db1..a2a55846f48 100644 --- a/wasm/tests/test_exec_mode.py +++ b/wasm/tests/test_exec_mode.py @@ -1,18 +1,21 @@ def test_eval_mode(wdriver): assert wdriver.execute_script("return window.rp.pyEval('1+1')") == 2 + def test_exec_mode(wdriver): assert wdriver.execute_script("return window.rp.pyExec('1+1')") is None + def test_exec_single_mode(wdriver): assert wdriver.execute_script("return window.rp.pyExecSingle('1+1')") == 2 - assert wdriver.execute_script( + stdout = wdriver.execute_script( """ - var output = []; + let output = ""; save_output = function(text) {{ - output.push(text) + output += text }}; window.rp.pyExecSingle('1+1\\n2+2',{stdout: 
save_output}); return output; """ - ) == ["2\n", "4\n"] + ) + assert stdout == "2\n4\n" diff --git a/wasm/tests/test_inject_module.py b/wasm/tests/test_inject_module.py index 462ed653537..afa25250d95 100644 --- a/wasm/tests/test_inject_module.py +++ b/wasm/tests/test_inject_module.py @@ -19,4 +19,3 @@ def get_thing(): return __thing() ); """ ) - From 8e9cb5a7c8aab8788674f256e195ed7c186ee8b1 Mon Sep 17 00:00:00 2001 From: TheAnyKey <32773684+TheAnyKey@users.noreply.github.com> Date: Thu, 14 May 2020 22:17:53 +0200 Subject: [PATCH 39/39] Update README.md --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 5de067e9d97..fe1ed44c941 100644 --- a/README.md +++ b/README.md @@ -16,6 +16,8 @@ A Python-3 (CPython >= 3.5.0) Interpreter written in Rust :snake: :scream: [![WAPM package](https://wapm.io/package/rustpython/badge.svg?style=flat)](https://wapm.io/package/rustpython) [![Open in Gitpod](https://img.shields.io/static/v1?label=Open%20in&message=Gitpod&color=1aa6e4&logo=gitpod)](https://gitpod.io#https://github.com/RustPython/RustPython) +[![Open in Gitpod](https://img.shields.io/static/v1?label=Open%20in&message=Gitpod&color=1aa6e4&logo=gitpod)](https://gitpod.io#https://github.com/TheAnyKey/RustPython) + For this Fork [![Open in Gitpod](https://img.shields.io/static/v1?label=Open%20in&message=Gitpod&color=1aa6e4&logo=gitpod)](https://gitpod.io#https://github.com/TheAnyKey/RustPython)
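Stepping back to the wasm changes earlier in this series (`make_stdout_object` in `wasm_builtins.rs` and the stdout handling rework in `vm_class.rs`): the new model leans on plain Python I/O semantics. `print()` only needs `sys.stdout` to expose `write()` and `flush()`, so any such object can capture interpreter output, which is what the updated `test_exec_mode.py` exercises. A minimal pure-Python sketch of the idea (the `CaptureStdout` class here is illustrative, not the actual `JSStdout` object):

```python
import io
import sys

class CaptureStdout:
    """Stand-in for a JSStdout-like object: write()/flush() is all print() needs."""
    def __init__(self):
        self.buffer = io.StringIO()
    def write(self, data):
        self.buffer.write(data)
    def flush(self):
        pass

old_stdout, sys.stdout = sys.stdout, CaptureStdout()
print(1 + 1)
print(2 + 2)
captured, sys.stdout = sys.stdout.buffer.getvalue(), old_stdout
assert captured == "2\n4\n"
print("captured:", captured)
```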