diff --git a/learning.py b/learning.py index a98937435..02a5289a4 100644 --- a/learning.py +++ b/learning.py @@ -78,14 +78,14 @@ def setproblem(self, target, inputs=None, exclude=()): to not use in inputs. Attributes can be -n .. n, or an attrname. Also computes the list of possible values, if that wasn't done yet.""" self.target = self.attrnum(target) - exclude = map(self.attrnum, exclude) + exclude = list(map(self.attrnum, exclude)) if inputs: self.inputs = removeall(self.target, inputs) else: self.inputs = [a for a in self.attrs if a != self.target and a not in exclude] if not self.values: - self.values = map(unique, zip(*self.examples)) + self.values = list(map(unique, zip(*self.examples))) self.check_me() def check_me(self): @@ -94,7 +94,7 @@ def check_me(self): assert self.target in self.attrs assert self.target not in self.inputs assert set(self.inputs).issubset(set(self.attrs)) - map(self.check_example, self.examples) + list(map(self.check_example, self.examples)) def add_example(self, example): "Add an example to the list of examples, checking it first." @@ -111,10 +111,10 @@ def check_example(self, example): def attrnum(self, attr): "Returns the number used for attr, which can be a name, or -n .. n-1." 
- if attr < 0: - return len(self.attrs) + attr - elif isinstance(attr, str): + if isinstance(attr, str): return self.attrnames.index(attr) + elif attr < 0: + return len(self.attrs) + attr else: return attr @@ -138,7 +138,7 @@ def parse_csv(input, delim=','): [[1, 2, 3], [0, 2, 'na']] """ lines = [line for line in input.splitlines() if line.strip()] - return [map(num_or_str, line.split(delim)) for line in lines] + return [list(map(num_or_str, line.split(delim))) for line in lines] #______________________________________________________________________________ @@ -245,7 +245,7 @@ def predict(example): #______________________________________________________________________________ class DecisionFork: - """A fork of a decision tree holds an attribute to test, and a dict + """A fork of a decision tree holds an attribute to test, and a dict of branches, one for each of the attribute's values.""" def __init__(self, attr, attrname=None, branches=None): @@ -264,15 +264,15 @@ def add(self, val, subtree): def display(self, indent=0): name = self.attrname - print 'Test', name + print('Test', name) for (val, subtree) in self.branches.items(): - print ' '*4*indent, name, '=', val, '==>', + print(' '*4*indent, name, '=', val, '==>', end=' ') subtree.display(indent+1) def __repr__(self): return ('DecisionFork(%r, %r, %r)' % (self.attr, self.attrname, self.branches)) - + class DecisionLeaf: "A leaf of a decision tree holds just a result." 
@@ -283,11 +283,11 @@ def __call__(self, example): return self.result def display(self, indent=0): - print 'RESULT =', self.result + print('RESULT =', self.result) def __repr__(self): return repr(self.result) - + #______________________________________________________________________________ def DecisionTreeLearner(dataset): @@ -391,7 +391,7 @@ def predict(example): def NeuralNetLearner(dataset, sizes): """Layered feed-forward network.""" - activations = map(lambda n: [0.0 for i in range(n)], sizes) + activations = list(map(lambda n: [0.0 for i in range(n)], sizes)) weights = [] def predict(example): @@ -511,10 +511,10 @@ def test(predict, dataset, examples=None, verbose=0): if output == desired: right += 1 if verbose >= 2: - print ' OK: got %s for %s' % (desired, example) + print(' OK: got %s for %s' % (desired, example)) elif verbose: - print 'WRONG: got %s, expected %s for %s' % ( - output, desired, example) + print('WRONG: got %s, expected %s for %s' % + (output, desired, example)) return right / len(examples) def train_and_test(learner, dataset, start, end): @@ -627,7 +627,7 @@ def T(attrname, branches): def SyntheticRestaurant(n=20): "Generate a DataSet with n examples." def gen(): - example = map(random.choice, restaurant.values) + example = list(map(random.choice, restaurant.values)) example[restaurant.target] = Fig[18,2](example) return example return RestaurantDataSet([gen() for i in range(n)]) diff --git a/logic.py b/logic.py index 59a212def..be8aa8836 100644 --- a/logic.py +++ b/logic.py @@ -165,7 +165,7 @@ def __init__(self, op, *args): "Op is a string or number; args are Exprs (or are coerced to Exprs)." assert isinstance(op, str) or (isnumber(op) and not args) self.op = num_or_str(op) - self.args = map(expr, args) ## Coerce args to Exprs + self.args = list(map(expr, args)) ## Coerce args to Exprs def __call__(self, *args): """Self must be a symbol with no args, such as Expr('F'). 
Create a new diff --git a/probability.py b/probability.py index 5c95de36b..06e5f83a3 100644 --- a/probability.py +++ b/probability.py @@ -3,7 +3,7 @@ from utils import * from logic import extend -import random +import random from collections import defaultdict #______________________________________________________________________________ @@ -215,7 +215,8 @@ def __init__(self, X, parents, cpt): if isinstance(cpt, (float, int)): # no parents, 0-tuple cpt = {(): cpt} elif isinstance(cpt, dict): - if cpt and isinstance(cpt.keys()[0], bool): # one parent, 1-tuple + cpt_keys= list(cpt) + if cpt and isinstance(cpt_keys[0], bool): # one parent, 1-tuple cpt = dict(((v,), p) for v, p in cpt.items()) assert isinstance(cpt, dict) @@ -406,7 +407,7 @@ def rejection_sampling(X, e, bn, N): 'False: 0.7, True: 0.3' """ counts = dict((x, 0) for x in bn.variable_values(X)) # bold N in Fig. 14.14 - for j in xrange(N): + for j in range(N): sample = prior_sample(bn) # boldface x in Fig. 14.14 if consistent_with(sample, e): counts[sample[X]] += 1 @@ -428,7 +429,7 @@ def likelihood_weighting(X, e, bn, N): 'False: 0.702, True: 0.298' """ W = dict((x, 0) for x in bn.variable_values(X)) - for j in xrange(N): + for j in range(N): sample, weight = weighted_sample(bn, e) # boldface x, w in Fig. 14.15 W[sample[X]] += weight return ProbDist(X, W) @@ -462,7 +463,7 @@ def gibbs_ask(X, e, bn, N): state = dict(e) # boldface x in Fig. 
14.16 for Zi in Z: state[Zi] = random.choice(bn.variable_values(Zi)) - for j in xrange(N): + for j in range(N): for Zi in Z: state[Zi] = markov_blanket_sample(Zi, state, bn) counts[state[X]] += 1 diff --git a/probability_test.py b/probability_test.py index fb18a273e..bfbf8e2b5 100644 --- a/probability_test.py +++ b/probability_test.py @@ -1,21 +1,15 @@ import pytest +from random import * from probability import * def tests(): cpt = burglary.variable_node('Alarm').cpt parents = ['Burglary', 'Earthquake'] event = {'Burglary': True, 'Earthquake': True} - assert cpt.p(True, parents, event) == 0.95 + bn= BayesNode("myNode", parents, cpt) + assert bn.p(True, event) == 0.95 event = {'Burglary': False, 'Earthquake': True} - assert cpt.p(False, parents, event) == 0.71 - assert BoolCPT({T: 0.2, F: 0.625}).p(False, ['Burglary'], event) == 0.375 - assert BoolCPT(0.75).p(False, [], {}) == 0.25 - cpt = BoolCPT({True: 0.2, False: 0.7}) - assert cpt.rand(['A'], {'A': True}) in [True, False] - cpt = BoolCPT({(True, True): 0.1, (True, False): 0.3, - (False, True): 0.5, (False, False): 0.7}) - assert cpt.rand(['A', 'B'], {'A': True, 'B': False}) in [True, False] - #enumeration_ask('Earthquake', {}, burglary) + assert bn.p(False, event) == 0.71 s = {'A': True, 'B': False, 'C': True, 'D': False} assert consistent_with(s, {}) @@ -23,8 +17,10 @@ def tests(): assert not consistent_with(s, {'A': False}) assert not consistent_with(s, {'D': True}) - seed(21); p = rejection_sampling('Earthquake', {}, burglary, 1000) + seed(21) + p = rejection_sampling('Earthquake', {}, burglary, 1000) assert p[True], p[False] == (0.001, 0.999) seed(71); p = likelihood_weighting('Earthquake', {}, burglary, 1000) assert p[True], p[False] == (0.002, 0.998) + diff --git a/search.py b/search.py index ab2ba5136..4c11510e8 100644 --- a/search.py +++ b/search.py @@ -253,7 +253,7 @@ def recursive_dls(node, problem, limit): def iterative_deepening_search(problem): "[Fig. 
3.18]" - for depth in xrange(sys.maxint): + for depth in range(sys.maxsize): result = depth_limited_search(problem, depth) if result != 'cutoff': return result @@ -326,7 +326,7 @@ def exp_schedule(k=20, lam=0.005, limit=100): def simulated_annealing(problem, schedule=exp_schedule()): "[Fig. 4.5]" current = Node(problem.initial) - for t in xrange(sys.maxint): + for t in range(sys.maxsize): T = schedule(t) if T == 0: return current @@ -817,7 +817,7 @@ def do(searcher, problem): def compare_graph_searchers(): """Prints a table of results like this: >>> compare_graph_searchers() -Searcher Romania(A, B) Romania(O, N) Australia +Searcher Romania(A, B) Romania(O, N) Australia breadth_first_tree_search < 21/ 22/ 59/B> <1158/1159/3288/N> < 7/ 8/ 22/WA> breadth_first_search < 7/ 11/ 18/B> < 19/ 20/ 45/N> < 2/ 6/ 8/WA> depth_first_graph_search < 8/ 9/ 20/B> < 16/ 17/ 38/N> < 4/ 5/ 11/WA> @@ -852,9 +852,9 @@ def compare_graph_searchers(): >>> board = list('SARTELNID') >>> print_boggle(board) -S A R -T E L -N I D +S A R +T E L +N I D >>> f = BoggleFinder(board) >>> len(f) 206 diff --git a/utils.py b/utils.py index e73de1a31..315a4d779 100644 --- a/utils.py +++ b/utils.py @@ -345,7 +345,7 @@ def AIMAFile(components, mode='r'): def DataFile(name, mode='r'): "Return a file in the AIMA /data directory." - return AIMAFile(['..', 'data', name], mode) + return AIMAFile(['aima-data', name], mode) def unimplemented(): "Use this as a stub for not-yet-implemented functions."