diff --git a/Source/Gold/Runtime.debug.pas b/Source/Gold/Runtime.debug.pas new file mode 100644 index 0000000..e60d2d1 --- /dev/null +++ b/Source/Gold/Runtime.debug.pas @@ -0,0 +1,7 @@ +namespace go.runtime.debug; + +method SetTraceback(level: go.builtin.string); +begin +end; + +end. \ No newline at end of file diff --git a/Source/Gold/Runtime.pas b/Source/Gold/Runtime.pas index 62ae350..d91eb05 100644 --- a/Source/Gold/Runtime.pas +++ b/Source/Gold/Runtime.pas @@ -21,7 +21,10 @@ public method Next(): tuple of (Frame, Boolean); begin - exit (nil, false); + //exit (nil, false); + var lFrame := new Frame(); + lFrame.Function := ''; + exit (lFrame, false); end; end; [ValueTypeSemantics] @@ -36,7 +39,7 @@ method Callers(&skip: Integer; pc: go.builtin.Slice): Integer; begin - exit 0; + exit 1; end; method CallersFrames(acallers: go.builtin.Slice): Memory; @@ -77,6 +80,36 @@ exit (0,'',0, false); end; +method GOMAXPROCS(n: Integer): Integer; +begin + result := 1; +end; + +method Goexit; +begin + // TODO +end; + +method GC; +begin + // TODO +end; + +method Gosched; +begin + // TODO +end; + +method SetBlockProfileRate(rate: Integer); +begin +end; + +method SetMutexProfileFraction(rate: Integer): Integer; +begin +end; + +var MemProfileRate: Integer := 0; + type go.builtin.__Global = public partial class private @@ -709,6 +742,8 @@ [valueTypeSemantics] MemStats = public partial class public + Mallocs: go.builtin.uint64; + TotalAlloc: go.builtin.uint64; end; method ReadMemStats(m: Memory); diff --git a/Source/Gold/Runtime.pprof.pas b/Source/Gold/Runtime.pprof.pas new file mode 100644 index 0000000..c2928d3 --- /dev/null +++ b/Source/Gold/Runtime.pprof.pas @@ -0,0 +1,24 @@ +namespace go.runtime.pprof; + +method StartCPUProfile(w: go.io.Writer): go.builtin.error; +begin +end; + +method StopCPUProfile; +begin +end; + +method Lookup(name: go.builtin.string): go.builtin.Reference; +begin +end; + +type + Profile = public partial class + public + method WriteTo(w: go.io.Writer; debug: Integer): go.builtin.error; + begin + + end; + end; + +end. \ No newline at end of file diff --git a/Source/Gold/TestingHelper.pas b/Source/Gold/TestingHelper.pas new file mode 100644 index 0000000..2ea3a47 --- /dev/null +++ b/Source/Gold/TestingHelper.pas @@ -0,0 +1,53 @@ +namespace go.testing; + +{$IF ECHOES} +uses + System.Reflection, System.Collections.Generic; +{$ENDIF} + +type + TestHelper = public partial class + private + class method DiscoverTestsFrom(aType: System.Type): go.builtin.Slice; + begin + writeLn('-----------------Discover: ' + aType.FullName); + var lTests := new List(); + for each lMethod in aType.GetMethods do begin + if lMethod.Name.StartsWith('Test') then begin + var lTest := new InternalTest(); + lTest.Name := lMethod.Name; + lTest.F := (t)-> lMethod.Invoke(aType, [t]); + lTests.Add(lTest); + //result.Add(lMethod); + writeLn(lMethod.Name); + end; + end; + result := new go.builtin.Slice(lTests.ToArray); + end; + + public + class var AllTests: Dictionary> := new Dictionary>(); + class method DiscoverTests; + begin + {$IF ISLAND} + // TODO + {$ELSEIF ECHOES} + var lAllTypes := &Assembly.GetExecutingAssembly().GetTypes(); + var lTests: go.builtin.Slice; + for each lType in lAllTypes do begin + if lType.FullName.EndsWith('.__Global') then begin + lTests := DiscoverTestsFrom(lType); + if lTests.Length > 0 then + AllTests.Add(lType.FullName, lTests); + end; + end; + {$ENDIF} + end; + + class method GetTestsFor(aSuite: String): go.builtin.Slice; + begin + result := AllTests[aSuite]; + end; + end; + +end. 
\ No newline at end of file diff --git a/Source/Gold/internal/race/doc.go b/Source/Gold/internal/race/doc.go new file mode 100644 index 0000000..8fa44ce --- /dev/null +++ b/Source/Gold/internal/race/doc.go @@ -0,0 +1,11 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package race contains helper functions for manually instrumenting code for the race detector. + +The runtime package intentionally exports these functions only in the race build; +this package exports them unconditionally but without the "race" build tag they are no-ops. +*/ +package race diff --git a/Source/Gold/internal/race/norace.go b/Source/Gold/internal/race/norace.go new file mode 100644 index 0000000..d83c016 --- /dev/null +++ b/Source/Gold/internal/race/norace.go @@ -0,0 +1,42 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !race + +package race + +import ( + "unsafe" +) + +const Enabled = false + +func Acquire(addr unsafe.Pointer) { +} + +func Release(addr unsafe.Pointer) { +} + +func ReleaseMerge(addr unsafe.Pointer) { +} + +func Disable() { +} + +func Enable() { +} + +func Read(addr unsafe.Pointer) { +} + +func Write(addr unsafe.Pointer) { +} + +func ReadRange(addr unsafe.Pointer, len int) { +} + +func WriteRange(addr unsafe.Pointer, len int) { +} + +func Errors() int { return 0 } diff --git a/Source/Gold/internal/testlog/log.go b/Source/Gold/internal/testlog/log.go new file mode 100644 index 0000000..3c5f780 --- /dev/null +++ b/Source/Gold/internal/testlog/log.go @@ -0,0 +1,69 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package testlog provides a back-channel communication path +// between tests and package os, so that cmd/go can see which +// environment variables and files a test consults. +package testlog + +import "sync/atomic" + +// Interface is the interface required of test loggers. +// The os package will invoke the interface's methods to indicate that +// it is inspecting the given environment variables or files. +// Multiple goroutines may call these methods simultaneously. +type Interface interface { + Getenv(key string) + Stat(file string) + Open(file string) + Chdir(dir string) +} + +// logger is the current logger Interface. +// We use an atomic.Value in case test startup +// is racing with goroutines started during init. +// That must not cause a race detector failure, +// although it will still result in limited visibility +// into exactly what those goroutines do. +var logger atomic.Value + +// SetLogger sets the test logger implementation for the current process. +// It must be called only once, at process startup. +func SetLogger(impl Interface) { + if logger.Load() != nil { + panic("testlog: SetLogger must be called only once") + } + logger.Store(&impl) +} + +// Logger returns the current test logger implementation. +// It returns nil if there is no logger. +func Logger() Interface { + impl := logger.Load() + if impl == nil { + return nil + } + return *impl.(*Interface) +} + +// Getenv calls Logger().Getenv, if a logger has been set. +func Getenv(name string) { + if log := Logger(); log != nil { + log.Getenv(name) + } +} + +// Open calls Logger().Open, if a logger has been set. 
+func Open(name string) { + if log := Logger(); log != nil { + log.Open(name) + } +} + +// Stat calls Logger().Stat, if a logger has been set. +func Stat(name string) { + if log := Logger(); log != nil { + log.Stat(name) + } +} diff --git a/Source/Gold/os.pas b/Source/Gold/os.pas index 4847375..9aee018 100644 --- a/Source/Gold/os.pas +++ b/Source/Gold/os.pas @@ -999,4 +999,8 @@ MyNopCloser = class(go.io.ReadCloser) {$ENDIF} end; +method Pipe: tuple of (r: go.builtin.Reference, w: go.builtin.Reference, err: go.builtin.error); +begin +end; + end. \ No newline at end of file diff --git a/Source/Gold/string.pas b/Source/Gold/string.pas index 1b92983..7358c47 100644 --- a/Source/Gold/string.pas +++ b/Source/Gold/string.pas @@ -86,6 +86,11 @@ result := new string([aValue]); end; + class operator Implicit(aValue: string): go.builtin.error; + begin + exit go.Errors.new(aValue); + end; + class operator Equal(a, b: string): Boolean; begin result := go.bytes.Compare(a.Value, b.Value) = 0; diff --git a/Source/Gold/testing/allocs.go b/Source/Gold/testing/allocs.go new file mode 100644 index 0000000..1eeb2d4 --- /dev/null +++ b/Source/Gold/testing/allocs.go @@ -0,0 +1,45 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package testing + +import ( + "runtime" +) + +// AllocsPerRun returns the average number of allocations during calls to f. +// Although the return value has type float64, it will always be an integral value. +// +// To compute the number of allocations, the function will first be run once as +// a warm-up. The average number of allocations over the specified number of +// runs will then be measured and returned. +// +// AllocsPerRun sets GOMAXPROCS to 1 during its measurement and will restore +// it before returning. +func AllocsPerRun(runs int, f func()) (avg float64) { + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1)) + + // Warm up the function + f() + + // Measure the starting statistics + var memstats runtime.MemStats + runtime.ReadMemStats(&memstats) + mallocs := 0 - memstats.Mallocs + + // Run the function the specified number of times + for i := 0; i < runs; i++ { + f() + } + + // Read the final statistics + runtime.ReadMemStats(&memstats) + mallocs += memstats.Mallocs + + // Average the mallocs over the runs (not counting the warm-up). + // We are forced to return a float64 because the API is silly, but do + // the division as integers so we can ask if AllocsPerRun()==1 + // instead of AllocsPerRun()<2. + return float64(mallocs / uint64(runs)) +} diff --git a/Source/Gold/testing/allocs_test.go b/Source/Gold/testing/allocs_test.go new file mode 100644 index 0000000..5b346aa --- /dev/null +++ b/Source/Gold/testing/allocs_test.go @@ -0,0 +1,29 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package testing_test + +import "testing" + +var global interface{} + +var allocsPerRunTests = []struct { + name string + fn func() + allocs float64 +}{ + {"alloc *byte", func() { global = new(*byte) }, 1}, + {"alloc complex128", func() { global = new(complex128) }, 1}, + {"alloc float64", func() { global = new(float64) }, 1}, + {"alloc int32", func() { global = new(int32) }, 1}, + {"alloc byte", func() { global = new(byte) }, 1}, +} + +func TestAllocsPerRun(t *testing.T) { + for _, tt := range allocsPerRunTests { + if allocs := testing.AllocsPerRun(100, tt.fn); allocs != tt.allocs { + t.Errorf("AllocsPerRun(100, %s) = %v, want %v", tt.name, allocs, tt.allocs) + } + } +} diff --git a/Source/Gold/testing/benchmark.go b/Source/Gold/testing/benchmark.go new file mode 100644 index 0000000..9c7b1be --- /dev/null +++ b/Source/Gold/testing/benchmark.go @@ -0,0 +1,666 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package testing + +import ( + "flag" + "fmt" + "internal/race" + "os" + "runtime" + "sync" + "sync/atomic" + "time" +) + +var matchBenchmarks = flag.String("test.bench", "", "run only benchmarks matching `regexp`") +var benchTime = flag.Duration("test.benchtime", 1*time.Second, "run each benchmark for duration `d`") +var benchmarkMemory = flag.Bool("test.benchmem", false, "print memory allocations for benchmarks") + +// Global lock to ensure only one benchmark runs at a time. +var benchmarkLock sync.Mutex + +// Used for every benchmark for measuring memory. +var memStats runtime.MemStats + +// An internal type but exported because it is cross-package; part of the implementation +// of the "go test" command. +type InternalBenchmark struct { + Name string + F func(b *B) +} + +// B is a type passed to Benchmark functions to manage benchmark +// timing and to specify the number of iterations to run. +// +// A benchmark ends when its Benchmark function returns or calls any of the methods +// FailNow, Fatal, Fatalf, SkipNow, Skip, or Skipf. Those methods must be called +// only from the goroutine running the Benchmark function. +// The other reporting methods, such as the variations of Log and Error, +// may be called simultaneously from multiple goroutines. +// +// Like in tests, benchmark logs are accumulated during execution +// and dumped to standard error when done. Unlike in tests, benchmark logs +// are always printed, so as not to hide output whose existence may be +// affecting benchmark results. +type B struct { + common + importPath string // import path of the package containing the benchmark + context *benchContext + N int + previousN int // number of iterations in the previous run + previousDuration time.Duration // total duration of the previous run + benchFunc func(b *B) + benchTime time.Duration + bytes int64 + missingBytes bool // one of the subbenchmarks does not have bytes set. + timerOn bool + showAllocResult bool + result BenchmarkResult + parallelism int // RunParallel creates parallelism*GOMAXPROCS goroutines + // The initial states of memStats.Mallocs and memStats.TotalAlloc. + startAllocs uint64 + startBytes uint64 + // The net total of this test after being run. + netAllocs uint64 + netBytes uint64 +} + +// StartTimer starts timing a test. This function is called automatically +// before a benchmark starts, but it can also used to resume timing after +// a call to StopTimer. 
+func (b *B) StartTimer() { + if !b.timerOn { + runtime.ReadMemStats(&memStats) + b.startAllocs = memStats.Mallocs + b.startBytes = memStats.TotalAlloc + b.start = time.Now() + b.timerOn = true + } +} + +// StopTimer stops timing a test. This can be used to pause the timer +// while performing complex initialization that you don't +// want to measure. +func (b *B) StopTimer() { + if b.timerOn { + b.duration += time.Since(b.start) + runtime.ReadMemStats(&memStats) + b.netAllocs += memStats.Mallocs - b.startAllocs + b.netBytes += memStats.TotalAlloc - b.startBytes + b.timerOn = false + } +} + +// ResetTimer zeros the elapsed benchmark time and memory allocation counters. +// It does not affect whether the timer is running. +func (b *B) ResetTimer() { + if b.timerOn { + runtime.ReadMemStats(&memStats) + b.startAllocs = memStats.Mallocs + b.startBytes = memStats.TotalAlloc + b.start = time.Now() + } + b.duration = 0 + b.netAllocs = 0 + b.netBytes = 0 +} + +// SetBytes records the number of bytes processed in a single operation. +// If this is called, the benchmark will report ns/op and MB/s. +func (b *B) SetBytes(n int64) { b.bytes = n } + +// ReportAllocs enables malloc statistics for this benchmark. +// It is equivalent to setting -test.benchmem, but it only affects the +// benchmark function that calls ReportAllocs. +func (b *B) ReportAllocs() { + b.showAllocResult = true +} + +func (b *B) nsPerOp() int64 { + if b.N <= 0 { + return 0 + } + return b.duration.Nanoseconds() / int64(b.N) +} + +// runN runs a single benchmark for the specified number of iterations. +func (b *B) runN(n int) { + benchmarkLock.Lock() + defer benchmarkLock.Unlock() + // Try to get a comparable environment for each run + // by clearing garbage from previous runs. + runtime.GC() + b.raceErrors = -race.Errors() + b.N = n + b.parallelism = 1 + b.ResetTimer() + b.StartTimer() + b.benchFunc(b) + b.StopTimer() + b.previousN = n + b.previousDuration = b.duration + b.raceErrors += race.Errors() + if b.raceErrors > 0 { + b.Errorf("race detected during execution of benchmark") + } +} + +func min(x, y int) int { + if x > y { + return y + } + return x +} + +func max(x, y int) int { + if x < y { + return y + } + return x +} + +// roundDown10 rounds a number down to the nearest power of 10. +func roundDown10(n int) int { + var tens = 0 + // tens = floor(log_10(n)) + for n >= 10 { + n = n / 10 + tens++ + } + // result = 10^tens + result := 1 + for i := 0; i < tens; i++ { + result *= 10 + } + return result +} + +// roundUp rounds x up to a number of the form [1eX, 2eX, 3eX, 5eX]. +func roundUp(n int) int { + base := roundDown10(n) + switch { + case n <= base: + return base + case n <= (2 * base): + return 2 * base + case n <= (3 * base): + return 3 * base + case n <= (5 * base): + return 5 * base + default: + return 10 * base + } +} + +// run1 runs the first iteration of benchFunc. It returns whether more +// iterations of this benchmarks should be run. +func (b *B) run1() bool { + if ctx := b.context; ctx != nil { + // Extend maxLen, if needed. + if n := len(b.name) + ctx.extLen + 1; n > ctx.maxLen { + ctx.maxLen = n + 8 // Add additional slack to avoid too many jumps in size. + } + } + go func() { + // Signal that we're done whether we return normally + // or by FailNow's runtime.Goexit. + defer func() { + b.signal <- true + }() + + b.runN(1) + }() + <-b.signal + if b.failed { + fmt.Fprintf(b.w, "--- FAIL: %s\n%s", b.name, b.output) + return false + } + // Only print the output if we know we are not going to proceed. 
+ // Otherwise it is printed in processBench. + if atomic.LoadInt32(&b.hasSub) != 0 || b.finished { + tag := "BENCH" + if b.skipped { + tag = "SKIP" + } + if b.chatty && (len(b.output) > 0 || b.finished) { + b.trimOutput() + fmt.Fprintf(b.w, "--- %s: %s\n%s", tag, b.name, b.output) + } + return false + } + return true +} + +var labelsOnce sync.Once + +// run executes the benchmark in a separate goroutine, including all of its +// subbenchmarks. b must not have subbenchmarks. +func (b *B) run() { + labelsOnce.Do(func() { + fmt.Fprintf(b.w, "goos: %s\n", runtime.GOOS) + fmt.Fprintf(b.w, "goarch: %s\n", runtime.GOARCH) + if b.importPath != "" { + fmt.Fprintf(b.w, "pkg: %s\n", b.importPath) + } + }) + if b.context != nil { + // Running go test --test.bench + b.context.processBench(b) // Must call doBench. + } else { + // Running func Benchmark. + b.doBench() + } +} + +func (b *B) doBench() BenchmarkResult { + go b.launch() + <-b.signal + return b.result +} + +// launch launches the benchmark function. It gradually increases the number +// of benchmark iterations until the benchmark runs for the requested benchtime. +// launch is run by the doBench function as a separate goroutine. +// run1 must have been called on b. +func (b *B) launch() { + // Signal that we're done whether we return normally + // or by FailNow's runtime.Goexit. + defer func() { + b.signal <- true + }() + + // Run the benchmark for at least the specified amount of time. + d := b.benchTime + for n := 1; !b.failed && b.duration < d && n < 1e9; { + last := n + // Predict required iterations. + n = int(d.Nanoseconds()) + if nsop := b.nsPerOp(); nsop != 0 { + n /= int(nsop) + } + // Run more iterations than we think we'll need (1.2x). + // Don't grow too fast in case we had timing errors previously. + // Be sure to run at least one more than last time. + n = max(min(n+n/5, 100*last), last+1) + // Round up to something easy to read. + n = roundUp(n) + b.runN(n) + } + b.result = BenchmarkResult{b.N, b.duration, b.bytes, b.netAllocs, b.netBytes} +} + +// The results of a benchmark run. +type BenchmarkResult struct { + N int // The number of iterations. + T time.Duration // The total time taken. + Bytes int64 // Bytes processed in one iteration. + MemAllocs uint64 // The total number of memory allocations. + MemBytes uint64 // The total number of bytes allocated. +} + +func (r BenchmarkResult) NsPerOp() int64 { + if r.N <= 0 { + return 0 + } + return r.T.Nanoseconds() / int64(r.N) +} + +func (r BenchmarkResult) mbPerSec() float64 { + if r.Bytes <= 0 || r.T <= 0 || r.N <= 0 { + return 0 + } + return (float64(r.Bytes) * float64(r.N) / 1e6) / r.T.Seconds() +} + +// AllocsPerOp returns r.MemAllocs / r.N. +func (r BenchmarkResult) AllocsPerOp() int64 { + if r.N <= 0 { + return 0 + } + return int64(r.MemAllocs) / int64(r.N) +} + +// AllocedBytesPerOp returns r.MemBytes / r.N. +func (r BenchmarkResult) AllocedBytesPerOp() int64 { + if r.N <= 0 { + return 0 + } + return int64(r.MemBytes) / int64(r.N) +} + +func (r BenchmarkResult) String() string { + mbs := r.mbPerSec() + mb := "" + if mbs != 0 { + mb = fmt.Sprintf("\t%7.2f MB/s", mbs) + } + nsop := r.NsPerOp() + ns := fmt.Sprintf("%10d ns/op", nsop) + if r.N > 0 && nsop < 100 { + // The format specifiers here make sure that + // the ones digits line up for all three possible formats. 
+ if nsop < 10 { + ns = fmt.Sprintf("%13.2f ns/op", float64(r.T.Nanoseconds())/float64(r.N)) + } else { + ns = fmt.Sprintf("%12.1f ns/op", float64(r.T.Nanoseconds())/float64(r.N)) + } + } + return fmt.Sprintf("%8d\t%s%s", r.N, ns, mb) +} + +// MemString returns r.AllocedBytesPerOp and r.AllocsPerOp in the same format as 'go test'. +func (r BenchmarkResult) MemString() string { + return fmt.Sprintf("%8d B/op\t%8d allocs/op", + r.AllocedBytesPerOp(), r.AllocsPerOp()) +} + +// benchmarkName returns full name of benchmark including procs suffix. +func benchmarkName(name string, n int) string { + if n != 1 { + return fmt.Sprintf("%s-%d", name, n) + } + return name +} + +type benchContext struct { + match *matcher + + maxLen int // The largest recorded benchmark name. + extLen int // Maximum extension length. +} + +// An internal function but exported because it is cross-package; part of the implementation +// of the "go test" command. +func RunBenchmarks(matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark) { + runBenchmarks("", matchString, benchmarks) +} + +func runBenchmarks(importPath string, matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark) bool { + // If no flag was specified, don't run benchmarks. + if len(*matchBenchmarks) == 0 { + return true + } + // Collect matching benchmarks and determine longest name. + maxprocs := 1 + for _, procs := range cpuList { + if procs > maxprocs { + maxprocs = procs + } + } + ctx := &benchContext{ + match: newMatcher(matchString, *matchBenchmarks, "-test.bench"), + extLen: len(benchmarkName("", maxprocs)), + } + var bs []InternalBenchmark + for _, Benchmark := range benchmarks { + if _, matched, _ := ctx.match.fullName(nil, Benchmark.Name); matched { + bs = append(bs, Benchmark) + benchName := benchmarkName(Benchmark.Name, maxprocs) + if l := len(benchName) + ctx.extLen + 1; l > ctx.maxLen { + ctx.maxLen = l + } + } + } + main := &B{ + common: common{ + name: "Main", + w: os.Stdout, + chatty: *chatty, + }, + importPath: importPath, + benchFunc: func(b *B) { + for _, Benchmark := range bs { + b.Run(Benchmark.Name, Benchmark.F) + } + }, + benchTime: *benchTime, + context: ctx, + } + main.runN(1) + return !main.failed +} + +// processBench runs bench b for the configured CPU counts and prints the results. +func (ctx *benchContext) processBench(b *B) { + for i, procs := range cpuList { + for j := uint(0); j < *count; j++ { + runtime.GOMAXPROCS(procs) + benchName := benchmarkName(b.name, procs) + fmt.Fprintf(b.w, "%-*s\t", ctx.maxLen, benchName) + // Recompute the running time for all but the first iteration. + if i > 0 || j > 0 { + b = &B{ + common: common{ + signal: make(chan bool), + name: b.name, + w: b.w, + chatty: b.chatty, + }, + benchFunc: b.benchFunc, + benchTime: b.benchTime, + } + b.run1() + } + r := b.doBench() + if b.failed { + // The output could be very long here, but probably isn't. + // We print it all, regardless, because we don't want to trim the reason + // the benchmark failed. + fmt.Fprintf(b.w, "--- FAIL: %s\n%s", benchName, b.output) + continue + } + results := r.String() + if *benchmarkMemory || b.showAllocResult { + results += "\t" + r.MemString() + } + fmt.Fprintln(b.w, results) + // Unlike with tests, we ignore the -chatty flag and always print output for + // benchmarks since the output generation time will skew the results. 
+ if len(b.output) > 0 { + b.trimOutput() + fmt.Fprintf(b.w, "--- BENCH: %s\n%s", benchName, b.output) + } + if p := runtime.GOMAXPROCS(-1); p != procs { + fmt.Fprintf(os.Stderr, "testing: %s left GOMAXPROCS set to %d\n", benchName, p) + } + } + } +} + +// Run benchmarks f as a subbenchmark with the given name. It reports +// whether there were any failures. +// +// A subbenchmark is like any other benchmark. A benchmark that calls Run at +// least once will not be measured itself and will be called once with N=1. +func (b *B) Run(name string, f func(b *B)) bool { + // Since b has subbenchmarks, we will no longer run it as a benchmark itself. + // Release the lock and acquire it on exit to ensure locks stay paired. + atomic.StoreInt32(&b.hasSub, 1) + benchmarkLock.Unlock() + defer benchmarkLock.Lock() + + benchName, ok, partial := b.name, true, false + if b.context != nil { + benchName, ok, partial = b.context.match.fullName(&b.common, name) + } + if !ok { + return true + } + var pc [maxStackLen]uintptr + n := runtime.Callers(2, pc[:]) + sub := &B{ + common: common{ + signal: make(chan bool), + name: benchName, + parent: &b.common, + level: b.level + 1, + creator: pc[:n], + w: b.w, + chatty: b.chatty, + }, + importPath: b.importPath, + benchFunc: f, + benchTime: b.benchTime, + context: b.context, + } + if partial { + // Partial name match, like -bench=X/Y matching BenchmarkX. + // Only process sub-benchmarks, if any. + atomic.StoreInt32(&sub.hasSub, 1) + } + if sub.run1() { + sub.run() + } + b.add(sub.result) + return !sub.failed +} + +// add simulates running benchmarks in sequence in a single iteration. It is +// used to give some meaningful results in case func Benchmark is used in +// combination with Run. +func (b *B) add(other BenchmarkResult) { + r := &b.result + // The aggregated BenchmarkResults resemble running all subbenchmarks as + // in sequence in a single benchmark. + r.N = 1 + r.T += time.Duration(other.NsPerOp()) + if other.Bytes == 0 { + // Summing Bytes is meaningless in aggregate if not all subbenchmarks + // set it. + b.missingBytes = true + r.Bytes = 0 + } + if !b.missingBytes { + r.Bytes += other.Bytes + } + r.MemAllocs += uint64(other.AllocsPerOp()) + r.MemBytes += uint64(other.AllocedBytesPerOp()) +} + +// trimOutput shortens the output from a benchmark, which can be very long. +func (b *B) trimOutput() { + // The output is likely to appear multiple times because the benchmark + // is run multiple times, but at least it will be seen. This is not a big deal + // because benchmarks rarely print, but just in case, we trim it if it's too long. + const maxNewlines = 10 + for nlCount, j := 0, 0; j < len(b.output); j++ { + if b.output[j] == '\n' { + nlCount++ + if nlCount >= maxNewlines { + b.output = append(b.output[:j], "\n\t... [output truncated]\n"...) + break + } + } + } +} + +// A PB is used by RunParallel for running parallel benchmarks. +type PB struct { + globalN *uint64 // shared between all worker goroutines iteration counter + grain uint64 // acquire that many iterations from globalN at once + cache uint64 // local cache of acquired iterations + bN uint64 // total number of iterations to execute (b.N) +} + +// Next reports whether there are more iterations to execute. 
+func (pb *PB) Next() bool { + if pb.cache == 0 { + n := atomic.AddUint64(pb.globalN, pb.grain) + if n <= pb.bN { + pb.cache = pb.grain + } else if n < pb.bN+pb.grain { + pb.cache = pb.bN + pb.grain - n + } else { + return false + } + } + pb.cache-- + return true +} + +// RunParallel runs a benchmark in parallel. +// It creates multiple goroutines and distributes b.N iterations among them. +// The number of goroutines defaults to GOMAXPROCS. To increase parallelism for +// non-CPU-bound benchmarks, call SetParallelism before RunParallel. +// RunParallel is usually used with the go test -cpu flag. +// +// The body function will be run in each goroutine. It should set up any +// goroutine-local state and then iterate until pb.Next returns false. +// It should not use the StartTimer, StopTimer, or ResetTimer functions, +// because they have global effect. It should also not call Run. +func (b *B) RunParallel(body func(*PB)) { + if b.N == 0 { + return // Nothing to do when probing. + } + // Calculate grain size as number of iterations that take ~100µs. + // 100µs is enough to amortize the overhead and provide sufficient + // dynamic load balancing. + grain := uint64(0) + if b.previousN > 0 && b.previousDuration > 0 { + grain = 1e5 * uint64(b.previousN) / uint64(b.previousDuration) + } + if grain < 1 { + grain = 1 + } + // We expect the inner loop and function call to take at least 10ns, + // so do not do more than 100µs/10ns=1e4 iterations. + if grain > 1e4 { + grain = 1e4 + } + + n := uint64(0) + numProcs := b.parallelism * runtime.GOMAXPROCS(0) + var wg sync.WaitGroup + wg.Add(numProcs) + for p := 0; p < numProcs; p++ { + go func() { + defer wg.Done() + pb := &PB{ + globalN: &n, + grain: grain, + bN: uint64(b.N), + } + body(pb) + }() + } + wg.Wait() + if n <= uint64(b.N) && !b.Failed() { + b.Fatal("RunParallel: body exited without pb.Next() == false") + } +} + +// SetParallelism sets the number of goroutines used by RunParallel to p*GOMAXPROCS. +// There is usually no need to call SetParallelism for CPU-bound benchmarks. +// If p is less than 1, this call will have no effect. +func (b *B) SetParallelism(p int) { + if p >= 1 { + b.parallelism = p + } +} + +// Benchmark benchmarks a single function. Useful for creating +// custom benchmarks that do not use the "go test" command. +// +// If f calls Run, the result will be an estimate of running all its +// subbenchmarks that don't call Run in sequence in a single benchmark. +func Benchmark(f func(b *B)) BenchmarkResult { + b := &B{ + common: common{ + signal: make(chan bool), + w: discard{}, + }, + benchFunc: f, + benchTime: *benchTime, + } + if b.run1() { + b.run() + } + return b.result +} + +type discard struct{} + +func (discard) Write(b []byte) (n int, err error) { return len(b), nil } diff --git a/Source/Gold/testing/benchmark_test.go b/Source/Gold/testing/benchmark_test.go new file mode 100644 index 0000000..431bb53 --- /dev/null +++ b/Source/Gold/testing/benchmark_test.go @@ -0,0 +1,113 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package testing_test + +import ( + "bytes" + "runtime" + "sync/atomic" + "testing" + "text/template" +) + +var roundDownTests = []struct { + v, expected int +}{ + {1, 1}, + {9, 1}, + {10, 10}, + {11, 10}, + {100, 100}, + {101, 100}, + {999, 100}, + {1000, 1000}, + {1001, 1000}, +} + +func TestRoundDown10(t *testing.T) { + for _, tt := range roundDownTests { + actual := testing.RoundDown10(tt.v) + if tt.expected != actual { + t.Errorf("roundDown10(%d): expected %d, actual %d", tt.v, tt.expected, actual) + } + } +} + +var roundUpTests = []struct { + v, expected int +}{ + {0, 1}, + {1, 1}, + {2, 2}, + {3, 3}, + {5, 5}, + {9, 10}, + {999, 1000}, + {1000, 1000}, + {1400, 2000}, + {1700, 2000}, + {2700, 3000}, + {4999, 5000}, + {5000, 5000}, + {5001, 10000}, +} + +func TestRoundUp(t *testing.T) { + for _, tt := range roundUpTests { + actual := testing.RoundUp(tt.v) + if tt.expected != actual { + t.Errorf("roundUp(%d): expected %d, actual %d", tt.v, tt.expected, actual) + } + } +} + +func TestRunParallel(t *testing.T) { + testing.Benchmark(func(b *testing.B) { + procs := uint32(0) + iters := uint64(0) + b.SetParallelism(3) + b.RunParallel(func(pb *testing.PB) { + atomic.AddUint32(&procs, 1) + for pb.Next() { + atomic.AddUint64(&iters, 1) + } + }) + if want := uint32(3 * runtime.GOMAXPROCS(0)); procs != want { + t.Errorf("got %v procs, want %v", procs, want) + } + if iters != uint64(b.N) { + t.Errorf("got %v iters, want %v", iters, b.N) + } + }) +} + +func TestRunParallelFail(t *testing.T) { + testing.Benchmark(func(b *testing.B) { + b.RunParallel(func(pb *testing.PB) { + // The function must be able to log/abort + // w/o crashing/deadlocking the whole benchmark. + b.Log("log") + b.Error("error") + }) + }) +} + +func ExampleB_RunParallel() { + // Parallel benchmark for text/template.Template.Execute on a single object. + testing.Benchmark(func(b *testing.B) { + templ := template.Must(template.New("test").Parse("Hello, {{.}}!")) + // RunParallel will create GOMAXPROCS goroutines + // and distribute work among them. + b.RunParallel(func(pb *testing.PB) { + // Each goroutine has its own bytes.Buffer. + var buf bytes.Buffer + for pb.Next() { + // The loop body is executed b.N times total across all goroutines. + buf.Reset() + templ.Execute(&buf, "World") + } + }) + }) +} diff --git a/Source/Gold/testing/cover.go b/Source/Gold/testing/cover.go new file mode 100644 index 0000000..4f5c328 --- /dev/null +++ b/Source/Gold/testing/cover.go @@ -0,0 +1,119 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Support for test coverage. + +package testing + +import ( + "fmt" + "os" + "sync/atomic" +) + +// CoverBlock records the coverage data for a single basic block. +// The fields are 1-indexed, as in an editor: The opening line of +// the file is number 1, for example. Columns are measured +// in bytes. +// NOTE: This struct is internal to the testing infrastructure and may change. +// It is not covered (yet) by the Go 1 compatibility guidelines. +type CoverBlock struct { + Line0 uint32 // Line number for block start. + Col0 uint16 // Column number for block start. + Line1 uint32 // Line number for block end. + Col1 uint16 // Column number for block end. + Stmts uint16 // Number of statements included in this block. +} + +var cover Cover + +// Cover records information about test coverage checking. +// NOTE: This struct is internal to the testing infrastructure and may change. 
+// It is not covered (yet) by the Go 1 compatibility guidelines. +type Cover struct { + Mode string + Counters map[string][]uint32 + Blocks map[string][]CoverBlock + CoveredPackages string +} + +// Coverage reports the current code coverage as a fraction in the range [0, 1]. +// If coverage is not enabled, Coverage returns 0. +// +// When running a large set of sequential test cases, checking Coverage after each one +// can be useful for identifying which test cases exercise new code paths. +// It is not a replacement for the reports generated by 'go test -cover' and +// 'go tool cover'. +func Coverage() float64 { + var n, d int64 + for _, counters := range cover.Counters { + for i := range counters { + //if atomic.LoadUint32(&counters[i]) > 0 { + if counters[i] > 0 { + // elements change + n++ + } + d++ + } + } + if d == 0 { + return 0 + } + return float64(n) / float64(d) +} + +// RegisterCover records the coverage data accumulators for the tests. +// NOTE: This function is internal to the testing infrastructure and may change. +// It is not covered (yet) by the Go 1 compatibility guidelines. +func RegisterCover(c Cover) { + cover = c +} + +// mustBeNil checks the error and, if present, reports it and exits. +func mustBeNil(err error) { + if err != nil { + fmt.Fprintf(os.Stderr, "testing: %s\n", err) + os.Exit(2) + } +} + +// coverReport reports the coverage percentage and writes a coverage profile if requested. +func coverReport() { + var f *os.File + var err error + if *coverProfile != "" { + f, err = os.Create(toOutputDir(*coverProfile)) + mustBeNil(err) + fmt.Fprintf(f, "mode: %s\n", cover.Mode) + defer func() { mustBeNil(f.Close()) }() + } + + var active, total int64 + var count uint32 + for name, counts := range cover.Counters { + blocks := cover.Blocks[name] + for i := range counts { + stmts := int64(blocks[i].Stmts) + total += stmts + //count = atomic.LoadUint32(&counts[i]) // For -mode=atomic. + count = counts[i] // For -mode=atomic. + // elements change + if count > 0 { + active += stmts + } + if f != nil { + _, err := fmt.Fprintf(f, "%s:%d.%d,%d.%d %d %d\n", name, + blocks[i].Line0, blocks[i].Col0, + blocks[i].Line1, blocks[i].Col1, + stmts, + count) + mustBeNil(err) + } + } + } + if total == 0 { + total = 1 + } + fmt.Printf("coverage: %.1f%% of statements%s\n", 100*float64(active)/float64(total), cover.CoveredPackages) +} \ No newline at end of file diff --git a/Source/Gold/testing/example.go b/Source/Gold/testing/example.go new file mode 100644 index 0000000..f4beb76 --- /dev/null +++ b/Source/Gold/testing/example.go @@ -0,0 +1,123 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package testing + +import ( + "fmt" + "io" + "os" + "sort" + "strings" + "time" +) + +type InternalExample struct { + Name string + F func() + Output string + Unordered bool +} + +// An internal function but exported because it is cross-package; part of the implementation +// of the "go test" command. 
+func RunExamples(matchString func(pat, str string) (bool, error), examples []InternalExample) (ok bool) { + _, ok = runExamples(matchString, examples) + return ok +} + +func runExamples(matchString func(pat, str string) (bool, error), examples []InternalExample) (ran, ok bool) { + ok = true + + var eg InternalExample + + for _, eg = range examples { + matched, err := matchString(*match, eg.Name) + if err != nil { + fmt.Fprintf(os.Stderr, "testing: invalid regexp for -test.run: %s\n", err) + os.Exit(1) + } + if !matched { + continue + } + ran = true + if !runExample(eg) { + ok = false + } + } + + return ran, ok +} + +func sortLines(output string) string { + lines := strings.Split(output, "\n") + sort.Strings(lines) + return strings.Join(lines, "\n") +} + +func runExample(eg InternalExample) (ok bool) { + if *chatty { + fmt.Printf("=== RUN %s\n", eg.Name) + } + + // Capture stdout. + stdout := os.Stdout + r, w, err := os.Pipe() + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + os.Stdout = w + outC := make(chan string) + go func() { + var buf strings.Builder + _, err := io.Copy(&buf, r) + r.Close() + if err != nil { + fmt.Fprintf(os.Stderr, "testing: copying pipe: %v\n", err) + os.Exit(1) + } + outC <- buf.String() + }() + + start := time.Now() + ok = true + + // Clean up in a deferred call so we can recover if the example panics. + defer func() { + dstr := fmtDuration(time.Since(start)) + + // Close pipe, restore stdout, get output. + w.Close() + os.Stdout = stdout + out := <-outC + + var fail string + err := recover() + got := strings.TrimSpace(out) + want := strings.TrimSpace(eg.Output) + if eg.Unordered { + if sortLines(got) != sortLines(want) && err == nil { + fail = fmt.Sprintf("got:\n%s\nwant (unordered):\n%s\n", out, eg.Output) + } + } else { + if got != want && err == nil { + fail = fmt.Sprintf("got:\n%s\nwant:\n%s\n", got, want) + } + } + if fail != "" || err != nil { + fmt.Printf("--- FAIL: %s (%s)\n%s", eg.Name, dstr, fail) + ok = false + } else if *chatty { + fmt.Printf("--- PASS: %s (%s)\n", eg.Name, dstr) + } + if err != nil { + panic(err) + } + }() + + // Run example. + eg.F() + return +} diff --git a/Source/Gold/testing/export_test.go b/Source/Gold/testing/export_test.go new file mode 100644 index 0000000..89781b4 --- /dev/null +++ b/Source/Gold/testing/export_test.go @@ -0,0 +1,10 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package testing + +var ( + RoundDown10 = roundDown10 + RoundUp = roundUp +) diff --git a/Source/Gold/testing/helper_test.go b/Source/Gold/testing/helper_test.go new file mode 100644 index 0000000..fe8ff05 --- /dev/null +++ b/Source/Gold/testing/helper_test.go @@ -0,0 +1,70 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package testing + +import ( + "bytes" + "regexp" + "strings" +) + +func TestTBHelper(t *T) { + var buf bytes.Buffer + ctx := newTestContext(1, newMatcher(regexp.MatchString, "", "")) + t1 := &T{ + common: common{ + signal: make(chan bool), + w: &buf, + }, + context: ctx, + } + t1.Run("Test", testHelper) + + want := `--- FAIL: Test (?s) +helperfuncs_test.go:12: 0 +helperfuncs_test.go:33: 1 +helperfuncs_test.go:21: 2 +helperfuncs_test.go:35: 3 +helperfuncs_test.go:42: 4 +--- FAIL: Test/sub (?s) +helperfuncs_test.go:45: 5 +helperfuncs_test.go:21: 6 +helperfuncs_test.go:44: 7 +helperfuncs_test.go:56: 8 +` + lines := strings.Split(buf.String(), "\n") + durationRE := regexp.MustCompile(`\(.*\)$`) + for i, line := range lines { + line = strings.TrimSpace(line) + line = durationRE.ReplaceAllString(line, "(?s)") + lines[i] = line + } + got := strings.Join(lines, "\n") + if got != want { + t.Errorf("got output:\n\n%s\nwant:\n\n%s", got, want) + } +} + +func TestTBHelperParallel(t *T) { + var buf bytes.Buffer + ctx := newTestContext(1, newMatcher(regexp.MatchString, "", "")) + t1 := &T{ + common: common{ + signal: make(chan bool), + w: &buf, + }, + context: ctx, + } + t1.Run("Test", parallelTestHelper) + + lines := strings.Split(strings.TrimSpace(buf.String()), "\n") + if len(lines) != 6 { + t.Fatalf("parallelTestHelper gave %d lines of output; want 6", len(lines)) + } + want := "helperfuncs_test.go:21: parallel" + if got := strings.TrimSpace(lines[1]); got != want { + t.Errorf("got output line %q; want %q", got, want) + } +} diff --git a/Source/Gold/testing/helperfuncs_test.go b/Source/Gold/testing/helperfuncs_test.go new file mode 100644 index 0000000..f2d54b3 --- /dev/null +++ b/Source/Gold/testing/helperfuncs_test.go @@ -0,0 +1,69 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package testing + +import "sync" + +// The line numbering of this file is important for TestTBHelper. + +func notHelper(t *T, msg string) { + t.Error(msg) +} + +func helper(t *T, msg string) { + t.Helper() + t.Error(msg) +} + +func notHelperCallingHelper(t *T, msg string) { + helper(t, msg) +} + +func helperCallingHelper(t *T, msg string) { + t.Helper() + helper(t, msg) +} + +func testHelper(t *T) { + // Check combinations of directly and indirectly + // calling helper functions. + notHelper(t, "0") + helper(t, "1") + notHelperCallingHelper(t, "2") + helperCallingHelper(t, "3") + + // Check a function literal closing over t that uses Helper. + fn := func(msg string) { + t.Helper() + t.Error(msg) + } + fn("4") + + t.Run("sub", func(t *T) { + helper(t, "5") + notHelperCallingHelper(t, "6") + // Check that calling Helper from inside a subtest entry function + // works as if it were in an ordinary function call. + t.Helper() + t.Error("7") + }) + + // Check that calling Helper from inside a top-level test function + // has no effect. + t.Helper() + t.Error("8") +} + +func parallelTestHelper(t *T) { + var wg sync.WaitGroup + for i := 0; i < 5; i++ { + wg.Add(1) + go func() { + notHelperCallingHelper(t, "parallel") + wg.Done() + }() + } + wg.Wait() +} diff --git a/Source/Gold/testing/internal/testdeps/deps.go b/Source/Gold/testing/internal/testdeps/deps.go new file mode 100644 index 0000000..836616c --- /dev/null +++ b/Source/Gold/testing/internal/testdeps/deps.go @@ -0,0 +1,126 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package testdeps provides access to dependencies needed by test execution. +// +// This package is imported by the generated main package, which passes +// TestDeps into testing.Main. This allows tests to use packages at run time +// without making those packages direct dependencies of package testing. +// Direct dependencies of package testing are harder to write tests for. +package testdeps + +import ( + "bufio" + "internal/testlog" + "io" + "regexp" + "runtime/pprof" + "strings" + "sync" +) + +// TestDeps is an implementation of the testing.testDeps interface, +// suitable for passing to testing.MainStart. +type TestDeps struct{} + +var matchPat string +var matchRe *regexp.Regexp + +func (TestDeps) MatchString(pat, str string) (result bool, err error) { + if matchRe == nil || matchPat != pat { + matchPat = pat + matchRe, err = regexp.Compile(matchPat) + if err != nil { + return + } + } + return matchRe.MatchString(str), nil +} + +func (TestDeps) StartCPUProfile(w io.Writer) error { + return pprof.StartCPUProfile(w) +} + +func (TestDeps) StopCPUProfile() { + pprof.StopCPUProfile() +} + +func (TestDeps) WriteProfileTo(name string, w io.Writer, debug int) error { + return pprof.Lookup(name).WriteTo(w, debug) +} + +// ImportPath is the import path of the testing binary, set by the generated main function. +var ImportPath string + +func (TestDeps) ImportPath() string { + return ImportPath +} + +// testLog implements testlog.Interface, logging actions by package os. +type testLog struct { + mu sync.Mutex + w *bufio.Writer + set bool +} + +func (l *testLog) Getenv(key string) { + l.add("getenv", key) +} + +func (l *testLog) Open(name string) { + l.add("open", name) +} + +func (l *testLog) Stat(name string) { + l.add("stat", name) +} + +func (l *testLog) Chdir(name string) { + l.add("chdir", name) +} + +// add adds the (op, name) pair to the test log. +func (l *testLog) add(op, name string) { + if strings.Contains(name, "\n") || name == "" { + return + } + + l.mu.Lock() + defer l.mu.Unlock() + if l.w == nil { + return + } + l.w.WriteString(op) + l.w.WriteByte(' ') + l.w.WriteString(name) + l.w.WriteByte('\n') +} + +var log testLog +var didSetLogger bool + +func (TestDeps) StartTestLog(w io.Writer) { + log.mu.Lock() + log.w = bufio.NewWriter(w) + if !log.set { + // Tests that define TestMain and then run m.Run multiple times + // will call StartTestLog/StopTestLog multiple times. + // Checking log.set avoids calling testlog.SetLogger multiple times + // (which will panic) and also avoids writing the header multiple times. + log.set = true + //testlog.SetLogger(&log) + testlog.SetLogger(log) + // elements change + log.w.WriteString("# test log\n") // known to cmd/go/internal/test/test.go + } + log.mu.Unlock() +} + +func (TestDeps) StopTestLog() error { + log.mu.Lock() + defer log.mu.Unlock() + err := log.w.Flush() + log.w = nil + return err +} \ No newline at end of file diff --git a/Source/Gold/testing/iotest/logger.go b/Source/Gold/testing/iotest/logger.go new file mode 100644 index 0000000..99548dc --- /dev/null +++ b/Source/Gold/testing/iotest/logger.go @@ -0,0 +1,54 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package iotest + +import ( + "io" + "log" +) + +type writeLogger struct { + prefix string + w io.Writer +} + +func (l *writeLogger) Write(p []byte) (n int, err error) { + n, err = l.w.Write(p) + if err != nil { + log.Printf("%s %x: %v", l.prefix, p[0:n], err) + } else { + log.Printf("%s %x", l.prefix, p[0:n]) + } + return +} + +// NewWriteLogger returns a writer that behaves like w except +// that it logs (using log.Printf) each write to standard error, +// printing the prefix and the hexadecimal data written. +func NewWriteLogger(prefix string, w io.Writer) io.Writer { + return &writeLogger{prefix, w} +} + +type readLogger struct { + prefix string + r io.Reader +} + +func (l *readLogger) Read(p []byte) (n int, err error) { + n, err = l.r.Read(p) + if err != nil { + log.Printf("%s %x: %v", l.prefix, p[0:n], err) + } else { + log.Printf("%s %x", l.prefix, p[0:n]) + } + return +} + +// NewReadLogger returns a reader that behaves like r except +// that it logs (using log.Printf) each read to standard error, +// printing the prefix and the hexadecimal data read. +func NewReadLogger(prefix string, r io.Reader) io.Reader { + return &readLogger{prefix, r} +} diff --git a/Source/Gold/testing/iotest/reader.go b/Source/Gold/testing/iotest/reader.go new file mode 100644 index 0000000..8d82018 --- /dev/null +++ b/Source/Gold/testing/iotest/reader.go @@ -0,0 +1,88 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package iotest implements Readers and Writers useful mainly for testing. +package iotest + +import ( + "errors" + "io" +) + +// OneByteReader returns a Reader that implements +// each non-empty Read by reading one byte from r. +func OneByteReader(r io.Reader) io.Reader { return &oneByteReader{r} } + +type oneByteReader struct { + r io.Reader +} + +func (r *oneByteReader) Read(p []byte) (int, error) { + if len(p) == 0 { + return 0, nil + } + return r.r.Read(p[0:1]) +} + +// HalfReader returns a Reader that implements Read +// by reading half as many requested bytes from r. +func HalfReader(r io.Reader) io.Reader { return &halfReader{r} } + +type halfReader struct { + r io.Reader +} + +func (r *halfReader) Read(p []byte) (int, error) { + return r.r.Read(p[0 : (len(p)+1)/2]) +} + +// DataErrReader changes the way errors are handled by a Reader. Normally, a +// Reader returns an error (typically EOF) from the first Read call after the +// last piece of data is read. DataErrReader wraps a Reader and changes its +// behavior so the final error is returned along with the final data, instead +// of in the first call after the final data. +func DataErrReader(r io.Reader) io.Reader { return &dataErrReader{r, nil, make([]byte, 1024)} } + +type dataErrReader struct { + r io.Reader + unread []byte + data []byte +} + +func (r *dataErrReader) Read(p []byte) (n int, err error) { + // loop because first call needs two reads: + // one to get data and a second to look for an error. + for { + if len(r.unread) == 0 { + n1, err1 := r.r.Read(r.data) + r.unread = r.data[0:n1] + err = err1 + } + if n > 0 || err != nil { + break + } + n = copy(p, r.unread) + r.unread = r.unread[n:] + } + return +} + +var ErrTimeout = errors.New("timeout") + +// TimeoutReader returns ErrTimeout on the second read +// with no data. Subsequent calls to read succeed. 
+func TimeoutReader(r io.Reader) io.Reader { return &timeoutReader{r, 0} } + +type timeoutReader struct { + r io.Reader + count int +} + +func (r *timeoutReader) Read(p []byte) (int, error) { + r.count++ + if r.count == 2 { + return 0, ErrTimeout + } + return r.r.Read(p) +} diff --git a/Source/Gold/testing/iotest/writer.go b/Source/Gold/testing/iotest/writer.go new file mode 100644 index 0000000..af61ab8 --- /dev/null +++ b/Source/Gold/testing/iotest/writer.go @@ -0,0 +1,35 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package iotest + +import "io" + +// TruncateWriter returns a Writer that writes to w +// but stops silently after n bytes. +func TruncateWriter(w io.Writer, n int64) io.Writer { + return &truncateWriter{w, n} +} + +type truncateWriter struct { + w io.Writer + n int64 +} + +func (t *truncateWriter) Write(p []byte) (n int, err error) { + if t.n <= 0 { + return len(p), nil + } + // real write + n = len(p) + if int64(n) > t.n { + n = int(t.n) + } + n, err = t.w.Write(p[0:n]) + t.n -= int64(n) + if err == nil { + n = len(p) + } + return +} diff --git a/Source/Gold/testing/match.go b/Source/Gold/testing/match.go new file mode 100644 index 0000000..b18c6e7 --- /dev/null +++ b/Source/Gold/testing/match.go @@ -0,0 +1,168 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package testing + +import ( + "fmt" + "os" + "strconv" + "strings" + "sync" +) + +// matcher sanitizes, uniques, and filters names of subtests and subbenchmarks. +type matcher struct { + filter []string + matchFunc func(pat, str string) (bool, error) + + mu sync.Mutex + subNames map[string]int64 +} + +// TODO: fix test_main to avoid race and improve caching, also allowing to +// eliminate this Mutex. +var matchMutex sync.Mutex + +func newMatcher(matchString func(pat, str string) (bool, error), patterns, name string) *matcher { + var filter []string + if patterns != "" { + filter = splitRegexp(patterns) + for i, s := range filter { + filter[i] = rewrite(s) + } + // Verify filters before doing any processing. + for i, s := range filter { + if _, err := matchString(s, "non-empty"); err != nil { + fmt.Fprintf(os.Stderr, "testing: invalid regexp for element %d of %s (%q): %s\n", i, name, s, err) + os.Exit(1) + } + } + } + return &matcher{ + filter: filter, + matchFunc: matchString, + subNames: map[string]int64{}, + } +} + +func (m *matcher) fullName(c *common, subname string) (name string, ok, partial bool) { + name = subname + + m.mu.Lock() + defer m.mu.Unlock() + + if c != nil && c.level > 0 { + name = m.unique(c.name, rewrite(subname)) + } + + matchMutex.Lock() + defer matchMutex.Unlock() + + // We check the full array of paths each time to allow for the case that + // a pattern contains a '/'. + elem := strings.Split(name, "/") + for i, s := range elem { + if i >= len(m.filter) { + break + } + if ok, _ := m.matchFunc(m.filter[i], s); !ok { + return name, false, false + } + } + return name, true, len(elem) < len(m.filter) +} + +func splitRegexp(s string) []string { + a := make([]string, 0, strings.Count(s, "/")) + cs := 0 + cp := 0 + for i := 0; i < len(s); { + switch s[i] { + case '[': + cs++ + case ']': + if cs--; cs < 0 { // An unmatched ']' is legal. 
+ cs = 0 + } + case '(': + if cs == 0 { + cp++ + } + case ')': + if cs == 0 { + cp-- + } + case '\\': + i++ + case '/': + if cs == 0 && cp == 0 { + a = append(a, s[:i]) + s = s[i+1:] + i = 0 + continue + } + } + i++ + } + return append(a, s) +} + +// unique creates a unique name for the given parent and subname by affixing it +// with one or more counts, if necessary. +func (m *matcher) unique(parent, subname string) string { + name := fmt.Sprintf("%s/%s", parent, subname) + empty := subname == "" + for { + next, exists := m.subNames[name] + if !empty && !exists { + m.subNames[name] = 1 // next count is 1 + return name + } + // Name was already used. We increment with the count and append a + // string with the count. + m.subNames[name] = next + 1 + + // Add a count to guarantee uniqueness. + name = fmt.Sprintf("%s#%02d", name, next) + empty = false + } +} + +// rewrite rewrites a subname to having only printable characters and no white +// space. +func rewrite(s string) string { + b := []byte{} + for _, r := range s { + switch { + case isSpace(r): + b = append(b, '_') + case !strconv.IsPrint(r): + s := strconv.QuoteRune(r) + b = append(b, s[1:len(s)-1]...) + default: + b = append(b, string(r)...) + } + } + return string(b) +} + +func isSpace(r rune) bool { + if r < 0x2000 { + switch r { + // Note: not the same as Unicode Z class. + case '\t', '\n', '\v', '\f', '\r', ' ', 0x85, 0xA0, 0x1680: + return true + } + } else { + if r <= 0x200a { + return true + } + switch r { + case 0x2028, 0x2029, 0x202f, 0x205f, 0x3000: + return true + } + } + return false +} diff --git a/Source/Gold/testing/match_test.go b/Source/Gold/testing/match_test.go new file mode 100644 index 0000000..8c09dc6 --- /dev/null +++ b/Source/Gold/testing/match_test.go @@ -0,0 +1,186 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package testing + +import ( + "reflect" + "regexp" + "unicode" +) + +// Verify that our IsSpace agrees with unicode.IsSpace. +func TestIsSpace(t *T) { + n := 0 + for r := rune(0); r <= unicode.MaxRune; r++ { + if isSpace(r) != unicode.IsSpace(r) { + t.Errorf("IsSpace(%U)=%t incorrect", r, isSpace(r)) + n++ + if n > 10 { + return + } + } + } +} + +func TestSplitRegexp(t *T) { + res := func(s ...string) []string { return s } + testCases := []struct { + pattern string + result []string + }{ + // Correct patterns + // If a regexp pattern is correct, all split regexps need to be correct + // as well. + {"", res("")}, + {"/", res("", "")}, + {"//", res("", "", "")}, + {"A", res("A")}, + {"A/B", res("A", "B")}, + {"A/B/", res("A", "B", "")}, + {"/A/B/", res("", "A", "B", "")}, + {"[A]/(B)", res("[A]", "(B)")}, + {"[/]/[/]", res("[/]", "[/]")}, + {"[/]/[:/]", res("[/]", "[:/]")}, + {"/]", res("", "]")}, + {"]/", res("]", "")}, + {"]/[/]", res("]", "[/]")}, + {`([)/][(])`, res(`([)/][(])`)}, + {"[(]/[)]", res("[(]", "[)]")}, + + // Faulty patterns + // Errors in original should produce at least one faulty regexp in results. 
+ {")/", res(")/")}, + {")/(/)", res(")/(", ")")}, + {"a[/)b", res("a[/)b")}, + {"(/]", res("(/]")}, + {"(/", res("(/")}, + {"[/]/[/", res("[/]", "[/")}, + {`\p{/}`, res(`\p{`, "}")}, + {`\p/`, res(`\p`, "")}, + {`[[:/:]]`, res(`[[:/:]]`)}, + } + for _, tc := range testCases { + a := splitRegexp(tc.pattern) + if !reflect.DeepEqual(a, tc.result) { + t.Errorf("splitRegexp(%q) = %#v; want %#v", tc.pattern, a, tc.result) + } + + // If there is any error in the pattern, one of the returned subpatterns + // needs to have an error as well. + if _, err := regexp.Compile(tc.pattern); err != nil { + ok := true + for _, re := range a { + if _, err := regexp.Compile(re); err != nil { + ok = false + } + } + if ok { + t.Errorf("%s: expected error in any of %q", tc.pattern, a) + } + } + } +} + +func TestMatcher(t *T) { + testCases := []struct { + pattern string + parent, sub string + ok bool + partial bool + }{ + // Behavior without subtests. + {"", "", "TestFoo", true, false}, + {"TestFoo", "", "TestFoo", true, false}, + {"TestFoo/", "", "TestFoo", true, true}, + {"TestFoo/bar/baz", "", "TestFoo", true, true}, + {"TestFoo", "", "TestBar", false, false}, + {"TestFoo/", "", "TestBar", false, false}, + {"TestFoo/bar/baz", "", "TestBar/bar/baz", false, false}, + + // with subtests + {"", "TestFoo", "x", true, false}, + {"TestFoo", "TestFoo", "x", true, false}, + {"TestFoo/", "TestFoo", "x", true, false}, + {"TestFoo/bar/baz", "TestFoo", "bar", true, true}, + // Subtest with a '/' in its name still allows for copy and pasted names + // to match. + {"TestFoo/bar/baz", "TestFoo", "bar/baz", true, false}, + {"TestFoo/bar/baz", "TestFoo/bar", "baz", true, false}, + {"TestFoo/bar/baz", "TestFoo", "x", false, false}, + {"TestFoo", "TestBar", "x", false, false}, + {"TestFoo/", "TestBar", "x", false, false}, + {"TestFoo/bar/baz", "TestBar", "x/bar/baz", false, false}, + + // subtests only + {"", "TestFoo", "x", true, false}, + {"/", "TestFoo", "x", true, false}, + {"./", "TestFoo", "x", true, false}, + {"./.", "TestFoo", "x", true, false}, + {"/bar/baz", "TestFoo", "bar", true, true}, + {"/bar/baz", "TestFoo", "bar/baz", true, false}, + {"//baz", "TestFoo", "bar/baz", true, false}, + {"//", "TestFoo", "bar/baz", true, false}, + {"/bar/baz", "TestFoo/bar", "baz", true, false}, + {"//foo", "TestFoo", "bar/baz", false, false}, + {"/bar/baz", "TestFoo", "x", false, false}, + {"/bar/baz", "TestBar", "x/bar/baz", false, false}, + } + + for _, tc := range testCases { + m := newMatcher(regexp.MatchString, tc.pattern, "-test.run") + + parent := &common{name: tc.parent} + if tc.parent != "" { + parent.level = 1 + } + if n, ok, partial := m.fullName(parent, tc.sub); ok != tc.ok || partial != tc.partial { + t.Errorf("for pattern %q, fullName(parent=%q, sub=%q) = %q, ok %v partial %v; want ok %v partial %v", + tc.pattern, tc.parent, tc.sub, n, ok, partial, tc.ok, tc.partial) + } + } +} + +func TestNaming(t *T) { + m := newMatcher(regexp.MatchString, "", "") + + parent := &common{name: "x", level: 1} // top-level test. + + // Rig the matcher with some preloaded values. + m.subNames["x/b"] = 1000 + + testCases := []struct { + name, want string + }{ + // Uniqueness + {"", "x/#00"}, + {"", "x/#01"}, + + {"t", "x/t"}, + {"t", "x/t#01"}, + {"t", "x/t#02"}, + + {"a#01", "x/a#01"}, // user has subtest with this name. + {"a", "x/a"}, // doesn't conflict with this name. + {"a", "x/a#01#01"}, // conflict, add disambiguating string. + {"a", "x/a#02"}, // This string is claimed now, so resume + {"a", "x/a#03"}, // with counting. 
+ {"a#02", "x/a#02#01"}, + + {"b", "x/b#1000"}, // rigged, see above + {"b", "x/b#1001"}, + + // // Sanitizing + {"A:1 B:2", "x/A:1_B:2"}, + {"s\t\r\u00a0", "x/s___"}, + {"\x01", `x/\x01`}, + {"\U0010ffff", `x/\U0010ffff`}, + } + + for i, tc := range testCases { + if got, _, _ := m.fullName(parent, tc.name); got != tc.want { + t.Errorf("%d:%s: got %q; want %q", i, tc.name, got, tc.want) + } + } +} diff --git a/Source/Gold/testing/quick/quick.go b/Source/Gold/testing/quick/quick.go new file mode 100644 index 0000000..0457fc7 --- /dev/null +++ b/Source/Gold/testing/quick/quick.go @@ -0,0 +1,384 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package quick implements utility functions to help with black box testing. +// +// The testing/quick package is frozen and is not accepting new features. +package quick + +import ( + "flag" + "fmt" + "math" + "math/rand" + "reflect" + "strings" + "time" +) + +var defaultMaxCount *int = flag.Int("quickchecks", 100, "The default number of iterations for each check") + +// A Generator can generate random values of its own type. +type Generator interface { + // Generate returns a random instance of the type on which it is a + // method using the size as a size hint. + Generate(rand *rand.Rand, size int) reflect.Value +} + +// randFloat32 generates a random float taking the full range of a float32. +func randFloat32(rand *rand.Rand) float32 { + f := rand.Float64() * math.MaxFloat32 + if rand.Int()&1 == 1 { + f = -f + } + return float32(f) +} + +// randFloat64 generates a random float taking the full range of a float64. +func randFloat64(rand *rand.Rand) float64 { + f := rand.Float64() * math.MaxFloat64 + if rand.Int()&1 == 1 { + f = -f + } + return f +} + +// randInt64 returns a random int64. +func randInt64(rand *rand.Rand) int64 { + return int64(rand.Uint64()) +} + +// complexSize is the maximum length of arbitrary values that contain other +// values. +const complexSize = 50 + +// Value returns an arbitrary value of the given type. +// If the type implements the Generator interface, that will be used. +// Note: To create arbitrary values for structs, all the fields must be exported. +func Value(t reflect.Type, rand *rand.Rand) (value reflect.Value, ok bool) { + return sizedValue(t, rand, complexSize) +} + +// sizedValue returns an arbitrary value of the given type. The size +// hint is used for shrinking as a function of indirection level so +// that recursive data structures will terminate. 
+func sizedValue(t reflect.Type, rand *rand.Rand, size int) (value reflect.Value, ok bool) { + if m, ok := reflect.Zero(t).Interface().(Generator); ok { + return m.Generate(rand, size), true + } + + v := reflect.New(t).Elem() + switch concrete := t; concrete.Kind() { + case reflect.Bool: + v.SetBool(rand.Int()&1 == 0) + case reflect.Float32: + v.SetFloat(float64(randFloat32(rand))) + case reflect.Float64: + v.SetFloat(randFloat64(rand)) + case reflect.Complex64: + v.SetComplex(complex(float64(randFloat32(rand)), float64(randFloat32(rand)))) + case reflect.Complex128: + v.SetComplex(complex(randFloat64(rand), randFloat64(rand))) + case reflect.Int16: + v.SetInt(randInt64(rand)) + case reflect.Int32: + v.SetInt(randInt64(rand)) + case reflect.Int64: + v.SetInt(randInt64(rand)) + case reflect.Int8: + v.SetInt(randInt64(rand)) + case reflect.Int: + v.SetInt(randInt64(rand)) + case reflect.Uint16: + v.SetUint(uint64(randInt64(rand))) + case reflect.Uint32: + v.SetUint(uint64(randInt64(rand))) + case reflect.Uint64: + v.SetUint(uint64(randInt64(rand))) + case reflect.Uint8: + v.SetUint(uint64(randInt64(rand))) + case reflect.Uint: + v.SetUint(uint64(randInt64(rand))) + case reflect.Uintptr: + v.SetUint(uint64(randInt64(rand))) + case reflect.Map: + numElems := rand.Intn(size) + v.Set(reflect.MakeMap(concrete)) + for i := 0; i < numElems; i++ { + key, ok1 := sizedValue(concrete.Key(), rand, size) + value, ok2 := sizedValue(concrete.Elem(), rand, size) + if !ok1 || !ok2 { + return reflect.Value{}, false + } + v.SetMapIndex(key, value) + } + case reflect.Ptr: + if rand.Intn(size) == 0 { + v.Set(reflect.Zero(concrete)) // Generate nil pointer. + } else { + elem, ok := sizedValue(concrete.Elem(), rand, size) + if !ok { + return reflect.Value{}, false + } + v.Set(reflect.New(concrete.Elem())) + v.Elem().Set(elem) + } + case reflect.Slice: + numElems := rand.Intn(size) + sizeLeft := size - numElems + v.Set(reflect.MakeSlice(concrete, numElems, numElems)) + for i := 0; i < numElems; i++ { + elem, ok := sizedValue(concrete.Elem(), rand, sizeLeft) + if !ok { + return reflect.Value{}, false + } + v.Index(i).Set(elem) + } + case reflect.Array: + for i := 0; i < v.Len(); i++ { + elem, ok := sizedValue(concrete.Elem(), rand, size) + if !ok { + return reflect.Value{}, false + } + v.Index(i).Set(elem) + } + case reflect.String: + numChars := rand.Intn(complexSize) + codePoints := make([]rune, numChars) + for i := 0; i < numChars; i++ { + codePoints[i] = rune(rand.Intn(0x10ffff)) + } + v.SetString(string(codePoints)) + case reflect.Struct: + n := v.NumField() + // Divide sizeLeft evenly among the struct fields. + sizeLeft := size + if n > sizeLeft { + sizeLeft = 1 + } else if n > 0 { + sizeLeft /= n + } + for i := 0; i < n; i++ { + elem, ok := sizedValue(concrete.Field(i).Type, rand, sizeLeft) + if !ok { + return reflect.Value{}, false + } + v.Field(i).Set(elem) + } + default: + return reflect.Value{}, false + } + + return v, true +} + +// A Config structure contains options for running a test. +type Config struct { + // MaxCount sets the maximum number of iterations. + // If zero, MaxCountScale is used. + MaxCount int + // MaxCountScale is a non-negative scale factor applied to the + // default maximum. + // If zero, the default is unchanged. + MaxCountScale float64 + // Rand specifies a source of random numbers. + // If nil, a default pseudo-random source will be used. 
+ Rand *rand.Rand + // Values specifies a function to generate a slice of + // arbitrary reflect.Values that are congruent with the + // arguments to the function being tested. + // If nil, the top-level Value function is used to generate them. + Values func([]reflect.Value, *rand.Rand) +} + +var defaultConfig Config + +// getRand returns the *rand.Rand to use for a given Config. +func (c *Config) getRand() *rand.Rand { + if c.Rand == nil { + return rand.New(rand.NewSource(time.Now().UnixNano())) + } + return c.Rand +} + +// getMaxCount returns the maximum number of iterations to run for a given +// Config. +func (c *Config) getMaxCount() (maxCount int) { + maxCount = c.MaxCount + if maxCount == 0 { + if c.MaxCountScale != 0 { + maxCount = int(c.MaxCountScale * float64(*defaultMaxCount)) + } else { + maxCount = *defaultMaxCount + } + } + + return +} + +// A SetupError is the result of an error in the way that check is being +// used, independent of the functions being tested. +type SetupError string + +func (s SetupError) Error() string { return string(s) } + +// A CheckError is the result of Check finding an error. +type CheckError struct { + Count int + In []interface{} +} + +func (s *CheckError) Error() string { + return fmt.Sprintf("#%d: failed on input %s", s.Count, toString(s.In)) +} + +// A CheckEqualError is the result CheckEqual finding an error. +type CheckEqualError struct { + CheckError + Out1 []interface{} + Out2 []interface{} +} + +func (s *CheckEqualError) Error() string { + return fmt.Sprintf("#%d: failed on input %s. Output 1: %s. Output 2: %s", s.Count, toString(s.In), toString(s.Out1), toString(s.Out2)) +} + +// Check looks for an input to f, any function that returns bool, +// such that f returns false. It calls f repeatedly, with arbitrary +// values for each argument. If f returns false on a given input, +// Check returns that input as a *CheckError. +// For example: +// +// func TestOddMultipleOfThree(t *testing.T) { +// f := func(x int) bool { +// y := OddMultipleOfThree(x) +// return y%2 == 1 && y%3 == 0 +// } +// if err := quick.Check(f, nil); err != nil { +// t.Error(err) +// } +// } +func Check(f interface{}, config *Config) error { + if config == nil { + config = &defaultConfig + } + + fVal, fType, ok := functionAndType(f) + if !ok { + return SetupError("argument is not a function") + } + + if fType.NumOut() != 1 { + return SetupError("function does not return one value") + } + if fType.Out(0).Kind() != reflect.Bool { + return SetupError("function does not return a bool") + } + + arguments := make([]reflect.Value, fType.NumIn()) + rand := config.getRand() + maxCount := config.getMaxCount() + + for i := 0; i < maxCount; i++ { + err := arbitraryValues(arguments, fType, config, rand) + if err != nil { + return err + } + + if !fVal.Call(arguments)[0].Bool() { + return &CheckError{i + 1, toInterfaces(arguments)} + } + } + + return nil +} + +// CheckEqual looks for an input on which f and g return different results. +// It calls f and g repeatedly with arbitrary values for each argument. +// If f and g return different answers, CheckEqual returns a *CheckEqualError +// describing the input and the outputs. 
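+// A typical use looks like the following sketch (Sort and ReferenceSort stand
+// in for two implementations with identical signatures; they are not defined
+// in this package):
+//
+//	func TestSortsAgree(t *testing.T) {
+//		if err := quick.CheckEqual(Sort, ReferenceSort, nil); err != nil {
+//			t.Error(err)
+//		}
+//	}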
+func CheckEqual(f, g interface{}, config *Config) error { + if config == nil { + config = &defaultConfig + } + + x, xType, ok := functionAndType(f) + if !ok { + return SetupError("f is not a function") + } + y, yType, ok := functionAndType(g) + if !ok { + return SetupError("g is not a function") + } + + if xType != yType { + return SetupError("functions have different types") + } + + arguments := make([]reflect.Value, xType.NumIn()) + rand := config.getRand() + maxCount := config.getMaxCount() + + for i := 0; i < maxCount; i++ { + err := arbitraryValues(arguments, xType, config, rand) + if err != nil { + return err + } + + xOut := toInterfaces(x.Call(arguments)) + yOut := toInterfaces(y.Call(arguments)) + + if !reflect.DeepEqual(xOut, yOut) { + return &CheckEqualError{CheckError{i + 1, toInterfaces(arguments)}, xOut, yOut} + } + } + + return nil +} + +// arbitraryValues writes Values to args such that args contains Values +// suitable for calling f. +func arbitraryValues(args []reflect.Value, f reflect.Type, config *Config, rand *rand.Rand) (err error) { + if config.Values != nil { + config.Values(args, rand) + return + } + + for j := 0; j < len(args); j++ { + var ok bool + args[j], ok = Value(f.In(j), rand) + if !ok { + err = SetupError(fmt.Sprintf("cannot create arbitrary value of type %s for argument %d", f.In(j), j)) + return + } + } + + return +} + +func functionAndType(f interface{}) (v reflect.Value, t reflect.Type, ok bool) { + v = reflect.ValueOf(f) + ok = v.Kind() == reflect.Func + if !ok { + return + } + t = v.Type() + return +} + +func toInterfaces(values []reflect.Value) []interface{} { + ret := make([]interface{}, len(values)) + for i, v := range values { + ret[i] = v.Interface() + } + return ret +} + +func toString(interfaces []interface{}) string { + s := make([]string, len(interfaces)) + for i, v := range interfaces { + s[i] = fmt.Sprintf("%#v", v) + } + return strings.Join(s, ", ") +} diff --git a/Source/Gold/testing/quick/quick_test.go b/Source/Gold/testing/quick/quick_test.go new file mode 100644 index 0000000..4246cd1 --- /dev/null +++ b/Source/Gold/testing/quick/quick_test.go @@ -0,0 +1,327 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
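+
+// Illustrative sketch (not part of the upstream Go sources): Config.Values can
+// be used to constrain the generated arguments. The test and property names
+// below are hypothetical, and the snippet assumes imports of reflect,
+// math/rand, testing, and testing/quick.
+//
+//	func TestNonNegativeOnly(t *testing.T) {
+//		cfg := &quick.Config{
+//			MaxCount: 1000,
+//			Values: func(args []reflect.Value, r *rand.Rand) {
+//				// Supply a non-negative int for the property's single argument.
+//				args[0] = reflect.ValueOf(r.Intn(1 << 20))
+//			},
+//		}
+//		property := func(x int) bool { return x >= 0 }
+//		if err := quick.Check(property, cfg); err != nil {
+//			t.Error(err)
+//		}
+//	}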
+ +package quick + +import ( + "math/rand" + "reflect" + "testing" +) + +func fArray(a [4]byte) [4]byte { return a } + +type TestArrayAlias [4]byte + +func fArrayAlias(a TestArrayAlias) TestArrayAlias { return a } + +func fBool(a bool) bool { return a } + +type TestBoolAlias bool + +func fBoolAlias(a TestBoolAlias) TestBoolAlias { return a } + +func fFloat32(a float32) float32 { return a } + +type TestFloat32Alias float32 + +func fFloat32Alias(a TestFloat32Alias) TestFloat32Alias { return a } + +func fFloat64(a float64) float64 { return a } + +type TestFloat64Alias float64 + +func fFloat64Alias(a TestFloat64Alias) TestFloat64Alias { return a } + +func fComplex64(a complex64) complex64 { return a } + +type TestComplex64Alias complex64 + +func fComplex64Alias(a TestComplex64Alias) TestComplex64Alias { return a } + +func fComplex128(a complex128) complex128 { return a } + +type TestComplex128Alias complex128 + +func fComplex128Alias(a TestComplex128Alias) TestComplex128Alias { return a } + +func fInt16(a int16) int16 { return a } + +type TestInt16Alias int16 + +func fInt16Alias(a TestInt16Alias) TestInt16Alias { return a } + +func fInt32(a int32) int32 { return a } + +type TestInt32Alias int32 + +func fInt32Alias(a TestInt32Alias) TestInt32Alias { return a } + +func fInt64(a int64) int64 { return a } + +type TestInt64Alias int64 + +func fInt64Alias(a TestInt64Alias) TestInt64Alias { return a } + +func fInt8(a int8) int8 { return a } + +type TestInt8Alias int8 + +func fInt8Alias(a TestInt8Alias) TestInt8Alias { return a } + +func fInt(a int) int { return a } + +type TestIntAlias int + +func fIntAlias(a TestIntAlias) TestIntAlias { return a } + +func fMap(a map[int]int) map[int]int { return a } + +type TestMapAlias map[int]int + +func fMapAlias(a TestMapAlias) TestMapAlias { return a } + +func fPtr(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +type TestPtrAlias *int + +func fPtrAlias(a TestPtrAlias) TestPtrAlias { return a } + +func fSlice(a []byte) []byte { return a } + +type TestSliceAlias []byte + +func fSliceAlias(a TestSliceAlias) TestSliceAlias { return a } + +func fString(a string) string { return a } + +type TestStringAlias string + +func fStringAlias(a TestStringAlias) TestStringAlias { return a } + +type TestStruct struct { + A int + B string +} + +func fStruct(a TestStruct) TestStruct { return a } + +type TestStructAlias TestStruct + +func fStructAlias(a TestStructAlias) TestStructAlias { return a } + +func fUint16(a uint16) uint16 { return a } + +type TestUint16Alias uint16 + +func fUint16Alias(a TestUint16Alias) TestUint16Alias { return a } + +func fUint32(a uint32) uint32 { return a } + +type TestUint32Alias uint32 + +func fUint32Alias(a TestUint32Alias) TestUint32Alias { return a } + +func fUint64(a uint64) uint64 { return a } + +type TestUint64Alias uint64 + +func fUint64Alias(a TestUint64Alias) TestUint64Alias { return a } + +func fUint8(a uint8) uint8 { return a } + +type TestUint8Alias uint8 + +func fUint8Alias(a TestUint8Alias) TestUint8Alias { return a } + +func fUint(a uint) uint { return a } + +type TestUintAlias uint + +func fUintAlias(a TestUintAlias) TestUintAlias { return a } + +func fUintptr(a uintptr) uintptr { return a } + +type TestUintptrAlias uintptr + +func fUintptrAlias(a TestUintptrAlias) TestUintptrAlias { return a } + +func reportError(property string, err error, t *testing.T) { + if err != nil { + t.Errorf("%s: %s", property, err) + } +} + +func TestCheckEqual(t *testing.T) { + reportError("fArray", CheckEqual(fArray, fArray, 
nil), t) + reportError("fArrayAlias", CheckEqual(fArrayAlias, fArrayAlias, nil), t) + reportError("fBool", CheckEqual(fBool, fBool, nil), t) + reportError("fBoolAlias", CheckEqual(fBoolAlias, fBoolAlias, nil), t) + reportError("fFloat32", CheckEqual(fFloat32, fFloat32, nil), t) + reportError("fFloat32Alias", CheckEqual(fFloat32Alias, fFloat32Alias, nil), t) + reportError("fFloat64", CheckEqual(fFloat64, fFloat64, nil), t) + reportError("fFloat64Alias", CheckEqual(fFloat64Alias, fFloat64Alias, nil), t) + reportError("fComplex64", CheckEqual(fComplex64, fComplex64, nil), t) + reportError("fComplex64Alias", CheckEqual(fComplex64Alias, fComplex64Alias, nil), t) + reportError("fComplex128", CheckEqual(fComplex128, fComplex128, nil), t) + reportError("fComplex128Alias", CheckEqual(fComplex128Alias, fComplex128Alias, nil), t) + reportError("fInt16", CheckEqual(fInt16, fInt16, nil), t) + reportError("fInt16Alias", CheckEqual(fInt16Alias, fInt16Alias, nil), t) + reportError("fInt32", CheckEqual(fInt32, fInt32, nil), t) + reportError("fInt32Alias", CheckEqual(fInt32Alias, fInt32Alias, nil), t) + reportError("fInt64", CheckEqual(fInt64, fInt64, nil), t) + reportError("fInt64Alias", CheckEqual(fInt64Alias, fInt64Alias, nil), t) + reportError("fInt8", CheckEqual(fInt8, fInt8, nil), t) + reportError("fInt8Alias", CheckEqual(fInt8Alias, fInt8Alias, nil), t) + reportError("fInt", CheckEqual(fInt, fInt, nil), t) + reportError("fIntAlias", CheckEqual(fIntAlias, fIntAlias, nil), t) + reportError("fInt32", CheckEqual(fInt32, fInt32, nil), t) + reportError("fInt32Alias", CheckEqual(fInt32Alias, fInt32Alias, nil), t) + reportError("fMap", CheckEqual(fMap, fMap, nil), t) + reportError("fMapAlias", CheckEqual(fMapAlias, fMapAlias, nil), t) + reportError("fPtr", CheckEqual(fPtr, fPtr, nil), t) + reportError("fPtrAlias", CheckEqual(fPtrAlias, fPtrAlias, nil), t) + reportError("fSlice", CheckEqual(fSlice, fSlice, nil), t) + reportError("fSliceAlias", CheckEqual(fSliceAlias, fSliceAlias, nil), t) + reportError("fString", CheckEqual(fString, fString, nil), t) + reportError("fStringAlias", CheckEqual(fStringAlias, fStringAlias, nil), t) + reportError("fStruct", CheckEqual(fStruct, fStruct, nil), t) + reportError("fStructAlias", CheckEqual(fStructAlias, fStructAlias, nil), t) + reportError("fUint16", CheckEqual(fUint16, fUint16, nil), t) + reportError("fUint16Alias", CheckEqual(fUint16Alias, fUint16Alias, nil), t) + reportError("fUint32", CheckEqual(fUint32, fUint32, nil), t) + reportError("fUint32Alias", CheckEqual(fUint32Alias, fUint32Alias, nil), t) + reportError("fUint64", CheckEqual(fUint64, fUint64, nil), t) + reportError("fUint64Alias", CheckEqual(fUint64Alias, fUint64Alias, nil), t) + reportError("fUint8", CheckEqual(fUint8, fUint8, nil), t) + reportError("fUint8Alias", CheckEqual(fUint8Alias, fUint8Alias, nil), t) + reportError("fUint", CheckEqual(fUint, fUint, nil), t) + reportError("fUintAlias", CheckEqual(fUintAlias, fUintAlias, nil), t) + reportError("fUintptr", CheckEqual(fUintptr, fUintptr, nil), t) + reportError("fUintptrAlias", CheckEqual(fUintptrAlias, fUintptrAlias, nil), t) +} + +// This tests that ArbitraryValue is working by checking that all the arbitrary +// values of type MyStruct have x = 42. 
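+// (myStruct implements the Generator interface, so Value uses its Generate
+// method instead of reflecting over the struct's unexported field.)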
+type myStruct struct { + x int +} + +func (m myStruct) Generate(r *rand.Rand, _ int) reflect.Value { + return reflect.ValueOf(myStruct{x: 42}) +} + +func myStructProperty(in myStruct) bool { return in.x == 42 } + +func TestCheckProperty(t *testing.T) { + reportError("myStructProperty", Check(myStructProperty, nil), t) +} + +func TestFailure(t *testing.T) { + f := func(x int) bool { return false } + err := Check(f, nil) + if err == nil { + t.Errorf("Check didn't return an error") + } + if _, ok := err.(*CheckError); !ok { + t.Errorf("Error was not a CheckError: %s", err) + } + + err = CheckEqual(fUint, fUint32, nil) + if err == nil { + t.Errorf("#1 CheckEqual didn't return an error") + } + if _, ok := err.(SetupError); !ok { + t.Errorf("#1 Error was not a SetupError: %s", err) + } + + err = CheckEqual(func(x, y int) {}, func(x int) {}, nil) + if err == nil { + t.Errorf("#2 CheckEqual didn't return an error") + } + if _, ok := err.(SetupError); !ok { + t.Errorf("#2 Error was not a SetupError: %s", err) + } + + err = CheckEqual(func(x int) int { return 0 }, func(x int) int32 { return 0 }, nil) + if err == nil { + t.Errorf("#3 CheckEqual didn't return an error") + } + if _, ok := err.(SetupError); !ok { + t.Errorf("#3 Error was not a SetupError: %s", err) + } +} + +// Recursive data structures didn't terminate. +// Issues 8818 and 11148. +func TestRecursive(t *testing.T) { + type R struct { + Ptr *R + SliceP []*R + Slice []R + Map map[int]R + MapP map[int]*R + MapR map[*R]*R + SliceMap []map[int]R + } + + f := func(r R) bool { return true } + Check(f, nil) +} + +func TestEmptyStruct(t *testing.T) { + f := func(struct{}) bool { return true } + Check(f, nil) +} + +type ( + A struct{ B *B } + B struct{ A *A } +) + +func TestMutuallyRecursive(t *testing.T) { + f := func(a A) bool { return true } + Check(f, nil) +} + +// Some serialization formats (e.g. encoding/pem) cannot distinguish +// between a nil and an empty map or slice, so avoid generating the +// zero value for these. +func TestNonZeroSliceAndMap(t *testing.T) { + type Q struct { + M map[int]int + S []int + } + f := func(q Q) bool { + return q.M != nil && q.S != nil + } + err := Check(f, nil) + if err != nil { + t.Fatal(err) + } +} + +func TestInt64(t *testing.T) { + var lo, hi int64 + f := func(x int64) bool { + if x < lo { + lo = x + } + if x > hi { + hi = x + } + return true + } + cfg := &Config{MaxCount: 100000} + Check(f, cfg) + if uint64(lo)>>62 == 0 || uint64(hi)>>62 == 0 { + t.Errorf("int64 returned range %#016x,%#016x; does not look like full range", lo, hi) + } +} diff --git a/Source/Gold/testing/sub_test.go b/Source/Gold/testing/sub_test.go new file mode 100644 index 0000000..9af3909 --- /dev/null +++ b/Source/Gold/testing/sub_test.go @@ -0,0 +1,699 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package testing + +import ( + "bytes" + "fmt" + "regexp" + "runtime" + "strings" + "sync" + "sync/atomic" + "time" +) + +func init() { + // Make benchmark tests run 10* faster. 
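+	// (The -test.benchtime flag defaults to 1s, so 100ms keeps these self-tests quick.)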
+ *benchTime = 100 * time.Millisecond +} + +func TestTestContext(t *T) { + const ( + add1 = 0 + done = 1 + ) + // After each of the calls are applied to the context, the + type call struct { + typ int // run or done + // result from applying the call + running int + waiting int + started bool + } + testCases := []struct { + max int + run []call + }{{ + max: 1, + run: []call{ + {typ: add1, running: 1, waiting: 0, started: true}, + {typ: done, running: 0, waiting: 0, started: false}, + }, + }, { + max: 1, + run: []call{ + {typ: add1, running: 1, waiting: 0, started: true}, + {typ: add1, running: 1, waiting: 1, started: false}, + {typ: done, running: 1, waiting: 0, started: true}, + {typ: done, running: 0, waiting: 0, started: false}, + {typ: add1, running: 1, waiting: 0, started: true}, + }, + }, { + max: 3, + run: []call{ + {typ: add1, running: 1, waiting: 0, started: true}, + {typ: add1, running: 2, waiting: 0, started: true}, + {typ: add1, running: 3, waiting: 0, started: true}, + {typ: add1, running: 3, waiting: 1, started: false}, + {typ: add1, running: 3, waiting: 2, started: false}, + {typ: add1, running: 3, waiting: 3, started: false}, + {typ: done, running: 3, waiting: 2, started: true}, + {typ: add1, running: 3, waiting: 3, started: false}, + {typ: done, running: 3, waiting: 2, started: true}, + {typ: done, running: 3, waiting: 1, started: true}, + {typ: done, running: 3, waiting: 0, started: true}, + {typ: done, running: 2, waiting: 0, started: false}, + {typ: done, running: 1, waiting: 0, started: false}, + {typ: done, running: 0, waiting: 0, started: false}, + }, + }} + for i, tc := range testCases { + ctx := &testContext{ + startParallel: make(chan bool), + maxParallel: tc.max, + } + for j, call := range tc.run { + doCall := func(f func()) chan bool { + done := make(chan bool) + go func() { + f() + done <- true + }() + return done + } + started := false + switch call.typ { + case add1: + signal := doCall(ctx.waitParallel) + select { + case <-signal: + started = true + case ctx.startParallel <- true: + <-signal + } + case done: + signal := doCall(ctx.release) + select { + case <-signal: + case <-ctx.startParallel: + started = true + <-signal + } + } + if started != call.started { + t.Errorf("%d:%d:started: got %v; want %v", i, j, started, call.started) + } + if ctx.running != call.running { + t.Errorf("%d:%d:running: got %v; want %v", i, j, ctx.running, call.running) + } + if ctx.numWaiting != call.waiting { + t.Errorf("%d:%d:waiting: got %v; want %v", i, j, ctx.numWaiting, call.waiting) + } + } + } +} + +func TestTRun(t *T) { + realTest := t + testCases := []struct { + desc string + ok bool + maxPar int + chatty bool + output string + f func(*T) + }{{ + desc: "failnow skips future sequential and parallel tests at same level", + ok: false, + maxPar: 1, + output: ` +--- FAIL: failnow skips future sequential and parallel tests at same level (N.NNs) + --- FAIL: failnow skips future sequential and parallel tests at same level/#00 (N.NNs) + `, + f: func(t *T) { + ranSeq := false + ranPar := false + t.Run("", func(t *T) { + t.Run("par", func(t *T) { + t.Parallel() + ranPar = true + }) + t.Run("seq", func(t *T) { + ranSeq = true + }) + t.FailNow() + t.Run("seq", func(t *T) { + realTest.Error("test must be skipped") + }) + t.Run("par", func(t *T) { + t.Parallel() + realTest.Error("test must be skipped.") + }) + }) + if !ranPar { + realTest.Error("parallel test was not run") + } + if !ranSeq { + realTest.Error("sequential test was not run") + } + }, + }, { + desc: "failure in parallel 
test propagates upwards", + ok: false, + maxPar: 1, + output: ` +--- FAIL: failure in parallel test propagates upwards (N.NNs) + --- FAIL: failure in parallel test propagates upwards/#00 (N.NNs) + --- FAIL: failure in parallel test propagates upwards/#00/par (N.NNs) + `, + f: func(t *T) { + t.Run("", func(t *T) { + t.Parallel() + t.Run("par", func(t *T) { + t.Parallel() + t.Fail() + }) + }) + }, + }, { + desc: "skipping without message, chatty", + ok: true, + chatty: true, + output: ` +=== RUN skipping without message, chatty +--- SKIP: skipping without message, chatty (N.NNs)`, + f: func(t *T) { t.SkipNow() }, + }, { + desc: "chatty with recursion", + ok: true, + chatty: true, + output: ` +=== RUN chatty with recursion +=== RUN chatty with recursion/#00 +=== RUN chatty with recursion/#00/#00 +--- PASS: chatty with recursion (N.NNs) + --- PASS: chatty with recursion/#00 (N.NNs) + --- PASS: chatty with recursion/#00/#00 (N.NNs)`, + f: func(t *T) { + t.Run("", func(t *T) { + t.Run("", func(t *T) {}) + }) + }, + }, { + desc: "skipping without message, not chatty", + ok: true, + f: func(t *T) { t.SkipNow() }, + }, { + desc: "skipping after error", + output: ` +--- FAIL: skipping after error (N.NNs) + sub_test.go:NNN: an error + sub_test.go:NNN: skipped`, + f: func(t *T) { + t.Error("an error") + t.Skip("skipped") + }, + }, { + desc: "use Run to locally synchronize parallelism", + ok: true, + maxPar: 1, + f: func(t *T) { + var count uint32 + t.Run("waitGroup", func(t *T) { + for i := 0; i < 4; i++ { + t.Run("par", func(t *T) { + t.Parallel() + atomic.AddUint32(&count, 1) + }) + } + }) + if count != 4 { + t.Errorf("count was %d; want 4", count) + } + }, + }, { + desc: "alternate sequential and parallel", + // Sequential tests should partake in the counting of running threads. + // Otherwise, if one runs parallel subtests in sequential tests that are + // itself subtests of parallel tests, the counts can get askew. + ok: true, + maxPar: 1, + f: func(t *T) { + t.Run("a", func(t *T) { + t.Parallel() + t.Run("b", func(t *T) { + // Sequential: ensure running count is decremented. + t.Run("c", func(t *T) { + t.Parallel() + }) + + }) + }) + }, + }, { + desc: "alternate sequential and parallel 2", + // Sequential tests should partake in the counting of running threads. + // Otherwise, if one runs parallel subtests in sequential tests that are + // itself subtests of parallel tests, the counts can get askew. 
+ ok: true, + maxPar: 2, + f: func(t *T) { + for i := 0; i < 2; i++ { + t.Run("a", func(t *T) { + t.Parallel() + time.Sleep(time.Nanosecond) + for i := 0; i < 2; i++ { + t.Run("b", func(t *T) { + time.Sleep(time.Nanosecond) + for i := 0; i < 2; i++ { + t.Run("c", func(t *T) { + t.Parallel() + time.Sleep(time.Nanosecond) + }) + } + + }) + } + }) + } + }, + }, { + desc: "stress test", + ok: true, + maxPar: 4, + f: func(t *T) { + t.Parallel() + for i := 0; i < 12; i++ { + t.Run("a", func(t *T) { + t.Parallel() + time.Sleep(time.Nanosecond) + for i := 0; i < 12; i++ { + t.Run("b", func(t *T) { + time.Sleep(time.Nanosecond) + for i := 0; i < 12; i++ { + t.Run("c", func(t *T) { + t.Parallel() + time.Sleep(time.Nanosecond) + t.Run("d1", func(t *T) {}) + t.Run("d2", func(t *T) {}) + t.Run("d3", func(t *T) {}) + t.Run("d4", func(t *T) {}) + }) + } + }) + } + }) + } + }, + }, { + desc: "skip output", + ok: true, + maxPar: 4, + f: func(t *T) { + t.Skip() + }, + }, { + desc: "subtest calls error on parent", + ok: false, + output: ` +--- FAIL: subtest calls error on parent (N.NNs) + sub_test.go:NNN: first this + sub_test.go:NNN: and now this! + sub_test.go:NNN: oh, and this too`, + maxPar: 1, + f: func(t *T) { + t.Errorf("first this") + outer := t + t.Run("", func(t *T) { + outer.Errorf("and now this!") + }) + t.Errorf("oh, and this too") + }, + }, { + desc: "subtest calls fatal on parent", + ok: false, + output: ` +--- FAIL: subtest calls fatal on parent (N.NNs) + sub_test.go:NNN: first this + sub_test.go:NNN: and now this! + --- FAIL: subtest calls fatal on parent/#00 (N.NNs) + testing.go:NNN: test executed panic(nil) or runtime.Goexit: subtest may have called FailNow on a parent test`, + maxPar: 1, + f: func(t *T) { + outer := t + t.Errorf("first this") + t.Run("", func(t *T) { + outer.Fatalf("and now this!") + }) + t.Errorf("Should not reach here.") + }, + }, { + desc: "subtest calls error on ancestor", + ok: false, + output: ` +--- FAIL: subtest calls error on ancestor (N.NNs) + sub_test.go:NNN: Report to ancestor + --- FAIL: subtest calls error on ancestor/#00 (N.NNs) + sub_test.go:NNN: Still do this + sub_test.go:NNN: Also do this`, + maxPar: 1, + f: func(t *T) { + outer := t + t.Run("", func(t *T) { + t.Run("", func(t *T) { + outer.Errorf("Report to ancestor") + }) + t.Errorf("Still do this") + }) + t.Errorf("Also do this") + }, + }, { + desc: "subtest calls fatal on ancestor", + ok: false, + output: ` +--- FAIL: subtest calls fatal on ancestor (N.NNs) + sub_test.go:NNN: Nope`, + maxPar: 1, + f: func(t *T) { + outer := t + t.Run("", func(t *T) { + for i := 0; i < 4; i++ { + t.Run("", func(t *T) { + outer.Fatalf("Nope") + }) + t.Errorf("Don't do this") + } + t.Errorf("And neither do this") + }) + t.Errorf("Nor this") + }, + }, { + desc: "panic on goroutine fail after test exit", + ok: false, + maxPar: 4, + f: func(t *T) { + ch := make(chan bool) + t.Run("", func(t *T) { + go func() { + <-ch + defer func() { + if r := recover(); r == nil { + realTest.Errorf("expected panic") + } + ch <- true + }() + t.Errorf("failed after success") + }() + }) + ch <- true + <-ch + }, + }} + for _, tc := range testCases { + ctx := newTestContext(tc.maxPar, newMatcher(regexp.MatchString, "", "")) + buf := &bytes.Buffer{} + root := &T{ + common: common{ + signal: make(chan bool), + name: "Test", + w: buf, + chatty: tc.chatty, + }, + context: ctx, + } + ok := root.Run(tc.desc, tc.f) + ctx.release() + + if ok != tc.ok { + t.Errorf("%s:ok: got %v; want %v", tc.desc, ok, tc.ok) + } + if ok != !root.Failed() { + 
t.Errorf("%s:root failed: got %v; want %v", tc.desc, !ok, root.Failed()) + } + if ctx.running != 0 || ctx.numWaiting != 0 { + t.Errorf("%s:running and waiting non-zero: got %d and %d", tc.desc, ctx.running, ctx.numWaiting) + } + got := strings.TrimSpace(buf.String()) + want := strings.TrimSpace(tc.output) + re := makeRegexp(want) + if ok, err := regexp.MatchString(re, got); !ok || err != nil { + t.Errorf("%s:output:\ngot:\n%s\nwant:\n%s", tc.desc, got, want) + } + } +} + +func TestBRun(t *T) { + work := func(b *B) { + for i := 0; i < b.N; i++ { + time.Sleep(time.Nanosecond) + } + } + testCases := []struct { + desc string + failed bool + chatty bool + output string + f func(*B) + }{{ + desc: "simulate sequential run of subbenchmarks.", + f: func(b *B) { + b.Run("", func(b *B) { work(b) }) + time1 := b.result.NsPerOp() + b.Run("", func(b *B) { work(b) }) + time2 := b.result.NsPerOp() + if time1 >= time2 { + t.Errorf("no time spent in benchmark t1 >= t2 (%d >= %d)", time1, time2) + } + }, + }, { + desc: "bytes set by all benchmarks", + f: func(b *B) { + b.Run("", func(b *B) { b.SetBytes(10); work(b) }) + b.Run("", func(b *B) { b.SetBytes(10); work(b) }) + if b.result.Bytes != 20 { + t.Errorf("bytes: got: %d; want 20", b.result.Bytes) + } + }, + }, { + desc: "bytes set by some benchmarks", + // In this case the bytes result is meaningless, so it must be 0. + f: func(b *B) { + b.Run("", func(b *B) { b.SetBytes(10); work(b) }) + b.Run("", func(b *B) { work(b) }) + b.Run("", func(b *B) { b.SetBytes(10); work(b) }) + if b.result.Bytes != 0 { + t.Errorf("bytes: got: %d; want 0", b.result.Bytes) + } + }, + }, { + desc: "failure carried over to root", + failed: true, + output: "--- FAIL: root", + f: func(b *B) { b.Fail() }, + }, { + desc: "skipping without message, chatty", + chatty: true, + output: "--- SKIP: root", + f: func(b *B) { b.SkipNow() }, + }, { + desc: "skipping with message, chatty", + chatty: true, + output: ` +--- SKIP: root + sub_test.go:NNN: skipping`, + f: func(b *B) { b.Skip("skipping") }, + }, { + desc: "chatty with recursion", + chatty: true, + f: func(b *B) { + b.Run("", func(b *B) { + b.Run("", func(b *B) {}) + }) + }, + }, { + desc: "skipping without message, not chatty", + f: func(b *B) { b.SkipNow() }, + }, { + desc: "skipping after error", + failed: true, + output: ` +--- FAIL: root + sub_test.go:NNN: an error + sub_test.go:NNN: skipped`, + f: func(b *B) { + b.Error("an error") + b.Skip("skipped") + }, + }, { + desc: "memory allocation", + f: func(b *B) { + const bufSize = 256 + alloc := func(b *B) { + var buf [bufSize]byte + for i := 0; i < b.N; i++ { + _ = append([]byte(nil), buf[:]...) + } + } + b.Run("", func(b *B) { + alloc(b) + b.ReportAllocs() + }) + b.Run("", func(b *B) { + alloc(b) + b.ReportAllocs() + }) + // runtime.MemStats sometimes reports more allocations than the + // benchmark is responsible for. Luckily the point of this test is + // to ensure that the results are not underreported, so we can + // simply verify the lower bound. + if got := b.result.MemAllocs; got < 2 { + t.Errorf("MemAllocs was %v; want 2", got) + } + if got := b.result.MemBytes; got < 2*bufSize { + t.Errorf("MemBytes was %v; want %v", got, 2*bufSize) + } + }, + }} + for _, tc := range testCases { + var ok bool + buf := &bytes.Buffer{} + // This is almost like the Benchmark function, except that we override + // the benchtime and catch the failure result of the subbenchmark. 
+ root := &B{ + common: common{ + signal: make(chan bool), + name: "root", + w: buf, + chatty: tc.chatty, + }, + benchFunc: func(b *B) { ok = b.Run("test", tc.f) }, // Use Run to catch failure. + benchTime: time.Microsecond, + } + root.runN(1) + if ok != !tc.failed { + t.Errorf("%s:ok: got %v; want %v", tc.desc, ok, !tc.failed) + } + if !ok != root.Failed() { + t.Errorf("%s:root failed: got %v; want %v", tc.desc, !ok, root.Failed()) + } + // All tests are run as subtests + if root.result.N != 1 { + t.Errorf("%s: N for parent benchmark was %d; want 1", tc.desc, root.result.N) + } + got := strings.TrimSpace(buf.String()) + want := strings.TrimSpace(tc.output) + re := makeRegexp(want) + if ok, err := regexp.MatchString(re, got); !ok || err != nil { + t.Errorf("%s:output:\ngot:\n%s\nwant:\n%s", tc.desc, got, want) + } + } +} + +func makeRegexp(s string) string { + s = regexp.QuoteMeta(s) + s = strings.Replace(s, ":NNN:", `:\d\d\d:`, -1) + s = strings.Replace(s, "N\\.NNs", `\d*\.\d*s`, -1) + return s +} + +func TestBenchmarkOutput(t *T) { + // Ensure Benchmark initialized common.w by invoking it with an error and + // normal case. + Benchmark(func(b *B) { b.Error("do not print this output") }) + Benchmark(func(b *B) {}) +} + +func TestBenchmarkStartsFrom1(t *T) { + var first = true + Benchmark(func(b *B) { + if first && b.N != 1 { + panic(fmt.Sprintf("Benchmark() first N=%v; want 1", b.N)) + } + first = false + }) +} + +func TestBenchmarkReadMemStatsBeforeFirstRun(t *T) { + var first = true + Benchmark(func(b *B) { + if first && (b.startAllocs == 0 || b.startBytes == 0) { + panic(fmt.Sprintf("ReadMemStats not called before first run")) + } + first = false + }) +} + +func TestParallelSub(t *T) { + c := make(chan int) + block := make(chan int) + for i := 0; i < 10; i++ { + go func(i int) { + <-block + t.Run(fmt.Sprint(i), func(t *T) {}) + c <- 1 + }(i) + } + close(block) + for i := 0; i < 10; i++ { + <-c + } +} + +type funcWriter func([]byte) (int, error) + +func (fw funcWriter) Write(b []byte) (int, error) { return fw(b) } + +func TestRacyOutput(t *T) { + var runs int32 // The number of running Writes + var races int32 // Incremented for each race detected + raceDetector := func(b []byte) (int, error) { + // Check if some other goroutine is concurrently calling Write. + if atomic.LoadInt32(&runs) > 0 { + atomic.AddInt32(&races, 1) // Race detected! + } + atomic.AddInt32(&runs, 1) + defer atomic.AddInt32(&runs, -1) + runtime.Gosched() // Increase probability of a race + return len(b), nil + } + + var wg sync.WaitGroup + root := &T{ + common: common{w: funcWriter(raceDetector), chatty: true}, + context: newTestContext(1, newMatcher(regexp.MatchString, "", "")), + } + root.Run("", func(t *T) { + for i := 0; i < 100; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + t.Run(fmt.Sprint(i), func(t *T) { + t.Logf("testing run %d", i) + }) + }(i) + } + }) + wg.Wait() + + if races > 0 { + t.Errorf("detected %d racy Writes", races) + } +} + +func TestBenchmark(t *T) { + res := Benchmark(func(b *B) { + for i := 0; i < 5; i++ { + b.Run("", func(b *B) { + for i := 0; i < b.N; i++ { + time.Sleep(time.Millisecond) + } + }) + } + }) + if res.NsPerOp() < 4000000 { + t.Errorf("want >5ms; got %v", time.Duration(res.NsPerOp())) + } +} diff --git a/Source/Gold/testing/testing.go b/Source/Gold/testing/testing.go new file mode 100644 index 0000000..05fb48c --- /dev/null +++ b/Source/Gold/testing/testing.go @@ -0,0 +1,1328 @@ +// Copyright 2009 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package testing provides support for automated testing of Go packages. +// It is intended to be used in concert with the ``go test'' command, which automates +// execution of any function of the form +// func TestXxx(*testing.T) +// where Xxx does not start with a lowercase letter. The function name +// serves to identify the test routine. +// +// Within these functions, use the Error, Fail or related methods to signal failure. +// +// To write a new test suite, create a file whose name ends _test.go that +// contains the TestXxx functions as described here. Put the file in the same +// package as the one being tested. The file will be excluded from regular +// package builds but will be included when the ``go test'' command is run. +// For more detail, run ``go help test'' and ``go help testflag''. +// +// Tests and benchmarks may be skipped if not applicable with a call to +// the Skip method of *T and *B: +// func TestTimeConsuming(t *testing.T) { +// if testing.Short() { +// t.Skip("skipping test in short mode.") +// } +// ... +// } +// +// Benchmarks +// +// Functions of the form +// func BenchmarkXxx(*testing.B) +// are considered benchmarks, and are executed by the "go test" command when +// its -bench flag is provided. Benchmarks are run sequentially. +// +// For a description of the testing flags, see +// https://golang.org/cmd/go/#hdr-Testing_flags +// +// A sample benchmark function looks like this: +// func BenchmarkHello(b *testing.B) { +// for i := 0; i < b.N; i++ { +// fmt.Sprintf("hello") +// } +// } +// +// The benchmark function must run the target code b.N times. +// During benchmark execution, b.N is adjusted until the benchmark function lasts +// long enough to be timed reliably. The output +// BenchmarkHello 10000000 282 ns/op +// means that the loop ran 10000000 times at a speed of 282 ns per loop. +// +// If a benchmark needs some expensive setup before running, the timer +// may be reset: +// +// func BenchmarkBigLen(b *testing.B) { +// big := NewBig() +// b.ResetTimer() +// for i := 0; i < b.N; i++ { +// big.Len() +// } +// } +// +// If a benchmark needs to test performance in a parallel setting, it may use +// the RunParallel helper function; such benchmarks are intended to be used with +// the go test -cpu flag: +// +// func BenchmarkTemplateParallel(b *testing.B) { +// templ := template.Must(template.New("test").Parse("Hello, {{.}}!")) +// b.RunParallel(func(pb *testing.PB) { +// var buf bytes.Buffer +// for pb.Next() { +// buf.Reset() +// templ.Execute(&buf, "World") +// } +// }) +// } +// +// Examples +// +// The package also runs and verifies example code. Example functions may +// include a concluding line comment that begins with "Output:" and is compared with +// the standard output of the function when the tests are run. (The comparison +// ignores leading and trailing space.) 
These are examples of an example: +// +// func ExampleHello() { +// fmt.Println("hello") +// // Output: hello +// } +// +// func ExampleSalutations() { +// fmt.Println("hello, and") +// fmt.Println("goodbye") +// // Output: +// // hello, and +// // goodbye +// } +// +// The comment prefix "Unordered output:" is like "Output:", but matches any +// line order: +// +// func ExamplePerm() { +// for _, value := range Perm(4) { +// fmt.Println(value) +// } +// // Unordered output: 4 +// // 2 +// // 1 +// // 3 +// // 0 +// } +// +// Example functions without output comments are compiled but not executed. +// +// The naming convention to declare examples for the package, a function F, a type T and +// method M on type T are: +// +// func Example() { ... } +// func ExampleF() { ... } +// func ExampleT() { ... } +// func ExampleT_M() { ... } +// +// Multiple example functions for a package/type/function/method may be provided by +// appending a distinct suffix to the name. The suffix must start with a +// lower-case letter. +// +// func Example_suffix() { ... } +// func ExampleF_suffix() { ... } +// func ExampleT_suffix() { ... } +// func ExampleT_M_suffix() { ... } +// +// The entire test file is presented as the example when it contains a single +// example function, at least one other function, type, variable, or constant +// declaration, and no test or benchmark functions. +// +// Subtests and Sub-benchmarks +// +// The Run methods of T and B allow defining subtests and sub-benchmarks, +// without having to define separate functions for each. This enables uses +// like table-driven benchmarks and creating hierarchical tests. +// It also provides a way to share common setup and tear-down code: +// +// func TestFoo(t *testing.T) { +// // +// t.Run("A=1", func(t *testing.T) { ... }) +// t.Run("A=2", func(t *testing.T) { ... }) +// t.Run("B=1", func(t *testing.T) { ... }) +// // +// } +// +// Each subtest and sub-benchmark has a unique name: the combination of the name +// of the top-level test and the sequence of names passed to Run, separated by +// slashes, with an optional trailing sequence number for disambiguation. +// +// The argument to the -run and -bench command-line flags is an unanchored regular +// expression that matches the test's name. For tests with multiple slash-separated +// elements, such as subtests, the argument is itself slash-separated, with +// expressions matching each name element in turn. Because it is unanchored, an +// empty expression matches any string. +// For example, using "matching" to mean "whose name contains": +// +// go test -run '' # Run all tests. +// go test -run Foo # Run top-level tests matching "Foo", such as "TestFooBar". +// go test -run Foo/A= # For top-level tests matching "Foo", run subtests matching "A=". +// go test -run /A=1 # For all top-level tests, run subtests matching "A=1". +// +// Subtests can also be used to control parallelism. A parent test will only +// complete once all of its subtests complete. In this example, all tests are +// run in parallel with each other, and only with each other, regardless of +// other top-level tests that may be defined: +// +// func TestGroupedParallel(t *testing.T) { +// for _, tc := range tests { +// tc := tc // capture range variable +// t.Run(tc.Name, func(t *testing.T) { +// t.Parallel() +// ... +// }) +// } +// } +// +// The race detector kills the program if it exceeds 8192 concurrent goroutines, +// so use care when running parallel tests with the -race flag set. 
+// +// Run does not return until parallel subtests have completed, providing a way +// to clean up after a group of parallel tests: +// +// func TestTeardownParallel(t *testing.T) { +// // This Run will not return until the parallel tests finish. +// t.Run("group", func(t *testing.T) { +// t.Run("Test1", parallelTest1) +// t.Run("Test2", parallelTest2) +// t.Run("Test3", parallelTest3) +// }) +// // +// } +// +// Main +// +// It is sometimes necessary for a test program to do extra setup or teardown +// before or after testing. It is also sometimes necessary for a test to control +// which code runs on the main thread. To support these and other cases, +// if a test file contains a function: +// +// func TestMain(m *testing.M) +// +// then the generated test will call TestMain(m) instead of running the tests +// directly. TestMain runs in the main goroutine and can do whatever setup +// and teardown is necessary around a call to m.Run. It should then call +// os.Exit with the result of m.Run. When TestMain is called, flag.Parse has +// not been run. If TestMain depends on command-line flags, including those +// of the testing package, it should call flag.Parse explicitly. +// +// A simple implementation of TestMain is: +// +// func TestMain(m *testing.M) { +// // call flag.Parse() here if TestMain uses flags +// os.Exit(m.Run()) +// } +// +package testing + +import ( + "bytes" + "errors" + "flag" + "fmt" + "internal/race" + "io" + "os" + "runtime" + "runtime/debug" + "runtime/trace" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" +) + +var ( + // The short flag requests that tests run more quickly, but its functionality + // is provided by test writers themselves. The testing package is just its + // home. The all.bash installation script sets it to make installation more + // efficient, but by default the flag is off so a plain "go test" will do a + // full test of the package. + short = flag.Bool("test.short", false, "run smaller test suite to save time") + + // The failfast flag requests that test execution stop after the first test failure. + failFast = flag.Bool("test.failfast", false, "do not start new tests after the first test failure") + + // The directory in which to create profile files and the like. When run from + // "go test", the binary always runs in the source directory for the package; + // this flag lets "go test" tell the binary to write the files in the directory where + // the "go test" command is run. + outputDir = flag.String("test.outputdir", "", "write profiles to `dir`") + + // Report as tests are run; default is silent for success. 
+ chatty = flag.Bool("test.v", false, "verbose: print additional output") + count = flag.Uint("test.count", 1, "run tests and benchmarks `n` times") + coverProfile = flag.String("test.coverprofile", "", "write a coverage profile to `file`") + matchList = flag.String("test.list", "", "list tests, examples, and benchmarks matching `regexp` then exit") + match = flag.String("test.run", "", "run only tests and examples matching `regexp`") + memProfile = flag.String("test.memprofile", "", "write an allocation profile to `file`") + memProfileRate = flag.Int("test.memprofilerate", 0, "set memory allocation profiling `rate` (see runtime.MemProfileRate)") + cpuProfile = flag.String("test.cpuprofile", "", "write a cpu profile to `file`") + blockProfile = flag.String("test.blockprofile", "", "write a goroutine blocking profile to `file`") + blockProfileRate = flag.Int("test.blockprofilerate", 1, "set blocking profile `rate` (see runtime.SetBlockProfileRate)") + mutexProfile = flag.String("test.mutexprofile", "", "write a mutex contention profile to the named file after execution") + mutexProfileFraction = flag.Int("test.mutexprofilefraction", 1, "if >= 0, calls runtime.SetMutexProfileFraction()") + traceFile = flag.String("test.trace", "", "write an execution trace to `file`") + timeout = flag.Duration("test.timeout", 0, "panic test binary after duration `d` (default 0, timeout disabled)") + cpuListStr = flag.String("test.cpu", "", "comma-separated `list` of cpu counts to run each test with") + parallel = flag.Int("test.parallel", runtime.GOMAXPROCS(0), "run at most `n` tests in parallel") + testlog = flag.String("test.testlogfile", "", "write test action log to `file` (for use only by cmd/go)") + + haveExamples bool // are there examples? + + cpuList []int + testlogFile *os.File + + numFailed uint32 // number of test failures +) + +// The maximum number of stack frames to go through when skipping helper functions for +// the purpose of decorating log messages. +const maxStackLen = 50 + +// common holds the elements common between T and B and +// captures common methods such as Errorf. +type common struct { + mu sync.RWMutex // guards this group of fields + output []byte // Output generated by test or benchmark. + w io.Writer // For flushToParent. + ran bool // Test or benchmark (or one of its subtests) was executed. + failed bool // Test or benchmark has failed. + skipped bool // Test of benchmark has been skipped. + done bool // Test is finished and all subtests have completed. + helpers map[string]struct{} // functions to be skipped when writing file/line info + + chatty bool // A copy of the chatty flag. + finished bool // Test function has completed. + hasSub int32 // written atomically + raceErrors int // number of races detected during test + runner string // function name of tRunner running the test + + parent *common + level int // Nesting depth of test or benchmark. + creator []uintptr // If level > 0, the stack trace at the point where the parent called t.Run. + name string // Name of test or benchmark. + start time.Time // Time test or benchmark started + duration time.Duration + barrier chan bool // To signal parallel subtests they may start. + signal chan bool // To signal a test is done. + sub []*T // Queue of subtests to be run in parallel. +} + +// Short reports whether the -test.short flag is set. +func Short() bool { + return *short +} + +// CoverMode reports what the test coverage mode is set to. The +// values are "set", "count", or "atomic". 
The return value will be +// empty if test coverage is not enabled. +func CoverMode() string { + return cover.Mode +} + +// Verbose reports whether the -test.v flag is set. +func Verbose() bool { + return *chatty +} + +// frameSkip searches, starting after skip frames, for the first caller frame +// in a function not marked as a helper and returns that frame. +// The search stops if it finds a tRunner function that +// was the entry point into the test and the test is not a subtest. +// This function must be called with c.mu held. +func (c *common) frameSkip(skip int) runtime.Frame { + // If the search continues into the parent test, we'll have to hold + // its mu temporarily. If we then return, we need to unlock it. + shouldUnlock := false + defer func() { + if shouldUnlock { + c.mu.Unlock() + } + }() + var pc [maxStackLen]uintptr + // Skip two extra frames to account for this function + // and runtime.Callers itself. + n := runtime.Callers(skip+2, pc[:]) + if n == 0 { + panic("testing: zero callers found") + } + frames := runtime.CallersFrames(pc[:n]) + var firstFrame, prevFrame, frame runtime.Frame + for more := true; more; prevFrame = frame { + frame, more = frames.Next() + if firstFrame.PC == 0 { + firstFrame = frame + } + if frame.Function == c.runner { + // We've gone up all the way to the tRunner calling + // the test function (so the user must have + // called tb.Helper from inside that test function). + // If this is a top-level test, only skip up to the test function itself. + // If we're in a subtest, continue searching in the parent test, + // starting from the point of the call to Run which created this subtest. + if c.level > 1 { + frames = runtime.CallersFrames(c.creator) + parent := c.parent + // We're no longer looking at the current c after this point, + // so we should unlock its mu, unless it's the original receiver, + // in which case our caller doesn't expect us to do that. + if shouldUnlock { + c.mu.Unlock() + } + c = parent + // Remember to unlock c.mu when we no longer need it, either + // because we went up another nesting level, or because we + // returned. + shouldUnlock = true + c.mu.Lock() + continue + } + return prevFrame + } + if _, ok := c.helpers[frame.Function]; !ok { + // Found a frame that wasn't inside a helper function. + return frame + } + } + return firstFrame +} + +// decorate prefixes the string with the file and line of the call site +// and inserts the final newline if needed and indentation spaces for formatting. +// This function must be called with c.mu held. +func (c *common) decorate(s string) string { + frame := c.frameSkip(3) // decorate + log + public function. + file := frame.File + line := frame.Line + if file != "" { + // Truncate file name at last file name separator. + if index := strings.LastIndex(file, "/"); index >= 0 { + file = file[index+1:] + } else if index = strings.LastIndex(file, "\\"); index >= 0 { + file = file[index+1:] + } + } else { + file = "???" + } + if line == 0 { + line = 1 + } + buf := new(strings.Builder) + // Every line is indented at least 4 spaces. + buf.WriteString(" ") + fmt.Fprintf(buf, "%s:%d: ", file, line) + lines := strings.Split(s, "\n") + if l := len(lines); l > 1 && lines[l-1] == "" { + lines = lines[:l-1] + } + for i, line := range lines { + if i > 0 { + // Second and subsequent lines are indented an additional 4 spaces. 
+ buf.WriteString("\n ") + } + buf.WriteString(line) + } + buf.WriteByte('\n') + return buf.String() +} + +// flushToParent writes c.output to the parent after first writing the header +// with the given format and arguments. +func (c *common) flushToParent(format string, args ...interface{}) { + p := c.parent + p.mu.Lock() + defer p.mu.Unlock() + + fmt.Fprintf(p.w, format, args...) + + c.mu.Lock() + defer c.mu.Unlock() + io.Copy(p.w, bytes.NewReader(c.output)) + c.output = c.output[:0] +} + +type indenter struct { + c *common +} + +func (w indenter) Write(b []byte) (n int, err error) { + n = len(b) + for len(b) > 0 { + end := bytes.IndexByte(b, '\n') + if end == -1 { + end = len(b) + } else { + end++ + } + // An indent of 4 spaces will neatly align the dashes with the status + // indicator of the parent. + const indent = " " + w.c.output = append(w.c.output, indent...) + w.c.output = append(w.c.output, b[:end]...) + b = b[end:] + } + return +} + +// fmtDuration returns a string representing d in the form "87.00s". +func fmtDuration(d time.Duration) string { + return fmt.Sprintf("%.2fs", d.Seconds()) +} + +// TB is the interface common to T and B. +type TB interface { + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + Fail() + FailNow() + Failed() bool + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Log(args ...interface{}) + Logf(format string, args ...interface{}) + Name() string + Skip(args ...interface{}) + SkipNow() + Skipf(format string, args ...interface{}) + Skipped() bool + Helper() + + // A private method to prevent users implementing the + // interface and so future additions to it will not + // violate Go 1 compatibility. + private() +} + +//var _ TB = (*T)(nil) +//var _ TB = (*B)(nil) + +// T is a type passed to Test functions to manage test state and support formatted test logs. +// Logs are accumulated during execution and dumped to standard output when done. +// +// A test ends when its Test function returns or calls any of the methods +// FailNow, Fatal, Fatalf, SkipNow, Skip, or Skipf. Those methods, as well as +// the Parallel method, must be called only from the goroutine running the +// Test function. +// +// The other reporting methods, such as the variations of Log and Error, +// may be called simultaneously from multiple goroutines. +type T struct { + common + isParallel bool + context *testContext // For running tests and subtests. +} + +func (c *common) private() {} + +// Name returns the name of the running test or benchmark. +func (c *common) Name() string { + return c.name +} + +func (c *common) setRan() { + if c.parent != nil { + c.parent.setRan() + } + c.mu.Lock() + defer c.mu.Unlock() + c.ran = true +} + +// Fail marks the function as having failed but continues execution. +func (c *common) Fail() { + if c.parent != nil { + c.parent.Fail() + } + c.mu.Lock() + defer c.mu.Unlock() + // c.done needs to be locked to synchronize checks to c.done in parent tests. + if c.done { + panic("Fail in goroutine after " + c.name + " has completed") + } + c.failed = true +} + +// Failed reports whether the function has failed. +func (c *common) Failed() bool { + c.mu.RLock() + failed := c.failed + c.mu.RUnlock() + return failed || c.raceErrors+race.Errors() > 0 +} + +// FailNow marks the function as having failed and stops its execution +// by calling runtime.Goexit (which then runs all deferred calls in the +// current goroutine). +// Execution will continue at the next test or benchmark. 
+// FailNow must be called from the goroutine running the +// test or benchmark function, not from other goroutines +// created during the test. Calling FailNow does not stop +// those other goroutines. +func (c *common) FailNow() { + c.Fail() + + // Calling runtime.Goexit will exit the goroutine, which + // will run the deferred functions in this goroutine, + // which will eventually run the deferred lines in tRunner, + // which will signal to the test loop that this test is done. + // + // A previous version of this code said: + // + // c.duration = ... + // c.signal <- c.self + // runtime.Goexit() + // + // This previous version duplicated code (those lines are in + // tRunner no matter what), but worse the goroutine teardown + // implicit in runtime.Goexit was not guaranteed to complete + // before the test exited. If a test deferred an important cleanup + // function (like removing temporary files), there was no guarantee + // it would run on a test failure. Because we send on c.signal during + // a top-of-stack deferred function now, we know that the send + // only happens after any other stacked defers have completed. + c.finished = true + runtime.Goexit() +} + +// log generates the output. It's always at the same stack depth. +func (c *common) log(s string) { + c.mu.Lock() + defer c.mu.Unlock() + c.output = append(c.output, c.decorate(s)...) +} + +// Log formats its arguments using default formatting, analogous to Println, +// and records the text in the error log. For tests, the text will be printed only if +// the test fails or the -test.v flag is set. For benchmarks, the text is always +// printed to avoid having performance depend on the value of the -test.v flag. +func (c *common) Log(args ...interface{}) { c.log(fmt.Sprintln(args...)) } + +// Logf formats its arguments according to the format, analogous to Printf, and +// records the text in the error log. A final newline is added if not provided. For +// tests, the text will be printed only if the test fails or the -test.v flag is +// set. For benchmarks, the text is always printed to avoid having performance +// depend on the value of the -test.v flag. +func (c *common) Logf(format string, args ...interface{}) { c.log(fmt.Sprintf(format, args...)) } + +// Error is equivalent to Log followed by Fail. +func (c *common) Error(args ...interface{}) { + c.log(fmt.Sprintln(args...)) + c.Fail() +} + +// Errorf is equivalent to Logf followed by Fail. +func (c *common) Errorf(format string, args ...interface{}) { + c.log(fmt.Sprintf(format, args...)) + c.Fail() +} + +// Fatal is equivalent to Log followed by FailNow. +func (c *common) Fatal(args ...interface{}) { + c.log(fmt.Sprintln(args...)) + c.FailNow() +} + +// Fatalf is equivalent to Logf followed by FailNow. +func (c *common) Fatalf(format string, args ...interface{}) { + c.log(fmt.Sprintf(format, args...)) + c.FailNow() +} + +// Skip is equivalent to Log followed by SkipNow. +func (c *common) Skip(args ...interface{}) { + c.log(fmt.Sprintln(args...)) + c.SkipNow() +} + +// Skipf is equivalent to Logf followed by SkipNow. +func (c *common) Skipf(format string, args ...interface{}) { + c.log(fmt.Sprintf(format, args...)) + c.SkipNow() +} + +// SkipNow marks the test as having been skipped and stops its execution +// by calling runtime.Goexit. +// If a test fails (see Error, Errorf, Fail) and is then skipped, +// it is still considered to have failed. +// Execution will continue at the next test or benchmark. See also FailNow. 
+// SkipNow must be called from the goroutine running the test, not from +// other goroutines created during the test. Calling SkipNow does not stop +// those other goroutines. +func (c *common) SkipNow() { + c.skip() + c.finished = true + runtime.Goexit() +} + +func (c *common) skip() { + c.mu.Lock() + defer c.mu.Unlock() + c.skipped = true +} + +// Skipped reports whether the test was skipped. +func (c *common) Skipped() bool { + c.mu.RLock() + defer c.mu.RUnlock() + return c.skipped +} + +// Helper marks the calling function as a test helper function. +// When printing file and line information, that function will be skipped. +// Helper may be called simultaneously from multiple goroutines. +func (c *common) Helper() { + c.mu.Lock() + defer c.mu.Unlock() + if c.helpers == nil { + c.helpers = make(map[string]struct{}) + } + c.helpers[callerName(1)] = struct{}{} +} + +// callerName gives the function name (qualified with a package path) +// for the caller after skip frames (where 0 means the current function). +func callerName(skip int) string { + // Make room for the skip PC. + var pc [2]uintptr + n := runtime.Callers(skip+2, pc[:]) // skip + runtime.Callers + callerName + if n == 0 { + panic("testing: zero callers found") + } + frames := runtime.CallersFrames(pc[:n]) + frame, _ := frames.Next() + return frame.Function +} + +// Parallel signals that this test is to be run in parallel with (and only with) +// other parallel tests. When a test is run multiple times due to use of +// -test.count or -test.cpu, multiple instances of a single test never run in +// parallel with each other. +func (t *T) Parallel() { + if t.isParallel { + panic("testing: t.Parallel called multiple times") + } + t.isParallel = true + + // We don't want to include the time we spend waiting for serial tests + // in the test duration. Record the elapsed time thus far and reset the + // timer afterwards. + t.duration += time.Since(t.start) + + // Add to the list of tests to be released by the parent. + t.parent.sub = append(t.parent.sub, t) + t.raceErrors += race.Errors() + + if t.chatty { + // Print directly to root's io.Writer so there is no delay. + root := t.parent + for ; root.parent != nil; root = root.parent { + } + root.mu.Lock() + fmt.Fprintf(root.w, "=== PAUSE %s\n", t.name) + root.mu.Unlock() + } + + t.signal <- true // Release calling test. + <-t.parent.barrier // Wait for the parent test to complete. + t.context.waitParallel() + + if t.chatty { + // Print directly to root's io.Writer so there is no delay. + root := t.parent + for ; root.parent != nil; root = root.parent { + } + root.mu.Lock() + fmt.Fprintf(root.w, "=== CONT %s\n", t.name) + root.mu.Unlock() + } + + t.start = time.Now() + t.raceErrors += -race.Errors() +} + +// An internal type but exported because it is cross-package; part of the implementation +// of the "go test" command. +type InternalTest struct { + Name string + F func(*T) +} + +var errNilPanicOrGoexit = errors.New("test executed panic(nil) or runtime.Goexit") + +func tRunner(t *T, fn func(t *T)) { + t.runner = callerName(0) + + // When this goroutine is done, either because fn(t) + // returned normally or because a test failure triggered + // a call to runtime.Goexit, record the duration and send + // a signal saying that the test is done. 
+ defer func() { + if t.Failed() { + atomic.AddUint32(&numFailed, 1) + } + + if t.raceErrors+race.Errors() > 0 { + t.Errorf("race detected during execution of test") + } + + t.duration += time.Since(t.start) + // If the test panicked, print any test output before dying. + err := recover() + signal := true + if !t.finished && err == nil { + err = errNilPanicOrGoexit + for p := t.parent; p != nil; p = p.parent { + if p.finished { + t.Errorf("%v: subtest may have called FailNow on a parent test", err) + err = nil + signal = false + break + } + } + } + if err != nil { + t.Fail() + t.report() + panic(err) + } + + if len(t.sub) > 0 { + // Run parallel subtests. + // Decrease the running count for this test. + t.context.release() + // Release the parallel subtests. + close(t.barrier) + // Wait for subtests to complete. + for _, sub := range t.sub { + <-sub.signal + } + if !t.isParallel { + // Reacquire the count for sequential tests. See comment in Run. + t.context.waitParallel() + } + } else if t.isParallel { + // Only release the count for this test if it was run as a parallel + // test. See comment in Run method. + t.context.release() + } + t.report() // Report after all subtests have finished. + + // Do not lock t.done to allow race detector to detect race in case + // the user does not appropriately synchronizes a goroutine. + t.done = true + if t.parent != nil && atomic.LoadInt32(&t.hasSub) == 0 { + t.setRan() + } + t.signal <- signal + }() + + t.start = time.Now() + t.raceErrors = -race.Errors() + fn(t) + + // code beyond here will not be executed when FailNow is invoked + t.finished = true +} + +// Run runs f as a subtest of t called name. It runs f in a separate goroutine +// and blocks until f returns or calls t.Parallel to become a parallel test. +// Run reports whether f succeeded (or at least did not fail before calling t.Parallel). +// +// Run may be called simultaneously from multiple goroutines, but all such calls +// must return before the outer test function for t returns. +func (t *T) Run(name string, f func(t *T)) bool { + atomic.StoreInt32(&t.hasSub, 1) + testName, ok, _ := t.context.match.fullName(&t.common, name) + if !ok || shouldFailFast() { + return true + } + // Record the stack trace at the point of this call so that if the subtest + // function - which runs in a separate stack - is marked as a helper, we can + // continue walking the stack into the parent test. + var pc [maxStackLen]uintptr + n := runtime.Callers(2, pc[:]) + t = &T{ + common: common{ + barrier: make(chan bool), + signal: make(chan bool), + name: testName, + parent: &t.common, + level: t.level + 1, + creator: pc[:n], + chatty: t.chatty, + }, + context: t.context, + } + t.w = indenter{&t.common} + + if t.chatty { + // Print directly to root's io.Writer so there is no delay. + root := t.parent + for ; root.parent != nil; root = root.parent { + } + root.mu.Lock() + fmt.Fprintf(root.w, "=== RUN %s\n", t.name) + root.mu.Unlock() + } + // Instead of reducing the running count of this test before calling the + // tRunner and increasing it afterwards, we rely on tRunner keeping the + // count correct. This ensures that a sequence of sequential tests runs + // without being preempted, even when their parent is a parallel test. This + // may especially reduce surprises if *parallel == 1. + go tRunner(t, f) + if !<-t.signal { + // At this point, it is likely that FailNow was called on one of the + // parent tests by one of the subtests. Continue aborting up the chain. 
+ runtime.Goexit() + } + return !t.failed +} + +// testContext holds all fields that are common to all tests. This includes +// synchronization primitives to run at most *parallel tests. +type testContext struct { + match *matcher + + mu sync.Mutex + + // Channel used to signal tests that are ready to be run in parallel. + startParallel chan bool + + // running is the number of tests currently running in parallel. + // This does not include tests that are waiting for subtests to complete. + running int + + // numWaiting is the number tests waiting to be run in parallel. + numWaiting int + + // maxParallel is a copy of the parallel flag. + maxParallel int +} + +func newTestContext(maxParallel int, m *matcher) *testContext { + return &testContext{ + match: m, + startParallel: make(chan bool), + maxParallel: maxParallel, + running: 1, // Set the count to 1 for the main (sequential) test. + } +} + +func (c *testContext) waitParallel() { + c.mu.Lock() + if c.running < c.maxParallel { + c.running++ + c.mu.Unlock() + return + } + c.numWaiting++ + c.mu.Unlock() + <-c.startParallel +} + +func (c *testContext) release() { + c.mu.Lock() + if c.numWaiting == 0 { + c.running-- + c.mu.Unlock() + return + } + c.numWaiting-- + c.mu.Unlock() + c.startParallel <- true // Pick a waiting test to be run. +} + +// No one should be using func Main anymore. +// See the doc comment on func Main and use MainStart instead. +var errMain = errors.New("testing: unexpected use of func Main") + +type matchStringOnly func(pat, str string) (bool, error) + +func (f matchStringOnly) MatchString(pat, str string) (bool, error) { return f(pat, str) } +func (f matchStringOnly) StartCPUProfile(w io.Writer) error { return errMain } +func (f matchStringOnly) StopCPUProfile() {} +func (f matchStringOnly) WriteProfileTo(string, io.Writer, int) error { return errMain } +func (f matchStringOnly) ImportPath() string { return "" } +func (f matchStringOnly) StartTestLog(io.Writer) {} +func (f matchStringOnly) StopTestLog() error { return errMain } + +// Main is an internal function, part of the implementation of the "go test" command. +// It was exported because it is cross-package and predates "internal" packages. +// It is no longer used by "go test" but preserved, as much as possible, for other +// systems that simulate "go test" using Main, but Main sometimes cannot be updated as +// new functionality is added to the testing package. +// Systems simulating "go test" should be updated to use MainStart. +func Main(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample) { + os.Exit(MainStart(matchStringOnly(matchString), tests, benchmarks, examples).Run()) +} + +// M is a type passed to a TestMain function to run the actual tests. +type M struct { + deps testDeps + tests []InternalTest + benchmarks []InternalBenchmark + examples []InternalExample + + timer *time.Timer + afterOnce sync.Once + + numRun int +} + +// testDeps is an internal interface of functionality that is +// passed into this package by a test's generated main package. +// The canonical implementation of this interface is +// testing/internal/testdeps's TestDeps. +type testDeps interface { + ImportPath() string + MatchString(pat, str string) (bool, error) + StartCPUProfile(io.Writer) error + StopCPUProfile() + StartTestLog(io.Writer) + StopTestLog() error + WriteProfileTo(string, io.Writer, int) error +} + +// MainStart is meant for use by tests generated by 'go test'. 
+// It is not meant to be called directly and is not subject to the Go 1 compatibility document. +// It may change signature from release to release. +func MainStart(deps testDeps, tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample) *M { + return &M{ + deps: deps, + tests: tests, + benchmarks: benchmarks, + examples: examples, + } +} + +// Run runs the tests. It returns an exit code to pass to os.Exit. +func (m *M) Run() int { + // Count the number of calls to m.Run. + // We only ever expected 1, but we didn't enforce that, + // and now there are tests in the wild that call m.Run multiple times. + // Sigh. golang.org/issue/23129. + m.numRun++ + + // TestMain may have already called flag.Parse. + if !flag.Parsed() { + flag.Parse() + } + + if *parallel < 1 { + fmt.Fprintln(os.Stderr, "testing: -parallel can only be given a positive integer") + flag.Usage() + return 2 + } + + if len(*matchList) != 0 { + listTests(m.deps.MatchString, m.tests, m.benchmarks, m.examples) + return 0 + } + + parseCpuList() + + m.before() + defer m.after() + m.startAlarm() + haveExamples = len(m.examples) > 0 + testRan, testOk := runTests(m.deps.MatchString, m.tests) + exampleRan, exampleOk := runExamples(m.deps.MatchString, m.examples) + m.stopAlarm() + if !testRan && !exampleRan && *matchBenchmarks == "" { + fmt.Fprintln(os.Stderr, "testing: warning: no tests to run") + } + if !testOk || !exampleOk || !runBenchmarks(m.deps.ImportPath(), m.deps.MatchString, m.benchmarks) || race.Errors() > 0 { + fmt.Println("FAIL") + return 1 + } + + fmt.Println("PASS") + return 0 +} + +func (t *T) report() { + if t.parent == nil { + return + } + dstr := fmtDuration(t.duration) + format := "--- %s: %s (%s)\n" + if t.Failed() { + t.flushToParent(format, "FAIL", t.name, dstr) + } else if t.chatty { + if t.Skipped() { + t.flushToParent(format, "SKIP", t.name, dstr) + } else { + t.flushToParent(format, "PASS", t.name, dstr) + } + } +} + +func listTests(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample) { + if _, err := matchString(*matchList, "non-empty"); err != nil { + fmt.Fprintf(os.Stderr, "testing: invalid regexp in -test.list (%q): %s\n", *matchList, err) + os.Exit(1) + } + + for _, test := range tests { + if ok, _ := matchString(*matchList, test.Name); ok { + fmt.Println(test.Name) + } + } + for _, bench := range benchmarks { + if ok, _ := matchString(*matchList, bench.Name); ok { + fmt.Println(bench.Name) + } + } + for _, example := range examples { + if ok, _ := matchString(*matchList, example.Name); ok { + fmt.Println(example.Name) + } + } +} + +// An internal function but exported because it is cross-package; part of the implementation +// of the "go test" command. 
+func RunTests(matchString func(pat, str string) (bool, error), tests []InternalTest) (ok bool) { + ran, ok := runTests(matchString, tests) + if !ran && !haveExamples { + fmt.Fprintln(os.Stderr, "testing: warning: no tests to run") + } + return ok +} + +func runTests(matchString func(pat, str string) (bool, error), tests []InternalTest) (ran, ok bool) { + ok = true + for _, procs := range cpuList { + runtime.GOMAXPROCS(procs) + for i := uint(0); i < *count; i++ { + if shouldFailFast() { + break + } + ctx := newTestContext(*parallel, newMatcher(matchString, *match, "-test.run")) + t := &T{ + common: common{ + signal: make(chan bool), + barrier: make(chan bool), + w: os.Stdout, + chatty: *chatty, + }, + context: ctx, + } + tRunner(t, func(t *T) { + for _, test := range tests { + t.Run(test.Name, test.F) + } + // Run catching the signal rather than the tRunner as a separate + // goroutine to avoid adding a goroutine during the sequential + // phase as this pollutes the stacktrace output when aborting. + go func() { <-t.signal }() + }) + ok = ok && !t.Failed() + ran = ran || t.ran + } + } + return ran, ok +} + +// before runs before all testing. +func (m *M) before() { + if *memProfileRate > 0 { + runtime.MemProfileRate = *memProfileRate + } + if *cpuProfile != "" { + f, err := os.Create(toOutputDir(*cpuProfile)) + if err != nil { + fmt.Fprintf(os.Stderr, "testing: %s\n", err) + return + } + if err := m.deps.StartCPUProfile(f); err != nil { + fmt.Fprintf(os.Stderr, "testing: can't start cpu profile: %s\n", err) + f.Close() + return + } + // Could save f so after can call f.Close; not worth the effort. + } + if *traceFile != "" { + f, err := os.Create(toOutputDir(*traceFile)) + if err != nil { + fmt.Fprintf(os.Stderr, "testing: %s\n", err) + return + } + if err := trace.Start(f); err != nil { + fmt.Fprintf(os.Stderr, "testing: can't start tracing: %s\n", err) + f.Close() + return + } + // Could save f so after can call f.Close; not worth the effort. + } + if *blockProfile != "" && *blockProfileRate >= 0 { + runtime.SetBlockProfileRate(*blockProfileRate) + } + if *mutexProfile != "" && *mutexProfileFraction >= 0 { + runtime.SetMutexProfileFraction(*mutexProfileFraction) + } + if *coverProfile != "" && cover.Mode == "" { + fmt.Fprintf(os.Stderr, "testing: cannot use -test.coverprofile because test binary was not built with coverage enabled\n") + os.Exit(2) + } + if *testlog != "" { + // Note: Not using toOutputDir. + // This file is for use by cmd/go, not users. + var f *os.File + var err error + if m.numRun == 1 { + f, err = os.Create(*testlog) + } else { + f, err = os.OpenFile(*testlog, os.O_WRONLY, 0) + if err == nil { + f.Seek(0, io.SeekEnd) + } + } + if err != nil { + fmt.Fprintf(os.Stderr, "testing: %s\n", err) + os.Exit(2) + } + m.deps.StartTestLog(f) + testlogFile = f + } +} + +// after runs after all testing. 
+func (m *M) after() { + m.afterOnce.Do(func() { + m.writeProfiles() + }) +} + +func (m *M) writeProfiles() { + if *testlog != "" { + if err := m.deps.StopTestLog(); err != nil { + fmt.Fprintf(os.Stderr, "testing: can't write %s: %s\n", *testlog, err) + os.Exit(2) + } + if err := testlogFile.Close(); err != nil { + fmt.Fprintf(os.Stderr, "testing: can't write %s: %s\n", *testlog, err) + os.Exit(2) + } + } + if *cpuProfile != "" { + m.deps.StopCPUProfile() // flushes profile to disk + } + if *traceFile != "" { + trace.Stop() // flushes trace to disk + } + if *memProfile != "" { + f, err := os.Create(toOutputDir(*memProfile)) + if err != nil { + fmt.Fprintf(os.Stderr, "testing: %s\n", err) + os.Exit(2) + } + runtime.GC() // materialize all statistics + if err = m.deps.WriteProfileTo("allocs", f, 0); err != nil { + fmt.Fprintf(os.Stderr, "testing: can't write %s: %s\n", *memProfile, err) + os.Exit(2) + } + f.Close() + } + if *blockProfile != "" && *blockProfileRate >= 0 { + f, err := os.Create(toOutputDir(*blockProfile)) + if err != nil { + fmt.Fprintf(os.Stderr, "testing: %s\n", err) + os.Exit(2) + } + if err = m.deps.WriteProfileTo("block", f, 0); err != nil { + fmt.Fprintf(os.Stderr, "testing: can't write %s: %s\n", *blockProfile, err) + os.Exit(2) + } + f.Close() + } + if *mutexProfile != "" && *mutexProfileFraction >= 0 { + f, err := os.Create(toOutputDir(*mutexProfile)) + if err != nil { + fmt.Fprintf(os.Stderr, "testing: %s\n", err) + os.Exit(2) + } + if err = m.deps.WriteProfileTo("mutex", f, 0); err != nil { + fmt.Fprintf(os.Stderr, "testing: can't write %s: %s\n", *blockProfile, err) + os.Exit(2) + } + f.Close() + } + if cover.Mode != "" { + coverReport() + } +} + +// toOutputDir returns the file name relocated, if required, to outputDir. +// Simple implementation to avoid pulling in path/filepath. +func toOutputDir(path string) string { + if *outputDir == "" || path == "" { + return path + } + if runtime.GOOS == "windows" { + // On Windows, it's clumsy, but we can be almost always correct + // by just looking for a drive letter and a colon. + // Absolute paths always have a drive letter (ignoring UNC). + // Problem: if path == "C:A" and outputdir == "C:\Go" it's unclear + // what to do, but even then path/filepath doesn't help. + // TODO: Worth doing better? Probably not, because we're here only + // under the management of go test. + if len(path) >= 2 { + letter, colon := path[0], path[1] + if ('a' <= letter && letter <= 'z' || 'A' <= letter && letter <= 'Z') && colon == ':' { + // If path starts with a drive letter we're stuck with it regardless. + return path + } + } + } + if os.IsPathSeparator(path[0]) { + return path + } + return fmt.Sprintf("%s%c%s", *outputDir, os.PathSeparator, path) +} + +// startAlarm starts an alarm if requested. +func (m *M) startAlarm() { + if *timeout > 0 { + m.timer = time.AfterFunc(*timeout, func() { + m.after() + debug.SetTraceback("all") + panic(fmt.Sprintf("test timed out after %v", *timeout)) + }) + } +} + +// stopAlarm turns off the alarm. 
+func (m *M) stopAlarm() { + if *timeout > 0 { + m.timer.Stop() + } +} + +func parseCpuList() { + for _, val := range strings.Split(*cpuListStr, ",") { + val = strings.TrimSpace(val) + if val == "" { + continue + } + cpu, err := strconv.Atoi(val) + if err != nil || cpu <= 0 { + fmt.Fprintf(os.Stderr, "testing: invalid value %q for -test.cpu\n", val) + os.Exit(1) + } + cpuList = append(cpuList, cpu) + } + if cpuList == nil { + cpuList = append(cpuList, runtime.GOMAXPROCS(-1)) + } +} + +func shouldFailFast() bool { + return *failFast && atomic.LoadUint32(&numFailed) > 0 +} \ No newline at end of file diff --git a/Source/Gold/testing/testing_test.go b/Source/Gold/testing/testing_test.go new file mode 100644 index 0000000..45e4468 --- /dev/null +++ b/Source/Gold/testing/testing_test.go @@ -0,0 +1,18 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package testing_test + +import ( + "os" + "testing" +) + +// This is exactly what a test would do without a TestMain. +// It's here only so that there is at least one package in the +// standard library with a TestMain, so that code is executed. + +func TestMain(m *testing.M) { + os.Exit(m.Run()) +}
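For context, here is a minimal, hypothetical usage sketch exercising the APIs ported in this patch — t.Run for subtests, t.Parallel for bounded concurrency, and t.Helper so that failure locations skip the helper frame (the frameSkip/decorate code above implements that skipping). The package, test, and helper names below are illustrative only and are not part of the patch itself.

package sample_test

import "testing"

// assertPositive is a hypothetical helper. Calling t.Helper() marks this
// frame so decorate/frameSkip attribute failures to the caller's line.
func assertPositive(t *testing.T, n int) {
	t.Helper()
	if n <= 0 {
		t.Fatalf("got %d, want > 0", n)
	}
}

func TestValues(t *testing.T) {
	cases := []struct {
		name string
		n    int
	}{
		{"one", 1},
		{"two", 2},
	}
	for _, c := range cases {
		c := c // capture the loop variable; the subtest body runs in its own goroutine
		t.Run(c.name, func(t *testing.T) {
			t.Parallel() // wait until the parent test function returns, then run subject to -test.parallel
			assertPositive(t, c.n)
		})
	}
}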
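Similarly, a slightly fuller TestMain than the minimal one in testing_test.go above: per the M.Run code earlier in this file, Run calls flag.Parse if TestMain has not already done so, and the int it returns is intended to be passed to os.Exit. The setup/teardown functions here are hypothetical placeholders, not part of the patch.

package sample_test

import (
	"os"
	"testing"
)

func TestMain(m *testing.M) {
	setup()         // hypothetical per-process fixture setup
	code := m.Run() // runs the tests (and examples/benchmarks, if any) and returns the exit code
	teardown()      // hypothetical cleanup that should run even when tests fail
	os.Exit(code)
}

func setup()    {}
func teardown() {}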