
Adjust ... third-party files

zhengkun, 1 year ago
Parent commit d58cfb9974
100 changed files with 17,092 additions and 0 deletions (each entry below lists additions, deletions, and the file path)
  1. 14 0
      data_tool/src/github.com/frankban/quicktest/.github/dependabot.yaml
  2. 25 0
      data_tool/src/github.com/frankban/quicktest/.github/workflows/ci.yaml
  3. 1 0
      data_tool/src/github.com/frankban/quicktest/.gitignore
  4. 13 0
      data_tool/src/github.com/frankban/quicktest/.godocdown.template
  5. 21 0
      data_tool/src/github.com/frankban/quicktest/LICENSE
  6. 347 0
      data_tool/src/github.com/frankban/quicktest/README.md
  7. 799 0
      data_tool/src/github.com/frankban/quicktest/checker.go
  8. 92 0
      data_tool/src/github.com/frankban/quicktest/checker_err.go
  9. 247 0
      data_tool/src/github.com/frankban/quicktest/checker_err_test.go
  10. 3108 0
      data_tool/src/github.com/frankban/quicktest/checker_test.go
  11. 89 0
      data_tool/src/github.com/frankban/quicktest/cleanup_test.go
  12. 31 0
      data_tool/src/github.com/frankban/quicktest/comment.go
  13. 25 0
      data_tool/src/github.com/frankban/quicktest/comment_test.go
  14. 42 0
      data_tool/src/github.com/frankban/quicktest/deferpanic_test.go
  15. 340 0
      data_tool/src/github.com/frankban/quicktest/doc.go
  16. 35 0
      data_tool/src/github.com/frankban/quicktest/error.go
  17. 56 0
      data_tool/src/github.com/frankban/quicktest/error_test.go
  18. 8 0
      data_tool/src/github.com/frankban/quicktest/export_test.go
  19. 91 0
      data_tool/src/github.com/frankban/quicktest/format.go
  20. 144 0
      data_tool/src/github.com/frankban/quicktest/format_test.go
  21. 8 0
      data_tool/src/github.com/frankban/quicktest/go.mod
  22. 10 0
      data_tool/src/github.com/frankban/quicktest/go.sum
  23. 55 0
      data_tool/src/github.com/frankban/quicktest/iter.go
  24. 29 0
      data_tool/src/github.com/frankban/quicktest/mapiter.go
  25. 72 0
      data_tool/src/github.com/frankban/quicktest/patch.go
  26. 42 0
      data_tool/src/github.com/frankban/quicktest/patch_go1.14.go
  27. 41 0
      data_tool/src/github.com/frankban/quicktest/patch_go1.14_test.go
  28. 27 0
      data_tool/src/github.com/frankban/quicktest/patch_go1.17.go
  29. 34 0
      data_tool/src/github.com/frankban/quicktest/patch_go1.17_test.go
  30. 120 0
      data_tool/src/github.com/frankban/quicktest/patch_test.go
  31. 122 0
      data_tool/src/github.com/frankban/quicktest/qtsuite/suite.go
  32. 147 0
      data_tool/src/github.com/frankban/quicktest/qtsuite/suite_test.go
  33. 370 0
      data_tool/src/github.com/frankban/quicktest/quicktest.go
  34. 751 0
      data_tool/src/github.com/frankban/quicktest/quicktest_test.go
  35. 88 0
      data_tool/src/github.com/frankban/quicktest/race_test.go
  36. 248 0
      data_tool/src/github.com/frankban/quicktest/report.go
  37. 183 0
      data_tool/src/github.com/frankban/quicktest/report_test.go
  38. 27 0
      data_tool/src/github.com/google/btree/.github/workflows/test.yml
  39. 202 0
      data_tool/src/github.com/google/btree/LICENSE
  40. 10 0
      data_tool/src/github.com/google/btree/README.md
  41. 893 0
      data_tool/src/github.com/google/btree/btree.go
  42. 1083 0
      data_tool/src/github.com/google/btree/btree_generic.go
  43. 764 0
      data_tool/src/github.com/google/btree/btree_generic_test.go
  44. 76 0
      data_tool/src/github.com/google/btree/btree_mem.go
  45. 792 0
      data_tool/src/github.com/google/btree/btree_test.go
  46. 17 0
      data_tool/src/github.com/google/btree/go.mod
  47. 10 0
      data_tool/src/github.com/kr/pretty/.github/dependabot.yml
  48. 17 0
      data_tool/src/github.com/kr/pretty/.github/workflows/build-test.yml
  49. 5 0
      data_tool/src/github.com/kr/pretty/.gitignore
  50. 19 0
      data_tool/src/github.com/kr/pretty/License
  51. 9 0
      data_tool/src/github.com/kr/pretty/Readme
  52. 295 0
      data_tool/src/github.com/kr/pretty/diff.go
  53. 257 0
      data_tool/src/github.com/kr/pretty/diff_test.go
  54. 19 0
      data_tool/src/github.com/kr/pretty/example_test.go
  55. 355 0
      data_tool/src/github.com/kr/pretty/formatter.go
  56. 339 0
      data_tool/src/github.com/kr/pretty/formatter_test.go
  57. 8 0
      data_tool/src/github.com/kr/pretty/go.mod
  58. 6 0
      data_tool/src/github.com/kr/pretty/go.sum
  59. 108 0
      data_tool/src/github.com/kr/pretty/pretty.go
  60. 41 0
      data_tool/src/github.com/kr/pretty/zero.go
  61. 10 0
      data_tool/src/github.com/kr/text/.github/dependabot.yml
  62. 25 0
      data_tool/src/github.com/kr/text/.github/workflows/go.yml
  63. 19 0
      data_tool/src/github.com/kr/text/License
  64. 3 0
      data_tool/src/github.com/kr/text/Readme
  65. 73 0
      data_tool/src/github.com/kr/text/cmd/agg/doc.go
  66. 112 0
      data_tool/src/github.com/kr/text/cmd/agg/main.go
  67. 99 0
      data_tool/src/github.com/kr/text/cmd/agg/num.go
  68. 74 0
      data_tool/src/github.com/kr/text/cmd/agg/string.go
  69. 5 0
      data_tool/src/github.com/kr/text/colwriter/Readme
  70. 147 0
      data_tool/src/github.com/kr/text/colwriter/column.go
  71. 90 0
      data_tool/src/github.com/kr/text/colwriter/column_test.go
  72. 3 0
      data_tool/src/github.com/kr/text/doc.go
  73. 5 0
      data_tool/src/github.com/kr/text/go.mod
  74. 2 0
      data_tool/src/github.com/kr/text/go.sum
  75. 74 0
      data_tool/src/github.com/kr/text/indent.go
  76. 119 0
      data_tool/src/github.com/kr/text/indent_test.go
  77. 9 0
      data_tool/src/github.com/kr/text/mc/Readme
  78. 63 0
      data_tool/src/github.com/kr/text/mc/mc.go
  79. 86 0
      data_tool/src/github.com/kr/text/wrap.go
  80. 63 0
      data_tool/src/github.com/kr/text/wrap_test.go
  81. 19 0
      data_tool/src/github.com/peterbourgon/diskv/LICENSE
  82. 191 0
      data_tool/src/github.com/peterbourgon/diskv/README.md
  83. 430 0
      data_tool/src/github.com/peterbourgon/diskv/basic_test.go
  84. 64 0
      data_tool/src/github.com/peterbourgon/diskv/compression.go
  85. 72 0
      data_tool/src/github.com/peterbourgon/diskv/compression_test.go
  86. 729 0
      data_tool/src/github.com/peterbourgon/diskv/diskv.go
  87. 41 0
      data_tool/src/github.com/peterbourgon/diskv/examples/advanced-transform/advanced-transform.go
  88. 63 0
      data_tool/src/github.com/peterbourgon/diskv/examples/content-addressable-store/cas.go
  89. 75 0
      data_tool/src/github.com/peterbourgon/diskv/examples/git-like-store/git-like-store.go
  90. 29 0
      data_tool/src/github.com/peterbourgon/diskv/examples/super-simple-store/super-simple-store.go
  91. 5 0
      data_tool/src/github.com/peterbourgon/diskv/go.mod
  92. 2 0
      data_tool/src/github.com/peterbourgon/diskv/go.sum
  93. 74 0
      data_tool/src/github.com/peterbourgon/diskv/import_test.go
  94. 115 0
      data_tool/src/github.com/peterbourgon/diskv/index.go
  95. 161 0
      data_tool/src/github.com/peterbourgon/diskv/index_test.go
  96. 191 0
      data_tool/src/github.com/peterbourgon/diskv/issues_test.go
  97. 229 0
      data_tool/src/github.com/peterbourgon/diskv/keys_test.go
  98. 151 0
      data_tool/src/github.com/peterbourgon/diskv/speed_test.go
  99. 117 0
      data_tool/src/github.com/peterbourgon/diskv/stream_test.go
  100. 26 0
      data_tool/src/github.com/rogpeppe/fastuuid/LICENSE

+ 14 - 0
data_tool/src/github.com/frankban/quicktest/.github/dependabot.yaml

@@ -0,0 +1,14 @@
+version: 2
+updates:
+
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      # Check for updates to GitHub Actions every weekday.
+      interval: "daily"
+
+  - package-ecosystem: "gomod"
+    directory: "/"
+    schedule:
+      # Check for updates to go modules every weekday.
+      interval: "daily"

+ 25 - 0
data_tool/src/github.com/frankban/quicktest/.github/workflows/ci.yaml

@@ -0,0 +1,25 @@
+name: CI
+on: [push, pull_request]
+
+jobs:
+  build_test:
+    name: Build and Test
+    strategy:
+      matrix:
+        go: ['1.13', '1.14', '1.15', '1.16', '1.17', '1.18', '1.19', '1.20']
+    runs-on: ubuntu-latest
+    steps:
+    - uses: actions/checkout@v3
+    - uses: actions/setup-go@v4
+      with:
+        go-version: ${{ matrix.go }}
+    - uses: actions/cache@v3
+      with:
+        path: ~/go/pkg/mod
+        key: ubuntu-go-${{ hashFiles('**/go.sum') }}
+        restore-keys: |
+          ubuntu-go-
+    - name: Test
+      run: go test -mod readonly -race ./...
+    - name: Test Verbose
+      run: go test -mod readonly -race -v ./...

+ 1 - 0
data_tool/src/github.com/frankban/quicktest/.gitignore

@@ -0,0 +1 @@
+.vscode

+ 13 - 0
data_tool/src/github.com/frankban/quicktest/.godocdown.template

@@ -0,0 +1,13 @@
+[![Go Reference](https://pkg.go.dev/badge/github.com/frankban/quicktest.svg)](https://pkg.go.dev/github.com/frankban/quicktest#section-documentation)
+[![Build Status](https://github.com/frankban/quicktest/actions/workflows/ci.yaml/badge.svg)](https://github.com/frankban/quicktest/actions/workflows/ci.yaml)
+
+[//]: # (Generated with: godocdown -template=.godocdown.template -o README.md && sed -i= 's/^# /### /' README.md )
+
+# quicktest
+
+`go get github.com/frankban/quicktest@latest`
+
+{{ .EmitSynopsis }}
+
+For a complete API reference, see the
+[package documentation](https://pkg.go.dev/github.com/frankban/quicktest#section-documentation).

+ 21 - 0
data_tool/src/github.com/frankban/quicktest/LICENSE

@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2017 Canonical Ltd.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.

+ 347 - 0
data_tool/src/github.com/frankban/quicktest/README.md

@@ -0,0 +1,347 @@
+[![Go Reference](https://pkg.go.dev/badge/github.com/frankban/quicktest.svg)](https://pkg.go.dev/github.com/frankban/quicktest#section-documentation)
+[![Build Status](https://github.com/frankban/quicktest/actions/workflows/ci.yaml/badge.svg)](https://github.com/frankban/quicktest/actions/workflows/ci.yaml)
+
+[//]: # (Generated with: godocdown -template=.godocdown.template -o README.md)
+
+### quicktest
+
+`go get github.com/frankban/quicktest@latest`
+
+Package quicktest provides a collection of Go helpers for writing tests.
+
+Quicktest helpers can be easily integrated inside regular Go tests, for
+instance:
+
+    import qt "github.com/frankban/quicktest"
+
+    func TestFoo(t *testing.T) {
+        t.Run("numbers", func(t *testing.T) {
+            c := qt.New(t)
+            numbers, err := somepackage.Numbers()
+            c.Assert(err, qt.IsNil)
+            c.Assert(numbers, qt.DeepEquals, []int{42, 47})
+        })
+        t.Run("bad wolf error", func(t *testing.T) {
+            c := qt.New(t)
+            numbers, err := somepackage.Numbers()
+            c.Assert(err, qt.ErrorMatches, "bad wolf")
+        })
+        t.Run("nil", func(t *testing.T) {
+            c := qt.New(t)
+            got := somepackage.MaybeNil()
+            c.Assert(got, qt.IsNil, qt.Commentf("value: %v", somepackage.Value))
+        })
+    }
+
+### Assertions
+
+An assertion looks like this, where qt.Equals could be replaced by any available
+checker. If the assertion fails, the underlying Fatal method is called to
+describe the error and abort the test.
+
+    c := qt.New(t)
+    c.Assert(someValue, qt.Equals, wantValue)
+
+If you don’t want to abort on failure, use Check instead, which calls Error
+instead of Fatal:
+
+    c.Check(someValue, qt.Equals, wantValue)
+
+For really short tests, the extra line for instantiating *qt.C can be avoided:
+
+    qt.Assert(t, someValue, qt.Equals, wantValue)
+    qt.Check(t, someValue, qt.Equals, wantValue)
+
+The library provides some base checkers like Equals, DeepEquals, Matches,
+ErrorMatches, IsNil and others. More can be added by implementing the Checker
+interface. Below, we list the checkers implemented by the package in
+alphabetical order.
+
+### All
+
+All returns a Checker that uses the given checker to check elements of slice or
+array or the values of a map. It succeeds if all elements pass the check. On
+failure it prints the error from the first index that failed.
+
+For example:
+
+    c.Assert([]int{3, 5, 8}, qt.All(qt.Not(qt.Equals)), 0)
+    c.Assert([][]string{{"a", "b"}, {"a", "b"}}, qt.All(qt.DeepEquals), []string{"c", "d"})
+
+See also Any and Contains.
+
+### Any
+
+Any returns a Checker that uses the given checker to check elements of a slice
+or array or the values from a map. It succeeds if any element passes the check.
+
+For example:
+
+    c.Assert([]int{3,5,7,99}, qt.Any(qt.Equals), 7)
+    c.Assert([][]string{{"a", "b"}, {"c", "d"}}, qt.Any(qt.DeepEquals), []string{"c", "d"})
+
+See also All and Contains.
+
+### CmpEquals
+
+CmpEquals checks equality of two arbitrary values according to the provided
+compare options. DeepEquals is more commonly used when no compare options are
+required.
+
+Example calls:
+
+    c.Assert(list, qt.CmpEquals(cmpopts.SortSlices), []int{42, 47})
+    c.Assert(got, qt.CmpEquals(), []int{42, 47}) // Same as qt.DeepEquals.
+
+### CodecEquals
+
+CodecEquals returns a checker that checks for codec value equivalence.
+
+    func CodecEquals(
+        marshal func(interface{}) ([]byte, error),
+        unmarshal func([]byte, interface{}) error,
+        opts ...cmp.Option,
+    ) Checker
+
+It expects two arguments: a byte slice or a string containing some
+codec-marshaled data, and a Go value.
+
+It uses unmarshal to unmarshal the data into an interface{} value. It marshals
+the Go value using marshal, then unmarshals the result into an interface{}
+value.
+
+It then checks that the two interface{} values are deep-equal to one another,
+using CmpEquals(opts) to perform the check.
+
+See JSONEquals for an example of this in use.
+
+### Contains
+
+Contains checks that a map, slice, array or string contains a value. It's the
+same as using Any(Equals), except that it has a special case for strings - if
+the first argument is a string, the second argument must also be a string and
+strings.Contains will be used.
+
+For example:
+
+    c.Assert("hello world", qt.Contains, "world")
+    c.Assert([]int{3,5,7,99}, qt.Contains, 7)
+
+### ContentEquals
+
+ContentEquals is like DeepEquals but any slices in the compared values will
+be sorted before being compared.
+
+For example:
+
+    c.Assert([]string{"c", "a", "b"}, qt.ContentEquals, []string{"a", "b", "c"})
+
+### DeepEquals
+
+DeepEquals checks that two arbitrary values are deeply equal. The comparison is
+done using the github.com/google/go-cmp/cmp package. When comparing structs, by
+default no exported fields are allowed. If a more sophisticated comparison is
+required, use CmpEquals (see below).
+
+Example call:
+
+    c.Assert(got, qt.DeepEquals, []int{42, 47})
+
+### Equals
+
+Equals checks that two values are equal, as compared with Go's == operator.
+
+For instance:
+
+    c.Assert(answer, qt.Equals, 42)
+
+Note that the following will fail:
+
+    c.Assert((*sometype)(nil), qt.Equals, nil)
+
+Use the IsNil checker below for this kind of nil check.
+
+### ErrorAs
+
+ErrorAs checks that the error is or wraps a specific error type. If so, it
+assigns it to the provided pointer. This is analogous to calling errors.As.
+
+For instance:
+
+    // Checking for a specific error type
+    c.Assert(err, qt.ErrorAs, new(*os.PathError))
+
+    // Checking fields on a specific error type
+    var pathError *os.PathError
+    if c.Check(err, qt.ErrorAs, &pathError) {
+        c.Assert(pathError.Path, Equals, "some_path")
+    }
+
+### ErrorIs
+
+ErrorIs checks that the error is or wraps a specific error value. This is
+analogous to calling errors.Is.
+
+For instance:
+
+    c.Assert(err, qt.ErrorIs, os.ErrNotExist)
+
+### ErrorMatches
+
+ErrorMatches checks that the provided value is an error whose message matches
+the provided regular expression.
+
+For instance:
+
+    c.Assert(err, qt.ErrorMatches, `bad wolf .*`)
+
+### HasLen
+
+HasLen checks that the provided value has the given length.
+
+For instance:
+
+    c.Assert([]int{42, 47}, qt.HasLen, 2)
+    c.Assert(myMap, qt.HasLen, 42)
+
+### Implements
+
+Implements checks that the provided value implements an interface. The interface
+is specified with a pointer to an interface variable.
+
+For instance:
+
+    var rc io.ReadCloser
+    c.Assert(myReader, qt.Implements, &rc)
+
+### IsFalse
+
+IsFalse checks that the provided value is false. The value must have a boolean
+underlying type.
+
+For instance:
+
+    c.Assert(false, qt.IsFalse)
+    c.Assert(IsValid(), qt.IsFalse)
+
+### IsNil
+
+IsNil checks that the provided value is nil.
+
+For instance:
+
+    c.Assert(got, qt.IsNil)
+
+As a special case, if the value is nil but implements the error interface, it is
+still considered to be non-nil. This means that IsNil will fail on an error
+value that happens to have an underlying nil value, because that's invariably a
+mistake. See https://golang.org/doc/faq#nil_error.
+
+So it's just fine to check an error like this:
+
+    c.Assert(err, qt.IsNil)
+
+### IsNotNil
+
+IsNotNil is a Checker checking that the provided value is not nil. IsNotNil is
+the equivalent of qt.Not(qt.IsNil)
+
+For instance:
+
+    c.Assert(got, qt.IsNotNil)
+
+### IsTrue
+
+IsTrue checks that the provided value is true. The value must have a boolean
+underlying type.
+
+For instance:
+
+    c.Assert(true, qt.IsTrue)
+    c.Assert(myBoolean(false), qt.IsTrue)
+
+### JSONEquals
+
+JSONEquals checks whether a byte slice or string is JSON-equivalent to a Go
+value. See CodecEquals for more information.
+
+It uses DeepEquals to do the comparison. If a more sophisticated comparison is
+required, use CodecEquals directly.
+
+For instance:
+
+    c.Assert(`{"First": 47.11}`, qt.JSONEquals, &MyStruct{First: 47.11})
+
+### Matches
+
+Matches checks that a string or result of calling the String method (if the
+value implements fmt.Stringer) matches the provided regular expression.
+
+For instance:
+
+    c.Assert("these are the voyages", qt.Matches, `these are .*`)
+    c.Assert(net.ParseIP("1.2.3.4"), qt.Matches, `1.*`)
+
+### Not
+
+Not returns a Checker negating the given Checker.
+
+For instance:
+
+    c.Assert(got, qt.Not(qt.IsNil))
+    c.Assert(answer, qt.Not(qt.Equals), 42)
+
+### PanicMatches
+
+PanicMatches checks that the provided function panics with a message matching
+the provided regular expression.
+
+For instance:
+
+    c.Assert(func() {panic("bad wolf ...")}, qt.PanicMatches, `bad wolf .*`)
+
+### Satisfies
+
+Satisfies checks that the provided value, when used as argument of the provided
+predicate function, causes the function to return true. The function must be of
+type func(T) bool, having got assignable to T.
+
+For instance:
+
+    // Check that an error from os.Open satisfies os.IsNotExist.
+    c.Assert(err, qt.Satisfies, os.IsNotExist)
+
+    // Check that a floating point number is a not-a-number.
+    c.Assert(f, qt.Satisfies, math.IsNaN)
+
+### Deferred Execution
+
+The testing.TB.Cleanup helper provides the ability to defer the execution of
+functions that will be run when the test completes. This is often useful for
+creating OS-level resources such as temporary directories (see c.Mkdir).
+
+When targeting Go versions that don't have Cleanup (< 1.14), the same can be
+achieved using c.Defer. In this case, to trigger the deferred behavior, calling
+c.Done is required. For instance, if you create a *C instance at the top level,
+you’ll have to add a defer to trigger the cleanups at the end of the test:
+
+    defer c.Done()
+
+However, if you use quicktest to create a subtest, Done will be called
+automatically at the end of that subtest. For example:
+
+    func TestFoo(t *testing.T) {
+        c := qt.New(t)
+        c.Run("subtest", func(c *qt.C) {
+            c.Setenv("HOME", c.Mkdir())
+            // Here $HOME is set to the path of a newly created directory.
+            // At the end of the test the directory will be removed
+            // and HOME set back to its original value.
+        })
+    }
+
+The c.Patch, c.Setenv, c.Unsetenv and c.Mkdir helpers use t.Cleanup for cleaning
+up resources when available, and fall back to Defer otherwise.
+
+For a complete API reference, see the
+[package documentation](https://pkg.go.dev/github.com/frankban/quicktest#section-documentation).
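The README above notes that additional checkers can be added by implementing the Checker interface, but gives no example. The following is a minimal, hypothetical sketch (not part of this commit) of such a custom checker, based on the Check/ArgNames contract defined in checker.go below; the mychecks package and HasPrefix name are invented for illustration.

    package mychecks

    import (
    	"errors"
    	"strings"

    	qt "github.com/frankban/quicktest"
    )

    // hasPrefixChecker checks that a string starts with a given prefix.
    type hasPrefixChecker struct{}

    // ArgNames implements qt.Checker by naming the got value and the
    // single extra argument.
    func (hasPrefixChecker) ArgNames() []string {
    	return []string{"got", "prefix"}
    }

    // Check implements qt.Checker by returning a non-nil error when the
    // check fails; values added via note are printed with the failure.
    func (hasPrefixChecker) Check(got interface{}, args []interface{}, note func(key string, value interface{})) error {
    	s, ok := got.(string)
    	if !ok {
    		note("got", got)
    		return errors.New("first argument is not a string")
    	}
    	prefix, ok := args[0].(string)
    	if !ok {
    		note("prefix", args[0])
    		return errors.New("prefix is not a string")
    	}
    	if !strings.HasPrefix(s, prefix) {
    		return errors.New("string does not start with the wanted prefix")
    	}
    	return nil
    }

    // HasPrefix can then be used like any built-in checker:
    //
    //	c.Assert("hello world", mychecks.HasPrefix, "hello")
    var HasPrefix qt.Checker = hasPrefixChecker{}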

+ 799 - 0
data_tool/src/github.com/frankban/quicktest/checker.go

@@ -0,0 +1,799 @@
+// Licensed under the MIT license, see LICENSE file for details.
+
+package quicktest
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"reflect"
+	"regexp"
+	"strings"
+
+	"github.com/google/go-cmp/cmp"
+	"github.com/google/go-cmp/cmp/cmpopts"
+	"github.com/kr/pretty"
+)
+
+// Checker is implemented by types used as part of Check/Assert invocations.
+type Checker interface {
+	// Check checks that the obtained value (got) is correct with respect to
+	// the checker's arguments (args). On failure, the returned error is
+	// printed along with the checker arguments and any key-value pairs added
+	// by calling the note function. Values are pretty-printed unless they are
+	// of type Unquoted.
+	//
+	// When the check arguments are invalid, Check may return a BadCheck error,
+	// which suppresses printing of the checker arguments. Values added with
+	// note are still printed.
+	//
+	// If Check returns ErrSilent, neither the checker arguments nor the error
+	// are printed. Again, values added with note are still printed.
+	Check(got interface{}, args []interface{}, note func(key string, value interface{})) error
+
+	// ArgNames returns the names of all required arguments, including the
+	// mandatory got argument and any additional args.
+	ArgNames() []string
+}
+
+// Equals is a Checker checking equality of two comparable values.
+//
+// For instance:
+//
+//	c.Assert(answer, qt.Equals, 42)
+//
+// Note that the following will fail:
+//
+//	c.Assert((*sometype)(nil), qt.Equals, nil)
+//
+// Use the IsNil checker below for this kind of nil check.
+var Equals Checker = &equalsChecker{
+	argNames: []string{"got", "want"},
+}
+
+type equalsChecker struct {
+	argNames
+}
+
+// Check implements Checker.Check by checking that got == args[0].
+func (c *equalsChecker) Check(got interface{}, args []interface{}, note func(key string, value interface{})) (err error) {
+	defer func() {
+		// A panic is raised when the provided values are not comparable.
+		if r := recover(); r != nil {
+			err = fmt.Errorf("%s", r)
+		}
+	}()
+	want := args[0]
+	if got == want {
+		return nil
+	}
+
+	// Customize error message for non-nil errors.
+	if _, ok := got.(error); ok && want == nil {
+		return errors.New("got non-nil error")
+	}
+
+	// Show error types when comparing errors with different types.
+	if got, ok := got.(error); ok {
+		if want, ok := want.(error); ok {
+			gotType := reflect.TypeOf(got)
+			wantType := reflect.TypeOf(want)
+			if gotType != wantType {
+				note("got type", Unquoted(gotType.String()))
+				note("want type", Unquoted(wantType.String()))
+			}
+		}
+		return errors.New("values are not equal")
+	}
+
+	// Show line diff when comparing different multi-line strings.
+	if got, ok := got.(string); ok {
+		if want, ok := want.(string); ok {
+			isMultiLine := func(s string) bool {
+				i := strings.Index(s, "\n")
+				return i != -1 && i < len(s)-1
+			}
+			if isMultiLine(got) || isMultiLine(want) {
+				diff := cmp.Diff(strings.SplitAfter(got, "\n"), strings.SplitAfter(want, "\n"))
+				note("line diff (-got +want)", Unquoted(diff))
+			}
+		}
+	}
+
+	return errors.New("values are not equal")
+}
+
+// CmpEquals returns a Checker checking equality of two arbitrary values
+// according to the provided compare options. See DeepEquals as an example of
+// such a checker, commonly used when no compare options are required.
+//
+// Example calls:
+//
+//	c.Assert(list, qt.CmpEquals(cmpopts.SortSlices), []int{42, 47})
+//	c.Assert(got, qt.CmpEquals(), []int{42, 47}) // Same as qt.DeepEquals.
+func CmpEquals(opts ...cmp.Option) Checker {
+	return &cmpEqualsChecker{
+		argNames: []string{"got", "want"},
+		opts:     opts,
+	}
+}
+
+type cmpEqualsChecker struct {
+	argNames
+	opts cmp.Options
+}
+
+// Check implements Checker.Check by checking that got == args[0] according to
+// the compare options stored in the checker.
+func (c *cmpEqualsChecker) Check(got interface{}, args []interface{}, note func(key string, value interface{})) (err error) {
+	defer func() {
+		// A panic is raised in some cases, for instance when trying to compare
+		// structs with unexported fields and neither AllowUnexported nor
+		// cmpopts.IgnoreUnexported are provided.
+		if r := recover(); r != nil {
+			err = BadCheckf("%s", r)
+		}
+	}()
+	want := args[0]
+	if diff := cmp.Diff(got, want, c.opts...); diff != "" {
+		// Only output values when the verbose flag is set.
+		note("error", Unquoted("values are not deep equal"))
+		note("diff (-got +want)", Unquoted(diff))
+		note("got", SuppressedIfLong{got})
+		note("want", SuppressedIfLong{want})
+		return ErrSilent
+	}
+	return nil
+}
+
+// DeepEquals is a Checker deeply checking equality of two arbitrary values.
+// The comparison is done using the github.com/google/go-cmp/cmp package.
+// When comparing structs, by default no unexported fields are allowed. CmpEquals
+// can be used when more customized compare options are required.
+//
+// Example call:
+//
+//	c.Assert(got, qt.DeepEquals, []int{42, 47})
+var DeepEquals = CmpEquals()
+
+// ContentEquals is like DeepEquals but any slices in the compared values will
+// be sorted before being compared.
+var ContentEquals = CmpEquals(cmpopts.SortSlices(func(x, y interface{}) bool {
+	// TODO frankban: implement a proper sort function.
+	return pretty.Sprint(x) < pretty.Sprint(y)
+}))
+
+// Matches is a Checker checking that the provided string or fmt.Stringer
+// matches the provided regular expression pattern.
+//
+// For instance:
+//
+//	c.Assert("these are the voyages", qt.Matches, "these are .*")
+//	c.Assert(net.ParseIP("1.2.3.4"), qt.Matches, "1.*")
+//	c.Assert("my multi-line\nnumber", qt.Matches, regexp.MustCompile(`my multi-line\n(string|number)`))
+var Matches Checker = &matchesChecker{
+	argNames: []string{"got value", "regexp"},
+}
+
+type matchesChecker struct {
+	argNames
+}
+
+// Check implements Checker.Check by checking that got is a string or a
+// fmt.Stringer and that it matches args[0].
+func (c *matchesChecker) Check(got interface{}, args []interface{}, note func(key string, value interface{})) error {
+	pattern := args[0]
+	switch v := got.(type) {
+	case string:
+		return match(v, pattern, "value does not match regexp", note)
+	case fmt.Stringer:
+		return match(v.String(), pattern, "value.String() does not match regexp", note)
+	}
+	note("value", got)
+	return BadCheckf("value is not a string or a fmt.Stringer")
+}
+
+func checkFirstArgIsError(got interface{}, note func(key string, value interface{})) error {
+	if got == nil {
+		return errors.New("got nil error but want non-nil")
+	}
+	_, ok := got.(error)
+	if !ok {
+		note("got", got)
+		return BadCheckf("first argument is not an error")
+	}
+	return nil
+}
+
+// ErrorMatches is a Checker checking that the provided value is an error whose
+// message matches the provided regular expression pattern.
+//
+// For instance:
+//
+//	c.Assert(err, qt.ErrorMatches, "bad wolf .*")
+//	c.Assert(err, qt.ErrorMatches, regexp.MustCompile("bad wolf .*"))
+var ErrorMatches Checker = &errorMatchesChecker{
+	argNames: []string{"got error", "regexp"},
+}
+
+type errorMatchesChecker struct {
+	argNames
+}
+
+// Check implements Checker.Check by checking that got is an error whose
+// Error() matches args[0].
+func (c *errorMatchesChecker) Check(got interface{}, args []interface{}, note func(key string, value interface{})) error {
+	if err := checkFirstArgIsError(got, note); err != nil {
+		return err
+	}
+
+	gotErr := got.(error)
+	return match(gotErr.Error(), args[0], "error does not match regexp", note)
+}
+
+// PanicMatches is a Checker checking that the provided function panics with a
+// message matching the provided regular expression pattern.
+//
+// For instance:
+//
+//	c.Assert(func() {panic("bad wolf ...")}, qt.PanicMatches, "bad wolf .*")
+//	c.Assert(func() {panic("bad wolf ...")}, qt.PanicMatches, regexp.MustCompile(`bad wolf .*`))
+var PanicMatches Checker = &panicMatchesChecker{
+	argNames: []string{"function", "regexp"},
+}
+
+type panicMatchesChecker struct {
+	argNames
+}
+
+// Check implements Checker.Check by checking that got is a func() that panics
+// with a message matching args[0].
+func (c *panicMatchesChecker) Check(got interface{}, args []interface{}, note func(key string, value interface{})) (err error) {
+	f := reflect.ValueOf(got)
+	if f.Kind() != reflect.Func {
+		note("got", got)
+		return BadCheckf("first argument is not a function")
+	}
+	ftype := f.Type()
+	if ftype.NumIn() != 0 {
+		note("function", got)
+		return BadCheckf("cannot use a function receiving arguments")
+	}
+
+	defer func() {
+		r := recover()
+		if r == nil {
+			err = errors.New("function did not panic")
+			return
+		}
+		msg := fmt.Sprint(r)
+		note("panic value", msg)
+		err = match(msg, args[0], "panic value does not match regexp", note)
+	}()
+
+	f.Call(nil)
+	return nil
+}
+
+// IsNil is a Checker checking that the provided value is nil.
+//
+// For instance:
+//
+//	c.Assert(got, qt.IsNil)
+//
+// As a special case, if the value is nil but implements the
+// error interface, it is still considered to be non-nil.
+// This means that IsNil will fail on an error value that happens
+// to have an underlying nil value, because that's
+// invariably a mistake.
+// See https://golang.org/doc/faq#nil_error.
+var IsNil Checker = &isNilChecker{
+	argNames: []string{"got"},
+}
+
+type isNilChecker struct {
+	argNames
+}
+
+// Check implements Checker.Check by checking that got is nil.
+func (c *isNilChecker) Check(got interface{}, args []interface{}, note func(key string, value interface{})) (err error) {
+	if got == nil {
+		return nil
+	}
+	value := reflect.ValueOf(got)
+	_, isError := got.(error)
+	if canBeNil(value.Kind()) && value.IsNil() {
+		if isError {
+			// It's an error with an underlying nil value.
+			return fmt.Errorf("error containing nil value of type %T. See https://golang.org/doc/faq#nil_error", got)
+		}
+		return nil
+	}
+	if isError {
+		return errors.New("got non-nil error")
+	}
+	return errors.New("got non-nil value")
+}
+
+// IsNotNil is a Checker checking that the provided value is not nil.
+// IsNotNil is the equivalent of qt.Not(qt.IsNil)
+//
+// For instance:
+//
+//	c.Assert(got, qt.IsNotNil)
+var IsNotNil Checker = &notChecker{
+	Checker: IsNil,
+}
+
+// HasLen is a Checker checking that the provided value has the given length.
+//
+// For instance:
+//
+//	c.Assert([]int{42, 47}, qt.HasLen, 2)
+//	c.Assert(myMap, qt.HasLen, 42)
+var HasLen Checker = &hasLenChecker{
+	argNames: []string{"got", "want length"},
+}
+
+type hasLenChecker struct {
+	argNames
+}
+
+// Check implements Checker.Check by checking that len(got) == args[0].
+func (c *hasLenChecker) Check(got interface{}, args []interface{}, note func(key string, value interface{})) (err error) {
+	v := reflect.ValueOf(got)
+	switch v.Kind() {
+	case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
+	default:
+		note("got", got)
+		return BadCheckf("first argument has no length")
+	}
+	want, ok := args[0].(int)
+	if !ok {
+		note("length", args[0])
+		return BadCheckf("length is not an int")
+	}
+	length := v.Len()
+	note("len(got)", length)
+	if length != want {
+		return fmt.Errorf("unexpected length")
+	}
+	return nil
+}
+
+// Implements checks that the provided value implements an interface. The
+// interface is specified with a pointer to an interface variable.
+//
+// For instance:
+//
+//	var rc io.ReadCloser
+//	c.Assert(myReader, qt.Implements, &rc)
+var Implements Checker = &implementsChecker{
+	argNames: []string{"got", "want interface pointer"},
+}
+
+type implementsChecker struct {
+	argNames
+}
+
+var emptyInterface = reflect.TypeOf((*interface{})(nil)).Elem()
+
+// Check implements Checker.Check by checking that got implements the
+// interface pointed to by args[0].
+func (c *implementsChecker) Check(got interface{}, args []interface{}, note func(key string, value interface{})) (err error) {
+	if got == nil {
+		note("error", Unquoted("got nil value but want non-nil"))
+		note("got", got)
+		return ErrSilent
+	}
+
+	if args[0] == nil {
+		return BadCheckf("want a pointer to an interface variable but nil was provided")
+	}
+	wantType := reflect.TypeOf(args[0])
+	if wantType.Kind() != reflect.Ptr {
+		note("want", Unquoted(wantType.String()))
+		return BadCheckf("want a pointer to an interface variable but a non-pointer value was provided")
+	} else if wantType.Elem().Kind() != reflect.Interface {
+		note("want pointer type", Unquoted(wantType.Elem().String()))
+		return BadCheckf("want a pointer to an interface variable but a pointer to a concrete type was provided")
+	} else if wantType.Elem() == emptyInterface {
+		note("want pointer type", Unquoted(wantType.Elem().String()))
+		return BadCheckf("all types implement the empty interface, want a pointer to a variable that isn't the empty interface")
+	}
+
+	gotType := reflect.TypeOf(got)
+	if !gotType.Implements(wantType.Elem()) {
+		note("error", Unquoted("got value does not implement wanted interface"))
+		note("got", got)
+		note("want interface", Unquoted(wantType.Elem().String()))
+		return ErrSilent
+	}
+
+	return nil
+}
+
+// Satisfies is a Checker checking that the provided value, when used as
+// argument of the provided predicate function, causes the function to return
+// true. The function must be of type func(T) bool, having got assignable to T.
+//
+// For instance:
+//
+//	// Check that an error from os.Open satisfies os.IsNotExist.
+//	c.Assert(err, qt.Satisfies, os.IsNotExist)
+//
+//	// Check that a floating point number is a not-a-number.
+//	c.Assert(f, qt.Satisfies, math.IsNaN)
+var Satisfies Checker = &satisfiesChecker{
+	argNames: []string{"arg", "predicate function"},
+}
+
+type satisfiesChecker struct {
+	argNames
+}
+
+// Check implements Checker.Check by checking that args[0](got) == true.
+func (c *satisfiesChecker) Check(got interface{}, args []interface{}, note func(key string, value interface{})) (err error) {
+	// Original code at
+	// <https://github.com/juju/testing/blob/master/checkers/bool.go>.
+	predicate := args[0]
+	f := reflect.ValueOf(predicate)
+	ftype := f.Type()
+	if ftype.Kind() != reflect.Func || ftype.NumIn() != 1 || ftype.NumOut() != 1 || ftype.Out(0).Kind() != reflect.Bool {
+		note("predicate function", predicate)
+		return BadCheckf("predicate function is not a func(T) bool")
+	}
+	v, t := reflect.ValueOf(got), ftype.In(0)
+	if !v.IsValid() {
+		if !canBeNil(t.Kind()) {
+			note("predicate function", predicate)
+			return BadCheckf("cannot use nil as type %v in argument to predicate function", t)
+		}
+		v = reflect.Zero(t)
+	} else if !v.Type().AssignableTo(t) {
+		note("arg", got)
+		note("predicate function", predicate)
+		return BadCheckf("cannot use value of type %v as type %v in argument to predicate function", v.Type(), t)
+	}
+	if f.Call([]reflect.Value{v})[0].Interface().(bool) {
+		return nil
+	}
+	return fmt.Errorf("value does not satisfy predicate function")
+}
+
+// IsTrue is a Checker checking that the provided value is true.
+// The value must have a boolean underlying type.
+//
+// For instance:
+//
+//	c.Assert(true, qt.IsTrue)
+//	c.Assert(myBoolean(false), qt.IsTrue)
+var IsTrue Checker = &boolChecker{
+	want: true,
+}
+
+// IsFalse is a Checker checking that the provided value is false.
+// The value must have a boolean underlying type.
+//
+// For instance:
+//
+//	c.Assert(false, qt.IsFalse)
+//	c.Assert(IsValid(), qt.IsFalse)
+var IsFalse Checker = &boolChecker{
+	want: false,
+}
+
+type boolChecker struct {
+	want bool
+}
+
+// Check implements Checker.Check by checking that got == c.want.
+func (c *boolChecker) Check(got interface{}, args []interface{}, note func(key string, value interface{})) (err error) {
+	v := reflect.ValueOf(got)
+	if v.IsValid() && v.Kind() == reflect.Bool {
+		if v.Bool() != c.want {
+			return fmt.Errorf("value is not %v", c.want)
+		}
+		return nil
+	}
+	note("value", got)
+	return BadCheckf("value does not have a bool underlying type")
+}
+
+// ArgNames implements Checker.ArgNames.
+func (c *boolChecker) ArgNames() []string {
+	return []string{"got"}
+}
+
+// Not returns a Checker negating the given Checker.
+//
+// For instance:
+//
+//	c.Assert(got, qt.Not(qt.IsNil))
+//	c.Assert(answer, qt.Not(qt.Equals), 42)
+func Not(checker Checker) Checker {
+	return &notChecker{
+		Checker: checker,
+	}
+}
+
+type notChecker struct {
+	Checker
+}
+
+// Check implements Checker.Check by checking that the stored checker fails.
+func (c *notChecker) Check(got interface{}, args []interface{}, note func(key string, value interface{})) (err error) {
+	if nc, ok := c.Checker.(*notChecker); ok {
+		return nc.Checker.Check(got, args, note)
+	}
+	err = c.Checker.Check(got, args, note)
+	if IsBadCheck(err) {
+		return err
+	}
+	if err != nil {
+		return nil
+	}
+	if c.Checker == IsNil {
+		return errors.New("got nil value but want non-nil")
+	}
+	return errors.New("unexpected success")
+}
+
+// Contains is a checker that checks that a map, slice, array
+// or string contains a value. It's the same as using
+// Any(Equals), except that it has a special case
+// for strings - if the first argument is a string,
+// the second argument must also be a string
+// and strings.Contains will be used.
+//
+// For example:
+//
+//	c.Assert("hello world", qt.Contains, "world")
+//	c.Assert([]int{3,5,7,99}, qt.Contains, 7)
+var Contains Checker = &containsChecker{
+	argNames: []string{"container", "want"},
+}
+
+type containsChecker struct {
+	argNames
+}
+
+// Check implements Checker.Check by checking that got contains args[0].
+func (c *containsChecker) Check(got interface{}, args []interface{}, note func(key string, value interface{})) error {
+	if got, ok := got.(string); ok {
+		want, ok := args[0].(string)
+		if !ok {
+			return BadCheckf("strings can only contain strings, not %T", args[0])
+		}
+		if strings.Contains(got, want) {
+			return nil
+		}
+		return errors.New("no substring match found")
+	}
+	return Any(Equals).Check(got, args, note)
+}
+
+// Any returns a Checker that uses the given checker to check elements
+// of a slice or array or the values from a map. It succeeds if any element
+// passes the check.
+//
+// For example:
+//
+//	c.Assert([]int{3,5,7,99}, qt.Any(qt.Equals), 7)
+//	c.Assert([][]string{{"a", "b"}, {"c", "d"}}, qt.Any(qt.DeepEquals), []string{"c", "d"})
+//
+// See also All and Contains.
+func Any(c Checker) Checker {
+	return &anyChecker{
+		argNames:    append([]string{"container"}, c.ArgNames()[1:]...),
+		elemChecker: c,
+	}
+}
+
+type anyChecker struct {
+	argNames
+	elemChecker Checker
+}
+
+// Check implements Checker.Check by checking that one of the elements of
+// got passes the c.elemChecker check.
+func (c *anyChecker) Check(got interface{}, args []interface{}, note func(key string, value interface{})) error {
+	iter, err := newIter(got)
+	if err != nil {
+		return BadCheckf("%v", err)
+	}
+	for iter.next() {
+		// For the time being, discard the notes added by the sub-checker,
+		// because it's not clear what a good behaviour would be.
+		// Should we print all the failed checks for all elements? If there's only
+		// one element in the container, the answer is probably yes,
+		// but let's leave it for now.
+		err := c.elemChecker.Check(
+			iter.value().Interface(),
+			args,
+			func(key string, value interface{}) {},
+		)
+		if err == nil {
+			return nil
+		}
+		if IsBadCheck(err) {
+			return BadCheckf("at %s: %v", iter.key(), err)
+		}
+	}
+	return errors.New("no matching element found")
+}
+
+// All returns a Checker that uses the given checker to check elements
+// of slice or array or the values of a map. It succeeds if all elements
+// pass the check.
+// On failure it prints the error from the first index that failed.
+//
+// For example:
+//
+//	c.Assert([]int{3, 5, 8}, qt.All(qt.Not(qt.Equals)), 0)
+//	c.Assert([][]string{{"a", "b"}, {"a", "b"}}, qt.All(qt.DeepEquals), []string{"c", "d"})
+//
+// See also Any and Contains.
+func All(c Checker) Checker {
+	return &allChecker{
+		argNames:    append([]string{"container"}, c.ArgNames()[1:]...),
+		elemChecker: c,
+	}
+}
+
+type allChecker struct {
+	argNames
+	elemChecker Checker
+}
+
+// Check implements Checker.Check by checking that all the elements of got
+// pass the c.elemChecker check.
+func (c *allChecker) Check(got interface{}, args []interface{}, notef func(key string, value interface{})) error {
+	iter, err := newIter(got)
+	if err != nil {
+		return BadCheckf("%v", err)
+	}
+	for iter.next() {
+		// Store any notes added by the checker so
+		// we can add our own note at the start
+		// to say which element failed.
+		var notes []note
+		err := c.elemChecker.Check(
+			iter.value().Interface(),
+			args,
+			func(key string, val interface{}) {
+				notes = append(notes, note{key, val})
+			},
+		)
+		if err == nil {
+			continue
+		}
+		if IsBadCheck(err) {
+			return BadCheckf("at %s: %v", iter.key(), err)
+		}
+		notef("error", Unquoted("mismatch at "+iter.key()))
+		if err != ErrSilent {
+			// If the error's not silent, the checker is expecting
+			// the caller to print the error and the value that failed.
+			notef("error", Unquoted(err.Error()))
+			notef("first mismatched element", iter.value().Interface())
+		}
+		for _, n := range notes {
+			notef(n.key, n.value)
+		}
+		return ErrSilent
+	}
+	return nil
+}
+
+// JSONEquals is a checker that checks whether a byte slice
+// or string is JSON-equivalent to a Go value. See CodecEquals for
+// more information.
+//
+// It uses DeepEquals to do the comparison. If a more sophisticated
+// comparison is required, use CodecEquals directly.
+//
+// For instance:
+//
+//	c.Assert(`{"First": 47.11}`, qt.JSONEquals, &MyStruct{First: 47.11})
+var JSONEquals = CodecEquals(json.Marshal, json.Unmarshal)
+
+type codecEqualChecker struct {
+	argNames
+	marshal    func(interface{}) ([]byte, error)
+	unmarshal  func([]byte, interface{}) error
+	deepEquals Checker
+}
+
+// CodecEquals returns a checker that checks for codec value equivalence.
+//
+// It expects two arguments: a byte slice or a string containing some
+// codec-marshaled data, and a Go value.
+//
+// It uses unmarshal to unmarshal the data into an interface{} value.
+// It marshals the Go value using marshal, then unmarshals the result into
+// an interface{} value.
+//
+// It then checks that the two interface{} values are deep-equal to one
+// another, using CmpEquals(opts) to perform the check.
+//
+// See JSONEquals for an example of this in use.
+func CodecEquals(
+	marshal func(interface{}) ([]byte, error),
+	unmarshal func([]byte, interface{}) error,
+	opts ...cmp.Option,
+) Checker {
+	return &codecEqualChecker{
+		argNames:   argNames{"got", "want"},
+		marshal:    marshal,
+		unmarshal:  unmarshal,
+		deepEquals: CmpEquals(opts...),
+	}
+}
+
+func (c *codecEqualChecker) Check(got interface{}, args []interface{}, note func(key string, value interface{})) error {
+	var gotContent []byte
+	switch got := got.(type) {
+	case string:
+		gotContent = []byte(got)
+	case []byte:
+		gotContent = got
+	default:
+		return BadCheckf("expected string or byte, got %T", got)
+	}
+	wantContent := args[0]
+	wantContentBytes, err := c.marshal(wantContent)
+	if err != nil {
+		return BadCheckf("cannot marshal expected contents: %v", err)
+	}
+	var wantContentVal interface{}
+	if err := c.unmarshal(wantContentBytes, &wantContentVal); err != nil {
+		return BadCheckf("cannot unmarshal expected contents: %v", err)
+	}
+	var gotContentVal interface{}
+	if err := c.unmarshal([]byte(gotContent), &gotContentVal); err != nil {
+		return fmt.Errorf("cannot unmarshal obtained contents: %v; %q", err, gotContent)
+	}
+	return c.deepEquals.Check(gotContentVal, []interface{}{wantContentVal}, note)
+}
+
+// argNames helps implementing Checker.ArgNames.
+type argNames []string
+
+// ArgNames implements Checker.ArgNames by returning the argument names.
+func (a argNames) ArgNames() []string {
+	return a
+}
+
+// match checks that the given error message matches the given pattern.
+func match(got string, pattern interface{}, msg string, note func(key string, value interface{})) error {
+	if actualRegex, ok := pattern.(*regexp.Regexp); ok {
+		if actualRegex.MatchString(got) {
+			return nil
+		}
+		return errors.New(msg)
+	}
+	regex, ok := pattern.(string)
+	if !ok {
+		note("regexp", pattern)
+		return BadCheckf("regexp is not a string")
+	}
+	matches, err := regexp.MatchString("^("+regex+")$", got)
+	if err != nil {
+		note("regexp", regex)
+		return BadCheckf("cannot compile regexp: %s", err)
+	}
+	if matches {
+		return nil
+	}
+	return errors.New(msg)
+}
+
+// canBeNil reports whether a value or type of the given kind can be nil.
+func canBeNil(k reflect.Kind) bool {
+	switch k {
+	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+		return true
+	}
+	return false
+}
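As a rough illustration of the Checker contract implemented above (not part of the commit), a checker can also be driven directly, outside of c.Assert; the note callback receives any extra key/value pairs the checker wants reported:

    package main

    import (
    	"fmt"

    	qt "github.com/frankban/quicktest"
    )

    func main() {
    	// Collect any key/value notes added during the check.
    	note := func(key string, value interface{}) {
    		fmt.Printf("note %s: %v\n", key, value)
    	}

    	// Equals fails here, so Check returns a non-nil error describing why.
    	if err := qt.Equals.Check(42, []interface{}{47}, note); err != nil {
    		fmt.Println("check failed:", err) // check failed: values are not equal
    	}

    	// HasLen notes len(got) and succeeds, returning nil.
    	if err := qt.HasLen.Check([]int{1, 2, 3}, []interface{}{3}, note); err == nil {
    		fmt.Println("length check passed")
    	}
    }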

+ 92 - 0
data_tool/src/github.com/frankban/quicktest/checker_err.go

@@ -0,0 +1,92 @@
+// Licensed under the MIT license, see LICENSE file for details.
+
+package quicktest
+
+import (
+	"errors"
+	"fmt"
+)
+
+// ErrorAs checks that the error is or wraps a specific error type. If so, it
+// assigns it to the provided pointer. This is analogous to calling errors.As.
+//
+// For instance:
+//
+//	// Checking for a specific error type
+//	c.Assert(err, qt.ErrorAs, new(*os.PathError))
+//
+//	// Checking fields on a specific error type
+//	var pathError *os.PathError
+//	if c.Check(err, qt.ErrorAs, &pathError) {
+//	    c.Assert(pathError.Path, qt.Equals, "some_path")
+//	}
+var ErrorAs Checker = &errorAsChecker{
+	argNames: []string{"got", "as"},
+}
+
+type errorAsChecker struct {
+	argNames
+}
+
+// Check implements Checker.Check by checking that got is an error whose error
+// chain matches args[0] and assigning it to args[0].
+func (c *errorAsChecker) Check(got interface{}, args []interface{}, note func(key string, value interface{})) (err error) {
+	if err := checkFirstArgIsError(got, note); err != nil {
+		return err
+	}
+
+	gotErr := got.(error)
+	defer func() {
+		// A panic is raised when the target is not a pointer to an interface
+		// or error.
+		if r := recover(); r != nil {
+			err = BadCheckf("%s", r)
+		}
+	}()
+	as := args[0]
+	if errors.As(gotErr, as) {
+		return nil
+	}
+
+	note("error", Unquoted("wanted type is not found in error chain"))
+	note("got", gotErr)
+	note("as", Unquoted(fmt.Sprintf("%T", as)))
+	return ErrSilent
+}
+
+// ErrorIs checks that the error is or wraps a specific error value. This is
+// analogous to calling errors.Is.
+//
+// For instance:
+//
+//	c.Assert(err, qt.ErrorIs, os.ErrNotExist)
+var ErrorIs Checker = &errorIsChecker{
+	argNames: []string{"got", "want"},
+}
+
+type errorIsChecker struct {
+	argNames
+}
+
+// Check implements Checker.Check by checking that got is an error whose error
+// chain matches args[0].
+func (c *errorIsChecker) Check(got interface{}, args []interface{}, note func(key string, value interface{})) error {
+	if got == nil && args[0] == nil {
+		return nil
+	}
+	if err := checkFirstArgIsError(got, note); err != nil {
+		return err
+	}
+
+	gotErr := got.(error)
+	wantErr, ok := args[0].(error)
+	if !ok && args[0] != nil {
+		note("want", args[0])
+		return BadCheckf("second argument is not an error")
+	}
+
+	if !errors.Is(gotErr, wantErr) {
+		return errors.New("wanted error is not found in error chain")
+	}
+	return nil
+}
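A short, hypothetical test sketch (not part of the commit) showing how the ErrorIs and ErrorAs checkers above behave with an error wrapped via %w; the QueryError type and package name are invented for illustration:

    package myapp_test

    import (
    	"fmt"
    	"testing"

    	qt "github.com/frankban/quicktest"
    )

    // QueryError is a hypothetical error type used only in this sketch.
    type QueryError struct {
    	Query string
    }

    func (e *QueryError) Error() string { return "query failed: " + e.Query }

    func TestWrappedQueryError(t *testing.T) {
    	c := qt.New(t)
    	base := &QueryError{Query: "SELECT 1"}
    	err := fmt.Errorf("running query: %w", base)

    	// ErrorIs follows the %w chain, like errors.Is.
    	c.Assert(err, qt.ErrorIs, base)

    	// ErrorAs extracts the wrapped *QueryError, like errors.As.
    	var qe *QueryError
    	c.Assert(err, qt.ErrorAs, &qe)
    	c.Assert(qe.Query, qt.Equals, "SELECT 1")
    }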

+ 247 - 0
data_tool/src/github.com/frankban/quicktest/checker_err_test.go

@@ -0,0 +1,247 @@
+// Licensed under the MIT license, see LICENSE file for details.
+
+package quicktest_test
+
+import (
+	"errors"
+	"fmt"
+)
+
+func init() {
+	checkerTests = append(checkerTests, errorCheckerTests...)
+}
+
+type errTarget struct {
+	msg string
+}
+
+func (e *errTarget) Error() string {
+	return "ptr: " + e.msg
+}
+
+type errTargetNonPtr struct {
+	msg string
+}
+
+func (e errTargetNonPtr) Error() string {
+	return "non ptr: " + e.msg
+}
+
+var targetErr = &errTarget{msg: "target"}
+
+var errorCheckerTests = []struct {
+	about                 string
+	checker               Checker
+	got                   interface{}
+	args                  []interface{}
+	verbose               bool
+	expectedCheckFailure  string
+	expectedNegateFailure string
+}{{
+	about:   "ErrorAs: exact match",
+	checker: ErrorAs,
+	got:     targetErr,
+	args:    []interface{}{new(*errTarget)},
+	expectedNegateFailure: `
+error:
+  unexpected success
+got:
+  e"ptr: target"
+as:
+  &&quicktest_test.errTarget{msg:"target"}
+`,
+}, {
+	about:   "ErrorAs: wrapped match",
+	checker: ErrorAs,
+	got:     fmt.Errorf("wrapped: %w", targetErr),
+	args:    []interface{}{new(*errTarget)},
+	expectedNegateFailure: `
+error:
+  unexpected success
+got:
+  e"wrapped: ptr: target"
+as:
+  &&quicktest_test.errTarget{msg:"target"}
+`,
+}, {
+	about:   "ErrorAs: fails if nil error",
+	checker: ErrorAs,
+	got:     nil,
+	args:    []interface{}{new(*errTarget)},
+	expectedCheckFailure: `
+error:
+  got nil error but want non-nil
+got:
+  nil
+as:
+  &(*quicktest_test.errTarget)(nil)
+`,
+}, {
+	about:   "ErrorAs: fails if mismatch",
+	checker: ErrorAs,
+	got:     errors.New("other error"),
+	args:    []interface{}{new(*errTarget)},
+	expectedCheckFailure: `
+error:
+  wanted type is not found in error chain
+got:
+  e"other error"
+as:
+  **quicktest_test.errTarget
+`,
+}, {
+	about:   "ErrorAs: fails if mismatch with a non-pointer error implementation",
+	checker: ErrorAs,
+	got:     errors.New("other error"),
+	args:    []interface{}{new(errTargetNonPtr)},
+	expectedCheckFailure: `
+error:
+  wanted type is not found in error chain
+got:
+  e"other error"
+as:
+  *quicktest_test.errTargetNonPtr
+`,
+}, {
+	about:   "ErrorAs: bad check if invalid error",
+	checker: ErrorAs,
+	got:     "not an error",
+	args:    []interface{}{new(*errTarget)},
+	expectedCheckFailure: `
+error:
+  bad check: first argument is not an error
+got:
+  "not an error"
+`,
+	expectedNegateFailure: `
+error:
+  bad check: first argument is not an error
+got:
+  "not an error"
+`,
+}, {
+	about:   "ErrorAs: bad check if invalid as",
+	checker: ErrorAs,
+	got:     targetErr,
+	args:    []interface{}{&struct{}{}},
+	expectedCheckFailure: `
+error:
+  bad check: errors: *target must be interface or implement error
+`,
+	expectedNegateFailure: `
+error:
+  bad check: errors: *target must be interface or implement error
+`,
+}, {
+	about:   "ErrorIs: nil to nil match",
+	checker: ErrorIs,
+	got:     nil,
+	args:    []interface{}{nil},
+	expectedNegateFailure: `
+error:
+  unexpected success
+got:
+  nil
+want:
+  <same as "got">
+`,
+}, {
+	about:   "ErrorIs: non-nil to nil mismatch",
+	checker: ErrorIs,
+	got:     targetErr,
+	args:    []interface{}{nil},
+	expectedCheckFailure: `
+error:
+  wanted error is not found in error chain
+got:
+  e"ptr: target"
+want:
+  nil
+`,
+}, {
+	about:   "ErrorIs: exact match",
+	checker: ErrorIs,
+	got:     targetErr,
+	args:    []interface{}{targetErr},
+	expectedNegateFailure: `
+error:
+  unexpected success
+got:
+  e"ptr: target"
+want:
+  <same as "got">
+`,
+}, {
+	about:   "ErrorIs: wrapped match",
+	checker: ErrorIs,
+	got:     fmt.Errorf("wrapped: %w", targetErr),
+	args:    []interface{}{targetErr},
+	expectedNegateFailure: `
+error:
+  unexpected success
+got:
+  e"wrapped: ptr: target"
+want:
+  e"ptr: target"
+`,
+}, {
+	about:   "ErrorIs: fails if nil error",
+	checker: ErrorIs,
+	got:     nil,
+	args:    []interface{}{targetErr},
+	expectedCheckFailure: `
+error:
+  got nil error but want non-nil
+got:
+  nil
+want:
+  e"ptr: target"
+`,
+}, {
+	about:   "ErrorIs: fails if mismatch",
+	checker: ErrorIs,
+	got:     errors.New("other error"),
+	args:    []interface{}{targetErr},
+	expectedCheckFailure: `
+error:
+  wanted error is not found in error chain
+got:
+  e"other error"
+want:
+  e"ptr: target"
+`,
+}, {
+	about:   "ErrorIs: bad check if invalid error",
+	checker: ErrorIs,
+	got:     "not an error",
+	args:    []interface{}{targetErr},
+	expectedCheckFailure: `
+error:
+  bad check: first argument is not an error
+got:
+  "not an error"
+`,
+	expectedNegateFailure: `
+error:
+  bad check: first argument is not an error
+got:
+  "not an error"
+`,
+}, {
+	about:   "ErrorIs: bad check if invalid error value",
+	checker: ErrorIs,
+	got:     targetErr,
+	args:    []interface{}{"not an error"},
+	expectedCheckFailure: `
+error:
+  bad check: second argument is not an error
+want:
+  "not an error"
+`,
+	expectedNegateFailure: `
+error:
+  bad check: second argument is not an error
+want:
+  "not an error"
+`,
+}}

+ 3108 - 0
data_tool/src/github.com/frankban/quicktest/checker_test.go

@@ -0,0 +1,3108 @@
+// Licensed under the MIT license, see LICENSE file for details.
+
+package quicktest_test
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"regexp"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/google/go-cmp/cmp"
+	"github.com/google/go-cmp/cmp/cmpopts"
+)
+
+// Fooer is an interface for testing.
+type Fooer interface {
+	Foo()
+}
+
+var (
+	goTime = time.Date(2012, 3, 28, 0, 0, 0, 0, time.UTC)
+	chInt  = func() chan int {
+		ch := make(chan int, 4)
+		ch <- 42
+		ch <- 47
+		return ch
+	}()
+	sameInts = cmpopts.SortSlices(func(x, y int) bool {
+		return x < y
+	})
+	cmpEqualsGot = struct {
+		Strings []interface{}
+		Ints    []int
+	}{
+		Strings: []interface{}{"who", "dalek"},
+		Ints:    []int{42, 47},
+	}
+	cmpEqualsWant = struct {
+		Strings []interface{}
+		Ints    []int
+	}{
+		Strings: []interface{}{"who", "dalek"},
+		Ints:    []int{42},
+	}
+)
+
+type InnerJSON struct {
+	First  string
+	Second int             `json:",omitempty" yaml:",omitempty"`
+	Third  map[string]bool `json:",omitempty" yaml:",omitempty"`
+}
+
+type OuterJSON struct {
+	First  float64
+	Second []*InnerJSON `json:"Last,omitempty" yaml:"last,omitempty"`
+}
+
+type boolean bool
+
+var checkerTests = []struct {
+	about                 string
+	checker               Checker
+	got                   interface{}
+	args                  []interface{}
+	verbose               bool
+	expectedCheckFailure  string
+	expectedNegateFailure string
+}{{
+	about:   "Equals: same values",
+	checker: Equals,
+	got:     42,
+	args:    []interface{}{42},
+	expectedNegateFailure: `
+error:
+  unexpected success
+got:
+  int(42)
+want:
+  <same as "got">
+`,
+}, {
+	about:   "Equals: different values",
+	checker: Equals,
+	got:     "42",
+	args:    []interface{}{"47"},
+	expectedCheckFailure: `
+error:
+  values are not equal
+got:
+  "42"
+want:
+  "47"
+`,
+}, {
+	about:   "Equals: different strings with quotes",
+	checker: Equals,
+	got:     `string "foo"`,
+	args:    []interface{}{`string "bar"`},
+	expectedCheckFailure: tilde2bq(`
+error:
+  values are not equal
+got:
+  ~string "foo"~
+want:
+  ~string "bar"~
+`),
+}, {
+	about:   "Equals: same multiline strings",
+	checker: Equals,
+	got:     "a\nmultiline\nstring",
+	args:    []interface{}{"a\nmultiline\nstring"},
+	expectedNegateFailure: `
+error:
+  unexpected success
+got:
+  "a\nmultiline\nstring"
+want:
+  <same as "got">
+`,
+}, {
+	about:   "Equals: different multi-line strings",
+	checker: Equals,
+	got:     "a\nlong\nmultiline\nstring",
+	args:    []interface{}{"just\na\nlong\nmulti-line\nstring\n"},
+	expectedCheckFailure: fmt.Sprintf(`
+error:
+  values are not equal
+line diff (-got +want):
+%s
+got:
+  "a\nlong\nmultiline\nstring"
+want:
+  "just\na\nlong\nmulti-line\nstring\n"
+`, diff([]string{"a\n", "long\n", "multiline\n", "string"}, []string{"just\n", "a\n", "long\n", "multi-line\n", "string\n", ""})),
+}, {
+	about:   "Equals: different single-line strings ending with newline",
+	checker: Equals,
+	got:     "foo\n",
+	args:    []interface{}{"bar\n"},
+	expectedCheckFailure: `
+error:
+  values are not equal
+got:
+  "foo\n"
+want:
+  "bar\n"
+`,
+}, {
+	about:   "Equals: different strings starting with newline",
+	checker: Equals,
+	got:     "\nfoo",
+	args:    []interface{}{"\nbar"},
+	expectedCheckFailure: fmt.Sprintf(`
+error:
+  values are not equal
+line diff (-got +want):
+%s
+got:
+  "\nfoo"
+want:
+  "\nbar"
+`, diff([]string{"\n", "foo"}, []string{"\n", "bar"})),
+}, {
+	about:   "Equals: different types",
+	checker: Equals,
+	got:     42,
+	args:    []interface{}{"42"},
+	expectedCheckFailure: `
+error:
+  values are not equal
+got:
+  int(42)
+want:
+  "42"
+`,
+}, {
+	about:   "Equals: nil and nil",
+	checker: Equals,
+	got:     nil,
+	args:    []interface{}{nil},
+	expectedNegateFailure: `
+error:
+  unexpected success
+got:
+  nil
+want:
+  <same as "got">
+`,
+}, {
+	about:   "Equals: error is not nil",
+	checker: Equals,
+	got:     errBadWolf,
+	args:    []interface{}{nil},
+	expectedCheckFailure: `
+error:
+  got non-nil error
+got:
+  bad wolf
+    file:line
+want:
+  nil
+`,
+}, {
+	about:   "Equals: error is not nil: not formatted",
+	checker: Equals,
+	got: &errTest{
+		msg: "bad wolf",
+	},
+	args: []interface{}{nil},
+	expectedCheckFailure: `
+error:
+  got non-nil error
+got:
+  e"bad wolf"
+want:
+  nil
+`,
+}, {
+	about:   "Equals: error does not guard against nil",
+	checker: Equals,
+	got:     (*errTest)(nil),
+	args:    []interface{}{nil},
+	expectedCheckFailure: `
+error:
+  got non-nil error
+got:
+  e<nil>
+want:
+  nil
+`,
+}, {
+	about:   "Equals: error is not nil: not formatted and with quotes",
+	checker: Equals,
+	got: &errTest{
+		msg: `failure: "bad wolf"`,
+	},
+	args: []interface{}{nil},
+	expectedCheckFailure: tilde2bq(`
+error:
+  got non-nil error
+got:
+  e~failure: "bad wolf"~
+want:
+  nil
+`),
+}, {
+	about:   "Equals: different errors with same message",
+	checker: Equals,
+	got: &errTest{
+		msg: "bad wolf",
+	},
+	args: []interface{}{errors.New("bad wolf")},
+	expectedCheckFailure: `
+error:
+  values are not equal
+got type:
+  *quicktest_test.errTest
+want type:
+  *errors.errorString
+got:
+  e"bad wolf"
+want:
+  <same as "got" but different pointer value>
+`,
+}, {
+	about:   "Equals: different pointer errors with the same message",
+	checker: Equals,
+	got: &errTest{
+		msg: "bad wolf",
+	},
+	args: []interface{}{&errTest{
+		msg: "bad wolf",
+	}},
+	expectedCheckFailure: `
+error:
+  values are not equal
+got:
+  e"bad wolf"
+want:
+  <same as "got" but different pointer value>
+`,
+}, {
+	about:   "Equals: different pointers with the same formatted output",
+	checker: Equals,
+	got:     new(int),
+	args:    []interface{}{new(int)},
+	expectedCheckFailure: `
+error:
+  values are not equal
+got:
+  &int(0)
+want:
+  <same as "got" but different pointer value>
+`,
+}, {
+	about:   "Equals: nil struct",
+	checker: Equals,
+	got:     (*struct{})(nil),
+	args:    []interface{}{nil},
+	expectedCheckFailure: `
+error:
+  values are not equal
+got:
+  (*struct {})(nil)
+want:
+  nil
+`,
+}, {
+	about:   "Equals: different booleans",
+	checker: Equals,
+	got:     true,
+	args:    []interface{}{false},
+	expectedCheckFailure: `
+error:
+  values are not equal
+got:
+  bool(true)
+want:
+  bool(false)
+`,
+}, {
+	about:   "Equals: uncomparable types",
+	checker: Equals,
+	got: struct {
+		Ints []int
+	}{
+		Ints: []int{42, 47},
+	},
+	args: []interface{}{struct {
+		Ints []int
+	}{
+		Ints: []int{42, 47},
+	}},
+	expectedCheckFailure: `
+error:
+  runtime error: comparing uncomparable type struct { Ints []int }
+got:
+  struct { Ints []int }{
+      Ints: {42, 47},
+  }
+want:
+  <same as "got">
+`,
+}, {
+	about:   "Equals: not enough arguments",
+	checker: Equals,
+	expectedCheckFailure: `
+error:
+  bad check: not enough arguments provided to checker: got 0, want 1
+want args:
+  want
+`,
+	expectedNegateFailure: `
+error:
+  bad check: not enough arguments provided to checker: got 0, want 1
+want args:
+  want
+`,
+}, {
+	about:   "Equals: too many arguments",
+	checker: Equals,
+	args:    []interface{}{nil, 47},
+	expectedCheckFailure: `
+error:
+  bad check: too many arguments provided to checker: got 2, want 1
+got args:
+  []interface {}{
+      nil,
+      int(47),
+  }
+want args:
+  want
+`,
+	expectedNegateFailure: `
+error:
+  bad check: too many arguments provided to checker: got 2, want 1
+got args:
+  []interface {}{
+      nil,
+      int(47),
+  }
+want args:
+  want
+`,
+}, {
+	about:   "CmpEquals: same values",
+	checker: CmpEquals(),
+	got:     cmpEqualsGot,
+	args:    []interface{}{cmpEqualsGot},
+	expectedNegateFailure: `
+error:
+  unexpected success
+got:
+  struct { Strings []interface {}; Ints []int }{
+      Strings: {
+          "who",
+          "dalek",
+      },
+      Ints: {42, 47},
+  }
+want:
+  <same as "got">
+`,
+}, {
+	about:   "CmpEquals: different values",
+	checker: CmpEquals(),
+	got:     cmpEqualsGot,
+	args:    []interface{}{cmpEqualsWant},
+	expectedCheckFailure: fmt.Sprintf(`
+error:
+  values are not deep equal
+diff (-got +want):
+%s
+got:
+  struct { Strings []interface {}; Ints []int }{
+      Strings: {
+          "who",
+          "dalek",
+      },
+      Ints: {42, 47},
+  }
+want:
+  struct { Strings []interface {}; Ints []int }{
+      Strings: {
+          "who",
+          "dalek",
+      },
+      Ints: {42},
+  }
+`, diff(cmpEqualsGot, cmpEqualsWant)),
+}, {
+	about:   "CmpEquals: different values, long output",
+	checker: CmpEquals(),
+	got:     []interface{}{cmpEqualsWant, "extra line 1", "extra line 2", "extra line 3"},
+	args:    []interface{}{[]interface{}{cmpEqualsWant, "extra line 1"}},
+	expectedCheckFailure: fmt.Sprintf(`
+error:
+  values are not deep equal
+diff (-got +want):
+%s
+got:
+  <suppressed due to length (11 lines), use -v for full output>
+want:
+  []interface {}{
+      struct { Strings []interface {}; Ints []int }{
+          Strings: {
+              "who",
+              "dalek",
+          },
+          Ints: {42},
+      },
+      "extra line 1",
+  }
+`, diff([]interface{}{cmpEqualsWant, "extra line 1", "extra line 2", "extra line 3"}, []interface{}{cmpEqualsWant, "extra line 1"})),
+}, {
+	about:   "CmpEquals: different values: long output and verbose",
+	checker: CmpEquals(),
+	got:     []interface{}{cmpEqualsWant, "extra line 1", "extra line 2"},
+	args:    []interface{}{[]interface{}{cmpEqualsWant, "extra line 1"}},
+	verbose: true,
+	expectedCheckFailure: fmt.Sprintf(`
+error:
+  values are not deep equal
+diff (-got +want):
+%s
+got:
+  []interface {}{
+      struct { Strings []interface {}; Ints []int }{
+          Strings: {
+              "who",
+              "dalek",
+          },
+          Ints: {42},
+      },
+      "extra line 1",
+      "extra line 2",
+  }
+want:
+  []interface {}{
+      struct { Strings []interface {}; Ints []int }{
+          Strings: {
+              "who",
+              "dalek",
+          },
+          Ints: {42},
+      },
+      "extra line 1",
+  }
+`, diff([]interface{}{cmpEqualsWant, "extra line 1", "extra line 2"}, []interface{}{cmpEqualsWant, "extra line 1"})),
+}, {
+	about:   "CmpEquals: different values, long output, same number of lines",
+	checker: CmpEquals(),
+	got:     []interface{}{cmpEqualsWant, "extra line 1", "extra line 2", "extra line 3"},
+	args:    []interface{}{[]interface{}{cmpEqualsWant, "extra line 1", "extra line 2", "extra line three"}},
+	expectedCheckFailure: fmt.Sprintf(`
+error:
+  values are not deep equal
+diff (-got +want):
+%s
+got:
+  <suppressed due to length (11 lines), use -v for full output>
+want:
+  <suppressed due to length (11 lines), use -v for full output>
+`, diff([]interface{}{cmpEqualsWant, "extra line 1", "extra line 2", "extra line 3"}, []interface{}{cmpEqualsWant, "extra line 1", "extra line 2", "extra line three"})),
+}, {
+	about:   "CmpEquals: same values with options",
+	checker: CmpEquals(sameInts),
+	got:     []int{1, 2, 3},
+	args: []interface{}{
+		[]int{3, 2, 1},
+	},
+	expectedNegateFailure: `
+error:
+  unexpected success
+got:
+  []int{1, 2, 3}
+want:
+  []int{3, 2, 1}
+`,
+}, {
+	about:   "CmpEquals: different values with options",
+	checker: CmpEquals(sameInts),
+	got:     []int{1, 2, 4},
+	args: []interface{}{
+		[]int{3, 2, 1},
+	},
+	expectedCheckFailure: fmt.Sprintf(`
+error:
+  values are not deep equal
+diff (-got +want):
+%s
+got:
+  []int{1, 2, 4}
+want:
+  []int{3, 2, 1}
+`, diff([]int{1, 2, 4}, []int{3, 2, 1}, sameInts)),
+}, {
+	about:   "CmpEquals: structs with unexported fields not allowed",
+	checker: CmpEquals(),
+	got: struct{ answer int }{
+		answer: 42,
+	},
+	args: []interface{}{
+		struct{ answer int }{
+			answer: 42,
+		},
+	},
+	expectedCheckFailure: `
+error:
+  bad check: cannot handle unexported field at root.answer:
+  	"github.com/frankban/quicktest_test".(struct { answer int })
+  consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported
+`,
+	expectedNegateFailure: `
+error:
+  bad check: cannot handle unexported field at root.answer:
+  	"github.com/frankban/quicktest_test".(struct { answer int })
+  consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported
+`,
+}, {
+	about:   "CmpEquals: structs with unexported fields ignored",
+	checker: CmpEquals(cmpopts.IgnoreUnexported(struct{ answer int }{})),
+	got: struct{ answer int }{
+		answer: 42,
+	},
+	args: []interface{}{
+		struct{ answer int }{
+			answer: 42,
+		},
+	},
+	expectedNegateFailure: `
+error:
+  unexpected success
+got:
+  struct { answer int }{answer:42}
+want:
+  <same as "got">
+`,
+}, {
+	about:   "CmpEquals: same times",
+	checker: CmpEquals(),
+	got:     goTime,
+	args: []interface{}{
+		goTime,
+	},
+	expectedNegateFailure: `
+error:
+  unexpected success
+got:
+  s"2012-03-28 00:00:00 +0000 UTC"
+want:
+  <same as "got">
+`,
+}, {
+	about:   "CmpEquals: different times: verbose",
+	checker: CmpEquals(),
+	got:     goTime.Add(24 * time.Hour),
+	args: []interface{}{
+		goTime,
+	},
+	verbose: true,
+	expectedCheckFailure: fmt.Sprintf(`
+error:
+  values are not deep equal
+diff (-got +want):
+%s
+got:
+  s"2012-03-29 00:00:00 +0000 UTC"
+want:
+  s"2012-03-28 00:00:00 +0000 UTC"
+`, diff(goTime.Add(24*time.Hour), goTime)),
+}, {
+	about:   "CmpEquals: not enough arguments",
+	checker: CmpEquals(),
+	expectedCheckFailure: `
+error:
+  bad check: not enough arguments provided to checker: got 0, want 1
+want args:
+  want
+`,
+	expectedNegateFailure: `
+error:
+  bad check: not enough arguments provided to checker: got 0, want 1
+want args:
+  want
+`,
+}, {
+	about:   "CmpEquals: too many arguments",
+	checker: CmpEquals(),
+	got:     []int{42},
+	args:    []interface{}{[]int{42}, "bad wolf"},
+	expectedCheckFailure: `
+error:
+  bad check: too many arguments provided to checker: got 2, want 1
+got args:
+  []interface {}{
+      []int{42},
+      "bad wolf",
+  }
+want args:
+  want
+`,
+	expectedNegateFailure: `
+error:
+  bad check: too many arguments provided to checker: got 2, want 1
+got args:
+  []interface {}{
+      []int{42},
+      "bad wolf",
+  }
+want args:
+  want
+`,
+}, {
+	about:   "DeepEquals: different values",
+	checker: DeepEquals,
+	got:     cmpEqualsGot,
+	args:    []interface{}{cmpEqualsWant},
+	expectedCheckFailure: fmt.Sprintf(`
+error:
+  values are not deep equal
+diff (-got +want):
+%s
+got:
+  struct { Strings []interface {}; Ints []int }{
+      Strings: {
+          "who",
+          "dalek",
+      },
+      Ints: {42, 47},
+  }
+want:
+  struct { Strings []interface {}; Ints []int }{
+      Strings: {
+          "who",
+          "dalek",
+      },
+      Ints: {42},
+  }
+`, diff(cmpEqualsGot, cmpEqualsWant)),
+}, {
+	about:   "DeepEquals: different values: long output",
+	checker: DeepEquals,
+	got:     []interface{}{cmpEqualsWant, cmpEqualsWant},
+	args:    []interface{}{[]interface{}{cmpEqualsWant, cmpEqualsWant, 42}},
+	expectedCheckFailure: fmt.Sprintf(`
+error:
+  values are not deep equal
+diff (-got +want):
+%s
+got:
+  <suppressed due to length (15 lines), use -v for full output>
+want:
+  <suppressed due to length (16 lines), use -v for full output>
+`, diff([]interface{}{cmpEqualsWant, cmpEqualsWant}, []interface{}{cmpEqualsWant, cmpEqualsWant, 42})),
+}, {
+	about:   "DeepEquals: different values: long output and verbose",
+	checker: DeepEquals,
+	got:     []interface{}{cmpEqualsWant, cmpEqualsWant},
+	args:    []interface{}{[]interface{}{cmpEqualsWant, cmpEqualsWant, 42}},
+	verbose: true,
+	expectedCheckFailure: fmt.Sprintf(`
+error:
+  values are not deep equal
+diff (-got +want):
+%s
+got:
+  []interface {}{
+      struct { Strings []interface {}; Ints []int }{
+          Strings: {
+              "who",
+              "dalek",
+          },
+          Ints: {42},
+      },
+      struct { Strings []interface {}; Ints []int }{
+          Strings: {
+              "who",
+              "dalek",
+          },
+          Ints: {42},
+      },
+  }
+want:
+  []interface {}{
+      struct { Strings []interface {}; Ints []int }{
+          Strings: {
+              "who",
+              "dalek",
+          },
+          Ints: {42},
+      },
+      struct { Strings []interface {}; Ints []int }{
+          Strings: {
+              "who",
+              "dalek",
+          },
+          Ints: {42},
+      },
+      int(42),
+  }
+`, diff([]interface{}{cmpEqualsWant, cmpEqualsWant}, []interface{}{cmpEqualsWant, cmpEqualsWant, 42})),
+}, {
+	about:   "ContentEquals: same values",
+	checker: ContentEquals,
+	got:     []string{"these", "are", "the", "voyages"},
+	args: []interface{}{
+		[]string{"these", "are", "the", "voyages"},
+	},
+	expectedNegateFailure: `
+error:
+  unexpected success
+got:
+  []string{"these", "are", "the", "voyages"}
+want:
+  <same as "got">
+`,
+}, {
+	about:   "ContentEquals: same contents",
+	checker: ContentEquals,
+	got:     []int{1, 2, 3},
+	args: []interface{}{
+		[]int{3, 2, 1},
+	},
+	expectedNegateFailure: `
+error:
+  unexpected success
+got:
+  []int{1, 2, 3}
+want:
+  []int{3, 2, 1}
+`,
+}, {
+	about:   "ContentEquals: same contents on complex slice",
+	checker: ContentEquals,
+	got: []struct {
+		Strings []interface{}
+		Ints    []int
+	}{cmpEqualsGot, cmpEqualsGot, cmpEqualsWant},
+	args: []interface{}{
+		[]struct {
+			Strings []interface{}
+			Ints    []int
+		}{cmpEqualsWant, cmpEqualsGot, cmpEqualsGot},
+	},
+	expectedNegateFailure: `
+error:
+  unexpected success
+got:
+  []struct { Strings []interface {}; Ints []int }{
+      {
+          Strings: {
+              "who",
+              "dalek",
+          },
+          Ints: {42, 47},
+      },
+      {
+          Strings: {
+              "who",
+              "dalek",
+          },
+          Ints: {42, 47},
+      },
+      {
+          Strings: {
+              "who",
+              "dalek",
+          },
+          Ints: {42},
+      },
+  }
+want:
+  []struct { Strings []interface {}; Ints []int }{
+      {
+          Strings: {
+              "who",
+              "dalek",
+          },
+          Ints: {42},
+      },
+      {
+          Strings: {
+              "who",
+              "dalek",
+          },
+          Ints: {42, 47},
+      },
+      {
+          Strings: {
+              "who",
+              "dalek",
+          },
+          Ints: {42, 47},
+      },
+  }
+`,
+}, {
+	about:   "ContentEquals: same contents on a nested slice",
+	checker: ContentEquals,
+	got: struct {
+		Nums []int
+	}{
+		Nums: []int{1, 2, 3, 4},
+	},
+	args: []interface{}{
+		struct {
+			Nums []int
+		}{
+			Nums: []int{4, 3, 2, 1},
+		},
+	},
+	expectedNegateFailure: `
+error:
+  unexpected success
+got:
+  struct { Nums []int }{
+      Nums: {1, 2, 3, 4},
+  }
+want:
+  struct { Nums []int }{
+      Nums: {4, 3, 2, 1},
+  }
+`,
+}, {
+	about:   "ContentEquals: slices of different type",
+	checker: ContentEquals,
+	got:     []string{"bad", "wolf"},
+	args: []interface{}{
+		[]interface{}{"bad", "wolf"},
+	},
+	expectedCheckFailure: fmt.Sprintf(`
+error:
+  values are not deep equal
+diff (-got +want):
+%s
+got:
+  []string{"bad", "wolf"}
+want:
+  []interface {}{
+      "bad",
+      "wolf",
+  }
+`, diff([]string{"bad", "wolf"}, []interface{}{"bad", "wolf"})),
+}, {
+	about:   "ContentEquals: not enough arguments",
+	checker: ContentEquals,
+	expectedCheckFailure: `
+error:
+  bad check: not enough arguments provided to checker: got 0, want 1
+want args:
+  want
+`,
+	expectedNegateFailure: `
+error:
+  bad check: not enough arguments provided to checker: got 0, want 1
+want args:
+  want
+`,
+}, {
+	about:   "ContentEquals: too many arguments",
+	checker: ContentEquals,
+	args:    []interface{}{nil, nil},
+	expectedCheckFailure: `
+error:
+  bad check: too many arguments provided to checker: got 2, want 1
+got args:
+  []interface {}{
+      nil,
+      nil,
+  }
+want args:
+  want
+`,
+	expectedNegateFailure: `
+error:
+  bad check: too many arguments provided to checker: got 2, want 1
+got args:
+  []interface {}{
+      nil,
+      nil,
+  }
+want args:
+  want
+`,
+}, {
+	about:   "Matches: perfect match",
+	checker: Matches,
+	got:     "exterminate",
+	args:    []interface{}{"exterminate"},
+	expectedNegateFailure: `
+error:
+  unexpected success
+got value:
+  "exterminate"
+regexp:
+  <same as "got value">
+`,
+}, {
+	about:   "Matches: match",
+	checker: Matches,
+	got:     "these are the voyages",
+	args:    []interface{}{"these are the .*"},
+	expectedNegateFailure: `
+error:
+  unexpected success
+got value:
+  "these are the voyages"
+regexp:
+  "these are the .*"
+`,
+}, {
+	about:   "Matches: match with pre-compiled regexp",
+	checker: Matches,
+	got:     bytes.NewBufferString("resistance is futile"),
+	args:    []interface{}{regexp.MustCompile("resistance is (futile|useful)")},
+	expectedNegateFailure: `
+error:
+  unexpected success
+got value:
+  s"resistance is futile"
+regexp:
+  s"resistance is (futile|useful)"
+`,
+}, {
+	about:   "Matches: mismatch with pre-compiled regexp",
+	checker: Matches,
+	got:     bytes.NewBufferString("resistance is cool"),
+	args:    []interface{}{regexp.MustCompile("resistance is (futile|useful)")},
+	expectedCheckFailure: `
+error:
+  value.String() does not match regexp
+got value:
+  s"resistance is cool"
+regexp:
+  s"resistance is (futile|useful)"
+`,
+}, {
+	about:   "Matches: match with pre-compiled multi-line regexp",
+	checker: Matches,
+	got:     bytes.NewBufferString("line 1\nline 2"),
+	args:    []interface{}{regexp.MustCompile(`line \d\nline \d`)},
+	expectedNegateFailure: `
+error:
+  unexpected success
+got value:
+  s"line 1\nline 2"
+regexp:
+  s"line \\d\\nline \\d"
+`,
+}, {
+	about:   "Matches: match with stringer",
+	checker: Matches,
+	got:     bytes.NewBufferString("resistance is futile"),
+	args:    []interface{}{"resistance is (futile|useful)"},
+	expectedNegateFailure: `
+error:
+  unexpected success
+got value:
+  s"resistance is futile"
+regexp:
+  "resistance is (futile|useful)"
+`,
+}, {
+	about:   "Matches: mismatch",
+	checker: Matches,
+	got:     "voyages",
+	args:    []interface{}{"these are the voyages"},
+	expectedCheckFailure: `
+error:
+  value does not match regexp
+got value:
+  "voyages"
+regexp:
+  "these are the voyages"
+`,
+}, {
+	about:   "Matches: mismatch with stringer",
+	checker: Matches,
+	got:     bytes.NewBufferString("voyages"),
+	args:    []interface{}{"these are the voyages"},
+	expectedCheckFailure: `
+error:
+  value.String() does not match regexp
+got value:
+  s"voyages"
+regexp:
+  "these are the voyages"
+`,
+}, {
+	about:   "Matches: empty pattern",
+	checker: Matches,
+	got:     "these are the voyages",
+	args:    []interface{}{""},
+	expectedCheckFailure: `
+error:
+  value does not match regexp
+got value:
+  "these are the voyages"
+regexp:
+  ""
+`,
+}, {
+	about:   "Matches: complex pattern",
+	checker: Matches,
+	got:     "end of the universe",
+	args:    []interface{}{"bad wolf|end of the .*"},
+	expectedNegateFailure: `
+error:
+  unexpected success
+got value:
+  "end of the universe"
+regexp:
+  "bad wolf|end of the .*"
+`,
+}, {
+	about:   "Matches: invalid pattern",
+	checker: Matches,
+	got:     "voyages",
+	args:    []interface{}{"("},
+	expectedCheckFailure: `
+error:
+  bad check: cannot compile regexp: error parsing regexp: missing closing ): ` + "`^(()$`" + `
+regexp:
+  "("
+`,
+	expectedNegateFailure: `
+error:
+  bad check: cannot compile regexp: error parsing regexp: missing closing ): ` + "`^(()$`" + `
+regexp:
+  "("
+`,
+}, {
+	about:   "Matches: pattern not a string",
+	checker: Matches,
+	got:     "",
+	args:    []interface{}{[]int{42}},
+	expectedCheckFailure: `
+error:
+  bad check: regexp is not a string
+regexp:
+  []int{42}
+`,
+	expectedNegateFailure: `
+error:
+  bad check: regexp is not a string
+regexp:
+  []int{42}
+`,
+}, {
+	about:   "Matches: not a string or as stringer",
+	checker: Matches,
+	got:     42,
+	args:    []interface{}{".*"},
+	expectedCheckFailure: `
+error:
+  bad check: value is not a string or a fmt.Stringer
+value:
+  int(42)
+`,
+	expectedNegateFailure: `
+error:
+  bad check: value is not a string or a fmt.Stringer
+value:
+  int(42)
+`,
+}, {
+	about:   "Matches: not enough arguments",
+	checker: Matches,
+	expectedCheckFailure: `
+error:
+  bad check: not enough arguments provided to checker: got 0, want 1
+want args:
+  regexp
+`,
+	expectedNegateFailure: `
+error:
+  bad check: not enough arguments provided to checker: got 0, want 1
+want args:
+  regexp
+`,
+}, {
+	about:   "Matches: too many arguments",
+	checker: Matches,
+	got:     "these are the voyages",
+	args:    []interface{}{"these are the .*", nil},
+	expectedCheckFailure: `
+error:
+  bad check: too many arguments provided to checker: got 2, want 1
+got args:
+  []interface {}{
+      "these are the .*",
+      nil,
+  }
+want args:
+  regexp
+`,
+	expectedNegateFailure: `
+error:
+  bad check: too many arguments provided to checker: got 2, want 1
+got args:
+  []interface {}{
+      "these are the .*",
+      nil,
+  }
+want args:
+  regexp
+`,
+}, {
+	about:   "ErrorMatches: perfect match",
+	checker: ErrorMatches,
+	got:     errBadWolf,
+	args:    []interface{}{"bad wolf"},
+	expectedNegateFailure: `
+error:
+  unexpected success
+got error:
+  bad wolf
+    file:line
+regexp:
+  "bad wolf"
+`,
+}, {
+	about:   "ErrorMatches: match",
+	checker: ErrorMatches,
+	got:     errBadWolf,
+	args:    []interface{}{"bad .*"},
+	expectedNegateFailure: `
+error:
+  unexpected success
+got error:
+  bad wolf
+    file:line
+regexp:
+  "bad .*"
+`,
+}, {
+	about:   "ErrorMatches: mismatch",
+	checker: ErrorMatches,
+	got:     errBadWolf,
+	args:    []interface{}{"exterminate"},
+	expectedCheckFailure: `
+error:
+  error does not match regexp
+got error:
+  bad wolf
+    file:line
+regexp:
+  "exterminate"
+`,
+}, {
+	about:   "ErrorMatches: empty pattern",
+	checker: ErrorMatches,
+	got:     errBadWolf,
+	args:    []interface{}{""},
+	expectedCheckFailure: `
+error:
+  error does not match regexp
+got error:
+  bad wolf
+    file:line
+regexp:
+  ""
+`,
+}, {
+	about:   "ErrorMatches: complex pattern",
+	checker: ErrorMatches,
+	got:     errBadWolf,
+	args:    []interface{}{"bad wolf|end of the universe"},
+	expectedNegateFailure: `
+error:
+  unexpected success
+got error:
+  bad wolf
+    file:line
+regexp:
+  "bad wolf|end of the universe"
+`,
+}, {
+	about:   "ErrorMatches: invalid pattern",
+	checker: ErrorMatches,
+	got:     errBadWolf,
+	args:    []interface{}{"("},
+	expectedCheckFailure: `
+error:
+  bad check: cannot compile regexp: error parsing regexp: missing closing ): ` + "`^(()$`" + `
+regexp:
+  "("
+`,
+	expectedNegateFailure: `
+error:
+  bad check: cannot compile regexp: error parsing regexp: missing closing ): ` + "`^(()$`" + `
+regexp:
+  "("
+`,
+}, {
+	about:   "ErrorMatches: pattern not a string",
+	checker: ErrorMatches,
+	got:     errBadWolf,
+	args:    []interface{}{[]int{42}},
+	expectedCheckFailure: `
+error:
+  bad check: regexp is not a string
+regexp:
+  []int{42}
+`,
+	expectedNegateFailure: `
+error:
+  bad check: regexp is not a string
+regexp:
+  []int{42}
+`,
+}, {
+	about:   "ErrorMatches: not an error",
+	checker: ErrorMatches,
+	got:     42,
+	args:    []interface{}{".*"},
+	expectedCheckFailure: `
+error:
+  bad check: first argument is not an error
+got:
+  int(42)
+`,
+	expectedNegateFailure: `
+error:
+  bad check: first argument is not an error
+got:
+  int(42)
+`,
+}, {
+	about:   "ErrorMatches: nil error",
+	checker: ErrorMatches,
+	got:     nil,
+	args:    []interface{}{"some pattern"},
+	expectedCheckFailure: `
+error:
+  got nil error but want non-nil
+got error:
+  nil
+regexp:
+  "some pattern"
+`,
+}, {
+	about:   "ErrorMatches: not enough arguments",
+	checker: ErrorMatches,
+	expectedCheckFailure: `
+error:
+  bad check: not enough arguments provided to checker: got 0, want 1
+want args:
+  regexp
+`,
+	expectedNegateFailure: `
+error:
+  bad check: not enough arguments provided to checker: got 0, want 1
+want args:
+  regexp
+`,
+}, {
+	about:   "ErrorMatches: too many arguments",
+	checker: ErrorMatches,
+	got:     errBadWolf,
+	args:    []interface{}{"bad wolf", []string{"bad", "wolf"}},
+	expectedCheckFailure: `
+error:
+  bad check: too many arguments provided to checker: got 2, want 1
+got args:
+  []interface {}{
+      "bad wolf",
+      []string{"bad", "wolf"},
+  }
+want args:
+  regexp
+`,
+	expectedNegateFailure: `
+error:
+  bad check: too many arguments provided to checker: got 2, want 1
+got args:
+  []interface {}{
+      "bad wolf",
+      []string{"bad", "wolf"},
+  }
+want args:
+  regexp
+`,
+}, {
+	about:   "ErrorMatches: match with pre-compiled regexp",
+	checker: ErrorMatches,
+	got:     errBadWolf,
+	args:    []interface{}{regexp.MustCompile("bad (wolf|dog)")},
+	expectedNegateFailure: `
+error:
+  unexpected success
+got error:
+  bad wolf
+    file:line
+regexp:
+  s"bad (wolf|dog)"
+`,
+}, {
+	about:   "ErrorMatches: match with pre-compiled multi-line regexp",
+	checker: ErrorMatches,
+	got:     errBadWolfMultiLine,
+	args:    []interface{}{regexp.MustCompile(`bad (wolf|dog)\nfaulty (logic|statement)`)},
+	expectedNegateFailure: `
+error:
+  unexpected success
+got error:
+  bad wolf
+  faulty logic
+    file:line
+regexp:
+  s"bad (wolf|dog)\\nfaulty (logic|statement)"
+`,
+}, {
+	about:   "ErrorMatches: mismatch with pre-compiled regexp",
+	checker: ErrorMatches,
+	got:     errBadWolf,
+	args:    []interface{}{regexp.MustCompile("good (wolf|dog)")},
+	expectedCheckFailure: `
+error:
+  error does not match regexp
+got error:
+  bad wolf
+    file:line
+regexp:
+  s"good (wolf|dog)"
+`,
+}, {
+	about:   "PanicMatches: perfect match",
+	checker: PanicMatches,
+	got:     func() { panic("error: bad wolf") },
+	args:    []interface{}{"error: bad wolf"},
+	expectedNegateFailure: `
+error:
+  unexpected success
+panic value:
+  "error: bad wolf"
+function:
+  func() {...}
+regexp:
+  <same as "panic value">
+`,
+}, {
+	about:   "PanicMatches: match",
+	checker: PanicMatches,
+	got:     func() { panic("error: bad wolf") },
+	args:    []interface{}{"error: .*"},
+	expectedNegateFailure: `
+error:
+  unexpected success
+panic value:
+  "error: bad wolf"
+function:
+  func() {...}
+regexp:
+  "error: .*"
+`,
+}, {
+	about:   "PanicMatches: mismatch",
+	checker: PanicMatches,
+	got:     func() { panic("error: bad wolf") },
+	args:    []interface{}{"error: exterminate"},
+	expectedCheckFailure: `
+error:
+  panic value does not match regexp
+panic value:
+  "error: bad wolf"
+function:
+  func() {...}
+regexp:
+  "error: exterminate"
+`,
+}, {
+	about:   "PanicMatches: empty pattern",
+	checker: PanicMatches,
+	got:     func() { panic("error: bad wolf") },
+	args:    []interface{}{""},
+	expectedCheckFailure: `
+error:
+  panic value does not match regexp
+panic value:
+  "error: bad wolf"
+function:
+  func() {...}
+regexp:
+  ""
+`,
+}, {
+	about:   "PanicMatches: complex pattern",
+	checker: PanicMatches,
+	got:     func() { panic("bad wolf") },
+	args:    []interface{}{"bad wolf|end of the universe"},
+	expectedNegateFailure: `
+error:
+  unexpected success
+panic value:
+  "bad wolf"
+function:
+  func() {...}
+regexp:
+  "bad wolf|end of the universe"
+`,
+}, {
+	about:   "PanicMatches: invalid pattern",
+	checker: PanicMatches,
+	got:     func() { panic("error: bad wolf") },
+	args:    []interface{}{"("},
+	expectedCheckFailure: `
+error:
+  bad check: cannot compile regexp: error parsing regexp: missing closing ): ` + "`^(()$`" + `
+panic value:
+  "error: bad wolf"
+regexp:
+  "("
+`,
+	expectedNegateFailure: `
+error:
+  bad check: cannot compile regexp: error parsing regexp: missing closing ): ` + "`^(()$`" + `
+panic value:
+  "error: bad wolf"
+regexp:
+  "("
+`,
+}, {
+	about:   "PanicMatches: pattern not a string",
+	checker: PanicMatches,
+	got:     func() { panic("error: bad wolf") },
+	args:    []interface{}{nil},
+	expectedCheckFailure: `
+error:
+  bad check: regexp is not a string
+panic value:
+  "error: bad wolf"
+regexp:
+  nil
+`,
+	expectedNegateFailure: `
+error:
+  bad check: regexp is not a string
+panic value:
+  "error: bad wolf"
+regexp:
+  nil
+`,
+}, {
+	about:   "PanicMatches: match with pre-compiled regexp",
+	checker: PanicMatches,
+	got:     func() { panic("error: bad wolf") },
+	args:    []interface{}{regexp.MustCompile("error: bad (wolf|dog)")},
+	expectedNegateFailure: `
+error:
+  unexpected success
+panic value:
+  "error: bad wolf"
+function:
+  func() {...}
+regexp:
+  s"error: bad (wolf|dog)"
+`,
+}, {
+	about:   "PanicMatches: match with pre-compiled multi-line regexp",
+	checker: PanicMatches,
+	got:     func() { panic("error: bad wolf\nfaulty logic") },
+	args:    []interface{}{regexp.MustCompile(`error: bad (wolf|dog)\nfaulty (logic|statement)`)},
+	expectedNegateFailure: `
+error:
+  unexpected success
+panic value:
+  "error: bad wolf\nfaulty logic"
+function:
+  func() {...}
+regexp:
+  s"error: bad (wolf|dog)\\nfaulty (logic|statement)"
+`,
+}, {
+	about:   "PanicMatches: mismatch with pre-compiled regexp",
+	checker: PanicMatches,
+	got:     func() { panic("error: bad wolf") },
+	args:    []interface{}{regexp.MustCompile("good (wolf|dog)")},
+	expectedCheckFailure: `
+error:
+  panic value does not match regexp
+panic value:
+  "error: bad wolf"
+function:
+  func() {...}
+regexp:
+  s"good (wolf|dog)"
+`,
+}, {
+	about:   "PanicMatches: not a function",
+	checker: PanicMatches,
+	got:     map[string]int{"answer": 42},
+	args:    []interface{}{".*"},
+	expectedCheckFailure: `
+error:
+  bad check: first argument is not a function
+got:
+  map[string]int{"answer":42}
+`,
+	expectedNegateFailure: `
+error:
+  bad check: first argument is not a function
+got:
+  map[string]int{"answer":42}
+`,
+}, {
+	about:   "PanicMatches: not a proper function",
+	checker: PanicMatches,
+	got:     func(int) { panic("error: bad wolf") },
+	args:    []interface{}{".*"},
+	expectedCheckFailure: `
+error:
+  bad check: cannot use a function receiving arguments
+function:
+  func(int) {...}
+`,
+	expectedNegateFailure: `
+error:
+  bad check: cannot use a function receiving arguments
+function:
+  func(int) {...}
+`,
+}, {
+	about:   "PanicMatches: function returning something",
+	checker: PanicMatches,
+	got:     func() error { panic("error: bad wolf") },
+	args:    []interface{}{".*"},
+	expectedNegateFailure: `
+error:
+  unexpected success
+panic value:
+  "error: bad wolf"
+function:
+  func() error {...}
+regexp:
+  ".*"
+`,
+}, {
+	about:   "PanicMatches: no panic",
+	checker: PanicMatches,
+	got:     func() {},
+	args:    []interface{}{".*"},
+	expectedCheckFailure: `
+error:
+  function did not panic
+function:
+  func() {...}
+regexp:
+  ".*"
+`,
+}, {
+	about:   "PanicMatches: not enough arguments",
+	checker: PanicMatches,
+	expectedCheckFailure: `
+error:
+  bad check: not enough arguments provided to checker: got 0, want 1
+want args:
+  regexp
+`,
+	expectedNegateFailure: `
+error:
+  bad check: not enough arguments provided to checker: got 0, want 1
+want args:
+  regexp
+`,
+}, {
+	about:   "PanicMatches: too many arguments",
+	checker: PanicMatches,
+	got:     func() { panic("error: bad wolf") },
+	args:    []interface{}{"error: bad wolf", 42},
+	expectedCheckFailure: `
+error:
+  bad check: too many arguments provided to checker: got 2, want 1
+got args:
+  []interface {}{
+      "error: bad wolf",
+      int(42),
+  }
+want args:
+  regexp
+`,
+	expectedNegateFailure: `
+error:
+  bad check: too many arguments provided to checker: got 2, want 1
+got args:
+  []interface {}{
+      "error: bad wolf",
+      int(42),
+  }
+want args:
+  regexp
+`,
+}, {
+	about:   "IsNil: nil",
+	checker: IsNil,
+	got:     nil,
+	expectedNegateFailure: `
+error:
+  got nil value but want non-nil
+got:
+  nil
+`,
+}, {
+	about:   "IsNil: nil struct",
+	checker: IsNil,
+	got:     (*struct{})(nil),
+	expectedNegateFailure: `
+error:
+  got nil value but want non-nil
+got:
+  (*struct {})(nil)
+`,
+}, {
+	about:   "IsNil: nil func",
+	checker: IsNil,
+	got:     (func())(nil),
+	expectedNegateFailure: `
+error:
+  got nil value but want non-nil
+got:
+  func() {...}
+`,
+}, {
+	about:   "IsNil: nil map",
+	checker: IsNil,
+	got:     (map[string]string)(nil),
+	expectedNegateFailure: `
+error:
+  got nil value but want non-nil
+got:
+  map[string]string{}
+`,
+}, {
+	about:   "IsNil: nil slice",
+	checker: IsNil,
+	got:     ([]int)(nil),
+	expectedNegateFailure: `
+error:
+  got nil value but want non-nil
+got:
+  []int(nil)
+`,
+}, {
+	about:   "IsNil: nil error-implementing type",
+	checker: IsNil,
+	got:     (*errTest)(nil),
+	expectedCheckFailure: `
+error:
+  error containing nil value of type *quicktest_test.errTest. See https://golang.org/doc/faq#nil_error
+got:
+  e<nil>
+`,
+}, {
+	about:   "IsNil: not nil",
+	checker: IsNil,
+	got:     42,
+	expectedCheckFailure: `
+error:
+  got non-nil value
+got:
+  int(42)
+`,
+}, {
+	about:   "IsNil: error is not nil",
+	checker: IsNil,
+	got:     errBadWolf,
+	expectedCheckFailure: `
+error:
+  got non-nil error
+got:
+  bad wolf
+    file:line
+`,
+}, {
+	about:   "IsNil: too many arguments",
+	checker: IsNil,
+	args:    []interface{}{"not nil"},
+	expectedCheckFailure: `
+error:
+  bad check: too many arguments provided to checker: got 1, want 0
+got args:
+  []interface {}{
+      "not nil",
+  }
+`,
+	expectedNegateFailure: `
+error:
+  bad check: too many arguments provided to checker: got 1, want 0
+got args:
+  []interface {}{
+      "not nil",
+  }
+`,
+}, {
+	about:   "IsNotNil: success",
+	checker: IsNotNil,
+	got:     42,
+	expectedNegateFailure: `
+error:
+  got non-nil value
+got:
+  int(42)
+`,
+}, {
+	about:   "IsNotNil: failure",
+	checker: IsNotNil,
+	got:     nil,
+	expectedCheckFailure: `
+error:
+  got nil value but want non-nil
+got:
+  nil
+`,
+}, {
+	about:   "HasLen: arrays with the same length",
+	checker: HasLen,
+	got:     [4]string{"these", "are", "the", "voyages"},
+	args:    []interface{}{4},
+	expectedNegateFailure: `
+error:
+  unexpected success
+len(got):
+  int(4)
+got:
+  [4]string{"these", "are", "the", "voyages"}
+want length:
+  <same as "len(got)">
+`,
+}, {
+	about:   "HasLen: channels with the same length",
+	checker: HasLen,
+	got:     chInt,
+	args:    []interface{}{2},
+	expectedNegateFailure: fmt.Sprintf(`
+error:
+  unexpected success
+len(got):
+  int(2)
+got:
+  (chan int)(%v)
+want length:
+  <same as "len(got)">
+`, chInt),
+}, {
+	about:   "HasLen: maps with the same length",
+	checker: HasLen,
+	got:     map[string]bool{"true": true},
+	args:    []interface{}{1},
+	expectedNegateFailure: `
+error:
+  unexpected success
+len(got):
+  int(1)
+got:
+  map[string]bool{"true":true}
+want length:
+  <same as "len(got)">
+`,
+}, {
+	about:   "HasLen: slices with the same length",
+	checker: HasLen,
+	got:     []int{},
+	args:    []interface{}{0},
+	expectedNegateFailure: `
+error:
+  unexpected success
+len(got):
+  int(0)
+got:
+  []int{}
+want length:
+  <same as "len(got)">
+`,
+}, {
+	about:   "HasLen: strings with the same length",
+	checker: HasLen,
+	got:     "these are the voyages",
+	args:    []interface{}{21},
+	expectedNegateFailure: `
+error:
+  unexpected success
+len(got):
+  int(21)
+got:
+  "these are the voyages"
+want length:
+  <same as "len(got)">
+`,
+}, {
+	about:   "HasLen: arrays with different lengths",
+	checker: HasLen,
+	got:     [4]string{"these", "are", "the", "voyages"},
+	args:    []interface{}{0},
+	expectedCheckFailure: `
+error:
+  unexpected length
+len(got):
+  int(4)
+got:
+  [4]string{"these", "are", "the", "voyages"}
+want length:
+  int(0)
+`,
+}, {
+	about:   "HasLen: channels with different lengths",
+	checker: HasLen,
+	got:     chInt,
+	args:    []interface{}{4},
+	expectedCheckFailure: fmt.Sprintf(`
+error:
+  unexpected length
+len(got):
+  int(2)
+got:
+  (chan int)(%v)
+want length:
+  int(4)
+`, chInt),
+}, {
+	about:   "HasLen: maps with different lengths",
+	checker: HasLen,
+	got:     map[string]bool{"true": true},
+	args:    []interface{}{42},
+	expectedCheckFailure: `
+error:
+  unexpected length
+len(got):
+  int(1)
+got:
+  map[string]bool{"true":true}
+want length:
+  int(42)
+`,
+}, {
+	about:   "HasLen: slices with different lengths",
+	checker: HasLen,
+	got:     []int{42, 47},
+	args:    []interface{}{1},
+	expectedCheckFailure: `
+error:
+  unexpected length
+len(got):
+  int(2)
+got:
+  []int{42, 47}
+want length:
+  int(1)
+`,
+}, {
+	about:   "HasLen: strings with different lengths",
+	checker: HasLen,
+	got:     "these are the voyages",
+	args:    []interface{}{42},
+	expectedCheckFailure: `
+error:
+  unexpected length
+len(got):
+  int(21)
+got:
+  "these are the voyages"
+want length:
+  int(42)
+`,
+}, {
+	about:   "HasLen: value without a length",
+	checker: HasLen,
+	got:     42,
+	args:    []interface{}{42},
+	expectedCheckFailure: `
+error:
+  bad check: first argument has no length
+got:
+  int(42)
+`,
+	expectedNegateFailure: `
+error:
+  bad check: first argument has no length
+got:
+  int(42)
+`,
+}, {
+	about:   "HasLen: expected value not a number",
+	checker: HasLen,
+	got:     "these are the voyages",
+	args:    []interface{}{"bad wolf"},
+	expectedCheckFailure: `
+error:
+  bad check: length is not an int
+length:
+  "bad wolf"
+`,
+	expectedNegateFailure: `
+error:
+  bad check: length is not an int
+length:
+  "bad wolf"
+`,
+}, {
+	about:   "HasLen: not enough arguments",
+	checker: HasLen,
+	expectedCheckFailure: `
+error:
+  bad check: not enough arguments provided to checker: got 0, want 1
+want args:
+  want length
+`,
+	expectedNegateFailure: `
+error:
+  bad check: not enough arguments provided to checker: got 0, want 1
+want args:
+  want length
+`,
+}, {
+	about:   "HasLen: too many arguments",
+	checker: HasLen,
+	got:     []int{42},
+	args:    []interface{}{42, 47},
+	expectedCheckFailure: `
+error:
+  bad check: too many arguments provided to checker: got 2, want 1
+got args:
+  []interface {}{
+      int(42),
+      int(47),
+  }
+want args:
+  want length
+`,
+	expectedNegateFailure: `
+error:
+  bad check: too many arguments provided to checker: got 2, want 1
+got args:
+  []interface {}{
+      int(42),
+      int(47),
+  }
+want args:
+  want length
+`,
+}, {
+	about:   "Implements: implements interface",
+	checker: Implements,
+	got:     errBadWolf,
+	args:    []interface{}{(*error)(nil)},
+	expectedNegateFailure: `
+error:
+  unexpected success
+got:
+  bad wolf
+    file:line
+want interface pointer:
+  (*error)(nil)
+`,
+}, {
+	about:   "Implements: does not implement interface",
+	checker: Implements,
+	got:     errBadWolf,
+	args:    []interface{}{(*Fooer)(nil)},
+	expectedCheckFailure: `
+error:
+  got value does not implement wanted interface
+got:
+  bad wolf
+    file:line
+want interface:
+  quicktest_test.Fooer
+`,
+}, {
+	about:   "Implements: fails if got nil",
+	checker: Implements,
+	got:     nil,
+	args:    []interface{}{(*Fooer)(nil)},
+	expectedCheckFailure: `
+error:
+  got nil value but want non-nil
+got:
+  nil
+`,
+}, {
+	about:   "Implements: bad check if wanted is nil",
+	checker: Implements,
+	got:     errBadWolf,
+	args:    []interface{}{nil},
+	expectedCheckFailure: `
+error:
+  bad check: want a pointer to an interface variable but nil was provided
+`,
+	expectedNegateFailure: `
+error:
+  bad check: want a pointer to an interface variable but nil was provided
+`,
+}, {
+	about:   "Implements: bad check if wanted is not pointer",
+	checker: Implements,
+	got:     errBadWolf,
+	args:    []interface{}{struct{}{}},
+	expectedCheckFailure: `
+error:
+  bad check: want a pointer to an interface variable but a non-pointer value was provided
+want:
+  struct {}
+`,
+	expectedNegateFailure: `
+error:
+  bad check: want a pointer to an interface variable but a non-pointer value was provided
+want:
+  struct {}
+`,
+}, {
+	about:   "Implements: bad check if wanted is not pointer to interface",
+	checker: Implements,
+	got:     errBadWolf,
+	args:    []interface{}{(*struct{})(nil)},
+	expectedCheckFailure: `
+error:
+  bad check: want a pointer to an interface variable but a pointer to a concrete type was provided
+want pointer type:
+  struct {}
+`,
+	expectedNegateFailure: `
+error:
+  bad check: want a pointer to an interface variable but a pointer to a concrete type was provided
+want pointer type:
+  struct {}
+`,
+}, {
+	about:   "Implements: bad check if wanted is a pointer to the empty interface",
+	checker: Implements,
+	got:     42,
+	args:    []interface{}{(*interface{})(nil)},
+	expectedCheckFailure: `
+error:
+  bad check: all types implement the empty interface, want a pointer to a variable that isn't the empty interface
+want pointer type:
+  interface {}
+`,
+	expectedNegateFailure: `
+error:
+  bad check: all types implement the empty interface, want a pointer to a variable that isn't the empty interface
+want pointer type:
+  interface {}
+`,
+}, {
+	about:   "Satisfies: success with an error",
+	checker: Satisfies,
+	got:     BadCheckf("bad wolf"),
+	args:    []interface{}{IsBadCheck},
+	expectedNegateFailure: `
+error:
+  unexpected success
+arg:
+  e"bad check: bad wolf"
+predicate function:
+  func(error) bool {...}
+`,
+}, {
+	about:   "Satisfies: success with an int",
+	checker: Satisfies,
+	got:     42,
+	args: []interface{}{
+		func(v int) bool { return v == 42 },
+	},
+	expectedNegateFailure: `
+error:
+  unexpected success
+arg:
+  int(42)
+predicate function:
+  func(int) bool {...}
+`,
+}, {
+	about:   "Satisfies: success with nil",
+	checker: Satisfies,
+	got:     nil,
+	args: []interface{}{
+		func(v []int) bool { return true },
+	},
+	expectedNegateFailure: `
+error:
+  unexpected success
+arg:
+  nil
+predicate function:
+  func([]int) bool {...}
+`,
+}, {
+	about:   "Satisfies: failure with an error",
+	checker: Satisfies,
+	got:     nil,
+	args:    []interface{}{IsBadCheck},
+	expectedCheckFailure: `
+error:
+  value does not satisfy predicate function
+arg:
+  nil
+predicate function:
+  func(error) bool {...}
+`,
+}, {
+	about:   "Satisfies: failure with a string",
+	checker: Satisfies,
+	got:     "bad wolf",
+	args: []interface{}{
+		func(string) bool { return false },
+	},
+	expectedCheckFailure: `
+error:
+  value does not satisfy predicate function
+arg:
+  "bad wolf"
+predicate function:
+  func(string) bool {...}
+`,
+}, {
+	about:   "Satisfies: not a function",
+	checker: Satisfies,
+	got:     42,
+	args:    []interface{}{42},
+	expectedCheckFailure: `
+error:
+  bad check: predicate function is not a func(T) bool
+predicate function:
+  int(42)
+`,
+	expectedNegateFailure: `
+error:
+  bad check: predicate function is not a func(T) bool
+predicate function:
+  int(42)
+`,
+}, {
+	about:   "Satisfies: function accepting no arguments",
+	checker: Satisfies,
+	got:     42,
+	args: []interface{}{
+		func() bool { return true },
+	},
+	expectedCheckFailure: `
+error:
+  bad check: predicate function is not a func(T) bool
+predicate function:
+  func() bool {...}
+`,
+	expectedNegateFailure: `
+error:
+  bad check: predicate function is not a func(T) bool
+predicate function:
+  func() bool {...}
+`,
+}, {
+	about:   "Satisfies: function accepting too many arguments",
+	checker: Satisfies,
+	got:     42,
+	args: []interface{}{
+		func(int, string) bool { return false },
+	},
+	expectedCheckFailure: `
+error:
+  bad check: predicate function is not a func(T) bool
+predicate function:
+  func(int, string) bool {...}
+`,
+	expectedNegateFailure: `
+error:
+  bad check: predicate function is not a func(T) bool
+predicate function:
+  func(int, string) bool {...}
+`,
+}, {
+	about:   "Satisfies: function returning no arguments",
+	checker: Satisfies,
+	got:     42,
+	args: []interface{}{
+		func(error) {},
+	},
+	expectedCheckFailure: `
+error:
+  bad check: predicate function is not a func(T) bool
+predicate function:
+  func(error) {...}
+`,
+	expectedNegateFailure: `
+error:
+  bad check: predicate function is not a func(T) bool
+predicate function:
+  func(error) {...}
+`,
+}, {
+	about:   "Satisfies: function returning too many argments",
+	checker: Satisfies,
+	got:     42,
+	args: []interface{}{
+		func(int) (bool, error) { return true, nil },
+	},
+	expectedCheckFailure: `
+error:
+  bad check: predicate function is not a func(T) bool
+predicate function:
+  func(int) (bool, error) {...}
+`,
+	expectedNegateFailure: `
+error:
+  bad check: predicate function is not a func(T) bool
+predicate function:
+  func(int) (bool, error) {...}
+`,
+}, {
+	about:   "Satisfies: function not returning a bool",
+	checker: Satisfies,
+	got:     42,
+	args: []interface{}{
+		func(int) error { return nil },
+	},
+	expectedCheckFailure: `
+error:
+  bad check: predicate function is not a func(T) bool
+predicate function:
+  func(int) error {...}
+`,
+	expectedNegateFailure: `
+error:
+  bad check: predicate function is not a func(T) bool
+predicate function:
+  func(int) error {...}
+`,
+}, {
+	about:   "Satisfies: type mismatch",
+	checker: Satisfies,
+	got:     42,
+	args:    []interface{}{IsBadCheck},
+	expectedCheckFailure: `
+error:
+  bad check: cannot use value of type int as type error in argument to predicate function
+arg:
+  int(42)
+predicate function:
+  func(error) bool {...}
+`,
+	expectedNegateFailure: `
+error:
+  bad check: cannot use value of type int as type error in argument to predicate function
+arg:
+  int(42)
+predicate function:
+  func(error) bool {...}
+`,
+}, {
+	about:   "Satisfies: nil value that cannot be nil",
+	checker: Satisfies,
+	got:     nil,
+	args: []interface{}{
+		func(string) bool { return true },
+	},
+	expectedCheckFailure: `
+error:
+  bad check: cannot use nil as type string in argument to predicate function
+predicate function:
+  func(string) bool {...}
+`,
+	expectedNegateFailure: `
+error:
+  bad check: cannot use nil as type string in argument to predicate function
+predicate function:
+  func(string) bool {...}
+`,
+}, {
+	about:   "Satisfies: not enough arguments",
+	checker: Satisfies,
+	expectedCheckFailure: `
+error:
+  bad check: not enough arguments provided to checker: got 0, want 1
+want args:
+  predicate function
+`,
+	expectedNegateFailure: `
+error:
+  bad check: not enough arguments provided to checker: got 0, want 1
+want args:
+  predicate function
+`,
+}, {
+	about:   "Satisfies: too many arguments",
+	checker: Satisfies,
+	got:     42,
+	args:    []interface{}{func() bool { return true }, 1, 2},
+	expectedCheckFailure: `
+error:
+  bad check: too many arguments provided to checker: got 3, want 1
+got args:
+  []interface {}{
+      func() bool {...},
+      int(1),
+      int(2),
+  }
+want args:
+  predicate function
+`,
+	expectedNegateFailure: `
+error:
+  bad check: too many arguments provided to checker: got 3, want 1
+got args:
+  []interface {}{
+      func() bool {...},
+      int(1),
+      int(2),
+  }
+want args:
+  predicate function
+`,
+}, {
+	about:   "IsTrue: success",
+	checker: IsTrue,
+	got:     true,
+	expectedNegateFailure: `
+error:
+  unexpected success
+got:
+  bool(true)
+`,
+}, {
+	about:   "IsTrue: failure",
+	checker: IsTrue,
+	got:     false,
+	expectedCheckFailure: `
+error:
+  value is not true
+got:
+  bool(false)
+`,
+}, {
+	about:   "IsTrue: success with subtype",
+	checker: IsTrue,
+	got:     boolean(true),
+	expectedNegateFailure: `
+error:
+  unexpected success
+got:
+  quicktest_test.boolean(true)
+`,
+}, {
+	about:   "IsTrue: failure with subtype",
+	checker: IsTrue,
+	got:     boolean(false),
+	expectedCheckFailure: `
+error:
+  value is not true
+got:
+  quicktest_test.boolean(false)
+`,
+}, {
+	about:   "IsTrue: nil value",
+	checker: IsTrue,
+	got:     nil,
+	expectedCheckFailure: `
+error:
+  bad check: value does not have a bool underlying type
+value:
+  nil
+`,
+	expectedNegateFailure: `
+error:
+  bad check: value does not have a bool underlying type
+value:
+  nil
+`,
+}, {
+	about:   "IsTrue: non-bool value",
+	checker: IsTrue,
+	got:     42,
+	expectedCheckFailure: `
+error:
+  bad check: value does not have a bool underlying type
+value:
+  int(42)
+`,
+	expectedNegateFailure: `
+error:
+  bad check: value does not have a bool underlying type
+value:
+  int(42)
+`,
+}, {
+	about:   "IsFalse: success",
+	checker: IsFalse,
+	got:     false,
+	expectedNegateFailure: `
+error:
+  unexpected success
+got:
+  bool(false)
+`,
+}, {
+	about:   "IsFalse: failure",
+	checker: IsFalse,
+	got:     true,
+	expectedCheckFailure: `
+error:
+  value is not false
+got:
+  bool(true)
+`,
+}, {
+	about:   "IsFalse: success with subtype",
+	checker: IsFalse,
+	got:     boolean(false),
+	expectedNegateFailure: `
+error:
+  unexpected success
+got:
+  quicktest_test.boolean(false)
+`,
+}, {
+	about:   "IsFalse: failure with subtype",
+	checker: IsFalse,
+	got:     boolean(true),
+	expectedCheckFailure: `
+error:
+  value is not false
+got:
+  quicktest_test.boolean(true)
+`,
+}, {
+	about:   "IsFalse: nil value",
+	checker: IsFalse,
+	got:     nil,
+	expectedCheckFailure: `
+error:
+  bad check: value does not have a bool underlying type
+value:
+  nil
+`,
+	expectedNegateFailure: `
+error:
+  bad check: value does not have a bool underlying type
+value:
+  nil
+`,
+}, {
+	about:   "IsFalse: non-bool value",
+	checker: IsFalse,
+	got:     "bad wolf",
+	expectedCheckFailure: `
+error:
+  bad check: value does not have a bool underlying type
+value:
+  "bad wolf"
+`,
+	expectedNegateFailure: `
+error:
+  bad check: value does not have a bool underlying type
+value:
+  "bad wolf"
+`,
+}, {
+	about:   "Not: success",
+	checker: Not(IsNil),
+	got:     42,
+	expectedNegateFailure: `
+error:
+  got non-nil value
+got:
+  int(42)
+`,
+}, {
+	about:   "Not: failure",
+	checker: Not(Equals),
+	got:     42,
+	args:    []interface{}{42},
+	expectedCheckFailure: `
+error:
+  unexpected success
+got:
+  int(42)
+want:
+  <same as "got">
+`,
+}, {
+	about:   "Not: IsNil failure",
+	checker: Not(IsNil),
+	got:     nil,
+	expectedCheckFailure: `
+error:
+  got nil value but want non-nil
+got:
+  nil
+`,
+}, {
+	about:   "Not: not enough arguments",
+	checker: Not(PanicMatches),
+	expectedCheckFailure: `
+error:
+  bad check: not enough arguments provided to checker: got 0, want 1
+want args:
+  regexp
+`,
+	expectedNegateFailure: `
+error:
+  bad check: not enough arguments provided to checker: got 0, want 1
+want args:
+  regexp
+`,
+}, {
+	about:   "Not: too many arguments",
+	checker: Not(Equals),
+	args:    []interface{}{42, nil},
+	expectedCheckFailure: `
+error:
+  bad check: too many arguments provided to checker: got 2, want 1
+got args:
+  []interface {}{
+      int(42),
+      nil,
+  }
+want args:
+  want
+`,
+	expectedNegateFailure: `
+error:
+  bad check: too many arguments provided to checker: got 2, want 1
+got args:
+  []interface {}{
+      int(42),
+      nil,
+  }
+want args:
+  want
+`,
+}, {
+	about:   "Contains with string",
+	checker: Contains,
+	got:     "hello, world",
+	args:    []interface{}{"world"},
+	expectedNegateFailure: `
+error:
+  unexpected success
+container:
+  "hello, world"
+want:
+  "world"
+`,
+}, {
+	about:   "Contains with string no match",
+	checker: Contains,
+	got:     "hello, world",
+	args:    []interface{}{"worlds"},
+	expectedCheckFailure: `
+error:
+  no substring match found
+container:
+  "hello, world"
+want:
+  "worlds"
+`,
+}, {
+	about:   "Contains with slice",
+	checker: Contains,
+	got:     []string{"a", "b", "c"},
+	args:    []interface{}{"a"},
+	expectedNegateFailure: `
+error:
+  unexpected success
+container:
+  []string{"a", "b", "c"}
+want:
+  "a"
+`,
+}, {
+	about:   "Contains with map",
+	checker: Contains,
+	// Note: we can't use more than one element here because
+	// pretty.Print output is non-deterministic.
+	// https://github.com/kr/pretty/issues/47
+	got:  map[string]string{"a": "d"},
+	args: []interface{}{"d"},
+	expectedNegateFailure: `
+error:
+  unexpected success
+container:
+  map[string]string{"a":"d"}
+want:
+  "d"
+`,
+}, {
+	about:   "Contains with non-string",
+	checker: Contains,
+	got:     "aa",
+	args:    []interface{}{5},
+	expectedCheckFailure: `
+error:
+  bad check: strings can only contain strings, not int
+`,
+	expectedNegateFailure: `
+error:
+  bad check: strings can only contain strings, not int
+`,
+}, {
+	about:   "All slice equals",
+	checker: All(Equals),
+	got:     []string{"a", "a"},
+	args:    []interface{}{"a"},
+	expectedNegateFailure: `
+error:
+  unexpected success
+container:
+  []string{"a", "a"}
+want:
+  "a"
+`,
+}, {
+	about:   "All slice match",
+	checker: All(Matches),
+	got:     []string{"red", "blue", "green"},
+	args:    []interface{}{".*e.*"},
+	expectedNegateFailure: `
+error:
+  unexpected success
+container:
+  []string{"red", "blue", "green"}
+regexp:
+  ".*e.*"
+`,
+}, {
+	about:   "All nested match",
+	checker: All(All(Matches)),
+	got:     [][]string{{"hello", "goodbye"}, {"red", "blue"}, {}},
+	args:    []interface{}{".*e.*"},
+	expectedNegateFailure: `
+error:
+  unexpected success
+container:
+  [][]string{
+      {"hello", "goodbye"},
+      {"red", "blue"},
+      {},
+  }
+regexp:
+  ".*e.*"
+`,
+}, {
+	about:   "All nested mismatch",
+	checker: All(All(Matches)),
+	got:     [][]string{{"hello", "goodbye"}, {"black", "blue"}, {}},
+	args:    []interface{}{".*e.*"},
+	expectedCheckFailure: `
+error:
+  mismatch at index 1
+error:
+  mismatch at index 0
+error:
+  value does not match regexp
+first mismatched element:
+  "black"
+`,
+}, {
+	about:   "All slice mismatch",
+	checker: All(Matches),
+	got:     []string{"red", "black"},
+	args:    []interface{}{".*e.*"},
+	expectedCheckFailure: `
+error:
+  mismatch at index 1
+error:
+  value does not match regexp
+first mismatched element:
+  "black"
+`,
+}, {
+	about:   "All slice mismatch with DeepEqual",
+	checker: All(DeepEquals),
+	got:     [][]string{{"a", "b"}, {"a", "c"}},
+	args:    []interface{}{[]string{"a", "b"}},
+	expectedCheckFailure: fmt.Sprintf(`
+error:
+  mismatch at index 1
+error:
+  values are not deep equal
+diff (-got +want):
+%s
+got:
+  []string{"a", "c"}
+want:
+  []string{"a", "b"}
+`, diff([]string{"a", "c"}, []string{"a", "b"})),
+}, {
+	about:   "All bad checker args count",
+	checker: All(IsNil),
+	got:     []int{},
+	args:    []interface{}{5},
+	expectedCheckFailure: `
+error:
+  bad check: too many arguments provided to checker: got 1, want 0
+got args:
+  []interface {}{
+      int(5),
+  }
+`,
+	expectedNegateFailure: `
+error:
+  bad check: too many arguments provided to checker: got 1, want 0
+got args:
+  []interface {}{
+      int(5),
+  }
+`,
+}, {
+	about:   "All bad checker args",
+	checker: All(Matches),
+	got:     []string{"hello"},
+	args:    []interface{}{5},
+	expectedCheckFailure: `
+error:
+  bad check: at index 0: bad check: regexp is not a string
+`,
+	expectedNegateFailure: `
+error:
+  bad check: at index 0: bad check: regexp is not a string
+`,
+}, {
+	about:   "All with non-container",
+	checker: All(Equals),
+	got:     5,
+	args:    []interface{}{5},
+	expectedCheckFailure: `
+error:
+  bad check: map, slice or array required
+`,
+	expectedNegateFailure: `
+error:
+  bad check: map, slice or array required
+`,
+}, {
+	about:   "All mismatch with map",
+	checker: All(Matches),
+	got:     map[string]string{"a": "red", "b": "black"},
+	args:    []interface{}{".*e.*"},
+	expectedCheckFailure: `
+error:
+  mismatch at key "b"
+error:
+  value does not match regexp
+first mismatched element:
+  "black"
+`,
+}, {
+	about:   "Any with non-container",
+	checker: Any(Equals),
+	got:     5,
+	args:    []interface{}{5},
+	expectedCheckFailure: `
+error:
+  bad check: map, slice or array required
+`,
+	expectedNegateFailure: `
+error:
+  bad check: map, slice or array required
+`,
+}, {
+	about:   "Any no match",
+	checker: Any(Equals),
+	got:     []int{},
+	args:    []interface{}{5},
+	expectedCheckFailure: `
+error:
+  no matching element found
+container:
+  []int{}
+want:
+  int(5)
+`,
+}, {
+	about:   "Any bad checker arg count",
+	checker: Any(IsNil),
+	got:     []int{},
+	args:    []interface{}{5},
+	expectedCheckFailure: `
+error:
+  bad check: too many arguments provided to checker: got 1, want 0
+got args:
+  []interface {}{
+      int(5),
+  }
+`,
+	expectedNegateFailure: `
+error:
+  bad check: too many arguments provided to checker: got 1, want 0
+got args:
+  []interface {}{
+      int(5),
+  }
+`,
+}, {
+	about:   "Any bad checker args",
+	checker: Any(Matches),
+	got:     []string{"hello"},
+	args:    []interface{}{5},
+	expectedCheckFailure: `
+error:
+  bad check: at index 0: bad check: regexp is not a string
+`,
+	expectedNegateFailure: `
+error:
+  bad check: at index 0: bad check: regexp is not a string
+`,
+}, {
+	about:   "JSONEquals simple",
+	checker: JSONEquals,
+	got:     `{"First": 47.11}`,
+	args: []interface{}{
+		&OuterJSON{
+			First: 47.11,
+		},
+	},
+	expectedNegateFailure: tilde2bq(`
+error:
+  unexpected success
+got:
+  ~{"First": 47.11}~
+want:
+  &quicktest_test.OuterJSON{
+      First:  47.11,
+      Second: nil,
+  }
+`),
+}, {
+	about:   "JSONEquals nested",
+	checker: JSONEquals,
+	got:     `{"First": 47.11, "Last": [{"First": "Hello", "Second": 42}]}`,
+	args: []interface{}{
+		&OuterJSON{
+			First: 47.11,
+			Second: []*InnerJSON{
+				{First: "Hello", Second: 42},
+			},
+		},
+	},
+	expectedNegateFailure: tilde2bq(`
+error:
+  unexpected success
+got:
+  ~{"First": 47.11, "Last": [{"First": "Hello", "Second": 42}]}~
+want:
+  &quicktest_test.OuterJSON{
+      First:  47.11,
+      Second: {
+          &quicktest_test.InnerJSON{
+              First:  "Hello",
+              Second: 42,
+              Third:  {},
+          },
+      },
+  }
+`),
+}, {
+	about:   "JSONEquals nested with newline",
+	checker: JSONEquals,
+	got: `{"First": 47.11, "Last": [{"First": "Hello", "Second": 42},
+			{"First": "World", "Third": {"F": false}}]}`,
+	args: []interface{}{
+		&OuterJSON{
+			First: 47.11,
+			Second: []*InnerJSON{
+				{First: "Hello", Second: 42},
+				{First: "World", Third: map[string]bool{
+					"F": false,
+				}},
+			},
+		},
+	},
+	expectedNegateFailure: `
+error:
+  unexpected success
+got:
+  "{\"First\": 47.11, \"Last\": [{\"First\": \"Hello\", \"Second\": 42},\n\t\t\t{\"First\": \"World\", \"Third\": {\"F\": false}}]}"
+want:
+  &quicktest_test.OuterJSON{
+      First:  47.11,
+      Second: {
+          &quicktest_test.InnerJSON{
+              First:  "Hello",
+              Second: 42,
+              Third:  {},
+          },
+          &quicktest_test.InnerJSON{
+              First:  "World",
+              Second: 0,
+              Third:  {"F":false},
+          },
+      },
+  }
+`,
+}, {
+	about:   "JSONEquals extra field",
+	checker: JSONEquals,
+	got:     `{"NotThere": 1}`,
+	args: []interface{}{
+		&OuterJSON{
+			First: 2,
+		},
+	},
+	expectedCheckFailure: fmt.Sprintf(`
+error:
+  values are not deep equal
+diff (-got +want):
+%s
+got:
+  map[string]interface {}{
+      "NotThere": float64(1),
+  }
+want:
+  map[string]interface {}{
+      "First": float64(2),
+  }
+`, diff(map[string]interface{}{"NotThere": 1.0}, map[string]interface{}{"First": 2.0})),
+}, {
+	about:   "JSONEquals cannot unmarshal obtained value",
+	checker: JSONEquals,
+	got:     `{"NotThere": `,
+	args:    []interface{}{nil},
+	expectedCheckFailure: fmt.Sprintf(tilde2bq(`
+error:
+  cannot unmarshal obtained contents: %s; "{\"NotThere\": "
+got:
+  ~{"NotThere": ~
+want:
+  nil
+`), mustJSONUnmarshalErr(`{"NotThere": `)),
+}, {
+	about:   "JSONEquals cannot marshal expected value",
+	checker: JSONEquals,
+	got:     `null`,
+	args: []interface{}{
+		jsonErrorMarshaler{},
+	},
+	expectedCheckFailure: `
+error:
+  bad check: cannot marshal expected contents: json: error calling MarshalJSON for type quicktest_test.jsonErrorMarshaler: qt json marshal error
+`,
+	expectedNegateFailure: `
+error:
+  bad check: cannot marshal expected contents: json: error calling MarshalJSON for type quicktest_test.jsonErrorMarshaler: qt json marshal error
+`,
+}, {
+	about:   "JSONEquals with []byte",
+	checker: JSONEquals,
+	got:     []byte("null"),
+	args:    []interface{}{nil},
+	expectedNegateFailure: `
+error:
+  unexpected success
+got:
+  []uint8("null")
+want:
+  nil
+`,
+}, {
+	about:   "JSONEquals with RawMessage",
+	checker: JSONEquals,
+	got:     []byte("null"),
+	args:    []interface{}{json.RawMessage("null")},
+	expectedNegateFailure: `
+error:
+  unexpected success
+got:
+  []uint8("null")
+want:
+  json.RawMessage("null")
+`,
+}, {
+	about:   "JSONEquals with bad type",
+	checker: JSONEquals,
+	got:     0,
+	args:    []interface{}{nil},
+	expectedCheckFailure: `
+error:
+  bad check: expected string or byte, got int
+`,
+	expectedNegateFailure: `
+error:
+  bad check: expected string or byte, got int
+`,
+}, {
+	about: "CodecEquals with bad marshal",
+	checker: CodecEquals(
+		func(x interface{}) ([]byte, error) { return []byte("bad json"), nil },
+		json.Unmarshal,
+	),
+	got:  "null",
+	args: []interface{}{nil},
+	expectedCheckFailure: fmt.Sprintf(`
+error:
+  bad check: cannot unmarshal expected contents: %s
+`, mustJSONUnmarshalErr("bad json")),
+	expectedNegateFailure: fmt.Sprintf(`
+error:
+  bad check: cannot unmarshal expected contents: %s
+`, mustJSONUnmarshalErr("bad json")),
+}, {
+	about: "CodecEquals with options",
+	checker: CodecEquals(
+		json.Marshal,
+		json.Unmarshal,
+		cmpopts.SortSlices(func(x, y interface{}) bool { return x.(string) < y.(string) }),
+	),
+	got:  `["b", "z", "c", "a"]`,
+	args: []interface{}{[]string{"a", "c", "z", "b"}},
+	expectedNegateFailure: tilde2bq(`
+error:
+  unexpected success
+got:
+  ~["b", "z", "c", "a"]~
+want:
+  []string{"a", "c", "z", "b"}
+`),
+}}
+
+func TestCheckers(t *testing.T) {
+	original := TestingVerbose
+	defer func() {
+		TestingVerbose = original
+	}()
+	for _, test := range checkerTests {
+		*TestingVerbose = func() bool {
+			return test.verbose
+		}
+
+		t.Run(test.about, func(t *testing.T) {
+			tt := &testingT{}
+			c := New(tt)
+			ok := c.Check(test.got, test.checker, test.args...)
+			checkResult(t, ok, tt.errorString(), test.expectedCheckFailure)
+		})
+		t.Run("Not "+test.about, func(t *testing.T) {
+			tt := &testingT{}
+			c := New(tt)
+			ok := c.Check(test.got, Not(test.checker), test.args...)
+			checkResult(t, ok, tt.errorString(), test.expectedNegateFailure)
+		})
+	}
+}
+
+func diff(x, y interface{}, opts ...cmp.Option) string {
+	d := cmp.Diff(x, y, opts...)
+	return strings.TrimSuffix(Prefixf("  ", "%s", d), "\n")
+}
+
+type jsonErrorMarshaler struct{}
+
+func (jsonErrorMarshaler) MarshalJSON() ([]byte, error) {
+	return nil, fmt.Errorf("qt json marshal error")
+}
+
+func mustJSONUnmarshalErr(s string) error {
+	var v interface{}
+	err := json.Unmarshal([]byte(s), &v)
+	if err == nil {
+		panic("want JSON error, got nil")
+	}
+	return err
+}
+
+func tilde2bq(s string) string {
+	return strings.Replace(s, "~", "`", -1)
+}

+ 89 - 0
data_tool/src/github.com/frankban/quicktest/cleanup_test.go

@@ -0,0 +1,89 @@
+// Licensed under the MIT license, see LICENSE file for details.
+
+//go:build go1.14
+// +build go1.14
+
+package quicktest_test
+
+import (
+	"testing"
+)
+
+// This file defines tests that are only valid since the Cleanup
+// method was added in Go 1.14.
+
+func TestCCleanup(t *testing.T) {
+	c := New(t)
+	cleanups := 0
+	c.Run("defer", func(c *C) {
+		c.Cleanup(func() {
+			cleanups++
+		})
+	})
+	c.Assert(cleanups, Equals, 1)
+}
+
+func TestCDeferWithoutDone(t *testing.T) {
+	c := New(t)
+	tc := &testingTWithCleanup{
+		TB:      t,
+		cleanup: func() {},
+	}
+	c1 := New(tc)
+	c1.Defer(func() {})
+	c1.Defer(func() {})
+	c.Assert(tc.cleanup, PanicMatches, `Done not called after Defer`)
+}
+
+func TestCDeferFromDefer(t *testing.T) {
+	c := New(t)
+	tc := &testingTWithCleanup{
+		TB:      t,
+		cleanup: func() {},
+	}
+	c1 := New(tc)
+	c1.Defer(func() {
+		c1.Log("defer 1")
+		// This defer is triggered from the first Done().
+		// It should have its own Done() call too.
+		c1.Defer(func() {
+			c1.Log("defer 2")
+		})
+	})
+	c1.Done()
+	// Check that we report the missing second Done().
+	c.Assert(tc.cleanup, PanicMatches, `Done not called after Defer`)
+}
+
+func TestCDeferVsCleanupOrder(t *testing.T) {
+	c := New(t)
+	var defers []int
+	c.Run("subtest", func(c *C) {
+		c.Defer(func() {
+			defers = append(defers, 0)
+		})
+		c.Cleanup(func() {
+			defers = append(defers, 1)
+		})
+		c.Defer(func() {
+			defers = append(defers, 2)
+		})
+		c.Cleanup(func() {
+			defers = append(defers, 3)
+		})
+	})
+	c.Assert(defers, DeepEquals, []int{3, 2, 1, 0})
+}
+
+type testingTWithCleanup struct {
+	testing.TB
+	cleanup func()
+}
+
+func (t *testingTWithCleanup) Cleanup(f func()) {
+	oldCleanup := t.cleanup
+	t.cleanup = func() {
+		defer oldCleanup()
+		f()
+	}
+}

+ 31 - 0
data_tool/src/github.com/frankban/quicktest/comment.go

@@ -0,0 +1,31 @@
+// Licensed under the MIT license, see LICENSE file for details.
+
+package quicktest
+
+import "fmt"
+
+// Commentf returns a test comment whose output is formatted according to
+// the given format specifier and args. It may be provided as the last argument
+// to any check or assertion and will be displayed if the check or assertion
+// fails. For instance:
+//
+//	c.Assert(a, qt.Equals, 42, qt.Commentf("answer is not %d", 42))
+func Commentf(format string, args ...interface{}) Comment {
+	return Comment{
+		format: format,
+		args:   args,
+	}
+}
+
+// Comment represents additional information on a check or an assertion which is
+// displayed when the check or assertion fails.
+type Comment struct {
+	format string
+	args   []interface{}
+}
+
+// String outputs a string formatted according to the stored format specifier
+// and args.
+func (c Comment) String() string {
+	return fmt.Sprintf(c.format, c.args...)
+}

+ 25 - 0
data_tool/src/github.com/frankban/quicktest/comment_test.go

@@ -0,0 +1,25 @@
+// Licensed under the MIT license, see LICENSE file for details.
+
+package quicktest_test
+
+import (
+	"testing"
+)
+
+func TestCommentf(t *testing.T) {
+	c := Commentf("the answer is %d", 42)
+	comment := c.String()
+	expectedComment := "the answer is 42"
+	if comment != expectedComment {
+		t.Fatalf("comment error:\ngot  %q\nwant %q", comment, expectedComment)
+	}
+}
+
+func TestConstantCommentf(t *testing.T) {
+	const expectedComment = "bad wolf"
+	c := Commentf(expectedComment)
+	comment := c.String()
+	if comment != expectedComment {
+		t.Fatalf("constant comment error:\ngot  %q\nwant %q", comment, expectedComment)
+	}
+}

+ 42 - 0
data_tool/src/github.com/frankban/quicktest/deferpanic_test.go

@@ -0,0 +1,42 @@
+// Licensed under the MIT license, see LICENSE file for details.
+
+//go:build !go1.14
+// +build !go1.14
+
+package quicktest_test
+
+import (
+	"testing"
+)
+
+func TestCDeferCalledEvenAfterDeferPanic(t *testing.T) {
+	// This test doesn't test anything useful under go 1.14 and
+	// later when Cleanup is built in.
+	c := New(t)
+	deferred1 := 0
+	deferred2 := 0
+	c.Defer(func() {
+		deferred1++
+	})
+	c.Defer(func() {
+		panic("scream and shout")
+	})
+	c.Defer(func() {
+		deferred2++
+	})
+	c.Defer(func() {
+		panic("run in circles")
+	})
+	func() {
+		defer func() {
+			c.Check(recover(), Equals, "scream and shout")
+		}()
+		c.Done()
+	}()
+	c.Assert(deferred1, Equals, 1)
+	c.Assert(deferred2, Equals, 1)
+	// Check that calling Done again doesn't panic.
+	c.Done()
+	c.Assert(deferred1, Equals, 1)
+	c.Assert(deferred2, Equals, 1)
+}

+ 340 - 0
data_tool/src/github.com/frankban/quicktest/doc.go

@@ -0,0 +1,340 @@
+// Licensed under the MIT license, see LICENSE file for details.
+
+/*
+Package quicktest provides a collection of Go helpers for writing tests.
+
+Quicktest helpers can be easily integrated inside regular Go tests, for
+instance:
+
+	import qt "github.com/frankban/quicktest"
+
+	func TestFoo(t *testing.T) {
+	    t.Run("numbers", func(t *testing.T) {
+	        c := qt.New(t)
+	        numbers, err := somepackage.Numbers()
+	        c.Assert(err, qt.IsNil)
+	        c.Assert(numbers, qt.DeepEquals, []int{42, 47})
+	    })
+	    t.Run("bad wolf error", func(t *testing.T) {
+	        c := qt.New(t)
+	        numbers, err := somepackage.Numbers()
+	        c.Assert(err, qt.ErrorMatches, "bad wolf")
+	    })
+	    t.Run("nil", func(t *testing.T) {
+	        c := qt.New(t)
+	        got := somepackage.MaybeNil()
+	        c.Assert(got, qt.IsNil, qt.Commentf("value: %v", somepackage.Value))
+	    })
+	}
+
+# Assertions
+
+An assertion looks like this, where qt.Equals could be replaced by any
+available checker. If the assertion fails, the underlying Fatal method is
+called to describe the error and abort the test.
+
+	c := qt.New(t)
+	c.Assert(someValue, qt.Equals, wantValue)
+
+If you don’t want to abort on failure, use Check instead, which calls Error
+instead of Fatal:
+
+	c.Check(someValue, qt.Equals, wantValue)
+
+For really short tests, the extra line for instantiating *qt.C can be avoided:
+
+	qt.Assert(t, someValue, qt.Equals, wantValue)
+	qt.Check(t, someValue, qt.Equals, wantValue)
+
+The library provides some base checkers like Equals, DeepEquals, Matches,
+ErrorMatches, IsNil and others. More can be added by implementing the Checker
+interface. Below, we list the checkers implemented by the package in alphabetical
+order.
+
+# All
+
+All returns a Checker that uses the given checker to check elements of a slice
+or array or the values of a map. It succeeds if all elements pass the check.
+On failure it prints the error from the first index that failed.
+
+For example:
+
+	c.Assert([]int{3, 5, 8}, qt.All(qt.Not(qt.Equals)), 0)
+	c.Assert([][]string{{"a", "b"}, {"a", "b"}}, qt.All(qt.DeepEquals), []string{"a", "b"})
+
+See also Any and Contains.
+
+# Any
+
+Any returns a Checker that uses the given checker to check elements of a slice
+or array or the values from a map. It succeeds if any element passes the check.
+
+For example:
+
+	c.Assert([]int{3,5,7,99}, qt.Any(qt.Equals), 7)
+	c.Assert([][]string{{"a", "b"}, {"c", "d"}}, qt.Any(qt.DeepEquals), []string{"c", "d"})
+
+See also All and Contains.
+
+# CmpEquals
+
+CmpEquals checks equality of two arbitrary values according to the provided
+compare options. DeepEquals is more commonly used when no compare options are
+required.
+
+Example calls:
+
+	c.Assert(list, qt.CmpEquals(cmpopts.SortSlices(func(x, y int) bool { return x < y })), []int{42, 47})
+	c.Assert(got, qt.CmpEquals(), []int{42, 47}) // Same as qt.DeepEquals.
+
+# CodecEquals
+
+CodecEquals returns a checker that checks for codec value equivalence.
+
+	func CodecEquals(
+	    marshal func(interface{}) ([]byte, error),
+	    unmarshal func([]byte, interface{}) error,
+	    opts ...cmp.Option,
+	) Checker
+
+It expects two arguments: a byte slice or a string containing some
+codec-marshaled data, and a Go value.
+
+It uses unmarshal to unmarshal the data into an interface{} value.
+It marshals the Go value using marshal, then unmarshals the result into
+an interface{} value.
+
+It then checks that the two interface{} values are deep-equal to one another,
+using CmpEquals(opts) to perform the check.
+
+See JSONEquals for an example of this in use.
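+
+As an illustration only, a YAML-based checker could be built like this
+(assuming a codec such as gopkg.in/yaml.v3 is available; this is a sketch,
+not part of the package API):
+
+	yamlEquals := qt.CodecEquals(yaml.Marshal, yaml.Unmarshal)
+	c.Assert("a: 1\nb: 2\n", yamlEquals, map[string]int{"a": 1, "b": 2})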
+
+# Contains
+
+Contains checks that a map, slice, array or string contains a value. It's the
+same as using Any(Equals), except that it has a special case for strings - if
+the first argument is a string, the second argument must also be a string and
+strings.Contains will be used.
+
+For example:
+
+	c.Assert("hello world", qt.Contains, "world")
+	c.Assert([]int{3,5,7,99}, qt.Contains, 7)
+
+# ContentEquals
+
+ContentEquals is like DeepEquals but any slices in the compared values will be sorted before being compared.
+
+For example:
+
+	c.Assert([]string{"c", "a", "b"}, qt.ContentEquals, []string{"a", "b", "c"})
+
+# DeepEquals
+
+DeepEquals checks that two arbitrary values are deeply equal.
+The comparison is done using the github.com/google/go-cmp/cmp package.
+When comparing structs, by default no unexported fields are allowed.
+If a more sophisticated comparison is required, use CmpEquals (see above).
+
+Example call:
+
+	c.Assert(got, qt.DeepEquals, []int{42, 47})
+
+# Equals
+
+Equals checks that two values are equal, as compared with Go's == operator.
+
+For instance:
+
+	c.Assert(answer, qt.Equals, 42)
+
+Note that the following will fail:
+
+	c.Assert((*sometype)(nil), qt.Equals, nil)
+
+Use the IsNil checker below for this kind of nil check.
+
+# ErrorAs
+
+ErrorAs checks that the error is or wraps a specific error type. If so, it
+assigns it to the provided pointer. This is analogous to calling errors.As.
+
+For instance:
+
+	// Checking for a specific error type
+	c.Assert(err, qt.ErrorAs, new(*os.PathError))
+
+	// Checking fields on a specific error type
+	var pathError *os.PathError
+	if c.Check(err, qt.ErrorAs, &pathError) {
+	    c.Assert(pathError.Path, Equals, "some_path")
+	}
+
+# ErrorIs
+
+ErrorIs checks that the error is or wraps a specific error value. This is
+analogous to calling errors.Is.
+
+For instance:
+
+	c.Assert(err, qt.ErrorIs, os.ErrNotExist)
+
+# ErrorMatches
+
+ErrorMatches checks that the provided value is an error whose message matches
+the provided regular expression.
+
+For instance:
+
+	c.Assert(err, qt.ErrorMatches, `bad wolf .*`)
+
+# HasLen
+
+HasLen checks that the provided value has the given length.
+
+For instance:
+
+	c.Assert([]int{42, 47}, qt.HasLen, 2)
+	c.Assert(myMap, qt.HasLen, 42)
+
+# Implements
+
+Implements checks that the provided value implements an interface. The
+interface is specified with a pointer to an interface variable.
+
+For instance:
+
+	var rc io.ReadCloser
+	c.Assert(myReader, qt.Implements, &rc)
+
+# IsFalse
+
+IsFalse checks that the provided value is false.
+The value must have a boolean underlying type.
+
+For instance:
+
+	c.Assert(false, qt.IsFalse)
+	c.Assert(IsValid(), qt.IsFalse)
+
+# IsNil
+
+IsNil checks that the provided value is nil.
+
+For instance:
+
+	c.Assert(got, qt.IsNil)
+
+As a special case, if the value has a nil underlying value but implements the
+error interface, it is still considered to be non-nil.
+This means that IsNil will fail on an error value that happens
+to have an underlying nil value, because that's
+invariably a mistake. See https://golang.org/doc/faq#nil_error.
+
+So it's just fine to check an error like this:
+
+	c.Assert(err, qt.IsNil)
+
+# IsNotNil
+
+IsNotNil is a Checker checking that the provided value is not nil.
+It is the equivalent of qt.Not(qt.IsNil).
+
+For instance:
+
+	c.Assert(got, qt.IsNotNil)
+
+# IsTrue
+
+IsTrue checks that the provided value is true.
+The value must have a boolean underlying type.
+
+For instance:
+
+	c.Assert(true, qt.IsTrue)
+	c.Assert(myBoolean(false), qt.IsTrue)
+
+# JSONEquals
+
+JSONEquals checks whether a byte slice or string is JSON-equivalent to a Go
+value. See CodecEquals for more information.
+
+It uses DeepEquals to do the comparison. If a more sophisticated comparison is
+required, use CodecEquals directly.
+
+For instance:
+
+	c.Assert(`{"First": 47.11}`, qt.JSONEquals, &MyStruct{First: 47.11})
+
+# Matches
+
+Matches checks that a string or result of calling the String method
+(if the value implements fmt.Stringer) matches the provided regular expression.
+
+For instance:
+
+	c.Assert("these are the voyages", qt.Matches, `these are .*`)
+	c.Assert(net.ParseIP("1.2.3.4"), qt.Matches, `1.*`)
+
+# Not
+
+Not returns a Checker negating the given Checker.
+
+For instance:
+
+	c.Assert(got, qt.Not(qt.IsNil))
+	c.Assert(answer, qt.Not(qt.Equals), 42)
+
+# PanicMatches
+
+PanicMatches checks that the provided function panics with a message matching
+the provided regular expression.
+
+For instance:
+
+	c.Assert(func() {panic("bad wolf ...")}, qt.PanicMatches, `bad wolf .*`)
+
+# Satisfies
+
+Satisfies checks that the provided value, when used as the argument of the
+provided predicate function, causes the function to return true. The function
+must be of type func(T) bool, with got assignable to T.
+
+For instance:
+
+	// Check that an error from os.Open satisfies os.IsNotExist.
+	c.Assert(err, qt.Satisfies, os.IsNotExist)
+
+	// Check that a floating point number is a not-a-number.
+	c.Assert(f, qt.Satisfies, math.IsNaN)
+
+# Deferred Execution
+
+The testing.TB.Cleanup helper provides the ability to defer the execution of
+functions that will be run when the test completes. This is often useful for
+creating OS-level resources such as temporary directories (see c.Mkdir).
+
+When targeting Go versions that don't have Cleanup (< 1.14), the same can be
+achieved using c.Defer. In this case, to trigger the deferred behavior, calling
+c.Done is required. For instance, if you create a *C instance at the top level,
+you’ll have to add a defer to trigger the cleanups at the end of the test:
+
+	defer c.Done()
+
+However, if you use quicktest to create a subtest, Done will be called
+automatically at the end of that subtest. For example:
+
+	func TestFoo(t *testing.T) {
+	    c := qt.New(t)
+	    c.Run("subtest", func(c *qt.C) {
+	        c.Setenv("HOME", c.Mkdir())
+	        // Here $HOME is set to the path of a newly created directory.
+	        // At the end of the test the directory will be removed
+	        // and HOME set back to its original value.
+	    })
+	}
+
+The c.Patch, c.Setenv, c.Unsetenv and c.Mkdir helpers use t.Cleanup for
+cleaning up resources when available, and fall back to Defer otherwise.
+*/
+package quicktest

+ 35 - 0
data_tool/src/github.com/frankban/quicktest/error.go

@@ -0,0 +1,35 @@
+// Licensed under the MIT license, see LICENSE file for details.
+
+package quicktest
+
+import (
+	"fmt"
+)
+
+// BadCheckf returns an error used to report a problem with the checker
+// invocation or testing execution itself (like wrong number or type of
+// arguments) rather than a real Check or Assert failure.
+// This helper can be used when implementing checkers.
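+//
+// For instance, a custom checker could reject a wrongly typed argument like
+// this (an illustrative sketch; args is the argument slice passed to the
+// checker's Check method):
+//
+//	if _, ok := args[0].(string); !ok {
+//		return BadCheckf("expected string argument, got %T", args[0])
+//	}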
+func BadCheckf(format string, a ...interface{}) error {
+	e := badCheck(fmt.Sprintf(format, a...))
+	return &e
+}
+
+// IsBadCheck reports whether the given error has been created by BadCheckf.
+// This helper can be used when implementing checkers.
+func IsBadCheck(err error) bool {
+	_, ok := err.(*badCheck)
+	return ok
+}
+
+type badCheck string
+
+// Error implements the error interface.
+func (e *badCheck) Error() string {
+	return "bad check: " + string(*e)
+}
+
+// ErrSilent is the error used when there is no need to include in the failure
+// output the "error" and "check" keys and all the keys automatically
+// added for args. This helper can be used when implementing checkers.
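+//
+// For instance, a custom checker that has already emitted its own notes
+// might finish with (an illustrative sketch):
+//
+//	note("first mismatched element", elem)
+//	return ErrSilent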
+var ErrSilent = fmt.Errorf("silent failure")

+ 56 - 0
data_tool/src/github.com/frankban/quicktest/error_test.go

@@ -0,0 +1,56 @@
+// Licensed under the MIT license, see LICENSE file for details.
+
+package quicktest_test
+
+import (
+	"errors"
+	"fmt"
+	"testing"
+)
+
+func TestBadCheckf(t *testing.T) {
+	err := BadCheckf("bad %s", "wolf")
+	expectedMessage := "bad check: bad wolf"
+	if err.Error() != expectedMessage {
+		t.Fatalf("error:\ngot  %q\nwant %q", err, expectedMessage)
+	}
+}
+
+func TestIsBadCheck(t *testing.T) {
+	err := BadCheckf("bad wolf")
+	assertBool(t, IsBadCheck(err), true)
+	err = errors.New("bad wolf")
+	assertBool(t, IsBadCheck(err), false)
+}
+
+var errBadWolf = &errTest{
+	msg:       "bad wolf",
+	formatted: true,
+}
+
+var errBadWolfMultiLine = &errTest{
+	msg:       "bad wolf\nfaulty logic",
+	formatted: true,
+}
+
+// errTest is an error type used in tests.
+type errTest struct {
+	msg       string
+	formatted bool
+}
+
+// Error implements error.
+func (err *errTest) Error() string {
+	return err.msg
+}
+
+// Format implements fmt.Formatter.
+func (err *errTest) Format(f fmt.State, c rune) {
+	if !f.Flag('+') || c != 'v' {
+		fmt.Fprint(f, "unexpected verb for formatting the error")
+	}
+	fmt.Fprint(f, err.Error())
+	if err.formatted {
+		fmt.Fprint(f, "\n  file:line")
+	}
+}

+ 8 - 0
data_tool/src/github.com/frankban/quicktest/export_test.go

@@ -0,0 +1,8 @@
+// Licensed under the MIT license, see LICENSE file for details.
+
+package quicktest
+
+var (
+	Prefixf        = prefixf
+	TestingVerbose = &testingVerbose
+)

+ 91 - 0
data_tool/src/github.com/frankban/quicktest/format.go

@@ -0,0 +1,91 @@
+// Licensed under the MIT license, see LICENSE file for details.
+
+package quicktest
+
+import (
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+
+	"github.com/kr/pretty"
+)
+
+// Format formats the given value as a string. It is used to print values in
+// test failures unless that's changed by calling C.SetFormat.
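+//
+// For instance, with the default formatting (outputs shown as comments,
+// consistent with the package tests):
+//
+//	Format("hello")         // "hello"
+//	Format([]byte("hello")) // []uint8("hello")
+//	Format(uint64(17))      // uint64(17)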
+func Format(v interface{}) string {
+	switch v := v.(type) {
+	case error:
+		s, ok := checkStringCall(v, v.Error)
+		if !ok {
+			return "e<nil>"
+		}
+		if msg := fmt.Sprintf("%+v", v); msg != s {
+			// The error has formatted itself with additional information.
+			// Leave that as is.
+			return msg
+		}
+		return "e" + quoteString(s)
+	case fmt.Stringer:
+		s, ok := checkStringCall(v, v.String)
+		if !ok {
+			return "s<nil>"
+		}
+		return "s" + quoteString(s)
+	case string:
+		return quoteString(v)
+	case uintptr, uint, uint8, uint16, uint32, uint64:
+		// Use decimal base (rather than hexadecimal) for representing uint types.
+		return fmt.Sprintf("%T(%d)", v, v)
+	}
+	if bytes, ok := byteSlice(v); ok && bytes != nil && utf8.Valid(bytes) {
+		// It's a top level slice of bytes that's also valid UTF-8.
+		// Ideally, this would happen at deeper levels too,
+		// but this is sufficient for some significant cases
+		// (json.RawMessage for example).
+		return fmt.Sprintf("%T(%s)", v, quoteString(string(bytes)))
+	}
+	// The pretty.Sprint equivalent does not quote string values.
+	return fmt.Sprintf("%# v", pretty.Formatter(v))
+}
+
+func byteSlice(x interface{}) ([]byte, bool) {
+	v := reflect.ValueOf(x)
+	if !v.IsValid() {
+		return nil, false
+	}
+	t := v.Type()
+	if t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8 {
+		return v.Bytes(), true
+	}
+	return nil, false
+}
+
+func quoteString(s string) string {
+	// TODO think more about what to do about multi-line strings.
+	if strings.Contains(s, `"`) && !strings.Contains(s, "\n") && strconv.CanBackquote(s) {
+		return "`" + s + "`"
+	}
+	return strconv.Quote(s)
+}
+
+// checkStringCall calls f and returns its result, and reports if the call
+// succeeded without panicking due to a nil pointer.
+// If f panics and v is a nil pointer, it returns false.
+func checkStringCall(v interface{}, f func() string) (s string, ok bool) {
+	defer func() {
+		err := recover()
+		if err == nil {
+			return
+		}
+		if val := reflect.ValueOf(v); val.Kind() == reflect.Ptr && val.IsNil() {
+			ok = false
+			return
+		}
+		panic(err)
+	}()
+	return f(), true
+}
+
+type formatFunc func(interface{}) string

+ 144 - 0
data_tool/src/github.com/frankban/quicktest/format_test.go

@@ -0,0 +1,144 @@
+// Licensed under the MIT license, see LICENSE file for details.
+
+package quicktest_test
+
+import (
+	"bytes"
+	"testing"
+)
+
+var formatTests = []struct {
+	about string
+	value interface{}
+	want  string
+}{{
+	about: "error value",
+	value: errBadWolf,
+	want:  "bad wolf\n  file:line",
+}, {
+	about: "error value: not formatted",
+	value: &errTest{
+		msg: "exterminate!",
+	},
+	want: `e"exterminate!"`,
+}, {
+	about: "error value: with quotes",
+	value: &errTest{
+		msg: `cannot open "/no/such/file"`,
+	},
+	want: "e`cannot open \"/no/such/file\"`",
+}, {
+	about: "error value: multi-line",
+	value: &errTest{
+		msg: `err:
+"these are the voyages"`,
+	},
+	want: `e"err:\n\"these are the voyages\""`,
+}, {
+	about: "error value: with backquotes",
+	value: &errTest{
+		msg: "cannot `open` \"file\"",
+	},
+	want: `e"cannot ` + "`open`" + ` \"file\""`,
+}, {
+	about: "error value: not guarding against nil",
+	value: (*errTest)(nil),
+	want:  `e<nil>`,
+}, {
+	about: "stringer",
+	value: bytes.NewBufferString("I am a stringer"),
+	want:  `s"I am a stringer"`,
+}, {
+	about: "stringer: with quotes",
+	value: bytes.NewBufferString(`I say "hello"`),
+	want:  "s`I say \"hello\"`",
+}, {
+	about: "stringer: not guarding against nil",
+	value: (*nilStringer)(nil),
+	want:  "s<nil>",
+}, {
+	about: "string",
+	value: "these are the voyages",
+	want:  `"these are the voyages"`,
+}, {
+	about: "string: with quotes",
+	value: `here is a quote: "`,
+	want:  "`here is a quote: \"`",
+}, {
+	about: "string: multi-line",
+	value: `foo
+"bar"
+`,
+	want: `"foo\n\"bar\"\n"`,
+}, {
+	about: "string: with backquotes",
+	value: `"` + "`",
+	want:  `"\"` + "`\"",
+}, {
+	about: "slice",
+	value: []int{1, 2, 3},
+	want:  "[]int{1, 2, 3}",
+}, {
+	about: "bytes",
+	value: []byte("hello"),
+	want:  `[]uint8("hello")`,
+}, {
+	about: "custom bytes type",
+	value: myBytes("hello"),
+	want:  `quicktest_test.myBytes("hello")`,
+}, {
+	about: "bytes with backquote",
+	value: []byte(`a "b" c`),
+	want:  "[]uint8(`a \"b\" c`)",
+}, {
+	about: "bytes with invalid utf-8",
+	value: []byte("\xff"),
+	want:  "[]uint8{0xff}",
+}, {
+	about: "nil byte slice",
+	value: []byte(nil),
+	want:  "[]uint8(nil)",
+}, {
+	about: "time",
+	value: goTime,
+	want:  `s"2012-03-28 00:00:00 +0000 UTC"`,
+}, {
+	about: "struct with byte slice",
+	value: struct{ X []byte }{[]byte("x")},
+	want:  "struct { X []uint8 }{\n    X:  {0x78},\n}",
+}, {
+	about: "uint64",
+	value: uint64(17),
+	want:  "uint64(17)",
+}, {
+	about: "uint32",
+	value: uint32(17898),
+	want:  "uint32(17898)",
+}, {
+	about: "uintptr",
+	value: uintptr(13),
+	want:  "uintptr(13)",
+},
+}
+
+func TestFormat(t *testing.T) {
+	for _, test := range formatTests {
+		t.Run(test.about, func(t *testing.T) {
+			got := Format(test.value)
+			if got != test.want {
+				t.Fatalf("format:\ngot  %q\nwant %q", got, test.want)
+			}
+		})
+	}
+}
+
+type myBytes []byte
+
+// nilStringer is a stringer not guarding against nil.
+type nilStringer struct {
+	msg string
+}
+
+func (s *nilStringer) String() string {
+	return s.msg
+}

+ 8 - 0
data_tool/src/github.com/frankban/quicktest/go.mod

@@ -0,0 +1,8 @@
+module github.com/frankban/quicktest
+
+require (
+	github.com/google/go-cmp v0.5.9
+	github.com/kr/pretty v0.3.1
+)
+
+go 1.13

+ 10 - 0
data_tool/src/github.com/frankban/quicktest/go.sum

@@ -0,0 +1,10 @@
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
+github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=

+ 55 - 0
data_tool/src/github.com/frankban/quicktest/iter.go

@@ -0,0 +1,55 @@
+// Licensed under the MIT license, see LICENSE file for details.
+
+package quicktest
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// containerIter provides an interface for iterating over a container
+// (map, slice or array).
+type containerIter interface {
+	// next advances to the next item in the container.
+	next() bool
+	// key returns the current key as a string.
+	key() string
+	// value returns the current value.
+	value() reflect.Value
+}
+
+// newIter returns an iterator over x which must be a map, slice
+// or array.
+func newIter(x interface{}) (containerIter, error) {
+	v := reflect.ValueOf(x)
+	switch v.Kind() {
+	case reflect.Map:
+		return newMapIter(v), nil
+	case reflect.Slice, reflect.Array:
+		return &sliceIter{
+			index: -1,
+			v:     v,
+		}, nil
+	default:
+		return nil, fmt.Errorf("map, slice or array required")
+	}
+}
+
+// sliceIter implements containerIter for slices and arrays.
+type sliceIter struct {
+	v     reflect.Value
+	index int
+}
+
+func (i *sliceIter) next() bool {
+	i.index++
+	return i.index < i.v.Len()
+}
+
+func (i *sliceIter) value() reflect.Value {
+	return i.v.Index(i.index)
+}
+
+func (i *sliceIter) key() string {
+	return fmt.Sprintf("index %d", i.index)
+}

+ 29 - 0
data_tool/src/github.com/frankban/quicktest/mapiter.go

@@ -0,0 +1,29 @@
+// Licensed under the MIT license, see LICENSE file for details.
+
+package quicktest
+
+import (
+	"fmt"
+	"reflect"
+)
+
+func newMapIter(v reflect.Value) containerIter {
+	return mapIter{v.MapRange()}
+}
+
+// mapIter implements containerIter for maps.
+type mapIter struct {
+	iter *reflect.MapIter
+}
+
+func (i mapIter) next() bool {
+	return i.iter.Next()
+}
+
+func (i mapIter) key() string {
+	return fmt.Sprintf("key %#v", i.iter.Key())
+}
+
+func (i mapIter) value() reflect.Value {
+	return i.iter.Value()
+}

+ 72 - 0
data_tool/src/github.com/frankban/quicktest/patch.go

@@ -0,0 +1,72 @@
+// Licensed under the MIT license, see LICENSE file for details.
+
+package quicktest
+
+import (
+	"io/ioutil"
+	"os"
+	"reflect"
+)
+
+// Patch sets a variable to a temporary value for the duration of the test.
+//
+// It sets the value pointed to by the given destination to the given
+// value, which must be assignable to the element type of the destination.
+//
+// At the end of the test (see "Deferred execution" in the package docs), the
+// destination is set back to its original value.
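+//
+// For instance (an illustrative sketch; timeNow and fixedTime are assumed to
+// be defined by the caller):
+//
+//	c.Patch(&timeNow, func() time.Time { return fixedTime })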
+func (c *C) Patch(dest, value interface{}) {
+	destv := reflect.ValueOf(dest).Elem()
+	oldv := reflect.New(destv.Type()).Elem()
+	oldv.Set(destv)
+	valuev := reflect.ValueOf(value)
+	if !valuev.IsValid() {
+		// This isn't quite right when the destination type is not
+		// nilable, but it's better than the complex alternative.
+		valuev = reflect.Zero(destv.Type())
+	}
+	destv.Set(valuev)
+	c.cleanup(func() {
+		destv.Set(oldv)
+	})
+}
+
+// Unsetenv unsets an environment variable for the duration of a test.
+func (c *C) Unsetenv(name string) {
+	c.Setenv(name, "")
+	os.Unsetenv(name)
+}
+
+// Mkdir makes a temporary directory and returns its name.
+//
+// At the end of the test (see "Deferred execution" in the package docs), the
+// directory and its contents are removed.
+//
+// Deprecated: in Go >= 1.15 use testing.TB.TempDir instead.
+func (c *C) Mkdir() string {
+	td, ok := c.TB.(interface {
+		TempDir() string
+	})
+	if ok {
+		return td.TempDir()
+	}
+	name, err := ioutil.TempDir("", "quicktest-")
+	c.Assert(err, Equals, nil)
+	c.cleanup(func() {
+		if err := os.RemoveAll(name); err != nil {
+			// Don't call c.Check because the stack traversal logic won't
+			// print the source location, so just report the error directly instead.
+			c.Errorf("quicktest cannot remove temporary testing directory: %v", err)
+		}
+	})
+	return name
+}
+
+// cleanup uses Cleanup when it can, falling back to using Defer.
+func (c *C) cleanup(f func()) {
+	if tb, ok := c.TB.(cleaner); ok {
+		tb.Cleanup(f)
+	} else {
+		c.Defer(f)
+	}
+}

+ 42 - 0
data_tool/src/github.com/frankban/quicktest/patch_go1.14.go

@@ -0,0 +1,42 @@
+// Licensed under the MIT license, see LICENSE file for details.
+
+//go:build go1.14
+// +build go1.14
+
+package quicktest
+
+import "testing"
+
+// Patch sets a variable to a temporary value for the duration of the test.
+//
+// It sets the value pointed to by the given destination to the given value,
+// which must be assignable to the element type of the destination.
+//
+// At the end of the test the destination is set back to its original value
+// using t.Cleanup.
+//
+// The top level Patch function is only available on Go >= 1.14. Use (*C).Patch
+// when on prior versions.
+func Patch(t testing.TB, dest, value interface{}) {
+	New(t).Patch(dest, value)
+}
+
+// Setenv sets an environment variable to a temporary value for the duration of
+// the test.
+//
+// At the end of the test the environment variable is returned to its original
+// value using t.Cleanup.
+//
+// The top level Setenv function is only available on Go >= 1.14. Use
+// (*C).Setenv when on prior versions.
+func Setenv(t testing.TB, name, val string) {
+	New(t).Setenv(name, val)
+}
+
+// Unsetenv unsets an environment variable for the duration of a test.
+//
+// The top level Unsetenv function is only available on Go >= 1.14. Use
+// (*C).Unsetenv when on prior versions.
+func Unsetenv(t testing.TB, name string) {
+	New(t).Unsetenv(name)
+}

+ 41 - 0
data_tool/src/github.com/frankban/quicktest/patch_go1.14_test.go

@@ -0,0 +1,41 @@
+// Licensed under the MIT license, see LICENSE file for details.
+
+//go:build go1.14
+// +build go1.14
+
+package quicktest_test
+
+import (
+	"os"
+	"testing"
+)
+
+func TestPatchSetInt(t *testing.T) {
+	i := 99
+	t.Run("subtest", func(t *testing.T) {
+		Patch(t, &i, 77)
+		Assert(t, i, Equals, 77)
+	})
+	Assert(t, i, Equals, 99)
+}
+
+func TestSetenv(t *testing.T) {
+	const envName = "SOME_VAR"
+	os.Setenv(envName, "initial")
+	t.Run("subtest", func(t *testing.T) {
+		Setenv(t, envName, "a new value")
+		Check(t, os.Getenv(envName), Equals, "a new value")
+	})
+	Check(t, os.Getenv(envName), Equals, "initial")
+}
+
+func TestUnsetenv(t *testing.T) {
+	const envName = "SOME_VAR"
+	os.Setenv(envName, "initial")
+	t.Run("subtest", func(t *testing.T) {
+		Unsetenv(t, envName)
+		_, ok := os.LookupEnv(envName)
+		Assert(t, ok, IsFalse)
+	})
+	Check(t, os.Getenv(envName), Equals, "initial")
+}

+ 27 - 0
data_tool/src/github.com/frankban/quicktest/patch_go1.17.go

@@ -0,0 +1,27 @@
+// Licensed under the MIT license, see LICENSE file for details.
+
+//go:build !go1.17
+// +build !go1.17
+
+package quicktest
+
+import "os"
+
+// Setenv sets an environment variable to a temporary value for the
+// duration of the test.
+//
+// At the end of the test (see "Deferred execution" in the package docs), the
+// environment variable is returned to its original value.
+//
+// This is the equivalent of testing.T.Setenv introduced in Go 1.17.
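+//
+// For instance (an illustrative sketch):
+//
+//	c.Setenv("SOME_VAR", "new value")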
+func (c *C) Setenv(name, val string) {
+	oldVal, oldOK := os.LookupEnv(name)
+	os.Setenv(name, val)
+	c.cleanup(func() {
+		if oldOK {
+			os.Setenv(name, oldVal)
+		} else {
+			os.Unsetenv(name)
+		}
+	})
+}

+ 34 - 0
data_tool/src/github.com/frankban/quicktest/patch_go1.17_test.go

@@ -0,0 +1,34 @@
+// Licensed under the MIT license, see LICENSE file for details.
+
+//go:build !go1.17
+// +build !go1.17
+
+package quicktest_test
+
+import (
+	"os"
+	"testing"
+)
+
+const envName = "SOME_VAR"
+
+func TestCSetenv(t *testing.T) {
+	c := New(t)
+	os.Setenv(envName, "initial")
+	testCleanup(t, func(c *C) {
+		c.Setenv(envName, "new value")
+		c.Check(os.Getenv(envName), Equals, "new value")
+	})
+	c.Check(os.Getenv(envName), Equals, "initial")
+}
+
+func TestCSetenvWithUnsetVariable(t *testing.T) {
+	c := New(t)
+	os.Unsetenv(envName)
+	testCleanup(t, func(c *C) {
+		c.Setenv(envName, "new value")
+		c.Check(os.Getenv(envName), Equals, "new value")
+	})
+	_, ok := os.LookupEnv(envName)
+	c.Assert(ok, IsFalse)
+}

+ 120 - 0
data_tool/src/github.com/frankban/quicktest/patch_test.go

@@ -0,0 +1,120 @@
+// Licensed under the MIT license, see LICENSE file for details.
+
+package quicktest_test
+
+import (
+	"errors"
+	"os"
+	"path/filepath"
+	"testing"
+)
+
+func TestCPatchSetInt(t *testing.T) {
+	c := New(t)
+	i := 99
+	testCleanup(t, func(c *C) {
+		c.Patch(&i, 88)
+		c.Assert(i, Equals, 88)
+	})
+	c.Assert(i, Equals, 99)
+}
+
+func TestCPatchSetError(t *testing.T) {
+	c := New(t)
+	oldErr := errors.New("foo")
+	newErr := errors.New("bar")
+	err := oldErr
+	testCleanup(t, func(c *C) {
+		c.Patch(&err, newErr)
+		c.Assert(err, Equals, newErr)
+	})
+	c.Assert(err, Equals, oldErr)
+}
+
+func TestCPatchSetErrorToNil(t *testing.T) {
+	c := New(t)
+	oldErr := errors.New("foo")
+	err := oldErr
+	testCleanup(t, func(c *C) {
+		c.Patch(&err, nil)
+		c.Assert(err, IsNil)
+	})
+	c.Assert(err, Equals, oldErr)
+}
+
+func TestCPatchSetMapToNil(t *testing.T) {
+	c := New(t)
+	oldMap := map[string]int{"foo": 1234}
+	m := oldMap
+	testCleanup(t, func(c *C) {
+		c.Patch(&m, nil)
+		c.Assert(m, IsNil)
+	})
+	c.Assert(m, DeepEquals, oldMap)
+}
+
+func TestCPatchPanicsWhenNotAssignable(t *testing.T) {
+	c := New(t)
+	i := 99
+	type otherInt int
+	c.Assert(func() {
+		c.Patch(&i, otherInt(88))
+	}, PanicMatches, `reflect\.Set: value of type quicktest_test\.otherInt is not assignable to type int`)
+}
+
+func TestCUnsetenv(t *testing.T) {
+	c := New(t)
+	const envName = "SOME_VAR"
+	os.Setenv(envName, "initial")
+	testCleanup(t, func(c *C) {
+		c.Unsetenv(envName)
+		_, ok := os.LookupEnv(envName)
+		c.Assert(ok, IsFalse)
+	})
+	c.Check(os.Getenv(envName), Equals, "initial")
+}
+
+func TestCUnsetenvWithUnsetVariable(t *testing.T) {
+	c := New(t)
+	const envName = "SOME_VAR"
+	os.Unsetenv(envName)
+	testCleanup(t, func(c *C) {
+		c.Unsetenv(envName)
+		_, ok := os.LookupEnv(envName)
+		c.Assert(ok, IsFalse)
+	})
+	_, ok := os.LookupEnv(envName)
+	c.Assert(ok, IsFalse)
+}
+
+func TestCMkdir(t *testing.T) {
+	c := New(t)
+	var dir string
+	testCleanup(t, func(c *C) {
+		dir = c.Mkdir()
+		c.Assert(dir, Not(Equals), "")
+		info, err := os.Stat(dir)
+		c.Assert(err, IsNil)
+		c.Assert(info.IsDir(), IsTrue)
+		f, err := os.Create(filepath.Join(dir, "hello"))
+		c.Assert(err, IsNil)
+		f.Close()
+	})
+	_, err := os.Stat(dir)
+	c.Assert(err, Not(IsNil))
+}
+
+func testCleanup(t *testing.T, f func(c *C)) {
+	t.Run("subtest", func(t *testing.T) {
+		c := New(t)
+		if _, ok := c.TB.(cleaner); !ok {
+			// Calling Done is required when testing on Go < 1.14.
+			defer c.Done()
+		}
+		f(c)
+	})
+}
+
+type cleaner interface {
+	Cleanup(func())
+}

+ 122 - 0
data_tool/src/github.com/frankban/quicktest/qtsuite/suite.go

@@ -0,0 +1,122 @@
+// Licensed under the MIT license, see LICENSE file for details.
+
+/*
+Package qtsuite allows quicktest to run test suites.
+
+A test suite is a value with one or more test methods.
+For example, the following code defines a suite of test functions that starts
+an HTTP server before running each test, and tears it down afterwards:
+
+	type suite struct {
+		url string
+	}
+
+	func (s *suite) Init(c *qt.C) {
+		hnd := func(w http.ResponseWriter, req *http.Request) {
+			fmt.Fprintf(w, "%s %s", req.Method, req.URL.Path)
+		}
+		srv := httptest.NewServer(http.HandlerFunc(hnd))
+		c.Cleanup(srv.Close)
+		s.url = srv.URL
+	}
+
+	func (s *suite) TestGet(c *qt.C) {
+		c.Parallel()
+		resp, err := http.Get(s.url)
+		c.Assert(err, qt.Equals, nil)
+		defer resp.Body.Close()
+		b, err := ioutil.ReadAll(resp.Body)
+		c.Assert(err, qt.Equals, nil)
+		c.Assert(string(b), qt.Equals, "GET /")
+	}
+
+	func (s *suite) TestHead(c *qt.C) {
+		c.Parallel()
+		resp, err := http.Head(s.url + "/path")
+		c.Assert(err, qt.Equals, nil)
+		defer resp.Body.Close()
+		b, err := ioutil.ReadAll(resp.Body)
+		c.Assert(err, qt.Equals, nil)
+		c.Assert(string(b), qt.Equals, "")
+		c.Assert(resp.ContentLength, qt.Equals, int64(10))
+	}
+
+The above code could be invoked from a test function like this:
+
+	func TestHTTPMethods(t *testing.T) {
+		qtsuite.Run(qt.New(t), &suite{"http://example.com"})
+	}
+*/
+package qtsuite
+
+import (
+	"reflect"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+
+	qt "github.com/frankban/quicktest"
+)
+
+// Run runs each test method defined on the given value as a separate
+// subtest. A test is a method of the form
+//
+//	func (T) TestXxx(*quicktest.C)
+//
+// where Xxx does not start with a lowercase letter.
+//
+// If suite is a pointer, the value pointed to is copied before any
+// methods are invoked on it; a new copy is made for each test. This
+// means that it is OK for tests to modify fields in suite concurrently
+// if desired - it's OK to call c.Parallel().
+//
+// If suite has a method of the form
+//
+//	func (T) Init(*quicktest.C)
+//
+// this method will be invoked before each test run.
+func Run(c *qt.C, suite interface{}) {
+	sv := reflect.ValueOf(suite)
+	st := sv.Type()
+	init, hasInit := st.MethodByName("Init")
+	if hasInit && !isValidMethod(init) {
+		c.Fatal("wrong signature for Init, must be Init(*quicktest.C)")
+	}
+	for i := 0; i < st.NumMethod(); i++ {
+		m := st.Method(i)
+		if !isTestMethod(m) {
+			continue
+		}
+		c.Run(m.Name, func(c *qt.C) {
+			if !isValidMethod(m) {
+				c.Fatalf("wrong signature for %s, must be %s(*quicktest.C)", m.Name, m.Name)
+			}
+
+			sv := sv
+			if st.Kind() == reflect.Ptr {
+				sv1 := reflect.New(st.Elem())
+				sv1.Elem().Set(sv.Elem())
+				sv = sv1
+			}
+			args := []reflect.Value{sv, reflect.ValueOf(c)}
+			if hasInit {
+				init.Func.Call(args)
+			}
+			m.Func.Call(args)
+		})
+	}
+}
+
+var cType = reflect.TypeOf(&qt.C{})
+
+func isTestMethod(m reflect.Method) bool {
+	if !strings.HasPrefix(m.Name, "Test") {
+		return false
+	}
+	r, n := utf8.DecodeRuneInString(m.Name[4:])
+	return n == 0 || !unicode.IsLower(r)
+}
+
+func isValidMethod(m reflect.Method) bool {
+	return m.Type.NumIn() == 2 && m.Type.NumOut() == 0 && m.Type.In(1) == cType
+}

+ 147 - 0
data_tool/src/github.com/frankban/quicktest/qtsuite/suite_test.go

@@ -0,0 +1,147 @@
+// Licensed under the MIT license, see LICENSE file for details.
+
+package qtsuite_test
+
+import (
+	"bytes"
+	"fmt"
+	"testing"
+
+	qt "github.com/frankban/quicktest"
+)
+
+func TestRunSuite(t *testing.T) {
+	c := qt.New(t)
+	var calls []call
+	tt := &testingT{}
+	Run(qt.New(tt), testSuite{calls: &calls})
+	c.Assert(calls, qt.DeepEquals, []call{
+		{"Test1", 0},
+		{"Test4", 0},
+	})
+}
+
+func TestRunSuiteEmbedded(t *testing.T) {
+	c := qt.New(t)
+	var calls []call
+	tt := &testingT{}
+	suite := struct {
+		testSuite
+	}{testSuite: testSuite{calls: &calls}}
+	Run(qt.New(tt), suite)
+	c.Assert(calls, qt.DeepEquals, []call{
+		{"Test1", 0},
+		{"Test4", 0},
+	})
+}
+
+func TestRunSuitePtr(t *testing.T) {
+	c := qt.New(t)
+	var calls []call
+	tt := &testingT{}
+	Run(qt.New(tt), &testSuite{calls: &calls})
+	c.Assert(calls, qt.DeepEquals, []call{
+		{"Init", 0},
+		{"Test1", 1},
+		{"Init", 0},
+		{"Test4", 1},
+	})
+}
+
+type testSuite struct {
+	init  int
+	calls *[]call
+}
+
+func (s testSuite) addCall(name string) {
+	*s.calls = append(*s.calls, call{Name: name, Init: s.init})
+}
+
+func (s *testSuite) Init(*qt.C) {
+	s.addCall("Init")
+	s.init++
+}
+
+func (s testSuite) Test1(*qt.C) {
+	s.addCall("Test1")
+}
+
+func (s testSuite) Test2() {
+	s.addCall("Test2")
+}
+
+func (s testSuite) Test3(*testing.T) {
+	s.addCall("Test3")
+}
+
+func (s testSuite) Test4(*qt.C) {
+	s.addCall("Test4")
+}
+
+func (s testSuite) Test5(*qt.C) bool {
+	s.addCall("Test5")
+	return false
+}
+
+func (s testSuite) Testa(*qt.C) {
+	s.addCall("Testa")
+}
+
+type call struct {
+	Name string
+	Init int
+}
+
+func TestInvalidInit(t *testing.T) {
+	c := qt.New(t)
+	tt := &testingT{}
+	tc := qt.New(tt)
+	Run(tc, invalidTestSuite{})
+	c.Assert(tt.fatalString(), qt.Equals, "wrong signature for Init, must be Init(*quicktest.C)")
+}
+
+type invalidTestSuite struct{}
+
+func (invalidTestSuite) Init() {}
+
+// testingT can be passed to qt.New for testing purposes.
+type testingT struct {
+	testing.TB
+
+	errorBuf bytes.Buffer
+	fatalBuf bytes.Buffer
+
+	subTestResult bool
+	subTestName   string
+	subTestT      *testing.T
+}
+
+// Error overrides *testing.T.Error so that messages are collected.
+func (t *testingT) Error(a ...interface{}) {
+	fmt.Fprint(&t.errorBuf, a...)
+}
+
+// Fatal overrides *testing.T.Fatal so that messages are collected and the
+// goroutine is not killed.
+func (t *testingT) Fatal(a ...interface{}) {
+	fmt.Fprint(&t.fatalBuf, a...)
+}
+
+// Run overrides *testing.T.Run.
+func (t *testingT) Run(name string, f func(t *testing.T)) bool {
+	t.subTestName, t.subTestT = name, &testing.T{}
+	ch := make(chan struct{})
+	// Run the subtest in its own goroutine so that if it calls runtime.GoExit,
+	// we can still return appropriately.
+	go func() {
+		defer close(ch)
+		f(t.subTestT)
+	}()
+	<-ch
+	return t.subTestResult
+}
+
+// fatalString returns the fatal error message.
+func (t *testingT) fatalString() string {
+	return t.fatalBuf.String()
+}

+ 370 - 0
data_tool/src/github.com/frankban/quicktest/quicktest.go

@@ -0,0 +1,370 @@
+// Licensed under the MIT license, see LICENSE file for details.
+
+package quicktest
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+	"testing"
+)
+
+// Check runs the given check using the provided t and continues execution in
+// case of failure. For instance:
+//
+//	qt.Check(t, answer, qt.Equals, 42)
+//	qt.Check(t, got, qt.IsNil, qt.Commentf("iteration %d", i))
+//
+// Additional args (not consumed by the checker), when provided, are included as
+// comments in the failure output when the check fails.
+func Check(t testing.TB, got interface{}, checker Checker, args ...interface{}) bool {
+	t.Helper()
+	return New(t).Check(got, checker, args...)
+}
+
+// Assert runs the given check using the provided t and stops execution in case
+// of failure. For instance:
+//
+//	qt.Assert(t, got, qt.DeepEquals, []int{42, 47})
+//	qt.Assert(t, got, qt.ErrorMatches, "bad wolf .*", qt.Commentf("a comment"))
+//
+// Additional args (not consumed by the checker), when provided, are included as
+// comments in the failure output when the check fails.
+func Assert(t testing.TB, got interface{}, checker Checker, args ...interface{}) bool {
+	t.Helper()
+	return New(t).Assert(got, checker, args...)
+}
+
+// New returns a new checker instance that uses t to fail the test when checks
+// fail. It only ever calls the Fatal, Error and (when available) Run methods
+// of t. For instance:
+//
+//	func TestFoo(t *testing.T) {
+//	    t.Run("A=42", func(t *testing.T) {
+//	        c := qt.New(t)
+//	        c.Assert(a, qt.Equals, 42)
+//	    })
+//	}
+//
+// The library already provides some base checkers, and more can be added by
+// implementing the Checker interface.
+//
+// If there is a likelihood that Defer will be called, then
+// a call to Done should be deferred after calling New.
+// For example:
+//
+//	func TestFoo(t *testing.T) {
+//	        c := qt.New(t)
+//	        defer c.Done()
+//	        c.Setenv("HOME", "/non-existent")
+//	        c.Assert(os.Getenv("HOME"), qt.Equals, "/non-existent")
+//	})
+//
+// A value of C that has a non-nil TB field but is otherwise zero is valid.
+// So:
+//
+//	c := &qt.C{TB: t}
+//
+// is a valid way to create a C value; it's exactly the same as:
+//
+//	c := qt.New(t)
+//
+// Methods on C may be called concurrently, assuming the underlying
+// `testing.TB` implementation also allows that.
+func New(t testing.TB) *C {
+	return &C{
+		TB: t,
+	}
+}
+
+// C is a quicktest checker. It embeds a testing.TB value and provides
+// additional checking functionality. If an Assert or Check operation fails, it
+// uses the wrapped TB value to fail the test appropriately.
+type C struct {
+	testing.TB
+
+	mu         sync.Mutex
+	doneNeeded bool
+	deferred   func()
+	format     formatFunc
+}
+
+// cleaner is implemented by testing.TB on Go 1.14 and later.
+type cleaner interface {
+	Cleanup(func())
+}
+
+// Defer registers a function to be called when c.Done is
+// called. Deferred functions will be called in last added, first called
+// order. If c.Done is not called by the end of the test, the test
+// may panic. Note that if Cleanup is called, there is no
+// need to call Done.
+//
+// Deprecated: in Go >= 1.14 use testing.TB.Cleanup instead.
+func (c *C) Defer(f func()) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if cleaner, ok := c.TB.(cleaner); ok {
+		// Use TB.Cleanup when available, but add a check
+		// that Done has been called so that we don't run
+		// into unexpected Go version incompatibilities.
+		if !c.doneNeeded {
+			c.doneNeeded = true
+			cleaner.Cleanup(func() {
+				c.mu.Lock()
+				defer c.mu.Unlock()
+				if c.doneNeeded {
+					panic("Done not called after Defer")
+				}
+			})
+		}
+
+		cleaner.Cleanup(f)
+
+		return
+	}
+
+	oldDeferred := c.deferred
+	if oldDeferred != nil {
+		c.deferred = func() {
+			defer oldDeferred()
+			f()
+		}
+	} else {
+		c.deferred = f
+	}
+}
+
+// Done calls all the functions registered by Defer in reverse
+// registration order. After it's called, the functions are
+// unregistered, so calling Done twice will only call them once.
+//
+// When a test function is called by Run, Done will be called
+// automatically on the C value passed into it.
+//
+// Deprecated: in Go >= 1.14 this is no longer needed if using
+// testing.TB.Cleanup.
+func (c *C) Done() {
+	c.mu.Lock()
+	deferred := c.deferred
+	c.deferred = nil
+	c.doneNeeded = false
+	c.mu.Unlock()
+
+	if deferred != nil {
+		deferred()
+	}
+}
+
+// SetFormat sets the function used to print values in test failures.
+// By default Format is used.
+// Any subsequent subtests invoked with c.Run will also use this function by
+// default.
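+//
+// For instance (a minimal sketch using fmt's Go-syntax representation):
+//
+//	c.SetFormat(func(v interface{}) string {
+//		return fmt.Sprintf("%#v", v)
+//	})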
+func (c *C) SetFormat(format func(interface{}) string) {
+	c.mu.Lock()
+	c.format = format
+	c.mu.Unlock()
+}
+
+// getFormat returns the format function
+// safely acquired under lock.
+func (c *C) getFormat() func(interface{}) string {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	return c.format
+}
+
+// Check runs the given check and continues execution in case of failure.
+// For instance:
+//
+//	c.Check(answer, qt.Equals, 42)
+//	c.Check(got, qt.IsNil, qt.Commentf("iteration %d", i))
+//
+// Additional args (not consumed by the checker), when provided, are included
+// as comments in the failure output when the check fails.
+func (c *C) Check(got interface{}, checker Checker, args ...interface{}) bool {
+	c.TB.Helper()
+	return check(c, checkParams{
+		fail:    c.TB.Error,
+		checker: checker,
+		got:     got,
+		args:    args,
+	})
+}
+
+// Assert runs the given check and stops execution in case of failure.
+// For instance:
+//
+//	c.Assert(got, qt.DeepEquals, []int{42, 47})
+//	c.Assert(got, qt.ErrorMatches, "bad wolf .*", qt.Commentf("a comment"))
+//
+// Additional args (not consumed by the checker), when provided, are included
+// as comments in the failure output when the check fails.
+func (c *C) Assert(got interface{}, checker Checker, args ...interface{}) bool {
+	c.TB.Helper()
+	return check(c, checkParams{
+		fail:    c.TB.Fatal,
+		checker: checker,
+		got:     got,
+		args:    args,
+	})
+}
+
+var (
+	stringType = reflect.TypeOf("")
+	boolType   = reflect.TypeOf(true)
+	tbType     = reflect.TypeOf(new(testing.TB)).Elem()
+)
+
+// Run runs f as a subtest of t called name. It's a wrapper around
+// the Run method of c.TB that provides the quicktest checker to f. When
+// the function completes, c.Done will be called to run any
+// functions registered with c.Defer.
+//
+// c.TB must implement a Run method of the following form:
+//
+//	Run(string, func(T)) bool
+//
+// where T is any type that is assignable to testing.TB.
+// Implementations include *testing.T, *testing.B and *C itself.
+//
+// The TB field in the subtest will hold the value passed
+// by Run to its argument function.
+//
+//	func TestFoo(t *testing.T) {
+//	    c := qt.New(t)
+//	    c.Run("A=42", func(c *qt.C) {
+//	        // This assertion only stops the current subtest.
+//	        c.Assert(a, qt.Equals, 42)
+//	    })
+//	}
+//
+// A panic is raised when Run is called and the embedded concrete type does not
+// implement a Run method with a correct signature.
+func (c *C) Run(name string, f func(c *C)) bool {
+	badType := func(m string) {
+		panic(fmt.Sprintf("cannot execute Run with underlying concrete type %T (%s)", c.TB, m))
+	}
+	m := reflect.ValueOf(c.TB).MethodByName("Run")
+	if !m.IsValid() {
+		// c.TB doesn't implement a Run method.
+		badType("no Run method")
+	}
+	mt := m.Type()
+	if mt.NumIn() != 2 ||
+		mt.In(0) != stringType ||
+		mt.NumOut() != 1 ||
+		mt.Out(0) != boolType {
+		// The Run method doesn't have the right argument counts and types.
+		badType("wrong argument count for Run method")
+	}
+	farg := mt.In(1)
+	if farg.Kind() != reflect.Func ||
+		farg.NumIn() != 1 ||
+		farg.NumOut() != 0 ||
+		!farg.In(0).AssignableTo(tbType) {
+		// The Run method's function argument doesn't have the right signature.
+		badType("bad first argument type for Run method")
+	}
+	cFormat := c.getFormat()
+	fv := reflect.MakeFunc(farg, func(args []reflect.Value) []reflect.Value {
+		c2 := New(args[0].Interface().(testing.TB))
+		defer c2.Done()
+		c2.SetFormat(cFormat)
+		f(c2)
+		return nil
+	})
+	return m.Call([]reflect.Value{reflect.ValueOf(name), fv})[0].Interface().(bool)
+}
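A sketch of Run cooperating with a custom testing type, along the lines of the customT helper used in this package's tests; the customTB name and its fixtures field are illustrative, and the imports are the same as in the sketch above:

// customTB is a hypothetical wrapper that carries extra data into subtests.
type customTB struct {
	*testing.T
	fixtures string
}

// Run has the Run(string, func(T)) bool shape that C.Run looks for, with
// T = *customTB, which is assignable to testing.TB via the embedded *testing.T.
func (t *customTB) Run(name string, f func(*customTB)) bool {
	return t.T.Run(name, func(t1 *testing.T) {
		f(&customTB{t1, t.fixtures})
	})
}

func TestWithCustomTB(t *testing.T) {
	c := qt.New(&customTB{t, "testdata"})
	c.Run("sub", func(c *qt.C) {
		// The subtest's c.TB holds the *customTB passed by the outer Run.
		tb := c.TB.(*customTB)
		c.Assert(tb.fixtures, qt.Equals, "testdata")
	})
}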
+
+// Parallel signals that this test is to be run in parallel with (and only with) other parallel tests.
+// It's a wrapper around *testing.T.Parallel.
+//
+// A panic is raised when Parallel is called and the embedded concrete type does not
+// implement Parallel, for instance if TB's concrete type is a benchmark.
+func (c *C) Parallel() {
+	p, ok := c.TB.(interface {
+		Parallel()
+	})
+	if !ok {
+		panic(fmt.Sprintf("cannot execute Parallel with underlying concrete type %T", c.TB))
+	}
+	p.Parallel()
+}
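A small sketch of Parallel inside subtests (same hypothetical imports as above):

func TestParallelSubtests(t *testing.T) {
	c := qt.New(t)
	for _, name := range []string{"a", "b"} {
		name := name
		c.Run(name, func(c *qt.C) {
			c.Parallel() // forwarded to the subtest's *testing.T
			c.Assert(len(name), qt.Equals, 1)
		})
	}
}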
+
+// check performs the actual check with the provided params.
+// In case of failure p.fail is called. In the fail report values are formatted
+// using p.format.
+func check(c *C, p checkParams) bool {
+	c.TB.Helper()
+	rp := reportParams{
+		got:    p.got,
+		args:   p.args,
+		format: c.getFormat(),
+	}
+	if rp.format == nil {
+		// No format set; use the default: Format.
+		rp.format = Format
+	}
+
+	// Allow checkers to annotate messages.
+	note := func(key string, value interface{}) {
+		rp.notes = append(rp.notes, note{
+			key:   key,
+			value: value,
+		})
+	}
+
+	// Ensure that we have a checker.
+	if p.checker == nil {
+		p.fail(report(BadCheckf("nil checker provided"), rp))
+		return false
+	}
+
+	// Extract comments if provided.
+	for len(p.args) > 0 {
+		comment, ok := p.args[len(p.args)-1].(Comment)
+		if !ok {
+			break
+		}
+		rp.comments = append([]Comment{comment}, rp.comments...)
+		p.args = p.args[:len(p.args)-1]
+	}
+	rp.args = p.args
+
+	// Validate that we have the correct number of arguments.
+	rp.argNames = p.checker.ArgNames()
+	wantNumArgs := len(rp.argNames) - 1
+	if gotNumArgs := len(rp.args); gotNumArgs != wantNumArgs {
+		if gotNumArgs > 0 {
+			note("got args", rp.args)
+		}
+		if wantNumArgs > 0 {
+			note("want args", Unquoted(strings.Join(rp.argNames[1:], ", ")))
+		}
+		var prefix string
+		if gotNumArgs > wantNumArgs {
+			prefix = "too many arguments provided to checker"
+		} else {
+			prefix = "not enough arguments provided to checker"
+		}
+		p.fail(report(BadCheckf("%s: got %d, want %d", prefix, gotNumArgs, wantNumArgs), rp))
+		return false
+	}
+
+	// Execute the check and report the failure if necessary.
+	if err := p.checker.Check(p.got, p.args, note); err != nil {
+		p.fail(report(err, rp))
+		return false
+	}
+	return true
+}
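To illustrate the contract that check enforces, here is a sketch of a hypothetical user-defined checker: ArgNames determines how many extra arguments check expects (the first name describes got), note adds annotations to the failure report, and BadCheckf flags misuse of the checker itself. The divisibleBy name is illustrative, and the errors package plus the usual qt alias are assumed to be imported:

// divisibleBy is a hypothetical checker verifying that got is an int that is
// evenly divisible by the single extra argument.
type divisibleBy struct{}

// ArgNames names got plus one expected argument, so check demands exactly
// one extra arg (besides any trailing Commentf values).
func (divisibleBy) ArgNames() []string {
	return []string{"got", "divisor"}
}

func (divisibleBy) Check(got interface{}, args []interface{}, note func(key string, value interface{})) error {
	n, ok := got.(int)
	if !ok {
		return qt.BadCheckf("got value is not an int")
	}
	d, ok := args[0].(int)
	if !ok || d == 0 {
		return qt.BadCheckf("divisor must be a non-zero int")
	}
	if n%d != 0 {
		note("remainder", n%d)
		return errors.New("values are not divisible")
	}
	return nil
}

With that in place, c.Assert(42, divisibleBy{}, 7) would pass, while c.Assert(42, divisibleBy{}, 5) would fail with a "remainder" note in the report.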
+
+// checkParams holds parameters for executing a check.
+type checkParams struct {
+	fail    func(...interface{})
+	checker Checker
+	got     interface{}
+	args    []interface{}
+}

+ 751 - 0
data_tool/src/github.com/frankban/quicktest/quicktest_test.go

@@ -0,0 +1,751 @@
+// Licensed under the MIT license, see LICENSE file for details.
+
+package quicktest_test
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"strings"
+	"testing"
+)
+
+var _ testing.TB = (*C)(nil)
+
+var cTests = []struct {
+	about           string
+	checker         Checker
+	got             interface{}
+	args            []interface{}
+	format          func(interface{}) string
+	expectedFailure string
+}{{
+	about:   "success",
+	checker: Equals,
+	got:     42,
+	args:    []interface{}{42},
+}, {
+	about:   "failure",
+	checker: Equals,
+	got:     "42",
+	args:    []interface{}{"47"},
+	expectedFailure: `
+error:
+  values are not equal
+got:
+  "42"
+want:
+  "47"
+`,
+}, {
+	about:   "failure with % signs",
+	checker: Equals,
+	got:     "42%x",
+	args:    []interface{}{"47%y"},
+	expectedFailure: `
+error:
+  values are not equal
+got:
+  "42%x"
+want:
+  "47%y"
+`,
+}, {
+	about:   "failure with comment",
+	checker: Equals,
+	got:     true,
+	args:    []interface{}{false, Commentf("apparently %v != %v", true, false)},
+	expectedFailure: `
+error:
+  values are not equal
+comment:
+  apparently true != false
+got:
+  bool(true)
+want:
+  bool(false)
+`,
+}, {
+	about:   "another failure with comment",
+	checker: IsNil,
+	got:     42,
+	args:    []interface{}{Commentf("bad wolf: %d", 42)},
+	expectedFailure: `
+error:
+  got non-nil value
+comment:
+  bad wolf: 42
+got:
+  int(42)
+`,
+}, {
+	about:   "failure with constant comment",
+	checker: IsNil,
+	got:     "something",
+	args:    []interface{}{Commentf("these are the voyages")},
+	expectedFailure: `
+error:
+  got non-nil value
+comment:
+  these are the voyages
+got:
+  "something"
+`,
+}, {
+	about:   "failure with empty comment",
+	checker: IsNil,
+	got:     47,
+	args:    []interface{}{Commentf("")},
+	expectedFailure: `
+error:
+  got non-nil value
+got:
+  int(47)
+`,
+}, {
+	about:   "failure with multiple comments",
+	checker: IsNil,
+	got:     42,
+	args: []interface{}{
+		Commentf("bad wolf: %d", 42),
+		Commentf("second comment"),
+	},
+	expectedFailure: `
+error:
+  got non-nil value
+comment:
+  bad wolf: 42
+comment:
+  second comment
+got:
+  int(42)
+`,
+}, {
+	about: "nil checker",
+	expectedFailure: `
+error:
+  bad check: nil checker provided
+`,
+}, {
+	about:   "not enough arguments",
+	checker: Equals,
+	got:     42,
+	args:    []interface{}{},
+	expectedFailure: `
+error:
+  bad check: not enough arguments provided to checker: got 0, want 1
+want args:
+  want
+`,
+}, {
+	about:   "not enough arguments with comment",
+	checker: DeepEquals,
+	got:     42,
+	args:    []interface{}{Commentf("test %d", 0)},
+	expectedFailure: `
+error:
+  bad check: not enough arguments provided to checker: got 0, want 1
+comment:
+  test 0
+want args:
+  want
+`,
+}, {
+	about:   "too many arguments",
+	checker: Matches,
+	got:     42,
+	args:    []interface{}{42, 47},
+	expectedFailure: `
+error:
+  bad check: too many arguments provided to checker: got 2, want 1
+got args:
+  []interface {}{
+      int(42),
+      int(47),
+  }
+want args:
+  regexp
+`,
+}, {
+	about:   "really too many arguments",
+	checker: DeepEquals,
+	got:     42,
+	args:    []interface{}{42, 47, nil, "stop"},
+	expectedFailure: `
+error:
+  bad check: too many arguments provided to checker: got 4, want 1
+got args:
+  []interface {}{
+      int(42),
+      int(47),
+      nil,
+      "stop",
+  }
+want args:
+  want
+`,
+}, {
+	about:   "too many arguments with comment",
+	checker: IsNil,
+	got:     42,
+	args:    []interface{}{nil, Commentf("these are the voyages")},
+	expectedFailure: `
+error:
+  bad check: too many arguments provided to checker: got 1, want 0
+comment:
+  these are the voyages
+got args:
+  []interface {}{
+      nil,
+  }
+`,
+}, {
+	about: "many arguments and notes",
+	checker: &testingChecker{
+		argNames: []string{"arg1", "arg2", "arg3"},
+		addNotes: func(note func(key string, value interface{})) {
+			note("note1", "these")
+			note("note2", Unquoted("are"))
+			note("note3", "the")
+			note("note4", "voyages")
+			note("note5", true)
+		},
+		err: errors.New("bad wolf"),
+	},
+	got:  42,
+	args: []interface{}{"val2", "val3"},
+	expectedFailure: `
+error:
+  bad wolf
+note1:
+  "these"
+note2:
+  are
+note3:
+  "the"
+note4:
+  "voyages"
+note5:
+  bool(true)
+arg1:
+  int(42)
+arg2:
+  "val2"
+arg3:
+  "val3"
+`,
+}, {
+	about: "many arguments and notes with the same value",
+	checker: &testingChecker{
+		argNames: []string{"arg1", "arg2", "arg3", "arg4"},
+		addNotes: func(note func(key string, value interface{})) {
+			note("note1", "value1")
+			note("note2", []int{42})
+			note("note3", "value1")
+			note("note4", nil)
+		},
+		err: errors.New("bad wolf"),
+	},
+	got:  "value1",
+	args: []interface{}{"value1", []int{42}, nil},
+	expectedFailure: `
+error:
+  bad wolf
+note1:
+  "value1"
+note2:
+  []int{42}
+note3:
+  <same as "note1">
+note4:
+  nil
+arg1:
+  <same as "note1">
+arg2:
+  <same as "note1">
+arg3:
+  <same as "note2">
+arg4:
+  <same as "note4">
+`,
+}, {
+	about: "many arguments and notes with custom format function",
+	checker: &testingChecker{
+		argNames: []string{"arg1", "arg2", "arg3"},
+		addNotes: func(note func(key string, value interface{})) {
+			note("note1", "these")
+			note("note2", Unquoted("are"))
+			note("note3", "the")
+			note("note4", "voyages")
+			note("note5", true)
+		},
+		err: errors.New("bad wolf"),
+	},
+	got:  42,
+	args: []interface{}{"val2", "val3"},
+	format: func(v interface{}) string {
+		return fmt.Sprintf("bad wolf %v", v)
+	},
+	expectedFailure: `
+error:
+  bad wolf
+note1:
+  bad wolf these
+note2:
+  are
+note3:
+  bad wolf the
+note4:
+  bad wolf voyages
+note5:
+  bad wolf true
+arg1:
+  bad wolf 42
+arg2:
+  bad wolf val2
+arg3:
+  bad wolf val3
+`,
+}, {
+	about: "bad check with notes",
+	checker: &testingChecker{
+		argNames: []string{"got", "want"},
+		addNotes: func(note func(key string, value interface{})) {
+			note("note", 42)
+		},
+		err: BadCheckf("bad wolf"),
+	},
+	got:  42,
+	args: []interface{}{"want"},
+	expectedFailure: `
+error:
+  bad check: bad wolf
+note:
+  int(42)
+`,
+}, {
+	about: "silent failure with notes",
+	checker: &testingChecker{
+		argNames: []string{"got", "want"},
+		addNotes: func(note func(key string, value interface{})) {
+			note("note1", "first note")
+			note("note2", Unquoted("second note"))
+		},
+		err: ErrSilent,
+	},
+	got:  42,
+	args: []interface{}{"want"},
+	expectedFailure: `
+note1:
+  "first note"
+note2:
+  second note
+`,
+}}
+
+func TestCAssertCheck(t *testing.T) {
+	for _, test := range cTests {
+		t.Run("Assert: "+test.about, func(t *testing.T) {
+			if test.format != nil {
+				t.Skip("changing format not supported when using qt.Assert directly")
+			}
+			tt := &testingT{}
+			ok := Assert(tt, test.got, test.checker, test.args...)
+			checkResult(t, ok, tt.fatalString(), test.expectedFailure)
+			if tt.errorString() != "" {
+				t.Fatalf("no error messages expected, but got %q", tt.errorString())
+			}
+		})
+		t.Run("Check: "+test.about, func(t *testing.T) {
+			if test.format != nil {
+				t.Skip("changing format not supported when using qt.Check directly")
+			}
+			tt := &testingT{}
+			ok := Check(tt, test.got, test.checker, test.args...)
+			checkResult(t, ok, tt.errorString(), test.expectedFailure)
+			if tt.fatalString() != "" {
+				t.Fatalf("no fatal messages expected, but got %q", tt.fatalString())
+			}
+		})
+		t.Run("c.Assert: "+test.about, func(t *testing.T) {
+			tt := &testingT{}
+			c := New(tt)
+			if test.format != nil {
+				c.SetFormat(test.format)
+			}
+			ok := c.Assert(test.got, test.checker, test.args...)
+			checkResult(t, ok, tt.fatalString(), test.expectedFailure)
+			if tt.errorString() != "" {
+				t.Fatalf("no error messages expected, but got %q", tt.errorString())
+			}
+		})
+		t.Run("c.Check: "+test.about, func(t *testing.T) {
+			tt := &testingT{}
+			c := New(tt)
+			if test.format != nil {
+				c.SetFormat(test.format)
+			}
+			ok := c.Check(test.got, test.checker, test.args...)
+			checkResult(t, ok, tt.errorString(), test.expectedFailure)
+			if tt.fatalString() != "" {
+				t.Fatalf("no fatal messages expected, but got %q", tt.fatalString())
+			}
+		})
+	}
+}
+
+func TestCRunSuccess(t *testing.T) {
+	tt := &testingT{}
+	c := New(tt)
+	var run bool
+	subTestName := "my test"
+	ok := c.Run(subTestName, func(innerC *C) {
+		run = true
+		if innerC == c {
+			t.Fatal("subtest C: same instance provided")
+		}
+		if innerC.TB != tt.subTestT {
+			t.Fatalf("subtest testing object: got %p, want %p", innerC.TB, tt.subTestT)
+		}
+		if tt.subTestName != subTestName {
+			t.Fatalf("subtest name: got %q, want %q", tt.subTestName, subTestName)
+		}
+	})
+	assertBool(t, run, true)
+	assertBool(t, ok, false)
+
+	// Simulate a test success.
+	tt.subTestResult = true
+	ok = c.Run(subTestName, func(innerC *C) {})
+	assertBool(t, ok, true)
+}
+
+func TestCRunOnBenchmark(t *testing.T) {
+	called := false
+	testing.Benchmark(func(b *testing.B) {
+		c := New(b)
+		c.Run("c", func(c *C) {
+			b1, ok := c.TB.(*testing.B)
+			if !ok {
+				t.Errorf("c.TB is type %T not *testing.B", c.TB)
+				return
+			}
+			if b1 == b {
+				t.Errorf("c.TB hasn't been given a new B value")
+				return
+			}
+			called = true
+		})
+	})
+	if !called {
+		t.Fatalf("sub-benchmark was never called")
+	}
+}
+
+// wrongRun1 has a Run method with the wrong argument count.
+type wrongRun1 struct {
+	testing.TB
+}
+
+func (wrongRun1) Run() {}
+
+// wrongRun2 has no Run method.
+type wrongRun2 struct {
+	testing.TB
+}
+
+// wrongRun3 has a Run method whose callback parameter is not
+// assignable to testing.TB.
+type wrongRun3 struct {
+	testing.TB
+}
+
+func (wrongRun3) Run(string, func(string)) bool { return false }
+
+// wrongRun4 has a Run method that doesn't return bool.
+type wrongRun4 struct {
+	testing.TB
+}
+
+func (wrongRun4) Run(string, func(*testing.T)) {}
+
+var CRunPanicTests = []struct {
+	tb          testing.TB
+	expectPanic string
+}{{
+	tb:          wrongRun1{},
+	expectPanic: "wrong argument count for Run method",
+}, {
+	tb:          wrongRun2{},
+	expectPanic: "no Run method",
+}, {
+	tb:          wrongRun3{},
+	expectPanic: "bad first argument type for Run method",
+}, {
+	tb:          wrongRun4{},
+	expectPanic: "wrong argument count for Run method",
+}}
+
+func TestCRunPanic(t *testing.T) {
+	for _, test := range CRunPanicTests {
+		t.Run(fmt.Sprintf("%T", test.tb), func(t *testing.T) {
+			c := New(test.tb)
+			defer func() {
+				got := recover()
+				want := fmt.Sprintf(
+					"cannot execute Run with underlying concrete type %T (%s)",
+					test.tb, test.expectPanic,
+				)
+				if got != want {
+					t.Fatalf("unexpected panic recover message; got %q want %q", got, want)
+				}
+			}()
+			c.Run("panic", func(innerC *C) {})
+		})
+	}
+}
+
+func TestCRunFormat(t *testing.T) {
+	tt, innerTT := &testingT{}, &testingT{}
+	c := New(tt)
+	c.SetFormat(func(v interface{}) string {
+		return fmt.Sprintf("myfmt(%v)", v)
+	})
+	c.Run("my test", func(innerC *C) {
+		innerC.TB = innerTT
+		innerC.Check(42, Equals, nil)
+	})
+	assertPrefix(t, innerTT.errorString(), `
+error:
+  values are not equal
+got:
+  myfmt(42)
+want:
+  myfmt(<nil>)
+`)
+}
+
+func TestHelper(t *testing.T) {
+	tt := &testingT{}
+	Assert(tt, true, IsFalse)
+	if tt.helperCalls != 3 {
+		t.Fatalf("want 3 calls (Assert, c.Assert, check), got %d", tt.helperCalls)
+	}
+}
+
+func TestCHelper(t *testing.T) {
+	tt := &testingT{}
+	c := New(tt)
+	c.Assert(true, IsFalse)
+	if tt.helperCalls != 2 {
+		t.Fatalf("want 2 calls (c.Assert, check), got %d", tt.helperCalls)
+	}
+}
+
+func TestCParallel(t *testing.T) {
+	tt := &testingT{}
+	c := New(tt)
+	c.Parallel()
+	if !tt.parallel {
+		t.Fatalf("parallel not called")
+	}
+}
+
+func TestCParallelPanic(t *testing.T) {
+	c := New(&testing.B{})
+	defer func() {
+		r := recover()
+		if r != "cannot execute Parallel with underlying concrete type *testing.B" {
+			t.Fatalf("unexpected panic recover: %v", r)
+		}
+	}()
+	c.Parallel()
+}
+
+func TestCDefer(t *testing.T) {
+	c := New(t)
+	var defers []int
+	c.Run("subtest", func(c *C) {
+		c.Defer(func() { defers = append(defers, 1) })
+		c.Defer(func() { defers = append(defers, 2) })
+		// Calling Done twice should not do anything more.
+		c.Done()
+	})
+	c.Assert(defers, DeepEquals, []int{2, 1})
+}
+
+func TestCDeferCalledEvenAfterGoexit(t *testing.T) {
+	// The testing package uses runtime.Goexit on
+	// assertion failure, so check that defers are still
+	// called in that case.
+	c := New(t)
+	defers := 0
+	c.Run("subtest", func(c *C) {
+		c.Defer(func() {
+			defers++
+		})
+		c.Defer(func() {
+			c.SkipNow()
+		})
+	})
+	c.Assert(defers, Equals, 1)
+}
+
+func TestCRunDefer(t *testing.T) {
+	c := New(t)
+	defers := 0
+	c.Run("subtest", func(c *C) {
+		c.Run("x", func(c *C) {
+			c.Defer(func() { defers++ })
+		})
+	})
+	c.Assert(defers, Equals, 1)
+}
+
+type customT struct {
+	*testing.T
+	data int
+}
+
+func (t *customT) Run(name string, f func(*customT)) bool {
+	return t.T.Run(name, func(t1 *testing.T) {
+		f(&customT{t1, t.data})
+	})
+}
+
+func TestCRunCustomType(t *testing.T) {
+	ct := &customT{t, 99}
+	c := New(ct)
+	called := 0
+	c.Run("test", func(c *C) {
+		called++
+		ct1, ok := c.TB.(*customT)
+		if !ok {
+			t.Error("TB isn't expected type")
+		}
+		if ct1.data != ct.data {
+			t.Errorf("data not copied correctly; got %v want %v", ct1.data, ct.data)
+		}
+		if ct1 == ct {
+			t.Errorf("old instance passed, not new")
+		}
+	})
+	if called != 1 {
+		t.Fatalf("subtest was called %d times, not once", called)
+	}
+}
+
+func checkResult(t *testing.T, ok bool, got, want string) {
+	t.Helper()
+	if want != "" {
+		assertPrefix(t, got, want+"stack:\n")
+		assertBool(t, ok, false)
+		return
+	}
+	if got != "" {
+		t.Fatalf("output:\ngot  %q\nwant empty", got)
+	}
+	assertBool(t, ok, true)
+}
+
+// testingT can be passed to qt.New for testing purposes.
+type testingT struct {
+	testing.TB
+
+	errorBuf bytes.Buffer
+	fatalBuf bytes.Buffer
+
+	subTestResult bool
+	subTestName   string
+	subTestT      *testing.T
+
+	helperCalls int
+	parallel    bool
+}
+
+// Error overrides testing.TB.Error so that messages are collected.
+func (t *testingT) Error(a ...interface{}) {
+	fmt.Fprint(&t.errorBuf, a...)
+}
+
+// Fatal overrides testing.TB.Fatal so that messages are collected and the
+// goroutine is not killed.
+func (t *testingT) Fatal(a ...interface{}) {
+	fmt.Fprint(&t.fatalBuf, a...)
+}
+
+// Parallel overrides testing.TB.Parallel in order to record the call.
+func (t *testingT) Parallel() {
+	t.parallel = true
+}
+
+// Helper overrides testing.TB.Helper in order to count calls.
+func (t *testingT) Helper() {
+	t.helperCalls += 1
+}
+
+// Run implements a Run method compatible with C.Run, recording the subtest
+// name and testing object and returning the configured result.
+func (t *testingT) Run(name string, f func(t *testing.T)) bool {
+	t.subTestName, t.subTestT = name, &testing.T{}
+	f(t.subTestT)
+	return t.subTestResult
+}
+
+// errorString returns the error message.
+func (t *testingT) errorString() string {
+	return t.errorBuf.String()
+}
+
+// fatalString returns the fatal error message.
+func (t *testingT) fatalString() string {
+	return t.fatalBuf.String()
+}
+
+// assertPrefix fails if the got value does not have the given prefix.
+func assertPrefix(t testing.TB, got, prefix string) {
+	t.Helper()
+	if prefix == "" {
+		t.Fatal("prefix: empty value provided")
+	}
+	if !strings.HasPrefix(got, prefix) {
+		t.Fatalf(`prefix:
+got  %q
+want %q
+-------------------- got --------------------
+%s
+-------------------- want -------------------
+%s
+---------------------------------------------`, got, prefix, got, prefix)
+	}
+}
+
+// assertBool fails if the given boolean values don't match.
+func assertBool(t testing.TB, got, want bool) {
+	t.Helper()
+	if got != want {
+		t.Fatalf("bool:\ngot  %v\nwant %v", got, want)
+	}
+}
+
+// testingChecker is a quicktest.Checker used in tests. It receives the
+// provided argNames, adds notes via the provided addNotes function, and when
+// the check is run the provided error is returned.
+type testingChecker struct {
+	argNames []string
+	addNotes func(note func(key string, value interface{}))
+	err      error
+}
+
+// Check implements quicktest.Checker by returning the stored error.
+func (c *testingChecker) Check(got interface{}, args []interface{}, note func(key string, value interface{})) error {
+	if c.addNotes != nil {
+		c.addNotes(note)
+	}
+	return c.err
+}
+
+// ArgNames implements quicktest.Checker by returning the stored argument names.
+func (c *testingChecker) ArgNames() []string {
+	return c.argNames
+}

+ 88 - 0
data_tool/src/github.com/frankban/quicktest/race_test.go

@@ -0,0 +1,88 @@
+// Licensed under the MIT license, see LICENSE file for details.
+
+package quicktest_test
+
+import (
+	"sync"
+	"sync/atomic"
+	"testing"
+)
+
+func TestConcurrentMethods(t *testing.T) {
+	// This test is designed to be run with the race
+	// detector enabled. It checks that C methods
+	// are safe to call concurrently.
+
+	// N holds the number of iterations to run any given
+	// operation concurrently with the others.
+	const N = 100
+
+	var x, y int32
+	c := New(dummyT{t})
+	c.Run("subtest", func(c *C) {
+		var wg sync.WaitGroup
+		// start calls f in two goroutines, each
+		// running it N times.
+		// All the goroutines get started before we actually
+		// start them running, so that the race detector
+		// has a better chance of catching issues.
+		gogogo := make(chan struct{})
+		start := func(f func()) {
+			repeat := func() {
+				defer wg.Done()
+				<-gogogo
+				for i := 0; i < N; i++ {
+					f()
+				}
+			}
+			wg.Add(2)
+			go repeat()
+			go repeat()
+		}
+		start(func() {
+			c.Defer(func() {
+				atomic.AddInt32(&x, 1)
+			})
+			c.Defer(func() {
+				atomic.AddInt32(&y, 1)
+			})
+		})
+		start(func() {
+			c.Done()
+		})
+		start(func() {
+			c.SetFormat(func(v interface{}) string {
+				return "x"
+			})
+		})
+		start(func() {
+			// Do an assert to exercise the formatter.
+			c.Check(true, Equals, false)
+		})
+		start(func() {
+			c.Run("", func(c *C) {})
+		})
+		close(gogogo)
+		wg.Wait()
+	})
+	// Check that all the defer functions ran OK.
+	if x != N*2 || y != N*2 {
+		t.Fatalf("unexpected x, y counts; got %d, %d; want %d, %d", x, y, N*2, N*2)
+	}
+}
+
+// dummyT wraps a *testing.T value for use in TestConcurrentMethods, so that
+// calling Error doesn't fail the test and so that Run has the signature that
+// C.Run expects.
+type dummyT struct {
+	*testing.T
+}
+
+func (dummyT) Error(...interface{}) {}
+
+func (t dummyT) Run(name string, f func(t dummyT)) bool {
+	return t.T.Run(name, func(t *testing.T) {
+		f(dummyT{t})
+	})
+}

+ 248 - 0
data_tool/src/github.com/frankban/quicktest/report.go

@@ -0,0 +1,248 @@
+// Licensed under the MIT license, see LICENSE file for details.
+
+package quicktest
+
+import (
+	"bytes"
+	"fmt"
+	"go/ast"
+	"go/parser"
+	"go/printer"
+	"go/token"
+	"io"
+	"reflect"
+	"runtime"
+	"strings"
+	"testing"
+)
+
+// reportParams holds parameters for reporting a test error.
+type reportParams struct {
+	// argNames holds the names for the arguments passed to the checker.
+	argNames []string
+	// got holds the value that was checked.
+	got interface{}
+	// args holds all other arguments (if any) provided to the checker.
+	args []interface{}
+	// comments optionally holds the comments passed when performing the check.
+	comments []Comment
+	// notes holds notes added while doing the check.
+	notes []note
+	// format holds the format function that must be used when outputting
+	// values.
+	format formatFunc
+}
+
+// Unquoted indicates that the string must not be pretty printed in the failure
+// output. This is useful when a checker calls note and does not want the
+// provided value to be quoted.
+type Unquoted string
+
+// SuppressedIfLong indicates that the value must be suppressed if verbose
+// testing is off and the pretty printed version of the value is long. This is
+// useful when a checker calls note and does not want the provided value to be
+// printed in non-verbose test runs if the value is too long.
+type SuppressedIfLong struct {
+	// Value holds the original annotated value.
+	Value interface{}
+}
+
+// longValueLines holds the number of lines after which a value is long.
+const longValueLines = 10
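A sketch of how a checker might use these two annotation types when adding notes; the verboseChecker type is hypothetical, and the errors package plus the usual qt alias are assumed:

// verboseChecker is a hypothetical checker that annotates its failures.
type verboseChecker struct{}

func (verboseChecker) ArgNames() []string { return []string{"got"} }

func (verboseChecker) Check(got interface{}, args []interface{}, note func(key string, value interface{})) error {
	// Unquoted: the note value is printed as-is, without quoting or escaping.
	note("hint", qt.Unquoted("re-run with -v for the full value"))
	// SuppressedIfLong: the value is hidden in non-verbose runs once its
	// formatted form exceeds longValueLines (10) lines.
	note("full value", qt.SuppressedIfLong{Value: got})
	return errors.New("always fails, for illustration")
}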
+
+// report generates a failure report for the given error, optionally including
+// in the output the checker arguments, comment and notes included in the
+// provided report parameters.
+func report(err error, p reportParams) string {
+	var buf bytes.Buffer
+	buf.WriteByte('\n')
+	writeError(&buf, err, p)
+	writeStack(&buf)
+	return buf.String()
+}
+
+// writeError writes a pretty formatted output of the given error using the
+// provided report parameters.
+func writeError(w io.Writer, err error, p reportParams) {
+	ptrs := make(map[string]interface{})
+	values := make(map[string]string)
+
+	printPair := func(key string, value interface{}) {
+		fmt.Fprintln(w, key+":")
+		var v string
+
+		if u, ok := value.(Unquoted); ok {
+			// Output the raw string without quotes.
+			v = string(u)
+		} else if s, ok := value.(SuppressedIfLong); ok {
+			// Check whether the output is too long and must be suppressed.
+			v = p.format(s.Value)
+			if !testingVerbose() {
+				if n := strings.Count(v, "\n"); n > longValueLines {
+					fmt.Fprint(w, prefixf(prefix, "<suppressed due to length (%d lines), use -v for full output>", n))
+					return
+				}
+			}
+		} else {
+			// Check whether the output has been already seen.
+			v = p.format(value)
+			isPtr := reflect.ValueOf(value).Kind() == reflect.Ptr
+			if k := values[v]; k != "" {
+				if previousValue, ok := ptrs[k]; ok && isPtr && previousValue != value {
+					fmt.Fprint(w, prefixf(prefix, "<same as %q but different pointer value>", k))
+					return
+				}
+				fmt.Fprint(w, prefixf(prefix, "<same as %q>", k))
+				return
+			}
+			if isPtr {
+				ptrs[key] = value
+			}
+		}
+
+		values[v] = key
+		fmt.Fprint(w, prefixf(prefix, "%s", v))
+	}
+
+	// Write the checker error.
+	if err != ErrSilent {
+		printPair("error", Unquoted(err.Error()))
+	}
+
+	// Write comments if provided.
+	for _, c := range p.comments {
+		if comment := c.String(); comment != "" {
+			printPair("comment", Unquoted(comment))
+		}
+	}
+
+	// Write notes if present.
+	for _, n := range p.notes {
+		printPair(n.key, n.value)
+	}
+	if IsBadCheck(err) || err == ErrSilent {
+		// For errors in the checker invocation or for silent errors, do not
+		// show output from args.
+		return
+	}
+
+	// Write provided args.
+	for i, arg := range append([]interface{}{p.got}, p.args...) {
+		printPair(p.argNames[i], arg)
+	}
+}
+
+// testingVerbose is defined as a variable for testing.
+var testingVerbose = func() bool {
+	return testing.Verbose()
+}
+
+// writeStack writes the traceback information for the current failure into the
+// provided writer.
+func writeStack(w io.Writer) {
+	fmt.Fprintln(w, "stack:")
+	pc := make([]uintptr, 8)
+	sg := &stmtGetter{
+		fset:  token.NewFileSet(),
+		files: make(map[string]*ast.File, 8),
+		config: &printer.Config{
+			Mode:     printer.UseSpaces,
+			Tabwidth: 4,
+		},
+	}
+	runtime.Callers(5, pc)
+	frames := runtime.CallersFrames(pc)
+	thisPackage := reflect.TypeOf(C{}).PkgPath() + "."
+	for {
+		frame, more := frames.Next()
+		if strings.HasPrefix(frame.Function, "testing.") {
+			// Stop before getting back to stdlib test runner calls.
+			break
+		}
+		if fname := strings.TrimPrefix(frame.Function, thisPackage); fname != frame.Function {
+			if ast.IsExported(fname) {
+				// Continue without printing frames for quicktest exported API.
+				continue
+			}
+			// Stop when entering quicktest internal calls.
+			// This is useful for instance when using qtsuite.
+			break
+		}
+		fmt.Fprint(w, prefixf(prefix, "%s:%d", frame.File, frame.Line))
+		if strings.HasSuffix(frame.File, ".go") {
+			stmt, err := sg.Get(frame.File, frame.Line)
+			if err != nil {
+				fmt.Fprint(w, prefixf(prefix+prefix, "<%s>", err))
+			} else {
+				fmt.Fprint(w, prefixf(prefix+prefix, "%s", stmt))
+			}
+		}
+		if !more {
+			// There are no more callers.
+			break
+		}
+	}
+}
+
+type stmtGetter struct {
+	fset   *token.FileSet
+	files  map[string]*ast.File
+	config *printer.Config
+}
+
+// Get returns the lines of code of the statement at the given file and line.
+func (sg *stmtGetter) Get(file string, line int) (string, error) {
+	f := sg.files[file]
+	if f == nil {
+		var err error
+		f, err = parser.ParseFile(sg.fset, file, nil, parser.ParseComments)
+		if err != nil {
+			return "", fmt.Errorf("cannot parse source file: %s", err)
+		}
+		sg.files[file] = f
+	}
+	var stmt string
+	ast.Inspect(f, func(n ast.Node) bool {
+		if n == nil || stmt != "" {
+			return false
+		}
+		pos := sg.fset.Position(n.Pos()).Line
+		end := sg.fset.Position(n.End()).Line
+		// Go < v1.9 reports the line where the statements ends, not the line
+		// where it begins.
+		if line == pos || line == end {
+			var buf bytes.Buffer
+			// TODO: include possible comment after the statement.
+			sg.config.Fprint(&buf, sg.fset, &printer.CommentedNode{
+				Node:     n,
+				Comments: f.Comments,
+			})
+			stmt = buf.String()
+			return false
+		}
+		return pos < line && line <= end
+	})
+	return stmt, nil
+}
+
+// prefixf formats the given string with the given args. It also inserts the
+// final newline if needed and indentation with the given prefix.
+func prefixf(prefix, format string, args ...interface{}) string {
+	var buf []byte
+	s := strings.TrimSuffix(fmt.Sprintf(format, args...), "\n")
+	for _, line := range strings.Split(s, "\n") {
+		buf = append(buf, prefix...)
+		buf = append(buf, line...)
+		buf = append(buf, '\n')
+	}
+	return string(buf)
+}
+
+// note holds a key/value annotation.
+type note struct {
+	key   string
+	value interface{}
+}
+
+// prefix is the string used to indent blocks of output.
+const prefix = "  "

+ 183 - 0
data_tool/src/github.com/frankban/quicktest/report_test.go

@@ -0,0 +1,183 @@
+// Licensed under the MIT license, see LICENSE file for details.
+
+package quicktest_test
+
+import (
+	"runtime"
+	"strings"
+	"testing"
+)
+
+// The tests in this file rely on their own source code lines.
+
+func TestReportOutput(t *testing.T) {
+	tt := &testingT{}
+	c := New(tt)
+	c.Assert(42, Equals, 47)
+	want := `
+error:
+  values are not equal
+got:
+  int(42)
+want:
+  int(47)
+stack:
+  $file:18
+    c.Assert(42, qt.Equals, 47)
+`
+	assertReport(t, tt, want)
+}
+
+func f1(c *C) {
+	f2(c)
+}
+
+func f2(c *C) {
+	c.Assert(42, IsNil) // Real assertion here!
+}
+
+func TestIndirectReportOutput(t *testing.T) {
+	tt := &testingT{}
+	c := New(tt)
+	f1(c)
+	want := `
+error:
+  got non-nil value
+got:
+  int(42)
+stack:
+  $file:38
+    c.Assert(42, qt.IsNil)
+  $file:34
+    f2(c)
+  $file:44
+    f1(c)
+`
+	assertReport(t, tt, want)
+}
+
+func TestMultilineReportOutput(t *testing.T) {
+	tt := &testingT{}
+	c := New(tt)
+	c.Assert(
+		"this string", // Comment 1.
+		Equals,
+		"another string",
+		Commentf("a comment"), // Comment 2.
+	) // Comment 3.
+	want := `
+error:
+  values are not equal
+comment:
+  a comment
+got:
+  "this string"
+want:
+  "another string"
+stack:
+  $file:64
+    c.Assert(
+        "this string", // Comment 1.
+        qt.Equals,
+        "another string",
+        qt.Commentf("a comment"), // Comment 2.
+    )
+`
+	assertReport(t, tt, want)
+}
+
+func TestCmpReportOutput(t *testing.T) {
+	tt := &testingT{}
+	c := New(tt)
+	gotExamples := []*reportExample{{
+		AnInt: 42,
+	}, {
+		AnInt: 47,
+	}, {
+		AnInt: 1,
+	}, {
+		AnInt: 2,
+	}}
+	wantExamples := []*reportExample{{
+		AnInt: 42,
+	}, {
+		AnInt: 47,
+	}, {
+		AnInt: 2,
+	}, {
+		AnInt: 1,
+	}, {}}
+	c.Assert(gotExamples, DeepEquals, wantExamples)
+	want := `
+error:
+  values are not deep equal
+diff (-got +want):
+    []*quicktest_test.reportExample{
+            &{AnInt: 42},
+            &{AnInt: 47},
+  +         &{AnInt: 2},
+            &{AnInt: 1},
+  -         &{AnInt: 2},
+  +         &{},
+    }
+got:
+  []*quicktest_test.reportExample{
+      &quicktest_test.reportExample{AnInt:42},
+      &quicktest_test.reportExample{AnInt:47},
+      &quicktest_test.reportExample{AnInt:1},
+      &quicktest_test.reportExample{AnInt:2},
+  }
+want:
+  []*quicktest_test.reportExample{
+      &quicktest_test.reportExample{AnInt:42},
+      &quicktest_test.reportExample{AnInt:47},
+      &quicktest_test.reportExample{AnInt:2},
+      &quicktest_test.reportExample{AnInt:1},
+      &quicktest_test.reportExample{},
+  }
+stack:
+  $file:112
+    c.Assert(gotExamples, qt.DeepEquals, wantExamples)
+`
+	assertReport(t, tt, want)
+}
+
+func TestTopLevelAssertReportOutput(t *testing.T) {
+	tt := &testingT{}
+	Assert(tt, 42, Equals, 47)
+	want := `
+error:
+  values are not equal
+got:
+  int(42)
+want:
+  int(47)
+stack:
+  $file:149
+    qt.Assert(tt, 42, qt.Equals, 47)
+`
+	assertReport(t, tt, want)
+}
+
+func assertReport(t *testing.T, tt *testingT, want string) {
+	got := strings.Replace(tt.fatalString(), "\t", "        ", -1)
+	// go-cmp can include non-breaking spaces in its output.
+	got = strings.Replace(got, "\u00a0", " ", -1)
+	// Adjust for file names in different systems.
+	_, file, _, ok := runtime.Caller(0)
+	assertBool(t, ok, true)
+	want = strings.Replace(want, "$file", file, -1)
+	if got != want {
+		t.Fatalf(`failure:
+%q
+%q
+------------------------------ got ------------------------------
+%s------------------------------ want -----------------------------
+%s-----------------------------------------------------------------`,
+			got, want, got, want)
+	}
+}
+
+type reportExample struct {
+	AnInt int
+}

+ 27 - 0
data_tool/src/github.com/google/btree/.github/workflows/test.yml

@@ -0,0 +1,27 @@
+on: [push, pull_request]
+name: Test
+jobs:
+  test:
+    strategy:
+      matrix:
+        go-version:
+          - 1.11.x
+          - 1.12.x
+          - 1.13.x
+          - 1.14.x
+          - 1.15.x
+          - 1.16.x
+          - 1.17.x
+          - 1.18.x
+        os:
+          - ubuntu-latest
+    runs-on: ${{ matrix.os }}
+    steps:
+    - name: Install Go
+      uses: actions/setup-go@v2
+      with:
+        go-version: ${{ matrix.go-version }}
+    - name: Checkout code
+      uses: actions/checkout@v2
+    - name: Test
+      run: go test -v ./...

+ 202 - 0
data_tool/src/github.com/google/btree/LICENSE

@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

+ 10 - 0
data_tool/src/github.com/google/btree/README.md

@@ -0,0 +1,10 @@
+# BTree implementation for Go
+
+This package provides an in-memory B-Tree implementation for Go, useful as
+an ordered, mutable data structure.
+
+The API is based off of the wonderful
+http://godoc.org/github.com/petar/GoLLRB/llrb, and is meant to allow btree to
+act as a drop-in replacement for gollrb trees.
+
+See http://godoc.org/github.com/google/btree for documentation.
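A minimal usage sketch under the pre-generics, Item-based API shown below (btree.Int is the ready-made integer Item the package provides):

package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	tr := btree.New(32) // a B-Tree of degree 32
	for i := 0; i < 10; i++ {
		tr.ReplaceOrInsert(btree.Int(i))
	}
	fmt.Println(tr.Len())             // 10
	fmt.Println(tr.Has(btree.Int(3))) // true
	fmt.Println(tr.Get(btree.Int(3))) // 3
	tr.Delete(btree.Int(3))
	tr.Ascend(func(i btree.Item) bool {
		fmt.Print(i, " ") // in-order traversal: 0 1 2 4 5 ...
		return true       // keep iterating
	})
	fmt.Println()
}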

+ 893 - 0
data_tool/src/github.com/google/btree/btree.go

@@ -0,0 +1,893 @@
+// Copyright 2014 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !go1.18
+// +build !go1.18
+
+// Package btree implements in-memory B-Trees of arbitrary degree.
+//
+// btree implements an in-memory B-Tree for use as an ordered data structure.
+// It is not meant for persistent storage solutions.
+//
+// It has a flatter structure than an equivalent red-black or other binary tree,
+// which in some cases yields better memory usage and/or performance.
+// See some discussion on the matter here:
+//   http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html
+// Note, though, that this project is in no way related to the C++ B-Tree
+// implementation written about there.
+//
+// Within this tree, each node contains a slice of items and a (possibly nil)
+// slice of children.  For basic numeric values or raw structs, this can cause
+// efficiency differences when compared to equivalent C++ template code that
+// stores values in arrays within the node:
+//   * Due to the overhead of storing values as interfaces (each
+//     value needs to be stored as the value itself, then 2 words for the
+//     interface pointing to that value and its type), resulting in higher
+//     memory use.
+//   * Since interfaces can point to values anywhere in memory, values are
+//     most likely not stored in contiguous blocks, resulting in a higher
+//     number of cache misses.
+// These issues don't tend to matter, though, when working with strings or other
+// heap-allocated structures, since C++-equivalent structures also must store
+// pointers and also distribute their values across the heap.
+//
+// This implementation is designed to be a drop-in replacement to gollrb.LLRB
+// trees, (http://github.com/petar/gollrb), an excellent and probably the most
+// widely used ordered tree implementation in the Go ecosystem currently.
+// Its functions, therefore, exactly mirror those of
+// llrb.LLRB where possible.  Unlike gollrb, though, we currently don't
+// support storing multiple equivalent values.
+package btree
+
+import (
+	"fmt"
+	"io"
+	"sort"
+	"strings"
+	"sync"
+)
+
+// Item represents a single object in the tree.
+type Item interface {
+	// Less tests whether the current item is less than the given argument.
+	//
+	// This must provide a strict weak ordering.
+	// If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only
+	// hold one of either a or b in the tree).
+	Less(than Item) bool
+}
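For example, a sketch of a user-defined Item keyed by an integer ID (the record type and its fields are illustrative):

// record is a hypothetical user type stored in the tree, ordered by ID.
type record struct {
	ID   int
	Name string
}

func (r record) Less(than btree.Item) bool {
	// Strict weak ordering on ID: two records with equal IDs are treated as
	// the same key, so inserting one replaces the other.
	return r.ID < than.(record).ID
}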
+
+const (
+	DefaultFreeListSize = 32
+)
+
+var (
+	nilItems    = make(items, 16)
+	nilChildren = make(children, 16)
+)
+
+// FreeList represents a free list of btree nodes. By default each
+// BTree has its own FreeList, but multiple BTrees can share the same
+// FreeList.
+// Two BTrees using the same freelist are safe for concurrent write access.
+type FreeList struct {
+	mu       sync.Mutex
+	freelist []*node
+}
+
+// NewFreeList creates a new free list.
+// size is the maximum size of the returned free list.
+func NewFreeList(size int) *FreeList {
+	return &FreeList{freelist: make([]*node, 0, size)}
+}
+
+func (f *FreeList) newNode() (n *node) {
+	f.mu.Lock()
+	index := len(f.freelist) - 1
+	if index < 0 {
+		f.mu.Unlock()
+		return new(node)
+	}
+	n = f.freelist[index]
+	f.freelist[index] = nil
+	f.freelist = f.freelist[:index]
+	f.mu.Unlock()
+	return
+}
+
+// freeNode adds the given node to the list, returning true if it was added
+// and false if it was discarded.
+func (f *FreeList) freeNode(n *node) (out bool) {
+	f.mu.Lock()
+	if len(f.freelist) < cap(f.freelist) {
+		f.freelist = append(f.freelist, n)
+		out = true
+	}
+	f.mu.Unlock()
+	return
+}
+
+// ItemIterator allows callers of Ascend* to iterate in-order over portions of
+// the tree.  When this function returns false, iteration will stop and the
+// associated Ascend* function will immediately return.
+type ItemIterator func(i Item) bool
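For instance, a sketch of an iterator that stops early (tr is assumed to be a populated *btree.BTree):

// Collect the three smallest items, then stop.
var smallest []btree.Item
tr.Ascend(func(i btree.Item) bool {
	smallest = append(smallest, i)
	return len(smallest) < 3 // returning false ends the Ascend call early
})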
+
+// New creates a new B-Tree with the given degree.
+//
+// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items
+// and 2-4 children).
+func New(degree int) *BTree {
+	return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize))
+}
+
+// NewWithFreeList creates a new B-Tree that uses the given node free list.
+func NewWithFreeList(degree int, f *FreeList) *BTree {
+	if degree <= 1 {
+		panic("bad degree")
+	}
+	return &BTree{
+		degree: degree,
+		cow:    &copyOnWriteContext{freelist: f},
+	}
+}
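A sketch of two trees sharing one free list, as the FreeList comment above allows; the variable names are illustrative:

fl := btree.NewFreeList(btree.DefaultFreeListSize)
users := btree.NewWithFreeList(16, fl)
sessions := btree.NewWithFreeList(16, fl)
// Nodes freed by either tree (for example after Delete) go back to fl and
// can be reused by the other; per the comment above, both trees remain safe
// for concurrent write access.
_, _ = users, sessions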
+
+// items stores items in a node.
+type items []Item
+
+// insertAt inserts a value into the given index, pushing all subsequent values
+// forward.
+func (s *items) insertAt(index int, item Item) {
+	*s = append(*s, nil)
+	if index < len(*s) {
+		copy((*s)[index+1:], (*s)[index:])
+	}
+	(*s)[index] = item
+}
+
+// removeAt removes a value at a given index, pulling all subsequent values
+// back.
+func (s *items) removeAt(index int) Item {
+	item := (*s)[index]
+	copy((*s)[index:], (*s)[index+1:])
+	(*s)[len(*s)-1] = nil
+	*s = (*s)[:len(*s)-1]
+	return item
+}
+
+// pop removes and returns the last element in the list.
+func (s *items) pop() (out Item) {
+	index := len(*s) - 1
+	out = (*s)[index]
+	(*s)[index] = nil
+	*s = (*s)[:index]
+	return
+}
+
+// truncate truncates this instance at index so that it contains only the
+// first index items. index must be less than or equal to length.
+func (s *items) truncate(index int) {
+	var toClear items
+	*s, toClear = (*s)[:index], (*s)[index:]
+	for len(toClear) > 0 {
+		toClear = toClear[copy(toClear, nilItems):]
+	}
+}
+
+// find returns the index where the given item should be inserted into this
+// list.  'found' is true if the item already exists in the list at the given
+// index.
+func (s items) find(item Item) (index int, found bool) {
+	i := sort.Search(len(s), func(i int) bool {
+		return item.Less(s[i])
+	})
+	if i > 0 && !s[i-1].Less(item) {
+		return i - 1, true
+	}
+	return i, false
+}
+
+// children stores child nodes in a node.
+type children []*node
+
+// insertAt inserts a value into the given index, pushing all subsequent values
+// forward.
+func (s *children) insertAt(index int, n *node) {
+	*s = append(*s, nil)
+	if index < len(*s) {
+		copy((*s)[index+1:], (*s)[index:])
+	}
+	(*s)[index] = n
+}
+
+// removeAt removes a value at a given index, pulling all subsequent values
+// back.
+func (s *children) removeAt(index int) *node {
+	n := (*s)[index]
+	copy((*s)[index:], (*s)[index+1:])
+	(*s)[len(*s)-1] = nil
+	*s = (*s)[:len(*s)-1]
+	return n
+}
+
+// pop removes and returns the last element in the list.
+func (s *children) pop() (out *node) {
+	index := len(*s) - 1
+	out = (*s)[index]
+	(*s)[index] = nil
+	*s = (*s)[:index]
+	return
+}
+
+// truncate truncates this instance at index so that it contains only the
+// first index children. index must be less than or equal to length.
+func (s *children) truncate(index int) {
+	var toClear children
+	*s, toClear = (*s)[:index], (*s)[index:]
+	for len(toClear) > 0 {
+		toClear = toClear[copy(toClear, nilChildren):]
+	}
+}
+
+// node is an internal node in a tree.
+//
+// It must at all times maintain the invariant that either
+//   * len(children) == 0, len(items) unconstrained
+//   * len(children) == len(items) + 1
+type node struct {
+	items    items
+	children children
+	cow      *copyOnWriteContext
+}
+
+func (n *node) mutableFor(cow *copyOnWriteContext) *node {
+	if n.cow == cow {
+		return n
+	}
+	out := cow.newNode()
+	if cap(out.items) >= len(n.items) {
+		out.items = out.items[:len(n.items)]
+	} else {
+		out.items = make(items, len(n.items), cap(n.items))
+	}
+	copy(out.items, n.items)
+	// Copy children
+	if cap(out.children) >= len(n.children) {
+		out.children = out.children[:len(n.children)]
+	} else {
+		out.children = make(children, len(n.children), cap(n.children))
+	}
+	copy(out.children, n.children)
+	return out
+}
+
+func (n *node) mutableChild(i int) *node {
+	c := n.children[i].mutableFor(n.cow)
+	n.children[i] = c
+	return c
+}
+
+// split splits the given node at the given index.  The current node shrinks,
+// and this function returns the item that existed at that index and a new node
+// containing all items/children after it.
+func (n *node) split(i int) (Item, *node) {
+	item := n.items[i]
+	next := n.cow.newNode()
+	next.items = append(next.items, n.items[i+1:]...)
+	n.items.truncate(i)
+	if len(n.children) > 0 {
+		next.children = append(next.children, n.children[i+1:]...)
+		n.children.truncate(i + 1)
+	}
+	return item, next
+}
+
+// maybeSplitChild checks if a child should be split, and if so splits it.
+// Returns whether or not a split occurred.
+func (n *node) maybeSplitChild(i, maxItems int) bool {
+	if len(n.children[i].items) < maxItems {
+		return false
+	}
+	first := n.mutableChild(i)
+	item, second := first.split(maxItems / 2)
+	n.items.insertAt(i, item)
+	n.children.insertAt(i+1, second)
+	return true
+}
+
+// insert inserts an item into the subtree rooted at this node, making sure
+// no nodes in the subtree exceed maxItems items.  Should an equivalent item
+// be found/replaced by insert, it will be returned.
+func (n *node) insert(item Item, maxItems int) Item {
+	i, found := n.items.find(item)
+	if found {
+		out := n.items[i]
+		n.items[i] = item
+		return out
+	}
+	if len(n.children) == 0 {
+		n.items.insertAt(i, item)
+		return nil
+	}
+	if n.maybeSplitChild(i, maxItems) {
+		inTree := n.items[i]
+		switch {
+		case item.Less(inTree):
+			// no change, we want first split node
+		case inTree.Less(item):
+			i++ // we want second split node
+		default:
+			out := n.items[i]
+			n.items[i] = item
+			return out
+		}
+	}
+	return n.mutableChild(i).insert(item, maxItems)
+}
+
+// get finds the given key in the subtree and returns it.
+func (n *node) get(key Item) Item {
+	i, found := n.items.find(key)
+	if found {
+		return n.items[i]
+	} else if len(n.children) > 0 {
+		return n.children[i].get(key)
+	}
+	return nil
+}
+
+// min returns the first item in the subtree.
+func min(n *node) Item {
+	if n == nil {
+		return nil
+	}
+	for len(n.children) > 0 {
+		n = n.children[0]
+	}
+	if len(n.items) == 0 {
+		return nil
+	}
+	return n.items[0]
+}
+
+// max returns the last item in the subtree.
+func max(n *node) Item {
+	if n == nil {
+		return nil
+	}
+	for len(n.children) > 0 {
+		n = n.children[len(n.children)-1]
+	}
+	if len(n.items) == 0 {
+		return nil
+	}
+	return n.items[len(n.items)-1]
+}
+
+// toRemove details what item to remove in a node.remove call.
+type toRemove int
+
+const (
+	removeItem toRemove = iota // removes the given item
+	removeMin                  // removes smallest item in the subtree
+	removeMax                  // removes largest item in the subtree
+)
+
+// remove removes an item from the subtree rooted at this node.
+func (n *node) remove(item Item, minItems int, typ toRemove) Item {
+	var i int
+	var found bool
+	switch typ {
+	case removeMax:
+		if len(n.children) == 0 {
+			return n.items.pop()
+		}
+		i = len(n.items)
+	case removeMin:
+		if len(n.children) == 0 {
+			return n.items.removeAt(0)
+		}
+		i = 0
+	case removeItem:
+		i, found = n.items.find(item)
+		if len(n.children) == 0 {
+			if found {
+				return n.items.removeAt(i)
+			}
+			return nil
+		}
+	default:
+		panic("invalid type")
+	}
+	// If we get to here, we have children.
+	if len(n.children[i].items) <= minItems {
+		return n.growChildAndRemove(i, item, minItems, typ)
+	}
+	child := n.mutableChild(i)
+	// Either we had enough items to begin with, or we've done some
+	// merging/stealing, because we've got enough now and we're ready to return
+	// stuff.
+	if found {
+		// The item exists at index 'i', and the child we've selected can give us a
+		// predecessor, since if we've gotten here it's got > minItems items in it.
+		out := n.items[i]
+		// We use our special-case 'remove' call with typ=maxItem to pull the
+		// predecessor of item i (the rightmost leaf of our immediate left child)
+		// and set it into where we pulled the item from.
+		n.items[i] = child.remove(nil, minItems, removeMax)
+		return out
+	}
+	// Final recursive call.  Once we're here, we know that the item isn't in this
+	// node and that the child is big enough to remove from.
+	return child.remove(item, minItems, typ)
+}
+
+// growChildAndRemove grows child 'i' to make sure it's possible to remove an
+// item from it while keeping it at minItems, then calls remove to actually
+// remove it.
+//
+// Most documentation says we have to do two sets of special casing:
+//   1) item is in this node
+//   2) item is in child
+// In both cases, we need to handle the two subcases:
+//   A) node has enough values that it can spare one
+//   B) node doesn't have enough values
+// For the latter, we have to check:
+//   a) left sibling has node to spare
+//   b) right sibling has node to spare
+//   c) we must merge
+// To simplify our code here, we handle cases #1 and #2 the same:
+// If a node doesn't have enough items, we make sure it does (using a,b,c).
+// We then simply redo our remove call, and the second time (regardless of
+// whether we're in case 1 or 2), we'll have enough items and can guarantee
+// that we hit case A.
+func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item {
+	if i > 0 && len(n.children[i-1].items) > minItems {
+		// Steal from left child
+		child := n.mutableChild(i)
+		stealFrom := n.mutableChild(i - 1)
+		stolenItem := stealFrom.items.pop()
+		child.items.insertAt(0, n.items[i-1])
+		n.items[i-1] = stolenItem
+		if len(stealFrom.children) > 0 {
+			child.children.insertAt(0, stealFrom.children.pop())
+		}
+	} else if i < len(n.items) && len(n.children[i+1].items) > minItems {
+		// steal from right child
+		child := n.mutableChild(i)
+		stealFrom := n.mutableChild(i + 1)
+		stolenItem := stealFrom.items.removeAt(0)
+		child.items = append(child.items, n.items[i])
+		n.items[i] = stolenItem
+		if len(stealFrom.children) > 0 {
+			child.children = append(child.children, stealFrom.children.removeAt(0))
+		}
+	} else {
+		if i >= len(n.items) {
+			i--
+		}
+		child := n.mutableChild(i)
+		// merge with right child
+		mergeItem := n.items.removeAt(i)
+		mergeChild := n.children.removeAt(i + 1)
+		child.items = append(child.items, mergeItem)
+		child.items = append(child.items, mergeChild.items...)
+		child.children = append(child.children, mergeChild.children...)
+		n.cow.freeNode(mergeChild)
+	}
+	return n.remove(item, minItems, typ)
+}
+
+type direction int
+
+const (
+	descend = direction(-1)
+	ascend  = direction(+1)
+)
+
+// iterate provides a simple method for iterating over elements in the tree.
+//
+// When ascending, the 'start' should be less than 'stop' and when descending,
+// the 'start' should be greater than 'stop'. Setting 'includeStart' to true
+// will force the iterator to include the first item when it equals 'start',
+// thus creating "greaterOrEqual" or "lessThanEqual" queries rather than just
+// "greaterThan" or "lessThan" queries.
+func (n *node) iterate(dir direction, start, stop Item, includeStart bool, hit bool, iter ItemIterator) (bool, bool) {
+	var ok, found bool
+	var index int
+	switch dir {
+	case ascend:
+		if start != nil {
+			index, _ = n.items.find(start)
+		}
+		for i := index; i < len(n.items); i++ {
+			if len(n.children) > 0 {
+				if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok {
+					return hit, false
+				}
+			}
+			if !includeStart && !hit && start != nil && !start.Less(n.items[i]) {
+				hit = true
+				continue
+			}
+			hit = true
+			if stop != nil && !n.items[i].Less(stop) {
+				return hit, false
+			}
+			if !iter(n.items[i]) {
+				return hit, false
+			}
+		}
+		if len(n.children) > 0 {
+			if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
+				return hit, false
+			}
+		}
+	case descend:
+		if start != nil {
+			index, found = n.items.find(start)
+			if !found {
+				index = index - 1
+			}
+		} else {
+			index = len(n.items) - 1
+		}
+		for i := index; i >= 0; i-- {
+			if start != nil && !n.items[i].Less(start) {
+				if !includeStart || hit || start.Less(n.items[i]) {
+					continue
+				}
+			}
+			if len(n.children) > 0 {
+				if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
+					return hit, false
+				}
+			}
+			if stop != nil && !stop.Less(n.items[i]) {
+				return hit, false
+			}
+			hit = true
+			if !iter(n.items[i]) {
+				return hit, false
+			}
+		}
+		if len(n.children) > 0 {
+			if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok {
+				return hit, false
+			}
+		}
+	}
+	return hit, true
+}
+
+// print is used for testing/debugging purposes.
+func (n *node) print(w io.Writer, level int) {
+	fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat("  ", level), n.items)
+	for _, c := range n.children {
+		c.print(w, level+1)
+	}
+}
+
+// BTree is an implementation of a B-Tree.
+//
+// BTree stores Item instances in an ordered structure, allowing easy insertion,
+// removal, and iteration.
+//
+// Write operations are not safe for concurrent mutation by multiple
+// goroutines, but Read operations are.
+type BTree struct {
+	degree int
+	length int
+	root   *node
+	cow    *copyOnWriteContext
+}
+
+// copyOnWriteContext pointers determine node ownership: a tree with a write
+// context equivalent to a node's write context is allowed to modify that node.
+// A tree whose write context does not match a node's is not allowed to modify
+// it, and must create a new, writable copy (i.e. it's a Clone).
+//
+// When doing any write operation, we maintain the invariant that the current
+// node's context is equal to the context of the tree that requested the write.
+// We do this by, before we descend into any node, creating a copy with the
+// correct context if the contexts don't match.
+//
+// Since the node we're currently visiting on any write has the requesting
+// tree's context, that node is modifiable in place.  Children of that node may
+// not share context, but before we descend into them, we'll make a mutable
+// copy.
+type copyOnWriteContext struct {
+	freelist *FreeList
+}
+
+// Clone clones the btree, lazily.  Clone should not be called concurrently,
+// but the original tree (t) and the new tree (t2) can be used concurrently
+// once the Clone call completes.
+//
+// The internal tree structure of t is marked read-only and shared between t and
+// t2.  Writes to both t and t2 use copy-on-write logic, creating new nodes
+// whenever one of t's original nodes would have been modified.  Read operations
+// should see no performance degradation.  Write operations for both t and t2
+// will initially experience minor slow-downs caused by additional allocs and
+// copies due to the aforementioned copy-on-write logic, but should converge to
+// the performance characteristics of the original tree.
+func (t *BTree) Clone() (t2 *BTree) {
+	// Create two entirely new copy-on-write contexts.
+	// This operation effectively creates three trees:
+	//   the original, shared nodes (old t.cow)
+	//   the new t.cow nodes
+	//   the new out.cow nodes
+	cow1, cow2 := *t.cow, *t.cow
+	out := *t
+	t.cow = &cow1
+	out.cow = &cow2
+	return &out
+}
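+
+// cloneUsageSketch is an editor-added, illustrative sketch of the
+// copy-on-write behaviour described above; it is not part of the upstream
+// library and only assumes the Int item type defined at the end of this file.
+func cloneUsageSketch() {
+	t1 := New(2)
+	for i := 0; i < 5; i++ {
+		t1.ReplaceOrInsert(Int(i))
+	}
+	// t1 and t2 now share read-only nodes; each tree copies a node only
+	// when it is about to modify it.
+	t2 := t1.Clone()
+	t2.ReplaceOrInsert(Int(100))
+	fmt.Println(t1.Has(Int(100)), t2.Has(Int(100))) // false true
+}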
+
+// maxItems returns the max number of items to allow per node.
+func (t *BTree) maxItems() int {
+	return t.degree*2 - 1
+}
+
+// minItems returns the min number of items to allow per node (ignored for the
+// root node).
+func (t *BTree) minItems() int {
+	return t.degree - 1
+}
+
+func (c *copyOnWriteContext) newNode() (n *node) {
+	n = c.freelist.newNode()
+	n.cow = c
+	return
+}
+
+type freeType int
+
+const (
+	ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist)
+	ftStored                       // node was stored in the freelist for later use
+	ftNotOwned                     // node was ignored by COW, since it's owned by another one
+)
+
+// freeNode frees a node within a given COW context, if it's owned by that
+// context.  It returns what happened to the node (see freeType const
+// documentation).
+func (c *copyOnWriteContext) freeNode(n *node) freeType {
+	if n.cow == c {
+		// clear to allow GC
+		n.items.truncate(0)
+		n.children.truncate(0)
+		n.cow = nil
+		if c.freelist.freeNode(n) {
+			return ftStored
+		} else {
+			return ftFreelistFull
+		}
+	} else {
+		return ftNotOwned
+	}
+}
+
+// ReplaceOrInsert adds the given item to the tree.  If an item in the tree
+// already equals the given one, it is removed from the tree and returned.
+// Otherwise, nil is returned.
+//
+// nil cannot be added to the tree (will panic).
+func (t *BTree) ReplaceOrInsert(item Item) Item {
+	if item == nil {
+		panic("nil item being added to BTree")
+	}
+	if t.root == nil {
+		t.root = t.cow.newNode()
+		t.root.items = append(t.root.items, item)
+		t.length++
+		return nil
+	} else {
+		t.root = t.root.mutableFor(t.cow)
+		if len(t.root.items) >= t.maxItems() {
+			item2, second := t.root.split(t.maxItems() / 2)
+			oldroot := t.root
+			t.root = t.cow.newNode()
+			t.root.items = append(t.root.items, item2)
+			t.root.children = append(t.root.children, oldroot, second)
+		}
+	}
+	out := t.root.insert(item, t.maxItems())
+	if out == nil {
+		t.length++
+	}
+	return out
+}
+
+// Delete removes an item equal to the passed in item from the tree, returning
+// it.  If no such item exists, returns nil.
+func (t *BTree) Delete(item Item) Item {
+	return t.deleteItem(item, removeItem)
+}
+
+// DeleteMin removes the smallest item in the tree and returns it.
+// If no such item exists, returns nil.
+func (t *BTree) DeleteMin() Item {
+	return t.deleteItem(nil, removeMin)
+}
+
+// DeleteMax removes the largest item in the tree and returns it.
+// If no such item exists, returns nil.
+func (t *BTree) DeleteMax() Item {
+	return t.deleteItem(nil, removeMax)
+}
+
+func (t *BTree) deleteItem(item Item, typ toRemove) Item {
+	if t.root == nil || len(t.root.items) == 0 {
+		return nil
+	}
+	t.root = t.root.mutableFor(t.cow)
+	out := t.root.remove(item, t.minItems(), typ)
+	if len(t.root.items) == 0 && len(t.root.children) > 0 {
+		oldroot := t.root
+		t.root = t.root.children[0]
+		t.cow.freeNode(oldroot)
+	}
+	if out != nil {
+		t.length--
+	}
+	return out
+}
+
+// AscendRange calls the iterator for every value in the tree within the range
+// [greaterOrEqual, lessThan), until iterator returns false.
+func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(ascend, greaterOrEqual, lessThan, true, false, iterator)
+}
+
+// AscendLessThan calls the iterator for every value in the tree within the range
+// [first, pivot), until iterator returns false.
+func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(ascend, nil, pivot, false, false, iterator)
+}
+
+// AscendGreaterOrEqual calls the iterator for every value in the tree within
+// the range [pivot, last], until iterator returns false.
+func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(ascend, pivot, nil, true, false, iterator)
+}
+
+// Ascend calls the iterator for every value in the tree within the range
+// [first, last], until iterator returns false.
+func (t *BTree) Ascend(iterator ItemIterator) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(ascend, nil, nil, false, false, iterator)
+}
+
+// DescendRange calls the iterator for every value in the tree within the range
+// [lessOrEqual, greaterThan), until iterator returns false.
+func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(descend, lessOrEqual, greaterThan, true, false, iterator)
+}
+
+// DescendLessOrEqual calls the iterator for every value in the tree within the range
+// [pivot, first], until iterator returns false.
+func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(descend, pivot, nil, true, false, iterator)
+}
+
+// DescendGreaterThan calls the iterator for every value in the tree within
+// the range [last, pivot), until iterator returns false.
+func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(descend, nil, pivot, false, false, iterator)
+}
+
+// Descend calls the iterator for every value in the tree within the range
+// [last, first], until iterator returns false.
+func (t *BTree) Descend(iterator ItemIterator) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(descend, nil, nil, false, false, iterator)
+}
+
+// Get looks for the key item in the tree, returning it.  It returns nil if
+// unable to find that item.
+func (t *BTree) Get(key Item) Item {
+	if t.root == nil {
+		return nil
+	}
+	return t.root.get(key)
+}
+
+// Min returns the smallest item in the tree, or nil if the tree is empty.
+func (t *BTree) Min() Item {
+	return min(t.root)
+}
+
+// Max returns the largest item in the tree, or nil if the tree is empty.
+func (t *BTree) Max() Item {
+	return max(t.root)
+}
+
+// Has returns true if the given key is in the tree.
+func (t *BTree) Has(key Item) bool {
+	return t.Get(key) != nil
+}
+
+// Len returns the number of items currently in the tree.
+func (t *BTree) Len() int {
+	return t.length
+}
+
+// Clear removes all items from the btree.  If addNodesToFreelist is true,
+// t's nodes are added to its freelist as part of this call, until the freelist
+// is full.  Otherwise, the root node is simply dereferenced and the subtree
+// left to Go's normal GC processes.
+//
+// This can be much faster
+// than calling Delete on all elements, because that requires finding/removing
+// each element in the tree and updating the tree accordingly.  It also is
+// somewhat faster than creating a new tree to replace the old one, because
+// nodes from the old tree are reclaimed into the freelist for use by the new
+// one, instead of being lost to the garbage collector.
+//
+// This call takes:
+//   O(1): when addNodesToFreelist is false, this is a single operation.
+//   O(1): when the freelist is already full, it breaks out immediately
+//   O(freelist size):  when the freelist is empty and the nodes are all owned
+//       by this tree, nodes are added to the freelist until full.
+//   O(tree size):  when all nodes are owned by another tree, all nodes are
+//       iterated over looking for nodes to add to the freelist, and due to
+//       ownership, none are.
+func (t *BTree) Clear(addNodesToFreelist bool) {
+	if t.root != nil && addNodesToFreelist {
+		t.root.reset(t.cow)
+	}
+	t.root, t.length = nil, 0
+}
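+
+// clearSketch is an editor-added illustration of the Clear(true) path
+// described above; it is not part of the upstream library.
+func clearSketch() {
+	tr := New(4)
+	for i := 0; i < 100; i++ {
+		tr.ReplaceOrInsert(Int(i))
+	}
+	// Reclaim the tree's nodes into its freelist rather than leaving the
+	// whole subtree to the garbage collector.
+	tr.Clear(true)
+	fmt.Println(tr.Len()) // 0
+}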
+
+// reset returns a subtree to the freelist.  It breaks out immediately if the
+// freelist is full, since the only benefit of iterating is to fill that
+// freelist up.  Returns true if parent reset call should continue.
+func (n *node) reset(c *copyOnWriteContext) bool {
+	for _, child := range n.children {
+		if !child.reset(c) {
+			return false
+		}
+	}
+	return c.freeNode(n) != ftFreelistFull
+}
+
+// Int implements the Item interface for integers.
+type Int int
+
+// Less returns true if int(a) < int(b).
+func (a Int) Less(b Item) bool {
+	return a < b.(Int)
+}
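+
+// usageSketch is an editor-added example of the Item-based API defined in
+// this file (New, ReplaceOrInsert, Get, Delete, AscendGreaterOrEqual); it is
+// not part of the upstream library.
+func usageSketch() {
+	tr := New(32)
+	for i := 0; i < 10; i++ {
+		tr.ReplaceOrInsert(Int(i))
+	}
+	fmt.Println(tr.Get(Int(3)))    // 3
+	fmt.Println(tr.Delete(Int(4))) // 4
+	tr.AscendGreaterOrEqual(Int(7), func(i Item) bool {
+		fmt.Println(i) // 7, then 8, then 9
+		return true
+	})
+}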

+ 1083 - 0
data_tool/src/github.com/google/btree/btree_generic.go

@@ -0,0 +1,1083 @@
+// Copyright 2014-2022 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.18
+// +build go1.18
+
+// In Go 1.18 and beyond, a BTreeG generic is created, and BTree is a specific
+// instantiation of that generic for the Item interface, with a backwards-
+// compatible API.  Before go1.18, generics are not supported,
+// and BTree is just an implementation based around the Item interface.
+
+// Package btree implements in-memory B-Trees of arbitrary degree.
+//
+// btree implements an in-memory B-Tree for use as an ordered data structure.
+// It is not meant for persistent storage solutions.
+//
+// It has a flatter structure than an equivalent red-black or other binary tree,
+// which in some cases yields better memory usage and/or performance.
+// See some discussion on the matter here:
+//   http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html
+// Note, though, that this project is in no way related to the C++ B-Tree
+// implementation written about there.
+//
+// Within this tree, each node contains a slice of items and a (possibly nil)
+// slice of children.  For basic numeric values or raw structs, this can cause
+// efficiency differences when compared to equivalent C++ template code that
+// stores values in arrays within the node:
+//   * Storing values as interfaces adds overhead (each value is stored as
+//     the value itself, plus 2 words for the interface pointing to that
+//     value and its type), resulting in higher memory use.
+//   * Since interfaces can point to values anywhere in memory, values are
+//     most likely not stored in contiguous blocks, resulting in a higher
+//     number of cache misses.
+// These issues don't tend to matter, though, when working with strings or other
+// heap-allocated structures, since C++-equivalent structures also must store
+// pointers and also distribute their values across the heap.
+//
+// This implementation is designed to be a drop-in replacement for gollrb.LLRB
+// trees (http://github.com/petar/gollrb), an excellent and probably the most
+// widely used ordered tree implementation in the Go ecosystem currently.
+// Its functions, therefore, exactly mirror those of
+// llrb.LLRB where possible.  Unlike gollrb, though, we currently don't
+// support storing multiple equivalent values.
+//
+// There are two implementations; those suffixed with 'G' are generic, usable
+// for any type, and require a passed-in "less" function to define their ordering.
+// Those without this suffix are specific to the 'Item' interface, and use
+// its 'Less' function for ordering.
+package btree
+
+import (
+	"fmt"
+	"io"
+	"sort"
+	"strings"
+	"sync"
+)
+
+// Item represents a single object in the tree.
+type Item interface {
+	// Less tests whether the current item is less than the given argument.
+	//
+	// This must provide a strict weak ordering.
+	// If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only
+	// hold one of either a or b in the tree).
+	Less(than Item) bool
+}
+
+const (
+	DefaultFreeListSize = 32
+)
+
+// FreeListG represents a free list of btree nodes. By default each
+// BTree has its own FreeList, but multiple BTrees can share the same
+// FreeList, in particular when they're created with Clone.
+// Two BTrees using the same freelist are safe for concurrent write access.
+type FreeListG[T any] struct {
+	mu       sync.Mutex
+	freelist []*node[T]
+}
+
+// NewFreeListG creates a new free list.
+// size is the maximum size of the returned free list.
+func NewFreeListG[T any](size int) *FreeListG[T] {
+	return &FreeListG[T]{freelist: make([]*node[T], 0, size)}
+}
+
+func (f *FreeListG[T]) newNode() (n *node[T]) {
+	f.mu.Lock()
+	index := len(f.freelist) - 1
+	if index < 0 {
+		f.mu.Unlock()
+		return new(node[T])
+	}
+	n = f.freelist[index]
+	f.freelist[index] = nil
+	f.freelist = f.freelist[:index]
+	f.mu.Unlock()
+	return
+}
+
+func (f *FreeListG[T]) freeNode(n *node[T]) (out bool) {
+	f.mu.Lock()
+	if len(f.freelist) < cap(f.freelist) {
+		f.freelist = append(f.freelist, n)
+		out = true
+	}
+	f.mu.Unlock()
+	return
+}
+
+// ItemIteratorG allows callers of {A/De}scend* to iterate in-order over portions of
+// the tree.  When this function returns false, iteration will stop and the
+// associated Ascend* function will immediately return.
+type ItemIteratorG[T any] func(item T) bool
+
+// Ordered represents the set of types for which the '<' operator works.
+type Ordered interface {
+	~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~float32 | ~float64 | ~string
+}
+
+// Less[T] returns a default LessFunc that uses the '<' operator for types that support it.
+func Less[T Ordered]() LessFunc[T] {
+	return func(a, b T) bool { return a < b }
+}
+
+// NewOrderedG creates a new B-Tree for ordered types.
+func NewOrderedG[T Ordered](degree int) *BTreeG[T] {
+	return NewG[T](degree, Less[T]())
+}
+
+// NewG creates a new B-Tree with the given degree.
+//
+// NewG(2), for example, will create a 2-3-4 tree (each node contains 1-3 items
+// and 2-4 children).
+//
+// The passed-in LessFunc determines how objects of type T are ordered.
+func NewG[T any](degree int, less LessFunc[T]) *BTreeG[T] {
+	return NewWithFreeListG(degree, less, NewFreeListG[T](DefaultFreeListSize))
+}
+
+// NewWithFreeListG creates a new B-Tree that uses the given node free list.
+func NewWithFreeListG[T any](degree int, less LessFunc[T], f *FreeListG[T]) *BTreeG[T] {
+	if degree <= 1 {
+		panic("bad degree")
+	}
+	return &BTreeG[T]{
+		degree: degree,
+		cow:    &copyOnWriteContext[T]{freelist: f, less: less},
+	}
+}
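+
+// newOrderedSketch is an editor-added illustration of the constructors above;
+// it is not part of the upstream library. NewOrderedG relies on the built-in
+// '<' operator, while NewG takes an explicit LessFunc.
+func newOrderedSketch() {
+	ints := NewOrderedG[int](2)                                       // ordered by '<'
+	words := NewG[string](2, func(a, b string) bool { return a > b }) // reverse lexical order
+	ints.ReplaceOrInsert(42)
+	words.ReplaceOrInsert("btree")
+}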
+
+// items stores items in a node.
+type items[T any] []T
+
+// insertAt inserts a value into the given index, pushing all subsequent values
+// forward.
+func (s *items[T]) insertAt(index int, item T) {
+	var zero T
+	*s = append(*s, zero)
+	if index < len(*s) {
+		copy((*s)[index+1:], (*s)[index:])
+	}
+	(*s)[index] = item
+}
+
+// removeAt removes a value at a given index, pulling all subsequent values
+// back.
+func (s *items[T]) removeAt(index int) T {
+	item := (*s)[index]
+	copy((*s)[index:], (*s)[index+1:])
+	var zero T
+	(*s)[len(*s)-1] = zero
+	*s = (*s)[:len(*s)-1]
+	return item
+}
+
+// pop removes and returns the last element in the list.
+func (s *items[T]) pop() (out T) {
+	index := len(*s) - 1
+	out = (*s)[index]
+	var zero T
+	(*s)[index] = zero
+	*s = (*s)[:index]
+	return
+}
+
+// truncate truncates this instance at index so that it contains only the
+// first index items. index must be less than or equal to length.
+func (s *items[T]) truncate(index int) {
+	var toClear items[T]
+	*s, toClear = (*s)[:index], (*s)[index:]
+	var zero T
+	for i := 0; i < len(toClear); i++ {
+		toClear[i] = zero
+	}
+}
+
+// find returns the index where the given item should be inserted into this
+// list.  'found' is true if the item already exists in the list at the given
+// index.
+func (s items[T]) find(item T, less func(T, T) bool) (index int, found bool) {
+	i := sort.Search(len(s), func(i int) bool {
+		return less(item, s[i])
+	})
+	if i > 0 && !less(s[i-1], item) {
+		return i - 1, true
+	}
+	return i, false
+}
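+
+// findSketch is an editor-added illustration (not part of the upstream
+// library) of the sort.Search pattern used by items.find above: it returns
+// the insertion index for x in an ascending slice and whether x is already
+// present.
+func findSketch(s []int, x int) (int, bool) {
+	i := sort.Search(len(s), func(j int) bool { return x < s[j] })
+	// sort.Search returns the first index j with x < s[j]; if the element
+	// just before that index is not less than x, the two must be equal.
+	if i > 0 && !(s[i-1] < x) {
+		return i - 1, true
+	}
+	return i, false
+}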
+
+// node is an internal node in a tree.
+//
+// It must at all times maintain the invariant that either
+//   * len(children) == 0, len(items) unconstrained
+//   * len(children) == len(items) + 1
+type node[T any] struct {
+	items    items[T]
+	children items[*node[T]]
+	cow      *copyOnWriteContext[T]
+}
+
+func (n *node[T]) mutableFor(cow *copyOnWriteContext[T]) *node[T] {
+	if n.cow == cow {
+		return n
+	}
+	out := cow.newNode()
+	if cap(out.items) >= len(n.items) {
+		out.items = out.items[:len(n.items)]
+	} else {
+		out.items = make(items[T], len(n.items), cap(n.items))
+	}
+	copy(out.items, n.items)
+	// Copy children
+	if cap(out.children) >= len(n.children) {
+		out.children = out.children[:len(n.children)]
+	} else {
+		out.children = make(items[*node[T]], len(n.children), cap(n.children))
+	}
+	copy(out.children, n.children)
+	return out
+}
+
+func (n *node[T]) mutableChild(i int) *node[T] {
+	c := n.children[i].mutableFor(n.cow)
+	n.children[i] = c
+	return c
+}
+
+// split splits the given node at the given index.  The current node shrinks,
+// and this function returns the item that existed at that index and a new node
+// containing all items/children after it.
+func (n *node[T]) split(i int) (T, *node[T]) {
+	item := n.items[i]
+	next := n.cow.newNode()
+	next.items = append(next.items, n.items[i+1:]...)
+	n.items.truncate(i)
+	if len(n.children) > 0 {
+		next.children = append(next.children, n.children[i+1:]...)
+		n.children.truncate(i + 1)
+	}
+	return item, next
+}
+
+// maybeSplitChild checks if a child should be split, and if so splits it.
+// Returns whether or not a split occurred.
+func (n *node[T]) maybeSplitChild(i, maxItems int) bool {
+	if len(n.children[i].items) < maxItems {
+		return false
+	}
+	first := n.mutableChild(i)
+	item, second := first.split(maxItems / 2)
+	n.items.insertAt(i, item)
+	n.children.insertAt(i+1, second)
+	return true
+}
+
+// insert inserts an item into the subtree rooted at this node, making sure
+// no nodes in the subtree exceed maxItems items.  Should an equivalent item
+// be found/replaced by insert, it will be returned.
+func (n *node[T]) insert(item T, maxItems int) (_ T, _ bool) {
+	i, found := n.items.find(item, n.cow.less)
+	if found {
+		out := n.items[i]
+		n.items[i] = item
+		return out, true
+	}
+	if len(n.children) == 0 {
+		n.items.insertAt(i, item)
+		return
+	}
+	if n.maybeSplitChild(i, maxItems) {
+		inTree := n.items[i]
+		switch {
+		case n.cow.less(item, inTree):
+			// no change, we want first split node
+		case n.cow.less(inTree, item):
+			i++ // we want second split node
+		default:
+			out := n.items[i]
+			n.items[i] = item
+			return out, true
+		}
+	}
+	return n.mutableChild(i).insert(item, maxItems)
+}
+
+// get finds the given key in the subtree and returns it.
+func (n *node[T]) get(key T) (_ T, _ bool) {
+	i, found := n.items.find(key, n.cow.less)
+	if found {
+		return n.items[i], true
+	} else if len(n.children) > 0 {
+		return n.children[i].get(key)
+	}
+	return
+}
+
+// min returns the first item in the subtree.
+func min[T any](n *node[T]) (_ T, found bool) {
+	if n == nil {
+		return
+	}
+	for len(n.children) > 0 {
+		n = n.children[0]
+	}
+	if len(n.items) == 0 {
+		return
+	}
+	return n.items[0], true
+}
+
+// max returns the last item in the subtree.
+func max[T any](n *node[T]) (_ T, found bool) {
+	if n == nil {
+		return
+	}
+	for len(n.children) > 0 {
+		n = n.children[len(n.children)-1]
+	}
+	if len(n.items) == 0 {
+		return
+	}
+	return n.items[len(n.items)-1], true
+}
+
+// toRemove details what item to remove in a node.remove call.
+type toRemove int
+
+const (
+	removeItem toRemove = iota // removes the given item
+	removeMin                  // removes smallest item in the subtree
+	removeMax                  // removes largest item in the subtree
+)
+
+// remove removes an item from the subtree rooted at this node.
+func (n *node[T]) remove(item T, minItems int, typ toRemove) (_ T, _ bool) {
+	var i int
+	var found bool
+	switch typ {
+	case removeMax:
+		if len(n.children) == 0 {
+			return n.items.pop(), true
+		}
+		i = len(n.items)
+	case removeMin:
+		if len(n.children) == 0 {
+			return n.items.removeAt(0), true
+		}
+		i = 0
+	case removeItem:
+		i, found = n.items.find(item, n.cow.less)
+		if len(n.children) == 0 {
+			if found {
+				return n.items.removeAt(i), true
+			}
+			return
+		}
+	default:
+		panic("invalid type")
+	}
+	// If we get to here, we have children.
+	if len(n.children[i].items) <= minItems {
+		return n.growChildAndRemove(i, item, minItems, typ)
+	}
+	child := n.mutableChild(i)
+	// Either we had enough items to begin with, or we've done some
+	// merging/stealing, because we've got enough now and we're ready to return
+	// stuff.
+	if found {
+		// The item exists at index 'i', and the child we've selected can give us a
+		// predecessor, since if we've gotten here it's got > minItems items in it.
+		out := n.items[i]
+		// We use our special-case 'remove' call with typ=maxItem to pull the
+		// predecessor of item i (the rightmost leaf of our immediate left child)
+		// and set it into where we pulled the item from.
+		var zero T
+		n.items[i], _ = child.remove(zero, minItems, removeMax)
+		return out, true
+	}
+	// Final recursive call.  Once we're here, we know that the item isn't in this
+	// node and that the child is big enough to remove from.
+	return child.remove(item, minItems, typ)
+}
+
+// growChildAndRemove grows child 'i' to make sure it's possible to remove an
+// item from it while keeping it at minItems, then calls remove to actually
+// remove it.
+//
+// Most documentation says we have to do two sets of special casing:
+//   1) item is in this node
+//   2) item is in child
+// In both cases, we need to handle the two subcases:
+//   A) node has enough values that it can spare one
+//   B) node doesn't have enough values
+// For the latter, we have to check:
+//   a) left sibling has node to spare
+//   b) right sibling has node to spare
+//   c) we must merge
+// To simplify our code here, we handle cases #1 and #2 the same:
+// If a node doesn't have enough items, we make sure it does (using a,b,c).
+// We then simply redo our remove call, and the second time (regardless of
+// whether we're in case 1 or 2), we'll have enough items and can guarantee
+// that we hit case A.
+func (n *node[T]) growChildAndRemove(i int, item T, minItems int, typ toRemove) (T, bool) {
+	if i > 0 && len(n.children[i-1].items) > minItems {
+		// Steal from left child
+		child := n.mutableChild(i)
+		stealFrom := n.mutableChild(i - 1)
+		stolenItem := stealFrom.items.pop()
+		child.items.insertAt(0, n.items[i-1])
+		n.items[i-1] = stolenItem
+		if len(stealFrom.children) > 0 {
+			child.children.insertAt(0, stealFrom.children.pop())
+		}
+	} else if i < len(n.items) && len(n.children[i+1].items) > minItems {
+		// steal from right child
+		child := n.mutableChild(i)
+		stealFrom := n.mutableChild(i + 1)
+		stolenItem := stealFrom.items.removeAt(0)
+		child.items = append(child.items, n.items[i])
+		n.items[i] = stolenItem
+		if len(stealFrom.children) > 0 {
+			child.children = append(child.children, stealFrom.children.removeAt(0))
+		}
+	} else {
+		if i >= len(n.items) {
+			i--
+		}
+		child := n.mutableChild(i)
+		// merge with right child
+		mergeItem := n.items.removeAt(i)
+		mergeChild := n.children.removeAt(i + 1)
+		child.items = append(child.items, mergeItem)
+		child.items = append(child.items, mergeChild.items...)
+		child.children = append(child.children, mergeChild.children...)
+		n.cow.freeNode(mergeChild)
+	}
+	return n.remove(item, minItems, typ)
+}
+
+type direction int
+
+const (
+	descend = direction(-1)
+	ascend  = direction(+1)
+)
+
+type optionalItem[T any] struct {
+	item  T
+	valid bool
+}
+
+func optional[T any](item T) optionalItem[T] {
+	return optionalItem[T]{item: item, valid: true}
+}
+func empty[T any]() optionalItem[T] {
+	return optionalItem[T]{}
+}
+
+// iterate provides a simple method for iterating over elements in the tree.
+//
+// When ascending, the 'start' should be less than 'stop' and when descending,
+// the 'start' should be greater than 'stop'. Setting 'includeStart' to true
+// will force the iterator to include the first item when it equals 'start',
+// thus creating "greaterOrEqual" or "lessThanEqual" queries rather than just
+// "greaterThan" or "lessThan" queries.
+func (n *node[T]) iterate(dir direction, start, stop optionalItem[T], includeStart bool, hit bool, iter ItemIteratorG[T]) (bool, bool) {
+	var ok, found bool
+	var index int
+	switch dir {
+	case ascend:
+		if start.valid {
+			index, _ = n.items.find(start.item, n.cow.less)
+		}
+		for i := index; i < len(n.items); i++ {
+			if len(n.children) > 0 {
+				if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok {
+					return hit, false
+				}
+			}
+			if !includeStart && !hit && start.valid && !n.cow.less(start.item, n.items[i]) {
+				hit = true
+				continue
+			}
+			hit = true
+			if stop.valid && !n.cow.less(n.items[i], stop.item) {
+				return hit, false
+			}
+			if !iter(n.items[i]) {
+				return hit, false
+			}
+		}
+		if len(n.children) > 0 {
+			if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
+				return hit, false
+			}
+		}
+	case descend:
+		if start.valid {
+			index, found = n.items.find(start.item, n.cow.less)
+			if !found {
+				index = index - 1
+			}
+		} else {
+			index = len(n.items) - 1
+		}
+		for i := index; i >= 0; i-- {
+			if start.valid && !n.cow.less(n.items[i], start.item) {
+				if !includeStart || hit || n.cow.less(start.item, n.items[i]) {
+					continue
+				}
+			}
+			if len(n.children) > 0 {
+				if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
+					return hit, false
+				}
+			}
+			if stop.valid && !n.cow.less(stop.item, n.items[i]) {
+				return hit, false
+			}
+			hit = true
+			if !iter(n.items[i]) {
+				return hit, false
+			}
+		}
+		if len(n.children) > 0 {
+			if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok {
+				return hit, false
+			}
+		}
+	}
+	return hit, true
+}
+
+// print is used for testing/debugging purposes.
+func (n *node[T]) print(w io.Writer, level int) {
+	fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat("  ", level), n.items)
+	for _, c := range n.children {
+		c.print(w, level+1)
+	}
+}
+
+// BTreeG is a generic implementation of a B-Tree.
+//
+// BTreeG stores items of type T in an ordered structure, allowing easy insertion,
+// removal, and iteration.
+//
+// Write operations are not safe for concurrent mutation by multiple
+// goroutines, but Read operations are.
+type BTreeG[T any] struct {
+	degree int
+	length int
+	root   *node[T]
+	cow    *copyOnWriteContext[T]
+}
+
+// LessFunc[T] determines how to order a type 'T'.  It should implement a strict
+// ordering, and should return true if within that ordering, 'a' < 'b'.
+type LessFunc[T any] func(a, b T) bool
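+
+// pairSketch and byKeySketch are editor-added, hypothetical examples (not
+// part of the upstream library) showing how a LessFunc defines the ordering
+// for a struct type.
+type pairSketch struct {
+	key   string
+	value int
+}
+
+func byKeySketch() *BTreeG[pairSketch] {
+	less := func(a, b pairSketch) bool { return a.key < b.key }
+	return NewG[pairSketch](4, less)
+}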
+
+// copyOnWriteContext pointers determine node ownership: a tree with a write
+// context equivalent to a node's write context is allowed to modify that node.
+// A tree whose write context does not match a node's is not allowed to modify
+// it, and must create a new, writable copy (i.e. it's a Clone).
+//
+// When doing any write operation, we maintain the invariant that the current
+// node's context is equal to the context of the tree that requested the write.
+// We do this by, before we descend into any node, creating a copy with the
+// correct context if the contexts don't match.
+//
+// Since the node we're currently visiting on any write has the requesting
+// tree's context, that node is modifiable in place.  Children of that node may
+// not share context, but before we descend into them, we'll make a mutable
+// copy.
+type copyOnWriteContext[T any] struct {
+	freelist *FreeListG[T]
+	less     LessFunc[T]
+}
+
+// Clone clones the btree, lazily.  Clone should not be called concurrently,
+// but the original tree (t) and the new tree (t2) can be used concurrently
+// once the Clone call completes.
+//
+// The internal tree structure of t is marked read-only and shared between t and
+// t2.  Writes to both t and t2 use copy-on-write logic, creating new nodes
+// whenever one of t's original nodes would have been modified.  Read operations
+// should see no performance degradation.  Write operations for both t and t2
+// will initially experience minor slow-downs caused by additional allocs and
+// copies due to the aforementioned copy-on-write logic, but should converge to
+// the performance characteristics of the original tree.
+func (t *BTreeG[T]) Clone() (t2 *BTreeG[T]) {
+	// Create two entirely new copy-on-write contexts.
+	// This operation effectively creates three trees:
+	//   the original, shared nodes (old t.cow)
+	//   the new t.cow nodes
+	//   the new out.cow nodes
+	cow1, cow2 := *t.cow, *t.cow
+	out := *t
+	t.cow = &cow1
+	out.cow = &cow2
+	return &out
+}
+
+// maxItems returns the max number of items to allow per node.
+func (t *BTreeG[T]) maxItems() int {
+	return t.degree*2 - 1
+}
+
+// minItems returns the min number of items to allow per node (ignored for the
+// root node).
+func (t *BTreeG[T]) minItems() int {
+	return t.degree - 1
+}
+
+func (c *copyOnWriteContext[T]) newNode() (n *node[T]) {
+	n = c.freelist.newNode()
+	n.cow = c
+	return
+}
+
+type freeType int
+
+const (
+	ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist)
+	ftStored                       // node was stored in the freelist for later use
+	ftNotOwned                     // node was ignored by COW, since it's owned by another one
+)
+
+// freeNode frees a node within a given COW context, if it's owned by that
+// context.  It returns what happened to the node (see freeType const
+// documentation).
+func (c *copyOnWriteContext[T]) freeNode(n *node[T]) freeType {
+	if n.cow == c {
+		// clear to allow GC
+		n.items.truncate(0)
+		n.children.truncate(0)
+		n.cow = nil
+		if c.freelist.freeNode(n) {
+			return ftStored
+		} else {
+			return ftFreelistFull
+		}
+	} else {
+		return ftNotOwned
+	}
+}
+
+// ReplaceOrInsert adds the given item to the tree.  If an item in the tree
+// already equals the given one, it is removed from the tree and returned,
+// and the second return value is true.  Otherwise, (zeroValue, false) is returned.
+//
+// nil cannot be added to the tree (will panic).
+func (t *BTreeG[T]) ReplaceOrInsert(item T) (_ T, _ bool) {
+	if t.root == nil {
+		t.root = t.cow.newNode()
+		t.root.items = append(t.root.items, item)
+		t.length++
+		return
+	} else {
+		t.root = t.root.mutableFor(t.cow)
+		if len(t.root.items) >= t.maxItems() {
+			item2, second := t.root.split(t.maxItems() / 2)
+			oldroot := t.root
+			t.root = t.cow.newNode()
+			t.root.items = append(t.root.items, item2)
+			t.root.children = append(t.root.children, oldroot, second)
+		}
+	}
+	out, outb := t.root.insert(item, t.maxItems())
+	if !outb {
+		t.length++
+	}
+	return out, outb
+}
+
+// Delete removes an item equal to the passed in item from the tree, returning
+// it.  If no such item exists, returns (zeroValue, false).
+func (t *BTreeG[T]) Delete(item T) (T, bool) {
+	return t.deleteItem(item, removeItem)
+}
+
+// DeleteMin removes the smallest item in the tree and returns it.
+// If no such item exists, returns (zeroValue, false).
+func (t *BTreeG[T]) DeleteMin() (T, bool) {
+	var zero T
+	return t.deleteItem(zero, removeMin)
+}
+
+// DeleteMax removes the largest item in the tree and returns it.
+// If no such item exists, returns (zeroValue, false).
+func (t *BTreeG[T]) DeleteMax() (T, bool) {
+	var zero T
+	return t.deleteItem(zero, removeMax)
+}
+
+func (t *BTreeG[T]) deleteItem(item T, typ toRemove) (_ T, _ bool) {
+	if t.root == nil || len(t.root.items) == 0 {
+		return
+	}
+	t.root = t.root.mutableFor(t.cow)
+	out, outb := t.root.remove(item, t.minItems(), typ)
+	if len(t.root.items) == 0 && len(t.root.children) > 0 {
+		oldroot := t.root
+		t.root = t.root.children[0]
+		t.cow.freeNode(oldroot)
+	}
+	if outb {
+		t.length--
+	}
+	return out, outb
+}
+
+// AscendRange calls the iterator for every value in the tree within the range
+// [greaterOrEqual, lessThan), until iterator returns false.
+func (t *BTreeG[T]) AscendRange(greaterOrEqual, lessThan T, iterator ItemIteratorG[T]) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(ascend, optional[T](greaterOrEqual), optional[T](lessThan), true, false, iterator)
+}
+
+// AscendLessThan calls the iterator for every value in the tree within the range
+// [first, pivot), until iterator returns false.
+func (t *BTreeG[T]) AscendLessThan(pivot T, iterator ItemIteratorG[T]) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(ascend, empty[T](), optional(pivot), false, false, iterator)
+}
+
+// AscendGreaterOrEqual calls the iterator for every value in the tree within
+// the range [pivot, last], until iterator returns false.
+func (t *BTreeG[T]) AscendGreaterOrEqual(pivot T, iterator ItemIteratorG[T]) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(ascend, optional[T](pivot), empty[T](), true, false, iterator)
+}
+
+// Ascend calls the iterator for every value in the tree within the range
+// [first, last], until iterator returns false.
+func (t *BTreeG[T]) Ascend(iterator ItemIteratorG[T]) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(ascend, empty[T](), empty[T](), false, false, iterator)
+}
+
+// DescendRange calls the iterator for every value in the tree within the range
+// [lessOrEqual, greaterThan), until iterator returns false.
+func (t *BTreeG[T]) DescendRange(lessOrEqual, greaterThan T, iterator ItemIteratorG[T]) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(descend, optional[T](lessOrEqual), optional[T](greaterThan), true, false, iterator)
+}
+
+// DescendLessOrEqual calls the iterator for every value in the tree within the range
+// [pivot, first], until iterator returns false.
+func (t *BTreeG[T]) DescendLessOrEqual(pivot T, iterator ItemIteratorG[T]) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(descend, optional[T](pivot), empty[T](), true, false, iterator)
+}
+
+// DescendGreaterThan calls the iterator for every value in the tree within
+// the range [last, pivot), until iterator returns false.
+func (t *BTreeG[T]) DescendGreaterThan(pivot T, iterator ItemIteratorG[T]) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(descend, empty[T](), optional[T](pivot), false, false, iterator)
+}
+
+// Descend calls the iterator for every value in the tree within the range
+// [last, first], until iterator returns false.
+func (t *BTreeG[T]) Descend(iterator ItemIteratorG[T]) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(descend, empty[T](), empty[T](), false, false, iterator)
+}
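+
+// rangeIterationSketch is an editor-added sketch (not part of the upstream
+// library) of the half-open range semantics documented above: AscendRange
+// visits [greaterOrEqual, lessThan) and DescendRange visits
+// [lessOrEqual, greaterThan).
+func rangeIterationSketch() {
+	tr := NewOrderedG[int](2)
+	for i := 0; i < 10; i++ {
+		tr.ReplaceOrInsert(i)
+	}
+	tr.AscendRange(3, 7, func(v int) bool {
+		fmt.Print(v, " ") // 3 4 5 6
+		return true
+	})
+	fmt.Println()
+	tr.DescendRange(7, 3, func(v int) bool {
+		fmt.Print(v, " ") // 7 6 5 4
+		return true
+	})
+	fmt.Println()
+}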
+
+// Get looks for the key item in the tree, returning it.  It returns
+// (zeroValue, false) if unable to find that item.
+func (t *BTreeG[T]) Get(key T) (_ T, _ bool) {
+	if t.root == nil {
+		return
+	}
+	return t.root.get(key)
+}
+
+// Min returns the smallest item in the tree, or (zeroValue, false) if the tree is empty.
+func (t *BTreeG[T]) Min() (_ T, _ bool) {
+	return min(t.root)
+}
+
+// Max returns the largest item in the tree, or (zeroValue, false) if the tree is empty.
+func (t *BTreeG[T]) Max() (_ T, _ bool) {
+	return max(t.root)
+}
+
+// Has returns true if the given key is in the tree.
+func (t *BTreeG[T]) Has(key T) bool {
+	_, ok := t.Get(key)
+	return ok
+}
+
+// Len returns the number of items currently in the tree.
+func (t *BTreeG[T]) Len() int {
+	return t.length
+}
+
+// Clear removes all items from the btree.  If addNodesToFreelist is true,
+// t's nodes are added to its freelist as part of this call, until the freelist
+// is full.  Otherwise, the root node is simply dereferenced and the subtree
+// left to Go's normal GC processes.
+//
+// This can be much faster
+// than calling Delete on all elements, because that requires finding/removing
+// each element in the tree and updating the tree accordingly.  It also is
+// somewhat faster than creating a new tree to replace the old one, because
+// nodes from the old tree are reclaimed into the freelist for use by the new
+// one, instead of being lost to the garbage collector.
+//
+// This call takes:
+//   O(1): when addNodesToFreelist is false, this is a single operation.
+//   O(1): when the freelist is already full, it breaks out immediately
+//   O(freelist size):  when the freelist is empty and the nodes are all owned
+//       by this tree, nodes are added to the freelist until full.
+//   O(tree size):  when all nodes are owned by another tree, all nodes are
+//       iterated over looking for nodes to add to the freelist, and due to
+//       ownership, none are.
+func (t *BTreeG[T]) Clear(addNodesToFreelist bool) {
+	if t.root != nil && addNodesToFreelist {
+		t.root.reset(t.cow)
+	}
+	t.root, t.length = nil, 0
+}
+
+// reset returns a subtree to the freelist.  It breaks out immediately if the
+// freelist is full, since the only benefit of iterating is to fill that
+// freelist up.  Returns true if parent reset call should continue.
+func (n *node[T]) reset(c *copyOnWriteContext[T]) bool {
+	for _, child := range n.children {
+		if !child.reset(c) {
+			return false
+		}
+	}
+	return c.freeNode(n) != ftFreelistFull
+}
+
+// Int implements the Item interface for integers.
+type Int int
+
+// Less returns true if int(a) < int(b).
+func (a Int) Less(b Item) bool {
+	return a < b.(Int)
+}
+
+// BTree is an implementation of a B-Tree.
+//
+// BTree stores Item instances in an ordered structure, allowing easy insertion,
+// removal, and iteration.
+//
+// Write operations are not safe for concurrent mutation by multiple
+// goroutines, but Read operations are.
+type BTree BTreeG[Item]
+
+var itemLess LessFunc[Item] = func(a, b Item) bool {
+	return a.Less(b)
+}
+
+// New creates a new B-Tree with the given degree.
+//
+// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items
+// and 2-4 children).
+func New(degree int) *BTree {
+	return (*BTree)(NewG[Item](degree, itemLess))
+}
+
+// FreeList represents a free list of btree nodes. By default each
+// BTree has its own FreeList, but multiple BTrees can share the same
+// FreeList.
+// Two BTrees using the same freelist are safe for concurrent write access.
+type FreeList FreeListG[Item]
+
+// NewFreeList creates a new free list.
+// size is the maximum size of the returned free list.
+func NewFreeList(size int) *FreeList {
+	return (*FreeList)(NewFreeListG[Item](size))
+}
+
+// NewWithFreeList creates a new B-Tree that uses the given node free list.
+func NewWithFreeList(degree int, f *FreeList) *BTree {
+	return (*BTree)(NewWithFreeListG[Item](degree, itemLess, (*FreeListG[Item])(f)))
+}
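+
+// sharedFreeListSketch is an editor-added illustration (not part of the
+// upstream library) of sharing one FreeList between several trees, as
+// described above: both trees draw nodes from, and return nodes to, the
+// same list.
+func sharedFreeListSketch() (*BTree, *BTree) {
+	fl := NewFreeList(DefaultFreeListSize)
+	return NewWithFreeList(8, fl), NewWithFreeList(8, fl)
+}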
+
+// ItemIterator allows callers of Ascend* to iterate in-order over portions of
+// the tree.  When this function returns false, iteration will stop and the
+// associated Ascend* function will immediately return.
+type ItemIterator ItemIteratorG[Item]
+
+// Clone clones the btree, lazily.  Clone should not be called concurrently,
+// but the original tree (t) and the new tree (t2) can be used concurrently
+// once the Clone call completes.
+//
+// The internal tree structure of t is marked read-only and shared between t and
+// t2.  Writes to both t and t2 use copy-on-write logic, creating new nodes
+// whenever one of t's original nodes would have been modified.  Read operations
+// should see no performance degradation.  Write operations for both t and t2
+// will initially experience minor slow-downs caused by additional allocs and
+// copies due to the aforementioned copy-on-write logic, but should converge to
+// the performance characteristics of the original tree.
+func (t *BTree) Clone() (t2 *BTree) {
+	return (*BTree)((*BTreeG[Item])(t).Clone())
+}
+
+// Delete removes an item equal to the passed in item from the tree, returning
+// it.  If no such item exists, returns nil.
+func (t *BTree) Delete(item Item) Item {
+	i, _ := (*BTreeG[Item])(t).Delete(item)
+	return i
+}
+
+// DeleteMax removes the largest item in the tree and returns it.
+// If no such item exists, returns nil.
+func (t *BTree) DeleteMax() Item {
+	i, _ := (*BTreeG[Item])(t).DeleteMax()
+	return i
+}
+
+// DeleteMin removes the smallest item in the tree and returns it.
+// If no such item exists, returns nil.
+func (t *BTree) DeleteMin() Item {
+	i, _ := (*BTreeG[Item])(t).DeleteMin()
+	return i
+}
+
+// Get looks for the key item in the tree, returning it.  It returns nil if
+// unable to find that item.
+func (t *BTree) Get(key Item) Item {
+	i, _ := (*BTreeG[Item])(t).Get(key)
+	return i
+}
+
+// Max returns the largest item in the tree, or nil if the tree is empty.
+func (t *BTree) Max() Item {
+	i, _ := (*BTreeG[Item])(t).Max()
+	return i
+}
+
+// Min returns the smallest item in the tree, or nil if the tree is empty.
+func (t *BTree) Min() Item {
+	i, _ := (*BTreeG[Item])(t).Min()
+	return i
+}
+
+// Has returns true if the given key is in the tree.
+func (t *BTree) Has(key Item) bool {
+	return (*BTreeG[Item])(t).Has(key)
+}
+
+// ReplaceOrInsert adds the given item to the tree.  If an item in the tree
+// already equals the given one, it is removed from the tree and returned.
+// Otherwise, nil is returned.
+//
+// nil cannot be added to the tree (will panic).
+func (t *BTree) ReplaceOrInsert(item Item) Item {
+	i, _ := (*BTreeG[Item])(t).ReplaceOrInsert(item)
+	return i
+}
+
+// AscendRange calls the iterator for every value in the tree within the range
+// [greaterOrEqual, lessThan), until iterator returns false.
+func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) {
+	(*BTreeG[Item])(t).AscendRange(greaterOrEqual, lessThan, (ItemIteratorG[Item])(iterator))
+}
+
+// AscendLessThan calls the iterator for every value in the tree within the range
+// [first, pivot), until iterator returns false.
+func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) {
+	(*BTreeG[Item])(t).AscendLessThan(pivot, (ItemIteratorG[Item])(iterator))
+}
+
+// AscendGreaterOrEqual calls the iterator for every value in the tree within
+// the range [pivot, last], until iterator returns false.
+func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) {
+	(*BTreeG[Item])(t).AscendGreaterOrEqual(pivot, (ItemIteratorG[Item])(iterator))
+}
+
+// Ascend calls the iterator for every value in the tree within the range
+// [first, last], until iterator returns false.
+func (t *BTree) Ascend(iterator ItemIterator) {
+	(*BTreeG[Item])(t).Ascend((ItemIteratorG[Item])(iterator))
+}
+
+// DescendRange calls the iterator for every value in the tree within the range
+// [lessOrEqual, greaterThan), until iterator returns false.
+func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) {
+	(*BTreeG[Item])(t).DescendRange(lessOrEqual, greaterThan, (ItemIteratorG[Item])(iterator))
+}
+
+// DescendLessOrEqual calls the iterator for every value in the tree within the range
+// [pivot, first], until iterator returns false.
+func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) {
+	(*BTreeG[Item])(t).DescendLessOrEqual(pivot, (ItemIteratorG[Item])(iterator))
+}
+
+// DescendGreaterThan calls the iterator for every value in the tree within
+// the range [last, pivot), until iterator returns false.
+func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) {
+	(*BTreeG[Item])(t).DescendGreaterThan(pivot, (ItemIteratorG[Item])(iterator))
+}
+
+// Descend calls the iterator for every value in the tree within the range
+// [last, first], until iterator returns false.
+func (t *BTree) Descend(iterator ItemIterator) {
+	(*BTreeG[Item])(t).Descend((ItemIteratorG[Item])(iterator))
+}
+
+// Len returns the number of items currently in the tree.
+func (t *BTree) Len() int {
+	return (*BTreeG[Item])(t).Len()
+}
+
+// Clear removes all items from the btree.  If addNodesToFreelist is true,
+// t's nodes are added to its freelist as part of this call, until the freelist
+// is full.  Otherwise, the root node is simply dereferenced and the subtree
+// left to Go's normal GC processes.
+//
+// This can be much faster
+// than calling Delete on all elements, because that requires finding/removing
+// each element in the tree and updating the tree accordingly.  It also is
+// somewhat faster than creating a new tree to replace the old one, because
+// nodes from the old tree are reclaimed into the freelist for use by the new
+// one, instead of being lost to the garbage collector.
+//
+// This call takes:
+//   O(1): when addNodesToFreelist is false, this is a single operation.
+//   O(1): when the freelist is already full, it breaks out immediately
+//   O(freelist size):  when the freelist is empty and the nodes are all owned
+//       by this tree, nodes are added to the freelist until full.
+//   O(tree size):  when all nodes are owned by another tree, all nodes are
+//       iterated over looking for nodes to add to the freelist, and due to
+//       ownership, none are.
+func (t *BTree) Clear(addNodesToFreelist bool) {
+	(*BTreeG[Item])(t).Clear(addNodesToFreelist)
+}

+ 764 - 0
data_tool/src/github.com/google/btree/btree_generic_test.go

@@ -0,0 +1,764 @@
+// Copyright 2014-2022 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.18
+// +build go1.18
+
+package btree
+
+import (
+	"fmt"
+	"math/rand"
+	"reflect"
+	"sort"
+	"sync"
+	"testing"
+)
+
+func intRange(s int, reverse bool) []int {
+	out := make([]int, s)
+	for i := 0; i < s; i++ {
+		v := i
+		if reverse {
+			v = s - i - 1
+		}
+		out[i] = v
+	}
+	return out
+}
+
+func intAll(t *BTreeG[int]) (out []int) {
+	t.Ascend(func(a int) bool {
+		out = append(out, a)
+		return true
+	})
+	return
+}
+
+func intAllRev(t *BTreeG[int]) (out []int) {
+	t.Descend(func(a int) bool {
+		out = append(out, a)
+		return true
+	})
+	return
+}
+
+func TestBTreeG(t *testing.T) {
+	tr := NewOrderedG[int](*btreeDegree)
+	const treeSize = 10000
+	for i := 0; i < 10; i++ {
+		if min, ok := tr.Min(); ok || min != 0 {
+			t.Fatalf("empty min, got %+v", min)
+		}
+		if max, ok := tr.Max(); ok || max != 0 {
+			t.Fatalf("empty max, got %+v", max)
+		}
+		for _, item := range rand.Perm(treeSize) {
+			if x, ok := tr.ReplaceOrInsert(item); ok || x != 0 {
+				t.Fatal("insert found item", item)
+			}
+		}
+		for _, item := range rand.Perm(treeSize) {
+			if x, ok := tr.ReplaceOrInsert(item); !ok || x != item {
+				t.Fatal("insert didn't find item", item)
+			}
+		}
+		want := 0
+		if min, ok := tr.Min(); !ok || min != want {
+			t.Fatalf("min: ok %v want %+v, got %+v", ok, want, min)
+		}
+		want = treeSize - 1
+		if max, ok := tr.Max(); !ok || max != want {
+			t.Fatalf("max: ok %v want %+v, got %+v", ok, want, max)
+		}
+		got := intAll(tr)
+		wantRange := intRange(treeSize, false)
+		if !reflect.DeepEqual(got, wantRange) {
+			t.Fatalf("mismatch:\n got: %v\nwant: %v", got, wantRange)
+		}
+
+		gotrev := intAllRev(tr)
+		wantrev := intRange(treeSize, true)
+		if !reflect.DeepEqual(gotrev, wantrev) {
+			t.Fatalf("mismatch:\n got: %v\nwant: %v", gotrev, wantrev)
+		}
+
+		for _, item := range rand.Perm(treeSize) {
+			if x, ok := tr.Delete(item); !ok || x != item {
+				t.Fatalf("didn't find %v", item)
+			}
+		}
+		if got = intAll(tr); len(got) > 0 {
+			t.Fatalf("some left!: %v", got)
+		}
+		if got = intAllRev(tr); len(got) > 0 {
+			t.Fatalf("some left!: %v", got)
+		}
+	}
+}
+
+func ExampleBTreeG() {
+	tr := NewOrderedG[int](*btreeDegree)
+	for i := 0; i < 10; i++ {
+		tr.ReplaceOrInsert(i)
+	}
+	fmt.Println("len:       ", tr.Len())
+	v, ok := tr.Get(3)
+	fmt.Println("get3:      ", v, ok)
+	v, ok = tr.Get(100)
+	fmt.Println("get100:    ", v, ok)
+	v, ok = tr.Delete(4)
+	fmt.Println("del4:      ", v, ok)
+	v, ok = tr.Delete(100)
+	fmt.Println("del100:    ", v, ok)
+	v, ok = tr.ReplaceOrInsert(5)
+	fmt.Println("replace5:  ", v, ok)
+	v, ok = tr.ReplaceOrInsert(100)
+	fmt.Println("replace100:", v, ok)
+	v, ok = tr.Min()
+	fmt.Println("min:       ", v, ok)
+	v, ok = tr.DeleteMin()
+	fmt.Println("delmin:    ", v, ok)
+	v, ok = tr.Max()
+	fmt.Println("max:       ", v, ok)
+	v, ok = tr.DeleteMax()
+	fmt.Println("delmax:    ", v, ok)
+	fmt.Println("len:       ", tr.Len())
+	// Output:
+	// len:        10
+	// get3:       3 true
+	// get100:     0 false
+	// del4:       4 true
+	// del100:     0 false
+	// replace5:   5 true
+	// replace100: 0 false
+	// min:        0 true
+	// delmin:     0 true
+	// max:        100 true
+	// delmax:     100 true
+	// len:        8
+}
+
+func TestDeleteMinG(t *testing.T) {
+	tr := NewOrderedG[int](3)
+	for _, v := range rand.Perm(100) {
+		tr.ReplaceOrInsert(v)
+	}
+	var got []int
+	for v, ok := tr.DeleteMin(); ok; v, ok = tr.DeleteMin() {
+		got = append(got, v)
+	}
+	if want := intRange(100, false); !reflect.DeepEqual(got, want) {
+		t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want)
+	}
+}
+
+func TestDeleteMaxG(t *testing.T) {
+	tr := NewOrderedG[int](3)
+	for _, v := range rand.Perm(100) {
+		tr.ReplaceOrInsert(v)
+	}
+	var got []int
+	for v, ok := tr.DeleteMax(); ok; v, ok = tr.DeleteMax() {
+		got = append(got, v)
+	}
+	if want := intRange(100, true); !reflect.DeepEqual(got, want) {
+		t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want)
+	}
+}
+
+func TestAscendRangeG(t *testing.T) {
+	tr := NewOrderedG[int](2)
+	for _, v := range rand.Perm(100) {
+		tr.ReplaceOrInsert(v)
+	}
+	var got []int
+	tr.AscendRange(40, 60, func(a int) bool {
+		got = append(got, a)
+		return true
+	})
+	if want := intRange(100, false)[40:60]; !reflect.DeepEqual(got, want) {
+		t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want)
+	}
+	got = got[:0]
+	tr.AscendRange(40, 60, func(a int) bool {
+		if a > 50 {
+			return false
+		}
+		got = append(got, a)
+		return true
+	})
+	if want := intRange(100, false)[40:51]; !reflect.DeepEqual(got, want) {
+		t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want)
+	}
+}
+
+func TestDescendRangeG(t *testing.T) {
+	tr := NewOrderedG[int](2)
+	for _, v := range rand.Perm(100) {
+		tr.ReplaceOrInsert(v)
+	}
+	var got []int
+	tr.DescendRange(60, 40, func(a int) bool {
+		got = append(got, a)
+		return true
+	})
+	if want := intRange(100, true)[39:59]; !reflect.DeepEqual(got, want) {
+		t.Fatalf("descendrange:\n got: %v\nwant: %v", got, want)
+	}
+	got = got[:0]
+	tr.DescendRange(60, 40, func(a int) bool {
+		if a < 50 {
+			return false
+		}
+		got = append(got, a)
+		return true
+	})
+	if want := intRange(100, true)[39:50]; !reflect.DeepEqual(got, want) {
+		t.Fatalf("descendrange:\n got: %v\nwant: %v", got, want)
+	}
+}
+
+func TestAscendLessThanG(t *testing.T) {
+	tr := NewOrderedG[int](*btreeDegree)
+	for _, v := range rand.Perm(100) {
+		tr.ReplaceOrInsert(v)
+	}
+	var got []int
+	tr.AscendLessThan(60, func(a int) bool {
+		got = append(got, a)
+		return true
+	})
+	if want := intRange(100, false)[:60]; !reflect.DeepEqual(got, want) {
+		t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want)
+	}
+	got = got[:0]
+	tr.AscendLessThan(60, func(a int) bool {
+		if a > 50 {
+			return false
+		}
+		got = append(got, a)
+		return true
+	})
+	if want := intRange(100, false)[:51]; !reflect.DeepEqual(got, want) {
+		t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want)
+	}
+}
+
+func TestDescendLessOrEqualG(t *testing.T) {
+	tr := NewOrderedG[int](*btreeDegree)
+	for _, v := range rand.Perm(100) {
+		tr.ReplaceOrInsert(v)
+	}
+	var got []int
+	tr.DescendLessOrEqual(40, func(a int) bool {
+		got = append(got, a)
+		return true
+	})
+	if want := intRange(100, true)[59:]; !reflect.DeepEqual(got, want) {
+		t.Fatalf("descendlessorequal:\n got: %v\nwant: %v", got, want)
+	}
+	got = got[:0]
+	tr.DescendLessOrEqual(60, func(a int) bool {
+		if a < 50 {
+			return false
+		}
+		got = append(got, a)
+		return true
+	})
+	if want := intRange(100, true)[39:50]; !reflect.DeepEqual(got, want) {
+		t.Fatalf("descendlessorequal:\n got: %v\nwant: %v", got, want)
+	}
+}
+
+func TestAscendGreaterOrEqualG(t *testing.T) {
+	tr := NewOrderedG[int](*btreeDegree)
+	for _, v := range rand.Perm(100) {
+		tr.ReplaceOrInsert(v)
+	}
+	var got []int
+	tr.AscendGreaterOrEqual(40, func(a int) bool {
+		got = append(got, a)
+		return true
+	})
+	if want := intRange(100, false)[40:]; !reflect.DeepEqual(got, want) {
+		t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want)
+	}
+	got = got[:0]
+	tr.AscendGreaterOrEqual(40, func(a int) bool {
+		if a > 50 {
+			return false
+		}
+		got = append(got, a)
+		return true
+	})
+	if want := intRange(100, false)[40:51]; !reflect.DeepEqual(got, want) {
+		t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want)
+	}
+}
+
+func TestDescendGreaterThanG(t *testing.T) {
+	tr := NewOrderedG[int](*btreeDegree)
+	for _, v := range rand.Perm(100) {
+		tr.ReplaceOrInsert(v)
+	}
+	var got []int
+	tr.DescendGreaterThan(40, func(a int) bool {
+		got = append(got, a)
+		return true
+	})
+	if want := intRange(100, true)[:59]; !reflect.DeepEqual(got, want) {
+		t.Fatalf("descendgreaterthan:\n got: %v\nwant: %v", got, want)
+	}
+	got = got[:0]
+	tr.DescendGreaterThan(40, func(a int) bool {
+		if a < 50 {
+			return false
+		}
+		got = append(got, a)
+		return true
+	})
+	if want := intRange(100, true)[:50]; !reflect.DeepEqual(got, want) {
+		t.Fatalf("descendgreaterthan:\n got: %v\nwant: %v", got, want)
+	}
+}
+
+func BenchmarkInsertG(b *testing.B) {
+	b.StopTimer()
+	insertP := rand.Perm(benchmarkTreeSize)
+	b.StartTimer()
+	i := 0
+	for i < b.N {
+		tr := NewOrderedG[int](*btreeDegree)
+		for _, item := range insertP {
+			tr.ReplaceOrInsert(item)
+			i++
+			if i >= b.N {
+				return
+			}
+		}
+	}
+}
+
+func BenchmarkSeekG(b *testing.B) {
+	b.StopTimer()
+	size := 100000
+	insertP := rand.Perm(size)
+	tr := NewOrderedG[int](*btreeDegree)
+	for _, item := range insertP {
+		tr.ReplaceOrInsert(item)
+	}
+	b.StartTimer()
+
+	for i := 0; i < b.N; i++ {
+		tr.AscendGreaterOrEqual(i%size, func(i int) bool { return false })
+	}
+}
+
+func BenchmarkDeleteInsertG(b *testing.B) {
+	b.StopTimer()
+	insertP := rand.Perm(benchmarkTreeSize)
+	tr := NewOrderedG[int](*btreeDegree)
+	for _, item := range insertP {
+		tr.ReplaceOrInsert(item)
+	}
+	b.StartTimer()
+	for i := 0; i < b.N; i++ {
+		tr.Delete(insertP[i%benchmarkTreeSize])
+		tr.ReplaceOrInsert(insertP[i%benchmarkTreeSize])
+	}
+}
+
+func BenchmarkDeleteInsertCloneOnceG(b *testing.B) {
+	b.StopTimer()
+	insertP := rand.Perm(benchmarkTreeSize)
+	tr := NewOrderedG[int](*btreeDegree)
+	for _, item := range insertP {
+		tr.ReplaceOrInsert(item)
+	}
+	tr = tr.Clone()
+	b.StartTimer()
+	for i := 0; i < b.N; i++ {
+		tr.Delete(insertP[i%benchmarkTreeSize])
+		tr.ReplaceOrInsert(insertP[i%benchmarkTreeSize])
+	}
+}
+
+func BenchmarkDeleteInsertCloneEachTimeG(b *testing.B) {
+	b.StopTimer()
+	insertP := rand.Perm(benchmarkTreeSize)
+	tr := NewOrderedG[int](*btreeDegree)
+	for _, item := range insertP {
+		tr.ReplaceOrInsert(item)
+	}
+	b.StartTimer()
+	for i := 0; i < b.N; i++ {
+		tr = tr.Clone()
+		tr.Delete(insertP[i%benchmarkTreeSize])
+		tr.ReplaceOrInsert(insertP[i%benchmarkTreeSize])
+	}
+}
+
+func BenchmarkDeleteG(b *testing.B) {
+	b.StopTimer()
+	insertP := rand.Perm(benchmarkTreeSize)
+	removeP := rand.Perm(benchmarkTreeSize)
+	b.StartTimer()
+	i := 0
+	for i < b.N {
+		b.StopTimer()
+		tr := NewOrderedG[int](*btreeDegree)
+		for _, v := range insertP {
+			tr.ReplaceOrInsert(v)
+		}
+		b.StartTimer()
+		for _, item := range removeP {
+			tr.Delete(item)
+			i++
+			if i >= b.N {
+				return
+			}
+		}
+		if tr.Len() > 0 {
+			panic(tr.Len())
+		}
+	}
+}
+
+func BenchmarkGetG(b *testing.B) {
+	b.StopTimer()
+	insertP := rand.Perm(benchmarkTreeSize)
+	removeP := rand.Perm(benchmarkTreeSize)
+	b.StartTimer()
+	i := 0
+	for i < b.N {
+		b.StopTimer()
+		tr := NewOrderedG[int](*btreeDegree)
+		for _, v := range insertP {
+			tr.ReplaceOrInsert(v)
+		}
+		b.StartTimer()
+		for _, item := range removeP {
+			tr.Get(item)
+			i++
+			if i >= b.N {
+				return
+			}
+		}
+	}
+}
+
+func BenchmarkGetCloneEachTimeG(b *testing.B) {
+	b.StopTimer()
+	insertP := rand.Perm(benchmarkTreeSize)
+	removeP := rand.Perm(benchmarkTreeSize)
+	b.StartTimer()
+	i := 0
+	for i < b.N {
+		b.StopTimer()
+		tr := NewOrderedG[int](*btreeDegree)
+		for _, v := range insertP {
+			tr.ReplaceOrInsert(v)
+		}
+		b.StartTimer()
+		for _, item := range removeP {
+			tr = tr.Clone()
+			tr.Get(item)
+			i++
+			if i >= b.N {
+				return
+			}
+		}
+	}
+}
+
+func BenchmarkAscendG(b *testing.B) {
+	arr := rand.Perm(benchmarkTreeSize)
+	tr := NewOrderedG[int](*btreeDegree)
+	for _, v := range arr {
+		tr.ReplaceOrInsert(v)
+	}
+	sort.Ints(arr)
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		j := 0
+		tr.Ascend(func(item int) bool {
+			if item != arr[j] {
+				b.Fatalf("mismatch: expected: %v, got %v", arr[j], item)
+			}
+			j++
+			return true
+		})
+	}
+}
+
+func BenchmarkDescendG(b *testing.B) {
+	arr := rand.Perm(benchmarkTreeSize)
+	tr := NewOrderedG[int](*btreeDegree)
+	for _, v := range arr {
+		tr.ReplaceOrInsert(v)
+	}
+	sort.Ints(arr)
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		j := len(arr) - 1
+		tr.Descend(func(item int) bool {
+			if item != arr[j] {
+				b.Fatalf("mismatch: expected: %v, got %v", arr[j], item)
+			}
+			j--
+			return true
+		})
+	}
+}
+
+func BenchmarkAscendRangeG(b *testing.B) {
+	arr := rand.Perm(benchmarkTreeSize)
+	tr := NewOrderedG[int](*btreeDegree)
+	for _, v := range arr {
+		tr.ReplaceOrInsert(v)
+	}
+	sort.Ints(arr)
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		j := 100
+		tr.AscendRange(100, arr[len(arr)-100], func(item int) bool {
+			if item != arr[j] {
+				b.Fatalf("mismatch: expected: %v, got %v", arr[j], item)
+			}
+			j++
+			return true
+		})
+		if j != len(arr)-100 {
+			b.Fatalf("expected: %v, got %v", len(arr)-100, j)
+		}
+	}
+}
+
+func BenchmarkDescendRangeG(b *testing.B) {
+	arr := rand.Perm(benchmarkTreeSize)
+	tr := NewOrderedG[int](*btreeDegree)
+	for _, v := range arr {
+		tr.ReplaceOrInsert(v)
+	}
+	sort.Ints(arr)
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		j := len(arr) - 100
+		tr.DescendRange(arr[len(arr)-100], 100, func(item int) bool {
+			if item != arr[j] {
+				b.Fatalf("mismatch: expected: %v, got %v", arr[j], item)
+			}
+			j--
+			return true
+		})
+		if j != 100 {
+			b.Fatalf("expected: %v, got %v", 100, j)
+		}
+	}
+}
+
+func BenchmarkAscendGreaterOrEqualG(b *testing.B) {
+	arr := rand.Perm(benchmarkTreeSize)
+	tr := NewOrderedG[int](*btreeDegree)
+	for _, v := range arr {
+		tr.ReplaceOrInsert(v)
+	}
+	sort.Ints(arr)
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		j := 100
+		k := 0
+		tr.AscendGreaterOrEqual(100, func(item int) bool {
+			if item != arr[j] {
+				b.Fatalf("mismatch: expected: %v, got %v", arr[j], item)
+			}
+			j++
+			k++
+			return true
+		})
+		if j != len(arr) {
+			b.Fatalf("expected: %v, got %v", len(arr), j)
+		}
+		if k != len(arr)-100 {
+			b.Fatalf("expected: %v, got %v", len(arr)-100, k)
+		}
+	}
+}
+
+func BenchmarkDescendLessOrEqualG(b *testing.B) {
+	arr := rand.Perm(benchmarkTreeSize)
+	tr := NewOrderedG[int](*btreeDegree)
+	for _, v := range arr {
+		tr.ReplaceOrInsert(v)
+	}
+	sort.Ints(arr)
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		j := len(arr) - 100
+		k := len(arr)
+		tr.DescendLessOrEqual(arr[len(arr)-100], func(item int) bool {
+			if item != arr[j] {
+				b.Fatalf("mismatch: expected: %v, got %v", arr[j], item)
+			}
+			j--
+			k--
+			return true
+		})
+		if j != -1 {
+			b.Fatalf("expected: %v, got %v", -1, j)
+		}
+		if k != 99 {
+			b.Fatalf("expected: %v, got %v", 99, k)
+		}
+	}
+}
+
+func cloneTestG(t *testing.T, b *BTreeG[int], start int, p []int, wg *sync.WaitGroup, trees *[]*BTreeG[int], lock *sync.Mutex) {
+	t.Logf("Starting new clone at %v", start)
+	lock.Lock()
+	*trees = append(*trees, b)
+	lock.Unlock()
+	for i := start; i < cloneTestSize; i++ {
+		b.ReplaceOrInsert(p[i])
+		if i%(cloneTestSize/5) == 0 {
+			wg.Add(1)
+			go cloneTestG(t, b.Clone(), i+1, p, wg, trees, lock)
+		}
+	}
+	wg.Done()
+}
+
+func TestCloneConcurrentOperationsG(t *testing.T) {
+	b := NewOrderedG[int](*btreeDegree)
+	trees := []*BTreeG[int]{}
+	p := rand.Perm(cloneTestSize)
+	var wg sync.WaitGroup
+	wg.Add(1)
+	go cloneTestG(t, b, 0, p, &wg, &trees, &sync.Mutex{})
+	wg.Wait()
+	want := intRange(cloneTestSize, false)
+	t.Logf("Starting equality checks on %d trees", len(trees))
+	for i, tree := range trees {
+		if !reflect.DeepEqual(want, intAll(tree)) {
+			t.Errorf("tree %v mismatch", i)
+		}
+	}
+	t.Log("Removing half from first half")
+	toRemove := intRange(cloneTestSize, false)[cloneTestSize/2:]
+	for i := 0; i < len(trees)/2; i++ {
+		tree := trees[i]
+		wg.Add(1)
+		go func() {
+			for _, item := range toRemove {
+				tree.Delete(item)
+			}
+			wg.Done()
+		}()
+	}
+	wg.Wait()
+	t.Log("Checking all values again")
+	for i, tree := range trees {
+		var wantpart []int
+		if i < len(trees)/2 {
+			wantpart = want[:cloneTestSize/2]
+		} else {
+			wantpart = want
+		}
+		if got := intAll(tree); !reflect.DeepEqual(wantpart, got) {
+			t.Errorf("tree %v mismatch, want %v got %v", i, len(wantpart), len(got))
+		}
+	}
+}
+
+func BenchmarkDeleteAndRestoreG(b *testing.B) {
+	items := rand.Perm(16392)
+	b.ResetTimer()
+	b.Run(`CopyBigFreeList`, func(b *testing.B) {
+		fl := NewFreeListG[int](16392)
+		tr := NewWithFreeListG[int](*btreeDegree, Less[int](), fl)
+		for _, v := range items {
+			tr.ReplaceOrInsert(v)
+		}
+		b.ReportAllocs()
+		b.ResetTimer()
+		for i := 0; i < b.N; i++ {
+			dels := make([]int, 0, tr.Len())
+			tr.Ascend(func(b int) bool {
+				dels = append(dels, b)
+				return true
+			})
+			for _, del := range dels {
+				tr.Delete(del)
+			}
+			// tr is now empty, we make a new empty copy of it.
+			tr = NewWithFreeListG[int](*btreeDegree, Less[int](), fl)
+			for _, v := range items {
+				tr.ReplaceOrInsert(v)
+			}
+		}
+	})
+	b.Run(`Copy`, func(b *testing.B) {
+		tr := NewOrderedG[int](*btreeDegree)
+		for _, v := range items {
+			tr.ReplaceOrInsert(v)
+		}
+		b.ReportAllocs()
+		b.ResetTimer()
+		for i := 0; i < b.N; i++ {
+			dels := make([]int, 0, tr.Len())
+			tr.Ascend(func(b int) bool {
+				dels = append(dels, b)
+				return true
+			})
+			for _, del := range dels {
+				tr.Delete(del)
+			}
+			// tr is now empty, we make a new empty copy of it.
+			tr = NewOrderedG[int](*btreeDegree)
+			for _, v := range items {
+				tr.ReplaceOrInsert(v)
+			}
+		}
+	})
+	b.Run(`ClearBigFreelist`, func(b *testing.B) {
+		fl := NewFreeListG[int](16392)
+		tr := NewWithFreeListG[int](*btreeDegree, Less[int](), fl)
+		for _, v := range items {
+			tr.ReplaceOrInsert(v)
+		}
+		b.ReportAllocs()
+		b.ResetTimer()
+		for i := 0; i < b.N; i++ {
+			tr.Clear(true)
+			for _, v := range items {
+				tr.ReplaceOrInsert(v)
+			}
+		}
+	})
+	b.Run(`Clear`, func(b *testing.B) {
+		tr := NewOrderedG[int](*btreeDegree)
+		for _, v := range items {
+			tr.ReplaceOrInsert(v)
+		}
+		b.ReportAllocs()
+		b.ResetTimer()
+		for i := 0; i < b.N; i++ {
+			tr.Clear(true)
+			for _, v := range items {
+				tr.ReplaceOrInsert(v)
+			}
+		}
+	})
+}

+ 76 - 0
data_tool/src/github.com/google/btree/btree_mem.go

@@ -0,0 +1,76 @@
+// Copyright 2014 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build ignore
+
+// This binary compares memory usage between btree and gollrb.
+package main
+
+import (
+	"flag"
+	"fmt"
+	"math/rand"
+	"runtime"
+	"time"
+
+	"github.com/google/btree"
+	"github.com/petar/GoLLRB/llrb"
+)
+
+var (
+	size   = flag.Int("size", 1000000, "size of the tree to build")
+	degree = flag.Int("degree", 8, "degree of btree")
+	gollrb = flag.Bool("llrb", false, "use llrb instead of btree")
+)
+
+func main() {
+	flag.Parse()
+	vals := rand.Perm(*size)
+	var t, v interface{}
+	v = vals
+	var stats runtime.MemStats
+	for i := 0; i < 10; i++ {
+		runtime.GC()
+	}
+	fmt.Println("-------- BEFORE ----------")
+	runtime.ReadMemStats(&stats)
+	fmt.Printf("%+v\n", stats)
+	start := time.Now()
+	if *gollrb {
+		tr := llrb.New()
+		for _, v := range vals {
+			tr.ReplaceOrInsert(llrb.Int(v))
+		}
+		t = tr // keep it around
+	} else {
+		tr := btree.New(*degree)
+		for _, v := range vals {
+			tr.ReplaceOrInsert(btree.Int(v))
+		}
+		t = tr // keep it around
+	}
+	fmt.Printf("%v inserts in %v\n", *size, time.Since(start))
+	fmt.Println("-------- AFTER ----------")
+	runtime.ReadMemStats(&stats)
+	fmt.Printf("%+v\n", stats)
+	for i := 0; i < 10; i++ {
+		runtime.GC()
+	}
+	fmt.Println("-------- AFTER GC ----------")
+	runtime.ReadMemStats(&stats)
+	fmt.Printf("%+v\n", stats)
+	if t == v {
+		fmt.Println("to make sure vals and tree aren't GC'd")
+	}
+}

+ 792 - 0
data_tool/src/github.com/google/btree/btree_test.go

@@ -0,0 +1,792 @@
+// Copyright 2014 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package btree
+
+import (
+	"flag"
+	"fmt"
+	"math/rand"
+	"reflect"
+	"sort"
+	"sync"
+	"testing"
+	"time"
+)
+
+func init() {
+	seed := time.Now().Unix()
+	fmt.Println(seed)
+	rand.Seed(seed)
+}
+
+// perm returns a random permutation of n Int items in the range [0, n).
+func perm(n int) (out []Item) {
+	for _, v := range rand.Perm(n) {
+		out = append(out, Int(v))
+	}
+	return
+}
+
+// rang returns an ordered list of Int items in the range [0, n).
+func rang(n int) (out []Item) {
+	for i := 0; i < n; i++ {
+		out = append(out, Int(i))
+	}
+	return
+}
+
+// all extracts all items from a tree in order as a slice.
+func all(t *BTree) (out []Item) {
+	t.Ascend(func(a Item) bool {
+		out = append(out, a)
+		return true
+	})
+	return
+}
+
+// rangrev returns a reversed ordered list of Int items in the range [0, n).
+func rangrev(n int) (out []Item) {
+	for i := n - 1; i >= 0; i-- {
+		out = append(out, Int(i))
+	}
+	return
+}
+
+// allrev extracts all items from a tree in reverse order as a slice.
+func allrev(t *BTree) (out []Item) {
+	t.Descend(func(a Item) bool {
+		out = append(out, a)
+		return true
+	})
+	return
+}
+
+var btreeDegree = flag.Int("degree", 32, "B-Tree degree")
+
+func TestBTree(t *testing.T) {
+	tr := New(*btreeDegree)
+	const treeSize = 10000
+	for i := 0; i < 10; i++ {
+		if min := tr.Min(); min != nil {
+			t.Fatalf("empty min, got %+v", min)
+		}
+		if max := tr.Max(); max != nil {
+			t.Fatalf("empty max, got %+v", max)
+		}
+		for _, item := range perm(treeSize) {
+			if x := tr.ReplaceOrInsert(item); x != nil {
+				t.Fatal("insert found item", item)
+			}
+		}
+		for _, item := range perm(treeSize) {
+			if !tr.Has(item) {
+				t.Fatal("has did not find item", item)
+			}
+		}
+		for _, item := range perm(treeSize) {
+			if x := tr.ReplaceOrInsert(item); x == nil {
+				t.Fatal("insert didn't find item", item)
+			}
+		}
+		if min, want := tr.Min(), Item(Int(0)); min != want {
+			t.Fatalf("min: want %+v, got %+v", want, min)
+		}
+		if max, want := tr.Max(), Item(Int(treeSize-1)); max != want {
+			t.Fatalf("max: want %+v, got %+v", want, max)
+		}
+		got := all(tr)
+		want := rang(treeSize)
+		if !reflect.DeepEqual(got, want) {
+			t.Fatalf("mismatch:\n got: %v\nwant: %v", got, want)
+		}
+
+		gotrev := allrev(tr)
+		wantrev := rangrev(treeSize)
+		if !reflect.DeepEqual(gotrev, wantrev) {
+			t.Fatalf("mismatch:\n got: %v\nwant: %v", gotrev, wantrev)
+		}
+
+		for _, item := range perm(treeSize) {
+			if x := tr.Delete(item); x == nil {
+				t.Fatalf("didn't find %v", item)
+			}
+		}
+		if got = all(tr); len(got) > 0 {
+			t.Fatalf("some left!: %v", got)
+		}
+	}
+}
+
+func ExampleBTree() {
+	tr := New(*btreeDegree)
+	for i := Int(0); i < 10; i++ {
+		tr.ReplaceOrInsert(i)
+	}
+	fmt.Println("len:       ", tr.Len())
+	fmt.Println("get3:      ", tr.Get(Int(3)))
+	fmt.Println("get100:    ", tr.Get(Int(100)))
+	fmt.Println("del4:      ", tr.Delete(Int(4)))
+	fmt.Println("del100:    ", tr.Delete(Int(100)))
+	fmt.Println("replace5:  ", tr.ReplaceOrInsert(Int(5)))
+	fmt.Println("replace100:", tr.ReplaceOrInsert(Int(100)))
+	fmt.Println("min:       ", tr.Min())
+	fmt.Println("delmin:    ", tr.DeleteMin())
+	fmt.Println("max:       ", tr.Max())
+	fmt.Println("delmax:    ", tr.DeleteMax())
+	fmt.Println("len:       ", tr.Len())
+	// Output:
+	// len:        10
+	// get3:       3
+	// get100:     <nil>
+	// del4:       4
+	// del100:     <nil>
+	// replace5:   5
+	// replace100: <nil>
+	// min:        0
+	// delmin:     0
+	// max:        100
+	// delmax:     100
+	// len:        8
+}
+
+func TestDeleteMin(t *testing.T) {
+	tr := New(3)
+	for _, v := range perm(100) {
+		tr.ReplaceOrInsert(v)
+	}
+	var got []Item
+	for v := tr.DeleteMin(); v != nil; v = tr.DeleteMin() {
+		got = append(got, v)
+	}
+	if want := rang(100); !reflect.DeepEqual(got, want) {
+		t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want)
+	}
+}
+
+func TestDeleteMax(t *testing.T) {
+	tr := New(3)
+	for _, v := range perm(100) {
+		tr.ReplaceOrInsert(v)
+	}
+	var got []Item
+	for v := tr.DeleteMax(); v != nil; v = tr.DeleteMax() {
+		got = append(got, v)
+	}
+	// Reverse our list.
+	for i := 0; i < len(got)/2; i++ {
+		got[i], got[len(got)-i-1] = got[len(got)-i-1], got[i]
+	}
+	if want := rang(100); !reflect.DeepEqual(got, want) {
+		t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want)
+	}
+}
+
+func TestAscendRange(t *testing.T) {
+	tr := New(2)
+	for _, v := range perm(100) {
+		tr.ReplaceOrInsert(v)
+	}
+	var got []Item
+	tr.AscendRange(Int(40), Int(60), func(a Item) bool {
+		got = append(got, a)
+		return true
+	})
+	if want := rang(100)[40:60]; !reflect.DeepEqual(got, want) {
+		t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want)
+	}
+	got = got[:0]
+	tr.AscendRange(Int(40), Int(60), func(a Item) bool {
+		if a.(Int) > 50 {
+			return false
+		}
+		got = append(got, a)
+		return true
+	})
+	if want := rang(100)[40:51]; !reflect.DeepEqual(got, want) {
+		t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want)
+	}
+}
+
+func TestDescendRange(t *testing.T) {
+	tr := New(2)
+	for _, v := range perm(100) {
+		tr.ReplaceOrInsert(v)
+	}
+	var got []Item
+	tr.DescendRange(Int(60), Int(40), func(a Item) bool {
+		got = append(got, a)
+		return true
+	})
+	if want := rangrev(100)[39:59]; !reflect.DeepEqual(got, want) {
+		t.Fatalf("descendrange:\n got: %v\nwant: %v", got, want)
+	}
+	got = got[:0]
+	tr.DescendRange(Int(60), Int(40), func(a Item) bool {
+		if a.(Int) < 50 {
+			return false
+		}
+		got = append(got, a)
+		return true
+	})
+	if want := rangrev(100)[39:50]; !reflect.DeepEqual(got, want) {
+		t.Fatalf("descendrange:\n got: %v\nwant: %v", got, want)
+	}
+}
+func TestAscendLessThan(t *testing.T) {
+	tr := New(*btreeDegree)
+	for _, v := range perm(100) {
+		tr.ReplaceOrInsert(v)
+	}
+	var got []Item
+	tr.AscendLessThan(Int(60), func(a Item) bool {
+		got = append(got, a)
+		return true
+	})
+	if want := rang(100)[:60]; !reflect.DeepEqual(got, want) {
+		t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want)
+	}
+	got = got[:0]
+	tr.AscendLessThan(Int(60), func(a Item) bool {
+		if a.(Int) > 50 {
+			return false
+		}
+		got = append(got, a)
+		return true
+	})
+	if want := rang(100)[:51]; !reflect.DeepEqual(got, want) {
+		t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want)
+	}
+}
+
+func TestDescendLessOrEqual(t *testing.T) {
+	tr := New(*btreeDegree)
+	for _, v := range perm(100) {
+		tr.ReplaceOrInsert(v)
+	}
+	var got []Item
+	tr.DescendLessOrEqual(Int(40), func(a Item) bool {
+		got = append(got, a)
+		return true
+	})
+	if want := rangrev(100)[59:]; !reflect.DeepEqual(got, want) {
+		t.Fatalf("descendlessorequal:\n got: %v\nwant: %v", got, want)
+	}
+	got = got[:0]
+	tr.DescendLessOrEqual(Int(60), func(a Item) bool {
+		if a.(Int) < 50 {
+			return false
+		}
+		got = append(got, a)
+		return true
+	})
+	if want := rangrev(100)[39:50]; !reflect.DeepEqual(got, want) {
+		t.Fatalf("descendlessorequal:\n got: %v\nwant: %v", got, want)
+	}
+}
+func TestAscendGreaterOrEqual(t *testing.T) {
+	tr := New(*btreeDegree)
+	for _, v := range perm(100) {
+		tr.ReplaceOrInsert(v)
+	}
+	var got []Item
+	tr.AscendGreaterOrEqual(Int(40), func(a Item) bool {
+		got = append(got, a)
+		return true
+	})
+	if want := rang(100)[40:]; !reflect.DeepEqual(got, want) {
+		t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want)
+	}
+	got = got[:0]
+	tr.AscendGreaterOrEqual(Int(40), func(a Item) bool {
+		if a.(Int) > 50 {
+			return false
+		}
+		got = append(got, a)
+		return true
+	})
+	if want := rang(100)[40:51]; !reflect.DeepEqual(got, want) {
+		t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want)
+	}
+}
+
+func TestDescendGreaterThan(t *testing.T) {
+	tr := New(*btreeDegree)
+	for _, v := range perm(100) {
+		tr.ReplaceOrInsert(v)
+	}
+	var got []Item
+	tr.DescendGreaterThan(Int(40), func(a Item) bool {
+		got = append(got, a)
+		return true
+	})
+	if want := rangrev(100)[:59]; !reflect.DeepEqual(got, want) {
+		t.Fatalf("descendgreaterthan:\n got: %v\nwant: %v", got, want)
+	}
+	got = got[:0]
+	tr.DescendGreaterThan(Int(40), func(a Item) bool {
+		if a.(Int) < 50 {
+			return false
+		}
+		got = append(got, a)
+		return true
+	})
+	if want := rangrev(100)[:50]; !reflect.DeepEqual(got, want) {
+		t.Fatalf("descendgreaterthan:\n got: %v\nwant: %v", got, want)
+	}
+}
+
+const benchmarkTreeSize = 10000
+
+func BenchmarkInsert(b *testing.B) {
+	b.StopTimer()
+	insertP := perm(benchmarkTreeSize)
+	b.StartTimer()
+	i := 0
+	for i < b.N {
+		tr := New(*btreeDegree)
+		for _, item := range insertP {
+			tr.ReplaceOrInsert(item)
+			i++
+			if i >= b.N {
+				return
+			}
+		}
+	}
+}
+
+func BenchmarkSeek(b *testing.B) {
+	b.StopTimer()
+	size := 100000
+	insertP := perm(size)
+	tr := New(*btreeDegree)
+	for _, item := range insertP {
+		tr.ReplaceOrInsert(item)
+	}
+	b.StartTimer()
+
+	for i := 0; i < b.N; i++ {
+		tr.AscendGreaterOrEqual(Int(i%size), func(i Item) bool { return false })
+	}
+}
+
+func BenchmarkDeleteInsert(b *testing.B) {
+	b.StopTimer()
+	insertP := perm(benchmarkTreeSize)
+	tr := New(*btreeDegree)
+	for _, item := range insertP {
+		tr.ReplaceOrInsert(item)
+	}
+	b.StartTimer()
+	for i := 0; i < b.N; i++ {
+		tr.Delete(insertP[i%benchmarkTreeSize])
+		tr.ReplaceOrInsert(insertP[i%benchmarkTreeSize])
+	}
+}
+
+func BenchmarkDeleteInsertCloneOnce(b *testing.B) {
+	b.StopTimer()
+	insertP := perm(benchmarkTreeSize)
+	tr := New(*btreeDegree)
+	for _, item := range insertP {
+		tr.ReplaceOrInsert(item)
+	}
+	tr = tr.Clone()
+	b.StartTimer()
+	for i := 0; i < b.N; i++ {
+		tr.Delete(insertP[i%benchmarkTreeSize])
+		tr.ReplaceOrInsert(insertP[i%benchmarkTreeSize])
+	}
+}
+
+func BenchmarkDeleteInsertCloneEachTime(b *testing.B) {
+	b.StopTimer()
+	insertP := perm(benchmarkTreeSize)
+	tr := New(*btreeDegree)
+	for _, item := range insertP {
+		tr.ReplaceOrInsert(item)
+	}
+	b.StartTimer()
+	for i := 0; i < b.N; i++ {
+		tr = tr.Clone()
+		tr.Delete(insertP[i%benchmarkTreeSize])
+		tr.ReplaceOrInsert(insertP[i%benchmarkTreeSize])
+	}
+}
+
+func BenchmarkDelete(b *testing.B) {
+	b.StopTimer()
+	insertP := perm(benchmarkTreeSize)
+	removeP := perm(benchmarkTreeSize)
+	b.StartTimer()
+	i := 0
+	for i < b.N {
+		b.StopTimer()
+		tr := New(*btreeDegree)
+		for _, v := range insertP {
+			tr.ReplaceOrInsert(v)
+		}
+		b.StartTimer()
+		for _, item := range removeP {
+			tr.Delete(item)
+			i++
+			if i >= b.N {
+				return
+			}
+		}
+		if tr.Len() > 0 {
+			panic(tr.Len())
+		}
+	}
+}
+
+func BenchmarkGet(b *testing.B) {
+	b.StopTimer()
+	insertP := perm(benchmarkTreeSize)
+	removeP := perm(benchmarkTreeSize)
+	b.StartTimer()
+	i := 0
+	for i < b.N {
+		b.StopTimer()
+		tr := New(*btreeDegree)
+		for _, v := range insertP {
+			tr.ReplaceOrInsert(v)
+		}
+		b.StartTimer()
+		for _, item := range removeP {
+			tr.Get(item)
+			i++
+			if i >= b.N {
+				return
+			}
+		}
+	}
+}
+
+func BenchmarkGetCloneEachTime(b *testing.B) {
+	b.StopTimer()
+	insertP := perm(benchmarkTreeSize)
+	removeP := perm(benchmarkTreeSize)
+	b.StartTimer()
+	i := 0
+	for i < b.N {
+		b.StopTimer()
+		tr := New(*btreeDegree)
+		for _, v := range insertP {
+			tr.ReplaceOrInsert(v)
+		}
+		b.StartTimer()
+		for _, item := range removeP {
+			tr = tr.Clone()
+			tr.Get(item)
+			i++
+			if i >= b.N {
+				return
+			}
+		}
+	}
+}
+
+type byInts []Item
+
+func (a byInts) Len() int {
+	return len(a)
+}
+
+func (a byInts) Less(i, j int) bool {
+	return a[i].(Int) < a[j].(Int)
+}
+
+func (a byInts) Swap(i, j int) {
+	a[i], a[j] = a[j], a[i]
+}
+
+func BenchmarkAscend(b *testing.B) {
+	arr := perm(benchmarkTreeSize)
+	tr := New(*btreeDegree)
+	for _, v := range arr {
+		tr.ReplaceOrInsert(v)
+	}
+	sort.Sort(byInts(arr))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		j := 0
+		tr.Ascend(func(item Item) bool {
+			if item.(Int) != arr[j].(Int) {
+				b.Fatalf("mismatch: expected: %v, got %v", arr[j].(Int), item.(Int))
+			}
+			j++
+			return true
+		})
+	}
+}
+
+func BenchmarkDescend(b *testing.B) {
+	arr := perm(benchmarkTreeSize)
+	tr := New(*btreeDegree)
+	for _, v := range arr {
+		tr.ReplaceOrInsert(v)
+	}
+	sort.Sort(byInts(arr))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		j := len(arr) - 1
+		tr.Descend(func(item Item) bool {
+			if item.(Int) != arr[j].(Int) {
+				b.Fatalf("mismatch: expected: %v, got %v", arr[j].(Int), item.(Int))
+			}
+			j--
+			return true
+		})
+	}
+}
+func BenchmarkAscendRange(b *testing.B) {
+	arr := perm(benchmarkTreeSize)
+	tr := New(*btreeDegree)
+	for _, v := range arr {
+		tr.ReplaceOrInsert(v)
+	}
+	sort.Sort(byInts(arr))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		j := 100
+		tr.AscendRange(Int(100), arr[len(arr)-100], func(item Item) bool {
+			if item.(Int) != arr[j].(Int) {
+				b.Fatalf("mismatch: expected: %v, got %v", arr[j].(Int), item.(Int))
+			}
+			j++
+			return true
+		})
+		if j != len(arr)-100 {
+			b.Fatalf("expected: %v, got %v", len(arr)-100, j)
+		}
+	}
+}
+
+func BenchmarkDescendRange(b *testing.B) {
+	arr := perm(benchmarkTreeSize)
+	tr := New(*btreeDegree)
+	for _, v := range arr {
+		tr.ReplaceOrInsert(v)
+	}
+	sort.Sort(byInts(arr))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		j := len(arr) - 100
+		tr.DescendRange(arr[len(arr)-100], Int(100), func(item Item) bool {
+			if item.(Int) != arr[j].(Int) {
+				b.Fatalf("mismatch: expected: %v, got %v", arr[j].(Int), item.(Int))
+			}
+			j--
+			return true
+		})
+		if j != 100 {
+			b.Fatalf("expected: %v, got %v", 100, j)
+		}
+	}
+}
+func BenchmarkAscendGreaterOrEqual(b *testing.B) {
+	arr := perm(benchmarkTreeSize)
+	tr := New(*btreeDegree)
+	for _, v := range arr {
+		tr.ReplaceOrInsert(v)
+	}
+	sort.Sort(byInts(arr))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		j := 100
+		k := 0
+		tr.AscendGreaterOrEqual(Int(100), func(item Item) bool {
+			if item.(Int) != arr[j].(Int) {
+				b.Fatalf("mismatch: expected: %v, got %v", arr[j].(Int), item.(Int))
+			}
+			j++
+			k++
+			return true
+		})
+		if j != len(arr) {
+			b.Fatalf("expected: %v, got %v", len(arr), j)
+		}
+		if k != len(arr)-100 {
+			b.Fatalf("expected: %v, got %v", len(arr)-100, k)
+		}
+	}
+}
+func BenchmarkDescendLessOrEqual(b *testing.B) {
+	arr := perm(benchmarkTreeSize)
+	tr := New(*btreeDegree)
+	for _, v := range arr {
+		tr.ReplaceOrInsert(v)
+	}
+	sort.Sort(byInts(arr))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		j := len(arr) - 100
+		k := len(arr)
+		tr.DescendLessOrEqual(arr[len(arr)-100], func(item Item) bool {
+			if item.(Int) != arr[j].(Int) {
+				b.Fatalf("mismatch: expected: %v, got %v", arr[j].(Int), item.(Int))
+			}
+			j--
+			k--
+			return true
+		})
+		if j != -1 {
+			b.Fatalf("expected: %v, got %v", -1, j)
+		}
+		if k != 99 {
+			b.Fatalf("expected: %v, got %v", 99, k)
+		}
+	}
+}
+
+const cloneTestSize = 10000
+
+func cloneTest(t *testing.T, b *BTree, start int, p []Item, wg *sync.WaitGroup, trees *[]*BTree, lock *sync.Mutex) {
+	t.Logf("Starting new clone at %v", start)
+	lock.Lock()
+	*trees = append(*trees, b)
+	lock.Unlock()
+	for i := start; i < cloneTestSize; i++ {
+		b.ReplaceOrInsert(p[i])
+		if i%(cloneTestSize/5) == 0 {
+			wg.Add(1)
+			go cloneTest(t, b.Clone(), i+1, p, wg, trees, lock)
+		}
+	}
+	wg.Done()
+}
+
+func TestCloneConcurrentOperations(t *testing.T) {
+	b := New(*btreeDegree)
+	trees := []*BTree{}
+	p := perm(cloneTestSize)
+	var wg sync.WaitGroup
+	wg.Add(1)
+	go cloneTest(t, b, 0, p, &wg, &trees, &sync.Mutex{})
+	wg.Wait()
+	want := rang(cloneTestSize)
+	t.Logf("Starting equality checks on %d trees", len(trees))
+	for i, tree := range trees {
+		if !reflect.DeepEqual(want, all(tree)) {
+			t.Errorf("tree %v mismatch", i)
+		}
+	}
+	t.Log("Removing half from first half")
+	toRemove := rang(cloneTestSize)[cloneTestSize/2:]
+	for i := 0; i < len(trees)/2; i++ {
+		tree := trees[i]
+		wg.Add(1)
+		go func() {
+			for _, item := range toRemove {
+				tree.Delete(item)
+			}
+			wg.Done()
+		}()
+	}
+	wg.Wait()
+	t.Log("Checking all values again")
+	for i, tree := range trees {
+		var wantpart []Item
+		if i < len(trees)/2 {
+			wantpart = want[:cloneTestSize/2]
+		} else {
+			wantpart = want
+		}
+		if got := all(tree); !reflect.DeepEqual(wantpart, got) {
+			t.Errorf("tree %v mismatch, want %v got %v", i, len(wantpart), len(got))
+		}
+	}
+}
+
+func BenchmarkDeleteAndRestore(b *testing.B) {
+	items := perm(16392)
+	b.ResetTimer()
+	b.Run(`CopyBigFreeList`, func(b *testing.B) {
+		fl := NewFreeList(16392)
+		tr := NewWithFreeList(*btreeDegree, fl)
+		for _, v := range items {
+			tr.ReplaceOrInsert(v)
+		}
+		b.ReportAllocs()
+		b.ResetTimer()
+		for i := 0; i < b.N; i++ {
+			dels := make([]Item, 0, tr.Len())
+			tr.Ascend(ItemIterator(func(b Item) bool {
+				dels = append(dels, b)
+				return true
+			}))
+			for _, del := range dels {
+				tr.Delete(del)
+			}
+			// tr is now empty, we make a new empty copy of it.
+			tr = NewWithFreeList(*btreeDegree, fl)
+			for _, v := range items {
+				tr.ReplaceOrInsert(v)
+			}
+		}
+	})
+	b.Run(`Copy`, func(b *testing.B) {
+		tr := New(*btreeDegree)
+		for _, v := range items {
+			tr.ReplaceOrInsert(v)
+		}
+		b.ReportAllocs()
+		b.ResetTimer()
+		for i := 0; i < b.N; i++ {
+			dels := make([]Item, 0, tr.Len())
+			tr.Ascend(ItemIterator(func(b Item) bool {
+				dels = append(dels, b)
+				return true
+			}))
+			for _, del := range dels {
+				tr.Delete(del)
+			}
+			// tr is now empty, we make a new empty copy of it.
+			tr = New(*btreeDegree)
+			for _, v := range items {
+				tr.ReplaceOrInsert(v)
+			}
+		}
+	})
+	b.Run(`ClearBigFreelist`, func(b *testing.B) {
+		fl := NewFreeList(16392)
+		tr := NewWithFreeList(*btreeDegree, fl)
+		for _, v := range items {
+			tr.ReplaceOrInsert(v)
+		}
+		b.ReportAllocs()
+		b.ResetTimer()
+		for i := 0; i < b.N; i++ {
+			tr.Clear(true)
+			for _, v := range items {
+				tr.ReplaceOrInsert(v)
+			}
+		}
+	})
+	b.Run(`Clear`, func(b *testing.B) {
+		tr := New(*btreeDegree)
+		for _, v := range items {
+			tr.ReplaceOrInsert(v)
+		}
+		b.ReportAllocs()
+		b.ResetTimer()
+		for i := 0; i < b.N; i++ {
+			tr.Clear(true)
+			for _, v := range items {
+				tr.ReplaceOrInsert(v)
+			}
+		}
+	})
+}

+ 17 - 0
data_tool/src/github.com/google/btree/go.mod

@@ -0,0 +1,17 @@
+// Copyright 2014 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+module github.com/google/btree
+
+go 1.18

+ 10 - 0
data_tool/src/github.com/kr/pretty/.github/dependabot.yml

@@ -0,0 +1,10 @@
+version: 2
+updates:
+- package-ecosystem: gomod
+  directory: /
+  schedule:
+    interval: weekly
+- package-ecosystem: github-actions
+  directory: /
+  schedule:
+    interval: weekly

+ 17 - 0
data_tool/src/github.com/kr/pretty/.github/workflows/build-test.yml

@@ -0,0 +1,17 @@
+name: build-test
+on:
+  push:
+  pull_request:
+
+jobs:
+  test:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/setup-go@v2
+        with:
+          go-version: 1.13.x
+      - uses: actions/checkout@v3
+      - name: Build
+        run: go build .
+      - name: Test
+        run: go test -v .

+ 5 - 0
data_tool/src/github.com/kr/pretty/.gitignore

@@ -0,0 +1,5 @@
+[568].out
+_go*
+_test*
+_obj
+/.idea

+ 19 - 0
data_tool/src/github.com/kr/pretty/License

@@ -0,0 +1,19 @@
+Copyright 2012 Keith Rarick
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.

+ 9 - 0
data_tool/src/github.com/kr/pretty/Readme

@@ -0,0 +1,9 @@
+package pretty
+
+    import "github.com/kr/pretty"
+
+    Package pretty provides pretty-printing for Go values.
+
+Documentation
+
+    http://godoc.org/github.com/kr/pretty

+ 295 - 0
data_tool/src/github.com/kr/pretty/diff.go

@@ -0,0 +1,295 @@
+package pretty
+
+import (
+	"fmt"
+	"io"
+	"reflect"
+)
+
+type sbuf []string
+
+func (p *sbuf) Printf(format string, a ...interface{}) {
+	s := fmt.Sprintf(format, a...)
+	*p = append(*p, s)
+}
+
+// Diff returns a slice where each element describes
+// a difference between a and b.
+func Diff(a, b interface{}) (desc []string) {
+	Pdiff((*sbuf)(&desc), a, b)
+	return desc
+}
+
+// wprintfer calls Fprintf on w for each Printf call
+// with a trailing newline.
+type wprintfer struct{ w io.Writer }
+
+func (p *wprintfer) Printf(format string, a ...interface{}) {
+	fmt.Fprintf(p.w, format+"\n", a...)
+}
+
+// Fdiff writes to w a description of the differences between a and b.
+func Fdiff(w io.Writer, a, b interface{}) {
+	Pdiff(&wprintfer{w}, a, b)
+}
+
+type Printfer interface {
+	Printf(format string, a ...interface{})
+}
+
+// Pdiff prints to p a description of the differences between a and b.
+// It calls Printf once for each difference, with no trailing newline.
+// The standard library log.Logger is a Printfer.
+func Pdiff(p Printfer, a, b interface{}) {
+	d := diffPrinter{
+		w:        p,
+		aVisited: make(map[visit]visit),
+		bVisited: make(map[visit]visit),
+	}
+	d.diff(reflect.ValueOf(a), reflect.ValueOf(b))
+}
+
+type Logfer interface {
+	Logf(format string, a ...interface{})
+}
+
+// logprintfer calls Fprintf on w for each Printf call
+// with a trailing newline.
+type logprintfer struct{ l Logfer }
+
+func (p *logprintfer) Printf(format string, a ...interface{}) {
+	p.l.Logf(format, a...)
+}
+
+// Ldiff prints to l a description of the differences between a and b.
+// It calls Logf once for each difference, with no trailing newline.
+// The standard library testing.T and testing.B are Logfers.
+func Ldiff(l Logfer, a, b interface{}) {
+	Pdiff(&logprintfer{l}, a, b)
+}
+
+type diffPrinter struct {
+	w Printfer
+	l string // label
+
+	aVisited map[visit]visit
+	bVisited map[visit]visit
+}
+
+func (w diffPrinter) printf(f string, a ...interface{}) {
+	var l string
+	if w.l != "" {
+		l = w.l + ": "
+	}
+	w.w.Printf(l+f, a...)
+}
+
+func (w diffPrinter) diff(av, bv reflect.Value) {
+	if !av.IsValid() && bv.IsValid() {
+		w.printf("nil != %# v", formatter{v: bv, quote: true})
+		return
+	}
+	if av.IsValid() && !bv.IsValid() {
+		w.printf("%# v != nil", formatter{v: av, quote: true})
+		return
+	}
+	if !av.IsValid() && !bv.IsValid() {
+		return
+	}
+
+	at := av.Type()
+	bt := bv.Type()
+	if at != bt {
+		w.printf("%v != %v", at, bt)
+		return
+	}
+
+	if av.CanAddr() && bv.CanAddr() {
+		avis := visit{av.UnsafeAddr(), at}
+		bvis := visit{bv.UnsafeAddr(), bt}
+		var cycle bool
+
+		// Have we seen this value before?
+		if vis, ok := w.aVisited[avis]; ok {
+			cycle = true
+			if vis != bvis {
+				w.printf("%# v (previously visited) != %# v", formatter{v: av, quote: true}, formatter{v: bv, quote: true})
+			}
+		} else if _, ok := w.bVisited[bvis]; ok {
+			cycle = true
+			w.printf("%# v != %# v (previously visited)", formatter{v: av, quote: true}, formatter{v: bv, quote: true})
+		}
+		w.aVisited[avis] = bvis
+		w.bVisited[bvis] = avis
+		if cycle {
+			return
+		}
+	}
+
+	switch kind := at.Kind(); kind {
+	case reflect.Bool:
+		if a, b := av.Bool(), bv.Bool(); a != b {
+			w.printf("%v != %v", a, b)
+		}
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		if a, b := av.Int(), bv.Int(); a != b {
+			w.printf("%d != %d", a, b)
+		}
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		if a, b := av.Uint(), bv.Uint(); a != b {
+			w.printf("%d != %d", a, b)
+		}
+	case reflect.Float32, reflect.Float64:
+		if a, b := av.Float(), bv.Float(); a != b {
+			w.printf("%v != %v", a, b)
+		}
+	case reflect.Complex64, reflect.Complex128:
+		if a, b := av.Complex(), bv.Complex(); a != b {
+			w.printf("%v != %v", a, b)
+		}
+	case reflect.Array:
+		n := av.Len()
+		for i := 0; i < n; i++ {
+			w.relabel(fmt.Sprintf("[%d]", i)).diff(av.Index(i), bv.Index(i))
+		}
+	case reflect.Chan, reflect.Func, reflect.UnsafePointer:
+		if a, b := av.Pointer(), bv.Pointer(); a != b {
+			w.printf("%#x != %#x", a, b)
+		}
+	case reflect.Interface:
+		w.diff(av.Elem(), bv.Elem())
+	case reflect.Map:
+		ak, both, bk := keyDiff(av.MapKeys(), bv.MapKeys())
+		for _, k := range ak {
+			w := w.relabel(fmt.Sprintf("[%#v]", k))
+			w.printf("%q != (missing)", av.MapIndex(k))
+		}
+		for _, k := range both {
+			w := w.relabel(fmt.Sprintf("[%#v]", k))
+			w.diff(av.MapIndex(k), bv.MapIndex(k))
+		}
+		for _, k := range bk {
+			w := w.relabel(fmt.Sprintf("[%#v]", k))
+			w.printf("(missing) != %q", bv.MapIndex(k))
+		}
+	case reflect.Ptr:
+		switch {
+		case av.IsNil() && !bv.IsNil():
+			w.printf("nil != %# v", formatter{v: bv, quote: true})
+		case !av.IsNil() && bv.IsNil():
+			w.printf("%# v != nil", formatter{v: av, quote: true})
+		case !av.IsNil() && !bv.IsNil():
+			w.diff(av.Elem(), bv.Elem())
+		}
+	case reflect.Slice:
+		lenA := av.Len()
+		lenB := bv.Len()
+		if lenA != lenB {
+			w.printf("%s[%d] != %s[%d]", av.Type(), lenA, bv.Type(), lenB)
+			break
+		}
+		for i := 0; i < lenA; i++ {
+			w.relabel(fmt.Sprintf("[%d]", i)).diff(av.Index(i), bv.Index(i))
+		}
+	case reflect.String:
+		if a, b := av.String(), bv.String(); a != b {
+			w.printf("%q != %q", a, b)
+		}
+	case reflect.Struct:
+		for i := 0; i < av.NumField(); i++ {
+			w.relabel(at.Field(i).Name).diff(av.Field(i), bv.Field(i))
+		}
+	default:
+		panic("unknown reflect Kind: " + kind.String())
+	}
+}
+
+func (d diffPrinter) relabel(name string) (d1 diffPrinter) {
+	d1 = d
+	if d.l != "" && name[0] != '[' {
+		d1.l += "."
+	}
+	d1.l += name
+	return d1
+}
+
+// keyEqual compares a and b for equality.
+// Both a and b must be valid map keys.
+func keyEqual(av, bv reflect.Value) bool {
+	if !av.IsValid() && !bv.IsValid() {
+		return true
+	}
+	if !av.IsValid() || !bv.IsValid() || av.Type() != bv.Type() {
+		return false
+	}
+	switch kind := av.Kind(); kind {
+	case reflect.Bool:
+		a, b := av.Bool(), bv.Bool()
+		return a == b
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		a, b := av.Int(), bv.Int()
+		return a == b
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		a, b := av.Uint(), bv.Uint()
+		return a == b
+	case reflect.Float32, reflect.Float64:
+		a, b := av.Float(), bv.Float()
+		return a == b
+	case reflect.Complex64, reflect.Complex128:
+		a, b := av.Complex(), bv.Complex()
+		return a == b
+	case reflect.Array:
+		for i := 0; i < av.Len(); i++ {
+			if !keyEqual(av.Index(i), bv.Index(i)) {
+				return false
+			}
+		}
+		return true
+	case reflect.Chan, reflect.UnsafePointer, reflect.Ptr:
+		a, b := av.Pointer(), bv.Pointer()
+		return a == b
+	case reflect.Interface:
+		return keyEqual(av.Elem(), bv.Elem())
+	case reflect.String:
+		a, b := av.String(), bv.String()
+		return a == b
+	case reflect.Struct:
+		for i := 0; i < av.NumField(); i++ {
+			if !keyEqual(av.Field(i), bv.Field(i)) {
+				return false
+			}
+		}
+		return true
+	default:
+		panic("invalid map key type " + av.Type().String())
+	}
+}
+
+func keyDiff(a, b []reflect.Value) (ak, both, bk []reflect.Value) {
+	for _, av := range a {
+		inBoth := false
+		for _, bv := range b {
+			if keyEqual(av, bv) {
+				inBoth = true
+				both = append(both, av)
+				break
+			}
+		}
+		if !inBoth {
+			ak = append(ak, av)
+		}
+	}
+	for _, bv := range b {
+		inBoth := false
+		for _, av := range a {
+			if keyEqual(av, bv) {
+				inBoth = true
+				break
+			}
+		}
+		if !inBoth {
+			bk = append(bk, bv)
+		}
+	}
+	return
+}
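
A brief usage sketch for the Diff helpers above, assuming only this vendored github.com/kr/pretty package (Pdiff and Ldiff accept any Printfer or Logfer, e.g. a log.Logger or a testing.T, in the same way):

    package main

    import (
        "fmt"
        "os"

        "github.com/kr/pretty"
    )

    type config struct {
        Host string
        Port int
    }

    func main() {
        a := config{Host: "localhost", Port: 8080}
        b := config{Host: "localhost", Port: 9090}

        // Diff returns one description per difference, e.g. "Port: 8080 != 9090".
        for _, d := range pretty.Diff(a, b) {
            fmt.Println(d)
        }

        // Fdiff writes the same descriptions, one per line, to an io.Writer.
        pretty.Fdiff(os.Stdout, a, b)
    }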

+ 257 - 0
data_tool/src/github.com/kr/pretty/diff_test.go

@@ -0,0 +1,257 @@
+package pretty
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+	"reflect"
+	"testing"
+	"unsafe"
+)
+
+var (
+	_ Logfer   = (*testing.T)(nil)
+	_ Logfer   = (*testing.B)(nil)
+	_ Printfer = (*log.Logger)(nil)
+)
+
+type difftest struct {
+	a   interface{}
+	b   interface{}
+	exp []string
+}
+
+type S struct {
+	A int
+	S *S
+	I interface{}
+	C []int
+}
+
+type (
+	N struct{ N int }
+	E interface{}
+)
+
+var (
+	c0 = make(chan int)
+	c1 = make(chan int)
+	f0 = func() {}
+	f1 = func() {}
+	i0 = 0
+	i1 = 1
+)
+
+var diffs = []difftest{
+	{a: nil, b: nil},
+	{a: S{A: 1}, b: S{A: 1}},
+
+	{0, "", []string{`int != string`}},
+	{0, 1, []string{`0 != 1`}},
+	{S{}, new(S), []string{`pretty.S != *pretty.S`}},
+	{"a", "b", []string{`"a" != "b"`}},
+	{S{}, S{A: 1}, []string{`A: 0 != 1`}},
+	{new(S), &S{A: 1}, []string{`A: 0 != 1`}},
+	{S{S: new(S)}, S{S: &S{A: 1}}, []string{`S.A: 0 != 1`}},
+	{S{}, S{I: 0}, []string{`I: nil != int(0)`}},
+	{S{I: 1}, S{I: "x"}, []string{`I: int != string`}},
+	{S{}, S{C: []int{1}}, []string{`C: []int[0] != []int[1]`}},
+	{S{C: []int{}}, S{C: []int{1}}, []string{`C: []int[0] != []int[1]`}},
+	{S{C: []int{1, 2, 3}}, S{C: []int{1, 2, 4}}, []string{`C[2]: 3 != 4`}},
+	{S{}, S{A: 1, S: new(S)}, []string{`A: 0 != 1`, `S: nil != &pretty.S{}`}},
+
+	// unexported fields of every reflect.Kind (both equal and unequal)
+	{struct{ x bool }{false}, struct{ x bool }{false}, nil},
+	{struct{ x bool }{false}, struct{ x bool }{true}, []string{`x: false != true`}},
+	{struct{ x int }{0}, struct{ x int }{0}, nil},
+	{struct{ x int }{0}, struct{ x int }{1}, []string{`x: 0 != 1`}},
+	{struct{ x int8 }{0}, struct{ x int8 }{0}, nil},
+	{struct{ x int8 }{0}, struct{ x int8 }{1}, []string{`x: 0 != 1`}},
+	{struct{ x int16 }{0}, struct{ x int16 }{0}, nil},
+	{struct{ x int16 }{0}, struct{ x int16 }{1}, []string{`x: 0 != 1`}},
+	{struct{ x int32 }{0}, struct{ x int32 }{0}, nil},
+	{struct{ x int32 }{0}, struct{ x int32 }{1}, []string{`x: 0 != 1`}},
+	{struct{ x int64 }{0}, struct{ x int64 }{0}, nil},
+	{struct{ x int64 }{0}, struct{ x int64 }{1}, []string{`x: 0 != 1`}},
+	{struct{ x uint }{0}, struct{ x uint }{0}, nil},
+	{struct{ x uint }{0}, struct{ x uint }{1}, []string{`x: 0 != 1`}},
+	{struct{ x uint8 }{0}, struct{ x uint8 }{0}, nil},
+	{struct{ x uint8 }{0}, struct{ x uint8 }{1}, []string{`x: 0 != 1`}},
+	{struct{ x uint16 }{0}, struct{ x uint16 }{0}, nil},
+	{struct{ x uint16 }{0}, struct{ x uint16 }{1}, []string{`x: 0 != 1`}},
+	{struct{ x uint32 }{0}, struct{ x uint32 }{0}, nil},
+	{struct{ x uint32 }{0}, struct{ x uint32 }{1}, []string{`x: 0 != 1`}},
+	{struct{ x uint64 }{0}, struct{ x uint64 }{0}, nil},
+	{struct{ x uint64 }{0}, struct{ x uint64 }{1}, []string{`x: 0 != 1`}},
+	{struct{ x uintptr }{0}, struct{ x uintptr }{0}, nil},
+	{struct{ x uintptr }{0}, struct{ x uintptr }{1}, []string{`x: 0 != 1`}},
+	{struct{ x float32 }{0}, struct{ x float32 }{0}, nil},
+	{struct{ x float32 }{0}, struct{ x float32 }{1}, []string{`x: 0 != 1`}},
+	{struct{ x float64 }{0}, struct{ x float64 }{0}, nil},
+	{struct{ x float64 }{0}, struct{ x float64 }{1}, []string{`x: 0 != 1`}},
+	{struct{ x complex64 }{0}, struct{ x complex64 }{0}, nil},
+	{struct{ x complex64 }{0}, struct{ x complex64 }{1}, []string{`x: (0+0i) != (1+0i)`}},
+	{struct{ x complex128 }{0}, struct{ x complex128 }{0}, nil},
+	{struct{ x complex128 }{0}, struct{ x complex128 }{1}, []string{`x: (0+0i) != (1+0i)`}},
+	{struct{ x [1]int }{[1]int{0}}, struct{ x [1]int }{[1]int{0}}, nil},
+	{struct{ x [1]int }{[1]int{0}}, struct{ x [1]int }{[1]int{1}}, []string{`x[0]: 0 != 1`}},
+	{struct{ x chan int }{c0}, struct{ x chan int }{c0}, nil},
+	{struct{ x chan int }{c0}, struct{ x chan int }{c1}, []string{fmt.Sprintf("x: %p != %p", c0, c1)}},
+	{struct{ x func() }{f0}, struct{ x func() }{f0}, nil},
+	{struct{ x func() }{f0}, struct{ x func() }{f1}, []string{fmt.Sprintf("x: %p != %p", f0, f1)}},
+	{struct{ x interface{} }{0}, struct{ x interface{} }{0}, nil},
+	{struct{ x interface{} }{0}, struct{ x interface{} }{1}, []string{`x: 0 != 1`}},
+	{struct{ x interface{} }{0}, struct{ x interface{} }{""}, []string{`x: int != string`}},
+	{struct{ x interface{} }{0}, struct{ x interface{} }{nil}, []string{`x: int(0) != nil`}},
+	{struct{ x interface{} }{nil}, struct{ x interface{} }{0}, []string{`x: nil != int(0)`}},
+	{struct{ x map[int]int }{map[int]int{0: 0}}, struct{ x map[int]int }{map[int]int{0: 0}}, nil},
+	{struct{ x map[int]int }{map[int]int{0: 0}}, struct{ x map[int]int }{map[int]int{0: 1}}, []string{`x[0]: 0 != 1`}},
+	{struct{ x *int }{new(int)}, struct{ x *int }{new(int)}, nil},
+	{struct{ x *int }{&i0}, struct{ x *int }{&i1}, []string{`x: 0 != 1`}},
+	{struct{ x *int }{nil}, struct{ x *int }{&i0}, []string{`x: nil != &int(0)`}},
+	{struct{ x *int }{&i0}, struct{ x *int }{nil}, []string{`x: &int(0) != nil`}},
+	{struct{ x []int }{[]int{0}}, struct{ x []int }{[]int{0}}, nil},
+	{struct{ x []int }{[]int{0}}, struct{ x []int }{[]int{1}}, []string{`x[0]: 0 != 1`}},
+	{struct{ x string }{"a"}, struct{ x string }{"a"}, nil},
+	{struct{ x string }{"a"}, struct{ x string }{"b"}, []string{`x: "a" != "b"`}},
+	{struct{ x N }{N{0}}, struct{ x N }{N{0}}, nil},
+	{struct{ x N }{N{0}}, struct{ x N }{N{1}}, []string{`x.N: 0 != 1`}},
+	{
+		struct{ x unsafe.Pointer }{unsafe.Pointer(uintptr(0))},
+		struct{ x unsafe.Pointer }{unsafe.Pointer(uintptr(0))},
+		nil,
+	},
+	{
+		struct{ x unsafe.Pointer }{unsafe.Pointer(uintptr(0))},
+		struct{ x unsafe.Pointer }{unsafe.Pointer(uintptr(1))},
+		[]string{`x: 0x0 != 0x1`},
+	},
+}
+
+func TestDiff(t *testing.T) {
+	for _, tt := range diffs {
+		expectDiffOutput(t, tt.a, tt.b, tt.exp)
+	}
+}
+
+func expectDiffOutput(t *testing.T, a, b interface{}, exp []string) {
+	got := Diff(a, b)
+	eq := len(got) == len(exp)
+	if eq {
+		for i := range got {
+			eq = eq && got[i] == exp[i]
+		}
+	}
+	if !eq {
+		t.Errorf("diffing % #v", a)
+		t.Errorf("with    % #v", b)
+		diffdiff(t, got, exp)
+	}
+}
+
+func TestKeyEqual(t *testing.T) {
+	var emptyInterfaceZero interface{} = 0
+
+	cases := []interface{}{
+		new(bool),
+		new(int),
+		new(int8),
+		new(int16),
+		new(int32),
+		new(int64),
+		new(uint),
+		new(uint8),
+		new(uint16),
+		new(uint32),
+		new(uint64),
+		new(uintptr),
+		new(float32),
+		new(float64),
+		new(complex64),
+		new(complex128),
+		new([1]int),
+		new(chan int),
+		new(unsafe.Pointer),
+		new(interface{}),
+		&emptyInterfaceZero,
+		new(*int),
+		new(string),
+		new(struct{ int }),
+	}
+
+	for _, test := range cases {
+		rv := reflect.ValueOf(test).Elem()
+		if !keyEqual(rv, rv) {
+			t.Errorf("keyEqual(%s, %s) = false want true", rv.Type(), rv.Type())
+		}
+	}
+}
+
+func TestFdiff(t *testing.T) {
+	var buf bytes.Buffer
+	Fdiff(&buf, 0, 1)
+	want := "0 != 1\n"
+	if got := buf.String(); got != want {
+		t.Errorf("Fdiff(0, 1) = %q want %q", got, want)
+	}
+}
+
+func TestDiffCycle(t *testing.T) {
+	// Diff two cyclic structs
+	a := &I{i: 1, R: nil}
+	a.R = a
+	b := &I{i: 2, R: nil}
+	b.R = b
+	expectDiffOutput(t, a, b, []string{
+		`i: 1 != 2`,
+	})
+
+	// Diff two equal cyclic structs
+	b.i = 1
+	expectDiffOutput(t, a, b, []string{})
+
+	// Diff two structs with different cycles
+	b2 := &I{i: 1, R: b}
+	b.R = b2
+	expectDiffOutput(t, a, b, []string{`R: pretty.I{
+    i:  1,
+    R:  &pretty.I{(CYCLIC REFERENCE)},
+} (previously visited) != pretty.I{
+    i:  1,
+    R:  &pretty.I{
+        i:  1,
+        R:  &pretty.I{(CYCLIC REFERENCE)},
+    },
+}`})
+
+	// ... and the same in the other direction
+	expectDiffOutput(t, b, a, []string{`R: pretty.I{
+    i:  1,
+    R:  &pretty.I{
+        i:  1,
+        R:  &pretty.I{(CYCLIC REFERENCE)},
+    },
+} != pretty.I{
+    i:  1,
+    R:  &pretty.I{(CYCLIC REFERENCE)},
+} (previously visited)`})
+}
+
+func diffdiff(t *testing.T, got, exp []string) {
+	minus(t, "unexpected:", got, exp)
+	minus(t, "missing:", exp, got)
+}
+
+func minus(t *testing.T, s string, a, b []string) {
+	var i, j int
+	for i = 0; i < len(a); i++ {
+		for j = 0; j < len(b); j++ {
+			if a[i] == b[j] {
+				break
+			}
+		}
+		if j == len(b) {
+			t.Error(s, a[i])
+		}
+	}
+}

+ 21 - 0
data_tool/src/github.com/kr/pretty/example_test.go

@@ -0,0 +1,21 @@
+package pretty_test
+
+import (
+	"fmt"
+
+	"github.com/kr/pretty"
+)
+
+func Example() {
+	type myType struct {
+		a, b int
+	}
+	var x = []myType{{1, 2}, {3, 4}, {5, 6}}
+	fmt.Printf("%# v", pretty.Formatter(x))
+	// output:
+	// []pretty_test.myType{
+	//     {a:1, b:2},
+	//     {a:3, b:4},
+	//     {a:5, b:6},
+	// }
+}

+ 355 - 0
data_tool/src/github.com/kr/pretty/formatter.go

@@ -0,0 +1,355 @@
+package pretty
+
+import (
+	"fmt"
+	"io"
+	"reflect"
+	"strconv"
+	"text/tabwriter"
+
+	"github.com/kr/text"
+	"github.com/rogpeppe/go-internal/fmtsort"
+)
+
+type formatter struct {
+	v     reflect.Value
+	force bool
+	quote bool
+}
+
+// Formatter makes a wrapper, f, that will format x as go source with line
+// breaks and tabs. Object f responds to the "%v" formatting verb when both the
+// "#" and " " (space) flags are set, for example:
+//
+//     fmt.Sprintf("%# v", Formatter(x))
+//
+// If one of these two flags is not set, or any other verb is used, f will
+// format x according to the usual rules of package fmt.
+// In particular, if x satisfies fmt.Formatter, then x.Format will be called.
+func Formatter(x interface{}) (f fmt.Formatter) {
+	return formatter{v: reflect.ValueOf(x), quote: true}
+}
+
+func (fo formatter) String() string {
+	return fmt.Sprint(fo.v.Interface()) // unwrap it
+}
+
+func (fo formatter) passThrough(f fmt.State, c rune) {
+	s := "%"
+	for i := 0; i < 128; i++ {
+		if f.Flag(i) {
+			s += string(rune(i))
+		}
+	}
+	if w, ok := f.Width(); ok {
+		s += fmt.Sprintf("%d", w)
+	}
+	if p, ok := f.Precision(); ok {
+		s += fmt.Sprintf(".%d", p)
+	}
+	s += string(c)
+	fmt.Fprintf(f, s, fo.v.Interface())
+}
+
+func (fo formatter) Format(f fmt.State, c rune) {
+	if fo.force || c == 'v' && f.Flag('#') && f.Flag(' ') {
+		w := tabwriter.NewWriter(f, 4, 4, 1, ' ', 0)
+		p := &printer{tw: w, Writer: w, visited: make(map[visit]int)}
+		p.printValue(fo.v, true, fo.quote)
+		w.Flush()
+		return
+	}
+	fo.passThrough(f, c)
+}
+
+type printer struct {
+	io.Writer
+	tw      *tabwriter.Writer
+	visited map[visit]int
+	depth   int
+}
+
+func (p *printer) indent() *printer {
+	q := *p
+	q.tw = tabwriter.NewWriter(p.Writer, 4, 4, 1, ' ', 0)
+	q.Writer = text.NewIndentWriter(q.tw, []byte{'\t'})
+	return &q
+}
+
+func (p *printer) printInline(v reflect.Value, x interface{}, showType bool) {
+	if showType {
+		io.WriteString(p, v.Type().String())
+		fmt.Fprintf(p, "(%#v)", x)
+	} else {
+		fmt.Fprintf(p, "%#v", x)
+	}
+}
+
+// printValue must keep track of already-printed pointer values to avoid
+// infinite recursion.
+type visit struct {
+	v   uintptr
+	typ reflect.Type
+}
+
+func (p *printer) catchPanic(v reflect.Value, method string) {
+	if r := recover(); r != nil {
+		if v.Kind() == reflect.Ptr && v.IsNil() {
+			writeByte(p, '(')
+			io.WriteString(p, v.Type().String())
+			io.WriteString(p, ")(nil)")
+			return
+		}
+		writeByte(p, '(')
+		io.WriteString(p, v.Type().String())
+		io.WriteString(p, ")(PANIC=calling method ")
+		io.WriteString(p, strconv.Quote(method))
+		io.WriteString(p, ": ")
+		fmt.Fprint(p, r)
+		writeByte(p, ')')
+	}
+}
+
+func (p *printer) printValue(v reflect.Value, showType, quote bool) {
+	if p.depth > 10 {
+		io.WriteString(p, "!%v(DEPTH EXCEEDED)")
+		return
+	}
+
+	if v.IsValid() && v.CanInterface() {
+		i := v.Interface()
+		if goStringer, ok := i.(fmt.GoStringer); ok {
+			defer p.catchPanic(v, "GoString")
+			io.WriteString(p, goStringer.GoString())
+			return
+		}
+	}
+
+	switch v.Kind() {
+	case reflect.Bool:
+		p.printInline(v, v.Bool(), showType)
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		p.printInline(v, v.Int(), showType)
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		p.printInline(v, v.Uint(), showType)
+	case reflect.Float32, reflect.Float64:
+		p.printInline(v, v.Float(), showType)
+	case reflect.Complex64, reflect.Complex128:
+		fmt.Fprintf(p, "%#v", v.Complex())
+	case reflect.String:
+		p.fmtString(v.String(), quote)
+	case reflect.Map:
+		t := v.Type()
+		if showType {
+			io.WriteString(p, t.String())
+		}
+		writeByte(p, '{')
+		if nonzero(v) {
+			expand := !canInline(v.Type())
+			pp := p
+			if expand {
+				writeByte(p, '\n')
+				pp = p.indent()
+			}
+			sm := fmtsort.Sort(v)
+			for i := 0; i < v.Len(); i++ {
+				k := sm.Key[i]
+				mv := sm.Value[i]
+				pp.printValue(k, false, true)
+				writeByte(pp, ':')
+				if expand {
+					writeByte(pp, '\t')
+				}
+				showTypeInStruct := t.Elem().Kind() == reflect.Interface
+				pp.printValue(mv, showTypeInStruct, true)
+				if expand {
+					io.WriteString(pp, ",\n")
+				} else if i < v.Len()-1 {
+					io.WriteString(pp, ", ")
+				}
+			}
+			if expand {
+				pp.tw.Flush()
+			}
+		}
+		writeByte(p, '}')
+	case reflect.Struct:
+		t := v.Type()
+		if v.CanAddr() {
+			addr := v.UnsafeAddr()
+			vis := visit{addr, t}
+			if vd, ok := p.visited[vis]; ok && vd < p.depth {
+				p.fmtString(t.String()+"{(CYCLIC REFERENCE)}", false)
+				break // don't print v again
+			}
+			p.visited[vis] = p.depth
+		}
+
+		if showType {
+			io.WriteString(p, t.String())
+		}
+		writeByte(p, '{')
+		if nonzero(v) {
+			expand := !canInline(v.Type())
+			pp := p
+			if expand {
+				writeByte(p, '\n')
+				pp = p.indent()
+			}
+			for i := 0; i < v.NumField(); i++ {
+				showTypeInStruct := true
+				if f := t.Field(i); f.Name != "" {
+					io.WriteString(pp, f.Name)
+					writeByte(pp, ':')
+					if expand {
+						writeByte(pp, '\t')
+					}
+					showTypeInStruct = labelType(f.Type)
+				}
+				pp.printValue(getField(v, i), showTypeInStruct, true)
+				if expand {
+					io.WriteString(pp, ",\n")
+				} else if i < v.NumField()-1 {
+					io.WriteString(pp, ", ")
+				}
+			}
+			if expand {
+				pp.tw.Flush()
+			}
+		}
+		writeByte(p, '}')
+	case reflect.Interface:
+		switch e := v.Elem(); {
+		case e.Kind() == reflect.Invalid:
+			io.WriteString(p, "nil")
+		case e.IsValid():
+			pp := *p
+			pp.depth++
+			pp.printValue(e, showType, true)
+		default:
+			io.WriteString(p, v.Type().String())
+			io.WriteString(p, "(nil)")
+		}
+	case reflect.Array, reflect.Slice:
+		t := v.Type()
+		if showType {
+			io.WriteString(p, t.String())
+		}
+		if v.Kind() == reflect.Slice && v.IsNil() && showType {
+			io.WriteString(p, "(nil)")
+			break
+		}
+		if v.Kind() == reflect.Slice && v.IsNil() {
+			io.WriteString(p, "nil")
+			break
+		}
+		writeByte(p, '{')
+		expand := !canInline(v.Type())
+		pp := p
+		if expand {
+			writeByte(p, '\n')
+			pp = p.indent()
+		}
+		for i := 0; i < v.Len(); i++ {
+			showTypeInSlice := t.Elem().Kind() == reflect.Interface
+			pp.printValue(v.Index(i), showTypeInSlice, true)
+			if expand {
+				io.WriteString(pp, ",\n")
+			} else if i < v.Len()-1 {
+				io.WriteString(pp, ", ")
+			}
+		}
+		if expand {
+			pp.tw.Flush()
+		}
+		writeByte(p, '}')
+	case reflect.Ptr:
+		e := v.Elem()
+		if !e.IsValid() {
+			writeByte(p, '(')
+			io.WriteString(p, v.Type().String())
+			io.WriteString(p, ")(nil)")
+		} else {
+			pp := *p
+			pp.depth++
+			writeByte(pp, '&')
+			pp.printValue(e, true, true)
+		}
+	case reflect.Chan:
+		x := v.Pointer()
+		if showType {
+			writeByte(p, '(')
+			io.WriteString(p, v.Type().String())
+			fmt.Fprintf(p, ")(%#v)", x)
+		} else {
+			fmt.Fprintf(p, "%#v", x)
+		}
+	case reflect.Func:
+		io.WriteString(p, v.Type().String())
+		io.WriteString(p, " {...}")
+	case reflect.UnsafePointer:
+		p.printInline(v, v.Pointer(), showType)
+	case reflect.Invalid:
+		io.WriteString(p, "nil")
+	}
+}
+
+func canInline(t reflect.Type) bool {
+	switch t.Kind() {
+	case reflect.Map:
+		return !canExpand(t.Elem())
+	case reflect.Struct:
+		for i := 0; i < t.NumField(); i++ {
+			if canExpand(t.Field(i).Type) {
+				return false
+			}
+		}
+		return true
+	case reflect.Interface:
+		return false
+	case reflect.Array, reflect.Slice:
+		return !canExpand(t.Elem())
+	case reflect.Ptr:
+		return false
+	case reflect.Chan, reflect.Func, reflect.UnsafePointer:
+		return false
+	}
+	return true
+}
+
+func canExpand(t reflect.Type) bool {
+	switch t.Kind() {
+	case reflect.Map, reflect.Struct,
+		reflect.Interface, reflect.Array, reflect.Slice,
+		reflect.Ptr:
+		return true
+	}
+	return false
+}
+
+func labelType(t reflect.Type) bool {
+	switch t.Kind() {
+	case reflect.Interface, reflect.Struct:
+		return true
+	}
+	return false
+}
+
+func (p *printer) fmtString(s string, quote bool) {
+	if quote {
+		s = strconv.Quote(s)
+	}
+	io.WriteString(p, s)
+}
+
+func writeByte(w io.Writer, b byte) {
+	w.Write([]byte{b})
+}
+
+func getField(v reflect.Value, i int) reflect.Value {
+	val := v.Field(i)
+	if val.Kind() == reflect.Interface && !val.IsNil() {
+		val = val.Elem()
+	}
+	return val
+}

+ 339 - 0
data_tool/src/github.com/kr/pretty/formatter_test.go

@@ -0,0 +1,339 @@
+package pretty
+
+import (
+	"fmt"
+	"io"
+	"strings"
+	"testing"
+	"time"
+	"unsafe"
+)
+
+type test struct {
+	v interface{}
+	s string
+}
+
+type passtest struct {
+	v    interface{}
+	f, s string
+}
+
+type LongStructTypeName struct {
+	longFieldName      interface{}
+	otherLongFieldName interface{}
+}
+
+type SA struct {
+	t *T
+	v T
+}
+
+type T struct {
+	x, y int
+}
+
+type F int
+
+func (f F) Format(s fmt.State, c rune) {
+	fmt.Fprintf(s, "F(%d)", int(f))
+}
+
+type Stringer struct{ i int }
+
+func (s *Stringer) String() string { return "foo" }
+
+var long = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
+
+var passthrough = []passtest{
+	{1, "%d", "1"},
+	{"a", "%s", "a"},
+	{&Stringer{}, "%s", "foo"},
+}
+
+func TestPassthrough(t *testing.T) {
+	for _, tt := range passthrough {
+		s := fmt.Sprintf(tt.f, Formatter(tt.v))
+		if tt.s != s {
+			t.Errorf("expected %q", tt.s)
+			t.Errorf("got      %q", s)
+			t.Errorf("expraw\n%s", tt.s)
+			t.Errorf("gotraw\n%s", s)
+		}
+	}
+}
+
+type StructWithPrivateFields struct {
+	A string
+	b string
+}
+
+func NewStructWithPrivateFields(a string) StructWithPrivateFields {
+	return StructWithPrivateFields{a, "fixedb"}
+}
+
+func (s StructWithPrivateFields) GoString() string {
+	return fmt.Sprintf("NewStructWithPrivateFields(%q)", s.A)
+}
+
+var gosyntax = []test{
+	{nil, `nil`},
+	{"", `""`},
+	{"a", `"a"`},
+	{1, "int(1)"},
+	{1.0, "float64(1)"},
+	{[]int(nil), "[]int(nil)"},
+	{[0]int{}, "[0]int{}"},
+	{complex(1, 0), "(1+0i)"},
+	//{make(chan int), "(chan int)(0x1234)"},
+	{unsafe.Pointer(uintptr(unsafe.Pointer(&long))), fmt.Sprintf("unsafe.Pointer(0x%02x)", uintptr(unsafe.Pointer(&long)))},
+	{func(int) {}, "func(int) {...}"},
+	{map[string]string{"a": "a", "b": "b"}, "map[string]string{\"a\":\"a\", \"b\":\"b\"}"},
+	{map[int]int{1: 1}, "map[int]int{1:1}"},
+	{int32(1), "int32(1)"},
+	{io.EOF, `&errors.errorString{s:"EOF"}`},
+	{[]string{"a"}, `[]string{"a"}`},
+	{
+		[]string{long},
+		`[]string{"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"}`,
+	},
+	{F(5), "pretty.F(5)"},
+	{NewStructWithPrivateFields("foo"), `NewStructWithPrivateFields("foo")`},
+	{
+		SA{&T{1, 2}, T{3, 4}},
+		`pretty.SA{
+    t:  &pretty.T{x:1, y:2},
+    v:  pretty.T{x:3, y:4},
+}`,
+	},
+	{
+		map[int][]byte{1: {}},
+		`map[int][]uint8{
+    1:  {},
+}`,
+	},
+	{
+		map[int]T{1: {}},
+		`map[int]pretty.T{
+    1:  {},
+}`,
+	},
+	{
+		long,
+		`"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"`,
+	},
+	{
+		LongStructTypeName{
+			longFieldName:      LongStructTypeName{},
+			otherLongFieldName: long,
+		},
+		`pretty.LongStructTypeName{
+    longFieldName:      pretty.LongStructTypeName{},
+    otherLongFieldName: "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789",
+}`,
+	},
+	{
+		&LongStructTypeName{
+			longFieldName:      &LongStructTypeName{},
+			otherLongFieldName: (*LongStructTypeName)(nil),
+		},
+		`&pretty.LongStructTypeName{
+    longFieldName:      &pretty.LongStructTypeName{},
+    otherLongFieldName: (*pretty.LongStructTypeName)(nil),
+}`,
+	},
+	{
+		[]LongStructTypeName{
+			{nil, nil},
+			{3, 3},
+			{long, nil},
+		},
+		`[]pretty.LongStructTypeName{
+    {},
+    {
+        longFieldName:      int(3),
+        otherLongFieldName: int(3),
+    },
+    {
+        longFieldName:      "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789",
+        otherLongFieldName: nil,
+    },
+}`,
+	},
+	{
+		[]interface{}{
+			LongStructTypeName{nil, nil},
+			[]byte{1, 2, 3},
+			T{3, 4},
+			LongStructTypeName{long, nil},
+		},
+		`[]interface {}{
+    pretty.LongStructTypeName{},
+    []uint8{0x1, 0x2, 0x3},
+    pretty.T{x:3, y:4},
+    pretty.LongStructTypeName{
+        longFieldName:      "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789",
+        otherLongFieldName: nil,
+    },
+}`,
+	},
+	{(*time.Time)(nil), "(*time.Time)(nil)"},
+	{&ValueGoString{"vgs"}, `VGS vgs`},
+	{(*ValueGoString)(nil), `(*pretty.ValueGoString)(nil)`},
+	{(*VGSWrapper)(nil), `(*pretty.VGSWrapper)(nil)`},
+	{&PointerGoString{"pgs"}, `PGS pgs`},
+	{(*PointerGoString)(nil), "(*pretty.PointerGoString)(nil)"},
+	{&PanicGoString{"oops!"}, `(*pretty.PanicGoString)(PANIC=calling method "GoString": oops!)`},
+}
+
+type ValueGoString struct {
+	s string
+}
+
+func (g ValueGoString) GoString() string {
+	return "VGS " + g.s
+}
+
+type VGSWrapper struct {
+	ValueGoString
+}
+
+type PointerGoString struct {
+	s string
+}
+
+func (g *PointerGoString) GoString() string {
+	return "PGS " + g.s
+}
+
+type PanicGoString struct {
+	s string
+}
+
+func (g *PanicGoString) GoString() string {
+	panic(g.s)
+}
+
+func TestGoSyntax(t *testing.T) {
+	for _, tt := range gosyntax {
+		s := fmt.Sprintf("%# v", Formatter(tt.v))
+		if tt.s != s {
+			t.Errorf("expected %q", tt.s)
+			t.Errorf("got      %q", s)
+			t.Errorf("expraw\n%s", tt.s)
+			t.Errorf("gotraw\n%s", s)
+		}
+	}
+}
+
+type I struct {
+	i int
+	R interface{}
+}
+
+func (i *I) I() *I { return i.R.(*I) }
+
+func TestCycle(t *testing.T) {
+	type A struct{ *A }
+	v := &A{}
+	v.A = v
+
+	// panics from stack overflow without cycle detection
+	t.Logf("Example cycle:\n%# v", Formatter(v))
+
+	p := &A{}
+	s := fmt.Sprintf("%# v", Formatter([]*A{p, p}))
+	if strings.Contains(s, "CYCLIC") {
+		t.Errorf("Repeated address detected as cyclic reference:\n%s", s)
+	}
+
+	type R struct {
+		i int
+		*R
+	}
+	r := &R{
+		i: 1,
+		R: &R{
+			i: 2,
+			R: &R{
+				i: 3,
+			},
+		},
+	}
+	r.R.R.R = r
+	t.Logf("Example longer cycle:\n%# v", Formatter(r))
+
+	r = &R{
+		i: 1,
+		R: &R{
+			i: 2,
+			R: &R{
+				i: 3,
+				R: &R{
+					i: 4,
+					R: &R{
+						i: 5,
+						R: &R{
+							i: 6,
+							R: &R{
+								i: 7,
+								R: &R{
+									i: 8,
+									R: &R{
+										i: 9,
+										R: &R{
+											i: 10,
+											R: &R{
+												i: 11,
+											},
+										},
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+	// here be pirates
+	r.R.R.R.R.R.R.R.R.R.R.R = r
+	t.Logf("Example very long cycle:\n%# v", Formatter(r))
+
+	i := &I{
+		i: 1,
+		R: &I{
+			i: 2,
+			R: &I{
+				i: 3,
+				R: &I{
+					i: 4,
+					R: &I{
+						i: 5,
+						R: &I{
+							i: 6,
+							R: &I{
+								i: 7,
+								R: &I{
+									i: 8,
+									R: &I{
+										i: 9,
+										R: &I{
+											i: 10,
+											R: &I{
+												i: 11,
+											},
+										},
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+	iv := i.I().I().I().I().I().I().I().I().I().I()
+	*iv = *i
+	t.Logf("Example long interface cycle:\n%# v", Formatter(i))
+}

+ 8 - 0
data_tool/src/github.com/kr/pretty/go.mod

@@ -0,0 +1,8 @@
+module github.com/kr/pretty
+
+go 1.12
+
+require (
+	github.com/kr/text v0.2.0
+	github.com/rogpeppe/go-internal v1.9.0
+)

+ 6 - 0
data_tool/src/github.com/kr/pretty/go.sum

@@ -0,0 +1,6 @@
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
+github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=

+ 108 - 0
data_tool/src/github.com/kr/pretty/pretty.go

@@ -0,0 +1,108 @@
+// Package pretty provides pretty-printing for Go values. This is
+// useful during debugging, to avoid wrapping long output lines in
+// the terminal.
+//
+// It provides a function, Formatter, that can be used with any
+// function that accepts a format string. It also provides
+// convenience wrappers for functions in packages fmt and log.
+package pretty
+
+import (
+	"fmt"
+	"io"
+	"log"
+	"reflect"
+)
+
+// Errorf is a convenience wrapper for fmt.Errorf.
+//
+// Calling Errorf(f, x, y) is equivalent to
+// fmt.Errorf(f, Formatter(x), Formatter(y)).
+func Errorf(format string, a ...interface{}) error {
+	return fmt.Errorf(format, wrap(a, false)...)
+}
+
+// Fprintf is a convenience wrapper for fmt.Fprintf.
+//
+// Calling Fprintf(w, f, x, y) is equivalent to
+// fmt.Fprintf(w, f, Formatter(x), Formatter(y)).
+func Fprintf(w io.Writer, format string, a ...interface{}) (n int, error error) {
+	return fmt.Fprintf(w, format, wrap(a, false)...)
+}
+
+// Log is a convenience wrapper for log.Printf.
+//
+// Calling Log(x, y) is equivalent to
+// log.Print(Formatter(x), Formatter(y)), but each operand is
+// formatted with "%# v".
+func Log(a ...interface{}) {
+	log.Print(wrap(a, true)...)
+}
+
+// Logf is a convenience wrapper for log.Printf.
+//
+// Calling Logf(f, x, y) is equivalent to
+// log.Printf(f, Formatter(x), Formatter(y)).
+func Logf(format string, a ...interface{}) {
+	log.Printf(format, wrap(a, false)...)
+}
+
+// Logln is a convenience wrapper for log.Printf.
+//
+// Calling Logln(x, y) is equivalent to
+// log.Println(Formatter(x), Formatter(y)), but each operand is
+// formatted with "%# v".
+func Logln(a ...interface{}) {
+	log.Println(wrap(a, true)...)
+}
+
+// Print pretty-prints its operands and writes to standard output.
+//
+// Calling Print(x, y) is equivalent to
+// fmt.Print(Formatter(x), Formatter(y)), but each operand is
+// formatted with "%# v".
+func Print(a ...interface{}) (n int, errno error) {
+	return fmt.Print(wrap(a, true)...)
+}
+
+// Printf is a convenience wrapper for fmt.Printf.
+//
+// Calling Printf(f, x, y) is equivalent to
+// fmt.Printf(f, Formatter(x), Formatter(y)).
+func Printf(format string, a ...interface{}) (n int, errno error) {
+	return fmt.Printf(format, wrap(a, false)...)
+}
+
+// Println pretty-prints its operands and writes to standard output.
+//
+// Calling Println(x, y) is equivalent to
+// fmt.Println(Formatter(x), Formatter(y)), but each operand is
+// formatted with "%# v".
+func Println(a ...interface{}) (n int, errno error) {
+	return fmt.Println(wrap(a, true)...)
+}
+
+// Sprint is a convenience wrapper for fmt.Sprintf.
+//
+// Calling Sprint(x, y) is equivalent to
+// fmt.Sprint(Formatter(x), Formatter(y)), but each operand is
+// formatted with "%# v".
+func Sprint(a ...interface{}) string {
+	return fmt.Sprint(wrap(a, true)...)
+}
+
+// Sprintf is a convenience wrapper for fmt.Sprintf.
+//
+// Calling Sprintf(f, x, y) is equivalent to
+// fmt.Sprintf(f, Formatter(x), Formatter(y)).
+func Sprintf(format string, a ...interface{}) string {
+	return fmt.Sprintf(format, wrap(a, false)...)
+}
+
+func wrap(a []interface{}, force bool) []interface{} {
+	w := make([]interface{}, len(a))
+	for i, x := range a {
+		w[i] = formatter{v: reflect.ValueOf(x), force: force}
+	}
+	return w
+}

+ 41 - 0
data_tool/src/github.com/kr/pretty/zero.go

@@ -0,0 +1,41 @@
+package pretty
+
+import (
+	"reflect"
+)
+
+func nonzero(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Bool:
+		return v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() != 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() != 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() != 0
+	case reflect.Complex64, reflect.Complex128:
+		return v.Complex() != complex(0, 0)
+	case reflect.String:
+		return v.String() != ""
+	case reflect.Struct:
+		for i := 0; i < v.NumField(); i++ {
+			if nonzero(getField(v, i)) {
+				return true
+			}
+		}
+		return false
+	case reflect.Array:
+		for i := 0; i < v.Len(); i++ {
+			if nonzero(v.Index(i)) {
+				return true
+			}
+		}
+		return false
+	case reflect.Map, reflect.Interface, reflect.Slice, reflect.Ptr, reflect.Chan, reflect.Func:
+		return !v.IsNil()
+	case reflect.UnsafePointer:
+		return v.Pointer() != 0
+	}
+	return true
+}

+ 10 - 0
data_tool/src/github.com/kr/text/.github/dependabot.yml

@@ -0,0 +1,10 @@
+version: 2
+updates:
+- package-ecosystem: gomod
+  directory: /
+  schedule:
+    interval: weekly
+- package-ecosystem: github-actions
+  directory: /
+  schedule:
+    interval: weekly

+ 25 - 0
data_tool/src/github.com/kr/text/.github/workflows/go.yml

@@ -0,0 +1,25 @@
+name: Go
+
+on:
+  push:
+    branches: [ main ]
+  pull_request:
+    branches: [ main ]
+
+jobs:
+
+  build:
+    runs-on: ubuntu-latest
+    steps:
+    - uses: actions/checkout@v3
+
+    - name: Set up Go
+      uses: actions/setup-go@v2
+      with:
+        go-version: 1.16
+
+    - name: Build
+      run: go build -v ./...
+
+    - name: Test
+      run: go test -v ./...

+ 19 - 0
data_tool/src/github.com/kr/text/License

@@ -0,0 +1,19 @@
+Copyright 2012 Keith Rarick
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.

+ 3 - 0
data_tool/src/github.com/kr/text/Readme

@@ -0,0 +1,3 @@
+This is a Go package for manipulating paragraphs of text.
+
+See https://pkg.go.dev/github.com/kr/text for full documentation.

+ 73 - 0
data_tool/src/github.com/kr/text/cmd/agg/doc.go

@@ -0,0 +1,73 @@
+/*
+
+Agg computes aggregate values over tabular text.
+It behaves somewhat like the SQL “GROUP BY” clause.
+
+Usage:
+
+	agg [function...]
+
+It reads input from stdin as a sequence of records, one per line.
+It treats each line as a set of fields separated by white space.
+One field (the first, by default) is designated as the key.
+Successive lines with equal keys are grouped into a group,
+and agg produces one line of output for each group.
+(Note that only contiguous input lines can form a group.
+If you need to make sure that all records for a given key
+are grouped together, sort the input first.)
+
+For each remaining field,
+agg applies a function to all the values in the group,
+producing a single output value.
+The command line arguments specify which functions to use,
+one per field in the input table.
+
+Functions
+
+The available functions are:
+
+    key        group by this field (default for field 1)
+    first      value from first line of group (default for rest)
+    last       value from last line of group
+    sample     value from any line of group, uniformly at random
+    prefix     longest common string prefix
+    join:sep   concatenate strings with given sep
+    smin       lexically least string
+    smax       lexically greatest string
+    min        numerically least value
+    max        numerically greatest value
+    sum        numeric sum
+    mean       arithmetic mean
+    count      number of records (ignores input value)
+    const:val  print val, ignoring input
+    drop       omit the column entirely
+
+The numeric functions skip items that don't parse as numbers.
+
+Examples
+
+Using the following input:
+
+    $ cat >input
+    -rwx   alice      100   /home/alice/bin/crdt
+    -rw-   alice   210002   /home/alice/thesis.tex
+    -rw-   bob      10051   /home/bob/expenses.tab
+    -rwx   kr      862060   /home/kr/bin/blog
+    -rwx   kr      304608   /home/kr/bin/agg
+
+Disk usage for each user, plus where that disk usage occurs
+(longest common prefix of filesystem paths):
+
+    $ agg <input drop key sum prefix
+    alice	210153	/home/alice/
+    bob	10051	/home/bob/expenses.tab
+    kr	1166668	/home/kr/
+
+Disk usage for executable vs non-executable files:
+
+    $ sort input | agg key drop sum join:,
+    -rw-	220053	/home/alice/thesis.tex,/home/bob/expenses.tab
+    -rwx	1166768	/home/alice/bin/crdt,/home/kr/bin/agg,/home/kr/bin/blog
+
+*/
+package main

+ 112 - 0
data_tool/src/github.com/kr/text/cmd/agg/main.go

@@ -0,0 +1,112 @@
+package main
+
+// TODO(kr): tests
+
+import (
+	"bufio"
+	"fmt"
+	"log"
+	"math/rand"
+	"os"
+	"strings"
+	"time"
+)
+
+type agg interface {
+	merge(string)
+	String() string
+}
+
+var (
+	key     = 0
+	funcmap = make(map[int]func(init, arg string) agg)
+	argmap  = make(map[int]string)
+	symtab  = map[string]func(init, arg string) agg{
+		"first":  first,
+		"last":   last,
+		"prefix": prefix,
+		"sample": sample,
+		"join":   join,
+		"smin":   smin,
+		"smax":   smax,
+		"min":    min,
+		"max":    max,
+		"sum":    sum,
+		"mean":   mean,
+		"count":  count,
+		"const":  constf,
+		"drop":   nil,
+	}
+)
+
+func main() {
+	log.SetPrefix("agg: ")
+	log.SetFlags(0)
+	rand.Seed(time.Now().UnixNano())
+	for i, sym := range os.Args[1:] {
+		if p := strings.IndexByte(sym, ':'); p >= 0 {
+			sym, argmap[i] = sym[:p], sym[p+1:]
+		}
+		if sym == "key" {
+			key, sym = i, "first"
+		}
+		f, ok := symtab[sym]
+		if !ok {
+			log.Fatalf("bad function: %q", sym)
+		}
+		funcmap[i] = f
+	}
+
+	sc := bufio.NewScanner(os.Stdin)
+	var g *group
+	for sc.Scan() {
+		ss := strings.Fields(sc.Text())
+		if !matches(g, ss) {
+			emit(g)
+			g = &group{key: ss[key]}
+		}
+		mergeLine(g, ss)
+	}
+	emit(g)
+}
+
+type group struct {
+	key string
+	agg []agg
+}
+
+func matches(g *group, ss []string) bool {
+	return g != nil && g.key == ss[key]
+}
+
+func emit(g *group) {
+	if g == nil {
+		return
+	}
+	rest := false
+	for i, a := range g.agg {
+		if f, ok := funcmap[i]; ok && f == nil {
+			continue
+		}
+		if rest {
+			fmt.Print("\t")
+		}
+		rest = true
+		fmt.Print(a)
+	}
+	fmt.Println()
+}
+
+func mergeLine(g *group, ss []string) {
+	for i, s := range ss {
+		if i >= len(g.agg) {
+			f := funcmap[i]
+			if f == nil {
+				f = first
+			}
+			g.agg = append(g.agg, f(s, argmap[i]))
+		} else {
+			g.agg[i].merge(s)
+		}
+	}
+}

+ 99 - 0
data_tool/src/github.com/kr/text/cmd/agg/num.go

@@ -0,0 +1,99 @@
+package main
+
+import (
+	"math/big"
+	"strconv"
+)
+
+func min(s, arg string) agg { return newBinop(s, opmin) }
+func max(s, arg string) agg { return newBinop(s, opmax) }
+func sum(s, arg string) agg { return newBinop(s, opsum) }
+
+type binop struct {
+	v *big.Float
+	f func(a, b *big.Float) *big.Float
+}
+
+func newBinop(s string, f func(a, b *big.Float) *big.Float) *binop {
+	v, _ := parseFloat(s)
+	return &binop{v, f}
+}
+
+func (o *binop) String() string {
+	if o.v == nil {
+		return "NaN"
+	}
+	return o.v.Text('f', -1)
+}
+
+func (o *binop) merge(s string) {
+	v, ok := parseFloat(s)
+	if !ok {
+		return
+	}
+	o.v = o.f(o.v, v)
+}
+
+func opmin(a, b *big.Float) *big.Float {
+	if a != nil && (b == nil || a.Cmp(b) <= 0) {
+		return a
+	}
+	return b
+}
+
+func opmax(a, b *big.Float) *big.Float {
+	if a != nil && (b == nil || a.Cmp(b) >= 0) {
+		return a
+	}
+	return b
+}
+
+func opsum(a, b *big.Float) *big.Float {
+	if a == nil {
+		return b
+	} else if b == nil {
+		return a
+	}
+	return a.Add(a, b)
+}
+
+type meanagg struct {
+	v *big.Float
+	d float64 // actually an integer
+}
+
+func mean(s, arg string) agg {
+	v, ok := parseFloat(s)
+	if !ok {
+		return &meanagg{new(big.Float), 0}
+	}
+	return &meanagg{v, 1}
+}
+
+func (m *meanagg) String() string {
+	if m.d == 0 {
+		return "NaN"
+	}
+	v := new(big.Float).Quo(m.v, big.NewFloat(m.d))
+	return v.Text('f', -1)
+}
+
+func (m *meanagg) merge(s string) {
+	v, ok := parseFloat(s)
+	if !ok {
+		return
+	}
+	m.v.Add(m.v, v)
+	m.d++
+}
+
+func parseFloat(s string) (*big.Float, bool) {
+	v, _, err := big.ParseFloat(s, 0, 1000, big.ToNearestEven)
+	return v, err == nil
+}
+
+type counter int
+
+func count(init, arg string) agg  { return new(counter) }
+func (c *counter) String() string { return strconv.Itoa(int(*c) + 1) }
+func (c *counter) merge(string)   { *c++ }

+ 74 - 0
data_tool/src/github.com/kr/text/cmd/agg/string.go

@@ -0,0 +1,74 @@
+package main
+
+import (
+	"math/rand"
+	"strings"
+)
+
+func first(s, arg string) agg  { return &sbinop{s, opfirst} }
+func last(s, arg string) agg   { return &sbinop{s, oplast} }
+func prefix(s, arg string) agg { return &sbinop{s, opprefix} }
+func join(s, arg string) agg   { return &sbinop{s, opjoin(arg)} }
+func smin(s, arg string) agg   { return &sbinop{s, opsmin} }
+func smax(s, arg string) agg   { return &sbinop{s, opsmax} }
+
+type sbinop struct {
+	s string
+	f func(a, b string) string
+}
+
+func (o *sbinop) String() string { return o.s }
+
+func (o *sbinop) merge(s string) { o.s = o.f(o.s, s) }
+
+func opfirst(a, b string) string { return a }
+func oplast(a, b string) string  { return b }
+
+func opprefix(a, b string) string {
+	for i := range a {
+		if i >= len(b) || a[i] != b[i] {
+			return a[:i]
+		}
+	}
+	return a
+}
+
+func opjoin(sep string) func(a, b string) string {
+	return func(a, b string) string {
+		return a + sep + b // TODO(kr): too slow? maybe strings.Join?
+	}
+}
+
+func opsmin(a, b string) string {
+	if strings.Compare(a, b) <= 0 {
+		return a
+	}
+	return b
+}
+
+func opsmax(a, b string) string {
+	if strings.Compare(a, b) >= 0 {
+		return a
+	}
+	return b
+}
+
+type sampler struct {
+	n int
+	s string
+}
+
+func sample(s, arg string) agg    { return &sampler{1, s} }
+func (p *sampler) String() string { return p.s }
+func (p *sampler) merge(s string) {
+	p.n++
+	if rand.Intn(p.n) == 0 {
+		p.s = s
+	}
+}
+
+type constant string
+
+func constf(init, arg string) agg { return constant(arg) }
+func (c constant) String() string { return string(c) }
+func (c constant) merge(string)   {}

+ 5 - 0
data_tool/src/github.com/kr/text/colwriter/Readme

@@ -0,0 +1,5 @@
+Package colwriter provides a write filter that formats
+input lines in multiple columns.
+
+The package is a straightforward translation from
+/src/cmd/draw/mc.c in Plan 9 from User Space.

+ 147 - 0
data_tool/src/github.com/kr/text/colwriter/column.go

@@ -0,0 +1,147 @@
+// Package colwriter provides a write filter that formats
+// input lines in multiple columns.
+//
+// The package is a straightforward translation from
+// /src/cmd/draw/mc.c in Plan 9 from User Space.
+package colwriter
+
+import (
+	"bytes"
+	"io"
+	"unicode/utf8"
+)
+
+const (
+	tab = 4
+)
+
+const (
+	// BreakOnColon prints each input line ending in a colon ':' separately.
+	BreakOnColon uint = 1 << iota
+)
+
+// A Writer is a filter that arranges input lines in as many columns as will
+// fit in its width. Tab '\t' chars in the input are translated to sequences
+// of spaces ending at multiples of 4 positions.
+//
+// If BreakOnColon is set, each input line ending in a colon ':' is written
+// separately.
+//
+// The Writer assumes that all Unicode code points have the same width; this
+// may not be true in some fonts.
+type Writer struct {
+	w     io.Writer
+	buf   []byte
+	width int
+	flag  uint
+}
+
+// NewWriter allocates and initializes a new Writer writing to w.
+// Parameter width controls the total number of characters on each line
+// across all columns.
+func NewWriter(w io.Writer, width int, flag uint) *Writer {
+	return &Writer{
+		w:     w,
+		width: width,
+		flag:  flag,
+	}
+}
+
+// Write writes p to the writer w. The only errors returned are ones
+// encountered while writing to the underlying output stream.
+func (w *Writer) Write(p []byte) (n int, err error) {
+	var linelen int
+	var lastWasColon bool
+	for i, c := range p {
+		w.buf = append(w.buf, c)
+		linelen++
+		if c == '\t' {
+			w.buf[len(w.buf)-1] = ' '
+			for linelen%tab != 0 {
+				w.buf = append(w.buf, ' ')
+				linelen++
+			}
+		}
+		if w.flag&BreakOnColon != 0 && c == ':' {
+			lastWasColon = true
+		} else if lastWasColon {
+			if c == '\n' {
+				pos := bytes.LastIndex(w.buf[:len(w.buf)-1], []byte{'\n'})
+				if pos < 0 {
+					pos = 0
+				}
+				line := w.buf[pos:]
+				w.buf = w.buf[:pos]
+				if err = w.columnate(); err != nil {
+					if len(line) < i {
+						return i - len(line), err
+					}
+					return 0, err
+				}
+				if n, err := w.w.Write(line); err != nil {
+					if r := len(line) - n; r < i {
+						return i - r, err
+					}
+					return 0, err
+				}
+			}
+			lastWasColon = false
+		}
+		if c == '\n' {
+			linelen = 0
+		}
+	}
+	return len(p), nil
+}
+
+// Flush should be called after the last call to Write to ensure that any data
+// buffered in the Writer is written to output.
+func (w *Writer) Flush() error {
+	return w.columnate()
+}
+
+func (w *Writer) columnate() error {
+	words := bytes.Split(w.buf, []byte{'\n'})
+	w.buf = nil
+	if len(words[len(words)-1]) == 0 {
+		words = words[:len(words)-1]
+	}
+	maxwidth := 0
+	for _, wd := range words {
+		if n := utf8.RuneCount(wd); n > maxwidth {
+			maxwidth = n
+		}
+	}
+	maxwidth++ // space char
+	wordsPerLine := w.width / maxwidth
+	if wordsPerLine <= 0 {
+		wordsPerLine = 1
+	}
+	nlines := (len(words) + wordsPerLine - 1) / wordsPerLine
+	for i := 0; i < nlines; i++ {
+		col := 0
+		endcol := 0
+		for j := i; j < len(words); j += nlines {
+			endcol += maxwidth
+			_, err := w.w.Write(words[j])
+			if err != nil {
+				return err
+			}
+			col += utf8.RuneCount(words[j])
+			if j+nlines < len(words) {
+				for col < endcol {
+					_, err := w.w.Write([]byte{' '})
+					if err != nil {
+						return err
+					}
+					col++
+				}
+			}
+		}
+		_, err := w.w.Write([]byte{'\n'})
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}

+ 90 - 0
data_tool/src/github.com/kr/text/colwriter/column_test.go

@@ -0,0 +1,90 @@
+package colwriter
+
+import (
+	"bytes"
+	"testing"
+)
+
+var src = `
+.git
+.gitignore
+.godir
+Procfile:
+README.md
+api.go
+apps.go
+auth.go
+darwin.go
+data.go
+dyno.go:
+env.go
+git.go
+help.go
+hkdist
+linux.go
+ls.go
+main.go
+plugin.go
+run.go
+scale.go
+ssh.go
+tail.go
+term
+unix.go
+update.go
+version.go
+windows.go
+`[1:]
+
+var tests = []struct {
+	wid  int
+	flag uint
+	src  string
+	want string
+}{
+	{80, 0, "", ""},
+	{80, 0, src, `
+.git       README.md  darwin.go  git.go     ls.go      scale.go   unix.go
+.gitignore api.go     data.go    help.go    main.go    ssh.go     update.go
+.godir     apps.go    dyno.go:   hkdist     plugin.go  tail.go    version.go
+Procfile:  auth.go    env.go     linux.go   run.go     term       windows.go
+`[1:]},
+	{80, BreakOnColon, src, `
+.git       .gitignore .godir
+
+Procfile:
+README.md api.go    apps.go   auth.go   darwin.go data.go
+
+dyno.go:
+env.go     hkdist     main.go    scale.go   term       version.go
+git.go     linux.go   plugin.go  ssh.go     unix.go    windows.go
+help.go    ls.go      run.go     tail.go    update.go
+`[1:]},
+	{20, 0, `
+Hello
+Γειά σου
+안녕
+今日は
+`[1:], `
+Hello    안녕
+Γειά σου 今日は
+`[1:]},
+}
+
+func TestWriter(t *testing.T) {
+	for _, test := range tests {
+		b := new(bytes.Buffer)
+		w := NewWriter(b, test.wid, test.flag)
+		if _, err := w.Write([]byte(test.src)); err != nil {
+			t.Error(err)
+		}
+		if err := w.Flush(); err != nil {
+			t.Error(err)
+		}
+		if g := b.String(); test.want != g {
+			t.Log("\n" + test.want)
+			t.Log("\n" + g)
+			t.Errorf("%q != %q", test.want, g)
+		}
+	}
+}

+ 3 - 0
data_tool/src/github.com/kr/text/doc.go

@@ -0,0 +1,3 @@
+// Package text provides rudimentary functions for manipulating text in
+// paragraphs.
+package text

+ 5 - 0
data_tool/src/github.com/kr/text/go.mod

@@ -0,0 +1,5 @@
+module github.com/kr/text
+
+go 1.12
+
+require github.com/creack/pty v1.1.17

+ 2 - 0
data_tool/src/github.com/kr/text/go.sum

@@ -0,0 +1,2 @@
+github.com/creack/pty v1.1.17 h1:QeVUsEDNrLBW4tMgZHvxy18sKtr6VI492kBhUfhDJNI=
+github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=

+ 74 - 0
data_tool/src/github.com/kr/text/indent.go

@@ -0,0 +1,74 @@
+package text
+
+import (
+	"io"
+)
+
+// Indent inserts prefix at the beginning of each non-empty line of s. The
+// end-of-line marker is NL.
+func Indent(s, prefix string) string {
+	return string(IndentBytes([]byte(s), []byte(prefix)))
+}
+
+// IndentBytes inserts prefix at the beginning of each non-empty line of b.
+// The end-of-line marker is NL.
+func IndentBytes(b, prefix []byte) []byte {
+	var res []byte
+	bol := true
+	for _, c := range b {
+		if bol && c != '\n' {
+			res = append(res, prefix...)
+		}
+		res = append(res, c)
+		bol = c == '\n'
+	}
+	return res
+}
+
+// Writer indents each line of its input.
+type indentWriter struct {
+	w   io.Writer
+	bol bool
+	pre [][]byte
+	sel int
+	off int
+}
+
+// NewIndentWriter makes a new write filter that indents the input
+// lines. Each line is prefixed in order with the corresponding
+// element of pre. If there are more lines than elements, the last
+// element of pre is repeated for each subsequent line.
+func NewIndentWriter(w io.Writer, pre ...[]byte) io.Writer {
+	return &indentWriter{
+		w:   w,
+		pre: pre,
+		bol: true,
+	}
+}
+
+// The only errors returned are from the underlying writer.
+func (w *indentWriter) Write(p []byte) (n int, err error) {
+	for _, c := range p {
+		if w.bol {
+			var i int
+			i, err = w.w.Write(w.pre[w.sel][w.off:])
+			w.off += i
+			if err != nil {
+				return n, err
+			}
+		}
+		_, err = w.w.Write([]byte{c})
+		if err != nil {
+			return n, err
+		}
+		n++
+		w.bol = c == '\n'
+		if w.bol {
+			w.off = 0
+			if w.sel < len(w.pre)-1 {
+				w.sel++
+			}
+		}
+	}
+	return n, nil
+}

+ 119 - 0
data_tool/src/github.com/kr/text/indent_test.go

@@ -0,0 +1,119 @@
+package text
+
+import (
+	"bytes"
+	"testing"
+)
+
+type T struct {
+	inp, exp, pre string
+}
+
+var tests = []T{
+	{
+		"The quick brown fox\njumps over the lazy\ndog.\nBut not quickly.\n",
+		"xxxThe quick brown fox\nxxxjumps over the lazy\nxxxdog.\nxxxBut not quickly.\n",
+		"xxx",
+	},
+	{
+		"The quick brown fox\njumps over the lazy\ndog.\n\nBut not quickly.",
+		"xxxThe quick brown fox\nxxxjumps over the lazy\nxxxdog.\n\nxxxBut not quickly.",
+		"xxx",
+	},
+}
+
+func TestIndent(t *testing.T) {
+	for _, test := range tests {
+		got := Indent(test.inp, test.pre)
+		if got != test.exp {
+			t.Errorf("mismatch %q != %q", got, test.exp)
+		}
+	}
+}
+
+type IndentWriterTest struct {
+	inp, exp string
+	pre      []string
+}
+
+var ts = []IndentWriterTest{
+	{
+		`
+The quick brown fox
+jumps over the lazy
+dog.
+But not quickly.
+`[1:],
+		`
+xxxThe quick brown fox
+xxxjumps over the lazy
+xxxdog.
+xxxBut not quickly.
+`[1:],
+		[]string{"xxx"},
+	},
+	{
+		`
+The quick brown fox
+jumps over the lazy
+dog.
+But not quickly.
+`[1:],
+		`
+xxaThe quick brown fox
+xxxjumps over the lazy
+xxxdog.
+xxxBut not quickly.
+`[1:],
+		[]string{"xxa", "xxx"},
+	},
+	{
+		`
+The quick brown fox
+jumps over the lazy
+dog.
+But not quickly.
+`[1:],
+		`
+xxaThe quick brown fox
+xxbjumps over the lazy
+xxcdog.
+xxxBut not quickly.
+`[1:],
+		[]string{"xxa", "xxb", "xxc", "xxx"},
+	},
+	{
+		`
+The quick brown fox
+jumps over the lazy
+dog.
+
+But not quickly.`[1:],
+		`
+xxaThe quick brown fox
+xxxjumps over the lazy
+xxxdog.
+xxx
+xxxBut not quickly.`[1:],
+		[]string{"xxa", "xxx"},
+	},
+}
+
+func TestIndentWriter(t *testing.T) {
+	for _, test := range ts {
+		b := new(bytes.Buffer)
+		pre := make([][]byte, len(test.pre))
+		for i := range test.pre {
+			pre[i] = []byte(test.pre[i])
+		}
+		w := NewIndentWriter(b, pre...)
+		if _, err := w.Write([]byte(test.inp)); err != nil {
+			t.Error(err)
+		}
+		if got := b.String(); got != test.exp {
+			t.Errorf("mismatch %q != %q", got, test.exp)
+			t.Log(got)
+			t.Log(test.exp)
+		}
+	}
+}

+ 9 - 0
data_tool/src/github.com/kr/text/mc/Readme

@@ -0,0 +1,9 @@
+Command mc prints in multiple columns.
+
+  Usage: mc [-] [-N] [file...]
+
+Mc splits the input into as many columns as will fit in N
+print positions. If the output is a tty, the default N is
+the number of characters in a terminal line; otherwise the
+default N is 80. Under option - each input line ending in
+a colon ':' is printed separately.

+ 63 - 0
data_tool/src/github.com/kr/text/mc/mc.go

@@ -0,0 +1,63 @@
+// Command mc prints in multiple columns.
+//
+//   Usage: mc [-] [-N] [file...]
+//
+// Mc splits the input into as many columns as will fit in N
+// print positions. If the output is a tty, the default N is
+// the number of characters in a terminal line; otherwise the
+// default N is 80. Under option - each input line ending in
+// a colon ':' is printed separately.
+package main
+
+import (
+	"io"
+	"log"
+	"os"
+	"strconv"
+
+	"github.com/creack/pty"
+	"github.com/kr/text/colwriter"
+)
+
+func main() {
+	var width int
+	var flag uint
+	args := os.Args[1:]
+	for len(args) > 0 && len(args[0]) > 0 && args[0][0] == '-' {
+		if len(args[0]) > 1 {
+			width, _ = strconv.Atoi(args[0][1:])
+		} else {
+			flag |= colwriter.BreakOnColon
+		}
+		args = args[1:]
+	}
+	if width < 1 {
+		_, width, _ = pty.Getsize(os.Stdout)
+	}
+	if width < 1 {
+		width = 80
+	}
+
+	w := colwriter.NewWriter(os.Stdout, width, flag)
+	if len(args) > 0 {
+		for _, s := range args {
+			if f, err := os.Open(s); err == nil {
+				copyin(w, f)
+				f.Close()
+			} else {
+				log.Println(err)
+			}
+		}
+	} else {
+		copyin(w, os.Stdin)
+	}
+}
+
+func copyin(w *colwriter.Writer, r io.Reader) {
+	if _, err := io.Copy(w, r); err != nil {
+		log.Println(err)
+	}
+	if err := w.Flush(); err != nil {
+		log.Println(err)
+	}
+}

+ 86 - 0
data_tool/src/github.com/kr/text/wrap.go

@@ -0,0 +1,86 @@
+package text
+
+import (
+	"bytes"
+	"math"
+)
+
+var (
+	nl = []byte{'\n'}
+	sp = []byte{' '}
+)
+
+const defaultPenalty = 1e5
+
+// Wrap wraps s into a paragraph of lines of length lim, with minimal
+// raggedness.
+func Wrap(s string, lim int) string {
+	return string(WrapBytes([]byte(s), lim))
+}
+
+// WrapBytes wraps b into a paragraph of lines of length lim, with minimal
+// raggedness.
+func WrapBytes(b []byte, lim int) []byte {
+	words := bytes.Split(bytes.Replace(bytes.TrimSpace(b), nl, sp, -1), sp)
+	var lines [][]byte
+	for _, line := range WrapWords(words, 1, lim, defaultPenalty) {
+		lines = append(lines, bytes.Join(line, sp))
+	}
+	return bytes.Join(lines, nl)
+}
+
+// WrapWords is the low-level line-breaking algorithm, useful if you need more
+// control over the details of the text wrapping process. For most uses, either
+// Wrap or WrapBytes will be sufficient and more convenient.
+//
+// WrapWords splits a list of words into lines with minimal "raggedness",
+// treating each byte as one unit, accounting for spc units between adjacent
+// words on each line, and attempting to limit lines to lim units. Raggedness
+// is the total error over all lines, where error is the square of the
+// difference of the length of the line and lim. Too-long lines (which only
+// happen when a single word is longer than lim units) have pen penalty units
+// added to the error.
+func WrapWords(words [][]byte, spc, lim, pen int) [][][]byte {
+	n := len(words)
+
+	length := make([][]int, n)
+	for i := 0; i < n; i++ {
+		length[i] = make([]int, n)
+		length[i][i] = len(words[i])
+		for j := i + 1; j < n; j++ {
+			length[i][j] = length[i][j-1] + spc + len(words[j])
+		}
+	}
+
+	nbrk := make([]int, n)
+	cost := make([]int, n)
+	for i := range cost {
+		cost[i] = math.MaxInt32
+	}
+	for i := n - 1; i >= 0; i-- {
+		if length[i][n-1] <= lim || i == n-1 {
+			cost[i] = 0
+			nbrk[i] = n
+		} else {
+			for j := i + 1; j < n; j++ {
+				d := lim - length[i][j-1]
+				c := d*d + cost[j]
+				if length[i][j-1] > lim && i != (j-1) {
+					c += pen // too-long lines get a worse penalty
+				}
+				if c < cost[i] {
+					cost[i] = c
+					nbrk[i] = j
+				}
+			}
+		}
+	}
+
+	var lines [][][]byte
+	i := 0
+	for i < n {
+		lines = append(lines, words[i:nbrk[i]])
+		i = nbrk[i]
+	}
+	return lines
+}

+ 63 - 0
data_tool/src/github.com/kr/text/wrap_test.go

@@ -0,0 +1,63 @@
+package text
+
+import (
+	"bytes"
+	"testing"
+)
+
+var text = "The quick brown fox jumps over the lazy dog."
+
+func TestWrap(t *testing.T) {
+	exp := [][]string{
+		{"The", "quick", "brown", "fox"},
+		{"jumps", "over", "the", "lazy", "dog."},
+	}
+	words := bytes.Split([]byte(text), sp)
+	got := WrapWords(words, 1, 24, defaultPenalty)
+	if len(exp) != len(got) {
+		t.Fail()
+	}
+	for i := range exp {
+		if len(exp[i]) != len(got[i]) {
+			t.Fail()
+		}
+		for j := range exp[i] {
+			if exp[i][j] != string(got[i][j]) {
+				t.Fatal(i, exp[i][j], got[i][j])
+			}
+		}
+	}
+}
+
+func TestWrapNarrow(t *testing.T) {
+	exp := "The\nquick\nbrown\nfox\njumps\nover\nthe\nlazy\ndog."
+	if Wrap(text, 5) != exp {
+		t.Fail()
+	}
+}
+
+func TestWrapOneLine(t *testing.T) {
+	exp := "The quick brown fox jumps over the lazy dog."
+	if Wrap(text, 500) != exp {
+		t.Fail()
+	}
+}
+
+func TestWrapBug1(t *testing.T) {
+	cases := []struct {
+		limit int
+		text  string
+		want  string
+	}{
+		{4, "aaaaa", "aaaaa"},
+		{4, "a aaaaa", "a\naaaaa"},
+		{4, "overlong overlong foo", "overlong\noverlong\nfoo"},
+	}
+
+	for _, test := range cases {
+		got := Wrap(test.text, test.limit)
+		if got != test.want {
+			t.Errorf("Wrap(%q, %d) = %q want %q", test.text, test.limit, got, test.want)
+		}
+	}
+}

+ 19 - 0
data_tool/src/github.com/peterbourgon/diskv/LICENSE

@@ -0,0 +1,19 @@
+Copyright (c) 2011-2012 Peter Bourgon
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.

+ 191 - 0
data_tool/src/github.com/peterbourgon/diskv/README.md

@@ -0,0 +1,191 @@
+# What is diskv?
+
+Diskv (disk-vee) is a simple, persistent key-value store written in the Go
+language. It starts with an incredibly simple API for storing arbitrary data on
+a filesystem by key, and builds several layers of performance-enhancing
+abstraction on top.  The end result is a conceptually simple, but highly
+performant, disk-backed storage system.
+
+[![Build Status][1]][2]
+
+[1]: https://drone.io/github.com/peterbourgon/diskv/status.png
+[2]: https://drone.io/github.com/peterbourgon/diskv/latest
+
+
+# Installing
+
+Install [Go 1][3], either [from source][4] or [with a prepackaged binary][5].
+Then,
+
+```bash
+$ go get github.com/peterbourgon/diskv/v3
+```
+
+[3]: http://golang.org
+[4]: http://golang.org/doc/install/source
+[5]: http://golang.org/doc/install
+
+
+# Usage
+
+```go
+package main
+
+import (
+	"fmt"
+	"github.com/peterbourgon/diskv/v3"
+)
+
+func main() {
+	// Simplest transform function: put all the data files into the base dir.
+	flatTransform := func(s string) []string { return []string{} }
+
+	// Initialize a new diskv store, rooted at "my-data-dir", with a 1MB cache.
+	d := diskv.New(diskv.Options{
+		BasePath:     "my-data-dir",
+		Transform:    flatTransform,
+		CacheSizeMax: 1024 * 1024,
+	})
+
+	// Write three bytes to the key "alpha".
+	key := "alpha"
+	d.Write(key, []byte{'1', '2', '3'})
+
+	// Read the value back out of the store.
+	value, _ := d.Read(key)
+	fmt.Printf("%v\n", value)
+
+	// Erase the key+value from the store (and the disk).
+	d.Erase(key)
+}
+```
+
+More complex examples can be found in the "examples" subdirectory.
+
+
+# Theory
+
+## Basic idea
+
+At its core, diskv is a map of a key (`string`) to arbitrary data (`[]byte`).
+The data is written to a single file on disk, with the same name as the key.
+The key determines where that file will be stored, via a user-provided
+`TransformFunc`, which takes a key and returns a slice (`[]string`)
+corresponding to a path list where the key file will be stored. The simplest
+TransformFunc,
+
+```go
+func SimpleTransform (key string) []string {
+    return []string{}
+}
+```
+
+will place all keys in the same, base directory. The design is inspired by
+[Redis diskstore][6]; a TransformFunc which emulates the default diskstore
+behavior is available in the content-addressable-storage example.
+
+[6]: http://groups.google.com/group/redis-db/browse_thread/thread/d444bc786689bde9?pli=1
+
+**Note** that your TransformFunc should ensure that one valid key doesn't
+transform to a subset of another valid key. That is, it shouldn't be possible
+to construct valid keys that resolve to directory names. As a concrete example,
+if your TransformFunc splits on every 3 characters, then
+
+```go
+d.Write("abcabc", val) // OK: written to <base>/abc/abc/abcabc
+d.Write("abc", val)    // Error: attempted write to <base>/abc/abc, but it's a directory
+```
+
+This will be addressed in an upcoming version of diskv.
+
+Probably the most important design principle behind diskv is that your data is
+always flatly available on the disk. diskv will never do anything that would
+prevent you from accessing, copying, backing up, or otherwise interacting with
+your data via common UNIX commandline tools.
+
+## Advanced path transformation
+
+If you need more control over the file name written to disk or if you want to support
+slashes in your key name or special characters in the keys, you can use the
+AdvancedTransform property. You must supply a function that returns
+a special PathKey structure, which is a breakdown of a path and a file name. Strings
+returned must be clean of any slashes or special characters:
+
+```go
+func AdvancedTransformExample(key string) *diskv.PathKey {
+	path := strings.Split(key, "/")
+	last := len(path) - 1
+	return &diskv.PathKey{
+		Path:     path[:last],
+		FileName: path[last] + ".txt",
+	}
+}
+
+// If you provide an AdvancedTransform, you must also provide its
+// inverse:
+
+func InverseTransformExample(pathKey *diskv.PathKey) (key string) {
+	txt := pathKey.FileName[len(pathKey.FileName)-4:]
+	if txt != ".txt" {
+		panic("Invalid file found in storage folder!")
+	}
+	return strings.Join(pathKey.Path, "/") + pathKey.FileName[:len(pathKey.FileName)-4]
+}
+
+func main() {
+	d := diskv.New(diskv.Options{
+		BasePath:          "my-data-dir",
+		AdvancedTransform: AdvancedTransformExample,
+		InverseTransform:  InverseTransformExample,
+		CacheSizeMax:      1024 * 1024,
+	})
+	// Write some text to the key "alpha/beta/gamma".
+	key := "alpha/beta/gamma"
+	d.WriteString(key, "¡Hola!") // will be stored in "<basedir>/alpha/beta/gamma.txt"
+	fmt.Println(d.ReadString("alpha/beta/gamma"))
+}
+```
+
+
+## Adding a cache
+
+An in-memory caching layer is provided by combining the BasicStore
+functionality with a simple map structure, and keeping it up-to-date as
+appropriate. Since the map structure in Go is not threadsafe, it's combined
+with a RWMutex to provide safe concurrent access.
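+
+For illustration, here is a small sketch of what this looks like from the
+caller's side. It uses only the Write/Read API shown above; the cache itself
+is managed entirely internally.
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/peterbourgon/diskv/v3"
+)
+
+func main() {
+	d := diskv.New(diskv.Options{
+		BasePath:     "my-data-dir",
+		Transform:    func(s string) []string { return []string{} },
+		CacheSizeMax: 1024 * 1024, // keep up to ~1MB of values in memory
+	})
+	defer d.EraseAll()
+
+	d.Write("alpha", []byte{'1', '2', '3'})
+
+	// The first Read goes to disk and, if the value fits under CacheSizeMax,
+	// also populates the in-memory cache; later Reads of the same key can
+	// then be served from memory.
+	v, _ := d.Read("alpha")
+	fmt.Printf("%v\n", v)
+}
+```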
+
+## Adding order
+
+diskv is a key-value store and therefore inherently unordered. An ordering
+system can be injected into the store by passing something which satisfies the
+diskv.Index interface. (A default implementation, using Google's
+[btree][7] package, is provided.) Basically, diskv keeps an ordered (by a
+user-provided Less function) index of the keys, which can be queried.
+
+[7]: https://github.com/google/btree
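+
+A minimal sketch of wiring in the btree-backed index. The `Index`, `IndexLess`,
+and `BTreeIndex` names follow the description above, and the `Keys(from, n)`
+query shown is an assumption, not a definitive API reference:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/peterbourgon/diskv/v3"
+)
+
+func main() {
+	d := diskv.New(diskv.Options{
+		BasePath:     "my-data-dir",
+		Transform:    func(s string) []string { return []string{} },
+		CacheSizeMax: 1024 * 1024,
+		Index:        &diskv.BTreeIndex{},                     // btree-backed default index
+		IndexLess:    func(a, b string) bool { return a < b }, // user-provided Less function
+	})
+	defer d.EraseAll()
+
+	for _, k := range []string{"banana", "apple", "cherry"} {
+		d.Write(k, []byte{'x'})
+	}
+
+	// Query up to 10 keys in IndexLess order, starting from the beginning.
+	fmt.Println(d.Index.Keys("", 10))
+}
+```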
+
+## Adding compression
+
+Something which implements the diskv.Compression interface may be passed
+during store creation, so that all Writes and Reads are filtered through
+a compression/decompression pipeline. Several default implementations,
+using stdlib compression algorithms, are provided. Note that data is cached
+compressed; the cost of decompression is borne with each Read.
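+
+A sketch of opting in at construction time; `NewGzipCompression` is assumed to
+be one of the stdlib-backed default implementations mentioned above:
+
+```go
+package main
+
+import "github.com/peterbourgon/diskv/v3"
+
+func main() {
+	d := diskv.New(diskv.Options{
+		BasePath:     "my-data-dir",
+		Transform:    func(s string) []string { return []string{} },
+		CacheSizeMax: 1024 * 1024,
+		Compression:  diskv.NewGzipCompression(), // writes compressed, reads decompressed
+	})
+	defer d.EraseAll()
+
+	d.Write("alpha", []byte("a fairly compressible payload"))
+	v, _ := d.Read("alpha") // transparently decompressed on the way out
+	_ = v
+}
+```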
+
+## Streaming
+
+diskv also now provides ReadStream and WriteStream methods, to allow very large
+data to be handled efficiently.
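+
+A sketch of the streaming calls. WriteStream's final argument requests a sync
+to disk; the boolean on ReadStream, which asks to bypass the in-memory cache,
+is an assumption here:
+
+```go
+package main
+
+import (
+	"io"
+	"os"
+	"strings"
+
+	"github.com/peterbourgon/diskv/v3"
+)
+
+func main() {
+	d := diskv.New(diskv.Options{
+		BasePath:     "my-data-dir",
+		Transform:    func(s string) []string { return []string{} },
+		CacheSizeMax: 1024 * 1024,
+	})
+	defer d.EraseAll()
+
+	// Stream data in from any io.Reader; the final bool asks for an fsync.
+	d.WriteStream("big-key", strings.NewReader("imagine a very large value here"), false)
+
+	// Stream the value back out without buffering it all in memory.
+	rc, err := d.ReadStream("big-key", true)
+	if err != nil {
+		panic(err)
+	}
+	defer rc.Close()
+	io.Copy(os.Stdout, rc)
+}
+```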
+
+
+# Future plans
+
+ * Needs plenty of robust testing: huge datasets, etc...
+ * More thorough benchmarking
+ * Your suggestions for use-cases I haven't thought of
+
+
+# Credits and contributions
+
+Original idea, design and implementation: [Peter Bourgon](https://github.com/peterbourgon)
+Other collaborations: [Javier Peletier](https://github.com/jpeletier) ([Epic Labs](https://www.epiclabs.io))

+ 430 - 0
data_tool/src/github.com/peterbourgon/diskv/basic_test.go

@@ -0,0 +1,430 @@
+package diskv
+
+import (
+	"bytes"
+	"errors"
+	"math/rand"
+	"regexp"
+	"strings"
+	"testing"
+	"time"
+)
+
+func cmpBytes(a, b []byte) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	for i := 0; i < len(a); i++ {
+		if a[i] != b[i] {
+			return false
+		}
+	}
+	return true
+}
+
+func (d *Diskv) isCached(key string) bool {
+	d.mu.RLock()
+	defer d.mu.RUnlock()
+	_, ok := d.cache[key]
+	return ok
+}
+
+func TestWriteReadErase(t *testing.T) {
+	d := New(Options{
+		BasePath:     "test-data",
+		CacheSizeMax: 1024,
+	})
+	defer d.EraseAll()
+	k, v := "a", []byte{'b'}
+	if err := d.Write(k, v); err != nil {
+		t.Fatalf("write: %s", err)
+	}
+	if readVal, err := d.Read(k); err != nil {
+		t.Fatalf("read: %s", err)
+	} else if bytes.Compare(v, readVal) != 0 {
+		t.Fatalf("read: expected %s, got %s", v, readVal)
+	}
+	if err := d.Erase(k); err != nil {
+		t.Fatalf("erase: %s", err)
+	}
+}
+
+func TestWRECache(t *testing.T) {
+	d := New(Options{
+		BasePath:     "test-data",
+		CacheSizeMax: 1024,
+	})
+	defer d.EraseAll()
+	k, v := "xxx", []byte{' ', ' ', ' '}
+	if d.isCached(k) {
+		t.Fatalf("key cached before Write and Read")
+	}
+	if err := d.Write(k, v); err != nil {
+		t.Fatalf("write: %s", err)
+	}
+	if d.isCached(k) {
+		t.Fatalf("key cached before Read")
+	}
+	if readVal, err := d.Read(k); err != nil {
+		t.Fatalf("read: %s", err)
+	} else if bytes.Compare(v, readVal) != 0 {
+		t.Fatalf("read: expected %s, got %s", v, readVal)
+	}
+	for i := 0; i < 10 && !d.isCached(k); i++ {
+		time.Sleep(10 * time.Millisecond)
+	}
+	if !d.isCached(k) {
+		t.Fatalf("key not cached after Read")
+	}
+	if err := d.Erase(k); err != nil {
+		t.Fatalf("erase: %s", err)
+	}
+	if d.isCached(k) {
+		t.Fatalf("key cached after Erase")
+	}
+}
+
+func TestStrings(t *testing.T) {
+	d := New(Options{
+		BasePath:     "test-data",
+		CacheSizeMax: 1024,
+	})
+	defer d.EraseAll()
+
+	keys := map[string]bool{"a": false, "b": false, "c": false, "d": false}
+	v := []byte{'1'}
+	for k := range keys {
+		if err := d.Write(k, v); err != nil {
+			t.Fatalf("write: %s: %s", k, err)
+		}
+	}
+
+	for k := range d.Keys(nil) {
+		if _, present := keys[k]; present {
+			t.Logf("got: %s", k)
+			keys[k] = true
+		} else {
+			t.Fatalf("strings() returns unknown key: %s", k)
+		}
+	}
+
+	for k, found := range keys {
+		if !found {
+			t.Errorf("never got %s", k)
+		}
+	}
+}
+
+func TestZeroByteCache(t *testing.T) {
+	d := New(Options{
+		BasePath:     "test-data",
+		CacheSizeMax: 0,
+	})
+	defer d.EraseAll()
+
+	k, v := "a", []byte{'1', '2', '3'}
+	if err := d.Write(k, v); err != nil {
+		t.Fatalf("Write: %s", err)
+	}
+
+	if d.isCached(k) {
+		t.Fatalf("key cached, expected not-cached")
+	}
+
+	if _, err := d.Read(k); err != nil {
+		t.Fatalf("Read: %s", err)
+	}
+
+	if d.isCached(k) {
+		t.Fatalf("key cached, expected not-cached")
+	}
+}
+
+func TestOneByteCache(t *testing.T) {
+	d := New(Options{
+		BasePath:     "test-data",
+		CacheSizeMax: 1,
+	})
+	defer d.EraseAll()
+
+	k1, k2, v1, v2 := "a", "b", []byte{'1'}, []byte{'1', '2'}
+	if err := d.Write(k1, v1); err != nil {
+		t.Fatal(err)
+	}
+
+	if v, err := d.Read(k1); err != nil {
+		t.Fatal(err)
+	} else if !cmpBytes(v, v1) {
+		t.Fatalf("Read: expected %s, got %s", string(v1), string(v))
+	}
+
+	for i := 0; i < 10 && !d.isCached(k1); i++ {
+		time.Sleep(10 * time.Millisecond)
+	}
+	if !d.isCached(k1) {
+		t.Fatalf("expected 1-byte value to be cached, but it wasn't")
+	}
+
+	if err := d.Write(k2, v2); err != nil {
+		t.Fatal(err)
+	}
+	if _, err := d.Read(k2); err != nil {
+		t.Fatalf("--> %s", err)
+	}
+
+	for i := 0; i < 10 && (!d.isCached(k1) || d.isCached(k2)); i++ {
+		time.Sleep(10 * time.Millisecond) // just wait for lazy-cache
+	}
+	if !d.isCached(k1) {
+		t.Fatalf("1-byte value was uncached for no reason")
+	}
+
+	if d.isCached(k2) {
+		t.Fatalf("2-byte value was cached, but cache max size is 1")
+	}
+}
+
+func TestStaleCache(t *testing.T) {
+	d := New(Options{
+		BasePath:     "test-data",
+		CacheSizeMax: 1,
+	})
+	defer d.EraseAll()
+
+	k, first, second := "a", "first", "second"
+	if err := d.Write(k, []byte(first)); err != nil {
+		t.Fatal(err)
+	}
+
+	v, err := d.Read(k)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if string(v) != first {
+		t.Errorf("expected '%s', got '%s'", first, v)
+	}
+
+	if err := d.Write(k, []byte(second)); err != nil {
+		t.Fatal(err)
+	}
+
+	v, err = d.Read(k)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if string(v) != second {
+		t.Errorf("expected '%s', got '%s'", second, v)
+	}
+}
+
+func TestHas(t *testing.T) {
+	d := New(Options{
+		BasePath:     "test-data",
+		CacheSizeMax: 1024,
+	})
+	defer d.EraseAll()
+
+	for k, v := range map[string]string{
+		"a":      "1",
+		"foo":    "2",
+		"012345": "3",
+	} {
+		d.Write(k, []byte(v))
+	}
+
+	d.Read("foo") // cache one of them
+	if !d.isCached("foo") {
+		t.Errorf("'foo' didn't get cached")
+	}
+
+	for _, tuple := range []struct {
+		key      string
+		expected bool
+	}{
+		{"a", true},
+		{"b", false},
+		{"foo", true},
+		{"bar", false},
+		{"01234", false},
+		{"012345", true},
+		{"0123456", false},
+	} {
+		if expected, got := tuple.expected, d.Has(tuple.key); expected != got {
+			t.Errorf("Has(%s): expected %v, got %v", tuple.key, expected, got)
+		}
+	}
+}
+
+type BrokenReader struct{}
+
+func (BrokenReader) Read(p []byte) (n int, err error) {
+	return 0, errors.New("failed to read")
+}
+
+func TestRemovesIncompleteFiles(t *testing.T) {
+	opts := Options{
+		BasePath:     "test-data",
+		CacheSizeMax: 1024,
+	}
+	d := New(opts)
+	defer d.EraseAll()
+
+	key, stream, sync := "key", BrokenReader{}, false
+
+	if err := d.WriteStream(key, stream, sync); err == nil {
+		t.Fatalf("Expected i/o copy error, none received.")
+	}
+
+	if _, err := d.Read(key); err == nil {
+		t.Fatal("Could read the key, but it shouldn't exist")
+	}
+}
+
+func TestTempDir(t *testing.T) {
+	opts := Options{
+		BasePath:     "test-data",
+		TempDir:      "test-data-temp",
+		CacheSizeMax: 1024,
+	}
+	d := New(opts)
+	defer d.EraseAll()
+
+	k, v := "a", []byte{'b'}
+	if err := d.Write(k, v); err != nil {
+		t.Fatalf("write: %s", err)
+	}
+	if readVal, err := d.Read(k); err != nil {
+		t.Fatalf("read: %s", err)
+	} else if bytes.Compare(v, readVal) != 0 {
+		t.Fatalf("read: expected %s, got %s", v, readVal)
+	}
+	if err := d.Erase(k); err != nil {
+		t.Fatalf("erase: %s", err)
+	}
+}
+
+type CrashingReader struct{}
+
+func (CrashingReader) Read(p []byte) (n int, err error) {
+	panic("System has crashed while reading the stream")
+}
+
+func TestAtomicWrite(t *testing.T) {
+	opts := Options{
+		BasePath: "test-data",
+		// Test would fail if TempDir is not set here.
+		TempDir:      "test-data-temp",
+		CacheSizeMax: 1024,
+	}
+	d := New(opts)
+	defer d.EraseAll()
+
+	key := "key"
+	func() {
+		defer func() {
+			recover() // Ignore panicking error
+		}()
+
+		stream := CrashingReader{}
+		d.WriteStream(key, stream, false)
+	}()
+
+	if d.Has(key) {
+		t.Fatal("Has key, but it shouldn't exist")
+	}
+	if _, ok := <-d.Keys(nil); ok {
+		t.Fatal("Store isn't empty")
+	}
+}
+
+const letterBytes = "abcdef0123456789"
+
+func randStringBytes(n int) string {
+	b := make([]byte, n)
+	for i := range b {
+		b[i] = letterBytes[rand.Intn(len(letterBytes))]
+	}
+	return string(b)
+}
+
+func TestHybridStore(t *testing.T) {
+	regex := regexp.MustCompile("[0-9a-fA-F]{64}")
+
+	transformFunc := func(s string) *PathKey {
+		if regex.MatchString(s) {
+			return &PathKey{Path: []string{"objects", s[0:2]},
+				FileName: s,
+			}
+		}
+
+		folders := strings.Split(s, "/")
+		lfolders := len(folders)
+		if lfolders > 1 {
+			return &PathKey{Path: folders[:lfolders-1],
+				FileName: folders[lfolders-1],
+			}
+		}
+
+		return &PathKey{Path: []string{},
+			FileName: s,
+		}
+	}
+
+	inverseTransformFunc := func(pathKey *PathKey) string {
+		if regex.MatchString(pathKey.FileName) {
+			return pathKey.FileName
+		}
+
+		if len(pathKey.Path) == 0 {
+			return pathKey.FileName
+		}
+
+		return strings.Join(pathKey.Path, "/") + "/" + pathKey.FileName
+	}
+	opts := Options{
+		BasePath:          "test-data",
+		CacheSizeMax:      1024,
+		AdvancedTransform: transformFunc,
+		InverseTransform:  inverseTransformFunc,
+	}
+	d := New(opts)
+	defer d.EraseAll()
+
+	testData := map[string]string{}
+
+	for i := 0; i < 100; i++ {
+		testData[randStringBytes(64)] = randStringBytes(100)
+	}
+
+	for i := 0; i < 100; i++ {
+		testData[randStringBytes(20)] = randStringBytes(100)
+	}
+
+	for i := 0; i < 100; i++ {
+		numsep := rand.Intn(10) + 1
+		key := ""
+		for j := 0; j < numsep; j++ {
+			key += randStringBytes(10) + "/"
+		}
+		key += randStringBytes(40)
+		testData[key] = randStringBytes(100)
+	}
+
+	for k, v := range testData {
+		d.WriteString(k, v)
+	}
+
+	for k, v := range testData {
+		readVal := d.ReadString(k)
+
+		if v != readVal {
+			t.Fatalf("read: expected %s, got %s", v, readVal)
+		}
+	}
+
+}

+ 64 - 0
data_tool/src/github.com/peterbourgon/diskv/compression.go

@@ -0,0 +1,64 @@
+package diskv
+
+import (
+	"compress/flate"
+	"compress/gzip"
+	"compress/zlib"
+	"io"
+)
+
+// Compression is an interface that Diskv uses to implement compression of
+// data. Writer takes a destination io.Writer and returns a WriteCloser that
+// compresses all data written through it. Reader takes a source io.Reader and
+// returns a ReadCloser that decompresses all data read through it. You may
+// define these methods on your own type, or use one of the NewCompression
+// helpers.
+type Compression interface {
+	Writer(dst io.Writer) (io.WriteCloser, error)
+	Reader(src io.Reader) (io.ReadCloser, error)
+}
+
+// NewGzipCompression returns a Gzip-based Compression.
+func NewGzipCompression() Compression {
+	return NewGzipCompressionLevel(flate.DefaultCompression)
+}
+
+// NewGzipCompressionLevel returns a Gzip-based Compression with the given level.
+func NewGzipCompressionLevel(level int) Compression {
+	return &genericCompression{
+		wf: func(w io.Writer) (io.WriteCloser, error) { return gzip.NewWriterLevel(w, level) },
+		rf: func(r io.Reader) (io.ReadCloser, error) { return gzip.NewReader(r) },
+	}
+}
+
+// NewZlibCompression returns a Zlib-based Compression.
+func NewZlibCompression() Compression {
+	return NewZlibCompressionLevel(flate.DefaultCompression)
+}
+
+// NewZlibCompressionLevel returns a Zlib-based Compression with the given level.
+func NewZlibCompressionLevel(level int) Compression {
+	return NewZlibCompressionLevelDict(level, nil)
+}
+
+// NewZlibCompressionLevelDict returns a Zlib-based Compression with the given
+// level, based on the given dictionary.
+func NewZlibCompressionLevelDict(level int, dict []byte) Compression {
+	return &genericCompression{
+		func(w io.Writer) (io.WriteCloser, error) { return zlib.NewWriterLevelDict(w, level, dict) },
+		func(r io.Reader) (io.ReadCloser, error) { return zlib.NewReaderDict(r, dict) },
+	}
+}
+
+type genericCompression struct {
+	wf func(w io.Writer) (io.WriteCloser, error)
+	rf func(r io.Reader) (io.ReadCloser, error)
+}
+
+func (g *genericCompression) Writer(dst io.Writer) (io.WriteCloser, error) {
+	return g.wf(dst)
+}
+
+func (g *genericCompression) Reader(src io.Reader) (io.ReadCloser, error) {
+	return g.rf(src)
+}
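The Compression interface above only asks for a Writer/Reader factory pair, so enabling on-disk compression is a single Options field. A minimal usage sketch (directory name is illustrative; the module path is the one declared in go.mod further down):

package main

import (
	"fmt"

	"github.com/peterbourgon/diskv/v3"
)

func main() {
	// Values are written gzip-compressed on disk; Read transparently decompresses.
	d := diskv.New(diskv.Options{
		BasePath:     "compressed-data",
		CacheSizeMax: 1024 * 1024,
		Compression:  diskv.NewGzipCompression(),
	})
	defer d.EraseAll()

	if err := d.Write("greeting", []byte("hello, hello, hello")); err != nil {
		panic(err)
	}
	val, err := d.Read("greeting")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(val)) // "hello, hello, hello"
}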

+ 72 - 0
data_tool/src/github.com/peterbourgon/diskv/compression_test.go

@@ -0,0 +1,72 @@
+package diskv
+
+import (
+	"compress/flate"
+	"fmt"
+	"math/rand"
+	"os"
+	"testing"
+	"time"
+)
+
+func init() {
+	rand.Seed(time.Now().UnixNano())
+}
+
+func testCompressionWith(t *testing.T, c Compression, name string) {
+	d := New(Options{
+		BasePath:     "compression-test",
+		CacheSizeMax: 0,
+		Compression:  c,
+	})
+	defer d.EraseAll()
+
+	sz := 4096
+	val := make([]byte, sz)
+	for i := 0; i < sz; i++ {
+		val[i] = byte('a' + rand.Intn(26)) // {a-z}; should compress some
+	}
+
+	key := "a"
+	if err := d.Write(key, val); err != nil {
+		t.Fatalf("write failed: %s", err)
+	}
+
+	targetFile := fmt.Sprintf("%s%c%s", d.BasePath, os.PathSeparator, key)
+	fi, err := os.Stat(targetFile)
+	if err != nil {
+		t.Fatalf("%s: %s", targetFile, err)
+	}
+
+	if fi.Size() >= int64(sz) {
+		t.Fatalf("%s: size=%d, expected smaller", targetFile, fi.Size())
+	}
+	t.Logf("%s compressed %d to %d", name, sz, fi.Size())
+
+	readVal, err := d.Read(key)
+	if err != nil {
+		t.Fatalf("read: %s", err)
+	}
+	if len(readVal) != sz {
+		t.Fatalf("read: expected size=%d, got size=%d", sz, len(readVal))
+	}
+
+	for i := 0; i < sz; i++ {
+		if readVal[i] != val[i] {
+			t.Fatalf("i=%d: expected %v, got %v", i, val[i], readVal[i])
+		}
+	}
+}
+
+func TestGzipDefault(t *testing.T) {
+	testCompressionWith(t, NewGzipCompression(), "gzip")
+}
+
+func TestGzipBestCompression(t *testing.T) {
+	testCompressionWith(t, NewGzipCompressionLevel(flate.BestCompression), "gzip-max")
+}
+
+func TestGzipBestSpeed(t *testing.T) {
+	testCompressionWith(t, NewGzipCompressionLevel(flate.BestSpeed), "gzip-min")
+}
+
+func TestZlib(t *testing.T) {
+	testCompressionWith(t, NewZlibCompression(), "zlib")
+}

+ 729 - 0
data_tool/src/github.com/peterbourgon/diskv/diskv.go

@@ -0,0 +1,729 @@
+// Diskv (disk-vee) is a simple, persistent, key-value store.
+// It stores all data flatly on the filesystem.
+
+package diskv
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+	"syscall"
+)
+
+const (
+	defaultBasePath             = "diskv"
+	defaultFilePerm os.FileMode = 0666
+	defaultPathPerm os.FileMode = 0777
+)
+
+// PathKey represents a string key that has been transformed to
+// a directory and file name where the content will eventually
+// be stored
+type PathKey struct {
+	Path        []string
+	FileName    string
+	originalKey string
+}
+
+var (
+	defaultAdvancedTransform = func(s string) *PathKey { return &PathKey{Path: []string{}, FileName: s} }
+	defaultInverseTransform  = func(pathKey *PathKey) string { return pathKey.FileName }
+	errCanceled              = errors.New("canceled")
+	errEmptyKey              = errors.New("empty key")
+	errBadKey                = errors.New("bad key")
+	errImportDirectory       = errors.New("can't import a directory")
+)
+
+// TransformFunction transforms a key into a slice of strings, with each
+// element in the slice representing a directory in the file path where the
+// key's entry will eventually be stored.
+//
+// For example, if TransformFunc transforms "abcdef" to ["ab", "cde", "f"],
+// the final location of the data file will be <basedir>/ab/cde/f/abcdef
+type TransformFunction func(s string) []string
+
+// AdvancedTransformFunction transforms a key into a PathKey.
+//
+// A PathKey contains a slice of strings, where each element in the slice
+// represents a directory in the file path where the key's entry will eventually
+// be stored, as well as the filename.
+//
+// For example, if AdvancedTransformFunc transforms "abcdef/file.txt" to the
+// PathKey {Path: ["ab", "cde", "f"], FileName: "file.txt"}, the final location
+// of the data file will be <basedir>/ab/cde/f/file.txt.
+//
+// You must provide an InverseTransformFunction if you use an
+// AdvancedTransformFunction.
+type AdvancedTransformFunction func(s string) *PathKey
+
+// InverseTransformFunction takes a PathKey and converts it back to a Diskv key.
+// In effect, it's the opposite of an AdvancedTransformFunction.
+type InverseTransformFunction func(pathKey *PathKey) string
+
+// Options define a set of properties that dictate Diskv behavior.
+// All values are optional.
+type Options struct {
+	BasePath          string
+	Transform         TransformFunction
+	AdvancedTransform AdvancedTransformFunction
+	InverseTransform  InverseTransformFunction
+	CacheSizeMax      uint64 // bytes
+	PathPerm          os.FileMode
+	FilePerm          os.FileMode
+	// If TempDir is set, it will enable filesystem atomic writes by
+	// writing temporary files to that location before being moved
+	// to BasePath.
+	// Note that TempDir MUST be on the same device/partition as
+	// BasePath.
+	TempDir string
+
+	Index     Index
+	IndexLess LessFunction
+
+	Compression Compression
+}
+
+// Diskv is the store object itself. You shouldn't construct Diskv
+// structures directly; instead, use the New constructor.
+type Diskv struct {
+	Options
+	mu        sync.RWMutex
+	cache     map[string][]byte
+	cacheSize uint64
+}
+
+// New returns an initialized Diskv structure, ready to use.
+// If the path identified by baseDir already contains data,
+// it will be accessible, but not yet cached.
+func New(o Options) *Diskv {
+	if o.BasePath == "" {
+		o.BasePath = defaultBasePath
+	}
+
+	if o.AdvancedTransform == nil {
+		if o.Transform == nil {
+			o.AdvancedTransform = defaultAdvancedTransform
+		} else {
+			o.AdvancedTransform = convertToAdvancedTransform(o.Transform)
+		}
+		if o.InverseTransform == nil {
+			o.InverseTransform = defaultInverseTransform
+		}
+	} else {
+		if o.InverseTransform == nil {
+			panic("You must provide an InverseTransform function in advanced mode")
+		}
+	}
+
+	if o.PathPerm == 0 {
+		o.PathPerm = defaultPathPerm
+	}
+	if o.FilePerm == 0 {
+		o.FilePerm = defaultFilePerm
+	}
+
+	d := &Diskv{
+		Options:   o,
+		cache:     map[string][]byte{},
+		cacheSize: 0,
+	}
+
+	if d.Index != nil && d.IndexLess != nil {
+		d.Index.Initialize(d.IndexLess, d.Keys(nil))
+	}
+
+	return d
+}
+
+// convertToAdvancedTransform takes a classic Transform function and
+// converts it to the new AdvancedTransform
+func convertToAdvancedTransform(oldFunc func(s string) []string) AdvancedTransformFunction {
+	return func(s string) *PathKey {
+		return &PathKey{Path: oldFunc(s), FileName: s}
+	}
+}
+
+// Write synchronously writes the key-value pair to disk, making it immediately
+// available for reads. Write relies on the filesystem to perform an eventual
+// sync to physical media. If you need stronger guarantees, see WriteStream.
+func (d *Diskv) Write(key string, val []byte) error {
+	return d.WriteStream(key, bytes.NewReader(val), false)
+}
+
+// WriteString writes a string key-value pair to disk
+func (d *Diskv) WriteString(key string, val string) error {
+	return d.Write(key, []byte(val))
+}
+
+func (d *Diskv) transform(key string) (pathKey *PathKey) {
+	pathKey = d.AdvancedTransform(key)
+	pathKey.originalKey = key
+	return pathKey
+}
+
+// WriteStream writes the data represented by the io.Reader to the disk, under
+// the provided key. If sync is true, WriteStream performs an explicit sync on
+// the file as soon as it's written.
+//
+// bytes.Buffer provides io.Reader semantics for basic data types.
+func (d *Diskv) WriteStream(key string, r io.Reader, sync bool) error {
+	if len(key) <= 0 {
+		return errEmptyKey
+	}
+
+	pathKey := d.transform(key)
+
+	// Ensure keys cannot evaluate to paths that would not exist
+	for _, pathPart := range pathKey.Path {
+		if strings.ContainsRune(pathPart, os.PathSeparator) {
+			return errBadKey
+		}
+	}
+
+	if strings.ContainsRune(pathKey.FileName, os.PathSeparator) {
+		return errBadKey
+	}
+
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	return d.writeStreamWithLock(pathKey, r, sync)
+}
+
+// createKeyFileWithLock either creates the key file directly, or
+// creates a temporary file in TempDir if it is set.
+func (d *Diskv) createKeyFileWithLock(pathKey *PathKey) (*os.File, error) {
+	if d.TempDir != "" {
+		if err := os.MkdirAll(d.TempDir, d.PathPerm); err != nil {
+			return nil, fmt.Errorf("temp mkdir: %s", err)
+		}
+		f, err := ioutil.TempFile(d.TempDir, "")
+		if err != nil {
+			return nil, fmt.Errorf("temp file: %s", err)
+		}
+
+		if err := os.Chmod(f.Name(), d.FilePerm); err != nil {
+			f.Close()           // error deliberately ignored
+			os.Remove(f.Name()) // error deliberately ignored
+			return nil, fmt.Errorf("chmod: %s", err)
+		}
+		return f, nil
+	}
+
+	mode := os.O_WRONLY | os.O_CREATE | os.O_TRUNC // overwrite if exists
+	f, err := os.OpenFile(d.completeFilename(pathKey), mode, d.FilePerm)
+	if err != nil {
+		return nil, fmt.Errorf("open file: %s", err)
+	}
+	return f, nil
+}
+
+// writeStream does no input validation checking.
+func (d *Diskv) writeStreamWithLock(pathKey *PathKey, r io.Reader, sync bool) error {
+	if err := d.ensurePathWithLock(pathKey); err != nil {
+		return fmt.Errorf("ensure path: %s", err)
+	}
+
+	f, err := d.createKeyFileWithLock(pathKey)
+	if err != nil {
+		return fmt.Errorf("create key file: %s", err)
+	}
+
+	wc := io.WriteCloser(&nopWriteCloser{f})
+	if d.Compression != nil {
+		wc, err = d.Compression.Writer(f)
+		if err != nil {
+			f.Close()           // error deliberately ignored
+			os.Remove(f.Name()) // error deliberately ignored
+			return fmt.Errorf("compression writer: %s", err)
+		}
+	}
+
+	if _, err := io.Copy(wc, r); err != nil {
+		f.Close()           // error deliberately ignored
+		os.Remove(f.Name()) // error deliberately ignored
+		return fmt.Errorf("i/o copy: %s", err)
+	}
+
+	if err := wc.Close(); err != nil {
+		f.Close()           // error deliberately ignored
+		os.Remove(f.Name()) // error deliberately ignored
+		return fmt.Errorf("compression close: %s", err)
+	}
+
+	if sync {
+		if err := f.Sync(); err != nil {
+			f.Close()           // error deliberately ignored
+			os.Remove(f.Name()) // error deliberately ignored
+			return fmt.Errorf("file sync: %s", err)
+		}
+	}
+
+	if err := f.Close(); err != nil {
+		return fmt.Errorf("file close: %s", err)
+	}
+
+	fullPath := d.completeFilename(pathKey)
+	if f.Name() != fullPath {
+		if err := os.Rename(f.Name(), fullPath); err != nil {
+			os.Remove(f.Name()) // error deliberately ignored
+			return fmt.Errorf("rename: %s", err)
+		}
+	}
+
+	if d.Index != nil {
+		d.Index.Insert(pathKey.originalKey)
+	}
+
+	d.bustCacheWithLock(pathKey.originalKey) // cache only on read
+
+	return nil
+}
+
+// Import imports the source file into diskv under the destination key. If the
+// destination key already exists, it's overwritten. If move is true, the
+// source file is removed after a successful import.
+func (d *Diskv) Import(srcFilename, dstKey string, move bool) (err error) {
+	if dstKey == "" {
+		return errEmptyKey
+	}
+
+	if fi, err := os.Stat(srcFilename); err != nil {
+		return err
+	} else if fi.IsDir() {
+		return errImportDirectory
+	}
+
+	dstPathKey := d.transform(dstKey)
+
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	if err := d.ensurePathWithLock(dstPathKey); err != nil {
+		return fmt.Errorf("ensure path: %s", err)
+	}
+
+	if move {
+		if err := syscall.Rename(srcFilename, d.completeFilename(dstPathKey)); err == nil {
+			d.bustCacheWithLock(dstPathKey.originalKey)
+			return nil
+		} else if err != syscall.EXDEV {
+			// The rename failed for a reason other than crossing devices, so give up.
+			// (On EXDEV we fall through below and copy the file contents instead.)
+			return err
+		}
+	}
+
+	f, err := os.Open(srcFilename)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	err = d.writeStreamWithLock(dstPathKey, f, false)
+	if err == nil && move {
+		err = os.Remove(srcFilename)
+	}
+	return err
+}
+
+// Read reads the key and returns the value.
+// If the key is available in the cache, Read won't touch the disk.
+// If the key is not in the cache, Read will have the side-effect of
+// lazily caching the value.
+func (d *Diskv) Read(key string) ([]byte, error) {
+	rc, err := d.ReadStream(key, false)
+	if err != nil {
+		return []byte{}, err
+	}
+	defer rc.Close()
+	return ioutil.ReadAll(rc)
+}
+
+// ReadString reads the key and returns the value as a string.
+// In case of error, an empty string is returned.
+func (d *Diskv) ReadString(key string) string {
+	value, _ := d.Read(key)
+	return string(value)
+}
+
+// ReadStream reads the key and returns the value (data) as an io.ReadCloser.
+// If the value is cached from a previous read, and direct is false,
+// ReadStream will use the cached value. Otherwise, it will return a handle to
+// the file on disk, and cache the data on read.
+//
+// If direct is true, ReadStream will lazily delete any cached value for the
+// key, and return a direct handle to the file on disk.
+//
+// If compression is enabled, ReadStream taps into the io.Reader stream prior
+// to decompression, and caches the compressed data.
+func (d *Diskv) ReadStream(key string, direct bool) (io.ReadCloser, error) {
+
+	pathKey := d.transform(key)
+	d.mu.RLock()
+	defer d.mu.RUnlock()
+
+	if val, ok := d.cache[key]; ok {
+		if !direct {
+			buf := bytes.NewReader(val)
+			if d.Compression != nil {
+				return d.Compression.Reader(buf)
+			}
+			return ioutil.NopCloser(buf), nil
+		}
+
+		go func() {
+			d.mu.Lock()
+			defer d.mu.Unlock()
+			d.uncacheWithLock(key, uint64(len(val)))
+		}()
+	}
+
+	return d.readWithRLock(pathKey)
+}
+
+// read ignores the cache, and returns an io.ReadCloser representing the
+// decompressed data for the given key, streamed from the disk. Clients should
+// acquire a read lock on the Diskv and check the cache themselves before
+// calling read.
+func (d *Diskv) readWithRLock(pathKey *PathKey) (io.ReadCloser, error) {
+	filename := d.completeFilename(pathKey)
+
+	fi, err := os.Stat(filename)
+	if err != nil {
+		return nil, err
+	}
+	if fi.IsDir() {
+		return nil, os.ErrNotExist
+	}
+
+	f, err := os.Open(filename)
+	if err != nil {
+		return nil, err
+	}
+
+	var r io.Reader
+	if d.CacheSizeMax > 0 {
+		r = newSiphon(f, d, pathKey.originalKey)
+	} else {
+		r = &closingReader{f}
+	}
+
+	var rc = io.ReadCloser(ioutil.NopCloser(r))
+	if d.Compression != nil {
+		rc, err = d.Compression.Reader(r)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return rc, nil
+}
+
+// closingReader provides a Reader that automatically closes the
+// embedded ReadCloser when it reaches EOF
+type closingReader struct {
+	rc io.ReadCloser
+}
+
+func (cr closingReader) Read(p []byte) (int, error) {
+	n, err := cr.rc.Read(p)
+	if err == io.EOF {
+		if closeErr := cr.rc.Close(); closeErr != nil {
+			return n, closeErr // close must succeed for Read to succeed
+		}
+	}
+	return n, err
+}
+
+// siphon is like a TeeReader: it copies all data read through it to an
+// internal buffer, and moves that buffer to the cache at EOF.
+type siphon struct {
+	f   *os.File
+	d   *Diskv
+	key string
+	buf *bytes.Buffer
+}
+
+// newSiphon constructs a siphoning reader that represents the passed file.
+// When a successful series of reads ends in an EOF, the siphon will write
+// the buffered data to Diskv's cache under the given key.
+func newSiphon(f *os.File, d *Diskv, key string) io.Reader {
+	return &siphon{
+		f:   f,
+		d:   d,
+		key: key,
+		buf: &bytes.Buffer{},
+	}
+}
+
+// Read implements the io.Reader interface for siphon.
+func (s *siphon) Read(p []byte) (int, error) {
+	n, err := s.f.Read(p)
+
+	if err == nil {
+		return s.buf.Write(p[0:n]) // Write must succeed for Read to succeed
+	}
+
+	if err == io.EOF {
+		s.d.cacheWithoutLock(s.key, s.buf.Bytes()) // cache may fail
+		if closeErr := s.f.Close(); closeErr != nil {
+			return n, closeErr // close must succeed for Read to succeed
+		}
+		return n, err
+	}
+
+	return n, err
+}
+
+// Erase synchronously erases the given key from the disk and the cache.
+func (d *Diskv) Erase(key string) error {
+	pathKey := d.transform(key)
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	d.bustCacheWithLock(key)
+
+	// erase from index
+	if d.Index != nil {
+		d.Index.Delete(key)
+	}
+
+	// erase from disk
+	filename := d.completeFilename(pathKey)
+	if s, err := os.Stat(filename); err == nil {
+		if s.IsDir() {
+			return errBadKey
+		}
+		if err = os.Remove(filename); err != nil {
+			return err
+		}
+	} else {
+		// Return err as-is so caller can do os.IsNotExist(err).
+		return err
+	}
+
+	// clean up and return
+	d.pruneDirsWithLock(key)
+	return nil
+}
+
+// EraseAll will delete all of the data from the store, both in the cache and on
+// the disk. Note that EraseAll doesn't distinguish diskv-related data from non-
+// diskv-related data. Care should be taken to always specify a diskv base
+// directory that is exclusively for diskv data.
+func (d *Diskv) EraseAll() error {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+	d.cache = make(map[string][]byte)
+	d.cacheSize = 0
+	if d.TempDir != "" {
+		os.RemoveAll(d.TempDir) // errors ignored
+	}
+	return os.RemoveAll(d.BasePath)
+}
+
+// Has returns true if the given key exists.
+func (d *Diskv) Has(key string) bool {
+	pathKey := d.transform(key)
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	if _, ok := d.cache[key]; ok {
+		return true
+	}
+
+	filename := d.completeFilename(pathKey)
+	s, err := os.Stat(filename)
+	if err != nil {
+		return false
+	}
+	if s.IsDir() {
+		return false
+	}
+
+	return true
+}
+
+// Keys returns a channel that will yield every key accessible by the store,
+// in undefined order. If a cancel channel is provided, closing it will
+// terminate and close the keys channel.
+func (d *Diskv) Keys(cancel <-chan struct{}) <-chan string {
+	return d.KeysPrefix("", cancel)
+}
+
+// KeysPrefix returns a channel that will yield every key accessible by the
+// store with the given prefix, in undefined order. If a cancel channel is
+// provided, closing it will terminate and close the keys channel. If the
+// provided prefix is the empty string, all keys will be yielded.
+func (d *Diskv) KeysPrefix(prefix string, cancel <-chan struct{}) <-chan string {
+	var prepath string
+	if prefix == "" {
+		prepath = d.BasePath
+	} else {
+		prefixKey := d.transform(prefix)
+		prepath = d.pathFor(prefixKey)
+	}
+	c := make(chan string)
+	go func() {
+		filepath.Walk(prepath, d.walker(c, prefix, cancel))
+		close(c)
+	}()
+	return c
+}
+
+// walker returns a function which satisfies the filepath.WalkFunc interface.
+// It sends every non-directory file entry down the channel c.
+func (d *Diskv) walker(c chan<- string, prefix string, cancel <-chan struct{}) filepath.WalkFunc {
+	return func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		relPath, _ := filepath.Rel(d.BasePath, path)
+		dir, file := filepath.Split(relPath)
+		pathSplit := strings.Split(dir, string(filepath.Separator))
+		pathSplit = pathSplit[:len(pathSplit)-1]
+
+		pathKey := &PathKey{
+			Path:     pathSplit,
+			FileName: file,
+		}
+
+		key := d.InverseTransform(pathKey)
+
+		if info.IsDir() || !strings.HasPrefix(key, prefix) {
+			return nil // "pass"
+		}
+
+		select {
+		case c <- key:
+		case <-cancel:
+			return errCanceled
+		}
+
+		return nil
+	}
+}
+
+// pathFor returns the absolute path for location on the filesystem where the
+// data for the given key will be stored.
+func (d *Diskv) pathFor(pathKey *PathKey) string {
+	return filepath.Join(d.BasePath, filepath.Join(pathKey.Path...))
+}
+
+// ensurePathWithLock is a helper function that generates all necessary
+// directories on the filesystem for the given key.
+func (d *Diskv) ensurePathWithLock(pathKey *PathKey) error {
+	return os.MkdirAll(d.pathFor(pathKey), d.PathPerm)
+}
+
+// completeFilename returns the absolute path to the file for the given key.
+func (d *Diskv) completeFilename(pathKey *PathKey) string {
+	return filepath.Join(d.pathFor(pathKey), pathKey.FileName)
+}
+
+// cacheWithLock attempts to cache the given key-value pair in the store's
+// cache. It can fail if the value is larger than the cache's maximum size.
+func (d *Diskv) cacheWithLock(key string, val []byte) error {
+	// If the key already exists, delete it.
+	d.bustCacheWithLock(key)
+
+	valueSize := uint64(len(val))
+	if err := d.ensureCacheSpaceWithLock(valueSize); err != nil {
+		return fmt.Errorf("%s; not caching", err)
+	}
+
+	// be very strict about memory guarantees
+	if (d.cacheSize + valueSize) > d.CacheSizeMax {
+		panic(fmt.Sprintf("failed to make room for value (%d/%d)", valueSize, d.CacheSizeMax))
+	}
+
+	d.cache[key] = val
+	d.cacheSize += valueSize
+	return nil
+}
+
+// cacheWithoutLock acquires the store's (write) mutex and calls cacheWithLock.
+func (d *Diskv) cacheWithoutLock(key string, val []byte) error {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+	return d.cacheWithLock(key, val)
+}
+
+func (d *Diskv) bustCacheWithLock(key string) {
+	if val, ok := d.cache[key]; ok {
+		d.uncacheWithLock(key, uint64(len(val)))
+	}
+}
+
+func (d *Diskv) uncacheWithLock(key string, sz uint64) {
+	d.cacheSize -= sz
+	delete(d.cache, key)
+}
+
+// pruneDirsWithLock deletes empty directories in the path walk leading to the
+// key k. Typically this function is called after an Erase is made.
+func (d *Diskv) pruneDirsWithLock(key string) error {
+	pathlist := d.transform(key).Path
+	for i := range pathlist {
+		dir := filepath.Join(d.BasePath, filepath.Join(pathlist[:len(pathlist)-i]...))
+
+		// thanks to Steven Blenkinsop for this snippet
+		switch fi, err := os.Stat(dir); true {
+		case err != nil:
+			return err
+		case !fi.IsDir():
+			panic(fmt.Sprintf("corrupt dirstate at %s", dir))
+		}
+
+		nlinks, err := filepath.Glob(filepath.Join(dir, "*"))
+		if err != nil {
+			return err
+		} else if len(nlinks) > 0 {
+			return nil // has subdirs -- do not prune
+		}
+		if err = os.Remove(dir); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// ensureCacheSpaceWithLock deletes entries from the cache in arbitrary order
+// until the cache has at least valueSize bytes available.
+func (d *Diskv) ensureCacheSpaceWithLock(valueSize uint64) error {
+	if valueSize > d.CacheSizeMax {
+		return fmt.Errorf("value size (%d bytes) too large for cache (%d bytes)", valueSize, d.CacheSizeMax)
+	}
+
+	safe := func() bool { return (d.cacheSize + valueSize) <= d.CacheSizeMax }
+
+	for key, val := range d.cache {
+		if safe() {
+			break
+		}
+
+		d.uncacheWithLock(key, uint64(len(val)))
+	}
+
+	if !safe() {
+		panic(fmt.Sprintf("%d bytes still won't fit in the cache! (max %d bytes)", valueSize, d.CacheSizeMax))
+	}
+
+	return nil
+}
+
+// nopWriteCloser wraps an io.Writer and provides a no-op Close method to
+// satisfy the io.WriteCloser interface.
+type nopWriteCloser struct {
+	io.Writer
+}
+
+func (wc *nopWriteCloser) Write(p []byte) (int, error) { return wc.Writer.Write(p) }
+func (wc *nopWriteCloser) Close() error                { return nil }
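The WriteStream and ReadStream doc comments above describe the sync and direct flags: sync forces an fsync before the write returns, and direct bypasses (and lazily drops) any cached value so the read always comes from disk. A small sketch of both, using only the API defined in this file (directory name is illustrative):

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"github.com/peterbourgon/diskv/v3"
)

func main() {
	d := diskv.New(diskv.Options{
		BasePath:     "stream-demo",
		CacheSizeMax: 1024,
	})
	defer d.EraseAll()

	// sync=true: the file is fsynced before WriteStream returns.
	if err := d.WriteStream("k", bytes.NewBufferString("v1"), true); err != nil {
		panic(err)
	}

	// direct=true: skip the in-memory cache and read straight from disk.
	rc, err := d.ReadStream("k", true)
	if err != nil {
		panic(err)
	}
	defer rc.Close()

	b, err := ioutil.ReadAll(rc)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // "v1"
}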

+ 41 - 0
data_tool/src/github.com/peterbourgon/diskv/examples/advanced-transform/advanced-transform.go

@@ -0,0 +1,41 @@
+package main
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/peterbourgon/diskv/v3"
+)
+
+func AdvancedTransformExample(key string) *diskv.PathKey {
+	path := strings.Split(key, "/")
+	last := len(path) - 1
+	return &diskv.PathKey{
+		Path:     path[:last],
+		FileName: path[last] + ".txt",
+	}
+}
+
+// If you provide an AdvancedTransform, you must also provide its
+// inverse:
+
+func InverseTransformExample(pathKey *diskv.PathKey) (key string) {
+	txt := pathKey.FileName[len(pathKey.FileName)-4:]
+	if txt != ".txt" {
+		panic("Invalid file found in storage folder!")
+	}
+	// Re-join the directories and the file name (minus ".txt") with "/" separators.
+	return strings.Join(append(pathKey.Path, pathKey.FileName[:len(pathKey.FileName)-4]), "/")
+}
+
+func main() {
+	d := diskv.New(diskv.Options{
+		BasePath:          "my-data-dir",
+		AdvancedTransform: AdvancedTransformExample,
+		InverseTransform:  InverseTransformExample,
+		CacheSizeMax:      1024 * 1024,
+	})
+	// Write some text to the key "alpha/beta/gamma".
+	key := "alpha/beta/gamma"
+	d.WriteString(key, "¡Hola!") // will be stored in "<basedir>/alpha/beta/gamma.txt"
+	fmt.Println(d.ReadString("alpha/beta/gamma"))
+}

+ 63 - 0
data_tool/src/github.com/peterbourgon/diskv/examples/content-addressable-store/cas.go

@@ -0,0 +1,63 @@
+package main
+
+import (
+	"crypto/md5"
+	"fmt"
+	"io"
+
+	"github.com/peterbourgon/diskv/v3"
+)
+
+const transformBlockSize = 2 // grouping of chars per directory depth
+
+func blockTransform(s string) []string {
+	var (
+		sliceSize = len(s) / transformBlockSize
+		pathSlice = make([]string, sliceSize)
+	)
+	for i := 0; i < sliceSize; i++ {
+		from, to := i*transformBlockSize, (i*transformBlockSize)+transformBlockSize
+		pathSlice[i] = s[from:to]
+	}
+	return pathSlice
+}
+
+func main() {
+	d := diskv.New(diskv.Options{
+		BasePath:     "data",
+		Transform:    blockTransform,
+		CacheSizeMax: 1024 * 1024, // 1MB
+	})
+
+	for _, valueStr := range []string{
+		"I am the very model of a modern Major-General",
+		"I've information vegetable, animal, and mineral",
+		"I know the kings of England, and I quote the fights historical",
+		"From Marathon to Waterloo, in order categorical",
+		"I'm very well acquainted, too, with matters mathematical",
+		"I understand equations, both the simple and quadratical",
+		"About binomial theorem I'm teeming with a lot o' news",
+		"With many cheerful facts about the square of the hypotenuse",
+	} {
+		d.Write(md5sum(valueStr), []byte(valueStr))
+	}
+
+	var keyCount int
+	for key := range d.Keys(nil) {
+		val, err := d.Read(key)
+		if err != nil {
+			panic(fmt.Sprintf("key %s had no value", key))
+		}
+		fmt.Printf("%s: %s\n", key, val)
+		keyCount++
+	}
+	fmt.Printf("%d total keys\n", keyCount)
+
+	// d.EraseAll() // leave it commented out to see how data is kept on disk
+}
+
+func md5sum(s string) string {
+	h := md5.New()
+	io.WriteString(h, s)
+	return fmt.Sprintf("%x", h.Sum(nil))
+}

+ 75 - 0
data_tool/src/github.com/peterbourgon/diskv/examples/git-like-store/git-like-store.go

@@ -0,0 +1,75 @@
+package main
+
+/* This example uses a more advanced transform function that loosely mimics
+how Git stores objects:
+
+* hash-like keys are placed under the objects directory
+* any other key is placed in the base directory; if the key contains
+  slashes, they are converted to subdirectories
+
+*/
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+
+	"github.com/peterbourgon/diskv/v3"
+)
+
+var hex40 = regexp.MustCompile("[0-9a-fA-F]{40}")
+
+func hexTransform(s string) *diskv.PathKey {
+	if hex40.MatchString(s) {
+		return &diskv.PathKey{Path: []string{"objects", s[0:2]},
+			FileName: s,
+		}
+	}
+
+	folders := strings.Split(s, "/")
+	lfolders := len(folders)
+	if lfolders > 1 {
+		return &diskv.PathKey{Path: folders[:lfolders-1],
+			FileName: folders[lfolders-1],
+		}
+	}
+
+	return &diskv.PathKey{Path: []string{},
+		FileName: s,
+	}
+}
+
+func hexInverseTransform(pathKey *diskv.PathKey) string {
+	if hex40.MatchString(pathKey.FileName) {
+		return pathKey.FileName
+	}
+
+	if len(pathKey.Path) == 0 {
+		return pathKey.FileName
+	}
+
+	return strings.Join(pathKey.Path, "/") + "/" + pathKey.FileName
+}
+
+func main() {
+	d := diskv.New(diskv.Options{
+		BasePath:          "my-data-dir",
+		AdvancedTransform: hexTransform,
+		InverseTransform:  hexInverseTransform,
+		CacheSizeMax:      1024 * 1024,
+	})
+
+	// Write some text under a hash-like key; it will land in the objects directory.
+	key := "1bd88421b055327fcc8660c76c4894c4ea4c95d7"
+	d.WriteString(key, "¡Hola!") // will be stored in "<basedir>/objects/1b/1bd88421b055327fcc8660c76c4894c4ea4c95d7"
+
+	d.WriteString("refs/heads/master", "some text") // will be stored in "<basedir>/refs/heads/master"
+
+	fmt.Println("Enumerating All keys:")
+	c := d.Keys(nil)
+
+	for key := range c {
+		value := d.ReadString(key)
+		fmt.Printf("Key: %s, Value: %s\n", key, value)
+	}
+}

+ 29 - 0
data_tool/src/github.com/peterbourgon/diskv/examples/super-simple-store/super-simple-store.go

@@ -0,0 +1,29 @@
+package main
+
+import (
+	"fmt"
+
+	"github.com/peterbourgon/diskv/v3"
+)
+
+func main() {
+	d := diskv.New(diskv.Options{
+		BasePath:     "my-diskv-data-directory",
+		CacheSizeMax: 1024 * 1024, // 1MB
+	})
+
+	key := "alpha"
+	if err := d.Write(key, []byte{'1', '2', '3'}); err != nil {
+		panic(err)
+	}
+
+	value, err := d.Read(key)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("%v\n", value)
+
+	if err := d.Erase(key); err != nil {
+		panic(err)
+	}
+}

+ 5 - 0
data_tool/src/github.com/peterbourgon/diskv/go.mod

@@ -0,0 +1,5 @@
+module github.com/peterbourgon/diskv/v3
+
+go 1.12
+
+require github.com/google/btree v1.0.0

+ 2 - 0
data_tool/src/github.com/peterbourgon/diskv/go.sum

@@ -0,0 +1,2 @@
+github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=

+ 74 - 0
data_tool/src/github.com/peterbourgon/diskv/import_test.go

@@ -0,0 +1,74 @@
+package diskv_test
+
+import (
+	"bytes"
+	"io/ioutil"
+	"os"
+
+	"testing"
+)
+
+func TestImportMove(t *testing.T) {
+	b := []byte(`0123456789`)
+	f, err := ioutil.TempFile("", "temp-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := f.Write(b); err != nil {
+		t.Fatal(err)
+	}
+	f.Close()
+
+	d := New(Options{
+		BasePath: "test-import-move",
+	})
+	defer d.EraseAll()
+
+	key := "key"
+
+	if err := d.Write(key, []byte(`TBD`)); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := d.Import(f.Name(), key, true); err != nil {
+		t.Fatal(err)
+	}
+
+	if _, err := os.Stat(f.Name()); err == nil || !os.IsNotExist(err) {
+		t.Errorf("expected temp file to be gone, but err = %v", err)
+	}
+
+	if !d.Has(key) {
+		t.Errorf("%q not present", key)
+	}
+
+	if buf, err := d.Read(key); err != nil || bytes.Compare(b, buf) != 0 {
+		t.Errorf("want %q, have %q (err = %v)", string(b), string(buf), err)
+	}
+}
+
+func TestImportCopy(t *testing.T) {
+	b := []byte(`¡åéîòü!`)
+
+	f, err := ioutil.TempFile("", "temp-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := f.Write(b); err != nil {
+		t.Fatal(err)
+	}
+	f.Close()
+
+	d := New(Options{
+		BasePath: "test-import-copy",
+	})
+	defer d.EraseAll()
+
+	if err := d.Import(f.Name(), "key", false); err != nil {
+		t.Fatal(err)
+	}
+
+	if _, err := os.Stat(f.Name()); err != nil {
+		t.Errorf("expected temp file to remain, but got err = %v", err)
+	}
+}
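The two tests above cover both modes of Import: with move=true the source file is renamed into place (falling back to a copy when it lives on a different device) and then removed, while move=false copies it and leaves the source intact. A short sketch; the source path is only a placeholder:

package main

import (
	"fmt"

	"github.com/peterbourgon/diskv/v3"
)

func main() {
	d := diskv.New(diskv.Options{BasePath: "import-demo"})
	defer d.EraseAll()

	// move=false: /tmp/report.csv (placeholder path) is copied into the store
	// under the key "report"; the original file stays where it is.
	if err := d.Import("/tmp/report.csv", "report", false); err != nil {
		panic(err)
	}
	fmt.Println(d.Has("report")) // true
}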

+ 115 - 0
data_tool/src/github.com/peterbourgon/diskv/index.go

@@ -0,0 +1,115 @@
+package diskv
+
+import (
+	"sync"
+
+	"github.com/google/btree"
+)
+
+// Index is a generic interface for things that can
+// provide an ordered list of keys.
+type Index interface {
+	Initialize(less LessFunction, keys <-chan string)
+	Insert(key string)
+	Delete(key string)
+	Keys(from string, n int) []string
+}
+
+// LessFunction is used to initialize an Index of keys in a specific order.
+type LessFunction func(string, string) bool
+
+// btreeString is a custom data type that satisfies the BTree Less interface,
+// making the strings it wraps sortable by the BTree package.
+type btreeString struct {
+	s string
+	l LessFunction
+}
+
+// Less satisfies the BTree.Less interface using the btreeString's LessFunction.
+func (s btreeString) Less(i btree.Item) bool {
+	return s.l(s.s, i.(btreeString).s)
+}
+
+// BTreeIndex is an implementation of the Index interface using google/btree.
+type BTreeIndex struct {
+	sync.RWMutex
+	LessFunction
+	*btree.BTree
+}
+
+// Initialize populates the BTree tree with data from the keys channel,
+// according to the passed less function. It's destructive to the BTreeIndex.
+func (i *BTreeIndex) Initialize(less LessFunction, keys <-chan string) {
+	i.Lock()
+	defer i.Unlock()
+	i.LessFunction = less
+	i.BTree = rebuild(less, keys)
+}
+
+// Insert inserts the given key (only) into the BTree tree.
+func (i *BTreeIndex) Insert(key string) {
+	i.Lock()
+	defer i.Unlock()
+	if i.BTree == nil || i.LessFunction == nil {
+		panic("uninitialized index")
+	}
+	i.BTree.ReplaceOrInsert(btreeString{s: key, l: i.LessFunction})
+}
+
+// Delete removes the given key (only) from the BTree tree.
+func (i *BTreeIndex) Delete(key string) {
+	i.Lock()
+	defer i.Unlock()
+	if i.BTree == nil || i.LessFunction == nil {
+		panic("uninitialized index")
+	}
+	i.BTree.Delete(btreeString{s: key, l: i.LessFunction})
+}
+
+// Keys yields a maximum of n keys in order. If the passed 'from' key is empty,
+// Keys will return the first n keys. If the passed 'from' key is non-empty, the
+// first key in the returned slice will be the key that immediately follows the
+// passed key, in key order.
+func (i *BTreeIndex) Keys(from string, n int) []string {
+	i.RLock()
+	defer i.RUnlock()
+
+	if i.BTree == nil || i.LessFunction == nil {
+		panic("uninitialized index")
+	}
+
+	if i.BTree.Len() <= 0 {
+		return []string{}
+	}
+
+	btreeFrom := btreeString{s: from, l: i.LessFunction}
+	skipFirst := true
+	if len(from) <= 0 || !i.BTree.Has(btreeFrom) {
+		// no such key, so fabricate an always-smallest item
+		btreeFrom = btreeString{s: "", l: func(string, string) bool { return true }}
+		skipFirst = false
+	}
+
+	keys := []string{}
+	iterator := func(i btree.Item) bool {
+		keys = append(keys, i.(btreeString).s)
+		return len(keys) < n
+	}
+	i.BTree.AscendGreaterOrEqual(btreeFrom, iterator)
+
+	if skipFirst && len(keys) > 0 {
+		keys = keys[1:]
+	}
+
+	return keys
+}
+
+// rebuild regenerates the index from the given keys.
+func rebuild(less LessFunction, keys <-chan string) *btree.BTree {
+	tree := btree.New(2)
+	for key := range keys {
+		tree.ReplaceOrInsert(btreeString{s: key, l: less})
+	}
+	return tree
+}
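BTreeIndex keeps keys ordered by the supplied LessFunction, and Keys(from, n) supports simple pagination: pass an empty from for the first page, then the last key of the previous page for the next one. A sketch against the interface above (directory name is illustrative):

package main

import (
	"fmt"

	"github.com/peterbourgon/diskv/v3"
)

func main() {
	d := diskv.New(diskv.Options{
		BasePath:  "index-demo",
		Index:     &diskv.BTreeIndex{},
		IndexLess: func(a, b string) bool { return a < b },
	})
	defer d.EraseAll()

	for _, k := range []string{"cherry", "apple", "banana"} {
		d.Write(k, []byte("x"))
	}

	// First page: the two smallest keys, in order.
	page := d.Index.Keys("", 2)
	fmt.Println(page) // [apple banana]

	// Next page: keys strictly after the last key of the previous page.
	fmt.Println(d.Index.Keys(page[len(page)-1], 2)) // [cherry]
}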

+ 161 - 0
data_tool/src/github.com/peterbourgon/diskv/index_test.go

@@ -0,0 +1,161 @@
+package diskv
+
+import (
+	"bytes"
+	"reflect"
+	"testing"
+	"time"
+)
+
+func strLess(a, b string) bool { return a < b }
+
+func cmpStrings(a, b []string) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	for i := 0; i < len(a); i++ {
+		if a[i] != b[i] {
+			return false
+		}
+	}
+	return true
+}
+
+func (d *Diskv) isIndexed(key string) bool {
+	if d.Index == nil {
+		return false
+	}
+
+	for _, got := range d.Index.Keys("", 1000) {
+		if got == key {
+			return true
+		}
+	}
+	return false
+}
+
+func TestIndexOrder(t *testing.T) {
+	d := New(Options{
+		BasePath:     "index-test",
+		CacheSizeMax: 1024,
+		Index:        &BTreeIndex{},
+		IndexLess:    strLess,
+	})
+	defer d.EraseAll()
+
+	v := []byte{'1', '2', '3'}
+	d.Write("a", v)
+	if !d.isIndexed("a") {
+		t.Fatalf("'a' not indexed after write")
+	}
+	d.Write("1", v)
+	d.Write("m", v)
+	d.Write("-", v)
+	d.Write("A", v)
+
+	expectedKeys := []string{"-", "1", "A", "a", "m"}
+	keys := []string{}
+	for _, key := range d.Index.Keys("", 100) {
+		keys = append(keys, key)
+	}
+
+	if !cmpStrings(keys, expectedKeys) {
+		t.Fatalf("got %s, expected %s", keys, expectedKeys)
+	}
+}
+
+func TestIndexLoad(t *testing.T) {
+	d1 := New(Options{
+		BasePath:     "index-test",
+		CacheSizeMax: 1024,
+	})
+	defer d1.EraseAll()
+
+	val := []byte{'1', '2', '3'}
+	keys := []string{"a", "b", "c", "d", "e", "f", "g"}
+	for _, key := range keys {
+		d1.Write(key, val)
+	}
+
+	d2 := New(Options{
+		BasePath:     "index-test",
+		CacheSizeMax: 1024,
+		Index:        &BTreeIndex{},
+		IndexLess:    strLess,
+	})
+	defer d2.EraseAll()
+
+	// check d2 has properly loaded existing d1 data
+	for _, key := range keys {
+		if !d2.isIndexed(key) {
+			t.Fatalf("key '%s' not indexed on secondary", key)
+		}
+	}
+
+	// cache one
+	if readValue, err := d2.Read(keys[0]); err != nil {
+		t.Fatalf("%s", err)
+	} else if bytes.Compare(val, readValue) != 0 {
+		t.Fatalf("%s: got %s, expected %s", keys[0], readValue, val)
+	}
+
+	// make sure it got cached
+	for i := 0; i < 10 && !d2.isCached(keys[0]); i++ {
+		time.Sleep(10 * time.Millisecond)
+	}
+	if !d2.isCached(keys[0]) {
+		t.Fatalf("key '%s' not cached", keys[0])
+	}
+
+	// kill the disk
+	d1.EraseAll()
+
+	// cached value should still be there in the second
+	if readValue, err := d2.Read(keys[0]); err != nil {
+		t.Fatalf("%s", err)
+	} else if bytes.Compare(val, readValue) != 0 {
+		t.Fatalf("%s: got %s, expected %s", keys[0], readValue, val)
+	}
+
+	// but not in the original
+	if _, err := d1.Read(keys[0]); err == nil {
+		t.Fatalf("expected error reading from flushed store")
+	}
+}
+
+func TestIndexKeysEmptyFrom(t *testing.T) {
+	d := New(Options{
+		BasePath:     "index-test",
+		CacheSizeMax: 1024,
+		Index:        &BTreeIndex{},
+		IndexLess:    strLess,
+	})
+	defer d.EraseAll()
+
+	for _, k := range []string{"a", "c", "z", "b", "x", "b", "y"} {
+		d.Write(k, []byte("1"))
+	}
+
+	want := []string{"a", "b", "c", "x", "y", "z"}
+	have := d.Index.Keys("", 99)
+	if !reflect.DeepEqual(want, have) {
+		t.Errorf("want %v, have %v", want, have)
+	}
+}
+
+func TestBadKeys(t *testing.T) {
+	d := New(Options{
+		BasePath:     "index-test",
+		CacheSizeMax: 1024,
+		Index:        &BTreeIndex{},
+		IndexLess:    strLess,
+	})
+	defer d.EraseAll()
+
+	for _, k := range []string{"a/a"} {
+		err := d.Write(k, []byte("1"))
+		if err != errBadKey {
+			t.Errorf("Expected bad key error, got: %v", err)
+		}
+	}
+}

+ 191 - 0
data_tool/src/github.com/peterbourgon/diskv/issues_test.go

@@ -0,0 +1,191 @@
+package diskv
+
+import (
+	"bytes"
+	"io/ioutil"
+	"math/rand"
+	"sync"
+	"testing"
+	"time"
+)
+
+// ReadStream from cache shouldn't panic on a nil dereference from a nonexistent
+// Compression :)
+func TestIssue2A(t *testing.T) {
+	d := New(Options{
+		BasePath:     "test-issue-2a",
+		CacheSizeMax: 1024,
+	})
+	defer d.EraseAll()
+
+	input := "abcdefghijklmnopqrstuvwxy"
+	key, writeBuf, sync := "a", bytes.NewBufferString(input), false
+	if err := d.WriteStream(key, writeBuf, sync); err != nil {
+		t.Fatal(err)
+	}
+
+	for i := 0; i < 2; i++ {
+		began := time.Now()
+		rc, err := d.ReadStream(key, false)
+		if err != nil {
+			t.Fatal(err)
+		}
+		buf, err := ioutil.ReadAll(rc)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if !cmpBytes(buf, []byte(input)) {
+			t.Fatalf("read #%d: '%s' != '%s'", i+1, string(buf), input)
+		}
+		rc.Close()
+		t.Logf("read #%d in %s", i+1, time.Since(began))
+	}
+}
+
+// ReadStream on a key that resolves to a directory should return an error.
+func TestIssue2B(t *testing.T) {
+	blockTransform := func(s string) []string {
+		transformBlockSize := 3
+		sliceSize := len(s) / transformBlockSize
+		pathSlice := make([]string, sliceSize)
+		for i := 0; i < sliceSize; i++ {
+			from, to := i*transformBlockSize, (i*transformBlockSize)+transformBlockSize
+			pathSlice[i] = s[from:to]
+		}
+		return pathSlice
+	}
+
+	d := New(Options{
+		BasePath:     "test-issue-2b",
+		Transform:    blockTransform,
+		CacheSizeMax: 0,
+	})
+	defer d.EraseAll()
+
+	v := []byte{'1', '2', '3'}
+	if err := d.Write("abcabc", v); err != nil {
+		t.Fatal(err)
+	}
+
+	_, err := d.ReadStream("abc", false)
+	if err == nil {
+		t.Fatal("ReadStream('abc') should return error")
+	}
+	t.Logf("ReadStream('abc') returned error: %v", err)
+}
+
+// Ensure ReadStream with direct=true isn't racy.
+func TestIssue17(t *testing.T) {
+	var (
+		basePath = "test-data"
+	)
+
+	dWrite := New(Options{
+		BasePath:     basePath,
+		CacheSizeMax: 0,
+	})
+	defer dWrite.EraseAll()
+
+	dRead := New(Options{
+		BasePath:     basePath,
+		CacheSizeMax: 50,
+	})
+
+	cases := map[string]string{
+		"a": `1234567890`,
+		"b": `2345678901`,
+		"c": `3456789012`,
+		"d": `4567890123`,
+		"e": `5678901234`,
+	}
+
+	for k, v := range cases {
+		if err := dWrite.Write(k, []byte(v)); err != nil {
+			t.Fatalf("during write: %s", err)
+		}
+		dRead.Read(k) // ensure it's added to cache
+	}
+
+	var wg sync.WaitGroup
+	start := make(chan struct{})
+	for k, v := range cases {
+		wg.Add(1)
+		go func(k, v string) {
+			<-start
+			dRead.ReadStream(k, true)
+			wg.Done()
+		}(k, v)
+	}
+	close(start)
+	wg.Wait()
+}
+
+// Test for issue #40, where acquiring two stream readers on the same k/v pair
+// caused the value to be written into the cache twice, messing up the
+// size calculations.
+func TestIssue40(t *testing.T) {
+	var (
+		basePath = "test-data"
+	)
+	// Simplest transform function: put all the data files into the base dir.
+	flatTransform := func(s string) []string { return []string{} }
+
+	// Initialize a new diskv store, rooted at "my-data-dir",
+	// with a 100 byte cache.
+	d := New(Options{
+		BasePath:     basePath,
+		Transform:    flatTransform,
+		CacheSizeMax: 100,
+	})
+
+	defer d.EraseAll()
+
+	// Write a 50 byte value, filling the cache half-way
+	k1 := "key1"
+	d1 := make([]byte, 50)
+	rand.Read(d1)
+	d.Write(k1, d1)
+
+	// Get *two* read streams on it. Because the key is not yet in the cache,
+	// and will not be in the cache until a stream is fully read, both
+	// readers use the 'siphon' object, which always writes to the cache
+	// after reading.
+	s1, err := d.ReadStream(k1, false)
+	if err != nil {
+		t.Fatal(err)
+	}
+	s2, err := d.ReadStream(k1, false)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// When each stream is drained, the underlying siphon will write
+	// the value into the cache's map and increment the cache size.
+	// This means we will have 1 entry in the cache map
+	// ("key1" mapping to a 50 byte slice) but the cache size will be 100,
+	// because the buggy code does not check if an entry already exists
+	// in the map.
+	// s1 drains:
+	//   cache[k] = v
+	//   cacheSize += len(v)
+	// s2 drains:
+	//   cache[k] = v /* overwrites existing */
+	//   cacheSize += len(v) /* blindly adds to the cache size */
+	ioutil.ReadAll(s1)
+	ioutil.ReadAll(s2)
+
+	// Now write a different k/v pair, with a 60 byte array.
+	k2 := "key2"
+	d2 := make([]byte, 60)
+	rand.Read(d2)
+	d.Write(k2, d2)
+	// The act of reading the k/v pair back out causes it to be cached.
+	// Because the cache is only 100 bytes, it needs to delete existing
+	// entries to make room.
+	// If the cache is buggy, it will delete the single 50-byte entry
+	// from the cache map & decrement cacheSize by 50... but because
+	// cacheSize was improperly incremented twice earlier, this will
+	// leave us with no entries in the cacheMap but with cacheSize==50.
+	// Since CacheSizeMax-cacheSize (100-50) is less than 60, there
+	// is no room in the cache for this entry and it panics.
+	d.Read(k2)
+}

+ 229 - 0
data_tool/src/github.com/peterbourgon/diskv/keys_test.go

@@ -0,0 +1,229 @@
+package diskv
+
+import (
+	"reflect"
+	"runtime"
+	"strings"
+	"testing"
+)
+
+var (
+	keysTestData = map[string]string{
+		"ab01cd01": "When we started building CoreOS",
+		"ab01cd02": "we looked at all the various components available to us",
+		"ab01cd03": "re-using the best tools",
+		"ef01gh04": "and building the ones that did not exist",
+		"ef02gh05": "We believe strongly in the Unix philosophy",
+		"xxxxxxxx": "tools should be independently useful",
+	}
+
+	prefixes = []string{
+		"", // all
+		"a",
+		"ab",
+		"ab0",
+		"ab01",
+		"ab01cd0",
+		"ab01cd01",
+		"ab01cd01x", // none
+		"b",         // none
+		"b0",        // none
+		"0",         // none
+		"01",        // none
+		"e",
+		"ef",
+		"efx", // none
+		"ef01gh0",
+		"ef01gh04",
+		"ef01gh05",
+		"ef01gh06", // none
+	}
+)
+
+func TestKeysFlat(t *testing.T) {
+	transform := func(s string) []string {
+		if s == "" {
+			t.Fatalf(`transform should not be called with ""`)
+		}
+		return []string{}
+	}
+	d := New(Options{
+		BasePath:  "test-data",
+		Transform: transform,
+	})
+	defer d.EraseAll()
+
+	for k, v := range keysTestData {
+		d.Write(k, []byte(v))
+	}
+
+	checkKeys(t, d.Keys(nil), keysTestData)
+}
+
+func TestKeysNested(t *testing.T) {
+	d := New(Options{
+		BasePath:  "test-data",
+		Transform: blockTransform(2),
+	})
+	defer d.EraseAll()
+
+	for k, v := range keysTestData {
+		d.Write(k, []byte(v))
+	}
+
+	checkKeys(t, d.Keys(nil), keysTestData)
+}
+
+func TestKeysPrefixFlat(t *testing.T) {
+	d := New(Options{
+		BasePath: "test-data",
+	})
+	defer d.EraseAll()
+
+	for k, v := range keysTestData {
+		d.Write(k, []byte(v))
+	}
+
+	for _, prefix := range prefixes {
+		checkKeys(t, d.KeysPrefix(prefix, nil), filterPrefix(keysTestData, prefix))
+	}
+}
+
+func TestKeysPrefixNested(t *testing.T) {
+	d := New(Options{
+		BasePath:  "test-data",
+		Transform: blockTransform(2),
+	})
+	defer d.EraseAll()
+
+	for k, v := range keysTestData {
+		d.Write(k, []byte(v))
+	}
+
+	for _, prefix := range prefixes {
+		checkKeys(t, d.KeysPrefix(prefix, nil), filterPrefix(keysTestData, prefix))
+	}
+}
+
+func TestKeysCancel(t *testing.T) {
+	d := New(Options{
+		BasePath: "test-data",
+	})
+	defer d.EraseAll()
+
+	for k, v := range keysTestData {
+		d.Write(k, []byte(v))
+	}
+
+	var (
+		cancel      = make(chan struct{})
+		received    = 0
+		cancelAfter = len(keysTestData) / 2
+	)
+
+	for key := range d.Keys(cancel) {
+		received++
+
+		if received >= cancelAfter {
+			close(cancel)
+			runtime.Gosched() // allow walker to detect cancel
+		}
+
+		t.Logf("received %d: %q", received, key)
+	}
+
+	if want, have := cancelAfter, received; want != have {
+		t.Errorf("want %d, have %d", want, have)
+	}
+}
+
+func checkKeys(t *testing.T, c <-chan string, want map[string]string) {
+	for k := range c {
+		if _, ok := want[k]; !ok {
+			t.Errorf("%q yielded but not expected", k)
+			continue
+		}
+
+		delete(want, k)
+		t.Logf("%q yielded OK", k)
+	}
+
+	if len(want) != 0 {
+		t.Errorf("%d expected key(s) not yielded: %s", len(want), strings.Join(flattenKeys(want), ", "))
+	}
+}
+
+func blockTransform(blockSize int) func(string) []string {
+	return func(s string) []string {
+		var (
+			sliceSize = len(s) / blockSize
+			pathSlice = make([]string, sliceSize)
+		)
+		for i := 0; i < sliceSize; i++ {
+			from, to := i*blockSize, (i*blockSize)+blockSize
+			pathSlice[i] = s[from:to]
+		}
+		return pathSlice
+	}
+}
+
+func filterPrefix(in map[string]string, prefix string) map[string]string {
+	out := map[string]string{}
+	for k, v := range in {
+		if strings.HasPrefix(k, prefix) {
+			out[k] = v
+		}
+	}
+	return out
+}
+
+func TestFilterPrefix(t *testing.T) {
+	input := map[string]string{
+		"all":        "",
+		"and":        "",
+		"at":         "",
+		"available":  "",
+		"best":       "",
+		"building":   "",
+		"components": "",
+		"coreos":     "",
+		"did":        "",
+		"exist":      "",
+		"looked":     "",
+		"not":        "",
+		"ones":       "",
+		"re-using":   "",
+		"started":    "",
+		"that":       "",
+		"the":        "",
+		"to":         "",
+		"tools":      "",
+		"us":         "",
+		"various":    "",
+		"we":         "",
+		"when":       "",
+	}
+
+	for prefix, want := range map[string]map[string]string{
+		"a":    map[string]string{"all": "", "and": "", "at": "", "available": ""},
+		"al":   map[string]string{"all": ""},
+		"all":  map[string]string{"all": ""},
+		"alll": map[string]string{},
+		"c":    map[string]string{"components": "", "coreos": ""},
+		"co":   map[string]string{"components": "", "coreos": ""},
+		"com":  map[string]string{"components": ""},
+	} {
+		have := filterPrefix(input, prefix)
+		if !reflect.DeepEqual(want, have) {
+			t.Errorf("%q: want %v, have %v", prefix, flattenKeys(want), flattenKeys(have))
+		}
+	}
+}
+
+func flattenKeys(m map[string]string) []string {
+	a := make([]string, 0, len(m))
+	for k := range m {
+		a = append(a, k)
+	}
+	return a
+}
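As TestKeysCancel above shows, key enumeration runs in a background goroutine and can be stopped early by closing the cancel channel, which lets the walker exit instead of blocking on its channel send. A minimal sketch (directory name is illustrative):

package main

import (
	"fmt"

	"github.com/peterbourgon/diskv/v3"
)

func main() {
	d := diskv.New(diskv.Options{BasePath: "keys-demo"})
	defer d.EraseAll()

	for _, k := range []string{"a1", "a2", "b1"} {
		d.Write(k, []byte("x"))
	}

	// Enumerate only keys starting with "a", and stop after the first one.
	cancel := make(chan struct{})
	for key := range d.KeysPrefix("a", cancel) {
		fmt.Println(key)
		close(cancel) // the walker selects on this and stops
		break
	}
}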

+ 151 - 0
data_tool/src/github.com/peterbourgon/diskv/speed_test.go

@@ -0,0 +1,151 @@
+package diskv
+
+import (
+	"fmt"
+	"math/rand"
+	"testing"
+)
+
+func shuffle(keys []string) {
+	ints := rand.Perm(len(keys))
+	for i := range keys {
+		keys[i], keys[ints[i]] = keys[ints[i]], keys[i]
+	}
+}
+
+func genValue(size int) []byte {
+	v := make([]byte, size)
+	for i := 0; i < size; i++ {
+		v[i] = uint8((rand.Int() % 26) + 97) // a-z
+	}
+	return v
+}
+
+const (
+	keyCount = 1000
+)
+
+func genKeys() []string {
+	keys := make([]string, keyCount)
+	for i := 0; i < keyCount; i++ {
+		keys[i] = fmt.Sprintf("%d", i)
+	}
+	return keys
+}
+
+func (d *Diskv) load(keys []string, val []byte) {
+	for _, key := range keys {
+		d.Write(key, val)
+	}
+}
+
+func benchRead(b *testing.B, size, cachesz int) {
+	b.StopTimer()
+	d := New(Options{
+		BasePath:     "speed-test",
+		CacheSizeMax: uint64(cachesz),
+	})
+	defer d.EraseAll()
+
+	keys := genKeys()
+	value := genValue(size)
+	d.load(keys, value)
+	shuffle(keys)
+	b.SetBytes(int64(size))
+
+	b.StartTimer()
+	for i := 0; i < b.N; i++ {
+		_, _ = d.Read(keys[i%len(keys)])
+	}
+	b.StopTimer()
+}
+
+func benchWrite(b *testing.B, size int, withIndex bool) {
+	b.StopTimer()
+
+	options := Options{
+		BasePath:     "speed-test",
+		CacheSizeMax: 0,
+	}
+	if withIndex {
+		options.Index = &BTreeIndex{}
+		options.IndexLess = strLess
+	}
+
+	d := New(options)
+	defer d.EraseAll()
+	keys := genKeys()
+	value := genValue(size)
+	shuffle(keys)
+	b.SetBytes(int64(size))
+
+	b.StartTimer()
+	for i := 0; i < b.N; i++ {
+		d.Write(keys[i%len(keys)], value)
+	}
+	b.StopTimer()
+}
+
+func BenchmarkWrite__32B_NoIndex(b *testing.B) {
+	benchWrite(b, 32, false)
+}
+
+func BenchmarkWrite__1KB_NoIndex(b *testing.B) {
+	benchWrite(b, 1024, false)
+}
+
+func BenchmarkWrite__4KB_NoIndex(b *testing.B) {
+	benchWrite(b, 4096, false)
+}
+
+func BenchmarkWrite_10KB_NoIndex(b *testing.B) {
+	benchWrite(b, 10240, false)
+}
+
+func BenchmarkWrite__32B_WithIndex(b *testing.B) {
+	benchWrite(b, 32, true)
+}
+
+func BenchmarkWrite__1KB_WithIndex(b *testing.B) {
+	benchWrite(b, 1024, true)
+}
+
+func BenchmarkWrite__4KB_WithIndex(b *testing.B) {
+	benchWrite(b, 4096, true)
+}
+
+func BenchmarkWrite_10KB_WithIndex(b *testing.B) {
+	benchWrite(b, 10240, true)
+}
+
+func BenchmarkRead__32B_NoCache(b *testing.B) {
+	benchRead(b, 32, 0)
+}
+
+func BenchmarkRead__1KB_NoCache(b *testing.B) {
+	benchRead(b, 1024, 0)
+}
+
+func BenchmarkRead__4KB_NoCache(b *testing.B) {
+	benchRead(b, 4096, 0)
+}
+
+func BenchmarkRead_10KB_NoCache(b *testing.B) {
+	benchRead(b, 10240, 0)
+}
+
+func BenchmarkRead__32B_WithCache(b *testing.B) {
+	benchRead(b, 32, keyCount*32*2)
+}
+
+func BenchmarkRead__1KB_WithCache(b *testing.B) {
+	benchRead(b, 1024, keyCount*1024*2)
+}
+
+func BenchmarkRead__4KB_WithCache(b *testing.B) {
+	benchRead(b, 4096, keyCount*4096*2)
+}
+
+func BenchmarkRead_10KB_WithCache(b *testing.B) {
+	benchRead(b, 10240, keyCount*4096*2)
+}

+ 117 - 0
data_tool/src/github.com/peterbourgon/diskv/stream_test.go

@@ -0,0 +1,117 @@
+package diskv
+
+import (
+	"bytes"
+	"io/ioutil"
+	"testing"
+)
+
+func TestBasicStreamCaching(t *testing.T) {
+	d := New(Options{
+		BasePath:     "test-data",
+		CacheSizeMax: 1024,
+	})
+	defer d.EraseAll()
+
+	input := "a1b2c3"
+	key, writeBuf, sync := "a", bytes.NewBufferString(input), true
+	if err := d.WriteStream(key, writeBuf, sync); err != nil {
+		t.Fatal(err)
+	}
+
+	if d.isCached(key) {
+		t.Fatalf("'%s' cached, but shouldn't be (yet)", key)
+	}
+
+	rc, err := d.ReadStream(key, false)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	readBuf, err := ioutil.ReadAll(rc)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !cmpBytes(readBuf, []byte(input)) {
+		t.Fatalf("'%s' != '%s'", string(readBuf), input)
+	}
+
+	if !d.isCached(key) {
+		t.Fatalf("'%s' isn't cached, but should be", key)
+	}
+}
+
+func TestReadStreamDirect(t *testing.T) {
+	var (
+		basePath = "test-data"
+	)
+	dWrite := New(Options{
+		BasePath:     basePath,
+		CacheSizeMax: 0,
+	})
+	defer dWrite.EraseAll()
+	dRead := New(Options{
+		BasePath:     basePath,
+		CacheSizeMax: 1024,
+	})
+
+	// Write
+	key, val1, val2 := "a", []byte(`1234567890`), []byte(`aaaaaaaaaa`)
+	if err := dWrite.Write(key, val1); err != nil {
+		t.Fatalf("during first write: %s", err)
+	}
+
+	// First, caching read.
+	val, err := dRead.Read(key)
+	if err != nil {
+		t.Fatalf("during initial read: %s", err)
+	}
+	t.Logf("read 1: %s => %s", key, string(val))
+	if !cmpBytes(val1, val) {
+		t.Errorf("expected %q, got %q", string(val1), string(val))
+	}
+	if !dRead.isCached(key) {
+		t.Errorf("%q should be cached, but isn't", key)
+	}
+
+	// Write a different value.
+	if err := dWrite.Write(key, val2); err != nil {
+		t.Fatalf("during second write: %s", err)
+	}
+
+	// Second read, should hit cache and get the old value.
+	val, err = dRead.Read(key)
+	if err != nil {
+		t.Fatalf("during second (cache-hit) read: %s", err)
+	}
+	t.Logf("read 2: %s => %s", key, string(val))
+	if !cmpBytes(val1, val) {
+		t.Errorf("expected %q, got %q", string(val1), string(val))
+	}
+
+	// Third, direct read, should get the updated value.
+	rc, err := dRead.ReadStream(key, true)
+	if err != nil {
+		t.Fatalf("during third (direct) read, ReadStream: %s", err)
+	}
+	defer rc.Close()
+	val, err = ioutil.ReadAll(rc)
+	if err != nil {
+		t.Fatalf("during third (direct) read, ReadAll: %s", err)
+	}
+	t.Logf("read 3: %s => %s", key, string(val))
+	if !cmpBytes(val2, val) {
+		t.Errorf("expected %q, got %q", string(val2), string(val))
+	}
+
+	// Fourth read, should hit cache and get the new value.
+	val, err = dRead.Read(key)
+	if err != nil {
+		t.Fatalf("during fourth (cache-hit) read: %s", err)
+	}
+	t.Logf("read 4: %s => %s", key, string(val))
+	if !cmpBytes(val2, val) {
+		t.Errorf("expected %q, got %q", string(val2), string(val))
+	}
+}
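
The stream tests above pin down ReadStream's direct flag: a read with direct=false is served through (and populates) the in-memory cache, so a handle with a warm cache can keep returning the old value after another handle rewrites the file on disk, while direct=true bypasses the cache and returns the current bytes. A minimal sketch of that behaviour, using the same two-handle setup as TestReadStreamDirect (the base path "stream-example" is illustrative):

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"

	"github.com/peterbourgon/diskv"
)

func main() {
	base := "stream-example" // illustrative scratch directory shared by both handles

	// Writer with no cache, reader with a cache, as in TestReadStreamDirect.
	writer := diskv.New(diskv.Options{BasePath: base, CacheSizeMax: 0})
	reader := diskv.New(diskv.Options{BasePath: base, CacheSizeMax: 1024})
	defer writer.EraseAll()

	// WriteStream streams the value to disk; sync=true asks for an fsync after the write.
	if err := writer.WriteStream("a", bytes.NewBufferString("first"), true); err != nil {
		log.Fatal(err)
	}

	// A normal read fills the reader's cache with "first".
	if _, err := reader.Read("a"); err != nil {
		log.Fatal(err)
	}

	// Another handle replaces the value on disk.
	if err := writer.Write("a", []byte("second")); err != nil {
		log.Fatal(err)
	}

	// Read still serves the cached "first"; ReadStream with direct=true
	// bypasses the cache and sees "second".
	stale, _ := reader.Read("a")
	rc, err := reader.ReadStream("a", true)
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()
	fresh, _ := ioutil.ReadAll(rc)
	fmt.Printf("cached: %s, direct: %s\n", stale, fresh)
}

The two-handle setup matters: TestBasicStreamCaching shows a write does not leave the key cached on the writing handle, which is presumably why the stale-cache case only appears when a different handle (or process) updates the file.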

+ 26 - 0
data_tool/src/github.com/rogpeppe/fastuuid/LICENSE

@@ -0,0 +1,26 @@
+Copyright © 2014, Roger Peppe
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    * Redistributions of source code must retain the above copyright notice,
+      this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright notice,
+      this list of conditions and the following disclaimer in the documentation
+      and/or other materials provided with the distribution.
+    * Neither the name of this project nor the names of its contributors
+      may be used to endorse or promote products derived from this software
+      without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

Too many files were changed in this diff; some files are not shown.