5 年之前
父節點
當前提交
7d7b290b22
共有 100 個文件被更改,包括 18597 次插入0 次删除
  1. 2 0
      src/README.md
  2. 28 0
      src/gopkg.in/inf.v0/LICENSE
  3. 210 0
      src/gopkg.in/inf.v0/benchmark_test.go
  4. 615 0
      src/gopkg.in/inf.v0/dec.go
  5. 33 0
      src/gopkg.in/inf.v0/dec_go1_2_test.go
  6. 40 0
      src/gopkg.in/inf.v0/dec_internal_test.go
  7. 379 0
      src/gopkg.in/inf.v0/dec_test.go
  8. 62 0
      src/gopkg.in/inf.v0/example_test.go
  9. 145 0
      src/gopkg.in/inf.v0/rounder.go
  10. 72 0
      src/gopkg.in/inf.v0/rounder_example_test.go
  11. 109 0
      src/gopkg.in/inf.v0/rounder_test.go
  12. 45 0
      src/gopkg.in/mgo.v2/.travis.yml
  13. 25 0
      src/gopkg.in/mgo.v2/LICENSE
  14. 5 0
      src/gopkg.in/mgo.v2/Makefile
  15. 4 0
      src/gopkg.in/mgo.v2/README.md
  16. 467 0
      src/gopkg.in/mgo.v2/auth.go
  17. 1180 0
      src/gopkg.in/mgo.v2/auth_test.go
  18. 25 0
      src/gopkg.in/mgo.v2/bson/LICENSE
  19. 738 0
      src/gopkg.in/mgo.v2/bson/bson.go
  20. 1832 0
      src/gopkg.in/mgo.v2/bson/bson_test.go
  21. 310 0
      src/gopkg.in/mgo.v2/bson/decimal.go
  22. 4109 0
      src/gopkg.in/mgo.v2/bson/decimal_test.go
  23. 849 0
      src/gopkg.in/mgo.v2/bson/decode.go
  24. 521 0
      src/gopkg.in/mgo.v2/bson/encode.go
  25. 380 0
      src/gopkg.in/mgo.v2/bson/json.go
  26. 184 0
      src/gopkg.in/mgo.v2/bson/json_test.go
  27. 27 0
      src/gopkg.in/mgo.v2/bson/specdata/update.sh
  28. 241 0
      src/gopkg.in/mgo.v2/bson/specdata_test.go
  29. 351 0
      src/gopkg.in/mgo.v2/bulk.go
  30. 504 0
      src/gopkg.in/mgo.v2/bulk_test.go
  31. 684 0
      src/gopkg.in/mgo.v2/cluster.go
  32. 2090 0
      src/gopkg.in/mgo.v2/cluster_test.go
  33. 196 0
      src/gopkg.in/mgo.v2/dbtest/dbserver.go
  34. 108 0
      src/gopkg.in/mgo.v2/dbtest/dbserver_test.go
  35. 12 0
      src/gopkg.in/mgo.v2/dbtest/export_test.go
  36. 31 0
      src/gopkg.in/mgo.v2/doc.go
  37. 33 0
      src/gopkg.in/mgo.v2/export_test.go
  38. 761 0
      src/gopkg.in/mgo.v2/gridfs.go
  39. 708 0
      src/gopkg.in/mgo.v2/gridfs_test.go
  40. 20 0
      src/gopkg.in/mgo.v2/harness/certs/client.crt
  41. 27 0
      src/gopkg.in/mgo.v2/harness/certs/client.key
  42. 57 0
      src/gopkg.in/mgo.v2/harness/certs/client.pem
  43. 17 0
      src/gopkg.in/mgo.v2/harness/certs/client.req
  44. 22 0
      src/gopkg.in/mgo.v2/harness/certs/server.crt
  45. 28 0
      src/gopkg.in/mgo.v2/harness/certs/server.key
  46. 50 0
      src/gopkg.in/mgo.v2/harness/certs/server.pem
  47. 57 0
      src/gopkg.in/mgo.v2/harness/daemons/.env
  48. 0 0
      src/gopkg.in/mgo.v2/harness/daemons/cfg1/db/.empty
  49. 二進制
      src/gopkg.in/mgo.v2/harness/daemons/cfg1/db/journal/tempLatencyTest
  50. 0 0
      src/gopkg.in/mgo.v2/harness/daemons/cfg1/db/mongod.lock
  51. 3 0
      src/gopkg.in/mgo.v2/harness/daemons/cfg1/log/run
  52. 8 0
      src/gopkg.in/mgo.v2/harness/daemons/cfg1/run
  53. 0 0
      src/gopkg.in/mgo.v2/harness/daemons/cfg2/db/.empty
  54. 3 0
      src/gopkg.in/mgo.v2/harness/daemons/cfg2/log/run
  55. 8 0
      src/gopkg.in/mgo.v2/harness/daemons/cfg2/run
  56. 0 0
      src/gopkg.in/mgo.v2/harness/daemons/cfg3/db/.empty
  57. 3 0
      src/gopkg.in/mgo.v2/harness/daemons/cfg3/log/run
  58. 9 0
      src/gopkg.in/mgo.v2/harness/daemons/cfg3/run
  59. 0 0
      src/gopkg.in/mgo.v2/harness/daemons/db1/db/.empty
  60. 3 0
      src/gopkg.in/mgo.v2/harness/daemons/db1/log/run
  61. 15 0
      src/gopkg.in/mgo.v2/harness/daemons/db1/run
  62. 0 0
      src/gopkg.in/mgo.v2/harness/daemons/db2/db/.empty
  63. 3 0
      src/gopkg.in/mgo.v2/harness/daemons/db2/log/run
  64. 8 0
      src/gopkg.in/mgo.v2/harness/daemons/db2/run
  65. 0 0
      src/gopkg.in/mgo.v2/harness/daemons/db3/db/.empty
  66. 3 0
      src/gopkg.in/mgo.v2/harness/daemons/db3/log/run
  67. 12 0
      src/gopkg.in/mgo.v2/harness/daemons/db3/run
  68. 0 0
      src/gopkg.in/mgo.v2/harness/daemons/rs1a/db/.empty
  69. 3 0
      src/gopkg.in/mgo.v2/harness/daemons/rs1a/log/run
  70. 8 0
      src/gopkg.in/mgo.v2/harness/daemons/rs1a/run
  71. 0 0
      src/gopkg.in/mgo.v2/harness/daemons/rs1b/db/.empty
  72. 3 0
      src/gopkg.in/mgo.v2/harness/daemons/rs1b/log/run
  73. 8 0
      src/gopkg.in/mgo.v2/harness/daemons/rs1b/run
  74. 0 0
      src/gopkg.in/mgo.v2/harness/daemons/rs1c/db/.empty
  75. 3 0
      src/gopkg.in/mgo.v2/harness/daemons/rs1c/log/run
  76. 8 0
      src/gopkg.in/mgo.v2/harness/daemons/rs1c/run
  77. 0 0
      src/gopkg.in/mgo.v2/harness/daemons/rs2a/db/.empty
  78. 3 0
      src/gopkg.in/mgo.v2/harness/daemons/rs2a/log/run
  79. 8 0
      src/gopkg.in/mgo.v2/harness/daemons/rs2a/run
  80. 0 0
      src/gopkg.in/mgo.v2/harness/daemons/rs2b/db/.empty
  81. 3 0
      src/gopkg.in/mgo.v2/harness/daemons/rs2b/log/run
  82. 8 0
      src/gopkg.in/mgo.v2/harness/daemons/rs2b/run
  83. 0 0
      src/gopkg.in/mgo.v2/harness/daemons/rs2c/db/.empty
  84. 3 0
      src/gopkg.in/mgo.v2/harness/daemons/rs2c/log/run
  85. 8 0
      src/gopkg.in/mgo.v2/harness/daemons/rs2c/run
  86. 0 0
      src/gopkg.in/mgo.v2/harness/daemons/rs3a/db/.empty
  87. 3 0
      src/gopkg.in/mgo.v2/harness/daemons/rs3a/log/run
  88. 9 0
      src/gopkg.in/mgo.v2/harness/daemons/rs3a/run
  89. 0 0
      src/gopkg.in/mgo.v2/harness/daemons/rs3b/db/.empty
  90. 3 0
      src/gopkg.in/mgo.v2/harness/daemons/rs3b/log/run
  91. 9 0
      src/gopkg.in/mgo.v2/harness/daemons/rs3b/run
  92. 0 0
      src/gopkg.in/mgo.v2/harness/daemons/rs3c/db/.empty
  93. 3 0
      src/gopkg.in/mgo.v2/harness/daemons/rs3c/log/run
  94. 9 0
      src/gopkg.in/mgo.v2/harness/daemons/rs3c/run
  95. 0 0
      src/gopkg.in/mgo.v2/harness/daemons/rs4a/db/.empty
  96. 3 0
      src/gopkg.in/mgo.v2/harness/daemons/rs4a/log/run
  97. 8 0
      src/gopkg.in/mgo.v2/harness/daemons/rs4a/run
  98. 3 0
      src/gopkg.in/mgo.v2/harness/daemons/s1/log/run
  99. 7 0
      src/gopkg.in/mgo.v2/harness/daemons/s1/run
  100. 3 0
      src/gopkg.in/mgo.v2/harness/daemons/s2/log/run

+ 2 - 0
src/README.md

@@ -0,0 +1,2 @@
+## 这是一个mongodb的工具类
+-  是原qfw/mongodb包的改进  2020-03-05

+ 28 - 0
src/gopkg.in/inf.v0/LICENSE

@@ -0,0 +1,28 @@
+Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go
+Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 210 - 0
src/gopkg.in/inf.v0/benchmark_test.go

@@ -0,0 +1,210 @@
+package inf
+
+import (
+	"fmt"
+	"math/big"
+	"math/rand"
+	"sync"
+	"testing"
+)
+
+const maxcap = 1024 * 1024
+const bits = 256
+const maxscale = 32
+
+var once sync.Once
+
+var decInput [][2]Dec
+var intInput [][2]big.Int
+
+var initBench = func() {
+	decInput = make([][2]Dec, maxcap)
+	intInput = make([][2]big.Int, maxcap)
+	max := new(big.Int).Lsh(big.NewInt(1), bits)
+	r := rand.New(rand.NewSource(0))
+	for i := 0; i < cap(decInput); i++ {
+		decInput[i][0].SetUnscaledBig(new(big.Int).Rand(r, max)).
+			SetScale(Scale(r.Int31n(int32(2*maxscale-1)) - int32(maxscale)))
+		decInput[i][1].SetUnscaledBig(new(big.Int).Rand(r, max)).
+			SetScale(Scale(r.Int31n(int32(2*maxscale-1)) - int32(maxscale)))
+	}
+	for i := 0; i < cap(intInput); i++ {
+		intInput[i][0].Rand(r, max)
+		intInput[i][1].Rand(r, max)
+	}
+}
+
+func doBenchmarkDec1(b *testing.B, f func(z *Dec)) {
+	once.Do(initBench)
+	b.ResetTimer()
+	b.StartTimer()
+	for i := 0; i < b.N; i++ {
+		f(&decInput[i%maxcap][0])
+	}
+}
+
+func doBenchmarkDec2(b *testing.B, f func(x, y *Dec)) {
+	once.Do(initBench)
+	b.ResetTimer()
+	b.StartTimer()
+	for i := 0; i < b.N; i++ {
+		f(&decInput[i%maxcap][0], &decInput[i%maxcap][1])
+	}
+}
+
+func doBenchmarkInt1(b *testing.B, f func(z *big.Int)) {
+	once.Do(initBench)
+	b.ResetTimer()
+	b.StartTimer()
+	for i := 0; i < b.N; i++ {
+		f(&intInput[i%maxcap][0])
+	}
+}
+
+func doBenchmarkInt2(b *testing.B, f func(x, y *big.Int)) {
+	once.Do(initBench)
+	b.ResetTimer()
+	b.StartTimer()
+	for i := 0; i < b.N; i++ {
+		f(&intInput[i%maxcap][0], &intInput[i%maxcap][1])
+	}
+}
+
+func Benchmark_Dec_String(b *testing.B) {
+	doBenchmarkDec1(b, func(x *Dec) {
+		x.String()
+	})
+}
+
+func Benchmark_Dec_StringScan(b *testing.B) {
+	doBenchmarkDec1(b, func(x *Dec) {
+		s := x.String()
+		d := new(Dec)
+		fmt.Sscan(s, d)
+	})
+}
+
+func Benchmark_Dec_GobEncode(b *testing.B) {
+	doBenchmarkDec1(b, func(x *Dec) {
+		x.GobEncode()
+	})
+}
+
+func Benchmark_Dec_GobEnDecode(b *testing.B) {
+	doBenchmarkDec1(b, func(x *Dec) {
+		g, _ := x.GobEncode()
+		new(Dec).GobDecode(g)
+	})
+}
+
+func Benchmark_Dec_Add(b *testing.B) {
+	doBenchmarkDec2(b, func(x, y *Dec) {
+		ys := y.Scale()
+		y.SetScale(x.Scale())
+		_ = new(Dec).Add(x, y)
+		y.SetScale(ys)
+	})
+}
+
+func Benchmark_Dec_AddMixed(b *testing.B) {
+	doBenchmarkDec2(b, func(x, y *Dec) {
+		_ = new(Dec).Add(x, y)
+	})
+}
+
+func Benchmark_Dec_Sub(b *testing.B) {
+	doBenchmarkDec2(b, func(x, y *Dec) {
+		ys := y.Scale()
+		y.SetScale(x.Scale())
+		_ = new(Dec).Sub(x, y)
+		y.SetScale(ys)
+	})
+}
+
+func Benchmark_Dec_SubMixed(b *testing.B) {
+	doBenchmarkDec2(b, func(x, y *Dec) {
+		_ = new(Dec).Sub(x, y)
+	})
+}
+
+func Benchmark_Dec_Mul(b *testing.B) {
+	doBenchmarkDec2(b, func(x, y *Dec) {
+		_ = new(Dec).Mul(x, y)
+	})
+}
+
+func Benchmark_Dec_Mul_QuoExact(b *testing.B) {
+	doBenchmarkDec2(b, func(x, y *Dec) {
+		v := new(Dec).Mul(x, y)
+		_ = new(Dec).QuoExact(v, y)
+	})
+}
+
+func Benchmark_Dec_QuoRound_Fixed_Down(b *testing.B) {
+	doBenchmarkDec2(b, func(x, y *Dec) {
+		_ = new(Dec).QuoRound(x, y, 0, RoundDown)
+	})
+}
+
+func Benchmark_Dec_QuoRound_Fixed_HalfUp(b *testing.B) {
+	doBenchmarkDec2(b, func(x, y *Dec) {
+		_ = new(Dec).QuoRound(x, y, 0, RoundHalfUp)
+	})
+}
+
+func Benchmark_Int_String(b *testing.B) {
+	doBenchmarkInt1(b, func(x *big.Int) {
+		x.String()
+	})
+}
+
+func Benchmark_Int_StringScan(b *testing.B) {
+	doBenchmarkInt1(b, func(x *big.Int) {
+		s := x.String()
+		d := new(big.Int)
+		fmt.Sscan(s, d)
+	})
+}
+
+func Benchmark_Int_GobEncode(b *testing.B) {
+	doBenchmarkInt1(b, func(x *big.Int) {
+		x.GobEncode()
+	})
+}
+
+func Benchmark_Int_GobEnDecode(b *testing.B) {
+	doBenchmarkInt1(b, func(x *big.Int) {
+		g, _ := x.GobEncode()
+		new(big.Int).GobDecode(g)
+	})
+}
+
+func Benchmark_Int_Add(b *testing.B) {
+	doBenchmarkInt2(b, func(x, y *big.Int) {
+		_ = new(big.Int).Add(x, y)
+	})
+}
+
+func Benchmark_Int_Sub(b *testing.B) {
+	doBenchmarkInt2(b, func(x, y *big.Int) {
+		_ = new(big.Int).Sub(x, y)
+	})
+}
+
+func Benchmark_Int_Mul(b *testing.B) {
+	doBenchmarkInt2(b, func(x, y *big.Int) {
+		_ = new(big.Int).Mul(x, y)
+	})
+}
+
+func Benchmark_Int_Quo(b *testing.B) {
+	doBenchmarkInt2(b, func(x, y *big.Int) {
+		_ = new(big.Int).Quo(x, y)
+	})
+}
+
+func Benchmark_Int_QuoRem(b *testing.B) {
+	doBenchmarkInt2(b, func(x, y *big.Int) {
+		_, _ = new(big.Int).QuoRem(x, y, new(big.Int))
+	})
+}

+ 615 - 0
src/gopkg.in/inf.v0/dec.go

@@ -0,0 +1,615 @@
+// Package inf (type inf.Dec) implements "infinite-precision" decimal
+// arithmetic.
+// "Infinite precision" describes two characteristics: practically unlimited
+// precision for decimal number representation and no support for calculating
+// with any specific fixed precision.
+// (Although there is no practical limit on precision, inf.Dec can only
+// represent finite decimals.)
+//
+// This package is currently in experimental stage and the API may change.
+//
+// This package does NOT support:
+//  - rounding to specific precisions (as opposed to specific decimal positions)
+//  - the notion of context (each rounding must be explicit)
+//  - NaN and Inf values, and distinguishing between positive and negative zero
+//  - conversions to and from float32/64 types
+//
+// Features considered for possible addition:
+//  + formatting options
+//  + Exp method
+//  + combined operations such as AddRound/MulAdd etc
+//  + exchanging data in decimal32/64/128 formats
+//
+package inf // import "gopkg.in/inf.v0"
+
+// TODO:
+//  - avoid excessive deep copying (quo and rounders)
+
+import (
+	"fmt"
+	"io"
+	"math/big"
+	"strings"
+)
+
+// A Dec represents a signed arbitrary-precision decimal.
+// It is a combination of a sign, an arbitrary-precision integer coefficient
+// value, and a signed fixed-precision exponent value.
+// The sign and the coefficient value are handled together as a signed value
+// and referred to as the unscaled value.
+// (Positive and negative zero values are not distinguished.)
+// Since the exponent is most commonly non-positive, it is handled in negated
+// form and referred to as scale.
+//
+// The mathematical value of a Dec equals:
+//
+//  unscaled * 10**(-scale)
+//
+// Note that different Dec representations may have equal mathematical values.
+//
+//  unscaled  scale  String()
+//  -------------------------
+//         0      0    "0"
+//         0      2    "0.00"
+//         0     -2    "0"
+//         1      0    "1"
+//       100      2    "1.00"
+//        10      0   "10"
+//         1     -1   "10"
+//
+// The zero value for a Dec represents the value 0 with scale 0.
+//
+// Operations are typically performed through the *Dec type.
+// The semantics of the assignment operation "=" for "bare" Dec values is
+// undefined and should not be relied on.
+//
+// Methods are typically of the form:
+//
+//	func (z *Dec) Op(x, y *Dec) *Dec
+//
+// and implement operations z = x Op y with the result as receiver; if it
+// is one of the operands it may be overwritten (and its memory reused).
+// To enable chaining of operations, the result is also returned. Methods
+// returning a result other than *Dec take one of the operands as the receiver.
+//
+// A "bare" Quo method (quotient / division operation) is not provided, as the
+// result is not always a finite decimal and thus in general cannot be
+// represented as a Dec.
+// Instead, in the common case when rounding is (potentially) necessary,
+// QuoRound should be used with a Scale and a Rounder.
+// QuoExact or QuoRound with RoundExact can be used in the special cases when it
+// is known that the result is always a finite decimal.
+//
+type Dec struct {
+	unscaled big.Int
+	scale    Scale
+}
+
+// Scale represents the type used for the scale of a Dec.
+type Scale int32
+
+const scaleSize = 4 // bytes in a Scale value
+
+// Scaler represents a method for obtaining the scale to use for the result of
+// an operation on x and y.
+type scaler interface {
+	Scale(x *Dec, y *Dec) Scale
+}
+
+var bigInt = [...]*big.Int{
+	big.NewInt(0), big.NewInt(1), big.NewInt(2), big.NewInt(3), big.NewInt(4),
+	big.NewInt(5), big.NewInt(6), big.NewInt(7), big.NewInt(8), big.NewInt(9),
+	big.NewInt(10),
+}
+
+var exp10cache [64]big.Int = func() [64]big.Int {
+	e10, e10i := [64]big.Int{}, bigInt[1]
+	for i, _ := range e10 {
+		e10[i].Set(e10i)
+		e10i = new(big.Int).Mul(e10i, bigInt[10])
+	}
+	return e10
+}()
+
+// NewDec allocates and returns a new Dec set to the given int64 unscaled value
+// and scale.
+func NewDec(unscaled int64, scale Scale) *Dec {
+	return new(Dec).SetUnscaled(unscaled).SetScale(scale)
+}
+
+// NewDecBig allocates and returns a new Dec set to the given *big.Int unscaled
+// value and scale.
+func NewDecBig(unscaled *big.Int, scale Scale) *Dec {
+	return new(Dec).SetUnscaledBig(unscaled).SetScale(scale)
+}
+
+// Scale returns the scale of x.
+func (x *Dec) Scale() Scale {
+	return x.scale
+}
+
+// Unscaled returns the unscaled value of x for u and true for ok when the
+// unscaled value can be represented as int64; otherwise it returns an undefined
+// int64 value for u and false for ok. Use x.UnscaledBig().Int64() to avoid
+// checking the validity of the value when the check is known to be redundant.
+func (x *Dec) Unscaled() (u int64, ok bool) {
+	u = x.unscaled.Int64()
+	var i big.Int
+	ok = i.SetInt64(u).Cmp(&x.unscaled) == 0
+	return
+}
+
+// UnscaledBig returns the unscaled value of x as *big.Int.
+func (x *Dec) UnscaledBig() *big.Int {
+	return &x.unscaled
+}
+
+// SetScale sets the scale of z, with the unscaled value unchanged, and returns
+// z.
+// The mathematical value of the Dec changes as if it was multiplied by
+// 10**(oldscale-scale).
+func (z *Dec) SetScale(scale Scale) *Dec {
+	z.scale = scale
+	return z
+}
+
+// SetUnscaled sets the unscaled value of z, with the scale unchanged, and
+// returns z.
+func (z *Dec) SetUnscaled(unscaled int64) *Dec {
+	z.unscaled.SetInt64(unscaled)
+	return z
+}
+
+// SetUnscaledBig sets the unscaled value of z, with the scale unchanged, and
+// returns z.
+func (z *Dec) SetUnscaledBig(unscaled *big.Int) *Dec {
+	z.unscaled.Set(unscaled)
+	return z
+}
+
+// Set sets z to the value of x and returns z.
+// It does nothing if z == x.
+func (z *Dec) Set(x *Dec) *Dec {
+	if z != x {
+		z.SetUnscaledBig(x.UnscaledBig())
+		z.SetScale(x.Scale())
+	}
+	return z
+}
+
+// Sign returns:
+//
+//	-1 if x <  0
+//	 0 if x == 0
+//	+1 if x >  0
+//
+func (x *Dec) Sign() int {
+	return x.UnscaledBig().Sign()
+}
+
+// Neg sets z to -x and returns z.
+func (z *Dec) Neg(x *Dec) *Dec {
+	z.SetScale(x.Scale())
+	z.UnscaledBig().Neg(x.UnscaledBig())
+	return z
+}
+
+// Cmp compares x and y and returns:
+//
+//   -1 if x <  y
+//    0 if x == y
+//   +1 if x >  y
+//
+func (x *Dec) Cmp(y *Dec) int {
+	xx, yy := upscale(x, y)
+	return xx.UnscaledBig().Cmp(yy.UnscaledBig())
+}
+
+// Abs sets z to |x| (the absolute value of x) and returns z.
+func (z *Dec) Abs(x *Dec) *Dec {
+	z.SetScale(x.Scale())
+	z.UnscaledBig().Abs(x.UnscaledBig())
+	return z
+}
+
+// Add sets z to the sum x+y and returns z.
+// The scale of z is the greater of the scales of x and y.
+func (z *Dec) Add(x, y *Dec) *Dec {
+	xx, yy := upscale(x, y)
+	z.SetScale(xx.Scale())
+	z.UnscaledBig().Add(xx.UnscaledBig(), yy.UnscaledBig())
+	return z
+}
+
+// Sub sets z to the difference x-y and returns z.
+// The scale of z is the greater of the scales of x and y.
+func (z *Dec) Sub(x, y *Dec) *Dec {
+	xx, yy := upscale(x, y)
+	z.SetScale(xx.Scale())
+	z.UnscaledBig().Sub(xx.UnscaledBig(), yy.UnscaledBig())
+	return z
+}
+
+// Mul sets z to the product x*y and returns z.
+// The scale of z is the sum of the scales of x and y.
+func (z *Dec) Mul(x, y *Dec) *Dec {
+	z.SetScale(x.Scale() + y.Scale())
+	z.UnscaledBig().Mul(x.UnscaledBig(), y.UnscaledBig())
+	return z
+}
+
+// Round sets z to the value of x rounded to Scale s using Rounder r, and
+// returns z.
+func (z *Dec) Round(x *Dec, s Scale, r Rounder) *Dec {
+	return z.QuoRound(x, NewDec(1, 0), s, r)
+}
+
+// QuoRound sets z to the quotient x/y, rounded using the given Rounder to the
+// specified scale.
+//
+// If the rounder is RoundExact but the result can not be expressed exactly at
+// the specified scale, QuoRound returns nil, and the value of z is undefined.
+//
+// There is no corresponding Div method; the equivalent can be achieved through
+// the choice of Rounder used.
+//
+func (z *Dec) QuoRound(x, y *Dec, s Scale, r Rounder) *Dec {
+	return z.quo(x, y, sclr{s}, r)
+}
+
+func (z *Dec) quo(x, y *Dec, s scaler, r Rounder) *Dec {
+	scl := s.Scale(x, y)
+	var zzz *Dec
+	if r.UseRemainder() {
+		zz, rA, rB := new(Dec).quoRem(x, y, scl, true, new(big.Int), new(big.Int))
+		zzz = r.Round(new(Dec), zz, rA, rB)
+	} else {
+		zz, _, _ := new(Dec).quoRem(x, y, scl, false, nil, nil)
+		zzz = r.Round(new(Dec), zz, nil, nil)
+	}
+	if zzz == nil {
+		return nil
+	}
+	return z.Set(zzz)
+}
+
+// QuoExact sets z to the quotient x/y and returns z when x/y is a finite
+// decimal. Otherwise it returns nil and the value of z is undefined.
+//
+// The scale of a non-nil result is "x.Scale() - y.Scale()" or greater; it is
+// calculated so that the remainder will be zero whenever x/y is a finite
+// decimal.
+func (z *Dec) QuoExact(x, y *Dec) *Dec {
+	return z.quo(x, y, scaleQuoExact{}, RoundExact)
+}
+
+// quoRem sets z to the quotient x/y with the scale s, and if useRem is true,
+// it sets remNum and remDen to the numerator and denominator of the remainder.
+// It returns z, remNum and remDen.
+//
+// The remainder is normalized to the range -1 < r < 1 to simplify rounding;
+// that is, the results satisfy the following equation:
+//
+//  x / y = z + (remNum/remDen) * 10**(-z.Scale())
+//
+// See Rounder for more details about rounding.
+//
+func (z *Dec) quoRem(x, y *Dec, s Scale, useRem bool,
+	remNum, remDen *big.Int) (*Dec, *big.Int, *big.Int) {
+	// difference (required adjustment) compared to "canonical" result scale
+	shift := s - (x.Scale() - y.Scale())
+	// pointers to adjusted unscaled dividend and divisor
+	var ix, iy *big.Int
+	switch {
+	case shift > 0:
+		// increased scale: decimal-shift dividend left
+		ix = new(big.Int).Mul(x.UnscaledBig(), exp10(shift))
+		iy = y.UnscaledBig()
+	case shift < 0:
+		// decreased scale: decimal-shift divisor left
+		ix = x.UnscaledBig()
+		iy = new(big.Int).Mul(y.UnscaledBig(), exp10(-shift))
+	default:
+		ix = x.UnscaledBig()
+		iy = y.UnscaledBig()
+	}
+	// save a copy of iy in case it to be overwritten with the result
+	iy2 := iy
+	if iy == z.UnscaledBig() {
+		iy2 = new(big.Int).Set(iy)
+	}
+	// set scale
+	z.SetScale(s)
+	// set unscaled
+	if useRem {
+		// Int division
+		_, intr := z.UnscaledBig().QuoRem(ix, iy, new(big.Int))
+		// set remainder
+		remNum.Set(intr)
+		remDen.Set(iy2)
+	} else {
+		z.UnscaledBig().Quo(ix, iy)
+	}
+	return z, remNum, remDen
+}
+
+type sclr struct{ s Scale }
+
+func (s sclr) Scale(x, y *Dec) Scale {
+	return s.s
+}
+
+type scaleQuoExact struct{}
+
+func (sqe scaleQuoExact) Scale(x, y *Dec) Scale {
+	rem := new(big.Rat).SetFrac(x.UnscaledBig(), y.UnscaledBig())
+	f2, f5 := factor2(rem.Denom()), factor(rem.Denom(), bigInt[5])
+	var f10 Scale
+	if f2 > f5 {
+		f10 = Scale(f2)
+	} else {
+		f10 = Scale(f5)
+	}
+	return x.Scale() - y.Scale() + f10
+}
+
+func factor(n *big.Int, p *big.Int) int {
+	// could be improved for large factors
+	d, f := n, 0
+	for {
+		dd, dm := new(big.Int).DivMod(d, p, new(big.Int))
+		if dm.Sign() == 0 {
+			f++
+			d = dd
+		} else {
+			break
+		}
+	}
+	return f
+}
+
+func factor2(n *big.Int) int {
+	// could be improved for large factors
+	f := 0
+	for ; n.Bit(f) == 0; f++ {
+	}
+	return f
+}
+
+func upscale(a, b *Dec) (*Dec, *Dec) {
+	if a.Scale() == b.Scale() {
+		return a, b
+	}
+	if a.Scale() > b.Scale() {
+		bb := b.rescale(a.Scale())
+		return a, bb
+	}
+	aa := a.rescale(b.Scale())
+	return aa, b
+}
+
+func exp10(x Scale) *big.Int {
+	if int(x) < len(exp10cache) {
+		return &exp10cache[int(x)]
+	}
+	return new(big.Int).Exp(bigInt[10], big.NewInt(int64(x)), nil)
+}
+
+func (x *Dec) rescale(newScale Scale) *Dec {
+	shift := newScale - x.Scale()
+	switch {
+	case shift < 0:
+		e := exp10(-shift)
+		return NewDecBig(new(big.Int).Quo(x.UnscaledBig(), e), newScale)
+	case shift > 0:
+		e := exp10(shift)
+		return NewDecBig(new(big.Int).Mul(x.UnscaledBig(), e), newScale)
+	}
+	return x
+}
+
+var zeros = []byte("00000000000000000000000000000000" +
+	"00000000000000000000000000000000")
+var lzeros = Scale(len(zeros))
+
+func appendZeros(s []byte, n Scale) []byte {
+	for i := Scale(0); i < n; i += lzeros {
+		if n > i+lzeros {
+			s = append(s, zeros...)
+		} else {
+			s = append(s, zeros[0:n-i]...)
+		}
+	}
+	return s
+}
+
+func (x *Dec) String() string {
+	if x == nil {
+		return "<nil>"
+	}
+	scale := x.Scale()
+	s := []byte(x.UnscaledBig().String())
+	if scale <= 0 {
+		if scale != 0 && x.unscaled.Sign() != 0 {
+			s = appendZeros(s, -scale)
+		}
+		return string(s)
+	}
+	negbit := Scale(-((x.Sign() - 1) / 2))
+	// scale > 0
+	lens := Scale(len(s))
+	if lens-negbit <= scale {
+		ss := make([]byte, 0, scale+2)
+		if negbit == 1 {
+			ss = append(ss, '-')
+		}
+		ss = append(ss, '0', '.')
+		ss = appendZeros(ss, scale-lens+negbit)
+		ss = append(ss, s[negbit:]...)
+		return string(ss)
+	}
+	// lens > scale
+	ss := make([]byte, 0, lens+1)
+	ss = append(ss, s[:lens-scale]...)
+	ss = append(ss, '.')
+	ss = append(ss, s[lens-scale:]...)
+	return string(ss)
+}
+
+// Format is a support routine for fmt.Formatter. It accepts the decimal
+// formats 'd' and 'f', and handles both equivalently.
+// Width, precision, flags and bases 2, 8, 16 are not supported.
+func (x *Dec) Format(s fmt.State, ch rune) {
+	if ch != 'd' && ch != 'f' && ch != 'v' && ch != 's' {
+		fmt.Fprintf(s, "%%!%c(dec.Dec=%s)", ch, x.String())
+		return
+	}
+	fmt.Fprintf(s, x.String())
+}
+
+func (z *Dec) scan(r io.RuneScanner) (*Dec, error) {
+	unscaled := make([]byte, 0, 256) // collects chars of unscaled as bytes
+	dp, dg := -1, -1                 // indexes of decimal point, first digit
+loop:
+	for {
+		ch, _, err := r.ReadRune()
+		if err == io.EOF {
+			break loop
+		}
+		if err != nil {
+			return nil, err
+		}
+		switch {
+		case ch == '+' || ch == '-':
+			if len(unscaled) > 0 || dp >= 0 { // must be first character
+				r.UnreadRune()
+				break loop
+			}
+		case ch == '.':
+			if dp >= 0 {
+				r.UnreadRune()
+				break loop
+			}
+			dp = len(unscaled)
+			continue // don't add to unscaled
+		case ch >= '0' && ch <= '9':
+			if dg == -1 {
+				dg = len(unscaled)
+			}
+		default:
+			r.UnreadRune()
+			break loop
+		}
+		unscaled = append(unscaled, byte(ch))
+	}
+	if dg == -1 {
+		return nil, fmt.Errorf("no digits read")
+	}
+	if dp >= 0 {
+		z.SetScale(Scale(len(unscaled) - dp))
+	} else {
+		z.SetScale(0)
+	}
+	_, ok := z.UnscaledBig().SetString(string(unscaled), 10)
+	if !ok {
+		return nil, fmt.Errorf("invalid decimal: %s", string(unscaled))
+	}
+	return z, nil
+}
+
+// SetString sets z to the value of s, interpreted as a decimal (base 10),
+// and returns z and a boolean indicating success. The scale of z is the
+// number of digits after the decimal point (including any trailing 0s),
+// or 0 if there is no decimal point. If SetString fails, the value of z
+// is undefined but the returned value is nil.
+func (z *Dec) SetString(s string) (*Dec, bool) {
+	r := strings.NewReader(s)
+	_, err := z.scan(r)
+	if err != nil {
+		return nil, false
+	}
+	_, _, err = r.ReadRune()
+	if err != io.EOF {
+		return nil, false
+	}
+	// err == io.EOF => scan consumed all of s
+	return z, true
+}
+
+// Scan is a support routine for fmt.Scanner; it sets z to the value of
+// the scanned number. It accepts the decimal formats 'd' and 'f', and
+// handles both equivalently. Bases 2, 8, 16 are not supported.
+// The scale of z is the number of digits after the decimal point
+// (including any trailing 0s), or 0 if there is no decimal point.
+func (z *Dec) Scan(s fmt.ScanState, ch rune) error {
+	if ch != 'd' && ch != 'f' && ch != 's' && ch != 'v' {
+		return fmt.Errorf("Dec.Scan: invalid verb '%c'", ch)
+	}
+	s.SkipSpace()
+	_, err := z.scan(s)
+	return err
+}
+
+// Gob encoding version
+const decGobVersion byte = 1
+
+func scaleBytes(s Scale) []byte {
+	buf := make([]byte, scaleSize)
+	i := scaleSize
+	for j := 0; j < scaleSize; j++ {
+		i--
+		buf[i] = byte(s)
+		s >>= 8
+	}
+	return buf
+}
+
+func scale(b []byte) (s Scale) {
+	for j := 0; j < scaleSize; j++ {
+		s <<= 8
+		s |= Scale(b[j])
+	}
+	return
+}
+
+// GobEncode implements the gob.GobEncoder interface.
+func (x *Dec) GobEncode() ([]byte, error) {
+	buf, err := x.UnscaledBig().GobEncode()
+	if err != nil {
+		return nil, err
+	}
+	buf = append(append(buf, scaleBytes(x.Scale())...), decGobVersion)
+	return buf, nil
+}
+
+// GobDecode implements the gob.GobDecoder interface.
+func (z *Dec) GobDecode(buf []byte) error {
+	if len(buf) == 0 {
+		return fmt.Errorf("Dec.GobDecode: no data")
+	}
+	b := buf[len(buf)-1]
+	if b != decGobVersion {
+		return fmt.Errorf("Dec.GobDecode: encoding version %d not supported", b)
+	}
+	l := len(buf) - scaleSize - 1
+	err := z.UnscaledBig().GobDecode(buf[:l])
+	if err != nil {
+		return err
+	}
+	z.SetScale(scale(buf[l : l+scaleSize]))
+	return nil
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+func (x *Dec) MarshalText() ([]byte, error) {
+	return []byte(x.String()), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+func (z *Dec) UnmarshalText(data []byte) error {
+	_, ok := z.SetString(string(data))
+	if !ok {
+		return fmt.Errorf("invalid inf.Dec")
+	}
+	return nil
+}

+ 33 - 0
src/gopkg.in/inf.v0/dec_go1_2_test.go

@@ -0,0 +1,33 @@
+// +build go1.2
+
+package inf
+
+import (
+	"encoding"
+	"encoding/json"
+	"testing"
+)
+
+var _ encoding.TextMarshaler = new(Dec)
+var _ encoding.TextUnmarshaler = new(Dec)
+
+type Obj struct {
+	Val *Dec
+}
+
+func TestDecJsonMarshalUnmarshal(t *testing.T) {
+	o := Obj{Val: NewDec(123, 2)}
+	js, err := json.Marshal(o)
+	if err != nil {
+		t.Fatalf("json.Marshal(%v): got %v, want ok", o, err)
+	}
+	o2 := &Obj{}
+	err = json.Unmarshal(js, o2)
+	if err != nil {
+		t.Fatalf("json.Unmarshal(%#q): got %v, want ok", js, err)
+	}
+	if o.Val.Scale() != o2.Val.Scale() ||
+		o.Val.UnscaledBig().Cmp(o2.Val.UnscaledBig()) != 0 {
+		t.Fatalf("json.Unmarshal(json.Marshal(%v)): want %v, got %v", o, o, o2)
+	}
+}

+ 40 - 0
src/gopkg.in/inf.v0/dec_internal_test.go

@@ -0,0 +1,40 @@
+package inf
+
+import (
+	"math/big"
+	"testing"
+)
+
+var decQuoRemZZZ = []struct {
+	z, x, y  *Dec
+	r        *big.Rat
+	srA, srB int
+}{
+	// basic examples
+	{NewDec(1, 0), NewDec(2, 0), NewDec(2, 0), big.NewRat(0, 1), 0, 1},
+	{NewDec(15, 1), NewDec(3, 0), NewDec(2, 0), big.NewRat(0, 1), 0, 1},
+	{NewDec(1, 1), NewDec(1, 0), NewDec(10, 0), big.NewRat(0, 1), 0, 1},
+	{NewDec(0, 0), NewDec(2, 0), NewDec(3, 0), big.NewRat(2, 3), 1, 1},
+	{NewDec(0, 0), NewDec(2, 0), NewDec(6, 0), big.NewRat(1, 3), 1, 1},
+	{NewDec(1, 1), NewDec(2, 0), NewDec(12, 0), big.NewRat(2, 3), 1, 1},
+
+	// examples from the Go Language Specification
+	{NewDec(1, 0), NewDec(5, 0), NewDec(3, 0), big.NewRat(2, 3), 1, 1},
+	{NewDec(-1, 0), NewDec(-5, 0), NewDec(3, 0), big.NewRat(-2, 3), -1, 1},
+	{NewDec(-1, 0), NewDec(5, 0), NewDec(-3, 0), big.NewRat(-2, 3), 1, -1},
+	{NewDec(1, 0), NewDec(-5, 0), NewDec(-3, 0), big.NewRat(2, 3), -1, -1},
+}
+
+func TestDecQuoRem(t *testing.T) {
+	for i, a := range decQuoRemZZZ {
+		z, rA, rB := new(Dec), new(big.Int), new(big.Int)
+		s := scaleQuoExact{}.Scale(a.x, a.y)
+		z.quoRem(a.x, a.y, s, true, rA, rB)
+		if a.z.Cmp(z) != 0 || a.r.Cmp(new(big.Rat).SetFrac(rA, rB)) != 0 {
+			t.Errorf("#%d QuoRemZZZ got %v, %v, %v; expected %v, %v", i, z, rA, rB, a.z, a.r)
+		}
+		if a.srA != rA.Sign() || a.srB != rB.Sign() {
+			t.Errorf("#%d QuoRemZZZ wrong signs, got %v, %v; expected %v, %v", i, rA.Sign(), rB.Sign(), a.srA, a.srB)
+		}
+	}
+}

+ 379 - 0
src/gopkg.in/inf.v0/dec_test.go

@@ -0,0 +1,379 @@
+package inf_test
+
+import (
+	"bytes"
+	"encoding/gob"
+	"fmt"
+	"math/big"
+	"strings"
+	"testing"
+
+	"gopkg.in/inf.v0"
+)
+
+type decFunZZ func(z, x, y *inf.Dec) *inf.Dec
+type decArgZZ struct {
+	z, x, y *inf.Dec
+}
+
+var decSumZZ = []decArgZZ{
+	{inf.NewDec(0, 0), inf.NewDec(0, 0), inf.NewDec(0, 0)},
+	{inf.NewDec(1, 0), inf.NewDec(1, 0), inf.NewDec(0, 0)},
+	{inf.NewDec(1111111110, 0), inf.NewDec(123456789, 0), inf.NewDec(987654321, 0)},
+	{inf.NewDec(-1, 0), inf.NewDec(-1, 0), inf.NewDec(0, 0)},
+	{inf.NewDec(864197532, 0), inf.NewDec(-123456789, 0), inf.NewDec(987654321, 0)},
+	{inf.NewDec(-1111111110, 0), inf.NewDec(-123456789, 0), inf.NewDec(-987654321, 0)},
+	{inf.NewDec(12, 2), inf.NewDec(1, 1), inf.NewDec(2, 2)},
+}
+
+var decProdZZ = []decArgZZ{
+	{inf.NewDec(0, 0), inf.NewDec(0, 0), inf.NewDec(0, 0)},
+	{inf.NewDec(0, 0), inf.NewDec(1, 0), inf.NewDec(0, 0)},
+	{inf.NewDec(1, 0), inf.NewDec(1, 0), inf.NewDec(1, 0)},
+	{inf.NewDec(-991*991, 0), inf.NewDec(991, 0), inf.NewDec(-991, 0)},
+	{inf.NewDec(2, 3), inf.NewDec(1, 1), inf.NewDec(2, 2)},
+	{inf.NewDec(2, -3), inf.NewDec(1, -1), inf.NewDec(2, -2)},
+	{inf.NewDec(2, 3), inf.NewDec(1, 1), inf.NewDec(2, 2)},
+}
+
+func TestDecSignZ(t *testing.T) {
+	var zero inf.Dec
+	for _, a := range decSumZZ {
+		s := a.z.Sign()
+		e := a.z.Cmp(&zero)
+		if s != e {
+			t.Errorf("got %d; want %d for z = %v", s, e, a.z)
+		}
+	}
+}
+
+func TestDecAbsZ(t *testing.T) {
+	var zero inf.Dec
+	for _, a := range decSumZZ {
+		var z inf.Dec
+		z.Abs(a.z)
+		var e inf.Dec
+		e.Set(a.z)
+		if e.Cmp(&zero) < 0 {
+			e.Sub(&zero, &e)
+		}
+		if z.Cmp(&e) != 0 {
+			t.Errorf("got z = %v; want %v", z, e)
+		}
+	}
+}
+
+func testDecFunZZ(t *testing.T, msg string, f decFunZZ, a decArgZZ) {
+	var z inf.Dec
+	f(&z, a.x, a.y)
+	if (&z).Cmp(a.z) != 0 {
+		t.Errorf("%s%+v\n\tgot z = %v; want %v", msg, a, &z, a.z)
+	}
+}
+
+func TestDecSumZZ(t *testing.T) {
+	AddZZ := func(z, x, y *inf.Dec) *inf.Dec { return z.Add(x, y) }
+	SubZZ := func(z, x, y *inf.Dec) *inf.Dec { return z.Sub(x, y) }
+	for _, a := range decSumZZ {
+		arg := a
+		testDecFunZZ(t, "AddZZ", AddZZ, arg)
+
+		arg = decArgZZ{a.z, a.y, a.x}
+		testDecFunZZ(t, "AddZZ symmetric", AddZZ, arg)
+
+		arg = decArgZZ{a.x, a.z, a.y}
+		testDecFunZZ(t, "SubZZ", SubZZ, arg)
+
+		arg = decArgZZ{a.y, a.z, a.x}
+		testDecFunZZ(t, "SubZZ symmetric", SubZZ, arg)
+	}
+}
+
+func TestDecProdZZ(t *testing.T) {
+	MulZZ := func(z, x, y *inf.Dec) *inf.Dec { return z.Mul(x, y) }
+	for _, a := range decProdZZ {
+		arg := a
+		testDecFunZZ(t, "MulZZ", MulZZ, arg)
+
+		arg = decArgZZ{a.z, a.y, a.x}
+		testDecFunZZ(t, "MulZZ symmetric", MulZZ, arg)
+	}
+}
+
+var decUnscaledTests = []struct {
+	d  *inf.Dec
+	u  int64 // ignored when ok == false
+	ok bool
+}{
+	{new(inf.Dec), 0, true},
+	{inf.NewDec(-1<<63, 0), -1 << 63, true},
+	{inf.NewDec(-(-1<<63 + 1), 0), -(-1<<63 + 1), true},
+	{new(inf.Dec).Neg(inf.NewDec(-1<<63, 0)), 0, false},
+	{new(inf.Dec).Sub(inf.NewDec(-1<<63, 0), inf.NewDec(1, 0)), 0, false},
+	{inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), 0, false},
+}
+
+func TestDecUnscaled(t *testing.T) {
+	for i, tt := range decUnscaledTests {
+		u, ok := tt.d.Unscaled()
+		if ok != tt.ok {
+			t.Errorf("#%d Unscaled: got %v, expected %v", i, ok, tt.ok)
+		} else if ok && u != tt.u {
+			t.Errorf("#%d Unscaled: got %v, expected %v", i, u, tt.u)
+		}
+	}
+}
+
+var decRoundTests = [...]struct {
+	in  *inf.Dec
+	s   inf.Scale
+	r   inf.Rounder
+	exp *inf.Dec
+}{
+	{inf.NewDec(123424999999999993, 15), 2, inf.RoundHalfUp, inf.NewDec(12342, 2)},
+	{inf.NewDec(123425000000000001, 15), 2, inf.RoundHalfUp, inf.NewDec(12343, 2)},
+	{inf.NewDec(123424999999999993, 15), 15, inf.RoundHalfUp, inf.NewDec(123424999999999993, 15)},
+	{inf.NewDec(123424999999999993, 15), 16, inf.RoundHalfUp, inf.NewDec(1234249999999999930, 16)},
+	{inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), -1, inf.RoundHalfUp, inf.NewDec(1844674407370955162, -1)},
+	{inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), -2, inf.RoundHalfUp, inf.NewDec(184467440737095516, -2)},
+	{inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), -3, inf.RoundHalfUp, inf.NewDec(18446744073709552, -3)},
+	{inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), -4, inf.RoundHalfUp, inf.NewDec(1844674407370955, -4)},
+	{inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), -5, inf.RoundHalfUp, inf.NewDec(184467440737096, -5)},
+	{inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), -6, inf.RoundHalfUp, inf.NewDec(18446744073710, -6)},
+}
+
+func TestDecRound(t *testing.T) {
+	for i, tt := range decRoundTests {
+		z := new(inf.Dec).Round(tt.in, tt.s, tt.r)
+		if tt.exp.Cmp(z) != 0 {
+			t.Errorf("#%d Round got %v; expected %v", i, z, tt.exp)
+		}
+	}
+}
+
+var decStringTests = []struct {
+	in     string
+	out    string
+	val    int64
+	scale  inf.Scale // skip SetString if negative
+	ok     bool
+	scanOk bool
+}{
+	{in: "", ok: false, scanOk: false},
+	{in: "a", ok: false, scanOk: false},
+	{in: "z", ok: false, scanOk: false},
+	{in: "+", ok: false, scanOk: false},
+	{in: "-", ok: false, scanOk: false},
+	{in: "g", ok: false, scanOk: false},
+	{in: ".", ok: false, scanOk: false},
+	{in: ".-0", ok: false, scanOk: false},
+	{in: ".+0", ok: false, scanOk: false},
+	// Scannable but not SetStringable
+	{"0b", "ignored", 0, 0, false, true},
+	{"0x", "ignored", 0, 0, false, true},
+	{"0xg", "ignored", 0, 0, false, true},
+	{"0.0g", "ignored", 0, 1, false, true},
+	// examples from godoc for Dec
+	{"0", "0", 0, 0, true, true},
+	{"0.00", "0.00", 0, 2, true, true},
+	{"ignored", "0", 0, -2, true, false},
+	{"1", "1", 1, 0, true, true},
+	{"1.00", "1.00", 100, 2, true, true},
+	{"10", "10", 10, 0, true, true},
+	{"ignored", "10", 1, -1, true, false},
+	// other tests
+	{"+0", "0", 0, 0, true, true},
+	{"-0", "0", 0, 0, true, true},
+	{"0.0", "0.0", 0, 1, true, true},
+	{"0.1", "0.1", 1, 1, true, true},
+	{"0.", "0", 0, 0, true, true},
+	{"-10", "-10", -1, -1, true, true},
+	{"-1", "-1", -1, 0, true, true},
+	{"-0.1", "-0.1", -1, 1, true, true},
+	{"-0.01", "-0.01", -1, 2, true, true},
+	{"+0.", "0", 0, 0, true, true},
+	{"-0.", "0", 0, 0, true, true},
+	{".0", "0.0", 0, 1, true, true},
+	{"+.0", "0.0", 0, 1, true, true},
+	{"-.0", "0.0", 0, 1, true, true},
+	{"0.0000000000", "0.0000000000", 0, 10, true, true},
+	{"0.0000000001", "0.0000000001", 1, 10, true, true},
+	{"-0.0000000000", "0.0000000000", 0, 10, true, true},
+	{"-0.0000000001", "-0.0000000001", -1, 10, true, true},
+	{"-10", "-10", -10, 0, true, true},
+	{"+10", "10", 10, 0, true, true},
+	{"00", "0", 0, 0, true, true},
+	{"023", "23", 23, 0, true, true},      // decimal, not octal
+	{"-02.3", "-2.3", -23, 1, true, true}, // decimal, not octal
+}
+
+func TestDecGetString(t *testing.T) {
+	z := new(inf.Dec)
+	for i, test := range decStringTests {
+		if !test.ok {
+			continue
+		}
+		z.SetUnscaled(test.val)
+		z.SetScale(test.scale)
+
+		s := z.String()
+		if s != test.out {
+			t.Errorf("#%da got %s; want %s", i, s, test.out)
+		}
+
+		s = fmt.Sprintf("%d", z)
+		if s != test.out {
+			t.Errorf("#%db got %s; want %s", i, s, test.out)
+		}
+	}
+}
+
+func TestDecSetString(t *testing.T) {
+	tmp := new(inf.Dec)
+	for i, test := range decStringTests {
+		if test.scale < 0 {
+			// SetString only supports scale >= 0
+			continue
+		}
+		// initialize to a non-zero value so that issues with parsing
+		// 0 are detected
+		tmp.Set(inf.NewDec(1234567890, 123))
+		n1, ok1 := new(inf.Dec).SetString(test.in)
+		n2, ok2 := tmp.SetString(test.in)
+		expected := inf.NewDec(test.val, test.scale)
+		if ok1 != test.ok || ok2 != test.ok {
+			t.Errorf("#%d (input '%s') ok incorrect (should be %t)", i, test.in, test.ok)
+			continue
+		}
+		if !ok1 {
+			if n1 != nil {
+				t.Errorf("#%d (input '%s') n1 != nil", i, test.in)
+			}
+			continue
+		}
+		if !ok2 {
+			if n2 != nil {
+				t.Errorf("#%d (input '%s') n2 != nil", i, test.in)
+			}
+			continue
+		}
+
+		if n1.Cmp(expected) != 0 {
+			t.Errorf("#%d (input '%s') got: %s want: %d", i, test.in, n1, test.val)
+		}
+		if n2.Cmp(expected) != 0 {
+			t.Errorf("#%d (input '%s') got: %s want: %d", i, test.in, n2, test.val)
+		}
+	}
+}
+
+func TestDecScan(t *testing.T) {
+	tmp := new(inf.Dec)
+	for i, test := range decStringTests {
+		if test.scale < 0 {
+			// SetString only supports scale >= 0
+			continue
+		}
+		// initialize to a non-zero value so that issues with parsing
+		// 0 are detected
+		tmp.Set(inf.NewDec(1234567890, 123))
+		n1, n2 := new(inf.Dec), tmp
+		nn1, err1 := fmt.Sscan(test.in, n1)
+		nn2, err2 := fmt.Sscan(test.in, n2)
+		if !test.scanOk {
+			if err1 == nil || err2 == nil {
+				t.Errorf("#%d (input '%s') ok incorrect, should be %t", i, test.in, test.scanOk)
+			}
+			continue
+		}
+		expected := inf.NewDec(test.val, test.scale)
+		if nn1 != 1 || err1 != nil || nn2 != 1 || err2 != nil {
+			t.Errorf("#%d (input '%s') error %d %v, %d %v", i, test.in, nn1, err1, nn2, err2)
+			continue
+		}
+		if n1.Cmp(expected) != 0 {
+			t.Errorf("#%d (input '%s') got: %s want: %d", i, test.in, n1, test.val)
+		}
+		if n2.Cmp(expected) != 0 {
+			t.Errorf("#%d (input '%s') got: %s want: %d", i, test.in, n2, test.val)
+		}
+	}
+}
+
+var decScanNextTests = []struct {
+	in   string
+	ok   bool
+	next rune
+}{
+	{"", false, 0},
+	{"a", false, 'a'},
+	{"z", false, 'z'},
+	{"+", false, 0},
+	{"-", false, 0},
+	{"g", false, 'g'},
+	{".", false, 0},
+	{".-0", false, '-'},
+	{".+0", false, '+'},
+	{"0b", true, 'b'},
+	{"0x", true, 'x'},
+	{"0xg", true, 'x'},
+	{"0.0g", true, 'g'},
+}
+
+func TestDecScanNext(t *testing.T) {
+	for i, test := range decScanNextTests {
+		rdr := strings.NewReader(test.in)
+		n1 := new(inf.Dec)
+		nn1, _ := fmt.Fscan(rdr, n1)
+		if (test.ok && nn1 == 0) || (!test.ok && nn1 > 0) {
+			t.Errorf("#%d (input '%s') ok incorrect should be %t", i, test.in, test.ok)
+			continue
+		}
+		r := rune(0)
+		nn2, err := fmt.Fscanf(rdr, "%c", &r)
+		if test.next != r {
+			t.Errorf("#%d (input '%s') next incorrect, got %c should be %c, %d, %v", i, test.in, r, test.next, nn2, err)
+		}
+	}
+}
+
+var decGobEncodingTests = []string{
+	"0",
+	"1",
+	"2",
+	"10",
+	"42",
+	"1234567890",
+	"298472983472983471903246121093472394872319615612417471234712061",
+}
+
+func TestDecGobEncoding(t *testing.T) {
+	var medium bytes.Buffer
+	enc := gob.NewEncoder(&medium)
+	dec := gob.NewDecoder(&medium)
+	for i, test := range decGobEncodingTests {
+		for j := 0; j < 2; j++ {
+			for k := inf.Scale(-5); k <= 5; k++ {
+				medium.Reset() // empty buffer for each test case (in case of failures)
+				stest := test
+				if j != 0 {
+					// negative numbers
+					stest = "-" + test
+				}
+				var tx inf.Dec
+				tx.SetString(stest)
+				tx.SetScale(k) // test with positive, negative, and zero scale
+				if err := enc.Encode(&tx); err != nil {
+					t.Errorf("#%d%c: encoding failed: %s", i, 'a'+j, err)
+				}
+				var rx inf.Dec
+				if err := dec.Decode(&rx); err != nil {
+					t.Errorf("#%d%c: decoding failed: %s", i, 'a'+j, err)
+				}
+				if rx.Cmp(&tx) != 0 {
+					t.Errorf("#%d%c: transmission failed: got %s want %s", i, 'a'+j, &rx, &tx)
+				}
+			}
+		}
+	}
+}

+ 62 - 0
src/gopkg.in/inf.v0/example_test.go

@@ -0,0 +1,62 @@
+package inf_test
+
+import (
+	"fmt"
+	"log"
+)
+
+import "gopkg.in/inf.v0"
+
+func ExampleDec_SetString() {
+	d := new(inf.Dec)
+	d.SetString("012345.67890") // decimal; leading 0 ignored; trailing 0 kept
+	fmt.Println(d)
+	// Output: 12345.67890
+}
+
+func ExampleDec_Scan() {
+	// The Scan function is rarely used directly;
+	// the fmt package recognizes it as an implementation of fmt.Scanner.
+	d := new(inf.Dec)
+	_, err := fmt.Sscan("184467440.73709551617", d)
+	if err != nil {
+		log.Println("error scanning value:", err)
+	} else {
+		fmt.Println(d)
+	}
+	// Output: 184467440.73709551617
+}
+
+func ExampleDec_QuoRound_scale2RoundDown() {
+	// 10 / 3 is an infinite decimal; it has no exact Dec representation
+	x, y := inf.NewDec(10, 0), inf.NewDec(3, 0)
+	// use 2 digits beyond the decimal point, round towards 0
+	z := new(inf.Dec).QuoRound(x, y, 2, inf.RoundDown)
+	fmt.Println(z)
+	// Output: 3.33
+}
+
+func ExampleDec_QuoRound_scale2RoundCeil() {
+	// -42 / 400 is an finite decimal with 3 digits beyond the decimal point
+	x, y := inf.NewDec(-42, 0), inf.NewDec(400, 0)
+	// use 2 digits beyond decimal point, round towards positive infinity
+	z := new(inf.Dec).QuoRound(x, y, 2, inf.RoundCeil)
+	fmt.Println(z)
+	// Output: -0.10
+}
+
+func ExampleDec_QuoExact_ok() {
+	// 1 / 25 is a finite decimal; it has exact Dec representation
+	x, y := inf.NewDec(1, 0), inf.NewDec(25, 0)
+	z := new(inf.Dec).QuoExact(x, y)
+	fmt.Println(z)
+	// Output: 0.04
+}
+
+func ExampleDec_QuoExact_fail() {
+	// 1 / 3 is an infinite decimal; it has no exact Dec representation
+	x, y := inf.NewDec(1, 0), inf.NewDec(3, 0)
+	z := new(inf.Dec).QuoExact(x, y)
+	fmt.Println(z)
+	// Output: <nil>
+}

+ 145 - 0
src/gopkg.in/inf.v0/rounder.go

@@ -0,0 +1,145 @@
+package inf
+
+import (
+	"math/big"
+)
+
+// Rounder represents a method for rounding the (possibly infinite decimal)
+// result of a division to a finite Dec. It is used by Dec.Round() and
+// Dec.Quo().
+//
+// See the Example for results of using each Rounder with some sample values.
+//
+type Rounder rounder
+
+// See http://speleotrove.com/decimal/damodel.html#refround for more detailed
+// definitions of these rounding modes.
+var (
+	RoundDown     Rounder // towards 0
+	RoundUp       Rounder // away from 0
+	RoundFloor    Rounder // towards -infinity
+	RoundCeil     Rounder // towards +infinity
+	RoundHalfDown Rounder // to nearest; towards 0 if same distance
+	RoundHalfUp   Rounder // to nearest; away from 0 if same distance
+	RoundHalfEven Rounder // to nearest; even last digit if same distance
+)
+
+// RoundExact is to be used in the case when rounding is not necessary.
+// When used with Quo or Round, it returns the result verbatim when it can be
+// expressed exactly with the given precision, and it returns nil otherwise.
+// QuoExact is a shorthand for using Quo with RoundExact.
+var RoundExact Rounder
+
+type rounder interface {
+
+	// When UseRemainder() returns true, the Round() method is passed the
+	// remainder of the division, expressed as the numerator and denominator of
+	// a rational.
+	UseRemainder() bool
+
+	// Round sets the rounded value of a quotient to z, and returns z.
+	// quo is rounded down (truncated towards zero) to the scale obtained from
+	// the Scaler in Quo().
+	//
+	// When the remainder is not used, remNum and remDen are nil.
+	// When used, the remainder is normalized between -1 and 1; that is:
+	//
+	//  -|remDen| < remNum < |remDen|
+	//
+	// remDen has the same sign as y, and remNum is zero or has the same sign
+	// as x.
+	Round(z, quo *Dec, remNum, remDen *big.Int) *Dec
+}
+
+type rndr struct {
+	useRem bool
+	round  func(z, quo *Dec, remNum, remDen *big.Int) *Dec
+}
+
+func (r rndr) UseRemainder() bool {
+	return r.useRem
+}
+
+func (r rndr) Round(z, quo *Dec, remNum, remDen *big.Int) *Dec {
+	return r.round(z, quo, remNum, remDen)
+}
+
+var intSign = []*big.Int{big.NewInt(-1), big.NewInt(0), big.NewInt(1)}
+
+func roundHalf(f func(c int, odd uint) (roundUp bool)) func(z, q *Dec, rA, rB *big.Int) *Dec {
+	return func(z, q *Dec, rA, rB *big.Int) *Dec {
+		z.Set(q)
+		brA, brB := rA.BitLen(), rB.BitLen()
+		if brA < brB-1 {
+			// brA < brB-1 => |rA| < |rB/2|
+			return z
+		}
+		roundUp := false
+		srA, srB := rA.Sign(), rB.Sign()
+		s := srA * srB
+		if brA == brB-1 {
+			rA2 := new(big.Int).Lsh(rA, 1)
+			if s < 0 {
+				rA2.Neg(rA2)
+			}
+			roundUp = f(rA2.Cmp(rB)*srB, z.UnscaledBig().Bit(0))
+		} else {
+			// brA > brB-1 => |rA| > |rB/2|
+			roundUp = true
+		}
+		if roundUp {
+			z.UnscaledBig().Add(z.UnscaledBig(), intSign[s+1])
+		}
+		return z
+	}
+}
+
+func init() {
+	RoundExact = rndr{true,
+		func(z, q *Dec, rA, rB *big.Int) *Dec {
+			if rA.Sign() != 0 {
+				return nil
+			}
+			return z.Set(q)
+		}}
+	RoundDown = rndr{false,
+		func(z, q *Dec, rA, rB *big.Int) *Dec {
+			return z.Set(q)
+		}}
+	RoundUp = rndr{true,
+		func(z, q *Dec, rA, rB *big.Int) *Dec {
+			z.Set(q)
+			if rA.Sign() != 0 {
+				z.UnscaledBig().Add(z.UnscaledBig(), intSign[rA.Sign()*rB.Sign()+1])
+			}
+			return z
+		}}
+	RoundFloor = rndr{true,
+		func(z, q *Dec, rA, rB *big.Int) *Dec {
+			z.Set(q)
+			if rA.Sign()*rB.Sign() < 0 {
+				z.UnscaledBig().Add(z.UnscaledBig(), intSign[0])
+			}
+			return z
+		}}
+	RoundCeil = rndr{true,
+		func(z, q *Dec, rA, rB *big.Int) *Dec {
+			z.Set(q)
+			if rA.Sign()*rB.Sign() > 0 {
+				z.UnscaledBig().Add(z.UnscaledBig(), intSign[2])
+			}
+			return z
+		}}
+	RoundHalfDown = rndr{true, roundHalf(
+		func(c int, odd uint) bool {
+			return c > 0
+		})}
+	RoundHalfUp = rndr{true, roundHalf(
+		func(c int, odd uint) bool {
+			return c >= 0
+		})}
+	RoundHalfEven = rndr{true, roundHalf(
+		func(c int, odd uint) bool {
+			return c > 0 || c == 0 && odd == 1
+		})}
+}

+ 72 - 0
src/gopkg.in/inf.v0/rounder_example_test.go

@@ -0,0 +1,72 @@
+package inf_test
+
+import (
+	"fmt"
+	"os"
+	"text/tabwriter"
+
+	"gopkg.in/inf.v0"
+)
+
+// This example displays the results of Dec.Round with each of the Rounders.
+//
+func ExampleRounder() {
+	var vals = []struct {
+		x string
+		s inf.Scale
+	}{
+		{"-0.18", 1}, {"-0.15", 1}, {"-0.12", 1}, {"-0.10", 1},
+		{"-0.08", 1}, {"-0.05", 1}, {"-0.02", 1}, {"0.00", 1},
+		{"0.02", 1}, {"0.05", 1}, {"0.08", 1}, {"0.10", 1},
+		{"0.12", 1}, {"0.15", 1}, {"0.18", 1},
+	}
+
+	var rounders = []struct {
+		name    string
+		rounder inf.Rounder
+	}{
+		{"RoundDown", inf.RoundDown}, {"RoundUp", inf.RoundUp},
+		{"RoundCeil", inf.RoundCeil}, {"RoundFloor", inf.RoundFloor},
+		{"RoundHalfDown", inf.RoundHalfDown}, {"RoundHalfUp", inf.RoundHalfUp},
+		{"RoundHalfEven", inf.RoundHalfEven}, {"RoundExact", inf.RoundExact},
+	}
+
+	fmt.Println("The results of new(inf.Dec).Round(x, s, inf.RoundXXX):\n")
+	w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', tabwriter.AlignRight)
+	fmt.Fprint(w, "x\ts\t|\t")
+	for _, r := range rounders {
+		fmt.Fprintf(w, "%s\t", r.name[5:])
+	}
+	fmt.Fprintln(w)
+	for _, v := range vals {
+		fmt.Fprintf(w, "%s\t%d\t|\t", v.x, v.s)
+		for _, r := range rounders {
+			x, _ := new(inf.Dec).SetString(v.x)
+			z := new(inf.Dec).Round(x, v.s, r.rounder)
+			fmt.Fprintf(w, "%d\t", z)
+		}
+		fmt.Fprintln(w)
+	}
+	w.Flush()
+
+	// Output:
+	// The results of new(inf.Dec).Round(x, s, inf.RoundXXX):
+	//
+	//      x s | Down   Up Ceil Floor HalfDown HalfUp HalfEven Exact
+	//  -0.18 1 | -0.1 -0.2 -0.1  -0.2     -0.2   -0.2     -0.2 <nil>
+	//  -0.15 1 | -0.1 -0.2 -0.1  -0.2     -0.1   -0.2     -0.2 <nil>
+	//  -0.12 1 | -0.1 -0.2 -0.1  -0.2     -0.1   -0.1     -0.1 <nil>
+	//  -0.10 1 | -0.1 -0.1 -0.1  -0.1     -0.1   -0.1     -0.1  -0.1
+	//  -0.08 1 |  0.0 -0.1  0.0  -0.1     -0.1   -0.1     -0.1 <nil>
+	//  -0.05 1 |  0.0 -0.1  0.0  -0.1      0.0   -0.1      0.0 <nil>
+	//  -0.02 1 |  0.0 -0.1  0.0  -0.1      0.0    0.0      0.0 <nil>
+	//   0.00 1 |  0.0  0.0  0.0   0.0      0.0    0.0      0.0   0.0
+	//   0.02 1 |  0.0  0.1  0.1   0.0      0.0    0.0      0.0 <nil>
+	//   0.05 1 |  0.0  0.1  0.1   0.0      0.0    0.1      0.0 <nil>
+	//   0.08 1 |  0.0  0.1  0.1   0.0      0.1    0.1      0.1 <nil>
+	//   0.10 1 |  0.1  0.1  0.1   0.1      0.1    0.1      0.1   0.1
+	//   0.12 1 |  0.1  0.2  0.2   0.1      0.1    0.1      0.1 <nil>
+	//   0.15 1 |  0.1  0.2  0.2   0.1      0.1    0.2      0.2 <nil>
+	//   0.18 1 |  0.1  0.2  0.2   0.1      0.2    0.2      0.2 <nil>
+
+}

+ 109 - 0
src/gopkg.in/inf.v0/rounder_test.go

@@ -0,0 +1,109 @@
+package inf_test
+
+import (
+	"math/big"
+	"testing"
+
+	"gopkg.in/inf.v0"
+)
+
+var decRounderInputs = [...]struct {
+	quo    *inf.Dec
+	rA, rB *big.Int
+}{
+	// examples from go language spec
+	{inf.NewDec(1, 0), big.NewInt(2), big.NewInt(3)},   //  5 /  3
+	{inf.NewDec(-1, 0), big.NewInt(-2), big.NewInt(3)}, // -5 /  3
+	{inf.NewDec(-1, 0), big.NewInt(2), big.NewInt(-3)}, //  5 / -3
+	{inf.NewDec(1, 0), big.NewInt(-2), big.NewInt(-3)}, // -5 / -3
+	// examples from godoc
+	{inf.NewDec(-1, 1), big.NewInt(-8), big.NewInt(10)},
+	{inf.NewDec(-1, 1), big.NewInt(-5), big.NewInt(10)},
+	{inf.NewDec(-1, 1), big.NewInt(-2), big.NewInt(10)},
+	{inf.NewDec(0, 1), big.NewInt(-8), big.NewInt(10)},
+	{inf.NewDec(0, 1), big.NewInt(-5), big.NewInt(10)},
+	{inf.NewDec(0, 1), big.NewInt(-2), big.NewInt(10)},
+	{inf.NewDec(0, 1), big.NewInt(0), big.NewInt(1)},
+	{inf.NewDec(0, 1), big.NewInt(2), big.NewInt(10)},
+	{inf.NewDec(0, 1), big.NewInt(5), big.NewInt(10)},
+	{inf.NewDec(0, 1), big.NewInt(8), big.NewInt(10)},
+	{inf.NewDec(1, 1), big.NewInt(2), big.NewInt(10)},
+	{inf.NewDec(1, 1), big.NewInt(5), big.NewInt(10)},
+	{inf.NewDec(1, 1), big.NewInt(8), big.NewInt(10)},
+}
+
+var decRounderResults = [...]struct {
+	rounder inf.Rounder
+	results [len(decRounderInputs)]*inf.Dec
+}{
+	{inf.RoundExact, [...]*inf.Dec{nil, nil, nil, nil,
+		nil, nil, nil, nil, nil, nil,
+		inf.NewDec(0, 1), nil, nil, nil, nil, nil, nil}},
+	{inf.RoundDown, [...]*inf.Dec{
+		inf.NewDec(1, 0), inf.NewDec(-1, 0), inf.NewDec(-1, 0), inf.NewDec(1, 0),
+		inf.NewDec(-1, 1), inf.NewDec(-1, 1), inf.NewDec(-1, 1),
+		inf.NewDec(0, 1), inf.NewDec(0, 1), inf.NewDec(0, 1),
+		inf.NewDec(0, 1),
+		inf.NewDec(0, 1), inf.NewDec(0, 1), inf.NewDec(0, 1),
+		inf.NewDec(1, 1), inf.NewDec(1, 1), inf.NewDec(1, 1)}},
+	{inf.RoundUp, [...]*inf.Dec{
+		inf.NewDec(2, 0), inf.NewDec(-2, 0), inf.NewDec(-2, 0), inf.NewDec(2, 0),
+		inf.NewDec(-2, 1), inf.NewDec(-2, 1), inf.NewDec(-2, 1),
+		inf.NewDec(-1, 1), inf.NewDec(-1, 1), inf.NewDec(-1, 1),
+		inf.NewDec(0, 1),
+		inf.NewDec(1, 1), inf.NewDec(1, 1), inf.NewDec(1, 1),
+		inf.NewDec(2, 1), inf.NewDec(2, 1), inf.NewDec(2, 1)}},
+	{inf.RoundHalfDown, [...]*inf.Dec{
+		inf.NewDec(2, 0), inf.NewDec(-2, 0), inf.NewDec(-2, 0), inf.NewDec(2, 0),
+		inf.NewDec(-2, 1), inf.NewDec(-1, 1), inf.NewDec(-1, 1),
+		inf.NewDec(-1, 1), inf.NewDec(0, 1), inf.NewDec(0, 1),
+		inf.NewDec(0, 1),
+		inf.NewDec(0, 1), inf.NewDec(0, 1), inf.NewDec(1, 1),
+		inf.NewDec(1, 1), inf.NewDec(1, 1), inf.NewDec(2, 1)}},
+	{inf.RoundHalfUp, [...]*inf.Dec{
+		inf.NewDec(2, 0), inf.NewDec(-2, 0), inf.NewDec(-2, 0), inf.NewDec(2, 0),
+		inf.NewDec(-2, 1), inf.NewDec(-2, 1), inf.NewDec(-1, 1),
+		inf.NewDec(-1, 1), inf.NewDec(-1, 1), inf.NewDec(0, 1),
+		inf.NewDec(0, 1),
+		inf.NewDec(0, 1), inf.NewDec(1, 1), inf.NewDec(1, 1),
+		inf.NewDec(1, 1), inf.NewDec(2, 1), inf.NewDec(2, 1)}},
+	{inf.RoundHalfEven, [...]*inf.Dec{
+		inf.NewDec(2, 0), inf.NewDec(-2, 0), inf.NewDec(-2, 0), inf.NewDec(2, 0),
+		inf.NewDec(-2, 1), inf.NewDec(-2, 1), inf.NewDec(-1, 1),
+		inf.NewDec(-1, 1), inf.NewDec(0, 1), inf.NewDec(0, 1),
+		inf.NewDec(0, 1),
+		inf.NewDec(0, 1), inf.NewDec(0, 1), inf.NewDec(1, 1),
+		inf.NewDec(1, 1), inf.NewDec(2, 1), inf.NewDec(2, 1)}},
+	{inf.RoundFloor, [...]*inf.Dec{
+		inf.NewDec(1, 0), inf.NewDec(-2, 0), inf.NewDec(-2, 0), inf.NewDec(1, 0),
+		inf.NewDec(-2, 1), inf.NewDec(-2, 1), inf.NewDec(-2, 1),
+		inf.NewDec(-1, 1), inf.NewDec(-1, 1), inf.NewDec(-1, 1),
+		inf.NewDec(0, 1),
+		inf.NewDec(0, 1), inf.NewDec(0, 1), inf.NewDec(0, 1),
+		inf.NewDec(1, 1), inf.NewDec(1, 1), inf.NewDec(1, 1)}},
+	{inf.RoundCeil, [...]*inf.Dec{
+		inf.NewDec(2, 0), inf.NewDec(-1, 0), inf.NewDec(-1, 0), inf.NewDec(2, 0),
+		inf.NewDec(-1, 1), inf.NewDec(-1, 1), inf.NewDec(-1, 1),
+		inf.NewDec(0, 1), inf.NewDec(0, 1), inf.NewDec(0, 1),
+		inf.NewDec(0, 1),
+		inf.NewDec(1, 1), inf.NewDec(1, 1), inf.NewDec(1, 1),
+		inf.NewDec(2, 1), inf.NewDec(2, 1), inf.NewDec(2, 1)}},
+}
+
+func TestDecRounders(t *testing.T) {
+	for i, a := range decRounderResults {
+		for j, input := range decRounderInputs {
+			q := new(inf.Dec).Set(input.quo)
+			rA, rB := new(big.Int).Set(input.rA), new(big.Int).Set(input.rB)
+			res := a.rounder.Round(new(inf.Dec), q, rA, rB)
+			if a.results[j] == nil && res == nil {
+				continue
+			}
+			if (a.results[j] == nil && res != nil) ||
+				(a.results[j] != nil && res == nil) ||
+				a.results[j].Cmp(res) != 0 {
+				t.Errorf("#%d,%d Rounder got %v; expected %v", i, j, res, a.results[j])
+			}
+		}
+	}
+}

+ 45 - 0
src/gopkg.in/mgo.v2/.travis.yml

@@ -0,0 +1,45 @@
+language: go
+
+go_import_path: gopkg.in/mgo.v2
+
+addons:
+    apt:
+        packages:
+
+env:
+    global:
+        - BUCKET=https://niemeyer.s3.amazonaws.com
+    matrix:
+        - GO=1.4.1 MONGODB=x86_64-2.2.7
+        - GO=1.4.1 MONGODB=x86_64-2.4.14
+        - GO=1.4.1 MONGODB=x86_64-2.6.11
+        - GO=1.4.1 MONGODB=x86_64-3.0.9
+        - GO=1.4.1 MONGODB=x86_64-3.2.3-nojournal
+        - GO=1.5.3 MONGODB=x86_64-3.0.9
+        - GO=1.6   MONGODB=x86_64-3.0.9
+
+install:
+    - eval "$(gimme $GO)"
+
+    - wget $BUCKET/mongodb-linux-$MONGODB.tgz
+    - tar xzvf mongodb-linux-$MONGODB.tgz
+    - export PATH=$PWD/mongodb-linux-$MONGODB/bin:$PATH
+
+    - wget $BUCKET/daemontools.tar.gz
+    - tar xzvf daemontools.tar.gz
+    - export PATH=$PWD/daemontools:$PATH
+
+    - go get gopkg.in/check.v1
+    - go get gopkg.in/yaml.v2
+    - go get gopkg.in/tomb.v2
+
+before_script:
+    - export NOIPV6=1
+    - make startdb
+
+script:
+    - (cd bson && go test -check.v)
+    - go test -check.v -fast
+    - (cd txn && go test -check.v)
+
+# vim:sw=4:ts=4:et

+ 25 - 0
src/gopkg.in/mgo.v2/LICENSE

@@ -0,0 +1,25 @@
+mgo - MongoDB driver for Go
+
+Copyright (c) 2010-2013 - Gustavo Niemeyer <gustavo@niemeyer.net>
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met: 
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer. 
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution. 
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 5 - 0
src/gopkg.in/mgo.v2/Makefile

@@ -0,0 +1,5 @@
+startdb:
+	@harness/setup.sh start
+
+stopdb:
+	@harness/setup.sh stop

+ 4 - 0
src/gopkg.in/mgo.v2/README.md

@@ -0,0 +1,4 @@
+The MongoDB driver for Go
+-------------------------
+
+Please go to [http://labix.org/mgo](http://labix.org/mgo) for all project details.

+ 467 - 0
src/gopkg.in/mgo.v2/auth.go

@@ -0,0 +1,467 @@
+// mgo - MongoDB driver for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+//    list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+//    this list of conditions and the following disclaimer in the documentation
+//    and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package mgo
+
+import (
+	"crypto/md5"
+	"crypto/sha1"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"sync"
+
+	"gopkg.in/mgo.v2/bson"
+	"gopkg.in/mgo.v2/internal/scram"
+)
+
+type authCmd struct {
+	Authenticate int
+
+	Nonce string
+	User  string
+	Key   string
+}
+
+type startSaslCmd struct {
+	StartSASL int `bson:"startSasl"`
+}
+
+type authResult struct {
+	ErrMsg string
+	Ok     bool
+}
+
+type getNonceCmd struct {
+	GetNonce int
+}
+
+type getNonceResult struct {
+	Nonce string
+	Err   string "$err"
+	Code  int
+}
+
+type logoutCmd struct {
+	Logout int
+}
+
+type saslCmd struct {
+	Start          int    `bson:"saslStart,omitempty"`
+	Continue       int    `bson:"saslContinue,omitempty"`
+	ConversationId int    `bson:"conversationId,omitempty"`
+	Mechanism      string `bson:"mechanism,omitempty"`
+	Payload        []byte
+}
+
+type saslResult struct {
+	Ok    bool `bson:"ok"`
+	NotOk bool `bson:"code"` // Server <= 2.3.2 returns ok=1 & code>0 on errors (WTF?)
+	Done  bool
+
+	ConversationId int `bson:"conversationId"`
+	Payload        []byte
+	ErrMsg         string
+}
+
+type saslStepper interface {
+	Step(serverData []byte) (clientData []byte, done bool, err error)
+	Close()
+}
+
+func (socket *mongoSocket) getNonce() (nonce string, err error) {
+	socket.Lock()
+	for socket.cachedNonce == "" && socket.dead == nil {
+		debugf("Socket %p to %s: waiting for nonce", socket, socket.addr)
+		socket.gotNonce.Wait()
+	}
+	if socket.cachedNonce == "mongos" {
+		socket.Unlock()
+		return "", errors.New("Can't authenticate with mongos; see http://j.mp/mongos-auth")
+	}
+	debugf("Socket %p to %s: got nonce", socket, socket.addr)
+	nonce, err = socket.cachedNonce, socket.dead
+	socket.cachedNonce = ""
+	socket.Unlock()
+	if err != nil {
+		nonce = ""
+	}
+	return
+}
+
+func (socket *mongoSocket) resetNonce() {
+	debugf("Socket %p to %s: requesting a new nonce", socket, socket.addr)
+	op := &queryOp{}
+	op.query = &getNonceCmd{GetNonce: 1}
+	op.collection = "admin.$cmd"
+	op.limit = -1
+	op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) {
+		if err != nil {
+			socket.kill(errors.New("getNonce: "+err.Error()), true)
+			return
+		}
+		result := &getNonceResult{}
+		err = bson.Unmarshal(docData, &result)
+		if err != nil {
+			socket.kill(errors.New("Failed to unmarshal nonce: "+err.Error()), true)
+			return
+		}
+		debugf("Socket %p to %s: nonce unmarshalled: %#v", socket, socket.addr, result)
+		if result.Code == 13390 {
+			// mongos doesn't yet support auth (see http://j.mp/mongos-auth)
+			result.Nonce = "mongos"
+		} else if result.Nonce == "" {
+			var msg string
+			if result.Err != "" {
+				msg = fmt.Sprintf("Got an empty nonce: %s (%d)", result.Err, result.Code)
+			} else {
+				msg = "Got an empty nonce"
+			}
+			socket.kill(errors.New(msg), true)
+			return
+		}
+		socket.Lock()
+		if socket.cachedNonce != "" {
+			socket.Unlock()
+			panic("resetNonce: nonce already cached")
+		}
+		socket.cachedNonce = result.Nonce
+		socket.gotNonce.Signal()
+		socket.Unlock()
+	}
+	err := socket.Query(op)
+	if err != nil {
+		socket.kill(errors.New("resetNonce: "+err.Error()), true)
+	}
+}
+
+func (socket *mongoSocket) Login(cred Credential) error {
+	socket.Lock()
+	if cred.Mechanism == "" && socket.serverInfo.MaxWireVersion >= 3 {
+		cred.Mechanism = "SCRAM-SHA-1"
+	}
+	for _, sockCred := range socket.creds {
+		if sockCred == cred {
+			debugf("Socket %p to %s: login: db=%q user=%q (already logged in)", socket, socket.addr, cred.Source, cred.Username)
+			socket.Unlock()
+			return nil
+		}
+	}
+	if socket.dropLogout(cred) {
+		debugf("Socket %p to %s: login: db=%q user=%q (cached)", socket, socket.addr, cred.Source, cred.Username)
+		socket.creds = append(socket.creds, cred)
+		socket.Unlock()
+		return nil
+	}
+	socket.Unlock()
+
+	debugf("Socket %p to %s: login: db=%q user=%q", socket, socket.addr, cred.Source, cred.Username)
+
+	var err error
+	switch cred.Mechanism {
+	case "", "MONGODB-CR", "MONGO-CR": // Name changed to MONGODB-CR in SERVER-8501.
+		err = socket.loginClassic(cred)
+	case "PLAIN":
+		err = socket.loginPlain(cred)
+	case "MONGODB-X509":
+		err = socket.loginX509(cred)
+	default:
+		// Try SASL for everything else, if it is available.
+		err = socket.loginSASL(cred)
+	}
+
+	if err != nil {
+		debugf("Socket %p to %s: login error: %s", socket, socket.addr, err)
+	} else {
+		debugf("Socket %p to %s: login successful", socket, socket.addr)
+	}
+	return err
+}
+
+func (socket *mongoSocket) loginClassic(cred Credential) error {
+	// Note that this only works properly because this function is
+	// synchronous, which means the nonce won't get reset while we're
+	// using it and any other login requests will block waiting for a
+	// new nonce provided in the defer call below.
+	nonce, err := socket.getNonce()
+	if err != nil {
+		return err
+	}
+	defer socket.resetNonce()
+
+	psum := md5.New()
+	psum.Write([]byte(cred.Username + ":mongo:" + cred.Password))
+
+	ksum := md5.New()
+	ksum.Write([]byte(nonce + cred.Username))
+	ksum.Write([]byte(hex.EncodeToString(psum.Sum(nil))))
+
+	key := hex.EncodeToString(ksum.Sum(nil))
+
+	cmd := authCmd{Authenticate: 1, User: cred.Username, Nonce: nonce, Key: key}
+	res := authResult{}
+	return socket.loginRun(cred.Source, &cmd, &res, func() error {
+		if !res.Ok {
+			return errors.New(res.ErrMsg)
+		}
+		socket.Lock()
+		socket.dropAuth(cred.Source)
+		socket.creds = append(socket.creds, cred)
+		socket.Unlock()
+		return nil
+	})
+}
+
+type authX509Cmd struct {
+	Authenticate int
+	User         string
+	Mechanism    string
+}
+
+func (socket *mongoSocket) loginX509(cred Credential) error {
+	cmd := authX509Cmd{Authenticate: 1, User: cred.Username, Mechanism: "MONGODB-X509"}
+	res := authResult{}
+	return socket.loginRun(cred.Source, &cmd, &res, func() error {
+		if !res.Ok {
+			return errors.New(res.ErrMsg)
+		}
+		socket.Lock()
+		socket.dropAuth(cred.Source)
+		socket.creds = append(socket.creds, cred)
+		socket.Unlock()
+		return nil
+	})
+}
+
+func (socket *mongoSocket) loginPlain(cred Credential) error {
+	cmd := saslCmd{Start: 1, Mechanism: "PLAIN", Payload: []byte("\x00" + cred.Username + "\x00" + cred.Password)}
+	res := authResult{}
+	return socket.loginRun(cred.Source, &cmd, &res, func() error {
+		if !res.Ok {
+			return errors.New(res.ErrMsg)
+		}
+		socket.Lock()
+		socket.dropAuth(cred.Source)
+		socket.creds = append(socket.creds, cred)
+		socket.Unlock()
+		return nil
+	})
+}
+
+func (socket *mongoSocket) loginSASL(cred Credential) error {
+	var sasl saslStepper
+	var err error
+	if cred.Mechanism == "SCRAM-SHA-1" {
+		// SCRAM is handled without external libraries.
+		sasl = saslNewScram(cred)
+	} else if len(cred.ServiceHost) > 0 {
+		sasl, err = saslNew(cred, cred.ServiceHost)
+	} else {
+		sasl, err = saslNew(cred, socket.Server().Addr)
+	}
+	if err != nil {
+		return err
+	}
+	defer sasl.Close()
+
+	// The goal of this logic is to carry a locked socket until the
+	// local SASL step confirms the auth is valid; the socket needs to be
+	// locked so that concurrent action doesn't leave the socket in an
+	// auth state that doesn't reflect the operations that took place.
+	// As a simple case, imagine inverting login=>logout to logout=>login.
+	//
+	// The logic below works because the lock func isn't called concurrently.
+	locked := false
+	lock := func(b bool) {
+		if locked != b {
+			locked = b
+			if b {
+				socket.Lock()
+			} else {
+				socket.Unlock()
+			}
+		}
+	}
+
+	lock(true)
+	defer lock(false)
+
+	start := 1
+	cmd := saslCmd{}
+	res := saslResult{}
+	for {
+		payload, done, err := sasl.Step(res.Payload)
+		if err != nil {
+			return err
+		}
+		if done && res.Done {
+			socket.dropAuth(cred.Source)
+			socket.creds = append(socket.creds, cred)
+			break
+		}
+		lock(false)
+
+		cmd = saslCmd{
+			Start:          start,
+			Continue:       1 - start,
+			ConversationId: res.ConversationId,
+			Mechanism:      cred.Mechanism,
+			Payload:        payload,
+		}
+		start = 0
+		err = socket.loginRun(cred.Source, &cmd, &res, func() error {
+			// See the comment on lock for why this is necessary.
+			lock(true)
+			if !res.Ok || res.NotOk {
+				return fmt.Errorf("server returned error on SASL authentication step: %s", res.ErrMsg)
+			}
+			return nil
+		})
+		if err != nil {
+			return err
+		}
+		if done && res.Done {
+			socket.dropAuth(cred.Source)
+			socket.creds = append(socket.creds, cred)
+			break
+		}
+	}
+
+	return nil
+}
+
+func saslNewScram(cred Credential) *saslScram {
+	credsum := md5.New()
+	credsum.Write([]byte(cred.Username + ":mongo:" + cred.Password))
+	client := scram.NewClient(sha1.New, cred.Username, hex.EncodeToString(credsum.Sum(nil)))
+	return &saslScram{cred: cred, client: client}
+}
+
+type saslScram struct {
+	cred   Credential
+	client *scram.Client
+}
+
+func (s *saslScram) Close() {}
+
+func (s *saslScram) Step(serverData []byte) (clientData []byte, done bool, err error) {
+	more := s.client.Step(serverData)
+	return s.client.Out(), !more, s.client.Err()
+}
+
+func (socket *mongoSocket) loginRun(db string, query, result interface{}, f func() error) error {
+	var mutex sync.Mutex
+	var replyErr error
+	mutex.Lock()
+
+	op := queryOp{}
+	op.query = query
+	op.collection = db + ".$cmd"
+	op.limit = -1
+	op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) {
+		defer mutex.Unlock()
+
+		if err != nil {
+			replyErr = err
+			return
+		}
+
+		err = bson.Unmarshal(docData, result)
+		if err != nil {
+			replyErr = err
+		} else {
+			// Must handle this within the read loop for the socket, so
+			// that concurrent login requests are properly ordered.
+			replyErr = f()
+		}
+	}
+
+	err := socket.Query(&op)
+	if err != nil {
+		return err
+	}
+	mutex.Lock() // Wait.
+	return replyErr
+}
+
+func (socket *mongoSocket) Logout(db string) {
+	socket.Lock()
+	cred, found := socket.dropAuth(db)
+	if found {
+		debugf("Socket %p to %s: logout: db=%q (flagged)", socket, socket.addr, db)
+		socket.logout = append(socket.logout, cred)
+	}
+	socket.Unlock()
+}
+
+func (socket *mongoSocket) LogoutAll() {
+	socket.Lock()
+	if l := len(socket.creds); l > 0 {
+		debugf("Socket %p to %s: logout all (flagged %d)", socket, socket.addr, l)
+		socket.logout = append(socket.logout, socket.creds...)
+		socket.creds = socket.creds[0:0]
+	}
+	socket.Unlock()
+}
+
+func (socket *mongoSocket) flushLogout() (ops []interface{}) {
+	socket.Lock()
+	if l := len(socket.logout); l > 0 {
+		debugf("Socket %p to %s: logout all (flushing %d)", socket, socket.addr, l)
+		for i := 0; i != l; i++ {
+			op := queryOp{}
+			op.query = &logoutCmd{1}
+			op.collection = socket.logout[i].Source + ".$cmd"
+			op.limit = -1
+			ops = append(ops, &op)
+		}
+		socket.logout = socket.logout[0:0]
+	}
+	socket.Unlock()
+	return
+}
+
+func (socket *mongoSocket) dropAuth(db string) (cred Credential, found bool) {
+	for i, sockCred := range socket.creds {
+		if sockCred.Source == db {
+			copy(socket.creds[i:], socket.creds[i+1:])
+			socket.creds = socket.creds[:len(socket.creds)-1]
+			return sockCred, true
+		}
+	}
+	return cred, false
+}
+
+func (socket *mongoSocket) dropLogout(cred Credential) (found bool) {
+	for i, sockCred := range socket.logout {
+		if sockCred == cred {
+			copy(socket.logout[i:], socket.logout[i+1:])
+			socket.logout = socket.logout[:len(socket.logout)-1]
+			return true
+		}
+	}
+	return false
+}

+ 1180 - 0
src/gopkg.in/mgo.v2/auth_test.go

@@ -0,0 +1,1180 @@
+// mgo - MongoDB driver for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+//    list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+//    this list of conditions and the following disclaimer in the documentation
+//    and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package mgo_test
+
+import (
+	"crypto/tls"
+	"flag"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/url"
+	"os"
+	"runtime"
+	"sync"
+	"time"
+
+	. "gopkg.in/check.v1"
+	"gopkg.in/mgo.v2"
+)
+
+func (s *S) TestAuthLoginDatabase(c *C) {
+	// Test both with a normal database and with an authenticated shard.
+	for _, addr := range []string{"localhost:40002", "localhost:40203"} {
+		session, err := mgo.Dial(addr)
+		c.Assert(err, IsNil)
+		defer session.Close()
+
+		coll := session.DB("mydb").C("mycoll")
+		err = coll.Insert(M{"n": 1})
+		c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*")
+
+		admindb := session.DB("admin")
+
+		err = admindb.Login("root", "wrong")
+		c.Assert(err, ErrorMatches, "auth fail(s|ed)|.*Authentication failed.")
+
+		err = admindb.Login("root", "rapadura")
+		c.Assert(err, IsNil)
+
+		err = coll.Insert(M{"n": 1})
+		c.Assert(err, IsNil)
+	}
+}
+
+func (s *S) TestAuthLoginSession(c *C) {
+	// Test both with a normal database and with an authenticated shard.
+	for _, addr := range []string{"localhost:40002", "localhost:40203"} {
+		session, err := mgo.Dial(addr)
+		c.Assert(err, IsNil)
+		defer session.Close()
+
+		coll := session.DB("mydb").C("mycoll")
+		err = coll.Insert(M{"n": 1})
+		c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*")
+
+		cred := mgo.Credential{
+			Username: "root",
+			Password: "wrong",
+		}
+		err = session.Login(&cred)
+		c.Assert(err, ErrorMatches, "auth fail(s|ed)|.*Authentication failed.")
+
+		cred.Password = "rapadura"
+
+		err = session.Login(&cred)
+		c.Assert(err, IsNil)
+
+		err = coll.Insert(M{"n": 1})
+		c.Assert(err, IsNil)
+	}
+}
+
+func (s *S) TestAuthLoginLogout(c *C) {
+	// Test both with a normal database and with an authenticated shard.
+	for _, addr := range []string{"localhost:40002", "localhost:40203"} {
+		session, err := mgo.Dial(addr)
+		c.Assert(err, IsNil)
+		defer session.Close()
+
+		admindb := session.DB("admin")
+		err = admindb.Login("root", "rapadura")
+		c.Assert(err, IsNil)
+
+		admindb.Logout()
+
+		coll := session.DB("mydb").C("mycoll")
+		err = coll.Insert(M{"n": 1})
+		c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*")
+
+		// Must have dropped auth from the session too.
+		session = session.Copy()
+		defer session.Close()
+
+		coll = session.DB("mydb").C("mycoll")
+		err = coll.Insert(M{"n": 1})
+		c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*")
+	}
+}
+
+func (s *S) TestAuthLoginLogoutAll(c *C) {
+	session, err := mgo.Dial("localhost:40002")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	admindb := session.DB("admin")
+	err = admindb.Login("root", "rapadura")
+	c.Assert(err, IsNil)
+
+	session.LogoutAll()
+
+	coll := session.DB("mydb").C("mycoll")
+	err = coll.Insert(M{"n": 1})
+	c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*")
+
+	// Must have dropped auth from the session too.
+	session = session.Copy()
+	defer session.Close()
+
+	coll = session.DB("mydb").C("mycoll")
+	err = coll.Insert(M{"n": 1})
+	c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*")
+}
+
+func (s *S) TestAuthUpsertUserErrors(c *C) {
+	session, err := mgo.Dial("localhost:40002")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	admindb := session.DB("admin")
+	err = admindb.Login("root", "rapadura")
+	c.Assert(err, IsNil)
+
+	mydb := session.DB("mydb")
+
+	err = mydb.UpsertUser(&mgo.User{})
+	c.Assert(err, ErrorMatches, "user has no Username")
+
+	err = mydb.UpsertUser(&mgo.User{Username: "user", Password: "pass", UserSource: "source"})
+	c.Assert(err, ErrorMatches, "user has both Password/PasswordHash and UserSource set")
+
+	err = mydb.UpsertUser(&mgo.User{Username: "user", Password: "pass", OtherDBRoles: map[string][]mgo.Role{"db": nil}})
+	c.Assert(err, ErrorMatches, "user with OtherDBRoles is only supported in the admin or \\$external databases")
+}
+
+func (s *S) TestAuthUpsertUser(c *C) {
+	if !s.versionAtLeast(2, 4) {
+		c.Skip("UpsertUser only works on 2.4+")
+	}
+	session, err := mgo.Dial("localhost:40002")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	admindb := session.DB("admin")
+	err = admindb.Login("root", "rapadura")
+	c.Assert(err, IsNil)
+
+	mydb := session.DB("mydb")
+
+	ruser := &mgo.User{
+		Username: "myruser",
+		Password: "mypass",
+		Roles:    []mgo.Role{mgo.RoleRead},
+	}
+	rwuser := &mgo.User{
+		Username: "myrwuser",
+		Password: "mypass",
+		Roles:    []mgo.Role{mgo.RoleReadWrite},
+	}
+
+	err = mydb.UpsertUser(ruser)
+	c.Assert(err, IsNil)
+	err = mydb.UpsertUser(rwuser)
+	c.Assert(err, IsNil)
+
+	err = mydb.Login("myruser", "mypass")
+	c.Assert(err, IsNil)
+
+	admindb.Logout()
+
+	coll := session.DB("mydb").C("mycoll")
+	err = coll.Insert(M{"n": 1})
+	c.Assert(err, ErrorMatches, "unauthorized|not authorized .*")
+
+	err = mydb.Login("myrwuser", "mypass")
+	c.Assert(err, IsNil)
+
+	err = coll.Insert(M{"n": 1})
+	c.Assert(err, IsNil)
+
+	myotherdb := session.DB("myotherdb")
+
+	err = admindb.Login("root", "rapadura")
+	c.Assert(err, IsNil)
+
+	// Test UserSource.
+	rwuserother := &mgo.User{
+		Username:   "myrwuser",
+		UserSource: "mydb",
+		Roles:      []mgo.Role{mgo.RoleRead},
+	}
+
+	err = myotherdb.UpsertUser(rwuserother)
+	if s.versionAtLeast(2, 6) {
+		c.Assert(err, ErrorMatches, `MongoDB 2.6\+ does not support the UserSource setting`)
+		return
+	}
+	c.Assert(err, IsNil)
+
+	admindb.Logout()
+
+	// Test indirection via UserSource: we can't write to it, because
+	// the roles for myrwuser are different there.
+	othercoll := myotherdb.C("myothercoll")
+	err = othercoll.Insert(M{"n": 1})
+	c.Assert(err, ErrorMatches, "unauthorized|not authorized .*")
+
+	// Reading works, though.
+	err = othercoll.Find(nil).One(nil)
+	c.Assert(err, Equals, mgo.ErrNotFound)
+
+	// Can't login directly into the database using UserSource, though.
+	err = myotherdb.Login("myrwuser", "mypass")
+	c.Assert(err, ErrorMatches, "auth fail(s|ed)|.*Authentication failed.")
+}
+
+func (s *S) TestAuthUpsertUserOtherDBRoles(c *C) {
+	if !s.versionAtLeast(2, 4) {
+		c.Skip("UpsertUser only works on 2.4+")
+	}
+	session, err := mgo.Dial("localhost:40002")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	admindb := session.DB("admin")
+	err = admindb.Login("root", "rapadura")
+	c.Assert(err, IsNil)
+
+	ruser := &mgo.User{
+		Username:     "myruser",
+		Password:     "mypass",
+		OtherDBRoles: map[string][]mgo.Role{"mydb": []mgo.Role{mgo.RoleRead}},
+	}
+
+	err = admindb.UpsertUser(ruser)
+	c.Assert(err, IsNil)
+	defer admindb.RemoveUser("myruser")
+
+	admindb.Logout()
+	err = admindb.Login("myruser", "mypass")
+
+	coll := session.DB("mydb").C("mycoll")
+	err = coll.Insert(M{"n": 1})
+	c.Assert(err, ErrorMatches, "unauthorized|not authorized .*")
+
+	err = coll.Find(nil).One(nil)
+	c.Assert(err, Equals, mgo.ErrNotFound)
+}
+
+func (s *S) TestAuthUpsertUserUpdates(c *C) {
+	if !s.versionAtLeast(2, 4) {
+		c.Skip("UpsertUser only works on 2.4+")
+	}
+	session, err := mgo.Dial("localhost:40002")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	admindb := session.DB("admin")
+	err = admindb.Login("root", "rapadura")
+	c.Assert(err, IsNil)
+
+	mydb := session.DB("mydb")
+
+	// Insert a user that can read.
+	user := &mgo.User{
+		Username: "myruser",
+		Password: "mypass",
+		Roles:    []mgo.Role{mgo.RoleRead},
+	}
+	err = mydb.UpsertUser(user)
+	c.Assert(err, IsNil)
+
+	// Now update the user password.
+	user = &mgo.User{
+		Username: "myruser",
+		Password: "mynewpass",
+	}
+	err = mydb.UpsertUser(user)
+	c.Assert(err, IsNil)
+
+	// Login with the new user.
+	usession, err := mgo.Dial("myruser:mynewpass@localhost:40002/mydb")
+	c.Assert(err, IsNil)
+	defer usession.Close()
+
+	// Can read, but not write.
+	err = usession.DB("mydb").C("mycoll").Find(nil).One(nil)
+	c.Assert(err, Equals, mgo.ErrNotFound)
+	err = usession.DB("mydb").C("mycoll").Insert(M{"ok": 1})
+	c.Assert(err, ErrorMatches, "unauthorized|not authorized .*")
+
+	// Update the user role.
+	user = &mgo.User{
+		Username: "myruser",
+		Roles:    []mgo.Role{mgo.RoleReadWrite},
+	}
+	err = mydb.UpsertUser(user)
+	c.Assert(err, IsNil)
+
+	// Dial again to ensure the password hasn't changed.
+	usession, err = mgo.Dial("myruser:mynewpass@localhost:40002/mydb")
+	c.Assert(err, IsNil)
+	defer usession.Close()
+
+	// Now it can write.
+	err = usession.DB("mydb").C("mycoll").Insert(M{"ok": 1})
+	c.Assert(err, IsNil)
+}
+
+func (s *S) TestAuthAddUser(c *C) {
+	session, err := mgo.Dial("localhost:40002")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	admindb := session.DB("admin")
+	err = admindb.Login("root", "rapadura")
+	c.Assert(err, IsNil)
+
+	mydb := session.DB("mydb")
+	err = mydb.AddUser("myruser", "mypass", true)
+	c.Assert(err, IsNil)
+	err = mydb.AddUser("mywuser", "mypass", false)
+	c.Assert(err, IsNil)
+
+	err = mydb.Login("myruser", "mypass")
+	c.Assert(err, IsNil)
+
+	admindb.Logout()
+
+	coll := session.DB("mydb").C("mycoll")
+	err = coll.Insert(M{"n": 1})
+	c.Assert(err, ErrorMatches, "unauthorized|not authorized .*")
+
+	err = mydb.Login("mywuser", "mypass")
+	c.Assert(err, IsNil)
+
+	err = coll.Insert(M{"n": 1})
+	c.Assert(err, IsNil)
+}
+
+func (s *S) TestAuthAddUserReplaces(c *C) {
+	session, err := mgo.Dial("localhost:40002")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	admindb := session.DB("admin")
+	err = admindb.Login("root", "rapadura")
+	c.Assert(err, IsNil)
+
+	mydb := session.DB("mydb")
+	err = mydb.AddUser("myuser", "myoldpass", false)
+	c.Assert(err, IsNil)
+	err = mydb.AddUser("myuser", "mynewpass", true)
+	c.Assert(err, IsNil)
+
+	admindb.Logout()
+
+	err = mydb.Login("myuser", "myoldpass")
+	c.Assert(err, ErrorMatches, "auth fail(s|ed)|.*Authentication failed.")
+	err = mydb.Login("myuser", "mynewpass")
+	c.Assert(err, IsNil)
+
+	// ReadOnly flag was changed too.
+	err = mydb.C("mycoll").Insert(M{"n": 1})
+	c.Assert(err, ErrorMatches, "unauthorized|not authorized .*")
+}
+
+func (s *S) TestAuthRemoveUser(c *C) {
+	session, err := mgo.Dial("localhost:40002")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	admindb := session.DB("admin")
+	err = admindb.Login("root", "rapadura")
+	c.Assert(err, IsNil)
+
+	mydb := session.DB("mydb")
+	err = mydb.AddUser("myuser", "mypass", true)
+	c.Assert(err, IsNil)
+	err = mydb.RemoveUser("myuser")
+	c.Assert(err, IsNil)
+	err = mydb.RemoveUser("myuser")
+	c.Assert(err, Equals, mgo.ErrNotFound)
+
+	err = mydb.Login("myuser", "mypass")
+	c.Assert(err, ErrorMatches, "auth fail(s|ed)|.*Authentication failed.")
+}
+
+func (s *S) TestAuthLoginTwiceDoesNothing(c *C) {
+	session, err := mgo.Dial("localhost:40002")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	admindb := session.DB("admin")
+	err = admindb.Login("root", "rapadura")
+	c.Assert(err, IsNil)
+
+	oldStats := mgo.GetStats()
+
+	err = admindb.Login("root", "rapadura")
+	c.Assert(err, IsNil)
+
+	newStats := mgo.GetStats()
+	c.Assert(newStats.SentOps, Equals, oldStats.SentOps)
+}
+
+func (s *S) TestAuthLoginLogoutLoginDoesNothing(c *C) {
+	session, err := mgo.Dial("localhost:40002")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	admindb := session.DB("admin")
+	err = admindb.Login("root", "rapadura")
+	c.Assert(err, IsNil)
+
+	oldStats := mgo.GetStats()
+
+	admindb.Logout()
+	err = admindb.Login("root", "rapadura")
+	c.Assert(err, IsNil)
+
+	newStats := mgo.GetStats()
+	c.Assert(newStats.SentOps, Equals, oldStats.SentOps)
+}
+
+func (s *S) TestAuthLoginSwitchUser(c *C) {
+	session, err := mgo.Dial("localhost:40002")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	admindb := session.DB("admin")
+	err = admindb.Login("root", "rapadura")
+	c.Assert(err, IsNil)
+
+	coll := session.DB("mydb").C("mycoll")
+	err = coll.Insert(M{"n": 1})
+	c.Assert(err, IsNil)
+
+	err = admindb.Login("reader", "rapadura")
+	c.Assert(err, IsNil)
+
+	// Can't write.
+	err = coll.Insert(M{"n": 1})
+	c.Assert(err, ErrorMatches, "unauthorized|not authorized .*")
+
+	// But can read.
+	result := struct{ N int }{}
+	err = coll.Find(nil).One(&result)
+	c.Assert(err, IsNil)
+	c.Assert(result.N, Equals, 1)
+}
+
+func (s *S) TestAuthLoginChangePassword(c *C) {
+	session, err := mgo.Dial("localhost:40002")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	admindb := session.DB("admin")
+	err = admindb.Login("root", "rapadura")
+	c.Assert(err, IsNil)
+
+	mydb := session.DB("mydb")
+	err = mydb.AddUser("myuser", "myoldpass", false)
+	c.Assert(err, IsNil)
+
+	err = mydb.Login("myuser", "myoldpass")
+	c.Assert(err, IsNil)
+
+	err = mydb.AddUser("myuser", "mynewpass", true)
+	c.Assert(err, IsNil)
+
+	err = mydb.Login("myuser", "mynewpass")
+	c.Assert(err, IsNil)
+
+	admindb.Logout()
+
+	// The second login must be in effect, which means read-only.
+	err = mydb.C("mycoll").Insert(M{"n": 1})
+	c.Assert(err, ErrorMatches, "unauthorized|not authorized .*")
+}
+
+func (s *S) TestAuthLoginCachingWithSessionRefresh(c *C) {
+	session, err := mgo.Dial("localhost:40002")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	admindb := session.DB("admin")
+	err = admindb.Login("root", "rapadura")
+	c.Assert(err, IsNil)
+
+	session.Refresh()
+
+	coll := session.DB("mydb").C("mycoll")
+	err = coll.Insert(M{"n": 1})
+	c.Assert(err, IsNil)
+}
+
+func (s *S) TestAuthLoginCachingWithSessionCopy(c *C) {
+	session, err := mgo.Dial("localhost:40002")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	admindb := session.DB("admin")
+	err = admindb.Login("root", "rapadura")
+	c.Assert(err, IsNil)
+
+	session = session.Copy()
+	defer session.Close()
+
+	coll := session.DB("mydb").C("mycoll")
+	err = coll.Insert(M{"n": 1})
+	c.Assert(err, IsNil)
+}
+
+func (s *S) TestAuthLoginCachingWithSessionClone(c *C) {
+	session, err := mgo.Dial("localhost:40002")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	admindb := session.DB("admin")
+	err = admindb.Login("root", "rapadura")
+	c.Assert(err, IsNil)
+
+	session = session.Clone()
+	defer session.Close()
+
+	coll := session.DB("mydb").C("mycoll")
+	err = coll.Insert(M{"n": 1})
+	c.Assert(err, IsNil)
+}
+
+func (s *S) TestAuthLoginCachingWithNewSession(c *C) {
+	session, err := mgo.Dial("localhost:40002")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	admindb := session.DB("admin")
+	err = admindb.Login("root", "rapadura")
+	c.Assert(err, IsNil)
+
+	session = session.New()
+	defer session.Close()
+
+	coll := session.DB("mydb").C("mycoll")
+	err = coll.Insert(M{"n": 1})
+	c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*")
+}
+
+func (s *S) TestAuthLoginCachingAcrossPool(c *C) {
+	// Logins are cached even when the connection goes back
+	// into the pool.
+
+	session, err := mgo.Dial("localhost:40002")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	admindb := session.DB("admin")
+	err = admindb.Login("root", "rapadura")
+	c.Assert(err, IsNil)
+
+	// Add another user to test the logout case at the same time.
+	mydb := session.DB("mydb")
+	err = mydb.AddUser("myuser", "mypass", false)
+	c.Assert(err, IsNil)
+
+	err = mydb.Login("myuser", "mypass")
+	c.Assert(err, IsNil)
+
+	// Logout root explicitly, to test both cases.
+	admindb.Logout()
+
+	// Give socket back to pool.
+	session.Refresh()
+
+	// Brand new session, should use socket from the pool.
+	other := session.New()
+	defer other.Close()
+
+	oldStats := mgo.GetStats()
+
+	err = other.DB("admin").Login("root", "rapadura")
+	c.Assert(err, IsNil)
+	err = other.DB("mydb").Login("myuser", "mypass")
+	c.Assert(err, IsNil)
+
+	// Both logins were cached, so no ops.
+	newStats := mgo.GetStats()
+	c.Assert(newStats.SentOps, Equals, oldStats.SentOps)
+
+	// And they actually worked.
+	err = other.DB("mydb").C("mycoll").Insert(M{"n": 1})
+	c.Assert(err, IsNil)
+
+	other.DB("admin").Logout()
+
+	err = other.DB("mydb").C("mycoll").Insert(M{"n": 1})
+	c.Assert(err, IsNil)
+}
+
+func (s *S) TestAuthLoginCachingAcrossPoolWithLogout(c *C) {
+	// Now verify that logouts are properly flushed if they
+	// are not revalidated after leaving the pool.
+
+	session, err := mgo.Dial("localhost:40002")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	admindb := session.DB("admin")
+	err = admindb.Login("root", "rapadura")
+	c.Assert(err, IsNil)
+
+	// Add another user to test the logout case at the same time.
+	mydb := session.DB("mydb")
+	err = mydb.AddUser("myuser", "mypass", true)
+	c.Assert(err, IsNil)
+
+	err = mydb.Login("myuser", "mypass")
+	c.Assert(err, IsNil)
+
+	// Just some data to query later.
+	err = session.DB("mydb").C("mycoll").Insert(M{"n": 1})
+	c.Assert(err, IsNil)
+
+	// Give socket back to pool.
+	session.Refresh()
+
+	// Brand new session, should use socket from the pool.
+	other := session.New()
+	defer other.Close()
+
+	oldStats := mgo.GetStats()
+
+	err = other.DB("mydb").Login("myuser", "mypass")
+	c.Assert(err, IsNil)
+
+	// Login was cached, so no ops.
+	newStats := mgo.GetStats()
+	c.Assert(newStats.SentOps, Equals, oldStats.SentOps)
+
+	// Can't write, since root has been implicitly logged out
+	// when the collection went into the pool, and not revalidated.
+	err = other.DB("mydb").C("mycoll").Insert(M{"n": 1})
+	c.Assert(err, ErrorMatches, "unauthorized|not authorized .*")
+
+	// But can read due to the revalidated myuser login.
+	result := struct{ N int }{}
+	err = other.DB("mydb").C("mycoll").Find(nil).One(&result)
+	c.Assert(err, IsNil)
+	c.Assert(result.N, Equals, 1)
+}
+
+func (s *S) TestAuthEventual(c *C) {
+	// Eventual sessions don't keep sockets around, so they are
+	// an interesting test case.
+	session, err := mgo.Dial("localhost:40002")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	admindb := session.DB("admin")
+	err = admindb.Login("root", "rapadura")
+	c.Assert(err, IsNil)
+
+	err = session.DB("mydb").C("mycoll").Insert(M{"n": 1})
+	c.Assert(err, IsNil)
+
+	var wg sync.WaitGroup
+	// 20 goroutines in total: 10 readers plus 10 writers below.
+	wg.Add(20)
+
+	for i := 0; i != 10; i++ {
+		go func() {
+			defer wg.Done()
+			// NOTE(review): gocheck asserts inside non-test goroutines may
+			// not abort the test cleanly on failure — confirm against the
+			// gocheck documentation.
+			var result struct{ N int }
+			err := session.DB("mydb").C("mycoll").Find(nil).One(&result)
+			c.Assert(err, IsNil)
+			c.Assert(result.N, Equals, 1)
+		}()
+	}
+
+	for i := 0; i != 10; i++ {
+		go func() {
+			defer wg.Done()
+			err := session.DB("mydb").C("mycoll").Insert(M{"n": 1})
+			c.Assert(err, IsNil)
+		}()
+	}
+
+	wg.Wait()
+}
+
+func (s *S) TestAuthURL(c *C) {
+	// Credentials supplied directly in the URL must log the session
+	// in as part of the dial.
+	sess, err := mgo.Dial("mongodb://root:rapadura@localhost:40002/")
+	c.Assert(err, IsNil)
+	defer sess.Close()
+
+	insertErr := sess.DB("mydb").C("mycoll").Insert(M{"n": 1})
+	c.Assert(insertErr, IsNil)
+}
+
+func (s *S) TestAuthURLWrongCredentials(c *C) {
+	// Dialing with bad credentials in the URL must fail and
+	// must not hand back a usable session.
+	session, err := mgo.Dial("mongodb://root:wrong@localhost:40002/")
+	if session != nil {
+		session.Close()
+	}
+	c.Assert(err, ErrorMatches, "auth fail(s|ed)|.*Authentication failed.")
+	c.Assert(session, IsNil)
+}
+
+func (s *S) TestAuthURLWithNewSession(c *C) {
+	// When authentication is in the URL, the new session will
+	// actually carry it on as well, even if logged out explicitly.
+	session, err := mgo.Dial("mongodb://root:rapadura@localhost:40002/")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	// An explicit logout must not drop the credentials recorded at dial time.
+	session.DB("admin").Logout()
+
+	// Do it twice to ensure it passes the needed data on.
+	session = session.New()
+	defer session.Close()
+	session = session.New()
+	defer session.Close()
+
+	err = session.DB("mydb").C("mycoll").Insert(M{"n": 1})
+	c.Assert(err, IsNil)
+}
+
+func (s *S) TestAuthURLWithDatabase(c *C) {
+	// The database in the URL (or the authSource option) selects
+	// where the credentials are validated.
+	session, err := mgo.Dial("mongodb://root:rapadura@localhost:40002")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	// myruser is created read-only (third AddUser argument), so reads
+	// succeed and writes are rejected below.
+	mydb := session.DB("mydb")
+	err = mydb.AddUser("myruser", "mypass", true)
+	c.Assert(err, IsNil)
+
+	// Test once with database, and once with source.
+	for i := 0; i < 2; i++ {
+		var url string
+		if i == 0 {
+			url = "mongodb://myruser:mypass@localhost:40002/mydb"
+		} else {
+			url = "mongodb://myruser:mypass@localhost:40002/admin?authSource=mydb"
+		}
+		usession, err := mgo.Dial(url)
+		c.Assert(err, IsNil)
+		// Deferred inside the loop on purpose; both sessions stay open
+		// until the test returns, which is fine for a two-iteration loop.
+		defer usession.Close()
+
+		ucoll := usession.DB("mydb").C("mycoll")
+		err = ucoll.FindId(0).One(nil)
+		c.Assert(err, Equals, mgo.ErrNotFound)
+		err = ucoll.Insert(M{"n": 1})
+		c.Assert(err, ErrorMatches, "unauthorized|not authorized .*")
+	}
+}
+
+func (s *S) TestDefaultDatabase(c *C) {
+	// The database resolved by DB("") follows the URL path component,
+	// defaulting to "test" when none is given.
+	tests := []struct{ url, db string }{
+		{"mongodb://root:rapadura@localhost:40002", "test"},
+		{"mongodb://root:rapadura@localhost:40002/admin", "admin"},
+		{"mongodb://localhost:40001", "test"},
+		{"mongodb://localhost:40001/", "test"},
+		{"mongodb://localhost:40001/mydb", "mydb"},
+	}
+
+	for _, test := range tests {
+		session, err := mgo.Dial(test.url)
+		c.Assert(err, IsNil)
+		// Deferred in-loop; all sessions stay open until the test ends.
+		defer session.Close()
+
+		c.Logf("test: %#v", test)
+		c.Assert(session.DB("").Name, Equals, test.db)
+
+		// A copied session must inherit the same default database.
+		scopy := session.Copy()
+		c.Check(scopy.DB("").Name, Equals, test.db)
+		scopy.Close()
+	}
+}
+
+func (s *S) TestAuthDirect(c *C) {
+	// Direct connections must work to the master and slaves.
+	// Ports 40031-40033 presumably cover all members of the test
+	// replica set — verify against the harness setup.
+	for _, port := range []string{"40031", "40032", "40033"} {
+		url := fmt.Sprintf("mongodb://root:rapadura@localhost:%s/?connect=direct", port)
+		session, err := mgo.Dial(url)
+		c.Assert(err, IsNil)
+		defer session.Close()
+
+		// Monotonic mode — presumably so reads are allowed against
+		// slave members as well; see the mgo consistency docs.
+		session.SetMode(mgo.Monotonic, true)
+
+		var result struct{}
+		err = session.DB("mydb").C("mycoll").Find(nil).One(&result)
+		c.Assert(err, Equals, mgo.ErrNotFound)
+	}
+}
+
+func (s *S) TestAuthDirectWithLogin(c *C) {
+	// Direct connections must work to the master and slaves.
+	// Same as TestAuthDirect, but authenticating via Login after the
+	// dial instead of embedding the credentials in the URL.
+	for _, port := range []string{"40031", "40032", "40033"} {
+		url := fmt.Sprintf("mongodb://localhost:%s/?connect=direct", port)
+		session, err := mgo.Dial(url)
+		c.Assert(err, IsNil)
+		defer session.Close()
+
+		session.SetMode(mgo.Monotonic, true)
+		// Bound sync timeout so a misbehaving member fails fast.
+		session.SetSyncTimeout(3 * time.Second)
+
+		err = session.DB("admin").Login("root", "rapadura")
+		c.Assert(err, IsNil)
+
+		var result struct{}
+		err = session.DB("mydb").C("mycoll").Find(nil).One(&result)
+		c.Assert(err, Equals, mgo.ErrNotFound)
+	}
+}
+
+func (s *S) TestAuthScramSha1Cred(c *C) {
+	// Explicit SCRAM-SHA-1 credentials via Login: access must be denied
+	// before authentication and granted afterwards.
+	if !s.versionAtLeast(2, 7, 7) {
+		c.Skip("SCRAM-SHA-1 tests depend on 2.7.7")
+	}
+	cred := &mgo.Credential{
+		Username:  "root",
+		Password:  "rapadura",
+		Mechanism: "SCRAM-SHA-1",
+		Source:    "admin",
+	}
+	host := "localhost:40002"
+	c.Logf("Connecting to %s...", host)
+	session, err := mgo.Dial(host)
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	mycoll := session.DB("admin").C("mycoll")
+
+	c.Logf("Connected! Testing the need for authentication...")
+	err = mycoll.Find(nil).One(nil)
+	c.Assert(err, ErrorMatches, "unauthorized|not authorized .*")
+
+	c.Logf("Authenticating...")
+	err = session.Login(cred)
+	c.Assert(err, IsNil)
+	c.Logf("Authenticated!")
+
+	// Same query now succeeds (ErrNotFound means the query ran but
+	// the collection is empty).
+	c.Logf("Connected! Testing the need for authentication...")
+	err = mycoll.Find(nil).One(nil)
+	c.Assert(err, Equals, mgo.ErrNotFound)
+}
+
+func (s *S) TestAuthScramSha1URL(c *C) {
+	// SCRAM-SHA-1 selected via the authMechanism URL option.
+	if !s.versionAtLeast(2, 7, 7) {
+		c.Skip("SCRAM-SHA-1 tests depend on 2.7.7")
+	}
+	host := "localhost:40002"
+	c.Logf("Connecting to %s...", host)
+	session, err := mgo.Dial(fmt.Sprintf("root:rapadura@%s?authMechanism=SCRAM-SHA-1", host))
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	mycoll := session.DB("admin").C("mycoll")
+
+	// ErrNotFound (not an authorization error) proves the dial-time
+	// authentication worked.
+	c.Logf("Connected! Testing the need for authentication...")
+	err = mycoll.Find(nil).One(nil)
+	c.Assert(err, Equals, mgo.ErrNotFound)
+}
+
+func (s *S) TestAuthX509Cred(c *C) {
+	// MONGODB-X509 authentication: create a $external user whose name is
+	// the client certificate subject, then authenticate with the cert.
+	session, err := mgo.Dial("localhost:40001")
+	c.Assert(err, IsNil)
+	// Note: this defer binds the plain session; the TLS session assigned
+	// below gets its own defer.
+	defer session.Close()
+	binfo, err := session.BuildInfo()
+	c.Assert(err, IsNil)
+	if binfo.OpenSSLVersion == "" {
+		c.Skip("server does not support SSL")
+	}
+
+	// client.pem holds both the certificate and the private key.
+	clientCertPEM, err := ioutil.ReadFile("harness/certs/client.pem")
+	c.Assert(err, IsNil)
+
+	clientCert, err := tls.X509KeyPair(clientCertPEM, clientCertPEM)
+	c.Assert(err, IsNil)
+
+	tlsConfig := &tls.Config{
+		// Isolating tests to client certs, don't care about server validation.
+		InsecureSkipVerify: true,
+		Certificates:       []tls.Certificate{clientCert},
+	}
+
+	var host = "localhost:40003"
+	c.Logf("Connecting to %s...", host)
+	session, err = mgo.DialWithInfo(&mgo.DialInfo{
+		Addrs: []string{host},
+		DialServer: func(addr *mgo.ServerAddr) (net.Conn, error) {
+			return tls.Dial("tcp", addr.String(), tlsConfig)
+		},
+	})
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	// Password login first so we are allowed to create the X509 user.
+	err = session.Login(&mgo.Credential{Username: "root", Password: "rapadura"})
+	c.Assert(err, IsNil)
+
+	// This needs to be kept in sync with client.pem
+	x509Subject := "CN=localhost,OU=Client,O=MGO,L=MGO,ST=MGO,C=GO"
+
+	externalDB := session.DB("$external")
+	var x509User mgo.User = mgo.User{
+		Username:     x509Subject,
+		OtherDBRoles: map[string][]mgo.Role{"admin": []mgo.Role{mgo.RoleRoot}},
+	}
+	err = externalDB.UpsertUser(&x509User)
+	c.Assert(err, IsNil)
+
+	session.LogoutAll()
+
+	// Only the error matters here; the first names result is discarded.
+	c.Logf("Connected! Ensuring authentication is required...")
+	names, err := session.DatabaseNames()
+	c.Assert(err, ErrorMatches, "not authorized .*")
+
+	cred := &mgo.Credential{
+		Username:  x509Subject,
+		Mechanism: "MONGODB-X509",
+		Source:    "$external",
+	}
+
+	c.Logf("Authenticating...")
+	err = session.Login(cred)
+	c.Assert(err, IsNil)
+	c.Logf("Authenticated!")
+
+	names, err = session.DatabaseNames()
+	c.Assert(err, IsNil)
+	c.Assert(len(names) > 0, Equals, true)
+}
+
+var (
+	plainFlag = flag.String("plain", "", "Host to test PLAIN authentication against (depends on custom environment)")
+	plainUser = "einstein"
+	plainPass = "password"
+)
+
+func (s *S) TestAuthPlainCred(c *C) {
+	// PLAIN (LDAP-style) authentication against the host given via the
+	// -plain flag; skipped unless that custom environment is configured.
+	if *plainFlag == "" {
+		c.Skip("no -plain")
+	}
+	// PLAIN credentials are defined against the $external source.
+	cred := &mgo.Credential{
+		Username:  plainUser,
+		Password:  plainPass,
+		Source:    "$external",
+		Mechanism: "PLAIN",
+	}
+	c.Logf("Connecting to %s...", *plainFlag)
+	session, err := mgo.Dial(*plainFlag)
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	records := session.DB("records").C("records")
+
+	c.Logf("Connected! Testing the need for authentication...")
+	err = records.Find(nil).One(nil)
+	c.Assert(err, ErrorMatches, "unauthorized|not authorized .*")
+
+	c.Logf("Authenticating...")
+	err = session.Login(cred)
+	c.Assert(err, IsNil)
+	c.Logf("Authenticated!")
+
+	// ErrNotFound now: the query is authorized but finds nothing.
+	c.Logf("Connected! Testing the need for authentication...")
+	err = records.Find(nil).One(nil)
+	c.Assert(err, Equals, mgo.ErrNotFound)
+}
+
+func (s *S) TestAuthPlainURL(c *C) {
+	// PLAIN authentication selected through the URL; skipped unless the
+	// -plain environment is configured.
+	if *plainFlag == "" {
+		c.Skip("no -plain")
+	}
+	c.Logf("Connecting to %s...", *plainFlag)
+	// QueryEscape guards against special characters in user/password.
+	session, err := mgo.Dial(fmt.Sprintf("%s:%s@%s?authMechanism=PLAIN", url.QueryEscape(plainUser), url.QueryEscape(plainPass), *plainFlag))
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	c.Logf("Connected! Testing the need for authentication...")
+	err = session.DB("records").C("records").Find(nil).One(nil)
+	c.Assert(err, Equals, mgo.ErrNotFound)
+}
+
+var (
+	kerberosFlag = flag.Bool("kerberos", false, "Test Kerberos authentication (depends on custom environment)")
+	kerberosHost = "ldaptest.10gen.cc"
+	kerberosUser = "drivers@LDAPTEST.10GEN.CC"
+
+	winKerberosPasswordEnv = "MGO_KERBEROS_PASSWORD"
+)
+
+// Kerberos has its own suite because it talks to a remote server
+// that is prepared to authenticate against a kerberos deployment.
+type KerberosSuite struct{}
+
+var _ = Suite(&KerberosSuite{})
+
+// SetUpSuite enables driver debug logging and stats collection for the
+// whole Kerberos suite.
+func (kerberosSuite *KerberosSuite) SetUpSuite(c *C) {
+	mgo.SetDebug(true)
+	mgo.SetStats(true)
+}
+
+// TearDownSuite restores the debug/stats flags toggled in SetUpSuite.
+func (kerberosSuite *KerberosSuite) TearDownSuite(c *C) {
+	mgo.SetDebug(false)
+	mgo.SetStats(false)
+}
+
+// SetUpTest routes driver logs to the current test and zeroes the
+// driver stats before each test.
+func (kerberosSuite *KerberosSuite) SetUpTest(c *C) {
+	mgo.SetLogger((*cLogger)(c))
+	mgo.ResetStats()
+}
+
+// TearDownTest detaches the per-test logger installed in SetUpTest.
+func (kerberosSuite *KerberosSuite) TearDownTest(c *C) {
+	mgo.SetLogger(nil)
+}
+
+func (kerberosSuite *KerberosSuite) TestAuthKerberosCred(c *C) {
+	// GSSAPI authentication with an explicit Credential; requires the
+	// remote Kerberos test deployment (-kerberos flag).
+	if !*kerberosFlag {
+		c.Skip("no -kerberos")
+	}
+	cred := &mgo.Credential{
+		Username:  kerberosUser,
+		Mechanism: "GSSAPI",
+	}
+	windowsAppendPasswordToCredential(cred)
+	c.Logf("Connecting to %s...", kerberosHost)
+	session, err := mgo.Dial(kerberosHost)
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	// Only the error matters here; the count is discarded until after login.
+	c.Logf("Connected! Testing the need for authentication...")
+	n, err := session.DB("kerberos").C("test").Find(M{}).Count()
+	c.Assert(err, ErrorMatches, ".*authorized.*")
+
+	c.Logf("Authenticating...")
+	err = session.Login(cred)
+	c.Assert(err, IsNil)
+	c.Logf("Authenticated!")
+
+	n, err = session.DB("kerberos").C("test").Find(M{}).Count()
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, 1)
+}
+
+func (kerberosSuite *KerberosSuite) TestAuthKerberosURL(c *C) {
+	// GSSAPI authentication selected through the URL.
+	if !*kerberosFlag {
+		c.Skip("no -kerberos")
+	}
+	c.Logf("Connecting to %s...", kerberosHost)
+	connectUri := url.QueryEscape(kerberosUser) + "@" + kerberosHost + "?authMechanism=GSSAPI"
+	// On Windows there is no kinit, so the password must be embedded
+	// (taken from the MGO_KERBEROS_PASSWORD environment variable).
+	if runtime.GOOS == "windows" {
+		connectUri = url.QueryEscape(kerberosUser) + ":" + url.QueryEscape(getWindowsKerberosPassword()) + "@" + kerberosHost + "?authMechanism=GSSAPI"
+	}
+	session, err := mgo.Dial(connectUri)
+	c.Assert(err, IsNil)
+	defer session.Close()
+	n, err := session.DB("kerberos").C("test").Find(M{}).Count()
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, 1)
+}
+
+func (kerberosSuite *KerberosSuite) TestAuthKerberosServiceName(c *C) {
+	// A wrong GSSAPI service name must fail with a principal-not-found
+	// error; the correct name must then authenticate successfully.
+	if !*kerberosFlag {
+		c.Skip("no -kerberos")
+	}
+
+	wrongServiceName := "wrong"
+	rightServiceName := "mongodb"
+
+	cred := &mgo.Credential{
+		Username:  kerberosUser,
+		Mechanism: "GSSAPI",
+		Service:   wrongServiceName,
+	}
+	windowsAppendPasswordToCredential(cred)
+
+	c.Logf("Connecting to %s...", kerberosHost)
+	session, err := mgo.Dial(kerberosHost)
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	c.Logf("Authenticating with incorrect service name...")
+	err = session.Login(cred)
+	c.Assert(err, ErrorMatches, ".*@LDAPTEST.10GEN.CC not found.*")
+
+	// Same credential object, corrected in place.
+	cred.Service = rightServiceName
+	c.Logf("Authenticating with correct service name...")
+	err = session.Login(cred)
+	c.Assert(err, IsNil)
+	c.Logf("Authenticated!")
+
+	n, err := session.DB("kerberos").C("test").Find(M{}).Count()
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, 1)
+}
+
+func (kerberosSuite *KerberosSuite) TestAuthKerberosServiceHost(c *C) {
+	// Mirror of TestAuthKerberosServiceName for the ServiceHost field:
+	// wrong host fails with principal-not-found, right host succeeds.
+	if !*kerberosFlag {
+		c.Skip("no -kerberos")
+	}
+
+	wrongServiceHost := "eggs.bacon.tk"
+	rightServiceHost := kerberosHost
+
+	cred := &mgo.Credential{
+		Username:    kerberosUser,
+		Mechanism:   "GSSAPI",
+		ServiceHost: wrongServiceHost,
+	}
+	windowsAppendPasswordToCredential(cred)
+
+	c.Logf("Connecting to %s...", kerberosHost)
+	session, err := mgo.Dial(kerberosHost)
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	c.Logf("Authenticating with incorrect service host...")
+	err = session.Login(cred)
+	c.Assert(err, ErrorMatches, ".*@LDAPTEST.10GEN.CC not found.*")
+
+	// Same credential object, corrected in place.
+	cred.ServiceHost = rightServiceHost
+	c.Logf("Authenticating with correct service host...")
+	err = session.Login(cred)
+	c.Assert(err, IsNil)
+	c.Logf("Authenticated!")
+
+	n, err := session.DB("kerberos").C("test").Find(M{}).Count()
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, 1)
+}
+
+// No kinit on SSPI-style Kerberos, so we need to provide a password. In order
+// to avoid inlining password, require it to be set as an environment variable,
+// for instance: `SET MGO_KERBEROS_PASSWORD=this_isnt_the_password`
+func getWindowsKerberosPassword() string {
+	if pw := os.Getenv(winKerberosPasswordEnv); pw != "" {
+		return pw
+	}
+	panic(fmt.Sprintf("Need to set %v environment variable to run Kerberos tests on Windows", winKerberosPasswordEnv))
+}
+
+// windowsAppendPasswordToCredential fills in cred.Password from the
+// MGO_KERBEROS_PASSWORD environment variable when running on Windows,
+// where kinit-style ticket acquisition is unavailable.
+func windowsAppendPasswordToCredential(cred *mgo.Credential) {
+	if runtime.GOOS == "windows" {
+		cred.Password = getWindowsKerberosPassword()
+	}
+}

+ 25 - 0
src/gopkg.in/mgo.v2/bson/LICENSE

@@ -0,0 +1,25 @@
+BSON library for Go
+
+Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met: 
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer. 
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution. 
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 738 - 0
src/gopkg.in/mgo.v2/bson/bson.go

@@ -0,0 +1,738 @@
+// BSON library for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+//    list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+//    this list of conditions and the following disclaimer in the documentation
+//    and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Package bson is an implementation of the BSON specification for Go:
+//
+//     http://bsonspec.org
+//
+// It was created as part of the mgo MongoDB driver for Go, but is standalone
+// and may be used on its own without the driver.
+package bson
+
+import (
+	"bytes"
+	"crypto/md5"
+	"crypto/rand"
+	"encoding/binary"
+	"encoding/hex"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+	"runtime"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// --------------------------------------------------------------------------
+// The public API.
+
+// A value implementing the bson.Getter interface will have its GetBSON
+// method called when the given value has to be marshalled, and the result
+// of this method will be marshaled in place of the actual object.
+//
+// If GetBSON returns a non-nil error, the marshalling procedure
+// will stop and error out with the provided value.
+type Getter interface {
+	GetBSON() (interface{}, error)
+}
+
+// A value implementing the bson.Setter interface will receive the BSON
+// value via the SetBSON method during unmarshaling, and the object
+// itself will not be changed as usual.
+//
+// If setting the value works, the method should return nil or alternatively
+// bson.SetZero to set the respective field to its zero value (nil for
+// pointer types). If SetBSON returns a value of type bson.TypeError, the
+// BSON value will be omitted from a map or slice being decoded and the
+// unmarshalling will continue. If it returns any other non-nil error, the
+// unmarshalling procedure will stop and error out with the provided value.
+//
+// This interface is generally useful in pointer receivers, since the method
+// will want to change the receiver. A type field that implements the Setter
+// interface doesn't have to be a pointer, though.
+//
+// Unlike the usual behavior, unmarshalling onto a value that implements a
+// Setter interface will NOT reset the value to its zero state. This allows
+// the value to decide by itself how to be unmarshalled.
+//
+// For example:
+//
+//     type MyString string
+//
+//     func (s *MyString) SetBSON(raw bson.Raw) error {
+//         return raw.Unmarshal(s)
+//     }
+//
+type Setter interface {
+	SetBSON(raw Raw) error
+}
+
+// SetZero may be returned from a SetBSON method to have the value set to
+// its respective zero value. When used in pointer values, this will set the
+// field to nil rather than to the pre-allocated value.
+var SetZero = errors.New("set to zero")
+
+// M is a convenient alias for a map[string]interface{} map, useful for
+// dealing with BSON in a native way.  For instance:
+//
+//     bson.M{"a": 1, "b": true}
+//
+// There's no special handling for this type in addition to what's done anyway
+// for an equivalent map type.  Elements in the map will be dumped in an
+// undefined ordered. See also the bson.D type for an ordered alternative.
+type M map[string]interface{}
+
+// D represents a BSON document containing ordered elements. For example:
+//
+//     bson.D{{"a", 1}, {"b", true}}
+//
+// In some situations, such as when creating indexes for MongoDB, the order in
+// which the elements are defined is important.  If the order is not important,
+// using a map is generally more comfortable. See bson.M and bson.RawD.
+type D []DocElem
+
+// DocElem is an element of the bson.D document representation.
+type DocElem struct {
+	Name  string
+	Value interface{}
+}
+
+// Map returns a map out of the ordered element name/value pairs in d.
+func (d D) Map() M {
+	m := make(M, len(d))
+	for _, elem := range d {
+		m[elem.Name] = elem.Value
+	}
+	return m
+}
+
+// The Raw type represents raw unprocessed BSON documents and elements.
+// Kind is the kind of element as defined per the BSON specification, and
+// Data is the raw unprocessed data for the respective element.
+// Using this type it is possible to unmarshal or marshal values partially.
+//
+// Relevant documentation:
+//
+//     http://bsonspec.org/#/specification
+//
+type Raw struct {
+	Kind byte
+	Data []byte
+}
+
+// RawD represents a BSON document containing raw unprocessed elements.
+// This low-level representation may be useful when lazily processing
+// documents of uncertain content, or when manipulating the raw content
+// documents in general.
+type RawD []RawDocElem
+
+// See the RawD type.
+type RawDocElem struct {
+	Name  string
+	Value Raw
+}
+
+// ObjectId is a unique ID identifying a BSON value. It must be exactly 12 bytes
+// long. MongoDB objects by default have such a property set in their "_id"
+// property.
+//
+// http://www.mongodb.org/display/DOCS/Object+IDs
+type ObjectId string
+
+// ObjectIdHex returns an ObjectId from the provided hex representation.
+// Calling this function with an invalid hex representation will
+// cause a runtime panic. See the IsObjectIdHex function.
+func ObjectIdHex(s string) ObjectId {
+	raw, err := hex.DecodeString(s)
+	if err == nil && len(raw) == 12 {
+		return ObjectId(raw)
+	}
+	panic(fmt.Sprintf("invalid input to ObjectIdHex: %q", s))
+}
+
+// IsObjectIdHex returns whether s is a valid hex representation of
+// an ObjectId. See the ObjectIdHex function.
+func IsObjectIdHex(s string) bool {
+	if len(s) == 24 {
+		_, err := hex.DecodeString(s)
+		return err == nil
+	}
+	return false
+}
+
+// objectIdCounter is atomically incremented when generating a new ObjectId
+// using NewObjectId() function. It's used as a counter part of an id.
+var objectIdCounter uint32 = readRandomUint32()
+
+// readRandomUint32 returns a random objectIdCounter.
+// Seeding from crypto/rand makes it unlikely that independent processes
+// start their ObjectId counters at the same value.
+func readRandomUint32() uint32 {
+	var b [4]byte
+	_, err := io.ReadFull(rand.Reader, b[:])
+	if err != nil {
+		panic(fmt.Errorf("cannot read random object id: %v", err))
+	}
+	// The original hand-rolled shifts combined b[0] as the low byte and
+	// b[3] as the high byte — exactly a little-endian decode.
+	return binary.LittleEndian.Uint32(b[:])
+}
+
+// machineId stores machine id generated once and used in subsequent calls
+// to NewObjectId function.
+var machineId = readMachineId()
+var processId = os.Getpid()
+
+// readMachineId generates and returns a machine id.
+// If this function fails to get the hostname it will cause a runtime error.
+func readMachineId() []byte {
+	var sum [3]byte
+	id := sum[:]
+	hostname, err1 := os.Hostname()
+	if err1 != nil {
+		_, err2 := io.ReadFull(rand.Reader, id)
+		if err2 != nil {
+			panic(fmt.Errorf("cannot get hostname: %v; %v", err1, err2))
+		}
+		return id
+	}
+	hw := md5.New()
+	hw.Write([]byte(hostname))
+	copy(id, hw.Sum(nil))
+	return id
+}
+
+// NewObjectId returns a new unique ObjectId.
+func NewObjectId() ObjectId {
+	var b [12]byte
+	// Timestamp, 4 bytes, big endian.
+	binary.BigEndian.PutUint32(b[:4], uint32(time.Now().Unix()))
+	// Machine, first 3 bytes of md5(hostname).
+	copy(b[4:7], machineId)
+	// Pid, 2 bytes, specs don't specify endianness, but we use big endian.
+	binary.BigEndian.PutUint16(b[7:9], uint16(processId))
+	// Increment, 3 bytes, big endian.
+	i := atomic.AddUint32(&objectIdCounter, 1)
+	b[9] = byte(i >> 16)
+	b[10] = byte(i >> 8)
+	b[11] = byte(i)
+	return ObjectId(b[:])
+}
+
+// NewObjectIdWithTime returns a dummy ObjectId with the timestamp part filled
+// with the provided number of seconds from epoch UTC, and all other parts
+// filled with zeroes. It's not safe to insert a document with an id generated
+// by this method, it is useful only for queries to find documents with ids
+// generated before or after the specified timestamp.
+func NewObjectIdWithTime(t time.Time) ObjectId {
+	var b [12]byte
+	binary.BigEndian.PutUint32(b[:4], uint32(t.Unix()))
+	return ObjectId(b[:])
+}
+
+// String returns a hex string representation of the id.
+// Example: ObjectIdHex("4d88e15b60f486e428412dc9").
+func (id ObjectId) String() string {
+	return fmt.Sprintf(`ObjectIdHex("%x")`, string(id))
+}
+
+// Hex returns a hex representation of the ObjectId.
+func (id ObjectId) Hex() string {
+	return hex.EncodeToString([]byte(id))
+}
+
+// MarshalJSON turns a bson.ObjectId into a json.Marshaller.
+func (id ObjectId) MarshalJSON() ([]byte, error) {
+	return []byte(fmt.Sprintf(`"%x"`, string(id))), nil
+}
+
+var nullBytes = []byte("null")
+
+// UnmarshalJSON turns *bson.ObjectId into a json.Unmarshaller.
+// It accepts a plain 24-character hex string, JSON null / "" (both reset
+// the id to its zero value), and the extended-JSON forms {"$oid": ...}
+// and ObjectId(...) via $oidFunc.
+func (id *ObjectId) UnmarshalJSON(data []byte) error {
+	if len(data) > 0 && (data[0] == '{' || data[0] == 'O') {
+		var v struct {
+			Id   json.RawMessage `json:"$oid"`
+			Func struct {
+				Id json.RawMessage
+			} `json:"$oidFunc"`
+		}
+		// On decode failure fall through and try data as a plain string.
+		err := jdec(data, &v)
+		if err == nil {
+			if len(v.Id) > 0 {
+				data = []byte(v.Id)
+			} else {
+				data = []byte(v.Func.Id)
+			}
+		}
+	}
+	if len(data) == 2 && data[0] == '"' && data[1] == '"' || bytes.Equal(data, nullBytes) {
+		*id = ""
+		return nil
+	}
+	// Expect a quoted 24-char hex string: 26 bytes including the quotes.
+	if len(data) != 26 || data[0] != '"' || data[25] != '"' {
+		return fmt.Errorf("invalid ObjectId in JSON: %s", string(data))
+	}
+	var buf [12]byte
+	_, err := hex.Decode(buf[:], data[1:25])
+	if err != nil {
+		return fmt.Errorf("invalid ObjectId in JSON: %s (%s)", string(data), err)
+	}
+	*id = ObjectId(buf[:])
+	return nil
+}
+
+// MarshalText turns bson.ObjectId into an encoding.TextMarshaler.
+func (id ObjectId) MarshalText() ([]byte, error) {
+	return []byte(fmt.Sprintf("%x", string(id))), nil
+}
+
+// UnmarshalText turns *bson.ObjectId into an encoding.TextUnmarshaler.
+func (id *ObjectId) UnmarshalText(data []byte) error {
+	// Empty input or a single space resets the id to its zero value.
+	if len(data) == 0 || (len(data) == 1 && data[0] == ' ') {
+		*id = ""
+		return nil
+	}
+	if len(data) != 24 {
+		return fmt.Errorf("invalid ObjectId: %s", data)
+	}
+	var buf [12]byte
+	if _, err := hex.Decode(buf[:], data); err != nil {
+		return fmt.Errorf("invalid ObjectId: %s (%s)", data, err)
+	}
+	*id = ObjectId(buf[:])
+	return nil
+}
+
+// Valid returns true if id is valid. A valid id must contain exactly 12 bytes.
+func (id ObjectId) Valid() bool {
+	return len(id) == 12
+}
+
+// byteSlice returns byte slice of id from start to end.
+// Calling this function with an invalid id will cause a runtime panic.
+func (id ObjectId) byteSlice(start, end int) []byte {
+	if len(id) != 12 {
+		panic(fmt.Sprintf("invalid ObjectId: %q", string(id)))
+	}
+	return []byte(string(id)[start:end])
+}
+
+// Time returns the timestamp part of the id.
+// It's a runtime error to call this method with an invalid id.
+func (id ObjectId) Time() time.Time {
+	// First 4 bytes of ObjectId is 32-bit big-endian seconds from epoch.
+	secs := int64(binary.BigEndian.Uint32(id.byteSlice(0, 4)))
+	return time.Unix(secs, 0)
+}
+
+// Machine returns the 3-byte machine id part of the id.
+// It's a runtime error to call this method with an invalid id.
+func (id ObjectId) Machine() []byte {
+	return id.byteSlice(4, 7)
+}
+
+// Pid returns the process id part of the id.
+// It's a runtime error to call this method with an invalid id.
+func (id ObjectId) Pid() uint16 {
+	return binary.BigEndian.Uint16(id.byteSlice(7, 9))
+}
+
+// Counter returns the incrementing value part of the id.
+// It's a runtime error to call this method with an invalid id.
+func (id ObjectId) Counter() int32 {
+	b := id.byteSlice(9, 12)
+	// Counter is stored as big-endian 3-byte value
+	return int32(uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2]))
+}
+
+// The Symbol type is similar to a string and is used in languages with a
+// distinct symbol type.
+type Symbol string
+
+// Now returns the current time with millisecond precision. MongoDB stores
+// timestamps with the same precision, so a Time returned from this method
+// will not change after a roundtrip to the database. That's the only reason
+// why this function exists. Using the time.Now function also works fine
+// otherwise.
+func Now() time.Time {
+	return time.Unix(0, time.Now().UnixNano()/1e6*1e6)
+}
+
+// MongoTimestamp is a special internal type used by MongoDB that for some
+// strange reason has its own datatype defined in BSON.
+type MongoTimestamp int64
+
+type orderKey int64
+
+// MaxKey is a special value that compares higher than all other possible BSON
+// values in a MongoDB database.
+var MaxKey = orderKey(1<<63 - 1)
+
+// MinKey is a special value that compares lower than all other possible BSON
+// values in a MongoDB database.
+var MinKey = orderKey(-1 << 63)
+
+type undefined struct{}
+
+// Undefined represents the undefined BSON value.
+var Undefined undefined
+
+// Binary is a representation for non-standard binary values.  Any kind should
+// work, but the following are known as of this writing:
+//
+//   0x00 - Generic. This is decoded as []byte(data), not Binary{0x00, data}.
+//   0x01 - Function (!?)
+//   0x02 - Obsolete generic.
+//   0x03 - UUID
+//   0x05 - MD5
+//   0x80 - User defined.
+//
+type Binary struct {
+	Kind byte
+	Data []byte
+}
+
+// RegEx represents a regular expression.  The Options field may contain
+// individual characters defining the way in which the pattern should be
+// applied, and must be sorted. Valid options as of this writing are 'i' for
+// case insensitive matching, 'm' for multi-line matching, 'x' for verbose
+// mode, 'l' to make \w, \W, and similar be locale-dependent, 's' for dot-all
+// mode (a '.' matches everything), and 'u' to make \w, \W, and similar match
+// unicode. The value of the Options parameter is not verified before being
+// marshaled into the BSON format.
+type RegEx struct {
+	Pattern string
+	Options string
+}
+
+// JavaScript is a type that holds JavaScript code. If Scope is non-nil, it
+// will be marshaled as a mapping from identifiers to values that may be
+// used when evaluating the provided Code.
+type JavaScript struct {
+	Code  string
+	Scope interface{}
+}
+
+// DBPointer refers to a document id in a namespace.
+//
+// This type is deprecated in the BSON specification and should not be used
+// except for backwards compatibility with ancient applications.
+type DBPointer struct {
+	Namespace string
+	Id        ObjectId
+}
+
+const initialBufferSize = 64
+
+// handleErr is deferred by Marshal/Unmarshal to convert panics raised
+// inside the encoder/decoder into the caller's error result.
+// Runtime errors (genuine bugs) are re-raised rather than masked;
+// externalPanic values are deliberately re-raised as well — presumably
+// panics originating outside this package; verify at its definition.
+func handleErr(err *error) {
+	if r := recover(); r != nil {
+		if _, ok := r.(runtime.Error); ok {
+			panic(r)
+		} else if _, ok := r.(externalPanic); ok {
+			panic(r)
+		} else if s, ok := r.(string); ok {
+			// Internal code panics with plain strings for expected failures.
+			*err = errors.New(s)
+		} else if e, ok := r.(error); ok {
+			*err = e
+		} else {
+			panic(r)
+		}
+	}
+}
+
+// Marshal serializes the in value, which may be a map or a struct value.
+// In the case of struct values, only exported fields will be serialized,
+// and the order of serialized fields will match that of the struct itself.
+// The lowercased field name is used as the key for each exported field,
+// but this behavior may be changed using the respective field tag.
+// The tag may also contain flags to tweak the marshalling behavior for
+// the field. The tag formats accepted are:
+//
+//     "[<key>][,<flag1>[,<flag2>]]"
+//
+//     `(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+//     omitempty  Only include the field if it's not set to the zero
+//                value for the type or to empty slices or maps.
+//
+//     minsize    Marshal an int64 value as an int32, if that's feasible
+//                while preserving the numeric value.
+//
+//     inline     Inline the field, which must be a struct or a map,
+//                causing all of its fields or keys to be processed as if
+//                they were part of the outer struct. For maps, keys must
+//                not conflict with the bson keys of other struct fields.
+//
+// Some examples:
+//
+//     type T struct {
+//         A bool
+//         B int    "myb"
+//         C string "myc,omitempty"
+//         D string `bson:",omitempty" json:"jsonkey"`
+//         E int64  ",minsize"
+//         F int64  "myf,omitempty,minsize"
+//     }
+//
+func Marshal(in interface{}) (out []byte, err error) {
+	defer handleErr(&err)
+	e := &encoder{make([]byte, 0, initialBufferSize), sync.Mutex{}}
+	e.addDoc(reflect.ValueOf(in))
+	return e.out, nil
+}
+
+// Unmarshal deserializes data from in into the out value.  The out value
+// must be a map, a pointer to a struct, or a pointer to a bson.D value.
+// In the case of struct values, only exported fields will be deserialized.
+// The lowercased field name is used as the key for each exported field,
+// but this behavior may be changed using the respective field tag.
+// The tag may also contain flags to tweak the marshalling behavior for
+// the field. The tag formats accepted are:
+//
+//     "[<key>][,<flag1>[,<flag2>]]"
+//
+//     `(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported during unmarshal (see the
+// Marshal method for other flags):
+//
+//     inline     Inline the field, which must be a struct or a map.
+//                Inlined structs are handled as if its fields were part
+//                of the outer struct. An inlined map causes keys that do
+//                not match any other struct field to be inserted in the
+//                map rather than being discarded as usual.
+//
+// The target field or element types of out may not necessarily match
+// the BSON values of the provided data.  The following conversions are
+// made automatically:
+//
+// - Numeric types are converted if at least the integer part of the
+//   value would be preserved correctly
+// - Bools are converted to numeric types as 1 or 0
+// - Numeric types are converted to bools as true if not 0 or false otherwise
+// - Binary and string BSON data is converted to a string, array or byte slice
+//
+// If the value would not fit the type and cannot be converted, it's
+// silently skipped.
+//
+// Pointer values are initialized when necessary.
+func Unmarshal(in []byte, out interface{}) (err error) {
+	if raw, ok := out.(*Raw); ok {
+		raw.Kind = 3
+		raw.Data = in
+		return nil
+	}
+	defer handleErr(&err)
+	v := reflect.ValueOf(out)
+	switch v.Kind() {
+	case reflect.Ptr:
+		fallthrough
+	case reflect.Map:
+		d := newDecoder(in)
+		d.readDocTo(v)
+	case reflect.Struct:
+		return errors.New("Unmarshal can't deal with struct values. Use a pointer.")
+	default:
+		return errors.New("Unmarshal needs a map or a pointer to a struct.")
+	}
+	return nil
+}
+
+// Unmarshal deserializes raw into the out value.  If the out value type
+// is not compatible with raw, a *bson.TypeError is returned.
+//
+// See the Unmarshal function documentation for more details on the
+// unmarshalling process.
+func (raw Raw) Unmarshal(out interface{}) (err error) {
+	defer handleErr(&err)
+	v := reflect.ValueOf(out)
+	switch v.Kind() {
+	case reflect.Ptr:
+		v = v.Elem()
+		fallthrough
+	case reflect.Map:
+		d := newDecoder(raw.Data)
+		good := d.readElemTo(v, raw.Kind)
+		if !good {
+			return &TypeError{v.Type(), raw.Kind}
+		}
+	case reflect.Struct:
+		return errors.New("Raw Unmarshal can't deal with struct values. Use a pointer.")
+	default:
+		return errors.New("Raw Unmarshal needs a map or a valid pointer.")
+	}
+	return nil
+}
+
+type TypeError struct {
+	Type reflect.Type
+	Kind byte
+}
+
+func (e *TypeError) Error() string {
+	return fmt.Sprintf("BSON kind 0x%02x isn't compatible with type %s", e.Kind, e.Type.String())
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+type structInfo struct {
+	FieldsMap  map[string]fieldInfo
+	FieldsList []fieldInfo
+	InlineMap  int
+	Zero       reflect.Value
+}
+
+type fieldInfo struct {
+	Key       string
+	Num       int
+	OmitEmpty bool
+	MinSize   bool
+	Inline    []int
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var structMapMutex sync.RWMutex
+
+type externalPanic string
+
+func (e externalPanic) String() string {
+	return string(e)
+}
+
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+	structMapMutex.RLock()
+	sinfo, found := structMap[st]
+	structMapMutex.RUnlock()
+	if found {
+		return sinfo, nil
+	}
+	n := st.NumField()
+	fieldsMap := make(map[string]fieldInfo)
+	fieldsList := make([]fieldInfo, 0, n)
+	inlineMap := -1
+	for i := 0; i != n; i++ {
+		field := st.Field(i)
+		if field.PkgPath != "" && !field.Anonymous {
+			continue // Private field
+		}
+
+		info := fieldInfo{Num: i}
+
+		tag := field.Tag.Get("bson")
+		if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
+			tag = string(field.Tag)
+		}
+		if tag == "-" {
+			continue
+		}
+
+		inline := false
+		fields := strings.Split(tag, ",")
+		if len(fields) > 1 {
+			for _, flag := range fields[1:] {
+				switch flag {
+				case "omitempty":
+					info.OmitEmpty = true
+				case "minsize":
+					info.MinSize = true
+				case "inline":
+					inline = true
+				default:
+					msg := fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
+					panic(externalPanic(msg))
+				}
+			}
+			tag = fields[0]
+		}
+
+		if inline {
+			switch field.Type.Kind() {
+			case reflect.Map:
+				if inlineMap >= 0 {
+					return nil, errors.New("Multiple ,inline maps in struct " + st.String())
+				}
+				if field.Type.Key() != reflect.TypeOf("") {
+					return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
+				}
+				inlineMap = info.Num
+			case reflect.Struct:
+				sinfo, err := getStructInfo(field.Type)
+				if err != nil {
+					return nil, err
+				}
+				for _, finfo := range sinfo.FieldsList {
+					if _, found := fieldsMap[finfo.Key]; found {
+						msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
+						return nil, errors.New(msg)
+					}
+					if finfo.Inline == nil {
+						finfo.Inline = []int{i, finfo.Num}
+					} else {
+						finfo.Inline = append([]int{i}, finfo.Inline...)
+					}
+					fieldsMap[finfo.Key] = finfo
+					fieldsList = append(fieldsList, finfo)
+				}
+			default:
+				panic("Option ,inline needs a struct value or map field")
+			}
+			continue
+		}
+
+		if tag != "" {
+			info.Key = tag
+		} else {
+			info.Key = strings.ToLower(field.Name)
+		}
+
+		if _, found = fieldsMap[info.Key]; found {
+			msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
+			return nil, errors.New(msg)
+		}
+
+		fieldsList = append(fieldsList, info)
+		fieldsMap[info.Key] = info
+	}
+	sinfo = &structInfo{
+		fieldsMap,
+		fieldsList,
+		inlineMap,
+		reflect.New(st).Elem(),
+	}
+	structMapMutex.Lock()
+	structMap[st] = sinfo
+	structMapMutex.Unlock()
+	return sinfo, nil
+}

+ 1832 - 0
src/gopkg.in/mgo.v2/bson/bson_test.go

@@ -0,0 +1,1832 @@
+// BSON library for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+//    list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+//    this list of conditions and the following disclaimer in the documentation
+//    and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// gobson - BSON library for Go.
+
+package bson_test
+
+import (
+	"encoding/binary"
+	"encoding/hex"
+	"encoding/json"
+	"encoding/xml"
+	"errors"
+	"net/url"
+	"reflect"
+	"strings"
+	"testing"
+	"time"
+
+	. "gopkg.in/check.v1"
+	"gopkg.in/mgo.v2/bson"
+	"gopkg.in/yaml.v2"
+)
+
+func TestAll(t *testing.T) {
+	TestingT(t)
+}
+
+type S struct{}
+
+var _ = Suite(&S{})
+
+// Wrap up the document elements contained in data, prepending the int32
+// length of the data, and appending the '\x00' value closing the document.
+func wrapInDoc(data string) string {
+	result := make([]byte, len(data)+5)
+	binary.LittleEndian.PutUint32(result, uint32(len(result)))
+	copy(result[4:], []byte(data))
+	return string(result)
+}
+
+func makeZeroDoc(value interface{}) (zero interface{}) {
+	v := reflect.ValueOf(value)
+	t := v.Type()
+	switch t.Kind() {
+	case reflect.Map:
+		mv := reflect.MakeMap(t)
+		zero = mv.Interface()
+	case reflect.Ptr:
+		pv := reflect.New(v.Type().Elem())
+		zero = pv.Interface()
+	case reflect.Slice, reflect.Int, reflect.Int64, reflect.Struct:
+		zero = reflect.New(t).Interface()
+	default:
+		panic("unsupported doc type: " + t.Name())
+	}
+	return zero
+}
+
+func testUnmarshal(c *C, data string, obj interface{}) {
+	zero := makeZeroDoc(obj)
+	err := bson.Unmarshal([]byte(data), zero)
+	c.Assert(err, IsNil)
+	c.Assert(zero, DeepEquals, obj)
+}
+
+type testItemType struct {
+	obj  interface{}
+	data string
+}
+
+// --------------------------------------------------------------------------
+// Samples from bsonspec.org:
+
+var sampleItems = []testItemType{
+	{bson.M{"hello": "world"},
+		"\x16\x00\x00\x00\x02hello\x00\x06\x00\x00\x00world\x00\x00"},
+	{bson.M{"BSON": []interface{}{"awesome", float64(5.05), 1986}},
+		"1\x00\x00\x00\x04BSON\x00&\x00\x00\x00\x020\x00\x08\x00\x00\x00" +
+			"awesome\x00\x011\x00333333\x14@\x102\x00\xc2\x07\x00\x00\x00\x00"},
+}
+
+func (s *S) TestMarshalSampleItems(c *C) {
+	for i, item := range sampleItems {
+		data, err := bson.Marshal(item.obj)
+		c.Assert(err, IsNil)
+		c.Assert(string(data), Equals, item.data, Commentf("Failed on item %d", i))
+	}
+}
+
+func (s *S) TestUnmarshalSampleItems(c *C) {
+	for i, item := range sampleItems {
+		value := bson.M{}
+		err := bson.Unmarshal([]byte(item.data), value)
+		c.Assert(err, IsNil)
+		c.Assert(value, DeepEquals, item.obj, Commentf("Failed on item %d", i))
+	}
+}
+
+// --------------------------------------------------------------------------
+// Every type, ordered by the type flag. These are not wrapped with the
+// length and last \x00 from the document. wrapInDoc() computes them.
+// Note that all of them should be supported as two-way conversions.
+
+var allItems = []testItemType{
+	{bson.M{},
+		""},
+	{bson.M{"_": float64(5.05)},
+		"\x01_\x00333333\x14@"},
+	{bson.M{"_": "yo"},
+		"\x02_\x00\x03\x00\x00\x00yo\x00"},
+	{bson.M{"_": bson.M{"a": true}},
+		"\x03_\x00\x09\x00\x00\x00\x08a\x00\x01\x00"},
+	{bson.M{"_": []interface{}{true, false}},
+		"\x04_\x00\r\x00\x00\x00\x080\x00\x01\x081\x00\x00\x00"},
+	{bson.M{"_": []byte("yo")},
+		"\x05_\x00\x02\x00\x00\x00\x00yo"},
+	{bson.M{"_": bson.Binary{0x80, []byte("udef")}},
+		"\x05_\x00\x04\x00\x00\x00\x80udef"},
+	{bson.M{"_": bson.Undefined}, // Obsolete, but still seen in the wild.
+		"\x06_\x00"},
+	{bson.M{"_": bson.ObjectId("0123456789ab")},
+		"\x07_\x000123456789ab"},
+	{bson.M{"_": bson.DBPointer{"testnamespace", bson.ObjectId("0123456789ab")}},
+		"\x0C_\x00\x0e\x00\x00\x00testnamespace\x000123456789ab"},
+	{bson.M{"_": false},
+		"\x08_\x00\x00"},
+	{bson.M{"_": true},
+		"\x08_\x00\x01"},
+	{bson.M{"_": time.Unix(0, 258e6)}, // Note the NS <=> MS conversion.
+		"\x09_\x00\x02\x01\x00\x00\x00\x00\x00\x00"},
+	{bson.M{"_": nil},
+		"\x0A_\x00"},
+	{bson.M{"_": bson.RegEx{"ab", "cd"}},
+		"\x0B_\x00ab\x00cd\x00"},
+	{bson.M{"_": bson.JavaScript{"code", nil}},
+		"\x0D_\x00\x05\x00\x00\x00code\x00"},
+	{bson.M{"_": bson.Symbol("sym")},
+		"\x0E_\x00\x04\x00\x00\x00sym\x00"},
+	{bson.M{"_": bson.JavaScript{"code", bson.M{"": nil}}},
+		"\x0F_\x00\x14\x00\x00\x00\x05\x00\x00\x00code\x00" +
+			"\x07\x00\x00\x00\x0A\x00\x00"},
+	{bson.M{"_": 258},
+		"\x10_\x00\x02\x01\x00\x00"},
+	{bson.M{"_": bson.MongoTimestamp(258)},
+		"\x11_\x00\x02\x01\x00\x00\x00\x00\x00\x00"},
+	{bson.M{"_": int64(258)},
+		"\x12_\x00\x02\x01\x00\x00\x00\x00\x00\x00"},
+	{bson.M{"_": int64(258 << 32)},
+		"\x12_\x00\x00\x00\x00\x00\x02\x01\x00\x00"},
+	{bson.M{"_": bson.MaxKey},
+		"\x7F_\x00"},
+	{bson.M{"_": bson.MinKey},
+		"\xFF_\x00"},
+}
+
+func (s *S) TestMarshalAllItems(c *C) {
+	for i, item := range allItems {
+		data, err := bson.Marshal(item.obj)
+		c.Assert(err, IsNil)
+		c.Assert(string(data), Equals, wrapInDoc(item.data), Commentf("Failed on item %d: %#v", i, item))
+	}
+}
+
+func (s *S) TestUnmarshalAllItems(c *C) {
+	for i, item := range allItems {
+		value := bson.M{}
+		err := bson.Unmarshal([]byte(wrapInDoc(item.data)), value)
+		c.Assert(err, IsNil)
+		c.Assert(value, DeepEquals, item.obj, Commentf("Failed on item %d: %#v", i, item))
+	}
+}
+
+func (s *S) TestUnmarshalRawAllItems(c *C) {
+	for i, item := range allItems {
+		if len(item.data) == 0 {
+			continue
+		}
+		value := item.obj.(bson.M)["_"]
+		if value == nil {
+			continue
+		}
+		pv := reflect.New(reflect.ValueOf(value).Type())
+		raw := bson.Raw{item.data[0], []byte(item.data[3:])}
+		c.Logf("Unmarshal raw: %#v, %#v", raw, pv.Interface())
+		err := raw.Unmarshal(pv.Interface())
+		c.Assert(err, IsNil)
+		c.Assert(pv.Elem().Interface(), DeepEquals, value, Commentf("Failed on item %d: %#v", i, item))
+	}
+}
+
+func (s *S) TestUnmarshalRawIncompatible(c *C) {
+	raw := bson.Raw{0x08, []byte{0x01}} // true
+	err := raw.Unmarshal(&struct{}{})
+	c.Assert(err, ErrorMatches, "BSON kind 0x08 isn't compatible with type struct \\{\\}")
+}
+
+func (s *S) TestUnmarshalZeroesStruct(c *C) {
+	data, err := bson.Marshal(bson.M{"b": 2})
+	c.Assert(err, IsNil)
+	type T struct{ A, B int }
+	v := T{A: 1}
+	err = bson.Unmarshal(data, &v)
+	c.Assert(err, IsNil)
+	c.Assert(v.A, Equals, 0)
+	c.Assert(v.B, Equals, 2)
+}
+
+func (s *S) TestUnmarshalZeroesMap(c *C) {
+	data, err := bson.Marshal(bson.M{"b": 2})
+	c.Assert(err, IsNil)
+	m := bson.M{"a": 1}
+	err = bson.Unmarshal(data, &m)
+	c.Assert(err, IsNil)
+	c.Assert(m, DeepEquals, bson.M{"b": 2})
+}
+
+func (s *S) TestUnmarshalNonNilInterface(c *C) {
+	data, err := bson.Marshal(bson.M{"b": 2})
+	c.Assert(err, IsNil)
+	m := bson.M{"a": 1}
+	var i interface{}
+	i = m
+	err = bson.Unmarshal(data, &i)
+	c.Assert(err, IsNil)
+	c.Assert(i, DeepEquals, bson.M{"b": 2})
+	c.Assert(m, DeepEquals, bson.M{"a": 1})
+}
+
+// --------------------------------------------------------------------------
+// Some one way marshaling operations which would unmarshal differently.
+
+var oneWayMarshalItems = []testItemType{
+	// These are being passed as pointers, and will unmarshal as values.
+	{bson.M{"": &bson.Binary{0x02, []byte("old")}},
+		"\x05\x00\x07\x00\x00\x00\x02\x03\x00\x00\x00old"},
+	{bson.M{"": &bson.Binary{0x80, []byte("udef")}},
+		"\x05\x00\x04\x00\x00\x00\x80udef"},
+	{bson.M{"": &bson.RegEx{"ab", "cd"}},
+		"\x0B\x00ab\x00cd\x00"},
+	{bson.M{"": &bson.JavaScript{"code", nil}},
+		"\x0D\x00\x05\x00\x00\x00code\x00"},
+	{bson.M{"": &bson.JavaScript{"code", bson.M{"": nil}}},
+		"\x0F\x00\x14\x00\x00\x00\x05\x00\x00\x00code\x00" +
+			"\x07\x00\x00\x00\x0A\x00\x00"},
+
+	// There's no float32 type in BSON.  Will encode as a float64.
+	{bson.M{"": float32(5.05)},
+		"\x01\x00\x00\x00\x00@33\x14@"},
+
+	// The array will be unmarshaled as a slice instead.
+	{bson.M{"": [2]bool{true, false}},
+		"\x04\x00\r\x00\x00\x00\x080\x00\x01\x081\x00\x00\x00"},
+
+	// The typed slice will be unmarshaled as []interface{}.
+	{bson.M{"": []bool{true, false}},
+		"\x04\x00\r\x00\x00\x00\x080\x00\x01\x081\x00\x00\x00"},
+
+	// Will unmarshal as a []byte.
+	{bson.M{"": bson.Binary{0x00, []byte("yo")}},
+		"\x05\x00\x02\x00\x00\x00\x00yo"},
+	{bson.M{"": bson.Binary{0x02, []byte("old")}},
+		"\x05\x00\x07\x00\x00\x00\x02\x03\x00\x00\x00old"},
+
+	// No way to preserve the type information here. We might encode as a zero
+	// value, but this would mean that pointer values in structs wouldn't be
+	// able to correctly distinguish between unset and set to the zero value.
+	{bson.M{"": (*byte)(nil)},
+		"\x0A\x00"},
+
+	// No int types smaller than int32 in BSON. Could encode this as a char,
+	// but it would still be ambiguous, take more, and be awkward in Go when
+	// loaded without typing information.
+	{bson.M{"": byte(8)},
+		"\x10\x00\x08\x00\x00\x00"},
+
+	// There are no unsigned types in BSON.  Will unmarshal as int32 or int64.
+	{bson.M{"": uint32(258)},
+		"\x10\x00\x02\x01\x00\x00"},
+	{bson.M{"": uint64(258)},
+		"\x12\x00\x02\x01\x00\x00\x00\x00\x00\x00"},
+	{bson.M{"": uint64(258 << 32)},
+		"\x12\x00\x00\x00\x00\x00\x02\x01\x00\x00"},
+
+	// This will unmarshal as int.
+	{bson.M{"": int32(258)},
+		"\x10\x00\x02\x01\x00\x00"},
+
+	// That's a special case. The unsigned value is too large for an int32,
+	// so an int64 is used instead.
+	{bson.M{"": uint32(1<<32 - 1)},
+		"\x12\x00\xFF\xFF\xFF\xFF\x00\x00\x00\x00"},
+	{bson.M{"": uint(1<<32 - 1)},
+		"\x12\x00\xFF\xFF\xFF\xFF\x00\x00\x00\x00"},
+}
+
+func (s *S) TestOneWayMarshalItems(c *C) {
+	for i, item := range oneWayMarshalItems {
+		data, err := bson.Marshal(item.obj)
+		c.Assert(err, IsNil)
+		c.Assert(string(data), Equals, wrapInDoc(item.data),
+			Commentf("Failed on item %d", i))
+	}
+}
+
+// --------------------------------------------------------------------------
+// Two-way tests for user-defined structures using the samples
+// from bsonspec.org.
+
+type specSample1 struct {
+	Hello string
+}
+
+type specSample2 struct {
+	BSON []interface{} "BSON"
+}
+
+var structSampleItems = []testItemType{
+	{&specSample1{"world"},
+		"\x16\x00\x00\x00\x02hello\x00\x06\x00\x00\x00world\x00\x00"},
+	{&specSample2{[]interface{}{"awesome", float64(5.05), 1986}},
+		"1\x00\x00\x00\x04BSON\x00&\x00\x00\x00\x020\x00\x08\x00\x00\x00" +
+			"awesome\x00\x011\x00333333\x14@\x102\x00\xc2\x07\x00\x00\x00\x00"},
+}
+
+func (s *S) TestMarshalStructSampleItems(c *C) {
+	for i, item := range structSampleItems {
+		data, err := bson.Marshal(item.obj)
+		c.Assert(err, IsNil)
+		c.Assert(string(data), Equals, item.data,
+			Commentf("Failed on item %d", i))
+	}
+}
+
+func (s *S) TestUnmarshalStructSampleItems(c *C) {
+	for _, item := range structSampleItems {
+		testUnmarshal(c, item.data, item.obj)
+	}
+}
+
+func (s *S) Test64bitInt(c *C) {
+	var i int64 = (1 << 31)
+	if int(i) > 0 {
+		data, err := bson.Marshal(bson.M{"i": int(i)})
+		c.Assert(err, IsNil)
+		c.Assert(string(data), Equals, wrapInDoc("\x12i\x00\x00\x00\x00\x80\x00\x00\x00\x00"))
+
+		var result struct{ I int }
+		err = bson.Unmarshal(data, &result)
+		c.Assert(err, IsNil)
+		c.Assert(int64(result.I), Equals, i)
+	}
+}
+
+// --------------------------------------------------------------------------
+// Generic two-way struct marshaling tests.
+
+var bytevar = byte(8)
+var byteptr = &bytevar
+
+var structItems = []testItemType{
+	{&struct{ Ptr *byte }{nil},
+		"\x0Aptr\x00"},
+	{&struct{ Ptr *byte }{&bytevar},
+		"\x10ptr\x00\x08\x00\x00\x00"},
+	{&struct{ Ptr **byte }{&byteptr},
+		"\x10ptr\x00\x08\x00\x00\x00"},
+	{&struct{ Byte byte }{8},
+		"\x10byte\x00\x08\x00\x00\x00"},
+	{&struct{ Byte byte }{0},
+		"\x10byte\x00\x00\x00\x00\x00"},
+	{&struct {
+		V byte "Tag"
+	}{8},
+		"\x10Tag\x00\x08\x00\x00\x00"},
+	{&struct {
+		V *struct {
+			Byte byte
+		}
+	}{&struct{ Byte byte }{8}},
+		"\x03v\x00" + "\x0f\x00\x00\x00\x10byte\x00\b\x00\x00\x00\x00"},
+	{&struct{ priv byte }{}, ""},
+
+	// The order of the dumped fields should be the same in the struct.
+	{&struct{ A, C, B, D, F, E *byte }{},
+		"\x0Aa\x00\x0Ac\x00\x0Ab\x00\x0Ad\x00\x0Af\x00\x0Ae\x00"},
+
+	{&struct{ V bson.Raw }{bson.Raw{0x03, []byte("\x0f\x00\x00\x00\x10byte\x00\b\x00\x00\x00\x00")}},
+		"\x03v\x00" + "\x0f\x00\x00\x00\x10byte\x00\b\x00\x00\x00\x00"},
+	{&struct{ V bson.Raw }{bson.Raw{0x10, []byte("\x00\x00\x00\x00")}},
+		"\x10v\x00" + "\x00\x00\x00\x00"},
+
+	// Byte arrays.
+	{&struct{ V [2]byte }{[2]byte{'y', 'o'}},
+		"\x05v\x00\x02\x00\x00\x00\x00yo"},
+}
+
+func (s *S) TestMarshalStructItems(c *C) {
+	for i, item := range structItems {
+		data, err := bson.Marshal(item.obj)
+		c.Assert(err, IsNil)
+		c.Assert(string(data), Equals, wrapInDoc(item.data),
+			Commentf("Failed on item %d", i))
+	}
+}
+
+func (s *S) TestUnmarshalStructItems(c *C) {
+	for _, item := range structItems {
+		testUnmarshal(c, wrapInDoc(item.data), item.obj)
+	}
+}
+
+func (s *S) TestUnmarshalRawStructItems(c *C) {
+	for i, item := range structItems {
+		raw := bson.Raw{0x03, []byte(wrapInDoc(item.data))}
+		zero := makeZeroDoc(item.obj)
+		err := raw.Unmarshal(zero)
+		c.Assert(err, IsNil)
+		c.Assert(zero, DeepEquals, item.obj, Commentf("Failed on item %d: %#v", i, item))
+	}
+}
+
+func (s *S) TestUnmarshalRawNil(c *C) {
+	// Regression test: shouldn't try to nil out the pointer itself,
+	// as it's not settable.
+	raw := bson.Raw{0x0A, []byte{}}
+	err := raw.Unmarshal(&struct{}{})
+	c.Assert(err, IsNil)
+}
+
+// --------------------------------------------------------------------------
+// One-way marshaling tests.
+
+type dOnIface struct {
+	D interface{}
+}
+
+type ignoreField struct {
+	Before string
+	Ignore string `bson:"-"`
+	After  string
+}
+
+var marshalItems = []testItemType{
+	// Ordered document dump.  Will unmarshal as a dictionary by default.
+	{bson.D{{"a", nil}, {"c", nil}, {"b", nil}, {"d", nil}, {"f", nil}, {"e", true}},
+		"\x0Aa\x00\x0Ac\x00\x0Ab\x00\x0Ad\x00\x0Af\x00\x08e\x00\x01"},
+	{MyD{{"a", nil}, {"c", nil}, {"b", nil}, {"d", nil}, {"f", nil}, {"e", true}},
+		"\x0Aa\x00\x0Ac\x00\x0Ab\x00\x0Ad\x00\x0Af\x00\x08e\x00\x01"},
+	{&dOnIface{bson.D{{"a", nil}, {"c", nil}, {"b", nil}, {"d", true}}},
+		"\x03d\x00" + wrapInDoc("\x0Aa\x00\x0Ac\x00\x0Ab\x00\x08d\x00\x01")},
+
+	{bson.RawD{{"a", bson.Raw{0x0A, nil}}, {"c", bson.Raw{0x0A, nil}}, {"b", bson.Raw{0x08, []byte{0x01}}}},
+		"\x0Aa\x00" + "\x0Ac\x00" + "\x08b\x00\x01"},
+	{MyRawD{{"a", bson.Raw{0x0A, nil}}, {"c", bson.Raw{0x0A, nil}}, {"b", bson.Raw{0x08, []byte{0x01}}}},
+		"\x0Aa\x00" + "\x0Ac\x00" + "\x08b\x00\x01"},
+	{&dOnIface{bson.RawD{{"a", bson.Raw{0x0A, nil}}, {"c", bson.Raw{0x0A, nil}}, {"b", bson.Raw{0x08, []byte{0x01}}}}},
+		"\x03d\x00" + wrapInDoc("\x0Aa\x00"+"\x0Ac\x00"+"\x08b\x00\x01")},
+
+	{&ignoreField{"before", "ignore", "after"},
+		"\x02before\x00\a\x00\x00\x00before\x00\x02after\x00\x06\x00\x00\x00after\x00"},
+
+	// Marshalling a Raw document does nothing.
+	{bson.Raw{0x03, []byte(wrapInDoc("anything"))},
+		"anything"},
+	{bson.Raw{Data: []byte(wrapInDoc("anything"))},
+		"anything"},
+}
+
+func (s *S) TestMarshalOneWayItems(c *C) {
+	for _, item := range marshalItems {
+		data, err := bson.Marshal(item.obj)
+		c.Assert(err, IsNil)
+		c.Assert(string(data), Equals, wrapInDoc(item.data))
+	}
+}
+
+// --------------------------------------------------------------------------
+// One-way unmarshaling tests.
+
+var unmarshalItems = []testItemType{
+	// Field is private.  Should not attempt to unmarshal it.
+	{&struct{ priv byte }{},
+		"\x10priv\x00\x08\x00\x00\x00"},
+
+	// Wrong casing. Field names are lowercased.
+	{&struct{ Byte byte }{},
+		"\x10Byte\x00\x08\x00\x00\x00"},
+
+	// Ignore non-existing field.
+	{&struct{ Byte byte }{9},
+		"\x10boot\x00\x08\x00\x00\x00" + "\x10byte\x00\x09\x00\x00\x00"},
+
+	// Do not unmarshal on ignored field.
+	{&ignoreField{"before", "", "after"},
+		"\x02before\x00\a\x00\x00\x00before\x00" +
+			"\x02-\x00\a\x00\x00\x00ignore\x00" +
+			"\x02after\x00\x06\x00\x00\x00after\x00"},
+
+	// Ignore unsuitable types silently.
+	{map[string]string{"str": "s"},
+		"\x02str\x00\x02\x00\x00\x00s\x00" + "\x10int\x00\x01\x00\x00\x00"},
+	{map[string][]int{"array": []int{5, 9}},
+		"\x04array\x00" + wrapInDoc("\x100\x00\x05\x00\x00\x00"+"\x021\x00\x02\x00\x00\x00s\x00"+"\x102\x00\x09\x00\x00\x00")},
+
+	// Wrong type. Shouldn't init pointer.
+	{&struct{ Str *byte }{},
+		"\x02str\x00\x02\x00\x00\x00s\x00"},
+	{&struct{ Str *struct{ Str string } }{},
+		"\x02str\x00\x02\x00\x00\x00s\x00"},
+
+	// Ordered document.
+	{&struct{ bson.D }{bson.D{{"a", nil}, {"c", nil}, {"b", nil}, {"d", true}}},
+		"\x03d\x00" + wrapInDoc("\x0Aa\x00\x0Ac\x00\x0Ab\x00\x08d\x00\x01")},
+
+	// Raw document.
+	{&bson.Raw{0x03, []byte(wrapInDoc("\x10byte\x00\x08\x00\x00\x00"))},
+		"\x10byte\x00\x08\x00\x00\x00"},
+
+	// RawD document.
+	{&struct{ bson.RawD }{bson.RawD{{"a", bson.Raw{0x0A, []byte{}}}, {"c", bson.Raw{0x0A, []byte{}}}, {"b", bson.Raw{0x08, []byte{0x01}}}}},
+		"\x03rawd\x00" + wrapInDoc("\x0Aa\x00\x0Ac\x00\x08b\x00\x01")},
+
+	// Decode old binary.
+	{bson.M{"_": []byte("old")},
+		"\x05_\x00\x07\x00\x00\x00\x02\x03\x00\x00\x00old"},
+
+	// Decode old binary without length. According to the spec, this shouldn't happen.
+	{bson.M{"_": []byte("old")},
+		"\x05_\x00\x03\x00\x00\x00\x02old"},
+
+	// Decode a doc within a doc in to a slice within a doc; shouldn't error
+	{&struct{ Foo []string }{},
+		"\x03\x66\x6f\x6f\x00\x05\x00\x00\x00\x00"},
+}
+
+func (s *S) TestUnmarshalOneWayItems(c *C) {
+	for _, item := range unmarshalItems {
+		testUnmarshal(c, wrapInDoc(item.data), item.obj)
+	}
+}
+
+func (s *S) TestUnmarshalNilInStruct(c *C) {
+	// Nil is the default value, so we need to ensure it's indeed being set.
+	b := byte(1)
+	v := &struct{ Ptr *byte }{&b}
+	err := bson.Unmarshal([]byte(wrapInDoc("\x0Aptr\x00")), v)
+	c.Assert(err, IsNil)
+	c.Assert(v, DeepEquals, &struct{ Ptr *byte }{nil})
+}
+
+// --------------------------------------------------------------------------
+// Marshalling error cases.
+
+type structWithDupKeys struct {
+	Name  byte
+	Other byte "name" // Tag should precede.
+}
+
+var marshalErrorItems = []testItemType{
+	{bson.M{"": uint64(1 << 63)},
+		"BSON has no uint64 type, and value is too large to fit correctly in an int64"},
+	{bson.M{"": bson.ObjectId("tooshort")},
+		"ObjectIDs must be exactly 12 bytes long \\(got 8\\)"},
+	{int64(123),
+		"Can't marshal int64 as a BSON document"},
+	{bson.M{"": 1i},
+		"Can't marshal complex128 in a BSON document"},
+	{&structWithDupKeys{},
+		"Duplicated key 'name' in struct bson_test.structWithDupKeys"},
+	{bson.Raw{0xA, []byte{}},
+		"Attempted to marshal Raw kind 10 as a document"},
+	{bson.Raw{0x3, []byte{}},
+		"Attempted to marshal empty Raw document"},
+	{bson.M{"w": bson.Raw{0x3, []byte{}}},
+		"Attempted to marshal empty Raw document"},
+	{&inlineCantPtr{&struct{ A, B int }{1, 2}},
+		"Option ,inline needs a struct value or map field"},
+	{&inlineDupName{1, struct{ A, B int }{2, 3}},
+		"Duplicated key 'a' in struct bson_test.inlineDupName"},
+	{&inlineDupMap{},
+		"Multiple ,inline maps in struct bson_test.inlineDupMap"},
+	{&inlineBadKeyMap{},
+		"Option ,inline needs a map with string keys in struct bson_test.inlineBadKeyMap"},
+	{&inlineMap{A: 1, M: map[string]interface{}{"a": 1}},
+		`Can't have key "a" in inlined map; conflicts with struct field`},
+}
+
+func (s *S) TestMarshalErrorItems(c *C) {
+	for _, item := range marshalErrorItems {
+		data, err := bson.Marshal(item.obj)
+		c.Assert(err, ErrorMatches, item.data)
+		c.Assert(data, IsNil)
+	}
+}
+
+// --------------------------------------------------------------------------
+// Unmarshalling error cases.
+
+type unmarshalErrorType struct {
+	obj   interface{}
+	data  string
+	error string
+}
+
+var unmarshalErrorItems = []unmarshalErrorType{
+	// Tag name conflicts with existing parameter.
+	{&structWithDupKeys{},
+		"\x10name\x00\x08\x00\x00\x00",
+		"Duplicated key 'name' in struct bson_test.structWithDupKeys"},
+
+	// Non-string map key.
+	{map[int]interface{}{},
+		"\x10name\x00\x08\x00\x00\x00",
+		"BSON map must have string keys. Got: map\\[int\\]interface \\{\\}"},
+
+	{nil,
+		"\xEEname\x00",
+		"Unknown element kind \\(0xEE\\)"},
+
+	{struct{ Name bool }{},
+		"\x10name\x00\x08\x00\x00\x00",
+		"Unmarshal can't deal with struct values. Use a pointer."},
+
+	{123,
+		"\x10name\x00\x08\x00\x00\x00",
+		"Unmarshal needs a map or a pointer to a struct."},
+
+	{nil,
+		"\x08\x62\x00\x02",
+		"encoded boolean must be 1 or 0, found 2"},
+}
+
+func (s *S) TestUnmarshalErrorItems(c *C) {
+	for _, item := range unmarshalErrorItems {
+		data := []byte(wrapInDoc(item.data))
+		var value interface{}
+		switch reflect.ValueOf(item.obj).Kind() {
+		case reflect.Map, reflect.Ptr:
+			value = makeZeroDoc(item.obj)
+		case reflect.Invalid:
+			value = bson.M{}
+		default:
+			value = item.obj
+		}
+		err := bson.Unmarshal(data, value)
+		c.Assert(err, ErrorMatches, item.error)
+	}
+}
+
+type unmarshalRawErrorType struct {
+	obj   interface{}
+	raw   bson.Raw
+	error string
+}
+
+var unmarshalRawErrorItems = []unmarshalRawErrorType{
+	// Tag name conflicts with existing parameter.
+	{&structWithDupKeys{},
+		bson.Raw{0x03, []byte("\x10byte\x00\x08\x00\x00\x00")},
+		"Duplicated key 'name' in struct bson_test.structWithDupKeys"},
+
+	{&struct{}{},
+		bson.Raw{0xEE, []byte{}},
+		"Unknown element kind \\(0xEE\\)"},
+
+	{struct{ Name bool }{},
+		bson.Raw{0x10, []byte("\x08\x00\x00\x00")},
+		"Raw Unmarshal can't deal with struct values. Use a pointer."},
+
+	{123,
+		bson.Raw{0x10, []byte("\x08\x00\x00\x00")},
+		"Raw Unmarshal needs a map or a valid pointer."},
+}
+
+func (s *S) TestUnmarshalRawErrorItems(c *C) {
+	for i, item := range unmarshalRawErrorItems {
+		err := item.raw.Unmarshal(item.obj)
+		c.Assert(err, ErrorMatches, item.error, Commentf("Failed on item %d: %#v\n", i, item))
+	}
+}
+
+var corruptedData = []string{
+	"\x04\x00\x00\x00\x00",         // Document shorter than minimum
+	"\x06\x00\x00\x00\x00",         // Not enough data
+	"\x05\x00\x00",                 // Broken length
+	"\x05\x00\x00\x00\xff",         // Corrupted termination
+	"\x0A\x00\x00\x00\x0Aooop\x00", // Unfinished C string
+
+	// Array end past end of string (s[2]=0x07 is correct)
+	wrapInDoc("\x04\x00\x09\x00\x00\x00\x0A\x00\x00"),
+
+	// Array end within string, but past acceptable.
+	wrapInDoc("\x04\x00\x08\x00\x00\x00\x0A\x00\x00"),
+
+	// Document end within string, but past acceptable.
+	wrapInDoc("\x03\x00\x08\x00\x00\x00\x0A\x00\x00"),
+
+	// String with corrupted end.
+	wrapInDoc("\x02\x00\x03\x00\x00\x00yo\xFF"),
+
+	// String with negative length (issue #116).
+	"\x0c\x00\x00\x00\x02x\x00\xff\xff\xff\xff\x00",
+
+	// String with zero length (must include trailing '\x00')
+	"\x0c\x00\x00\x00\x02x\x00\x00\x00\x00\x00\x00",
+
+	// Binary with negative length.
+	"\r\x00\x00\x00\x05x\x00\xff\xff\xff\xff\x00\x00",
+}
+
+func (s *S) TestUnmarshalMapDocumentTooShort(c *C) {
+	for _, data := range corruptedData {
+		err := bson.Unmarshal([]byte(data), bson.M{})
+		c.Assert(err, ErrorMatches, "Document is corrupted")
+
+		err = bson.Unmarshal([]byte(data), &struct{}{})
+		c.Assert(err, ErrorMatches, "Document is corrupted")
+	}
+}
+
+// --------------------------------------------------------------------------
+// Setter test cases.
+
+var setterResult = map[string]error{}
+
+type setterType struct {
+	received interface{}
+}
+
+func (o *setterType) SetBSON(raw bson.Raw) error {
+	err := raw.Unmarshal(&o.received)
+	if err != nil {
+		panic("The panic:" + err.Error())
+	}
+	if s, ok := o.received.(string); ok {
+		if result, ok := setterResult[s]; ok {
+			return result
+		}
+	}
+	return nil
+}
+
+type ptrSetterDoc struct {
+	Field *setterType "_"
+}
+
+type valSetterDoc struct {
+	Field setterType "_"
+}
+
+func (s *S) TestUnmarshalAllItemsWithPtrSetter(c *C) {
+	for _, item := range allItems {
+		for i := 0; i != 2; i++ {
+			var field *setterType
+			if i == 0 {
+				obj := &ptrSetterDoc{}
+				err := bson.Unmarshal([]byte(wrapInDoc(item.data)), obj)
+				c.Assert(err, IsNil)
+				field = obj.Field
+			} else {
+				obj := &valSetterDoc{}
+				err := bson.Unmarshal([]byte(wrapInDoc(item.data)), obj)
+				c.Assert(err, IsNil)
+				field = &obj.Field
+			}
+			if item.data == "" {
+				// Nothing to unmarshal. Should be untouched.
+				if i == 0 {
+					c.Assert(field, IsNil)
+				} else {
+					c.Assert(field.received, IsNil)
+				}
+			} else {
+				expected := item.obj.(bson.M)["_"]
+				c.Assert(field, NotNil, Commentf("Pointer not initialized (%#v)", expected))
+				c.Assert(field.received, DeepEquals, expected)
+			}
+		}
+	}
+}
+
+func (s *S) TestUnmarshalWholeDocumentWithSetter(c *C) {
+	obj := &setterType{}
+	err := bson.Unmarshal([]byte(sampleItems[0].data), obj)
+	c.Assert(err, IsNil)
+	c.Assert(obj.received, DeepEquals, bson.M{"hello": "world"})
+}
+
+func (s *S) TestUnmarshalSetterOmits(c *C) {
+	setterResult["2"] = &bson.TypeError{}
+	setterResult["4"] = &bson.TypeError{}
+	defer func() {
+		delete(setterResult, "2")
+		delete(setterResult, "4")
+	}()
+
+	m := map[string]*setterType{}
+	data := wrapInDoc("\x02abc\x00\x02\x00\x00\x001\x00" +
+		"\x02def\x00\x02\x00\x00\x002\x00" +
+		"\x02ghi\x00\x02\x00\x00\x003\x00" +
+		"\x02jkl\x00\x02\x00\x00\x004\x00")
+	err := bson.Unmarshal([]byte(data), m)
+	c.Assert(err, IsNil)
+	c.Assert(m["abc"], NotNil)
+	c.Assert(m["def"], IsNil)
+	c.Assert(m["ghi"], NotNil)
+	c.Assert(m["jkl"], IsNil)
+
+	c.Assert(m["abc"].received, Equals, "1")
+	c.Assert(m["ghi"].received, Equals, "3")
+}
+
+func (s *S) TestUnmarshalSetterErrors(c *C) {
+	boom := errors.New("BOOM")
+	setterResult["2"] = boom
+	defer delete(setterResult, "2")
+
+	m := map[string]*setterType{}
+	data := wrapInDoc("\x02abc\x00\x02\x00\x00\x001\x00" +
+		"\x02def\x00\x02\x00\x00\x002\x00" +
+		"\x02ghi\x00\x02\x00\x00\x003\x00")
+	err := bson.Unmarshal([]byte(data), m)
+	c.Assert(err, Equals, boom)
+	c.Assert(m["abc"], NotNil)
+	c.Assert(m["def"], IsNil)
+	c.Assert(m["ghi"], IsNil)
+
+	c.Assert(m["abc"].received, Equals, "1")
+}
+
+func (s *S) TestDMap(c *C) {
+	d := bson.D{{"a", 1}, {"b", 2}}
+	c.Assert(d.Map(), DeepEquals, bson.M{"a": 1, "b": 2})
+}
+
+func (s *S) TestUnmarshalSetterSetZero(c *C) {
+	setterResult["foo"] = bson.SetZero
+	defer delete(setterResult, "foo")
+
+	data, err := bson.Marshal(bson.M{"field": "foo"})
+	c.Assert(err, IsNil)
+
+	m := map[string]*setterType{}
+	err = bson.Unmarshal([]byte(data), m)
+	c.Assert(err, IsNil)
+
+	value, ok := m["field"]
+	c.Assert(ok, Equals, true)
+	c.Assert(value, IsNil)
+}
+
+// --------------------------------------------------------------------------
+// Getter test cases.
+
+type typeWithGetter struct {
+	result interface{}
+	err    error
+}
+
+func (t *typeWithGetter) GetBSON() (interface{}, error) {
+	if t == nil {
+		return "<value is nil>", nil
+	}
+	return t.result, t.err
+}
+
+type docWithGetterField struct {
+	Field *typeWithGetter "_"
+}
+
+func (s *S) TestMarshalAllItemsWithGetter(c *C) {
+	for i, item := range allItems {
+		if item.data == "" {
+			continue
+		}
+		obj := &docWithGetterField{}
+		obj.Field = &typeWithGetter{result: item.obj.(bson.M)["_"]}
+		data, err := bson.Marshal(obj)
+		c.Assert(err, IsNil)
+		c.Assert(string(data), Equals, wrapInDoc(item.data),
+			Commentf("Failed on item #%d", i))
+	}
+}
+
+func (s *S) TestMarshalWholeDocumentWithGetter(c *C) {
+	obj := &typeWithGetter{result: sampleItems[0].obj}
+	data, err := bson.Marshal(obj)
+	c.Assert(err, IsNil)
+	c.Assert(string(data), Equals, sampleItems[0].data)
+}
+
+func (s *S) TestGetterErrors(c *C) {
+	e := errors.New("oops")
+
+	obj1 := &docWithGetterField{}
+	obj1.Field = &typeWithGetter{sampleItems[0].obj, e}
+	data, err := bson.Marshal(obj1)
+	c.Assert(err, ErrorMatches, "oops")
+	c.Assert(data, IsNil)
+
+	obj2 := &typeWithGetter{sampleItems[0].obj, e}
+	data, err = bson.Marshal(obj2)
+	c.Assert(err, ErrorMatches, "oops")
+	c.Assert(data, IsNil)
+}
+
+type intGetter int64
+
+func (t intGetter) GetBSON() (interface{}, error) {
+	return int64(t), nil
+}
+
+type typeWithIntGetter struct {
+	V intGetter ",minsize"
+}
+
+func (s *S) TestMarshalShortWithGetter(c *C) {
+	obj := typeWithIntGetter{42}
+	data, err := bson.Marshal(obj)
+	c.Assert(err, IsNil)
+	m := bson.M{}
+	err = bson.Unmarshal(data, m)
+	c.Assert(err, IsNil)
+	c.Assert(m["v"], Equals, 42)
+}
+
+func (s *S) TestMarshalWithGetterNil(c *C) {
+	obj := docWithGetterField{}
+	data, err := bson.Marshal(obj)
+	c.Assert(err, IsNil)
+	m := bson.M{}
+	err = bson.Unmarshal(data, m)
+	c.Assert(err, IsNil)
+	c.Assert(m, DeepEquals, bson.M{"_": "<value is nil>"})
+}
+
+// --------------------------------------------------------------------------
+// Cross-type conversion tests.
+
+type crossTypeItem struct {
+	obj1 interface{}
+	obj2 interface{}
+}
+
+type condStr struct {
+	V string ",omitempty"
+}
+type condStrNS struct {
+	V string `a:"A" bson:",omitempty" b:"B"`
+}
+type condBool struct {
+	V bool ",omitempty"
+}
+type condInt struct {
+	V int ",omitempty"
+}
+type condUInt struct {
+	V uint ",omitempty"
+}
+type condFloat struct {
+	V float64 ",omitempty"
+}
+type condIface struct {
+	V interface{} ",omitempty"
+}
+type condPtr struct {
+	V *bool ",omitempty"
+}
+type condSlice struct {
+	V []string ",omitempty"
+}
+type condMap struct {
+	V map[string]int ",omitempty"
+}
+type namedCondStr struct {
+	V string "myv,omitempty"
+}
+type condTime struct {
+	V time.Time ",omitempty"
+}
+type condStruct struct {
+	V struct{ A []int } ",omitempty"
+}
+type condRaw struct {
+	V bson.Raw ",omitempty"
+}
+
+type shortInt struct {
+	V int64 ",minsize"
+}
+type shortUint struct {
+	V uint64 ",minsize"
+}
+type shortIface struct {
+	V interface{} ",minsize"
+}
+type shortPtr struct {
+	V *int64 ",minsize"
+}
+type shortNonEmptyInt struct {
+	V int64 ",minsize,omitempty"
+}
+
+type inlineInt struct {
+	V struct{ A, B int } ",inline"
+}
+type inlineCantPtr struct {
+	V *struct{ A, B int } ",inline"
+}
+type inlineDupName struct {
+	A int
+	V struct{ A, B int } ",inline"
+}
+type inlineMap struct {
+	A int
+	M map[string]interface{} ",inline"
+}
+type inlineMapInt struct {
+	A int
+	M map[string]int ",inline"
+}
+type inlineMapMyM struct {
+	A int
+	M MyM ",inline"
+}
+type inlineDupMap struct {
+	M1 map[string]interface{} ",inline"
+	M2 map[string]interface{} ",inline"
+}
+type inlineBadKeyMap struct {
+	M map[int]int ",inline"
+}
+type inlineUnexported struct {
+	M          map[string]interface{} ",inline"
+	unexported ",inline"
+}
+type unexported struct {
+	A int
+}
+
+type getterSetterD bson.D
+
+func (s getterSetterD) GetBSON() (interface{}, error) {
+	if len(s) == 0 {
+		return bson.D{}, nil
+	}
+	return bson.D(s[:len(s)-1]), nil
+}
+
+func (s *getterSetterD) SetBSON(raw bson.Raw) error {
+	var doc bson.D
+	err := raw.Unmarshal(&doc)
+	doc = append(doc, bson.DocElem{"suffix", true})
+	*s = getterSetterD(doc)
+	return err
+}
+
+type getterSetterInt int
+
+func (i getterSetterInt) GetBSON() (interface{}, error) {
+	return bson.D{{"a", int(i)}}, nil
+}
+
+func (i *getterSetterInt) SetBSON(raw bson.Raw) error {
+	var doc struct{ A int }
+	err := raw.Unmarshal(&doc)
+	*i = getterSetterInt(doc.A)
+	return err
+}
+
+type ifaceType interface {
+	Hello()
+}
+
+type ifaceSlice []ifaceType
+
+func (s *ifaceSlice) SetBSON(raw bson.Raw) error {
+	var ns []int
+	if err := raw.Unmarshal(&ns); err != nil {
+		return err
+	}
+	*s = make(ifaceSlice, ns[0])
+	return nil
+}
+
+func (s ifaceSlice) GetBSON() (interface{}, error) {
+	return []int{len(s)}, nil
+}
+
+type (
+	MyString string
+	MyBytes  []byte
+	MyBool   bool
+	MyD      []bson.DocElem
+	MyRawD   []bson.RawDocElem
+	MyM      map[string]interface{}
+)
+
+var (
+	truevar  = true
+	falsevar = false
+
+	int64var = int64(42)
+	int64ptr = &int64var
+	intvar   = int(42)
+	intptr   = &intvar
+
+	gsintvar = getterSetterInt(42)
+)
+
+func parseURL(s string) *url.URL {
+	u, err := url.Parse(s)
+	if err != nil {
+		panic(err)
+	}
+	return u
+}
+
+// That's a pretty fun test.  It will dump the first item, generate a zero
+// value equivalent to the second one, load the dumped data onto it, and then
+// verify that the resulting value is deep-equal to the untouched second value.
+// Then, it will do the same in the *opposite* direction!
+var twoWayCrossItems = []crossTypeItem{
+	// int<=>int
+	{&struct{ I int }{42}, &struct{ I int8 }{42}},
+	{&struct{ I int }{42}, &struct{ I int32 }{42}},
+	{&struct{ I int }{42}, &struct{ I int64 }{42}},
+	{&struct{ I int8 }{42}, &struct{ I int32 }{42}},
+	{&struct{ I int8 }{42}, &struct{ I int64 }{42}},
+	{&struct{ I int32 }{42}, &struct{ I int64 }{42}},
+
+	// uint<=>uint
+	{&struct{ I uint }{42}, &struct{ I uint8 }{42}},
+	{&struct{ I uint }{42}, &struct{ I uint32 }{42}},
+	{&struct{ I uint }{42}, &struct{ I uint64 }{42}},
+	{&struct{ I uint8 }{42}, &struct{ I uint32 }{42}},
+	{&struct{ I uint8 }{42}, &struct{ I uint64 }{42}},
+	{&struct{ I uint32 }{42}, &struct{ I uint64 }{42}},
+
+	// float32<=>float64
+	{&struct{ I float32 }{42}, &struct{ I float64 }{42}},
+
+	// int<=>uint
+	{&struct{ I uint }{42}, &struct{ I int }{42}},
+	{&struct{ I uint }{42}, &struct{ I int8 }{42}},
+	{&struct{ I uint }{42}, &struct{ I int32 }{42}},
+	{&struct{ I uint }{42}, &struct{ I int64 }{42}},
+	{&struct{ I uint8 }{42}, &struct{ I int }{42}},
+	{&struct{ I uint8 }{42}, &struct{ I int8 }{42}},
+	{&struct{ I uint8 }{42}, &struct{ I int32 }{42}},
+	{&struct{ I uint8 }{42}, &struct{ I int64 }{42}},
+	{&struct{ I uint32 }{42}, &struct{ I int }{42}},
+	{&struct{ I uint32 }{42}, &struct{ I int8 }{42}},
+	{&struct{ I uint32 }{42}, &struct{ I int32 }{42}},
+	{&struct{ I uint32 }{42}, &struct{ I int64 }{42}},
+	{&struct{ I uint64 }{42}, &struct{ I int }{42}},
+	{&struct{ I uint64 }{42}, &struct{ I int8 }{42}},
+	{&struct{ I uint64 }{42}, &struct{ I int32 }{42}},
+	{&struct{ I uint64 }{42}, &struct{ I int64 }{42}},
+
+	// int <=> float
+	{&struct{ I int }{42}, &struct{ I float64 }{42}},
+
+	// int <=> bool
+	{&struct{ I int }{1}, &struct{ I bool }{true}},
+	{&struct{ I int }{0}, &struct{ I bool }{false}},
+
+	// uint <=> float64
+	{&struct{ I uint }{42}, &struct{ I float64 }{42}},
+
+	// uint <=> bool
+	{&struct{ I uint }{1}, &struct{ I bool }{true}},
+	{&struct{ I uint }{0}, &struct{ I bool }{false}},
+
+	// float64 <=> bool
+	{&struct{ I float64 }{1}, &struct{ I bool }{true}},
+	{&struct{ I float64 }{0}, &struct{ I bool }{false}},
+
+	// string <=> string and string <=> []byte
+	{&struct{ S []byte }{[]byte("abc")}, &struct{ S string }{"abc"}},
+	{&struct{ S []byte }{[]byte("def")}, &struct{ S bson.Symbol }{"def"}},
+	{&struct{ S string }{"ghi"}, &struct{ S bson.Symbol }{"ghi"}},
+
+	// map <=> struct
+	{&struct {
+		A struct {
+			B, C int
+		}
+	}{struct{ B, C int }{1, 2}},
+		map[string]map[string]int{"a": map[string]int{"b": 1, "c": 2}}},
+
+	{&struct{ A bson.Symbol }{"abc"}, map[string]string{"a": "abc"}},
+	{&struct{ A bson.Symbol }{"abc"}, map[string][]byte{"a": []byte("abc")}},
+	{&struct{ A []byte }{[]byte("abc")}, map[string]string{"a": "abc"}},
+	{&struct{ A uint }{42}, map[string]int{"a": 42}},
+	{&struct{ A uint }{42}, map[string]float64{"a": 42}},
+	{&struct{ A uint }{1}, map[string]bool{"a": true}},
+	{&struct{ A int }{42}, map[string]uint{"a": 42}},
+	{&struct{ A int }{42}, map[string]float64{"a": 42}},
+	{&struct{ A int }{1}, map[string]bool{"a": true}},
+	{&struct{ A float64 }{42}, map[string]float32{"a": 42}},
+	{&struct{ A float64 }{42}, map[string]int{"a": 42}},
+	{&struct{ A float64 }{42}, map[string]uint{"a": 42}},
+	{&struct{ A float64 }{1}, map[string]bool{"a": true}},
+	{&struct{ A bool }{true}, map[string]int{"a": 1}},
+	{&struct{ A bool }{true}, map[string]uint{"a": 1}},
+	{&struct{ A bool }{true}, map[string]float64{"a": 1}},
+	{&struct{ A **byte }{&byteptr}, map[string]byte{"a": 8}},
+
+	// url.URL <=> string
+	{&struct{ URL *url.URL }{parseURL("h://e.c/p")}, map[string]string{"url": "h://e.c/p"}},
+	{&struct{ URL url.URL }{*parseURL("h://e.c/p")}, map[string]string{"url": "h://e.c/p"}},
+
+	// Slices
+	{&struct{ S []int }{[]int{1, 2, 3}}, map[string][]int{"s": []int{1, 2, 3}}},
+	{&struct{ S *[]int }{&[]int{1, 2, 3}}, map[string][]int{"s": []int{1, 2, 3}}},
+
+	// Conditionals
+	{&condBool{true}, map[string]bool{"v": true}},
+	{&condBool{}, map[string]bool{}},
+	{&condInt{1}, map[string]int{"v": 1}},
+	{&condInt{}, map[string]int{}},
+	{&condUInt{1}, map[string]uint{"v": 1}},
+	{&condUInt{}, map[string]uint{}},
+	{&condFloat{}, map[string]int{}},
+	{&condStr{"yo"}, map[string]string{"v": "yo"}},
+	{&condStr{}, map[string]string{}},
+	{&condStrNS{"yo"}, map[string]string{"v": "yo"}},
+	{&condStrNS{}, map[string]string{}},
+	{&condSlice{[]string{"yo"}}, map[string][]string{"v": []string{"yo"}}},
+	{&condSlice{}, map[string][]string{}},
+	{&condMap{map[string]int{"k": 1}}, bson.M{"v": bson.M{"k": 1}}},
+	{&condMap{}, map[string][]string{}},
+	{&condIface{"yo"}, map[string]string{"v": "yo"}},
+	{&condIface{""}, map[string]string{"v": ""}},
+	{&condIface{}, map[string]string{}},
+	{&condPtr{&truevar}, map[string]bool{"v": true}},
+	{&condPtr{&falsevar}, map[string]bool{"v": false}},
+	{&condPtr{}, map[string]string{}},
+
+	{&condTime{time.Unix(123456789, 123e6)}, map[string]time.Time{"v": time.Unix(123456789, 123e6)}},
+	{&condTime{}, map[string]string{}},
+
+	{&condStruct{struct{ A []int }{[]int{1}}}, bson.M{"v": bson.M{"a": []interface{}{1}}}},
+	{&condStruct{struct{ A []int }{}}, bson.M{}},
+
+	{&condRaw{bson.Raw{Kind: 0x0A, Data: []byte{}}}, bson.M{"v": nil}},
+	{&condRaw{bson.Raw{Kind: 0x00}}, bson.M{}},
+
+	{&namedCondStr{"yo"}, map[string]string{"myv": "yo"}},
+	{&namedCondStr{}, map[string]string{}},
+
+	{&shortInt{1}, map[string]interface{}{"v": 1}},
+	{&shortInt{1 << 30}, map[string]interface{}{"v": 1 << 30}},
+	{&shortInt{1 << 31}, map[string]interface{}{"v": int64(1 << 31)}},
+	{&shortUint{1 << 30}, map[string]interface{}{"v": 1 << 30}},
+	{&shortUint{1 << 31}, map[string]interface{}{"v": int64(1 << 31)}},
+	{&shortIface{int64(1) << 31}, map[string]interface{}{"v": int64(1 << 31)}},
+	{&shortPtr{int64ptr}, map[string]interface{}{"v": intvar}},
+
+	{&shortNonEmptyInt{1}, map[string]interface{}{"v": 1}},
+	{&shortNonEmptyInt{1 << 31}, map[string]interface{}{"v": int64(1 << 31)}},
+	{&shortNonEmptyInt{}, map[string]interface{}{}},
+
+	{&inlineInt{struct{ A, B int }{1, 2}}, map[string]interface{}{"a": 1, "b": 2}},
+	{&inlineMap{A: 1, M: map[string]interface{}{"b": 2}}, map[string]interface{}{"a": 1, "b": 2}},
+	{&inlineMap{A: 1, M: nil}, map[string]interface{}{"a": 1}},
+	{&inlineMapInt{A: 1, M: map[string]int{"b": 2}}, map[string]int{"a": 1, "b": 2}},
+	{&inlineMapInt{A: 1, M: nil}, map[string]int{"a": 1}},
+	{&inlineMapMyM{A: 1, M: MyM{"b": MyM{"c": 3}}}, map[string]interface{}{"a": 1, "b": map[string]interface{}{"c": 3}}},
+	{&inlineUnexported{M: map[string]interface{}{"b": 1}, unexported: unexported{A: 2}}, map[string]interface{}{"b": 1, "a": 2}},
+
+	// []byte <=> Binary
+	{&struct{ B []byte }{[]byte("abc")}, map[string]bson.Binary{"b": bson.Binary{Data: []byte("abc")}}},
+
+	// []byte <=> MyBytes
+	{&struct{ B MyBytes }{[]byte("abc")}, map[string]string{"b": "abc"}},
+	{&struct{ B MyBytes }{[]byte{}}, map[string]string{"b": ""}},
+	{&struct{ B MyBytes }{}, map[string]bool{}},
+	{&struct{ B []byte }{[]byte("abc")}, map[string]MyBytes{"b": []byte("abc")}},
+
+	// bool <=> MyBool
+	{&struct{ B MyBool }{true}, map[string]bool{"b": true}},
+	{&struct{ B MyBool }{}, map[string]bool{"b": false}},
+	{&struct{ B MyBool }{}, map[string]string{}},
+	{&struct{ B bool }{}, map[string]MyBool{"b": false}},
+
+	// arrays
+	{&struct{ V [2]int }{[...]int{1, 2}}, map[string][2]int{"v": [2]int{1, 2}}},
+	{&struct{ V [2]byte }{[...]byte{1, 2}}, map[string][2]byte{"v": [2]byte{1, 2}}},
+
+	// zero time
+	{&struct{ V time.Time }{}, map[string]interface{}{"v": time.Time{}}},
+
+	// zero time + 1 second + 1 millisecond; overflows int64 as nanoseconds
+	{&struct{ V time.Time }{time.Unix(-62135596799, 1e6).Local()},
+		map[string]interface{}{"v": time.Unix(-62135596799, 1e6).Local()}},
+
+	// bson.D <=> []DocElem
+	{&bson.D{{"a", bson.D{{"b", 1}, {"c", 2}}}}, &bson.D{{"a", bson.D{{"b", 1}, {"c", 2}}}}},
+	{&bson.D{{"a", bson.D{{"b", 1}, {"c", 2}}}}, &MyD{{"a", MyD{{"b", 1}, {"c", 2}}}}},
+	{&struct{ V MyD }{MyD{{"a", 1}}}, &bson.D{{"v", bson.D{{"a", 1}}}}},
+
+	// bson.RawD <=> []RawDocElem
+	{&bson.RawD{{"a", bson.Raw{0x08, []byte{0x01}}}}, &bson.RawD{{"a", bson.Raw{0x08, []byte{0x01}}}}},
+	{&bson.RawD{{"a", bson.Raw{0x08, []byte{0x01}}}}, &MyRawD{{"a", bson.Raw{0x08, []byte{0x01}}}}},
+
+	// bson.M <=> map
+	{bson.M{"a": bson.M{"b": 1, "c": 2}}, MyM{"a": MyM{"b": 1, "c": 2}}},
+	{bson.M{"a": bson.M{"b": 1, "c": 2}}, map[string]interface{}{"a": map[string]interface{}{"b": 1, "c": 2}}},
+
+	// bson.M <=> map[MyString]
+	{bson.M{"a": bson.M{"b": 1, "c": 2}}, map[MyString]interface{}{"a": map[MyString]interface{}{"b": 1, "c": 2}}},
+
+	// json.Number <=> int64, float64
+	{&struct{ N json.Number }{"5"}, map[string]interface{}{"n": int64(5)}},
+	{&struct{ N json.Number }{"5.05"}, map[string]interface{}{"n": 5.05}},
+	{&struct{ N json.Number }{"9223372036854776000"}, map[string]interface{}{"n": float64(1 << 63)}},
+
+	// bson.D <=> non-struct getter/setter
+	{&bson.D{{"a", 1}}, &getterSetterD{{"a", 1}, {"suffix", true}}},
+	{&bson.D{{"a", 42}}, &gsintvar},
+
+	// Interface slice setter.
+	{&struct{ V ifaceSlice }{ifaceSlice{nil, nil, nil}}, bson.M{"v": []interface{}{3}}},
+}
+
+// Same thing, but only one way (obj1 => obj2).
+var oneWayCrossItems = []crossTypeItem{
+	// map <=> struct
+	{map[string]interface{}{"a": 1, "b": "2", "c": 3}, map[string]int{"a": 1, "c": 3}},
+
+	// inline map elides badly typed values
+	{map[string]interface{}{"a": 1, "b": "2", "c": 3}, &inlineMapInt{A: 1, M: map[string]int{"c": 3}}},
+
+	// Can't decode int into struct.
+	{bson.M{"a": bson.M{"b": 2}}, &struct{ A bool }{}},
+
+	// Would get decoded into a int32 too in the opposite direction.
+	{&shortIface{int64(1) << 30}, map[string]interface{}{"v": 1 << 30}},
+
+	// Ensure omitempty on struct with private fields works properly.
+	{&struct {
+		V struct{ v time.Time } ",omitempty"
+	}{}, map[string]interface{}{}},
+
+	// Attempt to marshal slice into RawD (issue #120).
+	{bson.M{"x": []int{1, 2, 3}}, &struct{ X bson.RawD }{}},
+}
+
+func testCrossPair(c *C, dump interface{}, load interface{}) {
+	c.Logf("Dump: %#v", dump)
+	c.Logf("Load: %#v", load)
+	zero := makeZeroDoc(load)
+	data, err := bson.Marshal(dump)
+	c.Assert(err, IsNil)
+	c.Logf("Dumped: %#v", string(data))
+	err = bson.Unmarshal(data, zero)
+	c.Assert(err, IsNil)
+	c.Logf("Loaded: %#v", zero)
+	c.Assert(zero, DeepEquals, load)
+}
+
+func (s *S) TestTwoWayCrossPairs(c *C) {
+	for _, item := range twoWayCrossItems {
+		testCrossPair(c, item.obj1, item.obj2)
+		testCrossPair(c, item.obj2, item.obj1)
+	}
+}
+
+func (s *S) TestOneWayCrossPairs(c *C) {
+	for _, item := range oneWayCrossItems {
+		testCrossPair(c, item.obj1, item.obj2)
+	}
+}
+
+// --------------------------------------------------------------------------
+// ObjectId hex representation test.
+
+func (s *S) TestObjectIdHex(c *C) {
+	id := bson.ObjectIdHex("4d88e15b60f486e428412dc9")
+	c.Assert(id.String(), Equals, `ObjectIdHex("4d88e15b60f486e428412dc9")`)
+	c.Assert(id.Hex(), Equals, "4d88e15b60f486e428412dc9")
+}
+
+func (s *S) TestIsObjectIdHex(c *C) {
+	test := []struct {
+		id    string
+		valid bool
+	}{
+		{"4d88e15b60f486e428412dc9", true},
+		{"4d88e15b60f486e428412dc", false},
+		{"4d88e15b60f486e428412dc9e", false},
+		{"4d88e15b60f486e428412dcx", false},
+	}
+	for _, t := range test {
+		c.Assert(bson.IsObjectIdHex(t.id), Equals, t.valid)
+	}
+}
+
+// --------------------------------------------------------------------------
+// ObjectId parts extraction tests.
+
+type objectIdParts struct {
+	id        bson.ObjectId
+	timestamp int64
+	machine   []byte
+	pid       uint16
+	counter   int32
+}
+
+var objectIds = []objectIdParts{
+	objectIdParts{
+		bson.ObjectIdHex("4d88e15b60f486e428412dc9"),
+		1300816219,
+		[]byte{0x60, 0xf4, 0x86},
+		0xe428,
+		4271561,
+	},
+	objectIdParts{
+		bson.ObjectIdHex("000000000000000000000000"),
+		0,
+		[]byte{0x00, 0x00, 0x00},
+		0x0000,
+		0,
+	},
+	objectIdParts{
+		bson.ObjectIdHex("00000000aabbccddee000001"),
+		0,
+		[]byte{0xaa, 0xbb, 0xcc},
+		0xddee,
+		1,
+	},
+}
+
+func (s *S) TestObjectIdPartsExtraction(c *C) {
+	for i, v := range objectIds {
+		t := time.Unix(v.timestamp, 0)
+		c.Assert(v.id.Time(), Equals, t, Commentf("#%d Wrong timestamp value", i))
+		c.Assert(v.id.Machine(), DeepEquals, v.machine, Commentf("#%d Wrong machine id value", i))
+		c.Assert(v.id.Pid(), Equals, v.pid, Commentf("#%d Wrong pid value", i))
+		c.Assert(v.id.Counter(), Equals, v.counter, Commentf("#%d Wrong counter value", i))
+	}
+}
+
+func (s *S) TestNow(c *C) {
+	before := time.Now()
+	time.Sleep(1e6)
+	now := bson.Now()
+	time.Sleep(1e6)
+	after := time.Now()
+	c.Assert(now.After(before) && now.Before(after), Equals, true, Commentf("now=%s, before=%s, after=%s", now, before, after))
+}
+
+// --------------------------------------------------------------------------
+// ObjectId generation tests.
+
+func (s *S) TestNewObjectId(c *C) {
+	// Generate 10 ids
+	ids := make([]bson.ObjectId, 10)
+	for i := 0; i < 10; i++ {
+		ids[i] = bson.NewObjectId()
+	}
+	for i := 1; i < 10; i++ {
+		prevId := ids[i-1]
+		id := ids[i]
+		// Test for uniqueness among all other 9 generated ids
+		for j, tid := range ids {
+			if j != i {
+				c.Assert(id, Not(Equals), tid, Commentf("Generated ObjectId is not unique"))
+			}
+		}
+		// Check that timestamp was incremented and is within 30 seconds of the previous one
+		secs := id.Time().Sub(prevId.Time()).Seconds()
+		c.Assert((secs >= 0 && secs <= 30), Equals, true, Commentf("Wrong timestamp in generated ObjectId"))
+		// Check that machine ids are the same
+		c.Assert(id.Machine(), DeepEquals, prevId.Machine())
+		// Check that pids are the same
+		c.Assert(id.Pid(), Equals, prevId.Pid())
+		// Test for proper increment
+		delta := int(id.Counter() - prevId.Counter())
+		c.Assert(delta, Equals, 1, Commentf("Wrong increment in generated ObjectId"))
+	}
+}
+
+func (s *S) TestNewObjectIdWithTime(c *C) {
+	t := time.Unix(12345678, 0)
+	id := bson.NewObjectIdWithTime(t)
+	c.Assert(id.Time(), Equals, t)
+	c.Assert(id.Machine(), DeepEquals, []byte{0x00, 0x00, 0x00})
+	c.Assert(int(id.Pid()), Equals, 0)
+	c.Assert(int(id.Counter()), Equals, 0)
+}
+
+// --------------------------------------------------------------------------
+// ObjectId JSON marshalling.
+
+type jsonType struct {
+	Id bson.ObjectId
+}
+
+var jsonIdTests = []struct {
+	value     jsonType
+	json      string
+	marshal   bool
+	unmarshal bool
+	error     string
+}{{
+	value:     jsonType{Id: bson.ObjectIdHex("4d88e15b60f486e428412dc9")},
+	json:      `{"Id":"4d88e15b60f486e428412dc9"}`,
+	marshal:   true,
+	unmarshal: true,
+}, {
+	value:     jsonType{},
+	json:      `{"Id":""}`,
+	marshal:   true,
+	unmarshal: true,
+}, {
+	value:     jsonType{},
+	json:      `{"Id":null}`,
+	marshal:   false,
+	unmarshal: true,
+}, {
+	json:      `{"Id":"4d88e15b60f486e428412dc9A"}`,
+	error:     `invalid ObjectId in JSON: "4d88e15b60f486e428412dc9A"`,
+	marshal:   false,
+	unmarshal: true,
+}, {
+	json:      `{"Id":"4d88e15b60f486e428412dcZ"}`,
+	error:     `invalid ObjectId in JSON: "4d88e15b60f486e428412dcZ" .*`,
+	marshal:   false,
+	unmarshal: true,
+}}
+
+func (s *S) TestObjectIdJSONMarshaling(c *C) {
+	for _, test := range jsonIdTests {
+		if test.marshal {
+			data, err := json.Marshal(&test.value)
+			if test.error == "" {
+				c.Assert(err, IsNil)
+				c.Assert(string(data), Equals, test.json)
+			} else {
+				c.Assert(err, ErrorMatches, test.error)
+			}
+		}
+
+		if test.unmarshal {
+			var value jsonType
+			err := json.Unmarshal([]byte(test.json), &value)
+			if test.error == "" {
+				c.Assert(err, IsNil)
+				c.Assert(value, DeepEquals, test.value)
+			} else {
+				c.Assert(err, ErrorMatches, test.error)
+			}
+		}
+	}
+}
+
+// --------------------------------------------------------------------------
+// Spec tests
+
+type specTest struct {
+	Description string
+	Documents   []struct {
+		Decoded    map[string]interface{}
+		Encoded    string
+		DecodeOnly bool `yaml:"decodeOnly"`
+		Error      interface{}
+	}
+}
+
+func (s *S) TestSpecTests(c *C) {
+	for _, data := range specTests {
+		var test specTest
+		err := yaml.Unmarshal([]byte(data), &test)
+		c.Assert(err, IsNil)
+
+		c.Logf("Running spec test set %q", test.Description)
+
+		for _, doc := range test.Documents {
+			if doc.Error != nil {
+				continue
+			}
+			c.Logf("Ensuring %q decodes as %v", doc.Encoded, doc.Decoded)
+			var decoded map[string]interface{}
+			encoded, err := hex.DecodeString(doc.Encoded)
+			c.Assert(err, IsNil)
+			err = bson.Unmarshal(encoded, &decoded)
+			c.Assert(err, IsNil)
+			c.Assert(decoded, DeepEquals, doc.Decoded)
+		}
+
+		for _, doc := range test.Documents {
+			if doc.DecodeOnly || doc.Error != nil {
+				continue
+			}
+			c.Logf("Ensuring %v encodes as %q", doc.Decoded, doc.Encoded)
+			encoded, err := bson.Marshal(doc.Decoded)
+			c.Assert(err, IsNil)
+			c.Assert(strings.ToUpper(hex.EncodeToString(encoded)), Equals, doc.Encoded)
+		}
+
+		for _, doc := range test.Documents {
+			if doc.Error == nil {
+				continue
+			}
+			c.Logf("Ensuring %q errors when decoded: %s", doc.Encoded, doc.Error)
+			var decoded map[string]interface{}
+			encoded, err := hex.DecodeString(doc.Encoded)
+			c.Assert(err, IsNil)
+			err = bson.Unmarshal(encoded, &decoded)
+			c.Assert(err, NotNil)
+			c.Logf("Failed with: %v", err)
+		}
+	}
+}
+
+// --------------------------------------------------------------------------
+// ObjectId encoding.TextMarshaler / encoding.TextUnmarshaler support.
+
+var textIdTests = []struct {
+	value     bson.ObjectId
+	text      string
+	marshal   bool
+	unmarshal bool
+	error     string
+}{{
+	value:     bson.ObjectIdHex("4d88e15b60f486e428412dc9"),
+	text:      "4d88e15b60f486e428412dc9",
+	marshal:   true,
+	unmarshal: true,
+}, {
+	text:      "",
+	marshal:   true,
+	unmarshal: true,
+}, {
+	text:      "4d88e15b60f486e428412dc9A",
+	marshal:   false,
+	unmarshal: true,
+	error:     `invalid ObjectId: 4d88e15b60f486e428412dc9A`,
+}, {
+	text:      "4d88e15b60f486e428412dcZ",
+	marshal:   false,
+	unmarshal: true,
+	error:     `invalid ObjectId: 4d88e15b60f486e428412dcZ .*`,
+}}
+
+func (s *S) TestObjectIdTextMarshaling(c *C) {
+	for _, test := range textIdTests {
+		if test.marshal {
+			data, err := test.value.MarshalText()
+			if test.error == "" {
+				c.Assert(err, IsNil)
+				c.Assert(string(data), Equals, test.text)
+			} else {
+				c.Assert(err, ErrorMatches, test.error)
+			}
+		}
+
+		if test.unmarshal {
+			err := test.value.UnmarshalText([]byte(test.text))
+			if test.error == "" {
+				c.Assert(err, IsNil)
+				if test.value != "" {
+					value := bson.ObjectIdHex(test.text)
+					c.Assert(value, DeepEquals, test.value)
+				}
+			} else {
+				c.Assert(err, ErrorMatches, test.error)
+			}
+		}
+	}
+}
+
+// --------------------------------------------------------------------------
+// ObjectId XML marshalling.
+
+type xmlType struct {
+	Id bson.ObjectId
+}
+
+var xmlIdTests = []struct {
+	value     xmlType
+	xml       string
+	marshal   bool
+	unmarshal bool
+	error     string
+}{{
+	value:     xmlType{Id: bson.ObjectIdHex("4d88e15b60f486e428412dc9")},
+	xml:       "<xmlType><Id>4d88e15b60f486e428412dc9</Id></xmlType>",
+	marshal:   true,
+	unmarshal: true,
+}, {
+	value:     xmlType{},
+	xml:       "<xmlType><Id></Id></xmlType>",
+	marshal:   true,
+	unmarshal: true,
+}, {
+	xml:       "<xmlType><Id>4d88e15b60f486e428412dc9A</Id></xmlType>",
+	marshal:   false,
+	unmarshal: true,
+	error:     `invalid ObjectId: 4d88e15b60f486e428412dc9A`,
+}, {
+	xml:       "<xmlType><Id>4d88e15b60f486e428412dcZ</Id></xmlType>",
+	marshal:   false,
+	unmarshal: true,
+	error:     `invalid ObjectId: 4d88e15b60f486e428412dcZ .*`,
+}}
+
+func (s *S) TestObjectIdXMLMarshaling(c *C) {
+	for _, test := range xmlIdTests {
+		if test.marshal {
+			data, err := xml.Marshal(&test.value)
+			if test.error == "" {
+				c.Assert(err, IsNil)
+				c.Assert(string(data), Equals, test.xml)
+			} else {
+				c.Assert(err, ErrorMatches, test.error)
+			}
+		}
+
+		if test.unmarshal {
+			var value xmlType
+			err := xml.Unmarshal([]byte(test.xml), &value)
+			if test.error == "" {
+				c.Assert(err, IsNil)
+				c.Assert(value, DeepEquals, test.value)
+			} else {
+				c.Assert(err, ErrorMatches, test.error)
+			}
+		}
+	}
+}
+
+// --------------------------------------------------------------------------
+// Some simple benchmarks.
+
+type BenchT struct {
+	A, B, C, D, E, F string
+}
+
+type BenchRawT struct {
+	A string
+	B int
+	C bson.M
+	D []float64
+}
+
+func (s *S) BenchmarkUnmarshalStruct(c *C) {
+	v := BenchT{A: "A", D: "D", E: "E"}
+	data, err := bson.Marshal(&v)
+	if err != nil {
+		panic(err)
+	}
+	c.ResetTimer()
+	for i := 0; i < c.N; i++ {
+		err = bson.Unmarshal(data, &v)
+	}
+	if err != nil {
+		panic(err)
+	}
+}
+
+func (s *S) BenchmarkUnmarshalMap(c *C) {
+	m := bson.M{"a": "a", "d": "d", "e": "e"}
+	data, err := bson.Marshal(&m)
+	if err != nil {
+		panic(err)
+	}
+	c.ResetTimer()
+	for i := 0; i < c.N; i++ {
+		err = bson.Unmarshal(data, &m)
+	}
+	if err != nil {
+		panic(err)
+	}
+}
+
+func (s *S) BenchmarkUnmarshalRaw(c *C) {
+	var err error
+	m := BenchRawT{
+		A: "test_string",
+		B: 123,
+		C: bson.M{
+			"subdoc_int": 12312,
+			"subdoc_doc": bson.M{"1": 1},
+		},
+		D: []float64{0.0, 1.3333, -99.9997, 3.1415},
+	}
+	data, err := bson.Marshal(&m)
+	if err != nil {
+		panic(err)
+	}
+	raw := bson.Raw{}
+	c.ResetTimer()
+	for i := 0; i < c.N; i++ {
+		err = bson.Unmarshal(data, &raw)
+	}
+	if err != nil {
+		panic(err)
+	}
+}
+
+func (s *S) BenchmarkNewObjectId(c *C) {
+	for i := 0; i < c.N; i++ {
+		bson.NewObjectId()
+	}
+}

+ 310 - 0
src/gopkg.in/mgo.v2/bson/decimal.go

@@ -0,0 +1,310 @@
+// BSON library for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+//    list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+//    this list of conditions and the following disclaimer in the documentation
+//    and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package bson
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
// Decimal128 holds decimal128 BSON values.
//
// The value is kept in its 128-bit binary encoding: h holds the most
// significant 64 bits (sign bit, combination/exponent fields, and the top
// of the significand, as decoded by String) and l holds the least
// significant 64 bits of the significand.
type Decimal128 struct {
	h, l uint64
}
+
// String returns the canonical textual representation of the decimal128
// value, e.g. "2.000", "-1.00E-8", "1E+3", "NaN", "-Inf".
func (d Decimal128) String() string {
	var pos int     // positive sign
	var e int       // exponent
	var h, l uint64 // significand high/low

	// Sign is the top bit of h; pos==1 slices the leading '-' off the
	// constant strings returned below.
	if d.h>>63&1 == 0 {
		pos = 1
	}

	// The five combination bits distinguish NaN/Inf from finite values.
	switch d.h >> 58 & (1<<5 - 1) {
	case 0x1F:
		return "NaN"
	case 0x1E:
		return "-Inf"[pos:]
	}

	l = d.l
	if d.h>>61&3 == 3 {
		// Bits: 1*sign 2*ignored 14*exponent 111*significand.
		// Implicit 0b100 prefix in significand.
		e = int(d.h>>47&(1<<14-1)) - 6176
		//h = 4<<47 | d.h&(1<<47-1)
		// Spec says all of these values are out of range.
		h, l = 0, 0
	} else {
		// Bits: 1*sign 14*exponent 113*significand
		e = int(d.h>>49&(1<<14-1)) - 6176
		h = d.h & (1<<49 - 1)
	}

	// Would be handled by the logic below, but that's trivial and common.
	if h == 0 && l == 0 && e == 0 {
		return "-0"[pos:]
	}

	// Digits are produced right-to-left into repr, peeling nine decimal
	// digits at a time from the 128-bit significand via divmod.
	var repr [48]byte // Loop 5 times over 9 digits plus dot, negative sign, and leading zero.
	var last = len(repr)
	var i = len(repr)
	var dot = len(repr) + e
	var rem uint32
Loop:
	for d9 := 0; d9 < 5; d9++ {
		h, l, rem = divmod(h, l, 1e9)
		for d1 := 0; d1 < 9; d1++ {
			// Handle "-0.0", "0.00123400", "-1.00E-6", "1.050E+3", etc.
			if i < len(repr) && (dot == i || l == 0 && h == 0 && rem > 0 && rem < 10 && (dot < i-6 || e > 0)) {
				e += len(repr) - i
				i--
				repr[i] = '.'
				last = i - 1
				dot = len(repr) // Unmark.
			}
			c := '0' + byte(rem%10)
			rem /= 10
			i--
			repr[i] = c
			// Handle "0E+3", "1E+3", etc.
			if l == 0 && h == 0 && rem == 0 && i == len(repr)-1 && (dot < i-5 || e > 0) {
				last = i
				break Loop
			}
			if c != '0' {
				last = i
			}
			// Break early. Works without it, but why.
			if dot > i && l == 0 && h == 0 && rem == 0 {
				break Loop
			}
		}
	}
	// Place a tentative '-' before the digits; for non-negative values
	// pos==1 drops it when slicing below.
	repr[last-1] = '-'
	last--

	if e > 0 {
		return string(repr[last+pos:]) + "E+" + strconv.Itoa(e)
	}
	if e < 0 {
		return string(repr[last+pos:]) + "E" + strconv.Itoa(e)
	}
	return string(repr[last+pos:])
}
+
// divmod divides the 128-bit unsigned integer (h, l) by the 32-bit divisor
// div, returning the 128-bit quotient (qh, ql) and the remainder. It is
// schoolbook long division over four 32-bit limbs, most significant first.
func divmod(h, l uint64, div uint32) (qh, ql uint64, rem uint32) {
	d := uint64(div)
	limbs := [4]uint64{h >> 32, h & (1<<32 - 1), l >> 32, l & (1<<32 - 1)}
	var q [4]uint64
	var r uint64 // running remainder carried into the next limb
	for i, limb := range limbs {
		cur := r<<32 + limb
		q[i] = cur / d
		r = cur % d
	}
	return q[0]<<32 | q[1], q[2]<<32 | q[3], uint32(r)
}
+
+var dNaN = Decimal128{0x1F << 58, 0}
+var dPosInf = Decimal128{0x1E << 58, 0}
+var dNegInf = Decimal128{0x3E << 58, 0}
+
+func dErr(s string) (Decimal128, error) {
+	return dNaN, fmt.Errorf("cannot parse %q as a decimal128", s)
+}
+
// ParseDecimal128 parses s into a Decimal128 value.
// It accepts plain and scientific notation ("2.000", "-100E-10"), an
// optional leading sign, and the special values NaN and Inf/Infinity in
// any letter case. On failure it returns NaN and a descriptive error.
func ParseDecimal128(s string) (Decimal128, error) {
	orig := s
	if s == "" {
		return dErr(orig)
	}
	neg := s[0] == '-'
	if neg || s[0] == '+' {
		s = s[1:]
	}

	// Special values: "nan"/"inf" are 3 characters, "infinity" is 8;
	// matched case-insensitively after the cheap first-byte filter.
	if (len(s) == 3 || len(s) == 8) && (s[0] == 'N' || s[0] == 'n' || s[0] == 'I' || s[0] == 'i') {
		if s == "NaN" || s == "nan" || strings.EqualFold(s, "nan") {
			return dNaN, nil
		}
		if s == "Inf" || s == "inf" || strings.EqualFold(s, "inf") || strings.EqualFold(s, "infinity") {
			if neg {
				return dNegInf, nil
			}
			return dPosInf, nil
		}
		return dErr(orig)
	}

	var h, l uint64 // 128-bit significand accumulator
	var e int       // decimal exponent

	// Digits are batched into add (scaled by mul) and folded into (h, l)
	// via muladd each time a full batch of nine digits (mul == 1e9) is ready.
	var add, ovr uint32
	var mul uint32 = 1
	var dot = -1   // index just past the '.' once seen; -1 while unseen
	var digits = 0 // significant digits consumed so far (at most 34)
	var i = 0
	for i < len(s) {
		c := s[i]
		if mul == 1e9 {
			h, l, ovr = muladd(h, l, mul, add)
			mul, add = 1, 0
			// Reject once the significand exceeds its 113-bit field.
			if ovr > 0 || h&((1<<15-1)<<49) > 0 {
				return dErr(orig)
			}
		}
		if c >= '0' && c <= '9' {
			i++
			// Leading zeros are not significant.
			if c > '0' || digits > 0 {
				digits++
			}
			if digits > 34 {
				if c == '0' {
					// Exact rounding.
					e++
					continue
				}
				return dErr(orig)
			}
			mul *= 10
			add *= 10
			add += uint32(c - '0')
			continue
		}
		if c == '.' {
			i++
			// Reject a second dot, or a bare "." with nothing else.
			if dot >= 0 || i == 1 && len(s) == 1 {
				return dErr(orig)
			}
			if i == len(s) {
				break
			}
			if s[i] < '0' || s[i] > '9' || e > 0 {
				return dErr(orig)
			}
			dot = i
			continue
		}
		break
	}
	// No digits consumed at all.
	if i == 0 {
		return dErr(orig)
	}
	// Fold in any partial batch of digits still pending.
	if mul > 1 {
		h, l, ovr = muladd(h, l, mul, add)
		if ovr > 0 || h&((1<<15-1)<<49) > 0 {
			return dErr(orig)
		}
	}
	// Each fractional digit lowers the exponent by one.
	if dot >= 0 {
		e += dot - i
	}
	// Optional exponent suffix: 'E' or 'e', optional sign, then digits.
	if i+1 < len(s) && (s[i] == 'E' || s[i] == 'e') {
		i++
		eneg := s[i] == '-'
		if eneg || s[i] == '+' {
			i++
			if i == len(s) {
				return dErr(orig)
			}
		}
		n := 0
		for i < len(s) && n < 1e4 {
			c := s[i]
			i++
			if c < '0' || c > '9' {
				return dErr(orig)
			}
			n *= 10
			n += int(c - '0')
		}
		if eneg {
			n = -n
		}
		e += n
		for e < -6176 {
			// Subnormal.
			var div uint32 = 1
			for div < 1e9 && e < -6176 {
				div *= 10
				e++
			}
			var rem uint32
			h, l, rem = divmod(h, l, div)
			// Only exact scaling is accepted; inexact subnormals fail.
			if rem > 0 {
				return dErr(orig)
			}
		}
		for e > 6111 {
			// Clamped.
			var mul uint32 = 1
			for mul < 1e9 && e > 6111 {
				mul *= 10
				e--
			}
			h, l, ovr = muladd(h, l, mul, 0)
			if ovr > 0 || h&((1<<15-1)<<49) > 0 {
				return dErr(orig)
			}
		}
		if e < -6176 || e > 6111 {
			return dErr(orig)
		}
	}

	// Trailing garbage after the number.
	if i < len(s) {
		return dErr(orig)
	}

	// Pack the biased exponent into bits 49..62 and the sign into bit 63.
	h |= uint64(e+6176) & uint64(1<<14-1) << 49
	if neg {
		h |= 1 << 63
	}
	return Decimal128{h, l}, nil
}
+
// muladd multiplies the 128-bit unsigned integer (h, l) by mul and adds
// add, returning the 128-bit result plus any carry out of the top limb.
func muladd(h, l uint64, mul uint32, add uint32) (resh, resl uint64, overflow uint32) {
	const mask = 1<<32 - 1
	m := uint64(mul)

	// Multiply each 32-bit limb, propagating partial carries upward.
	p0 := m * (l & mask)
	p1 := p0>>32 + m*(l>>32)
	p2 := p1>>32 + m*(h&mask)
	p3 := p2>>32 + m*(h>>32)

	// Fold in the addend and normalize every limb to 32 bits.
	p0 = p0&mask + uint64(add)
	p1 = p1&mask + p0>>32
	p2 = p2&mask + p1>>32
	p3 = p3&mask + p2>>32

	return p3<<32 | p2&mask, p1<<32 | p0&mask, uint32(p3 >> 32)
}

+ 4109 - 0
src/gopkg.in/mgo.v2/bson/decimal_test.go

@@ -0,0 +1,4109 @@
+// BSON library for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+//    list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+//    this list of conditions and the following disclaimer in the documentation
+//    and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package bson_test
+
+import (
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"regexp"
+	"strings"
+
+	"gopkg.in/mgo.v2/bson"
+
+	. "gopkg.in/check.v1"
+)
+
+// --------------------------------------------------------------------------
+// Decimal tests
+
// decimalTests mirrors the layout of the decimal128 corpus JSON files
// embedded in decimalTestsJSON: a list of valid conversion cases plus a
// list of strings that must fail to parse.
type decimalTests struct {
	// Valid holds round-trip cases: hex-encoded BSON plus its extended
	// JSON form(s); Lossy marks values with no exact canonical round-trip.
	Valid []struct {
		Description      string `json:"description"`
		BSON             string `json:"bson"`
		CanonicalBSON    string `json:"canonical_bson"`
		ExtJSON          string `json:"extjson"`
		CanonicalExtJSON string `json:"canonical_extjson"`
		Lossy            bool   `json:"lossy"`
	} `json:"valid"`

	// ParseErrors holds strings ParseDecimal128 must reject.
	ParseErrors []struct {
		Description string `json:"description"`
		String      string `json:"string"`
	} `json:"parseErrors"`
}
+
// extJSONRepr extracts the "$numberDecimal" string from an extended JSON
// document of the form {"d": {"$numberDecimal": "..."}}.
// It panics if s is not valid JSON.
func extJSONRepr(s string) string {
	var doc struct {
		D struct {
			Repr string `json:"$numberDecimal"`
		} `json:"d"`
	}
	if err := json.Unmarshal([]byte(s), &doc); err != nil {
		panic(err)
	}
	return doc.D.Repr
}
+
+func (s *S) TestDecimalTests(c *C) {
+	// These also conform to the spec and are used by Go elsewhere.
+	// (e.g. math/big won't parse "Infinity").
+	goStr := func(s string) string {
+		switch s {
+		case "Infinity":
+			return "Inf"
+		case "-Infinity":
+			return "-Inf"
+		}
+		return s
+	}
+
+	for _, testEntry := range decimalTestsJSON {
+		testFile := testEntry.file
+
+		var tests decimalTests
+		err := json.Unmarshal([]byte(testEntry.json), &tests)
+		c.Assert(err, IsNil)
+
+		for _, test := range tests.Valid {
+			c.Logf("Running %s test: %s", testFile, test.Description)
+
+			test.BSON = strings.ToLower(test.BSON)
+
+			// Unmarshal value from BSON data.
+			bsonData, err := hex.DecodeString(test.BSON)
+			var bsonValue struct{ D interface{} }
+			err = bson.Unmarshal(bsonData, &bsonValue)
+			c.Assert(err, IsNil)
+			dec128, ok := bsonValue.D.(bson.Decimal128)
+			c.Assert(ok, Equals, true)
+
+			// Extract ExtJSON representations (canonical and not).
+			extjRepr := extJSONRepr(test.ExtJSON)
+			cextjRepr := extjRepr
+			if test.CanonicalExtJSON != "" {
+				cextjRepr = extJSONRepr(test.CanonicalExtJSON)
+			}
+
+			wantRepr := goStr(cextjRepr)
+
+			// Generate canonical representation.
+			c.Assert(dec128.String(), Equals, wantRepr)
+
+			// Parse original canonical representation.
+			parsed, err := bson.ParseDecimal128(cextjRepr)
+			c.Assert(err, IsNil)
+			c.Assert(parsed.String(), Equals, wantRepr)
+
+			// Parse non-canonical representation.
+			parsed, err = bson.ParseDecimal128(extjRepr)
+			c.Assert(err, IsNil)
+			c.Assert(parsed.String(), Equals, wantRepr)
+
+			// Parse Go canonical representation (Inf vs. Infinity).
+			parsed, err = bson.ParseDecimal128(wantRepr)
+			c.Assert(err, IsNil)
+			c.Assert(parsed.String(), Equals, wantRepr)
+
+			// Marshal original value back into BSON data.
+			data, err := bson.Marshal(bsonValue)
+			c.Assert(err, IsNil)
+			c.Assert(hex.EncodeToString(data), Equals, test.BSON)
+
+			if test.Lossy {
+				continue
+			}
+
+			// Marshal the parsed canonical representation.
+			var parsedValue struct{ D interface{} }
+			parsedValue.D = parsed
+			data, err = bson.Marshal(parsedValue)
+			c.Assert(err, IsNil)
+			c.Assert(hex.EncodeToString(data), Equals, test.BSON)
+		}
+
+		for _, test := range tests.ParseErrors {
+			c.Logf("Running %s parse error test: %s (string %q)", testFile, test.Description, test.String)
+
+			_, err := bson.ParseDecimal128(test.String)
+			quoted := regexp.QuoteMeta(fmt.Sprintf("%q", test.String))
+			c.Assert(err, ErrorMatches, `cannot parse `+quoted+` as a decimal128`)
+		}
+	}
+}
+
+const decBenchNum = "9.999999999999999999999999999999999E+6144"
+
+func (s *S) BenchmarkDecimal128String(c *C) {
+	d, err := bson.ParseDecimal128(decBenchNum)
+	c.Assert(err, IsNil)
+	c.Assert(d.String(), Equals, decBenchNum)
+
+	c.ResetTimer()
+	for i := 0; i < c.N; i++ {
+		d.String()
+	}
+}
+
+func (s *S) BenchmarkDecimal128Parse(c *C) {
+	var err error
+	c.ResetTimer()
+	for i := 0; i < c.N; i++ {
+		_, err = bson.ParseDecimal128(decBenchNum)
+	}
+	if err != nil {
+		panic(err)
+	}
+}
+
+var decimalTestsJSON = []struct{ file, json string }{
+	{"decimal128-1.json", `
+{
+    "description": "Decimal128",
+    "bson_type": "0x13",
+    "test_key": "d",
+    "valid": [
+        {
+            "description": "Special - Canonical NaN",
+            "bson": "180000001364000000000000000000000000000000007C00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}"
+        },
+        {
+            "description": "Special - Negative NaN",
+            "bson": "18000000136400000000000000000000000000000000FC00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}",
+            "lossy": true
+        },
+        {
+            "description": "Special - Negative NaN",
+            "bson": "18000000136400000000000000000000000000000000FC00",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"-NaN\"}}",
+            "lossy": true
+        },
+        {
+            "description": "Special - Canonical SNaN",
+            "bson": "180000001364000000000000000000000000000000007E00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}",
+            "lossy": true
+        },
+        {
+            "description": "Special - Negative SNaN",
+            "bson": "18000000136400000000000000000000000000000000FE00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}",
+            "lossy": true
+        },
+        {
+            "description": "Special - NaN with a payload",
+            "bson": "180000001364001200000000000000000000000000007E00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}",
+            "lossy": true
+        },
+        {
+            "description": "Special - Canonical Positive Infinity",
+            "bson": "180000001364000000000000000000000000000000007800",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}"
+        },
+        {
+            "description": "Special - Canonical Negative Infinity",
+            "bson": "18000000136400000000000000000000000000000000F800",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}"
+        },
+        {
+            "description": "Special - Invalid representation treated as 0",
+            "bson": "180000001364000000000000000000000000000000106C00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}",
+            "lossy": true
+        },
+        {
+            "description": "Special - Invalid representation treated as -0",
+            "bson": "18000000136400DCBA9876543210DEADBEEF00000010EC00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}",
+            "lossy": true
+        },
+        {
+            "description": "Special - Invalid representation treated as 0E3",
+            "bson": "18000000136400FFFFFFFFFFFFFFFFFFFFFFFFFFFF116C00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+3\"}}",
+            "lossy": true
+        },
+        {
+            "description": "Regular - Adjusted Exponent Limit",
+            "bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3CF22F00",
+            "extjson": "{\"d\": { \"$numberDecimal\": \"0.000001234567890123456789012345678901234\" }}"
+        },
+        {
+            "description": "Regular - Smallest",
+            "bson": "18000000136400D204000000000000000000000000343000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.001234\"}}"
+        },
+        {
+            "description": "Regular - Smallest with Trailing Zeros",
+            "bson": "1800000013640040EF5A07000000000000000000002A3000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00123400000\"}}"
+        },
+        {
+            "description": "Regular - 0.1",
+            "bson": "1800000013640001000000000000000000000000003E3000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1\"}}"
+        },
+        {
+            "description": "Regular - 0.1234567890123456789012345678901234",
+            "bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3CFC2F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1234567890123456789012345678901234\"}}"
+        },
+        {
+            "description": "Regular - 0",
+            "bson": "180000001364000000000000000000000000000000403000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}"
+        },
+        {
+            "description": "Regular - -0",
+            "bson": "18000000136400000000000000000000000000000040B000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}"
+        },
+        {
+            "description": "Regular - -0.0",
+            "bson": "1800000013640000000000000000000000000000003EB000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0\"}}"
+        },
+        {
+            "description": "Regular - 2",
+            "bson": "180000001364000200000000000000000000000000403000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"2\"}}"
+        },
+        {
+            "description": "Regular - 2.000",
+            "bson": "18000000136400D0070000000000000000000000003A3000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"2.000\"}}"
+        },
+        {
+            "description": "Regular - Largest",
+            "bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3C403000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1234567890123456789012345678901234\"}}"
+        },
+        {
+            "description": "Scientific - Tiniest",
+            "bson": "18000000136400FFFFFFFF638E8D37C087ADBE09ED010000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"9.999999999999999999999999999999999E-6143\"}}"
+        },
+        {
+            "description": "Scientific - Tiny",
+            "bson": "180000001364000100000000000000000000000000000000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6176\"}}"
+        },
+        {
+            "description": "Scientific - Negative Tiny",
+            "bson": "180000001364000100000000000000000000000000008000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6176\"}}"
+        },
+        {
+            "description": "Scientific - Adjusted Exponent Limit",
+            "bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3CF02F00",
+            "extjson": "{\"d\": { \"$numberDecimal\": \"1.234567890123456789012345678901234E-7\" }}"
+        },
+        {
+            "description": "Scientific - Fractional",
+            "bson": "1800000013640064000000000000000000000000002CB000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.00E-8\"}}"
+        },
+        {
+            "description": "Scientific - 0 with Exponent",
+            "bson": "180000001364000000000000000000000000000000205F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6000\"}}"
+        },
+        {
+            "description": "Scientific - 0 with Negative Exponent",
+            "bson": "1800000013640000000000000000000000000000007A2B00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-611\"}}"
+        },
+        {
+            "description": "Scientific - No Decimal with Signed Exponent",
+            "bson": "180000001364000100000000000000000000000000463000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+3\"}}"
+        },
+        {
+            "description": "Scientific - Trailing Zero",
+            "bson": "180000001364001A04000000000000000000000000423000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.050E+4\"}}"
+        },
+        {
+            "description": "Scientific - With Decimal",
+            "bson": "180000001364006900000000000000000000000000423000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.05E+3\"}}"
+        },
+        {
+            "description": "Scientific - Full",
+            "bson": "18000000136400FFFFFFFFFFFFFFFFFFFFFFFFFFFF403000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"5192296858534827628530496329220095\"}}"
+        },
+        {
+            "description": "Scientific - Large",
+            "bson": "18000000136400000000000A5BC138938D44C64D31FE5F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000000E+6144\"}}"
+        },
+        {
+            "description": "Scientific - Largest",
+            "bson": "18000000136400FFFFFFFF638E8D37C087ADBE09EDFF5F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"9.999999999999999999999999999999999E+6144\"}}"
+        },
+        {
+            "description": "Non-Canonical Parsing - Exponent Normalization",
+            "bson": "1800000013640064000000000000000000000000002CB000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"-100E-10\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.00E-8\"}}"
+        },
+        {
+            "description": "Non-Canonical Parsing - Unsigned Positive Exponent",
+            "bson": "180000001364000100000000000000000000000000463000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E3\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+3\"}}"
+        },
+        {
+            "description": "Non-Canonical Parsing - Lowercase Exponent Identifier",
+            "bson": "180000001364000100000000000000000000000000463000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1e+3\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+3\"}}"
+        },
+        {
+            "description": "Non-Canonical Parsing - Long Significand with Exponent",
+            "bson": "1800000013640079D9E0F9763ADA429D0200000000583000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"12345689012345789012345E+12\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.2345689012345789012345E+34\"}}"
+        },
+        {
+            "description": "Non-Canonical Parsing - Positive Sign",
+            "bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3C403000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"+1234567890123456789012345678901234\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1234567890123456789012345678901234\"}}"
+        },
+        {
+            "description": "Non-Canonical Parsing - Long Decimal String",
+            "bson": "180000001364000100000000000000000000000000722800",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \".000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-999\"}}"
+        },
+        {
+            "description": "Non-Canonical Parsing - nan",
+            "bson": "180000001364000000000000000000000000000000007C00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"nan\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}"
+        },
+        {
+            "description": "Non-Canonical Parsing - nAn",
+            "bson": "180000001364000000000000000000000000000000007C00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"nAn\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}"
+        },
+        {
+            "description": "Non-Canonical Parsing - +infinity",
+            "bson": "180000001364000000000000000000000000000000007800",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"+infinity\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}"
+        },
+        {
+            "description": "Non-Canonical Parsing - infinity",
+            "bson": "180000001364000000000000000000000000000000007800",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"infinity\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}"
+        },
+        {
+            "description": "Non-Canonical Parsing - infiniTY",
+            "bson": "180000001364000000000000000000000000000000007800",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"infiniTY\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}"
+        },
+        {
+            "description": "Non-Canonical Parsing - inf",
+            "bson": "180000001364000000000000000000000000000000007800",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"inf\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}"
+        },
+        {
+            "description": "Non-Canonical Parsing - inF",
+            "bson": "180000001364000000000000000000000000000000007800",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"inF\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}"
+        },
+        {
+            "description": "Non-Canonical Parsing - -infinity",
+            "bson": "18000000136400000000000000000000000000000000F800",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"-infinity\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}"
+        },
+        {
+            "description": "Non-Canonical Parsing - -infiniTy",
+            "bson": "18000000136400000000000000000000000000000000F800",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"-infiniTy\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}"
+        },
+        {
+            "description": "Non-Canonical Parsing - -Inf",
+            "bson": "18000000136400000000000000000000000000000000F800",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}"
+        },
+        {
+            "description": "Non-Canonical Parsing - -inf",
+            "bson": "18000000136400000000000000000000000000000000F800",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"-inf\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}"
+        },
+        {
+            "description": "Non-Canonical Parsing - -inF",
+            "bson": "18000000136400000000000000000000000000000000F800",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"-inF\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}"
+        },
+        {
+           "description": "Rounded Subnormal number",
+           "bson": "180000001364000100000000000000000000000000000000",
+           "extjson": "{\"d\" : {\"$numberDecimal\" : \"10E-6177\"}}",
+           "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6176\"}}"
+        },
+        {
+           "description": "Clamped",
+           "bson": "180000001364000a00000000000000000000000000fe5f00",
+           "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E6112\"}}",
+           "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+6112\"}}"
+        },
+        {
+           "description": "Exact rounding",
+           "bson": "18000000136400000000000a5bc138938d44c64d31cc3700",
+           "extjson": "{\"d\" : {\"$numberDecimal\" : \"1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\"}}",
+           "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000000E+999\"}}"
+        }
+    ]
+}
+`},
+
+	{"decimal128-2.json", `
+{
+    "description": "Decimal128",
+    "bson_type": "0x13",
+    "test_key": "d",
+    "valid": [
+       {
+          "description": "[decq021] Normality",
+          "bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3C40B000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1234567890123456789012345678901234\"}}"
+       },
+       {
+          "description": "[decq823] values around [u]int32 edges (zeros done earlier)",
+          "bson": "18000000136400010000800000000000000000000040B000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-2147483649\"}}"
+       },
+       {
+          "description": "[decq822] values around [u]int32 edges (zeros done earlier)",
+          "bson": "18000000136400000000800000000000000000000040B000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-2147483648\"}}"
+       },
+       {
+          "description": "[decq821] values around [u]int32 edges (zeros done earlier)",
+          "bson": "18000000136400FFFFFF7F0000000000000000000040B000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-2147483647\"}}"
+       },
+       {
+          "description": "[decq820] values around [u]int32 edges (zeros done earlier)",
+          "bson": "18000000136400FEFFFF7F0000000000000000000040B000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-2147483646\"}}"
+       },
+       {
+          "description": "[decq152] fold-downs (more below)",
+          "bson": "18000000136400393000000000000000000000000040B000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-12345\"}}"
+       },
+       {
+          "description": "[decq154] fold-downs (more below)",
+          "bson": "18000000136400D20400000000000000000000000040B000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1234\"}}"
+       },
+       {
+          "description": "[decq006] derivative canonical plain strings",
+          "bson": "18000000136400EE0200000000000000000000000040B000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-750\"}}"
+       },
+       {
+          "description": "[decq164] fold-downs (more below)",
+          "bson": "1800000013640039300000000000000000000000003CB000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-123.45\"}}"
+       },
+       {
+          "description": "[decq156] fold-downs (more below)",
+          "bson": "180000001364007B0000000000000000000000000040B000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-123\"}}"
+       },
+       {
+          "description": "[decq008] derivative canonical plain strings",
+          "bson": "18000000136400EE020000000000000000000000003EB000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-75.0\"}}"
+       },
+       {
+          "description": "[decq158] fold-downs (more below)",
+          "bson": "180000001364000C0000000000000000000000000040B000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-12\"}}"
+       },
+       {
+          "description": "[decq122] Nmax and similar",
+          "bson": "18000000136400FFFFFFFF638E8D37C087ADBE09EDFFDF00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-9.999999999999999999999999999999999E+6144\"}}"
+       },
+       {
+          "description": "[decq002] (mostly derived from the Strawman 4 document and examples)",
+          "bson": "18000000136400EE020000000000000000000000003CB000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-7.50\"}}"
+       },
+       {
+          "description": "[decq004] derivative canonical plain strings",
+          "bson": "18000000136400EE0200000000000000000000000042B000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-7.50E+3\"}}"
+       },
+       {
+          "description": "[decq018] derivative canonical plain strings",
+          "bson": "18000000136400EE020000000000000000000000002EB000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-7.50E-7\"}}"
+       },
+       {
+          "description": "[decq125] Nmax and similar",
+          "bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3CFEDF00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.234567890123456789012345678901234E+6144\"}}"
+       },
+       {
+          "description": "[decq131] fold-downs (more below)",
+          "bson": "18000000136400000000807F1BCF85B27059C8A43CFEDF00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.230000000000000000000000000000000E+6144\"}}"
+       },
+       {
+          "description": "[decq162] fold-downs (more below)",
+          "bson": "180000001364007B000000000000000000000000003CB000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.23\"}}"
+       },
+       {
+          "description": "[decq176] Nmin and below",
+          "bson": "18000000136400010000000A5BC138938D44C64D31008000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.000000000000000000000000000000001E-6143\"}}"
+       },
+       {
+          "description": "[decq174] Nmin and below",
+          "bson": "18000000136400000000000A5BC138938D44C64D31008000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.000000000000000000000000000000000E-6143\"}}"
+       },
+       {
+          "description": "[decq133] fold-downs (more below)",
+          "bson": "18000000136400000000000A5BC138938D44C64D31FEDF00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.000000000000000000000000000000000E+6144\"}}"
+       },
+       {
+          "description": "[decq160] fold-downs (more below)",
+          "bson": "18000000136400010000000000000000000000000040B000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1\"}}"
+       },
+       {
+          "description": "[decq172] Nmin and below",
+          "bson": "180000001364000100000000000000000000000000428000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6143\"}}"
+       },
+       {
+          "description": "[decq010] derivative canonical plain strings",
+          "bson": "18000000136400EE020000000000000000000000003AB000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.750\"}}"
+       },
+       {
+          "description": "[decq012] derivative canonical plain strings",
+          "bson": "18000000136400EE0200000000000000000000000038B000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0750\"}}"
+       },
+       {
+          "description": "[decq014] derivative canonical plain strings",
+          "bson": "18000000136400EE0200000000000000000000000034B000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000750\"}}"
+       },
+       {
+          "description": "[decq016] derivative canonical plain strings",
+          "bson": "18000000136400EE0200000000000000000000000030B000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00000750\"}}"
+       },
+       {
+          "description": "[decq404] zeros",
+          "bson": "180000001364000000000000000000000000000000000000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-6176\"}}"
+       },
+       {
+          "description": "[decq424] negative zeros",
+          "bson": "180000001364000000000000000000000000000000008000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-6176\"}}"
+       },
+       {
+          "description": "[decq407] zeros",
+          "bson": "1800000013640000000000000000000000000000003C3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00\"}}"
+       },
+       {
+          "description": "[decq427] negative zeros",
+          "bson": "1800000013640000000000000000000000000000003CB000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00\"}}"
+       },
+       {
+          "description": "[decq409] zeros",
+          "bson": "180000001364000000000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}"
+       },
+       {
+          "description": "[decq428] negative zeros",
+          "bson": "18000000136400000000000000000000000000000040B000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}"
+       },
+       {
+          "description": "[decq700] Selected DPD codes",
+          "bson": "180000001364000000000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}"
+       },
+       {
+          "description": "[decq406] zeros",
+          "bson": "1800000013640000000000000000000000000000003C3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00\"}}"
+       },
+       {
+          "description": "[decq426] negative zeros",
+          "bson": "1800000013640000000000000000000000000000003CB000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00\"}}"
+       },
+       {
+          "description": "[decq410] zeros",
+          "bson": "180000001364000000000000000000000000000000463000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+3\"}}"
+       },
+       {
+          "description": "[decq431] negative zeros",
+          "bson": "18000000136400000000000000000000000000000046B000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+3\"}}"
+       },
+       {
+          "description": "[decq419] clamped zeros...",
+          "bson": "180000001364000000000000000000000000000000FE5F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6111\"}}"
+       },
+       {
+          "description": "[decq432] negative zeros",
+          "bson": "180000001364000000000000000000000000000000FEDF00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+6111\"}}"
+       },
+       {
+          "description": "[decq405] zeros",
+          "bson": "180000001364000000000000000000000000000000000000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-6176\"}}"
+       },
+       {
+          "description": "[decq425] negative zeros",
+          "bson": "180000001364000000000000000000000000000000008000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-6176\"}}"
+       },
+       {
+          "description": "[decq508] Specials",
+          "bson": "180000001364000000000000000000000000000000007800",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}"
+       },
+       {
+          "description": "[decq528] Specials",
+          "bson": "18000000136400000000000000000000000000000000F800",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}"
+       },
+       {
+          "description": "[decq541] Specials",
+          "bson": "180000001364000000000000000000000000000000007C00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}"
+       },
+       {
+          "description": "[decq074] Nmin and below",
+          "bson": "18000000136400000000000A5BC138938D44C64D31000000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000000E-6143\"}}"
+       },
+       {
+          "description": "[decq602] fold-down full sequence",
+          "bson": "18000000136400000000000A5BC138938D44C64D31FE5F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000000E+6144\"}}"
+       },
+       {
+          "description": "[decq604] fold-down full sequence",
+          "bson": "180000001364000000000081EFAC855B416D2DEE04FE5F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000000000E+6143\"}}"
+       },
+       {
+          "description": "[decq606] fold-down full sequence",
+          "bson": "1800000013640000000080264B91C02220BE377E00FE5F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000000000000E+6142\"}}"
+       },
+       {
+          "description": "[decq608] fold-down full sequence",
+          "bson": "1800000013640000000040EAED7446D09C2C9F0C00FE5F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000E+6141\"}}"
+       },
+       {
+          "description": "[decq610] fold-down full sequence",
+          "bson": "18000000136400000000A0CA17726DAE0F1E430100FE5F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000000E+6140\"}}"
+       },
+       {
+          "description": "[decq612] fold-down full sequence",
+          "bson": "18000000136400000000106102253E5ECE4F200000FE5F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000000000E+6139\"}}"
+       },
+       {
+          "description": "[decq614] fold-down full sequence",
+          "bson": "18000000136400000000E83C80D09F3C2E3B030000FE5F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000E+6138\"}}"
+       },
+       {
+          "description": "[decq616] fold-down full sequence",
+          "bson": "18000000136400000000E4D20CC8DCD2B752000000FE5F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000E+6137\"}}"
+       },
+       {
+          "description": "[decq618] fold-down full sequence",
+          "bson": "180000001364000000004A48011416954508000000FE5F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000000E+6136\"}}"
+       },
+       {
+          "description": "[decq620] fold-down full sequence",
+          "bson": "18000000136400000000A1EDCCCE1BC2D300000000FE5F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000E+6135\"}}"
+       },
+       {
+          "description": "[decq622] fold-down full sequence",
+          "bson": "18000000136400000080F64AE1C7022D1500000000FE5F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000E+6134\"}}"
+       },
+       {
+          "description": "[decq624] fold-down full sequence",
+          "bson": "18000000136400000040B2BAC9E0191E0200000000FE5F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000E+6133\"}}"
+       },
+       {
+          "description": "[decq626] fold-down full sequence",
+          "bson": "180000001364000000A0DEC5ADC935360000000000FE5F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000E+6132\"}}"
+       },
+       {
+          "description": "[decq628] fold-down full sequence",
+          "bson": "18000000136400000010632D5EC76B050000000000FE5F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000E+6131\"}}"
+       },
+       {
+          "description": "[decq630] fold-down full sequence",
+          "bson": "180000001364000000E8890423C78A000000000000FE5F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000E+6130\"}}"
+       },
+       {
+          "description": "[decq632] fold-down full sequence",
+          "bson": "18000000136400000064A7B3B6E00D000000000000FE5F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000E+6129\"}}"
+       },
+       {
+          "description": "[decq634] fold-down full sequence",
+          "bson": "1800000013640000008A5D78456301000000000000FE5F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000E+6128\"}}"
+       },
+       {
+          "description": "[decq636] fold-down full sequence",
+          "bson": "180000001364000000C16FF2862300000000000000FE5F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000E+6127\"}}"
+       },
+       {
+          "description": "[decq638] fold-down full sequence",
+          "bson": "180000001364000080C6A47E8D0300000000000000FE5F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000E+6126\"}}"
+       },
+       {
+          "description": "[decq640] fold-down full sequence",
+          "bson": "1800000013640000407A10F35A0000000000000000FE5F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000E+6125\"}}"
+       },
+       {
+          "description": "[decq642] fold-down full sequence",
+          "bson": "1800000013640000A0724E18090000000000000000FE5F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000E+6124\"}}"
+       },
+       {
+          "description": "[decq644] fold-down full sequence",
+          "bson": "180000001364000010A5D4E8000000000000000000FE5F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000E+6123\"}}"
+       },
+       {
+          "description": "[decq646] fold-down full sequence",
+          "bson": "1800000013640000E8764817000000000000000000FE5F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000E+6122\"}}"
+       },
+       {
+          "description": "[decq648] fold-down full sequence",
+          "bson": "1800000013640000E40B5402000000000000000000FE5F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000E+6121\"}}"
+       },
+       {
+          "description": "[decq650] fold-down full sequence",
+          "bson": "1800000013640000CA9A3B00000000000000000000FE5F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000E+6120\"}}"
+       },
+       {
+          "description": "[decq652] fold-down full sequence",
+          "bson": "1800000013640000E1F50500000000000000000000FE5F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000E+6119\"}}"
+       },
+       {
+          "description": "[decq654] fold-down full sequence",
+          "bson": "180000001364008096980000000000000000000000FE5F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000E+6118\"}}"
+       },
+       {
+          "description": "[decq656] fold-down full sequence",
+          "bson": "1800000013640040420F0000000000000000000000FE5F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000E+6117\"}}"
+       },
+       {
+          "description": "[decq658] fold-down full sequence",
+          "bson": "18000000136400A086010000000000000000000000FE5F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000E+6116\"}}"
+       },
+       {
+          "description": "[decq660] fold-down full sequence",
+          "bson": "180000001364001027000000000000000000000000FE5F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000E+6115\"}}"
+       },
+       {
+          "description": "[decq662] fold-down full sequence",
+          "bson": "18000000136400E803000000000000000000000000FE5F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000E+6114\"}}"
+       },
+       {
+          "description": "[decq664] fold-down full sequence",
+          "bson": "180000001364006400000000000000000000000000FE5F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00E+6113\"}}"
+       },
+       {
+          "description": "[decq666] fold-down full sequence",
+          "bson": "180000001364000A00000000000000000000000000FE5F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+6112\"}}"
+       },
+       {
+          "description": "[decq060] fold-downs (more below)",
+          "bson": "180000001364000100000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1\"}}"
+       },
+       {
+          "description": "[decq670] fold-down full sequence",
+          "bson": "180000001364000100000000000000000000000000FC5F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6110\"}}"
+       },
+       {
+          "description": "[decq668] fold-down full sequence",
+          "bson": "180000001364000100000000000000000000000000FE5F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6111\"}}"
+       },
+       {
+          "description": "[decq072] Nmin and below",
+          "bson": "180000001364000100000000000000000000000000420000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6143\"}}"
+       },
+       {
+          "description": "[decq076] Nmin and below",
+          "bson": "18000000136400010000000A5BC138938D44C64D31000000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000001E-6143\"}}"
+       },
+       {
+          "description": "[decq036] fold-downs (more below)",
+          "bson": "18000000136400000000807F1BCF85B27059C8A43CFE5F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.230000000000000000000000000000000E+6144\"}}"
+       },
+       {
+          "description": "[decq062] fold-downs (more below)",
+          "bson": "180000001364007B000000000000000000000000003C3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.23\"}}"
+       },
+       {
+          "description": "[decq034] Nmax and similar",
+          "bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3CFE5F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.234567890123456789012345678901234E+6144\"}}"
+       },
+       {
+          "description": "[decq441] exponent lengths",
+          "bson": "180000001364000700000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"7\"}}"
+       },
+       {
+          "description": "[decq449] exponent lengths",
+          "bson": "1800000013640007000000000000000000000000001E5F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+5999\"}}"
+       },
+       {
+          "description": "[decq447] exponent lengths",
+          "bson": "1800000013640007000000000000000000000000000E3800",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+999\"}}"
+       },
+       {
+          "description": "[decq445] exponent lengths",
+          "bson": "180000001364000700000000000000000000000000063100",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+99\"}}"
+       },
+       {
+          "description": "[decq443] exponent lengths",
+          "bson": "180000001364000700000000000000000000000000523000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+9\"}}"
+       },
+       {
+          "description": "[decq842] VG testcase",
+          "bson": "180000001364000000FED83F4E7C9FE4E269E38A5BCD1700",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"7.049000000000010795488000000000000E-3097\"}}"
+       },
+       {
+          "description": "[decq841] VG testcase",
+          "bson": "180000001364000000203B9DB5056F000000000000002400",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"8.000000000000000000E-1550\"}}"
+       },
+       {
+          "description": "[decq840] VG testcase",
+          "bson": "180000001364003C17258419D710C42F0000000000002400",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"8.81125000000001349436E-1548\"}}"
+       },
+       {
+          "description": "[decq701] Selected DPD codes",
+          "bson": "180000001364000900000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"9\"}}"
+       },
+       {
+          "description": "[decq032] Nmax and similar",
+          "bson": "18000000136400FFFFFFFF638E8D37C087ADBE09EDFF5F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"9.999999999999999999999999999999999E+6144\"}}"
+       },
+       {
+          "description": "[decq702] Selected DPD codes",
+          "bson": "180000001364000A00000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"10\"}}"
+       },
+       {
+          "description": "[decq057] fold-downs (more below)",
+          "bson": "180000001364000C00000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"12\"}}"
+       },
+       {
+          "description": "[decq703] Selected DPD codes",
+          "bson": "180000001364001300000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"19\"}}"
+       },
+       {
+          "description": "[decq704] Selected DPD codes",
+          "bson": "180000001364001400000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"20\"}}"
+       },
+       {
+          "description": "[decq705] Selected DPD codes",
+          "bson": "180000001364001D00000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"29\"}}"
+       },
+       {
+          "description": "[decq706] Selected DPD codes",
+          "bson": "180000001364001E00000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"30\"}}"
+       },
+       {
+          "description": "[decq707] Selected DPD codes",
+          "bson": "180000001364002700000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"39\"}}"
+       },
+       {
+          "description": "[decq708] Selected DPD codes",
+          "bson": "180000001364002800000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"40\"}}"
+       },
+       {
+          "description": "[decq709] Selected DPD codes",
+          "bson": "180000001364003100000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"49\"}}"
+       },
+       {
+          "description": "[decq710] Selected DPD codes",
+          "bson": "180000001364003200000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"50\"}}"
+       },
+       {
+          "description": "[decq711] Selected DPD codes",
+          "bson": "180000001364003B00000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"59\"}}"
+       },
+       {
+          "description": "[decq712] Selected DPD codes",
+          "bson": "180000001364003C00000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"60\"}}"
+       },
+       {
+          "description": "[decq713] Selected DPD codes",
+          "bson": "180000001364004500000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"69\"}}"
+       },
+       {
+          "description": "[decq714] Selected DPD codes",
+          "bson": "180000001364004600000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"70\"}}"
+       },
+       {
+          "description": "[decq715] Selected DPD codes",
+          "bson": "180000001364004700000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"71\"}}"
+       },
+       {
+          "description": "[decq716] Selected DPD codes",
+          "bson": "180000001364004800000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"72\"}}"
+       },
+       {
+          "description": "[decq717] Selected DPD codes",
+          "bson": "180000001364004900000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"73\"}}"
+       },
+       {
+          "description": "[decq718] Selected DPD codes",
+          "bson": "180000001364004A00000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"74\"}}"
+       },
+       {
+          "description": "[decq719] Selected DPD codes",
+          "bson": "180000001364004B00000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"75\"}}"
+       },
+       {
+          "description": "[decq720] Selected DPD codes",
+          "bson": "180000001364004C00000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"76\"}}"
+       },
+       {
+          "description": "[decq721] Selected DPD codes",
+          "bson": "180000001364004D00000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"77\"}}"
+       },
+       {
+          "description": "[decq722] Selected DPD codes",
+          "bson": "180000001364004E00000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"78\"}}"
+       },
+       {
+          "description": "[decq723] Selected DPD codes",
+          "bson": "180000001364004F00000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"79\"}}"
+       },
+       {
+          "description": "[decq056] fold-downs (more below)",
+          "bson": "180000001364007B00000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"123\"}}"
+       },
+       {
+          "description": "[decq064] fold-downs (more below)",
+          "bson": "1800000013640039300000000000000000000000003C3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"123.45\"}}"
+       },
+       {
+          "description": "[decq732] Selected DPD codes",
+          "bson": "180000001364000802000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"520\"}}"
+       },
+       {
+          "description": "[decq733] Selected DPD codes",
+          "bson": "180000001364000902000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"521\"}}"
+       },
+       {
+          "description": "[decq740] DPD: one of each of the huffman groups",
+          "bson": "180000001364000903000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"777\"}}"
+       },
+       {
+          "description": "[decq741] DPD: one of each of the huffman groups",
+          "bson": "180000001364000A03000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"778\"}}"
+       },
+       {
+          "description": "[decq742] DPD: one of each of the huffman groups",
+          "bson": "180000001364001303000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"787\"}}"
+       },
+       {
+          "description": "[decq746] DPD: one of each of the huffman groups",
+          "bson": "180000001364001F03000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"799\"}}"
+       },
+       {
+          "description": "[decq743] DPD: one of each of the huffman groups",
+          "bson": "180000001364006D03000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"877\"}}"
+       },
+       {
+          "description": "[decq753] DPD all-highs cases (includes the 24 redundant codes)",
+          "bson": "180000001364007803000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"888\"}}"
+       },
+       {
+          "description": "[decq754] DPD all-highs cases (includes the 24 redundant codes)",
+          "bson": "180000001364007903000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"889\"}}"
+       },
+       {
+          "description": "[decq760] DPD all-highs cases (includes the 24 redundant codes)",
+          "bson": "180000001364008203000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"898\"}}"
+       },
+       {
+          "description": "[decq764] DPD all-highs cases (includes the 24 redundant codes)",
+          "bson": "180000001364008303000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"899\"}}"
+       },
+       {
+          "description": "[decq745] DPD: one of each of the huffman groups",
+          "bson": "18000000136400D303000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"979\"}}"
+       },
+       {
+          "description": "[decq770] DPD all-highs cases (includes the 24 redundant codes)",
+          "bson": "18000000136400DC03000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"988\"}}"
+       },
+       {
+          "description": "[decq774] DPD all-highs cases (includes the 24 redundant codes)",
+          "bson": "18000000136400DD03000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"989\"}}"
+       },
+       {
+          "description": "[decq730] Selected DPD codes",
+          "bson": "18000000136400E203000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"994\"}}"
+       },
+       {
+          "description": "[decq731] Selected DPD codes",
+          "bson": "18000000136400E303000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"995\"}}"
+       },
+       {
+          "description": "[decq744] DPD: one of each of the huffman groups",
+          "bson": "18000000136400E503000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"997\"}}"
+       },
+       {
+          "description": "[decq780] DPD all-highs cases (includes the 24 redundant codes)",
+          "bson": "18000000136400E603000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"998\"}}"
+       },
+       {
+          "description": "[decq787] DPD all-highs cases (includes the 24 redundant codes)",
+          "bson": "18000000136400E703000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"999\"}}"
+       },
+       {
+          "description": "[decq053] fold-downs (more below)",
+          "bson": "18000000136400D204000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1234\"}}"
+       },
+       {
+          "description": "[decq052] fold-downs (more below)",
+          "bson": "180000001364003930000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"12345\"}}"
+       },
+       {
+          "description": "[decq792] Miscellaneous (testers' queries, etc.)",
+          "bson": "180000001364003075000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"30000\"}}"
+       },
+       {
+          "description": "[decq793] Miscellaneous (testers' queries, etc.)",
+          "bson": "1800000013640090940D0000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"890000\"}}"
+       },
+       {
+          "description": "[decq824] values around [u]int32 edges (zeros done earlier)",
+          "bson": "18000000136400FEFFFF7F00000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"2147483646\"}}"
+       },
+       {
+          "description": "[decq825] values around [u]int32 edges (zeros done earlier)",
+          "bson": "18000000136400FFFFFF7F00000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"2147483647\"}}"
+       },
+       {
+          "description": "[decq826] values around [u]int32 edges (zeros done earlier)",
+          "bson": "180000001364000000008000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"2147483648\"}}"
+       },
+       {
+          "description": "[decq827] values around [u]int32 edges (zeros done earlier)",
+          "bson": "180000001364000100008000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"2147483649\"}}"
+       },
+       {
+          "description": "[decq828] values around [u]int32 edges (zeros done earlier)",
+          "bson": "18000000136400FEFFFFFF00000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"4294967294\"}}"
+       },
+       {
+          "description": "[decq829] values around [u]int32 edges (zeros done earlier)",
+          "bson": "18000000136400FFFFFFFF00000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"4294967295\"}}"
+       },
+       {
+          "description": "[decq830] values around [u]int32 edges (zeros done earlier)",
+          "bson": "180000001364000000000001000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"4294967296\"}}"
+       },
+       {
+          "description": "[decq831] values around [u]int32 edges (zeros done earlier)",
+          "bson": "180000001364000100000001000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"4294967297\"}}"
+       },
+       {
+          "description": "[decq022] Normality",
+          "bson": "18000000136400C7711CC7B548F377DC80A131C836403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1111111111111111111111111111111111\"}}"
+       },
+       {
+          "description": "[decq020] Normality",
+          "bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3C403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1234567890123456789012345678901234\"}}"
+       },
+       {
+          "description": "[decq550] Specials",
+          "bson": "18000000136400FFFFFFFF638E8D37C087ADBE09ED413000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"9999999999999999999999999999999999\"}}"
+       }
+    ]
+}
+`},
+
+	{"decimal128-3.json", `
+{
+    "description": "Decimal128",
+    "bson_type": "0x13",
+    "test_key": "d",
+    "valid": [
+       {
+          "description": "[basx066] strings without E cannot generate E in result",
+          "bson": "18000000136400185C0ACE0000000000000000000038B000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-00345678.5432\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-345678.5432\"}}"
+       },
+       {
+          "description": "[basx065] strings without E cannot generate E in result",
+          "bson": "18000000136400185C0ACE0000000000000000000038B000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0345678.5432\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-345678.5432\"}}"
+       },
+       {
+          "description": "[basx064] strings without E cannot generate E in result",
+          "bson": "18000000136400185C0ACE0000000000000000000038B000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-345678.5432\"}}"
+       },
+       {
+          "description": "[basx041] strings without E cannot generate E in result",
+          "bson": "180000001364004C0000000000000000000000000040B000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-76\"}}"
+       },
+       {
+          "description": "[basx027] conform to rules and exponent will be in permitted range).",
+          "bson": "180000001364000F270000000000000000000000003AB000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-9.999\"}}"
+       },
+       {
+          "description": "[basx026] conform to rules and exponent will be in permitted range).",
+          "bson": "180000001364009F230000000000000000000000003AB000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-9.119\"}}"
+       },
+       {
+          "description": "[basx025] conform to rules and exponent will be in permitted range).",
+          "bson": "180000001364008F030000000000000000000000003CB000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-9.11\"}}"
+       },
+       {
+          "description": "[basx024] conform to rules and exponent will be in permitted range).",
+          "bson": "180000001364005B000000000000000000000000003EB000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-9.1\"}}"
+       },
+       {
+          "description": "[dqbsr531] negatives (Rounded)",
+          "bson": "1800000013640099761CC7B548F377DC80A131C836FEAF00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.1111111111111111111111111111123450\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.111111111111111111111111111112345\"}}"
+       },
+       {
+          "description": "[basx022] conform to rules and exponent will be in permitted range).",
+          "bson": "180000001364000A000000000000000000000000003EB000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.0\"}}"
+       },
+       {
+          "description": "[basx021] conform to rules and exponent will be in permitted range).",
+          "bson": "18000000136400010000000000000000000000000040B000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1\"}}"
+       },
+       {
+          "description": "[basx601] Zeros",
+          "bson": "1800000013640000000000000000000000000000002E3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000000\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-9\"}}"
+       },
+       {
+          "description": "[basx622] Zeros",
+          "bson": "1800000013640000000000000000000000000000002EB000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000000000\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-9\"}}"
+       },
+       {
+          "description": "[basx602] Zeros",
+          "bson": "180000001364000000000000000000000000000000303000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000000\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-8\"}}"
+       },
+       {
+          "description": "[basx621] Zeros",
+          "bson": "18000000136400000000000000000000000000000030B000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00000000\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-8\"}}"
+       },
+       {
+          "description": "[basx603] Zeros",
+          "bson": "180000001364000000000000000000000000000000323000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000000\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-7\"}}"
+       },
+       {
+          "description": "[basx620] Zeros",
+          "bson": "18000000136400000000000000000000000000000032B000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0000000\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-7\"}}"
+       },
+       {
+          "description": "[basx604] Zeros",
+          "bson": "180000001364000000000000000000000000000000343000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000\"}}"
+       },
+       {
+          "description": "[basx619] Zeros",
+          "bson": "18000000136400000000000000000000000000000034B000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000000\"}}"
+       },
+       {
+          "description": "[basx605] Zeros",
+          "bson": "180000001364000000000000000000000000000000363000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000\"}}"
+       },
+       {
+          "description": "[basx618] Zeros",
+          "bson": "18000000136400000000000000000000000000000036B000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00000\"}}"
+       },
+       {
+          "description": "[basx680] Zeros",
+          "bson": "180000001364000000000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"000000.\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}"
+       },
+       {
+          "description": "[basx606] Zeros",
+          "bson": "180000001364000000000000000000000000000000383000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000\"}}"
+       },
+       {
+          "description": "[basx617] Zeros",
+          "bson": "18000000136400000000000000000000000000000038B000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0000\"}}"
+       },
+       {
+          "description": "[basx681] Zeros",
+          "bson": "180000001364000000000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"00000.\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}"
+       },
+       {
+          "description": "[basx686] Zeros",
+          "bson": "180000001364000000000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"+00000.\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}"
+       },
+       {
+          "description": "[basx687] Zeros",
+          "bson": "18000000136400000000000000000000000000000040B000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-00000.\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}"
+       },
+       {
+          "description": "[basx019] conform to rules and exponent will be in permitted range).",
+          "bson": "1800000013640000000000000000000000000000003CB000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-00.00\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00\"}}"
+       },
+       {
+          "description": "[basx607] Zeros",
+          "bson": "1800000013640000000000000000000000000000003A3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000\"}}"
+       },
+       {
+          "description": "[basx616] Zeros",
+          "bson": "1800000013640000000000000000000000000000003AB000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000\"}}"
+       },
+       {
+          "description": "[basx682] Zeros",
+          "bson": "180000001364000000000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0000.\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}"
+       },
+       {
+          "description": "[basx155] Numbers with E",
+          "bson": "1800000013640000000000000000000000000000003A3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000e+0\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000\"}}"
+       },
+       {
+          "description": "[basx130] Numbers with E",
+          "bson": "180000001364000000000000000000000000000000383000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000E-1\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000\"}}"
+       },
+       {
+          "description": "[basx290] some more negative zeros [systematic tests below]",
+          "bson": "18000000136400000000000000000000000000000038B000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000E-1\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0000\"}}"
+       },
+       {
+          "description": "[basx131] Numbers with E",
+          "bson": "180000001364000000000000000000000000000000363000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000E-2\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000\"}}"
+       },
+       {
+          "description": "[basx291] some more negative zeros [systematic tests below]",
+          "bson": "18000000136400000000000000000000000000000036B000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000E-2\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00000\"}}"
+       },
+       {
+          "description": "[basx132] Numbers with E",
+          "bson": "180000001364000000000000000000000000000000343000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000E-3\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000\"}}"
+       },
+       {
+          "description": "[basx292] some more negative zeros [systematic tests below]",
+          "bson": "18000000136400000000000000000000000000000034B000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000E-3\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000000\"}}"
+       },
+       {
+          "description": "[basx133] Numbers with E",
+          "bson": "180000001364000000000000000000000000000000323000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000E-4\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-7\"}}"
+       },
+       {
+          "description": "[basx293] some more negative zeros [systematic tests below]",
+          "bson": "18000000136400000000000000000000000000000032B000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000E-4\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-7\"}}"
+       },
+       {
+          "description": "[basx608] Zeros",
+          "bson": "1800000013640000000000000000000000000000003C3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00\"}}"
+       },
+       {
+          "description": "[basx615] Zeros",
+          "bson": "1800000013640000000000000000000000000000003CB000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00\"}}"
+       },
+       {
+          "description": "[basx683] Zeros",
+          "bson": "180000001364000000000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"000.\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}"
+       },
+       {
+          "description": "[basx630] Zeros",
+          "bson": "1800000013640000000000000000000000000000003C3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+0\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00\"}}"
+       },
+       {
+          "description": "[basx670] Zeros",
+          "bson": "1800000013640000000000000000000000000000003C3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-0\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00\"}}"
+       },
+       {
+          "description": "[basx631] Zeros",
+          "bson": "1800000013640000000000000000000000000000003E3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+1\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0\"}}"
+       },
+       {
+          "description": "[basx671] Zeros",
+          "bson": "1800000013640000000000000000000000000000003A3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-1\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000\"}}"
+       },
+       {
+          "description": "[basx134] Numbers with E",
+          "bson": "180000001364000000000000000000000000000000383000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-2\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000\"}}"
+       },
+       {
+          "description": "[basx294] some more negative zeros [systematic tests below]",
+          "bson": "18000000136400000000000000000000000000000038B000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00E-2\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0000\"}}"
+       },
+       {
+          "description": "[basx632] Zeros",
+          "bson": "180000001364000000000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+2\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}"
+       },
+       {
+          "description": "[basx672] Zeros",
+          "bson": "180000001364000000000000000000000000000000383000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-2\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000\"}}"
+       },
+       {
+          "description": "[basx135] Numbers with E",
+          "bson": "180000001364000000000000000000000000000000363000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-3\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000\"}}"
+       },
+       {
+          "description": "[basx295] some more negative zeros [systematic tests below]",
+          "bson": "18000000136400000000000000000000000000000036B000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00E-3\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00000\"}}"
+       },
+       {
+          "description": "[basx633] Zeros",
+          "bson": "180000001364000000000000000000000000000000423000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+3\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+1\"}}"
+       },
+       {
+          "description": "[basx673] Zeros",
+          "bson": "180000001364000000000000000000000000000000363000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-3\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000\"}}"
+       },
+       {
+          "description": "[basx136] Numbers with E",
+          "bson": "180000001364000000000000000000000000000000343000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-4\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000\"}}"
+       },
+       {
+          "description": "[basx674] Zeros",
+          "bson": "180000001364000000000000000000000000000000343000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-4\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000\"}}"
+       },
+       {
+          "description": "[basx634] Zeros",
+          "bson": "180000001364000000000000000000000000000000443000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+4\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+2\"}}"
+       },
+       {
+          "description": "[basx137] Numbers with E",
+          "bson": "180000001364000000000000000000000000000000323000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-5\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-7\"}}"
+       },
+       {
+          "description": "[basx635] Zeros",
+          "bson": "180000001364000000000000000000000000000000463000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+5\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+3\"}}"
+       },
+       {
+          "description": "[basx675] Zeros",
+          "bson": "180000001364000000000000000000000000000000323000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-5\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-7\"}}"
+       },
+       {
+          "description": "[basx636] Zeros",
+          "bson": "180000001364000000000000000000000000000000483000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+6\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+4\"}}"
+       },
+       {
+          "description": "[basx676] Zeros",
+          "bson": "180000001364000000000000000000000000000000303000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-6\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-8\"}}"
+       },
+       {
+          "description": "[basx637] Zeros",
+          "bson": "1800000013640000000000000000000000000000004A3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+7\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+5\"}}"
+       },
+       {
+          "description": "[basx677] Zeros",
+          "bson": "1800000013640000000000000000000000000000002E3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-7\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-9\"}}"
+       },
+       {
+          "description": "[basx638] Zeros",
+          "bson": "1800000013640000000000000000000000000000004C3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+8\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6\"}}"
+       },
+       {
+          "description": "[basx678] Zeros",
+          "bson": "1800000013640000000000000000000000000000002C3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-8\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-10\"}}"
+       },
+       {
+          "description": "[basx149] Numbers with E",
+          "bson": "180000001364000000000000000000000000000000523000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"000E+9\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+9\"}}"
+       },
+       {
+          "description": "[basx639] Zeros",
+          "bson": "1800000013640000000000000000000000000000004E3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+9\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+7\"}}"
+       },
+       {
+          "description": "[basx679] Zeros",
+          "bson": "1800000013640000000000000000000000000000002A3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-9\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-11\"}}"
+       },
+       {
+          "description": "[basx063] strings without E cannot generate E in result",
+          "bson": "18000000136400185C0ACE00000000000000000000383000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"+00345678.5432\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"345678.5432\"}}"
+       },
+       {
+          "description": "[basx018] conform to rules and exponent will be in permitted range).",
+          "bson": "1800000013640000000000000000000000000000003EB000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0\"}}"
+       },
+       {
+          "description": "[basx609] Zeros",
+          "bson": "1800000013640000000000000000000000000000003E3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0\"}}"
+       },
+       {
+          "description": "[basx614] Zeros",
+          "bson": "1800000013640000000000000000000000000000003EB000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0\"}}"
+       },
+       {
+          "description": "[basx684] Zeros",
+          "bson": "180000001364000000000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"00.\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}"
+       },
+       {
+          "description": "[basx640] Zeros",
+          "bson": "1800000013640000000000000000000000000000003E3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+0\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0\"}}"
+       },
+       {
+          "description": "[basx660] Zeros",
+          "bson": "1800000013640000000000000000000000000000003E3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-0\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0\"}}"
+       },
+       {
+          "description": "[basx641] Zeros",
+          "bson": "180000001364000000000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+1\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}"
+       },
+       {
+          "description": "[basx661] Zeros",
+          "bson": "1800000013640000000000000000000000000000003C3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-1\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00\"}}"
+       },
+       {
+          "description": "[basx296] some more negative zeros [systematic tests below]",
+          "bson": "1800000013640000000000000000000000000000003AB000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0E-2\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000\"}}"
+       },
+       {
+          "description": "[basx642] Zeros",
+          "bson": "180000001364000000000000000000000000000000423000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+2\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+1\"}}"
+       },
+       {
+          "description": "[basx662] Zeros",
+          "bson": "1800000013640000000000000000000000000000003A3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-2\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000\"}}"
+       },
+       {
+          "description": "[basx297] some more negative zeros [systematic tests below]",
+          "bson": "18000000136400000000000000000000000000000038B000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0E-3\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0000\"}}"
+       },
+       {
+          "description": "[basx643] Zeros",
+          "bson": "180000001364000000000000000000000000000000443000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+3\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+2\"}}"
+       },
+       {
+          "description": "[basx663] Zeros",
+          "bson": "180000001364000000000000000000000000000000383000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-3\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000\"}}"
+       },
+       {
+          "description": "[basx644] Zeros",
+          "bson": "180000001364000000000000000000000000000000463000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+4\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+3\"}}"
+       },
+       {
+          "description": "[basx664] Zeros",
+          "bson": "180000001364000000000000000000000000000000363000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-4\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000\"}}"
+       },
+       {
+          "description": "[basx645] Zeros",
+          "bson": "180000001364000000000000000000000000000000483000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+5\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+4\"}}"
+       },
+       {
+          "description": "[basx665] Zeros",
+          "bson": "180000001364000000000000000000000000000000343000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-5\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000\"}}"
+       },
+       {
+          "description": "[basx646] Zeros",
+          "bson": "1800000013640000000000000000000000000000004A3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+6\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+5\"}}"
+       },
+       {
+          "description": "[basx666] Zeros",
+          "bson": "180000001364000000000000000000000000000000323000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-6\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-7\"}}"
+       },
+       {
+          "description": "[basx647] Zeros",
+          "bson": "1800000013640000000000000000000000000000004C3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+7\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6\"}}"
+       },
+       {
+          "description": "[basx667] Zeros",
+          "bson": "180000001364000000000000000000000000000000303000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-7\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-8\"}}"
+       },
+       {
+          "description": "[basx648] Zeros",
+          "bson": "1800000013640000000000000000000000000000004E3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+8\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+7\"}}"
+       },
+       {
+          "description": "[basx668] Zeros",
+          "bson": "1800000013640000000000000000000000000000002E3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-8\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-9\"}}"
+       },
+       {
+          "description": "[basx160] Numbers with E",
+          "bson": "180000001364000000000000000000000000000000523000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"00E+9\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+9\"}}"
+       },
+       {
+          "description": "[basx161] Numbers with E",
+          "bson": "1800000013640000000000000000000000000000002E3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"00E-9\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-9\"}}"
+       },
+       {
+          "description": "[basx649] Zeros",
+          "bson": "180000001364000000000000000000000000000000503000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+9\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+8\"}}"
+       },
+       {
+          "description": "[basx669] Zeros",
+          "bson": "1800000013640000000000000000000000000000002C3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-9\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-10\"}}"
+       },
+       {
+          "description": "[basx062] strings without E cannot generate E in result",
+          "bson": "18000000136400185C0ACE00000000000000000000383000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"+0345678.5432\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"345678.5432\"}}"
+       },
+       {
+          "description": "[basx001] conform to rules and exponent will be in permitted range).",
+          "bson": "180000001364000000000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}"
+       },
+       {
+          "description": "[basx017] conform to rules and exponent will be in permitted range).",
+          "bson": "18000000136400000000000000000000000000000040B000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}"
+       },
+       {
+          "description": "[basx611] Zeros",
+          "bson": "180000001364000000000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}"
+       },
+       {
+          "description": "[basx613] Zeros",
+          "bson": "18000000136400000000000000000000000000000040B000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}"
+       },
+       {
+          "description": "[basx685] Zeros",
+          "bson": "180000001364000000000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}"
+       },
+       {
+          "description": "[basx688] Zeros",
+          "bson": "180000001364000000000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"+0.\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}"
+       },
+       {
+          "description": "[basx689] Zeros",
+          "bson": "18000000136400000000000000000000000000000040B000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}"
+       },
+       {
+          "description": "[basx650] Zeros",
+          "bson": "180000001364000000000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+0\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}"
+       },
+       {
+          "description": "[basx651] Zeros",
+          "bson": "180000001364000000000000000000000000000000423000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+1\"}}"
+       },
+       {
+          "description": "[basx298] some more negative zeros [systematic tests below]",
+          "bson": "1800000013640000000000000000000000000000003CB000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-2\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00\"}}"
+       },
+       {
+          "description": "[basx652] Zeros",
+          "bson": "180000001364000000000000000000000000000000443000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+2\"}}"
+       },
+       {
+          "description": "[basx299] some more negative zeros [systematic tests below]",
+          "bson": "1800000013640000000000000000000000000000003AB000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-3\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000\"}}"
+       },
+       {
+          "description": "[basx653] Zeros",
+          "bson": "180000001364000000000000000000000000000000463000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+3\"}}"
+       },
+       {
+          "description": "[basx654] Zeros",
+          "bson": "180000001364000000000000000000000000000000483000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+4\"}}"
+       },
+       {
+          "description": "[basx655] Zeros",
+          "bson": "1800000013640000000000000000000000000000004A3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+5\"}}"
+       },
+       {
+          "description": "[basx656] Zeros",
+          "bson": "1800000013640000000000000000000000000000004C3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6\"}}"
+       },
+       {
+          "description": "[basx657] Zeros",
+          "bson": "1800000013640000000000000000000000000000004E3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+7\"}}"
+       },
+       {
+          "description": "[basx658] Zeros",
+          "bson": "180000001364000000000000000000000000000000503000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+8\"}}"
+       },
+       {
+          "description": "[basx138] Numbers with E",
+          "bson": "180000001364000000000000000000000000000000523000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"+0E+9\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+9\"}}"
+       },
+       {
+          "description": "[basx139] Numbers with E",
+          "bson": "18000000136400000000000000000000000000000052B000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+9\"}}"
+       },
+       {
+          "description": "[basx144] Numbers with E",
+          "bson": "180000001364000000000000000000000000000000523000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+9\"}}"
+       },
+       {
+          "description": "[basx154] Numbers with E",
+          "bson": "180000001364000000000000000000000000000000523000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E9\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+9\"}}"
+       },
+       {
+          "description": "[basx659] Zeros",
+          "bson": "180000001364000000000000000000000000000000523000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+9\"}}"
+       },
+       {
+          "description": "[basx042] strings without E cannot generate E in result",
+          "bson": "18000000136400FC040000000000000000000000003C3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"+12.76\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.76\"}}"
+       },
+       {
+          "description": "[basx143] Numbers with E",
+          "bson": "180000001364000100000000000000000000000000523000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"+1E+009\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}"
+       },
+       {
+          "description": "[basx061] strings without E cannot generate E in result",
+          "bson": "18000000136400185C0ACE00000000000000000000383000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"+345678.5432\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"345678.5432\"}}"
+       },
+       {
+          "description": "[basx036] conform to rules and exponent will be in permitted range).",
+          "bson": "1800000013640015CD5B0700000000000000000000203000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000000123456789\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.23456789E-8\"}}"
+       },
+       {
+          "description": "[basx035] conform to rules and exponent will be in permitted range).",
+          "bson": "1800000013640015CD5B0700000000000000000000223000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000123456789\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.23456789E-7\"}}"
+       },
+       {
+          "description": "[basx034] conform to rules and exponent will be in permitted range).",
+          "bson": "1800000013640015CD5B0700000000000000000000243000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000123456789\"}}"
+       },
+       {
+          "description": "[basx053] strings without E cannot generate E in result",
+          "bson": "180000001364003200000000000000000000000000323000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000050\"}}"
+       },
+       {
+          "description": "[basx033] conform to rules and exponent will be in permitted range).",
+          "bson": "1800000013640015CD5B0700000000000000000000263000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000123456789\"}}"
+       },
+       {
+          "description": "[basx016] conform to rules and exponent will be in permitted range).",
+          "bson": "180000001364000C000000000000000000000000003A3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.012\"}}"
+       },
+       {
+          "description": "[basx015] conform to rules and exponent will be in permitted range).",
+          "bson": "180000001364007B000000000000000000000000003A3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.123\"}}"
+       },
+       {
+          "description": "[basx037] conform to rules and exponent will be in permitted range).",
+          "bson": "1800000013640078DF0D8648700000000000000000223000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.123456789012344\"}}"
+       },
+       {
+          "description": "[basx038] conform to rules and exponent will be in permitted range).",
+          "bson": "1800000013640079DF0D8648700000000000000000223000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.123456789012345\"}}"
+       },
+       {
+          "description": "[basx250] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000383000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265\"}}"
+       },
+       {
+          "description": "[basx257] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000383000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-0\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265\"}}"
+       },
+       {
+          "description": "[basx256] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000363000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-1\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.01265\"}}"
+       },
+       {
+          "description": "[basx258] Numbers with E",
+          "bson": "18000000136400F1040000000000000000000000003A3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E+1\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265\"}}"
+       },
+       {
+          "description": "[basx251] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000103000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-20\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-21\"}}"
+       },
+       {
+          "description": "[basx263] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000603000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E+20\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+19\"}}"
+       },
+       {
+          "description": "[basx255] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000343000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-2\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.001265\"}}"
+       },
+       {
+          "description": "[basx259] Numbers with E",
+          "bson": "18000000136400F1040000000000000000000000003C3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E+2\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65\"}}"
+       },
+       {
+          "description": "[basx254] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000323000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-3\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0001265\"}}"
+       },
+       {
+          "description": "[basx260] Numbers with E",
+          "bson": "18000000136400F1040000000000000000000000003E3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E+3\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5\"}}"
+       },
+       {
+          "description": "[basx253] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000303000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-4\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00001265\"}}"
+       },
+       {
+          "description": "[basx261] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E+4\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265\"}}"
+       },
+       {
+          "description": "[basx252] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000283000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-8\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-9\"}}"
+       },
+       {
+          "description": "[basx262] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000483000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E+8\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+7\"}}"
+       },
+       {
+          "description": "[basx159] Numbers with E",
+          "bson": "1800000013640049000000000000000000000000002E3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.73e-7\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7.3E-8\"}}"
+       },
+       {
+          "description": "[basx004] conform to rules and exponent will be in permitted range).",
+          "bson": "1800000013640064000000000000000000000000003C3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00\"}}"
+       },
+       {
+          "description": "[basx003] conform to rules and exponent will be in permitted range).",
+          "bson": "180000001364000A000000000000000000000000003E3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0\"}}"
+       },
+       {
+          "description": "[basx002] conform to rules and exponent will be in permitted range).",
+          "bson": "180000001364000100000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1\"}}"
+       },
+       {
+          "description": "[basx148] Numbers with E",
+          "bson": "180000001364000100000000000000000000000000523000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+009\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}"
+       },
+       {
+          "description": "[basx153] Numbers with E",
+          "bson": "180000001364000100000000000000000000000000523000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E009\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}"
+       },
+       {
+          "description": "[basx141] Numbers with E",
+          "bson": "180000001364000100000000000000000000000000523000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1e+09\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}"
+       },
+       {
+          "description": "[basx146] Numbers with E",
+          "bson": "180000001364000100000000000000000000000000523000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+09\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}"
+       },
+       {
+          "description": "[basx151] Numbers with E",
+          "bson": "180000001364000100000000000000000000000000523000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1e09\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}"
+       },
+       {
+          "description": "[basx142] Numbers with E",
+          "bson": "180000001364000100000000000000000000000000F43000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+90\"}}"
+       },
+       {
+          "description": "[basx147] Numbers with E",
+          "bson": "180000001364000100000000000000000000000000F43000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1e+90\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+90\"}}"
+       },
+       {
+          "description": "[basx152] Numbers with E",
+          "bson": "180000001364000100000000000000000000000000F43000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E90\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+90\"}}"
+       },
+       {
+          "description": "[basx140] Numbers with E",
+          "bson": "180000001364000100000000000000000000000000523000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}"
+       },
+       {
+          "description": "[basx150] Numbers with E",
+          "bson": "180000001364000100000000000000000000000000523000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E9\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}"
+       },
+       {
+          "description": "[basx014] conform to rules and exponent will be in permitted range).",
+          "bson": "18000000136400D2040000000000000000000000003A3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.234\"}}"
+       },
+       {
+          "description": "[basx170] Numbers with E",
+          "bson": "18000000136400F1040000000000000000000000003A3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265\"}}"
+       },
+       {
+          "description": "[basx177] Numbers with E",
+          "bson": "18000000136400F1040000000000000000000000003A3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-0\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265\"}}"
+       },
+       {
+          "description": "[basx176] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000383000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-1\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265\"}}"
+       },
+       {
+          "description": "[basx178] Numbers with E",
+          "bson": "18000000136400F1040000000000000000000000003C3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+1\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65\"}}"
+       },
+       {
+          "description": "[basx171] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000123000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-20\"}}"
+       },
+       {
+          "description": "[basx183] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000623000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+20\"}}"
+       },
+       {
+          "description": "[basx175] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000363000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-2\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.01265\"}}"
+       },
+       {
+          "description": "[basx179] Numbers with E",
+          "bson": "18000000136400F1040000000000000000000000003E3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+2\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5\"}}"
+       },
+       {
+          "description": "[basx174] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000343000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-3\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.001265\"}}"
+       },
+       {
+          "description": "[basx180] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+3\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265\"}}"
+       },
+       {
+          "description": "[basx173] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000323000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-4\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0001265\"}}"
+       },
+       {
+          "description": "[basx181] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000423000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+4\"}}"
+       },
+       {
+          "description": "[basx172] Numbers with E",
+          "bson": "18000000136400F1040000000000000000000000002A3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-8\"}}"
+       },
+       {
+          "description": "[basx182] Numbers with E",
+          "bson": "18000000136400F1040000000000000000000000004A3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+8\"}}"
+       },
+       {
+          "description": "[basx157] Numbers with E",
+          "bson": "180000001364000400000000000000000000000000523000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"4E+9\"}}"
+       },
+       {
+          "description": "[basx067] examples",
+          "bson": "180000001364000500000000000000000000000000343000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"5E-6\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000005\"}}"
+       },
+       {
+          "description": "[basx069] examples",
+          "bson": "180000001364000500000000000000000000000000323000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"5E-7\"}}"
+       },
+       {
+          "description": "[basx385] Engineering notation tests",
+          "bson": "180000001364000700000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E0\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7\"}}"
+       },
+       {
+          "description": "[basx365] Engineering notation tests",
+          "bson": "180000001364000700000000000000000000000000543000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E10\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+10\"}}"
+       },
+       {
+          "description": "[basx405] Engineering notation tests",
+          "bson": "1800000013640007000000000000000000000000002C3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-10\"}}"
+       },
+       {
+          "description": "[basx363] Engineering notation tests",
+          "bson": "180000001364000700000000000000000000000000563000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E11\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+11\"}}"
+       },
+       {
+          "description": "[basx407] Engineering notation tests",
+          "bson": "1800000013640007000000000000000000000000002A3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-11\"}}"
+       },
+       {
+          "description": "[basx361] Engineering notation tests",
+          "bson": "180000001364000700000000000000000000000000583000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E12\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+12\"}}"
+       },
+       {
+          "description": "[basx409] Engineering notation tests",
+          "bson": "180000001364000700000000000000000000000000283000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-12\"}}"
+       },
+       {
+          "description": "[basx411] Engineering notation tests",
+          "bson": "180000001364000700000000000000000000000000263000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-13\"}}"
+       },
+       {
+          "description": "[basx383] Engineering notation tests",
+          "bson": "180000001364000700000000000000000000000000423000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E1\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+1\"}}"
+       },
+       {
+          "description": "[basx387] Engineering notation tests",
+          "bson": "1800000013640007000000000000000000000000003E3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-1\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.7\"}}"
+       },
+       {
+          "description": "[basx381] Engineering notation tests",
+          "bson": "180000001364000700000000000000000000000000443000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E2\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+2\"}}"
+       },
+       {
+          "description": "[basx389] Engineering notation tests",
+          "bson": "1800000013640007000000000000000000000000003C3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-2\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.07\"}}"
+       },
+       {
+          "description": "[basx379] Engineering notation tests",
+          "bson": "180000001364000700000000000000000000000000463000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E3\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+3\"}}"
+       },
+       {
+          "description": "[basx391] Engineering notation tests",
+          "bson": "1800000013640007000000000000000000000000003A3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-3\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.007\"}}"
+       },
+       {
+          "description": "[basx377] Engineering notation tests",
+          "bson": "180000001364000700000000000000000000000000483000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E4\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+4\"}}"
+       },
+       {
+          "description": "[basx393] Engineering notation tests",
+          "bson": "180000001364000700000000000000000000000000383000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-4\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0007\"}}"
+       },
+       {
+          "description": "[basx375] Engineering notation tests",
+          "bson": "1800000013640007000000000000000000000000004A3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E5\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+5\"}}"
+       },
+       {
+          "description": "[basx395] Engineering notation tests",
+          "bson": "180000001364000700000000000000000000000000363000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-5\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00007\"}}"
+       },
+       {
+          "description": "[basx373] Engineering notation tests",
+          "bson": "1800000013640007000000000000000000000000004C3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E6\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+6\"}}"
+       },
+       {
+          "description": "[basx397] Engineering notation tests",
+          "bson": "180000001364000700000000000000000000000000343000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-6\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000007\"}}"
+       },
+       {
+          "description": "[basx371] Engineering notation tests",
+          "bson": "1800000013640007000000000000000000000000004E3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E7\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+7\"}}"
+       },
+       {
+          "description": "[basx399] Engineering notation tests",
+          "bson": "180000001364000700000000000000000000000000323000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-7\"}}"
+       },
+       {
+          "description": "[basx369] Engineering notation tests",
+          "bson": "180000001364000700000000000000000000000000503000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E8\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+8\"}}"
+       },
+       {
+          "description": "[basx401] Engineering notation tests",
+          "bson": "180000001364000700000000000000000000000000303000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-8\"}}"
+       },
+       {
+          "description": "[basx367] Engineering notation tests",
+          "bson": "180000001364000700000000000000000000000000523000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E9\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+9\"}}"
+       },
+       {
+          "description": "[basx403] Engineering notation tests",
+          "bson": "1800000013640007000000000000000000000000002E3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-9\"}}"
+       },
+       {
+          "description": "[basx007] conform to rules and exponent will be in permitted range).",
+          "bson": "1800000013640064000000000000000000000000003E3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"10.0\"}}"
+       },
+       {
+          "description": "[basx005] conform to rules and exponent will be in permitted range).",
+          "bson": "180000001364000A00000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"10\"}}"
+       },
+       {
+          "description": "[basx165] Numbers with E",
+          "bson": "180000001364000A00000000000000000000000000523000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"10E+009\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+10\"}}"
+       },
+       {
+          "description": "[basx163] Numbers with E",
+          "bson": "180000001364000A00000000000000000000000000523000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"10E+09\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+10\"}}"
+       },
+       {
+          "description": "[basx325] Engineering notation tests",
+          "bson": "180000001364000A00000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e0\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"10\"}}"
+       },
+       {
+          "description": "[basx305] Engineering notation tests",
+          "bson": "180000001364000A00000000000000000000000000543000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e10\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+11\"}}"
+       },
+       {
+          "description": "[basx345] Engineering notation tests",
+          "bson": "180000001364000A000000000000000000000000002C3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-10\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-9\"}}"
+       },
+       {
+          "description": "[basx303] Engineering notation tests",
+          "bson": "180000001364000A00000000000000000000000000563000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e11\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+12\"}}"
+       },
+       {
+          "description": "[basx347] Engineering notation tests",
+          "bson": "180000001364000A000000000000000000000000002A3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-11\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-10\"}}"
+       },
+       {
+          "description": "[basx301] Engineering notation tests",
+          "bson": "180000001364000A00000000000000000000000000583000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e12\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+13\"}}"
+       },
+       {
+          "description": "[basx349] Engineering notation tests",
+          "bson": "180000001364000A00000000000000000000000000283000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-12\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-11\"}}"
+       },
+       {
+          "description": "[basx351] Engineering notation tests",
+          "bson": "180000001364000A00000000000000000000000000263000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-13\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-12\"}}"
+       },
+       {
+          "description": "[basx323] Engineering notation tests",
+          "bson": "180000001364000A00000000000000000000000000423000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e1\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+2\"}}"
+       },
+       {
+          "description": "[basx327] Engineering notation tests",
+          "bson": "180000001364000A000000000000000000000000003E3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-1\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0\"}}"
+       },
+       {
+          "description": "[basx321] Engineering notation tests",
+          "bson": "180000001364000A00000000000000000000000000443000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e2\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+3\"}}"
+       },
+       {
+          "description": "[basx329] Engineering notation tests",
+          "bson": "180000001364000A000000000000000000000000003C3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-2\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.10\"}}"
+       },
+       {
+          "description": "[basx319] Engineering notation tests",
+          "bson": "180000001364000A00000000000000000000000000463000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e3\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+4\"}}"
+       },
+       {
+          "description": "[basx331] Engineering notation tests",
+          "bson": "180000001364000A000000000000000000000000003A3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-3\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.010\"}}"
+       },
+       {
+          "description": "[basx317] Engineering notation tests",
+          "bson": "180000001364000A00000000000000000000000000483000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e4\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+5\"}}"
+       },
+       {
+          "description": "[basx333] Engineering notation tests",
+          "bson": "180000001364000A00000000000000000000000000383000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-4\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0010\"}}"
+       },
+       {
+          "description": "[basx315] Engineering notation tests",
+          "bson": "180000001364000A000000000000000000000000004A3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e5\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+6\"}}"
+       },
+       {
+          "description": "[basx335] Engineering notation tests",
+          "bson": "180000001364000A00000000000000000000000000363000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-5\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00010\"}}"
+       },
+       {
+          "description": "[basx313] Engineering notation tests",
+          "bson": "180000001364000A000000000000000000000000004C3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e6\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+7\"}}"
+       },
+       {
+          "description": "[basx337] Engineering notation tests",
+          "bson": "180000001364000A00000000000000000000000000343000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-6\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000010\"}}"
+       },
+       {
+          "description": "[basx311] Engineering notation tests",
+          "bson": "180000001364000A000000000000000000000000004E3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e7\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+8\"}}"
+       },
+       {
+          "description": "[basx339] Engineering notation tests",
+          "bson": "180000001364000A00000000000000000000000000323000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-7\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000010\"}}"
+       },
+       {
+          "description": "[basx309] Engineering notation tests",
+          "bson": "180000001364000A00000000000000000000000000503000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e8\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+9\"}}"
+       },
+       {
+          "description": "[basx341] Engineering notation tests",
+          "bson": "180000001364000A00000000000000000000000000303000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-8\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-7\"}}"
+       },
+       {
+          "description": "[basx164] Numbers with E",
+          "bson": "180000001364000A00000000000000000000000000F43000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e+90\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+91\"}}"
+       },
+       {
+          "description": "[basx162] Numbers with E",
+          "bson": "180000001364000A00000000000000000000000000523000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"10E+9\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+10\"}}"
+       },
+       {
+          "description": "[basx307] Engineering notation tests",
+          "bson": "180000001364000A00000000000000000000000000523000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e9\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+10\"}}"
+       },
+       {
+          "description": "[basx343] Engineering notation tests",
+          "bson": "180000001364000A000000000000000000000000002E3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-9\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-8\"}}"
+       },
+       {
+          "description": "[basx008] conform to rules and exponent will be in permitted range).",
+          "bson": "1800000013640065000000000000000000000000003E3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"10.1\"}}"
+       },
+       {
+          "description": "[basx009] conform to rules and exponent will be in permitted range).",
+          "bson": "1800000013640068000000000000000000000000003E3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"10.4\"}}"
+       },
+       {
+          "description": "[basx010] conform to rules and exponent will be in permitted range).",
+          "bson": "1800000013640069000000000000000000000000003E3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"10.5\"}}"
+       },
+       {
+          "description": "[basx011] conform to rules and exponent will be in permitted range).",
+          "bson": "180000001364006A000000000000000000000000003E3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"10.6\"}}"
+       },
+       {
+          "description": "[basx012] conform to rules and exponent will be in permitted range).",
+          "bson": "180000001364006D000000000000000000000000003E3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"10.9\"}}"
+       },
+       {
+          "description": "[basx013] conform to rules and exponent will be in permitted range).",
+          "bson": "180000001364006E000000000000000000000000003E3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"11.0\"}}"
+       },
+       {
+          "description": "[basx040] strings without E cannot generate E in result",
+          "bson": "180000001364000C00000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"12\"}}"
+       },
+       {
+          "description": "[basx190] Numbers with E",
+          "bson": "18000000136400F1040000000000000000000000003C3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65\"}}"
+       },
+       {
+          "description": "[basx197] Numbers with E",
+          "bson": "18000000136400F1040000000000000000000000003C3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-0\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65\"}}"
+       },
+       {
+          "description": "[basx196] Numbers with E",
+          "bson": "18000000136400F1040000000000000000000000003A3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-1\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265\"}}"
+       },
+       {
+          "description": "[basx198] Numbers with E",
+          "bson": "18000000136400F1040000000000000000000000003E3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E+1\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5\"}}"
+       },
+       {
+          "description": "[basx191] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000143000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-20\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-19\"}}"
+       },
+       {
+          "description": "[basx203] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000643000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E+20\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+21\"}}"
+       },
+       {
+          "description": "[basx195] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000383000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-2\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265\"}}"
+       },
+       {
+          "description": "[basx199] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E+2\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265\"}}"
+       },
+       {
+          "description": "[basx194] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000363000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-3\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.01265\"}}"
+       },
+       {
+          "description": "[basx200] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000423000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E+3\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+4\"}}"
+       },
+       {
+          "description": "[basx193] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000343000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-4\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.001265\"}}"
+       },
+       {
+          "description": "[basx201] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000443000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E+4\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+5\"}}"
+       },
+       {
+          "description": "[basx192] Numbers with E",
+          "bson": "18000000136400F1040000000000000000000000002C3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-8\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-7\"}}"
+       },
+       {
+          "description": "[basx202] Numbers with E",
+          "bson": "18000000136400F1040000000000000000000000004C3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E+8\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+9\"}}"
+       },
+       {
+          "description": "[basx044] strings without E cannot generate E in result",
+          "bson": "18000000136400FC040000000000000000000000003C3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"012.76\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.76\"}}"
+       },
+       {
+          "description": "[basx042] strings without E cannot generate E in result",
+          "bson": "18000000136400FC040000000000000000000000003C3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.76\"}}"
+       },
+       {
+          "description": "[basx046] strings without E cannot generate E in result",
+          "bson": "180000001364001100000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"17.\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"17\"}}"
+       },
+       {
+          "description": "[basx049] strings without E cannot generate E in result",
+          "bson": "180000001364002C00000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0044\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"44\"}}"
+       },
+       {
+          "description": "[basx048] strings without E cannot generate E in result",
+          "bson": "180000001364002C00000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"044\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"44\"}}"
+       },
+       {
+          "description": "[basx158] Numbers with E",
+          "bson": "180000001364002C00000000000000000000000000523000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"44E+9\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"4.4E+10\"}}"
+       },
+       {
+          "description": "[basx068] examples",
+          "bson": "180000001364003200000000000000000000000000323000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"50E-7\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000050\"}}"
+       },
+       {
+          "description": "[basx169] Numbers with E",
+          "bson": "180000001364006400000000000000000000000000523000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"100e+009\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00E+11\"}}"
+       },
+       {
+          "description": "[basx167] Numbers with E",
+          "bson": "180000001364006400000000000000000000000000523000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"100e+09\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00E+11\"}}"
+       },
+       {
+          "description": "[basx168] Numbers with E",
+          "bson": "180000001364006400000000000000000000000000F43000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"100E+90\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00E+92\"}}"
+       },
+       {
+          "description": "[basx166] Numbers with E",
+          "bson": "180000001364006400000000000000000000000000523000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"100e+9\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00E+11\"}}"
+       },
+       {
+          "description": "[basx210] Numbers with E",
+          "bson": "18000000136400F1040000000000000000000000003E3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5\"}}"
+       },
+       {
+          "description": "[basx217] Numbers with E",
+          "bson": "18000000136400F1040000000000000000000000003E3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E-0\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5\"}}"
+       },
+       {
+          "description": "[basx216] Numbers with E",
+          "bson": "18000000136400F1040000000000000000000000003C3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E-1\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65\"}}"
+       },
+       {
+          "description": "[basx218] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E+1\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265\"}}"
+       },
+       {
+          "description": "[basx211] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000163000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E-20\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-18\"}}"
+       },
+       {
+          "description": "[basx223] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000663000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E+20\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+22\"}}"
+       },
+       {
+          "description": "[basx215] Numbers with E",
+          "bson": "18000000136400F1040000000000000000000000003A3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E-2\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265\"}}"
+       },
+       {
+          "description": "[basx219] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000423000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E+2\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+4\"}}"
+       },
+       {
+          "description": "[basx214] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000383000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E-3\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265\"}}"
+       },
+       {
+          "description": "[basx220] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000443000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E+3\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+5\"}}"
+       },
+       {
+          "description": "[basx213] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000363000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E-4\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.01265\"}}"
+       },
+       {
+          "description": "[basx221] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000463000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E+4\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+6\"}}"
+       },
+       {
+          "description": "[basx212] Numbers with E",
+          "bson": "18000000136400F1040000000000000000000000002E3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E-8\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000001265\"}}"
+       },
+       {
+          "description": "[basx222] Numbers with E",
+          "bson": "18000000136400F1040000000000000000000000004E3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E+8\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+10\"}}"
+       },
+       {
+          "description": "[basx006] conform to rules and exponent will be in permitted range).",
+          "bson": "18000000136400E803000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1000\"}}"
+       },
+       {
+          "description": "[basx230] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265\"}}"
+       },
+       {
+          "description": "[basx237] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E-0\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265\"}}"
+       },
+       {
+          "description": "[basx236] Numbers with E",
+          "bson": "18000000136400F1040000000000000000000000003E3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E-1\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5\"}}"
+       },
+       {
+          "description": "[basx238] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000423000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E+1\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+4\"}}"
+       },
+       {
+          "description": "[basx231] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000183000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E-20\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-17\"}}"
+       },
+       {
+          "description": "[basx243] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000683000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E+20\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+23\"}}"
+       },
+       {
+          "description": "[basx235] Numbers with E",
+          "bson": "18000000136400F1040000000000000000000000003C3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E-2\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65\"}}"
+       },
+       {
+          "description": "[basx239] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000443000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E+2\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+5\"}}"
+       },
+       {
+          "description": "[basx234] Numbers with E",
+          "bson": "18000000136400F1040000000000000000000000003A3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E-3\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265\"}}"
+       },
+       {
+          "description": "[basx240] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000463000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E+3\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+6\"}}"
+       },
+       {
+          "description": "[basx233] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000383000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E-4\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265\"}}"
+       },
+       {
+          "description": "[basx241] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000483000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E+4\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+7\"}}"
+       },
+       {
+          "description": "[basx232] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000303000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E-8\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00001265\"}}"
+       },
+       {
+          "description": "[basx242] Numbers with E",
+          "bson": "18000000136400F104000000000000000000000000503000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E+8\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+11\"}}"
+       },
+       {
+          "description": "[basx060] strings without E cannot generate E in result",
+          "bson": "18000000136400185C0ACE00000000000000000000383000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"345678.5432\"}}"
+       },
+       {
+          "description": "[basx059] strings without E cannot generate E in result",
+          "bson": "18000000136400F198670C08000000000000000000363000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0345678.54321\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"345678.54321\"}}"
+       },
+       {
+          "description": "[basx058] strings without E cannot generate E in result",
+          "bson": "180000001364006AF90B7C50000000000000000000343000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"345678.543210\"}}"
+       },
+       {
+          "description": "[basx057] strings without E cannot generate E in result",
+          "bson": "180000001364006A19562522020000000000000000343000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"2345678.543210\"}}"
+       },
+       {
+          "description": "[basx056] strings without E cannot generate E in result",
+          "bson": "180000001364006AB9C8733A0B0000000000000000343000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"12345678.543210\"}}"
+       },
+       {
+          "description": "[basx031] conform to rules and exponent will be in permitted range).",
+          "bson": "1800000013640040AF0D8648700000000000000000343000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"123456789.000000\"}}"
+       },
+       {
+          "description": "[basx030] conform to rules and exponent will be in permitted range).",
+          "bson": "1800000013640080910F8648700000000000000000343000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"123456789.123456\"}}"
+       },
+       {
+          "description": "[basx032] conform to rules and exponent will be in permitted range).",
+          "bson": "1800000013640080910F8648700000000000000000403000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"123456789123456\"}}"
+       }
+    ]
+}
+`},
+
+	{"decimal128-4.json", `
+{
+    "description": "Decimal128",
+    "bson_type": "0x13",
+    "test_key": "d",
+    "valid": [
+       {
+          "description": "[basx023] conform to rules and exponent will be in permitted range).",
+          "bson": "1800000013640001000000000000000000000000003EB000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.1\"}}"
+       },
+
+       {
+          "description": "[basx045] strings without E cannot generate E in result",
+          "bson": "1800000013640003000000000000000000000000003A3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"+0.003\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.003\"}}"
+       },
+       {
+          "description": "[basx610] Zeros",
+          "bson": "1800000013640000000000000000000000000000003E3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \".0\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0\"}}"
+       },
+       {
+          "description": "[basx612] Zeros",
+          "bson": "1800000013640000000000000000000000000000003EB000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"-.0\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0\"}}"
+       },
+       {
+          "description": "[basx043] strings without E cannot generate E in result",
+          "bson": "18000000136400FC040000000000000000000000003C3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"+12.76\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.76\"}}"
+       },
+       {
+          "description": "[basx055] strings without E cannot generate E in result",
+          "bson": "180000001364000500000000000000000000000000303000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000005\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"5E-8\"}}"
+       },
+       {
+          "description": "[basx054] strings without E cannot generate E in result",
+          "bson": "180000001364000500000000000000000000000000323000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000005\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"5E-7\"}}"
+       },
+       {
+          "description": "[basx052] strings without E cannot generate E in result",
+          "bson": "180000001364000500000000000000000000000000343000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000005\"}}"
+       },
+       {
+          "description": "[basx051] strings without E cannot generate E in result",
+          "bson": "180000001364000500000000000000000000000000363000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"00.00005\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00005\"}}"
+       },
+       {
+          "description": "[basx050] strings without E cannot generate E in result",
+          "bson": "180000001364000500000000000000000000000000383000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0005\"}}"
+       },
+       {
+          "description": "[basx047] strings without E cannot generate E in result",
+          "bson": "1800000013640005000000000000000000000000003E3000",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \".5\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.5\"}}"
+       },
+       {
+          "description": "[dqbsr431] check rounding modes heeded (Rounded)",
+          "bson": "1800000013640099761CC7B548F377DC80A131C836FE2F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.1111111111111111111111111111123450\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.111111111111111111111111111112345\"}}"
+       },
+       {
+          "description": "OK2",
+          "bson": "18000000136400000000000A5BC138938D44C64D31FC2F00",
+          "extjson": "{\"d\" : {\"$numberDecimal\" : \".100000000000000000000000000000000000000000000000000000000000\"}}",
+          "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1000000000000000000000000000000000\"}}"
+       }
+    ],
+    "parseErrors": [
+       {
+          "description": "[basx564] Near-specials (Conversion_syntax)",
+          "string": "Infi"
+       },
+       {
+          "description": "[basx565] Near-specials (Conversion_syntax)",
+          "string": "Infin"
+       },
+       {
+          "description": "[basx566] Near-specials (Conversion_syntax)",
+          "string": "Infini"
+       },
+       {
+          "description": "[basx567] Near-specials (Conversion_syntax)",
+          "string": "Infinit"
+       },
+       {
+          "description": "[basx568] Near-specials (Conversion_syntax)",
+          "string": "-Infinit"
+       },
+       {
+          "description": "[basx590] some baddies with dots and Es and dots and specials (Conversion_syntax)",
+          "string": ".Infinity"
+       },
+       {
+          "description": "[basx562] Near-specials (Conversion_syntax)",
+          "string": "NaNq"
+       },
+       {
+          "description": "[basx563] Near-specials (Conversion_syntax)",
+          "string": "NaNs"
+       },
+       {
+          "description": "[dqbas939] overflow results at different rounding modes (Overflow & Inexact & Rounded)",
+          "string": "-7e10000"
+       },
+       {
+          "description": "[dqbsr534] negatives (Rounded & Inexact)",
+          "string": "-1.11111111111111111111111111111234650"
+       },
+       {
+          "description": "[dqbsr535] negatives (Rounded & Inexact)",
+          "string": "-1.11111111111111111111111111111234551"
+       },
+       {
+          "description": "[dqbsr533] negatives (Rounded & Inexact)",
+          "string": "-1.11111111111111111111111111111234550"
+       },
+       {
+          "description": "[dqbsr532] negatives (Rounded & Inexact)",
+          "string": "-1.11111111111111111111111111111234549"
+       },
+       {
+          "description": "[dqbsr432] check rounding modes heeded (Rounded & Inexact)",
+          "string": "1.11111111111111111111111111111234549"
+       },
+       {
+          "description": "[dqbsr433] check rounding modes heeded (Rounded & Inexact)",
+          "string": "1.11111111111111111111111111111234550"
+       },
+       {
+          "description": "[dqbsr435] check rounding modes heeded (Rounded & Inexact)",
+          "string": "1.11111111111111111111111111111234551"
+       },
+       {
+          "description": "[dqbsr434] check rounding modes heeded (Rounded & Inexact)",
+          "string": "1.11111111111111111111111111111234650"
+       },
+       {
+          "description": "[dqbas938] overflow results at different rounding modes (Overflow & Inexact & Rounded)",
+          "string": "7e10000"
+       },
+       {
+          "description": "Inexact rounding#1",
+          "string": "100000000000000000000000000000000000000000000000000000000001"
+       },
+       {
+          "description": "Inexact rounding#2",
+          "string": "1E-6177"
+       }
+    ]
+}
+`},
+
+	{"decimal128-5.json", `
+{
+    "description": "Decimal128",
+    "bson_type": "0x13",
+    "test_key": "d",
+    "valid": [
+        {
+            "description": "[decq035] fold-downs (more below) (Clamped)",
+            "bson": "18000000136400000000807F1BCF85B27059C8A43CFE5F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.23E+6144\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.230000000000000000000000000000000E+6144\"}}"
+        },
+        {
+            "description": "[decq037] fold-downs (more below) (Clamped)",
+            "bson": "18000000136400000000000A5BC138938D44C64D31FE5F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6144\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000000E+6144\"}}"
+        },
+        {
+            "description": "[decq077] Nmin and below (Subnormal)",
+            "bson": "180000001364000000000081EFAC855B416D2DEE04000000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.100000000000000000000000000000000E-6143\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000000000E-6144\"}}"
+        },
+        {
+            "description": "[decq078] Nmin and below (Subnormal)",
+            "bson": "180000001364000000000081EFAC855B416D2DEE04000000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000000000E-6144\"}}"
+        },
+        {
+            "description": "[decq079] Nmin and below (Subnormal)",
+            "bson": "180000001364000A00000000000000000000000000000000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000000000000000000000000000010E-6143\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-6175\"}}"
+        },
+        {
+            "description": "[decq080] Nmin and below (Subnormal)",
+            "bson": "180000001364000A00000000000000000000000000000000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-6175\"}}"
+        },
+        {
+            "description": "[decq081] Nmin and below (Subnormal)",
+            "bson": "180000001364000100000000000000000000000000020000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000000000000000000000000000001E-6143\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6175\"}}"
+        },
+        {
+            "description": "[decq082] Nmin and below (Subnormal)",
+            "bson": "180000001364000100000000000000000000000000020000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6175\"}}"
+        },
+        {
+            "description": "[decq083] Nmin and below (Subnormal)",
+            "bson": "180000001364000100000000000000000000000000000000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000000000000000000000000000001E-6143\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6176\"}}"
+        },
+        {
+            "description": "[decq084] Nmin and below (Subnormal)",
+            "bson": "180000001364000100000000000000000000000000000000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6176\"}}"
+        },
+        {
+            "description": "[decq090] underflows cannot be tested for simple copies, check edge cases (Subnormal)",
+            "bson": "180000001364000100000000000000000000000000000000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1e-6176\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6176\"}}"
+        },
+        {
+            "description": "[decq100] underflows cannot be tested for simple copies, check edge cases (Subnormal)",
+            "bson": "18000000136400FFFFFFFF095BC138938D44C64D31000000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"999999999999999999999999999999999e-6176\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"9.99999999999999999999999999999999E-6144\"}}"
+        },
+        {
+            "description": "[decq130] fold-downs (more below) (Clamped)",
+            "bson": "18000000136400000000807F1BCF85B27059C8A43CFEDF00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.23E+6144\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.230000000000000000000000000000000E+6144\"}}"
+        },
+        {
+            "description": "[decq132] fold-downs (more below) (Clamped)",
+            "bson": "18000000136400000000000A5BC138938D44C64D31FEDF00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E+6144\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.000000000000000000000000000000000E+6144\"}}"
+        },
+        {
+            "description": "[decq177] Nmin and below (Subnormal)",
+            "bson": "180000001364000000000081EFAC855B416D2DEE04008000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.100000000000000000000000000000000E-6143\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.00000000000000000000000000000000E-6144\"}}"
+        },
+        {
+            "description": "[decq178] Nmin and below (Subnormal)",
+            "bson": "180000001364000000000081EFAC855B416D2DEE04008000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.00000000000000000000000000000000E-6144\"}}"
+        },
+        {
+            "description": "[decq179] Nmin and below (Subnormal)",
+            "bson": "180000001364000A00000000000000000000000000008000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000000000000000000000000000000010E-6143\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.0E-6175\"}}"
+        },
+        {
+            "description": "[decq180] Nmin and below (Subnormal)",
+            "bson": "180000001364000A00000000000000000000000000008000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.0E-6175\"}}"
+        },
+        {
+            "description": "[decq181] Nmin and below (Subnormal)",
+            "bson": "180000001364000100000000000000000000000000028000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00000000000000000000000000000001E-6143\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6175\"}}"
+        },
+        {
+            "description": "[decq182] Nmin and below (Subnormal)",
+            "bson": "180000001364000100000000000000000000000000028000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6175\"}}"
+        },
+        {
+            "description": "[decq183] Nmin and below (Subnormal)",
+            "bson": "180000001364000100000000000000000000000000008000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000000000000000000000000000000001E-6143\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6176\"}}"
+        },
+        {
+            "description": "[decq184] Nmin and below (Subnormal)",
+            "bson": "180000001364000100000000000000000000000000008000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6176\"}}"
+        },
+        {
+            "description": "[decq190] underflow edge cases (Subnormal)",
+            "bson": "180000001364000100000000000000000000000000008000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1e-6176\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6176\"}}"
+        },
+        {
+            "description": "[decq200] underflow edge cases (Subnormal)",
+            "bson": "18000000136400FFFFFFFF095BC138938D44C64D31008000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"-999999999999999999999999999999999e-6176\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-9.99999999999999999999999999999999E-6144\"}}"
+        },
+        {
+            "description": "[decq400] zeros (Clamped)",
+            "bson": "180000001364000000000000000000000000000000000000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-8000\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-6176\"}}"
+        },
+        {
+            "description": "[decq401] zeros (Clamped)",
+            "bson": "180000001364000000000000000000000000000000000000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-6177\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-6176\"}}"
+        },
+        {
+            "description": "[decq414] clamped zeros... (Clamped)",
+            "bson": "180000001364000000000000000000000000000000FE5F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6112\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6111\"}}"
+        },
+        {
+            "description": "[decq416] clamped zeros... (Clamped)",
+            "bson": "180000001364000000000000000000000000000000FE5F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6144\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6111\"}}"
+        },
+        {
+            "description": "[decq418] clamped zeros... (Clamped)",
+            "bson": "180000001364000000000000000000000000000000FE5F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+8000\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6111\"}}"
+        },
+        {
+            "description": "[decq420] negative zeros (Clamped)",
+            "bson": "180000001364000000000000000000000000000000008000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-8000\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-6176\"}}"
+        },
+        {
+            "description": "[decq421] negative zeros (Clamped)",
+            "bson": "180000001364000000000000000000000000000000008000",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-6177\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-6176\"}}"
+        },
+        {
+            "description": "[decq434] clamped zeros... (Clamped)",
+            "bson": "180000001364000000000000000000000000000000FEDF00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+6112\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+6111\"}}"
+        },
+        {
+            "description": "[decq436] clamped zeros... (Clamped)",
+            "bson": "180000001364000000000000000000000000000000FEDF00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+6144\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+6111\"}}"
+        },
+        {
+            "description": "[decq438] clamped zeros... (Clamped)",
+            "bson": "180000001364000000000000000000000000000000FEDF00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+8000\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+6111\"}}"
+        },
+        {
+            "description": "[decq601] fold-down full sequence (Clamped)",
+            "bson": "18000000136400000000000A5BC138938D44C64D31FE5F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6144\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000000E+6144\"}}"
+        },
+        {
+            "description": "[decq603] fold-down full sequence (Clamped)",
+            "bson": "180000001364000000000081EFAC855B416D2DEE04FE5F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6143\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000000000E+6143\"}}"
+        },
+        {
+            "description": "[decq605] fold-down full sequence (Clamped)",
+            "bson": "1800000013640000000080264B91C02220BE377E00FE5F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6142\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000000000000E+6142\"}}"
+        },
+        {
+            "description": "[decq607] fold-down full sequence (Clamped)",
+            "bson": "1800000013640000000040EAED7446D09C2C9F0C00FE5F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6141\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000E+6141\"}}"
+        },
+        {
+            "description": "[decq609] fold-down full sequence (Clamped)",
+            "bson": "18000000136400000000A0CA17726DAE0F1E430100FE5F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6140\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000000E+6140\"}}"
+        },
+        {
+            "description": "[decq611] fold-down full sequence (Clamped)",
+            "bson": "18000000136400000000106102253E5ECE4F200000FE5F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6139\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000000000E+6139\"}}"
+        },
+        {
+            "description": "[decq613] fold-down full sequence (Clamped)",
+            "bson": "18000000136400000000E83C80D09F3C2E3B030000FE5F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6138\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000E+6138\"}}"
+        },
+        {
+            "description": "[decq615] fold-down full sequence (Clamped)",
+            "bson": "18000000136400000000E4D20CC8DCD2B752000000FE5F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6137\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000E+6137\"}}"
+        },
+        {
+            "description": "[decq617] fold-down full sequence (Clamped)",
+            "bson": "180000001364000000004A48011416954508000000FE5F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6136\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000000E+6136\"}}"
+        },
+        {
+            "description": "[decq619] fold-down full sequence (Clamped)",
+            "bson": "18000000136400000000A1EDCCCE1BC2D300000000FE5F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6135\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000E+6135\"}}"
+        },
+        {
+            "description": "[decq621] fold-down full sequence (Clamped)",
+            "bson": "18000000136400000080F64AE1C7022D1500000000FE5F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6134\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000E+6134\"}}"
+        },
+        {
+            "description": "[decq623] fold-down full sequence (Clamped)",
+            "bson": "18000000136400000040B2BAC9E0191E0200000000FE5F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6133\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000E+6133\"}}"
+        },
+        {
+            "description": "[decq625] fold-down full sequence (Clamped)",
+            "bson": "180000001364000000A0DEC5ADC935360000000000FE5F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6132\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000E+6132\"}}"
+        },
+        {
+            "description": "[decq627] fold-down full sequence (Clamped)",
+            "bson": "18000000136400000010632D5EC76B050000000000FE5F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6131\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000E+6131\"}}"
+        },
+        {
+            "description": "[decq629] fold-down full sequence (Clamped)",
+            "bson": "180000001364000000E8890423C78A000000000000FE5F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6130\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000E+6130\"}}"
+        },
+        {
+            "description": "[decq631] fold-down full sequence (Clamped)",
+            "bson": "18000000136400000064A7B3B6E00D000000000000FE5F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6129\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000E+6129\"}}"
+        },
+        {
+            "description": "[decq633] fold-down full sequence (Clamped)",
+            "bson": "1800000013640000008A5D78456301000000000000FE5F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6128\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000E+6128\"}}"
+        },
+        {
+            "description": "[decq635] fold-down full sequence (Clamped)",
+            "bson": "180000001364000000C16FF2862300000000000000FE5F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6127\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000E+6127\"}}"
+        },
+        {
+            "description": "[decq637] fold-down full sequence (Clamped)",
+            "bson": "180000001364000080C6A47E8D0300000000000000FE5F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6126\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000E+6126\"}}"
+        },
+        {
+            "description": "[decq639] fold-down full sequence (Clamped)",
+            "bson": "1800000013640000407A10F35A0000000000000000FE5F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6125\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000E+6125\"}}"
+        },
+        {
+            "description": "[decq641] fold-down full sequence (Clamped)",
+            "bson": "1800000013640000A0724E18090000000000000000FE5F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6124\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000E+6124\"}}"
+        },
+        {
+            "description": "[decq643] fold-down full sequence (Clamped)",
+            "bson": "180000001364000010A5D4E8000000000000000000FE5F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6123\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000E+6123\"}}"
+        },
+        {
+            "description": "[decq645] fold-down full sequence (Clamped)",
+            "bson": "1800000013640000E8764817000000000000000000FE5F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6122\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000E+6122\"}}"
+        },
+        {
+            "description": "[decq647] fold-down full sequence (Clamped)",
+            "bson": "1800000013640000E40B5402000000000000000000FE5F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6121\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000E+6121\"}}"
+        },
+        {
+            "description": "[decq649] fold-down full sequence (Clamped)",
+            "bson": "1800000013640000CA9A3B00000000000000000000FE5F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6120\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000E+6120\"}}"
+        },
+        {
+            "description": "[decq651] fold-down full sequence (Clamped)",
+            "bson": "1800000013640000E1F50500000000000000000000FE5F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6119\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000E+6119\"}}"
+        },
+        {
+            "description": "[decq653] fold-down full sequence (Clamped)",
+            "bson": "180000001364008096980000000000000000000000FE5F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6118\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000E+6118\"}}"
+        },
+        {
+            "description": "[decq655] fold-down full sequence (Clamped)",
+            "bson": "1800000013640040420F0000000000000000000000FE5F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6117\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000E+6117\"}}"
+        },
+        {
+            "description": "[decq657] fold-down full sequence (Clamped)",
+            "bson": "18000000136400A086010000000000000000000000FE5F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6116\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000E+6116\"}}"
+        },
+        {
+            "description": "[decq659] fold-down full sequence (Clamped)",
+            "bson": "180000001364001027000000000000000000000000FE5F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6115\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000E+6115\"}}"
+        },
+        {
+            "description": "[decq661] fold-down full sequence (Clamped)",
+            "bson": "18000000136400E803000000000000000000000000FE5F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6114\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000E+6114\"}}"
+        },
+        {
+            "description": "[decq663] fold-down full sequence (Clamped)",
+            "bson": "180000001364006400000000000000000000000000FE5F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6113\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00E+6113\"}}"
+        },
+        {
+            "description": "[decq665] fold-down full sequence (Clamped)",
+            "bson": "180000001364000A00000000000000000000000000FE5F00",
+            "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6112\"}}",
+            "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+6112\"}}"
+        }
+    ]
+}
+`},
+
+	{"decimal128-6.json", `
+{
+    "description": "Decimal128",
+    "bson_type": "0x13",
+    "test_key": "d",
+    "parseErrors": [
+        {
+            "description": "Incomplete Exponent",
+            "string": "1e"
+        },
+        {
+            "description": "Exponent at the beginning",
+            "string": "E01"
+        },
+        {
+            "description": "Just a decimal place",
+            "string": "."
+        },
+        {
+            "description": "2 decimal places",
+            "string": "..3"
+        },
+        {
+            "description": "2 decimal places",
+            "string": ".13.3"
+        },
+        {
+            "description": "2 decimal places",
+            "string": "1..3"
+        },
+        {
+            "description": "2 decimal places",
+            "string": "1.3.4"
+        },
+        {
+            "description": "2 decimal places",
+            "string": "1.34."
+        },
+        {
+            "description": "Decimal with no digits",
+            "string": ".e"
+        },
+        {
+            "description": "2 signs",
+            "string": "+-32.4"
+        },
+        {
+            "description": "2 signs",
+            "string": "-+32.4"
+        },
+        {
+            "description": "2 negative signs",
+            "string": "--32.4"
+        },
+        {
+            "description": "2 negative signs",
+            "string": "-32.-4"
+        },
+        {
+            "description": "End in negative sign",
+            "string": "32.0-"
+        },
+        {
+            "description": "2 negative signs",
+            "string": "32.4E--21"
+        },
+        {
+            "description": "2 negative signs",
+            "string": "32.4E-2-1"
+        },
+        {
+            "description": "2 signs",
+            "string": "32.4E+-21"
+        },
+        {
+            "description": "Empty string",
+            "string": ""
+        },
+        {
+            "description": "leading white space positive number",
+            "string": " 1"
+        },
+        {
+            "description": "leading white space negative number",
+            "string": " -1"
+        },
+        {
+            "description": "trailing white space",
+            "string": "1 "
+        },
+        {
+            "description": "Invalid",
+            "string": "E"
+        },
+        {
+            "description": "Invalid",
+            "string": "invalid"
+        },
+        {
+            "description": "Invalid",
+            "string": "i"
+        },
+        {
+            "description": "Invalid",
+            "string": "in"
+        },
+        {
+            "description": "Invalid",
+            "string": "-in"
+        },
+        {
+            "description": "Invalid",
+            "string": "Na"
+        },
+        {
+            "description": "Invalid",
+            "string": "-Na"
+        },
+        {
+            "description": "Invalid",
+            "string": "1.23abc"
+        },
+        {
+            "description": "Invalid",
+            "string": "1.23abcE+02"
+        },
+        {
+            "description": "Invalid",
+            "string": "1.23E+0aabs2"
+        }
+    ]
+}
+`},
+
+	{"decimal128-7.json", `
+{
+    "description": "Decimal128",
+    "bson_type": "0x13",
+    "test_key": "d",
+    "parseErrors": [
+       {
+          "description": "[basx572] Near-specials (Conversion_syntax)",
+          "string": "-9Inf"
+       },
+       {
+          "description": "[basx516] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "-1-"
+       },
+       {
+          "description": "[basx533] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "0000.."
+       },
+       {
+          "description": "[basx534] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": ".0000."
+       },
+       {
+          "description": "[basx535] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "00..00"
+       },
+       {
+          "description": "[basx569] Near-specials (Conversion_syntax)",
+          "string": "0Inf"
+       },
+       {
+          "description": "[basx571] Near-specials (Conversion_syntax)",
+          "string": "-0Inf"
+       },
+       {
+          "description": "[basx575] Near-specials (Conversion_syntax)",
+          "string": "0sNaN"
+       },
+       {
+          "description": "[basx503] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "++1"
+       },
+       {
+          "description": "[basx504] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "--1"
+       },
+       {
+          "description": "[basx505] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "-+1"
+       },
+       {
+          "description": "[basx506] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "+-1"
+       },
+       {
+          "description": "[basx510] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": " +1"
+       },
+       {
+          "description": "[basx513] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": " + 1"
+       },
+       {
+          "description": "[basx514] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": " - 1"
+       },
+       {
+          "description": "[basx501] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "."
+       },
+       {
+          "description": "[basx502] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": ".."
+       },
+       {
+          "description": "[basx519] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": ""
+       },
+       {
+          "description": "[basx525] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "e100"
+       },
+       {
+          "description": "[basx549] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "e+1"
+       },
+       {
+          "description": "[basx577] some baddies with dots and Es and dots and specials (Conversion_syntax)",
+          "string": ".e+1"
+       },
+       {
+          "description": "[basx578] some baddies with dots and Es and dots and specials (Conversion_syntax)",
+          "string": "+.e+1"
+       },
+       {
+          "description": "[basx581] some baddies with dots and Es and dots and specials (Conversion_syntax)",
+          "string": "E+1"
+       },
+       {
+          "description": "[basx582] some baddies with dots and Es and dots and specials (Conversion_syntax)",
+          "string": ".E+1"
+       },
+       {
+          "description": "[basx583] some baddies with dots and Es and dots and specials (Conversion_syntax)",
+          "string": "+.E+1"
+       },
+       {
+          "description": "[basx579] some baddies with dots and Es and dots and specials (Conversion_syntax)",
+          "string": "-.e+"
+       },
+       {
+          "description": "[basx580] some baddies with dots and Es and dots and specials (Conversion_syntax)",
+          "string": "-.e"
+       },
+       {
+          "description": "[basx584] some baddies with dots and Es and dots and specials (Conversion_syntax)",
+          "string": "-.E+"
+       },
+       {
+          "description": "[basx585] some baddies with dots and Es and dots and specials (Conversion_syntax)",
+          "string": "-.E"
+       },
+       {
+          "description": "[basx589] some baddies with dots and Es and dots and specials (Conversion_syntax)",
+          "string": "+.Inf"
+       },
+       {
+          "description": "[basx586] some baddies with dots and Es and dots and specials (Conversion_syntax)",
+          "string": ".NaN"
+       },
+       {
+          "description": "[basx587] some baddies with dots and Es and dots and specials (Conversion_syntax)",
+          "string": "-.NaN"
+       },
+       {
+          "description": "[basx545] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "ONE"
+       },
+       {
+          "description": "[basx561] Near-specials (Conversion_syntax)",
+          "string": "qNaN"
+       },
+       {
+          "description": "[basx573] Near-specials (Conversion_syntax)",
+          "string": "-sNa"
+       },
+       {
+          "description": "[basx588] some baddies with dots and Es and dots and specials (Conversion_syntax)",
+          "string": "+.sNaN"
+       },
+       {
+          "description": "[basx544] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "ten"
+       },
+       {
+          "description": "[basx527] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "u0b65"
+       },
+       {
+          "description": "[basx526] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "u0e5a"
+       },
+       {
+          "description": "[basx515] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "x"
+       },
+       {
+          "description": "[basx574] Near-specials (Conversion_syntax)",
+          "string": "xNaN"
+       },
+       {
+          "description": "[basx530] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": ".123.5"
+       },
+       {
+          "description": "[basx500] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "1..2"
+       },
+       {
+          "description": "[basx542] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "1e1.0"
+       },
+       {
+          "description": "[basx553] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "1E+1.2.3"
+       },
+       {
+          "description": "[basx543] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "1e123e"
+       },
+       {
+          "description": "[basx552] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "1E+1.2"
+       },
+       {
+          "description": "[basx546] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "1e.1"
+       },
+       {
+          "description": "[basx547] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "1e1."
+       },
+       {
+          "description": "[basx554] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "1E++1"
+       },
+       {
+          "description": "[basx555] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "1E--1"
+       },
+       {
+          "description": "[basx556] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "1E+-1"
+       },
+       {
+          "description": "[basx557] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "1E-+1"
+       },
+       {
+          "description": "[basx558] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "1E'1"
+       },
+       {
+          "description": "[basx559] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "1E\"1"
+       },
+       {
+          "description": "[basx520] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "1e-"
+       },
+       {
+          "description": "[basx560] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "1E"
+       },
+       {
+          "description": "[basx548] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "1ee"
+       },
+       {
+          "description": "[basx551] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "1.2.1"
+       },
+       {
+          "description": "[basx550] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "1.23.4"
+       },
+       {
+          "description": "[basx529] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "1.34.5"
+       },
+       {
+          "description": "[basx531] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "01.35."
+       },
+       {
+          "description": "[basx532] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "01.35-"
+       },
+       {
+          "description": "[basx518] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "3+"
+       },
+       {
+          "description": "[basx521] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "7e99999a"
+       },
+       {
+          "description": "[basx570] Near-specials (Conversion_syntax)",
+          "string": "9Inf"
+       },
+       {
+          "description": "[basx512] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "12 "
+       },
+       {
+          "description": "[basx517] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "12-"
+       },
+       {
+          "description": "[basx507] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "12e"
+       },
+       {
+          "description": "[basx508] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "12e++"
+       },
+       {
+          "description": "[basx509] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "12f4"
+       },
+       {
+          "description": "[basx536] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "111e*123"
+       },
+       {
+          "description": "[basx537] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "111e123-"
+       },
+       {
+          "description": "[basx540] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "111e1*23"
+       },
+       {
+          "description": "[basx538] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "111e+12+"
+       },
+       {
+          "description": "[basx539] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "111e1-3-"
+       },
+       {
+          "description": "[basx541] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "111E1e+3"
+       },
+       {
+          "description": "[basx528] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "123,65"
+       },
+       {
+          "description": "[basx523] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "7e12356789012x"
+       },
+       {
+          "description": "[basx522] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+          "string": "7e123567890x"
+       }
+    ]
+}
+`},
+}

+ 849 - 0
src/gopkg.in/mgo.v2/bson/decode.go

@@ -0,0 +1,849 @@
+// BSON library for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+//    list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+//    this list of conditions and the following disclaimer in the documentation
+//    and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// gobson - BSON library for Go.
+
+package bson
+
+import (
+	"fmt"
+	"math"
+	"net/url"
+	"reflect"
+	"strconv"
+	"sync"
+	"time"
+)
+
+// decoder holds the raw BSON input and a read cursor while unmarshalling.
+type decoder struct {
+	in      []byte       // raw BSON bytes being decoded
+	i       int          // current read offset into in
+	docType reflect.Type // concrete type used when a document lands in an interface{}
+}
+
+// typeM is the default representation for documents decoded into interface{}.
+var typeM = reflect.TypeOf(M{})
+
+// newDecoder returns a decoder positioned at the start of in, with
+// interface{} documents defaulting to the M (map) representation.
+func newDecoder(in []byte) *decoder {
+	return &decoder{in, 0, typeM}
+}
+
+// --------------------------------------------------------------------------
+// Some helper functions.
+
+// corrupted aborts decoding of a malformed document via panic.
+// NOTE(review): presumably recovered at the package's Unmarshal boundary and
+// surfaced as an error — confirm against the caller.
+func corrupted() {
+	panic("Document is corrupted")
+}
+
+// settableValueOf returns an addressable, settable copy of i's value,
+// suitable as a decoding target (reflect.ValueOf alone is not settable).
+func settableValueOf(i interface{}) reflect.Value {
+	v := reflect.ValueOf(i)
+	sv := reflect.New(v.Type()).Elem()
+	sv.Set(v)
+	return sv
+}
+
+// --------------------------------------------------------------------------
+// Unmarshaling of documents.
+
+// Cached classification of how a type relates to the Setter interface.
+const (
+	setterUnknown = iota // not yet classified (zero value of the cache map)
+	setterNone           // type does not implement Setter
+	setterType           // the type itself implements Setter
+	setterAddr           // only a pointer to the type implements Setter
+)
+
+var setterStyles map[reflect.Type]int
+var setterIface reflect.Type
+var setterMutex sync.RWMutex
+
+func init() {
+	var iface Setter
+	setterIface = reflect.TypeOf(&iface).Elem()
+	setterStyles = make(map[reflect.Type]int)
+}
+
+// setterStyle reports (and caches) whether outt implements Setter directly,
+// via its address, or not at all.
+func setterStyle(outt reflect.Type) int {
+	setterMutex.RLock()
+	style := setterStyles[outt]
+	setterMutex.RUnlock()
+	if style == setterUnknown {
+		setterMutex.Lock()
+		defer setterMutex.Unlock()
+		// NOTE(review): no re-check after taking the write lock; a concurrent
+		// caller may recompute the same entry, which is harmless since the
+		// result is deterministic for a given type.
+		if outt.Implements(setterIface) {
+			setterStyles[outt] = setterType
+		} else if reflect.PtrTo(outt).Implements(setterIface) {
+			setterStyles[outt] = setterAddr
+		} else {
+			setterStyles[outt] = setterNone
+		}
+		style = setterStyles[outt]
+	}
+	return style
+}
+
+// getSetter returns the Setter to use for out, or nil when the type has none
+// or the value cannot be addressed. A nil pointer target is allocated first
+// so SetBSON has something to write into.
+func getSetter(outt reflect.Type, out reflect.Value) Setter {
+	style := setterStyle(outt)
+	if style == setterNone {
+		return nil
+	}
+	if style == setterAddr {
+		if !out.CanAddr() {
+			return nil
+		}
+		out = out.Addr()
+	} else if outt.Kind() == reflect.Ptr && out.IsNil() {
+		out.Set(reflect.New(outt.Elem()))
+	}
+	return out.Interface().(Setter)
+}
+
+// clearMap removes every key from m, reusing the existing map rather than
+// allocating a new one.
+func clearMap(m reflect.Value) {
+	var none reflect.Value
+	for _, k := range m.MapKeys() {
+		m.SetMapIndex(k, none)
+	}
+}
+
+// readDocTo decodes one full BSON document at the current offset into out,
+// which may be a map with string-like keys, a struct, a Raw, a []DocElem or
+// []RawDocElem slice, a pointer to any of these, or an interface{} (filled
+// using d.docType). Types implementing Setter receive the raw document via
+// SetBSON instead. Panics (via corrupted) on malformed input.
+func (d *decoder) readDocTo(out reflect.Value) {
+	var elemType reflect.Type
+	outt := out.Type()
+	outk := outt.Kind()
+
+	for {
+		if outk == reflect.Ptr && out.IsNil() {
+			out.Set(reflect.New(outt.Elem()))
+		}
+		if setter := getSetter(outt, out); setter != nil {
+			// Custom unmarshalling: hand the raw document to the Setter.
+			var raw Raw
+			d.readDocTo(reflect.ValueOf(&raw))
+			err := setter.SetBSON(raw)
+			if _, ok := err.(*TypeError); err != nil && !ok {
+				panic(err)
+			}
+			return
+		}
+		if outk == reflect.Ptr {
+			out = out.Elem()
+			outt = out.Type()
+			outk = out.Kind()
+			continue
+		}
+		break
+	}
+
+	var fieldsMap map[string]fieldInfo
+	var inlineMap reflect.Value
+	start := d.i
+
+	origout := out
+	if outk == reflect.Interface {
+		// Materialize a concrete document value (map or slice of elems)
+		// chosen by d.docType and decode into that instead.
+		if d.docType.Kind() == reflect.Map {
+			mv := reflect.MakeMap(d.docType)
+			out.Set(mv)
+			out = mv
+		} else {
+			dv := reflect.New(d.docType).Elem()
+			out.Set(dv)
+			out = dv
+		}
+		outt = out.Type()
+		outk = outt.Kind()
+	}
+
+	docType := d.docType
+	keyType := typeString
+	convertKey := false
+	switch outk {
+	case reflect.Map:
+		keyType = outt.Key()
+		if keyType.Kind() != reflect.String {
+			panic("BSON map must have string keys. Got: " + outt.String())
+		}
+		if keyType != typeString {
+			convertKey = true
+		}
+		elemType = outt.Elem()
+		if elemType == typeIface {
+			d.docType = outt
+		}
+		if out.IsNil() {
+			out.Set(reflect.MakeMap(out.Type()))
+		} else if out.Len() > 0 {
+			clearMap(out)
+		}
+	case reflect.Struct:
+		if outt != typeRaw {
+			sinfo, err := getStructInfo(out.Type())
+			if err != nil {
+				panic(err)
+			}
+			fieldsMap = sinfo.FieldsMap
+			out.Set(sinfo.Zero)
+			if sinfo.InlineMap != -1 {
+				inlineMap = out.Field(sinfo.InlineMap)
+				if !inlineMap.IsNil() && inlineMap.Len() > 0 {
+					clearMap(inlineMap)
+				}
+				elemType = inlineMap.Type().Elem()
+				if elemType == typeIface {
+					d.docType = inlineMap.Type()
+				}
+			}
+		}
+	case reflect.Slice:
+		switch outt.Elem() {
+		case typeDocElem:
+			origout.Set(d.readDocElems(outt))
+			return
+		case typeRawDocElem:
+			origout.Set(d.readRawDocElems(outt))
+			return
+		}
+		fallthrough
+	default:
+		panic("Unsupported document type for unmarshalling: " + out.Type().String())
+	}
+
+	// Document layout: int32 total length, elements, trailing '\x00'.
+	end := int(d.readInt32())
+	end += d.i - 4
+	if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
+		corrupted()
+	}
+	for d.in[d.i] != '\x00' {
+		kind := d.readByte()
+		name := d.readCStr()
+		if d.i >= end {
+			corrupted()
+		}
+
+		switch outk {
+		case reflect.Map:
+			e := reflect.New(elemType).Elem()
+			if d.readElemTo(e, kind) {
+				k := reflect.ValueOf(name)
+				if convertKey {
+					k = k.Convert(keyType)
+				}
+				out.SetMapIndex(k, e)
+			}
+		case reflect.Struct:
+			if outt == typeRaw {
+				// Raw target: just advance past the element; the byte slice
+				// is captured below.
+				d.dropElem(kind)
+			} else {
+				if info, ok := fieldsMap[name]; ok {
+					if info.Inline == nil {
+						d.readElemTo(out.Field(info.Num), kind)
+					} else {
+						d.readElemTo(out.FieldByIndex(info.Inline), kind)
+					}
+				} else if inlineMap.IsValid() {
+					// Unknown field with a ",inline" map: store it there.
+					if inlineMap.IsNil() {
+						inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
+					}
+					e := reflect.New(elemType).Elem()
+					if d.readElemTo(e, kind) {
+						inlineMap.SetMapIndex(reflect.ValueOf(name), e)
+					}
+				} else {
+					d.dropElem(kind)
+				}
+			}
+		case reflect.Slice:
+			// Slice targets returned earlier (DocElem/RawDocElem); nothing here.
+		}
+
+		if d.i >= end {
+			corrupted()
+		}
+	}
+	d.i++ // '\x00'
+	if d.i != end {
+		corrupted()
+	}
+	d.docType = docType
+
+	if outt == typeRaw {
+		// 0x03 is the embedded-document element kind.
+		out.Set(reflect.ValueOf(Raw{0x03, d.in[start:d.i]}))
+	}
+}
+
+// readArrayDocTo decodes a BSON array document into a fixed-length array
+// value. The element count must match out.Len() exactly; the numeric BSON
+// keys are skipped rather than parsed, since array elements are positional.
+func (d *decoder) readArrayDocTo(out reflect.Value) {
+	end := int(d.readInt32())
+	end += d.i - 4
+	if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
+		corrupted()
+	}
+	i := 0
+	l := out.Len()
+	for d.in[d.i] != '\x00' {
+		if i >= l {
+			panic("Length mismatch on array field")
+		}
+		kind := d.readByte()
+		// Skip the element name (the array index as a C string).
+		for d.i < end && d.in[d.i] != '\x00' {
+			d.i++
+		}
+		if d.i >= end {
+			corrupted()
+		}
+		d.i++
+		d.readElemTo(out.Index(i), kind)
+		if d.i >= end {
+			corrupted()
+		}
+		i++
+	}
+	if i != l {
+		panic("Length mismatch on array field")
+	}
+	d.i++ // '\x00'
+	if d.i != end {
+		corrupted()
+	}
+}
+
+// readSliceDoc decodes a BSON array document into a new slice of type t,
+// skipping elements that are incompatible with the slice's element type.
+// A []RawDocElem target is not supported here: the array is dropped and a
+// zero slice is returned.
+func (d *decoder) readSliceDoc(t reflect.Type) interface{} {
+	tmp := make([]reflect.Value, 0, 8)
+	elemType := t.Elem()
+	if elemType == typeRawDocElem {
+		d.dropElem(0x04) // 0x04 is the array element kind
+		return reflect.Zero(t).Interface()
+	}
+
+	end := int(d.readInt32())
+	end += d.i - 4
+	if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
+		corrupted()
+	}
+	for d.in[d.i] != '\x00' {
+		kind := d.readByte()
+		// Skip the element name (the array index as a C string).
+		for d.i < end && d.in[d.i] != '\x00' {
+			d.i++
+		}
+		if d.i >= end {
+			corrupted()
+		}
+		d.i++
+		e := reflect.New(elemType).Elem()
+		if d.readElemTo(e, kind) {
+			tmp = append(tmp, e)
+		}
+		if d.i >= end {
+			corrupted()
+		}
+	}
+	d.i++ // '\x00'
+	if d.i != end {
+		corrupted()
+	}
+
+	// Copy the collected values into a correctly-sized slice.
+	n := len(tmp)
+	slice := reflect.MakeSlice(t, n, n)
+	for i := 0; i != n; i++ {
+		slice.Index(i).Set(tmp[i])
+	}
+	return slice.Interface()
+}
+
+var typeSlice = reflect.TypeOf([]interface{}{})
+var typeIface = typeSlice.Elem()
+
+// readDocElems decodes a document into an ordered []DocElem slice of type
+// typ, preserving element order. d.docType is switched to typ for the
+// duration so nested interface{} documents use the same representation.
+func (d *decoder) readDocElems(typ reflect.Type) reflect.Value {
+	docType := d.docType
+	d.docType = typ
+	slice := make([]DocElem, 0, 8)
+	d.readDocWith(func(kind byte, name string) {
+		e := DocElem{Name: name}
+		v := reflect.ValueOf(&e.Value)
+		if d.readElemTo(v.Elem(), kind) {
+			slice = append(slice, e)
+		}
+	})
+	slicev := reflect.New(typ).Elem()
+	slicev.Set(reflect.ValueOf(slice))
+	d.docType = docType
+	return slicev
+}
+
+// readRawDocElems is the Raw-valued counterpart of readDocElems: it decodes
+// a document into an ordered []RawDocElem slice of type typ, keeping each
+// element's value as raw bytes.
+func (d *decoder) readRawDocElems(typ reflect.Type) reflect.Value {
+	docType := d.docType
+	d.docType = typ
+	slice := make([]RawDocElem, 0, 8)
+	d.readDocWith(func(kind byte, name string) {
+		e := RawDocElem{Name: name}
+		v := reflect.ValueOf(&e.Value)
+		if d.readElemTo(v.Elem(), kind) {
+			slice = append(slice, e)
+		}
+	})
+	slicev := reflect.New(typ).Elem()
+	slicev.Set(reflect.ValueOf(slice))
+	d.docType = docType
+	return slicev
+}
+
+// readDocWith walks one document at the current offset, invoking f once per
+// element with its kind byte and name. f is responsible for consuming the
+// element's value (typically via readElemTo).
+func (d *decoder) readDocWith(f func(kind byte, name string)) {
+	end := int(d.readInt32())
+	end += d.i - 4
+	if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
+		corrupted()
+	}
+	for d.in[d.i] != '\x00' {
+		kind := d.readByte()
+		name := d.readCStr()
+		if d.i >= end {
+			corrupted()
+		}
+		f(kind, name)
+		if d.i >= end {
+			corrupted()
+		}
+	}
+	d.i++ // '\x00'
+	if d.i != end {
+		corrupted()
+	}
+}
+
+// --------------------------------------------------------------------------
+// Unmarshaling of individual elements within a document.
+
+// blackHole is a settable throwaway target used to consume unwanted elements.
+var blackHole = settableValueOf(struct{}{})
+
+// dropElem advances the cursor past one element of the given kind by
+// decoding it into blackHole and discarding the result.
+func (d *decoder) dropElem(kind byte) {
+	d.readElemTo(blackHole, kind)
+}
+
+// readElemTo attempts to decode a single element of the given BSON kind from
+// the document and store it in out. If the BSON kind and the target type are
+// not compatible, good is false and out is left unchanged. Embedded
+// documents (0x03) are delegated to readDocTo; every other kind is first
+// decoded into a native Go value and then converted to the target type.
+func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) {
+
+	start := d.i
+
+	if kind == 0x03 {
+		// Delegate unmarshaling of documents.
+		outt := out.Type()
+		outk := out.Kind()
+		switch outk {
+		case reflect.Interface, reflect.Ptr, reflect.Struct, reflect.Map:
+			d.readDocTo(out)
+			return true
+		}
+		if setterStyle(outt) != setterNone {
+			d.readDocTo(out)
+			return true
+		}
+		if outk == reflect.Slice {
+			switch outt.Elem() {
+			case typeDocElem:
+				out.Set(d.readDocElems(outt))
+			case typeRawDocElem:
+				out.Set(d.readRawDocElems(outt))
+			default:
+				// Incompatible slice: consume and discard the document.
+				d.readDocTo(blackHole)
+			}
+			return true
+		}
+		d.readDocTo(blackHole)
+		return true
+	}
+
+	// First decode the element into a native Go value, then convert below.
+	var in interface{}
+
+	switch kind {
+	case 0x01: // Float64
+		in = d.readFloat64()
+	case 0x02: // UTF-8 string
+		in = d.readStr()
+	case 0x03: // Document
+		panic("Can't happen. Handled above.")
+	case 0x04: // Array
+		outt := out.Type()
+		if setterStyle(outt) != setterNone {
+			// Skip the value so its data is handed to the setter below.
+			d.dropElem(kind)
+			break
+		}
+		for outt.Kind() == reflect.Ptr {
+			outt = outt.Elem()
+		}
+		switch outt.Kind() {
+		case reflect.Array:
+			d.readArrayDocTo(out)
+			return true
+		case reflect.Slice:
+			in = d.readSliceDoc(outt)
+		default:
+			in = d.readSliceDoc(typeSlice)
+		}
+	case 0x05: // Binary
+		b := d.readBinary()
+		if b.Kind == 0x00 || b.Kind == 0x02 {
+			// Generic (and obsolete generic) binary decodes as plain []byte.
+			in = b.Data
+		} else {
+			in = b
+		}
+	case 0x06: // Undefined (obsolete, but still seen in the wild)
+		in = Undefined
+	case 0x07: // ObjectId
+		in = ObjectId(d.readBytes(12))
+	case 0x08: // Bool
+		in = d.readBool()
+	case 0x09: // Timestamp
+		// MongoDB handles timestamps as milliseconds.
+		i := d.readInt64()
+		if i == -62135596800000 {
+			// Zero time sentinel (time.Time{} in milliseconds).
+			in = time.Time{} // In UTC for convenience.
+		} else {
+			in = time.Unix(i/1e3, i%1e3*1e6)
+		}
+	case 0x0A: // Nil
+		in = nil
+	case 0x0B: // RegEx
+		in = d.readRegEx()
+	case 0x0C: // DBPointer (deprecated)
+		in = DBPointer{Namespace: d.readStr(), Id: ObjectId(d.readBytes(12))}
+	case 0x0D: // JavaScript without scope
+		in = JavaScript{Code: d.readStr()}
+	case 0x0E: // Symbol
+		in = Symbol(d.readStr())
+	case 0x0F: // JavaScript with scope
+		d.i += 4 // Skip length
+		js := JavaScript{d.readStr(), make(M)}
+		d.readDocTo(reflect.ValueOf(js.Scope))
+		in = js
+	case 0x10: // Int32
+		in = int(d.readInt32())
+	case 0x11: // Mongo-specific timestamp
+		in = MongoTimestamp(d.readInt64())
+	case 0x12: // Int64
+		in = d.readInt64()
+	case 0x13: // Decimal128
+		in = Decimal128{
+			l: uint64(d.readInt64()),
+			h: uint64(d.readInt64()),
+		}
+	case 0x7F: // Max key
+		in = MaxKey
+	case 0xFF: // Min key
+		in = MinKey
+	default:
+		panic(fmt.Sprintf("Unknown element kind (0x%02X)", kind))
+	}
+
+	outt := out.Type()
+
+	if outt == typeRaw {
+		// Raw targets keep the element's bytes untouched.
+		out.Set(reflect.ValueOf(Raw{kind, d.in[start:d.i]}))
+		return true
+	}
+
+	if setter := getSetter(outt, out); setter != nil {
+		err := setter.SetBSON(Raw{kind, d.in[start:d.i]})
+		if err == SetZero {
+			out.Set(reflect.Zero(outt))
+			return true
+		}
+		if err == nil {
+			return true
+		}
+		if _, ok := err.(*TypeError); !ok {
+			panic(err)
+		}
+		return false
+	}
+
+	if in == nil {
+		out.Set(reflect.Zero(outt))
+		return true
+	}
+
+	outk := outt.Kind()
+
+	// Dereference and initialize pointer if necessary.
+	first := true
+	for outk == reflect.Ptr {
+		if !out.IsNil() {
+			out = out.Elem()
+		} else {
+			elem := reflect.New(outt.Elem())
+			if first {
+				// Only set if value is compatible.
+				first = false
+				defer func(out, elem reflect.Value) {
+					if good {
+						out.Set(elem)
+					}
+				}(out, elem)
+			} else {
+				out.Set(elem)
+			}
+			out = elem
+		}
+		outt = out.Type()
+		outk = outt.Kind()
+	}
+
+	inv := reflect.ValueOf(in)
+	if outt == inv.Type() {
+		// Exact type match: assign directly.
+		out.Set(inv)
+		return true
+	}
+
+	// Cross-type conversions between the native value and the target kind.
+	switch outk {
+	case reflect.Interface:
+		out.Set(inv)
+		return true
+	case reflect.String:
+		switch inv.Kind() {
+		case reflect.String:
+			out.SetString(inv.String())
+			return true
+		case reflect.Slice:
+			if b, ok := in.([]byte); ok {
+				out.SetString(string(b))
+				return true
+			}
+		case reflect.Int, reflect.Int64:
+			if outt == typeJSONNumber {
+				out.SetString(strconv.FormatInt(inv.Int(), 10))
+				return true
+			}
+		case reflect.Float64:
+			if outt == typeJSONNumber {
+				out.SetString(strconv.FormatFloat(inv.Float(), 'f', -1, 64))
+				return true
+			}
+		}
+	case reflect.Slice, reflect.Array:
+		// Remember, array (0x04) slices are built with the correct
+		// element type.  If we are here, must be a cross BSON kind
+		// conversion (e.g. 0x05 unmarshalling on string).
+		if outt.Elem().Kind() != reflect.Uint8 {
+			break
+		}
+		switch inv.Kind() {
+		case reflect.String:
+			slice := []byte(inv.String())
+			out.Set(reflect.ValueOf(slice))
+			return true
+		case reflect.Slice:
+			switch outt.Kind() {
+			case reflect.Array:
+				reflect.Copy(out, inv)
+			case reflect.Slice:
+				out.SetBytes(inv.Bytes())
+			}
+			return true
+		}
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		switch inv.Kind() {
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			out.SetInt(inv.Int())
+			return true
+		case reflect.Float32, reflect.Float64:
+			out.SetInt(int64(inv.Float()))
+			return true
+		case reflect.Bool:
+			if inv.Bool() {
+				out.SetInt(1)
+			} else {
+				out.SetInt(0)
+			}
+			return true
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+			panic("can't happen: no uint types in BSON (!?)")
+		}
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		switch inv.Kind() {
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			out.SetUint(uint64(inv.Int()))
+			return true
+		case reflect.Float32, reflect.Float64:
+			out.SetUint(uint64(inv.Float()))
+			return true
+		case reflect.Bool:
+			if inv.Bool() {
+				out.SetUint(1)
+			} else {
+				out.SetUint(0)
+			}
+			return true
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+			panic("Can't happen. No uint types in BSON.")
+		}
+	case reflect.Float32, reflect.Float64:
+		switch inv.Kind() {
+		case reflect.Float32, reflect.Float64:
+			out.SetFloat(inv.Float())
+			return true
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			out.SetFloat(float64(inv.Int()))
+			return true
+		case reflect.Bool:
+			if inv.Bool() {
+				out.SetFloat(1)
+			} else {
+				out.SetFloat(0)
+			}
+			return true
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+			panic("Can't happen. No uint types in BSON?")
+		}
+	case reflect.Bool:
+		switch inv.Kind() {
+		case reflect.Bool:
+			out.SetBool(inv.Bool())
+			return true
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			out.SetBool(inv.Int() != 0)
+			return true
+		case reflect.Float32, reflect.Float64:
+			out.SetBool(inv.Float() != 0)
+			return true
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+			panic("Can't happen. No uint types in BSON?")
+		}
+	case reflect.Struct:
+		if outt == typeURL && inv.Kind() == reflect.String {
+			u, err := url.Parse(inv.String())
+			if err != nil {
+				panic(err)
+			}
+			out.Set(reflect.ValueOf(u).Elem())
+			return true
+		}
+		if outt == typeBinary {
+			if b, ok := in.([]byte); ok {
+				out.Set(reflect.ValueOf(Binary{Data: b}))
+				return true
+			}
+		}
+	}
+
+	// No compatible conversion found; out is unchanged.
+	return false
+}
+
+// --------------------------------------------------------------------------
+// Parsers of basic types.
+
+// readRegEx reads a BSON regular expression: two consecutive C strings,
+// pattern then options.
+func (d *decoder) readRegEx() RegEx {
+	re := RegEx{}
+	re.Pattern = d.readCStr()
+	re.Options = d.readCStr()
+	return re
+}
+
+// readBinary reads a BSON binary element: int32 length, subtype byte, data.
+func (d *decoder) readBinary() Binary {
+	l := d.readInt32()
+	b := Binary{}
+	b.Kind = d.readByte()
+	b.Data = d.readBytes(l)
+	if b.Kind == 0x02 && len(b.Data) >= 4 {
+		// Weird obsolete format with redundant length.
+		b.Data = b.Data[4:]
+	}
+	return b
+}
+
+// readStr reads a BSON string: int32 length (including the terminator),
+// bytes, then a mandatory '\x00'.
+func (d *decoder) readStr() string {
+	l := d.readInt32()
+	b := d.readBytes(l - 1)
+	if d.readByte() != '\x00' {
+		corrupted()
+	}
+	return string(b)
+}
+
+// readCStr reads a NUL-terminated C string and leaves the cursor just past
+// the terminator.
+func (d *decoder) readCStr() string {
+	start := d.i
+	end := start
+	l := len(d.in)
+	for ; end != l; end++ {
+		if d.in[end] == '\x00' {
+			break
+		}
+	}
+	d.i = end + 1
+	if d.i > l {
+		// Ran off the end without finding a terminator.
+		corrupted()
+	}
+	return string(d.in[start:end])
+}
+
+// readBool reads one byte and maps 0/1 to false/true; any other value is a
+// malformed document.
+func (d *decoder) readBool() bool {
+	b := d.readByte()
+	if b == 0 {
+		return false
+	}
+	if b == 1 {
+		return true
+	}
+	panic(fmt.Sprintf("encoded boolean must be 1 or 0, found %d", b))
+}
+
+// readFloat64 reads an IEEE 754 double stored as 8 little-endian bytes.
+func (d *decoder) readFloat64() float64 {
+	return math.Float64frombits(uint64(d.readInt64()))
+}
+
+// readInt32 reads a little-endian 32-bit integer.
+func (d *decoder) readInt32() int32 {
+	b := d.readBytes(4)
+	return int32((uint32(b[0]) << 0) |
+		(uint32(b[1]) << 8) |
+		(uint32(b[2]) << 16) |
+		(uint32(b[3]) << 24))
+}
+
+// readInt64 reads a little-endian 64-bit integer.
+func (d *decoder) readInt64() int64 {
+	b := d.readBytes(8)
+	return int64((uint64(b[0]) << 0) |
+		(uint64(b[1]) << 8) |
+		(uint64(b[2]) << 16) |
+		(uint64(b[3]) << 24) |
+		(uint64(b[4]) << 32) |
+		(uint64(b[5]) << 40) |
+		(uint64(b[6]) << 48) |
+		(uint64(b[7]) << 56))
+}
+
+// readByte reads a single byte, panicking via corrupted on end of input.
+func (d *decoder) readByte() byte {
+	i := d.i
+	d.i++
+	if d.i > len(d.in) {
+		corrupted()
+	}
+	return d.in[i]
+}
+
+// readBytes reads exactly length bytes as a subslice of the input (no copy).
+// The d.i < start comparison also guards against integer overflow of the
+// advanced cursor.
+func (d *decoder) readBytes(length int32) []byte {
+	if length < 0 {
+		corrupted()
+	}
+	start := d.i
+	d.i += int(length)
+	if d.i < start || d.i > len(d.in) {
+		corrupted()
+	}
+	return d.in[start : start+int(length)]
+}

+ 521 - 0
src/gopkg.in/mgo.v2/bson/encode.go

@@ -0,0 +1,521 @@
+// BSON library for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+//    list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+//    this list of conditions and the following disclaimer in the documentation
+//    and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// gobson - BSON library for Go.
+
+package bson
+
+import (
+	"encoding/json"
+	"fmt"
+	"math"
+	"net/url"
+	"reflect"
+	"strconv"
+	"sync"
+	"time"
+)
+
+// --------------------------------------------------------------------------
+// Some internal infrastructure.
+
// Cached reflect.Type values for the special BSON value types, so the
// encoder can compare a value's dynamic type against them cheaply.
var (
	typeBinary         = reflect.TypeOf(Binary{})
	typeObjectId       = reflect.TypeOf(ObjectId(""))
	typeDBPointer      = reflect.TypeOf(DBPointer{"", ObjectId("")})
	typeSymbol         = reflect.TypeOf(Symbol(""))
	typeMongoTimestamp = reflect.TypeOf(MongoTimestamp(0))
	typeOrderKey       = reflect.TypeOf(MinKey)
	typeDocElem        = reflect.TypeOf(DocElem{})
	typeRawDocElem     = reflect.TypeOf(RawDocElem{})
	typeRaw            = reflect.TypeOf(Raw{})
	typeURL            = reflect.TypeOf(url.URL{})
	typeTime           = reflect.TypeOf(time.Time{})
	typeString         = reflect.TypeOf("")
	typeJSONNumber     = reflect.TypeOf(json.Number(""))
)
+
// itoaCacheSize bounds the table of pre-rendered small integers used
// by itoa to avoid repeated strconv work for array index keys.
const itoaCacheSize = 32

// itoaCache holds the decimal form of 0 .. itoaCacheSize-1.
var itoaCache []string

func init() {
	itoaCache = make([]string, itoaCacheSize)
	for n := range itoaCache {
		itoaCache[n] = strconv.Itoa(n)
	}
}

// itoa renders a non-negative int in decimal, serving small values
// from the precomputed cache.
func itoa(i int) string {
	if i < itoaCacheSize {
		return itoaCache[i]
	}
	return strconv.Itoa(i)
}
+
+// --------------------------------------------------------------------------
+// Marshaling of the document value itself.
+
// encoder accumulates the marshaled BSON document in out as it walks
// the value being encoded.
type encoder struct {
	out  []byte
	Lock sync.Mutex // NOTE(review): not referenced anywhere in this file — confirm external use before removing.
}
+
// addDoc appends a complete BSON document for v. Getter values are
// resolved and pointers dereferenced first; a Raw value is copied
// verbatim (and must hold a document: kind 0x03, or 0x00 meaning
// unspecified); otherwise v must be a map, struct, array, or slice.
func (e *encoder) addDoc(v reflect.Value) {
	for {
		if vi, ok := v.Interface().(Getter); ok {
			getv, err := vi.GetBSON()
			if err != nil {
				panic(err)
			}
			v = reflect.ValueOf(getv)
			continue
		}
		if v.Kind() == reflect.Ptr {
			v = v.Elem()
			continue
		}
		break
	}

	if v.Type() == typeRaw {
		raw := v.Interface().(Raw)
		if raw.Kind != 0x03 && raw.Kind != 0x00 {
			panic("Attempted to marshal Raw kind " + strconv.Itoa(int(raw.Kind)) + " as a document")
		}
		if len(raw.Data) == 0 {
			panic("Attempted to marshal empty Raw document")
		}
		e.addBytes(raw.Data...)
		return
	}

	// Reserve space for the document length, emit the body, then
	// patch the length in; it covers the length bytes themselves
	// through the trailing terminator.
	start := e.reserveInt32()

	switch v.Kind() {
	case reflect.Map:
		e.addMap(v)
	case reflect.Struct:
		e.addStruct(v)
	case reflect.Array, reflect.Slice:
		e.addSlice(v)
	default:
		panic("Can't marshal " + v.Type().String() + " as a BSON document")
	}

	// Document terminator.
	e.addBytes(0)
	e.setInt32(start, int32(len(e.out)-start))
}
+
+func (e *encoder) addMap(v reflect.Value) {
+	vm := v.MapKeys()
+	vv := make([]reflect.Value, len(vm))
+	for n, k := range vm {
+		vv[n] = v.MapIndex(k)
+	}
+	for n, k := range vm {
+		e.addElem(k.String(), vv[n], false)
+	}
+}
+
// addStruct appends one element per marshalable field of the struct,
// honoring the per-field metadata (key name, omitempty, minsize,
// inline) computed by getStructInfo. Entries of an ",inline" map
// field are emitted first and must not collide with a named field key.
func (e *encoder) addStruct(v reflect.Value) {
	sinfo, err := getStructInfo(v.Type())
	if err != nil {
		panic(err)
	}
	var value reflect.Value
	if sinfo.InlineMap >= 0 {
		m := v.Field(sinfo.InlineMap)
		if m.Len() > 0 {
			for _, k := range m.MapKeys() {
				ks := k.String()
				if _, found := sinfo.FieldsMap[ks]; found {
					panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", ks))
				}
				e.addElem(ks, m.MapIndex(k), false)
			}
		}
	}
	for _, info := range sinfo.FieldsList {
		if info.Inline == nil {
			value = v.Field(info.Num)
		} else {
			// Field promoted through an inlined struct.
			value = v.FieldByIndex(info.Inline)
		}
		if info.OmitEmpty && isZero(value) {
			continue
		}
		e.addElem(info.Key, value, info.MinSize)
	}
}
+
// isZero reports whether v holds its type's zero value for the
// purposes of the omitempty flag. time.Time uses its own IsZero;
// other structs are zero when every exported (or anonymous) field is
// recursively zero. Kinds not listed are never considered zero.
func isZero(v reflect.Value) bool {
	switch v.Kind() {
	case reflect.String:
		return len(v.String()) == 0
	case reflect.Ptr, reflect.Interface:
		return v.IsNil()
	case reflect.Slice:
		return v.Len() == 0
	case reflect.Map:
		return v.Len() == 0
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return v.Int() == 0
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return v.Uint() == 0
	case reflect.Float32, reflect.Float64:
		return v.Float() == 0
	case reflect.Bool:
		return !v.Bool()
	case reflect.Struct:
		vt := v.Type()
		if vt == typeTime {
			return v.Interface().(time.Time).IsZero()
		}
		for i := 0; i < v.NumField(); i++ {
			if vt.Field(i).PkgPath != "" && !vt.Field(i).Anonymous {
				continue // Private field
			}
			if !isZero(v.Field(i)) {
				return false
			}
		}
		return true
	}
	return false
}
+
// addSlice appends the elements of a slice or array. D, RawD,
// []DocElem, and []RawDocElem values keep their explicit element
// names (preserving document order); any other slice or array is
// encoded with decimal index strings as keys, BSON-array style.
func (e *encoder) addSlice(v reflect.Value) {
	vi := v.Interface()
	if d, ok := vi.(D); ok {
		for _, elem := range d {
			e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
		}
		return
	}
	if d, ok := vi.(RawD); ok {
		for _, elem := range d {
			e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
		}
		return
	}
	l := v.Len()
	et := v.Type().Elem()
	if et == typeDocElem {
		for i := 0; i < l; i++ {
			elem := v.Index(i).Interface().(DocElem)
			e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
		}
		return
	}
	if et == typeRawDocElem {
		for i := 0; i < l; i++ {
			elem := v.Index(i).Interface().(RawDocElem)
			e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
		}
		return
	}
	for i := 0; i < l; i++ {
		e.addElem(itoa(i), v.Index(i), false)
	}
}
+
+// --------------------------------------------------------------------------
+// Marshaling of elements in a document.
+
// addElemName appends an element header: the BSON type byte followed
// by the element name as a NUL-terminated C string.
func (e *encoder) addElemName(kind byte, name string) {
	e.addBytes(kind)
	e.addBytes([]byte(name)...)
	e.addBytes(0)
}
+
// addElem appends a single named element for v, dispatching on its
// kind and dynamic type to pick the BSON element type byte. minSize
// requests the smaller int32 representation for integers when the
// value fits. Invalid (zero) values encode as null (0x0A).
func (e *encoder) addElem(name string, v reflect.Value, minSize bool) {

	if !v.IsValid() {
		// 0x0A: null.
		e.addElemName(0x0A, name)
		return
	}

	if getter, ok := v.Interface().(Getter); ok {
		getv, err := getter.GetBSON()
		if err != nil {
			panic(err)
		}
		e.addElem(name, reflect.ValueOf(getv), minSize)
		return
	}

	switch v.Kind() {

	case reflect.Interface:
		e.addElem(name, v.Elem(), minSize)

	case reflect.Ptr:
		e.addElem(name, v.Elem(), minSize)

	case reflect.String:
		s := v.String()
		switch v.Type() {
		case typeObjectId:
			// 0x07: ObjectId, exactly 12 raw bytes.
			if len(s) != 12 {
				panic("ObjectIDs must be exactly 12 bytes long (got " +
					strconv.Itoa(len(s)) + ")")
			}
			e.addElemName(0x07, name)
			e.addBytes([]byte(s)...)
		case typeSymbol:
			// 0x0E: symbol.
			e.addElemName(0x0E, name)
			e.addStr(s)
		case typeJSONNumber:
			// json.Number: prefer int64 (0x12), fall back to double (0x01).
			n := v.Interface().(json.Number)
			if i, err := n.Int64(); err == nil {
				e.addElemName(0x12, name)
				e.addInt64(i)
			} else if f, err := n.Float64(); err == nil {
				e.addElemName(0x01, name)
				e.addFloat64(f)
			} else {
				panic("failed to convert json.Number to a number: " + s)
			}
		default:
			// 0x02: UTF-8 string.
			e.addElemName(0x02, name)
			e.addStr(s)
		}

	case reflect.Float32, reflect.Float64:
		// 0x01: double.
		e.addElemName(0x01, name)
		e.addFloat64(v.Float())

	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		u := v.Uint()
		if int64(u) < 0 {
			panic("BSON has no uint64 type, and value is too large to fit correctly in an int64")
		} else if u <= math.MaxInt32 && (minSize || v.Kind() <= reflect.Uint32) {
			// 0x10: int32.
			e.addElemName(0x10, name)
			e.addInt32(int32(u))
		} else {
			// 0x12: int64.
			e.addElemName(0x12, name)
			e.addInt64(int64(u))
		}

	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		switch v.Type() {
		case typeMongoTimestamp:
			// 0x11: MongoDB internal timestamp.
			e.addElemName(0x11, name)
			e.addInt64(v.Int())

		case typeOrderKey:
			// 0x7F: MaxKey; 0xFF: MinKey.
			if v.Int() == int64(MaxKey) {
				e.addElemName(0x7F, name)
			} else {
				e.addElemName(0xFF, name)
			}

		default:
			i := v.Int()
			if (minSize || v.Type().Kind() != reflect.Int64) && i >= math.MinInt32 && i <= math.MaxInt32 {
				// It fits into an int32, encode as such.
				e.addElemName(0x10, name)
				e.addInt32(int32(i))
			} else {
				e.addElemName(0x12, name)
				e.addInt64(i)
			}
		}

	case reflect.Bool:
		// 0x08: boolean, encoded as a 0/1 byte.
		e.addElemName(0x08, name)
		if v.Bool() {
			e.addBytes(1)
		} else {
			e.addBytes(0)
		}

	case reflect.Map:
		// 0x03: embedded document.
		e.addElemName(0x03, name)
		e.addDoc(v)

	case reflect.Slice:
		vt := v.Type()
		et := vt.Elem()
		if et.Kind() == reflect.Uint8 {
			// 0x05: binary, generic subtype 0x00.
			e.addElemName(0x05, name)
			e.addBinary(0x00, v.Bytes())
		} else if et == typeDocElem || et == typeRawDocElem {
			// Ordered document representation.
			e.addElemName(0x03, name)
			e.addDoc(v)
		} else {
			// 0x04: array.
			e.addElemName(0x04, name)
			e.addDoc(v)
		}

	case reflect.Array:
		et := v.Type().Elem()
		if et.Kind() == reflect.Uint8 {
			e.addElemName(0x05, name)
			if v.CanAddr() {
				e.addBinary(0x00, v.Slice(0, v.Len()).Interface().([]byte))
			} else {
				// Unaddressable byte array: emit the binary header and
				// copy the bytes out one element at a time.
				n := v.Len()
				e.addInt32(int32(n))
				e.addBytes(0x00)
				for i := 0; i < n; i++ {
					el := v.Index(i)
					e.addBytes(byte(el.Uint()))
				}
			}
		} else {
			e.addElemName(0x04, name)
			e.addDoc(v)
		}

	case reflect.Struct:
		switch s := v.Interface().(type) {

		case Raw:
			// Pre-encoded element; kind 0x00 defaults to document.
			kind := s.Kind
			if kind == 0x00 {
				kind = 0x03
			}
			if len(s.Data) == 0 && kind != 0x06 && kind != 0x0A && kind != 0xFF && kind != 0x7F {
				panic("Attempted to marshal empty Raw document")
			}
			e.addElemName(kind, name)
			e.addBytes(s.Data...)

		case Binary:
			e.addElemName(0x05, name)
			e.addBinary(s.Kind, s.Data)

		case Decimal128:
			// 0x13: decimal128, low word then high word.
			e.addElemName(0x13, name)
			e.addInt64(int64(s.l))
			e.addInt64(int64(s.h))

		case DBPointer:
			// 0x0C: deprecated DBPointer.
			e.addElemName(0x0C, name)
			e.addStr(s.Namespace)
			if len(s.Id) != 12 {
				panic("ObjectIDs must be exactly 12 bytes long (got " +
					strconv.Itoa(len(s.Id)) + ")")
			}
			e.addBytes([]byte(s.Id)...)

		case RegEx:
			// 0x0B: regex, pattern and options as C strings.
			e.addElemName(0x0B, name)
			e.addCStr(s.Pattern)
			e.addCStr(s.Options)

		case JavaScript:
			if s.Scope == nil {
				// 0x0D: plain JavaScript code.
				e.addElemName(0x0D, name)
				e.addStr(s.Code)
			} else {
				// 0x0F: code-with-scope, wrapped in its own length prefix.
				e.addElemName(0x0F, name)
				start := e.reserveInt32()
				e.addStr(s.Code)
				e.addDoc(reflect.ValueOf(s.Scope))
				e.setInt32(start, int32(len(e.out)-start))
			}

		case time.Time:
			// MongoDB handles timestamps as milliseconds.
			e.addElemName(0x09, name)
			e.addInt64(s.Unix()*1000 + int64(s.Nanosecond()/1e6))

		case url.URL:
			e.addElemName(0x02, name)
			e.addStr(s.String())

		case undefined:
			// 0x06: deprecated undefined value.
			e.addElemName(0x06, name)

		default:
			// Any other struct becomes an embedded document.
			e.addElemName(0x03, name)
			e.addDoc(v)
		}

	default:
		panic("Can't marshal " + v.Type().String() + " in a BSON document")
	}
}
+
+// --------------------------------------------------------------------------
+// Marshaling of base types.
+
// addBinary appends a binary payload: int32 length, subtype byte,
// then the data. Obsolete subtype 0x02 additionally embeds a second,
// redundant int32 length ahead of the payload.
func (e *encoder) addBinary(subtype byte, v []byte) {
	if subtype == 0x02 {
		// Wonder how that brilliant idea came to life. Obsolete, luckily.
		e.addInt32(int32(len(v) + 4))
		e.addBytes(subtype)
		e.addInt32(int32(len(v)))
	} else {
		e.addInt32(int32(len(v)))
		e.addBytes(subtype)
	}
	e.addBytes(v...)
}
+
// addStr appends a BSON string: int32 length (including the trailing
// NUL) followed by the NUL-terminated bytes.
func (e *encoder) addStr(v string) {
	e.addInt32(int32(len(v) + 1))
	e.addCStr(v)
}
+
// addCStr appends the bytes of v followed by a NUL terminator.
func (e *encoder) addCStr(v string) {
	e.addBytes([]byte(v)...)
	e.addBytes(0)
}
+
// reserveInt32 appends four placeholder bytes and returns their
// offset, to be patched later with setInt32 once the final length
// is known.
func (e *encoder) reserveInt32() (pos int) {
	pos = len(e.out)
	e.addBytes(0, 0, 0, 0)
	return pos
}
+
// setInt32 overwrites four bytes at pos with v in little-endian order.
func (e *encoder) setInt32(pos int, v int32) {
	e.out[pos+0] = byte(v)
	e.out[pos+1] = byte(v >> 8)
	e.out[pos+2] = byte(v >> 16)
	e.out[pos+3] = byte(v >> 24)
}
+
// addInt32 appends v in little-endian order.
func (e *encoder) addInt32(v int32) {
	u := uint32(v)
	e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24))
}
+
// addInt64 appends v in little-endian order.
func (e *encoder) addInt64(v int64) {
	u := uint64(v)
	e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24),
		byte(u>>32), byte(u>>40), byte(u>>48), byte(u>>56))
}
+
// addFloat64 appends the IEEE 754 bit pattern of v, little-endian.
func (e *encoder) addFloat64(v float64) {
	e.addInt64(int64(math.Float64bits(v)))
}
+
// addBytes appends raw bytes to the output buffer.
func (e *encoder) addBytes(v ...byte) {
	e.out = append(e.out, v...)
}

+ 380 - 0
src/gopkg.in/mgo.v2/bson/json.go

@@ -0,0 +1,380 @@
+package bson
+
+import (
+	"bytes"
+	"encoding/base64"
+	"fmt"
+	"gopkg.in/mgo.v2/internal/json"
+	"strconv"
+	"time"
+)
+
// UnmarshalJSON unmarshals a JSON value that may hold non-standard
// syntax as defined in BSON's extended JSON specification, decoding
// the result into value using the full jsonExt extension set.
func UnmarshalJSON(data []byte, value interface{}) error {
	d := json.NewDecoder(bytes.NewBuffer(data))
	d.Extend(&jsonExt)
	return d.Decode(value)
}
+
// MarshalJSON marshals a JSON value that may hold non-standard
// syntax as defined in BSON's extended JSON specification. Note that
// the underlying encoder appends a trailing newline to its output.
func MarshalJSON(value interface{}) ([]byte, error) {
	var buf bytes.Buffer
	e := json.NewEncoder(&buf)
	e.Extend(&jsonExt)
	err := e.Encode(value)
	if err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
+
// jdec is used internally by the JSON decoding functions
// so they may unmarshal functions without getting into endless
// recursion due to keyed objects. It applies only the funcExt
// extension set, not the keyed decoders in jsonExt.
func jdec(data []byte, value interface{}) error {
	d := json.NewDecoder(bytes.NewBuffer(data))
	d.Extend(&funcExt)
	return d.Decode(value)
}
+
+var jsonExt json.Extension
+var funcExt json.Extension
+
+// TODO
+// - Shell regular expressions ("/regexp/opts")
+
// init wires up the two extension sets: funcExt handles the shell
// function forms (BinData(...), ISODate(...), ObjectId(...), ...) and
// named constants, while jsonExt adds the keyed extended-JSON forms
// ($binary, $date, $oid, ...) and the per-type encoders, then absorbs
// funcExt so UnmarshalJSON understands both syntaxes.
func init() {
	jsonExt.DecodeUnquotedKeys(true)
	jsonExt.DecodeTrailingCommas(true)

	funcExt.DecodeFunc("BinData", "$binaryFunc", "$type", "$binary")
	jsonExt.DecodeKeyed("$binary", jdecBinary)
	jsonExt.DecodeKeyed("$binaryFunc", jdecBinary)
	jsonExt.EncodeType([]byte(nil), jencBinarySlice)
	jsonExt.EncodeType(Binary{}, jencBinaryType)

	funcExt.DecodeFunc("ISODate", "$dateFunc", "S")
	funcExt.DecodeFunc("new Date", "$dateFunc", "S")
	jsonExt.DecodeKeyed("$date", jdecDate)
	jsonExt.DecodeKeyed("$dateFunc", jdecDate)
	jsonExt.EncodeType(time.Time{}, jencDate)

	funcExt.DecodeFunc("Timestamp", "$timestamp", "t", "i")
	jsonExt.DecodeKeyed("$timestamp", jdecTimestamp)
	jsonExt.EncodeType(MongoTimestamp(0), jencTimestamp)

	funcExt.DecodeConst("undefined", Undefined)

	jsonExt.DecodeKeyed("$regex", jdecRegEx)
	jsonExt.EncodeType(RegEx{}, jencRegEx)

	funcExt.DecodeFunc("ObjectId", "$oidFunc", "Id")
	jsonExt.DecodeKeyed("$oid", jdecObjectId)
	jsonExt.DecodeKeyed("$oidFunc", jdecObjectId)
	jsonExt.EncodeType(ObjectId(""), jencObjectId)

	funcExt.DecodeFunc("DBRef", "$dbrefFunc", "$ref", "$id")
	jsonExt.DecodeKeyed("$dbrefFunc", jdecDBRef)

	funcExt.DecodeFunc("NumberLong", "$numberLongFunc", "N")
	jsonExt.DecodeKeyed("$numberLong", jdecNumberLong)
	jsonExt.DecodeKeyed("$numberLongFunc", jdecNumberLong)
	jsonExt.EncodeType(int64(0), jencNumberLong)
	jsonExt.EncodeType(int(0), jencInt)

	funcExt.DecodeConst("MinKey", MinKey)
	funcExt.DecodeConst("MaxKey", MaxKey)
	jsonExt.DecodeKeyed("$minKey", jdecMinKey)
	jsonExt.DecodeKeyed("$maxKey", jdecMaxKey)
	jsonExt.EncodeType(orderKey(0), jencMinMaxKey)

	jsonExt.DecodeKeyed("$undefined", jdecUndefined)
	jsonExt.EncodeType(Undefined, jencUndefined)

	jsonExt.Extend(&funcExt)
}
+
// fbytes formats args according to format and returns the result as
// a byte slice.
func fbytes(format string, args ...interface{}) []byte {
	return []byte(fmt.Sprintf(format, args...))
}
+
// jdecBinary decodes a $binary/$binaryFunc document. The subtype may
// arrive as a string ($type, parsed with ParseInt base auto-detect)
// or as an int64 (BinData function form). Subtype zero yields a plain
// []byte; any other valid subtype yields a Binary value.
func jdecBinary(data []byte) (interface{}, error) {
	var v struct {
		Binary []byte `json:"$binary"`
		Type   string `json:"$type"`
		Func   struct {
			Binary []byte `json:"$binary"`
			Type   int64  `json:"$type"`
		} `json:"$binaryFunc"`
	}
	err := jdec(data, &v)
	if err != nil {
		return nil, err
	}

	var binData []byte
	var binKind int64
	if v.Type == "" && v.Binary == nil {
		// Function form: BinData(kind, "base64").
		binData = v.Func.Binary
		binKind = v.Func.Type
	} else if v.Type == "" {
		// $binary with no $type: treat as plain bytes.
		return v.Binary, nil
	} else {
		binData = v.Binary
		binKind, err = strconv.ParseInt(v.Type, 0, 64)
		if err != nil {
			// Unparseable subtype string; force the range error below.
			binKind = -1
		}
	}

	if binKind == 0 {
		return binData, nil
	}
	if binKind < 0 || binKind > 255 {
		return nil, fmt.Errorf("invalid type in binary object: %s", data)
	}

	return Binary{Kind: byte(binKind), Data: binData}, nil
}
+
// jencBinarySlice encodes a plain []byte as a $binary document with
// subtype 0x0, base64-encoding the payload.
func jencBinarySlice(v interface{}) ([]byte, error) {
	in := v.([]byte)
	out := make([]byte, base64.StdEncoding.EncodedLen(len(in)))
	base64.StdEncoding.Encode(out, in)
	return fbytes(`{"$binary":"%s","$type":"0x0"}`, out), nil
}
+
// jencBinaryType encodes a Binary value as a $binary document,
// base64-encoding the payload and rendering the subtype in hex.
func jencBinaryType(v interface{}) ([]byte, error) {
	in := v.(Binary)
	out := make([]byte, base64.StdEncoding.EncodedLen(len(in.Data)))
	base64.StdEncoding.Encode(out, in.Data)
	return fbytes(`{"$binary":"%s","$type":"0x%x"}`, out, in.Kind), nil
}
+
// jdateFormat is the layout used for extended JSON $date strings
// (millisecond precision, UTC).
const jdateFormat = "2006-01-02T15:04:05.999Z"
+
// jdecDate decodes a $date/$dateFunc value. String forms are parsed
// with the extended-JSON layout or a plain yyyy-mm-dd date; numeric
// forms (including a nested $numberLong) are taken as milliseconds
// since the Unix epoch and returned as a UTC time.Time.
func jdecDate(data []byte) (interface{}, error) {
	var v struct {
		S    string `json:"$date"`
		Func struct {
			S string
		} `json:"$dateFunc"`
	}
	// Errors are ignored here; an empty string falls through to the
	// numeric forms below.
	_ = jdec(data, &v)
	if v.S == "" {
		v.S = v.Func.S
	}
	if v.S != "" {
		for _, format := range []string{jdateFormat, "2006-01-02"} {
			t, err := time.Parse(format, v.S)
			if err == nil {
				return t, nil
			}
		}
		return nil, fmt.Errorf("cannot parse date: %q", v.S)
	}

	var vn struct {
		Date struct {
			N int64 `json:"$numberLong,string"`
		} `json:"$date"`
		Func struct {
			S int64
		} `json:"$dateFunc"`
	}
	err := jdec(data, &vn)
	if err != nil {
		return nil, fmt.Errorf("cannot parse date: %q", data)
	}
	n := vn.Date.N
	if n == 0 {
		n = vn.Func.S
	}
	return time.Unix(n/1000, n%1000*1e6).UTC(), nil
}
+
// jencDate encodes a time.Time as a $date document using the
// extended-JSON string layout.
func jencDate(v interface{}) ([]byte, error) {
	t := v.(time.Time)
	return fbytes(`{"$date":%q}`, t.Format(jdateFormat)), nil
}
+
// jdecTimestamp decodes a $timestamp document into a MongoTimestamp,
// packing the seconds (t) into the high 32 bits and the increment (i)
// into the low 32 bits.
func jdecTimestamp(data []byte) (interface{}, error) {
	var v struct {
		Func struct {
			T int32 `json:"t"`
			I int32 `json:"i"`
		} `json:"$timestamp"`
	}
	err := jdec(data, &v)
	if err != nil {
		return nil, err
	}
	return MongoTimestamp(uint64(v.Func.T)<<32 | uint64(uint32(v.Func.I))), nil
}
+
// jencTimestamp encodes a MongoTimestamp as a $timestamp document,
// unpacking the seconds from the high 32 bits and the increment from
// the low 32 bits.
func jencTimestamp(v interface{}) ([]byte, error) {
	ts := uint64(v.(MongoTimestamp))
	return fbytes(`{"$timestamp":{"t":%d,"i":%d}}`, ts>>32, uint32(ts)), nil
}
+
// jdecRegEx decodes a $regex/$options document into a RegEx value.
func jdecRegEx(data []byte) (interface{}, error) {
	var v struct {
		Regex   string `json:"$regex"`
		Options string `json:"$options"`
	}
	err := jdec(data, &v)
	if err != nil {
		return nil, err
	}
	return RegEx{v.Regex, v.Options}, nil
}
+
// jencRegEx encodes a RegEx value as a $regex/$options document,
// going through the standard marshaler so the pattern is properly
// JSON-escaped.
func jencRegEx(v interface{}) ([]byte, error) {
	re := v.(RegEx)
	type regex struct {
		Regex   string `json:"$regex"`
		Options string `json:"$options"`
	}
	return json.Marshal(regex{re.Pattern, re.Options})
}
+
+func jdecObjectId(data []byte) (interface{}, error) {
+	var v struct {
+		Id   string `json:"$oid"`
+		Func struct {
+			Id string
+		} `json:"$oidFunc"`
+	}
+	err := jdec(data, &v)
+	if err != nil {
+		return nil, err
+	}
+	if v.Id == "" {
+		v.Id = v.Func.Id
+	}
+	return ObjectIdHex(v.Id), nil
+}
+
// jencObjectId encodes an ObjectId as an $oid document with the id
// rendered in hex.
func jencObjectId(v interface{}) ([]byte, error) {
	return fbytes(`{"$oid":"%s"}`, v.(ObjectId).Hex()), nil
}
+
// jdecDBRef decodes a DBRef(...) function form into a plain map
// holding the $ref/$id fields; there is no dedicated DBRef type here.
func jdecDBRef(data []byte) (interface{}, error) {
	// TODO Support unmarshaling $ref and $id into the input value.
	var v struct {
		Obj map[string]interface{} `json:"$dbrefFunc"`
	}
	// TODO Fix this. Must not be required.
	v.Obj = make(map[string]interface{})
	err := jdec(data, &v)
	if err != nil {
		return nil, err
	}
	return v.Obj, nil
}
+
// jdecNumberLong decodes a $numberLong given either as a quoted
// string or as a bare JSON number, in both the keyed and the
// NumberLong(...) function forms.
func jdecNumberLong(data []byte) (interface{}, error) {
	var v struct {
		N    int64 `json:"$numberLong,string"`
		Func struct {
			N int64 `json:",string"`
		} `json:"$numberLongFunc"`
	}
	// Fallback structs accept the unquoted numeric representation.
	var vn struct {
		N    int64 `json:"$numberLong"`
		Func struct {
			N int64
		} `json:"$numberLongFunc"`
	}
	err := jdec(data, &v)
	if err != nil {
		err = jdec(data, &vn)
		v.N = vn.N
		v.Func.N = vn.Func.N
	}
	if err != nil {
		return nil, err
	}
	if v.N != 0 {
		return v.N, nil
	}
	return v.Func.N, nil
}
+
// jencNumberLong encodes an int64 as a $numberLong document. Values
// whose magnitude exceeds 2^53 cannot be represented exactly as a
// JSON double, so they are emitted as a quoted string; values within
// the exactly-representable range are emitted as a bare number.
// (The previous form only checked the upper bound, so large negative
// values were emitted unquoted and could lose precision downstream.)
func jencNumberLong(v interface{}) ([]byte, error) {
	n := v.(int64)
	f := `{"$numberLong":"%d"}`
	if -1<<53 <= n && n <= 1<<53 {
		// Exactly representable as a float64/JSON number.
		f = `{"$numberLong":%d}`
	}
	return []byte(fmt.Sprintf(f, n)), nil
}
+
+func jencInt(v interface{}) ([]byte, error) {
+	n := v.(int)
+	f := `{"$numberLong":"%d"}`
+	if int64(n) <= 1<<53 {
+		f = `%d`
+	}
+	return fbytes(f, n), nil
+}
+
// jdecMinKey decodes a $minKey document, which must carry the
// canonical value 1.
func jdecMinKey(data []byte) (interface{}, error) {
	var v struct {
		N int64 `json:"$minKey"`
	}
	err := jdec(data, &v)
	if err != nil {
		return nil, err
	}
	if v.N != 1 {
		return nil, fmt.Errorf("invalid $minKey object: %s", data)
	}
	return MinKey, nil
}
+
// jdecMaxKey decodes a $maxKey document, which must carry the
// canonical value 1.
func jdecMaxKey(data []byte) (interface{}, error) {
	var v struct {
		N int64 `json:"$maxKey"`
	}
	err := jdec(data, &v)
	if err != nil {
		return nil, err
	}
	if v.N != 1 {
		return nil, fmt.Errorf("invalid $maxKey object: %s", data)
	}
	return MaxKey, nil
}
+
// jencMinMaxKey encodes an orderKey as its canonical $minKey/$maxKey
// form; any other orderKey value is a programming error and panics.
func jencMinMaxKey(v interface{}) ([]byte, error) {
	switch v.(orderKey) {
	case MinKey:
		return []byte(`{"$minKey":1}`), nil
	case MaxKey:
		return []byte(`{"$maxKey":1}`), nil
	}
	panic(fmt.Sprintf("invalid $minKey/$maxKey value: %d", v))
}
+
// jdecUndefined decodes a $undefined document, which must carry the
// canonical value true.
func jdecUndefined(data []byte) (interface{}, error) {
	var v struct {
		B bool `json:"$undefined"`
	}
	err := jdec(data, &v)
	if err != nil {
		return nil, err
	}
	if !v.B {
		return nil, fmt.Errorf("invalid $undefined object: %s", data)
	}
	return Undefined, nil
}
+
// jencUndefined encodes the undefined value in its canonical extended
// JSON form; the input value is ignored.
func jencUndefined(_ interface{}) ([]byte, error) {
	const repr = `{"$undefined":true}`
	return []byte(repr), nil
}

+ 184 - 0
src/gopkg.in/mgo.v2/bson/json_test.go

@@ -0,0 +1,184 @@
+package bson_test
+
+import (
+	"gopkg.in/mgo.v2/bson"
+
+	. "gopkg.in/check.v1"
+	"reflect"
+	"strings"
+	"time"
+)
+
// jsonTest describes one extended-JSON round-trip case: a is encoded
// and must yield b; b is decoded and must yield c (or a when c is
// unset), unless e names an expected decode error.
type jsonTest struct {
	a interface{} // value encoded into JSON (optional)
	b string      // JSON expected as output of <a>, and used as input to <c>
	c interface{} // Value expected from decoding <b>, defaults to <a>
	e string      // error string, if decoding (b) should fail
}
+
+var jsonTests = []jsonTest{
+	// $binary
+	{
+		a: []byte("foo"),
+		b: `{"$binary":"Zm9v","$type":"0x0"}`,
+	}, {
+		a: bson.Binary{Kind: 2, Data: []byte("foo")},
+		b: `{"$binary":"Zm9v","$type":"0x2"}`,
+	}, {
+		b: `BinData(2,"Zm9v")`,
+		c: bson.Binary{Kind: 2, Data: []byte("foo")},
+	},
+
+	// $date
+	{
+		a: time.Date(2016, 5, 15, 1, 2, 3, 4000000, time.UTC),
+		b: `{"$date":"2016-05-15T01:02:03.004Z"}`,
+	}, {
+		b: `{"$date": {"$numberLong": "1002"}}`,
+		c: time.Date(1970, 1, 1, 0, 0, 1, 2e6, time.UTC),
+	}, {
+		b: `ISODate("2016-05-15T01:02:03.004Z")`,
+		c: time.Date(2016, 5, 15, 1, 2, 3, 4000000, time.UTC),
+	}, {
+		b: `new Date(1000)`,
+		c: time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC),
+	}, {
+		b: `new Date("2016-05-15")`,
+		c: time.Date(2016, 5, 15, 0, 0, 0, 0, time.UTC),
+	},
+
+	// $timestamp
+	{
+		a: bson.MongoTimestamp(4294967298),
+		b: `{"$timestamp":{"t":1,"i":2}}`,
+	}, {
+		b: `Timestamp(1, 2)`,
+		c: bson.MongoTimestamp(4294967298),
+	},
+
+	// $regex
+	{
+		a: bson.RegEx{"pattern", "options"},
+		b: `{"$regex":"pattern","$options":"options"}`,
+	},
+
+	// $oid
+	{
+		a: bson.ObjectIdHex("0123456789abcdef01234567"),
+		b: `{"$oid":"0123456789abcdef01234567"}`,
+	}, {
+		b: `ObjectId("0123456789abcdef01234567")`,
+		c: bson.ObjectIdHex("0123456789abcdef01234567"),
+	},
+
+	// $ref (no special type)
+	{
+		b: `DBRef("name", "id")`,
+		c: map[string]interface{}{"$ref": "name", "$id": "id"},
+	},
+
+	// $numberLong
+	{
+		a: 123,
+		b: `123`,
+	}, {
+		a: int64(9007199254740992),
+		b: `{"$numberLong":9007199254740992}`,
+	}, {
+		a: int64(1<<53 + 1),
+		b: `{"$numberLong":"9007199254740993"}`,
+	}, {
+		a: 1<<53 + 1,
+		b: `{"$numberLong":"9007199254740993"}`,
+		c: int64(9007199254740993),
+	}, {
+		b: `NumberLong(9007199254740992)`,
+		c: int64(1 << 53),
+	}, {
+		b: `NumberLong("9007199254740993")`,
+		c: int64(1<<53 + 1),
+	},
+
+	// $minKey, $maxKey
+	{
+		a: bson.MinKey,
+		b: `{"$minKey":1}`,
+	}, {
+		a: bson.MaxKey,
+		b: `{"$maxKey":1}`,
+	}, {
+		b: `MinKey`,
+		c: bson.MinKey,
+	}, {
+		b: `MaxKey`,
+		c: bson.MaxKey,
+	}, {
+		b: `{"$minKey":0}`,
+		e: `invalid $minKey object: {"$minKey":0}`,
+	}, {
+		b: `{"$maxKey":0}`,
+		e: `invalid $maxKey object: {"$maxKey":0}`,
+	},
+
+	{
+		a: bson.Undefined,
+		b: `{"$undefined":true}`,
+	}, {
+		b: `undefined`,
+		c: bson.Undefined,
+	}, {
+		b: `{"v": undefined}`,
+		c: struct{ V interface{} }{bson.Undefined},
+	},
+
+	// Unquoted keys and trailing commas
+	{
+		b: `{$foo: ["bar",],}`,
+		c: map[string]interface{}{"$foo": []interface{}{"bar"}},
+	},
+}
+
// TestJSON runs every jsonTests case: marshals item.a (when set) and
// compares against item.b, then unmarshals item.b and compares the
// result against item.c, or against the expected error string item.e.
func (s *S) TestJSON(c *C) {
	for i, item := range jsonTests {
		c.Logf("------------ (#%d)", i)
		c.Logf("A: %#v", item.a)
		c.Logf("B: %#v", item.b)

		if item.c == nil {
			item.c = item.a
		} else {
			c.Logf("C: %#v", item.c)
		}
		if item.e != "" {
			c.Logf("E: %s", item.e)
		}

		if item.a != nil {
			data, err := bson.MarshalJSON(item.a)
			c.Assert(err, IsNil)
			c.Logf("Dumped: %#v", string(data))
			// MarshalJSON appends a newline; strip it before comparing.
			c.Assert(strings.TrimSuffix(string(data), "\n"), Equals, item.b)
		}

		// Decode into a freshly allocated value of the expected type.
		var zero interface{}
		if item.c == nil {
			zero = &struct{}{}
		} else {
			zero = reflect.New(reflect.TypeOf(item.c)).Interface()
		}
		err := bson.UnmarshalJSON([]byte(item.b), zero)
		if item.e != "" {
			c.Assert(err, NotNil)
			c.Assert(err.Error(), Equals, item.e)
			continue
		}
		c.Assert(err, IsNil)
		zerov := reflect.ValueOf(zero)
		value := zerov.Interface()
		if zerov.Kind() == reflect.Ptr {
			value = zerov.Elem().Interface()
		}
		c.Logf("Loaded: %#v", value)
		c.Assert(value, DeepEquals, item.c)
	}
}

+ 27 - 0
src/gopkg.in/mgo.v2/bson/specdata/update.sh

@@ -0,0 +1,27 @@
#!/bin/sh

# Regenerates ../specdata_test.go from the BSON specification test
# suite, embedding each YAML test file as a raw Go string literal in
# the specTests slice.

set -e

# Fetch the spec repository (bson branch) on first run only.
if [ ! -d specifications ]; then
	git clone -b bson git@github.com:jyemin/specifications
fi

TESTFILE="../specdata_test.go"

# Emit the file header and the opening of the specTests slice.
cat <<END > $TESTFILE
package bson_test

var specTests = []string{
END

# Append each YAML document wrapped in backquotes, one slice element
# per file.
for file in specifications/source/bson/tests/*.yml; do
	(
		echo '`'
		cat $file
		echo -n '`,'
	) >> $TESTFILE
done

echo '}' >> $TESTFILE

# Normalize formatting of the generated Go file.
gofmt -w $TESTFILE
+ 241 - 0
src/gopkg.in/mgo.v2/bson/specdata_test.go

@@ -0,0 +1,241 @@
+package bson_test
+
+var specTests = []string{
+	`
+--- 
+description: "Array type"
+documents:
+  - 
+    decoded: 
+      a : []
+    encoded: 0D000000046100050000000000 
+  - 
+    decoded: 
+      a: [10]
+    encoded: 140000000461000C0000001030000A0000000000
+  -
+    # Decode an array that uses an empty string as the key
+    decodeOnly : true
+    decoded: 
+      a: [10]
+    encoded: 130000000461000B00000010000A0000000000
+  -
+    # Decode an array that uses a non-numeric string as the key
+    decodeOnly : true
+    decoded: 
+      a: [10]
+    encoded: 150000000461000D000000106162000A0000000000
+
+
+`, `
+--- 
+description: "Boolean type"
+documents: 
+  - 
+    encoded: "090000000862000100"
+    decoded: { "b" : true }
+  - 
+    encoded: "090000000862000000"
+    decoded: { "b" : false }
+    
+ 
+  `, `
+--- 
+description: "Corrupted BSON"
+documents:
+  -
+    encoded: "09000000016600"
+    error: "truncated double"
+  -
+    encoded: "09000000026600"
+    error: "truncated string"
+  -
+    encoded: "09000000036600"
+    error: "truncated document"
+  -
+    encoded: "09000000046600"
+    error: "truncated array"
+  -
+    encoded: "09000000056600"
+    error: "truncated binary"
+  -
+    encoded: "09000000076600"
+    error: "truncated objectid"
+  -
+    encoded: "09000000086600"
+    error: "truncated boolean"
+  -
+    encoded: "09000000096600"
+    error: "truncated date"
+  -
+    encoded: "090000000b6600"
+    error: "truncated regex"
+  -
+    encoded: "090000000c6600"
+    error: "truncated db pointer"
+  -
+    encoded: "0C0000000d6600"
+    error: "truncated javascript"
+  -
+    encoded: "0C0000000e6600"
+    error: "truncated symbol"
+  -
+    encoded: "0C0000000f6600"
+    error: "truncated javascript with scope"
+  -
+    encoded: "0C000000106600"
+    error: "truncated int32"
+  -
+    encoded: "0C000000116600"
+    error: "truncated timestamp"
+  -
+    encoded: "0C000000126600"
+    error: "truncated int64"
+  - 
+    encoded: "0400000000"
+    error: basic
+  - 
+    encoded: "0500000001"
+    error: basic
+  - 
+    encoded: "05000000"
+    error: basic
+  - 
+    encoded: "0700000002610078563412"
+    error: basic
+  - 
+    encoded: "090000001061000500"
+    error: basic
+  - 
+    encoded: "00000000000000000000"
+    error: basic
+  - 
+    encoded: "1300000002666f6f00040000006261720000"
+    error: "basic"
+  - 
+    encoded: "1800000003666f6f000f0000001062617200ffffff7f0000"
+    error: basic
+  - 
+    encoded: "1500000003666f6f000c0000000862617200010000"
+    error: basic
+  - 
+    encoded: "1c00000003666f6f001200000002626172000500000062617a000000"
+    error: basic
+  - 
+    encoded: "1000000002610004000000616263ff00"
+    error: string is not null-terminated
+  - 
+    encoded: "0c0000000200000000000000"
+    error: bad_string_length
+  - 
+    encoded: "120000000200ffffffff666f6f6261720000"
+    error: bad_string_length
+  - 
+    encoded: "0c0000000e00000000000000"
+    error: bad_string_length
+  - 
+    encoded: "120000000e00ffffffff666f6f6261720000"
+    error: bad_string_length
+  - 
+    encoded: "180000000c00fa5bd841d6585d9900"
+    error: ""
+  - 
+    encoded: "1e0000000c00ffffffff666f6f626172005259b56afa5bd841d6585d9900"
+    error: bad_string_length
+  - 
+    encoded: "0c0000000d00000000000000"
+    error: bad_string_length
+  - 
+    encoded: "0c0000000d00ffffffff0000"
+    error: bad_string_length
+  - 
+    encoded: "1c0000000f001500000000000000000c000000020001000000000000"
+    error: bad_string_length
+  - 
+    encoded: "1c0000000f0015000000ffffffff000c000000020001000000000000"
+    error: bad_string_length
+  - 
+    encoded: "1c0000000f001500000001000000000c000000020000000000000000"
+    error: bad_string_length
+  - 
+    encoded: "1c0000000f001500000001000000000c0000000200ffffffff000000"
+    error: bad_string_length
+  - 
+    encoded: "0E00000008616263646566676869707172737475"
+    error: "Run-on CString"
+  - 
+    encoded: "0100000000"
+    error: "An object size that's too small to even include the object size, but is correctly encoded, along with a correct EOO (and no data)"
+  - 
+    encoded: "1a0000000e74657374000c00000068656c6c6f20776f726c6400000500000000"
+    error: "One object, but with object size listed smaller than it is in the data"
+  - 
+    encoded: "05000000"
+    error: "One object, missing the EOO at the end"
+  - 
+    encoded: "0500000001"
+    error: "One object, sized correctly, with a spot for an EOO, but the EOO is 0x01"
+  - 
+    encoded: "05000000ff"
+    error: "One object, sized correctly, with a spot for an EOO, but the EOO is 0xff"
+  - 
+    encoded: "0500000070"
+    error: "One object, sized correctly, with a spot for an EOO, but the EOO is 0x70"
+  - 
+    encoded: "07000000000000"
+    error: "Invalid BSON type low range"
+  - 
+    encoded: "07000000800000"
+    error: "Invalid BSON type high range"
+  -
+    encoded: "090000000862000200"
+    error: "Invalid boolean value of 2"
+  - 
+    encoded: "09000000086200ff00"
+    error: "Invalid boolean value of -1"
+  `, `
+--- 
+description: "Int32 type"
+documents: 
+  - 
+    decoded: 
+      i: -2147483648
+    encoded: 0C0000001069000000008000
+  - 
+    decoded: 
+      i: 2147483647
+    encoded: 0C000000106900FFFFFF7F00
+  - 
+    decoded: 
+      i: -1
+    encoded: 0C000000106900FFFFFFFF00
+  - 
+    decoded: 
+      i: 0
+    encoded: 0C0000001069000000000000
+  - 
+    decoded: 
+      i: 1
+    encoded: 0C0000001069000100000000
+
+`, `
+--- 
+description: "String type"
+documents:
+  - 
+    decoded: 
+      s : ""
+    encoded: 0D000000027300010000000000
+  - 
+    decoded: 
+      s: "a"
+    encoded: 0E00000002730002000000610000
+  - 
+    decoded: 
+      s: "This is a string"
+    encoded: 1D0000000273001100000054686973206973206120737472696E670000
+  - 
+    decoded: 
+      s: "κόσμε"
+    encoded: 180000000273000C000000CEBAE1BDB9CF83CEBCCEB50000
+`}

+ 351 - 0
src/gopkg.in/mgo.v2/bulk.go

@@ -0,0 +1,351 @@
+package mgo
+
+import (
+	"bytes"
+	"sort"
+
+	"gopkg.in/mgo.v2/bson"
+)
+
+// Bulk represents an operation that can be prepared with several
+// orthogonal changes before being delivered to the server.
+//
+// MongoDB servers older than version 2.6 do not have proper support for bulk
+// operations, so the driver attempts to map its API as much as possible into
+// the functionality that works. In particular, in those releases updates and
+// removals are sent individually, and inserts are sent in bulk but have
+// suboptimal error reporting compared to more recent versions of the server.
+// See the documentation of BulkErrorCase for details on that.
+//
+// Relevant documentation:
+//
+//   http://blog.mongodb.org/post/84922794768/mongodbs-new-bulk-api
+//
+type Bulk struct {
+	c       *Collection // target collection for every queued operation
+	opcount int         // total number of operations queued so far
+	actions []bulkAction // queued actions, grouped by operation kind
+	ordered bool        // when true (the default), stop at the first failing action
+}
+
+// bulkOp identifies the kind of operation held by a bulkAction.
+type bulkOp int
+
+const (
+	// Starting at iota+1 keeps the zero value invalid.
+	bulkInsert bulkOp = iota + 1
+	bulkUpdate
+	bulkUpdateAll
+	bulkRemove
+)
+
+// bulkAction accumulates operations of a single kind queued on a Bulk.
+type bulkAction struct {
+	op   bulkOp
+	docs []interface{} // raw documents for inserts; op structs for updates/removes
+	idxs []int         // original queue positions, used to map errors back
+}
+
+// bulkUpdateOp and bulkDeleteOp tag a batch of op documents so that
+// Collection.writeOp can dispatch on the operation kind.
+type bulkUpdateOp []interface{}
+type bulkDeleteOp []interface{}
+
+// BulkResult holds the results for a bulk operation.
+type BulkResult struct {
+	Matched  int
+	Modified int // Available only for MongoDB 2.6+
+
+	// Be conservative while we understand exactly how to report these
+	// results in a useful and convenient way, and also how to emulate
+	// them with prior servers.
+	private bool
+}
+
+// BulkError holds an error returned from running a Bulk operation.
+// Individual errors may be obtained and inspected via the Cases method.
+type BulkError struct {
+	ecases []BulkErrorCase // individual failures; sorted by Index before Run returns
+}
+
+// Error implements the error interface. A single case (or several cases
+// sharing one message) is reported verbatim; otherwise the distinct
+// messages are combined into a multi-line summary.
+func (e *BulkError) Error() string {
+	if len(e.ecases) == 0 {
+		return "invalid BulkError instance: no errors"
+	}
+	if len(e.ecases) == 1 {
+		return e.ecases[0].Err.Error()
+	}
+	// Deduplicate messages while preserving first-seen order.
+	msgs := make([]string, 0, len(e.ecases))
+	seen := make(map[string]bool)
+	for _, ecase := range e.ecases {
+		msg := ecase.Err.Error()
+		if !seen[msg] {
+			seen[msg] = true
+			msgs = append(msgs, msg)
+		}
+	}
+	if len(msgs) == 1 {
+		return msgs[0]
+	}
+	var buf bytes.Buffer
+	buf.WriteString("multiple errors in bulk operation:\n")
+	for _, msg := range msgs {
+		buf.WriteString("  - ")
+		buf.WriteString(msg)
+		buf.WriteByte('\n')
+	}
+	return buf.String()
+}
+
+// bulkErrorCases implements sort.Interface so error cases can be
+// presented in the order the operations were queued.
+type bulkErrorCases []BulkErrorCase
+
+func (slice bulkErrorCases) Len() int           { return len(slice) }
+func (slice bulkErrorCases) Less(i, j int) bool { return slice[i].Index < slice[j].Index }
+func (slice bulkErrorCases) Swap(i, j int)      { slice[i], slice[j] = slice[j], slice[i] }
+
+// BulkErrorCase holds an individual error found while attempting a single change
+// within a bulk operation, and the position in which it was enqueued.
+//
+// MongoDB servers older than version 2.6 do not have proper support for bulk
+// operations, so the driver attempts to map its API as much as possible into
+// the functionality that works. In particular, only the last error is reported
+// for bulk inserts and without any positional information, so the Index
+// field is set to -1 in these cases.
+type BulkErrorCase struct {
+	Index int // Position of operation that failed, or -1 if unknown.
+	Err   error
+}
+
+// Cases returns all individual errors found while attempting the requested changes.
+// The returned slice is the internal one, so callers must not modify it.
+//
+// See the documentation of BulkErrorCase for limitations in older MongoDB releases.
+func (e *BulkError) Cases() []BulkErrorCase {
+	return e.ecases
+}
+
+// Bulk returns a value to prepare the execution of a bulk operation.
+// The returned Bulk is in ordered mode by default; see Unordered.
+func (c *Collection) Bulk() *Bulk {
+	return &Bulk{c: c, ordered: true}
+}
+
+// Unordered puts the bulk operation in unordered mode.
+//
+// In unordered mode the individual operations may be sent
+// out of order, which means later operations may proceed
+// even if prior ones have failed.
+func (b *Bulk) Unordered() {
+	b.ordered = false
+}
+
+// action returns the bulkAction the next opcount operations should be
+// appended to, creating one if necessary. In ordered mode only the most
+// recently queued action may be extended, which preserves execution order;
+// in unordered mode any existing action of the same kind is reused so that
+// operations of one kind are batched together. The queue position of each
+// new operation is recorded in idxs for later error reporting.
+func (b *Bulk) action(op bulkOp, opcount int) *bulkAction {
+	var action *bulkAction
+	if len(b.actions) > 0 && b.actions[len(b.actions)-1].op == op {
+		action = &b.actions[len(b.actions)-1]
+	} else if !b.ordered {
+		for i := range b.actions {
+			if b.actions[i].op == op {
+				action = &b.actions[i]
+				break
+			}
+		}
+	}
+	if action == nil {
+		b.actions = append(b.actions, bulkAction{op: op})
+		action = &b.actions[len(b.actions)-1]
+	}
+	for i := 0; i < opcount; i++ {
+		action.idxs = append(action.idxs, b.opcount)
+		b.opcount++
+	}
+	return action
+}
+
+// Insert queues up the provided documents for insertion.
+// The documents are only delivered to the server when Run is called.
+func (b *Bulk) Insert(docs ...interface{}) {
+	action := b.action(bulkInsert, len(docs))
+	action.docs = append(action.docs, docs...)
+}
+
+// Remove queues up the provided selectors for removing matching documents.
+// Each selector will remove only a single matching document.
+func (b *Bulk) Remove(selectors ...interface{}) {
+	action := b.action(bulkRemove, len(selectors))
+	for _, selector := range selectors {
+		if selector == nil {
+			// A nil selector is normalized to an empty document.
+			selector = bson.D{}
+		}
+		action.docs = append(action.docs, &deleteOp{
+			Collection: b.c.FullName,
+			Selector:   selector,
+			Flags:      1, // Single-remove flag: delete at most one document.
+			Limit:      1,
+		})
+	}
+}
+
+// RemoveAll queues up the provided selectors for removing all matching documents.
+// Each selector will remove all matching documents.
+func (b *Bulk) RemoveAll(selectors ...interface{}) {
+	action := b.action(bulkRemove, len(selectors))
+	for _, selector := range selectors {
+		if selector == nil {
+			// A nil selector is normalized to an empty document.
+			selector = bson.D{}
+		}
+		action.docs = append(action.docs, &deleteOp{
+			Collection: b.c.FullName,
+			Selector:   selector,
+			Flags:      0, // No flags and no limit: remove every match.
+			Limit:      0,
+		})
+	}
+}
+
+// Update queues up the provided pairs of updating instructions.
+// The first element of each pair selects which documents must be
+// updated, and the second element defines how to update it.
+// Each pair matches exactly one document for updating at most.
+//
+// Panics if given an odd number of parameters.
+func (b *Bulk) Update(pairs ...interface{}) {
+	if len(pairs)%2 != 0 {
+		panic("Bulk.Update requires an even number of parameters")
+	}
+	action := b.action(bulkUpdate, len(pairs)/2)
+	for i := 0; i < len(pairs); i += 2 {
+		selector := pairs[i]
+		if selector == nil {
+			// A nil selector is normalized to an empty document.
+			selector = bson.D{}
+		}
+		action.docs = append(action.docs, &updateOp{
+			Collection: b.c.FullName,
+			Selector:   selector,
+			Update:     pairs[i+1],
+		})
+	}
+}
+
+// UpdateAll queues up the provided pairs of updating instructions.
+// The first element of each pair selects which documents must be
+// updated, and the second element defines how to update it.
+// Each pair updates all documents matching the selector.
+//
+// Panics if given an odd number of parameters.
+func (b *Bulk) UpdateAll(pairs ...interface{}) {
+	if len(pairs)%2 != 0 {
+		panic("Bulk.UpdateAll requires an even number of parameters")
+	}
+	action := b.action(bulkUpdate, len(pairs)/2)
+	for i := 0; i < len(pairs); i += 2 {
+		selector := pairs[i]
+		if selector == nil {
+			// A nil selector is normalized to an empty document.
+			selector = bson.D{}
+		}
+		action.docs = append(action.docs, &updateOp{
+			Collection: b.c.FullName,
+			Selector:   selector,
+			Update:     pairs[i+1],
+			Flags:      2, // Multi-update flag for the wire protocol.
+			Multi:      true,
+		})
+	}
+}
+
+// Upsert queues up the provided pairs of upserting instructions.
+// The first element of each pair selects which documents must be
+// updated, and the second element defines how to update it.
+// Each pair matches at most one document for updating; when no document
+// matches the selector, a new one is inserted instead (Upsert semantics).
+//
+// Panics if given an odd number of parameters.
+func (b *Bulk) Upsert(pairs ...interface{}) {
+	if len(pairs)%2 != 0 {
+		// Was "Bulk.Update" before; fixed so the panic names the right method.
+		panic("Bulk.Upsert requires an even number of parameters")
+	}
+	action := b.action(bulkUpdate, len(pairs)/2)
+	for i := 0; i < len(pairs); i += 2 {
+		selector := pairs[i]
+		if selector == nil {
+			// A nil selector is normalized to an empty document.
+			selector = bson.D{}
+		}
+		action.docs = append(action.docs, &updateOp{
+			Collection: b.c.FullName,
+			Selector:   selector,
+			Update:     pairs[i+1],
+			Flags:      1, // Upsert flag for the wire protocol.
+			Upsert:     true,
+		})
+	}
+}
+
+// Run runs all the operations queued up.
+//
+// If an error is reported on an unordered bulk operation, the error value may
+// be an aggregation of all issues observed. As an exception to that, Insert
+// operations running on MongoDB versions prior to 2.6 will report the last
+// error only due to a limitation in the wire protocol.
+func (b *Bulk) Run() (*BulkResult, error) {
+	var result BulkResult
+	var berr BulkError
+	var failed bool
+	for i := range b.actions {
+		action := &b.actions[i]
+		var ok bool
+		switch action.op {
+		case bulkInsert:
+			ok = b.runInsert(action, &result, &berr)
+		case bulkUpdate:
+			ok = b.runUpdate(action, &result, &berr)
+		case bulkRemove:
+			ok = b.runRemove(action, &result, &berr)
+		default:
+			panic("unknown bulk operation")
+		}
+		if !ok {
+			failed = true
+			// In ordered mode the first failing action aborts the rest.
+			if b.ordered {
+				break
+			}
+		}
+	}
+	if failed {
+		// Present the accumulated failures in queue order.
+		sort.Sort(bulkErrorCases(berr.ecases))
+		return nil, &berr
+	}
+	return &result, nil
+}
+
+// runInsert delivers a batch of queued insertions as a single insert op
+// and records any failures into berr. It returns true on full success.
+func (b *Bulk) runInsert(action *bulkAction, result *BulkResult, berr *BulkError) bool {
+	op := &insertOp{b.c.FullName, action.docs, 0}
+	if !b.ordered {
+		op.flags = 1 // ContinueOnError
+	}
+	lerr, err := b.c.writeOp(op, b.ordered)
+	return b.checkSuccess(action, berr, lerr, err)
+}
+
+// runUpdate delivers the queued update operations, accumulating the
+// matched/modified counters reported by the server into result. It
+// records any failures into berr and returns true on full success.
+func (b *Bulk) runUpdate(action *bulkAction, result *BulkResult, berr *BulkError) bool {
+	lerr, err := b.c.writeOp(bulkUpdateOp(action.docs), b.ordered)
+	if lerr != nil {
+		result.Matched += lerr.N
+		result.Modified += lerr.modified
+	}
+	return b.checkSuccess(action, berr, lerr, err)
+}
+
+// runRemove delivers the queued remove operations, accumulating the
+// matched/modified counters reported by the server into result. It
+// records any failures into berr and returns true on full success.
+func (b *Bulk) runRemove(action *bulkAction, result *BulkResult, berr *BulkError) bool {
+	lerr, err := b.c.writeOp(bulkDeleteOp(action.docs), b.ordered)
+	if lerr != nil {
+		result.Matched += lerr.N
+		result.Modified += lerr.modified
+	}
+	return b.checkSuccess(action, berr, lerr, err)
+}
+
+// checkSuccess folds the outcome of a single action into berr, mapping
+// per-action error indexes back to the position each operation had in the
+// overall bulk queue (via action.idxs). When only a plain error is
+// available, it is attributed to every operation in the action. Returns
+// true when the action fully succeeded.
+func (b *Bulk) checkSuccess(action *bulkAction, berr *BulkError, lerr *LastError, err error) bool {
+	if lerr != nil && len(lerr.ecases) > 0 {
+		for i := 0; i < len(lerr.ecases); i++ {
+			// Map back from the local error index into the visible one.
+			ecase := lerr.ecases[i]
+			idx := ecase.Index
+			if idx >= 0 {
+				idx = action.idxs[idx]
+			}
+			berr.ecases = append(berr.ecases, BulkErrorCase{idx, ecase.Err})
+		}
+		return false
+	} else if err != nil {
+		for i := 0; i < len(action.idxs); i++ {
+			berr.ecases = append(berr.ecases, BulkErrorCase{action.idxs[i], err})
+		}
+		return false
+	}
+	return true
+}

+ 504 - 0
src/gopkg.in/mgo.v2/bulk_test.go

@@ -0,0 +1,504 @@
+// mgo - MongoDB driver for Go
+//
+// Copyright (c) 2010-2015 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+//    list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+//    this list of conditions and the following disclaimer in the documentation
+//    and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package mgo_test
+
+import (
+	. "gopkg.in/check.v1"
+	"gopkg.in/mgo.v2"
+)
+
+// TestBulkInsert verifies that documents queued via Insert are all
+// written once Run is called.
+func (s *S) TestBulkInsert(c *C) {
+	session, err := mgo.Dial("localhost:40001")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	coll := session.DB("mydb").C("mycoll")
+	bulk := coll.Bulk()
+	bulk.Insert(M{"n": 1})
+	bulk.Insert(M{"n": 2}, M{"n": 3})
+	r, err := bulk.Run()
+	c.Assert(err, IsNil)
+	c.Assert(r, FitsTypeOf, &mgo.BulkResult{})
+
+	type doc struct{ N int }
+	var res []doc
+	err = coll.Find(nil).Sort("n").All(&res)
+	c.Assert(err, IsNil)
+	c.Assert(res, DeepEquals, []doc{{1}, {2}, {3}})
+}
+
+// TestBulkInsertError verifies that an ordered bulk insert stops at the
+// first duplicate-key error, leaving later documents unwritten.
+func (s *S) TestBulkInsertError(c *C) {
+	session, err := mgo.Dial("localhost:40001")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	coll := session.DB("mydb").C("mycoll")
+	bulk := coll.Bulk()
+	bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2}, M{"_id": 3})
+	_, err = bulk.Run()
+	c.Assert(err, ErrorMatches, ".*duplicate key.*")
+	c.Assert(mgo.IsDup(err), Equals, true)
+
+	type doc struct {
+		N int `_id`
+	}
+	var res []doc
+	err = coll.Find(nil).Sort("_id").All(&res)
+	c.Assert(err, IsNil)
+	c.Assert(res, DeepEquals, []doc{{1}, {2}})
+}
+
+// TestBulkInsertErrorUnordered verifies that in unordered mode documents
+// after a duplicate-key failure are still inserted.
+func (s *S) TestBulkInsertErrorUnordered(c *C) {
+	session, err := mgo.Dial("localhost:40001")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	coll := session.DB("mydb").C("mycoll")
+	bulk := coll.Bulk()
+	bulk.Unordered()
+	bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2}, M{"_id": 3})
+	_, err = bulk.Run()
+	c.Assert(err, ErrorMatches, ".*duplicate key.*")
+
+	type doc struct {
+		N int `_id`
+	}
+	var res []doc
+	err = coll.Find(nil).Sort("_id").All(&res)
+	c.Assert(err, IsNil)
+	c.Assert(res, DeepEquals, []doc{{1}, {2}, {3}})
+}
+
+// TestBulkInsertErrorUnorderedSplitBatch checks continue-on-error
+// behavior across the driver's internal 1000-document batch splits.
+func (s *S) TestBulkInsertErrorUnorderedSplitBatch(c *C) {
+	// The server has a batch limit of 1000 documents when using write commands.
+	// This artificial limit did not exist with the old wire protocol, so to
+	// avoid compatibility issues the implementation internally split batches
+	// into the proper size and delivers them one by one. This test ensures that
+	// the behavior of unordered (that is, continue on error) remains correct
+	// when errors happen and there are batches left.
+	session, err := mgo.Dial("localhost:40001")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	coll := session.DB("mydb").C("mycoll")
+	bulk := coll.Bulk()
+	bulk.Unordered()
+
+	const total = 4096
+	type doc struct {
+		Id int `_id`
+	}
+	docs := make([]interface{}, total)
+	for i := 0; i < total; i++ {
+		docs[i] = doc{i}
+	}
+	// Introduce a duplicate _id early in the first batch.
+	docs[1] = doc{0}
+	bulk.Insert(docs...)
+	_, err = bulk.Run()
+	c.Assert(err, ErrorMatches, ".*duplicate key.*")
+
+	n, err := coll.Count()
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, total-1)
+
+	// A document from a batch after the failing one must still exist.
+	var res doc
+	err = coll.FindId(1500).One(&res)
+	c.Assert(err, IsNil)
+	c.Assert(res.Id, Equals, 1500)
+}
+
+// TestBulkErrorString verifies how BulkError.Error joins messages:
+// identical messages collapse into one, distinct ones are all listed.
+func (s *S) TestBulkErrorString(c *C) {
+	session, err := mgo.Dial("localhost:40001")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	coll := session.DB("mydb").C("mycoll")
+
+	// If it's just the same string multiple times, join it into a single message.
+	bulk := coll.Bulk()
+	bulk.Unordered()
+	bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2})
+	_, err = bulk.Run()
+	c.Assert(err, ErrorMatches, ".*duplicate key.*")
+	c.Assert(err, Not(ErrorMatches), ".*duplicate key.*duplicate key")
+	c.Assert(mgo.IsDup(err), Equals, true)
+
+	// With matching errors but different messages, present them all.
+	bulk = coll.Bulk()
+	bulk.Unordered()
+	bulk.Insert(M{"_id": "dupone"}, M{"_id": "dupone"}, M{"_id": "duptwo"}, M{"_id": "duptwo"})
+	_, err = bulk.Run()
+	if s.versionAtLeast(2, 6) {
+		c.Assert(err, ErrorMatches, "multiple errors in bulk operation:\n(  - .*duplicate.*\n){2}$")
+		c.Assert(err, ErrorMatches, "(?s).*dupone.*")
+		c.Assert(err, ErrorMatches, "(?s).*duptwo.*")
+	} else {
+		// Wire protocol query doesn't return all errors.
+		c.Assert(err, ErrorMatches, ".*duplicate.*")
+	}
+	c.Assert(mgo.IsDup(err), Equals, true)
+
+	// With mixed errors, present them all.
+	bulk = coll.Bulk()
+	bulk.Unordered()
+	bulk.Insert(M{"_id": 1}, M{"_id": []int{2}})
+	_, err = bulk.Run()
+	if s.versionAtLeast(2, 6) {
+		c.Assert(err, ErrorMatches, "multiple errors in bulk operation:\n  - .*duplicate.*\n  - .*array.*\n$")
+	} else {
+		// Wire protocol query doesn't return all errors.
+		c.Assert(err, ErrorMatches, ".*array.*")
+	}
+	c.Assert(mgo.IsDup(err), Equals, false)
+}
+
+// TestBulkErrorCases_2_6 verifies that on MongoDB 2.6+ each failure is
+// reported with its original queue position, even across batch splits.
+func (s *S) TestBulkErrorCases_2_6(c *C) {
+	if !s.versionAtLeast(2, 6) {
+		c.Skip("2.4- has poor bulk reporting")
+	}
+	session, err := mgo.Dial("localhost:40001")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	coll := session.DB("mydb").C("mycoll")
+
+	bulk := coll.Bulk()
+	bulk.Unordered()
+
+	// There's a limit of 1000 operations per command, so
+	// this forces the more complex indexing logic to act.
+	for i := 0; i < 1010; i++ {
+		switch i {
+		case 3, 14:
+			bulk.Insert(M{"_id": "dupone"})
+		case 5, 106:
+			bulk.Update(M{"_id": i - 1}, M{"$set": M{"_id": 4}})
+		case 7, 1008:
+			bulk.Insert(M{"_id": "duptwo"})
+		default:
+			bulk.Insert(M{"_id": i})
+		}
+	}
+
+	_, err = bulk.Run()
+	ecases := err.(*mgo.BulkError).Cases()
+
+	c.Check(ecases[0].Err, ErrorMatches, ".*duplicate.*dupone.*")
+	c.Check(ecases[0].Index, Equals, 14)
+	c.Check(ecases[1].Err, ErrorMatches, ".*update.*_id.*")
+	c.Check(ecases[1].Index, Equals, 106)
+	c.Check(ecases[2].Err, ErrorMatches, ".*duplicate.*duptwo.*")
+	c.Check(ecases[2].Index, Equals, 1008)
+}
+
+// TestBulkErrorCases_2_4 verifies pre-2.6 reporting: inserts only carry
+// the last error with Index -1, while updates keep their position.
+func (s *S) TestBulkErrorCases_2_4(c *C) {
+	if s.versionAtLeast(2, 6) {
+		c.Skip("2.6+ has better reporting")
+	}
+	session, err := mgo.Dial("localhost:40001")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	coll := session.DB("mydb").C("mycoll")
+
+	bulk := coll.Bulk()
+	bulk.Unordered()
+
+	// There's a limit of 1000 operations per command, so
+	// this forces the more complex indexing logic to act.
+	for i := 0; i < 1010; i++ {
+		switch i {
+		case 3, 14:
+			bulk.Insert(M{"_id": "dupone"})
+		case 5:
+			bulk.Update(M{"_id": i - 1}, M{"$set": M{"n": 4}})
+		case 106:
+			bulk.Update(M{"_id": i - 1}, M{"$bogus": M{"n": 4}})
+		case 7, 1008:
+			bulk.Insert(M{"_id": "duptwo"})
+		default:
+			bulk.Insert(M{"_id": i})
+		}
+	}
+
+	_, err = bulk.Run()
+	ecases := err.(*mgo.BulkError).Cases()
+
+	c.Check(ecases[0].Err, ErrorMatches, ".*duplicate.*duptwo.*")
+	c.Check(ecases[0].Index, Equals, -1)
+	c.Check(ecases[1].Err, ErrorMatches, `.*\$bogus.*`)
+	c.Check(ecases[1].Index, Equals, 106)
+}
+
+// TestBulkErrorCasesOrdered verifies that an ordered bulk reports only
+// the first failure, with its position on 2.6+ and -1 on older servers.
+func (s *S) TestBulkErrorCasesOrdered(c *C) {
+	session, err := mgo.Dial("localhost:40001")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	coll := session.DB("mydb").C("mycoll")
+
+	bulk := coll.Bulk()
+
+	// Only 20 operations here — well under the 1000-op batch limit — since
+	// ordered mode stops at the first failure anyway.
+	for i := 0; i < 20; i++ {
+		switch i {
+		case 3, 14:
+			bulk.Insert(M{"_id": "dupone"})
+		case 7, 17:
+			bulk.Insert(M{"_id": "duptwo"})
+		default:
+			bulk.Insert(M{"_id": i})
+		}
+	}
+
+	_, err = bulk.Run()
+	ecases := err.(*mgo.BulkError).Cases()
+
+	c.Check(ecases[0].Err, ErrorMatches, ".*duplicate.*dupone.*")
+	if s.versionAtLeast(2, 6) {
+		c.Check(ecases[0].Index, Equals, 14)
+	} else {
+		c.Check(ecases[0].Index, Equals, -1)
+	}
+	c.Check(ecases, HasLen, 1)
+}
+
+// TestBulkUpdate verifies single-document updates and the Matched and
+// Modified counters (the latter only on 2.6+).
+func (s *S) TestBulkUpdate(c *C) {
+	session, err := mgo.Dial("localhost:40001")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	coll := session.DB("mydb").C("mycoll")
+
+	err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3})
+	c.Assert(err, IsNil)
+
+	bulk := coll.Bulk()
+	bulk.Update(M{"n": 1}, M{"$set": M{"n": 1}})
+	bulk.Update(M{"n": 2}, M{"$set": M{"n": 20}})
+	bulk.Update(M{"n": 5}, M{"$set": M{"n": 50}}) // Won't match.
+	bulk.Update(M{"n": 1}, M{"$set": M{"n": 10}}, M{"n": 3}, M{"$set": M{"n": 30}})
+	r, err := bulk.Run()
+	c.Assert(err, IsNil)
+	c.Assert(r.Matched, Equals, 4)
+	if s.versionAtLeast(2, 6) {
+		c.Assert(r.Modified, Equals, 3)
+	}
+
+	type doc struct{ N int }
+	var res []doc
+	err = coll.Find(nil).Sort("n").All(&res)
+	c.Assert(err, IsNil)
+	c.Assert(res, DeepEquals, []doc{{10}, {20}, {30}})
+}
+
+// TestBulkUpdateError verifies that an ordered bulk update stops at the
+// first failing pair (here, an illegal _id modification).
+func (s *S) TestBulkUpdateError(c *C) {
+	session, err := mgo.Dial("localhost:40001")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	coll := session.DB("mydb").C("mycoll")
+
+	err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3})
+	c.Assert(err, IsNil)
+
+	bulk := coll.Bulk()
+	bulk.Update(
+		M{"n": 1}, M{"$set": M{"n": 10}},
+		M{"n": 2}, M{"$set": M{"n": 20, "_id": 20}},
+		M{"n": 3}, M{"$set": M{"n": 30}},
+	)
+	r, err := bulk.Run()
+	c.Assert(err, ErrorMatches, ".*_id.*")
+	c.Assert(r, FitsTypeOf, &mgo.BulkResult{})
+
+	type doc struct{ N int }
+	var res []doc
+	err = coll.Find(nil).Sort("n").All(&res)
+	c.Assert(err, IsNil)
+	c.Assert(res, DeepEquals, []doc{{2}, {3}, {10}})
+}
+
+// TestBulkUpdateErrorUnordered verifies that in unordered mode updates
+// after a failing pair are still applied.
+func (s *S) TestBulkUpdateErrorUnordered(c *C) {
+	session, err := mgo.Dial("localhost:40001")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	coll := session.DB("mydb").C("mycoll")
+
+	err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3})
+	c.Assert(err, IsNil)
+
+	bulk := coll.Bulk()
+	bulk.Unordered()
+	bulk.Update(
+		M{"n": 1}, M{"$set": M{"n": 10}},
+		M{"n": 2}, M{"$set": M{"n": 20, "_id": 20}},
+		M{"n": 3}, M{"$set": M{"n": 30}},
+	)
+	r, err := bulk.Run()
+	c.Assert(err, ErrorMatches, ".*_id.*")
+	c.Assert(r, FitsTypeOf, &mgo.BulkResult{})
+
+	type doc struct{ N int }
+	var res []doc
+	err = coll.Find(nil).Sort("n").All(&res)
+	c.Assert(err, IsNil)
+	c.Assert(res, DeepEquals, []doc{{2}, {10}, {30}})
+}
+
+// TestBulkUpdateAll verifies multi-document updates and that Matched
+// counts all matches while Modified excludes no-op changes (2.6+).
+func (s *S) TestBulkUpdateAll(c *C) {
+	session, err := mgo.Dial("localhost:40001")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	coll := session.DB("mydb").C("mycoll")
+
+	err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3})
+	c.Assert(err, IsNil)
+
+	bulk := coll.Bulk()
+	bulk.UpdateAll(M{"n": 1}, M{"$set": M{"n": 10}})
+	bulk.UpdateAll(M{"n": 2}, M{"$set": M{"n": 2}})  // Won't change.
+	bulk.UpdateAll(M{"n": 5}, M{"$set": M{"n": 50}}) // Won't match.
+	bulk.UpdateAll(M{}, M{"$inc": M{"n": 1}}, M{"n": 11}, M{"$set": M{"n": 5}})
+	r, err := bulk.Run()
+	c.Assert(err, IsNil)
+	c.Assert(r.Matched, Equals, 6)
+	if s.versionAtLeast(2, 6) {
+		c.Assert(r.Modified, Equals, 5)
+	}
+
+	type doc struct{ N int }
+	var res []doc
+	err = coll.Find(nil).Sort("n").All(&res)
+	c.Assert(err, IsNil)
+	c.Assert(res, DeepEquals, []doc{{3}, {4}, {5}})
+}
+
+// TestBulkMixedUnordered verifies that unordered mode batches operations
+// of the same kind together regardless of the order they were queued in.
+func (s *S) TestBulkMixedUnordered(c *C) {
+	session, err := mgo.Dial("localhost:40001")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	coll := session.DB("mydb").C("mycoll")
+
+	// Abuse undefined behavior to ensure the desired implementation is in place.
+	bulk := coll.Bulk()
+	bulk.Unordered()
+	bulk.Insert(M{"n": 1})
+	bulk.Update(M{"n": 2}, M{"$inc": M{"n": 1}})
+	bulk.Insert(M{"n": 2})
+	bulk.Update(M{"n": 3}, M{"$inc": M{"n": 1}})
+	bulk.Update(M{"n": 1}, M{"$inc": M{"n": 1}})
+	bulk.Insert(M{"n": 3})
+	r, err := bulk.Run()
+	c.Assert(err, IsNil)
+	c.Assert(r.Matched, Equals, 3)
+	if s.versionAtLeast(2, 6) {
+		c.Assert(r.Modified, Equals, 3)
+	}
+
+	type doc struct{ N int }
+	var res []doc
+	err = coll.Find(nil).Sort("n").All(&res)
+	c.Assert(err, IsNil)
+	c.Assert(res, DeepEquals, []doc{{2}, {3}, {4}})
+}
+
+// TestBulkUpsert verifies that Upsert updates matching documents and
+// inserts new ones when the selector matches nothing.
+func (s *S) TestBulkUpsert(c *C) {
+	session, err := mgo.Dial("localhost:40001")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	coll := session.DB("mydb").C("mycoll")
+
+	err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3})
+	c.Assert(err, IsNil)
+
+	bulk := coll.Bulk()
+	bulk.Upsert(M{"n": 2}, M{"$set": M{"n": 20}})
+	bulk.Upsert(M{"n": 4}, M{"$set": M{"n": 40}}, M{"n": 3}, M{"$set": M{"n": 30}})
+	r, err := bulk.Run()
+	c.Assert(err, IsNil)
+	c.Assert(r, FitsTypeOf, &mgo.BulkResult{})
+
+	type doc struct{ N int }
+	var res []doc
+	err = coll.Find(nil).Sort("n").All(&res)
+	c.Assert(err, IsNil)
+	c.Assert(res, DeepEquals, []doc{{1}, {20}, {30}, {40}})
+}
+
+// TestBulkRemove verifies that each Remove selector deletes at most a
+// single matching document.
+func (s *S) TestBulkRemove(c *C) {
+	session, err := mgo.Dial("localhost:40001")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	coll := session.DB("mydb").C("mycoll")
+
+	err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3}, M{"n": 4}, M{"n": 4})
+	c.Assert(err, IsNil)
+
+	bulk := coll.Bulk()
+	bulk.Remove(M{"n": 1})
+	bulk.Remove(M{"n": 2}, M{"n": 4})
+	r, err := bulk.Run()
+	c.Assert(err, IsNil)
+	c.Assert(r.Matched, Equals, 3)
+
+	type doc struct{ N int }
+	var res []doc
+	err = coll.Find(nil).Sort("n").All(&res)
+	c.Assert(err, IsNil)
+	c.Assert(res, DeepEquals, []doc{{3}, {4}})
+}
+
+// TestBulkRemoveAll verifies that each RemoveAll selector deletes every
+// matching document.
+func (s *S) TestBulkRemoveAll(c *C) {
+	session, err := mgo.Dial("localhost:40001")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	coll := session.DB("mydb").C("mycoll")
+
+	err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3}, M{"n": 4}, M{"n": 4})
+	c.Assert(err, IsNil)
+
+	bulk := coll.Bulk()
+	bulk.RemoveAll(M{"n": 1})
+	bulk.RemoveAll(M{"n": 2}, M{"n": 4})
+	r, err := bulk.Run()
+	c.Assert(err, IsNil)
+	c.Assert(r.Matched, Equals, 4)
+
+	type doc struct{ N int }
+	var res []doc
+	err = coll.Find(nil).Sort("n").All(&res)
+	c.Assert(err, IsNil)
+	c.Assert(res, DeepEquals, []doc{{3}})
+}

+ 684 - 0
src/gopkg.in/mgo.v2/cluster.go

@@ -0,0 +1,684 @@
+// mgo - MongoDB driver for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+//    list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+//    this list of conditions and the following disclaimer in the documentation
+//    and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package mgo
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"gopkg.in/mgo.v2/bson"
+)
+
+// ---------------------------------------------------------------------------
+// Mongo cluster encapsulation.
+//
+// A cluster enables the communication with one or more servers participating
+// in a mongo cluster.  This works with individual servers, a replica set,
+// a replica pair, one or multiple mongos routers, etc.
+
+// mongoCluster tracks the set of servers participating in a MongoDB
+// deployment and keeps it in sync via a background goroutine.
+type mongoCluster struct {
+	sync.RWMutex
+	serverSynced sync.Cond // broadcast when fresh server state becomes known
+	userSeeds    []string  // seed addresses provided at dial time
+	dynaSeeds    []string
+	servers      mongoServers
+	masters      mongoServers
+	references   int // reference count; servers close when it drops to zero
+	syncing      bool
+	direct       bool
+	failFast     bool
+	syncCount    uint
+	setName      string // replica set name to enforce, when non-empty
+	cachedIndex  map[string]bool
+	sync         chan bool // wakes up the syncServersLoop goroutine
+	dial         dialer
+	minPoolSize  int
+	maxIdleTimeMS int
+}
+
+// newCluster creates a cluster with a reference count of one and starts
+// the goroutine that keeps the server topology in sync.
+func newCluster(userSeeds []string, direct, failFast bool, dial dialer, setName string) *mongoCluster {
+	cluster := &mongoCluster{
+		userSeeds:  userSeeds,
+		references: 1,
+		direct:     direct,
+		failFast:   failFast,
+		dial:       dial,
+		setName:    setName,
+	}
+	// Waiters take the read lock while waiting for server state to change.
+	cluster.serverSynced.L = cluster.RWMutex.RLocker()
+	cluster.sync = make(chan bool, 1)
+	stats.cluster(+1)
+	go cluster.syncServersLoop()
+	return cluster
+}
+
+// Acquire increases the reference count for the cluster.
+// The count starts at one when the cluster is created.
+func (cluster *mongoCluster) Acquire() {
+	cluster.Lock()
+	cluster.references++
+	debugf("Cluster %p acquired (refs=%d)", cluster, cluster.references)
+	cluster.Unlock()
+}
+
+// Release decreases the reference count for the cluster. Once
+// it reaches zero, all servers will be closed.
+func (cluster *mongoCluster) Release() {
+	cluster.Lock()
+	if cluster.references == 0 {
+		panic("cluster.Release() with references == 0")
+	}
+	cluster.references--
+	debugf("Cluster %p released (refs=%d)", cluster, cluster.references)
+	if cluster.references == 0 {
+		for _, server := range cluster.servers.Slice() {
+			server.Close()
+		}
+		// Wake up the sync loop so it can die.
+		cluster.syncServers()
+		stats.cluster(-1)
+	}
+	cluster.Unlock()
+}
+
+// LiveServers returns the addresses of all servers currently known to
+// the cluster.
+func (cluster *mongoCluster) LiveServers() (servers []string) {
+	cluster.RLock()
+	for _, serv := range cluster.servers.Slice() {
+		servers = append(servers, serv.Addr)
+	}
+	cluster.RUnlock()
+	return servers
+}
+
+// removeServer drops the given server from the cluster (and from the set
+// of masters, if present) and closes it. The instance actually stored in
+// the server set is closed too, since it may differ from the argument.
+func (cluster *mongoCluster) removeServer(server *mongoServer) {
+	cluster.Lock()
+	cluster.masters.Remove(server)
+	other := cluster.servers.Remove(server)
+	cluster.Unlock()
+	if other != nil {
+		other.Close()
+		log("Removed server ", server.Addr, " from cluster.")
+	}
+	server.Close()
+}
+
+// isMasterResult holds the fields of interest from the reply to the
+// "ismaster" server command.
+type isMasterResult struct {
+	IsMaster       bool
+	Secondary      bool
+	Primary        string
+	Hosts          []string
+	Passives       []string
+	Tags           bson.D
+	Msg            string
+	SetName        string `bson:"setName"`
+	MaxWireVersion int    `bson:"maxWireVersion"`
+}
+
+// isMaster runs the "ismaster" command over the given socket and stores
+// the reply in result.
+func (cluster *mongoCluster) isMaster(socket *mongoSocket, result *isMasterResult) error {
+	// Monotonic mode lets it talk to a slave and still hold the socket.
+	session := newSession(Monotonic, cluster, 10*time.Second)
+	session.setSocket(socket)
+	err := session.Run("ismaster", result)
+	session.Close()
+	return err
+}
+
+// possibleTimeout is implemented by errors (such as net.Error) that can
+// report whether they were caused by a timeout.
+type possibleTimeout interface {
+	Timeout() bool
+}
+
+// syncSocketTimeout is the socket timeout used while syncing servers.
+// It is a variable only so tests can adjust it.
+var syncSocketTimeout = 5 * time.Second
+
+// syncServer determines the role of the given server (master, slave, or
+// mongos) by running "ismaster" against it, retrying a few times on
+// transient failures. It returns the server's info along with the peer
+// addresses the server knows about, primary first when known.
+func (cluster *mongoCluster) syncServer(server *mongoServer) (info *mongoServerInfo, hosts []string, err error) {
+	var syncTimeout time.Duration
+	if raceDetector {
+		// This variable is only ever touched by tests.
+		globalMutex.Lock()
+		syncTimeout = syncSocketTimeout
+		globalMutex.Unlock()
+	} else {
+		syncTimeout = syncSocketTimeout
+	}
+
+	addr := server.Addr
+	log("SYNC Processing ", addr, "...")
+
+	// Retry a few times to avoid knocking a server down for a hiccup.
+	var result isMasterResult
+	var tryerr error
+	for retry := 0; ; retry++ {
+		// Up to 3 attempts normally; only 1 when failFast is set.
+		if retry == 3 || retry == 1 && cluster.failFast {
+			return nil, nil, tryerr
+		}
+		if retry > 0 {
+			// Don't abuse the server needlessly if there's something actually wrong.
+			if err, ok := tryerr.(possibleTimeout); ok && err.Timeout() {
+				// Give a chance for waiters to timeout as well.
+				cluster.serverSynced.Broadcast()
+			}
+			time.Sleep(syncShortDelay)
+		}
+
+		// It's not clear what would be a good timeout here. Is it
+		// better to wait longer or to retry?
+		socket, _, err := server.AcquireSocket(0, syncTimeout)
+		if err != nil {
+			tryerr = err
+			logf("SYNC Failed to get socket to %s: %v", addr, err)
+			continue
+		}
+		err = cluster.isMaster(socket, &result)
+		socket.Release()
+		if err != nil {
+			tryerr = err
+			logf("SYNC Command 'ismaster' to %s failed: %v", addr, err)
+			continue
+		}
+		debugf("SYNC Result of 'ismaster' from %s: %#v", addr, result)
+		break
+	}
+
+	// When a replica set name was requested, reject members of other sets.
+	if cluster.setName != "" && result.SetName != cluster.setName {
+		logf("SYNC Server %s is not a member of replica set %q", addr, cluster.setName)
+		return nil, nil, fmt.Errorf("server %s is not a member of replica set %q", addr, cluster.setName)
+	}
+
+	if result.IsMaster {
+		debugf("SYNC %s is a master.", addr)
+		if !server.info.Master {
+			// Made an incorrect assumption above, so fix stats.
+			stats.conn(-1, false)
+			stats.conn(+1, true)
+		}
+	} else if result.Secondary {
+		debugf("SYNC %s is a slave.", addr)
+	} else if cluster.direct {
+		logf("SYNC %s in unknown state. Pretending it's a slave due to direct connection.", addr)
+	} else {
+		logf("SYNC %s is neither a master nor a slave.", addr)
+		// Let stats track it as whatever was known before.
+		return nil, nil, errors.New(addr + " is not a master nor slave")
+	}
+
+	info = &mongoServerInfo{
+		Master:         result.IsMaster,
+		Mongos:         result.Msg == "isdbgrid",
+		Tags:           result.Tags,
+		SetName:        result.SetName,
+		MaxWireVersion: result.MaxWireVersion,
+	}
+
+	hosts = make([]string, 0, 1+len(result.Hosts)+len(result.Passives))
+	if result.Primary != "" {
+		// First in the list to speed up master discovery.
+		hosts = append(hosts, result.Primary)
+	}
+	hosts = append(hosts, result.Hosts...)
+	hosts = append(hosts, result.Passives...)
+
+	debugf("SYNC %s knows about the following peers: %#v", addr, hosts)
+	return info, hosts, nil
+}
+
+type syncKind bool
+
+const (
+	completeSync syncKind = true
+	partialSync  syncKind = false
+)
+
+func (cluster *mongoCluster) addServer(server *mongoServer, info *mongoServerInfo, syncKind syncKind) {
+	cluster.Lock()
+	current := cluster.servers.Search(server.ResolvedAddr)
+	if current == nil {
+		if syncKind == partialSync {
+			cluster.Unlock()
+			server.Close()
+			log("SYNC Discarding unknown server ", server.Addr, " due to partial sync.")
+			return
+		}
+		cluster.servers.Add(server)
+		if info.Master {
+			cluster.masters.Add(server)
+			log("SYNC Adding ", server.Addr, " to cluster as a master.")
+		} else {
+			log("SYNC Adding ", server.Addr, " to cluster as a slave.")
+		}
+	} else {
+		if server != current {
+			panic("addServer attempting to add duplicated server")
+		}
+		if server.Info().Master != info.Master {
+			if info.Master {
+				log("SYNC Server ", server.Addr, " is now a master.")
+				cluster.masters.Add(server)
+			} else {
+				log("SYNC Server ", server.Addr, " is now a slave.")
+				cluster.masters.Remove(server)
+			}
+		}
+	}
+	server.SetInfo(info)
+	debugf("SYNC Broadcasting availability of server %s", server.Addr)
+	cluster.serverSynced.Broadcast()
+	cluster.Unlock()
+}
+
+func (cluster *mongoCluster) getKnownAddrs() []string {
+	cluster.RLock()
+	max := len(cluster.userSeeds) + len(cluster.dynaSeeds) + cluster.servers.Len()
+	seen := make(map[string]bool, max)
+	known := make([]string, 0, max)
+
+	add := func(addr string) {
+		if _, found := seen[addr]; !found {
+			seen[addr] = true
+			known = append(known, addr)
+		}
+	}
+
+	for _, addr := range cluster.userSeeds {
+		add(addr)
+	}
+	for _, addr := range cluster.dynaSeeds {
+		add(addr)
+	}
+	for _, serv := range cluster.servers.Slice() {
+		add(serv.Addr)
+	}
+	cluster.RUnlock()
+
+	return known
+}
+
+// syncServers injects a value into the cluster.sync channel to force
+// an iteration of the syncServersLoop function.
+func (cluster *mongoCluster) syncServers() {
+	select {
+	case cluster.sync <- true:
+	default:
+	}
+}
+
+// How long to wait for a checkup of the cluster topology if nothing
+// else kicks a synchronization before that.
+const syncServersDelay = 30 * time.Second
+const syncShortDelay = 500 * time.Millisecond
+
+// syncServersLoop loops while the cluster is alive to keep its idea of
+// the server topology up-to-date. It must be called just once from
+// newCluster.  The loop iterates once syncServersDelay has passed, or
+// if somebody injects a value into the cluster.sync channel to force a
+// synchronization.  A loop iteration will contact all servers in
+// parallel, ask them about known peers and their own role within the
+// cluster, and then attempt to do the same with all the peers
+// retrieved.
+func (cluster *mongoCluster) syncServersLoop() {
+	for {
+		debugf("SYNC Cluster %p is starting a sync loop iteration.", cluster)
+
+		cluster.Lock()
+		if cluster.references == 0 {
+			cluster.Unlock()
+			break
+		}
+		cluster.references++ // Keep alive while syncing.
+		direct := cluster.direct
+		cluster.Unlock()
+
+		cluster.syncServersIteration(direct)
+
+		// We just synchronized, so consume any outstanding requests.
+		select {
+		case <-cluster.sync:
+		default:
+		}
+
+		cluster.Release()
+
+		// Hold off before allowing another sync. No point in
+		// burning CPU looking for down servers.
+		if !cluster.failFast {
+			time.Sleep(syncShortDelay)
+		}
+
+		cluster.Lock()
+		if cluster.references == 0 {
+			cluster.Unlock()
+			break
+		}
+		cluster.syncCount++
+		// Poke all waiters so they have a chance to timeout or
+		// restart syncing if they wish to.
+		cluster.serverSynced.Broadcast()
+		// Check if we have to restart immediately either way.
+		restart := !direct && cluster.masters.Empty() || cluster.servers.Empty()
+		cluster.Unlock()
+
+		if restart {
+			log("SYNC No masters found. Will synchronize again.")
+			time.Sleep(syncShortDelay)
+			continue
+		}
+
+		debugf("SYNC Cluster %p waiting for next requested or scheduled sync.", cluster)
+
+		// Hold off until somebody explicitly requests a synchronization
+		// or it's time to check for a cluster topology change again.
+		select {
+		case <-cluster.sync:
+		case <-time.After(syncServersDelay):
+		}
+	}
+	debugf("SYNC Cluster %p is stopping its sync loop.", cluster)
+}
+
+func (cluster *mongoCluster) server(addr string, tcpaddr *net.TCPAddr) *mongoServer {
+	cluster.RLock()
+	server := cluster.servers.Search(tcpaddr.String())
+	cluster.RUnlock()
+	if server != nil {
+		return server
+	}
+	return newServer(addr, tcpaddr, cluster.sync, cluster.dial, cluster.minPoolSize, cluster.maxIdleTimeMS)
+}
+
+func resolveAddr(addr string) (*net.TCPAddr, error) {
+	// Simple cases that do not need actual resolution. Works with IPv4 and v6.
+	if host, port, err := net.SplitHostPort(addr); err == nil {
+		if port, _ := strconv.Atoi(port); port > 0 {
+			zone := ""
+			if i := strings.LastIndex(host, "%"); i >= 0 {
+				zone = host[i+1:]
+				host = host[:i]
+			}
+			ip := net.ParseIP(host)
+			if ip != nil {
+				return &net.TCPAddr{IP: ip, Port: port, Zone: zone}, nil
+			}
+		}
+	}
+
+	// Attempt to resolve IPv4 and v6 concurrently.
+	addrChan := make(chan *net.TCPAddr, 2)
+	for _, network := range []string{"udp4", "udp6"} {
+		network := network
+		go func() {
+			// The unfortunate UDP dialing hack allows having a timeout on address resolution.
+			conn, err := net.DialTimeout(network, addr, 10*time.Second)
+			if err != nil {
+				addrChan <- nil
+			} else {
+				addrChan <- (*net.TCPAddr)(conn.RemoteAddr().(*net.UDPAddr))
+				conn.Close()
+			}
+		}()
+	}
+
+	// Wait for the result of IPv4 and v6 resolution. Use IPv4 if available.
+	tcpaddr := <-addrChan
+	if tcpaddr == nil || len(tcpaddr.IP) != 4 {
+		var timeout <-chan time.Time
+		if tcpaddr != nil {
+			// Don't wait too long if an IPv6 address is known.
+			timeout = time.After(50 * time.Millisecond)
+		}
+		select {
+		case <-timeout:
+		case tcpaddr2 := <-addrChan:
+			if tcpaddr == nil || tcpaddr2 != nil {
+				// It's an IPv4 address or the only known address. Use it.
+				tcpaddr = tcpaddr2
+			}
+		}
+	}
+
+	if tcpaddr == nil {
+		log("SYNC Failed to resolve server address: ", addr)
+		return nil, errors.New("failed to resolve server address: " + addr)
+	}
+	if tcpaddr.String() != addr {
+		debug("SYNC Address ", addr, " resolved as ", tcpaddr.String())
+	}
+	return tcpaddr, nil
+}
+
+type pendingAdd struct {
+	server *mongoServer
+	info   *mongoServerInfo
+}
+
+func (cluster *mongoCluster) syncServersIteration(direct bool) {
+	log("SYNC Starting full topology synchronization...")
+
+	var wg sync.WaitGroup
+	var m sync.Mutex
+	notYetAdded := make(map[string]pendingAdd)
+	addIfFound := make(map[string]bool)
+	seen := make(map[string]bool)
+	syncKind := partialSync
+
+	var spawnSync func(addr string, byMaster bool)
+	spawnSync = func(addr string, byMaster bool) {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+
+			tcpaddr, err := resolveAddr(addr)
+			if err != nil {
+				log("SYNC Failed to start sync of ", addr, ": ", err.Error())
+				return
+			}
+			resolvedAddr := tcpaddr.String()
+
+			m.Lock()
+			if byMaster {
+				if pending, ok := notYetAdded[resolvedAddr]; ok {
+					delete(notYetAdded, resolvedAddr)
+					m.Unlock()
+					cluster.addServer(pending.server, pending.info, completeSync)
+					return
+				}
+				addIfFound[resolvedAddr] = true
+			}
+			if seen[resolvedAddr] {
+				m.Unlock()
+				return
+			}
+			seen[resolvedAddr] = true
+			m.Unlock()
+
+			server := cluster.server(addr, tcpaddr)
+			info, hosts, err := cluster.syncServer(server)
+			if err != nil {
+				cluster.removeServer(server)
+				return
+			}
+
+			m.Lock()
+			add := direct || info.Master || addIfFound[resolvedAddr]
+			if add {
+				syncKind = completeSync
+			} else {
+				notYetAdded[resolvedAddr] = pendingAdd{server, info}
+			}
+			m.Unlock()
+			if add {
+				cluster.addServer(server, info, completeSync)
+			}
+			if !direct {
+				for _, addr := range hosts {
+					spawnSync(addr, info.Master)
+				}
+			}
+		}()
+	}
+
+	knownAddrs := cluster.getKnownAddrs()
+	for _, addr := range knownAddrs {
+		spawnSync(addr, false)
+	}
+	wg.Wait()
+
+	if syncKind == completeSync {
+		logf("SYNC Synchronization was complete (got data from primary).")
+		for _, pending := range notYetAdded {
+			cluster.removeServer(pending.server)
+		}
+	} else {
+		logf("SYNC Synchronization was partial (cannot talk to primary).")
+		for _, pending := range notYetAdded {
+			cluster.addServer(pending.server, pending.info, partialSync)
+		}
+	}
+
+	cluster.Lock()
+	mastersLen := cluster.masters.Len()
+	logf("SYNC Synchronization completed: %d master(s) and %d slave(s) alive.", mastersLen, cluster.servers.Len()-mastersLen)
+
+	// Update dynamic seeds, but only if we have any good servers. Otherwise,
+	// leave them alone for better chances of a successful sync in the future.
+	if syncKind == completeSync {
+		dynaSeeds := make([]string, cluster.servers.Len())
+		for i, server := range cluster.servers.Slice() {
+			dynaSeeds[i] = server.Addr
+		}
+		cluster.dynaSeeds = dynaSeeds
+		debugf("SYNC New dynamic seeds: %#v\n", dynaSeeds)
+	}
+	cluster.Unlock()
+}
+
+// AcquireSocket returns a socket to a server in the cluster.  If slaveOk is
+// true, it will attempt to return a socket to a slave server.  If it is
+// false, the socket will necessarily be to a master server.
+func (cluster *mongoCluster) AcquireSocket(mode Mode, slaveOk bool, syncTimeout time.Duration, socketTimeout time.Duration, serverTags []bson.D, poolLimit int) (s *mongoSocket, err error) {
+	var started time.Time
+	var syncCount uint
+	warnedLimit := false
+	for {
+		cluster.RLock()
+		for {
+			mastersLen := cluster.masters.Len()
+			slavesLen := cluster.servers.Len() - mastersLen
+			debugf("Cluster has %d known masters and %d known slaves.", mastersLen, slavesLen)
+			if mastersLen > 0 && !(slaveOk && mode == Secondary) || slavesLen > 0 && slaveOk {
+				break
+			}
+			if mastersLen > 0 && mode == Secondary && cluster.masters.HasMongos() {
+				break
+			}
+			if started.IsZero() {
+				// Initialize after fast path above.
+				started = time.Now()
+				syncCount = cluster.syncCount
+			} else if syncTimeout != 0 && started.Before(time.Now().Add(-syncTimeout)) || cluster.failFast && cluster.syncCount != syncCount {
+				cluster.RUnlock()
+				return nil, errors.New("no reachable servers")
+			}
+			log("Waiting for servers to synchronize...")
+			cluster.syncServers()
+
+			// Remember: this will release and reacquire the lock.
+			cluster.serverSynced.Wait()
+		}
+
+		var server *mongoServer
+		if slaveOk {
+			server = cluster.servers.BestFit(mode, serverTags)
+		} else {
+			server = cluster.masters.BestFit(mode, nil)
+		}
+		cluster.RUnlock()
+
+		if server == nil {
+			// Must have failed the requested tags. Sleep to avoid spinning.
+			time.Sleep(1e8)
+			continue
+		}
+
+		s, abended, err := server.AcquireSocket(poolLimit, socketTimeout)
+		if err == errPoolLimit {
+			if !warnedLimit {
+				warnedLimit = true
+				log("WARNING: Per-server connection limit reached.")
+			}
+			time.Sleep(100 * time.Millisecond)
+			continue
+		}
+		if err != nil {
+			cluster.removeServer(server)
+			cluster.syncServers()
+			continue
+		}
+		if abended && !slaveOk {
+			var result isMasterResult
+			err := cluster.isMaster(s, &result)
+			if err != nil || !result.IsMaster {
+				logf("Cannot confirm server %s as master (%v)", server.Addr, err)
+				s.Release()
+				cluster.syncServers()
+				time.Sleep(100 * time.Millisecond)
+				continue
+			}
+		}
+		return s, nil
+	}
+	panic("unreached")
+}
+
+func (cluster *mongoCluster) CacheIndex(cacheKey string, exists bool) {
+	cluster.Lock()
+	if cluster.cachedIndex == nil {
+		cluster.cachedIndex = make(map[string]bool)
+	}
+	if exists {
+		cluster.cachedIndex[cacheKey] = true
+	} else {
+		delete(cluster.cachedIndex, cacheKey)
+	}
+	cluster.Unlock()
+}
+
+func (cluster *mongoCluster) HasCachedIndex(cacheKey string) (result bool) {
+	cluster.RLock()
+	if cluster.cachedIndex != nil {
+		result = cluster.cachedIndex[cacheKey]
+	}
+	cluster.RUnlock()
+	return
+}
+
+func (cluster *mongoCluster) ResetIndexCache() {
+	cluster.Lock()
+	cluster.cachedIndex = make(map[string]bool)
+	cluster.Unlock()
+}

+ 2090 - 0
src/gopkg.in/mgo.v2/cluster_test.go

@@ -0,0 +1,2090 @@
+// mgo - MongoDB driver for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+//    list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+//    this list of conditions and the following disclaimer in the documentation
+//    and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package mgo_test
+
+import (
+	"fmt"
+	"io"
+	"net"
+	"strings"
+	"sync"
+	"time"
+
+	. "gopkg.in/check.v1"
+	"gopkg.in/mgo.v2"
+	"gopkg.in/mgo.v2/bson"
+)
+
+func (s *S) TestNewSession(c *C) {
+	session, err := mgo.Dial("localhost:40001")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	// Do a dummy operation to wait for connection.
+	coll := session.DB("mydb").C("mycoll")
+	err = coll.Insert(M{"_id": 1})
+	c.Assert(err, IsNil)
+
+	// Tweak safety and query settings to ensure other has copied those.
+	session.SetSafe(nil)
+	session.SetBatch(-1)
+	other := session.New()
+	defer other.Close()
+	session.SetSafe(&mgo.Safe{})
+
+	// Clone was copied while session was unsafe, so no errors.
+	otherColl := other.DB("mydb").C("mycoll")
+	err = otherColl.Insert(M{"_id": 1})
+	c.Assert(err, IsNil)
+
+	// Original session was made safe again.
+	err = coll.Insert(M{"_id": 1})
+	c.Assert(err, NotNil)
+
+	// With New(), each session has its own socket now.
+	stats := mgo.GetStats()
+	c.Assert(stats.MasterConns, Equals, 2)
+	c.Assert(stats.SocketsInUse, Equals, 2)
+
+	// Ensure query parameters were cloned.
+	err = otherColl.Insert(M{"_id": 2})
+	c.Assert(err, IsNil)
+
+	// Ping the database to ensure the nonce has been received already.
+	c.Assert(other.Ping(), IsNil)
+
+	mgo.ResetStats()
+
+	iter := otherColl.Find(M{}).Iter()
+	c.Assert(err, IsNil)
+
+	m := M{}
+	ok := iter.Next(m)
+	c.Assert(ok, Equals, true)
+	err = iter.Close()
+	c.Assert(err, IsNil)
+
+	// If Batch(-1) is in effect, a single document must have been received.
+	stats = mgo.GetStats()
+	c.Assert(stats.ReceivedDocs, Equals, 1)
+}
+
+func (s *S) TestCloneSession(c *C) {
+	session, err := mgo.Dial("localhost:40001")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	// Do a dummy operation to wait for connection.
+	coll := session.DB("mydb").C("mycoll")
+	err = coll.Insert(M{"_id": 1})
+	c.Assert(err, IsNil)
+
+	// Tweak safety and query settings to ensure clone is copying those.
+	session.SetSafe(nil)
+	session.SetBatch(-1)
+	clone := session.Clone()
+	defer clone.Close()
+	session.SetSafe(&mgo.Safe{})
+
+	// Clone was copied while session was unsafe, so no errors.
+	cloneColl := clone.DB("mydb").C("mycoll")
+	err = cloneColl.Insert(M{"_id": 1})
+	c.Assert(err, IsNil)
+
+	// Original session was made safe again.
+	err = coll.Insert(M{"_id": 1})
+	c.Assert(err, NotNil)
+
+	// With Clone(), same socket is shared between sessions now.
+	stats := mgo.GetStats()
+	c.Assert(stats.SocketsInUse, Equals, 1)
+	c.Assert(stats.SocketRefs, Equals, 2)
+
+	// Refreshing one of them should let the original socket go,
+	// while preserving the safety settings.
+	clone.Refresh()
+	err = cloneColl.Insert(M{"_id": 1})
+	c.Assert(err, IsNil)
+
+	// Must have used another connection now.
+	stats = mgo.GetStats()
+	c.Assert(stats.SocketsInUse, Equals, 2)
+	c.Assert(stats.SocketRefs, Equals, 2)
+
+	// Ensure query parameters were cloned.
+	err = cloneColl.Insert(M{"_id": 2})
+	c.Assert(err, IsNil)
+
+	// Ping the database to ensure the nonce has been received already.
+	c.Assert(clone.Ping(), IsNil)
+
+	mgo.ResetStats()
+
+	iter := cloneColl.Find(M{}).Iter()
+	c.Assert(err, IsNil)
+
+	m := M{}
+	ok := iter.Next(m)
+	c.Assert(ok, Equals, true)
+	err = iter.Close()
+	c.Assert(err, IsNil)
+
+	// If Batch(-1) is in effect, a single document must have been received.
+	stats = mgo.GetStats()
+	c.Assert(stats.ReceivedDocs, Equals, 1)
+}
+
+func (s *S) TestModeStrong(c *C) {
+	session, err := mgo.Dial("localhost:40012")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	session.SetMode(mgo.Monotonic, false)
+	session.SetMode(mgo.Strong, false)
+
+	c.Assert(session.Mode(), Equals, mgo.Strong)
+
+	result := M{}
+	cmd := session.DB("admin").C("$cmd")
+	err = cmd.Find(M{"ismaster": 1}).One(&result)
+	c.Assert(err, IsNil)
+	c.Assert(result["ismaster"], Equals, true)
+
+	coll := session.DB("mydb").C("mycoll")
+	err = coll.Insert(M{"a": 1})
+	c.Assert(err, IsNil)
+
+	// Wait since the sync also uses sockets.
+	for len(session.LiveServers()) != 3 {
+		c.Log("Waiting for cluster sync to finish...")
+		time.Sleep(5e8)
+	}
+
+	stats := mgo.GetStats()
+	c.Assert(stats.MasterConns, Equals, 1)
+	c.Assert(stats.SlaveConns, Equals, 2)
+	c.Assert(stats.SocketsInUse, Equals, 1)
+
+	session.SetMode(mgo.Strong, true)
+
+	stats = mgo.GetStats()
+	c.Assert(stats.SocketsInUse, Equals, 0)
+}
+
+func (s *S) TestModeMonotonic(c *C) {
+	// Must necessarily connect to a slave, otherwise the
+	// master connection will be available first.
+	session, err := mgo.Dial("localhost:40012")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	session.SetMode(mgo.Monotonic, false)
+
+	c.Assert(session.Mode(), Equals, mgo.Monotonic)
+
+	var result struct{ IsMaster bool }
+	cmd := session.DB("admin").C("$cmd")
+	err = cmd.Find(M{"ismaster": 1}).One(&result)
+	c.Assert(err, IsNil)
+	c.Assert(result.IsMaster, Equals, false)
+
+	coll := session.DB("mydb").C("mycoll")
+	err = coll.Insert(M{"a": 1})
+	c.Assert(err, IsNil)
+
+	err = cmd.Find(M{"ismaster": 1}).One(&result)
+	c.Assert(err, IsNil)
+	c.Assert(result.IsMaster, Equals, true)
+
+	// Wait since the sync also uses sockets.
+	for len(session.LiveServers()) != 3 {
+		c.Log("Waiting for cluster sync to finish...")
+		time.Sleep(5e8)
+	}
+
+	stats := mgo.GetStats()
+	c.Assert(stats.MasterConns, Equals, 1)
+	c.Assert(stats.SlaveConns, Equals, 2)
+	c.Assert(stats.SocketsInUse, Equals, 2)
+
+	session.SetMode(mgo.Monotonic, true)
+
+	stats = mgo.GetStats()
+	c.Assert(stats.SocketsInUse, Equals, 0)
+}
+
+func (s *S) TestModeMonotonicAfterStrong(c *C) {
+	// Test that a strong session shifting to a monotonic
+	// one preserves the socket untouched.
+
+	session, err := mgo.Dial("localhost:40012")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	// Insert something to force a connection to the master.
+	coll := session.DB("mydb").C("mycoll")
+	err = coll.Insert(M{"a": 1})
+	c.Assert(err, IsNil)
+
+	session.SetMode(mgo.Monotonic, false)
+
+	// Wait since the sync also uses sockets.
+	for len(session.LiveServers()) != 3 {
+		c.Log("Waiting for cluster sync to finish...")
+		time.Sleep(5e8)
+	}
+
+	// Master socket should still be reserved.
+	stats := mgo.GetStats()
+	c.Assert(stats.SocketsInUse, Equals, 1)
+
+	// Confirm it's the master even though it's Monotonic by now.
+	result := M{}
+	cmd := session.DB("admin").C("$cmd")
+	err = cmd.Find(M{"ismaster": 1}).One(&result)
+	c.Assert(err, IsNil)
+	c.Assert(result["ismaster"], Equals, true)
+}
+
+func (s *S) TestModeStrongAfterMonotonic(c *C) {
+	// Test that shifting from Monotonic to Strong while
+	// using a slave socket will keep the socket reserved
+	// until the master socket is necessary, so that no
+	// switch over occurs unless it's actually necessary.
+
+	// Must necessarily connect to a slave, otherwise the
+	// master connection will be available first.
+	session, err := mgo.Dial("localhost:40012")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	session.SetMode(mgo.Monotonic, false)
+
+	// Ensure we're talking to a slave, and reserve the socket.
+	result := M{}
+	err = session.Run("ismaster", &result)
+	c.Assert(err, IsNil)
+	c.Assert(result["ismaster"], Equals, false)
+
+	// Switch to a Strong session.
+	session.SetMode(mgo.Strong, false)
+
+	// Wait since the sync also uses sockets.
+	for len(session.LiveServers()) != 3 {
+		c.Log("Waiting for cluster sync to finish...")
+		time.Sleep(5e8)
+	}
+
+	// Slave socket should still be reserved.
+	stats := mgo.GetStats()
+	c.Assert(stats.SocketsInUse, Equals, 1)
+
+	// But any operation will switch it to the master.
+	result = M{}
+	err = session.Run("ismaster", &result)
+	c.Assert(err, IsNil)
+	c.Assert(result["ismaster"], Equals, true)
+}
+
+func (s *S) TestModeMonotonicWriteOnIteration(c *C) {
+	// Must necessarily connect to a slave, otherwise the
+	// master connection will be available first.
+	session, err := mgo.Dial("localhost:40012")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	session.SetMode(mgo.Monotonic, false)
+
+	c.Assert(session.Mode(), Equals, mgo.Monotonic)
+
+	coll1 := session.DB("mydb").C("mycoll1")
+	coll2 := session.DB("mydb").C("mycoll2")
+
+	ns := []int{40, 41, 42, 43, 44, 45, 46}
+	for _, n := range ns {
+		err := coll1.Insert(M{"n": n})
+		c.Assert(err, IsNil)
+	}
+
+	// Release master so we can grab a slave again.
+	session.Refresh()
+
+	// Wait until synchronization is done.
+	for {
+		n, err := coll1.Count()
+		c.Assert(err, IsNil)
+		if n == len(ns) {
+			break
+		}
+	}
+
+	iter := coll1.Find(nil).Batch(2).Iter()
+	i := 0
+	m := M{}
+	for iter.Next(&m) {
+		i++
+		if i > 3 {
+			err := coll2.Insert(M{"n": 47 + i})
+			c.Assert(err, IsNil)
+		}
+	}
+	c.Assert(i, Equals, len(ns))
+}
+
+func (s *S) TestModeEventual(c *C) {
+	// Must necessarily connect to a slave, otherwise the
+	// master connection will be available first.
+	session, err := mgo.Dial("localhost:40012")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	session.SetMode(mgo.Eventual, false)
+
+	c.Assert(session.Mode(), Equals, mgo.Eventual)
+
+	result := M{}
+	err = session.Run("ismaster", &result)
+	c.Assert(err, IsNil)
+	c.Assert(result["ismaster"], Equals, false)
+
+	coll := session.DB("mydb").C("mycoll")
+	err = coll.Insert(M{"a": 1})
+	c.Assert(err, IsNil)
+
+	result = M{}
+	err = session.Run("ismaster", &result)
+	c.Assert(err, IsNil)
+	c.Assert(result["ismaster"], Equals, false)
+
+	// Wait since the sync also uses sockets.
+	for len(session.LiveServers()) != 3 {
+		c.Log("Waiting for cluster sync to finish...")
+		time.Sleep(5e8)
+	}
+
+	stats := mgo.GetStats()
+	c.Assert(stats.MasterConns, Equals, 1)
+	c.Assert(stats.SlaveConns, Equals, 2)
+	c.Assert(stats.SocketsInUse, Equals, 0)
+}
+
+func (s *S) TestModeEventualAfterStrong(c *C) {
+	// Test that a strong session shifting to an eventual
+	// one preserves the socket untouched.
+
+	session, err := mgo.Dial("localhost:40012")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	// Insert something to force a connection to the master.
+	coll := session.DB("mydb").C("mycoll")
+	err = coll.Insert(M{"a": 1})
+	c.Assert(err, IsNil)
+
+	session.SetMode(mgo.Eventual, false)
+
+	// Wait since the sync also uses sockets.
+	for len(session.LiveServers()) != 3 {
+		c.Log("Waiting for cluster sync to finish...")
+		time.Sleep(5e8)
+	}
+
+	// Master socket should still be reserved.
+	stats := mgo.GetStats()
+	c.Assert(stats.SocketsInUse, Equals, 1)
+
+	// Confirm it's the master even though it's Eventual by now.
+	result := M{}
+	cmd := session.DB("admin").C("$cmd")
+	err = cmd.Find(M{"ismaster": 1}).One(&result)
+	c.Assert(err, IsNil)
+	c.Assert(result["ismaster"], Equals, true)
+
+	session.SetMode(mgo.Eventual, true)
+
+	stats = mgo.GetStats()
+	c.Assert(stats.SocketsInUse, Equals, 0)
+}
+
+func (s *S) TestModeStrongFallover(c *C) {
+	if *fast {
+		c.Skip("-fast")
+	}
+
+	session, err := mgo.Dial("localhost:40021")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	// With strong consistency, this will open a socket to the master.
+	result := &struct{ Host string }{}
+	err = session.Run("serverStatus", result)
+	c.Assert(err, IsNil)
+
+	// Kill the master.
+	host := result.Host
+	s.Stop(host)
+
+	// This must fail, since the connection was broken.
+	err = session.Run("serverStatus", result)
+	c.Assert(err, Equals, io.EOF)
+
+	// With strong consistency, it fails again until reset.
+	err = session.Run("serverStatus", result)
+	c.Assert(err, Equals, io.EOF)
+
+	session.Refresh()
+
+	// Now we should be able to talk to the new master.
+	// Increase the timeout since this may take quite a while.
+	session.SetSyncTimeout(3 * time.Minute)
+
+	err = session.Run("serverStatus", result)
+	c.Assert(err, IsNil)
+	c.Assert(result.Host, Not(Equals), host)
+
+	// Insert some data to confirm it's indeed a master.
+	err = session.DB("mydb").C("mycoll").Insert(M{"n": 42})
+	c.Assert(err, IsNil)
+}
+
+func (s *S) TestModePrimaryHiccup(c *C) {
+	if *fast {
+		c.Skip("-fast")
+	}
+
+	session, err := mgo.Dial("localhost:40021")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	// With strong consistency, this will open a socket to the master.
+	result := &struct{ Host string }{}
+	err = session.Run("serverStatus", result)
+	c.Assert(err, IsNil)
+
+	// Establish a few extra sessions to create spare sockets to
+	// the master. This increases a bit the chances of getting an
+	// incorrect cached socket.
+	var sessions []*mgo.Session
+	for i := 0; i < 20; i++ {
+		sessions = append(sessions, session.Copy())
+		err = sessions[len(sessions)-1].Run("serverStatus", result)
+		c.Assert(err, IsNil)
+	}
+	for i := range sessions {
+		sessions[i].Close()
+	}
+
+	// Kill the master, but bring it back immediately.
+	host := result.Host
+	s.Stop(host)
+	s.StartAll()
+
+	// This must fail, since the connection was broken.
+	err = session.Run("serverStatus", result)
+	c.Assert(err, Equals, io.EOF)
+
+	// With strong consistency, it fails again until reset.
+	err = session.Run("serverStatus", result)
+	c.Assert(err, Equals, io.EOF)
+
+	session.Refresh()
+
+	// Now we should be able to talk to the new master.
+	// Increase the timeout since this may take quite a while.
+	session.SetSyncTimeout(3 * time.Minute)
+
+	// Insert some data to confirm it's indeed a master.
+	err = session.DB("mydb").C("mycoll").Insert(M{"n": 42})
+	c.Assert(err, IsNil)
+}
+
+func (s *S) TestModeMonotonicFallover(c *C) {
+	if *fast {
+		c.Skip("-fast")
+	}
+
+	session, err := mgo.Dial("localhost:40021")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	session.SetMode(mgo.Monotonic, true)
+
+	// Insert something to force a switch to the master.
+	coll := session.DB("mydb").C("mycoll")
+	err = coll.Insert(M{"a": 1})
+	c.Assert(err, IsNil)
+
+	// Wait a bit for this to be synchronized to slaves.
+	time.Sleep(3 * time.Second)
+
+	result := &struct{ Host string }{}
+	err = session.Run("serverStatus", result)
+	c.Assert(err, IsNil)
+
+	// Kill the master.
+	host := result.Host
+	s.Stop(host)
+
+	// This must fail, since the connection was broken.
+	err = session.Run("serverStatus", result)
+	c.Assert(err, Equals, io.EOF)
+
+	// With monotonic consistency, it fails again until reset.
+	err = session.Run("serverStatus", result)
+	c.Assert(err, Equals, io.EOF)
+
+	session.Refresh()
+
+	// Now we should be able to talk to the new master.
+	err = session.Run("serverStatus", result)
+	c.Assert(err, IsNil)
+	c.Assert(result.Host, Not(Equals), host)
+}
+
+func (s *S) TestModeMonotonicWithSlaveFallover(c *C) {
+	if *fast {
+		c.Skip("-fast")
+	}
+
+	session, err := mgo.Dial("localhost:40021")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	ssresult := &struct{ Host string }{}
+	imresult := &struct{ IsMaster bool }{}
+
+	// Figure the master while still using the strong session.
+	err = session.Run("serverStatus", ssresult)
+	c.Assert(err, IsNil)
+	err = session.Run("isMaster", imresult)
+	c.Assert(err, IsNil)
+	master := ssresult.Host
+	c.Assert(imresult.IsMaster, Equals, true, Commentf("%s is not the master", master))
+
+	// Create new monotonic session with an explicit address to ensure
+	// a slave is synchronized before the master, otherwise a connection
+	// with the master may be used below for lack of other options.
+	var addr string
+	switch {
+	case strings.HasSuffix(ssresult.Host, ":40021"):
+		addr = "localhost:40022"
+	case strings.HasSuffix(ssresult.Host, ":40022"):
+		addr = "localhost:40021"
+	case strings.HasSuffix(ssresult.Host, ":40023"):
+		addr = "localhost:40021"
+	default:
+		c.Fatal("Unknown host: ", ssresult.Host)
+	}
+
+	session, err = mgo.Dial(addr)
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	session.SetMode(mgo.Monotonic, true)
+
+	// Check the address of the socket associated with the monotonic session.
+	c.Log("Running serverStatus and isMaster with monotonic session")
+	err = session.Run("serverStatus", ssresult)
+	c.Assert(err, IsNil)
+	err = session.Run("isMaster", imresult)
+	c.Assert(err, IsNil)
+	slave := ssresult.Host
+	c.Assert(imresult.IsMaster, Equals, false, Commentf("%s is not a slave", slave))
+
+	c.Assert(master, Not(Equals), slave)
+
+	// Kill the master.
+	s.Stop(master)
+
+	// Session must still be good, since we were talking to a slave.
+	err = session.Run("serverStatus", ssresult)
+	c.Assert(err, IsNil)
+
+	c.Assert(ssresult.Host, Equals, slave,
+		Commentf("Monotonic session moved from %s to %s", slave, ssresult.Host))
+
+	// If we try to insert something, it'll have to hold until the new
+	// master is available to move the connection, and work correctly.
+	coll := session.DB("mydb").C("mycoll")
+	err = coll.Insert(M{"a": 1})
+	c.Assert(err, IsNil)
+
+	// Must now be talking to the new master.
+	err = session.Run("serverStatus", ssresult)
+	c.Assert(err, IsNil)
+	err = session.Run("isMaster", imresult)
+	c.Assert(err, IsNil)
+	c.Assert(imresult.IsMaster, Equals, true, Commentf("%s is not the master", master))
+
+	// ... which is not the old one, since it's still dead.
+	c.Assert(ssresult.Host, Not(Equals), master)
+}
+
+func (s *S) TestModeEventualFallover(c *C) {
+	if *fast {
+		c.Skip("-fast")
+	}
+
+	session, err := mgo.Dial("localhost:40021")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	result := &struct{ Host string }{}
+	err = session.Run("serverStatus", result)
+	c.Assert(err, IsNil)
+	master := result.Host
+
+	session.SetMode(mgo.Eventual, true)
+
+	// Should connect to the master when needed.
+	coll := session.DB("mydb").C("mycoll")
+	err = coll.Insert(M{"a": 1})
+	c.Assert(err, IsNil)
+
+	// Wait a bit for this to be synchronized to slaves.
+	time.Sleep(3 * time.Second)
+
+	// Kill the master.
+	s.Stop(master)
+
+	// Should still work, with the new master now.
+	coll = session.DB("mydb").C("mycoll")
+	err = coll.Insert(M{"a": 1})
+	c.Assert(err, IsNil)
+
+	err = session.Run("serverStatus", result)
+	c.Assert(err, IsNil)
+	c.Assert(result.Host, Not(Equals), master)
+}
+
+func (s *S) TestModeSecondaryJustPrimary(c *C) {
+	if *fast {
+		c.Skip("-fast")
+	}
+
+	session, err := mgo.Dial("localhost:40001")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	session.SetMode(mgo.Secondary, true)
+
+	err = session.Ping()
+	c.Assert(err, ErrorMatches, "no reachable servers")
+}
+
+func (s *S) TestModeSecondaryPreferredJustPrimary(c *C) {
+	if *fast {
+		c.Skip("-fast")
+	}
+
+	session, err := mgo.Dial("localhost:40001")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	session.SetMode(mgo.SecondaryPreferred, true)
+
+	result := &struct{ Host string }{}
+	err = session.Run("serverStatus", result)
+	c.Assert(err, IsNil)
+}
+
+func (s *S) TestModeSecondaryPreferredFallover(c *C) {
+	if *fast {
+		c.Skip("-fast")
+	}
+
+	session, err := mgo.Dial("localhost:40011")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	// Ensure secondaries are available for being picked up.
+	for len(session.LiveServers()) != 3 {
+		c.Log("Waiting for cluster sync to finish...")
+		time.Sleep(5e8)
+	}
+
+	session.SetMode(mgo.SecondaryPreferred, true)
+
+	result := &struct{ Host string }{}
+	err = session.Run("serverStatus", result)
+	c.Assert(err, IsNil)
+	c.Assert(supvName(result.Host), Not(Equals), "rs1a")
+	secondary := result.Host
+
+	// Should connect to the primary when needed.
+	coll := session.DB("mydb").C("mycoll")
+	err = coll.Insert(M{"a": 1})
+	c.Assert(err, IsNil)
+
+	// Wait a bit for this to be synchronized to slaves.
+	time.Sleep(3 * time.Second)
+
+	// Kill the primary.
+	s.Stop("localhost:40011")
+
+	// It can still talk to the selected secondary.
+	err = session.Run("serverStatus", result)
+	c.Assert(err, IsNil)
+	c.Assert(result.Host, Equals, secondary)
+
+	// But cannot speak to the primary until reset.
+	coll = session.DB("mydb").C("mycoll")
+	err = coll.Insert(M{"a": 1})
+	c.Assert(err, Equals, io.EOF)
+
+	session.Refresh()
+
+	// Can still talk to a secondary.
+	err = session.Run("serverStatus", result)
+	c.Assert(err, IsNil)
+	c.Assert(supvName(result.Host), Not(Equals), "rs1a")
+
+	s.StartAll()
+
+	// Should now be able to talk to the primary again.
+	coll = session.DB("mydb").C("mycoll")
+	err = coll.Insert(M{"a": 1})
+	c.Assert(err, IsNil)
+}
+
+func (s *S) TestModePrimaryPreferredFallover(c *C) {
+	if *fast {
+		c.Skip("-fast")
+	}
+
+	session, err := mgo.Dial("localhost:40011")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	session.SetMode(mgo.PrimaryPreferred, true)
+
+	result := &struct{ Host string }{}
+	err = session.Run("serverStatus", result)
+	c.Assert(err, IsNil)
+	c.Assert(supvName(result.Host), Equals, "rs1a")
+
+	// Kill the primary.
+	s.Stop("localhost:40011")
+
+	// Should now fail as there was a primary socket in use already.
+	err = session.Run("serverStatus", result)
+	c.Assert(err, Equals, io.EOF)
+
+	// Refresh so the reserved primary socket goes away.
+	session.Refresh()
+
+	// Should be able to talk to the secondary.
+	err = session.Run("serverStatus", result)
+	c.Assert(err, IsNil)
+
+	s.StartAll()
+
+	// Should wait for the new primary to become available.
+	coll := session.DB("mydb").C("mycoll")
+	err = coll.Insert(M{"a": 1})
+	c.Assert(err, IsNil)
+
+	// And should use the new primary in general, as it is preferred.
+	err = session.Run("serverStatus", result)
+	c.Assert(err, IsNil)
+	c.Assert(supvName(result.Host), Equals, "rs1a")
+}
+
+func (s *S) TestModePrimaryFallover(c *C) {
+	if *fast {
+		c.Skip("-fast")
+	}
+
+	session, err := mgo.Dial("localhost:40011")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	session.SetSyncTimeout(3 * time.Second)
+
+	session.SetMode(mgo.Primary, true)
+
+	result := &struct{ Host string }{}
+	err = session.Run("serverStatus", result)
+	c.Assert(err, IsNil)
+	c.Assert(supvName(result.Host), Equals, "rs1a")
+
+	// Kill the primary.
+	s.Stop("localhost:40011")
+
+	session.Refresh()
+
+	err = session.Ping()
+	c.Assert(err, ErrorMatches, "no reachable servers")
+}
+
+func (s *S) TestModeSecondary(c *C) {
+	if *fast {
+		c.Skip("-fast")
+	}
+
+	session, err := mgo.Dial("localhost:40011")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	session.SetMode(mgo.Secondary, true)
+
+	result := &struct{ Host string }{}
+	err = session.Run("serverStatus", result)
+	c.Assert(err, IsNil)
+	c.Assert(supvName(result.Host), Not(Equals), "rs1a")
+	secondary := result.Host
+
+	coll := session.DB("mydb").C("mycoll")
+	err = coll.Insert(M{"a": 1})
+	c.Assert(err, IsNil)
+
+	err = session.Run("serverStatus", result)
+	c.Assert(err, IsNil)
+	c.Assert(result.Host, Equals, secondary)
+}
+
+func (s *S) TestPreserveSocketCountOnSync(c *C) {
+	if *fast {
+		c.Skip("-fast")
+	}
+
+	session, err := mgo.Dial("localhost:40011")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	stats := mgo.GetStats()
+	for stats.SocketsAlive != 3 {
+		c.Logf("Waiting for all connections to be established (sockets alive currently %d)...", stats.SocketsAlive)
+		stats = mgo.GetStats()
+		time.Sleep(5e8)
+	}
+
+	c.Assert(stats.SocketsAlive, Equals, 3)
+
+	// Kill the master (with rs1, 'a' is always the master).
+	s.Stop("localhost:40011")
+
+	// Wait for the logic to run for a bit and bring it back.
+	startedAll := make(chan bool)
+	go func() {
+		time.Sleep(5e9)
+		s.StartAll()
+		startedAll <- true
+	}()
+
+	// Do not allow the test to return before the goroutine above is done.
+	defer func() {
+		<-startedAll
+	}()
+
+	// Do an action to kick the resync logic in, and also to
+	// wait until the cluster recognizes the server is back.
+	result := struct{ Ok bool }{}
+	err = session.Run("getLastError", &result)
+	c.Assert(err, IsNil)
+	c.Assert(result.Ok, Equals, true)
+
+	for i := 0; i != 20; i++ {
+		stats = mgo.GetStats()
+		if stats.SocketsAlive == 3 {
+			break
+		}
+		c.Logf("Waiting for 3 sockets alive, have %d", stats.SocketsAlive)
+		time.Sleep(5e8)
+	}
+
+	// Ensure the number of sockets is preserved after syncing.
+	stats = mgo.GetStats()
+	c.Assert(stats.SocketsAlive, Equals, 3)
+	c.Assert(stats.SocketsInUse, Equals, 1)
+	c.Assert(stats.SocketRefs, Equals, 1)
+}
+
+// Connect to the master of a deployment with a single server,
+// run an insert, and then ensure the insert worked and that a
+// single connection was established.
+func (s *S) TestTopologySyncWithSingleMaster(c *C) {
+	// Use hostname here rather than IP, to make things trickier.
+	session, err := mgo.Dial("localhost:40001")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	coll := session.DB("mydb").C("mycoll")
+	err = coll.Insert(M{"a": 1, "b": 2})
+	c.Assert(err, IsNil)
+
+	// One connection used for discovery. Master socket recycled for
+	// insert. Socket is reserved after insert.
+	stats := mgo.GetStats()
+	c.Assert(stats.MasterConns, Equals, 1)
+	c.Assert(stats.SlaveConns, Equals, 0)
+	c.Assert(stats.SocketsInUse, Equals, 1)
+
+	// Refresh session and socket must be released.
+	session.Refresh()
+	stats = mgo.GetStats()
+	c.Assert(stats.SocketsInUse, Equals, 0)
+}
+
+func (s *S) TestTopologySyncWithSlaveSeed(c *C) {
+	// That's supposed to be a slave. Must run discovery
+	// and find out master to insert successfully.
+	session, err := mgo.Dial("localhost:40012")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	coll := session.DB("mydb").C("mycoll")
+	coll.Insert(M{"a": 1, "b": 2})
+
+	result := struct{ Ok bool }{}
+	err = session.Run("getLastError", &result)
+	c.Assert(err, IsNil)
+	c.Assert(result.Ok, Equals, true)
+
+	// One connection to each during discovery. Master
+	// socket recycled for insert.
+	stats := mgo.GetStats()
+	c.Assert(stats.MasterConns, Equals, 1)
+	c.Assert(stats.SlaveConns, Equals, 2)
+
+	// Only one socket reference alive, in the master socket owned
+	// by the above session.
+	c.Assert(stats.SocketsInUse, Equals, 1)
+
+	// Refresh it, and it must be gone.
+	session.Refresh()
+	stats = mgo.GetStats()
+	c.Assert(stats.SocketsInUse, Equals, 0)
+}
+
+func (s *S) TestSyncTimeout(c *C) {
+	if *fast {
+		c.Skip("-fast")
+	}
+
+	session, err := mgo.Dial("localhost:40001")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	s.Stop("localhost:40001")
+
+	timeout := 3 * time.Second
+	session.SetSyncTimeout(timeout)
+	started := time.Now()
+
+	// Do something.
+	result := struct{ Ok bool }{}
+	err = session.Run("getLastError", &result)
+	c.Assert(err, ErrorMatches, "no reachable servers")
+	c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true)
+	c.Assert(started.After(time.Now().Add(-timeout*2)), Equals, true)
+}
+
+func (s *S) TestDialWithTimeout(c *C) {
+	if *fast {
+		c.Skip("-fast")
+	}
+
+	timeout := 2 * time.Second
+	started := time.Now()
+
+	// 40009 isn't used by the test servers.
+	session, err := mgo.DialWithTimeout("localhost:40009", timeout)
+	if session != nil {
+		session.Close()
+	}
+	c.Assert(err, ErrorMatches, "no reachable servers")
+	c.Assert(session, IsNil)
+	c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true)
+	c.Assert(started.After(time.Now().Add(-timeout*2)), Equals, true)
+}
+
+func (s *S) TestSocketTimeout(c *C) {
+	if *fast {
+		c.Skip("-fast")
+	}
+
+	session, err := mgo.Dial("localhost:40001")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	s.Freeze("localhost:40001")
+
+	timeout := 3 * time.Second
+	session.SetSocketTimeout(timeout)
+	started := time.Now()
+
+	// Do something.
+	result := struct{ Ok bool }{}
+	err = session.Run("getLastError", &result)
+	c.Assert(err, ErrorMatches, ".*: i/o timeout")
+	c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true)
+	c.Assert(started.After(time.Now().Add(-timeout*2)), Equals, true)
+}
+
+func (s *S) TestSocketTimeoutOnDial(c *C) {
+	if *fast {
+		c.Skip("-fast")
+	}
+
+	timeout := 1 * time.Second
+
+	defer mgo.HackSyncSocketTimeout(timeout)()
+
+	s.Freeze("localhost:40001")
+
+	started := time.Now()
+
+	session, err := mgo.DialWithTimeout("localhost:40001", timeout)
+	c.Assert(err, ErrorMatches, "no reachable servers")
+	c.Assert(session, IsNil)
+
+	c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true)
+	c.Assert(started.After(time.Now().Add(-20*time.Second)), Equals, true)
+}
+
+func (s *S) TestSocketTimeoutOnInactiveSocket(c *C) {
+	if *fast {
+		c.Skip("-fast")
+	}
+
+	session, err := mgo.Dial("localhost:40001")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	timeout := 2 * time.Second
+	session.SetSocketTimeout(timeout)
+
+	// Do something that relies on the timeout and works.
+	c.Assert(session.Ping(), IsNil)
+
+	// Freeze and wait for the timeout to go by.
+	s.Freeze("localhost:40001")
+	time.Sleep(timeout + 500*time.Millisecond)
+	s.Thaw("localhost:40001")
+
+	// Do something again. The timeout above should not have killed
+	// the socket as there was nothing to be done.
+	c.Assert(session.Ping(), IsNil)
+}
+
+func (s *S) TestDialWithReplicaSetName(c *C) {
+	seedLists := [][]string{
+		// rs1 primary and rs2 primary
+		[]string{"localhost:40011", "localhost:40021"},
+		// rs1 primary and rs2 secondary
+		[]string{"localhost:40011", "localhost:40022"},
+		// rs1 secondary and rs2 primary
+		[]string{"localhost:40012", "localhost:40021"},
+		// rs1 secondary and rs2 secondary
+		[]string{"localhost:40012", "localhost:40022"},
+	}
+
+	rs2Members := []string{":40021", ":40022", ":40023"}
+
+	verifySyncedServers := func(session *mgo.Session, numServers int) {
+		// wait for the server(s) to be synced
+		for len(session.LiveServers()) != numServers {
+			c.Log("Waiting for cluster sync to finish...")
+			time.Sleep(5e8)
+		}
+
+		// ensure none of the rs2 set members are communicated with
+		for _, addr := range session.LiveServers() {
+			for _, rs2Member := range rs2Members {
+				c.Assert(strings.HasSuffix(addr, rs2Member), Equals, false)
+			}
+		}
+	}
+
+	// only communication with rs1 members is expected
+	for _, seedList := range seedLists {
+		info := mgo.DialInfo{
+			Addrs:          seedList,
+			Timeout:        5 * time.Second,
+			ReplicaSetName: "rs1",
+		}
+
+		session, err := mgo.DialWithInfo(&info)
+		c.Assert(err, IsNil)
+		verifySyncedServers(session, 3)
+		session.Close()
+
+		info.Direct = true
+		session, err = mgo.DialWithInfo(&info)
+		c.Assert(err, IsNil)
+		verifySyncedServers(session, 1)
+		session.Close()
+
+		connectionUrl := fmt.Sprintf("mongodb://%v/?replicaSet=rs1", strings.Join(seedList, ","))
+		session, err = mgo.Dial(connectionUrl)
+		c.Assert(err, IsNil)
+		verifySyncedServers(session, 3)
+		session.Close()
+
+		connectionUrl += "&connect=direct"
+		session, err = mgo.Dial(connectionUrl)
+		c.Assert(err, IsNil)
+		verifySyncedServers(session, 1)
+		session.Close()
+	}
+
+}
+
+func (s *S) TestDirect(c *C) {
+	session, err := mgo.Dial("localhost:40012?connect=direct")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	// We know that server is a slave.
+	session.SetMode(mgo.Monotonic, true)
+
+	result := &struct{ Host string }{}
+	err = session.Run("serverStatus", result)
+	c.Assert(err, IsNil)
+	c.Assert(strings.HasSuffix(result.Host, ":40012"), Equals, true)
+
+	stats := mgo.GetStats()
+	c.Assert(stats.SocketsAlive, Equals, 1)
+	c.Assert(stats.SocketsInUse, Equals, 1)
+	c.Assert(stats.SocketRefs, Equals, 1)
+
+	// We've got no master, so it'll timeout.
+	session.SetSyncTimeout(5e8 * time.Nanosecond)
+
+	coll := session.DB("mydb").C("mycoll")
+	err = coll.Insert(M{"test": 1})
+	c.Assert(err, ErrorMatches, "no reachable servers")
+
+	// Writing to the local database is okay.
+	coll = session.DB("local").C("mycoll")
+	defer coll.RemoveAll(nil)
+	id := bson.NewObjectId()
+	err = coll.Insert(M{"_id": id})
+	c.Assert(err, IsNil)
+
+	// Data was stored in the right server.
+	n, err := coll.Find(M{"_id": id}).Count()
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, 1)
+
+	// Server hasn't changed.
+	result.Host = ""
+	err = session.Run("serverStatus", result)
+	c.Assert(err, IsNil)
+	c.Assert(strings.HasSuffix(result.Host, ":40012"), Equals, true)
+}
+
+func (s *S) TestDirectToUnknownStateMember(c *C) {
+	session, err := mgo.Dial("localhost:40041?connect=direct")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	session.SetMode(mgo.Monotonic, true)
+
+	result := &struct{ Host string }{}
+	err = session.Run("serverStatus", result)
+	c.Assert(err, IsNil)
+	c.Assert(strings.HasSuffix(result.Host, ":40041"), Equals, true)
+
+	// We've got no master, so it'll timeout.
+	session.SetSyncTimeout(5e8 * time.Nanosecond)
+
+	coll := session.DB("mydb").C("mycoll")
+	err = coll.Insert(M{"test": 1})
+	c.Assert(err, ErrorMatches, "no reachable servers")
+
+	// Slave is still reachable.
+	result.Host = ""
+	err = session.Run("serverStatus", result)
+	c.Assert(err, IsNil)
+	c.Assert(strings.HasSuffix(result.Host, ":40041"), Equals, true)
+}
+
+func (s *S) TestFailFast(c *C) {
+	info := mgo.DialInfo{
+		Addrs:    []string{"localhost:99999"},
+		Timeout:  5 * time.Second,
+		FailFast: true,
+	}
+
+	started := time.Now()
+
+	_, err := mgo.DialWithInfo(&info)
+	c.Assert(err, ErrorMatches, "no reachable servers")
+
+	c.Assert(started.After(time.Now().Add(-time.Second)), Equals, true)
+}
+
+func (s *S) countQueries(c *C, server string) (n int) {
+	defer func() { c.Logf("Queries for %q: %d", server, n) }()
+	session, err := mgo.Dial(server + "?connect=direct")
+	c.Assert(err, IsNil)
+	defer session.Close()
+	session.SetMode(mgo.Monotonic, true)
+	var result struct {
+		OpCounters struct {
+			Query int
+		}
+		Metrics struct {
+			Commands struct{ Find struct{ Total int } }
+		}
+	}
+	err = session.Run("serverStatus", &result)
+	c.Assert(err, IsNil)
+	if s.versionAtLeast(3, 2) {
+		return result.Metrics.Commands.Find.Total
+	}
+	return result.OpCounters.Query
+}
+
+func (s *S) countCommands(c *C, server, commandName string) (n int) {
+	defer func() { c.Logf("Queries for %q: %d", server, n) }()
+	session, err := mgo.Dial(server + "?connect=direct")
+	c.Assert(err, IsNil)
+	defer session.Close()
+	session.SetMode(mgo.Monotonic, true)
+	var result struct {
+		Metrics struct {
+			Commands map[string]struct{ Total int }
+		}
+	}
+	err = session.Run("serverStatus", &result)
+	c.Assert(err, IsNil)
+	return result.Metrics.Commands[commandName].Total
+}
+
+func (s *S) TestMonotonicSlaveOkFlagWithMongos(c *C) {
+	session, err := mgo.Dial("localhost:40021")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	ssresult := &struct{ Host string }{}
+	imresult := &struct{ IsMaster bool }{}
+
+	// Figure the master while still using the strong session.
+	err = session.Run("serverStatus", ssresult)
+	c.Assert(err, IsNil)
+	err = session.Run("isMaster", imresult)
+	c.Assert(err, IsNil)
+	master := ssresult.Host
+	c.Assert(imresult.IsMaster, Equals, true, Commentf("%s is not the master", master))
+
+	// Ensure mongos is aware about the current topology.
+	s.Stop(":40201")
+	s.StartAll()
+
+	mongos, err := mgo.Dial("localhost:40202")
+	c.Assert(err, IsNil)
+	defer mongos.Close()
+
+	// Insert some data as otherwise 3.2+ doesn't seem to run the query at all.
+	err = mongos.DB("mydb").C("mycoll").Insert(bson.M{"n": 1})
+	c.Assert(err, IsNil)
+
+	// Wait until all servers see the data.
+	for _, addr := range []string{"localhost:40021", "localhost:40022", "localhost:40023"} {
+		session, err := mgo.Dial(addr + "?connect=direct")
+		c.Assert(err, IsNil)
+		defer session.Close()
+		session.SetMode(mgo.Monotonic, true)
+		for i := 300; i >= 0; i-- {
+			n, err := session.DB("mydb").C("mycoll").Find(nil).Count()
+			c.Assert(err, IsNil)
+			if n == 1 {
+				break
+			}
+			if i == 0 {
+				c.Fatalf("Inserted data never reached " + addr)
+			}
+			time.Sleep(100 * time.Millisecond)
+		}
+	}
+
+	// Collect op counters for everyone.
+	q21a := s.countQueries(c, "localhost:40021")
+	q22a := s.countQueries(c, "localhost:40022")
+	q23a := s.countQueries(c, "localhost:40023")
+
+	// Do a SlaveOk query through MongoS
+
+	mongos.SetMode(mgo.Monotonic, true)
+
+	coll := mongos.DB("mydb").C("mycoll")
+	var result struct{ N int }
+	for i := 0; i != 5; i++ {
+		err = coll.Find(nil).One(&result)
+		c.Assert(err, IsNil)
+		c.Assert(result.N, Equals, 1)
+	}
+
+	// Collect op counters for everyone again.
+	q21b := s.countQueries(c, "localhost:40021")
+	q22b := s.countQueries(c, "localhost:40022")
+	q23b := s.countQueries(c, "localhost:40023")
+
+	var masterDelta, slaveDelta int
+	switch hostPort(master) {
+	case "40021":
+		masterDelta = q21b - q21a
+		slaveDelta = (q22b - q22a) + (q23b - q23a)
+	case "40022":
+		masterDelta = q22b - q22a
+		slaveDelta = (q21b - q21a) + (q23b - q23a)
+	case "40023":
+		masterDelta = q23b - q23a
+		slaveDelta = (q21b - q21a) + (q22b - q22a)
+	default:
+		c.Fatal("Uh?")
+	}
+
+	c.Check(masterDelta, Equals, 0) // Just the counting itself.
+	c.Check(slaveDelta, Equals, 5)  // The counting for both, plus 5 queries above.
+}
+
+func (s *S) TestSecondaryModeWithMongos(c *C) {
+	session, err := mgo.Dial("localhost:40021")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	ssresult := &struct{ Host string }{}
+	imresult := &struct{ IsMaster bool }{}
+
+	// Figure the master while still using the strong session.
+	err = session.Run("serverStatus", ssresult)
+	c.Assert(err, IsNil)
+	err = session.Run("isMaster", imresult)
+	c.Assert(err, IsNil)
+	master := ssresult.Host
+	c.Assert(imresult.IsMaster, Equals, true, Commentf("%s is not the master", master))
+
+	// Ensure mongos is aware about the current topology.
+	s.Stop(":40201")
+	s.StartAll()
+
+	mongos, err := mgo.Dial("localhost:40202")
+	c.Assert(err, IsNil)
+	defer mongos.Close()
+
+	mongos.SetSyncTimeout(5 * time.Second)
+
+	// Insert some data as otherwise 3.2+ doesn't seem to run the query at all.
+	err = mongos.DB("mydb").C("mycoll").Insert(bson.M{"n": 1})
+	c.Assert(err, IsNil)
+
+	// Wait until all servers see the data.
+	for _, addr := range []string{"localhost:40021", "localhost:40022", "localhost:40023"} {
+		session, err := mgo.Dial(addr + "?connect=direct")
+		c.Assert(err, IsNil)
+		defer session.Close()
+		session.SetMode(mgo.Monotonic, true)
+		for i := 300; i >= 0; i-- {
+			n, err := session.DB("mydb").C("mycoll").Find(nil).Count()
+			c.Assert(err, IsNil)
+			if n == 1 {
+				break
+			}
+			if i == 0 {
+				c.Fatalf("Inserted data never reached " + addr)
+			}
+			time.Sleep(100 * time.Millisecond)
+		}
+	}
+
+	// Collect op counters for everyone.
+	q21a := s.countQueries(c, "localhost:40021")
+	q22a := s.countQueries(c, "localhost:40022")
+	q23a := s.countQueries(c, "localhost:40023")
+
+	// Do a Secondary query through MongoS
+
+	mongos.SetMode(mgo.Secondary, true)
+
+	coll := mongos.DB("mydb").C("mycoll")
+	var result struct{ N int }
+	for i := 0; i != 5; i++ {
+		err = coll.Find(nil).One(&result)
+		c.Assert(err, IsNil)
+		c.Assert(result.N, Equals, 1)
+	}
+
+	// Collect op counters for everyone again.
+	q21b := s.countQueries(c, "localhost:40021")
+	q22b := s.countQueries(c, "localhost:40022")
+	q23b := s.countQueries(c, "localhost:40023")
+
+	var masterDelta, slaveDelta int
+	switch hostPort(master) {
+	case "40021":
+		masterDelta = q21b - q21a
+		slaveDelta = (q22b - q22a) + (q23b - q23a)
+	case "40022":
+		masterDelta = q22b - q22a
+		slaveDelta = (q21b - q21a) + (q23b - q23a)
+	case "40023":
+		masterDelta = q23b - q23a
+		slaveDelta = (q21b - q21a) + (q22b - q22a)
+	default:
+		c.Fatal("Uh?")
+	}
+
+	c.Check(masterDelta, Equals, 0) // Just the counting itself.
+	c.Check(slaveDelta, Equals, 5)  // The counting for both, plus 5 queries above.
+}
+
+func (s *S) TestSecondaryModeWithMongosInsert(c *C) {
+	if *fast {
+		c.Skip("-fast")
+	}
+
+	session, err := mgo.Dial("localhost:40202")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	session.SetMode(mgo.Secondary, true)
+	session.SetSyncTimeout(4 * time.Second)
+
+	coll := session.DB("mydb").C("mycoll")
+	err = coll.Insert(M{"a": 1})
+	c.Assert(err, IsNil)
+
+	var result struct{ A int }
+	coll.Find(nil).One(&result)
+	c.Assert(result.A, Equals, 1)
+}
+
+
+func (s *S) TestRemovalOfClusterMember(c *C) {
+	if *fast {
+		c.Skip("-fast")
+	}
+
+	master, err := mgo.Dial("localhost:40021")
+	c.Assert(err, IsNil)
+	defer master.Close()
+
+	// Wait for cluster to fully sync up.
+	for i := 0; i < 10; i++ {
+		if len(master.LiveServers()) == 3 {
+			break
+		}
+		time.Sleep(5e8)
+	}
+	if len(master.LiveServers()) != 3 {
+		c.Fatalf("Test started with bad cluster state: %v", master.LiveServers())
+	}
+
+	result := &struct {
+		IsMaster bool
+		Me       string
+	}{}
+	slave := master.Copy()
+	slave.SetMode(mgo.Monotonic, true) // Monotonic can hold a non-master socket persistently.
+	err = slave.Run("isMaster", result)
+	c.Assert(err, IsNil)
+	c.Assert(result.IsMaster, Equals, false)
+	slaveAddr := result.Me
+
+	defer func() {
+		config := map[string]string{
+			"40021": `{_id: 1, host: "127.0.0.1:40021", priority: 1, tags: {rs2: "a"}}`,
+			"40022": `{_id: 2, host: "127.0.0.1:40022", priority: 0, tags: {rs2: "b"}}`,
+			"40023": `{_id: 3, host: "127.0.0.1:40023", priority: 0, tags: {rs2: "c"}}`,
+		}
+		master.Refresh()
+		master.Run(bson.D{{"$eval", `rs.add(` + config[hostPort(slaveAddr)] + `)`}}, nil)
+		master.Close()
+		slave.Close()
+
+		// Ensure suite syncs up with the changes before next test.
+		s.Stop(":40201")
+		s.StartAll()
+		time.Sleep(8 * time.Second)
+		// TODO Find a better way to find out when mongos is fully aware that all
+		// servers are up. Without that follow up tests that depend on mongos will
+		// break due to their expectation of things being in a working state.
+	}()
+
+	c.Logf("========== Removing slave: %s ==========", slaveAddr)
+
+	master.Run(bson.D{{"$eval", `rs.remove("` + slaveAddr + `")`}}, nil)
+
+	master.Refresh()
+
+	// Give the cluster a moment to catch up by doing a roundtrip to the master.
+	err = master.Ping()
+	c.Assert(err, IsNil)
+
+	time.Sleep(3e9)
+
+	// This must fail since the slave has been taken off the cluster.
+	err = slave.Ping()
+	c.Assert(err, NotNil)
+
+	for i := 0; i < 15; i++ {
+		if len(master.LiveServers()) == 2 {
+			break
+		}
+		time.Sleep(time.Second)
+	}
+	live := master.LiveServers()
+	if len(live) != 2 {
+		c.Errorf("Removed server still considered live: %#s", live)
+	}
+
+	c.Log("========== Test succeeded. ==========")
+}
+
+func (s *S) TestPoolLimitSimple(c *C) {
+	for test := 0; test < 2; test++ {
+		var session *mgo.Session
+		var err error
+		if test == 0 {
+			session, err = mgo.Dial("localhost:40001")
+			c.Assert(err, IsNil)
+			session.SetPoolLimit(1)
+		} else {
+			session, err = mgo.Dial("localhost:40001?maxPoolSize=1")
+			c.Assert(err, IsNil)
+		}
+		defer session.Close()
+
+		// Put one socket in use.
+		c.Assert(session.Ping(), IsNil)
+
+		done := make(chan time.Duration)
+
+		// Now block trying to get another one due to the pool limit.
+		go func() {
+			copy := session.Copy()
+			defer copy.Close()
+			started := time.Now()
+			c.Check(copy.Ping(), IsNil)
+			done <- time.Now().Sub(started)
+		}()
+
+		time.Sleep(300 * time.Millisecond)
+
+		// Put the one socket back in the pool, freeing it for the copy.
+		session.Refresh()
+		delay := <-done
+		c.Assert(delay > 300*time.Millisecond, Equals, true, Commentf("Delay: %s", delay))
+	}
+}
+
+func (s *S) TestPoolLimitMany(c *C) {
+	if *fast {
+		c.Skip("-fast")
+	}
+
+	session, err := mgo.Dial("localhost:40011")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	stats := mgo.GetStats()
+	for stats.SocketsAlive != 3 {
+		c.Logf("Waiting for all connections to be established (sockets alive currently %d)...", stats.SocketsAlive)
+		stats = mgo.GetStats()
+		time.Sleep(5e8)
+	}
+
+	const poolLimit = 64
+	session.SetPoolLimit(poolLimit)
+
+	// Consume the whole limit for the master.
+	var master []*mgo.Session
+	for i := 0; i < poolLimit; i++ {
+		s := session.Copy()
+		defer s.Close()
+		c.Assert(s.Ping(), IsNil)
+		master = append(master, s)
+	}
+
+	before := time.Now()
+	go func() {
+		time.Sleep(3e9)
+		master[0].Refresh()
+	}()
+
+	// Then, a single ping must block, since it would need another
+	// connection to the master, over the limit. Once the goroutine
+	// above releases its socket, it should move on.
+	session.Ping()
+	delay := time.Now().Sub(before)
+	c.Assert(delay > 3e9, Equals, true)
+	c.Assert(delay < 6e9, Equals, true)
+}
+
+func (s *S) TestSetModeEventualIterBug(c *C) {
+	session1, err := mgo.Dial("localhost:40011")
+	c.Assert(err, IsNil)
+	defer session1.Close()
+
+	session1.SetMode(mgo.Eventual, false)
+
+	coll1 := session1.DB("mydb").C("mycoll")
+
+	const N = 100
+	for i := 0; i < N; i++ {
+		err = coll1.Insert(M{"_id": i})
+		c.Assert(err, IsNil)
+	}
+
+	c.Logf("Waiting until secondary syncs")
+	for {
+		n, err := coll1.Count()
+		c.Assert(err, IsNil)
+		if n == N {
+			c.Logf("Found all")
+			break
+		}
+	}
+
+	session2, err := mgo.Dial("localhost:40011")
+	c.Assert(err, IsNil)
+	defer session2.Close()
+
+	session2.SetMode(mgo.Eventual, false)
+
+	coll2 := session2.DB("mydb").C("mycoll")
+
+	i := 0
+	iter := coll2.Find(nil).Batch(10).Iter()
+	var result struct{}
+	for iter.Next(&result) {
+		i++
+	}
+	c.Assert(iter.Close(), Equals, nil)
+	c.Assert(i, Equals, N)
+}
+
+func (s *S) TestCustomDialOld(c *C) {
+	dials := make(chan bool, 16)
+	dial := func(addr net.Addr) (net.Conn, error) {
+		tcpaddr, ok := addr.(*net.TCPAddr)
+		if !ok {
+			return nil, fmt.Errorf("unexpected address type: %T", addr)
+		}
+		dials <- true
+		return net.DialTCP("tcp", nil, tcpaddr)
+	}
+	info := mgo.DialInfo{
+		Addrs: []string{"localhost:40012"},
+		Dial:  dial,
+	}
+
+	// Use hostname here rather than IP, to make things trickier.
+	session, err := mgo.DialWithInfo(&info)
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	const N = 3
+	for i := 0; i < N; i++ {
+		select {
+		case <-dials:
+		case <-time.After(5 * time.Second):
+			c.Fatalf("expected %d dials, got %d", N, i)
+		}
+	}
+	select {
+	case <-dials:
+		c.Fatalf("got more dials than expected")
+	case <-time.After(100 * time.Millisecond):
+	}
+}
+
+func (s *S) TestCustomDialNew(c *C) {
+	dials := make(chan bool, 16)
+	dial := func(addr *mgo.ServerAddr) (net.Conn, error) {
+		dials <- true
+		if addr.TCPAddr().Port == 40012 {
+			c.Check(addr.String(), Equals, "localhost:40012")
+		}
+		return net.DialTCP("tcp", nil, addr.TCPAddr())
+	}
+	info := mgo.DialInfo{
+		Addrs:      []string{"localhost:40012"},
+		DialServer: dial,
+	}
+
+	// Use hostname here rather than IP, to make things trickier.
+	session, err := mgo.DialWithInfo(&info)
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	const N = 3
+	for i := 0; i < N; i++ {
+		select {
+		case <-dials:
+		case <-time.After(5 * time.Second):
+			c.Fatalf("expected %d dials, got %d", N, i)
+		}
+	}
+	select {
+	case <-dials:
+		c.Fatalf("got more dials than expected")
+	case <-time.After(100 * time.Millisecond):
+	}
+}
+
+func (s *S) TestPrimaryShutdownOnAuthShard(c *C) {
+	if *fast {
+		c.Skip("-fast")
+	}
+
+	// Dial the shard.
+	session, err := mgo.Dial("localhost:40203")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	// Login and insert something to make it more realistic.
+	session.DB("admin").Login("root", "rapadura")
+	coll := session.DB("mydb").C("mycoll")
+	err = coll.Insert(bson.M{"n": 1})
+	c.Assert(err, IsNil)
+
+	// Dial the replica set to figure the master out.
+	rs, err := mgo.Dial("root:rapadura@localhost:40031")
+	c.Assert(err, IsNil)
+	defer rs.Close()
+
+	// With strong consistency, this will open a socket to the master.
+	result := &struct{ Host string }{}
+	err = rs.Run("serverStatus", result)
+	c.Assert(err, IsNil)
+
+	// Kill the master.
+	host := result.Host
+	s.Stop(host)
+
+	// This must fail, since the connection was broken.
+	err = rs.Run("serverStatus", result)
+	c.Assert(err, Equals, io.EOF)
+
+	// This won't work because the master just died.
+	err = coll.Insert(bson.M{"n": 2})
+	c.Assert(err, NotNil)
+
+	// Refresh session and wait for re-election.
+	session.Refresh()
+	for i := 0; i < 60; i++ {
+		err = coll.Insert(bson.M{"n": 3})
+		if err == nil {
+			break
+		}
+		c.Logf("Waiting for replica set to elect a new master. Last error: %v", err)
+		time.Sleep(500 * time.Millisecond)
+	}
+	c.Assert(err, IsNil)
+
+	count, err := coll.Count()
+	c.Assert(count > 1, Equals, true)
+}
+
+// TestNearestSecondary checks that in Monotonic mode reads stick to the
+// secondary with the best observed ping time, and move to the other
+// secondary once the previous favourite is penalized by a freeze.
+func (s *S) TestNearestSecondary(c *C) {
+	defer mgo.HackPingDelay(300 * time.Millisecond)()
+
+	rs1a := "127.0.0.1:40011"
+	rs1b := "127.0.0.1:40012"
+	rs1c := "127.0.0.1:40013"
+	// Freeze rs1b so its early pings look slow and it gets penalized.
+	s.Freeze(rs1b)
+
+	session, err := mgo.Dial(rs1a)
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	// Wait for the sync up to run through the first couple of servers.
+	for len(session.LiveServers()) != 2 {
+		c.Log("Waiting for two servers to be alive...")
+		time.Sleep(100 * time.Millisecond)
+	}
+
+	// Extra delay to ensure the third server gets penalized.
+	time.Sleep(500 * time.Millisecond)
+
+	// Release third server.
+	s.Thaw(rs1b)
+
+	// Wait for it to come up.
+	for len(session.LiveServers()) != 3 {
+		c.Log("Waiting for all servers to be alive...")
+		time.Sleep(100 * time.Millisecond)
+	}
+
+	session.SetMode(mgo.Monotonic, true)
+	var result struct{ Host string }
+
+	// See which slave picks the line, several times to avoid chance.
+	for i := 0; i < 10; i++ {
+		session.Refresh()
+		err = session.Run("serverStatus", &result)
+		c.Assert(err, IsNil)
+		c.Assert(hostPort(result.Host), Equals, hostPort(rs1c))
+	}
+
+	if *fast {
+		// Don't hold back for several seconds.
+		return
+	}
+
+	// Now hold the other server for long enough to penalize it.
+	s.Freeze(rs1c)
+	time.Sleep(5 * time.Second)
+	s.Thaw(rs1c)
+
+	// Wait for the ping to be processed.
+	time.Sleep(500 * time.Millisecond)
+
+	// Repeating the test should now pick the former server consistently.
+	for i := 0; i < 10; i++ {
+		session.Refresh()
+		err = session.Run("serverStatus", &result)
+		c.Assert(err, IsNil)
+		c.Assert(hostPort(result.Host), Equals, hostPort(rs1b))
+	}
+}
+
+// TestNearestServer is the Nearest-mode counterpart of TestNearestSecondary:
+// any member (including the primary) may serve reads, and selection follows
+// the lowest observed ping time as servers are frozen and thawed.
+func (s *S) TestNearestServer(c *C) {
+	defer mgo.HackPingDelay(300 * time.Millisecond)()
+
+	rs1a := "127.0.0.1:40011"
+	rs1b := "127.0.0.1:40012"
+	rs1c := "127.0.0.1:40013"
+
+	session, err := mgo.Dial(rs1a)
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	s.Freeze(rs1a)
+	s.Freeze(rs1b)
+
+	// Extra delay to ensure the first two servers get penalized.
+	time.Sleep(500 * time.Millisecond)
+
+	// Release them.
+	s.Thaw(rs1a)
+	s.Thaw(rs1b)
+
+	// Wait for everyone to come up.
+	for len(session.LiveServers()) != 3 {
+		c.Log("Waiting for all servers to be alive...")
+		time.Sleep(100 * time.Millisecond)
+	}
+
+	session.SetMode(mgo.Nearest, true)
+	var result struct{ Host string }
+
+	// See which server picks the line, several times to avoid chance.
+	for i := 0; i < 10; i++ {
+		session.Refresh()
+		err = session.Run("serverStatus", &result)
+		c.Assert(err, IsNil)
+		c.Assert(hostPort(result.Host), Equals, hostPort(rs1c))
+	}
+
+	if *fast {
+		// Don't hold back for several seconds.
+		return
+	}
+
+	// Now hold the two secondaries for long enough to penalize them.
+	s.Freeze(rs1b)
+	s.Freeze(rs1c)
+	time.Sleep(5 * time.Second)
+	s.Thaw(rs1b)
+	s.Thaw(rs1c)
+
+	// Wait for the ping to be processed.
+	time.Sleep(500 * time.Millisecond)
+
+	// Repeating the test should now pick the primary server consistently.
+	for i := 0; i < 10; i++ {
+		session.Refresh()
+		err = session.Run("serverStatus", &result)
+		c.Assert(err, IsNil)
+		c.Assert(hostPort(result.Host), Equals, hostPort(rs1a))
+	}
+}
+
+// TestConnectCloseConcurrency stresses concurrent Dial/Close cycles to
+// surface races in connection setup and teardown. The shortened ping
+// delay increases background activity while sessions churn.
+func (s *S) TestConnectCloseConcurrency(c *C) {
+	restore := mgo.HackPingDelay(500 * time.Millisecond)
+	defer restore()
+	var wg sync.WaitGroup
+	const n = 500
+	wg.Add(n)
+	for i := 0; i < n; i++ {
+		go func() {
+			defer wg.Done()
+			session, err := mgo.Dial("localhost:40001")
+			if err != nil {
+				// Fatal must not be called from a goroutine other than the
+				// one running the test: it stops the wrong goroutine and the
+				// test carries on regardless. Record the failure and bail
+				// out of this worker instead.
+				c.Error(err)
+				return
+			}
+			// Sleep(1) is one nanosecond — effectively a yield so that
+			// Close can overlap with in-flight background setup.
+			time.Sleep(1)
+			session.Close()
+		}()
+	}
+	wg.Wait()
+}
+
+// TestSelectServers verifies that SelectServers restricts queries to
+// replica-set members carrying the requested tag, by checking which
+// host actually answers serverStatus after each selection.
+func (s *S) TestSelectServers(c *C) {
+	if !s.versionAtLeast(2, 2) {
+		c.Skip("read preferences introduced in 2.2")
+	}
+
+	session, err := mgo.Dial("localhost:40011")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	// Eventual mode allows reads from secondaries.
+	session.SetMode(mgo.Eventual, true)
+
+	var result struct{ Host string }
+
+	session.Refresh()
+	session.SelectServers(bson.D{{"rs1", "b"}})
+	err = session.Run("serverStatus", &result)
+	c.Assert(err, IsNil)
+	c.Assert(hostPort(result.Host), Equals, "40012")
+
+	session.Refresh()
+	session.SelectServers(bson.D{{"rs1", "c"}})
+	err = session.Run("serverStatus", &result)
+	c.Assert(err, IsNil)
+	c.Assert(hostPort(result.Host), Equals, "40013")
+}
+
+// TestSelectServersWithMongos verifies tag-based server selection when
+// going through a mongos: queries tagged for each slave must land on
+// that slave only, which is confirmed via per-server query op counters.
+func (s *S) TestSelectServersWithMongos(c *C) {
+	if !s.versionAtLeast(2, 2) {
+		c.Skip("read preferences introduced in 2.2")
+	}
+
+	session, err := mgo.Dial("localhost:40021")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	ssresult := &struct{ Host string }{}
+	imresult := &struct{ IsMaster bool }{}
+
+	// Figure the master while still using the strong session.
+	err = session.Run("serverStatus", ssresult)
+	c.Assert(err, IsNil)
+	err = session.Run("isMaster", imresult)
+	c.Assert(err, IsNil)
+	master := ssresult.Host
+	c.Assert(imresult.IsMaster, Equals, true, Commentf("%s is not the master", master))
+
+	// The two non-master members are the candidate slaves, by tag name.
+	var slave1, slave2 string
+	switch hostPort(master) {
+	case "40021":
+		slave1, slave2 = "b", "c"
+	case "40022":
+		slave1, slave2 = "a", "c"
+	case "40023":
+		slave1, slave2 = "a", "b"
+	}
+
+	// Collect op counters for everyone.
+	q21a := s.countQueries(c, "localhost:40021")
+	q22a := s.countQueries(c, "localhost:40022")
+	q23a := s.countQueries(c, "localhost:40023")
+
+	// Do a SlaveOk query through MongoS
+	mongos, err := mgo.Dial("localhost:40202")
+	c.Assert(err, IsNil)
+	defer mongos.Close()
+
+	mongos.SetMode(mgo.Monotonic, true)
+
+	// 5 queries against slave1, then 7 against slave2, so the two
+	// targets can be told apart in the counter deltas below.
+	mongos.Refresh()
+	mongos.SelectServers(bson.D{{"rs2", slave1}})
+	coll := mongos.DB("mydb").C("mycoll")
+	result := &struct{}{}
+	for i := 0; i != 5; i++ {
+		err := coll.Find(nil).One(result)
+		c.Assert(err, Equals, mgo.ErrNotFound)
+	}
+
+	mongos.Refresh()
+	mongos.SelectServers(bson.D{{"rs2", slave2}})
+	coll = mongos.DB("mydb").C("mycoll")
+	for i := 0; i != 7; i++ {
+		err := coll.Find(nil).One(result)
+		c.Assert(err, Equals, mgo.ErrNotFound)
+	}
+
+	// Collect op counters for everyone again.
+	q21b := s.countQueries(c, "localhost:40021")
+	q22b := s.countQueries(c, "localhost:40022")
+	q23b := s.countQueries(c, "localhost:40023")
+
+	// The master must have received none of the tagged queries.
+	switch hostPort(master) {
+	case "40021":
+		c.Check(q21b-q21a, Equals, 0)
+		c.Check(q22b-q22a, Equals, 5)
+		c.Check(q23b-q23a, Equals, 7)
+	case "40022":
+		c.Check(q21b-q21a, Equals, 5)
+		c.Check(q22b-q22a, Equals, 0)
+		c.Check(q23b-q23a, Equals, 7)
+	case "40023":
+		c.Check(q21b-q21a, Equals, 5)
+		c.Check(q22b-q22a, Equals, 7)
+		c.Check(q23b-q23a, Equals, 0)
+	default:
+		c.Fatal("Uh?")
+	}
+}
+
+func (s *S) TestDoNotFallbackToMonotonic(c *C) {
+	// There was a bug at some point that some functions were
+	// falling back to Monotonic mode. This test ensures all listIndexes
+	// commands go to the primary, as should happen since the session is
+	// in Strong mode.
+	if !s.versionAtLeast(3, 0) {
+		c.Skip("command-counting logic depends on 3.0+")
+	}
+
+	// Dial a secondary address; Strong mode must still route to 40011,
+	// which is the primary in this fixture.
+	session, err := mgo.Dial("localhost:40012")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	for i := 0; i < 15; i++ {
+		q11a := s.countCommands(c, "localhost:40011", "listIndexes")
+		q12a := s.countCommands(c, "localhost:40012", "listIndexes")
+		q13a := s.countCommands(c, "localhost:40013", "listIndexes")
+
+		_, err := session.DB("local").C("system.indexes").Indexes()
+		c.Assert(err, IsNil)
+
+		q11b := s.countCommands(c, "localhost:40011", "listIndexes")
+		q12b := s.countCommands(c, "localhost:40012", "listIndexes")
+		q13b := s.countCommands(c, "localhost:40013", "listIndexes")
+
+		// Exactly one extra listIndexes on the primary, none elsewhere.
+		c.Assert(q11b, Equals, q11a+1)
+		c.Assert(q12b, Equals, q12a)
+		c.Assert(q13b, Equals, q13a)
+	}
+}

+ 196 - 0
src/gopkg.in/mgo.v2/dbtest/dbserver.go

@@ -0,0 +1,196 @@
+package dbtest
+
+import (
+	"bytes"
+	"fmt"
+	"net"
+	"os"
+	"os/exec"
+	"strconv"
+	"time"
+
+	"gopkg.in/mgo.v2"
+	"gopkg.in/tomb.v2"
+)
+
+// DBServer controls a MongoDB server process to be used within test suites.
+//
+// The test server is started when Session is called the first time and should
+// remain running for the duration of all tests, with the Wipe method being
+// called between tests (before each of them) to clear stored data. After all tests
+// are done, the Stop method should be called to stop the test server.
+//
+// Before the DBServer is used the SetPath method must be called to define
+// the location for the database files to be stored.
+type DBServer struct {
+	session *mgo.Session // root session; Session() hands out copies of it
+	output  bytes.Buffer // combined stdout/stderr of the mongod process
+	server  *exec.Cmd    // running mongod command; nil until start()
+	dbpath  string       // --dbpath directory, set via SetPath
+	host    string       // host:port mongod was told to bind to
+	tomb    tomb.Tomb    // tracks the monitor goroutine and shutdown intent
+}
+
+// SetPath defines the path to the directory where the database files will be
+// stored if it is started. The directory path itself is not created or removed
+// by the test helper.
+//
+// start() reads this value once when launching mongod, so SetPath must be
+// called before the first Session call.
+func (dbs *DBServer) SetPath(dbpath string) {
+	dbs.dbpath = dbpath
+}
+
+// start launches a mongod process on a free localhost port, wires its
+// output into dbs.output, and spawns the monitor goroutine. It panics on
+// any setup failure, which is acceptable for a test helper.
+func (dbs *DBServer) start() {
+	if dbs.server != nil {
+		panic("DBServer already started")
+	}
+	if dbs.dbpath == "" {
+		panic("DBServer.SetPath must be called before using the server")
+	}
+	// Socket statistics are needed later by checkSessions.
+	mgo.SetStats(true)
+	// Pick a free port by binding to :0 and immediately releasing it.
+	// NOTE(review): there is a small window where another process could
+	// grab the port before mongod binds it.
+	l, err := net.Listen("tcp", "127.0.0.1:0")
+	if err != nil {
+		panic("unable to listen on a local address: " + err.Error())
+	}
+	addr := l.Addr().(*net.TCPAddr)
+	l.Close()
+	dbs.host = addr.String()
+
+	// Flags chosen to keep the test instance small and fast to start.
+	args := []string{
+		"--dbpath", dbs.dbpath,
+		"--bind_ip", "127.0.0.1",
+		"--port", strconv.Itoa(addr.Port),
+		"--nssize", "1",
+		"--noprealloc",
+		"--smallfiles",
+		"--nojournal",
+	}
+	dbs.tomb = tomb.Tomb{}
+	dbs.server = exec.Command("mongod", args...)
+	dbs.server.Stdout = &dbs.output
+	dbs.server.Stderr = &dbs.output
+	err = dbs.server.Start()
+	if err != nil {
+		panic(err)
+	}
+	dbs.tomb.Go(dbs.monitor)
+	dbs.Wipe()
+}
+
+// monitor blocks until the mongod process exits. If the tomb is still
+// alive at that point the death was not requested via Stop, so it dumps
+// the captured output plus a process listing and panics to abort the
+// test run loudly.
+func (dbs *DBServer) monitor() error {
+	dbs.server.Process.Wait()
+	if dbs.tomb.Alive() {
+		// Present some debugging information.
+		fmt.Fprintf(os.Stderr, "---- mongod process died unexpectedly:\n")
+		fmt.Fprintf(os.Stderr, "%s", dbs.output.Bytes())
+		fmt.Fprintf(os.Stderr, "---- mongod processes running right now:\n")
+		cmd := exec.Command("/bin/sh", "-c", "ps auxw | grep mongod")
+		cmd.Stdout = os.Stderr
+		cmd.Stderr = os.Stderr
+		cmd.Run()
+		fmt.Fprintf(os.Stderr, "----------------------------------------\n")
+
+		panic("mongod process died unexpectedly")
+	}
+	return nil
+}
+
+// Stop stops the test server process, if it is running.
+//
+// It's okay to call Stop multiple times. After the test server is
+// stopped it cannot be restarted.
+//
+// All database sessions must be closed before or while the Stop method
+// is running. Otherwise Stop will panic after a timeout informing that
+// there is a session leak.
+func (dbs *DBServer) Stop() {
+	if dbs.session != nil {
+		// checkSessions may close and nil out dbs.session itself.
+		dbs.checkSessions()
+		if dbs.session != nil {
+			dbs.session.Close()
+			dbs.session = nil
+		}
+	}
+	if dbs.server != nil {
+		// Killing the tomb first tells monitor the death is intentional,
+		// so it won't panic when mongod exits.
+		dbs.tomb.Kill(nil)
+		dbs.server.Process.Signal(os.Interrupt)
+		select {
+		case <-dbs.tomb.Dead():
+		case <-time.After(5 * time.Second):
+			panic("timeout waiting for mongod process to die")
+		}
+		dbs.server = nil
+	}
+}
+
+// Session returns a new session to the server. The returned session
+// must be closed after the test is done with it.
+//
+// The first Session obtained from a DBServer will start it.
+func (dbs *DBServer) Session() *mgo.Session {
+	if dbs.server == nil {
+		dbs.start()
+	}
+	if dbs.session == nil {
+		// Reset socket statistics so checkSessions only observes
+		// sessions created from this point on.
+		mgo.ResetStats()
+		var err error
+		dbs.session, err = mgo.Dial(dbs.host + "/test")
+		if err != nil {
+			panic(err)
+		}
+	}
+	// Hand out a copy; the root session stays owned by the server.
+	return dbs.session.Copy()
+}
+
+// checkSessions ensures all mgo sessions opened were properly closed.
+// For slightly faster tests, it may be disabled setting the
+// environment variable CHECK_SESSIONS to 0.
+func (dbs *DBServer) checkSessions() {
+	if check := os.Getenv("CHECK_SESSIONS"); check == "0" || dbs.server == nil || dbs.session == nil {
+		return
+	}
+	// Close the root session so only leaked copies keep sockets alive.
+	dbs.session.Close()
+	dbs.session = nil
+	// Poll the socket statistics for up to ~10s (100 x 100ms).
+	for i := 0; i < 100; i++ {
+		stats := mgo.GetStats()
+		if stats.SocketsInUse == 0 && stats.SocketsAlive == 0 {
+			return
+		}
+		time.Sleep(100 * time.Millisecond)
+	}
+	panic("There are mgo sessions still alive.")
+}
+
+// Wipe drops all created databases and their data.
+//
+// The MongoDB server remains running if it was previously running,
+// or stopped if it was previously stopped.
+//
+// All database sessions must be closed before or while the Wipe method
+// is running. Otherwise Wipe will panic after a timeout informing that
+// there is a session leak.
+func (dbs *DBServer) Wipe() {
+	if dbs.server == nil || dbs.session == nil {
+		return
+	}
+	dbs.checkSessions()
+	// checkSessions may have closed the root session; remember that so
+	// the temporary one created below can be torn down again.
+	sessionUnset := dbs.session == nil
+	session := dbs.Session()
+	defer session.Close()
+	if sessionUnset {
+		dbs.session.Close()
+		dbs.session = nil
+	}
+	names, err := session.DatabaseNames()
+	if err != nil {
+		panic(err)
+	}
+	for _, name := range names {
+		switch name {
+		case "admin", "local", "config":
+			// System databases are preserved.
+		default:
+			err = session.DB(name).DropDatabase()
+			if err != nil {
+				panic(err)
+			}
+		}
+	}
+}

+ 108 - 0
src/gopkg.in/mgo.v2/dbtest/dbserver_test.go

@@ -0,0 +1,108 @@
+package dbtest_test
+
+import (
+	"os"
+	"testing"
+	"time"
+
+	. "gopkg.in/check.v1"
+
+	"gopkg.in/mgo.v2"
+	"gopkg.in/mgo.v2/dbtest"
+)
+
+// M is a shorthand document type used by the tests below.
+type M map[string]interface{}
+
+// TestAll hooks the gocheck suites into the standard testing runner.
+func TestAll(t *testing.T) {
+	TestingT(t)
+}
+
+// S is the test suite; it remembers the CHECK_SESSIONS value so each
+// test can tweak it and have it restored afterwards.
+type S struct {
+	oldCheckSessions string
+}
+
+var _ = Suite(&S{})
+
+// SetUpTest saves and clears CHECK_SESSIONS so session-leak checking
+// runs with its default behavior unless a test opts out.
+func (s *S) SetUpTest(c *C) {
+	s.oldCheckSessions = os.Getenv("CHECK_SESSIONS")
+	os.Setenv("CHECK_SESSIONS", "")
+}
+
+// TearDownTest restores the CHECK_SESSIONS value seen before the test.
+func (s *S) TearDownTest(c *C) {
+	os.Setenv("CHECK_SESSIONS", s.oldCheckSessions)
+}
+
+// TestWipeData inserts a document, wipes the server, and verifies that
+// only system databases ("local", "admin") survive.
+func (s *S) TestWipeData(c *C) {
+	var server dbtest.DBServer
+	server.SetPath(c.MkDir())
+	defer server.Stop()
+
+	session := server.Session()
+	err := session.DB("mydb").C("mycoll").Insert(M{"a": 1})
+	session.Close()
+	c.Assert(err, IsNil)
+
+	server.Wipe()
+
+	session = server.Session()
+	names, err := session.DatabaseNames()
+	session.Close()
+	c.Assert(err, IsNil)
+	for _, name := range names {
+		if name != "local" && name != "admin" {
+			c.Fatalf("Wipe should have removed this database: %s", name)
+		}
+	}
+}
+
+// TestStop verifies the server lifecycle: no process before the first
+// Session, a live process afterwards, and no listener after Stop.
+func (s *S) TestStop(c *C) {
+	var server dbtest.DBServer
+	server.SetPath(c.MkDir())
+	defer server.Stop()
+
+	// Server should not be running.
+	process := server.ProcessTest()
+	c.Assert(process, IsNil)
+
+	session := server.Session()
+	addr := session.LiveServers()[0]
+	session.Close()
+
+	// Server should be running now.
+	process = server.ProcessTest()
+	p, err := os.FindProcess(process.Pid)
+	c.Assert(err, IsNil)
+	p.Release()
+
+	server.Stop()
+
+	// Server should not be running anymore: a short-timeout dial to the
+	// old address must fail.
+	session, err = mgo.DialWithTimeout(addr, 500*time.Millisecond)
+	if session != nil {
+		session.Close()
+		c.Fatalf("Stop did not stop the server")
+	}
+}
+
+// TestCheckSessions verifies that Wipe panics when a session obtained
+// from the server is still open (a session leak).
+func (s *S) TestCheckSessions(c *C) {
+	var server dbtest.DBServer
+	server.SetPath(c.MkDir())
+	defer server.Stop()
+
+	session := server.Session()
+	defer session.Close()
+	c.Assert(server.Wipe, PanicMatches, "There are mgo sessions still alive.")
+}
+
+// TestCheckSessionsDisabled verifies that CHECK_SESSIONS=0 turns the
+// leak check off entirely.
+func (s *S) TestCheckSessionsDisabled(c *C) {
+	var server dbtest.DBServer
+	server.SetPath(c.MkDir())
+	defer server.Stop()
+
+	os.Setenv("CHECK_SESSIONS", "0")
+
+	// Should not panic, although it looks to Wipe like this session will leak.
+	session := server.Session()
+	defer session.Close()
+	server.Wipe()
+}

+ 12 - 0
src/gopkg.in/mgo.v2/dbtest/export_test.go

@@ -0,0 +1,12 @@
+package dbtest
+
+import (
+	"os"
+)
+
+// ProcessTest exposes the underlying mongod process to the test suite.
+// It returns nil when the server has not been started (or was stopped).
+func (dbs *DBServer) ProcessTest() *os.Process {
+	if dbs.server == nil {
+		return nil
+	}
+	return dbs.server.Process
+}

+ 31 - 0
src/gopkg.in/mgo.v2/doc.go

@@ -0,0 +1,31 @@
+// Package mgo offers a rich MongoDB driver for Go.
+//
+// Details about the mgo project (pronounced as "mango") are found
+// in its web page:
+//
+//     http://labix.org/mgo
+//
+// Usage of the driver revolves around the concept of sessions.  To
+// get started, obtain a session using the Dial function:
+//
+//     session, err := mgo.Dial(url)
+//
+// This will establish one or more connections with the cluster of
+// servers defined by the url parameter.  From then on, the cluster
+// may be queried with multiple consistency rules (see SetMode) and
+// documents retrieved with statements such as:
+//
+//     c := session.DB(database).C(collection)
+//     err := c.Find(query).One(&result)
+//
+// New sessions are typically created by calling session.Copy on the
+// initial session obtained at dial time. These new sessions will share
+// the same cluster information and connection pool, and may be easily
+// handed into other methods and functions for organizing logic.
+// Every session created must have its Close method called at the end
+// of its life time, so its resources may be put back in the pool or
+// collected, depending on the case.
+//
+// For more details, see the documentation for the types and methods.
+//
+package mgo

+ 33 - 0
src/gopkg.in/mgo.v2/export_test.go

@@ -0,0 +1,33 @@
+package mgo
+
+import (
+	"time"
+)
+
+// HackPingDelay overrides the package-level pingDelay for tests and
+// returns a function that restores the previous value. Both the change
+// and the restore are done under globalMutex.
+func HackPingDelay(newDelay time.Duration) (restore func()) {
+	globalMutex.Lock()
+	defer globalMutex.Unlock()
+
+	// Capture the old value before overwriting it.
+	oldDelay := pingDelay
+	restore = func() {
+		globalMutex.Lock()
+		pingDelay = oldDelay
+		globalMutex.Unlock()
+	}
+	pingDelay = newDelay
+	return
+}
+
+// HackSyncSocketTimeout overrides the package-level syncSocketTimeout
+// for tests and returns a function that restores the previous value.
+// Both the change and the restore are done under globalMutex.
+func HackSyncSocketTimeout(newTimeout time.Duration) (restore func()) {
+	globalMutex.Lock()
+	defer globalMutex.Unlock()
+
+	oldTimeout := syncSocketTimeout
+	restore = func() {
+		globalMutex.Lock()
+		syncSocketTimeout = oldTimeout
+		globalMutex.Unlock()
+	}
+	syncSocketTimeout = newTimeout
+	return
+}

+ 761 - 0
src/gopkg.in/mgo.v2/gridfs.go

@@ -0,0 +1,761 @@
+// mgo - MongoDB driver for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+//    list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+//    this list of conditions and the following disclaimer in the documentation
+//    and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package mgo
+
+import (
+	"crypto/md5"
+	"encoding/hex"
+	"errors"
+	"hash"
+	"io"
+	"os"
+	"sync"
+	"time"
+
+	"gopkg.in/mgo.v2/bson"
+)
+
+// GridFS bundles the two collections backing a GridFS prefix:
+// "<prefix>.files" for file documents and "<prefix>.chunks" for data.
+type GridFS struct {
+	Files  *Collection
+	Chunks *Collection
+}
+
+// gfsFileMode tracks what a GridFile is currently open for.
+type gfsFileMode int
+
+const (
+	gfsClosed  gfsFileMode = 0 // not open; most operations panic
+	gfsReading gfsFileMode = 1 // open for reading chunks
+	gfsWriting gfsFileMode = 2 // open for writing chunks
+)
+
+// GridFile represents one open GridFS file, for reading or writing.
+type GridFile struct {
+	m    sync.Mutex
+	c    sync.Cond // signalled as background chunk writes complete
+	gfs  *GridFS
+	mode gfsFileMode
+	err  error // first deferred error; reported by Close
+
+	chunk  int   // next chunk number to write / read position
+	offset int64 // current read offset in bytes
+
+	// Write-side state.
+	wpending int       // chunks handed off but not yet confirmed written
+	wbuf     []byte    // partial chunk buffered until it reaches ChunkSize
+	wsum     hash.Hash // running MD5 of everything written
+
+	// Read-side state.
+	rbuf   []byte          // remainder of the current chunk
+	rcache *gfsCachedChunk // read-ahead chunk, if any
+
+	doc gfsFile // the file document as stored in <prefix>.files
+}
+
+// gfsFile mirrors a document in the "<prefix>.files" collection.
+// The bare string tags are the legacy tag style understood by the
+// bson package (equivalent to bson:"..." tags).
+type gfsFile struct {
+	Id          interface{} "_id"
+	ChunkSize   int         "chunkSize"
+	UploadDate  time.Time   "uploadDate"
+	Length      int64       ",minsize"
+	MD5         string
+	Filename    string    ",omitempty"
+	ContentType string    "contentType,omitempty"
+	Metadata    *bson.Raw ",omitempty"
+}
+
+// gfsChunk mirrors a document in the "<prefix>.chunks" collection:
+// chunk N of the file identified by FilesId.
+type gfsChunk struct {
+	Id      interface{} "_id"
+	FilesId interface{} "files_id"
+	N       int
+	Data    []byte
+}
+
+// gfsCachedChunk holds a chunk being fetched ahead of time for reads.
+// The wait mutex is held while the fetch is in flight, so locking it
+// blocks until n/data/err are populated.
+type gfsCachedChunk struct {
+	wait sync.Mutex
+	n    int
+	data []byte
+	err  error
+}
+
+// newGridFS builds a GridFS handle over the two prefixed collections.
+func newGridFS(db *Database, prefix string) *GridFS {
+	return &GridFS{db.C(prefix + ".files"), db.C(prefix + ".chunks")}
+}
+
+// newFile allocates a GridFile with its condition variable wired to the
+// file mutex. The finalizer is intentionally left disabled.
+func (gfs *GridFS) newFile() *GridFile {
+	file := &GridFile{gfs: gfs}
+	file.c.L = &file.m
+	//runtime.SetFinalizer(file, finalizeFile)
+	return file
+}
+
+// finalizeFile would close leaked files if the finalizer above were
+// enabled; kept so the commented-out call remains valid.
+func finalizeFile(file *GridFile) {
+	file.Close()
+}
+
+// Create creates a new file with the provided name in the GridFS.  If the file
+// name already exists, a new version will be inserted with an up-to-date
+// uploadDate that will cause it to be atomically visible to the Open and
+// OpenId methods.  If the file name is not important, an empty name may be
+// provided and the file Id used instead.
+//
+// It's important to Close files whether they are being written to
+// or read from, and to check the err result to ensure the operation
+// completed successfully.
+//
+// A simple example inserting a new file:
+//
+//     func check(err error) {
+//         if err != nil {
+//             panic(err.String())
+//         }
+//     }
+//     file, err := db.GridFS("fs").Create("myfile.txt")
+//     check(err)
+//     n, err := file.Write([]byte("Hello world!"))
+//     check(err)
+//     err = file.Close()
+//     check(err)
+//     fmt.Printf("%d bytes written\n", n)
+//
+// The io.Writer interface is implemented by *GridFile and may be used to
+// help on the file creation.  For example:
+//
+//     file, err := db.GridFS("fs").Create("myfile.txt")
+//     check(err)
+//     messages, err := os.Open("/var/log/messages")
+//     check(err)
+//     defer messages.Close()
+//     err = io.Copy(file, messages)
+//     check(err)
+//     err = file.Close()
+//     check(err)
+//
+func (gfs *GridFS) Create(name string) (file *GridFile, err error) {
+	file = gfs.newFile()
+	file.mode = gfsWriting
+	file.wsum = md5.New()
+	// 255KB is the default chunk size; see SetChunkSize to change it.
+	file.doc = gfsFile{Id: bson.NewObjectId(), ChunkSize: 255 * 1024, Filename: name}
+	return
+}
+
+// OpenId returns the file with the provided id, for reading.
+// If the file isn't found, err will be set to mgo.ErrNotFound.
+//
+// It's important to Close files whether they are being written to
+// or read from, and to check the err result to ensure the operation
+// completed successfully.
+//
+// The following example will print the first 8192 bytes from the file:
+//
+//     func check(err error) {
+//         if err != nil {
+//             panic(err.String())
+//         }
+//     }
+//     file, err := db.GridFS("fs").OpenId(objid)
+//     check(err)
+//     b := make([]byte, 8192)
+//     n, err := file.Read(b)
+//     check(err)
+//     fmt.Println(string(b))
+//     check(err)
+//     err = file.Close()
+//     check(err)
+//     fmt.Printf("%d bytes read\n", n)
+//
+// The io.Reader interface is implemented by *GridFile and may be used to
+// deal with it.  As an example, the following snippet will dump the whole
+// file into the standard output:
+//
+//     file, err := db.GridFS("fs").OpenId(objid)
+//     check(err)
+//     err = io.Copy(os.Stdout, file)
+//     check(err)
+//     err = file.Close()
+//     check(err)
+//
+func (gfs *GridFS) OpenId(id interface{}) (file *GridFile, err error) {
+	// Fetch the file document first; the chunks are read lazily.
+	var doc gfsFile
+	err = gfs.Files.Find(bson.M{"_id": id}).One(&doc)
+	if err != nil {
+		return
+	}
+	file = gfs.newFile()
+	file.mode = gfsReading
+	file.doc = doc
+	return
+}
+
+// Open returns the most recently uploaded file with the provided
+// name, for reading. If the file isn't found, err will be set
+// to mgo.ErrNotFound.
+//
+// It's important to Close files whether they are being written to
+// or read from, and to check the err result to ensure the operation
+// completed successfully.
+//
+// The following example will print the first 8192 bytes from the file:
+//
+//     file, err := db.GridFS("fs").Open("myfile.txt")
+//     check(err)
+//     b := make([]byte, 8192)
+//     n, err := file.Read(b)
+//     check(err)
+//     fmt.Println(string(b))
+//     check(err)
+//     err = file.Close()
+//     check(err)
+//     fmt.Printf("%d bytes read\n", n)
+//
+// The io.Reader interface is implemented by *GridFile and may be used to
+// deal with it.  As an example, the following snippet will dump the whole
+// file into the standard output:
+//
+//     file, err := db.GridFS("fs").Open("myfile.txt")
+//     check(err)
+//     err = io.Copy(os.Stdout, file)
+//     check(err)
+//     err = file.Close()
+//     check(err)
+//
+func (gfs *GridFS) Open(name string) (file *GridFile, err error) {
+	// Sorting by descending uploadDate picks the newest version of the
+	// name, which is what makes Create's versioning atomic to readers.
+	var doc gfsFile
+	err = gfs.Files.Find(bson.M{"filename": name}).Sort("-uploadDate").One(&doc)
+	if err != nil {
+		return
+	}
+	file = gfs.newFile()
+	file.mode = gfsReading
+	file.doc = doc
+	return
+}
+
+// OpenNext opens the next file from iter for reading, sets *file to it,
+// and returns true on the success case. If no more documents are available
+// on iter or an error occurred, *file is set to nil and the result is false.
+// Errors will be available via iter.Err().
+//
+// The iter parameter must be an iterator on the GridFS files collection.
+// Using the GridFS.Find method is an easy way to obtain such an iterator,
+// but any iterator on the collection will work.
+//
+// If the provided *file is non-nil, OpenNext will close it before attempting
+// to iterate to the next element. This means that in a loop one only
+// has to worry about closing files when breaking out of the loop early
+// (break, return, or panic).
+//
+// For example:
+//
+//     gfs := db.GridFS("fs")
+//     query := gfs.Find(nil).Sort("filename")
+//     iter := query.Iter()
+//     var f *mgo.GridFile
+//     for gfs.OpenNext(iter, &f) {
+//         fmt.Printf("Filename: %s\n", f.Name())
+//     }
+//     if iter.Close() != nil {
+//         panic(iter.Close())
+//     }
+//
+func (gfs *GridFS) OpenNext(iter *Iter, file **GridFile) bool {
+	if *file != nil {
+		// Ignoring the error here shouldn't be a big deal
+		// as we're reading the file and the loop iteration
+		// for this file is finished.
+		_ = (*file).Close()
+	}
+	var doc gfsFile
+	if !iter.Next(&doc) {
+		*file = nil
+		return false
+	}
+	f := gfs.newFile()
+	f.mode = gfsReading
+	f.doc = doc
+	*file = f
+	return true
+}
+
+// Find runs query on GridFS's files collection and returns
+// the resulting Query.
+//
+// This logic:
+//
+//     gfs := db.GridFS("fs")
+//     iter := gfs.Find(nil).Iter()
+//
+// Is equivalent to:
+//
+//     files := db.C("fs" + ".files")
+//     iter := files.Find(nil).Iter()
+//
+func (gfs *GridFS) Find(query interface{}) *Query {
+	return gfs.Files.Find(query)
+}
+
+// RemoveId deletes the file with the provided id from the GridFS.
+// The file document is removed first, then its chunks; if the chunk
+// removal fails, orphan chunks may be left behind (err reports it).
+func (gfs *GridFS) RemoveId(id interface{}) error {
+	err := gfs.Files.Remove(bson.M{"_id": id})
+	if err != nil {
+		return err
+	}
+	_, err = gfs.Chunks.RemoveAll(bson.D{{"files_id", id}})
+	return err
+}
+
+// gfsDocId is a projection target holding only a document's _id.
+type gfsDocId struct {
+	Id interface{} "_id"
+}
+
+// Remove deletes all files with the provided name from the GridFS.
+// It keeps going past individual failures and returns the last error
+// seen, or the iterator's close error when all removals succeeded.
+func (gfs *GridFS) Remove(name string) (err error) {
+	iter := gfs.Files.Find(bson.M{"filename": name}).Select(bson.M{"_id": 1}).Iter()
+	var doc gfsDocId
+	for iter.Next(&doc) {
+		if e := gfs.RemoveId(doc.Id); e != nil {
+			err = e
+		}
+	}
+	if err == nil {
+		err = iter.Close()
+	}
+	return err
+}
+
+// assertMode panics unless the file is currently open in the given
+// mode; the panic message names the mode the file is actually in.
+func (file *GridFile) assertMode(mode gfsFileMode) {
+	switch file.mode {
+	case mode:
+		return
+	case gfsWriting:
+		panic("GridFile is open for writing")
+	case gfsReading:
+		panic("GridFile is open for reading")
+	case gfsClosed:
+		panic("GridFile is closed")
+	default:
+		panic("internal error: missing GridFile mode")
+	}
+}
+
+// SetChunkSize sets size of saved chunks.  Once the file is written to, it
+// will be split in blocks of that size and each block saved into an
+// independent chunk document.  The default chunk size is 255kb.
+//
+// It is a runtime error to call this function once the file has started
+// being written to.
+func (file *GridFile) SetChunkSize(bytes int) {
+	file.assertMode(gfsWriting)
+	debugf("GridFile %p: setting chunk size to %d", file, bytes)
+	file.m.Lock()
+	file.doc.ChunkSize = bytes
+	file.m.Unlock()
+}
+
+// Id returns the current file Id.
+func (file *GridFile) Id() interface{} {
+	return file.doc.Id
+}
+
+// SetId changes the current file Id.
+//
+// It is a runtime error to call this function once the file has started
+// being written to, or when the file is not open for writing.
+func (file *GridFile) SetId(id interface{}) {
+	file.assertMode(gfsWriting)
+	file.m.Lock()
+	file.doc.Id = id
+	file.m.Unlock()
+}
+
+// Name returns the optional file name.  An empty string will be returned
+// in case it is unset.
+func (file *GridFile) Name() string {
+	return file.doc.Filename
+}
+
+// SetName changes the optional file name.  An empty string may be used to
+// unset it.
+//
+// It is a runtime error to call this function when the file is not open
+// for writing.
+func (file *GridFile) SetName(name string) {
+	file.assertMode(gfsWriting)
+	file.m.Lock()
+	file.doc.Filename = name
+	file.m.Unlock()
+}
+
+// ContentType returns the optional file content type.  An empty string will be
+// returned in case it is unset.
+func (file *GridFile) ContentType() string {
+	return file.doc.ContentType
+}
+
+// SetContentType changes the optional file content type.  An empty string may be
+// used to unset it.
+//
+// It is a runtime error to call this function when the file is not open
+// for writing.
+func (file *GridFile) SetContentType(ctype string) {
+	file.assertMode(gfsWriting)
+	file.m.Lock()
+	file.doc.ContentType = ctype
+	file.m.Unlock()
+}
+
+// GetMeta unmarshals the optional "metadata" field associated with the
+// file into the result parameter. The meaning of keys under that field
+// is user-defined. For example:
+//
+//     result := struct{ INode int }{}
+//     err = file.GetMeta(&result)
+//     if err != nil {
+//         panic(err.String())
+//     }
+//     fmt.Printf("inode: %d\n", result.INode)
+//
+// When no metadata is stored, result is left untouched and err is nil.
+func (file *GridFile) GetMeta(result interface{}) (err error) {
+	file.m.Lock()
+	if file.doc.Metadata != nil {
+		err = bson.Unmarshal(file.doc.Metadata.Data, result)
+	}
+	file.m.Unlock()
+	return
+}
+
+// SetMeta changes the optional "metadata" field associated with the
+// file. The meaning of keys under that field is user-defined.
+// For example:
+//
+//     file.SetMeta(bson.M{"inode": inode})
+//
+// A marshalling failure is recorded as the file's deferred error (kept
+// only if no earlier error exists) and is reported by Close.
+//
+// It is a runtime error to call this function when the file is not open
+// for writing.
+func (file *GridFile) SetMeta(metadata interface{}) {
+	file.assertMode(gfsWriting)
+	data, err := bson.Marshal(metadata)
+	file.m.Lock()
+	if err != nil {
+		// Never store the failed marshal's output as metadata. The old
+		// condition (err != nil && file.err == nil) fell through to the
+		// assignment below whenever a previous error was already set,
+		// silently recording invalid data.
+		if file.err == nil {
+			file.err = err
+		}
+	} else {
+		file.doc.Metadata = &bson.Raw{Data: data}
+	}
+	file.m.Unlock()
+}
+
+// Size returns the file size in bytes.
+func (file *GridFile) Size() (bytes int64) {
+	file.m.Lock()
+	bytes = file.doc.Length
+	file.m.Unlock()
+	return
+}
+
+// MD5 returns the file MD5 as a hex-encoded string.
+// NOTE(review): read without holding file.m, unlike Size — presumably
+// only meaningful once writing has completed; confirm against callers.
+func (file *GridFile) MD5() (md5 string) {
+	return file.doc.MD5
+}
+
+// UploadDate returns the file upload time.
+func (file *GridFile) UploadDate() time.Time {
+	return file.doc.UploadDate
+}
+
+// SetUploadDate changes the file upload time.
+//
+// It is a runtime error to call this function when the file is not open
+// for writing.
+func (file *GridFile) SetUploadDate(t time.Time) {
+	file.assertMode(gfsWriting)
+	file.m.Lock()
+	file.doc.UploadDate = t
+	file.m.Unlock()
+}
+
+// Close flushes any pending changes in case the file is being written
+// to, waits for any background operations to finish, and closes the file.
+//
+// It's important to Close files whether they are being written to
+// or read from, and to check the err result to ensure the operation
+// completed successfully.
+func (file *GridFile) Close() (err error) {
+	file.m.Lock()
+	defer file.m.Unlock()
+	if file.mode == gfsWriting {
+		// Flush the final partial chunk, then wait for pending inserts
+		// and write the file document (completeWrite).
+		if len(file.wbuf) > 0 && file.err == nil {
+			file.insertChunk(file.wbuf)
+			file.wbuf = file.wbuf[0:0]
+		}
+		file.completeWrite()
+	} else if file.mode == gfsReading && file.rcache != nil {
+		// Locking wait blocks until any in-flight read-ahead finishes
+		// before the cache is dropped.
+		file.rcache.wait.Lock()
+		file.rcache = nil
+	}
+	file.mode = gfsClosed
+	debugf("GridFile %p: closed", file)
+	return file.err
+}
+
+// completeWrite finishes a write: waits for background chunk inserts,
+// stamps MD5 and uploadDate, inserts the file document, and on failure
+// removes the chunks already written. Called with file.m held.
+func (file *GridFile) completeWrite() {
+	for file.wpending > 0 {
+		debugf("GridFile %p: waiting for %d pending chunks to complete file write", file, file.wpending)
+		file.c.Wait()
+	}
+	if file.err == nil {
+		hexsum := hex.EncodeToString(file.wsum.Sum(nil))
+		if file.doc.UploadDate.IsZero() {
+			file.doc.UploadDate = bson.Now()
+		}
+		file.doc.MD5 = hexsum
+		file.err = file.gfs.Files.Insert(file.doc)
+	}
+	if file.err != nil {
+		// Best effort cleanup of already-written chunks; its own error
+		// is ignored since file.err already reports the failure.
+		file.gfs.Chunks.RemoveAll(bson.D{{"files_id", file.doc.Id}})
+	}
+	if file.err == nil {
+		index := Index{
+			Key:    []string{"files_id", "n"},
+			Unique: true,
+		}
+		file.err = file.gfs.Chunks.EnsureIndex(index)
+	}
+}
+
+// Abort cancels an in-progress write, preventing the file from being
+// automatically created and ensuring previously written chunks are
+// removed when the file is closed.
+//
+// It is a runtime error to call Abort when the file was not opened
+// for writing.
+func (file *GridFile) Abort() {
+	if file.mode != gfsWriting {
+		panic("file.Abort must be called on file opened for writing")
+	}
+	// NOTE(review): file.err is set without holding file.m here; other
+	// methods guard it with the mutex — confirm this is intentional.
+	file.err = errors.New("write aborted")
+}
+
+// Write writes the provided data to the file and returns the
+// number of bytes written and an error in case something
+// wrong happened.
+//
+// The file will internally cache the data so that all but the last
+// chunk sent to the database have the size defined by SetChunkSize.
+// This also means that errors may be deferred until a future call
+// to Write or Close.
+//
+// The parameters and behavior of this function turn the file
+// into an io.Writer.
+func (file *GridFile) Write(data []byte) (n int, err error) {
+	file.assertMode(gfsWriting)
+	file.m.Lock()
+	debugf("GridFile %p: writing %d bytes", file, len(data))
+	defer file.m.Unlock()
+
+	// A previously recorded (possibly deferred) error fails the
+	// write up front.
+	if file.err != nil {
+		return 0, file.err
+	}
+
+	n = len(data)
+	file.doc.Length += int64(n)
+	chunkSize := file.doc.ChunkSize
+
+	// Fast path: everything still fits in the buffered chunk.
+	if len(file.wbuf)+len(data) < chunkSize {
+		file.wbuf = append(file.wbuf, data...)
+		return
+	}
+
+	// First, flush file.wbuf complementing with data.
+	if len(file.wbuf) > 0 {
+		missing := chunkSize - len(file.wbuf)
+		if missing > len(data) {
+			missing = len(data)
+		}
+		file.wbuf = append(file.wbuf, data[:missing]...)
+		data = data[missing:]
+		file.insertChunk(file.wbuf)
+		file.wbuf = file.wbuf[0:0]
+	}
+
+	// Then, flush all chunks from data without copying.
+	// NOTE(review): the strict > means a remainder of exactly
+	// chunkSize stays buffered here rather than being flushed;
+	// it is written by a later Write or by Close.
+	for len(data) > chunkSize {
+		size := chunkSize
+		if size > len(data) {
+			size = len(data)
+		}
+		file.insertChunk(data[:size])
+		data = data[size:]
+	}
+
+	// And append the rest for a future call.
+	file.wbuf = append(file.wbuf, data...)
+
+	return n, file.err
+}
+
+// insertChunk asynchronously inserts one chunk document for this file,
+// feeding the data into the running MD5 sum and throttling so that at
+// most ~1MB of chunk inserts are in flight at once. Insert errors are
+// reported via file.err on a later Write/Close. Called with file.m held.
+func (file *GridFile) insertChunk(data []byte) {
+	n := file.chunk
+	file.chunk++
+	debugf("GridFile %p: adding to checksum: %q", file, string(data))
+	file.wsum.Write(data)
+
+	for file.doc.ChunkSize*file.wpending >= 1024*1024 {
+		// Hold on.. we got a MB pending.
+		file.c.Wait()
+		if file.err != nil {
+			return
+		}
+	}
+
+	file.wpending++
+
+	debugf("GridFile %p: inserting chunk %d with %d bytes", file, n, len(data))
+
+	// We may not own the memory of data, so rather than
+	// simply copying it, we'll marshal the document ahead of time.
+	data, err := bson.Marshal(gfsChunk{bson.NewObjectId(), file.doc.Id, n, data})
+	if err != nil {
+		file.err = err
+		return
+	}
+
+	go func() {
+		err := file.gfs.Chunks.Insert(bson.Raw{Data: data})
+		file.m.Lock()
+		file.wpending--
+		if err != nil && file.err == nil {
+			// Keep only the first error observed.
+			file.err = err
+		}
+		file.c.Broadcast()
+		file.m.Unlock()
+	}()
+}
+
+// Seek sets the offset for the next Read or Write on file to
+// offset, interpreted according to whence: 0 means relative to
+// the origin of the file, 1 means relative to the current offset,
+// and 2 means relative to the end. It returns the new offset and
+// an error, if any.
+func (file *GridFile) Seek(offset int64, whence int) (pos int64, err error) {
+	file.m.Lock()
+	// %d, not %s: offset is an int64; %s would log noise such as
+	// "%!s(int64=3)".
+	debugf("GridFile %p: seeking for %d (whence=%d)", file, offset, whence)
+	defer file.m.Unlock()
+	switch whence {
+	case os.SEEK_SET:
+	case os.SEEK_CUR:
+		offset += file.offset
+	case os.SEEK_END:
+		offset += file.doc.Length
+	default:
+		panic("unsupported whence value")
+	}
+	if offset > file.doc.Length {
+		return file.offset, errors.New("seek past end of file")
+	}
+	if offset == file.doc.Length {
+		// If we're seeking to the end of the file,
+		// no need to read anything. This enables
+		// a client to find the size of the file using only the
+		// io.ReadSeeker interface with low overhead.
+		file.offset = offset
+		return file.offset, nil
+	}
+	chunk := int(offset / int64(file.doc.ChunkSize))
+	if chunk+1 == file.chunk && offset >= file.offset {
+		// Target lies within the chunk already buffered in rbuf:
+		// advance inside the buffer without touching the database.
+		file.rbuf = file.rbuf[int(offset-file.offset):]
+		file.offset = offset
+		return file.offset, nil
+	}
+	file.offset = offset
+	file.chunk = chunk
+	file.rbuf = nil
+	file.rbuf, err = file.getChunk()
+	if err == nil {
+		// Discard the portion of the chunk before the target offset.
+		file.rbuf = file.rbuf[int(file.offset-int64(chunk)*int64(file.doc.ChunkSize)):]
+	}
+	return file.offset, err
+}
+
+// Read reads into b the next available data from the file and
+// returns the number of bytes written and an error in case
+// something wrong happened.  At the end of the file, n will
+// be zero and err will be set to io.EOF.
+//
+// The parameters and behavior of this function turn the file
+// into an io.Reader.
+func (file *GridFile) Read(b []byte) (n int, err error) {
+	file.assertMode(gfsReading)
+	file.m.Lock()
+	debugf("GridFile %p: reading at offset %d into buffer of length %d", file, file.offset, len(b))
+	defer file.m.Unlock()
+	if file.offset == file.doc.Length {
+		return 0, io.EOF
+	}
+	// Copy out of the buffered chunk, fetching the next chunk whenever
+	// the buffer runs dry before b is full or end-of-file is reached.
+	for err == nil {
+		i := copy(b, file.rbuf)
+		n += i
+		file.offset += int64(i)
+		file.rbuf = file.rbuf[i:]
+		if i == len(b) || file.offset == file.doc.Length {
+			break
+		}
+		b = b[i:]
+		file.rbuf, err = file.getChunk()
+	}
+	return n, err
+}
+
+// getChunk returns the data for the current read chunk (file.chunk),
+// preferring the background-prefetched cache when its chunk number
+// matches, and then schedules a prefetch of the following chunk.
+func (file *GridFile) getChunk() (data []byte, err error) {
+	cache := file.rcache
+	file.rcache = nil
+	if cache != nil && cache.n == file.chunk {
+		debugf("GridFile %p: Getting chunk %d from cache", file, file.chunk)
+		// Block until the prefetch goroutine unlocks wait.
+		cache.wait.Lock()
+		data, err = cache.data, cache.err
+	} else {
+		debugf("GridFile %p: Fetching chunk %d", file, file.chunk)
+		var doc gfsChunk
+		err = file.gfs.Chunks.Find(bson.D{{"files_id", file.doc.Id}, {"n", file.chunk}}).One(&doc)
+		data = doc.Data
+	}
+	file.chunk++
+	if int64(file.chunk)*int64(file.doc.ChunkSize) < file.doc.Length {
+		// Read the next one in background.
+		cache = &gfsCachedChunk{n: file.chunk}
+		cache.wait.Lock()
+		debugf("GridFile %p: Scheduling chunk %d for background caching", file, file.chunk)
+		// Clone the session to avoid having it closed in between.
+		chunks := file.gfs.Chunks
+		session := chunks.Database.Session.Clone()
+		go func(id interface{}, n int) {
+			defer session.Close()
+			chunks = chunks.With(session)
+			var doc gfsChunk
+			cache.err = chunks.Find(bson.D{{"files_id", id}, {"n", n}}).One(&doc)
+			cache.data = doc.Data
+			// Release readers waiting in the cache branch above.
+			cache.wait.Unlock()
+		}(file.doc.Id, file.chunk)
+		file.rcache = cache
+	}
+	debugf("Returning err: %#v", err)
+	return
+}

+ 708 - 0
src/gopkg.in/mgo.v2/gridfs_test.go

@@ -0,0 +1,708 @@
+// mgo - MongoDB driver for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+//    list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+//    this list of conditions and the following disclaimer in the documentation
+//    and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package mgo_test
+
+import (
+	"io"
+	"os"
+	"time"
+
+	. "gopkg.in/check.v1"
+	"gopkg.in/mgo.v2"
+	"gopkg.in/mgo.v2/bson"
+)
+
+// TestGridFSCreate verifies that Create/Write/Close produce the expected
+// files document, a single chunk document, and the (files_id, n) index.
+// Requires the test harness mongod on localhost:40011.
+func (s *S) TestGridFSCreate(c *C) {
+	session, err := mgo.Dial("localhost:40011")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	db := session.DB("mydb")
+
+	before := bson.Now()
+
+	gfs := db.GridFS("fs")
+	file, err := gfs.Create("")
+	c.Assert(err, IsNil)
+
+	n, err := file.Write([]byte("some data"))
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, 9)
+
+	err = file.Close()
+	c.Assert(err, IsNil)
+
+	after := bson.Now()
+
+	// Check the file information.
+	result := M{}
+	err = db.C("fs.files").Find(nil).One(result)
+	c.Assert(err, IsNil)
+
+	// Replace the non-deterministic fields before the deep comparison.
+	fileId, ok := result["_id"].(bson.ObjectId)
+	c.Assert(ok, Equals, true)
+	c.Assert(fileId.Valid(), Equals, true)
+	result["_id"] = "<id>"
+
+	ud, ok := result["uploadDate"].(time.Time)
+	c.Assert(ok, Equals, true)
+	c.Assert(ud.After(before) && ud.Before(after), Equals, true)
+	result["uploadDate"] = "<timestamp>"
+
+	expected := M{
+		"_id":        "<id>",
+		"length":     9,
+		"chunkSize":  255 * 1024,
+		"uploadDate": "<timestamp>",
+		"md5":        "1e50210a0202497fb79bc38b6ade6c34",
+	}
+	c.Assert(result, DeepEquals, expected)
+
+	// Check the chunk.
+	result = M{}
+	err = db.C("fs.chunks").Find(nil).One(result)
+	c.Assert(err, IsNil)
+
+	chunkId, ok := result["_id"].(bson.ObjectId)
+	c.Assert(ok, Equals, true)
+	c.Assert(chunkId.Valid(), Equals, true)
+	result["_id"] = "<id>"
+
+	expected = M{
+		"_id":      "<id>",
+		"files_id": fileId,
+		"n":        0,
+		"data":     []byte("some data"),
+	}
+	c.Assert(result, DeepEquals, expected)
+
+	// Check that an index was created.
+	indexes, err := db.C("fs.chunks").Indexes()
+	c.Assert(err, IsNil)
+	c.Assert(len(indexes), Equals, 2)
+	c.Assert(indexes[1].Key, DeepEquals, []string{"files_id", "n"})
+}
+
+// TestGridFSFileDetails exercises the GridFile metadata accessors
+// (Id/Name/ContentType/Meta/MD5/UploadDate) before and after Close.
+func (s *S) TestGridFSFileDetails(c *C) {
+	session, err := mgo.Dial("localhost:40011")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	db := session.DB("mydb")
+
+	gfs := db.GridFS("fs")
+
+	file, err := gfs.Create("myfile1.txt")
+	c.Assert(err, IsNil)
+
+	n, err := file.Write([]byte("some"))
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, 4)
+
+	c.Assert(file.Size(), Equals, int64(4))
+
+	n, err = file.Write([]byte(" data"))
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, 5)
+
+	c.Assert(file.Size(), Equals, int64(9))
+
+	id, _ := file.Id().(bson.ObjectId)
+	c.Assert(id.Valid(), Equals, true)
+	c.Assert(file.Name(), Equals, "myfile1.txt")
+	c.Assert(file.ContentType(), Equals, "")
+
+	var info interface{}
+	err = file.GetMeta(&info)
+	c.Assert(err, IsNil)
+	c.Assert(info, IsNil)
+
+	// Setters may override the defaults while the file is still open.
+	file.SetId("myid")
+	file.SetName("myfile2.txt")
+	file.SetContentType("text/plain")
+	file.SetMeta(M{"any": "thing"})
+
+	c.Assert(file.Id(), Equals, "myid")
+	c.Assert(file.Name(), Equals, "myfile2.txt")
+	c.Assert(file.ContentType(), Equals, "text/plain")
+
+	err = file.GetMeta(&info)
+	c.Assert(err, IsNil)
+	c.Assert(info, DeepEquals, bson.M{"any": "thing"})
+
+	err = file.Close()
+	c.Assert(err, IsNil)
+
+	// MD5 and upload date are only available after Close.
+	c.Assert(file.MD5(), Equals, "1e50210a0202497fb79bc38b6ade6c34")
+
+	ud := file.UploadDate()
+	now := time.Now()
+	c.Assert(ud.Before(now), Equals, true)
+	c.Assert(ud.After(now.Add(-3*time.Second)), Equals, true)
+
+	result := M{}
+	err = db.C("fs.files").Find(nil).One(result)
+	c.Assert(err, IsNil)
+
+	result["uploadDate"] = "<timestamp>"
+
+	expected := M{
+		"_id":         "myid",
+		"length":      9,
+		"chunkSize":   255 * 1024,
+		"uploadDate":  "<timestamp>",
+		"md5":         "1e50210a0202497fb79bc38b6ade6c34",
+		"filename":    "myfile2.txt",
+		"contentType": "text/plain",
+		"metadata":    M{"any": "thing"},
+	}
+	c.Assert(result, DeepEquals, expected)
+}
+
+// TestGridFSSetUploadDate verifies that an explicit SetUploadDate is
+// persisted instead of the automatic Close-time timestamp.
+func (s *S) TestGridFSSetUploadDate(c *C) {
+	session, err := mgo.Dial("localhost:40011")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	db := session.DB("mydb")
+
+	gfs := db.GridFS("fs")
+	file, err := gfs.Create("")
+	c.Assert(err, IsNil)
+
+	t := time.Date(2014, 1, 1, 1, 1, 1, 0, time.Local)
+	file.SetUploadDate(t)
+
+	err = file.Close()
+	c.Assert(err, IsNil)
+
+	// Check the file information.
+	result := M{}
+	err = db.C("fs.files").Find(nil).One(result)
+	c.Assert(err, IsNil)
+
+	// Compare with time.Equal: stored time may differ in location.
+	ud := result["uploadDate"].(time.Time)
+	if !ud.Equal(t) {
+		c.Fatalf("want upload date %s, got %s", t, ud)
+	}
+}
+
+// TestGridFSCreateWithChunking writes across chunk boundaries with a
+// tiny 5-byte chunk size and verifies the resulting chunk documents.
+func (s *S) TestGridFSCreateWithChunking(c *C) {
+	session, err := mgo.Dial("localhost:40011")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	db := session.DB("mydb")
+
+	gfs := db.GridFS("fs")
+
+	file, err := gfs.Create("")
+	c.Assert(err, IsNil)
+
+	file.SetChunkSize(5)
+
+	// Smaller than the chunk size.
+	n, err := file.Write([]byte("abc"))
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, 3)
+
+	// Boundary in the middle.
+	n, err = file.Write([]byte("defg"))
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, 4)
+
+	// Boundary at the end.
+	n, err = file.Write([]byte("hij"))
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, 3)
+
+	// Larger than the chunk size, with 3 chunks.
+	n, err = file.Write([]byte("klmnopqrstuv"))
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, 12)
+
+	err = file.Close()
+	c.Assert(err, IsNil)
+
+	// Check the file information.
+	result := M{}
+	err = db.C("fs.files").Find(nil).One(result)
+	c.Assert(err, IsNil)
+
+	fileId, _ := result["_id"].(bson.ObjectId)
+	c.Assert(fileId.Valid(), Equals, true)
+	result["_id"] = "<id>"
+	result["uploadDate"] = "<timestamp>"
+
+	expected := M{
+		"_id":        "<id>",
+		"length":     22,
+		"chunkSize":  5,
+		"uploadDate": "<timestamp>",
+		"md5":        "44a66044834cbe55040089cabfc102d5",
+	}
+	c.Assert(result, DeepEquals, expected)
+
+	// Check the chunks.
+	iter := db.C("fs.chunks").Find(nil).Sort("n").Iter()
+	dataChunks := []string{"abcde", "fghij", "klmno", "pqrst", "uv"}
+	for i := 0; ; i++ {
+		result = M{}
+		if !iter.Next(result) {
+			if i != 5 {
+				c.Fatalf("Expected 5 chunks, got %d", i)
+			}
+			break
+		}
+		// NOTE(review): iter.Close() is asserted inside the loop and
+		// iteration continues afterwards — presumably this relies on
+		// all five tiny chunks arriving in the first batch; confirm.
+		c.Assert(iter.Close(), IsNil)
+
+		result["_id"] = "<id>"
+
+		expected = M{
+			"_id":      "<id>",
+			"files_id": fileId,
+			"n":        i,
+			"data":     []byte(dataChunks[i]),
+		}
+		c.Assert(result, DeepEquals, expected)
+	}
+}
+
+// TestGridFSAbort verifies that Abort makes Close fail and removes the
+// chunks that were already inserted.
+func (s *S) TestGridFSAbort(c *C) {
+	session, err := mgo.Dial("localhost:40011")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	db := session.DB("mydb")
+
+	gfs := db.GridFS("fs")
+	file, err := gfs.Create("")
+	c.Assert(err, IsNil)
+
+	file.SetChunkSize(5)
+
+	n, err := file.Write([]byte("some data"))
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, 9)
+
+	// Chunk inserts happen in background goroutines, so poll until
+	// the first chunk shows up.
+	// NOTE(review): the loop has no sleep between attempts — it
+	// issues up to 10 back-to-back Count queries; confirm intended.
+	var count int
+	for i := 0; i < 10; i++ {
+		count, err = db.C("fs.chunks").Count()
+		if count > 0 || err != nil {
+			break
+		}
+	}
+	c.Assert(err, IsNil)
+	c.Assert(count, Equals, 1)
+
+	file.Abort()
+
+	err = file.Close()
+	c.Assert(err, ErrorMatches, "write aborted")
+
+	count, err = db.C("fs.chunks").Count()
+	c.Assert(err, IsNil)
+	c.Assert(count, Equals, 0)
+}
+
+// TestGridFSCloseConflict forces a duplicate-key error when Close
+// inserts the files document, and checks the chunks are cleaned up.
+func (s *S) TestGridFSCloseConflict(c *C) {
+	session, err := mgo.Dial("localhost:40011")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	db := session.DB("mydb")
+
+	db.C("fs.files").EnsureIndex(mgo.Index{Key: []string{"filename"}, Unique: true})
+
+	// For a closing-time conflict
+	err = db.C("fs.files").Insert(M{"filename": "foo.txt"})
+	c.Assert(err, IsNil)
+
+	gfs := db.GridFS("fs")
+	file, err := gfs.Create("foo.txt")
+	c.Assert(err, IsNil)
+
+	_, err = file.Write([]byte("some data"))
+	c.Assert(err, IsNil)
+
+	err = file.Close()
+	c.Assert(mgo.IsDup(err), Equals, true)
+
+	count, err := db.C("fs.chunks").Count()
+	c.Assert(err, IsNil)
+	c.Assert(count, Equals, 0)
+}
+
+// TestGridFSOpenNotFound checks that Open and OpenId report
+// mgo.ErrNotFound (and a nil file) for missing files.
+func (s *S) TestGridFSOpenNotFound(c *C) {
+	session, err := mgo.Dial("localhost:40011")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	db := session.DB("mydb")
+
+	gfs := db.GridFS("fs")
+	file, err := gfs.OpenId("non-existent")
+	c.Assert(err == mgo.ErrNotFound, Equals, true)
+	c.Assert(file, IsNil)
+
+	file, err = gfs.Open("non-existent")
+	c.Assert(err == mgo.ErrNotFound, Equals, true)
+	c.Assert(file, IsNil)
+}
+
+// TestGridFSReadAll reads a multi-chunk file in one large Read and
+// checks the subsequent Read reports io.EOF.
+func (s *S) TestGridFSReadAll(c *C) {
+	session, err := mgo.Dial("localhost:40011")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	db := session.DB("mydb")
+
+	gfs := db.GridFS("fs")
+	file, err := gfs.Create("")
+	c.Assert(err, IsNil)
+	id := file.Id()
+
+	file.SetChunkSize(5)
+
+	n, err := file.Write([]byte("abcdefghijklmnopqrstuv"))
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, 22)
+
+	err = file.Close()
+	c.Assert(err, IsNil)
+
+	file, err = gfs.OpenId(id)
+	c.Assert(err, IsNil)
+
+	// Buffer is larger than the file; a single Read drains it.
+	b := make([]byte, 30)
+	n, err = file.Read(b)
+	c.Assert(n, Equals, 22)
+	c.Assert(err, IsNil)
+
+	n, err = file.Read(b)
+	c.Assert(n, Equals, 0)
+	c.Assert(err == io.EOF, Equals, true)
+
+	err = file.Close()
+	c.Assert(err, IsNil)
+}
+
+// TestGridFSReadChunking mirrors TestGridFSCreateWithChunking on the
+// read side: Reads that end inside, at, and across chunk boundaries.
+func (s *S) TestGridFSReadChunking(c *C) {
+	session, err := mgo.Dial("localhost:40011")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	db := session.DB("mydb")
+
+	gfs := db.GridFS("fs")
+
+	file, err := gfs.Create("")
+	c.Assert(err, IsNil)
+
+	id := file.Id()
+
+	file.SetChunkSize(5)
+
+	n, err := file.Write([]byte("abcdefghijklmnopqrstuv"))
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, 22)
+
+	err = file.Close()
+	c.Assert(err, IsNil)
+
+	file, err = gfs.OpenId(id)
+	c.Assert(err, IsNil)
+
+	b := make([]byte, 30)
+
+	// Smaller than the chunk size.
+	n, err = file.Read(b[:3])
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, 3)
+	c.Assert(b[:3], DeepEquals, []byte("abc"))
+
+	// Boundary in the middle.
+	n, err = file.Read(b[:4])
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, 4)
+	c.Assert(b[:4], DeepEquals, []byte("defg"))
+
+	// Boundary at the end.
+	n, err = file.Read(b[:3])
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, 3)
+	c.Assert(b[:3], DeepEquals, []byte("hij"))
+
+	// Larger than the chunk size, with 3 chunks.
+	n, err = file.Read(b)
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, 12)
+	c.Assert(b[:12], DeepEquals, []byte("klmnopqrstuv"))
+
+	n, err = file.Read(b)
+	c.Assert(n, Equals, 0)
+	c.Assert(err == io.EOF, Equals, true)
+
+	err = file.Close()
+	c.Assert(err, IsNil)
+}
+
+// TestGridFSOpen checks that Open with a duplicated filename returns
+// the most recently uploaded revision.
+func (s *S) TestGridFSOpen(c *C) {
+	session, err := mgo.Dial("localhost:40011")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	db := session.DB("mydb")
+
+	gfs := db.GridFS("fs")
+
+	file, err := gfs.Create("myfile.txt")
+	c.Assert(err, IsNil)
+	file.Write([]byte{'1'})
+	file.Close()
+
+	file, err = gfs.Create("myfile.txt")
+	c.Assert(err, IsNil)
+	file.Write([]byte{'2'})
+	file.Close()
+
+	file, err = gfs.Open("myfile.txt")
+	c.Assert(err, IsNil)
+	defer file.Close()
+
+	var b [1]byte
+
+	_, err = file.Read(b[:])
+	c.Assert(err, IsNil)
+	c.Assert(string(b[:]), Equals, "2")
+}
+
+// TestGridFSSeek exercises Seek with all three whence values, the
+// buffered same-chunk fast path, and past-EOF rejection.
+func (s *S) TestGridFSSeek(c *C) {
+	session, err := mgo.Dial("localhost:40011")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	db := session.DB("mydb")
+
+	gfs := db.GridFS("fs")
+	file, err := gfs.Create("")
+	c.Assert(err, IsNil)
+	id := file.Id()
+
+	file.SetChunkSize(5)
+
+	n, err := file.Write([]byte("abcdefghijklmnopqrstuv"))
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, 22)
+
+	err = file.Close()
+	c.Assert(err, IsNil)
+
+	b := make([]byte, 5)
+
+	file, err = gfs.OpenId(id)
+	c.Assert(err, IsNil)
+
+	o, err := file.Seek(3, os.SEEK_SET)
+	c.Assert(err, IsNil)
+	c.Assert(o, Equals, int64(3))
+	_, err = file.Read(b)
+	c.Assert(err, IsNil)
+	c.Assert(b, DeepEquals, []byte("defgh"))
+
+	o, err = file.Seek(5, os.SEEK_CUR)
+	c.Assert(err, IsNil)
+	c.Assert(o, Equals, int64(13))
+	_, err = file.Read(b)
+	c.Assert(err, IsNil)
+	c.Assert(b, DeepEquals, []byte("nopqr"))
+
+	o, err = file.Seek(0, os.SEEK_END)
+	c.Assert(err, IsNil)
+	c.Assert(o, Equals, int64(22))
+	n, err = file.Read(b)
+	c.Assert(err, Equals, io.EOF)
+	c.Assert(n, Equals, 0)
+
+	o, err = file.Seek(-10, os.SEEK_END)
+	c.Assert(err, IsNil)
+	c.Assert(o, Equals, int64(12))
+	_, err = file.Read(b)
+	c.Assert(err, IsNil)
+	c.Assert(b, DeepEquals, []byte("mnopq"))
+
+	o, err = file.Seek(8, os.SEEK_SET)
+	c.Assert(err, IsNil)
+	c.Assert(o, Equals, int64(8))
+	_, err = file.Read(b)
+	c.Assert(err, IsNil)
+	c.Assert(b, DeepEquals, []byte("ijklm"))
+
+	// Trivial seek forward within same chunk. Already
+	// got the data, shouldn't touch the database.
+	sent := mgo.GetStats().SentOps
+	o, err = file.Seek(1, os.SEEK_CUR)
+	c.Assert(err, IsNil)
+	c.Assert(o, Equals, int64(14))
+	c.Assert(mgo.GetStats().SentOps, Equals, sent)
+	_, err = file.Read(b)
+	c.Assert(err, IsNil)
+	c.Assert(b, DeepEquals, []byte("opqrs"))
+
+	// Try seeking past end of file.
+	file.Seek(3, os.SEEK_SET)
+	o, err = file.Seek(23, os.SEEK_SET)
+	c.Assert(err, ErrorMatches, "seek past end of file")
+	c.Assert(o, Equals, int64(3))
+}
+
+// TestGridFSRemoveId removes one revision by id and checks the other
+// revision survives and the removed file's chunks are gone.
+func (s *S) TestGridFSRemoveId(c *C) {
+	session, err := mgo.Dial("localhost:40011")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	db := session.DB("mydb")
+
+	gfs := db.GridFS("fs")
+
+	file, err := gfs.Create("myfile.txt")
+	c.Assert(err, IsNil)
+	file.Write([]byte{'1'})
+	file.Close()
+
+	file, err = gfs.Create("myfile.txt")
+	c.Assert(err, IsNil)
+	file.Write([]byte{'2'})
+	id := file.Id()
+	file.Close()
+
+	err = gfs.RemoveId(id)
+	c.Assert(err, IsNil)
+
+	file, err = gfs.Open("myfile.txt")
+	c.Assert(err, IsNil)
+	defer file.Close()
+
+	var b [1]byte
+
+	_, err = file.Read(b[:])
+	c.Assert(err, IsNil)
+	c.Assert(string(b[:]), Equals, "1")
+
+	n, err := db.C("fs.chunks").Find(M{"files_id": id}).Count()
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, 0)
+}
+
+// TestGridFSRemove checks that Remove deletes every revision of a
+// filename along with all of their chunks.
+func (s *S) TestGridFSRemove(c *C) {
+	session, err := mgo.Dial("localhost:40011")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	db := session.DB("mydb")
+
+	gfs := db.GridFS("fs")
+
+	file, err := gfs.Create("myfile.txt")
+	c.Assert(err, IsNil)
+	file.Write([]byte{'1'})
+	file.Close()
+
+	file, err = gfs.Create("myfile.txt")
+	c.Assert(err, IsNil)
+	file.Write([]byte{'2'})
+	file.Close()
+
+	err = gfs.Remove("myfile.txt")
+	c.Assert(err, IsNil)
+
+	_, err = gfs.Open("myfile.txt")
+	c.Assert(err == mgo.ErrNotFound, Equals, true)
+
+	n, err := db.C("fs.chunks").Find(nil).Count()
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, 0)
+}
+
+// TestGridFSOpenNext iterates files via Find/OpenNext, checking sort
+// order, end-of-iteration behavior (f set to nil), and query filtering.
+func (s *S) TestGridFSOpenNext(c *C) {
+	session, err := mgo.Dial("localhost:40011")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	db := session.DB("mydb")
+
+	gfs := db.GridFS("fs")
+
+	file, err := gfs.Create("myfile1.txt")
+	c.Assert(err, IsNil)
+	file.Write([]byte{'1'})
+	file.Close()
+
+	file, err = gfs.Create("myfile2.txt")
+	c.Assert(err, IsNil)
+	file.Write([]byte{'2'})
+	file.Close()
+
+	var f *mgo.GridFile
+	var b [1]byte
+
+	iter := gfs.Find(nil).Sort("-filename").Iter()
+
+	ok := gfs.OpenNext(iter, &f)
+	c.Assert(ok, Equals, true)
+	c.Check(f.Name(), Equals, "myfile2.txt")
+
+	_, err = f.Read(b[:])
+	c.Assert(err, IsNil)
+	c.Assert(string(b[:]), Equals, "2")
+
+	ok = gfs.OpenNext(iter, &f)
+	c.Assert(ok, Equals, true)
+	c.Check(f.Name(), Equals, "myfile1.txt")
+
+	_, err = f.Read(b[:])
+	c.Assert(err, IsNil)
+	c.Assert(string(b[:]), Equals, "1")
+
+	ok = gfs.OpenNext(iter, &f)
+	c.Assert(ok, Equals, false)
+	c.Assert(iter.Close(), IsNil)
+	c.Assert(f, IsNil)
+
+	// Do it again with a more restrictive query to make sure
+	// it's actually taken into account.
+	iter = gfs.Find(bson.M{"filename": "myfile1.txt"}).Iter()
+
+	ok = gfs.OpenNext(iter, &f)
+	c.Assert(ok, Equals, true)
+	c.Check(f.Name(), Equals, "myfile1.txt")
+
+	ok = gfs.OpenNext(iter, &f)
+	c.Assert(ok, Equals, false)
+	c.Assert(iter.Close(), IsNil)
+	c.Assert(f, IsNil)
+}

+ 20 - 0
src/gopkg.in/mgo.v2/harness/certs/client.crt

@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDLjCCAhYCAQcwDQYJKoZIhvcNAQELBQAwXDELMAkGA1UEBhMCR08xDDAKBgNV
+BAgMA01HTzEMMAoGA1UEBwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBlNl
+cnZlcjESMBAGA1UEAwwJbG9jYWxob3N0MCAXDTE1MDkyOTA4NDAzMFoYDzIxMTUw
+OTA1MDg0MDMwWjBcMQswCQYDVQQGEwJHTzEMMAoGA1UECAwDTUdPMQwwCgYDVQQH
+DANNR08xDDAKBgNVBAoMA01HTzEPMA0GA1UECwwGQ2xpZW50MRIwEAYDVQQDDAls
+b2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC0UiQhmT+H
+4IIqrn8SMESDzvcl3rwImwUoRIHlmXkovCIZCbvBCJ1nAu6X5zIN89EPPOjfNrgZ
+616wPgVV/YEQXp+D7+jTAsE5s8JepRXFdecResmvh/+0i2DSuI4QFsuyVAPM1O0I
+AQ5EKgr0weZZmsX6lhPD4uYehV4DxDE0i/8aTAlDoNgRCAJrYFMharRTDdY7bQzd
+7ZYab/pK/3DSmOKxl/AFJ8Enmcj9w1bsvy0fgAgoGEBnBru80PRFpFiqk72TJkXO
+Hx7zcYFpegtKPbAreTCModaCnjP//fskCp4XJrkfH5+01NeeX/r1OfEbjgE/wzzx
+l8NaWnPCmxNfAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAFwYpje3dCLDOIHYjd+5
+CpFOEb+bJsS4ryqm/NblTjIhCLo58hNpMsBqdJHRbHAFRCOE8fvY8yiWtdHeFZcW
+DgVRAXfHONLtN7faZaZQnhy/YzOhLfC/8dUMB0gQA8KXhBCPZqQmexE28AfkEO47
+PwICAxIWINfjm5VnFMkA3b7bDNLHon/pev2m7HqVQ3pRUJQNK3XgFOdDgRrnuXpR
+OKAfHORHVGTh1gf1DVwc0oM+0gnkSiJ1VG0n5pE3zhZ24fmZxu6JQ6X515W7APQI
+/nKVH+f1Fo+ustyTNLt8Bwxi1XmwT7IXwnkVSE9Ff6VejppXRF01V0aaWsa3kU3r
+z3A=
+-----END CERTIFICATE-----

+ 27 - 0
src/gopkg.in/mgo.v2/harness/certs/client.key

@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEogIBAAKCAQEAtFIkIZk/h+CCKq5/EjBEg873Jd68CJsFKESB5Zl5KLwiGQm7
+wQidZwLul+cyDfPRDzzo3za4GetesD4FVf2BEF6fg+/o0wLBObPCXqUVxXXnEXrJ
+r4f/tItg0riOEBbLslQDzNTtCAEORCoK9MHmWZrF+pYTw+LmHoVeA8QxNIv/GkwJ
+Q6DYEQgCa2BTIWq0Uw3WO20M3e2WGm/6Sv9w0pjisZfwBSfBJ5nI/cNW7L8tH4AI
+KBhAZwa7vND0RaRYqpO9kyZFzh8e83GBaXoLSj2wK3kwjKHWgp4z//37JAqeFya5
+Hx+ftNTXnl/69TnxG44BP8M88ZfDWlpzwpsTXwIDAQABAoIBADzCjOAxZkHfuZyu
+La0wTHXpkEfXdJ6ltagq5WY7P6MlOYwcRoK152vlhgXzZl9jL6ely4YjRwec0swq
+KdwezpV4fOGVPmuTuw45bx47HEnr/49ZQ4p9FgF9EYQPofbz53FQc/NaMACJcogv
+bn+osniw+VMFrOVNmGLiZ5p3Smk8zfXE7GRHO8CL5hpWLWO/aK236yytbfWOjM2f
+Pr76ICb26TPRNzYaYUEThU6DtgdLU8pLnJ6QKKaDsjn+zqQzRa+Nvc0c0K8gvWwA
+Afq7t0325+uMSwfpLgCOFldcaZQ5uvteJ0CAVRq1MvStnSHBmMzPlgS+NzsDm6lp
+QH5+rIkCgYEA5j3jrWsv7TueTNbk8Hr/Zwywc+fA2Ex0pBURBHlHyc6ahSXWSCqo
+DtvRGX0GDoK1lCfaIf1qb/DLlGaoHpkEeqcNhXQ+hHs+bZAxfbfBY9+ikit5ZTtl
+QN1tIlhaiyLDnwhkpi/hMw1tiouxJUf84Io61z0sCL4hyZSPCpjn0H0CgYEAyH6F
+Mwl+bCD3VDL/Dr5WSoOr2B/M3bF5SfvdStwy2IPcDJ716je1Ud/2qFCnKGgqvWhJ
++HU15c7CjAWo7/pXq2/pEMD8fDKTYww4Hr4p6duEA7DpbOGkwcUX8u3eknxUWT9F
+jOSbTCvAxuDOC1K3AElyMxVVTNUrFFe8M84R9gsCgYBXmb6RkdG3WlKde7m5gaLB
+K4PLZabq5RQQBe/mmtpkfxYtiLrh1FEC7kG9h+MRDExX5V3KRugDVUOv3+shUSjy
+HbM4ToUm1NloyE78PTj4bfMl2CKlEJcyucy3H5S7kWuKi5/31wnA6d/+sa2huKUP
+Lai7kgu5+9VRJBPUfV7d5QKBgCnhk/13TDtWH5QtGu5/gBMMskbxTaA5xHZZ8H4E
+xXJJCRxx0Dje7jduK145itF8AQGT2W/XPC0HJciOHh4TE2EyfWMMjTF8dyFHmimB
+28uIGWmT+Q7Pi9UWUMxkOAwtgIksGGE4F+CvexOQPjpLSwL6VKqrGCh2lwsm0J+Z
+ulLFAoGAKlC93c6XEj1A31c1+usdEhUe9BrmTqtSYLYpDNpeMLdZ3VctrAZuOQPZ
+4A4gkkQkqqwZGBYYSEqwqiLU6MsBdHPPZ9u3JXLLOQuh1xGeaKylvHj7qx6iT0Xo
+I+FkJ6/3JeMgOina/+wlzD4oyQpqR4Mnh+TuLkDfQTgY+Lg0WPk=
+-----END RSA PRIVATE KEY-----

+ 57 - 0
src/gopkg.in/mgo.v2/harness/certs/client.pem

@@ -0,0 +1,57 @@
+To regenerate the key:
+
+   openssl req -newkey rsa:2048 -new -x509 -days 36500 -nodes -out server.crt -keyout server.key
+   cat server.key server.crt > server.pem
+   openssl genrsa -out client.key 2048
+   openssl req -key client.key -new -out client.req
+   openssl x509 -req -in client.req -CA server.crt -CAkey server.key -days 36500 -CAserial file.srl -out client.crt
+   cat client.key client.crt > client.pem
+
+-----BEGIN RSA PRIVATE KEY-----
+MIIEogIBAAKCAQEAtFIkIZk/h+CCKq5/EjBEg873Jd68CJsFKESB5Zl5KLwiGQm7
+wQidZwLul+cyDfPRDzzo3za4GetesD4FVf2BEF6fg+/o0wLBObPCXqUVxXXnEXrJ
+r4f/tItg0riOEBbLslQDzNTtCAEORCoK9MHmWZrF+pYTw+LmHoVeA8QxNIv/GkwJ
+Q6DYEQgCa2BTIWq0Uw3WO20M3e2WGm/6Sv9w0pjisZfwBSfBJ5nI/cNW7L8tH4AI
+KBhAZwa7vND0RaRYqpO9kyZFzh8e83GBaXoLSj2wK3kwjKHWgp4z//37JAqeFya5
+Hx+ftNTXnl/69TnxG44BP8M88ZfDWlpzwpsTXwIDAQABAoIBADzCjOAxZkHfuZyu
+La0wTHXpkEfXdJ6ltagq5WY7P6MlOYwcRoK152vlhgXzZl9jL6ely4YjRwec0swq
+KdwezpV4fOGVPmuTuw45bx47HEnr/49ZQ4p9FgF9EYQPofbz53FQc/NaMACJcogv
+bn+osniw+VMFrOVNmGLiZ5p3Smk8zfXE7GRHO8CL5hpWLWO/aK236yytbfWOjM2f
+Pr76ICb26TPRNzYaYUEThU6DtgdLU8pLnJ6QKKaDsjn+zqQzRa+Nvc0c0K8gvWwA
+Afq7t0325+uMSwfpLgCOFldcaZQ5uvteJ0CAVRq1MvStnSHBmMzPlgS+NzsDm6lp
+QH5+rIkCgYEA5j3jrWsv7TueTNbk8Hr/Zwywc+fA2Ex0pBURBHlHyc6ahSXWSCqo
+DtvRGX0GDoK1lCfaIf1qb/DLlGaoHpkEeqcNhXQ+hHs+bZAxfbfBY9+ikit5ZTtl
+QN1tIlhaiyLDnwhkpi/hMw1tiouxJUf84Io61z0sCL4hyZSPCpjn0H0CgYEAyH6F
+Mwl+bCD3VDL/Dr5WSoOr2B/M3bF5SfvdStwy2IPcDJ716je1Ud/2qFCnKGgqvWhJ
++HU15c7CjAWo7/pXq2/pEMD8fDKTYww4Hr4p6duEA7DpbOGkwcUX8u3eknxUWT9F
+jOSbTCvAxuDOC1K3AElyMxVVTNUrFFe8M84R9gsCgYBXmb6RkdG3WlKde7m5gaLB
+K4PLZabq5RQQBe/mmtpkfxYtiLrh1FEC7kG9h+MRDExX5V3KRugDVUOv3+shUSjy
+HbM4ToUm1NloyE78PTj4bfMl2CKlEJcyucy3H5S7kWuKi5/31wnA6d/+sa2huKUP
+Lai7kgu5+9VRJBPUfV7d5QKBgCnhk/13TDtWH5QtGu5/gBMMskbxTaA5xHZZ8H4E
+xXJJCRxx0Dje7jduK145itF8AQGT2W/XPC0HJciOHh4TE2EyfWMMjTF8dyFHmimB
+28uIGWmT+Q7Pi9UWUMxkOAwtgIksGGE4F+CvexOQPjpLSwL6VKqrGCh2lwsm0J+Z
+ulLFAoGAKlC93c6XEj1A31c1+usdEhUe9BrmTqtSYLYpDNpeMLdZ3VctrAZuOQPZ
+4A4gkkQkqqwZGBYYSEqwqiLU6MsBdHPPZ9u3JXLLOQuh1xGeaKylvHj7qx6iT0Xo
+I+FkJ6/3JeMgOina/+wlzD4oyQpqR4Mnh+TuLkDfQTgY+Lg0WPk=
+-----END RSA PRIVATE KEY-----
+-----BEGIN CERTIFICATE-----
+MIIDLjCCAhYCAQcwDQYJKoZIhvcNAQELBQAwXDELMAkGA1UEBhMCR08xDDAKBgNV
+BAgMA01HTzEMMAoGA1UEBwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBlNl
+cnZlcjESMBAGA1UEAwwJbG9jYWxob3N0MCAXDTE1MDkyOTA4NDAzMFoYDzIxMTUw
+OTA1MDg0MDMwWjBcMQswCQYDVQQGEwJHTzEMMAoGA1UECAwDTUdPMQwwCgYDVQQH
+DANNR08xDDAKBgNVBAoMA01HTzEPMA0GA1UECwwGQ2xpZW50MRIwEAYDVQQDDAls
+b2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC0UiQhmT+H
+4IIqrn8SMESDzvcl3rwImwUoRIHlmXkovCIZCbvBCJ1nAu6X5zIN89EPPOjfNrgZ
+616wPgVV/YEQXp+D7+jTAsE5s8JepRXFdecResmvh/+0i2DSuI4QFsuyVAPM1O0I
+AQ5EKgr0weZZmsX6lhPD4uYehV4DxDE0i/8aTAlDoNgRCAJrYFMharRTDdY7bQzd
+7ZYab/pK/3DSmOKxl/AFJ8Enmcj9w1bsvy0fgAgoGEBnBru80PRFpFiqk72TJkXO
+Hx7zcYFpegtKPbAreTCModaCnjP//fskCp4XJrkfH5+01NeeX/r1OfEbjgE/wzzx
+l8NaWnPCmxNfAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAFwYpje3dCLDOIHYjd+5
+CpFOEb+bJsS4ryqm/NblTjIhCLo58hNpMsBqdJHRbHAFRCOE8fvY8yiWtdHeFZcW
+DgVRAXfHONLtN7faZaZQnhy/YzOhLfC/8dUMB0gQA8KXhBCPZqQmexE28AfkEO47
+PwICAxIWINfjm5VnFMkA3b7bDNLHon/pev2m7HqVQ3pRUJQNK3XgFOdDgRrnuXpR
+OKAfHORHVGTh1gf1DVwc0oM+0gnkSiJ1VG0n5pE3zhZ24fmZxu6JQ6X515W7APQI
+/nKVH+f1Fo+ustyTNLt8Bwxi1XmwT7IXwnkVSE9Ff6VejppXRF01V0aaWsa3kU3r
+z3A=
+-----END CERTIFICATE-----
+

+ 17 - 0
src/gopkg.in/mgo.v2/harness/certs/client.req

@@ -0,0 +1,17 @@
+-----BEGIN CERTIFICATE REQUEST-----
+MIICoTCCAYkCAQAwXDELMAkGA1UEBhMCR08xDDAKBgNVBAgMA01HTzEMMAoGA1UE
+BwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBkNsaWVudDESMBAGA1UEAwwJ
+bG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAtFIkIZk/
+h+CCKq5/EjBEg873Jd68CJsFKESB5Zl5KLwiGQm7wQidZwLul+cyDfPRDzzo3za4
+GetesD4FVf2BEF6fg+/o0wLBObPCXqUVxXXnEXrJr4f/tItg0riOEBbLslQDzNTt
+CAEORCoK9MHmWZrF+pYTw+LmHoVeA8QxNIv/GkwJQ6DYEQgCa2BTIWq0Uw3WO20M
+3e2WGm/6Sv9w0pjisZfwBSfBJ5nI/cNW7L8tH4AIKBhAZwa7vND0RaRYqpO9kyZF
+zh8e83GBaXoLSj2wK3kwjKHWgp4z//37JAqeFya5Hx+ftNTXnl/69TnxG44BP8M8
+8ZfDWlpzwpsTXwIDAQABoAAwDQYJKoZIhvcNAQELBQADggEBAKbOFblIscxlXalV
+sEGNm2oz380RN2QoLhN6nKtAiv0jWm6iKhdAhOIQIeaRPhUP3cyi8bcBvLdMeQ3d
+ZYIByB55/R0VSP1vs4qkXJCQegHcpMpyuIzsMV8p3Q4lxzGKyKtPA6Bb5c49p8Sk
+ncD+LL4ymrMEia4cBPsHL9hhFOm4gqDacbU8+ETLTpuoSvUZiw7OwngqhE2r+kMv
+KDweq5TOPeb+ftKzQKrrfB+XVdBoTKYw6CwARpogbc0/7mvottVcJ/0yAgC1fBbM
+vupkohkXwKfjxKl6nKNL3R2GkzHQOh91hglAx5zyybKQn2YMM328Vk4X6csBg+pg
+tb1s0MA=
+-----END CERTIFICATE REQUEST-----

+ 22 - 0
src/gopkg.in/mgo.v2/harness/certs/server.crt

@@ -0,0 +1,22 @@
+-----BEGIN CERTIFICATE-----
+MIIDjTCCAnWgAwIBAgIJAMW+wDfcdzC+MA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV
+BAYTAkdPMQwwCgYDVQQIDANNR08xDDAKBgNVBAcMA01HTzEMMAoGA1UECgwDTUdP
+MQ8wDQYDVQQLDAZTZXJ2ZXIxEjAQBgNVBAMMCWxvY2FsaG9zdDAgFw0xNTA5Mjkw
+ODM0MTBaGA8yMTE1MDkwNTA4MzQxMFowXDELMAkGA1UEBhMCR08xDDAKBgNVBAgM
+A01HTzEMMAoGA1UEBwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBlNlcnZl
+cjESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
+CgKCAQEA/T5W1vTsAF+2gTXP1JKygjM7T/2BXHiJc6DRKVjlshTtPYuC3rpTddDm
+6d86d17LWEo+T2bCT4MzZJhSGAun9peFvehdElRMr57xs7j5V1QYjwadMTBkLQuK
+IAg6cISN1KPUzpUTUKsWIsbx97sA0t0wiEPifROb7nfSMIVQsdz/c9LlY2UNYI+5
+GiU88iDGg2wrdsa3U+l2G2KSx/9uE3c5iFki6bdequLiWmBZ6rxfoaLe4gk1INji
+fKssNsn2i3uJ4i4Tmr3PUc4kxx0mMKuWK3HdlQsMqtpq++HQmHSvsPrbgcjl9HyP
+JiHDsoJ+4O5bbtcE51oQbLh1bZAhYwIDAQABo1AwTjAdBgNVHQ4EFgQUhku/u9Kd
+OAc1L0OR649vCCuQT+0wHwYDVR0jBBgwFoAUhku/u9KdOAc1L0OR649vCCuQT+0w
+DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAw7Bgw3hlWXWSZjLhnSOu
+2mW/UJ2Sj31unHngmgtXwW/04cyzoULb+qmzPe/Z06QMgGIsku1jFBcu0JabQtUG
+TyalpfW77tfnvz238CYdImYwE9ZcIGuZGfhs6ySFN9XpW43B8YM7R8wTNPvOcSPw
+nfjqU6kueN4TTspQg9cKhDss5DcMTIdgJgLbITXhIsrCu6GlKOgtX3HrdMGpQX7s
+UoMXtZVG8pK32vxKWGTZ6DPqESeKjjq74NbYnB3H5U/kDU2dt7LF90C/Umdr9y+C
+W2OJb1WBrf6RTcbt8D6d7P9kOfLPOtyn/cbaA/pfXBMQMHqr7XNXzjnaNU+jB7hL
+yQ==
+-----END CERTIFICATE-----

+ 28 - 0
src/gopkg.in/mgo.v2/harness/certs/server.key

@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQD9PlbW9OwAX7aB
+Nc/UkrKCMztP/YFceIlzoNEpWOWyFO09i4LeulN10Obp3zp3XstYSj5PZsJPgzNk
+mFIYC6f2l4W96F0SVEyvnvGzuPlXVBiPBp0xMGQtC4ogCDpwhI3Uo9TOlRNQqxYi
+xvH3uwDS3TCIQ+J9E5vud9IwhVCx3P9z0uVjZQ1gj7kaJTzyIMaDbCt2xrdT6XYb
+YpLH/24TdzmIWSLpt16q4uJaYFnqvF+hot7iCTUg2OJ8qyw2yfaLe4niLhOavc9R
+ziTHHSYwq5Yrcd2VCwyq2mr74dCYdK+w+tuByOX0fI8mIcOygn7g7ltu1wTnWhBs
+uHVtkCFjAgMBAAECggEASRAfRc1L+Z+jrAu2doIMdnwJdL6S//bW0UFolyFKw+I9
+wC/sBg6D3c3zkS4SVDZJPKPO7mGbVg1oWnGH3eAfCYoV0ACmOY+QwGp/GXcYmRVu
+MHWcDIEFpelaZHt7QNM9iEfsMd3YwMFblZUIYozVZADk66uKQMPTjS2Muur7qRSi
+wuVfSmsVZ5afH3B1Tr96BbmPsHrXLjvNpjO44k2wrnnSPQjUL7+YiZPvtnNW8Fby
+yuo2uoAyjg3+68PYZftOvvNneMsv1uyGlUs6Bk+DVWaqofIztWFdFZyXbHnK2PTk
+eGQt5EsL+RwIck5eoqd5vSE+KyzhhydL0zcpngVQoQKBgQD/Yelvholbz5NQtSy3
+ZoiW1y7hL1BKzvVNHuAMKJ5WOnj5szhjhKxt/wZ+hk0qcAmlV9WAPbf4izbEwPRC
+tnMBQzf1uBxqqbLL6WZ4YAyGrcX3UrT7GXsGfVT4zJjz7oYSw8aPircecw5V4exB
+xa4NF+ki8IycXSkHwvW2R56fRwKBgQD92xpxXtte/rUnmENbQmr0aKg7JEfMoih6
+MdX+f6mfgjMmqj+L4jPTI8/ql8HEy13SQS1534aDSHO+nBqBK5aHUCRMIgSLnTP9
+Xyx9Ngg03SZIkPfykqxQmnZgWkTPMhYS+K1Ao9FGVs8W5jVi7veyAdhHptAcxhP3
+IuxvrxVTBQKBgQCluMPiu0snaOwP04HRAZhhSgIB3tIbuXE1OnPpb/JPwmH+p25Q
+Jig+uN9d+4jXoRyhTv4c2fAoOS6xPwVCxWKbzyLhMTg/fx+ncy4rryhxvRJaDDGl
+QEO1Ul9xlFMs9/vI8YJIY5uxBrimwpStmbn4hSukoLSeQ1X802bfglpMwQKBgD8z
+GTY4Y20XBIrDAaHquy32EEwJEEcF6AXj+l7N8bDgfVOW9xMgUb6zH8RL29Xeu5Do
+4SWCXL66fvZpbr/R1jwB28eIgJExpgvicfUKSqi+lhVi4hfmJDg8/FOopZDf61b1
+ykxZfHSCkDQnRAtJaylKBEpyYUWImtfgPfTgJfLxAoGAc8A/Tl2h/DsdTA+cA5d7
+1e0l64m13ObruSWRczyru4hy8Yq6E/K2rOFw8cYCcFpy24NqNlk+2iXPLRpWm2zt
+9R497zAPvhK/bfPXjvm0j/VjB44lvRTC9hby/RRMHy9UJk4o/UQaD+1IodxZovvk
+SruEA1+5bfBRMW0P+h7Qfe4=
+-----END PRIVATE KEY-----

+ 50 - 0
src/gopkg.in/mgo.v2/harness/certs/server.pem

@@ -0,0 +1,50 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQD9PlbW9OwAX7aB
+Nc/UkrKCMztP/YFceIlzoNEpWOWyFO09i4LeulN10Obp3zp3XstYSj5PZsJPgzNk
+mFIYC6f2l4W96F0SVEyvnvGzuPlXVBiPBp0xMGQtC4ogCDpwhI3Uo9TOlRNQqxYi
+xvH3uwDS3TCIQ+J9E5vud9IwhVCx3P9z0uVjZQ1gj7kaJTzyIMaDbCt2xrdT6XYb
+YpLH/24TdzmIWSLpt16q4uJaYFnqvF+hot7iCTUg2OJ8qyw2yfaLe4niLhOavc9R
+ziTHHSYwq5Yrcd2VCwyq2mr74dCYdK+w+tuByOX0fI8mIcOygn7g7ltu1wTnWhBs
+uHVtkCFjAgMBAAECggEASRAfRc1L+Z+jrAu2doIMdnwJdL6S//bW0UFolyFKw+I9
+wC/sBg6D3c3zkS4SVDZJPKPO7mGbVg1oWnGH3eAfCYoV0ACmOY+QwGp/GXcYmRVu
+MHWcDIEFpelaZHt7QNM9iEfsMd3YwMFblZUIYozVZADk66uKQMPTjS2Muur7qRSi
+wuVfSmsVZ5afH3B1Tr96BbmPsHrXLjvNpjO44k2wrnnSPQjUL7+YiZPvtnNW8Fby
+yuo2uoAyjg3+68PYZftOvvNneMsv1uyGlUs6Bk+DVWaqofIztWFdFZyXbHnK2PTk
+eGQt5EsL+RwIck5eoqd5vSE+KyzhhydL0zcpngVQoQKBgQD/Yelvholbz5NQtSy3
+ZoiW1y7hL1BKzvVNHuAMKJ5WOnj5szhjhKxt/wZ+hk0qcAmlV9WAPbf4izbEwPRC
+tnMBQzf1uBxqqbLL6WZ4YAyGrcX3UrT7GXsGfVT4zJjz7oYSw8aPircecw5V4exB
+xa4NF+ki8IycXSkHwvW2R56fRwKBgQD92xpxXtte/rUnmENbQmr0aKg7JEfMoih6
+MdX+f6mfgjMmqj+L4jPTI8/ql8HEy13SQS1534aDSHO+nBqBK5aHUCRMIgSLnTP9
+Xyx9Ngg03SZIkPfykqxQmnZgWkTPMhYS+K1Ao9FGVs8W5jVi7veyAdhHptAcxhP3
+IuxvrxVTBQKBgQCluMPiu0snaOwP04HRAZhhSgIB3tIbuXE1OnPpb/JPwmH+p25Q
+Jig+uN9d+4jXoRyhTv4c2fAoOS6xPwVCxWKbzyLhMTg/fx+ncy4rryhxvRJaDDGl
+QEO1Ul9xlFMs9/vI8YJIY5uxBrimwpStmbn4hSukoLSeQ1X802bfglpMwQKBgD8z
+GTY4Y20XBIrDAaHquy32EEwJEEcF6AXj+l7N8bDgfVOW9xMgUb6zH8RL29Xeu5Do
+4SWCXL66fvZpbr/R1jwB28eIgJExpgvicfUKSqi+lhVi4hfmJDg8/FOopZDf61b1
+ykxZfHSCkDQnRAtJaylKBEpyYUWImtfgPfTgJfLxAoGAc8A/Tl2h/DsdTA+cA5d7
+1e0l64m13ObruSWRczyru4hy8Yq6E/K2rOFw8cYCcFpy24NqNlk+2iXPLRpWm2zt
+9R497zAPvhK/bfPXjvm0j/VjB44lvRTC9hby/RRMHy9UJk4o/UQaD+1IodxZovvk
+SruEA1+5bfBRMW0P+h7Qfe4=
+-----END PRIVATE KEY-----
+-----BEGIN CERTIFICATE-----
+MIIDjTCCAnWgAwIBAgIJAMW+wDfcdzC+MA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV
+BAYTAkdPMQwwCgYDVQQIDANNR08xDDAKBgNVBAcMA01HTzEMMAoGA1UECgwDTUdP
+MQ8wDQYDVQQLDAZTZXJ2ZXIxEjAQBgNVBAMMCWxvY2FsaG9zdDAgFw0xNTA5Mjkw
+ODM0MTBaGA8yMTE1MDkwNTA4MzQxMFowXDELMAkGA1UEBhMCR08xDDAKBgNVBAgM
+A01HTzEMMAoGA1UEBwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBlNlcnZl
+cjESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
+CgKCAQEA/T5W1vTsAF+2gTXP1JKygjM7T/2BXHiJc6DRKVjlshTtPYuC3rpTddDm
+6d86d17LWEo+T2bCT4MzZJhSGAun9peFvehdElRMr57xs7j5V1QYjwadMTBkLQuK
+IAg6cISN1KPUzpUTUKsWIsbx97sA0t0wiEPifROb7nfSMIVQsdz/c9LlY2UNYI+5
+GiU88iDGg2wrdsa3U+l2G2KSx/9uE3c5iFki6bdequLiWmBZ6rxfoaLe4gk1INji
+fKssNsn2i3uJ4i4Tmr3PUc4kxx0mMKuWK3HdlQsMqtpq++HQmHSvsPrbgcjl9HyP
+JiHDsoJ+4O5bbtcE51oQbLh1bZAhYwIDAQABo1AwTjAdBgNVHQ4EFgQUhku/u9Kd
+OAc1L0OR649vCCuQT+0wHwYDVR0jBBgwFoAUhku/u9KdOAc1L0OR649vCCuQT+0w
+DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAw7Bgw3hlWXWSZjLhnSOu
+2mW/UJ2Sj31unHngmgtXwW/04cyzoULb+qmzPe/Z06QMgGIsku1jFBcu0JabQtUG
+TyalpfW77tfnvz238CYdImYwE9ZcIGuZGfhs6ySFN9XpW43B8YM7R8wTNPvOcSPw
+nfjqU6kueN4TTspQg9cKhDss5DcMTIdgJgLbITXhIsrCu6GlKOgtX3HrdMGpQX7s
+UoMXtZVG8pK32vxKWGTZ6DPqESeKjjq74NbYnB3H5U/kDU2dt7LF90C/Umdr9y+C
+W2OJb1WBrf6RTcbt8D6d7P9kOfLPOtyn/cbaA/pfXBMQMHqr7XNXzjnaNU+jB7hL
+yQ==
+-----END CERTIFICATE-----

+ 57 - 0
src/gopkg.in/mgo.v2/harness/daemons/.env

@@ -0,0 +1,57 @@
+
+set -e
+
+MONGOVERSION=$(mongod --version | sed -n 's/.*v\([0-9]\+\.[0-9]\+\)\..*/\1/p')
+MONGOMAJOR=$(echo $MONGOVERSION | sed 's/\([0-9]\+\)\..*/\1/')
+MONGOMINOR=$(echo $MONGOVERSION | sed 's/[0-9]\+\.\([0-9]\+\)/\1/')
+
+versionAtLeast() {
+	TESTMAJOR="$1"
+	TESTMINOR="$2"
+	if [ "$MONGOMAJOR" -gt "$TESTMAJOR" ]; then
+		return 0
+	fi
+	if [ "$MONGOMAJOR" -lt "$TESTMAJOR" ]; then
+		return 100
+	fi
+	if [ "$MONGOMINOR" -ge "$TESTMINOR" ]; then
+		return 0
+	fi
+	return 100
+}
+
+COMMONDOPTSNOIP="
+	--nohttpinterface
+	--noprealloc
+	--nojournal
+	--smallfiles
+	--nssize=1
+	--oplogSize=1
+	--dbpath ./db
+	"
+COMMONDOPTS="
+	$COMMONDOPTSNOIP
+	--bind_ip=127.0.0.1
+	"
+COMMONCOPTS="
+	$COMMONDOPTS
+	"
+COMMONSOPTS="
+	--chunkSize 1
+	--bind_ip=127.0.0.1
+	"
+
+if versionAtLeast 3 2; then
+	# 3.2 doesn't like --nojournal on config servers.
+	#COMMONCOPTS="$(echo "$COMMONCOPTS" | sed '/--nojournal/d')"
+	# Using a hacked version of MongoDB 3.2 for now.
+
+	# Go back to MMAPv1 so it's not super sluggish. :-(
+	COMMONDOPTSNOIP="--storageEngine=mmapv1 $COMMONDOPTSNOIP"
+	COMMONDOPTS="--storageEngine=mmapv1 $COMMONDOPTS"
+	COMMONCOPTS="--storageEngine=mmapv1 $COMMONCOPTS"
+fi
+
+if [ "$TRAVIS" = true ]; then
+	set -x
+fi

+ 0 - 0
src/gopkg.in/mgo.v2/harness/daemons/cfg1/db/.empty


二進制
src/gopkg.in/mgo.v2/harness/daemons/cfg1/db/journal/tempLatencyTest


+ 0 - 0
src/gopkg.in/mgo.v2/harness/daemons/cfg1/db/mongod.lock


+ 3 - 0
src/gopkg.in/mgo.v2/harness/daemons/cfg1/log/run

@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt

+ 8 - 0
src/gopkg.in/mgo.v2/harness/daemons/cfg1/run

@@ -0,0 +1,8 @@
+#!/bin/sh
+
+. ../.env
+
+exec mongod $COMMONCOPTS \
+	--port 40101 \
+	--configsvr
+

+ 0 - 0
src/gopkg.in/mgo.v2/harness/daemons/cfg2/db/.empty


+ 3 - 0
src/gopkg.in/mgo.v2/harness/daemons/cfg2/log/run

@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt

+ 8 - 0
src/gopkg.in/mgo.v2/harness/daemons/cfg2/run

@@ -0,0 +1,8 @@
+#!/bin/sh
+
+. ../.env
+
+exec mongod $COMMONCOPTS \
+	--port 40102 \
+	--configsvr
+

+ 0 - 0
src/gopkg.in/mgo.v2/harness/daemons/cfg3/db/.empty


+ 3 - 0
src/gopkg.in/mgo.v2/harness/daemons/cfg3/log/run

@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt

+ 9 - 0
src/gopkg.in/mgo.v2/harness/daemons/cfg3/run

@@ -0,0 +1,9 @@
+#!/bin/sh
+
+. ../.env
+
+exec mongod $COMMONCOPTS \
+	--port 40103 \
+	--configsvr \
+	--auth \
+	--keyFile=../../certs/keyfile

+ 0 - 0
src/gopkg.in/mgo.v2/harness/daemons/db1/db/.empty


+ 3 - 0
src/gopkg.in/mgo.v2/harness/daemons/db1/log/run

@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt

+ 15 - 0
src/gopkg.in/mgo.v2/harness/daemons/db1/run

@@ -0,0 +1,15 @@
+#!/bin/sh
+
+. ../.env
+
+if [ x$NOIPV6 = x1 ]; then
+	BINDIP="127.0.0.1"
+else
+	BINDIP="127.0.0.1,::1"
+fi
+
+exec mongod $COMMONDOPTSNOIP \
+	--shardsvr \
+	--bind_ip=$BINDIP \
+	--port 40001 \
+	--ipv6

+ 0 - 0
src/gopkg.in/mgo.v2/harness/daemons/db2/db/.empty


+ 3 - 0
src/gopkg.in/mgo.v2/harness/daemons/db2/log/run

@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt

+ 8 - 0
src/gopkg.in/mgo.v2/harness/daemons/db2/run

@@ -0,0 +1,8 @@
+#!/bin/sh
+
+. ../.env
+
+exec mongod $COMMONDOPTS \
+	--shardsvr \
+	--port 40002 \
+	--auth

+ 0 - 0
src/gopkg.in/mgo.v2/harness/daemons/db3/db/.empty


+ 3 - 0
src/gopkg.in/mgo.v2/harness/daemons/db3/log/run

@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt

+ 12 - 0
src/gopkg.in/mgo.v2/harness/daemons/db3/run

@@ -0,0 +1,12 @@
+#!/bin/sh
+
+. ../.env
+
+exec mongod $COMMONDOPTS \
+	--shardsvr \
+	--port 40003 \
+	--auth \
+	--sslMode preferSSL \
+	--sslCAFile ../../certs/server.pem \
+	--sslPEMKeyFile ../../certs/server.pem
+

+ 0 - 0
src/gopkg.in/mgo.v2/harness/daemons/rs1a/db/.empty


+ 3 - 0
src/gopkg.in/mgo.v2/harness/daemons/rs1a/log/run

@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt

+ 8 - 0
src/gopkg.in/mgo.v2/harness/daemons/rs1a/run

@@ -0,0 +1,8 @@
+#!/bin/sh
+
+. ../.env
+
+exec mongod $COMMONDOPTS \
+	--shardsvr \
+	--replSet rs1 \
+	--port 40011

+ 0 - 0
src/gopkg.in/mgo.v2/harness/daemons/rs1b/db/.empty


+ 3 - 0
src/gopkg.in/mgo.v2/harness/daemons/rs1b/log/run

@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt

+ 8 - 0
src/gopkg.in/mgo.v2/harness/daemons/rs1b/run

@@ -0,0 +1,8 @@
+#!/bin/sh
+
+. ../.env
+
+exec mongod $COMMONDOPTS \
+	--shardsvr \
+	--replSet rs1 \
+	--port 40012

+ 0 - 0
src/gopkg.in/mgo.v2/harness/daemons/rs1c/db/.empty


+ 3 - 0
src/gopkg.in/mgo.v2/harness/daemons/rs1c/log/run

@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt

+ 8 - 0
src/gopkg.in/mgo.v2/harness/daemons/rs1c/run

@@ -0,0 +1,8 @@
+#!/bin/sh
+
+. ../.env
+
+exec mongod $COMMONDOPTS \
+	--shardsvr \
+	--replSet rs1 \
+	--port 40013

+ 0 - 0
src/gopkg.in/mgo.v2/harness/daemons/rs2a/db/.empty


+ 3 - 0
src/gopkg.in/mgo.v2/harness/daemons/rs2a/log/run

@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt

+ 8 - 0
src/gopkg.in/mgo.v2/harness/daemons/rs2a/run

@@ -0,0 +1,8 @@
+#!/bin/sh
+
+. ../.env
+
+exec mongod $COMMONDOPTS \
+	--shardsvr \
+	--replSet rs2 \
+	--port 40021

+ 0 - 0
src/gopkg.in/mgo.v2/harness/daemons/rs2b/db/.empty


+ 3 - 0
src/gopkg.in/mgo.v2/harness/daemons/rs2b/log/run

@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt

+ 8 - 0
src/gopkg.in/mgo.v2/harness/daemons/rs2b/run

@@ -0,0 +1,8 @@
+#!/bin/sh
+
+. ../.env
+
+exec mongod $COMMONDOPTS \
+	--shardsvr \
+	--replSet rs2 \
+	--port 40022

+ 0 - 0
src/gopkg.in/mgo.v2/harness/daemons/rs2c/db/.empty


+ 3 - 0
src/gopkg.in/mgo.v2/harness/daemons/rs2c/log/run

@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt

+ 8 - 0
src/gopkg.in/mgo.v2/harness/daemons/rs2c/run

@@ -0,0 +1,8 @@
+#!/bin/sh
+
+. ../.env
+
+exec mongod $COMMONDOPTS \
+	--shardsvr \
+	--replSet rs2 \
+	--port 40023

+ 0 - 0
src/gopkg.in/mgo.v2/harness/daemons/rs3a/db/.empty


+ 3 - 0
src/gopkg.in/mgo.v2/harness/daemons/rs3a/log/run

@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt

+ 9 - 0
src/gopkg.in/mgo.v2/harness/daemons/rs3a/run

@@ -0,0 +1,9 @@
+#!/bin/sh
+
+. ../.env
+
+exec mongod $COMMONDOPTS \
+	--shardsvr \
+	--replSet rs3 \
+	--port 40031 \
+	--keyFile=../../certs/keyfile

+ 0 - 0
src/gopkg.in/mgo.v2/harness/daemons/rs3b/db/.empty


+ 3 - 0
src/gopkg.in/mgo.v2/harness/daemons/rs3b/log/run

@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt

+ 9 - 0
src/gopkg.in/mgo.v2/harness/daemons/rs3b/run

@@ -0,0 +1,9 @@
+#!/bin/sh
+
+. ../.env
+
+exec mongod $COMMONDOPTS \
+	--shardsvr \
+	--replSet rs3 \
+	--port 40032 \
+	--keyFile=../../certs/keyfile

+ 0 - 0
src/gopkg.in/mgo.v2/harness/daemons/rs3c/db/.empty


+ 3 - 0
src/gopkg.in/mgo.v2/harness/daemons/rs3c/log/run

@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt

+ 9 - 0
src/gopkg.in/mgo.v2/harness/daemons/rs3c/run

@@ -0,0 +1,9 @@
+#!/bin/sh
+
+. ../.env
+
+exec mongod $COMMONDOPTS \
+	--shardsvr \
+	--replSet rs3 \
+	--port 40033 \
+	--keyFile=../../certs/keyfile

+ 0 - 0
src/gopkg.in/mgo.v2/harness/daemons/rs4a/db/.empty


+ 3 - 0
src/gopkg.in/mgo.v2/harness/daemons/rs4a/log/run

@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt

+ 8 - 0
src/gopkg.in/mgo.v2/harness/daemons/rs4a/run

@@ -0,0 +1,8 @@
+#!/bin/sh
+
+. ../.env
+
+exec mongod $COMMONDOPTS \
+	--shardsvr \
+	--replSet rs4 \
+	--port 40041

+ 3 - 0
src/gopkg.in/mgo.v2/harness/daemons/s1/log/run

@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt

+ 7 - 0
src/gopkg.in/mgo.v2/harness/daemons/s1/run

@@ -0,0 +1,7 @@
+#!/bin/sh
+
+. ../.env
+
+exec mongos $COMMONSOPTS \
+	--port 40201 \
+	--configdb 127.0.0.1:40101

+ 3 - 0
src/gopkg.in/mgo.v2/harness/daemons/s2/log/run

@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt

部分文件因文件數量過多而無法顯示