浏览代码

三方包-备份

zhengkun 3 年之前
父节点
当前提交
287d7fb144
共有 100 个文件被更改,包括 13770 次插入5 次删除
  1. 5 5
      sensitive/src/config.json
  2. 26 0
      sensitive/src/elastic.v1/.gitignore
  3. 21 0
      sensitive/src/elastic.v1/.travis.yml
  4. 27 0
      sensitive/src/elastic.v1/CONTRIBUTING.md
  5. 20 0
      sensitive/src/elastic.v1/LICENSE
  6. 503 0
      sensitive/src/elastic.v1/README.md
  7. 137 0
      sensitive/src/elastic.v1/alias.go
  8. 123 0
      sensitive/src/elastic.v1/alias_test.go
  9. 193 0
      sensitive/src/elastic.v1/aliases.go
  10. 146 0
      sensitive/src/elastic.v1/aliases_test.go
  11. 336 0
      sensitive/src/elastic.v1/bulk.go
  12. 108 0
      sensitive/src/elastic.v1/bulk_delete_request.go
  13. 42 0
      sensitive/src/elastic.v1/bulk_delete_request_test.go
  14. 173 0
      sensitive/src/elastic.v1/bulk_index_request.go
  15. 63 0
      sensitive/src/elastic.v1/bulk_index_request_test.go
  16. 13 0
      sensitive/src/elastic.v1/bulk_request.go
  17. 361 0
      sensitive/src/elastic.v1/bulk_test.go
  18. 240 0
      sensitive/src/elastic.v1/bulk_update_request.go
  19. 79 0
      sensitive/src/elastic.v1/bulk_update_request_test.go
  20. 358 0
      sensitive/src/elastic.v1/client.go
  21. 89 0
      sensitive/src/elastic.v1/client_test.go
  22. 209 0
      sensitive/src/elastic.v1/cluster_health.go
  23. 68 0
      sensitive/src/elastic.v1/cluster_health_test.go
  24. 217 0
      sensitive/src/elastic.v1/cluster_state.go
  25. 78 0
      sensitive/src/elastic.v1/cluster_state_test.go
  26. 181 0
      sensitive/src/elastic.v1/count.go
  27. 85 0
      sensitive/src/elastic.v1/count_test.go
  28. 96 0
      sensitive/src/elastic.v1/create_index.go
  29. 145 0
      sensitive/src/elastic.v1/delete.go
  30. 320 0
      sensitive/src/elastic.v1/delete_by_query.go
  31. 76 0
      sensitive/src/elastic.v1/delete_by_query_test.go
  32. 66 0
      sensitive/src/elastic.v1/delete_index.go
  33. 142 0
      sensitive/src/elastic.v1/delete_template.go
  34. 83 0
      sensitive/src/elastic.v1/delete_test.go
  35. 55 0
      sensitive/src/elastic.v1/doc.go
  36. 39 0
      sensitive/src/elastic.v1/errors.go
  37. 461 0
      sensitive/src/elastic.v1/example_test.go
  38. 78 0
      sensitive/src/elastic.v1/exists.go
  39. 70 0
      sensitive/src/elastic.v1/fetch_source_context.go
  40. 88 0
      sensitive/src/elastic.v1/fetch_source_context_test.go
  41. 9 0
      sensitive/src/elastic.v1/filter.go
  42. 122 0
      sensitive/src/elastic.v1/flush.go
  43. 22 0
      sensitive/src/elastic.v1/flush_test.go
  44. 43 0
      sensitive/src/elastic.v1/geo_point.go
  45. 20 0
      sensitive/src/elastic.v1/geo_point_test.go
  46. 204 0
      sensitive/src/elastic.v1/get.go
  47. 138 0
      sensitive/src/elastic.v1/get_template.go
  48. 137 0
      sensitive/src/elastic.v1/get_test.go
  49. 492 0
      sensitive/src/elastic.v1/highlight.go
  50. 169 0
      sensitive/src/elastic.v1/highlight_test.go
  51. 244 0
      sensitive/src/elastic.v1/index.go
  52. 170 0
      sensitive/src/elastic.v1/index_close.go
  53. 57 0
      sensitive/src/elastic.v1/index_exists.go
  54. 171 0
      sensitive/src/elastic.v1/index_open.go
  55. 387 0
      sensitive/src/elastic.v1/index_test.go
  56. 206 0
      sensitive/src/elastic.v1/multi_get.go
  57. 95 0
      sensitive/src/elastic.v1/multi_get_test.go
  58. 130 0
      sensitive/src/elastic.v1/multi_search.go
  59. 195 0
      sensitive/src/elastic.v1/multi_search_test.go
  60. 162 0
      sensitive/src/elastic.v1/optimize.go
  61. 49 0
      sensitive/src/elastic.v1/optimize_test.go
  62. 131 0
      sensitive/src/elastic.v1/ping.go
  63. 73 0
      sensitive/src/elastic.v1/ping_test.go
  64. 175 0
      sensitive/src/elastic.v1/put_template.go
  65. 14 0
      sensitive/src/elastic.v1/query.go
  66. 126 0
      sensitive/src/elastic.v1/refresh.go
  67. 49 0
      sensitive/src/elastic.v1/refresh_test.go
  68. 60 0
      sensitive/src/elastic.v1/request.go
  69. 36 0
      sensitive/src/elastic.v1/rescore.go
  70. 55 0
      sensitive/src/elastic.v1/rescorer.go
  71. 326 0
      sensitive/src/elastic.v1/scan.go
  72. 185 0
      sensitive/src/elastic.v1/scan_test.go
  73. 273 0
      sensitive/src/elastic.v1/scroll.go
  74. 110 0
      sensitive/src/elastic.v1/scroll_test.go
  75. 513 0
      sensitive/src/elastic.v1/search.go
  76. 761 0
      sensitive/src/elastic.v1/search_aggs.go
  77. 100 0
      sensitive/src/elastic.v1/search_aggs_avg.go
  78. 32 0
      sensitive/src/elastic.v1/search_aggs_avg_test.go
  79. 119 0
      sensitive/src/elastic.v1/search_aggs_cardinality.go
  80. 45 0
      sensitive/src/elastic.v1/search_aggs_cardinality_test.go
  81. 57 0
      sensitive/src/elastic.v1/search_aggs_children.go
  82. 34 0
      sensitive/src/elastic.v1/search_aggs_children_test.go
  83. 294 0
      sensitive/src/elastic.v1/search_aggs_date_histogram.go
  84. 19 0
      sensitive/src/elastic.v1/search_aggs_date_histogram_test.go
  85. 234 0
      sensitive/src/elastic.v1/search_aggs_date_range.go
  86. 102 0
      sensitive/src/elastic.v1/search_aggs_date_range_test.go
  87. 99 0
      sensitive/src/elastic.v1/search_aggs_extended_stats.go
  88. 32 0
      sensitive/src/elastic.v1/search_aggs_extended_stats_test.go
  89. 58 0
      sensitive/src/elastic.v1/search_aggs_filter.go
  90. 36 0
      sensitive/src/elastic.v1/search_aggs_filter_test.go
  91. 76 0
      sensitive/src/elastic.v1/search_aggs_filters.go
  92. 37 0
      sensitive/src/elastic.v1/search_aggs_filters_test.go
  93. 95 0
      sensitive/src/elastic.v1/search_aggs_geo_bounds.go
  94. 32 0
      sensitive/src/elastic.v1/search_aggs_geo_bounds_test.go
  95. 180 0
      sensitive/src/elastic.v1/search_aggs_geo_distance.go
  96. 38 0
      sensitive/src/elastic.v1/search_aggs_geo_distance_test.go
  97. 56 0
      sensitive/src/elastic.v1/search_aggs_global.go
  98. 19 0
      sensitive/src/elastic.v1/search_aggs_global_test.go
  99. 228 0
      sensitive/src/elastic.v1/search_aggs_histogram.go
  100. 19 0
      sensitive/src/elastic.v1/search_aggs_histogram_test.go

+ 5 - 5
sensitive/src/config.json

@@ -1,9 +1,9 @@
 {
-  "udpport": ":1782",
+  "udpport": ":1762",
   "save_mgodb": {
     "addr": "127.0.0.1:27017",
-    "db": "zhengkun",
-    "coll": "bidding",
+    "db": "qfw",
+    "coll": "result_20210109",
     "pool": 10
   },
   "qfw_mgodb": {
@@ -11,8 +11,8 @@
     "db": "mixdata",
     "coll": "qyxy_std",
     "pool": 10,
-    "username": "",
-    "password": ""
+    "username": "zhengkun",
+    "password": "zk@123123"
   },
   "fields": {
     "buyer": 1,

+ 26 - 0
sensitive/src/elastic.v1/.gitignore

@@ -0,0 +1,26 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+/cluster-test
+/generator
+/tmp

+ 21 - 0
sensitive/src/elastic.v1/.travis.yml

@@ -0,0 +1,21 @@
+sudo: false
+
+language: go
+
+go:
+  - 1.3
+  - 1.4
+  - tip
+
+env:
+  matrix:
+    - ES_VERSION=1.3.9
+    - ES_VERSION=1.4.2
+    - ES_VERSION=1.4.4
+
+before_script:
+  - mkdir ${HOME}/elasticsearch
+  - wget http://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-${ES_VERSION}.tar.gz
+  - tar -xzf elasticsearch-${ES_VERSION}.tar.gz -C ${HOME}/elasticsearch
+  - ${HOME}/elasticsearch/elasticsearch-${ES_VERSION}/bin/elasticsearch >& /dev/null &
+  - sleep 15

+ 27 - 0
sensitive/src/elastic.v1/CONTRIBUTING.md

@@ -0,0 +1,27 @@
+# How to contribute
+
+Elastic is an open-source project and we are looking forward to each
+contribution.
+
+## Your Pull Request
+
+To make it easy to review and understand your changes, please keep the
+following things in mind before submitting your pull request:
+
+* Work on the latest possible state of `olivere/elastic`.
+* Create a branch dedicated to your change.
+* If possible, write a test case which confirms your change.
+* Make sure your changes and your tests work with all recent versions of
+  Elasticsearch. At the moment, we're targeting the current and the previous
+  release, e.g. the 1.4 and the 1.3 branch.
+* Test your changes before creating a pull request (`go test ./...`).
+* Don't mix several features or bug fixes in one pull request.
+* Create a meaningful commit message.
+* Explain your change, e.g. provide a link to the issue you are fixing and
+  probably a link to the Elasticsearch documentation and/or source code.
+* Format your source with `go fmt`.
+
+## Additional Resources
+
+* [GitHub documentation](http://help.github.com/)
+* [GitHub pull request documentation](http://help.github.com/send-pull-requests/)

+ 20 - 0
sensitive/src/elastic.v1/LICENSE

@@ -0,0 +1,20 @@
+The MIT License (MIT)
+Copyright © 2012-2014 Oliver Eilhard
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the “Software”), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+IN THE SOFTWARE.

+ 503 - 0
sensitive/src/elastic.v1/README.md

@@ -0,0 +1,503 @@
+# Elastic
+
+Elastic is an [Elasticsearch](http://www.elasticsearch.org/) client for [Go](http://www.golang.org/).
+
+[![Build Status](https://travis-ci.org/olivere/elastic.svg?branch=release-branch.v1)](https://travis-ci.org/olivere/elastic)
+[![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](http://godoc.org/gopkg.in/olivere/elastic.v1)
+[![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/olivere/elastic/master/LICENSE)
+
+
+## Releases
+
+**Notice**: This is version 1.0 of Elastic. There is a newer version
+available on [https://github.com/olivere/elastic](https://github.com/olivere/elastic).
+I encourage anyone to use the newest version.
+
+However, if you want to continue using the 1.0 version, you need to go-get
+a new URL and switch your import path. We're using [gopkg.in](http://gokpg.in/) for that.
+Here's how to use Elastic version 1:
+
+```sh
+$ go get -u gopkg.in/olivere/elastic.v1
+```
+
+In your Go code:
+
+```go
+import "gopkg.in/olivere/elastic.v1"
+```
+
+If you instead use `github.com/olivere/elastic` in your code base, you are
+following master. I try to keep master stable, but things might
+break now and then.
+
+## Status
+
+We use Elastic in production for more than two years now.
+Although Elastic is quite stable from our experience, we don't have
+a stable API yet. The reason for this is that Elasticsearch changes quite
+often and at a fast pace. At this moment we focus on features, not on a
+stable API. Having said that, there have been no huge changes for the last
+12 months that required you to rewrite your application big time.
+More often than not it's renaming APIs and adding/removing features
+so that we are in sync with the Elasticsearch API.
+
+Elastic supports and has been tested in production with
+the following Elasticsearch versions: 0.90, 1.0, 1.1, 1.2, 1.3, and 1.4.
+
+Elasticsearch has quite a few features. A lot of them are
+not yet implemented in Elastic (see below for details).
+I add features and APIs as required. It's straightforward
+to implement missing pieces. I'm accepting pull requests :-)
+
+Having said that, I hope you find the project useful. Fork it
+as you like.
+
+## Usage
+
+The first thing you do is to create a Client. The client takes a http.Client
+and (optionally) a list of URLs to the Elasticsearch servers as arguments.
+If the list of URLs is empty, http://localhost:9200 is used by default.
+You typically create one client for your app.
+
+```go
+client, err := elastic.NewClient(http.DefaultClient)
+if err != nil {
+    // Handle error
+}
+```
+
+Notice that you can pass your own http.Client implementation here. You can
+also pass more than one URL to a client. Elastic pings the URLs periodically
+and takes the first to succeed. By doing this periodically, Elastic provides
+automatic failover, e.g. when an Elasticsearch server goes down during
+updates.
+
+If no Elasticsearch server is available, services will fail when creating
+a new request and will return `ErrNoClient`. While this method is not very
+sophisticated and might result in timeouts, it is robust enough for our
+use cases. Pull requests are welcome.
+
+```go
+client, err := elastic.NewClient(http.DefaultClient, "http://1.2.3.4:9200", "http://1.2.3.5:9200")
+if err != nil {
+    // Handle error
+}
+```
+
+A Client provides services. The services usually come with a variety of
+methods to prepare the query and a `Do` function to execute it against the
+Elasticsearch REST interface and return a response. Here is an example
+of the IndexExists service that checks if a given index already exists.
+
+```go
+exists, err := client.IndexExists("twitter").Do()
+if err != nil {
+    // Handle error
+}
+if !exists {
+    // Index does not exist yet.
+}
+```
+
+Look up the documentation for Client to get an idea of the services provided
+and what kinds of responses you get when executing the `Do` function of a service.
+
+Here's a longer example:
+
+```go
+// Import Elastic
+import (
+  "github.com/olivere/elastic"
+)
+
+// Obtain a client. You can provide your own HTTP client here.
+client, err := elastic.NewClient(http.DefaultClient)
+if err != nil {
+    // Handle error
+    panic(err)
+}
+
+// Ping the Elasticsearch server to get e.g. the version number
+info, code, err := client.Ping().Do()
+if err != nil {
+    // Handle error
+    panic(err)
+}
+fmt.Printf("Elasticsearch returned with code %d and version %s", code, info.Version.Number)
+
+// Getting the ES version number is quite common, so there's a shortcut
+esversion, err := client.ElasticsearchVersion("http://127.0.0.1:9200")
+if err != nil {
+    // Handle error
+    panic(err)
+}
+fmt.Printf("Elasticsearch version %s", esversion)
+
+// Use the IndexExists service to check if a specified index exists.
+exists, err := client.IndexExists("twitter").Do()
+if err != nil {
+    // Handle error
+    panic(err)
+}
+if !exists {
+    // Create a new index.
+    createIndex, err := client.CreateIndex("twitter").Do()
+    if err != nil {
+        // Handle error
+        panic(err)
+    }
+    if !createIndex.Acknowledged {
+        // Not acknowledged
+    }
+}
+
+// Index a tweet (using JSON serialization)
+tweet1 := Tweet{User: "olivere", Message: "Take Five", Retweets: 0}
+put1, err := client.Index().
+    Index("twitter").
+    Type("tweet").
+    Id("1").
+    BodyJson(tweet1).
+    Do()
+if err != nil {
+    // Handle error
+    panic(err)
+}
+fmt.Printf("Indexed tweet %s to index %s, type %s\n", put1.Id, put1.Index, put1.Type)
+
+// Index a second tweet (by string)
+tweet2 := `{"user" : "olivere", "message" : "It's a Raggy Waltz"}`
+put2, err := client.Index().
+    Index("twitter").
+    Type("tweet").
+    Id("2").
+    BodyString(tweet2).
+    Do()
+if err != nil {
+    // Handle error
+    panic(err)
+}
+fmt.Printf("Indexed tweet %s to index %s, type %s\n", put2.Id, put2.Index, put2.Type)
+
+// Get tweet with specified ID
+get1, err := client.Get().
+    Index("twitter").
+    Type("tweet").
+    Id("1").
+    Do()
+if err != nil {
+    // Handle error
+    panic(err)
+}
+if get1.Found {
+    fmt.Printf("Got document %s in version %d from index %s, type %s\n", get1.Id, get1.Version, get1.Index, get1.Type)
+}
+
+// Flush to make sure the documents got written.
+_, err = client.Flush().Index("twitter").Do()
+if err != nil {
+    panic(err)
+}
+
+// Search with a term query
+termQuery := elastic.NewTermQuery("user", "olivere")
+searchResult, err := client.Search().
+    Index("twitter").   // search in index "twitter"
+    Query(&termQuery).  // specify the query
+    Sort("user", true). // sort by "user" field, ascending
+    From(0).Size(10).   // take documents 0-9
+    Debug(true).        // print request and response to stdout
+    Pretty(true).       // pretty print request and response JSON
+    Do()                // execute
+if err != nil {
+    // Handle error
+    panic(err)
+}
+
+// searchResult is of type SearchResult and returns hits, suggestions,
+// and all kinds of other information from Elasticsearch.
+fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis)
+
+// Number of hits
+if searchResult.Hits != nil {
+    fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits)
+
+    // Iterate through results
+    for _, hit := range searchResult.Hits.Hits {
+        // hit.Index contains the name of the index
+
+        // Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}).
+        var t Tweet
+        err := json.Unmarshal(*hit.Source, &t)
+        if err != nil {
+            // Deserialization failed
+        }
+
+        // Work with tweet
+        fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
+    }
+} else {
+    // No hits
+    fmt.Print("Found no tweets\n")
+}
+
+
+// Update a tweet by the update API of Elasticsearch.
+// We just increment the number of retweets.
+update, err := client.Update().Index("twitter").Type("tweet").Id("1").
+    Script("ctx._source.retweets += num").
+    ScriptParams(map[string]interface{}{"num": 1}).
+    Upsert(map[string]interface{}{"retweets": 0}).
+    Do()
+if err != nil {
+    // Handle error
+    panic(err)
+}
+fmt.Printf("New version of tweet %q is now %d", update.Id, update.Version)
+
+// ...
+
+// Delete an index.
+deleteIndex, err := client.DeleteIndex("twitter").Do()
+if err != nil {
+    // Handle error
+    panic(err)
+}
+if !deleteIndex.Acknowledged {
+    // Not acknowledged
+}
+```
+
+## Installation
+
+Grab the code with `go get github.com/olivere/elastic`.
+
+## API Status
+
+Here's the current API status.
+
+### APIs
+
+- [x] Search (most queries, filters, facets, aggregations etc. are implemented: see below)
+- [x] Index
+- [x] Get
+- [x] Delete
+- [x] Delete By Query
+- [x] Update
+- [x] Multi Get
+- [x] Bulk
+- [ ] Bulk UDP
+- [ ] Term vectors
+- [ ] Multi term vectors
+- [x] Count
+- [ ] Validate
+- [ ] Explain
+- [x] Search
+- [ ] Search shards
+- [x] Search template
+- [x] Facets (most are implemented, see below)
+- [x] Aggregates (most are implemented, see below)
+- [x] Multi Search
+- [ ] Percolate
+- [ ] More like this
+- [ ] Benchmark
+
+### Indices
+
+- [x] Create index
+- [x] Delete index
+- [x] Indices exists
+- [x] Open/close index
+- [ ] Put mapping
+- [ ] Get mapping
+- [ ] Get field mapping
+- [ ] Types exist
+- [ ] Delete mapping
+- [x] Index aliases
+- [ ] Update indices settings
+- [ ] Get settings
+- [ ] Analyze
+- [ ] Index templates
+- [ ] Warmers
+- [ ] Status
+- [ ] Indices stats
+- [ ] Indices segments
+- [ ] Indices recovery
+- [ ] Clear cache
+- [x] Flush
+- [x] Refresh
+- [x] Optimize
+
+### Snapshot and Restore
+
+- [ ] Snapshot
+- [ ] Restore
+- [ ] Snapshot status
+- [ ] Monitoring snapshot/restore progress
+- [ ] Partial restore
+
+### Cat APIs
+
+Not implemented. Those are better suited for operating with Elasticsearch
+on the command line.
+
+### Cluster
+
+- [x] Health
+- [x] State
+- [ ] Stats
+- [ ] Pending cluster tasks
+- [ ] Cluster reroute
+- [ ] Cluster update settings
+- [ ] Nodes stats
+- [ ] Nodes info
+- [ ] Nodes hot_threads
+- [ ] Nodes shutdown
+
+### Query DSL
+
+#### Queries
+
+- [x] `match`
+- [x] `multi_match`
+- [x] `bool`
+- [x] `boosting`
+- [ ] `common_terms`
+- [ ] `constant_score`
+- [x] `dis_max`
+- [x] `filtered`
+- [x] `fuzzy_like_this_query` (`flt`)
+- [x] `fuzzy_like_this_field_query` (`flt_field`)
+- [x] `function_score`
+- [x] `fuzzy`
+- [ ] `geo_shape`
+- [x] `has_child`
+- [x] `has_parent`
+- [x] `ids`
+- [ ] `indices`
+- [x] `match_all`
+- [x] `mlt`
+- [x] `mlt_field`
+- [x] `nested`
+- [x] `prefix`
+- [x] `query_string`
+- [x] `simple_query_string`
+- [x] `range`
+- [x] `regexp`
+- [ ] `span_first`
+- [ ] `span_multi_term`
+- [ ] `span_near`
+- [ ] `span_not`
+- [ ] `span_or`
+- [ ] `span_term`
+- [x] `term`
+- [x] `terms`
+- [ ] `top_children`
+- [x] `wildcard`
+- [ ] `minimum_should_match`
+- [ ] `multi_term_query_rewrite`
+- [x] `template_query`
+
+#### Filters
+
+- [x] `and`
+- [x] `bool`
+- [x] `exists`
+- [ ] `geo_bounding_box`
+- [ ] `geo_distance`
+- [ ] `geo_distance_range`
+- [x] `geo_polygon`
+- [ ] `geoshape`
+- [ ] `geohash`
+- [x] `has_child`
+- [x] `has_parent`
+- [x] `ids`
+- [ ] `indices`
+- [x] `limit`
+- [x] `match_all`
+- [x] `missing`
+- [x] `nested`
+- [x] `not`
+- [x] `or`
+- [x] `prefix`
+- [x] `query`
+- [x] `range`
+- [x] `regexp`
+- [ ] `script`
+- [x] `term`
+- [x] `terms`
+- [x] `type`
+
+### Facets
+
+- [x] Terms
+- [x] Range
+- [x] Histogram
+- [x] Date Histogram
+- [x] Filter
+- [x] Query
+- [x] Statistical
+- [x] Terms Stats
+- [x] Geo Distance
+
+### Aggregations
+
+- [x] min
+- [x] max
+- [x] sum
+- [x] avg
+- [x] stats
+- [x] extended stats
+- [x] value count
+- [x] percentiles
+- [x] percentile ranks
+- [x] cardinality
+- [x] geo bounds
+- [x] top hits
+- [ ] scripted metric
+- [x] global
+- [x] filter
+- [x] filters
+- [x] missing
+- [x] nested
+- [x] reverse nested
+- [x] children
+- [x] terms
+- [x] significant terms
+- [x] range
+- [x] date range
+- [x] ipv4 range
+- [x] histogram
+- [x] date histogram
+- [x] geo distance
+- [x] geohash grid
+
+### Sorting
+
+- [x] Sort by score
+- [x] Sort by field
+- [x] Sort by geo distance
+- [x] Sort by script
+
+### Scan
+
+Scrolling through documents (e.g. `search_type=scan`) are implemented via
+the `Scroll` and `Scan` services.
+
+## How to contribute
+
+Read [the contribution guidelines](https://github.com/olivere/elastic/blob/master/CONTRIBUTING.md).
+
+## Credits
+
+Thanks a lot for the great folks working hard on
+[Elasticsearch](http://www.elasticsearch.org/)
+and
+[Go](http://www.golang.org/).
+
+## LICENSE
+
+MIT-LICENSE. See [LICENSE](http://olivere.mit-license.org/)
+or the LICENSE file provided in the repository for details.
+

+ 137 - 0
sensitive/src/elastic.v1/alias.go

@@ -0,0 +1,137 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+)
+
+type AliasService struct {
+	client  *Client
+	actions []aliasAction
+	pretty  bool
+	debug   bool
+}
+
+type aliasAction struct {
+	// "add" or "remove"
+	Type string
+	// Index name
+	Index string
+	// Alias name
+	Alias string
+	// Filter
+	Filter *Filter
+}
+
+func NewAliasService(client *Client) *AliasService {
+	builder := &AliasService{
+		client:  client,
+		actions: make([]aliasAction, 0),
+	}
+	return builder
+}
+
+func (s *AliasService) Pretty(pretty bool) *AliasService {
+	s.pretty = pretty
+	return s
+}
+
+func (s *AliasService) Debug(debug bool) *AliasService {
+	s.debug = debug
+	return s
+}
+
+func (s *AliasService) Add(indexName string, aliasName string) *AliasService {
+	action := aliasAction{Type: "add", Index: indexName, Alias: aliasName}
+	s.actions = append(s.actions, action)
+	return s
+}
+
+func (s *AliasService) AddWithFilter(indexName string, aliasName string, filter *Filter) *AliasService {
+	action := aliasAction{Type: "add", Index: indexName, Alias: aliasName, Filter: filter}
+	s.actions = append(s.actions, action)
+	return s
+}
+
+func (s *AliasService) Remove(indexName string, aliasName string) *AliasService {
+	action := aliasAction{Type: "remove", Index: indexName, Alias: aliasName}
+	s.actions = append(s.actions, action)
+	return s
+}
+
+func (s *AliasService) Do() (*AliasResult, error) {
+	// Build url
+	urls := "/_aliases"
+
+	// Set up a new request
+	req, err := s.client.NewRequest("POST", urls)
+	if err != nil {
+		return nil, err
+	}
+
+	// Parameters
+	params := make(url.Values)
+	if s.pretty {
+		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+	}
+	if len(params) > 0 {
+		urls += "?" + params.Encode()
+	}
+
+	// Actions
+	body := make(map[string]interface{})
+	actionsJson := make([]interface{}, 0)
+
+	for _, action := range s.actions {
+		actionJson := make(map[string]interface{})
+		detailsJson := make(map[string]interface{})
+		detailsJson["index"] = action.Index
+		detailsJson["alias"] = action.Alias
+		if action.Filter != nil {
+			detailsJson["filter"] = (*action.Filter).Source()
+		}
+		actionJson[action.Type] = detailsJson
+		actionsJson = append(actionsJson, actionJson)
+	}
+
+	body["actions"] = actionsJson
+
+	// Set body
+	req.SetBodyJson(body)
+
+	if s.debug {
+		s.client.dumpRequest((*http.Request)(req))
+	}
+
+	// Get response
+	res, err := s.client.c.Do((*http.Request)(req))
+	if err != nil {
+		return nil, err
+	}
+	if err := checkResponse(res); err != nil {
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if s.debug {
+		s.client.dumpResponse(res)
+	}
+
+	ret := new(AliasResult)
+	if err := json.NewDecoder(res.Body).Decode(ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// -- Result of an alias request.
+
+type AliasResult struct {
+	Acknowledged bool `json:"acknowledged"`
+}

+ 123 - 0
sensitive/src/elastic.v1/alias_test.go

@@ -0,0 +1,123 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"testing"
+)
+
+const (
+	testAliasName = "elastic-test-alias"
+)
+
+func TestAliasLifecycle(t *testing.T) {
+	var err error
+
+	client := setupTestClientAndCreateIndex(t)
+
+	// Some tweets
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	tweet2 := tweet{User: "sandrae", Message: "Cycling is fun."}
+	tweet3 := tweet{User: "olivere", Message: "Another unrelated topic."}
+
+	// Add tweets to first index
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Add tweets to second index
+	_, err = client.Index().Index(testIndexName2).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Flush
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = client.Flush().Index(testIndexName2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	/*
+		// Alias should not yet exist
+		aliasesResult1, err := client.Aliases().Do()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if len(aliasesResult1.Indices) != 0 {
+			t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 0, len(aliasesResult1.Indices))
+		}
+	*/
+
+	// Add both indices to a new alias
+	aliasCreate, err := client.Alias().
+		Add(testIndexName, testAliasName).
+		Add(testIndexName2, testAliasName).
+		//Pretty(true).Debug(true).
+		Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !aliasCreate.Acknowledged {
+		t.Errorf("expected AliasResult.Acknowledged %v; got %v", true, aliasCreate.Acknowledged)
+	}
+
+	// Search should return all 3 tweets
+	matchAll := NewMatchAllQuery()
+	searchResult1, err := client.Search().Index(testAliasName).Query(&matchAll).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if searchResult1.Hits == nil {
+		t.Errorf("expected SearchResult.Hits != nil; got nil")
+	}
+	if searchResult1.Hits.TotalHits != 3 {
+		t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult1.Hits.TotalHits)
+	}
+
+	/*
+		// Alias should return both indices
+		aliasesResult2, err := client.Aliases().Do()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if len(aliasesResult2.Indices) != 2 {
+			t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 2, len(aliasesResult2.Indices))
+		}
+	*/
+
+	// Remove first index should remove two tweets, so should only yield 1
+	aliasRemove1, err := client.Alias().
+		Remove(testIndexName, testAliasName).
+		//Pretty(true).Debug(true).
+		Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !aliasRemove1.Acknowledged {
+		t.Errorf("expected AliasResult.Acknowledged %v; got %v", true, aliasRemove1.Acknowledged)
+	}
+
+	searchResult2, err := client.Search().Index(testAliasName).Query(&matchAll).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if searchResult2.Hits == nil {
+		t.Errorf("expected SearchResult.Hits != nil; got nil")
+	}
+	if searchResult2.Hits.TotalHits != 1 {
+		t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 1, searchResult2.Hits.TotalHits)
+	}
+
+}

+ 193 - 0
sensitive/src/elastic.v1/aliases.go

@@ -0,0 +1,193 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+type AliasesService struct {
+	client  *Client
+	indices []string
+	pretty  bool
+	debug   bool
+}
+
+func NewAliasesService(client *Client) *AliasesService {
+	builder := &AliasesService{
+		client:  client,
+		indices: make([]string, 0),
+	}
+	return builder
+}
+
+func (s *AliasesService) Pretty(pretty bool) *AliasesService {
+	s.pretty = pretty
+	return s
+}
+
+func (s *AliasesService) Debug(debug bool) *AliasesService {
+	s.debug = debug
+	return s
+}
+
+func (s *AliasesService) Index(indexName string) *AliasesService {
+	s.indices = append(s.indices, indexName)
+	return s
+}
+
+func (s *AliasesService) Indices(indexNames ...string) *AliasesService {
+	s.indices = append(s.indices, indexNames...)
+	return s
+}
+
+func (s *AliasesService) Do() (*AliasesResult, error) {
+	var err error
+
+	// Build url
+	urls := "/"
+
+	// Indices part
+	indexPart := make([]string, 0)
+	for _, index := range s.indices {
+		index, err = uritemplates.Expand("{index}", map[string]string{
+			"index": index,
+		})
+		if err != nil {
+			return nil, err
+		}
+		indexPart = append(indexPart, index)
+	}
+	urls += strings.Join(indexPart, ",")
+
+	// TODO Types part
+
+	// Search
+	urls += "/_aliases"
+
+	// Set up a new request
+	req, err := s.client.NewRequest("GET", urls)
+	if err != nil {
+		return nil, err
+	}
+
+	// Parameters
+	params := make(url.Values)
+	if s.pretty {
+		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+	}
+	if len(params) > 0 {
+		urls += "?" + params.Encode()
+	}
+
+	if s.debug {
+		s.client.dumpRequest((*http.Request)(req))
+	}
+
+	// Get response
+	res, err := s.client.c.Do((*http.Request)(req))
+	if err != nil {
+		return nil, err
+	}
+	if err := checkResponse(res); err != nil {
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if s.debug {
+		s.client.dumpResponse(res)
+	}
+
+	// {
+	//   "indexName" : {
+	//     "aliases" : {
+	//       "alias1" : { },
+	//       "alias2" : { }
+	//     }
+	//   },
+	//   "indexName2" : {
+	//     ...
+	//   },
+	// }
+	bodyBytes, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		return nil, err
+	}
+	indexMap := make(map[string]interface{})
+	if err := json.Unmarshal(bodyBytes, &indexMap); err != nil {
+		return nil, err
+	}
+
+	// Each (indexName, _)
+	ret := &AliasesResult{
+		Indices: make(map[string]indexResult),
+	}
+	for indexName, indexData := range indexMap {
+		indexOut, found := ret.Indices[indexName]
+		if !found {
+			indexOut = indexResult{Aliases: make([]aliasResult, 0)}
+		}
+
+		// { "aliases" : { ... } }
+		indexDataMap, ok := indexData.(map[string]interface{})
+		if ok {
+			aliasesData, ok := indexDataMap["aliases"].(map[string]interface{})
+			if ok {
+				for aliasName, _ := range aliasesData {
+					aliasRes := aliasResult{AliasName: aliasName}
+					indexOut.Aliases = append(indexOut.Aliases, aliasRes)
+				}
+			}
+		}
+
+		ret.Indices[indexName] = indexOut
+	}
+
+	return ret, nil
+}
+
+// -- Result of an alias request.
+
+type AliasesResult struct {
+	Indices map[string]indexResult
+}
+
+type indexResult struct {
+	Aliases []aliasResult
+}
+
+type aliasResult struct {
+	AliasName string
+}
+
+func (ar AliasesResult) IndicesByAlias(aliasName string) []string {
+	indices := make([]string, 0)
+
+	for indexName, indexInfo := range ar.Indices {
+		for _, aliasInfo := range indexInfo.Aliases {
+			if aliasInfo.AliasName == aliasName {
+				indices = append(indices, indexName)
+			}
+		}
+	}
+
+	return indices
+}
+
+func (ir indexResult) HasAlias(aliasName string) bool {
+	for _, alias := range ir.Aliases {
+		if alias.AliasName == aliasName {
+			return true
+		}
+	}
+	return false
+}

+ 146 - 0
sensitive/src/elastic.v1/aliases_test.go

@@ -0,0 +1,146 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"testing"
+)
+
+func TestAliases(t *testing.T) {
+	var err error
+
+	client := setupTestClientAndCreateIndex(t)
+
+	// Some tweets
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	tweet2 := tweet{User: "sandrae", Message: "Cycling is fun."}
+	tweet3 := tweet{User: "olivere", Message: "Another unrelated topic."}
+
+	// Add tweets to first index
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Add tweets to second index
+	_, err = client.Index().Index(testIndexName2).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Flush
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = client.Flush().Index(testIndexName2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Alias should not yet exist
+	aliasesResult1, err := client.Aliases().
+		Indices(testIndexName, testIndexName2).
+		//Pretty(true).Debug(true).
+		Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(aliasesResult1.Indices) != 2 {
+		t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 2, len(aliasesResult1.Indices))
+	}
+	for indexName, indexDetails := range aliasesResult1.Indices {
+		if len(indexDetails.Aliases) != 0 {
+			t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 0, len(indexDetails.Aliases))
+		}
+	}
+
+	// Add both indices to a new alias
+	aliasCreate, err := client.Alias().
+		Add(testIndexName, testAliasName).
+		Add(testIndexName2, testAliasName).
+		//Pretty(true).Debug(true).
+		Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !aliasCreate.Acknowledged {
+		t.Errorf("expected AliasResult.Acknowledged %v; got %v", true, aliasCreate.Acknowledged)
+	}
+
+	// Alias should now exist
+	aliasesResult2, err := client.Aliases().
+		Indices(testIndexName, testIndexName2).
+		//Pretty(true).Debug(true).
+		Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(aliasesResult2.Indices) != 2 {
+		t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 2, len(aliasesResult2.Indices))
+	}
+	for indexName, indexDetails := range aliasesResult2.Indices {
+		if len(indexDetails.Aliases) != 1 {
+			t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 1, len(indexDetails.Aliases))
+		}
+	}
+
+	// Check the reverse function:
+	indexInfo1, found := aliasesResult2.Indices[testIndexName]
+	if !found {
+		t.Errorf("expected info about index %s = %v; got %v", testIndexName, true, found)
+	}
+	aliasFound := indexInfo1.HasAlias(testAliasName)
+	if !aliasFound {
+		t.Errorf("expected alias %s to include index %s; got %v", testAliasName, testIndexName, aliasFound)
+	}
+
+	// Check the reverse function:
+	indexInfo2, found := aliasesResult2.Indices[testIndexName2]
+	if !found {
+		t.Errorf("expected info about index %s = %v; got %v", testIndexName, true, found)
+	}
+	aliasFound = indexInfo2.HasAlias(testAliasName)
+	if !aliasFound {
+		t.Errorf("expected alias %s to include index %s; got %v", testAliasName, testIndexName2, aliasFound)
+	}
+
+	// Remove first index should remove two tweets, so should only yield 1
+	aliasRemove1, err := client.Alias().
+		Remove(testIndexName, testAliasName).
+		//Pretty(true).Debug(true).
+		Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !aliasRemove1.Acknowledged {
+		t.Errorf("expected AliasResult.Acknowledged %v; got %v", true, aliasRemove1.Acknowledged)
+	}
+
+	// Alias should now exist only for index 2
+	aliasesResult3, err := client.Aliases().Indices(testIndexName, testIndexName2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(aliasesResult3.Indices) != 2 {
+		t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 2, len(aliasesResult3.Indices))
+	}
+	for indexName, indexDetails := range aliasesResult3.Indices {
+		if indexName == testIndexName {
+			if len(indexDetails.Aliases) != 0 {
+				t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 0, len(indexDetails.Aliases))
+			}
+		} else if indexName == testIndexName2 {
+			if len(indexDetails.Aliases) != 1 {
+				t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 1, len(indexDetails.Aliases))
+			}
+		} else {
+			t.Errorf("got index %s", indexName)
+		}
+	}
+}

+ 336 - 0
sensitive/src/elastic.v1/bulk.go

@@ -0,0 +1,336 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
// BulkService collects bulkable requests (index, create, update, delete)
// and submits them to Elasticsearch in one batch via the _bulk endpoint.
type BulkService struct {
	client *Client

	index    string           // default index used when a request specifies none
	_type    string           // default document type used when a request specifies none
	requests []BulkableRequest
	//replicationType string
	//consistencyLevel string
	timeout      string
	refresh      *bool // tri-state: nil means "not set"
	pretty       bool
	debug        bool
	debugOnError bool
}

// NewBulkService creates a new BulkService bound to the given client.
func NewBulkService(client *Client) *BulkService {
	builder := &BulkService{
		client:       client,
		requests:     make([]BulkableRequest, 0),
		pretty:       false,
		debug:        false,
		debugOnError: false,
	}
	return builder
}

// reset drops all queued requests so the service can be reused after Do.
func (s *BulkService) reset() {
	s.requests = make([]BulkableRequest, 0)
}

// Index sets the default index for the bulk request URL.
func (s *BulkService) Index(index string) *BulkService {
	s.index = index
	return s
}

// Type sets the default document type for the bulk request URL.
func (s *BulkService) Type(_type string) *BulkService {
	s._type = _type
	return s
}

// Timeout sets the per-operation timeout, e.g. "1s".
func (s *BulkService) Timeout(timeout string) *BulkService {
	s.timeout = timeout
	return s
}

// Refresh controls whether affected shards are refreshed after the bulk run.
func (s *BulkService) Refresh(refresh bool) *BulkService {
	s.refresh = &refresh
	return s
}

// Pretty asks Elasticsearch for pretty-printed JSON responses.
func (s *BulkService) Pretty(pretty bool) *BulkService {
	s.pretty = pretty
	return s
}

// Debug dumps the HTTP request and response of every call.
func (s *BulkService) Debug(debug bool) *BulkService {
	s.debug = debug
	return s
}

// DebugOnError dumps the HTTP request and response only when a call fails.
func (s *BulkService) DebugOnError(debug bool) *BulkService {
	s.debugOnError = debug
	return s
}

// Add queues another bulkable request for the next Do call.
func (s *BulkService) Add(r BulkableRequest) *BulkService {
	s.requests = append(s.requests, r)
	return s
}

// NumberOfActions returns the number of currently queued requests.
func (s *BulkService) NumberOfActions() int {
	return len(s.requests)
}
+
+func (s *BulkService) bodyAsString() (string, error) {
+	buf := bytes.NewBufferString("")
+
+	for _, req := range s.requests {
+		source, err := req.Source()
+		if err != nil {
+			return "", err
+		}
+		for _, line := range source {
+			_, err := buf.WriteString(fmt.Sprintf("%s\n", line))
+			if err != nil {
+				return "", nil
+			}
+		}
+	}
+
+	return buf.String(), nil
+}
+
// Do executes all queued bulk requests in one HTTP call to the _bulk
// endpoint and returns the parsed response. On success the internal
// request queue is cleared so the service can be reused.
func (s *BulkService) Do() (*BulkResponse, error) {
	// No actions?
	if s.NumberOfActions() == 0 {
		return nil, errors.New("elastic: No bulk actions to commit")
	}

	// Get body (newline-delimited JSON of all queued requests)
	body, err := s.bodyAsString()
	if err != nil {
		return nil, err
	}

	// Build url: "/{index}/{type}/_bulk", with index/type optional
	urls := "/"
	if s.index != "" {
		index, err := uritemplates.Expand("{index}", map[string]string{
			"index": s.index,
		})
		if err != nil {
			return nil, err
		}
		urls += index + "/"
	}
	if s._type != "" {
		typ, err := uritemplates.Expand("{type}", map[string]string{
			"type": s._type,
		})
		if err != nil {
			return nil, err
		}
		urls += typ + "/"
	}
	urls += "_bulk"

	// Parameters (only set what differs from the server defaults)
	params := make(url.Values)
	if s.pretty {
		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
	}
	if s.refresh != nil {
		params.Set("refresh", fmt.Sprintf("%v", *s.refresh))
	}
	if s.timeout != "" {
		params.Set("timeout", s.timeout)
	}
	if len(params) > 0 {
		urls += "?" + params.Encode()
	}

	// Set up a new request
	req, err := s.client.NewRequest("POST", urls)
	if err != nil {
		return nil, err
	}

	// Set body
	req.SetBodyString(body)

	// Debug
	if s.debug {
		s.client.dumpRequest((*http.Request)(req))
	}

	// Get response
	res, err := s.client.c.Do((*http.Request)(req))
	if err != nil {
		if s.debugOnError {
			s.client.dumpRequest((*http.Request)(req))
			s.client.dumpResponse(res)
		}
		return nil, err
	}
	if err := checkResponse(res); err != nil {
		if s.debugOnError {
			s.client.dumpRequest((*http.Request)(req))
			s.client.dumpResponse(res)
		}
		return nil, err
	}
	defer res.Body.Close()

	// Debug
	if s.debug {
		s.client.dumpResponse(res)
	}

	ret := new(BulkResponse)
	if err := json.NewDecoder(res.Body).Decode(ret); err != nil {
		if s.debugOnError {
			s.client.dumpResponse(res)
		}
		return nil, err
	}

	// Reset so the request can be reused
	s.reset()

	return ret, nil
}
+
// BulkResponse is a response to a bulk execution.
//
// Example:
// {
//   "took":3,
//   "errors":false,
//   "items":[{
//     "index":{
//       "_index":"index1",
//       "_type":"tweet",
//       "_id":"1",
//       "_version":3,
//       "status":201
//     }
//   },{
//     "index":{
//       "_index":"index2",
//       "_type":"tweet",
//       "_id":"2",
//       "_version":3,
//       "status":200
//     }
//   },{
//     "delete":{
//       "_index":"index1",
//       "_type":"tweet",
//       "_id":"1",
//       "_version":4,
//       "status":200,
//       "found":true
//     }
//   },{
//     "update":{
//       "_index":"index2",
//       "_type":"tweet",
//       "_id":"2",
//       "_version":4,
//       "status":200
//     }
//   }]
// }
type BulkResponse struct {
	Took   int                            `json:"took,omitempty"`
	Errors bool                           `json:"errors,omitempty"`
	// Items maps action name ("index", "create", "update", "delete")
	// to the per-document result, one map per executed action.
	Items  []map[string]*BulkResponseItem `json:"items,omitempty"`
}

// BulkResponseItem is the result of a single bulk request.
type BulkResponseItem struct {
	Index   string `json:"_index,omitempty"`
	Type    string `json:"_type,omitempty"`
	Id      string `json:"_id,omitempty"`
	Version int    `json:"_version,omitempty"`
	Status  int    `json:"status,omitempty"` // HTTP-style status code of this item
	Found   bool   `json:"found,omitempty"`  // only meaningful for delete actions
	Error   string `json:"error,omitempty"`  // error message when the item failed
}
+
// Indexed returns all bulk request results of "index" actions.
func (r *BulkResponse) Indexed() []*BulkResponseItem {
	return r.ByAction("index")
}

// Created returns all bulk request results of "create" actions.
func (r *BulkResponse) Created() []*BulkResponseItem {
	return r.ByAction("create")
}

// Updated returns all bulk request results of "update" actions.
func (r *BulkResponse) Updated() []*BulkResponseItem {
	return r.ByAction("update")
}

// Deleted returns all bulk request results of "delete" actions.
func (r *BulkResponse) Deleted() []*BulkResponseItem {
	return r.ByAction("delete")
}
+
+// ByAction returns all bulk request results of a certain action,
+// e.g. "index" or "delete".
+func (r *BulkResponse) ByAction(action string) []*BulkResponseItem {
+	if r.Items == nil {
+		return nil
+	}
+	items := make([]*BulkResponseItem, 0)
+	for _, item := range r.Items {
+		if result, found := item[action]; found {
+			items = append(items, result)
+		}
+	}
+	return items
+}
+
+// ById returns all bulk request results of a given document id,
+// regardless of the action ("index", "delete" etc.).
+func (r *BulkResponse) ById(id string) []*BulkResponseItem {
+	if r.Items == nil {
+		return nil
+	}
+	items := make([]*BulkResponseItem, 0)
+	for _, item := range r.Items {
+		for _, result := range item {
+			if result.Id == id {
+				items = append(items, result)
+			}
+		}
+	}
+	return items
+}
+
+// Failed returns those items of a bulk response that have errors,
+// i.e. those that don't have a status code between 200 and 299.
+func (r *BulkResponse) Failed() []*BulkResponseItem {
+	if r.Items == nil {
+		return nil
+	}
+	errors := make([]*BulkResponseItem, 0)
+	for _, item := range r.Items {
+		for _, result := range item {
+			if !(result.Status >= 200 && result.Status <= 299) {
+				errors = append(errors, result)
+			}
+		}
+	}
+	return errors
+}

+ 108 - 0
sensitive/src/elastic.v1/bulk_delete_request.go

@@ -0,0 +1,108 @@
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
// -- Bulk delete request --

// Bulk request to remove document from Elasticsearch.
type BulkDeleteRequest struct {
	BulkableRequest
	index       string
	typ         string
	id          string
	routing     string
	refresh     *bool  // tri-state: nil means "not set"
	version     int64  // default is MATCH_ANY
	versionType string // default is "internal"
}

// NewBulkDeleteRequest creates an empty bulk delete request; configure
// it via the fluent setters below.
func NewBulkDeleteRequest() *BulkDeleteRequest {
	return &BulkDeleteRequest{}
}

// Index sets the index of the document to delete.
func (r *BulkDeleteRequest) Index(index string) *BulkDeleteRequest {
	r.index = index
	return r
}

// Type sets the document type of the document to delete.
func (r *BulkDeleteRequest) Type(typ string) *BulkDeleteRequest {
	r.typ = typ
	return r
}

// Id sets the id of the document to delete.
func (r *BulkDeleteRequest) Id(id string) *BulkDeleteRequest {
	r.id = id
	return r
}

// Routing sets the routing value of the delete request.
func (r *BulkDeleteRequest) Routing(routing string) *BulkDeleteRequest {
	r.routing = routing
	return r
}

// Refresh controls whether affected shards are refreshed after the delete.
func (r *BulkDeleteRequest) Refresh(refresh bool) *BulkDeleteRequest {
	r.refresh = &refresh
	return r
}

// Version sets the document version for optimistic concurrency control.
func (r *BulkDeleteRequest) Version(version int64) *BulkDeleteRequest {
	r.version = version
	return r
}

// VersionType can be "internal" (default), "external", "external_gte",
// "external_gt", or "force".
func (r *BulkDeleteRequest) VersionType(versionType string) *BulkDeleteRequest {
	r.versionType = versionType
	return r
}
+
+func (r *BulkDeleteRequest) String() string {
+	lines, err := r.Source()
+	if err == nil {
+		return strings.Join(lines, "\n")
+	}
+	return fmt.Sprintf("error: %v", err)
+}
+
+func (r *BulkDeleteRequest) Source() ([]string, error) {
+	lines := make([]string, 1)
+
+	source := make(map[string]interface{})
+	deleteCommand := make(map[string]interface{})
+	if r.index != "" {
+		deleteCommand["_index"] = r.index
+	}
+	if r.typ != "" {
+		deleteCommand["_type"] = r.typ
+	}
+	if r.id != "" {
+		deleteCommand["_id"] = r.id
+	}
+	if r.routing != "" {
+		deleteCommand["_routing"] = r.routing
+	}
+	if r.version > 0 {
+		deleteCommand["_version"] = r.version
+	}
+	if r.versionType != "" {
+		deleteCommand["_version_type"] = r.versionType
+	}
+	if r.refresh != nil {
+		deleteCommand["refresh"] = *r.refresh
+	}
+	source["delete"] = deleteCommand
+
+	body, err := json.Marshal(source)
+	if err != nil {
+		return nil, err
+	}
+
+	lines[0] = string(body)
+
+	return lines, nil
+}

+ 42 - 0
sensitive/src/elastic.v1/bulk_delete_request_test.go

@@ -0,0 +1,42 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"testing"
+)
+
// TestBulkDeleteRequestSerialization checks that a bulk delete request
// serializes to the exact header line the bulk API expects.
func TestBulkDeleteRequestSerialization(t *testing.T) {
	tests := []struct {
		Request  BulkableRequest
		Expected []string
	}{
		// #0
		{
			Request: NewBulkDeleteRequest().Index("index1").Type("tweet").Id("1"),
			Expected: []string{
				`{"delete":{"_id":"1","_index":"index1","_type":"tweet"}}`,
			},
		},
	}

	for i, test := range tests {
		lines, err := test.Request.Source()
		if err != nil {
			t.Fatalf("case #%d: expected no error, got: %v", i, err)
		}
		if lines == nil {
			t.Fatalf("case #%d: expected lines, got nil", i)
		}
		if len(lines) != len(test.Expected) {
			t.Fatalf("case #%d: expected %d lines, got %d", i, len(test.Expected), len(lines))
		}
		for j, line := range lines {
			if line != test.Expected[j] {
				t.Errorf("case #%d: expected line #%d to be %s, got: %s", i, j, test.Expected[j], line)
			}
		}
	}
}

+ 173 - 0
sensitive/src/elastic.v1/bulk_index_request.go

@@ -0,0 +1,173 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
// Bulk request to add document to Elasticsearch.
type BulkIndexRequest struct {
	BulkableRequest
	index       string
	typ         string
	id          string
	opType      string // action name emitted as the header key, "index" by default
	routing     string
	parent      string
	timestamp   string
	ttl         int64
	refresh     *bool  // tri-state: nil means "not set"
	version     int64  // default is MATCH_ANY
	versionType string // default is "internal"
	doc         interface{}
}

// NewBulkIndexRequest creates a bulk request with op type "index";
// use OpType to switch to e.g. "create".
func NewBulkIndexRequest() *BulkIndexRequest {
	return &BulkIndexRequest{
		opType: "index",
	}
}

// Index sets the target index of the document.
func (r *BulkIndexRequest) Index(index string) *BulkIndexRequest {
	r.index = index
	return r
}

// Type sets the document type.
func (r *BulkIndexRequest) Type(typ string) *BulkIndexRequest {
	r.typ = typ
	return r
}

// Id sets the document id.
func (r *BulkIndexRequest) Id(id string) *BulkIndexRequest {
	r.id = id
	return r
}

// OpType sets the bulk action, e.g. "index" or "create".
func (r *BulkIndexRequest) OpType(opType string) *BulkIndexRequest {
	r.opType = opType
	return r
}

// Routing sets the routing value of the request.
func (r *BulkIndexRequest) Routing(routing string) *BulkIndexRequest {
	r.routing = routing
	return r
}

// Parent sets the parent document id.
func (r *BulkIndexRequest) Parent(parent string) *BulkIndexRequest {
	r.parent = parent
	return r
}

// Timestamp sets the document timestamp.
func (r *BulkIndexRequest) Timestamp(timestamp string) *BulkIndexRequest {
	r.timestamp = timestamp
	return r
}

// Ttl sets the document's time-to-live.
func (r *BulkIndexRequest) Ttl(ttl int64) *BulkIndexRequest {
	r.ttl = ttl
	return r
}

// Refresh controls whether affected shards are refreshed after indexing.
func (r *BulkIndexRequest) Refresh(refresh bool) *BulkIndexRequest {
	r.refresh = &refresh
	return r
}

// Version sets the document version for optimistic concurrency control.
func (r *BulkIndexRequest) Version(version int64) *BulkIndexRequest {
	r.version = version
	return r
}

// VersionType can be "internal" (default), "external", "external_gte",
// "external_gt", or "force".
func (r *BulkIndexRequest) VersionType(versionType string) *BulkIndexRequest {
	r.versionType = versionType
	return r
}

// Doc sets the document to index. It may be any JSON-serializable value,
// a (raw) JSON string, or nil (serialized as "{}").
func (r *BulkIndexRequest) Doc(doc interface{}) *BulkIndexRequest {
	r.doc = doc
	return r
}
+
+func (r *BulkIndexRequest) String() string {
+	lines, err := r.Source()
+	if err == nil {
+		return strings.Join(lines, "\n")
+	}
+	return fmt.Sprintf("error: %v", err)
+}
+
+func (r *BulkIndexRequest) Source() ([]string, error) {
+	// { "index" : { "_index" : "test", "_type" : "type1", "_id" : "1" } }
+	// { "field1" : "value1" }
+
+	lines := make([]string, 2)
+
+	// "index" ...
+	command := make(map[string]interface{})
+	indexCommand := make(map[string]interface{})
+	if r.index != "" {
+		indexCommand["_index"] = r.index
+	}
+	if r.typ != "" {
+		indexCommand["_type"] = r.typ
+	}
+	if r.id != "" {
+		indexCommand["_id"] = r.id
+	}
+	if r.routing != "" {
+		indexCommand["_routing"] = r.routing
+	}
+	if r.parent != "" {
+		indexCommand["_parent"] = r.parent
+	}
+	if r.timestamp != "" {
+		indexCommand["_timestamp"] = r.timestamp
+	}
+	if r.ttl > 0 {
+		indexCommand["_ttl"] = r.ttl
+	}
+	if r.version > 0 {
+		indexCommand["_version"] = r.version
+	}
+	if r.versionType != "" {
+		indexCommand["_version_type"] = r.versionType
+	}
+	if r.refresh != nil {
+		indexCommand["refresh"] = *r.refresh
+	}
+	command[r.opType] = indexCommand
+	line, err := json.Marshal(command)
+	if err != nil {
+		return nil, err
+	}
+	lines[0] = string(line)
+
+	// "field1" ...
+	if r.doc != nil {
+		switch t := r.doc.(type) {
+		default:
+			body, err := json.Marshal(r.doc)
+			if err != nil {
+				return nil, err
+			}
+			lines[1] = string(body)
+		case json.RawMessage:
+			lines[1] = string(t)
+		case *json.RawMessage:
+			lines[1] = string(*t)
+		case string:
+			lines[1] = t
+		case *string:
+			lines[1] = *t
+		}
+	} else {
+		lines[1] = "{}"
+	}
+
+	return lines, nil
+}

+ 63 - 0
sensitive/src/elastic.v1/bulk_index_request_test.go

@@ -0,0 +1,63 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"testing"
+	"time"
+)
+
// TestBulkIndexRequestSerialization checks header and document lines
// for the default "index" op type as well as explicit "create"/"index".
func TestBulkIndexRequestSerialization(t *testing.T) {
	tests := []struct {
		Request  BulkableRequest
		Expected []string
	}{
		// #0
		{
			Request: NewBulkIndexRequest().Index("index1").Type("tweet").Id("1").
				Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
			Expected: []string{
				`{"index":{"_id":"1","_index":"index1","_type":"tweet"}}`,
				`{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
			},
		},
		// #1
		{
			Request: NewBulkIndexRequest().OpType("create").Index("index1").Type("tweet").Id("1").
				Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
			Expected: []string{
				`{"create":{"_id":"1","_index":"index1","_type":"tweet"}}`,
				`{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
			},
		},
		// #2
		{
			Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("tweet").Id("1").
				Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
			Expected: []string{
				`{"index":{"_id":"1","_index":"index1","_type":"tweet"}}`,
				`{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
			},
		},
	}

	for i, test := range tests {
		lines, err := test.Request.Source()
		if err != nil {
			t.Fatalf("case #%d: expected no error, got: %v", i, err)
		}
		if lines == nil {
			t.Fatalf("case #%d: expected lines, got nil", i)
		}
		if len(lines) != len(test.Expected) {
			t.Fatalf("case #%d: expected %d lines, got %d", i, len(test.Expected), len(lines))
		}
		for j, line := range lines {
			if line != test.Expected[j] {
				t.Errorf("case #%d: expected line #%d to be %s, got: %s", i, j, test.Expected[j], line)
			}
		}
	}
}

+ 13 - 0
sensitive/src/elastic.v1/bulk_request.go

@@ -0,0 +1,13 @@
+package elastic
+
+import (
+	"fmt"
+)
+
+// -- Bulkable request (index/update/delete) --
+
// Generic interface to bulkable requests. Source returns the request
// serialized as one or more newline-delimited JSON lines, as consumed
// by the Elasticsearch _bulk endpoint.
type BulkableRequest interface {
	fmt.Stringer
	Source() ([]string, error)
}

+ 361 - 0
sensitive/src/elastic.v1/bulk_test.go

@@ -0,0 +1,361 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
// TestBulk is an integration test: index two documents and delete one
// in a single bulk call, then update the surviving one in a second
// bulk call and verify the result via Get.
func TestBulk(t *testing.T) {
	client := setupTestClientAndCreateIndex(t)

	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
	tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. Yeah."}

	index1Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").Doc(tweet1)
	index2Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("2").Doc(tweet2)
	delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1")

	bulkRequest := client.Bulk() //.Debug(true)
	bulkRequest = bulkRequest.Add(index1Req)
	bulkRequest = bulkRequest.Add(index2Req)
	bulkRequest = bulkRequest.Add(delete1Req)

	if bulkRequest.NumberOfActions() != 3 {
		t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 3, bulkRequest.NumberOfActions())
	}

	bulkResponse, err := bulkRequest.Do()
	if err != nil {
		t.Fatal(err)
	}
	if bulkResponse == nil {
		t.Errorf("expected bulkResponse to be != nil; got nil")
	}

	// Do() resets the queue on success
	if bulkRequest.NumberOfActions() != 0 {
		t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 0, bulkRequest.NumberOfActions())
	}

	// Document with Id="1" should not exist
	exists, err := client.Exists().Index(testIndexName).Type("tweet").Id("1").Do()
	if err != nil {
		t.Fatal(err)
	}
	if exists {
		t.Errorf("expected exists %v; got %v", false, exists)
	}

	// Document with Id="2" should exist
	exists, err = client.Exists().Index(testIndexName).Type("tweet").Id("2").Do()
	if err != nil {
		t.Fatal(err)
	}
	if !exists {
		t.Errorf("expected exists %v; got %v", true, exists)
	}

	// Update
	updateDoc := struct {
		Retweets int `json:"retweets"`
	}{
		42,
	}
	update1Req := NewBulkUpdateRequest().Index(testIndexName).Type("tweet").Id("2").Doc(&updateDoc)
	bulkRequest = client.Bulk() // .Debug(true)
	bulkRequest = bulkRequest.Add(update1Req)

	if bulkRequest.NumberOfActions() != 1 {
		t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 1, bulkRequest.NumberOfActions())
	}

	bulkResponse, err = bulkRequest.Do()
	if err != nil {
		t.Fatal(err)
	}
	if bulkResponse == nil {
		t.Errorf("expected bulkResponse to be != nil; got nil")
	}

	if bulkRequest.NumberOfActions() != 0 {
		t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 0, bulkRequest.NumberOfActions())
	}

	// Document with Id="2" should have a retweets count of 42
	doc, err := client.Get().Index(testIndexName).Type("tweet").Id("2").Do()
	if err != nil {
		t.Fatal(err)
	}
	if doc == nil {
		t.Fatal("expected doc to be != nil; got nil")
	}
	if !doc.Found {
		t.Fatalf("expected doc to be found; got found = %v", doc.Found)
	}
	if doc.Source == nil {
		t.Fatal("expected doc source to be != nil; got nil")
	}
	var updatedTweet tweet
	err = json.Unmarshal(*doc.Source, &updatedTweet)
	if err != nil {
		t.Fatal(err)
	}
	if updatedTweet.Retweets != 42 {
		t.Errorf("expected updated tweet retweets = %v; got %v", 42, updatedTweet.Retweets)
	}
}
+
// TestBulkWithIndexSetOnClient verifies that a bulk run also works when
// the default index/type is set on the BulkService itself.
func TestBulkWithIndexSetOnClient(t *testing.T) {
	client := setupTestClientAndCreateIndex(t)

	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
	tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. Yeah."}

	index1Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").Doc(tweet1)
	index2Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("2").Doc(tweet2)
	delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1")

	bulkRequest := client.Bulk().Index(testIndexName).Type("tweet") //.Debug(true)
	bulkRequest = bulkRequest.Add(index1Req)
	bulkRequest = bulkRequest.Add(index2Req)
	bulkRequest = bulkRequest.Add(delete1Req)

	if bulkRequest.NumberOfActions() != 3 {
		t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 3, bulkRequest.NumberOfActions())
	}

	bulkResponse, err := bulkRequest.Do() // .Debug(true).Do()
	if err != nil {
		t.Fatal(err)
	}
	if bulkResponse == nil {
		t.Errorf("expected bulkResponse to be != nil; got nil")
	}

	// Document with Id="1" should not exist
	exists, err := client.Exists().Index(testIndexName).Type("tweet").Id("1").Do()
	if err != nil {
		t.Fatal(err)
	}
	if exists {
		t.Errorf("expected exists %v; got %v", false, exists)
	}

	// Document with Id="2" should exist
	exists, err = client.Exists().Index(testIndexName).Type("tweet").Id("2").Do()
	if err != nil {
		t.Fatal(err)
	}
	if !exists {
		t.Errorf("expected exists %v; got %v", true, exists)
	}
}
+
// TestBulkRequestsSerialization checks the exact newline-delimited body
// produced for a mixed batch (index, create, delete, update), executes
// it, and verifies the per-action accessors on the response.
func TestBulkRequestsSerialization(t *testing.T) {
	client := setupTestClientAndCreateIndex(t)

	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
	tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. Yeah."}

	index1Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").Doc(tweet1)
	index2Req := NewBulkIndexRequest().OpType("create").Index(testIndexName).Type("tweet").Id("2").Doc(tweet2)
	delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1")
	update2Req := NewBulkUpdateRequest().Index(testIndexName).Type("tweet").Id("2").
		Doc(struct {
		Retweets int `json:"retweets"`
	}{
		Retweets: 42,
	})

	bulkRequest := client.Bulk() //.Debug(true)
	bulkRequest = bulkRequest.Add(index1Req)
	bulkRequest = bulkRequest.Add(index2Req)
	bulkRequest = bulkRequest.Add(delete1Req)
	bulkRequest = bulkRequest.Add(update2Req)

	if bulkRequest.NumberOfActions() != 4 {
		t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 4, bulkRequest.NumberOfActions())
	}

	expected := `{"index":{"_id":"1","_index":"` + testIndexName + `","_type":"tweet"}}
{"user":"olivere","message":"Welcome to Golang and Elasticsearch.","retweets":0,"created":"0001-01-01T00:00:00Z"}
{"create":{"_id":"2","_index":"` + testIndexName + `","_type":"tweet"}}
{"user":"sandrae","message":"Dancing all night long. Yeah.","retweets":0,"created":"0001-01-01T00:00:00Z"}
{"delete":{"_id":"1","_index":"` + testIndexName + `","_type":"tweet"}}
{"update":{"_id":"2","_index":"` + testIndexName + `","_type":"tweet"}}
{"doc":{"retweets":42}}
`
	got, err := bulkRequest.bodyAsString()
	if err != nil {
		t.Fatalf("expected no error, got: %v", err)
	}
	if got != expected {
		t.Errorf("expected\n%s\ngot:\n%s", expected, got)
	}

	// Run the bulk request
	bulkResponse, err := bulkRequest.Do() // .Debug(true).Pretty(true).Do()
	if err != nil {
		t.Fatal(err)
	}
	if bulkResponse == nil {
		t.Errorf("expected bulkResponse to be != nil; got nil")
	}
	if bulkResponse.Took == 0 {
		t.Errorf("expected took to be > 0; got %d", bulkResponse.Took)
	}
	if bulkResponse.Errors {
		t.Errorf("expected errors to be %v; got %v", false, bulkResponse.Errors)
	}
	if len(bulkResponse.Items) != 4 {
		t.Fatalf("expected 4 result items; got %d", len(bulkResponse.Items))
	}

	// Indexed actions
	indexed := bulkResponse.Indexed()
	if indexed == nil {
		t.Fatal("expected indexed to be != nil; got nil")
	}
	if len(indexed) != 1 {
		t.Fatalf("expected len(indexed) == %d; got %d", 1, len(indexed))
	}
	if indexed[0].Id != "1" {
		t.Errorf("expected indexed[0].Id == %s; got %s", "1", indexed[0].Id)
	}
	if indexed[0].Status != 201 {
		t.Errorf("expected indexed[0].Status == %d; got %d", 201, indexed[0].Status)
	}

	// Created actions
	created := bulkResponse.Created()
	if created == nil {
		t.Fatal("expected created to be != nil; got nil")
	}
	if len(created) != 1 {
		t.Fatalf("expected len(created) == %d; got %d", 1, len(created))
	}
	if created[0].Id != "2" {
		t.Errorf("expected created[0].Id == %s; got %s", "2", created[0].Id)
	}
	if created[0].Status != 201 {
		t.Errorf("expected created[0].Status == %d; got %d", 201, created[0].Status)
	}

	// Deleted actions
	deleted := bulkResponse.Deleted()
	if deleted == nil {
		t.Fatal("expected deleted to be != nil; got nil")
	}
	if len(deleted) != 1 {
		t.Fatalf("expected len(deleted) == %d; got %d", 1, len(deleted))
	}
	if deleted[0].Id != "1" {
		t.Errorf("expected deleted[0].Id == %s; got %s", "1", deleted[0].Id)
	}
	if deleted[0].Status != 200 {
		t.Errorf("expected deleted[0].Status == %d; got %d", 200, deleted[0].Status)
	}
	if !deleted[0].Found {
		t.Errorf("expected deleted[0].Found == %v; got %v", true, deleted[0].Found)
	}

	// Updated actions
	updated := bulkResponse.Updated()
	if updated == nil {
		t.Fatal("expected updated to be != nil; got nil")
	}
	if len(updated) != 1 {
		t.Fatalf("expected len(updated) == %d; got %d", 1, len(updated))
	}
	if updated[0].Id != "2" {
		t.Errorf("expected updated[0].Id == %s; got %s", "2", updated[0].Id)
	}
	if updated[0].Status != 200 {
		t.Errorf("expected updated[0].Status == %d; got %d", 200, updated[0].Status)
	}
	if updated[0].Version != 2 {
		t.Errorf("expected updated[0].Version == %d; got %d", 2, updated[0].Version)
	}

	// ById: document "1" was indexed and then deleted, so two results
	id1Results := bulkResponse.ById("1")
	if id1Results == nil {
		t.Fatal("expected id1Results to be != nil; got nil")
	}
	if len(id1Results) != 2 {
		t.Fatalf("expected len(id1Results) == %d; got %d", 2, len(id1Results))
	}
	if id1Results[0].Id != "1" {
		t.Errorf("expected id1Results[0].Id == %s; got %s", "1", id1Results[0].Id)
	}
	if id1Results[0].Status != 201 {
		t.Errorf("expected id1Results[0].Status == %d; got %d", 201, id1Results[0].Status)
	}
	if id1Results[0].Version != 1 {
		t.Errorf("expected id1Results[0].Version == %d; got %d", 1, id1Results[0].Version)
	}
	if id1Results[1].Id != "1" {
		t.Errorf("expected id1Results[1].Id == %s; got %s", "1", id1Results[1].Id)
	}
	if id1Results[1].Status != 200 {
		t.Errorf("expected id1Results[1].Status == %d; got %d", 200, id1Results[1].Status)
	}
	if id1Results[1].Version != 2 {
		t.Errorf("expected id1Results[1].Version == %d; got %d", 2, id1Results[1].Version)
	}
}
+
// TestFailedBulkRequests verifies that Failed() picks out exactly the
// items whose status lies outside 200-299 (here: 423 and 404) from a
// canned bulk response.
func TestFailedBulkRequests(t *testing.T) {
	js := `{
  "took" : 2,
  "errors" : true,
  "items" : [ {
    "index" : {
      "_index" : "elastic-test",
      "_type" : "tweet",
      "_id" : "1",
      "_version" : 1,
      "status" : 201
    }
  }, {
    "create" : {
      "_index" : "elastic-test",
      "_type" : "tweet",
      "_id" : "2",
      "_version" : 1,
      "status" : 423,
      "error" : "Locked"
    }
  }, {
    "delete" : {
      "_index" : "elastic-test",
      "_type" : "tweet",
      "_id" : "1",
      "_version" : 2,
      "status" : 404,
      "found" : false
    }
  }, {
    "update" : {
      "_index" : "elastic-test",
      "_type" : "tweet",
      "_id" : "2",
      "_version" : 2,
      "status" : 200
    }
  } ]
}`

	var resp BulkResponse
	err := json.Unmarshal([]byte(js), &resp)
	if err != nil {
		t.Fatal(err)
	}
	failed := resp.Failed()
	if len(failed) != 2 {
		t.Errorf("expected %d failed items; got: %d", 2, len(failed))
	}
}

+ 240 - 0
sensitive/src/elastic.v1/bulk_update_request.go

@@ -0,0 +1,240 @@
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+// Bulk request to update document in Elasticsearch.
+type BulkUpdateRequest struct {
+	BulkableRequest
+	index string
+	typ   string
+	id    string
+
+	routing         string
+	parent          string
+	script          string
+	scriptType      string
+	scriptLang      string
+	scriptParams    map[string]interface{}
+	version         int64  // default is MATCH_ANY
+	versionType     string // default is "internal"
+	retryOnConflict *int
+	refresh         *bool
+	upsert          interface{}
+	docAsUpsert     *bool
+	doc             interface{}
+	ttl             int64
+	timestamp       string
+}
+
+func NewBulkUpdateRequest() *BulkUpdateRequest {
+	return &BulkUpdateRequest{}
+}
+
+func (r *BulkUpdateRequest) Index(index string) *BulkUpdateRequest {
+	r.index = index
+	return r
+}
+
+func (r *BulkUpdateRequest) Type(typ string) *BulkUpdateRequest {
+	r.typ = typ
+	return r
+}
+
+func (r *BulkUpdateRequest) Id(id string) *BulkUpdateRequest {
+	r.id = id
+	return r
+}
+
+func (r *BulkUpdateRequest) Routing(routing string) *BulkUpdateRequest {
+	r.routing = routing
+	return r
+}
+
+func (r *BulkUpdateRequest) Parent(parent string) *BulkUpdateRequest {
+	r.parent = parent
+	return r
+}
+
+func (r *BulkUpdateRequest) Script(script string) *BulkUpdateRequest {
+	r.script = script
+	return r
+}
+
+func (r *BulkUpdateRequest) ScriptType(scriptType string) *BulkUpdateRequest {
+	r.scriptType = scriptType
+	return r
+}
+
+func (r *BulkUpdateRequest) ScriptLang(scriptLang string) *BulkUpdateRequest {
+	r.scriptLang = scriptLang
+	return r
+}
+
+func (r *BulkUpdateRequest) ScriptParams(params map[string]interface{}) *BulkUpdateRequest {
+	r.scriptParams = params
+	return r
+}
+
+func (r *BulkUpdateRequest) RetryOnConflict(retryOnConflict int) *BulkUpdateRequest {
+	r.retryOnConflict = &retryOnConflict
+	return r
+}
+
+func (r *BulkUpdateRequest) Version(version int64) *BulkUpdateRequest {
+	r.version = version
+	return r
+}
+
+// VersionType can be "internal" (default), "external", "external_gte",
+// "external_gt", or "force".
+func (r *BulkUpdateRequest) VersionType(versionType string) *BulkUpdateRequest {
+	r.versionType = versionType
+	return r
+}
+
+func (r *BulkUpdateRequest) Refresh(refresh bool) *BulkUpdateRequest {
+	r.refresh = &refresh
+	return r
+}
+
+func (r *BulkUpdateRequest) Doc(doc interface{}) *BulkUpdateRequest {
+	r.doc = doc
+	return r
+}
+
+func (r *BulkUpdateRequest) DocAsUpsert(docAsUpsert bool) *BulkUpdateRequest {
+	r.docAsUpsert = &docAsUpsert
+	return r
+}
+
+func (r *BulkUpdateRequest) Upsert(doc interface{}) *BulkUpdateRequest {
+	r.upsert = doc
+	return r
+}
+
+func (r *BulkUpdateRequest) Ttl(ttl int64) *BulkUpdateRequest {
+	r.ttl = ttl
+	return r
+}
+
+func (r *BulkUpdateRequest) Timestamp(timestamp string) *BulkUpdateRequest {
+	r.timestamp = timestamp
+	return r
+}
+
+func (r *BulkUpdateRequest) String() string {
+	lines, err := r.Source()
+	if err == nil {
+		return strings.Join(lines, "\n")
+	}
+	return fmt.Sprintf("error: %v", err)
+}
+
+func (r *BulkUpdateRequest) getSourceAsString(data interface{}) (string, error) {
+	switch t := data.(type) {
+	default:
+		body, err := json.Marshal(data)
+		if err != nil {
+			return "", err
+		}
+		return string(body), nil
+	case json.RawMessage:
+		return string(t), nil
+	case *json.RawMessage:
+		return string(*t), nil
+	case string:
+		return t, nil
+	case *string:
+		return *t, nil
+	}
+}
+
+func (r BulkUpdateRequest) Source() ([]string, error) {
+	// { "update" : { "_index" : "test", "_type" : "type1", "_id" : "1", ... } }
+	// { "doc" : { "field1" : "value1", ... } }
+	// or
+	// { "update" : { "_index" : "test", "_type" : "type1", "_id" : "1", ... } }
+	// { "script" : { ... } }
+
+	lines := make([]string, 2)
+
+	// "update" ...
+	command := make(map[string]interface{})
+	updateCommand := make(map[string]interface{})
+	if r.index != "" {
+		updateCommand["_index"] = r.index
+	}
+	if r.typ != "" {
+		updateCommand["_type"] = r.typ
+	}
+	if r.id != "" {
+		updateCommand["_id"] = r.id
+	}
+	if r.routing != "" {
+		updateCommand["_routing"] = r.routing
+	}
+	if r.parent != "" {
+		updateCommand["_parent"] = r.parent
+	}
+	if r.timestamp != "" {
+		updateCommand["_timestamp"] = r.timestamp
+	}
+	if r.ttl > 0 {
+		updateCommand["_ttl"] = r.ttl
+	}
+	if r.version > 0 {
+		updateCommand["_version"] = r.version
+	}
+	if r.versionType != "" {
+		updateCommand["_version_type"] = r.versionType
+	}
+	if r.refresh != nil {
+		updateCommand["refresh"] = *r.refresh
+	}
+	if r.retryOnConflict != nil {
+		updateCommand["_retry_on_conflict"] = *r.retryOnConflict
+	}
+	if r.upsert != nil {
+		updateCommand["upsert"] = r.upsert
+	}
+	command["update"] = updateCommand
+	line, err := json.Marshal(command)
+	if err != nil {
+		return nil, err
+	}
+	lines[0] = string(line)
+
+	// 2nd line: {"doc" : { ... }} or {"script": {...}}
+	source := make(map[string]interface{})
+	if r.docAsUpsert != nil {
+		source["doc_as_upsert"] = *r.docAsUpsert
+	}
+	if r.doc != nil {
+		// {"doc":{...}}
+		source["doc"] = r.doc
+	} else if r.script != "" {
+		// {"script":...}
+		source["script"] = r.script
+		if r.scriptLang != "" {
+			source["lang"] = r.scriptLang
+		}
+		/*
+			if r.scriptType != "" {
+				source["script_type"] = r.scriptType
+			}
+		*/
+		if r.scriptParams != nil && len(r.scriptParams) > 0 {
+			source["params"] = r.scriptParams
+		}
+	}
+	lines[1], err = r.getSourceAsString(source)
+	if err != nil {
+		return nil, err
+	}
+
+	return lines, nil
+}

+ 79 - 0
sensitive/src/elastic.v1/bulk_update_request_test.go

@@ -0,0 +1,79 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"testing"
+)
+
+func TestBulkUpdateRequestSerialization(t *testing.T) {
+	tests := []struct {
+		Request  BulkableRequest
+		Expected []string
+	}{
+		// #0
+		{
+			Request: NewBulkUpdateRequest().Index("index1").Type("tweet").Id("1").Doc(struct {
+				Counter int64 `json:"counter"`
+			}{
+				Counter: 42,
+			}),
+			Expected: []string{
+				`{"update":{"_id":"1","_index":"index1","_type":"tweet"}}`,
+				`{"doc":{"counter":42}}`,
+			},
+		},
+		// #1
+		{
+			Request: NewBulkUpdateRequest().Index("index1").Type("tweet").Id("1").
+				RetryOnConflict(3).
+				DocAsUpsert(true).
+				Doc(struct {
+				Counter int64 `json:"counter"`
+			}{
+				Counter: 42,
+			}),
+			Expected: []string{
+				`{"update":{"_id":"1","_index":"index1","_retry_on_conflict":3,"_type":"tweet"}}`,
+				`{"doc":{"counter":42},"doc_as_upsert":true}`,
+			},
+		},
+		// #2
+		{
+			Request: NewBulkUpdateRequest().Index("index1").Type("tweet").Id("1").
+				RetryOnConflict(3).
+				Script(`ctx._source.retweets += param1`).
+				ScriptLang("js").
+				ScriptParams(map[string]interface{}{"param1": 42}).
+				Upsert(struct {
+				Counter int64 `json:"counter"`
+			}{
+				Counter: 42,
+			}),
+			Expected: []string{
+				`{"update":{"_id":"1","_index":"index1","_retry_on_conflict":3,"_type":"tweet","upsert":{"counter":42}}}`,
+				`{"lang":"js","params":{"param1":42},"script":"ctx._source.retweets += param1"}`,
+			},
+		},
+	}
+
+	for i, test := range tests {
+		lines, err := test.Request.Source()
+		if err != nil {
+			t.Fatalf("case #%d: expected no error, got: %v", i, err)
+		}
+		if lines == nil {
+			t.Fatalf("case #%d: expected lines, got nil", i)
+		}
+		if len(lines) != len(test.Expected) {
+			t.Fatalf("case #%d: expected %d lines, got %d", i, len(test.Expected), len(lines))
+		}
+		for j, line := range lines {
+			if line != test.Expected[j] {
+				t.Errorf("case #%d: expected line #%d to be %s, got: %s", i, j, test.Expected[j], line)
+			}
+		}
+	}
+}

+ 358 - 0
sensitive/src/elastic.v1/client.go

@@ -0,0 +1,358 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"errors"
+	"log"
+	"net/http"
+	"net/http/httputil"
+	"net/url"
+	"sync"
+	"time"
+)
+
+const (
+	// Version is the current version of Elastic.
+	Version = "1.3.1"
+
+	// defaultUrl to be used as base for Elasticsearch requests.
+	defaultUrl = "http://localhost:9200"
+
+	// pingDuration is the time to periodically check the Elasticsearch URLs.
+	pingDuration = 60 * time.Second
+)
+
+var (
+	// ErrNoClient is raised when no active Elasticsearch client is available.
+	ErrNoClient = errors.New("no active client")
+)
+
+// Client is an Elasticsearch client. Create one by calling NewClient.
+type Client struct {
+	urls []string // urls is a list of all clients for Elasticsearch queries
+
+	c *http.Client // c is the net/http Client to use for requests
+
+	log *log.Logger // output log
+
+	mu        sync.RWMutex // mutex for the next two fields
+	activeUrl string       // currently active connection url
+	hasActive bool         // true if we have an active connection
+}
+
+// NewClient creates a new client to work with Elasticsearch.
+func NewClient(client *http.Client, urls ...string) (*Client, error) {
+	if client == nil {
+		return nil, errors.New("client is nil")
+	}
+	c := &Client{c: client}
+	switch len(urls) {
+	case 0:
+		c.urls = make([]string, 1)
+		c.urls[0] = defaultUrl
+	case 1:
+		c.urls = make([]string, 1)
+		c.urls[0] = urls[0]
+	default:
+		c.urls = urls
+	}
+	c.pingUrls()
+	go c.pinger() // start goroutine periodically ping all clients
+	return c, nil
+}
+
+// SetLogger sets the logger for output from Elastic.
+// If you don't set the logger, it will print to os.Stdout.
+func (c *Client) SetLogger(log *log.Logger) {
+	c.log = log
+}
+
+// printf is a helper to log output.
+func (c *Client) printf(format string, args ...interface{}) {
+	if c.log != nil {
+		c.log.Printf(format, args...)
+	} else {
+		log.Printf(format, args...)
+	}
+}
+
+// dumpRequest dumps the given HTTP request.
+func (c *Client) dumpRequest(r *http.Request) {
+	out, err := httputil.DumpRequestOut(r, true)
+	if err == nil {
+		c.printf("%s\n", string(out))
+	}
+}
+
+// dumpResponse dumps the given HTTP response.
+func (c *Client) dumpResponse(resp *http.Response) {
+	out, err := httputil.DumpResponse(resp, true)
+	if err == nil {
+		c.printf("%s\n", string(out))
+	}
+}
+
+// NewRequest creates a new request with the given method and prepends
+// the base URL to the path. If no active connection to Elasticsearch
+// is available, ErrNoClient is returned.
+func (c *Client) NewRequest(method, path string) (*Request, error) {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	if !c.hasActive {
+		return nil, ErrNoClient
+	}
+	return NewRequest(method, c.activeUrl+path)
+}
+
+// pinger periodically runs pingUrls.
+func (c *Client) pinger() {
+	ticker := time.NewTicker(pingDuration)
+	for {
+		select {
+		case <-ticker.C:
+			c.pingUrls()
+		}
+	}
+}
+
+// pingUrls iterates through all client URLs. It checks if the client
+// is available. It takes the first one available and saves its URL
+// in activeUrl. If no client is available, hasActive is set to false
+// and NewRequest will fail.
+func (c *Client) pingUrls() {
+	for _, url_ := range c.urls {
+		params := make(url.Values)
+		params.Set("timeout", "1")
+		req, err := NewRequest("HEAD", url_+"/?"+params.Encode())
+		if err == nil {
+			res, err := c.c.Do((*http.Request)(req))
+			if err == nil {
+				defer res.Body.Close()
+				if res.StatusCode == http.StatusOK {
+					// Everything okay: Update activeUrl and set hasActive to true.
+					c.mu.Lock()
+					defer c.mu.Unlock()
+					if c.activeUrl != "" && c.activeUrl != url_ {
+						log.Printf("elastic: switched connection from %s to %s", c.activeUrl, url_)
+					}
+					c.activeUrl = url_
+					c.hasActive = true
+					return
+				}
+			} else {
+				log.Printf("elastic: %v", err)
+			}
+		} else {
+			log.Printf("elastic: %v", err)
+		}
+	}
+
+	// No client available
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	c.hasActive = false
+}
+
+// ElasticsearchVersion returns the version number of Elasticsearch
+// running on the given URL.
+func (c *Client) ElasticsearchVersion(url string) (string, error) {
+	res, _, err := c.Ping().URL(url).Do()
+	if err != nil {
+		return "", err
+	}
+	return res.Version.Number, nil
+}
+
+// Ping checks if a given node in a cluster exists and (optionally)
+// returns some basic information about the Elasticsearch server,
+// e.g. the Elasticsearch version number.
+func (c *Client) Ping() *PingService {
+	return NewPingService(c)
+}
+
+// CreateIndex returns a service to create a new index.
+func (c *Client) CreateIndex(name string) *CreateIndexService {
+	builder := NewCreateIndexService(c)
+	builder.Index(name)
+	return builder
+}
+
+// DeleteIndex returns a service to delete an index.
+func (c *Client) DeleteIndex(name string) *DeleteIndexService {
+	builder := NewDeleteIndexService(c)
+	builder.Index(name)
+	return builder
+}
+
+// IndexExists allows to check if an index exists.
+func (c *Client) IndexExists(name string) *IndexExistsService {
+	builder := NewIndexExistsService(c)
+	builder.Index(name)
+	return builder
+}
+
+// OpenIndex opens an index.
+func (c *Client) OpenIndex(name string) *OpenIndexService {
+	builder := NewOpenIndexService(c)
+	builder.Index(name)
+	return builder
+}
+
+// CloseIndex closes an index.
+func (c *Client) CloseIndex(name string) *CloseIndexService {
+	builder := NewCloseIndexService(c)
+	builder.Index(name)
+	return builder
+}
+
+// Index a document.
+func (c *Client) Index() *IndexService {
+	builder := NewIndexService(c)
+	return builder
+}
+
+// Update a document.
+func (c *Client) Update() *UpdateService {
+	builder := NewUpdateService(c)
+	return builder
+}
+
+// Delete a document.
+func (c *Client) Delete() *DeleteService {
+	builder := NewDeleteService(c)
+	return builder
+}
+
+// DeleteByQuery deletes documents as found by a query.
+func (c *Client) DeleteByQuery() *DeleteByQueryService {
+	builder := NewDeleteByQueryService(c)
+	return builder
+}
+
+// Get a document.
+func (c *Client) Get() *GetService {
+	builder := NewGetService(c)
+	return builder
+}
+
+// MultiGet retrieves multiple documents in one roundtrip.
+func (c *Client) MultiGet() *MultiGetService {
+	builder := NewMultiGetService(c)
+	return builder
+}
+
+// Exists checks if a document exists.
+func (c *Client) Exists() *ExistsService {
+	builder := NewExistsService(c)
+	return builder
+}
+
+// Count documents.
+func (c *Client) Count(indices ...string) *CountService {
+	builder := NewCountService(c)
+	builder.Indices(indices...)
+	return builder
+}
+
+// Search is the entry point for searches.
+func (c *Client) Search(indices ...string) *SearchService {
+	builder := NewSearchService(c)
+	builder.Indices(indices...)
+	return builder
+}
+
+// MultiSearch is the entry point for multi searches.
+func (c *Client) MultiSearch() *MultiSearchService {
+	return NewMultiSearchService(c)
+}
+
+// Suggest returns a service to return suggestions.
+func (c *Client) Suggest(indices ...string) *SuggestService {
+	builder := NewSuggestService(c)
+	builder.Indices(indices...)
+	return builder
+}
+
+// Scan through documents. Use this to iterate inside a server process
+// where the results will be processed without returning them to a client.
+func (c *Client) Scan(indices ...string) *ScanService {
+	builder := NewScanService(c)
+	builder.Indices(indices...)
+	return builder
+}
+
+// Scroll through documents. Use this to efficiently scroll through results
+// while returning the results to a client. Use Scan when you don't need
+// to return requests to a client (i.e. not paginating via request/response).
+func (c *Client) Scroll(indices ...string) *ScrollService {
+	builder := NewScrollService(c)
+	builder.Indices(indices...)
+	return builder
+}
+
+// Optimize asks Elasticsearch to optimize one or more indices.
+func (c *Client) Optimize(indices ...string) *OptimizeService {
+	builder := NewOptimizeService(c)
+	builder.Indices(indices...)
+	return builder
+}
+
+// Refresh asks Elasticsearch to refresh one or more indices.
+func (c *Client) Refresh(indices ...string) *RefreshService {
+	builder := NewRefreshService(c)
+	builder.Indices(indices...)
+	return builder
+}
+
+// Flush asks Elasticsearch to free memory from the index and
+// flush data to disk.
+func (c *Client) Flush() *FlushService {
+	builder := NewFlushService(c)
+	return builder
+}
+
+// Bulk is the entry point to mass insert/update/delete documents.
+func (c *Client) Bulk() *BulkService {
+	builder := NewBulkService(c)
+	return builder
+}
+
+// Alias enables the caller to add and/or remove aliases.
+func (c *Client) Alias() *AliasService {
+	builder := NewAliasService(c)
+	return builder
+}
+
+// Aliases returns aliases by index name(s).
+func (c *Client) Aliases() *AliasesService {
+	builder := NewAliasesService(c)
+	return builder
+}
+
+// GetTemplate gets a search template.
+func (c *Client) GetTemplate() *GetTemplateService {
+	return NewGetTemplateService(c)
+}
+
+// PutTemplate creates or updates a search template.
+func (c *Client) PutTemplate() *PutTemplateService {
+	return NewPutTemplateService(c)
+}
+
+// DeleteTemplate deletes a search template.
+func (c *Client) DeleteTemplate() *DeleteTemplateService {
+	return NewDeleteTemplateService(c)
+}
+
+// ClusterHealth retrieves the health of the cluster.
+func (c *Client) ClusterHealth() *ClusterHealthService {
+	return NewClusterHealthService(c)
+}
+
+// ClusterState retrieves the state of the cluster.
+func (c *Client) ClusterState() *ClusterStateService {
+	return NewClusterStateService(c)
+}

+ 89 - 0
sensitive/src/elastic.v1/client_test.go

@@ -0,0 +1,89 @@
+package elastic
+
+import (
+	"net/http"
+	"testing"
+)
+
+func TestSingleUrl(t *testing.T) {
+	client, err := NewClient(http.DefaultClient)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(client.urls) != 1 {
+		t.Fatalf("expected 1 default client url, got: %v", client.urls)
+	}
+	if client.urls[0] != defaultUrl {
+		t.Errorf("expected default client url of %s, got: %s", defaultUrl, client.urls[0])
+	}
+}
+
+func TestMultipleUrls(t *testing.T) {
+	client, err := NewClient(http.DefaultClient, "http://localhost:9200", "http://localhost:9201")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(client.urls) != 2 {
+		t.Fatalf("expected 2 default client urls, got: %v", client.urls)
+	}
+	if client.urls[0] != "http://localhost:9200" {
+		t.Errorf("expected 1st client url of %s, got: %s", "http://localhost:9200", client.urls[0])
+	}
+	if client.urls[1] != "http://localhost:9201" {
+		t.Errorf("expected 2nd client url of %s, got: %s", "http://localhost:9201", client.urls[0])
+	}
+}
+
+func TestFindingActiveClient(t *testing.T) {
+	client, err := NewClient(http.DefaultClient, "http://localhost:19200", "http://localhost:9200")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(client.urls) != 2 {
+		t.Fatalf("expected 2 default client urls, got: %v", client.urls)
+	}
+	if !client.hasActive {
+		t.Errorf("expected to have active connection, got: %v", client.hasActive)
+	}
+	expected := "http://localhost:9200"
+	if client.activeUrl != expected {
+		t.Errorf("expected active url to be %s, got: %v", expected, client.activeUrl)
+	}
+}
+
+func TestFindingNoActiveClient(t *testing.T) {
+	client, err := NewClient(http.DefaultClient, "http://localhost:19200", "http://localhost:19201")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(client.urls) != 2 {
+		t.Fatalf("expected 2 default client urls, got: %v", client.urls)
+	}
+	if client.hasActive {
+		t.Errorf("expected to not have an active connection, got: %v", client.hasActive)
+	}
+	if client.activeUrl != "" {
+		t.Errorf("expected no active url, got: %v", client.activeUrl)
+	}
+	req, err := client.NewRequest("HEAD", "/")
+	if err != ErrNoClient {
+		t.Errorf("expected ErrNoClient, got: %v", err)
+	}
+	if req != nil {
+		t.Errorf("expected no request, got: %v", req)
+	}
+}
+
+func TestElasticsearchVersion(t *testing.T) {
+	client, err := NewClient(http.DefaultClient)
+	if err != nil {
+		t.Fatal(err)
+	}
+	version, err := client.ElasticsearchVersion(defaultUrl)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if version == "" {
+		t.Errorf("expected a version number, got: %q", version)
+	}
+}

+ 209 - 0
sensitive/src/elastic.v1/cluster_health.go

@@ -0,0 +1,209 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// ClusterHealthService allows to get the status of the cluster.
+// It is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/cluster-health.html.
+type ClusterHealthService struct {
+	client                  *Client
+	debug                   bool
+	pretty                  bool
+	indices                 []string
+	waitForStatus           string
+	level                   string
+	local                   *bool
+	masterTimeout           string
+	timeout                 string
+	waitForActiveShards     *int
+	waitForNodes            string
+	waitForRelocatingShards *int
+}
+
+// NewClusterHealthService creates a new ClusterHealthService.
+func NewClusterHealthService(client *Client) *ClusterHealthService {
+	return &ClusterHealthService{client: client, indices: make([]string, 0)}
+}
+
+// Index limits the information returned to a specific index.
+func (s *ClusterHealthService) Index(index string) *ClusterHealthService {
+	s.indices = make([]string, 0)
+	s.indices = append(s.indices, index)
+	return s
+}
+
+// Indices limits the information returned to specific indices.
+func (s *ClusterHealthService) Indices(indices ...string) *ClusterHealthService {
+	s.indices = make([]string, 0)
+	s.indices = append(s.indices, indices...)
+	return s
+}
+
+// MasterTimeout specifies an explicit operation timeout for connection to master node.
+func (s *ClusterHealthService) MasterTimeout(masterTimeout string) *ClusterHealthService {
+	s.masterTimeout = masterTimeout
+	return s
+}
+
+// Timeout specifies an explicit operation timeout.
+func (s *ClusterHealthService) Timeout(timeout string) *ClusterHealthService {
+	s.timeout = timeout
+	return s
+}
+
+// WaitForActiveShards can be used to wait until the specified number of shards are active.
+func (s *ClusterHealthService) WaitForActiveShards(waitForActiveShards int) *ClusterHealthService {
+	s.waitForActiveShards = &waitForActiveShards
+	return s
+}
+
+// WaitForNodes can be used to wait until the specified number of nodes are available.
+func (s *ClusterHealthService) WaitForNodes(waitForNodes string) *ClusterHealthService {
+	s.waitForNodes = waitForNodes
+	return s
+}
+
+// WaitForRelocatingShards can be used to wait until the specified number of relocating shards is finished.
+func (s *ClusterHealthService) WaitForRelocatingShards(waitForRelocatingShards int) *ClusterHealthService {
+	s.waitForRelocatingShards = &waitForRelocatingShards
+	return s
+}
+
+// WaitForStatus can be used to wait until the cluster is in a specific state.
+// Valid values are: green, yellow, or red.
+func (s *ClusterHealthService) WaitForStatus(waitForStatus string) *ClusterHealthService {
+	s.waitForStatus = waitForStatus
+	return s
+}
+
+// Level specifies the level of detail for returned information.
+func (s *ClusterHealthService) Level(level string) *ClusterHealthService {
+	s.level = level
+	return s
+}
+
+// Local indicates whether to return local information. If it is true,
+// we do not retrieve the state from master node (default: false).
+func (s *ClusterHealthService) Local(local bool) *ClusterHealthService {
+	s.local = &local
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *ClusterHealthService) buildURL() (string, error) {
+	// Build URL
+	urls, err := uritemplates.Expand("/_cluster/health/{index}", map[string]string{
+		"index": strings.Join(s.indices, ","),
+	})
+	if err != nil {
+		return "", err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.waitForRelocatingShards != nil {
+		params.Set("wait_for_relocating_shards", fmt.Sprintf("%d", *s.waitForRelocatingShards))
+	}
+	if s.waitForStatus != "" {
+		params.Set("wait_for_status", s.waitForStatus)
+	}
+	if s.level != "" {
+		params.Set("level", s.level)
+	}
+	if s.local != nil {
+		params.Set("local", fmt.Sprintf("%v", *s.local))
+	}
+	if s.masterTimeout != "" {
+		params.Set("master_timeout", s.masterTimeout)
+	}
+	if s.timeout != "" {
+		params.Set("timeout", s.timeout)
+	}
+	if s.waitForActiveShards != nil {
+		params.Set("wait_for_active_shards", fmt.Sprintf("%d", *s.waitForActiveShards))
+	}
+	if s.waitForNodes != "" {
+		params.Set("wait_for_nodes", s.waitForNodes)
+	}
+	if len(params) > 0 {
+		urls += "?" + params.Encode()
+	}
+
+	return urls, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *ClusterHealthService) Validate() error {
+	return nil
+}
+
+// Do executes the operation.
+func (s *ClusterHealthService) Do() (*ClusterHealthResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	urls, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Setup HTTP request
+	req, err := s.client.NewRequest("GET", urls)
+	if err != nil {
+		return nil, err
+	}
+
+	// Debug output?
+	if s.debug {
+		s.client.dumpRequest((*http.Request)(req))
+	}
+
+	// Get HTTP response
+	res, err := s.client.c.Do((*http.Request)(req))
+	if err != nil {
+		return nil, err
+	}
+	if err := checkResponse(res); err != nil {
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	// Debug output?
+	if s.debug {
+		s.client.dumpResponse(res)
+	}
+	// Return operation response
+	resp := new(ClusterHealthResponse)
+	if err := json.NewDecoder(res.Body).Decode(resp); err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// ClusterHealthResponse is the response of ClusterHealthService.Do.
+type ClusterHealthResponse struct {
+	ClusterName         string `json:"cluster_name"`
+	Status              string `json:"status"`
+	TimedOut            bool   `json:"timed_out"`
+	NumberOfNodes       int    `json:"number_of_nodes"`
+	NumberOfDataNodes   int    `json:"number_of_data_nodes"`
+	ActivePrimaryShards int    `json:"active_primary_shards"`
+	ActiveShards        int    `json:"active_shards"`
+	RelocatingShards    int    `json:"relocating_shards"`
+	InitializedShards   int    `json:"initialized_shards"`
+	UnassignedShards    int    `json:"unassigned_shards"`
+}

+ 68 - 0
sensitive/src/elastic.v1/cluster_health_test.go

@@ -0,0 +1,68 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"testing"
+)
+
+func TestClusterHealth(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	// Get cluster health
+	res, err := client.ClusterHealth().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res == nil {
+		t.Fatalf("expected res to be != nil; got: %v", res)
+	}
+	if res.Status != "green" && res.Status != "red" && res.Status != "yellow" {
+		t.Fatalf("expected status \"green\", \"red\", or \"yellow\"; got: %q", res.Status)
+	}
+}
+
+func TestClusterHealthURLs(t *testing.T) {
+	tests := []struct {
+		Service  *ClusterHealthService
+		Expected string
+	}{
+		{
+			Service: &ClusterHealthService{
+				indices: []string{},
+			},
+			Expected: "/_cluster/health/",
+		},
+		{
+			Service: &ClusterHealthService{
+				indices: []string{"twitter"},
+			},
+			Expected: "/_cluster/health/twitter",
+		},
+		{
+			Service: &ClusterHealthService{
+				indices: []string{"twitter", "gplus"},
+			},
+			Expected: "/_cluster/health/twitter%2Cgplus",
+		},
+		{
+			Service: &ClusterHealthService{
+				indices:       []string{"twitter"},
+				waitForStatus: "yellow",
+			},
+			Expected: "/_cluster/health/twitter?wait_for_status=yellow",
+		},
+	}
+
+	for _, test := range tests {
+		got, err := test.Service.buildURL()
+		if err != nil {
+			t.Fatalf("expected no error; got: %v", err)
+		}
+		if got != test.Expected {
+			t.Errorf("expected URL = %q; got: %q", test.Expected, got)
+		}
+	}
+}

+ 217 - 0
sensitive/src/elastic.v1/cluster_state.go

@@ -0,0 +1,217 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// ClusterStateService returns the state of the cluster.
+// It is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/cluster-state.html.
+type ClusterStateService struct {
+	client        *Client
+	debug         bool
+	pretty        bool
+	indices       []string
+	metrics       []string
+	local         *bool
+	masterTimeout string
+	flatSettings  *bool
+}
+
+// NewClusterStateService creates a new ClusterStateService.
+func NewClusterStateService(client *Client) *ClusterStateService {
+	return &ClusterStateService{
+		client:  client,
+		indices: make([]string, 0),
+		metrics: make([]string, 0),
+	}
+}
+
+// Index the name of the index. Use _all or an empty string to perform
+// the operation on all indices.
+func (s *ClusterStateService) Index(index string) *ClusterStateService {
+	s.indices = make([]string, 0)
+	s.indices = append(s.indices, index)
+	return s
+}
+
+// Indices is a list of index names. Use _all or an empty string to
+// perform the operation on all indices.
+func (s *ClusterStateService) Indices(indices ...string) *ClusterStateService {
+	s.indices = make([]string, 0)
+	s.indices = append(s.indices, indices...)
+	return s
+}
+
+// Metric limits the information returned to the specified metric.
+// It can be one of: version, master_node, nodes, routing_table, metadata,
+// blocks, or customs.
+func (s *ClusterStateService) Metric(metric string) *ClusterStateService {
+	s.metrics = make([]string, 0)
+	s.metrics = append(s.metrics, metric)
+	return s
+}
+
+// Metrics limits the information returned to the specified metrics.
+// It can be any of: version, master_node, nodes, routing_table, metadata,
+// blocks, or customs.
+func (s *ClusterStateService) Metrics(metrics ...string) *ClusterStateService {
+	s.metrics = make([]string, 0)
+	s.metrics = append(s.metrics, metrics...)
+	return s
+}
+
+// Local indicates whether to return local information. If it is true,
+// we do not retrieve the state from master node (default: false).
+func (s *ClusterStateService) Local(local bool) *ClusterStateService {
+	s.local = &local
+	return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+func (s *ClusterStateService) MasterTimeout(masterTimeout string) *ClusterStateService {
+	s.masterTimeout = masterTimeout
+	return s
+}
+
+// FlatSettings indicates whether to return settings in flat format (default: false).
+func (s *ClusterStateService) FlatSettings(flatSettings bool) *ClusterStateService {
+	s.flatSettings = &flatSettings
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *ClusterStateService) buildURL() (string, error) {
+	// Build URL
+	metrics := strings.Join(s.metrics, ",")
+	if metrics == "" {
+		metrics = "_all"
+	}
+	indices := strings.Join(s.indices, ",")
+	if indices == "" {
+		indices = "_all"
+	}
+	urls, err := uritemplates.Expand("/_cluster/state/{metrics}/{indices}", map[string]string{
+		"metrics": metrics,
+		"indices": indices,
+	})
+	if err != nil {
+		return "", err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.masterTimeout != "" {
+		params.Set("master_timeout", s.masterTimeout)
+	}
+	if s.flatSettings != nil {
+		params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
+	}
+	if s.local != nil {
+		params.Set("local", fmt.Sprintf("%v", *s.local))
+	}
+	if len(params) > 0 {
+		urls += "?" + params.Encode()
+	}
+
+	return urls, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *ClusterStateService) Validate() error {
+	return nil
+}
+
+// Do executes the operation.
+func (s *ClusterStateService) Do() (*ClusterStateResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	urls, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Setup HTTP request
+	req, err := s.client.NewRequest("GET", urls)
+	if err != nil {
+		return nil, err
+	}
+
+	// Debug output?
+	if s.debug {
+		s.client.dumpRequest((*http.Request)(req))
+	}
+
+	// Get HTTP response
+	res, err := s.client.c.Do((*http.Request)(req))
+	if err != nil {
+		return nil, err
+	}
+	if err := checkResponse(res); err != nil {
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	// Debug output?
+	if s.debug {
+		s.client.dumpResponse(res)
+	}
+	// Return operation response
+	resp := new(ClusterStateResponse)
+
+	if err := json.NewDecoder(res.Body).Decode(resp); err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// ClusterStateResponse is the response of ClusterStateService.Do.
+type ClusterStateResponse struct {
+	ClusterName  string                               `json:"cluster_name"`
+	Version      int                                  `json:"version"`
+	MasterNode   string                               `json:"master_node"`
+	Blocks       map[string]interface{}               `json:"blocks"`
+	Nodes        map[string]*ClusterStateNode         `json:"nodes"`
+	Metadata     *ClusterStateMetadata                `json:"metadata"`
+	RoutingTable map[string]*ClusterStateRoutingTable `json:"routing_table"`
+	RoutingNodes *ClusterStateRoutingNode             `json:"routing_nodes"`
+	Allocations  []interface{}                        `json:"allocations"`
+	Customs      map[string]interface{}               `json:"customs"`
+}
+
+type ClusterStateMetadata struct {
+	Templates    map[string]interface{} `json:"templates"`
+	Indices      map[string]interface{} `json:"indices"`
+	Repositories map[string]interface{} `json:"repositories"`
+}
+
+type ClusterStateNode struct {
+	State          string  `json:"state"`
+	Primary        bool    `json:"primary"`
+	Node           string  `json:"node"`
+	RelocatingNode *string `json:"relocating_node"`
+	Shard          int     `json:"shard"`
+	Index          string  `json:"index"`
+}
+
+type ClusterStateRoutingTable struct {
+	Indices map[string]interface{} `json:"indices"`
+}
+
+type ClusterStateRoutingNode struct {
+	Unassigned []interface{}          `json:"unassigned"`
+	Nodes      map[string]interface{} `json:"nodes"`
+}

+ 78 - 0
sensitive/src/elastic.v1/cluster_state_test.go

@@ -0,0 +1,78 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"testing"
+)
+
+// TestClusterState is an integration test: it issues a real
+// _cluster/state request through the shared test client (presumably
+// against a locally running Elasticsearch — see setupTestClientAndCreateIndex)
+// and sanity-checks that a response with a cluster name comes back.
+func TestClusterState(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	// Get cluster state
+	res, err := client.ClusterState().Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res == nil {
+		t.Fatalf("expected res to be != nil; got: %v", res)
+	}
+	if res.ClusterName == "" {
+		t.Fatalf("expected a cluster name; got: %q", res.ClusterName)
+	}
+}
+
+// TestClusterStateURLs is a table-driven test of ClusterStateService.buildURL.
+// It runs offline (no HTTP): each case sets indices/metrics directly on the
+// service and checks the generated _cluster/state URL, including the _all
+// defaults and percent-encoding of the comma between multiple indices.
+func TestClusterStateURLs(t *testing.T) {
+	tests := []struct {
+		Service  *ClusterStateService
+		Expected string
+	}{
+		{
+			Service: &ClusterStateService{
+				indices: []string{},
+				metrics: []string{},
+			},
+			Expected: "/_cluster/state/_all/_all",
+		},
+		{
+			Service: &ClusterStateService{
+				indices: []string{"twitter"},
+				metrics: []string{},
+			},
+			Expected: "/_cluster/state/_all/twitter",
+		},
+		{
+			Service: &ClusterStateService{
+				indices: []string{"twitter", "gplus"},
+				metrics: []string{},
+			},
+			Expected: "/_cluster/state/_all/twitter%2Cgplus",
+		},
+		{
+			Service: &ClusterStateService{
+				indices: []string{},
+				metrics: []string{"nodes"},
+			},
+			Expected: "/_cluster/state/nodes/_all",
+		},
+		{
+			Service: &ClusterStateService{
+				indices: []string{"twitter"},
+				metrics: []string{"nodes"},
+			},
+			Expected: "/_cluster/state/nodes/twitter",
+		},
+	}
+
+	for _, test := range tests {
+		got, err := test.Service.buildURL()
+		if err != nil {
+			t.Fatalf("expected no error; got: %v", err)
+		}
+		if got != test.Expected {
+			t.Errorf("expected URL = %q; got: %q", test.Expected, got)
+		}
+	}
+}

+ 181 - 0
sensitive/src/elastic.v1/count.go

@@ -0,0 +1,181 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// CountService is a convenient service for determining the
+// number of documents in an index. Use SearchService with
+// a SearchType of count for counting with queries etc.
+type CountService struct {
+	client  *Client
+	indices []string
+	types   []string
+	query   Query
+	debug   bool
+	pretty  bool
+}
+
+// CountResult is the result returned from using the Count API
+// (http://www.elasticsearch.org/guide/reference/api/count/)
+type CountResult struct {
+	Count  int64      `json:"count"`
+	Shards shardsInfo `json:"_shards,omitempty"`
+}
+
+// NewCountService creates a new CountService bound to the given client.
+// Debug and pretty output are disabled by default.
+func NewCountService(client *Client) *CountService {
+	builder := &CountService{
+		client: client,
+		debug:  false,
+		pretty: false,
+	}
+	return builder
+}
+
+// Index adds a single index to count documents in.
+func (s *CountService) Index(index string) *CountService {
+	if s.indices == nil {
+		s.indices = make([]string, 0)
+	}
+	s.indices = append(s.indices, index)
+	return s
+}
+
+// Indices adds several indices to count documents in.
+func (s *CountService) Indices(indices ...string) *CountService {
+	if s.indices == nil {
+		s.indices = make([]string, 0)
+	}
+	s.indices = append(s.indices, indices...)
+	return s
+}
+
+// Type adds a single document type to restrict the count to.
+func (s *CountService) Type(typ string) *CountService {
+	if s.types == nil {
+		s.types = make([]string, 0)
+	}
+	s.types = append(s.types, typ)
+	return s
+}
+
+// Types adds several document types to restrict the count to.
+func (s *CountService) Types(types ...string) *CountService {
+	if s.types == nil {
+		s.types = make([]string, 0)
+	}
+	s.types = append(s.types, types...)
+	return s
+}
+
+// Query sets an optional query; when set, it is sent as the request body.
+func (s *CountService) Query(query Query) *CountService {
+	s.query = query
+	return s
+}
+
+// Pretty asks Elasticsearch to indent its JSON response.
+func (s *CountService) Pretty(pretty bool) *CountService {
+	s.pretty = pretty
+	return s
+}
+
+// Debug dumps the HTTP request and response to standard output.
+func (s *CountService) Debug(debug bool) *CountService {
+	s.debug = debug
+	return s
+}
+
+// Do executes the count operation against the _count endpoint and
+// returns the number of matching documents. When a query was set via
+// Query, it is sent in the request body; otherwise all documents of the
+// selected indices/types are counted.
+func (s *CountService) Do() (int64, error) {
+	var err error
+
+	// Build url
+	urls := "/"
+
+	// Indices part
+	indexPart := make([]string, 0)
+	for _, index := range s.indices {
+		index, err = uritemplates.Expand("{index}", map[string]string{
+			"index": index,
+		})
+		if err != nil {
+			return 0, err
+		}
+		indexPart = append(indexPart, index)
+	}
+	if len(indexPart) > 0 {
+		urls += strings.Join(indexPart, ",")
+	}
+
+	// Types part
+	typesPart := make([]string, 0)
+	for _, typ := range s.types {
+		typ, err = uritemplates.Expand("{type}", map[string]string{
+			"type": typ,
+		})
+		if err != nil {
+			return 0, err
+		}
+		typesPart = append(typesPart, typ)
+	}
+	if len(typesPart) > 0 {
+		urls += "/" + strings.Join(typesPart, ",")
+	}
+
+	// Search
+	urls += "/_count"
+
+	// Parameters
+	params := make(url.Values)
+	if s.pretty {
+		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+	}
+	if len(params) > 0 {
+		urls += "?" + params.Encode()
+	}
+
+	// Set up a new request
+	req, err := s.client.NewRequest("POST", urls)
+	if err != nil {
+		return 0, err
+	}
+
+	// Set body if there is a query set
+	if s.query != nil {
+		query := make(map[string]interface{})
+		query["query"] = s.query.Source()
+		req.SetBodyJson(query)
+	}
+
+	if s.debug {
+		s.client.dumpRequest((*http.Request)(req))
+	}
+
+	// Get response
+	res, err := s.client.c.Do((*http.Request)(req))
+	if err != nil {
+		return 0, err
+	}
+	// Close the body even when Elasticsearch returns an error status;
+	// deferring before checkResponse avoids leaking the connection.
+	defer res.Body.Close()
+	if err := checkResponse(res); err != nil {
+		return 0, err
+	}
+
+	if s.debug {
+		s.client.dumpResponse(res)
+	}
+
+	ret := new(CountResult)
+	if err := json.NewDecoder(res.Body).Decode(ret); err != nil {
+		return 0, err
+	}
+	// ret is never nil here (new always returns a valid pointer),
+	// so the count can be returned directly.
+	return ret.Count, nil
+}

+ 85 - 0
sensitive/src/elastic.v1/count_test.go

@@ -0,0 +1,85 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"testing"
+)
+
+// TestCount is an integration test: it indexes three tweets into the
+// shared test index, flushes, and verifies the _count endpoint with and
+// without type filters and term queries.
+func TestCount(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+	// Add all documents
+	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Flush so the documents become visible to _count.
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Count documents
+	count, err := client.Count(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if count != 3 {
+		t.Errorf("expected Count = %d; got %d", 3, count)
+	}
+
+	// Count documents restricted to the existing type.
+	count, err = client.Count(testIndexName).Type("tweet").Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if count != 3 {
+		t.Errorf("expected Count = %d; got %d", 3, count)
+	}
+
+	// Counting a non-existent type must yield zero, not an error.
+	count, err = client.Count(testIndexName).Type("gezwitscher").Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if count != 0 {
+		t.Errorf("expected Count = %d; got %d", 0, count)
+	}
+
+	// Count with query
+	query := NewTermQuery("user", "olivere")
+	count, err = client.Count(testIndexName).Query(query).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if count != 2 {
+		t.Errorf("expected Count = %d; got %d", 2, count)
+	}
+
+	// Count with query and type
+	query = NewTermQuery("user", "olivere")
+	count, err = client.Count(testIndexName).Type("tweet").Query(query).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if count != 2 {
+		t.Errorf("expected Count = %d; got %d", 2, count)
+	}
+}

+ 96 - 0
sensitive/src/elastic.v1/create_index.go

@@ -0,0 +1,96 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"net/http"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// CreateIndexService creates an index with an optional JSON body
+// (settings/mappings) via PUT /{index}/.
+type CreateIndexService struct {
+	client *Client
+	index  string
+	body   string
+	pretty bool
+	debug  bool
+}
+
+// NewCreateIndexService creates a new CreateIndexService bound to the
+// given client.
+func NewCreateIndexService(client *Client) *CreateIndexService {
+	builder := &CreateIndexService{
+		client: client,
+	}
+	return builder
+}
+
+// Index sets the name of the index to create.
+func (b *CreateIndexService) Index(index string) *CreateIndexService {
+	b.index = index
+	return b
+}
+
+// Body sets the raw JSON request body, e.g. index settings and mappings.
+func (b *CreateIndexService) Body(body string) *CreateIndexService {
+	b.body = body
+	return b
+}
+
+// Pretty asks Elasticsearch to indent its JSON response.
+func (b *CreateIndexService) Pretty(pretty bool) *CreateIndexService {
+	b.pretty = pretty
+	return b
+}
+
+// Debug dumps the HTTP request and response to standard output.
+func (b *CreateIndexService) Debug(debug bool) *CreateIndexService {
+	b.debug = debug
+	return b
+}
+
+// Do executes the create-index request (PUT /{index}/) and returns the
+// acknowledgement from the cluster.
+func (b *CreateIndexService) Do() (*CreateIndexResult, error) {
+	// Build url
+	urls, err := uritemplates.Expand("/{index}/", map[string]string{
+		"index": b.index,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// Set up a new request
+	req, err := b.client.NewRequest("PUT", urls)
+	if err != nil {
+		return nil, err
+	}
+
+	// Set body
+	req.SetBodyString(b.body)
+
+	if b.debug {
+		b.client.dumpRequest((*http.Request)(req))
+	}
+
+	// Get response
+	res, err := b.client.c.Do((*http.Request)(req))
+	if err != nil {
+		return nil, err
+	}
+	// Close the body even when Elasticsearch returns an error status;
+	// deferring before checkResponse avoids leaking the connection.
+	defer res.Body.Close()
+	if err := checkResponse(res); err != nil {
+		return nil, err
+	}
+
+	if b.debug {
+		b.client.dumpResponse(res)
+	}
+
+	ret := new(CreateIndexResult)
+	if err := json.NewDecoder(res.Body).Decode(ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// -- Result of a create index request.
+
+// CreateIndexResult is the response of CreateIndexService.Do;
+// Acknowledged reports whether the cluster acknowledged the request.
+type CreateIndexResult struct {
+	Acknowledged bool `json:"acknowledged"`
+}

+ 145 - 0
sensitive/src/elastic.v1/delete.go

@@ -0,0 +1,145 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// DeleteService deletes a single document by index, type, and id
+// via DELETE /{index}/{type}/{id}.
+type DeleteService struct {
+	client  *Client
+	index   string
+	_type   string
+	id      string
+	routing string
+	refresh *bool
+	version *int
+	pretty  bool
+	debug   bool
+}
+
+// NewDeleteService creates a new DeleteService bound to the given client.
+func NewDeleteService(client *Client) *DeleteService {
+	builder := &DeleteService{
+		client: client,
+	}
+	return builder
+}
+
+// Index sets the index of the document to delete.
+func (s *DeleteService) Index(index string) *DeleteService {
+	s.index = index
+	return s
+}
+
+// Type sets the type of the document to delete.
+func (s *DeleteService) Type(_type string) *DeleteService {
+	s._type = _type
+	return s
+}
+
+// Id sets the id of the document to delete.
+func (s *DeleteService) Id(id string) *DeleteService {
+	s.id = id
+	return s
+}
+
+// Parent sets the parent id, which is used as the routing value —
+// but only if no routing value has been set yet.
+func (s *DeleteService) Parent(parent string) *DeleteService {
+	if s.routing == "" {
+		s.routing = parent
+	}
+	return s
+}
+
+// Refresh, when true, asks Elasticsearch to refresh the shard after the delete.
+func (s *DeleteService) Refresh(refresh bool) *DeleteService {
+	s.refresh = &refresh
+	return s
+}
+
+// Version sets an explicit document version for optimistic concurrency control.
+func (s *DeleteService) Version(version int) *DeleteService {
+	s.version = &version
+	return s
+}
+
+// Pretty asks Elasticsearch to indent its JSON response.
+func (s *DeleteService) Pretty(pretty bool) *DeleteService {
+	s.pretty = pretty
+	return s
+}
+
+// Debug dumps the HTTP request and response to standard output.
+func (s *DeleteService) Debug(debug bool) *DeleteService {
+	s.debug = debug
+	return s
+}
+
+// Do executes the delete request against /{index}/{type}/{id} and
+// returns the decoded result. Found is false when the document did
+// not exist.
+func (s *DeleteService) Do() (*DeleteResult, error) {
+	// Build url
+	urls, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{
+		"index": s.index,
+		"type":  s._type,
+		"id":    s.id,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// Parameters
+	params := make(url.Values)
+	if s.refresh != nil {
+		params.Set("refresh", fmt.Sprintf("%v", *s.refresh))
+	}
+	if s.version != nil {
+		params.Set("version", fmt.Sprintf("%d", *s.version))
+	}
+	if s.routing != "" {
+		// routing is already a string; no Sprintf round-trip needed.
+		params.Set("routing", s.routing)
+	}
+	if s.pretty {
+		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+	}
+	if len(params) > 0 {
+		urls += "?" + params.Encode()
+	}
+
+	// Set up a new request
+	req, err := s.client.NewRequest("DELETE", urls)
+	if err != nil {
+		return nil, err
+	}
+
+	if s.debug {
+		s.client.dumpRequest((*http.Request)(req))
+	}
+
+	// Get response
+	res, err := s.client.c.Do((*http.Request)(req))
+	if err != nil {
+		return nil, err
+	}
+	// Close the body even when Elasticsearch returns an error status;
+	// deferring before checkResponse avoids leaking the connection.
+	defer res.Body.Close()
+	if err := checkResponse(res); err != nil {
+		return nil, err
+	}
+
+	if s.debug {
+		s.client.dumpResponse(res)
+	}
+
+	ret := new(DeleteResult)
+	if err := json.NewDecoder(res.Body).Decode(ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// -- Result of a delete request.
+
+// DeleteResult is the response of DeleteService.Do. Found reports
+// whether the document existed before the delete.
+type DeleteResult struct {
+	Found   bool   `json:"found"`
+	Index   string `json:"_index"`
+	Type    string `json:"_type"`
+	Id      string `json:"_id"`
+	Version int64  `json:"_version"`
+}

+ 320 - 0
sensitive/src/elastic.v1/delete_by_query.go

@@ -0,0 +1,320 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// DeleteByQueryService deletes documents that match a query.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/docs-delete-by-query.html.
+type DeleteByQueryService struct {
+	client            *Client
+	indices           []string
+	types             []string
+	analyzer          string
+	consistency       string
+	defaultOper       string // sent as the default_operator URL parameter
+	df                string
+	ignoreUnavailable *bool
+	allowNoIndices    *bool
+	expandWildcards   string
+	replication       string
+	routing           string
+	timeout           string
+	debug             bool
+	pretty            bool
+	q                 string // Lucene query string, sent as the q URL parameter
+	query             Query  // programmatic query, sent in the request body
+}
+
+// NewDeleteByQueryService creates a new DeleteByQueryService.
+// You typically use the client's DeleteByQuery to get a reference to
+// the service.
+func NewDeleteByQueryService(client *Client) *DeleteByQueryService {
+	builder := &DeleteByQueryService{
+		client: client,
+	}
+	return builder
+}
+
+// Index limits the delete-by-query to a single index.
+// You can use _all to perform the operation on all indices.
+func (s *DeleteByQueryService) Index(index string) *DeleteByQueryService {
+	if s.indices == nil {
+		s.indices = make([]string, 0)
+	}
+	s.indices = append(s.indices, index)
+	return s
+}
+
+// Indices sets the indices on which to perform the delete operation.
+func (s *DeleteByQueryService) Indices(indices ...string) *DeleteByQueryService {
+	if s.indices == nil {
+		s.indices = make([]string, 0)
+	}
+	s.indices = append(s.indices, indices...)
+	return s
+}
+
+// Type limits the delete operation to the given type.
+func (s *DeleteByQueryService) Type(typ string) *DeleteByQueryService {
+	if s.types == nil {
+		s.types = make([]string, 0)
+	}
+	s.types = append(s.types, typ)
+	return s
+}
+
+// Types limits the delete operation to the given types.
+func (s *DeleteByQueryService) Types(types ...string) *DeleteByQueryService {
+	if s.types == nil {
+		s.types = make([]string, 0)
+	}
+	s.types = append(s.types, types...)
+	return s
+}
+
+// Analyzer to use for the query string.
+func (s *DeleteByQueryService) Analyzer(analyzer string) *DeleteByQueryService {
+	s.analyzer = analyzer
+	return s
+}
+
+// Consistency represents the specific write consistency setting for the operation.
+// It can be one, quorum, or all.
+func (s *DeleteByQueryService) Consistency(consistency string) *DeleteByQueryService {
+	s.consistency = consistency
+	return s
+}
+
+// DefaultOperator for query string query (AND or OR).
+func (s *DeleteByQueryService) DefaultOperator(defaultOperator string) *DeleteByQueryService {
+	s.defaultOper = defaultOperator
+	return s
+}
+
+// DF is the field to use as default where no field prefix is given in the query string.
+func (s *DeleteByQueryService) DF(defaultField string) *DeleteByQueryService {
+	s.df = defaultField
+	return s
+}
+
+// DefaultField is the field to use as default where no field prefix is given in the query string.
+// It is an alias to the DF func.
+func (s *DeleteByQueryService) DefaultField(defaultField string) *DeleteByQueryService {
+	s.df = defaultField
+	return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *DeleteByQueryService) IgnoreUnavailable(ignore bool) *DeleteByQueryService {
+	s.ignoreUnavailable = &ignore
+	return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices (including the _all string
+// or when no indices have been specified).
+func (s *DeleteByQueryService) AllowNoIndices(allow bool) *DeleteByQueryService {
+	s.allowNoIndices = &allow
+	return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression to
+// concrete indices that are open, closed or both. It can be "open" or "closed".
+func (s *DeleteByQueryService) ExpandWildcards(expand string) *DeleteByQueryService {
+	s.expandWildcards = expand
+	return s
+}
+
+// Replication sets a specific replication type (sync or async).
+func (s *DeleteByQueryService) Replication(replication string) *DeleteByQueryService {
+	s.replication = replication
+	return s
+}
+
+// Q specifies the query in Lucene query string syntax. It is sent as
+// the q URL parameter. You can also use
+// Query to programmatically specify the query.
+func (s *DeleteByQueryService) Q(query string) *DeleteByQueryService {
+	s.q = query
+	return s
+}
+
+// QueryString is an alias to Q. Notice that you can also use Query to
+// programmatically set the query.
+func (s *DeleteByQueryService) QueryString(query string) *DeleteByQueryService {
+	s.q = query
+	return s
+}
+
+// Routing sets a specific routing value.
+func (s *DeleteByQueryService) Routing(routing string) *DeleteByQueryService {
+	s.routing = routing
+	return s
+}
+
+// Timeout sets an explicit operation timeout, e.g. "1s" or "10000ms".
+func (s *DeleteByQueryService) Timeout(timeout string) *DeleteByQueryService {
+	s.timeout = timeout
+	return s
+}
+
+// Pretty indents the JSON output from Elasticsearch. Use in combination
+// with Debug to see the actual output of Elasticsearch.
+func (s *DeleteByQueryService) Pretty(pretty bool) *DeleteByQueryService {
+	s.pretty = pretty
+	return s
+}
+
+// Debug prints HTTP request and response to os.Stdout.
+func (s *DeleteByQueryService) Debug(debug bool) *DeleteByQueryService {
+	s.debug = debug
+	return s
+}
+
+// Query sets the query programmatically; it is serialized into the
+// request body when the operation is executed.
+func (s *DeleteByQueryService) Query(query Query) *DeleteByQueryService {
+	s.query = query
+	return s
+}
+
+// Do executes the delete-by-query operation against the _query endpoint
+// and returns the per-index shard results.
+func (s *DeleteByQueryService) Do() (*DeleteByQueryResult, error) {
+	var err error
+
+	// Build url
+	urls := "/"
+
+	// Indices part
+	indexPart := make([]string, 0)
+	for _, index := range s.indices {
+		index, err = uritemplates.Expand("{index}", map[string]string{
+			"index": index,
+		})
+		if err != nil {
+			return nil, err
+		}
+		indexPart = append(indexPart, index)
+	}
+	if len(indexPart) > 0 {
+		urls += strings.Join(indexPart, ",")
+	}
+
+	// Types part
+	typesPart := make([]string, 0)
+	for _, typ := range s.types {
+		typ, err = uritemplates.Expand("{type}", map[string]string{
+			"type": typ,
+		})
+		if err != nil {
+			return nil, err
+		}
+		typesPart = append(typesPart, typ)
+	}
+	if len(typesPart) > 0 {
+		urls += "/" + strings.Join(typesPart, ",")
+	}
+
+	// Search
+	urls += "/_query"
+
+	// Parameters
+	params := make(url.Values)
+	if s.analyzer != "" {
+		params.Set("analyzer", s.analyzer)
+	}
+	if s.consistency != "" {
+		params.Set("consistency", s.consistency)
+	}
+	if s.defaultOper != "" {
+		params.Set("default_operator", s.defaultOper)
+	}
+	if s.df != "" {
+		params.Set("df", s.df)
+	}
+	if s.ignoreUnavailable != nil {
+		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+	}
+	if s.allowNoIndices != nil {
+		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+	}
+	if s.expandWildcards != "" {
+		params.Set("expand_wildcards", s.expandWildcards)
+	}
+	if s.replication != "" {
+		params.Set("replication", s.replication)
+	}
+	if s.routing != "" {
+		params.Set("routing", s.routing)
+	}
+	if s.timeout != "" {
+		params.Set("timeout", s.timeout)
+	}
+	if s.pretty {
+		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+	}
+	if s.q != "" {
+		params.Set("q", s.q)
+	}
+	if len(params) > 0 {
+		urls += "?" + params.Encode()
+	}
+
+	// Set up a new request
+	req, err := s.client.NewRequest("DELETE", urls)
+	if err != nil {
+		return nil, err
+	}
+
+	// Set body if there is a query set
+	// NOTE(review): if SetBodyJson can fail, its error is silently
+	// ignored here (as elsewhere in this file) — verify.
+	if s.query != nil {
+		query := make(map[string]interface{})
+		query["query"] = s.query.Source()
+		req.SetBodyJson(query)
+	}
+
+	if s.debug {
+		s.client.dumpRequest((*http.Request)(req))
+	}
+
+	// Get response
+	res, err := s.client.c.Do((*http.Request)(req))
+	if err != nil {
+		return nil, err
+	}
+	// Close the body even when Elasticsearch returns an error status;
+	// deferring before checkResponse avoids leaking the connection.
+	defer res.Body.Close()
+	if err := checkResponse(res); err != nil {
+		return nil, err
+	}
+
+	if s.debug {
+		s.client.dumpResponse(res)
+	}
+
+	ret := new(DeleteByQueryResult)
+	if err := json.NewDecoder(res.Body).Decode(ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// DeleteByQueryResult is the outcome of executing Do with DeleteByQueryService.
+// The _indices map is keyed by index name.
+type DeleteByQueryResult struct {
+	Indices map[string]IndexDeleteByQueryResult `json:"_indices"`
+}
+
+// IndexDeleteByQueryResult is the result of a delete-by-query for a specific
+// index.
+type IndexDeleteByQueryResult struct {
+	Shards shardsInfo `json:"_shards"`
+}

+ 76 - 0
sensitive/src/elastic.v1/delete_by_query_test.go

@@ -0,0 +1,76 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"testing"
+)
+
+// TestDeleteByQuery is an integration test: it indexes three tweets,
+// deletes the one matching user "sandrae" via delete-by-query, and
+// verifies the remaining document count.
+func TestDeleteByQuery(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+	// Add all documents
+	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Flush so the documents become visible.
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Count documents
+	count, err := client.Count(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if count != 3 {
+		t.Fatalf("expected count = %d; got: %d", 3, count)
+	}
+
+	// Delete all documents by sandrae
+	q := NewTermQuery("user", "sandrae")
+	res, err := client.DeleteByQuery().Index(testIndexName).Type("tweet").Query(q).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res == nil {
+		t.Fatalf("expected response != nil; got: %v", res)
+	}
+	idx, found := res.Indices[testIndexName]
+	if !found {
+		t.Errorf("expected Found = true; got: %v", found)
+	}
+	if idx.Shards.Failed > 0 {
+		t.Errorf("expected no failed shards; got: %d", idx.Shards.Failed)
+	}
+
+	// Flush again so the deletion is reflected in _count.
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	count, err = client.Count(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if count != 2 {
+		t.Fatalf("expected Count = %d; got: %d", 2, count)
+	}
+}

+ 66 - 0
sensitive/src/elastic.v1/delete_index.go

@@ -0,0 +1,66 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"net/http"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// DeleteIndexService deletes an entire index via DELETE /{index}/.
+type DeleteIndexService struct {
+	client *Client
+	index  string
+}
+
+// NewDeleteIndexService creates a new DeleteIndexService bound to the
+// given client.
+func NewDeleteIndexService(client *Client) *DeleteIndexService {
+	builder := &DeleteIndexService{
+		client: client,
+	}
+	return builder
+}
+
+// Index sets the name of the index to delete.
+func (b *DeleteIndexService) Index(index string) *DeleteIndexService {
+	b.index = index
+	return b
+}
+
+// Do executes the delete-index request (DELETE /{index}/) and returns
+// the acknowledgement from the cluster.
+func (b *DeleteIndexService) Do() (*DeleteIndexResult, error) {
+	// Build url
+	urls, err := uritemplates.Expand("/{index}/", map[string]string{
+		"index": b.index,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// Set up a new request
+	req, err := b.client.NewRequest("DELETE", urls)
+	if err != nil {
+		return nil, err
+	}
+
+	// Get response
+	res, err := b.client.c.Do((*http.Request)(req))
+	if err != nil {
+		return nil, err
+	}
+	// Close the body even when Elasticsearch returns an error status;
+	// deferring before checkResponse avoids leaking the connection.
+	defer res.Body.Close()
+	if err := checkResponse(res); err != nil {
+		return nil, err
+	}
+	ret := new(DeleteIndexResult)
+	if err := json.NewDecoder(res.Body).Decode(ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// -- Result of a delete index request.
+
+// DeleteIndexResult is the response of DeleteIndexService.Do;
+// Acknowledged reports whether the cluster acknowledged the request.
+type DeleteIndexResult struct {
+	Acknowledged bool `json:"acknowledged"`
+}

+ 142 - 0
sensitive/src/elastic.v1/delete_template.go

@@ -0,0 +1,142 @@
+// Copyright 2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// DeleteTemplateService deletes a search template. More information can
+// be found at http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-template.html.
+//
+// NOTE(review): the debug and pretty fields have no exported setters in
+// this file — verify whether they are meant to be configurable.
+type DeleteTemplateService struct {
+	client      *Client
+	debug       bool
+	pretty      bool
+	id          string
+	version     *int
+	versionType string
+}
+
+// NewDeleteTemplateService creates a new DeleteTemplateService.
+func NewDeleteTemplateService(client *Client) *DeleteTemplateService {
+	return &DeleteTemplateService{
+		client: client,
+	}
+}
+
+// Id is the template ID.
+func (s *DeleteTemplateService) Id(id string) *DeleteTemplateService {
+	s.id = id
+	return s
+}
+
+// Version an explicit version number for concurrency control.
+func (s *DeleteTemplateService) Version(version int) *DeleteTemplateService {
+	s.version = &version
+	return s
+}
+
+// VersionType specifies a version type.
+func (s *DeleteTemplateService) VersionType(versionType string) *DeleteTemplateService {
+	s.versionType = versionType
+	return s
+}
+
+// buildURL builds the URL for the operation: /_search/template/{id},
+// with optional version and version_type query parameters.
+func (s *DeleteTemplateService) buildURL() (string, error) {
+	// Build URL
+	urls, err := uritemplates.Expand("/_search/template/{id}", map[string]string{
+		"id": s.id,
+	})
+	if err != nil {
+		return "", err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.version != nil {
+		params.Set("version", fmt.Sprintf("%d", *s.version))
+	}
+	if s.versionType != "" {
+		params.Set("version_type", s.versionType)
+	}
+	if len(params) > 0 {
+		urls += "?" + params.Encode()
+	}
+
+	return urls, nil
+}
+
+// Validate checks if the operation is valid. Currently only the
+// template Id is required; all other fields are optional.
+func (s *DeleteTemplateService) Validate() error {
+	var invalid []string
+	if s.id == "" {
+		invalid = append(invalid, "Id")
+	}
+	if len(invalid) > 0 {
+		return fmt.Errorf("missing required fields: %v", invalid)
+	}
+	return nil
+}
+
+// Do executes the operation: it validates the parameters, issues
+// DELETE /_search/template/{id}, and decodes the response.
+func (s *DeleteTemplateService) Do() (*DeleteTemplateResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	urls, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Setup HTTP request
+	req, err := s.client.NewRequest("DELETE", urls)
+	if err != nil {
+		return nil, err
+	}
+
+	// Debug output?
+	if s.debug {
+		s.client.dumpRequest((*http.Request)(req))
+	}
+
+	// Get HTTP response
+	res, err := s.client.c.Do((*http.Request)(req))
+	if err != nil {
+		return nil, err
+	}
+	// Close the body even when Elasticsearch returns an error status;
+	// deferring before checkResponse avoids leaking the connection.
+	defer res.Body.Close()
+	if err := checkResponse(res); err != nil {
+		return nil, err
+	}
+
+	// Debug output?
+	if s.debug {
+		s.client.dumpResponse(res)
+	}
+	// Return operation response
+	resp := new(DeleteTemplateResponse)
+	if err := json.NewDecoder(res.Body).Decode(resp); err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// DeleteTemplateResponse is the response of DeleteTemplateService.Do.
+// NOTE(review): Version is declared int here while DeleteResult uses
+// int64 for _version — verify whether this inconsistency is intentional.
+type DeleteTemplateResponse struct {
+	Found   bool   `json:"found"`
+	Index   string `json:"_index"`
+	Type    string `json:"_type"`
+	Id      string `json:"_id"`
+	Version int    `json:"_version"`
+}

+ 83 - 0
sensitive/src/elastic.v1/delete_test.go

@@ -0,0 +1,83 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"testing"
+)
+
+// TestDelete is an integration test: it indexes three tweets, deletes
+// an existing document and a non-existent one, and checks the Found
+// flag and remaining counts in both cases.
+func TestDelete(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+	// Add all documents
+	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Flush so the documents become visible to _count.
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Count documents
+	count, err := client.Count(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if count != 3 {
+		t.Errorf("expected Count = %d; got %d", 3, count)
+	}
+
+	// Delete document 1
+	res, err := client.Delete().Index(testIndexName).Type("tweet").Id("1").Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res.Found != true {
+		t.Errorf("expected Found = true; got %v", res.Found)
+	}
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	count, err = client.Count(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if count != 2 {
+		t.Errorf("expected Count = %d; got %d", 2, count)
+	}
+
+	// Delete non existent document 99; must report Found=false, not an error.
+	res, err = client.Delete().Index(testIndexName).Type("tweet").Id("99").Refresh(true).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res.Found != false {
+		t.Errorf("expected Found = false; got %v", res.Found)
+	}
+	count, err = client.Count(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if count != 2 {
+		t.Errorf("expected Count = %d; got %d", 2, count)
+	}
+}

+ 55 - 0
sensitive/src/elastic.v1/doc.go

@@ -0,0 +1,55 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+/*
+Package elastic provides an interface to the Elasticsearch server
+(http://www.elasticsearch.org/).
+
+Notice: This is version 1 of Elastic. There are newer versions of Elastic
+available on GitHub at https://github.com/olivere/elastic. Version 1 is
+maintained, but new development happens in newer versions.
+
+The first thing you do is to create a Client. The client takes a http.Client
+and (optionally) a list of URLs to the Elasticsearch servers as arguments.
+If the list of URLs is empty, http://localhost:9200 is used by default.
+You typically create one client for your app.
+
+	client, err := elastic.NewClient(http.DefaultClient)
+	if err != nil {
+		// Handle error
+	}
+
+Notice that you can pass your own http.Client implementation here. You can
+also pass more than one URL to a client. Elastic pings the URLs periodically
+and takes the first to succeed. By doing this periodically, Elastic provides
+automatic failover, e.g. when an Elasticsearch server goes down during
+updates.
+
+If no Elasticsearch server is available, services will fail when creating
+a new request and will return ErrNoClient. While this method is not very
+sophisticated and might result in timeouts, it is robust enough for our
+use cases. Pull requests are welcome.
+
+	client, err := elastic.NewClient(http.DefaultClient, "http://1.2.3.4:9200", "http://1.2.3.5:9200")
+	if err != nil {
+		// Handle error
+	}
+
+A Client provides services. The services usually come with a variety of
+methods to prepare the query and a Do function to execute it against the
+Elasticsearch REST interface and return a response. Here is an example
+of the IndexExists service that checks if a given index already exists.
+
+	exists, err := client.IndexExists("twitter").Do()
+	if err != nil {
+		// Handle error
+	}
+	if !exists {
+		// Index does not exist yet.
+	}
+
+Look up the documentation for Client to get an idea of the services provided
+and what kinds of responses you get when executing the Do function of a service.
+*/
+package elastic

+ 39 - 0
sensitive/src/elastic.v1/errors.go

@@ -0,0 +1,39 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+)
+
+func checkResponse(res *http.Response) error {
+	// 200-299 and 404 are valid status codes
+	if (res.StatusCode >= 200 && res.StatusCode <= 299) ||
+		res.StatusCode == http.StatusNotFound {
+		return nil
+	}
+	slurp, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		return fmt.Errorf("elastic: got HTTP response code %d and error reading body: %v", res.StatusCode, err)
+	}
+	errReply := new(Error)
+	err = json.Unmarshal(slurp, errReply)
+	if err == nil && errReply != nil {
+		return errReply.Error()
+	}
+	return nil
+}
+
+type Error struct {
+	Status  int    `json:"status"`
+	Message string `json:"error"`
+}
+
+func (e *Error) Error() error {
+	return fmt.Errorf("elastic: Error %d: %s", e.Status, e.Message)
+}

+ 461 - 0
sensitive/src/elastic.v1/example_test.go

@@ -0,0 +1,461 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic_test
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"net/http"
+	"time"
+
+	"github.com/olivere/elastic"
+)
+
// Tweet is the document type used throughout the examples. The JSON
// tags map the struct fields onto the field names stored in
// Elasticsearch; omitempty fields are dropped from the document when
// they hold their zero value.
type Tweet struct {
	User     string                `json:"user"`
	Message  string                `json:"message"`
	Retweets int                   `json:"retweets"`
	Image    string                `json:"image,omitempty"`
	Created  time.Time             `json:"created,omitempty"`
	Tags     []string              `json:"tags,omitempty"`
	Location string                `json:"location,omitempty"`
	Suggest  *elastic.SuggestField `json:"suggest_field,omitempty"`
}
+
// Example demonstrates a complete round trip against a local
// Elasticsearch server: connecting, pinging, creating an index,
// indexing documents, getting, searching, updating, and finally
// deleting the index.
func Example() {
	// Obtain a client. You can provide your own HTTP client here.
	client, err := elastic.NewClient(http.DefaultClient)
	if err != nil {
		// Handle error
		panic(err)
	}

	// Ping the Elasticsearch server to get e.g. the version number
	info, code, err := client.Ping().Do()
	if err != nil {
		// Handle error
		panic(err)
	}
	fmt.Printf("Elasticsearch returned with code %d and version %s", code, info.Version.Number)

	// Getting the ES version number is quite common, so there's a shortcut
	esversion, err := client.ElasticsearchVersion("http://127.0.0.1:9200")
	if err != nil {
		// Handle error
		panic(err)
	}
	fmt.Printf("Elasticsearch version %s", esversion)

	// Use the IndexExists service to check if a specified index exists.
	exists, err := client.IndexExists("twitter").Do()
	if err != nil {
		// Handle error
		panic(err)
	}
	if !exists {
		// Create a new index.
		createIndex, err := client.CreateIndex("twitter").Do()
		if err != nil {
			// Handle error
			panic(err)
		}
		if !createIndex.Acknowledged {
			// Not acknowledged
		}
	}

	// Index a tweet (using JSON serialization)
	tweet1 := Tweet{User: "olivere", Message: "Take Five", Retweets: 0}
	put1, err := client.Index().
		Index("twitter").
		Type("tweet").
		Id("1").
		BodyJson(tweet1).
		Do()
	if err != nil {
		// Handle error
		panic(err)
	}
	fmt.Printf("Indexed tweet %s to index %s, type %s\n", put1.Id, put1.Index, put1.Type)

	// Index a second tweet (by string)
	tweet2 := `{"user" : "olivere", "message" : "It's a Raggy Waltz"}`
	put2, err := client.Index().
		Index("twitter").
		Type("tweet").
		Id("2").
		BodyString(tweet2).
		Do()
	if err != nil {
		// Handle error
		panic(err)
	}
	fmt.Printf("Indexed tweet %s to index %s, type %s\n", put2.Id, put2.Index, put2.Type)

	// Get tweet with specified ID
	get1, err := client.Get().
		Index("twitter").
		Type("tweet").
		Id("1").
		Do()
	if err != nil {
		// Handle error
		panic(err)
	}
	if get1.Found {
		fmt.Printf("Got document %s in version %d from index %s, type %s\n", get1.Id, get1.Version, get1.Index, get1.Type)
	}

	// Flush to make sure the documents got written.
	_, err = client.Flush().Index("twitter").Do()
	if err != nil {
		panic(err)
	}

	// Search with a term query
	termQuery := elastic.NewTermQuery("user", "olivere")
	searchResult, err := client.Search().
		Index("twitter").   // search in index "twitter"
		Query(&termQuery).  // specify the query
		Sort("user", true). // sort by "user" field, ascending
		From(0).Size(10).   // take documents 0-9
		Debug(true).        // print request and response to stdout
		Pretty(true).       // pretty print request and response JSON
		Do()                // execute
	if err != nil {
		// Handle error
		panic(err)
	}

	// searchResult is of type SearchResult and returns hits, suggestions,
	// and all kinds of other information from Elasticsearch.
	fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis)

	// Number of hits
	if searchResult.Hits != nil {
		fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits)

		// Iterate through results
		for _, hit := range searchResult.Hits.Hits {
			// hit.Index contains the name of the index

			// Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}).
			var t Tweet
			err := json.Unmarshal(*hit.Source, &t)
			if err != nil {
				// Deserialization failed
			}

			// Work with tweet
			fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
		}
	} else {
		// No hits
		fmt.Print("Found no tweets\n")
	}

	// Update a tweet by the update API of Elasticsearch.
	// We just increment the number of retweets.
	// Upsert ensures the document exists with retweets=0 if it is missing.
	update, err := client.Update().Index("twitter").Type("tweet").Id("1").
		Script("ctx._source.retweets += num").
		ScriptParams(map[string]interface{}{"num": 1}).
		Upsert(map[string]interface{}{"retweets": 0}).
		Do()
	if err != nil {
		// Handle error
		panic(err)
	}
	fmt.Printf("New version of tweet %q is now %d", update.Id, update.Version)

	// ...

	// Delete an index.
	deleteIndex, err := client.DeleteIndex("twitter").Do()
	if err != nil {
		// Handle error
		panic(err)
	}
	if !deleteIndex.Acknowledged {
		// Not acknowledged
	}
}
+
+func ExampleClient_NewClient_default() {
+	// Obtain a client to the Elasticsearch instance on http://localhost:9200.
+	client, err := elastic.NewClient(http.DefaultClient)
+	if err != nil {
+		// Handle error
+		fmt.Printf("connection failed: %v\n", err)
+	} else {
+		fmt.Println("connected")
+	}
+	_ = client
+	// Output:
+	// connected
+}
+
+func ExampleClient_NewClient_cluster() {
+	// Obtain a client for an Elasticsearch cluster of two nodes,
+	// running on 10.0.1.1 and 10.0.1.2.
+	client, err := elastic.NewClient(http.DefaultClient, "http://10.0.1.1:9200", "http://10.0.1.2:9200")
+	if err != nil {
+		// Handle error
+		panic(err)
+	}
+	_ = client
+}
+
+func ExampleIndexExistsService() {
+	// Get a client to the local Elasticsearch instance.
+	client, err := elastic.NewClient(http.DefaultClient)
+	if err != nil {
+		// Handle error
+		panic(err)
+	}
+	// Use the IndexExists service to check if the index "twitter" exists.
+	exists, err := client.IndexExists("twitter").Do()
+	if err != nil {
+		// Handle error
+		panic(err)
+	}
+	if exists {
+		// ...
+	}
+}
+
+func ExampleCreateIndexService() {
+	// Get a client to the local Elasticsearch instance.
+	client, err := elastic.NewClient(http.DefaultClient)
+	if err != nil {
+		// Handle error
+		panic(err)
+	}
+	// Create a new index.
+	createIndex, err := client.CreateIndex("twitter").Do()
+	if err != nil {
+		// Handle error
+		panic(err)
+	}
+	if !createIndex.Acknowledged {
+		// Not acknowledged
+	}
+}
+
+func ExampleDeleteIndexService() {
+	// Get a client to the local Elasticsearch instance.
+	client, err := elastic.NewClient(http.DefaultClient)
+	if err != nil {
+		// Handle error
+		panic(err)
+	}
+	// Delete an index.
+	deleteIndex, err := client.DeleteIndex("twitter").Do()
+	if err != nil {
+		// Handle error
+		panic(err)
+	}
+	if !deleteIndex.Acknowledged {
+		// Not acknowledged
+	}
+}
+
+func ExampleSearchService() {
+	// Get a client to the local Elasticsearch instance.
+	client, err := elastic.NewClient(http.DefaultClient)
+	if err != nil {
+		// Handle error
+		panic(err)
+	}
+
+	// Search with a term query
+	termQuery := elastic.NewTermQuery("user", "olivere")
+	searchResult, err := client.Search().
+		Index("twitter").   // search in index "twitter"
+		Query(&termQuery).  // specify the query
+		Sort("user", true). // sort by "user" field, ascending
+		From(0).Size(10).   // take documents 0-9
+		Debug(true).        // print request and response to stdout
+		Pretty(true).       // pretty print request and response JSON
+		Do()                // execute
+	if err != nil {
+		// Handle error
+		panic(err)
+	}
+
+	// searchResult is of type SearchResult and returns hits, suggestions,
+	// and all kinds of other information from Elasticsearch.
+	fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis)
+
+	// Number of hits
+	if searchResult.Hits != nil {
+		fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits)
+
+		// Iterate through results
+		for _, hit := range searchResult.Hits.Hits {
+			// hit.Index contains the name of the index
+
+			// Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}).
+			var t Tweet
+			err := json.Unmarshal(*hit.Source, &t)
+			if err != nil {
+				// Deserialization failed
+			}
+
+			// Work with tweet
+			fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
+		}
+	} else {
+		// No hits
+		fmt.Print("Found no tweets\n")
+	}
+}
+
+func ExampleAggregations() {
+	// Get a client to the local Elasticsearch instance.
+	client, err := elastic.NewClient(http.DefaultClient)
+	if err != nil {
+		// Handle error
+		panic(err)
+	}
+
+	// Create an aggregation for users and a sub-aggregation for a date histogram of tweets (per year).
+	timeline := elastic.NewTermsAggregation().Field("user").Size(10).OrderByCountDesc()
+	histogram := elastic.NewDateHistogramAggregation().Field("created").Interval("year")
+	timeline = timeline.SubAggregation("history", histogram)
+
+	// Search with a term query
+	searchResult, err := client.Search().
+		Index("twitter").                  // search in index "twitter"
+		Query(elastic.NewMatchAllQuery()). // return all results, but ...
+		SearchType("count").               // ... do not return hits, just the count
+		Aggregation("timeline", timeline). // add our aggregation to the query
+		Debug(true).                       // print request and response to stdout
+		Pretty(true).                      // pretty print request and response JSON
+		Do()                               // execute
+	if err != nil {
+		// Handle error
+		panic(err)
+	}
+
+	// Access "timeline" aggregate in search result.
+	agg, found := searchResult.Aggregations.Terms("timeline")
+	if !found {
+		log.Fatalf("we sould have a terms aggregation called %q", "timeline")
+	}
+	for _, userBucket := range agg.Buckets {
+		// Every bucket should have the user field as key.
+		user := userBucket.Key
+
+		// The sub-aggregation history should have the number of tweets per year.
+		histogram, found := userBucket.DateHistogram("history")
+		if found {
+			for _, year := range histogram.Buckets {
+				fmt.Printf("user %q has %d tweets in %q\n", user, year.DocCount, year.KeyAsString)
+			}
+		}
+	}
+}
+
+func ExamplePutTemplateService() {
+	client, err := elastic.NewClient(http.DefaultClient)
+	if err != nil {
+		panic(err)
+	}
+
+	// Create search template
+	tmpl := `{"template":{"query":{"match":{"title":"{{query_string}}"}}}}`
+
+	// Create template
+	resp, err := client.PutTemplate().
+		Id("my-search-template"). // Name of the template
+		BodyString(tmpl).         // Search template itself
+		Do()                      // Execute
+	if err != nil {
+		panic(err)
+	}
+	if resp.Created {
+		fmt.Println("search template created")
+	}
+}
+
+func ExampleGetTemplateService() {
+	client, err := elastic.NewClient(http.DefaultClient)
+	if err != nil {
+		panic(err)
+	}
+
+	// Get template stored under "my-search-template"
+	resp, err := client.GetTemplate().Id("my-search-template").Do()
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("search template is: %q\n", resp.Template)
+}
+
+func ExampleDeleteTemplateService() {
+	client, err := elastic.NewClient(http.DefaultClient)
+	if err != nil {
+		panic(err)
+	}
+
+	// Delete template
+	resp, err := client.DeleteTemplate().Id("my-search-template").Do()
+	if err != nil {
+		panic(err)
+	}
+	if resp != nil && resp.Found {
+		fmt.Println("template deleted")
+	}
+}
+
+func ExampleClusterHealthService() {
+	client, err := elastic.NewClient(http.DefaultClient)
+	if err != nil {
+		panic(err)
+	}
+
+	// Get cluster health
+	res, err := client.ClusterHealth().Index("twitter").Do()
+	if err != nil {
+		panic(err)
+	}
+	if res == nil {
+		panic(err)
+	}
+	fmt.Printf("Cluster status is %q\n", res.Status)
+}
+
+func ExampleClusterHealthService_WaitForGreen() {
+	client, err := elastic.NewClient(http.DefaultClient)
+	if err != nil {
+		panic(err)
+	}
+
+	// Wait for status green
+	res, err := client.ClusterHealth().WaitForStatus("green").Timeout("15s").Do()
+	if err != nil {
+		panic(err)
+	}
+	if res.TimedOut {
+		fmt.Printf("time out waiting for cluster status %q\n", "green")
+	} else {
+		fmt.Printf("cluster status is %q\n", res.Status)
+	}
+}
+
+func ExampleClusterStateService() {
+	client, err := elastic.NewClient(http.DefaultClient)
+	if err != nil {
+		panic(err)
+	}
+
+	// Get cluster state
+	res, err := client.ClusterState().Metric("version").Do()
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("Cluster %q has version %d", res.ClusterName, res.Version)
+}

+ 78 - 0
sensitive/src/elastic.v1/exists.go

@@ -0,0 +1,78 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"fmt"
+	"net/http"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
// ExistsService checks for the existence of a single document,
// identified by index, type, and ID, via an HTTP HEAD request.
type ExistsService struct {
	client *Client
	index  string // index the document lives in
	_type  string // document type
	id     string // document ID
}
+
+func NewExistsService(client *Client) *ExistsService {
+	builder := &ExistsService{
+		client: client,
+	}
+	return builder
+}
+
+func (s *ExistsService) String() string {
+	return fmt.Sprintf("exists([%v][%v][%v])",
+		s.index,
+		s._type,
+		s.id)
+}
+
+func (s *ExistsService) Index(index string) *ExistsService {
+	s.index = index
+	return s
+}
+
+func (s *ExistsService) Type(_type string) *ExistsService {
+	s._type = _type
+	return s
+}
+
+func (s *ExistsService) Id(id string) *ExistsService {
+	s.id = id
+	return s
+}
+
+func (s *ExistsService) Do() (bool, error) {
+	// Build url
+	urls, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{
+		"index": s.index,
+		"type":  s._type,
+		"id":    s.id,
+	})
+	if err != nil {
+		return false, err
+	}
+
+	// Set up a new request
+	req, err := s.client.NewRequest("HEAD", urls)
+	if err != nil {
+		return false, err
+	}
+
+	// Get response
+	res, err := s.client.c.Do((*http.Request)(req))
+	if err != nil {
+		return false, err
+	}
+	if res.StatusCode == 200 {
+		return true, nil
+	} else if res.StatusCode == 404 {
+		return false, nil
+	}
+	return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode)
+}

+ 70 - 0
sensitive/src/elastic.v1/fetch_source_context.go

@@ -0,0 +1,70 @@
+package elastic
+
+import (
+	"net/url"
+	"strings"
+)
+
// FetchSourceContext controls whether and how the _source field of a
// document is returned by Elasticsearch.
type FetchSourceContext struct {
	fetchSource     bool     // whether to return _source at all
	transformSource bool     // transform-source flag (see TransformSource)
	includes        []string // field patterns to include in _source
	excludes        []string // field patterns to exclude from _source
}
+
+func NewFetchSourceContext(fetchSource bool) *FetchSourceContext {
+	return &FetchSourceContext{
+		fetchSource: fetchSource,
+		includes:    make([]string, 0),
+		excludes:    make([]string, 0),
+	}
+}
+
+func (fsc *FetchSourceContext) FetchSource() bool {
+	return fsc.fetchSource
+}
+
+func (fsc *FetchSourceContext) SetFetchSource(fetchSource bool) {
+	fsc.fetchSource = fetchSource
+}
+
+func (fsc *FetchSourceContext) Include(includes ...string) *FetchSourceContext {
+	fsc.includes = append(fsc.includes, includes...)
+	return fsc
+}
+
+func (fsc *FetchSourceContext) Exclude(excludes ...string) *FetchSourceContext {
+	fsc.excludes = append(fsc.excludes, excludes...)
+	return fsc
+}
+
+func (fsc *FetchSourceContext) TransformSource(transformSource bool) *FetchSourceContext {
+	fsc.transformSource = transformSource
+	return fsc
+}
+
+func (fsc *FetchSourceContext) Source() interface{} {
+	if !fsc.fetchSource {
+		return false
+	}
+	return map[string]interface{}{
+		"includes": fsc.includes,
+		"excludes": fsc.excludes,
+	}
+}
+
+// Query returns the parameters in a form suitable for a URL query string.
+func (fsc *FetchSourceContext) Query() url.Values {
+	params := url.Values{}
+	if !fsc.fetchSource {
+		params.Add("_source", "false")
+		return params
+	}
+	if len(fsc.includes) > 0 {
+		params.Add("_source_include", strings.Join(fsc.includes, ","))
+	}
+	if len(fsc.excludes) > 0 {
+		params.Add("_source_exclude", strings.Join(fsc.excludes, ","))
+	}
+	return params
+}

+ 88 - 0
sensitive/src/elastic.v1/fetch_source_context_test.go

@@ -0,0 +1,88 @@
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+func TestFetchSourceContextNoFetchSource(t *testing.T) {
+	builder := NewFetchSourceContext(false)
+	data, err := json.Marshal(builder.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `false`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestFetchSourceContextNoFetchSourceIgnoreIncludesAndExcludes(t *testing.T) {
+	builder := NewFetchSourceContext(false).Include("a", "b").Exclude("c")
+	data, err := json.Marshal(builder.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `false`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestFetchSourceContextFetchSource(t *testing.T) {
+	builder := NewFetchSourceContext(true)
+	data, err := json.Marshal(builder.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"excludes":[],"includes":[]}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestFetchSourceContextFetchSourceWithIncludesAndExcludes(t *testing.T) {
+	builder := NewFetchSourceContext(true).Include("a", "b").Exclude("c")
+	data, err := json.Marshal(builder.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"excludes":["c"],"includes":["a","b"]}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestFetchSourceContextQueryDefaults(t *testing.T) {
+	builder := NewFetchSourceContext(true)
+	values := builder.Query()
+	got := values.Encode()
+	expected := ""
+	if got != expected {
+		t.Errorf("expected %q; got: %q", expected, got)
+	}
+}
+
+func TestFetchSourceContextQueryNoFetchSource(t *testing.T) {
+	builder := NewFetchSourceContext(false)
+	values := builder.Query()
+	got := values.Encode()
+	expected := "_source=false"
+	if got != expected {
+		t.Errorf("expected %q; got: %q", expected, got)
+	}
+}
+
+func TestFetchSourceContextQueryFetchSourceWithIncludesAndExcludes(t *testing.T) {
+	builder := NewFetchSourceContext(true).Include("a", "b").Exclude("c")
+	values := builder.Query()
+	got := values.Encode()
+	expected := "_source_exclude=c&_source_include=a%2Cb"
+	if got != expected {
+		t.Errorf("expected %q; got: %q", expected, got)
+	}
+}

+ 9 - 0
sensitive/src/elastic.v1/filter.go

@@ -0,0 +1,9 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
// Filter is implemented by all Elasticsearch filter types in this
// package. Source returns the JSON-serializable representation of the
// filter as used in the query DSL.
type Filter interface {
	Source() interface{}
}

+ 122 - 0
sensitive/src/elastic.v1/flush.go

@@ -0,0 +1,122 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
// FlushService performs a flush request against the _flush endpoint of
// one or more indices.
type FlushService struct {
	client *Client

	indices []string // indices to flush; empty means all indices
	refresh *bool    // optional "refresh" query parameter
	full    *bool    // optional "full" query parameter
}
+
+func NewFlushService(client *Client) *FlushService {
+	builder := &FlushService{
+		client: client,
+	}
+	return builder
+}
+
+func (s *FlushService) Index(index string) *FlushService {
+	if s.indices == nil {
+		s.indices = make([]string, 0)
+	}
+	s.indices = append(s.indices, index)
+	return s
+}
+
+func (s *FlushService) Indices(indices ...string) *FlushService {
+	if s.indices == nil {
+		s.indices = make([]string, 0)
+	}
+	s.indices = append(s.indices, indices...)
+	return s
+}
+
+func (s *FlushService) Refresh(refresh bool) *FlushService {
+	s.refresh = &refresh
+	return s
+}
+
+func (s *FlushService) Full(full bool) *FlushService {
+	s.full = &full
+	return s
+}
+
+func (s *FlushService) Do() (*FlushResult, error) {
+	// Build url
+	urls := "/"
+
+	// Indices part
+	if len(s.indices) > 0 {
+		indexPart := make([]string, 0)
+		for _, index := range s.indices {
+			index, err := uritemplates.Expand("{index}", map[string]string{
+				"index": index,
+			})
+			if err != nil {
+				return nil, err
+			}
+			indexPart = append(indexPart, index)
+		}
+		urls += strings.Join(indexPart, ",") + "/"
+	}
+	urls += "_flush"
+
+	// Parameters
+	params := make(url.Values)
+	if s.refresh != nil {
+		params.Set("refresh", fmt.Sprintf("%v", *s.refresh))
+	}
+	if s.full != nil {
+		params.Set("full", fmt.Sprintf("%v", *s.full))
+	}
+	if len(params) > 0 {
+		urls += "?" + params.Encode()
+	}
+
+	// Set up a new request
+	req, err := s.client.NewRequest("POST", urls)
+	if err != nil {
+		return nil, err
+	}
+
+	// Get response
+	res, err := s.client.c.Do((*http.Request)(req))
+	if err != nil {
+		return nil, err
+	}
+	if err := checkResponse(res); err != nil {
+		return nil, err
+	}
+	defer res.Body.Close()
+	ret := new(FlushResult)
+	if err := json.NewDecoder(res.Body).Decode(ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// -- Result of a flush request.
+
// shardsInfo summarizes how many shards participated in a request and
// how many of them succeeded or failed.
type shardsInfo struct {
	Total      int `json:"total"`
	Successful int `json:"successful"`
	Failed     int `json:"failed"`
}

// FlushResult is the response of a flush request.
type FlushResult struct {
	Shards shardsInfo `json:"_shards"`
}

+ 22 - 0
sensitive/src/elastic.v1/flush_test.go

@@ -0,0 +1,22 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"testing"
+)
+
+func TestFlush(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	// Flush all indices
+	res, err := client.Flush().Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res == nil {
+		t.Errorf("expected res to be != nil; got: %v", res)
+	}
+}

+ 43 - 0
sensitive/src/elastic.v1/geo_point.go

@@ -0,0 +1,43 @@
+package elastic
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
// GeoPoint is a geographic position described via latitude and longitude.
type GeoPoint struct {
	Lat, Lon float64
}

// Source returns the object to be serialized in Elasticsearch DSL.
func (pt *GeoPoint) Source() map[string]float64 {
	return map[string]float64{
		"lat": pt.Lat,
		"lon": pt.Lon,
	}
}

// GeoPointFromLatLon initializes a new GeoPoint by latitude and longitude.
func GeoPointFromLatLon(lat, lon float64) *GeoPoint {
	return &GeoPoint{Lat: lat, Lon: lon}
}

// GeoPointFromString initializes a new GeoPoint by a string that is
// formatted as "{latitude},{longitude}", e.g. "40.10210,-70.12091".
// Whitespace around either coordinate is tolerated, so
// "40.10210, -70.12091" parses as well (a backward-compatible
// generalization; such input previously failed in ParseFloat).
func GeoPointFromString(latLon string) (*GeoPoint, error) {
	latlon := strings.SplitN(latLon, ",", 2)
	if len(latlon) != 2 {
		return nil, fmt.Errorf("elastic: %s is not a valid geo point string", latLon)
	}
	lat, err := strconv.ParseFloat(strings.TrimSpace(latlon[0]), 64)
	if err != nil {
		return nil, err
	}
	lon, err := strconv.ParseFloat(strings.TrimSpace(latlon[1]), 64)
	if err != nil {
		return nil, err
	}
	return &GeoPoint{Lat: lat, Lon: lon}, nil
}

+ 20 - 0
sensitive/src/elastic.v1/geo_point_test.go

@@ -0,0 +1,20 @@
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+func TestGeoPointSource(t *testing.T) {
+	pt := GeoPoint{Lat: 40, Lon: -70}
+
+	data, err := json.Marshal(pt.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"lat":40,"lon":-70}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}

+ 204 - 0
sensitive/src/elastic.v1/get.go

@@ -0,0 +1,204 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
// GetService retrieves a single document by index, type, and ID.
// All option setters return the service itself to allow chaining.
type GetService struct {
	client                     *Client
	index                      string
	_type                      string // document type; defaults to "_all"
	id                         string
	routing                    string
	preference                 string
	fields                     []string
	fsc                        *FetchSourceContext // controls _source filtering
	refresh                    *bool
	realtime                   *bool
	version                    *int64 // see org.elasticsearch.common.lucene.uid.Versions
	versionType                string // see org.elasticsearch.index.VersionType
	ignoreErrOnGeneratedFields *bool  // NOTE(review): never set or sent in the visible code — confirm intent
}
+
+func NewGetService(client *Client) *GetService {
+	builder := &GetService{
+		client: client,
+		_type:  "_all",
+	}
+	return builder
+}
+
+func (b *GetService) String() string {
+	return fmt.Sprintf("[%v][%v][%v]: routing [%v]",
+		b.index,
+		b._type,
+		b.id,
+		b.routing)
+}
+
+func (b *GetService) Index(index string) *GetService {
+	b.index = index
+	return b
+}
+
+func (b *GetService) Type(_type string) *GetService {
+	b._type = _type
+	return b
+}
+
+func (b *GetService) Id(id string) *GetService {
+	b.id = id
+	return b
+}
+
+func (b *GetService) Parent(parent string) *GetService {
+	if b.routing == "" {
+		b.routing = parent
+	}
+	return b
+}
+
+func (b *GetService) Routing(routing string) *GetService {
+	b.routing = routing
+	return b
+}
+
+func (b *GetService) Preference(preference string) *GetService {
+	b.preference = preference
+	return b
+}
+
+func (b *GetService) Fields(fields ...string) *GetService {
+	if b.fields == nil {
+		b.fields = make([]string, 0)
+	}
+	b.fields = append(b.fields, fields...)
+	return b
+}
+
+func (s *GetService) FetchSource(fetchSource bool) *GetService {
+	if s.fsc == nil {
+		s.fsc = NewFetchSourceContext(fetchSource)
+	} else {
+		s.fsc.SetFetchSource(fetchSource)
+	}
+	return s
+}
+
+func (s *GetService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *GetService {
+	s.fsc = fetchSourceContext
+	return s
+}
+
+func (b *GetService) Refresh(refresh bool) *GetService {
+	b.refresh = &refresh
+	return b
+}
+
+func (b *GetService) Realtime(realtime bool) *GetService {
+	b.realtime = &realtime
+	return b
+}
+
+// Version can be MatchAny (-3), MatchAnyPre120 (0), NotFound (-1),
+// or NotSet (-2). These are specified in org.elasticsearch.common.lucene.uid.Versions.
+// The default is MatchAny (-3).
+func (b *GetService) Version(version int64) *GetService {
+	b.version = &version
+	return b
+}
+
+// VersionType can be "internal", "external", "external_gt", "external_gte",
+// or "force". See org.elasticsearch.index.VersionType in Elasticsearch source.
+// It is "internal" by default.
+func (b *GetService) VersionType(versionType string) *GetService {
+	b.versionType = versionType
+	return b
+}
+
+func (b *GetService) Do() (*GetResult, error) {
+	// Build url
+	urls, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{
+		"index": b.index,
+		"type":  b._type,
+		"id":    b.id,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	params := make(url.Values)
+	if b.realtime != nil {
+		params.Add("realtime", fmt.Sprintf("%v", *b.realtime))
+	}
+	if len(b.fields) > 0 {
+		params.Add("fields", strings.Join(b.fields, ","))
+	}
+	if b.routing != "" {
+		params.Add("routing", b.routing)
+	}
+	if b.preference != "" {
+		params.Add("preference", b.preference)
+	}
+	if b.refresh != nil {
+		params.Add("refresh", fmt.Sprintf("%v", *b.refresh))
+	}
+	if b.version != nil {
+		params.Add("_version", fmt.Sprintf("%d", *b.version))
+	}
+	if b.versionType != "" {
+		params.Add("_version_type", b.versionType)
+	}
+	if b.fsc != nil {
+		for k, values := range b.fsc.Query() {
+			params.Add(k, strings.Join(values, ","))
+		}
+	}
+	if len(params) > 0 {
+		urls += "?" + params.Encode()
+	}
+
+	// Set up a new request
+	req, err := b.client.NewRequest("GET", urls)
+	if err != nil {
+		return nil, err
+	}
+
+	// Get response
+	res, err := b.client.c.Do((*http.Request)(req))
+	if err != nil {
+		return nil, err
+	}
+	if err := checkResponse(res); err != nil {
+		return nil, err
+	}
+	defer res.Body.Close()
+	ret := new(GetResult)
+	if err := json.NewDecoder(res.Body).Decode(ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// -- Result of a get request.
+
// GetResult is the outcome of a get request. Source holds the raw
// document JSON (when _source is fetched) and Found reports whether
// the document exists.
type GetResult struct {
	Index   string           `json:"_index"`
	Type    string           `json:"_type"`
	Id      string           `json:"_id"`
	Version int64            `json:"_version,omitempty"`
	Source  *json.RawMessage `json:"_source,omitempty"`
	Found   bool             `json:"found,omitempty"`
	Fields  []string         `json:"fields,omitempty"`
	Error   string           `json:"error,omitempty"` // used only in MultiGet
}

+ 138 - 0
sensitive/src/elastic.v1/get_template.go

@@ -0,0 +1,138 @@
+// Copyright 2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// GetTemplateService reads a search template.
+// It is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-template.html.
+type GetTemplateService struct {
+	client      *Client     // client used to perform the HTTP request
+	debug       bool        // dump request/response when true
+	pretty      bool        // ask the server for pretty-printed JSON
+	id          string      // template ID (required)
+	version     interface{} // explicit version number for concurrency control
+	versionType string      // specific version type
+}
+
+// NewGetTemplateService creates a new GetTemplateService bound to
+// the given client.
+func NewGetTemplateService(client *Client) *GetTemplateService {
+	s := new(GetTemplateService)
+	s.client = client
+	return s
+}
+
+// Id sets the template ID. It is required; see Validate.
+func (s *GetTemplateService) Id(id string) *GetTemplateService {
+	s.id = id
+	return s
+}
+
+// Version sets an explicit version number for concurrency control.
+func (s *GetTemplateService) Version(version interface{}) *GetTemplateService {
+	s.version = version
+	return s
+}
+
+// VersionType sets a specific version type.
+func (s *GetTemplateService) VersionType(versionType string) *GetTemplateService {
+	s.versionType = versionType
+	return s
+}
+
+// buildURL builds the URL for the operation, expanding the template
+// ID into the path and appending optional query-string parameters.
+func (s *GetTemplateService) buildURL() (string, error) {
+	// Expand the path template with the template ID.
+	urls, err := uritemplates.Expand("/_search/template/{id}", map[string]string{"id": s.id})
+	if err != nil {
+		return "", err
+	}
+
+	// Collect optional query-string parameters.
+	params := make(url.Values)
+	if s.version != nil {
+		params.Set("version", fmt.Sprintf("%v", s.version))
+	}
+	if s.versionType != "" {
+		params.Set("version_type", s.versionType)
+	}
+	if len(params) == 0 {
+		return urls, nil
+	}
+	return urls + "?" + params.Encode(), nil
+}
+
+// Validate checks if the operation is valid.
+func (s *GetTemplateService) Validate() error {
+	var invalid []string
+	if s.id == "" {
+		invalid = append(invalid, "Id")
+	}
+	if len(invalid) > 0 {
+		return fmt.Errorf("missing required fields: %v", invalid)
+	}
+	return nil
+}
+
+// Do executes the operation and returns the stored template. It
+// validates required fields, performs the HTTP GET, optionally dumps
+// the request/response for debugging, and decodes the JSON body.
+func (s *GetTemplateService) Do() (*GetTemplateResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	urls, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Setup HTTP request
+	req, err := s.client.NewRequest("GET", urls)
+	if err != nil {
+		return nil, err
+	}
+
+	// Debug output?
+	if s.debug {
+		s.client.dumpRequest((*http.Request)(req))
+	}
+
+	// Get HTTP response
+	res, err := s.client.c.Do((*http.Request)(req))
+	if err != nil {
+		return nil, err
+	}
+	// Close the body on every return path. Previously the defer was
+	// placed after checkResponse, leaking the body on non-2xx statuses.
+	defer res.Body.Close()
+	if err := checkResponse(res); err != nil {
+		return nil, err
+	}
+
+	// Debug output?
+	if s.debug {
+		s.client.dumpResponse(res)
+	}
+
+	// Decode response
+	resp := new(GetTemplateResponse)
+	if err := json.NewDecoder(res.Body).Decode(resp); err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// GetTemplateResponse is the response of GetTemplateService.Do,
+// carrying the stored search template source.
+type GetTemplateResponse struct {
+	Template string `json:"template"`
+}

+ 137 - 0
sensitive/src/elastic.v1/get_test.go

@@ -0,0 +1,137 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+// TestGet is an integration test: it indexes three documents, flushes
+// the index, then verifies that Get finds an existing document (with
+// source) and reports Found=false for a non-existent ID. It requires
+// a running Elasticsearch node (via setupTestClientAndCreateIndex).
+func TestGet(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+	// Add all documents
+	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Flush so the documents are searchable/countable.
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Count documents
+	count, err := client.Count(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if count != 3 {
+		t.Errorf("expected Count = %d; got %d", 3, count)
+	}
+
+	// Get document 1
+	res, err := client.Get().Index(testIndexName).Type("tweet").Id("1").Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res.Found != true {
+		t.Errorf("expected Found = true; got %v", res.Found)
+	}
+	if res.Source == nil {
+		t.Errorf("expected Source != nil; got %v", res.Source)
+	}
+
+	// Get non existent document 99
+	res, err = client.Get().Index(testIndexName).Type("tweet").Id("99").Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res.Found != false {
+		t.Errorf("expected Found = false; got %v", res.Found)
+	}
+	if res.Source != nil {
+		t.Errorf("expected Source == nil; got %v", res.Source)
+	}
+}
+
+// TestGetWithSourceFiltering is an integration test for _source
+// filtering on Get: FetchSource(false) must suppress the source
+// entirely, and a FetchSourceContext excluding "message" must return
+// a source whose message field is empty after unmarshaling.
+func TestGetWithSourceFiltering(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+	// Add all documents
+	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Get document 1, without source
+	res, err := client.Get().Index(testIndexName).Type("tweet").Id("1").FetchSource(false).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res.Found != true {
+		t.Errorf("expected Found = true; got %v", res.Found)
+	}
+	if res.Source != nil {
+		t.Errorf("expected Source == nil; got %v", res.Source)
+	}
+
+	// Get document 1, exclude Message field
+	fsc := NewFetchSourceContext(true).Exclude("message")
+	res, err = client.Get().Index(testIndexName).Type("tweet").Id("1").FetchSourceContext(fsc).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res.Found != true {
+		t.Errorf("expected Found = true; got %v", res.Found)
+	}
+	if res.Source == nil {
+		t.Errorf("expected Source != nil; got %v", res.Source)
+	}
+	var tw tweet
+	err = json.Unmarshal(*res.Source, &tw)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if tw.User != "olivere" {
+		t.Errorf("expected user %q; got: %q", "olivere", tw.User)
+	}
+	// The excluded field must come back as the zero value.
+	if tw.Message != "" {
+		t.Errorf("expected message %q; got: %q", "", tw.Message)
+	}
+}

+ 492 - 0
sensitive/src/elastic.v1/highlight.go

@@ -0,0 +1,492 @@
+package elastic
+
+// Highlight allows highlighting search results on one or more fields.
+// For details, see:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-highlighting.html
+type Highlight struct {
+	// Per-field highlighter settings.
+	fields []*HighlighterField
+	// Global settings; pointer fields distinguish "unset" from the
+	// zero value so Source() only emits what was configured.
+	tagsSchema            *string
+	highlightFilter       *bool
+	fragmentSize          *int
+	numOfFragments        *int
+	preTags               []string
+	postTags              []string
+	order                 *string
+	encoder               *string
+	requireFieldMatch     *bool
+	boundaryMaxScan       *int
+	boundaryChars         []rune
+	highlighterType       *string
+	fragmenter            *string
+	highlightQuery        Query
+	noMatchSize           *int
+	phraseLimit           *int
+	options               map[string]interface{}
+	forceSource           *bool
+	// When true, Source() emits fields as an ordered list instead of a map.
+	useExplicitFieldOrder bool
+}
+
+// NewHighlight creates a Highlight with empty (non-nil) collections,
+// ready for fluent configuration.
+func NewHighlight() *Highlight {
+	return &Highlight{
+		fields:        []*HighlighterField{},
+		preTags:       []string{},
+		postTags:      []string{},
+		boundaryChars: []rune{},
+		options:       map[string]interface{}{},
+	}
+}
+
+// Fields appends per-field highlighter settings.
+func (hl *Highlight) Fields(fields ...*HighlighterField) *Highlight {
+	hl.fields = append(hl.fields, fields...)
+	return hl
+}
+
+// Field appends a field to highlight with default settings.
+func (hl *Highlight) Field(name string) *Highlight {
+	field := NewHighlighterField(name)
+	hl.fields = append(hl.fields, field)
+	return hl
+}
+
+// TagsSchema sets a predefined tag schema (e.g. "styled").
+func (hl *Highlight) TagsSchema(schemaName string) *Highlight {
+	hl.tagsSchema = &schemaName
+	return hl
+}
+
+// HighlightFilter enables/disables the highlight filter.
+func (hl *Highlight) HighlightFilter(highlightFilter bool) *Highlight {
+	hl.highlightFilter = &highlightFilter
+	return hl
+}
+
+// FragmentSize sets the size of highlighted fragments in characters.
+func (hl *Highlight) FragmentSize(fragmentSize int) *Highlight {
+	hl.fragmentSize = &fragmentSize
+	return hl
+}
+
+// NumOfFragments sets the maximum number of fragments to return.
+func (hl *Highlight) NumOfFragments(numOfFragments int) *Highlight {
+	hl.numOfFragments = &numOfFragments
+	return hl
+}
+
+// Encoder sets the encoder for the highlighted text (e.g. "html").
+func (hl *Highlight) Encoder(encoder string) *Highlight {
+	hl.encoder = &encoder
+	return hl
+}
+
+// PreTags replaces the tags inserted before highlighted terms.
+func (hl *Highlight) PreTags(preTags ...string) *Highlight {
+	hl.preTags = make([]string, 0)
+	hl.preTags = append(hl.preTags, preTags...)
+	return hl
+}
+
+// PostTags replaces the tags inserted after highlighted terms.
+func (hl *Highlight) PostTags(postTags ...string) *Highlight {
+	hl.postTags = make([]string, 0)
+	hl.postTags = append(hl.postTags, postTags...)
+	return hl
+}
+
+// Order sets the fragment order (e.g. "score").
+func (hl *Highlight) Order(order string) *Highlight {
+	hl.order = &order
+	return hl
+}
+
+// RequireFieldMatch restricts highlighting to fields that matched the query.
+func (hl *Highlight) RequireFieldMatch(requireFieldMatch bool) *Highlight {
+	hl.requireFieldMatch = &requireFieldMatch
+	return hl
+}
+
+// BoundaryMaxScan sets how far to scan for boundary characters.
+func (hl *Highlight) BoundaryMaxScan(boundaryMaxScan int) *Highlight {
+	hl.boundaryMaxScan = &boundaryMaxScan
+	return hl
+}
+
+// BoundaryChars replaces the set of boundary characters.
+func (hl *Highlight) BoundaryChars(boundaryChars ...rune) *Highlight {
+	hl.boundaryChars = make([]rune, 0)
+	hl.boundaryChars = append(hl.boundaryChars, boundaryChars...)
+	return hl
+}
+
+// HighlighterType sets the highlighter implementation (e.g. "plain", "fvh").
+func (hl *Highlight) HighlighterType(highlighterType string) *Highlight {
+	hl.highlighterType = &highlighterType
+	return hl
+}
+
+// Fragmenter sets how text is broken into fragments (e.g. "simple", "span").
+func (hl *Highlight) Fragmenter(fragmenter string) *Highlight {
+	hl.fragmenter = &fragmenter
+	return hl
+}
+
+// HighlighQuery sets a separate query to use for highlighting.
+//
+// Deprecated: the method name is missing a "t". Use HighlightQuery;
+// this method is kept for backward compatibility with existing callers.
+func (hl *Highlight) HighlighQuery(highlightQuery Query) *Highlight {
+	hl.highlightQuery = highlightQuery
+	return hl
+}
+
+// HighlightQuery sets a separate query to use for highlighting,
+// overriding the search query when selecting fragments.
+func (hl *Highlight) HighlightQuery(highlightQuery Query) *Highlight {
+	return hl.HighlighQuery(highlightQuery)
+}
+
+// NoMatchSize sets how many leading characters to return when nothing matches.
+func (hl *Highlight) NoMatchSize(noMatchSize int) *Highlight {
+	hl.noMatchSize = &noMatchSize
+	return hl
+}
+
+// Options sets highlighter-specific options, replacing any previous map.
+func (hl *Highlight) Options(options map[string]interface{}) *Highlight {
+	hl.options = options
+	return hl
+}
+
+// ForceSource forces highlighting from _source even if a field is stored.
+func (hl *Highlight) ForceSource(forceSource bool) *Highlight {
+	hl.forceSource = &forceSource
+	return hl
+}
+
+// UseExplicitFieldOrder makes Source() serialize fields as an ordered
+// list (preserving insertion order) rather than a map.
+func (hl *Highlight) UseExplicitFieldOrder(useExplicitFieldOrder bool) *Highlight {
+	hl.useExplicitFieldOrder = useExplicitFieldOrder
+	return hl
+}
+
+// Source creates the body of the "highlight" section of a search
+// request, i.e. the map serialized inside:
+//
+//	"highlight": { ... this ... }
+//
+// Only explicitly configured settings are emitted.
+func (hl *Highlight) Source() interface{} {
+	source := make(map[string]interface{})
+	if hl.tagsSchema != nil {
+		source["tags_schema"] = *hl.tagsSchema
+	}
+	// len() is safe on nil slices/maps, so the former "!= nil &&"
+	// guards were redundant and have been dropped.
+	if len(hl.preTags) > 0 {
+		source["pre_tags"] = hl.preTags
+	}
+	if len(hl.postTags) > 0 {
+		source["post_tags"] = hl.postTags
+	}
+	if hl.order != nil {
+		source["order"] = *hl.order
+	}
+	if hl.highlightFilter != nil {
+		source["highlight_filter"] = *hl.highlightFilter
+	}
+	if hl.fragmentSize != nil {
+		source["fragment_size"] = *hl.fragmentSize
+	}
+	if hl.numOfFragments != nil {
+		source["number_of_fragments"] = *hl.numOfFragments
+	}
+	if hl.encoder != nil {
+		source["encoder"] = *hl.encoder
+	}
+	if hl.requireFieldMatch != nil {
+		source["require_field_match"] = *hl.requireFieldMatch
+	}
+	if hl.boundaryMaxScan != nil {
+		source["boundary_max_scan"] = *hl.boundaryMaxScan
+	}
+	if len(hl.boundaryChars) > 0 {
+		source["boundary_chars"] = hl.boundaryChars
+	}
+	if hl.highlighterType != nil {
+		source["type"] = *hl.highlighterType
+	}
+	if hl.fragmenter != nil {
+		source["fragmenter"] = *hl.fragmenter
+	}
+	if hl.highlightQuery != nil {
+		source["highlight_query"] = hl.highlightQuery.Source()
+	}
+	if hl.noMatchSize != nil {
+		source["no_match_size"] = *hl.noMatchSize
+	}
+	if hl.phraseLimit != nil {
+		source["phrase_limit"] = *hl.phraseLimit
+	}
+	if len(hl.options) > 0 {
+		source["options"] = hl.options
+	}
+	if hl.forceSource != nil {
+		source["force_source"] = *hl.forceSource
+	}
+
+	if len(hl.fields) > 0 {
+		if hl.useExplicitFieldOrder {
+			// Preserve insertion order by emitting a slice of
+			// single-entry maps instead of one (unordered) map.
+			fields := make([]map[string]interface{}, 0, len(hl.fields))
+			for _, field := range hl.fields {
+				fmap := make(map[string]interface{})
+				fmap[field.Name] = field.Source()
+				fields = append(fields, fmap)
+			}
+			source["fields"] = fields
+		} else {
+			// Use a map for the fields.
+			fields := make(map[string]interface{}, len(hl.fields))
+			for _, field := range hl.fields {
+				fields[field.Name] = field.Source()
+			}
+			source["fields"] = fields
+		}
+	}
+
+	return source
+}
+
+// HighlighterField specifies per-field highlighter settings. Pointer
+// fields and -1 sentinels for the int fields distinguish "unset" from
+// the zero value so Source() only emits configured options.
+// (An obsolete commented-out duplicate of this field list was removed.)
+type HighlighterField struct {
+	Name string
+
+	preTags           []string
+	postTags          []string
+	fragmentSize      int
+	fragmentOffset    int
+	numOfFragments    int
+	highlightFilter   *bool
+	order             *string
+	requireFieldMatch *bool
+	boundaryMaxScan   int
+	boundaryChars     []rune
+	highlighterType   *string
+	fragmenter        *string
+	highlightQuery    Query
+	noMatchSize       *int
+	matchedFields     []string
+	phraseLimit       *int
+	options           map[string]interface{}
+	forceSource       *bool
+}
+
+// NewHighlighterField creates a HighlighterField for the given field
+// name. Numeric settings start at -1, meaning "not configured".
+func NewHighlighterField(name string) *HighlighterField {
+	f := new(HighlighterField)
+	f.Name = name
+	f.preTags = make([]string, 0)
+	f.postTags = make([]string, 0)
+	f.fragmentSize = -1
+	f.fragmentOffset = -1
+	f.numOfFragments = -1
+	f.boundaryMaxScan = -1
+	f.boundaryChars = make([]rune, 0)
+	f.matchedFields = make([]string, 0)
+	f.options = make(map[string]interface{})
+	return f
+}
+
+// PreTags replaces the tags inserted before highlighted terms for this field.
+func (f *HighlighterField) PreTags(preTags ...string) *HighlighterField {
+	f.preTags = make([]string, 0)
+	f.preTags = append(f.preTags, preTags...)
+	return f
+}
+
+// PostTags replaces the tags inserted after highlighted terms for this field.
+func (f *HighlighterField) PostTags(postTags ...string) *HighlighterField {
+	f.postTags = make([]string, 0)
+	f.postTags = append(f.postTags, postTags...)
+	return f
+}
+
+// FragmentSize sets the fragment size in characters for this field.
+func (f *HighlighterField) FragmentSize(fragmentSize int) *HighlighterField {
+	f.fragmentSize = fragmentSize
+	return f
+}
+
+// FragmentOffset sets the margin from which highlighting starts.
+func (f *HighlighterField) FragmentOffset(fragmentOffset int) *HighlighterField {
+	f.fragmentOffset = fragmentOffset
+	return f
+}
+
+// NumOfFragments sets the maximum number of fragments for this field.
+func (f *HighlighterField) NumOfFragments(numOfFragments int) *HighlighterField {
+	f.numOfFragments = numOfFragments
+	return f
+}
+
+// HighlightFilter enables/disables the highlight filter for this field.
+func (f *HighlighterField) HighlightFilter(highlightFilter bool) *HighlighterField {
+	f.highlightFilter = &highlightFilter
+	return f
+}
+
+// Order sets the fragment order for this field (e.g. "score").
+func (f *HighlighterField) Order(order string) *HighlighterField {
+	f.order = &order
+	return f
+}
+
+// RequireFieldMatch restricts highlighting to query matches on this field.
+func (f *HighlighterField) RequireFieldMatch(requireFieldMatch bool) *HighlighterField {
+	f.requireFieldMatch = &requireFieldMatch
+	return f
+}
+
+// BoundaryMaxScan sets how far to scan for boundary characters.
+func (f *HighlighterField) BoundaryMaxScan(boundaryMaxScan int) *HighlighterField {
+	f.boundaryMaxScan = boundaryMaxScan
+	return f
+}
+
+// BoundaryChars replaces the boundary character set for this field.
+func (f *HighlighterField) BoundaryChars(boundaryChars ...rune) *HighlighterField {
+	f.boundaryChars = make([]rune, 0)
+	f.boundaryChars = append(f.boundaryChars, boundaryChars...)
+	return f
+}
+
+// HighlighterType sets the highlighter implementation for this field.
+func (f *HighlighterField) HighlighterType(highlighterType string) *HighlighterField {
+	f.highlighterType = &highlighterType
+	return f
+}
+
+// Fragmenter sets the fragmenter for this field (e.g. "simple", "span").
+func (f *HighlighterField) Fragmenter(fragmenter string) *HighlighterField {
+	f.fragmenter = &fragmenter
+	return f
+}
+
+// HighlightQuery sets a separate query used to highlight this field.
+func (f *HighlighterField) HighlightQuery(highlightQuery Query) *HighlighterField {
+	f.highlightQuery = highlightQuery
+	return f
+}
+
+// NoMatchSize sets how many leading characters to return when nothing matches.
+func (f *HighlighterField) NoMatchSize(noMatchSize int) *HighlighterField {
+	f.noMatchSize = &noMatchSize
+	return f
+}
+
+// Options sets highlighter-specific options for this field, replacing any previous map.
+func (f *HighlighterField) Options(options map[string]interface{}) *HighlighterField {
+	f.options = options
+	return f
+}
+
+// MatchedFields replaces the set of fields whose matches are combined for highlighting.
+func (f *HighlighterField) MatchedFields(matchedFields ...string) *HighlighterField {
+	f.matchedFields = make([]string, 0)
+	f.matchedFields = append(f.matchedFields, matchedFields...)
+	return f
+}
+
+// PhraseLimit caps the number of phrases considered by the highlighter.
+func (f *HighlighterField) PhraseLimit(phraseLimit int) *HighlighterField {
+	f.phraseLimit = &phraseLimit
+	return f
+}
+
+// ForceSource forces highlighting from _source for this field.
+func (f *HighlighterField) ForceSource(forceSource bool) *HighlighterField {
+	f.forceSource = &forceSource
+	return f
+}
+
+// Source creates the JSON-serializable settings map for this field,
+// emitted inside the "fields" section of a highlight request. Only
+// explicitly configured options appear (pointers non-nil, ints != -1,
+// collections non-empty).
+func (f *HighlighterField) Source() interface{} {
+	source := make(map[string]interface{})
+
+	// len() is safe on nil slices/maps; the former "!= nil &&" guards
+	// were redundant and have been dropped.
+	if len(f.preTags) > 0 {
+		source["pre_tags"] = f.preTags
+	}
+	if len(f.postTags) > 0 {
+		source["post_tags"] = f.postTags
+	}
+	if f.fragmentSize != -1 {
+		source["fragment_size"] = f.fragmentSize
+	}
+	if f.numOfFragments != -1 {
+		source["number_of_fragments"] = f.numOfFragments
+	}
+	if f.fragmentOffset != -1 {
+		source["fragment_offset"] = f.fragmentOffset
+	}
+	if f.highlightFilter != nil {
+		source["highlight_filter"] = *f.highlightFilter
+	}
+	if f.order != nil {
+		source["order"] = *f.order
+	}
+	if f.requireFieldMatch != nil {
+		source["require_field_match"] = *f.requireFieldMatch
+	}
+	if f.boundaryMaxScan != -1 {
+		source["boundary_max_scan"] = f.boundaryMaxScan
+	}
+	if len(f.boundaryChars) > 0 {
+		source["boundary_chars"] = f.boundaryChars
+	}
+	if f.highlighterType != nil {
+		source["type"] = *f.highlighterType
+	}
+	if f.fragmenter != nil {
+		source["fragmenter"] = *f.fragmenter
+	}
+	if f.highlightQuery != nil {
+		source["highlight_query"] = f.highlightQuery.Source()
+	}
+	if f.noMatchSize != nil {
+		source["no_match_size"] = *f.noMatchSize
+	}
+	if len(f.matchedFields) > 0 {
+		source["matched_fields"] = f.matchedFields
+	}
+	if f.phraseLimit != nil {
+		source["phrase_limit"] = *f.phraseLimit
+	}
+	if len(f.options) > 0 {
+		source["options"] = f.options
+	}
+	if f.forceSource != nil {
+		source["force_source"] = *f.forceSource
+	}
+
+	return source
+}

+ 169 - 0
sensitive/src/elastic.v1/highlight_test.go

@@ -0,0 +1,169 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	_ "net/http"
+	"testing"
+)
+
+// TestHighlighterField verifies that an unconfigured field serializes
+// to an empty JSON object.
+func TestHighlighterField(t *testing.T) {
+	field := NewHighlighterField("grade")
+	data, err := json.Marshal(field.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+// TestHighlighterFieldWithOptions verifies serialization of explicitly
+// configured fragment settings.
+func TestHighlighterFieldWithOptions(t *testing.T) {
+	field := NewHighlighterField("grade").FragmentSize(2).NumOfFragments(1)
+	data, err := json.Marshal(field.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"fragment_size":2,"number_of_fragments":1}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+// TestHighlightWithStringField verifies the Field(name) convenience method.
+func TestHighlightWithStringField(t *testing.T) {
+	builder := NewHighlight().Field("grade")
+	data, err := json.Marshal(builder.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"fields":{"grade":{}}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+// TestHighlightWithFields verifies Fields() with a prebuilt HighlighterField.
+func TestHighlightWithFields(t *testing.T) {
+	gradeField := NewHighlighterField("grade")
+	builder := NewHighlight().Fields(gradeField)
+	data, err := json.Marshal(builder.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"fields":{"grade":{}}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+// TestHighlightWithMultipleFields verifies map-style serialization of
+// several fields (map keys serialize in sorted order).
+func TestHighlightWithMultipleFields(t *testing.T) {
+	gradeField := NewHighlighterField("grade")
+	colorField := NewHighlighterField("color")
+	builder := NewHighlight().Fields(gradeField, colorField)
+	data, err := json.Marshal(builder.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"fields":{"color":{},"grade":{}}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+// TestHighlighterWithExplicitFieldOrder verifies list-style serialization
+// that preserves the order in which fields were added.
+func TestHighlighterWithExplicitFieldOrder(t *testing.T) {
+	gradeField := NewHighlighterField("grade").FragmentSize(2)
+	colorField := NewHighlighterField("color").FragmentSize(2).NumOfFragments(1)
+	builder := NewHighlight().Fields(gradeField, colorField).UseExplicitFieldOrder(true)
+	data, err := json.Marshal(builder.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"fields":[{"grade":{"fragment_size":2}},{"color":{"fragment_size":2,"number_of_fragments":1}}]}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+// TestHighlightWithTermQuery is an integration test: it indexes three
+// documents, searches with a prefix query plus a highlighter, and
+// verifies the single hit carries the expected <em>-tagged highlight.
+// It requires a running Elasticsearch node.
+func TestHighlightWithTermQuery(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun to do."}
+
+	// Add all documents
+	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Specify highlighter
+	hl := NewHighlight()
+	hl = hl.Fields(NewHighlighterField("message"))
+	hl = hl.PreTags("<em>").PostTags("</em>")
+
+	// Match all should return all documents
+	query := NewPrefixQuery("message", "golang")
+	searchResult, err := client.Search().
+		Index(testIndexName).
+		Highlight(hl).
+		Query(&query).
+		//Debug(true).Pretty(true).
+		Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if searchResult.Hits == nil {
+		t.Fatalf("expected SearchResult.Hits != nil; got nil")
+	}
+	if searchResult.Hits.TotalHits != 1 {
+		t.Fatalf("expected SearchResult.Hits.TotalHits = %d; got %d", 1, searchResult.Hits.TotalHits)
+	}
+	if len(searchResult.Hits.Hits) != 1 {
+		t.Fatalf("expected len(SearchResult.Hits.Hits) = %d; got %d", 1, len(searchResult.Hits.Hits))
+	}
+
+	// The single hit must carry a highlight for "message".
+	hit := searchResult.Hits.Hits[0]
+	var tw tweet
+	if err := json.Unmarshal(*hit.Source, &tw); err != nil {
+		t.Fatal(err)
+	}
+	if hit.Highlight == nil || len(hit.Highlight) == 0 {
+		t.Fatal("expected hit to have a highlight; got nil")
+	}
+	if hl, found := hit.Highlight["message"]; found {
+		if len(hl) != 1 {
+			t.Fatalf("expected to have one highlight for field \"message\"; got %d", len(hl))
+		}
+		expected := "Welcome to <em>Golang</em> and Elasticsearch."
+		if hl[0] != expected {
+			t.Errorf("expected to have highlight \"%s\"; got \"%s\"", expected, hl[0])
+		}
+	} else {
+		t.Fatal("expected to have a highlight on field \"message\"; got none")
+	}
+}

+ 244 - 0
sensitive/src/elastic.v1/index.go

@@ -0,0 +1,244 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// IndexResult is the result of indexing a document in Elasticsearch,
+// decoded from the JSON response of the index API.
+type IndexResult struct {
+	Index   string `json:"_index"`   // index the document was written to
+	Type    string `json:"_type"`    // mapping type used
+	Id      string `json:"_id"`      // document ID (server-generated when none was given)
+	Version int    `json:"_version"` // version after this operation
+	Created bool   `json:"created"`  // true if a new document was created (vs. updated)
+}
+
+// IndexService adds documents to Elasticsearch. Configure it with the
+// fluent setters, then call Do to execute the request.
+type IndexService struct {
+	client      *Client     // client used to perform the HTTP request
+	index       string      // target index
+	_type       string      // target mapping type
+	id          string      // document ID; empty means server-side ID generation
+	routing     string      // custom routing value
+	parent      string      // parent document ID
+	opType      string      // "create" or "index" (the default)
+	refresh     *bool       // refresh the shard after the operation when true
+	version     *int64      // explicit document version
+	versionType string      // version type (internal, external, ...)
+	timestamp   string      // explicit document timestamp
+	ttl         string      // document time-to-live
+	timeout     string      // explicit operation timeout
+	bodyString  string      // document body as raw string (used when bodyJson is nil)
+	bodyJson    interface{} // document body as JSON-serializable value (takes precedence)
+	pretty      bool        // ask the server for pretty-printed JSON
+	debug       bool        // dump request/response when true
+}
+
+// NewIndexService creates a new IndexService bound to the given client.
+func NewIndexService(client *Client) *IndexService {
+	b := new(IndexService)
+	b.client = client
+	return b
+}
+
+// Index sets the name of the target index.
+func (b *IndexService) Index(name string) *IndexService {
+	b.index = name
+	return b
+}
+
+// Type sets the mapping type of the document.
+func (b *IndexService) Type(_type string) *IndexService {
+	b._type = _type
+	return b
+}
+
+// Id sets the document ID. When empty, the server generates one.
+func (b *IndexService) Id(id string) *IndexService {
+	b.id = id
+	return b
+}
+
+// Routing sets a custom routing value.
+func (b *IndexService) Routing(routing string) *IndexService {
+	b.routing = routing
+	return b
+}
+
+// Parent sets the ID of the parent document.
+func (b *IndexService) Parent(parent string) *IndexService {
+	b.parent = parent
+	return b
+}
+
+// OpType is either "create" or "index" (the default).
+func (b *IndexService) OpType(opType string) *IndexService {
+	b.opType = opType
+	return b
+}
+
+// Refresh controls whether the affected shard is refreshed after the operation.
+func (b *IndexService) Refresh(refresh bool) *IndexService {
+	b.refresh = &refresh
+	return b
+}
+
+// Version sets an explicit document version for concurrency control.
+func (b *IndexService) Version(version int64) *IndexService {
+	b.version = &version
+	return b
+}
+
+// VersionType is either "internal" (default), "external",
+// "external_gt", "external_gte", or "force".
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
+// for details.
+func (b *IndexService) VersionType(versionType string) *IndexService {
+	b.versionType = versionType
+	return b
+}
+
+// Timestamp sets an explicit timestamp for the document.
+func (b *IndexService) Timestamp(timestamp string) *IndexService {
+	b.timestamp = timestamp
+	return b
+}
+
+// TTL sets the document's time-to-live.
+func (b *IndexService) TTL(ttl string) *IndexService {
+	b.ttl = ttl
+	return b
+}
+
+// Timeout sets an explicit operation timeout.
+func (b *IndexService) Timeout(timeout string) *IndexService {
+	b.timeout = timeout
+	return b
+}
+
+// BodyString sets the document body as a raw string. Ignored when
+// BodyJson was also set.
+func (b *IndexService) BodyString(body string) *IndexService {
+	b.bodyString = body
+	return b
+}
+
+// BodyJson sets the document body as a JSON-serializable value.
+// Takes precedence over BodyString.
+func (b *IndexService) BodyJson(json interface{}) *IndexService {
+	b.bodyJson = json
+	return b
+}
+
+// Pretty asks the server for pretty-printed JSON responses.
+func (b *IndexService) Pretty(pretty bool) *IndexService {
+	b.pretty = pretty
+	return b
+}
+
+// Debug enables dumping of the HTTP request and response.
+func (b *IndexService) Debug(debug bool) *IndexService {
+	b.debug = debug
+	return b
+}
+
+// Do executes the index request. With an explicit ID it issues
+// PUT /{index}/{type}/{id}; without one it issues POST /{index}/{type}/
+// for automatic ID generation. The configured parameters are added to
+// the query string, the document body is attached, and the response is
+// decoded into an IndexResult.
+func (b *IndexService) Do() (*IndexResult, error) {
+	// Build url
+	var urls, method string
+	if b.id != "" {
+		// Create document with manual id
+		method = "PUT"
+		urls = "/{index}/{type}/{id}"
+	} else {
+		// Automatic ID generation
+		// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-index_.html#index-creation
+		method = "POST"
+		urls = "/{index}/{type}/"
+	}
+	urls, err := uritemplates.Expand(urls, map[string]string{
+		"index": b.index,
+		"type":  b._type,
+		"id":    b.id,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// Parameters: only explicitly configured options are emitted.
+	params := make(url.Values)
+	if b.pretty {
+		params.Set("pretty", "true")
+	}
+	if b.routing != "" {
+		params.Set("routing", b.routing)
+	}
+	if b.parent != "" {
+		params.Set("parent", b.parent)
+	}
+	if b.opType != "" {
+		params.Set("op_type", b.opType)
+	}
+	if b.refresh != nil && *b.refresh {
+		params.Set("refresh", "true")
+	}
+	if b.version != nil {
+		params.Set("version", fmt.Sprintf("%d", *b.version))
+	}
+	if b.versionType != "" {
+		params.Set("version_type", b.versionType)
+	}
+	if b.timestamp != "" {
+		params.Set("timestamp", b.timestamp)
+	}
+	if b.ttl != "" {
+		params.Set("ttl", b.ttl)
+	}
+	if b.timeout != "" {
+		params.Set("timeout", b.timeout)
+	}
+	if len(params) > 0 {
+		urls += "?" + params.Encode()
+	}
+
+	// Set up a new request
+	req, err := b.client.NewRequest(method, urls)
+	if err != nil {
+		return nil, err
+	}
+
+	// Set body; JSON takes precedence over the raw string.
+	// NOTE(review): results of SetBodyJson/SetBodyString (if they
+	// return errors) are ignored here — confirm their signatures.
+	if b.bodyJson != nil {
+		req.SetBodyJson(b.bodyJson)
+	} else {
+		req.SetBodyString(b.bodyString)
+	}
+
+	if b.debug {
+		b.client.dumpRequest((*http.Request)(req))
+	}
+
+	// Get response
+	res, err := b.client.c.Do((*http.Request)(req))
+	if err != nil {
+		return nil, err
+	}
+	// Close the body on every return path. Previously the defer was
+	// placed after checkResponse, leaking the body on non-2xx statuses.
+	defer res.Body.Close()
+	if err := checkResponse(res); err != nil {
+		return nil, err
+	}
+
+	if b.debug {
+		b.client.dumpResponse(res)
+	}
+
+	ret := new(IndexResult)
+	if err := json.NewDecoder(res.Body).Decode(ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}

+ 170 - 0
sensitive/src/elastic.v1/index_close.go

@@ -0,0 +1,170 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
// CloseIndexService closes an index.
// See documentation at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-open-close.html.
type CloseIndexService struct {
	client            *Client
	debug             bool   // dump HTTP traffic when true
	pretty            bool   // NOTE(review): declared but not sent by buildURL — confirm intended
	index             string // required: name of the index to close
	ignoreUnavailable *bool  // nil = server default
	allowNoIndices    *bool  // nil = server default
	expandWildcards   string
	timeout           string
	masterTimeout     string
}

// NewCloseIndexService creates a new CloseIndexService.
func NewCloseIndexService(client *Client) *CloseIndexService {
	return &CloseIndexService{client: client}
}
+
+// Index is the name of the index.
+func (s *CloseIndexService) Index(index string) *CloseIndexService {
+	s.index = index
+	return s
+}
+
+// Timeout is an explicit operation timeout.
+func (s *CloseIndexService) Timeout(timeout string) *CloseIndexService {
+	s.timeout = timeout
+	return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+func (s *CloseIndexService) MasterTimeout(masterTimeout string) *CloseIndexService {
+	s.masterTimeout = masterTimeout
+	return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *CloseIndexService) IgnoreUnavailable(ignoreUnavailable bool) *CloseIndexService {
+	s.ignoreUnavailable = &ignoreUnavailable
+	return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified).
+func (s *CloseIndexService) AllowNoIndices(allowNoIndices bool) *CloseIndexService {
+	s.allowNoIndices = &allowNoIndices
+	return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression to
+// concrete indices that are open, closed or both.
+func (s *CloseIndexService) ExpandWildcards(expandWildcards string) *CloseIndexService {
+	s.expandWildcards = expandWildcards
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *CloseIndexService) buildURL() (string, error) {
+	// Build URL
+	urls, err := uritemplates.Expand("/{index}/_close", map[string]string{
+		"index": s.index,
+	})
+	if err != nil {
+		return "", err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.allowNoIndices != nil {
+		params.Set("allowNoIndices", fmt.Sprintf("%v", *s.allowNoIndices))
+	}
+	if s.expandWildcards != "" {
+		params.Set("expandWildcards", s.expandWildcards)
+	}
+	if s.timeout != "" {
+		params.Set("timeout", s.timeout)
+	}
+	if s.masterTimeout != "" {
+		params.Set("masterTimeout", s.masterTimeout)
+	}
+	if s.ignoreUnavailable != nil {
+		params.Set("ignoreUnavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+	}
+	if len(params) > 0 {
+		urls += "?" + params.Encode()
+	}
+
+	return urls, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *CloseIndexService) Validate() error {
+	var invalid []string
+	if s.index == "" {
+		invalid = append(invalid, "Index")
+	}
+	if len(invalid) > 0 {
+		return fmt.Errorf("missing required fields: %v", invalid)
+	}
+	return nil
+}
+
+// Do executes the operation.
+func (s *CloseIndexService) Do() (*CloseIndexResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	urls, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Setup HTTP request
+	req, err := s.client.NewRequest("POST", urls)
+	if err != nil {
+		return nil, err
+	}
+
+	// Debug output?
+	if s.debug {
+		s.client.dumpRequest((*http.Request)(req))
+	}
+
+	// Get HTTP response
+	res, err := s.client.c.Do((*http.Request)(req))
+	if err != nil {
+		return nil, err
+	}
+	if err := checkResponse(res); err != nil {
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	// Debug output?
+	if s.debug {
+		s.client.dumpResponse(res)
+	}
+
+	// Return operation response
+	resp := new(CloseIndexResponse)
+	if err := json.NewDecoder(res.Body).Decode(resp); err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
// CloseIndexResponse is the response of CloseIndexService.Do.
type CloseIndexResponse struct {
	// Acknowledged reports whether the cluster acknowledged the request.
	Acknowledged bool `json:"acknowledged"`
}

+ 57 - 0
sensitive/src/elastic.v1/index_exists.go

@@ -0,0 +1,57 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"fmt"
+	"net/http"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
// IndexExistsService checks whether an index exists by issuing a
// HEAD request against the index URL.
type IndexExistsService struct {
	client *Client
	index  string // name of the index to check
}

// NewIndexExistsService creates a new IndexExistsService.
func NewIndexExistsService(client *Client) *IndexExistsService {
	builder := &IndexExistsService{
		client: client,
	}
	return builder
}

// Index sets the name of the index to check.
func (b *IndexExistsService) Index(index string) *IndexExistsService {
	b.index = index
	return b
}
+
+func (b *IndexExistsService) Do() (bool, error) {
+	// Build url
+	urls, err := uritemplates.Expand("/{index}", map[string]string{
+		"index": b.index,
+	})
+	if err != nil {
+		return false, err
+	}
+
+	// Set up a new request
+	req, err := b.client.NewRequest("HEAD", urls)
+	if err != nil {
+		return false, err
+	}
+
+	// Get response
+	res, err := b.client.c.Do((*http.Request)(req))
+	if err != nil {
+		return false, err
+	}
+	if res.StatusCode == 200 {
+		return true, nil
+	} else if res.StatusCode == 404 {
+		return false, nil
+	}
+	return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode)
+}

+ 171 - 0
sensitive/src/elastic.v1/index_open.go

@@ -0,0 +1,171 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
// OpenIndexService opens an index.
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-open-close.html.
type OpenIndexService struct {
	client            *Client
	debug             bool   // dump HTTP traffic when true
	pretty            bool   // NOTE(review): declared but not sent by buildURL — confirm intended
	index             string // required: name of the index to open
	expandWildcards   string
	timeout           string
	masterTimeout     string
	ignoreUnavailable *bool // nil = server default
	allowNoIndices    *bool // nil = server default
}

// NewOpenIndexService creates a new OpenIndexService.
func NewOpenIndexService(client *Client) *OpenIndexService {
	return &OpenIndexService{client: client}
}
+
+// Index is the name of the index to open.
+func (s *OpenIndexService) Index(index string) *OpenIndexService {
+	s.index = index
+	return s
+}
+
+// Timeout is an explicit operation timeout.
+func (s *OpenIndexService) Timeout(timeout string) *OpenIndexService {
+	s.timeout = timeout
+	return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+func (s *OpenIndexService) MasterTimeout(masterTimeout string) *OpenIndexService {
+	s.masterTimeout = masterTimeout
+	return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should
+// be ignored when unavailable (missing or closed).
+func (s *OpenIndexService) IgnoreUnavailable(ignoreUnavailable bool) *OpenIndexService {
+	s.ignoreUnavailable = &ignoreUnavailable
+	return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices.
+// (This includes `_all` string or when no indices have been specified).
+func (s *OpenIndexService) AllowNoIndices(allowNoIndices bool) *OpenIndexService {
+	s.allowNoIndices = &allowNoIndices
+	return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression to
+// concrete indices that are open, closed or both..
+func (s *OpenIndexService) ExpandWildcards(expandWildcards string) *OpenIndexService {
+	s.expandWildcards = expandWildcards
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *OpenIndexService) buildURL() (string, error) {
+	// Build URL
+	urls, err := uritemplates.Expand("/{index}/_open", map[string]string{
+		"index": s.index,
+	})
+	if err != nil {
+		return "", err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.timeout != "" {
+		params.Set("timeout", s.timeout)
+	}
+	if s.masterTimeout != "" {
+		params.Set("masterTimeout", s.masterTimeout)
+	}
+	if s.ignoreUnavailable != nil {
+		params.Set("ignoreUnavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+	}
+	if s.allowNoIndices != nil {
+		params.Set("allowNoIndices", fmt.Sprintf("%v", *s.allowNoIndices))
+	}
+	if s.expandWildcards != "" {
+		params.Set("expandWildcards", s.expandWildcards)
+	}
+	if len(params) > 0 {
+		urls += "?" + params.Encode()
+	}
+
+	return urls, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *OpenIndexService) Validate() error {
+	var invalid []string
+	if s.index == "" {
+		invalid = append(invalid, "Index")
+	}
+	if len(invalid) > 0 {
+		return fmt.Errorf("missing required fields: %v", invalid)
+	}
+	return nil
+}
+
+// Do executes the operation.
+func (s *OpenIndexService) Do() (*OpenIndexResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	urls, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Setup HTTP request
+	req, err := s.client.NewRequest("POST", urls)
+	if err != nil {
+		return nil, err
+	}
+
+	// Debug output?
+	if s.debug {
+		s.client.dumpRequest((*http.Request)(req))
+	}
+
+	// Get HTTP response
+	res, err := s.client.c.Do((*http.Request)(req))
+	if err != nil {
+		return nil, err
+	}
+	if err := checkResponse(res); err != nil {
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	// Debug output?
+	if s.debug {
+		s.client.dumpResponse(res)
+	}
+
+	// Return operation response
+	resp := new(OpenIndexResponse)
+	if err := json.NewDecoder(res.Body).Decode(resp); err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
// OpenIndexResponse is the response of OpenIndexService.Do.
type OpenIndexResponse struct {
	// Acknowledged reports whether the cluster acknowledged the request.
	Acknowledged bool `json:"acknowledged"`
}

+ 387 - 0
sensitive/src/elastic.v1/index_test.go

@@ -0,0 +1,387 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"net/http"
+	"testing"
+	"time"
+)
+
// Shared fixtures for the integration tests: two scratch index names and
// a single-shard mapping with string, geo_point, and completion fields.
const (
	testIndexName  = "elastic-test"
	testIndexName2 = "elastic-test2"
	testMapping    = `
{
	"settings":{
		"number_of_shards":1,
		"number_of_replicas":0
	},
	"mappings":{
		"tweet":{
			"properties":{
				"tags":{
					"type":"string"
				},
				"location":{
					"type":"geo_point"
				},
				"suggest_field":{
					"type":"completion",
					"payloads":true
				}
			}
		}
	}
}
`
)
+
// tweet is the document type indexed by the integration tests.
type tweet struct {
	User     string        `json:"user"`
	Message  string        `json:"message"`
	Retweets int           `json:"retweets"`
	Image    string        `json:"image,omitempty"`
	Created  time.Time     `json:"created,omitempty"`
	Tags     []string      `json:"tags,omitempty"`
	Location string        `json:"location,omitempty"`
	Suggest  *SuggestField `json:"suggest_field,omitempty"`
}
+
// setupTestClient creates a client against the test cluster and removes
// both test indices so every test starts from a clean slate.
func setupTestClient(t *testing.T) *Client {
	client, err := NewClient(http.DefaultClient)
	if err != nil {
		t.Fatal(err)
	}

	// Best-effort cleanup; errors are deliberately ignored because the
	// indices may not exist yet.
	client.DeleteIndex(testIndexName).Do()
	client.DeleteIndex(testIndexName2).Do()

	return client
}
+
// setupTestClientAndCreateIndex creates a clean client (see
// setupTestClient) and then creates both test indices with testMapping.
func setupTestClientAndCreateIndex(t *testing.T) *Client {
	client := setupTestClient(t)

	// Create index
	createIndex, err := client.CreateIndex(testIndexName).Body(testMapping).Do()
	if err != nil {
		t.Fatal(err)
	}
	if createIndex == nil {
		t.Errorf("expected result to be != nil; got: %v", createIndex)
	}

	// Create second index
	createIndex2, err := client.CreateIndex(testIndexName2).Body(testMapping).Do()
	if err != nil {
		t.Fatal(err)
	}
	if createIndex2 == nil {
		t.Errorf("expected result to be != nil; got: %v", createIndex2)
	}

	return client
}
+
// TestIndexLifecycle exercises create -> exists -> delete -> not-exists
// against a live cluster.
func TestIndexLifecycle(t *testing.T) {
	client := setupTestClient(t)

	// Create index
	createIndex, err := client.CreateIndex(testIndexName).Do()
	if err != nil {
		t.Fatal(err)
	}
	if !createIndex.Acknowledged {
		t.Errorf("expected CreateIndexResult.Acknowledged %v; got %v", true, createIndex.Acknowledged)
	}

	// Check if index exists
	indexExists, err := client.IndexExists(testIndexName).Do()
	if err != nil {
		t.Fatal(err)
	}
	if !indexExists {
		t.Fatalf("index %s should exist, but doesn't\n", testIndexName)
	}

	// Delete index
	deleteIndex, err := client.DeleteIndex(testIndexName).Do()
	if err != nil {
		t.Fatal(err)
	}
	if !deleteIndex.Acknowledged {
		t.Errorf("expected DeleteIndexResult.Acknowledged %v; got %v", true, deleteIndex.Acknowledged)
	}

	// Check if index exists (it should be gone now)
	indexExists, err = client.IndexExists(testIndexName).Do()
	if err != nil {
		t.Fatal(err)
	}
	if indexExists {
		t.Fatalf("index %s should not exist, but does\n", testIndexName)
	}
}
+
// TestIndexExistScenarios verifies IndexExists before and after the
// index is created.
func TestIndexExistScenarios(t *testing.T) {
	client := setupTestClient(t)

	// Should return false if index does not exist
	indexExists, err := client.IndexExists(testIndexName).Do()
	if err != nil {
		t.Fatal(err)
	}
	if indexExists {
		t.Fatalf("expected index exists to return %v, got %v", false, indexExists)
	}

	// Create index
	createIndex, err := client.CreateIndex(testIndexName).Do()
	if err != nil {
		t.Fatal(err)
	}
	if !createIndex.Acknowledged {
		t.Errorf("expected CreateIndexResult.Ack %v; got %v", true, createIndex.Acknowledged)
	}

	// Should return true now that the index exists
	indexExists, err = client.IndexExists(testIndexName).Do()
	if err != nil {
		t.Fatal(err)
	}
	if !indexExists {
		t.Fatalf("expected index exists to return %v, got %v", true, indexExists)
	}
}
+
+// TODO(oe): Find out why this test fails on Travis CI.
+/*
+func TestIndexOpenAndClose(t *testing.T) {
+	client := setupTestClient(t)
+
+	// Create index
+	createIndex, err := client.CreateIndex(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !createIndex.Acknowledged {
+		t.Errorf("expected CreateIndexResult.Acknowledged %v; got %v", true, createIndex.Acknowledged)
+	}
+	defer func() {
+		// Delete index
+		deleteIndex, err := client.DeleteIndex(testIndexName).Do()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if !deleteIndex.Acknowledged {
+			t.Errorf("expected DeleteIndexResult.Acknowledged %v; got %v", true, deleteIndex.Acknowledged)
+		}
+	}()
+
+	waitForYellow := func() {
+		// Wait for status yellow
+		res, err := client.ClusterHealth().WaitForStatus("yellow").Timeout("15s").Do()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if res != nil && res.TimedOut {
+			t.Fatalf("cluster time out waiting for status %q", "yellow")
+		}
+	}
+
+	// Wait for cluster
+	waitForYellow()
+
+	// Close index
+	cresp, err := client.CloseIndex(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !cresp.Acknowledged {
+		t.Fatalf("expected close index of %q to be acknowledged\n", testIndexName)
+	}
+
+	// Wait for cluster
+	waitForYellow()
+
+	// Open index again
+	oresp, err := client.OpenIndex(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !oresp.Acknowledged {
+		t.Fatalf("expected open index of %q to be acknowledged\n", testIndexName)
+	}
+}
+*/
+
// TestDocumentLifecycle indexes a document with an explicit ID, checks
// existence, fetches and decodes it, then deletes it and verifies it is
// gone.
func TestDocumentLifecycle(t *testing.T) {
	client := setupTestClientAndCreateIndex(t)

	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}

	// Add a document
	indexResult, err := client.Index().
		Index(testIndexName).
		Type("tweet").
		Id("1").
		BodyJson(&tweet1).
		Do()
	if err != nil {
		t.Fatal(err)
	}
	if indexResult == nil {
		t.Errorf("expected result to be != nil; got: %v", indexResult)
	}

	// Exists
	exists, err := client.Exists().Index(testIndexName).Type("tweet").Id("1").Do()
	if err != nil {
		t.Fatal(err)
	}
	if !exists {
		t.Errorf("expected exists %v; got %v", true, exists)
	}

	// Get document and check its metadata
	getResult, err := client.Get().
		Index(testIndexName).
		Type("tweet").
		Id("1").
		Do()
	if err != nil {
		t.Fatal(err)
	}
	if getResult.Index != testIndexName {
		t.Errorf("expected GetResult.Index %q; got %q", testIndexName, getResult.Index)
	}
	if getResult.Type != "tweet" {
		t.Errorf("expected GetResult.Type %q; got %q", "tweet", getResult.Type)
	}
	if getResult.Id != "1" {
		t.Errorf("expected GetResult.Id %q; got %q", "1", getResult.Id)
	}
	if getResult.Source == nil {
		t.Errorf("expected GetResult.Source to be != nil; got nil")
	}

	// Decode the Source field back into a tweet
	var tweetGot tweet
	err = json.Unmarshal(*getResult.Source, &tweetGot)
	if err != nil {
		t.Fatal(err)
	}
	if tweetGot.User != tweet1.User {
		t.Errorf("expected Tweet.User to be %q; got %q", tweet1.User, tweetGot.User)
	}
	if tweetGot.Message != tweet1.Message {
		t.Errorf("expected Tweet.Message to be %q; got %q", tweet1.Message, tweetGot.Message)
	}

	// Delete document again
	deleteResult, err := client.Delete().Index(testIndexName).Type("tweet").Id("1").Do()
	if err != nil {
		t.Fatal(err)
	}
	if deleteResult == nil {
		t.Errorf("expected result to be != nil; got: %v", deleteResult)
	}

	// Exists should now report false
	exists, err = client.Exists().Index(testIndexName).Type("tweet").Id("1").Do()
	if err != nil {
		t.Fatal(err)
	}
	if exists {
		t.Errorf("expected exists %v; got %v", false, exists)
	}
}
+
// TestDocumentLifecycleWithAutomaticIDGeneration is the same lifecycle
// as TestDocumentLifecycle, but lets Elasticsearch generate the
// document ID (no explicit Id on the index request).
func TestDocumentLifecycleWithAutomaticIDGeneration(t *testing.T) {
	client := setupTestClientAndCreateIndex(t)

	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}

	// Add a document without an explicit ID
	indexResult, err := client.Index().
		Index(testIndexName).
		Type("tweet").
		BodyJson(&tweet1).
		Do()
	if err != nil {
		t.Fatal(err)
	}
	if indexResult == nil {
		t.Errorf("expected result to be != nil; got: %v", indexResult)
	}
	if indexResult.Id == "" {
		t.Fatalf("expected Es to generate an automatic ID, got: %v", indexResult.Id)
	}
	id := indexResult.Id

	// Exists
	exists, err := client.Exists().Index(testIndexName).Type("tweet").Id(id).Do()
	if err != nil {
		t.Fatal(err)
	}
	if !exists {
		t.Errorf("expected exists %v; got %v", true, exists)
	}

	// Get document using the generated ID
	getResult, err := client.Get().
		Index(testIndexName).
		Type("tweet").
		Id(id).
		Do()
	if err != nil {
		t.Fatal(err)
	}
	if getResult.Index != testIndexName {
		t.Errorf("expected GetResult.Index %q; got %q", testIndexName, getResult.Index)
	}
	if getResult.Type != "tweet" {
		t.Errorf("expected GetResult.Type %q; got %q", "tweet", getResult.Type)
	}
	if getResult.Id != id {
		t.Errorf("expected GetResult.Id %q; got %q", id, getResult.Id)
	}
	if getResult.Source == nil {
		t.Errorf("expected GetResult.Source to be != nil; got nil")
	}

	// Decode the Source field back into a tweet
	var tweetGot tweet
	err = json.Unmarshal(*getResult.Source, &tweetGot)
	if err != nil {
		t.Fatal(err)
	}
	if tweetGot.User != tweet1.User {
		t.Errorf("expected Tweet.User to be %q; got %q", tweet1.User, tweetGot.User)
	}
	if tweetGot.Message != tweet1.Message {
		t.Errorf("expected Tweet.Message to be %q; got %q", tweet1.Message, tweetGot.Message)
	}

	// Delete document again
	deleteResult, err := client.Delete().Index(testIndexName).Type("tweet").Id(id).Do()
	if err != nil {
		t.Fatal(err)
	}
	if deleteResult == nil {
		t.Errorf("expected result to be != nil; got: %v", deleteResult)
	}

	// Exists should now report false
	exists, err = client.Exists().Index(testIndexName).Type("tweet").Id(id).Do()
	if err != nil {
		t.Fatal(err)
	}
	if exists {
		t.Errorf("expected exists %v; got %v", false, exists)
	}
}

+ 206 - 0
sensitive/src/elastic.v1/multi_get.go

@@ -0,0 +1,206 @@
+// Copyright 2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+)
+
// MultiGetService retrieves multiple documents in a single roundtrip
// via the _mget endpoint.
type MultiGetService struct {
	client     *Client
	preference string // query preference sent as URL parameter
	realtime   *bool  // nil = server default
	refresh    *bool  // nil = server default
	items      []*MultiGetItem
}

// NewMultiGetService creates a new MultiGetService.
func NewMultiGetService(client *Client) *MultiGetService {
	builder := &MultiGetService{
		client: client,
		items:  make([]*MultiGetItem, 0),
	}
	return builder
}
+
+func (b *MultiGetService) Preference(preference string) *MultiGetService {
+	b.preference = preference
+	return b
+}
+
+func (b *MultiGetService) Refresh(refresh bool) *MultiGetService {
+	b.refresh = &refresh
+	return b
+}
+
+func (b *MultiGetService) Realtime(realtime bool) *MultiGetService {
+	b.realtime = &realtime
+	return b
+}
+
+func (b *MultiGetService) Add(items ...*MultiGetItem) *MultiGetService {
+	b.items = append(b.items, items...)
+	return b
+}
+
+func (b *MultiGetService) Source() interface{} {
+	source := make(map[string]interface{})
+	items := make([]interface{}, len(b.items))
+	for i, item := range b.items {
+		items[i] = item.Source()
+	}
+	source["docs"] = items
+	return source
+}
+
+func (b *MultiGetService) Do() (*MultiGetResult, error) {
+	// Build url
+	urls := "/_mget"
+
+	params := make(url.Values)
+	if b.realtime != nil {
+		params.Add("realtime", fmt.Sprintf("%v", *b.realtime))
+	}
+	if b.preference != "" {
+		params.Add("preference", b.preference)
+	}
+	if b.refresh != nil {
+		params.Add("refresh", fmt.Sprintf("%v", *b.refresh))
+	}
+	if len(params) > 0 {
+		urls += "?" + params.Encode()
+	}
+
+	// Set up a new request
+	req, err := b.client.NewRequest("GET", urls)
+	if err != nil {
+		return nil, err
+	}
+
+	// Set body
+	req.SetBodyJson(b.Source())
+
+	// Get response
+	res, err := b.client.c.Do((*http.Request)(req))
+	if err != nil {
+		return nil, err
+	}
+	if err := checkResponse(res); err != nil {
+		return nil, err
+	}
+	defer res.Body.Close()
+	ret := new(MultiGetResult)
+	if err := json.NewDecoder(res.Body).Decode(ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// -- Multi Get Item --
+
// MultiGetItem is a single document to retrieve via the MultiGetService.
type MultiGetItem struct {
	index       string
	typ         string
	id          string
	routing     string
	fields      []string
	version     *int64 // see org.elasticsearch.common.lucene.uid.Versions
	versionType string // see org.elasticsearch.index.VersionType
	fsc         *FetchSourceContext
}

// NewMultiGetItem creates a new, empty MultiGetItem.
func NewMultiGetItem() *MultiGetItem {
	return &MultiGetItem{}
}
+
+func (item *MultiGetItem) Index(index string) *MultiGetItem {
+	item.index = index
+	return item
+}
+
+func (item *MultiGetItem) Type(typ string) *MultiGetItem {
+	item.typ = typ
+	return item
+}
+
+func (item *MultiGetItem) Id(id string) *MultiGetItem {
+	item.id = id
+	return item
+}
+
+func (item *MultiGetItem) Routing(routing string) *MultiGetItem {
+	item.routing = routing
+	return item
+}
+
+func (item *MultiGetItem) Fields(fields ...string) *MultiGetItem {
+	if item.fields == nil {
+		item.fields = make([]string, 0)
+	}
+	item.fields = append(item.fields, fields...)
+	return item
+}
+
+// Version can be MatchAny (-3), MatchAnyPre120 (0), NotFound (-1),
+// or NotSet (-2). These are specified in org.elasticsearch.common.lucene.uid.Versions.
+// The default is MatchAny (-3).
+func (item *MultiGetItem) Version(version int64) *MultiGetItem {
+	item.version = &version
+	return item
+}
+
+// VersionType can be "internal", "external", "external_gt", "external_gte",
+// or "force". See org.elasticsearch.index.VersionType in Elasticsearch source.
+// It is "internal" by default.
+func (item *MultiGetItem) VersionType(versionType string) *MultiGetItem {
+	item.versionType = versionType
+	return item
+}
+
+func (item *MultiGetItem) FetchSource(fetchSourceContext *FetchSourceContext) *MultiGetItem {
+	item.fsc = fetchSourceContext
+	return item
+}
+
+// Source returns the serialized JSON to be sent to Elasticsearch as
+// part of a MultiGet search.
+func (item *MultiGetItem) Source() interface{} {
+	source := make(map[string]interface{})
+
+	source["_id"] = item.id
+
+	if item.index != "" {
+		source["_index"] = item.index
+	}
+	if item.typ != "" {
+		source["_type"] = item.typ
+	}
+	if item.fsc != nil {
+		source["_source"] = item.fsc.Source()
+	}
+	if item.fields != nil {
+		source["_fields"] = item.fields
+	}
+	if item.routing != "" {
+		source["_routing"] = item.routing
+	}
+	if item.version != nil {
+		source["_version"] = *item.version
+	}
+	if item.versionType != "" {
+		source["_version_type"] = item.versionType
+	}
+
+	return source
+}
+
// -- Result of a Multi Get request.

// MultiGetResult is the response of MultiGetService.Do: one GetResult
// per requested item.
type MultiGetResult struct {
	Docs []*GetResult `json:"docs,omitempty"`
}

+ 95 - 0
sensitive/src/elastic.v1/multi_get_test.go

@@ -0,0 +1,95 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
// TestMultiGet indexes three documents, then fetches documents 1 and 3
// in one _mget roundtrip and checks both payloads.
func TestMultiGet(t *testing.T) {
	client := setupTestClientAndCreateIndex(t)

	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}

	// Add some documents
	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
	if err != nil {
		t.Fatal(err)
	}

	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
	if err != nil {
		t.Fatal(err)
	}

	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
	if err != nil {
		t.Fatal(err)
	}

	// Flush so the documents are searchable
	_, err = client.Flush().Index(testIndexName).Do()
	if err != nil {
		t.Fatal(err)
	}

	// Count documents
	count, err := client.Count(testIndexName).Do()
	if err != nil {
		t.Fatal(err)
	}
	if count != 3 {
		t.Errorf("expected Count = %d; got %d", 3, count)
	}

	// Get documents 1 and 3
	res, err := client.MultiGet().
		Add(NewMultiGetItem().Index(testIndexName).Type("tweet").Id("1")).
		Add(NewMultiGetItem().Index(testIndexName).Type("tweet").Id("3")).
		Do()
	if err != nil {
		t.Fatal(err)
	}
	if res == nil {
		t.Fatal("expected result to be != nil; got nil")
	}
	if res.Docs == nil {
		t.Fatal("expected result docs to be != nil; got nil")
	}
	if len(res.Docs) != 2 {
		t.Fatalf("expected to have 2 docs; got %d", len(res.Docs))
	}

	// First returned doc should be tweet 1
	item := res.Docs[0]
	if item.Error != "" {
		t.Errorf("expected no error on item 0; got %q", item.Error)
	}
	if item.Source == nil {
		t.Errorf("expected Source != nil; got %v", item.Source)
	}
	var doc tweet
	if err := json.Unmarshal(*item.Source, &doc); err != nil {
		t.Fatalf("expected to unmarshal item Source; got %v", err)
	}
	if doc.Message != tweet1.Message {
		t.Errorf("expected Message of first tweet to be %q; got %q", tweet1.Message, doc.Message)
	}

	// Second returned doc should be tweet 3
	item = res.Docs[1]
	if item.Error != "" {
		t.Errorf("expected no error on item 1; got %q", item.Error)
	}
	if item.Source == nil {
		t.Errorf("expected Source != nil; got %v", item.Source)
	}
	if err := json.Unmarshal(*item.Source, &doc); err != nil {
		t.Fatalf("expected to unmarshal item Source; got %v", err)
	}
	if doc.Message != tweet3.Message {
		t.Errorf("expected Message of second tweet to be %q; got %q", tweet3.Message, doc.Message)
	}
}

+ 130 - 0
sensitive/src/elastic.v1/multi_search.go

@@ -0,0 +1,130 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+)
+
// MultiSearch executes one or more searches in one roundtrip.
// See http://www.elasticsearch.org/guide/reference/api/multi-search/
type MultiSearchService struct {
	client     *Client
	requests   []*SearchRequest // queued search requests
	indices    []string         // default indices for requests without any
	pretty     bool
	debug      bool
	routing    string // NOTE(review): never read in this file — confirm whether dead
	preference string // NOTE(review): never read in this file — confirm whether dead
}

// NewMultiSearchService creates a new MultiSearchService.
func NewMultiSearchService(client *Client) *MultiSearchService {
	builder := &MultiSearchService{
		client:   client,
		requests: make([]*SearchRequest, 0),
		indices:  make([]string, 0),
		debug:    false,
		pretty:   false,
	}
	return builder
}
+
+func (s *MultiSearchService) Add(requests ...*SearchRequest) *MultiSearchService {
+	s.requests = append(s.requests, requests...)
+	return s
+}
+
+func (s *MultiSearchService) Index(index string) *MultiSearchService {
+	s.indices = append(s.indices, index)
+	return s
+}
+
+func (s *MultiSearchService) Indices(indices ...string) *MultiSearchService {
+	s.indices = append(s.indices, indices...)
+	return s
+}
+
+func (s *MultiSearchService) Pretty(pretty bool) *MultiSearchService {
+	s.pretty = pretty
+	return s
+}
+
+func (s *MultiSearchService) Debug(debug bool) *MultiSearchService {
+	s.debug = debug
+	return s
+}
+
+func (s *MultiSearchService) Do() (*MultiSearchResult, error) {
+	// Build url
+	urls := "/_msearch"
+
+	// Parameters
+	params := make(url.Values)
+	if s.pretty {
+		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+	}
+	if len(params) > 0 {
+		urls += "?" + params.Encode()
+	}
+
+	// Set up a new request
+	req, err := s.client.NewRequest("GET", urls)
+	if err != nil {
+		return nil, err
+	}
+
+	// Set body
+	lines := make([]string, 0)
+	for _, sr := range s.requests {
+		// Set default indices if not specified in the request
+		if !sr.HasIndices() && len(s.indices) > 0 {
+			sr = sr.Indices(s.indices...)
+		}
+
+		header, err := json.Marshal(sr.header())
+		if err != nil {
+			return nil, err
+		}
+		body, err := json.Marshal(sr.body())
+		if err != nil {
+			return nil, err
+		}
+		lines = append(lines, string(header))
+		lines = append(lines, string(body))
+	}
+	req.SetBodyString(strings.Join(lines, "\n") + "\n") // Don't forget trailing \n
+
+	if s.debug {
+		s.client.dumpRequest((*http.Request)(req))
+	}
+
+	// Get response
+	res, err := s.client.c.Do((*http.Request)(req))
+	if err != nil {
+		return nil, err
+	}
+	if err := checkResponse(res); err != nil {
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if s.debug {
+		s.client.dumpResponse(res)
+	}
+
+	ret := new(MultiSearchResult)
+	if err := json.NewDecoder(res.Body).Decode(ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
// MultiSearchResult is the response of MultiSearchService.Do: one
// SearchResult per queued request, in order.
type MultiSearchResult struct {
	Responses []*SearchResult `json:"responses,omitempty"`
}

+ 195 - 0
sensitive/src/elastic.v1/multi_search_test.go

@@ -0,0 +1,195 @@
+package elastic
+
+import (
+	"encoding/json"
+	_ "net/http"
+	"testing"
+)
+
// TestMultiSearch verifies that two search requests can be executed in a
// single _msearch roundtrip against a live Elasticsearch node and that
// each response reflects its own query.
func TestMultiSearch(t *testing.T) {
	client := setupTestClientAndCreateIndex(t)

	tweet1 := tweet{
		User:    "olivere",
		Message: "Welcome to Golang and Elasticsearch.",
		Tags:    []string{"golang", "elasticsearch"},
	}
	tweet2 := tweet{
		User:    "olivere",
		Message: "Another unrelated topic.",
		Tags:    []string{"golang"},
	}
	tweet3 := tweet{
		User:    "sandrae",
		Message: "Cycling is fun.",
		Tags:    []string{"sports", "cycling"},
	}

	// Add all documents
	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
	if err != nil {
		t.Fatal(err)
	}

	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
	if err != nil {
		t.Fatal(err)
	}

	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
	if err != nil {
		t.Fatal(err)
	}

	_, err = client.Flush().Index(testIndexName).Do()
	if err != nil {
		t.Fatal(err)
	}

	// Spawn two search queries with one roundtrip
	q1 := NewMatchAllQuery()
	q2 := NewTermQuery("tags", "golang")

	sreq1 := NewSearchRequest().Indices(testIndexName, testIndexName2).
		Source(NewSearchSource().Query(q1).Size(10))
	sreq2 := NewSearchRequest().Index(testIndexName).Type("tweet").
		Source(NewSearchSource().Query(q2))

	searchResult, err := client.MultiSearch().
		Add(sreq1, sreq2).
		// Pretty(true).Debug(true).
		Do()
	if err != nil {
		t.Fatal(err)
	}
	if searchResult.Responses == nil {
		t.Fatal("expected responses != nil; got nil")
	}
	if len(searchResult.Responses) != 2 {
		t.Fatalf("expected 2 responses; got %d", len(searchResult.Responses))
	}

	// First response: match-all over both indices -> all 3 tweets.
	sres := searchResult.Responses[0]
	if sres.Hits == nil {
		t.Errorf("expected Hits != nil; got nil")
	}
	if sres.Hits.TotalHits != 3 {
		t.Errorf("expected Hits.TotalHits = %d; got %d", 3, sres.Hits.TotalHits)
	}
	if len(sres.Hits.Hits) != 3 {
		t.Errorf("expected len(Hits.Hits) = %d; got %d", 3, len(sres.Hits.Hits))
	}
	for _, hit := range sres.Hits.Hits {
		if hit.Index != testIndexName {
			t.Errorf("expected Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
		}
		item := make(map[string]interface{})
		err := json.Unmarshal(*hit.Source, &item)
		if err != nil {
			t.Fatal(err)
		}
	}

	// Second response: term query on tags=golang -> tweets 1 and 2.
	sres = searchResult.Responses[1]
	if sres.Hits == nil {
		t.Errorf("expected Hits != nil; got nil")
	}
	if sres.Hits.TotalHits != 2 {
		t.Errorf("expected Hits.TotalHits = %d; got %d", 2, sres.Hits.TotalHits)
	}
	if len(sres.Hits.Hits) != 2 {
		t.Errorf("expected len(Hits.Hits) = %d; got %d", 2, len(sres.Hits.Hits))
	}
	for _, hit := range sres.Hits.Hits {
		if hit.Index != testIndexName {
			t.Errorf("expected Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
		}
		item := make(map[string]interface{})
		err := json.Unmarshal(*hit.Source, &item)
		if err != nil {
			t.Fatal(err)
		}
	}
}
+
// TestMultiSearchWithOneRequest verifies that a request without indices
// falls back to the service-level default index set via Index().
func TestMultiSearchWithOneRequest(t *testing.T) {
	client := setupTestClientAndCreateIndex(t)

	tweet1 := tweet{
		User:    "olivere",
		Message: "Welcome to Golang and Elasticsearch.",
		Tags:    []string{"golang", "elasticsearch"},
	}
	tweet2 := tweet{
		User:    "olivere",
		Message: "Another unrelated topic.",
		Tags:    []string{"golang"},
	}
	tweet3 := tweet{
		User:    "sandrae",
		Message: "Cycling is fun.",
		Tags:    []string{"sports", "cycling"},
	}

	// Add all documents
	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
	if err != nil {
		t.Fatal(err)
	}

	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
	if err != nil {
		t.Fatal(err)
	}

	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
	if err != nil {
		t.Fatal(err)
	}

	_, err = client.Flush().Index(testIndexName).Do()
	if err != nil {
		t.Fatal(err)
	}

	// Spawn two search queries with one roundtrip
	query := NewMatchAllQuery()
	source := NewSearchSource().Query(query).Size(10)
	sreq := NewSearchRequest().Source(source)

	searchResult, err := client.MultiSearch().
		Index(testIndexName).
		Add(sreq).
		// Pretty(true).Debug(true).
		Do()
	if err != nil {
		t.Fatal(err)
	}
	if searchResult.Responses == nil {
		t.Fatal("expected responses != nil; got nil")
	}
	if len(searchResult.Responses) != 1 {
		t.Fatalf("expected 1 responses; got %d", len(searchResult.Responses))
	}

	sres := searchResult.Responses[0]
	if sres.Hits == nil {
		t.Errorf("expected Hits != nil; got nil")
	}
	if sres.Hits.TotalHits != 3 {
		t.Errorf("expected Hits.TotalHits = %d; got %d", 3, sres.Hits.TotalHits)
	}
	if len(sres.Hits.Hits) != 3 {
		t.Errorf("expected len(Hits.Hits) = %d; got %d", 3, len(sres.Hits.Hits))
	}
	for _, hit := range sres.Hits.Hits {
		if hit.Index != testIndexName {
			t.Errorf("expected Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
		}
		item := make(map[string]interface{})
		err := json.Unmarshal(*hit.Source, &item)
		if err != nil {
			t.Fatal(err)
		}
	}
}

+ 162 - 0
sensitive/src/elastic.v1/optimize.go

@@ -0,0 +1,162 @@
+// Copyright 2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
// OptimizeService merges the segments of one or more indices via the
// _optimize endpoint. All option fields are pointers so that unset
// options are omitted from the request.
type OptimizeService struct {
	client             *Client
	indices            []string // indices to optimize; empty targets all indices
	maxNumSegments     *int     // merge down to this many segments
	onlyExpungeDeletes *bool    // only expunge segments with deletes
	flush              *bool    // flush after the optimize
	waitForMerge       *bool    // block until the merge has finished
	force              *bool    // optimize even if already optimized
	pretty             bool     // indent the returned JSON
	debug              bool     // dump request/response while executing Do
}
+
+func NewOptimizeService(client *Client) *OptimizeService {
+	builder := &OptimizeService{
+		client:  client,
+		indices: make([]string, 0),
+	}
+	return builder
+}
+
+func (s *OptimizeService) Index(index string) *OptimizeService {
+	s.indices = append(s.indices, index)
+	return s
+}
+
+func (s *OptimizeService) Indices(indices ...string) *OptimizeService {
+	s.indices = append(s.indices, indices...)
+	return s
+}
+
+func (s *OptimizeService) MaxNumSegments(maxNumSegments int) *OptimizeService {
+	s.maxNumSegments = &maxNumSegments
+	return s
+}
+
+func (s *OptimizeService) OnlyExpungeDeletes(onlyExpungeDeletes bool) *OptimizeService {
+	s.onlyExpungeDeletes = &onlyExpungeDeletes
+	return s
+}
+
+func (s *OptimizeService) Flush(flush bool) *OptimizeService {
+	s.flush = &flush
+	return s
+}
+
+func (s *OptimizeService) WaitForMerge(waitForMerge bool) *OptimizeService {
+	s.waitForMerge = &waitForMerge
+	return s
+}
+
+func (s *OptimizeService) Force(force bool) *OptimizeService {
+	s.force = &force
+	return s
+}
+
// Pretty asks Elasticsearch to indent the returned JSON ("pretty=true").
func (s *OptimizeService) Pretty(pretty bool) *OptimizeService {
	s.pretty = pretty
	return s
}
+
// Debug enables dumping the HTTP request and response while executing Do.
func (s *OptimizeService) Debug(debug bool) *OptimizeService {
	s.debug = debug
	return s
}
+
+func (s *OptimizeService) Do() (*OptimizeResult, error) {
+	// Build url
+	urls := "/"
+
+	// Indices part
+	indexPart := make([]string, 0)
+	for _, index := range s.indices {
+		index, err := uritemplates.Expand("{index}", map[string]string{
+			"index": index,
+		})
+		if err != nil {
+			return nil, err
+		}
+		indexPart = append(indexPart, index)
+	}
+	if len(indexPart) > 0 {
+		urls += strings.Join(indexPart, ",")
+	}
+
+	urls += "/_optimize"
+
+	// Parameters
+	params := make(url.Values)
+	if s.maxNumSegments != nil {
+		params.Set("max_num_segments", fmt.Sprintf("%d", *s.maxNumSegments))
+	}
+	if s.onlyExpungeDeletes != nil {
+		params.Set("only_expunge_deletes", fmt.Sprintf("%v", *s.onlyExpungeDeletes))
+	}
+	if s.flush != nil {
+		params.Set("flush", fmt.Sprintf("%v", *s.flush))
+	}
+	if s.waitForMerge != nil {
+		params.Set("wait_for_merge", fmt.Sprintf("%v", *s.waitForMerge))
+	}
+	if s.force != nil {
+		params.Set("force", fmt.Sprintf("%v", *s.force))
+	}
+	if s.pretty {
+		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+	}
+	if len(params) > 0 {
+		urls += "?" + params.Encode()
+	}
+
+	// Set up a new request
+	req, err := s.client.NewRequest("POST", urls)
+	if err != nil {
+		return nil, err
+	}
+
+	if s.debug {
+		s.client.dumpRequest((*http.Request)(req))
+	}
+
+	// Get response
+	res, err := s.client.c.Do((*http.Request)(req))
+	if err != nil {
+		return nil, err
+	}
+	if err := checkResponse(res); err != nil {
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if s.debug {
+		s.client.dumpResponse(res)
+	}
+
+	ret := new(OptimizeResult)
+	if err := json.NewDecoder(res.Body).Decode(ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
// -- Result of an optimize request.

// OptimizeResult carries the shard statistics reported by _optimize.
type OptimizeResult struct {
	Shards shardsInfo `json:"_shards,omitempty"`
}

+ 49 - 0
sensitive/src/elastic.v1/optimize_test.go

@@ -0,0 +1,49 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"testing"
+)
+
// TestOptimize indexes a few documents and checks that an optimize call
// over both test indices returns a non-nil result against a live node.
func TestOptimize(t *testing.T) {
	client := setupTestClientAndCreateIndex(t)

	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}

	// Add some documents
	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
	if err != nil {
		t.Fatal(err)
	}

	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
	if err != nil {
		t.Fatal(err)
	}

	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
	if err != nil {
		t.Fatal(err)
	}

	_, err = client.Flush().Index(testIndexName).Do()
	if err != nil {
		t.Fatal(err)
	}

	// Optimize documents
	res, err := client.Optimize(testIndexName, testIndexName2).
		// Pretty(true).Debug(true).
		Do()
	if err != nil {
		t.Fatal(err)
	}
	if res == nil {
		t.Fatal("expected result; got nil")
	}
}

+ 131 - 0
sensitive/src/elastic.v1/ping.go

@@ -0,0 +1,131 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"net/http"
+	"net/url"
+)
+
// PingService checks if an Elasticsearch server on a given URL is alive.
// When asked for, it can also return various information about the
// Elasticsearch server, e.g. the Elasticsearch version number.
//
// Ping simply starts a HTTP GET request to the URL of the server.
// If the server responds with HTTP Status code 200 OK, the server is alive.
type PingService struct {
	client       *Client
	url          string // full base URL of the server to ping
	timeout      string // optional "timeout" query parameter
	httpHeadOnly bool   // use HEAD instead of GET; Do then returns a nil PingResult
	debug        bool   // dump request/response while executing Do
	pretty       bool   // indent the returned JSON
}
+
// PingResult is the result returned from querying the Elasticsearch server:
// the JSON document the server serves at its root URL.
type PingResult struct {
	Status      int    `json:"status"`
	Name        string `json:"name"`
	ClusterName string `json:"cluster_name"`
	Version     struct {
		Number         string `json:"number"`
		BuildHash      string `json:"build_hash"`
		BuildTimestamp string `json:"build_timestamp"`
		BuildSnapshot  bool   `json:"build_snapshot"`
		LuceneVersion  string `json:"lucene_version"`
	} `json:"version"`
	TagLine string `json:"tagline"`
}
+
+func NewPingService(client *Client) *PingService {
+	return &PingService{
+		client:       client,
+		url:          defaultUrl,
+		httpHeadOnly: false,
+		debug:        false,
+		pretty:       false,
+	}
+}
+
// URL overrides the base URL of the server to ping.
func (s *PingService) URL(url string) *PingService {
	s.url = url
	return s
}
+
// Timeout sets the "timeout" query parameter, e.g. "5s".
func (s *PingService) Timeout(timeout string) *PingService {
	s.timeout = timeout
	return s
}
+
// HttpHeadOnly makes the service use a HEAD request and only return the
// status code in Do; the PingResult will be nil.
func (s *PingService) HttpHeadOnly(httpHeadOnly bool) *PingService {
	s.httpHeadOnly = httpHeadOnly
	return s
}
+
// Pretty asks Elasticsearch to indent the returned JSON.
func (s *PingService) Pretty(pretty bool) *PingService {
	s.pretty = pretty
	return s
}
+
// Debug enables dumping the HTTP request and response while executing Do.
func (s *PingService) Debug(debug bool) *PingService {
	s.debug = debug
	return s
}
+
+// Do returns the PingResult, the HTTP status code of the Elasticsearch
+// server, and an error.
+func (s *PingService) Do() (*PingResult, int, error) {
+	url_ := s.url + "/"
+
+	params := make(url.Values)
+	if s.timeout != "" {
+		params.Set("timeout", s.timeout)
+	}
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+	if len(params) > 0 {
+		url_ += "?" + params.Encode()
+	}
+
+	var method string
+	if s.httpHeadOnly {
+		method = "HEAD"
+	} else {
+		method = "GET"
+	}
+
+	req, err := NewRequest(method, url_)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	if s.debug {
+		s.client.dumpRequest((*http.Request)(req))
+	}
+
+	res, err := s.client.c.Do((*http.Request)(req))
+	if err != nil {
+		return nil, 0, err
+	}
+	defer res.Body.Close()
+
+	if s.debug {
+		s.client.dumpResponse(res)
+	}
+
+	var ret *PingResult
+	if !s.httpHeadOnly {
+		ret = new(PingResult)
+		if err := json.NewDecoder(res.Body).Decode(ret); err != nil {
+			return nil, res.StatusCode, err
+		}
+	}
+
+	return ret, res.StatusCode, nil
+}

+ 73 - 0
sensitive/src/elastic.v1/ping_test.go

@@ -0,0 +1,73 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"net/http"
+	"testing"
+)
+
// TestPingGet pings a live node via GET and checks the decoded PingResult.
func TestPingGet(t *testing.T) {
	client := setupTestClientAndCreateIndex(t)

	res, code, err := client.Ping().
		//Pretty(true).Debug(true).
		Do()
	if err != nil {
		t.Fatal(err)
	}
	if code != http.StatusOK {
		t.Errorf("expected status code = %d; got %d", http.StatusOK, code)
	}
	if res == nil {
		t.Fatalf("expected to return result, got: %v", res)
	}
	if res.Status != http.StatusOK {
		t.Errorf("expected Status = %d; got %d", http.StatusOK, res.Status)
	}
	if res.Name == "" {
		t.Errorf("expected Name != \"\"; got %q", res.Name)
	}
	if res.Version.Number == "" {
		t.Errorf("expected Version.Number != \"\"; got %q", res.Version.Number)
	}
}
+
// TestPingHead pings via HEAD and expects status 200 with a nil result.
func TestPingHead(t *testing.T) {
	client := setupTestClientAndCreateIndex(t)

	res, code, err := client.Ping().
		HttpHeadOnly(true).
		//Pretty(true).Debug(true).
		Do()
	if err != nil {
		t.Fatal(err)
	}
	if code != http.StatusOK {
		t.Errorf("expected status code = %d; got %d", http.StatusOK, code)
	}
	if res != nil {
		t.Errorf("expected not to return result, got: %v", res)
	}
}
+
// TestPingHeadFailure pings an unreachable port and expects an error.
func TestPingHeadFailure(t *testing.T) {
	client := setupTestClientAndCreateIndex(t)

	res, code, err := client.Ping().
		URL("http://127.0.0.1:9299").
		HttpHeadOnly(true).
		//Pretty(true).Debug(true).
		Do()
	if err == nil {
		t.Error("expected error, got nil")
	}
	if code == http.StatusOK {
		t.Errorf("expected status code != %d; got %d", http.StatusOK, code)
	}
	if res != nil {
		t.Errorf("expected not to return result, got: %v", res)
	}
}

+ 175 - 0
sensitive/src/elastic.v1/put_template.go

@@ -0,0 +1,175 @@
+// Copyright 2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
// PutTemplateService creates or updates a search template.
// The documentation can be found at
// http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-template.html.
type PutTemplateService struct {
	client      *Client
	debug       bool        // dump request/response while executing Do
	pretty      bool        // currently unused in buildURL — note for maintainers
	id          string      // template id (required)
	opType      string      // explicit operation type ("op_type" parameter)
	version     *int        // explicit version for concurrency control
	versionType string      // specific version type
	bodyJson    interface{} // template body as JSON-serializable object
	bodyString  string      // template body as raw string (used when bodyJson is nil)
}
+
+// NewPutTemplateService creates a new PutTemplateService.
+func NewPutTemplateService(client *Client) *PutTemplateService {
+	return &PutTemplateService{
+		client: client,
+	}
+}
+
// Id sets the template ID (required).
func (s *PutTemplateService) Id(id string) *PutTemplateService {
	s.id = id
	return s
}
+
// OpType sets an explicit operation type ("op_type" parameter).
func (s *PutTemplateService) OpType(opType string) *PutTemplateService {
	s.opType = opType
	return s
}
+
+// Version is an explicit version number for concurrency control.
+func (s *PutTemplateService) Version(version int) *PutTemplateService {
+	s.version = &version
+	return s
+}
+
// VersionType sets a specific version type ("version_type" parameter).
func (s *PutTemplateService) VersionType(versionType string) *PutTemplateService {
	s.versionType = versionType
	return s
}
+
// BodyJson sets the template body as a JSON-serializable object.
// It takes precedence over BodyString.
func (s *PutTemplateService) BodyJson(body interface{}) *PutTemplateService {
	s.bodyJson = body
	return s
}
+
// BodyString sets the template body as a raw string.
// It is used only when BodyJson has not been set.
func (s *PutTemplateService) BodyString(body string) *PutTemplateService {
	s.bodyString = body
	return s
}
+
+// buildURL builds the URL for the operation.
+func (s *PutTemplateService) buildURL() (string, error) {
+	// Build URL
+	urls, err := uritemplates.Expand("/_search/template/{id}", map[string]string{
+		"id": s.id,
+	})
+	if err != nil {
+		return "", err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.version != nil {
+		params.Set("version", fmt.Sprintf("%d", *s.version))
+	}
+	if s.versionType != "" {
+		params.Set("version_type", s.versionType)
+	}
+	if s.opType != "" {
+		params.Set("op_type", s.opType)
+	}
+	if len(params) > 0 {
+		urls += "?" + params.Encode()
+	}
+
+	return urls, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *PutTemplateService) Validate() error {
+	var invalid []string
+	if s.id == "" {
+		invalid = append(invalid, "Id")
+	}
+	if s.bodyString == "" && s.bodyJson == nil {
+		invalid = append(invalid, "BodyJson")
+	}
+	if len(invalid) > 0 {
+		return fmt.Errorf("missing required fields: %v", invalid)
+	}
+	return nil
+}
+
+// Do executes the operation.
+func (s *PutTemplateService) Do() (*PutTemplateResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	urls, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Setup HTTP request
+	req, err := s.client.NewRequest("PUT", urls)
+	if err != nil {
+		return nil, err
+	}
+
+	// Setup HTTP request body
+	if s.bodyJson != nil {
+		req.SetBodyJson(s.bodyJson)
+	} else {
+		req.SetBodyString(s.bodyString)
+	}
+
+	// Debug output?
+	if s.debug {
+		s.client.dumpRequest((*http.Request)(req))
+	}
+
+	// Get HTTP response
+	res, err := s.client.c.Do((*http.Request)(req))
+	if err != nil {
+		return nil, err
+	}
+	if err := checkResponse(res); err != nil {
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	// Debug output?
+	if s.debug {
+		s.client.dumpResponse(res)
+	}
+	// Return operation response
+	resp := new(PutTemplateResponse)
+	if err := json.NewDecoder(res.Body).Decode(resp); err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
// PutTemplateResponse is the response of PutTemplateService.Do.
type PutTemplateResponse struct {
	Id      string `json:"_id"`
	Version int    `json:"_version"`
	Created bool   `json:"created"` // presumably true when newly created rather than updated — confirm against ES docs
}

+ 14 - 0
sensitive/src/elastic.v1/query.go

@@ -0,0 +1,14 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
// Query represents the generic query interface.
// A query's only purpose is to return the source of the query
// as a JSON-serializable object. Returning a map[string]interface{}
// will do.
type Query interface {
	Source() interface{}
}

+ 126 - 0
sensitive/src/elastic.v1/refresh.go

@@ -0,0 +1,126 @@
+// Copyright 2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
// RefreshService refreshes one or more indices via the _refresh endpoint,
// making recent index changes visible to search.
type RefreshService struct {
	client  *Client
	indices []string // indices to refresh; empty targets all indices
	force   *bool    // optional "force" parameter
	pretty  bool     // indent the returned JSON
	debug   bool     // dump request/response while executing Do
}
+
+func NewRefreshService(client *Client) *RefreshService {
+	builder := &RefreshService{
+		client:  client,
+		indices: make([]string, 0),
+	}
+	return builder
+}
+
+func (s *RefreshService) Index(index string) *RefreshService {
+	s.indices = append(s.indices, index)
+	return s
+}
+
+func (s *RefreshService) Indices(indices ...string) *RefreshService {
+	s.indices = append(s.indices, indices...)
+	return s
+}
+
+func (s *RefreshService) Force(force bool) *RefreshService {
+	s.force = &force
+	return s
+}
+
// Pretty asks Elasticsearch to indent the returned JSON ("pretty=true").
func (s *RefreshService) Pretty(pretty bool) *RefreshService {
	s.pretty = pretty
	return s
}
+
// Debug enables dumping the HTTP request and response while executing Do.
func (s *RefreshService) Debug(debug bool) *RefreshService {
	s.debug = debug
	return s
}
+
+func (s *RefreshService) Do() (*RefreshResult, error) {
+	// Build url
+	urls := "/"
+
+	// Indices part
+	indexPart := make([]string, 0)
+	for _, index := range s.indices {
+		index, err := uritemplates.Expand("{index}", map[string]string{
+			"index": index,
+		})
+		if err != nil {
+			return nil, err
+		}
+		indexPart = append(indexPart, index)
+	}
+	if len(indexPart) > 0 {
+		urls += strings.Join(indexPart, ",")
+	}
+
+	urls += "/_refresh"
+
+	// Parameters
+	params := make(url.Values)
+	if s.force != nil {
+		params.Set("force", fmt.Sprintf("%v", *s.force))
+	}
+	if s.pretty {
+		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+	}
+	if len(params) > 0 {
+		urls += "?" + params.Encode()
+	}
+
+	// Set up a new request
+	req, err := s.client.NewRequest("POST", urls)
+	if err != nil {
+		return nil, err
+	}
+
+	if s.debug {
+		s.client.dumpRequest((*http.Request)(req))
+	}
+
+	// Get response
+	res, err := s.client.c.Do((*http.Request)(req))
+	if err != nil {
+		return nil, err
+	}
+	if err := checkResponse(res); err != nil {
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if s.debug {
+		s.client.dumpResponse(res)
+	}
+
+	ret := new(RefreshResult)
+	if err := json.NewDecoder(res.Body).Decode(ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
// -- Result of a refresh request.

// RefreshResult carries the shard statistics reported by _refresh.
type RefreshResult struct {
	Shards shardsInfo `json:"_shards,omitempty"`
}

+ 49 - 0
sensitive/src/elastic.v1/refresh_test.go

@@ -0,0 +1,49 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"testing"
+)
+
// TestRefresh indexes a few documents and checks that refreshing both
// test indices returns a non-nil result against a live node.
func TestRefresh(t *testing.T) {
	client := setupTestClientAndCreateIndex(t)

	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}

	// Add some documents
	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
	if err != nil {
		t.Fatal(err)
	}

	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
	if err != nil {
		t.Fatal(err)
	}

	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
	if err != nil {
		t.Fatal(err)
	}

	_, err = client.Flush().Index(testIndexName).Do()
	if err != nil {
		t.Fatal(err)
	}

	// Refresh indices
	res, err := client.Refresh(testIndexName, testIndexName2).
		// Pretty(true).Debug(true).
		Do()
	if err != nil {
		t.Fatal(err)
	}
	if res == nil {
		t.Fatal("expected result; got nil")
	}
}

+ 60 - 0
sensitive/src/elastic.v1/request.go

@@ -0,0 +1,60 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"bytes"
+	"encoding/json"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"runtime"
+	"strings"
+)
+
// Request is an Elasticsearch-specific HTTP request: a thin alias of
// http.Request with helpers for setting JSON and string bodies.
type Request http.Request
+
+func NewRequest(method, url string) (*Request, error) {
+	req, err := http.NewRequest(method, url, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	req.Header.Add("User-Agent", "elastic/"+Version+" ("+runtime.GOOS+"-"+runtime.GOARCH+")")
+	req.Header.Add("Accept", "application/json")
+	return (*Request)(req), nil
+}
+
+func (r *Request) SetBodyJson(data interface{}) error {
+	body, err := json.Marshal(data)
+	if err != nil {
+		return err
+	}
+	r.SetBody(bytes.NewReader(body))
+	r.Header.Set("Content-Type", "application/json")
+	return nil
+}
+
+func (r *Request) SetBodyString(body string) error {
+	return r.SetBody(strings.NewReader(body))
+}
+
+func (r *Request) SetBody(body io.Reader) error {
+	rc, ok := body.(io.ReadCloser)
+	if !ok && body != nil {
+		rc = ioutil.NopCloser(body)
+	}
+	r.Body = rc
+	if body != nil {
+		switch v := body.(type) {
+		case *strings.Reader:
+			r.ContentLength = int64(v.Len())
+		case *bytes.Buffer:
+			r.ContentLength = int64(v.Len())
+		}
+	}
+	return nil
+}

+ 36 - 0
sensitive/src/elastic.v1/rescore.go

@@ -0,0 +1,36 @@
+package elastic
+
// Rescore combines a Rescorer with an optional window size for use in a
// search request's "rescore" section.
type Rescore struct {
	rescorer                 Rescorer
	windowSize               *int // explicit per-rescore window size
	defaultRescoreWindowSize *int // fallback window size when none is set explicitly
}
+
// NewRescore creates a new, empty Rescore.
func NewRescore() *Rescore {
	return &Rescore{}
}
+
+func (r *Rescore) WindowSize(windowSize int) *Rescore {
+	r.windowSize = &windowSize
+	return r
+}
+
// IsEmpty reports whether no rescorer has been set yet.
func (r *Rescore) IsEmpty() bool {
	return r.rescorer == nil
}
+
// Rescorer sets the rescorer to use, e.g. a QueryRescorer.
func (r *Rescore) Rescorer(rescorer Rescorer) *Rescore {
	r.rescorer = rescorer
	return r
}
+
+func (r *Rescore) Source() interface{} {
+	source := make(map[string]interface{})
+	if r.windowSize != nil {
+		source["window_size"] = *r.windowSize
+	} else if r.defaultRescoreWindowSize != nil {
+		source["window_size"] = *r.defaultRescoreWindowSize
+	}
+	source[r.rescorer.Name()] = r.rescorer.Source()
+	return source
+}

+ 55 - 0
sensitive/src/elastic.v1/rescorer.go

@@ -0,0 +1,55 @@
+package elastic
+
// Rescorer computes secondary scores for documents returned by a query.
// Name returns the JSON key under which Source's output is serialized.
type Rescorer interface {
	Name() string
	Source() interface{}
}
+
// -- Query Rescorer --

// QueryRescorer rescores top documents with a secondary query,
// optionally weighting the original and rescore scores.
type QueryRescorer struct {
	query              Query
	rescoreQueryWeight *float64 // weight of the rescore query's score
	queryWeight        *float64 // weight of the original query's score
	scoreMode          string   // how the two scores are combined
}
+
+func NewQueryRescorer(query Query) *QueryRescorer {
+	return &QueryRescorer{
+		query: query,
+	}
+}
+
// Name returns "query", the JSON key for this rescorer type.
func (r *QueryRescorer) Name() string {
	return "query"
}
+
+func (r *QueryRescorer) RescoreQueryWeight(rescoreQueryWeight float64) *QueryRescorer {
+	r.rescoreQueryWeight = &rescoreQueryWeight
+	return r
+}
+
+func (r *QueryRescorer) QueryWeight(queryWeight float64) *QueryRescorer {
+	r.queryWeight = &queryWeight
+	return r
+}
+
// ScoreMode sets how the original and rescore scores are combined.
func (r *QueryRescorer) ScoreMode(scoreMode string) *QueryRescorer {
	r.scoreMode = scoreMode
	return r
}
+
+func (r *QueryRescorer) Source() interface{} {
+	source := make(map[string]interface{})
+	source["rescore_query"] = r.query.Source()
+	if r.queryWeight != nil {
+		source["query_weight"] = *r.queryWeight
+	}
+	if r.rescoreQueryWeight != nil {
+		source["rescore_query_weight"] = *r.rescoreQueryWeight
+	}
+	if r.scoreMode != "" {
+		source["score_mode"] = r.scoreMode
+	}
+	return source
+}

+ 326 - 0
sensitive/src/elastic.v1/scan.go

@@ -0,0 +1,326 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
const (
	// defaultKeepAlive is the scroll keep-alive used when none was set.
	defaultKeepAlive = "5m"
)
+
var (
	// EOS is returned by ScanCursor.Next when the scan is exhausted
	// (end of stream).
	EOS = errors.New("EOS")

	// ErrNoScrollId is returned when a result carries no scroll id,
	// so the cursor cannot fetch further pages.
	ErrNoScrollId = errors.New("elastic: No scrollId")
)
+
// ScanService manages a cursor through documents in Elasticsearch,
// using search_type=scan plus the scroll API.
type ScanService struct {
	client    *Client
	indices   []string // indices to scan; nil/empty targets all indices
	types     []string // document types to scan; nil/empty targets all types
	keepAlive string   // scroll keep-alive, e.g. "5m"; defaultKeepAlive when empty
	query     Query    // query to scan; defaults to match_all
	size      *int     // documents per shard per page
	pretty    bool     // indent the returned JSON
	debug     bool     // dump request/response while executing
}
+
+func NewScanService(client *Client) *ScanService {
+	builder := &ScanService{
+		client: client,
+		query:  NewMatchAllQuery(),
+		debug:  false,
+		pretty: false,
+	}
+	return builder
+}
+
+func (s *ScanService) Index(index string) *ScanService {
+	if s.indices == nil {
+		s.indices = make([]string, 0)
+	}
+	s.indices = append(s.indices, index)
+	return s
+}
+
+func (s *ScanService) Indices(indices ...string) *ScanService {
+	if s.indices == nil {
+		s.indices = make([]string, 0)
+	}
+	s.indices = append(s.indices, indices...)
+	return s
+}
+
+func (s *ScanService) Type(typ string) *ScanService {
+	if s.types == nil {
+		s.types = make([]string, 0)
+	}
+	s.types = append(s.types, typ)
+	return s
+}
+
+func (s *ScanService) Types(types ...string) *ScanService {
+	if s.types == nil {
+		s.types = make([]string, 0)
+	}
+	s.types = append(s.types, types...)
+	return s
+}
+
// Scroll is an alias for KeepAlive, the time to keep
// the cursor alive (e.g. "5m" for 5 minutes).
func (s *ScanService) Scroll(keepAlive string) *ScanService {
	s.keepAlive = keepAlive
	return s
}
+
// KeepAlive sets the maximum time the cursor will be
// available before expiration (e.g. "5m" for 5 minutes).
func (s *ScanService) KeepAlive(keepAlive string) *ScanService {
	s.keepAlive = keepAlive
	return s
}
+
// Query sets the query to scan; the default is a match_all query.
func (s *ScanService) Query(query Query) *ScanService {
	s.query = query
	return s
}
+
// Pretty asks Elasticsearch to indent the returned JSON ("pretty=true").
func (s *ScanService) Pretty(pretty bool) *ScanService {
	s.pretty = pretty
	return s
}
+
// Debug enables dumping the HTTP request and response while executing.
func (s *ScanService) Debug(debug bool) *ScanService {
	s.debug = debug
	return s
}
+
+func (s *ScanService) Size(size int) *ScanService {
+	s.size = &size
+	return s
+}
+
+func (s *ScanService) Do() (*ScanCursor, error) {
+	// Build url
+	urls := "/"
+
+	// Indices part
+	indexPart := make([]string, 0)
+	for _, index := range s.indices {
+		index, err := uritemplates.Expand("{index}", map[string]string{
+			"index": index,
+		})
+		if err != nil {
+			return nil, err
+		}
+		indexPart = append(indexPart, index)
+	}
+	if len(indexPart) > 0 {
+		urls += strings.Join(indexPart, ",")
+	}
+
+	// Types
+	typesPart := make([]string, 0)
+	for _, typ := range s.types {
+		typ, err := uritemplates.Expand("{type}", map[string]string{
+			"type": typ,
+		})
+		if err != nil {
+			return nil, err
+		}
+		typesPart = append(typesPart, typ)
+	}
+	if len(typesPart) > 0 {
+		urls += "/" + strings.Join(typesPart, ",")
+	}
+
+	// Search
+	urls += "/_search"
+
+	// Parameters
+	params := make(url.Values)
+	params.Set("search_type", "scan")
+	if s.pretty {
+		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+	}
+	if s.keepAlive != "" {
+		params.Set("scroll", s.keepAlive)
+	} else {
+		params.Set("scroll", defaultKeepAlive)
+	}
+	if s.size != nil && *s.size > 0 {
+		params.Set("size", fmt.Sprintf("%d", *s.size))
+	}
+	if len(params) > 0 {
+		urls += "?" + params.Encode()
+	}
+
+	// Set up a new request
+	req, err := s.client.NewRequest("POST", urls)
+	if err != nil {
+		return nil, err
+	}
+
+	// Set body
+	body := make(map[string]interface{})
+
+	// Query
+	if s.query != nil {
+		body["query"] = s.query.Source()
+	}
+
+	req.SetBodyJson(body)
+
+	if s.debug {
+		s.client.dumpRequest((*http.Request)(req))
+	}
+
+	// Get response
+	res, err := s.client.c.Do((*http.Request)(req))
+	if err != nil {
+		return nil, err
+	}
+	if err := checkResponse(res); err != nil {
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if s.debug {
+		s.client.dumpResponse(res)
+	}
+
+	searchResult := new(SearchResult)
+	if err := json.NewDecoder(res.Body).Decode(searchResult); err != nil {
+		return nil, err
+	}
+
+	cursor := NewScanCursor(s.client, s.keepAlive, s.pretty, s.debug, searchResult)
+
+	return cursor, nil
+}
+
// ScanCursor represents a single page of results from
// an Elasticsearch scan operation; Next advances it through the scroll.
type ScanCursor struct {
	Results *SearchResult // the most recently fetched page

	client      *Client
	keepAlive   string // scroll keep-alive passed on each Next call
	pretty      bool
	debug       bool
	currentPage int // number of pages fetched via Next so far
}
+
+// newScanCursor returns a new initialized instance
+// of scanCursor.
+func NewScanCursor(client *Client, keepAlive string, pretty, debug bool, searchResult *SearchResult) *ScanCursor {
+	return &ScanCursor{
+		client:    client,
+		keepAlive: keepAlive,
+		pretty:    pretty,
+		debug:     debug,
+		Results:   searchResult,
+	}
+}
+
+// TotalHits is a convenience method that returns the number
+// of hits the cursor will iterate through.
+func (c *ScanCursor) TotalHits() int64 {
+	if c.Results.Hits == nil {
+		return 0
+	}
+	return c.Results.Hits.TotalHits
+}
+
+// Next returns the next search result or nil when all
+// documents have been scanned.
+//
+// Usage:
+//
+//   for {
+//     res, err := cursor.Next()
+//     if err == elastic.EOS {
+//       // End of stream (or scan)
+//       break
+//     }
+//     if err != nil {
+//       // Handle error
+//     }
+//     // Work with res
+//   }
+//
+func (c *ScanCursor) Next() (*SearchResult, error) {
+	if c.currentPage > 0 {
+		if c.Results.Hits == nil || len(c.Results.Hits.Hits) == 0 || c.Results.Hits.TotalHits == 0 {
+			return nil, EOS
+		}
+	}
+	if c.Results.ScrollId == "" {
+		return nil, ErrNoScrollId
+	}
+
+	// Build url
+	urls := "/_search/scroll"
+
+	// Parameters
+	params := make(url.Values)
+	if c.pretty {
+		params.Set("pretty", fmt.Sprintf("%v", c.pretty))
+	}
+	if c.keepAlive != "" {
+		params.Set("scroll", c.keepAlive)
+	} else {
+		params.Set("scroll", defaultKeepAlive)
+	}
+	urls += "?" + params.Encode()
+
+	// Set up a new request
+	req, err := c.client.NewRequest("POST", urls)
+	if err != nil {
+		return nil, err
+	}
+
+	// Set body
+	req.SetBodyString(c.Results.ScrollId)
+
+	if c.debug {
+		c.client.dumpRequest((*http.Request)(req))
+	}
+
+	// Get response
+	res, err := c.client.c.Do((*http.Request)(req))
+	if err != nil {
+		return nil, err
+	}
+	if err := checkResponse(res); err != nil {
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if c.debug {
+		c.client.dumpResponse(res)
+	}
+
+	if err := json.NewDecoder(res.Body).Decode(c.Results); err != nil {
+		return nil, err
+	}
+
+	c.currentPage += 1
+
+	return c.Results, nil
+}

+ 185 - 0
sensitive/src/elastic.v1/scan_test.go

@@ -0,0 +1,185 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	_ "net/http"
+	"testing"
+)
+
+// TestScan verifies that a scan cursor iterates over all documents of an
+// index. Requires a running Elasticsearch instance (integration test).
+func TestScan(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+	// Add all documents
+	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Match all should return all documents
+	cursor, err := client.Scan(testIndexName).
+		Size(1).
+		// Pretty(true).Debug(true).
+		Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// The initial scan response carries only metadata, no hits.
+	if cursor.Results == nil {
+		t.Errorf("expected results != nil; got nil")
+	}
+	if cursor.Results.Hits == nil {
+		t.Errorf("expected results.Hits != nil; got nil")
+	}
+	if cursor.Results.Hits.TotalHits != 3 {
+		t.Errorf("expected results.Hits.TotalHits = %d; got %d", 3, cursor.Results.Hits.TotalHits)
+	}
+	if len(cursor.Results.Hits.Hits) != 0 {
+		t.Errorf("expected len(results.Hits.Hits) = %d; got %d", 0, len(cursor.Results.Hits.Hits))
+	}
+
+	pages := 0
+	numDocs := 0
+
+	// Iterate pages until the cursor reports end-of-stream.
+	for {
+		searchResult, err := cursor.Next()
+		if err == EOS {
+			break
+		}
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		pages += 1
+
+		for _, hit := range searchResult.Hits.Hits {
+			if hit.Index != testIndexName {
+				t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+			}
+			item := make(map[string]interface{})
+			err := json.Unmarshal(*hit.Source, &item)
+			if err != nil {
+				t.Fatal(err)
+			}
+			numDocs += 1
+		}
+	}
+
+	if pages <= 0 {
+		t.Errorf("expected to retrieve at least 1 page; got %d", pages)
+	}
+
+	if numDocs != 3 {
+		t.Errorf("expected to retrieve %d hits; got %d", 3, numDocs)
+	}
+}
+
+// TestScanWithQuery verifies that a scan cursor honors a term query and
+// only iterates matching documents. Requires a running Elasticsearch.
+func TestScanWithQuery(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+	// Add all documents
+	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Return tweets from olivere only
+	termQuery := NewTermQuery("user", "olivere")
+	cursor, err := client.Scan(testIndexName).
+		Size(1).
+		Query(termQuery).
+		// Pretty(true).Debug(true).
+		Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// The initial scan response carries only metadata, no hits.
+	if cursor.Results == nil {
+		t.Errorf("expected results != nil; got nil")
+	}
+	if cursor.Results.Hits == nil {
+		t.Errorf("expected results.Hits != nil; got nil")
+	}
+	if cursor.Results.Hits.TotalHits != 2 {
+		t.Errorf("expected results.Hits.TotalHits = %d; got %d", 2, cursor.Results.Hits.TotalHits)
+	}
+	if len(cursor.Results.Hits.Hits) != 0 {
+		t.Errorf("expected len(results.Hits.Hits) = %d; got %d", 0, len(cursor.Results.Hits.Hits))
+	}
+
+	pages := 0
+	numDocs := 0
+
+	// Iterate pages until the cursor reports end-of-stream.
+	for {
+		searchResult, err := cursor.Next()
+		if err == EOS {
+			break
+		}
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		pages += 1
+
+		for _, hit := range searchResult.Hits.Hits {
+			if hit.Index != testIndexName {
+				t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+			}
+			item := make(map[string]interface{})
+			err := json.Unmarshal(*hit.Source, &item)
+			if err != nil {
+				t.Fatal(err)
+			}
+			numDocs += 1
+		}
+	}
+
+	if pages <= 0 {
+		t.Errorf("expected to retrieve at least 1 page; got %d", pages)
+	}
+
+	if numDocs != 2 {
+		t.Errorf("expected to retrieve %d hits; got %d", 2, numDocs)
+	}
+}

+ 273 - 0
sensitive/src/elastic.v1/scroll.go

@@ -0,0 +1,273 @@
+// Copyright 2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// ScrollService manages a cursor through documents in Elasticsearch.
+type ScrollService struct {
+	client    *Client  // client used to issue HTTP requests
+	indices   []string // indices to search; empty means all indices
+	types     []string // document types to search; empty means all types
+	keepAlive string   // keep-alive for the scroll cursor (e.g. "5m"); empty uses defaultKeepAlive
+	query     Query    // query to execute; defaults to match_all (see NewScrollService)
+	size      *int     // hits per page; nil leaves the server default
+	pretty    bool     // request pretty-printed JSON responses
+	debug     bool     // dump requests/responses for debugging
+	scrollId  string   // scroll id from the previous page; empty starts a new scroll
+}
+
+// NewScrollService creates a new ScrollService which, by default,
+// executes a match_all query with pretty-printing and debugging off.
+func NewScrollService(client *Client) *ScrollService {
+	return &ScrollService{
+		client: client,
+		query:  NewMatchAllQuery(),
+		pretty: false,
+		debug:  false,
+	}
+}
+
+// Index adds a single index to search in.
+func (s *ScrollService) Index(index string) *ScrollService {
+	if s.indices == nil {
+		s.indices = make([]string, 0)
+	}
+	s.indices = append(s.indices, index)
+	return s
+}
+
+// Indices adds a list of indices to search in.
+func (s *ScrollService) Indices(indices ...string) *ScrollService {
+	if s.indices == nil {
+		s.indices = make([]string, 0)
+	}
+	s.indices = append(s.indices, indices...)
+	return s
+}
+
+// Type adds a single document type to restrict the scroll to.
+func (s *ScrollService) Type(typ string) *ScrollService {
+	if s.types == nil {
+		s.types = make([]string, 0)
+	}
+	s.types = append(s.types, typ)
+	return s
+}
+
+// Types adds a list of document types to restrict the scroll to.
+func (s *ScrollService) Types(types ...string) *ScrollService {
+	if s.types == nil {
+		s.types = make([]string, 0)
+	}
+	s.types = append(s.types, types...)
+	return s
+}
+
+// Scroll is an alias for KeepAlive, the time to keep
+// the cursor alive (e.g. "5m" for 5 minutes).
+func (s *ScrollService) Scroll(keepAlive string) *ScrollService {
+	s.keepAlive = keepAlive
+	return s
+}
+
+// KeepAlive sets the maximum time the cursor will be
+// available before expiration (e.g. "5m" for 5 minutes).
+func (s *ScrollService) KeepAlive(keepAlive string) *ScrollService {
+	s.keepAlive = keepAlive
+	return s
+}
+
+// Query sets the query to execute; it replaces the default match_all.
+func (s *ScrollService) Query(query Query) *ScrollService {
+	s.query = query
+	return s
+}
+
+// Pretty asks Elasticsearch to return pretty-printed JSON.
+func (s *ScrollService) Pretty(pretty bool) *ScrollService {
+	s.pretty = pretty
+	return s
+}
+
+// Debug enables dumping of requests and responses.
+func (s *ScrollService) Debug(debug bool) *ScrollService {
+	s.debug = debug
+	return s
+}
+
+// Size sets the number of hits to return per page.
+func (s *ScrollService) Size(size int) *ScrollService {
+	s.size = &size
+	return s
+}
+
+// ScrollId sets the scroll id from a previous page; when set, Do
+// fetches the next page instead of starting a new scroll.
+func (s *ScrollService) ScrollId(scrollId string) *ScrollService {
+	s.scrollId = scrollId
+	return s
+}
+
+// Do executes the scroll operation. Without a scroll id it starts a
+// new scroll; with one it fetches the next page.
+func (s *ScrollService) Do() (*SearchResult, error) {
+	if s.scrollId != "" {
+		return s.GetNextPage()
+	}
+	return s.GetFirstPage()
+}
+
+// GetFirstPage starts the scroll by issuing a search request with
+// search_type=scan. The returned result carries the scroll id needed
+// to fetch the actual hits via GetNextPage.
+func (s *ScrollService) GetFirstPage() (*SearchResult, error) {
+	// Build url
+	urls := "/"
+
+	// Indices part
+	indexPart := make([]string, 0)
+	for _, index := range s.indices {
+		index, err := uritemplates.Expand("{index}", map[string]string{
+			"index": index,
+		})
+		if err != nil {
+			return nil, err
+		}
+		indexPart = append(indexPart, index)
+	}
+	if len(indexPart) > 0 {
+		urls += strings.Join(indexPart, ",")
+	}
+
+	// Types
+	typesPart := make([]string, 0)
+	for _, typ := range s.types {
+		typ, err := uritemplates.Expand("{type}", map[string]string{
+			"type": typ,
+		})
+		if err != nil {
+			return nil, err
+		}
+		typesPart = append(typesPart, typ)
+	}
+	if len(typesPart) > 0 {
+		urls += "/" + strings.Join(typesPart, ",")
+	}
+
+	// Search
+	urls += "/_search"
+
+	// Parameters
+	params := make(url.Values)
+	params.Set("search_type", "scan")
+	if s.pretty {
+		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+	}
+	// Fall back to the package default when no keep-alive was configured.
+	if s.keepAlive != "" {
+		params.Set("scroll", s.keepAlive)
+	} else {
+		params.Set("scroll", defaultKeepAlive)
+	}
+	if s.size != nil && *s.size > 0 {
+		params.Set("size", fmt.Sprintf("%d", *s.size))
+	}
+	if len(params) > 0 {
+		urls += "?" + params.Encode()
+	}
+
+	// Set up a new request
+	req, err := s.client.NewRequest("POST", urls)
+	if err != nil {
+		return nil, err
+	}
+
+	// Set body
+	body := make(map[string]interface{})
+
+	// Query
+	if s.query != nil {
+		body["query"] = s.query.Source()
+	}
+
+	req.SetBodyJson(body)
+
+	if s.debug {
+		s.client.dumpRequest((*http.Request)(req))
+	}
+
+	// Get response
+	res, err := s.client.c.Do((*http.Request)(req))
+	if err != nil {
+		return nil, err
+	}
+	// NOTE(review): if checkResponse fails, the body is not closed here
+	// because the deferred Close is registered afterwards — confirm
+	// checkResponse takes care of that.
+	if err := checkResponse(res); err != nil {
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if s.debug {
+		s.client.dumpResponse(res)
+	}
+
+	searchResult := new(SearchResult)
+	if err := json.NewDecoder(res.Body).Decode(searchResult); err != nil {
+		return nil, err
+	}
+
+	return searchResult, nil
+}
+
+// GetNextPage fetches the next batch of hits for the configured scroll
+// id. It returns EOS once the scroll reports no more hits.
+func (s *ScrollService) GetNextPage() (*SearchResult, error) {
+	if s.scrollId == "" {
+		return nil, ErrNoScrollId
+	}
+
+	// Build url
+	urls := "/_search/scroll"
+
+	// Parameters
+	params := make(url.Values)
+	if s.pretty {
+		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+	}
+	// Fall back to the package default when no keep-alive was configured.
+	if s.keepAlive != "" {
+		params.Set("scroll", s.keepAlive)
+	} else {
+		params.Set("scroll", defaultKeepAlive)
+	}
+	urls += "?" + params.Encode()
+
+	// Set up a new request
+	req, err := s.client.NewRequest("POST", urls)
+	if err != nil {
+		return nil, err
+	}
+
+	// Set body: the scroll id is sent as the raw request body.
+	req.SetBodyString(s.scrollId)
+
+	if s.debug {
+		s.client.dumpRequest((*http.Request)(req))
+	}
+
+	// Get response
+	res, err := s.client.c.Do((*http.Request)(req))
+	if err != nil {
+		return nil, err
+	}
+	// NOTE(review): if checkResponse fails, the body is not closed here;
+	// confirm checkResponse takes care of that.
+	if err := checkResponse(res); err != nil {
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if s.debug {
+		s.client.dumpResponse(res)
+	}
+
+	searchResult := new(SearchResult)
+	if err := json.NewDecoder(res.Body).Decode(searchResult); err != nil {
+		return nil, err
+	}
+
+	// Determine last page
+	if searchResult == nil || searchResult.Hits == nil || len(searchResult.Hits.Hits) == 0 || searchResult.Hits.TotalHits == 0 {
+		return nil, EOS
+	}
+
+	return searchResult, nil
+}

+ 110 - 0
sensitive/src/elastic.v1/scroll_test.go

@@ -0,0 +1,110 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	_ "net/http"
+	"testing"
+)
+
+// TestScroll verifies scrolling through all documents of an index by
+// repeatedly passing the scroll id back to the service. Requires a
+// running Elasticsearch instance (integration test).
+func TestScroll(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+	// Add all documents
+	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Match all should return all documents
+	res, err := client.Scroll(testIndexName).
+		Size(1).
+		// Pretty(true).Debug(true).
+		Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// The initial scan response carries only metadata and the scroll id.
+	if res == nil {
+		t.Errorf("expected results != nil; got nil")
+	}
+	if res.Hits == nil {
+		t.Errorf("expected results.Hits != nil; got nil")
+	}
+	if res.Hits.TotalHits != 3 {
+		t.Errorf("expected results.Hits.TotalHits = %d; got %d", 3, res.Hits.TotalHits)
+	}
+	if len(res.Hits.Hits) != 0 {
+		t.Errorf("expected len(results.Hits.Hits) = %d; got %d", 0, len(res.Hits.Hits))
+	}
+	if res.ScrollId == "" {
+		t.Errorf("expected scrollId in results; got %q", res.ScrollId)
+	}
+
+	pages := 0
+	numDocs := 0
+	scrollId := res.ScrollId
+
+	// Fetch page after page until the service reports end-of-stream.
+	for {
+		searchResult, err := client.Scroll(testIndexName).
+			Size(1).
+			ScrollId(scrollId).
+			// Pretty(true).Debug(true).
+			Do()
+		if err == EOS {
+			break
+		}
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		pages += 1
+
+		for _, hit := range searchResult.Hits.Hits {
+			if hit.Index != testIndexName {
+				t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+			}
+			item := make(map[string]interface{})
+			err := json.Unmarshal(*hit.Source, &item)
+			if err != nil {
+				t.Fatal(err)
+			}
+			numDocs += 1
+		}
+
+		scrollId = searchResult.ScrollId
+		// FIX: corrected typo in the error message ("expeced" -> "expected").
+		if scrollId == "" {
+			t.Errorf("expected scrollId in results; got %q", scrollId)
+		}
+	}
+
+	if pages <= 0 {
+		t.Errorf("expected to retrieve at least 1 page; got %d", pages)
+	}
+
+	if numDocs != 3 {
+		t.Errorf("expected to retrieve %d hits; got %d", 3, numDocs)
+	}
+}

+ 513 - 0
sensitive/src/elastic.v1/search.go

@@ -0,0 +1,513 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// Search for documents in Elasticsearch.
+type SearchService struct {
+	client       *Client       // client used to issue HTTP requests
+	searchSource *SearchSource // builder for the search request body
+	source       interface{}   // manually supplied request body; takes precedence in Do
+	pretty       bool          // request pretty-printed JSON responses
+	searchType   string        // search operation type, e.g. "scan"
+	indices      []string      // indices to search; empty means all indices
+	queryHint    string        // NOTE(review): set via QueryHint but not used in Do — confirm
+	routing      string        // NOTE(review): set via Routing but not used in Do — confirm
+	preference   string        // NOTE(review): set via Preference but not used in Do — confirm
+	types        []string      // document types to restrict the search to
+	debug        bool          // dump requests/responses for debugging
+}
+
+// NewSearchService creates a new service for searching in Elasticsearch.
+// You typically do not create the service yourself manually, but access
+// it via client.Search().
+func NewSearchService(client *Client) *SearchService {
+	return &SearchService{
+		client:       client,
+		searchSource: NewSearchSource(),
+		pretty:       false,
+		debug:        false,
+	}
+}
+
+// SearchSource sets the search source builder to use with this service.
+// Passing nil resets the builder to a fresh NewSearchSource.
+func (s *SearchService) SearchSource(searchSource *SearchSource) *SearchService {
+	s.searchSource = searchSource
+	if s.searchSource == nil {
+		s.searchSource = NewSearchSource()
+	}
+	return s
+}
+
+// Source allows the user to set the request body manually without using
+// any of the structs and interfaces in Elastic. When set, it takes
+// precedence over the search source builder in Do.
+func (s *SearchService) Source(source interface{}) *SearchService {
+	s.source = source
+	return s
+}
+
+// Index sets the name of the index to use for search.
+func (s *SearchService) Index(index string) *SearchService {
+	if s.indices == nil {
+		s.indices = make([]string, 0)
+	}
+	s.indices = append(s.indices, index)
+	return s
+}
+
+// Indices sets the names of the indices to use for search.
+func (s *SearchService) Indices(indices ...string) *SearchService {
+	if s.indices == nil {
+		s.indices = make([]string, 0)
+	}
+	s.indices = append(s.indices, indices...)
+	return s
+}
+
+// Type restricts the search for the given type.
+func (s *SearchService) Type(typ string) *SearchService {
+	if s.types == nil {
+		s.types = []string{typ}
+	} else {
+		s.types = append(s.types, typ)
+	}
+	return s
+}
+
+// Types allows to restrict the search to a list of types.
+func (s *SearchService) Types(types ...string) *SearchService {
+	// BUG FIX: the original pre-allocated len(types) zero-value strings
+	// via make([]string, len(types)) and then appended, so the first call
+	// left empty type names that became stray commas in the request URL.
+	// append handles a nil slice, so no pre-allocation is needed (this
+	// also matches ScrollService.Types and SearchService.Indices).
+	s.types = append(s.types, types...)
+	return s
+}
+
+// Pretty enables the caller to indent the JSON output. Use it in combination
+// with Debug to see what Elasticsearch actually returned.
+func (s *SearchService) Pretty(pretty bool) *SearchService {
+	s.pretty = pretty
+	return s
+}
+
+// Debug enables the user to print the output of the search to os.Stdout
+// when calling Do.
+func (s *SearchService) Debug(debug bool) *SearchService {
+	s.debug = debug
+	return s
+}
+
+// Timeout sets the timeout to use, e.g. "1s" or "1000ms".
+func (s *SearchService) Timeout(timeout string) *SearchService {
+	s.searchSource = s.searchSource.Timeout(timeout)
+	return s
+}
+
+// TimeoutInMillis sets the timeout in milliseconds.
+func (s *SearchService) TimeoutInMillis(timeoutInMillis int) *SearchService {
+	s.searchSource = s.searchSource.TimeoutInMillis(timeoutInMillis)
+	return s
+}
+
+// SearchType sets the search operation type. Valid values are:
+// "query_then_fetch", "query_and_fetch", "dfs_query_then_fetch",
+// "dfs_query_and_fetch", "count", "scan".
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-search-type.html#search-request-search-type
+// for details.
+func (s *SearchService) SearchType(searchType string) *SearchService {
+	s.searchType = searchType
+	return s
+}
+
+// Routing allows for (a comma-separated) list of specific routing values.
+// NOTE(review): this value is stored but not applied in Do — confirm.
+func (s *SearchService) Routing(routing string) *SearchService {
+	s.routing = routing
+	return s
+}
+
+// Preference specifies the node or shard the operation should be
+// performed on (default: "random").
+// NOTE(review): this value is stored but not applied in Do — confirm.
+func (s *SearchService) Preference(preference string) *SearchService {
+	s.preference = preference
+	return s
+}
+
+// QueryHint stores a query hint on the service.
+// NOTE(review): this value is stored but not applied in Do — confirm.
+func (s *SearchService) QueryHint(queryHint string) *SearchService {
+	s.queryHint = queryHint
+	return s
+}
+
+// Query sets the query to perform, e.g. MatchAllQuery.
+func (s *SearchService) Query(query Query) *SearchService {
+	s.searchSource = s.searchSource.Query(query)
+	return s
+}
+
+// PostFilter is executed as the last filter. It only affects the
+// search hits but not facets. See
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-post-filter.html
+// for details.
+func (s *SearchService) PostFilter(postFilter Filter) *SearchService {
+	s.searchSource = s.searchSource.PostFilter(postFilter)
+	return s
+}
+
+// Highlight sets the highlighting. See
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-highlighting.html
+// for details.
+func (s *SearchService) Highlight(highlight *Highlight) *SearchService {
+	s.searchSource = s.searchSource.Highlight(highlight)
+	return s
+}
+
+// GlobalSuggestText sets the global text for suggesters. See
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters.html#global-suggest
+// for details.
+func (s *SearchService) GlobalSuggestText(globalText string) *SearchService {
+	s.searchSource = s.searchSource.GlobalSuggestText(globalText)
+	return s
+}
+
+// Suggester sets the suggester. See
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters.html
+// for details.
+func (s *SearchService) Suggester(suggester Suggester) *SearchService {
+	s.searchSource = s.searchSource.Suggester(suggester)
+	return s
+}
+
+// Facet adds a facet to the search. See
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets.html
+// to get an overview of Elasticsearch facets.
+func (s *SearchService) Facet(name string, facet Facet) *SearchService {
+	s.searchSource = s.searchSource.Facet(name, facet)
+	return s
+}
+
+// Aggregation adds an aggregation to the search. See
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations.html
+// for an overview of aggregations in Elasticsearch.
+func (s *SearchService) Aggregation(name string, aggregation Aggregation) *SearchService {
+	s.searchSource = s.searchSource.Aggregation(name, aggregation)
+	return s
+}
+
+// MinScore excludes documents which have a score less than the minimum
+// specified here. See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-min-score.html.
+func (s *SearchService) MinScore(minScore float64) *SearchService {
+	s.searchSource = s.searchSource.MinScore(minScore)
+	return s
+}
+
+// From defines the offset from the first result you want to fetch.
+// Use it in combination with Size to paginate through results.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-from-size.html
+// for details.
+func (s *SearchService) From(from int) *SearchService {
+	s.searchSource = s.searchSource.From(from)
+	return s
+}
+
+// Size defines the maximum number of hits to be returned.
+// Use it in combination with From to paginate through results.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-from-size.html
+// for details.
+func (s *SearchService) Size(size int) *SearchService {
+	s.searchSource = s.searchSource.Size(size)
+	return s
+}
+
+// Explain can be enabled to provide an explanation for each hit and how its
+// score was computed.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-explain.html
+// for details.
+func (s *SearchService) Explain(explain bool) *SearchService {
+	s.searchSource = s.searchSource.Explain(explain)
+	return s
+}
+
+// Version can be set to true to return a version for each search hit.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-version.html.
+func (s *SearchService) Version(version bool) *SearchService {
+	s.searchSource = s.searchSource.Version(version)
+	return s
+}
+
+// Sort the results by the given field, in the given order.
+// Use the alternative SortWithInfo to use a struct to define the sorting.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html
+// for detailed documentation of sorting.
+func (s *SearchService) Sort(field string, ascending bool) *SearchService {
+	s.searchSource = s.searchSource.Sort(field, ascending)
+	return s
+}
+
+// SortWithInfo defines how to sort results.
+// Use the Sort func for a shortcut.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html
+// for detailed documentation of sorting.
+func (s *SearchService) SortWithInfo(info SortInfo) *SearchService {
+	s.searchSource = s.searchSource.SortWithInfo(info)
+	return s
+}
+
+// SortBy defines how to sort results.
+// Use the Sort func for a shortcut.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html
+// for detailed documentation of sorting.
+func (s *SearchService) SortBy(sorter ...Sorter) *SearchService {
+	s.searchSource = s.searchSource.SortBy(sorter...)
+	return s
+}
+
+// Fields tells Elasticsearch to only load specific fields from a search hit.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-fields.html.
+func (s *SearchService) Fields(fields ...string) *SearchService {
+	s.searchSource = s.searchSource.Fields(fields...)
+	return s
+}
+
+// Do executes the search and returns a SearchResult.
+func (s *SearchService) Do() (*SearchResult, error) {
+	// Build url
+	urls := "/"
+
+	// Indices part
+	indexPart := make([]string, 0)
+	for _, index := range s.indices {
+		index, err := uritemplates.Expand("{index}", map[string]string{
+			"index": index,
+		})
+		if err != nil {
+			return nil, err
+		}
+		indexPart = append(indexPart, index)
+	}
+	urls += strings.Join(indexPart, ",")
+
+	// Types part
+	if len(s.types) > 0 {
+		typesPart := make([]string, 0)
+		for _, typ := range s.types {
+			typ, err := uritemplates.Expand("{type}", map[string]string{
+				"type": typ,
+			})
+			if err != nil {
+				return nil, err
+			}
+			typesPart = append(typesPart, typ)
+		}
+		urls += "/"
+		urls += strings.Join(typesPart, ",")
+	}
+
+	// Search
+	urls += "/_search"
+
+	// Parameters
+	// NOTE(review): routing, preference and queryHint are settable on the
+	// service but are not added to the request here — confirm intent.
+	params := make(url.Values)
+	if s.pretty {
+		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+	}
+	if s.searchType != "" {
+		params.Set("search_type", s.searchType)
+	}
+	if len(params) > 0 {
+		urls += "?" + params.Encode()
+	}
+
+	// Set up a new request
+	req, err := s.client.NewRequest("POST", urls)
+	if err != nil {
+		return nil, err
+	}
+
+	// Set body: a manually supplied source takes precedence over the
+	// search source builder.
+	if s.source != nil {
+		req.SetBodyJson(s.source)
+	} else {
+		req.SetBodyJson(s.searchSource.Source())
+	}
+
+	if s.debug {
+		s.client.dumpRequest((*http.Request)(req))
+	}
+
+	// Get response
+	res, err := s.client.c.Do((*http.Request)(req))
+	if err != nil {
+		return nil, err
+	}
+	// NOTE(review): if checkResponse fails, the body is not closed here;
+	// confirm checkResponse takes care of that.
+	if err := checkResponse(res); err != nil {
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	if s.debug {
+		s.client.dumpResponse(res)
+	}
+
+	ret := new(SearchResult)
+	if err := json.NewDecoder(res.Body).Decode(ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// SearchResult is the result of a search in Elasticsearch.
+type SearchResult struct {
+	TookInMillis int64         `json:"took"`            // search time in milliseconds
+	ScrollId     string        `json:"_scroll_id"`      // only used with Scroll and Scan operations
+	Hits         *SearchHits   `json:"hits"`            // the actual search hits
+	Suggest      SearchSuggest `json:"suggest"`         // results from suggesters
+	Facets       SearchFacets  `json:"facets"`          // results from facets
+	Aggregations Aggregations  `json:"aggregations"`    // results from aggregations
+	TimedOut     bool          `json:"timed_out"`       // true if the search timed out
+	Error        string        `json:"error,omitempty"` // used in MultiSearch only
+}
+
+// SearchHits specifies the list of search hits.
+type SearchHits struct {
+	TotalHits int64        `json:"total"`     // total number of hits found
+	MaxScore  *float64     `json:"max_score"` // maximum score of all hits
+	Hits      []*SearchHit `json:"hits"`      // the actual hits returned
+}
+
+// SearchHit is a single hit.
+type SearchHit struct {
+	Score       *float64               `json:"_score"`       // computed score
+	Index       string                 `json:"_index"`       // index name
+	Id          string                 `json:"_id"`          // external or internal
+	Type        string                 `json:"_type"`        // type
+	Version     *int64                 `json:"_version"`     // version number, when Version is set to true in SearchService
+	Sort        []interface{}          `json:"sort"`         // sort information
+	Highlight   SearchHitHighlight     `json:"highlight"`    // highlighter information
+	Source      *json.RawMessage       `json:"_source"`      // stored document source (kept raw; decoding is deferred to the caller)
+	Fields      map[string]interface{} `json:"fields"`       // returned fields
+	Explanation *SearchExplanation     `json:"_explanation"` // explains how the score was computed
+
+	// Shard
+	// HighlightFields
+	// SortValues
+	// MatchedFilters
+}
+
+// SearchExplanation explains how the score for a hit was computed.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-explain.html.
+type SearchExplanation struct {
+	Value       float64             `json:"value"`             // e.g. 1.0
+	Description string              `json:"description"`       // e.g. "boost" or "ConstantScore(*:*), product of:"
+	Details     []SearchExplanation `json:"details,omitempty"` // recursive details
+}
+
+// Suggest
+
+// SearchSuggest is a map of suggestions, keyed by suggester name.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters.html.
+type SearchSuggest map[string][]SearchSuggestion
+
+// SearchSuggestion is a single search suggestion.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters.html.
+type SearchSuggestion struct {
+	Text    string                   `json:"text"`
+	Offset  int                      `json:"offset"`
+	Length  int                      `json:"length"`
+	Options []SearchSuggestionOption `json:"options"`
+}
+
+// SearchSuggestionOption is an option of a SearchSuggestion.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters.html.
+type SearchSuggestionOption struct {
+	Text    string      `json:"text"`
+	Score   float32     `json:"score"`
+	Freq    int         `json:"freq"`
+	Payload interface{} `json:"payload"`
+}
+
+// Facets
+
+// SearchFacets is a map of facets, keyed by facet name.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets.html.
+type SearchFacets map[string]*SearchFacet
+
+// SearchFacet is a single facet. Only one of Terms, Ranges or Entries
+// is populated, depending on the facet type.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets.html.
+type SearchFacet struct {
+	Type    string             `json:"_type"`
+	Missing int                `json:"missing"`
+	Total   int                `json:"total"`
+	Other   int                `json:"other"`
+	Terms   []searchFacetTerm  `json:"terms"`
+	Ranges  []searchFacetRange `json:"ranges"`
+	Entries []searchFacetEntry `json:"entries"`
+}
+
+// searchFacetTerm is the result of a terms facet.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets-terms-facet.html.
+type searchFacetTerm struct {
+	Term  string `json:"term"`
+	Count int    `json:"count"`
+}
+
+// searchFacetRange is the result of a range facet.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets-range-facet.html.
+type searchFacetRange struct {
+	From       *float64 `json:"from"`
+	FromStr    *string  `json:"from_str"`
+	To         *float64 `json:"to"`
+	ToStr      *string  `json:"to_str"`
+	Count      int      `json:"count"`
+	Min        *float64 `json:"min"`
+	Max        *float64 `json:"max"`
+	TotalCount int      `json:"total_count"`
+	Total      *float64 `json:"total"`
+	Mean       *float64 `json:"mean"`
+}
+
+// searchFacetEntry is a general facet entry.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets.html
+type searchFacetEntry struct {
+	// Key for this facet, e.g. in histograms
+	Key interface{} `json:"key"`
+	// Date histograms contain the number of milliseconds as date:
+	// If e.Time = 1293840000000, then: Time.at(1293840000000/1000) => 2011-01-01
+	Time int64 `json:"time"`
+	// Number of hits for this facet
+	Count int `json:"count"`
+	// Min is either a string like "Infinity" or a float64.
+	// This is returned with some DateHistogram facets.
+	Min interface{} `json:"min,omitempty"`
+	// Max is either a string like "-Infinity" or a float64
+	// This is returned with some DateHistogram facets.
+	Max interface{} `json:"max,omitempty"`
+	// Total is the sum of all entries on the recorded Time
+	// This is returned with some DateHistogram facets.
+	Total float64 `json:"total,omitempty"`
+	// TotalCount is the number of entries for Total
+	// This is returned with some DateHistogram facets.
+	TotalCount int `json:"total_count,omitempty"`
+	// Mean is the mean value
+	// This is returned with some DateHistogram facets.
+	Mean float64 `json:"mean,omitempty"`
+}
+
+// Aggregations (see search_aggs.go)
+
+// Highlighting
+
+// SearchHitHighlight is the highlight information of a search hit.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-highlighting.html
+// for a general discussion of highlighting.
+type SearchHitHighlight map[string][]string

+ 761 - 0
sensitive/src/elastic.v1/search_aggs.go

@@ -0,0 +1,761 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"bytes"
+	"encoding/json"
+)
+
+// Aggregations can be seen as a unit-of-work that build
+// analytic information over a set of documents. It is
+// (in many senses) the follow-up of facets in Elasticsearch.
+// For more details about aggregations, visit:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations.html
+type Aggregation interface {
+	Source() interface{}
+}
+
+// Aggregations is a list of aggregations that are part of a search result.
+type Aggregations map[string]json.RawMessage
+
+// Min returns min aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-min-aggregation.html
+func (a Aggregations) Min(name string) (*AggregationValueMetric, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationValueMetric)
+		if err := json.Unmarshal(raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// Max returns max aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-max-aggregation.html
+func (a Aggregations) Max(name string) (*AggregationValueMetric, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationValueMetric)
+		if err := json.Unmarshal(raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// Sum returns sum aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-sum-aggregation.html
+func (a Aggregations) Sum(name string) (*AggregationValueMetric, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationValueMetric)
+		if err := json.Unmarshal(raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// Avg returns average aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-avg-aggregation.html
+func (a Aggregations) Avg(name string) (*AggregationValueMetric, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationValueMetric)
+		if err := json.Unmarshal(raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// ValueCount returns value-count aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-valuecount-aggregation.html
+func (a Aggregations) ValueCount(name string) (*AggregationValueMetric, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationValueMetric)
+		if err := json.Unmarshal(raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// Cardinality returns cardinality aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-cardinality-aggregation.html
+func (a Aggregations) Cardinality(name string) (*AggregationValueMetric, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationValueMetric)
+		if err := json.Unmarshal(raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// Stats returns stats aggregation results.
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-stats-aggregation.html
+func (a Aggregations) Stats(name string) (*AggregationStatsMetric, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationStatsMetric)
+		if err := json.Unmarshal(raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// ExtendedStats returns extended stats aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-extendedstats-aggregation.html
+func (a Aggregations) ExtendedStats(name string) (*AggregationExtendedStatsMetric, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationExtendedStatsMetric)
+		if err := json.Unmarshal(raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// Percentiles returns percentiles results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-aggregation.html
+func (a Aggregations) Percentiles(name string) (*AggregationPercentilesMetric, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationPercentilesMetric)
+		if err := json.Unmarshal(raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// PercentileRanks returns percentile ranks results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-rank-aggregation.html
+func (a Aggregations) PercentileRanks(name string) (*AggregationPercentilesMetric, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationPercentilesMetric)
+		if err := json.Unmarshal(raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// TopHits returns top-hits aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-top-hits-aggregation.html
+func (a Aggregations) TopHits(name string) (*AggregationTopHitsMetric, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationTopHitsMetric)
+		if err := json.Unmarshal(raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// Global returns global results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-global-aggregation.html
+func (a Aggregations) Global(name string) (*AggregationSingleBucket, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationSingleBucket)
+		if err := json.Unmarshal(raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// Filter returns filter results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filter-aggregation.html
+func (a Aggregations) Filter(name string) (*AggregationSingleBucket, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationSingleBucket)
+		if err := json.Unmarshal(raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// Filters returns filters results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filters-aggregation.html
+func (a Aggregations) Filters(name string) (*AggregationBucketFilters, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationBucketFilters)
+		if err := json.Unmarshal(raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// Missing returns missing results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-missing-aggregation.html
+func (a Aggregations) Missing(name string) (*AggregationSingleBucket, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationSingleBucket)
+		if err := json.Unmarshal(raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// Nested returns nested results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-nested-aggregation.html
+func (a Aggregations) Nested(name string) (*AggregationSingleBucket, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationSingleBucket)
+		if err := json.Unmarshal(raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// ReverseNested returns reverse-nested results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-reverse-nested-aggregation.html
+func (a Aggregations) ReverseNested(name string) (*AggregationSingleBucket, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationSingleBucket)
+		if err := json.Unmarshal(raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// Children returns children results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-children-aggregation.html
+func (a Aggregations) Children(name string) (*AggregationSingleBucket, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationSingleBucket)
+		if err := json.Unmarshal(raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// Terms returns terms aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html
+func (a Aggregations) Terms(name string) (*AggregationBucketKeyItems, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationBucketKeyItems)
+		if err := json.Unmarshal(raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// SignificantTerms returns significant terms aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-significantterms-aggregation.html
+func (a Aggregations) SignificantTerms(name string) (*AggregationBucketSignificantTerms, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationBucketSignificantTerms)
+		if err := json.Unmarshal(raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// Range returns range aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-range-aggregation.html
+func (a Aggregations) Range(name string) (*AggregationBucketRangeItems, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationBucketRangeItems)
+		if err := json.Unmarshal(raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// DateRange returns date range aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-daterange-aggregation.html
+func (a Aggregations) DateRange(name string) (*AggregationBucketRangeItems, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationBucketRangeItems)
+		if err := json.Unmarshal(raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// IPv4Range returns IPv4 range aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-iprange-aggregation.html
+func (a Aggregations) IPv4Range(name string) (*AggregationBucketRangeItems, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationBucketRangeItems)
+		if err := json.Unmarshal(raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// Histogram returns histogram aggregation results.
+// It returns (nil, false) when the named aggregation is absent or cannot be decoded.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-histogram-aggregation.html
+func (a Aggregations) Histogram(name string) (*AggregationBucketHistogramItems, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationBucketHistogramItems)
+		if err := json.Unmarshal(raw, agg); err == nil { // agg is already *T; passing &agg (**T) was inconsistent with every sibling accessor
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// DateHistogram returns date histogram aggregation results.
+// It returns (nil, false) when the named aggregation is absent or cannot be decoded.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-datehistogram-aggregation.html
+func (a Aggregations) DateHistogram(name string) (*AggregationBucketHistogramItems, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationBucketHistogramItems)
+		if err := json.Unmarshal(raw, agg); err == nil { // agg is already *T; passing &agg (**T) was inconsistent with every sibling accessor
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// GeoBounds returns geo-bounds aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-geobounds-aggregation.html
+func (a Aggregations) GeoBounds(name string) (*AggregationGeoBoundsMetric, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationGeoBoundsMetric)
+		if err := json.Unmarshal(raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// GeoHash returns geo-hash aggregation results.
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-geohashgrid-aggregation.html
+func (a Aggregations) GeoHash(name string) (*AggregationBucketKeyItems, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationBucketKeyItems)
+		if err := json.Unmarshal(raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// GeoDistance returns geo distance aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-geodistance-aggregation.html
+func (a Aggregations) GeoDistance(name string) (*AggregationBucketRangeItems, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationBucketRangeItems)
+		if err := json.Unmarshal(raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// -- Single value metric --
+
+// AggregationValueMetric is a single-value metric, returned e.g. by a
+// Min or Max aggregation.
+type AggregationValueMetric struct {
+	Aggregations
+
+	Value *float64 //`json:"value"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationValueMetric structure.
+func (a *AggregationValueMetric) UnmarshalJSON(data []byte) error {
+	var aggs map[string]json.RawMessage
+	if err := json.Unmarshal(data, &aggs); err != nil {
+		return err
+	}
+	a.Aggregations = aggs // keep the raw sub-aggregations so callers can drill down by name
+
+	json.Unmarshal(aggs["value"], &a.Value) // best-effort by design: a missing or null "value" simply leaves Value nil
+	return nil
+}
+
+// -- Stats metric --
+
+// AggregationStatsMetric is a multi-value metric, returned by a Stats aggregation.
+type AggregationStatsMetric struct {
+	Aggregations
+
+	Count int64    // `json:"count"`
+	Min   *float64 //`json:"min,omitempty"`
+	Max   *float64 //`json:"max,omitempty"`
+	Avg   *float64 //`json:"avg,omitempty"`
+	Sum   *float64 //`json:"sum,omitempty"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationStatsMetric structure.
+func (a *AggregationStatsMetric) UnmarshalJSON(data []byte) error {
+	var aggs map[string]json.RawMessage
+	if err := json.Unmarshal(data, &aggs); err != nil {
+		return err
+	}
+	a.Aggregations = aggs
+
+	json.Unmarshal(aggs["count"], &a.Count)
+	json.Unmarshal(aggs["min"], &a.Min)
+	json.Unmarshal(aggs["max"], &a.Max)
+	json.Unmarshal(aggs["avg"], &a.Avg)
+	json.Unmarshal(aggs["sum"], &a.Sum)
+	return nil
+}
+
+// -- Extended stats metric --
+
+// AggregationExtendedStatsMetric is a multi-value metric, returned by an ExtendedStats aggregation.
+type AggregationExtendedStatsMetric struct {
+	Aggregations
+
+	Count        int64    // `json:"count"`
+	Min          *float64 //`json:"min,omitempty"`
+	Max          *float64 //`json:"max,omitempty"`
+	Avg          *float64 //`json:"avg,omitempty"`
+	Sum          *float64 //`json:"sum,omitempty"`
+	SumOfSquares *float64 //`json:"sum_of_squares,omitempty"`
+	Variance     *float64 //`json:"variance,omitempty"`
+	StdDeviation *float64 //`json:"std_deviation,omitempty"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationExtendedStatsMetric structure.
+func (a *AggregationExtendedStatsMetric) UnmarshalJSON(data []byte) error {
+	var aggs map[string]json.RawMessage
+	if err := json.Unmarshal(data, &aggs); err != nil {
+		return err
+	}
+	a.Aggregations = aggs
+
+	json.Unmarshal(aggs["count"], &a.Count)
+	json.Unmarshal(aggs["min"], &a.Min)
+	json.Unmarshal(aggs["max"], &a.Max)
+	json.Unmarshal(aggs["avg"], &a.Avg)
+	json.Unmarshal(aggs["sum"], &a.Sum)
+	json.Unmarshal(aggs["sum_of_squares"], &a.SumOfSquares)
+	json.Unmarshal(aggs["variance"], &a.Variance)
+	json.Unmarshal(aggs["std_deviation"], &a.StdDeviation)
+	return nil
+}
+
+// -- Percentiles metric --
+
+// AggregationPercentilesMetric is a multi-value metric, returned by a Percentiles aggregation.
+type AggregationPercentilesMetric struct {
+	Aggregations
+
+	Values map[string]float64 // `json:"values"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationPercentilesMetric structure.
+func (a *AggregationPercentilesMetric) UnmarshalJSON(data []byte) error {
+	var aggs map[string]json.RawMessage
+	if err := json.Unmarshal(data, &aggs); err != nil {
+		return err
+	}
+	a.Aggregations = aggs
+
+	json.Unmarshal(aggs["values"], &a.Values)
+	return nil
+}
+
+// -- Top-hits metric --
+
+// AggregationTopHitsMetric is a metric returned by a TopHits aggregation.
+type AggregationTopHitsMetric struct {
+	Aggregations
+
+	Hits *SearchHits //`json:"hits"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationTopHitsMetric structure.
+func (a *AggregationTopHitsMetric) UnmarshalJSON(data []byte) error {
+	var aggs map[string]json.RawMessage
+	if err := json.Unmarshal(data, &aggs); err != nil {
+		return err
+	}
+	a.Aggregations = aggs
+	a.Hits = new(SearchHits)
+	json.Unmarshal(aggs["hits"], a.Hits) // a.Hits is already *SearchHits; &a.Hits was a needless **SearchHits indirection. Best-effort: missing "hits" leaves the zero value.
+	return nil
+}
+
+// -- Geo-bounds metric --
+
+// AggregationGeoBoundsMetric is a metric as returned by a GeoBounds aggregation.
+type AggregationGeoBoundsMetric struct {
+	Aggregations
+
+	Bounds struct {
+		TopLeft struct {
+			Latitude  float64 `json:"lat"`
+			Longitude float64 `json:"lon"`
+		} `json:"top_left"`
+		BottomRight struct {
+			Latitude  float64 `json:"lat"`
+			Longitude float64 `json:"lon"`
+		} `json:"bottom_right"`
+	} `json:"bounds"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationGeoBoundsMetric structure.
+func (a *AggregationGeoBoundsMetric) UnmarshalJSON(data []byte) error {
+	var aggs map[string]json.RawMessage
+	if err := json.Unmarshal(data, &aggs); err != nil {
+		return err
+	}
+	a.Aggregations = aggs
+
+	json.Unmarshal(aggs["bounds"], &a.Bounds)
+	return nil
+}
+
+// -- Single bucket --
+
+// AggregationSingleBucket is a single bucket, returned e.g. via an aggregation of type Global.
+type AggregationSingleBucket struct {
+	Aggregations
+
+	DocCount int64 // `json:"doc_count"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationSingleBucket structure.
+func (a *AggregationSingleBucket) UnmarshalJSON(data []byte) error {
+	var aggs map[string]json.RawMessage
+	if err := json.Unmarshal(data, &aggs); err != nil {
+		return err
+	}
+	a.Aggregations = aggs
+
+	json.Unmarshal(aggs["doc_count"], &a.DocCount)
+	return nil
+}
+
+// -- Bucket range items --
+
+// AggregationBucketRangeItems is a bucket aggregation that is e.g. returned
+// with a range aggregation.
+type AggregationBucketRangeItems struct {
+	Aggregations
+
+	DocCountErrorUpperBound int64                         //`json:"doc_count_error_upper_bound"`
+	SumOfOtherDocCount      int64                         //`json:"sum_other_doc_count"`
+	Buckets                 []*AggregationBucketRangeItem //`json:"buckets"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketRangeItems structure.
+func (a *AggregationBucketRangeItems) UnmarshalJSON(data []byte) error {
+	var aggs map[string]json.RawMessage
+	if err := json.Unmarshal(data, &aggs); err != nil {
+		return err
+	}
+	a.Aggregations = aggs
+
+	json.Unmarshal(aggs["doc_count_error_upper_bound"], &a.DocCountErrorUpperBound)
+	json.Unmarshal(aggs["sum_other_doc_count"], &a.SumOfOtherDocCount)
+	json.Unmarshal(aggs["buckets"], &a.Buckets)
+	return nil
+}
+
+// AggregationBucketRangeItem is a single bucket of an AggregationBucketRangeItems structure.
+type AggregationBucketRangeItem struct {
+	Aggregations
+
+	Key          string   //`json:"key"`
+	DocCount     int64    //`json:"doc_count"`
+	From         *float64 //`json:"from"`
+	FromAsString string   //`json:"from_as_string"`
+	To           *float64 //`json:"to"`
+	ToAsString   string   //`json:"to_as_string"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketRangeItem structure.
+func (a *AggregationBucketRangeItem) UnmarshalJSON(data []byte) error {
+	var aggs map[string]json.RawMessage
+	if err := json.Unmarshal(data, &aggs); err != nil {
+		return err
+	}
+	a.Aggregations = aggs
+
+	json.Unmarshal(aggs["key"], &a.Key)
+	json.Unmarshal(aggs["doc_count"], &a.DocCount)
+	json.Unmarshal(aggs["from"], &a.From)
+	json.Unmarshal(aggs["from_as_string"], &a.FromAsString)
+	json.Unmarshal(aggs["to"], &a.To)
+	json.Unmarshal(aggs["to_as_string"], &a.ToAsString)
+	return nil
+}
+
+// -- Bucket key items --
+
+// AggregationBucketKeyItems is a bucket aggregation that is e.g. returned
+// with a terms aggregation.
+type AggregationBucketKeyItems struct {
+	Aggregations
+
+	DocCountErrorUpperBound int64                       //`json:"doc_count_error_upper_bound"`
+	SumOfOtherDocCount      int64                       //`json:"sum_other_doc_count"`
+	Buckets                 []*AggregationBucketKeyItem //`json:"buckets"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketKeyItems structure.
+func (a *AggregationBucketKeyItems) UnmarshalJSON(data []byte) error {
+	var aggs map[string]json.RawMessage
+	if err := json.Unmarshal(data, &aggs); err != nil {
+		return err
+	}
+	a.Aggregations = aggs
+
+	json.Unmarshal(aggs["doc_count_error_upper_bound"], &a.DocCountErrorUpperBound)
+	json.Unmarshal(aggs["sum_other_doc_count"], &a.SumOfOtherDocCount)
+	json.Unmarshal(aggs["buckets"], &a.Buckets)
+	return nil
+}
+
+// AggregationBucketKeyItem is a single bucket of an AggregationBucketKeyItems structure.
+type AggregationBucketKeyItem struct {
+	Aggregations
+
+	Key       interface{} //`json:"key"`
+	KeyNumber json.Number
+	DocCount  int64 //`json:"doc_count"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketKeyItem structure.
+func (a *AggregationBucketKeyItem) UnmarshalJSON(data []byte) error {
+	var aggs map[string]json.RawMessage
+	dec := json.NewDecoder(bytes.NewReader(data))
+	dec.UseNumber() // keep numeric values as json.Number in the raw map so precision is not lost to float64
+	if err := dec.Decode(&aggs); err != nil {
+		return err
+	}
+	a.Aggregations = aggs
+
+	json.Unmarshal(aggs["key"], &a.Key)       // generic form: string or float64, depending on the field type
+	json.Unmarshal(aggs["key"], &a.KeyNumber) // same raw bytes decoded again as json.Number; fails harmlessly for string keys
+	json.Unmarshal(aggs["doc_count"], &a.DocCount)
+	return nil
+}
+
+// -- Bucket types for significant terms --
+
+// AggregationBucketSignificantTerms is a bucket aggregation returned
+// with a significant terms aggregation.
+type AggregationBucketSignificantTerms struct {
+	Aggregations
+
+	DocCount int64                               //`json:"doc_count"`
+	Buckets  []*AggregationBucketSignificantTerm //`json:"buckets"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketSignificantTerms structure.
+func (a *AggregationBucketSignificantTerms) UnmarshalJSON(data []byte) error {
+	var aggs map[string]json.RawMessage
+	if err := json.Unmarshal(data, &aggs); err != nil {
+		return err
+	}
+	a.Aggregations = aggs
+
+	json.Unmarshal(aggs["doc_count"], &a.DocCount)
+	json.Unmarshal(aggs["buckets"], &a.Buckets)
+	return nil
+}
+
+// AggregationBucketSignificantTerm is a single bucket of an AggregationBucketSignificantTerms structure.
+type AggregationBucketSignificantTerm struct {
+	Aggregations
+
+	Key      string  //`json:"key"`
+	DocCount int64   //`json:"doc_count"`
+	BgCount  int64   //`json:"bg_count"`
+	Score    float64 //`json:"score"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketSignificantTerm structure.
+func (a *AggregationBucketSignificantTerm) UnmarshalJSON(data []byte) error {
+	var aggs map[string]json.RawMessage
+	if err := json.Unmarshal(data, &aggs); err != nil {
+		return err
+	}
+	a.Aggregations = aggs
+
+	json.Unmarshal(aggs["key"], &a.Key)
+	json.Unmarshal(aggs["doc_count"], &a.DocCount)
+	json.Unmarshal(aggs["bg_count"], &a.BgCount)
+	json.Unmarshal(aggs["score"], &a.Score)
+	return nil
+}
+
+// -- Bucket filters --
+
+// AggregationBucketFilters is a multi-bucket aggregation that is returned
+// with a filters aggregation.
+type AggregationBucketFilters struct {
+	Aggregations
+
+	Buckets      []*AggregationBucketKeyItem          //`json:"buckets"`
+	NamedBuckets map[string]*AggregationBucketKeyItem //`json:"buckets"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketFilters structure.
+func (a *AggregationBucketFilters) UnmarshalJSON(data []byte) error {
+	var aggs map[string]json.RawMessage
+	if err := json.Unmarshal(data, &aggs); err != nil {
+		return err
+	}
+	a.Aggregations = aggs
+
+	// "buckets" is an array for anonymous filters and an object for named filters;
+	// decode into both targets — exactly one succeeds, the other is a harmless no-op.
+	json.Unmarshal(aggs["buckets"], &a.Buckets)
+	json.Unmarshal(aggs["buckets"], &a.NamedBuckets)
+	return nil
+}
+
+// -- Bucket histogram items --
+
+// AggregationBucketHistogramItems is a bucket aggregation that is returned
+// with a date histogram aggregation.
+type AggregationBucketHistogramItems struct {
+	Aggregations
+
+	Buckets []*AggregationBucketHistogramItem //`json:"buckets"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketHistogramItems structure.
+func (a *AggregationBucketHistogramItems) UnmarshalJSON(data []byte) error {
+	var aggs map[string]json.RawMessage
+	if err := json.Unmarshal(data, &aggs); err != nil {
+		return err
+	}
+	a.Aggregations = aggs
+
+	json.Unmarshal(aggs["buckets"], &a.Buckets)
+	return nil
+}
+
+// AggregationBucketHistogramItem is a single bucket of an AggregationBucketHistogramItems structure.
+type AggregationBucketHistogramItem struct {
+	Aggregations
+
+	Key         int64   //`json:"key"`
+	KeyAsString *string //`json:"key_as_string"`
+	DocCount    int64   //`json:"doc_count"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketHistogramItem structure.
+func (a *AggregationBucketHistogramItem) UnmarshalJSON(data []byte) error {
+	var aggs map[string]json.RawMessage
+	if err := json.Unmarshal(data, &aggs); err != nil {
+		return err
+	}
+	a.Aggregations = aggs
+
+	json.Unmarshal(aggs["key"], &a.Key)
+	json.Unmarshal(aggs["key_as_string"], &a.KeyAsString)
+	json.Unmarshal(aggs["doc_count"], &a.DocCount)
+	return nil
+}

+ 100 - 0
sensitive/src/elastic.v1/search_aggs_avg.go

@@ -0,0 +1,100 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// AvgAggregation is a single-value metrics aggregation that computes
+// the average of numeric values that are extracted from the
+// aggregated documents. These values can be extracted either from
+// specific numeric fields in the documents, or be generated by
+// a provided script.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-avg-aggregation.html
+type AvgAggregation struct {
+	field           string
+	script          string
+	lang            string
+	format          string
+	params          map[string]interface{}
+	subAggregations map[string]Aggregation
+}
+
+func NewAvgAggregation() AvgAggregation {
+	a := AvgAggregation{
+		params:          make(map[string]interface{}),
+		subAggregations: make(map[string]Aggregation),
+	}
+	return a
+}
+
+func (a AvgAggregation) Field(field string) AvgAggregation {
+	a.field = field
+	return a
+}
+
+func (a AvgAggregation) Script(script string) AvgAggregation {
+	a.script = script
+	return a
+}
+
+func (a AvgAggregation) Lang(lang string) AvgAggregation {
+	a.lang = lang
+	return a
+}
+
+func (a AvgAggregation) Format(format string) AvgAggregation {
+	a.format = format
+	return a
+}
+
+func (a AvgAggregation) Param(name string, value interface{}) AvgAggregation {
+	a.params[name] = value
+	return a
+}
+
+func (a AvgAggregation) SubAggregation(name string, subAggregation Aggregation) AvgAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+func (a AvgAggregation) Source() interface{} {
+	// Example:
+	//	{
+	//    "aggs" : {
+	//      "avg_grade" : { "avg" : { "field" : "grade" } }
+	//    }
+	//	}
+	// This method returns only the { "avg" : { "field" : "grade" } } part.
+
+	source := make(map[string]interface{})
+	opts := make(map[string]interface{})
+	source["avg"] = opts
+
+	// ValuesSourceAggregationBuilder
+	if a.field != "" {
+		opts["field"] = a.field
+	}
+	if a.script != "" {
+		opts["script"] = a.script
+	}
+	if a.lang != "" {
+		opts["lang"] = a.lang
+	}
+	if a.format != "" {
+		opts["format"] = a.format
+	}
+	if len(a.params) > 0 {
+		opts["params"] = a.params
+	}
+
+	// AggregationBuilder (SubAggregations)
+	if len(a.subAggregations) > 0 {
+		aggsMap := make(map[string]interface{})
+		source["aggregations"] = aggsMap
+		for name, aggregate := range a.subAggregations {
+			aggsMap[name] = aggregate.Source()
+		}
+	}
+
+	return source
+}

+ 32 - 0
sensitive/src/elastic.v1/search_aggs_avg_test.go

@@ -0,0 +1,32 @@
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+func TestAvgAggregation(t *testing.T) {
+	agg := NewAvgAggregation().Field("grade")
+	data, err := json.Marshal(agg.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"avg":{"field":"grade"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestAvgAggregationWithFormat(t *testing.T) {
+	agg := NewAvgAggregation().Field("grade").Format("000.0")
+	data, err := json.Marshal(agg.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"avg":{"field":"grade","format":"000.0"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}

+ 119 - 0
sensitive/src/elastic.v1/search_aggs_cardinality.go

@@ -0,0 +1,119 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// CardinalityAggregation is a single-value metrics aggregation that
+// calculates an approximate count of distinct values.
+// Values can be extracted either from specific fields in the document
+// or generated by a script.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-cardinality-aggregation.html
+type CardinalityAggregation struct {
+	field              string
+	script             string
+	lang               string
+	format             string
+	params             map[string]interface{}
+	subAggregations    map[string]Aggregation
+	precisionThreshold *int64
+	rehash             *bool
+}
+
+func NewCardinalityAggregation() CardinalityAggregation {
+	a := CardinalityAggregation{
+		params:          make(map[string]interface{}),
+		subAggregations: make(map[string]Aggregation),
+	}
+	return a
+}
+
+func (a CardinalityAggregation) Field(field string) CardinalityAggregation {
+	a.field = field
+	return a
+}
+
+func (a CardinalityAggregation) Script(script string) CardinalityAggregation {
+	a.script = script
+	return a
+}
+
+func (a CardinalityAggregation) Lang(lang string) CardinalityAggregation {
+	a.lang = lang
+	return a
+}
+
+func (a CardinalityAggregation) Format(format string) CardinalityAggregation {
+	a.format = format
+	return a
+}
+
+func (a CardinalityAggregation) Param(name string, value interface{}) CardinalityAggregation {
+	a.params[name] = value
+	return a
+}
+
+func (a CardinalityAggregation) SubAggregation(name string, subAggregation Aggregation) CardinalityAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+func (a CardinalityAggregation) PrecisionThreshold(threshold int64) CardinalityAggregation {
+	a.precisionThreshold = &threshold
+	return a
+}
+
+func (a CardinalityAggregation) Rehash(rehash bool) CardinalityAggregation {
+	a.rehash = &rehash
+	return a
+}
+
+func (a CardinalityAggregation) Source() interface{} {
+	// Example:
+	//	{
+	//    "aggs" : {
+	//      "author_count" : {
+	//        "cardinality" : { "field" : "author" }
+	//      }
+	//    }
+	//	}
+	// This method returns only the "cardinality" : { "field" : "author" } part.
+
+	source := make(map[string]interface{})
+	opts := make(map[string]interface{})
+	source["cardinality"] = opts
+
+	// ValuesSourceAggregationBuilder
+	if a.field != "" {
+		opts["field"] = a.field
+	}
+	if a.script != "" {
+		opts["script"] = a.script
+	}
+	if a.lang != "" {
+		opts["lang"] = a.lang
+	}
+	if a.format != "" {
+		opts["format"] = a.format
+	}
+	if len(a.params) > 0 {
+		opts["params"] = a.params
+	}
+	if a.precisionThreshold != nil {
+		opts["precision_threshold"] = *a.precisionThreshold
+	}
+	if a.rehash != nil {
+		opts["rehash"] = *a.rehash
+	}
+
+	// AggregationBuilder (SubAggregations)
+	if len(a.subAggregations) > 0 {
+		aggsMap := make(map[string]interface{})
+		source["aggregations"] = aggsMap
+		for name, aggregate := range a.subAggregations {
+			aggsMap[name] = aggregate.Source()
+		}
+	}
+
+	return source
+}

+ 45 - 0
sensitive/src/elastic.v1/search_aggs_cardinality_test.go

@@ -0,0 +1,45 @@
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+// TestCardinalityAggregation checks serialization of a minimal cardinality
+// aggregation with only a field set.
+func TestCardinalityAggregation(t *testing.T) {
+	agg := NewCardinalityAggregation().Field("author.hash")
+	data, err := json.Marshal(agg.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"cardinality":{"field":"author.hash"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+// TestCardinalityAggregationWithOptions checks that precision_threshold and
+// rehash are serialized when explicitly set.
+func TestCardinalityAggregationWithOptions(t *testing.T) {
+	agg := NewCardinalityAggregation().Field("author.hash").PrecisionThreshold(100).Rehash(true)
+	data, err := json.Marshal(agg.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"cardinality":{"field":"author.hash","precision_threshold":100,"rehash":true}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+// TestCardinalityAggregationWithFormat checks that a custom value format
+// is included in the serialized output.
+func TestCardinalityAggregationWithFormat(t *testing.T) {
+	agg := NewCardinalityAggregation().Field("author.hash").Format("00000")
+	data, err := json.Marshal(agg.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"cardinality":{"field":"author.hash","format":"00000"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}

+ 57 - 0
sensitive/src/elastic.v1/search_aggs_children.go

@@ -0,0 +1,57 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// ChildrenAggregation is a special single bucket aggregation that enables
+// aggregating from buckets on parent document types to buckets on child documents.
+// It is available from 1.4.0.Beta1 upwards.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-children-aggregation.html
+type ChildrenAggregation struct {
+	typ             string
+	subAggregations map[string]Aggregation
+}
+
+// NewChildrenAggregation creates a new, empty ChildrenAggregation.
+func NewChildrenAggregation() ChildrenAggregation {
+	a := ChildrenAggregation{
+		subAggregations: make(map[string]Aggregation),
+	}
+	return a
+}
+
+// Type sets the child document type to aggregate on.
+func (a ChildrenAggregation) Type(typ string) ChildrenAggregation {
+	a.typ = typ
+	return a
+}
+
+// SubAggregation registers a sub-aggregation under the given name.
+// The map is shared between value copies of this aggregation.
+func (a ChildrenAggregation) SubAggregation(name string, subAggregation Aggregation) ChildrenAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+// Source returns the JSON-serializable body of the aggregation.
+// NOTE(review): this emits {"type": ...} directly; the Elasticsearch docs
+// show the body wrapped as {"children": {"type": ...}} — confirm against
+// the ES version this client targets. The package's own tests expect the
+// unwrapped form.
+func (a ChildrenAggregation) Source() interface{} {
+	// Example:
+	//	{
+	//    "aggs" : {
+	//      "to-answers" : {
+	//        "type" : "answer"
+	//      }
+	//    }
+	//	}
+	// This method returns only the { "type" : ... } part.
+
+	source := make(map[string]interface{})
+	source["type"] = a.typ
+
+	// AggregationBuilder (SubAggregations)
+	if len(a.subAggregations) > 0 {
+		aggsMap := make(map[string]interface{})
+		source["aggregations"] = aggsMap
+		for name, aggregate := range a.subAggregations {
+			aggsMap[name] = aggregate.Source()
+		}
+	}
+
+	return source
+}

+ 34 - 0
sensitive/src/elastic.v1/search_aggs_children_test.go

@@ -0,0 +1,34 @@
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+// TestChildrenAggregation checks serialization of a children aggregation
+// with only the child type set.
+func TestChildrenAggregation(t *testing.T) {
+	agg := NewChildrenAggregation().Type("answer")
+	data, err := json.Marshal(agg.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"type":"answer"}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+// TestChildrenAggregationWithSubAggregation checks that a nested terms
+// sub-aggregation is serialized under "aggregations".
+func TestChildrenAggregationWithSubAggregation(t *testing.T) {
+	subAgg := NewTermsAggregation().Field("owner.display_name").Size(10)
+	agg := NewChildrenAggregation().Type("answer")
+	agg = agg.SubAggregation("top-names", subAgg)
+	data, err := json.Marshal(agg.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"aggregations":{"top-names":{"terms":{"field":"owner.display_name","size":10}}},"type":"answer"}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}

+ 294 - 0
sensitive/src/elastic.v1/search_aggs_date_histogram.go

@@ -0,0 +1,294 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// DateHistogramAggregation is a multi-bucket aggregation similar to the
+// histogram except it can only be applied on date values.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-datehistogram-aggregation.html
+type DateHistogramAggregation struct {
+	// Common value-source fields (field/script based aggregation input).
+	field           string
+	script          string
+	lang            string
+	params          map[string]interface{}
+	subAggregations map[string]Aggregation
+
+	// Histogram-specific settings. Pointer fields distinguish
+	// "explicitly set" from the zero value when serializing.
+	interval                   string
+	order                      string
+	orderAsc                   bool
+	minDocCount                *int64
+	extendedBoundsMin          interface{}
+	extendedBoundsMax          interface{}
+	preZone                    string
+	postZone                   string
+	preZoneAdjustLargeInterval *bool
+	format                     string
+	preOffset                  int64
+	postOffset                 int64
+	factor                     *float32
+}
+
+// NewDateHistogramAggregation creates a new, empty DateHistogramAggregation.
+func NewDateHistogramAggregation() DateHistogramAggregation {
+	a := DateHistogramAggregation{
+		params:          make(map[string]interface{}),
+		subAggregations: make(map[string]Aggregation),
+	}
+	return a
+}
+
+// Field sets the document field the histogram is computed on.
+func (a DateHistogramAggregation) Field(field string) DateHistogramAggregation {
+	a.field = field
+	return a
+}
+
+// Script sets a script to generate the values instead of a field.
+func (a DateHistogramAggregation) Script(script string) DateHistogramAggregation {
+	a.script = script
+	return a
+}
+
+// Lang sets the language of the script.
+func (a DateHistogramAggregation) Lang(lang string) DateHistogramAggregation {
+	a.lang = lang
+	return a
+}
+
+// Param registers a named script parameter (map is shared between copies).
+func (a DateHistogramAggregation) Param(name string, value interface{}) DateHistogramAggregation {
+	a.params[name] = value
+	return a
+}
+
+// SubAggregation registers a sub-aggregation under the given name.
+func (a DateHistogramAggregation) SubAggregation(name string, subAggregation Aggregation) DateHistogramAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+// Allowed values are: "year", "quarter", "month", "week", "day",
+// "hour", "minute". It also supports time settings like "1.5h"
+// (up to "w" for weeks).
+func (a DateHistogramAggregation) Interval(interval string) DateHistogramAggregation {
+	a.interval = interval
+	return a
+}
+
+// Order specifies the sort order. Valid values for order are:
+// "_key", "_count", a sub-aggregation name, or a sub-aggregation name
+// with a metric.
+func (a DateHistogramAggregation) Order(order string, asc bool) DateHistogramAggregation {
+	a.order = order
+	a.orderAsc = asc
+	return a
+}
+
+// OrderByCount orders buckets by their document count.
+func (a DateHistogramAggregation) OrderByCount(asc bool) DateHistogramAggregation {
+	// "order" : { "_count" : "asc" }
+	a.order = "_count"
+	a.orderAsc = asc
+	return a
+}
+
+// OrderByCountAsc orders buckets by ascending document count.
+func (a DateHistogramAggregation) OrderByCountAsc() DateHistogramAggregation {
+	return a.OrderByCount(true)
+}
+
+// OrderByCountDesc orders buckets by descending document count.
+func (a DateHistogramAggregation) OrderByCountDesc() DateHistogramAggregation {
+	return a.OrderByCount(false)
+}
+
+// OrderByKey orders buckets by their key.
+func (a DateHistogramAggregation) OrderByKey(asc bool) DateHistogramAggregation {
+	// "order" : { "_key" : "asc" }
+	a.order = "_key"
+	a.orderAsc = asc
+	return a
+}
+
+// OrderByKeyAsc orders buckets by ascending key.
+func (a DateHistogramAggregation) OrderByKeyAsc() DateHistogramAggregation {
+	return a.OrderByKey(true)
+}
+
+// OrderByKeyDesc orders buckets by descending key.
+func (a DateHistogramAggregation) OrderByKeyDesc() DateHistogramAggregation {
+	return a.OrderByKey(false)
+}
+
+// OrderByAggregation creates a bucket ordering strategy which sorts buckets
+// based on a single-valued calc get.
+func (a DateHistogramAggregation) OrderByAggregation(aggName string, asc bool) DateHistogramAggregation {
+	// {
+	//     "aggs" : {
+	//         "genders" : {
+	//             "terms" : {
+	//                 "field" : "gender",
+	//                 "order" : { "avg_height" : "desc" }
+	//             },
+	//             "aggs" : {
+	//                 "avg_height" : { "avg" : { "field" : "height" } }
+	//             }
+	//         }
+	//     }
+	// }
+	a.order = aggName
+	a.orderAsc = asc
+	return a
+}
+
+// OrderByAggregationAndMetric creates a bucket ordering strategy which
+// sorts buckets based on a multi-valued calc get.
+func (a DateHistogramAggregation) OrderByAggregationAndMetric(aggName, metric string, asc bool) DateHistogramAggregation {
+	// {
+	//     "aggs" : {
+	//         "genders" : {
+	//             "terms" : {
+	//                 "field" : "gender",
+	//                 "order" : { "height_stats.avg" : "desc" }
+	//             },
+	//             "aggs" : {
+	//                 "height_stats" : { "stats" : { "field" : "height" } }
+	//             }
+	//         }
+	//     }
+	// }
+	a.order = aggName + "." + metric
+	a.orderAsc = asc
+	return a
+}
+
+// MinDocCount sets the minimum number of documents a bucket must contain
+// to be returned; only serialized when explicitly set.
+func (a DateHistogramAggregation) MinDocCount(minDocCount int64) DateHistogramAggregation {
+	a.minDocCount = &minDocCount
+	return a
+}
+
+// PreZone sets the time zone applied before rounding (pre_zone).
+func (a DateHistogramAggregation) PreZone(preZone string) DateHistogramAggregation {
+	a.preZone = preZone
+	return a
+}
+
+// PostZone sets the time zone applied after rounding (post_zone).
+func (a DateHistogramAggregation) PostZone(postZone string) DateHistogramAggregation {
+	a.postZone = postZone
+	return a
+}
+
+// PreZoneAdjustLargeInterval sets pre_zone_adjust_large_interval;
+// only serialized when explicitly set.
+func (a DateHistogramAggregation) PreZoneAdjustLargeInterval(preZoneAdjustLargeInterval bool) DateHistogramAggregation {
+	a.preZoneAdjustLargeInterval = &preZoneAdjustLargeInterval
+	return a
+}
+
+// PreOffset sets an offset applied before bucketing. A zero value is
+// treated as "unset" and not serialized.
+func (a DateHistogramAggregation) PreOffset(preOffset int64) DateHistogramAggregation {
+	a.preOffset = preOffset
+	return a
+}
+
+// PostOffset sets an offset applied after bucketing. A zero value is
+// treated as "unset" and not serialized.
+func (a DateHistogramAggregation) PostOffset(postOffset int64) DateHistogramAggregation {
+	a.postOffset = postOffset
+	return a
+}
+
+// Factor sets a multiplication factor for the values; only serialized
+// when explicitly set.
+func (a DateHistogramAggregation) Factor(factor float32) DateHistogramAggregation {
+	a.factor = &factor
+	return a
+}
+
+// Format sets the date format used for bucket keys.
+func (a DateHistogramAggregation) Format(format string) DateHistogramAggregation {
+	a.format = format
+	return a
+}
+
+// ExtendedBoundsMin accepts int, int64, string, or time.Time values.
+func (a DateHistogramAggregation) ExtendedBoundsMin(min interface{}) DateHistogramAggregation {
+	a.extendedBoundsMin = min
+	return a
+}
+
+// ExtendedBoundsMax accepts int, int64, string, or time.Time values.
+func (a DateHistogramAggregation) ExtendedBoundsMax(max interface{}) DateHistogramAggregation {
+	a.extendedBoundsMax = max
+	return a
+}
+
+// Source returns the JSON-serializable body of the aggregation. Only
+// settings that were explicitly provided are emitted; "interval" is
+// always emitted because Elasticsearch requires it.
+func (a DateHistogramAggregation) Source() interface{} {
+	// Example:
+	// {
+	//     "aggs" : {
+	//         "articles_over_time" : {
+	//             "date_histogram" : {
+	//                 "field" : "date",
+	//                 "interval" : "month"
+	//             }
+	//         }
+	//     }
+	// }
+	//
+	// This method returns only the { "date_histogram" : { ... } } part.
+
+	source := make(map[string]interface{})
+	opts := make(map[string]interface{})
+	source["date_histogram"] = opts
+
+	// ValuesSourceAggregationBuilder
+	if a.field != "" {
+		opts["field"] = a.field
+	}
+	if a.script != "" {
+		opts["script"] = a.script
+	}
+	if a.lang != "" {
+		opts["lang"] = a.lang
+	}
+	if len(a.params) > 0 {
+		opts["params"] = a.params
+	}
+
+	opts["interval"] = a.interval
+	if a.minDocCount != nil {
+		opts["min_doc_count"] = *a.minDocCount
+	}
+	if a.order != "" {
+		o := make(map[string]interface{})
+		if a.orderAsc {
+			o[a.order] = "asc"
+		} else {
+			o[a.order] = "desc"
+		}
+		opts["order"] = o
+	}
+	if a.preZone != "" {
+		opts["pre_zone"] = a.preZone
+	}
+	if a.postZone != "" {
+		opts["post_zone"] = a.postZone
+	}
+	if a.preZoneAdjustLargeInterval != nil {
+		opts["pre_zone_adjust_large_interval"] = *a.preZoneAdjustLargeInterval
+	}
+	if a.preOffset != 0 {
+		opts["pre_offset"] = a.preOffset
+	}
+	if a.postOffset != 0 {
+		opts["post_offset"] = a.postOffset
+	}
+	if a.factor != nil {
+		opts["factor"] = *a.factor
+	}
+	if a.format != "" {
+		opts["format"] = a.format
+	}
+	if a.extendedBoundsMin != nil || a.extendedBoundsMax != nil {
+		bounds := make(map[string]interface{})
+		if a.extendedBoundsMin != nil {
+			bounds["min"] = a.extendedBoundsMin
+		}
+		if a.extendedBoundsMax != nil {
+			bounds["max"] = a.extendedBoundsMax
+		}
+		opts["extended_bounds"] = bounds
+	}
+
+	// AggregationBuilder (SubAggregations)
+	if len(a.subAggregations) > 0 {
+		aggsMap := make(map[string]interface{})
+		source["aggregations"] = aggsMap
+		for name, aggregate := range a.subAggregations {
+			aggsMap[name] = aggregate.Source()
+		}
+	}
+
+	return source
+}

+ 19 - 0
sensitive/src/elastic.v1/search_aggs_date_histogram_test.go

@@ -0,0 +1,19 @@
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+// TestDateHistogramAggregation checks serialization of a date histogram
+// with field, interval, and key format set.
+func TestDateHistogramAggregation(t *testing.T) {
+	agg := NewDateHistogramAggregation().Field("date").Interval("month").Format("YYYY-MM")
+	data, err := json.Marshal(agg.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"date_histogram":{"field":"date","format":"YYYY-MM","interval":"month"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}

+ 234 - 0
sensitive/src/elastic.v1/search_aggs_date_range.go

@@ -0,0 +1,234 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"time"
+)
+
+// DateRangeAggregation is a range aggregation that is dedicated for
+// date values. The main difference between this aggregation and the
+// normal range aggregation is that the from and to values can be expressed
+// in Date Math expressions, and it is also possible to specify a
+// date format by which the from and to response fields will be returned.
+// Note that this aggregation includes the from value and excludes the to
+// value for each range.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-daterange-aggregation.html
+type DateRangeAggregation struct {
+	field           string
+	script          string
+	lang            string
+	params          map[string]interface{}
+	subAggregations map[string]Aggregation
+	keyed           *bool
+	unmapped        *bool
+	format          string
+	entries         []DateRangeAggregationEntry
+}
+
+// DateRangeAggregationEntry is a single range; From/To may each be nil
+// (unbounded), a numeric type, a string (e.g. date math), or a time.Time.
+type DateRangeAggregationEntry struct {
+	Key  string
+	From interface{}
+	To   interface{}
+}
+
+// NewDateRangeAggregation creates a new, empty DateRangeAggregation.
+func NewDateRangeAggregation() DateRangeAggregation {
+	a := DateRangeAggregation{
+		params:          make(map[string]interface{}),
+		subAggregations: make(map[string]Aggregation),
+		entries:         make([]DateRangeAggregationEntry, 0),
+	}
+	return a
+}
+
+// Field sets the document field the ranges are computed on.
+func (a DateRangeAggregation) Field(field string) DateRangeAggregation {
+	a.field = field
+	return a
+}
+
+// Script sets a script to generate the values instead of a field.
+func (a DateRangeAggregation) Script(script string) DateRangeAggregation {
+	a.script = script
+	return a
+}
+
+// Lang sets the language of the script.
+func (a DateRangeAggregation) Lang(lang string) DateRangeAggregation {
+	a.lang = lang
+	return a
+}
+
+// Param registers a named script parameter (map is shared between copies).
+func (a DateRangeAggregation) Param(name string, value interface{}) DateRangeAggregation {
+	a.params[name] = value
+	return a
+}
+
+// SubAggregation registers a sub-aggregation under the given name.
+func (a DateRangeAggregation) SubAggregation(name string, subAggregation Aggregation) DateRangeAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+// Keyed sets whether the response buckets are keyed by range key;
+// only serialized when explicitly set.
+func (a DateRangeAggregation) Keyed(keyed bool) DateRangeAggregation {
+	a.keyed = &keyed
+	return a
+}
+
+// Unmapped sets the "unmapped" option; only serialized when explicitly set.
+func (a DateRangeAggregation) Unmapped(unmapped bool) DateRangeAggregation {
+	a.unmapped = &unmapped
+	return a
+}
+
+// Format sets the date format used for the from/to response fields.
+func (a DateRangeAggregation) Format(format string) DateRangeAggregation {
+	a.format = format
+	return a
+}
+
+// AddRange appends a range [from, to); either bound may be nil.
+func (a DateRangeAggregation) AddRange(from, to interface{}) DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: to})
+	return a
+}
+
+// AddRangeWithKey appends a named range [from, to).
+func (a DateRangeAggregation) AddRangeWithKey(key string, from, to interface{}) DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: to})
+	return a
+}
+
+// AddUnboundedTo appends a range with no upper bound.
+func (a DateRangeAggregation) AddUnboundedTo(from interface{}) DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: nil})
+	return a
+}
+
+// AddUnboundedToWithKey appends a named range with no upper bound.
+func (a DateRangeAggregation) AddUnboundedToWithKey(key string, from interface{}) DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: nil})
+	return a
+}
+
+// AddUnboundedFrom appends a range with no lower bound.
+func (a DateRangeAggregation) AddUnboundedFrom(to interface{}) DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{From: nil, To: to})
+	return a
+}
+
+// AddUnboundedFromWithKey appends a named range with no lower bound.
+func (a DateRangeAggregation) AddUnboundedFromWithKey(key string, to interface{}) DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: nil, To: to})
+	return a
+}
+
+// Lt is an alias for AddUnboundedFrom: values strictly below to.
+func (a DateRangeAggregation) Lt(to interface{}) DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{From: nil, To: to})
+	return a
+}
+
+// LtWithKey is a named variant of Lt.
+func (a DateRangeAggregation) LtWithKey(key string, to interface{}) DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: nil, To: to})
+	return a
+}
+
+// Between is an alias for AddRange: values in [from, to).
+func (a DateRangeAggregation) Between(from, to interface{}) DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: to})
+	return a
+}
+
+// BetweenWithKey is a named variant of Between.
+func (a DateRangeAggregation) BetweenWithKey(key string, from, to interface{}) DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: to})
+	return a
+}
+
+// Gt is an alias for AddUnboundedTo: values at or above from.
+func (a DateRangeAggregation) Gt(from interface{}) DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: nil})
+	return a
+}
+
+// GtWithKey is a named variant of Gt.
+func (a DateRangeAggregation) GtWithKey(key string, from interface{}) DateRangeAggregation {
+	a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: nil})
+	return a
+}
+
+// Source returns the JSON-serializable body of the aggregation.
+// time.Time bounds are formatted as RFC 3339; bound types other than
+// numerics, time.Time, and string are silently dropped.
+func (a DateRangeAggregation) Source() interface{} {
+	// Example:
+	// {
+	//     "aggs" : {
+	//         "range" : {
+	//             "date_range": {
+	//                 "field": "date",
+	//                 "format": "MM-yyy",
+	//                 "ranges": [
+	//                     { "to": "now-10M/M" },
+	//                     { "from": "now-10M/M" }
+	//                 ]
+	//             }
+	//         }
+	//         }
+	//     }
+	// }
+	//
+	// This method returns only the { "date_range" : { ... } } part.
+
+	source := make(map[string]interface{})
+	opts := make(map[string]interface{})
+	source["date_range"] = opts
+
+	// ValuesSourceAggregationBuilder
+	if a.field != "" {
+		opts["field"] = a.field
+	}
+	if a.script != "" {
+		opts["script"] = a.script
+	}
+	if a.lang != "" {
+		opts["lang"] = a.lang
+	}
+	if len(a.params) > 0 {
+		opts["params"] = a.params
+	}
+
+	if a.keyed != nil {
+		opts["keyed"] = *a.keyed
+	}
+	if a.unmapped != nil {
+		opts["unmapped"] = *a.unmapped
+	}
+	if a.format != "" {
+		opts["format"] = a.format
+	}
+
+	ranges := make([]interface{}, 0)
+	for _, ent := range a.entries {
+		r := make(map[string]interface{})
+		if ent.Key != "" {
+			r["key"] = ent.Key
+		}
+		if ent.From != nil {
+			switch from := ent.From.(type) {
+			case int, int16, int32, int64, float32, float64:
+				r["from"] = from
+			case time.Time:
+				r["from"] = from.Format(time.RFC3339)
+			case string:
+				r["from"] = from
+			}
+		}
+		if ent.To != nil {
+			switch to := ent.To.(type) {
+			case int, int16, int32, int64, float32, float64:
+				r["to"] = to
+			case time.Time:
+				r["to"] = to.Format(time.RFC3339)
+			case string:
+				r["to"] = to
+			}
+		}
+		ranges = append(ranges, r)
+	}
+	opts["ranges"] = ranges
+
+	// AggregationBuilder (SubAggregations)
+	if len(a.subAggregations) > 0 {
+		aggsMap := make(map[string]interface{})
+		source["aggregations"] = aggsMap
+		for name, aggregate := range a.subAggregations {
+			aggsMap[name] = aggregate.Source()
+		}
+	}
+
+	return source
+}

+ 102 - 0
sensitive/src/elastic.v1/search_aggs_date_range_test.go

@@ -0,0 +1,102 @@
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+// TestDateRangeAggregation checks serialization of explicitly added
+// ranges, including nil (unbounded) ends.
+func TestDateRangeAggregation(t *testing.T) {
+	agg := NewDateRangeAggregation().Field("created_at")
+	agg = agg.AddRange(nil, "2012-12-31")
+	agg = agg.AddRange("2013-01-01", "2013-12-31")
+	agg = agg.AddRange("2014-01-01", nil)
+	data, err := json.Marshal(agg.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"date_range":{"field":"created_at","ranges":[{"to":"2012-12-31"},{"from":"2013-01-01","to":"2013-12-31"},{"from":"2014-01-01"}]}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+// TestDateRangeAggregationWithUnbounded checks that the AddUnbounded*
+// helpers produce the same output as AddRange with nil bounds.
+func TestDateRangeAggregationWithUnbounded(t *testing.T) {
+	agg := NewDateRangeAggregation().Field("created_at").
+		AddUnboundedFrom("2012-12-31").
+		AddRange("2013-01-01", "2013-12-31").
+		AddUnboundedTo("2014-01-01")
+	data, err := json.Marshal(agg.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"date_range":{"field":"created_at","ranges":[{"to":"2012-12-31"},{"from":"2013-01-01","to":"2013-12-31"},{"from":"2014-01-01"}]}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+// TestDateRangeAggregationWithLtAndCo checks the Lt/Between/Gt aliases.
+func TestDateRangeAggregationWithLtAndCo(t *testing.T) {
+	agg := NewDateRangeAggregation().Field("created_at").
+		Lt("2012-12-31").
+		Between("2013-01-01", "2013-12-31").
+		Gt("2014-01-01")
+	data, err := json.Marshal(agg.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"date_range":{"field":"created_at","ranges":[{"to":"2012-12-31"},{"from":"2013-01-01","to":"2013-12-31"},{"from":"2014-01-01"}]}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+// TestDateRangeAggregationWithKeyedFlag checks that "keyed":true is
+// serialized when set.
+func TestDateRangeAggregationWithKeyedFlag(t *testing.T) {
+	agg := NewDateRangeAggregation().Field("created_at").
+		Keyed(true).
+		Lt("2012-12-31").
+		Between("2013-01-01", "2013-12-31").
+		Gt("2014-01-01")
+	data, err := json.Marshal(agg.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"date_range":{"field":"created_at","keyed":true,"ranges":[{"to":"2012-12-31"},{"from":"2013-01-01","to":"2013-12-31"},{"from":"2014-01-01"}]}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+// TestDateRangeAggregationWithKeys checks that per-range keys are emitted.
+func TestDateRangeAggregationWithKeys(t *testing.T) {
+	agg := NewDateRangeAggregation().Field("created_at").
+		Keyed(true).
+		LtWithKey("pre-2012", "2012-12-31").
+		BetweenWithKey("2013", "2013-01-01", "2013-12-31").
+		GtWithKey("post-2013", "2014-01-01")
+	data, err := json.Marshal(agg.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"date_range":{"field":"created_at","keyed":true,"ranges":[{"key":"pre-2012","to":"2012-12-31"},{"from":"2013-01-01","key":"2013","to":"2013-12-31"},{"from":"2014-01-01","key":"post-2013"}]}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+// TestDateRangeAggregationWithSpecialNames checks that date-math strings
+// (e.g. "now-10M/M") pass through unchanged.
+func TestDateRangeAggregationWithSpecialNames(t *testing.T) {
+	agg := NewDateRangeAggregation().Field("created_at").
+		AddRange("now-10M/M", "now+10M/M")
+	data, err := json.Marshal(agg.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"date_range":{"field":"created_at","ranges":[{"from":"now-10M/M","to":"now+10M/M"}]}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}

+ 99 - 0
sensitive/src/elastic.v1/search_aggs_extended_stats.go

@@ -0,0 +1,99 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// ExtendedStatsAggregation is a multi-value metrics aggregation that
+// computes stats over numeric values extracted from the aggregated documents.
+// These values can be extracted either from specific numeric fields
+// in the documents, or be generated by a provided script.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-extendedstats-aggregation.html
+type ExtendedStatsAggregation struct {
+	field           string
+	script          string
+	lang            string
+	format          string
+	params          map[string]interface{}
+	subAggregations map[string]Aggregation
+}
+
+// NewExtendedStatsAggregation creates a new, empty ExtendedStatsAggregation.
+func NewExtendedStatsAggregation() ExtendedStatsAggregation {
+	a := ExtendedStatsAggregation{
+		params:          make(map[string]interface{}),
+		subAggregations: make(map[string]Aggregation),
+	}
+	return a
+}
+
+// Field sets the numeric field to compute stats over.
+func (a ExtendedStatsAggregation) Field(field string) ExtendedStatsAggregation {
+	a.field = field
+	return a
+}
+
+// Script sets a script to generate the values instead of a field.
+func (a ExtendedStatsAggregation) Script(script string) ExtendedStatsAggregation {
+	a.script = script
+	return a
+}
+
+// Lang sets the language of the script.
+func (a ExtendedStatsAggregation) Lang(lang string) ExtendedStatsAggregation {
+	a.lang = lang
+	return a
+}
+
+// Format sets the value format, e.g. "000.0".
+func (a ExtendedStatsAggregation) Format(format string) ExtendedStatsAggregation {
+	a.format = format
+	return a
+}
+
+// Param registers a named script parameter (map is shared between copies).
+func (a ExtendedStatsAggregation) Param(name string, value interface{}) ExtendedStatsAggregation {
+	a.params[name] = value
+	return a
+}
+
+// SubAggregation registers a sub-aggregation under the given name.
+func (a ExtendedStatsAggregation) SubAggregation(name string, subAggregation Aggregation) ExtendedStatsAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+// Source returns the JSON-serializable body of the aggregation; only
+// fields that were explicitly set are emitted.
+func (a ExtendedStatsAggregation) Source() interface{} {
+	// Example:
+	//	{
+	//    "aggs" : {
+	//      "grades_stats" : { "extended_stats" : { "field" : "grade" } }
+	//    }
+	//	}
+	// This method returns only the { "extended_stats" : { "field" : "grade" } } part.
+
+	source := make(map[string]interface{})
+	opts := make(map[string]interface{})
+	source["extended_stats"] = opts
+
+	// ValuesSourceAggregationBuilder
+	if a.field != "" {
+		opts["field"] = a.field
+	}
+	if a.script != "" {
+		opts["script"] = a.script
+	}
+	if a.lang != "" {
+		opts["lang"] = a.lang
+	}
+	if a.format != "" {
+		opts["format"] = a.format
+	}
+	if len(a.params) > 0 {
+		opts["params"] = a.params
+	}
+
+	// AggregationBuilder (SubAggregations)
+	if len(a.subAggregations) > 0 {
+		aggsMap := make(map[string]interface{})
+		source["aggregations"] = aggsMap
+		for name, aggregate := range a.subAggregations {
+			aggsMap[name] = aggregate.Source()
+		}
+	}
+
+	return source
+}

+ 32 - 0
sensitive/src/elastic.v1/search_aggs_extended_stats_test.go

@@ -0,0 +1,32 @@
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+// TestExtendedStatsAggregation checks serialization with only a field set.
+func TestExtendedStatsAggregation(t *testing.T) {
+	agg := NewExtendedStatsAggregation().Field("grade")
+	data, err := json.Marshal(agg.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"extended_stats":{"field":"grade"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+// TestExtendedStatsAggregationWithFormat checks that the value format
+// is included in the serialized output.
+func TestExtendedStatsAggregationWithFormat(t *testing.T) {
+	agg := NewExtendedStatsAggregation().Field("grade").Format("000.0")
+	data, err := json.Marshal(agg.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"extended_stats":{"field":"grade","format":"000.0"}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}

+ 58 - 0
sensitive/src/elastic.v1/search_aggs_filter.go

@@ -0,0 +1,58 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// FilterAggregation defines a single bucket of all the documents
+// in the current document set context that match a specified filter.
+// Often this will be used to narrow down the current aggregation context
+// to a specific set of documents.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filter-aggregation.html
+type FilterAggregation struct {
+	filter          Filter
+	subAggregations map[string]Aggregation
+}
+
+// NewFilterAggregation creates a new FilterAggregation with no filter set.
+func NewFilterAggregation() FilterAggregation {
+	a := FilterAggregation{
+		subAggregations: make(map[string]Aggregation),
+	}
+	return a
+}
+
+// SubAggregation registers a sub-aggregation under the given name.
+// The map is shared between value copies of this aggregation.
+func (a FilterAggregation) SubAggregation(name string, subAggregation Aggregation) FilterAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+// Filter sets the filter that defines the bucket.
+func (a FilterAggregation) Filter(filter Filter) FilterAggregation {
+	a.filter = filter
+	return a
+}
+
+// Source returns the JSON-serializable body of the aggregation.
+// NOTE(review): calling Source without having set a filter would
+// dereference a nil interface — callers must set Filter first.
+func (a FilterAggregation) Source() interface{} {
+	// Example:
+	//	{
+	//    "aggs" : {
+	//         "in_stock_products" : {
+	//             "filter" : { "range" : { "stock" : { "gt" : 0 } } }
+	//         }
+	//    }
+	//	}
+	// This method returns only the { "filter" : {} } part.
+
+	source := make(map[string]interface{})
+	source["filter"] = a.filter.Source()
+
+	// AggregationBuilder (SubAggregations)
+	if len(a.subAggregations) > 0 {
+		aggsMap := make(map[string]interface{})
+		source["aggregations"] = aggsMap
+		for name, aggregate := range a.subAggregations {
+			aggsMap[name] = aggregate.Source()
+		}
+	}
+
+	return source
+}

+ 36 - 0
sensitive/src/elastic.v1/search_aggs_filter_test.go

@@ -0,0 +1,36 @@
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+// TestFilterAggregation checks serialization of a filter aggregation
+// wrapping a range filter.
+func TestFilterAggregation(t *testing.T) {
+	filter := NewRangeFilter("stock").Gt(0)
+	agg := NewFilterAggregation().Filter(filter)
+	data, err := json.Marshal(agg.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"filter":{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+// TestFilterAggregationWithSubAggregation checks that a nested avg
+// sub-aggregation is serialized under "aggregations".
+func TestFilterAggregationWithSubAggregation(t *testing.T) {
+	avgPriceAgg := NewAvgAggregation().Field("price")
+	filter := NewRangeFilter("stock").Gt(0)
+	agg := NewFilterAggregation().Filter(filter).
+		SubAggregation("avg_price", avgPriceAgg)
+	data, err := json.Marshal(agg.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"aggregations":{"avg_price":{"avg":{"field":"price"}}},"filter":{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}

+ 76 - 0
sensitive/src/elastic.v1/search_aggs_filters.go

@@ -0,0 +1,76 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// FiltersAggregation defines a multi bucket aggregations where each bucket
+// is associated with a filter. Each bucket will collect all documents that
+// match its associated filter.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filters-aggregation.html
+type FiltersAggregation struct {
+	filters         []Filter
+	subAggregations map[string]Aggregation
+}
+
+func NewFiltersAggregation() FiltersAggregation {
+	return FiltersAggregation{
+		filters:         make([]Filter, 0),
+		subAggregations: make(map[string]Aggregation),
+	}
+}
+
+func (a FiltersAggregation) Filter(filter Filter) FiltersAggregation {
+	a.filters = append(a.filters, filter)
+	return a
+}
+
+func (a FiltersAggregation) Filters(filters ...Filter) FiltersAggregation {
+	if len(filters) > 0 {
+		a.filters = append(a.filters, filters...)
+	}
+	return a
+}
+
+func (a FiltersAggregation) SubAggregation(name string, subAggregation Aggregation) FiltersAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+func (a FiltersAggregation) Source() interface{} {
+	// Example:
+	//	{
+	//  "aggs" : {
+	//    "messages" : {
+	//      "filters" : {
+	//        "filters" : {
+	//          "errors" :   { "term" : { "body" : "error"   }},
+	//          "warnings" : { "term" : { "body" : "warning" }}
+	//        }
+	//      }
+	//    }
+	//  }
+	//	}
+	// This method returns only the (outer) { "filters" : {} } part.
+
+	source := make(map[string]interface{})
+	filters := make(map[string]interface{})
+	source["filters"] = filters
+
+	arr := make([]interface{}, len(a.filters))
+	for i, filter := range a.filters {
+		arr[i] = filter.Source()
+	}
+	filters["filters"] = arr
+
+	// AggregationBuilder (SubAggregations)
+	if len(a.subAggregations) > 0 {
+		aggsMap := make(map[string]interface{})
+		source["aggregations"] = aggsMap
+		for name, aggregate := range a.subAggregations {
+			aggsMap[name] = aggregate.Source()
+		}
+	}
+
+	return source
+}

+ 37 - 0
sensitive/src/elastic.v1/search_aggs_filters_test.go

@@ -0,0 +1,37 @@
package elastic

import (
	"encoding/json"
	"testing"
)

// TestFiltersAggregation verifies that multiple filters serialize as a
// JSON array in insertion order under the inner "filters" key.
func TestFiltersAggregation(t *testing.T) {
	f1 := NewRangeFilter("stock").Gt(0)
	f2 := NewTermFilter("symbol", "GOOG")
	agg := NewFiltersAggregation().Filters(f1, f2)
	data, err := json.Marshal(agg.Source())
	if err != nil {
		t.Fatalf("marshaling to JSON failed: %v", err)
	}
	got := string(data)
	expected := `{"filters":{"filters":[{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}},{"term":{"symbol":"GOOG"}}]}}`
	if got != expected {
		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
	}
}

// TestFiltersAggregationWithSubAggregation verifies that registered
// sub-aggregations are emitted under a sibling "aggregations" key.
func TestFiltersAggregationWithSubAggregation(t *testing.T) {
	avgPriceAgg := NewAvgAggregation().Field("price")
	f1 := NewRangeFilter("stock").Gt(0)
	f2 := NewTermFilter("symbol", "GOOG")
	agg := NewFiltersAggregation().Filters(f1, f2).SubAggregation("avg_price", avgPriceAgg)
	data, err := json.Marshal(agg.Source())
	if err != nil {
		t.Fatalf("marshaling to JSON failed: %v", err)
	}
	got := string(data)
	expected := `{"aggregations":{"avg_price":{"avg":{"field":"price"}}},"filters":{"filters":[{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}},{"term":{"symbol":"GOOG"}}]}}`
	if got != expected {
		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
	}
}

+ 95 - 0
sensitive/src/elastic.v1/search_aggs_geo_bounds.go

@@ -0,0 +1,95 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// GeoBoundsAggregation is a metric aggregation that computes the
+// bounding box containing all geo_point values for a field.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-geobounds-aggregation.html
+type GeoBoundsAggregation struct {
+	field         string
+	script        string
+	lang          string
+	params        map[string]interface{}
+	wrapLongitude *bool
+}
+
+func NewGeoBoundsAggregation() GeoBoundsAggregation {
+	a := GeoBoundsAggregation{}
+	return a
+}
+
+func (a GeoBoundsAggregation) Field(field string) GeoBoundsAggregation {
+	a.field = field
+	return a
+}
+
+func (a GeoBoundsAggregation) Script(script string) GeoBoundsAggregation {
+	a.script = script
+	return a
+}
+
+func (a GeoBoundsAggregation) Lang(lang string) GeoBoundsAggregation {
+	a.lang = lang
+	return a
+}
+
+func (a GeoBoundsAggregation) Params(params map[string]interface{}) GeoBoundsAggregation {
+	a.params = params
+	return a
+}
+
+func (a GeoBoundsAggregation) Param(name string, value interface{}) GeoBoundsAggregation {
+	if a.params == nil {
+		a.params = make(map[string]interface{})
+	}
+	a.params[name] = value
+	return a
+}
+
+func (a GeoBoundsAggregation) WrapLongitude(wrapLongitude bool) GeoBoundsAggregation {
+	a.wrapLongitude = &wrapLongitude
+	return a
+}
+
+func (a GeoBoundsAggregation) Source() interface{} {
+	// Example:
+	// {
+	//     "query" : {
+	//         "match" : { "business_type" : "shop" }
+	//     },
+	//     "aggs" : {
+	//         "viewport" : {
+	//             "geo_bounds" : {
+	//                 "field" : "location"
+	//                 "wrap_longitude" : "true"
+	//             }
+	//         }
+	//     }
+	// }
+	//
+	// This method returns only the { "geo_bounds" : { ... } } part.
+
+	source := make(map[string]interface{})
+	opts := make(map[string]interface{})
+	source["geo_bounds"] = opts
+
+	if a.field != "" {
+		opts["field"] = a.field
+	}
+	if a.script != "" {
+		opts["script"] = a.script
+	}
+	if a.lang != "" {
+		opts["lang"] = a.lang
+	}
+	if a.params != nil && len(a.params) > 0 {
+		opts["params"] = a.params
+	}
+	if a.wrapLongitude != nil {
+		opts["wrap_longitude"] = *a.wrapLongitude
+	}
+
+	return source
+}

+ 32 - 0
sensitive/src/elastic.v1/search_aggs_geo_bounds_test.go

@@ -0,0 +1,32 @@
package elastic

import (
	"encoding/json"
	"testing"
)

// TestGeoBoundsAggregation verifies the minimal serialization with
// only a field set; unset options must be omitted.
func TestGeoBoundsAggregation(t *testing.T) {
	agg := NewGeoBoundsAggregation().Field("location")
	data, err := json.Marshal(agg.Source())
	if err != nil {
		t.Fatalf("marshaling to JSON failed: %v", err)
	}
	got := string(data)
	expected := `{"geo_bounds":{"field":"location"}}`
	if got != expected {
		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
	}
}

// TestGeoBoundsAggregationWithWrapLongitude verifies that an explicitly
// set wrap_longitude flag is serialized as a JSON boolean.
func TestGeoBoundsAggregationWithWrapLongitude(t *testing.T) {
	agg := NewGeoBoundsAggregation().Field("location").WrapLongitude(true)
	data, err := json.Marshal(agg.Source())
	if err != nil {
		t.Fatalf("marshaling to JSON failed: %v", err)
	}
	got := string(data)
	expected := `{"geo_bounds":{"field":"location","wrap_longitude":true}}`
	if got != expected {
		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
	}
}

+ 180 - 0
sensitive/src/elastic.v1/search_aggs_geo_distance.go

@@ -0,0 +1,180 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
// GeoDistanceAggregation is a multi-bucket aggregation that works on geo_point fields
// and conceptually works very similar to the range aggregation.
// The user can define a point of origin and a set of distance range buckets.
// The aggregation evaluate the distance of each document value from
// the origin point and determines the buckets it belongs to based on
// the ranges (a document belongs to a bucket if the distance between the
// document and the origin falls within the distance range of the bucket).
// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-aggregations-bucket-geodistance-aggregation.html
type GeoDistanceAggregation struct {
	field           string
	unit            string
	distanceType    string
	point           string // origin as a "lat, lon" string
	ranges          []geoDistAggRange
	subAggregations map[string]Aggregation
}

// geoDistAggRange describes one distance bucket. A nil From or To marks
// that end of the range as unbounded and it is omitted from the JSON.
type geoDistAggRange struct {
	Key  string
	From interface{}
	To   interface{}
}

// NewGeoDistanceAggregation creates a new GeoDistanceAggregation with
// no ranges and no sub-aggregations.
func NewGeoDistanceAggregation() GeoDistanceAggregation {
	a := GeoDistanceAggregation{
		subAggregations: make(map[string]Aggregation),
		ranges:          make([]geoDistAggRange, 0),
	}
	return a
}

// Field sets the geo_point field to measure distances on.
func (a GeoDistanceAggregation) Field(field string) GeoDistanceAggregation {
	a.field = field
	return a
}

// Unit sets the distance unit (e.g. "km", "mi").
func (a GeoDistanceAggregation) Unit(unit string) GeoDistanceAggregation {
	a.unit = unit
	return a
}

// DistanceType sets the distance calculation type (e.g. "arc", "plane").
func (a GeoDistanceAggregation) DistanceType(distanceType string) GeoDistanceAggregation {
	a.distanceType = distanceType
	return a
}

// Point sets the origin as a "lat, lon" string; serialized as "origin".
func (a GeoDistanceAggregation) Point(latLon string) GeoDistanceAggregation {
	a.point = latLon
	return a
}

// SubAggregation registers a nested aggregation computed per bucket.
func (a GeoDistanceAggregation) SubAggregation(name string, subAggregation Aggregation) GeoDistanceAggregation {
	a.subAggregations[name] = subAggregation
	return a
}

// AddRange adds a bucket covering [from, to); pass nil to leave an end open.
func (a GeoDistanceAggregation) AddRange(from, to interface{}) GeoDistanceAggregation {
	a.ranges = append(a.ranges, geoDistAggRange{From: from, To: to})
	return a
}

// AddRangeWithKey adds a named bucket covering [from, to).
func (a GeoDistanceAggregation) AddRangeWithKey(key string, from, to interface{}) GeoDistanceAggregation {
	a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: from, To: to})
	return a
}

// AddUnboundedTo adds a bucket that starts at from and has no upper
// bound (serialized as {"from": from}).
func (a GeoDistanceAggregation) AddUnboundedTo(from float64) GeoDistanceAggregation {
	a.ranges = append(a.ranges, geoDistAggRange{From: from, To: nil})
	return a
}

// AddUnboundedToWithKey is AddUnboundedTo with an explicit bucket key.
func (a GeoDistanceAggregation) AddUnboundedToWithKey(key string, from float64) GeoDistanceAggregation {
	a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: from, To: nil})
	return a
}

// AddUnboundedFrom adds a bucket that ends at to and has no lower
// bound (serialized as {"to": to}).
func (a GeoDistanceAggregation) AddUnboundedFrom(to float64) GeoDistanceAggregation {
	a.ranges = append(a.ranges, geoDistAggRange{From: nil, To: to})
	return a
}

// AddUnboundedFromWithKey is AddUnboundedFrom with an explicit bucket key.
func (a GeoDistanceAggregation) AddUnboundedFromWithKey(key string, to float64) GeoDistanceAggregation {
	a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: nil, To: to})
	return a
}

// Between is an alias for AddRange.
func (a GeoDistanceAggregation) Between(from, to interface{}) GeoDistanceAggregation {
	a.ranges = append(a.ranges, geoDistAggRange{From: from, To: to})
	return a
}

// BetweenWithKey is an alias for AddRangeWithKey.
func (a GeoDistanceAggregation) BetweenWithKey(key string, from, to interface{}) GeoDistanceAggregation {
	a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: from, To: to})
	return a
}

// Source returns the JSON-serializable fragment of the aggregation.
// Example of a full request:
// {
//    "aggs" : {
//        "rings_around_amsterdam" : {
//            "geo_distance" : {
//                "field" : "location",
//                "origin" : "52.3760, 4.894",
//                "ranges" : [
//                    { "to" : 100 },
//                    { "from" : 100, "to" : 300 },
//                    { "from" : 300 }
//                ]
//            }
//        }
//    }
// }
//
// This method returns only the { "geo_distance" : { ... } } part.
func (a GeoDistanceAggregation) Source() interface{} {
	source := make(map[string]interface{})
	opts := make(map[string]interface{})
	source["geo_distance"] = opts

	if a.field != "" {
		opts["field"] = a.field
	}
	if a.unit != "" {
		opts["unit"] = a.unit
	}
	if a.distanceType != "" {
		opts["distance_type"] = a.distanceType
	}
	if a.point != "" {
		opts["origin"] = a.point
	}

	// Each range is emitted with only the non-nil ends. The type switches
	// whitelist numeric, numeric-pointer and string endpoint types; values
	// of any other type are silently dropped. Pointer values are stored
	// as-is — encoding/json marshals them as the pointed-to value.
	ranges := make([]interface{}, 0)
	for _, ent := range a.ranges {
		r := make(map[string]interface{})
		if ent.Key != "" {
			r["key"] = ent.Key
		}
		if ent.From != nil {
			switch from := ent.From.(type) {
			case int, int16, int32, int64, float32, float64:
				r["from"] = from
			case *int, *int16, *int32, *int64, *float32, *float64:
				r["from"] = from
			case string:
				r["from"] = from
			}
		}
		if ent.To != nil {
			switch to := ent.To.(type) {
			case int, int16, int32, int64, float32, float64:
				r["to"] = to
			case *int, *int16, *int32, *int64, *float32, *float64:
				r["to"] = to
			case string:
				r["to"] = to
			}
		}
		ranges = append(ranges, r)
	}
	opts["ranges"] = ranges

	// AggregationBuilder (SubAggregations)
	if len(a.subAggregations) > 0 {
		aggsMap := make(map[string]interface{})
		source["aggregations"] = aggsMap
		for name, aggregate := range a.subAggregations {
			aggsMap[name] = aggregate.Source()
		}
	}

	return source
}

+ 38 - 0
sensitive/src/elastic.v1/search_aggs_geo_distance_test.go

@@ -0,0 +1,38 @@
package elastic

import (
	"encoding/json"
	"testing"
)

// TestGeoDistanceAggregation serializes three ranges added via AddRange;
// nil endpoints are omitted from the resulting JSON objects.
func TestGeoDistanceAggregation(t *testing.T) {
	agg := NewGeoDistanceAggregation().Field("location").Point("52.3760, 4.894")
	agg = agg.AddRange(nil, 100)
	agg = agg.AddRange(100, 300)
	agg = agg.AddRange(300, nil)
	data, err := json.Marshal(agg.Source())
	if err != nil {
		t.Fatalf("marshaling to JSON failed: %v", err)
	}
	got := string(data)
	expected := `{"geo_distance":{"field":"location","origin":"52.3760, 4.894","ranges":[{"to":100},{"from":100,"to":300},{"from":300}]}}`
	if got != expected {
		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
	}
}

// TestGeoDistanceAggregationWithUnbounded exercises the unbounded
// helpers. Note the naming: AddUnboundedFrom(100) yields {"to":100}
// (no lower bound) and AddUnboundedTo(300) yields {"from":300}
// (no upper bound), so the output matches the previous test.
func TestGeoDistanceAggregationWithUnbounded(t *testing.T) {
	agg := NewGeoDistanceAggregation().Field("location").Point("52.3760, 4.894")
	agg = agg.AddUnboundedFrom(100)
	agg = agg.AddRange(100, 300)
	agg = agg.AddUnboundedTo(300)
	data, err := json.Marshal(agg.Source())
	if err != nil {
		t.Fatalf("marshaling to JSON failed: %v", err)
	}
	got := string(data)
	expected := `{"geo_distance":{"field":"location","origin":"52.3760, 4.894","ranges":[{"to":100},{"from":100,"to":300},{"from":300}]}}`
	if got != expected {
		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
	}
}

+ 56 - 0
sensitive/src/elastic.v1/search_aggs_global.go

@@ -0,0 +1,56 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// GlobalAggregation defines a single bucket of all the documents within
+// the search execution context. This context is defined by the indices
+// and the document types you’re searching on, but is not influenced
+// by the search query itself.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-global-aggregation.html
+type GlobalAggregation struct {
+	subAggregations map[string]Aggregation
+}
+
+func NewGlobalAggregation() GlobalAggregation {
+	a := GlobalAggregation{
+		subAggregations: make(map[string]Aggregation),
+	}
+	return a
+}
+
+func (a GlobalAggregation) SubAggregation(name string, subAggregation Aggregation) GlobalAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+func (a GlobalAggregation) Source() interface{} {
+	// Example:
+	//	{
+	//    "aggs" : {
+	//         "all_products" : {
+	//             "global" : {},
+	//             "aggs" : {
+	//                 "avg_price" : { "avg" : { "field" : "price" } }
+	//             }
+	//         }
+	//    }
+	//	}
+	// This method returns only the { "global" : {} } part.
+
+	source := make(map[string]interface{})
+	opts := make(map[string]interface{})
+	source["global"] = opts
+
+	// AggregationBuilder (SubAggregations)
+	if len(a.subAggregations) > 0 {
+		aggsMap := make(map[string]interface{})
+		source["aggregations"] = aggsMap
+		for name, aggregate := range a.subAggregations {
+			aggsMap[name] = aggregate.Source()
+		}
+	}
+
+	return source
+}

+ 19 - 0
sensitive/src/elastic.v1/search_aggs_global_test.go

@@ -0,0 +1,19 @@
package elastic

import (
	"encoding/json"
	"testing"
)

// TestGlobalAggregation verifies that a global aggregation with no
// sub-aggregations serializes as {"global":{}}.
func TestGlobalAggregation(t *testing.T) {
	agg := NewGlobalAggregation()
	data, err := json.Marshal(agg.Source())
	if err != nil {
		t.Fatalf("marshaling to JSON failed: %v", err)
	}
	got := string(data)
	expected := `{"global":{}}`
	if got != expected {
		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
	}
}

+ 228 - 0
sensitive/src/elastic.v1/search_aggs_histogram.go

@@ -0,0 +1,228 @@
+// Copyright 2012-2014 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// HistogramAggregation is a multi-bucket values source based aggregation
+// that can be applied on numeric values extracted from the documents.
+// It dynamically builds fixed size (a.k.a. interval) buckets over the
+// values.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-histogram-aggregation.html
+type HistogramAggregation struct {
+	field           string
+	script          string
+	lang            string
+	params          map[string]interface{}
+	subAggregations map[string]Aggregation
+
+	interval          int64
+	order             string
+	orderAsc          bool
+	minDocCount       *int64
+	extendedBoundsMin *int64
+	extendedBoundsMax *int64
+}
+
+func NewHistogramAggregation() HistogramAggregation {
+	a := HistogramAggregation{
+		params:          make(map[string]interface{}),
+		subAggregations: make(map[string]Aggregation),
+	}
+	return a
+}
+
+func (a HistogramAggregation) Field(field string) HistogramAggregation {
+	a.field = field
+	return a
+}
+
+func (a HistogramAggregation) Script(script string) HistogramAggregation {
+	a.script = script
+	return a
+}
+
+func (a HistogramAggregation) Lang(lang string) HistogramAggregation {
+	a.lang = lang
+	return a
+}
+
+func (a HistogramAggregation) Param(name string, value interface{}) HistogramAggregation {
+	a.params[name] = value
+	return a
+}
+
+func (a HistogramAggregation) SubAggregation(name string, subAggregation Aggregation) HistogramAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+func (a HistogramAggregation) Interval(interval int64) HistogramAggregation {
+	a.interval = interval
+	return a
+}
+
+// Order specifies the sort order. Valid values for order are:
+// "_key", "_count", a sub-aggregation name, or a sub-aggregation name
+// with a metric.
+func (a HistogramAggregation) Order(order string, asc bool) HistogramAggregation {
+	a.order = order
+	a.orderAsc = asc
+	return a
+}
+
+func (a HistogramAggregation) OrderByCount(asc bool) HistogramAggregation {
+	// "order" : { "_count" : "asc" }
+	a.order = "_count"
+	a.orderAsc = asc
+	return a
+}
+
+func (a HistogramAggregation) OrderByCountAsc() HistogramAggregation {
+	return a.OrderByCount(true)
+}
+
+func (a HistogramAggregation) OrderByCountDesc() HistogramAggregation {
+	return a.OrderByCount(false)
+}
+
+func (a HistogramAggregation) OrderByKey(asc bool) HistogramAggregation {
+	// "order" : { "_key" : "asc" }
+	a.order = "_key"
+	a.orderAsc = asc
+	return a
+}
+
+func (a HistogramAggregation) OrderByKeyAsc() HistogramAggregation {
+	return a.OrderByKey(true)
+}
+
+func (a HistogramAggregation) OrderByKeyDesc() HistogramAggregation {
+	return a.OrderByKey(false)
+}
+
+// OrderByAggregation creates a bucket ordering strategy which sorts buckets
+// based on a single-valued calc get.
+func (a HistogramAggregation) OrderByAggregation(aggName string, asc bool) HistogramAggregation {
+	// {
+	//     "aggs" : {
+	//         "genders" : {
+	//             "terms" : {
+	//                 "field" : "gender",
+	//                 "order" : { "avg_height" : "desc" }
+	//             },
+	//             "aggs" : {
+	//                 "avg_height" : { "avg" : { "field" : "height" } }
+	//             }
+	//         }
+	//     }
+	// }
+	a.order = aggName
+	a.orderAsc = asc
+	return a
+}
+
+// OrderByAggregationAndMetric creates a bucket ordering strategy which
+// sorts buckets based on a multi-valued calc get.
+func (a HistogramAggregation) OrderByAggregationAndMetric(aggName, metric string, asc bool) HistogramAggregation {
+	// {
+	//     "aggs" : {
+	//         "genders" : {
+	//             "terms" : {
+	//                 "field" : "gender",
+	//                 "order" : { "height_stats.avg" : "desc" }
+	//             },
+	//             "aggs" : {
+	//                 "height_stats" : { "stats" : { "field" : "height" } }
+	//             }
+	//         }
+	//     }
+	// }
+	a.order = aggName + "." + metric
+	a.orderAsc = asc
+	return a
+}
+
+func (a HistogramAggregation) MinDocCount(minDocCount int64) HistogramAggregation {
+	a.minDocCount = &minDocCount
+	return a
+}
+
+func (a HistogramAggregation) ExtendedBoundsMin(min int64) HistogramAggregation {
+	a.extendedBoundsMin = &min
+	return a
+}
+
+func (a HistogramAggregation) ExtendedBoundsMax(max int64) HistogramAggregation {
+	a.extendedBoundsMax = &max
+	return a
+}
+
+func (a HistogramAggregation) Source() interface{} {
+	// Example:
+	// {
+	//     "aggs" : {
+	//         "prices" : {
+	//             "histogram" : {
+	//                 "field" : "price",
+	//                 "interval" : 50
+	//             }
+	//         }
+	//     }
+	// }
+	//
+	// This method returns only the { "histogram" : { ... } } part.
+
+	source := make(map[string]interface{})
+	opts := make(map[string]interface{})
+	source["histogram"] = opts
+
+	// ValuesSourceAggregationBuilder
+	if a.field != "" {
+		opts["field"] = a.field
+	}
+	if a.script != "" {
+		opts["script"] = a.script
+	}
+	if a.lang != "" {
+		opts["lang"] = a.lang
+	}
+	if len(a.params) > 0 {
+		opts["params"] = a.params
+	}
+
+	opts["interval"] = a.interval
+	if a.order != "" {
+		o := make(map[string]interface{})
+		if a.orderAsc {
+			o[a.order] = "asc"
+		} else {
+			o[a.order] = "desc"
+		}
+		opts["order"] = o
+	}
+	if a.minDocCount != nil {
+		opts["min_doc_count"] = *a.minDocCount
+	}
+	if a.extendedBoundsMin != nil || a.extendedBoundsMax != nil {
+		bounds := make(map[string]interface{})
+		if a.extendedBoundsMin != nil {
+			bounds["min"] = a.extendedBoundsMin
+		}
+		if a.extendedBoundsMax != nil {
+			bounds["max"] = a.extendedBoundsMax
+		}
+		opts["extended_bounds"] = bounds
+	}
+
+	// AggregationBuilder (SubAggregations)
+	if len(a.subAggregations) > 0 {
+		aggsMap := make(map[string]interface{})
+		source["aggregations"] = aggsMap
+		for name, aggregate := range a.subAggregations {
+			aggsMap[name] = aggregate.Source()
+		}
+	}
+
+	return source
+}

+ 19 - 0
sensitive/src/elastic.v1/search_aggs_histogram_test.go

@@ -0,0 +1,19 @@
package elastic

import (
	"encoding/json"
	"testing"
)

// TestHistogramAggregation verifies the minimal histogram serialization;
// "interval" is always emitted alongside the field.
func TestHistogramAggregation(t *testing.T) {
	agg := NewHistogramAggregation().Field("price").Interval(50)
	data, err := json.Marshal(agg.Source())
	if err != nil {
		t.Fatalf("marshaling to JSON failed: %v", err)
	}
	got := string(data)
	expected := `{"histogram":{"field":"price","interval":50}}`
	if got != expected {
		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
	}
}

部分文件因为文件数量过多而无法显示